Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core locking updates from Ingo Molnar:
 "This update:

   - extends and simplifies x86 NMI callback handling code to enhance
     and fix the HP hw-watchdog driver

   - simplifies the x86 NMI callback handling code to fix a kmemcheck
     bug

   - enhances the hung-task debugger"

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/nmi: Fix the type of the nmiaction.flags field
  x86/nmi: Fix page faults by nmiaction if kmemcheck is enabled
  x86/nmi: Add new NMI queues to deal with IO_CHK and SERR
  watchdog, hpwdt: Remove priority option for NMI callback
  hung task debugging: Inject NMI when hung and going to panic
diff --git a/Documentation/ABI/removed/ip_queue b/Documentation/ABI/removed/ip_queue
new file mode 100644
index 0000000..3243613
--- /dev/null
+++ b/Documentation/ABI/removed/ip_queue
@@ -0,0 +1,9 @@
+What:		ip_queue
+Date:		finally removed in kernel v3.5.0
+Contact:	Pablo Neira Ayuso <pablo@netfilter.org>
+Description:
+	ip_queue has been replaced by nfnetlink_queue, which provides
+	a more advanced queueing mechanism to user-space. The ip_queue
+	module was announced as obsolete years ago.
+
+Users:
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh
index b218e0f..c81fe89 100644
--- a/Documentation/ABI/testing/sysfs-class-net-mesh
+++ b/Documentation/ABI/testing/sysfs-class-net-mesh
@@ -14,6 +14,15 @@
                 mesh will be sent using multiple interfaces at the
                 same time (if available).
 
+What:           /sys/class/net/<mesh_iface>/mesh/bridge_loop_avoidance
+Date:           November 2011
+Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Description:
+                Indicates whether the bridge loop avoidance feature
+                is enabled. This feature detects and avoids loops
+                between the mesh and devices bridged with the soft
+                interface <mesh_iface>.
+
 What:           /sys/class/net/<mesh_iface>/mesh/fragmentation
 Date:           October 2010
 Contact:        Andreas Langer <an.langer@gmx.de>
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index c5ac692..f3e214f 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -516,7 +516,7 @@
 !Finclude/net/mac80211.h ieee80211_start_tx_ba_cb_irqsafe
 !Finclude/net/mac80211.h ieee80211_stop_tx_ba_session
 !Finclude/net/mac80211.h ieee80211_stop_tx_ba_cb_irqsafe
-!Finclude/net/mac80211.h rate_control_changed
+!Finclude/net/mac80211.h ieee80211_rate_control_changed
 !Finclude/net/mac80211.h ieee80211_tx_rate_control
 !Finclude/net/mac80211.h rate_control_send_low
       </chapter>
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
new file mode 100644
index 0000000..52478c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -0,0 +1,27 @@
+* ARM architected timer
+
+ARM Cortex-A7 and Cortex-A15 have a per-core architected timer, which
+provides per-cpu timers.
+
+The timer is attached to a GIC to deliver its per-processor interrupts.
+
+** Timer node properties:
+
+- compatible : Should at least contain "arm,armv7-timer".
+
+- interrupts : Interrupt list for secure, non-secure, virtual and
+  hypervisor timers, in that order.
+
+- clock-frequency : The frequency of the main counter, in Hz. Optional.
+
+Example:
+
+	timer {
+		compatible = "arm,cortex-a15-timer",
+			     "arm,armv7-timer";
+		interrupts = <1 13 0xf08>,
+			     <1 14 0xf08>,
+			     <1 11 0xf08>,
+			     <1 10 0xf08>;
+		clock-frequency = <100000000>;
+	};
diff --git a/Documentation/devicetree/bindings/ata/calxeda-sata.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
similarity index 90%
rename from Documentation/devicetree/bindings/ata/calxeda-sata.txt
rename to Documentation/devicetree/bindings/ata/ahci-platform.txt
index 79caa56..8bb8a76 100644
--- a/Documentation/devicetree/bindings/ata/calxeda-sata.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -1,10 +1,10 @@
-* Calxeda SATA Controller
+* AHCI SATA Controller
 
 SATA nodes are defined to describe on-chip Serial ATA controllers.
 Each SATA controller should have its own node.
 
 Required properties:
-- compatible        : compatible list, contains "calxeda,hb-ahci"
+- compatible        : compatible list, contains "calxeda,hb-ahci" or "snps,spear-ahci"
 - interrupts        : <interrupt mapping for SATA IRQ>
 - reg               : <registers mapping>
 
@@ -14,4 +14,3 @@
                 reg = <0xffe08000 0x1000>;
                 interrupts = <115>;
         };
-
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt
new file mode 100644
index 0000000..7938411
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt
@@ -0,0 +1,127 @@
+Properties for an MDIO bus multiplexer/switch controlled by GPIO pins.
+
+This is a special case of an MDIO bus multiplexer.  One or more GPIO
+lines are used to control which child bus is connected.
+
+Required properties in addition to the generic multiplexer properties:
+
+- compatible : mdio-mux-gpio.
+- gpios : GPIO specifiers for each GPIO line.  One or more must be specified.
+
+
+Example :
+
+	/* The parent MDIO bus. */
+	smi1: mdio@1180000001900 {
+		compatible = "cavium,octeon-3860-mdio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x11800 0x00001900 0x0 0x40>;
+	};
+
+	/*
+	   An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a
+	   pair of GPIO lines.  Child busses 2 and 3 populated with 4
+	   PHYs each.
+	 */
+	mdio-mux {
+		compatible = "mdio-mux-gpio";
+		gpios = <&gpio1 3 0>, <&gpio1 4 0>;
+		mdio-parent-bus = <&smi1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mdio@2 {
+			reg = <2>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy11: ethernet-phy@1 {
+				reg = <1>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy12: ethernet-phy@2 {
+				reg = <2>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy13: ethernet-phy@3 {
+				reg = <3>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy14: ethernet-phy@4 {
+				reg = <4>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+		};
+
+		mdio@3 {
+			reg = <3>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy21: ethernet-phy@1 {
+				reg = <1>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy22: ethernet-phy@2 {
+				reg = <2>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy23: ethernet-phy@3 {
+				reg = <3>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy24: ethernet-phy@4 {
+				reg = <4>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/mdio-mux.txt b/Documentation/devicetree/bindings/net/mdio-mux.txt
new file mode 100644
index 0000000..f65606f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux.txt
@@ -0,0 +1,136 @@
+Common MDIO bus multiplexer/switch properties.
+
+An MDIO bus multiplexer/switch will have several child busses that are
+numbered uniquely in a device dependent manner.  The nodes for an MDIO
+bus multiplexer/switch will have one child node for each child bus.
+
+Required properties:
+- mdio-parent-bus : phandle to the parent MDIO bus.
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- Other properties specific to the multiplexer/switch hardware.
+
+Required properties for child nodes:
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg : The sub-bus number.
+
+
+Example :
+
+	/* The parent MDIO bus. */
+	smi1: mdio@1180000001900 {
+		compatible = "cavium,octeon-3860-mdio";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x11800 0x00001900 0x0 0x40>;
+	};
+
+	/*
+	   An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a
+	   pair of GPIO lines.  Child busses 2 and 3 populated with 4
+	   PHYs each.
+	 */
+	mdio-mux {
+		compatible = "mdio-mux-gpio";
+		gpios = <&gpio1 3 0>, <&gpio1 4 0>;
+		mdio-parent-bus = <&smi1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mdio@2 {
+			reg = <2>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy11: ethernet-phy@1 {
+				reg = <1>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy12: ethernet-phy@2 {
+				reg = <2>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy13: ethernet-phy@3 {
+				reg = <3>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+			phy14: ethernet-phy@4 {
+				reg = <4>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <10 8>; /* Pin 10, active low */
+			};
+		};
+
+		mdio@3 {
+			reg = <3>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy21: ethernet-phy@1 {
+				reg = <1>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy22: ethernet-phy@2 {
+				reg = <2>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy23: ethernet-phy@3 {
+				reg = <3>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+			phy24: ethernet-phy@4 {
+				reg = <4>;
+				compatible = "marvell,88e1149r";
+				marvell,reg-init = <3 0x10 0 0x5777>,
+					<3 0x11 0 0x00aa>,
+					<3 0x12 0 0x4105>,
+					<3 0x13 0 0x0a60>;
+				interrupt-parent = <&gpio>;
+				interrupts = <12 8>; /* Pin 12, active low */
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt
new file mode 100644
index 0000000..ab19e6b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt
@@ -0,0 +1,95 @@
+* Freescale IOMUX Controller (IOMUXC) for i.MX
+
+The IOMUX Controller (IOMUXC), together with the IOMUX, enables the IC
+to share one PAD among several functional blocks. The sharing is done by
+multiplexing the PAD input/output signals. For each PAD there are up to
+8 muxing options (called ALT modes). Since different modules require
+different PAD settings (like pull-up, keeper, etc.), the IOMUXC also
+controls the PAD setting parameters.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+A Freescale IMX pin configuration node is a node of a group of pins which can
+be used for a specific device or function. This node represents both the mux
+and config of the pins in that group. The 'mux' selects the function mode
+(also called mux mode) the pins can work in, and the 'config' sets various
+pad settings such as pull-up, open drain, drive strength, etc.
+
+Required properties for iomux controller:
+- compatible: "fsl,<soc>-iomuxc"
+  Please refer to each fsl,<soc>-pinctrl.txt binding doc for supported SoCs.
+
+Required properties for pin configuration node:
+- fsl,pins: an array of integer pairs, each representing a pin's mux and
+  config setting. The format is fsl,pins = <PIN_FUNC_ID CONFIG>, where
+  PIN_FUNC_ID selects a pin working in a specific function and CONFIG is the
+  pad setting value (e.g. pull-up) for this pin. Please refer to
+  fsl,<soc>-pinctrl.txt for the valid pins and functions of each SoC.
+
+Bits used for CONFIG:
+NO_PAD_CTL(1 << 31): indicates this pin does not need config.
+
+SION(1 << 30): Software Input On Field.
+Forces the input path on, regardless of the selected MUX_MODE functionality.
+By default the input path is determined by the functionality of the selected
+mux mode (regular).
+
+Other bits are used for PAD settings.
+Please refer to each fsl,<soc>-pinctrl.txt binding doc for the SoC-specific
+part of the bit definitions.
+
+NOTE:
+Some requirements for using the fsl,imx-pinctrl binding:
+1. Pin function nodes are defined under the iomux controller node to represent
+   the pinmux functions this SoC supports.
+2. A pin configuration node intended to work on a specific function should
+   be defined under that specific function node.
+   The function node's name should clearly describe what function the group
+   of pins in the pin configuration node is working on.
+3. The driver can use the function node's name and the pin configuration
+   node's name to describe the pin function and group hierarchy.
+   For example, the Linux IMX pinctrl driver takes the function node's name
+   as the function name and the pin configuration node's name as the group
+   name to create the map table.
+4. Each pin configuration node should have a phandle; devices can set pin
+   configurations by referring to the phandle of that pin configuration node.
+
+Examples:
+usdhc@0219c000 { /* uSDHC4 */
+	fsl,card-wired;
+	vmmc-supply = <&reg_3p3v>;
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc4_1>;
+};
+
+iomuxc@020e0000 {
+	compatible = "fsl,imx6q-iomuxc";
+	reg = <0x020e0000 0x4000>;
+
+	/* shared pinctrl settings */
+	usdhc4 {
+		pinctrl_usdhc4_1: usdhc4grp-1 {
+			fsl,pins = <1386 0x17059	/* MX6Q_PAD_SD4_CMD__USDHC4_CMD */
+				    1392 0x10059	/* MX6Q_PAD_SD4_CLK__USDHC4_CLK	*/
+				    1462 0x17059	/* MX6Q_PAD_SD4_DAT0__USDHC4_DAT0 */
+				    1470 0x17059	/* MX6Q_PAD_SD4_DAT1__USDHC4_DAT1 */
+				    1478 0x17059	/* MX6Q_PAD_SD4_DAT2__USDHC4_DAT2 */
+				    1486 0x17059	/* MX6Q_PAD_SD4_DAT3__USDHC4_DAT3 */
+				    1493 0x17059	/* MX6Q_PAD_SD4_DAT4__USDHC4_DAT4 */
+				    1501 0x17059	/* MX6Q_PAD_SD4_DAT5__USDHC4_DAT5 */
+				    1509 0x17059	/* MX6Q_PAD_SD4_DAT6__USDHC4_DAT6 */
+				    1517 0x17059>;	/* MX6Q_PAD_SD4_DAT7__USDHC4_DAT7 */
+		};
+	};
+	....
+};
+Referring to the IOMUXC chapter of the imx6q datasheet, 0x17059 means
+hysteresis enabled, 47 kOhm pull-up, 50 MHz speed, 80 Ohm drive strength
+and fast slew rate.
+Users should refer to each SoC's datasheet to set the correct value.
+
+TODO: when dtc macro support is available, the raw data above can be changed
+to dt macros for better readability in the dts file.
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx51-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx51-pinctrl.txt
new file mode 100644
index 0000000..b96fa4c
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx51-pinctrl.txt
@@ -0,0 +1,787 @@
+* Freescale IMX51 IOMUX Controller
+
+Please refer to fsl,imx-pinctrl.txt in this directory for the common binding
+part and usage.
+
+Required properties:
+- compatible: "fsl,imx51-iomuxc"
+- fsl,pins: an array of integer pairs, each representing a pin's mux and
+  config setting. The format is fsl,pins = <PIN_FUNC_ID CONFIG>, where
+  PIN_FUNC_ID selects a pin working in a specific function and CONFIG is the
+  pad setting value (e.g. pull-up) for this pin. Please refer to the imx51
+  datasheet for the valid pad config settings.
+
+CONFIG bits definition:
+PAD_CTL_HVE			(1 << 13)
+PAD_CTL_HYS			(1 << 8)
+PAD_CTL_PKE			(1 << 7)
+PAD_CTL_PUE			(1 << 6)
+PAD_CTL_PUS_100K_DOWN		(0 << 4)
+PAD_CTL_PUS_47K_UP		(1 << 4)
+PAD_CTL_PUS_100K_UP		(2 << 4)
+PAD_CTL_PUS_22K_UP		(3 << 4)
+PAD_CTL_ODE			(1 << 3)
+PAD_CTL_DSE_LOW			(0 << 1)
+PAD_CTL_DSE_MED			(1 << 1)
+PAD_CTL_DSE_HIGH		(2 << 1)
+PAD_CTL_DSE_MAX			(3 << 1)
+PAD_CTL_SRE_FAST		(1 << 0)
+PAD_CTL_SRE_SLOW		(0 << 0)
+
+See below for available PIN_FUNC_ID for imx51:
+MX51_PAD_EIM_D16__AUD4_RXFS			0
+MX51_PAD_EIM_D16__AUD5_TXD			1
+MX51_PAD_EIM_D16__EIM_D16			2
+MX51_PAD_EIM_D16__GPIO2_0			3
+MX51_PAD_EIM_D16__I2C1_SDA			4
+MX51_PAD_EIM_D16__UART2_CTS			5
+MX51_PAD_EIM_D16__USBH2_DATA0			6
+MX51_PAD_EIM_D17__AUD5_RXD			7
+MX51_PAD_EIM_D17__EIM_D17			8
+MX51_PAD_EIM_D17__GPIO2_1			9
+MX51_PAD_EIM_D17__UART2_RXD			10
+MX51_PAD_EIM_D17__UART3_CTS			11
+MX51_PAD_EIM_D17__USBH2_DATA1			12
+MX51_PAD_EIM_D18__AUD5_TXC			13
+MX51_PAD_EIM_D18__EIM_D18			14
+MX51_PAD_EIM_D18__GPIO2_2			15
+MX51_PAD_EIM_D18__UART2_TXD			16
+MX51_PAD_EIM_D18__UART3_RTS			17
+MX51_PAD_EIM_D18__USBH2_DATA2			18
+MX51_PAD_EIM_D19__AUD4_RXC			19
+MX51_PAD_EIM_D19__AUD5_TXFS			20
+MX51_PAD_EIM_D19__EIM_D19			21
+MX51_PAD_EIM_D19__GPIO2_3			22
+MX51_PAD_EIM_D19__I2C1_SCL			23
+MX51_PAD_EIM_D19__UART2_RTS			24
+MX51_PAD_EIM_D19__USBH2_DATA3			25
+MX51_PAD_EIM_D20__AUD4_TXD			26
+MX51_PAD_EIM_D20__EIM_D20			27
+MX51_PAD_EIM_D20__GPIO2_4			28
+MX51_PAD_EIM_D20__SRTC_ALARM_DEB		29
+MX51_PAD_EIM_D20__USBH2_DATA4			30
+MX51_PAD_EIM_D21__AUD4_RXD			31
+MX51_PAD_EIM_D21__EIM_D21			32
+MX51_PAD_EIM_D21__GPIO2_5			33
+MX51_PAD_EIM_D21__SRTC_ALARM_DEB		34
+MX51_PAD_EIM_D21__USBH2_DATA5			35
+MX51_PAD_EIM_D22__AUD4_TXC			36
+MX51_PAD_EIM_D22__EIM_D22			37
+MX51_PAD_EIM_D22__GPIO2_6			38
+MX51_PAD_EIM_D22__USBH2_DATA6			39
+MX51_PAD_EIM_D23__AUD4_TXFS			40
+MX51_PAD_EIM_D23__EIM_D23			41
+MX51_PAD_EIM_D23__GPIO2_7			42
+MX51_PAD_EIM_D23__SPDIF_OUT1			43
+MX51_PAD_EIM_D23__USBH2_DATA7			44
+MX51_PAD_EIM_D24__AUD6_RXFS			45
+MX51_PAD_EIM_D24__EIM_D24			46
+MX51_PAD_EIM_D24__GPIO2_8			47
+MX51_PAD_EIM_D24__I2C2_SDA			48
+MX51_PAD_EIM_D24__UART3_CTS			49
+MX51_PAD_EIM_D24__USBOTG_DATA0			50
+MX51_PAD_EIM_D25__EIM_D25			51
+MX51_PAD_EIM_D25__KEY_COL6			52
+MX51_PAD_EIM_D25__UART2_CTS			53
+MX51_PAD_EIM_D25__UART3_RXD			54
+MX51_PAD_EIM_D25__USBOTG_DATA1			55
+MX51_PAD_EIM_D26__EIM_D26			56
+MX51_PAD_EIM_D26__KEY_COL7			57
+MX51_PAD_EIM_D26__UART2_RTS			58
+MX51_PAD_EIM_D26__UART3_TXD			59
+MX51_PAD_EIM_D26__USBOTG_DATA2			60
+MX51_PAD_EIM_D27__AUD6_RXC			61
+MX51_PAD_EIM_D27__EIM_D27			62
+MX51_PAD_EIM_D27__GPIO2_9			63
+MX51_PAD_EIM_D27__I2C2_SCL			64
+MX51_PAD_EIM_D27__UART3_RTS			65
+MX51_PAD_EIM_D27__USBOTG_DATA3			66
+MX51_PAD_EIM_D28__AUD6_TXD			67
+MX51_PAD_EIM_D28__EIM_D28			68
+MX51_PAD_EIM_D28__KEY_ROW4			69
+MX51_PAD_EIM_D28__USBOTG_DATA4			70
+MX51_PAD_EIM_D29__AUD6_RXD			71
+MX51_PAD_EIM_D29__EIM_D29			72
+MX51_PAD_EIM_D29__KEY_ROW5			73
+MX51_PAD_EIM_D29__USBOTG_DATA5			74
+MX51_PAD_EIM_D30__AUD6_TXC			75
+MX51_PAD_EIM_D30__EIM_D30			76
+MX51_PAD_EIM_D30__KEY_ROW6			77
+MX51_PAD_EIM_D30__USBOTG_DATA6			78
+MX51_PAD_EIM_D31__AUD6_TXFS			79
+MX51_PAD_EIM_D31__EIM_D31			80
+MX51_PAD_EIM_D31__KEY_ROW7			81
+MX51_PAD_EIM_D31__USBOTG_DATA7			82
+MX51_PAD_EIM_A16__EIM_A16			83
+MX51_PAD_EIM_A16__GPIO2_10			84
+MX51_PAD_EIM_A16__OSC_FREQ_SEL0			85
+MX51_PAD_EIM_A17__EIM_A17			86
+MX51_PAD_EIM_A17__GPIO2_11			87
+MX51_PAD_EIM_A17__OSC_FREQ_SEL1			88
+MX51_PAD_EIM_A18__BOOT_LPB0			89
+MX51_PAD_EIM_A18__EIM_A18			90
+MX51_PAD_EIM_A18__GPIO2_12			91
+MX51_PAD_EIM_A19__BOOT_LPB1			92
+MX51_PAD_EIM_A19__EIM_A19			93
+MX51_PAD_EIM_A19__GPIO2_13			94
+MX51_PAD_EIM_A20__BOOT_UART_SRC0		95
+MX51_PAD_EIM_A20__EIM_A20			96
+MX51_PAD_EIM_A20__GPIO2_14			97
+MX51_PAD_EIM_A21__BOOT_UART_SRC1		98
+MX51_PAD_EIM_A21__EIM_A21			99
+MX51_PAD_EIM_A21__GPIO2_15			100
+MX51_PAD_EIM_A22__EIM_A22			101
+MX51_PAD_EIM_A22__GPIO2_16			102
+MX51_PAD_EIM_A23__BOOT_HPN_EN			103
+MX51_PAD_EIM_A23__EIM_A23			104
+MX51_PAD_EIM_A23__GPIO2_17			105
+MX51_PAD_EIM_A24__EIM_A24			106
+MX51_PAD_EIM_A24__GPIO2_18			107
+MX51_PAD_EIM_A24__USBH2_CLK			108
+MX51_PAD_EIM_A25__DISP1_PIN4			109
+MX51_PAD_EIM_A25__EIM_A25			110
+MX51_PAD_EIM_A25__GPIO2_19			111
+MX51_PAD_EIM_A25__USBH2_DIR			112
+MX51_PAD_EIM_A26__CSI1_DATA_EN			113
+MX51_PAD_EIM_A26__DISP2_EXT_CLK			114
+MX51_PAD_EIM_A26__EIM_A26			115
+MX51_PAD_EIM_A26__GPIO2_20			116
+MX51_PAD_EIM_A26__USBH2_STP			117
+MX51_PAD_EIM_A27__CSI2_DATA_EN			118
+MX51_PAD_EIM_A27__DISP1_PIN1			119
+MX51_PAD_EIM_A27__EIM_A27			120
+MX51_PAD_EIM_A27__GPIO2_21			121
+MX51_PAD_EIM_A27__USBH2_NXT			122
+MX51_PAD_EIM_EB0__EIM_EB0			123
+MX51_PAD_EIM_EB1__EIM_EB1			124
+MX51_PAD_EIM_EB2__AUD5_RXFS			125
+MX51_PAD_EIM_EB2__CSI1_D2			126
+MX51_PAD_EIM_EB2__EIM_EB2			127
+MX51_PAD_EIM_EB2__FEC_MDIO			128
+MX51_PAD_EIM_EB2__GPIO2_22			129
+MX51_PAD_EIM_EB2__GPT_CMPOUT1			130
+MX51_PAD_EIM_EB3__AUD5_RXC			131
+MX51_PAD_EIM_EB3__CSI1_D3			132
+MX51_PAD_EIM_EB3__EIM_EB3			133
+MX51_PAD_EIM_EB3__FEC_RDATA1			134
+MX51_PAD_EIM_EB3__GPIO2_23			135
+MX51_PAD_EIM_EB3__GPT_CMPOUT2			136
+MX51_PAD_EIM_OE__EIM_OE				137
+MX51_PAD_EIM_OE__GPIO2_24			138
+MX51_PAD_EIM_CS0__EIM_CS0			139
+MX51_PAD_EIM_CS0__GPIO2_25			140
+MX51_PAD_EIM_CS1__EIM_CS1			141
+MX51_PAD_EIM_CS1__GPIO2_26			142
+MX51_PAD_EIM_CS2__AUD5_TXD			143
+MX51_PAD_EIM_CS2__CSI1_D4			144
+MX51_PAD_EIM_CS2__EIM_CS2			145
+MX51_PAD_EIM_CS2__FEC_RDATA2			146
+MX51_PAD_EIM_CS2__GPIO2_27			147
+MX51_PAD_EIM_CS2__USBOTG_STP			148
+MX51_PAD_EIM_CS3__AUD5_RXD			149
+MX51_PAD_EIM_CS3__CSI1_D5			150
+MX51_PAD_EIM_CS3__EIM_CS3			151
+MX51_PAD_EIM_CS3__FEC_RDATA3			152
+MX51_PAD_EIM_CS3__GPIO2_28			153
+MX51_PAD_EIM_CS3__USBOTG_NXT			154
+MX51_PAD_EIM_CS4__AUD5_TXC			155
+MX51_PAD_EIM_CS4__CSI1_D6			156
+MX51_PAD_EIM_CS4__EIM_CS4			157
+MX51_PAD_EIM_CS4__FEC_RX_ER			158
+MX51_PAD_EIM_CS4__GPIO2_29			159
+MX51_PAD_EIM_CS4__USBOTG_CLK			160
+MX51_PAD_EIM_CS5__AUD5_TXFS			161
+MX51_PAD_EIM_CS5__CSI1_D7			162
+MX51_PAD_EIM_CS5__DISP1_EXT_CLK			163
+MX51_PAD_EIM_CS5__EIM_CS5			164
+MX51_PAD_EIM_CS5__FEC_CRS			165
+MX51_PAD_EIM_CS5__GPIO2_30			166
+MX51_PAD_EIM_CS5__USBOTG_DIR			167
+MX51_PAD_EIM_DTACK__EIM_DTACK			168
+MX51_PAD_EIM_DTACK__GPIO2_31			169
+MX51_PAD_EIM_LBA__EIM_LBA			170
+MX51_PAD_EIM_LBA__GPIO3_1			171
+MX51_PAD_EIM_CRE__EIM_CRE			172
+MX51_PAD_EIM_CRE__GPIO3_2			173
+MX51_PAD_DRAM_CS1__DRAM_CS1			174
+MX51_PAD_NANDF_WE_B__GPIO3_3			175
+MX51_PAD_NANDF_WE_B__NANDF_WE_B			176
+MX51_PAD_NANDF_WE_B__PATA_DIOW			177
+MX51_PAD_NANDF_WE_B__SD3_DATA0			178
+MX51_PAD_NANDF_RE_B__GPIO3_4			179
+MX51_PAD_NANDF_RE_B__NANDF_RE_B			180
+MX51_PAD_NANDF_RE_B__PATA_DIOR			181
+MX51_PAD_NANDF_RE_B__SD3_DATA1			182
+MX51_PAD_NANDF_ALE__GPIO3_5			183
+MX51_PAD_NANDF_ALE__NANDF_ALE			184
+MX51_PAD_NANDF_ALE__PATA_BUFFER_EN		185
+MX51_PAD_NANDF_CLE__GPIO3_6			186
+MX51_PAD_NANDF_CLE__NANDF_CLE			187
+MX51_PAD_NANDF_CLE__PATA_RESET_B		188
+MX51_PAD_NANDF_WP_B__GPIO3_7			189
+MX51_PAD_NANDF_WP_B__NANDF_WP_B			190
+MX51_PAD_NANDF_WP_B__PATA_DMACK			191
+MX51_PAD_NANDF_WP_B__SD3_DATA2			192
+MX51_PAD_NANDF_RB0__ECSPI2_SS1			193
+MX51_PAD_NANDF_RB0__GPIO3_8			194
+MX51_PAD_NANDF_RB0__NANDF_RB0			195
+MX51_PAD_NANDF_RB0__PATA_DMARQ			196
+MX51_PAD_NANDF_RB0__SD3_DATA3			197
+MX51_PAD_NANDF_RB1__CSPI_MOSI			198
+MX51_PAD_NANDF_RB1__ECSPI2_RDY			199
+MX51_PAD_NANDF_RB1__GPIO3_9			200
+MX51_PAD_NANDF_RB1__NANDF_RB1			201
+MX51_PAD_NANDF_RB1__PATA_IORDY			202
+MX51_PAD_NANDF_RB1__SD4_CMD			203
+MX51_PAD_NANDF_RB2__DISP2_WAIT			204
+MX51_PAD_NANDF_RB2__ECSPI2_SCLK			205
+MX51_PAD_NANDF_RB2__FEC_COL			206
+MX51_PAD_NANDF_RB2__GPIO3_10			207
+MX51_PAD_NANDF_RB2__NANDF_RB2			208
+MX51_PAD_NANDF_RB2__USBH3_H3_DP			209
+MX51_PAD_NANDF_RB2__USBH3_NXT			210
+MX51_PAD_NANDF_RB3__DISP1_WAIT			211
+MX51_PAD_NANDF_RB3__ECSPI2_MISO			212
+MX51_PAD_NANDF_RB3__FEC_RX_CLK			213
+MX51_PAD_NANDF_RB3__GPIO3_11			214
+MX51_PAD_NANDF_RB3__NANDF_RB3			215
+MX51_PAD_NANDF_RB3__USBH3_CLK			216
+MX51_PAD_NANDF_RB3__USBH3_H3_DM			217
+MX51_PAD_GPIO_NAND__GPIO_NAND			218
+MX51_PAD_GPIO_NAND__PATA_INTRQ			219
+MX51_PAD_NANDF_CS0__GPIO3_16			220
+MX51_PAD_NANDF_CS0__NANDF_CS0			221
+MX51_PAD_NANDF_CS1__GPIO3_17			222
+MX51_PAD_NANDF_CS1__NANDF_CS1			223
+MX51_PAD_NANDF_CS2__CSPI_SCLK			224
+MX51_PAD_NANDF_CS2__FEC_TX_ER			225
+MX51_PAD_NANDF_CS2__GPIO3_18			226
+MX51_PAD_NANDF_CS2__NANDF_CS2			227
+MX51_PAD_NANDF_CS2__PATA_CS_0			228
+MX51_PAD_NANDF_CS2__SD4_CLK			229
+MX51_PAD_NANDF_CS2__USBH3_H1_DP			230
+MX51_PAD_NANDF_CS3__FEC_MDC			231
+MX51_PAD_NANDF_CS3__GPIO3_19			232
+MX51_PAD_NANDF_CS3__NANDF_CS3			233
+MX51_PAD_NANDF_CS3__PATA_CS_1			234
+MX51_PAD_NANDF_CS3__SD4_DAT0			235
+MX51_PAD_NANDF_CS3__USBH3_H1_DM			236
+MX51_PAD_NANDF_CS4__FEC_TDATA1			237
+MX51_PAD_NANDF_CS4__GPIO3_20			238
+MX51_PAD_NANDF_CS4__NANDF_CS4			239
+MX51_PAD_NANDF_CS4__PATA_DA_0			240
+MX51_PAD_NANDF_CS4__SD4_DAT1			241
+MX51_PAD_NANDF_CS4__USBH3_STP			242
+MX51_PAD_NANDF_CS5__FEC_TDATA2			243
+MX51_PAD_NANDF_CS5__GPIO3_21			244
+MX51_PAD_NANDF_CS5__NANDF_CS5			245
+MX51_PAD_NANDF_CS5__PATA_DA_1			246
+MX51_PAD_NANDF_CS5__SD4_DAT2			247
+MX51_PAD_NANDF_CS5__USBH3_DIR			248
+MX51_PAD_NANDF_CS6__CSPI_SS3			249
+MX51_PAD_NANDF_CS6__FEC_TDATA3			250
+MX51_PAD_NANDF_CS6__GPIO3_22			251
+MX51_PAD_NANDF_CS6__NANDF_CS6			252
+MX51_PAD_NANDF_CS6__PATA_DA_2			253
+MX51_PAD_NANDF_CS6__SD4_DAT3			254
+MX51_PAD_NANDF_CS7__FEC_TX_EN			255
+MX51_PAD_NANDF_CS7__GPIO3_23			256
+MX51_PAD_NANDF_CS7__NANDF_CS7			257
+MX51_PAD_NANDF_CS7__SD3_CLK			258
+MX51_PAD_NANDF_RDY_INT__ECSPI2_SS0		259
+MX51_PAD_NANDF_RDY_INT__FEC_TX_CLK		260
+MX51_PAD_NANDF_RDY_INT__GPIO3_24		261
+MX51_PAD_NANDF_RDY_INT__NANDF_RDY_INT		262
+MX51_PAD_NANDF_RDY_INT__SD3_CMD			263
+MX51_PAD_NANDF_D15__ECSPI2_MOSI			264
+MX51_PAD_NANDF_D15__GPIO3_25			265
+MX51_PAD_NANDF_D15__NANDF_D15			266
+MX51_PAD_NANDF_D15__PATA_DATA15			267
+MX51_PAD_NANDF_D15__SD3_DAT7			268
+MX51_PAD_NANDF_D14__ECSPI2_SS3			269
+MX51_PAD_NANDF_D14__GPIO3_26			270
+MX51_PAD_NANDF_D14__NANDF_D14			271
+MX51_PAD_NANDF_D14__PATA_DATA14			272
+MX51_PAD_NANDF_D14__SD3_DAT6			273
+MX51_PAD_NANDF_D13__ECSPI2_SS2			274
+MX51_PAD_NANDF_D13__GPIO3_27			275
+MX51_PAD_NANDF_D13__NANDF_D13			276
+MX51_PAD_NANDF_D13__PATA_DATA13			277
+MX51_PAD_NANDF_D13__SD3_DAT5			278
+MX51_PAD_NANDF_D12__ECSPI2_SS1			279
+MX51_PAD_NANDF_D12__GPIO3_28			280
+MX51_PAD_NANDF_D12__NANDF_D12			281
+MX51_PAD_NANDF_D12__PATA_DATA12			282
+MX51_PAD_NANDF_D12__SD3_DAT4			283
+MX51_PAD_NANDF_D11__FEC_RX_DV			284
+MX51_PAD_NANDF_D11__GPIO3_29			285
+MX51_PAD_NANDF_D11__NANDF_D11			286
+MX51_PAD_NANDF_D11__PATA_DATA11			287
+MX51_PAD_NANDF_D11__SD3_DATA3			288
+MX51_PAD_NANDF_D10__GPIO3_30			289
+MX51_PAD_NANDF_D10__NANDF_D10			290
+MX51_PAD_NANDF_D10__PATA_DATA10			291
+MX51_PAD_NANDF_D10__SD3_DATA2			292
+MX51_PAD_NANDF_D9__FEC_RDATA0			293
+MX51_PAD_NANDF_D9__GPIO3_31			294
+MX51_PAD_NANDF_D9__NANDF_D9			295
+MX51_PAD_NANDF_D9__PATA_DATA9			296
+MX51_PAD_NANDF_D9__SD3_DATA1			297
+MX51_PAD_NANDF_D8__FEC_TDATA0			298
+MX51_PAD_NANDF_D8__GPIO4_0			299
+MX51_PAD_NANDF_D8__NANDF_D8			300
+MX51_PAD_NANDF_D8__PATA_DATA8			301
+MX51_PAD_NANDF_D8__SD3_DATA0			302
+MX51_PAD_NANDF_D7__GPIO4_1			303
+MX51_PAD_NANDF_D7__NANDF_D7			304
+MX51_PAD_NANDF_D7__PATA_DATA7			305
+MX51_PAD_NANDF_D7__USBH3_DATA0			306
+MX51_PAD_NANDF_D6__GPIO4_2			307
+MX51_PAD_NANDF_D6__NANDF_D6			308
+MX51_PAD_NANDF_D6__PATA_DATA6			309
+MX51_PAD_NANDF_D6__SD4_LCTL			310
+MX51_PAD_NANDF_D6__USBH3_DATA1			311
+MX51_PAD_NANDF_D5__GPIO4_3			312
+MX51_PAD_NANDF_D5__NANDF_D5			313
+MX51_PAD_NANDF_D5__PATA_DATA5			314
+MX51_PAD_NANDF_D5__SD4_WP			315
+MX51_PAD_NANDF_D5__USBH3_DATA2			316
+MX51_PAD_NANDF_D4__GPIO4_4			317
+MX51_PAD_NANDF_D4__NANDF_D4			318
+MX51_PAD_NANDF_D4__PATA_DATA4			319
+MX51_PAD_NANDF_D4__SD4_CD			320
+MX51_PAD_NANDF_D4__USBH3_DATA3			321
+MX51_PAD_NANDF_D3__GPIO4_5			322
+MX51_PAD_NANDF_D3__NANDF_D3			323
+MX51_PAD_NANDF_D3__PATA_DATA3			324
+MX51_PAD_NANDF_D3__SD4_DAT4			325
+MX51_PAD_NANDF_D3__USBH3_DATA4			326
+MX51_PAD_NANDF_D2__GPIO4_6			327
+MX51_PAD_NANDF_D2__NANDF_D2			328
+MX51_PAD_NANDF_D2__PATA_DATA2			329
+MX51_PAD_NANDF_D2__SD4_DAT5			330
+MX51_PAD_NANDF_D2__USBH3_DATA5			331
+MX51_PAD_NANDF_D1__GPIO4_7			332
+MX51_PAD_NANDF_D1__NANDF_D1			333
+MX51_PAD_NANDF_D1__PATA_DATA1			334
+MX51_PAD_NANDF_D1__SD4_DAT6			335
+MX51_PAD_NANDF_D1__USBH3_DATA6			336
+MX51_PAD_NANDF_D0__GPIO4_8			337
+MX51_PAD_NANDF_D0__NANDF_D0			338
+MX51_PAD_NANDF_D0__PATA_DATA0			339
+MX51_PAD_NANDF_D0__SD4_DAT7			340
+MX51_PAD_NANDF_D0__USBH3_DATA7			341
+MX51_PAD_CSI1_D8__CSI1_D8			342
+MX51_PAD_CSI1_D8__GPIO3_12			343
+MX51_PAD_CSI1_D9__CSI1_D9			344
+MX51_PAD_CSI1_D9__GPIO3_13			345
+MX51_PAD_CSI1_D10__CSI1_D10			346
+MX51_PAD_CSI1_D11__CSI1_D11			347
+MX51_PAD_CSI1_D12__CSI1_D12			348
+MX51_PAD_CSI1_D13__CSI1_D13			349
+MX51_PAD_CSI1_D14__CSI1_D14			350
+MX51_PAD_CSI1_D15__CSI1_D15			351
+MX51_PAD_CSI1_D16__CSI1_D16			352
+MX51_PAD_CSI1_D17__CSI1_D17			353
+MX51_PAD_CSI1_D18__CSI1_D18			354
+MX51_PAD_CSI1_D19__CSI1_D19			355
+MX51_PAD_CSI1_VSYNC__CSI1_VSYNC			356
+MX51_PAD_CSI1_VSYNC__GPIO3_14			357
+MX51_PAD_CSI1_HSYNC__CSI1_HSYNC			358
+MX51_PAD_CSI1_HSYNC__GPIO3_15			359
+MX51_PAD_CSI1_PIXCLK__CSI1_PIXCLK		360
+MX51_PAD_CSI1_MCLK__CSI1_MCLK			361
+MX51_PAD_CSI2_D12__CSI2_D12			362
+MX51_PAD_CSI2_D12__GPIO4_9			363
+MX51_PAD_CSI2_D13__CSI2_D13			364
+MX51_PAD_CSI2_D13__GPIO4_10			365
+MX51_PAD_CSI2_D14__CSI2_D14			366
+MX51_PAD_CSI2_D15__CSI2_D15			367
+MX51_PAD_CSI2_D16__CSI2_D16			368
+MX51_PAD_CSI2_D17__CSI2_D17			369
+MX51_PAD_CSI2_D18__CSI2_D18			370
+MX51_PAD_CSI2_D18__GPIO4_11			371
+MX51_PAD_CSI2_D19__CSI2_D19			372
+MX51_PAD_CSI2_D19__GPIO4_12			373
+MX51_PAD_CSI2_VSYNC__CSI2_VSYNC			374
+MX51_PAD_CSI2_VSYNC__GPIO4_13			375
+MX51_PAD_CSI2_HSYNC__CSI2_HSYNC			376
+MX51_PAD_CSI2_HSYNC__GPIO4_14			377
+MX51_PAD_CSI2_PIXCLK__CSI2_PIXCLK		378
+MX51_PAD_CSI2_PIXCLK__GPIO4_15			379
+MX51_PAD_I2C1_CLK__GPIO4_16			380
+MX51_PAD_I2C1_CLK__I2C1_CLK			381
+MX51_PAD_I2C1_DAT__GPIO4_17			382
+MX51_PAD_I2C1_DAT__I2C1_DAT			383
+MX51_PAD_AUD3_BB_TXD__AUD3_TXD			384
+MX51_PAD_AUD3_BB_TXD__GPIO4_18			385
+MX51_PAD_AUD3_BB_RXD__AUD3_RXD			386
+MX51_PAD_AUD3_BB_RXD__GPIO4_19			387
+MX51_PAD_AUD3_BB_RXD__UART3_RXD			388
+MX51_PAD_AUD3_BB_CK__AUD3_TXC			389
+MX51_PAD_AUD3_BB_CK__GPIO4_20			390
+MX51_PAD_AUD3_BB_FS__AUD3_TXFS			391
+MX51_PAD_AUD3_BB_FS__GPIO4_21			392
+MX51_PAD_AUD3_BB_FS__UART3_TXD			393
+MX51_PAD_CSPI1_MOSI__ECSPI1_MOSI		394
+MX51_PAD_CSPI1_MOSI__GPIO4_22			395
+MX51_PAD_CSPI1_MOSI__I2C1_SDA			396
+MX51_PAD_CSPI1_MISO__AUD4_RXD			397
+MX51_PAD_CSPI1_MISO__ECSPI1_MISO		398
+MX51_PAD_CSPI1_MISO__GPIO4_23			399
+MX51_PAD_CSPI1_SS0__AUD4_TXC			400
+MX51_PAD_CSPI1_SS0__ECSPI1_SS0			401
+MX51_PAD_CSPI1_SS0__GPIO4_24			402
+MX51_PAD_CSPI1_SS1__AUD4_TXD			403
+MX51_PAD_CSPI1_SS1__ECSPI1_SS1			404
+MX51_PAD_CSPI1_SS1__GPIO4_25			405
+MX51_PAD_CSPI1_RDY__AUD4_TXFS			406
+MX51_PAD_CSPI1_RDY__ECSPI1_RDY			407
+MX51_PAD_CSPI1_RDY__GPIO4_26			408
+MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK		409
+MX51_PAD_CSPI1_SCLK__GPIO4_27			410
+MX51_PAD_CSPI1_SCLK__I2C1_SCL			411
+MX51_PAD_UART1_RXD__GPIO4_28			412
+MX51_PAD_UART1_RXD__UART1_RXD			413
+MX51_PAD_UART1_TXD__GPIO4_29			414
+MX51_PAD_UART1_TXD__PWM2_PWMO			415
+MX51_PAD_UART1_TXD__UART1_TXD			416
+MX51_PAD_UART1_RTS__GPIO4_30			417
+MX51_PAD_UART1_RTS__UART1_RTS			418
+MX51_PAD_UART1_CTS__GPIO4_31			419
+MX51_PAD_UART1_CTS__UART1_CTS			420
+MX51_PAD_UART2_RXD__FIRI_TXD			421
+MX51_PAD_UART2_RXD__GPIO1_20			422
+MX51_PAD_UART2_RXD__UART2_RXD			423
+MX51_PAD_UART2_TXD__FIRI_RXD			424
+MX51_PAD_UART2_TXD__GPIO1_21			425
+MX51_PAD_UART2_TXD__UART2_TXD			426
+MX51_PAD_UART3_RXD__CSI1_D0			427
+MX51_PAD_UART3_RXD__GPIO1_22			428
+MX51_PAD_UART3_RXD__UART1_DTR			429
+MX51_PAD_UART3_RXD__UART3_RXD			430
+MX51_PAD_UART3_TXD__CSI1_D1			431
+MX51_PAD_UART3_TXD__GPIO1_23			432
+MX51_PAD_UART3_TXD__UART1_DSR			433
+MX51_PAD_UART3_TXD__UART3_TXD			434
+MX51_PAD_OWIRE_LINE__GPIO1_24			435
+MX51_PAD_OWIRE_LINE__OWIRE_LINE			436
+MX51_PAD_OWIRE_LINE__SPDIF_OUT			437
+MX51_PAD_KEY_ROW0__KEY_ROW0			438
+MX51_PAD_KEY_ROW1__KEY_ROW1			439
+MX51_PAD_KEY_ROW2__KEY_ROW2			440
+MX51_PAD_KEY_ROW3__KEY_ROW3			441
+MX51_PAD_KEY_COL0__KEY_COL0			442
+MX51_PAD_KEY_COL0__PLL1_BYP			443
+MX51_PAD_KEY_COL1__KEY_COL1			444
+MX51_PAD_KEY_COL1__PLL2_BYP			445
+MX51_PAD_KEY_COL2__KEY_COL2			446
+MX51_PAD_KEY_COL2__PLL3_BYP			447
+MX51_PAD_KEY_COL3__KEY_COL3			448
+MX51_PAD_KEY_COL4__I2C2_SCL			449
+MX51_PAD_KEY_COL4__KEY_COL4			450
+MX51_PAD_KEY_COL4__SPDIF_OUT1			451
+MX51_PAD_KEY_COL4__UART1_RI			452
+MX51_PAD_KEY_COL4__UART3_RTS			453
+MX51_PAD_KEY_COL5__I2C2_SDA			454
+MX51_PAD_KEY_COL5__KEY_COL5			455
+MX51_PAD_KEY_COL5__UART1_DCD			456
+MX51_PAD_KEY_COL5__UART3_CTS			457
+MX51_PAD_USBH1_CLK__CSPI_SCLK			458
+MX51_PAD_USBH1_CLK__GPIO1_25			459
+MX51_PAD_USBH1_CLK__I2C2_SCL			460
+MX51_PAD_USBH1_CLK__USBH1_CLK			461
+MX51_PAD_USBH1_DIR__CSPI_MOSI			462
+MX51_PAD_USBH1_DIR__GPIO1_26			463
+MX51_PAD_USBH1_DIR__I2C2_SDA			464
+MX51_PAD_USBH1_DIR__USBH1_DIR			465
+MX51_PAD_USBH1_STP__CSPI_RDY			466
+MX51_PAD_USBH1_STP__GPIO1_27			467
+MX51_PAD_USBH1_STP__UART3_RXD			468
+MX51_PAD_USBH1_STP__USBH1_STP			469
+MX51_PAD_USBH1_NXT__CSPI_MISO			470
+MX51_PAD_USBH1_NXT__GPIO1_28			471
+MX51_PAD_USBH1_NXT__UART3_TXD			472
+MX51_PAD_USBH1_NXT__USBH1_NXT			473
+MX51_PAD_USBH1_DATA0__GPIO1_11			474
+MX51_PAD_USBH1_DATA0__UART2_CTS			475
+MX51_PAD_USBH1_DATA0__USBH1_DATA0		476
+MX51_PAD_USBH1_DATA1__GPIO1_12			477
+MX51_PAD_USBH1_DATA1__UART2_RXD			478
+MX51_PAD_USBH1_DATA1__USBH1_DATA1		479
+MX51_PAD_USBH1_DATA2__GPIO1_13			480
+MX51_PAD_USBH1_DATA2__UART2_TXD			481
+MX51_PAD_USBH1_DATA2__USBH1_DATA2		482
+MX51_PAD_USBH1_DATA3__GPIO1_14			483
+MX51_PAD_USBH1_DATA3__UART2_RTS			484
+MX51_PAD_USBH1_DATA3__USBH1_DATA3		485
+MX51_PAD_USBH1_DATA4__CSPI_SS0			486
+MX51_PAD_USBH1_DATA4__GPIO1_15			487
+MX51_PAD_USBH1_DATA4__USBH1_DATA4		488
+MX51_PAD_USBH1_DATA5__CSPI_SS1			489
+MX51_PAD_USBH1_DATA5__GPIO1_16			490
+MX51_PAD_USBH1_DATA5__USBH1_DATA5		491
+MX51_PAD_USBH1_DATA6__CSPI_SS3			492
+MX51_PAD_USBH1_DATA6__GPIO1_17			493
+MX51_PAD_USBH1_DATA6__USBH1_DATA6		494
+MX51_PAD_USBH1_DATA7__ECSPI1_SS3		495
+MX51_PAD_USBH1_DATA7__ECSPI2_SS3		496
+MX51_PAD_USBH1_DATA7__GPIO1_18			497
+MX51_PAD_USBH1_DATA7__USBH1_DATA7		498
+MX51_PAD_DI1_PIN11__DI1_PIN11			499
+MX51_PAD_DI1_PIN11__ECSPI1_SS2			500
+MX51_PAD_DI1_PIN11__GPIO3_0			501
+MX51_PAD_DI1_PIN12__DI1_PIN12			502
+MX51_PAD_DI1_PIN12__GPIO3_1			503
+MX51_PAD_DI1_PIN13__DI1_PIN13			504
+MX51_PAD_DI1_PIN13__GPIO3_2			505
+MX51_PAD_DI1_D0_CS__DI1_D0_CS			506
+MX51_PAD_DI1_D0_CS__GPIO3_3			507
+MX51_PAD_DI1_D1_CS__DI1_D1_CS			508
+MX51_PAD_DI1_D1_CS__DISP1_PIN14			509
+MX51_PAD_DI1_D1_CS__DISP1_PIN5			510
+MX51_PAD_DI1_D1_CS__GPIO3_4			511
+MX51_PAD_DISPB2_SER_DIN__DISP1_PIN1		512
+MX51_PAD_DISPB2_SER_DIN__DISPB2_SER_DIN		513
+MX51_PAD_DISPB2_SER_DIN__GPIO3_5		514
+MX51_PAD_DISPB2_SER_DIO__DISP1_PIN6		515
+MX51_PAD_DISPB2_SER_DIO__DISPB2_SER_DIO		516
+MX51_PAD_DISPB2_SER_DIO__GPIO3_6		517
+MX51_PAD_DISPB2_SER_CLK__DISP1_PIN17		518
+MX51_PAD_DISPB2_SER_CLK__DISP1_PIN7		519
+MX51_PAD_DISPB2_SER_CLK__DISPB2_SER_CLK		520
+MX51_PAD_DISPB2_SER_CLK__GPIO3_7		521
+MX51_PAD_DISPB2_SER_RS__DISP1_EXT_CLK		522
+MX51_PAD_DISPB2_SER_RS__DISP1_PIN16		523
+MX51_PAD_DISPB2_SER_RS__DISP1_PIN8		524
+MX51_PAD_DISPB2_SER_RS__DISPB2_SER_RS		525
+MX51_PAD_DISPB2_SER_RS__DISPB2_SER_RS		526
+MX51_PAD_DISPB2_SER_RS__GPIO3_8			527
+MX51_PAD_DISP1_DAT0__DISP1_DAT0			528
+MX51_PAD_DISP1_DAT1__DISP1_DAT1			529
+MX51_PAD_DISP1_DAT2__DISP1_DAT2			530
+MX51_PAD_DISP1_DAT3__DISP1_DAT3			531
+MX51_PAD_DISP1_DAT4__DISP1_DAT4			532
+MX51_PAD_DISP1_DAT5__DISP1_DAT5			533
+MX51_PAD_DISP1_DAT6__BOOT_USB_SRC		534
+MX51_PAD_DISP1_DAT6__DISP1_DAT6			535
+MX51_PAD_DISP1_DAT7__BOOT_EEPROM_CFG		536
+MX51_PAD_DISP1_DAT7__DISP1_DAT7			537
+MX51_PAD_DISP1_DAT8__BOOT_SRC0			538
+MX51_PAD_DISP1_DAT8__DISP1_DAT8			539
+MX51_PAD_DISP1_DAT9__BOOT_SRC1			540
+MX51_PAD_DISP1_DAT9__DISP1_DAT9			541
+MX51_PAD_DISP1_DAT10__BOOT_SPARE_SIZE		542
+MX51_PAD_DISP1_DAT10__DISP1_DAT10		543
+MX51_PAD_DISP1_DAT11__BOOT_LPB_FREQ2		544
+MX51_PAD_DISP1_DAT11__DISP1_DAT11		545
+MX51_PAD_DISP1_DAT12__BOOT_MLC_SEL		546
+MX51_PAD_DISP1_DAT12__DISP1_DAT12		547
+MX51_PAD_DISP1_DAT13__BOOT_MEM_CTL0		548
+MX51_PAD_DISP1_DAT13__DISP1_DAT13		549
+MX51_PAD_DISP1_DAT14__BOOT_MEM_CTL1		550
+MX51_PAD_DISP1_DAT14__DISP1_DAT14		551
+MX51_PAD_DISP1_DAT15__BOOT_BUS_WIDTH		552
+MX51_PAD_DISP1_DAT15__DISP1_DAT15		553
+MX51_PAD_DISP1_DAT16__BOOT_PAGE_SIZE0		554
+MX51_PAD_DISP1_DAT16__DISP1_DAT16		555
+MX51_PAD_DISP1_DAT17__BOOT_PAGE_SIZE1		556
+MX51_PAD_DISP1_DAT17__DISP1_DAT17		557
+MX51_PAD_DISP1_DAT18__BOOT_WEIM_MUXED0		558
+MX51_PAD_DISP1_DAT18__DISP1_DAT18		559
+MX51_PAD_DISP1_DAT18__DISP2_PIN11		560
+MX51_PAD_DISP1_DAT18__DISP2_PIN5		561
+MX51_PAD_DISP1_DAT19__BOOT_WEIM_MUXED1		562
+MX51_PAD_DISP1_DAT19__DISP1_DAT19		563
+MX51_PAD_DISP1_DAT19__DISP2_PIN12		564
+MX51_PAD_DISP1_DAT19__DISP2_PIN6		565
+MX51_PAD_DISP1_DAT20__BOOT_MEM_TYPE0		566
+MX51_PAD_DISP1_DAT20__DISP1_DAT20		567
+MX51_PAD_DISP1_DAT20__DISP2_PIN13		568
+MX51_PAD_DISP1_DAT20__DISP2_PIN7		569
+MX51_PAD_DISP1_DAT21__BOOT_MEM_TYPE1		570
+MX51_PAD_DISP1_DAT21__DISP1_DAT21		571
+MX51_PAD_DISP1_DAT21__DISP2_PIN14		572
+MX51_PAD_DISP1_DAT21__DISP2_PIN8		573
+MX51_PAD_DISP1_DAT22__BOOT_LPB_FREQ0		574
+MX51_PAD_DISP1_DAT22__DISP1_DAT22		575
+MX51_PAD_DISP1_DAT22__DISP2_D0_CS		576
+MX51_PAD_DISP1_DAT22__DISP2_DAT16		577
+MX51_PAD_DISP1_DAT23__BOOT_LPB_FREQ1		578
+MX51_PAD_DISP1_DAT23__DISP1_DAT23		579
+MX51_PAD_DISP1_DAT23__DISP2_D1_CS		580
+MX51_PAD_DISP1_DAT23__DISP2_DAT17		581
+MX51_PAD_DISP1_DAT23__DISP2_SER_CS		582
+MX51_PAD_DI1_PIN3__DI1_PIN3			583
+MX51_PAD_DI1_PIN2__DI1_PIN2			584
+MX51_PAD_DI_GP2__DISP1_SER_CLK			585
+MX51_PAD_DI_GP2__DISP2_WAIT			586
+MX51_PAD_DI_GP3__CSI1_DATA_EN			587
+MX51_PAD_DI_GP3__DISP1_SER_DIO			588
+MX51_PAD_DI_GP3__FEC_TX_ER			589
+MX51_PAD_DI2_PIN4__CSI2_DATA_EN			590
+MX51_PAD_DI2_PIN4__DI2_PIN4			591
+MX51_PAD_DI2_PIN4__FEC_CRS			592
+MX51_PAD_DI2_PIN2__DI2_PIN2			593
+MX51_PAD_DI2_PIN2__FEC_MDC			594
+MX51_PAD_DI2_PIN3__DI2_PIN3			595
+MX51_PAD_DI2_PIN3__FEC_MDIO			596
+MX51_PAD_DI2_DISP_CLK__DI2_DISP_CLK		597
+MX51_PAD_DI2_DISP_CLK__FEC_RDATA1		598
+MX51_PAD_DI_GP4__DI2_PIN15			599
+MX51_PAD_DI_GP4__DISP1_SER_DIN			600
+MX51_PAD_DI_GP4__DISP2_PIN1			601
+MX51_PAD_DI_GP4__FEC_RDATA2			602
+MX51_PAD_DISP2_DAT0__DISP2_DAT0			603
+MX51_PAD_DISP2_DAT0__FEC_RDATA3			604
+MX51_PAD_DISP2_DAT0__KEY_COL6			605
+MX51_PAD_DISP2_DAT0__UART3_RXD			606
+MX51_PAD_DISP2_DAT0__USBH3_CLK			607
+MX51_PAD_DISP2_DAT1__DISP2_DAT1			608
+MX51_PAD_DISP2_DAT1__FEC_RX_ER			609
+MX51_PAD_DISP2_DAT1__KEY_COL7			610
+MX51_PAD_DISP2_DAT1__UART3_TXD			611
+MX51_PAD_DISP2_DAT1__USBH3_DIR			612
+MX51_PAD_DISP2_DAT2__DISP2_DAT2			613
+MX51_PAD_DISP2_DAT3__DISP2_DAT3			614
+MX51_PAD_DISP2_DAT4__DISP2_DAT4			615
+MX51_PAD_DISP2_DAT5__DISP2_DAT5			616
+MX51_PAD_DISP2_DAT6__DISP2_DAT6			617
+MX51_PAD_DISP2_DAT6__FEC_TDATA1			618
+MX51_PAD_DISP2_DAT6__GPIO1_19			619
+MX51_PAD_DISP2_DAT6__KEY_ROW4			620
+MX51_PAD_DISP2_DAT6__USBH3_STP			621
+MX51_PAD_DISP2_DAT7__DISP2_DAT7			622
+MX51_PAD_DISP2_DAT7__FEC_TDATA2			623
+MX51_PAD_DISP2_DAT7__GPIO1_29			624
+MX51_PAD_DISP2_DAT7__KEY_ROW5			625
+MX51_PAD_DISP2_DAT7__USBH3_NXT			626
+MX51_PAD_DISP2_DAT8__DISP2_DAT8			627
+MX51_PAD_DISP2_DAT8__FEC_TDATA3			628
+MX51_PAD_DISP2_DAT8__GPIO1_30			629
+MX51_PAD_DISP2_DAT8__KEY_ROW6			630
+MX51_PAD_DISP2_DAT8__USBH3_DATA0		631
+MX51_PAD_DISP2_DAT9__AUD6_RXC			632
+MX51_PAD_DISP2_DAT9__DISP2_DAT9			633
+MX51_PAD_DISP2_DAT9__FEC_TX_EN			634
+MX51_PAD_DISP2_DAT9__GPIO1_31			635
+MX51_PAD_DISP2_DAT9__USBH3_DATA1		636
+MX51_PAD_DISP2_DAT10__DISP2_DAT10		637
+MX51_PAD_DISP2_DAT10__DISP2_SER_CS		638
+MX51_PAD_DISP2_DAT10__FEC_COL			639
+MX51_PAD_DISP2_DAT10__KEY_ROW7			640
+MX51_PAD_DISP2_DAT10__USBH3_DATA2		641
+MX51_PAD_DISP2_DAT11__AUD6_TXD			642
+MX51_PAD_DISP2_DAT11__DISP2_DAT11		643
+MX51_PAD_DISP2_DAT11__FEC_RX_CLK		644
+MX51_PAD_DISP2_DAT11__GPIO1_10			645
+MX51_PAD_DISP2_DAT11__USBH3_DATA3		646
+MX51_PAD_DISP2_DAT12__AUD6_RXD			647
+MX51_PAD_DISP2_DAT12__DISP2_DAT12		648
+MX51_PAD_DISP2_DAT12__FEC_RX_DV			649
+MX51_PAD_DISP2_DAT12__USBH3_DATA4		650
+MX51_PAD_DISP2_DAT13__AUD6_TXC			651
+MX51_PAD_DISP2_DAT13__DISP2_DAT13		652
+MX51_PAD_DISP2_DAT13__FEC_TX_CLK		653
+MX51_PAD_DISP2_DAT13__USBH3_DATA5		654
+MX51_PAD_DISP2_DAT14__AUD6_TXFS			655
+MX51_PAD_DISP2_DAT14__DISP2_DAT14		656
+MX51_PAD_DISP2_DAT14__FEC_RDATA0		657
+MX51_PAD_DISP2_DAT14__USBH3_DATA6		658
+MX51_PAD_DISP2_DAT15__AUD6_RXFS			659
+MX51_PAD_DISP2_DAT15__DISP1_SER_CS		660
+MX51_PAD_DISP2_DAT15__DISP2_DAT15		661
+MX51_PAD_DISP2_DAT15__FEC_TDATA0		662
+MX51_PAD_DISP2_DAT15__USBH3_DATA7		663
+MX51_PAD_SD1_CMD__AUD5_RXFS			664
+MX51_PAD_SD1_CMD__CSPI_MOSI			665
+MX51_PAD_SD1_CMD__SD1_CMD			666
+MX51_PAD_SD1_CLK__AUD5_RXC			667
+MX51_PAD_SD1_CLK__CSPI_SCLK			668
+MX51_PAD_SD1_CLK__SD1_CLK			669
+MX51_PAD_SD1_DATA0__AUD5_TXD			670
+MX51_PAD_SD1_DATA0__CSPI_MISO			671
+MX51_PAD_SD1_DATA0__SD1_DATA0			672
+MX51_PAD_EIM_DA0__EIM_DA0			673
+MX51_PAD_EIM_DA1__EIM_DA1			674
+MX51_PAD_EIM_DA2__EIM_DA2			675
+MX51_PAD_EIM_DA3__EIM_DA3			676
+MX51_PAD_SD1_DATA1__AUD5_RXD			677
+MX51_PAD_SD1_DATA1__SD1_DATA1			678
+MX51_PAD_EIM_DA4__EIM_DA4			679
+MX51_PAD_EIM_DA5__EIM_DA5			680
+MX51_PAD_EIM_DA6__EIM_DA6			681
+MX51_PAD_EIM_DA7__EIM_DA7			682
+MX51_PAD_SD1_DATA2__AUD5_TXC			683
+MX51_PAD_SD1_DATA2__SD1_DATA2			684
+MX51_PAD_EIM_DA10__EIM_DA10			685
+MX51_PAD_EIM_DA11__EIM_DA11			686
+MX51_PAD_EIM_DA8__EIM_DA8			687
+MX51_PAD_EIM_DA9__EIM_DA9			688
+MX51_PAD_SD1_DATA3__AUD5_TXFS			689
+MX51_PAD_SD1_DATA3__CSPI_SS1			690
+MX51_PAD_SD1_DATA3__SD1_DATA3			691
+MX51_PAD_GPIO1_0__CSPI_SS2			692
+MX51_PAD_GPIO1_0__GPIO1_0			693
+MX51_PAD_GPIO1_0__SD1_CD			694
+MX51_PAD_GPIO1_1__CSPI_MISO			695
+MX51_PAD_GPIO1_1__GPIO1_1			696
+MX51_PAD_GPIO1_1__SD1_WP			697
+MX51_PAD_EIM_DA12__EIM_DA12			698
+MX51_PAD_EIM_DA13__EIM_DA13			699
+MX51_PAD_EIM_DA14__EIM_DA14			700
+MX51_PAD_EIM_DA15__EIM_DA15			701
+MX51_PAD_SD2_CMD__CSPI_MOSI			702
+MX51_PAD_SD2_CMD__I2C1_SCL			703
+MX51_PAD_SD2_CMD__SD2_CMD			704
+MX51_PAD_SD2_CLK__CSPI_SCLK			705
+MX51_PAD_SD2_CLK__I2C1_SDA			706
+MX51_PAD_SD2_CLK__SD2_CLK			707
+MX51_PAD_SD2_DATA0__CSPI_MISO			708
+MX51_PAD_SD2_DATA0__SD1_DAT4			709
+MX51_PAD_SD2_DATA0__SD2_DATA0			710
+MX51_PAD_SD2_DATA1__SD1_DAT5			711
+MX51_PAD_SD2_DATA1__SD2_DATA1			712
+MX51_PAD_SD2_DATA1__USBH3_H2_DP			713
+MX51_PAD_SD2_DATA2__SD1_DAT6			714
+MX51_PAD_SD2_DATA2__SD2_DATA2			715
+MX51_PAD_SD2_DATA2__USBH3_H2_DM			716
+MX51_PAD_SD2_DATA3__CSPI_SS2			717
+MX51_PAD_SD2_DATA3__SD1_DAT7			718
+MX51_PAD_SD2_DATA3__SD2_DATA3			719
+MX51_PAD_GPIO1_2__CCM_OUT_2			720
+MX51_PAD_GPIO1_2__GPIO1_2			721
+MX51_PAD_GPIO1_2__I2C2_SCL			722
+MX51_PAD_GPIO1_2__PLL1_BYP			723
+MX51_PAD_GPIO1_2__PWM1_PWMO			724
+MX51_PAD_GPIO1_3__GPIO1_3			725
+MX51_PAD_GPIO1_3__I2C2_SDA			726
+MX51_PAD_GPIO1_3__PLL2_BYP			727
+MX51_PAD_GPIO1_3__PWM2_PWMO			728
+MX51_PAD_PMIC_INT_REQ__PMIC_INT_REQ		729
+MX51_PAD_PMIC_INT_REQ__PMIC_PMU_IRQ_B		730
+MX51_PAD_GPIO1_4__DISP2_EXT_CLK			731
+MX51_PAD_GPIO1_4__EIM_RDY			732
+MX51_PAD_GPIO1_4__GPIO1_4			733
+MX51_PAD_GPIO1_4__WDOG1_WDOG_B			734
+MX51_PAD_GPIO1_5__CSI2_MCLK			735
+MX51_PAD_GPIO1_5__DISP2_PIN16			736
+MX51_PAD_GPIO1_5__GPIO1_5			737
+MX51_PAD_GPIO1_5__WDOG2_WDOG_B			738
+MX51_PAD_GPIO1_6__DISP2_PIN17			739
+MX51_PAD_GPIO1_6__GPIO1_6			740
+MX51_PAD_GPIO1_6__REF_EN_B			741
+MX51_PAD_GPIO1_7__CCM_OUT_0			742
+MX51_PAD_GPIO1_7__GPIO1_7			743
+MX51_PAD_GPIO1_7__SD2_WP			744
+MX51_PAD_GPIO1_7__SPDIF_OUT1			745
+MX51_PAD_GPIO1_8__CSI2_DATA_EN			746
+MX51_PAD_GPIO1_8__GPIO1_8			747
+MX51_PAD_GPIO1_8__SD2_CD			748
+MX51_PAD_GPIO1_8__USBH3_PWR			749
+MX51_PAD_GPIO1_9__CCM_OUT_1			750
+MX51_PAD_GPIO1_9__DISP2_D1_CS			751
+MX51_PAD_GPIO1_9__DISP2_SER_CS			752
+MX51_PAD_GPIO1_9__GPIO1_9			753
+MX51_PAD_GPIO1_9__SD2_LCTL			754
+MX51_PAD_GPIO1_9__USBH3_OC			755
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx53-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx53-pinctrl.txt
new file mode 100644
index 0000000..ca85ca4
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx53-pinctrl.txt
@@ -0,0 +1,1202 @@
+* Freescale IMX53 IOMUX Controller
+
+Please refer to fsl,imx-pinctrl.txt in this directory for the common binding
+part and usage.
+
+Required properties:
+- compatible: "fsl,imx53-iomuxc"
+- fsl,pins: an array of integer pairs, each representing a pin's mux and
+  config setting. The format is fsl,pins = <PIN_FUNC_ID CONFIG>, where
+  PIN_FUNC_ID selects a pin working in a specific function and CONFIG is the
+  pad setting value (e.g. pull-up) for this pin. Please refer to the imx53
+  datasheet for the valid pad config settings.
+
+CONFIG bits definition:
+PAD_CTL_HVE			(1 << 13)
+PAD_CTL_HYS			(1 << 8)
+PAD_CTL_PKE			(1 << 7)
+PAD_CTL_PUE			(1 << 6)
+PAD_CTL_PUS_100K_DOWN		(0 << 4)
+PAD_CTL_PUS_47K_UP		(1 << 4)
+PAD_CTL_PUS_100K_UP		(2 << 4)
+PAD_CTL_PUS_22K_UP		(3 << 4)
+PAD_CTL_ODE			(1 << 3)
+PAD_CTL_DSE_LOW			(0 << 1)
+PAD_CTL_DSE_MED			(1 << 1)
+PAD_CTL_DSE_HIGH		(2 << 1)
+PAD_CTL_DSE_MAX			(3 << 1)
+PAD_CTL_SRE_FAST		(1 << 0)
+PAD_CTL_SRE_SLOW		(0 << 0)
+
+See below for available PIN_FUNC_ID for imx53:
+MX53_PAD_GPIO_19__KPP_COL_5				0
+MX53_PAD_GPIO_19__GPIO4_5				1
+MX53_PAD_GPIO_19__CCM_CLKO				2
+MX53_PAD_GPIO_19__SPDIF_OUT1				3
+MX53_PAD_GPIO_19__RTC_CE_RTC_EXT_TRIG2			4
+MX53_PAD_GPIO_19__ECSPI1_RDY				5
+MX53_PAD_GPIO_19__FEC_TDATA_3				6
+MX53_PAD_GPIO_19__SRC_INT_BOOT				7
+MX53_PAD_KEY_COL0__KPP_COL_0				8
+MX53_PAD_KEY_COL0__GPIO4_6				9
+MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC			10
+MX53_PAD_KEY_COL0__UART4_TXD_MUX			11
+MX53_PAD_KEY_COL0__ECSPI1_SCLK				12
+MX53_PAD_KEY_COL0__FEC_RDATA_3				13
+MX53_PAD_KEY_COL0__SRC_ANY_PU_RST			14
+MX53_PAD_KEY_ROW0__KPP_ROW_0				15
+MX53_PAD_KEY_ROW0__GPIO4_7				16
+MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD			17
+MX53_PAD_KEY_ROW0__UART4_RXD_MUX			18
+MX53_PAD_KEY_ROW0__ECSPI1_MOSI				19
+MX53_PAD_KEY_ROW0__FEC_TX_ER				20
+MX53_PAD_KEY_COL1__KPP_COL_1				21
+MX53_PAD_KEY_COL1__GPIO4_8				22
+MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS			23
+MX53_PAD_KEY_COL1__UART5_TXD_MUX			24
+MX53_PAD_KEY_COL1__ECSPI1_MISO				25
+MX53_PAD_KEY_COL1__FEC_RX_CLK				26
+MX53_PAD_KEY_COL1__USBPHY1_TXREADY			27
+MX53_PAD_KEY_ROW1__KPP_ROW_1				28
+MX53_PAD_KEY_ROW1__GPIO4_9				29
+MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD			30
+MX53_PAD_KEY_ROW1__UART5_RXD_MUX			31
+MX53_PAD_KEY_ROW1__ECSPI1_SS0				32
+MX53_PAD_KEY_ROW1__FEC_COL				33
+MX53_PAD_KEY_ROW1__USBPHY1_RXVALID			34
+MX53_PAD_KEY_COL2__KPP_COL_2				35
+MX53_PAD_KEY_COL2__GPIO4_10				36
+MX53_PAD_KEY_COL2__CAN1_TXCAN				37
+MX53_PAD_KEY_COL2__FEC_MDIO				38
+MX53_PAD_KEY_COL2__ECSPI1_SS1				39
+MX53_PAD_KEY_COL2__FEC_RDATA_2				40
+MX53_PAD_KEY_COL2__USBPHY1_RXACTIVE			41
+MX53_PAD_KEY_ROW2__KPP_ROW_2				42
+MX53_PAD_KEY_ROW2__GPIO4_11				43
+MX53_PAD_KEY_ROW2__CAN1_RXCAN				44
+MX53_PAD_KEY_ROW2__FEC_MDC				45
+MX53_PAD_KEY_ROW2__ECSPI1_SS2				46
+MX53_PAD_KEY_ROW2__FEC_TDATA_2				47
+MX53_PAD_KEY_ROW2__USBPHY1_RXERROR			48
+MX53_PAD_KEY_COL3__KPP_COL_3				49
+MX53_PAD_KEY_COL3__GPIO4_12				50
+MX53_PAD_KEY_COL3__USBOH3_H2_DP				51
+MX53_PAD_KEY_COL3__SPDIF_IN1				52
+MX53_PAD_KEY_COL3__I2C2_SCL				53
+MX53_PAD_KEY_COL3__ECSPI1_SS3				54
+MX53_PAD_KEY_COL3__FEC_CRS				55
+MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK			56
+MX53_PAD_KEY_ROW3__KPP_ROW_3				57
+MX53_PAD_KEY_ROW3__GPIO4_13				58
+MX53_PAD_KEY_ROW3__USBOH3_H2_DM				59
+MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK			60
+MX53_PAD_KEY_ROW3__I2C2_SDA				61
+MX53_PAD_KEY_ROW3__OSC32K_32K_OUT			62
+MX53_PAD_KEY_ROW3__CCM_PLL4_BYP				63
+MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0			64
+MX53_PAD_KEY_COL4__KPP_COL_4				65
+MX53_PAD_KEY_COL4__GPIO4_14				66
+MX53_PAD_KEY_COL4__CAN2_TXCAN				67
+MX53_PAD_KEY_COL4__IPU_SISG_4				68
+MX53_PAD_KEY_COL4__UART5_RTS				69
+MX53_PAD_KEY_COL4__USBOH3_USBOTG_OC			70
+MX53_PAD_KEY_COL4__USBPHY1_LINESTATE_1			71
+MX53_PAD_KEY_ROW4__KPP_ROW_4				72
+MX53_PAD_KEY_ROW4__GPIO4_15				73
+MX53_PAD_KEY_ROW4__CAN2_RXCAN				74
+MX53_PAD_KEY_ROW4__IPU_SISG_5				75
+MX53_PAD_KEY_ROW4__UART5_CTS				76
+MX53_PAD_KEY_ROW4__USBOH3_USBOTG_PWR			77
+MX53_PAD_KEY_ROW4__USBPHY1_VBUSVALID			78
+MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK			79
+MX53_PAD_DI0_DISP_CLK__GPIO4_16				80
+MX53_PAD_DI0_DISP_CLK__USBOH3_USBH2_DIR			81
+MX53_PAD_DI0_DISP_CLK__SDMA_DEBUG_CORE_STATE_0		82
+MX53_PAD_DI0_DISP_CLK__EMI_EMI_DEBUG_0			83
+MX53_PAD_DI0_DISP_CLK__USBPHY1_AVALID			84
+MX53_PAD_DI0_PIN15__IPU_DI0_PIN15			85
+MX53_PAD_DI0_PIN15__GPIO4_17				86
+MX53_PAD_DI0_PIN15__AUDMUX_AUD6_TXC			87
+MX53_PAD_DI0_PIN15__SDMA_DEBUG_CORE_STATE_1		88
+MX53_PAD_DI0_PIN15__EMI_EMI_DEBUG_1			89
+MX53_PAD_DI0_PIN15__USBPHY1_BVALID			90
+MX53_PAD_DI0_PIN2__IPU_DI0_PIN2				91
+MX53_PAD_DI0_PIN2__GPIO4_18				92
+MX53_PAD_DI0_PIN2__AUDMUX_AUD6_TXD			93
+MX53_PAD_DI0_PIN2__SDMA_DEBUG_CORE_STATE_2		94
+MX53_PAD_DI0_PIN2__EMI_EMI_DEBUG_2			95
+MX53_PAD_DI0_PIN2__USBPHY1_ENDSESSION			96
+MX53_PAD_DI0_PIN3__IPU_DI0_PIN3				97
+MX53_PAD_DI0_PIN3__GPIO4_19				98
+MX53_PAD_DI0_PIN3__AUDMUX_AUD6_TXFS			99
+MX53_PAD_DI0_PIN3__SDMA_DEBUG_CORE_STATE_3		100
+MX53_PAD_DI0_PIN3__EMI_EMI_DEBUG_3			101
+MX53_PAD_DI0_PIN3__USBPHY1_IDDIG			102
+MX53_PAD_DI0_PIN4__IPU_DI0_PIN4				103
+MX53_PAD_DI0_PIN4__GPIO4_20				104
+MX53_PAD_DI0_PIN4__AUDMUX_AUD6_RXD			105
+MX53_PAD_DI0_PIN4__ESDHC1_WP				106
+MX53_PAD_DI0_PIN4__SDMA_DEBUG_YIELD			107
+MX53_PAD_DI0_PIN4__EMI_EMI_DEBUG_4			108
+MX53_PAD_DI0_PIN4__USBPHY1_HOSTDISCONNECT		109
+MX53_PAD_DISP0_DAT0__IPU_DISP0_DAT_0			110
+MX53_PAD_DISP0_DAT0__GPIO4_21				111
+MX53_PAD_DISP0_DAT0__CSPI_SCLK				112
+MX53_PAD_DISP0_DAT0__USBOH3_USBH2_DATA_0		113
+MX53_PAD_DISP0_DAT0__SDMA_DEBUG_CORE_RUN		114
+MX53_PAD_DISP0_DAT0__EMI_EMI_DEBUG_5			115
+MX53_PAD_DISP0_DAT0__USBPHY2_TXREADY			116
+MX53_PAD_DISP0_DAT1__IPU_DISP0_DAT_1			117
+MX53_PAD_DISP0_DAT1__GPIO4_22				118
+MX53_PAD_DISP0_DAT1__CSPI_MOSI				119
+MX53_PAD_DISP0_DAT1__USBOH3_USBH2_DATA_1		120
+MX53_PAD_DISP0_DAT1__SDMA_DEBUG_EVENT_CHANNEL_SEL	121
+MX53_PAD_DISP0_DAT1__EMI_EMI_DEBUG_6			122
+MX53_PAD_DISP0_DAT1__USBPHY2_RXVALID			123
+MX53_PAD_DISP0_DAT2__IPU_DISP0_DAT_2			124
+MX53_PAD_DISP0_DAT2__GPIO4_23				125
+MX53_PAD_DISP0_DAT2__CSPI_MISO				126
+MX53_PAD_DISP0_DAT2__USBOH3_USBH2_DATA_2		127
+MX53_PAD_DISP0_DAT2__SDMA_DEBUG_MODE			128
+MX53_PAD_DISP0_DAT2__EMI_EMI_DEBUG_7			129
+MX53_PAD_DISP0_DAT2__USBPHY2_RXACTIVE			130
+MX53_PAD_DISP0_DAT3__IPU_DISP0_DAT_3			131
+MX53_PAD_DISP0_DAT3__GPIO4_24				132
+MX53_PAD_DISP0_DAT3__CSPI_SS0				133
+MX53_PAD_DISP0_DAT3__USBOH3_USBH2_DATA_3		134
+MX53_PAD_DISP0_DAT3__SDMA_DEBUG_BUS_ERROR		135
+MX53_PAD_DISP0_DAT3__EMI_EMI_DEBUG_8			136
+MX53_PAD_DISP0_DAT3__USBPHY2_RXERROR			137
+MX53_PAD_DISP0_DAT4__IPU_DISP0_DAT_4			138
+MX53_PAD_DISP0_DAT4__GPIO4_25				139
+MX53_PAD_DISP0_DAT4__CSPI_SS1				140
+MX53_PAD_DISP0_DAT4__USBOH3_USBH2_DATA_4		141
+MX53_PAD_DISP0_DAT4__SDMA_DEBUG_BUS_RWB			142
+MX53_PAD_DISP0_DAT4__EMI_EMI_DEBUG_9			143
+MX53_PAD_DISP0_DAT4__USBPHY2_SIECLOCK			144
+MX53_PAD_DISP0_DAT5__IPU_DISP0_DAT_5			145
+MX53_PAD_DISP0_DAT5__GPIO4_26				146
+MX53_PAD_DISP0_DAT5__CSPI_SS2				147
+MX53_PAD_DISP0_DAT5__USBOH3_USBH2_DATA_5		148
+MX53_PAD_DISP0_DAT5__SDMA_DEBUG_MATCHED_DMBUS		149
+MX53_PAD_DISP0_DAT5__EMI_EMI_DEBUG_10			150
+MX53_PAD_DISP0_DAT5__USBPHY2_LINESTATE_0		151
+MX53_PAD_DISP0_DAT6__IPU_DISP0_DAT_6			152
+MX53_PAD_DISP0_DAT6__GPIO4_27				153
+MX53_PAD_DISP0_DAT6__CSPI_SS3				154
+MX53_PAD_DISP0_DAT6__USBOH3_USBH2_DATA_6		155
+MX53_PAD_DISP0_DAT6__SDMA_DEBUG_RTBUFFER_WRITE		156
+MX53_PAD_DISP0_DAT6__EMI_EMI_DEBUG_11			157
+MX53_PAD_DISP0_DAT6__USBPHY2_LINESTATE_1		158
+MX53_PAD_DISP0_DAT7__IPU_DISP0_DAT_7			159
+MX53_PAD_DISP0_DAT7__GPIO4_28				160
+MX53_PAD_DISP0_DAT7__CSPI_RDY				161
+MX53_PAD_DISP0_DAT7__USBOH3_USBH2_DATA_7		162
+MX53_PAD_DISP0_DAT7__SDMA_DEBUG_EVENT_CHANNEL_0		163
+MX53_PAD_DISP0_DAT7__EMI_EMI_DEBUG_12			164
+MX53_PAD_DISP0_DAT7__USBPHY2_VBUSVALID			165
+MX53_PAD_DISP0_DAT8__IPU_DISP0_DAT_8			166
+MX53_PAD_DISP0_DAT8__GPIO4_29				167
+MX53_PAD_DISP0_DAT8__PWM1_PWMO				168
+MX53_PAD_DISP0_DAT8__WDOG1_WDOG_B			169
+MX53_PAD_DISP0_DAT8__SDMA_DEBUG_EVENT_CHANNEL_1		170
+MX53_PAD_DISP0_DAT8__EMI_EMI_DEBUG_13			171
+MX53_PAD_DISP0_DAT8__USBPHY2_AVALID			172
+MX53_PAD_DISP0_DAT9__IPU_DISP0_DAT_9			173
+MX53_PAD_DISP0_DAT9__GPIO4_30				174
+MX53_PAD_DISP0_DAT9__PWM2_PWMO				175
+MX53_PAD_DISP0_DAT9__WDOG2_WDOG_B			176
+MX53_PAD_DISP0_DAT9__SDMA_DEBUG_EVENT_CHANNEL_2		177
+MX53_PAD_DISP0_DAT9__EMI_EMI_DEBUG_14			178
+MX53_PAD_DISP0_DAT9__USBPHY2_VSTATUS_0			179
+MX53_PAD_DISP0_DAT10__IPU_DISP0_DAT_10			180
+MX53_PAD_DISP0_DAT10__GPIO4_31				181
+MX53_PAD_DISP0_DAT10__USBOH3_USBH2_STP			182
+MX53_PAD_DISP0_DAT10__SDMA_DEBUG_EVENT_CHANNEL_3	183
+MX53_PAD_DISP0_DAT10__EMI_EMI_DEBUG_15			184
+MX53_PAD_DISP0_DAT10__USBPHY2_VSTATUS_1			185
+MX53_PAD_DISP0_DAT11__IPU_DISP0_DAT_11			186
+MX53_PAD_DISP0_DAT11__GPIO5_5				187
+MX53_PAD_DISP0_DAT11__USBOH3_USBH2_NXT			188
+MX53_PAD_DISP0_DAT11__SDMA_DEBUG_EVENT_CHANNEL_4	189
+MX53_PAD_DISP0_DAT11__EMI_EMI_DEBUG_16			190
+MX53_PAD_DISP0_DAT11__USBPHY2_VSTATUS_2			191
+MX53_PAD_DISP0_DAT12__IPU_DISP0_DAT_12			192
+MX53_PAD_DISP0_DAT12__GPIO5_6				193
+MX53_PAD_DISP0_DAT12__USBOH3_USBH2_CLK			194
+MX53_PAD_DISP0_DAT12__SDMA_DEBUG_EVENT_CHANNEL_5	195
+MX53_PAD_DISP0_DAT12__EMI_EMI_DEBUG_17			196
+MX53_PAD_DISP0_DAT12__USBPHY2_VSTATUS_3			197
+MX53_PAD_DISP0_DAT13__IPU_DISP0_DAT_13			198
+MX53_PAD_DISP0_DAT13__GPIO5_7				199
+MX53_PAD_DISP0_DAT13__AUDMUX_AUD5_RXFS			200
+MX53_PAD_DISP0_DAT13__SDMA_DEBUG_EVT_CHN_LINES_0	201
+MX53_PAD_DISP0_DAT13__EMI_EMI_DEBUG_18			202
+MX53_PAD_DISP0_DAT13__USBPHY2_VSTATUS_4			203
+MX53_PAD_DISP0_DAT14__IPU_DISP0_DAT_14			204
+MX53_PAD_DISP0_DAT14__GPIO5_8				205
+MX53_PAD_DISP0_DAT14__AUDMUX_AUD5_RXC			206
+MX53_PAD_DISP0_DAT14__SDMA_DEBUG_EVT_CHN_LINES_1	207
+MX53_PAD_DISP0_DAT14__EMI_EMI_DEBUG_19			208
+MX53_PAD_DISP0_DAT14__USBPHY2_VSTATUS_5			209
+MX53_PAD_DISP0_DAT15__IPU_DISP0_DAT_15			210
+MX53_PAD_DISP0_DAT15__GPIO5_9				211
+MX53_PAD_DISP0_DAT15__ECSPI1_SS1			212
+MX53_PAD_DISP0_DAT15__ECSPI2_SS1			213
+MX53_PAD_DISP0_DAT15__SDMA_DEBUG_EVT_CHN_LINES_2	214
+MX53_PAD_DISP0_DAT15__EMI_EMI_DEBUG_20			215
+MX53_PAD_DISP0_DAT15__USBPHY2_VSTATUS_6			216
+MX53_PAD_DISP0_DAT16__IPU_DISP0_DAT_16			217
+MX53_PAD_DISP0_DAT16__GPIO5_10				218
+MX53_PAD_DISP0_DAT16__ECSPI2_MOSI			219
+MX53_PAD_DISP0_DAT16__AUDMUX_AUD5_TXC			220
+MX53_PAD_DISP0_DAT16__SDMA_EXT_EVENT_0			221
+MX53_PAD_DISP0_DAT16__SDMA_DEBUG_EVT_CHN_LINES_3	222
+MX53_PAD_DISP0_DAT16__EMI_EMI_DEBUG_21			223
+MX53_PAD_DISP0_DAT16__USBPHY2_VSTATUS_7			224
+MX53_PAD_DISP0_DAT17__IPU_DISP0_DAT_17			225
+MX53_PAD_DISP0_DAT17__GPIO5_11				226
+MX53_PAD_DISP0_DAT17__ECSPI2_MISO			227
+MX53_PAD_DISP0_DAT17__AUDMUX_AUD5_TXD			228
+MX53_PAD_DISP0_DAT17__SDMA_EXT_EVENT_1			229
+MX53_PAD_DISP0_DAT17__SDMA_DEBUG_EVT_CHN_LINES_4	230
+MX53_PAD_DISP0_DAT17__EMI_EMI_DEBUG_22			231
+MX53_PAD_DISP0_DAT18__IPU_DISP0_DAT_18			232
+MX53_PAD_DISP0_DAT18__GPIO5_12				233
+MX53_PAD_DISP0_DAT18__ECSPI2_SS0			234
+MX53_PAD_DISP0_DAT18__AUDMUX_AUD5_TXFS			235
+MX53_PAD_DISP0_DAT18__AUDMUX_AUD4_RXFS			236
+MX53_PAD_DISP0_DAT18__SDMA_DEBUG_EVT_CHN_LINES_5	237
+MX53_PAD_DISP0_DAT18__EMI_EMI_DEBUG_23			238
+MX53_PAD_DISP0_DAT18__EMI_WEIM_CS_2			239
+MX53_PAD_DISP0_DAT19__IPU_DISP0_DAT_19			240
+MX53_PAD_DISP0_DAT19__GPIO5_13				241
+MX53_PAD_DISP0_DAT19__ECSPI2_SCLK			242
+MX53_PAD_DISP0_DAT19__AUDMUX_AUD5_RXD			243
+MX53_PAD_DISP0_DAT19__AUDMUX_AUD4_RXC			244
+MX53_PAD_DISP0_DAT19__SDMA_DEBUG_EVT_CHN_LINES_6	245
+MX53_PAD_DISP0_DAT19__EMI_EMI_DEBUG_24			246
+MX53_PAD_DISP0_DAT19__EMI_WEIM_CS_3			247
+MX53_PAD_DISP0_DAT20__IPU_DISP0_DAT_20			248
+MX53_PAD_DISP0_DAT20__GPIO5_14				249
+MX53_PAD_DISP0_DAT20__ECSPI1_SCLK			250
+MX53_PAD_DISP0_DAT20__AUDMUX_AUD4_TXC			251
+MX53_PAD_DISP0_DAT20__SDMA_DEBUG_EVT_CHN_LINES_7	252
+MX53_PAD_DISP0_DAT20__EMI_EMI_DEBUG_25			253
+MX53_PAD_DISP0_DAT20__SATA_PHY_TDI			254
+MX53_PAD_DISP0_DAT21__IPU_DISP0_DAT_21			255
+MX53_PAD_DISP0_DAT21__GPIO5_15				256
+MX53_PAD_DISP0_DAT21__ECSPI1_MOSI			257
+MX53_PAD_DISP0_DAT21__AUDMUX_AUD4_TXD			258
+MX53_PAD_DISP0_DAT21__SDMA_DEBUG_BUS_DEVICE_0		259
+MX53_PAD_DISP0_DAT21__EMI_EMI_DEBUG_26			260
+MX53_PAD_DISP0_DAT21__SATA_PHY_TDO			261
+MX53_PAD_DISP0_DAT22__IPU_DISP0_DAT_22			262
+MX53_PAD_DISP0_DAT22__GPIO5_16				263
+MX53_PAD_DISP0_DAT22__ECSPI1_MISO			264
+MX53_PAD_DISP0_DAT22__AUDMUX_AUD4_TXFS			265
+MX53_PAD_DISP0_DAT22__SDMA_DEBUG_BUS_DEVICE_1		266
+MX53_PAD_DISP0_DAT22__EMI_EMI_DEBUG_27			267
+MX53_PAD_DISP0_DAT22__SATA_PHY_TCK			268
+MX53_PAD_DISP0_DAT23__IPU_DISP0_DAT_23			269
+MX53_PAD_DISP0_DAT23__GPIO5_17				270
+MX53_PAD_DISP0_DAT23__ECSPI1_SS0			271
+MX53_PAD_DISP0_DAT23__AUDMUX_AUD4_RXD			272
+MX53_PAD_DISP0_DAT23__SDMA_DEBUG_BUS_DEVICE_2		273
+MX53_PAD_DISP0_DAT23__EMI_EMI_DEBUG_28			274
+MX53_PAD_DISP0_DAT23__SATA_PHY_TMS			275
+MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK			276
+MX53_PAD_CSI0_PIXCLK__GPIO5_18				277
+MX53_PAD_CSI0_PIXCLK__SDMA_DEBUG_PC_0			278
+MX53_PAD_CSI0_PIXCLK__EMI_EMI_DEBUG_29			279
+MX53_PAD_CSI0_MCLK__IPU_CSI0_HSYNC			280
+MX53_PAD_CSI0_MCLK__GPIO5_19				281
+MX53_PAD_CSI0_MCLK__CCM_CSI0_MCLK			282
+MX53_PAD_CSI0_MCLK__SDMA_DEBUG_PC_1			283
+MX53_PAD_CSI0_MCLK__EMI_EMI_DEBUG_30			284
+MX53_PAD_CSI0_MCLK__TPIU_TRCTL				285
+MX53_PAD_CSI0_DATA_EN__IPU_CSI0_DATA_EN			286
+MX53_PAD_CSI0_DATA_EN__GPIO5_20				287
+MX53_PAD_CSI0_DATA_EN__SDMA_DEBUG_PC_2			288
+MX53_PAD_CSI0_DATA_EN__EMI_EMI_DEBUG_31			289
+MX53_PAD_CSI0_DATA_EN__TPIU_TRCLK			290
+MX53_PAD_CSI0_VSYNC__IPU_CSI0_VSYNC			291
+MX53_PAD_CSI0_VSYNC__GPIO5_21				292
+MX53_PAD_CSI0_VSYNC__SDMA_DEBUG_PC_3			293
+MX53_PAD_CSI0_VSYNC__EMI_EMI_DEBUG_32			294
+MX53_PAD_CSI0_VSYNC__TPIU_TRACE_0			295
+MX53_PAD_CSI0_DAT4__IPU_CSI0_D_4			296
+MX53_PAD_CSI0_DAT4__GPIO5_22				297
+MX53_PAD_CSI0_DAT4__KPP_COL_5				298
+MX53_PAD_CSI0_DAT4__ECSPI1_SCLK				299
+MX53_PAD_CSI0_DAT4__USBOH3_USBH3_STP			300
+MX53_PAD_CSI0_DAT4__AUDMUX_AUD3_TXC			301
+MX53_PAD_CSI0_DAT4__EMI_EMI_DEBUG_33			302
+MX53_PAD_CSI0_DAT4__TPIU_TRACE_1			303
+MX53_PAD_CSI0_DAT5__IPU_CSI0_D_5			304
+MX53_PAD_CSI0_DAT5__GPIO5_23				305
+MX53_PAD_CSI0_DAT5__KPP_ROW_5				306
+MX53_PAD_CSI0_DAT5__ECSPI1_MOSI				307
+MX53_PAD_CSI0_DAT5__USBOH3_USBH3_NXT			308
+MX53_PAD_CSI0_DAT5__AUDMUX_AUD3_TXD			309
+MX53_PAD_CSI0_DAT5__EMI_EMI_DEBUG_34			310
+MX53_PAD_CSI0_DAT5__TPIU_TRACE_2			311
+MX53_PAD_CSI0_DAT6__IPU_CSI0_D_6			312
+MX53_PAD_CSI0_DAT6__GPIO5_24				313
+MX53_PAD_CSI0_DAT6__KPP_COL_6				314
+MX53_PAD_CSI0_DAT6__ECSPI1_MISO				315
+MX53_PAD_CSI0_DAT6__USBOH3_USBH3_CLK			316
+MX53_PAD_CSI0_DAT6__AUDMUX_AUD3_TXFS			317
+MX53_PAD_CSI0_DAT6__EMI_EMI_DEBUG_35			318
+MX53_PAD_CSI0_DAT6__TPIU_TRACE_3			319
+MX53_PAD_CSI0_DAT7__IPU_CSI0_D_7			320
+MX53_PAD_CSI0_DAT7__GPIO5_25				321
+MX53_PAD_CSI0_DAT7__KPP_ROW_6				322
+MX53_PAD_CSI0_DAT7__ECSPI1_SS0				323
+MX53_PAD_CSI0_DAT7__USBOH3_USBH3_DIR			324
+MX53_PAD_CSI0_DAT7__AUDMUX_AUD3_RXD			325
+MX53_PAD_CSI0_DAT7__EMI_EMI_DEBUG_36			326
+MX53_PAD_CSI0_DAT7__TPIU_TRACE_4			327
+MX53_PAD_CSI0_DAT8__IPU_CSI0_D_8			328
+MX53_PAD_CSI0_DAT8__GPIO5_26				329
+MX53_PAD_CSI0_DAT8__KPP_COL_7				330
+MX53_PAD_CSI0_DAT8__ECSPI2_SCLK				331
+MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC			332
+MX53_PAD_CSI0_DAT8__I2C1_SDA				333
+MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37			334
+MX53_PAD_CSI0_DAT8__TPIU_TRACE_5			335
+MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9			336
+MX53_PAD_CSI0_DAT9__GPIO5_27				337
+MX53_PAD_CSI0_DAT9__KPP_ROW_7				338
+MX53_PAD_CSI0_DAT9__ECSPI2_MOSI				339
+MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR			340
+MX53_PAD_CSI0_DAT9__I2C1_SCL				341
+MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38			342
+MX53_PAD_CSI0_DAT9__TPIU_TRACE_6			343
+MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10			344
+MX53_PAD_CSI0_DAT10__GPIO5_28				345
+MX53_PAD_CSI0_DAT10__UART1_TXD_MUX			346
+MX53_PAD_CSI0_DAT10__ECSPI2_MISO			347
+MX53_PAD_CSI0_DAT10__AUDMUX_AUD3_RXC			348
+MX53_PAD_CSI0_DAT10__SDMA_DEBUG_PC_4			349
+MX53_PAD_CSI0_DAT10__EMI_EMI_DEBUG_39			350
+MX53_PAD_CSI0_DAT10__TPIU_TRACE_7			351
+MX53_PAD_CSI0_DAT11__IPU_CSI0_D_11			352
+MX53_PAD_CSI0_DAT11__GPIO5_29				353
+MX53_PAD_CSI0_DAT11__UART1_RXD_MUX			354
+MX53_PAD_CSI0_DAT11__ECSPI2_SS0				355
+MX53_PAD_CSI0_DAT11__AUDMUX_AUD3_RXFS			356
+MX53_PAD_CSI0_DAT11__SDMA_DEBUG_PC_5			357
+MX53_PAD_CSI0_DAT11__EMI_EMI_DEBUG_40			358
+MX53_PAD_CSI0_DAT11__TPIU_TRACE_8			359
+MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12			360
+MX53_PAD_CSI0_DAT12__GPIO5_30				361
+MX53_PAD_CSI0_DAT12__UART4_TXD_MUX			362
+MX53_PAD_CSI0_DAT12__USBOH3_USBH3_DATA_0		363
+MX53_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6			364
+MX53_PAD_CSI0_DAT12__EMI_EMI_DEBUG_41			365
+MX53_PAD_CSI0_DAT12__TPIU_TRACE_9			366
+MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13			367
+MX53_PAD_CSI0_DAT13__GPIO5_31				368
+MX53_PAD_CSI0_DAT13__UART4_RXD_MUX			369
+MX53_PAD_CSI0_DAT13__USBOH3_USBH3_DATA_1		370
+MX53_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7			371
+MX53_PAD_CSI0_DAT13__EMI_EMI_DEBUG_42			372
+MX53_PAD_CSI0_DAT13__TPIU_TRACE_10			373
+MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14			374
+MX53_PAD_CSI0_DAT14__GPIO6_0				375
+MX53_PAD_CSI0_DAT14__UART5_TXD_MUX			376
+MX53_PAD_CSI0_DAT14__USBOH3_USBH3_DATA_2		377
+MX53_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8			378
+MX53_PAD_CSI0_DAT14__EMI_EMI_DEBUG_43			379
+MX53_PAD_CSI0_DAT14__TPIU_TRACE_11			380
+MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15			381
+MX53_PAD_CSI0_DAT15__GPIO6_1				382
+MX53_PAD_CSI0_DAT15__UART5_RXD_MUX			383
+MX53_PAD_CSI0_DAT15__USBOH3_USBH3_DATA_3		384
+MX53_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9			385
+MX53_PAD_CSI0_DAT15__EMI_EMI_DEBUG_44			386
+MX53_PAD_CSI0_DAT15__TPIU_TRACE_12			387
+MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16			388
+MX53_PAD_CSI0_DAT16__GPIO6_2				389
+MX53_PAD_CSI0_DAT16__UART4_RTS				390
+MX53_PAD_CSI0_DAT16__USBOH3_USBH3_DATA_4		391
+MX53_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10			392
+MX53_PAD_CSI0_DAT16__EMI_EMI_DEBUG_45			393
+MX53_PAD_CSI0_DAT16__TPIU_TRACE_13			394
+MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17			395
+MX53_PAD_CSI0_DAT17__GPIO6_3				396
+MX53_PAD_CSI0_DAT17__UART4_CTS				397
+MX53_PAD_CSI0_DAT17__USBOH3_USBH3_DATA_5		398
+MX53_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11			399
+MX53_PAD_CSI0_DAT17__EMI_EMI_DEBUG_46			400
+MX53_PAD_CSI0_DAT17__TPIU_TRACE_14			401
+MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18			402
+MX53_PAD_CSI0_DAT18__GPIO6_4				403
+MX53_PAD_CSI0_DAT18__UART5_RTS				404
+MX53_PAD_CSI0_DAT18__USBOH3_USBH3_DATA_6		405
+MX53_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12			406
+MX53_PAD_CSI0_DAT18__EMI_EMI_DEBUG_47			407
+MX53_PAD_CSI0_DAT18__TPIU_TRACE_15			408
+MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19			409
+MX53_PAD_CSI0_DAT19__GPIO6_5				410
+MX53_PAD_CSI0_DAT19__UART5_CTS				411
+MX53_PAD_CSI0_DAT19__USBOH3_USBH3_DATA_7		412
+MX53_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13			413
+MX53_PAD_CSI0_DAT19__EMI_EMI_DEBUG_48			414
+MX53_PAD_CSI0_DAT19__USBPHY2_BISTOK			415
+MX53_PAD_EIM_A25__EMI_WEIM_A_25				416
+MX53_PAD_EIM_A25__GPIO5_2				417
+MX53_PAD_EIM_A25__ECSPI2_RDY				418
+MX53_PAD_EIM_A25__IPU_DI1_PIN12				419
+MX53_PAD_EIM_A25__CSPI_SS1				420
+MX53_PAD_EIM_A25__IPU_DI0_D1_CS				421
+MX53_PAD_EIM_A25__USBPHY1_BISTOK			422
+MX53_PAD_EIM_EB2__EMI_WEIM_EB_2				423
+MX53_PAD_EIM_EB2__GPIO2_30				424
+MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK			425
+MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS			426
+MX53_PAD_EIM_EB2__ECSPI1_SS0				427
+MX53_PAD_EIM_EB2__I2C2_SCL				428
+MX53_PAD_EIM_D16__EMI_WEIM_D_16				429
+MX53_PAD_EIM_D16__GPIO3_16				430
+MX53_PAD_EIM_D16__IPU_DI0_PIN5				431
+MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK			432
+MX53_PAD_EIM_D16__ECSPI1_SCLK				433
+MX53_PAD_EIM_D16__I2C2_SDA				434
+MX53_PAD_EIM_D17__EMI_WEIM_D_17				435
+MX53_PAD_EIM_D17__GPIO3_17				436
+MX53_PAD_EIM_D17__IPU_DI0_PIN6				437
+MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN			438
+MX53_PAD_EIM_D17__ECSPI1_MISO				439
+MX53_PAD_EIM_D17__I2C3_SCL				440
+MX53_PAD_EIM_D18__EMI_WEIM_D_18				441
+MX53_PAD_EIM_D18__GPIO3_18				442
+MX53_PAD_EIM_D18__IPU_DI0_PIN7				443
+MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO			444
+MX53_PAD_EIM_D18__ECSPI1_MOSI				445
+MX53_PAD_EIM_D18__I2C3_SDA				446
+MX53_PAD_EIM_D18__IPU_DI1_D0_CS				447
+MX53_PAD_EIM_D19__EMI_WEIM_D_19				448
+MX53_PAD_EIM_D19__GPIO3_19				449
+MX53_PAD_EIM_D19__IPU_DI0_PIN8				450
+MX53_PAD_EIM_D19__IPU_DISPB1_SER_RS			451
+MX53_PAD_EIM_D19__ECSPI1_SS1				452
+MX53_PAD_EIM_D19__EPIT1_EPITO				453
+MX53_PAD_EIM_D19__UART1_CTS				454
+MX53_PAD_EIM_D19__USBOH3_USBH2_OC			455
+MX53_PAD_EIM_D20__EMI_WEIM_D_20				456
+MX53_PAD_EIM_D20__GPIO3_20				457
+MX53_PAD_EIM_D20__IPU_DI0_PIN16				458
+MX53_PAD_EIM_D20__IPU_SER_DISP0_CS			459
+MX53_PAD_EIM_D20__CSPI_SS0				460
+MX53_PAD_EIM_D20__EPIT2_EPITO				461
+MX53_PAD_EIM_D20__UART1_RTS				462
+MX53_PAD_EIM_D20__USBOH3_USBH2_PWR			463
+MX53_PAD_EIM_D21__EMI_WEIM_D_21				464
+MX53_PAD_EIM_D21__GPIO3_21				465
+MX53_PAD_EIM_D21__IPU_DI0_PIN17				466
+MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK			467
+MX53_PAD_EIM_D21__CSPI_SCLK				468
+MX53_PAD_EIM_D21__I2C1_SCL				469
+MX53_PAD_EIM_D21__USBOH3_USBOTG_OC			470
+MX53_PAD_EIM_D22__EMI_WEIM_D_22				471
+MX53_PAD_EIM_D22__GPIO3_22				472
+MX53_PAD_EIM_D22__IPU_DI0_PIN1				473
+MX53_PAD_EIM_D22__IPU_DISPB0_SER_DIN			474
+MX53_PAD_EIM_D22__CSPI_MISO				475
+MX53_PAD_EIM_D22__USBOH3_USBOTG_PWR			476
+MX53_PAD_EIM_D23__EMI_WEIM_D_23				477
+MX53_PAD_EIM_D23__GPIO3_23				478
+MX53_PAD_EIM_D23__UART3_CTS				479
+MX53_PAD_EIM_D23__UART1_DCD				480
+MX53_PAD_EIM_D23__IPU_DI0_D0_CS				481
+MX53_PAD_EIM_D23__IPU_DI1_PIN2				482
+MX53_PAD_EIM_D23__IPU_CSI1_DATA_EN			483
+MX53_PAD_EIM_D23__IPU_DI1_PIN14				484
+MX53_PAD_EIM_EB3__EMI_WEIM_EB_3				485
+MX53_PAD_EIM_EB3__GPIO2_31				486
+MX53_PAD_EIM_EB3__UART3_RTS				487
+MX53_PAD_EIM_EB3__UART1_RI				488
+MX53_PAD_EIM_EB3__IPU_DI1_PIN3				489
+MX53_PAD_EIM_EB3__IPU_CSI1_HSYNC			490
+MX53_PAD_EIM_EB3__IPU_DI1_PIN16				491
+MX53_PAD_EIM_D24__EMI_WEIM_D_24				492
+MX53_PAD_EIM_D24__GPIO3_24				493
+MX53_PAD_EIM_D24__UART3_TXD_MUX				494
+MX53_PAD_EIM_D24__ECSPI1_SS2				495
+MX53_PAD_EIM_D24__CSPI_SS2				496
+MX53_PAD_EIM_D24__AUDMUX_AUD5_RXFS			497
+MX53_PAD_EIM_D24__ECSPI2_SS2				498
+MX53_PAD_EIM_D24__UART1_DTR				499
+MX53_PAD_EIM_D25__EMI_WEIM_D_25				500
+MX53_PAD_EIM_D25__GPIO3_25				501
+MX53_PAD_EIM_D25__UART3_RXD_MUX				502
+MX53_PAD_EIM_D25__ECSPI1_SS3				503
+MX53_PAD_EIM_D25__CSPI_SS3				504
+MX53_PAD_EIM_D25__AUDMUX_AUD5_RXC			505
+MX53_PAD_EIM_D25__ECSPI2_SS3				506
+MX53_PAD_EIM_D25__UART1_DSR				507
+MX53_PAD_EIM_D26__EMI_WEIM_D_26				508
+MX53_PAD_EIM_D26__GPIO3_26				509
+MX53_PAD_EIM_D26__UART2_TXD_MUX				510
+MX53_PAD_EIM_D26__FIRI_RXD				511
+MX53_PAD_EIM_D26__IPU_CSI0_D_1				512
+MX53_PAD_EIM_D26__IPU_DI1_PIN11				513
+MX53_PAD_EIM_D26__IPU_SISG_2				514
+MX53_PAD_EIM_D26__IPU_DISP1_DAT_22			515
+MX53_PAD_EIM_D27__EMI_WEIM_D_27				516
+MX53_PAD_EIM_D27__GPIO3_27				517
+MX53_PAD_EIM_D27__UART2_RXD_MUX				518
+MX53_PAD_EIM_D27__FIRI_TXD				519
+MX53_PAD_EIM_D27__IPU_CSI0_D_0				520
+MX53_PAD_EIM_D27__IPU_DI1_PIN13				521
+MX53_PAD_EIM_D27__IPU_SISG_3				522
+MX53_PAD_EIM_D27__IPU_DISP1_DAT_23			523
+MX53_PAD_EIM_D28__EMI_WEIM_D_28				524
+MX53_PAD_EIM_D28__GPIO3_28				525
+MX53_PAD_EIM_D28__UART2_CTS				526
+MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO			527
+MX53_PAD_EIM_D28__CSPI_MOSI				528
+MX53_PAD_EIM_D28__I2C1_SDA				529
+MX53_PAD_EIM_D28__IPU_EXT_TRIG				530
+MX53_PAD_EIM_D28__IPU_DI0_PIN13				531
+MX53_PAD_EIM_D29__EMI_WEIM_D_29				532
+MX53_PAD_EIM_D29__GPIO3_29				533
+MX53_PAD_EIM_D29__UART2_RTS				534
+MX53_PAD_EIM_D29__IPU_DISPB0_SER_RS			535
+MX53_PAD_EIM_D29__CSPI_SS0				536
+MX53_PAD_EIM_D29__IPU_DI1_PIN15				537
+MX53_PAD_EIM_D29__IPU_CSI1_VSYNC			538
+MX53_PAD_EIM_D29__IPU_DI0_PIN14				539
+MX53_PAD_EIM_D30__EMI_WEIM_D_30				540
+MX53_PAD_EIM_D30__GPIO3_30				541
+MX53_PAD_EIM_D30__UART3_CTS				542
+MX53_PAD_EIM_D30__IPU_CSI0_D_3				543
+MX53_PAD_EIM_D30__IPU_DI0_PIN11				544
+MX53_PAD_EIM_D30__IPU_DISP1_DAT_21			545
+MX53_PAD_EIM_D30__USBOH3_USBH1_OC			546
+MX53_PAD_EIM_D30__USBOH3_USBH2_OC			547
+MX53_PAD_EIM_D31__EMI_WEIM_D_31				548
+MX53_PAD_EIM_D31__GPIO3_31				549
+MX53_PAD_EIM_D31__UART3_RTS				550
+MX53_PAD_EIM_D31__IPU_CSI0_D_2				551
+MX53_PAD_EIM_D31__IPU_DI0_PIN12				552
+MX53_PAD_EIM_D31__IPU_DISP1_DAT_20			553
+MX53_PAD_EIM_D31__USBOH3_USBH1_PWR			554
+MX53_PAD_EIM_D31__USBOH3_USBH2_PWR			555
+MX53_PAD_EIM_A24__EMI_WEIM_A_24				556
+MX53_PAD_EIM_A24__GPIO5_4				557
+MX53_PAD_EIM_A24__IPU_DISP1_DAT_19			558
+MX53_PAD_EIM_A24__IPU_CSI1_D_19				559
+MX53_PAD_EIM_A24__IPU_SISG_2				560
+MX53_PAD_EIM_A24__USBPHY2_BVALID			561
+MX53_PAD_EIM_A23__EMI_WEIM_A_23				562
+MX53_PAD_EIM_A23__GPIO6_6				563
+MX53_PAD_EIM_A23__IPU_DISP1_DAT_18			564
+MX53_PAD_EIM_A23__IPU_CSI1_D_18				565
+MX53_PAD_EIM_A23__IPU_SISG_3				566
+MX53_PAD_EIM_A23__USBPHY2_ENDSESSION			567
+MX53_PAD_EIM_A22__EMI_WEIM_A_22				568
+MX53_PAD_EIM_A22__GPIO2_16				569
+MX53_PAD_EIM_A22__IPU_DISP1_DAT_17			570
+MX53_PAD_EIM_A22__IPU_CSI1_D_17				571
+MX53_PAD_EIM_A22__SRC_BT_CFG1_7				572
+MX53_PAD_EIM_A21__EMI_WEIM_A_21				573
+MX53_PAD_EIM_A21__GPIO2_17				574
+MX53_PAD_EIM_A21__IPU_DISP1_DAT_16			575
+MX53_PAD_EIM_A21__IPU_CSI1_D_16				576
+MX53_PAD_EIM_A21__SRC_BT_CFG1_6				577
+MX53_PAD_EIM_A20__EMI_WEIM_A_20				578
+MX53_PAD_EIM_A20__GPIO2_18				579
+MX53_PAD_EIM_A20__IPU_DISP1_DAT_15			580
+MX53_PAD_EIM_A20__IPU_CSI1_D_15				581
+MX53_PAD_EIM_A20__SRC_BT_CFG1_5				582
+MX53_PAD_EIM_A19__EMI_WEIM_A_19				583
+MX53_PAD_EIM_A19__GPIO2_19				584
+MX53_PAD_EIM_A19__IPU_DISP1_DAT_14			585
+MX53_PAD_EIM_A19__IPU_CSI1_D_14				586
+MX53_PAD_EIM_A19__SRC_BT_CFG1_4				587
+MX53_PAD_EIM_A18__EMI_WEIM_A_18				588
+MX53_PAD_EIM_A18__GPIO2_20				589
+MX53_PAD_EIM_A18__IPU_DISP1_DAT_13			590
+MX53_PAD_EIM_A18__IPU_CSI1_D_13				591
+MX53_PAD_EIM_A18__SRC_BT_CFG1_3				592
+MX53_PAD_EIM_A17__EMI_WEIM_A_17				593
+MX53_PAD_EIM_A17__GPIO2_21				594
+MX53_PAD_EIM_A17__IPU_DISP1_DAT_12			595
+MX53_PAD_EIM_A17__IPU_CSI1_D_12				596
+MX53_PAD_EIM_A17__SRC_BT_CFG1_2				597
+MX53_PAD_EIM_A16__EMI_WEIM_A_16				598
+MX53_PAD_EIM_A16__GPIO2_22				599
+MX53_PAD_EIM_A16__IPU_DI1_DISP_CLK			600
+MX53_PAD_EIM_A16__IPU_CSI1_PIXCLK			601
+MX53_PAD_EIM_A16__SRC_BT_CFG1_1				602
+MX53_PAD_EIM_CS0__EMI_WEIM_CS_0				603
+MX53_PAD_EIM_CS0__GPIO2_23				604
+MX53_PAD_EIM_CS0__ECSPI2_SCLK				605
+MX53_PAD_EIM_CS0__IPU_DI1_PIN5				606
+MX53_PAD_EIM_CS1__EMI_WEIM_CS_1				607
+MX53_PAD_EIM_CS1__GPIO2_24				608
+MX53_PAD_EIM_CS1__ECSPI2_MOSI				609
+MX53_PAD_EIM_CS1__IPU_DI1_PIN6				610
+MX53_PAD_EIM_OE__EMI_WEIM_OE				611
+MX53_PAD_EIM_OE__GPIO2_25				612
+MX53_PAD_EIM_OE__ECSPI2_MISO				613
+MX53_PAD_EIM_OE__IPU_DI1_PIN7				614
+MX53_PAD_EIM_OE__USBPHY2_IDDIG				615
+MX53_PAD_EIM_RW__EMI_WEIM_RW				616
+MX53_PAD_EIM_RW__GPIO2_26				617
+MX53_PAD_EIM_RW__ECSPI2_SS0				618
+MX53_PAD_EIM_RW__IPU_DI1_PIN8				619
+MX53_PAD_EIM_RW__USBPHY2_HOSTDISCONNECT			620
+MX53_PAD_EIM_LBA__EMI_WEIM_LBA				621
+MX53_PAD_EIM_LBA__GPIO2_27				622
+MX53_PAD_EIM_LBA__ECSPI2_SS1				623
+MX53_PAD_EIM_LBA__IPU_DI1_PIN17				624
+MX53_PAD_EIM_LBA__SRC_BT_CFG1_0				625
+MX53_PAD_EIM_EB0__EMI_WEIM_EB_0				626
+MX53_PAD_EIM_EB0__GPIO2_28				627
+MX53_PAD_EIM_EB0__IPU_DISP1_DAT_11			628
+MX53_PAD_EIM_EB0__IPU_CSI1_D_11				629
+MX53_PAD_EIM_EB0__GPC_PMIC_RDY				630
+MX53_PAD_EIM_EB0__SRC_BT_CFG2_7				631
+MX53_PAD_EIM_EB1__EMI_WEIM_EB_1				632
+MX53_PAD_EIM_EB1__GPIO2_29				633
+MX53_PAD_EIM_EB1__IPU_DISP1_DAT_10			634
+MX53_PAD_EIM_EB1__IPU_CSI1_D_10				635
+MX53_PAD_EIM_EB1__SRC_BT_CFG2_6				636
+MX53_PAD_EIM_DA0__EMI_NAND_WEIM_DA_0			637
+MX53_PAD_EIM_DA0__GPIO3_0				638
+MX53_PAD_EIM_DA0__IPU_DISP1_DAT_9			639
+MX53_PAD_EIM_DA0__IPU_CSI1_D_9				640
+MX53_PAD_EIM_DA0__SRC_BT_CFG2_5				641
+MX53_PAD_EIM_DA1__EMI_NAND_WEIM_DA_1			642
+MX53_PAD_EIM_DA1__GPIO3_1				643
+MX53_PAD_EIM_DA1__IPU_DISP1_DAT_8			644
+MX53_PAD_EIM_DA1__IPU_CSI1_D_8				645
+MX53_PAD_EIM_DA1__SRC_BT_CFG2_4				646
+MX53_PAD_EIM_DA2__EMI_NAND_WEIM_DA_2			647
+MX53_PAD_EIM_DA2__GPIO3_2				648
+MX53_PAD_EIM_DA2__IPU_DISP1_DAT_7			649
+MX53_PAD_EIM_DA2__IPU_CSI1_D_7				650
+MX53_PAD_EIM_DA2__SRC_BT_CFG2_3				651
+MX53_PAD_EIM_DA3__EMI_NAND_WEIM_DA_3			652
+MX53_PAD_EIM_DA3__GPIO3_3				653
+MX53_PAD_EIM_DA3__IPU_DISP1_DAT_6			654
+MX53_PAD_EIM_DA3__IPU_CSI1_D_6				655
+MX53_PAD_EIM_DA3__SRC_BT_CFG2_2				656
+MX53_PAD_EIM_DA4__EMI_NAND_WEIM_DA_4			657
+MX53_PAD_EIM_DA4__GPIO3_4				658
+MX53_PAD_EIM_DA4__IPU_DISP1_DAT_5			659
+MX53_PAD_EIM_DA4__IPU_CSI1_D_5				660
+MX53_PAD_EIM_DA4__SRC_BT_CFG3_7				661
+MX53_PAD_EIM_DA5__EMI_NAND_WEIM_DA_5			662
+MX53_PAD_EIM_DA5__GPIO3_5				663
+MX53_PAD_EIM_DA5__IPU_DISP1_DAT_4			664
+MX53_PAD_EIM_DA5__IPU_CSI1_D_4				665
+MX53_PAD_EIM_DA5__SRC_BT_CFG3_6				666
+MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6			667
+MX53_PAD_EIM_DA6__GPIO3_6				668
+MX53_PAD_EIM_DA6__IPU_DISP1_DAT_3			669
+MX53_PAD_EIM_DA6__IPU_CSI1_D_3				670
+MX53_PAD_EIM_DA6__SRC_BT_CFG3_5				671
+MX53_PAD_EIM_DA7__EMI_NAND_WEIM_DA_7			672
+MX53_PAD_EIM_DA7__GPIO3_7				673
+MX53_PAD_EIM_DA7__IPU_DISP1_DAT_2			674
+MX53_PAD_EIM_DA7__IPU_CSI1_D_2				675
+MX53_PAD_EIM_DA7__SRC_BT_CFG3_4				676
+MX53_PAD_EIM_DA8__EMI_NAND_WEIM_DA_8			677
+MX53_PAD_EIM_DA8__GPIO3_8				678
+MX53_PAD_EIM_DA8__IPU_DISP1_DAT_1			679
+MX53_PAD_EIM_DA8__IPU_CSI1_D_1				680
+MX53_PAD_EIM_DA8__SRC_BT_CFG3_3				681
+MX53_PAD_EIM_DA9__EMI_NAND_WEIM_DA_9			682
+MX53_PAD_EIM_DA9__GPIO3_9				683
+MX53_PAD_EIM_DA9__IPU_DISP1_DAT_0			684
+MX53_PAD_EIM_DA9__IPU_CSI1_D_0				685
+MX53_PAD_EIM_DA9__SRC_BT_CFG3_2				686
+MX53_PAD_EIM_DA10__EMI_NAND_WEIM_DA_10			687
+MX53_PAD_EIM_DA10__GPIO3_10				688
+MX53_PAD_EIM_DA10__IPU_DI1_PIN15			689
+MX53_PAD_EIM_DA10__IPU_CSI1_DATA_EN			690
+MX53_PAD_EIM_DA10__SRC_BT_CFG3_1			691
+MX53_PAD_EIM_DA11__EMI_NAND_WEIM_DA_11			692
+MX53_PAD_EIM_DA11__GPIO3_11				693
+MX53_PAD_EIM_DA11__IPU_DI1_PIN2				694
+MX53_PAD_EIM_DA11__IPU_CSI1_HSYNC			695
+MX53_PAD_EIM_DA12__EMI_NAND_WEIM_DA_12			696
+MX53_PAD_EIM_DA12__GPIO3_12				697
+MX53_PAD_EIM_DA12__IPU_DI1_PIN3				698
+MX53_PAD_EIM_DA12__IPU_CSI1_VSYNC			699
+MX53_PAD_EIM_DA13__EMI_NAND_WEIM_DA_13			700
+MX53_PAD_EIM_DA13__GPIO3_13				701
+MX53_PAD_EIM_DA13__IPU_DI1_D0_CS			702
+MX53_PAD_EIM_DA13__CCM_DI1_EXT_CLK			703
+MX53_PAD_EIM_DA14__EMI_NAND_WEIM_DA_14			704
+MX53_PAD_EIM_DA14__GPIO3_14				705
+MX53_PAD_EIM_DA14__IPU_DI1_D1_CS			706
+MX53_PAD_EIM_DA14__CCM_DI0_EXT_CLK			707
+MX53_PAD_EIM_DA15__EMI_NAND_WEIM_DA_15			708
+MX53_PAD_EIM_DA15__GPIO3_15				709
+MX53_PAD_EIM_DA15__IPU_DI1_PIN1				710
+MX53_PAD_EIM_DA15__IPU_DI1_PIN4				711
+MX53_PAD_NANDF_WE_B__EMI_NANDF_WE_B			712
+MX53_PAD_NANDF_WE_B__GPIO6_12				713
+MX53_PAD_NANDF_RE_B__EMI_NANDF_RE_B			714
+MX53_PAD_NANDF_RE_B__GPIO6_13				715
+MX53_PAD_EIM_WAIT__EMI_WEIM_WAIT			716
+MX53_PAD_EIM_WAIT__GPIO5_0				717
+MX53_PAD_EIM_WAIT__EMI_WEIM_DTACK_B			718
+MX53_PAD_LVDS1_TX3_P__GPIO6_22				719
+MX53_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3			720
+MX53_PAD_LVDS1_TX2_P__GPIO6_24				721
+MX53_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2			722
+MX53_PAD_LVDS1_CLK_P__GPIO6_26				723
+MX53_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK			724
+MX53_PAD_LVDS1_TX1_P__GPIO6_28				725
+MX53_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1			726
+MX53_PAD_LVDS1_TX0_P__GPIO6_30				727
+MX53_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0			728
+MX53_PAD_LVDS0_TX3_P__GPIO7_22				729
+MX53_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3			730
+MX53_PAD_LVDS0_CLK_P__GPIO7_24				731
+MX53_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK			732
+MX53_PAD_LVDS0_TX2_P__GPIO7_26				733
+MX53_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2			734
+MX53_PAD_LVDS0_TX1_P__GPIO7_28				735
+MX53_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1			736
+MX53_PAD_LVDS0_TX0_P__GPIO7_30				737
+MX53_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0			738
+MX53_PAD_GPIO_10__GPIO4_0				739
+MX53_PAD_GPIO_10__OSC32k_32K_OUT			740
+MX53_PAD_GPIO_11__GPIO4_1				741
+MX53_PAD_GPIO_12__GPIO4_2				742
+MX53_PAD_GPIO_13__GPIO4_3				743
+MX53_PAD_GPIO_14__GPIO4_4				744
+MX53_PAD_NANDF_CLE__EMI_NANDF_CLE			745
+MX53_PAD_NANDF_CLE__GPIO6_7				746
+MX53_PAD_NANDF_CLE__USBPHY1_VSTATUS_0			747
+MX53_PAD_NANDF_ALE__EMI_NANDF_ALE			748
+MX53_PAD_NANDF_ALE__GPIO6_8				749
+MX53_PAD_NANDF_ALE__USBPHY1_VSTATUS_1			750
+MX53_PAD_NANDF_WP_B__EMI_NANDF_WP_B			751
+MX53_PAD_NANDF_WP_B__GPIO6_9				752
+MX53_PAD_NANDF_WP_B__USBPHY1_VSTATUS_2			753
+MX53_PAD_NANDF_RB0__EMI_NANDF_RB_0			754
+MX53_PAD_NANDF_RB0__GPIO6_10				755
+MX53_PAD_NANDF_RB0__USBPHY1_VSTATUS_3			756
+MX53_PAD_NANDF_CS0__EMI_NANDF_CS_0			757
+MX53_PAD_NANDF_CS0__GPIO6_11				758
+MX53_PAD_NANDF_CS0__USBPHY1_VSTATUS_4			759
+MX53_PAD_NANDF_CS1__EMI_NANDF_CS_1			760
+MX53_PAD_NANDF_CS1__GPIO6_14				761
+MX53_PAD_NANDF_CS1__MLB_MLBCLK				762
+MX53_PAD_NANDF_CS1__USBPHY1_VSTATUS_5			763
+MX53_PAD_NANDF_CS2__EMI_NANDF_CS_2			764
+MX53_PAD_NANDF_CS2__GPIO6_15				765
+MX53_PAD_NANDF_CS2__IPU_SISG_0				766
+MX53_PAD_NANDF_CS2__ESAI1_TX0				767
+MX53_PAD_NANDF_CS2__EMI_WEIM_CRE			768
+MX53_PAD_NANDF_CS2__CCM_CSI0_MCLK			769
+MX53_PAD_NANDF_CS2__MLB_MLBSIG				770
+MX53_PAD_NANDF_CS2__USBPHY1_VSTATUS_6			771
+MX53_PAD_NANDF_CS3__EMI_NANDF_CS_3			772
+MX53_PAD_NANDF_CS3__GPIO6_16				773
+MX53_PAD_NANDF_CS3__IPU_SISG_1				774
+MX53_PAD_NANDF_CS3__ESAI1_TX1				775
+MX53_PAD_NANDF_CS3__EMI_WEIM_A_26			776
+MX53_PAD_NANDF_CS3__MLB_MLBDAT				777
+MX53_PAD_NANDF_CS3__USBPHY1_VSTATUS_7			778
+MX53_PAD_FEC_MDIO__FEC_MDIO				779
+MX53_PAD_FEC_MDIO__GPIO1_22				780
+MX53_PAD_FEC_MDIO__ESAI1_SCKR				781
+MX53_PAD_FEC_MDIO__FEC_COL				782
+MX53_PAD_FEC_MDIO__RTC_CE_RTC_PS2			783
+MX53_PAD_FEC_MDIO__SDMA_DEBUG_BUS_DEVICE_3		784
+MX53_PAD_FEC_MDIO__EMI_EMI_DEBUG_49			785
+MX53_PAD_FEC_REF_CLK__FEC_TX_CLK			786
+MX53_PAD_FEC_REF_CLK__GPIO1_23				787
+MX53_PAD_FEC_REF_CLK__ESAI1_FSR				788
+MX53_PAD_FEC_REF_CLK__SDMA_DEBUG_BUS_DEVICE_4		789
+MX53_PAD_FEC_REF_CLK__EMI_EMI_DEBUG_50			790
+MX53_PAD_FEC_RX_ER__FEC_RX_ER				791
+MX53_PAD_FEC_RX_ER__GPIO1_24				792
+MX53_PAD_FEC_RX_ER__ESAI1_HCKR				793
+MX53_PAD_FEC_RX_ER__FEC_RX_CLK				794
+MX53_PAD_FEC_RX_ER__RTC_CE_RTC_PS3			795
+MX53_PAD_FEC_CRS_DV__FEC_RX_DV				796
+MX53_PAD_FEC_CRS_DV__GPIO1_25				797
+MX53_PAD_FEC_CRS_DV__ESAI1_SCKT				798
+MX53_PAD_FEC_RXD1__FEC_RDATA_1				799
+MX53_PAD_FEC_RXD1__GPIO1_26				800
+MX53_PAD_FEC_RXD1__ESAI1_FST				801
+MX53_PAD_FEC_RXD1__MLB_MLBSIG				802
+MX53_PAD_FEC_RXD1__RTC_CE_RTC_PS1			803
+MX53_PAD_FEC_RXD0__FEC_RDATA_0				804
+MX53_PAD_FEC_RXD0__GPIO1_27				805
+MX53_PAD_FEC_RXD0__ESAI1_HCKT				806
+MX53_PAD_FEC_RXD0__OSC32k_32K_OUT			807
+MX53_PAD_FEC_TX_EN__FEC_TX_EN				808
+MX53_PAD_FEC_TX_EN__GPIO1_28				809
+MX53_PAD_FEC_TX_EN__ESAI1_TX3_RX2			810
+MX53_PAD_FEC_TXD1__FEC_TDATA_1				811
+MX53_PAD_FEC_TXD1__GPIO1_29				812
+MX53_PAD_FEC_TXD1__ESAI1_TX2_RX3			813
+MX53_PAD_FEC_TXD1__MLB_MLBCLK				814
+MX53_PAD_FEC_TXD1__RTC_CE_RTC_PRSC_CLK			815
+MX53_PAD_FEC_TXD0__FEC_TDATA_0				816
+MX53_PAD_FEC_TXD0__GPIO1_30				817
+MX53_PAD_FEC_TXD0__ESAI1_TX4_RX1			818
+MX53_PAD_FEC_TXD0__USBPHY2_DATAOUT_0			819
+MX53_PAD_FEC_MDC__FEC_MDC				820
+MX53_PAD_FEC_MDC__GPIO1_31				821
+MX53_PAD_FEC_MDC__ESAI1_TX5_RX0				822
+MX53_PAD_FEC_MDC__MLB_MLBDAT				823
+MX53_PAD_FEC_MDC__RTC_CE_RTC_ALARM1_TRIG		824
+MX53_PAD_FEC_MDC__USBPHY2_DATAOUT_1			825
+MX53_PAD_PATA_DIOW__PATA_DIOW				826
+MX53_PAD_PATA_DIOW__GPIO6_17				827
+MX53_PAD_PATA_DIOW__UART1_TXD_MUX			828
+MX53_PAD_PATA_DIOW__USBPHY2_DATAOUT_2			829
+MX53_PAD_PATA_DMACK__PATA_DMACK				830
+MX53_PAD_PATA_DMACK__GPIO6_18				831
+MX53_PAD_PATA_DMACK__UART1_RXD_MUX			832
+MX53_PAD_PATA_DMACK__USBPHY2_DATAOUT_3			833
+MX53_PAD_PATA_DMARQ__PATA_DMARQ				834
+MX53_PAD_PATA_DMARQ__GPIO7_0				835
+MX53_PAD_PATA_DMARQ__UART2_TXD_MUX			836
+MX53_PAD_PATA_DMARQ__CCM_CCM_OUT_0			837
+MX53_PAD_PATA_DMARQ__USBPHY2_DATAOUT_4			838
+MX53_PAD_PATA_BUFFER_EN__PATA_BUFFER_EN			839
+MX53_PAD_PATA_BUFFER_EN__GPIO7_1			840
+MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX			841
+MX53_PAD_PATA_BUFFER_EN__CCM_CCM_OUT_1			842
+MX53_PAD_PATA_BUFFER_EN__USBPHY2_DATAOUT_5		843
+MX53_PAD_PATA_INTRQ__PATA_INTRQ				844
+MX53_PAD_PATA_INTRQ__GPIO7_2				845
+MX53_PAD_PATA_INTRQ__UART2_CTS				846
+MX53_PAD_PATA_INTRQ__CAN1_TXCAN				847
+MX53_PAD_PATA_INTRQ__CCM_CCM_OUT_2			848
+MX53_PAD_PATA_INTRQ__USBPHY2_DATAOUT_6			849
+MX53_PAD_PATA_DIOR__PATA_DIOR				850
+MX53_PAD_PATA_DIOR__GPIO7_3				851
+MX53_PAD_PATA_DIOR__UART2_RTS				852
+MX53_PAD_PATA_DIOR__CAN1_RXCAN				853
+MX53_PAD_PATA_DIOR__USBPHY2_DATAOUT_7			854
+MX53_PAD_PATA_RESET_B__PATA_PATA_RESET_B		855
+MX53_PAD_PATA_RESET_B__GPIO7_4				856
+MX53_PAD_PATA_RESET_B__ESDHC3_CMD			857
+MX53_PAD_PATA_RESET_B__UART1_CTS			858
+MX53_PAD_PATA_RESET_B__CAN2_TXCAN			859
+MX53_PAD_PATA_RESET_B__USBPHY1_DATAOUT_0		860
+MX53_PAD_PATA_IORDY__PATA_IORDY				861
+MX53_PAD_PATA_IORDY__GPIO7_5				862
+MX53_PAD_PATA_IORDY__ESDHC3_CLK				863
+MX53_PAD_PATA_IORDY__UART1_RTS				864
+MX53_PAD_PATA_IORDY__CAN2_RXCAN				865
+MX53_PAD_PATA_IORDY__USBPHY1_DATAOUT_1			866
+MX53_PAD_PATA_DA_0__PATA_DA_0				867
+MX53_PAD_PATA_DA_0__GPIO7_6				868
+MX53_PAD_PATA_DA_0__ESDHC3_RST				869
+MX53_PAD_PATA_DA_0__OWIRE_LINE				870
+MX53_PAD_PATA_DA_0__USBPHY1_DATAOUT_2			871
+MX53_PAD_PATA_DA_1__PATA_DA_1				872
+MX53_PAD_PATA_DA_1__GPIO7_7				873
+MX53_PAD_PATA_DA_1__ESDHC4_CMD				874
+MX53_PAD_PATA_DA_1__UART3_CTS				875
+MX53_PAD_PATA_DA_1__USBPHY1_DATAOUT_3			876
+MX53_PAD_PATA_DA_2__PATA_DA_2				877
+MX53_PAD_PATA_DA_2__GPIO7_8				878
+MX53_PAD_PATA_DA_2__ESDHC4_CLK				879
+MX53_PAD_PATA_DA_2__UART3_RTS				880
+MX53_PAD_PATA_DA_2__USBPHY1_DATAOUT_4			881
+MX53_PAD_PATA_CS_0__PATA_CS_0				882
+MX53_PAD_PATA_CS_0__GPIO7_9				883
+MX53_PAD_PATA_CS_0__UART3_TXD_MUX			884
+MX53_PAD_PATA_CS_0__USBPHY1_DATAOUT_5			885
+MX53_PAD_PATA_CS_1__PATA_CS_1				886
+MX53_PAD_PATA_CS_1__GPIO7_10				887
+MX53_PAD_PATA_CS_1__UART3_RXD_MUX			888
+MX53_PAD_PATA_CS_1__USBPHY1_DATAOUT_6			889
+MX53_PAD_PATA_DATA0__PATA_DATA_0			890
+MX53_PAD_PATA_DATA0__GPIO2_0				891
+MX53_PAD_PATA_DATA0__EMI_NANDF_D_0			892
+MX53_PAD_PATA_DATA0__ESDHC3_DAT4			893
+MX53_PAD_PATA_DATA0__GPU3d_GPU_DEBUG_OUT_0		894
+MX53_PAD_PATA_DATA0__IPU_DIAG_BUS_0			895
+MX53_PAD_PATA_DATA0__USBPHY1_DATAOUT_7			896
+MX53_PAD_PATA_DATA1__PATA_DATA_1			897
+MX53_PAD_PATA_DATA1__GPIO2_1				898
+MX53_PAD_PATA_DATA1__EMI_NANDF_D_1			899
+MX53_PAD_PATA_DATA1__ESDHC3_DAT5			900
+MX53_PAD_PATA_DATA1__GPU3d_GPU_DEBUG_OUT_1		901
+MX53_PAD_PATA_DATA1__IPU_DIAG_BUS_1			902
+MX53_PAD_PATA_DATA2__PATA_DATA_2			903
+MX53_PAD_PATA_DATA2__GPIO2_2				904
+MX53_PAD_PATA_DATA2__EMI_NANDF_D_2			905
+MX53_PAD_PATA_DATA2__ESDHC3_DAT6			906
+MX53_PAD_PATA_DATA2__GPU3d_GPU_DEBUG_OUT_2		907
+MX53_PAD_PATA_DATA2__IPU_DIAG_BUS_2			908
+MX53_PAD_PATA_DATA3__PATA_DATA_3			909
+MX53_PAD_PATA_DATA3__GPIO2_3				910
+MX53_PAD_PATA_DATA3__EMI_NANDF_D_3			911
+MX53_PAD_PATA_DATA3__ESDHC3_DAT7			912
+MX53_PAD_PATA_DATA3__GPU3d_GPU_DEBUG_OUT_3		913
+MX53_PAD_PATA_DATA3__IPU_DIAG_BUS_3			914
+MX53_PAD_PATA_DATA4__PATA_DATA_4			915
+MX53_PAD_PATA_DATA4__GPIO2_4				916
+MX53_PAD_PATA_DATA4__EMI_NANDF_D_4			917
+MX53_PAD_PATA_DATA4__ESDHC4_DAT4			918
+MX53_PAD_PATA_DATA4__GPU3d_GPU_DEBUG_OUT_4		919
+MX53_PAD_PATA_DATA4__IPU_DIAG_BUS_4			920
+MX53_PAD_PATA_DATA5__PATA_DATA_5			921
+MX53_PAD_PATA_DATA5__GPIO2_5				922
+MX53_PAD_PATA_DATA5__EMI_NANDF_D_5			923
+MX53_PAD_PATA_DATA5__ESDHC4_DAT5			924
+MX53_PAD_PATA_DATA5__GPU3d_GPU_DEBUG_OUT_5		925
+MX53_PAD_PATA_DATA5__IPU_DIAG_BUS_5			926
+MX53_PAD_PATA_DATA6__PATA_DATA_6			927
+MX53_PAD_PATA_DATA6__GPIO2_6				928
+MX53_PAD_PATA_DATA6__EMI_NANDF_D_6			929
+MX53_PAD_PATA_DATA6__ESDHC4_DAT6			930
+MX53_PAD_PATA_DATA6__GPU3d_GPU_DEBUG_OUT_6		931
+MX53_PAD_PATA_DATA6__IPU_DIAG_BUS_6			932
+MX53_PAD_PATA_DATA7__PATA_DATA_7			933
+MX53_PAD_PATA_DATA7__GPIO2_7				934
+MX53_PAD_PATA_DATA7__EMI_NANDF_D_7			935
+MX53_PAD_PATA_DATA7__ESDHC4_DAT7			936
+MX53_PAD_PATA_DATA7__GPU3d_GPU_DEBUG_OUT_7		937
+MX53_PAD_PATA_DATA7__IPU_DIAG_BUS_7			938
+MX53_PAD_PATA_DATA8__PATA_DATA_8			939
+MX53_PAD_PATA_DATA8__GPIO2_8				940
+MX53_PAD_PATA_DATA8__ESDHC1_DAT4			941
+MX53_PAD_PATA_DATA8__EMI_NANDF_D_8			942
+MX53_PAD_PATA_DATA8__ESDHC3_DAT0			943
+MX53_PAD_PATA_DATA8__GPU3d_GPU_DEBUG_OUT_8		944
+MX53_PAD_PATA_DATA8__IPU_DIAG_BUS_8			945
+MX53_PAD_PATA_DATA9__PATA_DATA_9			946
+MX53_PAD_PATA_DATA9__GPIO2_9				947
+MX53_PAD_PATA_DATA9__ESDHC1_DAT5			948
+MX53_PAD_PATA_DATA9__EMI_NANDF_D_9			949
+MX53_PAD_PATA_DATA9__ESDHC3_DAT1			950
+MX53_PAD_PATA_DATA9__GPU3d_GPU_DEBUG_OUT_9		951
+MX53_PAD_PATA_DATA9__IPU_DIAG_BUS_9			952
+MX53_PAD_PATA_DATA10__PATA_DATA_10			953
+MX53_PAD_PATA_DATA10__GPIO2_10				954
+MX53_PAD_PATA_DATA10__ESDHC1_DAT6			955
+MX53_PAD_PATA_DATA10__EMI_NANDF_D_10			956
+MX53_PAD_PATA_DATA10__ESDHC3_DAT2			957
+MX53_PAD_PATA_DATA10__GPU3d_GPU_DEBUG_OUT_10		958
+MX53_PAD_PATA_DATA10__IPU_DIAG_BUS_10			959
+MX53_PAD_PATA_DATA11__PATA_DATA_11			960
+MX53_PAD_PATA_DATA11__GPIO2_11				961
+MX53_PAD_PATA_DATA11__ESDHC1_DAT7			962
+MX53_PAD_PATA_DATA11__EMI_NANDF_D_11			963
+MX53_PAD_PATA_DATA11__ESDHC3_DAT3			964
+MX53_PAD_PATA_DATA11__GPU3d_GPU_DEBUG_OUT_11		965
+MX53_PAD_PATA_DATA11__IPU_DIAG_BUS_11			966
+MX53_PAD_PATA_DATA12__PATA_DATA_12			967
+MX53_PAD_PATA_DATA12__GPIO2_12				968
+MX53_PAD_PATA_DATA12__ESDHC2_DAT4			969
+MX53_PAD_PATA_DATA12__EMI_NANDF_D_12			970
+MX53_PAD_PATA_DATA12__ESDHC4_DAT0			971
+MX53_PAD_PATA_DATA12__GPU3d_GPU_DEBUG_OUT_12		972
+MX53_PAD_PATA_DATA12__IPU_DIAG_BUS_12			973
+MX53_PAD_PATA_DATA13__PATA_DATA_13			974
+MX53_PAD_PATA_DATA13__GPIO2_13				975
+MX53_PAD_PATA_DATA13__ESDHC2_DAT5			976
+MX53_PAD_PATA_DATA13__EMI_NANDF_D_13			977
+MX53_PAD_PATA_DATA13__ESDHC4_DAT1			978
+MX53_PAD_PATA_DATA13__GPU3d_GPU_DEBUG_OUT_13		979
+MX53_PAD_PATA_DATA13__IPU_DIAG_BUS_13			980
+MX53_PAD_PATA_DATA14__PATA_DATA_14			981
+MX53_PAD_PATA_DATA14__GPIO2_14				982
+MX53_PAD_PATA_DATA14__ESDHC2_DAT6			983
+MX53_PAD_PATA_DATA14__EMI_NANDF_D_14			984
+MX53_PAD_PATA_DATA14__ESDHC4_DAT2			985
+MX53_PAD_PATA_DATA14__GPU3d_GPU_DEBUG_OUT_14		986
+MX53_PAD_PATA_DATA14__IPU_DIAG_BUS_14			987
+MX53_PAD_PATA_DATA15__PATA_DATA_15			988
+MX53_PAD_PATA_DATA15__GPIO2_15				989
+MX53_PAD_PATA_DATA15__ESDHC2_DAT7			990
+MX53_PAD_PATA_DATA15__EMI_NANDF_D_15			991
+MX53_PAD_PATA_DATA15__ESDHC4_DAT3			992
+MX53_PAD_PATA_DATA15__GPU3d_GPU_DEBUG_OUT_15		993
+MX53_PAD_PATA_DATA15__IPU_DIAG_BUS_15			994
+MX53_PAD_SD1_DATA0__ESDHC1_DAT0				995
+MX53_PAD_SD1_DATA0__GPIO1_16				996
+MX53_PAD_SD1_DATA0__GPT_CAPIN1				997
+MX53_PAD_SD1_DATA0__CSPI_MISO				998
+MX53_PAD_SD1_DATA0__CCM_PLL3_BYP			999
+MX53_PAD_SD1_DATA1__ESDHC1_DAT1				1000
+MX53_PAD_SD1_DATA1__GPIO1_17				1001
+MX53_PAD_SD1_DATA1__GPT_CAPIN2				1002
+MX53_PAD_SD1_DATA1__CSPI_SS0				1003
+MX53_PAD_SD1_DATA1__CCM_PLL4_BYP			1004
+MX53_PAD_SD1_CMD__ESDHC1_CMD				1005
+MX53_PAD_SD1_CMD__GPIO1_18				1006
+MX53_PAD_SD1_CMD__GPT_CMPOUT1				1007
+MX53_PAD_SD1_CMD__CSPI_MOSI				1008
+MX53_PAD_SD1_CMD__CCM_PLL1_BYP				1009
+MX53_PAD_SD1_DATA2__ESDHC1_DAT2				1010
+MX53_PAD_SD1_DATA2__GPIO1_19				1011
+MX53_PAD_SD1_DATA2__GPT_CMPOUT2				1012
+MX53_PAD_SD1_DATA2__PWM2_PWMO				1013
+MX53_PAD_SD1_DATA2__WDOG1_WDOG_B			1014
+MX53_PAD_SD1_DATA2__CSPI_SS1				1015
+MX53_PAD_SD1_DATA2__WDOG1_WDOG_RST_B_DEB		1016
+MX53_PAD_SD1_DATA2__CCM_PLL2_BYP			1017
+MX53_PAD_SD1_CLK__ESDHC1_CLK				1018
+MX53_PAD_SD1_CLK__GPIO1_20				1019
+MX53_PAD_SD1_CLK__OSC32k_32K_OUT			1020
+MX53_PAD_SD1_CLK__GPT_CLKIN				1021
+MX53_PAD_SD1_CLK__CSPI_SCLK				1022
+MX53_PAD_SD1_CLK__SATA_PHY_DTB_0			1023
+MX53_PAD_SD1_DATA3__ESDHC1_DAT3				1024
+MX53_PAD_SD1_DATA3__GPIO1_21				1025
+MX53_PAD_SD1_DATA3__GPT_CMPOUT3				1026
+MX53_PAD_SD1_DATA3__PWM1_PWMO				1027
+MX53_PAD_SD1_DATA3__WDOG2_WDOG_B			1028
+MX53_PAD_SD1_DATA3__CSPI_SS2				1029
+MX53_PAD_SD1_DATA3__WDOG2_WDOG_RST_B_DEB		1030
+MX53_PAD_SD1_DATA3__SATA_PHY_DTB_1			1031
+MX53_PAD_SD2_CLK__ESDHC2_CLK				1032
+MX53_PAD_SD2_CLK__GPIO1_10				1033
+MX53_PAD_SD2_CLK__KPP_COL_5				1034
+MX53_PAD_SD2_CLK__AUDMUX_AUD4_RXFS			1035
+MX53_PAD_SD2_CLK__CSPI_SCLK				1036
+MX53_PAD_SD2_CLK__SCC_RANDOM_V				1037
+MX53_PAD_SD2_CMD__ESDHC2_CMD				1038
+MX53_PAD_SD2_CMD__GPIO1_11				1039
+MX53_PAD_SD2_CMD__KPP_ROW_5				1040
+MX53_PAD_SD2_CMD__AUDMUX_AUD4_RXC			1041
+MX53_PAD_SD2_CMD__CSPI_MOSI				1042
+MX53_PAD_SD2_CMD__SCC_RANDOM				1043
+MX53_PAD_SD2_DATA3__ESDHC2_DAT3				1044
+MX53_PAD_SD2_DATA3__GPIO1_12				1045
+MX53_PAD_SD2_DATA3__KPP_COL_6				1046
+MX53_PAD_SD2_DATA3__AUDMUX_AUD4_TXC			1047
+MX53_PAD_SD2_DATA3__CSPI_SS2				1048
+MX53_PAD_SD2_DATA3__SJC_DONE				1049
+MX53_PAD_SD2_DATA2__ESDHC2_DAT2				1050
+MX53_PAD_SD2_DATA2__GPIO1_13				1051
+MX53_PAD_SD2_DATA2__KPP_ROW_6				1052
+MX53_PAD_SD2_DATA2__AUDMUX_AUD4_TXD			1053
+MX53_PAD_SD2_DATA2__CSPI_SS1				1054
+MX53_PAD_SD2_DATA2__SJC_FAIL				1055
+MX53_PAD_SD2_DATA1__ESDHC2_DAT1				1056
+MX53_PAD_SD2_DATA1__GPIO1_14				1057
+MX53_PAD_SD2_DATA1__KPP_COL_7				1058
+MX53_PAD_SD2_DATA1__AUDMUX_AUD4_TXFS			1059
+MX53_PAD_SD2_DATA1__CSPI_SS0				1060
+MX53_PAD_SD2_DATA1__RTIC_SEC_VIO			1061
+MX53_PAD_SD2_DATA0__ESDHC2_DAT0				1062
+MX53_PAD_SD2_DATA0__GPIO1_15				1063
+MX53_PAD_SD2_DATA0__KPP_ROW_7				1064
+MX53_PAD_SD2_DATA0__AUDMUX_AUD4_RXD			1065
+MX53_PAD_SD2_DATA0__CSPI_MISO				1066
+MX53_PAD_SD2_DATA0__RTIC_DONE_INT			1067
+MX53_PAD_GPIO_0__CCM_CLKO				1068
+MX53_PAD_GPIO_0__GPIO1_0				1069
+MX53_PAD_GPIO_0__KPP_COL_5				1070
+MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK			1071
+MX53_PAD_GPIO_0__EPIT1_EPITO				1072
+MX53_PAD_GPIO_0__SRTC_ALARM_DEB				1073
+MX53_PAD_GPIO_0__USBOH3_USBH1_PWR			1074
+MX53_PAD_GPIO_0__CSU_TD					1075
+MX53_PAD_GPIO_1__ESAI1_SCKR				1076
+MX53_PAD_GPIO_1__GPIO1_1				1077
+MX53_PAD_GPIO_1__KPP_ROW_5				1078
+MX53_PAD_GPIO_1__CCM_SSI_EXT2_CLK			1079
+MX53_PAD_GPIO_1__PWM2_PWMO				1080
+MX53_PAD_GPIO_1__WDOG2_WDOG_B				1081
+MX53_PAD_GPIO_1__ESDHC1_CD				1082
+MX53_PAD_GPIO_1__SRC_TESTER_ACK				1083
+MX53_PAD_GPIO_9__ESAI1_FSR				1084
+MX53_PAD_GPIO_9__GPIO1_9				1085
+MX53_PAD_GPIO_9__KPP_COL_6				1086
+MX53_PAD_GPIO_9__CCM_REF_EN_B				1087
+MX53_PAD_GPIO_9__PWM1_PWMO				1088
+MX53_PAD_GPIO_9__WDOG1_WDOG_B				1089
+MX53_PAD_GPIO_9__ESDHC1_WP				1090
+MX53_PAD_GPIO_9__SCC_FAIL_STATE				1091
+MX53_PAD_GPIO_3__ESAI1_HCKR				1092
+MX53_PAD_GPIO_3__GPIO1_3				1093
+MX53_PAD_GPIO_3__I2C3_SCL				1094
+MX53_PAD_GPIO_3__DPLLIP1_TOG_EN				1095
+MX53_PAD_GPIO_3__CCM_CLKO2				1096
+MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0		1097
+MX53_PAD_GPIO_3__USBOH3_USBH1_OC			1098
+MX53_PAD_GPIO_3__MLB_MLBCLK				1099
+MX53_PAD_GPIO_6__ESAI1_SCKT				1100
+MX53_PAD_GPIO_6__GPIO1_6				1101
+MX53_PAD_GPIO_6__I2C3_SDA				1102
+MX53_PAD_GPIO_6__CCM_CCM_OUT_0				1103
+MX53_PAD_GPIO_6__CSU_CSU_INT_DEB			1104
+MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1		1105
+MX53_PAD_GPIO_6__ESDHC2_LCTL				1106
+MX53_PAD_GPIO_6__MLB_MLBSIG				1107
+MX53_PAD_GPIO_2__ESAI1_FST				1108
+MX53_PAD_GPIO_2__GPIO1_2				1109
+MX53_PAD_GPIO_2__KPP_ROW_6				1110
+MX53_PAD_GPIO_2__CCM_CCM_OUT_1				1111
+MX53_PAD_GPIO_2__CSU_CSU_ALARM_AUT_0			1112
+MX53_PAD_GPIO_2__OBSERVE_MUX_OBSRV_INT_OUT2		1113
+MX53_PAD_GPIO_2__ESDHC2_WP				1114
+MX53_PAD_GPIO_2__MLB_MLBDAT				1115
+MX53_PAD_GPIO_4__ESAI1_HCKT				1116
+MX53_PAD_GPIO_4__GPIO1_4				1117
+MX53_PAD_GPIO_4__KPP_COL_7				1118
+MX53_PAD_GPIO_4__CCM_CCM_OUT_2				1119
+MX53_PAD_GPIO_4__CSU_CSU_ALARM_AUT_1			1120
+MX53_PAD_GPIO_4__OBSERVE_MUX_OBSRV_INT_OUT3		1121
+MX53_PAD_GPIO_4__ESDHC2_CD				1122
+MX53_PAD_GPIO_4__SCC_SEC_STATE				1123
+MX53_PAD_GPIO_5__ESAI1_TX2_RX3				1124
+MX53_PAD_GPIO_5__GPIO1_5				1125
+MX53_PAD_GPIO_5__KPP_ROW_7				1126
+MX53_PAD_GPIO_5__CCM_CLKO				1127
+MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2			1128
+MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4		1129
+MX53_PAD_GPIO_5__I2C3_SCL				1130
+MX53_PAD_GPIO_5__CCM_PLL1_BYP				1131
+MX53_PAD_GPIO_7__ESAI1_TX4_RX1				1132
+MX53_PAD_GPIO_7__GPIO1_7				1133
+MX53_PAD_GPIO_7__EPIT1_EPITO				1134
+MX53_PAD_GPIO_7__CAN1_TXCAN				1135
+MX53_PAD_GPIO_7__UART2_TXD_MUX				1136
+MX53_PAD_GPIO_7__FIRI_RXD				1137
+MX53_PAD_GPIO_7__SPDIF_PLOCK				1138
+MX53_PAD_GPIO_7__CCM_PLL2_BYP				1139
+MX53_PAD_GPIO_8__ESAI1_TX5_RX0				1140
+MX53_PAD_GPIO_8__GPIO1_8				1141
+MX53_PAD_GPIO_8__EPIT2_EPITO				1142
+MX53_PAD_GPIO_8__CAN1_RXCAN				1143
+MX53_PAD_GPIO_8__UART2_RXD_MUX				1144
+MX53_PAD_GPIO_8__FIRI_TXD				1145
+MX53_PAD_GPIO_8__SPDIF_SRCLK				1146
+MX53_PAD_GPIO_8__CCM_PLL3_BYP				1147
+MX53_PAD_GPIO_16__ESAI1_TX3_RX2				1148
+MX53_PAD_GPIO_16__GPIO7_11				1149
+MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT			1150
+MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1			1151
+MX53_PAD_GPIO_16__SPDIF_IN1				1152
+MX53_PAD_GPIO_16__I2C3_SDA				1153
+MX53_PAD_GPIO_16__SJC_DE_B				1154
+MX53_PAD_GPIO_17__ESAI1_TX0				1155
+MX53_PAD_GPIO_17__GPIO7_12				1156
+MX53_PAD_GPIO_17__SDMA_EXT_EVENT_0			1157
+MX53_PAD_GPIO_17__GPC_PMIC_RDY				1158
+MX53_PAD_GPIO_17__RTC_CE_RTC_FSV_TRIG			1159
+MX53_PAD_GPIO_17__SPDIF_OUT1				1160
+MX53_PAD_GPIO_17__IPU_SNOOP2				1161
+MX53_PAD_GPIO_17__SJC_JTAG_ACT				1162
+MX53_PAD_GPIO_18__ESAI1_TX1				1163
+MX53_PAD_GPIO_18__GPIO7_13				1164
+MX53_PAD_GPIO_18__SDMA_EXT_EVENT_1			1165
+MX53_PAD_GPIO_18__OWIRE_LINE				1166
+MX53_PAD_GPIO_18__RTC_CE_RTC_ALARM2_TRIG		1167
+MX53_PAD_GPIO_18__CCM_ASRC_EXT_CLK			1168
+MX53_PAD_GPIO_18__ESDHC1_LCTL				1169
+MX53_PAD_GPIO_18__SRC_SYSTEM_RST			1170
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt
new file mode 100644
index 0000000..82b43f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt
@@ -0,0 +1,1628 @@
+* Freescale IMX6Q IOMUX Controller
+
+Please refer to fsl,imx-pinctrl.txt in this directory for the common binding
+part and usage.
+
+Required properties:
+- compatible: "fsl,imx6q-iomuxc"
+- fsl,pins: an array of integer pairs; each pair describes the mux and config
+  setting for one pin. The format is fsl,pins = <PIN_FUNC_ID CONFIG>, where
+  PIN_FUNC_ID selects a pin working in a specific function and CONFIG is the
+  pad setting value (e.g. pull-up) for that pin. Please refer to the imx6q
+  datasheet for the valid pad config settings. See the example after the
+  CONFIG bits definition below.
+
+CONFIG bits definition:
+PAD_CTL_HYS                     (1 << 16)
+PAD_CTL_PUS_100K_DOWN           (0 << 14)
+PAD_CTL_PUS_47K_UP              (1 << 14)
+PAD_CTL_PUS_100K_UP             (2 << 14)
+PAD_CTL_PUS_22K_UP              (3 << 14)
+PAD_CTL_PUE                     (1 << 13)
+PAD_CTL_PKE                     (1 << 12)
+PAD_CTL_ODE                     (1 << 11)
+PAD_CTL_SPEED_LOW               (1 << 6)
+PAD_CTL_SPEED_MED               (2 << 6)
+PAD_CTL_SPEED_HIGH              (3 << 6)
+PAD_CTL_DSE_DISABLE             (0 << 3)
+PAD_CTL_DSE_240ohm              (1 << 3)
+PAD_CTL_DSE_120ohm              (2 << 3)
+PAD_CTL_DSE_80ohm               (3 << 3)
+PAD_CTL_DSE_60ohm               (4 << 3)
+PAD_CTL_DSE_48ohm               (5 << 3)
+PAD_CTL_DSE_40ohm               (6 << 3)
+PAD_CTL_DSE_34ohm               (7 << 3)
+PAD_CTL_SRE_FAST                (1 << 0)
+PAD_CTL_SRE_SLOW                (0 << 0)
+
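+As a rough usage sketch (the node layout, the group names and the 0x1b0b1
+config value are illustrative assumptions, not mandated by this binding), a
+pin group selecting the UART2 pads from the PIN_FUNC_ID list below could look
+like:
+
+	uart2 {
+		pinctrl_uart2_1: uart2grp-1 {
+			fsl,pins = <
+				183 0x1b0b1	/* MX6Q_PAD_EIM_D26__UART2_TXD */
+				191 0x1b0b1	/* MX6Q_PAD_EIM_D27__UART2_RXD */
+			>;
+			/*
+			 * 0x1b0b1 = PAD_CTL_HYS | PAD_CTL_PUS_100K_UP | PAD_CTL_PUE |
+			 *           PAD_CTL_PKE | PAD_CTL_SPEED_MED | PAD_CTL_DSE_40ohm |
+			 *           PAD_CTL_SRE_FAST
+			 */
+		};
+	};
+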
+See below for the available PIN_FUNC_IDs for imx6q:
+MX6Q_PAD_SD2_DAT1__USDHC2_DAT1			0
+MX6Q_PAD_SD2_DAT1__ECSPI5_SS0			1
+MX6Q_PAD_SD2_DAT1__WEIM_WEIM_CS_2		2
+MX6Q_PAD_SD2_DAT1__AUDMUX_AUD4_TXFS		3
+MX6Q_PAD_SD2_DAT1__KPP_COL_7			4
+MX6Q_PAD_SD2_DAT1__GPIO_1_14			5
+MX6Q_PAD_SD2_DAT1__CCM_WAIT			6
+MX6Q_PAD_SD2_DAT1__ANATOP_TESTO_0		7
+MX6Q_PAD_SD2_DAT2__USDHC2_DAT2			8
+MX6Q_PAD_SD2_DAT2__ECSPI5_SS1			9
+MX6Q_PAD_SD2_DAT2__WEIM_WEIM_CS_3		10
+MX6Q_PAD_SD2_DAT2__AUDMUX_AUD4_TXD		11
+MX6Q_PAD_SD2_DAT2__KPP_ROW_6			12
+MX6Q_PAD_SD2_DAT2__GPIO_1_13			13
+MX6Q_PAD_SD2_DAT2__CCM_STOP			14
+MX6Q_PAD_SD2_DAT2__ANATOP_TESTO_1		15
+MX6Q_PAD_SD2_DAT0__USDHC2_DAT0			16
+MX6Q_PAD_SD2_DAT0__ECSPI5_MISO			17
+MX6Q_PAD_SD2_DAT0__AUDMUX_AUD4_RXD		18
+MX6Q_PAD_SD2_DAT0__KPP_ROW_7			19
+MX6Q_PAD_SD2_DAT0__GPIO_1_15			20
+MX6Q_PAD_SD2_DAT0__DCIC2_DCIC_OUT		21
+MX6Q_PAD_SD2_DAT0__TESTO_2			22
+MX6Q_PAD_RGMII_TXC__USBOH3_H2_DATA		23
+MX6Q_PAD_RGMII_TXC__ENET_RGMII_TXC		24
+MX6Q_PAD_RGMII_TXC__SPDIF_SPDIF_EXTCLK		25
+MX6Q_PAD_RGMII_TXC__GPIO_6_19			26
+MX6Q_PAD_RGMII_TXC__MIPI_CORE_DPHY_IN_0		27
+MX6Q_PAD_RGMII_TXC__ANATOP_24M_OUT		28
+MX6Q_PAD_RGMII_TD0__MIPI_HSI_CRL_TX_RDY		29
+MX6Q_PAD_RGMII_TD0__ENET_RGMII_TD0		30
+MX6Q_PAD_RGMII_TD0__GPIO_6_20			31
+MX6Q_PAD_RGMII_TD0__MIPI_CORE_DPHY_IN_1		32
+MX6Q_PAD_RGMII_TD1__MIPI_HSI_CRL_RX_FLG		33
+MX6Q_PAD_RGMII_TD1__ENET_RGMII_TD1		34
+MX6Q_PAD_RGMII_TD1__GPIO_6_21			35
+MX6Q_PAD_RGMII_TD1__MIPI_CORE_DPHY_IN_2		36
+MX6Q_PAD_RGMII_TD1__CCM_PLL3_BYP		37
+MX6Q_PAD_RGMII_TD2__MIPI_HSI_CRL_RX_DTA		38
+MX6Q_PAD_RGMII_TD2__ENET_RGMII_TD2		39
+MX6Q_PAD_RGMII_TD2__GPIO_6_22			40
+MX6Q_PAD_RGMII_TD2__MIPI_CORE_DPHY_IN_3		41
+MX6Q_PAD_RGMII_TD2__CCM_PLL2_BYP		42
+MX6Q_PAD_RGMII_TD3__MIPI_HSI_CRL_RX_WAK		43
+MX6Q_PAD_RGMII_TD3__ENET_RGMII_TD3		44
+MX6Q_PAD_RGMII_TD3__GPIO_6_23			45
+MX6Q_PAD_RGMII_TD3__MIPI_CORE_DPHY_IN_4		46
+MX6Q_PAD_RGMII_RX_CTL__USBOH3_H3_DATA		47
+MX6Q_PAD_RGMII_RX_CTL__RGMII_RX_CTL		48
+MX6Q_PAD_RGMII_RX_CTL__GPIO_6_24		49
+MX6Q_PAD_RGMII_RX_CTL__MIPI_DPHY_IN_5		50
+MX6Q_PAD_RGMII_RD0__MIPI_HSI_CRL_RX_RDY		51
+MX6Q_PAD_RGMII_RD0__ENET_RGMII_RD0		52
+MX6Q_PAD_RGMII_RD0__GPIO_6_25			53
+MX6Q_PAD_RGMII_RD0__MIPI_CORE_DPHY_IN_6		54
+MX6Q_PAD_RGMII_TX_CTL__USBOH3_H2_STROBE		55
+MX6Q_PAD_RGMII_TX_CTL__RGMII_TX_CTL		56
+MX6Q_PAD_RGMII_TX_CTL__GPIO_6_26		57
+MX6Q_PAD_RGMII_TX_CTL__CORE_DPHY_IN_7		58
+MX6Q_PAD_RGMII_TX_CTL__ANATOP_REF_OUT		59
+MX6Q_PAD_RGMII_RD1__MIPI_HSI_CTRL_TX_FL		60
+MX6Q_PAD_RGMII_RD1__ENET_RGMII_RD1		61
+MX6Q_PAD_RGMII_RD1__GPIO_6_27			62
+MX6Q_PAD_RGMII_RD1__CORE_DPHY_TEST_IN_8		63
+MX6Q_PAD_RGMII_RD1__SJC_FAIL			64
+MX6Q_PAD_RGMII_RD2__MIPI_HSI_CRL_TX_DTA		65
+MX6Q_PAD_RGMII_RD2__ENET_RGMII_RD2		66
+MX6Q_PAD_RGMII_RD2__GPIO_6_28			67
+MX6Q_PAD_RGMII_RD2__MIPI_CORE_DPHY_IN_9		68
+MX6Q_PAD_RGMII_RD3__MIPI_HSI_CRL_TX_WAK		69
+MX6Q_PAD_RGMII_RD3__ENET_RGMII_RD3		70
+MX6Q_PAD_RGMII_RD3__GPIO_6_29			71
+MX6Q_PAD_RGMII_RD3__MIPI_CORE_DPHY_IN10		72
+MX6Q_PAD_RGMII_RXC__USBOH3_H3_STROBE		73
+MX6Q_PAD_RGMII_RXC__ENET_RGMII_RXC		74
+MX6Q_PAD_RGMII_RXC__GPIO_6_30			75
+MX6Q_PAD_RGMII_RXC__MIPI_CORE_DPHY_IN11		76
+MX6Q_PAD_EIM_A25__WEIM_WEIM_A_25		77
+MX6Q_PAD_EIM_A25__ECSPI4_SS1			78
+MX6Q_PAD_EIM_A25__ECSPI2_RDY			79
+MX6Q_PAD_EIM_A25__IPU1_DI1_PIN12		80
+MX6Q_PAD_EIM_A25__IPU1_DI0_D1_CS		81
+MX6Q_PAD_EIM_A25__GPIO_5_2			82
+MX6Q_PAD_EIM_A25__HDMI_TX_CEC_LINE		83
+MX6Q_PAD_EIM_A25__PL301_PER1_HBURST_0		84
+MX6Q_PAD_EIM_EB2__WEIM_WEIM_EB_2		85
+MX6Q_PAD_EIM_EB2__ECSPI1_SS0			86
+MX6Q_PAD_EIM_EB2__CCM_DI1_EXT_CLK		87
+MX6Q_PAD_EIM_EB2__IPU2_CSI1_D_19		88
+MX6Q_PAD_EIM_EB2__HDMI_TX_DDC_SCL		89
+MX6Q_PAD_EIM_EB2__GPIO_2_30			90
+MX6Q_PAD_EIM_EB2__I2C2_SCL			91
+MX6Q_PAD_EIM_EB2__SRC_BT_CFG_30			92
+MX6Q_PAD_EIM_D16__WEIM_WEIM_D_16		93
+MX6Q_PAD_EIM_D16__ECSPI1_SCLK			94
+MX6Q_PAD_EIM_D16__IPU1_DI0_PIN5			95
+MX6Q_PAD_EIM_D16__IPU2_CSI1_D_18		96
+MX6Q_PAD_EIM_D16__HDMI_TX_DDC_SDA		97
+MX6Q_PAD_EIM_D16__GPIO_3_16			98
+MX6Q_PAD_EIM_D16__I2C2_SDA			99
+MX6Q_PAD_EIM_D17__WEIM_WEIM_D_17		100
+MX6Q_PAD_EIM_D17__ECSPI1_MISO			101
+MX6Q_PAD_EIM_D17__IPU1_DI0_PIN6			102
+MX6Q_PAD_EIM_D17__IPU2_CSI1_PIXCLK		103
+MX6Q_PAD_EIM_D17__DCIC1_DCIC_OUT		104
+MX6Q_PAD_EIM_D17__GPIO_3_17			105
+MX6Q_PAD_EIM_D17__I2C3_SCL			106
+MX6Q_PAD_EIM_D17__PL301_PER1_HBURST_1		107
+MX6Q_PAD_EIM_D18__WEIM_WEIM_D_18		108
+MX6Q_PAD_EIM_D18__ECSPI1_MOSI			109
+MX6Q_PAD_EIM_D18__IPU1_DI0_PIN7			110
+MX6Q_PAD_EIM_D18__IPU2_CSI1_D_17		111
+MX6Q_PAD_EIM_D18__IPU1_DI1_D0_CS		112
+MX6Q_PAD_EIM_D18__GPIO_3_18			113
+MX6Q_PAD_EIM_D18__I2C3_SDA			114
+MX6Q_PAD_EIM_D18__PL301_PER1_HBURST_2		115
+MX6Q_PAD_EIM_D19__WEIM_WEIM_D_19		116
+MX6Q_PAD_EIM_D19__ECSPI1_SS1			117
+MX6Q_PAD_EIM_D19__IPU1_DI0_PIN8			118
+MX6Q_PAD_EIM_D19__IPU2_CSI1_D_16		119
+MX6Q_PAD_EIM_D19__UART1_CTS			120
+MX6Q_PAD_EIM_D19__GPIO_3_19			121
+MX6Q_PAD_EIM_D19__EPIT1_EPITO			122
+MX6Q_PAD_EIM_D19__PL301_PER1_HRESP		123
+MX6Q_PAD_EIM_D20__WEIM_WEIM_D_20		124
+MX6Q_PAD_EIM_D20__ECSPI4_SS0			125
+MX6Q_PAD_EIM_D20__IPU1_DI0_PIN16		126
+MX6Q_PAD_EIM_D20__IPU2_CSI1_D_15		127
+MX6Q_PAD_EIM_D20__UART1_RTS			128
+MX6Q_PAD_EIM_D20__GPIO_3_20			129
+MX6Q_PAD_EIM_D20__EPIT2_EPITO			130
+MX6Q_PAD_EIM_D21__WEIM_WEIM_D_21		131
+MX6Q_PAD_EIM_D21__ECSPI4_SCLK			132
+MX6Q_PAD_EIM_D21__IPU1_DI0_PIN17		133
+MX6Q_PAD_EIM_D21__IPU2_CSI1_D_11		134
+MX6Q_PAD_EIM_D21__USBOH3_USBOTG_OC		135
+MX6Q_PAD_EIM_D21__GPIO_3_21			136
+MX6Q_PAD_EIM_D21__I2C1_SCL			137
+MX6Q_PAD_EIM_D21__SPDIF_IN1			138
+MX6Q_PAD_EIM_D22__WEIM_WEIM_D_22		139
+MX6Q_PAD_EIM_D22__ECSPI4_MISO			140
+MX6Q_PAD_EIM_D22__IPU1_DI0_PIN1			141
+MX6Q_PAD_EIM_D22__IPU2_CSI1_D_10		142
+MX6Q_PAD_EIM_D22__USBOH3_USBOTG_PWR		143
+MX6Q_PAD_EIM_D22__GPIO_3_22			144
+MX6Q_PAD_EIM_D22__SPDIF_OUT1			145
+MX6Q_PAD_EIM_D22__PL301_PER1_HWRITE		146
+MX6Q_PAD_EIM_D23__WEIM_WEIM_D_23		147
+MX6Q_PAD_EIM_D23__IPU1_DI0_D0_CS		148
+MX6Q_PAD_EIM_D23__UART3_CTS			149
+MX6Q_PAD_EIM_D23__UART1_DCD			150
+MX6Q_PAD_EIM_D23__IPU2_CSI1_DATA_EN		151
+MX6Q_PAD_EIM_D23__GPIO_3_23			152
+MX6Q_PAD_EIM_D23__IPU1_DI1_PIN2			153
+MX6Q_PAD_EIM_D23__IPU1_DI1_PIN14		154
+MX6Q_PAD_EIM_EB3__WEIM_WEIM_EB_3		155
+MX6Q_PAD_EIM_EB3__ECSPI4_RDY			156
+MX6Q_PAD_EIM_EB3__UART3_RTS			157
+MX6Q_PAD_EIM_EB3__UART1_RI			158
+MX6Q_PAD_EIM_EB3__IPU2_CSI1_HSYNC		159
+MX6Q_PAD_EIM_EB3__GPIO_2_31			160
+MX6Q_PAD_EIM_EB3__IPU1_DI1_PIN3			161
+MX6Q_PAD_EIM_EB3__SRC_BT_CFG_31			162
+MX6Q_PAD_EIM_D24__WEIM_WEIM_D_24		163
+MX6Q_PAD_EIM_D24__ECSPI4_SS2			164
+MX6Q_PAD_EIM_D24__UART3_TXD			165
+MX6Q_PAD_EIM_D24__ECSPI1_SS2			166
+MX6Q_PAD_EIM_D24__ECSPI2_SS2			167
+MX6Q_PAD_EIM_D24__GPIO_3_24			168
+MX6Q_PAD_EIM_D24__AUDMUX_AUD5_RXFS		169
+MX6Q_PAD_EIM_D24__UART1_DTR			170
+MX6Q_PAD_EIM_D25__WEIM_WEIM_D_25		171
+MX6Q_PAD_EIM_D25__ECSPI4_SS3			172
+MX6Q_PAD_EIM_D25__UART3_RXD			173
+MX6Q_PAD_EIM_D25__ECSPI1_SS3			174
+MX6Q_PAD_EIM_D25__ECSPI2_SS3			175
+MX6Q_PAD_EIM_D25__GPIO_3_25			176
+MX6Q_PAD_EIM_D25__AUDMUX_AUD5_RXC		177
+MX6Q_PAD_EIM_D25__UART1_DSR			178
+MX6Q_PAD_EIM_D26__WEIM_WEIM_D_26		179
+MX6Q_PAD_EIM_D26__IPU1_DI1_PIN11		180
+MX6Q_PAD_EIM_D26__IPU1_CSI0_D_1			181
+MX6Q_PAD_EIM_D26__IPU2_CSI1_D_14		182
+MX6Q_PAD_EIM_D26__UART2_TXD			183
+MX6Q_PAD_EIM_D26__GPIO_3_26			184
+MX6Q_PAD_EIM_D26__IPU1_SISG_2			185
+MX6Q_PAD_EIM_D26__IPU1_DISP1_DAT_22		186
+MX6Q_PAD_EIM_D27__WEIM_WEIM_D_27		187
+MX6Q_PAD_EIM_D27__IPU1_DI1_PIN13		188
+MX6Q_PAD_EIM_D27__IPU1_CSI0_D_0			189
+MX6Q_PAD_EIM_D27__IPU2_CSI1_D_13		190
+MX6Q_PAD_EIM_D27__UART2_RXD			191
+MX6Q_PAD_EIM_D27__GPIO_3_27			192
+MX6Q_PAD_EIM_D27__IPU1_SISG_3			193
+MX6Q_PAD_EIM_D27__IPU1_DISP1_DAT_23		194
+MX6Q_PAD_EIM_D28__WEIM_WEIM_D_28		195
+MX6Q_PAD_EIM_D28__I2C1_SDA			196
+MX6Q_PAD_EIM_D28__ECSPI4_MOSI			197
+MX6Q_PAD_EIM_D28__IPU2_CSI1_D_12		198
+MX6Q_PAD_EIM_D28__UART2_CTS			199
+MX6Q_PAD_EIM_D28__GPIO_3_28			200
+MX6Q_PAD_EIM_D28__IPU1_EXT_TRIG			201
+MX6Q_PAD_EIM_D28__IPU1_DI0_PIN13		202
+MX6Q_PAD_EIM_D29__WEIM_WEIM_D_29		203
+MX6Q_PAD_EIM_D29__IPU1_DI1_PIN15		204
+MX6Q_PAD_EIM_D29__ECSPI4_SS0			205
+MX6Q_PAD_EIM_D29__UART2_RTS			206
+MX6Q_PAD_EIM_D29__GPIO_3_29			207
+MX6Q_PAD_EIM_D29__IPU2_CSI1_VSYNC		208
+MX6Q_PAD_EIM_D29__IPU1_DI0_PIN14		209
+MX6Q_PAD_EIM_D30__WEIM_WEIM_D_30		210
+MX6Q_PAD_EIM_D30__IPU1_DISP1_DAT_21		211
+MX6Q_PAD_EIM_D30__IPU1_DI0_PIN11		212
+MX6Q_PAD_EIM_D30__IPU1_CSI0_D_3			213
+MX6Q_PAD_EIM_D30__UART3_CTS			214
+MX6Q_PAD_EIM_D30__GPIO_3_30			215
+MX6Q_PAD_EIM_D30__USBOH3_USBH1_OC		216
+MX6Q_PAD_EIM_D30__PL301_PER1_HPROT_0		217
+MX6Q_PAD_EIM_D31__WEIM_WEIM_D_31		218
+MX6Q_PAD_EIM_D31__IPU1_DISP1_DAT_20		219
+MX6Q_PAD_EIM_D31__IPU1_DI0_PIN12		220
+MX6Q_PAD_EIM_D31__IPU1_CSI0_D_2			221
+MX6Q_PAD_EIM_D31__UART3_RTS			222
+MX6Q_PAD_EIM_D31__GPIO_3_31			223
+MX6Q_PAD_EIM_D31__USBOH3_USBH1_PWR		224
+MX6Q_PAD_EIM_D31__PL301_PER1_HPROT_1		225
+MX6Q_PAD_EIM_A24__WEIM_WEIM_A_24		226
+MX6Q_PAD_EIM_A24__IPU1_DISP1_DAT_19		227
+MX6Q_PAD_EIM_A24__IPU2_CSI1_D_19		228
+MX6Q_PAD_EIM_A24__IPU2_SISG_2			229
+MX6Q_PAD_EIM_A24__IPU1_SISG_2			230
+MX6Q_PAD_EIM_A24__GPIO_5_4			231
+MX6Q_PAD_EIM_A24__PL301_PER1_HPROT_2		232
+MX6Q_PAD_EIM_A24__SRC_BT_CFG_24			233
+MX6Q_PAD_EIM_A23__WEIM_WEIM_A_23		234
+MX6Q_PAD_EIM_A23__IPU1_DISP1_DAT_18		235
+MX6Q_PAD_EIM_A23__IPU2_CSI1_D_18		236
+MX6Q_PAD_EIM_A23__IPU2_SISG_3			237
+MX6Q_PAD_EIM_A23__IPU1_SISG_3			238
+MX6Q_PAD_EIM_A23__GPIO_6_6			239
+MX6Q_PAD_EIM_A23__PL301_PER1_HPROT_3		240
+MX6Q_PAD_EIM_A23__SRC_BT_CFG_23			241
+MX6Q_PAD_EIM_A22__WEIM_WEIM_A_22		242
+MX6Q_PAD_EIM_A22__IPU1_DISP1_DAT_17		243
+MX6Q_PAD_EIM_A22__IPU2_CSI1_D_17		244
+MX6Q_PAD_EIM_A22__GPIO_2_16			245
+MX6Q_PAD_EIM_A22__TPSMP_HDATA_0			246
+MX6Q_PAD_EIM_A22__SRC_BT_CFG_22			247
+MX6Q_PAD_EIM_A21__WEIM_WEIM_A_21		248
+MX6Q_PAD_EIM_A21__IPU1_DISP1_DAT_16		249
+MX6Q_PAD_EIM_A21__IPU2_CSI1_D_16		250
+MX6Q_PAD_EIM_A21__RESERVED_RESERVED		251
+MX6Q_PAD_EIM_A21__MIPI_CORE_DPHY_OUT_18		252
+MX6Q_PAD_EIM_A21__GPIO_2_17			253
+MX6Q_PAD_EIM_A21__TPSMP_HDATA_1			254
+MX6Q_PAD_EIM_A21__SRC_BT_CFG_21			255
+MX6Q_PAD_EIM_A20__WEIM_WEIM_A_20		256
+MX6Q_PAD_EIM_A20__IPU1_DISP1_DAT_15		257
+MX6Q_PAD_EIM_A20__IPU2_CSI1_D_15		258
+MX6Q_PAD_EIM_A20__RESERVED_RESERVED		259
+MX6Q_PAD_EIM_A20__MIPI_CORE_DPHY_OUT_19		260
+MX6Q_PAD_EIM_A20__GPIO_2_18			261
+MX6Q_PAD_EIM_A20__TPSMP_HDATA_2			262
+MX6Q_PAD_EIM_A20__SRC_BT_CFG_20			263
+MX6Q_PAD_EIM_A19__WEIM_WEIM_A_19		264
+MX6Q_PAD_EIM_A19__IPU1_DISP1_DAT_14		265
+MX6Q_PAD_EIM_A19__IPU2_CSI1_D_14		266
+MX6Q_PAD_EIM_A19__RESERVED_RESERVED		267
+MX6Q_PAD_EIM_A19__MIPI_CORE_DPHY_OUT_20		268
+MX6Q_PAD_EIM_A19__GPIO_2_19			269
+MX6Q_PAD_EIM_A19__TPSMP_HDATA_3			270
+MX6Q_PAD_EIM_A19__SRC_BT_CFG_19			271
+MX6Q_PAD_EIM_A18__WEIM_WEIM_A_18		272
+MX6Q_PAD_EIM_A18__IPU1_DISP1_DAT_13		273
+MX6Q_PAD_EIM_A18__IPU2_CSI1_D_13		274
+MX6Q_PAD_EIM_A18__RESERVED_RESERVED		275
+MX6Q_PAD_EIM_A18__MIPI_CORE_DPHY_OUT_21		276
+MX6Q_PAD_EIM_A18__GPIO_2_20			277
+MX6Q_PAD_EIM_A18__TPSMP_HDATA_4			278
+MX6Q_PAD_EIM_A18__SRC_BT_CFG_18			279
+MX6Q_PAD_EIM_A17__WEIM_WEIM_A_17		280
+MX6Q_PAD_EIM_A17__IPU1_DISP1_DAT_12		281
+MX6Q_PAD_EIM_A17__IPU2_CSI1_D_12		282
+MX6Q_PAD_EIM_A17__RESERVED_RESERVED		283
+MX6Q_PAD_EIM_A17__MIPI_CORE_DPHY_OUT_22		284
+MX6Q_PAD_EIM_A17__GPIO_2_21			285
+MX6Q_PAD_EIM_A17__TPSMP_HDATA_5			286
+MX6Q_PAD_EIM_A17__SRC_BT_CFG_17			287
+MX6Q_PAD_EIM_A16__WEIM_WEIM_A_16		288
+MX6Q_PAD_EIM_A16__IPU1_DI1_DISP_CLK		289
+MX6Q_PAD_EIM_A16__IPU2_CSI1_PIXCLK		290
+MX6Q_PAD_EIM_A16__MIPI_CORE_DPHY_OUT_23		291
+MX6Q_PAD_EIM_A16__GPIO_2_22			292
+MX6Q_PAD_EIM_A16__TPSMP_HDATA_6			293
+MX6Q_PAD_EIM_A16__SRC_BT_CFG_16			294
+MX6Q_PAD_EIM_CS0__WEIM_WEIM_CS_0		295
+MX6Q_PAD_EIM_CS0__IPU1_DI1_PIN5			296
+MX6Q_PAD_EIM_CS0__ECSPI2_SCLK			297
+MX6Q_PAD_EIM_CS0__MIPI_CORE_DPHY_OUT_24		298
+MX6Q_PAD_EIM_CS0__GPIO_2_23			299
+MX6Q_PAD_EIM_CS0__TPSMP_HDATA_7			300
+MX6Q_PAD_EIM_CS1__WEIM_WEIM_CS_1		301
+MX6Q_PAD_EIM_CS1__IPU1_DI1_PIN6			302
+MX6Q_PAD_EIM_CS1__ECSPI2_MOSI			303
+MX6Q_PAD_EIM_CS1__MIPI_CORE_DPHY_OUT_25		304
+MX6Q_PAD_EIM_CS1__GPIO_2_24			305
+MX6Q_PAD_EIM_CS1__TPSMP_HDATA_8			306
+MX6Q_PAD_EIM_OE__WEIM_WEIM_OE			307
+MX6Q_PAD_EIM_OE__IPU1_DI1_PIN7			308
+MX6Q_PAD_EIM_OE__ECSPI2_MISO			309
+MX6Q_PAD_EIM_OE__MIPI_CORE_DPHY_OUT_26		310
+MX6Q_PAD_EIM_OE__GPIO_2_25			311
+MX6Q_PAD_EIM_OE__TPSMP_HDATA_9			312
+MX6Q_PAD_EIM_RW__WEIM_WEIM_RW			313
+MX6Q_PAD_EIM_RW__IPU1_DI1_PIN8			314
+MX6Q_PAD_EIM_RW__ECSPI2_SS0			315
+MX6Q_PAD_EIM_RW__MIPI_CORE_DPHY_OUT_27		316
+MX6Q_PAD_EIM_RW__GPIO_2_26			317
+MX6Q_PAD_EIM_RW__TPSMP_HDATA_10			318
+MX6Q_PAD_EIM_RW__SRC_BT_CFG_29			319
+MX6Q_PAD_EIM_LBA__WEIM_WEIM_LBA			320
+MX6Q_PAD_EIM_LBA__IPU1_DI1_PIN17		321
+MX6Q_PAD_EIM_LBA__ECSPI2_SS1			322
+MX6Q_PAD_EIM_LBA__GPIO_2_27			323
+MX6Q_PAD_EIM_LBA__TPSMP_HDATA_11		324
+MX6Q_PAD_EIM_LBA__SRC_BT_CFG_26			325
+MX6Q_PAD_EIM_EB0__WEIM_WEIM_EB_0		326
+MX6Q_PAD_EIM_EB0__IPU1_DISP1_DAT_11		327
+MX6Q_PAD_EIM_EB0__IPU2_CSI1_D_11		328
+MX6Q_PAD_EIM_EB0__MIPI_CORE_DPHY_OUT_0		329
+MX6Q_PAD_EIM_EB0__CCM_PMIC_RDY			330
+MX6Q_PAD_EIM_EB0__GPIO_2_28			331
+MX6Q_PAD_EIM_EB0__TPSMP_HDATA_12		332
+MX6Q_PAD_EIM_EB0__SRC_BT_CFG_27			333
+MX6Q_PAD_EIM_EB1__WEIM_WEIM_EB_1		334
+MX6Q_PAD_EIM_EB1__IPU1_DISP1_DAT_10		335
+MX6Q_PAD_EIM_EB1__IPU2_CSI1_D_10		336
+MX6Q_PAD_EIM_EB1__MIPI_CORE_DPHY__OUT_1		337
+MX6Q_PAD_EIM_EB1__GPIO_2_29			338
+MX6Q_PAD_EIM_EB1__TPSMP_HDATA_13		339
+MX6Q_PAD_EIM_EB1__SRC_BT_CFG_28			340
+MX6Q_PAD_EIM_DA0__WEIM_WEIM_DA_A_0		341
+MX6Q_PAD_EIM_DA0__IPU1_DISP1_DAT_9		342
+MX6Q_PAD_EIM_DA0__IPU2_CSI1_D_9			343
+MX6Q_PAD_EIM_DA0__MIPI_CORE_DPHY__OUT_2		344
+MX6Q_PAD_EIM_DA0__GPIO_3_0			345
+MX6Q_PAD_EIM_DA0__TPSMP_HDATA_14		346
+MX6Q_PAD_EIM_DA0__SRC_BT_CFG_0			347
+MX6Q_PAD_EIM_DA1__WEIM_WEIM_DA_A_1		348
+MX6Q_PAD_EIM_DA1__IPU1_DISP1_DAT_8		349
+MX6Q_PAD_EIM_DA1__IPU2_CSI1_D_8			350
+MX6Q_PAD_EIM_DA1__MIPI_CORE_DPHY_OUT_3		351
+MX6Q_PAD_EIM_DA1__USBPHY1_TX_LS_MODE		352
+MX6Q_PAD_EIM_DA1__GPIO_3_1			353
+MX6Q_PAD_EIM_DA1__TPSMP_HDATA_15		354
+MX6Q_PAD_EIM_DA1__SRC_BT_CFG_1			355
+MX6Q_PAD_EIM_DA2__WEIM_WEIM_DA_A_2		356
+MX6Q_PAD_EIM_DA2__IPU1_DISP1_DAT_7		357
+MX6Q_PAD_EIM_DA2__IPU2_CSI1_D_7			358
+MX6Q_PAD_EIM_DA2__MIPI_CORE_DPHY_OUT_4		359
+MX6Q_PAD_EIM_DA2__USBPHY1_TX_HS_MODE		360
+MX6Q_PAD_EIM_DA2__GPIO_3_2			361
+MX6Q_PAD_EIM_DA2__TPSMP_HDATA_16		362
+MX6Q_PAD_EIM_DA2__SRC_BT_CFG_2			363
+MX6Q_PAD_EIM_DA3__WEIM_WEIM_DA_A_3		364
+MX6Q_PAD_EIM_DA3__IPU1_DISP1_DAT_6		365
+MX6Q_PAD_EIM_DA3__IPU2_CSI1_D_6			366
+MX6Q_PAD_EIM_DA3__MIPI_CORE_DPHY_OUT_5		367
+MX6Q_PAD_EIM_DA3__USBPHY1_TX_HIZ		368
+MX6Q_PAD_EIM_DA3__GPIO_3_3			369
+MX6Q_PAD_EIM_DA3__TPSMP_HDATA_17		370
+MX6Q_PAD_EIM_DA3__SRC_BT_CFG_3			371
+MX6Q_PAD_EIM_DA4__WEIM_WEIM_DA_A_4		372
+MX6Q_PAD_EIM_DA4__IPU1_DISP1_DAT_5		373
+MX6Q_PAD_EIM_DA4__IPU2_CSI1_D_5			374
+MX6Q_PAD_EIM_DA4__MIPI_CORE_DPHY_OUT_6		375
+MX6Q_PAD_EIM_DA4__ANATOP_USBPHY1_TX_EN		376
+MX6Q_PAD_EIM_DA4__GPIO_3_4			377
+MX6Q_PAD_EIM_DA4__TPSMP_HDATA_18		378
+MX6Q_PAD_EIM_DA4__SRC_BT_CFG_4			379
+MX6Q_PAD_EIM_DA5__WEIM_WEIM_DA_A_5		380
+MX6Q_PAD_EIM_DA5__IPU1_DISP1_DAT_4		381
+MX6Q_PAD_EIM_DA5__IPU2_CSI1_D_4			382
+MX6Q_PAD_EIM_DA5__MIPI_CORE_DPHY_OUT_7		383
+MX6Q_PAD_EIM_DA5__ANATOP_USBPHY1_TX_DP		384
+MX6Q_PAD_EIM_DA5__GPIO_3_5			385
+MX6Q_PAD_EIM_DA5__TPSMP_HDATA_19		386
+MX6Q_PAD_EIM_DA5__SRC_BT_CFG_5			387
+MX6Q_PAD_EIM_DA6__WEIM_WEIM_DA_A_6		388
+MX6Q_PAD_EIM_DA6__IPU1_DISP1_DAT_3		389
+MX6Q_PAD_EIM_DA6__IPU2_CSI1_D_3			390
+MX6Q_PAD_EIM_DA6__MIPI_CORE_DPHY_OUT_8		391
+MX6Q_PAD_EIM_DA6__ANATOP_USBPHY1_TX_DN		392
+MX6Q_PAD_EIM_DA6__GPIO_3_6			393
+MX6Q_PAD_EIM_DA6__TPSMP_HDATA_20		394
+MX6Q_PAD_EIM_DA6__SRC_BT_CFG_6			395
+MX6Q_PAD_EIM_DA7__WEIM_WEIM_DA_A_7		396
+MX6Q_PAD_EIM_DA7__IPU1_DISP1_DAT_2		397
+MX6Q_PAD_EIM_DA7__IPU2_CSI1_D_2			398
+MX6Q_PAD_EIM_DA7__MIPI_CORE_DPHY_OUT_9		399
+MX6Q_PAD_EIM_DA7__GPIO_3_7			400
+MX6Q_PAD_EIM_DA7__TPSMP_HDATA_21		401
+MX6Q_PAD_EIM_DA7__SRC_BT_CFG_7			402
+MX6Q_PAD_EIM_DA8__WEIM_WEIM_DA_A_8		403
+MX6Q_PAD_EIM_DA8__IPU1_DISP1_DAT_1		404
+MX6Q_PAD_EIM_DA8__IPU2_CSI1_D_1			405
+MX6Q_PAD_EIM_DA8__MIPI_CORE_DPHY_OUT_10		406
+MX6Q_PAD_EIM_DA8__GPIO_3_8			407
+MX6Q_PAD_EIM_DA8__TPSMP_HDATA_22		408
+MX6Q_PAD_EIM_DA8__SRC_BT_CFG_8			409
+MX6Q_PAD_EIM_DA9__WEIM_WEIM_DA_A_9		410
+MX6Q_PAD_EIM_DA9__IPU1_DISP1_DAT_0		411
+MX6Q_PAD_EIM_DA9__IPU2_CSI1_D_0			412
+MX6Q_PAD_EIM_DA9__MIPI_CORE_DPHY_OUT_11		413
+MX6Q_PAD_EIM_DA9__GPIO_3_9			414
+MX6Q_PAD_EIM_DA9__TPSMP_HDATA_23		415
+MX6Q_PAD_EIM_DA9__SRC_BT_CFG_9			416
+MX6Q_PAD_EIM_DA10__WEIM_WEIM_DA_A_10		417
+MX6Q_PAD_EIM_DA10__IPU1_DI1_PIN15		418
+MX6Q_PAD_EIM_DA10__IPU2_CSI1_DATA_EN		419
+MX6Q_PAD_EIM_DA10__MIPI_CORE_DPHY_OUT12		420
+MX6Q_PAD_EIM_DA10__GPIO_3_10			421
+MX6Q_PAD_EIM_DA10__TPSMP_HDATA_24		422
+MX6Q_PAD_EIM_DA10__SRC_BT_CFG_10		423
+MX6Q_PAD_EIM_DA11__WEIM_WEIM_DA_A_11		424
+MX6Q_PAD_EIM_DA11__IPU1_DI1_PIN2		425
+MX6Q_PAD_EIM_DA11__IPU2_CSI1_HSYNC		426
+MX6Q_PAD_EIM_DA11__MIPI_CORE_DPHY_OUT13		427
+MX6Q_PAD_EIM_DA11__SDMA_DBG_EVT_CHN_6		428
+MX6Q_PAD_EIM_DA11__GPIO_3_11			429
+MX6Q_PAD_EIM_DA11__TPSMP_HDATA_25		430
+MX6Q_PAD_EIM_DA11__SRC_BT_CFG_11		431
+MX6Q_PAD_EIM_DA12__WEIM_WEIM_DA_A_12		432
+MX6Q_PAD_EIM_DA12__IPU1_DI1_PIN3		433
+MX6Q_PAD_EIM_DA12__IPU2_CSI1_VSYNC		434
+MX6Q_PAD_EIM_DA12__MIPI_CORE_DPHY_OUT14		435
+MX6Q_PAD_EIM_DA12__SDMA_DEBUG_EVT_CHN_3		436
+MX6Q_PAD_EIM_DA12__GPIO_3_12			437
+MX6Q_PAD_EIM_DA12__TPSMP_HDATA_26		438
+MX6Q_PAD_EIM_DA12__SRC_BT_CFG_12		439
+MX6Q_PAD_EIM_DA13__WEIM_WEIM_DA_A_13		440
+MX6Q_PAD_EIM_DA13__IPU1_DI1_D0_CS		441
+MX6Q_PAD_EIM_DA13__CCM_DI1_EXT_CLK		442
+MX6Q_PAD_EIM_DA13__MIPI_CORE_DPHY_OUT15		443
+MX6Q_PAD_EIM_DA13__SDMA_DEBUG_EVT_CHN_4		444
+MX6Q_PAD_EIM_DA13__GPIO_3_13			445
+MX6Q_PAD_EIM_DA13__TPSMP_HDATA_27		446
+MX6Q_PAD_EIM_DA13__SRC_BT_CFG_13		447
+MX6Q_PAD_EIM_DA14__WEIM_WEIM_DA_A_14		448
+MX6Q_PAD_EIM_DA14__IPU1_DI1_D1_CS		449
+MX6Q_PAD_EIM_DA14__CCM_DI0_EXT_CLK		450
+MX6Q_PAD_EIM_DA14__MIPI_CORE_DPHY_OUT16		451
+MX6Q_PAD_EIM_DA14__SDMA_DEBUG_EVT_CHN_5		452
+MX6Q_PAD_EIM_DA14__GPIO_3_14			453
+MX6Q_PAD_EIM_DA14__TPSMP_HDATA_28		454
+MX6Q_PAD_EIM_DA14__SRC_BT_CFG_14		455
+MX6Q_PAD_EIM_DA15__WEIM_WEIM_DA_A_15		456
+MX6Q_PAD_EIM_DA15__IPU1_DI1_PIN1		457
+MX6Q_PAD_EIM_DA15__IPU1_DI1_PIN4		458
+MX6Q_PAD_EIM_DA15__MIPI_CORE_DPHY_OUT17		459
+MX6Q_PAD_EIM_DA15__GPIO_3_15			460
+MX6Q_PAD_EIM_DA15__TPSMP_HDATA_29		461
+MX6Q_PAD_EIM_DA15__SRC_BT_CFG_15		462
+MX6Q_PAD_EIM_WAIT__WEIM_WEIM_WAIT		463
+MX6Q_PAD_EIM_WAIT__WEIM_WEIM_DTACK_B		464
+MX6Q_PAD_EIM_WAIT__GPIO_5_0			465
+MX6Q_PAD_EIM_WAIT__TPSMP_HDATA_30		466
+MX6Q_PAD_EIM_WAIT__SRC_BT_CFG_25		467
+MX6Q_PAD_EIM_BCLK__WEIM_WEIM_BCLK		468
+MX6Q_PAD_EIM_BCLK__IPU1_DI1_PIN16		469
+MX6Q_PAD_EIM_BCLK__GPIO_6_31			470
+MX6Q_PAD_EIM_BCLK__TPSMP_HDATA_31		471
+MX6Q_PAD_DI0_DISP_CLK__IPU1_DI0_DSP_CLK		472
+MX6Q_PAD_DI0_DISP_CLK__IPU2_DI0_DSP_CLK		473
+MX6Q_PAD_DI0_DISP_CLK__MIPI_CR_DPY_OT28		474
+MX6Q_PAD_DI0_DISP_CLK__SDMA_DBG_CR_STA0		475
+MX6Q_PAD_DI0_DISP_CLK__GPIO_4_16		476
+MX6Q_PAD_DI0_DISP_CLK__MMDC_DEBUG_0		477
+MX6Q_PAD_DI0_PIN15__IPU1_DI0_PIN15		478
+MX6Q_PAD_DI0_PIN15__IPU2_DI0_PIN15		479
+MX6Q_PAD_DI0_PIN15__AUDMUX_AUD6_TXC		480
+MX6Q_PAD_DI0_PIN15__MIPI_CR_DPHY_OUT_29		481
+MX6Q_PAD_DI0_PIN15__SDMA_DBG_CORE_STA_1		482
+MX6Q_PAD_DI0_PIN15__GPIO_4_17			483
+MX6Q_PAD_DI0_PIN15__MMDC_MMDC_DEBUG_1		484
+MX6Q_PAD_DI0_PIN2__IPU1_DI0_PIN2		485
+MX6Q_PAD_DI0_PIN2__IPU2_DI0_PIN2		486
+MX6Q_PAD_DI0_PIN2__AUDMUX_AUD6_TXD		487
+MX6Q_PAD_DI0_PIN2__MIPI_CR_DPHY_OUT_30		488
+MX6Q_PAD_DI0_PIN2__SDMA_DBG_CORE_STA_2		489
+MX6Q_PAD_DI0_PIN2__GPIO_4_18			490
+MX6Q_PAD_DI0_PIN2__MMDC_DEBUG_2			491
+MX6Q_PAD_DI0_PIN2__PL301_PER1_HADDR_9		492
+MX6Q_PAD_DI0_PIN3__IPU1_DI0_PIN3		493
+MX6Q_PAD_DI0_PIN3__IPU2_DI0_PIN3		494
+MX6Q_PAD_DI0_PIN3__AUDMUX_AUD6_TXFS		495
+MX6Q_PAD_DI0_PIN3__MIPI_CORE_DPHY_OUT31		496
+MX6Q_PAD_DI0_PIN3__SDMA_DBG_CORE_STA_3		497
+MX6Q_PAD_DI0_PIN3__GPIO_4_19			498
+MX6Q_PAD_DI0_PIN3__MMDC_MMDC_DEBUG_3		499
+MX6Q_PAD_DI0_PIN3__PL301_PER1_HADDR_10		500
+MX6Q_PAD_DI0_PIN4__IPU1_DI0_PIN4		501
+MX6Q_PAD_DI0_PIN4__IPU2_DI0_PIN4		502
+MX6Q_PAD_DI0_PIN4__AUDMUX_AUD6_RXD		503
+MX6Q_PAD_DI0_PIN4__USDHC1_WP			504
+MX6Q_PAD_DI0_PIN4__SDMA_DEBUG_YIELD		505
+MX6Q_PAD_DI0_PIN4__GPIO_4_20			506
+MX6Q_PAD_DI0_PIN4__MMDC_MMDC_DEBUG_4		507
+MX6Q_PAD_DI0_PIN4__PL301_PER1_HADDR_11		508
+MX6Q_PAD_DISP0_DAT0__IPU1_DISP0_DAT_0		509
+MX6Q_PAD_DISP0_DAT0__IPU2_DISP0_DAT_0		510
+MX6Q_PAD_DISP0_DAT0__ECSPI3_SCLK		511
+MX6Q_PAD_DISP0_DAT0__USDHC1_USDHC_DBG_0		512
+MX6Q_PAD_DISP0_DAT0__SDMA_DBG_CORE_RUN		513
+MX6Q_PAD_DISP0_DAT0__GPIO_4_21			514
+MX6Q_PAD_DISP0_DAT0__MMDC_MMDC_DEBUG_5		515
+MX6Q_PAD_DISP0_DAT1__IPU1_DISP0_DAT_1		516
+MX6Q_PAD_DISP0_DAT1__IPU2_DISP0_DAT_1		517
+MX6Q_PAD_DISP0_DAT1__ECSPI3_MOSI		518
+MX6Q_PAD_DISP0_DAT1__USDHC1_USDHC_DBG_1		519
+MX6Q_PAD_DISP0_DAT1__SDMA_DBG_EVT_CHNSL		520
+MX6Q_PAD_DISP0_DAT1__GPIO_4_22			521
+MX6Q_PAD_DISP0_DAT1__MMDC_DEBUG_6		522
+MX6Q_PAD_DISP0_DAT1__PL301_PER1_HADR_12		523
+MX6Q_PAD_DISP0_DAT2__IPU1_DISP0_DAT_2		524
+MX6Q_PAD_DISP0_DAT2__IPU2_DISP0_DAT_2		525
+MX6Q_PAD_DISP0_DAT2__ECSPI3_MISO		526
+MX6Q_PAD_DISP0_DAT2__USDHC1_USDHC_DBG_2		527
+MX6Q_PAD_DISP0_DAT2__SDMA_DEBUG_MODE		528
+MX6Q_PAD_DISP0_DAT2__GPIO_4_23			529
+MX6Q_PAD_DISP0_DAT2__MMDC_DEBUG_7		530
+MX6Q_PAD_DISP0_DAT2__PL301_PER1_HADR_13		531
+MX6Q_PAD_DISP0_DAT3__IPU1_DISP0_DAT_3		532
+MX6Q_PAD_DISP0_DAT3__IPU2_DISP0_DAT_3		533
+MX6Q_PAD_DISP0_DAT3__ECSPI3_SS0			534
+MX6Q_PAD_DISP0_DAT3__USDHC1_USDHC_DBG_3		535
+MX6Q_PAD_DISP0_DAT3__SDMA_DBG_BUS_ERROR		536
+MX6Q_PAD_DISP0_DAT3__GPIO_4_24			537
+MX6Q_PAD_DISP0_DAT3__MMDC_MMDC_DBG_8		538
+MX6Q_PAD_DISP0_DAT3__PL301_PER1_HADR_14		539
+MX6Q_PAD_DISP0_DAT4__IPU1_DISP0_DAT_4		540
+MX6Q_PAD_DISP0_DAT4__IPU2_DISP0_DAT_4		541
+MX6Q_PAD_DISP0_DAT4__ECSPI3_SS1			542
+MX6Q_PAD_DISP0_DAT4__USDHC1_USDHC_DBG_4		543
+MX6Q_PAD_DISP0_DAT4__SDMA_DEBUG_BUS_RWB		544
+MX6Q_PAD_DISP0_DAT4__GPIO_4_25			545
+MX6Q_PAD_DISP0_DAT4__MMDC_MMDC_DEBUG_9		546
+MX6Q_PAD_DISP0_DAT4__PL301_PER1_HADR_15		547
+MX6Q_PAD_DISP0_DAT5__IPU1_DISP0_DAT_5		548
+MX6Q_PAD_DISP0_DAT5__IPU2_DISP0_DAT_5		549
+MX6Q_PAD_DISP0_DAT5__ECSPI3_SS2			550
+MX6Q_PAD_DISP0_DAT5__AUDMUX_AUD6_RXFS		551
+MX6Q_PAD_DISP0_DAT5__SDMA_DBG_MCH_DMBUS		552
+MX6Q_PAD_DISP0_DAT5__GPIO_4_26			553
+MX6Q_PAD_DISP0_DAT5__MMDC_DEBUG_10		554
+MX6Q_PAD_DISP0_DAT5__PL301_PER1_HADR_16		555
+MX6Q_PAD_DISP0_DAT6__IPU1_DISP0_DAT_6		556
+MX6Q_PAD_DISP0_DAT6__IPU2_DISP0_DAT_6		557
+MX6Q_PAD_DISP0_DAT6__ECSPI3_SS3			558
+MX6Q_PAD_DISP0_DAT6__AUDMUX_AUD6_RXC		559
+MX6Q_PAD_DISP0_DAT6__SDMA_DBG_RTBUF_WRT		560
+MX6Q_PAD_DISP0_DAT6__GPIO_4_27			561
+MX6Q_PAD_DISP0_DAT6__MMDC_DEBUG_11		562
+MX6Q_PAD_DISP0_DAT6__PL301_PER1_HADR_17		563
+MX6Q_PAD_DISP0_DAT7__IPU1_DISP0_DAT_7		564
+MX6Q_PAD_DISP0_DAT7__IPU2_DISP0_DAT_7		565
+MX6Q_PAD_DISP0_DAT7__ECSPI3_RDY			566
+MX6Q_PAD_DISP0_DAT7__USDHC1_USDHC_DBG_5		567
+MX6Q_PAD_DISP0_DAT7__SDMA_DBG_EVT_CHN_0		568
+MX6Q_PAD_DISP0_DAT7__GPIO_4_28			569
+MX6Q_PAD_DISP0_DAT7__MMDC_DEBUG_12		570
+MX6Q_PAD_DISP0_DAT7__PL301_PER1_HADR_18		571
+MX6Q_PAD_DISP0_DAT8__IPU1_DISP0_DAT_8		572
+MX6Q_PAD_DISP0_DAT8__IPU2_DISP0_DAT_8		573
+MX6Q_PAD_DISP0_DAT8__PWM1_PWMO			574
+MX6Q_PAD_DISP0_DAT8__WDOG1_WDOG_B		575
+MX6Q_PAD_DISP0_DAT8__SDMA_DBG_EVT_CHN_1		576
+MX6Q_PAD_DISP0_DAT8__GPIO_4_29			577
+MX6Q_PAD_DISP0_DAT8__MMDC_DEBUG_13		578
+MX6Q_PAD_DISP0_DAT8__PL301_PER1_HADR_19		579
+MX6Q_PAD_DISP0_DAT9__IPU1_DISP0_DAT_9		580
+MX6Q_PAD_DISP0_DAT9__IPU2_DISP0_DAT_9		581
+MX6Q_PAD_DISP0_DAT9__PWM2_PWMO			582
+MX6Q_PAD_DISP0_DAT9__WDOG2_WDOG_B		583
+MX6Q_PAD_DISP0_DAT9__SDMA_DBG_EVT_CHN_2		584
+MX6Q_PAD_DISP0_DAT9__GPIO_4_30			585
+MX6Q_PAD_DISP0_DAT9__MMDC_DEBUG_14		586
+MX6Q_PAD_DISP0_DAT9__PL301_PER1_HADR_20		587
+MX6Q_PAD_DISP0_DAT10__IPU1_DISP0_DAT_10		588
+MX6Q_PAD_DISP0_DAT10__IPU2_DISP0_DAT_10		589
+MX6Q_PAD_DISP0_DAT10__USDHC1_DBG_6		590
+MX6Q_PAD_DISP0_DAT10__SDMA_DBG_EVT_CHN3		591
+MX6Q_PAD_DISP0_DAT10__GPIO_4_31			592
+MX6Q_PAD_DISP0_DAT10__MMDC_DEBUG_15		593
+MX6Q_PAD_DISP0_DAT10__PL301_PER1_HADR21		594
+MX6Q_PAD_DISP0_DAT11__IPU1_DISP0_DAT_11		595
+MX6Q_PAD_DISP0_DAT11__IPU2_DISP0_DAT_11		596
+MX6Q_PAD_DISP0_DAT11__USDHC1_USDHC_DBG7		597
+MX6Q_PAD_DISP0_DAT11__SDMA_DBG_EVT_CHN4		598
+MX6Q_PAD_DISP0_DAT11__GPIO_5_5			599
+MX6Q_PAD_DISP0_DAT11__MMDC_DEBUG_16		600
+MX6Q_PAD_DISP0_DAT11__PL301_PER1_HADR22		601
+MX6Q_PAD_DISP0_DAT12__IPU1_DISP0_DAT_12		602
+MX6Q_PAD_DISP0_DAT12__IPU2_DISP0_DAT_12		603
+MX6Q_PAD_DISP0_DAT12__RESERVED_RESERVED		604
+MX6Q_PAD_DISP0_DAT12__SDMA_DBG_EVT_CHN5		605
+MX6Q_PAD_DISP0_DAT12__GPIO_5_6			606
+MX6Q_PAD_DISP0_DAT12__MMDC_DEBUG_17		607
+MX6Q_PAD_DISP0_DAT12__PL301_PER1_HADR23		608
+MX6Q_PAD_DISP0_DAT13__IPU1_DISP0_DAT_13		609
+MX6Q_PAD_DISP0_DAT13__IPU2_DISP0_DAT_13		610
+MX6Q_PAD_DISP0_DAT13__AUDMUX_AUD5_RXFS		611
+MX6Q_PAD_DISP0_DAT13__SDMA_DBG_EVT_CHN0		612
+MX6Q_PAD_DISP0_DAT13__GPIO_5_7			613
+MX6Q_PAD_DISP0_DAT13__MMDC_DEBUG_18		614
+MX6Q_PAD_DISP0_DAT13__PL301_PER1_HADR24		615
+MX6Q_PAD_DISP0_DAT14__IPU1_DISP0_DAT_14		616
+MX6Q_PAD_DISP0_DAT14__IPU2_DISP0_DAT_14		617
+MX6Q_PAD_DISP0_DAT14__AUDMUX_AUD5_RXC		618
+MX6Q_PAD_DISP0_DAT14__SDMA_DBG_EVT_CHN1		619
+MX6Q_PAD_DISP0_DAT14__GPIO_5_8			620
+MX6Q_PAD_DISP0_DAT14__MMDC_DEBUG_19		621
+MX6Q_PAD_DISP0_DAT15__IPU1_DISP0_DAT_15		622
+MX6Q_PAD_DISP0_DAT15__IPU2_DISP0_DAT_15		623
+MX6Q_PAD_DISP0_DAT15__ECSPI1_SS1		624
+MX6Q_PAD_DISP0_DAT15__ECSPI2_SS1		625
+MX6Q_PAD_DISP0_DAT15__SDMA_DBG_EVT_CHN2		626
+MX6Q_PAD_DISP0_DAT15__GPIO_5_9			627
+MX6Q_PAD_DISP0_DAT15__MMDC_DEBUG_20		628
+MX6Q_PAD_DISP0_DAT15__PL301_PER1_HADR25		629
+MX6Q_PAD_DISP0_DAT16__IPU1_DISP0_DAT_16		630
+MX6Q_PAD_DISP0_DAT16__IPU2_DISP0_DAT_16		631
+MX6Q_PAD_DISP0_DAT16__ECSPI2_MOSI		632
+MX6Q_PAD_DISP0_DAT16__AUDMUX_AUD5_TXC		633
+MX6Q_PAD_DISP0_DAT16__SDMA_EXT_EVENT_0		634
+MX6Q_PAD_DISP0_DAT16__GPIO_5_10			635
+MX6Q_PAD_DISP0_DAT16__MMDC_DEBUG_21		636
+MX6Q_PAD_DISP0_DAT16__PL301_PER1_HADR26		637
+MX6Q_PAD_DISP0_DAT17__IPU1_DISP0_DAT_17		638
+MX6Q_PAD_DISP0_DAT17__IPU2_DISP0_DAT_17		639
+MX6Q_PAD_DISP0_DAT17__ECSPI2_MISO		640
+MX6Q_PAD_DISP0_DAT17__AUDMUX_AUD5_TXD		641
+MX6Q_PAD_DISP0_DAT17__SDMA_EXT_EVENT_1		642
+MX6Q_PAD_DISP0_DAT17__GPIO_5_11			643
+MX6Q_PAD_DISP0_DAT17__MMDC_DEBUG_22		644
+MX6Q_PAD_DISP0_DAT17__PL301_PER1_HADR27		645
+MX6Q_PAD_DISP0_DAT18__IPU1_DISP0_DAT_18		646
+MX6Q_PAD_DISP0_DAT18__IPU2_DISP0_DAT_18		647
+MX6Q_PAD_DISP0_DAT18__ECSPI2_SS0		648
+MX6Q_PAD_DISP0_DAT18__AUDMUX_AUD5_TXFS		649
+MX6Q_PAD_DISP0_DAT18__AUDMUX_AUD4_RXFS		650
+MX6Q_PAD_DISP0_DAT18__GPIO_5_12			651
+MX6Q_PAD_DISP0_DAT18__MMDC_DEBUG_23		652
+MX6Q_PAD_DISP0_DAT18__WEIM_WEIM_CS_2		653
+MX6Q_PAD_DISP0_DAT19__IPU1_DISP0_DAT_19		654
+MX6Q_PAD_DISP0_DAT19__IPU2_DISP0_DAT_19		655
+MX6Q_PAD_DISP0_DAT19__ECSPI2_SCLK		656
+MX6Q_PAD_DISP0_DAT19__AUDMUX_AUD5_RXD		657
+MX6Q_PAD_DISP0_DAT19__AUDMUX_AUD4_RXC		658
+MX6Q_PAD_DISP0_DAT19__GPIO_5_13			659
+MX6Q_PAD_DISP0_DAT19__MMDC_DEBUG_24		660
+MX6Q_PAD_DISP0_DAT19__WEIM_WEIM_CS_3		661
+MX6Q_PAD_DISP0_DAT20__IPU1_DISP0_DAT_20		662
+MX6Q_PAD_DISP0_DAT20__IPU2_DISP0_DAT_20		663
+MX6Q_PAD_DISP0_DAT20__ECSPI1_SCLK		664
+MX6Q_PAD_DISP0_DAT20__AUDMUX_AUD4_TXC		665
+MX6Q_PAD_DISP0_DAT20__SDMA_DBG_EVT_CHN7		666
+MX6Q_PAD_DISP0_DAT20__GPIO_5_14			667
+MX6Q_PAD_DISP0_DAT20__MMDC_DEBUG_25		668
+MX6Q_PAD_DISP0_DAT20__PL301_PER1_HADR28		669
+MX6Q_PAD_DISP0_DAT21__IPU1_DISP0_DAT_21		670
+MX6Q_PAD_DISP0_DAT21__IPU2_DISP0_DAT_21		671
+MX6Q_PAD_DISP0_DAT21__ECSPI1_MOSI		672
+MX6Q_PAD_DISP0_DAT21__AUDMUX_AUD4_TXD		673
+MX6Q_PAD_DISP0_DAT21__SDMA_DBG_BUS_DEV0		674
+MX6Q_PAD_DISP0_DAT21__GPIO_5_15			675
+MX6Q_PAD_DISP0_DAT21__MMDC_DEBUG_26		676
+MX6Q_PAD_DISP0_DAT21__PL301_PER1_HADR29		677
+MX6Q_PAD_DISP0_DAT22__IPU1_DISP0_DAT_22		678
+MX6Q_PAD_DISP0_DAT22__IPU2_DISP0_DAT_22		679
+MX6Q_PAD_DISP0_DAT22__ECSPI1_MISO		680
+MX6Q_PAD_DISP0_DAT22__AUDMUX_AUD4_TXFS		681
+MX6Q_PAD_DISP0_DAT22__SDMA_DBG_BUS_DEV1		682
+MX6Q_PAD_DISP0_DAT22__GPIO_5_16			683
+MX6Q_PAD_DISP0_DAT22__MMDC_DEBUG_27		684
+MX6Q_PAD_DISP0_DAT22__PL301_PER1_HADR30		685
+MX6Q_PAD_DISP0_DAT23__IPU1_DISP0_DAT_23		686
+MX6Q_PAD_DISP0_DAT23__IPU2_DISP0_DAT_23		687
+MX6Q_PAD_DISP0_DAT23__ECSPI1_SS0		688
+MX6Q_PAD_DISP0_DAT23__AUDMUX_AUD4_RXD		689
+MX6Q_PAD_DISP0_DAT23__SDMA_DBG_BUS_DEV2		690
+MX6Q_PAD_DISP0_DAT23__GPIO_5_17			691
+MX6Q_PAD_DISP0_DAT23__MMDC_DEBUG_28		692
+MX6Q_PAD_DISP0_DAT23__PL301_PER1_HADR31		693
+MX6Q_PAD_ENET_MDIO__RESERVED_RESERVED		694
+MX6Q_PAD_ENET_MDIO__ENET_MDIO			695
+MX6Q_PAD_ENET_MDIO__ESAI1_SCKR			696
+MX6Q_PAD_ENET_MDIO__SDMA_DEBUG_BUS_DEV3		697
+MX6Q_PAD_ENET_MDIO__ENET_1588_EVT1_OUT		698
+MX6Q_PAD_ENET_MDIO__GPIO_1_22			699
+MX6Q_PAD_ENET_MDIO__SPDIF_PLOCK			700
+MX6Q_PAD_ENET_REF_CLK__RESERVED_RSRVED		701
+MX6Q_PAD_ENET_REF_CLK__ENET_TX_CLK		702
+MX6Q_PAD_ENET_REF_CLK__ESAI1_FSR		703
+MX6Q_PAD_ENET_REF_CLK__SDMA_DBGBUS_DEV4		704
+MX6Q_PAD_ENET_REF_CLK__GPIO_1_23		705
+MX6Q_PAD_ENET_REF_CLK__SPDIF_SRCLK		706
+MX6Q_PAD_ENET_REF_CLK__USBPHY1_RX_SQH		707
+MX6Q_PAD_ENET_RX_ER__ENET_RX_ER			708
+MX6Q_PAD_ENET_RX_ER__ESAI1_HCKR			709
+MX6Q_PAD_ENET_RX_ER__SPDIF_IN1			710
+MX6Q_PAD_ENET_RX_ER__ENET_1588_EVT2_OUT		711
+MX6Q_PAD_ENET_RX_ER__GPIO_1_24			712
+MX6Q_PAD_ENET_RX_ER__PHY_TDI			713
+MX6Q_PAD_ENET_RX_ER__USBPHY1_RX_HS_RXD		714
+MX6Q_PAD_ENET_CRS_DV__RESERVED_RSRVED		715
+MX6Q_PAD_ENET_CRS_DV__ENET_RX_EN		716
+MX6Q_PAD_ENET_CRS_DV__ESAI1_SCKT		717
+MX6Q_PAD_ENET_CRS_DV__SPDIF_EXTCLK		718
+MX6Q_PAD_ENET_CRS_DV__GPIO_1_25			719
+MX6Q_PAD_ENET_CRS_DV__PHY_TDO			720
+MX6Q_PAD_ENET_CRS_DV__USBPHY1_RX_FS_RXD		721
+MX6Q_PAD_ENET_RXD1__MLB_MLBSIG			722
+MX6Q_PAD_ENET_RXD1__ENET_RDATA_1		723
+MX6Q_PAD_ENET_RXD1__ESAI1_FST			724
+MX6Q_PAD_ENET_RXD1__ENET_1588_EVT3_OUT		725
+MX6Q_PAD_ENET_RXD1__GPIO_1_26			726
+MX6Q_PAD_ENET_RXD1__PHY_TCK			727
+MX6Q_PAD_ENET_RXD1__USBPHY1_RX_DISCON		728
+MX6Q_PAD_ENET_RXD0__OSC32K_32K_OUT		729
+MX6Q_PAD_ENET_RXD0__ENET_RDATA_0		730
+MX6Q_PAD_ENET_RXD0__ESAI1_HCKT			731
+MX6Q_PAD_ENET_RXD0__SPDIF_OUT1			732
+MX6Q_PAD_ENET_RXD0__GPIO_1_27			733
+MX6Q_PAD_ENET_RXD0__PHY_TMS			734
+MX6Q_PAD_ENET_RXD0__USBPHY1_PLL_CK20DIV		735
+MX6Q_PAD_ENET_TX_EN__RESERVED_RSRVED		736
+MX6Q_PAD_ENET_TX_EN__ENET_TX_EN			737
+MX6Q_PAD_ENET_TX_EN__ESAI1_TX3_RX2		738
+MX6Q_PAD_ENET_TX_EN__GPIO_1_28			739
+MX6Q_PAD_ENET_TX_EN__SATA_PHY_TDI		740
+MX6Q_PAD_ENET_TX_EN__USBPHY2_RX_SQH		741
+MX6Q_PAD_ENET_TXD1__MLB_MLBCLK			742
+MX6Q_PAD_ENET_TXD1__ENET_TDATA_1		743
+MX6Q_PAD_ENET_TXD1__ESAI1_TX2_RX3		744
+MX6Q_PAD_ENET_TXD1__ENET_1588_EVENT0_IN		745
+MX6Q_PAD_ENET_TXD1__GPIO_1_29			746
+MX6Q_PAD_ENET_TXD1__SATA_PHY_TDO		747
+MX6Q_PAD_ENET_TXD1__USBPHY2_RX_HS_RXD		748
+MX6Q_PAD_ENET_TXD0__RESERVED_RSRVED		749
+MX6Q_PAD_ENET_TXD0__ENET_TDATA_0		750
+MX6Q_PAD_ENET_TXD0__ESAI1_TX4_RX1		751
+MX6Q_PAD_ENET_TXD0__GPIO_1_30			752
+MX6Q_PAD_ENET_TXD0__SATA_PHY_TCK		753
+MX6Q_PAD_ENET_TXD0__USBPHY2_RX_FS_RXD		754
+MX6Q_PAD_ENET_MDC__MLB_MLBDAT			755
+MX6Q_PAD_ENET_MDC__ENET_MDC			756
+MX6Q_PAD_ENET_MDC__ESAI1_TX5_RX0		757
+MX6Q_PAD_ENET_MDC__ENET_1588_EVENT1_IN		758
+MX6Q_PAD_ENET_MDC__GPIO_1_31			759
+MX6Q_PAD_ENET_MDC__SATA_PHY_TMS			760
+MX6Q_PAD_ENET_MDC__USBPHY2_RX_DISCON		761
+MX6Q_PAD_DRAM_D40__MMDC_DRAM_D_40		762
+MX6Q_PAD_DRAM_D41__MMDC_DRAM_D_41		763
+MX6Q_PAD_DRAM_D42__MMDC_DRAM_D_42		764
+MX6Q_PAD_DRAM_D43__MMDC_DRAM_D_43		765
+MX6Q_PAD_DRAM_D44__MMDC_DRAM_D_44		766
+MX6Q_PAD_DRAM_D45__MMDC_DRAM_D_45		767
+MX6Q_PAD_DRAM_D46__MMDC_DRAM_D_46		768
+MX6Q_PAD_DRAM_D47__MMDC_DRAM_D_47		769
+MX6Q_PAD_DRAM_SDQS5__MMDC_DRAM_SDQS_5		770
+MX6Q_PAD_DRAM_DQM5__MMDC_DRAM_DQM_5		771
+MX6Q_PAD_DRAM_D32__MMDC_DRAM_D_32		772
+MX6Q_PAD_DRAM_D33__MMDC_DRAM_D_33		773
+MX6Q_PAD_DRAM_D34__MMDC_DRAM_D_34		774
+MX6Q_PAD_DRAM_D35__MMDC_DRAM_D_35		775
+MX6Q_PAD_DRAM_D36__MMDC_DRAM_D_36		776
+MX6Q_PAD_DRAM_D37__MMDC_DRAM_D_37		777
+MX6Q_PAD_DRAM_D38__MMDC_DRAM_D_38		778
+MX6Q_PAD_DRAM_D39__MMDC_DRAM_D_39		779
+MX6Q_PAD_DRAM_DQM4__MMDC_DRAM_DQM_4		780
+MX6Q_PAD_DRAM_SDQS4__MMDC_DRAM_SDQS_4		781
+MX6Q_PAD_DRAM_D24__MMDC_DRAM_D_24		782
+MX6Q_PAD_DRAM_D25__MMDC_DRAM_D_25		783
+MX6Q_PAD_DRAM_D26__MMDC_DRAM_D_26		784
+MX6Q_PAD_DRAM_D27__MMDC_DRAM_D_27		785
+MX6Q_PAD_DRAM_D28__MMDC_DRAM_D_28		786
+MX6Q_PAD_DRAM_D29__MMDC_DRAM_D_29		787
+MX6Q_PAD_DRAM_SDQS3__MMDC_DRAM_SDQS_3		788
+MX6Q_PAD_DRAM_D30__MMDC_DRAM_D_30		789
+MX6Q_PAD_DRAM_D31__MMDC_DRAM_D_31		790
+MX6Q_PAD_DRAM_DQM3__MMDC_DRAM_DQM_3		791
+MX6Q_PAD_DRAM_D16__MMDC_DRAM_D_16		792
+MX6Q_PAD_DRAM_D17__MMDC_DRAM_D_17		793
+MX6Q_PAD_DRAM_D18__MMDC_DRAM_D_18		794
+MX6Q_PAD_DRAM_D19__MMDC_DRAM_D_19		795
+MX6Q_PAD_DRAM_D20__MMDC_DRAM_D_20		796
+MX6Q_PAD_DRAM_D21__MMDC_DRAM_D_21		797
+MX6Q_PAD_DRAM_D22__MMDC_DRAM_D_22		798
+MX6Q_PAD_DRAM_SDQS2__MMDC_DRAM_SDQS_2		799
+MX6Q_PAD_DRAM_D23__MMDC_DRAM_D_23		800
+MX6Q_PAD_DRAM_DQM2__MMDC_DRAM_DQM_2		801
+MX6Q_PAD_DRAM_A0__MMDC_DRAM_A_0			802
+MX6Q_PAD_DRAM_A1__MMDC_DRAM_A_1			803
+MX6Q_PAD_DRAM_A2__MMDC_DRAM_A_2			804
+MX6Q_PAD_DRAM_A3__MMDC_DRAM_A_3			805
+MX6Q_PAD_DRAM_A4__MMDC_DRAM_A_4			806
+MX6Q_PAD_DRAM_A5__MMDC_DRAM_A_5			807
+MX6Q_PAD_DRAM_A6__MMDC_DRAM_A_6			808
+MX6Q_PAD_DRAM_A7__MMDC_DRAM_A_7			809
+MX6Q_PAD_DRAM_A8__MMDC_DRAM_A_8			810
+MX6Q_PAD_DRAM_A9__MMDC_DRAM_A_9			811
+MX6Q_PAD_DRAM_A10__MMDC_DRAM_A_10		812
+MX6Q_PAD_DRAM_A11__MMDC_DRAM_A_11		813
+MX6Q_PAD_DRAM_A12__MMDC_DRAM_A_12		814
+MX6Q_PAD_DRAM_A13__MMDC_DRAM_A_13		815
+MX6Q_PAD_DRAM_A14__MMDC_DRAM_A_14		816
+MX6Q_PAD_DRAM_A15__MMDC_DRAM_A_15		817
+MX6Q_PAD_DRAM_CAS__MMDC_DRAM_CAS		818
+MX6Q_PAD_DRAM_CS0__MMDC_DRAM_CS_0		819
+MX6Q_PAD_DRAM_CS1__MMDC_DRAM_CS_1		820
+MX6Q_PAD_DRAM_RAS__MMDC_DRAM_RAS		821
+MX6Q_PAD_DRAM_RESET__MMDC_DRAM_RESET		822
+MX6Q_PAD_DRAM_SDBA0__MMDC_DRAM_SDBA_0		823
+MX6Q_PAD_DRAM_SDBA1__MMDC_DRAM_SDBA_1		824
+MX6Q_PAD_DRAM_SDCLK_0__MMDC_DRAM_SDCLK0		825
+MX6Q_PAD_DRAM_SDBA2__MMDC_DRAM_SDBA_2		826
+MX6Q_PAD_DRAM_SDCKE0__MMDC_DRAM_SDCKE_0		827
+MX6Q_PAD_DRAM_SDCLK_1__MMDC_DRAM_SDCLK1		828
+MX6Q_PAD_DRAM_SDCKE1__MMDC_DRAM_SDCKE_1		829
+MX6Q_PAD_DRAM_SDODT0__MMDC_DRAM_ODT_0		830
+MX6Q_PAD_DRAM_SDODT1__MMDC_DRAM_ODT_1		831
+MX6Q_PAD_DRAM_SDWE__MMDC_DRAM_SDWE		832
+MX6Q_PAD_DRAM_D0__MMDC_DRAM_D_0			833
+MX6Q_PAD_DRAM_D1__MMDC_DRAM_D_1			834
+MX6Q_PAD_DRAM_D2__MMDC_DRAM_D_2			835
+MX6Q_PAD_DRAM_D3__MMDC_DRAM_D_3			836
+MX6Q_PAD_DRAM_D4__MMDC_DRAM_D_4			837
+MX6Q_PAD_DRAM_D5__MMDC_DRAM_D_5			838
+MX6Q_PAD_DRAM_SDQS0__MMDC_DRAM_SDQS_0		839
+MX6Q_PAD_DRAM_D6__MMDC_DRAM_D_6			840
+MX6Q_PAD_DRAM_D7__MMDC_DRAM_D_7			841
+MX6Q_PAD_DRAM_DQM0__MMDC_DRAM_DQM_0		842
+MX6Q_PAD_DRAM_D8__MMDC_DRAM_D_8			843
+MX6Q_PAD_DRAM_D9__MMDC_DRAM_D_9			844
+MX6Q_PAD_DRAM_D10__MMDC_DRAM_D_10		845
+MX6Q_PAD_DRAM_D11__MMDC_DRAM_D_11		846
+MX6Q_PAD_DRAM_D12__MMDC_DRAM_D_12		847
+MX6Q_PAD_DRAM_D13__MMDC_DRAM_D_13		848
+MX6Q_PAD_DRAM_D14__MMDC_DRAM_D_14		849
+MX6Q_PAD_DRAM_SDQS1__MMDC_DRAM_SDQS_1		850
+MX6Q_PAD_DRAM_D15__MMDC_DRAM_D_15		851
+MX6Q_PAD_DRAM_DQM1__MMDC_DRAM_DQM_1		852
+MX6Q_PAD_DRAM_D48__MMDC_DRAM_D_48		853
+MX6Q_PAD_DRAM_D49__MMDC_DRAM_D_49		854
+MX6Q_PAD_DRAM_D50__MMDC_DRAM_D_50		855
+MX6Q_PAD_DRAM_D51__MMDC_DRAM_D_51		856
+MX6Q_PAD_DRAM_D52__MMDC_DRAM_D_52		857
+MX6Q_PAD_DRAM_D53__MMDC_DRAM_D_53		858
+MX6Q_PAD_DRAM_D54__MMDC_DRAM_D_54		859
+MX6Q_PAD_DRAM_D55__MMDC_DRAM_D_55		860
+MX6Q_PAD_DRAM_SDQS6__MMDC_DRAM_SDQS_6		861
+MX6Q_PAD_DRAM_DQM6__MMDC_DRAM_DQM_6		862
+MX6Q_PAD_DRAM_D56__MMDC_DRAM_D_56		863
+MX6Q_PAD_DRAM_SDQS7__MMDC_DRAM_SDQS_7		864
+MX6Q_PAD_DRAM_D57__MMDC_DRAM_D_57		865
+MX6Q_PAD_DRAM_D58__MMDC_DRAM_D_58		866
+MX6Q_PAD_DRAM_D59__MMDC_DRAM_D_59		867
+MX6Q_PAD_DRAM_D60__MMDC_DRAM_D_60		868
+MX6Q_PAD_DRAM_DQM7__MMDC_DRAM_DQM_7		869
+MX6Q_PAD_DRAM_D61__MMDC_DRAM_D_61		870
+MX6Q_PAD_DRAM_D62__MMDC_DRAM_D_62		871
+MX6Q_PAD_DRAM_D63__MMDC_DRAM_D_63		872
+MX6Q_PAD_KEY_COL0__ECSPI1_SCLK			873
+MX6Q_PAD_KEY_COL0__ENET_RDATA_3			874
+MX6Q_PAD_KEY_COL0__AUDMUX_AUD5_TXC		875
+MX6Q_PAD_KEY_COL0__KPP_COL_0			876
+MX6Q_PAD_KEY_COL0__UART4_TXD			877
+MX6Q_PAD_KEY_COL0__GPIO_4_6			878
+MX6Q_PAD_KEY_COL0__DCIC1_DCIC_OUT		879
+MX6Q_PAD_KEY_COL0__SRC_ANY_PU_RST		880
+MX6Q_PAD_KEY_ROW0__ECSPI1_MOSI			881
+MX6Q_PAD_KEY_ROW0__ENET_TDATA_3			882
+MX6Q_PAD_KEY_ROW0__AUDMUX_AUD5_TXD		883
+MX6Q_PAD_KEY_ROW0__KPP_ROW_0			884
+MX6Q_PAD_KEY_ROW0__UART4_RXD			885
+MX6Q_PAD_KEY_ROW0__GPIO_4_7			886
+MX6Q_PAD_KEY_ROW0__DCIC2_DCIC_OUT		887
+MX6Q_PAD_KEY_ROW0__PL301_PER1_HADR_0		888
+MX6Q_PAD_KEY_COL1__ECSPI1_MISO			889
+MX6Q_PAD_KEY_COL1__ENET_MDIO			890
+MX6Q_PAD_KEY_COL1__AUDMUX_AUD5_TXFS		891
+MX6Q_PAD_KEY_COL1__KPP_COL_1			892
+MX6Q_PAD_KEY_COL1__UART5_TXD			893
+MX6Q_PAD_KEY_COL1__GPIO_4_8			894
+MX6Q_PAD_KEY_COL1__USDHC1_VSELECT		895
+MX6Q_PAD_KEY_COL1__PL301MX_PER1_HADR_1		896
+MX6Q_PAD_KEY_ROW1__ECSPI1_SS0			897
+MX6Q_PAD_KEY_ROW1__ENET_COL			898
+MX6Q_PAD_KEY_ROW1__AUDMUX_AUD5_RXD		899
+MX6Q_PAD_KEY_ROW1__KPP_ROW_1			900
+MX6Q_PAD_KEY_ROW1__UART5_RXD			901
+MX6Q_PAD_KEY_ROW1__GPIO_4_9			902
+MX6Q_PAD_KEY_ROW1__USDHC2_VSELECT		903
+MX6Q_PAD_KEY_ROW1__PL301_PER1_HADDR_2		904
+MX6Q_PAD_KEY_COL2__ECSPI1_SS1			905
+MX6Q_PAD_KEY_COL2__ENET_RDATA_2			906
+MX6Q_PAD_KEY_COL2__CAN1_TXCAN			907
+MX6Q_PAD_KEY_COL2__KPP_COL_2			908
+MX6Q_PAD_KEY_COL2__ENET_MDC			909
+MX6Q_PAD_KEY_COL2__GPIO_4_10			910
+MX6Q_PAD_KEY_COL2__USBOH3_H1_PWRCTL_WKP		911
+MX6Q_PAD_KEY_COL2__PL301_PER1_HADDR_3		912
+MX6Q_PAD_KEY_ROW2__ECSPI1_SS2			913
+MX6Q_PAD_KEY_ROW2__ENET_TDATA_2			914
+MX6Q_PAD_KEY_ROW2__CAN1_RXCAN			915
+MX6Q_PAD_KEY_ROW2__KPP_ROW_2			916
+MX6Q_PAD_KEY_ROW2__USDHC2_VSELECT		917
+MX6Q_PAD_KEY_ROW2__GPIO_4_11			918
+MX6Q_PAD_KEY_ROW2__HDMI_TX_CEC_LINE		919
+MX6Q_PAD_KEY_ROW2__PL301_PER1_HADR_4		920
+MX6Q_PAD_KEY_COL3__ECSPI1_SS3			921
+MX6Q_PAD_KEY_COL3__ENET_CRS			922
+MX6Q_PAD_KEY_COL3__HDMI_TX_DDC_SCL		923
+MX6Q_PAD_KEY_COL3__KPP_COL_3			924
+MX6Q_PAD_KEY_COL3__I2C2_SCL			925
+MX6Q_PAD_KEY_COL3__GPIO_4_12			926
+MX6Q_PAD_KEY_COL3__SPDIF_IN1			927
+MX6Q_PAD_KEY_COL3__PL301_PER1_HADR_5		928
+MX6Q_PAD_KEY_ROW3__OSC32K_32K_OUT		929
+MX6Q_PAD_KEY_ROW3__ASRC_ASRC_EXT_CLK		930
+MX6Q_PAD_KEY_ROW3__HDMI_TX_DDC_SDA		931
+MX6Q_PAD_KEY_ROW3__KPP_ROW_3			932
+MX6Q_PAD_KEY_ROW3__I2C2_SDA			933
+MX6Q_PAD_KEY_ROW3__GPIO_4_13			934
+MX6Q_PAD_KEY_ROW3__USDHC1_VSELECT		935
+MX6Q_PAD_KEY_ROW3__PL301_PER1_HADR_6		936
+MX6Q_PAD_KEY_COL4__CAN2_TXCAN			937
+MX6Q_PAD_KEY_COL4__IPU1_SISG_4			938
+MX6Q_PAD_KEY_COL4__USBOH3_USBOTG_OC		939
+MX6Q_PAD_KEY_COL4__KPP_COL_4			940
+MX6Q_PAD_KEY_COL4__UART5_RTS			941
+MX6Q_PAD_KEY_COL4__GPIO_4_14			942
+MX6Q_PAD_KEY_COL4__MMDC_DEBUG_49		943
+MX6Q_PAD_KEY_COL4__PL301_PER1_HADDR_7		944
+MX6Q_PAD_KEY_ROW4__CAN2_RXCAN			945
+MX6Q_PAD_KEY_ROW4__IPU1_SISG_5			946
+MX6Q_PAD_KEY_ROW4__USBOH3_USBOTG_PWR		947
+MX6Q_PAD_KEY_ROW4__KPP_ROW_4			948
+MX6Q_PAD_KEY_ROW4__UART5_CTS			949
+MX6Q_PAD_KEY_ROW4__GPIO_4_15			950
+MX6Q_PAD_KEY_ROW4__MMDC_DEBUG_50		951
+MX6Q_PAD_KEY_ROW4__PL301_PER1_HADR_8		952
+MX6Q_PAD_GPIO_0__CCM_CLKO			953
+MX6Q_PAD_GPIO_0__KPP_COL_5			954
+MX6Q_PAD_GPIO_0__ASRC_ASRC_EXT_CLK		955
+MX6Q_PAD_GPIO_0__EPIT1_EPITO			956
+MX6Q_PAD_GPIO_0__GPIO_1_0			957
+MX6Q_PAD_GPIO_0__USBOH3_USBH1_PWR		958
+MX6Q_PAD_GPIO_0__SNVS_HP_WRAP_SNVS_VIO5		959
+MX6Q_PAD_GPIO_1__ESAI1_SCKR			960
+MX6Q_PAD_GPIO_1__WDOG2_WDOG_B			961
+MX6Q_PAD_GPIO_1__KPP_ROW_5			962
+MX6Q_PAD_GPIO_1__PWM2_PWMO			963
+MX6Q_PAD_GPIO_1__GPIO_1_1			964
+MX6Q_PAD_GPIO_1__USDHC1_CD			965
+MX6Q_PAD_GPIO_1__SRC_TESTER_ACK			966
+MX6Q_PAD_GPIO_9__ESAI1_FSR			967
+MX6Q_PAD_GPIO_9__WDOG1_WDOG_B			968
+MX6Q_PAD_GPIO_9__KPP_COL_6			969
+MX6Q_PAD_GPIO_9__CCM_REF_EN_B			970
+MX6Q_PAD_GPIO_9__PWM1_PWMO			971
+MX6Q_PAD_GPIO_9__GPIO_1_9			972
+MX6Q_PAD_GPIO_9__USDHC1_WP			973
+MX6Q_PAD_GPIO_9__SRC_EARLY_RST			974
+MX6Q_PAD_GPIO_3__ESAI1_HCKR			975
+MX6Q_PAD_GPIO_3__OBSERVE_MUX_INT_OUT0		976
+MX6Q_PAD_GPIO_3__I2C3_SCL			977
+MX6Q_PAD_GPIO_3__ANATOP_24M_OUT			978
+MX6Q_PAD_GPIO_3__CCM_CLKO2			979
+MX6Q_PAD_GPIO_3__GPIO_1_3			980
+MX6Q_PAD_GPIO_3__USBOH3_USBH1_OC		981
+MX6Q_PAD_GPIO_3__MLB_MLBCLK			982
+MX6Q_PAD_GPIO_6__ESAI1_SCKT			983
+MX6Q_PAD_GPIO_6__OBSERVE_MUX_INT_OUT1		984
+MX6Q_PAD_GPIO_6__I2C3_SDA			985
+MX6Q_PAD_GPIO_6__CCM_CCM_OUT_0			986
+MX6Q_PAD_GPIO_6__CSU_CSU_INT_DEB		987
+MX6Q_PAD_GPIO_6__GPIO_1_6			988
+MX6Q_PAD_GPIO_6__USDHC2_LCTL			989
+MX6Q_PAD_GPIO_6__MLB_MLBSIG			990
+MX6Q_PAD_GPIO_2__ESAI1_FST			991
+MX6Q_PAD_GPIO_2__OBSERVE_MUX_INT_OUT2		992
+MX6Q_PAD_GPIO_2__KPP_ROW_6			993
+MX6Q_PAD_GPIO_2__CCM_CCM_OUT_1			994
+MX6Q_PAD_GPIO_2__CSU_CSU_ALARM_AUT_0		995
+MX6Q_PAD_GPIO_2__GPIO_1_2			996
+MX6Q_PAD_GPIO_2__USDHC2_WP			997
+MX6Q_PAD_GPIO_2__MLB_MLBDAT			998
+MX6Q_PAD_GPIO_4__ESAI1_HCKT			999
+MX6Q_PAD_GPIO_4__OBSERVE_MUX_INT_OUT3		1000
+MX6Q_PAD_GPIO_4__KPP_COL_7			1001
+MX6Q_PAD_GPIO_4__CCM_CCM_OUT_2			1002
+MX6Q_PAD_GPIO_4__CSU_CSU_ALARM_AUT_1		1003
+MX6Q_PAD_GPIO_4__GPIO_1_4			1004
+MX6Q_PAD_GPIO_4__USDHC2_CD			1005
+MX6Q_PAD_GPIO_4__OCOTP_CRL_WRAR_FUSE_LA		1006
+MX6Q_PAD_GPIO_5__ESAI1_TX2_RX3			1007
+MX6Q_PAD_GPIO_5__OBSERVE_MUX_INT_OUT4		1008
+MX6Q_PAD_GPIO_5__KPP_ROW_7			1009
+MX6Q_PAD_GPIO_5__CCM_CLKO			1010
+MX6Q_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2		1011
+MX6Q_PAD_GPIO_5__GPIO_1_5			1012
+MX6Q_PAD_GPIO_5__I2C3_SCL			1013
+MX6Q_PAD_GPIO_5__CHEETAH_EVENTI			1014
+MX6Q_PAD_GPIO_7__ESAI1_TX4_RX1			1015
+MX6Q_PAD_GPIO_7__ECSPI5_RDY			1016
+MX6Q_PAD_GPIO_7__EPIT1_EPITO			1017
+MX6Q_PAD_GPIO_7__CAN1_TXCAN			1018
+MX6Q_PAD_GPIO_7__UART2_TXD			1019
+MX6Q_PAD_GPIO_7__GPIO_1_7			1020
+MX6Q_PAD_GPIO_7__SPDIF_PLOCK			1021
+MX6Q_PAD_GPIO_7__USBOH3_OTGUSB_HST_MODE		1022
+MX6Q_PAD_GPIO_8__ESAI1_TX5_RX0			1023
+MX6Q_PAD_GPIO_8__ANATOP_ANATOP_32K_OUT		1024
+MX6Q_PAD_GPIO_8__EPIT2_EPITO			1025
+MX6Q_PAD_GPIO_8__CAN1_RXCAN			1026
+MX6Q_PAD_GPIO_8__UART2_RXD			1027
+MX6Q_PAD_GPIO_8__GPIO_1_8			1028
+MX6Q_PAD_GPIO_8__SPDIF_SRCLK			1029
+MX6Q_PAD_GPIO_8__USBOH3_OTG_PWRCTL_WAK		1030
+MX6Q_PAD_GPIO_16__ESAI1_TX3_RX2			1031
+MX6Q_PAD_GPIO_16__ENET_1588_EVENT2_IN		1032
+MX6Q_PAD_GPIO_16__ENET_ETHERNET_REF_OUT		1033
+MX6Q_PAD_GPIO_16__USDHC1_LCTL			1034
+MX6Q_PAD_GPIO_16__SPDIF_IN1			1035
+MX6Q_PAD_GPIO_16__GPIO_7_11			1036
+MX6Q_PAD_GPIO_16__I2C3_SDA			1037
+MX6Q_PAD_GPIO_16__SJC_DE_B			1038
+MX6Q_PAD_GPIO_17__ESAI1_TX0			1039
+MX6Q_PAD_GPIO_17__ENET_1588_EVENT3_IN		1040
+MX6Q_PAD_GPIO_17__CCM_PMIC_RDY			1041
+MX6Q_PAD_GPIO_17__SDMA_SDMA_EXT_EVENT_0		1042
+MX6Q_PAD_GPIO_17__SPDIF_OUT1			1043
+MX6Q_PAD_GPIO_17__GPIO_7_12			1044
+MX6Q_PAD_GPIO_17__SJC_JTAG_ACT			1045
+MX6Q_PAD_GPIO_18__ESAI1_TX1			1046
+MX6Q_PAD_GPIO_18__ENET_RX_CLK			1047
+MX6Q_PAD_GPIO_18__USDHC3_VSELECT		1048
+MX6Q_PAD_GPIO_18__SDMA_SDMA_EXT_EVENT_1		1049
+MX6Q_PAD_GPIO_18__ASRC_ASRC_EXT_CLK		1050
+MX6Q_PAD_GPIO_18__GPIO_7_13			1051
+MX6Q_PAD_GPIO_18__SNVS_HP_WRA_SNVS_VIO5		1052
+MX6Q_PAD_GPIO_18__SRC_SYSTEM_RST		1053
+MX6Q_PAD_GPIO_19__KPP_COL_5			1054
+MX6Q_PAD_GPIO_19__ENET_1588_EVENT0_OUT		1055
+MX6Q_PAD_GPIO_19__SPDIF_OUT1			1056
+MX6Q_PAD_GPIO_19__CCM_CLKO			1057
+MX6Q_PAD_GPIO_19__ECSPI1_RDY			1058
+MX6Q_PAD_GPIO_19__GPIO_4_5			1059
+MX6Q_PAD_GPIO_19__ENET_TX_ER			1060
+MX6Q_PAD_GPIO_19__SRC_INT_BOOT			1061
+MX6Q_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK		1062
+MX6Q_PAD_CSI0_PIXCLK__PCIE_CTRL_MUX_12		1063
+MX6Q_PAD_CSI0_PIXCLK__SDMA_DEBUG_PC_0		1064
+MX6Q_PAD_CSI0_PIXCLK__GPIO_5_18			1065
+MX6Q_PAD_CSI0_PIXCLK___MMDC_DEBUG_29		1066
+MX6Q_PAD_CSI0_PIXCLK__CHEETAH_EVENTO		1067
+MX6Q_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC		1068
+MX6Q_PAD_CSI0_MCLK__PCIE_CTRL_MUX_13		1069
+MX6Q_PAD_CSI0_MCLK__CCM_CLKO			1070
+MX6Q_PAD_CSI0_MCLK__SDMA_DEBUG_PC_1		1071
+MX6Q_PAD_CSI0_MCLK__GPIO_5_19			1072
+MX6Q_PAD_CSI0_MCLK__MMDC_MMDC_DEBUG_30		1073
+MX6Q_PAD_CSI0_MCLK__CHEETAH_TRCTL		1074
+MX6Q_PAD_CSI0_DATA_EN__IPU1_CSI0_DA_EN		1075
+MX6Q_PAD_CSI0_DATA_EN__WEIM_WEIM_D_0		1076
+MX6Q_PAD_CSI0_DATA_EN__PCIE_CTRL_MUX_14		1077
+MX6Q_PAD_CSI0_DATA_EN__SDMA_DEBUG_PC_2		1078
+MX6Q_PAD_CSI0_DATA_EN__GPIO_5_20		1079
+MX6Q_PAD_CSI0_DATA_EN__MMDC_DEBUG_31		1080
+MX6Q_PAD_CSI0_DATA_EN__CHEETAH_TRCLK		1081
+MX6Q_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC		1082
+MX6Q_PAD_CSI0_VSYNC__WEIM_WEIM_D_1		1083
+MX6Q_PAD_CSI0_VSYNC__PCIE_CTRL_MUX_15		1084
+MX6Q_PAD_CSI0_VSYNC__SDMA_DEBUG_PC_3		1085
+MX6Q_PAD_CSI0_VSYNC__GPIO_5_21			1086
+MX6Q_PAD_CSI0_VSYNC__MMDC_DEBUG_32		1087
+MX6Q_PAD_CSI0_VSYNC__CHEETAH_TRACE_0		1088
+MX6Q_PAD_CSI0_DAT4__IPU1_CSI0_D_4		1089
+MX6Q_PAD_CSI0_DAT4__WEIM_WEIM_D_2		1090
+MX6Q_PAD_CSI0_DAT4__ECSPI1_SCLK			1091
+MX6Q_PAD_CSI0_DAT4__KPP_COL_5			1092
+MX6Q_PAD_CSI0_DAT4__AUDMUX_AUD3_TXC		1093
+MX6Q_PAD_CSI0_DAT4__GPIO_5_22			1094
+MX6Q_PAD_CSI0_DAT4__MMDC_DEBUG_43		1095
+MX6Q_PAD_CSI0_DAT4__CHEETAH_TRACE_1		1096
+MX6Q_PAD_CSI0_DAT5__IPU1_CSI0_D_5		1097
+MX6Q_PAD_CSI0_DAT5__WEIM_WEIM_D_3		1098
+MX6Q_PAD_CSI0_DAT5__ECSPI1_MOSI			1099
+MX6Q_PAD_CSI0_DAT5__KPP_ROW_5			1100
+MX6Q_PAD_CSI0_DAT5__AUDMUX_AUD3_TXD		1101
+MX6Q_PAD_CSI0_DAT5__GPIO_5_23			1102
+MX6Q_PAD_CSI0_DAT5__MMDC_MMDC_DEBUG_44		1103
+MX6Q_PAD_CSI0_DAT5__CHEETAH_TRACE_2		1104
+MX6Q_PAD_CSI0_DAT6__IPU1_CSI0_D_6		1105
+MX6Q_PAD_CSI0_DAT6__WEIM_WEIM_D_4		1106
+MX6Q_PAD_CSI0_DAT6__ECSPI1_MISO			1107
+MX6Q_PAD_CSI0_DAT6__KPP_COL_6			1108
+MX6Q_PAD_CSI0_DAT6__AUDMUX_AUD3_TXFS		1109
+MX6Q_PAD_CSI0_DAT6__GPIO_5_24			1110
+MX6Q_PAD_CSI0_DAT6__MMDC_MMDC_DEBUG_45		1111
+MX6Q_PAD_CSI0_DAT6__CHEETAH_TRACE_3		1112
+MX6Q_PAD_CSI0_DAT7__IPU1_CSI0_D_7		1113
+MX6Q_PAD_CSI0_DAT7__WEIM_WEIM_D_5		1114
+MX6Q_PAD_CSI0_DAT7__ECSPI1_SS0			1115
+MX6Q_PAD_CSI0_DAT7__KPP_ROW_6			1116
+MX6Q_PAD_CSI0_DAT7__AUDMUX_AUD3_RXD		1117
+MX6Q_PAD_CSI0_DAT7__GPIO_5_25			1118
+MX6Q_PAD_CSI0_DAT7__MMDC_MMDC_DEBUG_46		1119
+MX6Q_PAD_CSI0_DAT7__CHEETAH_TRACE_4		1120
+MX6Q_PAD_CSI0_DAT8__IPU1_CSI0_D_8		1121
+MX6Q_PAD_CSI0_DAT8__WEIM_WEIM_D_6		1122
+MX6Q_PAD_CSI0_DAT8__ECSPI2_SCLK			1123
+MX6Q_PAD_CSI0_DAT8__KPP_COL_7			1124
+MX6Q_PAD_CSI0_DAT8__I2C1_SDA			1125
+MX6Q_PAD_CSI0_DAT8__GPIO_5_26			1126
+MX6Q_PAD_CSI0_DAT8__MMDC_MMDC_DEBUG_47		1127
+MX6Q_PAD_CSI0_DAT8__CHEETAH_TRACE_5		1128
+MX6Q_PAD_CSI0_DAT9__IPU1_CSI0_D_9		1129
+MX6Q_PAD_CSI0_DAT9__WEIM_WEIM_D_7		1130
+MX6Q_PAD_CSI0_DAT9__ECSPI2_MOSI			1131
+MX6Q_PAD_CSI0_DAT9__KPP_ROW_7			1132
+MX6Q_PAD_CSI0_DAT9__I2C1_SCL			1133
+MX6Q_PAD_CSI0_DAT9__GPIO_5_27			1134
+MX6Q_PAD_CSI0_DAT9__MMDC_MMDC_DEBUG_48		1135
+MX6Q_PAD_CSI0_DAT9__CHEETAH_TRACE_6		1136
+MX6Q_PAD_CSI0_DAT10__IPU1_CSI0_D_10		1137
+MX6Q_PAD_CSI0_DAT10__AUDMUX_AUD3_RXC		1138
+MX6Q_PAD_CSI0_DAT10__ECSPI2_MISO		1139
+MX6Q_PAD_CSI0_DAT10__UART1_TXD			1140
+MX6Q_PAD_CSI0_DAT10__SDMA_DEBUG_PC_4		1141
+MX6Q_PAD_CSI0_DAT10__GPIO_5_28			1142
+MX6Q_PAD_CSI0_DAT10__MMDC_MMDC_DEBUG_33		1143
+MX6Q_PAD_CSI0_DAT10__CHEETAH_TRACE_7		1144
+MX6Q_PAD_CSI0_DAT11__IPU1_CSI0_D_11		1145
+MX6Q_PAD_CSI0_DAT11__AUDMUX_AUD3_RXFS		1146
+MX6Q_PAD_CSI0_DAT11__ECSPI2_SS0			1147
+MX6Q_PAD_CSI0_DAT11__UART1_RXD			1148
+MX6Q_PAD_CSI0_DAT11__SDMA_DEBUG_PC_5		1149
+MX6Q_PAD_CSI0_DAT11__GPIO_5_29			1150
+MX6Q_PAD_CSI0_DAT11__MMDC_MMDC_DEBUG_34		1151
+MX6Q_PAD_CSI0_DAT11__CHEETAH_TRACE_8		1152
+MX6Q_PAD_CSI0_DAT12__IPU1_CSI0_D_12		1153
+MX6Q_PAD_CSI0_DAT12__WEIM_WEIM_D_8		1154
+MX6Q_PAD_CSI0_DAT12__PCIE_CTRL_MUX_16		1155
+MX6Q_PAD_CSI0_DAT12__UART4_TXD			1156
+MX6Q_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6		1157
+MX6Q_PAD_CSI0_DAT12__GPIO_5_30			1158
+MX6Q_PAD_CSI0_DAT12__MMDC_MMDC_DEBUG_35		1159
+MX6Q_PAD_CSI0_DAT12__CHEETAH_TRACE_9		1160
+MX6Q_PAD_CSI0_DAT13__IPU1_CSI0_D_13		1161
+MX6Q_PAD_CSI0_DAT13__WEIM_WEIM_D_9		1162
+MX6Q_PAD_CSI0_DAT13__PCIE_CTRL_MUX_17		1163
+MX6Q_PAD_CSI0_DAT13__UART4_RXD			1164
+MX6Q_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7		1165
+MX6Q_PAD_CSI0_DAT13__GPIO_5_31			1166
+MX6Q_PAD_CSI0_DAT13__MMDC_MMDC_DEBUG_36		1167
+MX6Q_PAD_CSI0_DAT13__CHEETAH_TRACE_10		1168
+MX6Q_PAD_CSI0_DAT14__IPU1_CSI0_D_14		1169
+MX6Q_PAD_CSI0_DAT14__WEIM_WEIM_D_10		1170
+MX6Q_PAD_CSI0_DAT14__PCIE_CTRL_MUX_18		1171
+MX6Q_PAD_CSI0_DAT14__UART5_TXD			1172
+MX6Q_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8		1173
+MX6Q_PAD_CSI0_DAT14__GPIO_6_0			1174
+MX6Q_PAD_CSI0_DAT14__MMDC_MMDC_DEBUG_37		1175
+MX6Q_PAD_CSI0_DAT14__CHEETAH_TRACE_11		1176
+MX6Q_PAD_CSI0_DAT15__IPU1_CSI0_D_15		1177
+MX6Q_PAD_CSI0_DAT15__WEIM_WEIM_D_11		1178
+MX6Q_PAD_CSI0_DAT15__PCIE_CTRL_MUX_19		1179
+MX6Q_PAD_CSI0_DAT15__UART5_RXD			1180
+MX6Q_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9		1181
+MX6Q_PAD_CSI0_DAT15__GPIO_6_1			1182
+MX6Q_PAD_CSI0_DAT15__MMDC_MMDC_DEBUG_38		1183
+MX6Q_PAD_CSI0_DAT15__CHEETAH_TRACE_12		1184
+MX6Q_PAD_CSI0_DAT16__IPU1_CSI0_D_16		1185
+MX6Q_PAD_CSI0_DAT16__WEIM_WEIM_D_12		1186
+MX6Q_PAD_CSI0_DAT16__PCIE_CTRL_MUX_20		1187
+MX6Q_PAD_CSI0_DAT16__UART4_RTS			1188
+MX6Q_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10		1189
+MX6Q_PAD_CSI0_DAT16__GPIO_6_2			1190
+MX6Q_PAD_CSI0_DAT16__MMDC_MMDC_DEBUG_39		1191
+MX6Q_PAD_CSI0_DAT16__CHEETAH_TRACE_13		1192
+MX6Q_PAD_CSI0_DAT17__IPU1_CSI0_D_17		1193
+MX6Q_PAD_CSI0_DAT17__WEIM_WEIM_D_13		1194
+MX6Q_PAD_CSI0_DAT17__PCIE_CTRL_MUX_21		1195
+MX6Q_PAD_CSI0_DAT17__UART4_CTS			1196
+MX6Q_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11		1197
+MX6Q_PAD_CSI0_DAT17__GPIO_6_3			1198
+MX6Q_PAD_CSI0_DAT17__MMDC_MMDC_DEBUG_40		1199
+MX6Q_PAD_CSI0_DAT17__CHEETAH_TRACE_14		1200
+MX6Q_PAD_CSI0_DAT18__IPU1_CSI0_D_18		1201
+MX6Q_PAD_CSI0_DAT18__WEIM_WEIM_D_14		1202
+MX6Q_PAD_CSI0_DAT18__PCIE_CTRL_MUX_22		1203
+MX6Q_PAD_CSI0_DAT18__UART5_RTS			1204
+MX6Q_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12		1205
+MX6Q_PAD_CSI0_DAT18__GPIO_6_4			1206
+MX6Q_PAD_CSI0_DAT18__MMDC_MMDC_DEBUG_41		1207
+MX6Q_PAD_CSI0_DAT18__CHEETAH_TRACE_15		1208
+MX6Q_PAD_CSI0_DAT19__IPU1_CSI0_D_19		1209
+MX6Q_PAD_CSI0_DAT19__WEIM_WEIM_D_15		1210
+MX6Q_PAD_CSI0_DAT19__PCIE_CTRL_MUX_23		1211
+MX6Q_PAD_CSI0_DAT19__UART5_CTS			1212
+MX6Q_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13		1213
+MX6Q_PAD_CSI0_DAT19__GPIO_6_5			1214
+MX6Q_PAD_CSI0_DAT19__MMDC_MMDC_DEBUG_42		1215
+MX6Q_PAD_CSI0_DAT19__ANATOP_TESTO_9		1216
+MX6Q_PAD_JTAG_TMS__SJC_TMS			1217
+MX6Q_PAD_JTAG_MOD__SJC_MOD			1218
+MX6Q_PAD_JTAG_TRSTB__SJC_TRSTB			1219
+MX6Q_PAD_JTAG_TDI__SJC_TDI			1220
+MX6Q_PAD_JTAG_TCK__SJC_TCK			1221
+MX6Q_PAD_JTAG_TDO__SJC_TDO			1222
+MX6Q_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3		1223
+MX6Q_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2		1224
+MX6Q_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK		1225
+MX6Q_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1		1226
+MX6Q_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0		1227
+MX6Q_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3		1228
+MX6Q_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK		1229
+MX6Q_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2		1230
+MX6Q_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1		1231
+MX6Q_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0		1232
+MX6Q_PAD_TAMPER__SNVS_LP_WRAP_SNVS_TD1		1233
+MX6Q_PAD_PMIC_ON_REQ__SNVS_LPWRAP_WKALM		1234
+MX6Q_PAD_PMIC_STBY_REQ__CCM_PMIC_STBYRQ		1235
+MX6Q_PAD_POR_B__SRC_POR_B			1236
+MX6Q_PAD_BOOT_MODE1__SRC_BOOT_MODE_1		1237
+MX6Q_PAD_RESET_IN_B__SRC_RESET_B		1238
+MX6Q_PAD_BOOT_MODE0__SRC_BOOT_MODE_0		1239
+MX6Q_PAD_TEST_MODE__TCU_TEST_MODE		1240
+MX6Q_PAD_SD3_DAT7__USDHC3_DAT7			1241
+MX6Q_PAD_SD3_DAT7__UART1_TXD			1242
+MX6Q_PAD_SD3_DAT7__PCIE_CTRL_MUX_24		1243
+MX6Q_PAD_SD3_DAT7__USBOH3_UH3_DFD_OUT_0		1244
+MX6Q_PAD_SD3_DAT7__USBOH3_UH2_DFD_OUT_0		1245
+MX6Q_PAD_SD3_DAT7__GPIO_6_17			1246
+MX6Q_PAD_SD3_DAT7__MIPI_CORE_DPHY_IN_12		1247
+MX6Q_PAD_SD3_DAT7__USBPHY2_CLK20DIV		1248
+MX6Q_PAD_SD3_DAT6__USDHC3_DAT6			1249
+MX6Q_PAD_SD3_DAT6__UART1_RXD			1250
+MX6Q_PAD_SD3_DAT6__PCIE_CTRL_MUX_25		1251
+MX6Q_PAD_SD3_DAT6__USBOH3_UH3_DFD_OUT_1		1252
+MX6Q_PAD_SD3_DAT6__USBOH3_UH2_DFD_OUT_1		1253
+MX6Q_PAD_SD3_DAT6__GPIO_6_18			1254
+MX6Q_PAD_SD3_DAT6__MIPI_CORE_DPHY_IN_13		1255
+MX6Q_PAD_SD3_DAT6__ANATOP_TESTO_10		1256
+MX6Q_PAD_SD3_DAT5__USDHC3_DAT5			1257
+MX6Q_PAD_SD3_DAT5__UART2_TXD			1258
+MX6Q_PAD_SD3_DAT5__PCIE_CTRL_MUX_26		1259
+MX6Q_PAD_SD3_DAT5__USBOH3_UH3_DFD_OUT_2		1260
+MX6Q_PAD_SD3_DAT5__USBOH3_UH2_DFD_OUT_2		1261
+MX6Q_PAD_SD3_DAT5__GPIO_7_0			1262
+MX6Q_PAD_SD3_DAT5__MIPI_CORE_DPHY_IN_14		1263
+MX6Q_PAD_SD3_DAT5__ANATOP_TESTO_11		1264
+MX6Q_PAD_SD3_DAT4__USDHC3_DAT4			1265
+MX6Q_PAD_SD3_DAT4__UART2_RXD			1266
+MX6Q_PAD_SD3_DAT4__PCIE_CTRL_MUX_27		1267
+MX6Q_PAD_SD3_DAT4__USBOH3_UH3_DFD_OUT_3		1268
+MX6Q_PAD_SD3_DAT4__USBOH3_UH2_DFD_OUT_3		1269
+MX6Q_PAD_SD3_DAT4__GPIO_7_1			1270
+MX6Q_PAD_SD3_DAT4__MIPI_CORE_DPHY_IN_15		1271
+MX6Q_PAD_SD3_DAT4__ANATOP_TESTO_12		1272
+MX6Q_PAD_SD3_CMD__USDHC3_CMD			1273
+MX6Q_PAD_SD3_CMD__UART2_CTS			1274
+MX6Q_PAD_SD3_CMD__CAN1_TXCAN			1275
+MX6Q_PAD_SD3_CMD__USBOH3_UH3_DFD_OUT_4		1276
+MX6Q_PAD_SD3_CMD__USBOH3_UH2_DFD_OUT_4		1277
+MX6Q_PAD_SD3_CMD__GPIO_7_2			1278
+MX6Q_PAD_SD3_CMD__MIPI_CORE_DPHY_IN_16		1279
+MX6Q_PAD_SD3_CMD__ANATOP_TESTO_13		1280
+MX6Q_PAD_SD3_CLK__USDHC3_CLK			1281
+MX6Q_PAD_SD3_CLK__UART2_RTS			1282
+MX6Q_PAD_SD3_CLK__CAN1_RXCAN			1283
+MX6Q_PAD_SD3_CLK__USBOH3_UH3_DFD_OUT_5		1284
+MX6Q_PAD_SD3_CLK__USBOH3_UH2_DFD_OUT_5		1285
+MX6Q_PAD_SD3_CLK__GPIO_7_3			1286
+MX6Q_PAD_SD3_CLK__MIPI_CORE_DPHY_IN_17		1287
+MX6Q_PAD_SD3_CLK__ANATOP_TESTO_14		1288
+MX6Q_PAD_SD3_DAT0__USDHC3_DAT0			1289
+MX6Q_PAD_SD3_DAT0__UART1_CTS			1290
+MX6Q_PAD_SD3_DAT0__CAN2_TXCAN			1291
+MX6Q_PAD_SD3_DAT0__USBOH3_UH3_DFD_OUT_6		1292
+MX6Q_PAD_SD3_DAT0__USBOH3_UH2_DFD_OUT_6		1293
+MX6Q_PAD_SD3_DAT0__GPIO_7_4			1294
+MX6Q_PAD_SD3_DAT0__MIPI_CORE_DPHY_IN_18		1295
+MX6Q_PAD_SD3_DAT0__ANATOP_TESTO_15		1296
+MX6Q_PAD_SD3_DAT1__USDHC3_DAT1			1297
+MX6Q_PAD_SD3_DAT1__UART1_RTS			1298
+MX6Q_PAD_SD3_DAT1__CAN2_RXCAN			1299
+MX6Q_PAD_SD3_DAT1__USBOH3_UH3_DFD_OUT_7		1300
+MX6Q_PAD_SD3_DAT1__USBOH3_UH2_DFD_OUT_7		1301
+MX6Q_PAD_SD3_DAT1__GPIO_7_5			1302
+MX6Q_PAD_SD3_DAT1__MIPI_CORE_DPHY_IN_19		1303
+MX6Q_PAD_SD3_DAT1__ANATOP_TESTI_0		1304
+MX6Q_PAD_SD3_DAT2__USDHC3_DAT2			1305
+MX6Q_PAD_SD3_DAT2__PCIE_CTRL_MUX_28		1306
+MX6Q_PAD_SD3_DAT2__USBOH3_UH3_DFD_OUT_8		1307
+MX6Q_PAD_SD3_DAT2__USBOH3_UH2_DFD_OUT_8		1308
+MX6Q_PAD_SD3_DAT2__GPIO_7_6			1309
+MX6Q_PAD_SD3_DAT2__MIPI_CORE_DPHY_IN_20		1310
+MX6Q_PAD_SD3_DAT2__ANATOP_TESTI_1		1311
+MX6Q_PAD_SD3_DAT3__USDHC3_DAT3			1312
+MX6Q_PAD_SD3_DAT3__UART3_CTS			1313
+MX6Q_PAD_SD3_DAT3__PCIE_CTRL_MUX_29		1314
+MX6Q_PAD_SD3_DAT3__USBOH3_UH3_DFD_OUT_9		1315
+MX6Q_PAD_SD3_DAT3__USBOH3_UH2_DFD_OUT_9		1316
+MX6Q_PAD_SD3_DAT3__GPIO_7_7			1317
+MX6Q_PAD_SD3_DAT3__MIPI_CORE_DPHY_IN_21		1318
+MX6Q_PAD_SD3_DAT3__ANATOP_TESTI_2		1319
+MX6Q_PAD_SD3_RST__USDHC3_RST			1320
+MX6Q_PAD_SD3_RST__UART3_RTS			1321
+MX6Q_PAD_SD3_RST__PCIE_CTRL_MUX_30		1322
+MX6Q_PAD_SD3_RST__USBOH3_UH3_DFD_OUT_10		1323
+MX6Q_PAD_SD3_RST__USBOH3_UH2_DFD_OUT_10		1324
+MX6Q_PAD_SD3_RST__GPIO_7_8			1325
+MX6Q_PAD_SD3_RST__MIPI_CORE_DPHY_IN_22		1326
+MX6Q_PAD_SD3_RST__ANATOP_ANATOP_TESTI_3		1327
+MX6Q_PAD_NANDF_CLE__RAWNAND_CLE			1328
+MX6Q_PAD_NANDF_CLE__IPU2_SISG_4			1329
+MX6Q_PAD_NANDF_CLE__PCIE_CTRL_MUX_31		1330
+MX6Q_PAD_NANDF_CLE__USBOH3_UH3_DFD_OT11		1331
+MX6Q_PAD_NANDF_CLE__USBOH3_UH2_DFD_OT11		1332
+MX6Q_PAD_NANDF_CLE__GPIO_6_7			1333
+MX6Q_PAD_NANDF_CLE__MIPI_CORE_DPHY_IN23		1334
+MX6Q_PAD_NANDF_CLE__TPSMP_HTRANS_0		1335
+MX6Q_PAD_NANDF_ALE__RAWNAND_ALE			1336
+MX6Q_PAD_NANDF_ALE__USDHC4_RST			1337
+MX6Q_PAD_NANDF_ALE__PCIE_CTRL_MUX_0		1338
+MX6Q_PAD_NANDF_ALE__USBOH3_UH3_DFD_OT12		1339
+MX6Q_PAD_NANDF_ALE__USBOH3_UH2_DFD_OT12		1340
+MX6Q_PAD_NANDF_ALE__GPIO_6_8			1341
+MX6Q_PAD_NANDF_ALE__MIPI_CR_DPHY_IN_24		1342
+MX6Q_PAD_NANDF_ALE__TPSMP_HTRANS_1		1343
+MX6Q_PAD_NANDF_WP_B__RAWNAND_RESETN		1344
+MX6Q_PAD_NANDF_WP_B__IPU2_SISG_5		1345
+MX6Q_PAD_NANDF_WP_B__PCIE_CTRL__MUX_1		1346
+MX6Q_PAD_NANDF_WP_B__USBOH3_UH3_DFDOT13		1347
+MX6Q_PAD_NANDF_WP_B__USBOH3_UH2_DFDOT13		1348
+MX6Q_PAD_NANDF_WP_B__GPIO_6_9			1349
+MX6Q_PAD_NANDF_WP_B__MIPI_CR_DPHY_OUT32		1350
+MX6Q_PAD_NANDF_WP_B__PL301_PER1_HSIZE_0		1351
+MX6Q_PAD_NANDF_RB0__RAWNAND_READY0		1352
+MX6Q_PAD_NANDF_RB0__IPU2_DI0_PIN1		1353
+MX6Q_PAD_NANDF_RB0__PCIE_CTRL_MUX_2		1354
+MX6Q_PAD_NANDF_RB0__USBOH3_UH3_DFD_OT14		1355
+MX6Q_PAD_NANDF_RB0__USBOH3_UH2_DFD_OT14		1356
+MX6Q_PAD_NANDF_RB0__GPIO_6_10			1357
+MX6Q_PAD_NANDF_RB0__MIPI_CR_DPHY_OUT_33		1358
+MX6Q_PAD_NANDF_RB0__PL301_PER1_HSIZE_1		1359
+MX6Q_PAD_NANDF_CS0__RAWNAND_CE0N		1360
+MX6Q_PAD_NANDF_CS0__USBOH3_UH3_DFD_OT15		1361
+MX6Q_PAD_NANDF_CS0__USBOH3_UH2_DFD_OT15		1362
+MX6Q_PAD_NANDF_CS0__GPIO_6_11			1363
+MX6Q_PAD_NANDF_CS0__PL301_PER1_HSIZE_2		1364
+MX6Q_PAD_NANDF_CS1__RAWNAND_CE1N		1365
+MX6Q_PAD_NANDF_CS1__USDHC4_VSELECT		1366
+MX6Q_PAD_NANDF_CS1__USDHC3_VSELECT		1367
+MX6Q_PAD_NANDF_CS1__PCIE_CTRL_MUX_3		1368
+MX6Q_PAD_NANDF_CS1__GPIO_6_14			1369
+MX6Q_PAD_NANDF_CS1__PL301_PER1_HRDYOUT		1370
+MX6Q_PAD_NANDF_CS2__RAWNAND_CE2N		1371
+MX6Q_PAD_NANDF_CS2__IPU1_SISG_0			1372
+MX6Q_PAD_NANDF_CS2__ESAI1_TX0			1373
+MX6Q_PAD_NANDF_CS2__WEIM_WEIM_CRE		1374
+MX6Q_PAD_NANDF_CS2__CCM_CLKO2			1375
+MX6Q_PAD_NANDF_CS2__GPIO_6_15			1376
+MX6Q_PAD_NANDF_CS2__IPU2_SISG_0			1377
+MX6Q_PAD_NANDF_CS3__RAWNAND_CE3N		1378
+MX6Q_PAD_NANDF_CS3__IPU1_SISG_1			1379
+MX6Q_PAD_NANDF_CS3__ESAI1_TX1			1380
+MX6Q_PAD_NANDF_CS3__WEIM_WEIM_A_26		1381
+MX6Q_PAD_NANDF_CS3__PCIE_CTRL_MUX_4		1382
+MX6Q_PAD_NANDF_CS3__GPIO_6_16			1383
+MX6Q_PAD_NANDF_CS3__IPU2_SISG_1			1384
+MX6Q_PAD_NANDF_CS3__TPSMP_CLK			1385
+MX6Q_PAD_SD4_CMD__USDHC4_CMD			1386
+MX6Q_PAD_SD4_CMD__RAWNAND_RDN			1387
+MX6Q_PAD_SD4_CMD__UART3_TXD			1388
+MX6Q_PAD_SD4_CMD__PCIE_CTRL_MUX_5		1389
+MX6Q_PAD_SD4_CMD__GPIO_7_9			1390
+MX6Q_PAD_SD4_CMD__TPSMP_HDATA_DIR		1391
+MX6Q_PAD_SD4_CLK__USDHC4_CLK			1392
+MX6Q_PAD_SD4_CLK__RAWNAND_WRN			1393
+MX6Q_PAD_SD4_CLK__UART3_RXD			1394
+MX6Q_PAD_SD4_CLK__PCIE_CTRL_MUX_6		1395
+MX6Q_PAD_SD4_CLK__GPIO_7_10			1396
+MX6Q_PAD_NANDF_D0__RAWNAND_D0			1397
+MX6Q_PAD_NANDF_D0__USDHC1_DAT4			1398
+MX6Q_PAD_NANDF_D0__GPU3D_GPU_DBG_OUT_0		1399
+MX6Q_PAD_NANDF_D0__USBOH3_UH2_DFD_OUT16		1400
+MX6Q_PAD_NANDF_D0__USBOH3_UH3_DFD_OUT16		1401
+MX6Q_PAD_NANDF_D0__GPIO_2_0			1402
+MX6Q_PAD_NANDF_D0__IPU1_IPU_DIAG_BUS_0		1403
+MX6Q_PAD_NANDF_D0__IPU2_IPU_DIAG_BUS_0		1404
+MX6Q_PAD_NANDF_D1__RAWNAND_D1			1405
+MX6Q_PAD_NANDF_D1__USDHC1_DAT5			1406
+MX6Q_PAD_NANDF_D1__GPU3D_GPU_DEBUG_OUT1		1407
+MX6Q_PAD_NANDF_D1__USBOH3_UH2_DFD_OUT17		1408
+MX6Q_PAD_NANDF_D1__USBOH3_UH3_DFD_OUT17		1409
+MX6Q_PAD_NANDF_D1__GPIO_2_1			1410
+MX6Q_PAD_NANDF_D1__IPU1_IPU_DIAG_BUS_1		1411
+MX6Q_PAD_NANDF_D1__IPU2_IPU_DIAG_BUS_1		1412
+MX6Q_PAD_NANDF_D2__RAWNAND_D2			1413
+MX6Q_PAD_NANDF_D2__USDHC1_DAT6			1414
+MX6Q_PAD_NANDF_D2__GPU3D_GPU_DBG_OUT_2		1415
+MX6Q_PAD_NANDF_D2__USBOH3_UH2_DFD_OUT18		1416
+MX6Q_PAD_NANDF_D2__USBOH3_UH3_DFD_OUT18		1417
+MX6Q_PAD_NANDF_D2__GPIO_2_2			1418
+MX6Q_PAD_NANDF_D2__IPU1_IPU_DIAG_BUS_2		1419
+MX6Q_PAD_NANDF_D2__IPU2_IPU_DIAG_BUS_2		1420
+MX6Q_PAD_NANDF_D3__RAWNAND_D3			1421
+MX6Q_PAD_NANDF_D3__USDHC1_DAT7			1422
+MX6Q_PAD_NANDF_D3__GPU3D_GPU_DBG_OUT_3		1423
+MX6Q_PAD_NANDF_D3__USBOH3_UH2_DFD_OUT19		1424
+MX6Q_PAD_NANDF_D3__USBOH3_UH3_DFD_OUT19		1425
+MX6Q_PAD_NANDF_D3__GPIO_2_3			1426
+MX6Q_PAD_NANDF_D3__IPU1_IPU_DIAG_BUS_3		1427
+MX6Q_PAD_NANDF_D3__IPU2_IPU_DIAG_BUS_3		1428
+MX6Q_PAD_NANDF_D4__RAWNAND_D4			1429
+MX6Q_PAD_NANDF_D4__USDHC2_DAT4			1430
+MX6Q_PAD_NANDF_D4__GPU3D_GPU_DBG_OUT_4		1431
+MX6Q_PAD_NANDF_D4__USBOH3_UH2_DFD_OUT20		1432
+MX6Q_PAD_NANDF_D4__USBOH3_UH3_DFD_OUT20		1433
+MX6Q_PAD_NANDF_D4__GPIO_2_4			1434
+MX6Q_PAD_NANDF_D4__IPU1_IPU_DIAG_BUS_4		1435
+MX6Q_PAD_NANDF_D4__IPU2_IPU_DIAG_BUS_4		1436
+MX6Q_PAD_NANDF_D5__RAWNAND_D5			1437
+MX6Q_PAD_NANDF_D5__USDHC2_DAT5			1438
+MX6Q_PAD_NANDF_D5__GPU3D_GPU_DBG_OUT_5		1439
+MX6Q_PAD_NANDF_D5__USBOH3_UH2_DFD_OUT21		1440
+MX6Q_PAD_NANDF_D5__USBOH3_UH3_DFD_OUT21		1441
+MX6Q_PAD_NANDF_D5__GPIO_2_5			1442
+MX6Q_PAD_NANDF_D5__IPU1_IPU_DIAG_BUS_5		1443
+MX6Q_PAD_NANDF_D5__IPU2_IPU_DIAG_BUS_5		1444
+MX6Q_PAD_NANDF_D6__RAWNAND_D6			1445
+MX6Q_PAD_NANDF_D6__USDHC2_DAT6			1446
+MX6Q_PAD_NANDF_D6__GPU3D_GPU_DBG_OUT_6		1447
+MX6Q_PAD_NANDF_D6__USBOH3_UH2_DFD_OUT22		1448
+MX6Q_PAD_NANDF_D6__USBOH3_UH3_DFD_OUT22		1449
+MX6Q_PAD_NANDF_D6__GPIO_2_6			1450
+MX6Q_PAD_NANDF_D6__IPU1_IPU_DIAG_BUS_6		1451
+MX6Q_PAD_NANDF_D6__IPU2_IPU_DIAG_BUS_6		1452
+MX6Q_PAD_NANDF_D7__RAWNAND_D7			1453
+MX6Q_PAD_NANDF_D7__USDHC2_DAT7			1454
+MX6Q_PAD_NANDF_D7__GPU3D_GPU_DBG_OUT_7		1455
+MX6Q_PAD_NANDF_D7__USBOH3_UH2_DFD_OUT23		1456
+MX6Q_PAD_NANDF_D7__USBOH3_UH3_DFD_OUT23		1457
+MX6Q_PAD_NANDF_D7__GPIO_2_7			1458
+MX6Q_PAD_NANDF_D7__IPU1_IPU_DIAG_BUS_7		1459
+MX6Q_PAD_NANDF_D7__IPU2_IPU_DIAG_BUS_7		1460
+MX6Q_PAD_SD4_DAT0__RAWNAND_D8			1461
+MX6Q_PAD_SD4_DAT0__USDHC4_DAT0			1462
+MX6Q_PAD_SD4_DAT0__RAWNAND_DQS			1463
+MX6Q_PAD_SD4_DAT0__USBOH3_UH2_DFD_OUT24		1464
+MX6Q_PAD_SD4_DAT0__USBOH3_UH3_DFD_OUT24		1465
+MX6Q_PAD_SD4_DAT0__GPIO_2_8			1466
+MX6Q_PAD_SD4_DAT0__IPU1_IPU_DIAG_BUS_8		1467
+MX6Q_PAD_SD4_DAT0__IPU2_IPU_DIAG_BUS_8		1468
+MX6Q_PAD_SD4_DAT1__RAWNAND_D9			1469
+MX6Q_PAD_SD4_DAT1__USDHC4_DAT1			1470
+MX6Q_PAD_SD4_DAT1__PWM3_PWMO			1471
+MX6Q_PAD_SD4_DAT1__USBOH3_UH2_DFD_OUT25		1472
+MX6Q_PAD_SD4_DAT1__USBOH3_UH3_DFD_OUT25		1473
+MX6Q_PAD_SD4_DAT1__GPIO_2_9			1474
+MX6Q_PAD_SD4_DAT1__IPU1_IPU_DIAG_BUS_9		1475
+MX6Q_PAD_SD4_DAT1__IPU2_IPU_DIAG_BUS_9		1476
+MX6Q_PAD_SD4_DAT2__RAWNAND_D10			1477
+MX6Q_PAD_SD4_DAT2__USDHC4_DAT2			1478
+MX6Q_PAD_SD4_DAT2__PWM4_PWMO			1479
+MX6Q_PAD_SD4_DAT2__USBOH3_UH2_DFD_OUT26		1480
+MX6Q_PAD_SD4_DAT2__USBOH3_UH3_DFD_OUT26		1481
+MX6Q_PAD_SD4_DAT2__GPIO_2_10			1482
+MX6Q_PAD_SD4_DAT2__IPU1_IPU_DIAG_BUS_10		1483
+MX6Q_PAD_SD4_DAT2__IPU2_IPU_DIAG_BUS_10		1484
+MX6Q_PAD_SD4_DAT3__RAWNAND_D11			1485
+MX6Q_PAD_SD4_DAT3__USDHC4_DAT3			1486
+MX6Q_PAD_SD4_DAT3__USBOH3_UH2_DFD_OUT27		1487
+MX6Q_PAD_SD4_DAT3__USBOH3_UH3_DFD_OUT27		1488
+MX6Q_PAD_SD4_DAT3__GPIO_2_11			1489
+MX6Q_PAD_SD4_DAT3__IPU1_IPU_DIAG_BUS_11		1490
+MX6Q_PAD_SD4_DAT3__IPU2_IPU_DIAG_BUS_11		1491
+MX6Q_PAD_SD4_DAT4__RAWNAND_D12			1492
+MX6Q_PAD_SD4_DAT4__USDHC4_DAT4			1493
+MX6Q_PAD_SD4_DAT4__UART2_RXD			1494
+MX6Q_PAD_SD4_DAT4__USBOH3_UH2_DFD_OUT28		1495
+MX6Q_PAD_SD4_DAT4__USBOH3_UH3_DFD_OUT28		1496
+MX6Q_PAD_SD4_DAT4__GPIO_2_12			1497
+MX6Q_PAD_SD4_DAT4__IPU1_IPU_DIAG_BUS_12		1498
+MX6Q_PAD_SD4_DAT4__IPU2_IPU_DIAG_BUS_12		1499
+MX6Q_PAD_SD4_DAT5__RAWNAND_D13			1500
+MX6Q_PAD_SD4_DAT5__USDHC4_DAT5			1501
+MX6Q_PAD_SD4_DAT5__UART2_RTS			1502
+MX6Q_PAD_SD4_DAT5__USBOH3_UH2_DFD_OUT29		1503
+MX6Q_PAD_SD4_DAT5__USBOH3_UH3_DFD_OUT29		1504
+MX6Q_PAD_SD4_DAT5__GPIO_2_13			1505
+MX6Q_PAD_SD4_DAT5__IPU1_IPU_DIAG_BUS_13		1506
+MX6Q_PAD_SD4_DAT5__IPU2_IPU_DIAG_BUS_13		1507
+MX6Q_PAD_SD4_DAT6__RAWNAND_D14			1508
+MX6Q_PAD_SD4_DAT6__USDHC4_DAT6			1509
+MX6Q_PAD_SD4_DAT6__UART2_CTS			1510
+MX6Q_PAD_SD4_DAT6__USBOH3_UH2_DFD_OUT30		1511
+MX6Q_PAD_SD4_DAT6__USBOH3_UH3_DFD_OUT30		1512
+MX6Q_PAD_SD4_DAT6__GPIO_2_14			1513
+MX6Q_PAD_SD4_DAT6__IPU1_IPU_DIAG_BUS_14		1514
+MX6Q_PAD_SD4_DAT6__IPU2_IPU_DIAG_BUS_14		1515
+MX6Q_PAD_SD4_DAT7__RAWNAND_D15			1516
+MX6Q_PAD_SD4_DAT7__USDHC4_DAT7			1517
+MX6Q_PAD_SD4_DAT7__UART2_TXD			1518
+MX6Q_PAD_SD4_DAT7__USBOH3_UH2_DFD_OUT31		1519
+MX6Q_PAD_SD4_DAT7__USBOH3_UH3_DFD_OUT31		1520
+MX6Q_PAD_SD4_DAT7__GPIO_2_15			1521
+MX6Q_PAD_SD4_DAT7__IPU1_IPU_DIAG_BUS_15		1522
+MX6Q_PAD_SD4_DAT7__IPU2_IPU_DIAG_BUS_15		1523
+MX6Q_PAD_SD1_DAT1__USDHC1_DAT1			1524
+MX6Q_PAD_SD1_DAT1__ECSPI5_SS0			1525
+MX6Q_PAD_SD1_DAT1__PWM3_PWMO			1526
+MX6Q_PAD_SD1_DAT1__GPT_CAPIN2			1527
+MX6Q_PAD_SD1_DAT1__PCIE_CTRL_MUX_7		1528
+MX6Q_PAD_SD1_DAT1__GPIO_1_17			1529
+MX6Q_PAD_SD1_DAT1__HDMI_TX_OPHYDTB_0		1530
+MX6Q_PAD_SD1_DAT1__ANATOP_TESTO_8		1531
+MX6Q_PAD_SD1_DAT0__USDHC1_DAT0			1532
+MX6Q_PAD_SD1_DAT0__ECSPI5_MISO			1533
+MX6Q_PAD_SD1_DAT0__CAAM_WRAP_RNG_OSCOBS		1534
+MX6Q_PAD_SD1_DAT0__GPT_CAPIN1			1535
+MX6Q_PAD_SD1_DAT0__PCIE_CTRL_MUX_8		1536
+MX6Q_PAD_SD1_DAT0__GPIO_1_16			1537
+MX6Q_PAD_SD1_DAT0__HDMI_TX_OPHYDTB_1		1538
+MX6Q_PAD_SD1_DAT0__ANATOP_TESTO_7		1539
+MX6Q_PAD_SD1_DAT3__USDHC1_DAT3			1540
+MX6Q_PAD_SD1_DAT3__ECSPI5_SS2			1541
+MX6Q_PAD_SD1_DAT3__GPT_CMPOUT3			1542
+MX6Q_PAD_SD1_DAT3__PWM1_PWMO			1543
+MX6Q_PAD_SD1_DAT3__WDOG2_WDOG_B			1544
+MX6Q_PAD_SD1_DAT3__GPIO_1_21			1545
+MX6Q_PAD_SD1_DAT3__WDOG2_WDOG_RST_B_DEB		1546
+MX6Q_PAD_SD1_DAT3__ANATOP_TESTO_6		1547
+MX6Q_PAD_SD1_CMD__USDHC1_CMD			1548
+MX6Q_PAD_SD1_CMD__ECSPI5_MOSI			1549
+MX6Q_PAD_SD1_CMD__PWM4_PWMO			1550
+MX6Q_PAD_SD1_CMD__GPT_CMPOUT1			1551
+MX6Q_PAD_SD1_CMD__GPIO_1_18			1552
+MX6Q_PAD_SD1_CMD__ANATOP_TESTO_5		1553
+MX6Q_PAD_SD1_DAT2__USDHC1_DAT2			1554
+MX6Q_PAD_SD1_DAT2__ECSPI5_SS1			1555
+MX6Q_PAD_SD1_DAT2__GPT_CMPOUT2			1556
+MX6Q_PAD_SD1_DAT2__PWM2_PWMO			1557
+MX6Q_PAD_SD1_DAT2__WDOG1_WDOG_B			1558
+MX6Q_PAD_SD1_DAT2__GPIO_1_19			1559
+MX6Q_PAD_SD1_DAT2__WDOG1_WDOG_RST_B_DEB		1560
+MX6Q_PAD_SD1_DAT2__ANATOP_TESTO_4		1561
+MX6Q_PAD_SD1_CLK__USDHC1_CLK			1562
+MX6Q_PAD_SD1_CLK__ECSPI5_SCLK			1563
+MX6Q_PAD_SD1_CLK__OSC32K_32K_OUT		1564
+MX6Q_PAD_SD1_CLK__GPT_CLKIN			1565
+MX6Q_PAD_SD1_CLK__GPIO_1_20			1566
+MX6Q_PAD_SD1_CLK__PHY_DTB_0			1567
+MX6Q_PAD_SD1_CLK__SATA_PHY_DTB_0		1568
+MX6Q_PAD_SD2_CLK__USDHC2_CLK			1569
+MX6Q_PAD_SD2_CLK__ECSPI5_SCLK			1570
+MX6Q_PAD_SD2_CLK__KPP_COL_5			1571
+MX6Q_PAD_SD2_CLK__AUDMUX_AUD4_RXFS		1572
+MX6Q_PAD_SD2_CLK__PCIE_CTRL_MUX_9		1573
+MX6Q_PAD_SD2_CLK__GPIO_1_10			1574
+MX6Q_PAD_SD2_CLK__PHY_DTB_1			1575
+MX6Q_PAD_SD2_CLK__SATA_PHY_DTB_1		1576
+MX6Q_PAD_SD2_CMD__USDHC2_CMD			1577
+MX6Q_PAD_SD2_CMD__ECSPI5_MOSI			1578
+MX6Q_PAD_SD2_CMD__KPP_ROW_5			1579
+MX6Q_PAD_SD2_CMD__AUDMUX_AUD4_RXC		1580
+MX6Q_PAD_SD2_CMD__PCIE_CTRL_MUX_10		1581
+MX6Q_PAD_SD2_CMD__GPIO_1_11			1582
+MX6Q_PAD_SD2_DAT3__USDHC2_DAT3			1583
+MX6Q_PAD_SD2_DAT3__ECSPI5_SS3			1584
+MX6Q_PAD_SD2_DAT3__KPP_COL_6			1585
+MX6Q_PAD_SD2_DAT3__AUDMUX_AUD4_TXC		1586
+MX6Q_PAD_SD2_DAT3__PCIE_CTRL_MUX_11		1587
+MX6Q_PAD_SD2_DAT3__GPIO_1_12			1588
+MX6Q_PAD_SD2_DAT3__SJC_DONE			1589
+MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3		1590
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,mxs-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,mxs-pinctrl.txt
new file mode 100644
index 0000000..f7e8e8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,mxs-pinctrl.txt
@@ -0,0 +1,918 @@
+* Freescale MXS Pin Controller
+
+The pins controlled by the mxs pin controller are organized in banks of
+32 pins each.  Each pin has 4 multiplexing functions, and generally the
+4th function is GPIO.  The configuration on the pins covers drive
+strength, voltage and pull-up.
+
+Required properties:
+- compatible: "fsl,imx23-pinctrl" or "fsl,imx28-pinctrl"
+- reg: Should contain the register physical address and length for the
+  pin controller.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices.
+
+The mxs pin controller node acts as a container for an arbitrary number of
+subnodes.  Each of these subnodes represents some desired configuration for
+a group of pins, and only affects those parameters that are explicitly listed.
+In other words, a subnode that describes a drive strength parameter implies no
+information about pull-up. For this reason, even seemingly boolean values are
+actually tristates in this binding: unspecified, off, or on. Unspecified is
+represented as an absent property, and off/on are represented as integer
+values 0 and 1.
+
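+For instance, as a minimal sketch of this rule (the label, node name and
+values are made up for illustration; 0x0000 is MX28_PAD_GPMI_D00 from the
+table at the end of this document), a subnode listing only a drive
+strength affects that parameter alone and says nothing about voltage or
+pull-up:
+
+	gpmi_d0_cfg_sketch: gpmi-d0-cfg-sketch {
+		fsl,pinmux-ids = <0x0000>;	/* MX28_PAD_GPMI_D00 */
+		fsl,drive-strength = <1>;	/* 8 mA */
+		/* fsl,voltage and fsl,pull-up intentionally unspecified */
+	};
+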
+The subnodes under the mxs pin controller node fall into two categories.
+The first sets up a group of pins for a function, covering both the mux
+selection and the pin configurations; this binding document calls it a
+group node.  The second adjusts the pin configuration for particular pins
+that need a configuration different from the one defined in the group
+node; this binding document calls it a config node.
+
+On mxs, there is no hardware pin group.  A pin group in this binding
+simply means a set of pins put together so that a particular peripheral
+can work in a particular function, like SSP0 functioning as mmc0-8bit.
+That said, the group node should include all the pins needed for one
+function rather than having these pins spread over several group nodes.
+It also means that each "pinctrl-*" phandle in a client device node
+should point to only one group node, while it may reference multiple
+config nodes to adjust the configuration of some pins in the group (see
+the client node sketch after the example below).
+
+Required subnode-properties:
+- fsl,pinmux-ids: An integer array.  Each integer in the array specifies
+  a pin with a given mux function, with bank, pin and mux packed as below
+  (a worked packing example follows this property list).
+
+    [15..12] : bank number
+    [11..4]  : pin number
+    [3..0]   : mux selection
+
+  This packed integer is used as an entity by both group and config nodes
+  to identify a pin.  The mux selection in the integer takes effect only
+  in a group node and is ignored by the driver for a config node, since a
+  config node is only meant to set up pin configurations.
+
+  Valid values for these integers are listed below.
+
+- reg: Should be the index of the group node among the group nodes for
+  the same function.  This property is required only for group nodes, and
+  should not be present in any config node.
+
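+As a worked example of the fsl,pinmux-ids packing above: bank 2, pin 9
+with mux selection 0 packs to (2 << 12) | (9 << 4) | 0 = 0x2090, which is
+the id listed at the end of this document for
+MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT.
+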
+Optional subnode-properties:
+- fsl,drive-strength: Integer.
+    0: 4 mA
+    1: 8 mA
+    2: 12 mA
+    3: 16 mA
+- fsl,voltage: Integer.
+    0: 1.8 V
+    1: 3.3 V
+- fsl,pull-up: Integer.
+    0: Disable the internal pull-up
+    1: Enable the internal pull-up
+
+Examples:
+
+pinctrl@80018000 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	compatible = "fsl,imx28-pinctrl";
+	reg = <0x80018000 0x2000>;
+
+	mmc0_8bit_pins_a: mmc0-8bit@0 {
+		reg = <0>;
+		fsl,pinmux-ids = <
+			0x2000 0x2010 0x2020 0x2030
+			0x2040 0x2050 0x2060 0x2070
+			0x2080 0x2090 0x20a0>;
+		fsl,drive-strength = <1>;
+		fsl,voltage = <1>;
+		fsl,pull-up = <1>;
+	};
+
+	mmc_cd_cfg: mmc-cd-cfg {
+		fsl,pinmux-ids = <0x2090>;
+		fsl,pull-up = <0>;
+	};
+
+	mmc_sck_cfg: mmc-sck-cfg {
+		fsl,pinmux-ids = <0x20a0>;
+		fsl,drive-strength = <2>;
+		fsl,pull-up = <0>;
+	};
+};
+
+In this example, the group node mmc0-8bit defines a group of pins for mxs
+SSP0 to function as an 8-bit mmc device, with 8 mA, 3.3 V and pull-up
+configurations applied to all these pins.  The config nodes mmc-cd-cfg
+and mmc-sck-cfg adjust the configuration of the card-detect and clock
+pins away from what the group node mmc0-8bit defines.  Only the
+configuration properties to be adjusted need to be listed in the config
+nodes.
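+
+As an illustrative sketch only (the node name, unit address and the
+"default" state name are examples, not defined by this binding), a client
+device node would then reference the single group node together with the
+config nodes in one "pinctrl-*" phandle list:
+
+	ssp0: ssp@80010000 {
+		/* peripheral-specific properties omitted */
+		pinctrl-names = "default";
+		pinctrl-0 = <&mmc0_8bit_pins_a &mmc_cd_cfg &mmc_sck_cfg>;
+	};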
+
+Valid values for i.MX28 pinmux-id:
+
+pinmux						id
+------						--
+MX28_PAD_GPMI_D00__GPMI_D0			0x0000
+MX28_PAD_GPMI_D01__GPMI_D1			0x0010
+MX28_PAD_GPMI_D02__GPMI_D2			0x0020
+MX28_PAD_GPMI_D03__GPMI_D3			0x0030
+MX28_PAD_GPMI_D04__GPMI_D4			0x0040
+MX28_PAD_GPMI_D05__GPMI_D5			0x0050
+MX28_PAD_GPMI_D06__GPMI_D6			0x0060
+MX28_PAD_GPMI_D07__GPMI_D7			0x0070
+MX28_PAD_GPMI_CE0N__GPMI_CE0N			0x0100
+MX28_PAD_GPMI_CE1N__GPMI_CE1N			0x0110
+MX28_PAD_GPMI_CE2N__GPMI_CE2N			0x0120
+MX28_PAD_GPMI_CE3N__GPMI_CE3N			0x0130
+MX28_PAD_GPMI_RDY0__GPMI_READY0			0x0140
+MX28_PAD_GPMI_RDY1__GPMI_READY1			0x0150
+MX28_PAD_GPMI_RDY2__GPMI_READY2			0x0160
+MX28_PAD_GPMI_RDY3__GPMI_READY3			0x0170
+MX28_PAD_GPMI_RDN__GPMI_RDN			0x0180
+MX28_PAD_GPMI_WRN__GPMI_WRN			0x0190
+MX28_PAD_GPMI_ALE__GPMI_ALE			0x01a0
+MX28_PAD_GPMI_CLE__GPMI_CLE			0x01b0
+MX28_PAD_GPMI_RESETN__GPMI_RESETN		0x01c0
+MX28_PAD_LCD_D00__LCD_D0			0x1000
+MX28_PAD_LCD_D01__LCD_D1			0x1010
+MX28_PAD_LCD_D02__LCD_D2			0x1020
+MX28_PAD_LCD_D03__LCD_D3			0x1030
+MX28_PAD_LCD_D04__LCD_D4			0x1040
+MX28_PAD_LCD_D05__LCD_D5			0x1050
+MX28_PAD_LCD_D06__LCD_D6			0x1060
+MX28_PAD_LCD_D07__LCD_D7			0x1070
+MX28_PAD_LCD_D08__LCD_D8			0x1080
+MX28_PAD_LCD_D09__LCD_D9			0x1090
+MX28_PAD_LCD_D10__LCD_D10			0x10a0
+MX28_PAD_LCD_D11__LCD_D11			0x10b0
+MX28_PAD_LCD_D12__LCD_D12			0x10c0
+MX28_PAD_LCD_D13__LCD_D13			0x10d0
+MX28_PAD_LCD_D14__LCD_D14			0x10e0
+MX28_PAD_LCD_D15__LCD_D15			0x10f0
+MX28_PAD_LCD_D16__LCD_D16			0x1100
+MX28_PAD_LCD_D17__LCD_D17			0x1110
+MX28_PAD_LCD_D18__LCD_D18			0x1120
+MX28_PAD_LCD_D19__LCD_D19			0x1130
+MX28_PAD_LCD_D20__LCD_D20			0x1140
+MX28_PAD_LCD_D21__LCD_D21			0x1150
+MX28_PAD_LCD_D22__LCD_D22			0x1160
+MX28_PAD_LCD_D23__LCD_D23			0x1170
+MX28_PAD_LCD_RD_E__LCD_RD_E			0x1180
+MX28_PAD_LCD_WR_RWN__LCD_WR_RWN			0x1190
+MX28_PAD_LCD_RS__LCD_RS				0x11a0
+MX28_PAD_LCD_CS__LCD_CS				0x11b0
+MX28_PAD_LCD_VSYNC__LCD_VSYNC			0x11c0
+MX28_PAD_LCD_HSYNC__LCD_HSYNC			0x11d0
+MX28_PAD_LCD_DOTCLK__LCD_DOTCLK			0x11e0
+MX28_PAD_LCD_ENABLE__LCD_ENABLE			0x11f0
+MX28_PAD_SSP0_DATA0__SSP0_D0			0x2000
+MX28_PAD_SSP0_DATA1__SSP0_D1			0x2010
+MX28_PAD_SSP0_DATA2__SSP0_D2			0x2020
+MX28_PAD_SSP0_DATA3__SSP0_D3			0x2030
+MX28_PAD_SSP0_DATA4__SSP0_D4			0x2040
+MX28_PAD_SSP0_DATA5__SSP0_D5			0x2050
+MX28_PAD_SSP0_DATA6__SSP0_D6			0x2060
+MX28_PAD_SSP0_DATA7__SSP0_D7			0x2070
+MX28_PAD_SSP0_CMD__SSP0_CMD			0x2080
+MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT		0x2090
+MX28_PAD_SSP0_SCK__SSP0_SCK			0x20a0
+MX28_PAD_SSP1_SCK__SSP1_SCK			0x20c0
+MX28_PAD_SSP1_CMD__SSP1_CMD			0x20d0
+MX28_PAD_SSP1_DATA0__SSP1_D0			0x20e0
+MX28_PAD_SSP1_DATA3__SSP1_D3			0x20f0
+MX28_PAD_SSP2_SCK__SSP2_SCK			0x2100
+MX28_PAD_SSP2_MOSI__SSP2_CMD			0x2110
+MX28_PAD_SSP2_MISO__SSP2_D0			0x2120
+MX28_PAD_SSP2_SS0__SSP2_D3			0x2130
+MX28_PAD_SSP2_SS1__SSP2_D4			0x2140
+MX28_PAD_SSP2_SS2__SSP2_D5			0x2150
+MX28_PAD_SSP3_SCK__SSP3_SCK			0x2180
+MX28_PAD_SSP3_MOSI__SSP3_CMD			0x2190
+MX28_PAD_SSP3_MISO__SSP3_D0			0x21a0
+MX28_PAD_SSP3_SS0__SSP3_D3			0x21b0
+MX28_PAD_AUART0_RX__AUART0_RX			0x3000
+MX28_PAD_AUART0_TX__AUART0_TX			0x3010
+MX28_PAD_AUART0_CTS__AUART0_CTS			0x3020
+MX28_PAD_AUART0_RTS__AUART0_RTS			0x3030
+MX28_PAD_AUART1_RX__AUART1_RX			0x3040
+MX28_PAD_AUART1_TX__AUART1_TX			0x3050
+MX28_PAD_AUART1_CTS__AUART1_CTS			0x3060
+MX28_PAD_AUART1_RTS__AUART1_RTS			0x3070
+MX28_PAD_AUART2_RX__AUART2_RX			0x3080
+MX28_PAD_AUART2_TX__AUART2_TX			0x3090
+MX28_PAD_AUART2_CTS__AUART2_CTS			0x30a0
+MX28_PAD_AUART2_RTS__AUART2_RTS			0x30b0
+MX28_PAD_AUART3_RX__AUART3_RX			0x30c0
+MX28_PAD_AUART3_TX__AUART3_TX			0x30d0
+MX28_PAD_AUART3_CTS__AUART3_CTS			0x30e0
+MX28_PAD_AUART3_RTS__AUART3_RTS			0x30f0
+MX28_PAD_PWM0__PWM_0				0x3100
+MX28_PAD_PWM1__PWM_1				0x3110
+MX28_PAD_PWM2__PWM_2				0x3120
+MX28_PAD_SAIF0_MCLK__SAIF0_MCLK			0x3140
+MX28_PAD_SAIF0_LRCLK__SAIF0_LRCLK		0x3150
+MX28_PAD_SAIF0_BITCLK__SAIF0_BITCLK		0x3160
+MX28_PAD_SAIF0_SDATA0__SAIF0_SDATA0		0x3170
+MX28_PAD_I2C0_SCL__I2C0_SCL			0x3180
+MX28_PAD_I2C0_SDA__I2C0_SDA			0x3190
+MX28_PAD_SAIF1_SDATA0__SAIF1_SDATA0		0x31a0
+MX28_PAD_SPDIF__SPDIF_TX			0x31b0
+MX28_PAD_PWM3__PWM_3				0x31c0
+MX28_PAD_PWM4__PWM_4				0x31d0
+MX28_PAD_LCD_RESET__LCD_RESET			0x31e0
+MX28_PAD_ENET0_MDC__ENET0_MDC			0x4000
+MX28_PAD_ENET0_MDIO__ENET0_MDIO			0x4010
+MX28_PAD_ENET0_RX_EN__ENET0_RX_EN		0x4020
+MX28_PAD_ENET0_RXD0__ENET0_RXD0			0x4030
+MX28_PAD_ENET0_RXD1__ENET0_RXD1			0x4040
+MX28_PAD_ENET0_TX_CLK__ENET0_TX_CLK		0x4050
+MX28_PAD_ENET0_TX_EN__ENET0_TX_EN		0x4060
+MX28_PAD_ENET0_TXD0__ENET0_TXD0			0x4070
+MX28_PAD_ENET0_TXD1__ENET0_TXD1			0x4080
+MX28_PAD_ENET0_RXD2__ENET0_RXD2			0x4090
+MX28_PAD_ENET0_RXD3__ENET0_RXD3			0x40a0
+MX28_PAD_ENET0_TXD2__ENET0_TXD2			0x40b0
+MX28_PAD_ENET0_TXD3__ENET0_TXD3			0x40c0
+MX28_PAD_ENET0_RX_CLK__ENET0_RX_CLK		0x40d0
+MX28_PAD_ENET0_COL__ENET0_COL			0x40e0
+MX28_PAD_ENET0_CRS__ENET0_CRS			0x40f0
+MX28_PAD_ENET_CLK__CLKCTRL_ENET			0x4100
+MX28_PAD_JTAG_RTCK__JTAG_RTCK			0x4140
+MX28_PAD_EMI_D00__EMI_DATA0			0x5000
+MX28_PAD_EMI_D01__EMI_DATA1			0x5010
+MX28_PAD_EMI_D02__EMI_DATA2			0x5020
+MX28_PAD_EMI_D03__EMI_DATA3			0x5030
+MX28_PAD_EMI_D04__EMI_DATA4			0x5040
+MX28_PAD_EMI_D05__EMI_DATA5			0x5050
+MX28_PAD_EMI_D06__EMI_DATA6			0x5060
+MX28_PAD_EMI_D07__EMI_DATA7			0x5070
+MX28_PAD_EMI_D08__EMI_DATA8			0x5080
+MX28_PAD_EMI_D09__EMI_DATA9			0x5090
+MX28_PAD_EMI_D10__EMI_DATA10			0x50a0
+MX28_PAD_EMI_D11__EMI_DATA11			0x50b0
+MX28_PAD_EMI_D12__EMI_DATA12			0x50c0
+MX28_PAD_EMI_D13__EMI_DATA13			0x50d0
+MX28_PAD_EMI_D14__EMI_DATA14			0x50e0
+MX28_PAD_EMI_D15__EMI_DATA15			0x50f0
+MX28_PAD_EMI_ODT0__EMI_ODT0			0x5100
+MX28_PAD_EMI_DQM0__EMI_DQM0			0x5110
+MX28_PAD_EMI_ODT1__EMI_ODT1			0x5120
+MX28_PAD_EMI_DQM1__EMI_DQM1			0x5130
+MX28_PAD_EMI_DDR_OPEN_FB__EMI_DDR_OPEN_FEEDBACK	0x5140
+MX28_PAD_EMI_CLK__EMI_CLK			0x5150
+MX28_PAD_EMI_DQS0__EMI_DQS0			0x5160
+MX28_PAD_EMI_DQS1__EMI_DQS1			0x5170
+MX28_PAD_EMI_DDR_OPEN__EMI_DDR_OPEN		0x51a0
+MX28_PAD_EMI_A00__EMI_ADDR0			0x6000
+MX28_PAD_EMI_A01__EMI_ADDR1			0x6010
+MX28_PAD_EMI_A02__EMI_ADDR2			0x6020
+MX28_PAD_EMI_A03__EMI_ADDR3			0x6030
+MX28_PAD_EMI_A04__EMI_ADDR4			0x6040
+MX28_PAD_EMI_A05__EMI_ADDR5			0x6050
+MX28_PAD_EMI_A06__EMI_ADDR6			0x6060
+MX28_PAD_EMI_A07__EMI_ADDR7			0x6070
+MX28_PAD_EMI_A08__EMI_ADDR8			0x6080
+MX28_PAD_EMI_A09__EMI_ADDR9			0x6090
+MX28_PAD_EMI_A10__EMI_ADDR10			0x60a0
+MX28_PAD_EMI_A11__EMI_ADDR11			0x60b0
+MX28_PAD_EMI_A12__EMI_ADDR12			0x60c0
+MX28_PAD_EMI_A13__EMI_ADDR13			0x60d0
+MX28_PAD_EMI_A14__EMI_ADDR14			0x60e0
+MX28_PAD_EMI_BA0__EMI_BA0			0x6100
+MX28_PAD_EMI_BA1__EMI_BA1			0x6110
+MX28_PAD_EMI_BA2__EMI_BA2			0x6120
+MX28_PAD_EMI_CASN__EMI_CASN			0x6130
+MX28_PAD_EMI_RASN__EMI_RASN			0x6140
+MX28_PAD_EMI_WEN__EMI_WEN			0x6150
+MX28_PAD_EMI_CE0N__EMI_CE0N			0x6160
+MX28_PAD_EMI_CE1N__EMI_CE1N			0x6170
+MX28_PAD_EMI_CKE__EMI_CKE			0x6180
+MX28_PAD_GPMI_D00__SSP1_D0			0x0001
+MX28_PAD_GPMI_D01__SSP1_D1			0x0011
+MX28_PAD_GPMI_D02__SSP1_D2			0x0021
+MX28_PAD_GPMI_D03__SSP1_D3			0x0031
+MX28_PAD_GPMI_D04__SSP1_D4			0x0041
+MX28_PAD_GPMI_D05__SSP1_D5			0x0051
+MX28_PAD_GPMI_D06__SSP1_D6			0x0061
+MX28_PAD_GPMI_D07__SSP1_D7			0x0071
+MX28_PAD_GPMI_CE0N__SSP3_D0			0x0101
+MX28_PAD_GPMI_CE1N__SSP3_D3			0x0111
+MX28_PAD_GPMI_CE2N__CAN1_TX			0x0121
+MX28_PAD_GPMI_CE3N__CAN1_RX			0x0131
+MX28_PAD_GPMI_RDY0__SSP1_CARD_DETECT		0x0141
+MX28_PAD_GPMI_RDY1__SSP1_CMD			0x0151
+MX28_PAD_GPMI_RDY2__CAN0_TX			0x0161
+MX28_PAD_GPMI_RDY3__CAN0_RX			0x0171
+MX28_PAD_GPMI_RDN__SSP3_SCK			0x0181
+MX28_PAD_GPMI_WRN__SSP1_SCK			0x0191
+MX28_PAD_GPMI_ALE__SSP3_D1			0x01a1
+MX28_PAD_GPMI_CLE__SSP3_D2			0x01b1
+MX28_PAD_GPMI_RESETN__SSP3_CMD			0x01c1
+MX28_PAD_LCD_D03__ETM_DA8			0x1031
+MX28_PAD_LCD_D04__ETM_DA9			0x1041
+MX28_PAD_LCD_D08__ETM_DA3			0x1081
+MX28_PAD_LCD_D09__ETM_DA4			0x1091
+MX28_PAD_LCD_D20__ENET1_1588_EVENT2_OUT		0x1141
+MX28_PAD_LCD_D21__ENET1_1588_EVENT2_IN		0x1151
+MX28_PAD_LCD_D22__ENET1_1588_EVENT3_OUT		0x1161
+MX28_PAD_LCD_D23__ENET1_1588_EVENT3_IN		0x1171
+MX28_PAD_LCD_RD_E__LCD_VSYNC			0x1181
+MX28_PAD_LCD_WR_RWN__LCD_HSYNC			0x1191
+MX28_PAD_LCD_RS__LCD_DOTCLK			0x11a1
+MX28_PAD_LCD_CS__LCD_ENABLE			0x11b1
+MX28_PAD_LCD_VSYNC__SAIF1_SDATA0		0x11c1
+MX28_PAD_LCD_HSYNC__SAIF1_SDATA1		0x11d1
+MX28_PAD_LCD_DOTCLK__SAIF1_MCLK			0x11e1
+MX28_PAD_SSP0_DATA4__SSP2_D0			0x2041
+MX28_PAD_SSP0_DATA5__SSP2_D3			0x2051
+MX28_PAD_SSP0_DATA6__SSP2_CMD			0x2061
+MX28_PAD_SSP0_DATA7__SSP2_SCK			0x2071
+MX28_PAD_SSP1_SCK__SSP2_D1			0x20c1
+MX28_PAD_SSP1_CMD__SSP2_D2			0x20d1
+MX28_PAD_SSP1_DATA0__SSP2_D6			0x20e1
+MX28_PAD_SSP1_DATA3__SSP2_D7			0x20f1
+MX28_PAD_SSP2_SCK__AUART2_RX			0x2101
+MX28_PAD_SSP2_MOSI__AUART2_TX			0x2111
+MX28_PAD_SSP2_MISO__AUART3_RX			0x2121
+MX28_PAD_SSP2_SS0__AUART3_TX			0x2131
+MX28_PAD_SSP2_SS1__SSP2_D1			0x2141
+MX28_PAD_SSP2_SS2__SSP2_D2			0x2151
+MX28_PAD_SSP3_SCK__AUART4_TX			0x2181
+MX28_PAD_SSP3_MOSI__AUART4_RX			0x2191
+MX28_PAD_SSP3_MISO__AUART4_RTS			0x21a1
+MX28_PAD_SSP3_SS0__AUART4_CTS			0x21b1
+MX28_PAD_AUART0_RX__I2C0_SCL			0x3001
+MX28_PAD_AUART0_TX__I2C0_SDA			0x3011
+MX28_PAD_AUART0_CTS__AUART4_RX			0x3021
+MX28_PAD_AUART0_RTS__AUART4_TX			0x3031
+MX28_PAD_AUART1_RX__SSP2_CARD_DETECT		0x3041
+MX28_PAD_AUART1_TX__SSP3_CARD_DETECT		0x3051
+MX28_PAD_AUART1_CTS__USB0_OVERCURRENT		0x3061
+MX28_PAD_AUART1_RTS__USB0_ID			0x3071
+MX28_PAD_AUART2_RX__SSP3_D1			0x3081
+MX28_PAD_AUART2_TX__SSP3_D2			0x3091
+MX28_PAD_AUART2_CTS__I2C1_SCL			0x30a1
+MX28_PAD_AUART2_RTS__I2C1_SDA			0x30b1
+MX28_PAD_AUART3_RX__CAN0_TX			0x30c1
+MX28_PAD_AUART3_TX__CAN0_RX			0x30d1
+MX28_PAD_AUART3_CTS__CAN1_TX			0x30e1
+MX28_PAD_AUART3_RTS__CAN1_RX			0x30f1
+MX28_PAD_PWM0__I2C1_SCL				0x3101
+MX28_PAD_PWM1__I2C1_SDA				0x3111
+MX28_PAD_PWM2__USB0_ID				0x3121
+MX28_PAD_SAIF0_MCLK__PWM_3			0x3141
+MX28_PAD_SAIF0_LRCLK__PWM_4			0x3151
+MX28_PAD_SAIF0_BITCLK__PWM_5			0x3161
+MX28_PAD_SAIF0_SDATA0__PWM_6			0x3171
+MX28_PAD_I2C0_SCL__TIMROT_ROTARYA		0x3181
+MX28_PAD_I2C0_SDA__TIMROT_ROTARYB		0x3191
+MX28_PAD_SAIF1_SDATA0__PWM_7			0x31a1
+MX28_PAD_LCD_RESET__LCD_VSYNC			0x31e1
+MX28_PAD_ENET0_MDC__GPMI_CE4N			0x4001
+MX28_PAD_ENET0_MDIO__GPMI_CE5N			0x4011
+MX28_PAD_ENET0_RX_EN__GPMI_CE6N			0x4021
+MX28_PAD_ENET0_RXD0__GPMI_CE7N			0x4031
+MX28_PAD_ENET0_RXD1__GPMI_READY4		0x4041
+MX28_PAD_ENET0_TX_CLK__HSADC_TRIGGER		0x4051
+MX28_PAD_ENET0_TX_EN__GPMI_READY5		0x4061
+MX28_PAD_ENET0_TXD0__GPMI_READY6		0x4071
+MX28_PAD_ENET0_TXD1__GPMI_READY7		0x4081
+MX28_PAD_ENET0_RXD2__ENET1_RXD0			0x4091
+MX28_PAD_ENET0_RXD3__ENET1_RXD1			0x40a1
+MX28_PAD_ENET0_TXD2__ENET1_TXD0			0x40b1
+MX28_PAD_ENET0_TXD3__ENET1_TXD1			0x40c1
+MX28_PAD_ENET0_RX_CLK__ENET0_RX_ER		0x40d1
+MX28_PAD_ENET0_COL__ENET1_TX_EN			0x40e1
+MX28_PAD_ENET0_CRS__ENET1_RX_EN			0x40f1
+MX28_PAD_GPMI_CE2N__ENET0_RX_ER			0x0122
+MX28_PAD_GPMI_CE3N__SAIF1_MCLK			0x0132
+MX28_PAD_GPMI_RDY0__USB0_ID			0x0142
+MX28_PAD_GPMI_RDY2__ENET0_TX_ER			0x0162
+MX28_PAD_GPMI_RDY3__HSADC_TRIGGER		0x0172
+MX28_PAD_GPMI_ALE__SSP3_D4			0x01a2
+MX28_PAD_GPMI_CLE__SSP3_D5			0x01b2
+MX28_PAD_LCD_D00__ETM_DA0			0x1002
+MX28_PAD_LCD_D01__ETM_DA1			0x1012
+MX28_PAD_LCD_D02__ETM_DA2			0x1022
+MX28_PAD_LCD_D03__ETM_DA3			0x1032
+MX28_PAD_LCD_D04__ETM_DA4			0x1042
+MX28_PAD_LCD_D05__ETM_DA5			0x1052
+MX28_PAD_LCD_D06__ETM_DA6			0x1062
+MX28_PAD_LCD_D07__ETM_DA7			0x1072
+MX28_PAD_LCD_D08__ETM_DA8			0x1082
+MX28_PAD_LCD_D09__ETM_DA9			0x1092
+MX28_PAD_LCD_D10__ETM_DA10			0x10a2
+MX28_PAD_LCD_D11__ETM_DA11			0x10b2
+MX28_PAD_LCD_D12__ETM_DA12			0x10c2
+MX28_PAD_LCD_D13__ETM_DA13			0x10d2
+MX28_PAD_LCD_D14__ETM_DA14			0x10e2
+MX28_PAD_LCD_D15__ETM_DA15			0x10f2
+MX28_PAD_LCD_D16__ETM_DA7			0x1102
+MX28_PAD_LCD_D17__ETM_DA6			0x1112
+MX28_PAD_LCD_D18__ETM_DA5			0x1122
+MX28_PAD_LCD_D19__ETM_DA4			0x1132
+MX28_PAD_LCD_D20__ETM_DA3			0x1142
+MX28_PAD_LCD_D21__ETM_DA2			0x1152
+MX28_PAD_LCD_D22__ETM_DA1			0x1162
+MX28_PAD_LCD_D23__ETM_DA0			0x1172
+MX28_PAD_LCD_RD_E__ETM_TCTL			0x1182
+MX28_PAD_LCD_WR_RWN__ETM_TCLK			0x1192
+MX28_PAD_LCD_HSYNC__ETM_TCTL			0x11d2
+MX28_PAD_LCD_DOTCLK__ETM_TCLK			0x11e2
+MX28_PAD_SSP1_SCK__ENET0_1588_EVENT2_OUT	0x20c2
+MX28_PAD_SSP1_CMD__ENET0_1588_EVENT2_IN		0x20d2
+MX28_PAD_SSP1_DATA0__ENET0_1588_EVENT3_OUT	0x20e2
+MX28_PAD_SSP1_DATA3__ENET0_1588_EVENT3_IN	0x20f2
+MX28_PAD_SSP2_SCK__SAIF0_SDATA1			0x2102
+MX28_PAD_SSP2_MOSI__SAIF0_SDATA2		0x2112
+MX28_PAD_SSP2_MISO__SAIF1_SDATA1		0x2122
+MX28_PAD_SSP2_SS0__SAIF1_SDATA2			0x2132
+MX28_PAD_SSP2_SS1__USB1_OVERCURRENT		0x2142
+MX28_PAD_SSP2_SS2__USB0_OVERCURRENT		0x2152
+MX28_PAD_SSP3_SCK__ENET1_1588_EVENT0_OUT	0x2182
+MX28_PAD_SSP3_MOSI__ENET1_1588_EVENT0_IN	0x2192
+MX28_PAD_SSP3_MISO__ENET1_1588_EVENT1_OUT	0x21a2
+MX28_PAD_SSP3_SS0__ENET1_1588_EVENT1_IN		0x21b2
+MX28_PAD_AUART0_RX__DUART_CTS			0x3002
+MX28_PAD_AUART0_TX__DUART_RTS			0x3012
+MX28_PAD_AUART0_CTS__DUART_RX			0x3022
+MX28_PAD_AUART0_RTS__DUART_TX			0x3032
+MX28_PAD_AUART1_RX__PWM_0			0x3042
+MX28_PAD_AUART1_TX__PWM_1			0x3052
+MX28_PAD_AUART1_CTS__TIMROT_ROTARYA		0x3062
+MX28_PAD_AUART1_RTS__TIMROT_ROTARYB		0x3072
+MX28_PAD_AUART2_RX__SSP3_D4			0x3082
+MX28_PAD_AUART2_TX__SSP3_D5			0x3092
+MX28_PAD_AUART2_CTS__SAIF1_BITCLK		0x30a2
+MX28_PAD_AUART2_RTS__SAIF1_LRCLK		0x30b2
+MX28_PAD_AUART3_RX__ENET0_1588_EVENT0_OUT	0x30c2
+MX28_PAD_AUART3_TX__ENET0_1588_EVENT0_IN	0x30d2
+MX28_PAD_AUART3_CTS__ENET0_1588_EVENT1_OUT	0x30e2
+MX28_PAD_AUART3_RTS__ENET0_1588_EVENT1_IN	0x30f2
+MX28_PAD_PWM0__DUART_RX				0x3102
+MX28_PAD_PWM1__DUART_TX				0x3112
+MX28_PAD_PWM2__USB1_OVERCURRENT			0x3122
+MX28_PAD_SAIF0_MCLK__AUART4_CTS			0x3142
+MX28_PAD_SAIF0_LRCLK__AUART4_RTS		0x3152
+MX28_PAD_SAIF0_BITCLK__AUART4_RX		0x3162
+MX28_PAD_SAIF0_SDATA0__AUART4_TX		0x3172
+MX28_PAD_I2C0_SCL__DUART_RX			0x3182
+MX28_PAD_I2C0_SDA__DUART_TX			0x3192
+MX28_PAD_SAIF1_SDATA0__SAIF0_SDATA1		0x31a2
+MX28_PAD_SPDIF__ENET1_RX_ER			0x31b2
+MX28_PAD_ENET0_MDC__SAIF0_SDATA1		0x4002
+MX28_PAD_ENET0_MDIO__SAIF0_SDATA2		0x4012
+MX28_PAD_ENET0_RX_EN__SAIF1_SDATA1		0x4022
+MX28_PAD_ENET0_RXD0__SAIF1_SDATA2		0x4032
+MX28_PAD_ENET0_TX_CLK__ENET0_1588_EVENT2_OUT	0x4052
+MX28_PAD_ENET0_RXD2__ENET0_1588_EVENT0_OUT	0x4092
+MX28_PAD_ENET0_RXD3__ENET0_1588_EVENT0_IN	0x40a2
+MX28_PAD_ENET0_TXD2__ENET0_1588_EVENT1_OUT	0x40b2
+MX28_PAD_ENET0_TXD3__ENET0_1588_EVENT1_IN	0x40c2
+MX28_PAD_ENET0_RX_CLK__ENET0_1588_EVENT2_IN	0x40d2
+MX28_PAD_ENET0_COL__ENET0_1588_EVENT3_OUT	0x40e2
+MX28_PAD_ENET0_CRS__ENET0_1588_EVENT3_IN	0x40f2
+MX28_PAD_GPMI_D00__GPIO_0_0			0x0003
+MX28_PAD_GPMI_D01__GPIO_0_1			0x0013
+MX28_PAD_GPMI_D02__GPIO_0_2			0x0023
+MX28_PAD_GPMI_D03__GPIO_0_3			0x0033
+MX28_PAD_GPMI_D04__GPIO_0_4			0x0043
+MX28_PAD_GPMI_D05__GPIO_0_5			0x0053
+MX28_PAD_GPMI_D06__GPIO_0_6			0x0063
+MX28_PAD_GPMI_D07__GPIO_0_7			0x0073
+MX28_PAD_GPMI_CE0N__GPIO_0_16			0x0103
+MX28_PAD_GPMI_CE1N__GPIO_0_17			0x0113
+MX28_PAD_GPMI_CE2N__GPIO_0_18			0x0123
+MX28_PAD_GPMI_CE3N__GPIO_0_19			0x0133
+MX28_PAD_GPMI_RDY0__GPIO_0_20			0x0143
+MX28_PAD_GPMI_RDY1__GPIO_0_21			0x0153
+MX28_PAD_GPMI_RDY2__GPIO_0_22			0x0163
+MX28_PAD_GPMI_RDY3__GPIO_0_23			0x0173
+MX28_PAD_GPMI_RDN__GPIO_0_24			0x0183
+MX28_PAD_GPMI_WRN__GPIO_0_25			0x0193
+MX28_PAD_GPMI_ALE__GPIO_0_26			0x01a3
+MX28_PAD_GPMI_CLE__GPIO_0_27			0x01b3
+MX28_PAD_GPMI_RESETN__GPIO_0_28			0x01c3
+MX28_PAD_LCD_D00__GPIO_1_0			0x1003
+MX28_PAD_LCD_D01__GPIO_1_1			0x1013
+MX28_PAD_LCD_D02__GPIO_1_2			0x1023
+MX28_PAD_LCD_D03__GPIO_1_3			0x1033
+MX28_PAD_LCD_D04__GPIO_1_4			0x1043
+MX28_PAD_LCD_D05__GPIO_1_5			0x1053
+MX28_PAD_LCD_D06__GPIO_1_6			0x1063
+MX28_PAD_LCD_D07__GPIO_1_7			0x1073
+MX28_PAD_LCD_D08__GPIO_1_8			0x1083
+MX28_PAD_LCD_D09__GPIO_1_9			0x1093
+MX28_PAD_LCD_D10__GPIO_1_10			0x10a3
+MX28_PAD_LCD_D11__GPIO_1_11			0x10b3
+MX28_PAD_LCD_D12__GPIO_1_12			0x10c3
+MX28_PAD_LCD_D13__GPIO_1_13			0x10d3
+MX28_PAD_LCD_D14__GPIO_1_14			0x10e3
+MX28_PAD_LCD_D15__GPIO_1_15			0x10f3
+MX28_PAD_LCD_D16__GPIO_1_16			0x1103
+MX28_PAD_LCD_D17__GPIO_1_17			0x1113
+MX28_PAD_LCD_D18__GPIO_1_18			0x1123
+MX28_PAD_LCD_D19__GPIO_1_19			0x1133
+MX28_PAD_LCD_D20__GPIO_1_20			0x1143
+MX28_PAD_LCD_D21__GPIO_1_21			0x1153
+MX28_PAD_LCD_D22__GPIO_1_22			0x1163
+MX28_PAD_LCD_D23__GPIO_1_23			0x1173
+MX28_PAD_LCD_RD_E__GPIO_1_24			0x1183
+MX28_PAD_LCD_WR_RWN__GPIO_1_25			0x1193
+MX28_PAD_LCD_RS__GPIO_1_26			0x11a3
+MX28_PAD_LCD_CS__GPIO_1_27			0x11b3
+MX28_PAD_LCD_VSYNC__GPIO_1_28			0x11c3
+MX28_PAD_LCD_HSYNC__GPIO_1_29			0x11d3
+MX28_PAD_LCD_DOTCLK__GPIO_1_30			0x11e3
+MX28_PAD_LCD_ENABLE__GPIO_1_31			0x11f3
+MX28_PAD_SSP0_DATA0__GPIO_2_0			0x2003
+MX28_PAD_SSP0_DATA1__GPIO_2_1			0x2013
+MX28_PAD_SSP0_DATA2__GPIO_2_2			0x2023
+MX28_PAD_SSP0_DATA3__GPIO_2_3			0x2033
+MX28_PAD_SSP0_DATA4__GPIO_2_4			0x2043
+MX28_PAD_SSP0_DATA5__GPIO_2_5			0x2053
+MX28_PAD_SSP0_DATA6__GPIO_2_6			0x2063
+MX28_PAD_SSP0_DATA7__GPIO_2_7			0x2073
+MX28_PAD_SSP0_CMD__GPIO_2_8			0x2083
+MX28_PAD_SSP0_DETECT__GPIO_2_9			0x2093
+MX28_PAD_SSP0_SCK__GPIO_2_10			0x20a3
+MX28_PAD_SSP1_SCK__GPIO_2_12			0x20c3
+MX28_PAD_SSP1_CMD__GPIO_2_13			0x20d3
+MX28_PAD_SSP1_DATA0__GPIO_2_14			0x20e3
+MX28_PAD_SSP1_DATA3__GPIO_2_15			0x20f3
+MX28_PAD_SSP2_SCK__GPIO_2_16			0x2103
+MX28_PAD_SSP2_MOSI__GPIO_2_17			0x2113
+MX28_PAD_SSP2_MISO__GPIO_2_18			0x2123
+MX28_PAD_SSP2_SS0__GPIO_2_19			0x2133
+MX28_PAD_SSP2_SS1__GPIO_2_20			0x2143
+MX28_PAD_SSP2_SS2__GPIO_2_21			0x2153
+MX28_PAD_SSP3_SCK__GPIO_2_24			0x2183
+MX28_PAD_SSP3_MOSI__GPIO_2_25			0x2193
+MX28_PAD_SSP3_MISO__GPIO_2_26			0x21a3
+MX28_PAD_SSP3_SS0__GPIO_2_27			0x21b3
+MX28_PAD_AUART0_RX__GPIO_3_0			0x3003
+MX28_PAD_AUART0_TX__GPIO_3_1			0x3013
+MX28_PAD_AUART0_CTS__GPIO_3_2			0x3023
+MX28_PAD_AUART0_RTS__GPIO_3_3			0x3033
+MX28_PAD_AUART1_RX__GPIO_3_4			0x3043
+MX28_PAD_AUART1_TX__GPIO_3_5			0x3053
+MX28_PAD_AUART1_CTS__GPIO_3_6			0x3063
+MX28_PAD_AUART1_RTS__GPIO_3_7			0x3073
+MX28_PAD_AUART2_RX__GPIO_3_8			0x3083
+MX28_PAD_AUART2_TX__GPIO_3_9			0x3093
+MX28_PAD_AUART2_CTS__GPIO_3_10			0x30a3
+MX28_PAD_AUART2_RTS__GPIO_3_11			0x30b3
+MX28_PAD_AUART3_RX__GPIO_3_12			0x30c3
+MX28_PAD_AUART3_TX__GPIO_3_13			0x30d3
+MX28_PAD_AUART3_CTS__GPIO_3_14			0x30e3
+MX28_PAD_AUART3_RTS__GPIO_3_15			0x30f3
+MX28_PAD_PWM0__GPIO_3_16			0x3103
+MX28_PAD_PWM1__GPIO_3_17			0x3113
+MX28_PAD_PWM2__GPIO_3_18			0x3123
+MX28_PAD_SAIF0_MCLK__GPIO_3_20			0x3143
+MX28_PAD_SAIF0_LRCLK__GPIO_3_21			0x3153
+MX28_PAD_SAIF0_BITCLK__GPIO_3_22		0x3163
+MX28_PAD_SAIF0_SDATA0__GPIO_3_23		0x3173
+MX28_PAD_I2C0_SCL__GPIO_3_24			0x3183
+MX28_PAD_I2C0_SDA__GPIO_3_25			0x3193
+MX28_PAD_SAIF1_SDATA0__GPIO_3_26		0x31a3
+MX28_PAD_SPDIF__GPIO_3_27			0x31b3
+MX28_PAD_PWM3__GPIO_3_28			0x31c3
+MX28_PAD_PWM4__GPIO_3_29			0x31d3
+MX28_PAD_LCD_RESET__GPIO_3_30			0x31e3
+MX28_PAD_ENET0_MDC__GPIO_4_0			0x4003
+MX28_PAD_ENET0_MDIO__GPIO_4_1			0x4013
+MX28_PAD_ENET0_RX_EN__GPIO_4_2			0x4023
+MX28_PAD_ENET0_RXD0__GPIO_4_3			0x4033
+MX28_PAD_ENET0_RXD1__GPIO_4_4			0x4043
+MX28_PAD_ENET0_TX_CLK__GPIO_4_5			0x4053
+MX28_PAD_ENET0_TX_EN__GPIO_4_6			0x4063
+MX28_PAD_ENET0_TXD0__GPIO_4_7			0x4073
+MX28_PAD_ENET0_TXD1__GPIO_4_8			0x4083
+MX28_PAD_ENET0_RXD2__GPIO_4_9			0x4093
+MX28_PAD_ENET0_RXD3__GPIO_4_10			0x40a3
+MX28_PAD_ENET0_TXD2__GPIO_4_11			0x40b3
+MX28_PAD_ENET0_TXD3__GPIO_4_12			0x40c3
+MX28_PAD_ENET0_RX_CLK__GPIO_4_13		0x40d3
+MX28_PAD_ENET0_COL__GPIO_4_14			0x40e3
+MX28_PAD_ENET0_CRS__GPIO_4_15			0x40f3
+MX28_PAD_ENET_CLK__GPIO_4_16			0x4103
+MX28_PAD_JTAG_RTCK__GPIO_4_20			0x4143
+
+Valid values for i.MX23 pinmux-id:
+
+pinmux						id
+------						--
+MX23_PAD_GPMI_D00__GPMI_D00			0x0000
+MX23_PAD_GPMI_D01__GPMI_D01			0x0010
+MX23_PAD_GPMI_D02__GPMI_D02			0x0020
+MX23_PAD_GPMI_D03__GPMI_D03			0x0030
+MX23_PAD_GPMI_D04__GPMI_D04			0x0040
+MX23_PAD_GPMI_D05__GPMI_D05			0x0050
+MX23_PAD_GPMI_D06__GPMI_D06			0x0060
+MX23_PAD_GPMI_D07__GPMI_D07			0x0070
+MX23_PAD_GPMI_D08__GPMI_D08			0x0080
+MX23_PAD_GPMI_D09__GPMI_D09			0x0090
+MX23_PAD_GPMI_D10__GPMI_D10			0x00a0
+MX23_PAD_GPMI_D11__GPMI_D11			0x00b0
+MX23_PAD_GPMI_D12__GPMI_D12			0x00c0
+MX23_PAD_GPMI_D13__GPMI_D13			0x00d0
+MX23_PAD_GPMI_D14__GPMI_D14			0x00e0
+MX23_PAD_GPMI_D15__GPMI_D15			0x00f0
+MX23_PAD_GPMI_CLE__GPMI_CLE			0x0100
+MX23_PAD_GPMI_ALE__GPMI_ALE			0x0110
+MX23_PAD_GPMI_CE2N__GPMI_CE2N			0x0120
+MX23_PAD_GPMI_RDY0__GPMI_RDY0			0x0130
+MX23_PAD_GPMI_RDY1__GPMI_RDY1			0x0140
+MX23_PAD_GPMI_RDY2__GPMI_RDY2			0x0150
+MX23_PAD_GPMI_RDY3__GPMI_RDY3			0x0160
+MX23_PAD_GPMI_WPN__GPMI_WPN			0x0170
+MX23_PAD_GPMI_WRN__GPMI_WRN			0x0180
+MX23_PAD_GPMI_RDN__GPMI_RDN			0x0190
+MX23_PAD_AUART1_CTS__AUART1_CTS			0x01a0
+MX23_PAD_AUART1_RTS__AUART1_RTS			0x01b0
+MX23_PAD_AUART1_RX__AUART1_RX			0x01c0
+MX23_PAD_AUART1_TX__AUART1_TX			0x01d0
+MX23_PAD_I2C_SCL__I2C_SCL			0x01e0
+MX23_PAD_I2C_SDA__I2C_SDA			0x01f0
+MX23_PAD_LCD_D00__LCD_D00			0x1000
+MX23_PAD_LCD_D01__LCD_D01			0x1010
+MX23_PAD_LCD_D02__LCD_D02			0x1020
+MX23_PAD_LCD_D03__LCD_D03			0x1030
+MX23_PAD_LCD_D04__LCD_D04			0x1040
+MX23_PAD_LCD_D05__LCD_D05			0x1050
+MX23_PAD_LCD_D06__LCD_D06			0x1060
+MX23_PAD_LCD_D07__LCD_D07			0x1070
+MX23_PAD_LCD_D08__LCD_D08			0x1080
+MX23_PAD_LCD_D09__LCD_D09			0x1090
+MX23_PAD_LCD_D10__LCD_D10			0x10a0
+MX23_PAD_LCD_D11__LCD_D11			0x10b0
+MX23_PAD_LCD_D12__LCD_D12			0x10c0
+MX23_PAD_LCD_D13__LCD_D13			0x10d0
+MX23_PAD_LCD_D14__LCD_D14			0x10e0
+MX23_PAD_LCD_D15__LCD_D15			0x10f0
+MX23_PAD_LCD_D16__LCD_D16			0x1100
+MX23_PAD_LCD_D17__LCD_D17			0x1110
+MX23_PAD_LCD_RESET__LCD_RESET			0x1120
+MX23_PAD_LCD_RS__LCD_RS				0x1130
+MX23_PAD_LCD_WR__LCD_WR				0x1140
+MX23_PAD_LCD_CS__LCD_CS				0x1150
+MX23_PAD_LCD_DOTCK__LCD_DOTCK			0x1160
+MX23_PAD_LCD_ENABLE__LCD_ENABLE			0x1170
+MX23_PAD_LCD_HSYNC__LCD_HSYNC			0x1180
+MX23_PAD_LCD_VSYNC__LCD_VSYNC			0x1190
+MX23_PAD_PWM0__PWM0				0x11a0
+MX23_PAD_PWM1__PWM1				0x11b0
+MX23_PAD_PWM2__PWM2				0x11c0
+MX23_PAD_PWM3__PWM3				0x11d0
+MX23_PAD_PWM4__PWM4				0x11e0
+MX23_PAD_SSP1_CMD__SSP1_CMD			0x2000
+MX23_PAD_SSP1_DETECT__SSP1_DETECT		0x2010
+MX23_PAD_SSP1_DATA0__SSP1_DATA0			0x2020
+MX23_PAD_SSP1_DATA1__SSP1_DATA1			0x2030
+MX23_PAD_SSP1_DATA2__SSP1_DATA2			0x2040
+MX23_PAD_SSP1_DATA3__SSP1_DATA3			0x2050
+MX23_PAD_SSP1_SCK__SSP1_SCK			0x2060
+MX23_PAD_ROTARYA__ROTARYA			0x2070
+MX23_PAD_ROTARYB__ROTARYB			0x2080
+MX23_PAD_EMI_A00__EMI_A00			0x2090
+MX23_PAD_EMI_A01__EMI_A01			0x20a0
+MX23_PAD_EMI_A02__EMI_A02			0x20b0
+MX23_PAD_EMI_A03__EMI_A03			0x20c0
+MX23_PAD_EMI_A04__EMI_A04			0x20d0
+MX23_PAD_EMI_A05__EMI_A05			0x20e0
+MX23_PAD_EMI_A06__EMI_A06			0x20f0
+MX23_PAD_EMI_A07__EMI_A07			0x2100
+MX23_PAD_EMI_A08__EMI_A08			0x2110
+MX23_PAD_EMI_A09__EMI_A09			0x2120
+MX23_PAD_EMI_A10__EMI_A10			0x2130
+MX23_PAD_EMI_A11__EMI_A11			0x2140
+MX23_PAD_EMI_A12__EMI_A12			0x2150
+MX23_PAD_EMI_BA0__EMI_BA0			0x2160
+MX23_PAD_EMI_BA1__EMI_BA1			0x2170
+MX23_PAD_EMI_CASN__EMI_CASN			0x2180
+MX23_PAD_EMI_CE0N__EMI_CE0N			0x2190
+MX23_PAD_EMI_CE1N__EMI_CE1N			0x21a0
+MX23_PAD_GPMI_CE1N__GPMI_CE1N			0x21b0
+MX23_PAD_GPMI_CE0N__GPMI_CE0N			0x21c0
+MX23_PAD_EMI_CKE__EMI_CKE			0x21d0
+MX23_PAD_EMI_RASN__EMI_RASN			0x21e0
+MX23_PAD_EMI_WEN__EMI_WEN			0x21f0
+MX23_PAD_EMI_D00__EMI_D00			0x3000
+MX23_PAD_EMI_D01__EMI_D01			0x3010
+MX23_PAD_EMI_D02__EMI_D02			0x3020
+MX23_PAD_EMI_D03__EMI_D03			0x3030
+MX23_PAD_EMI_D04__EMI_D04			0x3040
+MX23_PAD_EMI_D05__EMI_D05			0x3050
+MX23_PAD_EMI_D06__EMI_D06			0x3060
+MX23_PAD_EMI_D07__EMI_D07			0x3070
+MX23_PAD_EMI_D08__EMI_D08			0x3080
+MX23_PAD_EMI_D09__EMI_D09			0x3090
+MX23_PAD_EMI_D10__EMI_D10			0x30a0
+MX23_PAD_EMI_D11__EMI_D11			0x30b0
+MX23_PAD_EMI_D12__EMI_D12			0x30c0
+MX23_PAD_EMI_D13__EMI_D13			0x30d0
+MX23_PAD_EMI_D14__EMI_D14			0x30e0
+MX23_PAD_EMI_D15__EMI_D15			0x30f0
+MX23_PAD_EMI_DQM0__EMI_DQM0			0x3100
+MX23_PAD_EMI_DQM1__EMI_DQM1			0x3110
+MX23_PAD_EMI_DQS0__EMI_DQS0			0x3120
+MX23_PAD_EMI_DQS1__EMI_DQS1			0x3130
+MX23_PAD_EMI_CLK__EMI_CLK			0x3140
+MX23_PAD_EMI_CLKN__EMI_CLKN			0x3150
+MX23_PAD_GPMI_D00__LCD_D8			0x0001
+MX23_PAD_GPMI_D01__LCD_D9			0x0011
+MX23_PAD_GPMI_D02__LCD_D10			0x0021
+MX23_PAD_GPMI_D03__LCD_D11			0x0031
+MX23_PAD_GPMI_D04__LCD_D12			0x0041
+MX23_PAD_GPMI_D05__LCD_D13			0x0051
+MX23_PAD_GPMI_D06__LCD_D14			0x0061
+MX23_PAD_GPMI_D07__LCD_D15			0x0071
+MX23_PAD_GPMI_D08__LCD_D18			0x0081
+MX23_PAD_GPMI_D09__LCD_D19			0x0091
+MX23_PAD_GPMI_D10__LCD_D20			0x00a1
+MX23_PAD_GPMI_D11__LCD_D21			0x00b1
+MX23_PAD_GPMI_D12__LCD_D22			0x00c1
+MX23_PAD_GPMI_D13__LCD_D23			0x00d1
+MX23_PAD_GPMI_D14__AUART2_RX			0x00e1
+MX23_PAD_GPMI_D15__AUART2_TX			0x00f1
+MX23_PAD_GPMI_CLE__LCD_D16			0x0101
+MX23_PAD_GPMI_ALE__LCD_D17			0x0111
+MX23_PAD_GPMI_CE2N__ATA_A2			0x0121
+MX23_PAD_AUART1_RTS__IR_CLK			0x01b1
+MX23_PAD_AUART1_RX__IR_RX			0x01c1
+MX23_PAD_AUART1_TX__IR_TX			0x01d1
+MX23_PAD_I2C_SCL__GPMI_RDY2			0x01e1
+MX23_PAD_I2C_SDA__GPMI_CE2N			0x01f1
+MX23_PAD_LCD_D00__ETM_DA8			0x1001
+MX23_PAD_LCD_D01__ETM_DA9			0x1011
+MX23_PAD_LCD_D02__ETM_DA10			0x1021
+MX23_PAD_LCD_D03__ETM_DA11			0x1031
+MX23_PAD_LCD_D04__ETM_DA12			0x1041
+MX23_PAD_LCD_D05__ETM_DA13			0x1051
+MX23_PAD_LCD_D06__ETM_DA14			0x1061
+MX23_PAD_LCD_D07__ETM_DA15			0x1071
+MX23_PAD_LCD_D08__ETM_DA0			0x1081
+MX23_PAD_LCD_D09__ETM_DA1			0x1091
+MX23_PAD_LCD_D10__ETM_DA2			0x10a1
+MX23_PAD_LCD_D11__ETM_DA3			0x10b1
+MX23_PAD_LCD_D12__ETM_DA4			0x10c1
+MX23_PAD_LCD_D13__ETM_DA5			0x10d1
+MX23_PAD_LCD_D14__ETM_DA6			0x10e1
+MX23_PAD_LCD_D15__ETM_DA7			0x10f1
+MX23_PAD_LCD_RESET__ETM_TCTL			0x1121
+MX23_PAD_LCD_RS__ETM_TCLK			0x1131
+MX23_PAD_LCD_DOTCK__GPMI_RDY3			0x1161
+MX23_PAD_LCD_ENABLE__I2C_SCL			0x1171
+MX23_PAD_LCD_HSYNC__I2C_SDA			0x1181
+MX23_PAD_LCD_VSYNC__LCD_BUSY			0x1191
+MX23_PAD_PWM0__ROTARYA				0x11a1
+MX23_PAD_PWM1__ROTARYB				0x11b1
+MX23_PAD_PWM2__GPMI_RDY3			0x11c1
+MX23_PAD_PWM3__ETM_TCTL				0x11d1
+MX23_PAD_PWM4__ETM_TCLK				0x11e1
+MX23_PAD_SSP1_DETECT__GPMI_CE3N			0x2011
+MX23_PAD_SSP1_DATA1__I2C_SCL			0x2031
+MX23_PAD_SSP1_DATA2__I2C_SDA			0x2041
+MX23_PAD_ROTARYA__AUART2_RTS			0x2071
+MX23_PAD_ROTARYB__AUART2_CTS			0x2081
+MX23_PAD_GPMI_D00__SSP2_DATA0			0x0002
+MX23_PAD_GPMI_D01__SSP2_DATA1			0x0012
+MX23_PAD_GPMI_D02__SSP2_DATA2			0x0022
+MX23_PAD_GPMI_D03__SSP2_DATA3			0x0032
+MX23_PAD_GPMI_D04__SSP2_DATA4			0x0042
+MX23_PAD_GPMI_D05__SSP2_DATA5			0x0052
+MX23_PAD_GPMI_D06__SSP2_DATA6			0x0062
+MX23_PAD_GPMI_D07__SSP2_DATA7			0x0072
+MX23_PAD_GPMI_D08__SSP1_DATA4			0x0082
+MX23_PAD_GPMI_D09__SSP1_DATA5			0x0092
+MX23_PAD_GPMI_D10__SSP1_DATA6			0x00a2
+MX23_PAD_GPMI_D11__SSP1_DATA7			0x00b2
+MX23_PAD_GPMI_D15__GPMI_CE3N			0x00f2
+MX23_PAD_GPMI_RDY0__SSP2_DETECT			0x0132
+MX23_PAD_GPMI_RDY1__SSP2_CMD			0x0142
+MX23_PAD_GPMI_WRN__SSP2_SCK			0x0182
+MX23_PAD_AUART1_CTS__SSP1_DATA4			0x01a2
+MX23_PAD_AUART1_RTS__SSP1_DATA5			0x01b2
+MX23_PAD_AUART1_RX__SSP1_DATA6			0x01c2
+MX23_PAD_AUART1_TX__SSP1_DATA7			0x01d2
+MX23_PAD_I2C_SCL__AUART1_TX			0x01e2
+MX23_PAD_I2C_SDA__AUART1_RX			0x01f2
+MX23_PAD_LCD_D08__SAIF2_SDATA0			0x1082
+MX23_PAD_LCD_D09__SAIF1_SDATA0			0x1092
+MX23_PAD_LCD_D10__SAIF_MCLK_BITCLK		0x10a2
+MX23_PAD_LCD_D11__SAIF_LRCLK			0x10b2
+MX23_PAD_LCD_D12__SAIF2_SDATA1			0x10c2
+MX23_PAD_LCD_D13__SAIF2_SDATA2			0x10d2
+MX23_PAD_LCD_D14__SAIF1_SDATA2			0x10e2
+MX23_PAD_LCD_D15__SAIF1_SDATA1			0x10f2
+MX23_PAD_LCD_D16__SAIF_ALT_BITCLK		0x1102
+MX23_PAD_LCD_RESET__GPMI_CE3N			0x1122
+MX23_PAD_PWM0__DUART_RX				0x11a2
+MX23_PAD_PWM1__DUART_TX				0x11b2
+MX23_PAD_PWM3__AUART1_CTS			0x11d2
+MX23_PAD_PWM4__AUART1_RTS			0x11e2
+MX23_PAD_SSP1_CMD__JTAG_TDO			0x2002
+MX23_PAD_SSP1_DETECT__USB_OTG_ID		0x2012
+MX23_PAD_SSP1_DATA0__JTAG_TDI			0x2022
+MX23_PAD_SSP1_DATA1__JTAG_TCLK			0x2032
+MX23_PAD_SSP1_DATA2__JTAG_RTCK			0x2042
+MX23_PAD_SSP1_DATA3__JTAG_TMS			0x2052
+MX23_PAD_SSP1_SCK__JTAG_TRST			0x2062
+MX23_PAD_ROTARYA__SPDIF				0x2072
+MX23_PAD_ROTARYB__GPMI_CE3N			0x2082
+MX23_PAD_GPMI_D00__GPIO_0_0			0x0003
+MX23_PAD_GPMI_D01__GPIO_0_1			0x0013
+MX23_PAD_GPMI_D02__GPIO_0_2			0x0023
+MX23_PAD_GPMI_D03__GPIO_0_3			0x0033
+MX23_PAD_GPMI_D04__GPIO_0_4			0x0043
+MX23_PAD_GPMI_D05__GPIO_0_5			0x0053
+MX23_PAD_GPMI_D06__GPIO_0_6			0x0063
+MX23_PAD_GPMI_D07__GPIO_0_7			0x0073
+MX23_PAD_GPMI_D08__GPIO_0_8			0x0083
+MX23_PAD_GPMI_D09__GPIO_0_9			0x0093
+MX23_PAD_GPMI_D10__GPIO_0_10			0x00a3
+MX23_PAD_GPMI_D11__GPIO_0_11			0x00b3
+MX23_PAD_GPMI_D12__GPIO_0_12			0x00c3
+MX23_PAD_GPMI_D13__GPIO_0_13			0x00d3
+MX23_PAD_GPMI_D14__GPIO_0_14			0x00e3
+MX23_PAD_GPMI_D15__GPIO_0_15			0x00f3
+MX23_PAD_GPMI_CLE__GPIO_0_16			0x0103
+MX23_PAD_GPMI_ALE__GPIO_0_17			0x0113
+MX23_PAD_GPMI_CE2N__GPIO_0_18			0x0123
+MX23_PAD_GPMI_RDY0__GPIO_0_19			0x0133
+MX23_PAD_GPMI_RDY1__GPIO_0_20			0x0143
+MX23_PAD_GPMI_RDY2__GPIO_0_21			0x0153
+MX23_PAD_GPMI_RDY3__GPIO_0_22			0x0163
+MX23_PAD_GPMI_WPN__GPIO_0_23			0x0173
+MX23_PAD_GPMI_WRN__GPIO_0_24			0x0183
+MX23_PAD_GPMI_RDN__GPIO_0_25			0x0193
+MX23_PAD_AUART1_CTS__GPIO_0_26			0x01a3
+MX23_PAD_AUART1_RTS__GPIO_0_27			0x01b3
+MX23_PAD_AUART1_RX__GPIO_0_28			0x01c3
+MX23_PAD_AUART1_TX__GPIO_0_29			0x01d3
+MX23_PAD_I2C_SCL__GPIO_0_30			0x01e3
+MX23_PAD_I2C_SDA__GPIO_0_31			0x01f3
+MX23_PAD_LCD_D00__GPIO_1_0			0x1003
+MX23_PAD_LCD_D01__GPIO_1_1			0x1013
+MX23_PAD_LCD_D02__GPIO_1_2			0x1023
+MX23_PAD_LCD_D03__GPIO_1_3			0x1033
+MX23_PAD_LCD_D04__GPIO_1_4			0x1043
+MX23_PAD_LCD_D05__GPIO_1_5			0x1053
+MX23_PAD_LCD_D06__GPIO_1_6			0x1063
+MX23_PAD_LCD_D07__GPIO_1_7			0x1073
+MX23_PAD_LCD_D08__GPIO_1_8			0x1083
+MX23_PAD_LCD_D09__GPIO_1_9			0x1093
+MX23_PAD_LCD_D10__GPIO_1_10			0x10a3
+MX23_PAD_LCD_D11__GPIO_1_11			0x10b3
+MX23_PAD_LCD_D12__GPIO_1_12			0x10c3
+MX23_PAD_LCD_D13__GPIO_1_13			0x10d3
+MX23_PAD_LCD_D14__GPIO_1_14			0x10e3
+MX23_PAD_LCD_D15__GPIO_1_15			0x10f3
+MX23_PAD_LCD_D16__GPIO_1_16			0x1103
+MX23_PAD_LCD_D17__GPIO_1_17			0x1113
+MX23_PAD_LCD_RESET__GPIO_1_18			0x1123
+MX23_PAD_LCD_RS__GPIO_1_19			0x1133
+MX23_PAD_LCD_WR__GPIO_1_20			0x1143
+MX23_PAD_LCD_CS__GPIO_1_21			0x1153
+MX23_PAD_LCD_DOTCK__GPIO_1_22			0x1163
+MX23_PAD_LCD_ENABLE__GPIO_1_23			0x1173
+MX23_PAD_LCD_HSYNC__GPIO_1_24			0x1183
+MX23_PAD_LCD_VSYNC__GPIO_1_25			0x1193
+MX23_PAD_PWM0__GPIO_1_26			0x11a3
+MX23_PAD_PWM1__GPIO_1_27			0x11b3
+MX23_PAD_PWM2__GPIO_1_28			0x11c3
+MX23_PAD_PWM3__GPIO_1_29			0x11d3
+MX23_PAD_PWM4__GPIO_1_30			0x11e3
+MX23_PAD_SSP1_CMD__GPIO_2_0			0x2003
+MX23_PAD_SSP1_DETECT__GPIO_2_1			0x2013
+MX23_PAD_SSP1_DATA0__GPIO_2_2			0x2023
+MX23_PAD_SSP1_DATA1__GPIO_2_3			0x2033
+MX23_PAD_SSP1_DATA2__GPIO_2_4			0x2043
+MX23_PAD_SSP1_DATA3__GPIO_2_5			0x2053
+MX23_PAD_SSP1_SCK__GPIO_2_6			0x2063
+MX23_PAD_ROTARYA__GPIO_2_7			0x2073
+MX23_PAD_ROTARYB__GPIO_2_8			0x2083
+MX23_PAD_EMI_A00__GPIO_2_9			0x2093
+MX23_PAD_EMI_A01__GPIO_2_10			0x20a3
+MX23_PAD_EMI_A02__GPIO_2_11			0x20b3
+MX23_PAD_EMI_A03__GPIO_2_12			0x20c3
+MX23_PAD_EMI_A04__GPIO_2_13			0x20d3
+MX23_PAD_EMI_A05__GPIO_2_14			0x20e3
+MX23_PAD_EMI_A06__GPIO_2_15			0x20f3
+MX23_PAD_EMI_A07__GPIO_2_16			0x2103
+MX23_PAD_EMI_A08__GPIO_2_17			0x2113
+MX23_PAD_EMI_A09__GPIO_2_18			0x2123
+MX23_PAD_EMI_A10__GPIO_2_19			0x2133
+MX23_PAD_EMI_A11__GPIO_2_20			0x2143
+MX23_PAD_EMI_A12__GPIO_2_21			0x2153
+MX23_PAD_EMI_BA0__GPIO_2_22			0x2163
+MX23_PAD_EMI_BA1__GPIO_2_23			0x2173
+MX23_PAD_EMI_CASN__GPIO_2_24			0x2183
+MX23_PAD_EMI_CE0N__GPIO_2_25			0x2193
+MX23_PAD_EMI_CE1N__GPIO_2_26			0x21a3
+MX23_PAD_GPMI_CE1N__GPIO_2_27			0x21b3
+MX23_PAD_GPMI_CE0N__GPIO_2_28			0x21c3
+MX23_PAD_EMI_CKE__GPIO_2_29			0x21d3
+MX23_PAD_EMI_RASN__GPIO_2_30			0x21e3
+MX23_PAD_EMI_WEN__GPIO_2_31			0x21f3
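For orientation only (not part of the patch itself), a board device tree might reference these pinmux-id values roughly as follows. This sketch assumes the fsl,pinmux-ids, fsl,drive-strength, fsl,voltage and fsl,pull-up properties described earlier in this binding; the controller address, node names and chosen pads are illustrative, taken from the MX23 SSP1 entries in the table above:

	pinctrl@80018000 {
		compatible = "fsl,imx23-pinctrl";
		reg = <0x80018000 0x2000>;

		/* SSP1 pads routed to their default SSP1 functions */
		ssp1_pins_a: ssp1@0 {
			reg = <0>;
			fsl,pinmux-ids = <
				0x2000	/* MX23_PAD_SSP1_CMD__SSP1_CMD */
				0x2010	/* MX23_PAD_SSP1_DETECT__SSP1_DETECT */
				0x2020	/* MX23_PAD_SSP1_DATA0__SSP1_DATA0 */
				0x2060	/* MX23_PAD_SSP1_SCK__SSP1_SCK */
			>;
			fsl,drive-strength = <1>;
			fsl,voltage = <1>;
			fsl,pull-up = <1>;
		};
	};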
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
new file mode 100644
index 0000000..c8e5782
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
@@ -0,0 +1,132 @@
+NVIDIA Tegra20 pinmux controller
+
+Required properties:
+- compatible: "nvidia,tegra20-pinmux"
+- reg: Should contain the register physical address and length for each of
+  the tri-state, mux, pull-up/down, and pad control register sets.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+Tegra's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, tristate, drive strength, etc.
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function or tristate parameter. For this
+reason, even seemingly boolean values are actually tristates in this binding:
+unspecified, off, or on. Unspecified is represented as an absent property,
+and off/on are represented as integer values 0 and 1.
+
+Required subnode-properties:
+- nvidia,pins : An array of strings. Each string contains the name of a pin or
+    group. Valid values for these names are listed below.
+
+Optional subnode-properties:
+- nvidia,function: A string containing the name of the function to mux to the
+  pin or group. Valid values for function names are listed below. See the Tegra
+  TRM to determine which are valid for each pin or group.
+- nvidia,pull: Integer, representing the pull-down/up to apply to the pin.
+    0: none, 1: down, 2: up.
+- nvidia,tristate: Integer.
+    0: drive, 1: tristate.
+- nvidia,high-speed-mode: Integer. Enable high speed mode for the pins.
+    0: no, 1: yes.
+- nvidia,schmitt: Integer. Enables Schmitt Trigger on the input.
+    0: no, 1: yes.
+- nvidia,low-power-mode: Integer. Valid values 0-3. 0 is least power, 3 is
+    most power. Controls the drive power or current. See "Low Power Mode"
+    or "LPMD1" and "LPMD0" in the Tegra TRM.
+- nvidia,pull-down-strength: Integer. Controls drive strength. 0 is weakest.
+    The range of valid values depends on the pingroup. See "CAL_DRVDN" in the
+    Tegra TRM.
+- nvidia,pull-up-strength: Integer. Controls drive strength. 0 is weakest.
+    The range of valid values depends on the pingroup. See "CAL_DRVUP" in the
+    Tegra TRM.
+- nvidia,slew-rate-rising: Integer. Controls rising signal slew rate. 0 is
+    fastest. The range of valid values depends on the pingroup. See
+    "DRVDN_SLWR" in the Tegra TRM.
+- nvidia,slew-rate-falling: Integer. Controls falling signal slew rate. 0 is
+    fastest. The range of valid values depends on the pingroup. See
+    "DRVUP_SLWF" in the Tegra TRM.
+
+Note that many of these properties are only valid for certain specific pins
+or groups. See the Tegra TRM and various pinmux spreadsheets for complete
+details regarding which groups support which functionality. The Linux pinctrl
+driver may also be a useful reference, since it consolidates, disambiguates,
+and corrects data from all those sources.
+
+Valid values for pin and group names are:
+
+  mux groups:
+
+    These all support nvidia,function, nvidia,tristate, and many support
+    nvidia,pull.
+
+    ata, atb, atc, atd, ate, cdev1, cdev2, crtp, csus, dap1, dap2, dap3, dap4,
+    ddc, dta, dtb, dtc, dtd, dte, dtf, gma, gmb, gmc, gmd, gme, gpu, gpu7,
+    gpv, hdint, i2cp, irrx, irtx, kbca, kbcb, kbcc, kbcd, kbce, kbcf, lcsn,
+    ld0, ld1, ld2, ld3, ld4, ld5, ld6, ld7, ld8, ld9, ld10, ld11, ld12, ld13,
+    ld14, ld15, ld16, ld17, ldc, ldi, lhp0, lhp1, lhp2, lhs, lm0, lm1, lpp,
+    lpw0, lpw1, lpw2, lsc0, lsc1, lsck, lsda, lsdi, lspi, lvp0, lvp1, lvs,
+    owc, pmc, pta, rm, sdb, sdc, sdd, sdio1, slxa, slxc, slxd, slxk, spdi,
+    spdo, spia, spib, spic, spid, spie, spif, spig, spih, uaa, uab, uac, uad,
+    uca, ucb, uda.
+
+  tristate groups:
+
+    These only support nvidia,pull.
+
+    ck32, ddrc, pmca, pmcb, pmcc, pmcd, pmce, xm2c, xm2d, ls, lc, ld17_0,
+    ld19_18, ld21_20, ld23_22.
+
+  drive groups:
+
+    With some exceptions, these support nvidia,high-speed-mode,
+    nvidia,schmitt, nvidia,low-power-mode, nvidia,pull-down-strength,
+    nvidia,pull-up-strength, nvidia,slew-rate-rising, nvidia,slew-rate-falling.
+
+    drive_ao1, drive_ao2, drive_at1, drive_at2, drive_cdev1, drive_cdev2,
+    drive_csus, drive_dap1, drive_dap2, drive_dap3, drive_dap4, drive_dbg,
+    drive_lcd1, drive_lcd2, drive_sdmmc2, drive_sdmmc3, drive_spi, drive_uaa,
+    drive_uab, drive_uart2, drive_uart3, drive_vi1, drive_vi2, drive_xm2a,
+    drive_xm2c, drive_xm2d, drive_xm2clk, drive_sdio1, drive_crt, drive_ddc,
+    drive_gma, drive_gmb, drive_gmc, drive_gmd, drive_gme, drive_owr,
+    drive_uda.
+
+Example:
+
+	pinctrl@70000000 {
+		compatible = "nvidia,tegra20-pinmux";
+		reg = < 0x70000014 0x10    /* Tri-state registers */
+			0x70000080 0x20    /* Mux registers */
+			0x700000a0 0x14    /* Pull-up/down registers */
+			0x70000868 0xa8 >; /* Pad control registers */
+	};
+
+Example board file extract:
+
+	pinctrl@70000000 {
+		sdio4_default: sdio4_default {
+			atb {
+				nvidia,pins = "atb", "gma", "gme";
+				nvidia,function = "sdio4";
+				nvidia,pull = <0>;
+				nvidia,tristate = <0>;
+			};
+		};
+	};
+
+	sdhci@c8000600 {
+		pinctrl-names = "default";
+		pinctrl-0 = <&sdio4_default>;
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt
new file mode 100644
index 0000000..c275b70
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra30-pinmux.txt
@@ -0,0 +1,132 @@
+NVIDIA Tegra30 pinmux controller
+
+The Tegra30 pinctrl binding is very similar to the Tegra20 pinctrl binding,
+as described in nvidia,tegra20-pinmux.txt. In fact, this document assumes
+that binding as a baseline, and only documents the differences between the
+two bindings.
+
+Required properties:
+- compatible: "nvidia,tegra30-pinmux"
+- reg: Should contain the register physical address and length for each of
+  the pad control and mux registers.
+
+Tegra30 adds the following optional properties for pin configuration subnodes:
+- nvidia,enable-input: Integer. Enable the pin's input path. 0: no, 1: yes.
+- nvidia,open-drain: Integer. Enable open drain mode. 0: no, 1: yes.
+- nvidia,lock: Integer. Lock the pin configuration against further changes
+    until reset. 0: no, 1: yes.
+- nvidia,io-reset: Integer. Reset the IO path. 0: no, 1: yes.
+
+As with Tegra20, see the Tegra TRM for complete details regarding which groups
+support which functionality.
+
+Valid values for pin and group names are:
+
+  per-pin mux groups:
+
+    These all support nvidia,function, nvidia,tristate, nvidia,pull,
+    nvidia,enable-input, nvidia,lock. Some support nvidia,open-drain,
+    nvidia,io-reset.
+
+    clk_32k_out_pa0, uart3_cts_n_pa1, dap2_fs_pa2, dap2_sclk_pa3,
+    dap2_din_pa4, dap2_dout_pa5, sdmmc3_clk_pa6, sdmmc3_cmd_pa7, gmi_a17_pb0,
+    gmi_a18_pb1, lcd_pwr0_pb2, lcd_pclk_pb3, sdmmc3_dat3_pb4, sdmmc3_dat2_pb5,
+    sdmmc3_dat1_pb6, sdmmc3_dat0_pb7, uart3_rts_n_pc0, lcd_pwr1_pc1,
+    uart2_txd_pc2, uart2_rxd_pc3, gen1_i2c_scl_pc4, gen1_i2c_sda_pc5,
+    lcd_pwr2_pc6, gmi_wp_n_pc7, sdmmc3_dat5_pd0, sdmmc3_dat4_pd1, lcd_dc1_pd2,
+    sdmmc3_dat6_pd3, sdmmc3_dat7_pd4, vi_d1_pd5, vi_vsync_pd6, vi_hsync_pd7,
+    lcd_d0_pe0, lcd_d1_pe1, lcd_d2_pe2, lcd_d3_pe3, lcd_d4_pe4, lcd_d5_pe5,
+    lcd_d6_pe6, lcd_d7_pe7, lcd_d8_pf0, lcd_d9_pf1, lcd_d10_pf2, lcd_d11_pf3,
+    lcd_d12_pf4, lcd_d13_pf5, lcd_d14_pf6, lcd_d15_pf7, gmi_ad0_pg0,
+    gmi_ad1_pg1, gmi_ad2_pg2, gmi_ad3_pg3, gmi_ad4_pg4, gmi_ad5_pg5,
+    gmi_ad6_pg6, gmi_ad7_pg7, gmi_ad8_ph0, gmi_ad9_ph1, gmi_ad10_ph2,
+    gmi_ad11_ph3, gmi_ad12_ph4, gmi_ad13_ph5, gmi_ad14_ph6, gmi_ad15_ph7,
+    gmi_wr_n_pi0, gmi_oe_n_pi1, gmi_dqs_pi2, gmi_cs6_n_pi3, gmi_rst_n_pi4,
+    gmi_iordy_pi5, gmi_cs7_n_pi6, gmi_wait_pi7, gmi_cs0_n_pj0, lcd_de_pj1,
+    gmi_cs1_n_pj2, lcd_hsync_pj3, lcd_vsync_pj4, uart2_cts_n_pj5,
+    uart2_rts_n_pj6, gmi_a16_pj7, gmi_adv_n_pk0, gmi_clk_pk1, gmi_cs4_n_pk2,
+    gmi_cs2_n_pk3, gmi_cs3_n_pk4, spdif_out_pk5, spdif_in_pk6, gmi_a19_pk7,
+    vi_d2_pl0, vi_d3_pl1, vi_d4_pl2, vi_d5_pl3, vi_d6_pl4, vi_d7_pl5,
+    vi_d8_pl6, vi_d9_pl7, lcd_d16_pm0, lcd_d17_pm1, lcd_d18_pm2, lcd_d19_pm3,
+    lcd_d20_pm4, lcd_d21_pm5, lcd_d22_pm6, lcd_d23_pm7, dap1_fs_pn0,
+    dap1_din_pn1, dap1_dout_pn2, dap1_sclk_pn3, lcd_cs0_n_pn4, lcd_sdout_pn5,
+    lcd_dc0_pn6, hdmi_int_pn7, ulpi_data7_po0, ulpi_data0_po1, ulpi_data1_po2,
+    ulpi_data2_po3, ulpi_data3_po4, ulpi_data4_po5, ulpi_data5_po6,
+    ulpi_data6_po7, dap3_fs_pp0, dap3_din_pp1, dap3_dout_pp2, dap3_sclk_pp3,
+    dap4_fs_pp4, dap4_din_pp5, dap4_dout_pp6, dap4_sclk_pp7, kb_col0_pq0,
+    kb_col1_pq1, kb_col2_pq2, kb_col3_pq3, kb_col4_pq4, kb_col5_pq5,
+    kb_col6_pq6, kb_col7_pq7, kb_row0_pr0, kb_row1_pr1, kb_row2_pr2,
+    kb_row3_pr3, kb_row4_pr4, kb_row5_pr5, kb_row6_pr6, kb_row7_pr7,
+    kb_row8_ps0, kb_row9_ps1, kb_row10_ps2, kb_row11_ps3, kb_row12_ps4,
+    kb_row13_ps5, kb_row14_ps6, kb_row15_ps7, vi_pclk_pt0, vi_mclk_pt1,
+    vi_d10_pt2, vi_d11_pt3, vi_d0_pt4, gen2_i2c_scl_pt5, gen2_i2c_sda_pt6,
+    sdmmc4_cmd_pt7, pu0, pu1, pu2, pu3, pu4, pu5, pu6, jtag_rtck_pu7, pv0,
+    pv1, pv2, pv3, ddc_scl_pv4, ddc_sda_pv5, crt_hsync_pv6, crt_vsync_pv7,
+    lcd_cs1_n_pw0, lcd_m1_pw1, spi2_cs1_n_pw2, spi2_cs2_n_pw3, clk1_out_pw4,
+    clk2_out_pw5, uart3_txd_pw6, uart3_rxd_pw7, spi2_mosi_px0, spi2_miso_px1,
+    spi2_sck_px2, spi2_cs0_n_px3, spi1_mosi_px4, spi1_sck_px5, spi1_cs0_n_px6,
+    spi1_miso_px7, ulpi_clk_py0, ulpi_dir_py1, ulpi_nxt_py2, ulpi_stp_py3,
+    sdmmc1_dat3_py4, sdmmc1_dat2_py5, sdmmc1_dat1_py6, sdmmc1_dat0_py7,
+    sdmmc1_clk_pz0, sdmmc1_cmd_pz1, lcd_sdin_pz2, lcd_wr_n_pz3, lcd_sck_pz4,
+    sys_clk_req_pz5, pwr_i2c_scl_pz6, pwr_i2c_sda_pz7, sdmmc4_dat0_paa0,
+    sdmmc4_dat1_paa1, sdmmc4_dat2_paa2, sdmmc4_dat3_paa3, sdmmc4_dat4_paa4,
+    sdmmc4_dat5_paa5, sdmmc4_dat6_paa6, sdmmc4_dat7_paa7, pbb0,
+    cam_i2c_scl_pbb1, cam_i2c_sda_pbb2, pbb3, pbb4, pbb5, pbb6, pbb7,
+    cam_mclk_pcc0, pcc1, pcc2, sdmmc4_rst_n_pcc3, sdmmc4_clk_pcc4,
+    clk2_req_pcc5, pex_l2_rst_n_pcc6, pex_l2_clkreq_n_pcc7,
+    pex_l0_prsnt_n_pdd0, pex_l0_rst_n_pdd1, pex_l0_clkreq_n_pdd2,
+    pex_wake_n_pdd3, pex_l1_prsnt_n_pdd4, pex_l1_rst_n_pdd5,
+    pex_l1_clkreq_n_pdd6, pex_l2_prsnt_n_pdd7, clk3_out_pee0, clk3_req_pee1,
+    clk1_req_pee2, hdmi_cec_pee3, clk_32k_in, core_pwr_req, cpu_pwr_req, owr,
+    pwr_int_n.
+
+  drive groups:
+
+    These all support nvidia,pull-down-strength, nvidia,pull-up-strength,
+    nvidia,slew-rate-rising, nvidia,slew-rate-falling. Most but not all
+    support nvidia,high-speed-mode, nvidia,schmitt, nvidia,low-power-mode.
+
+    ao1, ao2, at1, at2, at3, at4, at5, cdev1, cdev2, cec, crt, csus, dap1,
+    dap2, dap3, dap4, dbg, ddc, dev3, gma, gmb, gmc, gmd, gme, gmf, gmg,
+    gmh, gpv, lcd1, lcd2, owr, sdio1, sdio2, sdio3, spi, uaa, uab, uart2,
+    uart3, uda, vi1.
+
+Example:
+
+	pinctrl@70000000 {
+		compatible = "nvidia,tegra30-pinmux";
+		reg = < 0x70000868 0xd0     /* Pad control registers */
+			0x70003000 0x3e0 >; /* Mux registers */
+	};
+
+Example board file extract:
+
+	pinctrl@70000000 {
+		sdmmc4_default: pinmux {
+			sdmmc4_clk_pcc4 {
+				nvidia,pins =	"sdmmc4_clk_pcc4",
+						"sdmmc4_rst_n_pcc3";
+				nvidia,function = "sdmmc4";
+				nvidia,pull = <0>;
+				nvidia,tristate = <0>;
+			};
+			sdmmc4_dat0_paa0 {
+				nvidia,pins =	"sdmmc4_dat0_paa0",
+						"sdmmc4_dat1_paa1",
+						"sdmmc4_dat2_paa2",
+						"sdmmc4_dat3_paa3",
+						"sdmmc4_dat4_paa4",
+						"sdmmc4_dat5_paa5",
+						"sdmmc4_dat6_paa6",
+						"sdmmc4_dat7_paa7";
+				nvidia,function = "sdmmc4";
+				nvidia,pull = <2>;
+				nvidia,tristate = <0>;
+			};
+		};
+	};
+
+	sdhci@78000400 {
+		pinctrl-names = "default";
+		pinctrl-0 = <&sdmmc4_default>;
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
new file mode 100644
index 0000000..c95ea82
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -0,0 +1,128 @@
+== Introduction ==
+
+Hardware modules that control pin multiplexing or configuration parameters
+such as pull-up/down, tri-state, drive-strength, etc. are designated as pin
+controllers. Each pin controller must be represented as a node in device tree,
+just like any other hardware module.
+
+Hardware modules whose signals are affected by pin configuration are
+designated client devices. Again, each client device must be represented as a
+node in device tree, just like any other hardware module.
+
+For a client device to operate correctly, certain pin controllers must
+set up certain specific pin configurations. Some client devices need a
+single static pin configuration, e.g. set up during initialization. Others
+need to reconfigure pins at run-time, for example to tri-state pins when the
+device is inactive. Hence, each client device can define a set of named
+states. The number and names of those states are defined by the client device's
+own binding.
+
+The common pinctrl bindings defined in this file provide an infrastructure
+for client device device tree nodes to map those state names to the pin
+configuration used by those states.
+
+Note that a pin controller may also be a client device of itself.
+For example, a pin controller may set up its own "active" state when the
+driver loads. This would allow representing a board's static pin configuration
+in a single place, rather than splitting it across multiple client device
+nodes. The decision to do this or not somewhat rests with the author of
+individual board device tree files, and any requirements imposed by the
+bindings for the individual client devices in use by that board, i.e. whether
+they require certain specific named states for dynamic pin configuration.
+
+== Pinctrl client devices ==
+
+For each client device individually, every pin state is assigned an integer
+ID. These numbers start at 0, and are contiguous. For each state ID, a unique
+property exists to define the pin configuration. Each state may also be
+assigned a name. When names are used, another property exists to map from
+those names to the integer IDs.
+
+Each client device's own binding determines the set of states that must be
+defined in its device tree node, and whether it defines the set of state
+IDs that must be provided, or the set of state names that must be
+provided.
+
+Required properties:
+pinctrl-0:	List of phandles, each pointing at a pin configuration
+		node. These referenced pin configuration nodes must be child
+		nodes of the pin controller that they configure. Multiple
+		entries may exist in this list so that multiple pin
+		controllers may be configured, or so that a state may be built
+		from multiple nodes for a single pin controller, each
+		contributing part of the overall configuration. See the next
+		section of this document for details of the format of these
+		pin configuration nodes.
+
+		In some cases, it may be useful to define a state, but for it
+		to be empty. This may be required when a common IP block is
+		used in an SoC either without a pin controller, or where the
+		pin controller does not affect the HW module in question. If
+		the binding for that IP block requires certain pin states to
+		exist, they must still be defined, but may be left empty.
+
+Optional properties:
+pinctrl-1:	List of phandles, each pointing at a pin configuration
+		node within a pin controller.
+...
+pinctrl-n:	List of phandles, each pointing at a pin configuration
+		node within a pin controller.
+pinctrl-names:	The list of names to assign to states. List entry 0 defines the
+		name for integer state ID 0, list entry 1 for state ID 1, and
+		so on.
+
+For example:
+
+	/* For a client device requiring named states */
+	device {
+		pinctrl-names = "active", "idle";
+		pinctrl-0 = <&state_0_node_a>;
+		pinctrl-1 = <&state_1_node_a &state_1_node_b>;
+	};
+
+	/* For the same device if using state IDs */
+	device {
+		pinctrl-0 = <&state_0_node_a>;
+		pinctrl-1 = <&state_1_node_a &state_1_node_b>;
+	};
+
+	/*
+	 * For an IP block whose binding supports pin configuration,
+	 * but in use on an SoC that doesn't have any pin control hardware
+	 */
+	device {
+		pinctrl-names = "active", "idle";
+		pinctrl-0 = <>;
+		pinctrl-1 = <>;
+	};
+
+== Pin controller devices ==
+
+Pin controller devices should contain the pin configuration nodes that client
+devices reference.
+
+For example:
+
+	pincontroller {
+		... /* Standard DT properties for the device itself elided */
+
+		state_0_node_a {
+			...
+		};
+		state_1_node_a {
+			...
+		};
+		state_1_node_b {
+			...
+		};
+	};
+
+The contents of each of those pin configuration child nodes are defined
+entirely by the binding for the individual pin controller device. There
+exists no common standard for this content.
+
+The pin configuration nodes need not be direct children of the pin controller
+device; they may be grandchildren, for example. Whether this is legal, and
+whether there is any interaction between the child and intermediate parent
+nodes, is again defined entirely by the binding for the individual pin
+controller device.
diff --git a/Documentation/devicetree/bindings/pinmux/pinmux_nvidia.txt b/Documentation/devicetree/bindings/pinmux/pinmux_nvidia.txt
deleted file mode 100644
index 36f82db..0000000
--- a/Documentation/devicetree/bindings/pinmux/pinmux_nvidia.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-NVIDIA Tegra 2 pinmux controller
-
-Required properties:
-- compatible : "nvidia,tegra20-pinmux"
-
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt b/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
index 9cf57fd..2f5b6b1 100644
--- a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
@@ -8,6 +8,8 @@
 - startup-delay-us: startup time in microseconds
 - enable-active-high: Polarity of GPIO is Active high
 If this property is missing, the default assumed is Active low.
+- gpio-open-drain: GPIO is open drain type.
+  If this property is missing, the default assumption is false.
 
 Any property defined as part of the core regulator
 binding, defined in regulator.txt, can also be used.
@@ -25,5 +27,6 @@
 		gpio = <&gpio1 16 0>;
 		startup-delay-us = <70000>;
 		enable-active-high;
-		regulator-boot-on
+		regulator-boot-on;
+		gpio-open-drain;
 	};
diff --git a/Documentation/devicetree/bindings/regulator/tps62360-regulator.txt b/Documentation/devicetree/bindings/regulator/tps62360-regulator.txt
new file mode 100644
index 0000000..c8ca6b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/tps62360-regulator.txt
@@ -0,0 +1,44 @@
+TPS62360 Voltage regulators
+
+Required properties:
+- compatible: Must be one of the following:
+	"ti,tps62360"
+	"ti,tps62361"
+	"ti,tps62362"
+	"ti,tps62363"
+- reg: I2C slave address
+
+Optional properties:
+- ti,enable-vout-discharge: Enable output discharge. This is a boolean value.
+- ti,enable-pull-down: Enable pull down. This is a boolean value.
+- ti,vsel0-gpio: GPIO for controlling VSEL0 line.
+  If this property is missing, then assume that there is no GPIO
+  for vsel0 control.
+- ti,vsel1-gpio: GPIO for controlling VSEL1 line.
+  If this property is missing, then assume that there is no GPIO
+  for vsel1 control.
+- ti,vsel0-state-high: Initial state of vsel0 input is high.
+  If this property is missing, then assume the state as low (0).
+- ti,vsel1-state-high: Initial state of vsel1 input is high.
+  If this property is missing, then assume the state as low (0).
+
+Any property defined as part of the core regulator binding, defined in
+regulator.txt, can also be used.
+
+Example:
+
+	abc: tps62360 {
+		compatible = "ti,tps62361";
+		reg =  <0x60>;
+		regulator-name = "tps62361-vout";
+		regulator-min-microvolt = <500000>;
+		regulator-max-microvolt = <1500000>;
+		regulator-boot-on;
+		ti,vsel0-gpio = <&gpio1 16 0>;
+		ti,vsel1-gpio = <&gpio1 17 0>;
+		ti,vsel0-state-high;
+		ti,vsel1-state-high;
+		ti,enable-pull-down;
+		ti,enable-force-pwm;
+		ti,enable-vout-discharge;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/tps6586x.txt b/Documentation/devicetree/bindings/regulator/tps6586x.txt
new file mode 100644
index 0000000..0fcabaa
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/tps6586x.txt
@@ -0,0 +1,97 @@
+TPS6586x family of regulators
+
+Required properties:
+- compatible: "ti,tps6586x"
+- reg: I2C slave address
+- interrupts: the interrupt outputs of the controller
+- #gpio-cells: number of cells to describe a GPIO
+- gpio-controller: mark the device as a GPIO controller
+- regulators: list of regulators provided by this controller, must be named
+  after their hardware counterparts: sm[0-2], ldo[0-9] and ldo_rtc
+
+Each regulator is defined using the standard binding for regulators.
+
+Example:
+
+	pmu: tps6586x@34 {
+		compatible = "ti,tps6586x";
+		reg = <0x34>;
+		interrupts = <0 88 0x4>;
+
+		#gpio-cells = <2>;
+		gpio-controller;
+
+		regulators {
+			sm0_reg: sm0 {
+				regulator-min-microvolt = < 725000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			sm1_reg: sm1 {
+				regulator-min-microvolt = < 725000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			sm2_reg: sm2 {
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <4550000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo0_reg: ldo0 {
+				regulator-name = "PCIE CLK";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo1_reg: ldo1 {
+				regulator-min-microvolt = < 725000>;
+				regulator-max-microvolt = <1500000>;
+			};
+
+			ldo2_reg: ldo2 {
+				regulator-min-microvolt = < 725000>;
+				regulator-max-microvolt = <1500000>;
+			};
+
+			ldo3_reg: ldo3 {
+				regulator-min-microvolt = <1250000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo4_reg: ldo4 {
+				regulator-min-microvolt = <1700000>;
+				regulator-max-microvolt = <2475000>;
+			};
+
+			ldo5_reg: ldo5 {
+				regulator-min-microvolt = <1250000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo6_reg: ldo6 {
+				regulator-min-microvolt = <1250000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo7_reg: ldo7 {
+				regulator-min-microvolt = <1250000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo8_reg: ldo8 {
+				regulator-min-microvolt = <1250000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo9_reg: ldo9 {
+				regulator-min-microvolt = <1250000>;
+				regulator-max-microvolt = <3300000>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index 2c3cd41..9cc4444 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -3,6 +3,8 @@
 Required properties:
 - compatible : "fsl,sgtl5000".
 
+- reg : the I2C address of the device
+
 Example:
 
 codec: sgtl5000@0a {
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 2a596a4..950856b 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -276,3 +276,11 @@
   devm_regulator_get()
   devm_regulator_put()
   devm_regulator_bulk_get()
+
+CLOCK
+  devm_clk_get()
+  devm_clk_put()
+
+PINCTRL
+  devm_pinctrl_get()
+  devm_pinctrl_put()
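As a rough, hypothetical sketch (not part of this patch) of how a driver might use the managed clock and pinctrl helpers listed above; the probe function and the clock id "apb_pclk" are placeholders:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/pinctrl/consumer.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		struct pinctrl *p;

		/*
		 * Managed resources are released automatically when the driver
		 * detaches, so no explicit devm_clk_put()/devm_pinctrl_put()
		 * calls are needed on the error or remove paths.
		 */
		clk = devm_clk_get(&pdev->dev, "apb_pclk");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		p = devm_pinctrl_get(&pdev->dev);
		if (IS_ERR(p))
			return PTR_ERR(p);

		return clk_prepare_enable(clk);
	}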
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 03ca210..e4b5775 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -539,3 +539,13 @@
 Why:	setitimer is not returning -EFAULT if user pointer is NULL. This
 	violates the spec.
 Who:	Sasikantha Babu <sasikanth.v19@gmail.com>
+
+----------------------------
+
+What:	V4L2_CID_HCENTER, V4L2_CID_VCENTER V4L2 controls
+When:	3.7
+Why:	The V4L2_CID_VCENTER, V4L2_CID_HCENTER controls have been deprecated
+	for about 4 years and they are not used by any mainline driver.
+	There are newer controls (V4L2_CID_PAN*, V4L2_CID_TILT*) that provide
+	similar functionality.
+Who:	Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
diff --git a/Documentation/filesystems/gfs2-glocks.txt b/Documentation/filesystems/gfs2-glocks.txt
index 0494f78..fcc7995 100644
--- a/Documentation/filesystems/gfs2-glocks.txt
+++ b/Documentation/filesystems/gfs2-glocks.txt
@@ -61,7 +61,9 @@
 go_dump          | Called to print content of object for debugfs file, or on
                  | error to dump glock to the log.
 go_type          | The type of the glock, LM_TYPE_.....
-go_min_hold_time | The minimum hold time
+go_callback	 | Called if the DLM sends a callback to drop this lock
+go_flags	 | GLOF_ASPACE is set if the glock has an address space
+                 | associated with it
 
 The minimum hold time for each lock is the time after a remote lock
 grant for which we ignore remote demote requests. This is in order to
@@ -89,6 +91,7 @@
 go_lock       |       Yes               |       No
 go_unlock     |       Yes               |       No
 go_dump       |       Sometimes         |       Yes
+go_callback   |       Sometimes (N/A)   |       Yes
 
 N.B. Operations must not drop either the bit lock or the spinlock
 if it is held on entry. go_dump and do_demote_ok must never block.
@@ -111,4 +114,118 @@
 glock is used in conjunction with the i_nlink field in the inode to
 determine the lifetime of the inode in question. Locking of inodes
 is on a per-inode basis. Locking of rgrps is on a per rgrp basis.
+In general we prefer to take local locks before cluster locks.
+
+                            Glock Statistics
+                           ------------------
+
+The stats are divided into two sets: those relating to the
+super block and those relating to an individual glock. The
+super block stats are done on a per-cpu basis in order to
+try to reduce the overhead of gathering them. They are also
+further divided by glock type. All timings are in nanoseconds.
+
+In the case of both the super block and glock statistics,
+the same information is gathered in each case. The super
+block timing statistics are used to provide default values for
+the glock timing statistics, so that newly created glocks
+should have, as far as possible, a sensible starting point.
+The per-glock counters are initialised to zero when the
+glock is created. The per-glock statistics are lost when
+the glock is ejected from memory.
+
+The statistics are divided into three pairs of mean and
+variance, plus two counters. The mean/variance pairs are
+smoothed exponential estimates and the algorithm used is
+one which will be very familiar to those used to the calculation
+of round trip times in network code. See "TCP/IP Illustrated,
+Volume 1", W. Richard Stevens, sect 21.3, "Round-Trip Time Measurement",
+p. 299 and onwards. Also, Volume 2, Sect. 25.10, p. 838 and onwards.
+Unlike the TCP/IP Illustrated case, the mean and variance are
+not scaled, but are in units of integer nanoseconds.
+
+The three pairs of mean/variance measure the following
+things:
+
+ 1. DLM lock time (non-blocking requests)
+ 2. DLM lock time (blocking requests)
+ 3. Inter-request time (again to the DLM)
+
+A non-blocking request is one which will complete right
+away, whatever the state of the DLM lock in question. That
+currently means any requests when (a) the current state of
+the lock is exclusive, i.e. a lock demotion, (b) the requested
+state is either null or unlocked (again, a demotion) or (c) the
+"try lock" flag is set. A blocking request covers all the other
+lock requests.
+
+There are two counters. The first is there primarily to show
+how many lock requests have been made, and thus how much data
+has gone into the mean/variance calculations. The other counter
+counts the queuing of holders at the top layer of the glock
+code. Hopefully that number will be a lot larger than the number
+of dlm lock requests issued.
+
+So why gather these statistics? There are several reasons
+we'd like to get a better idea of these timings:
+
+1. To be able to better set the glock "min hold time"
+2. To spot performance issues more easily
+3. To improve the algorithm for selecting resource groups for
+allocation (to base it on lock wait time, rather than blindly
+using a "try lock")
+
+Due to the smoothing action of the updates, a step change in
+some input quantity being sampled will only fully be taken
+into account after 8 samples (or 4 for the variance) and this
+needs to be carefully considered when interpreting the
+results.
+
+Knowing both the time it takes a lock request to complete and
+the average time between lock requests for a glock means we
+can compute the total percentage of the time for which the
+node is able to use a glock vs. time that the rest of the
+cluster has its share. That will be very useful when setting
+the lock min hold time.
+
+Great care has been taken to ensure that we
+measure exactly the quantities that we want, as accurately
+as possible. There are always inaccuracies in any
+measuring system, but I hope this is as accurate as we
+can reasonably make it.
+
+Per sb stats can be found here:
+/sys/kernel/debug/gfs2/<fsname>/sbstats
+Per glock stats can be found here:
+/sys/kernel/debug/gfs2/<fsname>/glstats
+
+This assumes that debugfs is mounted on /sys/kernel/debug and
+that <fsname> is replaced with the name of the gfs2 filesystem
+in question.
+
+The abbreviations used in the output are as follows:
+
+srtt     - Smoothed round trip time for non-blocking dlm requests
+srttvar  - Variance estimate for srtt
+srttb    - Smoothed round trip time for (potentially) blocking dlm requests
+srttvarb - Variance estimate for srttb
+sirt     - Smoothed inter-request time (for dlm requests)
+sirtvar  - Variance estimate for sirt
+dlm      - Number of dlm requests made (dcnt in glstats file)
+queue    - Number of glock requests queued (qcnt in glstats file)
+
+The sbstats file contains a set of these stats for each glock type (so 8 lines
+for each type) and for each cpu (one column per cpu). The glstats file contains
+a set of these stats for each glock in a similar format to the glocks file, but
+using the format mean/variance for each of the timing stats.
+
+The gfs2_glock_lock_time tracepoint prints out the current values of the stats
+for the glock in question, along with some additional information on each dlm
+reply that is received:
+
+status - The status of the dlm request
+flags  - The dlm request flags
+tdiff  - The time taken by this specific request
+(remaining fields as per above list)
+
 
diff --git a/Documentation/filesystems/gfs2.txt b/Documentation/filesystems/gfs2.txt
index 4cda926..cc4f230 100644
--- a/Documentation/filesystems/gfs2.txt
+++ b/Documentation/filesystems/gfs2.txt
@@ -1,7 +1,7 @@
 Global File System
 ------------------
 
-http://sources.redhat.com/cluster/wiki/
+https://fedorahosted.org/cluster/wiki/HomePage
 
 GFS is a cluster file system. It allows a cluster of computers to
 simultaneously use a block device that is shared between them (with FC,
@@ -30,7 +30,8 @@
 
 If you are using Fedora, you need to install the gfs2-utils package
 and, for lock_dlm, you will also need to install the cman package
-and write a cluster.conf as per the documentation.
+and write a cluster.conf as per the documentation. For F17 and above,
+cman has been replaced by the dlm package.
 
 GFS2 is not on-disk compatible with previous versions of GFS, but it
 is pretty close.
@@ -39,8 +40,6 @@
   fsck.gfs2		to repair a filesystem
   gfs2_grow		to expand a filesystem online
   gfs2_jadd		to add journals to a filesystem online
-  gfs2_tool		to manipulate, examine and tune a filesystem
-  gfs2_quota	to examine and change quota values in a filesystem
+  tunegfs2		to manipulate, examine and tune a filesystem
   gfs2_convert	to convert a gfs filesystem to gfs2 in-place
-  mount.gfs2	to help mount(8) mount a filesystem
   mkfs.gfs2		to make a filesystem
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index b7413cb..ef088e5 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -996,7 +996,6 @@
  snmp          SNMP data                                                       
  sockstat      Socket statistics                                               
  tcp           TCP  sockets                                                    
- tr_rif        Token ring RIF routing table                                    
  udp           UDP sockets                                                     
  unix          UNIX domain sockets                                             
  wireless      Wireless interface data (Wavelan etc)                           
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c1601e5..f995195 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2161,6 +2161,9 @@
 				on: Turn realloc on
 		realloc		same as realloc=on
 		noari		do not use PCIe ARI.
+		pcie_scan_all	Scan all possible PCIe devices.  Otherwise we
+				only look for one device below a PCIe downstream
+				port.
 
 	pcie_aspm=	[PCIE] Forcibly enable or disable PCIe Active State Power
 			Management.
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 9ad9dde..2cc3c77 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -1,7 +1,5 @@
 00-INDEX
 	- this file
-3c359.txt
-	- information on the 3Com TokenLink Velocity XL (3c5359) driver.
 3c505.txt
 	- information on the 3Com EtherLink Plus (3c505) driver.
 3c509.txt
@@ -142,8 +140,6 @@
 	- Design of the network interface message level setting (NETIF_MSG_*).
 nfc.txt
 	- The Linux Near Field Communication (NFS) subsystem.
-olympic.txt
-	- IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info.
 openvswitch.txt
 	- Open vSwitch developer documentation.
 operstates.txt
@@ -184,8 +180,6 @@
 	- SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info.
 smc9.txt
 	- the driver for SMC's 9000 series of Ethernet cards
-smctr.txt
-	- SMC TokenCard TokenRing Linux driver info.
 spider-net.txt
 	- README for the Spidernet Driver (as found in PS3 / Cell BE).
 stmmac.txt
@@ -200,8 +194,6 @@
 	- kernel tuning options for low rate 'thin' TCP streams.
 tlan.txt
 	- ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info.
-tms380tr.txt
-	- SysKonnect Token Ring ISA/PCI adapter driver info.
 tproxy.txt
 	- Transparent proxy support user guide.
 tuntap.txt
diff --git a/Documentation/networking/3c359.txt b/Documentation/networking/3c359.txt
deleted file mode 100644
index dadfe81..0000000
--- a/Documentation/networking/3c359.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-
-3COM PCI TOKEN LINK VELOCITY XL TOKEN RING CARDS README
-
-Release 0.9.0 - Release   
-	Jul 17th 2000 Mike Phillips 
-
-	1.2.0 - Final
-	Feb 17th 2002 Mike Phillips 
-	Updated for submission to the 2.4.x kernel.
-
-Thanks:
-	Terry Murphy from 3Com for tech docs and support,
-	Adam D. Ligas for testing the driver.
- 
-Note:
-	This driver will NOT work with the 3C339 Token Ring cards, you need
-to use the tms380 driver instead.
-
-Options:
-
-The driver accepts three options: ringspeed, pkt_buf_sz and message_level.
-
-These options can be specified differently for each card found. 
-
-ringspeed:  Has one of three settings 0 (default), 4 or 16.  0 will 
-make the card autosense the ringspeed and join at the appropriate speed, 
-this will be the default option for most people.  4 or 16 allow you to 
-explicitly force the card to operate at a certain speed.  The card will fail 
-if you try to insert it at the wrong speed. (Although some hubs will allow 
-this so be *very* careful).  The main purpose for explicitly setting the ring
-speed is for when the card is first on the ring.  In autosense mode, if the card
-cannot detect any active monitors on the ring it will open at the same speed as
-its last opening. This can be hazardous if this speed does not match the speed
-you want the ring to operate at.  
-
-pkt_buf_sz:  This is this initial receive buffer allocation size.  This will
-default to 4096 if no value is entered. You may increase performance of the 
-driver by setting this to a value larger than the network packet size, although
-the driver now re-sizes buffers based on MTU settings as well. 
-
-message_level: Controls level of messages created by the driver. Defaults to 0:
-which only displays start-up and critical messages.  Presently any non-zero 
-value will display all soft messages as well.  NB This does not turn 
-debugging messages on, that must be done by modified the source code.
-
-Variable MTU size:
-
-The driver can handle a MTU size up to either 4500 or 18000 depending upon 
-ring speed.  The driver also changes the size of the receive buffers as part
-of the mtu re-sizing, so if you set mtu = 18000, you will need to be able
-to allocate 16 * (sk_buff with 18000 buffer size) call it 18500 bytes per ring 
-position = 296,000 bytes of memory space, plus of course anything 
-necessary for the tx sk_buff's.  Remember this is per card, so if you are
-building routers, gateway's etc, you could start to use a lot of memory
-real fast.
-
-2/17/02 Mike Phillips
-
diff --git a/Documentation/networking/3c509.txt b/Documentation/networking/3c509.txt
index dcc9eaf..fbf722e 100644
--- a/Documentation/networking/3c509.txt
+++ b/Documentation/networking/3c509.txt
@@ -25,7 +25,6 @@
   3c509B (later revision of the ISA card; supports full-duplex)
   3c589 (PCMCIA)
   3c589B (later revision of the 3c589; supports full-duplex)
-  3c529 (MCA)
   3c579 (EISA)
 
 Large portions of this documentation were heavily borrowed from the guide
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 221ad0c..75a5923 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -1,5 +1,3 @@
-[state: 21-08-2011]
-
 BATMAN-ADV
 ----------
 
@@ -67,18 +65,19 @@
 All  mesh  wide  settings  can be found in batman's own interface
 folder:
 
-#  ls  /sys/class/net/bat0/mesh/
-# aggregated_ogms   fragmentation gw_sel_class   vis_mode
-# ap_isolation      gw_bandwidth  hop_penalty
-# bonding           gw_mode       orig_interval
+# ls /sys/class/net/bat0/mesh/
+# aggregated_ogms        gw_bandwidth           log_level
+# ap_isolation           gw_mode                orig_interval
+# bonding                gw_sel_class           routing_algo
+# bridge_loop_avoidance  hop_penalty            vis_mode
+# fragmentation
 
 
 There is a special folder for debugging information:
 
 #  ls /sys/kernel/debug/batman_adv/bat0/
-#  gateways     socket        transtable_global  vis_data
-#  originators  softif_neigh  transtable_local
-
+# bla_claim_table    log                socket             transtable_local
+# gateways           originators        transtable_global  vis_data
 
 Some of the files contain all sort of status information  regard-
 ing  the  mesh  network.  For  example, you can view the table of
@@ -202,12 +201,13 @@
 1 - Enable messages related to routing / flooding / broadcasting
 2 - Enable messages related to route added / changed / deleted
 4 - Enable messages related to translation table operations
-7 - Enable all messages
+8 - Enable messages related to bridge loop avoidance
+15 - Enable all messages
 
 The debug output can be changed at runtime  using  the  file
 /sys/class/net/bat0/mesh/log_level. e.g.
 
-# echo 2 > /sys/class/net/bat0/mesh/log_level
+# echo 6 > /sys/class/net/bat0/mesh/log_level
 
 will enable debug messages for when routes change.
 
diff --git a/Documentation/networking/fore200e.txt b/Documentation/networking/fore200e.txt
index f648eb2..d52af53 100644
--- a/Documentation/networking/fore200e.txt
+++ b/Documentation/networking/fore200e.txt
@@ -11,12 +11,10 @@
 
 The intent is to enable the use of different models of FORE adapters at the
 same time, by hosts that have several bus interfaces (such as PCI+SBUS,
-PCI+MCA or PCI+EISA).
+or PCI+EISA).
 
 Only PCI and SBUS devices are currently supported by the driver, but support
-for other bus interfaces such as EISA should not be too hard to add (this may
-be more tricky for the MCA bus, though, as FORE made some MCA-specific
-modifications to the adapter's AALI interface).
+for other bus interfaces such as EISA should not be too hard to add.
 
 
 Firmware Copyright Notice
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index 1dc1c24..703cf43 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -4,15 +4,22 @@
 
 Introduction
 ============
+The IEEE 802.15.4 working group focuses on standardization of the bottom
+two layers: Medium Access Control (MAC) and Physical (PHY). There
+are mainly two options available for the upper layers:
+ - ZigBee - a proprietary protocol from the ZigBee Alliance
+ - 6LoWPAN - IPv6 networking over low-rate personal area networks
 
 The Linux-ZigBee project goal is to provide complete implementation
-of IEEE 802.15.4 / ZigBee / 6LoWPAN protocols. IEEE 802.15.4 is a stack
+of IEEE 802.15.4 and 6LoWPAN protocols. IEEE 802.15.4 is a stack
 of protocols for organizing Low-Rate Wireless Personal Area Networks.
 
-Currently only IEEE 802.15.4 layer is implemented. We have chosen
-to use plain Berkeley socket API, the generic Linux networking stack
-to transfer IEEE 802.15.4 messages and a special protocol over genetlink
-for configuration/management
+The stack is composed of three main parts:
+ - IEEE 802.15.4 layer; we have chosen to use the plain Berkeley socket API,
+   the generic Linux networking stack to transfer IEEE 802.15.4 messages
+   and a special protocol over genetlink for configuration/management
+ - MAC - provides access to the shared channel and reliable data delivery
+ - PHY - represents device drivers
 
 
 Socket API
@@ -29,15 +36,6 @@
 One can use SOCK_RAW for passing raw data towards device xmit function. YMMV.
 
 
-MLME - MAC Level Management
-============================
-
-Most of IEEE 802.15.4 MLME interfaces are directly mapped on netlink commands.
-See the include/net/nl802154.h header. Our userspace tools package
-(see above) provides CLI configuration utility for radio interfaces and simple
-coordinator for IEEE 802.15.4 networks as an example users of MLME protocol.
-
-
 Kernel side
 =============
 
@@ -51,6 +49,15 @@
 Those types of devices require different approach to be hooked into Linux kernel.
 
 
+MLME - MAC Level Management
+============================
+
+Most of IEEE 802.15.4 MLME interfaces are directly mapped on netlink commands.
+See the include/net/nl802154.h header. Our userspace tools package
+(see above) provides CLI configuration utility for radio interfaces and simple
+coordinator for IEEE 802.15.4 networks as an example users of MLME protocol.
+
+
 HardMAC
 =======
 
@@ -73,11 +80,47 @@
 SoftMAC
 =======
 
-We are going to provide intermediate layer implementing IEEE 802.15.4 MAC
-in software. This is currently WIP.
+The MAC is the middle layer in the IEEE 802.15.4 Linux stack. At the moment it
+provides an interface for driver registration and management of slave interfaces.
+
+NOTE: Currently only the monitor device type is supported - it is the IEEE
+802.15.4 stack interface for network sniffers (e.g. Wireshark).
+
+This layer is going to be extended soon.
 
 See header include/net/mac802154.h and several drivers in drivers/ieee802154/.
 
+
+Device drivers API
+==================
+
+The header include/net/mac802154.h defines the following functions:
+ - struct ieee802154_dev *ieee802154_alloc_device
+   (size_t priv_size, struct ieee802154_ops *ops):
+   allocates an IEEE 802.15.4 compatible device
+
+ - void ieee802154_free_device(struct ieee802154_dev *dev):
+   frees an allocated device
+
+ - int ieee802154_register_device(struct ieee802154_dev *dev):
+   registers the PHY in the system
+
+ - void ieee802154_unregister_device(struct ieee802154_dev *dev):
+   unregisters the PHY from the system
+
+Moreover, the IEEE 802.15.4 device operations structure (struct ieee802154_ops)
+should be filled in.
+
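+A minimal registration sketch (illustrative only; "my_priv", "my_ops"
+and the assumption that allocation returns NULL on failure are not
+taken from the kernel sources):
+
+	struct my_priv {
+		int dummy;	/* driver private data would live here */
+	};
+
+	static struct ieee802154_ops my_ops = {
+		/* fill in the device operations here */
+	};
+
+	static struct ieee802154_dev *my_dev;
+
+	static int my_register(void)
+	{
+		int ret;
+
+		my_dev = ieee802154_alloc_device(sizeof(struct my_priv), &my_ops);
+		if (!my_dev)
+			return -ENOMEM;
+
+		ret = ieee802154_register_device(my_dev);
+		if (ret)
+			ieee802154_free_device(my_dev);
+		return ret;
+	}
+
+	static void my_unregister(void)
+	{
+		ieee802154_unregister_device(my_dev);
+		ieee802154_free_device(my_dev);
+	}
+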
+Fake drivers
+============
+
+In addition, there are two drivers available which simulate real devices with
+HardMAC (fakehard) and SoftMAC (fakelb - IEEE 802.15.4 loopback driver)
+interfaces. This option provides the possibility to test and debug the stack
+without using real hardware.
+
+See sources in drivers/ieee802154 folder for more details.
+
+
 6LoWPAN Linux implementation
 ============================
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index bd80ba5..6f896b9 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -147,7 +147,7 @@
 	(if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
 	if it is <= 0.
 	Possible values are [-31, 31], inclusive.
-	Default: 2
+	Default: 1
 
 tcp_allowed_congestion_control - STRING
 	Show/set the congestion control choices available to non-privileged
@@ -190,6 +190,20 @@
 tcp_dsack - BOOLEAN
 	Allows TCP to send "duplicate" SACKs.
 
+tcp_early_retrans - INTEGER
+	Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold
+	for triggering fast retransmit when the amount of outstanding data is
+	small and when no previously unsent data can be transmitted (such
+	that limited transmit could be used).
+	Possible values:
+		0 disables ER
+		1 enables ER
+		2 enables ER but delays fast recovery and fast retransmit
+		  by a fourth of RTT. This mitigates the case where the
+		  connection falsely recovers when the network has a small
+		  degree of reordering (less than 3 packets).
+	Default: 2
+
 tcp_ecn - INTEGER
 	Enable Explicit Congestion Notification (ECN) in TCP. ECN is only
 	used when both ends of the TCP flow support it. It is useful to
@@ -410,7 +424,7 @@
 	net.core.rmem_max.  Calling setsockopt() with SO_RCVBUF disables
 	automatic tuning of that socket's receive buffer size, in which
 	case this value is ignored.
-	Default: between 87380B and 4MB, depending on RAM size.
+	Default: between 87380B and 6MB, depending on RAM size.
 
 tcp_sack - BOOLEAN
 	Enable select acknowledgments (SACKS).
@@ -1287,13 +1301,22 @@
 bridge-nf-filter-vlan-tagged - BOOLEAN
 	1 : pass bridged vlan-tagged ARP/IP/IPv6 traffic to {arp,ip,ip6}tables.
 	0 : disable this.
-	Default: 1
+	Default: 0
 
 bridge-nf-filter-pppoe-tagged - BOOLEAN
 	1 : pass bridged pppoe-tagged IP/IPv6 traffic to {ip,ip6}tables.
 	0 : disable this.
-	Default: 1
+	Default: 0
 
+bridge-nf-pass-vlan-input-dev - BOOLEAN
+	1: if bridge-nf-filter-vlan-tagged is enabled, try to find a vlan
+	interface on the bridge and set the netfilter input device to the vlan.
+	This allows use of e.g. "iptables -i br0.1" and makes the REDIRECT
+	target work with vlan-on-top-of-bridge interfaces.  When no matching
+	vlan interface is found, or this switch is off, the input device is
+	set to the bridge interface.
+	0: disable bridge netfilter vlan interface lookup.
+	Default: 0
 
 proc/sys/net/sctp/* Variables:
 
@@ -1484,11 +1507,8 @@
 
 
 /proc/sys/net/core/*
-dev_weight - INTEGER
-	The maximum number of packets that kernel can handle on a NAPI
-	interrupt, it's a Per-CPU variable.
+	Please see: Documentation/sysctl/net.txt for descriptions of these entries.
 
-	Default: 64
 
 /proc/sys/net/unix/*
 max_dgram_qlen - INTEGER
diff --git a/Documentation/networking/mac80211-auth-assoc-deauth.txt b/Documentation/networking/mac80211-auth-assoc-deauth.txt
index e0a2aa58..d7a15fe 100644
--- a/Documentation/networking/mac80211-auth-assoc-deauth.txt
+++ b/Documentation/networking/mac80211-auth-assoc-deauth.txt
@@ -23,7 +23,7 @@
 end note
 end
 
-mac80211->driver: config(channel, non-HT)
+mac80211->driver: config(channel, channel type)
 mac80211->driver: bss_info_changed(set BSSID, basic rate bitmap)
 mac80211->driver: sta_state(AP, exists)
 
@@ -51,7 +51,7 @@
 end
 
 alt not previously authenticated (FT)
-mac80211->driver: config(channel, non-HT)
+mac80211->driver: config(channel, channel type)
 mac80211->driver: bss_info_changed(set BSSID, basic rate bitmap)
 mac80211->driver: sta_state(AP, exists)
 mac80211->driver: sta_state(AP, authenticated)
@@ -67,10 +67,6 @@
 
 mac80211->driver: set up QoS parameters
 
-alt is HT channel
-mac80211->driver: config(channel, HT params)
-end
-
 mac80211->driver: bss_info_changed(QoS, HT, associated with AID)
 mac80211->userspace: associated
 
@@ -95,5 +91,5 @@
 mac80211->driver: sta_state(AP,not-exists)
 mac80211->driver: turn off powersave
 mac80211->driver: bss_info_changed(clear BSSID, not associated, no QoS, ...)
-mac80211->driver: config(non-HT channel type)
+mac80211->driver: config(channel type to non-HT)
 mac80211->userspace: disconnected
diff --git a/Documentation/networking/olympic.txt b/Documentation/networking/olympic.txt
deleted file mode 100644
index b95b5bf..0000000
--- a/Documentation/networking/olympic.txt
+++ /dev/null
@@ -1,79 +0,0 @@
-
-IBM PCI Pit/Pit-Phy/Olympic CHIPSET BASED TOKEN RING CARDS README
-
-Release 0.2.0 - Release    
-	June 8th 1999 Peter De Schrijver & Mike Phillips
-Release 0.9.C - Release
-	April 18th 2001 Mike Phillips
-
-Thanks:
-Erik De Cock, Adrian Bridgett and Frank Fiene for their 
-patience and testing.
-Donald Champion for the cardbus support
-Kyle Lucke for the dma api changes.   
-Jonathon Bitner for hardware support. 
-Everybody on linux-tr for their continued support.  
- 
-Options:
-
-The driver accepts four options: ringspeed, pkt_buf_sz,  
-message_level and network_monitor.
-
-These options can be specified differently for each card found. 
-
-ringspeed:  Has one of three settings 0 (default), 4 or 16.  0 will 
-make the card autosense the ringspeed and join at the appropriate speed, 
-this will be the default option for most people.  4 or 16 allow you to 
-explicitly force the card to operate at a certain speed.  The card will fail 
-if you try to insert it at the wrong speed. (Although some hubs will allow 
-this so be *very* careful).  The main purpose for explicitly setting the ring
-speed is for when the card is first on the ring.  In autosense mode, if the card
-cannot detect any active monitors on the ring it will not open, so you must 
-re-init the card at the appropriate speed.  Unfortunately at present the only
-way of doing this is rmmod and insmod which is a bit tough if it is compiled
-in the kernel.
-
-pkt_buf_sz:  This is this initial receive buffer allocation size.  This will
-default to 4096 if no value is entered. You may increase performance of the 
-driver by setting this to a value larger than the network packet size, although
-the driver now re-sizes buffers based on MTU settings as well. 
-
-message_level: Controls level of messages created by the driver. Defaults to 0:
-which only displays start-up and critical messages.  Presently any non-zero 
-value will display all soft messages as well.  NB This does not turn 
-debugging messages on, that must be done by modified the source code.
-
-network_monitor: Any non-zero value will provide a quasi network monitoring 
-mode.  All unexpected MAC frames (beaconing etc.) will be received
-by the driver and the source and destination addresses printed. 
-Also an entry will be added in  /proc/net called olympic_tr%d, where tr%d
-is the registered device name, i.e tr0, tr1, etc. This displays low
-level information about the configuration of the ring and the adapter.
-This feature has been designed for network administrators to assist in 
-the diagnosis of network / ring problems. (This used to OLYMPIC_NETWORK_MONITOR,
-but has now changed to allow each adapter to be configured differently and
-to alleviate the necessity to re-compile olympic to turn the option on).
-
-Multi-card:
-
-The driver will detect multiple cards and will work with shared interrupts,
-each card is assigned the next token ring device, i.e. tr0 , tr1, tr2.  The 
-driver should also happily reside in the system with other drivers.  It has 
-been tested with ibmtr.c running, and I personally have had one Olicom PCI 
-card and two IBM olympic cards (all on the same interrupt), all running
-together. 
-
-Variable MTU size:
-
-The driver can handle a MTU size up to either 4500 or 18000 depending upon 
-ring speed.  The driver also changes the size of the receive buffers as part
-of the mtu re-sizing, so if you set mtu = 18000, you will need to be able
-to allocate 16 * (sk_buff with 18000 buffer size) call it 18500 bytes per ring 
-position = 296,000 bytes of memory space, plus of course anything 
-necessary for the tx sk_buff's.  Remember this is per card, so if you are
-building routers, gateway's etc, you could start to use a lot of memory
-real fast.
-
-
-6/8/99 Peter De Schrijver and Mike Phillips
-
diff --git a/Documentation/networking/smctr.txt b/Documentation/networking/smctr.txt
deleted file mode 100644
index 9af25b8..0000000
--- a/Documentation/networking/smctr.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-Text File for the SMC TokenCard TokenRing Linux driver (smctr.c).
-        By Jay Schulist <jschlst@samba.org>
-
-The Linux SMC Token Ring driver works with the SMC TokenCard Elite (8115T) 
-ISA and SMC TokenCard Elite/A (8115T/A) MCA adapters.
-
-Latest information on this driver can be obtained on the Linux-SNA WWW site.
-Please point your browser to: http://www.linux-sna.org
-
-This driver is rather simple to use. Select Y to Token Ring adapter support
-in the kernel configuration. A choice for SMC Token Ring adapters will
-appear. This drives supports all SMC ISA/MCA adapters. Choose this
-option. I personally recommend compiling the driver as a module (M), but if you
-you would like to compile it statically answer Y instead.
-
-This driver supports multiple adapters without the need to load multiple copies
-of the driver. You should be able to load up to 7 adapters without any kernel
-modifications, if you are in need of more please contact the maintainer of this
-driver.
-
-Load the driver either by lilo/loadlin or as a module. When a module using the
-following command will suffice for most:
-
-# modprobe smctr
-smctr.c: v1.00 12/6/99 by jschlst@samba.org
-tr0: SMC TokenCard 8115T at Io 0x300, Irq 10, Rom 0xd8000, Ram 0xcc000.
-
-Now just setup the device via ifconfig and set and routes you may have. After
-this you are ready to start sending some tokens.
-
-Errata:
-1). For anyone wondering where to pick up the SMC adapters please browse
-    to http://www.smc.com
-
-2). If you are the first/only Token Ring Client on a Token Ring LAN, please
-    specify the ringspeed with the ringspeed=[4/16] module option. If no
-    ringspeed is specified the driver will attempt to autodetect the ring
-    speed and/or if the adapter is the first/only station on the ring take
-    the appropriate actions. 
-
-    NOTE: Default ring speed is 16MB UTP.
-
-3). PnP support for this adapter sucks. I recommend hard setting the 
-    IO/MEM/IRQ by the jumpers on the adapter. If this is not possible
-    load the module with the following io=[ioaddr] mem=[mem_addr]
-    irq=[irq_num].
-
-    The following IRQ, IO, and MEM settings are supported.
-
-    IO ports:
-    0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300,
-    0x320, 0x340, 0x360, 0x380.
-
-    IRQs:
-    2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15
-
-    Memory addresses:
-    0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000,
-    0xB8000, 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000,
-    0xD0000, 0xD4000, 0xD8000, 0xDC000, 0xE0000, 0xE4000,
-    0xE8000, 0xEC000, 0xF0000, 0xF4000, 0xF8000, 0xFC000
-
-This driver is under the GNU General Public License. Its Firmware image is
-included as an initialized C-array and is licensed by SMC to the Linux
-users of this driver. However no warranty about its fitness is expressed or
-implied by SMC.
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index d0aeead..ab1e8d7 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -111,11 +111,12 @@
 	int phy_addr;
 	int interface;
 	struct stmmac_mdio_bus_data *mdio_bus_data;
-	int pbl;
+	struct stmmac_dma_cfg *dma_cfg;
 	int clk_csr;
 	int has_gmac;
 	int enh_desc;
 	int tx_coe;
+	int rx_coe;
 	int bugged_jumbo;
 	int pmt;
 	int force_sf_dma_mode;
@@ -136,10 +137,12 @@
  o pbl: the Programmable Burst Length is maximum number of beats to
        be transferred in one DMA transaction.
        GMAC also enables the 4xPBL by default.
- o clk_csr: CSR Clock range selection.
+ o clk_csr: fixed CSR Clock range selection.
  o has_gmac: uses the GMAC core.
  o enh_desc: if sets the MAC will use the enhanced descriptor structure.
  o tx_coe: core is able to perform the tx csum in HW.
+ o rx_coe: the core supports three checksum offloading engine types:
+	   type_1, type_2 (full csum) and no RX COE.
  o bugged_jumbo: some HWs are not able to perform the csum in HW for
 		over-sized frames due to limited buffer sizes.
 		Setting this flag the csum will be done in SW on
@@ -160,7 +163,7 @@
  o custom_cfg: this is a custom configuration that can be passed while
 	      initialising the resources.
 
-The we have:
+For the MDIO bus we have:
 
  struct stmmac_mdio_bus_data {
 	int bus_id;
@@ -177,10 +180,28 @@
  o irqs: list of IRQs, one per PHY.
  o probed_phy_irq: if irqs is NULL, use this for probed PHY.
 
+
+For DMA engine we have the following internal fields that should be
+tuned according to the HW capabilities.
+
+struct stmmac_dma_cfg {
+	int pbl;
+	int fixed_burst;
+	int burst_len_supported;
+};
+
+Where:
+ o pbl: Programmable Burst Length
+ o fixed_burst: program the DMA to use the fixed burst mode
+ o burst_len: this is the value we put in the register;
+	      supported values are provided as macros in the
+	      linux/stmmac.h header file.
+
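+For example (an illustrative sketch only; the values are arbitrary and
+the burst length should really be set using the macros provided in the
+linux/stmmac.h header file):
+
+static struct stmmac_dma_cfg my_dma_cfg = {
+	.pbl = 32,
+	.fixed_burst = 1,
+	.burst_len_supported = 8,	/* placeholder: use a linux/stmmac.h macro */
+};
+
+This would then be hooked into the platform data via its dma_cfg pointer.
+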
+---
+
+Below is an example of how the structures above are used on ST platforms.
 
  static struct plat_stmmacenet_data stxYYY_ethernet_platform_data = {
-	.pbl = 32,
 	.has_gmac = 0,
 	.enh_desc = 0,
 	.fix_mac_speed = stxYYY_ethernet_fix_mac_speed,
diff --git a/Documentation/networking/tms380tr.txt b/Documentation/networking/tms380tr.txt
deleted file mode 100644
index 1f73e13..0000000
--- a/Documentation/networking/tms380tr.txt
+++ /dev/null
@@ -1,147 +0,0 @@
-Text file for the Linux SysKonnect Token Ring ISA/PCI Adapter Driver.
-	Text file by: Jay Schulist <jschlst@samba.org>
-
-The Linux SysKonnect Token Ring driver works with the SysKonnect TR4/16(+) ISA,
-SysKonnect TR4/16(+) PCI, SysKonnect TR4/16 PCI, and older revisions of the
-SK NET TR4/16 ISA card.
-
-Latest information on this driver can be obtained on the Linux-SNA WWW site.
-Please point your browser to: 
-http://www.linux-sna.org
-
-Many thanks to Christoph Goos for his excellent work on this driver and
-SysKonnect for donating the adapters to Linux-SNA for the testing and 
-maintenance of this device driver.
-
-Important information to be noted:
-1. Adapters can be slow to open (~20 secs) and close (~5 secs), please be 
-   patient.
-2. This driver works very well when autoprobing for adapters. Why even 
-   think about those nasty io/int/dma settings of modprobe when the driver 
-   will do it all for you!
-
-This driver is rather simple to use. Select Y to Token Ring adapter support
-in the kernel configuration. A choice for SysKonnect Token Ring adapters will
-appear. This drives supports all SysKonnect ISA and PCI adapters. Choose this
-option. I personally recommend compiling the driver as a module (M), but if you
-you would like to compile it statically answer Y instead.
-
-This driver supports multiple adapters without the need to load multiple copies
-of the driver. You should be able to load up to 7 adapters without any kernel
-modifications, if you are in need of more please contact the maintainer of this
-driver.
-
-Load the driver either by lilo/loadlin or as a module. When a module using the
-following command will suffice for most:
-
-# modprobe sktr
-
-This will produce output similar to the following: (Output is user specific)
-
-sktr.c: v1.01 08/29/97 by Christoph Goos
-tr0: SK NET TR 4/16 PCI found at 0x6100, using IRQ 17.
-tr1: SK NET TR 4/16 PCI found at 0x6200, using IRQ 16.
-tr2: SK NET TR 4/16 ISA found at 0xa20, using IRQ 10 and DMA 5.
-
-Now just setup the device via ifconfig and set and routes you may have. After
-this you are ready to start sending some tokens.
-
-Errata:
-For anyone wondering where to pick up the SysKonnect adapters please browse
-to http://www.syskonnect.com
-
-This driver is under the GNU General Public License. Its Firmware image is 
-included as an initialized C-array and is licensed by SysKonnect to the Linux 
-users of this driver. However no warranty about its fitness is expressed or 
-implied by SysKonnect.
-
-Below find attached the setting for the SK NET TR 4/16 ISA adapters
--------------------------------------------------------------------
-
-                    ***************************
-                    ***   C O N T E N T S   ***
-                    ***************************
-
-                1) Location of DIP-Switch W1
-                2) Default settings
-                3) DIP-Switch W1 description
-
-
-  ==============================================================
-  CHAPTER 1     LOCATION OF DIP-SWITCH
-  ==============================================================
-
-UÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄ¿
-þUÄÄÄÄÄÄ¿                         UÄÄÄÄÄ¿            UÄÄÄ¿         þ
-þAÄÄÄÄÄÄU                      W1 AÄÄÄÄÄU     UÄÄÄÄ¿ þ   þ         þ
-þUÄÄÄÄÄÄ¿                                     þ    þ þ   þ      UÄÄÅ¿
-þAÄÄÄÄÄÄU              UÄÄÄÄÄÄÄÄÄÄÄ¿          AÄÄÄÄU þ   þ      þ  þþ
-þUÄÄÄÄÄÄ¿              þ           þ          UÄÄÄ¿  AÄÄÄU      AÄÄÅU
-þAÄÄÄÄÄÄU              þ TMS380C26 þ          þ   þ                þ
-þUÄÄÄÄÄÄ¿              þ           þ          AÄÄÄU                AÄ¿
-þAÄÄÄÄÄÄU              þ           þ                               þ þ
-þ                      AÄÄÄÄÄÄÄÄÄÄÄU                               þ þ
-þ                                                                  þ þ
-þ                                                                  AÄU
-þ                                                                  þ
-þ                                                                  þ
-þ                                                                  þ
-þ                                                                  þ
-AÄÄÄÄÄÄÄÄÄÄÄÄAÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄAÄÄAÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄAÄÄÄÄÄÄÄÄÄU
-             AÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄU  AÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄÄU
-
-  ==============================================================
-  CHAPTER 2     DEFAULT SETTINGS
-  ==============================================================
-
-          W1    1  2  3  4  5  6  7  8
-        +------------------------------+
-        | ON    X                      |
-        | OFF      X  X  X  X  X  X  X |
-        +------------------------------+
-
-        W1.1 = ON               Adapter drives address lines SA17..19
-        W1.2 - 1.5 = OFF        BootROM disabled
-        W1.6 - 1.8 = OFF        I/O address 0A20h
-
-  ==============================================================
-  CHAPTER 3     DIP SWITCH W1 DESCRIPTION
-  ==============================================================
-
-      UÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄ¿  ON
-      þ 1 þ 2 þ 3 þ 4 þ 5 þ 6 þ 7 þ 8 þ
-      AÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄAÄÄÄU  OFF
-      |AD | BootROM Addr. |  I/O      |
-      +-+-+-------+-------+-----+-----+
-        |         |             |
-        |         |             +------ 6     7     8
-        |         |                     ON    ON    ON       1900h
-        |         |                     ON    ON    OFF      0900h
-        |         |                     ON    OFF   ON       1980h
-        |         |                     ON    OFF   OFF      0980h
-        |         |                     OFF   ON    ON       1b20h
-        |         |                     OFF   ON    OFF      0b20h
-        |         |                     OFF   OFF   ON       1a20h
-        |         |                     OFF   OFF   OFF      0a20h    (+)
-        |         |
-        |         |
-        |         +-------- 2     3     4     5
-        |                   OFF   x     x     x       disabled  (+)
-        |                   ON    ON    ON    ON      C0000
-        |                   ON    ON    ON    OFF     C4000
-        |                   ON    ON    OFF   ON      C8000
-        |                   ON    ON    OFF   OFF     CC000
-        |                   ON    OFF   ON    ON      D0000
-        |                   ON    OFF   ON    OFF     D4000
-        |                   ON    OFF   OFF   ON      D8000
-        |                   ON    OFF   OFF   OFF     DC000
-        |
-        |
-        +----- 1
-               OFF    adapter does NOT drive SA<17..19>
-               ON     adapter drives SA<17..19>  (+)
-
-
-        (+) means default setting
-
-                       ********************************
diff --git a/Documentation/nfc/nfc-hci.txt b/Documentation/nfc/nfc-hci.txt
new file mode 100644
index 0000000..216b725
--- /dev/null
+++ b/Documentation/nfc/nfc-hci.txt
@@ -0,0 +1,155 @@
+HCI backend for NFC Core
+
+Author: Eric Lapuyade, Samuel Ortiz
+Contact: eric.lapuyade@intel.com, samuel.ortiz@intel.com
+
+General
+-------
+
+The HCI layer implements much of the ETSI TS 102 622 V10.2.0 specification. It
+enables easy writing of HCI-based NFC drivers. The HCI layer runs as an NFC Core
+backend, implementing an abstract nfc device and translating NFC Core API
+to HCI commands and events.
+
+HCI
+---
+
+HCI registers as an nfc device with NFC Core. Requests coming from userspace are
+routed through netlink sockets to NFC Core and then to HCI. From this point,
+they are translated into a sequence of HCI commands sent to the HCI layer in the
+host controller (the chip). The sending context blocks while waiting for the
+response to arrive.
+HCI events can also be received from the host controller. They will be handled
+and a translation will be forwarded to NFC Core as needed.
+HCI uses two execution contexts:
+- one is for executing commands: nfc_hci_msg_tx_work(). Only one command
+can be executing at any given moment.
+- one is for dispatching received events and responses: nfc_hci_msg_rx_work()
+
+HCI Session initialization:
+---------------------------
+
+Session initialization is an HCI standard procedure which must unfortunately
+support proprietary gates. This is the reason why the driver will pass a list
+of proprietary gates that must be part of the session. HCI will ensure all
+those gates have pipes connected when the hci device is set up.
+
+HCI Gates and Pipes
+-------------------
+
+A gate defines the 'port' where some service can be found. In order to access
+a service, one must create a pipe to that gate and open it. In this
+implementation, pipes are totally hidden. The public API only knows gates.
+This is consistent with the driver's need to send commands to proprietary gates
+without knowing the pipe connected to it.
+
+Driver interface
+----------------
+
+A driver would normally register itself with HCI and provide the following
+entry points:
+
+struct nfc_hci_ops {
+	int (*open)(struct nfc_hci_dev *hdev);
+	void (*close)(struct nfc_hci_dev *hdev);
+	int (*xmit)(struct nfc_hci_dev *hdev, struct sk_buff *skb);
+	int (*start_poll)(struct nfc_hci_dev *hdev, u32 protocols);
+	int (*target_from_gate)(struct nfc_hci_dev *hdev, u8 gate,
+				struct nfc_target *target);
+};
+
+open() and close() shall turn the hardware on and off. xmit() shall simply
+write a frame to the chip. start_poll() is an optional entrypoint that shall
+set the hardware in polling mode. This must be implemented only if the hardware
+uses proprietary gates or a mechanism slightly different from the HCI standard.
+target_from_gate() is another optional entrypoint to return the protocols
+corresponding to a proprietary gate.
+
+On the rx path, the driver is responsible for pushing incoming HCP frames to HCI
+using nfc_hci_recv_frame(). HCI will take care of re-aggregation and handling.
+This must be done from a context that can sleep.
+
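+As a rough sketch (illustrative only; the "my_nfc" names are made up
+and a real driver will do considerably more work in each handler):
+
+	static int my_nfc_open(struct nfc_hci_dev *hdev)
+	{
+		/* power up the chip */
+		return 0;
+	}
+
+	static void my_nfc_close(struct nfc_hci_dev *hdev)
+	{
+		/* power down the chip */
+	}
+
+	static int my_nfc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+	{
+		/* write the frame to the chip, consuming the skb */
+		return 0;
+	}
+
+	static struct nfc_hci_ops my_nfc_ops = {
+		.open	= my_nfc_open,
+		.close	= my_nfc_close,
+		.xmit	= my_nfc_xmit,
+		/* .start_poll and .target_from_gate are optional */
+	};
+
+On the rx side, incoming frames would then be handed to HCI with
+nfc_hci_recv_frame() from a context that can sleep.
+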
+SHDLC
+-----
+
+Most chips use shdlc to ensure integrity and delivery ordering of the HCP
+frames between the host controller (the chip) and hosts (entities connected
+to the chip, like the cpu). In order to simplify writing the driver, an shdlc
+layer is available for use by the driver.
+When used, the driver actually registers with shdlc, and shdlc will register
+with HCI. HCI sees shdlc as the driver and thus sends its HCP frames
+through shdlc->xmit.
+SHDLC adds a new execution context (nfc_shdlc_sm_work()) to run its state
+machine and handle both its rx and tx path.
+
+Included Drivers
+----------------
+
+An HCI based driver for an NXP PN544, connected through the I2C bus, and using
+shdlc is included.
+
+Execution Contexts
+------------------
+
+The execution contexts are the following:
+- IRQ handler (IRQH):
+fast, cannot sleep. Stores incoming frames into an shdlc rx queue.
+
+- SHDLC State Machine worker (SMW)
+Handles shdlc rx & tx queues. Dispatches HCI cmd responses.
+
+- HCI Tx Cmd worker (MSGTXWQ)
+Serializes execution of HCI commands. Completes execution in case of response
+timeout.
+
+- HCI Rx worker (MSGRXWQ)
+Dispatches incoming HCI commands or events.
+
+- Syscall context from a userspace call (SYSCALL)
+Any entrypoint in HCI called from NFC Core
+
+Workflow executing an HCI command (using shdlc)
+-----------------------------------------------
+
+Executing an HCI command can easily be performed synchronously using the
+following API:
+
+int nfc_hci_send_cmd (struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+			const u8 *param, size_t param_len, struct sk_buff **skb)
+
+The API must be invoked from a context that can sleep. Most of the time, this
+will be the syscall context. skb will return the result that was received in
+the response.
+
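+For example (a sketch only; the gate, command and parameter values are
+made up, and it is assumed here that the caller frees the returned skb):
+
+	struct sk_buff *skb;
+	u8 param = 0x01;
+	int r;
+
+	r = nfc_hci_send_cmd(hdev, MY_PROPRIETARY_GATE, MY_CMD,
+			     &param, sizeof(param), &skb);
+	if (r == 0) {
+		/* skb contains the response payload */
+		kfree_skb(skb);
+	}
+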
+Internally, execution is asynchronous. So all this API does is to enqueue the
+HCI command, set up a local wait queue on the stack, and wait_event() for completion.
+The wait is not interruptible because it is guaranteed that the command will
+complete after some short timeout anyway.
+
+MSGTXWQ context will then be scheduled and invoke nfc_hci_msg_tx_work().
+This function will dequeue the next pending command and send its HCP fragments
+to the lower layer which happens to be shdlc. It will then start a timer to be
+able to complete the command with a timeout error if no response arrives.
+
+SMW context gets scheduled and invokes nfc_shdlc_sm_work(). This function
+handles shdlc framing in and out. It uses the driver xmit to send frames and
+receives incoming frames in an skb queue filled from the driver IRQ handler.
+SHDLC I(nformation) frame payloads are HCP fragments. They are aggregated to
+form complete HCI frames, which can be a response, command, or event.
+
+HCI Responses are dispatched immediately from this context to unblock
+waiting command execution. Response processing involves invoking the completion
+callback that was provided by nfc_hci_msg_tx_work() when it sent the command.
+The completion callback will then wake the syscall context.
+
+Workflow receiving an HCI event or command
+------------------------------------------
+
+HCI commands or events are not dispatched from SMW context. Instead, they are
+queued to HCI rx_queue and will be dispatched from HCI rx worker
+context (MSGRXWQ). This is done this way to allow a cmd or event handler
+to also execute other commands (for example, handling the
+NFC_HCI_EVT_TARGET_DISCOVERED event from PN544 requires to issue an
+ANY_GET_PARAMETER to the reader A gate to get information on the target
+that was discovered).
+
+Typically, such an event will be propagated to NFC Core from MSGRXWQ context.
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index d97bccf..e40f4b4 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -152,11 +152,9 @@
 };
 
 
-static int foo_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
+static int foo_get_groups_count(struct pinctrl_dev *pctldev)
 {
-	if (selector >= ARRAY_SIZE(foo_groups))
-		return -EINVAL;
-	return 0;
+	return ARRAY_SIZE(foo_groups);
 }
 
 static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
@@ -175,7 +173,7 @@
 }
 
 static struct pinctrl_ops foo_pctrl_ops = {
-	.list_groups = foo_list_groups,
+	.get_groups_count = foo_get_groups_count,
 	.get_group_name = foo_get_group_name,
 	.get_group_pins = foo_get_group_pins,
 };
@@ -186,13 +184,12 @@
        .pctlops = &foo_pctrl_ops,
 };
 
-The pin control subsystem will call the .list_groups() function repeatedly
-beginning on 0 until it returns non-zero to determine legal selectors, then
-it will call the other functions to retrieve the name and pins of the group.
-Maintaining the data structure of the groups is up to the driver, this is
-just a simple example - in practice you may need more entries in your group
-structure, for example specific register ranges associated with each group
-and so on.
+The pin control subsystem will call the .get_groups_count() function to
+determine the total number of legal selectors, then call the other functions
+to retrieve the name and pins of the group. Maintaining the data structure of
+the groups is up to the driver, this is just a simple example - in practice you
+may need more entries in your group structure, for example specific register
+ranges associated with each group and so on.
 
 
 Pin configuration
@@ -606,11 +603,9 @@
 };
 
 
-static int foo_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
+static int foo_get_groups_count(struct pinctrl_dev *pctldev)
 {
-	if (selector >= ARRAY_SIZE(foo_groups))
-		return -EINVAL;
-	return 0;
+	return ARRAY_SIZE(foo_groups);
 }
 
 static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
@@ -629,7 +624,7 @@
 }
 
 static struct pinctrl_ops foo_pctrl_ops = {
-	.list_groups = foo_list_groups,
+	.get_groups_count = foo_get_groups_count,
 	.get_group_name = foo_get_group_name,
 	.get_group_pins = foo_get_group_pins,
 };
@@ -640,7 +635,7 @@
 	const unsigned num_groups;
 };
 
-static const char * const spi0_groups[] = { "spi0_1_grp" };
+static const char * const spi0_groups[] = { "spi0_0_grp", "spi0_1_grp" };
 static const char * const i2c0_groups[] = { "i2c0_grp" };
 static const char * const mmc0_groups[] = { "mmc0_1_grp", "mmc0_2_grp",
 					"mmc0_3_grp" };
@@ -663,11 +658,9 @@
 	},
 };
 
-int foo_list_funcs(struct pinctrl_dev *pctldev, unsigned selector)
+int foo_get_functions_count(struct pinctrl_dev *pctldev)
 {
-	if (selector >= ARRAY_SIZE(foo_functions))
-		return -EINVAL;
-	return 0;
+	return ARRAY_SIZE(foo_functions);
 }
 
 const char *foo_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
@@ -703,7 +696,7 @@
 }
 
 struct pinmux_ops foo_pmxops = {
-	.list_functions = foo_list_funcs,
+	.get_functions_count = foo_get_functions_count,
 	.get_function_name = foo_get_fname,
 	.get_function_groups = foo_get_groups,
 	.enable = foo_enable,
@@ -786,7 +779,7 @@
 
 #include <linux/pinctrl/machine.h>
 
-static const struct pinctrl_map __initdata mapping[] = {
+static const struct pinctrl_map mapping[] __initconst = {
 	{
 		.dev_name = "foo-spi.0",
 		.name = PINCTRL_STATE_DEFAULT,
@@ -952,13 +945,13 @@
 The result of grabbing this mapping from the device with something like
 this (see next paragraph):
 
-	p = pinctrl_get(dev);
+	p = devm_pinctrl_get(dev);
 	s = pinctrl_lookup_state(p, "8bit");
 	ret = pinctrl_select_state(p, s);
 
 or more simply:
 
-	p = pinctrl_get_select(dev, "8bit");
+	p = devm_pinctrl_get_select(dev, "8bit");
 
 Will be that you activate all the three bottom records in the mapping at
 once. Since they share the same name, pin controller device, function and
@@ -992,7 +985,7 @@
 	/* Allocate a state holder named "foo" etc */
 	struct foo_state *foo = ...;
 
-	foo->p = pinctrl_get(&device);
+	foo->p = devm_pinctrl_get(&device);
 	if (IS_ERR(foo->p)) {
 		/* FIXME: clean up "foo" here */
 		return PTR_ERR(foo->p);
@@ -1000,24 +993,17 @@
 
 	foo->s = pinctrl_lookup_state(foo->p, PINCTRL_STATE_DEFAULT);
 	if (IS_ERR(foo->s)) {
-		pinctrl_put(foo->p);
 		/* FIXME: clean up "foo" here */
 		return PTR_ERR(s);
 	}
 
 	ret = pinctrl_select_state(foo->s);
 	if (ret < 0) {
-		pinctrl_put(foo->p);
 		/* FIXME: clean up "foo" here */
 		return ret;
 	}
 }
 
-foo_remove()
-{
-	pinctrl_put(state->p);
-}
-
 This get/lookup/select/put sequence can just as well be handled by bus drivers
 if you don't want each and every driver to handle it and you know the
 arrangement on your bus.
@@ -1029,6 +1015,11 @@
   kernel memory to hold the pinmux state. All mapping table parsing or similar
   slow operations take place within this API.
 
+- devm_pinctrl_get() is a variant of pinctrl_get() that causes pinctrl_put()
+  to be called automatically on the retrieved pointer when the associated
+  device is removed. It is recommended to use this function over plain
+  pinctrl_get().
+
 - pinctrl_lookup_state() is called in process context to obtain a handle to a
   specific state for a the client device. This operation may be slow too.
 
@@ -1041,14 +1032,30 @@
 
 - pinctrl_put() frees all information associated with a pinctrl handle.
 
+- devm_pinctrl_put() is a variant of pinctrl_put() that may be used to
+  explicitly destroy a pinctrl object returned by devm_pinctrl_get().
+  However, use of this function will be rare, due to the automatic cleanup
+  that will occur even without calling it.
+
+  pinctrl_get() must be paired with a plain pinctrl_put().
+  pinctrl_get() may not be paired with devm_pinctrl_put().
+  devm_pinctrl_get() can optionally be paired with devm_pinctrl_put().
+  devm_pinctrl_get() may not be paired with plain pinctrl_put().
+
 Usually the pin control core handled the get/put pair and call out to the
 device drivers bookkeeping operations, like checking available functions and
 the associated pins, whereas the enable/disable pass on to the pin controller
 driver which takes care of activating and/or deactivating the mux setting by
 quickly poking some registers.
 
-The pins are allocated for your device when you issue the pinctrl_get() call,
-after this you should be able to see this in the debugfs listing of all pins.
+The pins are allocated for your device when you issue the devm_pinctrl_get()
+call; after this you should be able to see this in the debugfs listing of all
+pins.
+
+NOTE: the pinctrl system will return -EPROBE_DEFER if it cannot find the
+requested pinctrl handles, for example if the pinctrl driver has not yet
+registered. Thus make sure that the error path in your driver gracefully
+cleans up and is ready to retry the probing later in the startup process.
 
 
 System pin control hogging
@@ -1094,13 +1101,13 @@
 
 #include <linux/pinctrl/consumer.h>
 
-foo_switch()
-{
-	struct pinctrl *p;
-	struct pinctrl_state *s1, *s2;
+struct pinctrl *p;
+struct pinctrl_state *s1, *s2;
 
+foo_probe()
+{
 	/* Setup */
-	p = pinctrl_get(&device);
+	p = devm_pinctrl_get(&device);
 	if (IS_ERR(p))
 		...
 
@@ -1111,7 +1118,10 @@
 	s2 = pinctrl_lookup_state(foo->p, "pos-B");
 	if (IS_ERR(s2))
 		...
+}
 
+foo_switch()
+{
 	/* Enable on position A */
 	ret = pinctrl_select_state(s1);
 	if (ret < 0)
@@ -1125,8 +1135,6 @@
 	    ...
 
 	...
-
-	pinctrl_put(p);
 }
 
 The above has to be done from process context.
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index ec715cd..6ec291e 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -9,7 +9,7 @@
 
 II. How does it work?
 
-There are four per-task flags used for that, PF_NOFREEZE, PF_FROZEN, TIF_FREEZE
+There are three per-task flags used for that: PF_NOFREEZE, PF_FROZEN
 and PF_FREEZER_SKIP (the last one is auxiliary).  The tasks that have
 PF_NOFREEZE unset (all user space processes and some kernel threads) are
 regarded as 'freezable' and treated in a special way before the system enters a
@@ -17,30 +17,31 @@
 we only consider hibernation, but the description also applies to suspend).
 
 Namely, as the first step of the hibernation procedure the function
-freeze_processes() (defined in kernel/power/process.c) is called.  It executes
-try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
-either wakes them up, if they are kernel threads, or sends fake signals to them,
-if they are user space processes.  A task that has TIF_FREEZE set, should react
-to it by calling the function called __refrigerator() (defined in
-kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
-to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
-Then, we say that the task is 'frozen' and therefore the set of functions
-handling this mechanism is referred to as 'the freezer' (these functions are
-defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
-User space processes are generally frozen before kernel threads.
+freeze_processes() (defined in kernel/power/process.c) is called.  A system-wide
+variable system_freezing_cnt (as opposed to a per-task flag) is used to indicate
+whether the system is to undergo a freezing operation, and freeze_processes()
+sets this variable.  After this, it executes try_to_freeze_tasks() that sends a
+fake signal to all user space processes, and wakes up all the kernel threads.
+All freezable tasks must react to that by calling try_to_freeze(), which
+results in a call to __refrigerator() (defined in kernel/freezer.c), which sets
+the task's PF_FROZEN flag, changes its state to TASK_UNINTERRUPTIBLE and makes
+it loop until PF_FROZEN is cleared for it. Then, we say that the task is
+'frozen' and therefore the set of functions handling this mechanism is referred
+to as 'the freezer' (these functions are defined in kernel/power/process.c,
+kernel/freezer.c & include/linux/freezer.h). User space processes are generally
+frozen before kernel threads.
 
 __refrigerator() must not be called directly.  Instead, use the
 try_to_freeze() function (defined in include/linux/freezer.h), that checks
-the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
-flag is set.
+if the task is to be frozen and makes the task enter __refrigerator().
 
 For user space processes try_to_freeze() is called automatically from the
 signal-handling code, but the freezable kernel threads need to call it
 explicitly in suitable places or use the wait_event_freezable() or
 wait_event_freezable_timeout() macros (defined in include/linux/freezer.h)
-that combine interruptible sleep with checking if TIF_FREEZE is set and calling
-try_to_freeze().  The main loop of a freezable kernel thread may look like the
-following one:
+that combine interruptible sleep with checking if the task is to be frozen and
+calling try_to_freeze().  The main loop of a freezable kernel thread may look
+like the following one:
 
 	set_freezable();
 	do {
@@ -53,7 +54,7 @@
 (from drivers/usb/core/hub.c::hub_thread()).
 
 If a freezable kernel thread fails to call try_to_freeze() after the freezer has
-set TIF_FREEZE for it, the freezing of tasks will fail and the entire
+initiated a freezing operation, the freezing of tasks will fail and the entire
 hibernation operation will be cancelled.  For this reason, freezable kernel
 threads must call try_to_freeze() somewhere or use one of the
 wait_event_freezable() and wait_event_freezable_timeout() macros.
diff --git a/Documentation/power/regulator/regulator.txt b/Documentation/power/regulator/regulator.txt
index e272d99..1390277 100644
--- a/Documentation/power/regulator/regulator.txt
+++ b/Documentation/power/regulator/regulator.txt
@@ -11,8 +11,7 @@
 Drivers can register a regulator by calling :-
 
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
-	struct device *dev, struct regulator_init_data *init_data,
-	void *driver_data, struct device_node *of_node);
+					 const struct regulator_config *config);
 
 This will register the regulators capabilities and operations to the regulator
 core.
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 83f8ea8..80441ab 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,11 @@
+Release Date    : Mon. Mar 19, 2012 17:00:00 PST 2012 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+Current Version : 00.00.06.15-rc1
+Old Version     : 00.00.06.14-rc1
+    1. Optimize HostMSIxVectors setting.
+    2. Add fpRead/WriteCapable, fpRead/WriteAcrossStripe checks.
+-------------------------------------------------------------------------------
 Release Date    : Fri. Jan 6, 2012 17:00:00 PST 2010 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 7877170..d389acd 100644
--- a/Documentation/security/keys.txt
+++ b/Documentation/security/keys.txt
@@ -123,7 +123,7 @@
 
 The key service provides a number of features besides keys:
 
- (*) The key service defines two special key types:
+ (*) The key service defines three special key types:
 
      (+) "keyring"
 
@@ -137,6 +137,18 @@
 	 blobs of data. These can be created, updated and read by userspace,
 	 and aren't intended for use by kernel services.
 
+     (+) "logon"
+
+	 Like a "user" key, a "logon" key has a payload that is an arbitrary
+	 blob of data. It is intended as a place to store secrets which are
+	 accessible to the kernel but not to userspace programs.
+
+	 The description can be arbitrary, but must be prefixed with a non-zero
+	 length string that describes the key "subclass". The subclass is
+	 separated from the rest of the description by a ':'. "logon" keys can
+	 be created and updated from userspace, but the payload is only
+	 readable from kernel space.
+
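+	 As an illustration only (the "ex" subclass and payload below are
+	 made up), such a key could be created from userspace with the
+	 add_key() system call, declared in keyutils.h:
+
+		key_serial_t key = add_key("logon", "ex:mykey",
+					   "secret-blob", 11,
+					   KEY_SPEC_SESSION_KEYRING);
+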
  (*) Each process subscribes to three keyrings: a thread-specific keyring, a
      process-specific keyring, and a session-specific keyring.
 
diff --git a/Documentation/sparc/README-2.5 b/Documentation/sparc/README-2.5
deleted file mode 100644
index 806fe49..0000000
--- a/Documentation/sparc/README-2.5
+++ /dev/null
@@ -1,46 +0,0 @@
-BTFIXUP
--------
-
-To build new kernels you have to issue "make image". The ready kernel
-in ELF format is placed in arch/sparc/boot/image. Explanation is below.
-
-BTFIXUP is a unique feature of Linux/sparc among other architectures,
-developed by Jakub Jelinek (I think... Obviously David S. Miller took
-part, too). It allows to boot the same kernel at different 
-sub-architectures, such as sun4c, sun4m, sun4d, where SunOS uses
-different kernels. This feature is convinient for people who you move
-disks between boxes and for distrution builders.
-
-To function, BTFIXUP must link the kernel "in the draft" first,
-analyze the result, write a special stub code based on that, and
-build the final kernel with the stub (btfix.o).
-
-Kai Germaschewski improved the build system of the kernel in the 2.5 series
-significantly. Unfortunately, the traditional way of running the draft
-linking from architecture specific Makefile before the actual linking
-by generic Makefile is nearly impossible to support properly in the
-new build system. Therefore, the way we integrate BTFIXUP with the
-build system was changed in 2.5.40. Now, generic Makefile performs
-the draft linking and stores the result in file vmlinux. Architecture
-specific post-processing invokes BTFIXUP machinery and final linking
-in the same way as other architectures do bootstraps.
-
-Implications of that change are as follows.
-
-1. Hackers must type "make image" now, instead of just "make", in the same
-   way as s390 people do now. It is analogous to "make bzImage" on i386.
-   This does NOT affect sparc64, you continue to use "make" to build sparc64
-   kernels.
-
-2. vmlinux is not the final kernel, so RPM builders have to adjust
-   their spec files (if they delivered vmlinux for debugging).
-   System.map generated for vmlinux is still valid.
-
-3. Scripts that produce a.out images have to be changed. First, if they
-   invoke make, they have to use "make image". Second, they have to pick up
-   the new kernel in arch/sparc/boot/image instead of vmlinux.
-
-4. Since we are compliant with Kai's build system now, make -j is permitted.
-
--- Pete Zaitcev
-zaitcev@yahoo.com
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 3201a70..98335b7 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -43,6 +43,13 @@
 	1 - enable the JIT
 	2 - enable the JIT and ask the compiler to emit traces on kernel log.
 
+dev_weight
+----------
+
+The maximum number of packets the kernel can handle during one NAPI
+interrupt; this is a per-CPU variable.
+Default: 64
+
 rmem_default
 ------------
 
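
For illustration, a small C sketch that raises the budget through procfs
(equivalent to "sysctl -w net.core.dev_weight=128"; the value 128 is
arbitrary):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/core/dev_weight", "w");

		if (!f) {
			perror("dev_weight");
			return 1;
		}
		/* Allow each NAPI poll to process up to 128 packets. */
		fprintf(f, "128\n");
		return fclose(f) ? 1 : 0;
	}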
diff --git a/MAINTAINERS b/MAINTAINERS
index bb76fc4..73a8b56 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1431,6 +1431,7 @@
 BATMAN ADVANCED
 M:	Marek Lindner <lindner_marek@yahoo.de>
 M:	Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+M:	Antonio Quartulli <ordex@autistici.org>
 L:	b.a.t.m.a.n@lists.open-mesh.org
 W:	http://www.open-mesh.org/
 S:	Maintained
@@ -1598,6 +1599,7 @@
 
 BROCADE BFA FC SCSI DRIVER
 M:	Jing Huang <huangj@brocade.com>
+M:	Krishna C Gudipati <kgudipat@brocade.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/bfa/
@@ -1968,10 +1970,9 @@
 F:	drivers/net/ethernet/ti/cpmac.c
 
 CPU FREQUENCY DRIVERS
-M:	Dave Jones <davej@redhat.com>
+M:	Rafael J. Wysocki <rjw@sisk.pl>
 L:	cpufreq@vger.kernel.org
-W:	http://www.codemonkey.org.uk/projects/cpufreq/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
+L:	linux-pm@vger.kernel.org
 S:	Maintained
 F:	drivers/cpufreq/
 F:	include/linux/cpufreq.h
@@ -3519,12 +3520,6 @@
 S:	Maintained
 F:	drivers/char/hw_random/ixp4xx-rng.c
 
-INTEL IXP2000 ETHERNET DRIVER
-M:	Lennert Buytenhek <kernel@wantstofly.org>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/xscale/ixp2000/
-
 INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 M:	Jesse Brandeburg <jesse.brandeburg@intel.com>
@@ -3634,7 +3629,7 @@
 F:	drivers/net/ethernet/icplus/ipg.*
 
 IPATH DRIVER
-M:	Mike Marciniszyn <infinipath@qlogic.com>
+M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
 S:	Maintained
 F:	drivers/infiniband/hw/ipath/
@@ -4037,6 +4032,7 @@
 F:	drivers/scsi/53c700*
 
 LED SUBSYSTEM
+M:	Bryan Wu <bryan.wu@canonical.com>
 M:	Richard Purdie <rpurdie@rpsys.net>
 S:	Maintained
 F:	drivers/leds/
@@ -5133,19 +5129,13 @@
 PCI SUBSYSTEM
 M:	Bjorn Helgaas <bhelgaas@google.com>
 L:	linux-pci@vger.kernel.org
-Q:	http://patchwork.kernel.org/project/linux-pci/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci.git
+Q:	http://patchwork.ozlabs.org/project/linux-pci/list/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/linux.git
 S:	Supported
 F:	Documentation/PCI/
 F:	drivers/pci/
 F:	include/linux/pci*
 
-PCI HOTPLUG
-M:	Bjorn Helgaas <bhelgaas@google.com>
-L:	linux-pci@vger.kernel.org
-S:	Supported
-F:	drivers/pci/hotplug
-
 PCMCIA SUBSYSTEM
 P:	Linux PCMCIA Team
 L:	linux-pcmcia@lists.infradead.org
@@ -5208,7 +5198,7 @@
 F:	include/linux/personality.h
 
 PHONET PROTOCOL
-M:	Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+M:	Remi Denis-Courmont <courmisch@gmail.com>
 S:	Supported
 F:	Documentation/networking/phonet.txt
 F:	include/linux/phonet.h
@@ -5458,7 +5448,7 @@
 S:	Maintained
 
 QIB DRIVER
-M:	Mike Marciniszyn <infinipath@qlogic.com>
+M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 F:	drivers/infiniband/hw/qib/
@@ -5892,11 +5882,11 @@
 F:	drivers/scsi/st*
 
 SCTP PROTOCOL
-M:	Vlad Yasevich <vladislav.yasevich@hp.com>
+M:	Vlad Yasevich <vyasevich@gmail.com>
 M:	Sridhar Samudrala <sri@us.ibm.com>
 L:	linux-sctp@vger.kernel.org
 W:	http://lksctp.sourceforge.net
-S:	Supported
+S:	Maintained
 F:	Documentation/networking/sctp.txt
 F:	include/linux/sctp.h
 F:	include/net/sctp/
@@ -6676,6 +6666,16 @@
 S:	Maintained
 F:	sound/soc/codecs/twl4030*
 
+TI WILINK WIRELESS DRIVERS
+M:	Luciano Coelho <coelho@ti.com>
+L:	linux-wireless@vger.kernel.org
+W:	http://wireless.kernel.org/en/users/Drivers/wl12xx
+W:	http://wireless.kernel.org/en/users/Drivers/wl1251
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
+S:	Maintained
+F:	drivers/net/wireless/ti/
+F:	include/linux/wl12xx.h
+
 TIPC NETWORK LAYER
 M:	Jon Maloy <jon.maloy@ericsson.com>
 M:	Allan Stephens <allan.stephens@windriver.com>
@@ -6883,6 +6883,14 @@
 F:	drivers/cdrom/cdrom.c
 F:	include/linux/cdrom.h
 
+UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
+M:	Vinayak Holikatti <vinholikatti@gmail.com>
+M:	Santosh Y <santoshsy@gmail.com>
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	Documentation/scsi/ufs.txt
+F:	drivers/scsi/ufs/
+
 UNSORTED BLOCK IMAGES (UBI)
 M:	Artem Bityutskiy <dedekind1@gmail.com>
 W:	http://www.linux-mtd.infradead.org/
@@ -7432,23 +7440,6 @@
 S:	Maintained
 F:	drivers/input/misc/wistron_btns.c
 
-WL1251 WIRELESS DRIVER
-M:	Luciano Coelho <coelho@ti.com>
-L:	linux-wireless@vger.kernel.org
-W:	http://wireless.kernel.org/en/users/Drivers/wl1251
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
-S:	Maintained
-F:	drivers/net/wireless/wl1251/*
-
-WL1271 WIRELESS DRIVER
-M:	Luciano Coelho <coelho@ti.com>
-L:	linux-wireless@vger.kernel.org
-W:	http://wireless.kernel.org/en/users/Drivers/wl12xx
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
-S:	Maintained
-F:	drivers/net/wireless/wl12xx/
-F:	include/linux/wl12xx.h
-
 WL3501 WIRELESS PCMCIA CARD DRIVER
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 L:	linux-wireless@vger.kernel.org
diff --git a/Makefile b/Makefile
index afc868e..a687963 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -442,7 +442,7 @@
 
 no-dot-config-targets := clean mrproper distclean \
 			 cscope gtags TAGS tags help %docs check% coccicheck \
-			 include/linux/version.h headers_% archheaders \
+			 include/linux/version.h headers_% archheaders archscripts \
 			 kernelversion %src-pkg
 
 config-targets := 0
@@ -979,7 +979,7 @@
                    include/config/auto.conf
 	$(cmd_crmodverdir)
 
-archprepare: archheaders prepare1 scripts_basic
+archprepare: archheaders archscripts prepare1 scripts_basic
 
 prepare0: archprepare FORCE
 	$(Q)$(MAKE) $(build)=.
@@ -1049,8 +1049,11 @@
 PHONY += archheaders
 archheaders:
 
+PHONY += archscripts
+archscripts:
+
 PHONY += __headers
-__headers: include/linux/version.h scripts_basic asm-generic archheaders FORCE
+__headers: include/linux/version.h scripts_basic asm-generic archheaders archscripts FORCE
 	$(Q)$(MAKE) $(build)=scripts build_unifdef
 
 PHONY += headers_install_all
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 56a4df9..22e58a9 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -477,7 +477,7 @@
 
 config VGA_HOSE
 	bool
-	depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI
+	depends on VGA_CONSOLE && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI)
 	default y
 	help
 	  Support VGA on an arbitrary hose; needed for several platforms
diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h
index 1f7fba6..d70408d 100644
--- a/arch/alpha/include/asm/rtc.h
+++ b/arch/alpha/include/asm/rtc.h
@@ -1,14 +1,10 @@
 #ifndef _ALPHA_RTC_H
 #define _ALPHA_RTC_H
 
-#if defined(CONFIG_ALPHA_GENERIC)
+#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \
+ || defined(CONFIG_ALPHA_GENERIC)
 # define get_rtc_time		alpha_mv.rtc_get_time
 # define set_rtc_time		alpha_mv.rtc_set_time
-#else
-# if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP)
-#  define get_rtc_time		marvel_get_rtc_time
-#  define set_rtc_time		marvel_set_rtc_time
-# endif
 #endif
 
 #include <asm-generic/rtc.h>
diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c
index 5e7c28f..61893d7 100644
--- a/arch/alpha/kernel/core_tsunami.c
+++ b/arch/alpha/kernel/core_tsunami.c
@@ -11,6 +11,7 @@
 #include <asm/core_tsunami.h>
 #undef __EXTERN_INLINE
 
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/sched.h>
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 14a4b6a..407accc 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -317,7 +317,7 @@
 }
 
 static int 
-marvel_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
 	struct pci_controller *hose = dev->sysdata;
 	struct io7_port *io7_port = hose->sysdata;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf006d4..554ec1d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -11,6 +11,7 @@
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_TRACEHOOK
 	select HAVE_KPROBES if !XIP_KERNEL
 	select HAVE_KRETPROBES if (HAVE_KPROBES)
 	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
@@ -30,10 +31,12 @@
 	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_GENERIC_HARDIRQS
+	select HARDIRQS_SW_RESEND
+	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select GENERIC_PCI_IOMAP
-	select HAVE_BPF_JIT if NET
+	select HAVE_BPF_JIT
 	help
 	  The ARM series is a line of low-power-consumption RISC chip designs
 	  licensed by ARM Ltd and targeted at embedded applications and
@@ -126,14 +129,6 @@
 	bool
 	default y
 
-config HARDIRQS_SW_RESEND
-	bool
-	default y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
 config GENERIC_LOCKBREAK
 	bool
 	default y
@@ -280,6 +275,7 @@
 	select NEED_MACH_IO_H
 	select NEED_MACH_MEMORY_H
 	select SPARSE_IRQ
+	select MULTI_IRQ_HANDLER
 	help
 	  Support for ARM's Integrator platform.
 
@@ -632,7 +628,6 @@
 	select CLKDEV_LOOKUP
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
-	select TICK_ONESHOT
 	select PLAT_PXA
 	select SPARSE_IRQ
 	select GENERIC_ALLOCATOR
@@ -716,7 +711,6 @@
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
-	select TICK_ONESHOT
 	select PLAT_PXA
 	select SPARSE_IRQ
 	select AUTO_ZRELADDR
@@ -783,7 +777,6 @@
 	select CPU_FREQ
 	select GENERIC_CLOCKEVENTS
 	select CLKDEV_LOOKUP
-	select TICK_ONESHOT
 	select ARCH_REQUIRE_GPIOLIB
 	select HAVE_IDE
 	select NEED_MACH_MEMORY_H
@@ -1186,6 +1179,15 @@
 source "arch/arm/Kconfig-nommu"
 endif
 
+config ARM_ERRATA_326103
+	bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
+	depends on CPU_V6
+	help
+	  Executing a SWP instruction to read-only memory does not set bit 11
+	  of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
+	  treat the access as a read, preventing a COW from occurring and
+	  causing the faulting task to livelock.
+
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
 	depends on CPU_V6 || CPU_V6K
@@ -1543,10 +1545,15 @@
 	help
 	  This option enables support for the ARM system coherency unit
 
+config ARM_ARCH_TIMER
+	bool "Architected timer support"
+	depends on CPU_V7
+	help
+	  This option enables support for the ARM architected timer
+
 config HAVE_ARM_TWD
 	bool
 	depends on SMP
-	select TICK_ONESHOT
 	help
 	  This options enables support for the ARM timer and watchdog unit
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 047a207..aaf96bc 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -70,8 +70,6 @@
 arch-$(CONFIG_CPU_32v3)		:=-D__LINUX_ARM_ARCH__=3 -march=armv3
 
 # This selects how we optimise for the processor.
-tune-$(CONFIG_CPU_ARM610)	:=-mtune=arm610
-tune-$(CONFIG_CPU_ARM710)	:=-mtune=arm710
 tune-$(CONFIG_CPU_ARM7TDMI)	:=-mtune=arm7tdmi
 tune-$(CONFIG_CPU_ARM720T)	:=-mtune=arm7tdmi
 tune-$(CONFIG_CPU_ARM740T)	:=-mtune=arm7tdmi
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index dc7e8ce..b8c64b8 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -567,6 +567,12 @@
 		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
 		mov	pc, lr
 
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define CB_BITS 0x08
+#else
+#define CB_BITS 0x0c
+#endif
+
 __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 		bic	r3, r3, #0xff		@ Align the pointer
 		bic	r3, r3, #0x3f00
@@ -578,17 +584,14 @@
 		mov	r9, r0, lsr #18
 		mov	r9, r9, lsl #18		@ start of RAM
 		add	r10, r9, #0x10000000	@ a reasonable RAM size
-		mov	r1, #0x12
-		orr	r1, r1, #3 << 10
+		mov	r1, #0x12		@ XN|U + section mapping
+		orr	r1, r1, #3 << 10	@ AP=11
 		add	r2, r3, #16384
 1:		cmp	r1, r9			@ if virt > start of RAM
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-		orrhs	r1, r1, #0x08		@ set cacheable
-#else
-		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
-#endif
-		cmp	r1, r10			@ if virt > end of RAM
-		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
+		cmphs	r10, r1			@   && end of RAM > virt
+		bic	r1, r1, #0x1c		@ clear XN|U + C + B
+		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
+		orrhs	r1, r1, r6		@ set RAM section settings
 		str	r1, [r0], #4		@ 1:1 mapping
 		add	r1, r1, #1048576
 		teq	r0, r2
@@ -599,7 +602,7 @@
  * so there is no map overlap problem for up to 1 MB compressed kernel.
  * If the execution is in RAM then we would only be duplicating the above.
  */
-		mov	r1, #0x1e
+		orr	r1, r6, #0x04		@ ensure B is set for this
 		orr	r1, r1, #3 << 10
 		mov	r2, pc
 		mov	r2, r2, lsr #20
@@ -620,6 +623,7 @@
 __armv4_mmu_cache_on:
 		mov	r12, lr
 #ifdef CONFIG_MMU
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
@@ -641,6 +645,7 @@
 #ifdef CONFIG_MMU
 		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
 		tst	r11, #0xf		@ VMSA
+		movne	r6, #CB_BITS | 0x02	@ !XN
 		blne	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
@@ -655,7 +660,7 @@
 		orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
 		orrne	r0, r0, #1		@ MMU enabled
-		movne	r1, #-1
+		movne	r1, #0xfffffffd		@ domain 0 = client
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
 #endif
@@ -668,6 +673,7 @@
 
 __fa526_cache_on:
 		mov	r12, lr
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
@@ -680,18 +686,6 @@
 		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
 		mov	pc, r12
 
-__arm6_mmu_cache_on:
-		mov	r12, lr
-		bl	__setup_mmu
-		mov	r0, #0
-		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
-		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
-		mov	r0, #0x30
-		bl	__common_mmu_cache_on
-		mov	r0, #0
-		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
-		mov	pc, r12
-
 __common_mmu_cache_on:
 #ifndef CONFIG_THUMB2_KERNEL
 #ifndef DEBUG
@@ -756,16 +750,6 @@
 		.align	2
 		.type	proc_types,#object
 proc_types:
-		.word	0x41560600		@ ARM6/610
-		.word	0xffffffe0
-		W(b)	__arm6_mmu_cache_off	@ works, but slow
-		W(b)	__arm6_mmu_cache_off
-		mov	pc, lr
- THUMB(		nop				)
-@		b	__arm6_mmu_cache_on		@ untested
-@		b	__arm6_mmu_cache_off
-@		b	__armv3_mmu_cache_flush
-
 		.word	0x00000000		@ old ARM ID
 		.word	0x0000f000
 		mov	pc, lr
@@ -777,8 +761,10 @@
 
 		.word	0x41007000		@ ARM7/710
 		.word	0xfff8fe00
-		W(b)	__arm7_mmu_cache_off
-		W(b)	__arm7_mmu_cache_off
+		mov	pc, lr
+ THUMB(		nop				)
+		mov	pc, lr
+ THUMB(		nop				)
 		mov	pc, lr
  THUMB(		nop				)
 
@@ -977,21 +963,6 @@
 		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mov	pc, r12
 
-__arm6_mmu_cache_off:
-		mov	r0, #0x00000030		@ ARM6 control reg.
-		b	__armv3_mmu_cache_off
-
-__arm7_mmu_cache_off:
-		mov	r0, #0x00000070		@ ARM7 control reg.
-		b	__armv3_mmu_cache_off
-
-__armv3_mmu_cache_off:
-		mcr	p15, 0, r0, c1, c0, 0	@ turn MMU and cache off
-		mov	r0, #0
-		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
-		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
-		mov	pc, lr
-
 /*
  * Clean and flush the cache to maintain consistency.
  *
diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts
index 15ded0d..45bc4bb 100644
--- a/arch/arm/boot/dts/msm8660-surf.dts
+++ b/arch/arm/boot/dts/msm8660-surf.dts
@@ -10,7 +10,7 @@
 	intc: interrupt-controller@02080000 {
 		compatible = "qcom,msm-8660-qgic";
 		interrupt-controller;
-		#interrupt-cells = <1>;
+		#interrupt-cells = <3>;
 		reg = < 0x02080000 0x1000 >,
 		      < 0x02081000 0x1000 >;
 	};
@@ -19,6 +19,6 @@
 		compatible = "qcom,msm-hsuart", "qcom,msm-uart";
 		reg = <0x19c40000 0x1000>,
 		      <0x19c00000 0x1000>;
-		interrupts = <195>;
+		interrupts = <0 195 0x0>;
 	};
 };
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 0b32925..e2fe319 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -173,7 +173,7 @@
 			mmc@5000 {
 				compatible = "arm,primecell";
 				reg = < 0x5000 0x1000>;
-				interrupts = <22>;
+				interrupts = <22 34>;
 			};
 			kmi@6000 {
 				compatible = "arm,pl050", "arm,primecell";
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 1664610..7e81752 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -41,7 +41,7 @@
 			mmc@b000 {
 				compatible = "arm,primecell";
 				reg = <0xb000 0x1000>;
-				interrupts = <23>;
+				interrupts = <23 34>;
 			};
 		};
 	};
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index dcb1349..c4110d1 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -222,7 +222,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static struct pci_ops it8152_ops = {
+struct pci_ops it8152_ops = {
 	.read = it8152_pci_read_config,
 	.write = it8152_pci_write_config,
 };
@@ -346,9 +346,4 @@
 }
 
 
-struct pci_bus * __init it8152_pci_scan_bus(int nr, struct pci_sys_data *sys)
-{
-	return pci_scan_root_bus(NULL, nr, &it8152_ops, sys, &sys->resources);
-}
-
 EXPORT_SYMBOL(dma_set_coherent_mask);
diff --git a/arch/arm/common/via82c505.c b/arch/arm/common/via82c505.c
index 1171a50..6cb362e 100644
--- a/arch/arm/common/via82c505.c
+++ b/arch/arm/common/via82c505.c
@@ -51,7 +51,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static struct pci_ops via82c505_ops = {
+struct pci_ops via82c505_ops = {
 	.read	= via82c505_read_config,
 	.write	= via82c505_write_config,
 };
@@ -81,12 +81,3 @@
 {
 	return (nr == 0);
 }
-
-struct pci_bus * __init via82c505_scan_bus(int nr, struct pci_sys_data *sysdata)
-{
-	if (nr == 0)
-		return pci_scan_root_bus(NULL, 0, &via82c505_ops, sysdata,
-					 &sysdata->resources);
-
-	return NULL;
-}
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 7e288f9..e0d5388 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -39,6 +39,7 @@
  * struct vic_device - VIC PM device
  * @irq: The IRQ number for the base of the VIC.
  * @base: The register base for the VIC.
+ * @valid_sources: A bitmask of valid interrupts
  * @resume_sources: A bitmask of interrupts for resume.
  * @resume_irqs: The IRQs enabled for resume.
  * @int_select: Save for VIC_INT_SELECT.
@@ -50,6 +51,7 @@
 struct vic_device {
 	void __iomem	*base;
 	int		irq;
+	u32		valid_sources;
 	u32		resume_sources;
 	u32		resume_irqs;
 	u32		int_select;
@@ -164,10 +166,32 @@
 late_initcall(vic_pm_init);
 #endif /* CONFIG_PM */
 
+static struct irq_chip vic_chip;
+
+static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
+			     irq_hw_number_t hwirq)
+{
+	struct vic_device *v = d->host_data;
+
+	/* Skip invalid IRQs, only register handlers for the real ones */
+	if (!(v->valid_sources & (1 << hwirq)))
+		return -ENOTSUPP;
+	irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
+	irq_set_chip_data(irq, v->base);
+	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	return 0;
+}
+
+static struct irq_domain_ops vic_irqdomain_ops = {
+	.map = vic_irqdomain_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
 /**
  * vic_register() - Register a VIC.
  * @base: The base address of the VIC.
  * @irq: The base IRQ for the VIC.
+ * @valid_sources: bitmask of valid interrupts
  * @resume_sources: bitmask of interrupts allowed for resume sources.
  * @node: The device tree node associated with the VIC.
  *
@@ -178,7 +202,8 @@
  * This also configures the IRQ domain for the VIC.
  */
 static void __init vic_register(void __iomem *base, unsigned int irq,
-				u32 resume_sources, struct device_node *node)
+				u32 valid_sources, u32 resume_sources,
+				struct device_node *node)
 {
 	struct vic_device *v;
 
@@ -189,11 +214,12 @@
 
 	v = &vic_devices[vic_id];
 	v->base = base;
+	v->valid_sources = valid_sources;
 	v->resume_sources = resume_sources;
 	v->irq = irq;
 	vic_id++;
-	v->domain = irq_domain_add_legacy(node, 32, irq, 0,
-					  &irq_domain_simple_ops, v);
+	v->domain = irq_domain_add_legacy(node, fls(valid_sources), irq, 0,
+					  &vic_irqdomain_ops, v);
 }
 
 static void vic_ack_irq(struct irq_data *d)
@@ -287,23 +313,6 @@
 	}
 }
 
-static void __init vic_set_irq_sources(void __iomem *base,
-				unsigned int irq_start, u32 vic_sources)
-{
-	unsigned int i;
-
-	for (i = 0; i < 32; i++) {
-		if (vic_sources & (1 << i)) {
-			unsigned int irq = irq_start + i;
-
-			irq_set_chip_and_handler(irq, &vic_chip,
-						 handle_level_irq);
-			irq_set_chip_data(irq, base);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-		}
-	}
-}
-
 /*
  * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
  * The original cell has 32 interrupts, while the modified one has 64,
@@ -338,8 +347,7 @@
 		writel(32, base + VIC_PL190_DEF_VECT_ADDR);
 	}
 
-	vic_set_irq_sources(base, irq_start, vic_sources);
-	vic_register(base, irq_start, 0, node);
+	vic_register(base, irq_start, vic_sources, 0, node);
 }
 
 void __init __vic_init(void __iomem *base, unsigned int irq_start,
@@ -379,9 +387,7 @@
 
 	vic_init2(base);
 
-	vic_set_irq_sources(base, irq_start, vic_sources);
-
-	vic_register(base, irq_start, resume_sources, node);
+	vic_register(base, irq_start, vic_sources, resume_sources, node);
 }
 
 /**
diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig
index 42da918..082175c 100644
--- a/arch/arm/configs/mini2440_defconfig
+++ b/arch/arm/configs/mini2440_defconfig
@@ -14,6 +14,8 @@
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_ARCH_S3C24XX=y
+# CONFIG_CPU_S3C2410 is not set
+CONFIG_CPU_S3C2440=y
 CONFIG_S3C_ADC=y
 CONFIG_S3C24XX_PWM=y
 CONFIG_MACH_MINI2440=y
diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig
index af278f7..00515ef 100644
--- a/arch/arm/configs/rpc_defconfig
+++ b/arch/arm/configs/rpc_defconfig
@@ -8,8 +8,6 @@
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_RPC=y
-CONFIG_CPU_ARM610=y
-CONFIG_CPU_ARM710=y
 CONFIG_CPU_SA110=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
new file mode 100644
index 0000000..ed2e95d
--- /dev/null
+++ b/arch/arm/include/asm/arch_timer.h
@@ -0,0 +1,19 @@
+#ifndef __ASMARM_ARCH_TIMER_H
+#define __ASMARM_ARCH_TIMER_H
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+int arch_timer_of_register(void);
+int arch_timer_sched_clock_init(void);
+#else
+static inline int arch_timer_of_register(void)
+{
+	return -ENXIO;
+}
+
+static inline int arch_timer_sched_clock_init(void)
+{
+	return -ENXIO;
+}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5d8d5c..004c1bc 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -101,7 +101,7 @@
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
-	void (*coherent_user_range)(unsigned long, unsigned long);
+	int  (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_map_area)(const void *, size_t, int);
@@ -142,7 +142,7 @@
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
-extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
 extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
@@ -249,7 +249,7 @@
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
 	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
 
 /*
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index d41d7cb..7eb18c1 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -229,66 +229,19 @@
 				       (unsigned long)(n),		\
 				       sizeof(*(ptr))))
 
-#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
+#define cmpxchg64(ptr, o, n)						\
+	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
+						atomic64_t,		\
+						counter),		\
+					      (unsigned long)(o),	\
+					      (unsigned long)(n)))
 
-/*
- * Note : ARMv7-M (currently unsupported by Linux) does not support
- * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
- * not be allowed to use __cmpxchg64.
- */
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
-{
-	register unsigned long long oldval asm("r0");
-	register unsigned long long __old asm("r2") = old;
-	register unsigned long long __new asm("r4") = new;
-	unsigned long res;
-
-	do {
-		asm volatile(
-		"	@ __cmpxchg8\n"
-		"	ldrexd	%1, %H1, [%2]\n"
-		"	mov	%0, #0\n"
-		"	teq	%1, %3\n"
-		"	teqeq	%H1, %H3\n"
-		"	strexdeq %0, %4, %H4, [%2]\n"
-			: "=&r" (res), "=&r" (oldval)
-			: "r" (ptr), "Ir" (__old), "r" (__new)
-			: "memory", "cc");
-	} while (res);
-
-	return oldval;
-}
-
-static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr,o,n)						\
-	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					    (unsigned long long)(o),	\
-					    (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr,o,n)					\
-	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
-					 (unsigned long long)(o),	\
-					 (unsigned long long)(n)))
-
-#else /* min ARCH = ARMv6 */
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#endif
+#define cmpxchg64_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
+						local64_t,		\
+						a),			\
+					     (unsigned long)(o),	\
+					     (unsigned long)(n)))
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 354d571..8cacbcd 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -31,14 +31,6 @@
 #undef CPU_DABORT_HANDLER
 #undef MULTI_DABORT
 
-#if defined(CONFIG_CPU_ARM610)
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER cpu_arm6_data_abort
-# endif
-#endif
-
 #if defined(CONFIG_CPU_ARM710)
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index e2be7f1..ac1dd54 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -23,15 +23,6 @@
  * CPU_NAME - the prefix for CPU related functions
  */
 
-#ifdef CONFIG_CPU_ARM610
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm6
-# endif
-#endif
-
 #ifdef CONFIG_CPU_ARM7TDMI
 # ifdef CPU_NAME
 #  undef  MULTI_CPU
@@ -41,15 +32,6 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_ARM710
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm7
-# endif
-#endif
-
 #ifdef CONFIG_CPU_ARM720T
 # ifdef CPU_NAME
 #  undef  MULTI_CPU
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 73f84fa..d36a73d7 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -110,6 +110,6 @@
 extern void it8152_init_irq(void);
 extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
 extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
-extern struct pci_bus *it8152_pci_scan_bus(int nr, struct pci_sys_data *sys);
+extern struct pci_ops it8152_ops;
 
 #endif /* __ASM_HARDWARE_IT8152_H */
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index d943b7d..26c511f 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -12,13 +12,14 @@
 #define __ASM_MACH_PCI_H
 
 struct pci_sys_data;
+struct pci_ops;
 struct pci_bus;
 
 struct hw_pci {
 #ifdef CONFIG_PCI_DOMAINS
 	int		domain;
 #endif
-	struct list_head buses;
+	struct pci_ops	*ops;
 	int		nr_controllers;
 	int		(*setup)(int nr, struct pci_sys_data *);
 	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
@@ -45,16 +46,10 @@
 	u8		(*swizzle)(struct pci_dev *, u8 *);
 					/* IRQ mapping				*/
 	int		(*map_irq)(const struct pci_dev *, u8, u8);
-	struct hw_pci	*hw;
 	void		*private_data;	/* platform controller private data	*/
 };
 
 /*
- * This is the standard PCI-PCI bridge swizzling algorithm.
- */
-#define pci_std_swizzle pci_common_swizzle
-
-/*
  * Call this with your hw_pci struct to initialise the PCI system.
  */
 void pci_common_init(struct hw_pci *);
@@ -62,22 +57,22 @@
 /*
  * PCI controllers
  */
+extern struct pci_ops iop3xx_ops;
 extern int iop3xx_pci_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *iop3xx_pci_scan_bus(int nr, struct pci_sys_data *);
 extern void iop3xx_pci_preinit(void);
 extern void iop3xx_pci_preinit_cond(void);
 
+extern struct pci_ops dc21285_ops;
 extern int dc21285_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *dc21285_scan_bus(int nr, struct pci_sys_data *);
 extern void dc21285_preinit(void);
 extern void dc21285_postinit(void);
 
+extern struct pci_ops via82c505_ops;
 extern int via82c505_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *via82c505_scan_bus(int nr, struct pci_sys_data *);
 extern void via82c505_init(void *sysdata);
 
+extern struct pci_ops pci_v3_ops;
 extern int pci_v3_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *);
 extern void pci_v3_preinit(void);
 extern void pci_v3_postinit(void);
 
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index f73c908..6ca945f 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -42,4 +42,9 @@
 
 extern void timer_tick(void);
 
+struct timespec;
+typedef void (*clock_access_fn)(struct timespec *);
+extern int register_persistent_clock(clock_access_fn read_boot,
+				     clock_access_fn read_persistent);
+
 #endif
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b8e580a..1496565 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -34,11 +34,4 @@
 
 #endif
 
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac..0306bc6 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,45 +43,104 @@
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
-#ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
 
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-	/*
-	 * This code is executed with interrupts enabled. Therefore,
-	 * mm->context.id cannot be updated to the latest ASID version
-	 * on a different CPU (and condition below not triggered)
-	 * without first getting an IPI to reset the context. The
-	 * alternative is to take a read_lock on mm->context.id_lock
-	 * (after changing its type to rwlock_t).
-	 */
-	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		__new_context(mm);
+	unsigned long flags;
 
+	__new_context(mm);
+
+	local_irq_save(flags);
+	cpu_switch_mm(mm->pgd, mm);
+	local_irq_restore(flags);
+}
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
+
+	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+		/*
+		 * The ASID is from the current generation, just switch to the
+		 * new pgd. This condition is only true for calls from
+		 * context_switch() and interrupts are already disabled.
+		 */
+		cpu_switch_mm(mm->pgd, mm);
+	else if (irqs_disabled())
+		/*
+		 * Defer the new ASID allocation until after the context
+		 * switch critical region since __new_context() cannot be
+		 * called with interrupts disabled (it sends IPIs).
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		/*
+		 * That is a direct call to switch_mm() or activate_mm() with
+		 * interrupts enabled and a new context.
+		 */
+		switch_new_context(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
-#else
-
-static inline void check_context(struct mm_struct *mm)
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
 {
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+		switch_new_context(current->mm);
+}
+
+#else	/* !CONFIG_CPU_HAS_ASID */
+
 #ifdef CONFIG_MMU
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
-#endif
+
+	if (irqs_disabled())
+		/*
+		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+		 * high interrupt latencies, defer the call and continue
+		 * running with the old mm. Since we only support UP systems
+		 * on non-ASID CPUs, the old mm will remain valid until the
+		 * finish_arch_post_lock_switch() call.
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		cpu_switch_mm(mm->pgd, mm);
 }
 
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+		struct mm_struct *mm = current->mm;
+		cpu_switch_mm(mm->pgd, mm);
+	}
+}
+
+#endif	/* CONFIG_MMU */
+
 #define init_new_context(tsk,mm)	0
 
-#endif
+#endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
 
@@ -119,12 +178,7 @@
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
-#ifdef CONFIG_SMP
-		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
-		*crt_mm = next;
-#endif
-		check_context(next);
-		cpu_switch_mm(next->pgd, next);
+		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 5838361..ecf9019 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -34,7 +34,6 @@
  *	processor(s) we're building for.
  *
  *	We have the following to choose from:
- *	  v3		- ARMv3
  *	  v4wt		- ARMv4 with writethrough cache, without minicache
  *	  v4wb		- ARMv4 with writeback cache, without minicache
  *	  v4_mc		- ARMv4 with minicache
@@ -44,14 +43,6 @@
 #undef _USER
 #undef MULTI_USER
 
-#ifdef CONFIG_CPU_COPY_V3
-# ifdef _USER
-#  define MULTI_USER 1
-# else
-#  define _USER v3
-# endif
-#endif
-
 #ifdef CONFIG_CPU_COPY_V4WT
 # ifdef _USER
 #  define MULTI_USER 1
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 759af70..b249035 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -69,8 +69,6 @@
  */
 #define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Valid */
 #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
-#define L_PTE_BUFFERABLE	(_AT(pteval_t, 1) << 2)		/* AttrIndx[0] */
-#define L_PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)		/* AttrIndx[1] */
 #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
 #define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 451808b..355ece5 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -249,6 +249,11 @@
 	return regs->ARM_sp;
 }
 
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+	return regs->ARM_sp;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
new file mode 100644
index 0000000..c334a23
--- /dev/null
+++ b/arch/arm/include/asm/syscall.h
@@ -0,0 +1,93 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_ARM_SYSCALL_H
+#define _ASM_ARM_SYSCALL_H
+
+#include <linux/err.h>
+
+extern const unsigned long sys_call_table[];
+
+static inline int syscall_get_nr(struct task_struct *task,
+				 struct pt_regs *regs)
+{
+	return task_thread_info(task)->syscall;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	regs->ARM_r0 = regs->ARM_ORIG_r0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+				     struct pt_regs *regs)
+{
+	unsigned long error = regs->ARM_r0;
+	return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->ARM_r0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	regs->ARM_r0 = (long) error ? error : val;
+}
+
+#define SYSCALL_MAX_ARGS 7
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	if (i + n > SYSCALL_MAX_ARGS) {
+		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
+		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
+		pr_warning("%s called with max args %d, handling only %d\n",
+			   __func__, i + n, SYSCALL_MAX_ARGS);
+		memset(args_bad, 0, n_bad * sizeof(args[0]));
+		n = SYSCALL_MAX_ARGS - i;
+	}
+
+	if (i == 0) {
+		args[0] = regs->ARM_ORIG_r0;
+		args++;
+		i++;
+		n--;
+	}
+
+	memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 const unsigned long *args)
+{
+	if (i + n > SYSCALL_MAX_ARGS) {
+		pr_warning("%s called with max args %d, handling only %d\n",
+			   __func__, i + n, SYSCALL_MAX_ARGS);
+		n = SYSCALL_MAX_ARGS - i;
+	}
+
+	if (i == 0) {
+		regs->ARM_ORIG_r0 = args[0];
+		args++;
+		i++;
+		n--;
+	}
+
+	memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+}
+
+#endif /* _ASM_ARM_SYSCALL_H */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d4c24d4..68388eb 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -118,6 +118,13 @@
 extern void vfp_sync_hwstate(struct thread_info *);
 extern void vfp_flush_hwstate(struct thread_info *);
 
+struct user_vfp;
+struct user_vfp_exc;
+
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+					   struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+				    struct user_vfp_exc __user *);
 #endif
 
 /*
@@ -146,6 +153,7 @@
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SECCOMP		21
+#define TIF_SWITCH_MM		22	/* deferred switch_mm */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 85fe61e..6e924d3 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -65,21 +65,6 @@
 #define MULTI_TLB 1
 #endif
 
-#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)
-
-#ifdef CONFIG_CPU_TLB_V3
-# define v3_possible_flags	v3_tlb_flags
-# define v3_always_flags	v3_tlb_flags
-# ifdef _TLB
-#  define MULTI_TLB 1
-# else
-#  define _TLB v3
-# endif
-#else
-# define v3_possible_flags	0
-# define v3_always_flags	(-1UL)
-#endif
-
 #define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)
 
 #ifdef CONFIG_CPU_TLB_V4WT
@@ -298,8 +283,7 @@
  * implemented the "%?" method, but this has been discontinued due to too
  * many people getting it wrong.
  */
-#define possible_tlb_flags	(v3_possible_flags | \
-				 v4_possible_flags | \
+#define possible_tlb_flags	(v4_possible_flags | \
 				 v4wbi_possible_flags | \
 				 fr_possible_flags | \
 				 v4wb_possible_flags | \
@@ -307,8 +291,7 @@
 				 v6wbi_possible_flags | \
 				 v7wbi_possible_flags)
 
-#define always_tlb_flags	(v3_always_flags & \
-				 v4_always_flags & \
+#define always_tlb_flags	(v4_always_flags & \
 				 v4wbi_always_flags & \
 				 fr_always_flags & \
 				 v4wb_always_flags & \
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 60843eb..73409e6 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -7,6 +7,8 @@
 
 	.macro set_tls_v6k, tp, tmp1, tmp2
 	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
+	mov	\tmp1, #0
+	mcr	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	.endm
 
 	.macro set_tls_v6, tp, tmp1, tmp2
@@ -15,6 +17,8 @@
 	mov	\tmp2, #0xffff0fff
 	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
 	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
+	movne	\tmp1, #0
+	mcrne	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
 	.endm
 
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 7b787d6..22b0f1e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
+obj-$(CONFIG_ARM_ARCH_TIMER)	+= arch_timer.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o insn.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o insn.o
 obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o insn.o patch.o
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
new file mode 100644
index 0000000..dd58035
--- /dev/null
+++ b/arch/arm/kernel/arch_timer.c
@@ -0,0 +1,350 @@
+/*
+ *  linux/arch/arm/kernel/arch_timer.c
+ *
+ *  Copyright (C) 2011 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/jiffies.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+#include <asm/localtimer.h>
+#include <asm/arch_timer.h>
+#include <asm/system_info.h>
+#include <asm/sched_clock.h>
+
+static unsigned long arch_timer_rate;
+static int arch_timer_ppi;
+static int arch_timer_ppi2;
+
+static struct clock_event_device __percpu **arch_timer_evt;
+
+/*
+ * Architected system timer support.
+ */
+
+#define ARCH_TIMER_CTRL_ENABLE		(1 << 0)
+#define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
+#define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)
+
+#define ARCH_TIMER_REG_CTRL		0
+#define ARCH_TIMER_REG_FREQ		1
+#define ARCH_TIMER_REG_TVAL		2
+
+static void arch_timer_reg_write(int reg, u32 val)
+{
+	switch (reg) {
+	case ARCH_TIMER_REG_CTRL:
+		asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+		break;
+	case ARCH_TIMER_REG_TVAL:
+		asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+		break;
+	}
+
+	isb();
+}
+
+static u32 arch_timer_reg_read(int reg)
+{
+	u32 val;
+
+	switch (reg) {
+	case ARCH_TIMER_REG_CTRL:
+		asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
+		break;
+	case ARCH_TIMER_REG_FREQ:
+		asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
+		break;
+	case ARCH_TIMER_REG_TVAL:
+		asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
+		break;
+	default:
+		BUG();
+	}
+
+	return val;
+}
+
+static irqreturn_t arch_timer_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+	unsigned long ctrl;
+
+	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
+		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
+		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+		evt->event_handler(evt);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static void arch_timer_disable(void)
+{
+	unsigned long ctrl;
+
+	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+}
+
+static void arch_timer_set_mode(enum clock_event_mode mode,
+				struct clock_event_device *clk)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		arch_timer_disable();
+		break;
+	default:
+		break;
+	}
+}
+
+static int arch_timer_set_next_event(unsigned long evt,
+				     struct clock_event_device *unused)
+{
+	unsigned long ctrl;
+
+	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+	ctrl |= ARCH_TIMER_CTRL_ENABLE;
+	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+	arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
+	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+
+	return 0;
+}
+
+static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
+{
+	/* Be safe... */
+	arch_timer_disable();
+
+	clk->features = CLOCK_EVT_FEAT_ONESHOT;
+	clk->name = "arch_sys_timer";
+	clk->rating = 450;
+	clk->set_mode = arch_timer_set_mode;
+	clk->set_next_event = arch_timer_set_next_event;
+	clk->irq = arch_timer_ppi;
+
+	clockevents_config_and_register(clk, arch_timer_rate,
+					0xf, 0x7fffffff);
+
+	*__this_cpu_ptr(arch_timer_evt) = clk;
+
+	enable_percpu_irq(clk->irq, 0);
+	if (arch_timer_ppi2)
+		enable_percpu_irq(arch_timer_ppi2, 0);
+
+	return 0;
+}
+
+/* Is the optional system timer available? */
+static int local_timer_is_architected(void)
+{
+	return (cpu_architecture() >= CPU_ARCH_ARMv7) &&
+	       ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1;
+}
+
+static int arch_timer_available(void)
+{
+	unsigned long freq;
+
+	if (!local_timer_is_architected())
+		return -ENXIO;
+
+	if (arch_timer_rate == 0) {
+		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
+		freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
+
+		/* Check the timer frequency. */
+		if (freq == 0) {
+			pr_warn("Architected timer frequency not available\n");
+			return -EINVAL;
+		}
+
+		arch_timer_rate = freq;
+	}
+
+	pr_info_once("Architected local timer running at %lu.%02luMHz.\n",
+		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
+	return 0;
+}
+
+static inline cycle_t arch_counter_get_cntpct(void)
+{
+	u32 cvall, cvalh;
+
+	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
+
+	return ((cycle_t) cvalh << 32) | cvall;
+}
+
+static inline cycle_t arch_counter_get_cntvct(void)
+{
+	u32 cvall, cvalh;
+
+	asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
+
+	return ((cycle_t) cvalh << 32) | cvall;
+}
+
+static u32 notrace arch_counter_get_cntvct32(void)
+{
+	cycle_t cntvct = arch_counter_get_cntvct();
+
+	/*
+	 * The sched_clock infrastructure only knows about counters
+	 * with at most 32bits. Forget about the upper 24 bits for the
+	 * with at most 32 bits. Forget about the upper 24 bits for the
+	 */
+	return (u32)(cntvct & (u32)~0);
+}
+
+static cycle_t arch_counter_read(struct clocksource *cs)
+{
+	return arch_counter_get_cntpct();
+}
+
+static struct clocksource clocksource_counter = {
+	.name	= "arch_sys_counter",
+	.rating	= 400,
+	.read	= arch_counter_read,
+	.mask	= CLOCKSOURCE_MASK(56),
+	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
+{
+	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
+		 clk->irq, smp_processor_id());
+	disable_percpu_irq(clk->irq);
+	if (arch_timer_ppi2)
+		disable_percpu_irq(arch_timer_ppi2);
+	arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+}
+
+static struct local_timer_ops arch_timer_ops __cpuinitdata = {
+	.setup	= arch_timer_setup,
+	.stop	= arch_timer_stop,
+};
+
+static struct clock_event_device arch_timer_global_evt;
+
+static int __init arch_timer_register(void)
+{
+	int err;
+
+	err = arch_timer_available();
+	if (err)
+		return err;
+
+	arch_timer_evt = alloc_percpu(struct clock_event_device *);
+	if (!arch_timer_evt)
+		return -ENOMEM;
+
+	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+
+	err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
+				 "arch_timer", arch_timer_evt);
+	if (err) {
+		pr_err("arch_timer: can't register interrupt %d (%d)\n",
+		       arch_timer_ppi, err);
+		goto out_free;
+	}
+
+	if (arch_timer_ppi2) {
+		err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
+					 "arch_timer", arch_timer_evt);
+		if (err) {
+			pr_err("arch_timer: can't register interrupt %d (%d)\n",
+			       arch_timer_ppi2, err);
+			arch_timer_ppi2 = 0;
+			goto out_free_irq;
+		}
+	}
+
+	err = local_timer_register(&arch_timer_ops);
+	if (err) {
+		/*
+		 * We couldn't register as a local timer (could be
+		 * because we're on a UP platform, or because some
+		 * other local timer is already present...). Try as a
+		 * global timer instead.
+		 */
+		arch_timer_global_evt.cpumask = cpumask_of(0);
+		err = arch_timer_setup(&arch_timer_global_evt);
+	}
+
+	if (err)
+		goto out_free_irq;
+
+	return 0;
+
+out_free_irq:
+	free_percpu_irq(arch_timer_ppi, arch_timer_evt);
+	if (arch_timer_ppi2)
+		free_percpu_irq(arch_timer_ppi2, arch_timer_evt);
+
+out_free:
+	free_percpu(arch_timer_evt);
+
+	return err;
+}
+
+static const struct of_device_id arch_timer_of_match[] __initconst = {
+	{ .compatible	= "arm,armv7-timer",	},
+	{},
+};
+
+int __init arch_timer_of_register(void)
+{
+	struct device_node *np;
+	u32 freq;
+
+	np = of_find_matching_node(NULL, arch_timer_of_match);
+	if (!np) {
+		pr_err("arch_timer: can't find DT node\n");
+		return -ENODEV;
+	}
+
+	/* Try to determine the frequency from the device tree or CNTFRQ */
+	if (!of_property_read_u32(np, "clock-frequency", &freq))
+		arch_timer_rate = freq;
+
+	arch_timer_ppi = irq_of_parse_and_map(np, 0);
+	arch_timer_ppi2 = irq_of_parse_and_map(np, 1);
+	pr_info("arch_timer: found %s irqs %d %d\n",
+		np->name, arch_timer_ppi, arch_timer_ppi2);
+
+	return arch_timer_register();
+}
+
+int __init arch_timer_sched_clock_init(void)
+{
+	int err;
+
+	err = arch_timer_available();
+	if (err)
+		return err;
+
+	setup_sched_clock(arch_counter_get_cntvct32, 32, arch_timer_rate);
+	return 0;
+}
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index ede5f77..2555250 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -374,16 +374,29 @@
 #endif
 
 /*
- * Swizzle the device pin each time we cross a bridge.
- * This might update pin and returns the slot number.
+ * Swizzle the device pin each time we cross a bridge.  If a platform does
+ * not provide a swizzle function, we perform the standard PCI swizzling.
+ *
+ * The default swizzling walks up the bus tree one level at a time, applying
+ * the standard swizzle function at each step, stopping when it finds the PCI
+ * root bus.  This will return the slot number of the bridge device on the
+ * root bus and the interrupt pin on that device which should correspond
+ * with the downstream device interrupt.
+ *
+ * Platforms may override this, in which case the slot and pin returned
+ * depend entirely on the platform code.  However, please note that the
+ * PCI standard swizzle is implemented on plug-in cards and Cardbus based
+ * PCI extenders, so it cannot be ignored.
  */
 static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin)
 {
 	struct pci_sys_data *sys = dev->sysdata;
-	int slot = 0, oldpin = *pin;
+	int slot, oldpin = *pin;
 
 	if (sys->swizzle)
 		slot = sys->swizzle(dev, pin);
+	else
+		slot = pci_common_swizzle(dev, pin);
 
 	if (debug_pci)
 		printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
@@ -410,7 +423,7 @@
 	return irq;
 }
 
-static void __init pcibios_init_hw(struct hw_pci *hw)
+static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 {
 	struct pci_sys_data *sys = NULL;
 	int ret;
@@ -424,7 +437,6 @@
 #ifdef CONFIG_PCI_DOMAINS
 		sys->domain  = hw->domain;
 #endif
-		sys->hw      = hw;
 		sys->busnr   = busnr;
 		sys->swizzle = hw->swizzle;
 		sys->map_irq = hw->map_irq;
@@ -440,14 +452,18 @@
 					 &iomem_resource, sys->mem_offset);
 			}
 
-			sys->bus = hw->scan(nr, sys);
+			if (hw->scan)
+				sys->bus = hw->scan(nr, sys);
+			else
+				sys->bus = pci_scan_root_bus(NULL, sys->busnr,
+						hw->ops, sys, &sys->resources);
 
 			if (!sys->bus)
 				panic("PCI: unable to scan bus!");
 
 			busnr = sys->bus->subordinate + 1;
 
-			list_add(&sys->node, &hw->buses);
+			list_add(&sys->node, head);
 		} else {
 			kfree(sys);
 			if (ret < 0)
@@ -459,19 +475,18 @@
 void __init pci_common_init(struct hw_pci *hw)
 {
 	struct pci_sys_data *sys;
-
-	INIT_LIST_HEAD(&hw->buses);
+	LIST_HEAD(head);
 
 	pci_add_flags(PCI_REASSIGN_ALL_RSRC);
 	if (hw->preinit)
 		hw->preinit();
-	pcibios_init_hw(hw);
+	pcibios_init_hw(hw, &head);
 	if (hw->postinit)
 		hw->postinit();
 
 	pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
 
-	list_for_each_entry(sys, &hw->buses, node) {
+	list_for_each_entry(sys, &head, node) {
 		struct pci_bus *bus = sys->bus;
 
 		if (!pci_has_flag(PCI_PROBE_ONLY)) {
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7fd3ad0..437f0c4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -556,10 +556,6 @@
 #endif
 	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
 	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
-#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
-	and	r8, r0, #0x0f000000		@ mask out op-code bits
-	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
-#endif
 	moveq	pc, lr
 	get_thread_info r10			@ get current thread
 	and	r8, r0, #0x00000f00		@ mask out CP number
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 54ee265..7bd2d3c 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -335,20 +335,6 @@
  *-----------------------------------------------------------------------------
  */
 
-	/* If we're optimising for StrongARM the resulting code won't 
-	   run on an ARM7 and we can save a couple of instructions.  
-								--pb */
-#ifdef CONFIG_CPU_ARM710
-#define A710(code...) code
-.Larm710bug:
-	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE
-	subs	pc, lr, #4
-#else
-#define A710(code...)
-#endif
-
 	.align	5
 ENTRY(vector_swi)
 	sub	sp, sp, #S_FRAME_SIZE
@@ -379,9 +365,6 @@
 	ldreq	r10, [lr, #-4]			@ get SWI instruction
 #else
 	ldr	r10, [lr, #-4]			@ get SWI instruction
-  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
-  A710(	teq	ip, #0x0f000000						)
-  A710(	bne	.Larm710bug						)
 #endif
 #ifdef CONFIG_CPU_ENDIAN_BE8
 	rev	r10, r10			@ little endian instruction
@@ -392,26 +375,15 @@
 	/*
 	 * Pure EABI user space always put syscall number into scno (r7).
 	 */
-  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
-  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
-  A710(	teq	ip, #0x0f000000						)
-  A710(	bne	.Larm710bug						)
-
 #elif defined(CONFIG_ARM_THUMB)
-
 	/* Legacy ABI only, possibly thumb mode. */
 	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
 	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 	ldreq	scno, [lr, #-4]
 
 #else
-
 	/* Legacy ABI only. */
 	ldr	scno, [lr, #-4]			@ get SWI instruction
-  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
-  A710(	teq	ip, #0x0f000000						)
-  A710(	bne	.Larm710bug						)
-
 #endif
 
 #ifdef CONFIG_ALIGNMENT_TRAP
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 3bf0c7f..835898e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -277,10 +277,6 @@
 	mov	r3, r3, lsl #PMD_ORDER
 
 	add	r0, r4, r3
-	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
-	cmp	r3, #0x0800			@ limit to 512MB
-	movhi	r3, #0x0800
-	add	r6, r0, r3
 	mov	r3, r7, lsr #SECTION_SHIFT
 	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
 	orr	r3, r7, r3, lsl #SECTION_SHIFT
@@ -289,13 +285,10 @@
 #else
 	orr	r3, r3, #PMD_SECT_XN
 #endif
-1:	str	r3, [r0], #4
+	str	r3, [r0], #4
 #ifdef CONFIG_ARM_LPAE
 	str	r7, [r0], #4
 #endif
-	add	r3, r3, #1 << SECTION_SHIFT
-	cmp	r0, r6
-	blo	1b
 
 #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
 	/* we don't need any serial debugging mappings */
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 71ccdbf..8349d4e97 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -155,10 +155,10 @@
 	}
 
 	c = irq_data_get_irq_chip(d);
-	if (c->irq_set_affinity)
-		c->irq_set_affinity(d, affinity, true);
-	else
+	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
 
 	return ret;
 }
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 80abafb..14e3826 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -24,6 +24,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/regset.h>
 #include <linux/audit.h>
+#include <linux/tracehook.h>
 
 #include <asm/pgtable.h>
 #include <asm/traps.h>
@@ -906,49 +907,33 @@
 	return ret;
 }
 
-#ifdef __ARMEB__
-#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
-#else
-#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
-#endif
-
 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 {
 	unsigned long ip;
 
-	/*
-	 * Save IP.  IP is used to denote syscall entry/exit:
-	 *  IP = 0 -> entry, = 1 -> exit
-	 */
-	ip = regs->ARM_ip;
-	regs->ARM_ip = why;
-
-	if (!ip)
+	if (why)
 		audit_syscall_exit(regs);
 	else
-		audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0,
+		audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
 				    regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
 
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		return scno;
-	if (!(current->ptrace & PT_PTRACED))
-		return scno;
 
 	current_thread_info()->syscall = scno;
 
-	/* the 0x80 provides a way for the tracing parent to distinguish
-	   between a syscall stop and SIGTRAP delivery */
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				 ? 0x80 : 0));
 	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
+	 * IP is used to denote syscall entry/exit:
+	 * IP = 0 -> entry, =1 -> exit
 	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
+	ip = regs->ARM_ip;
+	regs->ARM_ip = why;
+
+	if (why)
+		tracehook_report_syscall_exit(regs, 0);
+	else if (tracehook_report_syscall_entry(regs))
+		current_thread_info()->syscall = -1;
+
 	regs->ARM_ip = ip;
 
 	return current_thread_info()->syscall;
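From user space the behaviour a tracer sees is unchanged by this conversion: each PTRACE_SYSCALL stop still alternates between syscall entry and exit. Purely as an illustration of ordinary ptrace(2) usage (not part of this change):

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    static void trace_syscalls(pid_t child)
    {
    	int status, entering = 1;

    	waitpid(child, &status, 0);	/* child stopped after PTRACE_TRACEME + execve */
    	while (!WIFEXITED(status)) {
    		ptrace(PTRACE_SYSCALL, child, NULL, NULL);	/* run to next syscall boundary */
    		waitpid(child, &status, 0);
    		if (WIFSTOPPED(status)) {
    			printf("syscall %s\n", entering ? "entry" : "exit");
    			entering = !entering;
    		}
    	}
    }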
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7cb532f..73d9a42 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -180,44 +180,23 @@
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	const unsigned long magic = VFP_MAGIC;
 	const unsigned long size = VFP_STORAGE_SIZE;
 	int err = 0;
 
-	vfp_sync_hwstate(thread);
 	__put_user_error(magic, &frame->magic, err);
 	__put_user_error(size, &frame->size, err);
 
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
-			      sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
+	if (err)
+		return -EFAULT;
 
-	/*
-	 * Copy the exception registers.
-	 */
-	__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
-	__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
-
-	return err ? -EFAULT : 0;
+	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 static int restore_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	unsigned long magic;
 	unsigned long size;
-	unsigned long fpexc;
 	int err = 0;
 
 	__get_user_error(magic, &frame->magic, err);
@@ -228,33 +207,7 @@
 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
 		return -EINVAL;
 
-	vfp_flush_hwstate(thread);
-
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
-				sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Sanitise and restore the exception registers.
-	 */
-	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
-	/* Ensure the VFP is enabled. */
-	fpexc |= FPEXC_EN;
-	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
-	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
-	h->fpexc = fpexc;
-
-	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
-
-	return err ? -EFAULT : 0;
+	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 #endif
@@ -636,6 +589,8 @@
 	 */
 	block_sigmask(ka, sig);
 
+	tracehook_signal_handler(sig, info, ka, regs, 0);
+
 	return 0;
 }
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index addbbe8..cf58558 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -251,8 +251,6 @@
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu = smp_processor_id();
 
-	printk("CPU%u: Booted secondary processor\n", cpu);
-
 	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
@@ -264,6 +262,8 @@
 	enter_lazy_tlb(mm, current);
 	local_flush_tlb_all();
 
+	printk("CPU%u: Booted secondary processor\n", cpu);
+
 	cpu_init();
 	preempt_disable();
 	trace_hardirqs_off();
@@ -454,6 +454,9 @@
 #ifdef CONFIG_LOCAL_TIMERS
 int local_timer_register(struct local_timer_ops *ops)
 {
+	if (!is_smp() || !setup_max_cpus)
+		return -ENXIO;
+
 	if (lt_ops)
 		return -EBUSY;
 
@@ -510,10 +513,6 @@
 	local_fiq_disable();
 	local_irq_disable();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	platform_cpu_kill(cpu);
-#endif
-
 	while (1)
 		cpu_relax();
 }
@@ -576,17 +575,25 @@
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+	unsigned int cpu;
+	for_each_cpu(cpu, mask)
+		platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
+
 void smp_send_stop(void)
 {
 	unsigned long timeout;
+	struct cpumask mask;
 
-	if (num_online_cpus() > 1) {
-		struct cpumask mask;
-		cpumask_copy(&mask, cpu_online_mask);
-		cpumask_clear_cpu(smp_processor_id(), &mask);
-
-		smp_cross_call(&mask, IPI_CPU_STOP);
-	}
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
@@ -595,6 +602,8 @@
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
+
+	smp_kill_cpus(&mask);
 }
 
 /*
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 8f5dd79..b9f015e 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 
+#include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
@@ -74,7 +75,7 @@
 int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 {
 	unsigned int val;
-	int cpu = smp_processor_id();
+	int cpu = cpu_logical_map(smp_processor_id());
 
 	if (mode > 3 || mode == 1 || cpu > 3)
 		return -EINVAL;
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index d2b1779..76cbb05 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -115,7 +115,7 @@
 		  "Ir" (THREAD_START_SP - sizeof(regs)),
 		  "r" (&regs),
 		  "Ir" (sizeof(regs))
-		: "r0", "r1", "r2", "r3", "ip", "lr", "memory");
+		: "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
 
  out:
 	return ret;
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index aab8997..7b8403b 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -20,6 +20,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
+#include <asm/cputype.h>
 #include <asm/system_info.h>
 #include <asm/thread_notify.h>
 
@@ -67,8 +68,7 @@
 	if (cpu_arch < CPU_ARCH_ARMv7)
 		return 0;
 
-	/* processor feature register 0 */
-	asm("mrc	p15, 0, %0, c0, c1, 0\n" : "=r" (pfr0));
+	pfr0 = read_cpuid_ext(CPUID_EXT_PFR0);
 	if ((pfr0 & 0x0000f000) != 0x00001000)
 		return 0;
 
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index fe31b22..af2afb0 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -110,6 +110,42 @@
 }
 #endif
 
+static void dummy_clock_access(struct timespec *ts)
+{
+	ts->tv_sec = 0;
+	ts->tv_nsec = 0;
+}
+
+static clock_access_fn __read_persistent_clock = dummy_clock_access;
+static clock_access_fn __read_boot_clock = dummy_clock_access;
+
+void read_persistent_clock(struct timespec *ts)
+{
+	__read_persistent_clock(ts);
+}
+
+void read_boot_clock(struct timespec *ts)
+{
+	__read_boot_clock(ts);
+}
+
+int __init register_persistent_clock(clock_access_fn read_boot,
+				     clock_access_fn read_persistent)
+{
+	/* Only allow the clock access functions to be registered once */
+	if (__read_persistent_clock == dummy_clock_access &&
+	    __read_boot_clock == dummy_clock_access) {
+		if (read_boot)
+			__read_boot_clock = read_boot;
+		if (read_persistent)
+			__read_persistent_clock = read_persistent;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 #if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS)
 static int timer_suspend(void)
 {
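A platform would typically hook this up once from its timekeeping init. A minimal sketch, where myplat_read_persistent_clock() and myplat_rtc_read_seconds() are hypothetical names used only to show the registration API:

    /* hypothetical platform code, shown only to illustrate register_persistent_clock() */
    static void myplat_read_persistent_clock(struct timespec *ts)
    {
    	ts->tv_sec = myplat_rtc_read_seconds();	/* hypothetical always-on counter read */
    	ts->tv_nsec = 0;
    }

    static int __init myplat_timekeeping_init(void)
    {
    	/* no separate boot clock here, so only the persistent clock is registered */
    	return register_persistent_clock(NULL, myplat_read_persistent_clock);
    }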
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 7784547..3647170 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -479,14 +479,14 @@
 	return regs->ARM_r0;
 }
 
-static inline void
+static inline int
 do_cache_op(unsigned long start, unsigned long end, int flags)
 {
 	struct mm_struct *mm = current->active_mm;
 	struct vm_area_struct *vma;
 
 	if (end < start || flags)
-		return;
+		return -EINVAL;
 
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, start);
@@ -496,9 +496,11 @@
 		if (end > vma->vm_end)
 			end = vma->vm_end;
 
-		flush_cache_user_range(vma, start, end);
+		up_read(&mm->mmap_sem);
+		return flush_cache_user_range(start, end);
 	}
 	up_read(&mm->mmap_sem);
+	return -EINVAL;
 }
 
 /*
@@ -544,8 +546,7 @@
 	 * the specified region).
 	 */
 	case NR(cacheflush):
-		do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
-		return 0;
+		return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
 
 	case NR(usr26):
 		if (!(elf_hwcap & HWCAP_26BIT))
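The user-visible effect of the do_cache_op() change is that the ARM-private cacheflush call can now fail with -EINVAL instead of silently doing nothing. For illustration, this is roughly how a user-space JIT would invoke it on an EABI kernel (the syscall number below assumes __ARM_NR_BASE + 2):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __ARM_NR_cacheflush
    #define __ARM_NR_cacheflush	0x0f0002	/* assumed: __ARM_NR_BASE + 2 on EABI */
    #endif

    static int sync_icache(void *start, void *end)
    {
    	/* third argument is 'flags' and must be zero */
    	if (syscall(__ARM_NR_cacheflush, start, end, 0) < 0) {
    		perror("cacheflush");	/* e.g. EINVAL for a bad range after this change */
    		return -errno;
    	}
    	return 0;
    }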
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 0ade0ac..992769a 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -17,30 +17,13 @@
 		   call_with_stack.o
 
 mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o
-
-# the code in uaccess.S is not preemption safe and
-# probably faster on ARMv3 only
-ifeq ($(CONFIG_PREEMPT),y)
-  mmu-y	+= copy_from_user.o copy_to_user.o
-else
-ifneq ($(CONFIG_CPU_32v3),y)
-  mmu-y	+= copy_from_user.o copy_to_user.o
-else
-  mmu-y	+= uaccess.o
-endif
-endif
+mmu-y	+= copy_from_user.o copy_to_user.o
 
 # using lib_ here won't override already available weak symbols
 obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
 
-lib-$(CONFIG_MMU) += $(mmu-y)
-
-ifeq ($(CONFIG_CPU_32v3),y)
-  lib-y	+= io-readsw-armv3.o io-writesw-armv3.o
-else
-  lib-y	+= io-readsw-armv4.o io-writesw-armv4.o
-endif
-
+lib-$(CONFIG_MMU)		+= $(mmu-y)
+lib-y				+= io-readsw-armv4.o io-writesw-armv4.o
 lib-$(CONFIG_ARCH_RPC)		+= ecard.o io-acorn.o floppydma.o
 lib-$(CONFIG_ARCH_SHARK)	+= io-shark.o
 
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
deleted file mode 100644
index 88487c8..0000000
--- a/arch/arm/lib/io-readsw-armv3.S
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- *  linux/arch/arm/lib/io-readsw-armv3.S
- *
- *  Copyright (C) 1995-2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-.Linsw_bad_alignment:
-		adr	r0, .Linsw_bad_align_msg
-		mov	r2, lr
-		b	panic
-.Linsw_bad_align_msg:
-		.asciz	"insw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
-		.align
-
-.Linsw_align:	tst	r1, #1
-		bne	.Linsw_bad_alignment
-
-		ldr	r3, [r0]
-		strb	r3, [r1], #1
-		mov	r3, r3, lsr #8
-		strb	r3, [r1], #1
-
-		subs	r2, r2, #1
-		moveq	pc, lr
-
-ENTRY(__raw_readsw)
-		teq	r2, #0		@ do we have to check for the zero len?
-		moveq	pc, lr
-		tst	r1, #3
-		bne	.Linsw_align
-
-.Linsw_aligned:	mov	ip, #0xff
-		orr	ip, ip, ip, lsl #8
-		stmfd	sp!, {r4, r5, r6, lr}
-
-		subs	r2, r2, #8
-		bmi	.Lno_insw_8
-
-.Linsw_8_lp:	ldr	r3, [r0]
-		and	r3, r3, ip
-		ldr	r4, [r0]
-		orr	r3, r3, r4, lsl #16
-
-		ldr	r4, [r0]
-		and	r4, r4, ip
-		ldr	r5, [r0]
-		orr	r4, r4, r5, lsl #16
-
-		ldr	r5, [r0]
-		and	r5, r5, ip
-		ldr	r6, [r0]
-		orr	r5, r5, r6, lsl #16
-
-		ldr	r6, [r0]
-		and	r6, r6, ip
-		ldr	lr, [r0]
-		orr	r6, r6, lr, lsl #16
-
-		stmia	r1!, {r3 - r6}
-
-		subs	r2, r2, #8
-		bpl	.Linsw_8_lp
-
-		tst	r2, #7
-		ldmeqfd	sp!, {r4, r5, r6, pc}
-
-.Lno_insw_8:	tst	r2, #4
-		beq	.Lno_insw_4
-
-		ldr	r3, [r0]
-		and	r3, r3, ip
-		ldr	r4, [r0]
-		orr	r3, r3, r4, lsl #16
-
-		ldr	r4, [r0]
-		and	r4, r4, ip
-		ldr	r5, [r0]
-		orr	r4, r4, r5, lsl #16
-
-		stmia	r1!, {r3, r4}
-
-.Lno_insw_4:	tst	r2, #2
-		beq	.Lno_insw_2
-
-		ldr	r3, [r0]
-		and	r3, r3, ip
-		ldr	r4, [r0]
-		orr	r3, r3, r4, lsl #16
-
-		str	r3, [r1], #4
-
-.Lno_insw_2:	tst	r2, #1
-		ldrne	r3, [r0]
-		strneb	r3, [r1], #1
-		movne	r3, r3, lsr #8
-		strneb	r3, [r1]
-
-		ldmfd	sp!, {r4, r5, r6, pc}
-
-
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
deleted file mode 100644
index 49b8004..0000000
--- a/arch/arm/lib/io-writesw-armv3.S
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- *  linux/arch/arm/lib/io-writesw-armv3.S
- *
- *  Copyright (C) 1995-2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-.Loutsw_bad_alignment:
-		adr	r0, .Loutsw_bad_align_msg
-		mov	r2, lr
-		b	panic
-.Loutsw_bad_align_msg:
-		.asciz	"outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
-		.align
-
-.Loutsw_align:	tst	r1, #1
-		bne	.Loutsw_bad_alignment
-
-		add	r1, r1, #2
-
-		ldr	r3, [r1, #-4]
-		mov	r3, r3, lsr #16
-		orr	r3, r3, r3, lsl #16
-		str	r3, [r0]
-		subs	r2, r2, #1
-		moveq	pc, lr
-
-ENTRY(__raw_writesw)
-		teq	r2, #0		@ do we have to check for the zero len?
-		moveq	pc, lr
-		tst	r1, #3
-		bne	.Loutsw_align
-
-		stmfd	sp!, {r4, r5, r6, lr}
-
-		subs	r2, r2, #8
-		bmi	.Lno_outsw_8
-
-.Loutsw_8_lp:	ldmia	r1!, {r3, r4, r5, r6}
-
-		mov	ip, r3, lsl #16
-		orr	ip, ip, ip, lsr #16
-		str	ip, [r0]
-
-		mov	ip, r3, lsr #16
-		orr	ip, ip, ip, lsl #16
-		str	ip, [r0]
-
-		mov	ip, r4, lsl #16
-		orr	ip, ip, ip, lsr #16
-		str	ip, [r0]
-
-		mov	ip, r4, lsr #16
-		orr	ip, ip, ip, lsl #16
-		str	ip, [r0]
-
-		mov	ip, r5, lsl #16
-		orr	ip, ip, ip, lsr #16
-		str	ip, [r0]
-
-		mov	ip, r5, lsr #16
-		orr	ip, ip, ip, lsl #16
-		str	ip, [r0]
-
-		mov	ip, r6, lsl #16
-		orr	ip, ip, ip, lsr #16
-		str	ip, [r0]
-
-		mov	ip, r6, lsr #16
-		orr	ip, ip, ip, lsl #16
-		str	ip, [r0]
-
-		subs	r2, r2, #8
-		bpl	.Loutsw_8_lp
-
-		tst	r2, #7
-		ldmeqfd	sp!, {r4, r5, r6, pc}
-
-.Lno_outsw_8:	tst	r2, #4
-		beq	.Lno_outsw_4
-
-		ldmia	r1!, {r3, r4}
-
-		mov	ip, r3, lsl #16
-		orr	ip, ip, ip, lsr #16
-		str	ip, [r0]
-
-		mov	ip, r3, lsr #16
-		orr	ip, ip, ip, lsl #16
-		str	ip, [r0]
-
-		mov	ip, r4, lsl #16
-		orr	ip, ip, ip, lsr #16
-		str	ip, [r0]
-
-		mov	ip, r4, lsr #16
-		orr	ip, ip, ip, lsl #16
-		str	ip, [r0]
-
-.Lno_outsw_4:	tst	r2, #2
-		beq	.Lno_outsw_2
-
-		ldr	r3, [r1], #4
-
-		mov	ip, r3, lsl #16
-		orr	ip, ip, ip, lsr #16
-		str	ip, [r0]
-
-		mov	ip, r3, lsr #16
-		orr	ip, ip, ip, lsl #16
-		str	ip, [r0]
-
-.Lno_outsw_2:	tst	r2, #1
-
-		ldrne	r3, [r1]
-
-		movne	ip, r3, lsl #16
-		orrne	ip, ip, ip, lsr #16
-		strne	ip, [r0]
-
-		ldmfd	sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
deleted file mode 100644
index 5c908b1..0000000
--- a/arch/arm/lib/uaccess.S
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- *  linux/arch/arm/lib/uaccess.S
- *
- *  Copyright (C) 1995, 1996,1997,1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Routines to block copy data to/from user memory
- *   These are highly optimised both for the 4k page size
- *   and for various alignments.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-#include <asm/domain.h>
-
-		.text
-
-#define PAGE_SHIFT 12
-
-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
- * Purpose  : copy a block to user memory from kernel memory
- * Params   : to   - user memory
- *          : from - kernel memory
- *          : n    - number of bytes to copy
- * Returns  : Number of bytes NOT copied.
- */
-
-.Lc2u_dest_not_aligned:
-		rsb	ip, ip, #4
-		cmp	ip, #2
-		ldrb	r3, [r1], #1
-USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault
-		ldrgeb	r3, [r1], #1
-USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault
-		ldrgtb	r3, [r1], #1
-USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault
-		sub	r2, r2, ip
-		b	.Lc2u_dest_aligned
-
-ENTRY(__copy_to_user)
-		stmfd	sp!, {r2, r4 - r7, lr}
-		cmp	r2, #4
-		blt	.Lc2u_not_enough
-		ands	ip, r0, #3
-		bne	.Lc2u_dest_not_aligned
-.Lc2u_dest_aligned:
-
-		ands	ip, r1, #3
-		bne	.Lc2u_src_not_aligned
-/*
- * Seeing as there has to be at least 8 bytes to copy, we can
- * copy one word, and force a user-mode page fault...
- */
-
-.Lc2u_0fupi:	subs	r2, r2, #4
-		addmi	ip, r2, #4
-		bmi	.Lc2u_0nowords
-		ldr	r3, [r1], #4
-USER(	TUSER(	str)	r3, [r0], #4)			@ May fault
-		mov	ip, r0, lsl #32 - PAGE_SHIFT	@ On each page, use a ld/st??t instruction
-		rsb	ip, ip, #0
-		movs	ip, ip, lsr #32 - PAGE_SHIFT
-		beq	.Lc2u_0fupi
-/*
- * ip = max no. of bytes to copy before needing another "strt" insn
- */
-		cmp	r2, ip
-		movlt	ip, r2
-		sub	r2, r2, ip
-		subs	ip, ip, #32
-		blt	.Lc2u_0rem8lp
-
-.Lc2u_0cpy8lp:	ldmia	r1!, {r3 - r6}
-		stmia	r0!, {r3 - r6}			@ Shouldnt fault
-		ldmia	r1!, {r3 - r6}
-		subs	ip, ip, #32
-		stmia	r0!, {r3 - r6}			@ Shouldnt fault
-		bpl	.Lc2u_0cpy8lp
-
-.Lc2u_0rem8lp:	cmn	ip, #16
-		ldmgeia	r1!, {r3 - r6}
-		stmgeia	r0!, {r3 - r6}			@ Shouldnt fault
-		tst	ip, #8
-		ldmneia	r1!, {r3 - r4}
-		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
-		tst	ip, #4
-		ldrne	r3, [r1], #4
-	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault
-		ands	ip, ip, #3
-		beq	.Lc2u_0fupi
-.Lc2u_0nowords:	teq	ip, #0
-		beq	.Lc2u_finished
-.Lc2u_nowords:	cmp	ip, #2
-		ldrb	r3, [r1], #1
-USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault
-		ldrgeb	r3, [r1], #1
-USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault
-		ldrgtb	r3, [r1], #1
-USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault
-		b	.Lc2u_finished
-
-.Lc2u_not_enough:
-		movs	ip, r2
-		bne	.Lc2u_nowords
-.Lc2u_finished:	mov	r0, #0
-		ldmfd	sp!, {r2, r4 - r7, pc}
-
-.Lc2u_src_not_aligned:
-		bic	r1, r1, #3
-		ldr	r7, [r1], #4
-		cmp	ip, #2
-		bgt	.Lc2u_3fupi
-		beq	.Lc2u_2fupi
-.Lc2u_1fupi:	subs	r2, r2, #4
-		addmi	ip, r2, #4
-		bmi	.Lc2u_1nowords
-		mov	r3, r7, pull #8
-		ldr	r7, [r1], #4
-		orr	r3, r3, r7, push #24
-USER(	TUSER(	str)	r3, [r0], #4)			@ May fault
-		mov	ip, r0, lsl #32 - PAGE_SHIFT
-		rsb	ip, ip, #0
-		movs	ip, ip, lsr #32 - PAGE_SHIFT
-		beq	.Lc2u_1fupi
-		cmp	r2, ip
-		movlt	ip, r2
-		sub	r2, r2, ip
-		subs	ip, ip, #16
-		blt	.Lc2u_1rem8lp
-
-.Lc2u_1cpy8lp:	mov	r3, r7, pull #8
-		ldmia	r1!, {r4 - r7}
-		subs	ip, ip, #16
-		orr	r3, r3, r4, push #24
-		mov	r4, r4, pull #8
-		orr	r4, r4, r5, push #24
-		mov	r5, r5, pull #8
-		orr	r5, r5, r6, push #24
-		mov	r6, r6, pull #8
-		orr	r6, r6, r7, push #24
-		stmia	r0!, {r3 - r6}			@ Shouldnt fault
-		bpl	.Lc2u_1cpy8lp
-
-.Lc2u_1rem8lp:	tst	ip, #8
-		movne	r3, r7, pull #8
-		ldmneia	r1!, {r4, r7}
-		orrne	r3, r3, r4, push #24
-		movne	r4, r4, pull #8
-		orrne	r4, r4, r7, push #24
-		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
-		tst	ip, #4
-		movne	r3, r7, pull #8
-		ldrne	r7, [r1], #4
-		orrne	r3, r3, r7, push #24
-	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault
-		ands	ip, ip, #3
-		beq	.Lc2u_1fupi
-.Lc2u_1nowords:	mov	r3, r7, get_byte_1
-		teq	ip, #0
-		beq	.Lc2u_finished
-		cmp	ip, #2
-USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault
-		movge	r3, r7, get_byte_2
-USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault
-		movgt	r3, r7, get_byte_3
-USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault
-		b	.Lc2u_finished
-
-.Lc2u_2fupi:	subs	r2, r2, #4
-		addmi	ip, r2, #4
-		bmi	.Lc2u_2nowords
-		mov	r3, r7, pull #16
-		ldr	r7, [r1], #4
-		orr	r3, r3, r7, push #16
-USER(	TUSER(	str)	r3, [r0], #4)			@ May fault
-		mov	ip, r0, lsl #32 - PAGE_SHIFT
-		rsb	ip, ip, #0
-		movs	ip, ip, lsr #32 - PAGE_SHIFT
-		beq	.Lc2u_2fupi
-		cmp	r2, ip
-		movlt	ip, r2
-		sub	r2, r2, ip
-		subs	ip, ip, #16
-		blt	.Lc2u_2rem8lp
-
-.Lc2u_2cpy8lp:	mov	r3, r7, pull #16
-		ldmia	r1!, {r4 - r7}
-		subs	ip, ip, #16
-		orr	r3, r3, r4, push #16
-		mov	r4, r4, pull #16
-		orr	r4, r4, r5, push #16
-		mov	r5, r5, pull #16
-		orr	r5, r5, r6, push #16
-		mov	r6, r6, pull #16
-		orr	r6, r6, r7, push #16
-		stmia	r0!, {r3 - r6}			@ Shouldnt fault
-		bpl	.Lc2u_2cpy8lp
-
-.Lc2u_2rem8lp:	tst	ip, #8
-		movne	r3, r7, pull #16
-		ldmneia	r1!, {r4, r7}
-		orrne	r3, r3, r4, push #16
-		movne	r4, r4, pull #16
-		orrne	r4, r4, r7, push #16
-		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
-		tst	ip, #4
-		movne	r3, r7, pull #16
-		ldrne	r7, [r1], #4
-		orrne	r3, r3, r7, push #16
-	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault
-		ands	ip, ip, #3
-		beq	.Lc2u_2fupi
-.Lc2u_2nowords:	mov	r3, r7, get_byte_2
-		teq	ip, #0
-		beq	.Lc2u_finished
-		cmp	ip, #2
-USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault
-		movge	r3, r7, get_byte_3
-USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault
-		ldrgtb	r3, [r1], #0
-USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault
-		b	.Lc2u_finished
-
-.Lc2u_3fupi:	subs	r2, r2, #4
-		addmi	ip, r2, #4
-		bmi	.Lc2u_3nowords
-		mov	r3, r7, pull #24
-		ldr	r7, [r1], #4
-		orr	r3, r3, r7, push #8
-USER(	TUSER(	str)	r3, [r0], #4)			@ May fault
-		mov	ip, r0, lsl #32 - PAGE_SHIFT
-		rsb	ip, ip, #0
-		movs	ip, ip, lsr #32 - PAGE_SHIFT
-		beq	.Lc2u_3fupi
-		cmp	r2, ip
-		movlt	ip, r2
-		sub	r2, r2, ip
-		subs	ip, ip, #16
-		blt	.Lc2u_3rem8lp
-
-.Lc2u_3cpy8lp:	mov	r3, r7, pull #24
-		ldmia	r1!, {r4 - r7}
-		subs	ip, ip, #16
-		orr	r3, r3, r4, push #8
-		mov	r4, r4, pull #24
-		orr	r4, r4, r5, push #8
-		mov	r5, r5, pull #24
-		orr	r5, r5, r6, push #8
-		mov	r6, r6, pull #24
-		orr	r6, r6, r7, push #8
-		stmia	r0!, {r3 - r6}			@ Shouldnt fault
-		bpl	.Lc2u_3cpy8lp
-
-.Lc2u_3rem8lp:	tst	ip, #8
-		movne	r3, r7, pull #24
-		ldmneia	r1!, {r4, r7}
-		orrne	r3, r3, r4, push #8
-		movne	r4, r4, pull #24
-		orrne	r4, r4, r7, push #8
-		stmneia	r0!, {r3 - r4}			@ Shouldnt fault
-		tst	ip, #4
-		movne	r3, r7, pull #24
-		ldrne	r7, [r1], #4
-		orrne	r3, r3, r7, push #8
-	TUSER(	strne) r3, [r0], #4			@ Shouldnt fault
-		ands	ip, ip, #3
-		beq	.Lc2u_3fupi
-.Lc2u_3nowords:	mov	r3, r7, get_byte_3
-		teq	ip, #0
-		beq	.Lc2u_finished
-		cmp	ip, #2
-USER(	TUSER(	strb)	r3, [r0], #1)			@ May fault
-		ldrgeb	r3, [r1], #1
-USER(	TUSER(	strgeb) r3, [r0], #1)			@ May fault
-		ldrgtb	r3, [r1], #0
-USER(	TUSER(	strgtb) r3, [r0], #1)			@ May fault
-		b	.Lc2u_finished
-ENDPROC(__copy_to_user)
-
-		.pushsection .fixup,"ax"
-		.align	0
-9001:		ldmfd	sp!, {r0, r4 - r7, pc}
-		.popsection
-
-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
- * Purpose  : copy a block from user memory to kernel memory
- * Params   : to   - kernel memory
- *          : from - user memory
- *          : n    - number of bytes to copy
- * Returns  : Number of bytes NOT copied.
- */
-.Lcfu_dest_not_aligned:
-		rsb	ip, ip, #4
-		cmp	ip, #2
-USER(	TUSER(	ldrb)	r3, [r1], #1)			@ May fault
-		strb	r3, [r0], #1
-USER(	TUSER(	ldrgeb) r3, [r1], #1)			@ May fault
-		strgeb	r3, [r0], #1
-USER(	TUSER(	ldrgtb) r3, [r1], #1)			@ May fault
-		strgtb	r3, [r0], #1
-		sub	r2, r2, ip
-		b	.Lcfu_dest_aligned
-
-ENTRY(__copy_from_user)
-		stmfd	sp!, {r0, r2, r4 - r7, lr}
-		cmp	r2, #4
-		blt	.Lcfu_not_enough
-		ands	ip, r0, #3
-		bne	.Lcfu_dest_not_aligned
-.Lcfu_dest_aligned:
-		ands	ip, r1, #3
-		bne	.Lcfu_src_not_aligned
-
-/*
- * Seeing as there has to be at least 8 bytes to copy, we can
- * copy one word, and force a user-mode page fault...
- */
-
-.Lcfu_0fupi:	subs	r2, r2, #4
-		addmi	ip, r2, #4
-		bmi	.Lcfu_0nowords
-USER(	TUSER(	ldr)	r3, [r1], #4)
-		str	r3, [r0], #4
-		mov	ip, r1, lsl #32 - PAGE_SHIFT	@ On each page, use a ld/st??t instruction
-		rsb	ip, ip, #0
-		movs	ip, ip, lsr #32 - PAGE_SHIFT
-		beq	.Lcfu_0fupi
-/*
- * ip = max no. of bytes to copy before needing another "strt" insn
- */
-		cmp	r2, ip
-		movlt	ip, r2
-		sub	r2, r2, ip
-		subs	ip, ip, #32
-		blt	.Lcfu_0rem8lp
-
-.Lcfu_0cpy8lp:	ldmia	r1!, {r3 - r6}			@ Shouldnt fault
-		stmia	r0!, {r3 - r6}
-		ldmia	r1!, {r3 - r6}			@ Shouldnt fault
-		subs	ip, ip, #32
-		stmia	r0!, {r3 - r6}
-		bpl	.Lcfu_0cpy8lp
-
-.Lcfu_0rem8lp:	cmn	ip, #16
-		ldmgeia	r1!, {r3 - r6}			@ Shouldnt fault
-		stmgeia	r0!, {r3 - r6}
-		tst	ip, #8
-		ldmneia	r1!, {r3 - r4}			@ Shouldnt fault
-		stmneia	r0!, {r3 - r4}
-		tst	ip, #4
-	TUSER(	ldrne) r3, [r1], #4			@ Shouldnt fault
-		strne	r3, [r0], #4
-		ands	ip, ip, #3
-		beq	.Lcfu_0fupi
-.Lcfu_0nowords:	teq	ip, #0
-		beq	.Lcfu_finished
-.Lcfu_nowords:	cmp	ip, #2
-USER(	TUSER(	ldrb)	r3, [r1], #1)			@ May fault
-		strb	r3, [r0], #1
-USER(	TUSER(	ldrgeb) r3, [r1], #1)			@ May fault
-		strgeb	r3, [r0], #1
-USER(	TUSER(	ldrgtb) r3, [r1], #1)			@ May fault
-		strgtb	r3, [r0], #1
-		b	.Lcfu_finished
-
-.Lcfu_not_enough:
-		movs	ip, r2
-		bne	.Lcfu_nowords
-.Lcfu_finished:	mov	r0, #0
-		add	sp, sp, #8
-		ldmfd	sp!, {r4 - r7, pc}
-
-.Lcfu_src_not_aligned:
-		bic	r1, r1, #3
-USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault
-		cmp	ip, #2
-		bgt	.Lcfu_3fupi
-		beq	.Lcfu_2fupi
-.Lcfu_1fupi:	subs	r2, r2, #4
-		addmi	ip, r2, #4
-		bmi	.Lcfu_1nowords
-		mov	r3, r7, pull #8
-USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault
-		orr	r3, r3, r7, push #24
-		str	r3, [r0], #4
-		mov	ip, r1, lsl #32 - PAGE_SHIFT
-		rsb	ip, ip, #0
-		movs	ip, ip, lsr #32 - PAGE_SHIFT
-		beq	.Lcfu_1fupi
-		cmp	r2, ip
-		movlt	ip, r2
-		sub	r2, r2, ip
-		subs	ip, ip, #16
-		blt	.Lcfu_1rem8lp
-
-.Lcfu_1cpy8lp:	mov	r3, r7, pull #8
-		ldmia	r1!, {r4 - r7}			@ Shouldnt fault
-		subs	ip, ip, #16
-		orr	r3, r3, r4, push #24
-		mov	r4, r4, pull #8
-		orr	r4, r4, r5, push #24
-		mov	r5, r5, pull #8
-		orr	r5, r5, r6, push #24
-		mov	r6, r6, pull #8
-		orr	r6, r6, r7, push #24
-		stmia	r0!, {r3 - r6}
-		bpl	.Lcfu_1cpy8lp
-
-.Lcfu_1rem8lp:	tst	ip, #8
-		movne	r3, r7, pull #8
-		ldmneia	r1!, {r4, r7}			@ Shouldnt fault
-		orrne	r3, r3, r4, push #24
-		movne	r4, r4, pull #8
-		orrne	r4, r4, r7, push #24
-		stmneia	r0!, {r3 - r4}
-		tst	ip, #4
-		movne	r3, r7, pull #8
-USER(	TUSER(	ldrne) r7, [r1], #4)			@ May fault
-		orrne	r3, r3, r7, push #24
-		strne	r3, [r0], #4
-		ands	ip, ip, #3
-		beq	.Lcfu_1fupi
-.Lcfu_1nowords:	mov	r3, r7, get_byte_1
-		teq	ip, #0
-		beq	.Lcfu_finished
-		cmp	ip, #2
-		strb	r3, [r0], #1
-		movge	r3, r7, get_byte_2
-		strgeb	r3, [r0], #1
-		movgt	r3, r7, get_byte_3
-		strgtb	r3, [r0], #1
-		b	.Lcfu_finished
-
-.Lcfu_2fupi:	subs	r2, r2, #4
-		addmi	ip, r2, #4
-		bmi	.Lcfu_2nowords
-		mov	r3, r7, pull #16
-USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault
-		orr	r3, r3, r7, push #16
-		str	r3, [r0], #4
-		mov	ip, r1, lsl #32 - PAGE_SHIFT
-		rsb	ip, ip, #0
-		movs	ip, ip, lsr #32 - PAGE_SHIFT
-		beq	.Lcfu_2fupi
-		cmp	r2, ip
-		movlt	ip, r2
-		sub	r2, r2, ip
-		subs	ip, ip, #16
-		blt	.Lcfu_2rem8lp
-
-
-.Lcfu_2cpy8lp:	mov	r3, r7, pull #16
-		ldmia	r1!, {r4 - r7}			@ Shouldnt fault
-		subs	ip, ip, #16
-		orr	r3, r3, r4, push #16
-		mov	r4, r4, pull #16
-		orr	r4, r4, r5, push #16
-		mov	r5, r5, pull #16
-		orr	r5, r5, r6, push #16
-		mov	r6, r6, pull #16
-		orr	r6, r6, r7, push #16
-		stmia	r0!, {r3 - r6}
-		bpl	.Lcfu_2cpy8lp
-
-.Lcfu_2rem8lp:	tst	ip, #8
-		movne	r3, r7, pull #16
-		ldmneia	r1!, {r4, r7}			@ Shouldnt fault
-		orrne	r3, r3, r4, push #16
-		movne	r4, r4, pull #16
-		orrne	r4, r4, r7, push #16
-		stmneia	r0!, {r3 - r4}
-		tst	ip, #4
-		movne	r3, r7, pull #16
-USER(	TUSER(	ldrne) r7, [r1], #4)			@ May fault
-		orrne	r3, r3, r7, push #16
-		strne	r3, [r0], #4
-		ands	ip, ip, #3
-		beq	.Lcfu_2fupi
-.Lcfu_2nowords:	mov	r3, r7, get_byte_2
-		teq	ip, #0
-		beq	.Lcfu_finished
-		cmp	ip, #2
-		strb	r3, [r0], #1
-		movge	r3, r7, get_byte_3
-		strgeb	r3, [r0], #1
-USER(	TUSER(	ldrgtb) r3, [r1], #0)			@ May fault
-		strgtb	r3, [r0], #1
-		b	.Lcfu_finished
-
-.Lcfu_3fupi:	subs	r2, r2, #4
-		addmi	ip, r2, #4
-		bmi	.Lcfu_3nowords
-		mov	r3, r7, pull #24
-USER(	TUSER(	ldr)	r7, [r1], #4)			@ May fault
-		orr	r3, r3, r7, push #8
-		str	r3, [r0], #4
-		mov	ip, r1, lsl #32 - PAGE_SHIFT
-		rsb	ip, ip, #0
-		movs	ip, ip, lsr #32 - PAGE_SHIFT
-		beq	.Lcfu_3fupi
-		cmp	r2, ip
-		movlt	ip, r2
-		sub	r2, r2, ip
-		subs	ip, ip, #16
-		blt	.Lcfu_3rem8lp
-
-.Lcfu_3cpy8lp:	mov	r3, r7, pull #24
-		ldmia	r1!, {r4 - r7}			@ Shouldnt fault
-		orr	r3, r3, r4, push #8
-		mov	r4, r4, pull #24
-		orr	r4, r4, r5, push #8
-		mov	r5, r5, pull #24
-		orr	r5, r5, r6, push #8
-		mov	r6, r6, pull #24
-		orr	r6, r6, r7, push #8
-		stmia	r0!, {r3 - r6}
-		subs	ip, ip, #16
-		bpl	.Lcfu_3cpy8lp
-
-.Lcfu_3rem8lp:	tst	ip, #8
-		movne	r3, r7, pull #24
-		ldmneia	r1!, {r4, r7}			@ Shouldnt fault
-		orrne	r3, r3, r4, push #8
-		movne	r4, r4, pull #24
-		orrne	r4, r4, r7, push #8
-		stmneia	r0!, {r3 - r4}
-		tst	ip, #4
-		movne	r3, r7, pull #24
-USER(	TUSER(	ldrne) r7, [r1], #4)			@ May fault
-		orrne	r3, r3, r7, push #8
-		strne	r3, [r0], #4
-		ands	ip, ip, #3
-		beq	.Lcfu_3fupi
-.Lcfu_3nowords:	mov	r3, r7, get_byte_3
-		teq	ip, #0
-		beq	.Lcfu_finished
-		cmp	ip, #2
-		strb	r3, [r0], #1
-USER(	TUSER(	ldrgeb) r3, [r1], #1)			@ May fault
-		strgeb	r3, [r0], #1
-USER(	TUSER(	ldrgtb) r3, [r1], #1)			@ May fault
-		strgtb	r3, [r0], #1
-		b	.Lcfu_finished
-ENDPROC(__copy_from_user)
-
-		.pushsection .fixup,"ax"
-		.align	0
-		/*
-		 * We took an exception.  r0 contains a pointer to
-		 * the byte not copied.
-		 */
-9001:		ldr	r2, [sp], #4			@ void *to
-		sub	r2, r0, r2			@ bytes copied
-		ldr	r1, [sp], #4			@ unsigned long count
-		subs	r4, r1, r2			@ bytes left to copy
-		movne	r1, r4
-		blne	__memzero
-		mov	r0, r4
-		ldmfd	sp!, {r4 - r7, pc}
-		.popsection
-
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 364c193..8910679 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -26,15 +26,6 @@
 #include "clock.h"
 #include "sam9_smc.h"
 
-static struct map_desc at91rm9200_io_desc[] __initdata = {
-	{
-		.virtual	= AT91_VA_BASE_EMAC,
-		.pfn		= __phys_to_pfn(AT91RM9200_BASE_EMAC),
-		.length		= SZ_16K,
-		.type		= MT_DEVICE,
-	},
-};
-
 /* --------------------------------------------------------------------
  *  Clocks
  * -------------------------------------------------------------------- */
@@ -315,7 +306,6 @@
 {
 	/* Map peripherals */
 	at91_init_sram(0, AT91RM9200_SRAM_BASE, AT91RM9200_SRAM_SIZE);
-	iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc));
 }
 
 static void __init at91rm9200_ioremap_registers(void)
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 05774e5..60c4728 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -140,8 +140,8 @@
 
 static struct resource eth_resources[] = {
 	[0] = {
-		.start	= AT91_VA_BASE_EMAC,
-		.end	= AT91_VA_BASE_EMAC + SZ_16K - 1,
+		.start	= AT91RM9200_BASE_EMAC,
+		.end	= AT91RM9200_BASE_EMAC + SZ_16K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h
index e9e29a6..01db372 100644
--- a/arch/arm/mach-at91/include/mach/hardware.h
+++ b/arch/arm/mach-at91/include/mach/hardware.h
@@ -94,7 +94,6 @@
  * Virtual to Physical Address mapping for IO devices.
  */
 #define AT91_VA_BASE_SYS	AT91_IO_P2V(AT91_BASE_SYS)
-#define AT91_VA_BASE_EMAC	AT91_IO_P2V(AT91RM9200_BASE_EMAC)
 
  /* Internal SRAM is mapped below the IO devices */
 #define AT91_SRAM_MAX		SZ_1M
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 79d001f..3113283 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -166,12 +166,6 @@
 	.write = cns3xxx_pci_write_config,
 };
 
-static struct pci_bus *cns3xxx_pci_scan_bus(int nr, struct pci_sys_data *sys)
-{
-	return pci_scan_root_bus(NULL, sys->busnr, &cns3xxx_pcie_ops, sys,
-				 &sys->resources);
-}
-
 static int cns3xxx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	struct cns3xxx_pcie *cnspci = pdev_to_cnspci(dev);
@@ -221,10 +215,9 @@
 		.irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, },
 		.hw_pci = {
 			.domain = 0,
-			.swizzle = pci_std_swizzle,
 			.nr_controllers = 1,
+			.ops = &cns3xxx_pcie_ops,
 			.setup = cns3xxx_pci_setup,
-			.scan = cns3xxx_pci_scan_bus,
 			.map_irq = cns3xxx_pcie_map_irq,
 		},
 	},
@@ -264,10 +257,9 @@
 		.irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, },
 		.hw_pci = {
 			.domain = 1,
-			.swizzle = pci_std_swizzle,
 			.nr_controllers = 1,
+			.ops = &cns3xxx_pcie_ops,
 			.setup = cns3xxx_pci_setup,
-			.scan = cns3xxx_pci_scan_bus,
 			.map_irq = cns3xxx_pcie_map_irq,
 		},
 	},
diff --git a/arch/arm/mach-dove/pcie.c b/arch/arm/mach-dove/pcie.c
index 48a0320..47921b0 100644
--- a/arch/arm/mach-dove/pcie.c
+++ b/arch/arm/mach-dove/pcie.c
@@ -43,6 +43,7 @@
 		return 0;
 
 	pp = &pcie_port[nr];
+	sys->private_data = pp;
 	pp->root_bus_nr = sys->busnr;
 
 	/*
@@ -93,19 +94,6 @@
 	return 1;
 }
 
-static struct pcie_port *bus_to_port(int bus)
-{
-	int i;
-
-	for (i = num_pcie_ports - 1; i >= 0; i--) {
-		int rbus = pcie_port[i].root_bus_nr;
-		if (rbus != -1 && rbus <= bus)
-			break;
-	}
-
-	return i >= 0 ? pcie_port + i : NULL;
-}
-
 static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
 {
 	/*
@@ -121,7 +109,8 @@
 static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
 			int size, u32 *val)
 {
-	struct pcie_port *pp = bus_to_port(bus->number);
+	struct pci_sys_data *sys = bus->sysdata;
+	struct pcie_port *pp = sys->private_data;
 	unsigned long flags;
 	int ret;
 
@@ -140,7 +129,8 @@
 static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 			int where, int size, u32 val)
 {
-	struct pcie_port *pp = bus_to_port(bus->number);
+	struct pci_sys_data *sys = bus->sysdata;
+	struct pcie_port *pp = sys->private_data;
 	unsigned long flags;
 	int ret;
 
@@ -194,14 +184,14 @@
 
 static int __init dove_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-	struct pcie_port *pp = bus_to_port(dev->bus->number);
+	struct pci_sys_data *sys = dev->sysdata;
+	struct pcie_port *pp = sys->private_data;
 
 	return pp->index ? IRQ_DOVE_PCIE1 : IRQ_DOVE_PCIE0;
 }
 
 static struct hw_pci dove_pci __initdata = {
 	.nr_controllers	= 2,
-	.swizzle	= pci_std_swizzle,
 	.setup		= dove_pcie_setup,
 	.scan		= dove_pcie_scan_bus,
 	.map_irq	= dove_pcie_map_irq,
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index e81c35f..b8df521 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -232,6 +232,9 @@
 config MACH_UNIVERSAL_C210
 	bool "Mobile UNIVERSAL_C210 Board"
 	select CPU_EXYNOS4210
+	select S5P_HRT
+	select CLKSRC_MMIO
+	select HAVE_SCHED_CLOCK
 	select S5P_GPIO_INT
 	select S5P_DEV_FIMC0
 	select S5P_DEV_FIMC1
diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c
index df54c2a..6efd1e5 100644
--- a/arch/arm/mach-exynos/clock-exynos4.c
+++ b/arch/arm/mach-exynos/clock-exynos4.c
@@ -497,25 +497,25 @@
 		.ctrlbit	= (1 << 3),
 	}, {
 		.name		= "hsmmc",
-		.devname	= "s3c-sdhci.0",
+		.devname	= "exynos4-sdhci.0",
 		.parent		= &exynos4_clk_aclk_133.clk,
 		.enable		= exynos4_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 5),
 	}, {
 		.name		= "hsmmc",
-		.devname	= "s3c-sdhci.1",
+		.devname	= "exynos4-sdhci.1",
 		.parent		= &exynos4_clk_aclk_133.clk,
 		.enable		= exynos4_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 6),
 	}, {
 		.name		= "hsmmc",
-		.devname	= "s3c-sdhci.2",
+		.devname	= "exynos4-sdhci.2",
 		.parent		= &exynos4_clk_aclk_133.clk,
 		.enable		= exynos4_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 7),
 	}, {
 		.name		= "hsmmc",
-		.devname	= "s3c-sdhci.3",
+		.devname	= "exynos4-sdhci.3",
 		.parent		= &exynos4_clk_aclk_133.clk,
 		.enable		= exynos4_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 8),
@@ -1202,7 +1202,7 @@
 static struct clksrc_clk exynos4_clk_sclk_mmc0 = {
 	.clk	= {
 		.name		= "sclk_mmc",
-		.devname	= "s3c-sdhci.0",
+		.devname	= "exynos4-sdhci.0",
 		.parent		= &exynos4_clk_dout_mmc0.clk,
 		.enable		= exynos4_clksrc_mask_fsys_ctrl,
 		.ctrlbit	= (1 << 0),
@@ -1213,7 +1213,7 @@
 static struct clksrc_clk exynos4_clk_sclk_mmc1 = {
 	.clk	= {
 		.name		= "sclk_mmc",
-		.devname	= "s3c-sdhci.1",
+		.devname	= "exynos4-sdhci.1",
 		.parent		= &exynos4_clk_dout_mmc1.clk,
 		.enable		= exynos4_clksrc_mask_fsys_ctrl,
 		.ctrlbit	= (1 << 4),
@@ -1224,7 +1224,7 @@
 static struct clksrc_clk exynos4_clk_sclk_mmc2 = {
 	.clk	= {
 		.name		= "sclk_mmc",
-		.devname	= "s3c-sdhci.2",
+		.devname	= "exynos4-sdhci.2",
 		.parent		= &exynos4_clk_dout_mmc2.clk,
 		.enable		= exynos4_clksrc_mask_fsys_ctrl,
 		.ctrlbit	= (1 << 8),
@@ -1235,7 +1235,7 @@
 static struct clksrc_clk exynos4_clk_sclk_mmc3 = {
 	.clk	= {
 		.name		= "sclk_mmc",
-		.devname	= "s3c-sdhci.3",
+		.devname	= "exynos4-sdhci.3",
 		.parent		= &exynos4_clk_dout_mmc3.clk,
 		.enable		= exynos4_clksrc_mask_fsys_ctrl,
 		.ctrlbit	= (1 << 12),
@@ -1340,10 +1340,10 @@
 	CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos4_clk_sclk_uart1.clk),
 	CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos4_clk_sclk_uart2.clk),
 	CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos4_clk_sclk_uart3.clk),
-	CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk),
-	CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk),
-	CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk),
-	CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk),
+	CLKDEV_INIT("exynos4-sdhci.0", "mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk),
+	CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk),
+	CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk),
+	CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk),
 	CLKDEV_INIT("exynos4-fb.0", "lcd", &exynos4_clk_fimd0),
 	CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos4_clk_pdma0),
 	CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos4_clk_pdma1),
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index d013982..7ac6ff4 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -455,25 +455,25 @@
 		.ctrlbit	= (1 << 20),
 	}, {
 		.name		= "hsmmc",
-		.devname	= "s3c-sdhci.0",
+		.devname	= "exynos4-sdhci.0",
 		.parent		= &exynos5_clk_aclk_200.clk,
 		.enable		= exynos5_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 12),
 	}, {
 		.name		= "hsmmc",
-		.devname	= "s3c-sdhci.1",
+		.devname	= "exynos4-sdhci.1",
 		.parent		= &exynos5_clk_aclk_200.clk,
 		.enable		= exynos5_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 13),
 	}, {
 		.name		= "hsmmc",
-		.devname	= "s3c-sdhci.2",
+		.devname	= "exynos4-sdhci.2",
 		.parent		= &exynos5_clk_aclk_200.clk,
 		.enable		= exynos5_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 14),
 	}, {
 		.name		= "hsmmc",
-		.devname	= "s3c-sdhci.3",
+		.devname	= "exynos4-sdhci.3",
 		.parent		= &exynos5_clk_aclk_200.clk,
 		.enable		= exynos5_clk_ip_fsys_ctrl,
 		.ctrlbit	= (1 << 15),
@@ -678,7 +678,7 @@
 	.name		= "dma",
 	.devname	= "dma-pl330.1",
 	.enable		= exynos5_clk_ip_fsys_ctrl,
-	.ctrlbit	= (1 << 1),
+	.ctrlbit	= (1 << 2),
 };
 
 static struct clk exynos5_clk_mdma1 = {
@@ -813,7 +813,7 @@
 static struct clksrc_clk exynos5_clk_sclk_mmc0 = {
 	.clk	= {
 		.name		= "sclk_mmc",
-		.devname	= "s3c-sdhci.0",
+		.devname	= "exynos4-sdhci.0",
 		.parent		= &exynos5_clk_dout_mmc0.clk,
 		.enable		= exynos5_clksrc_mask_fsys_ctrl,
 		.ctrlbit	= (1 << 0),
@@ -824,7 +824,7 @@
 static struct clksrc_clk exynos5_clk_sclk_mmc1 = {
 	.clk	= {
 		.name		= "sclk_mmc",
-		.devname	= "s3c-sdhci.1",
+		.devname	= "exynos4-sdhci.1",
 		.parent		= &exynos5_clk_dout_mmc1.clk,
 		.enable		= exynos5_clksrc_mask_fsys_ctrl,
 		.ctrlbit	= (1 << 4),
@@ -835,7 +835,7 @@
 static struct clksrc_clk exynos5_clk_sclk_mmc2 = {
 	.clk	= {
 		.name		= "sclk_mmc",
-		.devname	= "s3c-sdhci.2",
+		.devname	= "exynos4-sdhci.2",
 		.parent		= &exynos5_clk_dout_mmc2.clk,
 		.enable		= exynos5_clksrc_mask_fsys_ctrl,
 		.ctrlbit	= (1 << 8),
@@ -846,7 +846,7 @@
 static struct clksrc_clk exynos5_clk_sclk_mmc3 = {
 	.clk	= {
 		.name		= "sclk_mmc",
-		.devname	= "s3c-sdhci.3",
+		.devname	= "exynos4-sdhci.3",
 		.parent		= &exynos5_clk_dout_mmc3.clk,
 		.enable		= exynos5_clksrc_mask_fsys_ctrl,
 		.ctrlbit	= (1 << 12),
@@ -990,10 +990,10 @@
 	CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos5_clk_sclk_uart1.clk),
 	CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos5_clk_sclk_uart2.clk),
 	CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos5_clk_sclk_uart3.clk),
-	CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk),
-	CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk),
-	CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk),
-	CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk),
+	CLKDEV_INIT("exynos4-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk),
+	CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk),
+	CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk),
+	CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk),
 	CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos5_clk_pdma0),
 	CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos5_clk_pdma1),
 	CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos5_clk_mdma1),
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 8614aab..5ccd6e8 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -326,6 +326,11 @@
 	s3c_fimc_setname(2, "exynos4-fimc");
 	s3c_fimc_setname(3, "exynos4-fimc");
 
+	s3c_sdhci_setname(0, "exynos4-sdhci");
+	s3c_sdhci_setname(1, "exynos4-sdhci");
+	s3c_sdhci_setname(2, "exynos4-sdhci");
+	s3c_sdhci_setname(3, "exynos4-sdhci");
+
 	/* The I2C bus controllers are directly compatible with s3c2440 */
 	s3c_i2c0_setname("s3c2440-i2c");
 	s3c_i2c1_setname("s3c2440-i2c");
@@ -344,6 +349,11 @@
 	s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
 	s3c_device_i2c0.resource[1].end   = EXYNOS5_IRQ_IIC;
 
+	s3c_sdhci_setname(0, "exynos4-sdhci");
+	s3c_sdhci_setname(1, "exynos4-sdhci");
+	s3c_sdhci_setname(2, "exynos4-sdhci");
+	s3c_sdhci_setname(3, "exynos4-sdhci");
+
 	/* The I2C bus controllers are directly compatible with s3c2440 */
 	s3c_i2c0_setname("s3c2440-i2c");
 	s3c_i2c1_setname("s3c2440-i2c");
@@ -537,7 +547,9 @@
 {
 	int irq;
 
-	gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
+#ifdef CONFIG_OF
+	of_irq_init(exynos4_dt_irq_match);
+#endif
 
 	for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) {
 		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
diff --git a/arch/arm/mach-exynos/dev-dwmci.c b/arch/arm/mach-exynos/dev-dwmci.c
index b025db4..7903501 100644
--- a/arch/arm/mach-exynos/dev-dwmci.c
+++ b/arch/arm/mach-exynos/dev-dwmci.c
@@ -16,6 +16,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
+#include <linux/ioport.h>
 #include <linux/mmc/dw_mmc.h>
 
 #include <plat/devs.h>
@@ -33,16 +34,8 @@
 }
 
 static struct resource exynos4_dwmci_resource[] = {
-	[0] = {
-		.start	= EXYNOS4_PA_DWMCI,
-		.end	= EXYNOS4_PA_DWMCI + SZ_4K - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= IRQ_DWMCI,
-		.end	= IRQ_DWMCI,
-		.flags	= IORESOURCE_IRQ,
-	}
+	[0] = DEFINE_RES_MEM(EXYNOS4_PA_DWMCI, SZ_4K),
+	[1] = DEFINE_RES_IRQ(EXYNOS4_IRQ_DWMCI),
 };
 
 static struct dw_mci_board exynos4_dwci_pdata = {
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index b4f1f90..ed90aef 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -112,6 +112,7 @@
 	.host_caps		= (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
 				MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 				MMC_CAP_ERASE),
+	.host_caps2		= MMC_CAP2_BROKEN_VOLTAGE,
 	.cd_type		= S3C_SDHCI_CD_PERMANENT,
 	.clk_type		= S3C_SDHCI_CLK_DIV_EXTERNAL,
 };
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 7ebf79c..a34036e 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -40,6 +40,7 @@
 #include <plat/pd.h>
 #include <plat/regs-fb-v4.h>
 #include <plat/fimc-core.h>
+#include <plat/s5p-time.h>
 #include <plat/camport.h>
 #include <plat/mipi_csis.h>
 
@@ -747,6 +748,7 @@
 	.max_width		= 8,
 	.host_caps		= (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA |
 				MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED),
+	.host_caps2		= MMC_CAP2_BROKEN_VOLTAGE,
 	.cd_type		= S3C_SDHCI_CD_PERMANENT,
 	.clk_type		= S3C_SDHCI_CLK_DIV_EXTERNAL,
 };
@@ -1062,6 +1064,7 @@
 	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
+	s5p_set_timer_source(S5P_PWM2, S5P_PWM4);
 }
 
 static void s5p_tv_setup(void)
@@ -1112,7 +1115,7 @@
 	.map_io		= universal_map_io,
 	.handle_irq	= gic_handle_irq,
 	.init_machine	= universal_machine_init,
-	.timer		= &exynos4_timer,
+	.timer		= &s5p_timer,
 	.reserve        = &universal_reserve,
 	.restart	= exynos4_restart,
 MACHINE_END
diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c
index 32321f6..5cec256 100644
--- a/arch/arm/mach-footbridge/cats-pci.c
+++ b/arch/arm/mach-footbridge/cats-pci.c
@@ -16,6 +16,11 @@
 /* cats host-specific stuff */
 static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
 
+static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
+{
+	return 0;
+}
+
 static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	if (dev->irq >= 255)
@@ -39,11 +44,11 @@
  * cards being used (ie, pci-pci bridge based cards)?
  */
 static struct hw_pci cats_pci __initdata = {
-	.swizzle		= NULL,
+	.swizzle		= cats_no_swizzle,
 	.map_irq		= cats_map_irq,
 	.nr_controllers		= 1,
+	.ops			= &dc21285_ops,
 	.setup			= dc21285_setup,
-	.scan			= dc21285_scan_bus,
 	.preinit		= dc21285_preinit,
 	.postinit		= dc21285_postinit,
 };
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index e17e11d..9d62e33 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -129,7 +129,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static struct pci_ops dc21285_ops = {
+struct pci_ops dc21285_ops = {
 	.read	= dc21285_read_config,
 	.write	= dc21285_write_config,
 };
@@ -284,11 +284,6 @@
 	return 1;
 }
 
-struct pci_bus * __init dc21285_scan_bus(int nr, struct pci_sys_data *sys)
-{
-	return pci_scan_root_bus(NULL, 0, &dc21285_ops, sys, &sys->resources);
-}
-
 #define dc21285_request_irq(_a, _b, _c, _d, _e) \
 	WARN_ON(request_irq(_a, _b, _c, _d, _e) < 0)
 
diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c
index 511c673..fd12d8a 100644
--- a/arch/arm/mach-footbridge/ebsa285-pci.c
+++ b/arch/arm/mach-footbridge/ebsa285-pci.c
@@ -29,11 +29,10 @@
 }
 
 static struct hw_pci ebsa285_pci __initdata = {
-	.swizzle		= pci_std_swizzle,
 	.map_irq		= ebsa285_map_irq,
 	.nr_controllers		= 1,
+	.ops			= &dc21285_ops,
 	.setup			= dc21285_setup,
-	.scan			= dc21285_scan_bus,
 	.preinit		= dc21285_preinit,
 	.postinit		= dc21285_postinit,
 };
diff --git a/arch/arm/mach-footbridge/netwinder-pci.c b/arch/arm/mach-footbridge/netwinder-pci.c
index 6218761..0fba513 100644
--- a/arch/arm/mach-footbridge/netwinder-pci.c
+++ b/arch/arm/mach-footbridge/netwinder-pci.c
@@ -43,11 +43,10 @@
 }
 
 static struct hw_pci netwinder_pci __initdata = {
-	.swizzle		= pci_std_swizzle,
 	.map_irq		= netwinder_map_irq,
 	.nr_controllers		= 1,
+	.ops			= &dc21285_ops,
 	.setup			= dc21285_setup,
-	.scan			= dc21285_scan_bus,
 	.preinit		= dc21285_preinit,
 	.postinit		= dc21285_postinit,
 };
diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c
index aeb651d..5c9ee54 100644
--- a/arch/arm/mach-footbridge/personal-pci.c
+++ b/arch/arm/mach-footbridge/personal-pci.c
@@ -41,8 +41,8 @@
 static struct hw_pci personal_server_pci __initdata = {
 	.map_irq		= personal_server_map_irq,
 	.nr_controllers		= 1,
+	.ops			= &dc21285_ops,
 	.setup			= dc21285_setup,
-	.scan			= dc21285_scan_bus,
 	.preinit		= dc21285_preinit,
 	.postinit		= dc21285_postinit,
 };
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index 3e538da..e428f3a 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -398,24 +398,16 @@
 		struct impd1_device *idev = impd1_devs + i;
 		struct amba_device *d;
 		unsigned long pc_base;
+		char devname[32];
 
 		pc_base = dev->resource.start + idev->offset;
-
-		d = amba_device_alloc(NULL, pc_base, SZ_4K);
-		if (!d)
+		snprintf(devname, 32, "lm%x:%5.5lx", dev->id, idev->offset >> 12);
+		d = amba_ahb_device_add(&dev->dev, devname, pc_base, SZ_4K,
+					dev->irq, dev->irq,
+					idev->platform_data, idev->id);
+		if (IS_ERR(d)) {
+			dev_err(&dev->dev, "unable to register device: %ld\n", PTR_ERR(d));
 			continue;
-
-		dev_set_name(&d->dev, "lm%x:%5.5lx", dev->id, idev->offset >> 12);
-		d->dev.parent	= &dev->dev;
-		d->irq[0]	= dev->irq;
-		d->irq[1]	= dev->irq;
-		d->periphid	= idev->id;
-		d->dev.platform_data = idev->platform_data;
-
-		ret = amba_device_add(d, &dev->resource);
-		if (ret) {
-			dev_err(&d->dev, "unable to register device: %d\n", ret);
-			amba_device_put(d);
 		}
 	}
 
diff --git a/arch/arm/mach-integrator/include/mach/entry-macro.S b/arch/arm/mach-integrator/include/mach/entry-macro.S
deleted file mode 100644
index 5cc7b85..0000000
--- a/arch/arm/mach-integrator/include/mach/entry-macro.S
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * arch/arm/mach-integrator/include/mach/entry-macro.S
- *
- * Low-level IRQ helper macros for Integrator platforms
- *
- * This file is licensed under  the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#include <mach/hardware.h>
-#include <mach/platform.h>
-#include <mach/irqs.h>
-
-		.macro  get_irqnr_preamble, base, tmp
-		.endm
-
-		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-/* FIXME: should not be using soo many LDRs here */
-		ldr	\base, =IO_ADDRESS(INTEGRATOR_IC_BASE)
-		mov	\irqnr, #IRQ_PIC_START
-		ldr	\irqstat, [\base, #IRQ_STATUS]		@ get masked status
-		ldr	\base, =IO_ADDRESS(INTEGRATOR_HDR_BASE)
-		teq	\irqstat, #0
-		ldreq	\irqstat, [\base, #(INTEGRATOR_HDR_IC_OFFSET+IRQ_STATUS)]
-		moveq	\irqnr, #IRQ_CIC_START
-
-1001:		tst	\irqstat, #15
-		bne	1002f
-		add	\irqnr, \irqnr, #4
-		movs	\irqstat, \irqstat, lsr #4
-		bne	1001b
-1002:		tst	\irqstat, #1
-		bne	1003f
-		add	\irqnr, \irqnr, #1
-		movs	\irqstat, \irqstat, lsr #1
-		bne	1002b
-1003:		/* EQ will be set if no irqs pending */
-		.endm
-
diff --git a/arch/arm/mach-integrator/include/mach/irqs.h b/arch/arm/mach-integrator/include/mach/irqs.h
index a19a1a2..7371018 100644
--- a/arch/arm/mach-integrator/include/mach/irqs.h
+++ b/arch/arm/mach-integrator/include/mach/irqs.h
@@ -22,37 +22,37 @@
 /* 
  *  Interrupt numbers
  */
-#define IRQ_PIC_START			0
-#define IRQ_SOFTINT			0
-#define IRQ_UARTINT0			1
-#define IRQ_UARTINT1			2
-#define IRQ_KMIINT0			3
-#define IRQ_KMIINT1			4
-#define IRQ_TIMERINT0			5
-#define IRQ_TIMERINT1			6
-#define IRQ_TIMERINT2			7
-#define IRQ_RTCINT			8
-#define IRQ_AP_EXPINT0			9
-#define IRQ_AP_EXPINT1			10
-#define IRQ_AP_EXPINT2			11
-#define IRQ_AP_EXPINT3			12
-#define IRQ_AP_PCIINT0			13
-#define IRQ_AP_PCIINT1			14
-#define IRQ_AP_PCIINT2			15
-#define IRQ_AP_PCIINT3			16
-#define IRQ_AP_V3INT			17
-#define IRQ_AP_CPINT0			18
-#define IRQ_AP_CPINT1			19
-#define IRQ_AP_LBUSTIMEOUT 		20
-#define IRQ_AP_APCINT			21
-#define IRQ_CP_CLCDCINT			22
-#define IRQ_CP_MMCIINT0			23
-#define IRQ_CP_MMCIINT1			24
-#define IRQ_CP_AACIINT			25
-#define IRQ_CP_CPPLDINT			26
-#define IRQ_CP_ETHINT			27
-#define IRQ_CP_TSPENINT			28
-#define IRQ_PIC_END			31
+#define IRQ_PIC_START			1
+#define IRQ_SOFTINT			1
+#define IRQ_UARTINT0			2
+#define IRQ_UARTINT1			3
+#define IRQ_KMIINT0			4
+#define IRQ_KMIINT1			5
+#define IRQ_TIMERINT0			6
+#define IRQ_TIMERINT1			7
+#define IRQ_TIMERINT2			8
+#define IRQ_RTCINT			9
+#define IRQ_AP_EXPINT0			10
+#define IRQ_AP_EXPINT1			11
+#define IRQ_AP_EXPINT2			12
+#define IRQ_AP_EXPINT3			13
+#define IRQ_AP_PCIINT0			14
+#define IRQ_AP_PCIINT1			15
+#define IRQ_AP_PCIINT2			16
+#define IRQ_AP_PCIINT3			17
+#define IRQ_AP_V3INT			18
+#define IRQ_AP_CPINT0			19
+#define IRQ_AP_CPINT1			20
+#define IRQ_AP_LBUSTIMEOUT 		21
+#define IRQ_AP_APCINT			22
+#define IRQ_CP_CLCDCINT			23
+#define IRQ_CP_MMCIINT0			24
+#define IRQ_CP_MMCIINT1			25
+#define IRQ_CP_AACIINT			26
+#define IRQ_CP_CPPLDINT			27
+#define IRQ_CP_ETHINT			28
+#define IRQ_CP_TSPENINT			29
+#define IRQ_PIC_END			29
 
 #define IRQ_CIC_START			32
 #define IRQ_CM_SOFTINT			32
@@ -80,4 +80,3 @@
 
 #define NR_IRQS_INTEGRATOR_AP		34
 #define NR_IRQS_INTEGRATOR_CP		47
-
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 871f148..c857501 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -162,12 +162,6 @@
 
 #define INTEGRATOR_SC_VALID_INT	0x003fffff
 
-static struct fpga_irq_data sc_irq_data = {
-	.base		= VA_IC_BASE,
-	.irq_start	= 0,
-	.chip.name	= "SC",
-};
-
 static void __init ap_init_irq(void)
 {
 	/* Disable all interrupts initially. */
@@ -178,7 +172,8 @@
 	writel(-1, VA_IC_BASE + IRQ_ENABLE_CLEAR);
 	writel(-1, VA_IC_BASE + FIQ_ENABLE_CLEAR);
 
-	fpga_irq_init(-1, INTEGRATOR_SC_VALID_INT, &sc_irq_data);
+	fpga_irq_init(VA_IC_BASE, "SC", IRQ_PIC_START,
+		-1, INTEGRATOR_SC_VALID_INT, NULL);
 }
 
 #ifdef CONFIG_PM
@@ -478,6 +473,7 @@
 	.nr_irqs	= NR_IRQS_INTEGRATOR_AP,
 	.init_early	= integrator_init_early,
 	.init_irq	= ap_init_irq,
+	.handle_irq	= fpga_handle_irq,
 	.timer		= &ap_timer,
 	.init_machine	= ap_init,
 	.restart	= integrator_restart,
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 48a115a..a56c536 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -143,30 +143,14 @@
 	iotable_init(intcp_io_desc, ARRAY_SIZE(intcp_io_desc));
 }
 
-static struct fpga_irq_data cic_irq_data = {
-	.base		= INTCP_VA_CIC_BASE,
-	.irq_start	= IRQ_CIC_START,
-	.chip.name	= "CIC",
-};
-
-static struct fpga_irq_data pic_irq_data = {
-	.base		= INTCP_VA_PIC_BASE,
-	.irq_start	= IRQ_PIC_START,
-	.chip.name	= "PIC",
-};
-
-static struct fpga_irq_data sic_irq_data = {
-	.base		= INTCP_VA_SIC_BASE,
-	.irq_start	= IRQ_SIC_START,
-	.chip.name	= "SIC",
-};
-
 static void __init intcp_init_irq(void)
 {
-	u32 pic_mask, sic_mask;
+	u32 pic_mask, cic_mask, sic_mask;
 
+	/* These masks are for the HW IRQ registers */
 	pic_mask = ~((~0u) << (11 - IRQ_PIC_START));
 	pic_mask |= (~((~0u) << (29 - 22))) << 22;
+	cic_mask = ~((~0u) << (1 + IRQ_CIC_END - IRQ_CIC_START));
 	sic_mask = ~((~0u) << (1 + IRQ_SIC_END - IRQ_SIC_START));
 
 	/*
@@ -179,12 +163,14 @@
 	writel(sic_mask, INTCP_VA_SIC_BASE + IRQ_ENABLE_CLEAR);
 	writel(sic_mask, INTCP_VA_SIC_BASE + FIQ_ENABLE_CLEAR);
 
-	fpga_irq_init(-1, pic_mask, &pic_irq_data);
+	fpga_irq_init(INTCP_VA_PIC_BASE, "PIC", IRQ_PIC_START,
+		      -1, pic_mask, NULL);
 
-	fpga_irq_init(-1, ~((~0u) << (1 + IRQ_CIC_END - IRQ_CIC_START)),
-		&cic_irq_data);
+	fpga_irq_init(INTCP_VA_CIC_BASE, "CIC", IRQ_CIC_START,
+		      -1, cic_mask, NULL);
 
-	fpga_irq_init(IRQ_CP_CPPLDINT, sic_mask, &sic_irq_data);
+	fpga_irq_init(INTCP_VA_SIC_BASE, "SIC", IRQ_SIC_START,
+		      IRQ_CP_CPPLDINT, sic_mask, NULL);
 }
 
 /*
@@ -467,6 +453,7 @@
 	.nr_irqs	= NR_IRQS_INTEGRATOR_CP,
 	.init_early	= intcp_init_early,
 	.init_irq	= intcp_init_irq,
+	.handle_irq	= fpga_handle_irq,
 	.timer		= &cp_timer,
 	.init_machine	= intcp_init,
 	.restart	= integrator_restart,
diff --git a/arch/arm/mach-integrator/pci.c b/arch/arm/mach-integrator/pci.c
index f1ca9c1..6c1667e 100644
--- a/arch/arm/mach-integrator/pci.c
+++ b/arch/arm/mach-integrator/pci.c
@@ -70,21 +70,10 @@
  */
 static u8 __init integrator_swizzle(struct pci_dev *dev, u8 *pinp)
 {
-	int pin = *pinp;
+	if (*pinp == 0)
+		*pinp = 1;
 
-	if (pin == 0)
-		pin = 1;
-
-	while (dev->bus->self) {
-		pin = pci_swizzle_interrupt_pin(dev, pin);
-		/*
-		 * move up the chain of bridges, swizzling as we go.
-		 */
-		dev = dev->bus->self;
-	}
-	*pinp = pin;
-
-	return PCI_SLOT(dev->devfn);
+	return pci_common_swizzle(dev, pinp);
 }
 
 static int irq_tab[4] __initdata = {
@@ -109,7 +98,7 @@
 	.map_irq		= integrator_map_irq,
 	.setup			= pci_v3_setup,
 	.nr_controllers		= 1,
-	.scan			= pci_v3_scan_bus,
+	.ops			= &pci_v3_ops,
 	.preinit		= pci_v3_preinit,
 	.postinit		= pci_v3_postinit,
 };
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index 67e6f9a..b866880 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -340,7 +340,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static struct pci_ops pci_v3_ops = {
+struct pci_ops pci_v3_ops = {
 	.read	= v3_read_config,
 	.write	= v3_write_config,
 };
@@ -488,12 +488,6 @@
 	return ret;
 }
 
-struct pci_bus * __init pci_v3_scan_bus(int nr, struct pci_sys_data *sys)
-{
-	return pci_scan_root_bus(NULL, sys->busnr, &pci_v3_ops, sys,
-				 &sys->resources);
-}
-
 /*
  * V3_LB_BASE? - local bus address
  * V3_LB_MAP?  - pci bus address
diff --git a/arch/arm/mach-iop13xx/iq81340mc.c b/arch/arm/mach-iop13xx/iq81340mc.c
index 5c96b73..e3f3e7d 100644
--- a/arch/arm/mach-iop13xx/iq81340mc.c
+++ b/arch/arm/mach-iop13xx/iq81340mc.c
@@ -54,7 +54,6 @@
 }
 
 static struct hw_pci iq81340mc_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.nr_controllers = 0,
 	.setup		= iop13xx_pci_setup,
 	.map_irq	= iq81340mc_pcix_map_irq,
diff --git a/arch/arm/mach-iop13xx/iq81340sc.c b/arch/arm/mach-iop13xx/iq81340sc.c
index aa4dd75..060cddd 100644
--- a/arch/arm/mach-iop13xx/iq81340sc.c
+++ b/arch/arm/mach-iop13xx/iq81340sc.c
@@ -56,7 +56,6 @@
 }
 
 static struct hw_pci iq81340sc_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.nr_controllers = 0,
 	.setup		= iop13xx_pci_setup,
 	.scan		= iop13xx_scan_bus,
diff --git a/arch/arm/mach-iop32x/em7210.c b/arch/arm/mach-iop32x/em7210.c
index 24069e0..9f369f0 100644
--- a/arch/arm/mach-iop32x/em7210.c
+++ b/arch/arm/mach-iop32x/em7210.c
@@ -103,11 +103,10 @@
 }
 
 static struct hw_pci em7210_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.nr_controllers = 1,
+	.ops		= &iop3xx_ops,
 	.setup		= iop3xx_pci_setup,
 	.preinit	= iop3xx_pci_preinit,
-	.scan		= iop3xx_pci_scan_bus,
 	.map_irq	= em7210_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-iop32x/glantank.c b/arch/arm/mach-iop32x/glantank.c
index 204e1d1..c15a100 100644
--- a/arch/arm/mach-iop32x/glantank.c
+++ b/arch/arm/mach-iop32x/glantank.c
@@ -96,11 +96,10 @@
 }
 
 static struct hw_pci glantank_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.nr_controllers = 1,
+	.ops		= &iop3xx_ops,
 	.setup		= iop3xx_pci_setup,
 	.preinit	= iop3xx_pci_preinit,
-	.scan		= iop3xx_pci_scan_bus,
 	.map_irq	= glantank_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-iop32x/iq31244.c b/arch/arm/mach-iop32x/iq31244.c
index 3eb642a..ddd1c7e 100644
--- a/arch/arm/mach-iop32x/iq31244.c
+++ b/arch/arm/mach-iop32x/iq31244.c
@@ -130,11 +130,10 @@
 }
 
 static struct hw_pci ep80219_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.nr_controllers = 1,
+	.ops		= &iop3xx_ops,
 	.setup		= iop3xx_pci_setup,
 	.preinit	= iop3xx_pci_preinit,
-	.scan		= iop3xx_pci_scan_bus,
 	.map_irq	= ep80219_pci_map_irq,
 };
 
@@ -166,11 +165,10 @@
 }
 
 static struct hw_pci iq31244_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.nr_controllers = 1,
+	.ops		= &iop3xx_ops,
 	.setup		= iop3xx_pci_setup,
 	.preinit	= iop3xx_pci_preinit,
-	.scan		= iop3xx_pci_scan_bus,
 	.map_irq	= iq31244_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-iop32x/iq80321.c b/arch/arm/mach-iop32x/iq80321.c
index 2ec724b..bf155e6 100644
--- a/arch/arm/mach-iop32x/iq80321.c
+++ b/arch/arm/mach-iop32x/iq80321.c
@@ -101,11 +101,10 @@
 }
 
 static struct hw_pci iq80321_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.nr_controllers = 1,
+	.ops		= &iop3xx_ops,
 	.setup		= iop3xx_pci_setup,
 	.preinit	= iop3xx_pci_preinit_cond,
-	.scan		= iop3xx_pci_scan_bus,
 	.map_irq	= iq80321_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 6b6d559..5a7ae91 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -114,11 +114,10 @@
 }
 
 static struct hw_pci n2100_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.nr_controllers = 1,
+	.ops		= &iop3xx_ops,
 	.setup		= iop3xx_pci_setup,
 	.preinit	= iop3xx_pci_preinit,
-	.scan		= iop3xx_pci_scan_bus,
 	.map_irq	= n2100_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-iop33x/iq80331.c b/arch/arm/mach-iop33x/iq80331.c
index abce934..e74a7de 100644
--- a/arch/arm/mach-iop33x/iq80331.c
+++ b/arch/arm/mach-iop33x/iq80331.c
@@ -84,11 +84,10 @@
 }
 
 static struct hw_pci iq80331_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.nr_controllers = 1,
+	.ops		= &iop3xx_ops,
 	.setup		= iop3xx_pci_setup,
 	.preinit	= iop3xx_pci_preinit_cond,
-	.scan		= iop3xx_pci_scan_bus,
 	.map_irq	= iq80331_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-iop33x/iq80332.c b/arch/arm/mach-iop33x/iq80332.c
index 7513559..e2f5bee 100644
--- a/arch/arm/mach-iop33x/iq80332.c
+++ b/arch/arm/mach-iop33x/iq80332.c
@@ -84,11 +84,10 @@
 }
 
 static struct hw_pci iq80332_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.nr_controllers = 1,
+	.ops		= &iop3xx_ops,
 	.setup		= iop3xx_pci_setup,
 	.preinit	= iop3xx_pci_preinit_cond,
-	.scan		= iop3xx_pci_scan_bus,
 	.map_irq	= iq80332_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp2000/enp2611.c b/arch/arm/mach-ixp2000/enp2611.c
index 4867f40..73df2f6 100644
--- a/arch/arm/mach-ixp2000/enp2611.c
+++ b/arch/arm/mach-ixp2000/enp2611.c
@@ -141,13 +141,6 @@
 	.write  = enp2611_pci_write_config
 };
 
-static struct pci_bus * __init enp2611_pci_scan_bus(int nr,
-						struct pci_sys_data *sys)
-{
-	return pci_scan_root_bus(NULL, sys->busnr, &enp2611_pci_ops, sys,
-				 &sys->resources);
-}
-
 static int __init enp2611_pci_map_irq(const struct pci_dev *dev, u8 slot,
 	u8 pin)
 {
@@ -180,9 +173,9 @@
 
 struct hw_pci enp2611_pci __initdata = {
 	.nr_controllers	= 1,
+	.ops		= &enp2611_pci_ops,
 	.setup		= enp2611_pci_setup,
 	.preinit	= enp2611_pci_preinit,
-	.scan		= enp2611_pci_scan_bus,
 	.map_irq	= enp2611_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp2000/include/mach/platform.h b/arch/arm/mach-ixp2000/include/mach/platform.h
index bb0f8dc..6b500c0 100644
--- a/arch/arm/mach-ixp2000/include/mach/platform.h
+++ b/arch/arm/mach-ixp2000/include/mach/platform.h
@@ -127,10 +127,10 @@
 
 struct pci_sys_data;
 
+extern struct pci_ops ixp2000_pci_ops;
 u32 *ixp2000_pci_config_addr(unsigned int bus, unsigned int devfn, int where);
 void ixp2000_pci_preinit(void);
 int ixp2000_pci_setup(int, struct pci_sys_data*);
-struct pci_bus* ixp2000_pci_scan_bus(int, struct pci_sys_data*);
 int ixp2000_pci_read_config(struct pci_bus*, unsigned int, int, int, u32 *);
 int ixp2000_pci_write_config(struct pci_bus*, unsigned int, int, int, u32);
 
diff --git a/arch/arm/mach-ixp2000/ixdp2400.c b/arch/arm/mach-ixp2000/ixdp2400.c
index 915ad49..4ec4480 100644
--- a/arch/arm/mach-ixp2000/ixdp2400.c
+++ b/arch/arm/mach-ixp2000/ixdp2400.c
@@ -146,10 +146,10 @@
 
 static struct hw_pci ixdp2400_pci __initdata = {
 	.nr_controllers	= 1,
+	.ops		= &ixp2000_pci_ops,
 	.setup		= ixdp2400_pci_setup,
 	.preinit	= ixdp2400_pci_preinit,
 	.postinit	= ixdp2400_pci_postinit,
-	.scan		= ixp2000_pci_scan_bus,
 	.map_irq	= ixdp2400_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp2000/ixdp2800.c b/arch/arm/mach-ixp2000/ixdp2800.c
index a9f1819..44378c3 100644
--- a/arch/arm/mach-ixp2000/ixdp2800.c
+++ b/arch/arm/mach-ixp2000/ixdp2800.c
@@ -246,10 +246,10 @@
 
 struct __initdata hw_pci ixdp2800_pci __initdata = {
 	.nr_controllers	= 1,
+	.ops		= &ixp2000_pci_ops,
 	.setup		= ixdp2800_pci_setup,
 	.preinit	= ixdp2800_pci_preinit,
 	.postinit	= ixdp2800_pci_postinit,
-	.scan		= ixp2000_pci_scan_bus,
 	.map_irq	= ixdp2800_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp2000/ixdp2x01.c b/arch/arm/mach-ixp2000/ixdp2x01.c
index 5196c39..af8b801 100644
--- a/arch/arm/mach-ixp2000/ixdp2x01.c
+++ b/arch/arm/mach-ixp2000/ixdp2x01.c
@@ -327,9 +327,9 @@
 
 struct hw_pci ixdp2x01_pci __initdata = {
 	.nr_controllers	= 1,
+	.ops		= &ixp2000_pci_ops,
 	.setup		= ixdp2x01_pci_setup,
 	.preinit	= ixdp2x01_pci_preinit,
-	.scan		= ixp2000_pci_scan_bus,
 	.map_irq	= ixdp2x01_pci_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp2000/pci.c b/arch/arm/mach-ixp2000/pci.c
index 9c02de9..d706838 100644
--- a/arch/arm/mach-ixp2000/pci.c
+++ b/arch/arm/mach-ixp2000/pci.c
@@ -124,17 +124,11 @@
 }
 
 
-static struct pci_ops ixp2000_pci_ops = {
+struct pci_ops ixp2000_pci_ops = {
 	.read	= ixp2000_pci_read_config,
 	.write	= ixp2000_pci_write_config
 };
 
-struct pci_bus *ixp2000_pci_scan_bus(int nr, struct pci_sys_data *sysdata)
-{
-	return pci_scan_root_bus(NULL, sysdata->busnr, &ixp2000_pci_ops,
-				 sysdata, &sysdata->resources);
-}
-
 
 int ixp2000_pci_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
diff --git a/arch/arm/mach-ixp23xx/include/mach/platform.h b/arch/arm/mach-ixp23xx/include/mach/platform.h
index 50de558..798d8b4 100644
--- a/arch/arm/mach-ixp23xx/include/mach/platform.h
+++ b/arch/arm/mach-ixp23xx/include/mach/platform.h
@@ -37,7 +37,7 @@
 void ixp23xx_restart(char, const char *);
 int ixp23xx_pci_setup(int, struct pci_sys_data *);
 void ixp23xx_pci_preinit(void);
-struct pci_bus *ixp23xx_pci_scan_bus(int, struct pci_sys_data*);
+extern struct pci_ops ixp23xx_pci_ops;
 void ixp23xx_pci_slave_init(void);
 
 extern struct sys_timer ixp23xx_timer;
diff --git a/arch/arm/mach-ixp23xx/ixdp2351.c b/arch/arm/mach-ixp23xx/ixdp2351.c
index b0e07db..8b48e32 100644
--- a/arch/arm/mach-ixp23xx/ixdp2351.c
+++ b/arch/arm/mach-ixp23xx/ixdp2351.c
@@ -251,9 +251,9 @@
 
 struct hw_pci ixdp2351_pci __initdata = {
 	.nr_controllers	= 1,
+	.ops		= &ixp23xx_pci_ops,
 	.preinit	= ixp23xx_pci_preinit,
 	.setup		= ixp23xx_pci_setup,
-	.scan		= ixp23xx_pci_scan_bus,
 	.map_irq	= ixdp2351_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp23xx/pci.c b/arch/arm/mach-ixp23xx/pci.c
index 911f5a5..9211506 100644
--- a/arch/arm/mach-ixp23xx/pci.c
+++ b/arch/arm/mach-ixp23xx/pci.c
@@ -140,12 +140,6 @@
 	.write	= ixp23xx_pci_write_config,
 };
 
-struct pci_bus *ixp23xx_pci_scan_bus(int nr, struct pci_sys_data *sysdata)
-{
-	return pci_scan_root_bus(NULL, sysdata->busnr, &ixp23xx_pci_ops,
-				 sysdata, &sysdata->resources);
-}
-
 int ixp23xx_pci_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	volatile unsigned long temp;
diff --git a/arch/arm/mach-ixp23xx/roadrunner.c b/arch/arm/mach-ixp23xx/roadrunner.c
index eaaa3fa..8c0e5de 100644
--- a/arch/arm/mach-ixp23xx/roadrunner.c
+++ b/arch/arm/mach-ixp23xx/roadrunner.c
@@ -118,9 +118,9 @@
 
 static struct hw_pci roadrunner_pci __initdata = {
 	.nr_controllers	= 1,
+	.ops		= &ixp23xx_pci_ops,
 	.preinit	= roadrunner_pci_preinit,
 	.setup		= ixp23xx_pci_setup,
-	.scan		= ixp23xx_pci_scan_bus,
 	.map_irq	= roadrunner_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/avila-pci.c b/arch/arm/mach-ixp4xx/avila-pci.c
index 8fea0a3..548c7d4 100644
--- a/arch/arm/mach-ixp4xx/avila-pci.c
+++ b/arch/arm/mach-ixp4xx/avila-pci.c
@@ -65,10 +65,9 @@
 
 struct hw_pci avila_pci __initdata = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit	= avila_pci_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= ixp4xx_setup,
-	.scan		= ixp4xx_scan_bus,
 	.map_irq	= avila_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index d5719eb..1694f01 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -480,12 +480,6 @@
 	return 1;
 }
 
-struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
-{
-	return pci_scan_root_bus(NULL, sys->busnr, &ixp4xx_ops, sys,
-				 &sys->resources);
-}
-
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
 	if (mask >= SZ_64M - 1)
diff --git a/arch/arm/mach-ixp4xx/coyote-pci.c b/arch/arm/mach-ixp4xx/coyote-pci.c
index 71f5c9c..5d14ce2 100644
--- a/arch/arm/mach-ixp4xx/coyote-pci.c
+++ b/arch/arm/mach-ixp4xx/coyote-pci.c
@@ -48,10 +48,9 @@
 
 struct hw_pci coyote_pci __initdata = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit =        coyote_pci_preinit,
-	.swizzle =        pci_std_swizzle,
 	.setup =          ixp4xx_setup,
-	.scan =           ixp4xx_scan_bus,
 	.map_irq =        coyote_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/dsmg600-pci.c b/arch/arm/mach-ixp4xx/dsmg600-pci.c
index 0532510..8dca769 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-pci.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-pci.c
@@ -62,10 +62,9 @@
 
 struct hw_pci __initdata dsmg600_pci = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit	= dsmg600_pci_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= ixp4xx_setup,
-	.scan		= ixp4xx_scan_bus,
 	.map_irq	= dsmg600_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/fsg-pci.c b/arch/arm/mach-ixp4xx/fsg-pci.c
index d2ac803..fd4a862 100644
--- a/arch/arm/mach-ixp4xx/fsg-pci.c
+++ b/arch/arm/mach-ixp4xx/fsg-pci.c
@@ -59,10 +59,9 @@
 
 struct hw_pci fsg_pci __initdata = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit =	  fsg_pci_preinit,
-	.swizzle =	  pci_std_swizzle,
 	.setup =	  ixp4xx_setup,
-	.scan =		  ixp4xx_scan_bus,
 	.map_irq =	  fsg_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/gateway7001-pci.c b/arch/arm/mach-ixp4xx/gateway7001-pci.c
index 76581fb..d9d6cc0 100644
--- a/arch/arm/mach-ixp4xx/gateway7001-pci.c
+++ b/arch/arm/mach-ixp4xx/gateway7001-pci.c
@@ -47,10 +47,9 @@
 
 struct hw_pci gateway7001_pci __initdata = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit =        gateway7001_pci_preinit,
-	.swizzle =        pci_std_swizzle,
 	.setup =          ixp4xx_setup,
-	.scan =           ixp4xx_scan_bus,
 	.map_irq =        gateway7001_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c
index 46bb924..b800a03 100644
--- a/arch/arm/mach-ixp4xx/goramo_mlr.c
+++ b/arch/arm/mach-ixp4xx/goramo_mlr.c
@@ -473,11 +473,10 @@
 
 static struct hw_pci gmlr_hw_pci __initdata = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit	= gmlr_pci_preinit,
 	.postinit	= gmlr_pci_postinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= ixp4xx_setup,
-	.scan		= ixp4xx_scan_bus,
 	.map_irq	= gmlr_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-pci.c b/arch/arm/mach-ixp4xx/gtwx5715-pci.c
index d68fc06..551d114 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-pci.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-pci.c
@@ -67,10 +67,9 @@
 
 struct hw_pci gtwx5715_pci __initdata = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit =        gtwx5715_pci_preinit,
-	.swizzle =        pci_std_swizzle,
 	.setup =          ixp4xx_setup,
-	.scan =           ixp4xx_scan_bus,
 	.map_irq =        gtwx5715_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
index 292d55e..cf03614 100644
--- a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
+++ b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
@@ -75,4 +75,7 @@
 #define TX_SNAPSHOT_LOCKED (1<<0)
 #define RX_SNAPSHOT_LOCKED (1<<1)
 
+/* The ptp_ixp46x module will set this variable */
+extern int ixp46x_phc_index;
+
 #endif
diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h
index b66bedc..5bce94a 100644
--- a/arch/arm/mach-ixp4xx/include/mach/platform.h
+++ b/arch/arm/mach-ixp4xx/include/mach/platform.h
@@ -130,7 +130,7 @@
 extern void ixp4xx_pci_preinit(void);
 struct pci_sys_data;
 extern int ixp4xx_setup(int nr, struct pci_sys_data *sys);
-extern struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys);
+extern struct pci_ops ixp4xx_ops;
 
 /*
  * GPIO-functions
diff --git a/arch/arm/mach-ixp4xx/ixdp425-pci.c b/arch/arm/mach-ixp4xx/ixdp425-pci.c
index fffd8c5..318424d 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-pci.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-pci.c
@@ -60,10 +60,9 @@
 
 struct hw_pci ixdp425_pci __initdata = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit	= ixdp425_pci_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= ixp4xx_setup,
-	.scan		= ixp4xx_scan_bus,
 	.map_irq	= ixdp425_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/ixdpg425-pci.c b/arch/arm/mach-ixp4xx/ixdpg425-pci.c
index 34efe75..1f8717b 100644
--- a/arch/arm/mach-ixp4xx/ixdpg425-pci.c
+++ b/arch/arm/mach-ixp4xx/ixdpg425-pci.c
@@ -42,10 +42,9 @@
 
 struct hw_pci ixdpg425_pci __initdata = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit =        ixdpg425_pci_preinit,
-	.swizzle =        pci_std_swizzle,
 	.setup =          ixp4xx_setup,
-	.scan =           ixp4xx_scan_bus,
 	.map_irq =        ixdpg425_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/miccpt-pci.c b/arch/arm/mach-ixp4xx/miccpt-pci.c
index ca0bae7..d114ccd 100644
--- a/arch/arm/mach-ixp4xx/miccpt-pci.c
+++ b/arch/arm/mach-ixp4xx/miccpt-pci.c
@@ -61,10 +61,9 @@
 
 struct hw_pci miccpt_pci __initdata = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit	= miccpt_pci_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= ixp4xx_setup,
-	.scan		= ixp4xx_scan_bus,
 	.map_irq	= miccpt_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/nas100d-pci.c b/arch/arm/mach-ixp4xx/nas100d-pci.c
index 5434ccf..8f0eba0 100644
--- a/arch/arm/mach-ixp4xx/nas100d-pci.c
+++ b/arch/arm/mach-ixp4xx/nas100d-pci.c
@@ -58,10 +58,9 @@
 
 struct hw_pci __initdata nas100d_pci = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit	= nas100d_pci_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= ixp4xx_setup,
-	.scan		= ixp4xx_scan_bus,
 	.map_irq	= nas100d_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/nslu2-pci.c b/arch/arm/mach-ixp4xx/nslu2-pci.c
index b571605..032defe 100644
--- a/arch/arm/mach-ixp4xx/nslu2-pci.c
+++ b/arch/arm/mach-ixp4xx/nslu2-pci.c
@@ -54,10 +54,9 @@
 
 struct hw_pci __initdata nslu2_pci = {
 	.nr_controllers = 1,
+	.ops		= &ixp4xx_ops,
 	.preinit	= nslu2_pci_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= ixp4xx_setup,
-	.scan		= ixp4xx_scan_bus,
 	.map_irq	= nslu2_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/vulcan-pci.c b/arch/arm/mach-ixp4xx/vulcan-pci.c
index 0bc3f34..a4220fa 100644
--- a/arch/arm/mach-ixp4xx/vulcan-pci.c
+++ b/arch/arm/mach-ixp4xx/vulcan-pci.c
@@ -56,10 +56,9 @@
 
 struct hw_pci vulcan_pci __initdata = {
 	.nr_controllers	= 1,
+	.ops		= &ixp4xx_ops,
 	.preinit	= vulcan_pci_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= ixp4xx_setup,
-	.scan		= ixp4xx_scan_bus,
 	.map_irq	= vulcan_map_irq,
 };
 
diff --git a/arch/arm/mach-ixp4xx/wg302v2-pci.c b/arch/arm/mach-ixp4xx/wg302v2-pci.c
index f27dfcf..c92e5b8 100644
--- a/arch/arm/mach-ixp4xx/wg302v2-pci.c
+++ b/arch/arm/mach-ixp4xx/wg302v2-pci.c
@@ -46,10 +46,9 @@
 
 struct hw_pci wg302v2_pci __initdata = {
 	.nr_controllers = 1,
+	.ops = &ixp4xx_ops,
 	.preinit =        wg302v2_pci_preinit,
-	.swizzle =        pci_std_swizzle,
 	.setup =          ixp4xx_setup,
-	.scan =           ixp4xx_scan_bus,
 	.map_irq =        wg302v2_map_irq,
 };
 
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 1c672d9..f7fe1b9 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/kexec.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <mach/bridge-regs.h>
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index f56a011..de37317 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -44,12 +44,6 @@
 static int pcie_port_map[2];
 static int num_pcie_ports;
 
-static inline struct pcie_port *bus_to_port(struct pci_bus *bus)
-{
-	struct pci_sys_data *sys = bus->sysdata;
-	return sys->private_data;
-}
-
 static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
 {
 	/*
@@ -79,7 +73,8 @@
 static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
 			int size, u32 *val)
 {
-	struct pcie_port *pp = bus_to_port(bus);
+	struct pci_sys_data *sys = bus->sysdata;
+	struct pcie_port *pp = sys->private_data;
 	unsigned long flags;
 	int ret;
 
@@ -98,7 +93,8 @@
 static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 			int where, int size, u32 val)
 {
-	struct pcie_port *pp = bus_to_port(bus);
+	struct pci_sys_data *sys = bus->sysdata;
+	struct pcie_port *pp = sys->private_data;
 	unsigned long flags;
 	int ret;
 
@@ -248,13 +244,13 @@
 static int __init kirkwood_pcie_map_irq(const struct pci_dev *dev, u8 slot,
 	u8 pin)
 {
-	struct pcie_port *pp = bus_to_port(dev->bus);
+	struct pci_sys_data *sys = dev->sysdata;
+	struct pcie_port *pp = sys->private_data;
 
 	return pp->irq;
 }
 
 static struct hw_pci kirkwood_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.setup		= kirkwood_pcie_setup,
 	.scan		= kirkwood_pcie_scan_bus,
 	.map_irq	= kirkwood_pcie_map_irq,
diff --git a/arch/arm/mach-ks8695/pci.c b/arch/arm/mach-ks8695/pci.c
index acc7014..bb18193 100644
--- a/arch/arm/mach-ks8695/pci.c
+++ b/arch/arm/mach-ks8695/pci.c
@@ -141,12 +141,6 @@
 	.write	= ks8695_pci_writeconfig,
 };
 
-static struct pci_bus* __init ks8695_pci_scan_bus(int nr, struct pci_sys_data *sys)
-{
-	return pci_scan_root_bus(NULL, sys->busnr, &ks8695_pci_ops, sys,
-				 &sys->resources);
-}
-
 static struct resource pci_mem = {
 	.name	= "PCI Memory space",
 	.start	= KS8695_PCIMEM_PA,
@@ -302,11 +296,10 @@
 
 static struct hw_pci ks8695_pci __initdata = {
 	.nr_controllers	= 1,
+	.ops		= &ks8695_pci_ops,
 	.preinit	= ks8695_pci_preinit,
 	.setup		= ks8695_pci_setup,
-	.scan		= ks8695_pci_scan_bus,
 	.postinit	= NULL,
-	.swizzle	= pci_std_swizzle,
 	.map_irq	= NULL,
 };
 
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 962e711..fb3496a 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -17,6 +17,7 @@
 #include <linux/irqdomain.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/memblock.h>
 
@@ -49,10 +50,22 @@
 	msm_map_msm8x60_io();
 }
 
+#ifdef CONFIG_OF
+static struct of_device_id msm_dt_gic_match[] __initdata = {
+	{ .compatible = "qcom,msm-8660-qgic", .data = gic_of_init },
+	{}
+};
+#endif
+
 static void __init msm8x60_init_irq(void)
 {
-	gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
-		 (void *)MSM_QGIC_CPU_BASE);
+	if (!of_have_populated_dt())
+		gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
+			 (void *)MSM_QGIC_CPU_BASE);
+#ifdef CONFIG_OF
+	else
+		of_irq_init(msm_dt_gic_match);
+#endif
 
 	/* Edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */
 	writel(0xFFFFD7FF, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4);
@@ -73,16 +86,8 @@
 	{}
 };
 
-static struct of_device_id msm_dt_gic_match[] __initdata = {
-	{ .compatible = "qcom,msm-8660-qgic", },
-	{}
-};
-
 static void __init msm8x60_dt_init(void)
 {
-	irq_domain_generate_simple(msm_dt_gic_match, MSM8X60_QGIC_DIST_PHYS,
-				GIC_SPI_START);
-
 	if (of_machine_is_compatible("qcom,msm8660-surf")) {
 		printk(KERN_INFO "Init surf UART registers\n");
 		msm8x60_init_uart12dm();
diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
index df3e380..2e56e86 100644
--- a/arch/arm/mach-mv78xx0/pcie.c
+++ b/arch/arm/mach-mv78xx0/pcie.c
@@ -147,6 +147,7 @@
 		return 0;
 
 	pp = &pcie_port[nr];
+	sys->private_data = pp;
 	pp->root_bus_nr = sys->busnr;
 
 	/*
@@ -161,19 +162,6 @@
 	return 1;
 }
 
-static struct pcie_port *bus_to_port(int bus)
-{
-	int i;
-
-	for (i = num_pcie_ports - 1; i >= 0; i--) {
-		int rbus = pcie_port[i].root_bus_nr;
-		if (rbus != -1 && rbus <= bus)
-			break;
-	}
-
-	return i >= 0 ? pcie_port + i : NULL;
-}
-
 static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
 {
 	/*
@@ -189,7 +177,8 @@
 static int pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
 			int size, u32 *val)
 {
-	struct pcie_port *pp = bus_to_port(bus->number);
+	struct pci_sys_data *sys = bus->sysdata;
+	struct pcie_port *pp = sys->private_data;
 	unsigned long flags;
 	int ret;
 
@@ -208,7 +197,8 @@
 static int pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 			int where, int size, u32 val)
 {
-	struct pcie_port *pp = bus_to_port(bus->number);
+	struct pci_sys_data *sys = bus->sysdata;
+	struct pcie_port *pp = sys->private_data;
 	unsigned long flags;
 	int ret;
 
@@ -263,7 +253,8 @@
 static int __init mv78xx0_pcie_map_irq(const struct pci_dev *dev, u8 slot,
 	u8 pin)
 {
-	struct pcie_port *pp = bus_to_port(dev->bus->number);
+	struct pci_sys_data *sys = dev->bus->sysdata;
+	struct pcie_port *pp = sys->private_data;
 
 	return IRQ_MV78XX0_PCIE_00 + (pp->maj << 2) + pp->min;
 }
@@ -271,7 +262,6 @@
 static struct hw_pci mv78xx0_pci __initdata = {
 	.nr_controllers	= 8,
 	.preinit	= mv78xx0_pcie_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= mv78xx0_pcie_setup,
 	.scan		= mv78xx0_pcie_scan_bus,
 	.map_irq	= mv78xx0_pcie_map_irq,
diff --git a/arch/arm/mach-mxs/devices-mx23.h b/arch/arm/mach-mxs/devices-mx23.h
index 4d1329d..9acdd63 100644
--- a/arch/arm/mach-mxs/devices-mx23.h
+++ b/arch/arm/mach-mxs/devices-mx23.h
@@ -11,10 +11,16 @@
 #include <mach/mx23.h>
 #include <mach/devices-common.h>
 #include <mach/mxsfb.h>
+#include <linux/amba/bus.h>
 
-extern const struct amba_device mx23_duart_device __initconst;
-#define mx23_add_duart() \
-	mxs_add_duart(&mx23_duart_device)
+static inline int mx23_add_duart(void)
+{
+	struct amba_device *d;
+
+	d = amba_ahb_device_add(NULL, "duart", MX23_DUART_BASE_ADDR, SZ_8K,
+				MX23_INT_DUART, 0, 0, 0);
+	return IS_ERR(d) ? PTR_ERR(d) : 0;
+}
 
 extern const struct mxs_auart_data mx23_auart_data[] __initconst;
 #define mx23_add_auart(id)	mxs_add_auart(&mx23_auart_data[id])
diff --git a/arch/arm/mach-mxs/devices-mx28.h b/arch/arm/mach-mxs/devices-mx28.h
index 9dbeae1..84b2960 100644
--- a/arch/arm/mach-mxs/devices-mx28.h
+++ b/arch/arm/mach-mxs/devices-mx28.h
@@ -11,10 +11,16 @@
 #include <mach/mx28.h>
 #include <mach/devices-common.h>
 #include <mach/mxsfb.h>
+#include <linux/amba/bus.h>
 
-extern const struct amba_device mx28_duart_device __initconst;
-#define mx28_add_duart() \
-	mxs_add_duart(&mx28_duart_device)
+static inline int mx28_add_duart(void)
+{
+	struct amba_device *d;
+
+	d = amba_ahb_device_add(NULL, "duart", MX28_DUART_BASE_ADDR, SZ_8K,
+				MX28_INT_DUART, 0, 0, 0);
+	return IS_ERR(d) ? PTR_ERR(d) : 0;
+}
 
 extern const struct mxs_auart_data mx28_auart_data[] __initconst;
 #define mx28_add_auart(id)	mxs_add_auart(&mx28_auart_data[id])
diff --git a/arch/arm/mach-mxs/devices.c b/arch/arm/mach-mxs/devices.c
index 01faffe..cf50b5a 100644
--- a/arch/arm/mach-mxs/devices.c
+++ b/arch/arm/mach-mxs/devices.c
@@ -75,22 +75,6 @@
 	return pdev;
 }
 
-int __init mxs_add_amba_device(const struct amba_device *dev)
-{
-	struct amba_device *adev = amba_device_alloc(dev->dev.init_name,
-		dev->res.start, resource_size(&dev->res));
-
-	if (!adev) {
-		pr_err("%s: failed to allocate memory", __func__);
-		return -ENOMEM;
-	}
-
-	adev->irq[0] = dev->irq[0];
-	adev->irq[1] = dev->irq[1];
-
-	return amba_device_add(adev, &iomem_resource);
-}
-
 struct device mxs_apbh_bus = {
 	.init_name	= "mxs_apbh",
 	.parent         = &platform_bus,
diff --git a/arch/arm/mach-mxs/devices/Makefile b/arch/arm/mach-mxs/devices/Makefile
index c8f5c95..5f72d97 100644
--- a/arch/arm/mach-mxs/devices/Makefile
+++ b/arch/arm/mach-mxs/devices/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_MXS_HAVE_AMBA_DUART) += amba-duart.o
 obj-$(CONFIG_MXS_HAVE_PLATFORM_AUART) += platform-auart.o
 obj-y += platform-dma.o
 obj-$(CONFIG_MXS_HAVE_PLATFORM_FEC) += platform-fec.o
diff --git a/arch/arm/mach-mxs/devices/amba-duart.c b/arch/arm/mach-mxs/devices/amba-duart.c
deleted file mode 100644
index a5479f7..0000000
--- a/arch/arm/mach-mxs/devices/amba-duart.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- */
-#include <asm/irq.h>
-#include <mach/mx23.h>
-#include <mach/mx28.h>
-#include <mach/devices-common.h>
-
-#define MXS_AMBA_DUART_DEVICE(name, soc)			\
-const struct amba_device name##_device __initconst = {		\
-	.dev = {						\
-		.init_name = "duart",				\
-	},							\
-	.res = {						\
-		.start = soc ## _DUART_BASE_ADDR,		\
-		.end = (soc ## _DUART_BASE_ADDR) + SZ_8K - 1,	\
-		.flags = IORESOURCE_MEM,			\
-	},							\
-	.irq = {soc ## _INT_DUART},				\
-}
-
-#ifdef CONFIG_SOC_IMX23
-MXS_AMBA_DUART_DEVICE(mx23_duart, MX23);
-#endif
-
-#ifdef CONFIG_SOC_IMX28
-MXS_AMBA_DUART_DEVICE(mx28_duart, MX28);
-#endif
-
-int __init mxs_add_duart(const struct amba_device *dev)
-{
-	return mxs_add_amba_device(dev);
-}
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index f2e3839..21e45a7 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -27,11 +27,6 @@
 			name, id, res, num_resources, data, size_data, 0);
 }
 
-int __init mxs_add_amba_device(const struct amba_device *dev);
-
-/* duart */
-int __init mxs_add_duart(const struct amba_device *dev);
-
 /* auart */
 struct mxs_auart_data {
 	int id;
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index fcce7ff..cfd98b1 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -48,7 +48,7 @@
 	struct irq_chip *irq_chip = NULL;
 	int gpio, irq_num, fiq_count;
 
-	irq_desc = irq_to_desc(IH_GPIO_BASE);
+	irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
 	if (irq_desc)
 		irq_chip = irq_desc->irq_data.chip;
 
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 930c0d3..740cee9 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -641,7 +641,7 @@
 
 static void __init igep_init(void)
 {
-	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+	regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 
 	/* Get IGEP2 hardware revision */
diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
index 1e2d332..c88420d 100644
--- a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
+++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
@@ -941,10 +941,10 @@
 #define OMAP4_DSI2_LANEENABLE_MASK				(0x7 << 29)
 #define OMAP4_DSI1_LANEENABLE_SHIFT				24
 #define OMAP4_DSI1_LANEENABLE_MASK				(0x1f << 24)
-#define OMAP4_DSI2_PIPD_SHIFT					19
-#define OMAP4_DSI2_PIPD_MASK					(0x1f << 19)
-#define OMAP4_DSI1_PIPD_SHIFT					14
-#define OMAP4_DSI1_PIPD_MASK					(0x1f << 14)
+#define OMAP4_DSI1_PIPD_SHIFT					19
+#define OMAP4_DSI1_PIPD_MASK					(0x1f << 19)
+#define OMAP4_DSI2_PIPD_SHIFT					14
+#define OMAP4_DSI2_PIPD_MASK					(0x1f << 14)
 
 /* CONTROL_MCBSPLP */
 #define OMAP4_ALBCTRLRX_FSX_SHIFT				31
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c
index e52108c..49a3fd6 100644
--- a/arch/arm/mach-orion5x/db88f5281-setup.c
+++ b/arch/arm/mach-orion5x/db88f5281-setup.c
@@ -265,7 +265,6 @@
 static struct hw_pci db88f5281_pci __initdata = {
 	.nr_controllers	= 2,
 	.preinit	= db88f5281_pci_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= db88f5281_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index c3ed15b..8c06cca 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -86,7 +86,6 @@
 
 static struct hw_pci dns323_pci __initdata = {
 	.nr_controllers = 2,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= dns323_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c
index 47587b8..1e458ef 100644
--- a/arch/arm/mach-orion5x/kurobox_pro-setup.c
+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c
@@ -138,7 +138,6 @@
 
 static struct hw_pci kurobox_pro_pci __initdata = {
 	.nr_controllers	= 2,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= kurobox_pro_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/mpp.h b/arch/arm/mach-orion5x/mpp.h
index eac6897..db70e79 100644
--- a/arch/arm/mach-orion5x/mpp.h
+++ b/arch/arm/mach-orion5x/mpp.h
@@ -65,8 +65,8 @@
 #define MPP8_GIGE               MPP(8,  0x1, 0, 0, 1,   1,   1)
 
 #define MPP9_UNUSED		MPP(9,  0x0, 0, 0, 1,   1,   1)
-#define MPP9_GPIO		MPP(9,  0x0, 0, 0, 1,   1,   1)
-#define MPP9_GIGE               MPP(9,  0x1, 1, 1, 1,   1,   1)
+#define MPP9_GPIO		MPP(9,  0x0, 1, 1, 1,   1,   1)
+#define MPP9_GIGE               MPP(9,  0x1, 0, 0, 1,   1,   1)
 
 #define MPP10_UNUSED		MPP(10, 0x0, 0, 0, 1,   1,   1)
 #define MPP10_GPIO		MPP(10, 0x0, 1, 1, 1,   1,   1)
diff --git a/arch/arm/mach-orion5x/mss2-setup.c b/arch/arm/mach-orion5x/mss2-setup.c
index 65faaa3..1c16d04 100644
--- a/arch/arm/mach-orion5x/mss2-setup.c
+++ b/arch/arm/mach-orion5x/mss2-setup.c
@@ -89,7 +89,6 @@
 
 static struct hw_pci mss2_pci __initdata = {
 	.nr_controllers = 2,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= mss2_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index 292038f..78a6a11 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -149,7 +149,6 @@
 
 static struct hw_pci rd88f5181l_fxo_pci __initdata = {
 	.nr_controllers	= 2,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= rd88f5181l_fxo_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index c44eaba..2f5dc54 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -161,7 +161,6 @@
 
 static struct hw_pci rd88f5181l_ge_pci __initdata = {
 	.nr_controllers	= 2,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= rd88f5181l_ge_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c
index e3ce617..399130f 100644
--- a/arch/arm/mach-orion5x/rd88f5182-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5182-setup.c
@@ -200,7 +200,6 @@
 static struct hw_pci rd88f5182_pci __initdata = {
 	.nr_controllers	= 2,
 	.preinit	= rd88f5182_pci_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= rd88f5182_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index 2c5fab0..e91bf0b 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -102,7 +102,6 @@
 
 static struct hw_pci rd88f6183ap_ge_pci __initdata = {
 	.nr_controllers	= 2,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= orion5x_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/terastation_pro2-setup.c b/arch/arm/mach-orion5x/terastation_pro2-setup.c
index 632a861..90e571d 100644
--- a/arch/arm/mach-orion5x/terastation_pro2-setup.c
+++ b/arch/arm/mach-orion5x/terastation_pro2-setup.c
@@ -122,7 +122,6 @@
 static struct hw_pci tsp2_pci __initdata = {
 	.nr_controllers = 2,
 	.preinit        = tsp2_pci_preinit,
-	.swizzle        = pci_std_swizzle,
 	.setup          = orion5x_pci_sys_setup,
 	.scan           = orion5x_pci_sys_scan_bus,
 	.map_irq        = tsp2_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c
index 5d64087..b184f68 100644
--- a/arch/arm/mach-orion5x/ts209-setup.c
+++ b/arch/arm/mach-orion5x/ts209-setup.c
@@ -170,7 +170,6 @@
 static struct hw_pci qnap_ts209_pci __initdata = {
 	.nr_controllers	= 2,
 	.preinit	= qnap_ts209_pci_preinit,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= qnap_ts209_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/ts409-setup.c b/arch/arm/mach-orion5x/ts409-setup.c
index 4e6ff75..a5c2e64 100644
--- a/arch/arm/mach-orion5x/ts409-setup.c
+++ b/arch/arm/mach-orion5x/ts409-setup.c
@@ -140,7 +140,6 @@
 
 static struct hw_pci qnap_ts409_pci __initdata = {
 	.nr_controllers	= 2,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= qnap_ts409_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c
index 078c03f..754c12b 100644
--- a/arch/arm/mach-orion5x/wnr854t-setup.c
+++ b/arch/arm/mach-orion5x/wnr854t-setup.c
@@ -155,7 +155,6 @@
 
 static struct hw_pci wnr854t_pci __initdata = {
 	.nr_controllers	= 2,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= wnr854t_pci_map_irq,
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index 46a9778..45c2125 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -243,7 +243,6 @@
 
 static struct hw_pci wrt350n_v2_pci __initdata = {
 	.nr_controllers	= 2,
-	.swizzle	= pci_std_swizzle,
 	.setup		= orion5x_pci_sys_setup,
 	.scan		= orion5x_pci_sys_scan_bus,
 	.map_irq	= wrt350n_v2_pci_map_irq,
diff --git a/arch/arm/mach-prima2/irq.c b/arch/arm/mach-prima2/irq.c
index 37c2de9..a7b9415 100644
--- a/arch/arm/mach-prima2/irq.c
+++ b/arch/arm/mach-prima2/irq.c
@@ -42,7 +42,8 @@
 static __init void sirfsoc_irq_init(void)
 {
 	sirfsoc_alloc_gc(sirfsoc_intc_base, 0, 32);
-	sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, SIRFSOC_INTENAL_IRQ_END - 32);
+	sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32,
+			SIRFSOC_INTENAL_IRQ_END + 1 - 32);
 
 	writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0);
 	writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1);
@@ -68,7 +69,8 @@
 	if (!sirfsoc_intc_base)
 		panic("unable to map intc cpu registers\n");
 
-	irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL);
+	irq_domain_add_legacy(np, SIRFSOC_INTENAL_IRQ_END + 1, 0, 0,
+		&irq_domain_simple_ops, NULL);
 
 	of_node_put(np);
 
diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c
index ebd9259..d8f816c 100644
--- a/arch/arm/mach-pxa/cm-x2xx-pci.c
+++ b/arch/arm/mach-pxa/cm-x2xx-pci.c
@@ -181,11 +181,10 @@
 }
 
 static struct hw_pci cmx2xx_pci __initdata = {
-	.swizzle	= pci_std_swizzle,
 	.map_irq	= cmx2xx_pci_map_irq,
 	.nr_controllers	= 1,
+	.ops		= &it8152_ops,
 	.setup		= it8152_pci_setup,
-	.scan		= it8152_pci_scan_bus,
 	.preinit	= cmx2xx_pci_preinit,
 };
 
diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h b/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h
index c54cef2..cbf51ae 100644
--- a/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h
+++ b/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h
@@ -17,6 +17,7 @@
  *
  * bit     23 - Input/Output (PXA2xx specific)
  * bit     24 - Wakeup Enable(PXA2xx specific)
+ * bit     25 - Keep Output  (PXA2xx specific)
  */
 
 #define MFP_DIR_IN		(0x0 << 23)
@@ -25,6 +26,12 @@
 #define MFP_DIR(x)		(((x) >> 23) & 0x1)
 
 #define MFP_LPM_CAN_WAKEUP	(0x1 << 24)
+
+/*
+ * MFP_LPM_KEEP_OUTPUT must be specified for pins that need to
+ * retain their last output level (low or high).
+ * Note: MFP_LPM_KEEP_OUTPUT has no effect on pins configured for input.
+ */
 #define MFP_LPM_KEEP_OUTPUT	(0x1 << 25)
 
 #define WAKEUP_ON_EDGE_RISE	(MFP_LPM_CAN_WAKEUP | MFP_LPM_EDGE_RISE)
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index b0a8428..ef0426a 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -33,6 +33,8 @@
 #define BANK_OFF(n)	(((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
 #define GPLR(x)		__REG2(0x40E00000, BANK_OFF((x) >> 5))
 #define GPDR(x)		__REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x0c)
+#define GPSR(x)		__REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x18)
+#define GPCR(x)		__REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x24)
 
 #define PWER_WE35	(1 << 24)
 
@@ -348,6 +350,7 @@
 #ifdef CONFIG_PM
 static unsigned long saved_gafr[2][4];
 static unsigned long saved_gpdr[4];
+static unsigned long saved_gplr[4];
 static unsigned long saved_pgsr[4];
 
 static int pxa2xx_mfp_suspend(void)
@@ -366,14 +369,26 @@
 	}
 
 	for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) {
-
 		saved_gafr[0][i] = GAFR_L(i);
 		saved_gafr[1][i] = GAFR_U(i);
 		saved_gpdr[i] = GPDR(i * 32);
+		saved_gplr[i] = GPLR(i * 32);
 		saved_pgsr[i] = PGSR(i);
 
-		GPDR(i * 32) = gpdr_lpm[i];
+		GPSR(i * 32) = PGSR(i);
+		GPCR(i * 32) = ~PGSR(i);
 	}
+
+	/* set GPDR bits taking into account MFP_LPM_KEEP_OUTPUT */
+	for (i = 0; i < pxa_last_gpio; i++) {
+		if ((gpdr_lpm[gpio_to_bank(i)] & GPIO_bit(i)) ||
+		    ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
+		     (saved_gpdr[gpio_to_bank(i)] & GPIO_bit(i))))
+			GPDR(i) |= GPIO_bit(i);
+		else
+			GPDR(i) &= ~GPIO_bit(i);
+	}
+
 	return 0;
 }
 
@@ -384,6 +399,8 @@
 	for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) {
 		GAFR_L(i) = saved_gafr[0][i];
 		GAFR_U(i) = saved_gafr[1][i];
+		GPSR(i * 32) = saved_gplr[i];
+		GPCR(i * 32) = ~saved_gplr[i];
 		GPDR(i * 32) = saved_gpdr[i];
 		PGSR(i) = saved_pgsr[i];
 	}
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 6bce78e..4726c24 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -421,8 +421,11 @@
 	pxa_register_device(&pxa27x_device_i2c_power, info);
 }
 
+static struct pxa_gpio_platform_data pxa27x_gpio_info __initdata = {
+	.gpio_set_wake = gpio_set_wake,
+};
+
 static struct platform_device *devices[] __initdata = {
-	&pxa_device_gpio,
 	&pxa27x_device_udc,
 	&pxa_device_pmu,
 	&pxa_device_i2s,
@@ -458,6 +461,7 @@
 		register_syscore_ops(&pxa2xx_mfp_syscore_ops);
 		register_syscore_ops(&pxa2xx_clock_syscore_ops);
 
+		pxa_register_device(&pxa_device_gpio, &pxa27x_gpio_info);
 		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
 	}
 
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 0f3a327..b34287a 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -111,10 +111,6 @@
 	help
 	  Compile in platform device definition for Samsung TouchScreen.
 
-# cpu-specific sections
-
-if CPU_S3C2410
-
 config S3C2410_DMA
 	bool
 	depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442)
@@ -127,6 +123,10 @@
 	help
 	  Power Management code common to S3C2410 and better
 
+# cpu-specific sections
+
+if CPU_S3C2410
+
 config S3C24XX_SIMTEC_NOR
 	bool
 	help
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index a8933de..3239566 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -25,6 +25,7 @@
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 #include <linux/gpio.h>
+#include <linux/mmc/host.h>
 #include <linux/interrupt.h>
 
 #include <asm/hardware/vic.h>
@@ -765,6 +766,7 @@
 /* MoviNAND */
 static struct s3c_sdhci_platdata goni_hsmmc0_data __initdata = {
 	.max_width		= 4,
+	.host_caps2		= MMC_CAP2_BROKEN_VOLTAGE,
 	.cd_type		= S3C_SDHCI_CD_PERMANENT,
 };
 
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 7c524b4..16be4c5 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -306,7 +306,7 @@
 }
 
 static struct resource sa1100_rtc_resources[] = {
-	DEFINE_RES_MEM(0x90010000, 0x9001003f),
+	DEFINE_RES_MEM(0x90010000, 0x40),
 	DEFINE_RES_IRQ_NAMED(IRQ_RTC1Hz, "rtc 1Hz"),
 	DEFINE_RES_IRQ_NAMED(IRQ_RTCAlrm, "rtc alarm"),
 };
diff --git a/arch/arm/mach-sa1100/pci-nanoengine.c b/arch/arm/mach-sa1100/pci-nanoengine.c
index b49108b..ff02e2d 100644
--- a/arch/arm/mach-sa1100/pci-nanoengine.c
+++ b/arch/arm/mach-sa1100/pci-nanoengine.c
@@ -129,12 +129,6 @@
 	return NANOENGINE_IRQ_GPIO_PCI;
 }
 
-struct pci_bus * __init pci_nanoengine_scan_bus(int nr, struct pci_sys_data *sys)
-{
-	return pci_scan_root_bus(NULL, sys->busnr, &pci_nano_ops, sys,
-				 &sys->resources);
-}
-
 static struct resource pci_io_ports =
 	DEFINE_RES_IO_NAMED(0x400, 0x400, "PCI IO");
 
@@ -274,7 +268,7 @@
 static struct hw_pci nanoengine_pci __initdata = {
 	.map_irq		= pci_nanoengine_map_irq,
 	.nr_controllers		= 1,
-	.scan			= pci_nanoengine_scan_bus,
+	.ops			= &pci_nano_ops,
 	.setup			= pci_nanoengine_setup,
 };
 
diff --git a/arch/arm/mach-shark/pci.c b/arch/arm/mach-shark/pci.c
index 7cb79a0..9089407 100644
--- a/arch/arm/mach-shark/pci.c
+++ b/arch/arm/mach-shark/pci.c
@@ -29,10 +29,9 @@
 
 static struct hw_pci shark_pci __initdata = {
 	.setup		= via82c505_setup,
-	.swizzle	= pci_std_swizzle,
 	.map_irq	= shark_map_irq,
 	.nr_controllers = 1,
-	.scan		= via82c505_scan_bus,
+	.ops		= &via82c505_ops,
 	.preinit	= via82c505_preinit,
 };
 
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index cb224a3..0891ec6 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -365,23 +365,13 @@
 };
 
 /* SDHI0 */
-static irqreturn_t ag5evm_sdhi0_gpio_cd(int irq, void *arg)
-{
-	struct device *dev = arg;
-	struct sh_mobile_sdhi_info *info = dev->platform_data;
-	struct tmio_mmc_data *pdata = info->pdata;
-
-	tmio_mmc_cd_wakeup(pdata);
-
-	return IRQ_HANDLED;
-}
-
 static struct sh_mobile_sdhi_info sdhi0_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT,
+	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
 	.tmio_caps	= MMC_CAP_SD_HIGHSPEED,
 	.tmio_ocr_mask	= MMC_VDD_27_28 | MMC_VDD_28_29,
+	.cd_gpio	= GPIO_PORT251,
 };
 
 static struct resource sdhi0_resources[] = {
@@ -557,7 +547,6 @@
 	lcd_backlight_reset();
 
 	/* enable SDHI0 on CN15 [SD I/F] */
-	gpio_request(GPIO_FN_SDHICD0, NULL);
 	gpio_request(GPIO_FN_SDHIWP0, NULL);
 	gpio_request(GPIO_FN_SDHICMD0, NULL);
 	gpio_request(GPIO_FN_SDHICLK0, NULL);
@@ -566,13 +555,6 @@
 	gpio_request(GPIO_FN_SDHID0_1, NULL);
 	gpio_request(GPIO_FN_SDHID0_0, NULL);
 
-	if (!request_irq(intcs_evt2irq(0x3c0), ag5evm_sdhi0_gpio_cd,
-			 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
-			 "sdhi0 cd", &sdhi0_device.dev))
-		sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
-	else
-		pr_warn("Unable to setup SDHI0 GPIO IRQ\n");
-
 	/* enable SDHI1 on CN4 [WLAN I/F] */
 	gpio_request(GPIO_FN_SDHICLK1, NULL);
 	gpio_request(GPIO_FN_SDHICMD1_PU, NULL);
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index f49e28a..8c6202b 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1011,21 +1011,12 @@
 }
 
 /* SDHI0 */
-static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg)
-{
-	struct device *dev = arg;
-	struct sh_mobile_sdhi_info *info = dev->platform_data;
-	struct tmio_mmc_data *pdata = info->pdata;
-
-	tmio_mmc_cd_wakeup(pdata);
-
-	return IRQ_HANDLED;
-}
-
 static struct sh_mobile_sdhi_info sdhi0_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_flags	= TMIO_MMC_USE_GPIO_CD,
 	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+	.cd_gpio	= GPIO_PORT172,
 };
 
 static struct resource sdhi0_resources[] = {
@@ -1384,7 +1375,6 @@
 {
 	u32 srcr4;
 	struct clk *clk;
-	int ret;
 
 	/* External clock source */
 	clk_set_rate(&sh7372_dv_clki_clk, 27000000);
@@ -1481,7 +1471,6 @@
 	irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH);
 
 	/* enable SDHI0 */
-	gpio_request(GPIO_FN_SDHICD0, NULL);
 	gpio_request(GPIO_FN_SDHIWP0, NULL);
 	gpio_request(GPIO_FN_SDHICMD0, NULL);
 	gpio_request(GPIO_FN_SDHICLK0, NULL);
@@ -1490,13 +1479,6 @@
 	gpio_request(GPIO_FN_SDHID0_1, NULL);
 	gpio_request(GPIO_FN_SDHID0_0, NULL);
 
-	ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd,
-			  IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev);
-	if (!ret)
-		sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
-	else
-		pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret);
-
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 	/* enable SDHI1 */
 	gpio_request(GPIO_FN_SDHICMD1, NULL);
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 6ac015c..b202c12 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -16,6 +16,59 @@
 
 	__CPUINIT
 
+/* Cache invalidation nicked from arch/arm/mach-imx/head-v7.S, thanks!
+ *
+ * The secondary kernel init calls v7_flush_dcache_all before it enables
+ * the L1; however, the L1 comes out of reset in an undefined state, so
+ * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
+ * of cache lines with uninitialized data and uninitialized tags to get
+ * written out to memory, which does really unpleasant things to the main
+ * processor.  We fix this by performing an invalidate, rather than a
+ * clean + invalidate, before jumping into the kernel.
+ *
+ * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
+ * to be called for both secondary cores startup and primary core resume
+ * procedures.  Ideally, it should be moved into arch/arm/mm/cache-v7.S.
+ */
+ENTRY(v7_invalidate_l1)
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0	@ invalidate I cache
+	mcr	p15, 2, r0, c0, c0, 0
+	mrc	p15, 1, r0, c0, c0, 0
+
+	ldr	r1, =0x7fff
+	and	r2, r1, r0, lsr #13
+
+	ldr	r1, =0x3ff
+
+	and	r3, r1, r0, lsr #3	@ NumWays - 1
+	add	r2, r2, #1		@ NumSets
+
+	and	r0, r0, #0x7
+	add	r0, r0, #4	@ SetShift
+
+	clz	r1, r3		@ WayShift
+	add	r4, r3, #1	@ NumWays
+1:	sub	r2, r2, #1	@ NumSets--
+	mov	r3, r4		@ Temp = NumWays
+2:	subs	r3, r3, #1	@ Temp--
+	mov	r5, r3, lsl r1
+	mov	r6, r2, lsl r0
+	orr	r5, r5, r6	@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+	mcr	p15, 0, r5, c7, c6, 2
+	bgt	2b
+	cmp	r2, #0
+	bgt	1b
+	dsb
+	isb
+	mov	pc, lr
+ENDPROC(v7_invalidate_l1)
+
+ENTRY(shmobile_invalidate_start)
+	bl	v7_invalidate_l1
+	b	secondary_startup
+ENDPROC(shmobile_invalidate_start)
+
 /*
  * Reset vector for secondary CPUs.
  * This will be mapped at address 0 by SBAR register.
@@ -24,4 +77,5 @@
 	.align  12
 ENTRY(shmobile_secondary_vector)
 	ldr     pc, 1f
-1:	.long   secondary_startup - PAGE_OFFSET + PLAT_PHYS_OFFSET
+1:	.long   shmobile_invalidate_start - PAGE_OFFSET + PLAT_PHYS_OFFSET
+ENDPROC(shmobile_secondary_vector)
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 83ad3fe..c85e6ec 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -4,7 +4,6 @@
 extern void shmobile_earlytimer_init(void);
 extern struct sys_timer shmobile_timer;
 struct twd_local_timer;
-void shmobile_twd_init(struct twd_local_timer *twd_local_timer);
 extern void shmobile_setup_console(void);
 extern void shmobile_secondary_vector(void);
 extern int shmobile_platform_cpu_kill(unsigned int cpu);
@@ -82,5 +81,6 @@
 extern void r8a7779_secondary_init(unsigned int cpu);
 extern int r8a7779_boot_secondary(unsigned int cpu);
 extern void r8a7779_smp_prepare_cpus(void);
+extern void r8a7779_register_twd(void);
 
 #endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 12c6f52..e98e46f 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -262,10 +262,14 @@
 			    ARRAY_SIZE(r8a7779_late_devices));
 }
 
+/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */
+void __init __weak r8a7779_register_twd(void) { }
+
 static void __init r8a7779_earlytimer_init(void)
 {
 	r8a7779_clock_init();
 	shmobile_earlytimer_init();
+	r8a7779_register_twd();
 }
 
 void __init r8a7779_add_early_devices(void)
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 5bebffc..04a0dfe 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -688,10 +688,14 @@
 			    ARRAY_SIZE(sh73a0_late_devices));
 }
 
+/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */
+void __init __weak sh73a0_register_twd(void) { }
+
 static void __init sh73a0_earlytimer_init(void)
 {
 	sh73a0_clock_init();
 	shmobile_earlytimer_init();
+	sh73a0_register_twd();
 }
 
 void __init sh73a0_add_early_devices(void)
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index b62e19d..6d1d023 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -64,8 +64,15 @@
 static DEFINE_SPINLOCK(scu_lock);
 static unsigned long tmp;
 
+#ifdef CONFIG_HAVE_ARM_TWD
 static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29);
 
+void __init r8a7779_register_twd(void)
+{
+	twd_local_timer_register(&twd_local_timer);
+}
+#endif
+
 static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
 {
 	void __iomem *scu_base = scu_base_addr();
@@ -84,7 +91,6 @@
 {
 	void __iomem *scu_base = scu_base_addr();
 
-	shmobile_twd_init(&twd_local_timer);
 	return scu_get_core_count(scu_base);
 }
 
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index 14ad8b05..e36c41c 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -42,7 +42,13 @@
 static DEFINE_SPINLOCK(scu_lock);
 static unsigned long tmp;
 
+#ifdef CONFIG_HAVE_ARM_TWD
 static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29);
+void __init sh73a0_register_twd(void)
+{
+	twd_local_timer_register(&twd_local_timer);
+}
+#endif
 
 static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
 {
@@ -62,7 +68,6 @@
 {
 	void __iomem *scu_base = scu_base_addr();
 
-	shmobile_twd_init(&twd_local_timer);
 	return scu_get_core_count(scu_base);
 }
 
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index 2fba5f3..8b79e79 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -46,15 +46,6 @@
 {
 }
 
-void __init shmobile_twd_init(struct twd_local_timer *twd_local_timer)
-{
-#ifdef CONFIG_HAVE_ARM_TWD
-	int err = twd_local_timer_register(twd_local_timer);
-	if (err)
-		pr_err("twd_local_timer_register failed %d\n", err);
-#endif
-}
-
 struct sys_timer shmobile_timer = {
 	.init		= shmobile_timer_init,
 };
diff --git a/arch/arm/mach-tegra/flowctrl.c b/arch/arm/mach-tegra/flowctrl.c
index fef66a7..f07488e 100644
--- a/arch/arm/mach-tegra/flowctrl.c
+++ b/arch/arm/mach-tegra/flowctrl.c
@@ -53,10 +53,10 @@
 
 void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
 {
-	return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
+	return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
 }
 
 void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
 {
-	return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
+	return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
 }
diff --git a/arch/arm/mach-tegra/pcie.c b/arch/arm/mach-tegra/pcie.c
index 54a816f..0e09137 100644
--- a/arch/arm/mach-tegra/pcie.c
+++ b/arch/arm/mach-tegra/pcie.c
@@ -475,7 +475,6 @@
 	.nr_controllers	= 2,
 	.setup		= tegra_pcie_setup,
 	.scan		= tegra_pcie_scan_bus,
-	.swizzle	= pci_std_swizzle,
 	.map_irq	= tegra_pcie_map_irq,
 };
 
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index 1eed8d4..315672c 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -124,7 +124,7 @@
 }
 
 /*
- * read_persistent_clock -  Return time from a persistent clock.
+ * tegra_read_persistent_clock -  Return time from a persistent clock.
  *
  * Reads the time from a source which isn't disabled during PM, the
  * 32k sync timer.  Convert the cycles elapsed since last read into
@@ -133,7 +133,7 @@
  * tegra_rtc driver could be executing to avoid race conditions
  * on the RTC shadow register
  */
-void read_persistent_clock(struct timespec *ts)
+static void tegra_read_persistent_clock(struct timespec *ts)
 {
 	u64 delta;
 	struct timespec *tsp = &persistent_ts;
@@ -243,6 +243,7 @@
 	tegra_clockevent.irq = tegra_timer_irq.irq;
 	clockevents_register_device(&tegra_clockevent);
 	tegra_twd_init();
+	register_persistent_clock(NULL, tegra_read_persistent_clock);
 }
 
 struct sys_timer tegra_timer = {
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 1621ad0..3333974 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -1667,8 +1667,10 @@
 
 	for (i = 0; i < U300_VIC_IRQS_END; i++)
 		set_bit(i, (unsigned long *) &mask[0]);
-	vic_init((void __iomem *) U300_INTCON0_VBASE, 0, mask[0], mask[0]);
-	vic_init((void __iomem *) U300_INTCON1_VBASE, 32, mask[1], mask[1]);
+	vic_init((void __iomem *) U300_INTCON0_VBASE, IRQ_U300_INTCON0_START,
+		 mask[0], mask[0]);
+	vic_init((void __iomem *) U300_INTCON1_VBASE, IRQ_U300_INTCON1_START,
+		 mask[1], mask[1]);
 }
 
 
diff --git a/arch/arm/mach-u300/i2c.c b/arch/arm/mach-u300/i2c.c
index a38f802..cb04bd6 100644
--- a/arch/arm/mach-u300/i2c.c
+++ b/arch/arm/mach-u300/i2c.c
@@ -146,9 +146,6 @@
 				.min_uV = 1800000,
 				.max_uV = 1800000,
 				.valid_modes_mask = REGULATOR_MODE_NORMAL,
-				.valid_ops_mask =
-				REGULATOR_CHANGE_VOLTAGE |
-				REGULATOR_CHANGE_STATUS,
 				.always_on = 1,
 				.boot_on = 1,
 			},
@@ -160,9 +157,6 @@
 				.min_uV = 2500000,
 				.max_uV = 2500000,
 				.valid_modes_mask = REGULATOR_MODE_NORMAL,
-				.valid_ops_mask =
-				REGULATOR_CHANGE_VOLTAGE |
-				REGULATOR_CHANGE_STATUS,
 				.always_on = 1,
 				.boot_on = 1,
 			},
@@ -230,8 +224,7 @@
 				.max_uV = 1800000,
 				.valid_modes_mask = REGULATOR_MODE_NORMAL,
 				.valid_ops_mask =
-				REGULATOR_CHANGE_VOLTAGE |
-				REGULATOR_CHANGE_STATUS,
+				REGULATOR_CHANGE_VOLTAGE,
 				.always_on = 1,
 				.boot_on = 1,
 			},
diff --git a/arch/arm/mach-u300/include/mach/irqs.h b/arch/arm/mach-u300/include/mach/irqs.h
index ee78a26..ec09c1e 100644
--- a/arch/arm/mach-u300/include/mach/irqs.h
+++ b/arch/arm/mach-u300/include/mach/irqs.h
@@ -12,101 +12,101 @@
 #ifndef __MACH_IRQS_H
 #define __MACH_IRQS_H
 
-#define IRQ_U300_INTCON0_START		0
-#define IRQ_U300_INTCON1_START		32
+#define IRQ_U300_INTCON0_START		1
+#define IRQ_U300_INTCON1_START		33
 /* These are on INTCON0 - 30 lines */
-#define IRQ_U300_IRQ0_EXT		0
-#define IRQ_U300_IRQ1_EXT		1
-#define IRQ_U300_DMA			2
-#define IRQ_U300_VIDEO_ENC_0		3
-#define IRQ_U300_VIDEO_ENC_1		4
-#define IRQ_U300_AAIF_RX		5
-#define IRQ_U300_AAIF_TX		6
-#define IRQ_U300_AAIF_VGPIO		7
-#define IRQ_U300_AAIF_WAKEUP		8
-#define IRQ_U300_PCM_I2S0_FRAME		9
-#define IRQ_U300_PCM_I2S0_FIFO		10
-#define IRQ_U300_PCM_I2S1_FRAME		11
-#define IRQ_U300_PCM_I2S1_FIFO		12
-#define IRQ_U300_XGAM_GAMCON		13
-#define IRQ_U300_XGAM_CDI		14
-#define IRQ_U300_XGAM_CDICON		15
+#define IRQ_U300_IRQ0_EXT		1
+#define IRQ_U300_IRQ1_EXT		2
+#define IRQ_U300_DMA			3
+#define IRQ_U300_VIDEO_ENC_0		4
+#define IRQ_U300_VIDEO_ENC_1		5
+#define IRQ_U300_AAIF_RX		6
+#define IRQ_U300_AAIF_TX		7
+#define IRQ_U300_AAIF_VGPIO		8
+#define IRQ_U300_AAIF_WAKEUP		9
+#define IRQ_U300_PCM_I2S0_FRAME		10
+#define IRQ_U300_PCM_I2S0_FIFO		11
+#define IRQ_U300_PCM_I2S1_FRAME		12
+#define IRQ_U300_PCM_I2S1_FIFO		13
+#define IRQ_U300_XGAM_GAMCON		14
+#define IRQ_U300_XGAM_CDI		15
+#define IRQ_U300_XGAM_CDICON		16
 #if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330)
 /* MMIACC not used on the DB3210 or DB3350 chips */
-#define IRQ_U300_XGAM_MMIACC		16
+#define IRQ_U300_XGAM_MMIACC		17
 #endif
-#define IRQ_U300_XGAM_PDI		17
-#define IRQ_U300_XGAM_PDICON		18
-#define IRQ_U300_XGAM_GAMEACC		19
-#define IRQ_U300_XGAM_MCIDCT		20
-#define IRQ_U300_APEX			21
-#define IRQ_U300_UART0			22
-#define IRQ_U300_SPI			23
-#define IRQ_U300_TIMER_APP_OS		24
-#define IRQ_U300_TIMER_APP_DD		25
-#define IRQ_U300_TIMER_APP_GP1		26
-#define IRQ_U300_TIMER_APP_GP2		27
-#define IRQ_U300_TIMER_OS		28
-#define IRQ_U300_TIMER_MS		29
-#define IRQ_U300_KEYPAD_KEYBF		30
-#define IRQ_U300_KEYPAD_KEYBR		31
+#define IRQ_U300_XGAM_PDI		18
+#define IRQ_U300_XGAM_PDICON		19
+#define IRQ_U300_XGAM_GAMEACC		20
+#define IRQ_U300_XGAM_MCIDCT		21
+#define IRQ_U300_APEX			22
+#define IRQ_U300_UART0			23
+#define IRQ_U300_SPI			24
+#define IRQ_U300_TIMER_APP_OS		25
+#define IRQ_U300_TIMER_APP_DD		26
+#define IRQ_U300_TIMER_APP_GP1		27
+#define IRQ_U300_TIMER_APP_GP2		28
+#define IRQ_U300_TIMER_OS		29
+#define IRQ_U300_TIMER_MS		30
+#define IRQ_U300_KEYPAD_KEYBF		31
+#define IRQ_U300_KEYPAD_KEYBR		32
 /* These are on INTCON1 - 32 lines */
-#define IRQ_U300_GPIO_PORT0		32
-#define IRQ_U300_GPIO_PORT1		33
-#define IRQ_U300_GPIO_PORT2		34
+#define IRQ_U300_GPIO_PORT0		33
+#define IRQ_U300_GPIO_PORT1		34
+#define IRQ_U300_GPIO_PORT2		35
 
 #if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330) || \
     defined(CONFIG_MACH_U300_BS335)
 /* These are for DB3150, DB3200 and DB3350 */
-#define IRQ_U300_WDOG			35
-#define IRQ_U300_EVHIST			36
-#define IRQ_U300_MSPRO			37
-#define IRQ_U300_MMCSD_MCIINTR0		38
-#define IRQ_U300_MMCSD_MCIINTR1		39
-#define IRQ_U300_I2C0			40
-#define IRQ_U300_I2C1			41
-#define IRQ_U300_RTC			42
-#define IRQ_U300_NFIF			43
-#define IRQ_U300_NFIF2			44
+#define IRQ_U300_WDOG			36
+#define IRQ_U300_EVHIST			37
+#define IRQ_U300_MSPRO			38
+#define IRQ_U300_MMCSD_MCIINTR0		39
+#define IRQ_U300_MMCSD_MCIINTR1		40
+#define IRQ_U300_I2C0			41
+#define IRQ_U300_I2C1			42
+#define IRQ_U300_RTC			43
+#define IRQ_U300_NFIF			44
+#define IRQ_U300_NFIF2			45
 #endif
 
 /* DB3150 and DB3200 have only 45 IRQs */
 #if defined(CONFIG_MACH_U300_BS2X) || defined(CONFIG_MACH_U300_BS330)
-#define U300_VIC_IRQS_END		45
+#define U300_VIC_IRQS_END		46
 #endif
 
 /* The DB3350-specific interrupt lines */
 #ifdef CONFIG_MACH_U300_BS335
-#define IRQ_U300_ISP_F0			45
-#define IRQ_U300_ISP_F1			46
-#define IRQ_U300_ISP_F2			47
-#define IRQ_U300_ISP_F3			48
-#define IRQ_U300_ISP_F4			49
-#define IRQ_U300_GPIO_PORT3		50
-#define IRQ_U300_SYSCON_PLL_LOCK	51
-#define IRQ_U300_UART1			52
-#define IRQ_U300_GPIO_PORT4		53
-#define IRQ_U300_GPIO_PORT5		54
-#define IRQ_U300_GPIO_PORT6		55
-#define U300_VIC_IRQS_END		56
+#define IRQ_U300_ISP_F0			46
+#define IRQ_U300_ISP_F1			47
+#define IRQ_U300_ISP_F2			48
+#define IRQ_U300_ISP_F3			49
+#define IRQ_U300_ISP_F4			50
+#define IRQ_U300_GPIO_PORT3		51
+#define IRQ_U300_SYSCON_PLL_LOCK	52
+#define IRQ_U300_UART1			53
+#define IRQ_U300_GPIO_PORT4		54
+#define IRQ_U300_GPIO_PORT5		55
+#define IRQ_U300_GPIO_PORT6		56
+#define U300_VIC_IRQS_END		57
 #endif
 
 /* The DB3210-specific interrupt lines */
 #ifdef CONFIG_MACH_U300_BS365
-#define IRQ_U300_GPIO_PORT3		35
-#define IRQ_U300_GPIO_PORT4		36
-#define IRQ_U300_WDOG			37
-#define IRQ_U300_EVHIST			38
-#define IRQ_U300_MSPRO			39
-#define IRQ_U300_MMCSD_MCIINTR0		40
-#define IRQ_U300_MMCSD_MCIINTR1		41
-#define IRQ_U300_I2C0			42
-#define IRQ_U300_I2C1			43
-#define IRQ_U300_RTC			44
-#define IRQ_U300_NFIF			45
-#define IRQ_U300_NFIF2			46
-#define IRQ_U300_SYSCON_PLL_LOCK	47
-#define U300_VIC_IRQS_END		48
+#define IRQ_U300_GPIO_PORT3		36
+#define IRQ_U300_GPIO_PORT4		37
+#define IRQ_U300_WDOG			38
+#define IRQ_U300_EVHIST			39
+#define IRQ_U300_MSPRO			40
+#define IRQ_U300_MMCSD_MCIINTR0		41
+#define IRQ_U300_MMCSD_MCIINTR1		42
+#define IRQ_U300_I2C0			43
+#define IRQ_U300_I2C1			44
+#define IRQ_U300_RTC			45
+#define IRQ_U300_NFIF			46
+#define IRQ_U300_NFIF2			47
+#define IRQ_U300_SYSCON_PLL_LOCK	48
+#define U300_VIC_IRQS_END		49
 #endif
 
 /* Maximum 8*7 GPIO lines */
@@ -117,6 +117,6 @@
 #define IRQ_U300_GPIO_END		(U300_VIC_IRQS_END)
 #endif
 
-#define NR_IRQS				(IRQ_U300_GPIO_END)
+#define NR_IRQS				(IRQ_U300_GPIO_END - IRQ_U300_INTCON0_START)
 
 #endif
diff --git a/arch/arm/mach-ux500/devices-common.c b/arch/arm/mach-ux500/devices-common.c
index c5312a4..dfdd4a5 100644
--- a/arch/arm/mach-ux500/devices-common.c
+++ b/arch/arm/mach-ux500/devices-common.c
@@ -11,7 +11,6 @@
 #include <linux/irq.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
-#include <linux/amba/bus.h>
 
 #include <plat/gpio-nomadik.h>
 
@@ -19,38 +18,6 @@
 
 #include "devices-common.h"
 
-struct amba_device *
-dbx500_add_amba_device(struct device *parent, const char *name,
-		       resource_size_t base, int irq, void *pdata,
-		       unsigned int periphid)
-{
-	struct amba_device *dev;
-	int ret;
-
-	dev = amba_device_alloc(name, base, SZ_4K);
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	dev->dma_mask = DMA_BIT_MASK(32);
-	dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-
-	dev->irq[0] = irq;
-
-	dev->periphid = periphid;
-
-	dev->dev.platform_data = pdata;
-
-	dev->dev.parent = parent;
-
-	ret = amba_device_add(dev, &iomem_resource);
-	if (ret) {
-		amba_device_put(dev);
-		return ERR_PTR(ret);
-	}
-
-	return dev;
-}
-
 static struct platform_device *
 dbx500_add_gpio(struct device *parent, int id, resource_size_t addr, int irq,
 		struct nmk_gpio_platform_data *pdata)
diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h
index 39c74ec..f75bcb2 100644
--- a/arch/arm/mach-ux500/devices-common.h
+++ b/arch/arm/mach-ux500/devices-common.h
@@ -11,13 +11,9 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/sys_soc.h>
+#include <linux/amba/bus.h>
 #include <plat/i2c.h>
 
-extern struct amba_device *
-dbx500_add_amba_device(struct device *parent, const char *name,
-		       resource_size_t base, int irq, void *pdata,
-		       unsigned int periphid);
-
 struct spi_master_cntlr;
 
 static inline struct amba_device *
@@ -25,8 +21,8 @@
 		   resource_size_t base, int irq,
 		   struct spi_master_cntlr *pdata)
 {
-	return dbx500_add_amba_device(parent, name, base, irq,
-				      pdata, 0);
+	return amba_ahb_device_add(parent, name, base, SZ_4K, irq, 0,
+				   pdata, 0);
 }
 
 static inline struct amba_device *
@@ -34,8 +30,8 @@
 	       int irq, struct spi_master_cntlr *pdata,
 	       u32 periphid)
 {
-	return dbx500_add_amba_device(parent, name, base, irq,
-				      pdata, periphid);
+	return amba_ahb_device_add(parent, name, base, SZ_4K, irq, 0,
+				   pdata, periphid);
 }
 
 struct mmci_platform_data;
@@ -44,8 +40,8 @@
 dbx500_add_sdi(struct device *parent, const char *name, resource_size_t base,
 	       int irq, struct mmci_platform_data *pdata, u32 periphid)
 {
-	return dbx500_add_amba_device(parent, name, base, irq,
-				      pdata, periphid);
+	return amba_ahb_device_add(parent, name, base, SZ_4K, irq, 0,
+				   pdata, periphid);
 }
 
 struct amba_pl011_data;
@@ -54,7 +50,7 @@
 dbx500_add_uart(struct device *parent, const char *name, resource_size_t base,
 		int irq, struct amba_pl011_data *pdata)
 {
-	return dbx500_add_amba_device(parent, name, base, irq, pdata, 0);
+	return amba_ahb_device_add(parent, name, base, SZ_4K, irq, 0, pdata, 0);
 }
 
 struct nmk_i2c_controller;
@@ -85,7 +81,8 @@
 static inline struct amba_device *
 dbx500_add_rtc(struct device *parent, resource_size_t base, int irq)
 {
-	return dbx500_add_amba_device(parent, "rtc-pl031", base, irq, NULL, 0);
+	return amba_apb_device_add(parent, "rtc-pl031", base, SZ_4K, irq,
+				0, NULL, 0);
 }
 
 struct nmk_gpio_platform_data;
diff --git a/arch/arm/mach-ux500/devices-db8500.h b/arch/arm/mach-ux500/devices-db8500.h
index 9fd93e9..6fc7eb2 100644
--- a/arch/arm/mach-ux500/devices-db8500.h
+++ b/arch/arm/mach-ux500/devices-db8500.h
@@ -31,7 +31,7 @@
 db8500_add_ssp(struct device *parent, const char *name, resource_size_t base,
 	       int irq, struct pl022_ssp_controller *pdata)
 {
-	return dbx500_add_amba_device(parent, name, base, irq, pdata, 0);
+	return amba_ahb_device_add(parent, name, base, SZ_4K, irq, 0, pdata, 0);
 }
 
 
diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
index 2b2d51c..0127490 100644
--- a/arch/arm/mach-ux500/mbox-db5500.c
+++ b/arch/arm/mach-ux500/mbox-db5500.c
@@ -168,7 +168,7 @@
 	return sprintf(buf, "0x%X\n", mbox_value);
 }
 
-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
 
 static int mbox_show(struct seq_file *s, void *data)
 {
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 6bbd74e..cf4687e 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -66,12 +66,6 @@
 #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)
 #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE)
 
-static struct fpga_irq_data sic_irq = {
-	.base		= VA_SIC_BASE,
-	.irq_start	= IRQ_SIC_START,
-	.chip.name	= "SIC",
-};
-
 #if 1
 #define IRQ_MMCI0A	IRQ_VICSOURCE22
 #define IRQ_AACI	IRQ_VICSOURCE24
@@ -105,8 +99,11 @@
 
 	writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
 
-	fpga_irq_init(IRQ_VICSOURCE31, ~PIC_MASK, &sic_irq);
-	irq_domain_generate_simple(sic_of_match, VERSATILE_SIC_BASE, IRQ_SIC_START);
+	np = of_find_matching_node_by_address(NULL, sic_of_match,
+					      VERSATILE_SIC_BASE);
+
+	fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START,
+		IRQ_VICSOURCE31, ~PIC_MASK, np);
 
 	/*
 	 * Interrupts on secondary controller from 0 to 8 are routed to
@@ -666,17 +663,18 @@
  * having a specific name.
  */
 struct of_dev_auxdata versatile_auxdata_lookup[] __initdata = {
-	OF_DEV_AUXDATA("arm,primecell", VERSATILE_MMCI0_BASE, "fpga:05", NULL),
+	OF_DEV_AUXDATA("arm,primecell", VERSATILE_MMCI0_BASE, "fpga:05", &mmc0_plat_data),
 	OF_DEV_AUXDATA("arm,primecell", VERSATILE_KMI0_BASE, "fpga:06", NULL),
 	OF_DEV_AUXDATA("arm,primecell", VERSATILE_KMI1_BASE, "fpga:07", NULL),
 	OF_DEV_AUXDATA("arm,primecell", VERSATILE_UART3_BASE, "fpga:09", NULL),
+	/* FIXME: this is buggy, the platform data is needed for this MMC instance too */
 	OF_DEV_AUXDATA("arm,primecell", VERSATILE_MMCI1_BASE, "fpga:0b", NULL),
 
 	OF_DEV_AUXDATA("arm,primecell", VERSATILE_CLCD_BASE, "dev:20", &clcd_plat_data),
 	OF_DEV_AUXDATA("arm,primecell", VERSATILE_UART0_BASE, "dev:f1", NULL),
 	OF_DEV_AUXDATA("arm,primecell", VERSATILE_UART1_BASE, "dev:f2", NULL),
 	OF_DEV_AUXDATA("arm,primecell", VERSATILE_UART2_BASE, "dev:f3", NULL),
-	OF_DEV_AUXDATA("arm,primecell", VERSATILE_SSP_BASE, "dev:f4", NULL),
+	OF_DEV_AUXDATA("arm,primecell", VERSATILE_SSP_BASE, "dev:f4", &ssp0_plat_data),
 
 #if 0
 	/*
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index d2268be8..15c6a00 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -303,12 +303,6 @@
 }
 
 
-struct pci_bus * __init pci_versatile_scan_bus(int nr, struct pci_sys_data *sys)
-{
-	return pci_scan_root_bus(NULL, sys->busnr, &pci_versatile_ops, sys,
-				 &sys->resources);
-}
-
 void __init pci_versatile_preinit(void)
 {
 	pcibios_min_io = 0x44000000;
@@ -339,19 +333,16 @@
 	 *  26     1     29
 	 *  27     1     30
 	 */
-	irq = 27 + ((slot + pin - 1) & 3);
-
-	printk("PCI map irq: slot %d, pin %d, devslot %d, irq: %d\n",slot,pin,devslot,irq);
+	irq = 27 + ((slot - 24 + pin - 1) & 3);
 
 	return irq;
 }
 
 static struct hw_pci versatile_pci __initdata = {
-	.swizzle		= NULL,
 	.map_irq		= versatile_map_irq,
 	.nr_controllers		= 1,
+	.ops			= &pci_versatile_ops,
 	.setup			= pci_versatile_setup,
-	.scan			= pci_versatile_scan_bus,
 	.preinit		= pci_versatile_preinit,
 };
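
The updated versatile_map_irq() above encodes the slot/pin mapping from the
comment table as arithmetic; a quick host-side check of the formula against
the rows visible in the comment (helper name made up):

#include <stdio.h>

/* Mirrors the versatile_map_irq() arithmetic as updated above. */
static int versatile_irq_for(int slot, int pin)
{
	return 27 + ((slot - 24 + pin - 1) & 3);
}

int main(void)
{
	int slot;

	/* e.g. slot 26/pin 1 -> 29 and slot 27/pin 1 -> 30, matching the
	 * rows of the mapping table quoted in the comment above. */
	for (slot = 24; slot <= 27; slot++)
		printf("slot %d, pin 1 -> irq %d\n", slot, versatile_irq_for(slot, 1));
	return 0;
}
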
 
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 47cdcca..04dd092 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -19,8 +19,10 @@
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
 
+#include <asm/arch_timer.h>
 #include <asm/mach-types.h>
 #include <asm/sizes.h>
+#include <asm/smp_twd.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/time.h>
@@ -616,7 +618,6 @@
 	}
 
 	clkdev_add_table(v2m_dt_lookups, ARRAY_SIZE(v2m_dt_lookups));
-	versatile_sched_clock_init(v2m_sysreg_base + V2M_SYS_24MHZ, 24000000);
 }
 
 static  struct of_device_id vexpress_irq_match[] __initdata = {
@@ -643,6 +644,11 @@
 		return;
 	node = of_find_node_by_path(path);
 	v2m_sp804_init(of_iomap(node, 0), irq_of_parse_and_map(node, 0));
+	if (arch_timer_of_register() != 0)
+		twd_local_timer_of_register();
+
+	if (arch_timer_sched_clock_init() != 0)
+		versatile_sched_clock_init(v2m_sysreg_base + V2M_SYS_24MHZ, 24000000);
 }
 
 static struct sys_timer v2m_dt_timer = {
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c8a7d8..101b968 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -4,23 +4,6 @@
 # which CPUs we support in the kernel image, and the compiler instruction
 # optimiser behaviour.
 
-# ARM610
-config CPU_ARM610
-	bool "Support ARM610 processor" if ARCH_RPC
-	select CPU_32v3
-	select CPU_CACHE_V3
-	select CPU_CACHE_VIVT
-	select CPU_CP15_MMU
-	select CPU_COPY_V3 if MMU
-	select CPU_TLB_V3 if MMU
-	select CPU_PABRT_LEGACY
-	help
-	  The ARM610 is the successor to the ARM3 processor
-	  and was produced by VLSI Technology Inc.
-
-	  Say Y if you want support for the ARM610 processor.
-	  Otherwise, say N.
-
 # ARM7TDMI
 config CPU_ARM7TDMI
 	bool "Support ARM7TDMI processor"
@@ -36,25 +19,6 @@
 	  Say Y if you want support for the ARM7TDMI processor.
 	  Otherwise, say N.
 
-# ARM710
-config CPU_ARM710
-	bool "Support ARM710 processor" if ARCH_RPC
-	select CPU_32v3
-	select CPU_CACHE_V3
-	select CPU_CACHE_VIVT
-	select CPU_CP15_MMU
-	select CPU_COPY_V3 if MMU
-	select CPU_TLB_V3 if MMU
-	select CPU_PABRT_LEGACY
-	help
-	  A 32-bit RISC microprocessor based on the ARM7 processor core
-	  designed by Advanced RISC Machines Ltd. The ARM710 is the
-	  successor to the ARM610 processor. It was released in
-	  July 1994 by VLSI Technology Inc.
-
-	  Say Y if you want support for the ARM710 processor.
-	  Otherwise, say N.
-
 # ARM720T
 config CPU_ARM720T
 	bool "Support ARM720T processor" if ARCH_INTEGRATOR
@@ -530,9 +494,6 @@
 
 if MMU
 # The copy-page model
-config CPU_COPY_V3
-	bool
-
 config CPU_COPY_V4WT
 	bool
 
@@ -549,11 +510,6 @@
 	bool
 
 # This selects the TLB model
-config CPU_TLB_V3
-	bool
-	help
-	  ARM Architecture Version 3 TLB.
-
 config CPU_TLB_V4WT
 	bool
 	help
@@ -731,7 +687,7 @@
 
 config CPU_ICACHE_DISABLE
 	bool "Disable I-Cache (I-bit)"
-	depends on CPU_CP15 && !(CPU_ARM610 || CPU_ARM710 || CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)
+	depends on CPU_CP15 && !(CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)
 	help
 	  Say Y here to disable the processor instruction cache. Unless
 	  you have a reason not to or are unsure, say N.
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index bca7e61..8a9c4cb 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -44,7 +44,6 @@
 AFLAGS_cache-v6.o	:=-Wa,-march=armv6
 AFLAGS_cache-v7.o	:=-Wa,-march=armv7-a
 
-obj-$(CONFIG_CPU_COPY_V3)	+= copypage-v3.o
 obj-$(CONFIG_CPU_COPY_V4WT)	+= copypage-v4wt.o
 obj-$(CONFIG_CPU_COPY_V4WB)	+= copypage-v4wb.o
 obj-$(CONFIG_CPU_COPY_FEROCEON)	+= copypage-feroceon.o
@@ -54,7 +53,6 @@
 obj-$(CONFIG_CPU_XSC3)		+= copypage-xsc3.o
 obj-$(CONFIG_CPU_COPY_FA)	+= copypage-fa.o
 
-obj-$(CONFIG_CPU_TLB_V3)	+= tlb-v3.o
 obj-$(CONFIG_CPU_TLB_V4WT)	+= tlb-v4.o
 obj-$(CONFIG_CPU_TLB_V4WB)	+= tlb-v4wb.o
 obj-$(CONFIG_CPU_TLB_V4WBI)	+= tlb-v4wbi.o
@@ -66,8 +64,6 @@
 AFLAGS_tlb-v6.o		:=-Wa,-march=armv6
 AFLAGS_tlb-v7.o		:=-Wa,-march=armv7-a
 
-obj-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o
-obj-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o
 obj-$(CONFIG_CPU_ARM7TDMI)	+= proc-arm7tdmi.o
 obj-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o
 obj-$(CONFIG_CPU_ARM740T)	+= proc-arm740.o
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index ff1f7cc..8074199 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,18 +26,23 @@
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 /*
- * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103).
- * The test below covers all the write situations, including Java bytecodes
+ * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
  */
-	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
-	tst	r5, #PSR_J_BIT			@ Java?
+#ifdef CONFIG_ARM_ERRATA_326103
+	ldr	ip, =0x4107b36
+	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
+	teq	ip, r3, lsr #4			@ ARM1136?
 	bne	do_DataAbort
-	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
-	ldreq	r3, [r4]			@ read aborted ARM instruction
+	tst	r5, #PSR_J_BIT			@ Java?
+	tsteq	r5, #PSR_T_BIT			@ Thumb?
+	bne	do_DataAbort
+	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
+	ldr	r3, [r4]			@ read aborted ARM instruction
 #ifdef CONFIG_CPU_ENDIAN_BE8
-	reveq	r3, r3
+	rev	r3, r3
 #endif
 	do_ldrd_abort tmp=ip, insn=r3
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
+#endif
 	b	do_DataAbort
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index a53fd2a..2a8e380 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,6 +32,7 @@
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static u32 l2x0_way_mask;	/* Bitmask of active ways */
 static u32 l2x0_size;
+static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 
 struct l2x0_regs l2x0_saved_regs;
 
@@ -61,12 +62,7 @@
 {
 	void __iomem *base = l2x0_base;
 
-#ifdef CONFIG_PL310_ERRATA_753970
-	/* write to an unmmapped register */
-	writel_relaxed(0, base + L2X0_DUMMY_REG);
-#else
-	writel_relaxed(0, base + L2X0_CACHE_SYNC);
-#endif
+	writel_relaxed(0, base + sync_reg_offset);
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -85,10 +81,13 @@
 }
 
 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
+static inline void debug_writel(unsigned long val)
+{
+	if (outer_cache.set_debug)
+		outer_cache.set_debug(val);
+}
 
-#define debug_writel(val)	outer_cache.set_debug(val)
-
-static void l2x0_set_debug(unsigned long val)
+static void pl310_set_debug(unsigned long val)
 {
 	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
 }
@@ -98,7 +97,7 @@
 {
 }
 
-#define l2x0_set_debug	NULL
+#define pl310_set_debug	NULL
 #endif
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -331,6 +330,11 @@
 		else
 			ways = 8;
 		type = "L310";
+#ifdef CONFIG_PL310_ERRATA_753970
+		/* Unmapped register. */
+		sync_reg_offset = L2X0_DUMMY_REG;
+#endif
+		outer_cache.set_debug = pl310_set_debug;
 		break;
 	case L2X0_CACHE_ID_PART_L210:
 		ways = (aux >> 13) & 0xf;
@@ -379,7 +383,6 @@
 	outer_cache.flush_all = l2x0_flush_all;
 	outer_cache.inv_all = l2x0_inv_all;
 	outer_cache.disable = l2x0_disable;
-	outer_cache.set_debug = l2x0_set_debug;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
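
Two things change in the l2x0 code above: the cache sync register offset is
chosen once at init time instead of via #ifdef in cache_sync(), and the
set_debug hook becomes optional, installed only for PL310 parts and checked
for NULL by debug_writel(). A standalone illustration of the optional-hook
half (struct and function names invented, not the driver's):

#include <stdio.h>

/* Optional-hook pattern used by debug_writel() above: the caller checks
 * for NULL instead of assuming every cache controller provides set_debug. */
struct outer_cache_fns_example {
	void (*set_debug)(unsigned long val);
};

static struct outer_cache_fns_example outer_example;

static void debug_writel_example(unsigned long val)
{
	if (outer_example.set_debug)
		outer_example.set_debug(val);
}

int main(void)
{
	debug_writel_example(1);	/* harmless no-op until a hook is installed */
	return 0;
}
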
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2301f2..52e35f3 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -78,6 +78,7 @@
  *	- end	 - virtual end address
  */
 ENTRY(v3_coherent_user_range)
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index fd9bb7a..022135d 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -88,6 +88,7 @@
  *	- end	 - virtual end address
  */
 ENTRY(v4_coherent_user_range)
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 4f2c141..8f1eeae 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -167,9 +167,9 @@
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
 	blo	1b
-	mov	ip, #0
-	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
 
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 4d7b467..b34a5f9 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -125,6 +125,7 @@
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
 	blo	1b
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 74c2e5a..4b10760 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/errno.h>
 #include <asm/unwind.h>
 
 #include "proc-macros.S"
@@ -135,7 +136,6 @@
 1:
  USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
 	add	r0, r0, #CACHE_LINE_SIZE
-2:
 	cmp	r0, r1
 	blo	1b
 #endif
@@ -154,13 +154,11 @@
 
 /*
  * Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
+ * isn't mapped, fail with -EFAULT.
  */
 9001:
-	mov	r0, r0, lsr #12
-	mov	r0, r0, lsl #12
-	add	r0, r0, #4096
-	b	2b
+	mov	r0, #-EFAULT
+	mov	pc, lr
  UNWIND(.fnend		)
 ENDPROC(v6_coherent_user_range)
 ENDPROC(v6_coherent_kern_range)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a655d3d..39e3fb3 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/errno.h>
 #include <asm/unwind.h>
 
 #include "proc-macros.S"
@@ -198,7 +199,6 @@
 	add	r12, r12, r2
 	cmp	r12, r1
 	blo	2b
-3:
 	mov	r0, #0
 	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
 	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
@@ -208,13 +208,11 @@
 
 /*
  * Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
+ * isn't mapped, fail with -EFAULT.
  */
 9001:
-	mov	r12, r12, lsr #12
-	mov	r12, r12, lsl #12
-	add	r12, r12, #4096
-	b	3b
+	mov	r0, #-EFAULT
+	mov	pc, lr
  UNWIND(.fnend		)
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
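
The v6 and v7 fault fixups above (together with the "mov r0, #0" additions
to the other cache implementations in this series) give the
coherent_user_range operations a return value: 0 on success, -EFAULT when
the user address is unmapped, instead of silently skipping to the next page.
A sketch of a caller making use of that return value, with hypothetical
function names standing in for the real cache-op entry points:

#include <errno.h>

/* Stand-in for the rewritten cache op: the real routines above now
 * return 0 on success and -EFAULT when the user range faults. */
static int coherent_user_range_example(unsigned long start, unsigned long end)
{
	(void)start;
	(void)end;
	return 0;	/* or -EFAULT on a faulting address */
}

/* A caller can now propagate the fault to its own caller instead of the
 * cache op quietly stepping over the unmapped page as the old fixup did. */
static int sync_for_user_example(unsigned long start, unsigned long end)
{
	int ret = coherent_user_range_example(start, end);

	return (ret == -EFAULT) ? ret : 0;
}
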
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index ee9bb36..806cc4f 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -18,30 +18,39 @@
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 #ifdef CONFIG_ARM_LPAE
-#define cpu_set_asid(asid) {						\
-	unsigned long ttbl, ttbh;					\
-	asm volatile(							\
-	"	mrrc	p15, 0, %0, %1, c2		@ read TTBR0\n"	\
-	"	mov	%1, %2, lsl #(48 - 32)		@ set ASID\n"	\
-	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"	\
-	: "=&r" (ttbl), "=&r" (ttbh)					\
-	: "r" (asid & ~ASID_MASK));					\
+void cpu_set_reserved_ttbr0(void)
+{
+	unsigned long ttbl = __pa(swapper_pg_dir);
+	unsigned long ttbh = 0;
+
+	/*
+	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
+	 * ASID is set to 0.
+	 */
+	asm volatile(
+	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
+	:
+	: "r" (ttbl), "r" (ttbh));
+	isb();
 }
 #else
-#define cpu_set_asid(asid) \
-	asm("	mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
+void cpu_set_reserved_ttbr0(void)
+{
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile(
+	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
+	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
+	: "=r" (ttb));
+	isb();
+}
 #endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in.  We reserve version 0 for initial tasks so we will
- * always allocate an ASID. The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -51,9 +60,7 @@
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	cpu_set_asid(0);
-	isb();
+	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
 		__flush_icache_all();
@@ -98,14 +105,7 @@
 {
 	unsigned int asid;
 	unsigned int cpu = smp_processor_id();
-	struct mm_struct *mm = per_cpu(current_mm, cpu);
-
-	/*
-	 * Check if a current_mm was set on this CPU as it might still
-	 * be in the early booting stages and using the reserved ASID.
-	 */
-	if (!mm)
-		return;
+	struct mm_struct *mm = current->active_mm;
 
 	smp_rmb();
 	asid = cpu_last_asid + cpu + 1;
@@ -114,8 +114,7 @@
 	set_mm_context(mm, asid);
 
 	/* set the new ASID */
-	cpu_set_asid(mm->context.id);
-	isb();
+	cpu_switch_mm(mm->pgd, mm);
 }
 
 #else
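
Taken together, the context.c hunks above change ASID rollover to park TTBR0
on global-only page tables (or on a copy of TTBR1 for the classic MMU case)
rather than relying on a reserved ASID value in the context ID register. A
reduced pseudo-C restatement of the ordering the patch implements; the names
mirror the patch, the bodies are condensed, and this is not meant to compile
on its own:

/* Rollover: point TTBR0 at global-only entries, then flush. No stale ASID
 * can be used while the per-mm contexts are being reassigned. */
static void flush_context_example(void)
{
	cpu_set_reserved_ttbr0();	/* global entries only, ASID 0 */
	local_flush_tlb_all();
}

/* Per-CPU reset: hand out the new ASID and switch straight to the mm's
 * own page tables via the normal cpu_switch_mm() path. */
static void reset_context_example(struct mm_struct *mm, unsigned int asid)
{
	set_mm_context(mm, asid);
	cpu_switch_mm(mm->pgd, mm);
}
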
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
deleted file mode 100644
index 3935bdd..0000000
--- a/arch/arm/mm/copypage-v3.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *  linux/arch/arm/mm/copypage-v3.c
- *
- *  Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/init.h>
-#include <linux/highmem.h>
-
-/*
- * ARMv3 optimised copy_user_highpage
- *
- * FIXME: do we need to handle cache stuff...
- */
-static void __naked
-v3_copy_user_page(void *kto, const void *kfrom)
-{
-	asm("\n\
-	stmfd	sp!, {r4, lr}			@	2\n\
-	mov	r2, %2				@	1\n\
-	ldmia	%0!, {r3, r4, ip, lr}		@	4+1\n\
-1:	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
-	ldmia	%0!, {r3, r4, ip, lr}		@	4+1\n\
-	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
-	ldmia	%0!, {r3, r4, ip, lr}		@	4+1\n\
-	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
-	ldmia	%0!, {r3, r4, ip, lr}		@	4\n\
-	subs	r2, r2, #1			@	1\n\
-	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
-	ldmneia	%0!, {r3, r4, ip, lr}		@	4\n\
-	bne	1b				@	1\n\
-	ldmfd	sp!, {r4, pc}			@	3"
-	:
-	: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
-}
-
-void v3_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr, struct vm_area_struct *vma)
-{
-	void *kto, *kfrom;
-
-	kto = kmap_atomic(to);
-	kfrom = kmap_atomic(from);
-	v3_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom);
-	kunmap_atomic(kto);
-}
-
-/*
- * ARMv3 optimised clear_user_page
- *
- * FIXME: do we need to handle cache stuff...
- */
-void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
-{
-	void *ptr, *kaddr = kmap_atomic(page);
-	asm volatile("\n\
-	mov	r1, %2				@ 1\n\
-	mov	r2, #0				@ 1\n\
-	mov	r3, #0				@ 1\n\
-	mov	ip, #0				@ 1\n\
-	mov	lr, #0				@ 1\n\
-1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
-	subs	r1, r1, #1			@ 1\n\
-	bne	1b				@ 1"
-	: "=r" (ptr)
-	: "0" (kaddr), "I" (PAGE_SIZE / 64)
-	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr);
-}
-
-struct cpu_user_fns v3_user_fns __initdata = {
-	.cpu_clear_user_highpage = v3_clear_user_highpage,
-	.cpu_copy_user_highpage	= v3_copy_user_highpage,
-};
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index f074675..c3bd834 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -247,7 +247,9 @@
 	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 
 check_stack:
-	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+	/* Don't allow expansion below FIRST_USER_ADDRESS */
+	if (vma->vm_flags & VM_GROWSDOWN &&
+	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
 		goto good_area;
 out:
 	return fault;
@@ -430,9 +432,6 @@
 
 	index = pgd_index(addr);
 
-	/*
-	 * FIXME: CP15 C1 is write only on ARMv3 architectures.
-	 */
 	pgd = cpu_get_pgd() + index;
 	pgd_k = init_mm.pgd + index;
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 595079f..8f5813b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -293,11 +293,11 @@
 #endif
 
 #ifndef CONFIG_SPARSEMEM
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 	struct memblock_region *reg;
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b86f893..aa78de8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -489,7 +489,8 @@
 	 */
 	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 		mem_types[i].prot_pte |= PTE_EXT_AF;
-		mem_types[i].prot_sect |= PMD_SECT_AF;
+		if (mem_types[i].prot_sect)
+			mem_types[i].prot_sect |= PMD_SECT_AF;
 	}
 	kern_pgprot |= PTE_EXT_AF;
 	vecs_pgprot |= PTE_EXT_AF;
@@ -618,8 +619,8 @@
 	}
 }
 
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
-	unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+	unsigned long end, unsigned long phys, const struct mem_type *type)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 2349513..0650bb8 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -241,6 +241,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index c244b06..4188478 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -235,6 +235,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 38fe22e..33c6882 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -224,6 +224,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 3eb9c3c..fbc1d5f 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -218,6 +218,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
deleted file mode 100644
index 4fbeb5b..0000000
--- a/arch/arm/mm/proc-arm6_7.S
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- *  linux/arch/arm/mm/proc-arm6,7.S
- *
- *  Copyright (C) 1997-2000 Russell King
- *  hacked for non-paged-MM by Hyok S. Choi, 2003.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  These are the low level assembler for performing cache and TLB
- *  functions on the ARM610 & ARM710.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-#include <asm/hwcap.h>
-#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
-#include <asm/ptrace.h>
-
-#include "proc-macros.S"
-
-ENTRY(cpu_arm6_dcache_clean_area)
-ENTRY(cpu_arm7_dcache_clean_area)
-		mov	pc, lr
-
-/*
- * Function: arm6_7_data_abort ()
- *
- * Params  : r2 = pt_regs
- *	   : r4 = aborted context pc
- *	   : r5 = aborted context psr
- *
- * Purpose : obtain information about current aborted instruction
- *
- * Returns : r4-r5, r10-r11, r13 preserved
- */
-
-ENTRY(cpu_arm7_data_abort)
-	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
-	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
-	ldr	r8, [r4]			@ read arm instruction
-	tst	r8, #1 << 20			@ L = 0 -> write?
-	orreq	r1, r1, #1 << 11		@ yes.
-	and	r7, r8, #15 << 24
-	add	pc, pc, r7, lsr #22		@ Now branch to the relevant processing routine
-	nop
-
-/* 0 */	b	.data_unknown
-/* 1 */	b	do_DataAbort			@ swp
-/* 2 */	b	.data_unknown
-/* 3 */	b	.data_unknown
-/* 4 */	b	.data_arm_lateldrpostconst	@ ldr	rd, [rn], #m
-/* 5 */	b	.data_arm_lateldrpreconst	@ ldr	rd, [rn, #m]
-/* 6 */	b	.data_arm_lateldrpostreg	@ ldr	rd, [rn], rm
-/* 7 */	b	.data_arm_lateldrprereg		@ ldr	rd, [rn, rm]
-/* 8 */	b	.data_arm_ldmstm		@ ldm*a	rn, <rlist>
-/* 9 */	b	.data_arm_ldmstm		@ ldm*b	rn, <rlist>
-/* a */	b	.data_unknown
-/* b */	b	.data_unknown
-/* c */	b	do_DataAbort			@ ldc	rd, [rn], #m	@ Same as ldr	rd, [rn], #m
-/* d */	b	do_DataAbort			@ ldc	rd, [rn, #m]
-/* e */	b	.data_unknown
-/* f */
-.data_unknown:	@ Part of jumptable
-	mov	r0, r4
-	mov	r1, r8
-	b	baddataabort
-
-ENTRY(cpu_arm6_data_abort)
-	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
-	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
-	ldr	r8, [r4]			@ read arm instruction
-	tst	r8, #1 << 20			@ L = 0 -> write?
-	orreq	r1, r1, #1 << 11		@ yes.
-	and	r7, r8, #14 << 24
-	teq	r7, #8 << 24			@ was it ldm/stm
-	bne	do_DataAbort
-
-.data_arm_ldmstm:
-	tst	r8, #1 << 21			@ check writeback bit
-	beq	do_DataAbort			@ no writeback -> no fixup
-	mov	r7, #0x11
-	orr	r7, r7, #0x1100
-	and	r6, r8, r7
-	and	r9, r8, r7, lsl #1
-	add	r6, r6, r9, lsr #1
-	and	r9, r8, r7, lsl #2
-	add	r6, r6, r9, lsr #2
-	and	r9, r8, r7, lsl #3
-	add	r6, r6, r9, lsr #3
-	add	r6, r6, r6, lsr #8
-	add	r6, r6, r6, lsr #4
-	and	r6, r6, #15			@ r6 = no. of registers to transfer.
-	and	r9, r8, #15 << 16		@ Extract 'n' from instruction
-	ldr	r7, [r2, r9, lsr #14]		@ Get register 'Rn'
-	tst	r8, #1 << 23			@ Check U bit
-	subne	r7, r7, r6, lsl #2		@ Undo increment
-	addeq	r7, r7, r6, lsl #2		@ Undo decrement
-	str	r7, [r2, r9, lsr #14]		@ Put register 'Rn'
-	b	do_DataAbort
-
-.data_arm_apply_r6_and_rn:
-	and	r9, r8, #15 << 16		@ Extract 'n' from instruction
-	ldr	r7, [r2, r9, lsr #14]		@ Get register 'Rn'
-	tst	r8, #1 << 23			@ Check U bit
-	subne	r7, r7, r6			@ Undo incrmenet
-	addeq	r7, r7, r6			@ Undo decrement
-	str	r7, [r2, r9, lsr #14]		@ Put register 'Rn'
-	b	do_DataAbort
-
-.data_arm_lateldrpreconst:
-	tst	r8, #1 << 21			@ check writeback bit
-	beq	do_DataAbort			@ no writeback -> no fixup
-.data_arm_lateldrpostconst:
-	movs	r6, r8, lsl #20			@ Get offset
-	beq	do_DataAbort			@ zero -> no fixup
-	and	r9, r8, #15 << 16		@ Extract 'n' from instruction
-	ldr	r7, [r2, r9, lsr #14]		@ Get register 'Rn'
-	tst	r8, #1 << 23			@ Check U bit
-	subne	r7, r7, r6, lsr #20		@ Undo increment
-	addeq	r7, r7, r6, lsr #20		@ Undo decrement
-	str	r7, [r2, r9, lsr #14]		@ Put register 'Rn'
-	b	do_DataAbort
-
-.data_arm_lateldrprereg:
-	tst	r8, #1 << 21			@ check writeback bit
-	beq	do_DataAbort			@ no writeback -> no fixup
-.data_arm_lateldrpostreg:
-	and	r7, r8, #15			@ Extract 'm' from instruction
-	ldr	r6, [r2, r7, lsl #2]		@ Get register 'Rm'
-	mov	r9, r8, lsr #7			@ get shift count
-	ands	r9, r9, #31
-	and	r7, r8, #0x70			@ get shift type
-	orreq	r7, r7, #8			@ shift count = 0
-	add	pc, pc, r7
-	nop
-
-	mov	r6, r6, lsl r9			@ 0: LSL #!0
-	b	.data_arm_apply_r6_and_rn
-	b	.data_arm_apply_r6_and_rn	@ 1: LSL #0
-	nop
-	b	.data_unknown			@ 2: MUL?
-	nop
-	b	.data_unknown			@ 3: MUL?
-	nop
-	mov	r6, r6, lsr r9			@ 4: LSR #!0
-	b	.data_arm_apply_r6_and_rn
-	mov	r6, r6, lsr #32			@ 5: LSR #32
-	b	.data_arm_apply_r6_and_rn
-	b	.data_unknown			@ 6: MUL?
-	nop
-	b	.data_unknown			@ 7: MUL?
-	nop
-	mov	r6, r6, asr r9			@ 8: ASR #!0
-	b	.data_arm_apply_r6_and_rn
-	mov	r6, r6, asr #32			@ 9: ASR #32
-	b	.data_arm_apply_r6_and_rn
-	b	.data_unknown			@ A: MUL?
-	nop
-	b	.data_unknown			@ B: MUL?
-	nop
-	mov	r6, r6, ror r9			@ C: ROR #!0
-	b	.data_arm_apply_r6_and_rn
-	mov	r6, r6, rrx			@ D: RRX
-	b	.data_arm_apply_r6_and_rn
-	b	.data_unknown			@ E: MUL?
-	nop
-	b	.data_unknown			@ F: MUL?
-
-/*
- * Function: arm6_7_proc_init (void)
- *	   : arm6_7_proc_fin (void)
- *
- * Notes   : This processor does not require these
- */
-ENTRY(cpu_arm6_proc_init)
-ENTRY(cpu_arm7_proc_init)
-		mov	pc, lr
-
-ENTRY(cpu_arm6_proc_fin)
-ENTRY(cpu_arm7_proc_fin)
-		mov	r0, #0x31			@ ....S..DP...M
-		mcr	p15, 0, r0, c1, c0, 0		@ disable caches
-		mov	pc, lr
-
-ENTRY(cpu_arm6_do_idle)
-ENTRY(cpu_arm7_do_idle)
-		mov	pc, lr
-
-/*
- * Function: arm6_7_switch_mm(unsigned long pgd_phys)
- * Params  : pgd_phys	Physical address of page table
- * Purpose : Perform a task switch, saving the old processes state, and restoring
- *	     the new.
- */
-ENTRY(cpu_arm6_switch_mm)
-ENTRY(cpu_arm7_switch_mm)
-#ifdef CONFIG_MMU
-		mov	r1, #0
-		mcr	p15, 0, r1, c7, c0, 0		@ flush cache
-		mcr	p15, 0, r0, c2, c0, 0		@ update page table ptr
-		mcr	p15, 0, r1, c5, c0, 0		@ flush TLBs
-#endif
-		mov	pc, lr
-
-/*
- * Function: arm6_7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext)
- * Params  : r0 = Address to set
- *	   : r1 = value to set
- * Purpose : Set a PTE and flush it out of any WB cache
- */
-	.align	5
-ENTRY(cpu_arm6_set_pte_ext)
-ENTRY(cpu_arm7_set_pte_ext)
-#ifdef CONFIG_MMU
-	armv3_set_pte_ext wc_disable=0
-#endif /* CONFIG_MMU */
-	mov	pc, lr
-
-/*
- * Function: _arm6_7_reset
- * Params  : r0 = address to jump to
- * Notes   : This sets up everything for a reset
- */
-		.pushsection	.idmap.text, "ax"
-ENTRY(cpu_arm6_reset)
-ENTRY(cpu_arm7_reset)
-		mov	r1, #0
-		mcr	p15, 0, r1, c7, c0, 0		@ flush cache
-#ifdef CONFIG_MMU
-		mcr	p15, 0, r1, c5, c0, 0		@ flush TLB
-#endif
-		mov	r1, #0x30
-		mcr	p15, 0, r1, c1, c0, 0		@ turn off MMU etc
-		mov	pc, r0
-ENDPROC(cpu_arm6_reset)
-ENDPROC(cpu_arm7_reset)
-		.popsection
-
-		__CPUINIT
-
-		.type	__arm6_setup, #function
-__arm6_setup:	mov	r0, #0
-		mcr	p15, 0, r0, c7, c0		@ flush caches on v3
-#ifdef CONFIG_MMU
-		mcr	p15, 0, r0, c5, c0		@ flush TLBs on v3
-		mov	r0, #0x3d			@ . ..RS BLDP WCAM
-		orr	r0, r0, #0x100			@ . ..01 0011 1101
-#else
-		mov	r0, #0x3c			@ . ..RS BLDP WCA.
-#endif
-		mov	pc, lr
-		.size	__arm6_setup, . - __arm6_setup
-
-		.type	__arm7_setup, #function
-__arm7_setup:	mov	r0, #0
-		mcr	p15, 0, r0, c7, c0		@ flush caches on v3
-#ifdef CONFIG_MMU
-		mcr	p15, 0, r0, c5, c0		@ flush TLBs on v3
-		mcr	p15, 0, r0, c3, c0		@ load domain access register
-		mov	r0, #0x7d			@ . ..RS BLDP WCAM
-		orr	r0, r0, #0x100			@ . ..01 0111 1101
-#else
-		mov	r0, #0x7c			@ . ..RS BLDP WCA.
-#endif
-		mov	pc, lr
-		.size	__arm7_setup, . - __arm7_setup
-
-		__INITDATA
-
-		@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
-		define_processor_functions arm6, dabort=cpu_arm6_data_abort, pabort=legacy_pabort
-		define_processor_functions arm7, dabort=cpu_arm7_data_abort, pabort=legacy_pabort
-
-		.section ".rodata"
-
-		string	cpu_arch_name, "armv3"
-		string	cpu_elf_name, "v3"
-		string	cpu_arm6_name, "ARM6"
-		string	cpu_arm610_name, "ARM610"
-		string	cpu_arm7_name, "ARM7"
-		string	cpu_arm710_name, "ARM710"
-
-		.align
-
-		.section ".proc.info.init", #alloc, #execinstr
-
-.macro arm67_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
-	cpu_mm_mmu_flags:req, cpu_flush:req, cpu_proc_funcs:req
-		.type	__\name\()_proc_info, #object
-__\name\()_proc_info:
-		.long	\cpu_val
-		.long	\cpu_mask
-		.long	\cpu_mm_mmu_flags
-		.long   PMD_TYPE_SECT | \
-			PMD_BIT4 | \
-			PMD_SECT_AP_WRITE | \
-			PMD_SECT_AP_READ
-		b	\cpu_flush
-		.long	cpu_arch_name
-		.long	cpu_elf_name
-		.long	HWCAP_SWP | HWCAP_26BIT
-		.long	\cpu_name
-		.long	\cpu_proc_funcs
-		.long	v3_tlb_fns
-		.long	v3_user_fns
-		.long	v3_cache_fns
-		.size	__\name\()_proc_info, . - __\name\()_proc_info
-.endm
-
-	arm67_proc_info	arm6,	0x41560600, 0xfffffff0, cpu_arm6_name, \
-		0x00000c1e, __arm6_setup, arm6_processor_functions
-	arm67_proc_info	arm610,	0x41560610, 0xfffffff0, cpu_arm610_name, \
-		0x00000c1e, __arm6_setup, arm6_processor_functions
-	arm67_proc_info	arm7,	0x41007000, 0xffffff00, cpu_arm7_name, \
-		0x00000c1e, __arm7_setup, arm7_processor_functions
-	arm67_proc_info	arm710,	0x41007100, 0xfff8ff00, cpu_arm710_name, \
-			PMD_TYPE_SECT | \
-			PMD_SECT_BUFFERABLE | \
-			PMD_SECT_CACHEABLE | \
-			PMD_BIT4 | \
-			PMD_SECT_AP_WRITE | \
-			PMD_SECT_AP_READ, \
-		__arm7_setup, arm7_processor_functions
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index cb941ae..1a8c138 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -210,6 +210,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 4ec0e07..4c44d7e 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -212,6 +212,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 9dccd9a..ec5b118 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -258,6 +258,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 820259b..c31e62c 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -221,6 +221,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 9fdc0a1..a613a7d 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -160,7 +160,7 @@
  *	- size	- region size
  */
 ENTRY(arm940_flush_kern_dcache_area)
-	mov	ip, #0
+	mov	r0, #0
 	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
 1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
 2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
@@ -168,8 +168,8 @@
 	bcs	2b				@ entries 63 to 0
 	subs	r1, r1, #1 << 4
 	bcs	1b				@ segments 7 to 0
-	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index f684cfe..9f4f299 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -190,6 +190,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index ba3c500..23a8e4c 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -232,6 +232,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index cdfedc5..b047546 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -193,6 +193,7 @@
 	cmp	r0, r1
 	blo	1b
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	r0, #0
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 3a4b3e7..42ac069 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -49,15 +49,10 @@
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
 #endif
-	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
-	isb
-1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
-	isb
-#ifdef CONFIG_ARM_ERRATA_754322
-	dsb
-#endif
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
 	isb
+	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_switch_mm)
diff --git a/arch/arm/mm/tlb-v3.S b/arch/arm/mm/tlb-v3.S
deleted file mode 100644
index d253995..0000000
--- a/arch/arm/mm/tlb-v3.S
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *  linux/arch/arm/mm/tlbv3.S
- *
- *  Copyright (C) 1997-2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ARM architecture version 3 TLB handling functions.
- *
- * Processors: ARM610, ARM710.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-#include <asm/tlbflush.h>
-#include "proc-macros.S"
-
-	.align	5
-/*
- *	v3_flush_user_tlb_range(start, end, mm)
- *
- *	Invalidate a range of TLB entries in the specified address space.
- *
- *	- start - range start address
- *	- end   - range end address
- *	- mm    - mm_struct describing address space
- */
-	.align	5
-ENTRY(v3_flush_user_tlb_range)
-	vma_vm_mm r2, r2
-	act_mm	r3				@ get current->active_mm
-	teq	r2, r3				@ == mm ?
-	movne	pc, lr				@ no, we dont do anything
-ENTRY(v3_flush_kern_tlb_range)
-	bic	r0, r0, #0x0ff
-	bic	r0, r0, #0xf00
-1:	mcr	p15, 0, r0, c6, c0, 0		@ invalidate TLB entry
-	add	r0, r0, #PAGE_SZ
-	cmp	r0, r1
-	blo	1b
-	mov	pc, lr
-
-	__INITDATA
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v3, v3_tlb_flags
diff --git a/arch/arm/plat-iop/pci.c b/arch/arm/plat-iop/pci.c
index 0da4205..8daae9b 100644
--- a/arch/arm/plat-iop/pci.c
+++ b/arch/arm/plat-iop/pci.c
@@ -160,7 +160,7 @@
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static struct pci_ops iop3xx_ops = {
+struct pci_ops iop3xx_ops = {
 	.read	= iop3xx_read_config,
 	.write	= iop3xx_write_config,
 };
@@ -220,12 +220,6 @@
 	return 1;
 }
 
-struct pci_bus *iop3xx_pci_scan_bus(int nr, struct pci_sys_data *sys)
-{
-	return pci_scan_root_bus(NULL, sys->busnr, &iop3xx_ops, sys,
-				 &sys->resources);
-}
-
 void __init iop3xx_atu_setup(void)
 {
 	/* BAR 0 ( Disabled ) */
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 5068fe5..44ae077 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 #include <linux/clocksource.h>
 
+#include <asm/mach/time.h>
 #include <asm/sched_clock.h>
 
 #include <plat/hardware.h>
@@ -43,7 +44,7 @@
 }
 
 /**
- * read_persistent_clock -  Return time from a persistent clock.
+ * omap_read_persistent_clock -  Return time from a persistent clock.
  *
  * Reads the time from a source which isn't disabled during PM, the
  * 32k sync timer.  Convert the cycles elapsed since last read into
@@ -52,7 +53,7 @@
 static struct timespec persistent_ts;
 static cycles_t cycles, last_cycles;
 static unsigned int persistent_mult, persistent_shift;
-void read_persistent_clock(struct timespec *ts)
+static void omap_read_persistent_clock(struct timespec *ts)
 {
 	unsigned long long nsecs;
 	cycles_t delta;
@@ -116,6 +117,7 @@
 			printk(err, "32k_counter");
 
 		setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
+		register_persistent_clock(NULL, omap_read_persistent_clock);
 	}
 	return 0;
 }
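
Both the OMAP hunk above and the Tegra one earlier take the same route:
rather than each platform defining the global read_persistent_clock() (only
one definition can win at link time), they register a platform hook through
register_persistent_clock(). A sketch of what a third platform would do,
assuming the first argument is an optional boot-clock accessor (both call
sites above pass NULL for it); the myplat_* names are invented:

#include <linux/init.h>
#include <linux/time.h>
#include <asm/mach/time.h>

/* Hypothetical platform hook; reads a counter that keeps running in PM. */
static void myplat_read_persistent_clock(struct timespec *ts)
{
	/* convert the platform's always-on counter into *ts here */
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

static void __init myplat_timer_init(void)
{
	/* ... clocksource/clockevent setup ... */
	register_persistent_clock(NULL, myplat_read_persistent_clock);
}
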
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index ecdb3da..c58d896 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -916,6 +916,13 @@
 			l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
 	l |= OMAP_DMA_CCR_EN;
 
+	/*
+	 * As dma_write() uses IO accessors which are weakly ordered, there
+	 * is no guarantee that data in coherent DMA memory will be visible
+	 * to the DMA device.  Add a memory barrier here to ensure that any
+	 * such data is visible prior to enabling DMA.
+	 */
+	mb();
 	p->dma_write(l, CCR, lch);
 
 	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
@@ -965,6 +972,13 @@
 		p->dma_write(l, CCR, lch);
 	}
 
+	/*
+	 * Ensure that data transferred by DMA is visible to any access
+	 * after DMA has been disabled.  This is important for coherent
+	 * DMA regions.
+	 */
+	mb();
+
 	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 		int next_lch, cur_lch = lch;
 		char dma_chan_link_map[dma_lch_count];
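
The two mb() calls added above follow the usual rule for weakly-ordered MMIO:
make the buffer contents visible to the device before the register write that
starts the transfer, and make the transferred data visible to the CPU before
it is read back after the channel is stopped. A generic kernel-style sketch
of the first half; the function, register offset and descriptor layout are
invented for illustration:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Generic form of the pattern: populate coherent DMA memory, then
 * barrier, then hit the register that lets the device start fetching.
 */
static void start_dma_example(void __iomem *regs, u32 *desc,
			      u32 ctrl_enable, unsigned int doorbell_off)
{
	desc[0] = 0x1;		/* fill in the descriptor/buffer */

	/*
	 * Ensure the descriptor writes are visible to the device before
	 * the register write below enables the transfer.
	 */
	mb();

	writel_relaxed(ctrl_enable, regs + doorbell_off);
}
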
diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h
index 317e246..e834c5e 100644
--- a/arch/arm/plat-samsung/include/plat/sdhci.h
+++ b/arch/arm/plat-samsung/include/plat/sdhci.h
@@ -18,6 +18,8 @@
 #ifndef __PLAT_S3C_SDHCI_H
 #define __PLAT_S3C_SDHCI_H __FILE__
 
+#include <plat/devs.h>
+
 struct platform_device;
 struct mmc_host;
 struct mmc_card;
@@ -356,4 +358,30 @@
 
 #endif /* CONFIG_EXYNOS4_SETUP_SDHCI */
 
+static inline void s3c_sdhci_setname(int id, char *name)
+{
+	switch (id) {
+#ifdef CONFIG_S3C_DEV_HSMMC
+	case 0:
+		s3c_device_hsmmc0.name = name;
+		break;
+#endif
+#ifdef CONFIG_S3C_DEV_HSMMC1
+	case 1:
+		s3c_device_hsmmc1.name = name;
+		break;
+#endif
+#ifdef CONFIG_S3C_DEV_HSMMC2
+	case 2:
+		s3c_device_hsmmc2.name = name;
+		break;
+#endif
+#ifdef CONFIG_S3C_DEV_HSMMC3
+	case 3:
+		s3c_device_hsmmc3.name = name;
+		break;
+#endif
+	}
+}
+
 #endif /* __PLAT_S3C_SDHCI_H */
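
The new s3c_sdhci_setname() helper lets board or SoC code point one of the
statically declared hsmmc platform devices at a differently named host
driver before the device is registered; for example (the driver name below
is made up for illustration):

#include <linux/init.h>
#include <plat/sdhci.h>

static void __init myboard_sdhci_init(void)
{
	/* Bind controller 0 to a differently named host driver before
	 * platform device registration. */
	s3c_sdhci_setname(0, "my-soc-sdhci");
}
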
diff --git a/arch/arm/plat-versatile/Kconfig b/arch/arm/plat-versatile/Kconfig
index 043f7b0..81ee7cc 100644
--- a/arch/arm/plat-versatile/Kconfig
+++ b/arch/arm/plat-versatile/Kconfig
@@ -5,6 +5,12 @@
 
 config PLAT_VERSATILE_FPGA_IRQ
 	bool
+	select IRQ_DOMAIN
+
+config PLAT_VERSATILE_FPGA_IRQ_NR
+       int
+       default 4
+       depends on PLAT_VERSATILE_FPGA_IRQ
 
 config PLAT_VERSATILE_LEDS
 	def_bool y if LEDS_CLASS
diff --git a/arch/arm/plat-versatile/fpga-irq.c b/arch/arm/plat-versatile/fpga-irq.c
index f0cc8e1..6e70d03 100644
--- a/arch/arm/plat-versatile/fpga-irq.c
+++ b/arch/arm/plat-versatile/fpga-irq.c
@@ -3,7 +3,10 @@
  */
 #include <linux/irq.h>
 #include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
 
+#include <asm/exception.h>
 #include <asm/mach/irq.h>
 #include <plat/fpga-irq.h>
 
@@ -12,10 +15,32 @@
 #define IRQ_ENABLE_SET		0x08
 #define IRQ_ENABLE_CLEAR	0x0c
 
+/**
+ * struct fpga_irq_data - irq data container for the FPGA IRQ controller
+ * @base: base address of the controller registers in virtual memory
+ * @irq_start: first IRQ number handled by this instance
+ * @chip: chip container for this instance
+ * @domain: IRQ domain for this instance
+ * @valid: mask for valid IRQs on this controller
+ * @used_irqs: number of active IRQs on this controller
+ */
+struct fpga_irq_data {
+	void __iomem *base;
+	unsigned int irq_start;
+	struct irq_chip chip;
+	u32 valid;
+	struct irq_domain *domain;
+	u8 used_irqs;
+};
+
+/* we cannot allocate memory when the controllers are initially registered */
+static struct fpga_irq_data fpga_irq_devices[CONFIG_PLAT_VERSATILE_FPGA_IRQ_NR];
+static int fpga_irq_id;
+
 static void fpga_irq_mask(struct irq_data *d)
 {
 	struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
-	u32 mask = 1 << (d->irq - f->irq_start);
+	u32 mask = 1 << d->hwirq;
 
 	writel(mask, f->base + IRQ_ENABLE_CLEAR);
 }
@@ -23,7 +48,7 @@
 static void fpga_irq_unmask(struct irq_data *d)
 {
 	struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
-	u32 mask = 1 << (d->irq - f->irq_start);
+	u32 mask = 1 << d->hwirq;
 
 	writel(mask, f->base + IRQ_ENABLE_SET);
 }
@@ -41,32 +66,93 @@
 	do {
 		irq = ffs(status) - 1;
 		status &= ~(1 << irq);
-
-		generic_handle_irq(irq + f->irq_start);
+		generic_handle_irq(irq_find_mapping(f->domain, irq));
 	} while (status);
 }
 
-void __init fpga_irq_init(int parent_irq, u32 valid, struct fpga_irq_data *f)
+/*
+ * Handle each interrupt in a single FPGA IRQ controller.  Returns non-zero
+ * if we've handled at least one interrupt.  The status register is re-read
+ * on each pass and the lowest pending interrupt is handled, LSB first.
+ */
+static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
 {
-	unsigned int i;
+	int handled = 0;
+	int irq;
+	u32 status;
 
+	while ((status  = readl(f->base + IRQ_STATUS))) {
+		irq = ffs(status) - 1;
+		handle_IRQ(irq_find_mapping(f->domain, irq), regs);
+		handled = 1;
+	}
+
+	return handled;
+}
+
+/*
+ * Keep iterating over all registered FPGA IRQ controllers until there are
+ * no pending interrupts.
+ */
+asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
+{
+	int i, handled;
+
+	do {
+		for (i = 0, handled = 0; i < fpga_irq_id; ++i)
+			handled |= handle_one_fpga(&fpga_irq_devices[i], regs);
+	} while (handled);
+}
+
+static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
+		irq_hw_number_t hwirq)
+{
+	struct fpga_irq_data *f = d->host_data;
+
+	/* Skip invalid IRQs, only register handlers for the real ones */
+	if (!(f->valid & (1 << hwirq)))
+		return -ENOTSUPP;
+	irq_set_chip_data(irq, f);
+	irq_set_chip_and_handler(irq, &f->chip,
+				handle_level_irq);
+	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	f->used_irqs++;
+	return 0;
+}
+
+static struct irq_domain_ops fpga_irqdomain_ops = {
+	.map = fpga_irqdomain_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
+			  int parent_irq, u32 valid, struct device_node *node)
+{
+	struct fpga_irq_data *f;
+
+	if (fpga_irq_id >= ARRAY_SIZE(fpga_irq_devices)) {
+		printk(KERN_ERR "%s: too few FPGA IRQ controllers, increase CONFIG_PLAT_VERSATILE_FPGA_IRQ_NR\n", __func__);
+		return;
+	}
+
+	f = &fpga_irq_devices[fpga_irq_id];
+	f->base = base;
+	f->irq_start = irq_start;
+	f->chip.name = name;
 	f->chip.irq_ack = fpga_irq_mask;
 	f->chip.irq_mask = fpga_irq_mask;
 	f->chip.irq_unmask = fpga_irq_unmask;
+	f->valid = valid;
 
 	if (parent_irq != -1) {
 		irq_set_handler_data(parent_irq, f);
 		irq_set_chained_handler(parent_irq, fpga_irq_handle);
 	}
 
-	for (i = 0; i < 32; i++) {
-		if (valid & (1 << i)) {
-			unsigned int irq = f->irq_start + i;
+	f->domain = irq_domain_add_legacy(node, fls(valid), f->irq_start, 0,
+					  &fpga_irqdomain_ops, f);
+	pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs\n",
+		fpga_irq_id, name, base, f->used_irqs);
 
-			irq_set_chip_data(irq, f);
-			irq_set_chip_and_handler(irq, &f->chip,
-						 handle_level_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-		}
-	}
+	fpga_irq_id++;
 }
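With the controller bookkeeping moved into fpga-irq.c, callers now pass the register base, a name, the first Linux IRQ, an optional parent IRQ, the valid mask and an optional device_node instead of supplying their own fpga_irq_data; machines that use the FPGA controller as the primary interrupt controller can also enter through fpga_handle_irq. A hedged sketch of a legacy (non-DT) registration; the base address, IRQ base and valid mask below are placeholders, not values from this patch:

#include <linux/init.h>
#include <linux/io.h>
#include <plat/fpga-irq.h>

#define EXAMPLE_PIC_BASE	((void __iomem *)0xf1140000)	/* placeholder */
#define EXAMPLE_IRQ_PIC_START	32				/* placeholder */
#define EXAMPLE_PIC_VALID_MASK	0x003fffffU			/* placeholder */

static void __init example_init_irq(void)
{
	/* No chained parent IRQ (-1) and no device tree node (NULL). */
	fpga_irq_init(EXAMPLE_PIC_BASE, "PIC", EXAMPLE_IRQ_PIC_START,
		      -1, EXAMPLE_PIC_VALID_MASK, NULL);
}

/*
 * In the machine descriptor, the multiplexed entry point added above
 * would be wired up as:
 *	.handle_irq	= fpga_handle_irq,
 */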
diff --git a/arch/arm/plat-versatile/include/plat/fpga-irq.h b/arch/arm/plat-versatile/include/plat/fpga-irq.h
index 627fafd..91bcfb6 100644
--- a/arch/arm/plat-versatile/include/plat/fpga-irq.h
+++ b/arch/arm/plat-versatile/include/plat/fpga-irq.h
@@ -1,12 +1,11 @@
 #ifndef PLAT_FPGA_IRQ_H
 #define PLAT_FPGA_IRQ_H
 
-struct fpga_irq_data {
-	void __iomem *base;
-	unsigned int irq_start;
-	struct irq_chip chip;
-};
+struct device_node;
+struct pt_regs;
 
-void fpga_irq_init(int, u32, struct fpga_irq_data *);
+void fpga_handle_irq(struct pt_regs *regs);
+void fpga_irq_init(void __iomem *, const char *, int, int, u32,
+		struct device_node *node);
 
 #endif
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index f9c9f33..2997e56 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -16,7 +16,7 @@
 # are merged into mainline or have been edited in the machine database
 # within the last 12 months.  References to machine_is_NAME() do not count!
 #
-# Last update: Tue Dec 6 11:07:38 2011
+# Last update: Thu Apr 26 08:44:23 2012
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
@@ -205,6 +205,7 @@
 snapper_cl15		MACH_SNAPPER_CL15	SNAPPER_CL15		986
 omap_palmz71		MACH_OMAP_PALMZ71	OMAP_PALMZ71		993
 smdk2412		MACH_SMDK2412		SMDK2412		1009
+bkde303			MACH_BKDE303		BKDE303			1021
 smdk2413		MACH_SMDK2413		SMDK2413		1022
 aml_m5900		MACH_AML_M5900		AML_M5900		1024
 balloon3		MACH_BALLOON3		BALLOON3		1029
@@ -381,8 +382,6 @@
 at91sam9g10ek		MACH_AT91SAM9G10EK	AT91SAM9G10EK		2159
 omap_4430sdp		MACH_OMAP_4430SDP	OMAP_4430SDP		2160
 magx_zn5		MACH_MAGX_ZN5		MAGX_ZN5		2162
-btmavb101		MACH_BTMAVB101		BTMAVB101		2172
-btmawb101		MACH_BTMAWB101		BTMAWB101		2173
 tx25			MACH_TX25		TX25			2177
 omap3_torpedo		MACH_OMAP3_TORPEDO	OMAP3_TORPEDO		2178
 anw6410			MACH_ANW6410		ANW6410			2183
@@ -397,7 +396,6 @@
 net5big_v2		MACH_NET5BIG_V2		NET5BIG_V2		2206
 inetspace_v2		MACH_INETSPACE_V2	INETSPACE_V2		2208
 at91sam9g45ekes		MACH_AT91SAM9G45EKES	AT91SAM9G45EKES		2212
-pc7302			MACH_PC7302		PC7302			2220
 spear600		MACH_SPEAR600		SPEAR600		2236
 spear300		MACH_SPEAR300		SPEAR300		2237
 lilly1131		MACH_LILLY1131		LILLY1131		2239
@@ -407,7 +405,6 @@
 bigdisk			MACH_BIGDISK		BIGDISK			2283
 at91sam9g20ek_2mmc	MACH_AT91SAM9G20EK_2MMC	AT91SAM9G20EK_2MMC	2288
 bcmring			MACH_BCMRING		BCMRING			2289
-dp6xx			MACH_DP6XX		DP6XX			2302
 mahimahi		MACH_MAHIMAHI		MAHIMAHI		2304
 smdk6442		MACH_SMDK6442		SMDK6442		2324
 openrd_base		MACH_OPENRD_BASE	OPENRD_BASE		2325
@@ -444,8 +441,6 @@
 smartq5			MACH_SMARTQ5		SMARTQ5			2534
 davinci_dm6467tevm	MACH_DAVINCI_DM6467TEVM	DAVINCI_DM6467TEVM	2548
 mxt_td60		MACH_MXT_TD60		MXT_TD60		2550
-riot_bei2		MACH_RIOT_BEI2		RIOT_BEI2		2576
-riot_x37		MACH_RIOT_X37		RIOT_X37		2578
 pca101			MACH_PCA101		PCA101			2595
 capc7117		MACH_CAPC7117		CAPC7117		2612
 icontrol		MACH_ICONTROL		ICONTROL		2624
@@ -460,7 +455,6 @@
 aquila			MACH_AQUILA		AQUILA			2676
 esata_sheevaplug	MACH_ESATA_SHEEVAPLUG	ESATA_SHEEVAPLUG	2678
 msm7x30_surf		MACH_MSM7X30_SURF	MSM7X30_SURF		2679
-ea2478devkit		MACH_EA2478DEVKIT	EA2478DEVKIT		2683
 terastation_wxl		MACH_TERASTATION_WXL	TERASTATION_WXL		2697
 msm7x25_surf		MACH_MSM7X25_SURF	MSM7X25_SURF		2703
 msm7x25_ffa		MACH_MSM7X25_FFA	MSM7X25_FFA		2704
@@ -479,8 +473,6 @@
 msm8x60_surf		MACH_MSM8X60_SURF	MSM8X60_SURF		2755
 msm8x60_sim		MACH_MSM8X60_SIM	MSM8X60_SIM		2756
 tcc8000_sdk		MACH_TCC8000_SDK	TCC8000_SDK		2758
-nanos			MACH_NANOS		NANOS			2759
-stamp9g45		MACH_STAMP9G45		STAMP9G45		2761
 cns3420vb		MACH_CNS3420VB		CNS3420VB		2776
 omap4_panda		MACH_OMAP4_PANDA	OMAP4_PANDA		2791
 ti8168evm		MACH_TI8168EVM		TI8168EVM		2800
@@ -490,12 +482,9 @@
 eukrea_cpuimx51sd	MACH_EUKREA_CPUIMX51SD	EUKREA_CPUIMX51SD	2822
 eukrea_cpuimx51		MACH_EUKREA_CPUIMX51	EUKREA_CPUIMX51		2823
 smdkc210		MACH_SMDKC210		SMDKC210		2838
-pca102			MACH_PCA102		PCA102			2843
+pcaal1			MACH_PCAAL1		PCAAL1			2843
 t5325			MACH_T5325		T5325			2846
 income			MACH_INCOME		INCOME			2849
-vvbox_sdorig2		MACH_VVBOX_SDORIG2	VVBOX_SDORIG2		2857
-vvbox_sdlite2		MACH_VVBOX_SDLITE2	VVBOX_SDLITE2		2858
-vvbox_sdpro4		MACH_VVBOX_SDPRO4	VVBOX_SDPRO4		2859
 mx257sx			MACH_MX257SX		MX257SX			2861
 goni			MACH_GONI		GONI			2862
 bv07			MACH_BV07		BV07			2882
@@ -504,6 +493,7 @@
 miccpt			MACH_MICCPT		MICCPT			2886
 mic256			MACH_MIC256		MIC256			2887
 u5500			MACH_U5500		U5500			2890
+pov15hd			MACH_POV15HD		POV15HD			2910
 linkstation_lschl	MACH_LINKSTATION_LSCHL	LINKSTATION_LSCHL	2913
 smdkv310		MACH_SMDKV310		SMDKV310		2925
 wm8505_7in_netbook	MACH_WM8505_7IN_NETBOOK	WM8505_7IN_NETBOOK	2928
@@ -537,243 +527,24 @@
 mackerel		MACH_MACKEREL		MACKEREL		3211
 kaen			MACH_KAEN		KAEN			3217
 nokia_rm680		MACH_NOKIA_RM680	NOKIA_RM680		3220
-dm6446_adbox		MACH_DM6446_ADBOX	DM6446_ADBOX		3226
-quad_salsa		MACH_QUAD_SALSA		QUAD_SALSA		3227
-abb_gma_1_1		MACH_ABB_GMA_1_1	ABB_GMA_1_1		3228
-svcid			MACH_SVCID		SVCID			3229
 msm8960_sim		MACH_MSM8960_SIM	MSM8960_SIM		3230
 msm8960_rumi3		MACH_MSM8960_RUMI3	MSM8960_RUMI3		3231
-icon_g			MACH_ICON_G		ICON_G			3232
-mb3			MACH_MB3		MB3			3233
 gsia18s			MACH_GSIA18S		GSIA18S			3234
-pivicc			MACH_PIVICC		PIVICC			3235
-pcm048			MACH_PCM048		PCM048			3236
-dds			MACH_DDS		DDS			3237
-chalten_xa1		MACH_CHALTEN_XA1	CHALTEN_XA1		3238
-ts48xx			MACH_TS48XX		TS48XX			3239
-tonga2_tfttimer		MACH_TONGA2_TFTTIMER	TONGA2_TFTTIMER		3240
-whistler		MACH_WHISTLER		WHISTLER		3241
-asl_phoenix		MACH_ASL_PHOENIX	ASL_PHOENIX		3242
-at91sam9263otlite	MACH_AT91SAM9263OTLITE	AT91SAM9263OTLITE	3243
-ddplug			MACH_DDPLUG		DDPLUG			3244
-d2plug			MACH_D2PLUG		D2PLUG			3245
-kzm9d			MACH_KZM9D		KZM9D			3246
-verdi_lte		MACH_VERDI_LTE		VERDI_LTE		3247
-nanozoom		MACH_NANOZOOM		NANOZOOM		3248
-dm3730_som_lv		MACH_DM3730_SOM_LV	DM3730_SOM_LV		3249
-dm3730_torpedo		MACH_DM3730_TORPEDO	DM3730_TORPEDO		3250
-anchovy			MACH_ANCHOVY		ANCHOVY			3251
-re2rev20		MACH_RE2REV20		RE2REV20		3253
-re2rev21		MACH_RE2REV21		RE2REV21		3254
-cns21xx			MACH_CNS21XX		CNS21XX			3255
-rider			MACH_RIDER		RIDER			3257
-nsk330			MACH_NSK330		NSK330			3258
-cns2133evb		MACH_CNS2133EVB		CNS2133EVB		3259
-z3_816x_mod		MACH_Z3_816X_MOD	Z3_816X_MOD		3260
-z3_814x_mod		MACH_Z3_814X_MOD	Z3_814X_MOD		3261
-beect			MACH_BEECT		BEECT			3262
-dma_thunderbug		MACH_DMA_THUNDERBUG	DMA_THUNDERBUG		3263
-omn_at91sam9g20		MACH_OMN_AT91SAM9G20	OMN_AT91SAM9G20		3264
-mx25_e2s_uc		MACH_MX25_E2S_UC	MX25_E2S_UC		3265
-mione			MACH_MIONE		MIONE			3266
-top9000_tcu		MACH_TOP9000_TCU	TOP9000_TCU		3267
-top9000_bsl		MACH_TOP9000_BSL	TOP9000_BSL		3268
-kingdom			MACH_KINGDOM		KINGDOM			3269
-armadillo460		MACH_ARMADILLO460	ARMADILLO460		3270
-lq2			MACH_LQ2		LQ2			3271
-sweda_tms2		MACH_SWEDA_TMS2		SWEDA_TMS2		3272
 mx53_loco		MACH_MX53_LOCO		MX53_LOCO		3273
-acer_a8			MACH_ACER_A8		ACER_A8			3275
-acer_gauguin		MACH_ACER_GAUGUIN	ACER_GAUGUIN		3276
-guppy			MACH_GUPPY		GUPPY			3277
-mx61_ard		MACH_MX61_ARD		MX61_ARD		3278
 tx53			MACH_TX53		TX53			3279
-omapl138_case_a3	MACH_OMAPL138_CASE_A3	OMAPL138_CASE_A3	3280
-uemd			MACH_UEMD		UEMD			3281
-ccwmx51mut		MACH_CCWMX51MUT		CCWMX51MUT		3282
-rockhopper		MACH_ROCKHOPPER		ROCKHOPPER		3283
 encore			MACH_ENCORE		ENCORE			3284
-hkdkc100		MACH_HKDKC100		HKDKC100		3285
-ts42xx			MACH_TS42XX		TS42XX			3286
-aebl			MACH_AEBL		AEBL			3287
 wario			MACH_WARIO		WARIO			3288
-gfs_spm			MACH_GFS_SPM		GFS_SPM			3289
 cm_t3730		MACH_CM_T3730		CM_T3730		3290
-isc3			MACH_ISC3		ISC3			3291
-rascal			MACH_RASCAL		RASCAL			3292
 hrefv60			MACH_HREFV60		HREFV60			3293
-tpt_2_0			MACH_TPT_2_0		TPT_2_0			3294
-splendor		MACH_SPLENDOR		SPLENDOR		3296
-msm8x60_qt		MACH_MSM8X60_QT		MSM8X60_QT		3298
-htc_hd_mini		MACH_HTC_HD_MINI	HTC_HD_MINI		3299
-athene			MACH_ATHENE		ATHENE			3300
-deep_r_ek_1		MACH_DEEP_R_EK_1	DEEP_R_EK_1		3301
-vivow_ct		MACH_VIVOW_CT		VIVOW_CT		3302
-nery_1000		MACH_NERY_1000		NERY_1000		3303
-rfl109145_ssrv		MACH_RFL109145_SSRV	RFL109145_SSRV		3304
-nmh			MACH_NMH		NMH			3305
-wn802t			MACH_WN802T		WN802T			3306
-dragonet		MACH_DRAGONET		DRAGONET		3307
-at91sam9263desk16l	MACH_AT91SAM9263DESK16L	AT91SAM9263DESK16L	3309
-bcmhana_sv		MACH_BCMHANA_SV		BCMHANA_SV		3310
-bcmhana_tablet		MACH_BCMHANA_TABLET	BCMHANA_TABLET		3311
-koi			MACH_KOI		KOI			3312
-ts4800			MACH_TS4800		TS4800			3313
-tqma9263		MACH_TQMA9263		TQMA9263		3314
-holiday			MACH_HOLIDAY		HOLIDAY			3315
-pcats_overlay		MACH_PCATS_OVERLAY	PCATS_OVERLAY		3317
-hwgw6410		MACH_HWGW6410		HWGW6410		3318
-shenzhou		MACH_SHENZHOU		SHENZHOU		3319
-cwme9210		MACH_CWME9210		CWME9210		3320
-cwme9210js		MACH_CWME9210JS		CWME9210JS		3321
-colibri_tegra2		MACH_COLIBRI_TEGRA2	COLIBRI_TEGRA2		3323
-w21			MACH_W21		W21			3324
-polysat1		MACH_POLYSAT1		POLYSAT1		3325
-dataway			MACH_DATAWAY		DATAWAY			3326
-cobral138		MACH_COBRAL138		COBRAL138		3327
-roverpcs8		MACH_ROVERPCS8		ROVERPCS8		3328
-marvelc			MACH_MARVELC		MARVELC			3329
-navefihid		MACH_NAVEFIHID		NAVEFIHID		3330
-dm365_cv100		MACH_DM365_CV100	DM365_CV100		3331
-able			MACH_ABLE		ABLE			3332
-legacy			MACH_LEGACY		LEGACY			3333
-icong			MACH_ICONG		ICONG			3334
-rover_g8		MACH_ROVER_G8		ROVER_G8		3335
-t5388p			MACH_T5388P		T5388P			3336
-dingo			MACH_DINGO		DINGO			3337
-goflexhome		MACH_GOFLEXHOME		GOFLEXHOME		3338
-lanreadyfn511		MACH_LANREADYFN511	LANREADYFN511		3340
-omap3_baia		MACH_OMAP3_BAIA		OMAP3_BAIA		3341
-omap3smartdisplay	MACH_OMAP3SMARTDISPLAY	OMAP3SMARTDISPLAY	3342
-xilinx			MACH_XILINX		XILINX			3343
-a2f			MACH_A2F		A2F			3344
-sky25			MACH_SKY25		SKY25			3345
-ccmx53			MACH_CCMX53		CCMX53			3346
-ccmx53js		MACH_CCMX53JS		CCMX53JS		3347
-ccwmx53			MACH_CCWMX53		CCWMX53			3348
-ccwmx53js		MACH_CCWMX53JS		CCWMX53JS		3349
-frisms			MACH_FRISMS		FRISMS			3350
-msm7x27a_ffa		MACH_MSM7X27A_FFA	MSM7X27A_FFA		3351
-msm7x27a_surf		MACH_MSM7X27A_SURF	MSM7X27A_SURF		3352
-msm7x27a_rumi3		MACH_MSM7X27A_RUMI3	MSM7X27A_RUMI3		3353
-dimmsam9g20		MACH_DIMMSAM9G20	DIMMSAM9G20		3354
-dimm_imx28		MACH_DIMM_IMX28		DIMM_IMX28		3355
-amk_a4			MACH_AMK_A4		AMK_A4			3356
-gnet_sgme		MACH_GNET_SGME		GNET_SGME		3357
-shooter_u		MACH_SHOOTER_U		SHOOTER_U		3358
-vmx53			MACH_VMX53		VMX53			3359
-rhino			MACH_RHINO		RHINO			3360
 armlex4210		MACH_ARMLEX4210		ARMLEX4210		3361
-swarcoextmodem		MACH_SWARCOEXTMODEM	SWARCOEXTMODEM		3362
 snowball		MACH_SNOWBALL		SNOWBALL		3363
-pcm049			MACH_PCM049		PCM049			3364
-vigor			MACH_VIGOR		VIGOR			3365
-oslo_amundsen		MACH_OSLO_AMUNDSEN	OSLO_AMUNDSEN		3366
-gsl_diamond		MACH_GSL_DIAMOND	GSL_DIAMOND		3367
-cv2201			MACH_CV2201		CV2201			3368
-cv2202			MACH_CV2202		CV2202			3369
-cv2203			MACH_CV2203		CV2203			3370
-vit_ibox		MACH_VIT_IBOX		VIT_IBOX		3371
-dm6441_esp		MACH_DM6441_ESP		DM6441_ESP		3372
-at91sam9x5ek		MACH_AT91SAM9X5EK	AT91SAM9X5EK		3373
-libra			MACH_LIBRA		LIBRA			3374
-easycrrh		MACH_EASYCRRH		EASYCRRH		3375
-tripel			MACH_TRIPEL		TRIPEL			3376
-endian_mini		MACH_ENDIAN_MINI	ENDIAN_MINI		3377
 xilinx_ep107		MACH_XILINX_EP107	XILINX_EP107		3378
 nuri			MACH_NURI		NURI			3379
-janus			MACH_JANUS		JANUS			3380
-ddnas			MACH_DDNAS		DDNAS			3381
-tag			MACH_TAG		TAG			3382
-tagw			MACH_TAGW		TAGW			3383
-nitrogen_vm_imx51	MACH_NITROGEN_VM_IMX51	NITROGEN_VM_IMX51	3384
-viprinet		MACH_VIPRINET		VIPRINET		3385
-bockw			MACH_BOCKW		BOCKW			3386
-eva2000			MACH_EVA2000		EVA2000			3387
-steelyard		MACH_STEELYARD		STEELYARD		3388
-nsslsboard		MACH_NSSLSBOARD		NSSLSBOARD		3392
-geneva_b5		MACH_GENEVA_B5		GENEVA_B5		3393
-spear1340		MACH_SPEAR1340		SPEAR1340		3394
-rexmas			MACH_REXMAS		REXMAS			3395
-msm8960_cdp		MACH_MSM8960_CDP	MSM8960_CDP		3396
-msm8960_fluid		MACH_MSM8960_FLUID	MSM8960_FLUID		3398
-msm8960_apq		MACH_MSM8960_APQ	MSM8960_APQ		3399
-helios_v2		MACH_HELIOS_V2		HELIOS_V2		3400
-mif10p			MACH_MIF10P		MIF10P			3401
-iam28			MACH_IAM28		IAM28			3402
-picasso			MACH_PICASSO		PICASSO			3403
-mr301a			MACH_MR301A		MR301A			3404
-notle			MACH_NOTLE		NOTLE			3405
-eelx2			MACH_EELX2		EELX2			3406
-moon			MACH_MOON		MOON			3407
-ruby			MACH_RUBY		RUBY			3408
-goldengate		MACH_GOLDENGATE		GOLDENGATE		3409
-ctbu_gen2		MACH_CTBU_GEN2		CTBU_GEN2		3410
-kmp_am17_01		MACH_KMP_AM17_01	KMP_AM17_01		3411
 wtplug			MACH_WTPLUG		WTPLUG			3412
-mx27su2			MACH_MX27SU2		MX27SU2			3413
-nb31			MACH_NB31		NB31			3414
-hjsdu			MACH_HJSDU		HJSDU			3415
-td3_rev1		MACH_TD3_REV1		TD3_REV1		3416
-eag_ci4000		MACH_EAG_CI4000		EAG_CI4000		3417
-net5big_nand_v2		MACH_NET5BIG_NAND_V2	NET5BIG_NAND_V2		3418
-cpx2			MACH_CPX2		CPX2			3419
-net2big_nand_v2		MACH_NET2BIG_NAND_V2	NET2BIG_NAND_V2		3420
-ecuv5			MACH_ECUV5		ECUV5			3421
-hsgx6d			MACH_HSGX6D		HSGX6D			3422
-dawad7			MACH_DAWAD7		DAWAD7			3423
-sam9repeater		MACH_SAM9REPEATER	SAM9REPEATER		3424
-gt_i5700		MACH_GT_I5700		GT_I5700		3425
-ctera_plug_c2		MACH_CTERA_PLUG_C2	CTERA_PLUG_C2		3426
-marvelct		MACH_MARVELCT		MARVELCT		3427
-ag11005			MACH_AG11005		AG11005			3428
-vangogh			MACH_VANGOGH		VANGOGH			3430
-matrix505		MACH_MATRIX505		MATRIX505		3431
-oce_nigma		MACH_OCE_NIGMA		OCE_NIGMA		3432
-t55			MACH_T55		T55			3433
-bio3k			MACH_BIO3K		BIO3K			3434
-expressct		MACH_EXPRESSCT		EXPRESSCT		3435
-cardhu			MACH_CARDHU		CARDHU			3436
-aruba			MACH_ARUBA		ARUBA			3437
-bonaire			MACH_BONAIRE		BONAIRE			3438
-nuc700evb		MACH_NUC700EVB		NUC700EVB		3439
-nuc710evb		MACH_NUC710EVB		NUC710EVB		3440
-nuc740evb		MACH_NUC740EVB		NUC740EVB		3441
-nuc745evb		MACH_NUC745EVB		NUC745EVB		3442
-transcede		MACH_TRANSCEDE		TRANSCEDE		3443
-mora			MACH_MORA		MORA			3444
-nda_evm			MACH_NDA_EVM		NDA_EVM			3445
-timu			MACH_TIMU		TIMU			3446
-expressh		MACH_EXPRESSH		EXPRESSH		3447
 veridis_a300		MACH_VERIDIS_A300	VERIDIS_A300		3448
-dm368_leopard		MACH_DM368_LEOPARD	DM368_LEOPARD		3449
-omap_mcop		MACH_OMAP_MCOP		OMAP_MCOP		3450
-tritip			MACH_TRITIP		TRITIP			3451
-sm1k			MACH_SM1K		SM1K			3452
-monch			MACH_MONCH		MONCH			3453
-curacao			MACH_CURACAO		CURACAO			3454
 origen			MACH_ORIGEN		ORIGEN			3455
-epc10			MACH_EPC10		EPC10			3456
-sgh_i740		MACH_SGH_I740		SGH_I740		3457
-tuna			MACH_TUNA		TUNA			3458
-mx51_tulip		MACH_MX51_TULIP		MX51_TULIP		3459
-mx51_aster7		MACH_MX51_ASTER7	MX51_ASTER7		3460
-acro37xbrd		MACH_ACRO37XBRD		ACRO37XBRD		3461
-elke			MACH_ELKE		ELKE			3462
-sbc6000x		MACH_SBC6000X		SBC6000X		3463
-r1801e			MACH_R1801E		R1801E			3464
-h1600			MACH_H1600		H1600			3465
-mini210			MACH_MINI210		MINI210			3466
-mini8168		MACH_MINI8168		MINI8168		3467
-pc7308			MACH_PC7308		PC7308			3468
-kmm2m01			MACH_KMM2M01		KMM2M01			3470
-mx51erebus		MACH_MX51EREBUS		MX51EREBUS		3471
 wm8650refboard		MACH_WM8650REFBOARD	WM8650REFBOARD		3472
-tuxrail			MACH_TUXRAIL		TUXRAIL			3473
-arthur			MACH_ARTHUR		ARTHUR			3474
-doorboy			MACH_DOORBOY		DOORBOY			3475
 xarina			MACH_XARINA		XARINA			3476
-roverx7			MACH_ROVERX7		ROVERX7			3477
 sdvr			MACH_SDVR		SDVR			3478
 acer_maya		MACH_ACER_MAYA		ACER_MAYA		3479
 pico			MACH_PICO		PICO			3480
@@ -999,6 +770,7 @@
 amp			MACH_AMP		AMP			3709
 gnet_amp		MACH_GNET_AMP		GNET_AMP		3710
 toques			MACH_TOQUES		TOQUES			3711
+apx4devkit		MACH_APX4DEVKIT		APX4DEVKIT		3712
 dct_storm		MACH_DCT_STORM		DCT_STORM		3713
 owl			MACH_OWL		OWL			3715
 cogent_csb1741		MACH_COGENT_CSB1741	COGENT_CSB1741		3716
@@ -1063,7 +835,6 @@
 omap3_devkit8500	MACH_OMAP3_DEVKIT8500	OMAP3_DEVKIT8500	3779
 edgetd			MACH_EDGETD		EDGETD			3780
 copperyard		MACH_COPPERYARD		COPPERYARD		3781
-edge			MACH_EDGE		EDGE			3782
 edge_u			MACH_EDGE_U		EDGE_U			3783
 edge_td			MACH_EDGE_TD		EDGE_TD			3784
 wdss			MACH_WDSS		WDSS			3785
@@ -1169,3 +940,269 @@
 pov2			MACH_POV2		POV2			3889
 ipod_touch_2g		MACH_IPOD_TOUCH_2G	IPOD_TOUCH_2G		3890
 da850_pqab		MACH_DA850_PQAB		DA850_PQAB		3891
+fermi			MACH_FERMI		FERMI			3892
+ccardwmx28		MACH_CCARDWMX28		CCARDWMX28		3893
+ccardmx28		MACH_CCARDMX28		CCARDMX28		3894
+fs20_fcm2050		MACH_FS20_FCM2050	FS20_FCM2050		3895
+kinetis			MACH_KINETIS		KINETIS			3896
+kai			MACH_KAI		KAI			3897
+bcthb2			MACH_BCTHB2		BCTHB2			3898
+inels3_cu		MACH_INELS3_CU		INELS3_CU		3899
+da850_apollo		MACH_DA850_APOLLO	DA850_APOLLO		3901
+tracnas			MACH_TRACNAS		TRACNAS			3902
+mityarm335x		MACH_MITYARM335X	MITYARM335X		3903
+xcgz7x			MACH_XCGZ7X		XCGZ7X			3904
+cubox			MACH_CUBOX		CUBOX			3905
+terminator		MACH_TERMINATOR		TERMINATOR		3906
+eye03			MACH_EYE03		EYE03			3907
+kota3			MACH_KOTA3		KOTA3			3908
+pscpe			MACH_PSCPE		PSCPE			3910
+akt1100			MACH_AKT1100		AKT1100			3911
+pcaaxl2			MACH_PCAAXL2		PCAAXL2			3912
+primodd_ct		MACH_PRIMODD_CT		PRIMODD_CT		3913
+nsbc			MACH_NSBC		NSBC			3914
+meson2_skt		MACH_MESON2_SKT		MESON2_SKT		3915
+meson2_ref		MACH_MESON2_REF		MESON2_REF		3916
+ccardwmx28js		MACH_CCARDWMX28JS	CCARDWMX28JS		3917
+ccardmx28js		MACH_CCARDMX28JS	CCARDMX28JS		3918
+indico			MACH_INDICO		INDICO			3919
+msm8960dt		MACH_MSM8960DT		MSM8960DT		3920
+primods			MACH_PRIMODS		PRIMODS			3921
+beluga_m1388		MACH_BELUGA_M1388	BELUGA_M1388		3922
+primotd			MACH_PRIMOTD		PRIMOTD			3923
+varan_master		MACH_VARAN_MASTER	VARAN_MASTER		3924
+primodd			MACH_PRIMODD		PRIMODD			3925
+jetduo			MACH_JETDUO		JETDUO			3926
+mx53_umobo		MACH_MX53_UMOBO		MX53_UMOBO		3927
+trats			MACH_TRATS		TRATS			3928
+starcraft		MACH_STARCRAFT		STARCRAFT		3929
+qseven_tegra2		MACH_QSEVEN_TEGRA2	QSEVEN_TEGRA2		3930
+lichee_sun4i_devbd	MACH_LICHEE_SUN4I_DEVBD	LICHEE_SUN4I_DEVBD	3931
+movenow			MACH_MOVENOW		MOVENOW			3932
+golf_u			MACH_GOLF_U		GOLF_U			3933
+msm7627a_evb		MACH_MSM7627A_EVB	MSM7627A_EVB		3934
+rambo			MACH_RAMBO		RAMBO			3935
+golfu			MACH_GOLFU		GOLFU			3936
+mango310		MACH_MANGO310		MANGO310		3937
+dns343			MACH_DNS343		DNS343			3938
+var_som_om44		MACH_VAR_SOM_OM44	VAR_SOM_OM44		3939
+naon			MACH_NAON		NAON			3940
+vp4000			MACH_VP4000		VP4000			3941
+impcard			MACH_IMPCARD		IMPCARD			3942
+smoovcam		MACH_SMOOVCAM		SMOOVCAM		3943
+cobham3725		MACH_COBHAM3725		COBHAM3725		3944
+cobham3730		MACH_COBHAM3730		COBHAM3730		3945
+cobham3703		MACH_COBHAM3703		COBHAM3703		3946
+quetzal			MACH_QUETZAL		QUETZAL			3947
+apq8064_cdp		MACH_APQ8064_CDP	APQ8064_CDP		3948
+apq8064_mtp		MACH_APQ8064_MTP	APQ8064_MTP		3949
+apq8064_fluid		MACH_APQ8064_FLUID	APQ8064_FLUID		3950
+apq8064_liquid		MACH_APQ8064_LIQUID	APQ8064_LIQUID		3951
+mango210		MACH_MANGO210		MANGO210		3952
+mango100		MACH_MANGO100		MANGO100		3953
+mango24			MACH_MANGO24		MANGO24			3954
+mango64			MACH_MANGO64		MANGO64			3955
+nsa320			MACH_NSA320		NSA320			3956
+elv_ccu2		MACH_ELV_CCU2		ELV_CCU2		3957
+triton_x00		MACH_TRITON_X00		TRITON_X00		3958
+triton_1500_2000	MACH_TRITON_1500_2000	TRITON_1500_2000	3959
+pogoplugv4		MACH_POGOPLUGV4		POGOPLUGV4		3960
+venus_cl		MACH_VENUS_CL		VENUS_CL		3961
+vulcano_g20		MACH_VULCANO_G20	VULCANO_G20		3962
+sgs_i9100		MACH_SGS_I9100		SGS_I9100		3963
+stsv2			MACH_STSV2		STSV2			3964
+csb1724			MACH_CSB1724		CSB1724			3965
+omapl138_lcdk		MACH_OMAPL138_LCDK	OMAPL138_LCDK		3966
+pvd_mx25		MACH_PVD_MX25		PVD_MX25		3968
+meson6_skt		MACH_MESON6_SKT		MESON6_SKT		3969
+meson6_ref		MACH_MESON6_REF		MESON6_REF		3970
+pxm			MACH_PXM		PXM			3971
+pogoplugv3		MACH_POGOPLUGV3		POGOPLUGV3		3973
+mlp89626		MACH_MLP89626		MLP89626		3974
+iomegahmndce		MACH_IOMEGAHMNDCE	IOMEGAHMNDCE		3975
+pogoplugv3pci		MACH_POGOPLUGV3PCI	POGOPLUGV3PCI		3976
+bntv250			MACH_BNTV250		BNTV250			3977
+mx53_qseven		MACH_MX53_QSEVEN	MX53_QSEVEN		3978
+gtl_it1100		MACH_GTL_IT1100		GTL_IT1100		3979
+mx6q_sabresd		MACH_MX6Q_SABRESD	MX6Q_SABRESD		3980
+mt4			MACH_MT4		MT4			3981
+jumbo_d			MACH_JUMBO_D		JUMBO_D			3982
+jumbo_i			MACH_JUMBO_I		JUMBO_I			3983
+fs20_dmp		MACH_FS20_DMP		FS20_DMP		3984
+dns320			MACH_DNS320		DNS320			3985
+mx28bacos		MACH_MX28BACOS		MX28BACOS		3986
+tl80			MACH_TL80		TL80			3987
+polatis_nic_1001	MACH_POLATIS_NIC_1001	POLATIS_NIC_1001	3988
+tely			MACH_TELY		TELY			3989
+u8520			MACH_U8520		U8520			3990
+manta			MACH_MANTA		MANTA			3991
+mpq8064_cdp		MACH_MPQ8064_CDP	MPQ8064_CDP		3993
+mpq8064_dtv		MACH_MPQ8064_DTV	MPQ8064_DTV		3995
+dm368som		MACH_DM368SOM		DM368SOM		3996
+gprisb2			MACH_GPRISB2		GPRISB2			3997
+chammid			MACH_CHAMMID		CHAMMID			3998
+seoul2			MACH_SEOUL2		SEOUL2			3999
+omap4_nooktablet	MACH_OMAP4_NOOKTABLET	OMAP4_NOOKTABLET	4000
+aalto			MACH_AALTO		AALTO			4001
+metro			MACH_METRO		METRO			4002
+cydm3730		MACH_CYDM3730		CYDM3730		4003
+tqma53			MACH_TQMA53		TQMA53			4004
+msm7627a_qrd3		MACH_MSM7627A_QRD3	MSM7627A_QRD3		4005
+mx28_canby		MACH_MX28_CANBY		MX28_CANBY		4006
+tiger			MACH_TIGER		TIGER			4007
+pcats_9307_type_a	MACH_PCATS_9307_TYPE_A	PCATS_9307_TYPE_A	4008
+pcats_9307_type_o	MACH_PCATS_9307_TYPE_O	PCATS_9307_TYPE_O	4009
+pcats_9307_type_r	MACH_PCATS_9307_TYPE_R	PCATS_9307_TYPE_R	4010
+streamplug		MACH_STREAMPLUG		STREAMPLUG		4011
+icechicken_dev		MACH_ICECHICKEN_DEV	ICECHICKEN_DEV		4012
+hedgehog		MACH_HEDGEHOG		HEDGEHOG		4013
+yusend_obc		MACH_YUSEND_OBC		YUSEND_OBC		4014
+imxninja		MACH_IMXNINJA		IMXNINJA		4015
+omap4_jarod		MACH_OMAP4_JAROD	OMAP4_JAROD		4016
+eco5_pk			MACH_ECO5_PK		ECO5_PK			4017
+qj2440			MACH_QJ2440		QJ2440			4018
+mx6q_mercury		MACH_MX6Q_MERCURY	MX6Q_MERCURY		4019
+cm6810			MACH_CM6810		CM6810			4020
+omap4_torpedo		MACH_OMAP4_TORPEDO	OMAP4_TORPEDO		4021
+nsa310			MACH_NSA310		NSA310			4022
+tmx536			MACH_TMX536		TMX536			4023
+ktt20			MACH_KTT20		KTT20			4024
+dragonix		MACH_DRAGONIX		DRAGONIX		4025
+lungching		MACH_LUNGCHING		LUNGCHING		4026
+bulogics		MACH_BULOGICS		BULOGICS		4027
+mx535_sx		MACH_MX535_SX		MX535_SX		4028
+ngui3250		MACH_NGUI3250		NGUI3250		4029
+salutec_dac		MACH_SALUTEC_DAC	SALUTEC_DAC		4030
+loco			MACH_LOCO		LOCO			4031
+ctera_plug_usi		MACH_CTERA_PLUG_USI	CTERA_PLUG_USI		4032
+scepter			MACH_SCEPTER		SCEPTER			4033
+sga			MACH_SGA		SGA			4034
+p_81_j5			MACH_P_81_J5		P_81_J5			4035
+p_81_o4			MACH_P_81_O4		P_81_O4			4036
+msm8625_surf		MACH_MSM8625_SURF	MSM8625_SURF		4037
+carallon_shark		MACH_CARALLON_SHARK	CARALLON_SHARK		4038
+ordog			MACH_ORDOG		ORDOG			4040
+puente_io		MACH_PUENTE_IO		PUENTE_IO		4041
+msm8625_evb		MACH_MSM8625_EVB	MSM8625_EVB		4042
+ev_am1707		MACH_EV_AM1707		EV_AM1707		4043
+ev_am1707e2		MACH_EV_AM1707E2	EV_AM1707E2		4044
+ev_am3517e2		MACH_EV_AM3517E2	EV_AM3517E2		4045
+calabria		MACH_CALABRIA		CALABRIA		4046
+ev_imx287		MACH_EV_IMX287		EV_IMX287		4047
+erau			MACH_ERAU		ERAU			4048
+sichuan			MACH_SICHUAN		SICHUAN			4049
+davinci_da850		MACH_DAVINCI_DA850	DAVINCI_DA850		4051
+omap138_trunarc		MACH_OMAP138_TRUNARC	OMAP138_TRUNARC		4052
+bcm4761			MACH_BCM4761		BCM4761			4053
+picasso_e2		MACH_PICASSO_E2		PICASSO_E2		4054
+picasso_mf		MACH_PICASSO_MF		PICASSO_MF		4055
+miro			MACH_MIRO		MIRO			4056
+at91sam9g20ewon3	MACH_AT91SAM9G20EWON3	AT91SAM9G20EWON3	4057
+yoyo			MACH_YOYO		YOYO			4058
+windjkl			MACH_WINDJKL		WINDJKL			4059
+monarudo		MACH_MONARUDO		MONARUDO		4060
+batan			MACH_BATAN		BATAN			4061
+tadao			MACH_TADAO		TADAO			4062
+baso			MACH_BASO		BASO			4063
+mahon			MACH_MAHON		MAHON			4064
+villec2			MACH_VILLEC2		VILLEC2			4065
+asi1230			MACH_ASI1230		ASI1230			4066
+alaska			MACH_ALASKA		ALASKA			4067
+swarco_shdsl2		MACH_SWARCO_SHDSL2	SWARCO_SHDSL2		4068
+oxrtu			MACH_OXRTU		OXRTU			4069
+omap5_panda		MACH_OMAP5_PANDA	OMAP5_PANDA		4070
+c8000			MACH_C8000		C8000			4072
+bje_display3_5		MACH_BJE_DISPLAY3_5	BJE_DISPLAY3_5		4073
+picomod7		MACH_PICOMOD7		PICOMOD7		4074
+picocom5		MACH_PICOCOM5		PICOCOM5		4075
+qblissa8		MACH_QBLISSA8		QBLISSA8		4076
+armstonea8		MACH_ARMSTONEA8		ARMSTONEA8		4077
+netdcu14		MACH_NETDCU14		NETDCU14		4078
+at91sam9x5_epiphan	MACH_AT91SAM9X5_EPIPHAN	AT91SAM9X5_EPIPHAN	4079
+p2u			MACH_P2U		P2U			4080
+doris			MACH_DORIS		DORIS			4081
+j49			MACH_J49		J49			4082
+vdss2e			MACH_VDSS2E		VDSS2E			4083
+vc300			MACH_VC300		VC300			4084
+ns115_pad_test		MACH_NS115_PAD_TEST	NS115_PAD_TEST		4085
+ns115_pad_ref		MACH_NS115_PAD_REF	NS115_PAD_REF		4086
+ns115_phone_test	MACH_NS115_PHONE_TEST	NS115_PHONE_TEST	4087
+ns115_phone_ref		MACH_NS115_PHONE_REF	NS115_PHONE_REF		4088
+golfc			MACH_GOLFC		GOLFC			4089
+xerox_olympus		MACH_XEROX_OLYMPUS	XEROX_OLYMPUS		4090
+mx6sl_arm2		MACH_MX6SL_ARM2		MX6SL_ARM2		4091
+csb1701_csb1726		MACH_CSB1701_CSB1726	CSB1701_CSB1726		4092
+at91sam9xeek		MACH_AT91SAM9XEEK	AT91SAM9XEEK		4093
+ebv210			MACH_EBV210		EBV210			4094
+msm7627a_qrd7		MACH_MSM7627A_QRD7	MSM7627A_QRD7		4095
+svthin			MACH_SVTHIN		SVTHIN			4096
+duovero			MACH_DUOVERO		DUOVERO			4097
+chupacabra		MACH_CHUPACABRA		CHUPACABRA		4098
+scorpion		MACH_SCORPION		SCORPION		4099
+davinci_he_hmi10	MACH_DAVINCI_HE_HMI10	DAVINCI_HE_HMI10	4100
+topkick			MACH_TOPKICK		TOPKICK			4101
+m3_auguestrush		MACH_M3_AUGUESTRUSH	M3_AUGUESTRUSH		4102
+ipc335x			MACH_IPC335X		IPC335X			4103
+sun4i			MACH_SUN4I		SUN4I			4104
+imx233_olinuxino	MACH_IMX233_OLINUXINO	IMX233_OLINUXINO	4105
+k2_wl			MACH_K2_WL		K2_WL			4106
+k2_ul			MACH_K2_UL		K2_UL			4107
+k2_cl			MACH_K2_CL		K2_CL			4108
+minbari_w		MACH_MINBARI_W		MINBARI_W		4109
+minbari_m		MACH_MINBARI_M		MINBARI_M		4110
+k035			MACH_K035		K035			4111
+ariel			MACH_ARIEL		ARIEL			4112
+arielsaarc		MACH_ARIELSAARC		ARIELSAARC		4113
+arieldkb		MACH_ARIELDKB		ARIELDKB		4114
+armadillo810		MACH_ARMADILLO810	ARMADILLO810		4115
+tam335x			MACH_TAM335X		TAM335X			4116
+grouper			MACH_GROUPER		GROUPER			4117
+mpcsa21_9g20		MACH_MPCSA21_9G20	MPCSA21_9G20		4118
+m6u_cpu			MACH_M6U_CPU		M6U_CPU			4119
+davinci_dp10		MACH_DAVINCI_DP10	DAVINCI_DP10		4120
+ginkgo			MACH_GINKGO		GINKGO			4121
+cgt_qmx6		MACH_CGT_QMX6		CGT_QMX6		4122
+profpga			MACH_PROFPGA		PROFPGA			4123
+acfx100oc		MACH_ACFX100OC		ACFX100OC		4124
+acfx100nb		MACH_ACFX100NB		ACFX100NB		4125
+capricorn		MACH_CAPRICORN		CAPRICORN		4126
+pisces			MACH_PISCES		PISCES			4127
+aries			MACH_ARIES		ARIES			4128
+cancer			MACH_CANCER		CANCER			4129
+leo			MACH_LEO		LEO			4130
+virgo			MACH_VIRGO		VIRGO			4131
+sagittarius		MACH_SAGITTARIUS	SAGITTARIUS		4132
+devil			MACH_DEVIL		DEVIL			4133
+ballantines		MACH_BALLANTINES	BALLANTINES		4134
+omap3_procerusvpu	MACH_OMAP3_PROCERUSVPU	OMAP3_PROCERUSVPU	4135
+my27			MACH_MY27		MY27			4136
+sun6i			MACH_SUN6I		SUN6I			4137
+sun5i			MACH_SUN5I		SUN5I			4138
+mx512_mx		MACH_MX512_MX		MX512_MX		4139
+kzm9g			MACH_KZM9G		KZM9G			4140
+vdstbn			MACH_VDSTBN		VDSTBN			4141
+cfa10036		MACH_CFA10036		CFA10036		4142
+cfa10049		MACH_CFA10049		CFA10049		4143
+pcm051			MACH_PCM051		PCM051			4144
+vybrid_vf7xx		MACH_VYBRID_VF7XX	VYBRID_VF7XX		4145
+vybrid_vf6xx		MACH_VYBRID_VF6XX	VYBRID_VF6XX		4146
+vybrid_vf5xx		MACH_VYBRID_VF5XX	VYBRID_VF5XX		4147
+vybrid_vf4xx		MACH_VYBRID_VF4XX	VYBRID_VF4XX		4148
+aria_g25		MACH_ARIA_G25		ARIA_G25		4149
+bcm21553		MACH_BCM21553		BCM21553		4150
+smdk5410		MACH_SMDK5410		SMDK5410		4151
+lpc18xx			MACH_LPC18XX		LPC18XX			4152
+oratisparty		MACH_ORATISPARTY	ORATISPARTY		4153
+qseven			MACH_QSEVEN		QSEVEN			4154
+gmv_generic		MACH_GMV_GENERIC	GMV_GENERIC		4155
+th_link_eth		MACH_TH_LINK_ETH	TH_LINK_ETH		4156
+tn_muninn		MACH_TN_MUNINN		TN_MUNINN		4157
+rampage			MACH_RAMPAGE		RAMPAGE			4158
+visstrim_mv10		MACH_VISSTRIM_MV10	VISSTRIM_MV10		4159
+mx28_wilma		MACH_MX28_WILMA		MX28_WILMA		4164
+msm8625_ffa		MACH_MSM8625_FFA	MSM8625_FFA		4166
+vpu101			MACH_VPU101		VPU101			4167
+baileys			MACH_BAILEYS		BAILEYS			4169
+familybox		MACH_FAMILYBOX		FAMILYBOX		4170
+ensemble_mx35		MACH_ENSEMBLE_MX35	ENSEMBLE_MX35		4171
+sc_sps_1		MACH_SC_SPS_1		SC_SPS_1		4172
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 858748e..58696192 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -11,12 +11,15 @@
 #include <linux/types.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
+#include <linux/hardirq.h>
 #include <linux/kernel.h>
 #include <linux/notifier.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -238,11 +241,11 @@
 {
 	int i;
 
-	printk(KERN_ERR "VFP: Error: %s\n", reason);
-	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
+	pr_err("VFP: Error: %s\n", reason);
+	pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
 		fmrx(FPEXC), fmrx(FPSCR), inst);
 	for (i = 0; i < 32; i += 2)
-		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
+		pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
 		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
 }
 
@@ -430,7 +433,10 @@
 
 static void vfp_enable(void *unused)
 {
-	u32 access = get_copro_access();
+	u32 access;
+
+	BUG_ON(preemptible());
+	access = get_copro_access();
 
 	/*
 	 * Enable full access to VFP (cp10 and cp11)
@@ -446,7 +452,7 @@
 
 	/* if vfp is on, then save state for resumption */
 	if (fpexc & FPEXC_EN) {
-		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
+		pr_debug("%s: saving vfp state\n", __func__);
 		vfp_save_state(&ti->vfpstate, fpexc);
 
 		/* disable, just in case */
@@ -529,6 +535,93 @@
 }
 
 /*
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+				    struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	int err = 0;
+
+	/* Ensure that the saved hwstate is up-to-date. */
+	vfp_sync_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers; see asm/hwcap.h for details.
+	 */
+	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+			      sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Copy the exception registers.
+	 */
+	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	if (err)
+		return -EFAULT;
+
+	/* Ensure that VFP is disabled. */
+	vfp_flush_hwstate(thread);
+
+	/*
+	 * As per the PCS, clear the length and stride bits for function
+	 * entry.
+	 */
+	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
+	return 0;
+}
+
+/* Sanitise and restore the current VFP state from the provided structures. */
+int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+			     struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	unsigned long fpexc;
+	int err = 0;
+
+	/* Disable VFP to avoid corrupting the new thread state. */
+	vfp_flush_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers; see asm/hwcap.h for details.
+	 */
+	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+				sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Sanitise and restore the exception registers.
+	 */
+	__get_user_error(fpexc, &ufp_exc->fpexc, err);
+
+	/* Ensure the VFP is enabled. */
+	fpexc |= FPEXC_EN;
+
+	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+	hwstate->fpexc = fpexc;
+
+	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	return err ? -EFAULT : 0;
+}
+
+/*
  * VFP hardware can lose all context when a CPU goes offline.
  * As we will be running in SMP mode with CPU hotplug, we will save the
  * hardware state at every thread switch.  We clear our held state when
@@ -558,7 +651,7 @@
 	unsigned int cpu_arch = cpu_architecture();
 
 	if (cpu_arch >= CPU_ARCH_ARMv6)
-		vfp_enable(NULL);
+		on_each_cpu(vfp_enable, NULL, 1);
 
 	/*
 	 * First check that there is a VFP that we can use.
@@ -571,18 +664,16 @@
 	barrier();
 	vfp_vector = vfp_null_entry;
 
-	printk(KERN_INFO "VFP support v0.3: ");
+	pr_info("VFP support v0.3: ");
 	if (VFP_arch)
-		printk("not present\n");
+		pr_cont("not present\n");
 	else if (vfpsid & FPSID_NODOUBLE) {
-		printk("no double precision support\n");
+		pr_cont("no double precision support\n");
 	} else {
 		hotcpu_notifier(vfp_hotplug, 0);
 
-		smp_call_function(vfp_enable, NULL, 1);
-
 		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
-		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
+		pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
 			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
 			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
 			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
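The two exported helpers added above concentrate VFP signal-frame handling in vfpmodule.c: one saves and sanitises the state when a signal frame is built, the other validates FPEXC and reloads the state on sigreturn. A hedged sketch of how a caller might pair them; the frame layout and function names below are illustrative, not the actual arch/arm/kernel/signal.c code:

#include <linux/uaccess.h>
#include <linux/user.h>		/* struct user_vfp, struct user_vfp_exc */

/* Prototypes for the helpers added by this patch. */
extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
					   struct user_vfp_exc __user *);
extern int vfp_restore_user_hwstate(struct user_vfp __user *,
				    struct user_vfp_exc __user *);

/* Illustrative frame layout only; the real VFP sigframe differs. */
struct example_vfp_sigframe {
	struct user_vfp ufp;
	struct user_vfp_exc ufp_exc;
};

static int example_preserve_vfp(struct example_vfp_sigframe __user *frame)
{
	/* Copy the registers out and leave VFP disabled for the handler. */
	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int example_restore_vfp(struct example_vfp_sigframe __user *frame)
{
	/* Sanitise FPEXC and reload the saved state on sigreturn. */
	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}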
diff --git a/arch/blackfin/ADI_BSD.txt b/arch/blackfin/ADI_BSD.txt
deleted file mode 100644
index 501d0b6..0000000
--- a/arch/blackfin/ADI_BSD.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-This BSD-Style License applies to a few files in ./arch/blackfin directory,
-and is included here, so people understand which code they can use outside
-the Linux kernel, in non-GPL based projects.
-
-Using the files released under the "ADI BSD" license, must comply with
-these license terms.
-
---------------------------------------------------------------------------
-
-Copyright Analog Devices, Inc.
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-  - Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-  - Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in
-    the documentation and/or other materials provided with the
-    distribution.
-  - Neither the name of Analog Devices, Inc. nor the names of its
-    contributors may be used to endorse or promote products derived
-    from this software without specific prior written permission.
-  - The use of this software may or may not infringe the patent rights
-    of one or more patent holders.  This license does not release you
-    from the requirement that you obtain separate licenses from these
-    patent holders to use this software.
-
-THIS SOFTWARE IS PROVIDED BY ANALOG DEVICES "AS IS" AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, NON-INFRINGEMENT,
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL ANALOG DEVICES BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, INTELLECTUAL PROPERTY RIGHTS, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
diff --git a/arch/blackfin/Clear_BSD.txt b/arch/blackfin/Clear_BSD.txt
new file mode 100644
index 0000000..bfa4b37
--- /dev/null
+++ b/arch/blackfin/Clear_BSD.txt
@@ -0,0 +1,33 @@
+The Clear BSD license:
+
+Copyright (c) 2012, Analog Devices, Inc.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted (subject to the limitations in the
+disclaimer below) provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the
+   distribution.
+
+* Neither the name of Analog Devices, Inc.  nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+GRANTED BY THIS LICENSE.  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 373a690..383e7ec 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -226,6 +226,12 @@
 	help
 	  BF561 Processor Support.
 
+config BF609
+	bool "BF609"
+	select CLKDEV_LOOKUP
+	help
+	  BF609 Processor Support.
+
 endchoice
 
 config SMP
@@ -251,27 +257,27 @@
 
 config BF_REV_MIN
 	int
-	default 0 if (BF51x || BF52x || (BF54x && !BF54xM))
+	default 0 if (BF51x || BF52x || (BF54x && !BF54xM)) || BF60x
 	default 2 if (BF537 || BF536 || BF534)
 	default 3 if (BF561 || BF533 || BF532 || BF531 || BF54xM)
 	default 4 if (BF538 || BF539)
 
 config BF_REV_MAX
 	int
-	default 2 if (BF51x || BF52x || (BF54x && !BF54xM))
+	default 2 if (BF51x || BF52x || (BF54x && !BF54xM)) || BF60x
 	default 3 if (BF537 || BF536 || BF534 || BF54xM)
 	default 5 if (BF561 || BF538 || BF539)
 	default 6 if (BF533 || BF532 || BF531)
 
 choice
 	prompt "Silicon Rev"
-	default BF_REV_0_0 if (BF51x || BF52x)
+	default BF_REV_0_0 if (BF51x || BF52x || BF60x)
 	default BF_REV_0_2 if (BF534 || BF536 || BF537 || (BF54x && !BF54xM))
 	default BF_REV_0_3 if (BF531 || BF532 || BF533 || BF54xM || BF561)
 
 config BF_REV_0_0
 	bool "0.0"
-	depends on (BF51x || BF52x || (BF54x && !BF54xM))
+	depends on (BF51x || BF52x || (BF54x && !BF54xM) || BF60x)
 
 config BF_REV_0_1
 	bool "0.1"
@@ -350,6 +356,7 @@
 source "arch/blackfin/mach-bf537/Kconfig"
 source "arch/blackfin/mach-bf538/Kconfig"
 source "arch/blackfin/mach-bf548/Kconfig"
+source "arch/blackfin/mach-bf609/Kconfig"
 
 menu "Board customizations"
 
@@ -379,6 +386,12 @@
 	  memory region is used to capture NULL pointer references as well
 	  as some core kernel functions.
 
+config PHY_RAM_BASE_ADDRESS
+	hex "Physical RAM Base"
+	default 0x0
+	help
+	  Set the BF609 FPGA physical SRAM base address.
+
 config ROM_BASE
 	hex "Kernel ROM Base"
 	depends on ROMKERNEL
@@ -422,7 +435,7 @@
 
 config PLL_BYPASS
 	bool "Bypass PLL"
-	depends on BFIN_KERNEL_CLOCK
+	depends on BFIN_KERNEL_CLOCK && (!BF60x)
 	default n
 
 config CLKIN_HALF
@@ -441,7 +454,7 @@
 	default "20" if (BFIN537_STAMP || BFIN527_EZKIT || BFIN527_EZKIT_V2 || BFIN548_EZKIT || BFIN548_BLUETECHNIX_CM || BFIN538_EZKIT)
 	default "22" if BFIN533_BLUETECHNIX_CM
 	default "20" if (BFIN537_BLUETECHNIX_CM_E || BFIN537_BLUETECHNIX_CM_U || BFIN527_BLUETECHNIX_CM || BFIN561_BLUETECHNIX_CM)
-	default "20" if BFIN561_EZKIT
+	default "20" if (BFIN561_EZKIT || BF609)
 	default "16" if (H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD || BFIN518F_EZBRD)
 	default "25" if BFIN527_AD7160EVAL
 	help
@@ -473,12 +486,45 @@
 	int "System Clock Divider"
 	depends on BFIN_KERNEL_CLOCK
 	range 1 15
-	default 5
+	default 4
 	help
-	  This sets the frequency of the system clock (including SDRAM or DDR).
+	  On non-BF60x parts this sets the frequency of the system clock
+	  (including SDRAM or DDR); on BF60x it sets the clock for the system
+	  buses and provides the source from which SCLK0 and SCLK1 are derived.
 	  This can be between 1 and 15
 	  System Clock = (PLL frequency) / (this setting)
 
+config SCLK0_DIV
+	int "System Clock0 Divider"
+	depends on BFIN_KERNEL_CLOCK && BF60x
+	range 1 15
+	default 1
+	help
+	  This sets the frequency of the system clock0 for PVP and all other
+	  peripherals not clocked by SCLK1.
+	  This can be between 1 and 15
+	  System Clock0 = (System Clock) / (this setting)
+
+config SCLK1_DIV
+	int "System Clock1 Divider"
+	depends on BFIN_KERNEL_CLOCK && BF60x
+	range 1 15
+	default 1
+	help
+	  This sets the frequency of the system clock1 (including SPORT, SPI and ACM).
+	  This can be between 1 and 15
+	  System Clock1 = (System Clock) / (this setting)
+
+config DCLK_DIV
+	int "DDR Clock Divider"
+	depends on BFIN_KERNEL_CLOCK && BF60x
+	range 1 15
+	default 2
+	help
+	  This sets the frequency of the DDR memory.
+	  This can be between 1 and 15
+	  DDR Clock = (PLL frequency) / (this setting)
+
 choice
 	prompt "DDR SDRAM Chip Type"
 	depends on BFIN_KERNEL_CLOCK
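Pulling the divider options above together, here is a hedged worked example of the BF60x clock tree implied by the defaults (the 800 MHz VCO matches the BF609 MAX_VCO_HZ default added later in this file; the macro names are illustrative, not part of the patch):

/* System Clock = VCO / SCLK_DIV, SCLK0/1 = System Clock / SCLKx_DIV,
 * DDR Clock = VCO / DCLK_DIV, per the help texts above.
 */
#define EXAMPLE_VCO_HZ		800000000UL
#define EXAMPLE_SYSCLK_HZ	(EXAMPLE_VCO_HZ / 4)		/* SCLK_DIV=4:  200 MHz */
#define EXAMPLE_SCLK0_HZ	(EXAMPLE_SYSCLK_HZ / 1)		/* SCLK0_DIV=1: 200 MHz */
#define EXAMPLE_SCLK1_HZ	(EXAMPLE_SYSCLK_HZ / 1)		/* SCLK1_DIV=1: 200 MHz */
#define EXAMPLE_DCLK_HZ		(EXAMPLE_VCO_HZ / 2)		/* DCLK_DIV=2:  400 MHz */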
@@ -494,7 +540,7 @@
 
 choice
 	prompt "DDR/SDRAM Timing"
-	depends on BFIN_KERNEL_CLOCK
+	depends on BFIN_KERNEL_CLOCK && !BF60x
 	default BFIN_KERNEL_CLOCK_MEMINIT_CALC
 	help
 	  This option allows you to specify Blackfin SDRAM/DDR Timing parameters
@@ -576,6 +622,7 @@
 	default 600000000 if BF548
 	default 533333333 if BF549
 	default 600000000 if BF561
+	default 800000000 if BF609
 
 config MIN_VCO_HZ
 	int
@@ -583,6 +630,7 @@
 
 config MAX_SCLK_HZ
 	int
+	default 200000000 if BF609
 	default 133333333
 
 config MIN_SCLK_HZ
@@ -1051,7 +1099,7 @@
 config BFIN_L2_DCACHEABLE
 	bool "Enable DCACHE for L2 SRAM"
 	depends on BFIN_DCACHE
-	depends on (BF54x || BF561) && !SMP
+	depends on (BF54x || BF561 || BF60x) && !SMP
 	default n
 choice
 	prompt "L2 SRAM DCACHE policy"
@@ -1077,6 +1125,7 @@
 comment "Asynchronous Memory Configuration"
 
 menu "EBIU_AMGCTL Global Control"
+	depends on !BF60x
 config C_AMCKEN
 	bool "Enable CLKOUT"
 	default y
@@ -1127,6 +1176,7 @@
 endmenu
 
 menu "EBIU_AMBCTL Control"
+	depends on !BF60x
 config BANK_0
 	hex "Bank 0 (AMBCTL0.L)"
 	default 0x7BB0
@@ -1206,7 +1256,7 @@
 
 choice
 	prompt "Standby Power Saving Mode"
-	depends on PM
+	depends on PM && !BF60x
 	default PM_BFIN_SLEEP_DEEPER
 config  PM_BFIN_SLEEP_DEEPER
 	bool "Sleep Deeper"
@@ -1261,6 +1311,118 @@
 	  On ADSP-BF549 this option enables the same functionality on the
 	  /MRXON pin, which is also PH7.
 
+config PM_BFIN_WAKE_PA15
+	bool "Allow Wake-Up from PA15"
+	depends on PM && BF60x
+	default n
+	help
+	  Enable PA15 Wake-Up
+
+config PM_BFIN_WAKE_PA15_POL
+	int "Wake-up priority"
+	depends on PM_BFIN_WAKE_PA15
+	default 0
+	help
+	  Wake-Up priority 0(low) 1(high)
+
+config PM_BFIN_WAKE_PB15
+	bool "Allow Wake-Up from PB15"
+	depends on PM && BF60x
+	default n
+	help
+	  Enable PB15 Wake-Up
+
+config PM_BFIN_WAKE_PB15_POL
+	int "Wake-up priority"
+	depends on PM_BFIN_WAKE_PB15
+	default 0
+	help
+	  Wake-Up priority 0(low) 1(high)
+
+config PM_BFIN_WAKE_PC15
+	bool "Allow Wake-Up from PC15"
+	depends on PM && BF60x
+	default n
+	help
+	  Enable PC15 Wake-Up
+
+config PM_BFIN_WAKE_PC15_POL
+	int "Wake-up priority"
+	depends on PM_BFIN_WAKE_PC15
+	default 0
+	help
+	  Wake-Up priority 0(low) 1(high)
+
+config PM_BFIN_WAKE_PD06
+	bool "Allow Wake-Up from PD06(ETH0_PHYINT)"
+	depends on PM && BF60x
+	default n
+	help
+	  Enable PD06(ETH0_PHYINT) Wake-up
+
+config PM_BFIN_WAKE_PD06_POL
+	int "Wake-up priority"
+	depends on PM_BFIN_WAKE_PD06
+	default 0
+	help
+	  Wake-Up priority 0(low) 1(high)
+
+config PM_BFIN_WAKE_PE12
+	bool "Allow Wake-Up from PE12(ETH1_PHYINT, PUSH BUTTON)"
+	depends on PM && BF60x
+	default n
+	help
+	  Enable PE12(ETH1_PHYINT, PUSH BUTTON) Wake-up
+
+config PM_BFIN_WAKE_PE12_POL
+	int "Wake-up priority"
+	depends on PM_BFIN_WAKE_PE12
+	default 0
+	help
+	  Wake-Up priority 0(low) 1(high)
+
+config PM_BFIN_WAKE_PG04
+	bool "Allow Wake-Up from PG04(CAN0_RX)"
+	depends on PM && BF60x
+	default n
+	help
+	  Enable PG04(CAN0_RX) Wake-up
+
+config PM_BFIN_WAKE_PG04_POL
+	int "Wake-up priority"
+	depends on PM_BFIN_WAKE_PG04
+	default 0
+	help
+	  Wake-Up priority 0(low) 1(high)
+
+config PM_BFIN_WAKE_PG13
+	bool "Allow Wake-Up from PG13"
+	depends on PM && BF60x
+	default n
+	help
+	  Enable PG13 Wake-Up
+
+config PM_BFIN_WAKE_PG13_POL
+	int "Wake-up priority"
+	depends on PM_BFIN_WAKE_PG13
+	default 0
+	help
+	  Wake-Up priority 0(low) 1(high)
+
+config PM_BFIN_WAKE_USB
+	bool "Allow Wake-Up from (USB)"
+	depends on PM && BF60x
+	default n
+	help
+	  Enable (USB) Wake-up
+
+config PM_BFIN_WAKE_USB_POL
+	int "Wake-up priority"
+	depends on PM_BFIN_WAKE_USB
+	default 0
+	help
+	  Wake-Up priority 0(low) 1(high)
+
 endmenu
 
 menu "CPU Frequency scaling"
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index e2a3d4c..79594694 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -253,4 +253,11 @@
 
 	  Most people should say N here.
 
+config BFIN_PM_WAKEUP_TIME_BENCH
+	bool "Display the total time for kernel to resume from power saving mode"
+	default n
+	help
+	  Display the total time taken for the kernel to resume to normal
+	  operation from standby or suspend-to-mem mode.
+
 endmenu
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 46f42b2..74fdf67 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -54,6 +54,7 @@
 machine-$(CONFIG_BF549)  := bf548
 machine-$(CONFIG_BF549M) := bf548
 machine-$(CONFIG_BF561)  := bf561
+machine-$(CONFIG_BF609)  := bf609
 MACHINE := $(machine-y)
 export MACHINE
 
@@ -86,6 +87,7 @@
 cpu-$(CONFIG_BF549)  := bf549
 cpu-$(CONFIG_BF549M) := bf549m
 cpu-$(CONFIG_BF561)  := bf561
+cpu-$(CONFIG_BF609)  := bf609
 
 rev-$(CONFIG_BF_REV_0_0)  := 0.0
 rev-$(CONFIG_BF_REV_0_1)  := 0.1
diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
index 680730e..e2a2fa5 100644
--- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
@@ -21,14 +21,12 @@
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_BF561=y
+CONFIG_SMP=y
 CONFIG_IRQ_TIMER0=10
 CONFIG_CLKIN_HZ=30000000
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
 CONFIG_BFIN_GPTIMERS=m
-CONFIG_BFIN_EXTMEM_WRITETHROUGH=y
-CONFIG_BFIN_L2_DCACHEABLE=y
-CONFIG_BFIN_L2_WRITETHROUGH=y
 CONFIG_C_CDPRIO=y
 CONFIG_BANK_3=0xAAC2
 CONFIG_BINFMT_FLAT=y
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
new file mode 100644
index 0000000..be9526b
--- /dev/null
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -0,0 +1,155 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_ELF_CORE is not set
+# CONFIG_FUTEX is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+CONFIG_SLAB=y
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BF609=y
+CONFIG_PINT1_ASSIGN=0x01010000
+CONFIG_PINT2_ASSIGN=0x07000101
+CONFIG_PINT3_ASSIGN=0x02020303
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IP_CHECKSUM_L1=y
+CONFIG_SYSCALL_TAB_L1=y
+CONFIG_CPLB_SWITCH_TAB_L1=y
+# CONFIG_APP_STACK_L1 is not set
+# CONFIG_BFIN_INS_LOWOVERHEAD is not set
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM_BFIN_WAKE_PE12=y
+CONFIG_PM_BFIN_WAKE_PE12_POL=1
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+CONFIG_CAN=y
+CONFIG_CAN_BFIN=y
+CONFIG_IRDA=y
+CONFIG_IRTTY_SIR=y
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_FW_LOADER=m
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_UBI=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+CONFIG_STMMAC_IEEE1588=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_BFIN_ROTARY=y
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_BFIN_SIMPLE_TIMER=m
+CONFIG_BFIN_LINKPORT=y
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+CONFIG_SERIAL_BFIN_UART0=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_BLACKFIN_TWI=y
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
+CONFIG_SPI=y
+CONFIG_SPI_BFIN6XX=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_BFIN_WDT=y
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=m
+CONFIG_SND_BF6XX_I2S=m
+CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61=m
+CONFIG_SND_SOC_ALL_CODECS=m
+CONFIG_USB=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_BLACKFIN=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_ZERO=y
+CONFIG_MMC=y
+CONFIG_SDH_BFIN=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_JFFS2_FS=m
+CONFIG_UBIFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index 17bcbf6..608be5e 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -35,6 +35,11 @@
 
 extern unsigned long get_cclk(void);
 extern unsigned long get_sclk(void);
+#ifdef CONFIG_BF60x
+extern unsigned long get_sclk0(void);
+extern unsigned long get_sclk1(void);
+extern unsigned long get_dclk(void);
+#endif
 extern unsigned long sclk_to_usecs(unsigned long sclk);
 extern unsigned long usecs_to_sclk(unsigned long usecs);
 
diff --git a/arch/blackfin/include/asm/bfin6xx_spi.h b/arch/blackfin/include/asm/bfin6xx_spi.h
new file mode 100644
index 0000000..89370b6
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin6xx_spi.h
@@ -0,0 +1,258 @@
+/*
+ * Analog Devices SPI3 controller driver
+ *
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPI_CHANNEL_H_
+#define _SPI_CHANNEL_H_
+
+#include <linux/types.h>
+
+/* SPI_CONTROL */
+#define SPI_CTL_EN                  0x00000001    /* Enable */
+#define SPI_CTL_MSTR                0x00000002    /* Master/Slave */
+#define SPI_CTL_PSSE                0x00000004    /* controls modf error in master mode */
+#define SPI_CTL_ODM                 0x00000008    /* Open Drain Mode */
+#define SPI_CTL_CPHA                0x00000010    /* Clock Phase */
+#define SPI_CTL_CPOL                0x00000020    /* Clock Polarity */
+#define SPI_CTL_ASSEL               0x00000040    /* Slave Select Pin Control */
+#define SPI_CTL_SELST               0x00000080    /* Slave Select Polarity in-between transfers */
+#define SPI_CTL_EMISO               0x00000100    /* Enable MISO */
+#define SPI_CTL_SIZE                0x00000600    /* Word Transfer Size */
+#define SPI_CTL_SIZE08              0x00000000    /* SIZE: 8 bits */
+#define SPI_CTL_SIZE16              0x00000200    /* SIZE: 16 bits */
+#define SPI_CTL_SIZE32              0x00000400    /* SIZE: 32 bits */
+#define SPI_CTL_LSBF                0x00001000    /* LSB First */
+#define SPI_CTL_FCEN                0x00002000    /* Flow-Control Enable */
+#define SPI_CTL_FCCH                0x00004000    /* Flow-Control Channel Selection */
+#define SPI_CTL_FCPL                0x00008000    /* Flow-Control Polarity */
+#define SPI_CTL_FCWM                0x00030000    /* Flow-Control Water-Mark */
+#define SPI_CTL_FIFO0               0x00000000    /* FCWM: TFIFO empty or RFIFO Full */
+#define SPI_CTL_FIFO1               0x00010000    /* FCWM: TFIFO 75% or more empty or RFIFO 75% or more full */
+#define SPI_CTL_FIFO2               0x00020000    /* FCWM: TFIFO 50% or more empty or RFIFO 50% or more full */
+#define SPI_CTL_FMODE               0x00040000    /* Fast-mode Enable */
+#define SPI_CTL_MIOM                0x00300000    /* Multiple I/O Mode */
+#define SPI_CTL_MIO_DIS             0x00000000    /* MIOM: Disable */
+#define SPI_CTL_MIO_DUAL            0x00100000    /* MIOM: Enable DIOM (Dual I/O Mode) */
+#define SPI_CTL_MIO_QUAD            0x00200000    /* MIOM: Enable QUAD (Quad SPI Mode) */
+#define SPI_CTL_SOSI                0x00400000    /* Start on MOSI */
+/* SPI_RX_CONTROL */
+#define SPI_RXCTL_REN               0x00000001    /* Receive Channel Enable */
+#define SPI_RXCTL_RTI               0x00000004    /* Receive Transfer Initiate */
+#define SPI_RXCTL_RWCEN             0x00000008    /* Receive Word Counter Enable */
+#define SPI_RXCTL_RDR               0x00000070    /* Receive Data Request */
+#define SPI_RXCTL_RDR_DIS           0x00000000    /* RDR: Disabled */
+#define SPI_RXCTL_RDR_NE            0x00000010    /* RDR: RFIFO not empty */
+#define SPI_RXCTL_RDR_25            0x00000020    /* RDR: RFIFO 25% full */
+#define SPI_RXCTL_RDR_50            0x00000030    /* RDR: RFIFO 50% full */
+#define SPI_RXCTL_RDR_75            0x00000040    /* RDR: RFIFO 75% full */
+#define SPI_RXCTL_RDR_FULL          0x00000050    /* RDR: RFIFO full */
+#define SPI_RXCTL_RDO               0x00000100    /* Receive Data Over-Run */
+#define SPI_RXCTL_RRWM              0x00003000    /* FIFO Regular Water-Mark */
+#define SPI_RXCTL_RWM_0             0x00000000    /* RRWM: RFIFO Empty */
+#define SPI_RXCTL_RWM_25            0x00001000    /* RRWM: RFIFO 25% full */
+#define SPI_RXCTL_RWM_50            0x00002000    /* RRWM: RFIFO 50% full */
+#define SPI_RXCTL_RWM_75            0x00003000    /* RRWM: RFIFO 75% full */
+#define SPI_RXCTL_RUWM              0x00070000    /* FIFO Urgent Water-Mark */
+#define SPI_RXCTL_UWM_DIS           0x00000000    /* RUWM: Disabled */
+#define SPI_RXCTL_UWM_25            0x00010000    /* RUWM: RFIFO 25% full */
+#define SPI_RXCTL_UWM_50            0x00020000    /* RUWM: RFIFO 50% full */
+#define SPI_RXCTL_UWM_75            0x00030000    /* RUWM: RFIFO 75% full */
+#define SPI_RXCTL_UWM_FULL          0x00040000    /* RUWM: RFIFO full */
+/* SPI_TX_CONTROL */
+#define SPI_TXCTL_TEN               0x00000001    /* Transmit Channel Enable */
+#define SPI_TXCTL_TTI               0x00000004    /* Transmit Transfer Initiate */
+#define SPI_TXCTL_TWCEN             0x00000008    /* Transmit Word Counter Enable */
+#define SPI_TXCTL_TDR               0x00000070    /* Transmit Data Request */
+#define SPI_TXCTL_TDR_DIS           0x00000000    /* TDR: Disabled */
+#define SPI_TXCTL_TDR_NF            0x00000010    /* TDR: TFIFO not full */
+#define SPI_TXCTL_TDR_25            0x00000020    /* TDR: TFIFO 25% empty */
+#define SPI_TXCTL_TDR_50            0x00000030    /* TDR: TFIFO 50% empty */
+#define SPI_TXCTL_TDR_75            0x00000040    /* TDR: TFIFO 75% empty */
+#define SPI_TXCTL_TDR_EMPTY         0x00000050    /* TDR: TFIFO empty */
+#define SPI_TXCTL_TDU               0x00000100    /* Transmit Data Under-Run */
+#define SPI_TXCTL_TRWM              0x00003000    /* FIFO Regular Water-Mark */
+#define SPI_TXCTL_RWM_FULL          0x00000000    /* TRWM: TFIFO full */
+#define SPI_TXCTL_RWM_25            0x00001000    /* TRWM: TFIFO 25% empty */
+#define SPI_TXCTL_RWM_50            0x00002000    /* TRWM: TFIFO 50% empty */
+#define SPI_TXCTL_RWM_75            0x00003000    /* TRWM: TFIFO 75% empty */
+#define SPI_TXCTL_TUWM              0x00070000    /* FIFO Urgent Water-Mark */
+#define SPI_TXCTL_UWM_DIS           0x00000000    /* TUWM: Disabled */
+#define SPI_TXCTL_UWM_25            0x00010000    /* TUWM: TFIFO 25% empty */
+#define SPI_TXCTL_UWM_50            0x00020000    /* TUWM: TFIFO 50% empty */
+#define SPI_TXCTL_UWM_75            0x00030000    /* TUWM: TFIFO 75% empty */
+#define SPI_TXCTL_UWM_EMPTY         0x00040000    /* TUWM: TFIFO empty */
+/* SPI_CLOCK */
+#define SPI_CLK_BAUD                0x0000FFFF    /* Baud Rate */
+/* SPI_DELAY */
+#define SPI_DLY_STOP                0x000000FF    /* Transfer delay time in multiples of SCK period */
+#define SPI_DLY_LEADX               0x00000100    /* Extended (1 SCK) LEAD Control */
+#define SPI_DLY_LAGX                0x00000200    /* Extended (1 SCK) LAG control */
+/* SPI_SSEL */
+#define SPI_SLVSEL_SSE1             0x00000002    /* SPISSEL1 Enable */
+#define SPI_SLVSEL_SSE2             0x00000004    /* SPISSEL2 Enable */
+#define SPI_SLVSEL_SSE3             0x00000008    /* SPISSEL3 Enable */
+#define SPI_SLVSEL_SSE4             0x00000010    /* SPISSEL4 Enable */
+#define SPI_SLVSEL_SSE5             0x00000020    /* SPISSEL5 Enable */
+#define SPI_SLVSEL_SSE6             0x00000040    /* SPISSEL6 Enable */
+#define SPI_SLVSEL_SSE7             0x00000080    /* SPISSEL7 Enable */
+#define SPI_SLVSEL_SSEL1            0x00000200    /* SPISSEL1 Value */
+#define SPI_SLVSEL_SSEL2            0x00000400    /* SPISSEL2 Value */
+#define SPI_SLVSEL_SSEL3            0x00000800    /* SPISSEL3 Value */
+#define SPI_SLVSEL_SSEL4            0x00001000    /* SPISSEL4 Value */
+#define SPI_SLVSEL_SSEL5            0x00002000    /* SPISSEL5 Value */
+#define SPI_SLVSEL_SSEL6            0x00004000    /* SPISSEL6 Value */
+#define SPI_SLVSEL_SSEL7            0x00008000    /* SPISSEL7 Value */
+/* SPI_RWC */
+#define SPI_RWC_VALUE               0x0000FFFF    /* Received Word-Count */
+/* SPI_RWCR */
+#define SPI_RWCR_VALUE              0x0000FFFF    /* Received Word-Count Reload */
+/* SPI_TWC */
+#define SPI_TWC_VALUE               0x0000FFFF    /* Transmitted Word-Count */
+/* SPI_TWCR */
+#define SPI_TWCR_VALUE              0x0000FFFF    /* Transmitted Word-Count Reload */
+/* SPI_IMASK */
+#define SPI_IMSK_RUWM               0x00000002    /* Receive Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_TUWM               0x00000004    /* Transmit Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_ROM                0x00000010    /* Receive Over-Run Error Interrupt Mask */
+#define SPI_IMSK_TUM                0x00000020    /* Transmit Under-Run Error Interrupt Mask */
+#define SPI_IMSK_TCM                0x00000040    /* Transmit Collision Error Interrupt Mask */
+#define SPI_IMSK_MFM                0x00000080    /* Mode Fault Error Interrupt Mask */
+#define SPI_IMSK_RSM                0x00000100    /* Receive Start Interrupt Mask */
+#define SPI_IMSK_TSM                0x00000200    /* Transmit Start Interrupt Mask */
+#define SPI_IMSK_RFM                0x00000400    /* Receive Finish Interrupt Mask */
+#define SPI_IMSK_TFM                0x00000800    /* Transmit Finish Interrupt Mask */
+/* SPI_IMASKCL */
+#define SPI_IMSK_CLR_RUW            0x00000002    /* Receive Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_CLR_TUWM           0x00000004    /* Transmit Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_CLR_ROM            0x00000010    /* Receive Over-Run Error Interrupt Mask */
+#define SPI_IMSK_CLR_TUM            0x00000020    /* Transmit Under-Run Error Interrupt Mask */
+#define SPI_IMSK_CLR_TCM            0x00000040    /* Transmit Collision Error Interrupt Mask */
+#define SPI_IMSK_CLR_MFM            0x00000080    /* Mode Fault Error Interrupt Mask */
+#define SPI_IMSK_CLR_RSM            0x00000100    /* Receive Start Interrupt Mask */
+#define SPI_IMSK_CLR_TSM            0x00000200    /* Transmit Start Interrupt Mask */
+#define SPI_IMSK_CLR_RFM            0x00000400    /* Receive Finish Interrupt Mask */
+#define SPI_IMSK_CLR_TFM            0x00000800    /* Transmit Finish Interrupt Mask */
+/* SPI_IMASKST */
+#define SPI_IMSK_SET_RUWM           0x00000002    /* Receive Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_SET_TUWM           0x00000004    /* Transmit Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_SET_ROM            0x00000010    /* Receive Over-Run Error Interrupt Mask */
+#define SPI_IMSK_SET_TUM            0x00000020    /* Transmit Under-Run Error Interrupt Mask */
+#define SPI_IMSK_SET_TCM            0x00000040    /* Transmit Collision Error Interrupt Mask */
+#define SPI_IMSK_SET_MFM            0x00000080    /* Mode Fault Error Interrupt Mask */
+#define SPI_IMSK_SET_RSM            0x00000100    /* Receive Start Interrupt Mask */
+#define SPI_IMSK_SET_TSM            0x00000200    /* Transmit Start Interrupt Mask */
+#define SPI_IMSK_SET_RFM            0x00000400    /* Receive Finish Interrupt Mask */
+#define SPI_IMSK_SET_TFM            0x00000800    /* Transmit Finish Interrupt Mask */
+/* SPI_STATUS */
+#define SPI_STAT_SPIF               0x00000001    /* SPI Finished */
+#define SPI_STAT_RUWM               0x00000002    /* Receive Urgent Water-Mark Breached */
+#define SPI_STAT_TUWM               0x00000004    /* Transmit Urgent Water-Mark Breached */
+#define SPI_STAT_ROE                0x00000010    /* Receive Over-Run Error Indication */
+#define SPI_STAT_TUE                0x00000020    /* Transmit Under-Run Error Indication */
+#define SPI_STAT_TCE                0x00000040    /* Transmit Collision Error Indication */
+#define SPI_STAT_MODF               0x00000080    /* Mode Fault Error Indication */
+#define SPI_STAT_RS                 0x00000100    /* Receive Start Indication */
+#define SPI_STAT_TS                 0x00000200    /* Transmit Start Indication */
+#define SPI_STAT_RF                 0x00000400    /* Receive Finish Indication */
+#define SPI_STAT_TF                 0x00000800    /* Transmit Finish Indication */
+#define SPI_STAT_RFS                0x00007000    /* SPI_RFIFO status */
+#define SPI_STAT_RFIFO_EMPTY        0x00000000    /* RFS: RFIFO Empty */
+#define SPI_STAT_RFIFO_25           0x00001000    /* RFS: RFIFO 25% Full */
+#define SPI_STAT_RFIFO_50           0x00002000    /* RFS: RFIFO 50% Full */
+#define SPI_STAT_RFIFO_75           0x00003000    /* RFS: RFIFO 75% Full */
+#define SPI_STAT_RFIFO_FULL         0x00004000    /* RFS: RFIFO Full */
+#define SPI_STAT_TFS                0x00070000    /* SPI_TFIFO status */
+#define SPI_STAT_TFIFO_FULL         0x00000000    /* TFS: TFIFO full */
+#define SPI_STAT_TFIFO_25           0x00010000    /* TFS: TFIFO 25% empty */
+#define SPI_STAT_TFIFO_50           0x00020000    /* TFS: TFIFO 50% empty */
+#define SPI_STAT_TFIFO_75           0x00030000    /* TFS: TFIFO 75% empty */
+#define SPI_STAT_TFIFO_EMPTY        0x00040000    /* TFS: TFIFO empty */
+#define SPI_STAT_FCS                0x00100000    /* Flow-Control Stall Indication */
+#define SPI_STAT_RFE                0x00400000    /* SPI_RFIFO Empty */
+#define SPI_STAT_TFF                0x00800000    /* SPI_TFIFO Full */
+/* SPI_ILAT */
+#define SPI_ILAT_RUWMI              0x00000002    /* Receive Urgent Water Mark Interrupt */
+#define SPI_ILAT_TUWMI              0x00000004    /* Transmit Urgent Water Mark Interrupt */
+#define SPI_ILAT_ROI                0x00000010    /* Receive Over-Run Error Indication */
+#define SPI_ILAT_TUI                0x00000020    /* Transmit Under-Run Error Indication */
+#define SPI_ILAT_TCI                0x00000040    /* Transmit Collision Error Indication */
+#define SPI_ILAT_MFI                0x00000080    /* Mode Fault Error Indication */
+#define SPI_ILAT_RSI                0x00000100    /* Receive Start Indication */
+#define SPI_ILAT_TSI                0x00000200    /* Transmit Start Indication */
+#define SPI_ILAT_RFI                0x00000400    /* Receive Finish Indication */
+#define SPI_ILAT_TFI                0x00000800    /* Transmit Finish Indication */
+/* SPI_ILATCL */
+#define SPI_ILAT_CLR_RUWMI          0x00000002    /* Receive Urgent Water Mark Interrupt */
+#define SPI_ILAT_CLR_TUWMI          0x00000004    /* Transmit Urgent Water Mark Interrupt */
+#define SPI_ILAT_CLR_ROI            0x00000010    /* Receive Over-Run Error Indication */
+#define SPI_ILAT_CLR_TUI            0x00000020    /* Transmit Under-Run Error Indication */
+#define SPI_ILAT_CLR_TCI            0x00000040    /* Transmit Collision Error Indication */
+#define SPI_ILAT_CLR_MFI            0x00000080    /* Mode Fault Error Indication */
+#define SPI_ILAT_CLR_RSI            0x00000100    /* Receive Start Indication */
+#define SPI_ILAT_CLR_TSI            0x00000200    /* Transmit Start Indication */
+#define SPI_ILAT_CLR_RFI            0x00000400    /* Receive Finish Indication */
+#define SPI_ILAT_CLR_TFI            0x00000800    /* Transmit Finish Indication */
+
+/*
+ * bfin spi3 registers layout
+ */
+struct bfin_spi_regs {
+	u32 revid;
+	u32 control;
+	u32 rx_control;
+	u32 tx_control;
+	u32 clock;
+	u32 delay;
+	u32 ssel;
+	u32 rwc;
+	u32 rwcr;
+	u32 twc;
+	u32 twcr;
+	u32 reserved0;
+	u32 emask;
+	u32 emaskcl;
+	u32 emaskst;
+	u32 reserved1;
+	u32 status;
+	u32 elat;
+	u32 elatcl;
+	u32 reserved2;
+	u32 rfifo;
+	u32 reserved3;
+	u32 tfifo;
+};
+
+#define MAX_CTRL_CS          8  /* cs in spi controller */
+
+/* device.platform_data for SSP controller devices */
+struct bfin6xx_spi_master {
+	u16 num_chipselect;
+	u16 pin_req[7];
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct bfin6xx_spi_chip {
+	u32 control;
+	u16 cs_chg_udelay; /* Some devices require 16-bit delays */
+	u32 tx_dummy_val; /* tx value for rx only transfer */
+	bool enable_dma;
+};
+
+#endif /* _SPI_CHANNEL_H_ */
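A minimal board-file sketch, not part of this patch, of how the two structures above are intended to be used: bfin6xx_spi_master becomes the controller's platform_data, while bfin6xx_spi_chip is passed per slave through spi_board_info.controller_data. The modalias, bus number and chip-select values below are illustrative assumptions only.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <asm/bfin6xx_spi.h>

/* Per-slave tuning handed to the controller via controller_data */
static struct bfin6xx_spi_chip spi_flash_chip_info = {
	.enable_dma = true,
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	 = "m25p80",	/* hypothetical SPI flash */
		.max_speed_hz	 = 25000000,
		.bus_num	 = 0,
		.chip_select	 = 1,
		.controller_data = &spi_flash_chip_info,
	},
};

/* Registered from board init code, e.g.:
 *	spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
 */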
diff --git a/arch/blackfin/include/asm/bfin_crc.h b/arch/blackfin/include/asm/bfin_crc.h
new file mode 100644
index 0000000..3deb445
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_crc.h
@@ -0,0 +1,139 @@
+/*
+ * bfin_crc.h - interface to Blackfin CRC controllers
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_CRC_H__
+#define __BFIN_CRC_H__
+
+/* Drivers that use the hardware CRC must initialize this structure */
+struct crc_info {
+	/* Input data address */
+	unsigned char *in_addr;
+	/* Output data address */
+	unsigned char *out_addr;
+	/* Input or output bytes */
+	unsigned long datasize;
+	union {
+	/* CRC to compare with that of input buffer */
+	unsigned long crc_compare;
+	/* Value to compare with input data */
+	unsigned long val_verify;
+	/* Value to fill */
+	unsigned long val_fill;
+	};
+	/* Value to program the 32b CRC Polynomial */
+	unsigned long crc_poly;
+	union {
+	/* CRC calculated from the input data */
+	unsigned long crc_result;
+	/* First failed position to verify input data */
+	unsigned long pos_verify;
+	};
+	/* CRC mirror flags */
+	unsigned int bitmirr:1;
+	unsigned int bytmirr:1;
+	unsigned int w16swp:1;
+	unsigned int fdsel:1;
+	unsigned int rsltmirr:1;
+	unsigned int polymirr:1;
+	unsigned int cmpmirr:1;
+};
+
+/* Userspace interface */
+#define CRC_IOC_MAGIC		'C'
+#define CRC_IOC_CALC_CRC	_IOWR(CRC_IOC_MAGIC, 0x01, unsigned int)
+#define CRC_IOC_MEMCPY_CRC	_IOWR(CRC_IOC_MAGIC, 0x02, unsigned int)
+#define CRC_IOC_VERIFY_VAL	_IOWR(CRC_IOC_MAGIC, 0x03, unsigned int)
+#define CRC_IOC_FILL_VAL	_IOWR(CRC_IOC_MAGIC, 0x04, unsigned int)
+
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/miscdevice.h>
+
+struct crc_register {
+	u32 control;
+	u32 datacnt;
+	u32 datacntrld;
+	u32 __pad_1[2];
+	u32 compare;
+	u32 fillval;
+	u32 datafifo;
+	u32 intren;
+	u32 intrenset;
+	u32 intrenclr;
+	u32 poly;
+	u32 __pad_2[4];
+	u32 status;
+	u32 datacntcap;
+	u32 __pad_3;
+	u32 result;
+	u32 curresult;
+	u32 __pad_4[3];
+	u32 revid;
+};
+
+struct bfin_crc {
+	struct miscdevice mdev;
+	struct list_head list;
+	int irq;
+	int dma_ch_src;
+	int dma_ch_dest;
+	volatile struct crc_register *regs;
+	struct crc_info *info;
+	struct mutex mutex;
+	struct completion c;
+	unsigned short opmode;
+	char name[20];
+};
+
+/* CRC_STATUS Masks */
+#define CMPERR			0x00000002	/* Compare error */
+#define DCNTEXP			0x00000010	/* datacnt register expired */
+#define IBR			0x00010000	/* Input buffer ready */
+#define OBR			0x00020000	/* Output buffer ready */
+#define IRR			0x00040000	/* Immediate result ready */
+#define LUTDONE			0x00080000	/* Look-up table generation done */
+#define FSTAT			0x00700000	/* FIFO status */
+#define MAX_FIFO		4		/* Max fifo size */
+
+/* CRC_CONTROL Masks */
+#define BLKEN			0x00000001	/* Block enable */
+#define OPMODE			0x000000F0	/* Operation mode */
+#define OPMODE_OFFSET		4		/* Operation mode mask offset*/
+#define MODE_DMACPY_CRC		1		/* MTM CRC compute and compare */
+#define MODE_DATA_FILL		2		/* MTM data fill */
+#define MODE_CALC_CRC		3		/* MSM CRC compute and compare */
+#define MODE_DATA_VERIFY	4		/* MSM data verify */
+#define AUTOCLRZ		0x00000100	/* Auto clear to zero */
+#define AUTOCLRF		0x00000200	/* Auto clear to one */
+#define OBRSTALL		0x00001000	/* Stall on output buffer ready */
+#define IRRSTALL		0x00002000	/* Stall on immediate result ready */
+#define BITMIRR			0x00010000	/* Mirror bits within each byte of 32-bit input data */
+#define BITMIRR_OFFSET		16		/* Mirror bits offset */
+#define BYTMIRR			0x00020000	/* Mirror bytes of 32-bit input data */
+#define BYTMIRR_OFFSET		17		/* Mirror bytes offset */
+#define W16SWP			0x00040000	/* Mirror upper and lower 16-bit word of 32-bit input data */
+#define W16SWP_OFFSET		18		/* Mirror 16-bit word offset */
+#define FDSEL			0x00080000	/* FIFO is written after input data is mirrored */
+#define FDSEL_OFFSET		19		/* Mirror FIFO offset */
+#define RSLTMIRR		0x00100000	/* CRC result registers are mirrored. */
+#define RSLTMIRR_OFFSET		20		/* Mirror CRC result offset. */
+#define POLYMIRR		0x00200000	/* CRC poly register is mirrored. */
+#define POLYMIRR_OFFSET		21		/* Mirror CRC poly offset. */
+#define CMPMIRR			0x00400000	/* CRC compare register is mirrored. */
+#define CMPMIRR_OFFSET		22		/* Mirror CRC compare offset. */
+
+/* CRC_INTREN Masks */
+#define CMPERRI 		0x02		/* CRC_ERROR_INTR */
+#define DCNTEXPI 		0x10		/* CRC_STATUS_INTR */
+
+#endif
+
+#endif
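The header above splits a user-space ioctl interface from the kernel-only register and driver definitions. A hypothetical user-space sketch of the calculate-CRC path follows; it assumes the driver exposes a misc character device (the /dev/bfin-crc0 node name is made up here) and takes a struct crc_info pointer as the ioctl argument.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <asm/bfin_crc.h>

int main(void)
{
	unsigned char buf[64];
	struct crc_info info;
	int fd = open("/dev/bfin-crc0", O_RDWR);

	memset(buf, 0xa5, sizeof(buf));
	memset(&info, 0, sizeof(info));
	info.in_addr = buf;
	info.datasize = sizeof(buf);
	info.crc_poly = 0x04c11db7;	/* example 32-bit polynomial */

	if (fd < 0 || ioctl(fd, CRC_IOC_CALC_CRC, &info) < 0) {
		perror("bfin-crc");
		return 1;
	}
	printf("crc = 0x%08lx\n", info.crc_result);
	return 0;
}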
diff --git a/arch/blackfin/include/asm/bfin_dma.h b/arch/blackfin/include/asm/bfin_dma.h
index d511207..6319f4e 100644
--- a/arch/blackfin/include/asm/bfin_dma.h
+++ b/arch/blackfin/include/asm/bfin_dma.h
@@ -15,12 +15,55 @@
 #define DMAEN			0x0001	/* DMA Channel Enable */
 #define WNR				0x0002	/* Channel Direction (W/R*) */
 #define WDSIZE_8		0x0000	/* Transfer Word Size = 8 */
+#define PSIZE_8			0x00000000	/* Transfer Word Size = 8 */
+
+#ifdef CONFIG_BF60x
+
+#define PSIZE_16		0x00000010	/* Transfer Word Size = 16 */
+#define PSIZE_32		0x00000020	/* Transfer Word Size = 32 */
+#define PSIZE_64		0x00000030	/* Transfer Word Size = 64 */
+#define WDSIZE_16		0x00000100	/* Transfer Word Size = 16 */
+#define WDSIZE_32		0x00000200	/* Transfer Word Size = 32 */
+#define WDSIZE_64		0x00000300	/* Transfer Word Size = 64 */
+#define WDSIZE_128		0x00000400	/* Transfer Word Size = 128 */
+#define WDSIZE_256		0x00000500	/* Transfer Word Size = 256 */
+#define DMA2D			0x04000000	/* DMA Mode (2D/1D*) */
+#define RESTART			0x00000004	/* DMA Buffer Clear SYNC */
+#define DI_EN_X			0x00100000	/* Data Interrupt Enable in X count */
+#define DI_EN_Y			0x00200000	/* Data Interrupt Enable in Y count */
+#define DI_EN_P			0x00300000	/* Data Interrupt Enable in Peripheral */
+#define DI_EN			DI_EN_X		/* Data Interrupt Enable */
+#define NDSIZE_0		0x00000000	/* Next Descriptor Size = 1 */
+#define NDSIZE_1		0x00010000	/* Next Descriptor Size = 2 */
+#define NDSIZE_2		0x00020000	/* Next Descriptor Size = 3 */
+#define NDSIZE_3		0x00030000	/* Next Descriptor Size = 4 */
+#define NDSIZE_4		0x00040000	/* Next Descriptor Size = 5 */
+#define NDSIZE_5		0x00050000	/* Next Descriptor Size = 6 */
+#define NDSIZE_6		0x00060000	/* Next Descriptor Size = 7 */
+#define NDSIZE			0x00070000	/* Next Descriptor Size */
+#define NDSIZE_OFFSET		16		/* Next Descriptor Size Offset */
+#define DMAFLOW_LIST		0x00004000	/* Descriptor List Mode */
+#define DMAFLOW_LARGE		DMAFLOW_LIST
+#define DMAFLOW_ARRAY		0x00005000	/* Descriptor Array Mode */
+#define DMAFLOW_LIST_DEMAND	0x00006000	/* Descriptor Demand List Mode */
+#define DMAFLOW_ARRAY_DEMAND	0x00007000	/* Descriptor Demand Array Mode */
+#define DMA_RUN_DFETCH		0x00000100	/* DMA Channel Running Indicator (DFETCH) */
+#define DMA_RUN			0x00000200	/* DMA Channel Running Indicator */
+#define DMA_RUN_WAIT_TRIG	0x00000300	/* DMA Channel Running Indicator (WAIT TRIG) */
+#define DMA_RUN_WAIT_ACK	0x00000400	/* DMA Channel Running Indicator (WAIT ACK) */
+
+#else
+
+#define PSIZE_16		0x0000	/* Transfer Word Size = 16 */
+#define PSIZE_32		0x0000	/* Transfer Word Size = 32 */
 #define WDSIZE_16		0x0004	/* Transfer Word Size = 16 */
 #define WDSIZE_32		0x0008	/* Transfer Word Size = 32 */
 #define DMA2D			0x0010	/* DMA Mode (2D/1D*) */
 #define RESTART			0x0020	/* DMA Buffer Clear */
 #define DI_SEL			0x0040	/* Data Interrupt Timing Select */
 #define DI_EN			0x0080	/* Data Interrupt Enable */
+#define DI_EN_X			0x00C0	/* Data Interrupt Enable in X count*/
+#define DI_EN_Y			0x0080	/* Data Interrupt Enable in Y count*/
 #define NDSIZE_0		0x0000	/* Next Descriptor Size = 0 (Stop/Autobuffer) */
 #define NDSIZE_1		0x0100	/* Next Descriptor Size = 1 */
 #define NDSIZE_2		0x0200	/* Next Descriptor Size = 2 */
@@ -32,18 +75,26 @@
 #define NDSIZE_8		0x0800	/* Next Descriptor Size = 8 */
 #define NDSIZE_9		0x0900	/* Next Descriptor Size = 9 */
 #define NDSIZE			0x0f00	/* Next Descriptor Size */
-#define DMAFLOW			0x7000	/* Flow Control */
-#define DMAFLOW_STOP	0x0000	/* Stop Mode */
-#define DMAFLOW_AUTO	0x1000	/* Autobuffer Mode */
+#define NDSIZE_OFFSET		8	/* Next Descriptor Size Offset */
 #define DMAFLOW_ARRAY	0x4000	/* Descriptor Array Mode */
 #define DMAFLOW_SMALL	0x6000	/* Small Model Descriptor List Mode */
 #define DMAFLOW_LARGE	0x7000	/* Large Model Descriptor List Mode */
+#define DFETCH			0x0004	/* DMA Descriptor Fetch Indicator */
+#define DMA_RUN			0x0008	/* DMA Channel Running Indicator */
+
+#endif
+#define DMAFLOW			0x7000	/* Flow Control */
+#define DMAFLOW_STOP	0x0000	/* Stop Mode */
+#define DMAFLOW_AUTO	0x1000	/* Autobuffer Mode */
 
 /* DMA_IRQ_STATUS Masks */
 #define DMA_DONE		0x0001	/* DMA Completion Interrupt Status */
 #define DMA_ERR			0x0002	/* DMA Error Interrupt Status */
-#define DFETCH			0x0004	/* DMA Descriptor Fetch Indicator */
-#define DMA_RUN			0x0008	/* DMA Channel Running Indicator */
+#ifdef CONFIG_BF60x
+#define DMA_PIRQ		0x0004	/* DMA Peripheral Error Interrupt Status */
+#else
+#define DMA_PIRQ		0
+#endif
 
 /*
  * All Blackfin system MMRs are padded to 32bits even if the register
@@ -57,6 +108,26 @@
 struct bfin_dma_regs {
 	u32 next_desc_ptr;
 	u32 start_addr;
+#ifdef CONFIG_BF60x
+	u32 cfg;
+	u32 x_count;
+	u32 x_modify;
+	u32 y_count;
+	u32 y_modify;
+	u32 pad1;
+	u32 pad2;
+	u32 curr_desc_ptr;
+	u32 prev_desc_ptr;
+	u32 curr_addr;
+	u32 irq_status;
+	u32 curr_x_count;
+	u32 curr_y_count;
+	u32 pad3;
+	u32 bw_limit_count;
+	u32 curr_bw_limit_count;
+	u32 bw_monitor_count;
+	u32 curr_bw_monitor_count;
+#else
 	__BFP(config);
 	u32 __pad0;
 	__BFP(x_count);
@@ -71,8 +142,10 @@
 	u32 __pad1;
 	__BFP(curr_y_count);
 	u32 __pad2;
+#endif
 };
 
+#ifndef CONFIG_BF60x
 /*
  * bfin handshake mdma registers layout
  */
@@ -85,6 +158,7 @@
 	__BFP(ecount);
 	__BFP(bcount);
 };
+#endif
 
 #undef __BFP
 
diff --git a/arch/blackfin/include/asm/bfin_pfmon.h b/arch/blackfin/include/asm/bfin_pfmon.h
index accd47e..bf52e1f 100644
--- a/arch/blackfin/include/asm/bfin_pfmon.h
+++ b/arch/blackfin/include/asm/bfin_pfmon.h
@@ -3,7 +3,7 @@
  *
  * Copyright 2005-2011 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or GPL-2 (or later).
+ * Licensed under the Clear BSD license or GPL-2 (or later).
  */
 
 #ifndef __ASM_BFIN_PFMON_H__
diff --git a/arch/blackfin/include/asm/bfin_ppi.h b/arch/blackfin/include/asm/bfin_ppi.h
index 3be05fa..a4e872e 100644
--- a/arch/blackfin/include/asm/bfin_ppi.h
+++ b/arch/blackfin/include/asm/bfin_ppi.h
@@ -10,6 +10,7 @@
 #define __ASM_BFIN_PPI_H__
 
 #include <linux/types.h>
+#include <asm/blackfin.h>
 
 /*
  * All Blackfin system MMRs are padded to 32bits even if the register
@@ -48,6 +49,133 @@
 	u32 clip;
 };
 
+/*
+ * bfin eppi3 registers layout
+ */
+struct bfin_eppi3_regs {
+	u32 stat;
+	u32 hcnt;
+	u32 hdly;
+	u32 vcnt;
+	u32 vdly;
+	u32 frame;
+	u32 line;
+	u32 clkdiv;
+	u32 ctl;
+	u32 fs1_wlhb;
+	u32 fs1_paspl;
+	u32 fs2_wlvb;
+	u32 fs2_palpf;
+	u32 imsk;
+	u32 oddclip;
+	u32 evenclip;
+	u32 fs1_dly;
+	u32 fs2_dly;
+	u32 ctl2;
+};
+
 #undef __BFP
 
+#ifdef EPPI0_CTL2
+#define EPPI_STAT_CFIFOERR              0x00000001    /* Chroma FIFO Error */
+#define EPPI_STAT_YFIFOERR              0x00000002    /* Luma FIFO Error */
+#define EPPI_STAT_LTERROVR              0x00000004    /* Line Track Overflow */
+#define EPPI_STAT_LTERRUNDR             0x00000008    /* Line Track Underflow */
+#define EPPI_STAT_FTERROVR              0x00000010    /* Frame Track Overflow */
+#define EPPI_STAT_FTERRUNDR             0x00000020    /* Frame Track Underflow */
+#define EPPI_STAT_ERRNCOR               0x00000040    /* Preamble Error Not Corrected */
+#define EPPI_STAT_PXPERR                0x00000080    /* PxP Ready Error */
+#define EPPI_STAT_ERRDET                0x00004000    /* Preamble Error Detected */
+#define EPPI_STAT_FLD                   0x00008000    /* Current Field Received by EPPI */
+
+#define EPPI_HCNT_VALUE                 0x0000FFFF    /* Holds the number of samples to read in or write out per line, after PPIx_HDLY number of cycles have expired since the last assertion of PPIx_FS1 */
+
+#define EPPI_HDLY_VALUE                 0x0000FFFF    /* Number of PPIx_CLK cycles to delay after assertion of PPIx_FS1 before starting to read or write data */
+
+#define EPPI_VCNT_VALUE                 0x0000FFFF    /* Holds the number of lines to read in or write out, after PPIx_VDLY number of lines from the start of frame */
+
+#define EPPI_VDLY_VALUE                 0x0000FFFF    /* Number of lines to wait after the start of a new frame before starting to read/transmit data */
+
+#define EPPI_FRAME_VALUE                0x0000FFFF    /* Holds the number of lines expected per frame of data */
+
+#define EPPI_LINE_VALUE                 0x0000FFFF    /* Holds the number of samples expected per line */
+
+#define EPPI_CLKDIV_VALUE               0x0000FFFF    /* Internal clock divider */
+
+#define EPPI_CTL_EN                     0x00000001    /* PPI Enable */
+#define EPPI_CTL_DIR                    0x00000002    /* PPI Direction */
+#define EPPI_CTL_XFRTYPE                0x0000000C    /* PPI Operating Mode */
+#define EPPI_CTL_ACTIVE656              0x00000000    /* XFRTYPE: ITU656 Active Video Only Mode */
+#define EPPI_CTL_ENTIRE656              0x00000004    /* XFRTYPE: ITU656 Entire Field Mode */
+#define EPPI_CTL_VERT656                0x00000008    /* XFRTYPE: ITU656 Vertical Blanking Only Mode */
+#define EPPI_CTL_NON656                 0x0000000C    /* XFRTYPE: Non-ITU656 Mode (GP Mode) */
+#define EPPI_CTL_FSCFG                  0x00000030    /* Frame Sync Configuration */
+#define EPPI_CTL_SYNC0                  0x00000000    /* FSCFG: Sync Mode 0 */
+#define EPPI_CTL_SYNC1                  0x00000010    /* FSCFG: Sync Mode 1 */
+#define EPPI_CTL_SYNC2                  0x00000020    /* FSCFG: Sync Mode 2 */
+#define EPPI_CTL_SYNC3                  0x00000030    /* FSCFG: Sync Mode 3 */
+#define EPPI_CTL_FLDSEL                 0x00000040    /* Field Select/Trigger */
+#define EPPI_CTL_ITUTYPE                0x00000080    /* ITU Interlace or Progressive */
+#define EPPI_CTL_BLANKGEN               0x00000100    /* ITU Output Mode with Internal Blanking Generation */
+#define EPPI_CTL_ICLKGEN                0x00000200    /* Internal Clock Generation */
+#define EPPI_CTL_IFSGEN                 0x00000400    /* Internal Frame Sync Generation */
+#define EPPI_CTL_SIGNEXT                0x00000800    /* Sign Extension */
+#define EPPI_CTL_POLC                   0x00003000    /* Frame Sync and Data Driving and Sampling Edges */
+#define EPPI_CTL_POLC0                  0x00000000    /* POLC: Clock/Sync polarity mode 0 */
+#define EPPI_CTL_POLC1                  0x00001000    /* POLC: Clock/Sync polarity mode 1 */
+#define EPPI_CTL_POLC2                  0x00002000    /* POLC: Clock/Sync polarity mode 2 */
+#define EPPI_CTL_POLC3                  0x00003000    /* POLC: Clock/Sync polarity mode 3 */
+#define EPPI_CTL_POLS                   0x0000C000    /* Frame Sync Polarity */
+#define EPPI_CTL_FS1HI_FS2HI            0x00000000    /* POLS: FS1 and FS2 are active high */
+#define EPPI_CTL_FS1LO_FS2HI            0x00004000    /* POLS: FS1 is active low. FS2 is active high */
+#define EPPI_CTL_FS1HI_FS2LO            0x00008000    /* POLS: FS1 is active high. FS2 is active low */
+#define EPPI_CTL_FS1LO_FS2LO            0x0000C000    /* POLS: FS1 and FS2 are active low */
+#define EPPI_CTL_DLEN                   0x00070000    /* Data Length */
+#define EPPI_CTL_DLEN08                 0x00000000    /* DLEN: 8 bits */
+#define EPPI_CTL_DLEN10                 0x00010000    /* DLEN: 10 bits */
+#define EPPI_CTL_DLEN12                 0x00020000    /* DLEN: 12 bits */
+#define EPPI_CTL_DLEN14                 0x00030000    /* DLEN: 14 bits */
+#define EPPI_CTL_DLEN16                 0x00040000    /* DLEN: 16 bits */
+#define EPPI_CTL_DLEN18                 0x00050000    /* DLEN: 18 bits */
+#define EPPI_CTL_DLEN20                 0x00060000    /* DLEN: 20 bits */
+#define EPPI_CTL_DLEN24                 0x00070000    /* DLEN: 24 bits */
+#define EPPI_CTL_DMIRR                  0x00080000    /* Data Mirroring */
+#define EPPI_CTL_SKIPEN                 0x00100000    /* Skip Enable */
+#define EPPI_CTL_SKIPEO                 0x00200000    /* Skip Even or Odd */
+#define EPPI_CTL_PACKEN                 0x00400000    /* Pack/Unpack Enable */
+#define EPPI_CTL_SWAPEN                 0x00800000    /* Swap Enable */
+#define EPPI_CTL_SPLTEO                 0x01000000    /* Split Even and Odd Data Samples */
+#define EPPI_CTL_SUBSPLTODD             0x02000000    /* Sub-Split Odd Samples */
+#define EPPI_CTL_SPLTWRD                0x04000000    /* Split Word */
+#define EPPI_CTL_RGBFMTEN               0x08000000    /* RGB Formatting Enable */
+#define EPPI_CTL_DMACFG                 0x10000000    /* One or Two DMA Channels Mode */
+#define EPPI_CTL_DMAFINEN               0x20000000    /* DMA Finish Enable */
+#define EPPI_CTL_MUXSEL                 0x40000000    /* MUX Select */
+#define EPPI_CTL_CLKGATEN               0x80000000    /* Clock Gating Enable */
+
+#define EPPI_FS2_WLVB_F2VBAD            0xFF000000    /* In GP transmit mode with BLANKGEN = 1, contains number of lines of vertical blanking after field 2 */
+#define EPPI_FS2_WLVB_F2VBBD            0x00FF0000    /* In GP transmit mode with BLANKGEN = 1, contains number of lines of vertical blanking before field 2 */
+#define EPPI_FS2_WLVB_F1VBAD            0x0000FF00    /* In GP transmit mode with BLANKGEN = 1, contains number of lines of vertical blanking after field 1 */
+#define EPPI_FS2_WLVB_F1VBBD            0x000000FF    /* In GP 2 or 3 FS modes, used to generate PPIx_FS2 width (32-bit). In GP transmit mode with BLANKGEN = 1, contains the number of lines of vertical blanking before field 1 */
+
+#define EPPI_FS2_PALPF_F2ACT            0xFFFF0000    /* Number of lines of Active Data in Field 2 */
+#define EPPI_FS2_PALPF_F1ACT            0x0000FFFF    /* Number of lines of Active Data in Field 1 */
+
+#define EPPI_IMSK_CFIFOERR              0x00000001    /* Mask CFIFO Underflow or Overflow Error Interrupt */
+#define EPPI_IMSK_YFIFOERR              0x00000002    /* Mask YFIFO Underflow or Overflow Error Interrupt */
+#define EPPI_IMSK_LTERROVR              0x00000004    /* Mask Line Track Overflow Error Interrupt */
+#define EPPI_IMSK_LTERRUNDR             0x00000008    /* Mask Line Track Underflow Error Interrupt */
+#define EPPI_IMSK_FTERROVR              0x00000010    /* Mask Frame Track Overflow Error Interrupt */
+#define EPPI_IMSK_FTERRUNDR             0x00000020    /* Mask Frame Track Underflow Error Interrupt */
+#define EPPI_IMSK_ERRNCOR               0x00000040    /* Mask ITU Preamble Error Not Corrected Interrupt */
+#define EPPI_IMSK_PXPERR                0x00000080    /* Mask PxP Ready Error Interrupt */
+
+#define EPPI_ODDCLIP_HIGHODD            0xFFFF0000
+#define EPPI_ODDCLIP_LOWODD             0x0000FFFF
+
+#define EPPI_EVENCLIP_HIGHEVEN          0xFFFF0000
+#define EPPI_EVENCLIP_LOWEVEN           0x0000FFFF
+
+#define EPPI_CTL2_FS1FINEN              0x00000002    /* HSYNC Finish Enable */
+#endif
 #endif
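A short sketch, not part of the patch, of how the EPPI_CTL bit definitions above compose into a control value for a general-purpose 16-bit interface; the particular bit choices are illustrative and assume a part where EPPI0_CTL2 is defined (BF60x-style EPPI3):

#include <asm/bfin_ppi.h>

static const u32 example_eppi_ctl =
	EPPI_CTL_NON656	|	/* general-purpose (non-ITU656) mode */
	EPPI_CTL_SYNC2	|	/* two external frame syncs */
	EPPI_CTL_DLEN16	|	/* 16 data bits */
	EPPI_CTL_PACKEN;	/* pack samples into 32-bit words */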
diff --git a/arch/blackfin/include/asm/bfin_rotary.h b/arch/blackfin/include/asm/bfin_rotary.h
index 0b6910b..8895a75 100644
--- a/arch/blackfin/include/asm/bfin_rotary.h
+++ b/arch/blackfin/include/asm/bfin_rotary.h
@@ -39,6 +39,7 @@
 	unsigned int rotary_rel_code;
 	unsigned short debounce;	/* 0..17 */
 	unsigned short mode;
+	unsigned short pm_wakeup;
 };
 
 /* CNT_CONFIG bitmasks */
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
index 68bcc3d..8597158 100644
--- a/arch/blackfin/include/asm/bfin_serial.h
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -18,7 +18,7 @@
     defined(CONFIG_BFIN_UART1_CTSRTS) || \
     defined(CONFIG_BFIN_UART2_CTSRTS) || \
     defined(CONFIG_BFIN_UART3_CTSRTS)
-# ifdef BFIN_UART_BF54X_STYLE
+# if defined(BFIN_UART_BF54X_STYLE) || defined(BFIN_UART_BF60X_STYLE)
 #  define CONFIG_SERIAL_BFIN_HARD_CTSRTS
 # else
 #  define CONFIG_SERIAL_BFIN_CTSRTS
@@ -58,14 +58,69 @@
 #endif
 };
 
+#ifdef BFIN_UART_BF60X_STYLE
+
+/* UART_CTL Masks */
+#define UCEN                     0x1  /* Enable UARTx Clocks */
+#define LOOP_ENA                 0x2  /* Loopback Mode Enable */
+#define UMOD_MDB                 0x10  /* Enable MDB Mode */
+#define UMOD_IRDA                0x20  /* Enable IrDA Mode */
+#define UMOD_MASK                0x30  /* Uart Mode Mask */
+#define WLS(x)                   ((((x)-5) & 0x03) << 8)  /* Word Length Select */
+#define WLS_MASK                 0x300  /* Word length Select Mask */
+#define WLS_OFFSET               8      /* Word length Select Offset */
+#define STB                      0x1000  /* Stop Bits */
+#define STBH                     0x2000  /* Half Stop Bits */
+#define PEN                      0x4000  /* Parity Enable */
+#define EPS                      0x8000  /* Even Parity Select */
+#define STP                      0x10000  /* Stick Parity */
+#define FPE                      0x20000  /* Force Parity Error On Transmit */
+#define FFE                      0x40000  /* Force Framing Error On Transmit */
+#define SB                       0x80000  /* Set Break */
+#define LCR_MASK		 (SB | STP | EPS | PEN | STB | WLS_MASK)
+#define FCPOL                    0x400000  /* Flow Control Pin Polarity */
+#define RPOLC                    0x800000  /* IrDA RX Polarity Change */
+#define TPOLC                    0x1000000  /* IrDA TX Polarity Change */
+#define MRTS                     0x2000000  /* Manual Request To Send */
+#define XOFF                     0x4000000  /* Transmitter Off */
+#define ARTS                     0x8000000  /* Automatic Request To Send */
+#define ACTS                     0x10000000  /* Automatic Clear To Send */
+#define RFIT                     0x20000000  /* Receive FIFO IRQ Threshold */
+#define RFRT                     0x40000000  /* Receive FIFO RTS Threshold */
+
+/* UART_STAT Masks */
+#define DR                       0x01  /* Data Ready */
+#define OE                       0x02  /* Overrun Error */
+#define PE                       0x04  /* Parity Error */
+#define FE                       0x08  /* Framing Error */
+#define BI                       0x10  /* Break Interrupt */
+#define THRE                     0x20  /* THR Empty */
+#define TEMT                     0x80  /* TSR and UART_THR Empty */
+#define TFI                      0x100  /* Transmission Finished Indicator */
+
+#define ASTKY                    0x200  /* Address Sticky */
+#define ADDR                     0x400  /* Address bit status */
+#define RO			 0x800  /* Reception Ongoing */
+#define SCTS                     0x1000  /* Sticky CTS */
+#define CTS                      0x10000  /* Clear To Send */
+#define RFCS                     0x20000  /* Receive FIFO Count Status */
+
+/* UART_CLOCK Masks */
+#define EDBO                     0x80000000 /* Enable Divide by One */
+
+#else /* BFIN_UART_BF60X_STYLE */
+
 /* UART_LCR Masks */
 #define WLS(x)                   (((x)-5) & 0x03)  /* Word Length Select */
+#define WLS_MASK                 0x03  /* Word length Select Mask */
+#define WLS_OFFSET               0     /* Word length Select Offset */
 #define STB                      0x04  /* Stop Bits */
 #define PEN                      0x08  /* Parity Enable */
 #define EPS                      0x10  /* Even Parity Select */
 #define STP                      0x20  /* Stick Parity */
 #define SB                       0x40  /* Set Break */
 #define DLAB                     0x80  /* Divisor Latch Access */
+#define LCR_MASK		 (SB | STP | EPS | PEN | STB | WLS_MASK)
 
 /* UART_LSR Masks */
 #define DR                       0x01  /* Data Ready */
@@ -77,15 +132,6 @@
 #define TEMT                     0x40  /* TSR and UART_THR Empty */
 #define TFI                      0x80  /* Transmission Finished Indicator */
 
-/* UART_IER Masks */
-#define ERBFI                    0x01  /* Enable Receive Buffer Full Interrupt */
-#define ETBEI                    0x02  /* Enable Transmit Buffer Empty Interrupt */
-#define ELSI                     0x04  /* Enable RX Status Interrupt */
-#define EDSSI                    0x08  /* Enable Modem Status Interrupt */
-#define EDTPTI                   0x10  /* Enable DMA Transmit PIRQ Interrupt */
-#define ETFI                     0x20  /* Enable Transmission Finished Interrupt */
-#define ERFCI                    0x40  /* Enable Receive FIFO Count Interrupt */
-
 /* UART_MCR Masks */
 #define XOFF                     0x01  /* Transmitter Off */
 #define MRTS                     0x02  /* Manual Request To Send */
@@ -103,13 +149,36 @@
 
 /* UART_GCTL Masks */
 #define UCEN                     0x01  /* Enable UARTx Clocks */
-#define IREN                     0x02  /* Enable IrDA Mode */
+#define UMOD_IRDA                0x02  /* Enable IrDA Mode */
+#define UMOD_MASK                0x02  /* Uart Mode Mask */
 #define TPOLC                    0x04  /* IrDA TX Polarity Change */
 #define RPOLC                    0x08  /* IrDA RX Polarity Change */
 #define FPE                      0x10  /* Force Parity Error On Transmit */
 #define FFE                      0x20  /* Force Framing Error On Transmit */
 
-#ifdef BFIN_UART_BF54X_STYLE
+#endif /* BFIN_UART_BF60X_STYLE */
+
+/* UART_IER Masks */
+#define ERBFI                    0x01  /* Enable Receive Buffer Full Interrupt */
+#define ETBEI                    0x02  /* Enable Transmit Buffer Empty Interrupt */
+#define ELSI                     0x04  /* Enable RX Status Interrupt */
+#define EDSSI                    0x08  /* Enable Modem Status Interrupt */
+#define EDTPTI                   0x10  /* Enable DMA Transmit PIRQ Interrupt */
+#define ETFI                     0x20  /* Enable Transmission Finished Interrupt */
+#define ERFCI                    0x40  /* Enable Receive FIFO Count Interrupt */
+
+#if defined(BFIN_UART_BF60X_STYLE)
+# define OFFSET_REDIV            0x00  /* Version ID Register             */
+# define OFFSET_CTL              0x04  /* Control Register                */
+# define OFFSET_STAT             0x08  /* Status Register                 */
+# define OFFSET_SCR              0x0C  /* SCR Scratch Register            */
+# define OFFSET_CLK              0x10  /* Clock Rate Register             */
+# define OFFSET_IER              0x14  /* Interrupt Enable Register       */
+# define OFFSET_IER_SET          0x18  /* Set Interrupt Enable Register   */
+# define OFFSET_IER_CLEAR        0x1C  /* Clear Interrupt Enable Register */
+# define OFFSET_RBR              0x20  /* Receive Buffer register         */
+# define OFFSET_THR              0x24  /* Transmit Holding register       */
+#elif defined(BFIN_UART_BF54X_STYLE)
 # define OFFSET_DLL              0x00  /* Divisor Latch (Low-Byte)        */
 # define OFFSET_DLH              0x04  /* Divisor Latch (High-Byte)       */
 # define OFFSET_GCTL             0x08  /* Global Control Register         */
@@ -145,7 +214,23 @@
  */
 #define __BFP(m) u16 m; u16 __pad_##m
 struct bfin_uart_regs {
-#ifdef BFIN_UART_BF54X_STYLE
+#if defined(BFIN_UART_BF60X_STYLE)
+	u32 revid;
+	u32 ctl;
+	u32 stat;
+	u32 scr;
+	u32 clk;
+	u32 ier;
+	u32 ier_set;
+	u32 ier_clear;
+	u32 rbr;
+	u32 thr;
+	u32 taip;
+	u32 tsr;
+	u32 rsr;
+	u32 txdiv;
+	u32 rxdiv;
+#elif defined(BFIN_UART_BF54X_STYLE)
 	__BFP(dll);
 	__BFP(dlh);
 	__BFP(gctl);
@@ -182,13 +267,70 @@
 };
 #undef __BFP
 
+#define port_membase(uart)     (((struct bfin_serial_port *)(uart))->port.membase)
+
+/*
 #ifndef port_membase
 # define port_membase(p) 0
 #endif
+*/
+#ifdef BFIN_UART_BF60X_STYLE
+
+#define UART_GET_CHAR(p)      bfin_read32(port_membase(p) + OFFSET_RBR)
+#define UART_GET_CLK(p)       bfin_read32(port_membase(p) + OFFSET_CLK)
+#define UART_GET_CTL(p)       bfin_read32(port_membase(p) + OFFSET_CTL)
+#define UART_GET_GCTL(p)      UART_GET_CTL(p)
+#define UART_GET_LCR(p)       UART_GET_CTL(p)
+#define UART_GET_MCR(p)       UART_GET_CTL(p)
+#if ANOMALY_05001001
+#define UART_GET_STAT(p) \
+({ \
+	u32 __ret; \
+	unsigned long flags; \
+	flags = hard_local_irq_save(); \
+	__ret = bfin_read32(port_membase(p) + OFFSET_STAT); \
+	hard_local_irq_restore(flags); \
+	__ret; \
+})
+#else
+#define UART_GET_STAT(p)      bfin_read32(port_membase(p) + OFFSET_STAT)
+#endif
+#define UART_GET_MSR(p)       UART_GET_STAT(p)
+
+#define UART_PUT_CHAR(p, v)   bfin_write32(port_membase(p) + OFFSET_THR, v)
+#define UART_PUT_CLK(p, v)    bfin_write32(port_membase(p) + OFFSET_CLK, v)
+#define UART_PUT_CTL(p, v)    bfin_write32(port_membase(p) + OFFSET_CTL, v)
+#define UART_PUT_GCTL(p, v)   UART_PUT_CTL(p, v)
+#define UART_PUT_LCR(p, v)    UART_PUT_CTL(p, v)
+#define UART_PUT_MCR(p, v)    UART_PUT_CTL(p, v)
+#define UART_PUT_STAT(p, v)   bfin_write32(port_membase(p) + OFFSET_STAT, v)
+
+#define UART_CLEAR_IER(p, v)  bfin_write32(port_membase(p) + OFFSET_IER_CLEAR, v)
+#define UART_GET_IER(p)       bfin_read32(port_membase(p) + OFFSET_IER)
+#define UART_SET_IER(p, v)    bfin_write32(port_membase(p) + OFFSET_IER_SET, v)
+
+#define UART_CLEAR_DLAB(p)    /* MMRs not muxed on BF60x */
+#define UART_SET_DLAB(p)      /* MMRs not muxed on BF60x */
+
+#define UART_CLEAR_LSR(p)     UART_PUT_STAT(p, -1)
+#define UART_GET_LSR(p)       UART_GET_STAT(p)
+#define UART_PUT_LSR(p, v)    UART_PUT_STAT(p, v)
+
+/* This handles hard CTS/RTS */
+#define BFIN_UART_CTSRTS_HARD
+#define UART_CLEAR_SCTS(p)      UART_PUT_STAT(p, SCTS)
+#define UART_GET_CTS(x)         (UART_GET_MSR(x) & CTS)
+#define UART_DISABLE_RTS(x)     UART_PUT_MCR(x, UART_GET_MCR(x) & ~(ARTS | MRTS))
+#define UART_ENABLE_RTS(x)      UART_PUT_MCR(x, UART_GET_MCR(x) | MRTS | ARTS)
+#define UART_ENABLE_INTS(x, v)  UART_SET_IER(x, v)
+#define UART_DISABLE_INTS(x)    UART_CLEAR_IER(x, 0xF)
+
+#else /* BFIN_UART_BF60X_STYLE */
 
 #define UART_GET_CHAR(p)      bfin_read16(port_membase(p) + OFFSET_RBR)
 #define UART_GET_DLL(p)       bfin_read16(port_membase(p) + OFFSET_DLL)
 #define UART_GET_DLH(p)       bfin_read16(port_membase(p) + OFFSET_DLH)
+#define UART_GET_CLK(p)	      ((UART_GET_DLH(p) << 8) | UART_GET_DLL(p))
 #define UART_GET_GCTL(p)      bfin_read16(port_membase(p) + OFFSET_GCTL)
 #define UART_GET_LCR(p)       bfin_read16(port_membase(p) + OFFSET_LCR)
 #define UART_GET_MCR(p)       bfin_read16(port_membase(p) + OFFSET_MCR)
@@ -197,6 +339,11 @@
 #define UART_PUT_CHAR(p, v)   bfin_write16(port_membase(p) + OFFSET_THR, v)
 #define UART_PUT_DLL(p, v)    bfin_write16(port_membase(p) + OFFSET_DLL, v)
 #define UART_PUT_DLH(p, v)    bfin_write16(port_membase(p) + OFFSET_DLH, v)
+#define UART_PUT_CLK(p, v) do { \
+	UART_PUT_DLL(p, (v) & 0xFF); \
+	UART_PUT_DLH(p, ((v) >> 8) & 0xFF); \
+} while (0)
+
 #define UART_PUT_GCTL(p, v)   bfin_write16(port_membase(p) + OFFSET_GCTL, v)
 #define UART_PUT_LCR(p, v)    bfin_write16(port_membase(p) + OFFSET_LCR, v)
 #define UART_PUT_MCR(p, v)    bfin_write16(port_membase(p) + OFFSET_MCR, v)
@@ -233,12 +380,17 @@
 #define UART_CLEAR_DLAB(p)    do { UART_PUT_LCR(p, UART_GET_LCR(p) & ~DLAB); SSYNC(); } while (0)
 #define UART_SET_DLAB(p)      do { UART_PUT_LCR(p, UART_GET_LCR(p) | DLAB); SSYNC(); } while (0)
 
+#define get_lsr_cache(uart)    (((struct bfin_serial_port *)(uart))->lsr)
+#define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v))
+
+/*
 #ifndef put_lsr_cache
 # define put_lsr_cache(p, v)
 #endif
 #ifndef get_lsr_cache
 # define get_lsr_cache(p) 0
 #endif
+*/
 
 /* The hardware clears the LSR bits upon read, so we need to cache
  * some of the more fun bits in software so they don't get lost
@@ -267,7 +419,9 @@
 #define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
 #define UART_DISABLE_INTS(x)   UART_PUT_IER(x, 0)
 
-#endif
+#endif /* BFIN_UART_BF54X_STYLE */
+
+#endif /* BFIN_UART_BF60X_STYLE */
 
 #ifndef BFIN_UART_TX_FIFO_SIZE
 # define BFIN_UART_TX_FIFO_SIZE 2
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h
index 0afcfbd..f8907ea 100644
--- a/arch/blackfin/include/asm/bfin_sport.h
+++ b/arch/blackfin/include/asm/bfin_sport.h
@@ -24,6 +24,7 @@
 struct sport_config {
 	/* TDM (multichannels), I2S or other mode */
 	unsigned int mode:3;
+	unsigned int polled;	/* use poll instead of irq when set */
 
 	/* if TDM mode is selected, channels must be set */
 	int channels;	/* Must be in 8 units */
diff --git a/arch/blackfin/include/asm/bfin_sport3.h b/arch/blackfin/include/asm/bfin_sport3.h
new file mode 100644
index 0000000..03c0022
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_sport3.h
@@ -0,0 +1,107 @@
+/*
+ * bfin_sport - Analog Devices BF6XX SPORT registers
+ *
+ * Copyright (c) 2012 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _BFIN_SPORT3_H_
+#define _BFIN_SPORT3_H_
+
+#include <linux/types.h>
+
+#define SPORT_CTL_SPENPRI             0x00000001    /* Enable Primary Channel */
+#define SPORT_CTL_DTYPE               0x00000006    /* Data type select */
+#define SPORT_CTL_RJUSTIFY_ZFILL      0x00000000    /* DTYPE: MCM mode: Right-justify, zero-fill unused MSBs */
+#define SPORT_CTL_RJUSTIFY_SFILL      0x00000002    /* DTYPE: MCM mode: Right-justify, sign-extend unused MSBs */
+#define SPORT_CTL_USE_U_LAW           0x00000004    /* DTYPE: MCM mode: Compand using u-law */
+#define SPORT_CTL_USE_A_LAW           0x00000006    /* DTYPE: MCM mode: Compand using A-law */
+#define SPORT_CTL_LSBF                0x00000008    /* Serial bit endian select */
+#define SPORT_CTL_SLEN                0x000001F0    /* Serial Word length select */
+#define SPORT_CTL_PACK                0x00000200    /* 16-bit to 32-bit packing enable */
+#define SPORT_CTL_ICLK                0x00000400    /* Internal Clock Select */
+#define SPORT_CTL_OPMODE              0x00000800    /* Operation mode */
+#define SPORT_CTL_CKRE                0x00001000    /* Clock rising edge select */
+#define SPORT_CTL_FSR                 0x00002000    /* Frame Sync required */
+#define SPORT_CTL_IFS                 0x00004000    /* Internal Frame Sync select */
+#define SPORT_CTL_DIFS                0x00008000    /* Data-independent frame sync select */
+#define SPORT_CTL_LFS                 0x00010000    /* Active low frame sync select */
+#define SPORT_CTL_LAFS                0x00020000    /* Late Transmit frame select */
+#define SPORT_CTL_RJUST               0x00040000    /* Right Justified mode select */
+#define SPORT_CTL_FSED                0x00080000    /* External frame sync edge select */
+#define SPORT_CTL_TFIEN               0x00100000    /* Transmit finish interrupt enable select */
+#define SPORT_CTL_GCLKEN              0x00200000    /* Gated clock mode select */
+#define SPORT_CTL_SPENSEC             0x01000000    /* Enable secondary channel */
+#define SPORT_CTL_SPTRAN              0x02000000    /* Data direction control */
+#define SPORT_CTL_DERRSEC             0x04000000    /* Secondary channel error status */
+#define SPORT_CTL_DXSSEC              0x18000000    /* Secondary channel data buffer status */
+#define SPORT_CTL_SEC_EMPTY           0x00000000    /* DXSSEC: Empty */
+#define SPORT_CTL_SEC_PART_FULL       0x10000000    /* DXSSEC: Partially full */
+#define SPORT_CTL_SEC_FULL            0x18000000    /* DXSSEC: Full */
+#define SPORT_CTL_DERRPRI             0x20000000    /* Primary channel error status */
+#define SPORT_CTL_DXSPRI              0xC0000000    /* Primary channel data buffer status */
+#define SPORT_CTL_PRM_EMPTY           0x00000000    /* DXSPRI: Empty */
+#define SPORT_CTL_PRM_PART_FULL       0x80000000    /* DXSPRI: Partially full */
+#define SPORT_CTL_PRM_FULL            0xC0000000    /* DXSPRI: Full */
+
+#define SPORT_DIV_CLKDIV              0x0000FFFF    /* Clock divisor */
+#define SPORT_DIV_FSDIV               0xFFFF0000    /* Frame sync divisor */
+
+#define SPORT_MCTL_MCE                0x00000001    /* Multichannel enable */
+#define SPORT_MCTL_MCPDE              0x00000004    /* Multichannel data packing select */
+#define SPORT_MCTL_MFD                0x000000F0    /* Multichannel frame delay */
+#define SPORT_MCTL_WSIZE              0x00007F00    /* Number of multichannel slots */
+#define SPORT_MCTL_WOFFSET            0x03FF0000    /* Window offset size */
+
+#define SPORT_CNT_CLKCNT              0x0000FFFF    /* Current state of clk div counter */
+#define SPORT_CNT_FSDIVCNT            0xFFFF0000    /* Current state of frame div counter */
+
+#define SPORT_ERR_DERRPMSK            0x00000001    /* Primary channel data error interrupt enable */
+#define SPORT_ERR_DERRSMSK            0x00000002    /* Secondary channel data error interrupt enable */
+#define SPORT_ERR_FSERRMSK            0x00000004    /* Frame sync error interrupt enable */
+#define SPORT_ERR_DERRPSTAT           0x00000010    /* Primary channel data error status */
+#define SPORT_ERR_DERRSSTAT           0x00000020    /* Secondary channel data error status */
+#define SPORT_ERR_FSERRSTAT           0x00000040    /* Frame sync error status */
+
+#define SPORT_MSTAT_CURCHAN           0x000003FF    /* Channel which is being serviced in the multichannel operation */
+
+#define SPORT_CTL2_FSMUXSEL           0x00000001    /* Frame Sync MUX Select */
+#define SPORT_CTL2_CKMUXSEL           0x00000002    /* Clock MUX Select */
+#define SPORT_CTL2_LBSEL              0x00000004    /* Loopback Select */
+
+struct sport_register {
+	u32 spctl;
+	u32 div;
+	u32 spmctl;
+	u32 spcs0;
+	u32 spcs1;
+	u32 spcs2;
+	u32 spcs3;
+	u32 spcnt;
+	u32 sperrctl;
+	u32 spmstat;
+	u32 spctl2;
+	u32 txa;
+	u32 rxa;
+	u32 txb;
+	u32 rxb;
+	u32 revid;
+};
+
+struct bfin_snd_platform_data {
+	const unsigned short *pin_req;
+};
+
+#endif
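A brief sketch, not from the patch, of composing SPORT_CTL bits for an internally clocked transmitter; it assumes the usual Blackfin convention that the SLEN field (mask 0x1F0, i.e. bits 4..8) holds the serial word length minus one:

#include <asm/bfin_sport3.h>

/* SLEN encoding: word length minus one, placed in bits 4..8 (assumption) */
#define EXAMPLE_SLEN(bits)	((((bits) - 1) << 4) & SPORT_CTL_SLEN)

static const u32 example_spctl =
	SPORT_CTL_SPTRAN |	/* transmit direction */
	SPORT_CTL_ICLK	 |	/* internal serial clock */
	SPORT_CTL_FSR	 |	/* frame sync required */
	SPORT_CTL_IFS	 |	/* internally generated frame sync */
	EXAMPLE_SLEN(32);	/* 32-bit serial words */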
diff --git a/arch/blackfin/include/asm/bfin_twi.h b/arch/blackfin/include/asm/bfin_twi.h
index e767d64..2f3339a 100644
--- a/arch/blackfin/include/asm/bfin_twi.h
+++ b/arch/blackfin/include/asm/bfin_twi.h
@@ -10,6 +10,7 @@
 #define __ASM_BFIN_TWI_H__
 
 #include <linux/types.h>
+#include <linux/i2c.h>
 
 /*
  * All Blackfin system MMRs are padded to 32bits even if the register
@@ -42,4 +43,145 @@
 
 #undef __BFP
 
+struct bfin_twi_iface {
+	int			irq;
+	spinlock_t		lock;
+	char			read_write;
+	u8			command;
+	u8			*transPtr;
+	int			readNum;
+	int			writeNum;
+	int			cur_mode;
+	int			manual_stop;
+	int			result;
+	struct i2c_adapter	adap;
+	struct completion	complete;
+	struct i2c_msg		*pmsg;
+	int			msg_num;
+	int			cur_msg;
+	u16			saved_clkdiv;
+	u16			saved_control;
+	struct bfin_twi_regs	*regs_base;
+};
+
+#define DEFINE_TWI_REG(reg_name, reg) \
+static inline u16 read_##reg_name(struct bfin_twi_iface *iface) \
+	{ return iface->regs_base->reg; } \
+static inline void write_##reg_name(struct bfin_twi_iface *iface, u16 v) \
+	{ iface->regs_base->reg = v; }
+
+DEFINE_TWI_REG(CLKDIV, clkdiv)
+DEFINE_TWI_REG(CONTROL, control)
+DEFINE_TWI_REG(SLAVE_CTL, slave_ctl)
+DEFINE_TWI_REG(SLAVE_STAT, slave_stat)
+DEFINE_TWI_REG(SLAVE_ADDR, slave_addr)
+DEFINE_TWI_REG(MASTER_CTL, master_ctl)
+DEFINE_TWI_REG(MASTER_STAT, master_stat)
+DEFINE_TWI_REG(MASTER_ADDR, master_addr)
+DEFINE_TWI_REG(INT_STAT, int_stat)
+DEFINE_TWI_REG(INT_MASK, int_mask)
+DEFINE_TWI_REG(FIFO_CTL, fifo_ctl)
+DEFINE_TWI_REG(FIFO_STAT, fifo_stat)
+DEFINE_TWI_REG(XMT_DATA8, xmt_data8)
+DEFINE_TWI_REG(XMT_DATA16, xmt_data16)
+#if !ANOMALY_05001001
+DEFINE_TWI_REG(RCV_DATA8, rcv_data8)
+DEFINE_TWI_REG(RCV_DATA16, rcv_data16)
+#else
+static inline u16 read_RCV_DATA8(struct bfin_twi_iface *iface)
+{
+	u16 ret;
+	unsigned long flags;
+
+	flags = hard_local_irq_save();
+	ret = iface->regs_base->rcv_data8;
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+
+static inline u16 read_RCV_DATA16(struct bfin_twi_iface *iface)
+{
+	u16 ret;
+	unsigned long flags;
+
+	flags = hard_local_irq_save();
+	ret = iface->regs_base->rcv_data16;
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+#endif
+
+
+/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
+/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
+#define	CLKLOW(x)	((x) & 0xFF)	/* Periods Clock Is Held Low                    */
+#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low                 */
+
+/* TWI_PRESCALE Masks															*/
+#define	PRESCALE	0x007F	/* SCLKs Per Internal Time Reference (10MHz)    */
+#define	TWI_ENA		0x0080	/* TWI Enable                                                                   */
+#define	SCCB		0x0200	/* SCCB Compatibility Enable                                    */
+
+/* TWI_SLAVE_CTL Masks															*/
+#define	SEN			0x0001	/* Slave Enable                                                                 */
+#define	SADD_LEN	0x0002	/* Slave Address Length                                                 */
+#define	STDVAL		0x0004	/* Slave Transmit Data Valid                                    */
+#define	NAK			0x0008	/* NAK/ACK* Generated At Conclusion Of Transfer */
+#define	GEN			0x0010	/* General Call Address Matching Enabled                */
+
+/* TWI_SLAVE_STAT Masks															*/
+#define	SDIR		0x0001	/* Slave Transfer Direction (Transmit/Receive*) */
+#define GCALL		0x0002	/* General Call Indicator                                               */
+
+/* TWI_MASTER_CTL Masks													*/
+#define	MEN			0x0001	/* Master Mode Enable                                           */
+#define	MADD_LEN	0x0002	/* Master Address Length                                        */
+#define	MDIR		0x0004	/* Master Transmit Direction (RX/TX*)           */
+#define	FAST		0x0008	/* Use Fast Mode Timing Specs                           */
+#define	STOP		0x0010	/* Issue Stop Condition                                         */
+#define	RSTART		0x0020	/* Repeat Start or Stop* At End Of Transfer     */
+#define	DCNT		0x3FC0	/* Data Bytes To Transfer                                       */
+#define	SDAOVR		0x4000	/* Serial Data Override                                         */
+#define	SCLOVR		0x8000	/* Serial Clock Override                                        */
+
+/* TWI_MASTER_STAT Masks														*/
+#define	MPROG		0x0001	/* Master Transfer In Progress                                  */
+#define	LOSTARB		0x0002	/* Lost Arbitration Indicator (Xfer Aborted)    */
+#define	ANAK		0x0004	/* Address Not Acknowledged                                             */
+#define	DNAK		0x0008	/* Data Not Acknowledged                                                */
+#define	BUFRDERR	0x0010	/* Buffer Read Error                                                    */
+#define	BUFWRERR	0x0020	/* Buffer Write Error                                                   */
+#define	SDASEN		0x0040	/* Serial Data Sense                                                    */
+#define	SCLSEN		0x0080	/* Serial Clock Sense                                                   */
+#define	BUSBUSY		0x0100	/* Bus Busy Indicator                                                   */
+
+/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
+#define	SINIT		0x0001	/* Slave Transfer Initiated     */
+#define	SCOMP		0x0002	/* Slave Transfer Complete      */
+#define	SERR		0x0004	/* Slave Transfer Error         */
+#define	SOVF		0x0008	/* Slave Overflow                       */
+#define	MCOMP		0x0010	/* Master Transfer Complete     */
+#define	MERR		0x0020	/* Master Transfer Error        */
+#define	XMTSERV		0x0040	/* Transmit FIFO Service        */
+#define	RCVSERV		0x0080	/* Receive FIFO Service         */
+
+/* TWI_FIFO_CTRL Masks												*/
+#define	XMTFLUSH	0x0001	/* Transmit Buffer Flush                        */
+#define	RCVFLUSH	0x0002	/* Receive Buffer Flush                         */
+#define	XMTINTLEN	0x0004	/* Transmit Buffer Interrupt Length     */
+#define	RCVINTLEN	0x0008	/* Receive Buffer Interrupt Length      */
+
+/* TWI_FIFO_STAT Masks															*/
+#define	XMTSTAT		0x0003	/* Transmit FIFO Status                                                 */
+#define	XMT_EMPTY	0x0000	/*              Transmit FIFO Empty                                             */
+#define	XMT_HALF	0x0001	/*              Transmit FIFO Has 1 Byte To Write               */
+#define	XMT_FULL	0x0003	/*              Transmit FIFO Full (2 Bytes To Write)   */
+
+#define	RCVSTAT		0x000C	/* Receive FIFO Status                                                  */
+#define	RCV_EMPTY	0x0000	/*              Receive FIFO Empty                                              */
+#define	RCV_HALF	0x0004	/*              Receive FIFO Has 1 Byte To Read                 */
+#define	RCV_FULL	0x000C	/*              Receive FIFO Full (2 Bytes To Read)             */
+
 #endif
diff --git a/arch/blackfin/include/asm/blackfin.h b/arch/blackfin/include/asm/blackfin.h
index 7be5368..f111f36 100644
--- a/arch/blackfin/include/asm/blackfin.h
+++ b/arch/blackfin/include/asm/blackfin.h
@@ -63,20 +63,16 @@
 
 #if ANOMALY_05000312 || ANOMALY_05000244
 #define SSYNC(scratch)	\
-do {			\
 	cli scratch;	\
 	nop; nop; nop;	\
 	SSYNC;		\
-	sti scratch;	\
-} while (0)
+	sti scratch;
 
 #define CSYNC(scratch)	\
-do {			\
 	cli scratch;	\
 	nop; nop; nop;	\
 	CSYNC;		\
-	sti scratch;	\
-} while (0)
+	sti scratch;
 
 #else
 #define SSYNC(scratch) SSYNC;
diff --git a/arch/blackfin/include/asm/clkdev.h b/arch/blackfin/include/asm/clkdev.h
new file mode 100644
index 0000000..9053bed
--- /dev/null
+++ b/arch/blackfin/include/asm/clkdev.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_CLKDEV__H_
+#define __ASM_CLKDEV__H_
+
+#include <linux/slab.h>
+
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+	return kzalloc(size, GFP_KERNEL);
+}
+
+#define __clk_put(clk)
+#define __clk_get(clk) ({ 1; })
+
+#endif
diff --git a/arch/blackfin/include/asm/clocks.h b/arch/blackfin/include/asm/clocks.h
index 6f0b618..9b3c85b 100644
--- a/arch/blackfin/include/asm/clocks.h
+++ b/arch/blackfin/include/asm/clocks.h
@@ -48,4 +48,27 @@
 # define CONFIG_VCO_MULT 0
 #endif
 
+#include <linux/clk.h>
+
+struct clk_ops {
+	unsigned long (*get_rate)(struct clk *clk);
+	unsigned long (*round_rate)(struct clk *clk, unsigned long rate);
+	int (*set_rate)(struct clk *clk, unsigned long rate);
+	int (*enable)(struct clk *clk);
+	int (*disable)(struct clk *clk);
+};
+
+struct clk {
+	struct clk		*parent;
+	const char              *name;
+	unsigned long           rate;
+	spinlock_t              lock;
+	u32                     flags;
+	const struct clk_ops    *ops;
+	void __iomem            *reg;
+	u32                     mask;
+	u32                     shift;
+};
+
+int clk_init(void);
 #endif
diff --git a/arch/blackfin/include/asm/cplb.h b/arch/blackfin/include/asm/cplb.h
index fda9626..5c37f62 100644
--- a/arch/blackfin/include/asm/cplb.h
+++ b/arch/blackfin/include/asm/cplb.h
@@ -62,6 +62,10 @@
 #define SIZE_4K 0x00001000      /* 4K */
 #define SIZE_1M 0x00100000      /* 1M */
 #define SIZE_4M 0x00400000      /* 4M */
+#define SIZE_16K 0x00004000      /* 16K */
+#define SIZE_64K 0x00010000      /* 64K */
+#define SIZE_16M 0x01000000      /* 16M */
+#define SIZE_64M 0x04000000      /* 64M */
 
 #define MAX_CPLBS 16
 
diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h
index 8236790..fe0ca03 100644
--- a/arch/blackfin/include/asm/def_LPBlackfin.h
+++ b/arch/blackfin/include/asm/def_LPBlackfin.h
@@ -3,7 +3,7 @@
  *
  * Copyright 2005-2008 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or GPL-2 (or later).
+ * Licensed under the Clear BSD license or GPL-2 (or later).
  */
 
 #ifndef _DEF_LPBLACKFIN_H
@@ -622,6 +622,10 @@
 #define PAGE_SIZE_4KB      0x00010000	/* 4 KB page size */
 #define PAGE_SIZE_1MB      0x00020000	/* 1 MB page size */
 #define PAGE_SIZE_4MB      0x00030000	/* 4 MB page size */
+#define PAGE_SIZE_16KB     0x00040000	/* 16 KB page size */
+#define PAGE_SIZE_64KB     0x00050000	/* 64 KB page size */
+#define PAGE_SIZE_16MB     0x00060000	/* 16 MB page size */
+#define PAGE_SIZE_64MB     0x00070000	/* 64 MB page size */
 #define CPLB_L1SRAM        0x00000020	/* 0=SRAM mapped in L1, 0=SRAM not
 					 * mapped to L1
 					 */
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index dac0c97..40e9c2b 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -22,12 +22,22 @@
 #define DATA_SIZE_8			0
 #define DATA_SIZE_16		1
 #define DATA_SIZE_32		2
+#ifdef CONFIG_BF60x
+#define DATA_SIZE_64		3
+#endif
 
 #define DMA_FLOW_STOP		0
 #define DMA_FLOW_AUTO		1
+#ifdef CONFIG_BF60x
+#define DMA_FLOW_LIST		4
+#define DMA_FLOW_ARRAY		5
+#define DMA_FLOW_LIST_DEMAND	6
+#define DMA_FLOW_ARRAY_DEMAND	7
+#else
 #define DMA_FLOW_ARRAY		4
 #define DMA_FLOW_SMALL		6
 #define DMA_FLOW_LARGE		7
+#endif
 
 #define DIMENSION_LINEAR	0
 #define DIMENSION_2D		1
@@ -36,26 +46,80 @@
 #define DIR_WRITE			1
 
 #define INTR_DISABLE		0
+#ifdef CONFIG_BF60x
+#define INTR_ON_PERI			1
+#endif
 #define INTR_ON_BUF			2
 #define INTR_ON_ROW			3
 
 #define DMA_NOSYNC_KEEP_DMA_BUF	0
 #define DMA_SYNC_RESTART		1
 
+#ifdef DMA_MMR_SIZE_32
+#define DMA_MMR_SIZE_TYPE long
+#define DMA_MMR_READ bfin_read32
+#define DMA_MMR_WRITE bfin_write32
+#else
+#define DMA_MMR_SIZE_TYPE short
+#define DMA_MMR_READ bfin_read16
+#define DMA_MMR_WRITE bfin_write16
+#endif
+
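+/* Hardware descriptor layout for array-mode (DMA_FLOW_ARRAY) transfers */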
+struct dma_desc_array {
+	unsigned long start_addr;
+	unsigned DMA_MMR_SIZE_TYPE cfg;
+	unsigned DMA_MMR_SIZE_TYPE x_count;
+	DMA_MMR_SIZE_TYPE x_modify;
+} __attribute__((packed));
+
 struct dmasg {
 	void *next_desc_addr;
 	unsigned long start_addr;
-	unsigned short cfg;
-	unsigned short x_count;
-	short x_modify;
-	unsigned short y_count;
-	short y_modify;
+	unsigned DMA_MMR_SIZE_TYPE cfg;
+	unsigned DMA_MMR_SIZE_TYPE x_count;
+	DMA_MMR_SIZE_TYPE x_modify;
+	unsigned DMA_MMR_SIZE_TYPE y_count;
+	DMA_MMR_SIZE_TYPE y_modify;
 } __attribute__((packed));
 
 struct dma_register {
 	void *next_desc_ptr;	/* DMA Next Descriptor Pointer register */
 	unsigned long start_addr;	/* DMA Start address  register */
+#ifdef CONFIG_BF60x
+	unsigned long cfg;	/* DMA Configuration register */
 
+	unsigned long x_count;	/* DMA x_count register */
+
+	long x_modify;	/* DMA x_modify register */
+
+	unsigned long y_count;	/* DMA y_count register */
+
+	long y_modify;	/* DMA y_modify register */
+
+	unsigned long reserved;
+	unsigned long reserved2;
+
+	void *curr_desc_ptr;	/* DMA Current Descriptor Pointer
+					   register */
+	void *prev_desc_ptr;	/* DMA previous initial Descriptor Pointer
+					   register */
+	unsigned long curr_addr_ptr;	/* DMA Current Address Pointer
+						   register */
+	unsigned long irq_status;	/* DMA irq status register */
+
+	unsigned long curr_x_count;	/* DMA Current x-count register */
+
+	unsigned long curr_y_count;	/* DMA Current y-count register */
+
+	unsigned long reserved3;
+
+	unsigned long bw_limit_count;	/* DMA bandwidth limit count register */
+	unsigned long curr_bw_limit_count;	/* DMA Current bandwidth limit
+							count register */
+	unsigned long bw_monitor_count;	/* DMA bandwidth monitor count register */
+	unsigned long curr_bw_monitor_count;	/* DMA Current bandwidth monitor
+							count register */
+#else
 	unsigned short cfg;	/* DMA Configuration register */
 	unsigned short dummy1;	/* DMA Configuration register */
 
@@ -92,6 +156,7 @@
 	unsigned short dummy9;
 
 	unsigned long reserved3;
+#endif
 
 };
 
@@ -131,23 +196,23 @@
 {
 	dma_ch[channel].regs->curr_desc_ptr = addr;
 }
-static inline void set_dma_x_count(unsigned int channel, unsigned short x_count)
+static inline void set_dma_x_count(unsigned int channel, unsigned DMA_MMR_SIZE_TYPE x_count)
 {
 	dma_ch[channel].regs->x_count = x_count;
 }
-static inline void set_dma_y_count(unsigned int channel, unsigned short y_count)
+static inline void set_dma_y_count(unsigned int channel, unsigned DMA_MMR_SIZE_TYPE y_count)
 {
 	dma_ch[channel].regs->y_count = y_count;
 }
-static inline void set_dma_x_modify(unsigned int channel, short x_modify)
+static inline void set_dma_x_modify(unsigned int channel, DMA_MMR_SIZE_TYPE x_modify)
 {
 	dma_ch[channel].regs->x_modify = x_modify;
 }
-static inline void set_dma_y_modify(unsigned int channel, short y_modify)
+static inline void set_dma_y_modify(unsigned int channel, DMA_MMR_SIZE_TYPE y_modify)
 {
 	dma_ch[channel].regs->y_modify = y_modify;
 }
-static inline void set_dma_config(unsigned int channel, unsigned short config)
+static inline void set_dma_config(unsigned int channel, unsigned DMA_MMR_SIZE_TYPE config)
 {
 	dma_ch[channel].regs->cfg = config;
 }
@@ -156,23 +221,55 @@
 	dma_ch[channel].regs->curr_addr_ptr = addr;
 }
 
-static inline unsigned short
-set_bfin_dma_config(char direction, char flow_mode,
-		    char intr_mode, char dma_mode, char width, char syncmode)
+#ifdef CONFIG_BF60x
+static inline unsigned long
+set_bfin_dma_config2(char direction, char flow_mode, char intr_mode,
+		     char dma_mode, char mem_width, char syncmode, char peri_width)
 {
-	return (direction << 1) | (width << 2) | (dma_mode << 4) |
+	unsigned long config = 0;
+
+	switch (intr_mode) {
+	case INTR_ON_BUF:
+		if (dma_mode == DIMENSION_2D)
+			config = DI_EN_Y;
+		else
+			config = DI_EN_X;
+		break;
+	case INTR_ON_ROW:
+		config = DI_EN_X;
+		break;
+	case INTR_ON_PERI:
+		config = DI_EN_P;
+		break;
+	}
+
+	return config | (direction << 1) | (mem_width << 8) | (dma_mode << 26) |
+		(flow_mode << 12) | (syncmode << 2) | (peri_width << 4);
+}
+#endif
+
+static inline unsigned DMA_MMR_SIZE_TYPE
+set_bfin_dma_config(char direction, char flow_mode,
+		    char intr_mode, char dma_mode, char mem_width, char syncmode)
+{
+#ifdef CONFIG_BF60x
+	return set_bfin_dma_config2(direction, flow_mode, intr_mode, dma_mode,
+		mem_width, syncmode, mem_width);
+#else
+	return (direction << 1) | (mem_width << 2) | (dma_mode << 4) |
 		(intr_mode << 6) | (flow_mode << 12) | (syncmode << 5);
+#endif
 }
 
-static inline unsigned short get_dma_curr_irqstat(unsigned int channel)
+static inline unsigned DMA_MMR_SIZE_TYPE get_dma_curr_irqstat(unsigned int channel)
 {
 	return dma_ch[channel].regs->irq_status;
 }
-static inline unsigned short get_dma_curr_xcount(unsigned int channel)
+static inline unsigned DMA_MMR_SIZE_TYPE get_dma_curr_xcount(unsigned int channel)
 {
 	return dma_ch[channel].regs->curr_x_count;
 }
-static inline unsigned short get_dma_curr_ycount(unsigned int channel)
+static inline unsigned DMA_MMR_SIZE_TYPE get_dma_curr_ycount(unsigned int channel)
 {
 	return dma_ch[channel].regs->curr_y_count;
 }
@@ -184,7 +281,7 @@
 {
 	return dma_ch[channel].regs->curr_desc_ptr;
 }
-static inline unsigned short get_dma_config(unsigned int channel)
+static inline unsigned DMA_MMR_SIZE_TYPE get_dma_config(unsigned int channel)
 {
 	return dma_ch[channel].regs->cfg;
 }
@@ -203,8 +300,8 @@
 
 	dma_ch[channel].regs->next_desc_ptr = sg;
 	dma_ch[channel].regs->cfg =
-		(dma_ch[channel].regs->cfg & ~(0xf << 8)) |
-		((ndsize & 0xf) << 8);
+		(dma_ch[channel].regs->cfg & ~NDSIZE) |
+		((ndsize << NDSIZE_OFFSET) & NDSIZE);
 }
 
 static inline int dma_channel_active(unsigned int channel)
@@ -239,7 +336,7 @@
 }
 static inline void clear_dma_irqstat(unsigned int channel)
 {
-	dma_ch[channel].regs->irq_status = DMA_DONE | DMA_ERR;
+	dma_ch[channel].regs->irq_status = DMA_DONE | DMA_ERR | DMA_PIRQ;
 }
 
 void *dma_memcpy(void *dest, const void *src, size_t count);
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index c4ec959..e91eae8 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -9,6 +9,651 @@
 #ifndef _BLACKFIN_DPMC_H_
 #define _BLACKFIN_DPMC_H_
 
+#ifdef __ASSEMBLY__
+#define PM_REG0  R7
+#define PM_REG1  R6
+#define PM_REG2  R5
+#define PM_REG3  R4
+#define PM_REG4  R3
+#define PM_REG5  R2
+#define PM_REG6  R1
+#define PM_REG7  R0
+#define PM_REG8  P5
+#define PM_REG9  P4
+#define PM_REG10 P3
+#define PM_REG11 P2
+#define PM_REG12 P1
+#define PM_REG13 P0
+
+#define PM_REGSET0  R7:7
+#define PM_REGSET1  R7:6
+#define PM_REGSET2  R7:5
+#define PM_REGSET3  R7:4
+#define PM_REGSET4  R7:3
+#define PM_REGSET5  R7:2
+#define PM_REGSET6  R7:1
+#define PM_REGSET7  R7:0
+#define PM_REGSET8  R7:0, P5:5
+#define PM_REGSET9  R7:0, P5:4
+#define PM_REGSET10 R7:0, P5:3
+#define PM_REGSET11 R7:0, P5:2
+#define PM_REGSET12 R7:0, P5:1
+#define PM_REGSET13 R7:0, P5:0
+
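+/*
+ * _PM_PUSH reads an MMR (addressed as an offset from FP) into one of the
+ * PM_REGn core registers and _PM_POP writes it back, while PM_PUSH_SYNC/
+ * PM_POP_SYNC spill or reload a whole PM_REGSETn batch to/from the stack.
+ */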
+#define _PM_PUSH(n, x, w, base) PM_REG##n = w[FP + ((x) - (base))];
+#define _PM_POP(n, x, w, base)  w[FP + ((x) - (base))] = PM_REG##n;
+#define PM_PUSH_SYNC(n)         [--sp] = (PM_REGSET##n);
+#define PM_POP_SYNC(n)          (PM_REGSET##n) = [sp++];
+#define PM_PUSH(n, x)		PM_REG##n = [FP++];
+#define PM_POP(n, x)            [FP--] = PM_REG##n;
+#define PM_CORE_PUSH(n, x)      _PM_PUSH(n, x, , COREMMR_BASE)
+#define PM_CORE_POP(n, x)       _PM_POP(n, x, , COREMMR_BASE)
+#define PM_SYS_PUSH(n, x)       _PM_PUSH(n, x, , SYSMMR_BASE)
+#define PM_SYS_POP(n, x)        _PM_POP(n, x, , SYSMMR_BASE)
+#define PM_SYS_PUSH16(n, x)     _PM_PUSH(n, x, w, SYSMMR_BASE)
+#define PM_SYS_POP16(n, x)      _PM_POP(n, x, w, SYSMMR_BASE)
+
+	.macro bfin_init_pm_bench_cycles
+#ifdef CONFIG_BFIN_PM_WAKEUP_TIME_BENCH
+	R4 = 0;
+	CYCLES = R4;
+	CYCLES2 = R4;
+	R4 = SYSCFG;
+	BITSET(R4, 1);
+	SYSCFG = R4;
+#endif
+	.endm
+
+	.macro bfin_cpu_reg_save
+	/*
+	 * Save the core regs early so we can blow them away when
+	 * saving/restoring MMR states
+	 */
+	[--sp] = (R7:0, P5:0);
+	[--sp] = fp;
+	[--sp] = usp;
+
+	[--sp] = i0;
+	[--sp] = i1;
+	[--sp] = i2;
+	[--sp] = i3;
+
+	[--sp] = m0;
+	[--sp] = m1;
+	[--sp] = m2;
+	[--sp] = m3;
+
+	[--sp] = l0;
+	[--sp] = l1;
+	[--sp] = l2;
+	[--sp] = l3;
+
+	[--sp] = b0;
+	[--sp] = b1;
+	[--sp] = b2;
+	[--sp] = b3;
+	[--sp] = a0.x;
+	[--sp] = a0.w;
+	[--sp] = a1.x;
+	[--sp] = a1.w;
+
+	[--sp] = LC0;
+	[--sp] = LC1;
+	[--sp] = LT0;
+	[--sp] = LT1;
+	[--sp] = LB0;
+	[--sp] = LB1;
+
+	/* We can't push RETI directly as that'll change IPEND[4] */
+	r7 = RETI;
+	[--sp] = RETS;
+	[--sp] = ASTAT;
+#ifndef CONFIG_BFIN_PM_WAKEUP_TIME_BENCH
+	[--sp] = CYCLES;
+	[--sp] = CYCLES2;
+#endif
+	[--sp] = SYSCFG;
+	[--sp] = RETX;
+	[--sp] = SEQSTAT;
+	[--sp] = r7;
+
+	/* Save first func arg in M3 */
+	M3 = R0;
+	.endm
+
+	.macro bfin_cpu_reg_restore
+	/* Restore Core Registers */
+	RETI = [sp++];
+	SEQSTAT = [sp++];
+	RETX = [sp++];
+	SYSCFG = [sp++];
+#ifndef CONFIG_BFIN_PM_WAKEUP_TIME_BENCH
+	CYCLES2 = [sp++];
+	CYCLES = [sp++];
+#endif
+	ASTAT = [sp++];
+	RETS = [sp++];
+
+	LB1 = [sp++];
+	LB0 = [sp++];
+	LT1 = [sp++];
+	LT0 = [sp++];
+	LC1 = [sp++];
+	LC0 = [sp++];
+
+	a1.w = [sp++];
+	a1.x = [sp++];
+	a0.w = [sp++];
+	a0.x = [sp++];
+	b3 = [sp++];
+	b2 = [sp++];
+	b1 = [sp++];
+	b0 = [sp++];
+
+	l3 = [sp++];
+	l2 = [sp++];
+	l1 = [sp++];
+	l0 = [sp++];
+
+	m3 = [sp++];
+	m2 = [sp++];
+	m1 = [sp++];
+	m0 = [sp++];
+
+	i3 = [sp++];
+	i2 = [sp++];
+	i1 = [sp++];
+	i0 = [sp++];
+
+	usp = [sp++];
+	fp = [sp++];
+	(R7:0, P5:0) = [sp++];
+
+	.endm
+
+	.macro bfin_sys_mmr_save
+	/* Save system MMRs */
+	FP.H = hi(SYSMMR_BASE);
+	FP.L = lo(SYSMMR_BASE);
+#ifdef SIC_IMASK0
+	PM_SYS_PUSH(0, SIC_IMASK0)
+	PM_SYS_PUSH(1, SIC_IMASK1)
+# ifdef SIC_IMASK2
+	PM_SYS_PUSH(2, SIC_IMASK2)
+# endif
+#else
+# ifdef SIC_IMASK
+	PM_SYS_PUSH(0, SIC_IMASK)
+# endif
+#endif
+
+#ifdef SIC_IAR0
+	PM_SYS_PUSH(3, SIC_IAR0)
+	PM_SYS_PUSH(4, SIC_IAR1)
+	PM_SYS_PUSH(5, SIC_IAR2)
+#endif
+#ifdef SIC_IAR3
+	PM_SYS_PUSH(6, SIC_IAR3)
+#endif
+#ifdef SIC_IAR4
+	PM_SYS_PUSH(7, SIC_IAR4)
+	PM_SYS_PUSH(8, SIC_IAR5)
+	PM_SYS_PUSH(9, SIC_IAR6)
+#endif
+#ifdef SIC_IAR7
+	PM_SYS_PUSH(10, SIC_IAR7)
+#endif
+#ifdef SIC_IAR8
+	PM_SYS_PUSH(11, SIC_IAR8)
+	PM_SYS_PUSH(12, SIC_IAR9)
+	PM_SYS_PUSH(13, SIC_IAR10)
+#endif
+	PM_PUSH_SYNC(13)
+#ifdef SIC_IAR11
+	PM_SYS_PUSH(0, SIC_IAR11)
+#endif
+
+#ifdef SIC_IWR
+	PM_SYS_PUSH(1, SIC_IWR)
+#endif
+#ifdef SIC_IWR0
+	PM_SYS_PUSH(1, SIC_IWR0)
+#endif
+#ifdef SIC_IWR1
+	PM_SYS_PUSH(2, SIC_IWR1)
+#endif
+#ifdef SIC_IWR2
+	PM_SYS_PUSH(3, SIC_IWR2)
+#endif
+
+#ifdef PINT0_ASSIGN
+	PM_SYS_PUSH(4, PINT0_MASK_SET)
+	PM_SYS_PUSH(5, PINT1_MASK_SET)
+	PM_SYS_PUSH(6, PINT2_MASK_SET)
+	PM_SYS_PUSH(7, PINT3_MASK_SET)
+	PM_SYS_PUSH(8, PINT0_ASSIGN)
+	PM_SYS_PUSH(9, PINT1_ASSIGN)
+	PM_SYS_PUSH(10, PINT2_ASSIGN)
+	PM_SYS_PUSH(11, PINT3_ASSIGN)
+	PM_SYS_PUSH(12, PINT0_INVERT_SET)
+	PM_SYS_PUSH(13, PINT1_INVERT_SET)
+	PM_PUSH_SYNC(13)
+	PM_SYS_PUSH(0, PINT2_INVERT_SET)
+	PM_SYS_PUSH(1, PINT3_INVERT_SET)
+	PM_SYS_PUSH(2, PINT0_EDGE_SET)
+	PM_SYS_PUSH(3, PINT1_EDGE_SET)
+	PM_SYS_PUSH(4, PINT2_EDGE_SET)
+	PM_SYS_PUSH(5, PINT3_EDGE_SET)
+#endif
+
+#ifdef SYSCR
+	PM_SYS_PUSH16(6, SYSCR)
+#endif
+
+#ifdef EBIU_AMGCTL
+	PM_SYS_PUSH16(7, EBIU_AMGCTL)
+	PM_SYS_PUSH(8, EBIU_AMBCTL0)
+	PM_SYS_PUSH(9, EBIU_AMBCTL1)
+#endif
+#ifdef EBIU_FCTL
+	PM_SYS_PUSH(10, EBIU_MBSCTL)
+	PM_SYS_PUSH(11, EBIU_MODE)
+	PM_SYS_PUSH(12, EBIU_FCTL)
+	PM_PUSH_SYNC(12)
+#else
+	PM_PUSH_SYNC(9)
+#endif
+	.endm
+
+
+	.macro bfin_sys_mmr_restore
+/* Restore System MMRs */
+	FP.H = hi(SYSMMR_BASE);
+	FP.L = lo(SYSMMR_BASE);
+
+#ifdef EBIU_FCTL
+	PM_POP_SYNC(12)
+	PM_SYS_POP(12, EBIU_FCTL)
+	PM_SYS_POP(11, EBIU_MODE)
+	PM_SYS_POP(10, EBIU_MBSCTL)
+#else
+	PM_POP_SYNC(9)
+#endif
+
+#ifdef EBIU_AMBCTL
+	PM_SYS_POP(9, EBIU_AMBCTL1)
+	PM_SYS_POP(8, EBIU_AMBCTL0)
+	PM_SYS_POP16(7, EBIU_AMGCTL)
+#endif
+
+#ifdef SYSCR
+	PM_SYS_POP16(6, SYSCR)
+#endif
+
+#ifdef PINT0_ASSIGN
+	PM_SYS_POP(5, PINT3_EDGE_SET)
+	PM_SYS_POP(4, PINT2_EDGE_SET)
+	PM_SYS_POP(3, PINT1_EDGE_SET)
+	PM_SYS_POP(2, PINT0_EDGE_SET)
+	PM_SYS_POP(1, PINT3_INVERT_SET)
+	PM_SYS_POP(0, PINT2_INVERT_SET)
+	PM_POP_SYNC(13)
+	PM_SYS_POP(13, PINT1_INVERT_SET)
+	PM_SYS_POP(12, PINT0_INVERT_SET)
+	PM_SYS_POP(11, PINT3_ASSIGN)
+	PM_SYS_POP(10, PINT2_ASSIGN)
+	PM_SYS_POP(9, PINT1_ASSIGN)
+	PM_SYS_POP(8, PINT0_ASSIGN)
+	PM_SYS_POP(7, PINT3_MASK_SET)
+	PM_SYS_POP(6, PINT2_MASK_SET)
+	PM_SYS_POP(5, PINT1_MASK_SET)
+	PM_SYS_POP(4, PINT0_MASK_SET)
+#endif
+
+#ifdef SIC_IWR2
+	PM_SYS_POP(3, SIC_IWR2)
+#endif
+#ifdef SIC_IWR1
+	PM_SYS_POP(2, SIC_IWR1)
+#endif
+#ifdef SIC_IWR0
+	PM_SYS_POP(1, SIC_IWR0)
+#endif
+#ifdef SIC_IWR
+	PM_SYS_POP(1, SIC_IWR)
+#endif
+
+#ifdef SIC_IAR11
+	PM_SYS_POP(0, SIC_IAR11)
+#endif
+	PM_POP_SYNC(13)
+#ifdef SIC_IAR8
+	PM_SYS_POP(13, SIC_IAR10)
+	PM_SYS_POP(12, SIC_IAR9)
+	PM_SYS_POP(11, SIC_IAR8)
+#endif
+#ifdef SIC_IAR7
+	PM_SYS_POP(10, SIC_IAR7)
+#endif
+#ifdef SIC_IAR6
+	PM_SYS_POP(9, SIC_IAR6)
+	PM_SYS_POP(8, SIC_IAR5)
+	PM_SYS_POP(7, SIC_IAR4)
+#endif
+#ifdef SIC_IAR3
+	PM_SYS_POP(6, SIC_IAR3)
+#endif
+#ifdef SIC_IAR0
+	PM_SYS_POP(5, SIC_IAR2)
+	PM_SYS_POP(4, SIC_IAR1)
+	PM_SYS_POP(3, SIC_IAR0)
+#endif
+#ifdef SIC_IMASK0
+# ifdef SIC_IMASK2
+	PM_SYS_POP(2, SIC_IMASK2)
+# endif
+	PM_SYS_POP(1, SIC_IMASK1)
+	PM_SYS_POP(0, SIC_IMASK0)
+#else
+# ifdef SIC_IMASK
+	PM_SYS_POP(0, SIC_IMASK)
+# endif
+#endif
+	.endm
+
+	.macro bfin_core_mmr_save
+	/* Save Core MMRs */
+	I0.H = hi(COREMMR_BASE);
+	I0.L = lo(COREMMR_BASE);
+	I1 = I0;
+	I2 = I0;
+	I3 = I0;
+	B0 = I0;
+	B1 = I0;
+	B2 = I0;
+	B3 = I0;
+	I1.L = lo(DCPLB_ADDR0);
+	I2.L = lo(DCPLB_DATA0);
+	I3.L = lo(ICPLB_ADDR0);
+	B0.L = lo(ICPLB_DATA0);
+	B1.L = lo(EVT2);
+	B2.L = lo(IMASK);
+	B3.L = lo(TCNTL);
+
+	/* Event Vectors */
+	FP = B1;
+	PM_PUSH(0, EVT2)
+	PM_PUSH(1, EVT3)
+	FP += 4;	/* EVT4 */
+	PM_PUSH(2, EVT5)
+	PM_PUSH(3, EVT6)
+	PM_PUSH(4, EVT7)
+	PM_PUSH(5, EVT8)
+	PM_PUSH_SYNC(5)
+
+	PM_PUSH(0, EVT9)
+	PM_PUSH(1, EVT10)
+	PM_PUSH(2, EVT11)
+	PM_PUSH(3, EVT12)
+	PM_PUSH(4, EVT13)
+	PM_PUSH(5, EVT14)
+	PM_PUSH(6, EVT15)
+
+	/* CEC */
+	FP = B2;
+	PM_PUSH(7, IMASK)
+	FP += 4;	/* IPEND */
+	PM_PUSH(8, ILAT)
+	PM_PUSH(9, IPRIO)
+
+	/* Core Timer */
+	FP = B3;
+	PM_PUSH(10, TCNTL)
+	PM_PUSH(11, TPERIOD)
+	PM_PUSH(12, TSCALE)
+	PM_PUSH(13, TCOUNT)
+	PM_PUSH_SYNC(13)
+
+	/* Misc non-contiguous registers */
+	FP = I0;
+	PM_CORE_PUSH(0, DMEM_CONTROL);
+	PM_CORE_PUSH(1, IMEM_CONTROL);
+	PM_CORE_PUSH(2, TBUFCTL);
+	PM_PUSH_SYNC(2)
+
+	/* DCPLB Addr */
+	FP = I1;
+	PM_PUSH(0, DCPLB_ADDR0)
+	PM_PUSH(1, DCPLB_ADDR1)
+	PM_PUSH(2, DCPLB_ADDR2)
+	PM_PUSH(3, DCPLB_ADDR3)
+	PM_PUSH(4, DCPLB_ADDR4)
+	PM_PUSH(5, DCPLB_ADDR5)
+	PM_PUSH(6, DCPLB_ADDR6)
+	PM_PUSH(7, DCPLB_ADDR7)
+	PM_PUSH(8, DCPLB_ADDR8)
+	PM_PUSH(9, DCPLB_ADDR9)
+	PM_PUSH(10, DCPLB_ADDR10)
+	PM_PUSH(11, DCPLB_ADDR11)
+	PM_PUSH(12, DCPLB_ADDR12)
+	PM_PUSH(13, DCPLB_ADDR13)
+	PM_PUSH_SYNC(13)
+	PM_PUSH(0, DCPLB_ADDR14)
+	PM_PUSH(1, DCPLB_ADDR15)
+
+	/* DCPLB Data */
+	FP = I2;
+	PM_PUSH(2, DCPLB_DATA0)
+	PM_PUSH(3, DCPLB_DATA1)
+	PM_PUSH(4, DCPLB_DATA2)
+	PM_PUSH(5, DCPLB_DATA3)
+	PM_PUSH(6, DCPLB_DATA4)
+	PM_PUSH(7, DCPLB_DATA5)
+	PM_PUSH(8, DCPLB_DATA6)
+	PM_PUSH(9, DCPLB_DATA7)
+	PM_PUSH(10, DCPLB_DATA8)
+	PM_PUSH(11, DCPLB_DATA9)
+	PM_PUSH(12, DCPLB_DATA10)
+	PM_PUSH(13, DCPLB_DATA11)
+	PM_PUSH_SYNC(13)
+	PM_PUSH(0, DCPLB_DATA12)
+	PM_PUSH(1, DCPLB_DATA13)
+	PM_PUSH(2, DCPLB_DATA14)
+	PM_PUSH(3, DCPLB_DATA15)
+
+	/* ICPLB Addr */
+	FP = I3;
+	PM_PUSH(4, ICPLB_ADDR0)
+	PM_PUSH(5, ICPLB_ADDR1)
+	PM_PUSH(6, ICPLB_ADDR2)
+	PM_PUSH(7, ICPLB_ADDR3)
+	PM_PUSH(8, ICPLB_ADDR4)
+	PM_PUSH(9, ICPLB_ADDR5)
+	PM_PUSH(10, ICPLB_ADDR6)
+	PM_PUSH(11, ICPLB_ADDR7)
+	PM_PUSH(12, ICPLB_ADDR8)
+	PM_PUSH(13, ICPLB_ADDR9)
+	PM_PUSH_SYNC(13)
+	PM_PUSH(0, ICPLB_ADDR10)
+	PM_PUSH(1, ICPLB_ADDR11)
+	PM_PUSH(2, ICPLB_ADDR12)
+	PM_PUSH(3, ICPLB_ADDR13)
+	PM_PUSH(4, ICPLB_ADDR14)
+	PM_PUSH(5, ICPLB_ADDR15)
+
+	/* ICPLB Data */
+	FP = B0;
+	PM_PUSH(6, ICPLB_DATA0)
+	PM_PUSH(7, ICPLB_DATA1)
+	PM_PUSH(8, ICPLB_DATA2)
+	PM_PUSH(9, ICPLB_DATA3)
+	PM_PUSH(10, ICPLB_DATA4)
+	PM_PUSH(11, ICPLB_DATA5)
+	PM_PUSH(12, ICPLB_DATA6)
+	PM_PUSH(13, ICPLB_DATA7)
+	PM_PUSH_SYNC(13)
+	PM_PUSH(0, ICPLB_DATA8)
+	PM_PUSH(1, ICPLB_DATA9)
+	PM_PUSH(2, ICPLB_DATA10)
+	PM_PUSH(3, ICPLB_DATA11)
+	PM_PUSH(4, ICPLB_DATA12)
+	PM_PUSH(5, ICPLB_DATA13)
+	PM_PUSH(6, ICPLB_DATA14)
+	PM_PUSH(7, ICPLB_DATA15)
+	PM_PUSH_SYNC(7)
+	.endm
+
+	.macro bfin_core_mmr_restore
+	/* Restore Core MMRs */
+	I0.H = hi(COREMMR_BASE);
+	I0.L = lo(COREMMR_BASE);
+	I1 = I0;
+	I2 = I0;
+	I3 = I0;
+	B0 = I0;
+	B1 = I0;
+	B2 = I0;
+	B3 = I0;
+	I1.L = lo(DCPLB_ADDR15);
+	I2.L = lo(DCPLB_DATA15);
+	I3.L = lo(ICPLB_ADDR15);
+	B0.L = lo(ICPLB_DATA15);
+	B1.L = lo(EVT15);
+	B2.L = lo(IPRIO);
+	B3.L = lo(TCOUNT);
+
+	/* ICPLB Data */
+	FP = B0;
+	PM_POP_SYNC(7)
+	PM_POP(7, ICPLB_DATA15)
+	PM_POP(6, ICPLB_DATA14)
+	PM_POP(5, ICPLB_DATA13)
+	PM_POP(4, ICPLB_DATA12)
+	PM_POP(3, ICPLB_DATA11)
+	PM_POP(2, ICPLB_DATA10)
+	PM_POP(1, ICPLB_DATA9)
+	PM_POP(0, ICPLB_DATA8)
+	PM_POP_SYNC(13)
+	PM_POP(13, ICPLB_DATA7)
+	PM_POP(12, ICPLB_DATA6)
+	PM_POP(11, ICPLB_DATA5)
+	PM_POP(10, ICPLB_DATA4)
+	PM_POP(9, ICPLB_DATA3)
+	PM_POP(8, ICPLB_DATA2)
+	PM_POP(7, ICPLB_DATA1)
+	PM_POP(6, ICPLB_DATA0)
+
+	/* ICPLB Addr */
+	FP = I3;
+	PM_POP(5, ICPLB_ADDR15)
+	PM_POP(4, ICPLB_ADDR14)
+	PM_POP(3, ICPLB_ADDR13)
+	PM_POP(2, ICPLB_ADDR12)
+	PM_POP(1, ICPLB_ADDR11)
+	PM_POP(0, ICPLB_ADDR10)
+	PM_POP_SYNC(13)
+	PM_POP(13, ICPLB_ADDR9)
+	PM_POP(12, ICPLB_ADDR8)
+	PM_POP(11, ICPLB_ADDR7)
+	PM_POP(10, ICPLB_ADDR6)
+	PM_POP(9, ICPLB_ADDR5)
+	PM_POP(8, ICPLB_ADDR4)
+	PM_POP(7, ICPLB_ADDR3)
+	PM_POP(6, ICPLB_ADDR2)
+	PM_POP(5, ICPLB_ADDR1)
+	PM_POP(4, ICPLB_ADDR0)
+
+	/* DCPLB Data */
+	FP = I2;
+	PM_POP(3, DCPLB_DATA15)
+	PM_POP(2, DCPLB_DATA14)
+	PM_POP(1, DCPLB_DATA13)
+	PM_POP(0, DCPLB_DATA12)
+	PM_POP_SYNC(13)
+	PM_POP(13, DCPLB_DATA11)
+	PM_POP(12, DCPLB_DATA10)
+	PM_POP(11, DCPLB_DATA9)
+	PM_POP(10, DCPLB_DATA8)
+	PM_POP(9, DCPLB_DATA7)
+	PM_POP(8, DCPLB_DATA6)
+	PM_POP(7, DCPLB_DATA5)
+	PM_POP(6, DCPLB_DATA4)
+	PM_POP(5, DCPLB_DATA3)
+	PM_POP(4, DCPLB_DATA2)
+	PM_POP(3, DCPLB_DATA1)
+	PM_POP(2, DCPLB_DATA0)
+
+	/* DCPLB Addr */
+	FP = I1;
+	PM_POP(1, DCPLB_ADDR15)
+	PM_POP(0, DCPLB_ADDR14)
+	PM_POP_SYNC(13)
+	PM_POP(13, DCPLB_ADDR13)
+	PM_POP(12, DCPLB_ADDR12)
+	PM_POP(11, DCPLB_ADDR11)
+	PM_POP(10, DCPLB_ADDR10)
+	PM_POP(9, DCPLB_ADDR9)
+	PM_POP(8, DCPLB_ADDR8)
+	PM_POP(7, DCPLB_ADDR7)
+	PM_POP(6, DCPLB_ADDR6)
+	PM_POP(5, DCPLB_ADDR5)
+	PM_POP(4, DCPLB_ADDR4)
+	PM_POP(3, DCPLB_ADDR3)
+	PM_POP(2, DCPLB_ADDR2)
+	PM_POP(1, DCPLB_ADDR1)
+	PM_POP(0, DCPLB_ADDR0)
+
+
+	/* Misc non-contiguous registers */
+
+	/* icache & dcache will enable later 
+	   drop IMEM_CONTROL, DMEM_CONTROL pop
+	*/
+	FP = I0;
+	PM_POP_SYNC(2)
+	PM_CORE_POP(2, TBUFCTL)
+	PM_CORE_POP(1, IMEM_CONTROL)
+	PM_CORE_POP(0, DMEM_CONTROL)
+
+	/* Core Timer */
+	FP = B3;
+	R0 = 0x1;
+	[FP - 0xC] = R0;
+
+	PM_POP_SYNC(13)
+	FP = B3;
+	PM_POP(13, TCOUNT)
+	PM_POP(12, TSCALE)
+	PM_POP(11, TPERIOD)
+	PM_POP(10, TCNTL)
+
+	/* CEC */
+	FP = B2;
+	PM_POP(9, IPRIO)
+	PM_POP(8, ILAT)
+	FP += -4;	/* IPEND */
+	PM_POP(7, IMASK)
+
+	/* Event Vectors */
+	FP = B1;
+	PM_POP(6, EVT15)
+	PM_POP(5, EVT14)
+	PM_POP(4, EVT13)
+	PM_POP(3, EVT12)
+	PM_POP(2, EVT11)
+	PM_POP(1, EVT10)
+	PM_POP(0, EVT9)
+	PM_POP_SYNC(5)
+	PM_POP(5, EVT8)
+	PM_POP(4, EVT7)
+	PM_POP(3, EVT6)
+	PM_POP(2, EVT5)
+	FP += -4;	/* EVT4 */
+	PM_POP(1, EVT3)
+	PM_POP(0, EVT2)
+	.endm
+#endif
+
 #include <mach/pll.h>
 
 /* PLL_CTL Masks */
@@ -98,6 +743,16 @@
 #define VLEV_130		0x00F0	/* VLEV = 1.30 V (-5% - +10% Accuracy) */
 #endif
 
+#ifdef CONFIG_BF60x
+#define PA15WE			0x00000001 /* Allow Wake-Up from PA15 */
+#define PB15WE			0x00000002 /* Allow Wake-Up from PB15 */
+#define PC15WE			0x00000004 /* Allow Wake-Up from PC15 */
+#define PD06WE			0x00000008 /* Allow Wake-Up from PD06(ETH0_PHYINT) */
+#define PE12WE			0x00000010 /* Allow Wake-Up from PE12(ETH1_PHYINT, PUSH BUTTON) */
+#define PG04WE			0x00000020 /* Allow Wake-Up from PG04(CAN0_RX) */
+#define PG13WE			0x00000040 /* Allow Wake-Up from PG13 */
+#define USBWE			0x00000080 /* Allow Wake-Up from (USB) */
+#else
 #define WAKE			0x0100	/* Enable RTC/Reset Wakeup From Hibernate */
 #define CANWE			0x0200	/* Enable CAN Wakeup From Hibernate */
 #define PHYWE			0x0400	/* Enable PHY Wakeup From Hibernate */
@@ -113,6 +768,7 @@
 #else
 #define USBWE			0x0800	/* Enable USB Wakeup From Hibernate */
 #endif
+#endif
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/blackfin/include/asm/fixed_code.h b/arch/blackfin/include/asm/fixed_code.h
index 73fe53e..5395088 100644
--- a/arch/blackfin/include/asm/fixed_code.h
+++ b/arch/blackfin/include/asm/fixed_code.h
@@ -29,24 +29,28 @@
 #endif
 #endif
 
-#define FIXED_CODE_START	0x400
+#ifndef CONFIG_PHY_RAM_BASE_ADDRESS
+#define CONFIG_PHY_RAM_BASE_ADDRESS	0x0
+#endif
 
-#define SIGRETURN_STUB		0x400
+#define FIXED_CODE_START	(CONFIG_PHY_RAM_BASE_ADDRESS + 0x400)
 
-#define ATOMIC_SEQS_START	0x410
+#define SIGRETURN_STUB		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x400)
 
-#define ATOMIC_XCHG32		0x410
-#define ATOMIC_CAS32		0x420
-#define ATOMIC_ADD32		0x430
-#define ATOMIC_SUB32		0x440
-#define ATOMIC_IOR32		0x450
-#define ATOMIC_AND32		0x460
-#define ATOMIC_XOR32		0x470
+#define ATOMIC_SEQS_START	(CONFIG_PHY_RAM_BASE_ADDRESS + 0x410)
 
-#define ATOMIC_SEQS_END		0x480
+#define ATOMIC_XCHG32		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x410)
+#define ATOMIC_CAS32		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x420)
+#define ATOMIC_ADD32		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x430)
+#define ATOMIC_SUB32		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x440)
+#define ATOMIC_IOR32		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x450)
+#define ATOMIC_AND32		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x460)
+#define ATOMIC_XOR32		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x470)
 
-#define SAFE_USER_INSTRUCTION   0x480
+#define ATOMIC_SEQS_END		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x480)
 
-#define FIXED_CODE_END		0x490
+#define SAFE_USER_INSTRUCTION   (CONFIG_PHY_RAM_BASE_ADDRESS + 0x480)
+
+#define FIXED_CODE_END		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x490)
 
 #endif
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index 12d3571..3d84d96 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -26,6 +26,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/gpio.h>
 
 /***********************************************************
 *
@@ -244,6 +245,49 @@
 	return -EINVAL;
 }
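+
+/*
+ * Inline equivalents of the gpiolib gpio_request_one()/gpio_request_array()/
+ * gpio_free_array() helpers, built on the bfin_gpio_* primitives.
+ */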
 
+static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
+{
+	int err;
+
+	err = bfin_gpio_request(gpio, label);
+	if (err)
+		return err;
+
+	if (flags & GPIOF_DIR_IN)
+		err = bfin_gpio_direction_input(gpio);
+	else
+		err = bfin_gpio_direction_output(gpio,
+			(flags & GPIOF_INIT_HIGH) ? 1 : 0);
+
+	if (err)
+		bfin_gpio_free(gpio);
+
+	return err;
+}
+
+static inline int gpio_request_array(const struct gpio *array, size_t num)
+{
+	int i, err;
+
+	for (i = 0; i < num; i++, array++) {
+		err = gpio_request_one(array->gpio, array->flags, array->label);
+		if (err)
+			goto err_free;
+	}
+	return 0;
+
+err_free:
+	while (i--)
+		bfin_gpio_free((--array)->gpio);
+	return err;
+}
+
+static inline void gpio_free_array(const struct gpio *array, size_t num)
+{
+	while (num--)
+		bfin_gpio_free((array++)->gpio);
+}
+
 static inline int __gpio_get_value(unsigned gpio)
 {
 	return bfin_gpio_get_value(gpio);
diff --git a/arch/blackfin/include/asm/gptimers.h b/arch/blackfin/include/asm/gptimers.h
index 38bddcb..381e3d6 100644
--- a/arch/blackfin/include/asm/gptimers.h
+++ b/arch/blackfin/include/asm/gptimers.h
@@ -44,6 +44,13 @@
 # define TIMER_GROUP2          1
 #endif
 /*
+ * BF609: 8 timers:
+ */
+#if defined(CONFIG_BF60x)
+# define MAX_BLACKFIN_GPTIMERS 8
+# define TIMER0_GROUP_REG     TIMER_RUN
+#endif
+/*
  * All others: 3 timers:
  */
 #define TIMER_GROUP1           0
@@ -104,6 +111,72 @@
 # define FS2_TIMER_BIT TIMER1bit
 #endif
 
+#ifdef CONFIG_BF60x
+/*
+ * Timer Configuration Register Bits
+ */
+#define TIMER_EMU_RUN       0x8000
+#define TIMER_BPER_EN       0x4000
+#define TIMER_BWID_EN       0x2000
+#define TIMER_BDLY_EN       0x1000
+#define TIMER_OUT_DIS       0x0800
+#define TIMER_TIN_SEL       0x0400
+#define TIMER_CLK_SEL       0x0300
+#define TIMER_CLK_SCLK      0x0000
+#define TIMER_CLK_ALT_CLK0  0x0100
+#define TIMER_CLK_ALT_CLK1  0x0300
+#define TIMER_PULSE_HI      0x0080
+#define TIMER_SLAVE_TRIG    0x0040
+#define TIMER_IRQ_MODE      0x0030
+#define TIMER_IRQ_ACT_EDGE  0x0000
+#define TIMER_IRQ_DLY       0x0010
+#define TIMER_IRQ_WID_DLY   0x0020
+#define TIMER_IRQ_PER       0x0030
+#define TIMER_MODE          0x000f
+#define TIMER_MODE_WDOG_P   0x0008
+#define TIMER_MODE_WDOG_W   0x0009
+#define TIMER_MODE_PWM_CONT 0x000c
+#define TIMER_MODE_PWM      0x000d
+#define TIMER_MODE_WDTH     0x000a
+#define TIMER_MODE_WDTH_D   0x000b
+#define TIMER_MODE_EXT_CLK  0x000e
+#define TIMER_MODE_PININT   0x000f
+
+/*
+ * Timer Status Register Bits
+ */
+#define TIMER_STATUS_TIMIL0  0x0001
+#define TIMER_STATUS_TIMIL1  0x0002
+#define TIMER_STATUS_TIMIL2  0x0004
+#define TIMER_STATUS_TIMIL3  0x0008
+#define TIMER_STATUS_TIMIL4  0x0010
+#define TIMER_STATUS_TIMIL5  0x0020
+#define TIMER_STATUS_TIMIL6  0x0040
+#define TIMER_STATUS_TIMIL7  0x0080
+
+#define TIMER_STATUS_TOVF0   0x0001	/* timer 0 overflow error */
+#define TIMER_STATUS_TOVF1   0x0002
+#define TIMER_STATUS_TOVF2   0x0004
+#define TIMER_STATUS_TOVF3   0x0008
+#define TIMER_STATUS_TOVF4   0x0010
+#define TIMER_STATUS_TOVF5   0x0020
+#define TIMER_STATUS_TOVF6   0x0040
+#define TIMER_STATUS_TOVF7   0x0080
+
+/*
+ * Timer Slave Enable Status: write 1 to clear
+ */
+#define TIMER_STATUS_TRUN0  0x0001
+#define TIMER_STATUS_TRUN1  0x0002
+#define TIMER_STATUS_TRUN2  0x0004
+#define TIMER_STATUS_TRUN3  0x0008
+#define TIMER_STATUS_TRUN4  0x0010
+#define TIMER_STATUS_TRUN5  0x0020
+#define TIMER_STATUS_TRUN6  0x0040
+#define TIMER_STATUS_TRUN7  0x0080
+
+#else
+
 /*
  * Timer Configuration Register Bits
  */
@@ -170,12 +243,18 @@
 #define TIMER_STATUS_TRUN10 0x4000
 #define TIMER_STATUS_TRUN11 0x8000
 
+#endif
+
 /* The actual gptimer API */
 
 void     set_gptimer_pwidth(unsigned int timer_id, uint32_t width);
 uint32_t get_gptimer_pwidth(unsigned int timer_id);
 void     set_gptimer_period(unsigned int timer_id, uint32_t period);
 uint32_t get_gptimer_period(unsigned int timer_id);
+#ifdef CONFIG_BF60x
+void     set_gptimer_delay(unsigned int timer_id, uint32_t delay);
+uint32_t get_gptimer_delay(unsigned int timer_id);
+#endif
 uint32_t get_gptimer_count(unsigned int timer_id);
 int      get_gptimer_intr(unsigned int timer_id);
 void     clear_gptimer_intr(unsigned int timer_id);
@@ -217,16 +296,41 @@
 	u32 counter;
 	u32 period;
 	u32 width;
+#ifdef CONFIG_BF60x
+	u32 delay;
+#endif
 };
 
 /*
  * bfin group timer registers layout
  */
+#ifndef CONFIG_BF60x
 struct bfin_gptimer_group_regs {
 	__BFP(enable);
 	__BFP(disable);
 	u32 status;
 };
+#else
+struct bfin_gptimer_group_regs {
+	__BFP(run);
+	__BFP(enable);
+	__BFP(disable);
+	__BFP(stop_cfg);
+	__BFP(stop_cfg_set);
+	__BFP(stop_cfg_clr);
+	__BFP(data_imsk);
+	__BFP(stat_imsk);
+	__BFP(tr_msk);
+	__BFP(tr_ie);
+	__BFP(data_ilat);
+	__BFP(stat_ilat);
+	__BFP(err_status);
+	__BFP(bcast_per);
+	__BFP(bcast_wid);
+	__BFP(bcast_dly);
+};
+#endif
 
 #undef __BFP
 
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index 43eb474..07aff23 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -67,7 +67,11 @@
 
 static inline notrace int hard_irqs_disabled_flags(unsigned long flags)
 {
+#ifdef CONFIG_BF60x
+	return (flags & IMASK_IVG11) == 0;
+#else
 	return (flags & ~0x3f) == 0;
+#endif
 }
 
 static inline notrace int hard_irqs_disabled(void)
@@ -224,7 +228,7 @@
  * Direct interface to linux/irqflags.h.
  */
 #define arch_local_save_flags()		hard_local_save_flags()
-#define arch_local_irq_save(flags)	__hard_local_irq_save()
+#define arch_local_irq_save()		__hard_local_irq_save()
 #define arch_local_irq_restore(flags)	__hard_local_irq_restore(flags)
 #define arch_local_irq_enable()		__hard_local_irq_enable()
 #define arch_local_irq_disable()	__hard_local_irq_disable()
diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
index 7202404..b93474d 100644
--- a/arch/blackfin/include/asm/page.h
+++ b/arch/blackfin/include/asm/page.h
@@ -7,14 +7,15 @@
 #ifndef _BLACKFIN_PAGE_H
 #define _BLACKFIN_PAGE_H
 
-#include <asm-generic/page.h>
-#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
+#define ARCH_PFN_OFFSET (CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT)
+#define MAP_NR(addr) ((unsigned long)(addr) >> PAGE_SHIFT)
 
 #define VM_DATA_DEFAULT_FLAGS \
 	(VM_READ | VM_WRITE | \
 	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
 		 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
+#include <asm-generic/page.h>
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
diff --git a/arch/blackfin/include/asm/pda.h b/arch/blackfin/include/asm/pda.h
index 28c2498..68d6f66 100644
--- a/arch/blackfin/include/asm/pda.h
+++ b/arch/blackfin/include/asm/pda.h
@@ -13,7 +13,9 @@
 #ifndef __ASSEMBLY__
 
 struct blackfin_pda {			/* Per-processor Data Area */
+#ifdef CONFIG_SMP
 	struct blackfin_pda *next;
+#endif
 
 	unsigned long syscfg;
 #ifdef CONFIG_SMP
diff --git a/arch/blackfin/include/asm/pm.h b/arch/blackfin/include/asm/pm.h
new file mode 100644
index 0000000..f72239b
--- /dev/null
+++ b/arch/blackfin/include/asm/pm.h
@@ -0,0 +1,31 @@
+/*
+ * Blackfin bf609 power management
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2
+ */
+
+#ifndef __PM_H__
+#define __PM_H__
+
+#include <linux/suspend.h>
+
+struct bfin_cpu_pm_fns {
+	void    (*save)(unsigned long *);
+	void    (*restore)(unsigned long *);
+	int     (*valid)(suspend_state_t state);
+	void    (*enter)(suspend_state_t state);
+	int     (*prepare)(void);
+	void    (*finish)(void);
+};
+
+extern struct bfin_cpu_pm_fns *bfin_cpu_pm;
+
+# ifdef CONFIG_BFIN_COREB
+void bfin_coreb_start(void);
+void bfin_coreb_stop(void);
+void bfin_coreb_reset(void);
+# endif
+
+#endif
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index 75ec9df..3287222 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -11,7 +11,7 @@
  */
 #define __NR_restart_syscall	  0
 #define __NR_exit		  1
-#define __NR_fork		  2
+				/* 2 __NR_fork not supported on nommu */
 #define __NR_read		  3
 #define __NR_write		  4
 #define __NR_open		  5
diff --git a/arch/blackfin/kernel/bfin_dma.c b/arch/blackfin/kernel/bfin_dma.c
index 40c2ed6..c166939 100644
--- a/arch/blackfin/kernel/bfin_dma.c
+++ b/arch/blackfin/kernel/bfin_dma.c
@@ -45,9 +45,15 @@
 		atomic_set(&dma_ch[i].chan_status, 0);
 		dma_ch[i].regs = dma_io_base_addr[i];
 	}
+#ifdef CH_MEM_STREAM3_SRC
+	/* Mark MEMDMA Channel 3 as requested since we're using it internally */
+	request_dma(CH_MEM_STREAM3_DEST, "Blackfin dma_memcpy");
+	request_dma(CH_MEM_STREAM3_SRC, "Blackfin dma_memcpy");
+#else
 	/* Mark MEMDMA Channel 0 as requested since we're using it internally */
 	request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
 	request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy");
+#endif
 
 #if defined(CONFIG_DEB_DMA_URGENT)
 	bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE()
@@ -84,7 +90,8 @@
 
 static int __init proc_dma_init(void)
 {
-	return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL;
+	proc_create("dma", 0, NULL, &proc_dma_operations);
+	return 0;
 }
 late_initcall(proc_dma_init);
 #endif
@@ -204,6 +211,7 @@
 # ifndef MAX_DMA_SUSPEND_CHANNELS
 #  define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS
 # endif
+# ifndef CONFIG_BF60x
 int blackfin_dma_suspend(void)
 {
 	int i;
@@ -213,7 +221,6 @@
 			printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
 			return -EBUSY;
 		}
-
 		if (i < MAX_DMA_SUSPEND_CHANNELS)
 			dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
 	}
@@ -230,7 +237,6 @@
 
 	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
 		dma_ch[i].regs->cfg = 0;
-
 		if (i < MAX_DMA_SUSPEND_CHANNELS)
 			dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
 	}
@@ -238,6 +244,16 @@
 	bfin_write_DMAC_TC_PER(0x0111);
 #endif
 }
+# else
+int blackfin_dma_suspend(void)
+{
+	return 0;
+}
+
+void blackfin_dma_resume(void)
+{
+}
+#endif
 #endif
 
 /**
@@ -279,10 +295,10 @@
 			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
 		}
 
-		if (!bfin_read16(&src_ch->cfg))
+		if (!DMA_MMR_READ(&src_ch->cfg))
 			break;
-		else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
-			bfin_write16(&src_ch->cfg, 0);
+		else if (DMA_MMR_READ(&dst_ch->irq_status) & DMA_DONE) {
+			DMA_MMR_WRITE(&src_ch->cfg, 0);
 			break;
 		}
 	}
@@ -295,22 +311,31 @@
 
 	/* Destination */
 	bfin_write32(&dst_ch->start_addr, dst);
-	bfin_write16(&dst_ch->x_count, size >> 2);
-	bfin_write16(&dst_ch->x_modify, 1 << 2);
-	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);
+	DMA_MMR_WRITE(&dst_ch->x_count, size >> 2);
+	DMA_MMR_WRITE(&dst_ch->x_modify, 1 << 2);
+	DMA_MMR_WRITE(&dst_ch->irq_status, DMA_DONE | DMA_ERR);
 
 	/* Source */
 	bfin_write32(&src_ch->start_addr, src);
-	bfin_write16(&src_ch->x_count, size >> 2);
-	bfin_write16(&src_ch->x_modify, 1 << 2);
-	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);
+	DMA_MMR_WRITE(&src_ch->x_count, size >> 2);
+	DMA_MMR_WRITE(&src_ch->x_modify, 1 << 2);
+	DMA_MMR_WRITE(&src_ch->irq_status, DMA_DONE | DMA_ERR);
 
 	/* Enable */
-	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
-	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);
+	DMA_MMR_WRITE(&src_ch->cfg, DMAEN | WDSIZE_32);
+	DMA_MMR_WRITE(&dst_ch->cfg, WNR | DI_EN_X | DMAEN | WDSIZE_32);
 
 	/* Since we are atomic now, don't use the workaround ssync */
 	__builtin_bfin_ssync();
+
+#ifdef CONFIG_BF60x
+	/* Work around a possible MDMA anomaly. Running 2 MDMA channels to
+	 * transfer DDR data to L1 SRAM may corrupt data.
+	 * Should be reverted after this issue is root-caused.
+	 */
+	while (!(DMA_MMR_READ(&dst_ch->irq_status) & DMA_DONE))
+		continue;
+#endif
 }
 
 void __init early_dma_memcpy_done(void)
@@ -336,6 +361,42 @@
 	__builtin_bfin_ssync();
 }
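+
+/*
+ * Map the MDMA_S/MDMA_D accessors used by __dma_memcpy() onto MDMA stream 3
+ * when the part provides it (the channels reserved during DMA init above),
+ * otherwise onto MDMA stream 0.
+ */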
 
+#ifdef CH_MEM_STREAM3_SRC
+#define bfin_read_MDMA_S_CONFIG bfin_read_MDMA_S3_CONFIG
+#define bfin_write_MDMA_S_CONFIG bfin_write_MDMA_S3_CONFIG
+#define bfin_write_MDMA_S_START_ADDR bfin_write_MDMA_S3_START_ADDR
+#define bfin_write_MDMA_S_IRQ_STATUS bfin_write_MDMA_S3_IRQ_STATUS
+#define bfin_write_MDMA_S_X_COUNT bfin_write_MDMA_S3_X_COUNT
+#define bfin_write_MDMA_S_X_MODIFY bfin_write_MDMA_S3_X_MODIFY
+#define bfin_write_MDMA_S_Y_COUNT bfin_write_MDMA_S3_Y_COUNT
+#define bfin_write_MDMA_S_Y_MODIFY bfin_write_MDMA_S3_Y_MODIFY
+#define bfin_write_MDMA_D_CONFIG bfin_write_MDMA_D3_CONFIG
+#define bfin_write_MDMA_D_START_ADDR bfin_write_MDMA_D3_START_ADDR
+#define bfin_read_MDMA_D_IRQ_STATUS bfin_read_MDMA_D3_IRQ_STATUS
+#define bfin_write_MDMA_D_IRQ_STATUS bfin_write_MDMA_D3_IRQ_STATUS
+#define bfin_write_MDMA_D_X_COUNT bfin_write_MDMA_D3_X_COUNT
+#define bfin_write_MDMA_D_X_MODIFY bfin_write_MDMA_D3_X_MODIFY
+#define bfin_write_MDMA_D_Y_COUNT bfin_write_MDMA_D3_Y_COUNT
+#define bfin_write_MDMA_D_Y_MODIFY bfin_write_MDMA_D3_Y_MODIFY
+#else
+#define bfin_read_MDMA_S_CONFIG bfin_read_MDMA_S0_CONFIG
+#define bfin_write_MDMA_S_CONFIG bfin_write_MDMA_S0_CONFIG
+#define bfin_write_MDMA_S_START_ADDR bfin_write_MDMA_S0_START_ADDR
+#define bfin_write_MDMA_S_IRQ_STATUS bfin_write_MDMA_S0_IRQ_STATUS
+#define bfin_write_MDMA_S_X_COUNT bfin_write_MDMA_S0_X_COUNT
+#define bfin_write_MDMA_S_X_MODIFY bfin_write_MDMA_S0_X_MODIFY
+#define bfin_write_MDMA_S_Y_COUNT bfin_write_MDMA_S0_Y_COUNT
+#define bfin_write_MDMA_S_Y_MODIFY bfin_write_MDMA_S0_Y_MODIFY
+#define bfin_write_MDMA_D_CONFIG bfin_write_MDMA_D0_CONFIG
+#define bfin_write_MDMA_D_START_ADDR bfin_write_MDMA_D0_START_ADDR
+#define bfin_read_MDMA_D_IRQ_STATUS bfin_read_MDMA_D0_IRQ_STATUS
+#define bfin_write_MDMA_D_IRQ_STATUS bfin_write_MDMA_D0_IRQ_STATUS
+#define bfin_write_MDMA_D_X_COUNT bfin_write_MDMA_D0_X_COUNT
+#define bfin_write_MDMA_D_X_MODIFY bfin_write_MDMA_D0_X_MODIFY
+#define bfin_write_MDMA_D_Y_COUNT bfin_write_MDMA_D0_Y_COUNT
+#define bfin_write_MDMA_D_Y_MODIFY bfin_write_MDMA_D0_Y_MODIFY
+#endif
+
 /**
  *	__dma_memcpy - program the MDMA registers
  *
@@ -358,8 +419,8 @@
 	 */
 	__builtin_bfin_ssync();
 
-	if (bfin_read_MDMA_S0_CONFIG())
-		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
+	if (bfin_read_MDMA_S_CONFIG())
+		while (!(bfin_read_MDMA_D_IRQ_STATUS() & DMA_DONE))
 			continue;
 
 	if (conf & DMA2D) {
@@ -374,39 +435,42 @@
 		u32 shift = abs(dmod) >> 1;
 		size_t ycnt = cnt >> (16 - shift);
 		cnt = 1 << (16 - shift);
-		bfin_write_MDMA_D0_Y_COUNT(ycnt);
-		bfin_write_MDMA_S0_Y_COUNT(ycnt);
-		bfin_write_MDMA_D0_Y_MODIFY(dmod);
-		bfin_write_MDMA_S0_Y_MODIFY(smod);
+		bfin_write_MDMA_D_Y_COUNT(ycnt);
+		bfin_write_MDMA_S_Y_COUNT(ycnt);
+		bfin_write_MDMA_D_Y_MODIFY(dmod);
+		bfin_write_MDMA_S_Y_MODIFY(smod);
 	}
 
-	bfin_write_MDMA_D0_START_ADDR(daddr);
-	bfin_write_MDMA_D0_X_COUNT(cnt);
-	bfin_write_MDMA_D0_X_MODIFY(dmod);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	bfin_write_MDMA_D_START_ADDR(daddr);
+	bfin_write_MDMA_D_X_COUNT(cnt);
+	bfin_write_MDMA_D_X_MODIFY(dmod);
+	bfin_write_MDMA_D_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
-	bfin_write_MDMA_S0_START_ADDR(saddr);
-	bfin_write_MDMA_S0_X_COUNT(cnt);
-	bfin_write_MDMA_S0_X_MODIFY(smod);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	bfin_write_MDMA_S_START_ADDR(saddr);
+	bfin_write_MDMA_S_X_COUNT(cnt);
+	bfin_write_MDMA_S_X_MODIFY(smod);
+	bfin_write_MDMA_S_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
-	bfin_write_MDMA_S0_CONFIG(DMAEN | conf);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf);
+	bfin_write_MDMA_S_CONFIG(DMAEN | conf);
+	if (conf & DMA2D)
+		bfin_write_MDMA_D_CONFIG(WNR | DI_EN_Y | DMAEN | conf);
+	else
+		bfin_write_MDMA_D_CONFIG(WNR | DI_EN_X | DMAEN | conf);
 
 	spin_unlock_irqrestore(&mdma_lock, flags);
 
 	SSYNC();
 
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
-		if (bfin_read_MDMA_S0_CONFIG())
+	while (!(bfin_read_MDMA_D_IRQ_STATUS() & DMA_DONE))
+		if (bfin_read_MDMA_S_CONFIG())
 			continue;
 		else
 			return;
 
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	bfin_write_MDMA_D_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
+	bfin_write_MDMA_S_CONFIG(0);
+	bfin_write_MDMA_D_CONFIG(0);
 }
 
 /**
@@ -448,8 +512,10 @@
 	}
 	size >>= shift;
 
+#ifndef DMA_MMR_SIZE_32
 	if (size > 0x10000)
 		conf |= DMA2D;
+#endif
 
 	__dma_memcpy(dst, mod, src, mod, size, conf);
 
@@ -488,6 +554,9 @@
  */
 void *dma_memcpy_nocache(void *pdst, const void *psrc, size_t size)
 {
+#ifdef DMA_MMR_SIZE_32
+	_dma_memcpy(pdst, psrc, size);
+#else
 	size_t bulk, rest;
 
 	bulk = size & ~0xffff;
@@ -495,6 +564,7 @@
 	if (bulk)
 		_dma_memcpy(pdst, psrc, bulk);
 	_dma_memcpy(pdst + bulk, psrc + bulk, rest);
+#endif
 	return pdst;
 }
 EXPORT_SYMBOL(dma_memcpy_nocache);
@@ -514,14 +584,14 @@
 }
 EXPORT_SYMBOL(safe_dma_memcpy);
 
-static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len,
+static void _dma_out(unsigned long addr, unsigned long buf, unsigned DMA_MMR_SIZE_TYPE len,
                      u16 size, u16 dma_size)
 {
 	blackfin_dcache_flush_range(buf, buf + len * size);
 	__dma_memcpy(addr, 0, buf, size, len, dma_size);
 }
 
-static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len,
+static void _dma_in(unsigned long addr, unsigned long buf, unsigned DMA_MMR_SIZE_TYPE len,
                     u16 size, u16 dma_size)
 {
 	blackfin_dcache_invalidate_range(buf, buf + len * size);
@@ -529,7 +599,7 @@
 }
 
 #define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
-void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \
+void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned DMA_MMR_SIZE_TYPE len) \
 { \
 	_dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
 } \
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 02796b8..83139aa 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -58,7 +58,7 @@
 	(struct gpio_port_t *) FIO0_FLAG_D,
 	(struct gpio_port_t *) FIO1_FLAG_D,
 	(struct gpio_port_t *) FIO2_FLAG_D,
-#elif defined(CONFIG_BF54x)
+#elif defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
 	(struct gpio_port_t *)PORTA_FER,
 	(struct gpio_port_t *)PORTB_FER,
 	(struct gpio_port_t *)PORTC_FER,
@@ -66,9 +66,11 @@
 	(struct gpio_port_t *)PORTE_FER,
 	(struct gpio_port_t *)PORTF_FER,
 	(struct gpio_port_t *)PORTG_FER,
+# if defined(CONFIG_BF54x)
 	(struct gpio_port_t *)PORTH_FER,
 	(struct gpio_port_t *)PORTI_FER,
 	(struct gpio_port_t *)PORTJ_FER,
+# endif
 #else
 # error no gpio arrays defined
 #endif
@@ -210,7 +212,7 @@
 	else
 		*port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
 	SSYNC();
-#elif defined(CONFIG_BF54x)
+#elif defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
 	if (usage == GPIO_USAGE)
 		gpio_array[gpio_bank(gpio)]->port_fer &= ~gpio_bit(gpio);
 	else
@@ -299,7 +301,7 @@
 	pmux |= (function << offset);
 	bfin_write_PORT_MUX(pmux);
 }
-#elif defined(CONFIG_BF54x)
+#elif defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
 inline void portmux_setup(unsigned short per)
 {
 	u16 ident = P_IDENT(per);
@@ -377,7 +379,7 @@
 }
 #endif
 
-#ifndef CONFIG_BF54x
+#if !(defined(CONFIG_BF54x) || defined(CONFIG_BF60x))
 /***********************************************************
 *
 * FUNCTIONS: Blackfin General Purpose Ports Access Functions
@@ -680,7 +682,7 @@
 
 
 #endif
-#else /* CONFIG_BF54x */
+#else /* CONFIG_BF54x || CONFIG_BF60x */
 #ifdef CONFIG_PM
 
 int bfin_pm_standby_ctrl(unsigned ctrl)
@@ -726,7 +728,7 @@
 }
 EXPORT_SYMBOL(get_gpio_dir);
 
-#endif /* CONFIG_BF54x */
+#endif /* CONFIG_BF54x || CONFIG_BF60x */
 
 /***********************************************************
 *
@@ -783,7 +785,7 @@
 		 * be requested and used by several drivers
 		 */
 
-#ifdef CONFIG_BF54x
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
 		if (!((per & P_MAYSHARE) && get_portmux(per) == P_FUNCT2MUX(per))) {
 #else
 		if (!(per & P_MAYSHARE)) {
@@ -937,7 +939,7 @@
 		printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved as gpio-irq!"
 		       " (Documentation/blackfin/bfin-gpio-notes.txt)\n", gpio);
 	}
-#ifndef CONFIG_BF54x
+#if !(defined(CONFIG_BF54x) || defined(CONFIG_BF60x))
 	else {	/* Reset POLAR setting when acquiring a gpio for the first time */
 		set_gpio_polar(gpio, 0);
 	}
@@ -1110,7 +1112,7 @@
 
 static inline void __bfin_gpio_direction_input(unsigned gpio)
 {
-#ifdef CONFIG_BF54x
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
 	gpio_array[gpio_bank(gpio)]->dir_clear = gpio_bit(gpio);
 #else
 	gpio_array[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio);
@@ -1138,13 +1140,13 @@
 
 void bfin_gpio_irq_prepare(unsigned gpio)
 {
-#ifdef CONFIG_BF54x
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
 	unsigned long flags;
 #endif
 
 	port_setup(gpio, GPIO_USAGE);
 
-#ifdef CONFIG_BF54x
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
 	flags = hard_local_irq_save();
 	__bfin_gpio_direction_input(gpio);
 	hard_local_irq_restore(flags);
@@ -1173,7 +1175,7 @@
 
 	gpio_array[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio);
 	gpio_set_value(gpio, value);
-#ifdef CONFIG_BF54x
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
 	gpio_array[gpio_bank(gpio)]->dir_set = gpio_bit(gpio);
 #else
 	gpio_array[gpio_bank(gpio)]->dir |= gpio_bit(gpio);
@@ -1188,7 +1190,7 @@
 
 int bfin_gpio_get_value(unsigned gpio)
 {
-#ifdef CONFIG_BF54x
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x)
 	return (1 & (gpio_array[gpio_bank(gpio)]->data >> gpio_sub_n(gpio)));
 #else
 	unsigned long flags;
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 886e000..3e366dc 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -139,7 +139,7 @@
 	dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
 	dcplb_bounds[i_d++].data = 0;
 	/* BootROM -- largest one should be less than 1 meg.  */
-	dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
+	dcplb_bounds[i_d].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
 	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
 	if (L2_LENGTH) {
 		/* Addressing hole up to L2 SRAM.  */
@@ -178,7 +178,7 @@
 	icplb_bounds[i_i].eaddr = BOOT_ROM_START;
 	icplb_bounds[i_i++].data = 0;
 	/* BootROM -- largest one should be less than 1 meg.  */
-	icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
+	icplb_bounds[i_i].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
 	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
 
 	if (L2_LENGTH) {
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
index 5b88861..e854f90 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -179,6 +179,12 @@
 		addr = addr1;
 	}
 
+#ifdef CONFIG_BF60x
+	if ((addr >= ASYNC_BANK0_BASE)
+		&& (addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
+		d_data |= PAGE_SIZE_64MB;
+#endif
+
 	/* Pick entry to evict */
 	idx = evict_one_dcplb(cpu);
 
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
index 92f6648..01232a1 100644
--- a/arch/blackfin/kernel/debug-mmrs.c
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -105,6 +105,7 @@
 DEFINE_SYSREG(syscfg, , CSYNC());
 #define D_SYSREG(sr) debugfs_create_file(#sr, S_IRUSR|S_IWUSR, parent, NULL, &fops_sysreg_##sr)
 
+#ifndef CONFIG_BF60x
 /*
  * CAN
  */
@@ -223,8 +224,10 @@
 	__DMA(CURR_DESC_PTR, curr_desc_ptr);
 	__DMA(CURR_ADDR, curr_addr);
 	__DMA(IRQ_STATUS, irq_status);
+#ifndef CONFIG_BF60x
 	if (strcmp(pfx, "IMDMA") != 0)
 		__DMA(PERIPHERAL_MAP, peripheral_map);
+#endif
 	__DMA(CURR_X_COUNT, curr_x_count);
 	__DMA(CURR_Y_COUNT, curr_y_count);
 }
@@ -568,7 +571,7 @@
 #endif
 }
 #define UART(num) bfin_debug_mmrs_uart(parent, UART##num##_DLL, num)
-
+#endif /* CONFIG_BF60x */
 /*
  * The actual debugfs generation
  */
@@ -740,7 +743,7 @@
 	D32(WPDACNT0);
 	D32(WPDACNT1);
 	D32(WPSTAT);
-
+#ifndef CONFIG_BF60x
 	/* System MMRs */
 #ifdef ATAPI_CONTROL
 	parent = debugfs_create_dir("atapi", top);
@@ -1873,7 +1876,7 @@
 
 	}
 #endif	/* BF54x */
-
+#endif /* CONFIG_BF60x */
 	debug_mmrs_dentry = top;
 
 	return 0;
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S
index 686478f..f33792c 100644
--- a/arch/blackfin/kernel/entry.S
+++ b/arch/blackfin/kernel/entry.S
@@ -64,16 +64,6 @@
 	jump (p0);
 ENDPROC(_ret_from_fork)
 
-ENTRY(_sys_fork)
-	r0 = -EINVAL;
-#if (ANOMALY_05000371)
-	nop;
-	nop;
-	nop;
-#endif
-	rts;
-ENDPROC(_sys_fork)
-
 ENTRY(_sys_vfork)
 	r0 = sp;
 	r0 += 24;
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 06459f4..d776773 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -23,7 +23,11 @@
 		printk(KERN_DEBUG "%s:%s:%i: Assertion failed: " #expr "\n", __FILE__, __func__, __LINE__);
 #endif
 
-#define BFIN_TIMER_NUM_GROUP  (BFIN_TIMER_OCTET(MAX_BLACKFIN_GPTIMERS - 1) + 1)
+#ifndef CONFIG_BF60x
+# define BFIN_TIMER_NUM_GROUP  (BFIN_TIMER_OCTET(MAX_BLACKFIN_GPTIMERS - 1) + 1)
+#else
+# define BFIN_TIMER_NUM_GROUP  1
+#endif
 
 static struct bfin_gptimer_regs * const timer_regs[MAX_BLACKFIN_GPTIMERS] =
 {
@@ -158,6 +162,74 @@
 }
 EXPORT_SYMBOL(get_gptimer_count);
 
+#ifdef CONFIG_BF60x
+void set_gptimer_delay(unsigned int timer_id, uint32_t delay)
+{
+	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+	bfin_write(&timer_regs[timer_id]->delay, delay);
+	SSYNC();
+}
+EXPORT_SYMBOL(set_gptimer_delay);
+
+uint32_t get_gptimer_delay(unsigned int timer_id)
+{
+	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+	return bfin_read(&timer_regs[timer_id]->delay);
+}
+EXPORT_SYMBOL(get_gptimer_delay);
+#endif
+
+#ifdef CONFIG_BF60x
+int get_gptimer_intr(unsigned int timer_id)
+{
+	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+	return !!(bfin_read(&group_regs[BFIN_TIMER_OCTET(timer_id)]->data_ilat) & timil_mask[timer_id]);
+}
+EXPORT_SYMBOL(get_gptimer_intr);
+
+void clear_gptimer_intr(unsigned int timer_id)
+{
+	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+	bfin_write(&group_regs[BFIN_TIMER_OCTET(timer_id)]->data_ilat, timil_mask[timer_id]);
+}
+EXPORT_SYMBOL(clear_gptimer_intr);
+
+int get_gptimer_over(unsigned int timer_id)
+{
+	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+	return !!(bfin_read(&group_regs[BFIN_TIMER_OCTET(timer_id)]->stat_ilat) & tovf_mask[timer_id]);
+}
+EXPORT_SYMBOL(get_gptimer_over);
+
+void clear_gptimer_over(unsigned int timer_id)
+{
+	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+	bfin_write(&group_regs[BFIN_TIMER_OCTET(timer_id)]->stat_ilat, tovf_mask[timer_id]);
+}
+EXPORT_SYMBOL(clear_gptimer_over);
+
+int get_gptimer_run(unsigned int timer_id)
+{
+	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+	return !!(bfin_read(&group_regs[BFIN_TIMER_OCTET(timer_id)]->run) & trun_mask[timer_id]);
+}
+EXPORT_SYMBOL(get_gptimer_run);
+
+uint32_t get_gptimer_status(unsigned int group)
+{
+	tassert(group < BFIN_TIMER_NUM_GROUP);
+	return bfin_read(&group_regs[group]->data_ilat);
+}
+EXPORT_SYMBOL(get_gptimer_status);
+
+void set_gptimer_status(unsigned int group, uint32_t value)
+{
+	tassert(group < BFIN_TIMER_NUM_GROUP);
+	bfin_write(&group_regs[group]->data_ilat, value);
+	SSYNC();
+}
+EXPORT_SYMBOL(set_gptimer_status);
+#else
 uint32_t get_gptimer_status(unsigned int group)
 {
 	tassert(group < BFIN_TIMER_NUM_GROUP);
@@ -212,6 +284,7 @@
 	return !!(read_gptimer_status(timer_id) & trun_mask[timer_id]);
 }
 EXPORT_SYMBOL(get_gptimer_run);
+#endif
 
 void set_gptimer_config(unsigned int timer_id, uint16_t config)
 {
@@ -231,6 +304,12 @@
 void enable_gptimers(uint16_t mask)
 {
 	int i;
+#ifdef CONFIG_BF60x
+	uint16_t imask;
+	imask = bfin_read16(TIMER_DATA_IMSK);
+	imask &= ~mask;
+	bfin_write16(TIMER_DATA_IMSK, imask);
+#endif
 	tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0);
 	for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) {
 		bfin_write(&group_regs[i]->enable, mask & 0xFF);
@@ -253,12 +332,16 @@
 
 void disable_gptimers(uint16_t mask)
 {
+#ifndef CONFIG_BF60x
 	int i;
 	_disable_gptimers(mask);
 	for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
 		if (mask & (1 << i))
 			bfin_write(&group_regs[BFIN_TIMER_OCTET(i)]->status, trun_mask[i]);
 	SSYNC();
+#else
+	_disable_gptimers(mask);
+#endif
 }
 EXPORT_SYMBOL(disable_gptimers);
 
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index c0f4fe2..2e3994b 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -95,7 +95,9 @@
 			idle();
 		rcu_idle_exit();
 		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
 	}
 }
 
@@ -329,12 +331,16 @@
 {
 	return in_mem_const_off(addr, size, 0, const_addr, const_size);
 }
+#ifdef CONFIG_BF60x
+#define ASYNC_ENABLED(bnum, bctlnum)	1
+#else
 #define ASYNC_ENABLED(bnum, bctlnum) \
 ({ \
 	(bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
 	bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
 	1; \
 })
+#endif
 /*
  * We can't read EBIU banks that aren't enabled or we end up hanging
  * on the access to the async space.  Make sure we validate accesses
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index b0434f8..5272e6e 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -22,6 +22,7 @@
 __attribute__ ((__l1_text__, __noreturn__))
 static void bfin_reset(void)
 {
+#ifndef CONFIG_BF60x
 	if (!ANOMALY_05000353 && !ANOMALY_05000386)
 		bfrom_SoftReset((void *)(L1_SCRATCH_START + L1_SCRATCH_LENGTH - 20));
 
@@ -57,7 +58,6 @@
 	if (__SILICON_REVISION__ < 1 && bfin_revid() < 1)
 		bfin_read_SWRST();
 #endif
-
 	/* Wait for the SWRST write to complete.  Cannot rely on SSYNC
 	 * though as the System state is all reset now.
 	 */
@@ -72,6 +72,10 @@
 	while (1)
 		/* Issue core reset */
 		asm("raise 1");
+#else
+	while (1)
+		bfin_write_RCU0_CTL(0x1);
+#endif
 }
 
 __attribute__((weak))
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 2ad747e..ada8f0f 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -25,12 +25,16 @@
 #include <asm/cacheflush.h>
 #include <asm/blackfin.h>
 #include <asm/cplbinit.h>
+#include <asm/clocks.h>
 #include <asm/div64.h>
 #include <asm/cpu.h>
 #include <asm/fixed_code.h>
 #include <asm/early_printk.h>
 #include <asm/irq_handler.h>
 #include <asm/pda.h>
+#ifdef CONFIG_BF60x
+#include <mach/pm.h>
+#endif
 
 u16 _bfin_swrst;
 EXPORT_SYMBOL(_bfin_swrst);
@@ -550,7 +554,6 @@
 {
 #ifdef CONFIG_MTD_UCLINUX
 	unsigned long mtd_phys = 0;
-	unsigned long n;
 #endif
 	unsigned long max_mem;
 
@@ -594,9 +597,9 @@
 	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));
 
 # if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
-	n = ext2_image_size((void *)(mtd_phys + 0x400));
-	if (n)
-		mtd_size = PAGE_ALIGN(n * 1024);
+	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
+		mtd_size =
+		    PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
 # endif
 
 # if defined(CONFIG_CRAMFS)
@@ -612,7 +615,8 @@
 
 		/* ROM_FS is XIP, so if we found it, we need to limit memory */
 		if (memory_end > max_mem) {
-			pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
+			pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
+				(max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
 			memory_end = max_mem;
 		}
 	}
@@ -642,7 +646,8 @@
 	 * doesn't exist, or we don't need to - then dont.
 	 */
 	if (memory_end > max_mem) {
-		pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
+		pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
+				(max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
 		memory_end = max_mem;
 	}
 
@@ -661,8 +666,8 @@
 	init_mm.end_data = (unsigned long)_edata;
 	init_mm.brk = (unsigned long)0;
 
-	printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
-	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);
+	printk(KERN_INFO "Board Memory: %ldMB\n", (physical_mem_end - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
+	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", (_ramend - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
 
 	printk(KERN_INFO "Memory map:\n"
 	       "  fixedcode = 0x%p-0x%p\n"
@@ -705,7 +710,7 @@
 	int i;
 
 	max_pfn = 0;
-	min_low_pfn = memory_end;
+	min_low_pfn = PFN_DOWN(memory_end);
 
 	for (i = 0; i < bfin_memmap.nr_map; i++) {
 		unsigned long start, end;
@@ -748,8 +753,7 @@
 	/* pfn of the first usable page frame after kernel image*/
 	if (min_low_pfn < memory_start >> PAGE_SHIFT)
 		min_low_pfn = memory_start >> PAGE_SHIFT;
-
-	start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
+	start_pfn = CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT;
 	end_pfn = memory_end >> PAGE_SHIFT;
 
 	/*
@@ -794,8 +798,8 @@
 	}
 
 	/* reserve memory before memory_start, including bootmap */
-	reserve_bootmem(PAGE_OFFSET,
-		memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
+	reserve_bootmem(CONFIG_PHY_RAM_BASE_ADDRESS,
+		memory_start + bootmap_size + PAGE_SIZE - 1 - CONFIG_PHY_RAM_BASE_ADDRESS,
 		BOOTMEM_DEFAULT);
 }
 
@@ -844,13 +848,40 @@
 		break;
 	}
 	switch (ddrctl & 0x30000) {
-		case DEVWD_4:  ret *= 2;
-		case DEVWD_8:  ret *= 2;
-		case DEVWD_16: break;
+	case DEVWD_4:
+		ret *= 2;
+	case DEVWD_8:
+		ret *= 2;
+	case DEVWD_16:
+		break;
 	}
 	if ((ddrctl & 0xc000) == 0x4000)
 		ret *= 2;
 	return ret;
+#elif defined(CONFIG_BF60x)
+	u32 ddrctl = bfin_read_DMC0_CFG();
+	int ret;
+	switch (ddrctl & 0xf00) {
+	case DEVSZ_64:
+		ret = 64 / 8;
+		break;
+	case DEVSZ_128:
+		ret = 128 / 8;
+		break;
+	case DEVSZ_256:
+		ret = 256 / 8;
+		break;
+	case DEVSZ_512:
+		ret = 512 / 8;
+		break;
+	case DEVSZ_1G:
+		ret = 1024 / 8;
+		break;
+	case DEVSZ_2G:
+		ret = 2048 / 8;
+		break;
+	}
+	return ret;
 #endif
 	BUG();
 }
@@ -860,6 +891,22 @@
 {
 }
 
+#ifdef CONFIG_BF60x
+static inline u_long bfin_get_clk(char *name)
+{
+	struct clk *clk;
+	u_long clk_rate;
+
+	clk = clk_get(NULL, name);
+	if (IS_ERR(clk))
+		return 0;
+
+	clk_rate = clk_get_rate(clk);
+	clk_put(clk);
+	return clk_rate;
+}
+#endif
+
 void __init setup_arch(char **cmdline_p)
 {
 	u32 mmr;
@@ -870,6 +917,7 @@
 	enable_shadow_console();
 
 	/* Check to make sure we are running on the right processor */
+	mmr =  bfin_cpuid();
 	if (unlikely(CPUID != bfin_cpuid()))
 		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
 			CPU, bfin_cpuid(), bfin_revid());
@@ -890,6 +938,10 @@
 
 	memset(&bfin_memmap, 0, sizeof(bfin_memmap));
 
+#ifdef CONFIG_BF60x
+	/* Initialize the clock framework before parsing the early command line */
+	clk_init();
+#endif
 	/* If the user does not specify things on the command line, use
 	 * what the bootloader set things up as
 	 */
@@ -904,6 +956,7 @@
 
 	memory_setup();
 
+#ifndef CONFIG_BF60x
 	/* Initialize Async memory banks */
 	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
 	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
@@ -913,6 +966,7 @@
 	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
 	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
 #endif
+#endif
 #ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
 	bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
 	bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
@@ -938,7 +992,7 @@
 	printk(KERN_INFO "Hardware Trace %s and %sabled\n",
 		(mmr & 0x1) ? "active" : "off",
 		(mmr & 0x2) ? "en" : "dis");
-
+#ifndef CONFIG_BF60x
 	mmr = bfin_read_SYSCR();
 	printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);
 
@@ -980,7 +1034,7 @@
 		printk(KERN_INFO "Recovering from Watchdog event\n");
 	else if (_bfin_swrst & RESET_SOFTWARE)
 		printk(KERN_NOTICE "Reset caused by Software reset\n");
-
+#endif
 	printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
 	if (bfin_compiled_revid() == 0xffff)
 		printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
@@ -1008,8 +1062,13 @@
 
 	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
 
+#ifdef CONFIG_BF60x
+	printk(KERN_INFO "Processor Speed: %lu MHz core clock, %lu MHz SYSCLK, %lu MHz SCLK0, %lu MHz SCLK1 and %lu MHz DCLK\n",
+		cclk / 1000000, bfin_get_clk("SYSCLK") / 1000000, get_sclk0() / 1000000, get_sclk1() / 1000000, get_dclk() / 1000000);
+#else
 	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
 	       cclk / 1000000, sclk / 1000000);
+#endif
 
 	setup_bootmem_allocator();
 
@@ -1060,10 +1119,12 @@
 
 /* Get the input clock frequency */
 static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
+#ifndef CONFIG_BF60x
 static u_long get_clkin_hz(void)
 {
 	return cached_clkin_hz;
 }
+#endif
 static int __init early_init_clkin_hz(char *buf)
 {
 	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
@@ -1075,6 +1136,7 @@
 }
 early_param("clkin_hz=", early_init_clkin_hz);
 
+#ifndef CONFIG_BF60x
 /* Get the voltage input multiplier */
 static u_long get_vco(void)
 {
@@ -1097,10 +1159,14 @@
 	cached_vco *= msel;
 	return cached_vco;
 }
+#endif
 
 /* Get the Core clock */
 u_long get_cclk(void)
 {
+#ifdef CONFIG_BF60x
+	return bfin_get_clk("CCLK");
+#else
 	static u_long cached_cclk_pll_div, cached_cclk;
 	u_long csel, ssel;
 
@@ -1120,12 +1186,39 @@
 	else
 		cached_cclk = get_vco() >> csel;
 	return cached_cclk;
+#endif
 }
 EXPORT_SYMBOL(get_cclk);
 
-/* Get the System clock */
+#ifdef CONFIG_BF60x
+/* Get the bf60x clock of SCLK0 domain */
+u_long get_sclk0(void)
+{
+	return bfin_get_clk("SCLK0");
+}
+EXPORT_SYMBOL(get_sclk0);
+
+/* Get the bf60x clock of SCLK1 domain */
+u_long get_sclk1(void)
+{
+	return bfin_get_clk("SCLK1");
+}
+EXPORT_SYMBOL(get_sclk1);
+
+/* Get the bf60x DRAM clock */
+u_long get_dclk(void)
+{
+	return bfin_get_clk("DCLK");
+}
+EXPORT_SYMBOL(get_dclk);
+#endif
+
+/* Get the default system clock */
 u_long get_sclk(void)
 {
+#ifdef CONFIG_BF60x
+	return get_sclk0();
+#else
 	static u_long cached_sclk;
 	u_long ssel;
 
@@ -1146,6 +1239,7 @@
 
 	cached_sclk = get_vco() / ssel;
 	return cached_sclk;
+#endif
 }
 EXPORT_SYMBOL(get_sclk);
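
With these setup.c changes, the BF60x clock figures are no longer derived by decoding PLL registers: bfin_get_clk() goes through the generic clk API (clk_get()/clk_get_rate()/clk_put()), and get_cclk()/get_sclk0()/get_sclk1()/get_dclk() become thin wrappers around it. A hedged usage sketch follows; the divisor helper and its name are invented for illustration and simply show a consumer of the exported get_sclk0().

	/* get_sclk0() is defined and exported by the setup.c hunks above;
	 * get_sclk() treats SCLK0 as the default system clock on BF60x. */
	extern unsigned long get_sclk0(void);

	/* Hypothetical helper: classic 16x-oversampling UART divisor. */
	static unsigned int example_uart_divisor(unsigned int baud)
	{
		return get_sclk0() / (16 * baud);
	}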
 
diff --git a/arch/blackfin/kernel/shadow_console.c b/arch/blackfin/kernel/shadow_console.c
index 557e9fe..aeb8343 100644
--- a/arch/blackfin/kernel/shadow_console.c
+++ b/arch/blackfin/kernel/shadow_console.c
@@ -15,9 +15,9 @@
 #include <asm/irq_handler.h>
 #include <asm/early_printk.h>
 
-#define SHADOW_CONSOLE_START		(0x500)
-#define SHADOW_CONSOLE_END		(0x1000)
-#define SHADOW_CONSOLE_MAGIC_LOC	(0x4F0)
+#define SHADOW_CONSOLE_START		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x500)
+#define SHADOW_CONSOLE_END		(CONFIG_PHY_RAM_BASE_ADDRESS + 0x1000)
+#define SHADOW_CONSOLE_MAGIC_LOC	(CONFIG_PHY_RAM_BASE_ADDRESS + 0x4F0)
 #define SHADOW_CONSOLE_MAGIC		(0xDEADBEEF)
 
 static __initdata char *shadow_console_buffer = (char *)SHADOW_CONSOLE_START;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index d98f2d6..f608f02 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -66,8 +66,14 @@
 {
 	disable_gptimers(TIMER0bit);
 
+#ifdef CONFIG_BF60x
+	bfin_write16(TIMER_DATA_IMSK, 0);
+	set_gptimer_config(TIMER0_id,  TIMER_OUT_DIS
+		| TIMER_MODE_PWM_CONT | TIMER_PULSE_HI | TIMER_IRQ_PER);
+#else
 	set_gptimer_config(TIMER0_id, \
 		TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
+#endif
 	set_gptimer_period(TIMER0_id, -1);
 	set_gptimer_pwidth(TIMER0_id, -2);
 	SSYNC();
@@ -135,9 +141,15 @@
 {
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC: {
+#ifndef CONFIG_BF60x
 		set_gptimer_config(TIMER0_id, \
 			TIMER_OUT_DIS | TIMER_IRQ_ENA | \
 			TIMER_PERIOD_CNT | TIMER_MODE_PWM);
+#else
+		set_gptimer_config(TIMER0_id,  TIMER_OUT_DIS
+			| TIMER_MODE_PWM_CONT | TIMER_PULSE_HI | TIMER_IRQ_PER);
+#endif
+
 		set_gptimer_period(TIMER0_id, get_sclk() / HZ);
 		set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
 		enable_gptimers(TIMER0bit);
@@ -145,8 +157,14 @@
 	}
 	case CLOCK_EVT_MODE_ONESHOT:
 		disable_gptimers(TIMER0bit);
+#ifndef CONFIG_BF60x
 		set_gptimer_config(TIMER0_id, \
 			TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
+#else
+		set_gptimer_config(TIMER0_id, TIMER_OUT_DIS | TIMER_MODE_PWM
+			| TIMER_PULSE_HI | TIMER_IRQ_WID_DLY);
+#endif
+
 		set_gptimer_period(TIMER0_id, 0);
 		break;
 	case CLOCK_EVT_MODE_UNUSED:
@@ -160,7 +178,7 @@
 
 static void bfin_gptmr0_ack(void)
 {
-	set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
+	clear_gptimer_intr(TIMER0_id);
 }
 
 static void __init bfin_gptmr0_init(void)
@@ -197,7 +215,7 @@
 	.rating		= 300,
 	.irq		= IRQ_TIMER0,
 	.shift		= 32,
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.features 	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.set_next_event = bfin_gptmr0_set_next_event,
 	.set_mode	= bfin_gptmr0_set_mode,
 };
@@ -312,6 +330,11 @@
 #endif
 
 
+#ifdef CONFIG_SMP
+	evt->broadcast = smp_timer_broadcast;
+#endif
+
+
 	evt->name = "bfin_core_timer";
 	evt->rating = 350;
 	evt->irq = -1;
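
Two details of this time-ts.c conversion are easy to miss: on BF60x the periodic mode uses TIMER_IRQ_PER while one-shot uses TIMER_IRQ_WID_DLY, and the timer interrupt is now acknowledged through clear_gptimer_intr() rather than by writing TIMER_STATUS_TIMIL0 to the group status register. A hypothetical one-shot sketch in the same spirit as the hunks above (the cycle handling is illustrative only):

	#include <linux/types.h>
	#include <asm/gptimers.h>

	/* Hypothetical BF60x one-shot (re)arm mirroring the ONESHOT branch above. */
	static void example_gptimer_oneshot(uint32_t cycles)
	{
		disable_gptimers(TIMER0bit);
		set_gptimer_config(TIMER0_id, TIMER_OUT_DIS | TIMER_MODE_PWM
				   | TIMER_PULSE_HI | TIMER_IRQ_WID_DLY);
		set_gptimer_period(TIMER0_id, 0);
		set_gptimer_pwidth(TIMER0_id, cycles);
		enable_gptimers(TIMER0bit);
	}
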
diff --git a/arch/blackfin/lib/divsi3.S b/arch/blackfin/lib/divsi3.S
index f89c5a4..ef2cd99 100644
--- a/arch/blackfin/lib/divsi3.S
+++ b/arch/blackfin/lib/divsi3.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2004-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  *
  * 16 / 32 bit signed division.
  *                 Special cases :
diff --git a/arch/blackfin/lib/memchr.S b/arch/blackfin/lib/memchr.S
index 542e40f..bcfc8a1 100644
--- a/arch/blackfin/lib/memchr.S
+++ b/arch/blackfin/lib/memchr.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2005-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #include <linux/linkage.h>
diff --git a/arch/blackfin/lib/memcmp.S b/arch/blackfin/lib/memcmp.S
index ce5b9f1..2e1c947 100644
--- a/arch/blackfin/lib/memcmp.S
+++ b/arch/blackfin/lib/memcmp.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2004-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #include <linux/linkage.h>
diff --git a/arch/blackfin/lib/memcpy.S b/arch/blackfin/lib/memcpy.S
index c31bf22..53cb369 100644
--- a/arch/blackfin/lib/memcpy.S
+++ b/arch/blackfin/lib/memcpy.S
@@ -7,7 +7,7 @@
  *
  * Copyright 2004-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #include <linux/linkage.h>
diff --git a/arch/blackfin/lib/memmove.S b/arch/blackfin/lib/memmove.S
index 4eca566..e0b7820 100644
--- a/arch/blackfin/lib/memmove.S
+++ b/arch/blackfin/lib/memmove.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2005-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #include <linux/linkage.h>
diff --git a/arch/blackfin/lib/memset.S b/arch/blackfin/lib/memset.S
index eab1bef..cdcf914 100644
--- a/arch/blackfin/lib/memset.S
+++ b/arch/blackfin/lib/memset.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2004-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #include <linux/linkage.h>
diff --git a/arch/blackfin/lib/modsi3.S b/arch/blackfin/lib/modsi3.S
index 8b0c7d4..f7026ce 100644
--- a/arch/blackfin/lib/modsi3.S
+++ b/arch/blackfin/lib/modsi3.S
@@ -6,7 +6,7 @@
  *
  * Copyright 2004-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 .global ___modsi3;
diff --git a/arch/blackfin/lib/muldi3.S b/arch/blackfin/lib/muldi3.S
index 953a38a..abf9b2a 100644
--- a/arch/blackfin/lib/muldi3.S
+++ b/arch/blackfin/lib/muldi3.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 .align 2
diff --git a/arch/blackfin/lib/smulsi3_highpart.S b/arch/blackfin/lib/smulsi3_highpart.S
index 99ee8c5..e50d6c4 100644
--- a/arch/blackfin/lib/smulsi3_highpart.S
+++ b/arch/blackfin/lib/smulsi3_highpart.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2007 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 .align 2
diff --git a/arch/blackfin/lib/strcmp.S b/arch/blackfin/lib/strcmp.S
index d7c1d15..9c8b986 100644
--- a/arch/blackfin/lib/strcmp.S
+++ b/arch/blackfin/lib/strcmp.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #include <linux/linkage.h>
diff --git a/arch/blackfin/lib/strcpy.S b/arch/blackfin/lib/strcpy.S
index a6a0c63..9495aa7 100644
--- a/arch/blackfin/lib/strcpy.S
+++ b/arch/blackfin/lib/strcpy.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #include <linux/linkage.h>
diff --git a/arch/blackfin/lib/strncmp.S b/arch/blackfin/lib/strncmp.S
index 6da37c3..3bfaedc 100644
--- a/arch/blackfin/lib/strncmp.S
+++ b/arch/blackfin/lib/strncmp.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #include <linux/linkage.h>
diff --git a/arch/blackfin/lib/strncpy.S b/arch/blackfin/lib/strncpy.S
index 2c07ddd..92fd182 100644
--- a/arch/blackfin/lib/strncpy.S
+++ b/arch/blackfin/lib/strncpy.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #include <linux/linkage.h>
diff --git a/arch/blackfin/lib/udivsi3.S b/arch/blackfin/lib/udivsi3.S
index 97e9043..748a6a2 100644
--- a/arch/blackfin/lib/udivsi3.S
+++ b/arch/blackfin/lib/udivsi3.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2004-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #include <linux/linkage.h>
diff --git a/arch/blackfin/lib/umodsi3.S b/arch/blackfin/lib/umodsi3.S
index 168eba7..3794c00 100644
--- a/arch/blackfin/lib/umodsi3.S
+++ b/arch/blackfin/lib/umodsi3.S
@@ -3,7 +3,7 @@
  *
  * Copyright 2004-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifdef CONFIG_ARITHMETIC_OPS_L1
diff --git a/arch/blackfin/lib/umulsi3_highpart.S b/arch/blackfin/lib/umulsi3_highpart.S
index 051824a..0dcace9 100644
--- a/arch/blackfin/lib/umulsi3_highpart.S
+++ b/arch/blackfin/lib/umulsi3_highpart.S
@@ -1,7 +1,7 @@
 /*
  * Copyright 2007 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 .align 2
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index a173957..f8047ca 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -529,6 +529,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -547,6 +549,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
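
The bfin_twi0_pins addition here (and the identical hunks in the other board files that follow) passes a zero-terminated P_TWI0_SCL/P_TWI0_SDA list to the TWI controller through dev.platform_data, so the pin-mux request moves from hard-coded driver tables into per-board data. A hedged sketch of the consuming side, using the standard Blackfin portmux calls; the probe function is hypothetical and the real i2c-bfin-twi driver differs in detail.

	#include <linux/types.h>
	#include <linux/platform_device.h>
	#include <asm/portmux.h>

	static int example_twi_probe(struct platform_device *pdev)
	{
		/* zero-terminated peripheral list supplied by the board file */
		const u16 *pins = pdev->dev.platform_data;

		if (peripheral_request_list(pins, "i2c-bfin-twi"))
			return -EBUSY;

		/* ... map TWI0_REGBASE, request the TWI IRQ, register the adapter ... */
		return 0;
	}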
 
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index 6eebee4..0bedc73 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -455,6 +455,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -473,6 +475,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index 56383f7..845e6bc 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -6,8 +6,7 @@
  * DO NOT EDIT THIS FILE
  *
  * Copyright 2004-2011 Analog Devices Inc.
- * Licensed under the ADI BSD license.
- *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
+ * Licensed under the Clear BSD license.
  */
 
 /* This file should be up to date with:
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
index bb79627..1c03ad4 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _CDEF_BF512_H
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
index dc98866..861221d 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _CDEF_BF514_H
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
index 142e45c..cc9bf0d 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _CDEF_BF516_H
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
index e638197..96a82fd 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _CDEF_BF518_H
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF512.h b/arch/blackfin/mach-bf518/include/mach/defBF512.h
index 7297040..e6a017f 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF512.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF512_H
@@ -1083,77 +1083,6 @@
 #define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
 
 
-/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
-#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
-#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
-
-/* TWI_PRESCALE Masks															*/
-#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
-#define	TWI_ENA		0x0080		/* TWI Enable									*/
-#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
-
-/* TWI_SLAVE_CTL Masks															*/
-#define	SEN			0x0001		/* Slave Enable									*/
-#define	SADD_LEN	0x0002		/* Slave Address Length							*/
-#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
-#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010		/* General Call Adrress Matching Enabled		*/
-
-/* TWI_SLAVE_STAT Masks															*/
-#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
-#define GCALL		0x0002		/* General Call Indicator						*/
-
-/* TWI_MASTER_CTL Masks													*/
-#define	MEN			0x0001		/* Master Mode Enable						*/
-#define	MADD_LEN	0x0002		/* Master Address Length					*/
-#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
-#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
-#define	STOP		0x0010		/* Issue Stop Condition						*/
-#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
-#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
-#define	SDAOVR		0x4000		/* Serial Data Override						*/
-#define	SCLOVR		0x8000		/* Serial Clock Override					*/
-
-/* TWI_MASTER_STAT Masks														*/
-#define	MPROG		0x0001		/* Master Transfer In Progress					*/
-#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
-#define	ANAK		0x0004		/* Address Not Acknowledged						*/
-#define	DNAK		0x0008		/* Data Not Acknowledged						*/
-#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
-#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
-#define	SDASEN		0x0040		/* Serial Data Sense							*/
-#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
-#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
-#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
-#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
-#define	SERR		0x0004		/* Slave Transfer Error		*/
-#define	SOVF		0x0008		/* Slave Overflow			*/
-#define	MCOMP		0x0010		/* Master Transfer Complete	*/
-#define	MERR		0x0020		/* Master Transfer Error	*/
-#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
-#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
-
-/* TWI_FIFO_CTRL Masks												*/
-#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
-#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
-#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
-#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
-
-/* TWI_FIFO_STAT Masks															*/
-#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
-#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
-#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
-#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
-
-#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
-#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
-#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
-#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
-
-
 /*  *******************  PIN CONTROL REGISTER MASKS  ************************/
 /* PORT_MUX Masks															*/
 #define	PJSE			0x0001			/* Port J SPI/SPORT Enable			*/
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF514.h b/arch/blackfin/mach-bf518/include/mach/defBF514.h
index cfab428..97feaa6 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF514.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF514.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF514_H
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF516.h b/arch/blackfin/mach-bf518/include/mach/defBF516.h
index 22a3aa0..7c79cb6 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF516.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF516.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF516_H
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF518.h b/arch/blackfin/mach-bf518/include/mach/defBF518.h
index cb18270..12042ff 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF518.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF518.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2009 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF518_H
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index fad7fea..d58f50e 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -569,6 +569,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -587,6 +589,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
@@ -681,6 +686,7 @@
 	.rotary_button_key = KEY_ENTER,
 	.debounce	   = 10,	/* 0..17 */
 	.mode		   = ROT_QUAD_ENC | ROT_DEBE,
+	.pm_wakeup	   = 1,
 };
 
 static struct resource bfin_rotary_resources[] = {
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 65b7fbd..413d013 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -698,6 +698,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -716,6 +718,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 17c6a24..50bda79 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -576,6 +576,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -594,6 +596,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 2f9a2bd..af732eb 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -869,6 +869,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -887,6 +889,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
@@ -1105,6 +1110,7 @@
 	.rotary_button_key = KEY_ENTER,
 	.debounce	   = 10,	/* 0..17 */
 	.mode		   = ROT_QUAD_ENC | ROT_DEBE,
+	.pm_wakeup	   = 1,
 };
 
 static struct resource bfin_rotary_resources[] = {
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index d192c0a..1509c5a 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -656,6 +656,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -674,6 +676,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index 6884706..aa14110 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -6,8 +6,7 @@
  * DO NOT EDIT THIS FILE
  *
  * Copyright 2004-2011 Analog Devices Inc.
- * Licensed under the ADI BSD license.
- *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
+ * Licensed under the Clear BSD license.
  */
 
 /* This file should be up to date with:
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF522.h b/arch/blackfin/mach-bf527/include/mach/defBF522.h
index 37d353a..e007017 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF522.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF522_H
@@ -1084,77 +1084,6 @@
 #define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
 
 
-/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
-#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
-#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
-
-/* TWI_PRESCALE Masks															*/
-#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
-#define	TWI_ENA		0x0080		/* TWI Enable									*/
-#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
-
-/* TWI_SLAVE_CTL Masks															*/
-#define	SEN			0x0001		/* Slave Enable									*/
-#define	SADD_LEN	0x0002		/* Slave Address Length							*/
-#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
-#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010		/* General Call Adrress Matching Enabled		*/
-
-/* TWI_SLAVE_STAT Masks															*/
-#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
-#define GCALL		0x0002		/* General Call Indicator						*/
-
-/* TWI_MASTER_CTL Masks													*/
-#define	MEN			0x0001		/* Master Mode Enable						*/
-#define	MADD_LEN	0x0002		/* Master Address Length					*/
-#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
-#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
-#define	STOP		0x0010		/* Issue Stop Condition						*/
-#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
-#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
-#define	SDAOVR		0x4000		/* Serial Data Override						*/
-#define	SCLOVR		0x8000		/* Serial Clock Override					*/
-
-/* TWI_MASTER_STAT Masks														*/
-#define	MPROG		0x0001		/* Master Transfer In Progress					*/
-#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
-#define	ANAK		0x0004		/* Address Not Acknowledged						*/
-#define	DNAK		0x0008		/* Data Not Acknowledged						*/
-#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
-#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
-#define	SDASEN		0x0040		/* Serial Data Sense							*/
-#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
-#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
-#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
-#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
-#define	SERR		0x0004		/* Slave Transfer Error		*/
-#define	SOVF		0x0008		/* Slave Overflow			*/
-#define	MCOMP		0x0010		/* Master Transfer Complete	*/
-#define	MERR		0x0020		/* Master Transfer Error	*/
-#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
-#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
-
-/* TWI_FIFO_CTRL Masks												*/
-#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
-#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
-#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
-#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
-
-/* TWI_FIFO_STAT Masks															*/
-#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
-#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
-#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
-#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
-
-#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
-#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
-#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
-#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
-
-
 /* Omit CAN masks from defBF534.h */
 
 /*  *******************  PIN CONTROL REGISTER MASKS  ************************/
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF525.h b/arch/blackfin/mach-bf527/include/mach/defBF525.h
index aab80bb..71578d9 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF525.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF525_H
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF527.h b/arch/blackfin/mach-bf527/include/mach/defBF527.h
index 05369a9..aeb8479 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF527.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF527_H
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 03f2b40..3a8f73a 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -6,8 +6,7 @@
  * DO NOT EDIT THIS FILE
  *
  * Copyright 2004-2011 Analog Devices Inc.
- * Licensed under the ADI BSD license.
- *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
+ * Licensed under the Clear BSD license.
  */
 
 /* This file should be up to date with:
diff --git a/arch/blackfin/mach-bf533/include/mach/defBF532.h b/arch/blackfin/mach-bf533/include/mach/defBF532.h
index 2376d53..d438150 100644
--- a/arch/blackfin/mach-bf533/include/mach/defBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/defBF532.h
@@ -3,7 +3,7 @@
  *
  * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF532_H
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index 27fd2c3..9408ab5 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -486,6 +486,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -504,6 +506,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index 3f3abad..0143d8be 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -451,6 +451,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -469,6 +471,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
index 6f77bf7..8bbf0a2 100644
--- a/arch/blackfin/mach-bf537/boards/dnp5370.c
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -329,6 +329,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -347,6 +349,9 @@
 	.id            = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource      = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index d2d7128..a10f90e 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -386,6 +386,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -404,6 +406,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index f3562b0..c9d9473 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -1790,6 +1790,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -1808,6 +1810,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
@@ -2361,7 +2366,13 @@
 	},
 #endif
 };
-
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) \
+|| defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE)
+unsigned short bfin_sport0_peripherals[] = {
+	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+};
+#endif
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
 static struct resource bfin_sport0_uart_resources[] = {
@@ -2382,11 +2393,6 @@
 	},
 };
 
-static unsigned short bfin_sport0_peripherals[] = {
-	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
-};
-
 static struct platform_device bfin_sport0_uart_device = {
 	.name = "bfin-sport-uart",
 	.id = 0,
@@ -2432,7 +2438,49 @@
 };
 #endif
 #endif
-
+#if defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE)
+static struct resource bfin_sport0_resources[] = {
+	{
+		.start = SPORT0_TCR1,
+		.end = SPORT0_MRCS3+4,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_SPORT0_RX,
+		.end = IRQ_SPORT0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_SPORT0_TX,
+		.end = IRQ_SPORT0_TX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_SPORT0_ERROR,
+		.end = IRQ_SPORT0_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_SPORT0_TX,
+		.end = CH_SPORT0_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_SPORT0_RX,
+		.end = CH_SPORT0_RX,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sport0_device = {
+	.name = "bfin_sport_raw",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sport0_resources),
+	.resource = bfin_sport0_resources,
+	.dev = {
+		.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
+	},
+};
+#endif
 #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
 #define CF_IDE_NAND_CARD_USE_HDD_INTERFACE
 /* #define CF_IDE_NAND_CARD_USE_CF_IN_COMMON_MEMORY_MODE */
@@ -2754,7 +2802,9 @@
 static struct platform_device *stamp_devices[] __initdata = {
 
 	&bfin_dpmc,
-
+#if defined(CONFIG_BFIN_SPORT) || defined(CONFIG_BFIN_SPORT_MODULE)
+	&bfin_sport0_device,
+#endif
 #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
 	&bfin_pcmcia_cf_device,
 #endif
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 3fb4218..e285c36 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -453,6 +453,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -471,6 +473,9 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 #endif
 
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index 543cd3f..df92126 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -6,8 +6,7 @@
  * DO NOT EDIT THIS FILE
  *
  * Copyright 2004-2011 Analog Devices Inc.
- * Licensed under the ADI BSD license.
- *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
+ * Licensed under the Clear BSD license.
  */
 
 /* This file should be up to date with:
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h
index 4a031dd..ef6a98c 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF534_H
@@ -1403,75 +1403,6 @@
 #define ERR_DET			0x4000	/* Error Detected Indicator                     */
 #define ERR_NCOR		0x8000	/* Error Not Corrected Indicator        */
 
-/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
-#define	CLKLOW(x)	((x) & 0xFF)	/* Periods Clock Is Held Low                    */
-#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low                 */
-
-/* TWI_PRESCALE Masks															*/
-#define	PRESCALE	0x007F	/* SCLKs Per Internal Time Reference (10MHz)    */
-#define	TWI_ENA		0x0080	/* TWI Enable                                                                   */
-#define	SCCB		0x0200	/* SCCB Compatibility Enable                                    */
-
-/* TWI_SLAVE_CTL Masks															*/
-#define	SEN			0x0001	/* Slave Enable                                                                 */
-#define	SADD_LEN	0x0002	/* Slave Address Length                                                 */
-#define	STDVAL		0x0004	/* Slave Transmit Data Valid                                    */
-#define	NAK			0x0008	/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010	/* General Call Address Matching Enabled                */
-
-/* TWI_SLAVE_STAT Masks															*/
-#define	SDIR		0x0001	/* Slave Transfer Direction (Transmit/Receive*) */
-#define GCALL		0x0002	/* General Call Indicator                                               */
-
-/* TWI_MASTER_CTL Masks													*/
-#define	MEN			0x0001	/* Master Mode Enable                                           */
-#define	MADD_LEN	0x0002	/* Master Address Length                                        */
-#define	MDIR		0x0004	/* Master Transmit Direction (RX/TX*)           */
-#define	FAST		0x0008	/* Use Fast Mode Timing Specs                           */
-#define	STOP		0x0010	/* Issue Stop Condition                                         */
-#define	RSTART		0x0020	/* Repeat Start or Stop* At End Of Transfer     */
-#define	DCNT		0x3FC0	/* Data Bytes To Transfer                                       */
-#define	SDAOVR		0x4000	/* Serial Data Override                                         */
-#define	SCLOVR		0x8000	/* Serial Clock Override                                        */
-
-/* TWI_MASTER_STAT Masks														*/
-#define	MPROG		0x0001	/* Master Transfer In Progress                                  */
-#define	LOSTARB		0x0002	/* Lost Arbitration Indicator (Xfer Aborted)    */
-#define	ANAK		0x0004	/* Address Not Acknowledged                                             */
-#define	DNAK		0x0008	/* Data Not Acknowledged                                                */
-#define	BUFRDERR	0x0010	/* Buffer Read Error                                                    */
-#define	BUFWRERR	0x0020	/* Buffer Write Error                                                   */
-#define	SDASEN		0x0040	/* Serial Data Sense                                                    */
-#define	SCLSEN		0x0080	/* Serial Clock Sense                                                   */
-#define	BUSBUSY		0x0100	/* Bus Busy Indicator                                                   */
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
-#define	SINIT		0x0001	/* Slave Transfer Initiated     */
-#define	SCOMP		0x0002	/* Slave Transfer Complete      */
-#define	SERR		0x0004	/* Slave Transfer Error         */
-#define	SOVF		0x0008	/* Slave Overflow                       */
-#define	MCOMP		0x0010	/* Master Transfer Complete     */
-#define	MERR		0x0020	/* Master Transfer Error        */
-#define	XMTSERV		0x0040	/* Transmit FIFO Service        */
-#define	RCVSERV		0x0080	/* Receive FIFO Service         */
-
-/* TWI_FIFO_CTRL Masks												*/
-#define	XMTFLUSH	0x0001	/* Transmit Buffer Flush                        */
-#define	RCVFLUSH	0x0002	/* Receive Buffer Flush                         */
-#define	XMTINTLEN	0x0004	/* Transmit Buffer Interrupt Length     */
-#define	RCVINTLEN	0x0008	/* Receive Buffer Interrupt Length      */
-
-/* TWI_FIFO_STAT Masks															*/
-#define	XMTSTAT		0x0003	/* Transmit FIFO Status                                                 */
-#define	XMT_EMPTY	0x0000	/*              Transmit FIFO Empty                                             */
-#define	XMT_HALF	0x0001	/*              Transmit FIFO Has 1 Byte To Write               */
-#define	XMT_FULL	0x0003	/*              Transmit FIFO Full (2 Bytes To Write)   */
-
-#define	RCVSTAT		0x000C	/* Receive FIFO Status                                                  */
-#define	RCV_EMPTY	0x0000	/*              Receive FIFO Empty                                              */
-#define	RCV_HALF	0x0004	/*              Receive FIFO Has 1 Byte To Read                 */
-#define	RCV_FULL	0x000C	/*              Receive FIFO Full (2 Bytes To Read)             */
 
 /*  *******************  PIN CONTROL REGISTER MASKS  ************************/
 /* PORT_MUX Masks															*/
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF537.h b/arch/blackfin/mach-bf537/include/mach/defBF537.h
index 3d471d7..e10332c 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF537.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF537.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF537_H
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index 1633a6f..a4fce03 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -38,7 +38,7 @@
 	.name = "rtc-bfin",
 	.id   = -1,
 };
-#endif
+#endif	/* CONFIG_RTC_DRV_BFIN */
 
 #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
 #ifdef CONFIG_SERIAL_BFIN_UART0
@@ -100,7 +100,7 @@
 		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
 	},
 };
-#endif
+#endif	/* CONFIG_SERIAL_BFIN_UART0 */
 #ifdef CONFIG_SERIAL_BFIN_UART1
 static struct resource bfin_uart1_resources[] = {
 	{
@@ -148,7 +148,7 @@
 		.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
 	},
 };
-#endif
+#endif	/* CONFIG_SERIAL_BFIN_UART1 */
 #ifdef CONFIG_SERIAL_BFIN_UART2
 static struct resource bfin_uart2_resources[] = {
 	{
@@ -196,8 +196,8 @@
 		.platform_data = &bfin_uart2_peripherals, /* Passed to driver */
 	},
 };
-#endif
-#endif
+#endif	/* CONFIG_SERIAL_BFIN_UART2 */
+#endif	/* CONFIG_SERIAL_BFIN */
 
 #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
 #ifdef CONFIG_BFIN_SIR0
@@ -224,7 +224,7 @@
 	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
 	.resource = bfin_sir0_resources,
 };
-#endif
+#endif	/* CONFIG_BFIN_SIR0 */
 #ifdef CONFIG_BFIN_SIR1
 static struct resource bfin_sir1_resources[] = {
 	{
@@ -249,7 +249,7 @@
 	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
 	.resource = bfin_sir1_resources,
 };
-#endif
+#endif	/* CONFIG_BFIN_SIR1 */
 #ifdef CONFIG_BFIN_SIR2
 static struct resource bfin_sir2_resources[] = {
 	{
@@ -274,8 +274,8 @@
 	.num_resources = ARRAY_SIZE(bfin_sir2_resources),
 	.resource = bfin_sir2_resources,
 };
-#endif
-#endif
+#endif	/* CONFIG_BFIN_SIR2 */
+#endif	/* CONFIG_BFIN_SIR */
 
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
 #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
@@ -311,7 +311,7 @@
 		.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
 	},
 };
-#endif
+#endif	/* CONFIG_SERIAL_BFIN_SPORT0_UART */
 #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
 static struct resource bfin_sport1_uart_resources[] = {
 	{
@@ -345,7 +345,7 @@
 		.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
 	},
 };
-#endif
+#endif	/* CONFIG_SERIAL_BFIN_SPORT1_UART */
 #ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
 static struct resource bfin_sport2_uart_resources[] = {
 	{
@@ -379,7 +379,7 @@
 		.platform_data = &bfin_sport2_peripherals, /* Passed to driver */
 	},
 };
-#endif
+#endif	/* CONFIG_SERIAL_BFIN_SPORT2_UART */
 #ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
 static struct resource bfin_sport3_uart_resources[] = {
 	{
@@ -413,8 +413,8 @@
 		.platform_data = &bfin_sport3_peripherals, /* Passed to driver */
 	},
 };
-#endif
-#endif
+#endif	/* CONFIG_SERIAL_BFIN_SPORT3_UART */
+#endif	/* CONFIG_SERIAL_BFIN_SPORT */
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
 static unsigned short bfin_can_peripherals[] = {
@@ -452,7 +452,7 @@
 		.platform_data = &bfin_can_peripherals, /* Passed to driver */
 	},
 };
-#endif
+#endif	/* CONFIG_CAN_BFIN */
 
 /*
  *  USB-LAN EzExtender board
@@ -488,7 +488,7 @@
 		.platform_data	= &smc91x_info,
 	},
 };
-#endif
+#endif	/* CONFIG_SMC91X */
 
 #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
 /* all SPI peripherals info goes here */
@@ -518,7 +518,8 @@
 static struct bfin5xx_spi_chip spi_flash_chip_info = {
 	.enable_dma = 0,         /* use dma transfer with this chip*/
 };
-#endif
+#endif	/* CONFIG_MTD_M25P80 */
+#endif	/* CONFIG_SPI_BFIN5XX */
 
 #if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
 #include <linux/spi/ad7879.h>
@@ -535,7 +536,7 @@
 	.gpio_export		= 1,	/* Export GPIO to gpiolib */
 	.gpio_base		= -1,	/* Dynamic allocation */
 };
-#endif
+#endif	/* CONFIG_TOUCHSCREEN_AD7879 */
 
 #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
 #include <asm/bfin-lq035q1.h>
@@ -564,7 +565,7 @@
 		.platform_data = &bfin_lq035q1_data,
 	},
 };
-#endif
+#endif	/* CONFIG_FB_BFIN_LQ035Q1 */
 
 static struct spi_board_info bf538_spi_board_info[] __initdata = {
 #if defined(CONFIG_MTD_M25P80) \
@@ -579,7 +580,7 @@
 		.controller_data = &spi_flash_chip_info,
 		.mode = SPI_MODE_3,
 	},
-#endif
+#endif	/* CONFIG_MTD_M25P80 */
 #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
 	{
 		.modalias = "ad7879",
@@ -590,7 +591,7 @@
 		.chip_select = 1,
 		.mode = SPI_CPHA | SPI_CPOL,
 	},
-#endif
+#endif	/* CONFIG_TOUCHSCREEN_AD7879_SPI */
 #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
 	{
 		.modalias = "bfin-lq035q1-spi",
@@ -599,7 +600,7 @@
 		.chip_select = 2,
 		.mode = SPI_CPHA | SPI_CPOL,
 	},
-#endif
+#endif	/* CONFIG_FB_BFIN_LQ035Q1 */
 #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
 	{
 		.modalias = "spidev",
@@ -607,7 +608,7 @@
 		.bus_num = 0,
 		.chip_select = 1,
 	},
-#endif
+#endif	/* CONFIG_SPI_SPIDEV */
 };
 
 /* SPI (0) */
@@ -716,9 +717,9 @@
 		},
 };
 
-#endif  /* spi master and devices */
-
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -737,9 +738,13 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 
-#if !defined(CONFIG_BF542)	/* The BF542 only has 1 TWI */
+static const u16 bfin_twi1_pins[] = {P_TWI1_SCL, P_TWI1_SDA, 0};
+
 static struct resource bfin_twi1_resource[] = {
 	[0] = {
 		.start = TWI1_REGBASE,
@@ -759,8 +764,8 @@
 	.num_resources = ARRAY_SIZE(bfin_twi1_resource),
 	.resource = bfin_twi1_resource,
 };
-#endif
-#endif
+#endif	/* CONFIG_BF542 */
+#endif	/* CONFIG_I2C_BLACKFIN_TWI */
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
 #include <linux/gpio_keys.h>
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index b6ca997..318d922 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -6,8 +6,7 @@
  * DO NOT EDIT THIS FILE
  *
  * Copyright 2004-2011 Analog Devices Inc.
- * Licensed under the ADI BSD license.
- *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
+ * Licensed under the Clear BSD license.
  */
 
 /* This file should be up to date with:
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF538.h b/arch/blackfin/mach-bf538/include/mach/defBF538.h
index d27f81d..876a770 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF538.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF538.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF538_H
@@ -1746,80 +1746,4 @@
 #define	SDEASE			0x00000010 /* SDRAM EAB	sticky error status - W1C */
 #define	BGSTAT			0x00000020 /* Bus granted */
 
-
-/*  ********************  TWO-WIRE INTERFACE (TWIx) MASKS  ***********************/
-/* TWIx_CLKDIV Macros (Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y);	 ) */
-#ifdef _MISRA_RULES
-#define	CLKLOW(x)	((x) & 0xFFu)		/* Periods Clock Is Held Low */
-#define	CLKHI(y)	(((y)&0xFFu)<<0x8)	/* Periods Before New Clock Low */
-#else
-#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low */
-#define	CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low */
-#endif /* _MISRA_RULES */
-
-/* TWIx_PRESCALE Masks								 */
-#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz) */
-#define	TWI_ENA		0x0080		/* TWI Enable		 */
-#define	SCCB		0x0200		/* SCCB	Compatibility Enable */
-
-/* TWIx_SLAVE_CTRL Masks								 */
-#define	SEN			0x0001		/* Slave Enable		 */
-#define	SADD_LEN	0x0002		/* Slave Address Length */
-#define	STDVAL		0x0004		/* Slave Transmit Data Valid */
-#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010		/* General Call	Adrress	Matching Enabled */
-
-/* TWIx_SLAVE_STAT Masks								 */
-#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*) */
-#define	GCALL		0x0002		/* General Call	Indicator */
-
-/* TWIx_MASTER_CTRL Masks						 */
-#define	MEN			0x0001		/* Master Mode Enable */
-#define	MADD_LEN	0x0002		/* Master Address Length */
-#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*) */
-#define	FAST		0x0008		/* Use Fast Mode Timing	Specs */
-#define	STOP		0x0010		/* Issue Stop Condition */
-#define	RSTART		0x0020		/* Repeat Start	or Stop* At End	Of Transfer */
-#define	DCNT		0x3FC0		/* Data	Bytes To Transfer */
-#define	SDAOVR		0x4000		/* Serial Data Override */
-#define	SCLOVR		0x8000		/* Serial Clock	Override */
-
-/* TWIx_MASTER_STAT Masks							 */
-#define	MPROG		0x0001		/* Master Transfer In Progress */
-#define	LOSTARB		0x0002		/* Lost	Arbitration Indicator (Xfer Aborted) */
-#define	ANAK		0x0004		/* Address Not Acknowledged */
-#define	DNAK		0x0008		/* Data	Not Acknowledged */
-#define	BUFRDERR	0x0010		/* Buffer Read Error */
-#define	BUFWRERR	0x0020		/* Buffer Write	Error */
-#define	SDASEN		0x0040		/* Serial Data Sense */
-#define	SCLSEN		0x0080		/* Serial Clock	Sense */
-#define	BUSBUSY		0x0100		/* Bus Busy Indicator */
-
-/* TWIx_INT_SRC	and TWIx_INT_ENABLE Masks */
-#define	SINIT		0x0001		/* Slave Transfer Initiated */
-#define	SCOMP		0x0002		/* Slave Transfer Complete */
-#define	SERR		0x0004		/* Slave Transfer Error */
-#define	SOVF		0x0008		/* Slave Overflow */
-#define	MCOMP		0x0010		/* Master Transfer Complete */
-#define	MERR		0x0020		/* Master Transfer Error */
-#define	XMTSERV		0x0040		/* Transmit FIFO Service */
-#define	RCVSERV		0x0080		/* Receive FIFO	Service */
-
-/* TWIx_FIFO_CTL Masks					 */
-#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush */
-#define	RCVFLUSH	0x0002		/* Receive Buffer Flush */
-#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length */
-#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length */
-
-/* TWIx_FIFO_STAT Masks								 */
-#define	XMTSTAT		0x0003		/* Transmit FIFO Status */
-#define	XMT_EMPTY	0x0000		/*		Transmit FIFO Empty */
-#define	XMT_HALF	0x0001		/*		Transmit FIFO Has 1 Byte To Write */
-#define	XMT_FULL	0x0003		/*		Transmit FIFO Full (2 Bytes To Write) */
-
-#define	RCVSTAT		0x000C		/* Receive FIFO	Status */
-#define	RCV_EMPTY	0x0000		/*		Receive	FIFO Empty */
-#define	RCV_HALF	0x0004		/*		Receive	FIFO Has 1 Byte	To Read */
-#define	RCV_FULL	0x000C		/*		Receive	FIFO Full (2 Bytes To Read) */
-
 #endif
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index 8100bcd..199e871 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF539_H
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index 68af594..e925433 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -1007,6 +1007,8 @@
 #endif  /* spi master and devices */
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -1025,9 +1027,14 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 
 #if !defined(CONFIG_BF542)	/* The BF542 only has 1 TWI */
+static const u16 bfin_twi1_pins[] = {P_TWI1_SCL, P_TWI1_SDA, 0};
+
 static struct resource bfin_twi1_resource[] = {
 	[0] = {
 		.start = TWI1_REGBASE,
@@ -1046,6 +1053,9 @@
 	.id = 1,
 	.num_resources = ARRAY_SIZE(bfin_twi1_resource),
 	.resource = bfin_twi1_resource,
+	.dev = {
+		.platform_data = &bfin_twi1_pins,
+	},
 };
 #endif
 #endif
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 4cadaf8..3bd75ba 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -165,6 +165,7 @@
 	.rotary_button_key = KEY_ENTER,
 	.debounce	   = 10,	/* 0..17 */
 	.mode		   = ROT_QUAD_ENC | ROT_DEBE,
+	.pm_wakeup	   = 1,
 };
 
 static struct resource bfin_rotary_resources[] = {
@@ -1251,6 +1252,8 @@
 #endif
 
 #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
 static struct resource bfin_twi0_resource[] = {
 	[0] = {
 		.start = TWI0_REGBASE,
@@ -1269,9 +1272,14 @@
 	.id = 0,
 	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
 	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
 };
 
 #if !defined(CONFIG_BF542)	/* The BF542 only has 1 TWI */
+static const u16 bfin_twi1_pins[] = {P_TWI1_SCL, P_TWI1_SDA, 0};
+
 static struct resource bfin_twi1_resource[] = {
 	[0] = {
 		.start = TWI1_REGBASE,
@@ -1290,6 +1298,9 @@
 	.id = 1,
 	.num_resources = ARRAY_SIZE(bfin_twi1_resource),
 	.resource = bfin_twi1_resource,
+	.dev = {
+		.platform_data = &bfin_twi1_pins,
+	},
 };
 #endif
 #endif
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index ac96ee8..5b711d8 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -6,8 +6,7 @@
  * DO NOT EDIT THIS FILE
  *
  * Copyright 2004-2011 Analog Devices Inc.
- * Licensed under the ADI BSD license.
- *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
+ * Licensed under the Clear BSD license.
  */
 
 /* This file should be up to date with:
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF542.h b/arch/blackfin/mach-bf548/include/mach/defBF542.h
index 629bf21..5116157 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF542.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF542_H
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF544.h b/arch/blackfin/mach-bf548/include/mach/defBF544.h
index bcccab3..329b2c5 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF544.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF544_H
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h
index 1fa41ec..e18de21 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF547_H
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF548.h b/arch/blackfin/mach-bf548/include/mach/defBF548.h
index 3c7f1b6..27f2948 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF548.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF548_H
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF549.h b/arch/blackfin/mach-bf548/include/mach/defBF549.h
index 9a45cb6..ac569fc 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF549.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF549_H
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
index 0867c2b..8f6e192 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF54X_H
@@ -2062,115 +2062,6 @@
 #define                  LOW_EVEN  0xff0000   /* Lower Limit for Even Bytes (Luma) */
 #define                 HIGH_EVEN  0xff000000 /* Upper Limit for Even Bytes (Luma) */
 
-/* ************************************************ */
-/* The TWI bit masks fields are from the ADSP-BF538 */
-/* and they have not been verified as the final     */
-/* ones for the Moab processors ... bz 1/19/2007    */
-/* ************************************************ */
-
-/* Bit masks for TWIx_CONTROL */
-
-#define                  PRESCALE  0x7f       /* Prescale Value */
-#define                   TWI_ENA  0x80       /* TWI Enable */
-#define                      SCCB  0x200      /* Serial Camera Control Bus */
-
-/* Bit maskes for TWIx_CLKDIV */
-
-#define                    CLKLOW  0xff       /* Clock Low */
-#define                     CLKHI  0xff00     /* Clock High */
-
-/* Bit maskes for TWIx_SLAVE_CTL */
-
-#define                       SEN  0x1        /* Slave Enable */
-#define                    STDVAL  0x4        /* Slave Transmit Data Valid */
-#define                       NAK  0x8        /* Not Acknowledge */
-#define                       GEN  0x10       /* General Call Enable */
-
-/* Bit maskes for TWIx_SLAVE_ADDR */
-
-#define                     SADDR  0x7f       /* Slave Mode Address */
-
-/* Bit maskes for TWIx_SLAVE_STAT */
-
-#define                      SDIR  0x1        /* Slave Transfer Direction */
-#define                     GCALL  0x2        /* General Call */
-
-/* Bit maskes for TWIx_MASTER_CTL */
-
-#define                       MEN  0x1        /* Master Mode Enable */
-#define                      MDIR  0x4        /* Master Transfer Direction */
-#define                      FAST  0x8        /* Fast Mode */
-#define                      STOP  0x10       /* Issue Stop Condition */
-#define                    RSTART  0x20       /* Repeat Start */
-#define                      DCNT  0x3fc0     /* Data Transfer Count */
-#define                    SDAOVR  0x4000     /* Serial Data Override */
-#define                    SCLOVR  0x8000     /* Serial Clock Override */
-
-/* Bit maskes for TWIx_MASTER_ADDR */
-
-#define                     MADDR  0x7f       /* Master Mode Address */
-
-/* Bit maskes for TWIx_MASTER_STAT */
-
-#define                     MPROG  0x1        /* Master Transfer in Progress */
-#define                   LOSTARB  0x2        /* Lost Arbitration */
-#define                      ANAK  0x4        /* Address Not Acknowledged */
-#define                      DNAK  0x8        /* Data Not Acknowledged */
-#define                  BUFRDERR  0x10       /* Buffer Read Error */
-#define                  BUFWRERR  0x20       /* Buffer Write Error */
-#define                    SDASEN  0x40       /* Serial Data Sense */
-#define                    SCLSEN  0x80       /* Serial Clock Sense */
-#define                   BUSBUSY  0x100      /* Bus Busy */
-
-/* Bit maskes for TWIx_FIFO_CTL */
-
-#define                  XMTFLUSH  0x1        /* Transmit Buffer Flush */
-#define                  RCVFLUSH  0x2        /* Receive Buffer Flush */
-#define                 XMTINTLEN  0x4        /* Transmit Buffer Interrupt Length */
-#define                 RCVINTLEN  0x8        /* Receive Buffer Interrupt Length */
-
-/* Bit maskes for TWIx_FIFO_STAT */
-
-#define                   XMTSTAT  0x3        /* Transmit FIFO Status */
-#define                   RCVSTAT  0xc        /* Receive FIFO Status */
-
-/* Bit maskes for TWIx_INT_MASK */
-
-#define                    SINITM  0x1        /* Slave Transfer Initiated Interrupt Mask */
-#define                    SCOMPM  0x2        /* Slave Transfer Complete Interrupt Mask */
-#define                     SERRM  0x4        /* Slave Transfer Error Interrupt Mask */
-#define                     SOVFM  0x8        /* Slave Overflow Interrupt Mask */
-#define                    MCOMPM  0x10       /* Master Transfer Complete Interrupt Mask */
-#define                     MERRM  0x20       /* Master Transfer Error Interrupt Mask */
-#define                  XMTSERVM  0x40       /* Transmit FIFO Service Interrupt Mask */
-#define                  RCVSERVM  0x80       /* Receive FIFO Service Interrupt Mask */
-
-/* Bit maskes for TWIx_INT_STAT */
-
-#define                     SINIT  0x1        /* Slave Transfer Initiated */
-#define                     SCOMP  0x2        /* Slave Transfer Complete */
-#define                      SERR  0x4        /* Slave Transfer Error */
-#define                      SOVF  0x8        /* Slave Overflow */
-#define                     MCOMP  0x10       /* Master Transfer Complete */
-#define                      MERR  0x20       /* Master Transfer Error */
-#define                   XMTSERV  0x40       /* Transmit FIFO Service */
-#define                   RCVSERV  0x80       /* Receive FIFO Service */
-
-/* Bit maskes for TWIx_XMT_DATA8 */
-
-#define                  XMTDATA8  0xff       /* Transmit FIFO 8-Bit Data */
-
-/* Bit maskes for TWIx_XMT_DATA16 */
-
-#define                 XMTDATA16  0xffff     /* Transmit FIFO 16-Bit Data */
-
-/* Bit maskes for TWIx_RCV_DATA8 */
-
-#define                  RCVDATA8  0xff       /* Receive FIFO 8-Bit Data */
-
-/* Bit maskes for TWIx_RCV_DATA16 */
-
-#define                 RCVDATA16  0xffff     /* Receive FIFO 16-Bit Data */
 
 /* ******************************************* */
 /*     MULTI BIT MACRO ENUMERATIONS            */
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 836baee..72476ff 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -6,8 +6,7 @@
  * DO NOT EDIT THIS FILE
  *
  * Copyright 2004-2011 Analog Devices Inc.
- * Licensed under the ADI BSD license.
- *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
+ * Licensed under the Clear BSD license.
  */
 
 /* This file should be up to date with:
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index 5f0ac5a..9f21f76 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -1,7 +1,7 @@
 /*
  * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF561_H
diff --git a/arch/blackfin/mach-bf609/Kconfig b/arch/blackfin/mach-bf609/Kconfig
new file mode 100644
index 0000000..2cb7272
--- /dev/null
+++ b/arch/blackfin/mach-bf609/Kconfig
@@ -0,0 +1,56 @@
+config BF60x
+	def_bool y
+	depends on (BF609)
+	select IRQ_PREFLOW_FASTEOI
+
+if (BF60x)
+
+source "arch/blackfin/mach-bf609/boards/Kconfig"
+
+menu "BF609 Specific Configuration"
+
+comment "Pin Interrupt to Port Assignment"
+menu "Assignment"
+
+config PINTx_REASSIGN
+	bool "Reprogram PINT Assignment"
+	default y
+	help
+	  The interrupt assignment registers control the pin-to-interrupt
+	  assignment in a byte-wide manner. Each option allows you to select
+	  a set of pins (High/Low Byte) of a specific Port being mapped
+	  to one of the PIN Interrupts IRQ_PINTx.
+
+	  You shouldn't change any of these unless you know exactly what
+	  you're doing. Please consult the Blackfin BF60x Processor Hardware
+	  Reference Manual.
+
+config PINT0_ASSIGN
+	hex "PINT0_ASSIGN"
+	depends on PINTx_REASSIGN
+	default 0x00000101
+config PINT1_ASSIGN
+	hex "PINT1_ASSIGN"
+	depends on PINTx_REASSIGN
+	default 0x00000101
+config PINT2_ASSIGN
+	hex "PINT2_ASSIGN"
+	depends on PINTx_REASSIGN
+	default 0x00000101
+config PINT3_ASSIGN
+	hex "PINT3_ASSIGN"
+	depends on PINTx_REASSIGN
+	default 0x00000101
+config PINT4_ASSIGN
+	hex "PINT4_ASSIGN"
+	depends on PINTx_REASSIGN
+	default 0x00000101
+config PINT5_ASSIGN
+	hex "PINT5_ASSIGN"
+	depends on PINTx_REASSIGN
+	default 0x00000101
+
+endmenu
+
+endmenu
+
+endif
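
For context, the CONFIG_PINTx_ASSIGN values defined above are written into the
pin-interrupt assignment registers when the GPIO/IRQ code initializes. A minimal
sketch of that consumer, modelled on the existing BF54x bfin_gpio code (the
pint[] register array and its assign field are assumptions carried over from
that code, not something added by this patch):

#ifdef CONFIG_PINTx_REASSIGN
	/* Route the Kconfig-selected port byte to each pin interrupt block. */
	pint[0]->assign = CONFIG_PINT0_ASSIGN;
	pint[1]->assign = CONFIG_PINT1_ASSIGN;
	pint[2]->assign = CONFIG_PINT2_ASSIGN;
	pint[3]->assign = CONFIG_PINT3_ASSIGN;
	pint[4]->assign = CONFIG_PINT4_ASSIGN;
	pint[5]->assign = CONFIG_PINT5_ASSIGN;
#endif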
diff --git a/arch/blackfin/mach-bf609/Makefile b/arch/blackfin/mach-bf609/Makefile
new file mode 100644
index 0000000..2a27f81
--- /dev/null
+++ b/arch/blackfin/mach-bf609/Makefile
@@ -0,0 +1,6 @@
+#
+# arch/blackfin/mach-bf609/Makefile
+#
+
+obj-y := dma.o clock.o
+obj-$(CONFIG_PM) += pm.o hibernate.o
diff --git a/arch/blackfin/mach-bf609/boards/Kconfig b/arch/blackfin/mach-bf609/boards/Kconfig
new file mode 100644
index 0000000..30e8b6b
--- /dev/null
+++ b/arch/blackfin/mach-bf609/boards/Kconfig
@@ -0,0 +1,12 @@
+choice
+	prompt "System type"
+	default BFIN609_EZKIT
+	help
+	  Select your board!
+
+config BFIN609_EZKIT
+	bool "BF609-EZKIT"
+	help
+	  BFIN609-EZKIT board support.
+
+endchoice
diff --git a/arch/blackfin/mach-bf609/boards/Makefile b/arch/blackfin/mach-bf609/boards/Makefile
new file mode 100644
index 0000000..11f98b0
--- /dev/null
+++ b/arch/blackfin/mach-bf609/boards/Makefile
@@ -0,0 +1,5 @@
+#
+# arch/blackfin/mach-bf609/boards/Makefile
+#
+
+obj-$(CONFIG_BFIN609_EZKIT)            += ezkit.o
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
new file mode 100644
index 0000000..ac64f47
--- /dev/null
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -0,0 +1,1340 @@
+/*
+ * Copyright 2004-2009 Analog Devices Inc.
+ *                2005 National ICT Australia (NICTA)
+ *                      Aidan Williams <aidan@nicta.com.au>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/usb/musb.h>
+#include <asm/bfin6xx_spi.h>
+#include <asm/dma.h>
+#include <asm/gpio.h>
+#include <asm/nand.h>
+#include <asm/dpmc.h>
+#include <asm/portmux.h>
+#include <asm/bfin_sdh.h>
+#include <linux/input.h>
+#include <linux/spi/ad7877.h>
+
+/*
+ * Name the board for /proc/cpuinfo
+ */
+const char bfin_board_name[] = "ADI BF609-EZKIT";
+
+/*
+ *  Driver needs to know address, irq and flag pin.
+ */
+
+#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
+#include <linux/usb/isp1760.h>
+static struct resource bfin_isp1760_resources[] = {
+	[0] = {
+		.start  = 0x2C0C0000,
+		.end    = 0x2C0C0000 + 0xfffff,
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = IRQ_PG7,
+		.end    = IRQ_PG7,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct isp1760_platform_data isp1760_priv = {
+	.is_isp1761 = 0,
+	.bus_width_16 = 1,
+	.port1_otg = 0,
+	.analog_oc = 0,
+	.dack_polarity_high = 0,
+	.dreq_polarity_high = 0,
+};
+
+static struct platform_device bfin_isp1760_device = {
+	.name           = "isp1760",
+	.id             = 0,
+	.dev = {
+		.platform_data = &isp1760_priv,
+	},
+	.num_resources  = ARRAY_SIZE(bfin_isp1760_resources),
+	.resource       = bfin_isp1760_resources,
+};
+#endif
+
+#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE)
+#include <asm/bfin_rotary.h>
+
+static struct bfin_rotary_platform_data bfin_rotary_data = {
+	/*.rotary_up_key     = KEY_UP,*/
+	/*.rotary_down_key   = KEY_DOWN,*/
+	.rotary_rel_code   = REL_WHEEL,
+	.rotary_button_key = KEY_ENTER,
+	.debounce	   = 10,	/* 0..17 */
+	.mode		   = ROT_QUAD_ENC | ROT_DEBE,
+};
+
+static struct resource bfin_rotary_resources[] = {
+	{
+		.start = IRQ_CNT,
+		.end = IRQ_CNT,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_rotary_device = {
+	.name		= "bfin-rotary",
+	.id		= -1,
+	.num_resources 	= ARRAY_SIZE(bfin_rotary_resources),
+	.resource 	= bfin_rotary_resources,
+	.dev		= {
+		.platform_data = &bfin_rotary_data,
+	},
+};
+#endif
+
+#if defined(CONFIG_STMMAC_ETH) || defined(CONFIG_STMMAC_ETH_MODULE)
+#include <linux/stmmac.h>
+
+static unsigned short pins[] = P_RMII0;
+
+static struct stmmac_mdio_bus_data phy_private_data = {
+	.bus_id = 0,
+	.phy_mask = 1,
+};
+
+static struct plat_stmmacenet_data eth_private_data = {
+	.bus_id   = 0,
+	.enh_desc = 1,
+	.phy_addr = 1,
+	.mdio_bus_data = &phy_private_data,
+};
+
+static struct platform_device bfin_eth_device = {
+	.name           = "stmmaceth",
+	.id             = 0,
+	.num_resources  = 2,
+	.resource       = (struct resource[]) {
+		{
+			.start  = EMAC0_MACCFG,
+			.end    = EMAC0_MACCFG + 0x1274,
+			.flags  = IORESOURCE_MEM,
+		},
+		{
+			.name   = "macirq",
+			.start  = IRQ_EMAC0_STAT,
+			.end    = IRQ_EMAC0_STAT,
+			.flags  = IORESOURCE_IRQ,
+		},
+	},
+	.dev = {
+		.power.can_wakeup = 1,
+		.platform_data = &eth_private_data,
+	}
+};
+#endif
+
+#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE)
+#include <linux/input/adxl34x.h>
+static const struct adxl34x_platform_data adxl34x_info = {
+	.x_axis_offset = 0,
+	.y_axis_offset = 0,
+	.z_axis_offset = 0,
+	.tap_threshold = 0x31,
+	.tap_duration = 0x10,
+	.tap_latency = 0x60,
+	.tap_window = 0xF0,
+	.tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN,
+	.act_axis_control = 0xFF,
+	.activity_threshold = 5,
+	.inactivity_threshold = 3,
+	.inactivity_time = 4,
+	.free_fall_threshold = 0x7,
+	.free_fall_time = 0x20,
+	.data_rate = 0x8,
+	.data_range = ADXL_FULL_RES,
+
+	.ev_type = EV_ABS,
+	.ev_code_x = ABS_X,		/* EV_REL */
+	.ev_code_y = ABS_Y,		/* EV_REL */
+	.ev_code_z = ABS_Z,		/* EV_REL */
+
+	.ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY x,y,z */
+
+/*	.ev_code_ff = KEY_F,*/		/* EV_KEY */
+/*	.ev_code_act_inactivity = KEY_A,*/	/* EV_KEY */
+	.power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
+	.fifo_mode = ADXL_FIFO_STREAM,
+	.orientation_enable = ADXL_EN_ORIENTATION_3D,
+	.deadzone_angle = ADXL_DEADZONE_ANGLE_10p8,
+	.divisor_length = ADXL_LP_FILTER_DIVISOR_16,
+	/* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */
+	.ev_codes_orient_3d = {BTN_Z, BTN_Y, BTN_X, BTN_A, BTN_B, BTN_C},
+};
+#endif
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+static struct platform_device rtc_device = {
+	.name = "rtc-bfin",
+	.id   = -1,
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+static struct resource bfin_uart0_resources[] = {
+	{
+		.start = UART0_REVID,
+		.end = UART0_RXDIV+4,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART0_TX,
+		.end = IRQ_UART0_TX,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_UART0_STAT,
+		.end = IRQ_UART0_STAT,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_TX,
+		.end = CH_UART0_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX,
+		.flags = IORESOURCE_DMA,
+	},
+#ifdef CONFIG_BFIN_UART0_CTSRTS
+	{	/* CTS pin -- 0 means not supported */
+		.start = GPIO_PD10,
+		.end = GPIO_PD10,
+		.flags = IORESOURCE_IO,
+	},
+	{	/* RTS pin -- 0 means not supported */
+		.start = GPIO_PD9,
+		.end = GPIO_PD9,
+		.flags = IORESOURCE_IO,
+	},
+#endif
+};
+
+static unsigned short bfin_uart0_peripherals[] = {
+	P_UART0_TX, P_UART0_RX,
+#ifdef CONFIG_BFIN_UART0_CTSRTS
+	P_UART0_RTS, P_UART0_CTS,
+#endif
+	0
+};
+
+static struct platform_device bfin_uart0_device = {
+	.name = "bfin-uart",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_uart0_resources),
+	.resource = bfin_uart0_resources,
+	.dev = {
+		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
+	},
+};
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART1
+static struct resource bfin_uart1_resources[] = {
+	{
+		.start = UART1_REVID,
+		.end = UART1_RXDIV+4,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART1_TX,
+		.end = IRQ_UART1_TX,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_UART1_RX,
+		.end = IRQ_UART1_RX,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_UART1_STAT,
+		.end = IRQ_UART1_STAT,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_TX,
+		.end = CH_UART1_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end = CH_UART1_RX,
+		.flags = IORESOURCE_DMA,
+	},
+#ifdef CONFIG_BFIN_UART1_CTSRTS
+	{	/* CTS pin -- 0 means not supported */
+		.start = GPIO_PG13,
+		.end = GPIO_PG13,
+		.flags = IORESOURCE_IO,
+	},
+	{	/* RTS pin -- 0 means not supported */
+		.start = GPIO_PG10,
+		.end = GPIO_PG10,
+		.flags = IORESOURCE_IO,
+	},
+#endif
+};
+
+static unsigned short bfin_uart1_peripherals[] = {
+	P_UART1_TX, P_UART1_RX,
+#ifdef CONFIG_BFIN_UART1_CTSRTS
+	P_UART1_RTS, P_UART1_CTS,
+#endif
+	0
+};
+
+static struct platform_device bfin_uart1_device = {
+	.name = "bfin-uart",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_uart1_resources),
+	.resource = bfin_uart1_resources,
+	.dev = {
+		.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
+	},
+};
+#endif
+#endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+static struct resource bfin_sir0_resources[] = {
+	{
+		.start = 0xFFC00400,
+		.end = 0xFFC004FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART0_TX,
+		.end = IRQ_UART0_TX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_TX,
+		.end = CH_UART0_TX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir0_device = {
+	.name = "bfin_sir",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
+	.resource = bfin_sir0_resources,
+};
+#endif
+#ifdef CONFIG_BFIN_SIR1
+static struct resource bfin_sir1_resources[] = {
+	{
+		.start = 0xFFC02000,
+		.end = 0xFFC020FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART1_TX,
+		.end = IRQ_UART1_TX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_TX,
+		.end = CH_UART1_TX+1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+static struct platform_device bfin_sir1_device = {
+	.name = "bfin_sir",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
+	.resource = bfin_sir1_resources,
+};
+#endif
+#endif
+
+#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
+static struct resource musb_resources[] = {
+	[0] = {
+		.start	= 0xFFCC1000,
+		.end	= 0xFFCC1398,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {	/* general IRQ */
+		.start	= IRQ_USB_STAT,
+		.end	= IRQ_USB_STAT,
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
+	},
+	[2] = {	/* DMA IRQ */
+		.start	= IRQ_USB_DMA,
+		.end	= IRQ_USB_DMA,
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
+	},
+};
+
+static struct musb_hdrc_config musb_config = {
+	.multipoint	= 1,
+	.dyn_fifo	= 0,
+	.dma		= 1,
+	.num_eps	= 16,
+	.dma_channels	= 8,
+	.clkin          = 48,           /* musb CLKIN in MHZ */
+};
+
+static struct musb_hdrc_platform_data musb_plat = {
+#if defined(CONFIG_USB_MUSB_HDRC) && defined(CONFIG_USB_GADGET_MUSB_HDRC)
+	.mode		= MUSB_OTG,
+#elif defined(CONFIG_USB_MUSB_HDRC)
+	.mode		= MUSB_HOST,
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+	.mode		= MUSB_PERIPHERAL,
+#endif
+	.config		= &musb_config,
+};
+
+static u64 musb_dmamask = ~(u32)0;
+
+static struct platform_device musb_device = {
+	.name		= "musb-blackfin",
+	.id		= 0,
+	.dev = {
+		.dma_mask		= &musb_dmamask,
+		.coherent_dma_mask	= 0xffffffff,
+		.platform_data		= &musb_plat,
+	},
+	.num_resources	= ARRAY_SIZE(musb_resources),
+	.resource	= musb_resources,
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
+static struct resource bfin_sport0_uart_resources[] = {
+	{
+		.start = SPORT0_TCR1,
+		.end = SPORT0_MRCS3+4,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_SPORT0_RX,
+		.end = IRQ_SPORT0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_SPORT0_ERROR,
+		.end = IRQ_SPORT0_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static unsigned short bfin_sport0_peripherals[] = {
+	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
+};
+
+static struct platform_device bfin_sport0_uart_device = {
+	.name = "bfin-sport-uart",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
+	.resource = bfin_sport0_uart_resources,
+	.dev = {
+		.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
+	},
+};
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
+static struct resource bfin_sport1_uart_resources[] = {
+	{
+		.start = SPORT1_TCR1,
+		.end = SPORT1_MRCS3+4,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_SPORT1_RX,
+		.end = IRQ_SPORT1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_SPORT1_ERROR,
+		.end = IRQ_SPORT1_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static unsigned short bfin_sport1_peripherals[] = {
+	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
+};
+
+static struct platform_device bfin_sport1_uart_device = {
+	.name = "bfin-sport-uart",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
+	.resource = bfin_sport1_uart_resources,
+	.dev = {
+		.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
+	},
+};
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
+static struct resource bfin_sport2_uart_resources[] = {
+	{
+		.start = SPORT2_TCR1,
+		.end = SPORT2_MRCS3+4,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_SPORT2_RX,
+		.end = IRQ_SPORT2_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_SPORT2_ERROR,
+		.end = IRQ_SPORT2_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static unsigned short bfin_sport2_peripherals[] = {
+	P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
+	P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
+};
+
+static struct platform_device bfin_sport2_uart_device = {
+	.name = "bfin-sport-uart",
+	.id = 2,
+	.num_resources = ARRAY_SIZE(bfin_sport2_uart_resources),
+	.resource = bfin_sport2_uart_resources,
+	.dev = {
+		.platform_data = &bfin_sport2_peripherals, /* Passed to driver */
+	},
+};
+#endif
+#endif
+
+#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
+
+static unsigned short bfin_can0_peripherals[] = {
+	P_CAN0_RX, P_CAN0_TX, 0
+};
+
+static struct resource bfin_can0_resources[] = {
+	{
+		.start = 0xFFC00A00,
+		.end = 0xFFC00FFF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_CAN0_RX,
+		.end = IRQ_CAN0_RX,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_CAN0_TX,
+		.end = IRQ_CAN0_TX,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_CAN0_STAT,
+		.end = IRQ_CAN0_STAT,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_can0_device = {
+	.name = "bfin_can",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_can0_resources),
+	.resource = bfin_can0_resources,
+	.dev = {
+		.platform_data = &bfin_can0_peripherals, /* Passed to driver */
+	},
+};
+
+#endif
+
+#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
+static struct mtd_partition partition_info[] = {
+	{
+		.name = "bootloader(nand)",
+		.offset = 0,
+		.size = 0x80000,
+	}, {
+		.name = "linux kernel(nand)",
+		.offset = MTDPART_OFS_APPEND,
+		.size = 4 * 1024 * 1024,
+	},
+	{
+		.name = "file system(nand)",
+		.offset = MTDPART_OFS_APPEND,
+		.size = MTDPART_SIZ_FULL,
+	},
+};
+
+static struct bf5xx_nand_platform bfin_nand_platform = {
+	.data_width = NFC_NWIDTH_8,
+	.partitions = partition_info,
+	.nr_partitions = ARRAY_SIZE(partition_info),
+	.rd_dly = 3,
+	.wr_dly = 3,
+};
+
+static struct resource bfin_nand_resources[] = {
+	{
+		.start = 0xFFC03B00,
+		.end = 0xFFC03B4F,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = CH_NFC,
+		.end = CH_NFC,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_nand_device = {
+	.name = "bfin-nand",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_nand_resources),
+	.resource = bfin_nand_resources,
+	.dev = {
+		.platform_data = &bfin_nand_platform,
+	},
+};
+#endif
+
+#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
+
+static struct bfin_sd_host bfin_sdh_data = {
+	.dma_chan = CH_RSI,
+	.irq_int0 = IRQ_RSI_INT0,
+	.pin_req = {P_RSI_DATA0, P_RSI_DATA1, P_RSI_DATA2, P_RSI_DATA3, P_RSI_CMD, P_RSI_CLK, 0},
+};
+
+static struct platform_device bfin_sdh_device = {
+	.name = "bfin-sdh",
+	.id = 0,
+	.dev = {
+		.platform_data = &bfin_sdh_data,
+	},
+};
+#endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+static struct mtd_partition ezkit_partitions[] = {
+	{
+		.name       = "bootloader(nor)",
+		.size       = 0x80000,
+		.offset     = 0,
+	}, {
+		.name       = "linux kernel(nor)",
+		.size       = 0x400000,
+		.offset     = MTDPART_OFS_APPEND,
+	}, {
+		.name       = "file system(nor)",
+		.size       = 0x1000000 - 0x80000 - 0x400000,
+		.offset     = MTDPART_OFS_APPEND,
+	},
+};
+
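The partition arithmetic above spans the whole 16 MiB NOR device mapped at
0xb0000000-0xb0ffffff (see ezkit_flash_resource below): 0x80000 (512 KiB) for
the bootloader, 0x400000 (4 MiB) for the kernel, and the remaining
0x1000000 - 0x80000 - 0x400000 = 0xb80000 (11.5 MiB) for the file system.
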
+int bf609_nor_flash_init(struct platform_device *dev)
+{
+#define CONFIG_SMC_GCTL_VAL     0x00000010
+	const unsigned short pins[] = {
+		P_A3, P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12,
+		P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21,
+		P_A22, P_A23, P_A24, P_A25, P_NORCK, 0,
+	};
+
+	peripheral_request_list(pins, "smc0");
+
+	bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
+	bfin_write32(SMC_B0CTL, 0x01002011);
+	bfin_write32(SMC_B0TIM, 0x08170977);
+	bfin_write32(SMC_B0ETIM, 0x00092231);
+	return 0;
+}
+
+static struct physmap_flash_data ezkit_flash_data = {
+	.width      = 2,
+	.parts      = ezkit_partitions,
+	.init 	    = bf609_nor_flash_init,
+	.nr_parts   = ARRAY_SIZE(ezkit_partitions),
+};
+
+static struct resource ezkit_flash_resource = {
+	.start = 0xb0000000,
+	.end   = 0xb0ffffff,
+	.flags = IORESOURCE_MEM,
+};
+
+static struct platform_device ezkit_flash_device = {
+	.name          = "physmap-flash",
+	.id            = 0,
+	.dev = {
+		.platform_data = &ezkit_flash_data,
+	},
+	.num_resources = 1,
+	.resource      = &ezkit_flash_resource,
+};
+#endif
+
+#if defined(CONFIG_MTD_M25P80) \
+	|| defined(CONFIG_MTD_M25P80_MODULE)
+/* SPI flash chip (w25q32) */
+static struct mtd_partition bfin_spi_flash_partitions[] = {
+	{
+		.name = "bootloader(spi)",
+		.size = 0x00080000,
+		.offset = 0,
+		.mask_flags = MTD_CAP_ROM
+	}, {
+		.name = "linux kernel(spi)",
+		.size = 0x00180000,
+		.offset = MTDPART_OFS_APPEND,
+	}, {
+		.name = "file system(spi)",
+		.size = MTDPART_SIZ_FULL,
+		.offset = MTDPART_OFS_APPEND,
+	}
+};
+
+static struct flash_platform_data bfin_spi_flash_data = {
+	.name = "m25p80",
+	.parts = bfin_spi_flash_partitions,
+	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
+	.type = "w25q32",
+};
+
+static struct bfin6xx_spi_chip spi_flash_chip_info = {
+	.enable_dma = true,         /* use dma transfer with this chip*/
+};
+#endif
+
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+static struct bfin6xx_spi_chip spidev_chip_info = {
+	.enable_dma = true,
+};
+#endif
+
+#if defined(CONFIG_SND_BF6XX_I2S) || defined(CONFIG_SND_BF6XX_I2S_MODULE)
+static struct platform_device bfin_i2s_pcm = {
+	.name = "bfin-i2s-pcm-audio",
+	.id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF6XX_SOC_I2S) || \
+	defined(CONFIG_SND_BF6XX_SOC_I2S_MODULE)
+#include <asm/bfin_sport3.h>
+static struct resource bfin_snd_resources[] = {
+	{
+		.start = SPORT0_CTL_A,
+		.end = SPORT0_CTL_A,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = SPORT0_CTL_B,
+		.end = SPORT0_CTL_B,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = CH_SPORT0_TX,
+		.end = CH_SPORT0_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_SPORT0_RX,
+		.end = CH_SPORT0_RX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = IRQ_SPORT0_TX_STAT,
+		.end = IRQ_SPORT0_TX_STAT,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_SPORT0_RX_STAT,
+		.end = IRQ_SPORT0_RX_STAT,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static const unsigned short bfin_snd_pin[] = {
+	P_SPORT0_ACLK, P_SPORT0_AFS, P_SPORT0_AD0, P_SPORT0_BCLK,
+	P_SPORT0_BFS, P_SPORT0_BD0, 0,
+};
+
+static struct bfin_snd_platform_data bfin_snd_data = {
+	.pin_req = bfin_snd_pin,
+};
+
+static struct platform_device bfin_i2s = {
+	.name = "bfin-i2s",
+	.num_resources = ARRAY_SIZE(bfin_snd_resources),
+	.resource = bfin_snd_resources,
+	.dev = {
+		.platform_data = &bfin_snd_data,
+	},
+};
+#endif
+
+#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61) || \
+	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61_MODULE)
+static struct platform_device adau1761_device = {
+	.name = "bfin-eval-adau1x61",
+};
+#endif
+
+#if defined(CONFIG_SND_SOC_ADAU1761) || defined(CONFIG_SND_SOC_ADAU1761_MODULE)
+#include <sound/adau17x1.h>
+static struct adau1761_platform_data adau1761_info = {
+	.lineout_mode = ADAU1761_OUTPUT_MODE_LINE,
+	.headphone_mode = ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS,
+};
+#endif
+
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+	|| defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+#include <linux/videodev2.h>
+#include <media/blackfin/bfin_capture.h>
+#include <media/blackfin/ppi.h>
+
+static const unsigned short ppi_req[] = {
+	P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+	P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+	P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+	0,
+};
+
+static const struct ppi_info ppi_info = {
+	.type = PPI_TYPE_EPPI3,
+	.dma_ch = CH_EPPI0_CH0,
+	.irq_err = IRQ_EPPI0_STAT,
+	.base = (void __iomem *)EPPI0_STAT,
+	.pin_req = ppi_req,
+};
+
+#if defined(CONFIG_VIDEO_VS6624) \
+	|| defined(CONFIG_VIDEO_VS6624_MODULE)
+static struct v4l2_input vs6624_inputs[] = {
+	{
+		.index = 0,
+		.name = "Camera",
+		.type = V4L2_INPUT_TYPE_CAMERA,
+		.std = V4L2_STD_UNKNOWN,
+	},
+};
+
+static struct bcap_route vs6624_routes[] = {
+	{
+		.input = 0,
+		.output = 0,
+	},
+};
+
+static const unsigned vs6624_ce_pin = GPIO_PD1;
+
+static struct bfin_capture_config bfin_capture_data = {
+	.card_name = "BF609",
+	.inputs = vs6624_inputs,
+	.num_inputs = ARRAY_SIZE(vs6624_inputs),
+	.routes = vs6624_routes,
+	.i2c_adapter_id = 0,
+	.board_info = {
+		.type = "vs6624",
+		.addr = 0x10,
+		.platform_data = (void *)&vs6624_ce_pin,
+	},
+	.ppi_info = &ppi_info,
+	.ppi_control = (PACK_EN | DLEN_8 | EPPI_CTL_FS1HI_FS2HI
+			| EPPI_CTL_POLC3 | EPPI_CTL_SYNC2 | EPPI_CTL_NON656),
+	.blank_clocks = 8,
+};
+#endif
+
+static struct platform_device bfin_capture_device = {
+	.name = "bfin_capture",
+	.dev = {
+		.platform_data = &bfin_capture_data,
+	},
+};
+#endif
+
+#if defined(CONFIG_BFIN_CRC)
+#define BFIN_CRC_NAME "bfin-crc"
+
+static struct resource bfin_crc0_resources[] = {
+	{
+		.start = REG_CRC0_CTL,
+		.end = REG_CRC0_REVID+4,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_CRC0_DCNTEXP,
+		.end = IRQ_CRC0_DCNTEXP,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_MEM_STREAM0_SRC_CRC0,
+		.end = CH_MEM_STREAM0_SRC_CRC0,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_MEM_STREAM0_DEST_CRC0,
+		.end = CH_MEM_STREAM0_DEST_CRC0,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_crc0_device = {
+	.name = BFIN_CRC_NAME,
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_crc0_resources),
+	.resource = bfin_crc0_resources,
+};
+
+static struct resource bfin_crc1_resources[] = {
+	{
+		.start = REG_CRC1_CTL,
+		.end = REG_CRC1_REVID+4,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_CRC1_DCNTEXP,
+		.end = IRQ_CRC1_DCNTEXP,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_MEM_STREAM1_SRC_CRC1,
+		.end = CH_MEM_STREAM1_SRC_CRC1,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_MEM_STREAM1_DEST_CRC1,
+		.end = CH_MEM_STREAM1_DEST_CRC1,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static struct platform_device bfin_crc1_device = {
+	.name = BFIN_CRC_NAME,
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_crc1_resources),
+	.resource = bfin_crc1_resources,
+};
+#endif
+
+#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
+static const struct ad7877_platform_data bfin_ad7877_ts_info = {
+	.model			= 7877,
+	.vref_delay_usecs	= 50,	/* internal, no capacitor */
+	.x_plate_ohms		= 419,
+	.y_plate_ohms		= 486,
+	.pressure_max		= 1000,
+	.pressure_min		= 0,
+	.stopacq_polarity 	= 1,
+	.first_conversion_delay = 3,
+	.acquisition_time 	= 1,
+	.averaging 		= 1,
+	.pen_down_acc_interval 	= 1,
+};
+#endif
+
+static struct spi_board_info bfin_spi_board_info[] __initdata = {
+#if defined(CONFIG_MTD_M25P80) \
+	|| defined(CONFIG_MTD_M25P80_MODULE)
+	{
+		/* the modalias must be the same as spi device driver name */
+		.modalias = "m25p80", /* Name of spi_driver for this device */
+		.max_speed_hz = 25000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0, /* Framework bus number */
+		.chip_select = 1, /* SPI_SSEL1*/
+		.platform_data = &bfin_spi_flash_data,
+		.controller_data = &spi_flash_chip_info,
+		.mode = SPI_MODE_3,
+	},
+#endif
+#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
+	{
+		.modalias		= "ad7877",
+		.platform_data		= &bfin_ad7877_ts_info,
+		.irq			= IRQ_PB4,	/* old boards (<=Rev 1.3) use IRQ_PJ11 */
+		.max_speed_hz		= 12500000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num		= 0,
+		.chip_select  		= 2,
+	},
+#endif
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+	{
+		.modalias = "spidev",
+		.max_speed_hz = 3125000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num = 0,
+		.chip_select = 1,
+		.controller_data = &spidev_chip_info,
+	},
+#endif
+#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
+	{
+		.modalias		= "adxl34x",
+		.platform_data		= &adxl34x_info,
+		.irq			= IRQ_PC5,
+		.max_speed_hz		= 5000000,     /* max spi clock (SCK) speed in HZ */
+		.bus_num		= 1,
+		.chip_select  		= 2,
+		.mode = SPI_MODE_3,
+	},
+#endif
+};
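
As the comment on the m25p80 entry above notes, .modalias is the string the SPI
core matches against a driver's name when binding these board-described devices,
so the two must agree exactly (the in-tree m25p80 driver already provides the
matching name). A generic sketch of the driver side of that contract
(example_probe/example_remove and the module boilerplate are illustrative, not
taken from this patch):

#include <linux/module.h>
#include <linux/spi/spi.h>

static int example_probe(struct spi_device *spi)
{
	return 0;	/* a real driver would initialize the device here */
}

static int example_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver example_spi_driver = {
	.driver = {
		.name	= "m25p80",	/* must match spi_board_info.modalias */
		.owner	= THIS_MODULE,
	},
	.probe	= example_probe,
	.remove	= example_remove,
};
module_spi_driver(example_spi_driver);
MODULE_LICENSE("GPL");
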
+#if defined(CONFIG_SPI_BFIN6XX) || defined(CONFIG_SPI_BFIN6XX_MODULE)
+/* SPI (0) */
+static struct resource bfin_spi0_resource[] = {
+	{
+		.start = SPI0_REGBASE,
+		.end   = SPI0_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = CH_SPI0_TX,
+		.end   = CH_SPI0_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_SPI0_RX,
+		.end   = CH_SPI0_RX,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+/* SPI (1) */
+static struct resource bfin_spi1_resource[] = {
+	{
+		.start = SPI1_REGBASE,
+		.end   = SPI1_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = CH_SPI1_TX,
+		.end   = CH_SPI1_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_SPI1_RX,
+		.end   = CH_SPI1_RX,
+		.flags = IORESOURCE_DMA,
+	},
+
+};
+
+/* SPI controller data */
+static struct bfin6xx_spi_master bf60x_spi_master_info0 = {
+	.num_chipselect = 4,
+	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
+};
+
+static struct platform_device bf60x_spi_master0 = {
+	.name = "bfin-spi",
+	.id = 0, /* Bus number */
+	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
+	.resource = bfin_spi0_resource,
+	.dev = {
+		.platform_data = &bf60x_spi_master_info0, /* Passed to driver */
+	},
+};
+
+static struct bfin6xx_spi_master bf60x_spi_master_info1 = {
+	.num_chipselect = 4,
+	.pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0},
+};
+
+static struct platform_device bf60x_spi_master1 = {
+	.name = "bfin-spi",
+	.id = 1, /* Bus number */
+	.num_resources = ARRAY_SIZE(bfin_spi1_resource),
+	.resource = bfin_spi1_resource,
+	.dev = {
+		.platform_data = &bf60x_spi_master_info1, /* Passed to driver */
+	},
+};
+#endif  /* spi master and devices */
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
+
+static struct resource bfin_twi0_resource[] = {
+	[0] = {
+		.start = TWI0_CLKDIV,
+		.end   = TWI0_CLKDIV + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_TWI0,
+		.end   = IRQ_TWI0,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device i2c_bfin_twi0_device = {
+	.name = "i2c-bfin-twi",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
+	.resource = bfin_twi0_resource,
+	.dev = {
+		.platform_data = &bfin_twi0_pins,
+	},
+};
+
+static const u16 bfin_twi1_pins[] = {P_TWI1_SCL, P_TWI1_SDA, 0};
+
+static struct resource bfin_twi1_resource[] = {
+	[0] = {
+		.start = TWI1_CLKDIV,
+		.end   = TWI1_CLKDIV + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_TWI1,
+		.end   = IRQ_TWI1,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device i2c_bfin_twi1_device = {
+	.name = "i2c-bfin-twi",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_twi1_resource),
+	.resource = bfin_twi1_resource,
+	.dev = {
+		.platform_data = &bfin_twi1_pins,
+	},
+};
+#endif
+
+static struct i2c_board_info __initdata bfin_i2c_board_info0[] = {
+#if defined(CONFIG_INPUT_ADXL34X_I2C) || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE)
+	{
+		I2C_BOARD_INFO("adxl34x", 0x53),
+		.irq = IRQ_PC5,
+		.platform_data = (void *)&adxl34x_info,
+	},
+#endif
+#if defined(CONFIG_SND_SOC_ADAU1761) || defined(CONFIG_SND_SOC_ADAU1761_MODULE)
+	{
+		I2C_BOARD_INFO("adau1761", 0x38),
+		.platform_data = (void *)&adau1761_info
+	},
+#endif
+};
+
+static struct i2c_board_info __initdata bfin_i2c_board_info1[] = {
+};
+
+static const unsigned int cclk_vlev_datasheet[] =
+{
+/*
+ * Internal VLEV BF54XSBBC1533
+ * Temporarily using these values until the data sheet is updated.
+ */
+	VRPAIR(VLEV_085, 150000000),
+	VRPAIR(VLEV_090, 250000000),
+	VRPAIR(VLEV_110, 276000000),
+	VRPAIR(VLEV_115, 301000000),
+	VRPAIR(VLEV_120, 525000000),
+	VRPAIR(VLEV_125, 550000000),
+	VRPAIR(VLEV_130, 600000000),
+};
+
+static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
+	.tuple_tab = cclk_vlev_datasheet,
+	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
+	.vr_settling_time = 25 /* us */,
+};
+
+static struct platform_device bfin_dpmc = {
+	.name = "bfin dpmc",
+	.dev = {
+		.platform_data = &bfin_dmpc_vreg_data,
+	},
+};
+
+static struct platform_device *ezkit_devices[] __initdata = {
+
+	&bfin_dpmc,
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+	&rtc_device,
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+	&bfin_uart0_device,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART1
+	&bfin_uart1_device,
+#endif
+#endif
+
+#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
+#ifdef CONFIG_BFIN_SIR0
+	&bfin_sir0_device,
+#endif
+#ifdef CONFIG_BFIN_SIR1
+	&bfin_sir1_device,
+#endif
+#endif
+
+#if defined(CONFIG_STMMAC_ETH) || defined(CONFIG_STMMAC_ETH_MODULE)
+	&bfin_eth_device,
+#endif
+
+#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
+	&musb_device,
+#endif
+
+#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
+	&bfin_isp1760_device,
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
+	&bfin_sport0_uart_device,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
+	&bfin_sport1_uart_device,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
+	&bfin_sport2_uart_device,
+#endif
+#endif
+
+#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
+	&bfin_can0_device,
+#endif
+
+#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
+	&bfin_nand_device,
+#endif
+
+#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE)
+	&bfin_sdh_device,
+#endif
+
+#if defined(CONFIG_SPI_BFIN6XX) || defined(CONFIG_SPI_BFIN6XX_MODULE)
+	&bf60x_spi_master0,
+	&bf60x_spi_master1,
+#endif
+
+#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE)
+	&bfin_rotary_device,
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+	&i2c_bfin_twi0_device,
+#if !defined(CONFIG_BF542)
+	&i2c_bfin_twi1_device,
+#endif
+#endif
+
+#if defined(CONFIG_BFIN_CRC)
+	&bfin_crc0_device,
+	&bfin_crc1_device,
+#endif
+
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+	&bfin_device_gpiokeys,
+#endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+	&ezkit_flash_device,
+#endif
+#if defined(CONFIG_SND_BF6XX_I2S) || defined(CONFIG_SND_BF6XX_I2S_MODULE)
+	&bfin_i2s_pcm,
+#endif
+#if defined(CONFIG_SND_BF6XX_SOC_I2S) || \
+	defined(CONFIG_SND_BF6XX_SOC_I2S_MODULE)
+	&bfin_i2s,
+#endif
+#if defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61) || \
+	defined(CONFIG_SND_SOC_BFIN_EVAL_ADAU1X61_MODULE)
+	&adau1761_device,
+#endif
+#if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \
+	|| defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
+	&bfin_capture_device,
+#endif
+};
+
+static int __init ezkit_init(void)
+{
+	printk(KERN_INFO "%s(): registering device resources\n", __func__);
+
+	i2c_register_board_info(0, bfin_i2c_board_info0,
+				ARRAY_SIZE(bfin_i2c_board_info0));
+	i2c_register_board_info(1, bfin_i2c_board_info1,
+				ARRAY_SIZE(bfin_i2c_board_info1));
+
+#if defined(CONFIG_STMMAC_ETH) || defined(CONFIG_STMMAC_ETH_MODULE)
+	if (peripheral_request_list(pins, "emac0"))
+		printk(KERN_ERR "%s(): request emac pins failed\n", __func__);
+#endif
+
+	platform_add_devices(ezkit_devices, ARRAY_SIZE(ezkit_devices));
+
+	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
+
+	return 0;
+}
+
+arch_initcall(ezkit_init);
+
+static struct platform_device *ezkit_early_devices[] __initdata = {
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+	&bfin_uart0_device,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART1
+	&bfin_uart1_device,
+#endif
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
+#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
+	&bfin_sport0_uart_device,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
+	&bfin_sport1_uart_device,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
+	&bfin_sport2_uart_device,
+#endif
+#endif
+};
+
+void __init native_machine_early_platform_add_devices(void)
+{
+	printk(KERN_INFO "register early platform devices\n");
+	early_platform_add_devices(ezkit_early_devices,
+		ARRAY_SIZE(ezkit_early_devices));
+}
diff --git a/arch/blackfin/mach-bf609/clock.c b/arch/blackfin/mach-bf609/clock.c
new file mode 100644
index 0000000..7f8f529
--- /dev/null
+++ b/arch/blackfin/mach-bf609/clock.c
@@ -0,0 +1,390 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/seq_file.h>
+#include <linux/clkdev.h>
+
+#include <asm/clocks.h>
+
+#define CGU0_CTL_DF (1 << 0)
+
+#define CGU0_CTL_MSEL_SHIFT 8
+#define CGU0_CTL_MSEL_MASK (0x7f << 8)
+
+#define CGU0_STAT_PLLEN (1 << 0)
+#define CGU0_STAT_PLLBP (1 << 1)
+#define CGU0_STAT_PLLLK (1 << 2)
+#define CGU0_STAT_CLKSALGN (1 << 3)
+#define CGU0_STAT_CCBF0 (1 << 4)
+#define CGU0_STAT_CCBF1 (1 << 5)
+#define CGU0_STAT_SCBF0 (1 << 6)
+#define CGU0_STAT_SCBF1 (1 << 7)
+#define CGU0_STAT_DCBF (1 << 8)
+#define CGU0_STAT_OCBF (1 << 9)
+#define CGU0_STAT_ADDRERR (1 << 16)
+#define CGU0_STAT_LWERR (1 << 17)
+#define CGU0_STAT_DIVERR (1 << 18)
+#define CGU0_STAT_WDFMSERR (1 << 19)
+#define CGU0_STAT_WDIVERR (1 << 20)
+#define CGU0_STAT_PLOCKERR (1 << 21)
+
+#define CGU0_DIV_CSEL_SHIFT 0
+#define CGU0_DIV_CSEL_MASK 0x0000001F
+#define CGU0_DIV_S0SEL_SHIFT 5
+#define CGU0_DIV_S0SEL_MASK (0x3 << CGU0_DIV_S0SEL_SHIFT)
+#define CGU0_DIV_SYSSEL_SHIFT 8
+#define CGU0_DIV_SYSSEL_MASK (0x1f << CGU0_DIV_SYSSEL_SHIFT)
+#define CGU0_DIV_S1SEL_SHIFT 13
+#define CGU0_DIV_S1SEL_MASK (0x3 << CGU0_DIV_S1SEL_SHIFT)
+#define CGU0_DIV_DSEL_SHIFT 16
+#define CGU0_DIV_DSEL_MASK (0x1f << CGU0_DIV_DSEL_SHIFT)
+#define CGU0_DIV_OSEL_SHIFT 22
+#define CGU0_DIV_OSEL_MASK (0x7f << CGU0_DIV_OSEL_SHIFT)
+
+#define CLK(_clk, _devname, _conname)                   \
+	{                                               \
+		.clk    = &_clk,                  \
+		.dev_id = _devname,                     \
+		.con_id = _conname,                     \
+	}
+
+#define NEEDS_INITIALIZATION 0x11
+
+static LIST_HEAD(clk_list);
+
+static void clk_reg_write_mask(u32 reg, uint32_t val, uint32_t mask)
+{
+	u32 val2;
+
+	val2 = bfin_read32(reg);
+	val2 &= ~mask;
+	val2 |= val;
+	bfin_write32(reg, val2);
+}
+
+static void clk_reg_set_bits(u32 reg, uint32_t mask)
+{
+	u32 val;
+
+	val = bfin_read32(reg);
+	val |= mask;
+	bfin_write32(reg, val);
+}
+
+static void clk_reg_clear_bits(u32 reg, uint32_t mask)
+{
+	u32 val;
+
+	val = bfin_read32(reg);
+	val &= ~mask;
+	bfin_write32(reg, val);
+}
+
+int wait_for_pll_align(void)
+{
+	int i = 10000;
+	while (i-- && (bfin_read32(CGU0_STAT) & CGU0_STAT_CLKSALGN));
+
+	if (bfin_read32(CGU0_STAT) & CGU0_STAT_CLKSALGN) {
+		printk(KERN_DEBUG "failed to align clock\n");
+		return -1;
+	}
+	return 0;
+}
+
+int clk_enable(struct clk *clk)
+{
+	int ret = -EIO;
+	if (clk->ops && clk->ops->enable)
+		ret = clk->ops->enable(clk);
+	return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+	if (clk->ops && clk->ops->disable)
+		clk->ops->disable(clk);
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+	unsigned long ret = 0;
+	if (clk->ops && clk->ops->get_rate)
+		ret = clk->ops->get_rate(clk);
+	return ret;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	long ret = -EIO;
+	if (clk->ops && clk->ops->round_rate)
+		ret = clk->ops->round_rate(clk, rate);
+	return ret;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	int ret = -EIO;
+	if (clk->ops && clk->ops->set_rate)
+		ret = clk->ops->set_rate(clk, rate);
+	return ret;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+unsigned long vco_get_rate(struct clk *clk)
+{
+	return clk->rate;
+}
+
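+/* PLL output = CLKIN / (DF + 1) * MSEL * 2; 0 is reported while the PLL is bypassed */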
+unsigned long pll_get_rate(struct clk *clk)
+{
+	u32 df;
+	u32 msel;
+	u32 ctl = bfin_read32(CGU0_CTL);
+	u32 stat = bfin_read32(CGU0_STAT);
+	if (stat & CGU0_STAT_PLLBP)
+		return 0;
+	msel = (ctl & CGU0_CTL_MSEL_MASK) >> CGU0_CTL_MSEL_SHIFT;
+	df = (ctl &  CGU0_CTL_DF);
+	clk->parent->rate = clk_get_rate(clk->parent);
+	return clk->parent->rate / (df + 1) * msel * 2;
+}
+
+unsigned long pll_round_rate(struct clk *clk, unsigned long rate)
+{
+	u32 div;
+	div = rate / clk->parent->rate;
+	return clk->parent->rate * div;
+}
+
+int pll_set_rate(struct clk *clk, unsigned long rate)
+{
+	u32 msel;
+	u32 stat = bfin_read32(CGU0_STAT);
+	if (!(stat & CGU0_STAT_PLLEN))
+		return -EBUSY;
+	if (!(stat & CGU0_STAT_PLLLK))
+		return -EBUSY;
+	if (wait_for_pll_align())
+		return -EBUSY;
+	msel = rate / clk->parent->rate / 2;
+	clk_reg_write_mask(CGU0_CTL, msel << CGU0_CTL_MSEL_SHIFT,
+		CGU0_CTL_MSEL_MASK);
+	clk->rate = rate;
+	return 0;
+}
+
+unsigned long cclk_get_rate(struct clk *clk)
+{
+	if (clk->parent)
+		return clk->parent->rate;
+	else
+		return 0;
+}
+
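+/* Rate of a CGU divider output: the PLL rate (derived from MSEL/DF) or the
+ * parent's rate, divided by this clock's divisor field in CGU0_DIV. */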
+unsigned long sys_clk_get_rate(struct clk *clk)
+{
+	unsigned long drate;
+	u32 msel;
+	u32 df;
+	u32 ctl = bfin_read32(CGU0_CTL);
+	u32 div = bfin_read32(CGU0_DIV);
+	div = (div & clk->mask) >> clk->shift;
+	msel = (ctl & CGU0_CTL_MSEL_MASK) >> CGU0_CTL_MSEL_SHIFT;
+	df = (ctl &  CGU0_CTL_DF);
+
+	if (!strcmp(clk->parent->name, "SYS_CLKIN")) {
+		drate = clk->parent->rate / (df + 1);
+		drate *=  msel;
+		drate /= div;
+		return drate;
+	} else {
+		clk->parent->rate = clk_get_rate(clk->parent);
+		return clk->parent->rate / div;
+	}
+}
+
+unsigned long sys_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long max_rate;
+	unsigned long drate;
+	int i;
+	u32 msel;
+	u32 df;
+	u32 ctl = bfin_read32(CGU0_CTL);
+
+	msel = (ctl & CGU0_CTL_MSEL_MASK) >> CGU0_CTL_MSEL_SHIFT;
+	df = (ctl &  CGU0_CTL_DF);
+	max_rate = clk->parent->rate / (df + 1) * msel;
+
+	if (rate > max_rate)
+		return 0;
+
+	for (i = 1; i < clk->mask; i++) {
+		drate = max_rate / i;
+		if (rate >= drate)
+			return drate;
+	}
+	return 0;
+}
+
+int sys_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	u32 div = bfin_read32(CGU0_DIV);
+	div = (div & clk->mask) >> clk->shift;
+
+	rate = clk_round_rate(clk, rate);
+
+	if (!rate)
+		return -EINVAL;
+
+	div = (clk_get_rate(clk) * div) / rate;
+
+	if (wait_for_pll_align())
+		return -EBUSY;
+	clk_reg_write_mask(CGU0_DIV, div << clk->shift,
+			clk->mask);
+	clk->rate = rate;
+	return 0;
+}
+
+static struct clk_ops vco_ops = {
+	.get_rate = vco_get_rate,
+};
+
+static struct clk_ops pll_ops = {
+	.get_rate = pll_get_rate,
+	.set_rate = pll_set_rate,
+};
+
+static struct clk_ops cclk_ops = {
+	.get_rate = cclk_get_rate,
+};
+
+static struct clk_ops sys_clk_ops = {
+	.get_rate = sys_clk_get_rate,
+	.set_rate = sys_clk_set_rate,
+	.round_rate = sys_clk_round_rate,
+};
+
+static struct clk sys_clkin = {
+	.name       = "SYS_CLKIN",
+	.rate       = CONFIG_CLKIN_HZ,
+	.ops        = &vco_ops,
+};
+
+static struct clk pll_clk = {
+	.name       = "PLLCLK",
+	.rate       = 500000000,
+	.parent     = &sys_clkin,
+	.ops = &pll_ops,
+	.flags = NEEDS_INITIALIZATION,
+};
+
+static struct clk cclk = {
+	.name       = "CCLK",
+	.rate       = 500000000,
+	.mask       = CGU0_DIV_CSEL_MASK,
+	.shift      = CGU0_DIV_CSEL_SHIFT,
+	.parent     = &sys_clkin,
+	.ops	    = &sys_clk_ops,
+	.flags = NEEDS_INITIALIZATION,
+};
+
+static struct clk cclk0 = {
+	.name       = "CCLK0",
+	.parent     = &cclk,
+	.ops	    = &cclk_ops,
+};
+
+static struct clk cclk1 = {
+	.name       = "CCLK1",
+	.parent     = &cclk,
+	.ops	    = &cclk_ops,
+};
+
+static struct clk sysclk = {
+	.name       = "SYSCLK",
+	.rate       = 500000000,
+	.mask       = CGU0_DIV_SYSSEL_MASK,
+	.shift      = CGU0_DIV_SYSSEL_SHIFT,
+	.parent     = &sys_clkin,
+	.ops	    = &sys_clk_ops,
+	.flags = NEEDS_INITIALIZATION,
+};
+
+static struct clk sclk0 = {
+	.name       = "SCLK0",
+	.rate       = 500000000,
+	.mask       = CGU0_DIV_S0SEL_MASK,
+	.shift      = CGU0_DIV_S0SEL_SHIFT,
+	.parent     = &sysclk,
+	.ops	    = &sys_clk_ops,
+};
+
+static struct clk sclk1 = {
+	.name       = "SCLK1",
+	.rate       = 500000000,
+	.mask       = CGU0_DIV_S1SEL_MASK,
+	.shift      = CGU0_DIV_S1SEL_SHIFT,
+	.parent     = &sysclk,
+	.ops	    = &sys_clk_ops,
+};
+
+static struct clk dclk = {
+	.name       = "DCLK",
+	.rate       = 500000000,
+	.mask       = CGU0_DIV_DSEL_MASK,
+	.shift       = CGU0_DIV_DSEL_SHIFT,
+	.parent     = &sys_clkin,
+	.ops	    = &sys_clk_ops,
+};
+
+static struct clk oclk = {
+	.name       = "OCLK",
+	.rate       = 500000000,
+	.mask       = CGU0_DIV_OSEL_MASK,
+	.shift      = CGU0_DIV_OSEL_SHIFT,
+	.parent     = &pll_clk,
+};
+
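+/* clkdev lookup table exposing the clocks above by connection ID */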
+static struct clk_lookup bf609_clks[] = {
+	CLK(sys_clkin, NULL, "SYS_CLKIN"),
+	CLK(pll_clk, NULL, "PLLCLK"),
+	CLK(cclk, NULL, "CCLK"),
+	CLK(cclk0, NULL, "CCLK0"),
+	CLK(cclk1, NULL, "CCLK1"),
+	CLK(sysclk, NULL, "SYSCLK"),
+	CLK(sclk0, NULL, "SCLK0"),
+	CLK(sclk1, NULL, "SCLK1"),
+	CLK(dclk, NULL, "DCLK"),
+	CLK(oclk, NULL, "OCLK"),
+};
+
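+/* Query the rate of clocks marked NEEDS_INITIALIZATION once, then register
+ * the clkdev lookup table. */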
+int __init clk_init(void)
+{
+	int i;
+	struct clk *clkp;
+	for (i = 0; i < ARRAY_SIZE(bf609_clks); i++) {
+		clkp = bf609_clks[i].clk;
+		if (clkp->flags & NEEDS_INITIALIZATION)
+			clk_get_rate(clkp);
+	}
+	clkdev_add_table(bf609_clks, ARRAY_SIZE(bf609_clks));
+	return 0;
+}
diff --git a/arch/blackfin/mach-bf609/dma.c b/arch/blackfin/mach-bf609/dma.c
new file mode 100644
index 0000000..1da4b38
--- /dev/null
+++ b/arch/blackfin/mach-bf609/dma.c
@@ -0,0 +1,202 @@
+/*
+ * The simple DMA implementation for Blackfin
+ *
+ * Copyright 2007-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+
+#include <asm/blackfin.h>
+#include <asm/dma.h>
+
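+/* MMR block base (NEXT_DESC_PTR) for each DMA channel, indexed by channel number */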
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
+	(struct dma_register *) DMA0_NEXT_DESC_PTR,
+	(struct dma_register *) DMA1_NEXT_DESC_PTR,
+	(struct dma_register *) DMA2_NEXT_DESC_PTR,
+	(struct dma_register *) DMA3_NEXT_DESC_PTR,
+	(struct dma_register *) DMA4_NEXT_DESC_PTR,
+	(struct dma_register *) DMA5_NEXT_DESC_PTR,
+	(struct dma_register *) DMA6_NEXT_DESC_PTR,
+	(struct dma_register *) DMA7_NEXT_DESC_PTR,
+	(struct dma_register *) DMA8_NEXT_DESC_PTR,
+	(struct dma_register *) DMA9_NEXT_DESC_PTR,
+	(struct dma_register *) DMA10_NEXT_DESC_PTR,
+	(struct dma_register *) DMA11_NEXT_DESC_PTR,
+	(struct dma_register *) DMA12_NEXT_DESC_PTR,
+	(struct dma_register *) DMA13_NEXT_DESC_PTR,
+	(struct dma_register *) DMA14_NEXT_DESC_PTR,
+	(struct dma_register *) DMA15_NEXT_DESC_PTR,
+	(struct dma_register *) DMA16_NEXT_DESC_PTR,
+	(struct dma_register *) DMA17_NEXT_DESC_PTR,
+	(struct dma_register *) DMA18_NEXT_DESC_PTR,
+	(struct dma_register *) DMA19_NEXT_DESC_PTR,
+	(struct dma_register *) DMA20_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA0_SRC_CRC0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA0_DEST_CRC0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA1_SRC_CRC1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA1_DEST_CRC1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA2_SRC_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA2_DEST_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA3_SRC_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA3_DEST_NEXT_DESC_PTR,
+	(struct dma_register *) DMA29_NEXT_DESC_PTR,
+	(struct dma_register *) DMA30_NEXT_DESC_PTR,
+	(struct dma_register *) DMA31_NEXT_DESC_PTR,
+	(struct dma_register *) DMA32_NEXT_DESC_PTR,
+	(struct dma_register *) DMA33_NEXT_DESC_PTR,
+	(struct dma_register *) DMA34_NEXT_DESC_PTR,
+	(struct dma_register *) DMA35_NEXT_DESC_PTR,
+	(struct dma_register *) DMA36_NEXT_DESC_PTR,
+	(struct dma_register *) DMA37_NEXT_DESC_PTR,
+	(struct dma_register *) DMA38_NEXT_DESC_PTR,
+	(struct dma_register *) DMA39_NEXT_DESC_PTR,
+	(struct dma_register *) DMA40_NEXT_DESC_PTR,
+	(struct dma_register *) DMA41_NEXT_DESC_PTR,
+	(struct dma_register *) DMA42_NEXT_DESC_PTR,
+	(struct dma_register *) DMA43_NEXT_DESC_PTR,
+	(struct dma_register *) DMA44_NEXT_DESC_PTR,
+	(struct dma_register *) DMA45_NEXT_DESC_PTR,
+	(struct dma_register *) DMA46_NEXT_DESC_PTR,
+};
+EXPORT_SYMBOL(dma_io_base_addr);
+
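+/* Map a peripheral DMA channel number to its IRQ; returns -1 for unknown channels */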
+int channel2irq(unsigned int channel)
+{
+	int ret_irq = -1;
+
+	switch (channel) {
+	case CH_SPORT0_RX:
+		ret_irq = IRQ_SPORT0_RX;
+		break;
+	case CH_SPORT0_TX:
+		ret_irq = IRQ_SPORT0_TX;
+		break;
+	case CH_SPORT1_RX:
+		ret_irq = IRQ_SPORT1_RX;
+		break;
+	case CH_SPORT1_TX:
+		ret_irq = IRQ_SPORT1_TX;
+		break;
+	case CH_SPORT2_RX:
+		ret_irq = IRQ_SPORT2_RX;
+		break;
+	case CH_SPORT2_TX:
+		ret_irq = IRQ_SPORT2_TX;
+		break;
+	case CH_SPI0_TX:
+		ret_irq = IRQ_SPI0_TX;
+		break;
+	case CH_SPI0_RX:
+		ret_irq = IRQ_SPI0_RX;
+		break;
+	case CH_SPI1_TX:
+		ret_irq = IRQ_SPI1_TX;
+		break;
+	case CH_SPI1_RX:
+		ret_irq = IRQ_SPI1_RX;
+		break;
+	case CH_RSI:
+		ret_irq = IRQ_RSI;
+		break;
+	case CH_SDU:
+		ret_irq = IRQ_SDU;
+		break;
+	case CH_LP0:
+		ret_irq = IRQ_LP0;
+		break;
+	case CH_LP1:
+		ret_irq = IRQ_LP1;
+		break;
+	case CH_LP2:
+		ret_irq = IRQ_LP2;
+		break;
+	case CH_LP3:
+		ret_irq = IRQ_LP3;
+		break;
+	case CH_UART0_RX:
+		ret_irq = IRQ_UART0_RX;
+		break;
+	case CH_UART0_TX:
+		ret_irq = IRQ_UART0_TX;
+		break;
+	case CH_UART1_RX:
+		ret_irq = IRQ_UART1_RX;
+		break;
+	case CH_UART1_TX:
+		ret_irq = IRQ_UART1_TX;
+		break;
+	case CH_EPPI0_CH0:
+		ret_irq = IRQ_EPPI0_CH0;
+		break;
+	case CH_EPPI0_CH1:
+		ret_irq = IRQ_EPPI0_CH1;
+		break;
+	case CH_EPPI1_CH0:
+		ret_irq = IRQ_EPPI1_CH0;
+		break;
+	case CH_EPPI1_CH1:
+		ret_irq = IRQ_EPPI1_CH1;
+		break;
+	case CH_EPPI2_CH0:
+		ret_irq = IRQ_EPPI2_CH0;
+		break;
+	case CH_EPPI2_CH1:
+		ret_irq = IRQ_EPPI2_CH1;
+		break;
+	case CH_PIXC_CH0:
+		ret_irq = IRQ_PIXC_CH0;
+		break;
+	case CH_PIXC_CH1:
+		ret_irq = IRQ_PIXC_CH1;
+		break;
+	case CH_PIXC_CH2:
+		ret_irq = IRQ_PIXC_CH2;
+		break;
+	case CH_PVP_CPDOB:
+		ret_irq = IRQ_PVP_CPDOB;
+		break;
+	case CH_PVP_CPDOC:
+		ret_irq = IRQ_PVP_CPDOC;
+		break;
+	case CH_PVP_CPSTAT:
+		ret_irq = IRQ_PVP_CPSTAT;
+		break;
+	case CH_PVP_CPCI:
+		ret_irq = IRQ_PVP_CPCI;
+		break;
+	case CH_PVP_MPDO:
+		ret_irq = IRQ_PVP_MPDO;
+		break;
+	case CH_PVP_MPDI:
+		ret_irq = IRQ_PVP_MPDI;
+		break;
+	case CH_PVP_MPSTAT:
+		ret_irq = IRQ_PVP_MPSTAT;
+		break;
+	case CH_PVP_MPCI:
+		ret_irq = IRQ_PVP_MPCI;
+		break;
+	case CH_PVP_CPDOA:
+		ret_irq = IRQ_PVP_CPDOA;
+		break;
+	case CH_MEM_STREAM0_SRC:
+	case CH_MEM_STREAM0_DEST:
+		ret_irq = IRQ_MDMAS0;
+		break;
+	case CH_MEM_STREAM1_SRC:
+	case CH_MEM_STREAM1_DEST:
+		ret_irq = IRQ_MDMAS1;
+		break;
+	case CH_MEM_STREAM2_SRC:
+	case CH_MEM_STREAM2_DEST:
+		ret_irq = IRQ_MDMAS2;
+		break;
+	case CH_MEM_STREAM3_SRC:
+	case CH_MEM_STREAM3_DEST:
+		ret_irq = IRQ_MDMAS3;
+		break;
+	}
+	return ret_irq;
+}
diff --git a/arch/blackfin/mach-bf609/hibernate.S b/arch/blackfin/mach-bf609/hibernate.S
new file mode 100644
index 0000000..d37a532
--- /dev/null
+++ b/arch/blackfin/mach-bf609/hibernate.S
@@ -0,0 +1,65 @@
+#include <linux/linkage.h>
+#include <asm/blackfin.h>
+#include <asm/dpmc.h>
+
+#define PM_STACK   (COREA_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
+
+.section .l1.text
+ENTRY(_enter_hibernate)
+	/* switch stack to L1 scratch, prepare for DDR self-refresh */
+	P0.H = HI(PM_STACK);
+	P0.L = LO(PM_STACK);
+	SP = P0;
+
+	call _bf609_ddr_sr;
+	call _bfin_hibernate_syscontrol;
+
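+	/* save the address of the PM data block in DPM0_RESTORE4 for the resume path */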
+	P0.H = HI(DPM0_RESTORE4);
+	P0.L = LO(DPM0_RESTORE4);
+	P1.H = _bf609_pm_data;
+	P1.L = _bf609_pm_data;
+	[P0] = P1;
+
+	P0.H = HI(DPM0_CTL);
+	P0.L = LO(DPM0_CTL);
+	R3.H = HI(0x00000010);
+	R3.L = LO(0x00000010);
+
+	bfin_init_pm_bench_cycles;
+
+	[P0] = R3;
+
+	SSYNC;
+ENDPROC(_enter_hibernate)
+
+.section .text
+ENTRY(_bf609_hibernate)
+	bfin_cpu_reg_save;
+	bfin_core_mmr_save;
+
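+	/* record magic, resume address and stack pointer in the PM data block */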
+	P0.H = _bf609_pm_data;
+	P0.L = _bf609_pm_data;
+	R1.H = 0xDEAD;
+	R1.L = 0xBEEF;
+	R2.H = .Lpm_resume_here;
+	R2.L = .Lpm_resume_here;
+	[P0++] = R1;
+	[P0++] = R2;
+	[P0++] = SP;
+
+	P1.H = _enter_hibernate;
+	P1.L = _enter_hibernate;
+
+	call (P1);
+.Lpm_resume_here:
+
+	bfin_core_mmr_restore;
+	bfin_cpu_reg_restore;
+
+	[--sp] = RETI;  /* Clear Global Interrupt Disable */
+	SP += 4;
+
+	RTS;
+
+ENDPROC(_bf609_hibernate)
+
diff --git a/arch/blackfin/mach-bf609/include/mach/anomaly.h b/arch/blackfin/mach-bf609/include/mach/anomaly.h
new file mode 100644
index 0000000..bdd39ae
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/anomaly.h
@@ -0,0 +1,130 @@
+/*
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ *   svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
+ *
+ * Copyright 2004-2011 Analog Devices Inc.
+ * Licensed under the Clear BSD license.
+ */
+
+/* This file should be up to date with:
+ */
+
+#if __SILICON_REVISION__ < 0
+# error will not work on BF609 silicon version
+#endif
+
+#ifndef _MACH_ANOMALY_H_
+#define _MACH_ANOMALY_H_
+
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
+#define ANOMALY_05000074 (1)
+/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
+#define ANOMALY_05000119 (1)
+/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
+#define ANOMALY_05000122 (1)
+/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
+#define ANOMALY_05000245 (1)
+/* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */
+#define ANOMALY_05000254 (1)
+/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
+#define ANOMALY_05000265 (1)
+/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
+#define ANOMALY_05000310 (1)
+/* PPI Underflow Error Goes Undetected in ITU-R 656 Mode */
+#define ANOMALY_05000366 (1)
+/* Speculative Fetches Can Cause Undesired External FIFO Operations */
+#define ANOMALY_05000416 (1)
+/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
+#define ANOMALY_05000426 (1)
+/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
+#define ANOMALY_05000443 (1)
+/* UART IrDA Receiver Fails on Extended Bit Pulses */
+#define ANOMALY_05000447 (1)
+/* False Hardware Error when RETI Points to Invalid Memory */
+#define ANOMALY_05000461 (1)
+/* PLL Latches Incorrect Settings During Reset */
+#define ANOMALY_05000469 (1)
+/* Incorrect Default MSEL Value in PLL_CTL */
+#define ANOMALY_05000472 (1)
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
+/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
+#define ANOMALY_05000481 (1)
+/* IFLUSH sucks at life */
+#define ANOMALY_05000491 (1)
+/* Temporary anomaly ID for data loss when an MMR read operation is interrupted */
+#define ANOMALY_05001001 (__SILICON_REVISION__ < 1)
+
+/* Anomalies that don't exist on this proc */
+#define ANOMALY_05000099 (0)
+#define ANOMALY_05000120 (0)
+#define ANOMALY_05000125 (0)
+#define ANOMALY_05000149 (0)
+#define ANOMALY_05000158 (0)
+#define ANOMALY_05000171 (0)
+#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
+#define ANOMALY_05000183 (0)
+#define ANOMALY_05000189 (0)
+#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
+#define ANOMALY_05000215 (0)
+#define ANOMALY_05000219 (0)
+#define ANOMALY_05000220 (0)
+#define ANOMALY_05000227 (0)
+#define ANOMALY_05000230 (0)
+#define ANOMALY_05000231 (0)
+#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
+#define ANOMALY_05000242 (0)
+#define ANOMALY_05000244 (0)
+#define ANOMALY_05000248 (0)
+#define ANOMALY_05000250 (0)
+#define ANOMALY_05000257 (0)
+#define ANOMALY_05000261 (0)
+#define ANOMALY_05000263 (0)
+#define ANOMALY_05000266 (0)
+#define ANOMALY_05000273 (0)
+#define ANOMALY_05000274 (0)
+#define ANOMALY_05000278 (0)
+#define ANOMALY_05000281 (0)
+#define ANOMALY_05000283 (0)
+#define ANOMALY_05000285 (0)
+#define ANOMALY_05000287 (0)
+#define ANOMALY_05000301 (0)
+#define ANOMALY_05000305 (0)
+#define ANOMALY_05000307 (0)
+#define ANOMALY_05000311 (0)
+#define ANOMALY_05000312 (0)
+#define ANOMALY_05000315 (0)
+#define ANOMALY_05000323 (0)
+#define ANOMALY_05000353 (1)
+#define ANOMALY_05000357 (0)
+#define ANOMALY_05000362 (1)
+#define ANOMALY_05000363 (0)
+#define ANOMALY_05000364 (0)
+#define ANOMALY_05000371 (0)
+#define ANOMALY_05000380 (0)
+#define ANOMALY_05000386 (0)
+#define ANOMALY_05000389 (0)
+#define ANOMALY_05000400 (0)
+#define ANOMALY_05000402 (0)
+#define ANOMALY_05000412 (0)
+#define ANOMALY_05000432 (0)
+#define ANOMALY_05000440 (0)
+#define ANOMALY_05000448 (0)
+#define ANOMALY_05000456 (0)
+#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
+#define ANOMALY_05000475 (0)
+#define ANOMALY_05000480 (0)
+#define ANOMALY_05000485 (0)
+
+#endif
diff --git a/arch/blackfin/mach-bf609/include/mach/bf609.h b/arch/blackfin/mach-bf609/include/mach/bf609.h
new file mode 100644
index 0000000..c897c2a
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/bf609.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __MACH_BF609_H__
+#define __MACH_BF609_H__
+
+#define OFFSET_(x) ((x) & 0x0000FFFF)
+
+/* some misc defines */
+#define IMASK_IVG15		0x8000
+#define IMASK_IVG14		0x4000
+#define IMASK_IVG13		0x2000
+#define IMASK_IVG12		0x1000
+
+#define IMASK_IVG11		0x0800
+#define IMASK_IVG10		0x0400
+#define IMASK_IVG9		0x0200
+#define IMASK_IVG8		0x0100
+
+#define IMASK_IVG7		0x0080
+#define IMASK_IVGTMR		0x0040
+#define IMASK_IVGHW		0x0020
+
+/***************************/
+
+
+#define BFIN_DSUBBANKS		4
+#define BFIN_DWAYS		2
+#define BFIN_DLINES		64
+#define BFIN_ISUBBANKS		4
+#define BFIN_IWAYS		4
+#define BFIN_ILINES		32
+
+#define WAY0_L			0x1
+#define WAY1_L			0x2
+#define WAY01_L			0x3
+#define WAY2_L			0x4
+#define WAY02_L			0x5
+#define	WAY12_L			0x6
+#define	WAY012_L		0x7
+
+#define	WAY3_L			0x8
+#define	WAY03_L			0x9
+#define	WAY13_L			0xA
+#define	WAY013_L		0xB
+
+#define	WAY32_L			0xC
+#define	WAY320_L		0xD
+#define	WAY321_L		0xE
+#define	WAYALL_L		0xF
+
+#define DMC_ENABLE (2<<2)	/* yes, 2, not 1 */
+
+/********************************* EBIU Settings ************************************/
+#define AMBCTL0VAL	((CONFIG_BANK_1 << 16) | CONFIG_BANK_0)
+#define AMBCTL1VAL	((CONFIG_BANK_3 << 16) | CONFIG_BANK_2)
+
+#ifdef CONFIG_C_AMBEN_ALL
+#define V_AMBEN AMBEN_ALL
+#endif
+#ifdef CONFIG_C_AMBEN
+#define V_AMBEN 0x0
+#endif
+#ifdef CONFIG_C_AMBEN_B0
+#define V_AMBEN AMBEN_B0
+#endif
+#ifdef CONFIG_C_AMBEN_B0_B1
+#define V_AMBEN AMBEN_B0_B1
+#endif
+#ifdef CONFIG_C_AMBEN_B0_B1_B2
+#define V_AMBEN AMBEN_B0_B1_B2
+#endif
+#ifdef CONFIG_C_AMCKEN
+#define V_AMCKEN AMCKEN
+#else
+#define V_AMCKEN 0x0
+#endif
+
+#define AMGCTLVAL	(V_AMBEN | V_AMCKEN)
+
+#if defined(CONFIG_BF609)
+# define CPU   "BF609"
+# define CPUID 0x27fe	/* temporary fake value */
+#endif
+
+#ifndef CPU
+#error "Unknown CPU type - This kernel doesn't seem to be configured properly"
+#endif
+
+#endif	/* __MACH_BF609_H__  */
diff --git a/arch/blackfin/mach-bf609/include/mach/bfin_serial.h b/arch/blackfin/mach-bf609/include/mach/bfin_serial.h
new file mode 100644
index 0000000..1fd3981
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/bfin_serial.h
@@ -0,0 +1,17 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	2
+#define BFIN_UART_TX_FIFO_SIZE	8
+
+#define BFIN_UART_BF60X_STYLE
+
+#endif
diff --git a/arch/blackfin/mach-bf609/include/mach/blackfin.h b/arch/blackfin/mach-bf609/include/mach/blackfin.h
new file mode 100644
index 0000000..b1a48c4
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/blackfin.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _MACH_BLACKFIN_H_
+#define _MACH_BLACKFIN_H_
+
+#include "bf609.h"
+#include "anomaly.h"
+
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF609
+# include "defBF609.h"
+#endif
+
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF609
+#  include "cdefBF609.h"
+# endif
+#endif
+
+#endif
diff --git a/arch/blackfin/mach-bf609/include/mach/cdefBF609.h b/arch/blackfin/mach-bf609/include/mach/cdefBF609.h
new file mode 100644
index 0000000..c4f3fe1
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/cdefBF609.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _CDEF_BF609_H
+#define _CDEF_BF609_H
+
+/* include cdefBF60x_base.h for the set of #defines that are common to all ADSP-BF60x processors */
+#include "cdefBF60x_base.h"
+
+/* The following are the #defines needed by ADSP-BF609 that are not in the common header */
+
+#endif /* _CDEF_BF609_H */
diff --git a/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h b/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
new file mode 100644
index 0000000..4954cf3
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
@@ -0,0 +1,3252 @@
+/*
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _CDEF_BF60X_H
+#define _CDEF_BF60X_H
+
+/* ************************************************************** */
+/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF60x    */
+/* ************************************************************** */
+
+/* Debug/MP/Emulation Registers (0xFFC00014 - 0xFFC00014) */
+
+#define bfin_read_CHIPID()		bfin_read32(CHIPID)
+#define bfin_write_CHIPID(val)		bfin_write32(CHIPID, val)
+
+/* System Reset and Interrupt Controller (0xFFC00100 - 0xFFC00104) */
+
+/* SEC0 Registers */
+#define bfin_read_SEC0_CCTL()		bfin_read32(SEC0_CCTL)
+#define bfin_write_SEC0_CCTL(val)	bfin_write32(SEC0_CCTL, val)
+#define bfin_read_SEC0_CSID()		bfin_read32(SEC0_CSID)
+#define bfin_write_SEC0_CSID(val)	bfin_write32(SEC0_CSID, val)
+#define bfin_read_SEC_GCTL()		bfin_read32(SEC_GCTL)
+#define bfin_write_SEC_GCTL(val)	bfin_write32(SEC_GCTL, val)
+
+#define bfin_read_SEC_FCTL()		bfin_read32(SEC_FCTL)
+#define bfin_write_SEC_FCTL(val)	bfin_write32(SEC_FCTL, val)
+
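+/* per-source SEC control/status registers are spaced 8 bytes apart, indexed by source ID */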
+#define bfin_read_SEC_SCTL(sid)		bfin_read32((SEC_SCTL0 + (sid) * 8))
+#define bfin_write_SEC_SCTL(sid, val)	bfin_write32((SEC_SCTL0 + (sid) * 8), val)
+
+#define bfin_read_SEC_SSTAT(sid)	bfin_read32((SEC_SSTAT0 + (sid) * 8))
+#define bfin_write_SEC_SSTAT(sid, val)	bfin_write32((SEC_SSTAT0 + (sid) * 8), val)
+
+/* RCU0 Registers */
+#define bfin_read_RCU0_CTL()		bfin_read32(RCU0_CTL)
+#define bfin_write_RCU0_CTL(val)	bfin_write32(RCU0_CTL, val)
+
+/* Watchdog Timer Registers */
+#define bfin_read_WDOG_CTL()		bfin_read16(WDOG_CTL)
+#define bfin_write_WDOG_CTL(val)	bfin_write16(WDOG_CTL, val)
+#define bfin_read_WDOG_CNT()		bfin_read32(WDOG_CNT)
+#define bfin_write_WDOG_CNT(val)	bfin_write32(WDOG_CNT, val)
+#define bfin_read_WDOG_STAT()		bfin_read32(WDOG_STAT)
+#define bfin_write_WDOG_STAT(val)	bfin_write32(WDOG_STAT, val)
+
+/* RTC Registers */
+
+/* UART0 Registers */
+
+#define bfin_read_UART0_REVID()		bfin_read32(UART0_REVID)
+#define bfin_write_UART0_REVID(val)	bfin_write32(UART0_REVID, val)
+#define bfin_read_UART0_GCTL()		bfin_read32(UART0_GCTL)
+#define bfin_write_UART0_GCTL(val)	bfin_write32(UART0_GCTL, val)
+#define bfin_read_UART0_STAT()		bfin_read32(UART0_STAT)
+#define bfin_write_UART0_STAT(val)	bfin_write32(UART0_STAT, val)
+#define bfin_read_UART0_SCR()		bfin_read32(UART0_SCR)
+#define bfin_write_UART0_SCR(val)	bfin_write32(UART0_SCR, val)
+#define bfin_read_UART0_CLK()		bfin_read32(UART0_CLK)
+#define bfin_write_UART0_CLK(val)	bfin_write32(UART0_CLK, val)
+#define bfin_read_UART0_IER()		bfin_read32(UART0_IER)
+#define bfin_write_UART0_IER(val)	bfin_write32(UART0_IER, val)
+#define bfin_read_UART0_IER_SET()	bfin_read32(UART0_IER_SET)
+#define bfin_write_UART0_IER_SET(val)	bfin_write32(UART0_IER_SET, val)
+#define bfin_read_UART0_IER_CLEAR()	bfin_read32(UART0_IER_CLEAR)
+#define bfin_write_UART0_IER_CLEAR(val)	bfin_write32(UART0_IER_CLEAR, val)
+#define bfin_read_UART0_RBR()		bfin_read32(UART0_RBR)
+#define bfin_write_UART0_RBR(val)	bfin_write32(UART0_RBR, val)
+#define bfin_read_UART0_THR()		bfin_read32(UART0_THR)
+#define bfin_write_UART0_THR(val)	bfin_write32(UART0_THR, val)
+#define bfin_read_UART0_TAIP()		bfin_read32(UART0_TAIP)
+#define bfin_write_UART0_TAIP(val)	bfin_write32(UART0_TAIP, val)
+#define bfin_read_UART0_TSR()		bfin_read32(UART0_TSR)
+#define bfin_write_UART0_TSR(val)	bfin_write32(UART0_TSR, val)
+#define bfin_read_UART0_RSR()		bfin_read32(UART0_RSR)
+#define bfin_write_UART0_RSR(val)	bfin_write32(UART0_RSR, val)
+#define bfin_read_UART0_TXCNT()		bfin_read32(UART0_TXCNT)
+#define bfin_write_UART0_TXCNT(val)	bfin_write32(UART0_TXCNT, val)
+#define bfin_read_UART0_RXCNT()		bfin_read32(UART0_RXCNT)
+#define bfin_write_UART0_RXCNT(val)	bfin_write32(UART0_RXCNT, val)
+
+/* UART1 Registers */
+
+#define bfin_read_UART1_REVID()		bfin_read32(UART1_REVID)
+#define bfin_write_UART1_REVID(val)	bfin_write32(UART1_REVID, val)
+#define bfin_read_UART1_GCTL()		bfin_read32(UART1_GCTL)
+#define bfin_write_UART1_GCTL(val)	bfin_write32(UART1_GCTL, val)
+#define bfin_read_UART1_STAT()		bfin_read32(UART1_STAT)
+#define bfin_write_UART1_STAT(val)	bfin_write32(UART1_STAT, val)
+#define bfin_read_UART1_SCR()		bfin_read32(UART1_SCR)
+#define bfin_write_UART1_SCR(val)	bfin_write32(UART1_SCR, val)
+#define bfin_read_UART1_CLK()		bfin_read32(UART1_CLK)
+#define bfin_write_UART1_CLK(val)	bfin_write32(UART1_CLK, val)
+#define bfin_read_UART1_IER()		bfin_read32(UART1_IER)
+#define bfin_write_UART1_IER(val)	bfin_write32(UART1_IER, val)
+#define bfin_read_UART1_IER_SET()	bfin_read32(UART1_IER_SET)
+#define bfin_write_UART1_IER_SET(val)	bfin_write32(UART1_IER_SET, val)
+#define bfin_read_UART1_IER_CLEAR()	bfin_read32(UART1_IER_CLEAR)
+#define bfin_write_UART1_IER_CLEAR(val)	bfin_write32(UART1_IER_CLEAR, val)
+#define bfin_read_UART1_RBR()		bfin_read32(UART1_RBR)
+#define bfin_write_UART1_RBR(val)	bfin_write32(UART1_RBR, val)
+#define bfin_read_UART1_THR()		bfin_read32(UART1_THR)
+#define bfin_write_UART1_THR(val)	bfin_write32(UART1_THR, val)
+#define bfin_read_UART1_TAIP()		bfin_read32(UART1_TAIP)
+#define bfin_write_UART1_TAIP(val)	bfin_write32(UART1_TAIP, val)
+#define bfin_read_UART1_TSR()		bfin_read32(UART1_TSR)
+#define bfin_write_UART1_TSR(val)	bfin_write32(UART1_TSR, val)
+#define bfin_read_UART1_RSR()		bfin_read32(UART1_RSR)
+#define bfin_write_UART1_RSR(val)	bfin_write32(UART1_RSR, val)
+#define bfin_read_UART1_TXCNT()		bfin_read32(UART1_TXCNT)
+#define bfin_write_UART1_TXCNT(val)	bfin_write32(UART1_TXCNT, val)
+#define bfin_read_UART1_RXCNT()		bfin_read32(UART1_RXCNT)
+#define bfin_write_UART1_RXCNT(val)	bfin_write32(UART1_RXCNT, val)
+
+
+/* SPI0 Registers */
+
+#define bfin_read_SPI0_CTL()		bfin_read32(SPI0_CTL)
+#define bfin_write_SPI0_CTL(val)	bfin_write32(SPI0_CTL, val)
+#define bfin_read_SPI0_RXCTL()		bfin_read32(SPI0_RXCTL)
+#define bfin_write_SPI0_RXCTL(val)	bfin_write32(SPI0_RXCTL, val)
+#define bfin_read_SPI0_TXCTL()		bfin_read32(SPI0_TXCTL)
+#define bfin_write_SPI0_TXCTL(val)	bfin_write32(SPI0_TXCTL, val)
+#define bfin_read_SPI0_CLK()		bfin_read32(SPI0_CLK)
+#define bfin_write_SPI0_CLK(val)	bfin_write32(SPI0_CLK, val)
+#define bfin_read_SPI0_DLY()		bfin_read32(SPI0_DLY)
+#define bfin_write_SPI0_DLY(val)	bfin_write32(SPI0_DLY, val)
+#define bfin_read_SPI0_SLVSEL()		bfin_read32(SPI0_SLVSEL)
+#define bfin_write_SPI0_SLVSEL(val)	bfin_write32(SPI0_SLVSEL, val)
+#define bfin_read_SPI0_RWC()		bfin_read32(SPI0_RWC)
+#define bfin_write_SPI0_RWC(val)	bfin_write32(SPI0_RWC, val)
+#define bfin_read_SPI0_RWCR()		bfin_read32(SPI0_RWCR)
+#define bfin_write_SPI0_RWCR(val)	bfin_write32(SPI0_RWCR, val)
+#define bfin_read_SPI0_TWC()		bfin_read32(SPI0_TWC)
+#define bfin_write_SPI0_TWC(val)	bfin_write32(SPI0_TWC, val)
+#define bfin_read_SPI0_TWCR()		bfin_read32(SPI0_TWCR)
+#define bfin_write_SPI0_TWCR(val)	bfin_write32(SPI0_TWCR, val)
+#define bfin_read_SPI0_IMSK()		bfin_read32(SPI0_IMSK)
+#define bfin_write_SPI0_IMSK(val)	bfin_write32(SPI0_IMSK, val)
+#define bfin_read_SPI0_IMSK_CLR()	bfin_read32(SPI0_IMSK_CLR)
+#define bfin_write_SPI0_IMSK_CLR(val)	bfin_write32(SPI0_IMSK_CLR, val)
+#define bfin_read_SPI0_IMSK_SET()	bfin_read32(SPI0_IMSK_SET)
+#define bfin_write_SPI0_IMSK_SET(val)	bfin_write32(SPI0_IMSK_SET, val)
+#define bfin_read_SPI0_STAT()		bfin_read32(SPI0_STAT)
+#define bfin_write_SPI0_STAT(val)	bfin_write32(SPI0_STAT, val)
+#define bfin_read_SPI0_ILAT()		bfin_read32(SPI0_ILAT)
+#define bfin_write_SPI0_ILAT(val)	bfin_write32(SPI0_ILAT, val)
+#define bfin_read_SPI0_ILAT_CLR()	bfin_read32(SPI0_ILAT_CLR)
+#define bfin_write_SPI0_ILAT_CLR(val)	bfin_write32(SPI0_ILAT_CLR, val)
+#define bfin_read_SPI0_RFIFO()		bfin_read32(SPI0_RFIFO)
+#define bfin_write_SPI0_RFIFO(val)	bfin_write32(SPI0_RFIFO, val)
+#define bfin_read_SPI0_TFIFO()		bfin_read32(SPI0_TFIFO)
+#define bfin_write_SPI0_TFIFO(val)	bfin_write32(SPI0_TFIFO, val)
+
+/* SPI1 Registers */
+
+#define bfin_read_SPI1_CTL()		bfin_read32(SPI1_CTL)
+#define bfin_write_SPI1_CTL(val)	bfin_write32(SPI1_CTL, val)
+#define bfin_read_SPI1_RXCTL()		bfin_read32(SPI1_RXCTL)
+#define bfin_write_SPI1_RXCTL(val)	bfin_write32(SPI1_RXCTL, val)
+#define bfin_read_SPI1_TXCTL()		bfin_read32(SPI1_TXCTL)
+#define bfin_write_SPI1_TXCTL(val)	bfin_write32(SPI1_TXCTL, val)
+#define bfin_read_SPI1_CLK()		bfin_read32(SPI1_CLK)
+#define bfin_write_SPI1_CLK(val)	bfin_write32(SPI1_CLK, val)
+#define bfin_read_SPI1_DLY()		bfin_read32(SPI1_DLY)
+#define bfin_write_SPI1_DLY(val)	bfin_write32(SPI1_DLY, val)
+#define bfin_read_SPI1_SLVSEL()		bfin_read32(SPI1_SLVSEL)
+#define bfin_write_SPI1_SLVSEL(val)	bfin_write32(SPI1_SLVSEL, val)
+#define bfin_read_SPI1_RWC()		bfin_read32(SPI1_RWC)
+#define bfin_write_SPI1_RWC(val)	bfin_write32(SPI1_RWC, val)
+#define bfin_read_SPI1_RWCR()		bfin_read32(SPI1_RWCR)
+#define bfin_write_SPI1_RWCR(val)	bfin_write32(SPI1_RWCR, val)
+#define bfin_read_SPI1_TWC()		bfin_read32(SPI1_TWC)
+#define bfin_write_SPI1_TWC(val)	bfin_write32(SPI1_TWC, val)
+#define bfin_read_SPI1_TWCR()		bfin_read32(SPI1_TWCR)
+#define bfin_write_SPI1_TWCR(val)	bfin_write32(SPI1_TWCR, val)
+#define bfin_read_SPI1_IMSK()		bfin_read32(SPI1_IMSK)
+#define bfin_write_SPI1_IMSK(val)	bfin_write32(SPI1_IMSK, val)
+#define bfin_read_SPI1_IMSK_CLR()	bfin_read32(SPI1_IMSK_CLR)
+#define bfin_write_SPI1_IMSK_CLR(val)	bfin_write32(SPI1_IMSK_CLR, val)
+#define bfin_read_SPI1_IMSK_SET()	bfin_read32(SPI1_IMSK_SET)
+#define bfin_write_SPI1_IMSK_SET(val)	bfin_write32(SPI1_IMSK_SET, val)
+#define bfin_read_SPI1_STAT()		bfin_read32(SPI1_STAT)
+#define bfin_write_SPI1_STAT(val)	bfin_write32(SPI1_STAT, val)
+#define bfin_read_SPI1_ILAT()		bfin_read32(SPI1_ILAT)
+#define bfin_write_SPI1_ILAT(val)	bfin_write32(SPI1_ILAT, val)
+#define bfin_read_SPI1_ILAT_CLR()	bfin_read32(SPI1_ILAT_CLR)
+#define bfin_write_SPI1_ILAT_CLR(val)	bfin_write32(SPI1_ILAT_CLR, val)
+#define bfin_read_SPI1_RFIFO()		bfin_read32(SPI1_RFIFO)
+#define bfin_write_SPI1_RFIFO(val)	bfin_write32(SPI1_RFIFO, val)
+#define bfin_read_SPI1_TFIFO()		bfin_read32(SPI1_TFIFO)
+#define bfin_write_SPI1_TFIFO(val)	bfin_write32(SPI1_TFIFO, val)
+
+/* Timer 0-7 registers */
+#define bfin_read_TIMER0_CONFIG()            bfin_read16(TIMER0_CONFIG)
+#define bfin_write_TIMER0_CONFIG(val)        bfin_write16(TIMER0_CONFIG, val)
+#define bfin_read_TIMER0_COUNTER()           bfin_read32(TIMER0_COUNTER)
+#define bfin_write_TIMER0_COUNTER(val)       bfin_write32(TIMER0_COUNTER, val)
+#define bfin_read_TIMER0_PERIOD()            bfin_read32(TIMER0_PERIOD)
+#define bfin_write_TIMER0_PERIOD(val)        bfin_write32(TIMER0_PERIOD, val)
+#define bfin_read_TIMER0_WIDTH()             bfin_read32(TIMER0_WIDTH)
+#define bfin_write_TIMER0_WIDTH(val)         bfin_write32(TIMER0_WIDTH, val)
+#define bfin_read_TIMER1_CONFIG()            bfin_read16(TIMER1_CONFIG)
+#define bfin_write_TIMER1_CONFIG(val)        bfin_write16(TIMER1_CONFIG, val)
+#define bfin_read_TIMER1_COUNTER()           bfin_read32(TIMER1_COUNTER)
+#define bfin_write_TIMER1_COUNTER(val)       bfin_write32(TIMER1_COUNTER, val)
+#define bfin_read_TIMER1_PERIOD()            bfin_read32(TIMER1_PERIOD)
+#define bfin_write_TIMER1_PERIOD(val)        bfin_write32(TIMER1_PERIOD, val)
+#define bfin_read_TIMER1_WIDTH()             bfin_read32(TIMER1_WIDTH)
+#define bfin_write_TIMER1_WIDTH(val)         bfin_write32(TIMER1_WIDTH, val)
+#define bfin_read_TIMER2_CONFIG()            bfin_read16(TIMER2_CONFIG)
+#define bfin_write_TIMER2_CONFIG(val)        bfin_write16(TIMER2_CONFIG, val)
+#define bfin_read_TIMER2_COUNTER()           bfin_read32(TIMER2_COUNTER)
+#define bfin_write_TIMER2_COUNTER(val)       bfin_write32(TIMER2_COUNTER, val)
+#define bfin_read_TIMER2_PERIOD()            bfin_read32(TIMER2_PERIOD)
+#define bfin_write_TIMER2_PERIOD(val)        bfin_write32(TIMER2_PERIOD, val)
+#define bfin_read_TIMER2_WIDTH()             bfin_read32(TIMER2_WIDTH)
+#define bfin_write_TIMER2_WIDTH(val)         bfin_write32(TIMER2_WIDTH, val)
+#define bfin_read_TIMER3_CONFIG()            bfin_read16(TIMER3_CONFIG)
+#define bfin_write_TIMER3_CONFIG(val)        bfin_write16(TIMER3_CONFIG, val)
+#define bfin_read_TIMER3_COUNTER()           bfin_read32(TIMER3_COUNTER)
+#define bfin_write_TIMER3_COUNTER(val)       bfin_write32(TIMER3_COUNTER, val)
+#define bfin_read_TIMER3_PERIOD()            bfin_read32(TIMER3_PERIOD)
+#define bfin_write_TIMER3_PERIOD(val)        bfin_write32(TIMER3_PERIOD, val)
+#define bfin_read_TIMER3_WIDTH()             bfin_read32(TIMER3_WIDTH)
+#define bfin_write_TIMER3_WIDTH(val)         bfin_write32(TIMER3_WIDTH, val)
+#define bfin_read_TIMER4_CONFIG()            bfin_read16(TIMER4_CONFIG)
+#define bfin_write_TIMER4_CONFIG(val)        bfin_write16(TIMER4_CONFIG, val)
+#define bfin_read_TIMER4_COUNTER()           bfin_read32(TIMER4_COUNTER)
+#define bfin_write_TIMER4_COUNTER(val)       bfin_write32(TIMER4_COUNTER, val)
+#define bfin_read_TIMER4_PERIOD()            bfin_read32(TIMER4_PERIOD)
+#define bfin_write_TIMER4_PERIOD(val)        bfin_write32(TIMER4_PERIOD, val)
+#define bfin_read_TIMER4_WIDTH()             bfin_read32(TIMER4_WIDTH)
+#define bfin_write_TIMER4_WIDTH(val)         bfin_write32(TIMER4_WIDTH, val)
+#define bfin_read_TIMER5_CONFIG()            bfin_read16(TIMER5_CONFIG)
+#define bfin_write_TIMER5_CONFIG(val)        bfin_write16(TIMER5_CONFIG, val)
+#define bfin_read_TIMER5_COUNTER()           bfin_read32(TIMER5_COUNTER)
+#define bfin_write_TIMER5_COUNTER(val)       bfin_write32(TIMER5_COUNTER, val)
+#define bfin_read_TIMER5_PERIOD()            bfin_read32(TIMER5_PERIOD)
+#define bfin_write_TIMER5_PERIOD(val)        bfin_write32(TIMER5_PERIOD, val)
+#define bfin_read_TIMER5_WIDTH()             bfin_read32(TIMER5_WIDTH)
+#define bfin_write_TIMER5_WIDTH(val)         bfin_write32(TIMER5_WIDTH, val)
+#define bfin_read_TIMER6_CONFIG()            bfin_read16(TIMER6_CONFIG)
+#define bfin_write_TIMER6_CONFIG(val)        bfin_write16(TIMER6_CONFIG, val)
+#define bfin_read_TIMER6_COUNTER()           bfin_read32(TIMER6_COUNTER)
+#define bfin_write_TIMER6_COUNTER(val)       bfin_write32(TIMER6_COUNTER, val)
+#define bfin_read_TIMER6_PERIOD()            bfin_read32(TIMER6_PERIOD)
+#define bfin_write_TIMER6_PERIOD(val)        bfin_write32(TIMER6_PERIOD, val)
+#define bfin_read_TIMER6_WIDTH()             bfin_read32(TIMER6_WIDTH)
+#define bfin_write_TIMER6_WIDTH(val)         bfin_write32(TIMER6_WIDTH, val)
+#define bfin_read_TIMER7_CONFIG()            bfin_read16(TIMER7_CONFIG)
+#define bfin_write_TIMER7_CONFIG(val)        bfin_write16(TIMER7_CONFIG, val)
+#define bfin_read_TIMER7_COUNTER()           bfin_read32(TIMER7_COUNTER)
+#define bfin_write_TIMER7_COUNTER(val)       bfin_write32(TIMER7_COUNTER, val)
+#define bfin_read_TIMER7_PERIOD()            bfin_read32(TIMER7_PERIOD)
+#define bfin_write_TIMER7_PERIOD(val)        bfin_write32(TIMER7_PERIOD, val)
+#define bfin_read_TIMER7_WIDTH()             bfin_read32(TIMER7_WIDTH)
+#define bfin_write_TIMER7_WIDTH(val)         bfin_write32(TIMER7_WIDTH, val)
+
+
+
+
+/* Two Wire Interface Registers (TWI0) */
+
+/* SPORT1 Registers */
+
+
+/* SMC Registers */
+#define bfin_read_SMC_GCTL() bfin_read32(SMC_GCTL)
+#define bfin_write_SMC_GCTL(val) bfin_write32(SMC_GCTL, val)
+#define bfin_read_SMC_GSTAT() bfin_read32(SMC_GSTAT)
+#define bfin_read_SMC_B0CTL() bfin_read32(SMC_B0CTL)
+#define bfin_write_SMC_B0CTL(val) bfin_write32(SMC_B0CTL, val)
+#define bfin_read_SMC_B0TIM() bfin_read32(SMC_B0TIM)
+#define bfin_write_SMC_B0TIM(val) bfin_write32(SMC_B0TIM, val)
+#define bfin_read_SMC_B0ETIM() bfin_read32(SMC_B0ETIM)
+#define bfin_write_SMC_B0ETIM(val) bfin_write32(SMC_B0ETIM, val)
+#define bfin_read_SMC_B1CTL() bfin_read32(SMC_B1CTL)
+#define bfin_write_SMC_B1CTL(val) bfin_write32(SMC_B1CTL, val)
+#define bfin_read_SMC_B1TIM() bfin_read32(SMC_B1TIM)
+#define bfin_write_SMC_B1TIM(val) bfin_write32(SMC_B1TIM, val)
+#define bfin_read_SMC_B1ETIM() bfin_read32(SMC_B1ETIM)
+#define bfin_write_SMC_B1ETIM(val) bfin_write32(SMC_B1ETIM, val)
+#define bfin_read_SMC_B2CTL() bfin_read32(SMC_B2CTL)
+#define bfin_write_SMC_B2CTL(val) bfin_write32(SMC_B2CTL, val)
+#define bfin_read_SMC_B2TIM() bfin_read32(SMC_B2TIM)
+#define bfin_write_SMC_B2TIM(val) bfin_write32(SMC_B2TIM, val)
+#define bfin_read_SMC_B2ETIM() bfin_read32(SMC_B2ETIM)
+#define bfin_write_SMC_B2ETIM(val) bfin_write32(SMC_B2ETIM, val)
+#define bfin_read_SMC_B3CTL() bfin_read32(SMC_B3CTL)
+#define bfin_write_SMC_B3CTL(val) bfin_write32(SMC_B3CTL, val)
+#define bfin_read_SMC_B3TIM() bfin_read32(SMC_B3TIM)
+#define bfin_write_SMC_B3TIM(val) bfin_write32(SMC_B3TIM, val)
+#define bfin_read_SMC_B3ETIM() bfin_read32(SMC_B3ETIM)
+#define bfin_write_SMC_B3ETIM(val) bfin_write32(SMC_B3ETIM, val)
+
+/* DDR2 Memory Control Registers */
+#define bfin_read_DMC0_CFG() bfin_read32(DMC0_CFG)
+#define bfin_write_DMC0_CFG(val) bfin_write32(DMC0_CFG, val)
+#define bfin_read_DMC0_TR0() bfin_read32(DMC0_TR0)
+#define bfin_write_DMC0_TR0(val) bfin_write32(DMC0_TR0, val)
+#define bfin_read_DMC0_TR1() bfin_read32(DMC0_TR1)
+#define bfin_write_DMC0_TR1(val) bfin_write32(DMC0_TR1, val)
+#define bfin_read_DMC0_TR2() bfin_read32(DMC0_TR2)
+#define bfin_write_DMC0_TR2(val) bfin_write32(DMC0_TR2, val)
+#define bfin_read_DMC0_MR() bfin_read32(DMC0_MR)
+#define bfin_write_DMC0_MR(val) bfin_write32(DMC0_MR, val)
+#define bfin_read_DMC0_EMR1() bfin_read32(DMC0_EMR1)
+#define bfin_write_DMC0_EMR1(val) bfin_write32(DMC0_EMR1, val)
+#define bfin_read_DMC0_CTL() bfin_read32(DMC0_CTL)
+#define bfin_write_DMC0_CTL(val) bfin_write32(DMC0_CTL, val)
+#define bfin_read_DMC0_STAT() bfin_read32(DMC0_STAT)
+#define bfin_write_DMC0_STAT(val) bfin_write32(DMC0_STAT, val)
+#define bfin_read_DMC0_DLLCTL() bfin_read32(DMC0_DLLCTL)
+#define bfin_write_DMC0_DLLCTL(val) bfin_write32(DMC0_DLLCTL, val)
+
+/* DDR Bank Read and Write Count Registers */
+
+
+/* DMA Channel 0 Registers */
+
+#define bfin_read_DMA0_NEXT_DESC_PTR() 		bfin_read32(DMA0_NEXT_DESC_PTR)
+#define bfin_write_DMA0_NEXT_DESC_PTR(val) 	bfin_write32(DMA0_NEXT_DESC_PTR, val)
+#define bfin_read_DMA0_START_ADDR() 		bfin_read32(DMA0_START_ADDR)
+#define bfin_write_DMA0_START_ADDR(val) 	bfin_write32(DMA0_START_ADDR, val)
+#define bfin_read_DMA0_CONFIG()			bfin_read32(DMA0_CONFIG)
+#define bfin_write_DMA0_CONFIG(val)		bfin_write32(DMA0_CONFIG, val)
+#define bfin_read_DMA0_X_COUNT()		bfin_read32(DMA0_X_COUNT)
+#define bfin_write_DMA0_X_COUNT(val)		bfin_write32(DMA0_X_COUNT, val)
+#define bfin_read_DMA0_X_MODIFY()		bfin_read32(DMA0_X_MODIFY)
+#define bfin_write_DMA0_X_MODIFY(val) 		bfin_write32(DMA0_X_MODIFY, val)
+#define bfin_read_DMA0_Y_COUNT()		bfin_read32(DMA0_Y_COUNT)
+#define bfin_write_DMA0_Y_COUNT(val)		bfin_write32(DMA0_Y_COUNT, val)
+#define bfin_read_DMA0_Y_MODIFY()		bfin_read32(DMA0_Y_MODIFY)
+#define bfin_write_DMA0_Y_MODIFY(val) 		bfin_write32(DMA0_Y_MODIFY, val)
+#define bfin_read_DMA0_CURR_DESC_PTR() 		bfin_read32(DMA0_CURR_DESC_PTR)
+#define bfin_write_DMA0_CURR_DESC_PTR(val) 	bfin_write32(DMA0_CURR_DESC_PTR, val)
+#define bfin_read_DMA0_PREV_DESC_PTR() 		bfin_read32(DMA0_PREV_DESC_PTR)
+#define bfin_write_DMA0_PREV_DESC_PTR(val) 	bfin_write32(DMA0_PREV_DESC_PTR, val)
+#define bfin_read_DMA0_CURR_ADDR() 		bfin_read32(DMA0_CURR_ADDR)
+#define bfin_write_DMA0_CURR_ADDR(val) 		bfin_write32(DMA0_CURR_ADDR, val)
+#define bfin_read_DMA0_IRQ_STATUS()		bfin_read32(DMA0_IRQ_STATUS)
+#define bfin_write_DMA0_IRQ_STATUS(val)		bfin_write32(DMA0_IRQ_STATUS, val)
+#define bfin_read_DMA0_CURR_X_COUNT()		bfin_read32(DMA0_CURR_X_COUNT)
+#define bfin_write_DMA0_CURR_X_COUNT(val)	bfin_write32(DMA0_CURR_X_COUNT, val)
+#define bfin_read_DMA0_CURR_Y_COUNT()		bfin_read32(DMA0_CURR_Y_COUNT)
+#define bfin_write_DMA0_CURR_Y_COUNT(val)	bfin_write32(DMA0_CURR_Y_COUNT, val)
+#define bfin_read_DMA0_BWL_COUNT()		bfin_read32(DMA0_BWL_COUNT)
+#define bfin_write_DMA0_BWL_COUNT(val)		bfin_write32(DMA0_BWL_COUNT, val)
+#define bfin_read_DMA0_CURR_BWL_COUNT()		bfin_read32(DMA0_CURR_BWL_COUNT)
+#define bfin_write_DMA0_CURR_BWL_COUNT(val)	bfin_write32(DMA0_CURR_BWL_COUNT, val)
+#define bfin_read_DMA0_BWM_COUNT()		bfin_read32(DMA0_BWM_COUNT)
+#define bfin_write_DMA0_BWM_COUNT(val)		bfin_write32(DMA0_BWM_COUNT, val)
+#define bfin_read_DMA0_CURR_BWM_COUNT()		bfin_read32(DMA0_CURR_BWM_COUNT)
+#define bfin_write_DMA0_CURR_BWM_COUNT(val)	bfin_write32(DMA0_CURR_BWM_COUNT, val)
+
+/* DMA Channel 1 Registers */
+
+#define bfin_read_DMA1_NEXT_DESC_PTR() 		bfin_read32(DMA1_NEXT_DESC_PTR)
+#define bfin_write_DMA1_NEXT_DESC_PTR(val) 	bfin_write32(DMA1_NEXT_DESC_PTR, val)
+#define bfin_read_DMA1_START_ADDR() 		bfin_read32(DMA1_START_ADDR)
+#define bfin_write_DMA1_START_ADDR(val) 	bfin_write32(DMA1_START_ADDR, val)
+#define bfin_read_DMA1_CONFIG()			bfin_read32(DMA1_CONFIG)
+#define bfin_write_DMA1_CONFIG(val)		bfin_write32(DMA1_CONFIG, val)
+#define bfin_read_DMA1_X_COUNT()		bfin_read32(DMA1_X_COUNT)
+#define bfin_write_DMA1_X_COUNT(val)		bfin_write32(DMA1_X_COUNT, val)
+#define bfin_read_DMA1_X_MODIFY()		bfin_read32(DMA1_X_MODIFY)
+#define bfin_write_DMA1_X_MODIFY(val) 		bfin_write32(DMA1_X_MODIFY, val)
+#define bfin_read_DMA1_Y_COUNT()		bfin_read32(DMA1_Y_COUNT)
+#define bfin_write_DMA1_Y_COUNT(val)		bfin_write32(DMA1_Y_COUNT, val)
+#define bfin_read_DMA1_Y_MODIFY()		bfin_read32(DMA1_Y_MODIFY)
+#define bfin_write_DMA1_Y_MODIFY(val) 		bfin_write32(DMA1_Y_MODIFY, val)
+#define bfin_read_DMA1_CURR_DESC_PTR() 		bfin_read32(DMA1_CURR_DESC_PTR)
+#define bfin_write_DMA1_CURR_DESC_PTR(val) 	bfin_write32(DMA1_CURR_DESC_PTR, val)
+#define bfin_read_DMA1_PREV_DESC_PTR() 		bfin_read32(DMA1_PREV_DESC_PTR)
+#define bfin_write_DMA1_PREV_DESC_PTR(val) 	bfin_write32(DMA1_PREV_DESC_PTR, val)
+#define bfin_read_DMA1_CURR_ADDR() 		bfin_read32(DMA1_CURR_ADDR)
+#define bfin_write_DMA1_CURR_ADDR(val) 		bfin_write32(DMA1_CURR_ADDR, val)
+#define bfin_read_DMA1_IRQ_STATUS()		bfin_read32(DMA1_IRQ_STATUS)
+#define bfin_write_DMA1_IRQ_STATUS(val)		bfin_write32(DMA1_IRQ_STATUS, val)
+#define bfin_read_DMA1_CURR_X_COUNT()		bfin_read32(DMA1_CURR_X_COUNT)
+#define bfin_write_DMA1_CURR_X_COUNT(val)	bfin_write32(DMA1_CURR_X_COUNT, val)
+#define bfin_read_DMA1_CURR_Y_COUNT()		bfin_read32(DMA1_CURR_Y_COUNT)
+#define bfin_write_DMA1_CURR_Y_COUNT(val)	bfin_write32(DMA1_CURR_Y_COUNT, val)
+#define bfin_read_DMA1_BWL_COUNT()		bfin_read32(DMA1_BWL_COUNT)
+#define bfin_write_DMA1_BWL_COUNT(val)		bfin_write32(DMA1_BWL_COUNT, val)
+#define bfin_read_DMA1_CURR_BWL_COUNT()		bfin_read32(DMA1_CURR_BWL_COUNT)
+#define bfin_write_DMA1_CURR_BWL_COUNT(val)	bfin_write32(DMA1_CURR_BWL_COUNT, val)
+#define bfin_read_DMA1_BWM_COUNT()		bfin_read32(DMA1_BWM_COUNT)
+#define bfin_write_DMA1_BWM_COUNT(val)		bfin_write32(DMA1_BWM_COUNT, val)
+#define bfin_read_DMA1_CURR_BWM_COUNT()		bfin_read32(DMA1_CURR_BWM_COUNT)
+#define bfin_write_DMA1_CURR_BWM_COUNT(val)	bfin_write32(DMA1_CURR_BWM_COUNT, val)
+
+/* DMA Channel 2 Registers */
+
+#define bfin_read_DMA2_NEXT_DESC_PTR() 		bfin_read32(DMA2_NEXT_DESC_PTR)
+#define bfin_write_DMA2_NEXT_DESC_PTR(val) 	bfin_write32(DMA2_NEXT_DESC_PTR, val)
+#define bfin_read_DMA2_START_ADDR() 		bfin_read32(DMA2_START_ADDR)
+#define bfin_write_DMA2_START_ADDR(val) 	bfin_write32(DMA2_START_ADDR, val)
+#define bfin_read_DMA2_CONFIG()			bfin_read32(DMA2_CONFIG)
+#define bfin_write_DMA2_CONFIG(val)		bfin_write32(DMA2_CONFIG, val)
+#define bfin_read_DMA2_X_COUNT()		bfin_read32(DMA2_X_COUNT)
+#define bfin_write_DMA2_X_COUNT(val)		bfin_write32(DMA2_X_COUNT, val)
+#define bfin_read_DMA2_X_MODIFY()		bfin_read32(DMA2_X_MODIFY)
+#define bfin_write_DMA2_X_MODIFY(val) 		bfin_write32(DMA2_X_MODIFY, val)
+#define bfin_read_DMA2_Y_COUNT()		bfin_read32(DMA2_Y_COUNT)
+#define bfin_write_DMA2_Y_COUNT(val)		bfin_write32(DMA2_Y_COUNT, val)
+#define bfin_read_DMA2_Y_MODIFY()		bfin_read32(DMA2_Y_MODIFY)
+#define bfin_write_DMA2_Y_MODIFY(val) 		bfin_write32(DMA2_Y_MODIFY, val)
+#define bfin_read_DMA2_CURR_DESC_PTR() 		bfin_read32(DMA2_CURR_DESC_PTR)
+#define bfin_write_DMA2_CURR_DESC_PTR(val) 	bfin_write32(DMA2_CURR_DESC_PTR, val)
+#define bfin_read_DMA2_PREV_DESC_PTR() 		bfin_read32(DMA2_PREV_DESC_PTR)
+#define bfin_write_DMA2_PREV_DESC_PTR(val) 	bfin_write32(DMA2_PREV_DESC_PTR, val)
+#define bfin_read_DMA2_CURR_ADDR() 		bfin_read32(DMA2_CURR_ADDR)
+#define bfin_write_DMA2_CURR_ADDR(val) 		bfin_write32(DMA2_CURR_ADDR, val)
+#define bfin_read_DMA2_IRQ_STATUS()		bfin_read32(DMA2_IRQ_STATUS)
+#define bfin_write_DMA2_IRQ_STATUS(val)		bfin_write32(DMA2_IRQ_STATUS, val)
+#define bfin_read_DMA2_CURR_X_COUNT()		bfin_read32(DMA2_CURR_X_COUNT)
+#define bfin_write_DMA2_CURR_X_COUNT(val)	bfin_write32(DMA2_CURR_X_COUNT, val)
+#define bfin_read_DMA2_CURR_Y_COUNT()		bfin_read32(DMA2_CURR_Y_COUNT)
+#define bfin_write_DMA2_CURR_Y_COUNT(val)	bfin_write32(DMA2_CURR_Y_COUNT, val)
+#define bfin_read_DMA2_BWL_COUNT()		bfin_read32(DMA2_BWL_COUNT)
+#define bfin_write_DMA2_BWL_COUNT(val)		bfin_write32(DMA2_BWL_COUNT, val)
+#define bfin_read_DMA2_CURR_BWL_COUNT()		bfin_read32(DMA2_CURR_BWL_COUNT)
+#define bfin_write_DMA2_CURR_BWL_COUNT(val)	bfin_write32(DMA2_CURR_BWL_COUNT, val)
+#define bfin_read_DMA2_BWM_COUNT()		bfin_read32(DMA2_BWM_COUNT)
+#define bfin_write_DMA2_BWM_COUNT(val)		bfin_write32(DMA2_BWM_COUNT, val)
+#define bfin_read_DMA2_CURR_BWM_COUNT()		bfin_read32(DMA2_CURR_BWM_COUNT)
+#define bfin_write_DMA2_CURR_BWM_COUNT(val)	bfin_write32(DMA2_CURR_BWM_COUNT, val)
+
+/* DMA Channel 3 Registers */
+
+#define bfin_read_DMA3_NEXT_DESC_PTR() 		bfin_read32(DMA3_NEXT_DESC_PTR)
+#define bfin_write_DMA3_NEXT_DESC_PTR(val) 	bfin_write32(DMA3_NEXT_DESC_PTR, val)
+#define bfin_read_DMA3_START_ADDR() 		bfin_read32(DMA3_START_ADDR)
+#define bfin_write_DMA3_START_ADDR(val) 	bfin_write32(DMA3_START_ADDR, val)
+#define bfin_read_DMA3_CONFIG()			bfin_read32(DMA3_CONFIG)
+#define bfin_write_DMA3_CONFIG(val)		bfin_write32(DMA3_CONFIG, val)
+#define bfin_read_DMA3_X_COUNT()		bfin_read32(DMA3_X_COUNT)
+#define bfin_write_DMA3_X_COUNT(val)		bfin_write32(DMA3_X_COUNT, val)
+#define bfin_read_DMA3_X_MODIFY()		bfin_read32(DMA3_X_MODIFY)
+#define bfin_write_DMA3_X_MODIFY(val) 		bfin_write32(DMA3_X_MODIFY, val)
+#define bfin_read_DMA3_Y_COUNT()		bfin_read32(DMA3_Y_COUNT)
+#define bfin_write_DMA3_Y_COUNT(val)		bfin_write32(DMA3_Y_COUNT, val)
+#define bfin_read_DMA3_Y_MODIFY()		bfin_read32(DMA3_Y_MODIFY)
+#define bfin_write_DMA3_Y_MODIFY(val) 		bfin_write32(DMA3_Y_MODIFY, val)
+#define bfin_read_DMA3_CURR_DESC_PTR() 		bfin_read32(DMA3_CURR_DESC_PTR)
+#define bfin_write_DMA3_CURR_DESC_PTR(val) 	bfin_write32(DMA3_CURR_DESC_PTR, val)
+#define bfin_read_DMA3_PREV_DESC_PTR() 		bfin_read32(DMA3_PREV_DESC_PTR)
+#define bfin_write_DMA3_PREV_DESC_PTR(val) 	bfin_write32(DMA3_PREV_DESC_PTR, val)
+#define bfin_read_DMA3_CURR_ADDR() 		bfin_read32(DMA3_CURR_ADDR)
+#define bfin_write_DMA3_CURR_ADDR(val) 		bfin_write32(DMA3_CURR_ADDR, val)
+#define bfin_read_DMA3_IRQ_STATUS()		bfin_read32(DMA3_IRQ_STATUS)
+#define bfin_write_DMA3_IRQ_STATUS(val)		bfin_write32(DMA3_IRQ_STATUS, val)
+#define bfin_read_DMA3_CURR_X_COUNT()		bfin_read32(DMA3_CURR_X_COUNT)
+#define bfin_write_DMA3_CURR_X_COUNT(val)	bfin_write32(DMA3_CURR_X_COUNT, val)
+#define bfin_read_DMA3_CURR_Y_COUNT()		bfin_read32(DMA3_CURR_Y_COUNT)
+#define bfin_write_DMA3_CURR_Y_COUNT(val)	bfin_write32(DMA3_CURR_Y_COUNT, val)
+#define bfin_read_DMA3_BWL_COUNT()		bfin_read32(DMA3_BWL_COUNT)
+#define bfin_write_DMA3_BWL_COUNT(val)		bfin_write32(DMA3_BWL_COUNT, val)
+#define bfin_read_DMA3_CURR_BWL_COUNT()		bfin_read32(DMA3_CURR_BWL_COUNT)
+#define bfin_write_DMA3_CURR_BWL_COUNT(val)	bfin_write32(DMA3_CURR_BWL_COUNT, val)
+#define bfin_read_DMA3_BWM_COUNT()		bfin_read32(DMA3_BWM_COUNT)
+#define bfin_write_DMA3_BWM_COUNT(val)		bfin_write32(DMA3_BWM_COUNT, val)
+#define bfin_read_DMA3_CURR_BWM_COUNT()		bfin_read32(DMA3_CURR_BWM_COUNT)
+#define bfin_write_DMA3_CURR_BWM_COUNT(val)	bfin_write32(DMA3_CURR_BWM_COUNT, val)
+
+/* DMA Channel 4 Registers */
+
+#define bfin_read_DMA4_NEXT_DESC_PTR() 		bfin_read32(DMA4_NEXT_DESC_PTR)
+#define bfin_write_DMA4_NEXT_DESC_PTR(val) 	bfin_write32(DMA4_NEXT_DESC_PTR, val)
+#define bfin_read_DMA4_START_ADDR() 		bfin_read32(DMA4_START_ADDR)
+#define bfin_write_DMA4_START_ADDR(val) 	bfin_write32(DMA4_START_ADDR, val)
+#define bfin_read_DMA4_CONFIG()			bfin_read32(DMA4_CONFIG)
+#define bfin_write_DMA4_CONFIG(val)		bfin_write32(DMA4_CONFIG, val)
+#define bfin_read_DMA4_X_COUNT()		bfin_read32(DMA4_X_COUNT)
+#define bfin_write_DMA4_X_COUNT(val)		bfin_write32(DMA4_X_COUNT, val)
+#define bfin_read_DMA4_X_MODIFY()		bfin_read32(DMA4_X_MODIFY)
+#define bfin_write_DMA4_X_MODIFY(val) 		bfin_write32(DMA4_X_MODIFY, val)
+#define bfin_read_DMA4_Y_COUNT()		bfin_read32(DMA4_Y_COUNT)
+#define bfin_write_DMA4_Y_COUNT(val)		bfin_write32(DMA4_Y_COUNT, val)
+#define bfin_read_DMA4_Y_MODIFY()		bfin_read32(DMA4_Y_MODIFY)
+#define bfin_write_DMA4_Y_MODIFY(val) 		bfin_write32(DMA4_Y_MODIFY, val)
+#define bfin_read_DMA4_CURR_DESC_PTR() 		bfin_read32(DMA4_CURR_DESC_PTR)
+#define bfin_write_DMA4_CURR_DESC_PTR(val) 	bfin_write32(DMA4_CURR_DESC_PTR, val)
+#define bfin_read_DMA4_PREV_DESC_PTR() 		bfin_read32(DMA4_PREV_DESC_PTR)
+#define bfin_write_DMA4_PREV_DESC_PTR(val) 	bfin_write32(DMA4_PREV_DESC_PTR, val)
+#define bfin_read_DMA4_CURR_ADDR() 		bfin_read32(DMA4_CURR_ADDR)
+#define bfin_write_DMA4_CURR_ADDR(val) 		bfin_write32(DMA4_CURR_ADDR, val)
+#define bfin_read_DMA4_IRQ_STATUS()		bfin_read32(DMA4_IRQ_STATUS)
+#define bfin_write_DMA4_IRQ_STATUS(val)		bfin_write32(DMA4_IRQ_STATUS, val)
+#define bfin_read_DMA4_CURR_X_COUNT()		bfin_read32(DMA4_CURR_X_COUNT)
+#define bfin_write_DMA4_CURR_X_COUNT(val)	bfin_write32(DMA4_CURR_X_COUNT, val)
+#define bfin_read_DMA4_CURR_Y_COUNT()		bfin_read32(DMA4_CURR_Y_COUNT)
+#define bfin_write_DMA4_CURR_Y_COUNT(val)	bfin_write32(DMA4_CURR_Y_COUNT, val)
+#define bfin_read_DMA4_BWL_COUNT()		bfin_read32(DMA4_BWL_COUNT)
+#define bfin_write_DMA4_BWL_COUNT(val)		bfin_write32(DMA4_BWL_COUNT, val)
+#define bfin_read_DMA4_CURR_BWL_COUNT()		bfin_read32(DMA4_CURR_BWL_COUNT)
+#define bfin_write_DMA4_CURR_BWL_COUNT(val)	bfin_write32(DMA4_CURR_BWL_COUNT, val)
+#define bfin_read_DMA4_BWM_COUNT()		bfin_read32(DMA4_BWM_COUNT)
+#define bfin_write_DMA4_BWM_COUNT(val)		bfin_write32(DMA4_BWM_COUNT, val)
+#define bfin_read_DMA4_CURR_BWM_COUNT()		bfin_read32(DMA4_CURR_BWM_COUNT)
+#define bfin_write_DMA4_CURR_BWM_COUNT(val)	bfin_write32(DMA4_CURR_BWM_COUNT, val)
+
+/* DMA Channel 5 Registers */
+
+#define bfin_read_DMA5_NEXT_DESC_PTR() 		bfin_read32(DMA5_NEXT_DESC_PTR)
+#define bfin_write_DMA5_NEXT_DESC_PTR(val) 	bfin_write32(DMA5_NEXT_DESC_PTR, val)
+#define bfin_read_DMA5_START_ADDR() 		bfin_read32(DMA5_START_ADDR)
+#define bfin_write_DMA5_START_ADDR(val) 	bfin_write32(DMA5_START_ADDR, val)
+#define bfin_read_DMA5_CONFIG()			bfin_read32(DMA5_CONFIG)
+#define bfin_write_DMA5_CONFIG(val)		bfin_write32(DMA5_CONFIG, val)
+#define bfin_read_DMA5_X_COUNT()		bfin_read32(DMA5_X_COUNT)
+#define bfin_write_DMA5_X_COUNT(val)		bfin_write32(DMA5_X_COUNT, val)
+#define bfin_read_DMA5_X_MODIFY()		bfin_read32(DMA5_X_MODIFY)
+#define bfin_write_DMA5_X_MODIFY(val) 		bfin_write32(DMA5_X_MODIFY, val)
+#define bfin_read_DMA5_Y_COUNT()		bfin_read32(DMA5_Y_COUNT)
+#define bfin_write_DMA5_Y_COUNT(val)		bfin_write32(DMA5_Y_COUNT, val)
+#define bfin_read_DMA5_Y_MODIFY()		bfin_read32(DMA5_Y_MODIFY)
+#define bfin_write_DMA5_Y_MODIFY(val) 		bfin_write32(DMA5_Y_MODIFY, val)
+#define bfin_read_DMA5_CURR_DESC_PTR() 		bfin_read32(DMA5_CURR_DESC_PTR)
+#define bfin_write_DMA5_CURR_DESC_PTR(val) 	bfin_write32(DMA5_CURR_DESC_PTR, val)
+#define bfin_read_DMA5_PREV_DESC_PTR() 		bfin_read32(DMA5_PREV_DESC_PTR)
+#define bfin_write_DMA5_PREV_DESC_PTR(val) 	bfin_write32(DMA5_PREV_DESC_PTR, val)
+#define bfin_read_DMA5_CURR_ADDR() 		bfin_read32(DMA5_CURR_ADDR)
+#define bfin_write_DMA5_CURR_ADDR(val) 		bfin_write32(DMA5_CURR_ADDR, val)
+#define bfin_read_DMA5_IRQ_STATUS()		bfin_read32(DMA5_IRQ_STATUS)
+#define bfin_write_DMA5_IRQ_STATUS(val)		bfin_write32(DMA5_IRQ_STATUS, val)
+#define bfin_read_DMA5_CURR_X_COUNT()		bfin_read32(DMA5_CURR_X_COUNT)
+#define bfin_write_DMA5_CURR_X_COUNT(val)	bfin_write32(DMA5_CURR_X_COUNT, val)
+#define bfin_read_DMA5_CURR_Y_COUNT()		bfin_read32(DMA5_CURR_Y_COUNT)
+#define bfin_write_DMA5_CURR_Y_COUNT(val)	bfin_write32(DMA5_CURR_Y_COUNT, val)
+#define bfin_read_DMA5_BWL_COUNT()		bfin_read32(DMA5_BWL_COUNT)
+#define bfin_write_DMA5_BWL_COUNT(val)		bfin_write32(DMA5_BWL_COUNT, val)
+#define bfin_read_DMA5_CURR_BWL_COUNT()		bfin_read32(DMA5_CURR_BWL_COUNT)
+#define bfin_write_DMA5_CURR_BWL_COUNT(val)	bfin_write32(DMA5_CURR_BWL_COUNT, val)
+#define bfin_read_DMA5_BWM_COUNT()		bfin_read32(DMA5_BWM_COUNT)
+#define bfin_write_DMA5_BWM_COUNT(val)		bfin_write32(DMA5_BWM_COUNT, val)
+#define bfin_read_DMA5_CURR_BWM_COUNT()		bfin_read32(DMA5_CURR_BWM_COUNT)
+#define bfin_write_DMA5_CURR_BWM_COUNT(val)	bfin_write32(DMA5_CURR_BWM_COUNT, val)
+
+/* DMA Channel 6 Registers */
+
+#define bfin_read_DMA6_NEXT_DESC_PTR() 		bfin_read32(DMA6_NEXT_DESC_PTR)
+#define bfin_write_DMA6_NEXT_DESC_PTR(val) 	bfin_write32(DMA6_NEXT_DESC_PTR, val)
+#define bfin_read_DMA6_START_ADDR() 		bfin_read32(DMA6_START_ADDR)
+#define bfin_write_DMA6_START_ADDR(val) 	bfin_write32(DMA6_START_ADDR, val)
+#define bfin_read_DMA6_CONFIG()			bfin_read32(DMA6_CONFIG)
+#define bfin_write_DMA6_CONFIG(val)		bfin_write32(DMA6_CONFIG, val)
+#define bfin_read_DMA6_X_COUNT()		bfin_read32(DMA6_X_COUNT)
+#define bfin_write_DMA6_X_COUNT(val)		bfin_write32(DMA6_X_COUNT, val)
+#define bfin_read_DMA6_X_MODIFY()		bfin_read32(DMA6_X_MODIFY)
+#define bfin_write_DMA6_X_MODIFY(val) 		bfin_write32(DMA6_X_MODIFY, val)
+#define bfin_read_DMA6_Y_COUNT()		bfin_read32(DMA6_Y_COUNT)
+#define bfin_write_DMA6_Y_COUNT(val)		bfin_write32(DMA6_Y_COUNT, val)
+#define bfin_read_DMA6_Y_MODIFY()		bfin_read32(DMA6_Y_MODIFY)
+#define bfin_write_DMA6_Y_MODIFY(val) 		bfin_write32(DMA6_Y_MODIFY, val)
+#define bfin_read_DMA6_CURR_DESC_PTR() 		bfin_read32(DMA6_CURR_DESC_PTR)
+#define bfin_write_DMA6_CURR_DESC_PTR(val) 	bfin_write32(DMA6_CURR_DESC_PTR, val)
+#define bfin_read_DMA6_PREV_DESC_PTR() 		bfin_read32(DMA6_PREV_DESC_PTR)
+#define bfin_write_DMA6_PREV_DESC_PTR(val) 	bfin_write32(DMA6_PREV_DESC_PTR, val)
+#define bfin_read_DMA6_CURR_ADDR() 		bfin_read32(DMA6_CURR_ADDR)
+#define bfin_write_DMA6_CURR_ADDR(val) 		bfin_write32(DMA6_CURR_ADDR, val)
+#define bfin_read_DMA6_IRQ_STATUS()		bfin_read32(DMA6_IRQ_STATUS)
+#define bfin_write_DMA6_IRQ_STATUS(val)		bfin_write32(DMA6_IRQ_STATUS, val)
+#define bfin_read_DMA6_CURR_X_COUNT()		bfin_read32(DMA6_CURR_X_COUNT)
+#define bfin_write_DMA6_CURR_X_COUNT(val)	bfin_write32(DMA6_CURR_X_COUNT, val)
+#define bfin_read_DMA6_CURR_Y_COUNT()		bfin_read32(DMA6_CURR_Y_COUNT)
+#define bfin_write_DMA6_CURR_Y_COUNT(val)	bfin_write32(DMA6_CURR_Y_COUNT, val)
+#define bfin_read_DMA6_BWL_COUNT()		bfin_read32(DMA6_BWL_COUNT)
+#define bfin_write_DMA6_BWL_COUNT(val)		bfin_write32(DMA6_BWL_COUNT, val)
+#define bfin_read_DMA6_CURR_BWL_COUNT()		bfin_read32(DMA6_CURR_BWL_COUNT)
+#define bfin_write_DMA6_CURR_BWL_COUNT(val)	bfin_write32(DMA6_CURR_BWL_COUNT, val)
+#define bfin_read_DMA6_BWM_COUNT()		bfin_read32(DMA6_BWM_COUNT)
+#define bfin_write_DMA6_BWM_COUNT(val)		bfin_write32(DMA6_BWM_COUNT, val)
+#define bfin_read_DMA6_CURR_BWM_COUNT()		bfin_read32(DMA6_CURR_BWM_COUNT)
+#define bfin_write_DMA6_CURR_BWM_COUNT(val)	bfin_write32(DMA6_CURR_BWM_COUNT, val)
+
+/* DMA Channel 7 Registers */
+
+#define bfin_read_DMA7_NEXT_DESC_PTR() 		bfin_read32(DMA7_NEXT_DESC_PTR)
+#define bfin_write_DMA7_NEXT_DESC_PTR(val) 	bfin_write32(DMA7_NEXT_DESC_PTR, val)
+#define bfin_read_DMA7_START_ADDR() 		bfin_read32(DMA7_START_ADDR)
+#define bfin_write_DMA7_START_ADDR(val) 	bfin_write32(DMA7_START_ADDR, val)
+#define bfin_read_DMA7_CONFIG()			bfin_read32(DMA7_CONFIG)
+#define bfin_write_DMA7_CONFIG(val)		bfin_write32(DMA7_CONFIG, val)
+#define bfin_read_DMA7_X_COUNT()		bfin_read32(DMA7_X_COUNT)
+#define bfin_write_DMA7_X_COUNT(val)		bfin_write32(DMA7_X_COUNT, val)
+#define bfin_read_DMA7_X_MODIFY()		bfin_read32(DMA7_X_MODIFY)
+#define bfin_write_DMA7_X_MODIFY(val) 		bfin_write32(DMA7_X_MODIFY, val)
+#define bfin_read_DMA7_Y_COUNT()		bfin_read32(DMA7_Y_COUNT)
+#define bfin_write_DMA7_Y_COUNT(val)		bfin_write32(DMA7_Y_COUNT, val)
+#define bfin_read_DMA7_Y_MODIFY()		bfin_read32(DMA7_Y_MODIFY)
+#define bfin_write_DMA7_Y_MODIFY(val) 		bfin_write32(DMA7_Y_MODIFY, val)
+#define bfin_read_DMA7_CURR_DESC_PTR() 		bfin_read32(DMA7_CURR_DESC_PTR)
+#define bfin_write_DMA7_CURR_DESC_PTR(val) 	bfin_write32(DMA7_CURR_DESC_PTR, val)
+#define bfin_read_DMA7_PREV_DESC_PTR() 		bfin_read32(DMA7_PREV_DESC_PTR)
+#define bfin_write_DMA7_PREV_DESC_PTR(val) 	bfin_write32(DMA7_PREV_DESC_PTR, val)
+#define bfin_read_DMA7_CURR_ADDR() 		bfin_read32(DMA7_CURR_ADDR)
+#define bfin_write_DMA7_CURR_ADDR(val) 		bfin_write32(DMA7_CURR_ADDR, val)
+#define bfin_read_DMA7_IRQ_STATUS()		bfin_read32(DMA7_IRQ_STATUS)
+#define bfin_write_DMA7_IRQ_STATUS(val)		bfin_write32(DMA7_IRQ_STATUS, val)
+#define bfin_read_DMA7_CURR_X_COUNT()		bfin_read32(DMA7_CURR_X_COUNT)
+#define bfin_write_DMA7_CURR_X_COUNT(val)	bfin_write32(DMA7_CURR_X_COUNT, val)
+#define bfin_read_DMA7_CURR_Y_COUNT()		bfin_read32(DMA7_CURR_Y_COUNT)
+#define bfin_write_DMA7_CURR_Y_COUNT(val)	bfin_write32(DMA7_CURR_Y_COUNT, val)
+#define bfin_read_DMA7_BWL_COUNT()		bfin_read32(DMA7_BWL_COUNT)
+#define bfin_write_DMA7_BWL_COUNT(val)		bfin_write32(DMA7_BWL_COUNT, val)
+#define bfin_read_DMA7_CURR_BWL_COUNT()		bfin_read32(DMA7_CURR_BWL_COUNT)
+#define bfin_write_DMA7_CURR_BWL_COUNT(val)	bfin_write32(DMA7_CURR_BWL_COUNT, val)
+#define bfin_read_DMA7_BWM_COUNT()		bfin_read32(DMA7_BWM_COUNT)
+#define bfin_write_DMA7_BWM_COUNT(val)		bfin_write32(DMA7_BWM_COUNT, val)
+#define bfin_read_DMA7_CURR_BWM_COUNT()		bfin_read32(DMA7_CURR_BWM_COUNT)
+#define bfin_write_DMA7_CURR_BWM_COUNT(val)	bfin_write32(DMA7_CURR_BWM_COUNT, val)
+
+/* DMA Channel 8 Registers */
+
+#define bfin_read_DMA8_NEXT_DESC_PTR() 		bfin_read32(DMA8_NEXT_DESC_PTR)
+#define bfin_write_DMA8_NEXT_DESC_PTR(val) 	bfin_write32(DMA8_NEXT_DESC_PTR, val)
+#define bfin_read_DMA8_START_ADDR() 		bfin_read32(DMA8_START_ADDR)
+#define bfin_write_DMA8_START_ADDR(val) 	bfin_write32(DMA8_START_ADDR, val)
+#define bfin_read_DMA8_CONFIG()			bfin_read32(DMA8_CONFIG)
+#define bfin_write_DMA8_CONFIG(val)		bfin_write32(DMA8_CONFIG, val)
+#define bfin_read_DMA8_X_COUNT()		bfin_read32(DMA8_X_COUNT)
+#define bfin_write_DMA8_X_COUNT(val)		bfin_write32(DMA8_X_COUNT, val)
+#define bfin_read_DMA8_X_MODIFY()		bfin_read32(DMA8_X_MODIFY)
+#define bfin_write_DMA8_X_MODIFY(val) 		bfin_write32(DMA8_X_MODIFY, val)
+#define bfin_read_DMA8_Y_COUNT()		bfin_read32(DMA8_Y_COUNT)
+#define bfin_write_DMA8_Y_COUNT(val)		bfin_write32(DMA8_Y_COUNT, val)
+#define bfin_read_DMA8_Y_MODIFY()		bfin_read32(DMA8_Y_MODIFY)
+#define bfin_write_DMA8_Y_MODIFY(val) 		bfin_write32(DMA8_Y_MODIFY, val)
+#define bfin_read_DMA8_CURR_DESC_PTR() 		bfin_read32(DMA8_CURR_DESC_PTR)
+#define bfin_write_DMA8_CURR_DESC_PTR(val) 	bfin_write32(DMA8_CURR_DESC_PTR, val)
+#define bfin_read_DMA8_PREV_DESC_PTR() 		bfin_read32(DMA8_PREV_DESC_PTR)
+#define bfin_write_DMA8_PREV_DESC_PTR(val) 	bfin_write32(DMA8_PREV_DESC_PTR, val)
+#define bfin_read_DMA8_CURR_ADDR() 		bfin_read32(DMA8_CURR_ADDR)
+#define bfin_write_DMA8_CURR_ADDR(val) 		bfin_write32(DMA8_CURR_ADDR, val)
+#define bfin_read_DMA8_IRQ_STATUS()		bfin_read32(DMA8_IRQ_STATUS)
+#define bfin_write_DMA8_IRQ_STATUS(val)		bfin_write32(DMA8_IRQ_STATUS, val)
+#define bfin_read_DMA8_CURR_X_COUNT()		bfin_read32(DMA8_CURR_X_COUNT)
+#define bfin_write_DMA8_CURR_X_COUNT(val)	bfin_write32(DMA8_CURR_X_COUNT, val)
+#define bfin_read_DMA8_CURR_Y_COUNT()		bfin_read32(DMA8_CURR_Y_COUNT)
+#define bfin_write_DMA8_CURR_Y_COUNT(val)	bfin_write32(DMA8_CURR_Y_COUNT, val)
+#define bfin_read_DMA8_BWL_COUNT()		bfin_read32(DMA8_BWL_COUNT)
+#define bfin_write_DMA8_BWL_COUNT(val)		bfin_write32(DMA8_BWL_COUNT, val)
+#define bfin_read_DMA8_CURR_BWL_COUNT()		bfin_read32(DMA8_CURR_BWL_COUNT)
+#define bfin_write_DMA8_CURR_BWL_COUNT(val)	bfin_write32(DMA8_CURR_BWL_COUNT, val)
+#define bfin_read_DMA8_BWM_COUNT()		bfin_read32(DMA8_BWM_COUNT)
+#define bfin_write_DMA8_BWM_COUNT(val)		bfin_write32(DMA8_BWM_COUNT, val)
+#define bfin_read_DMA8_CURR_BWM_COUNT()		bfin_read32(DMA8_CURR_BWM_COUNT)
+#define bfin_write_DMA8_CURR_BWM_COUNT(val)	bfin_write32(DMA8_CURR_BWM_COUNT, val)
+
+/* DMA Channel 9 Registers */
+
+#define bfin_read_DMA9_NEXT_DESC_PTR() 		bfin_read32(DMA9_NEXT_DESC_PTR)
+#define bfin_write_DMA9_NEXT_DESC_PTR(val) 	bfin_write32(DMA9_NEXT_DESC_PTR, val)
+#define bfin_read_DMA9_START_ADDR() 		bfin_read32(DMA9_START_ADDR)
+#define bfin_write_DMA9_START_ADDR(val) 	bfin_write32(DMA9_START_ADDR, val)
+#define bfin_read_DMA9_CONFIG()			bfin_read32(DMA9_CONFIG)
+#define bfin_write_DMA9_CONFIG(val)		bfin_write32(DMA9_CONFIG, val)
+#define bfin_read_DMA9_X_COUNT()		bfin_read32(DMA9_X_COUNT)
+#define bfin_write_DMA9_X_COUNT(val)		bfin_write32(DMA9_X_COUNT, val)
+#define bfin_read_DMA9_X_MODIFY()		bfin_read32(DMA9_X_MODIFY)
+#define bfin_write_DMA9_X_MODIFY(val) 		bfin_write32(DMA9_X_MODIFY, val)
+#define bfin_read_DMA9_Y_COUNT()		bfin_read32(DMA9_Y_COUNT)
+#define bfin_write_DMA9_Y_COUNT(val)		bfin_write32(DMA9_Y_COUNT, val)
+#define bfin_read_DMA9_Y_MODIFY()		bfin_read32(DMA9_Y_MODIFY)
+#define bfin_write_DMA9_Y_MODIFY(val) 		bfin_write32(DMA9_Y_MODIFY, val)
+#define bfin_read_DMA9_CURR_DESC_PTR() 		bfin_read32(DMA9_CURR_DESC_PTR)
+#define bfin_write_DMA9_CURR_DESC_PTR(val) 	bfin_write32(DMA9_CURR_DESC_PTR, val)
+#define bfin_read_DMA9_PREV_DESC_PTR() 		bfin_read32(DMA9_PREV_DESC_PTR)
+#define bfin_write_DMA9_PREV_DESC_PTR(val) 	bfin_write32(DMA9_PREV_DESC_PTR, val)
+#define bfin_read_DMA9_CURR_ADDR() 		bfin_read32(DMA9_CURR_ADDR)
+#define bfin_write_DMA9_CURR_ADDR(val) 		bfin_write32(DMA9_CURR_ADDR, val)
+#define bfin_read_DMA9_IRQ_STATUS()		bfin_read32(DMA9_IRQ_STATUS)
+#define bfin_write_DMA9_IRQ_STATUS(val)		bfin_write32(DMA9_IRQ_STATUS, val)
+#define bfin_read_DMA9_CURR_X_COUNT()		bfin_read32(DMA9_CURR_X_COUNT)
+#define bfin_write_DMA9_CURR_X_COUNT(val)	bfin_write32(DMA9_CURR_X_COUNT, val)
+#define bfin_read_DMA9_CURR_Y_COUNT()		bfin_read32(DMA9_CURR_Y_COUNT)
+#define bfin_write_DMA9_CURR_Y_COUNT(val)	bfin_write32(DMA9_CURR_Y_COUNT, val)
+#define bfin_read_DMA9_BWL_COUNT()		bfin_read32(DMA9_BWL_COUNT)
+#define bfin_write_DMA9_BWL_COUNT(val)		bfin_write32(DMA9_BWL_COUNT, val)
+#define bfin_read_DMA9_CURR_BWL_COUNT()		bfin_read32(DMA9_CURR_BWL_COUNT)
+#define bfin_write_DMA9_CURR_BWL_COUNT(val)	bfin_write32(DMA9_CURR_BWL_COUNT, val)
+#define bfin_read_DMA9_BWM_COUNT()		bfin_read32(DMA9_BWM_COUNT)
+#define bfin_write_DMA9_BWM_COUNT(val)		bfin_write32(DMA9_BWM_COUNT, val)
+#define bfin_read_DMA9_CURR_BWM_COUNT()		bfin_read32(DMA9_CURR_BWM_COUNT)
+#define bfin_write_DMA9_CURR_BWM_COUNT(val)	bfin_write32(DMA9_CURR_BWM_COUNT, val)
+
+/* DMA Channel 10 Registers */
+
+#define bfin_read_DMA10_NEXT_DESC_PTR() 	bfin_read32(DMA10_NEXT_DESC_PTR)
+#define bfin_write_DMA10_NEXT_DESC_PTR(val) 	bfin_write32(DMA10_NEXT_DESC_PTR, val)
+#define bfin_read_DMA10_START_ADDR() 		bfin_read32(DMA10_START_ADDR)
+#define bfin_write_DMA10_START_ADDR(val) 	bfin_write32(DMA10_START_ADDR, val)
+#define bfin_read_DMA10_CONFIG()		bfin_read32(DMA10_CONFIG)
+#define bfin_write_DMA10_CONFIG(val)		bfin_write32(DMA10_CONFIG, val)
+#define bfin_read_DMA10_X_COUNT()		bfin_read32(DMA10_X_COUNT)
+#define bfin_write_DMA10_X_COUNT(val)		bfin_write32(DMA10_X_COUNT, val)
+#define bfin_read_DMA10_X_MODIFY()		bfin_read32(DMA10_X_MODIFY)
+#define bfin_write_DMA10_X_MODIFY(val) 		bfin_write32(DMA10_X_MODIFY, val)
+#define bfin_read_DMA10_Y_COUNT()		bfin_read32(DMA10_Y_COUNT)
+#define bfin_write_DMA10_Y_COUNT(val)		bfin_write32(DMA10_Y_COUNT, val)
+#define bfin_read_DMA10_Y_MODIFY()		bfin_read32(DMA10_Y_MODIFY)
+#define bfin_write_DMA10_Y_MODIFY(val) 		bfin_write32(DMA10_Y_MODIFY, val)
+#define bfin_read_DMA10_CURR_DESC_PTR() 	bfin_read32(DMA10_CURR_DESC_PTR)
+#define bfin_write_DMA10_CURR_DESC_PTR(val) 	bfin_write32(DMA10_CURR_DESC_PTR, val)
+#define bfin_read_DMA10_PREV_DESC_PTR() 	bfin_read32(DMA10_PREV_DESC_PTR)
+#define bfin_write_DMA10_PREV_DESC_PTR(val) 	bfin_write32(DMA10_PREV_DESC_PTR, val)
+#define bfin_read_DMA10_CURR_ADDR() 		bfin_read32(DMA10_CURR_ADDR)
+#define bfin_write_DMA10_CURR_ADDR(val) 	bfin_write32(DMA10_CURR_ADDR, val)
+#define bfin_read_DMA10_IRQ_STATUS()		bfin_read32(DMA10_IRQ_STATUS)
+#define bfin_write_DMA10_IRQ_STATUS(val)	bfin_write32(DMA10_IRQ_STATUS, val)
+#define bfin_read_DMA10_CURR_X_COUNT()		bfin_read32(DMA10_CURR_X_COUNT)
+#define bfin_write_DMA10_CURR_X_COUNT(val)	bfin_write32(DMA10_CURR_X_COUNT, val)
+#define bfin_read_DMA10_CURR_Y_COUNT()		bfin_read32(DMA10_CURR_Y_COUNT)
+#define bfin_write_DMA10_CURR_Y_COUNT(val)	bfin_write32(DMA10_CURR_Y_COUNT, val)
+#define bfin_read_DMA10_BWL_COUNT()		bfin_read32(DMA10_BWL_COUNT)
+#define bfin_write_DMA10_BWL_COUNT(val)		bfin_write32(DMA10_BWL_COUNT, val)
+#define bfin_read_DMA10_CURR_BWL_COUNT()	bfin_read32(DMA10_CURR_BWL_COUNT)
+#define bfin_write_DMA10_CURR_BWL_COUNT(val)	bfin_write32(DMA10_CURR_BWL_COUNT, val)
+#define bfin_read_DMA10_BWM_COUNT()		bfin_read32(DMA10_BWM_COUNT)
+#define bfin_write_DMA10_BWM_COUNT(val)		bfin_write32(DMA10_BWM_COUNT, val)
+#define bfin_read_DMA10_CURR_BWM_COUNT()	bfin_read32(DMA10_CURR_BWM_COUNT)
+#define bfin_write_DMA10_CURR_BWM_COUNT(val)	bfin_write32(DMA10_CURR_BWM_COUNT, val)
+
+/* DMA Channel 11 Registers */
+
+#define bfin_read_DMA11_NEXT_DESC_PTR() 	bfin_read32(DMA11_NEXT_DESC_PTR)
+#define bfin_write_DMA11_NEXT_DESC_PTR(val) 	bfin_write32(DMA11_NEXT_DESC_PTR, val)
+#define bfin_read_DMA11_START_ADDR() 		bfin_read32(DMA11_START_ADDR)
+#define bfin_write_DMA11_START_ADDR(val) 	bfin_write32(DMA11_START_ADDR, val)
+#define bfin_read_DMA11_CONFIG()		bfin_read32(DMA11_CONFIG)
+#define bfin_write_DMA11_CONFIG(val)		bfin_write32(DMA11_CONFIG, val)
+#define bfin_read_DMA11_X_COUNT()		bfin_read32(DMA11_X_COUNT)
+#define bfin_write_DMA11_X_COUNT(val)		bfin_write32(DMA11_X_COUNT, val)
+#define bfin_read_DMA11_X_MODIFY()		bfin_read32(DMA11_X_MODIFY)
+#define bfin_write_DMA11_X_MODIFY(val) 		bfin_write32(DMA11_X_MODIFY, val)
+#define bfin_read_DMA11_Y_COUNT()		bfin_read32(DMA11_Y_COUNT)
+#define bfin_write_DMA11_Y_COUNT(val)		bfin_write32(DMA11_Y_COUNT, val)
+#define bfin_read_DMA11_Y_MODIFY()		bfin_read32(DMA11_Y_MODIFY)
+#define bfin_write_DMA11_Y_MODIFY(val) 		bfin_write32(DMA11_Y_MODIFY, val)
+#define bfin_read_DMA11_CURR_DESC_PTR() 	bfin_read32(DMA11_CURR_DESC_PTR)
+#define bfin_write_DMA11_CURR_DESC_PTR(val) 	bfin_write32(DMA11_CURR_DESC_PTR, val)
+#define bfin_read_DMA11_PREV_DESC_PTR() 	bfin_read32(DMA11_PREV_DESC_PTR)
+#define bfin_write_DMA11_PREV_DESC_PTR(val) 	bfin_write32(DMA11_PREV_DESC_PTR, val)
+#define bfin_read_DMA11_CURR_ADDR() 		bfin_read32(DMA11_CURR_ADDR)
+#define bfin_write_DMA11_CURR_ADDR(val) 	bfin_write32(DMA11_CURR_ADDR, val)
+#define bfin_read_DMA11_IRQ_STATUS()		bfin_read32(DMA11_IRQ_STATUS)
+#define bfin_write_DMA11_IRQ_STATUS(val)	bfin_write32(DMA11_IRQ_STATUS, val)
+#define bfin_read_DMA11_CURR_X_COUNT()		bfin_read32(DMA11_CURR_X_COUNT)
+#define bfin_write_DMA11_CURR_X_COUNT(val)	bfin_write32(DMA11_CURR_X_COUNT, val)
+#define bfin_read_DMA11_CURR_Y_COUNT()		bfin_read32(DMA11_CURR_Y_COUNT)
+#define bfin_write_DMA11_CURR_Y_COUNT(val)	bfin_write32(DMA11_CURR_Y_COUNT, val)
+#define bfin_read_DMA11_BWL_COUNT()		bfin_read32(DMA11_BWL_COUNT)
+#define bfin_write_DMA11_BWL_COUNT(val)		bfin_write32(DMA11_BWL_COUNT, val)
+#define bfin_read_DMA11_CURR_BWL_COUNT()	bfin_read32(DMA11_CURR_BWL_COUNT)
+#define bfin_write_DMA11_CURR_BWL_COUNT(val)	bfin_write32(DMA11_CURR_BWL_COUNT, val)
+#define bfin_read_DMA11_BWM_COUNT()		bfin_read32(DMA11_BWM_COUNT)
+#define bfin_write_DMA11_BWM_COUNT(val)		bfin_write32(DMA11_BWM_COUNT, val)
+#define bfin_read_DMA11_CURR_BWM_COUNT()	bfin_read32(DMA11_CURR_BWM_COUNT)
+#define bfin_write_DMA11_CURR_BWM_COUNT(val)	bfin_write32(DMA11_CURR_BWM_COUNT, val)
+
+/* DMA Channel 12 Registers */
+
+#define bfin_read_DMA12_NEXT_DESC_PTR() 	bfin_read32(DMA12_NEXT_DESC_PTR)
+#define bfin_write_DMA12_NEXT_DESC_PTR(val) 	bfin_write32(DMA12_NEXT_DESC_PTR, val)
+#define bfin_read_DMA12_START_ADDR() 		bfin_read32(DMA12_START_ADDR)
+#define bfin_write_DMA12_START_ADDR(val) 	bfin_write32(DMA12_START_ADDR, val)
+#define bfin_read_DMA12_CONFIG()		bfin_read32(DMA12_CONFIG)
+#define bfin_write_DMA12_CONFIG(val)		bfin_write32(DMA12_CONFIG, val)
+#define bfin_read_DMA12_X_COUNT()		bfin_read32(DMA12_X_COUNT)
+#define bfin_write_DMA12_X_COUNT(val)		bfin_write32(DMA12_X_COUNT, val)
+#define bfin_read_DMA12_X_MODIFY()		bfin_read32(DMA12_X_MODIFY)
+#define bfin_write_DMA12_X_MODIFY(val) 		bfin_write32(DMA12_X_MODIFY, val)
+#define bfin_read_DMA12_Y_COUNT()		bfin_read32(DMA12_Y_COUNT)
+#define bfin_write_DMA12_Y_COUNT(val)		bfin_write32(DMA12_Y_COUNT, val)
+#define bfin_read_DMA12_Y_MODIFY()		bfin_read32(DMA12_Y_MODIFY)
+#define bfin_write_DMA12_Y_MODIFY(val) 		bfin_write32(DMA12_Y_MODIFY, val)
+#define bfin_read_DMA12_CURR_DESC_PTR() 	bfin_read32(DMA12_CURR_DESC_PTR)
+#define bfin_write_DMA12_CURR_DESC_PTR(val) 	bfin_write32(DMA12_CURR_DESC_PTR, val)
+#define bfin_read_DMA12_PREV_DESC_PTR() 	bfin_read32(DMA12_PREV_DESC_PTR)
+#define bfin_write_DMA12_PREV_DESC_PTR(val) 	bfin_write32(DMA12_PREV_DESC_PTR, val)
+#define bfin_read_DMA12_CURR_ADDR() 		bfin_read32(DMA12_CURR_ADDR)
+#define bfin_write_DMA12_CURR_ADDR(val) 	bfin_write32(DMA12_CURR_ADDR, val)
+#define bfin_read_DMA12_IRQ_STATUS()		bfin_read32(DMA12_IRQ_STATUS)
+#define bfin_write_DMA12_IRQ_STATUS(val)	bfin_write32(DMA12_IRQ_STATUS, val)
+#define bfin_read_DMA12_CURR_X_COUNT()		bfin_read32(DMA12_CURR_X_COUNT)
+#define bfin_write_DMA12_CURR_X_COUNT(val)	bfin_write32(DMA12_CURR_X_COUNT, val)
+#define bfin_read_DMA12_CURR_Y_COUNT()		bfin_read32(DMA12_CURR_Y_COUNT)
+#define bfin_write_DMA12_CURR_Y_COUNT(val)	bfin_write32(DMA12_CURR_Y_COUNT, val)
+#define bfin_read_DMA12_BWL_COUNT()		bfin_read32(DMA12_BWL_COUNT)
+#define bfin_write_DMA12_BWL_COUNT(val)		bfin_write32(DMA12_BWL_COUNT, val)
+#define bfin_read_DMA12_CURR_BWL_COUNT()	bfin_read32(DMA12_CURR_BWL_COUNT)
+#define bfin_write_DMA12_CURR_BWL_COUNT(val)	bfin_write32(DMA12_CURR_BWL_COUNT, val)
+#define bfin_read_DMA12_BWM_COUNT()		bfin_read32(DMA12_BWM_COUNT)
+#define bfin_write_DMA12_BWM_COUNT(val)		bfin_write32(DMA12_BWM_COUNT, val)
+#define bfin_read_DMA12_CURR_BWM_COUNT()	bfin_read32(DMA12_CURR_BWM_COUNT)
+#define bfin_write_DMA12_CURR_BWM_COUNT(val)	bfin_write32(DMA12_CURR_BWM_COUNT, val)
+
+/* DMA Channel 13 Registers */
+
+#define bfin_read_DMA13_NEXT_DESC_PTR() 	bfin_read32(DMA13_NEXT_DESC_PTR)
+#define bfin_write_DMA13_NEXT_DESC_PTR(val) 	bfin_write32(DMA13_NEXT_DESC_PTR, val)
+#define bfin_read_DMA13_START_ADDR() 		bfin_read32(DMA13_START_ADDR)
+#define bfin_write_DMA13_START_ADDR(val) 	bfin_write32(DMA13_START_ADDR, val)
+#define bfin_read_DMA13_CONFIG()		bfin_read32(DMA13_CONFIG)
+#define bfin_write_DMA13_CONFIG(val)		bfin_write32(DMA13_CONFIG, val)
+#define bfin_read_DMA13_X_COUNT()		bfin_read32(DMA13_X_COUNT)
+#define bfin_write_DMA13_X_COUNT(val)		bfin_write32(DMA13_X_COUNT, val)
+#define bfin_read_DMA13_X_MODIFY()		bfin_read32(DMA13_X_MODIFY)
+#define bfin_write_DMA13_X_MODIFY(val) 		bfin_write32(DMA13_X_MODIFY, val)
+#define bfin_read_DMA13_Y_COUNT()		bfin_read32(DMA13_Y_COUNT)
+#define bfin_write_DMA13_Y_COUNT(val)		bfin_write32(DMA13_Y_COUNT, val)
+#define bfin_read_DMA13_Y_MODIFY()		bfin_read32(DMA13_Y_MODIFY)
+#define bfin_write_DMA13_Y_MODIFY(val) 		bfin_write32(DMA13_Y_MODIFY, val)
+#define bfin_read_DMA13_CURR_DESC_PTR() 	bfin_read32(DMA13_CURR_DESC_PTR)
+#define bfin_write_DMA13_CURR_DESC_PTR(val) 	bfin_write32(DMA13_CURR_DESC_PTR, val)
+#define bfin_read_DMA13_PREV_DESC_PTR() 	bfin_read32(DMA13_PREV_DESC_PTR)
+#define bfin_write_DMA13_PREV_DESC_PTR(val) 	bfin_write32(DMA13_PREV_DESC_PTR, val)
+#define bfin_read_DMA13_CURR_ADDR() 		bfin_read32(DMA13_CURR_ADDR)
+#define bfin_write_DMA13_CURR_ADDR(val) 	bfin_write32(DMA13_CURR_ADDR, val)
+#define bfin_read_DMA13_IRQ_STATUS()		bfin_read32(DMA13_IRQ_STATUS)
+#define bfin_write_DMA13_IRQ_STATUS(val)	bfin_write32(DMA13_IRQ_STATUS, val)
+#define bfin_read_DMA13_CURR_X_COUNT()		bfin_read32(DMA13_CURR_X_COUNT)
+#define bfin_write_DMA13_CURR_X_COUNT(val)	bfin_write32(DMA13_CURR_X_COUNT, val)
+#define bfin_read_DMA13_CURR_Y_COUNT()		bfin_read32(DMA13_CURR_Y_COUNT)
+#define bfin_write_DMA13_CURR_Y_COUNT(val)	bfin_write32(DMA13_CURR_Y_COUNT, val)
+#define bfin_read_DMA13_BWL_COUNT()		bfin_read32(DMA13_BWL_COUNT)
+#define bfin_write_DMA13_BWL_COUNT(val)		bfin_write32(DMA13_BWL_COUNT, val)
+#define bfin_read_DMA13_CURR_BWL_COUNT()	bfin_read32(DMA13_CURR_BWL_COUNT)
+#define bfin_write_DMA13_CURR_BWL_COUNT(val)	bfin_write32(DMA13_CURR_BWL_COUNT, val)
+#define bfin_read_DMA13_BWM_COUNT()		bfin_read32(DMA13_BWM_COUNT)
+#define bfin_write_DMA13_BWM_COUNT(val)		bfin_write32(DMA13_BWM_COUNT, val)
+#define bfin_read_DMA13_CURR_BWM_COUNT()	bfin_read32(DMA13_CURR_BWM_COUNT)
+#define bfin_write_DMA13_CURR_BWM_COUNT(val)	bfin_write32(DMA13_CURR_BWM_COUNT, val)
+
+/* DMA Channel 14 Registers */
+
+#define bfin_read_DMA14_NEXT_DESC_PTR() 	bfin_read32(DMA14_NEXT_DESC_PTR)
+#define bfin_write_DMA14_NEXT_DESC_PTR(val) 	bfin_write32(DMA14_NEXT_DESC_PTR, val)
+#define bfin_read_DMA14_START_ADDR() 		bfin_read32(DMA14_START_ADDR)
+#define bfin_write_DMA14_START_ADDR(val) 	bfin_write32(DMA14_START_ADDR, val)
+#define bfin_read_DMA14_CONFIG()		bfin_read32(DMA14_CONFIG)
+#define bfin_write_DMA14_CONFIG(val)		bfin_write32(DMA14_CONFIG, val)
+#define bfin_read_DMA14_X_COUNT()		bfin_read32(DMA14_X_COUNT)
+#define bfin_write_DMA14_X_COUNT(val)		bfin_write32(DMA14_X_COUNT, val)
+#define bfin_read_DMA14_X_MODIFY()		bfin_read32(DMA14_X_MODIFY)
+#define bfin_write_DMA14_X_MODIFY(val) 		bfin_write32(DMA14_X_MODIFY, val)
+#define bfin_read_DMA14_Y_COUNT()		bfin_read32(DMA14_Y_COUNT)
+#define bfin_write_DMA14_Y_COUNT(val)		bfin_write32(DMA14_Y_COUNT, val)
+#define bfin_read_DMA14_Y_MODIFY()		bfin_read32(DMA14_Y_MODIFY)
+#define bfin_write_DMA14_Y_MODIFY(val) 		bfin_write32(DMA14_Y_MODIFY, val)
+#define bfin_read_DMA14_CURR_DESC_PTR() 	bfin_read32(DMA14_CURR_DESC_PTR)
+#define bfin_write_DMA14_CURR_DESC_PTR(val) 	bfin_write32(DMA14_CURR_DESC_PTR, val)
+#define bfin_read_DMA14_PREV_DESC_PTR() 	bfin_read32(DMA14_PREV_DESC_PTR)
+#define bfin_write_DMA14_PREV_DESC_PTR(val) 	bfin_write32(DMA14_PREV_DESC_PTR, val)
+#define bfin_read_DMA14_CURR_ADDR() 		bfin_read32(DMA14_CURR_ADDR)
+#define bfin_write_DMA14_CURR_ADDR(val) 	bfin_write32(DMA14_CURR_ADDR, val)
+#define bfin_read_DMA14_IRQ_STATUS()		bfin_read32(DMA14_IRQ_STATUS)
+#define bfin_write_DMA14_IRQ_STATUS(val)	bfin_write32(DMA14_IRQ_STATUS, val)
+#define bfin_read_DMA14_CURR_X_COUNT()		bfin_read32(DMA14_CURR_X_COUNT)
+#define bfin_write_DMA14_CURR_X_COUNT(val)	bfin_write32(DMA14_CURR_X_COUNT, val)
+#define bfin_read_DMA14_CURR_Y_COUNT()		bfin_read32(DMA14_CURR_Y_COUNT)
+#define bfin_write_DMA14_CURR_Y_COUNT(val)	bfin_write32(DMA14_CURR_Y_COUNT, val)
+#define bfin_read_DMA14_BWL_COUNT()		bfin_read32(DMA14_BWL_COUNT)
+#define bfin_write_DMA14_BWL_COUNT(val)		bfin_write32(DMA14_BWL_COUNT, val)
+#define bfin_read_DMA14_CURR_BWL_COUNT()	bfin_read32(DMA14_CURR_BWL_COUNT)
+#define bfin_write_DMA14_CURR_BWL_COUNT(val)	bfin_write32(DMA14_CURR_BWL_COUNT, val)
+#define bfin_read_DMA14_BWM_COUNT()		bfin_read32(DMA14_BWM_COUNT)
+#define bfin_write_DMA14_BWM_COUNT(val)		bfin_write32(DMA14_BWM_COUNT, val)
+#define bfin_read_DMA14_CURR_BWM_COUNT()	bfin_read32(DMA14_CURR_BWM_COUNT)
+#define bfin_write_DMA14_CURR_BWM_COUNT(val)	bfin_write32(DMA14_CURR_BWM_COUNT, val)
+
+/* DMA Channel 15 Registers */
+
+#define bfin_read_DMA15_NEXT_DESC_PTR() 	bfin_read32(DMA15_NEXT_DESC_PTR)
+#define bfin_write_DMA15_NEXT_DESC_PTR(val) 	bfin_write32(DMA15_NEXT_DESC_PTR, val)
+#define bfin_read_DMA15_START_ADDR() 		bfin_read32(DMA15_START_ADDR)
+#define bfin_write_DMA15_START_ADDR(val) 	bfin_write32(DMA15_START_ADDR, val)
+#define bfin_read_DMA15_CONFIG()		bfin_read32(DMA15_CONFIG)
+#define bfin_write_DMA15_CONFIG(val)		bfin_write32(DMA15_CONFIG, val)
+#define bfin_read_DMA15_X_COUNT()		bfin_read32(DMA15_X_COUNT)
+#define bfin_write_DMA15_X_COUNT(val)		bfin_write32(DMA15_X_COUNT, val)
+#define bfin_read_DMA15_X_MODIFY()		bfin_read32(DMA15_X_MODIFY)
+#define bfin_write_DMA15_X_MODIFY(val) 		bfin_write32(DMA15_X_MODIFY, val)
+#define bfin_read_DMA15_Y_COUNT()		bfin_read32(DMA15_Y_COUNT)
+#define bfin_write_DMA15_Y_COUNT(val)		bfin_write32(DMA15_Y_COUNT, val)
+#define bfin_read_DMA15_Y_MODIFY()		bfin_read32(DMA15_Y_MODIFY)
+#define bfin_write_DMA15_Y_MODIFY(val) 		bfin_write32(DMA15_Y_MODIFY, val)
+#define bfin_read_DMA15_CURR_DESC_PTR() 	bfin_read32(DMA15_CURR_DESC_PTR)
+#define bfin_write_DMA15_CURR_DESC_PTR(val) 	bfin_write32(DMA15_CURR_DESC_PTR, val)
+#define bfin_read_DMA15_PREV_DESC_PTR() 	bfin_read32(DMA15_PREV_DESC_PTR)
+#define bfin_write_DMA15_PREV_DESC_PTR(val) 	bfin_write32(DMA15_PREV_DESC_PTR, val)
+#define bfin_read_DMA15_CURR_ADDR() 		bfin_read32(DMA15_CURR_ADDR)
+#define bfin_write_DMA15_CURR_ADDR(val) 	bfin_write32(DMA15_CURR_ADDR, val)
+#define bfin_read_DMA15_IRQ_STATUS()		bfin_read32(DMA15_IRQ_STATUS)
+#define bfin_write_DMA15_IRQ_STATUS(val)	bfin_write32(DMA15_IRQ_STATUS, val)
+#define bfin_read_DMA15_CURR_X_COUNT()		bfin_read32(DMA15_CURR_X_COUNT)
+#define bfin_write_DMA15_CURR_X_COUNT(val)	bfin_write32(DMA15_CURR_X_COUNT, val)
+#define bfin_read_DMA15_CURR_Y_COUNT()		bfin_read32(DMA15_CURR_Y_COUNT)
+#define bfin_write_DMA15_CURR_Y_COUNT(val)	bfin_write32(DMA15_CURR_Y_COUNT, val)
+#define bfin_read_DMA15_BWL_COUNT()		bfin_read32(DMA15_BWL_COUNT)
+#define bfin_write_DMA15_BWL_COUNT(val)		bfin_write32(DMA15_BWL_COUNT, val)
+#define bfin_read_DMA15_CURR_BWL_COUNT()	bfin_read32(DMA15_CURR_BWL_COUNT)
+#define bfin_write_DMA15_CURR_BWL_COUNT(val)	bfin_write32(DMA15_CURR_BWL_COUNT, val)
+#define bfin_read_DMA15_BWM_COUNT()		bfin_read32(DMA15_BWM_COUNT)
+#define bfin_write_DMA15_BWM_COUNT(val)		bfin_write32(DMA15_BWM_COUNT, val)
+#define bfin_read_DMA15_CURR_BWM_COUNT()	bfin_read32(DMA15_CURR_BWM_COUNT)
+#define bfin_write_DMA15_CURR_BWM_COUNT(val)	bfin_write32(DMA15_CURR_BWM_COUNT, val)
+
+/* DMA Channel 16 Registers */
+
+#define bfin_read_DMA16_NEXT_DESC_PTR() 	bfin_read32(DMA16_NEXT_DESC_PTR)
+#define bfin_write_DMA16_NEXT_DESC_PTR(val) 	bfin_write32(DMA16_NEXT_DESC_PTR, val)
+#define bfin_read_DMA16_START_ADDR() 		bfin_read32(DMA16_START_ADDR)
+#define bfin_write_DMA16_START_ADDR(val) 	bfin_write32(DMA16_START_ADDR, val)
+#define bfin_read_DMA16_CONFIG()		bfin_read32(DMA16_CONFIG)
+#define bfin_write_DMA16_CONFIG(val)		bfin_write32(DMA16_CONFIG, val)
+#define bfin_read_DMA16_X_COUNT()		bfin_read32(DMA16_X_COUNT)
+#define bfin_write_DMA16_X_COUNT(val)		bfin_write32(DMA16_X_COUNT, val)
+#define bfin_read_DMA16_X_MODIFY()		bfin_read32(DMA16_X_MODIFY)
+#define bfin_write_DMA16_X_MODIFY(val) 		bfin_write32(DMA16_X_MODIFY, val)
+#define bfin_read_DMA16_Y_COUNT()		bfin_read32(DMA16_Y_COUNT)
+#define bfin_write_DMA16_Y_COUNT(val)		bfin_write32(DMA16_Y_COUNT, val)
+#define bfin_read_DMA16_Y_MODIFY()		bfin_read32(DMA16_Y_MODIFY)
+#define bfin_write_DMA16_Y_MODIFY(val) 		bfin_write32(DMA16_Y_MODIFY, val)
+#define bfin_read_DMA16_CURR_DESC_PTR() 	bfin_read32(DMA16_CURR_DESC_PTR)
+#define bfin_write_DMA16_CURR_DESC_PTR(val) 	bfin_write32(DMA16_CURR_DESC_PTR, val)
+#define bfin_read_DMA16_PREV_DESC_PTR() 	bfin_read32(DMA16_PREV_DESC_PTR)
+#define bfin_write_DMA16_PREV_DESC_PTR(val) 	bfin_write32(DMA16_PREV_DESC_PTR, val)
+#define bfin_read_DMA16_CURR_ADDR() 		bfin_read32(DMA16_CURR_ADDR)
+#define bfin_write_DMA16_CURR_ADDR(val) 	bfin_write32(DMA16_CURR_ADDR, val)
+#define bfin_read_DMA16_IRQ_STATUS()		bfin_read32(DMA16_IRQ_STATUS)
+#define bfin_write_DMA16_IRQ_STATUS(val)	bfin_write32(DMA16_IRQ_STATUS, val)
+#define bfin_read_DMA16_CURR_X_COUNT()		bfin_read32(DMA16_CURR_X_COUNT)
+#define bfin_write_DMA16_CURR_X_COUNT(val)	bfin_write32(DMA16_CURR_X_COUNT, val)
+#define bfin_read_DMA16_CURR_Y_COUNT()		bfin_read32(DMA16_CURR_Y_COUNT)
+#define bfin_write_DMA16_CURR_Y_COUNT(val)	bfin_write32(DMA16_CURR_Y_COUNT, val)
+#define bfin_read_DMA16_BWL_COUNT()		bfin_read32(DMA16_BWL_COUNT)
+#define bfin_write_DMA16_BWL_COUNT(val)		bfin_write32(DMA16_BWL_COUNT, val)
+#define bfin_read_DMA16_CURR_BWL_COUNT()	bfin_read32(DMA16_CURR_BWL_COUNT)
+#define bfin_write_DMA16_CURR_BWL_COUNT(val)	bfin_write32(DMA16_CURR_BWL_COUNT, val)
+#define bfin_read_DMA16_BWM_COUNT()		bfin_read32(DMA16_BWM_COUNT)
+#define bfin_write_DMA16_BWM_COUNT(val)		bfin_write32(DMA16_BWM_COUNT, val)
+#define bfin_read_DMA16_CURR_BWM_COUNT()	bfin_read32(DMA16_CURR_BWM_COUNT)
+#define bfin_write_DMA16_CURR_BWM_COUNT(val)	bfin_write32(DMA16_CURR_BWM_COUNT, val)
+
+/* DMA Channel 17 Registers */
+
+#define bfin_read_DMA17_NEXT_DESC_PTR() 	bfin_read32(DMA17_NEXT_DESC_PTR)
+#define bfin_write_DMA17_NEXT_DESC_PTR(val) 	bfin_write32(DMA17_NEXT_DESC_PTR, val)
+#define bfin_read_DMA17_START_ADDR() 		bfin_read32(DMA17_START_ADDR)
+#define bfin_write_DMA17_START_ADDR(val) 	bfin_write32(DMA17_START_ADDR, val)
+#define bfin_read_DMA17_CONFIG()		bfin_read32(DMA17_CONFIG)
+#define bfin_write_DMA17_CONFIG(val)		bfin_write32(DMA17_CONFIG, val)
+#define bfin_read_DMA17_X_COUNT()		bfin_read32(DMA17_X_COUNT)
+#define bfin_write_DMA17_X_COUNT(val)		bfin_write32(DMA17_X_COUNT, val)
+#define bfin_read_DMA17_X_MODIFY()		bfin_read32(DMA17_X_MODIFY)
+#define bfin_write_DMA17_X_MODIFY(val) 		bfin_write32(DMA17_X_MODIFY, val)
+#define bfin_read_DMA17_Y_COUNT()		bfin_read32(DMA17_Y_COUNT)
+#define bfin_write_DMA17_Y_COUNT(val)		bfin_write32(DMA17_Y_COUNT, val)
+#define bfin_read_DMA17_Y_MODIFY()		bfin_read32(DMA17_Y_MODIFY)
+#define bfin_write_DMA17_Y_MODIFY(val) 		bfin_write32(DMA17_Y_MODIFY, val)
+#define bfin_read_DMA17_CURR_DESC_PTR() 	bfin_read32(DMA17_CURR_DESC_PTR)
+#define bfin_write_DMA17_CURR_DESC_PTR(val) 	bfin_write32(DMA17_CURR_DESC_PTR, val)
+#define bfin_read_DMA17_PREV_DESC_PTR() 	bfin_read32(DMA17_PREV_DESC_PTR)
+#define bfin_write_DMA17_PREV_DESC_PTR(val) 	bfin_write32(DMA17_PREV_DESC_PTR, val)
+#define bfin_read_DMA17_CURR_ADDR() 		bfin_read32(DMA17_CURR_ADDR)
+#define bfin_write_DMA17_CURR_ADDR(val) 	bfin_write32(DMA17_CURR_ADDR, val)
+#define bfin_read_DMA17_IRQ_STATUS()		bfin_read32(DMA17_IRQ_STATUS)
+#define bfin_write_DMA17_IRQ_STATUS(val)	bfin_write32(DMA17_IRQ_STATUS, val)
+#define bfin_read_DMA17_CURR_X_COUNT()		bfin_read32(DMA17_CURR_X_COUNT)
+#define bfin_write_DMA17_CURR_X_COUNT(val)	bfin_write32(DMA17_CURR_X_COUNT, val)
+#define bfin_read_DMA17_CURR_Y_COUNT()		bfin_read32(DMA17_CURR_Y_COUNT)
+#define bfin_write_DMA17_CURR_Y_COUNT(val)	bfin_write32(DMA17_CURR_Y_COUNT, val)
+#define bfin_read_DMA17_BWL_COUNT()		bfin_read32(DMA17_BWL_COUNT)
+#define bfin_write_DMA17_BWL_COUNT(val)		bfin_write32(DMA17_BWL_COUNT, val)
+#define bfin_read_DMA17_CURR_BWL_COUNT()	bfin_read32(DMA17_CURR_BWL_COUNT)
+#define bfin_write_DMA17_CURR_BWL_COUNT(val)	bfin_write32(DMA17_CURR_BWL_COUNT, val)
+#define bfin_read_DMA17_BWM_COUNT()		bfin_read32(DMA17_BWM_COUNT)
+#define bfin_write_DMA17_BWM_COUNT(val)		bfin_write32(DMA17_BWM_COUNT, val)
+#define bfin_read_DMA17_CURR_BWM_COUNT()	bfin_read32(DMA17_CURR_BWM_COUNT)
+#define bfin_write_DMA17_CURR_BWM_COUNT(val)	bfin_write32(DMA17_CURR_BWM_COUNT, val)
+
+/* DMA Channel 18 Registers */
+
+#define bfin_read_DMA18_NEXT_DESC_PTR() 	bfin_read32(DMA18_NEXT_DESC_PTR)
+#define bfin_write_DMA18_NEXT_DESC_PTR(val) 	bfin_write32(DMA18_NEXT_DESC_PTR, val)
+#define bfin_read_DMA18_START_ADDR() 		bfin_read32(DMA18_START_ADDR)
+#define bfin_write_DMA18_START_ADDR(val) 	bfin_write32(DMA18_START_ADDR, val)
+#define bfin_read_DMA18_CONFIG()		bfin_read32(DMA18_CONFIG)
+#define bfin_write_DMA18_CONFIG(val)		bfin_write32(DMA18_CONFIG, val)
+#define bfin_read_DMA18_X_COUNT()		bfin_read32(DMA18_X_COUNT)
+#define bfin_write_DMA18_X_COUNT(val)		bfin_write32(DMA18_X_COUNT, val)
+#define bfin_read_DMA18_X_MODIFY()		bfin_read32(DMA18_X_MODIFY)
+#define bfin_write_DMA18_X_MODIFY(val) 		bfin_write32(DMA18_X_MODIFY, val)
+#define bfin_read_DMA18_Y_COUNT()		bfin_read32(DMA18_Y_COUNT)
+#define bfin_write_DMA18_Y_COUNT(val)		bfin_write32(DMA18_Y_COUNT, val)
+#define bfin_read_DMA18_Y_MODIFY()		bfin_read32(DMA18_Y_MODIFY)
+#define bfin_write_DMA18_Y_MODIFY(val) 		bfin_write32(DMA18_Y_MODIFY, val)
+#define bfin_read_DMA18_CURR_DESC_PTR() 	bfin_read32(DMA18_CURR_DESC_PTR)
+#define bfin_write_DMA18_CURR_DESC_PTR(val) 	bfin_write32(DMA18_CURR_DESC_PTR, val)
+#define bfin_read_DMA18_PREV_DESC_PTR() 	bfin_read32(DMA18_PREV_DESC_PTR)
+#define bfin_write_DMA18_PREV_DESC_PTR(val) 	bfin_write32(DMA18_PREV_DESC_PTR, val)
+#define bfin_read_DMA18_CURR_ADDR() 		bfin_read32(DMA18_CURR_ADDR)
+#define bfin_write_DMA18_CURR_ADDR(val) 	bfin_write32(DMA18_CURR_ADDR, val)
+#define bfin_read_DMA18_IRQ_STATUS()		bfin_read32(DMA18_IRQ_STATUS)
+#define bfin_write_DMA18_IRQ_STATUS(val)	bfin_write32(DMA18_IRQ_STATUS, val)
+#define bfin_read_DMA18_CURR_X_COUNT()		bfin_read32(DMA18_CURR_X_COUNT)
+#define bfin_write_DMA18_CURR_X_COUNT(val)	bfin_write32(DMA18_CURR_X_COUNT, val)
+#define bfin_read_DMA18_CURR_Y_COUNT()		bfin_read32(DMA18_CURR_Y_COUNT)
+#define bfin_write_DMA18_CURR_Y_COUNT(val)	bfin_write32(DMA18_CURR_Y_COUNT, val)
+#define bfin_read_DMA18_BWL_COUNT()		bfin_read32(DMA18_BWL_COUNT)
+#define bfin_write_DMA18_BWL_COUNT(val)		bfin_write32(DMA18_BWL_COUNT, val)
+#define bfin_read_DMA18_CURR_BWL_COUNT()	bfin_read32(DMA18_CURR_BWL_COUNT)
+#define bfin_write_DMA18_CURR_BWL_COUNT(val)	bfin_write32(DMA18_CURR_BWL_COUNT, val)
+#define bfin_read_DMA18_BWM_COUNT()		bfin_read32(DMA18_BWM_COUNT)
+#define bfin_write_DMA18_BWM_COUNT(val)		bfin_write32(DMA18_BWM_COUNT, val)
+#define bfin_read_DMA18_CURR_BWM_COUNT()	bfin_read32(DMA18_CURR_BWM_COUNT)
+#define bfin_write_DMA18_CURR_BWM_COUNT(val)	bfin_write32(DMA18_CURR_BWM_COUNT, val)
+
+/* DMA Channel 19 Registers */
+
+#define bfin_read_DMA19_NEXT_DESC_PTR() 	bfin_read32(DMA19_NEXT_DESC_PTR)
+#define bfin_write_DMA19_NEXT_DESC_PTR(val) 	bfin_write32(DMA19_NEXT_DESC_PTR, val)
+#define bfin_read_DMA19_START_ADDR() 		bfin_read32(DMA19_START_ADDR)
+#define bfin_write_DMA19_START_ADDR(val) 	bfin_write32(DMA19_START_ADDR, val)
+#define bfin_read_DMA19_CONFIG()		bfin_read32(DMA19_CONFIG)
+#define bfin_write_DMA19_CONFIG(val)		bfin_write32(DMA19_CONFIG, val)
+#define bfin_read_DMA19_X_COUNT()		bfin_read32(DMA19_X_COUNT)
+#define bfin_write_DMA19_X_COUNT(val)		bfin_write32(DMA19_X_COUNT, val)
+#define bfin_read_DMA19_X_MODIFY()		bfin_read32(DMA19_X_MODIFY)
+#define bfin_write_DMA19_X_MODIFY(val) 		bfin_write32(DMA19_X_MODIFY, val)
+#define bfin_read_DMA19_Y_COUNT()		bfin_read32(DMA19_Y_COUNT)
+#define bfin_write_DMA19_Y_COUNT(val)		bfin_write32(DMA19_Y_COUNT, val)
+#define bfin_read_DMA19_Y_MODIFY()		bfin_read32(DMA19_Y_MODIFY)
+#define bfin_write_DMA19_Y_MODIFY(val) 		bfin_write32(DMA19_Y_MODIFY, val)
+#define bfin_read_DMA19_CURR_DESC_PTR() 	bfin_read32(DMA19_CURR_DESC_PTR)
+#define bfin_write_DMA19_CURR_DESC_PTR(val) 	bfin_write32(DMA19_CURR_DESC_PTR, val)
+#define bfin_read_DMA19_PREV_DESC_PTR() 	bfin_read32(DMA19_PREV_DESC_PTR)
+#define bfin_write_DMA19_PREV_DESC_PTR(val) 	bfin_write32(DMA19_PREV_DESC_PTR, val)
+#define bfin_read_DMA19_CURR_ADDR() 		bfin_read32(DMA19_CURR_ADDR)
+#define bfin_write_DMA19_CURR_ADDR(val) 	bfin_write32(DMA19_CURR_ADDR, val)
+#define bfin_read_DMA19_IRQ_STATUS()		bfin_read32(DMA19_IRQ_STATUS)
+#define bfin_write_DMA19_IRQ_STATUS(val)	bfin_write32(DMA19_IRQ_STATUS, val)
+#define bfin_read_DMA19_CURR_X_COUNT()		bfin_read32(DMA19_CURR_X_COUNT)
+#define bfin_write_DMA19_CURR_X_COUNT(val)	bfin_write32(DMA19_CURR_X_COUNT, val)
+#define bfin_read_DMA19_CURR_Y_COUNT()		bfin_read32(DMA19_CURR_Y_COUNT)
+#define bfin_write_DMA19_CURR_Y_COUNT(val)	bfin_write32(DMA19_CURR_Y_COUNT, val)
+#define bfin_read_DMA19_BWL_COUNT()		bfin_read32(DMA19_BWL_COUNT)
+#define bfin_write_DMA19_BWL_COUNT(val)		bfin_write32(DMA19_BWL_COUNT, val)
+#define bfin_read_DMA19_CURR_BWL_COUNT()	bfin_read32(DMA19_CURR_BWL_COUNT)
+#define bfin_write_DMA19_CURR_BWL_COUNT(val)	bfin_write32(DMA19_CURR_BWL_COUNT, val)
+#define bfin_read_DMA19_BWM_COUNT()		bfin_read32(DMA19_BWM_COUNT)
+#define bfin_write_DMA19_BWM_COUNT(val)		bfin_write32(DMA19_BWM_COUNT, val)
+#define bfin_read_DMA19_CURR_BWM_COUNT()	bfin_read32(DMA19_CURR_BWM_COUNT)
+#define bfin_write_DMA19_CURR_BWM_COUNT(val)	bfin_write32(DMA19_CURR_BWM_COUNT, val)
+
+/* DMA Channel 20 Registers */
+
+#define bfin_read_DMA20_NEXT_DESC_PTR() 	bfin_read32(DMA20_NEXT_DESC_PTR)
+#define bfin_write_DMA20_NEXT_DESC_PTR(val) 	bfin_write32(DMA20_NEXT_DESC_PTR, val)
+#define bfin_read_DMA20_START_ADDR() 		bfin_read32(DMA20_START_ADDR)
+#define bfin_write_DMA20_START_ADDR(val) 	bfin_write32(DMA20_START_ADDR, val)
+#define bfin_read_DMA20_CONFIG()		bfin_read32(DMA20_CONFIG)
+#define bfin_write_DMA20_CONFIG(val)		bfin_write32(DMA20_CONFIG, val)
+#define bfin_read_DMA20_X_COUNT()		bfin_read32(DMA20_X_COUNT)
+#define bfin_write_DMA20_X_COUNT(val)		bfin_write32(DMA20_X_COUNT, val)
+#define bfin_read_DMA20_X_MODIFY()		bfin_read32(DMA20_X_MODIFY)
+#define bfin_write_DMA20_X_MODIFY(val) 		bfin_write32(DMA20_X_MODIFY, val)
+#define bfin_read_DMA20_Y_COUNT()		bfin_read32(DMA20_Y_COUNT)
+#define bfin_write_DMA20_Y_COUNT(val)		bfin_write32(DMA20_Y_COUNT, val)
+#define bfin_read_DMA20_Y_MODIFY()		bfin_read32(DMA20_Y_MODIFY)
+#define bfin_write_DMA20_Y_MODIFY(val) 		bfin_write32(DMA20_Y_MODIFY, val)
+#define bfin_read_DMA20_CURR_DESC_PTR() 	bfin_read32(DMA20_CURR_DESC_PTR)
+#define bfin_write_DMA20_CURR_DESC_PTR(val) 	bfin_write32(DMA20_CURR_DESC_PTR, val)
+#define bfin_read_DMA20_PREV_DESC_PTR() 	bfin_read32(DMA20_PREV_DESC_PTR)
+#define bfin_write_DMA20_PREV_DESC_PTR(val) 	bfin_write32(DMA20_PREV_DESC_PTR, val)
+#define bfin_read_DMA20_CURR_ADDR() 		bfin_read32(DMA20_CURR_ADDR)
+#define bfin_write_DMA20_CURR_ADDR(val) 	bfin_write32(DMA20_CURR_ADDR, val)
+#define bfin_read_DMA20_IRQ_STATUS()		bfin_read32(DMA20_IRQ_STATUS)
+#define bfin_write_DMA20_IRQ_STATUS(val)	bfin_write32(DMA20_IRQ_STATUS, val)
+#define bfin_read_DMA20_CURR_X_COUNT()		bfin_read32(DMA20_CURR_X_COUNT)
+#define bfin_write_DMA20_CURR_X_COUNT(val)	bfin_write32(DMA20_CURR_X_COUNT, val)
+#define bfin_read_DMA20_CURR_Y_COUNT()		bfin_read32(DMA20_CURR_Y_COUNT)
+#define bfin_write_DMA20_CURR_Y_COUNT(val)	bfin_write32(DMA20_CURR_Y_COUNT, val)
+#define bfin_read_DMA20_BWL_COUNT()		bfin_read32(DMA20_BWL_COUNT)
+#define bfin_write_DMA20_BWL_COUNT(val)		bfin_write32(DMA20_BWL_COUNT, val)
+#define bfin_read_DMA20_CURR_BWL_COUNT()	bfin_read32(DMA20_CURR_BWL_COUNT)
+#define bfin_write_DMA20_CURR_BWL_COUNT(val)	bfin_write32(DMA20_CURR_BWL_COUNT, val)
+#define bfin_read_DMA20_BWM_COUNT()		bfin_read32(DMA20_BWM_COUNT)
+#define bfin_write_DMA20_BWM_COUNT(val)		bfin_write32(DMA20_BWM_COUNT, val)
+#define bfin_read_DMA20_CURR_BWM_COUNT()	bfin_read32(DMA20_CURR_BWM_COUNT)
+#define bfin_write_DMA20_CURR_BWM_COUNT(val)	bfin_write32(DMA20_CURR_BWM_COUNT, val)
+
+
+/* MDMA Stream 0 Registers (DMA Channels 21 and 22) */
+
+#define bfin_read_MDMA0_DEST_CRC0_NEXT_DESC_PTR() 	bfin_read32(MDMA0_DEST_CRC0_NEXT_DESC_PTR)
+#define bfin_write_MDMA0_DEST_CRC0_NEXT_DESC_PTR(val) 	bfin_write32(MDMA0_DEST_CRC0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA0_DEST_CRC0_START_ADDR() 		bfin_read32(MDMA0_DEST_CRC0_START_ADDR)
+#define bfin_write_MDMA0_DEST_CRC0_START_ADDR(val) 	bfin_write32(MDMA0_DEST_CRC0_START_ADDR, val)
+#define bfin_read_MDMA0_DEST_CRC0_CONFIG()		bfin_read32(MDMA0_DEST_CRC0_CONFIG)
+#define bfin_write_MDMA0_DEST_CRC0_CONFIG(val)		bfin_write32(MDMA0_DEST_CRC0_CONFIG, val)
+#define bfin_read_MDMA0_DEST_CRC0_X_COUNT()		bfin_read32(MDMA0_DEST_CRC0_X_COUNT)
+#define bfin_write_MDMA0_DEST_CRC0_X_COUNT(val)		bfin_write32(MDMA0_DEST_CRC0_X_COUNT, val)
+#define bfin_read_MDMA0_DEST_CRC0_X_MODIFY()		bfin_read32(MDMA0_DEST_CRC0_X_MODIFY)
+#define bfin_write_MDMA0_DEST_CRC0_X_MODIFY(val) 	bfin_write32(MDMA0_DEST_CRC0_X_MODIFY, val)
+#define bfin_read_MDMA0_DEST_CRC0_Y_COUNT()		bfin_read32(MDMA0_DEST_CRC0_Y_COUNT)
+#define bfin_write_MDMA0_DEST_CRC0_Y_COUNT(val)		bfin_write32(MDMA0_DEST_CRC0_Y_COUNT, val)
+#define bfin_read_MDMA0_DEST_CRC0_Y_MODIFY()		bfin_read32(MDMA0_DEST_CRC0_Y_MODIFY)
+#define bfin_write_MDMA0_DEST_CRC0_Y_MODIFY(val) 	bfin_write32(MDMA0_DEST_CRC0_Y_MODIFY, val)
+#define bfin_read_MDMA0_DEST_CRC0_CURR_DESC_PTR() 	bfin_read32(MDMA0_DEST_CRC0_CURR_DESC_PTR)
+#define bfin_write_MDMA0_DEST_CRC0_CURR_DESC_PTR(val) 	bfin_write32(MDMA0_DEST_CRC0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA0_DEST_CRC0_PREV_DESC_PTR() 	bfin_read32(MDMA0_DEST_CRC0_PREV_DESC_PTR)
+#define bfin_write_MDMA0_DEST_CRC0_PREV_DESC_PTR(val) 	bfin_write32(MDMA0_DEST_CRC0_PREV_DESC_PTR, val)
+#define bfin_read_MDMA0_DEST_CRC0_CURR_ADDR() 		bfin_read32(MDMA0_DEST_CRC0_CURR_ADDR)
+#define bfin_write_MDMA0_DEST_CRC0_CURR_ADDR(val) 	bfin_write32(MDMA0_DEST_CRC0_CURR_ADDR, val)
+#define bfin_read_MDMA0_DEST_CRC0_IRQ_STATUS()		bfin_read32(MDMA0_DEST_CRC0_IRQ_STATUS)
+#define bfin_write_MDMA0_DEST_CRC0_IRQ_STATUS(val)	bfin_write32(MDMA0_DEST_CRC0_IRQ_STATUS, val)
+#define bfin_read_MDMA0_DEST_CRC0_CURR_X_COUNT()	bfin_read32(MDMA0_DEST_CRC0_CURR_X_COUNT)
+#define bfin_write_MDMA0_DEST_CRC0_CURR_X_COUNT(val)	bfin_write32(MDMA0_DEST_CRC0_CURR_X_COUNT, val)
+#define bfin_read_MDMA0_DEST_CRC0_CURR_Y_COUNT()	bfin_read32(MDMA0_DEST_CRC0_CURR_Y_COUNT)
+#define bfin_write_MDMA0_DEST_CRC0_CURR_Y_COUNT(val)	bfin_write32(MDMA0_DEST_CRC0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA0_SRC_CRC0_NEXT_DESC_PTR() 	bfin_read32(MDMA0_SRC_CRC0_NEXT_DESC_PTR)
+#define bfin_write_MDMA0_SRC_CRC0_NEXT_DESC_PTR(val) 	bfin_write32(MDMA0_SRC_CRC0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA0_SRC_CRC0_START_ADDR() 		bfin_read32(MDMA0_SRC_CRC0_START_ADDR)
+#define bfin_write_MDMA0_SRC_CRC0_START_ADDR(val) 	bfin_write32(MDMA0_SRC_CRC0_START_ADDR, val)
+#define bfin_read_MDMA0_SRC_CRC0_CONFIG()		bfin_read32(MDMA0_SRC_CRC0_CONFIG)
+#define bfin_write_MDMA0_SRC_CRC0_CONFIG(val)		bfin_write32(MDMA0_SRC_CRC0_CONFIG, val)
+#define bfin_read_MDMA0_SRC_CRC0_X_COUNT()		bfin_read32(MDMA0_SRC_CRC0_X_COUNT)
+#define bfin_write_MDMA0_SRC_CRC0_X_COUNT(val)		bfin_write32(MDMA0_SRC_CRC0_X_COUNT, val)
+#define bfin_read_MDMA0_SRC_CRC0_X_MODIFY()		bfin_read32(MDMA0_SRC_CRC0_X_MODIFY)
+#define bfin_write_MDMA0_SRC_CRC0_X_MODIFY(val) 	bfin_write32(MDMA0_SRC_CRC0_X_MODIFY, val)
+#define bfin_read_MDMA0_SRC_CRC0_Y_COUNT()		bfin_read32(MDMA0_SRC_CRC0_Y_COUNT)
+#define bfin_write_MDMA0_SRC_CRC0_Y_COUNT(val)		bfin_write32(MDMA0_SRC_CRC0_Y_COUNT, val)
+#define bfin_read_MDMA0_SRC_CRC0_Y_MODIFY()		bfin_read32(MDMA0_SRC_CRC0_Y_MODIFY)
+#define bfin_write_MDMA0_SRC_CRC0_Y_MODIFY(val) 	bfin_write32(MDMA0_SRC_CRC0_Y_MODIFY, val)
+#define bfin_read_MDMA0_SRC_CRC0_CURR_DESC_PTR() 	bfin_read32(MDMA0_SRC_CRC0_CURR_DESC_PTR)
+#define bfin_write_MDMA0_SRC_CRC0_CURR_DESC_PTR(val) 	bfin_write32(MDMA0_SRC_CRC0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA0_SRC_CRC0_PREV_DESC_PTR() 	bfin_read32(MDMA0_SRC_CRC0_PREV_DESC_PTR)
+#define bfin_write_MDMA0_SRC_CRC0_PREV_DESC_PTR(val) 	bfin_write32(MDMA0_SRC_CRC0_PREV_DESC_PTR, val)
+#define bfin_read_MDMA0_SRC_CRC0_CURR_ADDR() 		bfin_read32(MDMA0_SRC_CRC0_CURR_ADDR)
+#define bfin_write_MDMA0_SRC_CRC0_CURR_ADDR(val) 	bfin_write32(MDMA0_SRC_CRC0_CURR_ADDR, val)
+#define bfin_read_MDMA0_SRC_CRC0_IRQ_STATUS()		bfin_read32(MDMA0_SRC_CRC0_IRQ_STATUS)
+#define bfin_write_MDMA0_SRC_CRC0_IRQ_STATUS(val)	bfin_write32(MDMA0_SRC_CRC0_IRQ_STATUS, val)
+#define bfin_read_MDMA0_SRC_CRC0_CURR_X_COUNT()		bfin_read32(MDMA0_SRC_CRC0_CURR_X_COUNT)
+#define bfin_write_MDMA0_SRC_CRC0_CURR_X_COUNT(val)	bfin_write32(MDMA0_SRC_CRC0_CURR_X_COUNT, val)
+#define bfin_read_MDMA0_SRC_CRC0_CURR_Y_COUNT()		bfin_read32(MDMA0_SRC_CRC0_CURR_Y_COUNT)
+#define bfin_write_MDMA0_SRC_CRC0_CURR_Y_COUNT(val)	bfin_write32(MDMA0_SRC_CRC0_CURR_Y_COUNT, val)
+
+/* MDMA Stream 1 Registers (DMA Channels 23 and 24) */
+
+#define bfin_read_MDMA1_DEST_CRC1_NEXT_DESC_PTR() 	bfin_read32(MDMA1_DEST_CRC1_NEXT_DESC_PTR)
+#define bfin_write_MDMA1_DEST_CRC1_NEXT_DESC_PTR(val) 	bfin_write32(MDMA1_DEST_CRC1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA1_DEST_CRC1_START_ADDR() 		bfin_read32(MDMA1_DEST_CRC1_START_ADDR)
+#define bfin_write_MDMA1_DEST_CRC1_START_ADDR(val) 	bfin_write32(MDMA1_DEST_CRC1_START_ADDR, val)
+#define bfin_read_MDMA1_DEST_CRC1_CONFIG()		bfin_read32(MDMA1_DEST_CRC1_CONFIG)
+#define bfin_write_MDMA1_DEST_CRC1_CONFIG(val)		bfin_write32(MDMA1_DEST_CRC1_CONFIG, val)
+#define bfin_read_MDMA1_DEST_CRC1_X_COUNT()		bfin_read32(MDMA1_DEST_CRC1_X_COUNT)
+#define bfin_write_MDMA1_DEST_CRC1_X_COUNT(val)		bfin_write32(MDMA1_DEST_CRC1_X_COUNT, val)
+#define bfin_read_MDMA1_DEST_CRC1_X_MODIFY()		bfin_read32(MDMA1_DEST_CRC1_X_MODIFY)
+#define bfin_write_MDMA1_DEST_CRC1_X_MODIFY(val) 	bfin_write32(MDMA1_DEST_CRC1_X_MODIFY, val)
+#define bfin_read_MDMA1_DEST_CRC1_Y_COUNT()		bfin_read32(MDMA1_DEST_CRC1_Y_COUNT)
+#define bfin_write_MDMA1_DEST_CRC1_Y_COUNT(val)		bfin_write32(MDMA1_DEST_CRC1_Y_COUNT, val)
+#define bfin_read_MDMA1_DEST_CRC1_Y_MODIFY()		bfin_read32(MDMA1_DEST_CRC1_Y_MODIFY)
+#define bfin_write_MDMA1_DEST_CRC1_Y_MODIFY(val) 	bfin_write32(MDMA1_DEST_CRC1_Y_MODIFY, val)
+#define bfin_read_MDMA1_DEST_CRC1_CURR_DESC_PTR() 	bfin_read32(MDMA1_DEST_CRC1_CURR_DESC_PTR)
+#define bfin_write_MDMA1_DEST_CRC1_CURR_DESC_PTR(val) 	bfin_write32(MDMA1_DEST_CRC1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA1_DEST_CRC1_PREV_DESC_PTR() 	bfin_read32(MDMA1_DEST_CRC1_PREV_DESC_PTR)
+#define bfin_write_MDMA1_DEST_CRC1_PREV_DESC_PTR(val) 	bfin_write32(MDMA1_DEST_CRC1_PREV_DESC_PTR, val)
+#define bfin_read_MDMA1_DEST_CRC1_CURR_ADDR() 		bfin_read32(MDMA1_DEST_CRC1_CURR_ADDR)
+#define bfin_write_MDMA1_DEST_CRC1_CURR_ADDR(val) 	bfin_write32(MDMA1_DEST_CRC1_CURR_ADDR, val)
+#define bfin_read_MDMA1_DEST_CRC1_IRQ_STATUS()		bfin_read32(MDMA1_DEST_CRC1_IRQ_STATUS)
+#define bfin_write_MDMA1_DEST_CRC1_IRQ_STATUS(val)	bfin_write32(MDMA1_DEST_CRC1_IRQ_STATUS, val)
+#define bfin_read_MDMA1_DEST_CRC1_CURR_X_COUNT()	bfin_read32(MDMA1_DEST_CRC1_CURR_X_COUNT)
+#define bfin_write_MDMA1_DEST_CRC1_CURR_X_COUNT(val)	bfin_write32(MDMA1_DEST_CRC1_CURR_X_COUNT, val)
+#define bfin_read_MDMA1_DEST_CRC1_CURR_Y_COUNT()	bfin_read32(MDMA1_DEST_CRC1_CURR_Y_COUNT)
+#define bfin_write_MDMA1_DEST_CRC1_CURR_Y_COUNT(val)	bfin_write32(MDMA1_DEST_CRC1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA1_SRC_CRC1_NEXT_DESC_PTR() 	bfin_read32(MDMA1_SRC_CRC1_NEXT_DESC_PTR)
+#define bfin_write_MDMA1_SRC_CRC1_NEXT_DESC_PTR(val) 	bfin_write32(MDMA1_SRC_CRC1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA1_SRC_CRC1_START_ADDR() 		bfin_read32(MDMA1_SRC_CRC1_START_ADDR)
+#define bfin_write_MDMA1_SRC_CRC1_START_ADDR(val) 	bfin_write32(MDMA1_SRC_CRC1_START_ADDR, val)
+#define bfin_read_MDMA1_SRC_CRC1_CONFIG()		bfin_read32(MDMA1_SRC_CRC1_CONFIG)
+#define bfin_write_MDMA1_SRC_CRC1_CONFIG(val)		bfin_write32(MDMA1_SRC_CRC1_CONFIG, val)
+#define bfin_read_MDMA1_SRC_CRC1_X_COUNT()		bfin_read32(MDMA1_SRC_CRC1_X_COUNT)
+#define bfin_write_MDMA1_SRC_CRC1_X_COUNT(val)		bfin_write32(MDMA1_SRC_CRC1_X_COUNT, val)
+#define bfin_read_MDMA1_SRC_CRC1_X_MODIFY()		bfin_read32(MDMA1_SRC_CRC1_X_MODIFY)
+#define bfin_write_MDMA1_SRC_CRC1_X_MODIFY(val) 	bfin_write32(MDMA1_SRC_CRC1_X_MODIFY, val)
+#define bfin_read_MDMA1_SRC_CRC1_Y_COUNT()		bfin_read32(MDMA1_SRC_CRC1_Y_COUNT)
+#define bfin_write_MDMA1_SRC_CRC1_Y_COUNT(val)		bfin_write32(MDMA1_SRC_CRC1_Y_COUNT, val)
+#define bfin_read_MDMA1_SRC_CRC1_Y_MODIFY()		bfin_read32(MDMA1_SRC_CRC1_Y_MODIFY)
+#define bfin_write_MDMA1_SRC_CRC1_Y_MODIFY(val) 	bfin_write32(MDMA1_SRC_CRC1_Y_MODIFY, val)
+#define bfin_read_MDMA1_SRC_CRC1_CURR_DESC_PTR() 	bfin_read32(MDMA1_SRC_CRC1_CURR_DESC_PTR)
+#define bfin_write_MDMA1_SRC_CRC1_CURR_DESC_PTR(val) 	bfin_write32(MDMA1_SRC_CRC1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA1_SRC_CRC1_PREV_DESC_PTR() 	bfin_read32(MDMA1_SRC_CRC1_PREV_DESC_PTR)
+#define bfin_write_MDMA1_SRC_CRC1_PREV_DESC_PTR(val) 	bfin_write32(MDMA1_SRC_CRC1_PREV_DESC_PTR, val)
+#define bfin_read_MDMA1_SRC_CRC1_CURR_ADDR() 		bfin_read32(MDMA1_SRC_CRC1_CURR_ADDR)
+#define bfin_write_MDMA1_SRC_CRC1_CURR_ADDR(val) 	bfin_write32(MDMA1_SRC_CRC1_CURR_ADDR, val)
+#define bfin_read_MDMA1_SRC_CRC1_IRQ_STATUS()		bfin_read32(MDMA1_SRC_CRC1_IRQ_STATUS)
+#define bfin_write_MDMA1_SRC_CRC1_IRQ_STATUS(val)	bfin_write32(MDMA1_SRC_CRC1_IRQ_STATUS, val)
+#define bfin_read_MDMA1_SRC_CRC1_CURR_X_COUNT()		bfin_read32(MDMA1_SRC_CRC1_CURR_X_COUNT)
+#define bfin_write_MDMA1_SRC_CRC1_CURR_X_COUNT(val)	bfin_write32(MDMA1_SRC_CRC1_CURR_X_COUNT, val)
+#define bfin_read_MDMA1_SRC_CRC1_CURR_Y_COUNT()		bfin_read32(MDMA1_SRC_CRC1_CURR_Y_COUNT)
+#define bfin_write_MDMA1_SRC_CRC1_CURR_Y_COUNT(val)	bfin_write32(MDMA1_SRC_CRC1_CURR_Y_COUNT, val)
+
+
+/* MDMA Stream 2 Registers (DMA Channels 25 and 26) */
+
+#define bfin_read_MDMA2_DEST_NEXT_DESC_PTR() 		bfin_read32(MDMA2_DEST_NEXT_DESC_PTR)
+#define bfin_write_MDMA2_DEST_NEXT_DESC_PTR(val) 	bfin_write32(MDMA2_DEST_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA2_DEST_START_ADDR() 		bfin_read32(MDMA2_DEST_START_ADDR)
+#define bfin_write_MDMA2_DEST_START_ADDR(val) 		bfin_write32(MDMA2_DEST_START_ADDR, val)
+#define bfin_read_MDMA2_DEST_CONFIG()			bfin_read32(MDMA2_DEST_CONFIG)
+#define bfin_write_MDMA2_DEST_CONFIG(val)		bfin_write32(MDMA2_DEST_CONFIG, val)
+#define bfin_read_MDMA2_DEST_X_COUNT()			bfin_read32(MDMA2_DEST_X_COUNT)
+#define bfin_write_MDMA2_DEST_X_COUNT(val)		bfin_write32(MDMA2_DEST_X_COUNT, val)
+#define bfin_read_MDMA2_DEST_X_MODIFY()			bfin_read32(MDMA2_DEST_X_MODIFY)
+#define bfin_write_MDMA2_DEST_X_MODIFY(val) 		bfin_write32(MDMA2_DEST_X_MODIFY, val)
+#define bfin_read_MDMA2_DEST_Y_COUNT()			bfin_read32(MDMA2_DEST_Y_COUNT)
+#define bfin_write_MDMA2_DEST_Y_COUNT(val)		bfin_write32(MDMA2_DEST_Y_COUNT, val)
+#define bfin_read_MDMA2_DEST_Y_MODIFY()			bfin_read32(MDMA2_DEST_Y_MODIFY)
+#define bfin_write_MDMA2_DEST_Y_MODIFY(val) 		bfin_write32(MDMA2_DEST_Y_MODIFY, val)
+#define bfin_read_MDMA2_DEST_CURR_DESC_PTR() 		bfin_read32(MDMA2_DEST_CURR_DESC_PTR)
+#define bfin_write_MDMA2_DEST_CURR_DESC_PTR(val) 	bfin_write32(MDMA2_DEST_CURR_DESC_PTR, val)
+#define bfin_read_MDMA2_DEST_PREV_DESC_PTR() 		bfin_read32(MDMA2_DEST_PREV_DESC_PTR)
+#define bfin_write_MDMA2_DEST_PREV_DESC_PTR(val) 	bfin_write32(MDMA2_DEST_PREV_DESC_PTR, val)
+#define bfin_read_MDMA2_DEST_CURR_ADDR() 		bfin_read32(MDMA2_DEST_CURR_ADDR)
+#define bfin_write_MDMA2_DEST_CURR_ADDR(val) 		bfin_write32(MDMA2_DEST_CURR_ADDR, val)
+#define bfin_read_MDMA2_DEST_IRQ_STATUS()		bfin_read32(MDMA2_DEST_IRQ_STATUS)
+#define bfin_write_MDMA2_DEST_IRQ_STATUS(val)		bfin_write32(MDMA2_DEST_IRQ_STATUS, val)
+#define bfin_read_MDMA2_DEST_CURR_X_COUNT()		bfin_read32(MDMA2_DEST_CURR_X_COUNT)
+#define bfin_write_MDMA2_DEST_CURR_X_COUNT(val)		bfin_write32(MDMA2_DEST_CURR_X_COUNT, val)
+#define bfin_read_MDMA2_DEST_CURR_Y_COUNT()		bfin_read32(MDMA2_DEST_CURR_Y_COUNT)
+#define bfin_write_MDMA2_DEST_CURR_Y_COUNT(val)		bfin_write32(MDMA2_DEST_CURR_Y_COUNT, val)
+#define bfin_read_MDMA2_SRC_NEXT_DESC_PTR() 		bfin_read32(MDMA2_SRC_NEXT_DESC_PTR)
+#define bfin_write_MDMA2_SRC_NEXT_DESC_PTR(val) 	bfin_write32(MDMA2_SRC_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA2_SRC_START_ADDR() 		bfin_read32(MDMA2_SRC_START_ADDR)
+#define bfin_write_MDMA2_SRC_START_ADDR(val) 		bfin_write32(MDMA2_SRC_START_ADDR, val)
+#define bfin_read_MDMA2_SRC_CONFIG()			bfin_read32(MDMA2_SRC_CONFIG)
+#define bfin_write_MDMA2_SRC_CONFIG(val)		bfin_write32(MDMA2_SRC_CONFIG, val)
+#define bfin_read_MDMA2_SRC_X_COUNT()			bfin_read32(MDMA2_SRC_X_COUNT)
+#define bfin_write_MDMA2_SRC_X_COUNT(val)		bfin_write32(MDMA2_SRC_X_COUNT, val)
+#define bfin_read_MDMA2_SRC_X_MODIFY()			bfin_read32(MDMA2_SRC_X_MODIFY)
+#define bfin_write_MDMA2_SRC_X_MODIFY(val) 		bfin_write32(MDMA2_SRC_X_MODIFY, val)
+#define bfin_read_MDMA2_SRC_Y_COUNT()			bfin_read32(MDMA2_SRC_Y_COUNT)
+#define bfin_write_MDMA2_SRC_Y_COUNT(val)		bfin_write32(MDMA2_SRC_Y_COUNT, val)
+#define bfin_read_MDMA2_SRC_Y_MODIFY()			bfin_read32(MDMA2_SRC_Y_MODIFY)
+#define bfin_write_MDMA2_SRC_Y_MODIFY(val) 		bfin_write32(MDMA2_SRC_Y_MODIFY, val)
+#define bfin_read_MDMA2_SRC_CURR_DESC_PTR() 		bfin_read32(MDMA2_SRC_CURR_DESC_PTR)
+#define bfin_write_MDMA2_SRC_CURR_DESC_PTR(val)		bfin_write32(MDMA2_SRC_CURR_DESC_PTR, val)
+#define bfin_read_MDMA2_SRC_PREV_DESC_PTR() 		bfin_read32(MDMA2_SRC_PREV_DESC_PTR)
+#define bfin_write_MDMA2_SRC_PREV_DESC_PTR(val) 	bfin_write32(MDMA2_SRC_PREV_DESC_PTR, val)
+#define bfin_read_MDMA2_SRC_CURR_ADDR() 		bfin_read32(MDMA2_SRC_CURR_ADDR)
+#define bfin_write_MDMA2_SRC_CURR_ADDR(val) 		bfin_write32(MDMA2_SRC_CURR_ADDR, val)
+#define bfin_read_MDMA2_SRC_IRQ_STATUS()		bfin_read32(MDMA2_SRC_IRQ_STATUS)
+#define bfin_write_MDMA2_SRC_IRQ_STATUS(val)		bfin_write32(MDMA2_SRC_IRQ_STATUS, val)
+#define bfin_read_MDMA2_SRC_CURR_X_COUNT()		bfin_read32(MDMA2_SRC_CURR_X_COUNT)
+#define bfin_write_MDMA2_SRC_CURR_X_COUNT(val)		bfin_write32(MDMA2_SRC_CURR_X_COUNT, val)
+#define bfin_read_MDMA2_SRC_CURR_Y_COUNT()		bfin_read32(MDMA2_SRC_CURR_Y_COUNT)
+#define bfin_write_MDMA2_SRC_CURR_Y_COUNT(val)		bfin_write32(MDMA2_SRC_CURR_Y_COUNT, val)
+
+/* MDMA Stream 3 Registers (DMA Channels 27 and 28) */
+
+#define bfin_read_MDMA3_DEST_NEXT_DESC_PTR() 		bfin_read32(MDMA3_DEST_NEXT_DESC_PTR)
+#define bfin_write_MDMA3_DEST_NEXT_DESC_PTR(val) 	bfin_write32(MDMA3_DEST_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA3_DEST_START_ADDR() 		bfin_read32(MDMA3_DEST_START_ADDR)
+#define bfin_write_MDMA3_DEST_START_ADDR(val) 		bfin_write32(MDMA3_DEST_START_ADDR, val)
+#define bfin_read_MDMA3_DEST_CONFIG()			bfin_read32(MDMA3_DEST_CONFIG)
+#define bfin_write_MDMA3_DEST_CONFIG(val)		bfin_write32(MDMA3_DEST_CONFIG, val)
+#define bfin_read_MDMA3_DEST_X_COUNT()			bfin_read32(MDMA3_DEST_X_COUNT)
+#define bfin_write_MDMA3_DEST_X_COUNT(val)		bfin_write32(MDMA3_DEST_X_COUNT, val)
+#define bfin_read_MDMA3_DEST_X_MODIFY()			bfin_read32(MDMA3_DEST_X_MODIFY)
+#define bfin_write_MDMA3_DEST_X_MODIFY(val) 		bfin_write32(MDMA3_DEST_X_MODIFY, val)
+#define bfin_read_MDMA3_DEST_Y_COUNT()			bfin_read32(MDMA3_DEST_Y_COUNT)
+#define bfin_write_MDMA3_DEST_Y_COUNT(val)		bfin_write32(MDMA3_DEST_Y_COUNT, val)
+#define bfin_read_MDMA3_DEST_Y_MODIFY()			bfin_read32(MDMA3_DEST_Y_MODIFY)
+#define bfin_write_MDMA3_DEST_Y_MODIFY(val) 		bfin_write32(MDMA3_DEST_Y_MODIFY, val)
+#define bfin_read_MDMA3_DEST_CURR_DESC_PTR() 		bfin_read32(MDMA3_DEST_CURR_DESC_PTR)
+#define bfin_write_MDMA3_DEST_CURR_DESC_PTR(val) 	bfin_write32(MDMA3_DEST_CURR_DESC_PTR, val)
+#define bfin_read_MDMA3_DEST_PREV_DESC_PTR()	 	bfin_read32(MDMA3_DEST_PREV_DESC_PTR)
+#define bfin_write_MDMA3_DEST_PREV_DESC_PTR(val) 	bfin_write32(MDMA3_DEST_PREV_DESC_PTR, val)
+#define bfin_read_MDMA3_DEST_CURR_ADDR() 		bfin_read32(MDMA3_DEST_CURR_ADDR)
+#define bfin_write_MDMA3_DEST_CURR_ADDR(val) 		bfin_write32(MDMA3_DEST_CURR_ADDR, val)
+#define bfin_read_MDMA3_DEST_IRQ_STATUS()		bfin_read32(MDMA3_DEST_IRQ_STATUS)
+#define bfin_write_MDMA3_DEST_IRQ_STATUS(val)		bfin_write32(MDMA3_DEST_IRQ_STATUS, val)
+#define bfin_read_MDMA3_DEST_CURR_X_COUNT()		bfin_read32(MDMA3_DEST_CURR_X_COUNT)
+#define bfin_write_MDMA3_DEST_CURR_X_COUNT(val)		bfin_write32(MDMA3_DEST_CURR_X_COUNT, val)
+#define bfin_read_MDMA3_DEST_CURR_Y_COUNT()		bfin_read32(MDMA3_DEST_CURR_Y_COUNT)
+#define bfin_write_MDMA3_DEST_CURR_Y_COUNT(val)		bfin_write32(MDMA3_DEST_CURR_Y_COUNT, val)
+#define bfin_read_MDMA3_SRC_NEXT_DESC_PTR() 		bfin_read32(MDMA3_SRC_NEXT_DESC_PTR)
+#define bfin_write_MDMA3_SRC_NEXT_DESC_PTR(val) 	bfin_write32(MDMA3_SRC_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA3_SRC_START_ADDR() 		bfin_read32(MDMA3_SRC_START_ADDR)
+#define bfin_write_MDMA3_SRC_START_ADDR(val) 		bfin_write32(MDMA3_SRC_START_ADDR, val)
+#define bfin_read_MDMA3_SRC_CONFIG()			bfin_read32(MDMA3_SRC_CONFIG)
+#define bfin_write_MDMA3_SRC_CONFIG(val)		bfin_write32(MDMA3_SRC_CONFIG, val)
+#define bfin_read_MDMA3_SRC_X_COUNT()			bfin_read32(MDMA3_SRC_X_COUNT)
+#define bfin_write_MDMA3_SRC_X_COUNT(val)		bfin_write32(MDMA3_SRC_X_COUNT, val)
+#define bfin_read_MDMA3_SRC_X_MODIFY()			bfin_read32(MDMA3_SRC_X_MODIFY)
+#define bfin_write_MDMA3_SRC_X_MODIFY(val) 		bfin_write32(MDMA3_SRC_X_MODIFY, val)
+#define bfin_read_MDMA3_SRC_Y_COUNT()			bfin_read32(MDMA3_SRC_Y_COUNT)
+#define bfin_write_MDMA3_SRC_Y_COUNT(val)		bfin_write32(MDMA3_SRC_Y_COUNT, val)
+#define bfin_read_MDMA3_SRC_Y_MODIFY()			bfin_read32(MDMA3_SRC_Y_MODIFY)
+#define bfin_write_MDMA3_SRC_Y_MODIFY(val) 		bfin_write32(MDMA3_SRC_Y_MODIFY, val)
+#define bfin_read_MDMA3_SRC_CURR_DESC_PTR() 		bfin_read32(MDMA3_SRC_CURR_DESC_PTR)
+#define bfin_write_MDMA3_SRC_CURR_DESC_PTR(val) 	bfin_write32(MDMA3_SRC_CURR_DESC_PTR, val)
+#define bfin_read_MDMA3_SRC_PREV_DESC_PTR() 		bfin_read32(MDMA3_SRC_PREV_DESC_PTR)
+#define bfin_write_MDMA3_SRC_PREV_DESC_PTR(val) 	bfin_write32(MDMA3_SRC_PREV_DESC_PTR, val)
+#define bfin_read_MDMA3_SRC_CURR_ADDR() 		bfin_read32(MDMA3_SRC_CURR_ADDR)
+#define bfin_write_MDMA3_SRC_CURR_ADDR(val) 		bfin_write32(MDMA3_SRC_CURR_ADDR, val)
+#define bfin_read_MDMA3_SRC_IRQ_STATUS()		bfin_read32(MDMA3_SRC_IRQ_STATUS)
+#define bfin_write_MDMA3_SRC_IRQ_STATUS(val)		bfin_write32(MDMA3_SRC_IRQ_STATUS, val)
+#define bfin_read_MDMA3_SRC_CURR_X_COUNT()		bfin_read32(MDMA3_SRC_CURR_X_COUNT)
+#define bfin_write_MDMA3_SRC_CURR_X_COUNT(val)		bfin_write32(MDMA3_SRC_CURR_X_COUNT, val)
+#define bfin_read_MDMA3_SRC_CURR_Y_COUNT()		bfin_read32(MDMA3_SRC_CURR_Y_COUNT)
+#define bfin_write_MDMA3_SRC_CURR_Y_COUNT(val)		bfin_write32(MDMA3_SRC_CURR_Y_COUNT, val)
+
+
+/* DMA Channel 29 Registers */
+
+#define bfin_read_DMA29_NEXT_DESC_PTR() 	bfin_read32(DMA29_NEXT_DESC_PTR)
+#define bfin_write_DMA29_NEXT_DESC_PTR(val) 	bfin_write32(DMA29_NEXT_DESC_PTR, val)
+#define bfin_read_DMA29_START_ADDR() 		bfin_read32(DMA29_START_ADDR)
+#define bfin_write_DMA29_START_ADDR(val) 	bfin_write32(DMA29_START_ADDR, val)
+#define bfin_read_DMA29_CONFIG()		bfin_read32(DMA29_CONFIG)
+#define bfin_write_DMA29_CONFIG(val)		bfin_write32(DMA29_CONFIG, val)
+#define bfin_read_DMA29_X_COUNT()		bfin_read32(DMA29_X_COUNT)
+#define bfin_write_DMA29_X_COUNT(val)		bfin_write32(DMA29_X_COUNT, val)
+#define bfin_read_DMA29_X_MODIFY()		bfin_read32(DMA29_X_MODIFY)
+#define bfin_write_DMA29_X_MODIFY(val) 		bfin_write32(DMA29_X_MODIFY, val)
+#define bfin_read_DMA29_Y_COUNT()		bfin_read32(DMA29_Y_COUNT)
+#define bfin_write_DMA29_Y_COUNT(val)		bfin_write32(DMA29_Y_COUNT, val)
+#define bfin_read_DMA29_Y_MODIFY()		bfin_read32(DMA29_Y_MODIFY)
+#define bfin_write_DMA29_Y_MODIFY(val) 		bfin_write32(DMA29_Y_MODIFY, val)
+#define bfin_read_DMA29_CURR_DESC_PTR() 	bfin_read32(DMA29_CURR_DESC_PTR)
+#define bfin_write_DMA29_CURR_DESC_PTR(val) 	bfin_write32(DMA29_CURR_DESC_PTR, val)
+#define bfin_read_DMA29_PREV_DESC_PTR() 	bfin_read32(DMA29_PREV_DESC_PTR)
+#define bfin_write_DMA29_PREV_DESC_PTR(val) 	bfin_write32(DMA29_PREV_DESC_PTR, val)
+#define bfin_read_DMA29_CURR_ADDR() 		bfin_read32(DMA29_CURR_ADDR)
+#define bfin_write_DMA29_CURR_ADDR(val) 	bfin_write32(DMA29_CURR_ADDR, val)
+#define bfin_read_DMA29_IRQ_STATUS()		bfin_read32(DMA29_IRQ_STATUS)
+#define bfin_write_DMA29_IRQ_STATUS(val)	bfin_write32(DMA29_IRQ_STATUS, val)
+#define bfin_read_DMA29_CURR_X_COUNT()		bfin_read32(DMA29_CURR_X_COUNT)
+#define bfin_write_DMA29_CURR_X_COUNT(val)	bfin_write32(DMA29_CURR_X_COUNT, val)
+#define bfin_read_DMA29_CURR_Y_COUNT()		bfin_read32(DMA29_CURR_Y_COUNT)
+#define bfin_write_DMA29_CURR_Y_COUNT(val)	bfin_write32(DMA29_CURR_Y_COUNT, val)
+#define bfin_read_DMA29_BWL_COUNT()		bfin_read32(DMA29_BWL_COUNT)
+#define bfin_write_DMA29_BWL_COUNT(val)		bfin_write32(DMA29_BWL_COUNT, val)
+#define bfin_read_DMA29_CURR_BWL_COUNT()	bfin_read32(DMA29_CURR_BWL_COUNT)
+#define bfin_write_DMA29_CURR_BWL_COUNT(val)	bfin_write32(DMA29_CURR_BWL_COUNT, val)
+#define bfin_read_DMA29_BWM_COUNT()		bfin_read32(DMA29_BWM_COUNT)
+#define bfin_write_DMA29_BWM_COUNT(val)		bfin_write32(DMA29_BWM_COUNT, val)
+#define bfin_read_DMA29_CURR_BWM_COUNT()	bfin_read32(DMA29_CURR_BWM_COUNT)
+#define bfin_write_DMA29_CURR_BWM_COUNT(val)	bfin_write32(DMA29_CURR_BWM_COUNT, val)
+
+/* DMA Channel 30 Registers */
+
+#define bfin_read_DMA30_NEXT_DESC_PTR() 	bfin_read32(DMA30_NEXT_DESC_PTR)
+#define bfin_write_DMA30_NEXT_DESC_PTR(val) 	bfin_write32(DMA30_NEXT_DESC_PTR, val)
+#define bfin_read_DMA30_START_ADDR() 		bfin_read32(DMA30_START_ADDR)
+#define bfin_write_DMA30_START_ADDR(val) 	bfin_write32(DMA30_START_ADDR, val)
+#define bfin_read_DMA30_CONFIG()		bfin_read32(DMA30_CONFIG)
+#define bfin_write_DMA30_CONFIG(val)		bfin_write32(DMA30_CONFIG, val)
+#define bfin_read_DMA30_X_COUNT()		bfin_read32(DMA30_X_COUNT)
+#define bfin_write_DMA30_X_COUNT(val)		bfin_write32(DMA30_X_COUNT, val)
+#define bfin_read_DMA30_X_MODIFY()		bfin_read32(DMA30_X_MODIFY)
+#define bfin_write_DMA30_X_MODIFY(val) 		bfin_write32(DMA30_X_MODIFY, val)
+#define bfin_read_DMA30_Y_COUNT()		bfin_read32(DMA30_Y_COUNT)
+#define bfin_write_DMA30_Y_COUNT(val)		bfin_write32(DMA30_Y_COUNT, val)
+#define bfin_read_DMA30_Y_MODIFY()		bfin_read32(DMA30_Y_MODIFY)
+#define bfin_write_DMA30_Y_MODIFY(val) 		bfin_write32(DMA30_Y_MODIFY, val)
+#define bfin_read_DMA30_CURR_DESC_PTR() 	bfin_read32(DMA30_CURR_DESC_PTR)
+#define bfin_write_DMA30_CURR_DESC_PTR(val) 	bfin_write32(DMA30_CURR_DESC_PTR, val)
+#define bfin_read_DMA30_PREV_DESC_PTR() 	bfin_read32(DMA30_PREV_DESC_PTR)
+#define bfin_write_DMA30_PREV_DESC_PTR(val) 	bfin_write32(DMA30_PREV_DESC_PTR, val)
+#define bfin_read_DMA30_CURR_ADDR() 		bfin_read32(DMA30_CURR_ADDR)
+#define bfin_write_DMA30_CURR_ADDR(val) 	bfin_write32(DMA30_CURR_ADDR, val)
+#define bfin_read_DMA30_IRQ_STATUS()		bfin_read32(DMA30_IRQ_STATUS)
+#define bfin_write_DMA30_IRQ_STATUS(val)	bfin_write32(DMA30_IRQ_STATUS, val)
+#define bfin_read_DMA30_CURR_X_COUNT()		bfin_read32(DMA30_CURR_X_COUNT)
+#define bfin_write_DMA30_CURR_X_COUNT(val)	bfin_write32(DMA30_CURR_X_COUNT, val)
+#define bfin_read_DMA30_CURR_Y_COUNT()		bfin_read32(DMA30_CURR_Y_COUNT)
+#define bfin_write_DMA30_CURR_Y_COUNT(val)	bfin_write32(DMA30_CURR_Y_COUNT, val)
+#define bfin_read_DMA30_BWL_COUNT()		bfin_read32(DMA30_BWL_COUNT)
+#define bfin_write_DMA30_BWL_COUNT(val)		bfin_write32(DMA30_BWL_COUNT, val)
+#define bfin_read_DMA30_CURR_BWL_COUNT()	bfin_read32(DMA30_CURR_BWL_COUNT)
+#define bfin_write_DMA30_CURR_BWL_COUNT(val)	bfin_write32(DMA30_CURR_BWL_COUNT, val)
+#define bfin_read_DMA30_BWM_COUNT()		bfin_read32(DMA30_BWM_COUNT)
+#define bfin_write_DMA30_BWM_COUNT(val)		bfin_write32(DMA30_BWM_COUNT, val)
+#define bfin_read_DMA30_CURR_BWM_COUNT()	bfin_read32(DMA30_CURR_BWM_COUNT)
+#define bfin_write_DMA30_CURR_BWM_COUNT(val)	bfin_write32(DMA30_CURR_BWM_COUNT, val)
+
+/* DMA Channel 31 Registers */
+
+#define bfin_read_DMA31_NEXT_DESC_PTR() 	bfin_read32(DMA31_NEXT_DESC_PTR)
+#define bfin_write_DMA31_NEXT_DESC_PTR(val) 	bfin_write32(DMA31_NEXT_DESC_PTR, val)
+#define bfin_read_DMA31_START_ADDR() 		bfin_read32(DMA31_START_ADDR)
+#define bfin_write_DMA31_START_ADDR(val) 	bfin_write32(DMA31_START_ADDR, val)
+#define bfin_read_DMA31_CONFIG()		bfin_read32(DMA31_CONFIG)
+#define bfin_write_DMA31_CONFIG(val)		bfin_write32(DMA31_CONFIG, val)
+#define bfin_read_DMA31_X_COUNT()		bfin_read32(DMA31_X_COUNT)
+#define bfin_write_DMA31_X_COUNT(val)		bfin_write32(DMA31_X_COUNT, val)
+#define bfin_read_DMA31_X_MODIFY()		bfin_read32(DMA31_X_MODIFY)
+#define bfin_write_DMA31_X_MODIFY(val) 		bfin_write32(DMA31_X_MODIFY, val)
+#define bfin_read_DMA31_Y_COUNT()		bfin_read32(DMA31_Y_COUNT)
+#define bfin_write_DMA31_Y_COUNT(val)		bfin_write32(DMA31_Y_COUNT, val)
+#define bfin_read_DMA31_Y_MODIFY()		bfin_read32(DMA31_Y_MODIFY)
+#define bfin_write_DMA31_Y_MODIFY(val) 		bfin_write32(DMA31_Y_MODIFY, val)
+#define bfin_read_DMA31_CURR_DESC_PTR() 	bfin_read32(DMA31_CURR_DESC_PTR)
+#define bfin_write_DMA31_CURR_DESC_PTR(val) 	bfin_write32(DMA31_CURR_DESC_PTR, val)
+#define bfin_read_DMA31_PREV_DESC_PTR() 	bfin_read32(DMA31_PREV_DESC_PTR)
+#define bfin_write_DMA31_PREV_DESC_PTR(val) 	bfin_write32(DMA31_PREV_DESC_PTR, val)
+#define bfin_read_DMA31_CURR_ADDR() 		bfin_read32(DMA31_CURR_ADDR)
+#define bfin_write_DMA31_CURR_ADDR(val) 	bfin_write32(DMA31_CURR_ADDR, val)
+#define bfin_read_DMA31_IRQ_STATUS()		bfin_read32(DMA31_IRQ_STATUS)
+#define bfin_write_DMA31_IRQ_STATUS(val)	bfin_write32(DMA31_IRQ_STATUS, val)
+#define bfin_read_DMA31_CURR_X_COUNT()		bfin_read32(DMA31_CURR_X_COUNT)
+#define bfin_write_DMA31_CURR_X_COUNT(val)	bfin_write32(DMA31_CURR_X_COUNT, val)
+#define bfin_read_DMA31_CURR_Y_COUNT()		bfin_read32(DMA31_CURR_Y_COUNT)
+#define bfin_write_DMA31_CURR_Y_COUNT(val)	bfin_write32(DMA31_CURR_Y_COUNT, val)
+#define bfin_read_DMA31_BWL_COUNT()		bfin_read32(DMA31_BWL_COUNT)
+#define bfin_write_DMA31_BWL_COUNT(val)		bfin_write32(DMA31_BWL_COUNT, val)
+#define bfin_read_DMA31_CURR_BWL_COUNT()	bfin_read32(DMA31_CURR_BWL_COUNT)
+#define bfin_write_DMA31_CURR_BWL_COUNT(val)	bfin_write32(DMA31_CURR_BWL_COUNT, val)
+#define bfin_read_DMA31_BWM_COUNT()		bfin_read32(DMA31_BWM_COUNT)
+#define bfin_write_DMA31_BWM_COUNT(val)		bfin_write32(DMA31_BWM_COUNT, val)
+#define bfin_read_DMA31_CURR_BWM_COUNT()	bfin_read32(DMA31_CURR_BWM_COUNT)
+#define bfin_write_DMA31_CURR_BWM_COUNT(val)	bfin_write32(DMA31_CURR_BWM_COUNT, val)
+
+/* DMA Channel 32 Registers */
+
+#define bfin_read_DMA32_NEXT_DESC_PTR() 	bfin_read32(DMA32_NEXT_DESC_PTR)
+#define bfin_write_DMA32_NEXT_DESC_PTR(val) 	bfin_write32(DMA32_NEXT_DESC_PTR, val)
+#define bfin_read_DMA32_START_ADDR() 		bfin_read32(DMA32_START_ADDR)
+#define bfin_write_DMA32_START_ADDR(val) 	bfin_write32(DMA32_START_ADDR, val)
+#define bfin_read_DMA32_CONFIG()		bfin_read32(DMA32_CONFIG)
+#define bfin_write_DMA32_CONFIG(val)		bfin_write32(DMA32_CONFIG, val)
+#define bfin_read_DMA32_X_COUNT()		bfin_read32(DMA32_X_COUNT)
+#define bfin_write_DMA32_X_COUNT(val)		bfin_write32(DMA32_X_COUNT, val)
+#define bfin_read_DMA32_X_MODIFY()		bfin_read32(DMA32_X_MODIFY)
+#define bfin_write_DMA32_X_MODIFY(val) 		bfin_write32(DMA32_X_MODIFY, val)
+#define bfin_read_DMA32_Y_COUNT()		bfin_read32(DMA32_Y_COUNT)
+#define bfin_write_DMA32_Y_COUNT(val)		bfin_write32(DMA32_Y_COUNT, val)
+#define bfin_read_DMA32_Y_MODIFY()		bfin_read32(DMA32_Y_MODIFY)
+#define bfin_write_DMA32_Y_MODIFY(val) 		bfin_write32(DMA32_Y_MODIFY, val)
+#define bfin_read_DMA32_CURR_DESC_PTR() 	bfin_read32(DMA32_CURR_DESC_PTR)
+#define bfin_write_DMA32_CURR_DESC_PTR(val) 	bfin_write32(DMA32_CURR_DESC_PTR, val)
+#define bfin_read_DMA32_PREV_DESC_PTR() 	bfin_read32(DMA32_PREV_DESC_PTR)
+#define bfin_write_DMA32_PREV_DESC_PTR(val) 	bfin_write32(DMA32_PREV_DESC_PTR, val)
+#define bfin_read_DMA32_CURR_ADDR() 		bfin_read32(DMA32_CURR_ADDR)
+#define bfin_write_DMA32_CURR_ADDR(val) 	bfin_write32(DMA32_CURR_ADDR, val)
+#define bfin_read_DMA32_IRQ_STATUS()		bfin_read32(DMA32_IRQ_STATUS)
+#define bfin_write_DMA32_IRQ_STATUS(val)	bfin_write32(DMA32_IRQ_STATUS, val)
+#define bfin_read_DMA32_CURR_X_COUNT()		bfin_read32(DMA32_CURR_X_COUNT)
+#define bfin_write_DMA32_CURR_X_COUNT(val)	bfin_write32(DMA32_CURR_X_COUNT, val)
+#define bfin_read_DMA32_CURR_Y_COUNT()		bfin_read32(DMA32_CURR_Y_COUNT)
+#define bfin_write_DMA32_CURR_Y_COUNT(val)	bfin_write32(DMA32_CURR_Y_COUNT, val)
+#define bfin_read_DMA32_BWL_COUNT()		bfin_read32(DMA32_BWL_COUNT)
+#define bfin_write_DMA32_BWL_COUNT(val)		bfin_write32(DMA32_BWL_COUNT, val)
+#define bfin_read_DMA32_CURR_BWL_COUNT()	bfin_read32(DMA32_CURR_BWL_COUNT)
+#define bfin_write_DMA32_CURR_BWL_COUNT(val)	bfin_write32(DMA32_CURR_BWL_COUNT, val)
+#define bfin_read_DMA32_BWM_COUNT()		bfin_read32(DMA32_BWM_COUNT)
+#define bfin_write_DMA32_BWM_COUNT(val)		bfin_write32(DMA32_BWM_COUNT, val)
+#define bfin_read_DMA32_CURR_BWM_COUNT()	bfin_read32(DMA32_CURR_BWM_COUNT)
+#define bfin_write_DMA32_CURR_BWM_COUNT(val)	bfin_write32(DMA32_CURR_BWM_COUNT, val)
+
+/* DMA Channel 33 Registers */
+
+#define bfin_read_DMA33_NEXT_DESC_PTR() 	bfin_read32(DMA33_NEXT_DESC_PTR)
+#define bfin_write_DMA33_NEXT_DESC_PTR(val) 	bfin_write32(DMA33_NEXT_DESC_PTR, val)
+#define bfin_read_DMA33_START_ADDR() 		bfin_read32(DMA33_START_ADDR)
+#define bfin_write_DMA33_START_ADDR(val) 	bfin_write32(DMA33_START_ADDR, val)
+#define bfin_read_DMA33_CONFIG()		bfin_read32(DMA33_CONFIG)
+#define bfin_write_DMA33_CONFIG(val)		bfin_write32(DMA33_CONFIG, val)
+#define bfin_read_DMA33_X_COUNT()		bfin_read32(DMA33_X_COUNT)
+#define bfin_write_DMA33_X_COUNT(val)		bfin_write32(DMA33_X_COUNT, val)
+#define bfin_read_DMA33_X_MODIFY()		bfin_read32(DMA33_X_MODIFY)
+#define bfin_write_DMA33_X_MODIFY(val) 		bfin_write32(DMA33_X_MODIFY, val)
+#define bfin_read_DMA33_Y_COUNT()		bfin_read32(DMA33_Y_COUNT)
+#define bfin_write_DMA33_Y_COUNT(val)		bfin_write32(DMA33_Y_COUNT, val)
+#define bfin_read_DMA33_Y_MODIFY()		bfin_read32(DMA33_Y_MODIFY)
+#define bfin_write_DMA33_Y_MODIFY(val) 		bfin_write32(DMA33_Y_MODIFY, val)
+#define bfin_read_DMA33_CURR_DESC_PTR() 	bfin_read32(DMA33_CURR_DESC_PTR)
+#define bfin_write_DMA33_CURR_DESC_PTR(val) 	bfin_write32(DMA33_CURR_DESC_PTR, val)
+#define bfin_read_DMA33_PREV_DESC_PTR() 	bfin_read32(DMA33_PREV_DESC_PTR)
+#define bfin_write_DMA33_PREV_DESC_PTR(val) 	bfin_write32(DMA33_PREV_DESC_PTR, val)
+#define bfin_read_DMA33_CURR_ADDR() 		bfin_read32(DMA33_CURR_ADDR)
+#define bfin_write_DMA33_CURR_ADDR(val) 	bfin_write32(DMA33_CURR_ADDR, val)
+#define bfin_read_DMA33_IRQ_STATUS()		bfin_read32(DMA33_IRQ_STATUS)
+#define bfin_write_DMA33_IRQ_STATUS(val)	bfin_write32(DMA33_IRQ_STATUS, val)
+#define bfin_read_DMA33_CURR_X_COUNT()		bfin_read32(DMA33_CURR_X_COUNT)
+#define bfin_write_DMA33_CURR_X_COUNT(val)	bfin_write32(DMA33_CURR_X_COUNT, val)
+#define bfin_read_DMA33_CURR_Y_COUNT()		bfin_read32(DMA33_CURR_Y_COUNT)
+#define bfin_write_DMA33_CURR_Y_COUNT(val)	bfin_write32(DMA33_CURR_Y_COUNT, val)
+#define bfin_read_DMA33_BWL_COUNT()		bfin_read32(DMA33_BWL_COUNT)
+#define bfin_write_DMA33_BWL_COUNT(val)		bfin_write32(DMA33_BWL_COUNT, val)
+#define bfin_read_DMA33_CURR_BWL_COUNT()	bfin_read32(DMA33_CURR_BWL_COUNT)
+#define bfin_write_DMA33_CURR_BWL_COUNT(val)	bfin_write32(DMA33_CURR_BWL_COUNT, val)
+#define bfin_read_DMA33_BWM_COUNT()		bfin_read32(DMA33_BWM_COUNT)
+#define bfin_write_DMA33_BWM_COUNT(val)		bfin_write32(DMA33_BWM_COUNT, val)
+#define bfin_read_DMA33_CURR_BWM_COUNT()	bfin_read32(DMA33_CURR_BWM_COUNT)
+#define bfin_write_DMA33_CURR_BWM_COUNT(val)	bfin_write32(DMA33_CURR_BWM_COUNT, val)
+
+/* DMA Channel 34 Registers */
+
+#define bfin_read_DMA34_NEXT_DESC_PTR() 	bfin_read32(DMA34_NEXT_DESC_PTR)
+#define bfin_write_DMA34_NEXT_DESC_PTR(val) 	bfin_write32(DMA34_NEXT_DESC_PTR, val)
+#define bfin_read_DMA34_START_ADDR() 		bfin_read32(DMA34_START_ADDR)
+#define bfin_write_DMA34_START_ADDR(val) 	bfin_write32(DMA34_START_ADDR, val)
+#define bfin_read_DMA34_CONFIG()		bfin_read32(DMA34_CONFIG)
+#define bfin_write_DMA34_CONFIG(val)		bfin_write32(DMA34_CONFIG, val)
+#define bfin_read_DMA34_X_COUNT()		bfin_read32(DMA34_X_COUNT)
+#define bfin_write_DMA34_X_COUNT(val)		bfin_write32(DMA34_X_COUNT, val)
+#define bfin_read_DMA34_X_MODIFY()		bfin_read32(DMA34_X_MODIFY)
+#define bfin_write_DMA34_X_MODIFY(val) 		bfin_write32(DMA34_X_MODIFY, val)
+#define bfin_read_DMA34_Y_COUNT()		bfin_read32(DMA34_Y_COUNT)
+#define bfin_write_DMA34_Y_COUNT(val)		bfin_write32(DMA34_Y_COUNT, val)
+#define bfin_read_DMA34_Y_MODIFY()		bfin_read32(DMA34_Y_MODIFY)
+#define bfin_write_DMA34_Y_MODIFY(val) 		bfin_write32(DMA34_Y_MODIFY, val)
+#define bfin_read_DMA34_CURR_DESC_PTR() 	bfin_read32(DMA34_CURR_DESC_PTR)
+#define bfin_write_DMA34_CURR_DESC_PTR(val) 	bfin_write32(DMA34_CURR_DESC_PTR, val)
+#define bfin_read_DMA34_PREV_DESC_PTR() 	bfin_read32(DMA34_PREV_DESC_PTR)
+#define bfin_write_DMA34_PREV_DESC_PTR(val) 	bfin_write32(DMA34_PREV_DESC_PTR, val)
+#define bfin_read_DMA34_CURR_ADDR() 		bfin_read32(DMA34_CURR_ADDR)
+#define bfin_write_DMA34_CURR_ADDR(val) 	bfin_write32(DMA34_CURR_ADDR, val)
+#define bfin_read_DMA34_IRQ_STATUS()		bfin_read32(DMA34_IRQ_STATUS)
+#define bfin_write_DMA34_IRQ_STATUS(val)	bfin_write32(DMA34_IRQ_STATUS, val)
+#define bfin_read_DMA34_CURR_X_COUNT()		bfin_read32(DMA34_CURR_X_COUNT)
+#define bfin_write_DMA34_CURR_X_COUNT(val)	bfin_write32(DMA34_CURR_X_COUNT, val)
+#define bfin_read_DMA34_CURR_Y_COUNT()		bfin_read32(DMA34_CURR_Y_COUNT)
+#define bfin_write_DMA34_CURR_Y_COUNT(val)	bfin_write32(DMA34_CURR_Y_COUNT, val)
+#define bfin_read_DMA34_BWL_COUNT()		bfin_read32(DMA34_BWL_COUNT)
+#define bfin_write_DMA34_BWL_COUNT(val)		bfin_write32(DMA34_BWL_COUNT, val)
+#define bfin_read_DMA34_CURR_BWL_COUNT()	bfin_read32(DMA34_CURR_BWL_COUNT)
+#define bfin_write_DMA34_CURR_BWL_COUNT(val)	bfin_write32(DMA34_CURR_BWL_COUNT, val)
+#define bfin_read_DMA34_BWM_COUNT()		bfin_read32(DMA34_BWM_COUNT)
+#define bfin_write_DMA34_BWM_COUNT(val)		bfin_write32(DMA34_BWM_COUNT, val)
+#define bfin_read_DMA34_CURR_BWM_COUNT()	bfin_read32(DMA34_CURR_BWM_COUNT)
+#define bfin_write_DMA34_CURR_BWM_COUNT(val)	bfin_write32(DMA34_CURR_BWM_COUNT, val)
+
+/* DMA Channel 35 Registers */
+
+#define bfin_read_DMA35_NEXT_DESC_PTR() 	bfin_read32(DMA35_NEXT_DESC_PTR)
+#define bfin_write_DMA35_NEXT_DESC_PTR(val) 	bfin_write32(DMA35_NEXT_DESC_PTR, val)
+#define bfin_read_DMA35_START_ADDR() 		bfin_read32(DMA35_START_ADDR)
+#define bfin_write_DMA35_START_ADDR(val) 	bfin_write32(DMA35_START_ADDR, val)
+#define bfin_read_DMA35_CONFIG()		bfin_read32(DMA35_CONFIG)
+#define bfin_write_DMA35_CONFIG(val)		bfin_write32(DMA35_CONFIG, val)
+#define bfin_read_DMA35_X_COUNT()		bfin_read32(DMA35_X_COUNT)
+#define bfin_write_DMA35_X_COUNT(val)		bfin_write32(DMA35_X_COUNT, val)
+#define bfin_read_DMA35_X_MODIFY()		bfin_read32(DMA35_X_MODIFY)
+#define bfin_write_DMA35_X_MODIFY(val) 		bfin_write32(DMA35_X_MODIFY, val)
+#define bfin_read_DMA35_Y_COUNT()		bfin_read32(DMA35_Y_COUNT)
+#define bfin_write_DMA35_Y_COUNT(val)		bfin_write32(DMA35_Y_COUNT, val)
+#define bfin_read_DMA35_Y_MODIFY()		bfin_read32(DMA35_Y_MODIFY)
+#define bfin_write_DMA35_Y_MODIFY(val) 		bfin_write32(DMA35_Y_MODIFY, val)
+#define bfin_read_DMA35_CURR_DESC_PTR() 	bfin_read32(DMA35_CURR_DESC_PTR)
+#define bfin_write_DMA35_CURR_DESC_PTR(val) 	bfin_write32(DMA35_CURR_DESC_PTR, val)
+#define bfin_read_DMA35_PREV_DESC_PTR() 	bfin_read32(DMA35_PREV_DESC_PTR)
+#define bfin_write_DMA35_PREV_DESC_PTR(val) 	bfin_write32(DMA35_PREV_DESC_PTR, val)
+#define bfin_read_DMA35_CURR_ADDR() 		bfin_read32(DMA35_CURR_ADDR)
+#define bfin_write_DMA35_CURR_ADDR(val) 	bfin_write32(DMA35_CURR_ADDR, val)
+#define bfin_read_DMA35_IRQ_STATUS()		bfin_read32(DMA35_IRQ_STATUS)
+#define bfin_write_DMA35_IRQ_STATUS(val)	bfin_write32(DMA35_IRQ_STATUS, val)
+#define bfin_read_DMA35_CURR_X_COUNT()		bfin_read32(DMA35_CURR_X_COUNT)
+#define bfin_write_DMA35_CURR_X_COUNT(val)	bfin_write32(DMA35_CURR_X_COUNT, val)
+#define bfin_read_DMA35_CURR_Y_COUNT()		bfin_read32(DMA35_CURR_Y_COUNT)
+#define bfin_write_DMA35_CURR_Y_COUNT(val)	bfin_write32(DMA35_CURR_Y_COUNT, val)
+#define bfin_read_DMA35_BWL_COUNT()		bfin_read32(DMA35_BWL_COUNT)
+#define bfin_write_DMA35_BWL_COUNT(val)		bfin_write32(DMA35_BWL_COUNT, val)
+#define bfin_read_DMA35_CURR_BWL_COUNT()	bfin_read32(DMA35_CURR_BWL_COUNT)
+#define bfin_write_DMA35_CURR_BWL_COUNT(val)	bfin_write32(DMA35_CURR_BWL_COUNT, val)
+#define bfin_read_DMA35_BWM_COUNT()		bfin_read32(DMA35_BWM_COUNT)
+#define bfin_write_DMA35_BWM_COUNT(val)		bfin_write32(DMA35_BWM_COUNT, val)
+#define bfin_read_DMA35_CURR_BWM_COUNT()	bfin_read32(DMA35_CURR_BWM_COUNT)
+#define bfin_write_DMA35_CURR_BWM_COUNT(val)	bfin_write32(DMA35_CURR_BWM_COUNT, val)
+
+/* DMA Channel 36 Registers */
+
+#define bfin_read_DMA36_NEXT_DESC_PTR() 	bfin_read32(DMA36_NEXT_DESC_PTR)
+#define bfin_write_DMA36_NEXT_DESC_PTR(val) 	bfin_write32(DMA36_NEXT_DESC_PTR, val)
+#define bfin_read_DMA36_START_ADDR() 		bfin_read32(DMA36_START_ADDR)
+#define bfin_write_DMA36_START_ADDR(val) 	bfin_write32(DMA36_START_ADDR, val)
+#define bfin_read_DMA36_CONFIG()		bfin_read32(DMA36_CONFIG)
+#define bfin_write_DMA36_CONFIG(val)		bfin_write32(DMA36_CONFIG, val)
+#define bfin_read_DMA36_X_COUNT()		bfin_read32(DMA36_X_COUNT)
+#define bfin_write_DMA36_X_COUNT(val)		bfin_write32(DMA36_X_COUNT, val)
+#define bfin_read_DMA36_X_MODIFY()		bfin_read32(DMA36_X_MODIFY)
+#define bfin_write_DMA36_X_MODIFY(val) 		bfin_write32(DMA36_X_MODIFY, val)
+#define bfin_read_DMA36_Y_COUNT()		bfin_read32(DMA36_Y_COUNT)
+#define bfin_write_DMA36_Y_COUNT(val)		bfin_write32(DMA36_Y_COUNT, val)
+#define bfin_read_DMA36_Y_MODIFY()		bfin_read32(DMA36_Y_MODIFY)
+#define bfin_write_DMA36_Y_MODIFY(val) 		bfin_write32(DMA36_Y_MODIFY, val)
+#define bfin_read_DMA36_CURR_DESC_PTR() 	bfin_read32(DMA36_CURR_DESC_PTR)
+#define bfin_write_DMA36_CURR_DESC_PTR(val) 	bfin_write32(DMA36_CURR_DESC_PTR, val)
+#define bfin_read_DMA36_PREV_DESC_PTR() 	bfin_read32(DMA36_PREV_DESC_PTR)
+#define bfin_write_DMA36_PREV_DESC_PTR(val) 	bfin_write32(DMA36_PREV_DESC_PTR, val)
+#define bfin_read_DMA36_CURR_ADDR() 		bfin_read32(DMA36_CURR_ADDR)
+#define bfin_write_DMA36_CURR_ADDR(val) 	bfin_write32(DMA36_CURR_ADDR, val)
+#define bfin_read_DMA36_IRQ_STATUS()		bfin_read32(DMA36_IRQ_STATUS)
+#define bfin_write_DMA36_IRQ_STATUS(val)	bfin_write32(DMA36_IRQ_STATUS, val)
+#define bfin_read_DMA36_CURR_X_COUNT()		bfin_read32(DMA36_CURR_X_COUNT)
+#define bfin_write_DMA36_CURR_X_COUNT(val)	bfin_write32(DMA36_CURR_X_COUNT, val)
+#define bfin_read_DMA36_CURR_Y_COUNT()		bfin_read32(DMA36_CURR_Y_COUNT)
+#define bfin_write_DMA36_CURR_Y_COUNT(val)	bfin_write32(DMA36_CURR_Y_COUNT, val)
+#define bfin_read_DMA36_BWL_COUNT()		bfin_read32(DMA36_BWL_COUNT)
+#define bfin_write_DMA36_BWL_COUNT(val)		bfin_write32(DMA36_BWL_COUNT, val)
+#define bfin_read_DMA36_CURR_BWL_COUNT()	bfin_read32(DMA36_CURR_BWL_COUNT)
+#define bfin_write_DMA36_CURR_BWL_COUNT(val)	bfin_write32(DMA36_CURR_BWL_COUNT, val)
+#define bfin_read_DMA36_BWM_COUNT()		bfin_read32(DMA36_BWM_COUNT)
+#define bfin_write_DMA36_BWM_COUNT(val)		bfin_write32(DMA36_BWM_COUNT, val)
+#define bfin_read_DMA36_CURR_BWM_COUNT()	bfin_read32(DMA36_CURR_BWM_COUNT)
+#define bfin_write_DMA36_CURR_BWM_COUNT(val)	bfin_write32(DMA36_CURR_BWM_COUNT, val)
+
+/* DMA Channel 37 Registers */
+
+#define bfin_read_DMA37_NEXT_DESC_PTR() 	bfin_read32(DMA37_NEXT_DESC_PTR)
+#define bfin_write_DMA37_NEXT_DESC_PTR(val) 	bfin_write32(DMA37_NEXT_DESC_PTR, val)
+#define bfin_read_DMA37_START_ADDR() 		bfin_read32(DMA37_START_ADDR)
+#define bfin_write_DMA37_START_ADDR(val) 	bfin_write32(DMA37_START_ADDR, val)
+#define bfin_read_DMA37_CONFIG()		bfin_read32(DMA37_CONFIG)
+#define bfin_write_DMA37_CONFIG(val)		bfin_write32(DMA37_CONFIG, val)
+#define bfin_read_DMA37_X_COUNT()		bfin_read32(DMA37_X_COUNT)
+#define bfin_write_DMA37_X_COUNT(val)		bfin_write32(DMA37_X_COUNT, val)
+#define bfin_read_DMA37_X_MODIFY()		bfin_read32(DMA37_X_MODIFY)
+#define bfin_write_DMA37_X_MODIFY(val) 		bfin_write32(DMA37_X_MODIFY, val)
+#define bfin_read_DMA37_Y_COUNT()		bfin_read32(DMA37_Y_COUNT)
+#define bfin_write_DMA37_Y_COUNT(val)		bfin_write32(DMA37_Y_COUNT, val)
+#define bfin_read_DMA37_Y_MODIFY()		bfin_read32(DMA37_Y_MODIFY)
+#define bfin_write_DMA37_Y_MODIFY(val) 		bfin_write32(DMA37_Y_MODIFY, val)
+#define bfin_read_DMA37_CURR_DESC_PTR() 	bfin_read32(DMA37_CURR_DESC_PTR)
+#define bfin_write_DMA37_CURR_DESC_PTR(val) 	bfin_write32(DMA37_CURR_DESC_PTR, val)
+#define bfin_read_DMA37_PREV_DESC_PTR() 	bfin_read32(DMA37_PREV_DESC_PTR)
+#define bfin_write_DMA37_PREV_DESC_PTR(val) 	bfin_write32(DMA37_PREV_DESC_PTR, val)
+#define bfin_read_DMA37_CURR_ADDR() 		bfin_read32(DMA37_CURR_ADDR)
+#define bfin_write_DMA37_CURR_ADDR(val) 	bfin_write32(DMA37_CURR_ADDR, val)
+#define bfin_read_DMA37_IRQ_STATUS()		bfin_read32(DMA37_IRQ_STATUS)
+#define bfin_write_DMA37_IRQ_STATUS(val)	bfin_write32(DMA37_IRQ_STATUS, val)
+#define bfin_read_DMA37_CURR_X_COUNT()		bfin_read32(DMA37_CURR_X_COUNT)
+#define bfin_write_DMA37_CURR_X_COUNT(val)	bfin_write32(DMA37_CURR_X_COUNT, val)
+#define bfin_read_DMA37_CURR_Y_COUNT()		bfin_read32(DMA37_CURR_Y_COUNT)
+#define bfin_write_DMA37_CURR_Y_COUNT(val)	bfin_write32(DMA37_CURR_Y_COUNT, val)
+#define bfin_read_DMA37_BWL_COUNT()		bfin_read32(DMA37_BWL_COUNT)
+#define bfin_write_DMA37_BWL_COUNT(val)		bfin_write32(DMA37_BWL_COUNT, val)
+#define bfin_read_DMA37_CURR_BWL_COUNT()	bfin_read32(DMA37_CURR_BWL_COUNT)
+#define bfin_write_DMA37_CURR_BWL_COUNT(val)	bfin_write32(DMA37_CURR_BWL_COUNT, val)
+#define bfin_read_DMA37_BWM_COUNT()		bfin_read32(DMA37_BWM_COUNT)
+#define bfin_write_DMA37_BWM_COUNT(val)		bfin_write32(DMA37_BWM_COUNT, val)
+#define bfin_read_DMA37_CURR_BWM_COUNT()	bfin_read32(DMA37_CURR_BWM_COUNT)
+#define bfin_write_DMA37_CURR_BWM_COUNT(val)	bfin_write32(DMA37_CURR_BWM_COUNT, val)
+
+/* DMA Channel 38 Registers */
+
+#define bfin_read_DMA38_NEXT_DESC_PTR() 	bfin_read32(DMA38_NEXT_DESC_PTR)
+#define bfin_write_DMA38_NEXT_DESC_PTR(val) 	bfin_write32(DMA38_NEXT_DESC_PTR, val)
+#define bfin_read_DMA38_START_ADDR() 		bfin_read32(DMA38_START_ADDR)
+#define bfin_write_DMA38_START_ADDR(val) 	bfin_write32(DMA38_START_ADDR, val)
+#define bfin_read_DMA38_CONFIG()		bfin_read32(DMA38_CONFIG)
+#define bfin_write_DMA38_CONFIG(val)		bfin_write32(DMA38_CONFIG, val)
+#define bfin_read_DMA38_X_COUNT()		bfin_read32(DMA38_X_COUNT)
+#define bfin_write_DMA38_X_COUNT(val)		bfin_write32(DMA38_X_COUNT, val)
+#define bfin_read_DMA38_X_MODIFY()		bfin_read32(DMA38_X_MODIFY)
+#define bfin_write_DMA38_X_MODIFY(val) 		bfin_write32(DMA38_X_MODIFY, val)
+#define bfin_read_DMA38_Y_COUNT()		bfin_read32(DMA38_Y_COUNT)
+#define bfin_write_DMA38_Y_COUNT(val)		bfin_write32(DMA38_Y_COUNT, val)
+#define bfin_read_DMA38_Y_MODIFY()		bfin_read32(DMA38_Y_MODIFY)
+#define bfin_write_DMA38_Y_MODIFY(val) 		bfin_write32(DMA38_Y_MODIFY, val)
+#define bfin_read_DMA38_CURR_DESC_PTR() 	bfin_read32(DMA38_CURR_DESC_PTR)
+#define bfin_write_DMA38_CURR_DESC_PTR(val) 	bfin_write32(DMA38_CURR_DESC_PTR, val)
+#define bfin_read_DMA38_PREV_DESC_PTR() 	bfin_read32(DMA38_PREV_DESC_PTR)
+#define bfin_write_DMA38_PREV_DESC_PTR(val) 	bfin_write32(DMA38_PREV_DESC_PTR, val)
+#define bfin_read_DMA38_CURR_ADDR() 		bfin_read32(DMA38_CURR_ADDR)
+#define bfin_write_DMA38_CURR_ADDR(val) 	bfin_write32(DMA38_CURR_ADDR, val)
+#define bfin_read_DMA38_IRQ_STATUS()		bfin_read32(DMA38_IRQ_STATUS)
+#define bfin_write_DMA38_IRQ_STATUS(val)	bfin_write32(DMA38_IRQ_STATUS, val)
+#define bfin_read_DMA38_CURR_X_COUNT()		bfin_read32(DMA38_CURR_X_COUNT)
+#define bfin_write_DMA38_CURR_X_COUNT(val)	bfin_write32(DMA38_CURR_X_COUNT, val)
+#define bfin_read_DMA38_CURR_Y_COUNT()		bfin_read32(DMA38_CURR_Y_COUNT)
+#define bfin_write_DMA38_CURR_Y_COUNT(val)	bfin_write32(DMA38_CURR_Y_COUNT, val)
+#define bfin_read_DMA38_BWL_COUNT()		bfin_read32(DMA38_BWL_COUNT)
+#define bfin_write_DMA38_BWL_COUNT(val)		bfin_write32(DMA38_BWL_COUNT, val)
+#define bfin_read_DMA38_CURR_BWL_COUNT()	bfin_read32(DMA38_CURR_BWL_COUNT)
+#define bfin_write_DMA38_CURR_BWL_COUNT(val)	bfin_write32(DMA38_CURR_BWL_COUNT, val)
+#define bfin_read_DMA38_BWM_COUNT()		bfin_read32(DMA38_BWM_COUNT)
+#define bfin_write_DMA38_BWM_COUNT(val)		bfin_write32(DMA38_BWM_COUNT, val)
+#define bfin_read_DMA38_CURR_BWM_COUNT()	bfin_read32(DMA38_CURR_BWM_COUNT)
+#define bfin_write_DMA38_CURR_BWM_COUNT(val)	bfin_write32(DMA38_CURR_BWM_COUNT, val)
+
+/* DMA Channel 39 Registers */
+
+#define bfin_read_DMA39_NEXT_DESC_PTR() 	bfin_read32(DMA39_NEXT_DESC_PTR)
+#define bfin_write_DMA39_NEXT_DESC_PTR(val) 	bfin_write32(DMA39_NEXT_DESC_PTR, val)
+#define bfin_read_DMA39_START_ADDR() 		bfin_read32(DMA39_START_ADDR)
+#define bfin_write_DMA39_START_ADDR(val) 	bfin_write32(DMA39_START_ADDR, val)
+#define bfin_read_DMA39_CONFIG()		bfin_read32(DMA39_CONFIG)
+#define bfin_write_DMA39_CONFIG(val)		bfin_write32(DMA39_CONFIG, val)
+#define bfin_read_DMA39_X_COUNT()		bfin_read32(DMA39_X_COUNT)
+#define bfin_write_DMA39_X_COUNT(val)		bfin_write32(DMA39_X_COUNT, val)
+#define bfin_read_DMA39_X_MODIFY()		bfin_read32(DMA39_X_MODIFY)
+#define bfin_write_DMA39_X_MODIFY(val) 		bfin_write32(DMA39_X_MODIFY, val)
+#define bfin_read_DMA39_Y_COUNT()		bfin_read32(DMA39_Y_COUNT)
+#define bfin_write_DMA39_Y_COUNT(val)		bfin_write32(DMA39_Y_COUNT, val)
+#define bfin_read_DMA39_Y_MODIFY()		bfin_read32(DMA39_Y_MODIFY)
+#define bfin_write_DMA39_Y_MODIFY(val) 		bfin_write32(DMA39_Y_MODIFY, val)
+#define bfin_read_DMA39_CURR_DESC_PTR() 	bfin_read32(DMA39_CURR_DESC_PTR)
+#define bfin_write_DMA39_CURR_DESC_PTR(val) 	bfin_write32(DMA39_CURR_DESC_PTR, val)
+#define bfin_read_DMA39_PREV_DESC_PTR() 	bfin_read32(DMA39_PREV_DESC_PTR)
+#define bfin_write_DMA39_PREV_DESC_PTR(val) 	bfin_write32(DMA39_PREV_DESC_PTR, val)
+#define bfin_read_DMA39_CURR_ADDR() 		bfin_read32(DMA39_CURR_ADDR)
+#define bfin_write_DMA39_CURR_ADDR(val) 	bfin_write32(DMA39_CURR_ADDR, val)
+#define bfin_read_DMA39_IRQ_STATUS()		bfin_read32(DMA39_IRQ_STATUS)
+#define bfin_write_DMA39_IRQ_STATUS(val)	bfin_write32(DMA39_IRQ_STATUS, val)
+#define bfin_read_DMA39_CURR_X_COUNT()		bfin_read32(DMA39_CURR_X_COUNT)
+#define bfin_write_DMA39_CURR_X_COUNT(val)	bfin_write32(DMA39_CURR_X_COUNT, val)
+#define bfin_read_DMA39_CURR_Y_COUNT()		bfin_read32(DMA39_CURR_Y_COUNT)
+#define bfin_write_DMA39_CURR_Y_COUNT(val)	bfin_write32(DMA39_CURR_Y_COUNT, val)
+#define bfin_read_DMA39_BWL_COUNT()		bfin_read32(DMA39_BWL_COUNT)
+#define bfin_write_DMA39_BWL_COUNT(val)		bfin_write32(DMA39_BWL_COUNT, val)
+#define bfin_read_DMA39_CURR_BWL_COUNT()	bfin_read32(DMA39_CURR_BWL_COUNT)
+#define bfin_write_DMA39_CURR_BWL_COUNT(val)	bfin_write32(DMA39_CURR_BWL_COUNT, val)
+#define bfin_read_DMA39_BWM_COUNT()		bfin_read32(DMA39_BWM_COUNT)
+#define bfin_write_DMA39_BWM_COUNT(val)		bfin_write32(DMA39_BWM_COUNT, val)
+#define bfin_read_DMA39_CURR_BWM_COUNT()	bfin_read32(DMA39_CURR_BWM_COUNT)
+#define bfin_write_DMA39_CURR_BWM_COUNT(val)	bfin_write32(DMA39_CURR_BWM_COUNT, val)
+
+/* DMA Channel 40 Registers */
+
+#define bfin_read_DMA40_NEXT_DESC_PTR() 	bfin_read32(DMA40_NEXT_DESC_PTR)
+#define bfin_write_DMA40_NEXT_DESC_PTR(val) 	bfin_write32(DMA40_NEXT_DESC_PTR, val)
+#define bfin_read_DMA40_START_ADDR() 		bfin_read32(DMA40_START_ADDR)
+#define bfin_write_DMA40_START_ADDR(val) 	bfin_write32(DMA40_START_ADDR, val)
+#define bfin_read_DMA40_CONFIG()		bfin_read32(DMA40_CONFIG)
+#define bfin_write_DMA40_CONFIG(val)		bfin_write32(DMA40_CONFIG, val)
+#define bfin_read_DMA40_X_COUNT()		bfin_read32(DMA40_X_COUNT)
+#define bfin_write_DMA40_X_COUNT(val)		bfin_write32(DMA40_X_COUNT, val)
+#define bfin_read_DMA40_X_MODIFY()		bfin_read32(DMA40_X_MODIFY)
+#define bfin_write_DMA40_X_MODIFY(val) 		bfin_write32(DMA40_X_MODIFY, val)
+#define bfin_read_DMA40_Y_COUNT()		bfin_read32(DMA40_Y_COUNT)
+#define bfin_write_DMA40_Y_COUNT(val)		bfin_write32(DMA40_Y_COUNT, val)
+#define bfin_read_DMA40_Y_MODIFY()		bfin_read32(DMA40_Y_MODIFY)
+#define bfin_write_DMA40_Y_MODIFY(val) 		bfin_write32(DMA40_Y_MODIFY, val)
+#define bfin_read_DMA40_CURR_DESC_PTR() 	bfin_read32(DMA40_CURR_DESC_PTR)
+#define bfin_write_DMA40_CURR_DESC_PTR(val) 	bfin_write32(DMA40_CURR_DESC_PTR, val)
+#define bfin_read_DMA40_PREV_DESC_PTR() 	bfin_read32(DMA40_PREV_DESC_PTR)
+#define bfin_write_DMA40_PREV_DESC_PTR(val) 	bfin_write32(DMA40_PREV_DESC_PTR, val)
+#define bfin_read_DMA40_CURR_ADDR() 		bfin_read32(DMA40_CURR_ADDR)
+#define bfin_write_DMA40_CURR_ADDR(val) 	bfin_write32(DMA40_CURR_ADDR, val)
+#define bfin_read_DMA40_IRQ_STATUS()		bfin_read32(DMA40_IRQ_STATUS)
+#define bfin_write_DMA40_IRQ_STATUS(val)	bfin_write32(DMA40_IRQ_STATUS, val)
+#define bfin_read_DMA40_CURR_X_COUNT()		bfin_read32(DMA40_CURR_X_COUNT)
+#define bfin_write_DMA40_CURR_X_COUNT(val)	bfin_write32(DMA40_CURR_X_COUNT, val)
+#define bfin_read_DMA40_CURR_Y_COUNT()		bfin_read32(DMA40_CURR_Y_COUNT)
+#define bfin_write_DMA40_CURR_Y_COUNT(val)	bfin_write32(DMA40_CURR_Y_COUNT, val)
+#define bfin_read_DMA40_BWL_COUNT()		bfin_read32(DMA40_BWL_COUNT)
+#define bfin_write_DMA40_BWL_COUNT(val)		bfin_write32(DMA40_BWL_COUNT, val)
+#define bfin_read_DMA40_CURR_BWL_COUNT()	bfin_read32(DMA40_CURR_BWL_COUNT)
+#define bfin_write_DMA40_CURR_BWL_COUNT(val)	bfin_write32(DMA40_CURR_BWL_COUNT, val)
+#define bfin_read_DMA40_BWM_COUNT()		bfin_read32(DMA40_BWM_COUNT)
+#define bfin_write_DMA40_BWM_COUNT(val)		bfin_write32(DMA40_BWM_COUNT, val)
+#define bfin_read_DMA40_CURR_BWM_COUNT()	bfin_read32(DMA40_CURR_BWM_COUNT)
+#define bfin_write_DMA40_CURR_BWM_COUNT(val)	bfin_write32(DMA40_CURR_BWM_COUNT, val)
+
+/* DMA Channel 41 Registers */
+
+#define bfin_read_DMA41_NEXT_DESC_PTR() 	bfin_read32(DMA41_NEXT_DESC_PTR)
+#define bfin_write_DMA41_NEXT_DESC_PTR(val) 	bfin_write32(DMA41_NEXT_DESC_PTR, val)
+#define bfin_read_DMA41_START_ADDR() 		bfin_read32(DMA41_START_ADDR)
+#define bfin_write_DMA41_START_ADDR(val) 	bfin_write32(DMA41_START_ADDR, val)
+#define bfin_read_DMA41_CONFIG()		bfin_read32(DMA41_CONFIG)
+#define bfin_write_DMA41_CONFIG(val)		bfin_write32(DMA41_CONFIG, val)
+#define bfin_read_DMA41_X_COUNT()		bfin_read32(DMA41_X_COUNT)
+#define bfin_write_DMA41_X_COUNT(val)		bfin_write32(DMA41_X_COUNT, val)
+#define bfin_read_DMA41_X_MODIFY()		bfin_read32(DMA41_X_MODIFY)
+#define bfin_write_DMA41_X_MODIFY(val) 		bfin_write32(DMA41_X_MODIFY, val)
+#define bfin_read_DMA41_Y_COUNT()		bfin_read32(DMA41_Y_COUNT)
+#define bfin_write_DMA41_Y_COUNT(val)		bfin_write32(DMA41_Y_COUNT, val)
+#define bfin_read_DMA41_Y_MODIFY()		bfin_read32(DMA41_Y_MODIFY)
+#define bfin_write_DMA41_Y_MODIFY(val) 		bfin_write32(DMA41_Y_MODIFY, val)
+#define bfin_read_DMA41_CURR_DESC_PTR() 	bfin_read32(DMA41_CURR_DESC_PTR)
+#define bfin_write_DMA41_CURR_DESC_PTR(val) 	bfin_write32(DMA41_CURR_DESC_PTR, val)
+#define bfin_read_DMA41_PREV_DESC_PTR() 	bfin_read32(DMA41_PREV_DESC_PTR)
+#define bfin_write_DMA41_PREV_DESC_PTR(val) 	bfin_write32(DMA41_PREV_DESC_PTR, val)
+#define bfin_read_DMA41_CURR_ADDR() 		bfin_read32(DMA41_CURR_ADDR)
+#define bfin_write_DMA41_CURR_ADDR(val) 	bfin_write32(DMA41_CURR_ADDR, val)
+#define bfin_read_DMA41_IRQ_STATUS()		bfin_read32(DMA41_IRQ_STATUS)
+#define bfin_write_DMA41_IRQ_STATUS(val)	bfin_write32(DMA41_IRQ_STATUS, val)
+#define bfin_read_DMA41_CURR_X_COUNT()		bfin_read32(DMA41_CURR_X_COUNT)
+#define bfin_write_DMA41_CURR_X_COUNT(val)	bfin_write32(DMA41_CURR_X_COUNT, val)
+#define bfin_read_DMA41_CURR_Y_COUNT()		bfin_read32(DMA41_CURR_Y_COUNT)
+#define bfin_write_DMA41_CURR_Y_COUNT(val)	bfin_write32(DMA41_CURR_Y_COUNT, val)
+#define bfin_read_DMA41_BWL_COUNT()		bfin_read32(DMA41_BWL_COUNT)
+#define bfin_write_DMA41_BWL_COUNT(val)		bfin_write32(DMA41_BWL_COUNT, val)
+#define bfin_read_DMA41_CURR_BWL_COUNT()	bfin_read32(DMA41_CURR_BWL_COUNT)
+#define bfin_write_DMA41_CURR_BWL_COUNT(val)	bfin_write32(DMA41_CURR_BWL_COUNT, val)
+#define bfin_read_DMA41_BWM_COUNT()		bfin_read32(DMA41_BWM_COUNT)
+#define bfin_write_DMA41_BWM_COUNT(val)		bfin_write32(DMA41_BWM_COUNT, val)
+#define bfin_read_DMA41_CURR_BWM_COUNT()	bfin_read32(DMA41_CURR_BWM_COUNT)
+#define bfin_write_DMA41_CURR_BWM_COUNT(val)	bfin_write32(DMA41_CURR_BWM_COUNT, val)
+
+/* DMA Channel 42 Registers */
+
+#define bfin_read_DMA42_NEXT_DESC_PTR() 	bfin_read32(DMA42_NEXT_DESC_PTR)
+#define bfin_write_DMA42_NEXT_DESC_PTR(val) 	bfin_write32(DMA42_NEXT_DESC_PTR, val)
+#define bfin_read_DMA42_START_ADDR() 		bfin_read32(DMA42_START_ADDR)
+#define bfin_write_DMA42_START_ADDR(val) 	bfin_write32(DMA42_START_ADDR, val)
+#define bfin_read_DMA42_CONFIG()		bfin_read32(DMA42_CONFIG)
+#define bfin_write_DMA42_CONFIG(val)		bfin_write32(DMA42_CONFIG, val)
+#define bfin_read_DMA42_X_COUNT()		bfin_read32(DMA42_X_COUNT)
+#define bfin_write_DMA42_X_COUNT(val)		bfin_write32(DMA42_X_COUNT, val)
+#define bfin_read_DMA42_X_MODIFY()		bfin_read32(DMA42_X_MODIFY)
+#define bfin_write_DMA42_X_MODIFY(val) 		bfin_write32(DMA42_X_MODIFY, val)
+#define bfin_read_DMA42_Y_COUNT()		bfin_read32(DMA42_Y_COUNT)
+#define bfin_write_DMA42_Y_COUNT(val)		bfin_write32(DMA42_Y_COUNT, val)
+#define bfin_read_DMA42_Y_MODIFY()		bfin_read32(DMA42_Y_MODIFY)
+#define bfin_write_DMA42_Y_MODIFY(val) 		bfin_write32(DMA42_Y_MODIFY, val)
+#define bfin_read_DMA42_CURR_DESC_PTR() 	bfin_read32(DMA42_CURR_DESC_PTR)
+#define bfin_write_DMA42_CURR_DESC_PTR(val) 	bfin_write32(DMA42_CURR_DESC_PTR, val)
+#define bfin_read_DMA42_PREV_DESC_PTR() 	bfin_read32(DMA42_PREV_DESC_PTR)
+#define bfin_write_DMA42_PREV_DESC_PTR(val) 	bfin_write32(DMA42_PREV_DESC_PTR, val)
+#define bfin_read_DMA42_CURR_ADDR() 		bfin_read32(DMA42_CURR_ADDR)
+#define bfin_write_DMA42_CURR_ADDR(val) 	bfin_write32(DMA42_CURR_ADDR, val)
+#define bfin_read_DMA42_IRQ_STATUS()		bfin_read32(DMA42_IRQ_STATUS)
+#define bfin_write_DMA42_IRQ_STATUS(val)	bfin_write32(DMA42_IRQ_STATUS, val)
+#define bfin_read_DMA42_CURR_X_COUNT()		bfin_read32(DMA42_CURR_X_COUNT)
+#define bfin_write_DMA42_CURR_X_COUNT(val)	bfin_write32(DMA42_CURR_X_COUNT, val)
+#define bfin_read_DMA42_CURR_Y_COUNT()		bfin_read32(DMA42_CURR_Y_COUNT)
+#define bfin_write_DMA42_CURR_Y_COUNT(val)	bfin_write32(DMA42_CURR_Y_COUNT, val)
+#define bfin_read_DMA42_BWL_COUNT()		bfin_read32(DMA42_BWL_COUNT)
+#define bfin_write_DMA42_BWL_COUNT(val)		bfin_write32(DMA42_BWL_COUNT, val)
+#define bfin_read_DMA42_CURR_BWL_COUNT()	bfin_read32(DMA42_CURR_BWL_COUNT)
+#define bfin_write_DMA42_CURR_BWL_COUNT(val)	bfin_write32(DMA42_CURR_BWL_COUNT, val)
+#define bfin_read_DMA42_BWM_COUNT()		bfin_read32(DMA42_BWM_COUNT)
+#define bfin_write_DMA42_BWM_COUNT(val)		bfin_write32(DMA42_BWM_COUNT, val)
+#define bfin_read_DMA42_CURR_BWM_COUNT()	bfin_read32(DMA42_CURR_BWM_COUNT)
+#define bfin_write_DMA42_CURR_BWM_COUNT(val)	bfin_write32(DMA42_CURR_BWM_COUNT, val)
+
+/* DMA Channel 43 Registers */
+
+#define bfin_read_DMA43_NEXT_DESC_PTR() 	bfin_read32(DMA43_NEXT_DESC_PTR)
+#define bfin_write_DMA43_NEXT_DESC_PTR(val) 	bfin_write32(DMA43_NEXT_DESC_PTR, val)
+#define bfin_read_DMA43_START_ADDR() 		bfin_read32(DMA43_START_ADDR)
+#define bfin_write_DMA43_START_ADDR(val) 	bfin_write32(DMA43_START_ADDR, val)
+#define bfin_read_DMA43_CONFIG()		bfin_read32(DMA43_CONFIG)
+#define bfin_write_DMA43_CONFIG(val)		bfin_write32(DMA43_CONFIG, val)
+#define bfin_read_DMA43_X_COUNT()		bfin_read32(DMA43_X_COUNT)
+#define bfin_write_DMA43_X_COUNT(val)		bfin_write32(DMA43_X_COUNT, val)
+#define bfin_read_DMA43_X_MODIFY()		bfin_read32(DMA43_X_MODIFY)
+#define bfin_write_DMA43_X_MODIFY(val) 		bfin_write32(DMA43_X_MODIFY, val)
+#define bfin_read_DMA43_Y_COUNT()		bfin_read32(DMA43_Y_COUNT)
+#define bfin_write_DMA43_Y_COUNT(val)		bfin_write32(DMA43_Y_COUNT, val)
+#define bfin_read_DMA43_Y_MODIFY()		bfin_read32(DMA43_Y_MODIFY)
+#define bfin_write_DMA43_Y_MODIFY(val) 		bfin_write32(DMA43_Y_MODIFY, val)
+#define bfin_read_DMA43_CURR_DESC_PTR() 	bfin_read32(DMA43_CURR_DESC_PTR)
+#define bfin_write_DMA43_CURR_DESC_PTR(val) 	bfin_write32(DMA43_CURR_DESC_PTR, val)
+#define bfin_read_DMA43_PREV_DESC_PTR() 	bfin_read32(DMA43_PREV_DESC_PTR)
+#define bfin_write_DMA43_PREV_DESC_PTR(val) 	bfin_write32(DMA43_PREV_DESC_PTR, val)
+#define bfin_read_DMA43_CURR_ADDR() 		bfin_read32(DMA43_CURR_ADDR)
+#define bfin_write_DMA43_CURR_ADDR(val) 	bfin_write32(DMA43_CURR_ADDR, val)
+#define bfin_read_DMA43_IRQ_STATUS()		bfin_read32(DMA43_IRQ_STATUS)
+#define bfin_write_DMA43_IRQ_STATUS(val)	bfin_write32(DMA43_IRQ_STATUS, val)
+#define bfin_read_DMA43_CURR_X_COUNT()		bfin_read32(DMA43_CURR_X_COUNT)
+#define bfin_write_DMA43_CURR_X_COUNT(val)	bfin_write32(DMA43_CURR_X_COUNT, val)
+#define bfin_read_DMA43_CURR_Y_COUNT()		bfin_read32(DMA43_CURR_Y_COUNT)
+#define bfin_write_DMA43_CURR_Y_COUNT(val)	bfin_write32(DMA43_CURR_Y_COUNT, val)
+#define bfin_read_DMA43_BWL_COUNT()		bfin_read32(DMA43_BWL_COUNT)
+#define bfin_write_DMA43_BWL_COUNT(val)		bfin_write32(DMA43_BWL_COUNT, val)
+#define bfin_read_DMA43_CURR_BWL_COUNT()	bfin_read32(DMA43_CURR_BWL_COUNT)
+#define bfin_write_DMA43_CURR_BWL_COUNT(val)	bfin_write32(DMA43_CURR_BWL_COUNT, val)
+#define bfin_read_DMA43_BWM_COUNT()		bfin_read32(DMA43_BWM_COUNT)
+#define bfin_write_DMA43_BWM_COUNT(val)		bfin_write32(DMA43_BWM_COUNT, val)
+#define bfin_read_DMA43_CURR_BWM_COUNT()	bfin_read32(DMA43_CURR_BWM_COUNT)
+#define bfin_write_DMA43_CURR_BWM_COUNT(val)	bfin_write32(DMA43_CURR_BWM_COUNT, val)
+
+/* DMA Channel 44 Registers */
+
+#define bfin_read_DMA44_NEXT_DESC_PTR() 	bfin_read32(DMA44_NEXT_DESC_PTR)
+#define bfin_write_DMA44_NEXT_DESC_PTR(val) 	bfin_write32(DMA44_NEXT_DESC_PTR, val)
+#define bfin_read_DMA44_START_ADDR() 		bfin_read32(DMA44_START_ADDR)
+#define bfin_write_DMA44_START_ADDR(val) 	bfin_write32(DMA44_START_ADDR, val)
+#define bfin_read_DMA44_CONFIG()		bfin_read32(DMA44_CONFIG)
+#define bfin_write_DMA44_CONFIG(val)		bfin_write32(DMA44_CONFIG, val)
+#define bfin_read_DMA44_X_COUNT()		bfin_read32(DMA44_X_COUNT)
+#define bfin_write_DMA44_X_COUNT(val)		bfin_write32(DMA44_X_COUNT, val)
+#define bfin_read_DMA44_X_MODIFY()		bfin_read32(DMA44_X_MODIFY)
+#define bfin_write_DMA44_X_MODIFY(val) 		bfin_write32(DMA44_X_MODIFY, val)
+#define bfin_read_DMA44_Y_COUNT()		bfin_read32(DMA44_Y_COUNT)
+#define bfin_write_DMA44_Y_COUNT(val)		bfin_write32(DMA44_Y_COUNT, val)
+#define bfin_read_DMA44_Y_MODIFY()		bfin_read32(DMA44_Y_MODIFY)
+#define bfin_write_DMA44_Y_MODIFY(val) 		bfin_write32(DMA44_Y_MODIFY, val)
+#define bfin_read_DMA44_CURR_DESC_PTR() 	bfin_read32(DMA44_CURR_DESC_PTR)
+#define bfin_write_DMA44_CURR_DESC_PTR(val) 	bfin_write32(DMA44_CURR_DESC_PTR, val)
+#define bfin_read_DMA44_PREV_DESC_PTR() 	bfin_read32(DMA44_PREV_DESC_PTR)
+#define bfin_write_DMA44_PREV_DESC_PTR(val) 	bfin_write32(DMA44_PREV_DESC_PTR, val)
+#define bfin_read_DMA44_CURR_ADDR() 		bfin_read32(DMA44_CURR_ADDR)
+#define bfin_write_DMA44_CURR_ADDR(val) 	bfin_write32(DMA44_CURR_ADDR, val)
+#define bfin_read_DMA44_IRQ_STATUS()		bfin_read32(DMA44_IRQ_STATUS)
+#define bfin_write_DMA44_IRQ_STATUS(val)	bfin_write32(DMA44_IRQ_STATUS, val)
+#define bfin_read_DMA44_CURR_X_COUNT()		bfin_read32(DMA44_CURR_X_COUNT)
+#define bfin_write_DMA44_CURR_X_COUNT(val)	bfin_write32(DMA44_CURR_X_COUNT, val)
+#define bfin_read_DMA44_CURR_Y_COUNT()		bfin_read32(DMA44_CURR_Y_COUNT)
+#define bfin_write_DMA44_CURR_Y_COUNT(val)	bfin_write32(DMA44_CURR_Y_COUNT, val)
+#define bfin_read_DMA44_BWL_COUNT()		bfin_read32(DMA44_BWL_COUNT)
+#define bfin_write_DMA44_BWL_COUNT(val)		bfin_write32(DMA44_BWL_COUNT, val)
+#define bfin_read_DMA44_CURR_BWL_COUNT()	bfin_read32(DMA44_CURR_BWL_COUNT)
+#define bfin_write_DMA44_CURR_BWL_COUNT(val)	bfin_write32(DMA44_CURR_BWL_COUNT, val)
+#define bfin_read_DMA44_BWM_COUNT()		bfin_read32(DMA44_BWM_COUNT)
+#define bfin_write_DMA44_BWM_COUNT(val)		bfin_write32(DMA44_BWM_COUNT, val)
+#define bfin_read_DMA44_CURR_BWM_COUNT()	bfin_read32(DMA44_CURR_BWM_COUNT)
+#define bfin_write_DMA44_CURR_BWM_COUNT(val)	bfin_write32(DMA44_CURR_BWM_COUNT, val)
+
+/* DMA Channel 45 Registers */
+
+#define bfin_read_DMA45_NEXT_DESC_PTR() 	bfin_read32(DMA45_NEXT_DESC_PTR)
+#define bfin_write_DMA45_NEXT_DESC_PTR(val) 	bfin_write32(DMA45_NEXT_DESC_PTR, val)
+#define bfin_read_DMA45_START_ADDR() 		bfin_read32(DMA45_START_ADDR)
+#define bfin_write_DMA45_START_ADDR(val) 	bfin_write32(DMA45_START_ADDR, val)
+#define bfin_read_DMA45_CONFIG()		bfin_read32(DMA45_CONFIG)
+#define bfin_write_DMA45_CONFIG(val)		bfin_write32(DMA45_CONFIG, val)
+#define bfin_read_DMA45_X_COUNT()		bfin_read32(DMA45_X_COUNT)
+#define bfin_write_DMA45_X_COUNT(val)		bfin_write32(DMA45_X_COUNT, val)
+#define bfin_read_DMA45_X_MODIFY()		bfin_read32(DMA45_X_MODIFY)
+#define bfin_write_DMA45_X_MODIFY(val) 		bfin_write32(DMA45_X_MODIFY, val)
+#define bfin_read_DMA45_Y_COUNT()		bfin_read32(DMA45_Y_COUNT)
+#define bfin_write_DMA45_Y_COUNT(val)		bfin_write32(DMA45_Y_COUNT, val)
+#define bfin_read_DMA45_Y_MODIFY()		bfin_read32(DMA45_Y_MODIFY)
+#define bfin_write_DMA45_Y_MODIFY(val) 		bfin_write32(DMA45_Y_MODIFY, val)
+#define bfin_read_DMA45_CURR_DESC_PTR() 	bfin_read32(DMA45_CURR_DESC_PTR)
+#define bfin_write_DMA45_CURR_DESC_PTR(val) 	bfin_write32(DMA45_CURR_DESC_PTR, val)
+#define bfin_read_DMA45_PREV_DESC_PTR() 	bfin_read32(DMA45_PREV_DESC_PTR)
+#define bfin_write_DMA45_PREV_DESC_PTR(val) 	bfin_write32(DMA45_PREV_DESC_PTR, val)
+#define bfin_read_DMA45_CURR_ADDR() 		bfin_read32(DMA45_CURR_ADDR)
+#define bfin_write_DMA45_CURR_ADDR(val) 	bfin_write32(DMA45_CURR_ADDR, val)
+#define bfin_read_DMA45_IRQ_STATUS()		bfin_read32(DMA45_IRQ_STATUS)
+#define bfin_write_DMA45_IRQ_STATUS(val)	bfin_write32(DMA45_IRQ_STATUS, val)
+#define bfin_read_DMA45_CURR_X_COUNT()		bfin_read32(DMA45_CURR_X_COUNT)
+#define bfin_write_DMA45_CURR_X_COUNT(val)	bfin_write32(DMA45_CURR_X_COUNT, val)
+#define bfin_read_DMA45_CURR_Y_COUNT()		bfin_read32(DMA45_CURR_Y_COUNT)
+#define bfin_write_DMA45_CURR_Y_COUNT(val)	bfin_write32(DMA45_CURR_Y_COUNT, val)
+#define bfin_read_DMA45_BWL_COUNT()		bfin_read32(DMA45_BWL_COUNT)
+#define bfin_write_DMA45_BWL_COUNT(val)		bfin_write32(DMA45_BWL_COUNT, val)
+#define bfin_read_DMA45_CURR_BWL_COUNT()	bfin_read32(DMA45_CURR_BWL_COUNT)
+#define bfin_write_DMA45_CURR_BWL_COUNT(val)	bfin_write32(DMA45_CURR_BWL_COUNT, val)
+#define bfin_read_DMA45_BWM_COUNT()		bfin_read32(DMA45_BWM_COUNT)
+#define bfin_write_DMA45_BWM_COUNT(val)		bfin_write32(DMA45_BWM_COUNT, val)
+#define bfin_read_DMA45_CURR_BWM_COUNT()	bfin_read32(DMA45_CURR_BWM_COUNT)
+#define bfin_write_DMA45_CURR_BWM_COUNT(val)	bfin_write32(DMA45_CURR_BWM_COUNT, val)
+
+/* DMA Channel 46 Registers */
+
+#define bfin_read_DMA46_NEXT_DESC_PTR() 	bfin_read32(DMA46_NEXT_DESC_PTR)
+#define bfin_write_DMA46_NEXT_DESC_PTR(val) 	bfin_write32(DMA46_NEXT_DESC_PTR, val)
+#define bfin_read_DMA46_START_ADDR() 		bfin_read32(DMA46_START_ADDR)
+#define bfin_write_DMA46_START_ADDR(val) 	bfin_write32(DMA46_START_ADDR, val)
+#define bfin_read_DMA46_CONFIG()		bfin_read32(DMA46_CONFIG)
+#define bfin_write_DMA46_CONFIG(val)		bfin_write32(DMA46_CONFIG, val)
+#define bfin_read_DMA46_X_COUNT()		bfin_read32(DMA46_X_COUNT)
+#define bfin_write_DMA46_X_COUNT(val)		bfin_write32(DMA46_X_COUNT, val)
+#define bfin_read_DMA46_X_MODIFY()		bfin_read32(DMA46_X_MODIFY)
+#define bfin_write_DMA46_X_MODIFY(val) 		bfin_write32(DMA46_X_MODIFY, val)
+#define bfin_read_DMA46_Y_COUNT()		bfin_read32(DMA46_Y_COUNT)
+#define bfin_write_DMA46_Y_COUNT(val)		bfin_write32(DMA46_Y_COUNT, val)
+#define bfin_read_DMA46_Y_MODIFY()		bfin_read32(DMA46_Y_MODIFY)
+#define bfin_write_DMA46_Y_MODIFY(val) 		bfin_write32(DMA46_Y_MODIFY, val)
+#define bfin_read_DMA46_CURR_DESC_PTR() 	bfin_read32(DMA46_CURR_DESC_PTR)
+#define bfin_write_DMA46_CURR_DESC_PTR(val) 	bfin_write32(DMA46_CURR_DESC_PTR, val)
+#define bfin_read_DMA46_PREV_DESC_PTR() 	bfin_read32(DMA46_PREV_DESC_PTR)
+#define bfin_write_DMA46_PREV_DESC_PTR(val) 	bfin_write32(DMA46_PREV_DESC_PTR, val)
+#define bfin_read_DMA46_CURR_ADDR() 		bfin_read32(DMA46_CURR_ADDR)
+#define bfin_write_DMA46_CURR_ADDR(val) 	bfin_write32(DMA46_CURR_ADDR, val)
+#define bfin_read_DMA46_IRQ_STATUS()		bfin_read32(DMA46_IRQ_STATUS)
+#define bfin_write_DMA46_IRQ_STATUS(val)	bfin_write32(DMA46_IRQ_STATUS, val)
+#define bfin_read_DMA46_CURR_X_COUNT()		bfin_read32(DMA46_CURR_X_COUNT)
+#define bfin_write_DMA46_CURR_X_COUNT(val)	bfin_write32(DMA46_CURR_X_COUNT, val)
+#define bfin_read_DMA46_CURR_Y_COUNT()		bfin_read32(DMA46_CURR_Y_COUNT)
+#define bfin_write_DMA46_CURR_Y_COUNT(val)	bfin_write32(DMA46_CURR_Y_COUNT, val)
+#define bfin_read_DMA46_BWL_COUNT()		bfin_read32(DMA46_BWL_COUNT)
+#define bfin_write_DMA46_BWL_COUNT(val)		bfin_write32(DMA46_BWL_COUNT, val)
+#define bfin_read_DMA46_CURR_BWL_COUNT()	bfin_read32(DMA46_CURR_BWL_COUNT)
+#define bfin_write_DMA46_CURR_BWL_COUNT(val)	bfin_write32(DMA46_CURR_BWL_COUNT, val)
+#define bfin_read_DMA46_BWM_COUNT()		bfin_read32(DMA46_BWM_COUNT)
+#define bfin_write_DMA46_BWM_COUNT(val)		bfin_write32(DMA46_BWM_COUNT, val)
+#define bfin_read_DMA46_CURR_BWM_COUNT()	bfin_read32(DMA46_CURR_BWM_COUNT)
+#define bfin_write_DMA46_CURR_BWM_COUNT(val)	bfin_write32(DMA46_CURR_BWM_COUNT, val)
+
+
+/* EPPI1 Registers */
+
+
+/* Port Interrupt 0 Registers (32-bit) */
+
+#define bfin_read_PINT0_MASK_SET()		bfin_read32(PINT0_MASK_SET)
+#define bfin_write_PINT0_MASK_SET(val)		bfin_write32(PINT0_MASK_SET, val)
+#define bfin_read_PINT0_MASK_CLEAR()		bfin_read32(PINT0_MASK_CLEAR)
+#define bfin_write_PINT0_MASK_CLEAR(val)	bfin_write32(PINT0_MASK_CLEAR, val)
+#define bfin_read_PINT0_REQUEST()		bfin_read32(PINT0_REQUEST)
+#define bfin_write_PINT0_REQUEST(val)		bfin_write32(PINT0_REQUEST, val)
+#define bfin_read_PINT0_ASSIGN()		bfin_read32(PINT0_ASSIGN)
+#define bfin_write_PINT0_ASSIGN(val)		bfin_write32(PINT0_ASSIGN, val)
+#define bfin_read_PINT0_EDGE_SET()		bfin_read32(PINT0_EDGE_SET)
+#define bfin_write_PINT0_EDGE_SET(val)		bfin_write32(PINT0_EDGE_SET, val)
+#define bfin_read_PINT0_EDGE_CLEAR()		bfin_read32(PINT0_EDGE_CLEAR)
+#define bfin_write_PINT0_EDGE_CLEAR(val)	bfin_write32(PINT0_EDGE_CLEAR, val)
+#define bfin_read_PINT0_INVERT_SET()		bfin_read32(PINT0_INVERT_SET)
+#define bfin_write_PINT0_INVERT_SET(val)	bfin_write32(PINT0_INVERT_SET, val)
+#define bfin_read_PINT0_INVERT_CLEAR()		bfin_read32(PINT0_INVERT_CLEAR)
+#define bfin_write_PINT0_INVERT_CLEAR(val)	bfin_write32(PINT0_INVERT_CLEAR, val)
+#define bfin_read_PINT0_PINSTATE()		bfin_read32(PINT0_PINSTATE)
+#define bfin_write_PINT0_PINSTATE(val)		bfin_write32(PINT0_PINSTATE, val)
+#define bfin_read_PINT0_LATCH()			bfin_read32(PINT0_LATCH)
+#define bfin_write_PINT0_LATCH(val)		bfin_write32(PINT0_LATCH, val)
+
+/* Port Interrupt 1 Registers (32-bit) */
+
+#define bfin_read_PINT1_MASK_SET()		bfin_read32(PINT1_MASK_SET)
+#define bfin_write_PINT1_MASK_SET(val)		bfin_write32(PINT1_MASK_SET, val)
+#define bfin_read_PINT1_MASK_CLEAR()		bfin_read32(PINT1_MASK_CLEAR)
+#define bfin_write_PINT1_MASK_CLEAR(val)	bfin_write32(PINT1_MASK_CLEAR, val)
+#define bfin_read_PINT1_REQUEST()		bfin_read32(PINT1_REQUEST)
+#define bfin_write_PINT1_REQUEST(val)		bfin_write32(PINT1_REQUEST, val)
+#define bfin_read_PINT1_ASSIGN()		bfin_read32(PINT1_ASSIGN)
+#define bfin_write_PINT1_ASSIGN(val)		bfin_write32(PINT1_ASSIGN, val)
+#define bfin_read_PINT1_EDGE_SET()		bfin_read32(PINT1_EDGE_SET)
+#define bfin_write_PINT1_EDGE_SET(val)		bfin_write32(PINT1_EDGE_SET, val)
+#define bfin_read_PINT1_EDGE_CLEAR()		bfin_read32(PINT1_EDGE_CLEAR)
+#define bfin_write_PINT1_EDGE_CLEAR(val)	bfin_write32(PINT1_EDGE_CLEAR, val)
+#define bfin_read_PINT1_INVERT_SET()		bfin_read32(PINT1_INVERT_SET)
+#define bfin_write_PINT1_INVERT_SET(val)	bfin_write32(PINT1_INVERT_SET, val)
+#define bfin_read_PINT1_INVERT_CLEAR()		bfin_read32(PINT1_INVERT_CLEAR)
+#define bfin_write_PINT1_INVERT_CLEAR(val)	bfin_write32(PINT1_INVERT_CLEAR, val)
+#define bfin_read_PINT1_PINSTATE()		bfin_read32(PINT1_PINSTATE)
+#define bfin_write_PINT1_PINSTATE(val)		bfin_write32(PINT1_PINSTATE, val)
+#define bfin_read_PINT1_LATCH()			bfin_read32(PINT1_LATCH)
+#define bfin_write_PINT1_LATCH(val)		bfin_write32(PINT1_LATCH, val)
+
+/* Port Interrupt 2 Registers (32-bit) */
+
+#define bfin_read_PINT2_MASK_SET()		bfin_read32(PINT2_MASK_SET)
+#define bfin_write_PINT2_MASK_SET(val)		bfin_write32(PINT2_MASK_SET, val)
+#define bfin_read_PINT2_MASK_CLEAR()		bfin_read32(PINT2_MASK_CLEAR)
+#define bfin_write_PINT2_MASK_CLEAR(val)	bfin_write32(PINT2_MASK_CLEAR, val)
+#define bfin_read_PINT2_REQUEST()		bfin_read32(PINT2_REQUEST)
+#define bfin_write_PINT2_REQUEST(val)		bfin_write32(PINT2_REQUEST, val)
+#define bfin_read_PINT2_ASSIGN()		bfin_read32(PINT2_ASSIGN)
+#define bfin_write_PINT2_ASSIGN(val)		bfin_write32(PINT2_ASSIGN, val)
+#define bfin_read_PINT2_EDGE_SET()		bfin_read32(PINT2_EDGE_SET)
+#define bfin_write_PINT2_EDGE_SET(val)		bfin_write32(PINT2_EDGE_SET, val)
+#define bfin_read_PINT2_EDGE_CLEAR()		bfin_read32(PINT2_EDGE_CLEAR)
+#define bfin_write_PINT2_EDGE_CLEAR(val)	bfin_write32(PINT2_EDGE_CLEAR, val)
+#define bfin_read_PINT2_INVERT_SET()		bfin_read32(PINT2_INVERT_SET)
+#define bfin_write_PINT2_INVERT_SET(val)	bfin_write32(PINT2_INVERT_SET, val)
+#define bfin_read_PINT2_INVERT_CLEAR()		bfin_read32(PINT2_INVERT_CLEAR)
+#define bfin_write_PINT2_INVERT_CLEAR(val)	bfin_write32(PINT2_INVERT_CLEAR, val)
+#define bfin_read_PINT2_PINSTATE()		bfin_read32(PINT2_PINSTATE)
+#define bfin_write_PINT2_PINSTATE(val)		bfin_write32(PINT2_PINSTATE, val)
+#define bfin_read_PINT2_LATCH()			bfin_read32(PINT2_LATCH)
+#define bfin_write_PINT2_LATCH(val)		bfin_write32(PINT2_LATCH, val)
+
+/* Port Interrupt 3 Registers (32-bit) */
+
+#define bfin_read_PINT3_MASK_SET()		bfin_read32(PINT3_MASK_SET)
+#define bfin_write_PINT3_MASK_SET(val)		bfin_write32(PINT3_MASK_SET, val)
+#define bfin_read_PINT3_MASK_CLEAR()		bfin_read32(PINT3_MASK_CLEAR)
+#define bfin_write_PINT3_MASK_CLEAR(val)	bfin_write32(PINT3_MASK_CLEAR, val)
+#define bfin_read_PINT3_REQUEST()		bfin_read32(PINT3_REQUEST)
+#define bfin_write_PINT3_REQUEST(val)		bfin_write32(PINT3_REQUEST, val)
+#define bfin_read_PINT3_ASSIGN()		bfin_read32(PINT3_ASSIGN)
+#define bfin_write_PINT3_ASSIGN(val)		bfin_write32(PINT3_ASSIGN, val)
+#define bfin_read_PINT3_EDGE_SET()		bfin_read32(PINT3_EDGE_SET)
+#define bfin_write_PINT3_EDGE_SET(val)		bfin_write32(PINT3_EDGE_SET, val)
+#define bfin_read_PINT3_EDGE_CLEAR()		bfin_read32(PINT3_EDGE_CLEAR)
+#define bfin_write_PINT3_EDGE_CLEAR(val)	bfin_write32(PINT3_EDGE_CLEAR, val)
+#define bfin_read_PINT3_INVERT_SET()		bfin_read32(PINT3_INVERT_SET)
+#define bfin_write_PINT3_INVERT_SET(val)	bfin_write32(PINT3_INVERT_SET, val)
+#define bfin_read_PINT3_INVERT_CLEAR()		bfin_read32(PINT3_INVERT_CLEAR)
+#define bfin_write_PINT3_INVERT_CLEAR(val)	bfin_write32(PINT3_INVERT_CLEAR, val)
+#define bfin_read_PINT3_PINSTATE()		bfin_read32(PINT3_PINSTATE)
+#define bfin_write_PINT3_PINSTATE(val)		bfin_write32(PINT3_PINSTATE, val)
+#define bfin_read_PINT3_LATCH()			bfin_read32(PINT3_LATCH)
+#define bfin_write_PINT3_LATCH(val)		bfin_write32(PINT3_LATCH, val)
+
+/* Port Interrupt 4 Registers (32-bit) */
+
+#define bfin_read_PINT4_MASK_SET()		bfin_read32(PINT4_MASK_SET)
+#define bfin_write_PINT4_MASK_SET(val)		bfin_write32(PINT4_MASK_SET, val)
+#define bfin_read_PINT4_MASK_CLEAR()		bfin_read32(PINT4_MASK_CLEAR)
+#define bfin_write_PINT4_MASK_CLEAR(val)	bfin_write32(PINT4_MASK_CLEAR, val)
+#define bfin_read_PINT4_REQUEST()		bfin_read32(PINT4_REQUEST)
+#define bfin_write_PINT4_REQUEST(val)		bfin_write32(PINT4_REQUEST, val)
+#define bfin_read_PINT4_ASSIGN()		bfin_read32(PINT4_ASSIGN)
+#define bfin_write_PINT4_ASSIGN(val)		bfin_write32(PINT4_ASSIGN, val)
+#define bfin_read_PINT4_EDGE_SET()		bfin_read32(PINT4_EDGE_SET)
+#define bfin_write_PINT4_EDGE_SET(val)		bfin_write32(PINT4_EDGE_SET, val)
+#define bfin_read_PINT4_EDGE_CLEAR()		bfin_read32(PINT4_EDGE_CLEAR)
+#define bfin_write_PINT4_EDGE_CLEAR(val)	bfin_write32(PINT4_EDGE_CLEAR, val)
+#define bfin_read_PINT4_INVERT_SET()		bfin_read32(PINT4_INVERT_SET)
+#define bfin_write_PINT4_INVERT_SET(val)	bfin_write32(PINT4_INVERT_SET, val)
+#define bfin_read_PINT4_INVERT_CLEAR()		bfin_read32(PINT4_INVERT_CLEAR)
+#define bfin_write_PINT4_INVERT_CLEAR(val)	bfin_write32(PINT4_INVERT_CLEAR, val)
+#define bfin_read_PINT4_PINSTATE()		bfin_read32(PINT4_PINSTATE)
+#define bfin_write_PINT4_PINSTATE(val)		bfin_write32(PINT4_PINSTATE, val)
+#define bfin_read_PINT4_LATCH()			bfin_read32(PINT4_LATCH)
+#define bfin_write_PINT4_LATCH(val)		bfin_write32(PINT4_LATCH, val)
+
+/* Port Interrupt 5 Registers (32-bit) */
+
+#define bfin_read_PINT5_MASK_SET()		bfin_read32(PINT5_MASK_SET)
+#define bfin_write_PINT5_MASK_SET(val)		bfin_write32(PINT5_MASK_SET, val)
+#define bfin_read_PINT5_MASK_CLEAR()		bfin_read32(PINT5_MASK_CLEAR)
+#define bfin_write_PINT5_MASK_CLEAR(val)	bfin_write32(PINT5_MASK_CLEAR, val)
+#define bfin_read_PINT5_REQUEST()		bfin_read32(PINT5_REQUEST)
+#define bfin_write_PINT5_REQUEST(val)		bfin_write32(PINT5_REQUEST, val)
+#define bfin_read_PINT5_ASSIGN()		bfin_read32(PINT5_ASSIGN)
+#define bfin_write_PINT5_ASSIGN(val)		bfin_write32(PINT5_ASSIGN, val)
+#define bfin_read_PINT5_EDGE_SET()		bfin_read32(PINT5_EDGE_SET)
+#define bfin_write_PINT5_EDGE_SET(val)		bfin_write32(PINT5_EDGE_SET, val)
+#define bfin_read_PINT5_EDGE_CLEAR()		bfin_read32(PINT5_EDGE_CLEAR)
+#define bfin_write_PINT5_EDGE_CLEAR(val)	bfin_write32(PINT5_EDGE_CLEAR, val)
+#define bfin_read_PINT5_INVERT_SET()		bfin_read32(PINT5_INVERT_SET)
+#define bfin_write_PINT5_INVERT_SET(val)	bfin_write32(PINT5_INVERT_SET, val)
+#define bfin_read_PINT5_INVERT_CLEAR()		bfin_read32(PINT5_INVERT_CLEAR)
+#define bfin_write_PINT5_INVERT_CLEAR(val)	bfin_write32(PINT5_INVERT_CLEAR, val)
+#define bfin_read_PINT5_PINSTATE()		bfin_read32(PINT5_PINSTATE)
+#define bfin_write_PINT5_PINSTATE(val)		bfin_write32(PINT5_PINSTATE, val)
+#define bfin_read_PINT5_LATCH()			bfin_read32(PINT5_LATCH)
+#define bfin_write_PINT5_LATCH(val)		bfin_write32(PINT5_LATCH, val)
+
+/* Port A Registers */
+
+#define bfin_read_PORTA_FER()		bfin_read32(PORTA_FER)
+#define bfin_write_PORTA_FER(val)	bfin_write32(PORTA_FER, val)
+#define bfin_read_PORTA_FER_SET()	bfin_read32(PORTA_FER_SET)
+#define bfin_write_PORTA_FER_SET(val)	bfin_write32(PORTA_FER_SET, val)
+#define bfin_read_PORTA_FER_CLEAR()	bfin_read32(PORTA_FER_CLEAR)
+#define bfin_write_PORTA_FER_CLEAR(val)	bfin_write32(PORTA_FER_CLEAR, val)
+#define bfin_read_PORTA()		bfin_read32(PORTA)
+#define bfin_write_PORTA(val)		bfin_write32(PORTA, val)
+#define bfin_read_PORTA_SET()		bfin_read32(PORTA_SET)
+#define bfin_write_PORTA_SET(val)	bfin_write32(PORTA_SET, val)
+#define bfin_read_PORTA_CLEAR()		bfin_read32(PORTA_CLEAR)
+#define bfin_write_PORTA_CLEAR(val)	bfin_write32(PORTA_CLEAR, val)
+#define bfin_read_PORTA_DIR()		bfin_read32(PORTA_DIR)
+#define bfin_write_PORTA_DIR(val)	bfin_write32(PORTA_DIR, val)
+#define bfin_read_PORTA_DIR_SET()	bfin_read32(PORTA_DIR_SET)
+#define bfin_write_PORTA_DIR_SET(val)	bfin_write32(PORTA_DIR_SET, val)
+#define bfin_read_PORTA_DIR_CLEAR()	bfin_read32(PORTA_DIR_CLEAR)
+#define bfin_write_PORTA_DIR_CLEAR(val)	bfin_write32(PORTA_DIR_CLEAR, val)
+#define bfin_read_PORTA_INEN()		bfin_read32(PORTA_INEN)
+#define bfin_write_PORTA_INEN(val)	bfin_write32(PORTA_INEN, val)
+#define bfin_read_PORTA_INEN_SET()	bfin_read32(PORTA_INEN_SET)
+#define bfin_write_PORTA_INEN_SET(val)	bfin_write32(PORTA_INEN_SET, val)
+#define bfin_read_PORTA_INEN_CLEAR()	bfin_read32(PORTA_INEN_CLEAR)
+#define bfin_write_PORTA_INEN_CLEAR(val)	bfin_write32(PORTA_INEN_CLEAR, val)
+#define bfin_read_PORTA_MUX()		bfin_read32(PORTA_MUX)
+#define bfin_write_PORTA_MUX(val)	bfin_write32(PORTA_MUX, val)
+#define bfin_read_PORTA_DATA_TGL()	bfin_read32(PORTA_DATA_TGL)
+#define bfin_write_PORTA_DATA_TGL(val)	bfin_write32(PORTA_DATA_TGL, val)
+#define bfin_read_PORTA_POL()		bfin_read32(PORTA_POL)
+#define bfin_write_PORTA_POL(val)	bfin_write32(PORTA_POL, val)
+#define bfin_read_PORTA_POL_SET()	bfin_read32(PORTA_POL_SET)
+#define bfin_write_PORTA_POL_SET(val)	bfin_write32(PORTA_POL_SET, val)
+#define bfin_read_PORTA_POL_CLEAR()	bfin_read32(PORTA_POL_CLEAR)
+#define bfin_write_PORTA_POL_CLEAR(val)	bfin_write32(PORTA_POL_CLEAR, val)
+#define bfin_read_PORTA_LOCK()		bfin_read32(PORTA_LOCK)
+#define bfin_write_PORTA_LOCK(val)	bfin_write32(PORTA_LOCK, val)
+#define bfin_read_PORTA_REVID()		bfin_read32(PORTA_REVID)
+#define bfin_write_PORTA_REVID(val)	bfin_write32(PORTA_REVID, val)
+
+
+
+/* Port B Registers */
+#define bfin_read_PORTB_FER()		bfin_read32(PORTB_FER)
+#define bfin_write_PORTB_FER(val)	bfin_write32(PORTB_FER, val)
+#define bfin_read_PORTB_FER_SET()	bfin_read32(PORTB_FER_SET)
+#define bfin_write_PORTB_FER_SET(val)	bfin_write32(PORTB_FER_SET, val)
+#define bfin_read_PORTB_FER_CLEAR()	bfin_read32(PORTB_FER_CLEAR)
+#define bfin_write_PORTB_FER_CLEAR(val)	bfin_write32(PORTB_FER_CLEAR, val)
+#define bfin_read_PORTB()		bfin_read32(PORTB)
+#define bfin_write_PORTB(val)		bfin_write32(PORTB, val)
+#define bfin_read_PORTB_SET()		bfin_read32(PORTB_SET)
+#define bfin_write_PORTB_SET(val)	bfin_write32(PORTB_SET, val)
+#define bfin_read_PORTB_CLEAR()		bfin_read32(PORTB_CLEAR)
+#define bfin_write_PORTB_CLEAR(val)	bfin_write32(PORTB_CLEAR, val)
+#define bfin_read_PORTB_DIR()		bfin_read32(PORTB_DIR)
+#define bfin_write_PORTB_DIR(val)	bfin_write32(PORTB_DIR, val)
+#define bfin_read_PORTB_DIR_SET()	bfin_read32(PORTB_DIR_SET)
+#define bfin_write_PORTB_DIR_SET(val)	bfin_write32(PORTB_DIR_SET, val)
+#define bfin_read_PORTB_DIR_CLEAR()	bfin_read32(PORTB_DIR_CLEAR)
+#define bfin_write_PORTB_DIR_CLEAR(val)	bfin_write32(PORTB_DIR_CLEAR, val)
+#define bfin_read_PORTB_INEN()		bfin_read32(PORTB_INEN)
+#define bfin_write_PORTB_INEN(val)	bfin_write32(PORTB_INEN, val)
+#define bfin_read_PORTB_INEN_SET()	bfin_read32(PORTB_INEN_SET)
+#define bfin_write_PORTB_INEN_SET(val)	bfin_write32(PORTB_INEN_SET, val)
+#define bfin_read_PORTB_INEN_CLEAR()	bfin_read32(PORTB_INEN_CLEAR)
+#define bfin_write_PORTB_INEN_CLEAR(val)	bfin_write32(PORTB_INEN_CLEAR, val)
+#define bfin_read_PORTB_MUX()		bfin_read32(PORTB_MUX)
+#define bfin_write_PORTB_MUX(val)	bfin_write32(PORTB_MUX, val)
+#define bfin_read_PORTB_DATA_TGL()	bfin_read32(PORTB_DATA_TGL)
+#define bfin_write_PORTB_DATA_TGL(val)	bfin_write32(PORTB_DATA_TGL, val)
+#define bfin_read_PORTB_POL()		bfin_read32(PORTB_POL)
+#define bfin_write_PORTB_POL(val)	bfin_write32(PORTB_POL, val)
+#define bfin_read_PORTB_POL_SET()	bfin_read32(PORTB_POL_SET)
+#define bfin_write_PORTB_POL_SET(val)	bfin_write32(PORTB_POL_SET, val)
+#define bfin_read_PORTB_POL_CLEAR()	bfin_read32(PORTB_POL_CLEAR)
+#define bfin_write_PORTB_POL_CLEAR(val)	bfin_write32(PORTB_POL_CLEAR, val)
+#define bfin_read_PORTB_LOCK()		bfin_read32(PORTB_LOCK)
+#define bfin_write_PORTB_LOCK(val)	bfin_write32(PORTB_LOCK, val)
+#define bfin_read_PORTB_REVID()		bfin_read32(PORTB_REVID)
+#define bfin_write_PORTB_REVID(val)	bfin_write32(PORTB_REVID, val)
+
+
+/* Port C Registers */
+#define bfin_read_PORTC_FER()		bfin_read32(PORTC_FER)
+#define bfin_write_PORTC_FER(val)	bfin_write32(PORTC_FER, val)
+#define bfin_read_PORTC_FER_SET()	bfin_read32(PORTC_FER_SET)
+#define bfin_write_PORTC_FER_SET(val)	bfin_write32(PORTC_FER_SET, val)
+#define bfin_read_PORTC_FER_CLEAR()	bfin_read32(PORTC_FER_CLEAR)
+#define bfin_write_PORTC_FER_CLEAR(val)	bfin_write32(PORTC_FER_CLEAR, val)
+#define bfin_read_PORTC()		bfin_read32(PORTC)
+#define bfin_write_PORTC(val)		bfin_write32(PORTC, val)
+#define bfin_read_PORTC_SET()		bfin_read32(PORTC_SET)
+#define bfin_write_PORTC_SET(val)	bfin_write32(PORTC_SET, val)
+#define bfin_read_PORTC_CLEAR()		bfin_read32(PORTC_CLEAR)
+#define bfin_write_PORTC_CLEAR(val)	bfin_write32(PORTC_CLEAR, val)
+#define bfin_read_PORTC_DIR()		bfin_read32(PORTC_DIR)
+#define bfin_write_PORTC_DIR(val)	bfin_write32(PORTC_DIR, val)
+#define bfin_read_PORTC_DIR_SET()	bfin_read32(PORTC_DIR_SET)
+#define bfin_write_PORTC_DIR_SET(val)	bfin_write32(PORTC_DIR_SET, val)
+#define bfin_read_PORTC_DIR_CLEAR()	bfin_read32(PORTC_DIR_CLEAR)
+#define bfin_write_PORTC_DIR_CLEAR(val)	bfin_write32(PORTC_DIR_CLEAR, val)
+#define bfin_read_PORTC_INEN()		bfin_read32(PORTC_INEN)
+#define bfin_write_PORTC_INEN(val)	bfin_write32(PORTC_INEN, val)
+#define bfin_read_PORTC_INEN_SET()	bfin_read32(PORTC_INEN_SET)
+#define bfin_write_PORTC_INEN_SET(val)	bfin_write32(PORTC_INEN_SET, val)
+#define bfin_read_PORTC_INEN_CLEAR()	bfin_read32(PORTC_INEN_CLEAR)
+#define bfin_write_PORTC_INEN_CLEAR(val)	bfin_write32(PORTC_INEN_CLEAR, val)
+#define bfin_read_PORTC_MUX()		bfin_read32(PORTC_MUX)
+#define bfin_write_PORTC_MUX(val)	bfin_write32(PORTC_MUX, val)
+#define bfin_read_PORTC_DATA_TGL()	bfin_read32(PORTC_DATA_TGL)
+#define bfin_write_PORTC_DATA_TGL(val)	bfin_write32(PORTC_DATA_TGL, val)
+#define bfin_read_PORTC_POL()		bfin_read32(PORTC_POL)
+#define bfin_write_PORTC_POL(val)	bfin_write32(PORTC_POL, val)
+#define bfin_read_PORTC_POL_SET()	bfin_read32(PORTC_POL_SET)
+#define bfin_write_PORTC_POL_SET(val)	bfin_write32(PORTC_POL_SET, val)
+#define bfin_read_PORTC_POL_CLEAR()	bfin_read32(PORTC_POL_CLEAR)
+#define bfin_write_PORTC_POL_CLEAR(val)	bfin_write32(PORTC_POL_CLEAR, val)
+#define bfin_read_PORTC_LOCK()		bfin_read32(PORTC_LOCK)
+#define bfin_write_PORTC_LOCK(val)	bfin_write32(PORTC_LOCK, val)
+#define bfin_read_PORTC_REVID()		bfin_read32(PORTC_REVID)
+#define bfin_write_PORTC_REVID(val)	bfin_write32(PORTC_REVID, val)
+
+
+/* Port D Registers */
+#define bfin_read_PORTD_FER()		bfin_read32(PORTD_FER)
+#define bfin_write_PORTD_FER(val)	bfin_write32(PORTD_FER, val)
+#define bfin_read_PORTD_FER_SET()	bfin_read32(PORTD_FER_SET)
+#define bfin_write_PORTD_FER_SET(val)	bfin_write32(PORTD_FER_SET, val)
+#define bfin_read_PORTD_FER_CLEAR()	bfin_read32(PORTD_FER_CLEAR)
+#define bfin_write_PORTD_FER_CLEAR(val)	bfin_write32(PORTD_FER_CLEAR, val)
+#define bfin_read_PORTD()		bfin_read32(PORTD)
+#define bfin_write_PORTD(val)		bfin_write32(PORTD, val)
+#define bfin_read_PORTD_SET()		bfin_read32(PORTD_SET)
+#define bfin_write_PORTD_SET(val)	bfin_write32(PORTD_SET, val)
+#define bfin_read_PORTD_CLEAR()		bfin_read32(PORTD_CLEAR)
+#define bfin_write_PORTD_CLEAR(val)	bfin_write32(PORTD_CLEAR, val)
+#define bfin_read_PORTD_DIR()		bfin_read32(PORTD_DIR)
+#define bfin_write_PORTD_DIR(val)	bfin_write32(PORTD_DIR, val)
+#define bfin_read_PORTD_DIR_SET()	bfin_read32(PORTD_DIR_SET)
+#define bfin_write_PORTD_DIR_SET(val)	bfin_write32(PORTD_DIR_SET, val)
+#define bfin_read_PORTD_DIR_CLEAR()	bfin_read32(PORTD_DIR_CLEAR)
+#define bfin_write_PORTD_DIR_CLEAR(val)	bfin_write32(PORTD_DIR_CLEAR, val)
+#define bfin_read_PORTD_INEN()		bfin_read32(PORTD_INEN)
+#define bfin_write_PORTD_INEN(val)	bfin_write32(PORTD_INEN, val)
+#define bfin_read_PORTD_INEN_SET()	bfin_read32(PORTD_INEN_SET)
+#define bfin_write_PORTD_INEN_SET(val)	bfin_write32(PORTD_INEN_SET, val)
+#define bfin_read_PORTD_INEN_CLEAR()	bfin_read32(PORTD_INEN_CLEAR)
+#define bfin_write_PORTD_INEN_CLEAR(val)	bfin_write32(PORTD_INEN_CLEAR, val)
+#define bfin_read_PORTD_MUX()		bfin_read32(PORTD_MUX)
+#define bfin_write_PORTD_MUX(val)	bfin_write32(PORTD_MUX, val)
+#define bfin_read_PORTD_DATA_TGL()	bfin_read32(PORTD_DATA_TGL)
+#define bfin_write_PORTD_DATA_TGL(val)	bfin_write32(PORTD_DATA_TGL, val)
+#define bfin_read_PORTD_POL()		bfin_read32(PORTD_POL)
+#define bfin_write_PORTD_POL(val)	bfin_write32(PORTD_POL, val)
+#define bfin_read_PORTD_POL_SET()	bfin_read32(PORTD_POL_SET)
+#define bfin_write_PORTD_POL_SET(val)	bfin_write32(PORTD_POL_SET, val)
+#define bfin_read_PORTD_POL_CLEAR()	bfin_read32(PORTD_POL_CLEAR)
+#define bfin_write_PORTD_POL_CLEAR(val)	bfin_write32(PORTD_POL_CLEAR, val)
+#define bfin_read_PORTD_LOCK()		bfin_read32(PORTD_LOCK)
+#define bfin_write_PORTD_LOCK(val)	bfin_write32(PORTD_LOCK, val)
+#define bfin_read_PORTD_REVID()		bfin_read32(PORTD_REVID)
+#define bfin_write_PORTD_REVID(val)	bfin_write32(PORTD_REVID, val)
+
+
+/* Port E Registers */
+#define bfin_read_PORTE_FER()		bfin_read32(PORTE_FER)
+#define bfin_write_PORTE_FER(val)	bfin_write32(PORTE_FER, val)
+#define bfin_read_PORTE_FER_SET()	bfin_read32(PORTE_FER_SET)
+#define bfin_write_PORTE_FER_SET(val)	bfin_write32(PORTE_FER_SET, val)
+#define bfin_read_PORTE_FER_CLEAR()	bfin_read32(PORTE_FER_CLEAR)
+#define bfin_write_PORTE_FER_CLEAR(val)	bfin_write32(PORTE_FER_CLEAR, val)
+#define bfin_read_PORTE()		bfin_read32(PORTE)
+#define bfin_write_PORTE(val)		bfin_write32(PORTE, val)
+#define bfin_read_PORTE_SET()		bfin_read32(PORTE_SET)
+#define bfin_write_PORTE_SET(val)	bfin_write32(PORTE_SET, val)
+#define bfin_read_PORTE_CLEAR()		bfin_read32(PORTE_CLEAR)
+#define bfin_write_PORTE_CLEAR(val)	bfin_write32(PORTE_CLEAR, val)
+#define bfin_read_PORTE_DIR()		bfin_read32(PORTE_DIR)
+#define bfin_write_PORTE_DIR(val)	bfin_write32(PORTE_DIR, val)
+#define bfin_read_PORTE_DIR_SET()	bfin_read32(PORTE_DIR_SET)
+#define bfin_write_PORTE_DIR_SET(val)	bfin_write32(PORTE_DIR_SET, val)
+#define bfin_read_PORTE_DIR_CLEAR()	bfin_read32(PORTE_DIR_CLEAR)
+#define bfin_write_PORTE_DIR_CLEAR(val)	bfin_write32(PORTE_DIR_CLEAR, val)
+#define bfin_read_PORTE_INEN()		bfin_read32(PORTE_INEN)
+#define bfin_write_PORTE_INEN(val)	bfin_write32(PORTE_INEN, val)
+#define bfin_read_PORTE_INEN_SET()	bfin_read32(PORTE_INEN_SET)
+#define bfin_write_PORTE_INEN_SET(val)	bfin_write32(PORTE_INEN_SET, val)
+#define bfin_read_PORTE_INEN_CLEAR()	bfin_read32(PORTE_INEN_CLEAR)
+#define bfin_write_PORTE_INEN_CLEAR(val)	bfin_write32(PORTE_INEN_CLEAR, val)
+#define bfin_read_PORTE_MUX()		bfin_read32(PORTE_MUX)
+#define bfin_write_PORTE_MUX(val)	bfin_write32(PORTE_MUX, val)
+#define bfin_read_PORTE_DATA_TGL()	bfin_read32(PORTE_DATA_TGL)
+#define bfin_write_PORTE_DATA_TGL(val)	bfin_write32(PORTE_DATA_TGL, val)
+#define bfin_read_PORTE_POL()		bfin_read32(PORTE_POL)
+#define bfin_write_PORTE_POL(val)	bfin_write32(PORTE_POL, val)
+#define bfin_read_PORTE_POL_SET()	bfin_read32(PORTE_POL_SET)
+#define bfin_write_PORTE_POL_SET(val)	bfin_write32(PORTE_POL_SET, val)
+#define bfin_read_PORTE_POL_CLEAR()	bfin_read32(PORTE_POL_CLEAR)
+#define bfin_write_PORTE_POL_CLEAR(val)	bfin_write32(PORTE_POL_CLEAR, val)
+#define bfin_read_PORTE_LOCK()		bfin_read32(PORTE_LOCK)
+#define bfin_write_PORTE_LOCK(val)	bfin_write32(PORTE_LOCK, val)
+#define bfin_read_PORTE_REVID()		bfin_read32(PORTE_REVID)
+#define bfin_write_PORTE_REVID(val)	bfin_write32(PORTE_REVID, val)
+
+
+/* Port F Registers */
+#define bfin_read_PORTF_FER()		bfin_read32(PORTF_FER)
+#define bfin_write_PORTF_FER(val)	bfin_write32(PORTF_FER, val)
+#define bfin_read_PORTF_FER_SET()	bfin_read32(PORTF_FER_SET)
+#define bfin_write_PORTF_FER_SET(val)	bfin_write32(PORTF_FER_SET, val)
+#define bfin_read_PORTF_FER_CLEAR()	bfin_read32(PORTF_FER_CLEAR)
+#define bfin_write_PORTF_FER_CLEAR(val)	bfin_write32(PORTF_FER_CLEAR, val)
+#define bfin_read_PORTF()		bfin_read32(PORTF)
+#define bfin_write_PORTF(val)		bfin_write32(PORTF, val)
+#define bfin_read_PORTF_SET()		bfin_read32(PORTF_SET)
+#define bfin_write_PORTF_SET(val)	bfin_write32(PORTF_SET, val)
+#define bfin_read_PORTF_CLEAR()		bfin_read32(PORTF_CLEAR)
+#define bfin_write_PORTF_CLEAR(val)	bfin_write32(PORTF_CLEAR, val)
+#define bfin_read_PORTF_DIR()		bfin_read32(PORTF_DIR)
+#define bfin_write_PORTF_DIR(val)	bfin_write32(PORTF_DIR, val)
+#define bfin_read_PORTF_DIR_SET()	bfin_read32(PORTF_DIR_SET)
+#define bfin_write_PORTF_DIR_SET(val)	bfin_write32(PORTF_DIR_SET, val)
+#define bfin_read_PORTF_DIR_CLEAR()	bfin_read32(PORTF_DIR_CLEAR)
+#define bfin_write_PORTF_DIR_CLEAR(val)	bfin_write32(PORTF_DIR_CLEAR, val)
+#define bfin_read_PORTF_INEN()		bfin_read32(PORTF_INEN)
+#define bfin_write_PORTF_INEN(val)	bfin_write32(PORTF_INEN, val)
+#define bfin_read_PORTF_INEN_SET()	bfin_read32(PORTF_INEN_SET)
+#define bfin_write_PORTF_INEN_SET(val)	bfin_write32(PORTF_INEN_SET, val)
+#define bfin_read_PORTF_INEN_CLEAR()	bfin_read32(PORTF_INEN_CLEAR)
+#define bfin_write_PORTF_INEN_CLEAR(val)	bfin_write32(PORTF_INEN_CLEAR, val)
+#define bfin_read_PORTF_MUX()		bfin_read32(PORTF_MUX)
+#define bfin_write_PORTF_MUX(val)	bfin_write32(PORTF_MUX, val)
+#define bfin_read_PORTF_DATA_TGL()	bfin_read32(PORTF_DATA_TGL)
+#define bfin_write_PORTF_DATA_TGL(val)	bfin_write32(PORTF_DATA_TGL, val)
+#define bfin_read_PORTF_POL()		bfin_read32(PORTF_POL)
+#define bfin_write_PORTF_POL(val)	bfin_write32(PORTF_POL, val)
+#define bfin_read_PORTF_POL_SET()	bfin_read32(PORTF_POL_SET)
+#define bfin_write_PORTF_POL_SET(val)	bfin_write32(PORTF_POL_SET, val)
+#define bfin_read_PORTF_POL_CLEAR()	bfin_read32(PORTF_POL_CLEAR)
+#define bfin_write_PORTF_POL_CLEAR(val)	bfin_write32(PORTF_POL_CLEAR, val)
+#define bfin_read_PORTF_LOCK()		bfin_read32(PORTF_LOCK)
+#define bfin_write_PORTF_LOCK(val)	bfin_write32(PORTF_LOCK, val)
+#define bfin_read_PORTF_REVID()		bfin_read32(PORTF_REVID)
+#define bfin_write_PORTF_REVID(val)	bfin_write32(PORTF_REVID, val)
+
+
+/* Port G Registers */
+#define bfin_read_PORTG_FER()		bfin_read32(PORTG_FER)
+#define bfin_write_PORTG_FER(val)	bfin_write32(PORTG_FER, val)
+#define bfin_read_PORTG_FER_SET()	bfin_read32(PORTG_FER_SET)
+#define bfin_write_PORTG_FER_SET(val)	bfin_write32(PORTG_FER_SET, val)
+#define bfin_read_PORTG_FER_CLEAR()	bfin_read32(PORTG_FER_CLEAR)
+#define bfin_write_PORTG_FER_CLEAR(val)	bfin_write32(PORTG_FER_CLEAR, val)
+#define bfin_read_PORTG()		bfin_read32(PORTG)
+#define bfin_write_PORTG(val)		bfin_write32(PORTG, val)
+#define bfin_read_PORTG_SET()		bfin_read32(PORTG_SET)
+#define bfin_write_PORTG_SET(val)	bfin_write32(PORTG_SET, val)
+#define bfin_read_PORTG_CLEAR()		bfin_read32(PORTG_CLEAR)
+#define bfin_write_PORTG_CLEAR(val)	bfin_write32(PORTG_CLEAR, val)
+#define bfin_read_PORTG_DIR()		bfin_read32(PORTG_DIR)
+#define bfin_write_PORTG_DIR(val)	bfin_write32(PORTG_DIR, val)
+#define bfin_read_PORTG_DIR_SET()	bfin_read32(PORTG_DIR_SET)
+#define bfin_write_PORTG_DIR_SET(val)	bfin_write32(PORTG_DIR_SET, val)
+#define bfin_read_PORTG_DIR_CLEAR()	bfin_read32(PORTG_DIR_CLEAR)
+#define bfin_write_PORTG_DIR_CLEAR(val)	bfin_write32(PORTG_DIR_CLEAR, val)
+#define bfin_read_PORTG_INEN()		bfin_read32(PORTG_INEN)
+#define bfin_write_PORTG_INEN(val)	bfin_write32(PORTG_INEN, val)
+#define bfin_read_PORTG_INEN_SET()	bfin_read32(PORTG_INEN_SET)
+#define bfin_write_PORTG_INEN_SET(val)	bfin_write32(PORTG_INEN_SET, val)
+#define bfin_read_PORTG_INEN_CLEAR()	bfin_read32(PORTG_INEN_CLEAR)
+#define bfin_write_PORTG_INEN_CLEAR(val)	bfin_write32(PORTG_INEN_CLEAR, val)
+#define bfin_read_PORTG_MUX()		bfin_read32(PORTG_MUX)
+#define bfin_write_PORTG_MUX(val)	bfin_write32(PORTG_MUX, val)
+#define bfin_read_PORTG_DATA_TGL()	bfin_read32(PORTG_DATA_TGL)
+#define bfin_write_PORTG_DATA_TGL(val)	bfin_write32(PORTG_DATA_TGL, val)
+#define bfin_read_PORTG_POL()		bfin_read32(PORTG_POL)
+#define bfin_write_PORTG_POL(val)	bfin_write32(PORTG_POL, val)
+#define bfin_read_PORTG_POL_SET()	bfin_read32(PORTG_POL_SET)
+#define bfin_write_PORTG_POL_SET(val)	bfin_write32(PORTG_POL_SET, val)
+#define bfin_read_PORTG_POL_CLEAR()	bfin_read32(PORTG_POL_CLEAR)
+#define bfin_write_PORTG_POL_CLEAR(val)	bfin_write32(PORTG_POL_CLEAR, val)
+#define bfin_read_PORTG_LOCK()		bfin_read32(PORTG_LOCK)
+#define bfin_write_PORTG_LOCK(val)	bfin_write32(PORTG_LOCK, val)
+#define bfin_read_PORTG_REVID()		bfin_read32(PORTG_REVID)
+#define bfin_write_PORTG_REVID(val)	bfin_write32(PORTG_REVID, val)
+
+
+
+
+/* CAN Controller 0 Config 1 Registers */
+
+#define bfin_read_CAN0_MC1()		bfin_read16(CAN0_MC1)
+#define bfin_write_CAN0_MC1(val)	bfin_write16(CAN0_MC1, val)
+#define bfin_read_CAN0_MD1()		bfin_read16(CAN0_MD1)
+#define bfin_write_CAN0_MD1(val)	bfin_write16(CAN0_MD1, val)
+#define bfin_read_CAN0_TRS1()		bfin_read16(CAN0_TRS1)
+#define bfin_write_CAN0_TRS1(val)	bfin_write16(CAN0_TRS1, val)
+#define bfin_read_CAN0_TRR1()		bfin_read16(CAN0_TRR1)
+#define bfin_write_CAN0_TRR1(val)	bfin_write16(CAN0_TRR1, val)
+#define bfin_read_CAN0_TA1()		bfin_read16(CAN0_TA1)
+#define bfin_write_CAN0_TA1(val)	bfin_write16(CAN0_TA1, val)
+#define bfin_read_CAN0_AA1()		bfin_read16(CAN0_AA1)
+#define bfin_write_CAN0_AA1(val)	bfin_write16(CAN0_AA1, val)
+#define bfin_read_CAN0_RMP1()		bfin_read16(CAN0_RMP1)
+#define bfin_write_CAN0_RMP1(val)	bfin_write16(CAN0_RMP1, val)
+#define bfin_read_CAN0_RML1()		bfin_read16(CAN0_RML1)
+#define bfin_write_CAN0_RML1(val)	bfin_write16(CAN0_RML1, val)
+#define bfin_read_CAN0_MBTIF1()		bfin_read16(CAN0_MBTIF1)
+#define bfin_write_CAN0_MBTIF1(val)	bfin_write16(CAN0_MBTIF1, val)
+#define bfin_read_CAN0_MBRIF1()		bfin_read16(CAN0_MBRIF1)
+#define bfin_write_CAN0_MBRIF1(val)	bfin_write16(CAN0_MBRIF1, val)
+#define bfin_read_CAN0_MBIM1()		bfin_read16(CAN0_MBIM1)
+#define bfin_write_CAN0_MBIM1(val)	bfin_write16(CAN0_MBIM1, val)
+#define bfin_read_CAN0_RFH1()		bfin_read16(CAN0_RFH1)
+#define bfin_write_CAN0_RFH1(val)	bfin_write16(CAN0_RFH1, val)
+#define bfin_read_CAN0_OPSS1()		bfin_read16(CAN0_OPSS1)
+#define bfin_write_CAN0_OPSS1(val)	bfin_write16(CAN0_OPSS1, val)
+
+/* CAN Controller 0 Config 2 Registers */
+
+#define bfin_read_CAN0_MC2()		bfin_read16(CAN0_MC2)
+#define bfin_write_CAN0_MC2(val)	bfin_write16(CAN0_MC2, val)
+#define bfin_read_CAN0_MD2()		bfin_read16(CAN0_MD2)
+#define bfin_write_CAN0_MD2(val)	bfin_write16(CAN0_MD2, val)
+#define bfin_read_CAN0_TRS2()		bfin_read16(CAN0_TRS2)
+#define bfin_write_CAN0_TRS2(val)	bfin_write16(CAN0_TRS2, val)
+#define bfin_read_CAN0_TRR2()		bfin_read16(CAN0_TRR2)
+#define bfin_write_CAN0_TRR2(val)	bfin_write16(CAN0_TRR2, val)
+#define bfin_read_CAN0_TA2()		bfin_read16(CAN0_TA2)
+#define bfin_write_CAN0_TA2(val)	bfin_write16(CAN0_TA2, val)
+#define bfin_read_CAN0_AA2()		bfin_read16(CAN0_AA2)
+#define bfin_write_CAN0_AA2(val)	bfin_write16(CAN0_AA2, val)
+#define bfin_read_CAN0_RMP2()		bfin_read16(CAN0_RMP2)
+#define bfin_write_CAN0_RMP2(val)	bfin_write16(CAN0_RMP2, val)
+#define bfin_read_CAN0_RML2()		bfin_read16(CAN0_RML2)
+#define bfin_write_CAN0_RML2(val)	bfin_write16(CAN0_RML2, val)
+#define bfin_read_CAN0_MBTIF2()		bfin_read16(CAN0_MBTIF2)
+#define bfin_write_CAN0_MBTIF2(val)	bfin_write16(CAN0_MBTIF2, val)
+#define bfin_read_CAN0_MBRIF2()		bfin_read16(CAN0_MBRIF2)
+#define bfin_write_CAN0_MBRIF2(val)	bfin_write16(CAN0_MBRIF2, val)
+#define bfin_read_CAN0_MBIM2()		bfin_read16(CAN0_MBIM2)
+#define bfin_write_CAN0_MBIM2(val)	bfin_write16(CAN0_MBIM2, val)
+#define bfin_read_CAN0_RFH2()		bfin_read16(CAN0_RFH2)
+#define bfin_write_CAN0_RFH2(val)	bfin_write16(CAN0_RFH2, val)
+#define bfin_read_CAN0_OPSS2()		bfin_read16(CAN0_OPSS2)
+#define bfin_write_CAN0_OPSS2(val)	bfin_write16(CAN0_OPSS2, val)
+
+/* CAN Controller 0 Clock/Interrupt/Counter Registers */
+
+#define bfin_read_CAN0_CLOCK()		bfin_read16(CAN0_CLOCK)
+#define bfin_write_CAN0_CLOCK(val)	bfin_write16(CAN0_CLOCK, val)
+#define bfin_read_CAN0_TIMING()		bfin_read16(CAN0_TIMING)
+#define bfin_write_CAN0_TIMING(val)	bfin_write16(CAN0_TIMING, val)
+#define bfin_read_CAN0_DEBUG()		bfin_read16(CAN0_DEBUG)
+#define bfin_write_CAN0_DEBUG(val)	bfin_write16(CAN0_DEBUG, val)
+#define bfin_read_CAN0_STATUS()		bfin_read16(CAN0_STATUS)
+#define bfin_write_CAN0_STATUS(val)	bfin_write16(CAN0_STATUS, val)
+#define bfin_read_CAN0_CEC()		bfin_read16(CAN0_CEC)
+#define bfin_write_CAN0_CEC(val)	bfin_write16(CAN0_CEC, val)
+#define bfin_read_CAN0_GIS()		bfin_read16(CAN0_GIS)
+#define bfin_write_CAN0_GIS(val)	bfin_write16(CAN0_GIS, val)
+#define bfin_read_CAN0_GIM()		bfin_read16(CAN0_GIM)
+#define bfin_write_CAN0_GIM(val)	bfin_write16(CAN0_GIM, val)
+#define bfin_read_CAN0_GIF()		bfin_read16(CAN0_GIF)
+#define bfin_write_CAN0_GIF(val)	bfin_write16(CAN0_GIF, val)
+#define bfin_read_CAN0_CONTROL()	bfin_read16(CAN0_CONTROL)
+#define bfin_write_CAN0_CONTROL(val)	bfin_write16(CAN0_CONTROL, val)
+#define bfin_read_CAN0_INTR()		bfin_read16(CAN0_INTR)
+#define bfin_write_CAN0_INTR(val)	bfin_write16(CAN0_INTR, val)
+#define bfin_read_CAN0_MBTD()		bfin_read16(CAN0_MBTD)
+#define bfin_write_CAN0_MBTD(val)	bfin_write16(CAN0_MBTD, val)
+#define bfin_read_CAN0_EWR()		bfin_read16(CAN0_EWR)
+#define bfin_write_CAN0_EWR(val)	bfin_write16(CAN0_EWR, val)
+#define bfin_read_CAN0_ESR()		bfin_read16(CAN0_ESR)
+#define bfin_write_CAN0_ESR(val)	bfin_write16(CAN0_ESR, val)
+#define bfin_read_CAN0_UCCNT()		bfin_read16(CAN0_UCCNT)
+#define bfin_write_CAN0_UCCNT(val)	bfin_write16(CAN0_UCCNT, val)
+#define bfin_read_CAN0_UCRC()		bfin_read16(CAN0_UCRC)
+#define bfin_write_CAN0_UCRC(val)	bfin_write16(CAN0_UCRC, val)
+#define bfin_read_CAN0_UCCNF()		bfin_read16(CAN0_UCCNF)
+#define bfin_write_CAN0_UCCNF(val)	bfin_write16(CAN0_UCCNF, val)
+
+/* CAN Controller 0 Acceptance Registers */
+
+#define bfin_read_CAN0_AM00L()		bfin_read16(CAN0_AM00L)
+#define bfin_write_CAN0_AM00L(val)	bfin_write16(CAN0_AM00L, val)
+#define bfin_read_CAN0_AM00H()		bfin_read16(CAN0_AM00H)
+#define bfin_write_CAN0_AM00H(val)	bfin_write16(CAN0_AM00H, val)
+#define bfin_read_CAN0_AM01L()		bfin_read16(CAN0_AM01L)
+#define bfin_write_CAN0_AM01L(val)	bfin_write16(CAN0_AM01L, val)
+#define bfin_read_CAN0_AM01H()		bfin_read16(CAN0_AM01H)
+#define bfin_write_CAN0_AM01H(val)	bfin_write16(CAN0_AM01H, val)
+#define bfin_read_CAN0_AM02L()		bfin_read16(CAN0_AM02L)
+#define bfin_write_CAN0_AM02L(val)	bfin_write16(CAN0_AM02L, val)
+#define bfin_read_CAN0_AM02H()		bfin_read16(CAN0_AM02H)
+#define bfin_write_CAN0_AM02H(val)	bfin_write16(CAN0_AM02H, val)
+#define bfin_read_CAN0_AM03L()		bfin_read16(CAN0_AM03L)
+#define bfin_write_CAN0_AM03L(val)	bfin_write16(CAN0_AM03L, val)
+#define bfin_read_CAN0_AM03H()		bfin_read16(CAN0_AM03H)
+#define bfin_write_CAN0_AM03H(val)	bfin_write16(CAN0_AM03H, val)
+#define bfin_read_CAN0_AM04L()		bfin_read16(CAN0_AM04L)
+#define bfin_write_CAN0_AM04L(val)	bfin_write16(CAN0_AM04L, val)
+#define bfin_read_CAN0_AM04H()		bfin_read16(CAN0_AM04H)
+#define bfin_write_CAN0_AM04H(val)	bfin_write16(CAN0_AM04H, val)
+#define bfin_read_CAN0_AM05L()		bfin_read16(CAN0_AM05L)
+#define bfin_write_CAN0_AM05L(val)	bfin_write16(CAN0_AM05L, val)
+#define bfin_read_CAN0_AM05H()		bfin_read16(CAN0_AM05H)
+#define bfin_write_CAN0_AM05H(val)	bfin_write16(CAN0_AM05H, val)
+#define bfin_read_CAN0_AM06L()		bfin_read16(CAN0_AM06L)
+#define bfin_write_CAN0_AM06L(val)	bfin_write16(CAN0_AM06L, val)
+#define bfin_read_CAN0_AM06H()		bfin_read16(CAN0_AM06H)
+#define bfin_write_CAN0_AM06H(val)	bfin_write16(CAN0_AM06H, val)
+#define bfin_read_CAN0_AM07L()		bfin_read16(CAN0_AM07L)
+#define bfin_write_CAN0_AM07L(val)	bfin_write16(CAN0_AM07L, val)
+#define bfin_read_CAN0_AM07H()		bfin_read16(CAN0_AM07H)
+#define bfin_write_CAN0_AM07H(val)	bfin_write16(CAN0_AM07H, val)
+#define bfin_read_CAN0_AM08L()		bfin_read16(CAN0_AM08L)
+#define bfin_write_CAN0_AM08L(val)	bfin_write16(CAN0_AM08L, val)
+#define bfin_read_CAN0_AM08H()		bfin_read16(CAN0_AM08H)
+#define bfin_write_CAN0_AM08H(val)	bfin_write16(CAN0_AM08H, val)
+#define bfin_read_CAN0_AM09L()		bfin_read16(CAN0_AM09L)
+#define bfin_write_CAN0_AM09L(val)	bfin_write16(CAN0_AM09L, val)
+#define bfin_read_CAN0_AM09H()		bfin_read16(CAN0_AM09H)
+#define bfin_write_CAN0_AM09H(val)	bfin_write16(CAN0_AM09H, val)
+#define bfin_read_CAN0_AM10L()		bfin_read16(CAN0_AM10L)
+#define bfin_write_CAN0_AM10L(val)	bfin_write16(CAN0_AM10L, val)
+#define bfin_read_CAN0_AM10H()		bfin_read16(CAN0_AM10H)
+#define bfin_write_CAN0_AM10H(val)	bfin_write16(CAN0_AM10H, val)
+#define bfin_read_CAN0_AM11L()		bfin_read16(CAN0_AM11L)
+#define bfin_write_CAN0_AM11L(val)	bfin_write16(CAN0_AM11L, val)
+#define bfin_read_CAN0_AM11H()		bfin_read16(CAN0_AM11H)
+#define bfin_write_CAN0_AM11H(val)	bfin_write16(CAN0_AM11H, val)
+#define bfin_read_CAN0_AM12L()		bfin_read16(CAN0_AM12L)
+#define bfin_write_CAN0_AM12L(val)	bfin_write16(CAN0_AM12L, val)
+#define bfin_read_CAN0_AM12H()		bfin_read16(CAN0_AM12H)
+#define bfin_write_CAN0_AM12H(val)	bfin_write16(CAN0_AM12H, val)
+#define bfin_read_CAN0_AM13L()		bfin_read16(CAN0_AM13L)
+#define bfin_write_CAN0_AM13L(val)	bfin_write16(CAN0_AM13L, val)
+#define bfin_read_CAN0_AM13H()		bfin_read16(CAN0_AM13H)
+#define bfin_write_CAN0_AM13H(val)	bfin_write16(CAN0_AM13H, val)
+#define bfin_read_CAN0_AM14L()		bfin_read16(CAN0_AM14L)
+#define bfin_write_CAN0_AM14L(val)	bfin_write16(CAN0_AM14L, val)
+#define bfin_read_CAN0_AM14H()		bfin_read16(CAN0_AM14H)
+#define bfin_write_CAN0_AM14H(val)	bfin_write16(CAN0_AM14H, val)
+#define bfin_read_CAN0_AM15L()		bfin_read16(CAN0_AM15L)
+#define bfin_write_CAN0_AM15L(val)	bfin_write16(CAN0_AM15L, val)
+#define bfin_read_CAN0_AM15H()		bfin_read16(CAN0_AM15H)
+#define bfin_write_CAN0_AM15H(val)	bfin_write16(CAN0_AM15H, val)
+
+/* CAN Controller 0 Acceptance Registers */
+
+#define bfin_read_CAN0_AM16L()		bfin_read16(CAN0_AM16L)
+#define bfin_write_CAN0_AM16L(val)	bfin_write16(CAN0_AM16L, val)
+#define bfin_read_CAN0_AM16H()		bfin_read16(CAN0_AM16H)
+#define bfin_write_CAN0_AM16H(val)	bfin_write16(CAN0_AM16H, val)
+#define bfin_read_CAN0_AM17L()		bfin_read16(CAN0_AM17L)
+#define bfin_write_CAN0_AM17L(val)	bfin_write16(CAN0_AM17L, val)
+#define bfin_read_CAN0_AM17H()		bfin_read16(CAN0_AM17H)
+#define bfin_write_CAN0_AM17H(val)	bfin_write16(CAN0_AM17H, val)
+#define bfin_read_CAN0_AM18L()		bfin_read16(CAN0_AM18L)
+#define bfin_write_CAN0_AM18L(val)	bfin_write16(CAN0_AM18L, val)
+#define bfin_read_CAN0_AM18H()		bfin_read16(CAN0_AM18H)
+#define bfin_write_CAN0_AM18H(val)	bfin_write16(CAN0_AM18H, val)
+#define bfin_read_CAN0_AM19L()		bfin_read16(CAN0_AM19L)
+#define bfin_write_CAN0_AM19L(val)	bfin_write16(CAN0_AM19L, val)
+#define bfin_read_CAN0_AM19H()		bfin_read16(CAN0_AM19H)
+#define bfin_write_CAN0_AM19H(val)	bfin_write16(CAN0_AM19H, val)
+#define bfin_read_CAN0_AM20L()		bfin_read16(CAN0_AM20L)
+#define bfin_write_CAN0_AM20L(val)	bfin_write16(CAN0_AM20L, val)
+#define bfin_read_CAN0_AM20H()		bfin_read16(CAN0_AM20H)
+#define bfin_write_CAN0_AM20H(val)	bfin_write16(CAN0_AM20H, val)
+#define bfin_read_CAN0_AM21L()		bfin_read16(CAN0_AM21L)
+#define bfin_write_CAN0_AM21L(val)	bfin_write16(CAN0_AM21L, val)
+#define bfin_read_CAN0_AM21H()		bfin_read16(CAN0_AM21H)
+#define bfin_write_CAN0_AM21H(val)	bfin_write16(CAN0_AM21H, val)
+#define bfin_read_CAN0_AM22L()		bfin_read16(CAN0_AM22L)
+#define bfin_write_CAN0_AM22L(val)	bfin_write16(CAN0_AM22L, val)
+#define bfin_read_CAN0_AM22H()		bfin_read16(CAN0_AM22H)
+#define bfin_write_CAN0_AM22H(val)	bfin_write16(CAN0_AM22H, val)
+#define bfin_read_CAN0_AM23L()		bfin_read16(CAN0_AM23L)
+#define bfin_write_CAN0_AM23L(val)	bfin_write16(CAN0_AM23L, val)
+#define bfin_read_CAN0_AM23H()		bfin_read16(CAN0_AM23H)
+#define bfin_write_CAN0_AM23H(val)	bfin_write16(CAN0_AM23H, val)
+#define bfin_read_CAN0_AM24L()		bfin_read16(CAN0_AM24L)
+#define bfin_write_CAN0_AM24L(val)	bfin_write16(CAN0_AM24L, val)
+#define bfin_read_CAN0_AM24H()		bfin_read16(CAN0_AM24H)
+#define bfin_write_CAN0_AM24H(val)	bfin_write16(CAN0_AM24H, val)
+#define bfin_read_CAN0_AM25L()		bfin_read16(CAN0_AM25L)
+#define bfin_write_CAN0_AM25L(val)	bfin_write16(CAN0_AM25L, val)
+#define bfin_read_CAN0_AM25H()		bfin_read16(CAN0_AM25H)
+#define bfin_write_CAN0_AM25H(val)	bfin_write16(CAN0_AM25H, val)
+#define bfin_read_CAN0_AM26L()		bfin_read16(CAN0_AM26L)
+#define bfin_write_CAN0_AM26L(val)	bfin_write16(CAN0_AM26L, val)
+#define bfin_read_CAN0_AM26H()		bfin_read16(CAN0_AM26H)
+#define bfin_write_CAN0_AM26H(val)	bfin_write16(CAN0_AM26H, val)
+#define bfin_read_CAN0_AM27L()		bfin_read16(CAN0_AM27L)
+#define bfin_write_CAN0_AM27L(val)	bfin_write16(CAN0_AM27L, val)
+#define bfin_read_CAN0_AM27H()		bfin_read16(CAN0_AM27H)
+#define bfin_write_CAN0_AM27H(val)	bfin_write16(CAN0_AM27H, val)
+#define bfin_read_CAN0_AM28L()		bfin_read16(CAN0_AM28L)
+#define bfin_write_CAN0_AM28L(val)	bfin_write16(CAN0_AM28L, val)
+#define bfin_read_CAN0_AM28H()		bfin_read16(CAN0_AM28H)
+#define bfin_write_CAN0_AM28H(val)	bfin_write16(CAN0_AM28H, val)
+#define bfin_read_CAN0_AM29L()		bfin_read16(CAN0_AM29L)
+#define bfin_write_CAN0_AM29L(val)	bfin_write16(CAN0_AM29L, val)
+#define bfin_read_CAN0_AM29H()		bfin_read16(CAN0_AM29H)
+#define bfin_write_CAN0_AM29H(val)	bfin_write16(CAN0_AM29H, val)
+#define bfin_read_CAN0_AM30L()		bfin_read16(CAN0_AM30L)
+#define bfin_write_CAN0_AM30L(val)	bfin_write16(CAN0_AM30L, val)
+#define bfin_read_CAN0_AM30H()		bfin_read16(CAN0_AM30H)
+#define bfin_write_CAN0_AM30H(val)	bfin_write16(CAN0_AM30H, val)
+#define bfin_read_CAN0_AM31L()		bfin_read16(CAN0_AM31L)
+#define bfin_write_CAN0_AM31L(val)	bfin_write16(CAN0_AM31L, val)
+#define bfin_read_CAN0_AM31H()		bfin_read16(CAN0_AM31H)
+#define bfin_write_CAN0_AM31H(val)	bfin_write16(CAN0_AM31H, val)
+
+/* CAN Controller 0 Mailbox Data Registers */
+
+#define bfin_read_CAN0_MB00_DATA0()		bfin_read16(CAN0_MB00_DATA0)
+#define bfin_write_CAN0_MB00_DATA0(val)		bfin_write16(CAN0_MB00_DATA0, val)
+#define bfin_read_CAN0_MB00_DATA1()		bfin_read16(CAN0_MB00_DATA1)
+#define bfin_write_CAN0_MB00_DATA1(val)		bfin_write16(CAN0_MB00_DATA1, val)
+#define bfin_read_CAN0_MB00_DATA2()		bfin_read16(CAN0_MB00_DATA2)
+#define bfin_write_CAN0_MB00_DATA2(val)		bfin_write16(CAN0_MB00_DATA2, val)
+#define bfin_read_CAN0_MB00_DATA3()		bfin_read16(CAN0_MB00_DATA3)
+#define bfin_write_CAN0_MB00_DATA3(val)		bfin_write16(CAN0_MB00_DATA3, val)
+#define bfin_read_CAN0_MB00_LENGTH()		bfin_read16(CAN0_MB00_LENGTH)
+#define bfin_write_CAN0_MB00_LENGTH(val)	bfin_write16(CAN0_MB00_LENGTH, val)
+#define bfin_read_CAN0_MB00_TIMESTAMP()		bfin_read16(CAN0_MB00_TIMESTAMP)
+#define bfin_write_CAN0_MB00_TIMESTAMP(val)	bfin_write16(CAN0_MB00_TIMESTAMP, val)
+#define bfin_read_CAN0_MB00_ID0()		bfin_read16(CAN0_MB00_ID0)
+#define bfin_write_CAN0_MB00_ID0(val)		bfin_write16(CAN0_MB00_ID0, val)
+#define bfin_read_CAN0_MB00_ID1()		bfin_read16(CAN0_MB00_ID1)
+#define bfin_write_CAN0_MB00_ID1(val)		bfin_write16(CAN0_MB00_ID1, val)
+#define bfin_read_CAN0_MB01_DATA0()		bfin_read16(CAN0_MB01_DATA0)
+#define bfin_write_CAN0_MB01_DATA0(val)		bfin_write16(CAN0_MB01_DATA0, val)
+#define bfin_read_CAN0_MB01_DATA1()		bfin_read16(CAN0_MB01_DATA1)
+#define bfin_write_CAN0_MB01_DATA1(val)		bfin_write16(CAN0_MB01_DATA1, val)
+#define bfin_read_CAN0_MB01_DATA2()		bfin_read16(CAN0_MB01_DATA2)
+#define bfin_write_CAN0_MB01_DATA2(val)		bfin_write16(CAN0_MB01_DATA2, val)
+#define bfin_read_CAN0_MB01_DATA3()		bfin_read16(CAN0_MB01_DATA3)
+#define bfin_write_CAN0_MB01_DATA3(val)		bfin_write16(CAN0_MB01_DATA3, val)
+#define bfin_read_CAN0_MB01_LENGTH()		bfin_read16(CAN0_MB01_LENGTH)
+#define bfin_write_CAN0_MB01_LENGTH(val)	bfin_write16(CAN0_MB01_LENGTH, val)
+#define bfin_read_CAN0_MB01_TIMESTAMP()		bfin_read16(CAN0_MB01_TIMESTAMP)
+#define bfin_write_CAN0_MB01_TIMESTAMP(val)	bfin_write16(CAN0_MB01_TIMESTAMP, val)
+#define bfin_read_CAN0_MB01_ID0()		bfin_read16(CAN0_MB01_ID0)
+#define bfin_write_CAN0_MB01_ID0(val)		bfin_write16(CAN0_MB01_ID0, val)
+#define bfin_read_CAN0_MB01_ID1()		bfin_read16(CAN0_MB01_ID1)
+#define bfin_write_CAN0_MB01_ID1(val)		bfin_write16(CAN0_MB01_ID1, val)
+#define bfin_read_CAN0_MB02_DATA0()		bfin_read16(CAN0_MB02_DATA0)
+#define bfin_write_CAN0_MB02_DATA0(val)		bfin_write16(CAN0_MB02_DATA0, val)
+#define bfin_read_CAN0_MB02_DATA1()		bfin_read16(CAN0_MB02_DATA1)
+#define bfin_write_CAN0_MB02_DATA1(val)		bfin_write16(CAN0_MB02_DATA1, val)
+#define bfin_read_CAN0_MB02_DATA2()		bfin_read16(CAN0_MB02_DATA2)
+#define bfin_write_CAN0_MB02_DATA2(val)		bfin_write16(CAN0_MB02_DATA2, val)
+#define bfin_read_CAN0_MB02_DATA3()		bfin_read16(CAN0_MB02_DATA3)
+#define bfin_write_CAN0_MB02_DATA3(val)		bfin_write16(CAN0_MB02_DATA3, val)
+#define bfin_read_CAN0_MB02_LENGTH()		bfin_read16(CAN0_MB02_LENGTH)
+#define bfin_write_CAN0_MB02_LENGTH(val)	bfin_write16(CAN0_MB02_LENGTH, val)
+#define bfin_read_CAN0_MB02_TIMESTAMP()		bfin_read16(CAN0_MB02_TIMESTAMP)
+#define bfin_write_CAN0_MB02_TIMESTAMP(val)	bfin_write16(CAN0_MB02_TIMESTAMP, val)
+#define bfin_read_CAN0_MB02_ID0()		bfin_read16(CAN0_MB02_ID0)
+#define bfin_write_CAN0_MB02_ID0(val)		bfin_write16(CAN0_MB02_ID0, val)
+#define bfin_read_CAN0_MB02_ID1()		bfin_read16(CAN0_MB02_ID1)
+#define bfin_write_CAN0_MB02_ID1(val)		bfin_write16(CAN0_MB02_ID1, val)
+#define bfin_read_CAN0_MB03_DATA0()		bfin_read16(CAN0_MB03_DATA0)
+#define bfin_write_CAN0_MB03_DATA0(val)		bfin_write16(CAN0_MB03_DATA0, val)
+#define bfin_read_CAN0_MB03_DATA1()		bfin_read16(CAN0_MB03_DATA1)
+#define bfin_write_CAN0_MB03_DATA1(val)		bfin_write16(CAN0_MB03_DATA1, val)
+#define bfin_read_CAN0_MB03_DATA2()		bfin_read16(CAN0_MB03_DATA2)
+#define bfin_write_CAN0_MB03_DATA2(val)		bfin_write16(CAN0_MB03_DATA2, val)
+#define bfin_read_CAN0_MB03_DATA3()		bfin_read16(CAN0_MB03_DATA3)
+#define bfin_write_CAN0_MB03_DATA3(val)		bfin_write16(CAN0_MB03_DATA3, val)
+#define bfin_read_CAN0_MB03_LENGTH()		bfin_read16(CAN0_MB03_LENGTH)
+#define bfin_write_CAN0_MB03_LENGTH(val)	bfin_write16(CAN0_MB03_LENGTH, val)
+#define bfin_read_CAN0_MB03_TIMESTAMP()		bfin_read16(CAN0_MB03_TIMESTAMP)
+#define bfin_write_CAN0_MB03_TIMESTAMP(val)	bfin_write16(CAN0_MB03_TIMESTAMP, val)
+#define bfin_read_CAN0_MB03_ID0()		bfin_read16(CAN0_MB03_ID0)
+#define bfin_write_CAN0_MB03_ID0(val)		bfin_write16(CAN0_MB03_ID0, val)
+#define bfin_read_CAN0_MB03_ID1()		bfin_read16(CAN0_MB03_ID1)
+#define bfin_write_CAN0_MB03_ID1(val)		bfin_write16(CAN0_MB03_ID1, val)
+#define bfin_read_CAN0_MB04_DATA0()		bfin_read16(CAN0_MB04_DATA0)
+#define bfin_write_CAN0_MB04_DATA0(val)		bfin_write16(CAN0_MB04_DATA0, val)
+#define bfin_read_CAN0_MB04_DATA1()		bfin_read16(CAN0_MB04_DATA1)
+#define bfin_write_CAN0_MB04_DATA1(val)		bfin_write16(CAN0_MB04_DATA1, val)
+#define bfin_read_CAN0_MB04_DATA2()		bfin_read16(CAN0_MB04_DATA2)
+#define bfin_write_CAN0_MB04_DATA2(val)		bfin_write16(CAN0_MB04_DATA2, val)
+#define bfin_read_CAN0_MB04_DATA3()		bfin_read16(CAN0_MB04_DATA3)
+#define bfin_write_CAN0_MB04_DATA3(val)		bfin_write16(CAN0_MB04_DATA3, val)
+#define bfin_read_CAN0_MB04_LENGTH()		bfin_read16(CAN0_MB04_LENGTH)
+#define bfin_write_CAN0_MB04_LENGTH(val)	bfin_write16(CAN0_MB04_LENGTH, val)
+#define bfin_read_CAN0_MB04_TIMESTAMP()		bfin_read16(CAN0_MB04_TIMESTAMP)
+#define bfin_write_CAN0_MB04_TIMESTAMP(val)	bfin_write16(CAN0_MB04_TIMESTAMP, val)
+#define bfin_read_CAN0_MB04_ID0()		bfin_read16(CAN0_MB04_ID0)
+#define bfin_write_CAN0_MB04_ID0(val)		bfin_write16(CAN0_MB04_ID0, val)
+#define bfin_read_CAN0_MB04_ID1()		bfin_read16(CAN0_MB04_ID1)
+#define bfin_write_CAN0_MB04_ID1(val)		bfin_write16(CAN0_MB04_ID1, val)
+#define bfin_read_CAN0_MB05_DATA0()		bfin_read16(CAN0_MB05_DATA0)
+#define bfin_write_CAN0_MB05_DATA0(val)		bfin_write16(CAN0_MB05_DATA0, val)
+#define bfin_read_CAN0_MB05_DATA1()		bfin_read16(CAN0_MB05_DATA1)
+#define bfin_write_CAN0_MB05_DATA1(val)		bfin_write16(CAN0_MB05_DATA1, val)
+#define bfin_read_CAN0_MB05_DATA2()		bfin_read16(CAN0_MB05_DATA2)
+#define bfin_write_CAN0_MB05_DATA2(val)		bfin_write16(CAN0_MB05_DATA2, val)
+#define bfin_read_CAN0_MB05_DATA3()		bfin_read16(CAN0_MB05_DATA3)
+#define bfin_write_CAN0_MB05_DATA3(val)		bfin_write16(CAN0_MB05_DATA3, val)
+#define bfin_read_CAN0_MB05_LENGTH()		bfin_read16(CAN0_MB05_LENGTH)
+#define bfin_write_CAN0_MB05_LENGTH(val)	bfin_write16(CAN0_MB05_LENGTH, val)
+#define bfin_read_CAN0_MB05_TIMESTAMP()		bfin_read16(CAN0_MB05_TIMESTAMP)
+#define bfin_write_CAN0_MB05_TIMESTAMP(val)	bfin_write16(CAN0_MB05_TIMESTAMP, val)
+#define bfin_read_CAN0_MB05_ID0()		bfin_read16(CAN0_MB05_ID0)
+#define bfin_write_CAN0_MB05_ID0(val)		bfin_write16(CAN0_MB05_ID0, val)
+#define bfin_read_CAN0_MB05_ID1()		bfin_read16(CAN0_MB05_ID1)
+#define bfin_write_CAN0_MB05_ID1(val)		bfin_write16(CAN0_MB05_ID1, val)
+#define bfin_read_CAN0_MB06_DATA0()		bfin_read16(CAN0_MB06_DATA0)
+#define bfin_write_CAN0_MB06_DATA0(val)		bfin_write16(CAN0_MB06_DATA0, val)
+#define bfin_read_CAN0_MB06_DATA1()		bfin_read16(CAN0_MB06_DATA1)
+#define bfin_write_CAN0_MB06_DATA1(val)		bfin_write16(CAN0_MB06_DATA1, val)
+#define bfin_read_CAN0_MB06_DATA2()		bfin_read16(CAN0_MB06_DATA2)
+#define bfin_write_CAN0_MB06_DATA2(val)		bfin_write16(CAN0_MB06_DATA2, val)
+#define bfin_read_CAN0_MB06_DATA3()		bfin_read16(CAN0_MB06_DATA3)
+#define bfin_write_CAN0_MB06_DATA3(val)		bfin_write16(CAN0_MB06_DATA3, val)
+#define bfin_read_CAN0_MB06_LENGTH()		bfin_read16(CAN0_MB06_LENGTH)
+#define bfin_write_CAN0_MB06_LENGTH(val)	bfin_write16(CAN0_MB06_LENGTH, val)
+#define bfin_read_CAN0_MB06_TIMESTAMP()		bfin_read16(CAN0_MB06_TIMESTAMP)
+#define bfin_write_CAN0_MB06_TIMESTAMP(val)	bfin_write16(CAN0_MB06_TIMESTAMP, val)
+#define bfin_read_CAN0_MB06_ID0()		bfin_read16(CAN0_MB06_ID0)
+#define bfin_write_CAN0_MB06_ID0(val)		bfin_write16(CAN0_MB06_ID0, val)
+#define bfin_read_CAN0_MB06_ID1()		bfin_read16(CAN0_MB06_ID1)
+#define bfin_write_CAN0_MB06_ID1(val)		bfin_write16(CAN0_MB06_ID1, val)
+#define bfin_read_CAN0_MB07_DATA0()		bfin_read16(CAN0_MB07_DATA0)
+#define bfin_write_CAN0_MB07_DATA0(val)		bfin_write16(CAN0_MB07_DATA0, val)
+#define bfin_read_CAN0_MB07_DATA1()		bfin_read16(CAN0_MB07_DATA1)
+#define bfin_write_CAN0_MB07_DATA1(val)		bfin_write16(CAN0_MB07_DATA1, val)
+#define bfin_read_CAN0_MB07_DATA2()		bfin_read16(CAN0_MB07_DATA2)
+#define bfin_write_CAN0_MB07_DATA2(val)		bfin_write16(CAN0_MB07_DATA2, val)
+#define bfin_read_CAN0_MB07_DATA3()		bfin_read16(CAN0_MB07_DATA3)
+#define bfin_write_CAN0_MB07_DATA3(val)		bfin_write16(CAN0_MB07_DATA3, val)
+#define bfin_read_CAN0_MB07_LENGTH()		bfin_read16(CAN0_MB07_LENGTH)
+#define bfin_write_CAN0_MB07_LENGTH(val)	bfin_write16(CAN0_MB07_LENGTH, val)
+#define bfin_read_CAN0_MB07_TIMESTAMP()		bfin_read16(CAN0_MB07_TIMESTAMP)
+#define bfin_write_CAN0_MB07_TIMESTAMP(val)	bfin_write16(CAN0_MB07_TIMESTAMP, val)
+#define bfin_read_CAN0_MB07_ID0()		bfin_read16(CAN0_MB07_ID0)
+#define bfin_write_CAN0_MB07_ID0(val)		bfin_write16(CAN0_MB07_ID0, val)
+#define bfin_read_CAN0_MB07_ID1()		bfin_read16(CAN0_MB07_ID1)
+#define bfin_write_CAN0_MB07_ID1(val)		bfin_write16(CAN0_MB07_ID1, val)
+#define bfin_read_CAN0_MB08_DATA0()		bfin_read16(CAN0_MB08_DATA0)
+#define bfin_write_CAN0_MB08_DATA0(val)		bfin_write16(CAN0_MB08_DATA0, val)
+#define bfin_read_CAN0_MB08_DATA1()		bfin_read16(CAN0_MB08_DATA1)
+#define bfin_write_CAN0_MB08_DATA1(val)		bfin_write16(CAN0_MB08_DATA1, val)
+#define bfin_read_CAN0_MB08_DATA2()		bfin_read16(CAN0_MB08_DATA2)
+#define bfin_write_CAN0_MB08_DATA2(val)		bfin_write16(CAN0_MB08_DATA2, val)
+#define bfin_read_CAN0_MB08_DATA3()		bfin_read16(CAN0_MB08_DATA3)
+#define bfin_write_CAN0_MB08_DATA3(val)		bfin_write16(CAN0_MB08_DATA3, val)
+#define bfin_read_CAN0_MB08_LENGTH()		bfin_read16(CAN0_MB08_LENGTH)
+#define bfin_write_CAN0_MB08_LENGTH(val)	bfin_write16(CAN0_MB08_LENGTH, val)
+#define bfin_read_CAN0_MB08_TIMESTAMP()		bfin_read16(CAN0_MB08_TIMESTAMP)
+#define bfin_write_CAN0_MB08_TIMESTAMP(val)	bfin_write16(CAN0_MB08_TIMESTAMP, val)
+#define bfin_read_CAN0_MB08_ID0()		bfin_read16(CAN0_MB08_ID0)
+#define bfin_write_CAN0_MB08_ID0(val)		bfin_write16(CAN0_MB08_ID0, val)
+#define bfin_read_CAN0_MB08_ID1()		bfin_read16(CAN0_MB08_ID1)
+#define bfin_write_CAN0_MB08_ID1(val)		bfin_write16(CAN0_MB08_ID1, val)
+#define bfin_read_CAN0_MB09_DATA0()		bfin_read16(CAN0_MB09_DATA0)
+#define bfin_write_CAN0_MB09_DATA0(val)		bfin_write16(CAN0_MB09_DATA0, val)
+#define bfin_read_CAN0_MB09_DATA1()		bfin_read16(CAN0_MB09_DATA1)
+#define bfin_write_CAN0_MB09_DATA1(val)		bfin_write16(CAN0_MB09_DATA1, val)
+#define bfin_read_CAN0_MB09_DATA2()		bfin_read16(CAN0_MB09_DATA2)
+#define bfin_write_CAN0_MB09_DATA2(val)		bfin_write16(CAN0_MB09_DATA2, val)
+#define bfin_read_CAN0_MB09_DATA3()		bfin_read16(CAN0_MB09_DATA3)
+#define bfin_write_CAN0_MB09_DATA3(val)		bfin_write16(CAN0_MB09_DATA3, val)
+#define bfin_read_CAN0_MB09_LENGTH()		bfin_read16(CAN0_MB09_LENGTH)
+#define bfin_write_CAN0_MB09_LENGTH(val)	bfin_write16(CAN0_MB09_LENGTH, val)
+#define bfin_read_CAN0_MB09_TIMESTAMP()		bfin_read16(CAN0_MB09_TIMESTAMP)
+#define bfin_write_CAN0_MB09_TIMESTAMP(val)	bfin_write16(CAN0_MB09_TIMESTAMP, val)
+#define bfin_read_CAN0_MB09_ID0()		bfin_read16(CAN0_MB09_ID0)
+#define bfin_write_CAN0_MB09_ID0(val)		bfin_write16(CAN0_MB09_ID0, val)
+#define bfin_read_CAN0_MB09_ID1()		bfin_read16(CAN0_MB09_ID1)
+#define bfin_write_CAN0_MB09_ID1(val)		bfin_write16(CAN0_MB09_ID1, val)
+#define bfin_read_CAN0_MB10_DATA0()		bfin_read16(CAN0_MB10_DATA0)
+#define bfin_write_CAN0_MB10_DATA0(val)		bfin_write16(CAN0_MB10_DATA0, val)
+#define bfin_read_CAN0_MB10_DATA1()		bfin_read16(CAN0_MB10_DATA1)
+#define bfin_write_CAN0_MB10_DATA1(val)		bfin_write16(CAN0_MB10_DATA1, val)
+#define bfin_read_CAN0_MB10_DATA2()		bfin_read16(CAN0_MB10_DATA2)
+#define bfin_write_CAN0_MB10_DATA2(val)		bfin_write16(CAN0_MB10_DATA2, val)
+#define bfin_read_CAN0_MB10_DATA3()		bfin_read16(CAN0_MB10_DATA3)
+#define bfin_write_CAN0_MB10_DATA3(val)		bfin_write16(CAN0_MB10_DATA3, val)
+#define bfin_read_CAN0_MB10_LENGTH()		bfin_read16(CAN0_MB10_LENGTH)
+#define bfin_write_CAN0_MB10_LENGTH(val)	bfin_write16(CAN0_MB10_LENGTH, val)
+#define bfin_read_CAN0_MB10_TIMESTAMP()		bfin_read16(CAN0_MB10_TIMESTAMP)
+#define bfin_write_CAN0_MB10_TIMESTAMP(val)	bfin_write16(CAN0_MB10_TIMESTAMP, val)
+#define bfin_read_CAN0_MB10_ID0()		bfin_read16(CAN0_MB10_ID0)
+#define bfin_write_CAN0_MB10_ID0(val)		bfin_write16(CAN0_MB10_ID0, val)
+#define bfin_read_CAN0_MB10_ID1()		bfin_read16(CAN0_MB10_ID1)
+#define bfin_write_CAN0_MB10_ID1(val)		bfin_write16(CAN0_MB10_ID1, val)
+#define bfin_read_CAN0_MB11_DATA0()		bfin_read16(CAN0_MB11_DATA0)
+#define bfin_write_CAN0_MB11_DATA0(val)		bfin_write16(CAN0_MB11_DATA0, val)
+#define bfin_read_CAN0_MB11_DATA1()		bfin_read16(CAN0_MB11_DATA1)
+#define bfin_write_CAN0_MB11_DATA1(val)		bfin_write16(CAN0_MB11_DATA1, val)
+#define bfin_read_CAN0_MB11_DATA2()		bfin_read16(CAN0_MB11_DATA2)
+#define bfin_write_CAN0_MB11_DATA2(val)		bfin_write16(CAN0_MB11_DATA2, val)
+#define bfin_read_CAN0_MB11_DATA3()		bfin_read16(CAN0_MB11_DATA3)
+#define bfin_write_CAN0_MB11_DATA3(val)		bfin_write16(CAN0_MB11_DATA3, val)
+#define bfin_read_CAN0_MB11_LENGTH()		bfin_read16(CAN0_MB11_LENGTH)
+#define bfin_write_CAN0_MB11_LENGTH(val)	bfin_write16(CAN0_MB11_LENGTH, val)
+#define bfin_read_CAN0_MB11_TIMESTAMP()		bfin_read16(CAN0_MB11_TIMESTAMP)
+#define bfin_write_CAN0_MB11_TIMESTAMP(val)	bfin_write16(CAN0_MB11_TIMESTAMP, val)
+#define bfin_read_CAN0_MB11_ID0()		bfin_read16(CAN0_MB11_ID0)
+#define bfin_write_CAN0_MB11_ID0(val)		bfin_write16(CAN0_MB11_ID0, val)
+#define bfin_read_CAN0_MB11_ID1()		bfin_read16(CAN0_MB11_ID1)
+#define bfin_write_CAN0_MB11_ID1(val)		bfin_write16(CAN0_MB11_ID1, val)
+#define bfin_read_CAN0_MB12_DATA0()		bfin_read16(CAN0_MB12_DATA0)
+#define bfin_write_CAN0_MB12_DATA0(val)		bfin_write16(CAN0_MB12_DATA0, val)
+#define bfin_read_CAN0_MB12_DATA1()		bfin_read16(CAN0_MB12_DATA1)
+#define bfin_write_CAN0_MB12_DATA1(val)		bfin_write16(CAN0_MB12_DATA1, val)
+#define bfin_read_CAN0_MB12_DATA2()		bfin_read16(CAN0_MB12_DATA2)
+#define bfin_write_CAN0_MB12_DATA2(val)		bfin_write16(CAN0_MB12_DATA2, val)
+#define bfin_read_CAN0_MB12_DATA3()		bfin_read16(CAN0_MB12_DATA3)
+#define bfin_write_CAN0_MB12_DATA3(val)		bfin_write16(CAN0_MB12_DATA3, val)
+#define bfin_read_CAN0_MB12_LENGTH()		bfin_read16(CAN0_MB12_LENGTH)
+#define bfin_write_CAN0_MB12_LENGTH(val)	bfin_write16(CAN0_MB12_LENGTH, val)
+#define bfin_read_CAN0_MB12_TIMESTAMP()		bfin_read16(CAN0_MB12_TIMESTAMP)
+#define bfin_write_CAN0_MB12_TIMESTAMP(val)	bfin_write16(CAN0_MB12_TIMESTAMP, val)
+#define bfin_read_CAN0_MB12_ID0()		bfin_read16(CAN0_MB12_ID0)
+#define bfin_write_CAN0_MB12_ID0(val)		bfin_write16(CAN0_MB12_ID0, val)
+#define bfin_read_CAN0_MB12_ID1()		bfin_read16(CAN0_MB12_ID1)
+#define bfin_write_CAN0_MB12_ID1(val)		bfin_write16(CAN0_MB12_ID1, val)
+#define bfin_read_CAN0_MB13_DATA0()		bfin_read16(CAN0_MB13_DATA0)
+#define bfin_write_CAN0_MB13_DATA0(val)		bfin_write16(CAN0_MB13_DATA0, val)
+#define bfin_read_CAN0_MB13_DATA1()		bfin_read16(CAN0_MB13_DATA1)
+#define bfin_write_CAN0_MB13_DATA1(val)		bfin_write16(CAN0_MB13_DATA1, val)
+#define bfin_read_CAN0_MB13_DATA2()		bfin_read16(CAN0_MB13_DATA2)
+#define bfin_write_CAN0_MB13_DATA2(val)		bfin_write16(CAN0_MB13_DATA2, val)
+#define bfin_read_CAN0_MB13_DATA3()		bfin_read16(CAN0_MB13_DATA3)
+#define bfin_write_CAN0_MB13_DATA3(val)		bfin_write16(CAN0_MB13_DATA3, val)
+#define bfin_read_CAN0_MB13_LENGTH()		bfin_read16(CAN0_MB13_LENGTH)
+#define bfin_write_CAN0_MB13_LENGTH(val)	bfin_write16(CAN0_MB13_LENGTH, val)
+#define bfin_read_CAN0_MB13_TIMESTAMP()		bfin_read16(CAN0_MB13_TIMESTAMP)
+#define bfin_write_CAN0_MB13_TIMESTAMP(val)	bfin_write16(CAN0_MB13_TIMESTAMP, val)
+#define bfin_read_CAN0_MB13_ID0()		bfin_read16(CAN0_MB13_ID0)
+#define bfin_write_CAN0_MB13_ID0(val)		bfin_write16(CAN0_MB13_ID0, val)
+#define bfin_read_CAN0_MB13_ID1()		bfin_read16(CAN0_MB13_ID1)
+#define bfin_write_CAN0_MB13_ID1(val)		bfin_write16(CAN0_MB13_ID1, val)
+#define bfin_read_CAN0_MB14_DATA0()		bfin_read16(CAN0_MB14_DATA0)
+#define bfin_write_CAN0_MB14_DATA0(val)		bfin_write16(CAN0_MB14_DATA0, val)
+#define bfin_read_CAN0_MB14_DATA1()		bfin_read16(CAN0_MB14_DATA1)
+#define bfin_write_CAN0_MB14_DATA1(val)		bfin_write16(CAN0_MB14_DATA1, val)
+#define bfin_read_CAN0_MB14_DATA2()		bfin_read16(CAN0_MB14_DATA2)
+#define bfin_write_CAN0_MB14_DATA2(val)		bfin_write16(CAN0_MB14_DATA2, val)
+#define bfin_read_CAN0_MB14_DATA3()		bfin_read16(CAN0_MB14_DATA3)
+#define bfin_write_CAN0_MB14_DATA3(val)		bfin_write16(CAN0_MB14_DATA3, val)
+#define bfin_read_CAN0_MB14_LENGTH()		bfin_read16(CAN0_MB14_LENGTH)
+#define bfin_write_CAN0_MB14_LENGTH(val)	bfin_write16(CAN0_MB14_LENGTH, val)
+#define bfin_read_CAN0_MB14_TIMESTAMP()		bfin_read16(CAN0_MB14_TIMESTAMP)
+#define bfin_write_CAN0_MB14_TIMESTAMP(val)	bfin_write16(CAN0_MB14_TIMESTAMP, val)
+#define bfin_read_CAN0_MB14_ID0()		bfin_read16(CAN0_MB14_ID0)
+#define bfin_write_CAN0_MB14_ID0(val)		bfin_write16(CAN0_MB14_ID0, val)
+#define bfin_read_CAN0_MB14_ID1()		bfin_read16(CAN0_MB14_ID1)
+#define bfin_write_CAN0_MB14_ID1(val)		bfin_write16(CAN0_MB14_ID1, val)
+#define bfin_read_CAN0_MB15_DATA0()		bfin_read16(CAN0_MB15_DATA0)
+#define bfin_write_CAN0_MB15_DATA0(val)		bfin_write16(CAN0_MB15_DATA0, val)
+#define bfin_read_CAN0_MB15_DATA1()		bfin_read16(CAN0_MB15_DATA1)
+#define bfin_write_CAN0_MB15_DATA1(val)		bfin_write16(CAN0_MB15_DATA1, val)
+#define bfin_read_CAN0_MB15_DATA2()		bfin_read16(CAN0_MB15_DATA2)
+#define bfin_write_CAN0_MB15_DATA2(val)		bfin_write16(CAN0_MB15_DATA2, val)
+#define bfin_read_CAN0_MB15_DATA3()		bfin_read16(CAN0_MB15_DATA3)
+#define bfin_write_CAN0_MB15_DATA3(val)		bfin_write16(CAN0_MB15_DATA3, val)
+#define bfin_read_CAN0_MB15_LENGTH()		bfin_read16(CAN0_MB15_LENGTH)
+#define bfin_write_CAN0_MB15_LENGTH(val)	bfin_write16(CAN0_MB15_LENGTH, val)
+#define bfin_read_CAN0_MB15_TIMESTAMP()		bfin_read16(CAN0_MB15_TIMESTAMP)
+#define bfin_write_CAN0_MB15_TIMESTAMP(val)	bfin_write16(CAN0_MB15_TIMESTAMP, val)
+#define bfin_read_CAN0_MB15_ID0()		bfin_read16(CAN0_MB15_ID0)
+#define bfin_write_CAN0_MB15_ID0(val)		bfin_write16(CAN0_MB15_ID0, val)
+#define bfin_read_CAN0_MB15_ID1()		bfin_read16(CAN0_MB15_ID1)
+#define bfin_write_CAN0_MB15_ID1(val)		bfin_write16(CAN0_MB15_ID1, val)
+
+/* CAN Controller 0 Mailbox Data Registers */
+
+#define bfin_read_CAN0_MB16_DATA0()		bfin_read16(CAN0_MB16_DATA0)
+#define bfin_write_CAN0_MB16_DATA0(val)		bfin_write16(CAN0_MB16_DATA0, val)
+#define bfin_read_CAN0_MB16_DATA1()		bfin_read16(CAN0_MB16_DATA1)
+#define bfin_write_CAN0_MB16_DATA1(val)		bfin_write16(CAN0_MB16_DATA1, val)
+#define bfin_read_CAN0_MB16_DATA2()		bfin_read16(CAN0_MB16_DATA2)
+#define bfin_write_CAN0_MB16_DATA2(val)		bfin_write16(CAN0_MB16_DATA2, val)
+#define bfin_read_CAN0_MB16_DATA3()		bfin_read16(CAN0_MB16_DATA3)
+#define bfin_write_CAN0_MB16_DATA3(val)		bfin_write16(CAN0_MB16_DATA3, val)
+#define bfin_read_CAN0_MB16_LENGTH()		bfin_read16(CAN0_MB16_LENGTH)
+#define bfin_write_CAN0_MB16_LENGTH(val)	bfin_write16(CAN0_MB16_LENGTH, val)
+#define bfin_read_CAN0_MB16_TIMESTAMP()		bfin_read16(CAN0_MB16_TIMESTAMP)
+#define bfin_write_CAN0_MB16_TIMESTAMP(val)	bfin_write16(CAN0_MB16_TIMESTAMP, val)
+#define bfin_read_CAN0_MB16_ID0()		bfin_read16(CAN0_MB16_ID0)
+#define bfin_write_CAN0_MB16_ID0(val)		bfin_write16(CAN0_MB16_ID0, val)
+#define bfin_read_CAN0_MB16_ID1()		bfin_read16(CAN0_MB16_ID1)
+#define bfin_write_CAN0_MB16_ID1(val)		bfin_write16(CAN0_MB16_ID1, val)
+#define bfin_read_CAN0_MB17_DATA0()		bfin_read16(CAN0_MB17_DATA0)
+#define bfin_write_CAN0_MB17_DATA0(val)		bfin_write16(CAN0_MB17_DATA0, val)
+#define bfin_read_CAN0_MB17_DATA1()		bfin_read16(CAN0_MB17_DATA1)
+#define bfin_write_CAN0_MB17_DATA1(val)		bfin_write16(CAN0_MB17_DATA1, val)
+#define bfin_read_CAN0_MB17_DATA2()		bfin_read16(CAN0_MB17_DATA2)
+#define bfin_write_CAN0_MB17_DATA2(val)		bfin_write16(CAN0_MB17_DATA2, val)
+#define bfin_read_CAN0_MB17_DATA3()		bfin_read16(CAN0_MB17_DATA3)
+#define bfin_write_CAN0_MB17_DATA3(val)		bfin_write16(CAN0_MB17_DATA3, val)
+#define bfin_read_CAN0_MB17_LENGTH()		bfin_read16(CAN0_MB17_LENGTH)
+#define bfin_write_CAN0_MB17_LENGTH(val)	bfin_write16(CAN0_MB17_LENGTH, val)
+#define bfin_read_CAN0_MB17_TIMESTAMP()		bfin_read16(CAN0_MB17_TIMESTAMP)
+#define bfin_write_CAN0_MB17_TIMESTAMP(val)	bfin_write16(CAN0_MB17_TIMESTAMP, val)
+#define bfin_read_CAN0_MB17_ID0()		bfin_read16(CAN0_MB17_ID0)
+#define bfin_write_CAN0_MB17_ID0(val)		bfin_write16(CAN0_MB17_ID0, val)
+#define bfin_read_CAN0_MB17_ID1()		bfin_read16(CAN0_MB17_ID1)
+#define bfin_write_CAN0_MB17_ID1(val)		bfin_write16(CAN0_MB17_ID1, val)
+#define bfin_read_CAN0_MB18_DATA0()		bfin_read16(CAN0_MB18_DATA0)
+#define bfin_write_CAN0_MB18_DATA0(val)		bfin_write16(CAN0_MB18_DATA0, val)
+#define bfin_read_CAN0_MB18_DATA1()		bfin_read16(CAN0_MB18_DATA1)
+#define bfin_write_CAN0_MB18_DATA1(val)		bfin_write16(CAN0_MB18_DATA1, val)
+#define bfin_read_CAN0_MB18_DATA2()		bfin_read16(CAN0_MB18_DATA2)
+#define bfin_write_CAN0_MB18_DATA2(val)		bfin_write16(CAN0_MB18_DATA2, val)
+#define bfin_read_CAN0_MB18_DATA3()		bfin_read16(CAN0_MB18_DATA3)
+#define bfin_write_CAN0_MB18_DATA3(val)		bfin_write16(CAN0_MB18_DATA3, val)
+#define bfin_read_CAN0_MB18_LENGTH()		bfin_read16(CAN0_MB18_LENGTH)
+#define bfin_write_CAN0_MB18_LENGTH(val)	bfin_write16(CAN0_MB18_LENGTH, val)
+#define bfin_read_CAN0_MB18_TIMESTAMP()		bfin_read16(CAN0_MB18_TIMESTAMP)
+#define bfin_write_CAN0_MB18_TIMESTAMP(val)	bfin_write16(CAN0_MB18_TIMESTAMP, val)
+#define bfin_read_CAN0_MB18_ID0()		bfin_read16(CAN0_MB18_ID0)
+#define bfin_write_CAN0_MB18_ID0(val)		bfin_write16(CAN0_MB18_ID0, val)
+#define bfin_read_CAN0_MB18_ID1()		bfin_read16(CAN0_MB18_ID1)
+#define bfin_write_CAN0_MB18_ID1(val)		bfin_write16(CAN0_MB18_ID1, val)
+#define bfin_read_CAN0_MB19_DATA0()		bfin_read16(CAN0_MB19_DATA0)
+#define bfin_write_CAN0_MB19_DATA0(val)		bfin_write16(CAN0_MB19_DATA0, val)
+#define bfin_read_CAN0_MB19_DATA1()		bfin_read16(CAN0_MB19_DATA1)
+#define bfin_write_CAN0_MB19_DATA1(val)		bfin_write16(CAN0_MB19_DATA1, val)
+#define bfin_read_CAN0_MB19_DATA2()		bfin_read16(CAN0_MB19_DATA2)
+#define bfin_write_CAN0_MB19_DATA2(val)		bfin_write16(CAN0_MB19_DATA2, val)
+#define bfin_read_CAN0_MB19_DATA3()		bfin_read16(CAN0_MB19_DATA3)
+#define bfin_write_CAN0_MB19_DATA3(val)		bfin_write16(CAN0_MB19_DATA3, val)
+#define bfin_read_CAN0_MB19_LENGTH()		bfin_read16(CAN0_MB19_LENGTH)
+#define bfin_write_CAN0_MB19_LENGTH(val)	bfin_write16(CAN0_MB19_LENGTH, val)
+#define bfin_read_CAN0_MB19_TIMESTAMP()		bfin_read16(CAN0_MB19_TIMESTAMP)
+#define bfin_write_CAN0_MB19_TIMESTAMP(val)	bfin_write16(CAN0_MB19_TIMESTAMP, val)
+#define bfin_read_CAN0_MB19_ID0()		bfin_read16(CAN0_MB19_ID0)
+#define bfin_write_CAN0_MB19_ID0(val)		bfin_write16(CAN0_MB19_ID0, val)
+#define bfin_read_CAN0_MB19_ID1()		bfin_read16(CAN0_MB19_ID1)
+#define bfin_write_CAN0_MB19_ID1(val)		bfin_write16(CAN0_MB19_ID1, val)
+#define bfin_read_CAN0_MB20_DATA0()		bfin_read16(CAN0_MB20_DATA0)
+#define bfin_write_CAN0_MB20_DATA0(val)		bfin_write16(CAN0_MB20_DATA0, val)
+#define bfin_read_CAN0_MB20_DATA1()		bfin_read16(CAN0_MB20_DATA1)
+#define bfin_write_CAN0_MB20_DATA1(val)		bfin_write16(CAN0_MB20_DATA1, val)
+#define bfin_read_CAN0_MB20_DATA2()		bfin_read16(CAN0_MB20_DATA2)
+#define bfin_write_CAN0_MB20_DATA2(val)		bfin_write16(CAN0_MB20_DATA2, val)
+#define bfin_read_CAN0_MB20_DATA3()		bfin_read16(CAN0_MB20_DATA3)
+#define bfin_write_CAN0_MB20_DATA3(val)		bfin_write16(CAN0_MB20_DATA3, val)
+#define bfin_read_CAN0_MB20_LENGTH()		bfin_read16(CAN0_MB20_LENGTH)
+#define bfin_write_CAN0_MB20_LENGTH(val)	bfin_write16(CAN0_MB20_LENGTH, val)
+#define bfin_read_CAN0_MB20_TIMESTAMP()		bfin_read16(CAN0_MB20_TIMESTAMP)
+#define bfin_write_CAN0_MB20_TIMESTAMP(val)	bfin_write16(CAN0_MB20_TIMESTAMP, val)
+#define bfin_read_CAN0_MB20_ID0()		bfin_read16(CAN0_MB20_ID0)
+#define bfin_write_CAN0_MB20_ID0(val)		bfin_write16(CAN0_MB20_ID0, val)
+#define bfin_read_CAN0_MB20_ID1()		bfin_read16(CAN0_MB20_ID1)
+#define bfin_write_CAN0_MB20_ID1(val)		bfin_write16(CAN0_MB20_ID1, val)
+#define bfin_read_CAN0_MB21_DATA0()		bfin_read16(CAN0_MB21_DATA0)
+#define bfin_write_CAN0_MB21_DATA0(val)		bfin_write16(CAN0_MB21_DATA0, val)
+#define bfin_read_CAN0_MB21_DATA1()		bfin_read16(CAN0_MB21_DATA1)
+#define bfin_write_CAN0_MB21_DATA1(val)		bfin_write16(CAN0_MB21_DATA1, val)
+#define bfin_read_CAN0_MB21_DATA2()		bfin_read16(CAN0_MB21_DATA2)
+#define bfin_write_CAN0_MB21_DATA2(val)		bfin_write16(CAN0_MB21_DATA2, val)
+#define bfin_read_CAN0_MB21_DATA3()		bfin_read16(CAN0_MB21_DATA3)
+#define bfin_write_CAN0_MB21_DATA3(val)		bfin_write16(CAN0_MB21_DATA3, val)
+#define bfin_read_CAN0_MB21_LENGTH()		bfin_read16(CAN0_MB21_LENGTH)
+#define bfin_write_CAN0_MB21_LENGTH(val)	bfin_write16(CAN0_MB21_LENGTH, val)
+#define bfin_read_CAN0_MB21_TIMESTAMP()		bfin_read16(CAN0_MB21_TIMESTAMP)
+#define bfin_write_CAN0_MB21_TIMESTAMP(val)	bfin_write16(CAN0_MB21_TIMESTAMP, val)
+#define bfin_read_CAN0_MB21_ID0()		bfin_read16(CAN0_MB21_ID0)
+#define bfin_write_CAN0_MB21_ID0(val)		bfin_write16(CAN0_MB21_ID0, val)
+#define bfin_read_CAN0_MB21_ID1()		bfin_read16(CAN0_MB21_ID1)
+#define bfin_write_CAN0_MB21_ID1(val)		bfin_write16(CAN0_MB21_ID1, val)
+#define bfin_read_CAN0_MB22_DATA0()		bfin_read16(CAN0_MB22_DATA0)
+#define bfin_write_CAN0_MB22_DATA0(val)		bfin_write16(CAN0_MB22_DATA0, val)
+#define bfin_read_CAN0_MB22_DATA1()		bfin_read16(CAN0_MB22_DATA1)
+#define bfin_write_CAN0_MB22_DATA1(val)		bfin_write16(CAN0_MB22_DATA1, val)
+#define bfin_read_CAN0_MB22_DATA2()		bfin_read16(CAN0_MB22_DATA2)
+#define bfin_write_CAN0_MB22_DATA2(val)		bfin_write16(CAN0_MB22_DATA2, val)
+#define bfin_read_CAN0_MB22_DATA3()		bfin_read16(CAN0_MB22_DATA3)
+#define bfin_write_CAN0_MB22_DATA3(val)		bfin_write16(CAN0_MB22_DATA3, val)
+#define bfin_read_CAN0_MB22_LENGTH()		bfin_read16(CAN0_MB22_LENGTH)
+#define bfin_write_CAN0_MB22_LENGTH(val)	bfin_write16(CAN0_MB22_LENGTH, val)
+#define bfin_read_CAN0_MB22_TIMESTAMP()		bfin_read16(CAN0_MB22_TIMESTAMP)
+#define bfin_write_CAN0_MB22_TIMESTAMP(val)	bfin_write16(CAN0_MB22_TIMESTAMP, val)
+#define bfin_read_CAN0_MB22_ID0()		bfin_read16(CAN0_MB22_ID0)
+#define bfin_write_CAN0_MB22_ID0(val)		bfin_write16(CAN0_MB22_ID0, val)
+#define bfin_read_CAN0_MB22_ID1()		bfin_read16(CAN0_MB22_ID1)
+#define bfin_write_CAN0_MB22_ID1(val)		bfin_write16(CAN0_MB22_ID1, val)
+#define bfin_read_CAN0_MB23_DATA0()		bfin_read16(CAN0_MB23_DATA0)
+#define bfin_write_CAN0_MB23_DATA0(val)		bfin_write16(CAN0_MB23_DATA0, val)
+#define bfin_read_CAN0_MB23_DATA1()		bfin_read16(CAN0_MB23_DATA1)
+#define bfin_write_CAN0_MB23_DATA1(val)		bfin_write16(CAN0_MB23_DATA1, val)
+#define bfin_read_CAN0_MB23_DATA2()		bfin_read16(CAN0_MB23_DATA2)
+#define bfin_write_CAN0_MB23_DATA2(val)		bfin_write16(CAN0_MB23_DATA2, val)
+#define bfin_read_CAN0_MB23_DATA3()		bfin_read16(CAN0_MB23_DATA3)
+#define bfin_write_CAN0_MB23_DATA3(val)		bfin_write16(CAN0_MB23_DATA3, val)
+#define bfin_read_CAN0_MB23_LENGTH()		bfin_read16(CAN0_MB23_LENGTH)
+#define bfin_write_CAN0_MB23_LENGTH(val)	bfin_write16(CAN0_MB23_LENGTH, val)
+#define bfin_read_CAN0_MB23_TIMESTAMP()		bfin_read16(CAN0_MB23_TIMESTAMP)
+#define bfin_write_CAN0_MB23_TIMESTAMP(val)	bfin_write16(CAN0_MB23_TIMESTAMP, val)
+#define bfin_read_CAN0_MB23_ID0()		bfin_read16(CAN0_MB23_ID0)
+#define bfin_write_CAN0_MB23_ID0(val)		bfin_write16(CAN0_MB23_ID0, val)
+#define bfin_read_CAN0_MB23_ID1()		bfin_read16(CAN0_MB23_ID1)
+#define bfin_write_CAN0_MB23_ID1(val)		bfin_write16(CAN0_MB23_ID1, val)
+#define bfin_read_CAN0_MB24_DATA0()		bfin_read16(CAN0_MB24_DATA0)
+#define bfin_write_CAN0_MB24_DATA0(val)		bfin_write16(CAN0_MB24_DATA0, val)
+#define bfin_read_CAN0_MB24_DATA1()		bfin_read16(CAN0_MB24_DATA1)
+#define bfin_write_CAN0_MB24_DATA1(val)		bfin_write16(CAN0_MB24_DATA1, val)
+#define bfin_read_CAN0_MB24_DATA2()		bfin_read16(CAN0_MB24_DATA2)
+#define bfin_write_CAN0_MB24_DATA2(val)		bfin_write16(CAN0_MB24_DATA2, val)
+#define bfin_read_CAN0_MB24_DATA3()		bfin_read16(CAN0_MB24_DATA3)
+#define bfin_write_CAN0_MB24_DATA3(val)		bfin_write16(CAN0_MB24_DATA3, val)
+#define bfin_read_CAN0_MB24_LENGTH()		bfin_read16(CAN0_MB24_LENGTH)
+#define bfin_write_CAN0_MB24_LENGTH(val)	bfin_write16(CAN0_MB24_LENGTH, val)
+#define bfin_read_CAN0_MB24_TIMESTAMP()		bfin_read16(CAN0_MB24_TIMESTAMP)
+#define bfin_write_CAN0_MB24_TIMESTAMP(val)	bfin_write16(CAN0_MB24_TIMESTAMP, val)
+#define bfin_read_CAN0_MB24_ID0()		bfin_read16(CAN0_MB24_ID0)
+#define bfin_write_CAN0_MB24_ID0(val)		bfin_write16(CAN0_MB24_ID0, val)
+#define bfin_read_CAN0_MB24_ID1()		bfin_read16(CAN0_MB24_ID1)
+#define bfin_write_CAN0_MB24_ID1(val)		bfin_write16(CAN0_MB24_ID1, val)
+#define bfin_read_CAN0_MB25_DATA0()		bfin_read16(CAN0_MB25_DATA0)
+#define bfin_write_CAN0_MB25_DATA0(val)		bfin_write16(CAN0_MB25_DATA0, val)
+#define bfin_read_CAN0_MB25_DATA1()		bfin_read16(CAN0_MB25_DATA1)
+#define bfin_write_CAN0_MB25_DATA1(val)		bfin_write16(CAN0_MB25_DATA1, val)
+#define bfin_read_CAN0_MB25_DATA2()		bfin_read16(CAN0_MB25_DATA2)
+#define bfin_write_CAN0_MB25_DATA2(val)		bfin_write16(CAN0_MB25_DATA2, val)
+#define bfin_read_CAN0_MB25_DATA3()		bfin_read16(CAN0_MB25_DATA3)
+#define bfin_write_CAN0_MB25_DATA3(val)		bfin_write16(CAN0_MB25_DATA3, val)
+#define bfin_read_CAN0_MB25_LENGTH()		bfin_read16(CAN0_MB25_LENGTH)
+#define bfin_write_CAN0_MB25_LENGTH(val)	bfin_write16(CAN0_MB25_LENGTH, val)
+#define bfin_read_CAN0_MB25_TIMESTAMP()		bfin_read16(CAN0_MB25_TIMESTAMP)
+#define bfin_write_CAN0_MB25_TIMESTAMP(val)	bfin_write16(CAN0_MB25_TIMESTAMP, val)
+#define bfin_read_CAN0_MB25_ID0()		bfin_read16(CAN0_MB25_ID0)
+#define bfin_write_CAN0_MB25_ID0(val)		bfin_write16(CAN0_MB25_ID0, val)
+#define bfin_read_CAN0_MB25_ID1()		bfin_read16(CAN0_MB25_ID1)
+#define bfin_write_CAN0_MB25_ID1(val)		bfin_write16(CAN0_MB25_ID1, val)
+#define bfin_read_CAN0_MB26_DATA0()		bfin_read16(CAN0_MB26_DATA0)
+#define bfin_write_CAN0_MB26_DATA0(val)		bfin_write16(CAN0_MB26_DATA0, val)
+#define bfin_read_CAN0_MB26_DATA1()		bfin_read16(CAN0_MB26_DATA1)
+#define bfin_write_CAN0_MB26_DATA1(val)		bfin_write16(CAN0_MB26_DATA1, val)
+#define bfin_read_CAN0_MB26_DATA2()		bfin_read16(CAN0_MB26_DATA2)
+#define bfin_write_CAN0_MB26_DATA2(val)		bfin_write16(CAN0_MB26_DATA2, val)
+#define bfin_read_CAN0_MB26_DATA3()		bfin_read16(CAN0_MB26_DATA3)
+#define bfin_write_CAN0_MB26_DATA3(val)		bfin_write16(CAN0_MB26_DATA3, val)
+#define bfin_read_CAN0_MB26_LENGTH()		bfin_read16(CAN0_MB26_LENGTH)
+#define bfin_write_CAN0_MB26_LENGTH(val)	bfin_write16(CAN0_MB26_LENGTH, val)
+#define bfin_read_CAN0_MB26_TIMESTAMP()		bfin_read16(CAN0_MB26_TIMESTAMP)
+#define bfin_write_CAN0_MB26_TIMESTAMP(val)	bfin_write16(CAN0_MB26_TIMESTAMP, val)
+#define bfin_read_CAN0_MB26_ID0()		bfin_read16(CAN0_MB26_ID0)
+#define bfin_write_CAN0_MB26_ID0(val)		bfin_write16(CAN0_MB26_ID0, val)
+#define bfin_read_CAN0_MB26_ID1()		bfin_read16(CAN0_MB26_ID1)
+#define bfin_write_CAN0_MB26_ID1(val)		bfin_write16(CAN0_MB26_ID1, val)
+#define bfin_read_CAN0_MB27_DATA0()		bfin_read16(CAN0_MB27_DATA0)
+#define bfin_write_CAN0_MB27_DATA0(val)		bfin_write16(CAN0_MB27_DATA0, val)
+#define bfin_read_CAN0_MB27_DATA1()		bfin_read16(CAN0_MB27_DATA1)
+#define bfin_write_CAN0_MB27_DATA1(val)		bfin_write16(CAN0_MB27_DATA1, val)
+#define bfin_read_CAN0_MB27_DATA2()		bfin_read16(CAN0_MB27_DATA2)
+#define bfin_write_CAN0_MB27_DATA2(val)		bfin_write16(CAN0_MB27_DATA2, val)
+#define bfin_read_CAN0_MB27_DATA3()		bfin_read16(CAN0_MB27_DATA3)
+#define bfin_write_CAN0_MB27_DATA3(val)		bfin_write16(CAN0_MB27_DATA3, val)
+#define bfin_read_CAN0_MB27_LENGTH()		bfin_read16(CAN0_MB27_LENGTH)
+#define bfin_write_CAN0_MB27_LENGTH(val)	bfin_write16(CAN0_MB27_LENGTH, val)
+#define bfin_read_CAN0_MB27_TIMESTAMP()		bfin_read16(CAN0_MB27_TIMESTAMP)
+#define bfin_write_CAN0_MB27_TIMESTAMP(val)	bfin_write16(CAN0_MB27_TIMESTAMP, val)
+#define bfin_read_CAN0_MB27_ID0()		bfin_read16(CAN0_MB27_ID0)
+#define bfin_write_CAN0_MB27_ID0(val)		bfin_write16(CAN0_MB27_ID0, val)
+#define bfin_read_CAN0_MB27_ID1()		bfin_read16(CAN0_MB27_ID1)
+#define bfin_write_CAN0_MB27_ID1(val)		bfin_write16(CAN0_MB27_ID1, val)
+#define bfin_read_CAN0_MB28_DATA0()		bfin_read16(CAN0_MB28_DATA0)
+#define bfin_write_CAN0_MB28_DATA0(val)		bfin_write16(CAN0_MB28_DATA0, val)
+#define bfin_read_CAN0_MB28_DATA1()		bfin_read16(CAN0_MB28_DATA1)
+#define bfin_write_CAN0_MB28_DATA1(val)		bfin_write16(CAN0_MB28_DATA1, val)
+#define bfin_read_CAN0_MB28_DATA2()		bfin_read16(CAN0_MB28_DATA2)
+#define bfin_write_CAN0_MB28_DATA2(val)		bfin_write16(CAN0_MB28_DATA2, val)
+#define bfin_read_CAN0_MB28_DATA3()		bfin_read16(CAN0_MB28_DATA3)
+#define bfin_write_CAN0_MB28_DATA3(val)		bfin_write16(CAN0_MB28_DATA3, val)
+#define bfin_read_CAN0_MB28_LENGTH()		bfin_read16(CAN0_MB28_LENGTH)
+#define bfin_write_CAN0_MB28_LENGTH(val)	bfin_write16(CAN0_MB28_LENGTH, val)
+#define bfin_read_CAN0_MB28_TIMESTAMP()		bfin_read16(CAN0_MB28_TIMESTAMP)
+#define bfin_write_CAN0_MB28_TIMESTAMP(val)	bfin_write16(CAN0_MB28_TIMESTAMP, val)
+#define bfin_read_CAN0_MB28_ID0()		bfin_read16(CAN0_MB28_ID0)
+#define bfin_write_CAN0_MB28_ID0(val)		bfin_write16(CAN0_MB28_ID0, val)
+#define bfin_read_CAN0_MB28_ID1()		bfin_read16(CAN0_MB28_ID1)
+#define bfin_write_CAN0_MB28_ID1(val)		bfin_write16(CAN0_MB28_ID1, val)
+#define bfin_read_CAN0_MB29_DATA0()		bfin_read16(CAN0_MB29_DATA0)
+#define bfin_write_CAN0_MB29_DATA0(val)		bfin_write16(CAN0_MB29_DATA0, val)
+#define bfin_read_CAN0_MB29_DATA1()		bfin_read16(CAN0_MB29_DATA1)
+#define bfin_write_CAN0_MB29_DATA1(val)		bfin_write16(CAN0_MB29_DATA1, val)
+#define bfin_read_CAN0_MB29_DATA2()		bfin_read16(CAN0_MB29_DATA2)
+#define bfin_write_CAN0_MB29_DATA2(val)		bfin_write16(CAN0_MB29_DATA2, val)
+#define bfin_read_CAN0_MB29_DATA3()		bfin_read16(CAN0_MB29_DATA3)
+#define bfin_write_CAN0_MB29_DATA3(val)		bfin_write16(CAN0_MB29_DATA3, val)
+#define bfin_read_CAN0_MB29_LENGTH()		bfin_read16(CAN0_MB29_LENGTH)
+#define bfin_write_CAN0_MB29_LENGTH(val)	bfin_write16(CAN0_MB29_LENGTH, val)
+#define bfin_read_CAN0_MB29_TIMESTAMP()		bfin_read16(CAN0_MB29_TIMESTAMP)
+#define bfin_write_CAN0_MB29_TIMESTAMP(val)	bfin_write16(CAN0_MB29_TIMESTAMP, val)
+#define bfin_read_CAN0_MB29_ID0()		bfin_read16(CAN0_MB29_ID0)
+#define bfin_write_CAN0_MB29_ID0(val)		bfin_write16(CAN0_MB29_ID0, val)
+#define bfin_read_CAN0_MB29_ID1()		bfin_read16(CAN0_MB29_ID1)
+#define bfin_write_CAN0_MB29_ID1(val)		bfin_write16(CAN0_MB29_ID1, val)
+#define bfin_read_CAN0_MB30_DATA0()		bfin_read16(CAN0_MB30_DATA0)
+#define bfin_write_CAN0_MB30_DATA0(val)		bfin_write16(CAN0_MB30_DATA0, val)
+#define bfin_read_CAN0_MB30_DATA1()		bfin_read16(CAN0_MB30_DATA1)
+#define bfin_write_CAN0_MB30_DATA1(val)		bfin_write16(CAN0_MB30_DATA1, val)
+#define bfin_read_CAN0_MB30_DATA2()		bfin_read16(CAN0_MB30_DATA2)
+#define bfin_write_CAN0_MB30_DATA2(val)		bfin_write16(CAN0_MB30_DATA2, val)
+#define bfin_read_CAN0_MB30_DATA3()		bfin_read16(CAN0_MB30_DATA3)
+#define bfin_write_CAN0_MB30_DATA3(val)		bfin_write16(CAN0_MB30_DATA3, val)
+#define bfin_read_CAN0_MB30_LENGTH()		bfin_read16(CAN0_MB30_LENGTH)
+#define bfin_write_CAN0_MB30_LENGTH(val)	bfin_write16(CAN0_MB30_LENGTH, val)
+#define bfin_read_CAN0_MB30_TIMESTAMP()		bfin_read16(CAN0_MB30_TIMESTAMP)
+#define bfin_write_CAN0_MB30_TIMESTAMP(val)	bfin_write16(CAN0_MB30_TIMESTAMP, val)
+#define bfin_read_CAN0_MB30_ID0()		bfin_read16(CAN0_MB30_ID0)
+#define bfin_write_CAN0_MB30_ID0(val)		bfin_write16(CAN0_MB30_ID0, val)
+#define bfin_read_CAN0_MB30_ID1()		bfin_read16(CAN0_MB30_ID1)
+#define bfin_write_CAN0_MB30_ID1(val)		bfin_write16(CAN0_MB30_ID1, val)
+#define bfin_read_CAN0_MB31_DATA0()		bfin_read16(CAN0_MB31_DATA0)
+#define bfin_write_CAN0_MB31_DATA0(val)		bfin_write16(CAN0_MB31_DATA0, val)
+#define bfin_read_CAN0_MB31_DATA1()		bfin_read16(CAN0_MB31_DATA1)
+#define bfin_write_CAN0_MB31_DATA1(val)		bfin_write16(CAN0_MB31_DATA1, val)
+#define bfin_read_CAN0_MB31_DATA2()		bfin_read16(CAN0_MB31_DATA2)
+#define bfin_write_CAN0_MB31_DATA2(val)		bfin_write16(CAN0_MB31_DATA2, val)
+#define bfin_read_CAN0_MB31_DATA3()		bfin_read16(CAN0_MB31_DATA3)
+#define bfin_write_CAN0_MB31_DATA3(val)		bfin_write16(CAN0_MB31_DATA3, val)
+#define bfin_read_CAN0_MB31_LENGTH()		bfin_read16(CAN0_MB31_LENGTH)
+#define bfin_write_CAN0_MB31_LENGTH(val)	bfin_write16(CAN0_MB31_LENGTH, val)
+#define bfin_read_CAN0_MB31_TIMESTAMP()		bfin_read16(CAN0_MB31_TIMESTAMP)
+#define bfin_write_CAN0_MB31_TIMESTAMP(val)	bfin_write16(CAN0_MB31_TIMESTAMP, val)
+#define bfin_read_CAN0_MB31_ID0()		bfin_read16(CAN0_MB31_ID0)
+#define bfin_write_CAN0_MB31_ID0(val)		bfin_write16(CAN0_MB31_ID0, val)
+#define bfin_read_CAN0_MB31_ID1()		bfin_read16(CAN0_MB31_ID1)
+#define bfin_write_CAN0_MB31_ID1(val)		bfin_write16(CAN0_MB31_ID1, val)
+
+/* Counter Registers */
+
+#define bfin_read_CNT_CONFIG()		bfin_read16(CNT_CONFIG)
+#define bfin_write_CNT_CONFIG(val)	bfin_write16(CNT_CONFIG, val)
+#define bfin_read_CNT_IMASK()		bfin_read16(CNT_IMASK)
+#define bfin_write_CNT_IMASK(val)	bfin_write16(CNT_IMASK, val)
+#define bfin_read_CNT_STATUS()		bfin_read16(CNT_STATUS)
+#define bfin_write_CNT_STATUS(val)	bfin_write16(CNT_STATUS, val)
+#define bfin_read_CNT_COMMAND()		bfin_read16(CNT_COMMAND)
+#define bfin_write_CNT_COMMAND(val)	bfin_write16(CNT_COMMAND, val)
+#define bfin_read_CNT_DEBOUNCE()	bfin_read16(CNT_DEBOUNCE)
+#define bfin_write_CNT_DEBOUNCE(val)	bfin_write16(CNT_DEBOUNCE, val)
+#define bfin_read_CNT_COUNTER()		bfin_read32(CNT_COUNTER)
+#define bfin_write_CNT_COUNTER(val)	bfin_write32(CNT_COUNTER, val)
+#define bfin_read_CNT_MAX()		bfin_read32(CNT_MAX)
+#define bfin_write_CNT_MAX(val)		bfin_write32(CNT_MAX, val)
+#define bfin_read_CNT_MIN()		bfin_read32(CNT_MIN)
+#define bfin_write_CNT_MIN(val)		bfin_write32(CNT_MIN, val)
+
+/* RSI Registers */
+#define bfin_read_RSI_CLK_CTL()		bfin_read16(RSI_CLK_CONTROL)
+#define bfin_write_RSI_CLK_CTL(val)	bfin_write16(RSI_CLK_CONTROL, val)
+#define bfin_read_RSI_ARGUMENT()	bfin_read32(RSI_ARGUMENT)
+#define bfin_write_RSI_ARGUMENT(val)	bfin_write32(RSI_ARGUMENT, val)
+#define bfin_read_RSI_COMMAND()		bfin_read16(RSI_COMMAND)
+#define bfin_write_RSI_COMMAND(val)	bfin_write16(RSI_COMMAND, val)
+#define bfin_read_RSI_RESP_CMD()	bfin_read16(RSI_RESP_CMD)
+#define bfin_write_RSI_RESP_CMD(val)	bfin_write16(RSI_RESP_CMD, val)
+#define bfin_read_RSI_RESPONSE0()	bfin_read32(RSI_RESPONSE0)
+#define bfin_write_RSI_RESPONSE0(val)	bfin_write32(RSI_RESPONSE0, val)
+#define bfin_read_RSI_RESPONSE1()	bfin_read32(RSI_RESPONSE1)
+#define bfin_write_RSI_RESPONSE1(val)	bfin_write32(RSI_RESPONSE1, val)
+#define bfin_read_RSI_RESPONSE2()	bfin_read32(RSI_RESPONSE2)
+#define bfin_write_RSI_RESPONSE2(val)	bfin_write32(RSI_RESPONSE2, val)
+#define bfin_read_RSI_RESPONSE3()	bfin_read32(RSI_RESPONSE3)
+#define bfin_write_RSI_RESPONSE3(val)	bfin_write32(RSI_RESPONSE3, val)
+#define bfin_read_RSI_DATA_TIMER()	bfin_read32(RSI_DATA_TIMER)
+#define bfin_write_RSI_DATA_TIMER(val)	bfin_write32(RSI_DATA_TIMER, val)
+#define bfin_read_RSI_DATA_LGTH()	bfin_read16(RSI_DATA_LGTH)
+#define bfin_write_RSI_DATA_LGTH(val)	bfin_write16(RSI_DATA_LGTH, val)
+#define bfin_read_RSI_DATA_CTL()	bfin_read16(RSI_DATA_CONTROL)
+#define bfin_write_RSI_DATA_CTL(val)	bfin_write16(RSI_DATA_CONTROL, val)
+#define bfin_read_RSI_DATA_CNT()	bfin_read16(RSI_DATA_CNT)
+#define bfin_write_RSI_DATA_CNT(val)	bfin_write16(RSI_DATA_CNT, val)
+#define bfin_read_RSI_STATUS()		bfin_read32(RSI_STATUS)
+#define bfin_write_RSI_STATUS(val)	bfin_write32(RSI_STATUS, val)
+#define bfin_read_RSI_STATUS_CLR()	bfin_read16(RSI_STATUSCL)
+#define bfin_write_RSI_STATUS_CLR(val)	bfin_write16(RSI_STATUSCL, val)
+#define bfin_read_RSI_MASK0()		bfin_read32(RSI_MASK0)
+#define bfin_write_RSI_MASK0(val)	bfin_write32(RSI_MASK0, val)
+#define bfin_read_RSI_MASK1()		bfin_read32(RSI_MASK1)
+#define bfin_write_RSI_MASK1(val)	bfin_write32(RSI_MASK1, val)
+#define bfin_read_RSI_FIFO_CNT()	bfin_read16(RSI_FIFO_CNT)
+#define bfin_write_RSI_FIFO_CNT(val)	bfin_write16(RSI_FIFO_CNT, val)
+#define bfin_read_RSI_CEATA_CONTROL()	bfin_read16(RSI_CEATA_CONTROL)
+#define bfin_write_RSI_CEATA_CONTROL(val)	bfin_write16(RSI_CEATA_CONTROL, val)
+#define bfin_read_RSI_BLKSZ()		bfin_read16(RSI_BLKSZ)
+#define bfin_write_RSI_BLKSZ(val)	bfin_write16(RSI_BLKSZ, val)
+#define bfin_read_RSI_FIFO()		bfin_read32(RSI_FIFO)
+#define bfin_write_RSI_FIFO(val)	bfin_write32(RSI_FIFO, val)
+#define bfin_read_RSI_E_STATUS()	bfin_read32(RSI_ESTAT)
+#define bfin_write_RSI_E_STATUS(val)	bfin_write32(RSI_ESTAT, val)
+#define bfin_read_RSI_E_MASK()		bfin_read32(RSI_EMASK)
+#define bfin_write_RSI_E_MASK(val)	bfin_write32(RSI_EMASK, val)
+#define bfin_read_RSI_CFG()		bfin_read16(RSI_CONFIG)
+#define bfin_write_RSI_CFG(val)		bfin_write16(RSI_CONFIG, val)
+#define bfin_read_RSI_RD_WAIT_EN()	bfin_read16(RSI_RD_WAIT_EN)
+#define bfin_write_RSI_RD_WAIT_EN(val)	bfin_write16(RSI_RD_WAIT_EN, val)
+#define bfin_read_RSI_PID0()		bfin_read16(RSI_PID0)
+#define bfin_write_RSI_PID0(val)	bfin_write16(RSI_PID0, val)
+#define bfin_read_RSI_PID1()		bfin_read16(RSI_PID1)
+#define bfin_write_RSI_PID1(val)	bfin_write16(RSI_PID1, val)
+#define bfin_read_RSI_PID2()		bfin_read16(RSI_PID2)
+#define bfin_write_RSI_PID2(val)	bfin_write16(RSI_PID2, val)
+#define bfin_read_RSI_PID3()		bfin_read16(RSI_PID3)
+#define bfin_write_RSI_PID3(val)	bfin_write16(RSI_PID3, val)
+
+/* USB Registers */
+#define bfin_read_USB_PLLOSC_CTRL()	bfin_read16(USB_PLL_OSC)
+#define bfin_write_USB_PLLOSC_CTRL(val)	bfin_write16(USB_PLL_OSC, val)
+#define bfin_write_USB_VBUS_CTL(val)	bfin_write8(USB_VBUS_CTL, val)
+#define bfin_read_USB_APHY_CNTRL()	bfin_read8(USB_PHY_CTL)
+#define bfin_write_USB_APHY_CNTRL(val)	bfin_write8(USB_PHY_CTL, val)
+
+#endif /* _CDEF_BF60X_H */
+
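The cdefBF60x wrappers above do nothing more than bind each MMR name to the generic bfin_read8/16/32() and bfin_write8/16/32() helpers, so callers never have to remember a register's width. As a minimal, hedged sketch (not part of this patch), the rotary-counter accessors could be used as below; it assumes the usual kernel context, assumes <asm/blackfin.h> pulls these wrappers in, and uses placeholder configuration and debounce values because the CNT bit-field macros are not in this hunk.

    #include <linux/types.h>
    #include <asm/blackfin.h>   /* assumed to pull in the BF60x MMR accessor wrappers */

    /* Sketch only: program the counter block, then snapshot the 32-bit count. */
    static u32 cnt_snapshot(void)
    {
            bfin_write_CNT_DEBOUNCE(0);     /* placeholder debounce setting   */
            bfin_write_CNT_CONFIG(0);       /* placeholder config; 16-bit MMR */
            return bfin_read_CNT_COUNTER(); /* 32-bit count register          */
    }
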
diff --git a/arch/blackfin/mach-bf609/include/mach/defBF609.h b/arch/blackfin/mach-bf609/include/mach/defBF609.h
new file mode 100644
index 0000000..19690cc
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/defBF609.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
+ */
+
+#ifndef _DEF_BF609_H
+#define _DEF_BF609_H
+
+/* Include defBF60x_base.h for the set of #defines that are common to all ADSP-BF60x processors */
+#include "defBF60x_base.h"
+
+/* The following are the #defines needed by ADSP-BF609 that are not in the common header */
+
+#endif /* _DEF_BF609_H */
diff --git a/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h b/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h
new file mode 100644
index 0000000..6aac385
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h
@@ -0,0 +1,3587 @@
+/*
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the Clear BSD license or the GPL-2 (or later)
+ */
+
+#ifndef _DEF_BF60X_H
+#define _DEF_BF60X_H
+
+
+/* ************************************************************** */
+/*   SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF60x    */
+/* ************************************************************** */
+
+
+/* =========================
+        CNT Registers
+   ========================= */
+
+/* =========================
+        CNT0
+   ========================= */
+#define CNT_CONFIG                 0xFFC00400         /* CNT0 Configuration Register */
+#define CNT_IMASK                  0xFFC00404         /* CNT0 Interrupt Mask Register */
+#define CNT_STATUS                 0xFFC00408         /* CNT0 Status Register */
+#define CNT_COMMAND                0xFFC0040C         /* CNT0 Command Register */
+#define CNT_DEBOUNCE               0xFFC00410         /* CNT0 Debounce Register */
+#define CNT_COUNTER                0xFFC00414         /* CNT0 Counter Register */
+#define CNT_MAX                    0xFFC00418         /* CNT0 Maximum Count Register */
+#define CNT_MIN                    0xFFC0041C         /* CNT0 Minimum Count Register */
+
+
+/* =========================
+        RSI Registers
+   ========================= */
+
+#define RSI_CLK_CONTROL            0xFFC00604         /* RSI0 Clock Control Register */
+#define RSI_ARGUMENT               0xFFC00608         /* RSI0 Argument Register */
+#define RSI_COMMAND                0xFFC0060C         /* RSI0 Command Register */
+#define RSI_RESP_CMD               0xFFC00610         /* RSI0 Response Command Register */
+#define RSI_RESPONSE0              0xFFC00614         /* RSI0 Response 0 Register */
+#define RSI_RESPONSE1              0xFFC00618         /* RSI0 Response 1 Register */
+#define RSI_RESPONSE2              0xFFC0061C         /* RSI0 Response 2 Register */
+#define RSI_RESPONSE3              0xFFC00620         /* RSI0 Response 3 Register */
+#define RSI_DATA_TIMER             0xFFC00624         /* RSI0 Data Timer Register */
+#define RSI_DATA_LGTH              0xFFC00628         /* RSI0 Data Length Register */
+#define RSI_DATA_CONTROL           0xFFC0062C         /* RSI0 Data Control Register */
+#define RSI_DATA_CNT               0xFFC00630         /* RSI0 Data Count Register */
+#define RSI_STATUS                 0xFFC00634         /* RSI0 Status Register */
+#define RSI_STATUSCL               0xFFC00638         /* RSI0 Status Clear Register */
+#define RSI_MASK0                  0xFFC0063C         /* RSI0 Interrupt 0 Mask Register */
+#define RSI_MASK1                  0xFFC00640         /* RSI0 Interrupt 1 Mask Register */
+#define RSI_FIFO_CNT               0xFFC00648         /* RSI0 FIFO Counter Register */
+#define RSI_CEATA_CONTROL          0xFFC0064C         /* RSI0 CE-ATA Control Register (contains the CCS generation disable bit) */
+#define RSI_BOOT_TCNTR             0xFFC00650         /* RSI0 Boot Timing Counter Register */
+#define RSI_BACK_TOUT              0xFFC00654         /* RSI0 Boot Acknowledge Timeout Register */
+#define RSI_SLP_WKUP_TOUT          0xFFC00658         /* RSI0 Sleep Wakeup Timeout Register */
+#define RSI_BLKSZ                  0xFFC0065C         /* RSI0 Block Size Register */
+#define RSI_FIFO                   0xFFC00680         /* RSI0 Data FIFO Register */
+#define RSI_ESTAT                  0xFFC006C0         /* RSI0 Exception Status Register */
+#define RSI_EMASK                  0xFFC006C4         /* RSI0 Exception Mask Register */
+#define RSI_CONFIG                 0xFFC006C8         /* RSI0 Configuration Register */
+#define RSI_RD_WAIT_EN             0xFFC006CC         /* RSI0 Read Wait Enable Register */
+#define RSI_PID0                   0xFFC006D0         /* RSI0 Peripheral Identification Register */
+#define RSI_PID1                   0xFFC006D4         /* RSI0 Peripheral Identification Register */
+#define RSI_PID2                   0xFFC006D8         /* RSI0 Peripheral Identification Register */
+#define RSI_PID3                   0xFFC006DC         /* RSI0 Peripheral Identification Register */
+
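The RSI block above only lists raw MMR addresses; the typed accessors for them (bfin_read_RSI_*/bfin_write_RSI_*) live in cdefBF60x_base.h earlier in this patch. Purely as a hedged sketch of how the two headers fit together, a command could be issued by writing the 32-bit argument and 16-bit command registers and then polling the 32-bit status register; the status bit tested below is a placeholder, since the RSI bit-field macros are not part of this hunk, and real code would bound the loop.

    #include <linux/types.h>
    #include <asm/blackfin.h>   /* assumed include for the RSI accessor wrappers */

    /* Sketch only: issue one RSI (SD/MMC) command and wait for a status flag. */
    static void rsi_send_cmd(u16 cmd_word, u32 arg)
    {
            bfin_write_RSI_ARGUMENT(arg);               /* 32-bit argument register */
            bfin_write_RSI_COMMAND(cmd_word);           /* 16-bit command register  */
            while (!(bfin_read_RSI_STATUS() & 0x1))     /* 0x1 is a placeholder bit */
                    ;                                   /* real code would time out */
    }
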
+/* =========================
+        CAN Registers
+   ========================= */
+
+/* =========================
+        CAN0
+   ========================= */
+#define CAN0_MC1                    0xFFC00A00         /* CAN0 Mailbox Configuration Register 1 */
+#define CAN0_MD1                    0xFFC00A04         /* CAN0 Mailbox Direction Register 1 */
+#define CAN0_TRS1                   0xFFC00A08         /* CAN0 Transmission Request Set Register 1 */
+#define CAN0_TRR1                   0xFFC00A0C         /* CAN0 Transmission Request Reset Register 1 */
+#define CAN0_TA1                    0xFFC00A10         /* CAN0 Transmission Acknowledge Register 1 */
+#define CAN0_AA1                    0xFFC00A14         /* CAN0 Abort Acknowledge Register 1 */
+#define CAN0_RMP1                   0xFFC00A18         /* CAN0 Receive Message Pending Register 1 */
+#define CAN0_RML1                   0xFFC00A1C         /* CAN0 Receive Message Lost Register 1 */
+#define CAN0_MBTIF1                 0xFFC00A20         /* CAN0 Mailbox Transmit Interrupt Flag Register 1 */
+#define CAN0_MBRIF1                 0xFFC00A24         /* CAN0 Mailbox Receive Interrupt Flag Register 1 */
+#define CAN0_MBIM1                  0xFFC00A28         /* CAN0 Mailbox Interrupt Mask Register 1 */
+#define CAN0_RFH1                   0xFFC00A2C         /* CAN0 Remote Frame Handling Register 1 */
+#define CAN0_OPSS1                  0xFFC00A30         /* CAN0 Overwrite Protection/Single Shot Transmission Register 1 */
+#define CAN0_MC2                    0xFFC00A40         /* CAN0 Mailbox Configuration Register 2 */
+#define CAN0_MD2                    0xFFC00A44         /* CAN0 Mailbox Direction Register 2 */
+#define CAN0_TRS2                   0xFFC00A48         /* CAN0 Transmission Request Set Register 2 */
+#define CAN0_TRR2                   0xFFC00A4C         /* CAN0 Transmission Request Reset Register 2 */
+#define CAN0_TA2                    0xFFC00A50         /* CAN0 Transmission Acknowledge Register 2 */
+#define CAN0_AA2                    0xFFC00A54         /* CAN0 Abort Acknowledge Register 2 */
+#define CAN0_RMP2                   0xFFC00A58         /* CAN0 Receive Message Pending Register 2 */
+#define CAN0_RML2                   0xFFC00A5C         /* CAN0 Receive Message Lost Register 2 */
+#define CAN0_MBTIF2                 0xFFC00A60         /* CAN0 Mailbox Transmit Interrupt Flag Register 2 */
+#define CAN0_MBRIF2                 0xFFC00A64         /* CAN0 Mailbox Receive Interrupt Flag Register 2 */
+#define CAN0_MBIM2                  0xFFC00A68         /* CAN0 Mailbox Interrupt Mask Register 2 */
+#define CAN0_RFH2                   0xFFC00A6C         /* CAN0 Remote Frame Handling Register 2 */
+#define CAN0_OPSS2                  0xFFC00A70         /* CAN0 Overwrite Protection/Single Shot Transmission Register 2 */
+#define CAN0_CLOCK                  0xFFC00A80         /* CAN0 Clock Register */
+#define CAN0_TIMING                 0xFFC00A84         /* CAN0 Timing Register */
+#define CAN0_DEBUG                  0xFFC00A88         /* CAN0 Debug Register */
+#define CAN0_STATUS                 0xFFC00A8C         /* CAN0 Status Register */
+#define CAN0_CEC                    0xFFC00A90         /* CAN0 Error Counter Register */
+#define CAN0_GIS                    0xFFC00A94         /* CAN0 Global CAN Interrupt Status */
+#define CAN0_GIM                    0xFFC00A98         /* CAN0 Global CAN Interrupt Mask */
+#define CAN0_GIF                    0xFFC00A9C         /* CAN0 Global CAN Interrupt Flag */
+#define CAN0_CONTROL                0xFFC00AA0         /* CAN0 CAN Master Control Register */
+#define CAN0_INTR                   0xFFC00AA4         /* CAN0 Interrupt Pending Register */
+#define CAN0_MBTD                   0xFFC00AAC         /* CAN0 Temporary Mailbox Disable Register */
+#define CAN0_EWR                    0xFFC00AB0         /* CAN0 Error Counter Warning Level Register */
+#define CAN0_ESR                    0xFFC00AB4         /* CAN0 Error Status Register */
+#define CAN0_UCCNT                  0xFFC00AC4         /* CAN0 Universal Counter Register */
+#define CAN0_UCRC                   0xFFC00AC8         /* CAN0 Universal Counter Reload/Capture Register */
+#define CAN0_UCCNF                  0xFFC00ACC         /* CAN0 Universal Counter Configuration Mode Register */
+#define CAN0_AM00L                  0xFFC00B00         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM01L                  0xFFC00B08         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM02L                  0xFFC00B10         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM03L                  0xFFC00B18         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM04L                  0xFFC00B20         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM05L                  0xFFC00B28         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM06L                  0xFFC00B30         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM07L                  0xFFC00B38         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM08L                  0xFFC00B40         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM09L                  0xFFC00B48         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM10L                  0xFFC00B50         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM11L                  0xFFC00B58         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM12L                  0xFFC00B60         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM13L                  0xFFC00B68         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM14L                  0xFFC00B70         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM15L                  0xFFC00B78         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM16L                  0xFFC00B80         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM17L                  0xFFC00B88         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM18L                  0xFFC00B90         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM19L                  0xFFC00B98         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM20L                  0xFFC00BA0         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM21L                  0xFFC00BA8         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM22L                  0xFFC00BB0         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM23L                  0xFFC00BB8         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM24L                  0xFFC00BC0         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM25L                  0xFFC00BC8         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM26L                  0xFFC00BD0         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM27L                  0xFFC00BD8         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM28L                  0xFFC00BE0         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM29L                  0xFFC00BE8         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM30L                  0xFFC00BF0         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM31L                  0xFFC00BF8         /* CAN0 Acceptance Mask Register (L) */
+#define CAN0_AM00H                  0xFFC00B04         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM01H                  0xFFC00B0C         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM02H                  0xFFC00B14         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM03H                  0xFFC00B1C         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM04H                  0xFFC00B24         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM05H                  0xFFC00B2C         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM06H                  0xFFC00B34         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM07H                  0xFFC00B3C         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM08H                  0xFFC00B44         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM09H                  0xFFC00B4C         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM10H                  0xFFC00B54         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM11H                  0xFFC00B5C         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM12H                  0xFFC00B64         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM13H                  0xFFC00B6C         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM14H                  0xFFC00B74         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM15H                  0xFFC00B7C         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM16H                  0xFFC00B84         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM17H                  0xFFC00B8C         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM18H                  0xFFC00B94         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM19H                  0xFFC00B9C         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM20H                  0xFFC00BA4         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM21H                  0xFFC00BAC         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM22H                  0xFFC00BB4         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM23H                  0xFFC00BBC         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM24H                  0xFFC00BC4         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM25H                  0xFFC00BCC         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM26H                  0xFFC00BD4         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM27H                  0xFFC00BDC         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM28H                  0xFFC00BE4         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM29H                  0xFFC00BEC         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM30H                  0xFFC00BF4         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_AM31H                  0xFFC00BFC         /* CAN0 Acceptance Mask Register (H) */
+#define CAN0_MB00_DATA0             0xFFC00C00         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB01_DATA0             0xFFC00C20         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB02_DATA0             0xFFC00C40         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB03_DATA0             0xFFC00C60         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB04_DATA0             0xFFC00C80         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB05_DATA0             0xFFC00CA0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB06_DATA0             0xFFC00CC0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB07_DATA0             0xFFC00CE0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB08_DATA0             0xFFC00D00         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB09_DATA0             0xFFC00D20         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB10_DATA0             0xFFC00D40         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB11_DATA0             0xFFC00D60         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB12_DATA0             0xFFC00D80         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB13_DATA0             0xFFC00DA0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB14_DATA0             0xFFC00DC0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB15_DATA0             0xFFC00DE0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB16_DATA0             0xFFC00E00         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB17_DATA0             0xFFC00E20         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB18_DATA0             0xFFC00E40         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB19_DATA0             0xFFC00E60         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB20_DATA0             0xFFC00E80         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB21_DATA0             0xFFC00EA0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB22_DATA0             0xFFC00EC0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB23_DATA0             0xFFC00EE0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB24_DATA0             0xFFC00F00         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB25_DATA0             0xFFC00F20         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB26_DATA0             0xFFC00F40         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB27_DATA0             0xFFC00F60         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB28_DATA0             0xFFC00F80         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB29_DATA0             0xFFC00FA0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB30_DATA0             0xFFC00FC0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB31_DATA0             0xFFC00FE0         /* CAN0 Mailbox Word 0 Register */
+#define CAN0_MB00_DATA1             0xFFC00C04         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB01_DATA1             0xFFC00C24         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB02_DATA1             0xFFC00C44         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB03_DATA1             0xFFC00C64         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB04_DATA1             0xFFC00C84         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB05_DATA1             0xFFC00CA4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB06_DATA1             0xFFC00CC4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB07_DATA1             0xFFC00CE4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB08_DATA1             0xFFC00D04         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB09_DATA1             0xFFC00D24         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB10_DATA1             0xFFC00D44         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB11_DATA1             0xFFC00D64         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB12_DATA1             0xFFC00D84         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB13_DATA1             0xFFC00DA4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB14_DATA1             0xFFC00DC4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB15_DATA1             0xFFC00DE4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB16_DATA1             0xFFC00E04         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB17_DATA1             0xFFC00E24         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB18_DATA1             0xFFC00E44         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB19_DATA1             0xFFC00E64         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB20_DATA1             0xFFC00E84         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB21_DATA1             0xFFC00EA4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB22_DATA1             0xFFC00EC4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB23_DATA1             0xFFC00EE4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB24_DATA1             0xFFC00F04         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB25_DATA1             0xFFC00F24         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB26_DATA1             0xFFC00F44         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB27_DATA1             0xFFC00F64         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB28_DATA1             0xFFC00F84         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB29_DATA1             0xFFC00FA4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB30_DATA1             0xFFC00FC4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB31_DATA1             0xFFC00FE4         /* CAN0 Mailbox Word 1 Register */
+#define CAN0_MB00_DATA2             0xFFC00C08         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB01_DATA2             0xFFC00C28         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB02_DATA2             0xFFC00C48         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB03_DATA2             0xFFC00C68         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB04_DATA2             0xFFC00C88         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB05_DATA2             0xFFC00CA8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB06_DATA2             0xFFC00CC8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB07_DATA2             0xFFC00CE8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB08_DATA2             0xFFC00D08         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB09_DATA2             0xFFC00D28         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB10_DATA2             0xFFC00D48         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB11_DATA2             0xFFC00D68         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB12_DATA2             0xFFC00D88         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB13_DATA2             0xFFC00DA8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB14_DATA2             0xFFC00DC8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB15_DATA2             0xFFC00DE8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB16_DATA2             0xFFC00E08         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB17_DATA2             0xFFC00E28         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB18_DATA2             0xFFC00E48         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB19_DATA2             0xFFC00E68         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB20_DATA2             0xFFC00E88         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB21_DATA2             0xFFC00EA8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB22_DATA2             0xFFC00EC8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB23_DATA2             0xFFC00EE8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB24_DATA2             0xFFC00F08         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB25_DATA2             0xFFC00F28         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB26_DATA2             0xFFC00F48         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB27_DATA2             0xFFC00F68         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB28_DATA2             0xFFC00F88         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB29_DATA2             0xFFC00FA8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB30_DATA2             0xFFC00FC8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB31_DATA2             0xFFC00FE8         /* CAN0 Mailbox Word 2 Register */
+#define CAN0_MB00_DATA3             0xFFC00C0C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB01_DATA3             0xFFC00C2C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB02_DATA3             0xFFC00C4C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB03_DATA3             0xFFC00C6C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB04_DATA3             0xFFC00C8C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB05_DATA3             0xFFC00CAC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB06_DATA3             0xFFC00CCC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB07_DATA3             0xFFC00CEC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB08_DATA3             0xFFC00D0C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB09_DATA3             0xFFC00D2C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB10_DATA3             0xFFC00D4C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB11_DATA3             0xFFC00D6C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB12_DATA3             0xFFC00D8C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB13_DATA3             0xFFC00DAC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB14_DATA3             0xFFC00DCC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB15_DATA3             0xFFC00DEC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB16_DATA3             0xFFC00E0C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB17_DATA3             0xFFC00E2C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB18_DATA3             0xFFC00E4C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB19_DATA3             0xFFC00E6C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB20_DATA3             0xFFC00E8C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB21_DATA3             0xFFC00EAC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB22_DATA3             0xFFC00ECC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB23_DATA3             0xFFC00EEC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB24_DATA3             0xFFC00F0C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB25_DATA3             0xFFC00F2C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB26_DATA3             0xFFC00F4C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB27_DATA3             0xFFC00F6C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB28_DATA3             0xFFC00F8C         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB29_DATA3             0xFFC00FAC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB30_DATA3             0xFFC00FCC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB31_DATA3             0xFFC00FEC         /* CAN0 Mailbox Word 3 Register */
+#define CAN0_MB00_LENGTH            0xFFC00C10         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB01_LENGTH            0xFFC00C30         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB02_LENGTH            0xFFC00C50         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB03_LENGTH            0xFFC00C70         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB04_LENGTH            0xFFC00C90         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB05_LENGTH            0xFFC00CB0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB06_LENGTH            0xFFC00CD0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB07_LENGTH            0xFFC00CF0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB08_LENGTH            0xFFC00D10         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB09_LENGTH            0xFFC00D30         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB10_LENGTH            0xFFC00D50         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB11_LENGTH            0xFFC00D70         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB12_LENGTH            0xFFC00D90         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB13_LENGTH            0xFFC00DB0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB14_LENGTH            0xFFC00DD0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB15_LENGTH            0xFFC00DF0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB16_LENGTH            0xFFC00E10         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB17_LENGTH            0xFFC00E30         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB18_LENGTH            0xFFC00E50         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB19_LENGTH            0xFFC00E70         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB20_LENGTH            0xFFC00E90         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB21_LENGTH            0xFFC00EB0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB22_LENGTH            0xFFC00ED0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB23_LENGTH            0xFFC00EF0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB24_LENGTH            0xFFC00F10         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB25_LENGTH            0xFFC00F30         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB26_LENGTH            0xFFC00F50         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB27_LENGTH            0xFFC00F70         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB28_LENGTH            0xFFC00F90         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB29_LENGTH            0xFFC00FB0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB30_LENGTH            0xFFC00FD0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB31_LENGTH            0xFFC00FF0         /* CAN0 Mailbox Word 4 Register */
+#define CAN0_MB00_TIMESTAMP         0xFFC00C14         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB01_TIMESTAMP         0xFFC00C34         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB02_TIMESTAMP         0xFFC00C54         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB03_TIMESTAMP         0xFFC00C74         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB04_TIMESTAMP         0xFFC00C94         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB05_TIMESTAMP         0xFFC00CB4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB06_TIMESTAMP         0xFFC00CD4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB07_TIMESTAMP         0xFFC00CF4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB08_TIMESTAMP         0xFFC00D14         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB09_TIMESTAMP         0xFFC00D34         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB10_TIMESTAMP         0xFFC00D54         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB11_TIMESTAMP         0xFFC00D74         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB12_TIMESTAMP         0xFFC00D94         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB13_TIMESTAMP         0xFFC00DB4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB14_TIMESTAMP         0xFFC00DD4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB15_TIMESTAMP         0xFFC00DF4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB16_TIMESTAMP         0xFFC00E14         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB17_TIMESTAMP         0xFFC00E34         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB18_TIMESTAMP         0xFFC00E54         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB19_TIMESTAMP         0xFFC00E74         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB20_TIMESTAMP         0xFFC00E94         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB21_TIMESTAMP         0xFFC00EB4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB22_TIMESTAMP         0xFFC00ED4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB23_TIMESTAMP         0xFFC00EF4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB24_TIMESTAMP         0xFFC00F14         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB25_TIMESTAMP         0xFFC00F34         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB26_TIMESTAMP         0xFFC00F54         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB27_TIMESTAMP         0xFFC00F74         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB28_TIMESTAMP         0xFFC00F94         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB29_TIMESTAMP         0xFFC00FB4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB30_TIMESTAMP         0xFFC00FD4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB31_TIMESTAMP         0xFFC00FF4         /* CAN0 Mailbox Word 5 Register */
+#define CAN0_MB00_ID0               0xFFC00C18         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB01_ID0               0xFFC00C38         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB02_ID0               0xFFC00C58         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB03_ID0               0xFFC00C78         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB04_ID0               0xFFC00C98         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB05_ID0               0xFFC00CB8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB06_ID0               0xFFC00CD8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB07_ID0               0xFFC00CF8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB08_ID0               0xFFC00D18         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB09_ID0               0xFFC00D38         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB10_ID0               0xFFC00D58         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB11_ID0               0xFFC00D78         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB12_ID0               0xFFC00D98         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB13_ID0               0xFFC00DB8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB14_ID0               0xFFC00DD8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB15_ID0               0xFFC00DF8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB16_ID0               0xFFC00E18         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB17_ID0               0xFFC00E38         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB18_ID0               0xFFC00E58         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB19_ID0               0xFFC00E78         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB20_ID0               0xFFC00E98         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB21_ID0               0xFFC00EB8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB22_ID0               0xFFC00ED8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB23_ID0               0xFFC00EF8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB24_ID0               0xFFC00F18         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB25_ID0               0xFFC00F38         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB26_ID0               0xFFC00F58         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB27_ID0               0xFFC00F78         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB28_ID0               0xFFC00F98         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB29_ID0               0xFFC00FB8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB30_ID0               0xFFC00FD8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB31_ID0               0xFFC00FF8         /* CAN0 Mailbox Word 6 Register */
+#define CAN0_MB00_ID1               0xFFC00C1C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB01_ID1               0xFFC00C3C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB02_ID1               0xFFC00C5C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB03_ID1               0xFFC00C7C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB04_ID1               0xFFC00C9C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB05_ID1               0xFFC00CBC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB06_ID1               0xFFC00CDC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB07_ID1               0xFFC00CFC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB08_ID1               0xFFC00D1C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB09_ID1               0xFFC00D3C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB10_ID1               0xFFC00D5C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB11_ID1               0xFFC00D7C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB12_ID1               0xFFC00D9C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB13_ID1               0xFFC00DBC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB14_ID1               0xFFC00DDC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB15_ID1               0xFFC00DFC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB16_ID1               0xFFC00E1C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB17_ID1               0xFFC00E3C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB18_ID1               0xFFC00E5C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB19_ID1               0xFFC00E7C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB20_ID1               0xFFC00E9C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB21_ID1               0xFFC00EBC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB22_ID1               0xFFC00EDC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB23_ID1               0xFFC00EFC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB24_ID1               0xFFC00F1C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB25_ID1               0xFFC00F3C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB26_ID1               0xFFC00F5C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB27_ID1               0xFFC00F7C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB28_ID1               0xFFC00F9C         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB29_ID1               0xFFC00FBC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB30_ID1               0xFFC00FDC         /* CAN0 Mailbox Word 7 Register */
+#define CAN0_MB31_ID1               0xFFC00FFC         /* CAN0 Mailbox Word 7 Register */
+
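One regularity worth noting in the mailbox table above: the 32 CAN0 mailboxes sit at a fixed 0x20-byte stride starting at CAN0_MB00_DATA0, and the eight words of each mailbox (DATA0..DATA3, LENGTH, TIMESTAMP, ID0, ID1) are 4 bytes apart. A hypothetical helper (not part of this patch) can therefore compute any mailbox word address instead of looking it up:

    /* Hypothetical helpers derived from the addresses listed above. */
    #define CAN0_MB_BASE            0xFFC00C00  /* == CAN0_MB00_DATA0 */
    #define CAN0_MB_SIZE            0x20        /* per-mailbox stride */
    #define CAN0_MB_ADDR(n, word)   (CAN0_MB_BASE + (n) * CAN0_MB_SIZE + (word) * 4)

    /* Example: CAN0_MB_ADDR(23, 5) == 0xFFC00EF4 == CAN0_MB23_TIMESTAMP */
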
+/* =========================
+	LINK PORT Registers
+   ========================= */
+#define LP0_CTL                     0xFFC01000         /* LP0 Control Register */
+#define LP0_STAT                    0xFFC01004         /* LP0 Status Register */
+#define LP0_DIV                     0xFFC01008         /* LP0 Clock Divider Value */
+#define LP0_CNT                     0xFFC0100C         /* LP0 Current Count Value of Clock Divider */
+#define LP0_TX                      0xFFC01010         /* LP0 Transmit Buffer */
+#define LP0_RX                      0xFFC01014         /* LP0 Receive Buffer */
+#define LP0_TXIN_SHDW               0xFFC01018         /* LP0 Shadow Input Transmit Buffer */
+#define LP0_TXOUT_SHDW              0xFFC0101C         /* LP0 Shadow Output Transmit Buffer */
+#define LP1_CTL                     0xFFC01100         /* LP1 Control Register */
+#define LP1_STAT                    0xFFC01104         /* LP1 Status Register */
+#define LP1_DIV                     0xFFC01108         /* LP1 Clock Divider Value */
+#define LP1_CNT                     0xFFC0110C         /* LP1 Current Count Value of Clock Divider */
+#define LP1_TX                      0xFFC01110         /* LP1 Transmit Buffer */
+#define LP1_RX                      0xFFC01114         /* LP1 Receive Buffer */
+#define LP1_TXIN_SHDW               0xFFC01118         /* LP1 Shadow Input Transmit Buffer */
+#define LP1_TXOUT_SHDW              0xFFC0111C         /* LP1 Shadow Output Transmit Buffer */
+#define LP2_CTL                     0xFFC01200         /* LP2 Control Register */
+#define LP2_STAT                    0xFFC01204         /* LP2 Status Register */
+#define LP2_DIV                     0xFFC01208         /* LP2 Clock Divider Value */
+#define LP2_CNT                     0xFFC0120C         /* LP2 Current Count Value of Clock Divider */
+#define LP2_TX                      0xFFC01210         /* LP2 Transmit Buffer */
+#define LP2_RX                      0xFFC01214         /* LP2 Receive Buffer */
+#define LP2_TXIN_SHDW               0xFFC01218         /* LP2 Shadow Input Transmit Buffer */
+#define LP2_TXOUT_SHDW              0xFFC0121C         /* LP2 Shadow Output Transmit Buffer */
+#define LP3_CTL                     0xFFC01300         /* LP3 Control Register */
+#define LP3_STAT                    0xFFC01304         /* LP3 Status Register */
+#define LP3_DIV                     0xFFC01308         /* LP3 Clock Divider Value */
+#define LP3_CNT                     0xFFC0130C         /* LP3 Current Count Value of Clock Divider */
+#define LP3_TX                      0xFFC01310         /* LP3 Transmit Buffer */
+#define LP3_RX                      0xFFC01314         /* LP3 Receive Buffer */
+#define LP3_TXIN_SHDW               0xFFC01318         /* LP3 Shadow Input Transmit Buffer */
+#define LP3_TXOUT_SHDW              0xFFC0131C         /* LP3 Shadow Output Transmit Buffer */
+
+/* =========================
+        TIMER Registers
+   ========================= */
+#define TIMER_REVID                0xFFC01400         /* GPTIMER Timer IP Version ID */
+#define TIMER_RUN                  0xFFC01404         /* GPTIMER Timer Run Register */
+#define TIMER_RUN_SET              0xFFC01408         /* GPTIMER Run Register Alias to Set */
+#define TIMER_RUN_CLR              0xFFC0140C         /* GPTIMER Run Register Alias to Clear */
+#define TIMER_STOP_CFG             0xFFC01410         /* GPTIMER Stop Config Register */
+#define TIMER_STOP_CFG_SET         0xFFC01414         /* GPTIMER Stop Config Alias to Set */
+#define TIMER_STOP_CFG_CLR         0xFFC01418         /* GPTIMER Stop Config Alias to Clear */
+#define TIMER_DATA_IMSK            0xFFC0141C         /* GPTIMER Data Interrupt Mask register */
+#define TIMER_STAT_IMSK            0xFFC01420         /* GPTIMER Status Interrupt Mask register */
+#define TIMER_TRG_MSK              0xFFC01424         /* GPTIMER Output Trigger Mask register */
+#define TIMER_TRG_IE               0xFFC01428         /* GPTIMER Slave Trigger Enable register */
+#define TIMER_DATA_ILAT            0xFFC0142C         /* GPTIMER Data Interrupt Register */
+#define TIMER_STAT_ILAT            0xFFC01430         /* GPTIMER Status (Error) Interrupt Register */
+#define TIMER_ERR_TYPE             0xFFC01434         /* GPTIMER Register Indicating Type of Error */
+#define TIMER_BCAST_PER            0xFFC01438         /* GPTIMER Broadcast Period */
+#define TIMER_BCAST_WID            0xFFC0143C         /* GPTIMER Broadcast Width */
+#define TIMER_BCAST_DLY            0xFFC01440         /* GPTIMER Broadcast Delay */
+
+/* =========================
+	TIMER0~7
+   ========================= */
+#define TIMER0_CONFIG             0xFFC01460         /* TIMER0 Per Timer Config Register */
+#define TIMER0_COUNTER            0xFFC01464         /* TIMER0 Per Timer Counter Register */
+#define TIMER0_PERIOD             0xFFC01468         /* TIMER0 Per Timer Period Register */
+#define TIMER0_WIDTH              0xFFC0146C         /* TIMER0 Per Timer Width Register */
+#define TIMER0_DELAY              0xFFC01470         /* TIMER0 Per Timer Delay Register */
+
+#define TIMER1_CONFIG             0xFFC01480         /* TIMER1 Per Timer Config Register */
+#define TIMER1_COUNTER            0xFFC01484         /* TIMER1 Per Timer Counter Register */
+#define TIMER1_PERIOD             0xFFC01488         /* TIMER1 Per Timer Period Register */
+#define TIMER1_WIDTH              0xFFC0148C         /* TIMER1 Per Timer Width Register */
+#define TIMER1_DELAY              0xFFC01490         /* TIMER1 Per Timer Delay Register */
+
+#define TIMER2_CONFIG             0xFFC014A0         /* TIMER2 Per Timer Config Register */
+#define TIMER2_COUNTER            0xFFC014A4         /* TIMER2 Per Timer Counter Register */
+#define TIMER2_PERIOD             0xFFC014A8         /* TIMER2 Per Timer Period Register */
+#define TIMER2_WIDTH              0xFFC014AC         /* TIMER2 Per Timer Width Register */
+#define TIMER2_DELAY              0xFFC014B0         /* TIMER2 Per Timer Delay Register */
+
+#define TIMER3_CONFIG             0xFFC014C0         /* TIMER3 Per Timer Config Register */
+#define TIMER3_COUNTER            0xFFC014C4         /* TIMER3 Per Timer Counter Register */
+#define TIMER3_PERIOD             0xFFC014C8         /* TIMER3 Per Timer Period Register */
+#define TIMER3_WIDTH              0xFFC014CC         /* TIMER3 Per Timer Width Register */
+#define TIMER3_DELAY              0xFFC014D0         /* TIMER3 Per Timer Delay Register */
+
+#define TIMER4_CONFIG             0xFFC014E0         /* TIMER4 Per Timer Config Register */
+#define TIMER4_COUNTER            0xFFC014E4         /* TIMER4 Per Timer Counter Register */
+#define TIMER4_PERIOD             0xFFC014E8         /* TIMER4 Per Timer Period Register */
+#define TIMER4_WIDTH              0xFFC014EC         /* TIMER4 Per Timer Width Register */
+#define TIMER4_DELAY              0xFFC014F0         /* TIMER4 Per Timer Delay Register */
+
+#define TIMER5_CONFIG             0xFFC01500         /* TIMER5 Per Timer Config Register */
+#define TIMER5_COUNTER            0xFFC01504         /* TIMER5 Per Timer Counter Register */
+#define TIMER5_PERIOD             0xFFC01508         /* TIMER5 Per Timer Period Register */
+#define TIMER5_WIDTH              0xFFC0150C         /* TIMER5 Per Timer Width Register */
+#define TIMER5_DELAY              0xFFC01510         /* TIMER5 Per Timer Delay Register */
+
+#define TIMER6_CONFIG             0xFFC01520         /* TIMER6 Per Timer Config Register */
+#define TIMER6_COUNTER            0xFFC01524         /* TIMER6 Per Timer Counter Register */
+#define TIMER6_PERIOD             0xFFC01528         /* TIMER6 Per Timer Period Register */
+#define TIMER6_WIDTH              0xFFC0152C         /* TIMER6 Per Timer Width Register */
+#define TIMER6_DELAY              0xFFC01530         /* TIMER6 Per Timer Delay Register */
+
+#define TIMER7_CONFIG             0xFFC01540         /* TIMER7 Per Timer Config Register */
+#define TIMER7_COUNTER            0xFFC01544         /* TIMER7 Per Timer Counter Register */
+#define TIMER7_PERIOD             0xFFC01548         /* TIMER7 Per Timer Period Register */
+#define TIMER7_WIDTH              0xFFC0154C         /* TIMER7 Per Timer Width Register */
+#define TIMER7_DELAY              0xFFC01550         /* TIMER7 Per Timer Delay Register */
+
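
/*
 * Illustrative sketch (not part of this patch): one plausible way a driver
 * could touch the GPTIMER block above, assuming the usual Blackfin
 * bfin_read32()/bfin_write32() MMR accessors and 32-bit registers.  The idea
 * that bit N of TIMER_RUN_SET starts timer N is inferred from the "Alias to
 * Set" naming and should be checked against the hardware reference manual.
 */
#include <linux/types.h>
#include <asm/blackfin.h>

static void gptimer0_start_sketch(u32 period, u32 width)
{
	bfin_write32(TIMER0_PERIOD, period);	/* program the timer period */
	bfin_write32(TIMER0_WIDTH, width);	/* and the pulse width */
	bfin_write32(TIMER_RUN_SET, 1 << 0);	/* assumed: bit 0 starts TIMER0 */
}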
+/* =========================
+	CRC Registers
+   ========================= */
+
+/* =========================
+	CRC0
+   ========================= */
+#define REG_CRC0_CTL                    0xFFC01C00         /* CRC0 Control Register */
+#define REG_CRC0_DCNT                   0xFFC01C04         /* CRC0 Data Word Count Register */
+#define REG_CRC0_DCNTRLD                0xFFC01C08         /* CRC0 Data Word Count Reload Register */
+#define REG_CRC0_COMP                   0xFFC01C14         /* CRC0 DATA Compare Register */
+#define REG_CRC0_FILLVAL                0xFFC01C18         /* CRC0 Fill Value Register */
+#define REG_CRC0_DFIFO                  0xFFC01C1C         /* CRC0 DATA FIFO Register */
+#define REG_CRC0_INEN                   0xFFC01C20         /* CRC0 Interrupt Enable Register */
+#define REG_CRC0_INEN_SET               0xFFC01C24         /* CRC0 Interrupt Enable Set Register */
+#define REG_CRC0_INEN_CLR               0xFFC01C28         /* CRC0 Interrupt Enable Clear Register */
+#define REG_CRC0_POLY                   0xFFC01C2C         /* CRC0 Polynomial Register */
+#define REG_CRC0_STAT                   0xFFC01C40         /* CRC0 Status Register */
+#define REG_CRC0_DCNTCAP                0xFFC01C44         /* CRC0 DATA Count Capture Register */
+#define REG_CRC0_RESULT_FIN             0xFFC01C4C         /* CRC0 Final CRC Result Register */
+#define REG_CRC0_RESULT_CUR             0xFFC01C50         /* CRC0 Current CRC Result Register */
+#define REG_CRC0_REVID                  0xFFC01C60         /* CRC0 Revision ID Register */
+
+/* =========================
+	CRC1
+   ========================= */
+#define REG_CRC1_CTL                    0xFFC01D00         /* CRC1 Control Register */
+#define REG_CRC1_DCNT                   0xFFC01D04         /* CRC1 Data Word Count Register */
+#define REG_CRC1_DCNTRLD                0xFFC01D08         /* CRC1 Data Word Count Reload Register */
+#define REG_CRC1_COMP                   0xFFC01D14         /* CRC1 DATA Compare Register */
+#define REG_CRC1_FILLVAL                0xFFC01D18         /* CRC1 Fill Value Register */
+#define REG_CRC1_DFIFO                  0xFFC01D1C         /* CRC1 DATA FIFO Register */
+#define REG_CRC1_INEN                   0xFFC01D20         /* CRC1 Interrupt Enable Register */
+#define REG_CRC1_INEN_SET               0xFFC01D24         /* CRC1 Interrupt Enable Set Register */
+#define REG_CRC1_INEN_CLR               0xFFC01D28         /* CRC1 Interrupt Enable Clear Register */
+#define REG_CRC1_POLY                   0xFFC01D2C         /* CRC1 Polynomial Register */
+#define REG_CRC1_STAT                   0xFFC01D40         /* CRC1 Status Register */
+#define REG_CRC1_DCNTCAP                0xFFC01D44         /* CRC1 DATA Count Capture Register */
+#define REG_CRC1_RESULT_FIN             0xFFC01D4C         /* CRC1 Final CRC Result Register */
+#define REG_CRC1_RESULT_CUR             0xFFC01D50         /* CRC1 Current CRC Result Register */
+#define REG_CRC1_REVID                  0xFFC01D60         /* CRC1 Revision ID Register */
+
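
/*
 * Illustrative sketch (not part of this patch): CRC0 and CRC1 are identical
 * blocks 0x100 apart (0xFFC01C00 vs 0xFFC01D00), and a finished result can be
 * read back with a plain MMR access.  Which REG_CRC0_STAT bit signals
 * completion is not spelled out here and is left as an assumption.
 */
#include <linux/types.h>
#include <asm/blackfin.h>

static u32 crc0_final_result_sketch(void)
{
	return bfin_read32(REG_CRC0_RESULT_FIN);	/* final CRC over the processed data */
}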
+/* =========================
+        TWI Registers
+   ========================= */
+
+/* =========================
+        TWI0
+   ========================= */
+#define TWI0_CLKDIV                    0xFFC01E00         /* TWI0 SCL Clock Divider */
+#define TWI0_CONTROL                   0xFFC01E04         /* TWI0 Control Register */
+#define TWI0_SLAVE_CTL                 0xFFC01E08         /* TWI0 Slave Mode Control Register */
+#define TWI0_SLAVE_STAT                0xFFC01E0C         /* TWI0 Slave Mode Status Register */
+#define TWI0_SLAVE_ADDR                0xFFC01E10         /* TWI0 Slave Mode Address Register */
+#define TWI0_MASTER_CTL                0xFFC01E14         /* TWI0 Master Mode Control Register */
+#define TWI0_MASTER_STAT               0xFFC01E18         /* TWI0 Master Mode Status Register */
+#define TWI0_MASTER_ADDR               0xFFC01E1C         /* TWI0 Master Mode Address Register */
+#define TWI0_INT_STAT                  0xFFC01E20         /* TWI0 Interrupt Status Register */
+#define TWI0_INT_MASK                  0xFFC01E24         /* TWI0 Interrupt Mask Register */
+#define TWI0_FIFO_CTL                  0xFFC01E28         /* TWI0 FIFO Control Register */
+#define TWI0_FIFO_STAT                 0xFFC01E2C         /* TWI0 FIFO Status Register */
+#define TWI0_XMT_DATA8                 0xFFC01E80         /* TWI0 FIFO Transmit Data Single-Byte Register */
+#define TWI0_XMT_DATA16                0xFFC01E84         /* TWI0 FIFO Transmit Data Double-Byte Register */
+#define TWI0_RCV_DATA8                 0xFFC01E88         /* TWI0 FIFO Receive Data Single-Byte Register */
+#define TWI0_RCV_DATA16                0xFFC01E8C         /* TWI0 FIFO Receive Data Double-Byte Register */
+
+/* =========================
+        TWI1
+   ========================= */
+#define TWI1_CLKDIV                    0xFFC01F00         /* TWI1 SCL Clock Divider */
+#define TWI1_CONTROL                   0xFFC01F04         /* TWI1 Control Register */
+#define TWI1_SLAVE_CTL                 0xFFC01F08         /* TWI1 Slave Mode Control Register */
+#define TWI1_SLAVE_STAT                0xFFC01F0C         /* TWI1 Slave Mode Status Register */
+#define TWI1_SLAVE_ADDR                0xFFC01F10         /* TWI1 Slave Mode Address Register */
+#define TWI1_MASTER_CTL                0xFFC01F14         /* TWI1 Master Mode Control Register */
+#define TWI1_MASTER_STAT               0xFFC01F18         /* TWI1 Master Mode Status Register */
+#define TWI1_MASTER_ADDR               0xFFC01F1C         /* TWI1 Master Mode Address Register */
+#define TWI1_INT_STAT                  0xFFC01F20         /* TWI1 Interrupt Status Register */
+#define TWI1_INT_MASK                  0xFFC01F24         /* TWI1 Interrupt Mask Register */
+#define TWI1_FIFO_CTL                  0xFFC01F28         /* TWI1 FIFO Control Register */
+#define TWI1_FIFO_STAT                 0xFFC01F2C         /* TWI1 FIFO Status Register */
+#define TWI1_XMT_DATA8                 0xFFC01F80         /* TWI1 FIFO Transmit Data Single-Byte Register */
+#define TWI1_XMT_DATA16                0xFFC01F84         /* TWI1 FIFO Transmit Data Double-Byte Register */
+#define TWI1_RCV_DATA8                 0xFFC01F88         /* TWI1 FIFO Receive Data Single-Byte Register */
+#define TWI1_RCV_DATA16                0xFFC01F8C         /* TWI1 FIFO Receive Data Double-Byte Register */
+
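
/*
 * Illustrative sketch (not part of this patch): the two TWI banks share one
 * register layout at a 0x100 stride (0xFFC01E00 vs 0xFFC01F00), so
 * per-instance addresses can also be derived from offsets.  The helper names
 * below are hypothetical.
 */
#define TWI_BASE(n)		(0xFFC01E00UL + (n) * 0x100)
#define TWI_OFF_CLKDIV		0x00
#define TWI_OFF_INT_STAT	0x20

static inline unsigned long twi_mmr_sketch(int n, unsigned long off)
{
	return TWI_BASE(n) + off;	/* e.g. twi_mmr_sketch(1, TWI_OFF_INT_STAT) == TWI1_INT_STAT */
}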
+
+/* =========================
+        UART Registers
+   ========================= */
+
+/* =========================
+        UART0
+   ========================= */
+#define UART0_REVID                 0xFFC02000         /* UART0 Revision ID Register */
+#define UART0_CTL                   0xFFC02004         /* UART0 Control Register */
+#define UART0_STAT                  0xFFC02008         /* UART0 Status Register */
+#define UART0_SCR                   0xFFC0200C         /* UART0 Scratch Register */
+#define UART0_CLK                   0xFFC02010         /* UART0 Clock Rate Register */
+#define UART0_IER                   0xFFC02014         /* UART0 Interrupt Mask Register */
+#define UART0_IER_SET               0xFFC02018         /* UART0 Interrupt Mask Set Register */
+#define UART0_IER_CLR               0xFFC0201C         /* UART0 Interrupt Mask Clear Register */
+#define UART0_RBR                   0xFFC02020         /* UART0 Receive Buffer Register */
+#define UART0_THR                   0xFFC02024         /* UART0 Transmit Hold Register */
+#define UART0_TAIP                  0xFFC02028         /* UART0 Transmit Address/Insert Pulse Register */
+#define UART0_TSR                   0xFFC0202C         /* UART0 Transmit Shift Register */
+#define UART0_RSR                   0xFFC02030         /* UART0 Receive Shift Register */
+#define UART0_TXDIV                 0xFFC02034         /* UART0 Transmit Clock Divider Register */
+#define UART0_RXDIV                 0xFFC02038         /* UART0 Receive Clock Divider Register */
+
+/* =========================
+        UART1
+   ========================= */
+#define UART1_REVID                 0xFFC02400         /* UART1 Revision ID Register */
+#define UART1_CTL                   0xFFC02404         /* UART1 Control Register */
+#define UART1_STAT                  0xFFC02408         /* UART1 Status Register */
+#define UART1_SCR                   0xFFC0240C         /* UART1 Scratch Register */
+#define UART1_CLK                   0xFFC02410         /* UART1 Clock Rate Register */
+#define UART1_IER                   0xFFC02414         /* UART1 Interrupt Mask Register */
+#define UART1_IER_SET               0xFFC02418         /* UART1 Interrupt Mask Set Register */
+#define UART1_IER_CLR               0xFFC0241C         /* UART1 Interrupt Mask Clear Register */
+#define UART1_RBR                   0xFFC02420         /* UART1 Receive Buffer Register */
+#define UART1_THR                   0xFFC02424         /* UART1 Transmit Hold Register */
+#define UART1_TAIP                  0xFFC02428         /* UART1 Transmit Address/Insert Pulse Register */
+#define UART1_TSR                   0xFFC0242C         /* UART1 Transmit Shift Register */
+#define UART1_RSR                   0xFFC02430         /* UART1 Receive Shift Register */
+#define UART1_TXDIV                 0xFFC02434         /* UART1 Transmit Clock Divider Register */
+#define UART1_RXDIV                 0xFFC02438         /* UART1 Receive Clock Divider Register */
+
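
/*
 * Illustrative sketch (not part of this patch): UART0 and UART1 use the same
 * layout at a 0x400 stride, so a polled console write might look roughly like
 * this.  The "transmitter ready" bit position in UARTx_STAT is a hypothetical
 * placeholder and must be taken from the hardware reference manual.
 */
#include <asm/blackfin.h>

#define UART_STAT_TX_READY	(1 << 5)	/* hypothetical bit, for illustration only */

static void uart0_putc_sketch(char c)
{
	while (!(bfin_read32(UART0_STAT) & UART_STAT_TX_READY))
		;				/* busy-wait until the transmitter can accept data */
	bfin_write32(UART0_THR, c);		/* queue the character in the transmit hold register */
}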
+
+/* =========================
+        PORT Registers
+   ========================= */
+
+/* =========================
+        PORTA
+   ========================= */
+#define PORTA_FER                   0xFFC03000         /* PORTA Port x Function Enable Register */
+#define PORTA_FER_SET               0xFFC03004         /* PORTA Port x Function Enable Set Register */
+#define PORTA_FER_CLEAR               0xFFC03008         /* PORTA Port x Function Enable Clear Register */
+#define PORTA_DATA                  0xFFC0300C         /* PORTA Port x GPIO Data Register */
+#define PORTA_DATA_SET              0xFFC03010         /* PORTA Port x GPIO Data Set Register */
+#define PORTA_DATA_CLEAR              0xFFC03014         /* PORTA Port x GPIO Data Clear Register */
+#define PORTA_DIR                   0xFFC03018         /* PORTA Port x GPIO Direction Register */
+#define PORTA_DIR_SET               0xFFC0301C         /* PORTA Port x GPIO Direction Set Register */
+#define PORTA_DIR_CLEAR               0xFFC03020         /* PORTA Port x GPIO Direction Clear Register */
+#define PORTA_INEN                  0xFFC03024         /* PORTA Port x GPIO Input Enable Register */
+#define PORTA_INEN_SET              0xFFC03028         /* PORTA Port x GPIO Input Enable Set Register */
+#define PORTA_INEN_CLEAR              0xFFC0302C         /* PORTA Port x GPIO Input Enable Clear Register */
+#define PORTA_MUX                   0xFFC03030         /* PORTA Port x Multiplexer Control Register */
+#define PORTA_DATA_TGL              0xFFC03034         /* PORTA Port x GPIO Data Toggle Register */
+#define PORTA_POL                   0xFFC03038         /* PORTA Port x GPIO Programming Inversion Register */
+#define PORTA_POL_SET               0xFFC0303C         /* PORTA Port x GPIO Programming Inversion Set Register */
+#define PORTA_POL_CLEAR               0xFFC03040         /* PORTA Port x GPIO Programming Inversion Clear Register */
+#define PORTA_LOCK                  0xFFC03044         /* PORTA Port x GPIO Lock Register */
+#define PORTA_REVID                 0xFFC0307C         /* PORTA Port x GPIO Revision ID */
+
+/* =========================
+        PORTB
+   ========================= */
+#define PORTB_FER                   0xFFC03080         /* PORTB Port x Function Enable Register */
+#define PORTB_FER_SET               0xFFC03084         /* PORTB Port x Function Enable Set Register */
+#define PORTB_FER_CLEAR               0xFFC03088         /* PORTB Port x Function Enable Clear Register */
+#define PORTB_DATA                  0xFFC0308C         /* PORTB Port x GPIO Data Register */
+#define PORTB_DATA_SET              0xFFC03090         /* PORTB Port x GPIO Data Set Register */
+#define PORTB_DATA_CLEAR              0xFFC03094         /* PORTB Port x GPIO Data Clear Register */
+#define PORTB_DIR                   0xFFC03098         /* PORTB Port x GPIO Direction Register */
+#define PORTB_DIR_SET               0xFFC0309C         /* PORTB Port x GPIO Direction Set Register */
+#define PORTB_DIR_CLEAR               0xFFC030A0         /* PORTB Port x GPIO Direction Clear Register */
+#define PORTB_INEN                  0xFFC030A4         /* PORTB Port x GPIO Input Enable Register */
+#define PORTB_INEN_SET              0xFFC030A8         /* PORTB Port x GPIO Input Enable Set Register */
+#define PORTB_INEN_CLEAR              0xFFC030AC         /* PORTB Port x GPIO Input Enable Clear Register */
+#define PORTB_MUX                   0xFFC030B0         /* PORTB Port x Multiplexer Control Register */
+#define PORTB_DATA_TGL              0xFFC030B4         /* PORTB Port x GPIO Data Toggle Register */
+#define PORTB_POL                   0xFFC030B8         /* PORTB Port x GPIO Programming Inversion Register */
+#define PORTB_POL_SET               0xFFC030BC         /* PORTB Port x GPIO Programming Inversion Set Register */
+#define PORTB_POL_CLEAR               0xFFC030C0         /* PORTB Port x GPIO Programming Inversion Clear Register */
+#define PORTB_LOCK                  0xFFC030C4         /* PORTB Port x GPIO Lock Register */
+#define PORTB_REVID                 0xFFC030FC         /* PORTB Port x GPIO Revision ID */
+
+/* =========================
+        PORTC
+   ========================= */
+#define PORTC_FER                   0xFFC03100         /* PORTC Port x Function Enable Register */
+#define PORTC_FER_SET               0xFFC03104         /* PORTC Port x Function Enable Set Register */
+#define PORTC_FER_CLEAR               0xFFC03108         /* PORTC Port x Function Enable Clear Register */
+#define PORTC_DATA                  0xFFC0310C         /* PORTC Port x GPIO Data Register */
+#define PORTC_DATA_SET              0xFFC03110         /* PORTC Port x GPIO Data Set Register */
+#define PORTC_DATA_CLEAR              0xFFC03114         /* PORTC Port x GPIO Data Clear Register */
+#define PORTC_DIR                   0xFFC03118         /* PORTC Port x GPIO Direction Register */
+#define PORTC_DIR_SET               0xFFC0311C         /* PORTC Port x GPIO Direction Set Register */
+#define PORTC_DIR_CLEAR               0xFFC03120         /* PORTC Port x GPIO Direction Clear Register */
+#define PORTC_INEN                  0xFFC03124         /* PORTC Port x GPIO Input Enable Register */
+#define PORTC_INEN_SET              0xFFC03128         /* PORTC Port x GPIO Input Enable Set Register */
+#define PORTC_INEN_CLEAR              0xFFC0312C         /* PORTC Port x GPIO Input Enable Clear Register */
+#define PORTC_MUX                   0xFFC03130         /* PORTC Port x Multiplexer Control Register */
+#define PORTC_DATA_TGL              0xFFC03134         /* PORTC Port x GPIO Data Toggle Register */
+#define PORTC_POL                   0xFFC03138         /* PORTC Port x GPIO Programming Inversion Register */
+#define PORTC_POL_SET               0xFFC0313C         /* PORTC Port x GPIO Programming Inversion Set Register */
+#define PORTC_POL_CLEAR               0xFFC03140         /* PORTC Port x GPIO Programming Inversion Clear Register */
+#define PORTC_LOCK                  0xFFC03144         /* PORTC Port x GPIO Lock Register */
+#define PORTC_REVID                 0xFFC0317C         /* PORTC Port x GPIO Revision ID */
+
+/* =========================
+        PORTD
+   ========================= */
+#define PORTD_FER                   0xFFC03180         /* PORTD Port x Function Enable Register */
+#define PORTD_FER_SET               0xFFC03184         /* PORTD Port x Function Enable Set Register */
+#define PORTD_FER_CLEAR               0xFFC03188         /* PORTD Port x Function Enable Clear Register */
+#define PORTD_DATA                  0xFFC0318C         /* PORTD Port x GPIO Data Register */
+#define PORTD_DATA_SET              0xFFC03190         /* PORTD Port x GPIO Data Set Register */
+#define PORTD_DATA_CLEAR              0xFFC03194         /* PORTD Port x GPIO Data Clear Register */
+#define PORTD_DIR                   0xFFC03198         /* PORTD Port x GPIO Direction Register */
+#define PORTD_DIR_SET               0xFFC0319C         /* PORTD Port x GPIO Direction Set Register */
+#define PORTD_DIR_CLEAR               0xFFC031A0         /* PORTD Port x GPIO Direction Clear Register */
+#define PORTD_INEN                  0xFFC031A4         /* PORTD Port x GPIO Input Enable Register */
+#define PORTD_INEN_SET              0xFFC031A8         /* PORTD Port x GPIO Input Enable Set Register */
+#define PORTD_INEN_CLEAR              0xFFC031AC         /* PORTD Port x GPIO Input Enable Clear Register */
+#define PORTD_MUX                   0xFFC031B0         /* PORTD Port x Multiplexer Control Register */
+#define PORTD_DATA_TGL              0xFFC031B4         /* PORTD Port x GPIO Data Toggle Register */
+#define PORTD_POL                   0xFFC031B8         /* PORTD Port x GPIO Programming Inversion Register */
+#define PORTD_POL_SET               0xFFC031BC         /* PORTD Port x GPIO Programming Inversion Set Register */
+#define PORTD_POL_CLEAR               0xFFC031C0         /* PORTD Port x GPIO Programming Inversion Clear Register */
+#define PORTD_LOCK                  0xFFC031C4         /* PORTD Port x GPIO Lock Register */
+#define PORTD_REVID                 0xFFC031FC         /* PORTD Port x GPIO Revision ID */
+
+/* =========================
+        PORTE
+   ========================= */
+#define PORTE_FER                   0xFFC03200         /* PORTE Port x Function Enable Register */
+#define PORTE_FER_SET               0xFFC03204         /* PORTE Port x Function Enable Set Register */
+#define PORTE_FER_CLEAR               0xFFC03208         /* PORTE Port x Function Enable Clear Register */
+#define PORTE_DATA                  0xFFC0320C         /* PORTE Port x GPIO Data Register */
+#define PORTE_DATA_SET              0xFFC03210         /* PORTE Port x GPIO Data Set Register */
+#define PORTE_DATA_CLEAR              0xFFC03214         /* PORTE Port x GPIO Data Clear Register */
+#define PORTE_DIR                   0xFFC03218         /* PORTE Port x GPIO Direction Register */
+#define PORTE_DIR_SET               0xFFC0321C         /* PORTE Port x GPIO Direction Set Register */
+#define PORTE_DIR_CLEAR               0xFFC03220         /* PORTE Port x GPIO Direction Clear Register */
+#define PORTE_INEN                  0xFFC03224         /* PORTE Port x GPIO Input Enable Register */
+#define PORTE_INEN_SET              0xFFC03228         /* PORTE Port x GPIO Input Enable Set Register */
+#define PORTE_INEN_CLEAR              0xFFC0322C         /* PORTE Port x GPIO Input Enable Clear Register */
+#define PORTE_MUX                   0xFFC03230         /* PORTE Port x Multiplexer Control Register */
+#define PORTE_DATA_TGL              0xFFC03234         /* PORTE Port x GPIO Data Toggle Register */
+#define PORTE_POL                   0xFFC03238         /* PORTE Port x GPIO Programming Inversion Register */
+#define PORTE_POL_SET               0xFFC0323C         /* PORTE Port x GPIO Programming Inversion Set Register */
+#define PORTE_POL_CLEAR               0xFFC03240         /* PORTE Port x GPIO Programming Inversion Clear Register */
+#define PORTE_LOCK                  0xFFC03244         /* PORTE Port x GPIO Lock Register */
+#define PORTE_REVID                 0xFFC0327C         /* PORTE Port x GPIO Revision ID */
+
+/* =========================
+        PORTF
+   ========================= */
+#define PORTF_FER                   0xFFC03280         /* PORTF Port x Function Enable Register */
+#define PORTF_FER_SET               0xFFC03284         /* PORTF Port x Function Enable Set Register */
+#define PORTF_FER_CLEAR               0xFFC03288         /* PORTF Port x Function Enable Clear Register */
+#define PORTF_DATA                  0xFFC0328C         /* PORTF Port x GPIO Data Register */
+#define PORTF_DATA_SET              0xFFC03290         /* PORTF Port x GPIO Data Set Register */
+#define PORTF_DATA_CLEAR              0xFFC03294         /* PORTF Port x GPIO Data Clear Register */
+#define PORTF_DIR                   0xFFC03298         /* PORTF Port x GPIO Direction Register */
+#define PORTF_DIR_SET               0xFFC0329C         /* PORTF Port x GPIO Direction Set Register */
+#define PORTF_DIR_CLEAR               0xFFC032A0         /* PORTF Port x GPIO Direction Clear Register */
+#define PORTF_INEN                  0xFFC032A4         /* PORTF Port x GPIO Input Enable Register */
+#define PORTF_INEN_SET              0xFFC032A8         /* PORTF Port x GPIO Input Enable Set Register */
+#define PORTF_INEN_CLEAR              0xFFC032AC         /* PORTF Port x GPIO Input Enable Clear Register */
+#define PORTF_MUX                   0xFFC032B0         /* PORTF Port x Multiplexer Control Register */
+#define PORTF_DATA_TGL              0xFFC032B4         /* PORTF Port x GPIO Data Toggle Register */
+#define PORTF_POL                   0xFFC032B8         /* PORTF Port x GPIO Programming Inversion Register */
+#define PORTF_POL_SET               0xFFC032BC         /* PORTF Port x GPIO Programming Inversion Set Register */
+#define PORTF_POL_CLEAR               0xFFC032C0         /* PORTF Port x GPIO Programming Inversion Clear Register */
+#define PORTF_LOCK                  0xFFC032C4         /* PORTF Port x GPIO Lock Register */
+#define PORTF_REVID                 0xFFC032FC         /* PORTF Port x GPIO Revision ID */
+
+/* =========================
+        PORTG
+   ========================= */
+#define PORTG_FER                   0xFFC03300         /* PORTG Port x Function Enable Register */
+#define PORTG_FER_SET               0xFFC03304         /* PORTG Port x Function Enable Set Register */
+#define PORTG_FER_CLEAR               0xFFC03308         /* PORTG Port x Function Enable Clear Register */
+#define PORTG_DATA                  0xFFC0330C         /* PORTG Port x GPIO Data Register */
+#define PORTG_DATA_SET              0xFFC03310         /* PORTG Port x GPIO Data Set Register */
+#define PORTG_DATA_CLEAR              0xFFC03314         /* PORTG Port x GPIO Data Clear Register */
+#define PORTG_DIR                   0xFFC03318         /* PORTG Port x GPIO Direction Register */
+#define PORTG_DIR_SET               0xFFC0331C         /* PORTG Port x GPIO Direction Set Register */
+#define PORTG_DIR_CLEAR               0xFFC03320         /* PORTG Port x GPIO Direction Clear Register */
+#define PORTG_INEN                  0xFFC03324         /* PORTG Port x GPIO Input Enable Register */
+#define PORTG_INEN_SET              0xFFC03328         /* PORTG Port x GPIO Input Enable Set Register */
+#define PORTG_INEN_CLEAR              0xFFC0332C         /* PORTG Port x GPIO Input Enable Clear Register */
+#define PORTG_MUX                   0xFFC03330         /* PORTG Port x Multiplexer Control Register */
+#define PORTG_DATA_TGL              0xFFC03334         /* PORTG Port x GPIO Data Toggle Register */
+#define PORTG_POL                   0xFFC03338         /* PORTG Port x GPIO Programming Inversion Register */
+#define PORTG_POL_SET               0xFFC0333C         /* PORTG Port x GPIO Programming Inversion Set Register */
+#define PORTG_POL_CLEAR               0xFFC03340         /* PORTG Port x GPIO Programming Inversion Clear Register */
+#define PORTG_LOCK                  0xFFC03344         /* PORTG Port x GPIO Lock Register */
+#define PORTG_REVID                 0xFFC0337C         /* PORTG Port x GPIO Revision ID */
+
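
/*
 * Illustrative sketch (not part of this patch): the *_SET/*_CLEAR aliases let
 * individual pins be updated without a read-modify-write.  Assuming one bit
 * per pin (bit N == Px.N) and 32-bit accesses, driving PA3 high as a GPIO
 * output could look like this.
 */
#include <asm/blackfin.h>

static void porta_pin3_high_sketch(void)
{
	bfin_write32(PORTA_FER_CLEAR, 1 << 3);	/* pin 3 as GPIO rather than peripheral function */
	bfin_write32(PORTA_DIR_SET, 1 << 3);	/* pin 3 as an output */
	bfin_write32(PORTA_DATA_SET, 1 << 3);	/* drive high; PORTA_DATA_CLEAR would drive low */
}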
+
+/* =========================
+        PINT Registers
+   ========================= */
+
+/* =========================
+        PINT0
+   ========================= */
+#define PINT0_MASK_SET              0xFFC04000         /* PINT0 Pint Mask Set Register */
+#define PINT0_MASK_CLEAR            0xFFC04004         /* PINT0 Pint Mask Clear Register */
+#define PINT0_REQUEST               0xFFC04008         /* PINT0 Pint Request Register */
+#define PINT0_ASSIGN                0xFFC0400C         /* PINT0 Pint Assign Register */
+#define PINT0_EDGE_SET              0xFFC04010         /* PINT0 Pint Edge Set Register */
+#define PINT0_EDGE_CLEAR            0xFFC04014         /* PINT0 Pint Edge Clear Register */
+#define PINT0_INVERT_SET            0xFFC04018         /* PINT0 Pint Invert Set Register */
+#define PINT0_INVERT_CLEAR          0xFFC0401C         /* PINT0 Pint Invert Clear Register */
+#define PINT0_PINSTATE              0xFFC04020         /* PINT0 Pint Pinstate Register */
+#define PINT0_LATCH                 0xFFC04024         /* PINT0 Pint Latch Register */
+
+/* =========================
+        PINT1
+   ========================= */
+#define PINT1_MASK_SET              0xFFC04100         /* PINT1 Pint Mask Set Register */
+#define PINT1_MASK_CLEAR            0xFFC04104         /* PINT1 Pint Mask Clear Register */
+#define PINT1_REQUEST               0xFFC04108         /* PINT1 Pint Request Register */
+#define PINT1_ASSIGN                0xFFC0410C         /* PINT1 Pint Assign Register */
+#define PINT1_EDGE_SET              0xFFC04110         /* PINT1 Pint Edge Set Register */
+#define PINT1_EDGE_CLEAR            0xFFC04114         /* PINT1 Pint Edge Clear Register */
+#define PINT1_INVERT_SET            0xFFC04118         /* PINT1 Pint Invert Set Register */
+#define PINT1_INVERT_CLEAR          0xFFC0411C         /* PINT1 Pint Invert Clear Register */
+#define PINT1_PINSTATE              0xFFC04120         /* PINT1 Pint Pinstate Register */
+#define PINT1_LATCH                 0xFFC04124         /* PINT1 Pint Latch Register */
+
+/* =========================
+        PINT2
+   ========================= */
+#define PINT2_MASK_SET              0xFFC04200         /* PINT2 Pint Mask Set Register */
+#define PINT2_MASK_CLEAR            0xFFC04204         /* PINT2 Pint Mask Clear Register */
+#define PINT2_REQUEST               0xFFC04208         /* PINT2 Pint Request Register */
+#define PINT2_ASSIGN                0xFFC0420C         /* PINT2 Pint Assign Register */
+#define PINT2_EDGE_SET              0xFFC04210         /* PINT2 Pint Edge Set Register */
+#define PINT2_EDGE_CLEAR            0xFFC04214         /* PINT2 Pint Edge Clear Register */
+#define PINT2_INVERT_SET            0xFFC04218         /* PINT2 Pint Invert Set Register */
+#define PINT2_INVERT_CLEAR          0xFFC0421C         /* PINT2 Pint Invert Clear Register */
+#define PINT2_PINSTATE              0xFFC04220         /* PINT2 Pint Pinstate Register */
+#define PINT2_LATCH                 0xFFC04224         /* PINT2 Pint Latch Register */
+
+/* =========================
+        PINT3
+   ========================= */
+#define PINT3_MASK_SET              0xFFC04300         /* PINT3 Pint Mask Set Register */
+#define PINT3_MASK_CLEAR            0xFFC04304         /* PINT3 Pint Mask Clear Register */
+#define PINT3_REQUEST               0xFFC04308         /* PINT3 Pint Request Register */
+#define PINT3_ASSIGN                0xFFC0430C         /* PINT3 Pint Assign Register */
+#define PINT3_EDGE_SET              0xFFC04310         /* PINT3 Pint Edge Set Register */
+#define PINT3_EDGE_CLEAR            0xFFC04314         /* PINT3 Pint Edge Clear Register */
+#define PINT3_INVERT_SET            0xFFC04318         /* PINT3 Pint Invert Set Register */
+#define PINT3_INVERT_CLEAR          0xFFC0431C         /* PINT3 Pint Invert Clear Register */
+#define PINT3_PINSTATE              0xFFC04320         /* PINT3 Pint Pinstate Register */
+#define PINT3_LATCH                 0xFFC04324         /* PINT3 Pint Latch Register */
+
+/* =========================
+        PINT4
+   ========================= */
+#define PINT4_MASK_SET              0xFFC04400         /* PINT4 Pint Mask Set Register */
+#define PINT4_MASK_CLEAR            0xFFC04404         /* PINT4 Pint Mask Clear Register */
+#define PINT4_REQUEST               0xFFC04408         /* PINT4 Pint Request Register */
+#define PINT4_ASSIGN                0xFFC0440C         /* PINT4 Pint Assign Register */
+#define PINT4_EDGE_SET              0xFFC04410         /* PINT4 Pint Edge Set Register */
+#define PINT4_EDGE_CLEAR            0xFFC04414         /* PINT4 Pint Edge Clear Register */
+#define PINT4_INVERT_SET            0xFFC04418         /* PINT4 Pint Invert Set Register */
+#define PINT4_INVERT_CLEAR          0xFFC0441C         /* PINT4 Pint Invert Clear Register */
+#define PINT4_PINSTATE              0xFFC04420         /* PINT4 Pint Pinstate Register */
+#define PINT4_LATCH                 0xFFC04424         /* PINT4 Pint Latch Register */
+
+/* =========================
+        PINT5
+   ========================= */
+#define PINT5_MASK_SET              0xFFC04500         /* PINT5 Pint Mask Set Register */
+#define PINT5_MASK_CLEAR            0xFFC04504         /* PINT5 Pint Mask Clear Register */
+#define PINT5_REQUEST               0xFFC04508         /* PINT5 Pint Request Register */
+#define PINT5_ASSIGN                0xFFC0450C         /* PINT5 Pint Assign Register */
+#define PINT5_EDGE_SET              0xFFC04510         /* PINT5 Pint Edge Set Register */
+#define PINT5_EDGE_CLEAR            0xFFC04514         /* PINT5 Pint Edge Clear Register */
+#define PINT5_INVERT_SET            0xFFC04518         /* PINT5 Pint Invert Set Register */
+#define PINT5_INVERT_CLEAR          0xFFC0451C         /* PINT5 Pint Invert Clear Register */
+#define PINT5_PINSTATE              0xFFC04520         /* PINT5 Pint Pinstate Register */
+#define PINT5_LATCH                 0xFFC04524         /* PINT5 Pint Latch Register */
+
+
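/*
 * Illustrative sketch (not part of this patch): each PINT block has set/clear
 * style mask registers, so flipping one request bit is a single write rather
 * than a read-modify-write.  The bit-per-pin mapping, the polarity of "mask"
 * (enable vs. block) and the PINTx_ASSIGN routing are assumptions to be
 * confirmed in the hardware reference manual.
 */
#include <linux/types.h>
#include <asm/blackfin.h>

static void pint0_mask_write_sketch(unsigned int bit, bool set)
{
	if (set)
		bfin_write32(PINT0_MASK_SET, 1u << bit);
	else
		bfin_write32(PINT0_MASK_CLEAR, 1u << bit);
}
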
+/* =========================
+        SMC Registers
+   ========================= */
+
+/* =========================
+        SMC0
+   ========================= */
+#define SMC_GCTL                   0xFFC16004         /* SMC0 SMC Control Register */
+#define SMC_GSTAT                  0xFFC16008         /* SMC0 SMC Status Register */
+#define SMC_B0CTL                  0xFFC1600C         /* SMC0 SMC Bank0 Control Register */
+#define SMC_B0TIM                  0xFFC16010         /* SMC0 SMC Bank0 Timing Register */
+#define SMC_B0ETIM                 0xFFC16014         /* SMC0 SMC Bank0 Extended Timing Register */
+#define SMC_B1CTL                  0xFFC1601C         /* SMC0 SMC Bank1 Control Register */
+#define SMC_B1TIM                  0xFFC16020         /* SMC0 SMC Bank1 Timing Register */
+#define SMC_B1ETIM                 0xFFC16024         /* SMC0 SMC Bank1 Extended Timing Register */
+#define SMC_B2CTL                  0xFFC1602C         /* SMC0 SMC Bank2 Control Register */
+#define SMC_B2TIM                  0xFFC16030         /* SMC0 SMC Bank2 Timing Register */
+#define SMC_B2ETIM                 0xFFC16034         /* SMC0 SMC Bank2 Extended Timing Register */
+#define SMC_B3CTL                  0xFFC1603C         /* SMC0 SMC Bank3 Control Register */
+#define SMC_B3TIM                  0xFFC16040         /* SMC0 SMC Bank3 Timing Register */
+#define SMC_B3ETIM                 0xFFC16044         /* SMC0 SMC Bank3 Extended Timing Register */
+
+
+/* =========================
+        WDOG Registers
+   ========================= */
+
+/* =========================
+        WDOG0
+   ========================= */
+#define WDOG0_CTL                   0xFFC17000         /* WDOG0 Control Register */
+#define WDOG0_CNT                   0xFFC17004         /* WDOG0 Count Register */
+#define WDOG0_STAT                  0xFFC17008         /* WDOG0 Watchdog Timer Status Register */
+#define WDOG_CTL		WDOG0_CTL
+#define WDOG_CNT		WDOG0_CNT
+#define WDOG_STAT		WDOG0_STAT
+
+/* =========================
+        WDOG1
+   ========================= */
+#define WDOG1_CTL                   0xFFC17800         /* WDOG1 Control Register */
+#define WDOG1_CNT                   0xFFC17804         /* WDOG1 Count Register */
+#define WDOG1_STAT                  0xFFC17808         /* WDOG1 Watchdog Timer Status Register */
+
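/*
 * Illustrative sketch (not part of this patch): the unnumbered
 * WDOG_CTL/WDOG_CNT/WDOG_STAT names above alias the first instance so that
 * shared Blackfin watchdog code can keep using the generic names.  How the
 * counter is enabled and serviced via WDOG0_CTL/WDOG0_STAT is left to the
 * hardware reference manual.
 */
#include <linux/types.h>
#include <asm/blackfin.h>

static void wdog0_set_count_sketch(u32 count)
{
	bfin_write32(WDOG_CNT, count);	/* via the alias, i.e. WDOG0_CNT */
}
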
+
+/* =========================
+        SDU Registers
+   ========================= */
+
+/* =========================
+        SDU0
+   ========================= */
+#define SDU0_IDCODE                 0xFFC1F020         /* SDU0 ID Code Register */
+#define SDU0_CTL                    0xFFC1F050         /* SDU0 Control Register */
+#define SDU0_STAT                   0xFFC1F054         /* SDU0 Status Register */
+#define SDU0_MACCTL                 0xFFC1F058         /* SDU0 Memory Access Control Register */
+#define SDU0_MACADDR                0xFFC1F05C         /* SDU0 Memory Access Address Register */
+#define SDU0_MACDATA                0xFFC1F060         /* SDU0 Memory Access Data Register */
+#define SDU0_DMARD                  0xFFC1F064         /* SDU0 DMA Read Data Register */
+#define SDU0_DMAWD                  0xFFC1F068         /* SDU0 DMA Write Data Register */
+#define SDU0_MSG                    0xFFC1F080         /* SDU0 Message Register */
+#define SDU0_MSG_SET                0xFFC1F084         /* SDU0 Message Set Register */
+#define SDU0_MSG_CLR                0xFFC1F088         /* SDU0 Message Clear Register */
+#define SDU0_GHLT                   0xFFC1F08C         /* SDU0 Group Halt Register */
+
+
+/* =========================
+        EMAC Registers
+   ========================= */
+/* =========================
+        EMAC0
+   ========================= */
+#define EMAC0_MACCFG                0xFFC20000         /* EMAC0 MAC Configuration Register */
+#define EMAC0_MACFRMFILT            0xFFC20004         /* EMAC0 Filter Register for filtering Received Frames */
+#define EMAC0_HASHTBL_HI            0xFFC20008         /* EMAC0 Contains the Upper 32 bits of the hash table */
+#define EMAC0_HASHTBL_LO            0xFFC2000C         /* EMAC0 Contains the lower 32 bits of the hash table */
+#define EMAC0_GMII_ADDR             0xFFC20010         /* EMAC0 Management Address Register */
+#define EMAC0_GMII_DATA             0xFFC20014         /* EMAC0 Management Data Register */
+#define EMAC0_FLOWCTL               0xFFC20018         /* EMAC0 MAC Flow Control Register */
+#define EMAC0_VLANTAG               0xFFC2001C         /* EMAC0 VLAN Tag Register */
+#define EMAC0_VER                   0xFFC20020         /* EMAC0 EMAC Version Register */
+#define EMAC0_DBG                   0xFFC20024         /* EMAC0 EMAC Debug Register */
+#define EMAC0_RMTWKUP               0xFFC20028         /* EMAC0 Remote wake up frame register */
+#define EMAC0_PMT_CTLSTAT           0xFFC2002C         /* EMAC0 PMT Control and Status Register */
+#define EMAC0_ISTAT                 0xFFC20038         /* EMAC0 EMAC Interrupt Status Register */
+#define EMAC0_IMSK                  0xFFC2003C         /* EMAC0 EMAC Interrupt Mask Register */
+#define EMAC0_ADDR0_HI              0xFFC20040         /* EMAC0 EMAC Address0 High Register */
+#define EMAC0_ADDR0_LO              0xFFC20044         /* EMAC0 EMAC Address0 Low Register */
+#define EMAC0_MMC_CTL               0xFFC20100         /* EMAC0 MMC Control Register */
+#define EMAC0_MMC_RXINT             0xFFC20104         /* EMAC0 MMC RX Interrupt Register */
+#define EMAC0_MMC_TXINT             0xFFC20108         /* EMAC0 MMC TX Interrupt Register */
+#define EMAC0_MMC_RXIMSK            0xFFC2010C         /* EMAC0 MMC RX Interrupt Mask Register */
+#define EMAC0_MMC_TXIMSK            0xFFC20110         /* EMAC0 MMC TX Interrupt Mask Register */
+#define EMAC0_TXOCTCNT_GB           0xFFC20114         /* EMAC0 Number of bytes transmitted, exclusive of preamble */
+#define EMAC0_TXFRMCNT_GB           0xFFC20118         /* EMAC0 Number of frames transmitted, exclusive of retried frames */
+#define EMAC0_TXBCASTFRM_G          0xFFC2011C         /* EMAC0 Number of good broadcast frames transmitted. */
+#define EMAC0_TXMCASTFRM_G          0xFFC20120         /* EMAC0 Number of good multicast frames transmitted. */
+#define EMAC0_TX64_GB               0xFFC20124         /* EMAC0 Number of 64 byte length frames */
+#define EMAC0_TX65TO127_GB          0xFFC20128         /* EMAC0 Number of frames of length b/w 65-127 (inclusive) bytes */
+#define EMAC0_TX128TO255_GB         0xFFC2012C         /* EMAC0 Number of frames of length b/w 128-255 (inclusive) bytes */
+#define EMAC0_TX256TO511_GB         0xFFC20130         /* EMAC0 Number of frames of length b/w 256-511 (inclusive) bytes */
+#define EMAC0_TX512TO1023_GB        0xFFC20134         /* EMAC0 Number of frames of length b/w 512-1023 (inclusive) bytes */
+#define EMAC0_TX1024TOMAX_GB        0xFFC20138         /* EMAC0 Number of frames of length b/w 1024-max (inclusive) bytes */
+#define EMAC0_TXUCASTFRM_GB         0xFFC2013C         /* EMAC0 Number of good and bad unicast frames transmitted */
+#define EMAC0_TXMCASTFRM_GB         0xFFC20140         /* EMAC0 Number of good and bad multicast frames transmitted */
+#define EMAC0_TXBCASTFRM_GB         0xFFC20144         /* EMAC0 Number of good and bad broadcast frames transmitted */
+#define EMAC0_TXUNDR_ERR            0xFFC20148         /* EMAC0 Number of frames aborted due to frame underflow error */
+#define EMAC0_TXSNGCOL_G            0xFFC2014C         /* EMAC0 Number of transmitted frames after single collision */
+#define EMAC0_TXMULTCOL_G           0xFFC20150         /* EMAC0 Number of transmitted frames with more than one collision */
+#define EMAC0_TXDEFERRED            0xFFC20154         /* EMAC0 Number of transmitted frames after deferral */
+#define EMAC0_TXLATECOL             0xFFC20158         /* EMAC0 Number of frames aborted due to late collision error */
+#define EMAC0_TXEXCESSCOL           0xFFC2015C         /* EMAC0 Number of aborted frames due to excessive collisions */
+#define EMAC0_TXCARR_ERR            0xFFC20160         /* EMAC0 Number of aborted frames due to carrier sense error */
+#define EMAC0_TXOCTCNT_G            0xFFC20164         /* EMAC0 Number of bytes transmitted in good frames only */
+#define EMAC0_TXFRMCNT_G            0xFFC20168         /* EMAC0 Number of good frames transmitted. */
+#define EMAC0_TXEXCESSDEF           0xFFC2016C         /* EMAC0 Number of frames aborted due to excessive deferral */
+#define EMAC0_TXPAUSEFRM            0xFFC20170         /* EMAC0 Number of good PAUSE frames transmitted. */
+#define EMAC0_TXVLANFRM_G           0xFFC20174         /* EMAC0 Number of VLAN frames transmitted */
+#define EMAC0_RXFRMCNT_GB           0xFFC20180         /* EMAC0 Number of good and bad frames received. */
+#define EMAC0_RXOCTCNT_GB           0xFFC20184         /* EMAC0 Number of bytes received in good and bad frames */
+#define EMAC0_RXOCTCNT_G            0xFFC20188         /* EMAC0 Number of bytes received only in good frames */
+#define EMAC0_RXBCASTFRM_G          0xFFC2018C         /* EMAC0 Number of good broadcast frames received. */
+#define EMAC0_RXMCASTFRM_G          0xFFC20190         /* EMAC0 Number of good multicast frames received */
+#define EMAC0_RXCRC_ERR             0xFFC20194         /* EMAC0 Number of frames received with CRC error */
+#define EMAC0_RXALIGN_ERR           0xFFC20198         /* EMAC0 Number of frames with alignment error */
+#define EMAC0_RXRUNT_ERR            0xFFC2019C         /* EMAC0 Number of frames received with runt error. */
+#define EMAC0_RXJAB_ERR             0xFFC201A0         /* EMAC0 Number of frames received with length greater than 1518 */
+#define EMAC0_RXUSIZE_G             0xFFC201A4         /* EMAC0 Number of good frames received with length less than 64 bytes */
+#define EMAC0_RXOSIZE_G             0xFFC201A8         /* EMAC0 Number of frames received with length greater than the maximum */
+#define EMAC0_RX64_GB               0xFFC201AC         /* EMAC0 Number of good and bad frames of length 64 bytes */
+#define EMAC0_RX65TO127_GB          0xFFC201B0         /* EMAC0 Number of good and bad frames between 65-127 (inclusive) bytes */
+#define EMAC0_RX128TO255_GB         0xFFC201B4         /* EMAC0 Number of good and bad frames received with length between 128 and 255 (inclusive) bytes, exclusive of preamble. */
+#define EMAC0_RX256TO511_GB         0xFFC201B8         /* EMAC0 Number of good and bad frames between 256-511(inclusive) */
+#define EMAC0_RX512TO1023_GB        0xFFC201BC         /* EMAC0 Number of good and bad frames received between 512-1023 */
+#define EMAC0_RX1024TOMAX_GB        0xFFC201C0         /* EMAC0 Number of frames received between 1024 and maxsize */
+#define EMAC0_RXUCASTFRM_G          0xFFC201C4         /* EMAC0 Number of good unicast frames received. */
+#define EMAC0_RXLEN_ERR             0xFFC201C8         /* EMAC0 Number of frames received with length error */
+#define EMAC0_RXOORTYPE             0xFFC201CC         /* EMAC0 Number of frames with length not equal to valid frame size */
+#define EMAC0_RXPAUSEFRM            0xFFC201D0         /* EMAC0 Number of good and valid PAUSE frames received. */
+#define EMAC0_RXFIFO_OVF            0xFFC201D4         /* EMAC0 Number of missed received frames due to FIFO overflow. This counter is not present in the GMAC-CORE configuration. */
+#define EMAC0_RXVLANFRM_GB          0xFFC201D8         /* EMAC0 Number of good and bad VLAN frames received. */
+#define EMAC0_RXWDOG_ERR            0xFFC201DC         /* EMAC0 Frames received with error due to watchdog timeout */
+#define EMAC0_IPC_RXIMSK            0xFFC20200         /* EMAC0 MMC IPC RX Interrupt Mask Register */
+#define EMAC0_IPC_RXINT             0xFFC20208         /* EMAC0 MMC IPC RX Interrupt Register */
+#define EMAC0_RXIPV4_GD_FRM         0xFFC20210         /* EMAC0 Number of good IPv4 datagrams */
+#define EMAC0_RXIPV4_HDR_ERR_FRM    0xFFC20214         /* EMAC0 Number of IPv4 datagrams with header errors */
+#define EMAC0_RXIPV4_NOPAY_FRM      0xFFC20218         /* EMAC0 Number of IPv4 datagrams without checksum */
+#define EMAC0_RXIPV4_FRAG_FRM       0xFFC2021C         /* EMAC0 Number of good IPv4 datagrams with fragmentation */
+#define EMAC0_RXIPV4_UDSBL_FRM      0xFFC20220         /* EMAC0 Number of IPv4 UDP datagrams with disabled checksum */
+#define EMAC0_RXIPV6_GD_FRM         0xFFC20224         /* EMAC0 Number of good IPv6 datagrams with TCP/UDP/ICMP payloads */
+#define EMAC0_RXIPV6_HDR_ERR_FRM    0xFFC20228         /* EMAC0 Number of IPv6 datagrams with header errors */
+#define EMAC0_RXIPV6_NOPAY_FRM      0xFFC2022C         /* EMAC0 Number of IPv6 datagrams with no TCP/UDP/ICMP payload */
+#define EMAC0_RXUDP_GD_FRM          0xFFC20230         /* EMAC0 Number of good IP datagrams with a good UDP payload */
+#define EMAC0_RXUDP_ERR_FRM         0xFFC20234         /* EMAC0 Number of good IP datagrams with UDP checksum errors */
+#define EMAC0_RXTCP_GD_FRM          0xFFC20238         /* EMAC0 Number of good IP datagrams with a good TCP payload */
+#define EMAC0_RXTCP_ERR_FRM         0xFFC2023C         /* EMAC0 Number of good IP datagrams with TCP checksum errors */
+#define EMAC0_RXICMP_GD_FRM         0xFFC20240         /* EMAC0 Number of good IP datagrams with a good ICMP payload */
+#define EMAC0_RXICMP_ERR_FRM        0xFFC20244         /* EMAC0 Number of good IP datagrams with ICMP checksum errors */
+#define EMAC0_RXIPV4_GD_OCT         0xFFC20250         /* EMAC0 Bytes received in IPv4 datagrams including tcp,udp or icmp */
+#define EMAC0_RXIPV4_HDR_ERR_OCT    0xFFC20254         /* EMAC0 Bytes received in IPv4 datagrams with header errors */
+#define EMAC0_RXIPV4_NOPAY_OCT      0xFFC20258         /* EMAC0 Bytes received in IPv4 datagrams without tcp,udp,icmp load */
+#define EMAC0_RXIPV4_FRAG_OCT       0xFFC2025C         /* EMAC0 Bytes received in fragmented IPv4 datagrams */
+#define EMAC0_RXIPV4_UDSBL_OCT      0xFFC20260         /* EMAC0 Bytes received in UDP segment with checksum disabled */
+#define EMAC0_RXIPV6_GD_OCT         0xFFC20264         /* EMAC0 Bytes received in good IPv6 including tcp,udp or icmp load */
+#define EMAC0_RXIPV6_HDR_ERR_OCT    0xFFC20268         /* EMAC0 Number of bytes received in IPv6 with header errors */
+#define EMAC0_RXIPV6_NOPAY_OCT      0xFFC2026C         /* EMAC0 Bytes received in IPv6 without tcp,udp or icmp load */
+#define EMAC0_RXUDP_GD_OCT          0xFFC20270         /* EMAC0 Number of bytes received in good UDP segments */
+#define EMAC0_RXUDP_ERR_OCT         0xFFC20274         /* EMAC0 Number of bytes received in UDP segment with checksum err */
+#define EMAC0_RXTCP_GD_OCT          0xFFC20278         /* EMAC0 Number of bytes received in a good TCP segment */
+#define EMAC0_RXTCP_ERR_OCT         0xFFC2027C         /* EMAC0 Number of bytes received in TCP segment with checksum err */
+#define EMAC0_RXICMP_GD_OCT         0xFFC20280         /* EMAC0 Number of bytes received in a good ICMP segment */
+#define EMAC0_RXICMP_ERR_OCT        0xFFC20284         /* EMAC0 Bytes received in an ICMP segment with checksum errors */
+#define EMAC0_TM_CTL                0xFFC20700         /* EMAC0 EMAC Time Stamp Control Register */
+#define EMAC0_TM_SUBSEC             0xFFC20704         /* EMAC0 EMAC Time Stamp Sub Second Increment */
+#define EMAC0_TM_SEC                0xFFC20708         /* EMAC0 EMAC Time Stamp Second Register */
+#define EMAC0_TM_NSEC               0xFFC2070C         /* EMAC0 EMAC Time Stamp Nano Second Register */
+#define EMAC0_TM_SECUPDT            0xFFC20710         /* EMAC0 EMAC Time Stamp Seconds Update */
+#define EMAC0_TM_NSECUPDT           0xFFC20714         /* EMAC0 EMAC Time Stamp Nano Seconds Update */
+#define EMAC0_TM_ADDEND             0xFFC20718         /* EMAC0 EMAC Time Stamp Addend Register */
+#define EMAC0_TM_TGTM               0xFFC2071C         /* EMAC0 EMAC Time Stamp Target Time Sec. */
+#define EMAC0_TM_NTGTM              0xFFC20720         /* EMAC0 EMAC Time Stamp Target Time Nanosec. */
+#define EMAC0_TM_HISEC              0xFFC20724         /* EMAC0 EMAC Time Stamp High Second Register */
+#define EMAC0_TM_STMPSTAT           0xFFC20728         /* EMAC0 EMAC Time Stamp Status Register */
+#define EMAC0_TM_PPSCTL             0xFFC2072C         /* EMAC0 EMAC PPS Control Register */
+#define EMAC0_TM_AUXSTMP_NSEC       0xFFC20730         /* EMAC0 EMAC Auxiliary Time Stamp Nano Register */
+#define EMAC0_TM_AUXSTMP_SEC        0xFFC20734         /* EMAC0 EMAC Auxiliary Time Stamp Sec Register */
+#define EMAC0_DMA_BUSMODE           0xFFC21000         /* EMAC0 Bus Operating Modes for EMAC DMA */
+#define EMAC0_DMA_TXPOLL            0xFFC21004         /* EMAC0 TX DMA Poll demand register */
+#define EMAC0_DMA_RXPOLL            0xFFC21008         /* EMAC0 RX DMA Poll demand register */
+#define EMAC0_DMA_RXDSC_ADDR        0xFFC2100C         /* EMAC0 RX Descriptor List Address */
+#define EMAC0_DMA_TXDSC_ADDR        0xFFC21010         /* EMAC0 TX Descriptor List Address */
+#define EMAC0_DMA_STAT              0xFFC21014         /* EMAC0 DMA Status Register */
+#define EMAC0_DMA_OPMODE            0xFFC21018         /* EMAC0 DMA Operation Mode Register */
+#define EMAC0_DMA_IEN               0xFFC2101C         /* EMAC0 DMA Interrupt Enable Register */
+#define EMAC0_DMA_MISS_FRM          0xFFC21020         /* EMAC0 DMA missed frame and buffer overflow counter */
+#define EMAC0_DMA_RXIWDOG           0xFFC21024         /* EMAC0 DMA RX Interrupt Watch Dog timer */
+#define EMAC0_DMA_BMMODE            0xFFC21028         /* EMAC0 AXI Bus Mode Register */
+#define EMAC0_DMA_BMSTAT            0xFFC2102C         /* EMAC0 AXI Status Register */
+#define EMAC0_DMA_TXDSC_CUR         0xFFC21048         /* EMAC0 TX current descriptor register */
+#define EMAC0_DMA_RXDSC_CUR         0xFFC2104C         /* EMAC0 RX current descriptor register */
+#define EMAC0_DMA_TXBUF_CUR         0xFFC21050         /* EMAC0 TX current buffer pointer register */
+#define EMAC0_DMA_RXBUF_CUR         0xFFC21054         /* EMAC0 RX current buffer pointer register */
+#define EMAC0_HWFEAT                0xFFC21058         /* EMAC0 Hardware Feature Register */
+
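/*
 * Illustrative sketch (not part of this patch): EMAC0 and EMAC1 share one
 * register layout at bases 0xFFC20000 and 0xFFC22000, and the MMC block keeps
 * hardware statistics that a driver can simply read out, e.g.:
 */
#include <linux/types.h>
#include <asm/blackfin.h>

static u32 emac0_rx_crc_errors_sketch(void)
{
	return bfin_read32(EMAC0_RXCRC_ERR);	/* frames received with a CRC error */
}
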
+/* =========================
+        EMAC1
+   ========================= */
+#define EMAC1_MACCFG                0xFFC22000         /* EMAC1 MAC Configuration Register */
+#define EMAC1_MACFRMFILT            0xFFC22004         /* EMAC1 Filter Register for filtering Received Frames */
+#define EMAC1_HASHTBL_HI            0xFFC22008         /* EMAC1 Contains the Upper 32 bits of the hash table */
+#define EMAC1_HASHTBL_LO            0xFFC2200C         /* EMAC1 Contains the lower 32 bits of the hash table */
+#define EMAC1_GMII_ADDR             0xFFC22010         /* EMAC1 Management Address Register */
+#define EMAC1_GMII_DATA             0xFFC22014         /* EMAC1 Management Data Register */
+#define EMAC1_FLOWCTL               0xFFC22018         /* EMAC1 MAC Flow Control Register */
+#define EMAC1_VLANTAG               0xFFC2201C         /* EMAC1 VLAN Tag Register */
+#define EMAC1_VER                   0xFFC22020         /* EMAC1 EMAC Version Register */
+#define EMAC1_DBG                   0xFFC22024         /* EMAC1 EMAC Debug Register */
+#define EMAC1_RMTWKUP               0xFFC22028         /* EMAC1 Remote wake up frame register */
+#define EMAC1_PMT_CTLSTAT           0xFFC2202C         /* EMAC1 PMT Control and Status Register */
+#define EMAC1_ISTAT                 0xFFC22038         /* EMAC1 EMAC Interrupt Status Register */
+#define EMAC1_IMSK                  0xFFC2203C         /* EMAC1 EMAC Interrupt Mask Register */
+#define EMAC1_ADDR0_HI              0xFFC22040         /* EMAC1 EMAC Address0 High Register */
+#define EMAC1_ADDR0_LO              0xFFC22044         /* EMAC1 EMAC Address0 Low Register */
+#define EMAC1_MMC_CTL               0xFFC22100         /* EMAC1 MMC Control Register */
+#define EMAC1_MMC_RXINT             0xFFC22104         /* EMAC1 MMC RX Interrupt Register */
+#define EMAC1_MMC_TXINT             0xFFC22108         /* EMAC1 MMC TX Interrupt Register */
+#define EMAC1_MMC_RXIMSK            0xFFC2210C         /* EMAC1 MMC RX Interrupt Mask Register */
+#define EMAC1_MMC_TXIMSK            0xFFC22110         /* EMAC1 MMC TX Interrupt Mask Register */
+#define EMAC1_TXOCTCNT_GB           0xFFC22114         /* EMAC1 Number of bytes transmitted, exclusive of preamble */
+#define EMAC1_TXFRMCNT_GB           0xFFC22118         /* EMAC1 Number of frames transmitted, exclusive of retried frames */
+#define EMAC1_TXBCASTFRM_G          0xFFC2211C         /* EMAC1 Number of good broadcast frames transmitted. */
+#define EMAC1_TXMCASTFRM_G          0xFFC22120         /* EMAC1 Number of good multicast frames transmitted. */
+#define EMAC1_TX64_GB               0xFFC22124         /* EMAC1 Number of 64 byte length frames */
+#define EMAC1_TX65TO127_GB          0xFFC22128         /* EMAC1 Number of frames of length b/w 65-127 (inclusive) bytes */
+#define EMAC1_TX128TO255_GB         0xFFC2212C         /* EMAC1 Number of frames of length b/w 128-255 (inclusive) bytes */
+#define EMAC1_TX256TO511_GB         0xFFC22130         /* EMAC1 Number of frames of length b/w 256-511 (inclusive) bytes */
+#define EMAC1_TX512TO1023_GB        0xFFC22134         /* EMAC1 Number of frames of length b/w 512-1023 (inclusive) bytes */
+#define EMAC1_TX1024TOMAX_GB        0xFFC22138         /* EMAC1 Number of frames of length b/w 1024-max (inclusive) bytes */
+#define EMAC1_TXUCASTFRM_GB         0xFFC2213C         /* EMAC1 Number of good and bad unicast frames transmitted */
+#define EMAC1_TXMCASTFRM_GB         0xFFC22140         /* EMAC1 Number of good and bad multicast frames transmitted */
+#define EMAC1_TXBCASTFRM_GB         0xFFC22144         /* EMAC1 Number of good and bad broadcast frames transmitted */
+#define EMAC1_TXUNDR_ERR            0xFFC22148         /* EMAC1 Number of frames aborted due to frame underflow error */
+#define EMAC1_TXSNGCOL_G            0xFFC2214C         /* EMAC1 Number of transmitted frames after single collision */
+#define EMAC1_TXMULTCOL_G           0xFFC22150         /* EMAC1 Number of transmitted frames with more than one collision */
+#define EMAC1_TXDEFERRED            0xFFC22154         /* EMAC1 Number of transmitted frames after deferral */
+#define EMAC1_TXLATECOL             0xFFC22158         /* EMAC1 Number of frames aborted due to late collision error */
+#define EMAC1_TXEXCESSCOL           0xFFC2215C         /* EMAC1 Number of aborted frames due to excessive collisions */
+#define EMAC1_TXCARR_ERR            0xFFC22160         /* EMAC1 Number of aborted frames due to carrier sense error */
+#define EMAC1_TXOCTCNT_G            0xFFC22164         /* EMAC1 Number of bytes transmitted in good frames only */
+#define EMAC1_TXFRMCNT_G            0xFFC22168         /* EMAC1 Number of good frames transmitted. */
+#define EMAC1_TXEXCESSDEF           0xFFC2216C         /* EMAC1 Number of frames aborted due to excessive deferral */
+#define EMAC1_TXPAUSEFRM            0xFFC22170         /* EMAC1 Number of good PAUSE frames transmitted. */
+#define EMAC1_TXVLANFRM_G           0xFFC22174         /* EMAC1 Number of VLAN frames transmitted */
+#define EMAC1_RXFRMCNT_GB           0xFFC22180         /* EMAC1 Number of good and bad frames received. */
+#define EMAC1_RXOCTCNT_GB           0xFFC22184         /* EMAC1 Number of bytes received in good and bad frames */
+#define EMAC1_RXOCTCNT_G            0xFFC22188         /* EMAC1 Number of bytes received only in good frames */
+#define EMAC1_RXBCASTFRM_G          0xFFC2218C         /* EMAC1 Number of good broadcast frames received. */
+#define EMAC1_RXMCASTFRM_G          0xFFC22190         /* EMAC1 Number of good multicast frames received */
+#define EMAC1_RXCRC_ERR             0xFFC22194         /* EMAC1 Number of frames received with CRC error */
+#define EMAC1_RXALIGN_ERR           0xFFC22198         /* EMAC1 Number of frames with alignment error */
+#define EMAC1_RXRUNT_ERR            0xFFC2219C         /* EMAC1 Number of frames received with runt error. */
+#define EMAC1_RXJAB_ERR             0xFFC221A0         /* EMAC1 Number of frames received with length greater than 1518 */
+#define EMAC1_RXUSIZE_G             0xFFC221A4         /* EMAC1 Number of good frames received with length less than 64 bytes */
+#define EMAC1_RXOSIZE_G             0xFFC221A8         /* EMAC1 Number of frames received with length greater than the maximum */
+#define EMAC1_RX64_GB               0xFFC221AC         /* EMAC1 Number of good and bad frames of length 64 bytes */
+#define EMAC1_RX65TO127_GB          0xFFC221B0         /* EMAC1 Number of good and bad frames between 65-127 (inclusive) bytes */
+#define EMAC1_RX128TO255_GB         0xFFC221B4         /* EMAC1 Number of good and bad frames received with length between 128 and 255 (inclusive) bytes, exclusive of preamble. */
+#define EMAC1_RX256TO511_GB         0xFFC221B8         /* EMAC1 Number of good and bad frames between 256-511(inclusive) */
+#define EMAC1_RX512TO1023_GB        0xFFC221BC         /* EMAC1 Number of good and bad frames received between 512-1023 */
+#define EMAC1_RX1024TOMAX_GB        0xFFC221C0         /* EMAC1 Number of frames received between 1024 and maxsize */
+#define EMAC1_RXUCASTFRM_G          0xFFC221C4         /* EMAC1 Number of good unicast frames received. */
+#define EMAC1_RXLEN_ERR             0xFFC221C8         /* EMAC1 Number of frames received with length error */
+#define EMAC1_RXOORTYPE             0xFFC221CC         /* EMAC1 Number of frames with length not equal to valid frame size */
+#define EMAC1_RXPAUSEFRM            0xFFC221D0         /* EMAC1 Number of good and valid PAUSE frames received. */
+#define EMAC1_RXFIFO_OVF            0xFFC221D4         /* EMAC1 Number of missed received frames due to FIFO overflow. This counter is not present in the GMAC-CORE configuration. */
+#define EMAC1_RXVLANFRM_GB          0xFFC221D8         /* EMAC1 Number of good and bad VLAN frames received. */
+#define EMAC1_RXWDOG_ERR            0xFFC221DC         /* EMAC1 Frames received with error due to watchdog timeout */
+#define EMAC1_IPC_RXIMSK            0xFFC22200         /* EMAC1 MMC IPC RX Interrupt Mask Register */
+#define EMAC1_IPC_RXINT             0xFFC22208         /* EMAC1 MMC IPC RX Interrupt Register */
+#define EMAC1_RXIPV4_GD_FRM         0xFFC22210         /* EMAC1 Number of good IPv4 datagrams */
+#define EMAC1_RXIPV4_HDR_ERR_FRM    0xFFC22214         /* EMAC1 Number of IPv4 datagrams with header errors */
+#define EMAC1_RXIPV4_NOPAY_FRM      0xFFC22218         /* EMAC1 Number of IPv4 datagrams without a TCP/UDP/ICMP payload */
+#define EMAC1_RXIPV4_FRAG_FRM       0xFFC2221C         /* EMAC1 Number of good IPv4 datagrams with fragmentation */
+#define EMAC1_RXIPV4_UDSBL_FRM      0xFFC22220         /* EMAC1 Number of IPv4 UDP datagrams with disabled checksum */
+#define EMAC1_RXIPV6_GD_FRM         0xFFC22224         /* EMAC1 Number of good IPv6 datagrams with TCP/UDP/ICMP payloads */
+#define EMAC1_RXIPV6_HDR_ERR_FRM    0xFFC22228         /* EMAC1 Number of IPv6 datagrams with header errors */
+#define EMAC1_RXIPV6_NOPAY_FRM      0xFFC2222C         /* EMAC1 Number of IPv6 datagrams with no TCP/UDP/ICMP payload */
+#define EMAC1_RXUDP_GD_FRM          0xFFC22230         /* EMAC1 Number of good IP datagrams with a good UDP payload */
+#define EMAC1_RXUDP_ERR_FRM         0xFFC22234         /* EMAC1 Number of good IP datagrams with UDP checksum errors */
+#define EMAC1_RXTCP_GD_FRM          0xFFC22238         /* EMAC1 Number of good IP datagrams with a good TCP payload */
+#define EMAC1_RXTCP_ERR_FRM         0xFFC2223C         /* EMAC1 Number of good IP datagrams with TCP checksum errors */
+#define EMAC1_RXICMP_GD_FRM         0xFFC22240         /* EMAC1 Number of good IP datagrams with a good ICMP payload */
+#define EMAC1_RXICMP_ERR_FRM        0xFFC22244         /* EMAC1 Number of good IP datagrams with ICMP checksum errors */
+#define EMAC1_RXIPV4_GD_OCT         0xFFC22250         /* EMAC1 Bytes received in good IPv4 datagrams including TCP, UDP or ICMP payload */
+#define EMAC1_RXIPV4_HDR_ERR_OCT    0xFFC22254         /* EMAC1 Bytes received in IPv4 datagrams with header errors */
+#define EMAC1_RXIPV4_NOPAY_OCT      0xFFC22258         /* EMAC1 Bytes received in IPv4 datagrams without a TCP, UDP or ICMP payload */
+#define EMAC1_RXIPV4_FRAG_OCT       0xFFC2225C         /* EMAC1 Bytes received in fragmented IPv4 datagrams */
+#define EMAC1_RXIPV4_UDSBL_OCT      0xFFC22260         /* EMAC1 Bytes received in UDP segment with checksum disabled */
+#define EMAC1_RXIPV6_GD_OCT         0xFFC22264         /* EMAC1 Bytes received in good IPv6 datagrams including TCP, UDP or ICMP payload */
+#define EMAC1_RXIPV6_HDR_ERR_OCT    0xFFC22268         /* EMAC1 Number of bytes received in IPv6 with header errors */
+#define EMAC1_RXIPV6_NOPAY_OCT      0xFFC2226C         /* EMAC1 Bytes received in IPv6 datagrams without a TCP, UDP or ICMP payload */
+#define EMAC1_RXUDP_GD_OCT          0xFFC22270         /* EMAC1 Number of bytes received in good UDP segments */
+#define EMAC1_RXUDP_ERR_OCT         0xFFC22274         /* EMAC1 Number of bytes received in UDP segment with checksum err */
+#define EMAC1_RXTCP_GD_OCT          0xFFC22278         /* EMAC1 Number of bytes received in a good TCP segment */
+#define EMAC1_RXTCP_ERR_OCT         0xFFC2227C         /* EMAC1 Number of bytes received in TCP segment with checksum err */
+#define EMAC1_RXICMP_GD_OCT         0xFFC22280         /* EMAC1 Number of bytes received in a good ICMP segment */
+#define EMAC1_RXICMP_ERR_OCT        0xFFC22284         /* EMAC1 Bytes received in an ICMP segment with checksum errors */
+#define EMAC1_TM_CTL                0xFFC22700         /* EMAC1 EMAC Time Stamp Control Register */
+#define EMAC1_TM_SUBSEC             0xFFC22704         /* EMAC1 EMAC Time Stamp Sub Second Increment */
+#define EMAC1_TM_SEC                0xFFC22708         /* EMAC1 EMAC Time Stamp Second Register */
+#define EMAC1_TM_NSEC               0xFFC2270C         /* EMAC1 EMAC Time Stamp Nano Second Register */
+#define EMAC1_TM_SECUPDT            0xFFC22710         /* EMAC1 EMAC Time Stamp Seconds Update */
+#define EMAC1_TM_NSECUPDT           0xFFC22714         /* EMAC1 EMAC Time Stamp Nano Seconds Update */
+#define EMAC1_TM_ADDEND             0xFFC22718         /* EMAC1 EMAC Time Stamp Addend Register */
+#define EMAC1_TM_TGTM               0xFFC2271C         /* EMAC1 EMAC Time Stamp Target Time Sec. */
+#define EMAC1_TM_NTGTM              0xFFC22720         /* EMAC1 EMAC Time Stamp Target Time Nanosec. */
+#define EMAC1_TM_HISEC              0xFFC22724         /* EMAC1 EMAC Time Stamp High Second Register */
+#define EMAC1_TM_STMPSTAT           0xFFC22728         /* EMAC1 EMAC Time Stamp Status Register */
+#define EMAC1_TM_PPSCTL             0xFFC2272C         /* EMAC1 EMAC PPS Control Register */
+#define EMAC1_TM_AUXSTMP_NSEC       0xFFC22730         /* EMAC1 EMAC Auxiliary Time Stamp Nano Register */
+#define EMAC1_TM_AUXSTMP_SEC        0xFFC22734         /* EMAC1 EMAC Auxiliary Time Stamp Sec Register */
+#define EMAC1_DMA_BUSMODE           0xFFC23000         /* EMAC1 Bus Operating Modes for EMAC DMA */
+#define EMAC1_DMA_TXPOLL            0xFFC23004         /* EMAC1 TX DMA Poll demand register */
+#define EMAC1_DMA_RXPOLL            0xFFC23008         /* EMAC1 RX DMA Poll demand register */
+#define EMAC1_DMA_RXDSC_ADDR        0xFFC2300C         /* EMAC1 RX Descriptor List Address */
+#define EMAC1_DMA_TXDSC_ADDR        0xFFC23010         /* EMAC1 TX Descriptor List Address */
+#define EMAC1_DMA_STAT              0xFFC23014         /* EMAC1 DMA Status Register */
+#define EMAC1_DMA_OPMODE            0xFFC23018         /* EMAC1 DMA Operation Mode Register */
+#define EMAC1_DMA_IEN               0xFFC2301C         /* EMAC1 DMA Interrupt Enable Register */
+#define EMAC1_DMA_MISS_FRM          0xFFC23020         /* EMAC1 DMA missed frame and buffer overflow counter */
+#define EMAC1_DMA_RXIWDOG           0xFFC23024         /* EMAC1 DMA RX Interrupt Watch Dog timer */
+#define EMAC1_DMA_BMMODE            0xFFC23028         /* EMAC1 AXI Bus Mode Register */
+#define EMAC1_DMA_BMSTAT            0xFFC2302C         /* EMAC1 AXI Status Register */
+#define EMAC1_DMA_TXDSC_CUR         0xFFC23048         /* EMAC1 TX current descriptor register */
+#define EMAC1_DMA_RXDSC_CUR         0xFFC2304C         /* EMAC1 RX current descriptor register */
+#define EMAC1_DMA_TXBUF_CUR         0xFFC23050         /* EMAC1 TX current buffer pointer register */
+#define EMAC1_DMA_RXBUF_CUR         0xFFC23054         /* EMAC1 RX current buffer pointer register */
+#define EMAC1_HWFEAT                0xFFC23058         /* EMAC1 Hardware Feature Register */
+
+
+/* =========================
+        SPI Registers
+   ========================= */
+
+/* =========================
+        SPI0
+   ========================= */
+#define SPI0_REGBASE                0xFFC40400
+#define SPI0_CTL                    0xFFC40404         /* SPI0 Control Register */
+#define SPI0_RXCTL                  0xFFC40408         /* SPI0 RX Control Register */
+#define SPI0_TXCTL                  0xFFC4040C         /* SPI0 TX Control Register */
+#define SPI0_CLK                    0xFFC40410         /* SPI0 Clock Rate Register */
+#define SPI0_DLY                    0xFFC40414         /* SPI0 Delay Register */
+#define SPI0_SLVSEL                 0xFFC40418         /* SPI0 Slave Select Register */
+#define SPI0_RWC                    0xFFC4041C         /* SPI0 Received Word-Count Register */
+#define SPI0_RWCR                   0xFFC40420         /* SPI0 Received Word-Count Reload Register */
+#define SPI0_TWC                    0xFFC40424         /* SPI0 Transmitted Word-Count Register */
+#define SPI0_TWCR                   0xFFC40428         /* SPI0 Transmitted Word-Count Reload Register */
+#define SPI0_IMSK                   0xFFC40430         /* SPI0 Interrupt Mask Register */
+#define SPI0_IMSK_CLR               0xFFC40434         /* SPI0 Interrupt Mask Clear Register */
+#define SPI0_IMSK_SET               0xFFC40438         /* SPI0 Interrupt Mask Set Register */
+#define SPI0_STAT                   0xFFC40440         /* SPI0 Status Register */
+#define SPI0_ILAT                   0xFFC40444         /* SPI0 Masked Interrupt Condition Register */
+#define SPI0_ILAT_CLR               0xFFC40448         /* SPI0 Masked Interrupt Clear Register */
+#define SPI0_RFIFO                  0xFFC40450         /* SPI0 Receive FIFO Data Register */
+#define SPI0_TFIFO                  0xFFC40458         /* SPI0 Transmit FIFO Data Register */
+
+/* =========================
+        SPI1
+   ========================= */
+#define SPI1_REGBASE                0xFFC40500
+#define SPI1_CTL                    0xFFC40504         /* SPI1 Control Register */
+#define SPI1_RXCTL                  0xFFC40508         /* SPI1 RX Control Register */
+#define SPI1_TXCTL                  0xFFC4050C         /* SPI1 TX Control Register */
+#define SPI1_CLK                    0xFFC40510         /* SPI1 Clock Rate Register */
+#define SPI1_DLY                    0xFFC40514         /* SPI1 Delay Register */
+#define SPI1_SLVSEL                 0xFFC40518         /* SPI1 Slave Select Register */
+#define SPI1_RWC                    0xFFC4051C         /* SPI1 Received Word-Count Register */
+#define SPI1_RWCR                   0xFFC40520         /* SPI1 Received Word-Count Reload Register */
+#define SPI1_TWC                    0xFFC40524         /* SPI1 Transmitted Word-Count Register */
+#define SPI1_TWCR                   0xFFC40528         /* SPI1 Transmitted Word-Count Reload Register */
+#define SPI1_IMSK                   0xFFC40530         /* SPI1 Interrupt Mask Register */
+#define SPI1_IMSK_CLR               0xFFC40534         /* SPI1 Interrupt Mask Clear Register */
+#define SPI1_IMSK_SET               0xFFC40538         /* SPI1 Interrupt Mask Set Register */
+#define SPI1_STAT                   0xFFC40540         /* SPI1 Status Register */
+#define SPI1_ILAT                   0xFFC40544         /* SPI1 Masked Interrupt Condition Register */
+#define SPI1_ILAT_CLR               0xFFC40548         /* SPI1 Masked Interrupt Clear Register */
+#define SPI1_RFIFO                  0xFFC40550         /* SPI1 Receive FIFO Data Register */
+#define SPI1_TFIFO                  0xFFC40558         /* SPI1 Transmit FIFO Data Register */
+
+/* =========================
+	SPORT Registers
+   ========================= */
+
+/* =========================
+	SPORT0
+   ========================= */
+#define SPORT0_CTL_A                0xFFC40000         /* SPORT0 'A' Control Register */
+#define SPORT0_DIV_A                0xFFC40004         /* SPORT0 'A' Clock and FS Divide Register */
+#define SPORT0_MCTL_A               0xFFC40008         /* SPORT0 'A' Multichannel Control Register */
+#define SPORT0_CS0_A                0xFFC4000C         /* SPORT0 'A' Multichannel Select Register (Channels 0-31) */
+#define SPORT0_CS1_A                0xFFC40010         /* SPORT0 'A' Multichannel Select Register (Channels 32-63) */
+#define SPORT0_CS2_A                0xFFC40014         /* SPORT0 'A' Multichannel Select Register (Channels 64-95) */
+#define SPORT0_CS3_A                0xFFC40018         /* SPORT0 'A' Multichannel Select Register (Channels 96-127) */
+#define SPORT0_CNT_A                0xFFC4001C         /* SPORT0 'A' Frame Sync And Clock Divisor Current Count */
+#define SPORT0_ERR_A                0xFFC40020         /* SPORT0 'A' Error Register */
+#define SPORT0_MSTAT_A              0xFFC40024         /* SPORT0 'A' Multichannel Mode Status Register */
+#define SPORT0_CTL2_A               0xFFC40028         /* SPORT0 'A' Control Register 2 */
+#define SPORT0_TXPRI_A              0xFFC40040         /* SPORT0 'A' Primary Channel Transmit Buffer Register */
+#define SPORT0_RXPRI_A              0xFFC40044         /* SPORT0 'A' Primary Channel Receive Buffer Register */
+#define SPORT0_TXSEC_A              0xFFC40048         /* SPORT0 'A' Secondary Channel Transmit Buffer Register */
+#define SPORT0_RXSEC_A              0xFFC4004C         /* SPORT0 'A' Secondary Channel Receive Buffer Register */
+#define SPORT0_CTL_B                0xFFC40080         /* SPORT0 'B' Control Register */
+#define SPORT0_DIV_B                0xFFC40084         /* SPORT0 'B' Clock and FS Divide Register */
+#define SPORT0_MCTL_B               0xFFC40088         /* SPORT0 'B' Multichannel Control Register */
+#define SPORT0_CS0_B                0xFFC4008C         /* SPORT0 'B' Multichannel Select Register (Channels 0-31) */
+#define SPORT0_CS1_B                0xFFC40090         /* SPORT0 'B' Multichannel Select Register (Channels 32-63) */
+#define SPORT0_CS2_B                0xFFC40094         /* SPORT0 'B' Multichannel Select Register (Channels 64-95) */
+#define SPORT0_CS3_B                0xFFC40098         /* SPORT0 'B' Multichannel Select Register (Channels 96-127) */
+#define SPORT0_CNT_B                0xFFC4009C         /* SPORT0 'B' Frame Sync And Clock Divisor Current Count */
+#define SPORT0_ERR_B                0xFFC400A0         /* SPORT0 'B' Error Register */
+#define SPORT0_MSTAT_B              0xFFC400A4         /* SPORT0 'B' Multichannel Mode Status Register */
+#define SPORT0_CTL2_B               0xFFC400A8         /* SPORT0 'B' Control Register 2 */
+#define SPORT0_TXPRI_B              0xFFC400C0         /* SPORT0 'B' Primary Channel Transmit Buffer Register */
+#define SPORT0_RXPRI_B              0xFFC400C4         /* SPORT0 'B' Primary Channel Receive Buffer Register */
+#define SPORT0_TXSEC_B              0xFFC400C8         /* SPORT0 'B' Secondary Channel Transmit Buffer Register */
+#define SPORT0_RXSEC_B              0xFFC400CC         /* SPORT0 'B' Secondary Channel Receive Buffer Register */
+
+/* =========================
+	SPORT1
+   ========================= */
+#define SPORT1_CTL_A                0xFFC40100         /* SPORT1 'A' Control Register */
+#define SPORT1_DIV_A                0xFFC40104         /* SPORT1 'A' Clock and FS Divide Register */
+#define SPORT1_MCTL_A               0xFFC40108         /* SPORT1 'A' Multichannel Control Register */
+#define SPORT1_CS0_A                0xFFC4010C         /* SPORT1 'A' Multichannel Select Register (Channels 0-31) */
+#define SPORT1_CS1_A                0xFFC40110         /* SPORT1 'A' Multichannel Select Register (Channels 32-63) */
+#define SPORT1_CS2_A                0xFFC40114         /* SPORT1 'A' Multichannel Select Register (Channels 64-95) */
+#define SPORT1_CS3_A                0xFFC40118         /* SPORT1 'A' Multichannel Select Register (Channels 96-127) */
+#define SPORT1_CNT_A                0xFFC4011C         /* SPORT1 'A' Frame Sync And Clock Divisor Current Count */
+#define SPORT1_ERR_A                0xFFC40120         /* SPORT1 'A' Error Register */
+#define SPORT1_MSTAT_A              0xFFC40124         /* SPORT1 'A' Multichannel Mode Status Register */
+#define SPORT1_CTL2_A               0xFFC40128         /* SPORT1 'A' Control Register 2 */
+#define SPORT1_TXPRI_A              0xFFC40140         /* SPORT1 'A' Primary Channel Transmit Buffer Register */
+#define SPORT1_RXPRI_A              0xFFC40144         /* SPORT1 'A' Primary Channel Receive Buffer Register */
+#define SPORT1_TXSEC_A              0xFFC40148         /* SPORT1 'A' Secondary Channel Transmit Buffer Register */
+#define SPORT1_RXSEC_A              0xFFC4014C         /* SPORT1 'A' Secondary Channel Receive Buffer Register */
+#define SPORT1_CTL_B                0xFFC40180         /* SPORT1 'B' Control Register */
+#define SPORT1_DIV_B                0xFFC40184         /* SPORT1 'B' Clock and FS Divide Register */
+#define SPORT1_MCTL_B               0xFFC40188         /* SPORT1 'B' Multichannel Control Register */
+#define SPORT1_CS0_B                0xFFC4018C         /* SPORT1 'B' Multichannel Select Register (Channels 0-31) */
+#define SPORT1_CS1_B                0xFFC40190         /* SPORT1 'B' Multichannel Select Register (Channels 32-63) */
+#define SPORT1_CS2_B                0xFFC40194         /* SPORT1 'B' Multichannel Select Register (Channels 64-95) */
+#define SPORT1_CS3_B                0xFFC40198         /* SPORT1 'B' Multichannel Select Register (Channels 96-127) */
+#define SPORT1_CNT_B                0xFFC4019C         /* SPORT1 'B' Frame Sync And Clock Divisor Current Count */
+#define SPORT1_ERR_B                0xFFC401A0         /* SPORT1 'B' Error Register */
+#define SPORT1_MSTAT_B              0xFFC401A4         /* SPORT1 'B' Multichannel Mode Status Register */
+#define SPORT1_CTL2_B               0xFFC401A8         /* SPORT1 'B' Control Register 2 */
+#define SPORT1_TXPRI_B              0xFFC401C0         /* SPORT1 'B' Primary Channel Transmit Buffer Register */
+#define SPORT1_RXPRI_B              0xFFC401C4         /* SPORT1 'B' Primary Channel Receive Buffer Register */
+#define SPORT1_TXSEC_B              0xFFC401C8         /* SPORT1 'B' Secondary Channel Transmit Buffer Register */
+#define SPORT1_RXSEC_B              0xFFC401CC         /* SPORT1 'B' Secondary Channel Receive Buffer Register */
+
+/* =========================
+	SPORT2
+   ========================= */
+#define SPORT2_CTL_A                0xFFC40200         /* SPORT2 'A' Control Register */
+#define SPORT2_DIV_A                0xFFC40204         /* SPORT2 'A' Clock and FS Divide Register */
+#define SPORT2_MCTL_A               0xFFC40208         /* SPORT2 'A' Multichannel Control Register */
+#define SPORT2_CS0_A                0xFFC4020C         /* SPORT2 'A' Multichannel Select Register (Channels 0-31) */
+#define SPORT2_CS1_A                0xFFC40210         /* SPORT2 'A' Multichannel Select Register (Channels 32-63) */
+#define SPORT2_CS2_A                0xFFC40214         /* SPORT2 'A' Multichannel Select Register (Channels 64-95) */
+#define SPORT2_CS3_A                0xFFC40218         /* SPORT2 'A' Multichannel Select Register (Channels 96-127) */
+#define SPORT2_CNT_A                0xFFC4021C         /* SPORT2 'A' Frame Sync And Clock Divisor Current Count */
+#define SPORT2_ERR_A                0xFFC40220         /* SPORT2 'A' Error Register */
+#define SPORT2_MSTAT_A              0xFFC40224         /* SPORT2 'A' Multichannel Mode Status Register */
+#define SPORT2_CTL2_A               0xFFC40228         /* SPORT2 'A' Control Register 2 */
+#define SPORT2_TXPRI_A              0xFFC40240         /* SPORT2 'A' Primary Channel Transmit Buffer Register */
+#define SPORT2_RXPRI_A              0xFFC40244         /* SPORT2 'A' Primary Channel Receive Buffer Register */
+#define SPORT2_TXSEC_A              0xFFC40248         /* SPORT2 'A' Secondary Channel Transmit Buffer Register */
+#define SPORT2_RXSEC_A              0xFFC4024C         /* SPORT2 'A' Secondary Channel Receive Buffer Register */
+#define SPORT2_CTL_B                0xFFC40280         /* SPORT2 'B' Control Register */
+#define SPORT2_DIV_B                0xFFC40284         /* SPORT2 'B' Clock and FS Divide Register */
+#define SPORT2_MCTL_B               0xFFC40288         /* SPORT2 'B' Multichannel Control Register */
+#define SPORT2_CS0_B                0xFFC4028C         /* SPORT2 'B' Multichannel Select Register (Channels 0-31) */
+#define SPORT2_CS1_B                0xFFC40290         /* SPORT2 'B' Multichannel Select Register (Channels 32-63) */
+#define SPORT2_CS2_B                0xFFC40294         /* SPORT2 'B' Multichannel Select Register (Channels 64-95) */
+#define SPORT2_CS3_B                0xFFC40298         /* SPORT2 'B' Multichannel Select Register (Channels 96-127) */
+#define SPORT2_CNT_B                0xFFC4029C         /* SPORT2 'B' Frame Sync And Clock Divisor Current Count */
+#define SPORT2_ERR_B                0xFFC402A0         /* SPORT2 'B' Error Register */
+#define SPORT2_MSTAT_B              0xFFC402A4         /* SPORT2 'B' Multichannel Mode Status Register */
+#define SPORT2_CTL2_B               0xFFC402A8         /* SPORT2 'B' Control Register 2 */
+#define SPORT2_TXPRI_B              0xFFC402C0         /* SPORT2 'B' Primary Channel Transmit Buffer Register */
+#define SPORT2_RXPRI_B              0xFFC402C4         /* SPORT2 'B' Primary Channel Receive Buffer Register */
+#define SPORT2_TXSEC_B              0xFFC402C8         /* SPORT2 'B' Secondary Channel Transmit Buffer Register */
+#define SPORT2_RXSEC_B              0xFFC402CC         /* SPORT2 'B' Secondary Channel Receive Buffer Register */
+
+/* =========================
+	EPPI Registers
+   ========================= */
+
+/* =========================
+	EPPI0
+   ========================= */
+#define EPPI0_STAT                  0xFFC18000         /* EPPI0 Status Register */
+#define EPPI0_HCNT                  0xFFC18004         /* EPPI0 Horizontal Transfer Count Register */
+#define EPPI0_HDLY                  0xFFC18008         /* EPPI0 Horizontal Delay Count Register */
+#define EPPI0_VCNT                  0xFFC1800C         /* EPPI0 Vertical Transfer Count Register */
+#define EPPI0_VDLY                  0xFFC18010         /* EPPI0 Vertical Delay Count Register */
+#define EPPI0_FRAME                 0xFFC18014         /* EPPI0 Lines Per Frame Register */
+#define EPPI0_LINE                  0xFFC18018         /* EPPI0 Samples Per Line Register */
+#define EPPI0_CLKDIV                0xFFC1801C         /* EPPI0 Clock Divide Register */
+#define EPPI0_CTL                   0xFFC18020         /* EPPI0 Control Register */
+#define EPPI0_FS1_WLHB              0xFFC18024         /* EPPI0 FS1 Width Register / EPPI Horizontal Blanking Samples Per Line Register */
+#define EPPI0_FS1_PASPL             0xFFC18028         /* EPPI0 FS1 Period Register / EPPI Active Samples Per Line Register */
+#define EPPI0_FS2_WLVB              0xFFC1802C         /* EPPI0 FS2 Width Register / EPPI Lines Of Vertical Blanking Register */
+#define EPPI0_FS2_PALPF             0xFFC18030         /* EPPI0 FS2 Period Register / EPPI Active Lines Per Field Register */
+#define EPPI0_IMSK                  0xFFC18034         /* EPPI0 Interrupt Mask Register */
+#define EPPI0_ODDCLIP               0xFFC1803C         /* EPPI0 Clipping Register for ODD (Chroma) Data */
+#define EPPI0_EVENCLIP              0xFFC18040         /* EPPI0 Clipping Register for EVEN (Luma) Data */
+#define EPPI0_FS1_DLY               0xFFC18044         /* EPPI0 Frame Sync 1 Delay Value */
+#define EPPI0_FS2_DLY               0xFFC18048         /* EPPI0 Frame Sync 2 Delay Value */
+#define EPPI0_CTL2                  0xFFC1804C         /* EPPI0 Control Register 2 */
+
+/* =========================
+	EPPI1
+   ========================= */
+#define EPPI1_STAT                  0xFFC18400         /* EPPI1 Status Register */
+#define EPPI1_HCNT                  0xFFC18404         /* EPPI1 Horizontal Transfer Count Register */
+#define EPPI1_HDLY                  0xFFC18408         /* EPPI1 Horizontal Delay Count Register */
+#define EPPI1_VCNT                  0xFFC1840C         /* EPPI1 Vertical Transfer Count Register */
+#define EPPI1_VDLY                  0xFFC18410         /* EPPI1 Vertical Delay Count Register */
+#define EPPI1_FRAME                 0xFFC18414         /* EPPI1 Lines Per Frame Register */
+#define EPPI1_LINE                  0xFFC18418         /* EPPI1 Samples Per Line Register */
+#define EPPI1_CLKDIV                0xFFC1841C         /* EPPI1 Clock Divide Register */
+#define EPPI1_CTL                   0xFFC18420         /* EPPI1 Control Register */
+#define EPPI1_FS1_WLHB              0xFFC18424         /* EPPI1 FS1 Width Register / EPPI Horizontal Blanking Samples Per Line Register */
+#define EPPI1_FS1_PASPL             0xFFC18428         /* EPPI1 FS1 Period Register / EPPI Active Samples Per Line Register */
+#define EPPI1_FS2_WLVB              0xFFC1842C         /* EPPI1 FS2 Width Register / EPPI Lines Of Vertical Blanking Register */
+#define EPPI1_FS2_PALPF             0xFFC18430         /* EPPI1 FS2 Period Register / EPPI Active Lines Per Field Register */
+#define EPPI1_IMSK                  0xFFC18434         /* EPPI1 Interrupt Mask Register */
+#define EPPI1_ODDCLIP               0xFFC1843C         /* EPPI1 Clipping Register for ODD (Chroma) Data */
+#define EPPI1_EVENCLIP              0xFFC18440         /* EPPI1 Clipping Register for EVEN (Luma) Data */
+#define EPPI1_FS1_DLY               0xFFC18444         /* EPPI1 Frame Sync 1 Delay Value */
+#define EPPI1_FS2_DLY               0xFFC18448         /* EPPI1 Frame Sync 2 Delay Value */
+#define EPPI1_CTL2                  0xFFC1844C         /* EPPI1 Control Register 2 */
+
+/* =========================
+	EPPI2
+   ========================= */
+#define EPPI2_STAT                  0xFFC18800         /* EPPI2 Status Register */
+#define EPPI2_HCNT                  0xFFC18804         /* EPPI2 Horizontal Transfer Count Register */
+#define EPPI2_HDLY                  0xFFC18808         /* EPPI2 Horizontal Delay Count Register */
+#define EPPI2_VCNT                  0xFFC1880C         /* EPPI2 Vertical Transfer Count Register */
+#define EPPI2_VDLY                  0xFFC18810         /* EPPI2 Vertical Delay Count Register */
+#define EPPI2_FRAME                 0xFFC18814         /* EPPI2 Lines Per Frame Register */
+#define EPPI2_LINE                  0xFFC18818         /* EPPI2 Samples Per Line Register */
+#define EPPI2_CLKDIV                0xFFC1881C         /* EPPI2 Clock Divide Register */
+#define EPPI2_CTL                   0xFFC18820         /* EPPI2 Control Register */
+#define EPPI2_FS1_WLHB              0xFFC18824         /* EPPI2 FS1 Width Register / EPPI Horizontal Blanking Samples Per Line Register */
+#define EPPI2_FS1_PASPL             0xFFC18828         /* EPPI2 FS1 Period Register / EPPI Active Samples Per Line Register */
+#define EPPI2_FS2_WLVB              0xFFC1882C         /* EPPI2 FS2 Width Register / EPPI Lines Of Vertical Blanking Register */
+#define EPPI2_FS2_PALPF             0xFFC18830         /* EPPI2 FS2 Period Register / EPPI Active Lines Per Field Register */
+#define EPPI2_IMSK                  0xFFC18834         /* EPPI2 Interrupt Mask Register */
+#define EPPI2_ODDCLIP               0xFFC1883C         /* EPPI2 Clipping Register for ODD (Chroma) Data */
+#define EPPI2_EVENCLIP              0xFFC18840         /* EPPI2 Clipping Register for EVEN (Luma) Data */
+#define EPPI2_FS1_DLY               0xFFC18844         /* EPPI2 Frame Sync 1 Delay Value */
+#define EPPI2_FS2_DLY               0xFFC18848         /* EPPI2 Frame Sync 2 Delay Value */
+#define EPPI2_CTL2                  0xFFC1884C         /* EPPI2 Control Register 2 */
+
+
+
+/* =========================
+        DDE Registers
+   ========================= */
+
+/* =========================
+        DMA0
+   ========================= */
+#define DMA0_NEXT_DESC_PTR          0xFFC41000         /* DMA0 Pointer to Next Initial Descriptor */
+#define DMA0_START_ADDR             0xFFC41004         /* DMA0 Start Address of Current Buffer */
+#define DMA0_CONFIG                 0xFFC41008         /* DMA0 Configuration Register */
+#define DMA0_X_COUNT                0xFFC4100C         /* DMA0 Inner Loop Count Start Value */
+#define DMA0_X_MODIFY               0xFFC41010         /* DMA0 Inner Loop Address Increment */
+#define DMA0_Y_COUNT                0xFFC41014         /* DMA0 Outer Loop Count Start Value (2D only) */
+#define DMA0_Y_MODIFY               0xFFC41018         /* DMA0 Outer Loop Address Increment (2D only) */
+#define DMA0_CURR_DESC_PTR          0xFFC41024         /* DMA0 Current Descriptor Pointer */
+#define DMA0_PREV_DESC_PTR          0xFFC41028         /* DMA0 Previous Initial Descriptor Pointer */
+#define DMA0_CURR_ADDR              0xFFC4102C         /* DMA0 Current Address */
+#define DMA0_IRQ_STATUS             0xFFC41030         /* DMA0 Status Register */
+#define DMA0_CURR_X_COUNT           0xFFC41034         /* DMA0 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA0_CURR_Y_COUNT           0xFFC41038         /* DMA0 Current Row Count (2D only) */
+#define DMA0_BWL_COUNT              0xFFC41040         /* DMA0 Bandwidth Limit Count */
+#define DMA0_CURR_BWL_COUNT         0xFFC41044         /* DMA0 Bandwidth Limit Count Current */
+#define DMA0_BWM_COUNT              0xFFC41048         /* DMA0 Bandwidth Monitor Count */
+#define DMA0_CURR_BWM_COUNT         0xFFC4104C         /* DMA0 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA1
+   ========================= */
+#define DMA1_NEXT_DESC_PTR          0xFFC41080         /* DMA1 Pointer to Next Initial Descriptor */
+#define DMA1_START_ADDR             0xFFC41084         /* DMA1 Start Address of Current Buffer */
+#define DMA1_CONFIG                 0xFFC41088         /* DMA1 Configuration Register */
+#define DMA1_X_COUNT                0xFFC4108C         /* DMA1 Inner Loop Count Start Value */
+#define DMA1_X_MODIFY               0xFFC41090         /* DMA1 Inner Loop Address Increment */
+#define DMA1_Y_COUNT                0xFFC41094         /* DMA1 Outer Loop Count Start Value (2D only) */
+#define DMA1_Y_MODIFY               0xFFC41098         /* DMA1 Outer Loop Address Increment (2D only) */
+#define DMA1_CURR_DESC_PTR          0xFFC410A4         /* DMA1 Current Descriptor Pointer */
+#define DMA1_PREV_DESC_PTR          0xFFC410A8         /* DMA1 Previous Initial Descriptor Pointer */
+#define DMA1_CURR_ADDR              0xFFC410AC         /* DMA1 Current Address */
+#define DMA1_IRQ_STATUS             0xFFC410B0         /* DMA1 Status Register */
+#define DMA1_CURR_X_COUNT           0xFFC410B4         /* DMA1 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA1_CURR_Y_COUNT           0xFFC410B8         /* DMA1 Current Row Count (2D only) */
+#define DMA1_BWL_COUNT              0xFFC410C0         /* DMA1 Bandwidth Limit Count */
+#define DMA1_CURR_BWL_COUNT         0xFFC410C4         /* DMA1 Bandwidth Limit Count Current */
+#define DMA1_BWM_COUNT              0xFFC410C8         /* DMA1 Bandwidth Monitor Count */
+#define DMA1_CURR_BWM_COUNT         0xFFC410CC         /* DMA1 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA2
+   ========================= */
+#define DMA2_NEXT_DESC_PTR          0xFFC41100         /* DMA2 Pointer to Next Initial Descriptor */
+#define DMA2_START_ADDR             0xFFC41104         /* DMA2 Start Address of Current Buffer */
+#define DMA2_CONFIG                 0xFFC41108         /* DMA2 Configuration Register */
+#define DMA2_X_COUNT                0xFFC4110C         /* DMA2 Inner Loop Count Start Value */
+#define DMA2_X_MODIFY               0xFFC41110         /* DMA2 Inner Loop Address Increment */
+#define DMA2_Y_COUNT                0xFFC41114         /* DMA2 Outer Loop Count Start Value (2D only) */
+#define DMA2_Y_MODIFY               0xFFC41118         /* DMA2 Outer Loop Address Increment (2D only) */
+#define DMA2_CURR_DESC_PTR          0xFFC41124         /* DMA2 Current Descriptor Pointer */
+#define DMA2_PREV_DESC_PTR          0xFFC41128         /* DMA2 Previous Initial Descriptor Pointer */
+#define DMA2_CURR_ADDR              0xFFC4112C         /* DMA2 Current Address */
+#define DMA2_IRQ_STATUS             0xFFC41130         /* DMA2 Status Register */
+#define DMA2_CURR_X_COUNT           0xFFC41134         /* DMA2 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA2_CURR_Y_COUNT           0xFFC41138         /* DMA2 Current Row Count (2D only) */
+#define DMA2_BWL_COUNT              0xFFC41140         /* DMA2 Bandwidth Limit Count */
+#define DMA2_CURR_BWL_COUNT         0xFFC41144         /* DMA2 Bandwidth Limit Count Current */
+#define DMA2_BWM_COUNT              0xFFC41148         /* DMA2 Bandwidth Monitor Count */
+#define DMA2_CURR_BWM_COUNT         0xFFC4114C         /* DMA2 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA3
+   ========================= */
+#define DMA3_NEXT_DESC_PTR          0xFFC41180         /* DMA3 Pointer to Next Initial Descriptor */
+#define DMA3_START_ADDR             0xFFC41184         /* DMA3 Start Address of Current Buffer */
+#define DMA3_CONFIG                 0xFFC41188         /* DMA3 Configuration Register */
+#define DMA3_X_COUNT                0xFFC4118C         /* DMA3 Inner Loop Count Start Value */
+#define DMA3_X_MODIFY               0xFFC41190         /* DMA3 Inner Loop Address Increment */
+#define DMA3_Y_COUNT                0xFFC41194         /* DMA3 Outer Loop Count Start Value (2D only) */
+#define DMA3_Y_MODIFY               0xFFC41198         /* DMA3 Outer Loop Address Increment (2D only) */
+#define DMA3_CURR_DESC_PTR          0xFFC411A4         /* DMA3 Current Descriptor Pointer */
+#define DMA3_PREV_DESC_PTR          0xFFC411A8         /* DMA3 Previous Initial Descriptor Pointer */
+#define DMA3_CURR_ADDR              0xFFC411AC         /* DMA3 Current Address */
+#define DMA3_IRQ_STATUS             0xFFC411B0         /* DMA3 Status Register */
+#define DMA3_CURR_X_COUNT           0xFFC411B4         /* DMA3 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA3_CURR_Y_COUNT           0xFFC411B8         /* DMA3 Current Row Count (2D only) */
+#define DMA3_BWL_COUNT              0xFFC411C0         /* DMA3 Bandwidth Limit Count */
+#define DMA3_CURR_BWL_COUNT         0xFFC411C4         /* DMA3 Bandwidth Limit Count Current */
+#define DMA3_BWM_COUNT              0xFFC411C8         /* DMA3 Bandwidth Monitor Count */
+#define DMA3_CURR_BWM_COUNT         0xFFC411CC         /* DMA3 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA4
+   ========================= */
+#define DMA4_NEXT_DESC_PTR          0xFFC41200         /* DMA4 Pointer to Next Initial Descriptor */
+#define DMA4_START_ADDR             0xFFC41204         /* DMA4 Start Address of Current Buffer */
+#define DMA4_CONFIG                 0xFFC41208         /* DMA4 Configuration Register */
+#define DMA4_X_COUNT                0xFFC4120C         /* DMA4 Inner Loop Count Start Value */
+#define DMA4_X_MODIFY               0xFFC41210         /* DMA4 Inner Loop Address Increment */
+#define DMA4_Y_COUNT                0xFFC41214         /* DMA4 Outer Loop Count Start Value (2D only) */
+#define DMA4_Y_MODIFY               0xFFC41218         /* DMA4 Outer Loop Address Increment (2D only) */
+#define DMA4_CURR_DESC_PTR          0xFFC41224         /* DMA4 Current Descriptor Pointer */
+#define DMA4_PREV_DESC_PTR          0xFFC41228         /* DMA4 Previous Initial Descriptor Pointer */
+#define DMA4_CURR_ADDR              0xFFC4122C         /* DMA4 Current Address */
+#define DMA4_IRQ_STATUS             0xFFC41230         /* DMA4 Status Register */
+#define DMA4_CURR_X_COUNT           0xFFC41234         /* DMA4 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA4_CURR_Y_COUNT           0xFFC41238         /* DMA4 Current Row Count (2D only) */
+#define DMA4_BWL_COUNT              0xFFC41240         /* DMA4 Bandwidth Limit Count */
+#define DMA4_CURR_BWL_COUNT         0xFFC41244         /* DMA4 Bandwidth Limit Count Current */
+#define DMA4_BWM_COUNT              0xFFC41248         /* DMA4 Bandwidth Monitor Count */
+#define DMA4_CURR_BWM_COUNT         0xFFC4124C         /* DMA4 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA5
+   ========================= */
+#define DMA5_NEXT_DESC_PTR          0xFFC41280         /* DMA5 Pointer to Next Initial Descriptor */
+#define DMA5_START_ADDR             0xFFC41284         /* DMA5 Start Address of Current Buffer */
+#define DMA5_CONFIG                 0xFFC41288         /* DMA5 Configuration Register */
+#define DMA5_X_COUNT                0xFFC4128C         /* DMA5 Inner Loop Count Start Value */
+#define DMA5_X_MODIFY               0xFFC41290         /* DMA5 Inner Loop Address Increment */
+#define DMA5_Y_COUNT                0xFFC41294         /* DMA5 Outer Loop Count Start Value (2D only) */
+#define DMA5_Y_MODIFY               0xFFC41298         /* DMA5 Outer Loop Address Increment (2D only) */
+#define DMA5_CURR_DESC_PTR          0xFFC412A4         /* DMA5 Current Descriptor Pointer */
+#define DMA5_PREV_DESC_PTR          0xFFC412A8         /* DMA5 Previous Initial Descriptor Pointer */
+#define DMA5_CURR_ADDR              0xFFC412AC         /* DMA5 Current Address */
+#define DMA5_IRQ_STATUS             0xFFC412B0         /* DMA5 Status Register */
+#define DMA5_CURR_X_COUNT           0xFFC412B4         /* DMA5 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA5_CURR_Y_COUNT           0xFFC412B8         /* DMA5 Current Row Count (2D only) */
+#define DMA5_BWL_COUNT              0xFFC412C0         /* DMA5 Bandwidth Limit Count */
+#define DMA5_CURR_BWL_COUNT         0xFFC412C4         /* DMA5 Bandwidth Limit Count Current */
+#define DMA5_BWM_COUNT              0xFFC412C8         /* DMA5 Bandwidth Monitor Count */
+#define DMA5_CURR_BWM_COUNT         0xFFC412CC         /* DMA5 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA6
+   ========================= */
+#define DMA6_NEXT_DESC_PTR          0xFFC41300         /* DMA6 Pointer to Next Initial Descriptor */
+#define DMA6_START_ADDR             0xFFC41304         /* DMA6 Start Address of Current Buffer */
+#define DMA6_CONFIG                 0xFFC41308         /* DMA6 Configuration Register */
+#define DMA6_X_COUNT                0xFFC4130C         /* DMA6 Inner Loop Count Start Value */
+#define DMA6_X_MODIFY               0xFFC41310         /* DMA6 Inner Loop Address Increment */
+#define DMA6_Y_COUNT                0xFFC41314         /* DMA6 Outer Loop Count Start Value (2D only) */
+#define DMA6_Y_MODIFY               0xFFC41318         /* DMA6 Outer Loop Address Increment (2D only) */
+#define DMA6_CURR_DESC_PTR          0xFFC41324         /* DMA6 Current Descriptor Pointer */
+#define DMA6_PREV_DESC_PTR          0xFFC41328         /* DMA6 Previous Initial Descriptor Pointer */
+#define DMA6_CURR_ADDR              0xFFC4132C         /* DMA6 Current Address */
+#define DMA6_IRQ_STATUS             0xFFC41330         /* DMA6 Status Register */
+#define DMA6_CURR_X_COUNT           0xFFC41334         /* DMA6 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA6_CURR_Y_COUNT           0xFFC41338         /* DMA6 Current Row Count (2D only) */
+#define DMA6_BWL_COUNT              0xFFC41340         /* DMA6 Bandwidth Limit Count */
+#define DMA6_CURR_BWL_COUNT         0xFFC41344         /* DMA6 Bandwidth Limit Count Current */
+#define DMA6_BWM_COUNT              0xFFC41348         /* DMA6 Bandwidth Monitor Count */
+#define DMA6_CURR_BWM_COUNT         0xFFC4134C         /* DMA6 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA7
+   ========================= */
+#define DMA7_NEXT_DESC_PTR          0xFFC41380         /* DMA7 Pointer to Next Initial Descriptor */
+#define DMA7_START_ADDR             0xFFC41384         /* DMA7 Start Address of Current Buffer */
+#define DMA7_CONFIG                 0xFFC41388         /* DMA7 Configuration Register */
+#define DMA7_X_COUNT                0xFFC4138C         /* DMA7 Inner Loop Count Start Value */
+#define DMA7_X_MODIFY               0xFFC41390         /* DMA7 Inner Loop Address Increment */
+#define DMA7_Y_COUNT                0xFFC41394         /* DMA7 Outer Loop Count Start Value (2D only) */
+#define DMA7_Y_MODIFY               0xFFC41398         /* DMA7 Outer Loop Address Increment (2D only) */
+#define DMA7_CURR_DESC_PTR          0xFFC413A4         /* DMA7 Current Descriptor Pointer */
+#define DMA7_PREV_DESC_PTR          0xFFC413A8         /* DMA7 Previous Initial Descriptor Pointer */
+#define DMA7_CURR_ADDR              0xFFC413AC         /* DMA7 Current Address */
+#define DMA7_IRQ_STATUS             0xFFC413B0         /* DMA7 Status Register */
+#define DMA7_CURR_X_COUNT           0xFFC413B4         /* DMA7 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA7_CURR_Y_COUNT           0xFFC413B8         /* DMA7 Current Row Count (2D only) */
+#define DMA7_BWL_COUNT              0xFFC413C0         /* DMA7 Bandwidth Limit Count */
+#define DMA7_CURR_BWL_COUNT         0xFFC413C4         /* DMA7 Bandwidth Limit Count Current */
+#define DMA7_BWM_COUNT              0xFFC413C8         /* DMA7 Bandwidth Monitor Count */
+#define DMA7_CURR_BWM_COUNT         0xFFC413CC         /* DMA7 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA8
+   ========================= */
+#define DMA8_NEXT_DESC_PTR          0xFFC41400         /* DMA8 Pointer to Next Initial Descriptor */
+#define DMA8_START_ADDR             0xFFC41404         /* DMA8 Start Address of Current Buffer */
+#define DMA8_CONFIG                 0xFFC41408         /* DMA8 Configuration Register */
+#define DMA8_X_COUNT                0xFFC4140C         /* DMA8 Inner Loop Count Start Value */
+#define DMA8_X_MODIFY               0xFFC41410         /* DMA8 Inner Loop Address Increment */
+#define DMA8_Y_COUNT                0xFFC41414         /* DMA8 Outer Loop Count Start Value (2D only) */
+#define DMA8_Y_MODIFY               0xFFC41418         /* DMA8 Outer Loop Address Increment (2D only) */
+#define DMA8_CURR_DESC_PTR          0xFFC41424         /* DMA8 Current Descriptor Pointer */
+#define DMA8_PREV_DESC_PTR          0xFFC41428         /* DMA8 Previous Initial Descriptor Pointer */
+#define DMA8_CURR_ADDR              0xFFC4142C         /* DMA8 Current Address */
+#define DMA8_IRQ_STATUS             0xFFC41430         /* DMA8 Status Register */
+#define DMA8_CURR_X_COUNT           0xFFC41434         /* DMA8 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA8_CURR_Y_COUNT           0xFFC41438         /* DMA8 Current Row Count (2D only) */
+#define DMA8_BWL_COUNT              0xFFC41440         /* DMA8 Bandwidth Limit Count */
+#define DMA8_CURR_BWL_COUNT         0xFFC41444         /* DMA8 Bandwidth Limit Count Current */
+#define DMA8_BWM_COUNT              0xFFC41448         /* DMA8 Bandwidth Monitor Count */
+#define DMA8_CURR_BWM_COUNT         0xFFC4144C         /* DMA8 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA9
+   ========================= */
+#define DMA9_NEXT_DESC_PTR          0xFFC41480         /* DMA9 Pointer to Next Initial Descriptor */
+#define DMA9_START_ADDR             0xFFC41484         /* DMA9 Start Address of Current Buffer */
+#define DMA9_CONFIG                 0xFFC41488         /* DMA9 Configuration Register */
+#define DMA9_X_COUNT                0xFFC4148C         /* DMA9 Inner Loop Count Start Value */
+#define DMA9_X_MODIFY               0xFFC41490         /* DMA9 Inner Loop Address Increment */
+#define DMA9_Y_COUNT                0xFFC41494         /* DMA9 Outer Loop Count Start Value (2D only) */
+#define DMA9_Y_MODIFY               0xFFC41498         /* DMA9 Outer Loop Address Increment (2D only) */
+#define DMA9_CURR_DESC_PTR          0xFFC414A4         /* DMA9 Current Descriptor Pointer */
+#define DMA9_PREV_DESC_PTR          0xFFC414A8         /* DMA9 Previous Initial Descriptor Pointer */
+#define DMA9_CURR_ADDR              0xFFC414AC         /* DMA9 Current Address */
+#define DMA9_IRQ_STATUS             0xFFC414B0         /* DMA9 Status Register */
+#define DMA9_CURR_X_COUNT           0xFFC414B4         /* DMA9 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA9_CURR_Y_COUNT           0xFFC414B8         /* DMA9 Current Row Count (2D only) */
+#define DMA9_BWL_COUNT              0xFFC414C0         /* DMA9 Bandwidth Limit Count */
+#define DMA9_CURR_BWL_COUNT         0xFFC414C4         /* DMA9 Bandwidth Limit Count Current */
+#define DMA9_BWM_COUNT              0xFFC414C8         /* DMA9 Bandwidth Monitor Count */
+#define DMA9_CURR_BWM_COUNT         0xFFC414CC         /* DMA9 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA10
+   ========================= */
+#define DMA10_NEXT_DESC_PTR         0xFFC05000         /* DMA10 Pointer to Next Initial Descriptor */
+#define DMA10_START_ADDR            0xFFC05004         /* DMA10 Start Address of Current Buffer */
+#define DMA10_CONFIG                0xFFC05008         /* DMA10 Configuration Register */
+#define DMA10_X_COUNT               0xFFC0500C         /* DMA10 Inner Loop Count Start Value */
+#define DMA10_X_MODIFY              0xFFC05010         /* DMA10 Inner Loop Address Increment */
+#define DMA10_Y_COUNT               0xFFC05014         /* DMA10 Outer Loop Count Start Value (2D only) */
+#define DMA10_Y_MODIFY              0xFFC05018         /* DMA10 Outer Loop Address Increment (2D only) */
+#define DMA10_CURR_DESC_PTR         0xFFC05024         /* DMA10 Current Descriptor Pointer */
+#define DMA10_PREV_DESC_PTR         0xFFC05028         /* DMA10 Previous Initial Descriptor Pointer */
+#define DMA10_CURR_ADDR             0xFFC0502C         /* DMA10 Current Address */
+#define DMA10_IRQ_STATUS            0xFFC05030         /* DMA10 Status Register */
+#define DMA10_CURR_X_COUNT          0xFFC05034         /* DMA10 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA10_CURR_Y_COUNT          0xFFC05038         /* DMA10 Current Row Count (2D only) */
+#define DMA10_BWL_COUNT             0xFFC05040         /* DMA10 Bandwidth Limit Count */
+#define DMA10_CURR_BWL_COUNT        0xFFC05044         /* DMA10 Bandwidth Limit Count Current */
+#define DMA10_BWM_COUNT             0xFFC05048         /* DMA10 Bandwidth Monitor Count */
+#define DMA10_CURR_BWM_COUNT        0xFFC0504C         /* DMA10 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA11
+   ========================= */
+#define DMA11_NEXT_DESC_PTR         0xFFC05080         /* DMA11 Pointer to Next Initial Descriptor */
+#define DMA11_START_ADDR            0xFFC05084         /* DMA11 Start Address of Current Buffer */
+#define DMA11_CONFIG                0xFFC05088         /* DMA11 Configuration Register */
+#define DMA11_X_COUNT               0xFFC0508C         /* DMA11 Inner Loop Count Start Value */
+#define DMA11_X_MODIFY              0xFFC05090         /* DMA11 Inner Loop Address Increment */
+#define DMA11_Y_COUNT               0xFFC05094         /* DMA11 Outer Loop Count Start Value (2D only) */
+#define DMA11_Y_MODIFY              0xFFC05098         /* DMA11 Outer Loop Address Increment (2D only) */
+#define DMA11_CURR_DESC_PTR         0xFFC050A4         /* DMA11 Current Descriptor Pointer */
+#define DMA11_PREV_DESC_PTR         0xFFC050A8         /* DMA11 Previous Initial Descriptor Pointer */
+#define DMA11_CURR_ADDR             0xFFC050AC         /* DMA11 Current Address */
+#define DMA11_IRQ_STATUS            0xFFC050B0         /* DMA11 Status Register */
+#define DMA11_CURR_X_COUNT          0xFFC050B4         /* DMA11 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA11_CURR_Y_COUNT          0xFFC050B8         /* DMA11 Current Row Count (2D only) */
+#define DMA11_BWL_COUNT             0xFFC050C0         /* DMA11 Bandwidth Limit Count */
+#define DMA11_CURR_BWL_COUNT        0xFFC050C4         /* DMA11 Bandwidth Limit Count Current */
+#define DMA11_BWM_COUNT             0xFFC050C8         /* DMA11 Bandwidth Monitor Count */
+#define DMA11_CURR_BWM_COUNT        0xFFC050CC         /* DMA11 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA12
+   ========================= */
+#define DMA12_NEXT_DESC_PTR         0xFFC05100         /* DMA12 Pointer to Next Initial Descriptor */
+#define DMA12_START_ADDR            0xFFC05104         /* DMA12 Start Address of Current Buffer */
+#define DMA12_CONFIG                0xFFC05108         /* DMA12 Configuration Register */
+#define DMA12_X_COUNT               0xFFC0510C         /* DMA12 Inner Loop Count Start Value */
+#define DMA12_X_MODIFY              0xFFC05110         /* DMA12 Inner Loop Address Increment */
+#define DMA12_Y_COUNT               0xFFC05114         /* DMA12 Outer Loop Count Start Value (2D only) */
+#define DMA12_Y_MODIFY              0xFFC05118         /* DMA12 Outer Loop Address Increment (2D only) */
+#define DMA12_CURR_DESC_PTR         0xFFC05124         /* DMA12 Current Descriptor Pointer */
+#define DMA12_PREV_DESC_PTR         0xFFC05128         /* DMA12 Previous Initial Descriptor Pointer */
+#define DMA12_CURR_ADDR             0xFFC0512C         /* DMA12 Current Address */
+#define DMA12_IRQ_STATUS            0xFFC05130         /* DMA12 Status Register */
+#define DMA12_CURR_X_COUNT          0xFFC05134         /* DMA12 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA12_CURR_Y_COUNT          0xFFC05138         /* DMA12 Current Row Count (2D only) */
+#define DMA12_BWL_COUNT             0xFFC05140         /* DMA12 Bandwidth Limit Count */
+#define DMA12_CURR_BWL_COUNT        0xFFC05144         /* DMA12 Bandwidth Limit Count Current */
+#define DMA12_BWM_COUNT             0xFFC05148         /* DMA12 Bandwidth Monitor Count */
+#define DMA12_CURR_BWM_COUNT        0xFFC0514C         /* DMA12 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA13
+   ========================= */
+#define DMA13_NEXT_DESC_PTR         0xFFC07000         /* DMA13 Pointer to Next Initial Descriptor */
+#define DMA13_START_ADDR            0xFFC07004         /* DMA13 Start Address of Current Buffer */
+#define DMA13_CONFIG                0xFFC07008         /* DMA13 Configuration Register */
+#define DMA13_X_COUNT               0xFFC0700C         /* DMA13 Inner Loop Count Start Value */
+#define DMA13_X_MODIFY              0xFFC07010         /* DMA13 Inner Loop Address Increment */
+#define DMA13_Y_COUNT               0xFFC07014         /* DMA13 Outer Loop Count Start Value (2D only) */
+#define DMA13_Y_MODIFY              0xFFC07018         /* DMA13 Outer Loop Address Increment (2D only) */
+#define DMA13_CURR_DESC_PTR         0xFFC07024         /* DMA13 Current Descriptor Pointer */
+#define DMA13_PREV_DESC_PTR         0xFFC07028         /* DMA13 Previous Initial Descriptor Pointer */
+#define DMA13_CURR_ADDR             0xFFC0702C         /* DMA13 Current Address */
+#define DMA13_IRQ_STATUS            0xFFC07030         /* DMA13 Status Register */
+#define DMA13_CURR_X_COUNT          0xFFC07034         /* DMA13 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA13_CURR_Y_COUNT          0xFFC07038         /* DMA13 Current Row Count (2D only) */
+#define DMA13_BWL_COUNT             0xFFC07040         /* DMA13 Bandwidth Limit Count */
+#define DMA13_CURR_BWL_COUNT        0xFFC07044         /* DMA13 Bandwidth Limit Count Current */
+#define DMA13_BWM_COUNT             0xFFC07048         /* DMA13 Bandwidth Monitor Count */
+#define DMA13_CURR_BWM_COUNT        0xFFC0704C         /* DMA13 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA14
+   ========================= */
+#define DMA14_NEXT_DESC_PTR         0xFFC07080         /* DMA14 Pointer to Next Initial Descriptor */
+#define DMA14_START_ADDR            0xFFC07084         /* DMA14 Start Address of Current Buffer */
+#define DMA14_CONFIG                0xFFC07088         /* DMA14 Configuration Register */
+#define DMA14_X_COUNT               0xFFC0708C         /* DMA14 Inner Loop Count Start Value */
+#define DMA14_X_MODIFY              0xFFC07090         /* DMA14 Inner Loop Address Increment */
+#define DMA14_Y_COUNT               0xFFC07094         /* DMA14 Outer Loop Count Start Value (2D only) */
+#define DMA14_Y_MODIFY              0xFFC07098         /* DMA14 Outer Loop Address Increment (2D only) */
+#define DMA14_CURR_DESC_PTR         0xFFC070A4         /* DMA14 Current Descriptor Pointer */
+#define DMA14_PREV_DESC_PTR         0xFFC070A8         /* DMA14 Previous Initial Descriptor Pointer */
+#define DMA14_CURR_ADDR             0xFFC070AC         /* DMA14 Current Address */
+#define DMA14_IRQ_STATUS            0xFFC070B0         /* DMA14 Status Register */
+#define DMA14_CURR_X_COUNT          0xFFC070B4         /* DMA14 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA14_CURR_Y_COUNT          0xFFC070B8         /* DMA14 Current Row Count (2D only) */
+#define DMA14_BWL_COUNT             0xFFC070C0         /* DMA14 Bandwidth Limit Count */
+#define DMA14_CURR_BWL_COUNT        0xFFC070C4         /* DMA14 Bandwidth Limit Count Current */
+#define DMA14_BWM_COUNT             0xFFC070C8         /* DMA14 Bandwidth Monitor Count */
+#define DMA14_CURR_BWM_COUNT        0xFFC070CC         /* DMA14 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA15
+   ========================= */
+#define DMA15_NEXT_DESC_PTR         0xFFC07100         /* DMA15 Pointer to Next Initial Descriptor */
+#define DMA15_START_ADDR            0xFFC07104         /* DMA15 Start Address of Current Buffer */
+#define DMA15_CONFIG                0xFFC07108         /* DMA15 Configuration Register */
+#define DMA15_X_COUNT               0xFFC0710C         /* DMA15 Inner Loop Count Start Value */
+#define DMA15_X_MODIFY              0xFFC07110         /* DMA15 Inner Loop Address Increment */
+#define DMA15_Y_COUNT               0xFFC07114         /* DMA15 Outer Loop Count Start Value (2D only) */
+#define DMA15_Y_MODIFY              0xFFC07118         /* DMA15 Outer Loop Address Increment (2D only) */
+#define DMA15_CURR_DESC_PTR         0xFFC07124         /* DMA15 Current Descriptor Pointer */
+#define DMA15_PREV_DESC_PTR         0xFFC07128         /* DMA15 Previous Initial Descriptor Pointer */
+#define DMA15_CURR_ADDR             0xFFC0712C         /* DMA15 Current Address */
+#define DMA15_IRQ_STATUS            0xFFC07130         /* DMA15 Status Register */
+#define DMA15_CURR_X_COUNT          0xFFC07134         /* DMA15 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA15_CURR_Y_COUNT          0xFFC07138         /* DMA15 Current Row Count (2D only) */
+#define DMA15_BWL_COUNT             0xFFC07140         /* DMA15 Bandwidth Limit Count */
+#define DMA15_CURR_BWL_COUNT        0xFFC07144         /* DMA15 Bandwidth Limit Count Current */
+#define DMA15_BWM_COUNT             0xFFC07148         /* DMA15 Bandwidth Monitor Count */
+#define DMA15_CURR_BWM_COUNT        0xFFC0714C         /* DMA15 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA16
+   ========================= */
+#define DMA16_NEXT_DESC_PTR         0xFFC07180         /* DMA16 Pointer to Next Initial Descriptor */
+#define DMA16_START_ADDR            0xFFC07184         /* DMA16 Start Address of Current Buffer */
+#define DMA16_CONFIG                0xFFC07188         /* DMA16 Configuration Register */
+#define DMA16_X_COUNT               0xFFC0718C         /* DMA16 Inner Loop Count Start Value */
+#define DMA16_X_MODIFY              0xFFC07190         /* DMA16 Inner Loop Address Increment */
+#define DMA16_Y_COUNT               0xFFC07194         /* DMA16 Outer Loop Count Start Value (2D only) */
+#define DMA16_Y_MODIFY              0xFFC07198         /* DMA16 Outer Loop Address Increment (2D only) */
+#define DMA16_CURR_DESC_PTR         0xFFC071A4         /* DMA16 Current Descriptor Pointer */
+#define DMA16_PREV_DESC_PTR         0xFFC071A8         /* DMA16 Previous Initial Descriptor Pointer */
+#define DMA16_CURR_ADDR             0xFFC071AC         /* DMA16 Current Address */
+#define DMA16_IRQ_STATUS            0xFFC071B0         /* DMA16 Status Register */
+#define DMA16_CURR_X_COUNT          0xFFC071B4         /* DMA16 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA16_CURR_Y_COUNT          0xFFC071B8         /* DMA16 Current Row Count (2D only) */
+#define DMA16_BWL_COUNT             0xFFC071C0         /* DMA16 Bandwidth Limit Count */
+#define DMA16_CURR_BWL_COUNT        0xFFC071C4         /* DMA16 Bandwidth Limit Count Current */
+#define DMA16_BWM_COUNT             0xFFC071C8         /* DMA16 Bandwidth Monitor Count */
+#define DMA16_CURR_BWM_COUNT        0xFFC071CC         /* DMA16 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA17
+   ========================= */
+#define DMA17_NEXT_DESC_PTR            0xFFC07200         /* DMA17 Pointer to Next Initial Descriptor */
+#define DMA17_START_ADDR             0xFFC07204         /* DMA17 Start Address of Current Buffer */
+#define DMA17_CONFIG                   0xFFC07208         /* DMA17 Configuration Register */
+#define DMA17_X_COUNT                  0xFFC0720C         /* DMA17 Inner Loop Count Start Value */
+#define DMA17_X_MODIFY                  0xFFC07210         /* DMA17 Inner Loop Address Increment */
+#define DMA17_Y_COUNT                  0xFFC07214         /* DMA17 Outer Loop Count Start Value (2D only) */
+#define DMA17_Y_MODIFY                  0xFFC07218         /* DMA17 Outer Loop Address Increment (2D only) */
+#define DMA17_CURR_DESC_PTR            0xFFC07224         /* DMA17 Current Descriptor Pointer */
+#define DMA17_PREV_DESC_PTR            0xFFC07228         /* DMA17 Previous Initial Descriptor Pointer */
+#define DMA17_CURR_ADDR              0xFFC0722C         /* DMA17 Current Address */
+#define DMA17_IRQ_STATUS                  0xFFC07230         /* DMA17 Status Register */
+#define DMA17_CURR_X_COUNT              0xFFC07234         /* DMA17 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA17_CURR_Y_COUNT              0xFFC07238         /* DMA17 Current Row Count (2D only) */
+#define DMA17_BWL_COUNT                0xFFC07240         /* DMA17 Bandwidth Limit Count */
+#define DMA17_CURR_BWL_COUNT            0xFFC07244         /* DMA17 Bandwidth Limit Count Current */
+#define DMA17_BWM_COUNT                0xFFC07248         /* DMA17 Bandwidth Monitor Count */
+#define DMA17_CURR_BWM_COUNT            0xFFC0724C         /* DMA17 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA18
+   ========================= */
+#define DMA18_NEXT_DESC_PTR            0xFFC07280         /* DMA18 Pointer to Next Initial Descriptor */
+#define DMA18_START_ADDR             0xFFC07284         /* DMA18 Start Address of Current Buffer */
+#define DMA18_CONFIG                   0xFFC07288         /* DMA18 Configuration Register */
+#define DMA18_X_COUNT                  0xFFC0728C         /* DMA18 Inner Loop Count Start Value */
+#define DMA18_X_MODIFY                  0xFFC07290         /* DMA18 Inner Loop Address Increment */
+#define DMA18_Y_COUNT                  0xFFC07294         /* DMA18 Outer Loop Count Start Value (2D only) */
+#define DMA18_Y_MODIFY                  0xFFC07298         /* DMA18 Outer Loop Address Increment (2D only) */
+#define DMA18_CURR_DESC_PTR            0xFFC072A4         /* DMA18 Current Descriptor Pointer */
+#define DMA18_PREV_DESC_PTR            0xFFC072A8         /* DMA18 Previous Initial Descriptor Pointer */
+#define DMA18_CURR_ADDR              0xFFC072AC         /* DMA18 Current Address */
+#define DMA18_IRQ_STATUS                  0xFFC072B0         /* DMA18 Status Register */
+#define DMA18_CURR_X_COUNT              0xFFC072B4         /* DMA18 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA18_CURR_Y_COUNT              0xFFC072B8         /* DMA18 Current Row Count (2D only) */
+#define DMA18_BWL_COUNT                0xFFC072C0         /* DMA18 Bandwidth Limit Count */
+#define DMA18_CURR_BWL_COUNT            0xFFC072C4         /* DMA18 Bandwidth Limit Count Current */
+#define DMA18_BWM_COUNT                0xFFC072C8         /* DMA18 Bandwidth Monitor Count */
+#define DMA18_CURR_BWM_COUNT            0xFFC072CC         /* DMA18 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA19
+   ========================= */
+#define DMA19_NEXT_DESC_PTR            0xFFC07300         /* DMA19 Pointer to Next Initial Descriptor */
+#define DMA19_START_ADDR             0xFFC07304         /* DMA19 Start Address of Current Buffer */
+#define DMA19_CONFIG                   0xFFC07308         /* DMA19 Configuration Register */
+#define DMA19_X_COUNT                  0xFFC0730C         /* DMA19 Inner Loop Count Start Value */
+#define DMA19_X_MODIFY                  0xFFC07310         /* DMA19 Inner Loop Address Increment */
+#define DMA19_Y_COUNT                  0xFFC07314         /* DMA19 Outer Loop Count Start Value (2D only) */
+#define DMA19_Y_MODIFY                  0xFFC07318         /* DMA19 Outer Loop Address Increment (2D only) */
+#define DMA19_CURR_DESC_PTR            0xFFC07324         /* DMA19 Current Descriptor Pointer */
+#define DMA19_PREV_DESC_PTR            0xFFC07328         /* DMA19 Previous Initial Descriptor Pointer */
+#define DMA19_CURR_ADDR              0xFFC0732C         /* DMA19 Current Address */
+#define DMA19_IRQ_STATUS                  0xFFC07330         /* DMA19 Status Register */
+#define DMA19_CURR_X_COUNT              0xFFC07334         /* DMA19 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA19_CURR_Y_COUNT              0xFFC07338         /* DMA19 Current Row Count (2D only) */
+#define DMA19_BWL_COUNT                0xFFC07340         /* DMA19 Bandwidth Limit Count */
+#define DMA19_CURR_BWL_COUNT            0xFFC07344         /* DMA19 Bandwidth Limit Count Current */
+#define DMA19_BWM_COUNT                0xFFC07348         /* DMA19 Bandwidth Monitor Count */
+#define DMA19_CURR_BWM_COUNT            0xFFC0734C         /* DMA19 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA20
+   ========================= */
+#define DMA20_NEXT_DESC_PTR            0xFFC07380         /* DMA20 Pointer to Next Initial Descriptor */
+#define DMA20_START_ADDR             0xFFC07384         /* DMA20 Start Address of Current Buffer */
+#define DMA20_CONFIG                   0xFFC07388         /* DMA20 Configuration Register */
+#define DMA20_X_COUNT                  0xFFC0738C         /* DMA20 Inner Loop Count Start Value */
+#define DMA20_X_MODIFY                  0xFFC07390         /* DMA20 Inner Loop Address Increment */
+#define DMA20_Y_COUNT                  0xFFC07394         /* DMA20 Outer Loop Count Start Value (2D only) */
+#define DMA20_Y_MODIFY                  0xFFC07398         /* DMA20 Outer Loop Address Increment (2D only) */
+#define DMA20_CURR_DESC_PTR            0xFFC073A4         /* DMA20 Current Descriptor Pointer */
+#define DMA20_PREV_DESC_PTR            0xFFC073A8         /* DMA20 Previous Initial Descriptor Pointer */
+#define DMA20_CURR_ADDR              0xFFC073AC         /* DMA20 Current Address */
+#define DMA20_IRQ_STATUS                  0xFFC073B0         /* DMA20 Status Register */
+#define DMA20_CURR_X_COUNT              0xFFC073B4         /* DMA20 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA20_CURR_Y_COUNT              0xFFC073B8         /* DMA20 Current Row Count (2D only) */
+#define DMA20_BWL_COUNT                0xFFC073C0         /* DMA20 Bandwidth Limit Count */
+#define DMA20_CURR_BWL_COUNT            0xFFC073C4         /* DMA20 Bandwidth Limit Count Current */
+#define DMA20_BWM_COUNT                0xFFC073C8         /* DMA20 Bandwidth Monitor Count */
+#define DMA20_CURR_BWM_COUNT            0xFFC073CC         /* DMA20 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA21
+   ========================= */
+#define DMA21_NEXT_DESC_PTR            0xFFC09000         /* DMA21 Pointer to Next Initial Descriptor */
+#define DMA21_START_ADDR             0xFFC09004         /* DMA21 Start Address of Current Buffer */
+#define DMA21_CONFIG                   0xFFC09008         /* DMA21 Configuration Register */
+#define DMA21_X_COUNT                  0xFFC0900C         /* DMA21 Inner Loop Count Start Value */
+#define DMA21_X_MODIFY                  0xFFC09010         /* DMA21 Inner Loop Address Increment */
+#define DMA21_Y_COUNT                  0xFFC09014         /* DMA21 Outer Loop Count Start Value (2D only) */
+#define DMA21_Y_MODIFY                  0xFFC09018         /* DMA21 Outer Loop Address Increment (2D only) */
+#define DMA21_CURR_DESC_PTR            0xFFC09024         /* DMA21 Current Descriptor Pointer */
+#define DMA21_PREV_DESC_PTR            0xFFC09028         /* DMA21 Previous Initial Descriptor Pointer */
+#define DMA21_CURR_ADDR              0xFFC0902C         /* DMA21 Current Address */
+#define DMA21_IRQ_STATUS                  0xFFC09030         /* DMA21 Status Register */
+#define DMA21_CURR_X_COUNT              0xFFC09034         /* DMA21 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA21_CURR_Y_COUNT              0xFFC09038         /* DMA21 Current Row Count (2D only) */
+#define DMA21_BWL_COUNT                0xFFC09040         /* DMA21 Bandwidth Limit Count */
+#define DMA21_CURR_BWL_COUNT            0xFFC09044         /* DMA21 Bandwidth Limit Count Current */
+#define DMA21_BWM_COUNT                0xFFC09048         /* DMA21 Bandwidth Monitor Count */
+#define DMA21_CURR_BWM_COUNT            0xFFC0904C         /* DMA21 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA22
+   ========================= */
+#define DMA22_NEXT_DESC_PTR            0xFFC09080         /* DMA22 Pointer to Next Initial Descriptor */
+#define DMA22_START_ADDR             0xFFC09084         /* DMA22 Start Address of Current Buffer */
+#define DMA22_CONFIG                   0xFFC09088         /* DMA22 Configuration Register */
+#define DMA22_X_COUNT                  0xFFC0908C         /* DMA22 Inner Loop Count Start Value */
+#define DMA22_X_MODIFY                  0xFFC09090         /* DMA22 Inner Loop Address Increment */
+#define DMA22_Y_COUNT                  0xFFC09094         /* DMA22 Outer Loop Count Start Value (2D only) */
+#define DMA22_Y_MODIFY                  0xFFC09098         /* DMA22 Outer Loop Address Increment (2D only) */
+#define DMA22_CURR_DESC_PTR            0xFFC090A4         /* DMA22 Current Descriptor Pointer */
+#define DMA22_PREV_DESC_PTR            0xFFC090A8         /* DMA22 Previous Initial Descriptor Pointer */
+#define DMA22_CURR_ADDR              0xFFC090AC         /* DMA22 Current Address */
+#define DMA22_IRQ_STATUS                  0xFFC090B0         /* DMA22 Status Register */
+#define DMA22_CURR_X_COUNT              0xFFC090B4         /* DMA22 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA22_CURR_Y_COUNT              0xFFC090B8         /* DMA22 Current Row Count (2D only) */
+#define DMA22_BWL_COUNT                0xFFC090C0         /* DMA22 Bandwidth Limit Count */
+#define DMA22_CURR_BWL_COUNT            0xFFC090C4         /* DMA22 Bandwidth Limit Count Current */
+#define DMA22_BWM_COUNT                0xFFC090C8         /* DMA22 Bandwidth Monitor Count */
+#define DMA22_CURR_BWM_COUNT            0xFFC090CC         /* DMA22 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA23
+   ========================= */
+#define DMA23_NEXT_DESC_PTR            0xFFC09100         /* DMA23 Pointer to Next Initial Descriptor */
+#define DMA23_START_ADDR             0xFFC09104         /* DMA23 Start Address of Current Buffer */
+#define DMA23_CONFIG                   0xFFC09108         /* DMA23 Configuration Register */
+#define DMA23_X_COUNT                  0xFFC0910C         /* DMA23 Inner Loop Count Start Value */
+#define DMA23_X_MODIFY                  0xFFC09110         /* DMA23 Inner Loop Address Increment */
+#define DMA23_Y_COUNT                  0xFFC09114         /* DMA23 Outer Loop Count Start Value (2D only) */
+#define DMA23_Y_MODIFY                  0xFFC09118         /* DMA23 Outer Loop Address Increment (2D only) */
+#define DMA23_CURR_DESC_PTR            0xFFC09124         /* DMA23 Current Descriptor Pointer */
+#define DMA23_PREV_DESC_PTR            0xFFC09128         /* DMA23 Previous Initial Descriptor Pointer */
+#define DMA23_CURR_ADDR              0xFFC0912C         /* DMA23 Current Address */
+#define DMA23_IRQ_STATUS                  0xFFC09130         /* DMA23 Status Register */
+#define DMA23_CURR_X_COUNT              0xFFC09134         /* DMA23 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA23_CURR_Y_COUNT              0xFFC09138         /* DMA23 Current Row Count (2D only) */
+#define DMA23_BWL_COUNT                0xFFC09140         /* DMA23 Bandwidth Limit Count */
+#define DMA23_CURR_BWL_COUNT            0xFFC09144         /* DMA23 Bandwidth Limit Count Current */
+#define DMA23_BWM_COUNT                0xFFC09148         /* DMA23 Bandwidth Monitor Count */
+#define DMA23_CURR_BWM_COUNT            0xFFC0914C         /* DMA23 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA24
+   ========================= */
+#define DMA24_NEXT_DESC_PTR            0xFFC09180         /* DMA24 Pointer to Next Initial Descriptor */
+#define DMA24_START_ADDR             0xFFC09184         /* DMA24 Start Address of Current Buffer */
+#define DMA24_CONFIG                   0xFFC09188         /* DMA24 Configuration Register */
+#define DMA24_X_COUNT                  0xFFC0918C         /* DMA24 Inner Loop Count Start Value */
+#define DMA24_X_MODIFY                  0xFFC09190         /* DMA24 Inner Loop Address Increment */
+#define DMA24_Y_COUNT                  0xFFC09194         /* DMA24 Outer Loop Count Start Value (2D only) */
+#define DMA24_Y_MODIFY                  0xFFC09198         /* DMA24 Outer Loop Address Increment (2D only) */
+#define DMA24_CURR_DESC_PTR            0xFFC091A4         /* DMA24 Current Descriptor Pointer */
+#define DMA24_PREV_DESC_PTR            0xFFC091A8         /* DMA24 Previous Initial Descriptor Pointer */
+#define DMA24_CURR_ADDR              0xFFC091AC         /* DMA24 Current Address */
+#define DMA24_IRQ_STATUS                  0xFFC091B0         /* DMA24 Status Register */
+#define DMA24_CURR_X_COUNT              0xFFC091B4         /* DMA24 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA24_CURR_Y_COUNT              0xFFC091B8         /* DMA24 Current Row Count (2D only) */
+#define DMA24_BWL_COUNT                0xFFC091C0         /* DMA24 Bandwidth Limit Count */
+#define DMA24_CURR_BWL_COUNT            0xFFC091C4         /* DMA24 Bandwidth Limit Count Current */
+#define DMA24_BWM_COUNT                0xFFC091C8         /* DMA24 Bandwidth Monitor Count */
+#define DMA24_CURR_BWM_COUNT            0xFFC091CC         /* DMA24 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA25
+   ========================= */
+#define DMA25_NEXT_DESC_PTR            0xFFC09200         /* DMA25 Pointer to Next Initial Descriptor */
+#define DMA25_START_ADDR             0xFFC09204         /* DMA25 Start Address of Current Buffer */
+#define DMA25_CONFIG                   0xFFC09208         /* DMA25 Configuration Register */
+#define DMA25_X_COUNT                  0xFFC0920C         /* DMA25 Inner Loop Count Start Value */
+#define DMA25_X_MODIFY                  0xFFC09210         /* DMA25 Inner Loop Address Increment */
+#define DMA25_Y_COUNT                  0xFFC09214         /* DMA25 Outer Loop Count Start Value (2D only) */
+#define DMA25_Y_MODIFY                  0xFFC09218         /* DMA25 Outer Loop Address Increment (2D only) */
+#define DMA25_CURR_DESC_PTR            0xFFC09224         /* DMA25 Current Descriptor Pointer */
+#define DMA25_PREV_DESC_PTR            0xFFC09228         /* DMA25 Previous Initial Descriptor Pointer */
+#define DMA25_CURR_ADDR              0xFFC0922C         /* DMA25 Current Address */
+#define DMA25_IRQ_STATUS                  0xFFC09230         /* DMA25 Status Register */
+#define DMA25_CURR_X_COUNT              0xFFC09234         /* DMA25 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA25_CURR_Y_COUNT              0xFFC09238         /* DMA25 Current Row Count (2D only) */
+#define DMA25_BWL_COUNT                0xFFC09240         /* DMA25 Bandwidth Limit Count */
+#define DMA25_CURR_BWL_COUNT            0xFFC09244         /* DMA25 Bandwidth Limit Count Current */
+#define DMA25_BWM_COUNT                0xFFC09248         /* DMA25 Bandwidth Monitor Count */
+#define DMA25_CURR_BWM_COUNT            0xFFC0924C         /* DMA25 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA26
+   ========================= */
+#define DMA26_NEXT_DESC_PTR            0xFFC09280         /* DMA26 Pointer to Next Initial Descriptor */
+#define DMA26_START_ADDR             0xFFC09284         /* DMA26 Start Address of Current Buffer */
+#define DMA26_CONFIG                   0xFFC09288         /* DMA26 Configuration Register */
+#define DMA26_X_COUNT                  0xFFC0928C         /* DMA26 Inner Loop Count Start Value */
+#define DMA26_X_MODIFY                  0xFFC09290         /* DMA26 Inner Loop Address Increment */
+#define DMA26_Y_COUNT                  0xFFC09294         /* DMA26 Outer Loop Count Start Value (2D only) */
+#define DMA26_Y_MODIFY                  0xFFC09298         /* DMA26 Outer Loop Address Increment (2D only) */
+#define DMA26_CURR_DESC_PTR            0xFFC092A4         /* DMA26 Current Descriptor Pointer */
+#define DMA26_PREV_DESC_PTR            0xFFC092A8         /* DMA26 Previous Initial Descriptor Pointer */
+#define DMA26_CURR_ADDR              0xFFC092AC         /* DMA26 Current Address */
+#define DMA26_IRQ_STATUS                  0xFFC092B0         /* DMA26 Status Register */
+#define DMA26_CURR_X_COUNT              0xFFC092B4         /* DMA26 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA26_CURR_Y_COUNT              0xFFC092B8         /* DMA26 Current Row Count (2D only) */
+#define DMA26_BWL_COUNT                0xFFC092C0         /* DMA26 Bandwidth Limit Count */
+#define DMA26_CURR_BWL_COUNT            0xFFC092C4         /* DMA26 Bandwidth Limit Count Current */
+#define DMA26_BWM_COUNT                0xFFC092C8         /* DMA26 Bandwidth Monitor Count */
+#define DMA26_CURR_BWM_COUNT            0xFFC092CC         /* DMA26 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA27
+   ========================= */
+#define DMA27_NEXT_DESC_PTR            0xFFC09300         /* DMA27 Pointer to Next Initial Descriptor */
+#define DMA27_START_ADDR             0xFFC09304         /* DMA27 Start Address of Current Buffer */
+#define DMA27_CONFIG                   0xFFC09308         /* DMA27 Configuration Register */
+#define DMA27_X_COUNT                  0xFFC0930C         /* DMA27 Inner Loop Count Start Value */
+#define DMA27_X_MODIFY                  0xFFC09310         /* DMA27 Inner Loop Address Increment */
+#define DMA27_Y_COUNT                  0xFFC09314         /* DMA27 Outer Loop Count Start Value (2D only) */
+#define DMA27_Y_MODIFY                  0xFFC09318         /* DMA27 Outer Loop Address Increment (2D only) */
+#define DMA27_CURR_DESC_PTR            0xFFC09324         /* DMA27 Current Descriptor Pointer */
+#define DMA27_PREV_DESC_PTR            0xFFC09328         /* DMA27 Previous Initial Descriptor Pointer */
+#define DMA27_CURR_ADDR              0xFFC0932C         /* DMA27 Current Address */
+#define DMA27_IRQ_STATUS                  0xFFC09330         /* DMA27 Status Register */
+#define DMA27_CURR_X_COUNT              0xFFC09334         /* DMA27 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA27_CURR_Y_COUNT              0xFFC09338         /* DMA27 Current Row Count (2D only) */
+#define DMA27_BWL_COUNT                0xFFC09340         /* DMA27 Bandwidth Limit Count */
+#define DMA27_CURR_BWL_COUNT            0xFFC09344         /* DMA27 Bandwidth Limit Count Current */
+#define DMA27_BWM_COUNT                0xFFC09348         /* DMA27 Bandwidth Monitor Count */
+#define DMA27_CURR_BWM_COUNT            0xFFC0934C         /* DMA27 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA28
+   ========================= */
+#define DMA28_NEXT_DESC_PTR            0xFFC09380         /* DMA28 Pointer to Next Initial Descriptor */
+#define DMA28_START_ADDR             0xFFC09384         /* DMA28 Start Address of Current Buffer */
+#define DMA28_CONFIG                   0xFFC09388         /* DMA28 Configuration Register */
+#define DMA28_X_COUNT                  0xFFC0938C         /* DMA28 Inner Loop Count Start Value */
+#define DMA28_X_MODIFY                  0xFFC09390         /* DMA28 Inner Loop Address Increment */
+#define DMA28_Y_COUNT                  0xFFC09394         /* DMA28 Outer Loop Count Start Value (2D only) */
+#define DMA28_Y_MODIFY                  0xFFC09398         /* DMA28 Outer Loop Address Increment (2D only) */
+#define DMA28_CURR_DESC_PTR            0xFFC093A4         /* DMA28 Current Descriptor Pointer */
+#define DMA28_PREV_DESC_PTR            0xFFC093A8         /* DMA28 Previous Initial Descriptor Pointer */
+#define DMA28_CURR_ADDR              0xFFC093AC         /* DMA28 Current Address */
+#define DMA28_IRQ_STATUS                  0xFFC093B0         /* DMA28 Status Register */
+#define DMA28_CURR_X_COUNT              0xFFC093B4         /* DMA28 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA28_CURR_Y_COUNT              0xFFC093B8         /* DMA28 Current Row Count (2D only) */
+#define DMA28_BWL_COUNT                0xFFC093C0         /* DMA28 Bandwidth Limit Count */
+#define DMA28_CURR_BWL_COUNT            0xFFC093C4         /* DMA28 Bandwidth Limit Count Current */
+#define DMA28_BWM_COUNT                0xFFC093C8         /* DMA28 Bandwidth Monitor Count */
+#define DMA28_CURR_BWM_COUNT            0xFFC093CC         /* DMA28 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA29
+   ========================= */
+#define DMA29_NEXT_DESC_PTR            0xFFC0B000         /* DMA29 Pointer to Next Initial Descriptor */
+#define DMA29_START_ADDR             0xFFC0B004         /* DMA29 Start Address of Current Buffer */
+#define DMA29_CONFIG                   0xFFC0B008         /* DMA29 Configuration Register */
+#define DMA29_X_COUNT                  0xFFC0B00C         /* DMA29 Inner Loop Count Start Value */
+#define DMA29_X_MODIFY                  0xFFC0B010         /* DMA29 Inner Loop Address Increment */
+#define DMA29_Y_COUNT                  0xFFC0B014         /* DMA29 Outer Loop Count Start Value (2D only) */
+#define DMA29_Y_MODIFY                  0xFFC0B018         /* DMA29 Outer Loop Address Increment (2D only) */
+#define DMA29_CURR_DESC_PTR            0xFFC0B024         /* DMA29 Current Descriptor Pointer */
+#define DMA29_PREV_DESC_PTR            0xFFC0B028         /* DMA29 Previous Initial Descriptor Pointer */
+#define DMA29_CURR_ADDR              0xFFC0B02C         /* DMA29 Current Address */
+#define DMA29_IRQ_STATUS                  0xFFC0B030         /* DMA29 Status Register */
+#define DMA29_CURR_X_COUNT              0xFFC0B034         /* DMA29 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA29_CURR_Y_COUNT              0xFFC0B038         /* DMA29 Current Row Count (2D only) */
+#define DMA29_BWL_COUNT                0xFFC0B040         /* DMA29 Bandwidth Limit Count */
+#define DMA29_CURR_BWL_COUNT            0xFFC0B044         /* DMA29 Bandwidth Limit Count Current */
+#define DMA29_BWM_COUNT                0xFFC0B048         /* DMA29 Bandwidth Monitor Count */
+#define DMA29_CURR_BWM_COUNT            0xFFC0B04C         /* DMA29 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA30
+   ========================= */
+#define DMA30_NEXT_DESC_PTR            0xFFC0B080         /* DMA30 Pointer to Next Initial Descriptor */
+#define DMA30_START_ADDR             0xFFC0B084         /* DMA30 Start Address of Current Buffer */
+#define DMA30_CONFIG                   0xFFC0B088         /* DMA30 Configuration Register */
+#define DMA30_X_COUNT                  0xFFC0B08C         /* DMA30 Inner Loop Count Start Value */
+#define DMA30_X_MODIFY                  0xFFC0B090         /* DMA30 Inner Loop Address Increment */
+#define DMA30_Y_COUNT                  0xFFC0B094         /* DMA30 Outer Loop Count Start Value (2D only) */
+#define DMA30_Y_MODIFY                  0xFFC0B098         /* DMA30 Outer Loop Address Increment (2D only) */
+#define DMA30_CURR_DESC_PTR            0xFFC0B0A4         /* DMA30 Current Descriptor Pointer */
+#define DMA30_PREV_DESC_PTR            0xFFC0B0A8         /* DMA30 Previous Initial Descriptor Pointer */
+#define DMA30_CURR_ADDR              0xFFC0B0AC         /* DMA30 Current Address */
+#define DMA30_IRQ_STATUS                  0xFFC0B0B0         /* DMA30 Status Register */
+#define DMA30_CURR_X_COUNT              0xFFC0B0B4         /* DMA30 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA30_CURR_Y_COUNT              0xFFC0B0B8         /* DMA30 Current Row Count (2D only) */
+#define DMA30_BWL_COUNT                0xFFC0B0C0         /* DMA30 Bandwidth Limit Count */
+#define DMA30_CURR_BWL_COUNT            0xFFC0B0C4         /* DMA30 Bandwidth Limit Count Current */
+#define DMA30_BWM_COUNT                0xFFC0B0C8         /* DMA30 Bandwidth Monitor Count */
+#define DMA30_CURR_BWM_COUNT            0xFFC0B0CC         /* DMA30 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA31
+   ========================= */
+#define DMA31_NEXT_DESC_PTR            0xFFC0B100         /* DMA31 Pointer to Next Initial Descriptor */
+#define DMA31_START_ADDR             0xFFC0B104         /* DMA31 Start Address of Current Buffer */
+#define DMA31_CONFIG                   0xFFC0B108         /* DMA31 Configuration Register */
+#define DMA31_X_COUNT                  0xFFC0B10C         /* DMA31 Inner Loop Count Start Value */
+#define DMA31_X_MODIFY                  0xFFC0B110         /* DMA31 Inner Loop Address Increment */
+#define DMA31_Y_COUNT                  0xFFC0B114         /* DMA31 Outer Loop Count Start Value (2D only) */
+#define DMA31_Y_MODIFY                  0xFFC0B118         /* DMA31 Outer Loop Address Increment (2D only) */
+#define DMA31_CURR_DESC_PTR            0xFFC0B124         /* DMA31 Current Descriptor Pointer */
+#define DMA31_PREV_DESC_PTR            0xFFC0B128         /* DMA31 Previous Initial Descriptor Pointer */
+#define DMA31_CURR_ADDR              0xFFC0B12C         /* DMA31 Current Address */
+#define DMA31_IRQ_STATUS                  0xFFC0B130         /* DMA31 Status Register */
+#define DMA31_CURR_X_COUNT              0xFFC0B134         /* DMA31 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA31_CURR_Y_COUNT              0xFFC0B138         /* DMA31 Current Row Count (2D only) */
+#define DMA31_BWL_COUNT                0xFFC0B140         /* DMA31 Bandwidth Limit Count */
+#define DMA31_CURR_BWL_COUNT            0xFFC0B144         /* DMA31 Bandwidth Limit Count Current */
+#define DMA31_BWM_COUNT                0xFFC0B148         /* DMA31 Bandwidth Monitor Count */
+#define DMA31_CURR_BWM_COUNT            0xFFC0B14C         /* DMA31 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA32
+   ========================= */
+#define DMA32_NEXT_DESC_PTR            0xFFC0B180         /* DMA32 Pointer to Next Initial Descriptor */
+#define DMA32_START_ADDR             0xFFC0B184         /* DMA32 Start Address of Current Buffer */
+#define DMA32_CONFIG                   0xFFC0B188         /* DMA32 Configuration Register */
+#define DMA32_X_COUNT                  0xFFC0B18C         /* DMA32 Inner Loop Count Start Value */
+#define DMA32_X_MODIFY                  0xFFC0B190         /* DMA32 Inner Loop Address Increment */
+#define DMA32_Y_COUNT                  0xFFC0B194         /* DMA32 Outer Loop Count Start Value (2D only) */
+#define DMA32_Y_MODIFY                  0xFFC0B198         /* DMA32 Outer Loop Address Increment (2D only) */
+#define DMA32_CURR_DESC_PTR            0xFFC0B1A4         /* DMA32 Current Descriptor Pointer */
+#define DMA32_PREV_DESC_PTR            0xFFC0B1A8         /* DMA32 Previous Initial Descriptor Pointer */
+#define DMA32_CURR_ADDR              0xFFC0B1AC         /* DMA32 Current Address */
+#define DMA32_IRQ_STATUS                  0xFFC0B1B0         /* DMA32 Status Register */
+#define DMA32_CURR_X_COUNT              0xFFC0B1B4         /* DMA32 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA32_CURR_Y_COUNT              0xFFC0B1B8         /* DMA32 Current Row Count (2D only) */
+#define DMA32_BWL_COUNT                0xFFC0B1C0         /* DMA32 Bandwidth Limit Count */
+#define DMA32_CURR_BWL_COUNT            0xFFC0B1C4         /* DMA32 Bandwidth Limit Count Current */
+#define DMA32_BWM_COUNT                0xFFC0B1C8         /* DMA32 Bandwidth Monitor Count */
+#define DMA32_CURR_BWM_COUNT            0xFFC0B1CC         /* DMA32 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA33
+   ========================= */
+#define DMA33_NEXT_DESC_PTR            0xFFC0D000         /* DMA33 Pointer to Next Initial Descriptor */
+#define DMA33_START_ADDR             0xFFC0D004         /* DMA33 Start Address of Current Buffer */
+#define DMA33_CONFIG                   0xFFC0D008         /* DMA33 Configuration Register */
+#define DMA33_X_COUNT                  0xFFC0D00C         /* DMA33 Inner Loop Count Start Value */
+#define DMA33_X_MODIFY                  0xFFC0D010         /* DMA33 Inner Loop Address Increment */
+#define DMA33_Y_COUNT                  0xFFC0D014         /* DMA33 Outer Loop Count Start Value (2D only) */
+#define DMA33_Y_MODIFY                  0xFFC0D018         /* DMA33 Outer Loop Address Increment (2D only) */
+#define DMA33_CURR_DESC_PTR            0xFFC0D024         /* DMA33 Current Descriptor Pointer */
+#define DMA33_PREV_DESC_PTR            0xFFC0D028         /* DMA33 Previous Initial Descriptor Pointer */
+#define DMA33_CURR_ADDR              0xFFC0D02C         /* DMA33 Current Address */
+#define DMA33_IRQ_STATUS                  0xFFC0D030         /* DMA33 Status Register */
+#define DMA33_CURR_X_COUNT              0xFFC0D034         /* DMA33 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA33_CURR_Y_COUNT              0xFFC0D038         /* DMA33 Current Row Count (2D only) */
+#define DMA33_BWL_COUNT                0xFFC0D040         /* DMA33 Bandwidth Limit Count */
+#define DMA33_CURR_BWL_COUNT            0xFFC0D044         /* DMA33 Bandwidth Limit Count Current */
+#define DMA33_BWM_COUNT                0xFFC0D048         /* DMA33 Bandwidth Monitor Count */
+#define DMA33_CURR_BWM_COUNT            0xFFC0D04C         /* DMA33 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA34
+   ========================= */
+#define DMA34_NEXT_DESC_PTR            0xFFC0D080         /* DMA34 Pointer to Next Initial Descriptor */
+#define DMA34_START_ADDR             0xFFC0D084         /* DMA34 Start Address of Current Buffer */
+#define DMA34_CONFIG                   0xFFC0D088         /* DMA34 Configuration Register */
+#define DMA34_X_COUNT                  0xFFC0D08C         /* DMA34 Inner Loop Count Start Value */
+#define DMA34_X_MODIFY                  0xFFC0D090         /* DMA34 Inner Loop Address Increment */
+#define DMA34_Y_COUNT                  0xFFC0D094         /* DMA34 Outer Loop Count Start Value (2D only) */
+#define DMA34_Y_MODIFY                  0xFFC0D098         /* DMA34 Outer Loop Address Increment (2D only) */
+#define DMA34_CURR_DESC_PTR            0xFFC0D0A4         /* DMA34 Current Descriptor Pointer */
+#define DMA34_PREV_DESC_PTR            0xFFC0D0A8         /* DMA34 Previous Initial Descriptor Pointer */
+#define DMA34_CURR_ADDR              0xFFC0D0AC         /* DMA34 Current Address */
+#define DMA34_IRQ_STATUS                  0xFFC0D0B0         /* DMA34 Status Register */
+#define DMA34_CURR_X_COUNT              0xFFC0D0B4         /* DMA34 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA34_CURR_Y_COUNT              0xFFC0D0B8         /* DMA34 Current Row Count (2D only) */
+#define DMA34_BWL_COUNT                0xFFC0D0C0         /* DMA34 Bandwidth Limit Count */
+#define DMA34_CURR_BWL_COUNT            0xFFC0D0C4         /* DMA34 Bandwidth Limit Count Current */
+#define DMA34_BWM_COUNT                0xFFC0D0C8         /* DMA34 Bandwidth Monitor Count */
+#define DMA34_CURR_BWM_COUNT            0xFFC0D0CC         /* DMA34 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA35
+   ========================= */
+#define DMA35_NEXT_DESC_PTR            0xFFC10000         /* DMA35 Pointer to Next Initial Descriptor */
+#define DMA35_START_ADDR             0xFFC10004         /* DMA35 Start Address of Current Buffer */
+#define DMA35_CONFIG                   0xFFC10008         /* DMA35 Configuration Register */
+#define DMA35_X_COUNT                  0xFFC1000C         /* DMA35 Inner Loop Count Start Value */
+#define DMA35_X_MODIFY                  0xFFC10010         /* DMA35 Inner Loop Address Increment */
+#define DMA35_Y_COUNT                  0xFFC10014         /* DMA35 Outer Loop Count Start Value (2D only) */
+#define DMA35_Y_MODIFY                  0xFFC10018         /* DMA35 Outer Loop Address Increment (2D only) */
+#define DMA35_CURR_DESC_PTR            0xFFC10024         /* DMA35 Current Descriptor Pointer */
+#define DMA35_PREV_DESC_PTR            0xFFC10028         /* DMA35 Previous Initial Descriptor Pointer */
+#define DMA35_CURR_ADDR              0xFFC1002C         /* DMA35 Current Address */
+#define DMA35_IRQ_STATUS                  0xFFC10030         /* DMA35 Status Register */
+#define DMA35_CURR_X_COUNT              0xFFC10034         /* DMA35 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA35_CURR_Y_COUNT              0xFFC10038         /* DMA35 Current Row Count (2D only) */
+#define DMA35_BWL_COUNT                0xFFC10040         /* DMA35 Bandwidth Limit Count */
+#define DMA35_CURR_BWL_COUNT            0xFFC10044         /* DMA35 Bandwidth Limit Count Current */
+#define DMA35_BWM_COUNT                0xFFC10048         /* DMA35 Bandwidth Monitor Count */
+#define DMA35_CURR_BWM_COUNT            0xFFC1004C         /* DMA35 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA36
+   ========================= */
+#define DMA36_NEXT_DESC_PTR            0xFFC10080         /* DMA36 Pointer to Next Initial Descriptor */
+#define DMA36_START_ADDR             0xFFC10084         /* DMA36 Start Address of Current Buffer */
+#define DMA36_CONFIG                   0xFFC10088         /* DMA36 Configuration Register */
+#define DMA36_X_COUNT                  0xFFC1008C         /* DMA36 Inner Loop Count Start Value */
+#define DMA36_X_MODIFY                  0xFFC10090         /* DMA36 Inner Loop Address Increment */
+#define DMA36_Y_COUNT                  0xFFC10094         /* DMA36 Outer Loop Count Start Value (2D only) */
+#define DMA36_Y_MODIFY                  0xFFC10098         /* DMA36 Outer Loop Address Increment (2D only) */
+#define DMA36_CURR_DESC_PTR            0xFFC100A4         /* DMA36 Current Descriptor Pointer */
+#define DMA36_PREV_DESC_PTR            0xFFC100A8         /* DMA36 Previous Initial Descriptor Pointer */
+#define DMA36_CURR_ADDR              0xFFC100AC         /* DMA36 Current Address */
+#define DMA36_IRQ_STATUS                  0xFFC100B0         /* DMA36 Status Register */
+#define DMA36_CURR_X_COUNT              0xFFC100B4         /* DMA36 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA36_CURR_Y_COUNT              0xFFC100B8         /* DMA36 Current Row Count (2D only) */
+#define DMA36_BWL_COUNT                0xFFC100C0         /* DMA36 Bandwidth Limit Count */
+#define DMA36_CURR_BWL_COUNT            0xFFC100C4         /* DMA36 Bandwidth Limit Count Current */
+#define DMA36_BWM_COUNT                0xFFC100C8         /* DMA36 Bandwidth Monitor Count */
+#define DMA36_CURR_BWM_COUNT            0xFFC100CC         /* DMA36 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA37
+   ========================= */
+#define DMA37_NEXT_DESC_PTR            0xFFC10100         /* DMA37 Pointer to Next Initial Descriptor */
+#define DMA37_START_ADDR             0xFFC10104         /* DMA37 Start Address of Current Buffer */
+#define DMA37_CONFIG                   0xFFC10108         /* DMA37 Configuration Register */
+#define DMA37_X_COUNT                  0xFFC1010C         /* DMA37 Inner Loop Count Start Value */
+#define DMA37_X_MODIFY                  0xFFC10110         /* DMA37 Inner Loop Address Increment */
+#define DMA37_Y_COUNT                  0xFFC10114         /* DMA37 Outer Loop Count Start Value (2D only) */
+#define DMA37_Y_MODIFY                  0xFFC10118         /* DMA37 Outer Loop Address Increment (2D only) */
+#define DMA37_CURR_DESC_PTR            0xFFC10124         /* DMA37 Current Descriptor Pointer */
+#define DMA37_PREV_DESC_PTR            0xFFC10128         /* DMA37 Previous Initial Descriptor Pointer */
+#define DMA37_CURR_ADDR              0xFFC1012C         /* DMA37 Current Address */
+#define DMA37_IRQ_STATUS                  0xFFC10130         /* DMA37 Status Register */
+#define DMA37_CURR_X_COUNT              0xFFC10134         /* DMA37 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA37_CURR_Y_COUNT              0xFFC10138         /* DMA37 Current Row Count (2D only) */
+#define DMA37_BWL_COUNT                0xFFC10140         /* DMA37 Bandwidth Limit Count */
+#define DMA37_CURR_BWL_COUNT            0xFFC10144         /* DMA37 Bandwidth Limit Count Current */
+#define DMA37_BWM_COUNT                0xFFC10148         /* DMA37 Bandwidth Monitor Count */
+#define DMA37_CURR_BWM_COUNT            0xFFC1014C         /* DMA37 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA38
+   ========================= */
+#define DMA38_NEXT_DESC_PTR            0xFFC12000         /* DMA38 Pointer to Next Initial Descriptor */
+#define DMA38_START_ADDR             0xFFC12004         /* DMA38 Start Address of Current Buffer */
+#define DMA38_CONFIG                   0xFFC12008         /* DMA38 Configuration Register */
+#define DMA38_X_COUNT                  0xFFC1200C         /* DMA38 Inner Loop Count Start Value */
+#define DMA38_X_MODIFY                  0xFFC12010         /* DMA38 Inner Loop Address Increment */
+#define DMA38_Y_COUNT                  0xFFC12014         /* DMA38 Outer Loop Count Start Value (2D only) */
+#define DMA38_Y_MODIFY                  0xFFC12018         /* DMA38 Outer Loop Address Increment (2D only) */
+#define DMA38_CURR_DESC_PTR            0xFFC12024         /* DMA38 Current Descriptor Pointer */
+#define DMA38_PREV_DESC_PTR            0xFFC12028         /* DMA38 Previous Initial Descriptor Pointer */
+#define DMA38_CURR_ADDR              0xFFC1202C         /* DMA38 Current Address */
+#define DMA38_IRQ_STATUS                  0xFFC12030         /* DMA38 Status Register */
+#define DMA38_CURR_X_COUNT              0xFFC12034         /* DMA38 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA38_CURR_Y_COUNT              0xFFC12038         /* DMA38 Current Row Count (2D only) */
+#define DMA38_BWL_COUNT                0xFFC12040         /* DMA38 Bandwidth Limit Count */
+#define DMA38_CURR_BWL_COUNT            0xFFC12044         /* DMA38 Bandwidth Limit Count Current */
+#define DMA38_BWM_COUNT                0xFFC12048         /* DMA38 Bandwidth Monitor Count */
+#define DMA38_CURR_BWM_COUNT            0xFFC1204C         /* DMA38 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA39
+   ========================= */
+#define DMA39_NEXT_DESC_PTR            0xFFC12080         /* DMA39 Pointer to Next Initial Descriptor */
+#define DMA39_START_ADDR             0xFFC12084         /* DMA39 Start Address of Current Buffer */
+#define DMA39_CONFIG                   0xFFC12088         /* DMA39 Configuration Register */
+#define DMA39_X_COUNT                  0xFFC1208C         /* DMA39 Inner Loop Count Start Value */
+#define DMA39_X_MODIFY                  0xFFC12090         /* DMA39 Inner Loop Address Increment */
+#define DMA39_Y_COUNT                  0xFFC12094         /* DMA39 Outer Loop Count Start Value (2D only) */
+#define DMA39_Y_MODIFY                  0xFFC12098         /* DMA39 Outer Loop Address Increment (2D only) */
+#define DMA39_CURR_DESC_PTR            0xFFC120A4         /* DMA39 Current Descriptor Pointer */
+#define DMA39_PREV_DESC_PTR            0xFFC120A8         /* DMA39 Previous Initial Descriptor Pointer */
+#define DMA39_CURR_ADDR              0xFFC120AC         /* DMA39 Current Address */
+#define DMA39_IRQ_STATUS                  0xFFC120B0         /* DMA39 Status Register */
+#define DMA39_CURR_X_COUNT              0xFFC120B4         /* DMA39 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA39_CURR_Y_COUNT              0xFFC120B8         /* DMA39 Current Row Count (2D only) */
+#define DMA39_BWL_COUNT                0xFFC120C0         /* DMA39 Bandwidth Limit Count */
+#define DMA39_CURR_BWL_COUNT            0xFFC120C4         /* DMA39 Bandwidth Limit Count Current */
+#define DMA39_BWM_COUNT                0xFFC120C8         /* DMA39 Bandwidth Monitor Count */
+#define DMA39_CURR_BWM_COUNT            0xFFC120CC         /* DMA39 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA40
+   ========================= */
+#define DMA40_NEXT_DESC_PTR            0xFFC12100         /* DMA40 Pointer to Next Initial Descriptor */
+#define DMA40_START_ADDR             0xFFC12104         /* DMA40 Start Address of Current Buffer */
+#define DMA40_CONFIG                   0xFFC12108         /* DMA40 Configuration Register */
+#define DMA40_X_COUNT                  0xFFC1210C         /* DMA40 Inner Loop Count Start Value */
+#define DMA40_X_MODIFY                  0xFFC12110         /* DMA40 Inner Loop Address Increment */
+#define DMA40_Y_COUNT                  0xFFC12114         /* DMA40 Outer Loop Count Start Value (2D only) */
+#define DMA40_Y_MODIFY                  0xFFC12118         /* DMA40 Outer Loop Address Increment (2D only) */
+#define DMA40_CURR_DESC_PTR            0xFFC12124         /* DMA40 Current Descriptor Pointer */
+#define DMA40_PREV_DESC_PTR            0xFFC12128         /* DMA40 Previous Initial Descriptor Pointer */
+#define DMA40_CURR_ADDR              0xFFC1212C         /* DMA40 Current Address */
+#define DMA40_IRQ_STATUS                  0xFFC12130         /* DMA40 Status Register */
+#define DMA40_CURR_X_COUNT              0xFFC12134         /* DMA40 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA40_CURR_Y_COUNT              0xFFC12138         /* DMA40 Current Row Count (2D only) */
+#define DMA40_BWL_COUNT                0xFFC12140         /* DMA40 Bandwidth Limit Count */
+#define DMA40_CURR_BWL_COUNT            0xFFC12144         /* DMA40 Bandwidth Limit Count Current */
+#define DMA40_BWM_COUNT                0xFFC12148         /* DMA40 Bandwidth Monitor Count */
+#define DMA40_CURR_BWM_COUNT            0xFFC1214C         /* DMA40 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA41
+   ========================= */
+#define DMA41_NEXT_DESC_PTR            0xFFC12180         /* DMA41 Pointer to Next Initial Descriptor */
+#define DMA41_START_ADDR             0xFFC12184         /* DMA41 Start Address of Current Buffer */
+#define DMA41_CONFIG                   0xFFC12188         /* DMA41 Configuration Register */
+#define DMA41_X_COUNT                  0xFFC1218C         /* DMA41 Inner Loop Count Start Value */
+#define DMA41_X_MODIFY                  0xFFC12190         /* DMA41 Inner Loop Address Increment */
+#define DMA41_Y_COUNT                  0xFFC12194         /* DMA41 Outer Loop Count Start Value (2D only) */
+#define DMA41_Y_MODIFY                  0xFFC12198         /* DMA41 Outer Loop Address Increment (2D only) */
+#define DMA41_CURR_DESC_PTR            0xFFC121A4         /* DMA41 Current Descriptor Pointer */
+#define DMA41_PREV_DESC_PTR            0xFFC121A8         /* DMA41 Previous Initial Descriptor Pointer */
+#define DMA41_CURR_ADDR              0xFFC121AC         /* DMA41 Current Address */
+#define DMA41_IRQ_STATUS                  0xFFC121B0         /* DMA41 Status Register */
+#define DMA41_CURR_X_COUNT              0xFFC121B4         /* DMA41 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA41_CURR_Y_COUNT              0xFFC121B8         /* DMA41 Current Row Count (2D only) */
+#define DMA41_BWL_COUNT                0xFFC121C0         /* DMA41 Bandwidth Limit Count */
+#define DMA41_CURR_BWL_COUNT            0xFFC121C4         /* DMA41 Bandwidth Limit Count Current */
+#define DMA41_BWM_COUNT                0xFFC121C8         /* DMA41 Bandwidth Monitor Count */
+#define DMA41_CURR_BWM_COUNT            0xFFC121CC         /* DMA41 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA42
+   ========================= */
+#define DMA42_NEXT_DESC_PTR            0xFFC14000         /* DMA42 Pointer to Next Initial Descriptor */
+#define DMA42_START_ADDR             0xFFC14004         /* DMA42 Start Address of Current Buffer */
+#define DMA42_CONFIG                   0xFFC14008         /* DMA42 Configuration Register */
+#define DMA42_X_COUNT                  0xFFC1400C         /* DMA42 Inner Loop Count Start Value */
+#define DMA42_X_MODIFY                  0xFFC14010         /* DMA42 Inner Loop Address Increment */
+#define DMA42_Y_COUNT                  0xFFC14014         /* DMA42 Outer Loop Count Start Value (2D only) */
+#define DMA42_Y_MODIFY                  0xFFC14018         /* DMA42 Outer Loop Address Increment (2D only) */
+#define DMA42_CURR_DESC_PTR            0xFFC14024         /* DMA42 Current Descriptor Pointer */
+#define DMA42_PREV_DESC_PTR            0xFFC14028         /* DMA42 Previous Initial Descriptor Pointer */
+#define DMA42_CURR_ADDR              0xFFC1402C         /* DMA42 Current Address */
+#define DMA42_IRQ_STATUS                  0xFFC14030         /* DMA42 Status Register */
+#define DMA42_CURR_X_COUNT              0xFFC14034         /* DMA42 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA42_CURR_Y_COUNT              0xFFC14038         /* DMA42 Current Row Count (2D only) */
+#define DMA42_BWL_COUNT                0xFFC14040         /* DMA42 Bandwidth Limit Count */
+#define DMA42_CURR_BWL_COUNT            0xFFC14044         /* DMA42 Bandwidth Limit Count Current */
+#define DMA42_BWM_COUNT                0xFFC14048         /* DMA42 Bandwidth Monitor Count */
+#define DMA42_CURR_BWM_COUNT            0xFFC1404C         /* DMA42 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA43
+   ========================= */
+#define DMA43_NEXT_DESC_PTR            0xFFC14080         /* DMA43 Pointer to Next Initial Descriptor */
+#define DMA43_START_ADDR             0xFFC14084         /* DMA43 Start Address of Current Buffer */
+#define DMA43_CONFIG                   0xFFC14088         /* DMA43 Configuration Register */
+#define DMA43_X_COUNT                  0xFFC1408C         /* DMA43 Inner Loop Count Start Value */
+#define DMA43_X_MODIFY                  0xFFC14090         /* DMA43 Inner Loop Address Increment */
+#define DMA43_Y_COUNT                  0xFFC14094         /* DMA43 Outer Loop Count Start Value (2D only) */
+#define DMA43_Y_MODIFY                  0xFFC14098         /* DMA43 Outer Loop Address Increment (2D only) */
+#define DMA43_CURR_DESC_PTR            0xFFC140A4         /* DMA43 Current Descriptor Pointer */
+#define DMA43_PREV_DESC_PTR            0xFFC140A8         /* DMA43 Previous Initial Descriptor Pointer */
+#define DMA43_CURR_ADDR              0xFFC140AC         /* DMA43 Current Address */
+#define DMA43_IRQ_STATUS                  0xFFC140B0         /* DMA43 Status Register */
+#define DMA43_CURR_X_COUNT              0xFFC140B4         /* DMA43 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA43_CURR_Y_COUNT              0xFFC140B8         /* DMA43 Current Row Count (2D only) */
+#define DMA43_BWL_COUNT                0xFFC140C0         /* DMA43 Bandwidth Limit Count */
+#define DMA43_CURR_BWL_COUNT            0xFFC140C4         /* DMA43 Bandwidth Limit Count Current */
+#define DMA43_BWM_COUNT                0xFFC140C8         /* DMA43 Bandwidth Monitor Count */
+#define DMA43_CURR_BWM_COUNT            0xFFC140CC         /* DMA43 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA44
+   ========================= */
+#define DMA44_NEXT_DESC_PTR            0xFFC14100         /* DMA44 Pointer to Next Initial Descriptor */
+#define DMA44_START_ADDR             0xFFC14104         /* DMA44 Start Address of Current Buffer */
+#define DMA44_CONFIG                   0xFFC14108         /* DMA44 Configuration Register */
+#define DMA44_X_COUNT                  0xFFC1410C         /* DMA44 Inner Loop Count Start Value */
+#define DMA44_X_MODIFY                  0xFFC14110         /* DMA44 Inner Loop Address Increment */
+#define DMA44_Y_COUNT                  0xFFC14114         /* DMA44 Outer Loop Count Start Value (2D only) */
+#define DMA44_Y_MODIFY                  0xFFC14118         /* DMA44 Outer Loop Address Increment (2D only) */
+#define DMA44_CURR_DESC_PTR            0xFFC14124         /* DMA44 Current Descriptor Pointer */
+#define DMA44_PREV_DESC_PTR            0xFFC14128         /* DMA44 Previous Initial Descriptor Pointer */
+#define DMA44_CURR_ADDR              0xFFC1412C         /* DMA44 Current Address */
+#define DMA44_IRQ_STATUS                  0xFFC14130         /* DMA44 Status Register */
+#define DMA44_CURR_X_COUNT              0xFFC14134         /* DMA44 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA44_CURR_Y_COUNT              0xFFC14138         /* DMA44 Current Row Count (2D only) */
+#define DMA44_BWL_COUNT                0xFFC14140         /* DMA44 Bandwidth Limit Count */
+#define DMA44_CURR_BWL_COUNT            0xFFC14144         /* DMA44 Bandwidth Limit Count Current */
+#define DMA44_BWM_COUNT                0xFFC14148         /* DMA44 Bandwidth Monitor Count */
+#define DMA44_CURR_BWM_COUNT            0xFFC1414C         /* DMA44 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA45
+   ========================= */
+#define DMA45_NEXT_DESC_PTR            0xFFC14180         /* DMA45 Pointer to Next Initial Descriptor */
+#define DMA45_START_ADDR             0xFFC14184         /* DMA45 Start Address of Current Buffer */
+#define DMA45_CONFIG                   0xFFC14188         /* DMA45 Configuration Register */
+#define DMA45_X_COUNT                  0xFFC1418C         /* DMA45 Inner Loop Count Start Value */
+#define DMA45_X_MODIFY                  0xFFC14190         /* DMA45 Inner Loop Address Increment */
+#define DMA45_Y_COUNT                  0xFFC14194         /* DMA45 Outer Loop Count Start Value (2D only) */
+#define DMA45_Y_MODIFY                  0xFFC14198         /* DMA45 Outer Loop Address Increment (2D only) */
+#define DMA45_CURR_DESC_PTR            0xFFC141A4         /* DMA45 Current Descriptor Pointer */
+#define DMA45_PREV_DESC_PTR            0xFFC141A8         /* DMA45 Previous Initial Descriptor Pointer */
+#define DMA45_CURR_ADDR              0xFFC141AC         /* DMA45 Current Address */
+#define DMA45_IRQ_STATUS                  0xFFC141B0         /* DMA45 Status Register */
+#define DMA45_CURR_X_COUNT              0xFFC141B4         /* DMA45 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA45_CURR_Y_COUNT              0xFFC141B8         /* DMA45 Current Row Count (2D only) */
+#define DMA45_BWL_COUNT                0xFFC141C0         /* DMA45 Bandwidth Limit Count */
+#define DMA45_CURR_BWL_COUNT            0xFFC141C4         /* DMA45 Bandwidth Limit Count Current */
+#define DMA45_BWM_COUNT                0xFFC141C8         /* DMA45 Bandwidth Monitor Count */
+#define DMA45_CURR_BWM_COUNT            0xFFC141CC         /* DMA45 Bandwidth Monitor Count Current */
+
+/* =========================
+        DMA46
+   ========================= */
+#define DMA46_NEXT_DESC_PTR            0xFFC14200         /* DMA46 Pointer to Next Initial Descriptor */
+#define DMA46_START_ADDR             0xFFC14204         /* DMA46 Start Address of Current Buffer */
+#define DMA46_CONFIG                   0xFFC14208         /* DMA46 Configuration Register */
+#define DMA46_X_COUNT                  0xFFC1420C         /* DMA46 Inner Loop Count Start Value */
+#define DMA46_X_MODIFY                  0xFFC14210         /* DMA46 Inner Loop Address Increment */
+#define DMA46_Y_COUNT                  0xFFC14214         /* DMA46 Outer Loop Count Start Value (2D only) */
+#define DMA46_Y_MODIFY                  0xFFC14218         /* DMA46 Outer Loop Address Increment (2D only) */
+#define DMA46_CURR_DESC_PTR            0xFFC14224         /* DMA46 Current Descriptor Pointer */
+#define DMA46_PREV_DESC_PTR            0xFFC14228         /* DMA46 Previous Initial Descriptor Pointer */
+#define DMA46_CURR_ADDR              0xFFC1422C         /* DMA46 Current Address */
+#define DMA46_IRQ_STATUS                  0xFFC14230         /* DMA46 Status Register */
+#define DMA46_CURR_X_COUNT              0xFFC14234         /* DMA46 Current Count(1D) or intra-row XCNT (2D) */
+#define DMA46_CURR_Y_COUNT              0xFFC14238         /* DMA46 Current Row Count (2D only) */
+#define DMA46_BWL_COUNT                0xFFC14240         /* DMA46 Bandwidth Limit Count */
+#define DMA46_CURR_BWL_COUNT            0xFFC14244         /* DMA46 Bandwidth Limit Count Current */
+#define DMA46_BWM_COUNT                0xFFC14248         /* DMA46 Bandwidth Monitor Count */
+#define DMA46_CURR_BWM_COUNT            0xFFC1424C         /* DMA46 Bandwidth Monitor Count Current */
+
+
+/********************************************************************************
+    DMA Alias Definitions
+ ********************************************************************************/
+#define MDMA0_DEST_CRC0_NEXT_DESC_PTR   (DMA22_NEXT_DESC_PTR)
+#define MDMA0_DEST_CRC0_START_ADDR      (DMA22_START_ADDR)
+#define MDMA0_DEST_CRC0_CONFIG          (DMA22_CONFIG)
+#define MDMA0_DEST_CRC0_X_COUNT         (DMA22_X_COUNT)
+#define MDMA0_DEST_CRC0_X_MODIFY        (DMA22_X_MODIFY)
+#define MDMA0_DEST_CRC0_Y_COUNT         (DMA22_Y_COUNT)
+#define MDMA0_DEST_CRC0_Y_MODIFY        (DMA22_Y_MODIFY)
+#define MDMA0_DEST_CRC0_CURR_DESC_PTR   (DMA22_CURR_DESC_PTR)
+#define MDMA0_DEST_CRC0_PREV_DESC_PTR   (DMA22_PREV_DESC_PTR)
+#define MDMA0_DEST_CRC0_CURR_ADDR       (DMA22_CURR_ADDR)
+#define MDMA0_DEST_CRC0_IRQ_STATUS      (DMA22_IRQ_STATUS)
+#define MDMA0_DEST_CRC0_CURR_X_COUNT    (DMA22_CURR_X_COUNT)
+#define MDMA0_DEST_CRC0_CURR_Y_COUNT    (DMA22_CURR_Y_COUNT)
+#define MDMA0_DEST_CRC0_BWL_COUNT       (DMA22_BWL_COUNT)
+#define MDMA0_DEST_CRC0_CURR_BWL_COUNT  (DMA22_CURR_BWL_COUNT)
+#define MDMA0_DEST_CRC0_BWM_COUNT       (DMA22_BWM_COUNT)
+#define MDMA0_DEST_CRC0_CURR_BWM_COUNT  (DMA22_CURR_BWM_COUNT)
+#define MDMA0_SRC_CRC0_NEXT_DESC_PTR    (DMA21_NEXT_DESC_PTR)
+#define MDMA0_SRC_CRC0_START_ADDR       (DMA21_START_ADDR)
+#define MDMA0_SRC_CRC0_CONFIG           (DMA21_CONFIG)
+#define MDMA0_SRC_CRC0_X_COUNT          (DMA21_X_COUNT)
+#define MDMA0_SRC_CRC0_X_MODIFY         (DMA21_X_MODIFY)
+#define MDMA0_SRC_CRC0_Y_COUNT          (DMA21_Y_COUNT)
+#define MDMA0_SRC_CRC0_Y_MODIFY         (DMA21_Y_MODIFY)
+#define MDMA0_SRC_CRC0_CURR_DESC_PTR    (DMA21_CURR_DESC_PTR)
+#define MDMA0_SRC_CRC0_PREV_DESC_PTR    (DMA21_PREV_DESC_PTR)
+#define MDMA0_SRC_CRC0_CURR_ADDR        (DMA21_CURR_ADDR)
+#define MDMA0_SRC_CRC0_IRQ_STATUS       (DMA21_IRQ_STATUS)
+#define MDMA0_SRC_CRC0_CURR_X_COUNT     (DMA21_CURR_X_COUNT)
+#define MDMA0_SRC_CRC0_CURR_Y_COUNT     (DMA21_CURR_Y_COUNT)
+#define MDMA0_SRC_CRC0_BWL_COUNT        (DMA21_BWL_COUNT)
+#define MDMA0_SRC_CRC0_CURR_BWL_COUNT   (DMA21_CURR_BWL_COUNT)
+#define MDMA0_SRC_CRC0_BWM_COUNT        (DMA21_BWM_COUNT)
+#define MDMA0_SRC_CRC0_CURR_BWM_COUNT   (DMA21_CURR_BWM_COUNT)
+#define MDMA1_DEST_CRC1_NEXT_DESC_PTR   (DMA24_NEXT_DESC_PTR)
+#define MDMA1_DEST_CRC1_START_ADDR      (DMA24_START_ADDR)
+#define MDMA1_DEST_CRC1_CONFIG          (DMA24_CONFIG)
+#define MDMA1_DEST_CRC1_X_COUNT         (DMA24_X_COUNT)
+#define MDMA1_DEST_CRC1_X_MODIFY        (DMA24_X_MODIFY)
+#define MDMA1_DEST_CRC1_Y_COUNT         (DMA24_Y_COUNT)
+#define MDMA1_DEST_CRC1_Y_MODIFY        (DMA24_Y_MODIFY)
+#define MDMA1_DEST_CRC1_CURR_DESC_PTR   (DMA24_CURR_DESC_PTR)
+#define MDMA1_DEST_CRC1_PREV_DESC_PTR   (DMA24_PREV_DESC_PTR)
+#define MDMA1_DEST_CRC1_CURR_ADDR       (DMA24_CURR_ADDR)
+#define MDMA1_DEST_CRC1_IRQ_STATUS      (DMA24_IRQ_STATUS)
+#define MDMA1_DEST_CRC1_CURR_X_COUNT    (DMA24_CURR_X_COUNT)
+#define MDMA1_DEST_CRC1_CURR_Y_COUNT    (DMA24_CURR_Y_COUNT)
+#define MDMA1_DEST_CRC1_BWL_COUNT       (DMA24_BWL_COUNT)
+#define MDMA1_DEST_CRC1_CURR_BWL_COUNT  (DMA24_CURR_BWL_COUNT)
+#define MDMA1_DEST_CRC1_BWM_COUNT       (DMA24_BWM_COUNT)
+#define MDMA1_DEST_CRC1_CURR_BWM_COUNT  (DMA24_CURR_BWM_COUNT)
+#define MDMA1_SRC_CRC1_NEXT_DESC_PTR    (DMA23_NEXT_DESC_PTR)
+#define MDMA1_SRC_CRC1_START_ADDR       (DMA23_START_ADDR)
+#define MDMA1_SRC_CRC1_CONFIG           (DMA23_CONFIG)
+#define MDMA1_SRC_CRC1_X_COUNT          (DMA23_X_COUNT)
+#define MDMA1_SRC_CRC1_X_MODIFY         (DMA23_X_MODIFY)
+#define MDMA1_SRC_CRC1_Y_COUNT          (DMA23_Y_COUNT)
+#define MDMA1_SRC_CRC1_Y_MODIFY         (DMA23_Y_MODIFY)
+#define MDMA1_SRC_CRC1_CURR_DESC_PTR    (DMA23_CURR_DESC_PTR)
+#define MDMA1_SRC_CRC1_PREV_DESC_PTR    (DMA23_PREV_DESC_PTR)
+#define MDMA1_SRC_CRC1_CURR_ADDR        (DMA23_CURR_ADDR)
+#define MDMA1_SRC_CRC1_IRQ_STATUS       (DMA23_IRQ_STATUS)
+#define MDMA1_SRC_CRC1_CURR_X_COUNT     (DMA23_CURR_X_COUNT)
+#define MDMA1_SRC_CRC1_CURR_Y_COUNT     (DMA23_CURR_Y_COUNT)
+#define MDMA1_SRC_CRC1_BWL_COUNT        (DMA23_BWL_COUNT)
+#define MDMA1_SRC_CRC1_CURR_BWL_COUNT   (DMA23_CURR_BWL_COUNT)
+#define MDMA1_SRC_CRC1_BWM_COUNT        (DMA23_BWM_COUNT)
+#define MDMA1_SRC_CRC1_CURR_BWM_COUNT   (DMA23_CURR_BWM_COUNT)
+#define MDMA2_DEST_NEXT_DESC_PTR        (DMA26_NEXT_DESC_PTR)
+#define MDMA2_DEST_START_ADDR           (DMA26_START_ADDR)
+#define MDMA2_DEST_CONFIG               (DMA26_CONFIG)
+#define MDMA2_DEST_X_COUNT              (DMA26_X_COUNT)
+#define MDMA2_DEST_X_MODIFY             (DMA26_X_MODIFY)
+#define MDMA2_DEST_Y_COUNT              (DMA26_Y_COUNT)
+#define MDMA2_DEST_Y_MODIFY             (DMA26_Y_MODIFY)
+#define MDMA2_DEST_CURR_DESC_PTR        (DMA26_CURR_DESC_PTR)
+#define MDMA2_DEST_PREV_DESC_PTR        (DMA26_PREV_DESC_PTR)
+#define MDMA2_DEST_CURR_ADDR            (DMA26_CURR_ADDR)
+#define MDMA2_DEST_IRQ_STATUS           (DMA26_IRQ_STATUS)
+#define MDMA2_DEST_CURR_X_COUNT         (DMA26_CURR_X_COUNT)
+#define MDMA2_DEST_CURR_Y_COUNT         (DMA26_CURR_Y_COUNT)
+#define MDMA2_DEST_BWL_COUNT            (DMA26_BWL_COUNT)
+#define MDMA2_DEST_CURR_BWL_COUNT       (DMA26_CURR_BWL_COUNT)
+#define MDMA2_DEST_BWM_COUNT            (DMA26_BWM_COUNT)
+#define MDMA2_DEST_CURR_BWM_COUNT       (DMA26_CURR_BWM_COUNT)
+#define MDMA2_SRC_NEXT_DESC_PTR         (DMA25_NEXT_DESC_PTR)
+#define MDMA2_SRC_START_ADDR            (DMA25_START_ADDR)
+#define MDMA2_SRC_CONFIG                (DMA25_CONFIG)
+#define MDMA2_SRC_X_COUNT               (DMA25_X_COUNT)
+#define MDMA2_SRC_X_MODIFY              (DMA25_X_MODIFY)
+#define MDMA2_SRC_Y_COUNT               (DMA25_Y_COUNT)
+#define MDMA2_SRC_Y_MODIFY              (DMA25_Y_MODIFY)
+#define MDMA2_SRC_CURR_DESC_PTR         (DMA25_CURR_DESC_PTR)
+#define MDMA2_SRC_PREV_DESC_PTR         (DMA25_PREV_DESC_PTR)
+#define MDMA2_SRC_CURR_ADDR             (DMA25_CURR_ADDR)
+#define MDMA2_SRC_IRQ_STATUS            (DMA25_IRQ_STATUS)
+#define MDMA2_SRC_CURR_X_COUNT          (DMA25_CURR_X_COUNT)
+#define MDMA2_SRC_CURR_Y_COUNT          (DMA25_CURR_Y_COUNT)
+#define MDMA2_SRC_BWL_COUNT             (DMA25_BWL_COUNT)
+#define MDMA2_SRC_CURR_BWL_COUNT        (DMA25_CURR_BWL_COUNT)
+#define MDMA2_SRC_BWM_COUNT             (DMA25_BWM_COUNT)
+#define MDMA2_SRC_CURR_BWM_COUNT        (DMA25_CURR_BWM_COUNT)
+#define MDMA3_DEST_NEXT_DESC_PTR        (DMA28_NEXT_DESC_PTR)
+#define MDMA3_DEST_START_ADDR           (DMA28_START_ADDR)
+#define MDMA3_DEST_CONFIG               (DMA28_CONFIG)
+#define MDMA3_DEST_X_COUNT              (DMA28_X_COUNT)
+#define MDMA3_DEST_X_MODIFY             (DMA28_X_MODIFY)
+#define MDMA3_DEST_Y_COUNT              (DMA28_Y_COUNT)
+#define MDMA3_DEST_Y_MODIFY             (DMA28_Y_MODIFY)
+#define MDMA3_DEST_CURR_DESC_PTR        (DMA28_CURR_DESC_PTR)
+#define MDMA3_DEST_PREV_DESC_PTR        (DMA28_PREV_DESC_PTR)
+#define MDMA3_DEST_CURR_ADDR            (DMA28_CURR_ADDR)
+#define MDMA3_DEST_IRQ_STATUS           (DMA28_IRQ_STATUS)
+#define MDMA3_DEST_CURR_X_COUNT         (DMA28_CURR_X_COUNT)
+#define MDMA3_DEST_CURR_Y_COUNT         (DMA28_CURR_Y_COUNT)
+#define MDMA3_DEST_BWL_COUNT            (DMA28_BWL_COUNT)
+#define MDMA3_DEST_CURR_BWL_COUNT       (DMA28_CURR_BWL_COUNT)
+#define MDMA3_DEST_BWM_COUNT            (DMA28_BWM_COUNT)
+#define MDMA3_DEST_CURR_BWM_COUNT       (DMA28_CURR_BWM_COUNT)
+#define MDMA3_SRC_NEXT_DESC_PTR         (DMA27_NEXT_DESC_PTR)
+#define MDMA3_SRC_START_ADDR            (DMA27_START_ADDR)
+#define MDMA3_SRC_CONFIG                (DMA27_CONFIG)
+#define MDMA3_SRC_X_COUNT               (DMA27_X_COUNT)
+#define MDMA3_SRC_X_MODIFY              (DMA27_X_MODIFY)
+#define MDMA3_SRC_Y_COUNT               (DMA27_Y_COUNT)
+#define MDMA3_SRC_Y_MODIFY              (DMA27_Y_MODIFY)
+#define MDMA3_SRC_CURR_DESC_PTR         (DMA27_CURR_DESC_PTR)
+#define MDMA3_SRC_PREV_DESC_PTR         (DMA27_PREV_DESC_PTR)
+#define MDMA3_SRC_CURR_ADDR             (DMA27_CURR_ADDR)
+#define MDMA3_SRC_IRQ_STATUS            (DMA27_IRQ_STATUS)
+#define MDMA3_SRC_CURR_X_COUNT          (DMA27_CURR_X_COUNT)
+#define MDMA3_SRC_CURR_Y_COUNT          (DMA27_CURR_Y_COUNT)
+#define MDMA3_SRC_BWL_COUNT             (DMA27_BWL_COUNT)
+#define MDMA3_SRC_CURR_BWL_COUNT        (DMA27_CURR_BWL_COUNT)
+#define MDMA3_SRC_BWM_COUNT             (DMA27_BWM_COUNT)
+#define MDMA3_SRC_CURR_BWM_COUNT        (DMA27_CURR_BWM_COUNT)
+
+
+/* =========================
+        DMC Registers
+   ========================= */
+
+/* =========================
+        DMC0
+   ========================= */
+#define DMC0_ID                     0xFFC80000         /* DMC0 Identification Register */
+#define DMC0_CTL                    0xFFC80004         /* DMC0 Control Register */
+#define DMC0_STAT                   0xFFC80008         /* DMC0 Status Register */
+#define DMC0_EFFCTL                 0xFFC8000C         /* DMC0 Efficiency Controller */
+#define DMC0_PRIO                   0xFFC80010         /* DMC0 Priority ID Register */
+#define DMC0_PRIOMSK                0xFFC80014         /* DMC0 Priority ID Mask */
+#define DMC0_CFG                    0xFFC80040         /* DMC0 SDRAM Configuration */
+#define DMC0_TR0                    0xFFC80044         /* DMC0 Timing Register 0 */
+#define DMC0_TR1                    0xFFC80048         /* DMC0 Timing Register 1 */
+#define DMC0_TR2                    0xFFC8004C         /* DMC0 Timing Register 2 */
+#define DMC0_MSK                    0xFFC8005C         /* DMC0 Mode Register Mask */
+#define DMC0_MR                     0xFFC80060         /* DMC0 Mode Shadow register */
+#define DMC0_EMR1                   0xFFC80064         /* DMC0 EMR1 Shadow Register */
+#define DMC0_EMR2                   0xFFC80068         /* DMC0 EMR2 Shadow Register */
+#define DMC0_EMR3                   0xFFC8006C         /* DMC0 EMR3 Shadow Register */
+#define DMC0_DLLCTL                 0xFFC80080         /* DMC0 DLL Control Register */
+#define DMC0_PADCTL                 0xFFC800C0         /* DMC0 PAD Control Register 0 */
+
+#define DEVSZ_64                0x000         /* DMC External Bank Size = 64Mbit */
+#define DEVSZ_128               0x100         /* DMC External Bank Size = 128Mbit */
+#define DEVSZ_256               0x200         /* DMC External Bank Size = 256Mbit */
+#define DEVSZ_512               0x300         /* DMC External Bank Size = 512Mbit */
+#define DEVSZ_1G                0x400         /* DMC External Bank Size = 1Gbit */
+#define DEVSZ_2G                0x500         /* DMC External Bank Size = 2Gbit */
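The DEVSZ_* values encode the external SDRAM bank size used when programming the controller. A hedged sketch of how board code might use one of them, assuming DEVSZ_* maps onto the bank-size field of DMC0_CFG (the rest of that register's layout is not spelled out here) and using the generic Blackfin bfin_read32()/bfin_write32() accessors; treat this as illustrative only:

	/* Sketch: select a 2 Gbit external bank, assuming the size field of
	 * DMC0_CFG is currently clear; real code would mask the field first. */
	bfin_write32(DMC0_CFG, bfin_read32(DMC0_CFG) | DEVSZ_2G);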
+
+
+/* =========================
+        L2CTL Registers
+   ========================= */
+
+/* =========================
+        L2CTL0
+   ========================= */
+#define L2CTL0_CTL                  0xFFCA3000         /* L2CTL0 L2 Control Register */
+#define L2CTL0_ACTL_C0              0xFFCA3004         /* L2CTL0 L2 Core 0 Access Control Register */
+#define L2CTL0_ACTL_C1              0xFFCA3008         /* L2CTL0 L2 Core 1 Access Control Register */
+#define L2CTL0_ACTL_SYS             0xFFCA300C         /* L2CTL0 L2 System Access Control Register */
+#define L2CTL0_STAT                 0xFFCA3010         /* L2CTL0 L2 Status Register */
+#define L2CTL0_RPCR                 0xFFCA3014         /* L2CTL0 L2 Read Priority Count Register */
+#define L2CTL0_WPCR                 0xFFCA3018         /* L2CTL0 L2 Write Priority Count Register */
+#define L2CTL0_RFA                  0xFFCA3024         /* L2CTL0 L2 Refresh Address Register */
+#define L2CTL0_ERRADDR0             0xFFCA3040         /* L2CTL0 L2 Bank 0 ECC Error Address Register */
+#define L2CTL0_ERRADDR1             0xFFCA3044         /* L2CTL0 L2 Bank 1 ECC Error Address Register */
+#define L2CTL0_ERRADDR2             0xFFCA3048         /* L2CTL0 L2 Bank 2 ECC Error Address Register */
+#define L2CTL0_ERRADDR3             0xFFCA304C         /* L2CTL0 L2 Bank 3 ECC Error Address Register */
+#define L2CTL0_ERRADDR4             0xFFCA3050         /* L2CTL0 L2 Bank 4 ECC Error Address Register */
+#define L2CTL0_ERRADDR5             0xFFCA3054         /* L2CTL0 L2 Bank 5 ECC Error Address Register */
+#define L2CTL0_ERRADDR6             0xFFCA3058         /* L2CTL0 L2 Bank 6 ECC Error Address Register */
+#define L2CTL0_ERRADDR7             0xFFCA305C         /* L2CTL0 L2 Bank 7 ECC Error Address Register */
+#define L2CTL0_ET0                  0xFFCA3080         /* L2CTL0 L2 AXI Error 0 Type Register */
+#define L2CTL0_EADDR0               0xFFCA3084         /* L2CTL0 L2 AXI Error 0 Address Register */
+#define L2CTL0_ET1                  0xFFCA3088         /* L2CTL0 L2 AXI Error 1 Type Register */
+#define L2CTL0_EADDR1               0xFFCA308C         /* L2CTL0 L2 AXI Error 1 Address Register */
+
+
+/* =========================
+        SEC Registers
+   ========================= */
+/* ------------------------------------------------------------------------------------------------------------------------
+       SEC Core Interface (SCI) Register Definitions
+   ------------------------------------------------------------------------------------------------------------------------ */
+
+#define SEC_SCI_BASE               0xFFCA4400         /* SEC Core Interface (SCI) base address */
+#define SEC_SCI_OFF                0x40               /* Stride between per-core SCI register blocks */
+#define SEC_CCTL                   0x0                /* SEC Core Control Register n */
+#define SEC_CSTAT                  0x4                /* SEC Core Status Register n */
+#define SEC_CPND                   0x8                /* SEC Core Pending IRQ Register n */
+#define SEC_CACT                   0xC                /* SEC Core Active IRQ Register n */
+#define SEC_CPMSK                  0x10               /* SEC Core IRQ Priority Mask Register n */
+#define SEC_CGMSK                  0x14               /* SEC Core IRQ Group Mask Register n */
+#define SEC_CPLVL                  0x18               /* SEC Core IRQ Priority Level Register n */
+#define SEC_CSID                   0x1C               /* SEC Core IRQ Source ID Register n */
+
+/* Accessors for register 'reg' of core n's SCI block at SEC_SCI_BASE + n * SEC_SCI_OFF */
+#define bfin_read_SEC_SCI(n, reg) bfin_read32(SEC_SCI_BASE + (n) * SEC_SCI_OFF + (reg))
+#define bfin_write_SEC_SCI(n, reg, val) \
+	bfin_write32(SEC_SCI_BASE + (n) * SEC_SCI_OFF + (reg), val)
+
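The two helpers above index the per-core SCI block by core number and register offset. A minimal usage sketch; the choice of core 0, the polling style, and the handle_pending_sid() consumer are assumptions for illustration, not taken from this patch:

	/* Sketch: if core 0 reports a valid pending IRQ, fetch its source ID. */
	static void sci_poll_core0(void)
	{
		u32 cstat = bfin_read_SEC_SCI(0, SEC_CSTAT);

		if (cstat & SEC_CSTAT_VALID_PND)
			handle_pending_sid(bfin_read_SEC_SCI(0, SEC_CSID) & SEC_CSID_SID);
	}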
+/* ------------------------------------------------------------------------------------------------------------------------
+       SEC Fault Management Interface (SFI) Register Definitions
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_FCTL                   0xFFCA4010         /* SEC Fault Control Register */
+#define SEC_FSTAT                  0xFFCA4014         /* SEC Fault Status Register */
+#define SEC_FSID                   0xFFCA4018         /* SEC Fault Source ID Register */
+#define SEC_FEND                   0xFFCA401C         /* SEC Fault End Register */
+#define SEC_FDLY                   0xFFCA4020         /* SEC Fault Delay Register */
+#define SEC_FDLY_CUR               0xFFCA4024         /* SEC Fault Delay Current Register */
+#define SEC_FSRDLY                 0xFFCA4028         /* SEC Fault System Reset Delay Register */
+#define SEC_FSRDLY_CUR             0xFFCA402C         /* SEC Fault System Reset Delay Current Register */
+#define SEC_FCOPP                  0xFFCA4030         /* SEC Fault COP Period Register */
+#define SEC_FCOPP_CUR              0xFFCA4034         /* SEC Fault COP Period Current Register */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+       SEC Global Register Definitions
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_GCTL                   0xFFCA4000         /* SEC Global Control Register */
+#define SEC_GSTAT                  0xFFCA4004         /* SEC Global Status Register */
+#define SEC_RAISE                  0xFFCA4008         /* SEC Global Raise Register */
+#define SEC_END                    0xFFCA400C         /* SEC Global End Register */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+       SEC Source Interface (SSI) Register Definitions
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_SCTL0                  0xFFCA4800         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL1                  0xFFCA4808         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL2                  0xFFCA4810         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL3                  0xFFCA4818         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL4                  0xFFCA4820         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL5                  0xFFCA4828         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL6                  0xFFCA4830         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL7                  0xFFCA4838         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL8                  0xFFCA4840         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL9                  0xFFCA4848         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL10                 0xFFCA4850         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL11                 0xFFCA4858         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL12                 0xFFCA4860         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL13                 0xFFCA4868         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL14                 0xFFCA4870         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL15                 0xFFCA4878         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL16                 0xFFCA4880         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL17                 0xFFCA4888         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL18                 0xFFCA4890         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL19                 0xFFCA4898         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL20                 0xFFCA48A0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL21                 0xFFCA48A8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL22                 0xFFCA48B0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL23                 0xFFCA48B8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL24                 0xFFCA48C0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL25                 0xFFCA48C8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL26                 0xFFCA48D0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL27                 0xFFCA48D8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL28                 0xFFCA48E0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL29                 0xFFCA48E8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL30                 0xFFCA48F0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL31                 0xFFCA48F8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL32                 0xFFCA4900         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL33                 0xFFCA4908         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL34                 0xFFCA4910         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL35                 0xFFCA4918         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL36                 0xFFCA4920         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL37                 0xFFCA4928         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL38                 0xFFCA4930         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL39                 0xFFCA4938         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL40                 0xFFCA4940         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL41                 0xFFCA4948         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL42                 0xFFCA4950         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL43                 0xFFCA4958         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL44                 0xFFCA4960         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL45                 0xFFCA4968         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL46                 0xFFCA4970         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL47                 0xFFCA4978         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL48                 0xFFCA4980         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL49                 0xFFCA4988         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL50                 0xFFCA4990         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL51                 0xFFCA4998         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL52                 0xFFCA49A0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL53                 0xFFCA49A8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL54                 0xFFCA49B0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL55                 0xFFCA49B8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL56                 0xFFCA49C0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL57                 0xFFCA49C8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL58                 0xFFCA49D0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL59                 0xFFCA49D8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL60                 0xFFCA49E0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL61                 0xFFCA49E8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL62                 0xFFCA49F0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL63                 0xFFCA49F8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL64                 0xFFCA4A00         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL65                 0xFFCA4A08         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL66                 0xFFCA4A10         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL67                 0xFFCA4A18         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL68                 0xFFCA4A20         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL69                 0xFFCA4A28         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL70                 0xFFCA4A30         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL71                 0xFFCA4A38         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL72                 0xFFCA4A40         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL73                 0xFFCA4A48         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL74                 0xFFCA4A50         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL75                 0xFFCA4A58         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL76                 0xFFCA4A60         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL77                 0xFFCA4A68         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL78                 0xFFCA4A70         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL79                 0xFFCA4A78         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL80                 0xFFCA4A80         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL81                 0xFFCA4A88         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL82                 0xFFCA4A90         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL83                 0xFFCA4A98         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL84                 0xFFCA4AA0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL85                 0xFFCA4AA8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL86                 0xFFCA4AB0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL87                 0xFFCA4AB8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL88                 0xFFCA4AC0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL89                 0xFFCA4AC8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL90                 0xFFCA4AD0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL91                 0xFFCA4AD8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL92                 0xFFCA4AE0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL93                 0xFFCA4AE8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL94                 0xFFCA4AF0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL95                 0xFFCA4AF8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL96                 0xFFCA4B00         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL97                 0xFFCA4B08         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL98                 0xFFCA4B10         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL99                 0xFFCA4B18         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL100                0xFFCA4B20         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL101                0xFFCA4B28         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL102                0xFFCA4B30         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL103                0xFFCA4B38         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL104                0xFFCA4B40         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL105                0xFFCA4B48         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL106                0xFFCA4B50         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL107                0xFFCA4B58         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL108                0xFFCA4B60         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL109                0xFFCA4B68         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL110                0xFFCA4B70         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL111                0xFFCA4B78         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL112                0xFFCA4B80         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL113                0xFFCA4B88         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL114                0xFFCA4B90         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL115                0xFFCA4B98         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL116                0xFFCA4BA0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL117                0xFFCA4BA8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL118                0xFFCA4BB0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL119                0xFFCA4BB8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL120                0xFFCA4BC0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL121                0xFFCA4BC8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL122                0xFFCA4BD0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL123                0xFFCA4BD8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL124                0xFFCA4BE0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL125                0xFFCA4BE8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL126                0xFFCA4BF0         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL127                0xFFCA4BF8         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL128                0xFFCA4C00         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL129                0xFFCA4C08         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL130                0xFFCA4C10         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL131                0xFFCA4C18         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL132                0xFFCA4C20         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL133                0xFFCA4C28         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL134                0xFFCA4C30         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL135                0xFFCA4C38         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL136                0xFFCA4C40         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL137                0xFFCA4C48         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL138                0xFFCA4C50         /* SEC IRQ Source Control Register n */
+#define SEC_SCTL139                0xFFCA4C58         /* SEC IRQ Source Control Register n */
+#define SEC_SSTAT0                 0xFFCA4804         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT1                 0xFFCA480C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT2                 0xFFCA4814         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT3                 0xFFCA481C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT4                 0xFFCA4824         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT5                 0xFFCA482C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT6                 0xFFCA4834         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT7                 0xFFCA483C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT8                 0xFFCA4844         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT9                 0xFFCA484C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT10                0xFFCA4854         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT11                0xFFCA485C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT12                0xFFCA4864         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT13                0xFFCA486C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT14                0xFFCA4874         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT15                0xFFCA487C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT16                0xFFCA4884         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT17                0xFFCA488C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT18                0xFFCA4894         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT19                0xFFCA489C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT20                0xFFCA48A4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT21                0xFFCA48AC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT22                0xFFCA48B4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT23                0xFFCA48BC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT24                0xFFCA48C4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT25                0xFFCA48CC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT26                0xFFCA48D4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT27                0xFFCA48DC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT28                0xFFCA48E4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT29                0xFFCA48EC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT30                0xFFCA48F4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT31                0xFFCA48FC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT32                0xFFCA4904         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT33                0xFFCA490C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT34                0xFFCA4914         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT35                0xFFCA491C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT36                0xFFCA4924         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT37                0xFFCA492C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT38                0xFFCA4934         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT39                0xFFCA493C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT40                0xFFCA4944         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT41                0xFFCA494C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT42                0xFFCA4954         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT43                0xFFCA495C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT44                0xFFCA4964         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT45                0xFFCA496C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT46                0xFFCA4974         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT47                0xFFCA497C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT48                0xFFCA4984         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT49                0xFFCA498C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT50                0xFFCA4994         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT51                0xFFCA499C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT52                0xFFCA49A4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT53                0xFFCA49AC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT54                0xFFCA49B4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT55                0xFFCA49BC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT56                0xFFCA49C4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT57                0xFFCA49CC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT58                0xFFCA49D4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT59                0xFFCA49DC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT60                0xFFCA49E4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT61                0xFFCA49EC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT62                0xFFCA49F4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT63                0xFFCA49FC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT64                0xFFCA4A04         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT65                0xFFCA4A0C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT66                0xFFCA4A14         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT67                0xFFCA4A1C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT68                0xFFCA4A24         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT69                0xFFCA4A2C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT70                0xFFCA4A34         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT71                0xFFCA4A3C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT72                0xFFCA4A44         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT73                0xFFCA4A4C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT74                0xFFCA4A54         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT75                0xFFCA4A5C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT76                0xFFCA4A64         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT77                0xFFCA4A6C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT78                0xFFCA4A74         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT79                0xFFCA4A7C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT80                0xFFCA4A84         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT81                0xFFCA4A8C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT82                0xFFCA4A94         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT83                0xFFCA4A9C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT84                0xFFCA4AA4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT85                0xFFCA4AAC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT86                0xFFCA4AB4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT87                0xFFCA4ABC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT88                0xFFCA4AC4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT89                0xFFCA4ACC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT90                0xFFCA4AD4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT91                0xFFCA4ADC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT92                0xFFCA4AE4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT93                0xFFCA4AEC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT94                0xFFCA4AF4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT95                0xFFCA4AFC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT96                0xFFCA4B04         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT97                0xFFCA4B0C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT98                0xFFCA4B14         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT99                0xFFCA4B1C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT100               0xFFCA4B24         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT101               0xFFCA4B2C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT102               0xFFCA4B34         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT103               0xFFCA4B3C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT104               0xFFCA4B44         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT105               0xFFCA4B4C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT106               0xFFCA4B54         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT107               0xFFCA4B5C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT108               0xFFCA4B64         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT109               0xFFCA4B6C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT110               0xFFCA4B74         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT111               0xFFCA4B7C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT112               0xFFCA4B84         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT113               0xFFCA4B8C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT114               0xFFCA4B94         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT115               0xFFCA4B9C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT116               0xFFCA4BA4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT117               0xFFCA4BAC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT118               0xFFCA4BB4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT119               0xFFCA4BBC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT120               0xFFCA4BC4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT121               0xFFCA4BCC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT122               0xFFCA4BD4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT123               0xFFCA4BDC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT124               0xFFCA4BE4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT125               0xFFCA4BEC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT126               0xFFCA4BF4         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT127               0xFFCA4BFC         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT128               0xFFCA4C04         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT129               0xFFCA4C0C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT130               0xFFCA4C14         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT131               0xFFCA4C1C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT132               0xFFCA4C24         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT133               0xFFCA4C2C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT134               0xFFCA4C34         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT135               0xFFCA4C3C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT136               0xFFCA4C44         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT137               0xFFCA4C4C         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT138               0xFFCA4C54         /* SEC IRQ Source Status Register n */
+#define SEC_SSTAT139               0xFFCA4C5C         /* SEC IRQ Source Status Register n */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_CCTL                             Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_CCTL_LOCK                   0x80000000    /* LOCK: Lock */
+#define SEC_CCTL_NMI_EN                 0x00010000    /* NMIEN: Enable */
+#define SEC_CCTL_WAITIDLE               0x00001000    /* WFI: Wait for Idle */
+#define SEC_CCTL_RESET                  0x00000002    /* RESET: Reset */
+#define SEC_CCTL_EN                     0x00000001    /* EN: Enable */
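A hedged sketch of how these control bits could be combined with the bfin_write_SEC_SCI() helper defined earlier; the reset-before-enable ordering and the choice of core 0 are assumptions, not something this patch dictates:

	/* Sketch: reset core 0's SEC interface, then enable it with NMI delivery. */
	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);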
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_CSTAT                            Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_CSTAT_NMI                   0x00010000    /* NMI Status */
+#define SEC_CSTAT_WAITING               0x00001000    /* WFI: Waiting */
+#define SEC_CSTAT_VALID_SID             0x00000400    /* SIDV: Valid */
+#define SEC_CSTAT_VALID_ACT             0x00000200    /* ACTV: Valid */
+#define SEC_CSTAT_VALID_PND             0x00000100    /* PNDV: Valid */
+#define SEC_CSTAT_ERRC                  0x00000030    /* Error Cause */
+#define SEC_CSTAT_ACKERR                0x00000010    /* ERRC: Acknowledge Error */
+#define SEC_CSTAT_ERR                   0x00000002    /* ERR: Error Occurred */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_CPND                             Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_CPND_PRIO                   0x0000FF00    /* Highest Pending IRQ Priority */
+#define SEC_CPND_SID                    0x000000FF    /* Highest Pending IRQ Source ID */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_CACT                             Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_CACT_PRIO                   0x0000FF00    /* Highest Active IRQ Priority */
+#define SEC_CACT_SID                    0x000000FF    /* Highest Active IRQ Source ID */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_CPMSK                            Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_CPMSK_LOCK                  0x80000000    /* LOCK: Lock */
+#define SEC_CPMSK_PRIO                  0x000000FF    /* IRQ Priority Mask */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_CGMSK                            Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_CGMSK_LOCK                  0x80000000    /* LOCK: Lock */
+#define SEC_CGMSK_MASK                  0x00000100    /* UGRP: Mask Ungrouped Sources */
+#define SEC_CGMSK_GRP                   0x0000000F    /* Grouped Mask */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_CPLVL                            Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_CPLVL_LOCK                  0x80000000    /* LOCK: Lock */
+#define SEC_CPLVL_PLVL                  0x00000007    /* Priority Levels */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_CSID                             Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_CSID_SID                    0x000000FF    /* Source ID */
+
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_FCTL                             Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_FCTL_LOCK                   0x80000000    /* LOCK: Lock */
+#define SEC_FCTL_FLTPND_MODE            0x00002000    /* TES: Fault Pending Mode */
+#define SEC_FCTL_COP_MODE               0x00001000    /* CMS: COP Mode */
+#define SEC_FCTL_FLTIN_EN               0x00000080    /* FIEN: Enable */
+#define SEC_FCTL_SYSRST_EN              0x00000040    /* SREN: Enable */
+#define SEC_FCTL_TRGOUT_EN              0x00000020    /* TOEN: Enable */
+#define SEC_FCTL_FLTOUT_EN              0x00000010    /* FOEN: Enable */
+#define SEC_FCTL_RESET                  0x00000002    /* RESET: Reset */
+#define SEC_FCTL_EN                     0x00000001    /* EN: Enable */
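As a rough illustration of the fault-control bits, a driver could enable the fault unit and route faults to the external fault output; the particular bit combination below is only an example, not a configuration prescribed by this patch:

	/* Sketch: enable the fault management unit and drive the fault output pin. */
	bfin_write32(SEC_FCTL, SEC_FCTL_EN | SEC_FCTL_FLTOUT_EN);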
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_FSTAT                            Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_FSTAT_NXTFLT                0x00000400    /* NPND: Pending */
+#define SEC_FSTAT_FLTACT                0x00000200    /* ACT: Active Fault */
+#define SEC_FSTAT_FLTPND                0x00000100    /* PND: Pending */
+#define SEC_FSTAT_ERRC                  0x00000030    /* Error Cause */
+#define SEC_FSTAT_ENDERR                0x00000020    /* ERRC: End Error */
+#define SEC_FSTAT_ERR                   0x00000002    /* ERR: Error Occurred */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_FSID                             Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_FSID_SRC_EXTFLT             0x00010000    /* FEXT: Fault External */
+#define SEC_FSID_SID                    0x000000FF    /* Source ID */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_FEND                             Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_FEND_END_EXTFLT             0x00010000    /* FEXT: Fault External */
+#define SEC_FEND_SID                    0x000000FF    /* Source ID */
+
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_GCTL                             Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_GCTL_LOCK                   0x80000000    /* Lock */
+#define SEC_GCTL_RESET                  0x00000002    /* Reset */
+#define SEC_GCTL_EN                     0x00000001    /* Enable */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_GSTAT                            Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_GSTAT_LWERR                 0x80000000    /* LWERR: Error Occurred */
+#define SEC_GSTAT_ADRERR                0x40000000    /* ADRERR: Error Occurred */
+#define SEC_GSTAT_SID                   0x00FF0000    /* Source ID for SSI Error */
+#define SEC_GSTAT_SCI                   0x00000F00    /* SCI ID for SCI Error */
+#define SEC_GSTAT_ERRC                  0x00000030    /* Error Cause */
+#define SEC_GSTAT_SCIERR                0x00000010    /* ERRC: SCI Error */
+#define SEC_GSTAT_SSIERR                0x00000020    /* ERRC: SSI Error */
+#define SEC_GSTAT_ERR                   0x00000002    /* ERR: Error Occurred */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_RAISE                            Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_RAISE_SID                   0x000000FF    /* Source ID IRQ Set to Pending */
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_END                              Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_END_SID                     0x000000FF    /* Source ID IRQ to End */
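SEC_RAISE and SEC_END both take a source ID in their low byte, so software can inject an interrupt for a source and later retire it. A hypothetical sketch, with source ID 3 chosen arbitrarily:

	/* Sketch: force source 3 pending, then signal end-of-interrupt for it. */
	bfin_write32(SEC_RAISE, 3 & SEC_RAISE_SID);
	/* ... the handler for source 3 runs ... */
	bfin_write32(SEC_END, 3 & SEC_END_SID);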
+
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_SCTL                             Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_SCTL_LOCK                   0x80000000    /* Lock */
+#define SEC_SCTL_CTG                    0x0F000000    /* Core Target Select */
+#define SEC_SCTL_GRP                    0x000F0000    /* Group Select */
+#define SEC_SCTL_PRIO                   0x0000FF00    /* Priority Level Select */
+#define SEC_SCTL_ERR_EN                 0x00000010    /* ERREN: Enable */
+#define SEC_SCTL_EDGE                   0x00000008    /* ES: Edge Sensitive */
+#define SEC_SCTL_SRC_EN                 0x00000004    /* SEN: Enable */
+#define SEC_SCTL_FAULT_EN               0x00000002    /* FEN: Enable */
+#define SEC_SCTL_INT_EN                 0x00000001    /* IEN: Enable */
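Each interrupt source has one SEC_SCTLn register built from these fields. A minimal sketch for source 0, where the chosen bits (source enabled, delivered as a core interrupt, with error reporting) are only an example:

	/* Sketch: enable source 0 and deliver it as a core interrupt with error reporting. */
	bfin_write32(SEC_SCTL0, SEC_SCTL_SRC_EN | SEC_SCTL_INT_EN | SEC_SCTL_ERR_EN);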
+
+/* ------------------------------------------------------------------------------------------------------------------------
+        SEC_SSTAT                            Pos/Masks     Description
+   ------------------------------------------------------------------------------------------------------------------------ */
+#define SEC_SSTAT_CHID                  0x00FF0000    /* Channel ID */
+#define SEC_SSTAT_ACTIVE_SRC            0x00000200    /* ACT: Active Source */
+#define SEC_SSTAT_PENDING               0x00000100    /* PND: Pending */
+#define SEC_SSTAT_ERRC                  0x00000030    /* Error Cause */
+#define SEC_SSTAT_ENDERR                0x00000020    /* ERRC: End Error */
+#define SEC_SSTAT_ERR                   0x00000002    /* Error */
+
+
+/* =========================
+        RCU Registers
+   ========================= */
+
+/* =========================
+        RCU0
+   ========================= */
+#define RCU0_CTL                    0xFFCA6000         /* RCU0 Control Register */
+#define RCU0_STAT                   0xFFCA6004         /* RCU0 Status Register */
+#define RCU0_CRCTL                  0xFFCA6008         /* RCU0 Core Reset Control Register */
+#define RCU0_CRSTAT                 0xFFCA600C         /* RCU0 Core Reset Status Register */
+#define RCU0_SIDIS                  0xFFCA6010         /* RCU0 System Interface Disable Register */
+#define RCU0_SISTAT                 0xFFCA6014         /* RCU0 System Interface Status Register */
+#define RCU0_SVECT_LCK              0xFFCA6018         /* RCU0 SVECT Lock Register */
+#define RCU0_BCODE                  0xFFCA601C         /* RCU0 Boot Code Register */
+#define RCU0_SVECT0                 0xFFCA6020         /* RCU0 Software Vector Register n */
+#define RCU0_SVECT1                 0xFFCA6024         /* RCU0 Software Vector Register n */
+
+
+/* =========================
+        CGU0
+   ========================= */
+#define CGU0_CTL                    0xFFCA8000         /* CGU0 Control Register */
+#define CGU0_STAT                   0xFFCA8004         /* CGU0 Status Register */
+#define CGU0_DIV                    0xFFCA8008         /* CGU0 Divisor Register */
+#define CGU0_CLKOUTSEL              0xFFCA800C         /* CGU0 CLKOUT Select Register */
+
+
+/* =========================
+        DPM Registers
+   ========================= */
+
+/* =========================
+        DPM0
+   ========================= */
+#define DPM0_CTL                    0xFFCA9000         /* DPM0 Control Register */
+#define DPM0_STAT                   0xFFCA9004         /* DPM0 Status Register */
+#define DPM0_CCBF_DIS               0xFFCA9008         /* DPM0 Core Clock Buffer Disable Register */
+#define DPM0_CCBF_EN                0xFFCA900C         /* DPM0 Core Clock Buffer Enable Register */
+#define DPM0_CCBF_STAT              0xFFCA9010         /* DPM0 Core Clock Buffer Status Register */
+#define DPM0_CCBF_STAT_STKY         0xFFCA9014         /* DPM0 Core Clock Buffer Status Sticky Register */
+#define DPM0_SCBF_DIS               0xFFCA9018         /* DPM0 System Clock Buffer Disable Register */
+#define DPM0_WAKE_EN                0xFFCA901C         /* DPM0 Wakeup Enable Register */
+#define DPM0_WAKE_POL               0xFFCA9020         /* DPM0 Wakeup Polarity Register */
+#define DPM0_WAKE_STAT              0xFFCA9024         /* DPM0 Wakeup Status Register */
+#define DPM0_HIB_DIS                0xFFCA9028         /* DPM0 Hibernate Disable Register */
+#define DPM0_PGCNTR                 0xFFCA902C         /* DPM0 Power Good Counter Register */
+#define DPM0_RESTORE0               0xFFCA9030         /* DPM0 Restore Register */
+#define DPM0_RESTORE1               0xFFCA9034         /* DPM0 Restore Register */
+#define DPM0_RESTORE2               0xFFCA9038         /* DPM0 Restore Register */
+#define DPM0_RESTORE3               0xFFCA903C         /* DPM0 Restore Register */
+#define DPM0_RESTORE4               0xFFCA9040         /* DPM0 Restore Register */
+#define DPM0_RESTORE5               0xFFCA9044         /* DPM0 Restore Register */
+#define DPM0_RESTORE6               0xFFCA9048         /* DPM0 Restore Register */
+#define DPM0_RESTORE7               0xFFCA904C         /* DPM0 Restore Register */
+#define DPM0_RESTORE8               0xFFCA9050         /* DPM0 Restore Register */
+#define DPM0_RESTORE9               0xFFCA9054         /* DPM0 Restore Register */
+#define DPM0_RESTORE10              0xFFCA9058         /* DPM0 Restore Register */
+#define DPM0_RESTORE11              0xFFCA905C         /* DPM0 Restore Register */
+#define DPM0_RESTORE12              0xFFCA9060         /* DPM0 Restore Register */
+#define DPM0_RESTORE13              0xFFCA9064         /* DPM0 Restore Register */
+#define DPM0_RESTORE14              0xFFCA9068         /* DPM0 Restore Register */
+#define DPM0_RESTORE15              0xFFCA906C         /* DPM0 Restore Register */
+
+
+/* =========================
+        DBG Registers
+   ========================= */
+
+/* USB registers */
+#define USB_FADDR                  0xFFCC1000         /* USB Device Address in Peripheral Mode */
+#define USB_POWER                  0xFFCC1001         /* USB Power and Device Control */
+#define USB_INTRTX                 0xFFCC1002         /* USB Transmit Interrupt */
+#define USB_INTRRX                 0xFFCC1004         /* USB Receive Interrupts */
+#define USB_INTRTXE                0xFFCC1006         /* USB Transmit Interrupt Enable */
+#define USB_INTRRXE                0xFFCC1008         /* USB Receive Interrupt Enable */
+#define USB_INTRUSB                0xFFCC100A         /* USB USB Interrupts */
+#define USB_INTRUSBE               0xFFCC100B         /* USB USB Interrupt Enable */
+#define USB_FRAME                  0xFFCC100C         /* USB Frame Number */
+#define USB_INDEX                  0xFFCC100E         /* USB Index */
+#define USB_TESTMODE               0xFFCC100F         /* USB Testmodes */
+#define USB_EPI_TXMAXP0            0xFFCC1010         /* USB Transmit Maximum Packet Length */
+#define USB_EP_NI0_TXMAXP          0xFFCC1010
+#define USB_EP0I_CSR0_H            0xFFCC1012         /* USB Config and Status EP0 */
+#define USB_EPI_TXCSR0_H           0xFFCC1012         /* USB Transmit Configuration and Status */
+#define USB_EP0I_CSR0_P            0xFFCC1012         /* USB Config and Status EP0 */
+#define USB_EPI_TXCSR0_P           0xFFCC1012         /* USB Transmit Configuration and Status */
+#define USB_EPI_RXMAXP0            0xFFCC1014         /* USB Receive Maximum Packet Length */
+#define USB_EPI_RXCSR0_H           0xFFCC1016         /* USB Receive Configuration and Status Register */
+#define USB_EPI_RXCSR0_P           0xFFCC1016         /* USB Receive Configuration and Status Register */
+#define USB_EP0I_CNT0              0xFFCC1018         /* USB Number of Received Bytes for Endpoint 0 */
+#define USB_EPI_RXCNT0             0xFFCC1018         /* USB Number of Bytes Received */
+#define USB_EP0I_TYPE0             0xFFCC101A         /* USB Speed for Endpoint 0 */
+#define USB_EPI_TXTYPE0            0xFFCC101A         /* USB Transmit Type */
+#define USB_EP0I_NAKLIMIT0         0xFFCC101B         /* USB NAK Response Timeout for Endpoint 0 */
+#define USB_EPI_TXINTERVAL0        0xFFCC101B         /* USB Transmit Polling Interval */
+#define USB_EPI_RXTYPE0            0xFFCC101C         /* USB Receive Type */
+#define USB_EPI_RXINTERVAL0        0xFFCC101D         /* USB Receive Polling Interval */
+#define USB_EP0I_CFGDATA0          0xFFCC101F         /* USB Configuration Information */
+#define USB_FIFOB0                 0xFFCC1020         /* USB FIFO Data */
+#define USB_FIFOB1                 0xFFCC1024         /* USB FIFO Data */
+#define USB_FIFOB2                 0xFFCC1028         /* USB FIFO Data */
+#define USB_FIFOB3                 0xFFCC102C         /* USB FIFO Data */
+#define USB_FIFOB4                 0xFFCC1030         /* USB FIFO Data */
+#define USB_FIFOB5                 0xFFCC1034         /* USB FIFO Data */
+#define USB_FIFOB6                 0xFFCC1038         /* USB FIFO Data */
+#define USB_FIFOB7                 0xFFCC103C         /* USB FIFO Data */
+#define USB_FIFOB8                 0xFFCC1040         /* USB FIFO Data */
+#define USB_FIFOB9                 0xFFCC1044         /* USB FIFO Data */
+#define USB_FIFOB10                0xFFCC1048         /* USB FIFO Data */
+#define USB_FIFOB11                0xFFCC104C         /* USB FIFO Data */
+#define USB_FIFOH0                 0xFFCC1020         /* USB FIFO Data */
+#define USB_FIFOH1                 0xFFCC1024         /* USB FIFO Data */
+#define USB_FIFOH2                 0xFFCC1028         /* USB FIFO Data */
+#define USB_FIFOH3                 0xFFCC102C         /* USB FIFO Data */
+#define USB_FIFOH4                 0xFFCC1030         /* USB FIFO Data */
+#define USB_FIFOH5                 0xFFCC1034         /* USB FIFO Data */
+#define USB_FIFOH6                 0xFFCC1038         /* USB FIFO Data */
+#define USB_FIFOH7                 0xFFCC103C         /* USB FIFO Data */
+#define USB_FIFOH8                 0xFFCC1040         /* USB FIFO Data */
+#define USB_FIFOH9                 0xFFCC1044         /* USB FIFO Data */
+#define USB_FIFOH10                0xFFCC1048         /* USB FIFO Data */
+#define USB_FIFOH11                0xFFCC104C         /* USB FIFO Data */
+#define USB_FIFO0                  0xFFCC1020         /* USB FIFO Data */
+#define USB_EP0_FIFO               0xFFCC1020
+#define USB_FIFO1                  0xFFCC1024         /* USB FIFO Data */
+#define USB_FIFO2                  0xFFCC1028         /* USB FIFO Data */
+#define USB_FIFO3                  0xFFCC102C         /* USB FIFO Data */
+#define USB_FIFO4                  0xFFCC1030         /* USB FIFO Data */
+#define USB_FIFO5                  0xFFCC1034         /* USB FIFO Data */
+#define USB_FIFO6                  0xFFCC1038         /* USB FIFO Data */
+#define USB_FIFO7                  0xFFCC103C         /* USB FIFO Data */
+#define USB_FIFO8                  0xFFCC1040         /* USB FIFO Data */
+#define USB_FIFO9                  0xFFCC1044         /* USB FIFO Data */
+#define USB_FIFO10                 0xFFCC1048         /* USB FIFO Data */
+#define USB_FIFO11                 0xFFCC104C         /* USB FIFO Data */
+#define USB_OTG_DEV_CTL            0xFFCC1060         /* USB Device Control */
+#define USB_TXFIFOSZ               0xFFCC1062         /* USB Transmit FIFO Size */
+#define USB_RXFIFOSZ               0xFFCC1063         /* USB Receive FIFO Size */
+#define USB_TXFIFOADDR             0xFFCC1064         /* USB Transmit FIFO Address */
+#define USB_RXFIFOADDR             0xFFCC1066         /* USB Receive FIFO Address */
+#define USB_VENDSTAT               0xFFCC1068         /* USB Vendor Status */
+#define USB_HWVERS                 0xFFCC106C         /* USB Hardware Version */
+#define USB_EPINFO                 0xFFCC1078         /* USB Endpoint Info */
+#define USB_RAMINFO                0xFFCC1079         /* USB RAM Information */
+#define USB_LINKINFO               0xFFCC107A         /* USB Programmable Delay Values */
+#define USB_VPLEN                  0xFFCC107B         /* USB VBus Pulse Duration */
+#define USB_HS_EOF1                0xFFCC107C         /* USB High Speed End of Frame Remaining */
+#define USB_FS_EOF1                0xFFCC107D         /* USB Full Speed End of Frame Remaining */
+#define USB_LS_EOF1                0xFFCC107E         /* USB Low Speed End of Frame Remaining */
+#define USB_SOFT_RST               0xFFCC107F         /* USB Software Reset */
+#define USB_TXFUNCADDR0            0xFFCC1080         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR1            0xFFCC1088         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR2            0xFFCC1090         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR3            0xFFCC1098         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR4            0xFFCC10A0         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR5            0xFFCC10A8         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR6            0xFFCC10B0         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR7            0xFFCC10B8         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR8            0xFFCC10C0         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR9            0xFFCC10C8         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR10           0xFFCC10D0         /* USB Transmit Function Address */
+#define USB_TXFUNCADDR11           0xFFCC10D8         /* USB Transmit Function Address */
+#define USB_TXHUBADDR0             0xFFCC1082         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR1             0xFFCC108A         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR2             0xFFCC1092         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR3             0xFFCC109A         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR4             0xFFCC10A2         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR5             0xFFCC10AA         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR6             0xFFCC10B2         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR7             0xFFCC10BA         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR8             0xFFCC10C2         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR9             0xFFCC10CA         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR10            0xFFCC10D2         /* USB Transmit Hub Address */
+#define USB_TXHUBADDR11            0xFFCC10DA         /* USB Transmit Hub Address */
+#define USB_TXHUBPORT0             0xFFCC1083         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT1             0xFFCC108B         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT2             0xFFCC1093         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT3             0xFFCC109B         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT4             0xFFCC10A3         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT5             0xFFCC10AB         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT6             0xFFCC10B3         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT7             0xFFCC10BB         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT8             0xFFCC10C3         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT9             0xFFCC10CB         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT10            0xFFCC10D3         /* USB Transmit Hub Port */
+#define USB_TXHUBPORT11            0xFFCC10DB         /* USB Transmit Hub Port */
+#define USB_RXFUNCADDR0            0xFFCC1084         /* USB Receive Function Address */
+#define USB_RXFUNCADDR1            0xFFCC108C         /* USB Receive Function Address */
+#define USB_RXFUNCADDR2            0xFFCC1094         /* USB Receive Function Address */
+#define USB_RXFUNCADDR3            0xFFCC109C         /* USB Receive Function Address */
+#define USB_RXFUNCADDR4            0xFFCC10A4         /* USB Receive Function Address */
+#define USB_RXFUNCADDR5            0xFFCC10AC         /* USB Receive Function Address */
+#define USB_RXFUNCADDR6            0xFFCC10B4         /* USB Receive Function Address */
+#define USB_RXFUNCADDR7            0xFFCC10BC         /* USB Receive Function Address */
+#define USB_RXFUNCADDR8            0xFFCC10C4         /* USB Receive Function Address */
+#define USB_RXFUNCADDR9            0xFFCC10CC         /* USB Receive Function Address */
+#define USB_RXFUNCADDR10           0xFFCC10D4         /* USB Receive Function Address */
+#define USB_RXFUNCADDR11           0xFFCC10DC         /* USB Receive Function Address */
+#define USB_RXHUBADDR0             0xFFCC1086         /* USB Receive Hub Address */
+#define USB_RXHUBADDR1             0xFFCC108E         /* USB Receive Hub Address */
+#define USB_RXHUBADDR2             0xFFCC1096         /* USB Receive Hub Address */
+#define USB_RXHUBADDR3             0xFFCC109E         /* USB Receive Hub Address */
+#define USB_RXHUBADDR4             0xFFCC10A6         /* USB Receive Hub Address */
+#define USB_RXHUBADDR5             0xFFCC10AE         /* USB Receive Hub Address */
+#define USB_RXHUBADDR6             0xFFCC10B6         /* USB Receive Hub Address */
+#define USB_RXHUBADDR7             0xFFCC10BE         /* USB Receive Hub Address */
+#define USB_RXHUBADDR8             0xFFCC10C6         /* USB Receive Hub Address */
+#define USB_RXHUBADDR9             0xFFCC10CE         /* USB Receive Hub Address */
+#define USB_RXHUBADDR10            0xFFCC10D6         /* USB Receive Hub Address */
+#define USB_RXHUBADDR11            0xFFCC10DE         /* USB Receive Hub Address */
+#define USB_RXHUBPORT0             0xFFCC1087         /* USB Receive Hub Port */
+#define USB_RXHUBPORT1             0xFFCC108F         /* USB Receive Hub Port */
+#define USB_RXHUBPORT2             0xFFCC1097         /* USB Receive Hub Port */
+#define USB_RXHUBPORT3             0xFFCC109F         /* USB Receive Hub Port */
+#define USB_RXHUBPORT4             0xFFCC10A7         /* USB Receive Hub Port */
+#define USB_RXHUBPORT5             0xFFCC10AF         /* USB Receive Hub Port */
+#define USB_RXHUBPORT6             0xFFCC10B7         /* USB Receive Hub Port */
+#define USB_RXHUBPORT7             0xFFCC10BF         /* USB Receive Hub Port */
+#define USB_RXHUBPORT8             0xFFCC10C7         /* USB Receive Hub Port */
+#define USB_RXHUBPORT9             0xFFCC10CF         /* USB Receive Hub Port */
+#define USB_RXHUBPORT10            0xFFCC10D7         /* USB Receive Hub Port */
+#define USB_RXHUBPORT11            0xFFCC10DF         /* USB Receive Hub Port */
+#define USB_EP0_CSR0_H             0xFFCC1102         /* USB Config and Status EP0 */
+#define USB_EP0_CSR0_P             0xFFCC1102         /* USB Config and Status EP0 */
+#define USB_EP0_CNT0               0xFFCC1108         /* USB Number of Received Bytes for Endpoint 0 */
+#define USB_EP0_TYPE0              0xFFCC110A         /* USB Speed for Endpoint 0 */
+#define USB_EP0_NAKLIMIT0          0xFFCC110B         /* USB NAK Response Timeout for Endpoint 0 */
+#define USB_EP0_CFGDATA0           0xFFCC110F         /* USB Configuration Information */
+#define USB_EP_TXMAXP0             0xFFCC1110         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXMAXP1             0xFFCC1120         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXMAXP2             0xFFCC1130         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXMAXP3             0xFFCC1140         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXMAXP4             0xFFCC1150         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXMAXP5             0xFFCC1160         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXMAXP6             0xFFCC1170         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXMAXP7             0xFFCC1180         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXMAXP8             0xFFCC1190         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXMAXP9             0xFFCC11A0         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXMAXP10            0xFFCC11B0         /* USB Transmit Maximum Packet Length */
+#define USB_EP_TXCSR0_H            0xFFCC1112         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR1_H            0xFFCC1122         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR2_H            0xFFCC1132         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR3_H            0xFFCC1142         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR4_H            0xFFCC1152         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR5_H            0xFFCC1162         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR6_H            0xFFCC1172         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR7_H            0xFFCC1182         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR8_H            0xFFCC1192         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR9_H            0xFFCC11A2         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR10_H           0xFFCC11B2         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR0_P            0xFFCC1112         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR1_P            0xFFCC1122         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR2_P            0xFFCC1132         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR3_P            0xFFCC1142         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR4_P            0xFFCC1152         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR5_P            0xFFCC1162         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR6_P            0xFFCC1172         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR7_P            0xFFCC1182         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR8_P            0xFFCC1192         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR9_P            0xFFCC11A2         /* USB Transmit Configuration and Status */
+#define USB_EP_TXCSR10_P           0xFFCC11B2         /* USB Transmit Configuration and Status */
+#define USB_EP_RXMAXP0             0xFFCC1114         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXMAXP1             0xFFCC1124         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXMAXP2             0xFFCC1134         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXMAXP3             0xFFCC1144         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXMAXP4             0xFFCC1154         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXMAXP5             0xFFCC1164         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXMAXP6             0xFFCC1174         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXMAXP7             0xFFCC1184         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXMAXP8             0xFFCC1194         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXMAXP9             0xFFCC11A4         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXMAXP10            0xFFCC11B4         /* USB Receive Maximum Packet Length */
+#define USB_EP_RXCSR0_H            0xFFCC1116         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR1_H            0xFFCC1126         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR2_H            0xFFCC1136         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR3_H            0xFFCC1146         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR4_H            0xFFCC1156         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR5_H            0xFFCC1166         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR6_H            0xFFCC1176         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR7_H            0xFFCC1186         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR8_H            0xFFCC1196         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR9_H            0xFFCC11A6         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR10_H           0xFFCC11B6         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR0_P            0xFFCC1116         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR1_P            0xFFCC1126         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR2_P            0xFFCC1136         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR3_P            0xFFCC1146         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR4_P            0xFFCC1156         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR5_P            0xFFCC1166         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR6_P            0xFFCC1176         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR7_P            0xFFCC1186         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR8_P            0xFFCC1196         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR9_P            0xFFCC11A6         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCSR10_P           0xFFCC11B6         /* USB Receive Configuration and Status Register */
+#define USB_EP_RXCNT0              0xFFCC1118         /* USB Number of Bytes Received */
+#define USB_EP_RXCNT1              0xFFCC1128         /* USB Number of Bytes Received */
+#define USB_EP_RXCNT2              0xFFCC1138         /* USB Number of Bytes Received */
+#define USB_EP_RXCNT3              0xFFCC1148         /* USB Number of Bytes Received */
+#define USB_EP_RXCNT4              0xFFCC1158         /* USB Number of Bytes Received */
+#define USB_EP_RXCNT5              0xFFCC1168         /* USB Number of Bytes Received */
+#define USB_EP_RXCNT6              0xFFCC1178         /* USB Number of Bytes Received */
+#define USB_EP_RXCNT7              0xFFCC1188         /* USB Number of Bytes Received */
+#define USB_EP_RXCNT8              0xFFCC1198         /* USB Number of Bytes Received */
+#define USB_EP_RXCNT9              0xFFCC11A8         /* USB Number of Bytes Received */
+#define USB_EP_RXCNT10             0xFFCC11B8         /* USB Number of Bytes Received */
+#define USB_EP_TXTYPE0             0xFFCC111A         /* USB Transmit Type */
+#define USB_EP_TXTYPE1             0xFFCC112A         /* USB Transmit Type */
+#define USB_EP_TXTYPE2             0xFFCC113A         /* USB Transmit Type */
+#define USB_EP_TXTYPE3             0xFFCC114A         /* USB Transmit Type */
+#define USB_EP_TXTYPE4             0xFFCC115A         /* USB Transmit Type */
+#define USB_EP_TXTYPE5             0xFFCC116A         /* USB Transmit Type */
+#define USB_EP_TXTYPE6             0xFFCC117A         /* USB Transmit Type */
+#define USB_EP_TXTYPE7             0xFFCC118A         /* USB Transmit Type */
+#define USB_EP_TXTYPE8             0xFFCC119A         /* USB Transmit Type */
+#define USB_EP_TXTYPE9             0xFFCC11AA         /* USB Transmit Type */
+#define USB_EP_TXTYPE10            0xFFCC11BA         /* USB Transmit Type */
+#define USB_EP_TXINTERVAL0         0xFFCC111B         /* USB Transmit Polling Interval */
+#define USB_EP_TXINTERVAL1         0xFFCC112B         /* USB Transmit Polling Interval */
+#define USB_EP_TXINTERVAL2         0xFFCC113B         /* USB Transmit Polling Interval */
+#define USB_EP_TXINTERVAL3         0xFFCC114B         /* USB Transmit Polling Interval */
+#define USB_EP_TXINTERVAL4         0xFFCC115B         /* USB Transmit Polling Interval */
+#define USB_EP_TXINTERVAL5         0xFFCC116B         /* USB Transmit Polling Interval */
+#define USB_EP_TXINTERVAL6         0xFFCC117B         /* USB Transmit Polling Interval */
+#define USB_EP_TXINTERVAL7         0xFFCC118B         /* USB Transmit Polling Interval */
+#define USB_EP_TXINTERVAL8         0xFFCC119B         /* USB Transmit Polling Interval */
+#define USB_EP_TXINTERVAL9         0xFFCC11AB         /* USB Transmit Polling Interval */
+#define USB_EP_TXINTERVAL10        0xFFCC11BB         /* USB Transmit Polling Interval */
+#define USB_EP_RXTYPE0             0xFFCC111C         /* USB Receive Type */
+#define USB_EP_RXTYPE1             0xFFCC112C         /* USB Receive Type */
+#define USB_EP_RXTYPE2             0xFFCC113C         /* USB Receive Type */
+#define USB_EP_RXTYPE3             0xFFCC114C         /* USB Receive Type */
+#define USB_EP_RXTYPE4             0xFFCC115C         /* USB Receive Type */
+#define USB_EP_RXTYPE5             0xFFCC116C         /* USB Receive Type */
+#define USB_EP_RXTYPE6             0xFFCC117C         /* USB Receive Type */
+#define USB_EP_RXTYPE7             0xFFCC118C         /* USB Receive Type */
+#define USB_EP_RXTYPE8             0xFFCC119C         /* USB Receive Type */
+#define USB_EP_RXTYPE9             0xFFCC11AC         /* USB Receive Type */
+#define USB_EP_RXTYPE10            0xFFCC11BC         /* USB Receive Type */
+#define USB_EP_RXINTERVAL0         0xFFCC111D         /* USB Receive Polling Interval */
+#define USB_EP_RXINTERVAL1         0xFFCC112D         /* USB Receive Polling Interval */
+#define USB_EP_RXINTERVAL2         0xFFCC113D         /* USB Receive Polling Interval */
+#define USB_EP_RXINTERVAL3         0xFFCC114D         /* USB Receive Polling Interval */
+#define USB_EP_RXINTERVAL4         0xFFCC115D         /* USB Receive Polling Interval */
+#define USB_EP_RXINTERVAL5         0xFFCC116D         /* USB Receive Polling Interval */
+#define USB_EP_RXINTERVAL6         0xFFCC117D         /* USB Receive Polling Interval */
+#define USB_EP_RXINTERVAL7         0xFFCC118D         /* USB Receive Polling Interval */
+#define USB_EP_RXINTERVAL8         0xFFCC119D         /* USB Receive Polling Interval */
+#define USB_EP_RXINTERVAL9         0xFFCC11AD         /* USB Receive Polling Interval */
+#define USB_EP_RXINTERVAL10        0xFFCC11BD         /* USB Receive Polling Interval */
+#define USB_DMA_IRQ                0xFFCC1200         /* USB Interrupt Register */
+#define USB_DMA_CTL0               0xFFCC1204         /* USB DMA Control */
+#define USB_DMA_CTL1               0xFFCC1214         /* USB DMA Control */
+#define USB_DMA_CTL2               0xFFCC1224         /* USB DMA Control */
+#define USB_DMA_CTL3               0xFFCC1234         /* USB DMA Control */
+#define USB_DMA_CTL4               0xFFCC1244         /* USB DMA Control */
+#define USB_DMA_CTL5               0xFFCC1254         /* USB DMA Control */
+#define USB_DMA_CTL6               0xFFCC1264         /* USB DMA Control */
+#define USB_DMA_CTL7               0xFFCC1274         /* USB DMA Control */
+#define USB_DMA_ADDR0              0xFFCC1208         /* USB DMA Address */
+#define USB_DMA_ADDR1              0xFFCC1218         /* USB DMA Address */
+#define USB_DMA_ADDR2              0xFFCC1228         /* USB DMA Address */
+#define USB_DMA_ADDR3              0xFFCC1238         /* USB DMA Address */
+#define USB_DMA_ADDR4              0xFFCC1248         /* USB DMA Address */
+#define USB_DMA_ADDR5              0xFFCC1258         /* USB DMA Address */
+#define USB_DMA_ADDR6              0xFFCC1268         /* USB DMA Address */
+#define USB_DMA_ADDR7              0xFFCC1278         /* USB DMA Address */
+#define USB_DMA_CNT0               0xFFCC120C         /* USB DMA Count */
+#define USB_DMA_CNT1               0xFFCC121C         /* USB DMA Count */
+#define USB_DMA_CNT2               0xFFCC122C         /* USB DMA Count */
+#define USB_DMA_CNT3               0xFFCC123C         /* USB DMA Count */
+#define USB_DMA_CNT4               0xFFCC124C         /* USB DMA Count */
+#define USB_DMA_CNT5               0xFFCC125C         /* USB DMA Count */
+#define USB_DMA_CNT6               0xFFCC126C         /* USB DMA Count */
+#define USB_DMA_CNT7               0xFFCC127C         /* USB DMA Count */
+#define USB_RQPKTCNT0              0xFFCC1300         /* USB Request Packet Count */
+#define USB_RQPKTCNT1              0xFFCC1304         /* USB Request Packet Count */
+#define USB_RQPKTCNT2              0xFFCC1308         /* USB Request Packet Count */
+#define USB_RQPKTCNT3              0xFFCC130C         /* USB Request Packet Count */
+#define USB_RQPKTCNT4              0xFFCC1310         /* USB Request Packet Count */
+#define USB_RQPKTCNT5              0xFFCC1314         /* USB Request Packet Count */
+#define USB_RQPKTCNT6              0xFFCC1318         /* USB Request Packet Count */
+#define USB_RQPKTCNT7              0xFFCC131C         /* USB Request Packet Count */
+#define USB_RQPKTCNT8              0xFFCC1320         /* USB Request Packet Count */
+#define USB_RQPKTCNT9              0xFFCC1324         /* USB Request Packet Count */
+#define USB_RQPKTCNT10             0xFFCC1328         /* USB Request Packet Count */
+#define USB_CT_UCH                 0xFFCC1344         /* USB Chirp Timeout */
+#define USB_CT_HHSRTN              0xFFCC1346         /* USB High Speed Resume Return to Normal */
+#define USB_CT_HSBT                0xFFCC1348         /* USB High Speed Timeout */
+#define USB_LPM_ATTR               0xFFCC1360         /* USB LPM Attribute */
+#define USB_LPM_CTL                0xFFCC1362         /* USB LPM Control */
+#define USB_LPM_IEN                0xFFCC1363         /* USB LPM Interrupt Enable */
+#define USB_LPM_IRQ                0xFFCC1364         /* USB LPM Interrupt */
+#define USB_LPM_FADDR              0xFFCC1365         /* USB LPM Function Address */
+#define USB_VBUS_CTL               0xFFCC1380         /* USB VBus Control */
+#define USB_BAT_CHG                0xFFCC1381         /* USB Battery Charging */
+#define USB_PHY_CTL                0xFFCC1394         /* USB PHY Control */
+#define USB_TESTCTL                0xFFCC1397         /* USB Test Control */
+#define USB_PLL_OSC                0xFFCC1398         /* USB PLL and Oscillator Control */
+
+
+
+/* =========================
+        CHIPID
+   ========================= */
+
+#define                           CHIPID  0xffc00014
+/* CHIPID Masks */
+#define                   CHIPID_VERSION  0xF0000000
+#define                    CHIPID_FAMILY  0x0FFFF000
+#define               CHIPID_MANUFACTURE  0x00000FFE
+
+
+#endif /* _DEF_BF60X_H */
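For orientation, the three CHIPID masks above select bit fields of the 32-bit CHIPID register at 0xffc00014: version in bits 31:28, family in 27:12, manufacturer in 11:1. A minimal illustrative sketch of decoding them -- not part of the patch, and assuming the usual bfin_read32() MMIO accessor is available:

	#include <asm/io.h>	/* assumed to provide bfin_read32() */

	static inline unsigned int bf60x_chipid_version(void)
	{
		/* VERSION sits in bits 31:28, per CHIPID_VERSION */
		return (bfin_read32(CHIPID) & CHIPID_VERSION) >> 28;
	}

	static inline unsigned int bf60x_chipid_family(void)
	{
		/* FAMILY sits in bits 27:12, per CHIPID_FAMILY */
		return (bfin_read32(CHIPID) & CHIPID_FAMILY) >> 12;
	}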
diff --git a/arch/blackfin/mach-bf609/include/mach/dma.h b/arch/blackfin/mach-bf609/include/mach/dma.h
new file mode 100644
index 0000000..872d141
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/dma.h
@@ -0,0 +1,116 @@
+/* mach/dma.h - arch-specific DMA defines
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _MACH_DMA_H_
+#define _MACH_DMA_H_
+
+#define CH_SPORT0_TX                   0
+#define CH_SPORT0_RX                   1
+#define CH_SPORT1_TX                   2
+#define CH_SPORT1_RX                   3
+#define CH_SPORT2_TX                   4
+#define CH_SPORT2_RX                   5
+#define CH_SPI0_TX                     6
+#define CH_SPI0_RX                     7
+#define CH_SPI1_TX                     8
+#define CH_SPI1_RX                     9
+#define CH_RSI                        10
+#define CH_SDU                        11
+#define CH_LP0                        13
+#define CH_LP1                        14
+#define CH_LP2                        15
+#define CH_LP3                        16
+#define CH_UART0_TX                   17
+#define CH_UART0_RX                   18
+#define CH_UART1_TX                   19
+#define CH_UART1_RX                   20
+#define CH_MEM_STREAM0_SRC_CRC0      21
+#define CH_MEM_STREAM0_SRC           CH_MEM_STREAM0_SRC_CRC0
+#define CH_MEM_STREAM0_DEST_CRC0     22
+#define CH_MEM_STREAM0_DEST          CH_MEM_STREAM0_DEST_CRC0
+#define CH_MEM_STREAM1_SRC_CRC1      23
+#define CH_MEM_STREAM1_SRC           CH_MEM_STREAM1_SRC_CRC1
+#define CH_MEM_STREAM1_DEST_CRC1     24
+#define CH_MEM_STREAM1_DEST          CH_MEM_STREAM1_DEST_CRC1
+#define CH_MEM_STREAM2_SRC           25
+#define CH_MEM_STREAM2_DEST          26
+#define CH_MEM_STREAM3_SRC           27
+#define CH_MEM_STREAM3_DEST          28
+#define CH_EPPI0_CH0                  29
+#define CH_EPPI0_CH1                  30
+#define CH_EPPI1_CH0                  31
+#define CH_EPPI1_CH1                  32
+#define CH_EPPI2_CH0                  33
+#define CH_EPPI2_CH1                  34
+#define CH_PIXC_CH0                   35
+#define CH_PIXC_CH1                   36
+#define CH_PIXC_CH2                   37
+#define CH_PVP_CPDOB                  38
+#define CH_PVP_CPDOC                  39
+#define CH_PVP_CPSTAT                 40
+#define CH_PVP_CPCI                   41
+#define CH_PVP_MPDO                   42
+#define CH_PVP_MPDI                   43
+#define CH_PVP_MPSTAT                 44
+#define CH_PVP_MPCI                   45
+#define CH_PVP_CPDOA                  46
+
+#define MAX_DMA_CHANNELS 47
+#define MAX_DMA_SUSPEND_CHANNELS 0
+#define DMA_MMR_SIZE_32
+
+#define bfin_read_MDMA_S0_CONFIG bfin_read_MDMA0_SRC_CRC0_CONFIG
+#define bfin_write_MDMA_S0_CONFIG bfin_write_MDMA0_SRC_CRC0_CONFIG
+#define bfin_read_MDMA_S0_IRQ_STATUS bfin_read_MDMA0_SRC_CRC0_IRQ_STATUS
+#define bfin_write_MDMA_S0_IRQ_STATUS bfin_write_MDMA0_SRC_CRC0_IRQ_STATUS
+#define bfin_write_MDMA_S0_START_ADDR bfin_write_MDMA0_SRC_CRC0_START_ADDR
+#define bfin_write_MDMA_S0_X_COUNT bfin_write_MDMA0_SRC_CRC0_X_COUNT
+#define bfin_write_MDMA_S0_X_MODIFY bfin_write_MDMA0_SRC_CRC0_X_MODIFY
+#define bfin_write_MDMA_S0_Y_COUNT bfin_write_MDMA0_SRC_CRC0_Y_COUNT
+#define bfin_write_MDMA_S0_Y_MODIFY bfin_write_MDMA0_SRC_CRC0_Y_MODIFY
+#define bfin_read_MDMA_D0_CONFIG bfin_read_MDMA0_DEST_CRC0_CONFIG
+#define bfin_write_MDMA_D0_CONFIG bfin_write_MDMA0_DEST_CRC0_CONFIG
+#define bfin_read_MDMA_D0_IRQ_STATUS bfin_read_MDMA0_DEST_CRC0_IRQ_STATUS
+#define bfin_write_MDMA_D0_IRQ_STATUS bfin_write_MDMA0_DEST_CRC0_IRQ_STATUS
+#define bfin_write_MDMA_D0_START_ADDR bfin_write_MDMA0_DEST_CRC0_START_ADDR
+#define bfin_write_MDMA_D0_X_COUNT bfin_write_MDMA0_DEST_CRC0_X_COUNT
+#define bfin_write_MDMA_D0_X_MODIFY bfin_write_MDMA0_DEST_CRC0_X_MODIFY
+#define bfin_write_MDMA_D0_Y_COUNT bfin_write_MDMA0_DEST_CRC0_Y_COUNT
+#define bfin_write_MDMA_D0_Y_MODIFY bfin_write_MDMA0_DEST_CRC0_Y_MODIFY
+
+#define bfin_read_MDMA_S1_CONFIG bfin_read_MDMA1_SRC_CRC1_CONFIG
+#define bfin_write_MDMA_S1_CONFIG bfin_write_MDMA1_SRC_CRC1_CONFIG
+#define bfin_read_MDMA_D1_CONFIG bfin_read_MDMA1_DEST_CRC1_CONFIG
+#define bfin_write_MDMA_D1_CONFIG bfin_write_MDMA1_DEST_CRC1_CONFIG
+#define bfin_read_MDMA_D1_IRQ_STATUS bfin_read_MDMA1_DEST_CRC1_IRQ_STATUS
+#define bfin_write_MDMA_D1_IRQ_STATUS bfin_write_MDMA1_DEST_CRC1_IRQ_STATUS
+
+#define bfin_read_MDMA_S3_CONFIG bfin_read_MDMA3_SRC_CONFIG
+#define bfin_write_MDMA_S3_CONFIG bfin_write_MDMA3_SRC_CONFIG
+#define bfin_read_MDMA_S3_IRQ_STATUS bfin_read_MDMA3_SRC_IRQ_STATUS
+#define bfin_write_MDMA_S3_IRQ_STATUS bfin_write_MDMA3_SRC_IRQ_STATUS
+#define bfin_write_MDMA_S3_START_ADDR bfin_write_MDMA3_SRC_START_ADDR
+#define bfin_write_MDMA_S3_X_COUNT bfin_write_MDMA3_SRC_X_COUNT
+#define bfin_write_MDMA_S3_X_MODIFY bfin_write_MDMA3_SRC_X_MODIFY
+#define bfin_write_MDMA_S3_Y_COUNT bfin_write_MDMA3_SRC_Y_COUNT
+#define bfin_write_MDMA_S3_Y_MODIFY bfin_write_MDMA3_SRC_Y_MODIFY
+#define bfin_read_MDMA_D3_CONFIG bfin_read_MDMA3_DEST_CONFIG
+#define bfin_write_MDMA_D3_CONFIG bfin_write_MDMA3_DEST_CONFIG
+#define bfin_read_MDMA_D3_IRQ_STATUS bfin_read_MDMA3_DEST_IRQ_STATUS
+#define bfin_write_MDMA_D3_IRQ_STATUS bfin_write_MDMA3_DEST_IRQ_STATUS
+#define bfin_write_MDMA_D3_START_ADDR bfin_write_MDMA3_DEST_START_ADDR
+#define bfin_write_MDMA_D3_X_COUNT bfin_write_MDMA3_DEST_X_COUNT
+#define bfin_write_MDMA_D3_X_MODIFY bfin_write_MDMA3_DEST_X_MODIFY
+#define bfin_write_MDMA_D3_Y_COUNT bfin_write_MDMA3_DEST_Y_COUNT
+#define bfin_write_MDMA_D3_Y_MODIFY bfin_write_MDMA3_DEST_Y_MODIFY
+
+#define MDMA_S0_NEXT_DESC_PTR MDMA0_SRC_CRC0_NEXT_DESC_PTR
+#define MDMA_D0_NEXT_DESC_PTR MDMA0_DEST_CRC0_NEXT_DESC_PTR
+#define MDMA_S1_NEXT_DESC_PTR MDMA1_SRC_CRC1_NEXT_DESC_PTR
+#define MDMA_D1_NEXT_DESC_PTR MDMA1_DEST_CRC1_NEXT_DESC_PTR
+
+#endif
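The CH_* numbers above are the peripheral DMA channel IDs for this part, and the CH_MEM_STREAM*_SRC/DEST aliases simply point at the shared MDMA/CRC channels. A rough illustrative sketch (not part of the patch) of how a driver would claim one of them, assuming the usual Blackfin request_dma()/free_dma() helpers keep their familiar signatures:

	#include <asm/dma.h>	/* assumed to provide request_dma()/free_dma() */

	static int example_claim_uart0_tx(void)
	{
		int ret = request_dma(CH_UART0_TX, "example-uart0-tx");

		if (ret < 0)
			return ret;	/* channel busy or invalid */

		/* ... program descriptors, enable_dma(CH_UART0_TX), etc. ... */

		free_dma(CH_UART0_TX);
		return 0;
	}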
diff --git a/arch/blackfin/mach-bf609/include/mach/gpio.h b/arch/blackfin/mach-bf609/include/mach/gpio.h
new file mode 100644
index 0000000..127586b
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/gpio.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2007-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _MACH_GPIO_H_
+#define _MACH_GPIO_H_
+
+#define MAX_BLACKFIN_GPIOS 112
+
+#define GPIO_PA0	0
+#define GPIO_PA1	1
+#define GPIO_PA2	2
+#define GPIO_PA3	3
+#define GPIO_PA4	4
+#define GPIO_PA5	5
+#define GPIO_PA6	6
+#define GPIO_PA7	7
+#define GPIO_PA8	8
+#define GPIO_PA9	9
+#define GPIO_PA10	10
+#define GPIO_PA11	11
+#define GPIO_PA12	12
+#define GPIO_PA13	13
+#define GPIO_PA14	14
+#define GPIO_PA15	15
+#define GPIO_PB0	16
+#define GPIO_PB1	17
+#define GPIO_PB2	18
+#define GPIO_PB3	19
+#define GPIO_PB4	20
+#define GPIO_PB5	21
+#define GPIO_PB6	22
+#define GPIO_PB7	23
+#define GPIO_PB8	24
+#define GPIO_PB9	25
+#define GPIO_PB10	26
+#define GPIO_PB11	27
+#define GPIO_PB12	28
+#define GPIO_PB13	29
+#define GPIO_PB14	30
+#define GPIO_PB15	31
+#define GPIO_PC0	32
+#define GPIO_PC1	33
+#define GPIO_PC2	34
+#define GPIO_PC3	35
+#define GPIO_PC4	36
+#define GPIO_PC5	37
+#define GPIO_PC6	38
+#define GPIO_PC7	39
+#define GPIO_PC8	40
+#define GPIO_PC9	41
+#define GPIO_PC10	42
+#define GPIO_PC11	43
+#define GPIO_PC12	44
+#define GPIO_PC13	45
+#define GPIO_PC14	46
+#define GPIO_PC15	47
+#define GPIO_PD0	48
+#define GPIO_PD1	49
+#define GPIO_PD2	50
+#define GPIO_PD3	51
+#define GPIO_PD4	52
+#define GPIO_PD5	53
+#define GPIO_PD6	54
+#define GPIO_PD7	55
+#define GPIO_PD8	56
+#define GPIO_PD9	57
+#define GPIO_PD10	58
+#define GPIO_PD11	59
+#define GPIO_PD12	60
+#define GPIO_PD13	61
+#define GPIO_PD14	62
+#define GPIO_PD15	63
+#define GPIO_PE0	64
+#define GPIO_PE1	65
+#define GPIO_PE2	66
+#define GPIO_PE3	67
+#define GPIO_PE4	68
+#define GPIO_PE5	69
+#define GPIO_PE6	70
+#define GPIO_PE7	71
+#define GPIO_PE8	72
+#define GPIO_PE9	73
+#define GPIO_PE10	74
+#define GPIO_PE11	75
+#define GPIO_PE12	76
+#define GPIO_PE13	77
+#define GPIO_PE14	78
+#define GPIO_PE15	79
+#define GPIO_PF0	80
+#define GPIO_PF1	81
+#define GPIO_PF2	82
+#define GPIO_PF3	83
+#define GPIO_PF4	84
+#define GPIO_PF5	85
+#define GPIO_PF6	86
+#define GPIO_PF7	87
+#define GPIO_PF8	88
+#define GPIO_PF9	89
+#define GPIO_PF10	90
+#define GPIO_PF11	91
+#define GPIO_PF12	92
+#define GPIO_PF13	93
+#define GPIO_PF14	94
+#define GPIO_PF15	95
+#define GPIO_PG0	96
+#define GPIO_PG1	97
+#define GPIO_PG2	98
+#define GPIO_PG3	99
+#define GPIO_PG4	100
+#define GPIO_PG5	101
+#define GPIO_PG6	102
+#define GPIO_PG7	103
+#define GPIO_PG8	104
+#define GPIO_PG9	105
+#define GPIO_PG10	106
+#define GPIO_PG11	107
+#define GPIO_PG12	108
+#define GPIO_PG13	109
+#define GPIO_PG14	110
+#define GPIO_PG15	111
+
+
+#define BFIN_GPIO_PINT 1
+
+
+#ifndef __ASSEMBLY__
+
+struct gpio_port_t {
+	unsigned long port_fer;
+	unsigned long port_fer_set;
+	unsigned long port_fer_clear;
+	unsigned long data;
+	unsigned long data_set;
+	unsigned long data_clear;
+	unsigned long dir;
+	unsigned long dir_set;
+	unsigned long dir_clear;
+	unsigned long inen;
+	unsigned long inen_set;
+	unsigned long inen_clear;
+	unsigned long port_mux;
+	unsigned long toggle;
+	unsigned long polar;
+	unsigned long polar_set;
+	unsigned long polar_clear;
+	unsigned long lock;
+	unsigned long spare;
+	unsigned long revid;
+};
+
+struct gpio_port_s {
+	unsigned short fer;
+	unsigned short data;
+	unsigned short dir;
+	unsigned short inen;
+	unsigned int mux;
+};
+
+#endif
+
+#include <mach-common/ports-a.h>
+#include <mach-common/ports-b.h>
+#include <mach-common/ports-c.h>
+#include <mach-common/ports-d.h>
+#include <mach-common/ports-e.h>
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+
+#endif /* _MACH_GPIO_H_ */
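The GPIO_Pxn numbers above follow a fixed scheme: sixteen pins per port, ports PA through PG, giving MAX_BLACKFIN_GPIOS = 112, so port and bit fall out of plain arithmetic. A small hypothetical helper, shown only to make the numbering explicit:

	#include <linux/kernel.h>	/* snprintf() */

	/* e.g. gpio 38 == GPIO_PC6 -> "PC6" (38 = 2*16 + 6) */
	static void bf609_gpio_name(unsigned int gpio, char *buf, size_t len)
	{
		snprintf(buf, len, "P%c%u", 'A' + gpio / 16, gpio % 16);
	}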
diff --git a/arch/blackfin/mach-bf609/include/mach/irq.h b/arch/blackfin/mach-bf609/include/mach/irq.h
new file mode 100644
index 0000000..0004552
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/irq.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _BF60x_IRQ_H_
+#define _BF60x_IRQ_H_
+
+#include <mach-common/irq.h>
+
+#undef BFIN_IRQ
+#define BFIN_IRQ(x) ((x) + IVG15)
+
+#define NR_PERI_INTS		(5 * 32)
+
+#define IRQ_SEC_ERR		BFIN_IRQ(0)	/* SEC Error */
+#define IRQ_CGU_EVT		BFIN_IRQ(1)	/* CGU Event */
+#define IRQ_WATCH0		BFIN_IRQ(2)	/* Watchdog0 Interrupt */
+#define IRQ_WATCH1		BFIN_IRQ(3)	/* Watchdog1 Interrupt */
+#define IRQ_L2CTL0_ECC_ERR	BFIN_IRQ(4)	/* L2 ECC Error */
+#define IRQ_L2CTL0_ECC_WARN	BFIN_IRQ(5)	/* L2 ECC Warning */
+#define IRQ_C0_DBL_FAULT	BFIN_IRQ(6)	/* Core 0 Double Fault */
+#define IRQ_C1_DBL_FAULT	BFIN_IRQ(7)	/* Core 1 Double Fault */
+#define IRQ_C0_HW_ERR		BFIN_IRQ(8)	/* Core 0 Hardware Error */
+#define IRQ_C1_HW_ERR		BFIN_IRQ(9)	/* Core 1 Hardware Error */
+#define IRQ_C0_NMI_L1_PARITY_ERR	BFIN_IRQ(10)	/* Core 0 Unhandled NMI or L1 Memory Parity Error */
+#define IRQ_C1_NMI_L1_PARITY_ERR	BFIN_IRQ(11)	/* Core 1 Unhandled NMI or L1 Memory Parity Error */
+#define CORE_IRQS		(IRQ_C1_NMI_L1_PARITY_ERR + 1)
+
+#define IRQ_TIMER0		BFIN_IRQ(12)	/* Timer 0 Interrupt */
+#define IRQ_TIMER1		BFIN_IRQ(13)	/* Timer 1 Interrupt */
+#define IRQ_TIMER2		BFIN_IRQ(14)	/* Timer 2 Interrupt */
+#define IRQ_TIMER3		BFIN_IRQ(15)	/* Timer 3 Interrupt */
+#define IRQ_TIMER4		BFIN_IRQ(16)	/* Timer 4 Interrupt */
+#define IRQ_TIMER5		BFIN_IRQ(17)	/* Timer 5 Interrupt */
+#define IRQ_TIMER6		BFIN_IRQ(18)	/* Timer 6 Interrupt */
+#define IRQ_TIMER7		BFIN_IRQ(19)	/* Timer 7 Interrupt */
+#define IRQ_TIMER_STAT		BFIN_IRQ(20)	/* Timer Block Status */
+#define IRQ_PINT0		BFIN_IRQ(21)	/* PINT0 Interrupt */
+#define IRQ_PINT1		BFIN_IRQ(22)	/* PINT1 Interrupt */
+#define IRQ_PINT2		BFIN_IRQ(23)	/* PINT2 Interrupt */
+#define IRQ_PINT3		BFIN_IRQ(24)	/* PINT3 Interrupt */
+#define IRQ_PINT4		BFIN_IRQ(25)	/* PINT4 Interrupt */
+#define IRQ_PINT5		BFIN_IRQ(26)	/* PINT5 Interrupt */
+#define IRQ_CNT			BFIN_IRQ(27)	/* CNT Interrupt */
+#define IRQ_PWM0_TRIP		BFIN_IRQ(28)	/* PWM0 Trip Interrupt */
+#define IRQ_PWM0_SYNC		BFIN_IRQ(29)	/* PWM0 Sync Interrupt */
+#define IRQ_PWM1_TRIP		BFIN_IRQ(30)	/* PWM1 Trip Interrupt */
+#define IRQ_PWM1_SYNC		BFIN_IRQ(31)	/* PWM1 Sync Interrupt */
+#define IRQ_TWI0		BFIN_IRQ(32)	/* TWI0 Interrupt */
+#define IRQ_TWI1		BFIN_IRQ(33)	/* TWI1 Interrupt */
+#define IRQ_SOFT0		BFIN_IRQ(34)	/* Software-Driven Interrupt 0 */
+#define IRQ_SOFT1		BFIN_IRQ(35)	/* Software-Driven Interrupt 1 */
+#define IRQ_SOFT2		BFIN_IRQ(36)	/* Software-Driven Interrupt 2 */
+#define IRQ_SOFT3		BFIN_IRQ(37)	/* Software-Driven Interrupt 3 */
+#define IRQ_ACM_EVT_MISS	BFIN_IRQ(38)	/* ACM Event Miss */
+#define IRQ_ACM_EVT_COMPLETE 	BFIN_IRQ(39)	/* ACM Event Complete */
+#define IRQ_CAN0_RX		BFIN_IRQ(40)	/* CAN0 Receive Interrupt */
+#define IRQ_CAN0_TX		BFIN_IRQ(41)	/* CAN0 Transmit Interrupt */
+#define IRQ_CAN0_STAT		BFIN_IRQ(42)	/* CAN0 Status */
+#define IRQ_SPORT0_TX		BFIN_IRQ(43)	/* SPORT0 TX Interrupt (DMA0) */
+#define IRQ_SPORT0_TX_STAT	BFIN_IRQ(44)	/* SPORT0 TX Status Interrupt */
+#define IRQ_SPORT0_RX		BFIN_IRQ(45)	/* SPORT0 RX Interrupt (DMA1) */
+#define IRQ_SPORT0_RX_STAT	BFIN_IRQ(46)	/* SPORT0 RX Status Interrupt */
+#define IRQ_SPORT1_TX		BFIN_IRQ(47)	/* SPORT1 TX Interrupt (DMA2) */
+#define IRQ_SPORT1_TX_STAT	BFIN_IRQ(48)	/* SPORT1 TX Status Interrupt */
+#define IRQ_SPORT1_RX		BFIN_IRQ(49)	/* SPORT1 RX Interrupt (DMA3) */
+#define IRQ_SPORT1_RX_STAT	BFIN_IRQ(50)	/* SPORT1 RX Status Interrupt */
+#define IRQ_SPORT2_TX		BFIN_IRQ(51)	/* SPORT2 TX Interrupt (DMA4) */
+#define IRQ_SPORT2_TX_STAT	BFIN_IRQ(52)	/* SPORT2 TX Status Interrupt */
+#define IRQ_SPORT2_RX		BFIN_IRQ(53)	/* SPORT2 RX Interrupt (DMA5) */
+#define IRQ_SPORT2_RX_STAT	BFIN_IRQ(54)	/* SPORT2 RX Status Interrupt */
+#define IRQ_SPI0_TX		BFIN_IRQ(55)	/* SPI0 TX Interrupt (DMA6) */
+#define IRQ_SPI0_RX		BFIN_IRQ(56)	/* SPI0 RX Interrupt (DMA7) */
+#define IRQ_SPI0_STAT		BFIN_IRQ(57)	/* SPI0 Status Interrupt */
+#define IRQ_SPI1_TX		BFIN_IRQ(58)	/* SPI1 TX Interrupt (DMA8) */
+#define IRQ_SPI1_RX		BFIN_IRQ(59)	/* SPI1 RX Interrupt (DMA9) */
+#define IRQ_SPI1_STAT		BFIN_IRQ(60)	/* SPI1 Status Interrupt */
+#define IRQ_RSI			BFIN_IRQ(61)	/* RSI (DMA10) Interrupt */
+#define IRQ_RSI_INT0		BFIN_IRQ(62)	/* RSI Interrupt0 */
+#define IRQ_RSI_INT1		BFIN_IRQ(63)	/* RSI Interrupt1 */
+#define IRQ_SDU			BFIN_IRQ(64)	/* DMA11 Data (SDU) */
+/*       -- RESERVED --             65		   DMA12 Data (Reserved) */
+/*       -- RESERVED --             66		   Reserved */
+/*       -- RESERVED --             67		   Reserved */
+#define IRQ_EMAC0_STAT		BFIN_IRQ(68)	/* EMAC0 Status */
+/*       -- RESERVED --             69		   EMAC0 Power (Reserved) */
+#define IRQ_EMAC1_STAT		BFIN_IRQ(70)	/* EMAC1 Status */
+/*       -- RESERVED --             71		   EMAC1 Power (Reserved) */
+#define IRQ_LP0			BFIN_IRQ(72)	/* DMA13 Data (Link Port 0) */
+#define IRQ_LP0_STAT		BFIN_IRQ(73)	/* Link Port 0 Status */
+#define IRQ_LP1			BFIN_IRQ(74)	/* DMA14 Data (Link Port 1) */
+#define IRQ_LP1_STAT		BFIN_IRQ(75)	/* Link Port 1 Status */
+#define IRQ_LP2			BFIN_IRQ(76)	/* DMA15 Data (Link Port 2) */
+#define IRQ_LP2_STAT		BFIN_IRQ(77)	/* Link Port 2 Status */
+#define IRQ_LP3			BFIN_IRQ(78)	/* DMA16 Data (Link Port 3) */
+#define IRQ_LP3_STAT		BFIN_IRQ(79)	/* Link Port 3 Status */
+#define IRQ_UART0_TX		BFIN_IRQ(80)	/* UART0 TX Interrupt (DMA17) */
+#define IRQ_UART0_RX		BFIN_IRQ(81)	/* UART0 RX Interrupt (DMA18) */
+#define IRQ_UART0_STAT		BFIN_IRQ(82)	/* UART0 Status (Error) Interrupt */
+#define IRQ_UART1_TX		BFIN_IRQ(83)	/* UART1 TX Interrupt (DMA19) */
+#define IRQ_UART1_RX		BFIN_IRQ(84)	/* UART1 RX Interrupt (DMA20) */
+#define IRQ_UART1_STAT		BFIN_IRQ(85)	/* UART1 Status (Error) Interrupt */
+#define IRQ_MDMA0_SRC_CRC0	BFIN_IRQ(86)	/* DMA21 Data (MDMA Stream 0 Source/CRC0 Input Channel) */
+#define IRQ_MDMA0_DEST_CRC0	BFIN_IRQ(87)	/* DMA22 Data (MDMA Stream 0 Destination/CRC0 Output Channel) */
+#define IRQ_MDMAS0		IRQ_MDMA0_DEST_CRC0
+#define IRQ_CRC0_DCNTEXP	BFIN_IRQ(88)	/* CRC0 DATACOUNT Expiration */
+#define IRQ_CRC0_ERR		BFIN_IRQ(89)	/* CRC0 Error */
+#define IRQ_MDMA1_SRC_CRC1	BFIN_IRQ(90)	/* DMA23 Data (MDMA Stream 1 Source/CRC1 Input Channel) */
+#define IRQ_MDMA1_DEST_CRC1	BFIN_IRQ(91)	/* DMA24 Data (MDMA Stream 1 Destination/CRC1 Output Channel) */
+#define IRQ_MDMAS1		IRQ_MDMA1_DEST_CRC1
+#define IRQ_CRC1_DCNTEXP	BFIN_IRQ(92)	/* CRC1 DATACOUNT Expiration */
+#define IRQ_CRC1_ERR		BFIN_IRQ(93)	/* CRC1 Error */
+#define IRQ_MDMA2_SRC		BFIN_IRQ(94)	/* DMA25 Data (MDMA Stream 2 Source Channel) */
+#define IRQ_MDMA2_DEST		BFIN_IRQ(95)	/* DMA26 Data (MDMA Stream 2 Destination Channel) */
+#define IRQ_MDMAS2		IRQ_MDMA2_DEST
+#define IRQ_MDMA3_SRC		BFIN_IRQ(96)	/* DMA27 Data (MDMA Stream 3 Source Channel) */
+#define IRQ_MDMA3_DEST 		BFIN_IRQ(97)	/* DMA28 Data (MDMA Stream 3 Destination Channel) */
+#define IRQ_MDMAS3		IRQ_MDMA3_DEST
+#define IRQ_EPPI0_CH0 		BFIN_IRQ(98)	/* DMA29 Data (EPPI0 Channel 0) */
+#define IRQ_EPPI0_CH1 		BFIN_IRQ(99)	/* DMA30 Data (EPPI0 Channel 1) */
+#define IRQ_EPPI0_STAT		BFIN_IRQ(100)	/* EPPI0 Status */
+#define IRQ_EPPI2_CH0		BFIN_IRQ(101)	/* DMA31 Data (EPPI2 Channel 0) */
+#define IRQ_EPPI2_CH1		BFIN_IRQ(102)	/* DMA32 Data (EPPI2 Channel 1) */
+#define IRQ_EPPI2_STAT		BFIN_IRQ(103)	/* EPPI2 Status */
+#define IRQ_EPPI1_CH0		BFIN_IRQ(104)	/* DMA33 Data (EPPI1 Channel 0) */
+#define IRQ_EPPI1_CH1		BFIN_IRQ(105)	/* DMA34 Data (EPPI1 Channel 1) */
+#define IRQ_EPPI1_STAT		BFIN_IRQ(106)	/* EPPI1 Status */
+#define IRQ_PIXC_CH0		BFIN_IRQ(107)	/* DMA35 Data (PIXC Channel 0) */
+#define IRQ_PIXC_CH1		BFIN_IRQ(108)	/* DMA36 Data (PIXC Channel 1) */
+#define IRQ_PIXC_CH2		BFIN_IRQ(109)	/* DMA37 Data (PIXC Channel 2) */
+#define IRQ_PIXC_STAT		BFIN_IRQ(110)	/* PIXC Status */
+#define IRQ_PVP_CPDOB		BFIN_IRQ(111)	/* DMA38 Data (PVP0 Camera Pipe Data Out B) */
+#define IRQ_PVP_CPDOC		BFIN_IRQ(112)	/* DMA39 Data (PVP0 Camera Pipe Data Out C) */
+#define IRQ_PVP_CPSTAT		BFIN_IRQ(113)	/* DMA40 Data (PVP0 Camera Pipe Status Out) */
+#define IRQ_PVP_CPCI		BFIN_IRQ(114)	/* DMA41 Data (PVP0 Camera Pipe Control In) */
+#define IRQ_PVP_STAT0		BFIN_IRQ(115)	/* PVP0 Status 0 */
+#define IRQ_PVP_MPDO		BFIN_IRQ(116)	/* DMA42 Data (PVP0 Memory Pipe Data Out) */
+#define IRQ_PVP_MPDI		BFIN_IRQ(117)	/* DMA43 Data (PVP0 Memory Pipe Data In) */
+#define IRQ_PVP_MPSTAT		BFIN_IRQ(118)	/* DMA44 Data (PVP0 Memory Pipe Status Out) */
+#define IRQ_PVP_MPCI		BFIN_IRQ(119)	/* DMA45 Data (PVP0 Memory Pipe Control In) */
+#define IRQ_PVP_CPDOA		BFIN_IRQ(120)	/* DMA46 Data (PVP0 Camera Pipe Data Out A) */
+#define IRQ_PVP_STAT1		BFIN_IRQ(121)	/* PVP0 Status 1 */
+#define IRQ_USB_STAT		BFIN_IRQ(122)	/* USB Status Interrupt */
+#define IRQ_USB_DMA		BFIN_IRQ(123)	/* USB DMA Interrupt */
+#define IRQ_TRU_INT0		BFIN_IRQ(124)	/* TRU0 Interrupt 0 */
+#define IRQ_TRU_INT1		BFIN_IRQ(125)	/* TRU0 Interrupt 1 */
+#define IRQ_TRU_INT2		BFIN_IRQ(126)	/* TRU0 Interrupt 2 */
+#define IRQ_TRU_INT3		BFIN_IRQ(127)	/* TRU0 Interrupt 3 */
+#define IRQ_DMAC0_ERROR		BFIN_IRQ(128)	/* DMAC0 Status Interrupt */
+#define IRQ_CGU0_ERROR		BFIN_IRQ(129)	/* CGU0 Error */
+/*       -- RESERVED --             130		   Reserved */
+#define IRQ_DPM			BFIN_IRQ(131)	/* DPM0 Event */
+/*       -- RESERVED --             132		   Reserved */
+#define IRQ_SWU0		BFIN_IRQ(133)	/* SWU0 */
+#define IRQ_SWU1		BFIN_IRQ(134)	/* SWU1 */
+#define IRQ_SWU2		BFIN_IRQ(135)	/* SWU2 */
+#define IRQ_SWU3		BFIN_IRQ(136)	/* SWU3 */
+#define IRQ_SWU4		BFIN_IRQ(137)	/* SWU4 */
+#define IRQ_SWU5		BFIN_IRQ(138)	/* SWU5 */
+#define IRQ_SWU6		BFIN_IRQ(139)	/* SWU6 */
+
+#define SYS_IRQS		IRQ_SWU6
+
+#define BFIN_PA_IRQ(x)		((x) + SYS_IRQS + 1)
+#define IRQ_PA0			BFIN_PA_IRQ(0)
+#define IRQ_PA1			BFIN_PA_IRQ(1)
+#define IRQ_PA2			BFIN_PA_IRQ(2)
+#define IRQ_PA3			BFIN_PA_IRQ(3)
+#define IRQ_PA4			BFIN_PA_IRQ(4)
+#define IRQ_PA5			BFIN_PA_IRQ(5)
+#define IRQ_PA6			BFIN_PA_IRQ(6)
+#define IRQ_PA7			BFIN_PA_IRQ(7)
+#define IRQ_PA8			BFIN_PA_IRQ(8)
+#define IRQ_PA9			BFIN_PA_IRQ(9)
+#define IRQ_PA10		BFIN_PA_IRQ(10)
+#define IRQ_PA11		BFIN_PA_IRQ(11)
+#define IRQ_PA12		BFIN_PA_IRQ(12)
+#define IRQ_PA13		BFIN_PA_IRQ(13)
+#define IRQ_PA14		BFIN_PA_IRQ(14)
+#define IRQ_PA15		BFIN_PA_IRQ(15)
+
+#define BFIN_PB_IRQ(x)		((x) + IRQ_PA15 + 1)
+#define IRQ_PB0			BFIN_PB_IRQ(0)
+#define IRQ_PB1			BFIN_PB_IRQ(1)
+#define IRQ_PB2			BFIN_PB_IRQ(2)
+#define IRQ_PB3			BFIN_PB_IRQ(3)
+#define IRQ_PB4			BFIN_PB_IRQ(4)
+#define IRQ_PB5			BFIN_PB_IRQ(5)
+#define IRQ_PB6			BFIN_PB_IRQ(6)
+#define IRQ_PB7			BFIN_PB_IRQ(7)
+#define IRQ_PB8			BFIN_PB_IRQ(8)
+#define IRQ_PB9			BFIN_PB_IRQ(9)
+#define IRQ_PB10		BFIN_PB_IRQ(10)
+#define IRQ_PB11		BFIN_PB_IRQ(11)
+#define IRQ_PB12		BFIN_PB_IRQ(12)
+#define IRQ_PB13		BFIN_PB_IRQ(13)
+#define IRQ_PB14		BFIN_PB_IRQ(14)
+#define IRQ_PB15		BFIN_PB_IRQ(15)		/* N/A */
+
+#define BFIN_PC_IRQ(x)		((x) + IRQ_PB15 + 1)
+#define IRQ_PC0			BFIN_PC_IRQ(0)
+#define IRQ_PC1			BFIN_PC_IRQ(1)
+#define IRQ_PC2			BFIN_PC_IRQ(2)
+#define IRQ_PC3			BFIN_PC_IRQ(3)
+#define IRQ_PC4			BFIN_PC_IRQ(4)
+#define IRQ_PC5			BFIN_PC_IRQ(5)
+#define IRQ_PC6			BFIN_PC_IRQ(6)
+#define IRQ_PC7			BFIN_PC_IRQ(7)
+#define IRQ_PC8			BFIN_PC_IRQ(8)
+#define IRQ_PC9			BFIN_PC_IRQ(9)
+#define IRQ_PC10		BFIN_PC_IRQ(10)
+#define IRQ_PC11		BFIN_PC_IRQ(11)
+#define IRQ_PC12		BFIN_PC_IRQ(12)
+#define IRQ_PC13		BFIN_PC_IRQ(13)
+#define IRQ_PC14		BFIN_PC_IRQ(14)		/* N/A */
+#define IRQ_PC15		BFIN_PC_IRQ(15)		/* N/A */
+
+#define BFIN_PD_IRQ(x)		((x) + IRQ_PC15 + 1)
+#define IRQ_PD0			BFIN_PD_IRQ(0)
+#define IRQ_PD1			BFIN_PD_IRQ(1)
+#define IRQ_PD2			BFIN_PD_IRQ(2)
+#define IRQ_PD3			BFIN_PD_IRQ(3)
+#define IRQ_PD4			BFIN_PD_IRQ(4)
+#define IRQ_PD5			BFIN_PD_IRQ(5)
+#define IRQ_PD6			BFIN_PD_IRQ(6)
+#define IRQ_PD7			BFIN_PD_IRQ(7)
+#define IRQ_PD8			BFIN_PD_IRQ(8)
+#define IRQ_PD9			BFIN_PD_IRQ(9)
+#define IRQ_PD10		BFIN_PD_IRQ(10)
+#define IRQ_PD11		BFIN_PD_IRQ(11)
+#define IRQ_PD12		BFIN_PD_IRQ(12)
+#define IRQ_PD13		BFIN_PD_IRQ(13)
+#define IRQ_PD14		BFIN_PD_IRQ(14)
+#define IRQ_PD15		BFIN_PD_IRQ(15)
+
+#define BFIN_PE_IRQ(x)		((x) + IRQ_PD15 + 1)
+#define IRQ_PE0			BFIN_PE_IRQ(0)
+#define IRQ_PE1			BFIN_PE_IRQ(1)
+#define IRQ_PE2			BFIN_PE_IRQ(2)
+#define IRQ_PE3			BFIN_PE_IRQ(3)
+#define IRQ_PE4			BFIN_PE_IRQ(4)
+#define IRQ_PE5			BFIN_PE_IRQ(5)
+#define IRQ_PE6			BFIN_PE_IRQ(6)
+#define IRQ_PE7			BFIN_PE_IRQ(7)
+#define IRQ_PE8			BFIN_PE_IRQ(8)
+#define IRQ_PE9			BFIN_PE_IRQ(9)
+#define IRQ_PE10		BFIN_PE_IRQ(10)
+#define IRQ_PE11		BFIN_PE_IRQ(11)
+#define IRQ_PE12		BFIN_PE_IRQ(12)
+#define IRQ_PE13		BFIN_PE_IRQ(13)
+#define IRQ_PE14		BFIN_PE_IRQ(14)
+#define IRQ_PE15		BFIN_PE_IRQ(15)
+
+#define BFIN_PF_IRQ(x)		((x) + IRQ_PE15 + 1)
+#define IRQ_PF0			BFIN_PF_IRQ(0)
+#define IRQ_PF1			BFIN_PF_IRQ(1)
+#define IRQ_PF2			BFIN_PF_IRQ(2)
+#define IRQ_PF3			BFIN_PF_IRQ(3)
+#define IRQ_PF4			BFIN_PF_IRQ(4)
+#define IRQ_PF5			BFIN_PF_IRQ(5)
+#define IRQ_PF6			BFIN_PF_IRQ(6)
+#define IRQ_PF7			BFIN_PF_IRQ(7)
+#define IRQ_PF8			BFIN_PF_IRQ(8)
+#define IRQ_PF9			BFIN_PF_IRQ(9)
+#define IRQ_PF10		BFIN_PF_IRQ(10)
+#define IRQ_PF11		BFIN_PF_IRQ(11)
+#define IRQ_PF12		BFIN_PF_IRQ(12)
+#define IRQ_PF13		BFIN_PF_IRQ(13)
+#define IRQ_PF14		BFIN_PF_IRQ(14)
+#define IRQ_PF15		BFIN_PF_IRQ(15)
+
+#define BFIN_PG_IRQ(x)		((x) + IRQ_PF15 + 1)
+#define IRQ_PG0			BFIN_PG_IRQ(0)
+#define IRQ_PG1			BFIN_PG_IRQ(1)
+#define IRQ_PG2			BFIN_PG_IRQ(2)
+#define IRQ_PG3			BFIN_PG_IRQ(3)
+#define IRQ_PG4			BFIN_PG_IRQ(4)
+#define IRQ_PG5			BFIN_PG_IRQ(5)
+#define IRQ_PG6			BFIN_PG_IRQ(6)
+#define IRQ_PG7			BFIN_PG_IRQ(7)
+#define IRQ_PG8			BFIN_PG_IRQ(8)
+#define IRQ_PG9			BFIN_PG_IRQ(9)
+#define IRQ_PG10		BFIN_PG_IRQ(10)
+#define IRQ_PG11		BFIN_PG_IRQ(11)
+#define IRQ_PG12		BFIN_PG_IRQ(12)
+#define IRQ_PG13		BFIN_PG_IRQ(13)
+#define IRQ_PG14		BFIN_PG_IRQ(14)
+#define IRQ_PG15		BFIN_PG_IRQ(15)
+
+#define GPIO_IRQ_BASE		IRQ_PA0
+
+#define NR_MACH_IRQS		(IRQ_PG15 + 1)
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/*
+ * bfin pint registers layout
+ */
+struct bfin_pint_regs {
+	u32 mask_set;
+	u32 mask_clear;
+	u32 request;
+	u32 assign;
+	u32 edge_set;
+	u32 edge_clear;
+	u32 invert_set;
+	u32 invert_clear;
+	u32 pinstate;
+	u32 latch;
+	u32 __pad0[2];
+};
+
+#endif
+
+#endif
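With the layout above, peripheral interrupts start at BFIN_IRQ(0) == IVG15 and the per-pin GPIO interrupts are appended contiguously after SYS_IRQS, so a pin's IRQ number is just an offset from GPIO_IRQ_BASE. An illustrative sketch only -- this is effectively what a gpio_to_irq() style mapping resolves to on this layout:

	#include <mach/gpio.h>
	#include <mach/irq.h>

	/* e.g. GPIO_PC6 (38) -> IRQ_PC6 == GPIO_IRQ_BASE + 38 */
	static inline unsigned int bf609_gpio_to_irq(unsigned int gpio)
	{
		return GPIO_IRQ_BASE + gpio;
	}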
diff --git a/arch/blackfin/mach-bf609/include/mach/mem_map.h b/arch/blackfin/mach-bf609/include/mach/mem_map.h
new file mode 100644
index 0000000..20b65bf
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/mem_map.h
@@ -0,0 +1,86 @@
+/*
+ * BF60x memory map
+ *
+ * Copyright 2011 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
+
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
+
+/* Async Memory Banks */
+#define ASYNC_BANK3_BASE	0xBC000000	 /* Async Bank 3 */
+#define ASYNC_BANK3_SIZE	0x04000000	/* 64M */
+#define ASYNC_BANK2_BASE	0xB8000000	 /* Async Bank 2 */
+#define ASYNC_BANK2_SIZE	0x04000000	/* 64M */
+#define ASYNC_BANK1_BASE	0xB4000000	 /* Async Bank 1 */
+#define ASYNC_BANK1_SIZE	0x04000000	/* 64M */
+#define ASYNC_BANK0_BASE	0xB0000000	 /* Async Bank 0 */
+#define ASYNC_BANK0_SIZE	0x04000000	/* 64M */
+
+/* Boot ROM Memory */
+
+#define BOOT_ROM_START		0xC8000000
+#define BOOT_ROM_LENGTH		0x8000
+
+/* Level 1 Memory */
+
+/* Memory Map for ADSP-BF60x processors */
+#ifdef CONFIG_BFIN_ICACHE
+#define BFIN_ICACHESIZE	(16*1024)
+#define L1_CODE_LENGTH      0x10000
+#else
+#define BFIN_ICACHESIZE	(0*1024)
+#define L1_CODE_LENGTH      0x14000
+#endif
+
+#define L1_CODE_START       0xFFA00000
+#define L1_DATA_A_START     0xFF800000
+#define L1_DATA_B_START     0xFF900000
+
+
+#define COREA_L1_SCRATCH_START  0xFFB00000
+#define COREB_L1_SCRATCH_START  0xFF700000
+
+#define COREB_L1_CODE_START       0xFF600000
+#define COREB_L1_DATA_A_START     0xFF400000
+#define COREB_L1_DATA_B_START     0xFF500000
+
+#define COREB_L1_CODE_LENGTH     0x14000
+#define COREB_L1_DATA_A_LENGTH   0x8000
+#define COREB_L1_DATA_B_LENGTH   0x8000
+
+
+#ifdef CONFIG_BFIN_DCACHE
+
+#ifdef CONFIG_BFIN_DCACHE_BANKA
+#define DMEM_CNTR (ACACHE_BSRAM | ENDCPLB | PORT_PREF0)
+#define L1_DATA_A_LENGTH      (0x8000 - 0x4000)
+#define L1_DATA_B_LENGTH      0x8000
+#define BFIN_DCACHESIZE	(16*1024)
+#define BFIN_DSUPBANKS	1
+#else
+#define DMEM_CNTR (ACACHE_BCACHE | ENDCPLB | PORT_PREF0)
+#define L1_DATA_A_LENGTH      (0x8000 - 0x4000)
+#define L1_DATA_B_LENGTH      (0x8000 - 0x4000)
+#define BFIN_DCACHESIZE	(32*1024)
+#define BFIN_DSUPBANKS	2
+#endif
+
+#else
+#define DMEM_CNTR (ASRAM_BSRAM | ENDCPLB | PORT_PREF0)
+#define L1_DATA_A_LENGTH      0x8000
+#define L1_DATA_B_LENGTH      0x8000
+#define BFIN_DCACHESIZE	(0*1024)
+#define BFIN_DSUPBANKS	0
+#endif /*CONFIG_BFIN_DCACHE*/
+
+/* Level 2 Memory */
+#define L2_START            0xC8080000
+#define L2_LENGTH           0x40000
+
+#endif
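The four async banks above are contiguous 64M windows starting at ASYNC_BANK0_BASE (0xB0000000 through 0xBFFFFFFF), so the bank index for a physical address is simple arithmetic. Illustrative only, not part of the patch; note the header itself insists on being pulled in via asm/mem_map.h:

	#include <asm/mem_map.h>	/* pulls in the mach definitions above */

	/* returns 0..3, or -1 if addr is not in async memory */
	static inline int bf609_async_bank(unsigned long addr)
	{
		if (addr < ASYNC_BANK0_BASE ||
		    addr >= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
			return -1;
		return (addr - ASYNC_BANK0_BASE) / ASYNC_BANK0_SIZE;
	}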
diff --git a/arch/blackfin/mach-bf609/include/mach/pll.h b/arch/blackfin/mach-bf609/include/mach/pll.h
new file mode 100644
index 0000000..1857a4a
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/pll.h
@@ -0,0 +1 @@
+/* #include <mach-common/pll.h> */
diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h
new file mode 100644
index 0000000..036d9bdc8
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/pm.h
@@ -0,0 +1,21 @@
+/*
+ * Blackfin bf609 power management
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2
+ */
+
+#ifndef __MACH_BF609_PM_H__
+#define __MACH_BF609_PM_H__
+
+#include <linux/suspend.h>
+
+int bfin609_pm_enter(suspend_state_t state);
+int bf609_pm_prepare(void);
+void bf609_pm_finish(void);
+
+void bf609_hibernate(void);
+void bfin_sec_raise_irq(unsigned int sid);
+void coreb_enable(void);
+#endif
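The bfin609_pm_enter/bf609_pm_prepare/bf609_pm_finish prototypes match the shape of the generic suspend callbacks, so the expected wiring is a platform_suspend_ops table. A hedged sketch of that wiring, assuming the standard <linux/suspend.h> API; the real registration for this port lives in the shared Blackfin PM code, not in this header:

	#include <linux/suspend.h>
	#include <mach/pm.h>

	static const struct platform_suspend_ops bf609_pm_ops_sketch = {
		.valid   = suspend_valid_only_mem,
		.prepare = bf609_pm_prepare,	/* save state before entry */
		.enter   = bfin609_pm_enter,	/* perform the state entry */
		.finish  = bf609_pm_finish,	/* restore state afterwards */
	};

	/* registration would then be: suspend_set_ops(&bf609_pm_ops_sketch); */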
diff --git a/arch/blackfin/mach-bf609/include/mach/portmux.h b/arch/blackfin/mach-bf609/include/mach/portmux.h
new file mode 100644
index 0000000..2e1a51c
--- /dev/null
+++ b/arch/blackfin/mach-bf609/include/mach/portmux.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later
+ */
+
+#ifndef _MACH_PORTMUX_H_
+#define _MACH_PORTMUX_H_
+
+#define MAX_RESOURCES	MAX_BLACKFIN_GPIOS
+
+/* EMAC RMII Port Mux */
+#define P_MII0_MDC	(P_DEFINED | P_IDENT(GPIO_PC6) | P_FUNCT(0))
+#define P_MII0_MDIO	(P_DEFINED | P_IDENT(GPIO_PC7) | P_FUNCT(0))
+#define P_MII0_ETxD0	(P_DEFINED | P_IDENT(GPIO_PC2) | P_FUNCT(0))
+#define P_MII0_ERxD0	(P_DEFINED | P_IDENT(GPIO_PC0) | P_FUNCT(0))
+#define P_MII0_ETxD1	(P_DEFINED | P_IDENT(GPIO_PC3) | P_FUNCT(0))
+#define P_MII0_ERxD1	(P_DEFINED | P_IDENT(GPIO_PC1) | P_FUNCT(0))
+#define P_MII0_ETxEN	(P_DEFINED | P_IDENT(GPIO_PB13) | P_FUNCT(0))
+#define P_MII0_PHYINT	(P_DEFINED | P_IDENT(GPIO_PD6) | P_FUNCT(0))
+#define P_MII0_CRS	(P_DEFINED | P_IDENT(GPIO_PC5) | P_FUNCT(0))
+#define P_MII0_ERxER	(P_DEFINED | P_IDENT(GPIO_PC4) | P_FUNCT(0))
+#define P_MII0_TxCLK	(P_DEFINED | P_IDENT(GPIO_PB14) | P_FUNCT(0))
+
+#define P_RMII0 {\
+	P_MII0_ETxD0, \
+	P_MII0_ETxD1, \
+	P_MII0_ETxEN, \
+	P_MII0_ERxD0, \
+	P_MII0_ERxD1, \
+	P_MII0_ERxER, \
+	P_MII0_TxCLK, \
+	P_MII0_PHYINT, \
+	P_MII0_CRS, \
+	P_MII0_MDC, \
+	P_MII0_MDIO, 0}
+
+#define P_MII1_MDC	(P_DEFINED | P_IDENT(GPIO_PE10) | P_FUNCT(0))
+#define P_MII1_MDIO	(P_DEFINED | P_IDENT(GPIO_PE11) | P_FUNCT(0))
+#define P_MII1_ETxD0	(P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(0))
+#define P_MII1_ERxD0	(P_DEFINED | P_IDENT(GPIO_PG0) | P_FUNCT(0))
+#define P_MII1_ETxD1	(P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(0))
+#define P_MII1_ERxD1	(P_DEFINED | P_IDENT(GPIO_PE15) | P_FUNCT(0))
+#define P_MII1_ETxEN	(P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(0))
+#define P_MII1_PHYINT	(P_DEFINED | P_IDENT(GPIO_PE12) | P_FUNCT(0))
+#define P_MII1_CRS	(P_DEFINED | P_IDENT(GPIO_PE13) | P_FUNCT(0))
+#define P_MII1_ERxER	(P_DEFINED | P_IDENT(GPIO_PE14) | P_FUNCT(0))
+#define P_MII1_TxCLK	(P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(0))
+
+#define P_RMII1 {\
+	P_MII1_ETxD0, \
+	P_MII1_ETxD1, \
+	P_MII1_ETxEN, \
+	P_MII1_ERxD0, \
+	P_MII1_ERxD1, \
+	P_MII1_ERxER, \
+	P_MII1_TxCLK, \
+	P_MII1_PHYINT, \
+	P_MII1_CRS, \
+	P_MII1_MDC, \
+	P_MII1_MDIO, 0}
+
+/* PPI Port Mux */
+#define P_PPI0_D0	(P_DEFINED | P_IDENT(GPIO_PF0) | P_FUNCT(1))
+#define P_PPI0_D1	(P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(1))
+#define P_PPI0_D2	(P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(1))
+#define P_PPI0_D3	(P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(1))
+#define P_PPI0_D4	(P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(1))
+#define P_PPI0_D5	(P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(1))
+#define P_PPI0_D6	(P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(1))
+#define P_PPI0_D7	(P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(1))
+#define P_PPI0_D8	(P_DEFINED | P_IDENT(GPIO_PF8) | P_FUNCT(1))
+#define P_PPI0_D9	(P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(1))
+#define P_PPI0_D10	(P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(1))
+#define P_PPI0_D11	(P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(1))
+#define P_PPI0_D12	(P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(1))
+#define P_PPI0_D13	(P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(1))
+#define P_PPI0_D14	(P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(1))
+#define P_PPI0_D15	(P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(1))
+#define P_PPI0_D16	(P_DEFINED | P_IDENT(GPIO_PE3) | P_FUNCT(1))
+#define P_PPI0_D17	(P_DEFINED | P_IDENT(GPIO_PE4) | P_FUNCT(1))
+#define P_PPI0_D18	(P_DEFINED | P_IDENT(GPIO_PE0) | P_FUNCT(1))
+#define P_PPI0_D19	(P_DEFINED | P_IDENT(GPIO_PE1) | P_FUNCT(1))
+#define P_PPI0_D20	(P_DEFINED | P_IDENT(GPIO_PD12) | P_FUNCT(1))
+#define P_PPI0_D21	(P_DEFINED | P_IDENT(GPIO_PD15) | P_FUNCT(1))
+#define P_PPI0_D22	(P_DEFINED | P_IDENT(GPIO_PE2) | P_FUNCT(1))
+#define P_PPI0_D23	(P_DEFINED | P_IDENT(GPIO_PE5) | P_FUNCT(1))
+#define P_PPI0_CLK	(P_DEFINED | P_IDENT(GPIO_PE9) | P_FUNCT(1))
+#define P_PPI0_FS1	(P_DEFINED | P_IDENT(GPIO_PE8) | P_FUNCT(1))
+#define P_PPI0_FS2	(P_DEFINED | P_IDENT(GPIO_PE7) | P_FUNCT(1))
+#define P_PPI0_FS3	(P_DEFINED | P_IDENT(GPIO_PE6) | P_FUNCT(1))
+
+#define P_PPI1_D0	(P_DEFINED | P_IDENT(GPIO_PC0) | P_FUNCT(1))
+#define P_PPI1_D1	(P_DEFINED | P_IDENT(GPIO_PC1) | P_FUNCT(1))
+#define P_PPI1_D2	(P_DEFINED | P_IDENT(GPIO_PC2) | P_FUNCT(1))
+#define P_PPI1_D3	(P_DEFINED | P_IDENT(GPIO_PC3) | P_FUNCT(1))
+#define P_PPI1_D4	(P_DEFINED | P_IDENT(GPIO_PC4) | P_FUNCT(1))
+#define P_PPI1_D5	(P_DEFINED | P_IDENT(GPIO_PC5) | P_FUNCT(1))
+#define P_PPI1_D6	(P_DEFINED | P_IDENT(GPIO_PC6) | P_FUNCT(1))
+#define P_PPI1_D7	(P_DEFINED | P_IDENT(GPIO_PC7) | P_FUNCT(1))
+#define P_PPI1_D8	(P_DEFINED | P_IDENT(GPIO_PC8) | P_FUNCT(1))
+#define P_PPI1_D9	(P_DEFINED | P_IDENT(GPIO_PC9) | P_FUNCT(1))
+#define P_PPI1_D10	(P_DEFINED | P_IDENT(GPIO_PC10) | P_FUNCT(1))
+#define P_PPI1_D11	(P_DEFINED | P_IDENT(GPIO_PC11) | P_FUNCT(1))
+#define P_PPI1_D12	(P_DEFINED | P_IDENT(GPIO_PC12) | P_FUNCT(1))
+#define P_PPI1_D13	(P_DEFINED | P_IDENT(GPIO_PC13) | P_FUNCT(1))
+#define P_PPI1_D14	(P_DEFINED | P_IDENT(GPIO_PC14) | P_FUNCT(1))
+#define P_PPI1_D15	(P_DEFINED | P_IDENT(GPIO_PC15) | P_FUNCT(1))
+#define P_PPI1_D16	(P_DEFINED | P_IDENT(GPIO_PD0) | P_FUNCT(1))
+#define P_PPI1_D17	(P_DEFINED | P_IDENT(GPIO_PD1) | P_FUNCT(1))
+#define P_PPI1_CLK	(P_DEFINED | P_IDENT(GPIO_PB14) | P_FUNCT(1))
+#define P_PPI1_FS1	(P_DEFINED | P_IDENT(GPIO_PB13) | P_FUNCT(1))
+#define P_PPI1_FS2	(P_DEFINED | P_IDENT(GPIO_PD6) | P_FUNCT(1))
+#define P_PPI1_FS3	(P_DEFINED | P_IDENT(GPIO_PB15) | P_FUNCT(1))
+
+#define P_PPI2_D0	(P_DEFINED | P_IDENT(GPIO_PA0) | P_FUNCT(1))
+#define P_PPI2_D1	(P_DEFINED | P_IDENT(GPIO_PA1) | P_FUNCT(1))
+#define P_PPI2_D2	(P_DEFINED | P_IDENT(GPIO_PA2) | P_FUNCT(1))
+#define P_PPI2_D3	(P_DEFINED | P_IDENT(GPIO_PA3) | P_FUNCT(1))
+#define P_PPI2_D4	(P_DEFINED | P_IDENT(GPIO_PA4) | P_FUNCT(1))
+#define P_PPI2_D5	(P_DEFINED | P_IDENT(GPIO_PA5) | P_FUNCT(1))
+#define P_PPI2_D6	(P_DEFINED | P_IDENT(GPIO_PA6) | P_FUNCT(1))
+#define P_PPI2_D7	(P_DEFINED | P_IDENT(GPIO_PA7) | P_FUNCT(1))
+#define P_PPI2_D8	(P_DEFINED | P_IDENT(GPIO_PA8) | P_FUNCT(1))
+#define P_PPI2_D9	(P_DEFINED | P_IDENT(GPIO_PA9) | P_FUNCT(1))
+#define P_PPI2_D10	(P_DEFINED | P_IDENT(GPIO_PA10) | P_FUNCT(1))
+#define P_PPI2_D11	(P_DEFINED | P_IDENT(GPIO_PA11) | P_FUNCT(1))
+#define P_PPI2_D12	(P_DEFINED | P_IDENT(GPIO_PA12) | P_FUNCT(1))
+#define P_PPI2_D13	(P_DEFINED | P_IDENT(GPIO_PA13) | P_FUNCT(1))
+#define P_PPI2_D14	(P_DEFINED | P_IDENT(GPIO_PA14) | P_FUNCT(1))
+#define P_PPI2_D15	(P_DEFINED | P_IDENT(GPIO_PA15) | P_FUNCT(1))
+#define P_PPI2_D16	(P_DEFINED | P_IDENT(GPIO_PB7) | P_FUNCT(1))
+#define P_PPI2_D17	(P_DEFINED | P_IDENT(GPIO_PB8) | P_FUNCT(1))
+#define P_PPI2_CLK	(P_DEFINED | P_IDENT(GPIO_PB0) | P_FUNCT(1))
+#define P_PPI2_FS1	(P_DEFINED | P_IDENT(GPIO_PB1) | P_FUNCT(1))
+#define P_PPI2_FS2	(P_DEFINED | P_IDENT(GPIO_PB2) | P_FUNCT(1))
+#define P_PPI2_FS3	(P_DEFINED | P_IDENT(GPIO_PB3) | P_FUNCT(1))
+
+/* SPI Port Mux */
+#define P_SPI0_SS	(P_DEFINED | P_IDENT(GPIO_PD11) | P_FUNCT(3))
+#define P_SPI0_SCK	(P_DEFINED | P_IDENT(GPIO_PD4) | P_FUNCT(0))
+#define P_SPI0_MISO	(P_DEFINED | P_IDENT(GPIO_PD2) | P_FUNCT(0))
+#define P_SPI0_MOSI	(P_DEFINED | P_IDENT(GPIO_PD3) | P_FUNCT(0))
+#define P_SPI0_RDY	(P_DEFINED | P_IDENT(GPIO_PD10) | P_FUNCT(0))
+#define P_SPI0_D2	(P_DEFINED | P_IDENT(GPIO_PD0) | P_FUNCT(0))
+#define P_SPI0_D3	(P_DEFINED | P_IDENT(GPIO_PD1) | P_FUNCT(0))
+
+#define P_SPI0_SSEL1	(P_DEFINED | P_IDENT(GPIO_PD11) | P_FUNCT(0))
+#define P_SPI0_SSEL2	(P_DEFINED | P_IDENT(GPIO_PD1) | P_FUNCT(2))
+#define P_SPI0_SSEL3	(P_DEFINED | P_IDENT(GPIO_PD0) | P_FUNCT(2))
+#define P_SPI0_SSEL4	(P_DEFINED | P_IDENT(GPIO_PC15) | P_FUNCT(0))
+#define P_SPI0_SSEL5	(P_DEFINED | P_IDENT(GPIO_PD9) | P_FUNCT(0))
+#define P_SPI0_SSEL6	(P_DEFINED | P_IDENT(GPIO_PC13) | P_FUNCT(0))
+#define P_SPI0_SSEL7	(P_DEFINED | P_IDENT(GPIO_PC12) | P_FUNCT(0))
+
+#define P_SPI1_SS	(P_DEFINED | P_IDENT(GPIO_PD12) | P_FUNCT(3))
+#define P_SPI1_SCK	(P_DEFINED | P_IDENT(GPIO_PD5) | P_FUNCT(0))
+#define P_SPI1_MISO	(P_DEFINED | P_IDENT(GPIO_PD14) | P_FUNCT(0))
+#define P_SPI1_MOSI	(P_DEFINED | P_IDENT(GPIO_PD13) | P_FUNCT(0))
+#define P_SPI1_RDY	(P_DEFINED | P_IDENT(GPIO_PE2) | P_FUNCT(0))
+#define P_SPI1_D2	(P_DEFINED | P_IDENT(GPIO_PE1) | P_FUNCT(0))
+#define P_SPI1_D3	(P_DEFINED | P_IDENT(GPIO_PE0) | P_FUNCT(0))
+
+#define P_SPI1_SSEL1	(P_DEFINED | P_IDENT(GPIO_PD12) | P_FUNCT(0))
+#define P_SPI1_SSEL2	(P_DEFINED | P_IDENT(GPIO_PD15) | P_FUNCT(2))
+#define P_SPI1_SSEL3	(P_DEFINED | P_IDENT(GPIO_PD10) | P_FUNCT(2))
+#define P_SPI1_SSEL4	(P_DEFINED | P_IDENT(GPIO_PD9) | P_FUNCT(2))
+#define P_SPI1_SSEL5	(P_DEFINED | P_IDENT(GPIO_PF8) | P_FUNCT(0))
+#define P_SPI1_SSEL6	(P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(0))
+#define P_SPI1_SSEL7	(P_DEFINED | P_IDENT(GPIO_PC14) | P_FUNCT(0))
+
+#define GPIO_DEFAULT_BOOT_SPI_CS
+#define P_DEFAULT_BOOT_SPI_CS
+
+/* CORE IDLE  */
+#define P_IDLEA		(P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(1))
+#define P_IDLEB		(P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(1))
+#define P_SLEEP		(P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(2))
+
+/* UART Port Mux */
+#define P_UART0_TX	(P_DEFINED | P_IDENT(GPIO_PD7) | P_FUNCT(1))
+#define P_UART0_RX	(P_DEFINED | P_IDENT(GPIO_PD8) | P_FUNCT(1))
+#define P_UART0_RTS	(P_DEFINED | P_IDENT(GPIO_PD9) | P_FUNCT(1))
+#define P_UART0_CTS	(P_DEFINED | P_IDENT(GPIO_PD10) | P_FUNCT(1))
+
+#define P_UART1_TX	(P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(0))
+#define P_UART1_RX	(P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(0))
+#define P_UART1_RTS	(P_DEFINED | P_IDENT(GPIO_PG10) | P_FUNCT(0))
+#define P_UART1_CTS	(P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(0))
+
+/* Timer */
+#define P_TMRCLK	(P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(3))
+#define P_TMR0		(P_DEFINED | P_IDENT(GPIO_PE14) | P_FUNCT(2))
+#define P_TMR1		(P_DEFINED | P_IDENT(GPIO_PG4) | P_FUNCT(1))
+#define P_TMR2		(P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(1))
+#define P_TMR3		(P_DEFINED | P_IDENT(GPIO_PG8) | P_FUNCT(1))
+#define P_TMR4		(P_DEFINED | P_IDENT(GPIO_PG9) | P_FUNCT(1))
+#define P_TMR5		(P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(1))
+#define P_TMR6		(P_DEFINED | P_IDENT(GPIO_PG11) | P_FUNCT(1))
+#define P_TMR7		(P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(1))
+
+/* RSI */
+#define P_RSI_DATA0	(P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(2))
+#define P_RSI_DATA1	(P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(2))
+#define P_RSI_DATA2	(P_DEFINED | P_IDENT(GPIO_PG0) | P_FUNCT(2))
+#define P_RSI_DATA3	(P_DEFINED | P_IDENT(GPIO_PE15) | P_FUNCT(2))
+#define P_RSI_DATA4	(P_DEFINED | P_IDENT(GPIO_PE13) | P_FUNCT(2))
+#define P_RSI_DATA5	(P_DEFINED | P_IDENT(GPIO_PE12) | P_FUNCT(2))
+#define P_RSI_DATA6	(P_DEFINED | P_IDENT(GPIO_PE10) | P_FUNCT(2))
+#define P_RSI_DATA7	(P_DEFINED | P_IDENT(GPIO_PE11) | P_FUNCT(2))
+#define P_RSI_CMD	(P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(1))
+#define P_RSI_CLK	(P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(1))
+
+/* PTP */
+#define P_PTP0_PPS	(P_DEFINED | P_IDENT(GPIO_PB15) | P_FUNCT(0))
+#define P_PTP0_CLKIN	(P_DEFINED | P_IDENT(GPIO_PC13) | P_FUNCT(2))
+#define P_PTP0_AUXIN	(P_DEFINED | P_IDENT(GPIO_PC11) | P_FUNCT(2))
+
+#define P_PTP1_PPS	(P_DEFINED | P_IDENT(GPIO_PC9) | P_FUNCT(0))
+#define P_PTP1_CLKIN	(P_DEFINED | P_IDENT(GPIO_PC13) | P_FUNCT(2))
+#define P_PTP1_AUXIN	(P_DEFINED | P_IDENT(GPIO_PC11) | P_FUNCT(2))
+
+/* SMC Port Mux */
+#define P_A3		(P_DEFINED | P_IDENT(GPIO_PA0) | P_FUNCT(0))
+#define P_A4		(P_DEFINED | P_IDENT(GPIO_PA1) | P_FUNCT(0))
+#define P_A5		(P_DEFINED | P_IDENT(GPIO_PA2) | P_FUNCT(0))
+#define P_A6		(P_DEFINED | P_IDENT(GPIO_PA3) | P_FUNCT(0))
+#define P_A7		(P_DEFINED | P_IDENT(GPIO_PA4) | P_FUNCT(0))
+#define P_A8		(P_DEFINED | P_IDENT(GPIO_PA5) | P_FUNCT(0))
+#define P_A9		(P_DEFINED | P_IDENT(GPIO_PA6) | P_FUNCT(0))
+#define P_A10		(P_DEFINED | P_IDENT(GPIO_PA7) | P_FUNCT(0))
+#define P_A11		(P_DEFINED | P_IDENT(GPIO_PA8) | P_FUNCT(0))
+#define P_A12		(P_DEFINED | P_IDENT(GPIO_PA9) | P_FUNCT(0))
+#define P_A13		(P_DEFINED | P_IDENT(GPIO_PB2) | P_FUNCT(0))
+#define P_A14		(P_DEFINED | P_IDENT(GPIO_PA10) | P_FUNCT(0))
+#define P_A15		(P_DEFINED | P_IDENT(GPIO_PA11) | P_FUNCT(0))
+#define P_A16		(P_DEFINED | P_IDENT(GPIO_PB3) | P_FUNCT(0))
+#define P_A17		(P_DEFINED | P_IDENT(GPIO_PA12) | P_FUNCT(0))
+#define P_A18		(P_DEFINED | P_IDENT(GPIO_PA13) | P_FUNCT(0))
+#define P_A19		(P_DEFINED | P_IDENT(GPIO_PA14) | P_FUNCT(0))
+#define P_A20		(P_DEFINED | P_IDENT(GPIO_PA15) | P_FUNCT(0))
+#define P_A21		(P_DEFINED | P_IDENT(GPIO_PB6) | P_FUNCT(0))
+#define P_A22		(P_DEFINED | P_IDENT(GPIO_PB7) | P_FUNCT(0))
+#define P_A23		(P_DEFINED | P_IDENT(GPIO_PB8) | P_FUNCT(0))
+#define P_A24		(P_DEFINED | P_IDENT(GPIO_PB10) | P_FUNCT(0))
+#define P_A25		(P_DEFINED | P_IDENT(GPIO_PB11) | P_FUNCT(0))
+#define P_NORCK         (P_DEFINED | P_IDENT(GPIO_PB0) | P_FUNCT(0))
+
+#define P_AMS1		(P_DEFINED | P_IDENT(GPIO_PB1) | P_FUNCT(0))
+#define P_AMS2		(P_DEFINED | P_IDENT(GPIO_PB4) | P_FUNCT(0))
+#define P_AMS3		(P_DEFINED | P_IDENT(GPIO_PB5) | P_FUNCT(0))
+
+/* CAN */
+#define P_CAN0_TX	(P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(2))
+#define P_CAN0_RX	(P_DEFINED | P_IDENT(GPIO_PG4) | P_FUNCT(2))
+
+/* SPORT */
+#define P_SPORT0_ACLK	(P_DEFINED | P_IDENT(GPIO_PB5) | P_FUNCT(2))
+#define P_SPORT0_AFS	(P_DEFINED | P_IDENT(GPIO_PB4) | P_FUNCT(2))
+#define P_SPORT0_AD0	(P_DEFINED | P_IDENT(GPIO_PB9) | P_FUNCT(2))
+#define P_SPORT0_AD1	(P_DEFINED | P_IDENT(GPIO_PB12) | P_FUNCT(2))
+#define P_SPORT0_ATDV	(P_DEFINED | P_IDENT(GPIO_PB6) | P_FUNCT(1))
+#define P_SPORT0_BCLK	(P_DEFINED | P_IDENT(GPIO_PB8) | P_FUNCT(2))
+#define P_SPORT0_BFS	(P_DEFINED | P_IDENT(GPIO_PB7) | P_FUNCT(2))
+#define P_SPORT0_BD0	(P_DEFINED | P_IDENT(GPIO_PB11) | P_FUNCT(2))
+#define P_SPORT0_BD1	(P_DEFINED | P_IDENT(GPIO_PB10) | P_FUNCT(2))
+#define P_SPORT0_BTDV	(P_DEFINED | P_IDENT(GPIO_PB12) | P_FUNCT(1))
+
+#define P_SPORT1_ACLK	(P_DEFINED | P_IDENT(GPIO_PE2) | P_FUNCT(2))
+#define P_SPORT1_AFS	(P_DEFINED | P_IDENT(GPIO_PE5) | P_FUNCT(2))
+#define P_SPORT1_AD0	(P_DEFINED | P_IDENT(GPIO_PD15) | P_FUNCT(2))
+#define P_SPORT1_AD1	(P_DEFINED | P_IDENT(GPIO_PD12) | P_FUNCT(2))
+#define P_SPORT1_ATDV	(P_DEFINED | P_IDENT(GPIO_PE6) | P_FUNCT(0))
+#define P_SPORT1_BCLK	(P_DEFINED | P_IDENT(GPIO_PE4) | P_FUNCT(2))
+#define P_SPORT1_BFS	(P_DEFINED | P_IDENT(GPIO_PE3) | P_FUNCT(2))
+#define P_SPORT1_BD0	(P_DEFINED | P_IDENT(GPIO_PE1) | P_FUNCT(2))
+#define P_SPORT1_BD1	(P_DEFINED | P_IDENT(GPIO_PE0) | P_FUNCT(2))
+#define P_SPORT1_BTDV	(P_DEFINED | P_IDENT(GPIO_PE7) | P_FUNCT(0))
+
+#define P_SPORT2_ACLK	(P_DEFINED | P_IDENT(GPIO_PG4) | P_FUNCT(0))
+#define P_SPORT2_AFS	(P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(0))
+#define P_SPORT2_AD0	(P_DEFINED | P_IDENT(GPIO_PG9) | P_FUNCT(0))
+#define P_SPORT2_AD1	(P_DEFINED | P_IDENT(GPIO_PG8) | P_FUNCT(0))
+#define P_SPORT2_ATDV	(P_DEFINED | P_IDENT(GPIO_PE14) | P_FUNCT(1))
+#define P_SPORT2_BCLK	(P_DEFINED | P_IDENT(GPIO_PG10) | P_FUNCT(1))
+#define P_SPORT2_BFS	(P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(0))
+#define P_SPORT2_BD0	(P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(0))
+#define P_SPORT2_BD1	(P_DEFINED | P_IDENT(GPIO_PG11) | P_FUNCT(0))
+#define P_SPORT2_BTDV	(P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(2))
+
+/* LINK PORT */
+#define P_LP0_CLK	(P_DEFINED | P_IDENT(GPIO_PB0) | P_FUNCT(2))
+#define P_LP0_ACK       (P_DEFINED | P_IDENT(GPIO_PB1) | P_FUNCT(2))
+#define P_LP0_D0        (P_DEFINED | P_IDENT(GPIO_PA0) | P_FUNCT(2))
+#define P_LP0_D1        (P_DEFINED | P_IDENT(GPIO_PA1) | P_FUNCT(2))
+#define P_LP0_D2        (P_DEFINED | P_IDENT(GPIO_PA2) | P_FUNCT(2))
+#define P_LP0_D3        (P_DEFINED | P_IDENT(GPIO_PA3) | P_FUNCT(2))
+#define P_LP0_D4        (P_DEFINED | P_IDENT(GPIO_PA4) | P_FUNCT(2))
+#define P_LP0_D5        (P_DEFINED | P_IDENT(GPIO_PA5) | P_FUNCT(2))
+#define P_LP0_D6        (P_DEFINED | P_IDENT(GPIO_PA6) | P_FUNCT(2))
+#define P_LP0_D7        (P_DEFINED | P_IDENT(GPIO_PA7) | P_FUNCT(2))
+
+#define P_LP1_CLK	(P_DEFINED | P_IDENT(GPIO_PB3) | P_FUNCT(2))
+#define P_LP1_ACK       (P_DEFINED | P_IDENT(GPIO_PB2) | P_FUNCT(2))
+#define P_LP1_D0        (P_DEFINED | P_IDENT(GPIO_PA8) | P_FUNCT(2))
+#define P_LP1_D1        (P_DEFINED | P_IDENT(GPIO_PA9) | P_FUNCT(2))
+#define P_LP1_D2        (P_DEFINED | P_IDENT(GPIO_PA10) | P_FUNCT(2))
+#define P_LP1_D3        (P_DEFINED | P_IDENT(GPIO_PA11) | P_FUNCT(2))
+#define P_LP1_D4        (P_DEFINED | P_IDENT(GPIO_PA12) | P_FUNCT(2))
+#define P_LP1_D5        (P_DEFINED | P_IDENT(GPIO_PA13) | P_FUNCT(2))
+#define P_LP1_D6        (P_DEFINED | P_IDENT(GPIO_PA14) | P_FUNCT(2))
+#define P_LP1_D7        (P_DEFINED | P_IDENT(GPIO_PA15) | P_FUNCT(2))
+
+#define P_LP2_CLK	(P_DEFINED | P_IDENT(GPIO_PE6) | P_FUNCT(2))
+#define P_LP2_ACK       (P_DEFINED | P_IDENT(GPIO_PE7) | P_FUNCT(2))
+#define P_LP2_D0        (P_DEFINED | P_IDENT(GPIO_PF0) | P_FUNCT(2))
+#define P_LP2_D1        (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(2))
+#define P_LP2_D2        (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(2))
+#define P_LP2_D3        (P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(2))
+#define P_LP2_D4        (P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(2))
+#define P_LP2_D5        (P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(2))
+#define P_LP2_D6        (P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(2))
+#define P_LP2_D7        (P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(2))
+
+#define P_LP3_CLK	(P_DEFINED | P_IDENT(GPIO_PE9) | P_FUNCT(2))
+#define P_LP3_ACK       (P_DEFINED | P_IDENT(GPIO_PE8) | P_FUNCT(2))
+#define P_LP3_D0        (P_DEFINED | P_IDENT(GPIO_PF8) | P_FUNCT(2))
+#define P_LP3_D1        (P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(2))
+#define P_LP3_D2        (P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(2))
+#define P_LP3_D3        (P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(2))
+#define P_LP3_D4        (P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(2))
+#define P_LP3_D5        (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(2))
+#define P_LP3_D6        (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(2))
+#define P_LP3_D7        (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(2))
+
+/* TWI */
+#define P_TWI0_SCL	(P_DONTCARE)
+#define P_TWI0_SDA	(P_DONTCARE)
+#define P_TWI1_SCL	(P_DONTCARE)
+#define P_TWI1_SDA	(P_DONTCARE)
+
+/* Rotary Encoder */
+#define P_CNT_CZM	(P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(3))
+#define P_CNT_CUD	(P_DEFINED | P_IDENT(GPIO_PG11) | P_FUNCT(3))
+#define P_CNT_CDG	(P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(3))
+
+#endif				/* _MACH_PORTMUX_H_ */
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
new file mode 100644
index 0000000..b76966e
--- /dev/null
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -0,0 +1,362 @@
+/*
+ * Blackfin bf609 power management
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2
+ */
+
+#include <linux/suspend.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+
+#include <linux/delay.h>
+
+#include <asm/dpmc.h>
+#include <asm/pm.h>
+#include <mach/pm.h>
+#include <asm/blackfin.h>
+
+/***********************************************************/
+/*                                                         */
+/* Wakeup Actions for DPM_RESTORE                          */
+/*                                                         */
+/***********************************************************/
+#define BITP_ROM_WUA_CHKHDR             24
+#define BITP_ROM_WUA_DDRLOCK            7
+#define BITP_ROM_WUA_DDRDLLEN           6
+#define BITP_ROM_WUA_DDR                5
+#define BITP_ROM_WUA_CGU                4
+#define BITP_ROM_WUA_MEMBOOT            2
+#define BITP_ROM_WUA_EN                 1
+
+#define BITM_ROM_WUA_CHKHDR             (0xFF000000)
+#define ENUM_ROM_WUA_CHKHDR_AD                  0xAD000000
+
+#define BITM_ROM_WUA_DDRLOCK            (0x00000080)
+#define BITM_ROM_WUA_DDRDLLEN           (0x00000040)
+#define BITM_ROM_WUA_DDR                (0x00000020)
+#define BITM_ROM_WUA_CGU                (0x00000010)
+#define BITM_ROM_WUA_MEMBOOT            (0x00000002)
+#define BITM_ROM_WUA_EN                 (0x00000001)
+
+/***********************************************************/
+/*                                                         */
+/* Syscontrol                                              */
+/*                                                         */
+/***********************************************************/
+#define BITP_ROM_SYSCTRL_CGU_LOCKINGEN  28    /* unlocks CGU_CTL register */
+#define BITP_ROM_SYSCTRL_WUA_OVERRIDE   24
+#define BITP_ROM_SYSCTRL_WUA_DDRDLLEN   20    /* Saves the DDR DLL and PADS registers to the DPM registers */
+#define BITP_ROM_SYSCTRL_WUA_DDR        19    /* Saves the DDR registers to the DPM registers */
+#define BITP_ROM_SYSCTRL_WUA_CGU        18    /* Saves the CGU registers into DPM registers */
+#define BITP_ROM_SYSCTRL_WUA_DPMWRITE   17    /* Saves the Syscontrol structure contents into the DPM registers */
+#define BITP_ROM_SYSCTRL_WUA_EN         16    /* reads current PLL and DDR configuration into structure */
+#define BITP_ROM_SYSCTRL_DDR_WRITE      13    /* writes the DDR registers from Syscontrol structure for wakeup initialization of DDR */
+#define BITP_ROM_SYSCTRL_DDR_READ       12    /* Read the DDR registers into the Syscontrol structure for storing prior to hibernate */
+#define BITP_ROM_SYSCTRL_CGU_AUTODIS    11    /* Disables auto handling of UPDT and ALGN fields */
+#define BITP_ROM_SYSCTRL_CGU_CLKOUTSEL  7    /* access CGU_CLKOUTSEL register */
+#define BITP_ROM_SYSCTRL_CGU_DIV        6    /* access CGU_DIV register */
+#define BITP_ROM_SYSCTRL_CGU_STAT       5    /* access CGU_STAT register */
+#define BITP_ROM_SYSCTRL_CGU_CTL        4    /* access CGU_CTL register */
+#define BITP_ROM_SYSCTRL_CGU_RTNSTAT    2    /* Update structure STAT field upon error */
+#define BITP_ROM_SYSCTRL_WRITE          1    /* write registers */
+#define BITP_ROM_SYSCTRL_READ           0    /* read registers */
+
+#define BITM_ROM_SYSCTRL_CGU_READ       (0x00000001)    /* Read CGU registers */
+#define BITM_ROM_SYSCTRL_CGU_WRITE      (0x00000002)    /* Write registers */
+#define BITM_ROM_SYSCTRL_CGU_RTNSTAT    (0x00000004)    /* Update structure STAT field upon error or after a write operation */
+#define BITM_ROM_SYSCTRL_CGU_CTL        (0x00000010)    /* Access CGU_CTL register */
+#define BITM_ROM_SYSCTRL_CGU_STAT       (0x00000020)    /* Access CGU_STAT register */
+#define BITM_ROM_SYSCTRL_CGU_DIV        (0x00000040)    /* Access CGU_DIV register */
+#define BITM_ROM_SYSCTRL_CGU_CLKOUTSEL  (0x00000080)    /* Access CGU_CLKOUTSEL register */
+#define BITM_ROM_SYSCTRL_CGU_AUTODIS    (0x00000800)    /* Disables auto handling of UPDT and ALGN fields */
+#define BITM_ROM_SYSCTRL_DDR_READ       (0x00001000)    /* Reads the contents of the DDR registers and stores them into the structure */
+#define BITM_ROM_SYSCTRL_DDR_WRITE      (0x00002000)    /* Writes the DDR registers from the structure, only really intended for wakeup functionality and not for full DDR configuration */
+#define BITM_ROM_SYSCTRL_WUA_EN         (0x00010000)    /* Wakeup entry or exit operation enable */
+#define BITM_ROM_SYSCTRL_WUA_DPMWRITE   (0x00020000)    /* When set, indicates a restore of the PLL and DDR is to be performed; otherwise a save is required */
+#define BITM_ROM_SYSCTRL_WUA_CGU        (0x00040000)    /* Only applicable for a PLL and DDR save operation to the DPM, saves the current settings if cleared or the contents of the structure if set */
+#define BITM_ROM_SYSCTRL_WUA_DDR        (0x00080000)    /* Only applicable for a PLL and DDR save operation to the DPM, saves the current settings if cleared or the contents of the structure if set */
+#define BITM_ROM_SYSCTRL_WUA_DDRDLLEN   (0x00100000)    /* Enables saving/restoring of the DDR DLLCTL register */
+#define BITM_ROM_SYSCTRL_WUA_OVERRIDE   (0x01000000)
+#define BITM_ROM_SYSCTRL_CGU_LOCKINGEN  (0x10000000)    /* Unlocks the CGU_CTL register */
+
+
+/* Structures for the syscontrol() function */
+struct STRUCT_ROM_SYSCTRL {
+	uint32_t ulCGU_CTL;
+	uint32_t ulCGU_STAT;
+	uint32_t ulCGU_DIV;
+	uint32_t ulCGU_CLKOUTSEL;
+	uint32_t ulWUA_Flags;
+	uint32_t ulWUA_BootAddr;
+	uint32_t ulWUA_User;
+	uint32_t ulDDR_CTL;
+	uint32_t ulDDR_CFG;
+	uint32_t ulDDR_TR0;
+	uint32_t ulDDR_TR1;
+	uint32_t ulDDR_TR2;
+	uint32_t ulDDR_MR;
+	uint32_t ulDDR_EMR1;
+	uint32_t ulDDR_EMR2;
+	uint32_t ulDDR_PADCTL;
+	uint32_t ulDDR_DLLCTL;
+	uint32_t ulReserved;
+};
+
+struct bfin_pm_data {
+	uint32_t magic;
+	uint32_t resume_addr;
+	uint32_t sp;
+};
+
+struct bfin_pm_data bf609_pm_data;
+
+struct STRUCT_ROM_SYSCTRL configvalues;
+uint32_t dactionflags;
+
+#define FUNC_ROM_SYSCONTROL 0xC8000080
+__attribute__((l1_data))
+static uint32_t (* const bfrom_SysControl)(uint32_t action_flags, struct STRUCT_ROM_SYSCTRL *settings, void *reserved) = (void *)FUNC_ROM_SYSCONTROL;
+
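+/* Idle the core; the IDLE instruction is placed on an 8-byte aligned boundary */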
+__attribute__((l1_text))
+void bfin_cpu_suspend(void)
+{
+	__asm__ __volatile__( \
+			".align 8;" \
+			"idle;" \
+			: : \
+			);
+}
+
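+/*
+ * Enter deep sleep: enable and set the polarity of the DPM wakeup
+ * source, request deep sleep via DPM0_CTL and idle the core.
+ */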
+__attribute__((l1_text))
+void bfin_deepsleep(unsigned long mask)
+{
+	uint32_t dpm0_ctl;
+
+	bfin_write32(DPM0_WAKE_EN, 0x10);
+	bfin_write32(DPM0_WAKE_POL, 0x10);
+	dpm0_ctl = 0x00000008;
+	bfin_write32(DPM0_CTL, dpm0_ctl);
+	SSYNC();
+	__asm__ __volatile__( \
+			".align 8;" \
+			"idle;" \
+			: : \
+			);
+#ifdef CONFIG_BFIN_PM_WAKEUP_TIME_BENCH
+	__asm__ __volatile__(
+		"R0 = 0;"
+		"CYCLES = R0;"
+		"CYCLES2 = R0;"
+		"R0 = SYSCFG;"
+		"BITSET(R0, 1);"
+		"SYSCFG = R0;"
+		: : : "R0"
+	);
+#endif
+
+}
+
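+/* Put the DDR controller into self-refresh and wait for it to acknowledge */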
+__attribute__((l1_text))
+void bf609_ddr_sr(void)
+{
+	uint32_t reg;
+
+	reg = bfin_read_DMC0_CTL();
+	reg |= 0x8;
+	bfin_write_DMC0_CTL(reg);
+
+	while (!(bfin_read_DMC0_STAT() & 0x8))
+		continue;
+}
+
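+/* Take the DDR controller out of self-refresh once it is ready again */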
+__attribute__((l1_text))
+void bf609_ddr_sr_exit(void)
+{
+	uint32_t reg;
+	while (!(bfin_read_DMC0_STAT() & 0x1))
+		continue;
+
+	reg = bfin_read_DMC0_CTL();
+	reg &= ~0x8;
+	bfin_write_DMC0_CTL(reg);
+
+	while ((bfin_read_DMC0_STAT() & 0x8))
+		continue;
+}
+
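+/*
+ * Ask the boot ROM SysControl routine to save the current CGU and DDR
+ * settings into the DPM registers so they can be restored on wakeup
+ * from hibernate.
+ */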
+__attribute__((l1_text))
+void bfin_hibernate_syscontrol(void)
+{
+	configvalues.ulWUA_Flags = (0xAD000000 | BITM_ROM_WUA_EN
+		| BITM_ROM_WUA_CGU | BITM_ROM_WUA_DDR | BITM_ROM_WUA_DDRDLLEN);
+
+	dactionflags = (BITM_ROM_SYSCTRL_WUA_EN
+		| BITM_ROM_SYSCTRL_WUA_DPMWRITE | BITM_ROM_SYSCTRL_WUA_CGU
+		| BITM_ROM_SYSCTRL_WUA_DDR | BITM_ROM_SYSCTRL_WUA_DDRDLLEN);
+
+	bfrom_SysControl(dactionflags, &configvalues, NULL);
+
+	bfin_write32(DPM0_RESTORE5, bfin_read32(DPM0_RESTORE5) | 4);
+}
+
+#ifndef CONFIG_BF60x
+# define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
+#else
+# define SIC_SYSIRQ(irq)	((irq) - IVG15)
+#endif
+void bfin_hibernate(unsigned long mask)
+{
+	bfin_write32(DPM0_WAKE_EN, 0x10);
+	bfin_write32(DPM0_WAKE_POL, 0x10);
+	bfin_write32(DPM0_PGCNTR, 0x0000FFFF);
+	bfin_write32(DPM0_HIB_DIS, 0xFFFF);
+
+	printk(KERN_DEBUG "hibernate: restore %x pgcnt %x\n", bfin_read32(DPM0_RESTORE0), bfin_read32(DPM0_PGCNTR));
+
+	bf609_hibernate();
+}
+
+void bf609_cpu_pm_enter(suspend_state_t state)
+{
+	int error;
+	unsigned long wakeup = 0;
+	unsigned long wakeup_pol = 0;
+
+#ifdef CONFIG_PM_BFIN_WAKE_PA15
+	wakeup |= PA15WE;
+# if CONFIG_PM_BFIN_WAKE_PA15_POL
+	wakeup_pol |= PA15WE;
+# endif
+#endif
+
+#ifdef CONFIG_PM_BFIN_WAKE_PB15
+	wakeup |= PB15WE;
+# if CONFIG_PM_BFIN_WAKE_PB15_POL
+	wakeup_pol |= PB15WE;
+# endif
+#endif
+
+#ifdef CONFIG_PM_BFIN_WAKE_PC15
+	wakeup |= PC15WE;
+# if CONFIG_PM_BFIN_WAKE_PC15_POL
+	wakeup_pol |= PC15WE;
+# endif
+#endif
+
+#ifdef CONFIG_PM_BFIN_WAKE_PD06
+	wakeup |= PD06WE;
+# if CONFIG_PM_BFIN_WAKE_PD06_POL
+	wakeup_pol |= PD06WE;
+# endif
+#endif
+
+#ifdef CONFIG_PM_BFIN_WAKE_PE12
+	wakeup |= PE12WE;
+# if CONFIG_PM_BFIN_WAKE_PE12_POL
+	wakeup_pol |= PE12WE;
+# endif
+#endif
+
+#ifdef CONFIG_PM_BFIN_WAKE_PG04
+	wakeup |= PG04WE;
+# if CONFIG_PM_BFIN_WAKE_PG04_POL
+	wakeup_pol |= PG04WE;
+# endif
+#endif
+
+#ifdef CONFIG_PM_BFIN_WAKE_PG13
+	wakeup |= PG13WE;
+# if CONFIG_PM_BFIN_WAKE_PG13_POL
+	wakeup_pol |= PG13WE;
+# endif
+#endif
+
+#ifdef CONFIG_PM_BFIN_WAKE_USB
+	wakeup |= USBWE;
+# if CONFIG_PM_BFIN_WAKE_USB_POL
+	wakeup_pol |= USBWE;
+# endif
+#endif
+
+	error = irq_set_irq_wake(255, 1);
+	if (error < 0)
+		printk(KERN_DEBUG "Unable to get irq wake\n");
+	error = irq_set_irq_wake(231, 1);
+	if (error < 0)
+		printk(KERN_DEBUG "Unable to get irq wake\n");
+
+	if (state == PM_SUSPEND_STANDBY)
+		bfin_deepsleep(wakeup);
+	else
+		bfin_hibernate(wakeup);
+}
+
+int bf609_cpu_pm_prepare(void)
+{
+	return 0;
+}
+
+void bf609_cpu_pm_finish(void)
+{
+}
+
+static struct bfin_cpu_pm_fns bf609_cpu_pm = {
+	.enter          = bf609_cpu_pm_enter,
+	.prepare        = bf609_cpu_pm_prepare,
+	.finish         = bf609_cpu_pm_finish,
+};
+
+static irqreturn_t test_isr(int irq, void *dev_id)
+{
+	printk(KERN_DEBUG "gpio irq %d\n", irq);
+	return IRQ_HANDLED;
+}
+
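+/* Shared handler for CGU/DPM events: log and acknowledge the wakeup status */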
+static irqreturn_t dpm0_isr(int irq, void *dev_id)
+{
+	uint32_t wake_stat;
+
+	wake_stat = bfin_read32(DPM0_WAKE_STAT);
+	printk(KERN_DEBUG "enter %s wake stat %08x\n", __func__, wake_stat);
+
+	bfin_write32(DPM0_WAKE_STAT, wake_stat);
+	return IRQ_HANDLED;
+}
+
+static int __init bf609_init_pm(void)
+{
+	int irq;
+	int error;
+
+#ifdef CONFIG_PM_BFIN_WAKE_PE12
+	irq = gpio_to_irq(GPIO_PE12);
+	if (irq < 0) {
+		error = irq;
+		printk(KERN_DEBUG "Unable to get irq number for GPIO %d, error %d\n",
+				GPIO_PE12, error);
+	} else {
+		error = request_irq(irq, test_isr,
+				IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
+				"gpiope12", NULL);
+		if (error < 0)
+			printk(KERN_DEBUG "Unable to request GPIO PE12 wakeup irq\n");
+	}
+#endif
+
+	error = request_irq(IRQ_CGU_EVT, dpm0_isr, IRQF_NO_SUSPEND, "cgu0 event", NULL);
+	if (error < 0)
+		printk(KERN_DEBUG "Unable to request cgu0 event irq\n");
+
+	error = request_irq(IRQ_DPM, dpm0_isr, IRQF_NO_SUSPEND, "dpm0 event", NULL);
+	if (error < 0)
+		printk(KERN_DEBUG "Unable to request dpm0 event irq\n");
+
+	bfin_cpu_pm = &bf609_cpu_pm;
+	return 0;
+}
+
+late_initcall(bf609_init_pm);
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
index ff299f2..75f0ba2 100644
--- a/arch/blackfin/mach-common/Makefile
+++ b/arch/blackfin/mach-common/Makefile
@@ -6,7 +6,10 @@
 	cache.o cache-c.o entry.o head.o \
 	interrupt.o arch_checks.o ints-priority.o
 
-obj-$(CONFIG_PM)          += pm.o dpmc_modes.o
+obj-$(CONFIG_PM)          += pm.o
+ifneq ($(CONFIG_BF60x),y)
+obj-$(CONFIG_PM)	  += dpmc_modes.o
+endif
 obj-$(CONFIG_CPU_FREQ)    += cpufreq.o
 obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
 obj-$(CONFIG_SMP)         += smp.o
diff --git a/arch/blackfin/mach-common/clock.h b/arch/blackfin/mach-common/clock.h
new file mode 100644
index 0000000..645ff46
--- /dev/null
+++ b/arch/blackfin/mach-common/clock.h
@@ -0,0 +1,27 @@
+#ifndef __MACH_COMMON_CLKDEV_H
+#define __MACH_COMMON_CLKDEV_H
+
+#include <linux/clk.h>
+
+struct clk_ops {
+	unsigned long (*get_rate)(struct clk *clk);
+	unsigned long (*round_rate)(struct clk *clk, unsigned long rate);
+	int (*set_rate)(struct clk *clk, unsigned long rate);
+	int (*enable)(struct clk *clk);
+	int (*disable)(struct clk *clk);
+};
+
+struct clk {
+	const char		*name;
+	unsigned long           rate;
+	spinlock_t 		lock;
+	u32			flags;
+	const struct clk_ops    *ops;
+	const struct params 	*params;
+	void __iomem            *reg;
+	u32			mask;
+	u32			shift;
+};
+
+#endif
+
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
index d5cfe61..7ad2407 100644
--- a/arch/blackfin/mach-common/clocks-init.c
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -15,10 +15,121 @@
 #include <asm/mem_init.h>
 #include <asm/dpmc.h>
 
+#ifdef CONFIG_BF60x
+#define CSEL_P			0
+#define S0SEL_P			5
+#define SYSSEL_P		8
+#define S1SEL_P			13
+#define DSEL_P			16
+#define OSEL_P			22
+#define ALGN_P			29
+#define UPDT_P			30
+#define LOCK_P			31
+
+#define CGU_CTL_VAL ((CONFIG_VCO_MULT << 8) | CLKIN_HALF)
+#define CGU_DIV_VAL \
+	((CONFIG_CCLK_DIV   << CSEL_P)   | \
+	(CONFIG_SCLK_DIV << SYSSEL_P)   | \
+	(CONFIG_SCLK0_DIV  << S0SEL_P)  | \
+	(CONFIG_SCLK1_DIV  << S1SEL_P)  | \
+	(CONFIG_DCLK_DIV   << DSEL_P))
+
+#define CONFIG_BFIN_DCLK (((CONFIG_CLKIN_HZ * CONFIG_VCO_MULT) / CONFIG_DCLK_DIV) / 1000000)
+#if ((CONFIG_BFIN_DCLK != 125) && \
+	(CONFIG_BFIN_DCLK != 133) && (CONFIG_BFIN_DCLK != 150) && \
+	(CONFIG_BFIN_DCLK != 166) && (CONFIG_BFIN_DCLK != 200) && \
+	(CONFIG_BFIN_DCLK != 225) && (CONFIG_BFIN_DCLK != 250))
+#error "DCLK must be one of 125, 133, 150, 166, 200, 225 or 250 MHz"
+#endif
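+/* DDR controller settings for each supported DCLK frequency; the table lives in L1 data */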
+struct ddr_config {
+	u32 ddr_clk;
+	u32 dmc_ddrctl;
+	u32 dmc_ddrcfg;
+	u32 dmc_ddrtr0;
+	u32 dmc_ddrtr1;
+	u32 dmc_ddrtr2;
+	u32 dmc_ddrmr;
+	u32 dmc_ddrmr1;
+};
+
+struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) = {
+	[0] = {
+		.ddr_clk    = 125,
+		.dmc_ddrctl = 0x00000904,
+		.dmc_ddrcfg = 0x00000422,
+		.dmc_ddrtr0 = 0x20705212,
+		.dmc_ddrtr1 = 0x201003CF,
+		.dmc_ddrtr2 = 0x00320107,
+		.dmc_ddrmr  = 0x00000422,
+		.dmc_ddrmr1 = 0x4,
+	},
+	[1] = {
+		.ddr_clk    = 133,
+		.dmc_ddrctl = 0x00000904,
+		.dmc_ddrcfg = 0x00000422,
+		.dmc_ddrtr0 = 0x20806313,
+		.dmc_ddrtr1 = 0x2013040D,
+		.dmc_ddrtr2 = 0x00320108,
+		.dmc_ddrmr  = 0x00000632,
+		.dmc_ddrmr1 = 0x4,
+	},
+	[2] = {
+		.ddr_clk    = 150,
+		.dmc_ddrctl = 0x00000904,
+		.dmc_ddrcfg = 0x00000422,
+		.dmc_ddrtr0 = 0x20A07323,
+		.dmc_ddrtr1 = 0x20160492,
+		.dmc_ddrtr2 = 0x00320209,
+		.dmc_ddrmr  = 0x00000632,
+		.dmc_ddrmr1 = 0x4,
+	},
+	[3] = {
+		.ddr_clk    = 166,
+		.dmc_ddrctl = 0x00000904,
+		.dmc_ddrcfg = 0x00000422,
+		.dmc_ddrtr0 = 0x20A07323,
+		.dmc_ddrtr1 = 0x2016050E,
+		.dmc_ddrtr2 = 0x00320209,
+		.dmc_ddrmr  = 0x00000632,
+		.dmc_ddrmr1 = 0x4,
+	},
+	[4] = {
+		.ddr_clk    = 200,
+		.dmc_ddrctl = 0x00000904,
+		.dmc_ddrcfg = 0x00000422,
+		.dmc_ddrtr0 = 0x20a07323,
+		.dmc_ddrtr1 = 0x2016050f,
+		.dmc_ddrtr2 = 0x00320509,
+		.dmc_ddrmr  = 0x00000632,
+		.dmc_ddrmr1 = 0x4,
+	},
+	[5] = {
+		.ddr_clk    = 225,
+		.dmc_ddrctl = 0x00000904,
+		.dmc_ddrcfg = 0x00000422,
+		.dmc_ddrtr0 = 0x20E0A424,
+		.dmc_ddrtr1 = 0x302006DB,
+		.dmc_ddrtr2 = 0x0032020D,
+		.dmc_ddrmr  = 0x00000842,
+		.dmc_ddrmr1 = 0x4,
+	},
+	[6] = {
+		.ddr_clk    = 250,
+		.dmc_ddrctl = 0x00000904,
+		.dmc_ddrcfg = 0x00000422,
+		.dmc_ddrtr0 = 0x20E0A424,
+		.dmc_ddrtr1 = 0x3020079E,
+		.dmc_ddrtr2 = 0x0032020D,
+		.dmc_ddrmr  = 0x00000842,
+		.dmc_ddrmr1 = 0x4,
+	},
+};
+#else
 #define SDGCTL_WIDTH (1 << 31)	/* SDRAM external data path width */
 #define PLL_CTL_VAL \
 	(((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
-	 (PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000))
+		(PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000))
+#endif
 
 __attribute__((l1_text))
 static void do_sync(void)
@@ -33,6 +144,44 @@
 	 * in the middle of reprogramming things, and that'll screw us up.
 	 * For example, any automatic DMAs left by U-Boot for splash screens.
 	 */
+
+#ifdef CONFIG_BF60x
+	int i, dlldatacycle, dll_ctl;
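+	/* Program the CGU divider and control registers, then wait for the new settings to take effect */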
+	bfin_write32(CGU0_DIV, CGU_DIV_VAL);
+	bfin_write32(CGU0_CTL, CGU_CTL_VAL);
+	while ((bfin_read32(CGU0_STAT) & 0x8) || !(bfin_read32(CGU0_STAT) & 0x4))
+		continue;
+
+	bfin_write32(CGU0_DIV, CGU_DIV_VAL | (1 << UPDT_P));
+	while (bfin_read32(CGU0_STAT) & (1 << 3))
+		continue;
+
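+	/* Pick the DDR timing entry that matches the configured DCLK and program the memory controller */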
+	for (i = 0; i < 7; i++) {
+		if (ddr_config_table[i].ddr_clk == CONFIG_BFIN_DCLK) {
+			bfin_write_DDR0_CFG(ddr_config_table[i].dmc_ddrcfg);
+			bfin_write_DDR0_TR0(ddr_config_table[i].dmc_ddrtr0);
+			bfin_write_DDR0_TR1(ddr_config_table[i].dmc_ddrtr1);
+			bfin_write_DDR0_TR2(ddr_config_table[i].dmc_ddrtr2);
+			bfin_write_DDR0_MR(ddr_config_table[i].dmc_ddrmr);
+			bfin_write_DDR0_EMR1(ddr_config_table[i].dmc_ddrmr1);
+			bfin_write_DDR0_CTL(ddr_config_table[i].dmc_ddrctl);
+			break;
+		}
+	}
+
+	do_sync();
+	while (!(bfin_read_DDR0_STAT() & 0x4))
+		continue;
+
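+	/* Fold the data cycle count reported in DDR0_STAT back into DDR0_DLLCTL */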
+	dlldatacycle = (bfin_read_DDR0_STAT() & 0x00f00000) >> 20;
+	dll_ctl = bfin_read_DDR0_DLLCTL();
+	dll_ctl &= 0x0ff;
+	bfin_write_DDR0_DLLCTL(dll_ctl | (dlldatacycle << 8));
+
+	do_sync();
+	while (!(bfin_read_DDR0_STAT() & 0x2000))
+		continue;
+#else
 	size_t i;
 	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
 		struct dma_register *dma = dma_io_base_addr[i];
@@ -91,6 +240,8 @@
 	bfin_write_EBIU_DDRQUE(CONFIG_MEM_EBIU_DDRQUE);
 #endif
 #endif
+#endif
 	do_sync();
 	bfin_read16(0);
+
 }
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 2e6eefd..6e87dc1 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/clk.h>
 #include <linux/cpufreq.h>
 #include <linux/fs.h>
 #include <linux/delay.h>
@@ -17,6 +18,7 @@
 #include <asm/time.h>
 #include <asm/dpmc.h>
 
+
 /* this is the table of CCLK frequencies, in Hz */
 /* .index is the entry in the auxiliary dpm_state_table[] */
 static struct cpufreq_frequency_table bfin_freq_table[] = {
@@ -67,12 +69,22 @@
 #else
 	min_cclk = sclk;
 #endif
+
+#ifndef CONFIG_BF60x
 	csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
+#else
+	csel = bfin_read32(CGU0_DIV) & 0x1F;
+#endif
 
 	for (index = 0;  (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
 		bfin_freq_table[index].frequency = cclk >> index;
+#ifndef CONFIG_BF60x
 		dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
 		dpm_state_table[index].tscale =  (TIME_SCALE / (1 << csel)) - 1;
+#else
+		dpm_state_table[index].csel = csel;
+		dpm_state_table[index].tscale =  TIME_SCALE >> index;
+#endif
 
 		pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
 						 bfin_freq_table[index].frequency,
@@ -99,14 +111,34 @@
 	return get_cclk() / 1000;
 }
 
+#ifdef CONFIG_BF60x
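+/*
+ * On BF60x the core clock is changed through the generic clk API
+ * ("CCLK" clock) instead of writing PLL_DIV directly.
+ */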
+unsigned long cpu_set_cclk(int cpu, unsigned long new)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = clk_get(NULL, "CCLK");
+	if (IS_ERR(clk))
+		return -ENODEV;
+
+	ret = clk_set_rate(clk, new);
+	clk_put(clk);
+	return ret;
+}
+#endif
+
 static int bfin_target(struct cpufreq_policy *poli,
 			unsigned int target_freq, unsigned int relation)
 {
-	unsigned int index, plldiv, cpu;
+#ifndef CONFIG_BF60x
+	unsigned int plldiv;
+#endif
+	unsigned int index, cpu;
 	unsigned long flags, cclk_hz;
 	struct cpufreq_freqs freqs;
 	static unsigned long lpj_ref;
 	static unsigned int  lpj_ref_freq;
+	int ret = 0;
 
 #if defined(CONFIG_CYCLES_CLOCKSOURCE)
 	cycles_t cycles;
@@ -134,9 +166,17 @@
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 		if (cpu == CPUFREQ_CPU) {
 			flags = hard_local_irq_save();
+#ifndef CONFIG_BF60x
 			plldiv = (bfin_read_PLL_DIV() & SSEL) |
 						dpm_state_table[index].csel;
 			bfin_write_PLL_DIV(plldiv);
+#else
+			ret = cpu_set_cclk(cpu, freqs.new * 1000);
+			if (ret != 0) {
+				pr_debug("cpufreq set freq failed %d\n", ret);
+				break;
+			}
+#endif
 			on_each_cpu(bfin_adjust_core_timer, &index, 1);
 #if defined(CONFIG_CYCLES_CLOCKSOURCE)
 			cycles = get_cycles();
@@ -161,7 +201,7 @@
 	}
 
 	pr_debug("cpufreq: done\n");
-	return 0;
+	return ret;
 }
 
 static int bfin_verify_speed(struct cpufreq_policy *policy)
@@ -169,7 +209,7 @@
 	return cpufreq_frequency_table_verify(policy, bfin_freq_table);
 }
 
-static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
+static int __bfin_cpu_init(struct cpufreq_policy *policy)
 {
 
 	unsigned long cclk, sclk;
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index 1c534d2..de99f3a 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -10,7 +10,6 @@
 #include <asm/dpmc.h>
 
 .section .l1.text
-
 ENTRY(_sleep_mode)
 	[--SP] = (R7:4, P5:3);
 	[--SP] = RETS;
@@ -43,6 +42,9 @@
 	BITCLR (R7, 5);
 	w[p0] = R7.L;
 	IDLE;
+
+	bfin_init_pm_bench_cycles;
+
 	call _test_pll_locked;
 
 	RETS = [SP++];
@@ -58,12 +60,13 @@
  *
  * We accept just one argument -- the value to write to VR_CTL.
  */
+
 ENTRY(_hibernate_mode)
 	/* Save/setup the regs we need early for minor pipeline optimization */
 	R4 = R0;
+
 	P3.H = hi(VR_CTL);
 	P3.L = lo(VR_CTL);
-
 	/* Disable all wakeup sources */
 	R0 = IWR_DISABLE_ALL;
 	R1 = IWR_DISABLE_ALL;
@@ -74,6 +77,9 @@
 
 	/* Finally, we climb into our cave to hibernate */
 	W[P3] = R4.L;
+
+	bfin_init_pm_bench_cycles;
+
 	CLI R2;
 	IDLE;
 .Lforever:
@@ -158,6 +164,8 @@
 	SSYNC;
 	IDLE;
 
+	bfin_init_pm_bench_cycles;
+
 	call _test_pll_locked;
 
 	P0.H = hi(PLL_DIV);
@@ -276,327 +284,10 @@
 ENDPROC(_test_pll_locked)
 
 .section .text
-
-#define PM_REG0  R7
-#define PM_REG1  R6
-#define PM_REG2  R5
-#define PM_REG3  R4
-#define PM_REG4  R3
-#define PM_REG5  R2
-#define PM_REG6  R1
-#define PM_REG7  R0
-#define PM_REG8  P5
-#define PM_REG9  P4
-#define PM_REG10 P3
-#define PM_REG11 P2
-#define PM_REG12 P1
-#define PM_REG13 P0
-
-#define PM_REGSET0  R7:7
-#define PM_REGSET1  R7:6
-#define PM_REGSET2  R7:5
-#define PM_REGSET3  R7:4
-#define PM_REGSET4  R7:3
-#define PM_REGSET5  R7:2
-#define PM_REGSET6  R7:1
-#define PM_REGSET7  R7:0
-#define PM_REGSET8  R7:0, P5:5
-#define PM_REGSET9  R7:0, P5:4
-#define PM_REGSET10 R7:0, P5:3
-#define PM_REGSET11 R7:0, P5:2
-#define PM_REGSET12 R7:0, P5:1
-#define PM_REGSET13 R7:0, P5:0
-
-#define _PM_PUSH(n, x, w, base) PM_REG##n = w[FP + ((x) - (base))];
-#define _PM_POP(n, x, w, base)  w[FP + ((x) - (base))] = PM_REG##n;
-#define PM_PUSH_SYNC(n)         [--sp] = (PM_REGSET##n);
-#define PM_POP_SYNC(n)          (PM_REGSET##n) = [sp++];
-#define PM_PUSH(n, x)           PM_REG##n = [FP++];
-#define PM_POP(n, x)            [FP--] = PM_REG##n;
-#define PM_CORE_PUSH(n, x)      _PM_PUSH(n, x, , COREMMR_BASE)
-#define PM_CORE_POP(n, x)       _PM_POP(n, x, , COREMMR_BASE)
-#define PM_SYS_PUSH(n, x)       _PM_PUSH(n, x, , SYSMMR_BASE)
-#define PM_SYS_POP(n, x)        _PM_POP(n, x, , SYSMMR_BASE)
-#define PM_SYS_PUSH16(n, x)     _PM_PUSH(n, x, w, SYSMMR_BASE)
-#define PM_SYS_POP16(n, x)      _PM_POP(n, x, w, SYSMMR_BASE)
-
 ENTRY(_do_hibernate)
-	/*
-	 * Save the core regs early so we can blow them away when
-	 * saving/restoring MMR states
-	 */
-	[--sp] = (R7:0, P5:0);
-	[--sp] = fp;
-	[--sp] = usp;
-
-	[--sp] = i0;
-	[--sp] = i1;
-	[--sp] = i2;
-	[--sp] = i3;
-
-	[--sp] = m0;
-	[--sp] = m1;
-	[--sp] = m2;
-	[--sp] = m3;
-
-	[--sp] = l0;
-	[--sp] = l1;
-	[--sp] = l2;
-	[--sp] = l3;
-
-	[--sp] = b0;
-	[--sp] = b1;
-	[--sp] = b2;
-	[--sp] = b3;
-	[--sp] = a0.x;
-	[--sp] = a0.w;
-	[--sp] = a1.x;
-	[--sp] = a1.w;
-
-	[--sp] = LC0;
-	[--sp] = LC1;
-	[--sp] = LT0;
-	[--sp] = LT1;
-	[--sp] = LB0;
-	[--sp] = LB1;
-
-	/* We can't push RETI directly as that'll change IPEND[4] */
-	r7 = RETI;
-	[--sp] = RETS;
-	[--sp] = ASTAT;
-	[--sp] = CYCLES;
-	[--sp] = CYCLES2;
-	[--sp] = SYSCFG;
-	[--sp] = RETX;
-	[--sp] = SEQSTAT;
-	[--sp] = r7;
-
-	/* Save first func arg in M3 */
-	M3 = R0;
-
-	/* Save system MMRs */
-	FP.H = hi(SYSMMR_BASE);
-	FP.L = lo(SYSMMR_BASE);
-
-#ifdef SIC_IMASK0
-	PM_SYS_PUSH(0, SIC_IMASK0)
-	PM_SYS_PUSH(1, SIC_IMASK1)
-# ifdef SIC_IMASK2
-	PM_SYS_PUSH(2, SIC_IMASK2)
-# endif
-#else
-	PM_SYS_PUSH(0, SIC_IMASK)
-#endif
-#ifdef SIC_IAR0
-	PM_SYS_PUSH(3, SIC_IAR0)
-	PM_SYS_PUSH(4, SIC_IAR1)
-	PM_SYS_PUSH(5, SIC_IAR2)
-#endif
-#ifdef SIC_IAR3
-	PM_SYS_PUSH(6, SIC_IAR3)
-#endif
-#ifdef SIC_IAR4
-	PM_SYS_PUSH(7, SIC_IAR4)
-	PM_SYS_PUSH(8, SIC_IAR5)
-	PM_SYS_PUSH(9, SIC_IAR6)
-#endif
-#ifdef SIC_IAR7
-	PM_SYS_PUSH(10, SIC_IAR7)
-#endif
-#ifdef SIC_IAR8
-	PM_SYS_PUSH(11, SIC_IAR8)
-	PM_SYS_PUSH(12, SIC_IAR9)
-	PM_SYS_PUSH(13, SIC_IAR10)
-#endif
-	PM_PUSH_SYNC(13)
-#ifdef SIC_IAR11
-	PM_SYS_PUSH(0, SIC_IAR11)
-#endif
-
-#ifdef SIC_IWR
-	PM_SYS_PUSH(1, SIC_IWR)
-#endif
-#ifdef SIC_IWR0
-	PM_SYS_PUSH(1, SIC_IWR0)
-#endif
-#ifdef SIC_IWR1
-	PM_SYS_PUSH(2, SIC_IWR1)
-#endif
-#ifdef SIC_IWR2
-	PM_SYS_PUSH(3, SIC_IWR2)
-#endif
-
-#ifdef PINT0_ASSIGN
-	PM_SYS_PUSH(4, PINT0_MASK_SET)
-	PM_SYS_PUSH(5, PINT1_MASK_SET)
-	PM_SYS_PUSH(6, PINT2_MASK_SET)
-	PM_SYS_PUSH(7, PINT3_MASK_SET)
-	PM_SYS_PUSH(8, PINT0_ASSIGN)
-	PM_SYS_PUSH(9, PINT1_ASSIGN)
-	PM_SYS_PUSH(10, PINT2_ASSIGN)
-	PM_SYS_PUSH(11, PINT3_ASSIGN)
-	PM_SYS_PUSH(12, PINT0_INVERT_SET)
-	PM_SYS_PUSH(13, PINT1_INVERT_SET)
-	PM_PUSH_SYNC(13)
-	PM_SYS_PUSH(0, PINT2_INVERT_SET)
-	PM_SYS_PUSH(1, PINT3_INVERT_SET)
-	PM_SYS_PUSH(2, PINT0_EDGE_SET)
-	PM_SYS_PUSH(3, PINT1_EDGE_SET)
-	PM_SYS_PUSH(4, PINT2_EDGE_SET)
-	PM_SYS_PUSH(5, PINT3_EDGE_SET)
-#endif
-
-	PM_SYS_PUSH16(6, SYSCR)
-
-	PM_SYS_PUSH16(7, EBIU_AMGCTL)
-	PM_SYS_PUSH(8, EBIU_AMBCTL0)
-	PM_SYS_PUSH(9, EBIU_AMBCTL1)
-#ifdef EBIU_FCTL
-	PM_SYS_PUSH(10, EBIU_MBSCTL)
-	PM_SYS_PUSH(11, EBIU_MODE)
-	PM_SYS_PUSH(12, EBIU_FCTL)
-	PM_PUSH_SYNC(12)
-#else
-	PM_PUSH_SYNC(9)
-#endif
-
-	/* Save Core MMRs */
-	I0.H = hi(COREMMR_BASE);
-	I0.L = lo(COREMMR_BASE);
-	I1 = I0;
-	I2 = I0;
-	I3 = I0;
-	B0 = I0;
-	B1 = I0;
-	B2 = I0;
-	B3 = I0;
-	I1.L = lo(DCPLB_ADDR0);
-	I2.L = lo(DCPLB_DATA0);
-	I3.L = lo(ICPLB_ADDR0);
-	B0.L = lo(ICPLB_DATA0);
-	B1.L = lo(EVT2);
-	B2.L = lo(IMASK);
-	B3.L = lo(TCNTL);
-
-	/* DCPLB Addr */
-	FP = I1;
-	PM_PUSH(0, DCPLB_ADDR0)
-	PM_PUSH(1, DCPLB_ADDR1)
-	PM_PUSH(2, DCPLB_ADDR2)
-	PM_PUSH(3, DCPLB_ADDR3)
-	PM_PUSH(4, DCPLB_ADDR4)
-	PM_PUSH(5, DCPLB_ADDR5)
-	PM_PUSH(6, DCPLB_ADDR6)
-	PM_PUSH(7, DCPLB_ADDR7)
-	PM_PUSH(8, DCPLB_ADDR8)
-	PM_PUSH(9, DCPLB_ADDR9)
-	PM_PUSH(10, DCPLB_ADDR10)
-	PM_PUSH(11, DCPLB_ADDR11)
-	PM_PUSH(12, DCPLB_ADDR12)
-	PM_PUSH(13, DCPLB_ADDR13)
-	PM_PUSH_SYNC(13)
-	PM_PUSH(0, DCPLB_ADDR14)
-	PM_PUSH(1, DCPLB_ADDR15)
-
-	/* DCPLB Data */
-	FP = I2;
-	PM_PUSH(2, DCPLB_DATA0)
-	PM_PUSH(3, DCPLB_DATA1)
-	PM_PUSH(4, DCPLB_DATA2)
-	PM_PUSH(5, DCPLB_DATA3)
-	PM_PUSH(6, DCPLB_DATA4)
-	PM_PUSH(7, DCPLB_DATA5)
-	PM_PUSH(8, DCPLB_DATA6)
-	PM_PUSH(9, DCPLB_DATA7)
-	PM_PUSH(10, DCPLB_DATA8)
-	PM_PUSH(11, DCPLB_DATA9)
-	PM_PUSH(12, DCPLB_DATA10)
-	PM_PUSH(13, DCPLB_DATA11)
-	PM_PUSH_SYNC(13)
-	PM_PUSH(0, DCPLB_DATA12)
-	PM_PUSH(1, DCPLB_DATA13)
-	PM_PUSH(2, DCPLB_DATA14)
-	PM_PUSH(3, DCPLB_DATA15)
-
-	/* ICPLB Addr */
-	FP = I3;
-	PM_PUSH(4, ICPLB_ADDR0)
-	PM_PUSH(5, ICPLB_ADDR1)
-	PM_PUSH(6, ICPLB_ADDR2)
-	PM_PUSH(7, ICPLB_ADDR3)
-	PM_PUSH(8, ICPLB_ADDR4)
-	PM_PUSH(9, ICPLB_ADDR5)
-	PM_PUSH(10, ICPLB_ADDR6)
-	PM_PUSH(11, ICPLB_ADDR7)
-	PM_PUSH(12, ICPLB_ADDR8)
-	PM_PUSH(13, ICPLB_ADDR9)
-	PM_PUSH_SYNC(13)
-	PM_PUSH(0, ICPLB_ADDR10)
-	PM_PUSH(1, ICPLB_ADDR11)
-	PM_PUSH(2, ICPLB_ADDR12)
-	PM_PUSH(3, ICPLB_ADDR13)
-	PM_PUSH(4, ICPLB_ADDR14)
-	PM_PUSH(5, ICPLB_ADDR15)
-
-	/* ICPLB Data */
-	FP = B0;
-	PM_PUSH(6, ICPLB_DATA0)
-	PM_PUSH(7, ICPLB_DATA1)
-	PM_PUSH(8, ICPLB_DATA2)
-	PM_PUSH(9, ICPLB_DATA3)
-	PM_PUSH(10, ICPLB_DATA4)
-	PM_PUSH(11, ICPLB_DATA5)
-	PM_PUSH(12, ICPLB_DATA6)
-	PM_PUSH(13, ICPLB_DATA7)
-	PM_PUSH_SYNC(13)
-	PM_PUSH(0, ICPLB_DATA8)
-	PM_PUSH(1, ICPLB_DATA9)
-	PM_PUSH(2, ICPLB_DATA10)
-	PM_PUSH(3, ICPLB_DATA11)
-	PM_PUSH(4, ICPLB_DATA12)
-	PM_PUSH(5, ICPLB_DATA13)
-	PM_PUSH(6, ICPLB_DATA14)
-	PM_PUSH(7, ICPLB_DATA15)
-
-	/* Event Vectors */
-	FP = B1;
-	PM_PUSH(8, EVT2)
-	PM_PUSH(9, EVT3)
-	FP += 4;	/* EVT4 */
-	PM_PUSH(10, EVT5)
-	PM_PUSH(11, EVT6)
-	PM_PUSH(12, EVT7)
-	PM_PUSH(13, EVT8)
-	PM_PUSH_SYNC(13)
-	PM_PUSH(0, EVT9)
-	PM_PUSH(1, EVT10)
-	PM_PUSH(2, EVT11)
-	PM_PUSH(3, EVT12)
-	PM_PUSH(4, EVT13)
-	PM_PUSH(5, EVT14)
-	PM_PUSH(6, EVT15)
-
-	/* CEC */
-	FP = B2;
-	PM_PUSH(7, IMASK)
-	FP += 4;	/* IPEND */
-	PM_PUSH(8, ILAT)
-	PM_PUSH(9, IPRIO)
-
-	/* Core Timer */
-	FP = B3;
-	PM_PUSH(10, TCNTL)
-	PM_PUSH(11, TPERIOD)
-	PM_PUSH(12, TSCALE)
-	PM_PUSH(13, TCOUNT)
-	PM_PUSH_SYNC(13)
-
-	/* Misc non-contiguous registers */
-	FP = I0;
-	PM_CORE_PUSH(0, DMEM_CONTROL);
-	PM_CORE_PUSH(1, IMEM_CONTROL);
-	PM_CORE_PUSH(2, TBUFCTL);
-	PM_PUSH_SYNC(2)
+	bfin_cpu_reg_save;
+	bfin_sys_mmr_save;
+	bfin_core_mmr_save;
 
 	/* Setup args to hibernate mode early for pipeline optimization */
 	R0 = M3;
@@ -618,274 +309,9 @@
 
 .Lpm_resume_here:
 
-	/* Restore Core MMRs */
-	I0.H = hi(COREMMR_BASE);
-	I0.L = lo(COREMMR_BASE);
-	I1 = I0;
-	I2 = I0;
-	I3 = I0;
-	B0 = I0;
-	B1 = I0;
-	B2 = I0;
-	B3 = I0;
-	I1.L = lo(DCPLB_ADDR15);
-	I2.L = lo(DCPLB_DATA15);
-	I3.L = lo(ICPLB_ADDR15);
-	B0.L = lo(ICPLB_DATA15);
-	B1.L = lo(EVT15);
-	B2.L = lo(IPRIO);
-	B3.L = lo(TCOUNT);
-
-	/* Misc non-contiguous registers */
-	FP = I0;
-	PM_POP_SYNC(2)
-	PM_CORE_POP(2, TBUFCTL)
-	PM_CORE_POP(1, IMEM_CONTROL)
-	PM_CORE_POP(0, DMEM_CONTROL)
-
-	/* Core Timer */
-	PM_POP_SYNC(13)
-	FP = B3;
-	PM_POP(13, TCOUNT)
-	PM_POP(12, TSCALE)
-	PM_POP(11, TPERIOD)
-	PM_POP(10, TCNTL)
-
-	/* CEC */
-	FP = B2;
-	PM_POP(9, IPRIO)
-	PM_POP(8, ILAT)
-	FP += -4;	/* IPEND */
-	PM_POP(7, IMASK)
-
-	/* Event Vectors */
-	FP = B1;
-	PM_POP(6, EVT15)
-	PM_POP(5, EVT14)
-	PM_POP(4, EVT13)
-	PM_POP(3, EVT12)
-	PM_POP(2, EVT11)
-	PM_POP(1, EVT10)
-	PM_POP(0, EVT9)
-	PM_POP_SYNC(13)
-	PM_POP(13, EVT8)
-	PM_POP(12, EVT7)
-	PM_POP(11, EVT6)
-	PM_POP(10, EVT5)
-	FP += -4;	/* EVT4 */
-	PM_POP(9, EVT3)
-	PM_POP(8, EVT2)
-
-	/* ICPLB Data */
-	FP = B0;
-	PM_POP(7, ICPLB_DATA15)
-	PM_POP(6, ICPLB_DATA14)
-	PM_POP(5, ICPLB_DATA13)
-	PM_POP(4, ICPLB_DATA12)
-	PM_POP(3, ICPLB_DATA11)
-	PM_POP(2, ICPLB_DATA10)
-	PM_POP(1, ICPLB_DATA9)
-	PM_POP(0, ICPLB_DATA8)
-	PM_POP_SYNC(13)
-	PM_POP(13, ICPLB_DATA7)
-	PM_POP(12, ICPLB_DATA6)
-	PM_POP(11, ICPLB_DATA5)
-	PM_POP(10, ICPLB_DATA4)
-	PM_POP(9, ICPLB_DATA3)
-	PM_POP(8, ICPLB_DATA2)
-	PM_POP(7, ICPLB_DATA1)
-	PM_POP(6, ICPLB_DATA0)
-
-	/* ICPLB Addr */
-	FP = I3;
-	PM_POP(5, ICPLB_ADDR15)
-	PM_POP(4, ICPLB_ADDR14)
-	PM_POP(3, ICPLB_ADDR13)
-	PM_POP(2, ICPLB_ADDR12)
-	PM_POP(1, ICPLB_ADDR11)
-	PM_POP(0, ICPLB_ADDR10)
-	PM_POP_SYNC(13)
-	PM_POP(13, ICPLB_ADDR9)
-	PM_POP(12, ICPLB_ADDR8)
-	PM_POP(11, ICPLB_ADDR7)
-	PM_POP(10, ICPLB_ADDR6)
-	PM_POP(9, ICPLB_ADDR5)
-	PM_POP(8, ICPLB_ADDR4)
-	PM_POP(7, ICPLB_ADDR3)
-	PM_POP(6, ICPLB_ADDR2)
-	PM_POP(5, ICPLB_ADDR1)
-	PM_POP(4, ICPLB_ADDR0)
-
-	/* DCPLB Data */
-	FP = I2;
-	PM_POP(3, DCPLB_DATA15)
-	PM_POP(2, DCPLB_DATA14)
-	PM_POP(1, DCPLB_DATA13)
-	PM_POP(0, DCPLB_DATA12)
-	PM_POP_SYNC(13)
-	PM_POP(13, DCPLB_DATA11)
-	PM_POP(12, DCPLB_DATA10)
-	PM_POP(11, DCPLB_DATA9)
-	PM_POP(10, DCPLB_DATA8)
-	PM_POP(9, DCPLB_DATA7)
-	PM_POP(8, DCPLB_DATA6)
-	PM_POP(7, DCPLB_DATA5)
-	PM_POP(6, DCPLB_DATA4)
-	PM_POP(5, DCPLB_DATA3)
-	PM_POP(4, DCPLB_DATA2)
-	PM_POP(3, DCPLB_DATA1)
-	PM_POP(2, DCPLB_DATA0)
-
-	/* DCPLB Addr */
-	FP = I1;
-	PM_POP(1, DCPLB_ADDR15)
-	PM_POP(0, DCPLB_ADDR14)
-	PM_POP_SYNC(13)
-	PM_POP(13, DCPLB_ADDR13)
-	PM_POP(12, DCPLB_ADDR12)
-	PM_POP(11, DCPLB_ADDR11)
-	PM_POP(10, DCPLB_ADDR10)
-	PM_POP(9, DCPLB_ADDR9)
-	PM_POP(8, DCPLB_ADDR8)
-	PM_POP(7, DCPLB_ADDR7)
-	PM_POP(6, DCPLB_ADDR6)
-	PM_POP(5, DCPLB_ADDR5)
-	PM_POP(4, DCPLB_ADDR4)
-	PM_POP(3, DCPLB_ADDR3)
-	PM_POP(2, DCPLB_ADDR2)
-	PM_POP(1, DCPLB_ADDR1)
-	PM_POP(0, DCPLB_ADDR0)
-
-	/* Restore System MMRs */
-	FP.H = hi(SYSMMR_BASE);
-	FP.L = lo(SYSMMR_BASE);
-
-#ifdef EBIU_FCTL
-	PM_POP_SYNC(12)
-	PM_SYS_POP(12, EBIU_FCTL)
-	PM_SYS_POP(11, EBIU_MODE)
-	PM_SYS_POP(10, EBIU_MBSCTL)
-#else
-	PM_POP_SYNC(9)
-#endif
-	PM_SYS_POP(9, EBIU_AMBCTL1)
-	PM_SYS_POP(8, EBIU_AMBCTL0)
-	PM_SYS_POP16(7, EBIU_AMGCTL)
-
-	PM_SYS_POP16(6, SYSCR)
-
-#ifdef PINT0_ASSIGN
-	PM_SYS_POP(5, PINT3_EDGE_SET)
-	PM_SYS_POP(4, PINT2_EDGE_SET)
-	PM_SYS_POP(3, PINT1_EDGE_SET)
-	PM_SYS_POP(2, PINT0_EDGE_SET)
-	PM_SYS_POP(1, PINT3_INVERT_SET)
-	PM_SYS_POP(0, PINT2_INVERT_SET)
-	PM_POP_SYNC(13)
-	PM_SYS_POP(13, PINT1_INVERT_SET)
-	PM_SYS_POP(12, PINT0_INVERT_SET)
-	PM_SYS_POP(11, PINT3_ASSIGN)
-	PM_SYS_POP(10, PINT2_ASSIGN)
-	PM_SYS_POP(9, PINT1_ASSIGN)
-	PM_SYS_POP(8, PINT0_ASSIGN)
-	PM_SYS_POP(7, PINT3_MASK_SET)
-	PM_SYS_POP(6, PINT2_MASK_SET)
-	PM_SYS_POP(5, PINT1_MASK_SET)
-	PM_SYS_POP(4, PINT0_MASK_SET)
-#endif
-
-#ifdef SIC_IWR2
-	PM_SYS_POP(3, SIC_IWR2)
-#endif
-#ifdef SIC_IWR1
-	PM_SYS_POP(2, SIC_IWR1)
-#endif
-#ifdef SIC_IWR0
-	PM_SYS_POP(1, SIC_IWR0)
-#endif
-#ifdef SIC_IWR
-	PM_SYS_POP(1, SIC_IWR)
-#endif
-
-#ifdef SIC_IAR11
-	PM_SYS_POP(0, SIC_IAR11)
-#endif
-	PM_POP_SYNC(13)
-#ifdef SIC_IAR8
-	PM_SYS_POP(13, SIC_IAR10)
-	PM_SYS_POP(12, SIC_IAR9)
-	PM_SYS_POP(11, SIC_IAR8)
-#endif
-#ifdef SIC_IAR7
-	PM_SYS_POP(10, SIC_IAR7)
-#endif
-#ifdef SIC_IAR6
-	PM_SYS_POP(9, SIC_IAR6)
-	PM_SYS_POP(8, SIC_IAR5)
-	PM_SYS_POP(7, SIC_IAR4)
-#endif
-#ifdef SIC_IAR3
-	PM_SYS_POP(6, SIC_IAR3)
-#endif
-#ifdef SIC_IAR0
-	PM_SYS_POP(5, SIC_IAR2)
-	PM_SYS_POP(4, SIC_IAR1)
-	PM_SYS_POP(3, SIC_IAR0)
-#endif
-#ifdef SIC_IMASK0
-# ifdef SIC_IMASK2
-	PM_SYS_POP(2, SIC_IMASK2)
-# endif
-	PM_SYS_POP(1, SIC_IMASK1)
-	PM_SYS_POP(0, SIC_IMASK0)
-#else
-	PM_SYS_POP(0, SIC_IMASK)
-#endif
-
-	/* Restore Core Registers */
-	RETI = [sp++];
-	SEQSTAT = [sp++];
-	RETX = [sp++];
-	SYSCFG = [sp++];
-	CYCLES2 = [sp++];
-	CYCLES = [sp++];
-	ASTAT = [sp++];
-	RETS = [sp++];
-
-	LB1 = [sp++];
-	LB0 = [sp++];
-	LT1 = [sp++];
-	LT0 = [sp++];
-	LC1 = [sp++];
-	LC0 = [sp++];
-
-	a1.w = [sp++];
-	a1.x = [sp++];
-	a0.w = [sp++];
-	a0.x = [sp++];
-	b3 = [sp++];
-	b2 = [sp++];
-	b1 = [sp++];
-	b0 = [sp++];
-
-	l3 = [sp++];
-	l2 = [sp++];
-	l1 = [sp++];
-	l0 = [sp++];
-
-	m3 = [sp++];
-	m2 = [sp++];
-	m1 = [sp++];
-	m0 = [sp++];
-
-	i3 = [sp++];
-	i2 = [sp++];
-	i1 = [sp++];
-	i0 = [sp++];
-
-	usp = [sp++];
-	fp = [sp++];
-	(R7:0, P5:0) = [sp++];
+	bfin_core_mmr_restore;
+	bfin_sys_mmr_restore;
+	bfin_cpu_reg_restore;
 
 	[--sp] = RETI;	/* Clear Global Interrupt Disable */
 	SP += 4;
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 4698a98..80aa253 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1141,7 +1141,8 @@
 	sti r0;
 
 	/* finish the userspace "atomic" functions for it */
-	r1 = FIXED_CODE_END;
+	r1.l = lo(FIXED_CODE_END);
+	r1.h = hi(FIXED_CODE_END);
 	r2 = [sp + PT_PC];
 	cc = r1 <= r2;
 	if cc jump .Lresume_userspace (bp);
@@ -1376,7 +1377,7 @@
 ENTRY(_sys_call_table)
 	.long _sys_restart_syscall	/* 0 */
 	.long _sys_exit
-	.long _sys_fork
+	.long _sys_ni_syscall	/* fork */
 	.long _sys_read
 	.long _sys_write
 	.long _sys_open		/* 5 */
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index 8b4d988..31515f0 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -210,14 +210,12 @@
 ENTRY(_real_start)
 	/* Enable nested interrupts */
 	[--sp] = reti;
-
 	/* watchdog off for now */
 	p0.l = lo(WDOG_CTL);
 	p0.h = hi(WDOG_CTL);
 	r0 = 0xAD6(z);
 	w[p0] = r0;
 	ssync;
-
 	/* Pass the u-boot arguments to the global value command line */
 	R0 = R7;
 	call _cmdline_init;
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 332dace..2729cba 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -16,6 +16,8 @@
 #include <linux/seq_file.h>
 #include <linux/irq.h>
 #include <linux/sched.h>
+#include <linux/syscore_ops.h>
+#include <asm/delay.h>
 #ifdef CONFIG_IPIPE
 #include <linux/ipipe.h>
 #endif
@@ -25,7 +27,11 @@
 #include <asm/irq_handler.h>
 #include <asm/dpmc.h>
 
-#define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
+#ifndef CONFIG_BF60x
+# define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
+#else
+# define SIC_SYSIRQ(irq)	((irq) - IVG15)
+#endif
 
 /*
  * NOTES:
@@ -50,6 +56,7 @@
 unsigned vr_wakeup;
 #endif
 
+#ifndef CONFIG_BF60x
 static struct ivgx {
 	/* irq number for request_irq, available in mach-bf5xx/irq.h */
 	unsigned int irqno;
@@ -78,7 +85,8 @@
 
 		for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
 			int irqn;
-			u32 iar = bfin_read32((unsigned long *)SIC_IAR0 +
+			u32 iar =
+				bfin_read32((unsigned long *)SIC_IAR0 +
 #if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
 	defined(CONFIG_BF538) || defined(CONFIG_BF539)
 				((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
@@ -86,7 +94,6 @@
 				(irqN >> 3)
 #endif
 				);
-
 			for (irqn = irqN; irqn < irqN + 4; ++irqn) {
 				int iar_shift = (irqn & 7) * 4;
 				if (ivg == (0xf & (iar >> iar_shift))) {
@@ -99,11 +106,11 @@
 		}
 	}
 }
+#endif
 
 /*
  * This is for core internal IRQs
  */
-
 void bfin_ack_noop(struct irq_data *d)
 {
 	/* Dummy function.  */
@@ -136,21 +143,21 @@
 void bfin_internal_mask_irq(unsigned int irq)
 {
 	unsigned long flags = hard_local_irq_save();
-
+#ifndef CONFIG_BF60x
 #ifdef SIC_IMASK0
 	unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
 	unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
 	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
-			     ~(1 << mask_bit));
-# ifdef CONFIG_SMP
+			~(1 << mask_bit));
+# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
 	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
-			     ~(1 << mask_bit));
+			~(1 << mask_bit));
 # endif
 #else
 	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
-			     ~(1 << SIC_SYSIRQ(irq)));
+			~(1 << SIC_SYSIRQ(irq)));
+#endif /* end of SIC_IMASK0 */
 #endif
-
 	hard_local_irq_restore(flags);
 }
 
@@ -160,7 +167,7 @@
 }
 
 #ifdef CONFIG_SMP
-static void bfin_internal_unmask_irq_affinity(unsigned int irq,
+void bfin_internal_unmask_irq_affinity(unsigned int irq,
 		const struct cpumask *affinity)
 #else
 void bfin_internal_unmask_irq(unsigned int irq)
@@ -168,6 +175,7 @@
 {
 	unsigned long flags = hard_local_irq_save();
 
+#ifndef CONFIG_BF60x
 #ifdef SIC_IMASK0
 	unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
 	unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
@@ -175,22 +183,239 @@
 	if (cpumask_test_cpu(0, affinity))
 # endif
 		bfin_write_SIC_IMASK(mask_bank,
-			bfin_read_SIC_IMASK(mask_bank) |
-			(1 << mask_bit));
+				bfin_read_SIC_IMASK(mask_bank) |
+				(1 << mask_bit));
 # ifdef CONFIG_SMP
 	if (cpumask_test_cpu(1, affinity))
 		bfin_write_SICB_IMASK(mask_bank,
-			bfin_read_SICB_IMASK(mask_bank) |
-			(1 << mask_bit));
+				bfin_read_SICB_IMASK(mask_bank) |
+				(1 << mask_bit));
 # endif
 #else
 	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
-			     (1 << SIC_SYSIRQ(irq)));
+			(1 << SIC_SYSIRQ(irq)));
 #endif
+#endif
+	hard_local_irq_restore(flags);
+}
+
+#ifdef CONFIG_BF60x
+static void bfin_sec_preflow_handler(struct irq_data *d)
+{
+	unsigned long flags = hard_local_irq_save();
+	unsigned int sid = SIC_SYSIRQ(d->irq);
+
+	bfin_write_SEC_SCI(0, SEC_CSID, sid);
 
 	hard_local_irq_restore(flags);
 }
 
+static void bfin_sec_mask_ack_irq(struct irq_data *d)
+{
+	unsigned long flags = hard_local_irq_save();
+	unsigned int sid = SIC_SYSIRQ(d->irq);
+
+	bfin_write_SEC_SCI(0, SEC_CSID, sid);
+
+	hard_local_irq_restore(flags);
+}
+
+static void bfin_sec_unmask_irq(struct irq_data *d)
+{
+	unsigned long flags = hard_local_irq_save();
+	unsigned int sid = SIC_SYSIRQ(d->irq);
+
+	bfin_write32(SEC_END, sid);
+
+	hard_local_irq_restore(flags);
+}
+
+static void bfin_sec_enable_ssi(unsigned int sid)
+{
+	unsigned long flags = hard_local_irq_save();
+	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
+
+	reg_sctl |= SEC_SCTL_SRC_EN;
+	bfin_write_SEC_SCTL(sid, reg_sctl);
+
+	hard_local_irq_restore(flags);
+}
+
+static void bfin_sec_disable_ssi(unsigned int sid)
+{
+	unsigned long flags = hard_local_irq_save();
+	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
+
+	reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN);
+	bfin_write_SEC_SCTL(sid, reg_sctl);
+
+	hard_local_irq_restore(flags);
+}
+
+static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid)
+{
+	unsigned long flags = hard_local_irq_save();
+	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
+
+	reg_sctl &= ((uint32_t)~SEC_SCTL_CTG);
+	bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG));
+
+	hard_local_irq_restore(flags);
+}
+
+static void bfin_sec_enable_sci(unsigned int sid)
+{
+	unsigned long flags = hard_local_irq_save();
+	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
+
+	if (sid == SIC_SYSIRQ(IRQ_WATCH0))
+		reg_sctl |= SEC_SCTL_FAULT_EN;
+	else
+		reg_sctl |= SEC_SCTL_INT_EN;
+	bfin_write_SEC_SCTL(sid, reg_sctl);
+
+	hard_local_irq_restore(flags);
+}
+
+static void bfin_sec_disable_sci(unsigned int sid)
+{
+	unsigned long flags = hard_local_irq_save();
+	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
+
+	reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN);
+	bfin_write_SEC_SCTL(sid, reg_sctl);
+
+	hard_local_irq_restore(flags);
+}
+
+static void bfin_sec_enable(struct irq_data *d)
+{
+	unsigned long flags = hard_local_irq_save();
+	unsigned int sid = SIC_SYSIRQ(d->irq);
+
+	bfin_sec_enable_sci(sid);
+	bfin_sec_enable_ssi(sid);
+
+	hard_local_irq_restore(flags);
+}
+
+static void bfin_sec_disable(struct irq_data *d)
+{
+	unsigned long flags = hard_local_irq_save();
+	unsigned int sid = SIC_SYSIRQ(d->irq);
+
+	bfin_sec_disable_sci(sid);
+	bfin_sec_disable_ssi(sid);
+
+	hard_local_irq_restore(flags);
+}
+
+static void bfin_sec_raise_irq(unsigned int sid)
+{
+	unsigned long flags = hard_local_irq_save();
+
+	bfin_write32(SEC_RAISE, sid);
+
+	hard_local_irq_restore(flags);
+}
+
+static void init_software_driven_irq(void)
+{
+	bfin_sec_set_ssi_coreid(34, 0);
+	bfin_sec_set_ssi_coreid(35, 1);
+	bfin_sec_set_ssi_coreid(36, 0);
+	bfin_sec_set_ssi_coreid(37, 1);
+}
+
+void bfin_sec_resume(void)
+{
+	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
+	udelay(100);
+	bfin_write_SEC_GCTL(SEC_GCTL_EN);
+	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
+}
+
+void handle_sec_sfi_fault(uint32_t gstat)
+{
+
+}
+
+void handle_sec_sci_fault(uint32_t gstat)
+{
+	uint32_t core_id;
+	uint32_t cstat;
+
+	core_id = gstat & SEC_GSTAT_SCI;
+	cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT);
+	if (cstat & SEC_CSTAT_ERR) {
+		switch (cstat & SEC_CSTAT_ERRC) {
+		case SEC_CSTAT_ACKERR:
+			printk(KERN_DEBUG "sec ack err\n");
+			break;
+		default:
+			printk(KERN_DEBUG "sec sci unknown err\n");
+		}
+	}
+
+}
+
+void handle_sec_ssi_fault(uint32_t gstat)
+{
+	uint32_t sid;
+	uint32_t sstat;
+
+	sid = gstat & SEC_GSTAT_SID;
+	sstat = bfin_read_SEC_SSTAT(sid);
+
+}
+
+void handle_sec_fault(unsigned int irq, struct irq_desc *desc)
+{
+	uint32_t sec_gstat;
+
+	raw_spin_lock(&desc->lock);
+
+	sec_gstat = bfin_read32(SEC_GSTAT);
+	if (sec_gstat & SEC_GSTAT_ERR) {
+
+		switch (sec_gstat & SEC_GSTAT_ERRC) {
+		case 0:
+			handle_sec_sfi_fault(sec_gstat);
+			break;
+		case SEC_GSTAT_SCIERR:
+			handle_sec_sci_fault(sec_gstat);
+			break;
+		case SEC_GSTAT_SSIERR:
+			handle_sec_ssi_fault(sec_gstat);
+			break;
+		}
+
+
+	}
+
+	raw_spin_unlock(&desc->lock);
+}
+
+static int sec_suspend(void)
+{
+	return 0;
+}
+
+static void sec_resume(void)
+{
+	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
+	udelay(100);
+	bfin_write_SEC_GCTL(SEC_GCTL_EN);
+	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
+}
+
+static struct syscore_ops sec_pm_syscore_ops = {
+	.suspend = sec_suspend,
+	.resume = sec_resume,
+};
+
+#endif
+
 #ifdef CONFIG_SMP
 static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 {
@@ -212,7 +437,7 @@
 }
 #endif
 
-#ifdef CONFIG_PM
+#if defined(CONFIG_PM) && !defined(CONFIG_BF60x)
 int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 {
 	u32 bank, bit, wakeup = 0;
@@ -271,22 +496,20 @@
 	return bfin_internal_set_wake(d->irq, state);
 }
 #else
+# define bfin_internal_set_wake(irq, state)
 # define bfin_internal_set_wake_chip NULL
 #endif
 
 static struct irq_chip bfin_core_irqchip = {
 	.name = "CORE",
-	.irq_ack = bfin_ack_noop,
 	.irq_mask = bfin_core_mask_irq,
 	.irq_unmask = bfin_core_unmask_irq,
 };
 
 static struct irq_chip bfin_internal_irqchip = {
 	.name = "INTN",
-	.irq_ack = bfin_ack_noop,
 	.irq_mask = bfin_internal_mask_irq_chip,
 	.irq_unmask = bfin_internal_unmask_irq_chip,
-	.irq_mask_ack = bfin_internal_mask_irq_chip,
 	.irq_disable = bfin_internal_mask_irq_chip,
 	.irq_enable = bfin_internal_unmask_irq_chip,
 #ifdef CONFIG_SMP
@@ -295,6 +518,18 @@
 	.irq_set_wake = bfin_internal_set_wake_chip,
 };
 
+#ifdef CONFIG_BF60x
+static struct irq_chip bfin_sec_irqchip = {
+	.name = "SEC",
+	.irq_mask_ack = bfin_sec_mask_ack_irq,
+	.irq_mask = bfin_sec_mask_ack_irq,
+	.irq_unmask = bfin_sec_unmask_irq,
+	.irq_eoi = bfin_sec_unmask_irq,
+	.irq_disable = bfin_sec_disable,
+	.irq_enable = bfin_sec_enable,
+};
+#endif
+
 void bfin_handle_irq(unsigned irq)
 {
 #ifdef CONFIG_IPIPE
@@ -396,8 +631,6 @@
 
 static struct irq_chip bfin_mac_status_irqchip = {
 	.name = "MACST",
-	.irq_ack = bfin_ack_noop,
-	.irq_mask_ack = bfin_mac_status_mask_irq,
 	.irq_mask = bfin_mac_status_mask_irq,
 	.irq_unmask = bfin_mac_status_unmask_irq,
 	.irq_set_wake = bfin_mac_status_set_wake,
@@ -421,15 +654,15 @@
 		} else {
 			bfin_mac_status_ack_irq(irq);
 			pr_debug("IRQ %d:"
-				 " MASKED MAC ERROR INTERRUPT ASSERTED\n",
-				 irq);
+					" MASKED MAC ERROR INTERRUPT ASSERTED\n",
+					irq);
 		}
 	} else
 		printk(KERN_ERR
-		       "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
-		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND"
-		       "(EMAC_SYSTAT=0x%X)\n",
-		       __func__, __FILE__, __LINE__, status);
+				"%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
+				" INTERRUPT ASSERTED BUT NO SOURCE FOUND"
+				"(EMAC_SYSTAT=0x%X)\n",
+				__func__, __FILE__, __LINE__, status);
 }
 #endif
 
@@ -583,7 +816,7 @@
 }
 
 void bfin_demux_gpio_irq(unsigned int inta_irq,
-			 struct irq_desc *desc)
+			struct irq_desc *desc)
 {
 	unsigned int irq;
 
@@ -635,9 +868,15 @@
 
 #else
 
+# ifndef CONFIG_BF60x
 #define NR_PINT_SYS_IRQS	4
-#define NR_PINT_BITS		32
 #define NR_PINTS		160
+# else
+#define NR_PINT_SYS_IRQS	6
+#define NR_PINTS		112
+#endif
+
+#define NR_PINT_BITS		32
 #define IRQ_NOT_AVAIL		0xFF
 
 #define PINT_2_BANK(x)		((x) >> 5)
@@ -652,8 +891,13 @@
 	(struct bfin_pint_regs *)PINT1_MASK_SET,
 	(struct bfin_pint_regs *)PINT2_MASK_SET,
 	(struct bfin_pint_regs *)PINT3_MASK_SET,
+#ifdef CONFIG_BF60x
+	(struct bfin_pint_regs *)PINT4_MASK_SET,
+	(struct bfin_pint_regs *)PINT5_MASK_SET,
+#endif
 };
 
+#ifndef CONFIG_BF60x
 inline unsigned int get_irq_base(u32 bank, u8 bmap)
 {
 	unsigned int irq_base;
@@ -666,6 +910,16 @@
 
 	return irq_base;
 }
+#else
+inline unsigned int get_irq_base(u32 bank, u8 bmap)
+{
+	unsigned int irq_base;
+
+	irq_base = IRQ_PA0 + bank * 16 + bmap * 16;
+
+	return irq_base;
+}
+#endif
 
 	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
 void init_pint_lut(void)
@@ -854,6 +1108,14 @@
 	case 1:
 		pint_irq = IRQ_PINT1;
 		break;
+#ifdef CONFIG_BF60x
+	case 4:
+		pint_irq = IRQ_PINT4;
+		break;
+	case 5:
+		pint_irq = IRQ_PINT5;
+		break;
+#endif
 	default:
 		return -EINVAL;
 	}
@@ -867,10 +1129,21 @@
 #endif
 
 void bfin_demux_gpio_irq(unsigned int inta_irq,
-			 struct irq_desc *desc)
+			struct irq_desc *desc)
 {
 	u32 bank, pint_val;
 	u32 request, irq;
+	u32 level_mask;
+	int umask = 0;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	if (chip->irq_mask_ack) {
+		chip->irq_mask_ack(&desc->irq_data);
+	} else {
+		chip->irq_mask(&desc->irq_data);
+		if (chip->irq_ack)
+			chip->irq_ack(&desc->irq_data);
+	}
 
 	switch (inta_irq) {
 	case IRQ_PINT0:
@@ -885,6 +1158,14 @@
 	case IRQ_PINT1:
 		bank = 1;
 		break;
+#ifdef CONFIG_BF60x
+	case IRQ_PINT4:
+		bank = 4;
+		break;
+	case IRQ_PINT5:
+		bank = 5;
+		break;
+#endif
 	default:
 		return;
 	}
@@ -893,15 +1174,23 @@
 
 	request = pint[bank]->request;
 
+	level_mask = pint[bank]->edge_set & request;
+
 	while (request) {
 		if (request & 1) {
 			irq = pint2irq_lut[pint_val] + SYS_IRQS;
+			if (level_mask & PINT_BIT(pint_val)) {
+				umask = 1;
+				chip->irq_unmask(&desc->irq_data);
+			}
 			bfin_handle_irq(irq);
 		}
 		pint_val++;
 		request >>= 1;
 	}
 
+	if (!umask)
+		chip->irq_unmask(&desc->irq_data);
 }
 #endif
 
@@ -951,6 +1240,7 @@
 	int irq;
 	unsigned long ilat = 0;
 
+#ifndef CONFIG_BF60x
 	/*  Disable all the peripheral intrs  - page 4-29 HW Ref manual */
 #ifdef SIC_IMASK0
 	bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
@@ -958,13 +1248,16 @@
 # ifdef SIC_IMASK2
 	bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
 # endif
-# ifdef CONFIG_SMP
+# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
 	bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
 	bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
 # endif
 #else
 	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
 #endif
+#else /* CONFIG_BF60x */
+	bfin_write_SEC_GCTL(SEC_GCTL_RESET);
+#endif
 
 	local_irq_disable();
 
@@ -974,6 +1267,10 @@
 	pint[1]->assign = CONFIG_PINT1_ASSIGN;
 	pint[2]->assign = CONFIG_PINT2_ASSIGN;
 	pint[3]->assign = CONFIG_PINT3_ASSIGN;
+# ifdef CONFIG_BF60x
+	pint[4]->assign = CONFIG_PINT4_ASSIGN;
+	pint[5]->assign = CONFIG_PINT5_ASSIGN;
+# endif
 # endif
 	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
 	init_pint_lut();
@@ -986,6 +1283,7 @@
 			irq_set_chip(irq, &bfin_internal_irqchip);
 
 		switch (irq) {
+#ifndef CONFIG_BF60x
 #if BFIN_GPIO_PINT
 		case IRQ_PINT0:
 		case IRQ_PINT1:
@@ -1015,12 +1313,13 @@
 						bfin_demux_mac_status_irq);
 			break;
 #endif
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_ICC)
 		case IRQ_SUPPLE_0:
 		case IRQ_SUPPLE_1:
 			irq_set_handler(irq, handle_percpu_irq);
 			break;
 #endif
+#endif
 
 #ifdef CONFIG_TICKSOURCE_CORETMR
 		case IRQ_CORETMR:
@@ -1050,7 +1349,8 @@
 
 	init_mach_irq();
 
-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#ifndef CONFIG_BF60x
+#if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) && !defined(CONFIG_BF60x)
 	for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
 		irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
 					 handle_level_irq);
@@ -1060,7 +1360,28 @@
 		irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
 		irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
 					 handle_level_irq);
-
+#else
+	for (irq = BFIN_IRQ(0); irq <= SYS_IRQS; irq++) {
+		if (irq < CORE_IRQS) {
+			irq_set_chip(irq, &bfin_sec_irqchip);
+			__irq_set_handler(irq, handle_sec_fault, 0, NULL);
+		} else if (irq >= BFIN_IRQ(21) && irq <= BFIN_IRQ(26)) {
+			irq_set_chip(irq, &bfin_sec_irqchip);
+			irq_set_chained_handler(irq, bfin_demux_gpio_irq);
+		} else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
+			irq_set_chip(irq, &bfin_sec_irqchip);
+			irq_set_handler(irq, handle_percpu_irq);
+		} else {
+			irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
+					handle_fasteoi_irq);
+			__irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
+		}
+	}
+	for (irq = GPIO_IRQ_BASE;
+		irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
+		irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
+					handle_level_irq);
+#endif
 	bfin_write_IMASK(0);
 	CSYNC();
 	ilat = bfin_read_ILAT();
@@ -1072,14 +1393,17 @@
 	/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
 	 * local_irq_enable()
 	 */
+#ifndef CONFIG_BF60x
 	program_IAR();
 	/* Therefore it's better to setup IARs before interrupts enabled */
 	search_IAR();
 
 	/* Enable interrupts IVG7-15 */
 	bfin_irq_flags |= IMASK_IVG15 |
-	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
-	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
+		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
+		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
+
+	bfin_sti(bfin_irq_flags);
 
 	/* This implicitly covers ANOMALY_05000171
 	 * Boot-ROM code modifies SICA_IWRx wakeup registers
@@ -1103,7 +1427,23 @@
 #else
 	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
 #endif
+#else  /* CONFIG_BF60x */
+	/* Enable interrupts IVG7-15 */
+	bfin_irq_flags |= IMASK_IVG15 |
+	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
+	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
 
+
+	bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
+	bfin_sec_enable_sci(SIC_SYSIRQ(IRQ_WATCH0));
+	bfin_sec_enable_ssi(SIC_SYSIRQ(IRQ_WATCH0));
+	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
+	udelay(100);
+	bfin_write_SEC_GCTL(SEC_GCTL_EN);
+	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
+	init_software_driven_irq();
+	register_syscore_ops(&sec_pm_syscore_ops);
+#endif
 	return 0;
 }
 
@@ -1112,13 +1452,14 @@
 #endif
 static int vec_to_irq(int vec)
 {
+#ifndef CONFIG_BF60x
 	struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
 	struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
 	unsigned long sic_status[3];
-
+#endif
 	if (likely(vec == EVT_IVTMR_P))
 		return IRQ_CORETMR;
-
+#ifndef CONFIG_BF60x
 #ifdef SIC_ISR
 	sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
 #else
@@ -1147,6 +1488,10 @@
 #endif
 			return ivg->irqno;
 	}
+#else
+	/* For BF60x, read the current interrupt source ID from the SEC */
+	return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
+#endif  /* end of CONFIG_BF60x */
 }
 
 #ifdef CONFIG_DO_IRQ_L1
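
The reworked bfin_demux_gpio_irq() above masks (and acks, when the chip provides it) the parent PINT interrupt, walks the latched request bits, unmasks the parent early for any pending source that is also flagged in pint->edge_set, and unmasks at the end only if that never happened. A minimal user-space sketch of that walk, with hypothetical callbacks standing in for the irq_chip operations:

#include <stdio.h>
#include <stdint.h>

static void handle_child_irq(unsigned int bit)
{
	printf("dispatch child handler for request bit %u\n", bit);
}

static void unmask_parent(void)
{
	printf("unmask parent PINT interrupt\n");
}

static void demux(uint32_t request, uint32_t edge_set)
{
	uint32_t early = request & edge_set;	/* like level_mask in the hunk above */
	unsigned int bit = 0;
	int unmasked = 0;

	printf("mask + ack parent PINT interrupt\n");
	while (request) {
		if (request & 1) {
			if (early & (1u << bit)) {
				unmasked = 1;
				unmask_parent();
			}
			handle_child_irq(bit);
		}
		bit++;
		request >>= 1;
	}
	if (!unmasked)
		unmask_parent();
}

int main(void)
{
	/* bits 1 and 4 pending; bit 4 also set in edge_set */
	demux(0x12, 0x10);
	return 0;
}
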
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 3c648a0..ca6655e 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -19,20 +19,33 @@
 #include <asm/gpio.h>
 #include <asm/dma.h>
 #include <asm/dpmc.h>
+#include <asm/pm.h>
 
+#ifdef CONFIG_BF60x
+struct bfin_cpu_pm_fns *bfin_cpu_pm;
+#endif
 
 void bfin_pm_suspend_standby_enter(void)
 {
+#ifndef CONFIG_BF60x
 	bfin_pm_standby_setup();
-
-#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
-	sleep_deeper(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]);
-#else
-	sleep_mode(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]);
 #endif
 
-	bfin_pm_standby_restore();
+#ifdef CONFIG_BF60x
+	bfin_cpu_pm->enter(PM_SUSPEND_STANDBY);
+#else
+# ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
+	sleep_deeper(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]);
+# else
+	sleep_mode(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]);
+# endif
+#endif
 
+#ifndef CONFIG_BF60x
+	bfin_pm_standby_restore();
+#endif
+
+#ifndef CONFIG_BF60x
 #ifdef SIC_IWR0
 	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
 # ifdef SIC_IWR1
@@ -52,6 +65,8 @@
 #else
 	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
 #endif
+
+#endif
 }
 
 int bf53x_suspend_l1_mem(unsigned char *memptr)
@@ -83,10 +98,13 @@
 }
 
 #if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
+# ifdef CONFIG_BF60x
+__attribute__((l1_text))
+# endif
 static void flushinv_all_dcache(void)
 {
-	u32 way, bank, subbank, set;
-	u32 status, addr;
+	register u32 way, bank, subbank, set;
+	register u32 status, addr;
 	u32 dmem_ctl = bfin_read_DMEM_CONTROL();
 
 	for (bank = 0; bank < 2; ++bank) {
@@ -133,6 +151,7 @@
 		return -ENOMEM;
 	}
 
+#ifndef CONFIG_BF60x
 	wakeup = bfin_read_VR_CTL() & ~FREQ;
 	wakeup |= SCKELOW;
 
@@ -142,6 +161,7 @@
 #ifdef CONFIG_PM_BFIN_WAKE_GP
 	wakeup |= GPWE;
 #endif
+#endif
 
 	ret = blackfin_dma_suspend();
 
@@ -159,7 +179,11 @@
 	_disable_icplb();
 	bf53x_suspend_l1_mem(memptr);
 
+#ifndef CONFIG_BF60x
 	do_hibernate(wakeup | vr_wakeup);	/* See you later! */
+#else
+	bfin_cpu_pm->enter(PM_SUSPEND_MEM);
+#endif
 
 	bf53x_resume_l1_mem(memptr);
 
@@ -223,9 +247,39 @@
 	return 0;
 }
 
+#ifdef CONFIG_BFIN_PM_WAKEUP_TIME_BENCH
+void bfin_pm_end(void)
+{
+	u32 cycle, cycle2;
+	u64 usec64;
+	u32 usec;
+
+	__asm__ __volatile__ (
+		"1: %0 = CYCLES2\n"
+		"%1 = CYCLES\n"
+		"%2 = CYCLES2\n"
+		"CC = %2 == %0\n"
+		"if ! CC jump 1b\n"
+		: "=d,a" (cycle2), "=d,a" (cycle), "=d,a" (usec) : : "CC"
+	);
+
+	usec64 = ((u64)cycle2 << 32) + cycle;
+	do_div(usec64, get_cclk() / USEC_PER_SEC);
+	usec = usec64;
+	if (usec == 0)
+		usec = 1;
+
+	pr_info("PM: resume of kernel completes after %ld msec %03ld usec\n",
+		usec / USEC_PER_MSEC, usec % USEC_PER_MSEC);
+}
+#endif
+
 static const struct platform_suspend_ops bfin_pm_ops = {
 	.enter = bfin_pm_enter,
 	.valid	= bfin_pm_valid,
+#ifdef CONFIG_BFIN_PM_WAKEUP_TIME_BENCH
+	.end = bfin_pm_end,
+#endif
 };
 
 static int __init bfin_pm_init(void)
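
The optional CONFIG_BFIN_PM_WAKEUP_TIME_BENCH hook above re-reads CYCLES2 until the high word is stable, glues the two halves into a 64-bit cycle count, and divides by cycles-per-microsecond via do_div(). A small host-side sketch of the same arithmetic; the 500 MHz core clock is an assumed example value (the kernel queries get_cclk() at run time):

#include <stdio.h>
#include <stdint.h>

/* Assumed example core clock; the kernel uses get_cclk() at run time. */
#define CCLK_HZ		500000000ULL
#define USEC_PER_SEC	1000000ULL
#define USEC_PER_MSEC	1000ULL

int main(void)
{
	uint32_t cycles_lo = 0x9502F900;	/* CYCLES  (low 32 bits)  */
	uint32_t cycles_hi = 0x00000002;	/* CYCLES2 (high 32 bits) */
	uint64_t cycles = ((uint64_t)cycles_hi << 32) + cycles_lo;
	uint64_t usec = cycles / (CCLK_HZ / USEC_PER_SEC);	/* cycles per usec */

	printf("resume completed after %llu msec %03llu usec\n",
	       (unsigned long long)(usec / USEC_PER_MSEC),
	       (unsigned long long)(usec % USEC_PER_MSEC));
	return 0;
}
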
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 78daae0..9cb8553 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -48,7 +48,7 @@
 
 	unsigned long zones_size[MAX_NR_ZONES] = {
 		[0] = 0,
-		[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT,
+		[ZONE_DMA] = (end_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> PAGE_SHIFT,
 		[ZONE_NORMAL] = 0,
 #ifdef CONFIG_HIGHMEM
 		[ZONE_HIGHMEM] = 0,
@@ -60,7 +60,8 @@
 
 	pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n",
 	        PAGE_ALIGN(memory_start), end_mem);
-	free_area_init(zones_size);
+	free_area_init_node(0, zones_size,
+		CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT, NULL);
 }
 
 asmlinkage void __init init_pda(void)
@@ -75,9 +76,6 @@
 	   valid pointers to it. */
 	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));
 
-	cpu_pda[0].next = &cpu_pda[1];
-	cpu_pda[1].next = &cpu_pda[0];
-
 #ifdef CONFIG_EXCEPTION_L1_SCRATCH
 	cpu_pda[cpu].ex_stack = (unsigned long *)(L1_SCRATCH_START + \
 					L1_SCRATCH_LENGTH);
@@ -109,10 +107,10 @@
 	totalram_pages = free_all_bootmem();
 
 	reservedpages = 0;
-	for (tmp = 0; tmp < max_mapnr; tmp++)
+	for (tmp = ARCH_PFN_OFFSET; tmp < max_mapnr; tmp++)
 		if (PageReserved(pfn_to_page(tmp)))
 			reservedpages++;
-	freepages =  max_mapnr - reservedpages;
+	freepages =  max_mapnr - ARCH_PFN_OFFSET - reservedpages;
 
 	/* do not count in kernel image between _rambase and _ramstart */
 	reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT;
@@ -127,7 +125,7 @@
 	printk(KERN_INFO
 	     "Memory available: %luk/%luk RAM, "
 		"(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n",
-		(unsigned long) freepages << (PAGE_SHIFT-10), _ramend >> 10,
+		(unsigned long) freepages << (PAGE_SHIFT-10), (_ramend - CONFIG_PHY_RAM_BASE_ADDRESS) >> 10,
 		initk, codek, datak, DMA_UNCACHED_REGION >> 10, (reservedpages << (PAGE_SHIFT-10)));
 }
 
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 29d98fa..342e378 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -186,9 +186,45 @@
 #endif
 }
 
+#ifdef __ADSPBF60x__
+static irqreturn_t l2_ecc_err(int irq, void *dev_id)
+{
+	int status;
+
+	printk(KERN_ERR "L2 ECC error happened\n");
+	status = bfin_read32(L2CTL0_STAT);
+	if (status & 0x1)
+		printk(KERN_ERR "Core channel error type:0x%x, addr:0x%x\n",
+			bfin_read32(L2CTL0_ET0), bfin_read32(L2CTL0_EADDR0));
+	if (status & 0x2)
+		printk(KERN_ERR "System channel error type:0x%x, addr:0x%x\n",
+			bfin_read32(L2CTL0_ET1), bfin_read32(L2CTL0_EADDR1));
+
+	status = status >> 8;
+	if (status)
+		printk(KERN_ERR "L2 Bank%d error, addr:0x%x\n",
+			status, bfin_read32(L2CTL0_ERRADDR0 + status));
+
+	panic("L2 Ecc error");
+	return IRQ_HANDLED;
+}
+#endif
+
 static void __init l2_sram_init(void)
 {
 #if L2_LENGTH != 0
+
+#ifdef __ADSPBF60x__
+	int ret;
+
+	ret = request_irq(IRQ_L2CTL0_ECC_ERR, l2_ecc_err, 0, "l2-ecc-err",
+			NULL);
+	if (unlikely(ret < 0)) {
+		printk(KERN_INFO "Failed to request L2 ECC error interrupt\n");
+		return;
+	}
+#endif
+
 	free_l2_sram_head.next =
 		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
 	if (!free_l2_sram_head.next) {
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 1c3ccd4..1f15b88 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -3,7 +3,7 @@
 # see Documentation/kbuild/kconfig-language.txt.
 #
 
-config TMS320C6X
+config C6X
 	def_bool y
 	select CLKDEV_LOOKUP
 	select GENERIC_IRQ_SHOW
@@ -19,24 +19,12 @@
 config MMU
 	def_bool n
 
-config ZONE_DMA
-	def_bool y
-
 config FPU
 	def_bool n
 
-config HIGHMEM
-	def_bool n
-
-config NUMA
-	def_bool n
-
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
-config RWSEM_XCHGADD_ALGORITHM
-	def_bool n
-
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
diff --git a/arch/c6x/include/asm/elf.h b/arch/c6x/include/asm/elf.h
index d57865b..f4552db 100644
--- a/arch/c6x/include/asm/elf.h
+++ b/arch/c6x/include/asm/elf.h
@@ -30,7 +30,19 @@
  */
 #define elf_check_arch(x) ((x)->e_machine == EM_TI_C6000)
 
-#define elf_check_const_displacement(x) (1)
+#define elf_check_fdpic(x) (1)
+#define elf_check_const_displacement(x) (0)
+
+#define ELF_FDPIC_PLAT_INIT(_regs, _exec_map, _interp_map, _dynamic_addr) \
+do {								\
+	_regs->b4	= (_exec_map);				\
+	_regs->a6	= (_interp_map);			\
+	_regs->b6	= (_dynamic_addr);			\
+} while (0)
+
+#define ELF_FDPIC_CORE_EFLAGS	0
+
+#define ELF_CORE_COPY_FPREGS(...) 0 /* No FPU regs to copy */
 
 /*
  * These are used to set parameters in the core dumps.
diff --git a/arch/c6x/include/asm/mmu.h b/arch/c6x/include/asm/mmu.h
index 41592bf..4467e77 100644
--- a/arch/c6x/include/asm/mmu.h
+++ b/arch/c6x/include/asm/mmu.h
@@ -13,6 +13,10 @@
 
 typedef struct {
 	unsigned long		end_brk;
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+	unsigned long	exec_fdpic_loadmap;
+	unsigned long	interp_fdpic_loadmap;
+#endif
 } mm_context_t;
 
 #endif /* _ASM_C6X_MMU_H */
diff --git a/arch/c6x/include/asm/ptrace.h b/arch/c6x/include/asm/ptrace.h
index 21e8d79..b04ff59 100644
--- a/arch/c6x/include/asm/ptrace.h
+++ b/arch/c6x/include/asm/ptrace.h
@@ -97,6 +97,11 @@
 #define PT_DP	   PT_B14  /* Data Segment Pointer (B14) */
 #define PT_SP	   PT_B15  /* Stack Pointer (B15)  */
 
+#define PTRACE_GETFDPIC		31	/* get the ELF fdpic loadmap address */
+
+#define PTRACE_GETFDPIC_EXEC	0	/* [addr] request the executable loadmap */
+#define PTRACE_GETFDPIC_INTERP	1	/* [addr] request the interpreter loadmap */
+
 #ifndef __ASSEMBLY__
 
 #ifdef _BIG_ENDIAN
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index 81c2e27..9b1a92b 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -135,10 +135,6 @@
 #define	KSTK_EIP(tsk)	((tsk)->thread.frame0->pc)
 #define	KSTK_ESP(tsk)	((tsk)->thread.frame0->sp)
 
-/* Allocation and freeing of basic task resources. */
-extern struct task_struct *alloc_task_struct_node(int node);
-extern void free_task_struct(struct task_struct *p);
-
 #define cpu_relax()    barrier()
 
 /* data cache prefetch */
diff --git a/arch/ia64/include/asm/irq_remapping.h b/arch/ia64/include/asm/irq_remapping.h
new file mode 100644
index 0000000..a8687b1
--- /dev/null
+++ b/arch/ia64/include/asm/irq_remapping.h
@@ -0,0 +1,4 @@
+#ifndef __IA64_INTR_REMAPPING_H
+#define __IA64_INTR_REMAPPING_H
+#define irq_remapping_enabled 0
+#endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f5104b7..463fb3b 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1174,7 +1174,7 @@
 
 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
 {
-	return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL);
+	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index cf318f2..b7f2e2d 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -16,6 +16,13 @@
 
 KBUILD_DEFCONFIG := multi_defconfig
 
+ifneq ($(SUBARCH),$(ARCH))
+	ifeq ($(CROSS_COMPILE),)
+		CROSS_COMPILE := $(call cc-cross-prefix, \
+			m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
+	endif
+endif
+
 #
 #	Enable processor type. Ordering of these is important - we want to
 #	use the minimum processor type of the range we support. The logic
@@ -62,12 +69,6 @@
 
 LDFLAGS := -m m68kelf
 KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
-ifneq ($(SUBARCH),$(ARCH))
-	ifeq ($(CROSS_COMPILE),)
-		CROSS_COMPILE := $(call cc-cross-prefix, \
-			m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
-	endif
-endif
 
 ifdef CONFIG_SUN3
 LDFLAGS_vmlinux = -N
@@ -115,18 +116,6 @@
 core-$(CONFIG_M68EZ328)		+= arch/m68k/platform/68EZ328/
 core-$(CONFIG_M68VZ328)		+= arch/m68k/platform/68VZ328/
 core-$(CONFIG_COLDFIRE)		+= arch/m68k/platform/coldfire/
-core-$(CONFIG_M5206)		+= arch/m68k/platform/5206/
-core-$(CONFIG_M5206e)		+= arch/m68k/platform/5206/
-core-$(CONFIG_M520x)		+= arch/m68k/platform/520x/
-core-$(CONFIG_M523x)		+= arch/m68k/platform/523x/
-core-$(CONFIG_M5249)		+= arch/m68k/platform/5249/
-core-$(CONFIG_M527x)		+= arch/m68k/platform/527x/
-core-$(CONFIG_M5272)		+= arch/m68k/platform/5272/
-core-$(CONFIG_M528x)		+= arch/m68k/platform/528x/
-core-$(CONFIG_M5307)		+= arch/m68k/platform/5307/
-core-$(CONFIG_M532x)		+= arch/m68k/platform/532x/
-core-$(CONFIG_M5407)		+= arch/m68k/platform/5407/
-core-$(CONFIG_M54xx)		+= arch/m68k/platform/54xx/
 
 
 all:	zImage
diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c
index 7fd8b41..80076d3 100644
--- a/arch/m68k/amiga/platform.c
+++ b/arch/m68k/amiga/platform.c
@@ -6,6 +6,7 @@
  * for more details.
  */
 
+#include <linux/err.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/zorro.h>
@@ -46,18 +47,25 @@
 
 static int __init amiga_init_bus(void)
 {
+	struct platform_device *pdev;
+	unsigned int n;
+
 	if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
 		return -ENODEV;
 
-	platform_device_register_simple("amiga-zorro", -1, zorro_resources,
-					AMIGAHW_PRESENT(ZORRO3) ? 4 : 2);
+	n = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2;
+	pdev = platform_device_register_simple("amiga-zorro", -1,
+					       zorro_resources, n);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
 	return 0;
 }
 
 subsys_initcall(amiga_init_bus);
 
 
-static int z_dev_present(zorro_id id)
+static int __init z_dev_present(zorro_id id)
 {
 	unsigned int i;
 
@@ -126,72 +134,122 @@
 static int __init amiga_init_devices(void)
 {
 	struct platform_device *pdev;
+	int error;
 
 	if (!MACH_IS_AMIGA)
 		return -ENODEV;
 
 	/* video hardware */
-	if (AMIGAHW_PRESENT(AMI_VIDEO))
-		platform_device_register_simple("amiga-video", -1, NULL, 0);
+	if (AMIGAHW_PRESENT(AMI_VIDEO)) {
+		pdev = platform_device_register_simple("amiga-video", -1, NULL,
+						       0);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
 
 	/* sound hardware */
-	if (AMIGAHW_PRESENT(AMI_AUDIO))
-		platform_device_register_simple("amiga-audio", -1, NULL, 0);
+	if (AMIGAHW_PRESENT(AMI_AUDIO)) {
+		pdev = platform_device_register_simple("amiga-audio", -1, NULL,
+						       0);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
 
 	/* storage interfaces */
-	if (AMIGAHW_PRESENT(AMI_FLOPPY))
-		platform_device_register_simple("amiga-floppy", -1, NULL, 0);
+	if (AMIGAHW_PRESENT(AMI_FLOPPY)) {
+		pdev = platform_device_register_simple("amiga-floppy", -1,
+						       NULL, 0);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
-	if (AMIGAHW_PRESENT(A3000_SCSI))
-		platform_device_register_simple("amiga-a3000-scsi", -1,
-						&a3000_scsi_resource, 1);
+	if (AMIGAHW_PRESENT(A3000_SCSI)) {
+		pdev = platform_device_register_simple("amiga-a3000-scsi", -1,
+						       &a3000_scsi_resource, 1);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
-	if (AMIGAHW_PRESENT(A4000_SCSI))
-		platform_device_register_simple("amiga-a4000t-scsi", -1,
-						&a4000t_scsi_resource, 1);
+	if (AMIGAHW_PRESENT(A4000_SCSI)) {
+		pdev = platform_device_register_simple("amiga-a4000t-scsi", -1,
+						       &a4000t_scsi_resource,
+						       1);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
 	if (AMIGAHW_PRESENT(A1200_IDE) ||
 	    z_dev_present(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE)) {
 		pdev = platform_device_register_simple("amiga-gayle-ide", -1,
 						       &a1200_ide_resource, 1);
-		platform_device_add_data(pdev, &a1200_ide_pdata,
-					 sizeof(a1200_ide_pdata));
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+		error = platform_device_add_data(pdev, &a1200_ide_pdata,
+						 sizeof(a1200_ide_pdata));
+		if (error)
+			return error;
 	}
 
 	if (AMIGAHW_PRESENT(A4000_IDE)) {
 		pdev = platform_device_register_simple("amiga-gayle-ide", -1,
 						       &a4000_ide_resource, 1);
-		platform_device_add_data(pdev, &a4000_ide_pdata,
-					 sizeof(a4000_ide_pdata));
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+		error = platform_device_add_data(pdev, &a4000_ide_pdata,
+						 sizeof(a4000_ide_pdata));
+		if (error)
+			return error;
 	}
 
 
 	/* other I/O hardware */
-	if (AMIGAHW_PRESENT(AMI_KEYBOARD))
-		platform_device_register_simple("amiga-keyboard", -1, NULL, 0);
+	if (AMIGAHW_PRESENT(AMI_KEYBOARD)) {
+		pdev = platform_device_register_simple("amiga-keyboard", -1,
+						       NULL, 0);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
-	if (AMIGAHW_PRESENT(AMI_MOUSE))
-		platform_device_register_simple("amiga-mouse", -1, NULL, 0);
+	if (AMIGAHW_PRESENT(AMI_MOUSE)) {
+		pdev = platform_device_register_simple("amiga-mouse", -1, NULL,
+						       0);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
-	if (AMIGAHW_PRESENT(AMI_SERIAL))
-		platform_device_register_simple("amiga-serial", -1, NULL, 0);
+	if (AMIGAHW_PRESENT(AMI_SERIAL)) {
+		pdev = platform_device_register_simple("amiga-serial", -1,
+						       NULL, 0);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
-	if (AMIGAHW_PRESENT(AMI_PARALLEL))
-		platform_device_register_simple("amiga-parallel", -1, NULL, 0);
+	if (AMIGAHW_PRESENT(AMI_PARALLEL)) {
+		pdev = platform_device_register_simple("amiga-parallel", -1,
+						       NULL, 0);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
 
 	/* real time clocks */
-	if (AMIGAHW_PRESENT(A2000_CLK))
-		platform_device_register_simple("rtc-msm6242", -1,
-						&amiga_rtc_resource, 1);
+	if (AMIGAHW_PRESENT(A2000_CLK)) {
+		pdev = platform_device_register_simple("rtc-msm6242", -1,
+						       &amiga_rtc_resource, 1);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
-	if (AMIGAHW_PRESENT(A3000_CLK))
-		platform_device_register_simple("rtc-rp5c01", -1,
-						&amiga_rtc_resource, 1);
+	if (AMIGAHW_PRESENT(A3000_CLK)) {
+		pdev = platform_device_register_simple("rtc-rp5c01", -1,
+						       &amiga_rtc_resource, 1);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+	}
 
 	return 0;
 }
 
-device_initcall(amiga_init_devices);
+arch_initcall(amiga_init_devices);
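
The hunks above stop ignoring the result of platform_device_register_simple() and propagate failures via IS_ERR()/PTR_ERR(), which encode a small negative errno inside an otherwise invalid pointer value near the top of the address space. A stand-alone model of that convention (not the kernel's <linux/err.h>, just the same idea):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)		{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Toy registration helper: fail with -ENOMEM or hand back a valid pointer. */
static void *register_device(int fail)
{
	static int dummy;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy;
}

int main(void)
{
	void *pdev = register_device(1);

	if (IS_ERR(pdev))
		printf("registration failed: error %ld\n", PTR_ERR(pdev));
	else
		printf("registration succeeded\n");
	return 0;
}
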
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 783d8f0..3f41092 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -206,7 +206,7 @@
  * hardware with a programmable int vector (probably a VME board).
  */
 
-unsigned long atari_register_vme_int(void)
+unsigned int atari_register_vme_int(void)
 {
 	int i;
 
@@ -223,7 +223,7 @@
 EXPORT_SYMBOL(atari_register_vme_int);
 
 
-void atari_unregister_vme_int(unsigned long irq)
+void atari_unregister_vme_int(unsigned int irq)
 {
 	if (irq >= VME_SOURCE_BASE && irq < VME_SOURCE_BASE + VME_MAX_SOURCES) {
 		irq -= VME_SOURCE_BASE;
diff --git a/arch/m68k/configs/m5475evb_defconfig b/arch/m68k/configs/m5475evb_defconfig
new file mode 100644
index 0000000..c5018a6
--- /dev/null
+++ b/arch/m68k/configs/m5475evb_defconfig
@@ -0,0 +1,62 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_SWAP is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_HOTPLUG is not set
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_SHMEM is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_COLDFIRE=y
+CONFIG_M547x=y
+CONFIG_CLOCK_SET=y
+CONFIG_CLOCK_FREQ=266000000
+# CONFIG_4KSTACKS is not set
+CONFIG_RAMBASE=0x0
+CONFIG_RAMSIZE=0x2000000
+CONFIG_VECTORBASE=0x0
+CONFIG_MBAR=0xff000000
+CONFIG_KERNELBASE=0x20000
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_RAM=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UCLINUX=y
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_INPUT is not set
+# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
+CONFIG_SERIAL_MCF=y
+CONFIG_SERIAL_MCF_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_ROMFS_FS=y
+CONFIG_ROMFS_BACKED_BY_MTD=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_BOOTPARAM=y
+CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
diff --git a/arch/m68k/include/asm/atariints.h b/arch/m68k/include/asm/atariints.h
index 656bbbf..5fc13bd 100644
--- a/arch/m68k/include/asm/atariints.h
+++ b/arch/m68k/include/asm/atariints.h
@@ -198,7 +198,7 @@
 	return( get_mfp_bit( irq, MFP_PENDING ) );
 }
 
-unsigned long atari_register_vme_int( void );
-void atari_unregister_vme_int( unsigned long );
+unsigned int atari_register_vme_int(void);
+void atari_unregister_vme_int(unsigned int);
 
 #endif /* linux/atariints.h */
diff --git a/arch/m68k/include/asm/cacheflush_no.h b/arch/m68k/include/asm/cacheflush_no.h
index cb88aa9..7cafb53 100644
--- a/arch/m68k/include/asm/cacheflush_no.h
+++ b/arch/m68k/include/asm/cacheflush_no.h
@@ -30,11 +30,8 @@
 
 void mcf_cache_push(void);
 
-static inline void __flush_cache_all(void)
+static inline void __clear_cache_all(void)
 {
-#ifdef CACHE_PUSH
-	mcf_cache_push();
-#endif
 #ifdef CACHE_INVALIDATE
 	__asm__ __volatile__ (
 		"movel	%0, %%d0\n\t"
@@ -44,6 +41,14 @@
 #endif
 }
 
+static inline void __flush_cache_all(void)
+{
+#ifdef CACHE_PUSH
+	mcf_cache_push();
+#endif
+	__clear_cache_all();
+}
+
 /*
  * Some ColdFire parts implement separate instruction and data caches,
  * on those we should just flush the appropriate cache. If we don't need
@@ -76,4 +81,23 @@
 	__asm__ __volatile__ ( "nop" );
 #endif
 }
+
+/*
+ * Push cache entries at the supplied address. We want to write back any dirty
+ * data and then invalidate the cache lines associated with this address.
+ */
+static inline void cache_push(unsigned long paddr, int len)
+{
+	__flush_cache_all();
+}
+
+/*
+ * Clear cache entries at the supplied address (that is, don't write back any
+ * dirty data).
+ */
+static inline void cache_clear(unsigned long paddr, int len)
+{
+	__clear_cache_all();
+}
+
 #endif /* _M68KNOMMU_CACHEFLUSH_H */
diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
index 622138d..d7de0f1 100644
--- a/arch/m68k/include/asm/entry.h
+++ b/arch/m68k/include/asm/entry.h
@@ -33,13 +33,11 @@
 
 /* the following macro is used when enabling interrupts */
 #if defined(MACH_ATARI_ONLY)
-	/* block out HSYNC on the atari */
-#define ALLOWINT	(~0x400)
-#define	MAX_NOINT_IPL	3
+	/* block out HSYNC = ipl 2 on the atari */
+#define ALLOWINT	(~0x500)
 #else
 	/* portable version */
 #define ALLOWINT	(~0x700)
-#define	MAX_NOINT_IPL	0
 #endif /* machine compilation types */
 
 #ifdef __ASSEMBLY__
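
ALLOWINT is ANDed into the status register when interrupts are re-enabled, and the 68k interrupt priority mask (IPL) sits in SR bits 8-10. Starting from a fully masked SR (IPL 7), the old Atari value ~0x400 left IPL at 3, while ~0x500 clears bits 10 and 8 and leaves bit 9, i.e. IPL 2 - just enough to keep the level-2 HSYNC autovector blocked while letting level 3 and above through. A quick check of that arithmetic:

#include <stdio.h>

/* The 68k interrupt priority mask (IPL) lives in status register bits 8-10. */
static unsigned int ipl_after_enable(unsigned int sr, unsigned int allowint)
{
	return ((sr & allowint) >> 8) & 7;
}

int main(void)
{
	unsigned int sr = 0x2700;	/* supervisor mode, IPL 7 (all masked) */

	printf("ALLOWINT ~0x700 -> IPL %u\n", ipl_after_enable(sr, ~0x700u));
	printf("ALLOWINT ~0x400 -> IPL %u\n", ipl_after_enable(sr, ~0x400u));
	printf("ALLOWINT ~0x500 -> IPL %u\n", ipl_after_enable(sr, ~0x500u));
	return 0;
}
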
diff --git a/arch/m68k/include/asm/flat.h b/arch/m68k/include/asm/flat.h
index a0e2907..f9454b8 100644
--- a/arch/m68k/include/asm/flat.h
+++ b/arch/m68k/include/asm/flat.h
@@ -11,6 +11,11 @@
 #define	flat_get_addr_from_rp(rp, relval, flags, p)	get_unaligned(rp)
 #define	flat_put_addr_at_rp(rp, val, relval)	put_unaligned(val,rp)
 #define	flat_get_relocate_addr(rel)		(rel)
-#define	flat_set_persistent(relval, p)		0
+
+static inline int flat_set_persistent(unsigned long relval,
+				      unsigned long *persistent)
+{
+	return 0;
+}
 
 #endif /* __M68KNOMMU_FLAT_H__ */
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index 0fb3468..fa4324b 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -278,6 +278,13 @@
 #define readl(addr)      in_le32(addr)
 #define writel(val,addr) out_le32((addr),(val))
 
+#define readsb(port, buf, nr)     raw_insb((port), (u8 *)(buf), (nr))
+#define readsw(port, buf, nr)     raw_insw((port), (u16 *)(buf), (nr))
+#define readsl(port, buf, nr)     raw_insl((port), (u32 *)(buf), (nr))
+#define writesb(port, buf, nr)    raw_outsb((port), (u8 *)(buf), (nr))
+#define writesw(port, buf, nr)    raw_outsw((port), (u16 *)(buf), (nr))
+#define writesl(port, buf, nr)    raw_outsl((port), (u32 *)(buf), (nr))
+
 #define mmiowb()
 
 static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index 569476f..d63b99f 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -97,100 +97,81 @@
 /*
  * 	GPIO registers
  */
-#define MCFGPIO_PORTA		(MCF_IPSBAR + 0x00100000)
-#define MCFGPIO_PORTB		(MCF_IPSBAR + 0x00100001)
-#define MCFGPIO_PORTC		(MCF_IPSBAR + 0x00100002)
-#define MCFGPIO_PORTD		(MCF_IPSBAR + 0x00100003)
-#define MCFGPIO_PORTE		(MCF_IPSBAR + 0x00100004)
-#define MCFGPIO_PORTF		(MCF_IPSBAR + 0x00100005)
-#define MCFGPIO_PORTG		(MCF_IPSBAR + 0x00100006)
-#define MCFGPIO_PORTH		(MCF_IPSBAR + 0x00100007)
-#define MCFGPIO_PORTJ		(MCF_IPSBAR + 0x00100008)
-#define MCFGPIO_PORTDD		(MCF_IPSBAR + 0x00100009)
-#define MCFGPIO_PORTEH		(MCF_IPSBAR + 0x0010000A)
-#define MCFGPIO_PORTEL		(MCF_IPSBAR + 0x0010000B)
-#define MCFGPIO_PORTAS		(MCF_IPSBAR + 0x0010000C)
-#define MCFGPIO_PORTQS		(MCF_IPSBAR + 0x0010000D)
-#define MCFGPIO_PORTSD		(MCF_IPSBAR + 0x0010000E)
-#define MCFGPIO_PORTTC		(MCF_IPSBAR + 0x0010000F)
-#define MCFGPIO_PORTTD		(MCF_IPSBAR + 0x00100010)
-#define MCFGPIO_PORTUA		(MCF_IPSBAR + 0x00100011)
+#define MCFGPIO_PODR_A		(MCF_IPSBAR + 0x00100000)
+#define MCFGPIO_PODR_B		(MCF_IPSBAR + 0x00100001)
+#define MCFGPIO_PODR_C		(MCF_IPSBAR + 0x00100002)
+#define MCFGPIO_PODR_D		(MCF_IPSBAR + 0x00100003)
+#define MCFGPIO_PODR_E		(MCF_IPSBAR + 0x00100004)
+#define MCFGPIO_PODR_F		(MCF_IPSBAR + 0x00100005)
+#define MCFGPIO_PODR_G		(MCF_IPSBAR + 0x00100006)
+#define MCFGPIO_PODR_H		(MCF_IPSBAR + 0x00100007)
+#define MCFGPIO_PODR_J		(MCF_IPSBAR + 0x00100008)
+#define MCFGPIO_PODR_DD		(MCF_IPSBAR + 0x00100009)
+#define MCFGPIO_PODR_EH		(MCF_IPSBAR + 0x0010000A)
+#define MCFGPIO_PODR_EL		(MCF_IPSBAR + 0x0010000B)
+#define MCFGPIO_PODR_AS		(MCF_IPSBAR + 0x0010000C)
+#define MCFGPIO_PODR_QS		(MCF_IPSBAR + 0x0010000D)
+#define MCFGPIO_PODR_SD		(MCF_IPSBAR + 0x0010000E)
+#define MCFGPIO_PODR_TC		(MCF_IPSBAR + 0x0010000F)
+#define MCFGPIO_PODR_TD		(MCF_IPSBAR + 0x00100010)
+#define MCFGPIO_PODR_UA		(MCF_IPSBAR + 0x00100011)
 
-#define MCFGPIO_DDRA		(MCF_IPSBAR + 0x00100014)
-#define MCFGPIO_DDRB		(MCF_IPSBAR + 0x00100015)
-#define MCFGPIO_DDRC		(MCF_IPSBAR + 0x00100016)
-#define MCFGPIO_DDRD		(MCF_IPSBAR + 0x00100017)
-#define MCFGPIO_DDRE		(MCF_IPSBAR + 0x00100018)
-#define MCFGPIO_DDRF		(MCF_IPSBAR + 0x00100019)
-#define MCFGPIO_DDRG		(MCF_IPSBAR + 0x0010001A)
-#define MCFGPIO_DDRH		(MCF_IPSBAR + 0x0010001B)
-#define MCFGPIO_DDRJ		(MCF_IPSBAR + 0x0010001C)
-#define MCFGPIO_DDRDD		(MCF_IPSBAR + 0x0010001D)
-#define MCFGPIO_DDREH		(MCF_IPSBAR + 0x0010001E)
-#define MCFGPIO_DDREL		(MCF_IPSBAR + 0x0010001F)
-#define MCFGPIO_DDRAS		(MCF_IPSBAR + 0x00100020)
-#define MCFGPIO_DDRQS		(MCF_IPSBAR + 0x00100021)
-#define MCFGPIO_DDRSD		(MCF_IPSBAR + 0x00100022)
-#define MCFGPIO_DDRTC		(MCF_IPSBAR + 0x00100023)
-#define MCFGPIO_DDRTD		(MCF_IPSBAR + 0x00100024)
-#define MCFGPIO_DDRUA		(MCF_IPSBAR + 0x00100025)
+#define MCFGPIO_PDDR_A		(MCF_IPSBAR + 0x00100014)
+#define MCFGPIO_PDDR_B		(MCF_IPSBAR + 0x00100015)
+#define MCFGPIO_PDDR_C		(MCF_IPSBAR + 0x00100016)
+#define MCFGPIO_PDDR_D		(MCF_IPSBAR + 0x00100017)
+#define MCFGPIO_PDDR_E		(MCF_IPSBAR + 0x00100018)
+#define MCFGPIO_PDDR_F		(MCF_IPSBAR + 0x00100019)
+#define MCFGPIO_PDDR_G		(MCF_IPSBAR + 0x0010001A)
+#define MCFGPIO_PDDR_H		(MCF_IPSBAR + 0x0010001B)
+#define MCFGPIO_PDDR_J		(MCF_IPSBAR + 0x0010001C)
+#define MCFGPIO_PDDR_DD		(MCF_IPSBAR + 0x0010001D)
+#define MCFGPIO_PDDR_EH		(MCF_IPSBAR + 0x0010001E)
+#define MCFGPIO_PDDR_EL		(MCF_IPSBAR + 0x0010001F)
+#define MCFGPIO_PDDR_AS		(MCF_IPSBAR + 0x00100020)
+#define MCFGPIO_PDDR_QS		(MCF_IPSBAR + 0x00100021)
+#define MCFGPIO_PDDR_SD		(MCF_IPSBAR + 0x00100022)
+#define MCFGPIO_PDDR_TC		(MCF_IPSBAR + 0x00100023)
+#define MCFGPIO_PDDR_TD		(MCF_IPSBAR + 0x00100024)
+#define MCFGPIO_PDDR_UA		(MCF_IPSBAR + 0x00100025)
 
-#define MCFGPIO_PORTAP		(MCF_IPSBAR + 0x00100028)
-#define MCFGPIO_PORTBP		(MCF_IPSBAR + 0x00100029)
-#define MCFGPIO_PORTCP		(MCF_IPSBAR + 0x0010002A)
-#define MCFGPIO_PORTDP		(MCF_IPSBAR + 0x0010002B)
-#define MCFGPIO_PORTEP		(MCF_IPSBAR + 0x0010002C)
-#define MCFGPIO_PORTFP		(MCF_IPSBAR + 0x0010002D)
-#define MCFGPIO_PORTGP		(MCF_IPSBAR + 0x0010002E)
-#define MCFGPIO_PORTHP		(MCF_IPSBAR + 0x0010002F)
-#define MCFGPIO_PORTJP		(MCF_IPSBAR + 0x00100030)
-#define MCFGPIO_PORTDDP		(MCF_IPSBAR + 0x00100031)
-#define MCFGPIO_PORTEHP		(MCF_IPSBAR + 0x00100032)
-#define MCFGPIO_PORTELP		(MCF_IPSBAR + 0x00100033)
-#define MCFGPIO_PORTASP		(MCF_IPSBAR + 0x00100034)
-#define MCFGPIO_PORTQSP		(MCF_IPSBAR + 0x00100035)
-#define MCFGPIO_PORTSDP		(MCF_IPSBAR + 0x00100036)
-#define MCFGPIO_PORTTCP		(MCF_IPSBAR + 0x00100037)
-#define MCFGPIO_PORTTDP		(MCF_IPSBAR + 0x00100038)
-#define MCFGPIO_PORTUAP		(MCF_IPSBAR + 0x00100039)
+#define MCFGPIO_PPDSDR_A	(MCF_IPSBAR + 0x00100028)
+#define MCFGPIO_PPDSDR_B	(MCF_IPSBAR + 0x00100029)
+#define MCFGPIO_PPDSDR_C	(MCF_IPSBAR + 0x0010002A)
+#define MCFGPIO_PPDSDR_D	(MCF_IPSBAR + 0x0010002B)
+#define MCFGPIO_PPDSDR_E	(MCF_IPSBAR + 0x0010002C)
+#define MCFGPIO_PPDSDR_F	(MCF_IPSBAR + 0x0010002D)
+#define MCFGPIO_PPDSDR_G	(MCF_IPSBAR + 0x0010002E)
+#define MCFGPIO_PPDSDR_H	(MCF_IPSBAR + 0x0010002F)
+#define MCFGPIO_PPDSDR_J	(MCF_IPSBAR + 0x00100030)
+#define MCFGPIO_PPDSDR_DD	(MCF_IPSBAR + 0x00100031)
+#define MCFGPIO_PPDSDR_EH	(MCF_IPSBAR + 0x00100032)
+#define MCFGPIO_PPDSDR_EL	(MCF_IPSBAR + 0x00100033)
+#define MCFGPIO_PPDSDR_AS	(MCF_IPSBAR + 0x00100034)
+#define MCFGPIO_PPDSDR_QS	(MCF_IPSBAR + 0x00100035)
+#define MCFGPIO_PPDSDR_SD	(MCF_IPSBAR + 0x00100036)
+#define MCFGPIO_PPDSDR_TC	(MCF_IPSBAR + 0x00100037)
+#define MCFGPIO_PPDSDR_TD	(MCF_IPSBAR + 0x00100038)
+#define MCFGPIO_PPDSDR_UA	(MCF_IPSBAR + 0x00100039)
 
-#define MCFGPIO_SETA		(MCF_IPSBAR + 0x00100028)
-#define MCFGPIO_SETB		(MCF_IPSBAR + 0x00100029)
-#define MCFGPIO_SETC		(MCF_IPSBAR + 0x0010002A)
-#define MCFGPIO_SETD		(MCF_IPSBAR + 0x0010002B)
-#define MCFGPIO_SETE		(MCF_IPSBAR + 0x0010002C)
-#define MCFGPIO_SETF		(MCF_IPSBAR + 0x0010002D)
-#define MCFGPIO_SETG		(MCF_IPSBAR + 0x0010002E)
-#define MCFGPIO_SETH		(MCF_IPSBAR + 0x0010002F)
-#define MCFGPIO_SETJ		(MCF_IPSBAR + 0x00100030)
-#define MCFGPIO_SETDD		(MCF_IPSBAR + 0x00100031)
-#define MCFGPIO_SETEH		(MCF_IPSBAR + 0x00100032)
-#define MCFGPIO_SETEL		(MCF_IPSBAR + 0x00100033)
-#define MCFGPIO_SETAS		(MCF_IPSBAR + 0x00100034)
-#define MCFGPIO_SETQS		(MCF_IPSBAR + 0x00100035)
-#define MCFGPIO_SETSD		(MCF_IPSBAR + 0x00100036)
-#define MCFGPIO_SETTC		(MCF_IPSBAR + 0x00100037)
-#define MCFGPIO_SETTD		(MCF_IPSBAR + 0x00100038)
-#define MCFGPIO_SETUA		(MCF_IPSBAR + 0x00100039)
-
-#define MCFGPIO_CLRA		(MCF_IPSBAR + 0x0010003C)
-#define MCFGPIO_CLRB		(MCF_IPSBAR + 0x0010003D)
-#define MCFGPIO_CLRC		(MCF_IPSBAR + 0x0010003E)
-#define MCFGPIO_CLRD		(MCF_IPSBAR + 0x0010003F)
-#define MCFGPIO_CLRE		(MCF_IPSBAR + 0x00100040)
-#define MCFGPIO_CLRF		(MCF_IPSBAR + 0x00100041)
-#define MCFGPIO_CLRG		(MCF_IPSBAR + 0x00100042)
-#define MCFGPIO_CLRH		(MCF_IPSBAR + 0x00100043)
-#define MCFGPIO_CLRJ		(MCF_IPSBAR + 0x00100044)
-#define MCFGPIO_CLRDD		(MCF_IPSBAR + 0x00100045)
-#define MCFGPIO_CLREH		(MCF_IPSBAR + 0x00100046)
-#define MCFGPIO_CLREL		(MCF_IPSBAR + 0x00100047)
-#define MCFGPIO_CLRAS		(MCF_IPSBAR + 0x00100048)
-#define MCFGPIO_CLRQS		(MCF_IPSBAR + 0x00100049)
-#define MCFGPIO_CLRSD		(MCF_IPSBAR + 0x0010004A)
-#define MCFGPIO_CLRTC		(MCF_IPSBAR + 0x0010004B)
-#define MCFGPIO_CLRTD		(MCF_IPSBAR + 0x0010004C)
-#define MCFGPIO_CLRUA		(MCF_IPSBAR + 0x0010004D)
+#define MCFGPIO_PCLRR_A		(MCF_IPSBAR + 0x0010003C)
+#define MCFGPIO_PCLRR_B		(MCF_IPSBAR + 0x0010003D)
+#define MCFGPIO_PCLRR_C		(MCF_IPSBAR + 0x0010003E)
+#define MCFGPIO_PCLRR_D		(MCF_IPSBAR + 0x0010003F)
+#define MCFGPIO_PCLRR_E		(MCF_IPSBAR + 0x00100040)
+#define MCFGPIO_PCLRR_F		(MCF_IPSBAR + 0x00100041)
+#define MCFGPIO_PCLRR_G		(MCF_IPSBAR + 0x00100042)
+#define MCFGPIO_PCLRR_H		(MCF_IPSBAR + 0x00100043)
+#define MCFGPIO_PCLRR_J		(MCF_IPSBAR + 0x00100044)
+#define MCFGPIO_PCLRR_DD	(MCF_IPSBAR + 0x00100045)
+#define MCFGPIO_PCLRR_EH	(MCF_IPSBAR + 0x00100046)
+#define MCFGPIO_PCLRR_EL	(MCF_IPSBAR + 0x00100047)
+#define MCFGPIO_PCLRR_AS	(MCF_IPSBAR + 0x00100048)
+#define MCFGPIO_PCLRR_QS	(MCF_IPSBAR + 0x00100049)
+#define MCFGPIO_PCLRR_SD	(MCF_IPSBAR + 0x0010004A)
+#define MCFGPIO_PCLRR_TC	(MCF_IPSBAR + 0x0010004B)
+#define MCFGPIO_PCLRR_TD	(MCF_IPSBAR + 0x0010004C)
+#define MCFGPIO_PCLRR_UA	(MCF_IPSBAR + 0x0010004D)
 
 #define MCFGPIO_PBCDPAR		(MCF_IPSBAR + 0x00100050)
 #define MCFGPIO_PFPAR		(MCF_IPSBAR + 0x00100051)
@@ -242,11 +223,11 @@
  * definitions for generic gpio support
  *
  */
-#define MCFGPIO_PODR		MCFGPIO_PORTA	/* port output data */
-#define MCFGPIO_PDDR		MCFGPIO_DDRA	/* port data direction */
-#define MCFGPIO_PPDR		MCFGPIO_PORTAP	/* port pin data */
-#define MCFGPIO_SETR		MCFGPIO_SETA	/* set output */
-#define MCFGPIO_CLRR		MCFGPIO_CLRA	/* clr output */
+#define MCFGPIO_PODR		MCFGPIO_PODR_A	/* port output data */
+#define MCFGPIO_PDDR		MCFGPIO_PDDR_A	/* port data direction */
+#define MCFGPIO_PPDR		MCFGPIO_PPDSDR_A/* port pin data */
+#define MCFGPIO_SETR		MCFGPIO_PPDSDR_A/* set output */
+#define MCFGPIO_CLRR		MCFGPIO_PCLRR_A	/* clr output */
 
 #define MCFGPIO_IRQ_MAX		8
 #define MCFGPIO_IRQ_VECBASE	MCFINT_VECBASE
diff --git a/arch/m68k/include/asm/mcfgpio.h b/arch/m68k/include/asm/mcfgpio.h
index ee5e4cc..fe468ea 100644
--- a/arch/m68k/include/asm/mcfgpio.h
+++ b/arch/m68k/include/asm/mcfgpio.h
@@ -29,6 +29,9 @@
 	const u8 *gpio_to_pinmux;
 };
 
+extern struct mcf_gpio_chip mcf_gpio_chips[];
+extern unsigned int mcf_gpio_chips_size;
+
 int mcf_gpio_direction_input(struct gpio_chip *, unsigned);
 int mcf_gpio_get_value(struct gpio_chip *, unsigned);
 int mcf_gpio_direction_output(struct gpio_chip *, unsigned, int);
@@ -37,4 +40,58 @@
 int mcf_gpio_request(struct gpio_chip *, unsigned);
 void mcf_gpio_free(struct gpio_chip *, unsigned);
 
+/*
+ *	Define macros to ease the pain of setting up the GPIO tables. There
+ *	are two cases we need to deal with here; they cover all currently
+ *	available ColdFire GPIO hardware. There are of course minor differences
+ *	in the layout and number of bits in each ColdFire part, but the macros
+ *	take all of that into account.
+ *
+ *	First is the conventional case of GPIO registers where we toggle
+ *	individual bits in a register, preserving the other bits in that
+ *	register. For lack of a better term I have called this the slow method.
+ */
+#define	MCFGPS(mlabel, mbase, mngpio, mpddr, mpodr, mppdr)		    \
+	{								    \
+		.gpio_chip			= {			    \
+			.label			= #mlabel,		    \
+			.request		= mcf_gpio_request,	    \
+			.free			= mcf_gpio_free,	    \
+			.direction_input	= mcf_gpio_direction_input, \
+			.direction_output	= mcf_gpio_direction_output,\
+			.get			= mcf_gpio_get_value,	    \
+			.set			= mcf_gpio_set_value,       \
+			.base			= mbase,		    \
+			.ngpio			= mngpio,		    \
+		},							    \
+		.pddr		= (void __iomem *) mpddr,		    \
+		.podr		= (void __iomem *) mpodr,		    \
+		.ppdr		= (void __iomem *) mppdr,		    \
+	}
+
+/*
+ *	Second is the faster case, where we have set and clear registers
+ *	that allow us to set or clear a bit with a single write, not having
+ *	to worry about preserving other bits.
+ */
+#define	MCFGPF(mlabel, mbase, mngpio)					    \
+	{								    \
+		.gpio_chip			= {			    \
+			.label			= #mlabel,		    \
+			.request		= mcf_gpio_request,	    \
+			.free			= mcf_gpio_free,	    \
+			.direction_input	= mcf_gpio_direction_input, \
+			.direction_output	= mcf_gpio_direction_output,\
+			.get			= mcf_gpio_get_value,	    \
+			.set			= mcf_gpio_set_value_fast,  \
+			.base			= mbase,		    \
+			.ngpio			= mngpio,		    \
+		},							    \
+		.pddr		= (void __iomem *) MCFGPIO_PDDR_##mlabel,   \
+		.podr		= (void __iomem *) MCFGPIO_PODR_##mlabel,   \
+		.ppdr		= (void __iomem *) MCFGPIO_PPDSDR_##mlabel, \
+		.setr		= (void __iomem *) MCFGPIO_PPDSDR_##mlabel, \
+		.clrr		= (void __iomem *) MCFGPIO_PCLRR_##mlabel,  \
+	}
+
 #endif
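
The MCFGPS() and MCFGPF() helpers above capture the two ways ColdFire parts expose GPIO output: banks with only an output data register need a read-modify-write per pin update (the slow method), while banks with dedicated set (PPDSDR) and clear (PCLRR) registers can do it in a single write. A small simulation of the difference on plain memory; the register behaviour is modelled by hand, and the real PCLRR write uses the complemented mask, which the model glosses over:

#include <stdio.h>
#include <stdint.h>

/* "Slow" bank: only an output data register, so each update is a
 * read-modify-write of PODR. */
static uint8_t slow_podr;

static void slow_gpio_set(unsigned int pin, int value)
{
	uint8_t val = slow_podr;		/* read   */

	if (value)
		val |= 1u << pin;		/* modify */
	else
		val &= ~(1u << pin);
	slow_podr = val;			/* write  */
}

/* "Fast" bank: the hardware applies set/clear itself, one write per update. */
static uint8_t fast_podr;

static void write_set_reg(uint8_t mask)		{ fast_podr |= mask; }	/* PPDSDR */
static void write_clear_reg(uint8_t mask)	{ fast_podr &= ~mask; }	/* PCLRR (simplified) */

static void fast_gpio_set(unsigned int pin, int value)
{
	if (value)
		write_set_reg(1u << pin);
	else
		write_clear_reg(1u << pin);
}

int main(void)
{
	slow_gpio_set(3, 1);
	fast_gpio_set(3, 1);
	printf("slow PODR=0x%02x, fast PODR=0x%02x\n", slow_podr, fast_podr);
	return 0;
}
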
diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h
index 019caa7..f4043ae 100644
--- a/arch/m68k/include/asm/unaligned.h
+++ b/arch/m68k/include/asm/unaligned.h
@@ -2,7 +2,7 @@
 #define _ASM_M68K_UNALIGNED_H
 
 
-#ifdef CONFIG_COLDFIRE
+#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000)
 #include <linux/unaligned/be_struct.h>
 #include <linux/unaligned/le_byteshift.h>
 #include <linux/unaligned/generic.h>
diff --git a/arch/m68k/include/asm/vga.h b/arch/m68k/include/asm/vga.h
new file mode 100644
index 0000000..d3aa140
--- /dev/null
+++ b/arch/m68k/include/asm/vga.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_M68K_VGA_H
+#define _ASM_M68K_VGA_H
+
+#include <asm/raw_io.h>
+
+/*
+ * FIXME
+ * Ugh, we don't have PCI space, so map readb() and friends to use raw I/O
+ * accessors, which are identical to the z_*() Zorro bus accessors.
+ * This should make cirrusfb work again on Amiga.
+ */
+#undef inb_p
+#undef inw_p
+#undef outb_p
+#undef outw
+#undef readb
+#undef writeb
+#undef writew
+#define inb_p(port)		0
+#define inw_p(port)		0
+#define outb_p(port, val)	do { } while (0)
+#define outw(port, val)		do { } while (0)
+#define readb			raw_inb
+#define writeb			raw_outb
+#define writew			raw_outw
+
+#endif /* _ASM_M68K_VGA_H */
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 90e8cb7..f6daf6e 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -1,5 +1,164 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#undef DEBUG
+
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+
+#include <asm/pgalloc.h>
+
 #ifdef CONFIG_MMU
-#include "dma_mm.c"
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *handle, gfp_t flag)
+{
+	struct page *page, **map;
+	pgprot_t pgprot;
+	void *addr;
+	int i, order;
+
+	pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	page = alloc_pages(flag, order);
+	if (!page)
+		return NULL;
+
+	*handle = page_to_phys(page);
+	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
+	if (!map) {
+		__free_pages(page, order);
+		return NULL;
+	}
+	split_page(page, order);
+
+	order = 1 << order;
+	size >>= PAGE_SHIFT;
+	map[0] = page;
+	for (i = 1; i < size; i++)
+		map[i] = page + i;
+	for (; i < order; i++)
+		__free_page(page + i);
+	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+	if (CPU_IS_040_OR_060)
+		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
+	else
+		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
+	addr = vmap(map, size, VM_MAP, pgprot);
+	kfree(map);
+
+	return addr;
+}
+
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *addr, dma_addr_t handle)
+{
+	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
+	vfree(addr);
+}
+
 #else
-#include "dma_no.c"
-#endif
+
+#include <asm/cacheflush.h>
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			   dma_addr_t *dma_handle, gfp_t gfp)
+{
+	void *ret;
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
+		gfp |= GFP_DMA;
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		*dma_handle = virt_to_phys(ret);
+	}
+	return ret;
+}
+
+void dma_free_coherent(struct device *dev, size_t size,
+			 void *vaddr, dma_addr_t dma_handle)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+
+#endif /* CONFIG_MMU */
+
+EXPORT_SYMBOL(dma_alloc_coherent);
+EXPORT_SYMBOL(dma_free_coherent);
+
+void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+				size_t size, enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		cache_push(handle, size);
+		break;
+	case DMA_FROM_DEVICE:
+		cache_clear(handle, size);
+		break;
+	default:
+		if (printk_ratelimit())
+			printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
+		break;
+	}
+}
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
+			    enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < nents; sg++, i++)
+		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
+			  enum dma_data_direction dir)
+{
+	dma_addr_t handle = virt_to_bus(addr);
+
+	dma_sync_single_for_device(dev, handle, size, dir);
+	return handle;
+}
+EXPORT_SYMBOL(dma_map_single);
+
+dma_addr_t dma_map_page(struct device *dev, struct page *page,
+			unsigned long offset, size_t size,
+			enum dma_data_direction dir)
+{
+	dma_addr_t handle = page_to_phys(page) + offset;
+
+	dma_sync_single_for_device(dev, handle, size, dir);
+	return handle;
+}
+EXPORT_SYMBOL(dma_map_page);
+
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	       enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < nents; sg++, i++) {
+		sg->dma_address = sg_phys(sg);
+		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
+	}
+	return nents;
+}
+EXPORT_SYMBOL(dma_map_sg);
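
The merged dma.c keeps one streaming-DMA rule for both MMU and non-MMU builds: before a device reads a buffer (DMA_TO_DEVICE) any dirty cache lines are pushed out to memory, and before the CPU consumes data the device wrote (DMA_FROM_DEVICE) the cached copy is simply discarded so stale lines are not read back. A stand-alone sketch of that dispatch with stubbed cache operations:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Stubs standing in for cache_push()/cache_clear(): just report the action. */
static void cache_push(uintptr_t paddr, size_t len)
{
	printf("write back + invalidate %zu bytes at 0x%lx\n",
	       len, (unsigned long)paddr);
}

static void cache_clear(uintptr_t paddr, size_t len)
{
	printf("invalidate (discard) %zu bytes at 0x%lx\n",
	       len, (unsigned long)paddr);
}

static void sync_for_device(uintptr_t handle, size_t size,
			    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_push(handle, size);	/* device is about to read it  */
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);	/* device is about to write it */
		break;
	}
}

int main(void)
{
	sync_for_device(0x1000, 512, DMA_TO_DEVICE);
	sync_for_device(0x2000, 512, DMA_FROM_DEVICE);
	return 0;
}
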
diff --git a/arch/m68k/kernel/dma_mm.c b/arch/m68k/kernel/dma_mm.c
deleted file mode 100644
index a3c471b..0000000
--- a/arch/m68k/kernel/dma_mm.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#undef DEBUG
-
-#include <linux/dma-mapping.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-
-#include <asm/pgalloc.h>
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *handle, gfp_t flag)
-{
-	struct page *page, **map;
-	pgprot_t pgprot;
-	void *addr;
-	int i, order;
-
-	pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);
-
-	size = PAGE_ALIGN(size);
-	order = get_order(size);
-
-	page = alloc_pages(flag, order);
-	if (!page)
-		return NULL;
-
-	*handle = page_to_phys(page);
-	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
-	if (!map) {
-		__free_pages(page, order);
-		return NULL;
-	}
-	split_page(page, order);
-
-	order = 1 << order;
-	size >>= PAGE_SHIFT;
-	map[0] = page;
-	for (i = 1; i < size; i++)
-		map[i] = page + i;
-	for (; i < order; i++)
-		__free_page(page + i);
-	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
-	if (CPU_IS_040_OR_060)
-		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
-	else
-		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
-	addr = vmap(map, size, VM_MAP, pgprot);
-	kfree(map);
-
-	return addr;
-}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *addr, dma_addr_t handle)
-{
-	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
-	vfree(addr);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-				size_t size, enum dma_data_direction dir)
-{
-	switch (dir) {
-	case DMA_TO_DEVICE:
-		cache_push(handle, size);
-		break;
-	case DMA_FROM_DEVICE:
-		cache_clear(handle, size);
-		break;
-	default:
-		if (printk_ratelimit())
-			printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
-		break;
-	}
-}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
-			    enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; sg++, i++)
-		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
-}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
-			  enum dma_data_direction dir)
-{
-	dma_addr_t handle = virt_to_bus(addr);
-
-	dma_sync_single_for_device(dev, handle, size, dir);
-	return handle;
-}
-EXPORT_SYMBOL(dma_map_single);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-			unsigned long offset, size_t size,
-			enum dma_data_direction dir)
-{
-	dma_addr_t handle = page_to_phys(page) + offset;
-
-	dma_sync_single_for_device(dev, handle, size, dir);
-	return handle;
-}
-EXPORT_SYMBOL(dma_map_page);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	       enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; sg++, i++) {
-		sg->dma_address = sg_phys(sg);
-		dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
-	}
-	return nents;
-}
-EXPORT_SYMBOL(dma_map_sg);
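The MMU allocator removed here rounds the request up to a power-of-two page
order, split_page()s the allocation, keeps only the pages it needs for the
vmap() mapping and frees the tail immediately.  A small worked sketch of that
arithmetic, assuming 4 KiB pages; the 3-page request and the helper name are
made up for illustration.

#include <linux/mm.h>
#include <linux/printk.h>

static void example_coherent_rounding(void)
{
	size_t size = 3 * PAGE_SIZE;	 /* caller asks for three pages        */
	int order = get_order(size);	 /* 2, so 1 << 2 = 4 pages are allocated */
	int kept = size >> PAGE_SHIFT;	 /* 3 pages go into map[] for vmap()   */
	int freed = (1 << order) - kept; /* 1 tail page is handed straight back */

	pr_debug("order=%d kept=%d freed=%d\n", order, kept, freed);
}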
diff --git a/arch/m68k/kernel/dma_no.c b/arch/m68k/kernel/dma_no.c
deleted file mode 100644
index f1dc3fc..0000000
--- a/arch/m68k/kernel/dma_no.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * We never have any address translations to worry about, so this
- * is just alloc/free.
- */
-
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/export.h>
-#include <asm/cacheflush.h>
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t gfp)
-{
-	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
-	ret = (void *)__get_free_pages(gfp, get_order(size));
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_phys(ret);
-	}
-	return ret;
-}
-
-void dma_free_coherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle)
-{
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-				size_t size, enum dma_data_direction dir)
-{
-	switch (dir) {
-	case DMA_TO_DEVICE:
-		flush_dcache_range(handle, size);
-		break;
-	case DMA_FROM_DEVICE:
-		/* Should be clear already */
-		break;
-	default:
-		if (printk_ratelimit())
-			printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
-		break;
-	}
-}
-
-EXPORT_SYMBOL(dma_sync_single_for_device);
-dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
-			  enum dma_data_direction dir)
-{
-	dma_addr_t handle = virt_to_phys(addr);
-	flush_dcache_range(handle, size);
-	return handle;
-}
-EXPORT_SYMBOL(dma_map_single);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-			unsigned long offset, size_t size,
-			enum dma_data_direction dir)
-{
-	dma_addr_t handle = page_to_phys(page) + offset;
-	dma_sync_single_for_device(dev, handle, size, dir);
-	return handle;
-}
-EXPORT_SYMBOL(dma_map_page);
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 2e25713..1747c70 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -1,5 +1,1202 @@
+/*
+ *  linux/arch/m68k/kernel/signal.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Linux/m68k support by Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ *
+ * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
+ *
+ * mathemu support by Roman Zippel
+ *  (Note: fpstate in the signal context is completely ignored for the emulator
+ *         and the internal floating point format is put on stack)
+ */
+
+/*
+ * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
+ * Atari :-) Current limitation: Only one sigstack can be active at one time.
+ * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
+ * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
+ * signal handlers!
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/highuid.h>
+#include <linux/personality.h>
+#include <linux/tty.h>
+#include <linux/binfmts.h>
+#include <linux/module.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+#include <asm/ucontext.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
 #ifdef CONFIG_MMU
-#include "signal_mm.c"
+
+/*
+ * Handle the slight differences in classic 68k and ColdFire trap frames.
+ */
+#ifdef CONFIG_COLDFIRE
+#define	FORMAT		4
+#define	FMT4SIZE	0
 #else
-#include "signal_no.c"
+#define	FORMAT		0
+#define	FMT4SIZE	sizeof(((struct frame *)0)->un.fmt4)
 #endif
+
+static const int frame_size_change[16] = {
+  [1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
+  [2]	= sizeof(((struct frame *)0)->un.fmt2),
+  [3]	= sizeof(((struct frame *)0)->un.fmt3),
+  [4]	= FMT4SIZE,
+  [5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
+  [6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
+  [7]	= sizeof(((struct frame *)0)->un.fmt7),
+  [8]	= -1, /* sizeof(((struct frame *)0)->un.fmt8), */
+  [9]	= sizeof(((struct frame *)0)->un.fmt9),
+  [10]	= sizeof(((struct frame *)0)->un.fmta),
+  [11]	= sizeof(((struct frame *)0)->un.fmtb),
+  [12]	= -1, /* sizeof(((struct frame *)0)->un.fmtc), */
+  [13]	= -1, /* sizeof(((struct frame *)0)->un.fmtd), */
+  [14]	= -1, /* sizeof(((struct frame *)0)->un.fmte), */
+  [15]	= -1, /* sizeof(((struct frame *)0)->un.fmtf), */
+};
+
+static inline int frame_extra_sizes(int f)
+{
+	return frame_size_change[f];
+}
+
+int handle_kernel_fault(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fixup;
+	struct pt_regs *tregs;
+
+	/* Are we prepared to handle this kernel fault? */
+	fixup = search_exception_tables(regs->pc);
+	if (!fixup)
+		return 0;
+
+	/* Create a new four word stack frame, discarding the old one. */
+	regs->stkadj = frame_extra_sizes(regs->format);
+	tregs =	(struct pt_regs *)((long)regs + regs->stkadj);
+	tregs->vector = regs->vector;
+	tregs->format = FORMAT;
+	tregs->pc = fixup->fixup;
+	tregs->sr = regs->sr;
+
+	return 1;
+}
+
+void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
+{
+	if (regs->orig_d0 < 0)
+		return;
+	switch (regs->d0) {
+	case -ERESTARTNOHAND:
+	case -ERESTARTSYS:
+	case -ERESTARTNOINTR:
+		regs->d0 = regs->orig_d0;
+		regs->orig_d0 = -1;
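+		/* trap is a 2-byte insn; back the PC up so the syscall is re-issued */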
+		regs->pc -= 2;
+		break;
+	}
+}
+
+static inline void push_cache (unsigned long vaddr)
+{
+	/*
+	 * Using the old cache_push_v() was really a big waste.
+	 *
+	 * What we are trying to do is to flush 8 bytes to ram.
+	 * Flushing 2 cache lines of 16 bytes is much cheaper than
+	 * flushing 1 or 2 pages, as previously done in
+	 * cache_push_v().
+	 *                                                     Jes
+	 */
+	if (CPU_IS_040) {
+		unsigned long temp;
+
+		__asm__ __volatile__ (".chip 68040\n\t"
+				      "nop\n\t"
+				      "ptestr (%1)\n\t"
+				      "movec %%mmusr,%0\n\t"
+				      ".chip 68k"
+				      : "=r" (temp)
+				      : "a" (vaddr));
+
+		temp &= PAGE_MASK;
+		temp |= vaddr & ~PAGE_MASK;
+
+		__asm__ __volatile__ (".chip 68040\n\t"
+				      "nop\n\t"
+				      "cpushl %%bc,(%0)\n\t"
+				      ".chip 68k"
+				      : : "a" (temp));
+	}
+	else if (CPU_IS_060) {
+		unsigned long temp;
+		__asm__ __volatile__ (".chip 68060\n\t"
+				      "plpar (%0)\n\t"
+				      ".chip 68k"
+				      : "=a" (temp)
+				      : "0" (vaddr));
+		__asm__ __volatile__ (".chip 68060\n\t"
+				      "cpushl %%bc,(%0)\n\t"
+				      ".chip 68k"
+				      : : "a" (temp));
+	} else if (!CPU_IS_COLDFIRE) {
+		/*
+		 * 68030/68020 have no writeback cache;
+		 * still need to clear icache.
+		 * Note that vaddr is guaranteed to be long word aligned.
+		 */
+		unsigned long temp;
+		asm volatile ("movec %%cacr,%0" : "=r" (temp));
+		temp += 4;
+		asm volatile ("movec %0,%%caar\n\t"
+			      "movec %1,%%cacr"
+			      : : "r" (vaddr), "r" (temp));
+		asm volatile ("movec %0,%%caar\n\t"
+			      "movec %1,%%cacr"
+			      : : "r" (vaddr + 4), "r" (temp));
+	}
+}
+
+static inline void adjustformat(struct pt_regs *regs)
+{
+}
+
+static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+}
+
+#else /* CONFIG_MMU */
+
+void ret_from_user_signal(void);
+void ret_from_user_rt_signal(void);
+
+static inline int frame_extra_sizes(int f)
+{
+	/* No frame size adjustments required on non-MMU CPUs */
+	return 0;
+}
+
+static inline void adjustformat(struct pt_regs *regs)
+{
+	((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
+	/*
+	 * set format byte to make stack appear modulo 4, which it will
+	 * be when doing the rte
+	 */
+	regs->format = 0x4;
+}
+
+static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+	sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
+}
+
+static inline void push_cache(unsigned long vaddr)
+{
+}
+
+#endif /* CONFIG_MMU */
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
+{
+	mask &= _BLOCKABLE;
+	spin_lock_irq(&current->sighand->siglock);
+	current->saved_sigmask = current->blocked;
+	siginitset(&current->blocked, mask);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_restore_sigmask();
+
+	return -ERESTARTNOHAND;
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+	      struct old_sigaction __user *oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+
+	if (act) {
+		old_sigset_t mask;
+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
+			return -EFAULT;
+		siginitset(&new_ka.sa.sa_mask, mask);
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
+{
+	return do_sigaltstack(uss, uoss, rdusp());
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ *
+ * Keep the return code on the stack quadword aligned!
+ * That makes the cache flush below easier.
+ */
+
+struct sigframe
+{
+	char __user *pretcode;
+	int sig;
+	int code;
+	struct sigcontext __user *psc;
+	char retcode[8];
+	unsigned long extramask[_NSIG_WORDS-1];
+	struct sigcontext sc;
+};
+
+struct rt_sigframe
+{
+	char __user *pretcode;
+	int sig;
+	struct siginfo __user *pinfo;
+	void __user *puc;
+	char retcode[8];
+	struct siginfo info;
+	struct ucontext uc;
+};
+
+#define FPCONTEXT_SIZE	216
+#define uc_fpstate	uc_filler[0]
+#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
+#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
+
+#ifdef CONFIG_FPU
+
+static unsigned char fpu_version;	/* version number of fpu, set by setup_frame */
+
+static inline int restore_fpu_state(struct sigcontext *sc)
+{
+	int err = 1;
+
+	if (FPU_IS_EMU) {
+	    /* restore registers */
+	    memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
+	    memcpy(current->thread.fp, sc->sc_fpregs, 24);
+	    return 0;
+	}
+
+	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+	    /* Verify the frame format.  */
+	    if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+		 (sc->sc_fpstate[0] != fpu_version))
+		goto out;
+	    if (CPU_IS_020_OR_030) {
+		if (m68k_fputype & FPU_68881 &&
+		    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
+		    goto out;
+		if (m68k_fputype & FPU_68882 &&
+		    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
+		    goto out;
+	    } else if (CPU_IS_040) {
+		if (!(sc->sc_fpstate[1] == 0x00 ||
+                      sc->sc_fpstate[1] == 0x28 ||
+                      sc->sc_fpstate[1] == 0x60))
+		    goto out;
+	    } else if (CPU_IS_060) {
+		if (!(sc->sc_fpstate[3] == 0x00 ||
+                      sc->sc_fpstate[3] == 0x60 ||
+		      sc->sc_fpstate[3] == 0xe0))
+		    goto out;
+	    } else if (CPU_IS_COLDFIRE) {
+		if (!(sc->sc_fpstate[0] == 0x00 ||
+		      sc->sc_fpstate[0] == 0x05 ||
+		      sc->sc_fpstate[0] == 0xe5))
+		    goto out;
+	    } else
+		goto out;
+
+	    if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
+				  "fmovel %1,%%fpcr\n\t"
+				  "fmovel %2,%%fpsr\n\t"
+				  "fmovel %3,%%fpiar"
+				  : /* no outputs */
+				  : "m" (sc->sc_fpregs[0]),
+				    "m" (sc->sc_fpcntl[0]),
+				    "m" (sc->sc_fpcntl[1]),
+				    "m" (sc->sc_fpcntl[2]));
+	    } else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fmovemx %0,%%fp0-%%fp1\n\t"
+				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+				  ".chip 68k"
+				  : /* no outputs */
+				  : "m" (*sc->sc_fpregs),
+				    "m" (*sc->sc_fpcntl));
+	    }
+	}
+
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "frestore %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*sc->sc_fpstate));
+	}
+	err = 0;
+
+out:
+	return err;
+}
+
+static inline int rt_restore_fpu_state(struct ucontext __user *uc)
+{
+	unsigned char fpstate[FPCONTEXT_SIZE];
+	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
+	fpregset_t fpregs;
+	int err = 1;
+
+	if (FPU_IS_EMU) {
+		/* restore fpu control register */
+		if (__copy_from_user(current->thread.fpcntl,
+				uc->uc_mcontext.fpregs.f_fpcntl, 12))
+			goto out;
+		/* restore all other fpu registers */
+		if (__copy_from_user(current->thread.fp,
+				uc->uc_mcontext.fpregs.f_fpregs, 96))
+			goto out;
+		return 0;
+	}
+
+	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
+		goto out;
+	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
+			context_size = fpstate[1];
+		/* Verify the frame format.  */
+		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+		     (fpstate[0] != fpu_version))
+			goto out;
+		if (CPU_IS_020_OR_030) {
+			if (m68k_fputype & FPU_68881 &&
+			    !(context_size == 0x18 || context_size == 0xb4))
+				goto out;
+			if (m68k_fputype & FPU_68882 &&
+			    !(context_size == 0x38 || context_size == 0xd4))
+				goto out;
+		} else if (CPU_IS_040) {
+			if (!(context_size == 0x00 ||
+			      context_size == 0x28 ||
+			      context_size == 0x60))
+				goto out;
+		} else if (CPU_IS_060) {
+			if (!(fpstate[3] == 0x00 ||
+			      fpstate[3] == 0x60 ||
+			      fpstate[3] == 0xe0))
+				goto out;
+		} else if (CPU_IS_COLDFIRE) {
+			if (!(fpstate[3] == 0x00 ||
+			      fpstate[3] == 0x05 ||
+			      fpstate[3] == 0xe5))
+				goto out;
+		} else
+			goto out;
+		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
+				     sizeof(fpregs)))
+			goto out;
+
+		if (CPU_IS_COLDFIRE) {
+			__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
+					  "fmovel %1,%%fpcr\n\t"
+					  "fmovel %2,%%fpsr\n\t"
+					  "fmovel %3,%%fpiar"
+					  : /* no outputs */
+					  : "m" (fpregs.f_fpregs[0]),
+					    "m" (fpregs.f_fpcntl[0]),
+					    "m" (fpregs.f_fpcntl[1]),
+					    "m" (fpregs.f_fpcntl[2]));
+		} else {
+			__asm__ volatile (".chip 68k/68881\n\t"
+					  "fmovemx %0,%%fp0-%%fp7\n\t"
+					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+					  ".chip 68k"
+					  : /* no outputs */
+					  : "m" (*fpregs.f_fpregs),
+					    "m" (*fpregs.f_fpcntl));
+		}
+	}
+	if (context_size &&
+	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
+			     context_size))
+		goto out;
+
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("frestore %0" : : "m" (*fpstate));
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "frestore %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*fpstate));
+	}
+	err = 0;
+
+out:
+	return err;
+}
+
+/*
+ * Set up a signal frame.
+ */
+static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+	if (FPU_IS_EMU) {
+		/* save registers */
+		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
+		memcpy(sc->sc_fpregs, current->thread.fp, 24);
+		return;
+	}
+
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("fsave %0"
+				  : : "m" (*sc->sc_fpstate) : "memory");
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fsave %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*sc->sc_fpstate) : "memory");
+	}
+
+	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+		fpu_version = sc->sc_fpstate[0];
+		if (CPU_IS_020_OR_030 &&
+		    regs->vector >= (VEC_FPBRUC * 4) &&
+		    regs->vector <= (VEC_FPNAN * 4)) {
+			/* Clear pending exception in 68882 idle frame */
+			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
+				sc->sc_fpstate[0x38] |= 1 << 3;
+		}
+
+		if (CPU_IS_COLDFIRE) {
+			__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
+					  "fmovel %%fpcr,%1\n\t"
+					  "fmovel %%fpsr,%2\n\t"
+					  "fmovel %%fpiar,%3"
+					  : "=m" (sc->sc_fpregs[0]),
+					    "=m" (sc->sc_fpcntl[0]),
+					    "=m" (sc->sc_fpcntl[1]),
+					    "=m" (sc->sc_fpcntl[2])
+					  : /* no inputs */
+					  : "memory");
+		} else {
+			__asm__ volatile (".chip 68k/68881\n\t"
+					  "fmovemx %%fp0-%%fp1,%0\n\t"
+					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+					  ".chip 68k"
+					  : "=m" (*sc->sc_fpregs),
+					    "=m" (*sc->sc_fpcntl)
+					  : /* no inputs */
+					  : "memory");
+		}
+	}
+}
+
+static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
+{
+	unsigned char fpstate[FPCONTEXT_SIZE];
+	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
+	int err = 0;
+
+	if (FPU_IS_EMU) {
+		/* save fpu control register */
+		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
+				current->thread.fpcntl, 12);
+		/* save all other fpu registers */
+		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
+				current->thread.fp, 96);
+		return err;
+	}
+
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fsave %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*fpstate) : "memory");
+	}
+
+	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
+	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+		fpregset_t fpregs;
+		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
+			context_size = fpstate[1];
+		fpu_version = fpstate[0];
+		if (CPU_IS_020_OR_030 &&
+		    regs->vector >= (VEC_FPBRUC * 4) &&
+		    regs->vector <= (VEC_FPNAN * 4)) {
+			/* Clear pending exception in 68882 idle frame */
+			if (*(unsigned short *) fpstate == 0x1f38)
+				fpstate[0x38] |= 1 << 3;
+		}
+		if (CPU_IS_COLDFIRE) {
+			__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
+					  "fmovel %%fpcr,%1\n\t"
+					  "fmovel %%fpsr,%2\n\t"
+					  "fmovel %%fpiar,%3"
+					  : "=m" (fpregs.f_fpregs[0]),
+					    "=m" (fpregs.f_fpcntl[0]),
+					    "=m" (fpregs.f_fpcntl[1]),
+					    "=m" (fpregs.f_fpcntl[2])
+					  : /* no inputs */
+					  : "memory");
+		} else {
+			__asm__ volatile (".chip 68k/68881\n\t"
+					  "fmovemx %%fp0-%%fp7,%0\n\t"
+					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+					  ".chip 68k"
+					  : "=m" (*fpregs.f_fpregs),
+					    "=m" (*fpregs.f_fpcntl)
+					  : /* no inputs */
+					  : "memory");
+		}
+		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
+				    sizeof(fpregs));
+	}
+	if (context_size)
+		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
+				    context_size);
+	return err;
+}
+
+#else /* CONFIG_FPU */
+
+/*
+ * For the case with no FPU configured these all do nothing.
+ */
+static inline int restore_fpu_state(struct sigcontext *sc)
+{
+	return 0;
+}
+
+static inline int rt_restore_fpu_state(struct ucontext __user *uc)
+{
+	return 0;
+}
+
+static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+}
+
+static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
+{
+	return 0;
+}
+
+#endif /* CONFIG_FPU */
+
+static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
+			       void __user *fp)
+{
+	int fsize = frame_extra_sizes(formatvec >> 12);
+	if (fsize < 0) {
+		/*
+		 * user process trying to return with weird frame format
+		 */
+#ifdef DEBUG
+		printk("user process returning with weird frame format\n");
+#endif
+		return 1;
+	}
+	if (!fsize) {
+		regs->format = formatvec >> 12;
+		regs->vector = formatvec & 0xfff;
+	} else {
+		struct switch_stack *sw = (struct switch_stack *)regs - 1;
+		unsigned long buf[fsize / 2]; /* yes, twice as much */
+
+		/* make sure the in-place expansion won't overwrite live data */
+		if (copy_from_user(buf + fsize / 4, fp, fsize))
+			return 1;
+
+		/* point of no return */
+		regs->format = formatvec >> 12;
+		regs->vector = formatvec & 0xfff;
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
+		__asm__ __volatile__ (
+#ifdef CONFIG_COLDFIRE
+			 "   movel %0,%/sp\n\t"
+			 "   bra ret_from_signal\n"
+#else
+			 "   movel %0,%/a0\n\t"
+			 "   subl %1,%/a0\n\t"     /* make room on stack */
+			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
+			 /* move switch_stack and pt_regs */
+			 "1: movel %0@+,%/a0@+\n\t"
+			 "   dbra %2,1b\n\t"
+			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
+			 "   lsrl  #2,%1\n\t"
+			 "   subql #1,%1\n\t"
+			 /* copy to the gap we'd made */
+			 "2: movel %4@+,%/a0@+\n\t"
+			 "   dbra %1,2b\n\t"
+			 "   bral ret_from_signal\n"
+#endif
+			 : /* no outputs, it doesn't ever return */
+			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
+			   "n" (frame_offset), "a" (buf + fsize/4)
+			 : "a0");
+#undef frame_offset
+	}
+	return 0;
+}
+
+static inline int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
+{
+	int formatvec;
+	struct sigcontext context;
+	int err = 0;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	/* get previous context */
+	if (copy_from_user(&context, usc, sizeof(context)))
+		goto badframe;
+
+	/* restore passed registers */
+	regs->d0 = context.sc_d0;
+	regs->d1 = context.sc_d1;
+	regs->a0 = context.sc_a0;
+	regs->a1 = context.sc_a1;
+	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
+	regs->pc = context.sc_pc;
+	regs->orig_d0 = -1;		/* disable syscall checks */
+	wrusp(context.sc_usp);
+	formatvec = context.sc_formatvec;
+
+	err = restore_fpu_state(&context);
+
+	if (err || mangle_kernel_stack(regs, formatvec, fp))
+		goto badframe;
+
+	return 0;
+
+badframe:
+	return 1;
+}
+
+static inline int
+rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
+		    struct ucontext __user *uc)
+{
+	int temp;
+	greg_t __user *gregs = uc->uc_mcontext.gregs;
+	unsigned long usp;
+	int err;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	err = __get_user(temp, &uc->uc_mcontext.version);
+	if (temp != MCONTEXT_VERSION)
+		goto badframe;
+	/* restore passed registers */
+	err |= __get_user(regs->d0, &gregs[0]);
+	err |= __get_user(regs->d1, &gregs[1]);
+	err |= __get_user(regs->d2, &gregs[2]);
+	err |= __get_user(regs->d3, &gregs[3]);
+	err |= __get_user(regs->d4, &gregs[4]);
+	err |= __get_user(regs->d5, &gregs[5]);
+	err |= __get_user(sw->d6, &gregs[6]);
+	err |= __get_user(sw->d7, &gregs[7]);
+	err |= __get_user(regs->a0, &gregs[8]);
+	err |= __get_user(regs->a1, &gregs[9]);
+	err |= __get_user(regs->a2, &gregs[10]);
+	err |= __get_user(sw->a3, &gregs[11]);
+	err |= __get_user(sw->a4, &gregs[12]);
+	err |= __get_user(sw->a5, &gregs[13]);
+	err |= __get_user(sw->a6, &gregs[14]);
+	err |= __get_user(usp, &gregs[15]);
+	wrusp(usp);
+	err |= __get_user(regs->pc, &gregs[16]);
+	err |= __get_user(temp, &gregs[17]);
+	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
+	regs->orig_d0 = -1;		/* disable syscall checks */
+	err |= __get_user(temp, &uc->uc_formatvec);
+
+	err |= rt_restore_fpu_state(uc);
+
+	if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
+		goto badframe;
+
+	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
+		goto badframe;
+
+	return 0;
+
+badframe:
+	return 1;
+}
+
+asmlinkage int do_sigreturn(unsigned long __unused)
+{
+	struct switch_stack *sw = (struct switch_stack *) &__unused;
+	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
+	unsigned long usp = rdusp();
+	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
+	sigset_t set;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
+	    (_NSIG_WORDS > 1 &&
+	     __copy_from_user(&set.sig[1], &frame->extramask,
+			      sizeof(frame->extramask))))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	current->blocked = set;
+	recalc_sigpending();
+
+	if (restore_sigcontext(regs, &frame->sc, frame + 1))
+		goto badframe;
+	return regs->d0;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+asmlinkage int do_rt_sigreturn(unsigned long __unused)
+{
+	struct switch_stack *sw = (struct switch_stack *) &__unused;
+	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
+	unsigned long usp = rdusp();
+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
+	sigset_t set;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	current->blocked = set;
+	recalc_sigpending();
+
+	if (rt_restore_ucontext(regs, sw, &frame->uc))
+		goto badframe;
+	return regs->d0;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+			     unsigned long mask)
+{
+	sc->sc_mask = mask;
+	sc->sc_usp = rdusp();
+	sc->sc_d0 = regs->d0;
+	sc->sc_d1 = regs->d1;
+	sc->sc_a0 = regs->a0;
+	sc->sc_a1 = regs->a1;
+	sc->sc_sr = regs->sr;
+	sc->sc_pc = regs->pc;
+	sc->sc_formatvec = regs->format << 12 | regs->vector;
+	save_a5_state(sc, regs);
+	save_fpu_state(sc, regs);
+}
+
+static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
+{
+	struct switch_stack *sw = (struct switch_stack *)regs - 1;
+	greg_t __user *gregs = uc->uc_mcontext.gregs;
+	int err = 0;
+
+	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
+	err |= __put_user(regs->d0, &gregs[0]);
+	err |= __put_user(regs->d1, &gregs[1]);
+	err |= __put_user(regs->d2, &gregs[2]);
+	err |= __put_user(regs->d3, &gregs[3]);
+	err |= __put_user(regs->d4, &gregs[4]);
+	err |= __put_user(regs->d5, &gregs[5]);
+	err |= __put_user(sw->d6, &gregs[6]);
+	err |= __put_user(sw->d7, &gregs[7]);
+	err |= __put_user(regs->a0, &gregs[8]);
+	err |= __put_user(regs->a1, &gregs[9]);
+	err |= __put_user(regs->a2, &gregs[10]);
+	err |= __put_user(sw->a3, &gregs[11]);
+	err |= __put_user(sw->a4, &gregs[12]);
+	err |= __put_user(sw->a5, &gregs[13]);
+	err |= __put_user(sw->a6, &gregs[14]);
+	err |= __put_user(rdusp(), &gregs[15]);
+	err |= __put_user(regs->pc, &gregs[16]);
+	err |= __put_user(regs->sr, &gregs[17]);
+	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
+	err |= rt_save_fpu_state(uc, regs);
+	return err;
+}
+
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+{
+	unsigned long usp;
+
+	/* Default to using normal stack.  */
+	usp = rdusp();
+
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		if (!sas_ss_flags(usp))
+			usp = current->sas_ss_sp + current->sas_ss_size;
+	}
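+	/* round down to an 8-byte boundary; keeps the retcode quadword aligned */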
+	return (void __user *)((usp - frame_size) & -8UL);
+}
+
+static int setup_frame (int sig, struct k_sigaction *ka,
+			 sigset_t *set, struct pt_regs *regs)
+{
+	struct sigframe __user *frame;
+	int fsize = frame_extra_sizes(regs->format);
+	struct sigcontext context;
+	int err = 0;
+
+	if (fsize < 0) {
+#ifdef DEBUG
+		printk ("setup_frame: Unknown frame format %#x\n",
+			regs->format);
+#endif
+		goto give_sigsegv;
+	}
+
+	frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);
+
+	if (fsize)
+		err |= copy_to_user (frame + 1, regs + 1, fsize);
+
+	err |= __put_user((current_thread_info()->exec_domain
+			   && current_thread_info()->exec_domain->signal_invmap
+			   && sig < 32
+			   ? current_thread_info()->exec_domain->signal_invmap[sig]
+			   : sig),
+			  &frame->sig);
+
+	err |= __put_user(regs->vector, &frame->code);
+	err |= __put_user(&frame->sc, &frame->psc);
+
+	if (_NSIG_WORDS > 1)
+		err |= copy_to_user(frame->extramask, &set->sig[1],
+				    sizeof(frame->extramask));
+
+	setup_sigcontext(&context, regs, set->sig[0]);
+	err |= copy_to_user (&frame->sc, &context, sizeof(context));
+
+	/* Set up to return from userspace.  */
+#ifdef CONFIG_MMU
+	err |= __put_user(frame->retcode, &frame->pretcode);
+	/* moveq #,d0; trap #0 */
+	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
+			  (long __user *)(frame->retcode));
+#else
+	err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
+#endif
+
+	if (err)
+		goto give_sigsegv;
+
+	push_cache ((unsigned long) &frame->retcode);
+
+	/*
+	 * Set up registers for signal handler.  All the state we are about
+	 * to destroy is successfully copied to sigframe.
+	 */
+	wrusp ((unsigned long) frame);
+	regs->pc = (unsigned long) ka->sa.sa_handler;
+	adjustformat(regs);
+
+	/*
+	 * This is subtle; if we build more than one sigframe, all but the
+	 * first one will see frame format 0 and have fsize == 0, so we won't
+	 * screw stkadj.
+	 */
+	if (fsize)
+		regs->stkadj = fsize;
+
+	/* Prepare to skip over the extra stuff in the exception frame.  */
+	if (regs->stkadj) {
+		struct pt_regs *tregs =
+			(struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+		printk("Performing stackadjust=%04x\n", regs->stkadj);
+#endif
+		/* This must be copied with decreasing addresses to
+                   handle overlaps.  */
+		tregs->vector = 0;
+		tregs->format = 0;
+		tregs->pc = regs->pc;
+		tregs->sr = regs->sr;
+	}
+	return 0;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+	return err;
+}
+
+static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
+			    sigset_t *set, struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame;
+	int fsize = frame_extra_sizes(regs->format);
+	int err = 0;
+
+	if (fsize < 0) {
+#ifdef DEBUG
+		printk ("setup_rt_frame: Unknown frame format %#x\n",
+			regs->format);
+#endif
+		goto give_sigsegv;
+	}
+
+	frame = get_sigframe(ka, regs, sizeof(*frame));
+
+	if (fsize)
+		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
+
+	err |= __put_user((current_thread_info()->exec_domain
+			   && current_thread_info()->exec_domain->signal_invmap
+			   && sig < 32
+			   ? current_thread_info()->exec_domain->signal_invmap[sig]
+			   : sig),
+			  &frame->sig);
+	err |= __put_user(&frame->info, &frame->pinfo);
+	err |= __put_user(&frame->uc, &frame->puc);
+	err |= copy_siginfo_to_user(&frame->info, info);
+
+	/* Create the ucontext.  */
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(NULL, &frame->uc.uc_link);
+	err |= __put_user((void __user *)current->sas_ss_sp,
+			  &frame->uc.uc_stack.ss_sp);
+	err |= __put_user(sas_ss_flags(rdusp()),
+			  &frame->uc.uc_stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+	err |= rt_setup_ucontext(&frame->uc, regs);
+	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
+
+	/* Set up to return from userspace.  */
+#ifdef CONFIG_MMU
+	err |= __put_user(frame->retcode, &frame->pretcode);
+#ifdef __mcoldfire__
+	/* movel #__NR_rt_sigreturn,d0; trap #0 */
+	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
+	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
+			  (long __user *)(frame->retcode + 4));
+#else
+	/* moveq #,d0; notb d0; trap #0 */
+	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
+			  (long __user *)(frame->retcode + 0));
+	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
+#endif
+#else
+	err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
+#endif /* CONFIG_MMU */
+
+	if (err)
+		goto give_sigsegv;
+
+	push_cache ((unsigned long) &frame->retcode);
+
+	/*
+	 * Set up registers for signal handler.  All the state we are about
+	 * to destroy is successfully copied to sigframe.
+	 */
+	wrusp ((unsigned long) frame);
+	regs->pc = (unsigned long) ka->sa.sa_handler;
+	adjustformat(regs);
+
+	/*
+	 * This is subtle; if we build more than one sigframe, all but the
+	 * first one will see frame format 0 and have fsize == 0, so we won't
+	 * screw stkadj.
+	 */
+	if (fsize)
+		regs->stkadj = fsize;
+
+	/* Prepare to skip over the extra stuff in the exception frame.  */
+	if (regs->stkadj) {
+		struct pt_regs *tregs =
+			(struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+		printk("Performing stackadjust=%04x\n", regs->stkadj);
+#endif
+		/* This must be copied with decreasing addresses to
+                   handle overlaps.  */
+		tregs->vector = 0;
+		tregs->format = 0;
+		tregs->pc = regs->pc;
+		tregs->sr = regs->sr;
+	}
+	return 0;
+
+give_sigsegv:
+	force_sigsegv(sig, current);
+	return err;
+}
+
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+	switch (regs->d0) {
+	case -ERESTARTNOHAND:
+		if (!has_handler)
+			goto do_restart;
+		regs->d0 = -EINTR;
+		break;
+
+	case -ERESTART_RESTARTBLOCK:
+		if (!has_handler) {
+			regs->d0 = __NR_restart_syscall;
+			regs->pc -= 2;
+			break;
+		}
+		regs->d0 = -EINTR;
+		break;
+
+	case -ERESTARTSYS:
+		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+			regs->d0 = -EINTR;
+			break;
+		}
+	/* fallthrough */
+	case -ERESTARTNOINTR:
+	do_restart:
+		regs->d0 = regs->orig_d0;
+		regs->pc -= 2;
+		break;
+	}
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
+	      sigset_t *oldset, struct pt_regs *regs)
+{
+	int err;
+	/* are we from a system call? */
+	if (regs->orig_d0 >= 0)
+		/* If so, check system call restarting.. */
+		handle_restart(regs, ka, 1);
+
+	/* set up the stack frame */
+	if (ka->sa.sa_flags & SA_SIGINFO)
+		err = setup_rt_frame(sig, ka, info, oldset, regs);
+	else
+		err = setup_frame(sig, ka, oldset, regs);
+
+	if (err)
+		return;
+
+	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&current->blocked,sig);
+	recalc_sigpending();
+
+	if (test_thread_flag(TIF_DELAYED_TRACE)) {
+		regs->sr &= ~0x8000;
+		send_sig(SIGTRAP, current, 1);
+	}
+
+	clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+asmlinkage void do_signal(struct pt_regs *regs)
+{
+	siginfo_t info;
+	struct k_sigaction ka;
+	int signr;
+	sigset_t *oldset;
+
+	current->thread.esp0 = (unsigned long) regs;
+
+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+		oldset = &current->saved_sigmask;
+	else
+		oldset = &current->blocked;
+
+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+	if (signr > 0) {
+		/* Whee!  Actually deliver the signal.  */
+		handle_signal(signr, &ka, &info, oldset, regs);
+		return;
+	}
+
+	/* Did we come from a system call? */
+	if (regs->orig_d0 >= 0)
+		/* Restart the system call - no handlers present */
+		handle_restart(regs, NULL, 0);
+
+	/* If there's no signal to deliver, we just restore the saved mask.  */
+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+		clear_thread_flag(TIF_RESTORE_SIGMASK);
+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+	}
+}
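setup_frame() above plants the userspace return trampoline with a single
32-bit store, 0x70004e40 + (__NR_sigreturn << 16).  A standalone sketch of how
that constant decomposes into the two m68k instructions; the syscall number
used here is a placeholder, not the real __NR_sigreturn.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int nr = 0x77;			/* hypothetical syscall number */
	uint32_t word = 0x70004e40 + (nr << 16);

	/* big-endian m68k stores the high half of the long word first */
	printf("moveq #nr,%%d0 : %#06x\n", (unsigned)(word >> 16));   /* 0x70NN */
	printf("trap  #0      : %#06x\n", (unsigned)(word & 0xffff)); /* 0x4e40 */
	return 0;
}

The rt variant (moveq #(__NR_rt_sigreturn ^ 0xff),d0; notb d0; trap #0) exists
because moveq sign-extends its 8-bit immediate, so a syscall number above 127
is stored complemented and fixed up with not.b before the trap.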
diff --git a/arch/m68k/kernel/signal_mm.c b/arch/m68k/kernel/signal_mm.c
deleted file mode 100644
index cb856f9..0000000
--- a/arch/m68k/kernel/signal_mm.c
+++ /dev/null
@@ -1,1115 +0,0 @@
-/*
- *  linux/arch/m68k/kernel/signal.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-/*
- * Linux/m68k support by Hamish Macdonald
- *
- * 68060 fixes by Jesper Skov
- *
- * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
- *
- * mathemu support by Roman Zippel
- *  (Note: fpstate in the signal context is completely ignored for the emulator
- *         and the internal floating point format is put on stack)
- */
-
-/*
- * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
- * Atari :-) Current limitation: Only one sigstack can be active at one time.
- * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
- * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
- * signal handlers!
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/syscalls.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/highuid.h>
-#include <linux/personality.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
-#include <linux/module.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/traps.h>
-#include <asm/ucontext.h>
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-static const int frame_extra_sizes[16] = {
-  [1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
-  [2]	= sizeof(((struct frame *)0)->un.fmt2),
-  [3]	= sizeof(((struct frame *)0)->un.fmt3),
-#ifdef CONFIG_COLDFIRE
-  [4]	= 0,
-#else
-  [4]	= sizeof(((struct frame *)0)->un.fmt4),
-#endif
-  [5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
-  [6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
-  [7]	= sizeof(((struct frame *)0)->un.fmt7),
-  [8]	= -1, /* sizeof(((struct frame *)0)->un.fmt8), */
-  [9]	= sizeof(((struct frame *)0)->un.fmt9),
-  [10]	= sizeof(((struct frame *)0)->un.fmta),
-  [11]	= sizeof(((struct frame *)0)->un.fmtb),
-  [12]	= -1, /* sizeof(((struct frame *)0)->un.fmtc), */
-  [13]	= -1, /* sizeof(((struct frame *)0)->un.fmtd), */
-  [14]	= -1, /* sizeof(((struct frame *)0)->un.fmte), */
-  [15]	= -1, /* sizeof(((struct frame *)0)->un.fmtf), */
-};
-
-int handle_kernel_fault(struct pt_regs *regs)
-{
-	const struct exception_table_entry *fixup;
-	struct pt_regs *tregs;
-
-	/* Are we prepared to handle this kernel fault? */
-	fixup = search_exception_tables(regs->pc);
-	if (!fixup)
-		return 0;
-
-	/* Create a new four word stack frame, discarding the old one. */
-	regs->stkadj = frame_extra_sizes[regs->format];
-	tregs =	(struct pt_regs *)((long)regs + regs->stkadj);
-	tregs->vector = regs->vector;
-#ifdef CONFIG_COLDFIRE
-	tregs->format = 4;
-#else
-	tregs->format = 0;
-#endif
-	tregs->pc = fixup->fixup;
-	tregs->sr = regs->sr;
-
-	return 1;
-}
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-asmlinkage int
-sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
-{
-	mask &= _BLOCKABLE;
-	spin_lock_irq(&current->sighand->siglock);
-	current->saved_sigmask = current->blocked;
-	siginitset(&current->blocked, mask);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	current->state = TASK_INTERRUPTIBLE;
-	schedule();
-	set_restore_sigmask();
-
-	return -ERESTARTNOHAND;
-}
-
-asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction __user *act,
-	      struct old_sigaction __user *oact)
-{
-	struct k_sigaction new_ka, old_ka;
-	int ret;
-
-	if (act) {
-		old_sigset_t mask;
-		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
-		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
-		    __get_user(mask, &act->sa_mask))
-			return -EFAULT;
-		siginitset(&new_ka.sa.sa_mask, mask);
-	}
-
-	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
-	if (!ret && oact) {
-		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
-		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
-		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
-			return -EFAULT;
-	}
-
-	return ret;
-}
-
-asmlinkage int
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
-{
-	return do_sigaltstack(uss, uoss, rdusp());
-}
-
-
-/*
- * Do a signal return; undo the signal stack.
- *
- * Keep the return code on the stack quadword aligned!
- * That makes the cache flush below easier.
- */
-
-struct sigframe
-{
-	char __user *pretcode;
-	int sig;
-	int code;
-	struct sigcontext __user *psc;
-	char retcode[8];
-	unsigned long extramask[_NSIG_WORDS-1];
-	struct sigcontext sc;
-};
-
-struct rt_sigframe
-{
-	char __user *pretcode;
-	int sig;
-	struct siginfo __user *pinfo;
-	void __user *puc;
-	char retcode[8];
-	struct siginfo info;
-	struct ucontext uc;
-};
-
-
-static unsigned char fpu_version;	/* version number of fpu, set by setup_frame */
-
-static inline int restore_fpu_state(struct sigcontext *sc)
-{
-	int err = 1;
-
-	if (FPU_IS_EMU) {
-	    /* restore registers */
-	    memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
-	    memcpy(current->thread.fp, sc->sc_fpregs, 24);
-	    return 0;
-	}
-
-	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
-	    /* Verify the frame format.  */
-	    if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
-		 (sc->sc_fpstate[0] != fpu_version))
-		goto out;
-	    if (CPU_IS_020_OR_030) {
-		if (m68k_fputype & FPU_68881 &&
-		    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
-		    goto out;
-		if (m68k_fputype & FPU_68882 &&
-		    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
-		    goto out;
-	    } else if (CPU_IS_040) {
-		if (!(sc->sc_fpstate[1] == 0x00 ||
-                      sc->sc_fpstate[1] == 0x28 ||
-                      sc->sc_fpstate[1] == 0x60))
-		    goto out;
-	    } else if (CPU_IS_060) {
-		if (!(sc->sc_fpstate[3] == 0x00 ||
-                      sc->sc_fpstate[3] == 0x60 ||
-		      sc->sc_fpstate[3] == 0xe0))
-		    goto out;
-	    } else if (CPU_IS_COLDFIRE) {
-		if (!(sc->sc_fpstate[0] == 0x00 ||
-		      sc->sc_fpstate[0] == 0x05 ||
-		      sc->sc_fpstate[0] == 0xe5))
-		    goto out;
-	    } else
-		goto out;
-
-	    if (CPU_IS_COLDFIRE) {
-		__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
-				  "fmovel %1,%%fpcr\n\t"
-				  "fmovel %2,%%fpsr\n\t"
-				  "fmovel %3,%%fpiar"
-				  : /* no outputs */
-				  : "m" (sc->sc_fpregs[0]),
-				    "m" (sc->sc_fpcntl[0]),
-				    "m" (sc->sc_fpcntl[1]),
-				    "m" (sc->sc_fpcntl[2]));
-	    } else {
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fmovemx %0,%%fp0-%%fp1\n\t"
-				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
-				  ".chip 68k"
-				  : /* no outputs */
-				  : "m" (*sc->sc_fpregs),
-				    "m" (*sc->sc_fpcntl));
-	    }
-	}
-
-	if (CPU_IS_COLDFIRE) {
-		__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
-	} else {
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "frestore %0\n\t"
-				  ".chip 68k"
-				  : : "m" (*sc->sc_fpstate));
-	}
-	err = 0;
-
-out:
-	return err;
-}
-
-#define FPCONTEXT_SIZE	216
-#define uc_fpstate	uc_filler[0]
-#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
-#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
-
-static inline int rt_restore_fpu_state(struct ucontext __user *uc)
-{
-	unsigned char fpstate[FPCONTEXT_SIZE];
-	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
-	fpregset_t fpregs;
-	int err = 1;
-
-	if (FPU_IS_EMU) {
-		/* restore fpu control register */
-		if (__copy_from_user(current->thread.fpcntl,
-				uc->uc_mcontext.fpregs.f_fpcntl, 12))
-			goto out;
-		/* restore all other fpu register */
-		if (__copy_from_user(current->thread.fp,
-				uc->uc_mcontext.fpregs.f_fpregs, 96))
-			goto out;
-		return 0;
-	}
-
-	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
-		goto out;
-	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
-		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
-			context_size = fpstate[1];
-		/* Verify the frame format.  */
-		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
-		     (fpstate[0] != fpu_version))
-			goto out;
-		if (CPU_IS_020_OR_030) {
-			if (m68k_fputype & FPU_68881 &&
-			    !(context_size == 0x18 || context_size == 0xb4))
-				goto out;
-			if (m68k_fputype & FPU_68882 &&
-			    !(context_size == 0x38 || context_size == 0xd4))
-				goto out;
-		} else if (CPU_IS_040) {
-			if (!(context_size == 0x00 ||
-			      context_size == 0x28 ||
-			      context_size == 0x60))
-				goto out;
-		} else if (CPU_IS_060) {
-			if (!(fpstate[3] == 0x00 ||
-			      fpstate[3] == 0x60 ||
-			      fpstate[3] == 0xe0))
-				goto out;
-		} else if (CPU_IS_COLDFIRE) {
-			if (!(fpstate[3] == 0x00 ||
-			      fpstate[3] == 0x05 ||
-			      fpstate[3] == 0xe5))
-				goto out;
-		} else
-			goto out;
-		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
-				     sizeof(fpregs)))
-			goto out;
-
-		if (CPU_IS_COLDFIRE) {
-			__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
-					  "fmovel %1,%%fpcr\n\t"
-					  "fmovel %2,%%fpsr\n\t"
-					  "fmovel %3,%%fpiar"
-					  : /* no outputs */
-					  : "m" (fpregs.f_fpregs[0]),
-					    "m" (fpregs.f_fpcntl[0]),
-					    "m" (fpregs.f_fpcntl[1]),
-					    "m" (fpregs.f_fpcntl[2]));
-		} else {
-			__asm__ volatile (".chip 68k/68881\n\t"
-					  "fmovemx %0,%%fp0-%%fp7\n\t"
-					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
-					  ".chip 68k"
-					  : /* no outputs */
-					  : "m" (*fpregs.f_fpregs),
-					    "m" (*fpregs.f_fpcntl));
-		}
-	}
-	if (context_size &&
-	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
-			     context_size))
-		goto out;
-
-	if (CPU_IS_COLDFIRE) {
-		__asm__ volatile ("frestore %0" : : "m" (*fpstate));
-	} else {
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "frestore %0\n\t"
-				  ".chip 68k"
-				  : : "m" (*fpstate));
-	}
-	err = 0;
-
-out:
-	return err;
-}
-
-static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
-			       void __user *fp)
-{
-	int fsize = frame_extra_sizes[formatvec >> 12];
-	if (fsize < 0) {
-		/*
-		 * user process trying to return with weird frame format
-		 */
-#ifdef DEBUG
-		printk("user process returning with weird frame format\n");
-#endif
-		return 1;
-	}
-	if (!fsize) {
-		regs->format = formatvec >> 12;
-		regs->vector = formatvec & 0xfff;
-	} else {
-		struct switch_stack *sw = (struct switch_stack *)regs - 1;
-		unsigned long buf[fsize / 2]; /* yes, twice as much */
-
-		/* that'll make sure that expansion won't crap over data */
-		if (copy_from_user(buf + fsize / 4, fp, fsize))
-			return 1;
-
-		/* point of no return */
-		regs->format = formatvec >> 12;
-		regs->vector = formatvec & 0xfff;
-#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
-		__asm__ __volatile__ (
-#ifdef CONFIG_COLDFIRE
-			 "   movel %0,%/sp\n\t"
-			 "   bra ret_from_signal\n"
-#else
-			 "   movel %0,%/a0\n\t"
-			 "   subl %1,%/a0\n\t"     /* make room on stack */
-			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
-			 /* move switch_stack and pt_regs */
-			 "1: movel %0@+,%/a0@+\n\t"
-			 "   dbra %2,1b\n\t"
-			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
-			 "   lsrl  #2,%1\n\t"
-			 "   subql #1,%1\n\t"
-			 /* copy to the gap we'd made */
-			 "2: movel %4@+,%/a0@+\n\t"
-			 "   dbra %1,2b\n\t"
-			 "   bral ret_from_signal\n"
-#endif
-			 : /* no outputs, it doesn't ever return */
-			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
-			   "n" (frame_offset), "a" (buf + fsize/4)
-			 : "a0");
-#undef frame_offset
-	}
-	return 0;
-}
-
-static inline int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
-{
-	int formatvec;
-	struct sigcontext context;
-	int err;
-
-	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-	/* get previous context */
-	if (copy_from_user(&context, usc, sizeof(context)))
-		goto badframe;
-
-	/* restore passed registers */
-	regs->d0 = context.sc_d0;
-	regs->d1 = context.sc_d1;
-	regs->a0 = context.sc_a0;
-	regs->a1 = context.sc_a1;
-	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
-	regs->pc = context.sc_pc;
-	regs->orig_d0 = -1;		/* disable syscall checks */
-	wrusp(context.sc_usp);
-	formatvec = context.sc_formatvec;
-
-	err = restore_fpu_state(&context);
-
-	if (err || mangle_kernel_stack(regs, formatvec, fp))
-		goto badframe;
-
-	return 0;
-
-badframe:
-	return 1;
-}
-
-static inline int
-rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
-		    struct ucontext __user *uc)
-{
-	int temp;
-	greg_t __user *gregs = uc->uc_mcontext.gregs;
-	unsigned long usp;
-	int err;
-
-	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-	err = __get_user(temp, &uc->uc_mcontext.version);
-	if (temp != MCONTEXT_VERSION)
-		goto badframe;
-	/* restore passed registers */
-	err |= __get_user(regs->d0, &gregs[0]);
-	err |= __get_user(regs->d1, &gregs[1]);
-	err |= __get_user(regs->d2, &gregs[2]);
-	err |= __get_user(regs->d3, &gregs[3]);
-	err |= __get_user(regs->d4, &gregs[4]);
-	err |= __get_user(regs->d5, &gregs[5]);
-	err |= __get_user(sw->d6, &gregs[6]);
-	err |= __get_user(sw->d7, &gregs[7]);
-	err |= __get_user(regs->a0, &gregs[8]);
-	err |= __get_user(regs->a1, &gregs[9]);
-	err |= __get_user(regs->a2, &gregs[10]);
-	err |= __get_user(sw->a3, &gregs[11]);
-	err |= __get_user(sw->a4, &gregs[12]);
-	err |= __get_user(sw->a5, &gregs[13]);
-	err |= __get_user(sw->a6, &gregs[14]);
-	err |= __get_user(usp, &gregs[15]);
-	wrusp(usp);
-	err |= __get_user(regs->pc, &gregs[16]);
-	err |= __get_user(temp, &gregs[17]);
-	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
-	regs->orig_d0 = -1;		/* disable syscall checks */
-	err |= __get_user(temp, &uc->uc_formatvec);
-
-	err |= rt_restore_fpu_state(uc);
-
-	if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
-		goto badframe;
-
-	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
-		goto badframe;
-
-	return 0;
-
-badframe:
-	return 1;
-}
-
-asmlinkage int do_sigreturn(unsigned long __unused)
-{
-	struct switch_stack *sw = (struct switch_stack *) &__unused;
-	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
-	unsigned long usp = rdusp();
-	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
-	sigset_t set;
-
-	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-		goto badframe;
-	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
-	    (_NSIG_WORDS > 1 &&
-	     __copy_from_user(&set.sig[1], &frame->extramask,
-			      sizeof(frame->extramask))))
-		goto badframe;
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-	current->blocked = set;
-	recalc_sigpending();
-
-	if (restore_sigcontext(regs, &frame->sc, frame + 1))
-		goto badframe;
-	return regs->d0;
-
-badframe:
-	force_sig(SIGSEGV, current);
-	return 0;
-}
-
-asmlinkage int do_rt_sigreturn(unsigned long __unused)
-{
-	struct switch_stack *sw = (struct switch_stack *) &__unused;
-	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
-	unsigned long usp = rdusp();
-	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
-	sigset_t set;
-
-	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-		goto badframe;
-	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-		goto badframe;
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-	current->blocked = set;
-	recalc_sigpending();
-
-	if (rt_restore_ucontext(regs, sw, &frame->uc))
-		goto badframe;
-	return regs->d0;
-
-badframe:
-	force_sig(SIGSEGV, current);
-	return 0;
-}
-
-/*
- * Set up a signal frame.
- */
-
-static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
-{
-	if (FPU_IS_EMU) {
-		/* save registers */
-		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
-		memcpy(sc->sc_fpregs, current->thread.fp, 24);
-		return;
-	}
-
-	if (CPU_IS_COLDFIRE) {
-		__asm__ volatile ("fsave %0"
-				  : : "m" (*sc->sc_fpstate) : "memory");
-	} else {
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fsave %0\n\t"
-				  ".chip 68k"
-				  : : "m" (*sc->sc_fpstate) : "memory");
-	}
-
-	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
-		fpu_version = sc->sc_fpstate[0];
-		if (CPU_IS_020_OR_030 &&
-		    regs->vector >= (VEC_FPBRUC * 4) &&
-		    regs->vector <= (VEC_FPNAN * 4)) {
-			/* Clear pending exception in 68882 idle frame */
-			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
-				sc->sc_fpstate[0x38] |= 1 << 3;
-		}
-
-		if (CPU_IS_COLDFIRE) {
-			__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
-					  "fmovel %%fpcr,%1\n\t"
-					  "fmovel %%fpsr,%2\n\t"
-					  "fmovel %%fpiar,%3"
-					  : "=m" (sc->sc_fpregs[0]),
-					    "=m" (sc->sc_fpcntl[0]),
-					    "=m" (sc->sc_fpcntl[1]),
-					    "=m" (sc->sc_fpcntl[2])
-					  : /* no inputs */
-					  : "memory");
-		} else {
-			__asm__ volatile (".chip 68k/68881\n\t"
-					  "fmovemx %%fp0-%%fp1,%0\n\t"
-					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
-					  ".chip 68k"
-					  : "=m" (*sc->sc_fpregs),
-					    "=m" (*sc->sc_fpcntl)
-					  : /* no inputs */
-					  : "memory");
-		}
-	}
-}
-
-static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
-{
-	unsigned char fpstate[FPCONTEXT_SIZE];
-	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
-	int err = 0;
-
-	if (FPU_IS_EMU) {
-		/* save fpu control register */
-		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
-				current->thread.fpcntl, 12);
-		/* save all other fpu register */
-		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
-				current->thread.fp, 96);
-		return err;
-	}
-
-	if (CPU_IS_COLDFIRE) {
-		__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
-	} else {
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fsave %0\n\t"
-				  ".chip 68k"
-				  : : "m" (*fpstate) : "memory");
-	}
-
-	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
-	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
-		fpregset_t fpregs;
-		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
-			context_size = fpstate[1];
-		fpu_version = fpstate[0];
-		if (CPU_IS_020_OR_030 &&
-		    regs->vector >= (VEC_FPBRUC * 4) &&
-		    regs->vector <= (VEC_FPNAN * 4)) {
-			/* Clear pending exception in 68882 idle frame */
-			if (*(unsigned short *) fpstate == 0x1f38)
-				fpstate[0x38] |= 1 << 3;
-		}
-		if (CPU_IS_COLDFIRE) {
-			__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
-					  "fmovel %%fpcr,%1\n\t"
-					  "fmovel %%fpsr,%2\n\t"
-					  "fmovel %%fpiar,%3"
-					  : "=m" (fpregs.f_fpregs[0]),
-					    "=m" (fpregs.f_fpcntl[0]),
-					    "=m" (fpregs.f_fpcntl[1]),
-					    "=m" (fpregs.f_fpcntl[2])
-					  : /* no inputs */
-					  : "memory");
-		} else {
-			__asm__ volatile (".chip 68k/68881\n\t"
-					  "fmovemx %%fp0-%%fp7,%0\n\t"
-					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
-					  ".chip 68k"
-					  : "=m" (*fpregs.f_fpregs),
-					    "=m" (*fpregs.f_fpcntl)
-					  : /* no inputs */
-					  : "memory");
-		}
-		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
-				    sizeof(fpregs));
-	}
-	if (context_size)
-		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
-				    context_size);
-	return err;
-}
-
-static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
-			     unsigned long mask)
-{
-	sc->sc_mask = mask;
-	sc->sc_usp = rdusp();
-	sc->sc_d0 = regs->d0;
-	sc->sc_d1 = regs->d1;
-	sc->sc_a0 = regs->a0;
-	sc->sc_a1 = regs->a1;
-	sc->sc_sr = regs->sr;
-	sc->sc_pc = regs->pc;
-	sc->sc_formatvec = regs->format << 12 | regs->vector;
-	save_fpu_state(sc, regs);
-}
-
-static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
-{
-	struct switch_stack *sw = (struct switch_stack *)regs - 1;
-	greg_t __user *gregs = uc->uc_mcontext.gregs;
-	int err = 0;
-
-	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
-	err |= __put_user(regs->d0, &gregs[0]);
-	err |= __put_user(regs->d1, &gregs[1]);
-	err |= __put_user(regs->d2, &gregs[2]);
-	err |= __put_user(regs->d3, &gregs[3]);
-	err |= __put_user(regs->d4, &gregs[4]);
-	err |= __put_user(regs->d5, &gregs[5]);
-	err |= __put_user(sw->d6, &gregs[6]);
-	err |= __put_user(sw->d7, &gregs[7]);
-	err |= __put_user(regs->a0, &gregs[8]);
-	err |= __put_user(regs->a1, &gregs[9]);
-	err |= __put_user(regs->a2, &gregs[10]);
-	err |= __put_user(sw->a3, &gregs[11]);
-	err |= __put_user(sw->a4, &gregs[12]);
-	err |= __put_user(sw->a5, &gregs[13]);
-	err |= __put_user(sw->a6, &gregs[14]);
-	err |= __put_user(rdusp(), &gregs[15]);
-	err |= __put_user(regs->pc, &gregs[16]);
-	err |= __put_user(regs->sr, &gregs[17]);
-	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
-	err |= rt_save_fpu_state(uc, regs);
-	return err;
-}
-
-static inline void push_cache (unsigned long vaddr)
-{
-	/*
-	 * Using the old cache_push_v() was really a big waste.
-	 *
-	 * What we are trying to do is to flush 8 bytes to ram.
-	 * Flushing 2 cache lines of 16 bytes is much cheaper than
-	 * flushing 1 or 2 pages, as previously done in
-	 * cache_push_v().
-	 *                                                     Jes
-	 */
-	if (CPU_IS_040) {
-		unsigned long temp;
-
-		__asm__ __volatile__ (".chip 68040\n\t"
-				      "nop\n\t"
-				      "ptestr (%1)\n\t"
-				      "movec %%mmusr,%0\n\t"
-				      ".chip 68k"
-				      : "=r" (temp)
-				      : "a" (vaddr));
-
-		temp &= PAGE_MASK;
-		temp |= vaddr & ~PAGE_MASK;
-
-		__asm__ __volatile__ (".chip 68040\n\t"
-				      "nop\n\t"
-				      "cpushl %%bc,(%0)\n\t"
-				      ".chip 68k"
-				      : : "a" (temp));
-	}
-	else if (CPU_IS_060) {
-		unsigned long temp;
-		__asm__ __volatile__ (".chip 68060\n\t"
-				      "plpar (%0)\n\t"
-				      ".chip 68k"
-				      : "=a" (temp)
-				      : "0" (vaddr));
-		__asm__ __volatile__ (".chip 68060\n\t"
-				      "cpushl %%bc,(%0)\n\t"
-				      ".chip 68k"
-				      : : "a" (temp));
-	} else if (!CPU_IS_COLDFIRE) {
-		/*
-		 * 68030/68020 have no writeback cache;
-		 * still need to clear icache.
-		 * Note that vaddr is guaranteed to be long word aligned.
-		 */
-		unsigned long temp;
-		asm volatile ("movec %%cacr,%0" : "=r" (temp));
-		temp += 4;
-		asm volatile ("movec %0,%%caar\n\t"
-			      "movec %1,%%cacr"
-			      : : "r" (vaddr), "r" (temp));
-		asm volatile ("movec %0,%%caar\n\t"
-			      "movec %1,%%cacr"
-			      : : "r" (vaddr + 4), "r" (temp));
-	}
-}
-
-static inline void __user *
-get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
-{
-	unsigned long usp;
-
-	/* Default to using normal stack.  */
-	usp = rdusp();
-
-	/* This is the X/Open sanctioned signal stack switching.  */
-	if (ka->sa.sa_flags & SA_ONSTACK) {
-		if (!sas_ss_flags(usp))
-			usp = current->sas_ss_sp + current->sas_ss_size;
-	}
-	return (void __user *)((usp - frame_size) & -8UL);
-}
-
-static int setup_frame (int sig, struct k_sigaction *ka,
-			 sigset_t *set, struct pt_regs *regs)
-{
-	struct sigframe __user *frame;
-	int fsize = frame_extra_sizes[regs->format];
-	struct sigcontext context;
-	int err = 0;
-
-	if (fsize < 0) {
-#ifdef DEBUG
-		printk ("setup_frame: Unknown frame format %#x\n",
-			regs->format);
-#endif
-		goto give_sigsegv;
-	}
-
-	frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);
-
-	if (fsize)
-		err |= copy_to_user (frame + 1, regs + 1, fsize);
-
-	err |= __put_user((current_thread_info()->exec_domain
-			   && current_thread_info()->exec_domain->signal_invmap
-			   && sig < 32
-			   ? current_thread_info()->exec_domain->signal_invmap[sig]
-			   : sig),
-			  &frame->sig);
-
-	err |= __put_user(regs->vector, &frame->code);
-	err |= __put_user(&frame->sc, &frame->psc);
-
-	if (_NSIG_WORDS > 1)
-		err |= copy_to_user(frame->extramask, &set->sig[1],
-				    sizeof(frame->extramask));
-
-	setup_sigcontext(&context, regs, set->sig[0]);
-	err |= copy_to_user (&frame->sc, &context, sizeof(context));
-
-	/* Set up to return from userspace.  */
-	err |= __put_user(frame->retcode, &frame->pretcode);
-	/* moveq #,d0; trap #0 */
-	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
-			  (long __user *)(frame->retcode));
-
-	if (err)
-		goto give_sigsegv;
-
-	push_cache ((unsigned long) &frame->retcode);
-
-	/*
-	 * Set up registers for signal handler.  All the state we are about
-	 * to destroy is successfully copied to sigframe.
-	 */
-	wrusp ((unsigned long) frame);
-	regs->pc = (unsigned long) ka->sa.sa_handler;
-
-	/*
-	 * This is subtle; if we build more than one sigframe, all but the
-	 * first one will see frame format 0 and have fsize == 0, so we won't
-	 * screw stkadj.
-	 */
-	if (fsize)
-		regs->stkadj = fsize;
-
-	/* Prepare to skip over the extra stuff in the exception frame.  */
-	if (regs->stkadj) {
-		struct pt_regs *tregs =
-			(struct pt_regs *)((ulong)regs + regs->stkadj);
-#ifdef DEBUG
-		printk("Performing stackadjust=%04x\n", regs->stkadj);
-#endif
-		/* This must be copied with decreasing addresses to
-                   handle overlaps.  */
-		tregs->vector = 0;
-		tregs->format = 0;
-		tregs->pc = regs->pc;
-		tregs->sr = regs->sr;
-	}
-	return 0;
-
-give_sigsegv:
-	force_sigsegv(sig, current);
-	return err;
-}
-
-static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
-			    sigset_t *set, struct pt_regs *regs)
-{
-	struct rt_sigframe __user *frame;
-	int fsize = frame_extra_sizes[regs->format];
-	int err = 0;
-
-	if (fsize < 0) {
-#ifdef DEBUG
-		printk ("setup_rt_frame: Unknown frame format %#x\n",
-			regs->format);
-#endif
-		goto give_sigsegv;
-	}
-
-	frame = get_sigframe(ka, regs, sizeof(*frame));
-
-	if (fsize)
-		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
-
-	err |= __put_user((current_thread_info()->exec_domain
-			   && current_thread_info()->exec_domain->signal_invmap
-			   && sig < 32
-			   ? current_thread_info()->exec_domain->signal_invmap[sig]
-			   : sig),
-			  &frame->sig);
-	err |= __put_user(&frame->info, &frame->pinfo);
-	err |= __put_user(&frame->uc, &frame->puc);
-	err |= copy_siginfo_to_user(&frame->info, info);
-
-	/* Create the ucontext.  */
-	err |= __put_user(0, &frame->uc.uc_flags);
-	err |= __put_user(NULL, &frame->uc.uc_link);
-	err |= __put_user((void __user *)current->sas_ss_sp,
-			  &frame->uc.uc_stack.ss_sp);
-	err |= __put_user(sas_ss_flags(rdusp()),
-			  &frame->uc.uc_stack.ss_flags);
-	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-	err |= rt_setup_ucontext(&frame->uc, regs);
-	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
-
-	/* Set up to return from userspace.  */
-	err |= __put_user(frame->retcode, &frame->pretcode);
-#ifdef __mcoldfire__
-	/* movel #__NR_rt_sigreturn,d0; trap #0 */
-	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
-	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
-			  (long __user *)(frame->retcode + 4));
-#else
-	/* moveq #,d0; notb d0; trap #0 */
-	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
-			  (long __user *)(frame->retcode + 0));
-	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
-#endif
-
-	if (err)
-		goto give_sigsegv;
-
-	push_cache ((unsigned long) &frame->retcode);
-
-	/*
-	 * Set up registers for signal handler.  All the state we are about
-	 * to destroy is successfully copied to sigframe.
-	 */
-	wrusp ((unsigned long) frame);
-	regs->pc = (unsigned long) ka->sa.sa_handler;
-
-	/*
-	 * This is subtle; if we build more than one sigframe, all but the
-	 * first one will see frame format 0 and have fsize == 0, so we won't
-	 * screw stkadj.
-	 */
-	if (fsize)
-		regs->stkadj = fsize;
-
-	/* Prepare to skip over the extra stuff in the exception frame.  */
-	if (regs->stkadj) {
-		struct pt_regs *tregs =
-			(struct pt_regs *)((ulong)regs + regs->stkadj);
-#ifdef DEBUG
-		printk("Performing stackadjust=%04x\n", regs->stkadj);
-#endif
-		/* This must be copied with decreasing addresses to
-                   handle overlaps.  */
-		tregs->vector = 0;
-		tregs->format = 0;
-		tregs->pc = regs->pc;
-		tregs->sr = regs->sr;
-	}
-	return 0;
-
-give_sigsegv:
-	force_sigsegv(sig, current);
-	return err;
-}
-
-static inline void
-handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
-{
-	switch (regs->d0) {
-	case -ERESTARTNOHAND:
-		if (!has_handler)
-			goto do_restart;
-		regs->d0 = -EINTR;
-		break;
-
-	case -ERESTART_RESTARTBLOCK:
-		if (!has_handler) {
-			regs->d0 = __NR_restart_syscall;
-			regs->pc -= 2;
-			break;
-		}
-		regs->d0 = -EINTR;
-		break;
-
-	case -ERESTARTSYS:
-		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
-			regs->d0 = -EINTR;
-			break;
-		}
-	/* fallthrough */
-	case -ERESTARTNOINTR:
-	do_restart:
-		regs->d0 = regs->orig_d0;
-		regs->pc -= 2;
-		break;
-	}
-}
-
-void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
-{
-	if (regs->orig_d0 < 0)
-		return;
-	switch (regs->d0) {
-	case -ERESTARTNOHAND:
-	case -ERESTARTSYS:
-	case -ERESTARTNOINTR:
-		regs->d0 = regs->orig_d0;
-		regs->orig_d0 = -1;
-		regs->pc -= 2;
-		break;
-	}
-}
-
-/*
- * OK, we're invoking a handler
- */
-static void
-handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
-	      sigset_t *oldset, struct pt_regs *regs)
-{
-	int err;
-	/* are we from a system call? */
-	if (regs->orig_d0 >= 0)
-		/* If so, check system call restarting.. */
-		handle_restart(regs, ka, 1);
-
-	/* set up the stack frame */
-	if (ka->sa.sa_flags & SA_SIGINFO)
-		err = setup_rt_frame(sig, ka, info, oldset, regs);
-	else
-		err = setup_frame(sig, ka, oldset, regs);
-
-	if (err)
-		return;
-
-	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&current->blocked,sig);
-	recalc_sigpending();
-
-	if (test_thread_flag(TIF_DELAYED_TRACE)) {
-		regs->sr &= ~0x8000;
-		send_sig(SIGTRAP, current, 1);
-	}
-
-	clear_thread_flag(TIF_RESTORE_SIGMASK);
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-asmlinkage void do_signal(struct pt_regs *regs)
-{
-	siginfo_t info;
-	struct k_sigaction ka;
-	int signr;
-	sigset_t *oldset;
-
-	current->thread.esp0 = (unsigned long) regs;
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-	if (signr > 0) {
-		/* Whee!  Actually deliver the signal.  */
-		handle_signal(signr, &ka, &info, oldset, regs);
-		return;
-	}
-
-	/* Did we come from a system call? */
-	if (regs->orig_d0 >= 0)
-		/* Restart the system call - no handlers present */
-		handle_restart(regs, NULL, 0);
-
-	/* If there's no signal to deliver, we just restore the saved mask.  */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
-}
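The retcode[] trampolines written by setup_frame() and setup_rt_frame() above are literal m68k machine code pushed onto the user stack: 0x70004e40 + (__NR_sigreturn << 16) packs "moveq #__NR_sigreturn,%d0" into the upper 16-bit word and "trap #0" (0x4e40) into the lower one, while the non-ColdFire rt variant loads the complemented number and undoes it with "not.b %d0" (0x4600) because __NR_rt_sigreturn does not fit in moveq's signed 8-bit immediate. A small user-space sketch that decomposes those constants; the syscall numbers used here are assumptions for illustration, not taken from this patch:

#include <stdio.h>

/* Assumed m68k syscall numbers, for illustration only. */
#define NR_SIGRETURN	119
#define NR_RT_SIGRETURN	173

int main(void)
{
	unsigned long tramp = 0x70004e40UL + ((unsigned long)NR_SIGRETURN << 16);
	unsigned long rt    = 0x70004600UL + ((unsigned long)(NR_RT_SIGRETURN ^ 0xff) << 16);

	/* setup_frame(): moveq #__NR_sigreturn,%d0 ; trap #0 */
	printf("sigreturn trampoline words:    0x%04lx 0x%04lx\n",
	       tramp >> 16, tramp & 0xffff);

	/* setup_rt_frame(): moveq #(nr ^ 0xff),%d0 ; not.b %d0 ; then trap #0 */
	printf("rt_sigreturn trampoline words: 0x%04lx 0x%04lx 0x4e40\n",
	       rt >> 16, rt & 0xffff);
	return 0;
}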
diff --git a/arch/m68k/kernel/signal_no.c b/arch/m68k/kernel/signal_no.c
deleted file mode 100644
index 36a81bb..0000000
--- a/arch/m68k/kernel/signal_no.c
+++ /dev/null
@@ -1,765 +0,0 @@
-/*
- *  linux/arch/m68knommu/kernel/signal.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-/*
- * Linux/m68k support by Hamish Macdonald
- *
- * 68060 fixes by Jesper Skov
- *
- * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
- *
- * mathemu support by Roman Zippel
- *  (Note: fpstate in the signal context is completely ignored for the emulator
- *         and the internal floating point format is put on stack)
- */
-
-/*
- * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
- * Atari :-) Current limitation: Only one sigstack can be active at one time.
- * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
- * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
- * signal handlers!
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/syscalls.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/highuid.h>
-#include <linux/tty.h>
-#include <linux/personality.h>
-#include <linux/binfmts.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/traps.h>
-#include <asm/ucontext.h>
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
-void ret_from_user_signal(void);
-void ret_from_user_rt_signal(void);
-
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-asmlinkage int
-sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
-{
-	mask &= _BLOCKABLE;
-	spin_lock_irq(&current->sighand->siglock);
-	current->saved_sigmask = current->blocked;
-	siginitset(&current->blocked, mask);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	current->state = TASK_INTERRUPTIBLE;
-	schedule();
-	set_restore_sigmask();
-
-	return -ERESTARTNOHAND;
-}
-
-asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction __user *act,
-	      struct old_sigaction __user *oact)
-{
-	struct k_sigaction new_ka, old_ka;
-	int ret;
-
-	if (act) {
-		old_sigset_t mask;
-		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
-		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
-		    __get_user(mask, &act->sa_mask))
-			return -EFAULT;
-		siginitset(&new_ka.sa.sa_mask, mask);
-	}
-
-	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
-	if (!ret && oact) {
-		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
-		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
-		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
-			return -EFAULT;
-	}
-
-	return ret;
-}
-
-asmlinkage int
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
-{
-	return do_sigaltstack(uss, uoss, rdusp());
-}
-
-
-/*
- * Do a signal return; undo the signal stack.
- *
- * Keep the return code on the stack quadword aligned!
- * That makes the cache flush below easier.
- */
-
-struct sigframe
-{
-	char __user *pretcode;
-	int sig;
-	int code;
-	struct sigcontext __user *psc;
-	char retcode[8];
-	unsigned long extramask[_NSIG_WORDS-1];
-	struct sigcontext sc;
-};
-
-struct rt_sigframe
-{
-	char __user *pretcode;
-	int sig;
-	struct siginfo __user *pinfo;
-	void __user *puc;
-	char retcode[8];
-	struct siginfo info;
-	struct ucontext uc;
-};
-
-#ifdef CONFIG_FPU
-
-static unsigned char fpu_version = 0;	/* version number of fpu, set by setup_frame */
-
-static inline int restore_fpu_state(struct sigcontext *sc)
-{
-	int err = 1;
-
-	if (FPU_IS_EMU) {
-	    /* restore registers */
-	    memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
-	    memcpy(current->thread.fp, sc->sc_fpregs, 24);
-	    return 0;
-	}
-
-	if (sc->sc_fpstate[0]) {
-	    /* Verify the frame format.  */
-	    if (sc->sc_fpstate[0] != fpu_version)
-		goto out;
-
-	    __asm__ volatile (".chip 68k/68881\n\t"
-			      "fmovemx %0,%%fp0-%%fp1\n\t"
-			      "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
-			      ".chip 68k"
-			      : /* no outputs */
-			      : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
-	}
-	__asm__ volatile (".chip 68k/68881\n\t"
-			  "frestore %0\n\t"
-			  ".chip 68k" : : "m" (*sc->sc_fpstate));
-	err = 0;
-
-out:
-	return err;
-}
-
-#define FPCONTEXT_SIZE	216
-#define uc_fpstate	uc_filler[0]
-#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
-#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
-
-static inline int rt_restore_fpu_state(struct ucontext __user *uc)
-{
-	unsigned char fpstate[FPCONTEXT_SIZE];
-	int context_size = 0;
-	fpregset_t fpregs;
-	int err = 1;
-
-	if (FPU_IS_EMU) {
-		/* restore fpu control register */
-		if (__copy_from_user(current->thread.fpcntl,
-				uc->uc_mcontext.fpregs.f_fpcntl, 12))
-			goto out;
-		/* restore all other fpu register */
-		if (__copy_from_user(current->thread.fp,
-				uc->uc_mcontext.fpregs.f_fpregs, 96))
-			goto out;
-		return 0;
-	}
-
-	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
-		goto out;
-	if (fpstate[0]) {
-		context_size = fpstate[1];
-
-		/* Verify the frame format.  */
-		if (fpstate[0] != fpu_version)
-			goto out;
-		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
-		     sizeof(fpregs)))
-			goto out;
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fmovemx %0,%%fp0-%%fp7\n\t"
-				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
-				  ".chip 68k"
-				  : /* no outputs */
-				  : "m" (*fpregs.f_fpregs),
-				    "m" (*fpregs.f_fpcntl));
-	}
-	if (context_size &&
-	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
-			     context_size))
-		goto out;
-	__asm__ volatile (".chip 68k/68881\n\t"
-			  "frestore %0\n\t"
-			  ".chip 68k" : : "m" (*fpstate));
-	err = 0;
-
-out:
-	return err;
-}
-
-#endif
-
-static inline int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp,
-		   int *pd0)
-{
-	int formatvec;
-	struct sigcontext context;
-	int err = 0;
-
-	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-	/* get previous context */
-	if (copy_from_user(&context, usc, sizeof(context)))
-		goto badframe;
-	
-	/* restore passed registers */
-	regs->d1 = context.sc_d1;
-	regs->a0 = context.sc_a0;
-	regs->a1 = context.sc_a1;
-	((struct switch_stack *)regs - 1)->a5 = context.sc_a5;
-	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
-	regs->pc = context.sc_pc;
-	regs->orig_d0 = -1;		/* disable syscall checks */
-	wrusp(context.sc_usp);
-	formatvec = context.sc_formatvec;
-	regs->format = formatvec >> 12;
-	regs->vector = formatvec & 0xfff;
-
-#ifdef CONFIG_FPU
-	err = restore_fpu_state(&context);
-#endif
-
-	*pd0 = context.sc_d0;
-	return err;
-
-badframe:
-	return 1;
-}
-
-static inline int
-rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
-		    struct ucontext __user *uc, int *pd0)
-{
-	int temp;
-	greg_t __user *gregs = uc->uc_mcontext.gregs;
-	unsigned long usp;
-	int err;
-
-	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-	err = __get_user(temp, &uc->uc_mcontext.version);
-	if (temp != MCONTEXT_VERSION)
-		goto badframe;
-	/* restore passed registers */
-	err |= __get_user(regs->d0, &gregs[0]);
-	err |= __get_user(regs->d1, &gregs[1]);
-	err |= __get_user(regs->d2, &gregs[2]);
-	err |= __get_user(regs->d3, &gregs[3]);
-	err |= __get_user(regs->d4, &gregs[4]);
-	err |= __get_user(regs->d5, &gregs[5]);
-	err |= __get_user(sw->d6, &gregs[6]);
-	err |= __get_user(sw->d7, &gregs[7]);
-	err |= __get_user(regs->a0, &gregs[8]);
-	err |= __get_user(regs->a1, &gregs[9]);
-	err |= __get_user(regs->a2, &gregs[10]);
-	err |= __get_user(sw->a3, &gregs[11]);
-	err |= __get_user(sw->a4, &gregs[12]);
-	err |= __get_user(sw->a5, &gregs[13]);
-	err |= __get_user(sw->a6, &gregs[14]);
-	err |= __get_user(usp, &gregs[15]);
-	wrusp(usp);
-	err |= __get_user(regs->pc, &gregs[16]);
-	err |= __get_user(temp, &gregs[17]);
-	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
-	regs->orig_d0 = -1;		/* disable syscall checks */
-	regs->format = temp >> 12;
-	regs->vector = temp & 0xfff;
-
-	if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
-		goto badframe;
-
-	*pd0 = regs->d0;
-	return err;
-
-badframe:
-	return 1;
-}
-
-asmlinkage int do_sigreturn(unsigned long __unused)
-{
-	struct switch_stack *sw = (struct switch_stack *) &__unused;
-	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
-	unsigned long usp = rdusp();
-	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
-	sigset_t set;
-	int d0;
-
-	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-		goto badframe;
-	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
-	    (_NSIG_WORDS > 1 &&
-	     __copy_from_user(&set.sig[1], &frame->extramask,
-			      sizeof(frame->extramask))))
-		goto badframe;
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-	
-	if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
-		goto badframe;
-	return d0;
-
-badframe:
-	force_sig(SIGSEGV, current);
-	return 0;
-}
-
-asmlinkage int do_rt_sigreturn(unsigned long __unused)
-{
-	struct switch_stack *sw = (struct switch_stack *) &__unused;
-	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
-	unsigned long usp = rdusp();
-	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
-	sigset_t set;
-	int d0;
-
-	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-		goto badframe;
-	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-		goto badframe;
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-	
-	if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
-		goto badframe;
-	return d0;
-
-badframe:
-	force_sig(SIGSEGV, current);
-	return 0;
-}
-
-#ifdef CONFIG_FPU
-/*
- * Set up a signal frame.
- */
-
-static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
-{
-	if (FPU_IS_EMU) {
-		/* save registers */
-		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
-		memcpy(sc->sc_fpregs, current->thread.fp, 24);
-		return;
-	}
-
-	__asm__ volatile (".chip 68k/68881\n\t"
-			  "fsave %0\n\t"
-			  ".chip 68k"
-			  : : "m" (*sc->sc_fpstate) : "memory");
-
-	if (sc->sc_fpstate[0]) {
-		fpu_version = sc->sc_fpstate[0];
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fmovemx %%fp0-%%fp1,%0\n\t"
-				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
-				  ".chip 68k"
-				  : "=m" (*sc->sc_fpregs),
-				    "=m" (*sc->sc_fpcntl)
-				  : /* no inputs */
-				  : "memory");
-	}
-}
-
-static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
-{
-	unsigned char fpstate[FPCONTEXT_SIZE];
-	int context_size = 0;
-	int err = 0;
-
-	if (FPU_IS_EMU) {
-		/* save fpu control register */
-		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
-				current->thread.fpcntl, 12);
-		/* save all other fpu register */
-		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
-				current->thread.fp, 96);
-		return err;
-	}
-
-	__asm__ volatile (".chip 68k/68881\n\t"
-			  "fsave %0\n\t"
-			  ".chip 68k"
-			  : : "m" (*fpstate) : "memory");
-
-	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
-	if (fpstate[0]) {
-		fpregset_t fpregs;
-		context_size = fpstate[1];
-		fpu_version = fpstate[0];
-		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fmovemx %%fp0-%%fp7,%0\n\t"
-				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
-				  ".chip 68k"
-				  : "=m" (*fpregs.f_fpregs),
-				    "=m" (*fpregs.f_fpcntl)
-				  : /* no inputs */
-				  : "memory");
-		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
-				    sizeof(fpregs));
-	}
-	if (context_size)
-		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
-				    context_size);
-	return err;
-}
-
-#endif
-
-static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
-			     unsigned long mask)
-{
-	sc->sc_mask = mask;
-	sc->sc_usp = rdusp();
-	sc->sc_d0 = regs->d0;
-	sc->sc_d1 = regs->d1;
-	sc->sc_a0 = regs->a0;
-	sc->sc_a1 = regs->a1;
-	sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
-	sc->sc_sr = regs->sr;
-	sc->sc_pc = regs->pc;
-	sc->sc_formatvec = regs->format << 12 | regs->vector;
-#ifdef CONFIG_FPU
-	save_fpu_state(sc, regs);
-#endif
-}
-
-static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
-{
-	struct switch_stack *sw = (struct switch_stack *)regs - 1;
-	greg_t __user *gregs = uc->uc_mcontext.gregs;
-	int err = 0;
-
-	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
-	err |= __put_user(regs->d0, &gregs[0]);
-	err |= __put_user(regs->d1, &gregs[1]);
-	err |= __put_user(regs->d2, &gregs[2]);
-	err |= __put_user(regs->d3, &gregs[3]);
-	err |= __put_user(regs->d4, &gregs[4]);
-	err |= __put_user(regs->d5, &gregs[5]);
-	err |= __put_user(sw->d6, &gregs[6]);
-	err |= __put_user(sw->d7, &gregs[7]);
-	err |= __put_user(regs->a0, &gregs[8]);
-	err |= __put_user(regs->a1, &gregs[9]);
-	err |= __put_user(regs->a2, &gregs[10]);
-	err |= __put_user(sw->a3, &gregs[11]);
-	err |= __put_user(sw->a4, &gregs[12]);
-	err |= __put_user(sw->a5, &gregs[13]);
-	err |= __put_user(sw->a6, &gregs[14]);
-	err |= __put_user(rdusp(), &gregs[15]);
-	err |= __put_user(regs->pc, &gregs[16]);
-	err |= __put_user(regs->sr, &gregs[17]);
-#ifdef CONFIG_FPU
-	err |= rt_save_fpu_state(uc, regs);
-#endif
-	return err;
-}
-
-static inline void __user *
-get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
-{
-	unsigned long usp;
-
-	/* Default to using normal stack.  */
-	usp = rdusp();
-
-	/* This is the X/Open sanctioned signal stack switching.  */
-	if (ka->sa.sa_flags & SA_ONSTACK) {
-		if (!sas_ss_flags(usp))
-			usp = current->sas_ss_sp + current->sas_ss_size;
-	}
-	return (void __user *)((usp - frame_size) & -8UL);
-}
-
-static int setup_frame (int sig, struct k_sigaction *ka,
-			 sigset_t *set, struct pt_regs *regs)
-{
-	struct sigframe __user *frame;
-	struct sigcontext context;
-	int err = 0;
-
-	frame = get_sigframe(ka, regs, sizeof(*frame));
-
-	err |= __put_user((current_thread_info()->exec_domain
-			   && current_thread_info()->exec_domain->signal_invmap
-			   && sig < 32
-			   ? current_thread_info()->exec_domain->signal_invmap[sig]
-			   : sig),
-			  &frame->sig);
-
-	err |= __put_user(regs->vector, &frame->code);
-	err |= __put_user(&frame->sc, &frame->psc);
-
-	if (_NSIG_WORDS > 1)
-		err |= copy_to_user(frame->extramask, &set->sig[1],
-				    sizeof(frame->extramask));
-
-	setup_sigcontext(&context, regs, set->sig[0]);
-	err |= copy_to_user (&frame->sc, &context, sizeof(context));
-
-	/* Set up to return from userspace.  */
-	err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
-
-	if (err)
-		goto give_sigsegv;
-
-	/* Set up registers for signal handler */
-	wrusp ((unsigned long) frame);
-	regs->pc = (unsigned long) ka->sa.sa_handler;
-	((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
-	regs->format = 0x4; /*set format byte to make stack appear modulo 4 
-						which it will be when doing the rte */
-
-adjust_stack:
-	/* Prepare to skip over the extra stuff in the exception frame.  */
-	if (regs->stkadj) {
-		struct pt_regs *tregs =
-			(struct pt_regs *)((ulong)regs + regs->stkadj);
-#if defined(DEBUG)
-		printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
-#endif
-		/* This must be copied with decreasing addresses to
-                   handle overlaps.  */
-		tregs->vector = 0;
-		tregs->format = 0;
-		tregs->pc = regs->pc;
-		tregs->sr = regs->sr;
-	}
-	return err;
-
-give_sigsegv:
-	force_sigsegv(sig, current);
-	goto adjust_stack;
-}
-
-static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
-			    sigset_t *set, struct pt_regs *regs)
-{
-	struct rt_sigframe __user *frame;
-	int err = 0;
-
-	frame = get_sigframe(ka, regs, sizeof(*frame));
-
-	err |= __put_user((current_thread_info()->exec_domain
-			   && current_thread_info()->exec_domain->signal_invmap
-			   && sig < 32
-			   ? current_thread_info()->exec_domain->signal_invmap[sig]
-			   : sig),
-			  &frame->sig);
-	err |= __put_user(&frame->info, &frame->pinfo);
-	err |= __put_user(&frame->uc, &frame->puc);
-	err |= copy_siginfo_to_user(&frame->info, info);
-
-	/* Create the ucontext.  */
-	err |= __put_user(0, &frame->uc.uc_flags);
-	err |= __put_user(NULL, &frame->uc.uc_link);
-	err |= __put_user((void __user *)current->sas_ss_sp,
-			  &frame->uc.uc_stack.ss_sp);
-	err |= __put_user(sas_ss_flags(rdusp()),
-			  &frame->uc.uc_stack.ss_flags);
-	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-	err |= rt_setup_ucontext(&frame->uc, regs);
-	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
-
-	/* Set up to return from userspace.  */
-	err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
-
-	if (err)
-		goto give_sigsegv;
-
-	/* Set up registers for signal handler */
-	wrusp ((unsigned long) frame);
-	regs->pc = (unsigned long) ka->sa.sa_handler;
-	((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
-	regs->format = 0x4; /*set format byte to make stack appear modulo 4 
-						which it will be when doing the rte */
-
-adjust_stack:
-	/* Prepare to skip over the extra stuff in the exception frame.  */
-	if (regs->stkadj) {
-		struct pt_regs *tregs =
-			(struct pt_regs *)((ulong)regs + regs->stkadj);
-#if defined(DEBUG)
-		printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
-#endif
-		/* This must be copied with decreasing addresses to
-                   handle overlaps.  */
-		tregs->vector = 0;
-		tregs->format = 0;
-		tregs->pc = regs->pc;
-		tregs->sr = regs->sr;
-	}
-	return err;
-
-give_sigsegv:
-	force_sigsegv(sig, current);
-	goto adjust_stack;
-}
-
-static inline void
-handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
-{
-	switch (regs->d0) {
-	case -ERESTARTNOHAND:
-		if (!has_handler)
-			goto do_restart;
-		regs->d0 = -EINTR;
-		break;
-
-	case -ERESTART_RESTARTBLOCK:
-		if (!has_handler) {
-			regs->d0 = __NR_restart_syscall;
-			regs->pc -= 2;
-			break;
-		}
-		regs->d0 = -EINTR;
-		break;
-
-	case -ERESTARTSYS:
-		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
-			regs->d0 = -EINTR;
-			break;
-		}
-	/* fallthrough */
-	case -ERESTARTNOINTR:
-	do_restart:
-		regs->d0 = regs->orig_d0;
-		regs->pc -= 2;
-		break;
-	}
-}
-
-/*
- * OK, we're invoking a handler
- */
-static void
-handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
-	      sigset_t *oldset, struct pt_regs *regs)
-{
-	int err;
-	/* are we from a system call? */
-	if (regs->orig_d0 >= 0)
-		/* If so, check system call restarting.. */
-		handle_restart(regs, ka, 1);
-
-	/* set up the stack frame */
-	if (ka->sa.sa_flags & SA_SIGINFO)
-		err = setup_rt_frame(sig, ka, info, oldset, regs);
-	else
-		err = setup_frame(sig, ka, oldset, regs);
-
-	if (err)
-		return;
-
-	spin_lock_irq(&current->sighand->siglock);
-	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&current->blocked,sig);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	clear_thread_flag(TIF_RESTORE_SIGMASK);
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-asmlinkage void do_signal(struct pt_regs *regs)
-{
-	struct k_sigaction ka;
-	siginfo_t info;
-	int signr;
-	sigset_t *oldset;
-
-	/*
-	 * We want the common case to go fast, which
-	 * is why we may in certain cases get here from
-	 * kernel mode. Just return without doing anything
-	 * if so.
-	 */
-	if (!user_mode(regs))
-		return;
-
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
-		oldset = &current->saved_sigmask;
-	else
-		oldset = &current->blocked;
-
-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-	if (signr > 0) {
-		/* Whee!  Actually deliver the signal.  */
-		handle_signal(signr, &ka, &info, oldset, regs);
-		return;
-	}
-
-	/* Did we come from a system call? */
-	if (regs->orig_d0 >= 0) {
-		/* Restart the system call - no handlers present */
-		handle_restart(regs, NULL, 0);
-	}
-
-	/* If there's no signal to deliver, we just restore the saved mask.  */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
-		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
-	}
-}
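Both copies of get_sigframe() honour SA_ONSTACK by building the frame on the stack registered with sigaltstack(2) and then rounding the address down to an 8-byte boundary. How user space arms that path is independent of this patch; a minimal, hedged example of the consumer side:

#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

/* Register an alternate stack and ask for SIGUSR1 to be delivered on it,
 * which is the case get_sigframe() tests for via SA_ONSTACK. */
static void handler(int sig, siginfo_t *info, void *ucontext)
{
	stack_t oss;

	(void)sig; (void)info; (void)ucontext;
	if (sigaltstack(NULL, &oss) == 0 && (oss.ss_flags & SS_ONSTACK))
		write(STDOUT_FILENO, "handler ran on the alternate stack\n", 35);
}

int main(void)
{
	stack_t ss = { 0 };
	struct sigaction sa = { 0 };

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
		return 1;

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return 0;
}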
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 6b020a8..aeebbb7 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -72,7 +72,8 @@
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct * vma;
-	int write, fault;
+	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 #ifdef DEBUG
 	printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
@@ -87,6 +88,7 @@
 	if (in_atomic() || !mm)
 		goto no_context;
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma(mm, address);
@@ -117,14 +119,13 @@
 #ifdef DEBUG
 	printk("do_page_fault: good_area\n");
 #endif
-	write = 0;
 	switch (error_code & 3) {
 		default:	/* 3: write, present */
 			/* fall through */
 		case 2:		/* write, not present */
 			if (!(vma->vm_flags & VM_WRITE))
 				goto acc_err;
-			write++;
+			flags |= FAULT_FLAG_WRITE;
 			break;
 		case 1:		/* read, present */
 			goto acc_err;
@@ -139,10 +140,14 @@
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
 #ifdef DEBUG
 	printk("handle_mm_fault returns %d\n",fault);
 #endif
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return 0;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -150,10 +155,31 @@
 			goto bus_err;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
 
 	up_read(&mm->mmap_sem);
 	return 0;
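The hunk above teaches do_page_fault() the VM_FAULT_RETRY protocol (mmap_sem is already dropped by __lock_page_or_retry(), FAULT_FLAG_ALLOW_RETRY is cleared, and the fault is retried once), and it also restricts maj_flt/min_flt accounting to the initial attempt, since a retried fault will almost always find the page in the page cache. Those per-task counters are what getrusage(2) reports back to user space; a quick stand-alone illustration (the counts will vary by system):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;
	size_t len = 16 * 1024 * 1024;
	char *buf = malloc(len);

	if (!buf)
		return 1;

	/* Touching fresh anonymous memory triggers minor faults, which the
	 * fault handler accounts into current->min_flt. */
	memset(buf, 1, len);

	if (getrusage(RUSAGE_SELF, &ru) == 0)
		printf("minor faults: %ld, major faults: %ld\n",
		       ru.ru_minflt, ru.ru_majflt);
	free(buf);
	return 0;
}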
diff --git a/arch/m68k/platform/5206/Makefile b/arch/m68k/platform/5206/Makefile
deleted file mode 100644
index b5db056..0000000
--- a/arch/m68k/platform/5206/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Makefile for the m68knommu linux kernel.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to define this, which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs.  You have to
-# set up your HW breakpoints to trigger a DBG interrupt:
-#
-# ccflags-y := -DTRAP_DBG_INTERRUPT
-# asflags-y := -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-obj-y := config.o gpio.o
-
diff --git a/arch/m68k/platform/5206/gpio.c b/arch/m68k/platform/5206/gpio.c
deleted file mode 100644
index b9ab4a1..0000000
--- a/arch/m68k/platform/5206/gpio.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Coldfire generic GPIO support
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-static struct mcf_gpio_chip mcf_gpio_chips[] = {
-	{
-		.gpio_chip			= {
-			.label			= "PP",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFSIM_PADDR,
-		.podr				= (void __iomem *) MCFSIM_PADAT,
-		.ppdr				= (void __iomem *) MCFSIM_PADAT,
-	},
-};
-
-static int __init mcf_gpio_init(void)
-{
-	unsigned i = 0;
-	while (i < ARRAY_SIZE(mcf_gpio_chips))
-		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
-	return 0;
-}
-
-core_initcall(mcf_gpio_init);
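Each of the ColdFire gpio.c files removed here (this one and the ones that follow) did little more than register its port tables with gpiolib through gpiochip_add(); drivers then claim individual lines through the ordinary integer GPIO API. A hypothetical consumer sketch, with an arbitrary GPIO number, not code from this patch:

#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical consumer of one line from a bank registered above. */
#define PP_LED_GPIO	3	/* example number only */

static int __init pp_led_init(void)
{
	int ret;

	ret = gpio_request(PP_LED_GPIO, "pp-led");
	if (ret)
		return ret;

	ret = gpio_direction_output(PP_LED_GPIO, 1);	/* drive the line high */
	if (ret)
		gpio_free(PP_LED_GPIO);
	return ret;
}

static void __exit pp_led_exit(void)
{
	gpio_set_value(PP_LED_GPIO, 0);
	gpio_free(PP_LED_GPIO);
}

module_init(pp_led_init);
module_exit(pp_led_exit);
MODULE_LICENSE("GPL");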
diff --git a/arch/m68k/platform/520x/Makefile b/arch/m68k/platform/520x/Makefile
deleted file mode 100644
index ad3f4e5..0000000
--- a/arch/m68k/platform/520x/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Makefile for the M5208 specific file.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to define this, which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs.  You have to
-# set up your HW breakpoints to trigger a DBG interrupt:
-#
-# ccflags-y := -DTRAP_DBG_INTERRUPT
-# asflags-y := -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-obj-y := config.o gpio.o
diff --git a/arch/m68k/platform/520x/gpio.c b/arch/m68k/platform/520x/gpio.c
deleted file mode 100644
index 9bcc3e4..0000000
--- a/arch/m68k/platform/520x/gpio.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Coldfire generic GPIO support
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-static struct mcf_gpio_chip mcf_gpio_chips[] = {
-	{
-		.gpio_chip			= {
-			.label			= "PIRQ",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFEPORT_EPDDR,
-		.podr				= (void __iomem *) MCFEPORT_EPDR,
-		.ppdr				= (void __iomem *) MCFEPORT_EPPDR,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "CS",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 9,
-			.ngpio			= 3,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_CS,
-		.podr				= (void __iomem *) MCFGPIO_PODR_CS,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_CS,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_CS,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_CS,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FECI2C",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 16,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FECI2C,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FECI2C,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FECI2C,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "QSPI",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 24,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_QSPI,
-		.podr				= (void __iomem *) MCFGPIO_PODR_QSPI,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_QSPI,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_QSPI,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_QSPI,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "TIMER",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 32,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_TIMER,
-		.podr				= (void __iomem *) MCFGPIO_PODR_TIMER,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_TIMER,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_TIMER,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_TIMER,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "UART",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 40,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_UART,
-		.podr				= (void __iomem *) MCFGPIO_PODR_UART,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_UART,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_UART,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_UART,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FECH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 48,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FECH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FECH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FECH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FECH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FECH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FECL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 56,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FECL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FECL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FECL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FECL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FECL,
-	},
-};
-
-static int __init mcf_gpio_init(void)
-{
-	unsigned i = 0;
-	while (i < ARRAY_SIZE(mcf_gpio_chips))
-		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
-	return 0;
-}
-
-core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/523x/Makefile b/arch/m68k/platform/523x/Makefile
deleted file mode 100644
index c04b8f7..0000000
--- a/arch/m68k/platform/523x/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Makefile for the m68knommu linux kernel.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to define this, which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs.  You have to
-# set up your HW breakpoints to trigger a DBG interrupt:
-#
-# ccflags-y := -DTRAP_DBG_INTERRUPT
-# asflags-y := -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-obj-y := config.o gpio.o
diff --git a/arch/m68k/platform/523x/gpio.c b/arch/m68k/platform/523x/gpio.c
deleted file mode 100644
index 327ebf1..0000000
--- a/arch/m68k/platform/523x/gpio.c
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Coldfire generic GPIO support
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-static struct mcf_gpio_chip mcf_gpio_chips[] = {
-	{
-		.gpio_chip			= {
-			.label			= "PIRQ",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.base			= 1,
-			.ngpio			= 7,
-		},
-		.pddr				= (void __iomem *) MCFEPORT_EPDDR,
-		.podr				= (void __iomem *) MCFEPORT_EPDR,
-		.ppdr				= (void __iomem *) MCFEPORT_EPPDR,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "ADDR",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 13,
-			.ngpio			= 3,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_ADDR,
-		.podr				= (void __iomem *) MCFGPIO_PODR_ADDR,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_ADDR,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_ADDR,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_ADDR,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "DATAH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 16,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_DATAH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_DATAH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_DATAH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_DATAH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_DATAH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "DATAL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 24,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_DATAL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_DATAL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_DATAL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_DATAL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_DATAL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "BUSCTL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 32,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_BUSCTL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_BUSCTL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_BUSCTL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "BS",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 40,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_BS,
-		.podr				= (void __iomem *) MCFGPIO_PODR_BS,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_BS,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_BS,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_BS,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "CS",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 49,
-			.ngpio			= 7,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_CS,
-		.podr				= (void __iomem *) MCFGPIO_PODR_CS,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_CS,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_CS,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_CS,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "SDRAM",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 56,
-			.ngpio			= 6,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_SDRAM,
-		.podr				= (void __iomem *) MCFGPIO_PODR_SDRAM,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_SDRAM,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_SDRAM,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_SDRAM,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FECI2C",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 64,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FECI2C,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FECI2C,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FECI2C,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "UARTH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 72,
-			.ngpio			= 2,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_UARTH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_UARTH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_UARTH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_UARTH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_UARTH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "UARTL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 80,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_UARTL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_UARTL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_UARTL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_UARTL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_UARTL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "QSPI",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 88,
-			.ngpio			= 5,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_QSPI,
-		.podr				= (void __iomem *) MCFGPIO_PODR_QSPI,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_QSPI,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_QSPI,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_QSPI,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "TIMER",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 96,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_TIMER,
-		.podr				= (void __iomem *) MCFGPIO_PODR_TIMER,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_TIMER,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_TIMER,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_TIMER,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "ETPU",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 104,
-			.ngpio			= 3,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_ETPU,
-		.podr				= (void __iomem *) MCFGPIO_PODR_ETPU,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_ETPU,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_ETPU,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_ETPU,
-	},
-};
-
-static int __init mcf_gpio_init(void)
-{
-	unsigned i = 0;
-	while (i < ARRAY_SIZE(mcf_gpio_chips))
-		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
-	return 0;
-}
-
-core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/5249/Makefile b/arch/m68k/platform/5249/Makefile
deleted file mode 100644
index 4bed30f..0000000
--- a/arch/m68k/platform/5249/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Makefile for the m68knommu linux kernel.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to define this, which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs.  You have to
-# set up your HW breakpoints to trigger a DBG interrupt:
-#
-# ccflags-y := -DTRAP_DBG_INTERRUPT
-# asflags-y := -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-obj-y := config.o gpio.o intc2.o
-
diff --git a/arch/m68k/platform/5249/gpio.c b/arch/m68k/platform/5249/gpio.c
deleted file mode 100644
index 2b56c6e..0000000
--- a/arch/m68k/platform/5249/gpio.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Coldfire generic GPIO support
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-static struct mcf_gpio_chip mcf_gpio_chips[] = {
-	{
-		.gpio_chip			= {
-			.label			= "GPIO0",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.ngpio			= 32,
-		},
-		.pddr				= (void __iomem *) MCFSIM2_GPIOENABLE,
-		.podr				= (void __iomem *) MCFSIM2_GPIOWRITE,
-		.ppdr				= (void __iomem *) MCFSIM2_GPIOREAD,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "GPIO1",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.base			= 32,
-			.ngpio			= 32,
-		},
-		.pddr				= (void __iomem *) MCFSIM2_GPIO1ENABLE,
-		.podr				= (void __iomem *) MCFSIM2_GPIO1WRITE,
-		.ppdr				= (void __iomem *) MCFSIM2_GPIO1READ,
-	},
-};
-
-static int __init mcf_gpio_init(void)
-{
-	unsigned i = 0;
-	while (i < ARRAY_SIZE(mcf_gpio_chips))
-		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
-	return 0;
-}
-
-core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/5272/Makefile b/arch/m68k/platform/5272/Makefile
deleted file mode 100644
index 34110fc..0000000
--- a/arch/m68k/platform/5272/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to add define this,  which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs.  You have to
-# set up you HW breakpoints to trigger a DBG interrupt:
-#
-# ccflags-y := -DTRAP_DBG_INTERRUPT
-# asflags-y := -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-obj-y := config.o gpio.o intc.o
-
diff --git a/arch/m68k/platform/5272/gpio.c b/arch/m68k/platform/5272/gpio.c
deleted file mode 100644
index 57ac10a..0000000
--- a/arch/m68k/platform/5272/gpio.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Coldfire generic GPIO support
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-static struct mcf_gpio_chip mcf_gpio_chips[] = {
-	{
-		.gpio_chip			= {
-			.label			= "PA",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.ngpio			= 16,
-		},
-		.pddr				= (void __iomem *) MCFSIM_PADDR,
-		.podr				= (void __iomem *) MCFSIM_PADAT,
-		.ppdr				= (void __iomem *) MCFSIM_PADAT,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "PB",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.base			= 16,
-			.ngpio			= 16,
-		},
-		.pddr				= (void __iomem *) MCFSIM_PBDDR,
-		.podr				= (void __iomem *) MCFSIM_PBDAT,
-		.ppdr				= (void __iomem *) MCFSIM_PBDAT,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "PC",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.base			= 32,
-			.ngpio			= 16,
-		},
-		.pddr				= (void __iomem *) MCFSIM_PCDDR,
-		.podr				= (void __iomem *) MCFSIM_PCDAT,
-		.ppdr				= (void __iomem *) MCFSIM_PCDAT,
-	},
-};
-
-static int __init mcf_gpio_init(void)
-{
-	unsigned i = 0;
-	while (i < ARRAY_SIZE(mcf_gpio_chips))
-		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
-	return 0;
-}
-
-core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/527x/Makefile b/arch/m68k/platform/527x/Makefile
deleted file mode 100644
index 6ac4b57..0000000
--- a/arch/m68k/platform/527x/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to add define this,  which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs.  You have to
-# set up you HW breakpoints to trigger a DBG interrupt:
-#
-# ccflags-y := -DTRAP_DBG_INTERRUPT
-# asflags-y := -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-obj-y := config.o gpio.o
-
diff --git a/arch/m68k/platform/527x/gpio.c b/arch/m68k/platform/527x/gpio.c
deleted file mode 100644
index 205da0a..0000000
--- a/arch/m68k/platform/527x/gpio.c
+++ /dev/null
@@ -1,609 +0,0 @@
-/*
- * Coldfire generic GPIO support
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-static struct mcf_gpio_chip mcf_gpio_chips[] = {
-#if defined(CONFIG_M5271)
-	{
-		.gpio_chip			= {
-			.label			= "PIRQ",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.base			= 1,
-			.ngpio			= 7,
-		},
-		.pddr				= (void __iomem *) MCFEPORT_EPDDR,
-		.podr				= (void __iomem *) MCFEPORT_EPDR,
-		.ppdr				= (void __iomem *) MCFEPORT_EPPDR,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "ADDR",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 13,
-			.ngpio			= 3,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_ADDR,
-		.podr				= (void __iomem *) MCFGPIO_PODR_ADDR,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_ADDR,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_ADDR,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_ADDR,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "DATAH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 16,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_DATAH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_DATAH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_DATAH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_DATAH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_DATAH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "DATAL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 24,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_DATAL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_DATAL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_DATAL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_DATAL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_DATAL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "BUSCTL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 32,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_BUSCTL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_BUSCTL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_BUSCTL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "BS",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 40,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_BS,
-		.podr				= (void __iomem *) MCFGPIO_PODR_BS,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_BS,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_BS,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_BS,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "CS",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 49,
-			.ngpio			= 7,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_CS,
-		.podr				= (void __iomem *) MCFGPIO_PODR_CS,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_CS,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_CS,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_CS,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "SDRAM",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 56,
-			.ngpio			= 6,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_SDRAM,
-		.podr				= (void __iomem *) MCFGPIO_PODR_SDRAM,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_SDRAM,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_SDRAM,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_SDRAM,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FECI2C",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 64,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FECI2C,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FECI2C,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FECI2C,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "UARTH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 72,
-			.ngpio			= 2,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_UARTH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_UARTH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_UARTH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_UARTH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_UARTH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "UARTL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 80,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_UARTL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_UARTL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_UARTL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_UARTL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_UARTL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "QSPI",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 88,
-			.ngpio			= 5,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_QSPI,
-		.podr				= (void __iomem *) MCFGPIO_PODR_QSPI,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_QSPI,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_QSPI,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_QSPI,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "TIMER",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 96,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_TIMER,
-		.podr				= (void __iomem *) MCFGPIO_PODR_TIMER,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_TIMER,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_TIMER,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_TIMER,
-	},
-#elif defined(CONFIG_M5275)
-	{
-		.gpio_chip			= {
-			.label			= "PIRQ",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.base			= 1,
-			.ngpio			= 7,
-		},
-		.pddr				= (void __iomem *) MCFEPORT_EPDDR,
-		.podr				= (void __iomem *) MCFEPORT_EPDR,
-		.ppdr				= (void __iomem *) MCFEPORT_EPPDR,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "BUSCTL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 8,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_BUSCTL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_BUSCTL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_BUSCTL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "ADDR",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 21,
-			.ngpio			= 3,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_ADDR,
-		.podr				= (void __iomem *) MCFGPIO_PODR_ADDR,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_ADDR,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_ADDR,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_ADDR,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "CS",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 25,
-			.ngpio			= 7,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_CS,
-		.podr				= (void __iomem *) MCFGPIO_PODR_CS,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_CS,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_CS,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_CS,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FEC0H",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 32,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FEC0H,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FEC0H,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FEC0H,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FEC0H,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FEC0H,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FEC0L",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 40,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FEC0L,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FEC0L,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FEC0L,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FEC0L,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FEC0L,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FECI2C",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 48,
-			.ngpio			= 6,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FECI2C,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FECI2C,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FECI2C,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "QSPI",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 56,
-			.ngpio			= 7,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_QSPI,
-		.podr				= (void __iomem *) MCFGPIO_PODR_QSPI,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_QSPI,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_QSPI,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_QSPI,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "SDRAM",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 64,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_SDRAM,
-		.podr				= (void __iomem *) MCFGPIO_PODR_SDRAM,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_SDRAM,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_SDRAM,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_SDRAM,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "TIMERH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 72,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_TIMERH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_TIMERH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_TIMERH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_TIMERH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_TIMERH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "TIMERL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 80,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_TIMERL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_TIMERL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_TIMERL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_TIMERL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_TIMERL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "UARTL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 88,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_UARTL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_UARTL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_UARTL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_UARTL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_UARTL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FEC1H",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 96,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FEC1H,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FEC1H,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FEC1H,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FEC1H,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FEC1H,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FEC1L",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 104,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FEC1L,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FEC1L,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FEC1L,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FEC1L,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FEC1L,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "BS",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 114,
-			.ngpio			= 2,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_BS,
-		.podr				= (void __iomem *) MCFGPIO_PODR_BS,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_BS,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_BS,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_BS,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "IRQ",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 121,
-			.ngpio			= 7,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_IRQ,
-		.podr				= (void __iomem *) MCFGPIO_PODR_IRQ,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_IRQ,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_IRQ,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_IRQ,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "USBH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 128,
-			.ngpio			= 1,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_USBH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_USBH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_USBH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_USBH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_USBH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "USBL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 136,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_USBL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_USBL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_USBL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_USBL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_USBL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "UARTH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 144,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_UARTH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_UARTH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_UARTH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_UARTH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_UARTH,
-	},
-#endif
-};
-
-static int __init mcf_gpio_init(void)
-{
-	unsigned i = 0;
-	while (i < ARRAY_SIZE(mcf_gpio_chips))
-		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
-	return 0;
-}
-
-core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/528x/Makefile b/arch/m68k/platform/528x/Makefile
deleted file mode 100644
index 6ac4b57..0000000
--- a/arch/m68k/platform/528x/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to add define this,  which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs.  You have to
-# set up you HW breakpoints to trigger a DBG interrupt:
-#
-# ccflags-y := -DTRAP_DBG_INTERRUPT
-# asflags-y := -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-obj-y := config.o gpio.o
-
diff --git a/arch/m68k/platform/528x/gpio.c b/arch/m68k/platform/528x/gpio.c
deleted file mode 100644
index 526db66..0000000
--- a/arch/m68k/platform/528x/gpio.c
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Coldfire generic GPIO support
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-static struct mcf_gpio_chip mcf_gpio_chips[] = {
-	{
-		.gpio_chip			= {
-			.label			= "NQ",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.base			= 1,
-			.ngpio			= 7,
-		},
-		.pddr				= (void __iomem *)MCFEPORT_EPDDR,
-		.podr				= (void __iomem *)MCFEPORT_EPDR,
-		.ppdr				= (void __iomem *)MCFEPORT_EPPDR,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "TA",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 8,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *)MCFGPTA_GPTDDR,
-		.podr				= (void __iomem *)MCFGPTA_GPTPORT,
-		.ppdr				= (void __iomem *)MCFGPTB_GPTPORT,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "TB",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 16,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *)MCFGPTB_GPTDDR,
-		.podr				= (void __iomem *)MCFGPTB_GPTPORT,
-		.ppdr				= (void __iomem *)MCFGPTB_GPTPORT,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "QA",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 24,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *)MCFQADC_DDRQA,
-		.podr				= (void __iomem *)MCFQADC_PORTQA,
-		.ppdr				= (void __iomem *)MCFQADC_PORTQA,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "QB",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 32,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *)MCFQADC_DDRQB,
-		.podr				= (void __iomem *)MCFQADC_PORTQB,
-		.ppdr				= (void __iomem *)MCFQADC_PORTQB,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "A",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 40,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRA,
-		.podr				= (void __iomem *)MCFGPIO_PORTA,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTAP,
-		.setr				= (void __iomem *)MCFGPIO_SETA,
-		.clrr				= (void __iomem *)MCFGPIO_CLRA,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "B",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 48,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRB,
-		.podr				= (void __iomem *)MCFGPIO_PORTB,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTBP,
-		.setr				= (void __iomem *)MCFGPIO_SETB,
-		.clrr				= (void __iomem *)MCFGPIO_CLRB,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "C",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 56,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRC,
-		.podr				= (void __iomem *)MCFGPIO_PORTC,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTCP,
-		.setr				= (void __iomem *)MCFGPIO_SETC,
-		.clrr				= (void __iomem *)MCFGPIO_CLRC,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "D",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 64,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRD,
-		.podr				= (void __iomem *)MCFGPIO_PORTD,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTDP,
-		.setr				= (void __iomem *)MCFGPIO_SETD,
-		.clrr				= (void __iomem *)MCFGPIO_CLRD,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "E",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 72,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRE,
-		.podr				= (void __iomem *)MCFGPIO_PORTE,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTEP,
-		.setr				= (void __iomem *)MCFGPIO_SETE,
-		.clrr				= (void __iomem *)MCFGPIO_CLRE,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "F",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 80,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRF,
-		.podr				= (void __iomem *)MCFGPIO_PORTF,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTFP,
-		.setr				= (void __iomem *)MCFGPIO_SETF,
-		.clrr				= (void __iomem *)MCFGPIO_CLRF,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "G",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 88,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRG,
-		.podr				= (void __iomem *)MCFGPIO_PORTG,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTGP,
-		.setr				= (void __iomem *)MCFGPIO_SETG,
-		.clrr				= (void __iomem *)MCFGPIO_CLRG,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "H",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 96,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRH,
-		.podr				= (void __iomem *)MCFGPIO_PORTH,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTHP,
-		.setr				= (void __iomem *)MCFGPIO_SETH,
-		.clrr				= (void __iomem *)MCFGPIO_CLRH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "J",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 104,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRJ,
-		.podr				= (void __iomem *)MCFGPIO_PORTJ,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTJP,
-		.setr				= (void __iomem *)MCFGPIO_SETJ,
-		.clrr				= (void __iomem *)MCFGPIO_CLRJ,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "DD",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 112,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRDD,
-		.podr				= (void __iomem *)MCFGPIO_PORTDD,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTDDP,
-		.setr				= (void __iomem *)MCFGPIO_SETDD,
-		.clrr				= (void __iomem *)MCFGPIO_CLRDD,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "EH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 120,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDREH,
-		.podr				= (void __iomem *)MCFGPIO_PORTEH,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTEHP,
-		.setr				= (void __iomem *)MCFGPIO_SETEH,
-		.clrr				= (void __iomem *)MCFGPIO_CLREH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "EL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 128,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDREL,
-		.podr				= (void __iomem *)MCFGPIO_PORTEL,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTELP,
-		.setr				= (void __iomem *)MCFGPIO_SETEL,
-		.clrr				= (void __iomem *)MCFGPIO_CLREL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "AS",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 136,
-			.ngpio			= 6,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRAS,
-		.podr				= (void __iomem *)MCFGPIO_PORTAS,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTASP,
-		.setr				= (void __iomem *)MCFGPIO_SETAS,
-		.clrr				= (void __iomem *)MCFGPIO_CLRAS,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "QS",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 144,
-			.ngpio			= 7,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRQS,
-		.podr				= (void __iomem *)MCFGPIO_PORTQS,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTQSP,
-		.setr				= (void __iomem *)MCFGPIO_SETQS,
-		.clrr				= (void __iomem *)MCFGPIO_CLRQS,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "SD",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 152,
-			.ngpio			= 6,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRSD,
-		.podr				= (void __iomem *)MCFGPIO_PORTSD,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTSDP,
-		.setr				= (void __iomem *)MCFGPIO_SETSD,
-		.clrr				= (void __iomem *)MCFGPIO_CLRSD,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "TC",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 160,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRTC,
-		.podr				= (void __iomem *)MCFGPIO_PORTTC,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTTCP,
-		.setr				= (void __iomem *)MCFGPIO_SETTC,
-		.clrr				= (void __iomem *)MCFGPIO_CLRTC,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "TD",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 168,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRTD,
-		.podr				= (void __iomem *)MCFGPIO_PORTTD,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTTDP,
-		.setr				= (void __iomem *)MCFGPIO_SETTD,
-		.clrr				= (void __iomem *)MCFGPIO_CLRTD,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "UA",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 176,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *)MCFGPIO_DDRUA,
-		.podr				= (void __iomem *)MCFGPIO_PORTUA,
-		.ppdr				= (void __iomem *)MCFGPIO_PORTUAP,
-		.setr				= (void __iomem *)MCFGPIO_SETUA,
-		.clrr				= (void __iomem *)MCFGPIO_CLRUA,
-	},
-};
-
-static int __init mcf_gpio_init(void)
-{
-	unsigned i = 0;
-	while (i < ARRAY_SIZE(mcf_gpio_chips))
-		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
-	return 0;
-}
-
-core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/5307/Makefile b/arch/m68k/platform/5307/Makefile
deleted file mode 100644
index d4293b7..0000000
--- a/arch/m68k/platform/5307/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Makefile for the m68knommu kernel.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to add define this,  which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs. You have to
-# set up you HW breakpoints to trigger a DBG interrupt:
-#
-# ccflags-y := -DTRAP_DBG_INTERRUPT
-# asflags-y := -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-obj-y			+= config.o gpio.o
-obj-$(CONFIG_NETtel)	+= nettel.o
-obj-$(CONFIG_CLEOPATRA)	+= nettel.o
-
diff --git a/arch/m68k/platform/5307/gpio.c b/arch/m68k/platform/5307/gpio.c
deleted file mode 100644
index 5850612..0000000
--- a/arch/m68k/platform/5307/gpio.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Coldfire generic GPIO support
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-static struct mcf_gpio_chip mcf_gpio_chips[] = {
-	{
-		.gpio_chip			= {
-			.label			= "PP",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.ngpio			= 16,
-		},
-		.pddr				= (void __iomem *) MCFSIM_PADDR,
-		.podr				= (void __iomem *) MCFSIM_PADAT,
-		.ppdr				= (void __iomem *) MCFSIM_PADAT,
-	},
-};
-
-static int __init mcf_gpio_init(void)
-{
-	unsigned i = 0;
-	while (i < ARRAY_SIZE(mcf_gpio_chips))
-		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
-	return 0;
-}
-
-core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/532x/Makefile b/arch/m68k/platform/532x/Makefile
deleted file mode 100644
index ce01669..0000000
--- a/arch/m68k/platform/532x/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Makefile for the m68knommu linux kernel.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to add define this,  which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs. You have to
-# set up you HW breakpoints to trigger a DBG interrupt:
-#
-# ccflags-y := -DTRAP_DBG_INTERRUPT
-# asflags-y := -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-#obj-y := config.o usb-mcf532x.o spi-mcf532x.o
-obj-y := config.o gpio.o
diff --git a/arch/m68k/platform/532x/gpio.c b/arch/m68k/platform/532x/gpio.c
deleted file mode 100644
index 212a85d..0000000
--- a/arch/m68k/platform/532x/gpio.c
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * Coldfire generic GPIO support
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-static struct mcf_gpio_chip mcf_gpio_chips[] = {
-	{
-		.gpio_chip			= {
-			.label			= "PIRQ",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFEPORT_EPDDR,
-		.podr				= (void __iomem *) MCFEPORT_EPDR,
-		.ppdr				= (void __iomem *) MCFEPORT_EPPDR,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FECH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 8,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FECH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FECH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FECH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FECH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FECH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FECL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 16,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FECL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FECL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FECL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FECL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FECL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "SSI",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 24,
-			.ngpio			= 5,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_SSI,
-		.podr				= (void __iomem *) MCFGPIO_PODR_SSI,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_SSI,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_SSI,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_SSI,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "BUSCTL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 32,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_BUSCTL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_BUSCTL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_BUSCTL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "BE",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 40,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_BE,
-		.podr				= (void __iomem *) MCFGPIO_PODR_BE,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_BE,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_BE,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_BE,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "CS",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 49,
-			.ngpio			= 5,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_CS,
-		.podr				= (void __iomem *) MCFGPIO_PODR_CS,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_CS,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_CS,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_CS,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "PWM",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 58,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_PWM,
-		.podr				= (void __iomem *) MCFGPIO_PODR_PWM,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_PWM,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_PWM,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_PWM,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "FECI2C",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 64,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_FECI2C,
-		.podr				= (void __iomem *) MCFGPIO_PODR_FECI2C,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_FECI2C,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "UART",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 72,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_UART,
-		.podr				= (void __iomem *) MCFGPIO_PODR_UART,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_UART,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_UART,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_UART,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "QSPI",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 80,
-			.ngpio			= 6,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_QSPI,
-		.podr				= (void __iomem *) MCFGPIO_PODR_QSPI,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_QSPI,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_QSPI,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_QSPI,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "TIMER",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 88,
-			.ngpio			= 4,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_TIMER,
-		.podr				= (void __iomem *) MCFGPIO_PODR_TIMER,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_TIMER,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_TIMER,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_TIMER,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "LCDDATAH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 96,
-			.ngpio			= 2,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_LCDDATAH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_LCDDATAH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_LCDDATAH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_LCDDATAH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_LCDDATAH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "LCDDATAM",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 104,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_LCDDATAM,
-		.podr				= (void __iomem *) MCFGPIO_PODR_LCDDATAM,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_LCDDATAM,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_LCDDATAM,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_LCDDATAM,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "LCDDATAL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 112,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_LCDDATAL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_LCDDATAL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_LCDDATAL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_LCDDATAL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_LCDDATAL,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "LCDCTLH",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 120,
-			.ngpio			= 1,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_LCDCTLH,
-		.podr				= (void __iomem *) MCFGPIO_PODR_LCDCTLH,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_LCDCTLH,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_LCDCTLH,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_LCDCTLH,
-	},
-	{
-		.gpio_chip			= {
-			.label			= "LCDCTLL",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value_fast,
-			.base			= 128,
-			.ngpio			= 8,
-		},
-		.pddr				= (void __iomem *) MCFGPIO_PDDR_LCDCTLL,
-		.podr				= (void __iomem *) MCFGPIO_PODR_LCDCTLL,
-		.ppdr				= (void __iomem *) MCFGPIO_PPDSDR_LCDCTLL,
-		.setr				= (void __iomem *) MCFGPIO_PPDSDR_LCDCTLL,
-		.clrr				= (void __iomem *) MCFGPIO_PCLRR_LCDCTLL,
-	},
-};
-
-static int __init mcf_gpio_init(void)
-{
-	unsigned i = 0;
-	while (i < ARRAY_SIZE(mcf_gpio_chips))
-		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
-	return 0;
-}
-
-core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/5407/Makefile b/arch/m68k/platform/5407/Makefile
deleted file mode 100644
index e83fe14..0000000
--- a/arch/m68k/platform/5407/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Makefile for the m68knommu linux kernel.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to add define this,  which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs. You have to
-# set up you HW breakpoints to trigger a DBG interrupt:
-#
-# ccflags-y := -DTRAP_DBG_INTERRUPT
-# asflags-y := -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-obj-y := config.o gpio.o
-
diff --git a/arch/m68k/platform/5407/gpio.c b/arch/m68k/platform/5407/gpio.c
deleted file mode 100644
index 5850612..0000000
--- a/arch/m68k/platform/5407/gpio.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Coldfire generic GPIO support
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-static struct mcf_gpio_chip mcf_gpio_chips[] = {
-	{
-		.gpio_chip			= {
-			.label			= "PP",
-			.request		= mcf_gpio_request,
-			.free			= mcf_gpio_free,
-			.direction_input	= mcf_gpio_direction_input,
-			.direction_output	= mcf_gpio_direction_output,
-			.get			= mcf_gpio_get_value,
-			.set			= mcf_gpio_set_value,
-			.ngpio			= 16,
-		},
-		.pddr				= (void __iomem *) MCFSIM_PADDR,
-		.podr				= (void __iomem *) MCFSIM_PADAT,
-		.ppdr				= (void __iomem *) MCFSIM_PADAT,
-	},
-};
-
-static int __init mcf_gpio_init(void)
-{
-	unsigned i = 0;
-	while (i < ARRAY_SIZE(mcf_gpio_chips))
-		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
-	return 0;
-}
-
-core_initcall(mcf_gpio_init);
diff --git a/arch/m68k/platform/54xx/Makefile b/arch/m68k/platform/54xx/Makefile
deleted file mode 100644
index 6cfd090..0000000
--- a/arch/m68k/platform/54xx/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Makefile for the m68knommu linux kernel.
-#
-
-#
-# If you want to play with the HW breakpoints then you will
-# need to add define this,  which will give you a stack backtrace
-# on the console port whenever a DBG interrupt occurs. You have to
-# set up you HW breakpoints to trigger a DBG interrupt:
-#
-# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
-# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
-#
-
-asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
-
-obj-y := config.o
-obj-$(CONFIG_FIREBEE) += firebee.o
-
diff --git a/arch/m68k/platform/coldfire/Makefile b/arch/m68k/platform/coldfire/Makefile
index a0815c6..76d389d 100644
--- a/arch/m68k/platform/coldfire/Makefile
+++ b/arch/m68k/platform/coldfire/Makefile
@@ -15,18 +15,22 @@
 asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
 
 obj-$(CONFIG_COLDFIRE)	+= cache.o clk.o device.o dma.o entry.o vectors.o
-obj-$(CONFIG_M5206)	+= timers.o intc.o reset.o
-obj-$(CONFIG_M5206e)	+= timers.o intc.o reset.o
-obj-$(CONFIG_M520x)	+= pit.o intc-simr.o reset.o
-obj-$(CONFIG_M523x)	+= pit.o dma_timer.o intc-2.o reset.o
-obj-$(CONFIG_M5249)	+= timers.o intc.o reset.o
-obj-$(CONFIG_M527x)	+= pit.o intc-2.o reset.o
-obj-$(CONFIG_M5272)	+= timers.o
-obj-$(CONFIG_M528x)	+= pit.o intc-2.o reset.o
-obj-$(CONFIG_M5307)	+= timers.o intc.o reset.o
-obj-$(CONFIG_M532x)	+= timers.o intc-simr.o reset.o
-obj-$(CONFIG_M5407)	+= timers.o intc.o reset.o
-obj-$(CONFIG_M54xx)	+= sltimers.o intc-2.o
+obj-$(CONFIG_M5206)	+= m5206.o timers.o intc.o reset.o
+obj-$(CONFIG_M5206e)	+= m5206.o timers.o intc.o reset.o
+obj-$(CONFIG_M520x)	+= m520x.o pit.o intc-simr.o reset.o
+obj-$(CONFIG_M523x)	+= m523x.o pit.o dma_timer.o intc-2.o reset.o
+obj-$(CONFIG_M5249)	+= m5249.o timers.o intc.o intc-5249.o reset.o
+obj-$(CONFIG_M527x)	+= m527x.o pit.o intc-2.o reset.o
+obj-$(CONFIG_M5272)	+= m5272.o intc-5272.o timers.o
+obj-$(CONFIG_M528x)	+= m528x.o pit.o intc-2.o reset.o
+obj-$(CONFIG_M5307)	+= m5307.o timers.o intc.o reset.o
+obj-$(CONFIG_M532x)	+= m532x.o timers.o intc-simr.o reset.o
+obj-$(CONFIG_M5407)	+= m5407.o timers.o intc.o reset.o
+obj-$(CONFIG_M54xx)	+= m54xx.o sltimers.o intc-2.o
+
+obj-$(CONFIG_NETtel)	+= nettel.o
+obj-$(CONFIG_CLEOPATRA)	+= nettel.o
+obj-$(CONFIG_FIREBEE)	+= firebee.o
 
 obj-y			+= pinmux.o gpio.o
 extra-y := head.o
diff --git a/arch/m68k/platform/coldfire/device.c b/arch/m68k/platform/coldfire/device.c
index 7af9736..3aa77dd 100644
--- a/arch/m68k/platform/coldfire/device.c
+++ b/arch/m68k/platform/coldfire/device.c
@@ -121,7 +121,7 @@
 #endif /* MCFFEC_BASE1 */
 #endif /* CONFIG_FEC */
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 /*
  *	The ColdFire QSPI module is an SPI protocol hardware block used
  *	on a number of different ColdFire CPUs.
@@ -274,7 +274,7 @@
 	.resource		= mcf_qspi_resources,
 	.dev.platform_data	= &mcf_qspi_data,
 };
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 static struct platform_device *mcf_devices[] __initdata = {
 	&mcf_uart,
@@ -284,7 +284,7 @@
 	&mcf_fec1,
 #endif
 #endif
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	&mcf_qspi,
 #endif
 };
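
The device.c hunks above switch from a plain #ifdef to IS_ENABLED() so the QSPI platform device gets registered when the SPI driver is built either into the kernel (=y) or as a module (=m). As a rough, self-contained illustration of the macro trick involved — condensed from the idea behind include/linux/kconfig.h, with details that differ across kernel versions — the user-space sketch below shows why IS_ENABLED() catches the =m case that #ifdef CONFIG_FOO misses:

/*
 * Minimal user-space sketch of the IS_ENABLED() idea.  The macro
 * definitions here are illustrative stand-ins, not a verbatim copy
 * of the kernel's kconfig.h.
 */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0, 0)

/* True only for =y, only for =m, or for either, respectively. */
#define IS_BUILTIN(option) __is_defined(option)
#define IS_MODULE(option)  __is_defined(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

/* Pretend the SPI driver was configured as a module (=m): */
#define CONFIG_SPI_COLDFIRE_QSPI_MODULE 1

int main(void)
{
	/* An #ifdef CONFIG_SPI_COLDFIRE_QSPI test would miss the =m case. */
	printf("built-in: %d, module: %d, enabled: %d\n",
	       IS_BUILTIN(CONFIG_SPI_COLDFIRE_QSPI),
	       IS_MODULE(CONFIG_SPI_COLDFIRE_QSPI),
	       IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI));
	return 0;
}

Built and run as-is, this prints "built-in: 0, module: 1, enabled: 1", which is exactly the case the converted #if lines now cover.
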
diff --git a/arch/m68k/platform/54xx/firebee.c b/arch/m68k/platform/coldfire/firebee.c
similarity index 100%
rename from arch/m68k/platform/54xx/firebee.c
rename to arch/m68k/platform/coldfire/firebee.c
diff --git a/arch/m68k/platform/coldfire/gpio.c b/arch/m68k/platform/coldfire/gpio.c
index 292a1a5..4c8c424 100644
--- a/arch/m68k/platform/coldfire/gpio.c
+++ b/arch/m68k/platform/coldfire/gpio.c
@@ -122,6 +122,10 @@
 
 static int __init mcf_gpio_sysinit(void)
 {
+	unsigned int i = 0;
+
+	while (i < mcf_gpio_chips_size)
+		gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
 	return subsys_system_register(&mcf_gpio_subsys, NULL);
 }
 
diff --git a/arch/m68k/platform/5249/intc2.c b/arch/m68k/platform/coldfire/intc-5249.c
similarity index 100%
rename from arch/m68k/platform/5249/intc2.c
rename to arch/m68k/platform/coldfire/intc-5249.c
diff --git a/arch/m68k/platform/5272/intc.c b/arch/m68k/platform/coldfire/intc-5272.c
similarity index 100%
rename from arch/m68k/platform/5272/intc.c
rename to arch/m68k/platform/coldfire/intc-5272.c
diff --git a/arch/m68k/platform/5206/config.c b/arch/m68k/platform/coldfire/m5206.c
similarity index 79%
rename from arch/m68k/platform/5206/config.c
rename to arch/m68k/platform/coldfire/m5206.c
index 6bfbeeb..a8b81df 100644
--- a/arch/m68k/platform/5206/config.c
+++ b/arch/m68k/platform/coldfire/m5206.c
@@ -16,6 +16,15 @@
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+/***************************************************************************/
+
+struct mcf_gpio_chip mcf_gpio_chips[] = {
+	MCFGPS(PP, 0, 8, MCFSIM_PADDR, MCFSIM_PADAT, MCFSIM_PADAT),
+};
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
 
 /***************************************************************************/
 
diff --git a/arch/m68k/platform/520x/config.c b/arch/m68k/platform/coldfire/m520x.c
similarity index 79%
rename from arch/m68k/platform/520x/config.c
rename to arch/m68k/platform/coldfire/m520x.c
index 23594784..3264b88 100644
--- a/arch/m68k/platform/520x/config.c
+++ b/arch/m68k/platform/coldfire/m520x.c
@@ -19,10 +19,26 @@
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
+#include <asm/mcfgpio.h>
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+struct mcf_gpio_chip mcf_gpio_chips[] = {
+	MCFGPS(PIRQ, 0, 8, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
+	MCFGPF(CS, 9, 3),
+	MCFGPF(FECI2C, 16, 4),
+	MCFGPF(QSPI, 24, 4),
+	MCFGPF(TIMER, 32, 4),
+	MCFGPF(UART, 40, 8),
+	MCFGPF(FECH, 48, 8),
+	MCFGPF(FECL, 56, 8),
+};
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
+
+/***************************************************************************/
+
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m520x_qspi_init(void)
 {
@@ -35,7 +51,7 @@
 	writew(par, MCF_GPIO_PAR_UART);
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -79,7 +95,7 @@
 	mach_sched_init = hw_timer_init;
 	m520x_uarts_init();
 	m520x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m520x_qspi_init();
 #endif
 }
diff --git a/arch/m68k/platform/523x/config.c b/arch/m68k/platform/coldfire/m523x.c
similarity index 68%
rename from arch/m68k/platform/523x/config.c
rename to arch/m68k/platform/coldfire/m523x.c
index c8b405d..5d57a42 100644
--- a/arch/m68k/platform/523x/config.c
+++ b/arch/m68k/platform/coldfire/m523x.c
@@ -19,10 +19,32 @@
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+struct mcf_gpio_chip mcf_gpio_chips[] = {
+	MCFGPS(PIRQ, 1, 7, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
+	MCFGPF(ADDR, 13, 3),
+	MCFGPF(DATAH, 16, 8),
+	MCFGPF(DATAL, 24, 8),
+	MCFGPF(BUSCTL, 32, 8),
+	MCFGPF(BS, 40, 4),
+	MCFGPF(CS, 49, 7),
+	MCFGPF(SDRAM, 56, 6),
+	MCFGPF(FECI2C, 64, 4),
+	MCFGPF(UARTH, 72, 2),
+	MCFGPF(UARTL, 80, 8),
+	MCFGPF(QSPI, 88, 5),
+	MCFGPF(TIMER, 96, 8),
+	MCFGPF(ETPU, 104, 3),
+};
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
+
+/***************************************************************************/
+
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m523x_qspi_init(void)
 {
@@ -36,7 +58,7 @@
 	writew(par, MCFGPIO_PAR_TIMER);
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -58,7 +80,7 @@
 {
 	mach_sched_init = hw_timer_init;
 	m523x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m523x_qspi_init();
 #endif
 }
diff --git a/arch/m68k/platform/5249/config.c b/arch/m68k/platform/coldfire/m5249.c
similarity index 82%
rename from arch/m68k/platform/5249/config.c
rename to arch/m68k/platform/coldfire/m5249.c
index bbf0513..fdfa1ed 100644
--- a/arch/m68k/platform/5249/config.c
+++ b/arch/m68k/platform/coldfire/m5249.c
@@ -16,6 +16,16 @@
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+/***************************************************************************/
+
+struct mcf_gpio_chip mcf_gpio_chips[] = {
+	MCFGPS(GPIO0, 0, 32, MCFSIM2_GPIOENABLE, MCFSIM2_GPIOWRITE, MCFSIM2_GPIOREAD),
+	MCFGPS(GPIO1, 32, 32, MCFSIM2_GPIO1ENABLE, MCFSIM2_GPIO1WRITE, MCFSIM2_GPIO1READ),
+};
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
 
 /***************************************************************************/
 
@@ -51,7 +61,7 @@
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m5249_qspi_init(void)
 {
@@ -61,7 +71,7 @@
 	mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI);
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -90,7 +100,7 @@
 #ifdef CONFIG_M5249C3
 	m5249_smc91x_init();
 #endif
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m5249_qspi_init();
 #endif
 }
diff --git a/arch/m68k/platform/5272/config.c b/arch/m68k/platform/coldfire/m5272.c
similarity index 87%
rename from arch/m68k/platform/5272/config.c
rename to arch/m68k/platform/coldfire/m5272.c
index e68bc7a..43e3606 100644
--- a/arch/m68k/platform/5272/config.c
+++ b/arch/m68k/platform/coldfire/m5272.c
@@ -19,6 +19,7 @@
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
+#include <asm/mcfgpio.h>
 
 /***************************************************************************/
 
@@ -30,6 +31,16 @@
 
 /***************************************************************************/
 
+struct mcf_gpio_chip mcf_gpio_chips[] = {
+	MCFGPS(PA,  0, 16, MCFSIM_PADDR, MCFSIM_PADAT, MCFSIM_PADAT),
+	MCFGPS(PB, 16, 16, MCFSIM_PBDDR, MCFSIM_PBDAT, MCFSIM_PBDAT),
+	MCFGPS(Pc, 32, 16, MCFSIM_PCDDR, MCFSIM_PCDAT, MCFSIM_PCDAT),
+};
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
+
+/***************************************************************************/
+
 static void __init m5272_uarts_init(void)
 {
 	u32 v;
diff --git a/arch/m68k/platform/527x/config.c b/arch/m68k/platform/coldfire/m527x.c
similarity index 66%
rename from arch/m68k/platform/527x/config.c
rename to arch/m68k/platform/coldfire/m527x.c
index f91a532..9b0b66a 100644
--- a/arch/m68k/platform/527x/config.c
+++ b/arch/m68k/platform/coldfire/m527x.c
@@ -20,10 +20,53 @@
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
+#include <asm/mcfgpio.h>
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+struct mcf_gpio_chip mcf_gpio_chips[] = {
+#if defined(CONFIG_M5271)
+	MCFGPS(PIRQ, 1, 7, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
+	MCFGPF(ADDR, 13, 3),
+	MCFGPF(DATAH, 16, 8),
+	MCFGPF(DATAL, 24, 8),
+	MCFGPF(BUSCTL, 32, 8),
+	MCFGPF(BS, 40, 4),
+	MCFGPF(CS, 49, 7),
+	MCFGPF(SDRAM, 56, 6),
+	MCFGPF(FECI2C, 64, 4),
+	MCFGPF(UARTH, 72, 2),
+	MCFGPF(UARTL, 80, 8),
+	MCFGPF(QSPI, 88, 5),
+	MCFGPF(TIMER, 96, 8),
+#elif defined(CONFIG_M5275)
+	MCFGPS(PIRQ, 1, 7, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
+	MCFGPF(BUSCTL, 8, 8),
+	MCFGPF(ADDR, 21, 3),
+	MCFGPF(CS, 25, 7),
+	MCFGPF(FEC0H, 32, 8),
+	MCFGPF(FEC0L, 40, 8),
+	MCFGPF(FECI2C, 48, 6),
+	MCFGPF(QSPI, 56, 7),
+	MCFGPF(SDRAM, 64, 8),
+	MCFGPF(TIMERH, 72, 4),
+	MCFGPF(TIMERL, 80, 4),
+	MCFGPF(UARTL, 88, 8),
+	MCFGPF(FEC1H, 96, 8),
+	MCFGPF(FEC1L, 104, 8),
+	MCFGPF(BS, 114, 2),
+	MCFGPF(IRQ, 121, 7),
+	MCFGPF(USBH, 128, 1),
+	MCFGPF(USBL, 136, 8),
+	MCFGPF(UARTH, 144, 4),
+#endif
+};
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
+
+/***************************************************************************/
+
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m527x_qspi_init(void)
 {
@@ -42,7 +85,7 @@
 #endif
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -90,7 +133,7 @@
 	mach_sched_init = hw_timer_init;
 	m527x_uarts_init();
 	m527x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m527x_qspi_init();
 #endif
 }
diff --git a/arch/m68k/platform/528x/config.c b/arch/m68k/platform/coldfire/m528x.c
similarity index 69%
rename from arch/m68k/platform/528x/config.c
rename to arch/m68k/platform/coldfire/m528x.c
index d449292..7ed1276b 100644
--- a/arch/m68k/platform/528x/config.c
+++ b/arch/m68k/platform/coldfire/m528x.c
@@ -21,10 +21,41 @@
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
+#include <asm/mcfgpio.h>
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+struct mcf_gpio_chip mcf_gpio_chips[] = {
+	MCFGPS(NQ, 1, 7, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
+	MCFGPS(TA, 8, 4, MCFGPTA_GPTDDR, MCFGPTA_GPTPORT, MCFGPTB_GPTPORT),
+	MCFGPS(TB, 16, 4, MCFGPTB_GPTDDR, MCFGPTB_GPTPORT, MCFGPTB_GPTPORT),
+	MCFGPS(QA, 24, 4, MCFQADC_DDRQA, MCFQADC_PORTQA, MCFQADC_PORTQA),
+	MCFGPS(QB, 32, 4, MCFQADC_DDRQB, MCFQADC_PORTQB, MCFQADC_PORTQB),
+	MCFGPF(A, 40, 8),
+	MCFGPF(B, 48, 8),
+	MCFGPF(C, 56, 8),
+	MCFGPF(D, 64, 8),
+	MCFGPF(E, 72, 8),
+	MCFGPF(F, 80, 8),
+	MCFGPF(G, 88, 8),
+	MCFGPF(H, 96, 8),
+	MCFGPF(J, 104, 8),
+	MCFGPF(DD, 112, 8),
+	MCFGPF(EH, 120, 8),
+	MCFGPF(EL, 128, 8),
+	MCFGPF(AS, 136, 6),
+	MCFGPF(QS, 144, 7),
+	MCFGPF(SD, 152, 6),
+	MCFGPF(TC, 160, 4),
+	MCFGPF(TD, 168, 4),
+	MCFGPF(UA, 176, 4),
+};
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
+
+/***************************************************************************/
+
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m528x_qspi_init(void)
 {
@@ -32,7 +63,7 @@
 	__raw_writeb(0x07, MCFGPIO_PQSPAR);
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -98,7 +129,7 @@
 	mach_sched_init = hw_timer_init;
 	m528x_uarts_init();
 	m528x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m528x_qspi_init();
 #endif
 }
diff --git a/arch/m68k/platform/5307/config.c b/arch/m68k/platform/coldfire/m5307.c
similarity index 85%
rename from arch/m68k/platform/5307/config.c
rename to arch/m68k/platform/coldfire/m5307.c
index a568d28..93b4849 100644
--- a/arch/m68k/platform/5307/config.c
+++ b/arch/m68k/platform/coldfire/m5307.c
@@ -16,6 +16,7 @@
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
 #include <asm/mcfwdebug.h>
 
 /***************************************************************************/
@@ -28,6 +29,14 @@
 
 /***************************************************************************/
 
+struct mcf_gpio_chip mcf_gpio_chips[] = {
+	MCFGPS(PP, 0, 16, MCFSIM_PADDR, MCFSIM_PADAT, MCFSIM_PADAT),
+};
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
+
+/***************************************************************************/
+
 void __init config_BSP(char *commandp, int size)
 {
 #if defined(CONFIG_NETtel) || \
diff --git a/arch/m68k/platform/532x/config.c b/arch/m68k/platform/coldfire/m532x.c
similarity index 92%
rename from arch/m68k/platform/532x/config.c
rename to arch/m68k/platform/coldfire/m532x.c
index 2bec347..8e9476d 100644
--- a/arch/m68k/platform/532x/config.c
+++ b/arch/m68k/platform/coldfire/m532x.c
@@ -26,11 +26,36 @@
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
 #include <asm/mcfdma.h>
+#include <asm/mcfgpio.h>
 #include <asm/mcfwdebug.h>
 
 /***************************************************************************/
 
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+struct mcf_gpio_chip mcf_gpio_chips[] = {
+	MCFGPS(PIRQ, 0, 8, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
+	MCFGPF(FECH, 8, 8),
+	MCFGPF(FECL, 16, 8),
+	MCFGPF(SSI, 24, 5),
+	MCFGPF(BUSCTL, 32, 4),
+	MCFGPF(BE, 40, 4),
+	MCFGPF(CS, 49, 5),
+	MCFGPF(PWM, 58, 4),
+	MCFGPF(FECI2C, 64, 4),
+	MCFGPF(UART, 72, 8),
+	MCFGPF(QSPI, 80, 6),
+	MCFGPF(TIMER, 88, 4),
+	MCFGPF(LCDDATAH, 96, 2),
+	MCFGPF(LCDDATAM, 104, 8),
+	MCFGPF(LCDDATAL, 112, 8),
+	MCFGPF(LCDCTLH, 120, 1),
+	MCFGPF(LCDCTLL, 128, 8),
+};
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
+
+/***************************************************************************/
+
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 
 static void __init m532x_qspi_init(void)
 {
@@ -38,7 +63,7 @@
 	writew(0x01f0, MCF_GPIO_PAR_QSPI);
 }
 
-#endif /* CONFIG_SPI_COLDFIRE_QSPI */
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
 
 /***************************************************************************/
 
@@ -77,7 +102,7 @@
 	mach_sched_init = hw_timer_init;
 	m532x_uarts_init();
 	m532x_fec_init();
-#ifdef CONFIG_SPI_COLDFIRE_QSPI
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
 	m532x_qspi_init();
 #endif
 
diff --git a/arch/m68k/platform/5407/config.c b/arch/m68k/platform/coldfire/m5407.c
similarity index 77%
rename from arch/m68k/platform/5407/config.c
rename to arch/m68k/platform/coldfire/m5407.c
index bb6c746..faa6680 100644
--- a/arch/m68k/platform/5407/config.c
+++ b/arch/m68k/platform/coldfire/m5407.c
@@ -16,6 +16,15 @@
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfgpio.h>
+
+/***************************************************************************/
+
+struct mcf_gpio_chip mcf_gpio_chips[] = {
+	MCFGPS(PP, 0, 16, MCFSIM_PADDR, MCFSIM_PADAT, MCFSIM_PADAT),
+};
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
 
 /***************************************************************************/
 
diff --git a/arch/m68k/platform/54xx/config.c b/arch/m68k/platform/coldfire/m54xx.c
similarity index 92%
rename from arch/m68k/platform/54xx/config.c
rename to arch/m68k/platform/coldfire/m54xx.c
index 2081c6c..20672da 100644
--- a/arch/m68k/platform/54xx/config.c
+++ b/arch/m68k/platform/coldfire/m54xx.c
@@ -21,12 +21,19 @@
 #include <asm/m54xxsim.h>
 #include <asm/mcfuart.h>
 #include <asm/m54xxgpt.h>
+#include <asm/mcfgpio.h>
 #ifdef CONFIG_MMU
 #include <asm/mmu_context.h>
 #endif
 
 /***************************************************************************/
 
+struct mcf_gpio_chip mcf_gpio_chips[] = { };
+
+unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
+
+/***************************************************************************/
+
 static void __init m54xx_uarts_init(void)
 {
 	/* enable io pins */
diff --git a/arch/m68k/platform/5307/nettel.c b/arch/m68k/platform/coldfire/nettel.c
similarity index 100%
rename from arch/m68k/platform/5307/nettel.c
rename to arch/m68k/platform/coldfire/nettel.c
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index d10403d..ed22bfc 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -1422,6 +1422,7 @@
 
 static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources)
 {
+	unsigned long io_offset;
 	struct resource *res;
 	int i;
 
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
index e215070..9c717bf 100644
--- a/arch/mips/ath79/dev-wmac.c
+++ b/arch/mips/ath79/dev-wmac.c
@@ -58,8 +58,8 @@
 
 static int ar933x_wmac_reset(void)
 {
-	ath79_device_reset_clear(AR933X_RESET_WMAC);
 	ath79_device_reset_set(AR933X_RESET_WMAC);
+	ath79_device_reset_clear(AR933X_RESET_WMAC);
 
 	return 0;
 }
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 807c97e..46c61edc 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -346,11 +346,8 @@
 CONFIG_IXGB=m
 CONFIG_S2IO=m
 CONFIG_MYRI10GE=m
-CONFIG_TR=y
 CONFIG_IBMOL=m
 CONFIG_IBMLS=m
-CONFIG_3C359=m
-CONFIG_TMS380TR=m
 CONFIG_TMSPCI=m
 CONFIG_ABYSS=m
 CONFIG_USB_CATC=m
@@ -376,7 +373,6 @@
 CONFIG_PCMCIA_XIRC2PS=m
 CONFIG_PCMCIA_AXNET=m
 CONFIG_ARCNET_COM20020_CS=m
-CONFIG_PCMCIA_IBMTR=m
 CONFIG_WAN=y
 CONFIG_LANMEDIA=m
 CONFIG_HDLC=m
diff --git a/arch/mips/include/asm/mach-jz4740/irq.h b/arch/mips/include/asm/mach-jz4740/irq.h
index a865c98..5ad1a9c 100644
--- a/arch/mips/include/asm/mach-jz4740/irq.h
+++ b/arch/mips/include/asm/mach-jz4740/irq.h
@@ -45,7 +45,7 @@
 #define JZ4740_IRQ_LCD		JZ4740_IRQ(30)
 
 /* 2nd-level interrupts */
-#define JZ4740_IRQ_DMA(x)	(JZ4740_IRQ(32) + (X))
+#define JZ4740_IRQ_DMA(x)	(JZ4740_IRQ(32) + (x))
 
 #define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x))
 #define JZ4740_IRQ_GPIO(x)	(JZ4740_IRQ(48) + (x))
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 73c0d45..9b02cfb 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -37,12 +37,6 @@
 		write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
 	} while (0)
 
-
-static inline unsigned long get_current_pgd(void)
-{
-	return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL);
-}
-
 #else /* CONFIG_MIPS_PGD_C0_CONTEXT: using  pgd_current*/
 
 /*
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 185ca00..d5a338a 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -257,11 +257,8 @@
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -286,11 +283,8 @@
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -362,10 +356,7 @@
 		goto badframe;
 
 	sigdelsetmask(&blocked, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = blocked;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&blocked);
 
 	sig = restore_sigcontext(&regs, &frame->sf_sc);
 	if (sig < 0)
@@ -401,10 +392,7 @@
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
 	if (sig < 0)
@@ -580,12 +568,7 @@
 	if (ret)
 		return ret;
 
-	spin_lock_irq(&current->sighand->siglock);
-	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
-	if (!(ka->sa.sa_flags & SA_NODEFER))
-		sigaddset(&current->blocked, sig);
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	block_sigmask(ka, sig);
 
 	return ret;
 }
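
The signal.c conversion above replaces the open-coded siglock/recalc_sigpending() sequences with set_current_blocked() and block_sigmask(), which sanitize and install the new blocked mask in one place. As a loose user-space analogue — this is not the in-kernel API, just the same shape of operation — the sketch below strips the signals that can never be blocked and then installs the whole mask with a single atomic call:

/*
 * User-space analogue of the pattern the MIPS signal code moves to:
 * sanitize the requested mask (SIGKILL/SIGSTOP can never be blocked,
 * like ~_BLOCKABLE in the kernel) and install it in one call instead
 * of poking the bookkeeping by hand.  Illustrative only.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t newset, old;

	sigfillset(&newset);		/* caller asked to block everything */
	sigdelset(&newset, SIGKILL);	/* stripped unconditionally */
	sigdelset(&newset, SIGSTOP);

	/* One call updates the blocked set and re-evaluates what is pending. */
	if (sigprocmask(SIG_SETMASK, &newset, &old) != 0) {
		perror("sigprocmask");
		return 1;
	}

	printf("mask installed; SIGINT blocked: %d\n",
	       sigismember(&newset, SIGINT));
	return 0;
}
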
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 06b5da3..ac3b8d8 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -290,11 +290,8 @@
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -318,11 +315,8 @@
 		return -EFAULT;
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -488,10 +482,7 @@
 		goto badframe;
 
 	sigdelsetmask(&blocked, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = blocked;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&blocked);
 
 	sig = restore_sigcontext32(&regs, &frame->sf_sc);
 	if (sig < 0)
@@ -529,10 +520,7 @@
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
 	if (sig < 0)
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index ae29e89..86eb4b0 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -93,11 +93,8 @@
 	sigset_from_compat(&newset, &uset);
 	sigdelsetmask(&newset, ~_BLOCKABLE);
 
-	spin_lock_irq(&current->sighand->siglock);
 	current->saved_sigmask = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&newset);
 
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -121,10 +118,7 @@
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
-	spin_lock_irq(&current->sighand->siglock);
-	current->blocked = set;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
+	set_current_blocked(&set);
 
 	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
 	if (sig < 0)
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 910dddf..9cd69ad 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -24,6 +24,7 @@
 #include <linux/sched.h>
 #include <linux/profile.h>
 #include <linux/smp.h>
+#include <linux/cpu.h>
 #include <asm/tlbflush.h>
 #include <asm/bitops.h>
 #include <asm/processor.h>
@@ -38,7 +39,6 @@
 #include "internal.h"
 
 #ifdef CONFIG_HOTPLUG_CPU
-#include <linux/cpu.h>
 #include <asm/cacheflush.h>
 
 static unsigned long sleep_mode[NR_CPUS];
@@ -874,10 +874,13 @@
 
 	cpu = smp_processor_id();
 
-	local_irq_enable();
+	notify_cpu_starting(cpu);
 
+	ipi_call_lock();
 	set_cpu_online(cpu, true);
-	smp_wmb();
+	ipi_call_unlock();
+
+	local_irq_enable();
 }
 
 /**
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index a478719..7589051 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -7,6 +7,7 @@
 	def_bool y
 	select OF
 	select OF_EARLY_FLATTREE
+	select IRQ_DOMAIN
 	select HAVE_MEMBLOCK
 	select ARCH_WANT_OPTIONAL_GPIOLIB
         select HAVE_ARCH_TRACEHOOK
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index dcea5a0..c936483 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -1,6 +1,7 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += spr_defs.h
+header-y += elf.h
+header-y += ucontext.h
 
 generic-y += atomic.h
 generic-y += auxvec.h
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index b206ba4..fab8628 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -20,150 +20,71 @@
 /*
  * See Documentation/DMA-API-HOWTO.txt and
  * Documentation/DMA-API.txt for documentation.
- *
- * This file is written with the intention of eventually moving over
- * to largely using asm-generic/dma-mapping-common.h in its place.
  */
 
 #include <linux/dma-debug.h>
 #include <asm-generic/dma-coherent.h>
 #include <linux/kmemcheck.h>
+#include <linux/dma-mapping.h>
 
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
+extern struct dma_map_ops or1k_dma_map_ops;
 
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
-			      dma_addr_t *dma_handle, gfp_t flag);
-void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-			    dma_addr_t dma_handle);
-dma_addr_t or1k_map_page(struct device *dev, struct page *page,
-			 unsigned long offset, size_t size,
-			 enum dma_data_direction dir,
-			 struct dma_attrs *attrs);
-void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
-		     size_t size, enum dma_data_direction dir,
-		     struct dma_attrs *attrs);
-int or1k_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		struct dma_attrs *attrs);
-void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
-		   int nents, enum dma_data_direction dir,
-		   struct dma_attrs *attrs);
-void or1k_sync_single_for_cpu(struct device *dev,
-			      dma_addr_t dma_handle, size_t size,
-			      enum dma_data_direction dir);
-void or1k_sync_single_for_device(struct device *dev,
-			         dma_addr_t dma_handle, size_t size,
-			         enum dma_data_direction dir);
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flag)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
+	return &or1k_dma_map_ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
-	memory = or1k_dma_alloc_coherent(dev, size, dma_handle, flag);
+	memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
 	return memory;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	or1k_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
-static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
-					size_t size,
-					enum dma_data_direction dir)
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+					  dma_addr_t *dma_handle, gfp_t gfp)
 {
-	dma_addr_t addr;
+	struct dma_attrs attrs;
 
-	kmemcheck_mark_initialized(ptr, size);
-	BUG_ON(!valid_dma_direction(dir));
-	addr = or1k_map_page(dev, virt_to_page(ptr),
-			     (unsigned long)ptr & ~PAGE_MASK, size,
-			     dir, NULL);
-	debug_dma_map_page(dev, virt_to_page(ptr),
-			   (unsigned long)ptr & ~PAGE_MASK, size,
-			   dir, addr, true);
-	return addr;
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+
+	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
 }
 
-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
-					  size_t size,
-					  enum dma_data_direction dir)
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+					 void *cpu_addr, dma_addr_t dma_handle)
 {
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
+	struct dma_attrs attrs;
 
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction dir)
-{
-	int i, ents;
-	struct scatterlist *s;
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
 
-	for_each_sg(sg, s, nents, i)
-		kmemcheck_mark_initialized(sg_virt(s), s->length);
-	BUG_ON(!valid_dma_direction(dir));
-	ents = or1k_map_sg(dev, sg, nents, dir, NULL);
-	debug_dma_map_sg(dev, sg, nents, ents, dir);
-
-	return ents;
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				      int nents, enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-	or1k_unmap_sg(dev, sg, nents, dir, NULL);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      enum dma_data_direction dir)
-{
-	dma_addr_t addr;
-
-	kmemcheck_mark_initialized(page_address(page) + offset, size);
-	BUG_ON(!valid_dma_direction(dir));
-	addr = or1k_map_page(dev, page, offset, size, dir, NULL);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-					   size_t size,
-					   enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_sync_single_for_cpu(dev, addr, size, dir);
-	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t addr, size_t size,
-					      enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-	or1k_sync_single_for_device(dev, addr, size, dir);
-	debug_dma_sync_single_for_device(dev, addr, size, dir);
+	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
 static inline int dma_supported(struct device *dev, u64 dma_mask)
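
The rewritten header above stops calling the exported or1k_* helpers directly and instead routes everything through a per-device struct dma_map_ops returned by get_dma_ops(), layered under the generic asm-generic/dma-mapping-common.h wrappers. A minimal, self-contained sketch of that ops-table indirection — with made-up types and function names, not the kernel's real dma_map_ops layout — might look like this:

/*
 * Sketch of the ops-table indirection the OpenRISC DMA conversion
 * moves to: callers go through one generic wrapper that dispatches
 * via a per-device ops structure instead of calling platform-specific
 * helpers directly.  All names here are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct fake_device;

struct fake_dma_ops {
	void *(*alloc)(struct fake_device *dev, size_t size);
	void  (*free)(struct fake_device *dev, void *vaddr, size_t size);
};

struct fake_device {
	const char *name;
	const struct fake_dma_ops *dma_ops;
};

/* One platform implementation, analogous in spirit to or1k_dma_map_ops. */
static void *or1k_like_alloc(struct fake_device *dev, size_t size)
{
	printf("%s: allocating %zu bytes of (pretend) uncached memory\n",
	       dev->name, size);
	return malloc(size);
}

static void or1k_like_free(struct fake_device *dev, void *vaddr, size_t size)
{
	printf("%s: freeing %zu bytes\n", dev->name, size);
	free(vaddr);
}

static const struct fake_dma_ops or1k_like_ops = {
	.alloc = or1k_like_alloc,
	.free  = or1k_like_free,
};

/* Generic wrappers: the only entry points drivers would call. */
static const struct fake_dma_ops *get_dma_ops(struct fake_device *dev)
{
	return dev->dma_ops;
}

static void *dma_alloc(struct fake_device *dev, size_t size)
{
	return get_dma_ops(dev)->alloc(dev, size);
}

static void dma_free(struct fake_device *dev, void *vaddr, size_t size)
{
	get_dma_ops(dev)->free(dev, vaddr, size);
}

int main(void)
{
	struct fake_device dev = { .name = "demo", .dma_ops = &or1k_like_ops };
	void *buf = dma_alloc(&dev, 64);

	dma_free(&dev, buf, 64);
	return 0;
}

The design point matches the patch: drivers keep calling one generic entry point, and the platform only has to fill in the ops structure (or1k_dma_map_ops in the real code in arch/openrisc/kernel/dma.c).
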
diff --git a/arch/openrisc/include/asm/elf.h b/arch/openrisc/include/asm/elf.h
index 2ce603b..a8fe2c5 100644
--- a/arch/openrisc/include/asm/elf.h
+++ b/arch/openrisc/include/asm/elf.h
@@ -20,11 +20,17 @@
 #define __ASM_OPENRISC_ELF_H
 
 /*
+ * This file is partially exported to userspace.  This allows us to keep
+ * the ELF bits in one place which should assist in keeping the kernel and
+ * userspace in sync.
+ */
+
+/*
  * ELF register definitions..
  */
-#include <linux/types.h>
-#include <linux/ptrace.h>
 
+/* for struct user_regs_struct definition */
+#include <asm/ptrace.h>
 
 /* The OR1K relocation types... not all relevant for module loader */
 #define R_OR32_NONE	0
@@ -62,6 +68,8 @@
 
 #ifdef __KERNEL__
 
+#include <linux/types.h>
+
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h
index 4651a73..8555c0c 100644
--- a/arch/openrisc/include/asm/ptrace.h
+++ b/arch/openrisc/include/asm/ptrace.h
@@ -19,8 +19,6 @@
 #ifndef __ASM_OPENRISC_PTRACE_H
 #define __ASM_OPENRISC_PTRACE_H
 
-#include <asm/spr_defs.h>
-
 #ifndef __ASSEMBLY__
 /*
  * This is the layout of the regset returned by the GETREGSET ptrace call
@@ -30,13 +28,13 @@
 	unsigned long gpr[32];
 	unsigned long pc;
 	unsigned long sr;
-	unsigned long pad1;
-	unsigned long pad2;
 };
 #endif
 
 #ifdef __KERNEL__
 
+#include <asm/spr_defs.h>
+
 /*
  * Make kernel PTrace/register structures opaque to userspace... userspace can
  * access thread state via the regset mechanism.  This allows us a bit of
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index f1c8ee2..0b77ddb 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -21,13 +21,16 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
+#include <linux/export.h>
+#include <linux/dma-attrs.h>
 
 #include <asm/cpuinfo.h>
 #include <asm/spr_defs.h>
 #include <asm/tlbflush.h>
 
-static int page_set_nocache(pte_t *pte, unsigned long addr,
-			    unsigned long next, struct mm_walk *walk)
+static int
+page_set_nocache(pte_t *pte, unsigned long addr,
+		 unsigned long next, struct mm_walk *walk)
 {
 	unsigned long cl;
 
@@ -46,8 +49,9 @@
 	return 0;
 }
 
-static int page_clear_nocache(pte_t *pte, unsigned long addr,
-			      unsigned long next, struct mm_walk *walk)
+static int
+page_clear_nocache(pte_t *pte, unsigned long addr,
+		   unsigned long next, struct mm_walk *walk)
 {
 	pte_val(*pte) &= ~_PAGE_CI;
 
@@ -67,9 +71,19 @@
  * cache-inhibit bit on those pages, and makes sure that the pages are
  * flushed out of the cache before they are used.
  *
+ * If the NON_CONSISTENT attribute is set, then this function just
+ * returns "normal", cachable memory.
+ *
+ * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
+ * into consideration here, too.  All current known implementations of
+ * the OR1K support only strongly ordered memory accesses, so that flag
+ * is being ignored for now; uncached but write-combined memory is a
+ * missing feature of the OR1K.
  */
-void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
-			      dma_addr_t *dma_handle, gfp_t gfp)
+static void *
+or1k_dma_alloc(struct device *dev, size_t size,
+	       dma_addr_t *dma_handle, gfp_t gfp,
+	       struct dma_attrs *attrs)
 {
 	unsigned long va;
 	void *page;
@@ -87,20 +101,23 @@
 
 	va = (unsigned long)page;
 
-	/*
-	 * We need to iterate through the pages, clearing the dcache for
-	 * them and setting the cache-inhibit bit.
-	 */
-	if (walk_page_range(va, va + size, &walk)) {
-		free_pages_exact(page, size);
-		return NULL;
+	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+		/*
+		 * We need to iterate through the pages, clearing the dcache for
+		 * them and setting the cache-inhibit bit.
+		 */
+		if (walk_page_range(va, va + size, &walk)) {
+			free_pages_exact(page, size);
+			return NULL;
+		}
 	}
 
 	return (void *)va;
 }
 
-void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-			    dma_addr_t dma_handle)
+static void
+or1k_dma_free(struct device *dev, size_t size, void *vaddr,
+	      dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long va = (unsigned long)vaddr;
 	struct mm_walk walk = {
@@ -108,16 +125,19 @@
 		.mm = &init_mm
 	};
 
-	/* walk_page_range shouldn't be able to fail here */
-	WARN_ON(walk_page_range(va, va + size, &walk));
+	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+		/* walk_page_range shouldn't be able to fail here */
+		WARN_ON(walk_page_range(va, va + size, &walk));
+	}
 
 	free_pages_exact(vaddr, size);
 }
 
-dma_addr_t or1k_map_page(struct device *dev, struct page *page,
-			 unsigned long offset, size_t size,
-			 enum dma_data_direction dir,
-			 struct dma_attrs *attrs)
+static dma_addr_t
+or1k_map_page(struct device *dev, struct page *page,
+	      unsigned long offset, size_t size,
+	      enum dma_data_direction dir,
+	      struct dma_attrs *attrs)
 {
 	unsigned long cl;
 	dma_addr_t addr = page_to_phys(page) + offset;
@@ -147,16 +167,18 @@
 	return addr;
 }
 
-void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
-		     size_t size, enum dma_data_direction dir,
-		     struct dma_attrs *attrs)
+static void
+or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
 	/* Nothing special to do here... */
 }
 
-int or1k_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
+static int
+or1k_map_sg(struct device *dev, struct scatterlist *sg,
+	    int nents, enum dma_data_direction dir,
+	    struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -169,9 +191,10 @@
 	return nents;
 }
 
-void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
-		   int nents, enum dma_data_direction dir,
-		   struct dma_attrs *attrs)
+static void
+or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
+	      int nents, enum dma_data_direction dir,
+	      struct dma_attrs *attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -181,9 +204,10 @@
 	}
 }
 
-void or1k_sync_single_for_cpu(struct device *dev,
-			      dma_addr_t dma_handle, size_t size,
-			      enum dma_data_direction dir)
+static void
+or1k_sync_single_for_cpu(struct device *dev,
+			 dma_addr_t dma_handle, size_t size,
+			 enum dma_data_direction dir)
 {
 	unsigned long cl;
 	dma_addr_t addr = dma_handle;
@@ -193,9 +217,10 @@
 		mtspr(SPR_DCBIR, cl);
 }
 
-void or1k_sync_single_for_device(struct device *dev,
-			         dma_addr_t dma_handle, size_t size,
-			         enum dma_data_direction dir)
+static void
+or1k_sync_single_for_device(struct device *dev,
+			    dma_addr_t dma_handle, size_t size,
+			    enum dma_data_direction dir)
 {
 	unsigned long cl;
 	dma_addr_t addr = dma_handle;
@@ -205,6 +230,18 @@
 		mtspr(SPR_DCBFR, cl);
 }
 
+struct dma_map_ops or1k_dma_map_ops = {
+	.alloc = or1k_dma_alloc,
+	.free = or1k_dma_free,
+	.map_page = or1k_map_page,
+	.unmap_page = or1k_unmap_page,
+	.map_sg = or1k_map_sg,
+	.unmap_sg = or1k_unmap_sg,
+	.sync_single_for_cpu = or1k_sync_single_for_cpu,
+	.sync_single_for_device = or1k_sync_single_for_device,
+};
+EXPORT_SYMBOL(or1k_dma_map_ops);
+
 /* Number of entries preallocated for DMA-API debugging */
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 6e61af8..ddfcaa8 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -1117,10 +1117,10 @@
 ENTRY(sys_or1k_atomic)
 	/* FIXME: This ignores r3 and always does an XCHG */
 	DISABLE_INTERRUPTS(r17,r19)
-	l.lwz	r30,0(r4)
-	l.lwz	r28,0(r5)
-	l.sw	0(r4),r28
-	l.sw	0(r5),r30
+	l.lwz	r29,0(r4)
+	l.lwz	r27,0(r5)
+	l.sw	0(r4),r27
+	l.sw	0(r5),r29
 	ENABLE_INTERRUPTS(r17)
 	l.jr	r9
 	 l.or	r11,r0,r0
diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c
index 4bfead2..e935b9d 100644
--- a/arch/openrisc/kernel/irq.c
+++ b/arch/openrisc/kernel/irq.c
@@ -14,17 +14,13 @@
  *      2 of the License, or (at your option) any later version.
  */
 
-#include <linux/ptrace.h>
-#include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/ftrace.h>
 #include <linux/irq.h>
-#include <linux/seq_file.h>
-#include <linux/kernel_stat.h>
 #include <linux/export.h>
-
+#include <linux/irqdomain.h>
 #include <linux/irqflags.h>
 
 /* read interrupt enabled status */
@@ -98,6 +94,7 @@
 #endif
 }
 
+#if 0
 static int or1k_pic_set_type(struct irq_data *data, unsigned int flow_type)
 {
 	/* There's nothing to do in the PIC configuration when changing
@@ -107,43 +104,64 @@
 
 	return irq_setup_alt_chip(data, flow_type);
 }
+#endif
+
+static struct irq_chip or1k_dev = {
+	.name = "or1k-PIC",
+	.irq_unmask = or1k_pic_unmask,
+	.irq_mask = or1k_pic_mask,
+	.irq_ack = or1k_pic_ack,
+	.irq_mask_ack = or1k_pic_mask_ack,
+};
+
+static struct irq_domain *root_domain;
 
 static inline int pic_get_irq(int first)
 {
-	int irq;
+	int hwirq;
 
-	irq = ffs(mfspr(SPR_PICSR) >> first);
+	hwirq = ffs(mfspr(SPR_PICSR) >> first);
+	if (!hwirq)
+		return NO_IRQ;
+	else
+		hwirq = hwirq + first - 1;
 
-	return irq ? irq + first - 1 : NO_IRQ;
+	return irq_find_mapping(root_domain, hwirq);
 }
 
+
+static int or1k_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler_name(irq, &or1k_dev,
+				      handle_level_irq, "level");
+	irq_set_status_flags(irq, IRQ_LEVEL | IRQ_NOPROBE);
+
+	return 0;
+}
+
+static const struct irq_domain_ops or1k_irq_domain_ops = {
+	.xlate = irq_domain_xlate_onecell,
+	.map = or1k_map,
+};
+
+/*
+ * This sets up the IRQ domain for the PIC built in to the OpenRISC
+ * 1000 CPU.  This is the "root" domain as these are the interrupts
+ * that directly trigger an exception in the CPU.
+ */
 static void __init or1k_irq_init(void)
 {
-	struct irq_chip_generic *gc;
-	struct irq_chip_type *ct;
+	struct device_node *intc = NULL;
+
+	/* The interrupt controller device node is mandatory */
+	intc = of_find_compatible_node(NULL, NULL, "opencores,or1k-pic");
+	BUG_ON(!intc);
 
 	/* Disable all interrupts until explicitly requested */
 	mtspr(SPR_PICMR, (0UL));
 
-	gc = irq_alloc_generic_chip("or1k-PIC", 1, 0, 0, handle_level_irq);
-	ct = gc->chip_types;
-
-	ct->chip.irq_unmask = or1k_pic_unmask;
-	ct->chip.irq_mask = or1k_pic_mask;
-	ct->chip.irq_ack = or1k_pic_ack;
-	ct->chip.irq_mask_ack = or1k_pic_mask_ack;
-	ct->chip.irq_set_type = or1k_pic_set_type;
-
-	/* The OR1K PIC can handle both level and edge trigged
-	 * interrupts in roughly the same manner
-	 */
-#if 0
-	/* FIXME: chip.type??? */
-	ct->chip.type = IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_MASK;
-#endif
-
-	irq_setup_generic_chip(gc, IRQ_MSK(NR_IRQS), 0,
-			       IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+	root_domain = irq_domain_add_linear(intc, 32,
+					    &or1k_irq_domain_ops, NULL);
 }
 
 void __init init_IRQ(void)
@@ -164,10 +182,3 @@
 	irq_exit();
 	set_irq_regs(old_regs);
 }
-
-unsigned int irq_create_of_mapping(struct device_node *controller,
-				   const u32 *intspec, unsigned int intsize)
-{
-	return intspec[0];
-}
-EXPORT_SYMBOL_GPL(irq_create_of_mapping);
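
The irq.c rework above drops the generic-chip setup and the hand-rolled irq_create_of_mapping() in favour of a linear irq_domain: the PIC reports a hardware line number, and irq_find_mapping() translates it into the Linux irq that or1k_map() bound when the domain was populated. A toy user-space model of that hwirq-to-irq reverse map — purely illustrative, with invented names and numbers — is sketched below:

/*
 * Toy model of the linear reverse map an irq_domain provides: hardware
 * line numbers from the PIC are translated into the Linux irq numbers
 * that were bound at map time.  The real API lives in
 * kernel/irq/irqdomain.c; everything here is a stand-in.
 */
#include <stdio.h>

#define NR_HWIRQS 32

static unsigned int linear_revmap[NR_HWIRQS];	/* hwirq -> Linux irq (0 = unmapped) */
static unsigned int next_linux_irq = 16;	/* pretend irq numbers start at 16 */

/* Comparable in spirit to or1k_map(): bind a Linux irq to a hw line. */
static unsigned int domain_map(unsigned int hwirq)
{
	linear_revmap[hwirq] = next_linux_irq++;
	return linear_revmap[hwirq];
}

/* Comparable in spirit to irq_find_mapping(): 0 means "no mapping". */
static unsigned int find_mapping(unsigned int hwirq)
{
	return hwirq < NR_HWIRQS ? linear_revmap[hwirq] : 0;
}

int main(void)
{
	domain_map(2);	/* e.g. a UART line on the PIC */
	domain_map(4);

	printf("hwirq 2 -> irq %u\n", find_mapping(2));
	printf("hwirq 3 -> irq %u\n", find_mapping(3));	/* unmapped: 0 */
	return 0;
}
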
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index a5dce82..40f850e 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -54,6 +54,7 @@
 	struct vm_area_struct *vma;
 	siginfo_t info;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	tsk = current;
 
@@ -105,6 +106,7 @@
 	if (in_interrupt() || !mm)
 		goto no_context;
 
+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 
@@ -143,6 +145,7 @@
 	if (write_acc) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else {
 		/* not present */
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
@@ -159,7 +162,11 @@
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, write_acc);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -167,11 +174,24 @@
 			goto do_sigbus;
 		BUG();
 	}
-	/*RGD modeled on Cris */
-	if (fault & VM_FAULT_MAJOR)
-		tsk->maj_flt++;
-	else
-		tsk->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		/*RGD modeled on Cris */
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			 /* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h
index 4e96268..d1d864b 100644
--- a/arch/parisc/include/asm/hardware.h
+++ b/arch/parisc/include/asm/hardware.h
@@ -2,7 +2,6 @@
 #define _PARISC_HARDWARE_H
 
 #include <linux/mod_devicetable.h>
-#include <asm/pdc.h>
 
 #define HWTYPE_ANY_ID		PA_HWTYPE_ANY_ID
 #define HVERSION_ANY_ID		PA_HVERSION_ANY_ID
@@ -95,12 +94,14 @@
 #define HPHW_MC	       15
 #define HPHW_FAULTY    31
 
+struct parisc_device_id;
 
 /* hardware.c: */
 extern const char *parisc_hardware_description(struct parisc_device_id *id);
 extern enum cpu_type parisc_get_cpu_type(unsigned long hversion);
 
 struct pci_dev;
+struct hardware_path;
 
 /* drivers.c: */
 extern struct parisc_device *alloc_pa_dev(unsigned long hpa,
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index a84cc1f..4e0e7db 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -160,5 +160,11 @@
 
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
+#include <asm/pdc.h>
+
+#define PAGE0   ((struct zeropage *)__PAGE_OFFSET)
+
+/* DEFINITION OF THE ZERO-PAGE (PAG0) */
+/* based on work by Jason Eckhardt (jason@equator.com) */
 
 #endif /* _PARISC_PAGE_H */
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index 4ca510b..7f0f2d2 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -343,8 +343,6 @@
 
 #ifdef __KERNEL__
 
-#include <asm/page.h> /* for __PAGE_OFFSET */
-
 extern int pdc_type;
 
 /* Values for pdc_type */
@@ -677,11 +675,6 @@
 
 #endif /* __KERNEL__ */
 
-#define PAGE0   ((struct zeropage *)__PAGE_OFFSET)
-
-/* DEFINITION OF THE ZERO-PAGE (PAG0) */
-/* based on work by Jason Eckhardt (jason@equator.com) */
-
 /* flags of the device_path */
 #define	PF_AUTOBOOT	0x80
 #define	PF_AUTOSEARCH	0x40
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 22dadeb..ee99f23 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -44,6 +44,8 @@
 
 #endif /* !__ASSEMBLY__ */
 
+#include <asm/page.h>
+
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h
index c5edc60..1ee7c82 100644
--- a/arch/parisc/include/asm/prefetch.h
+++ b/arch/parisc/include/asm/prefetch.h
@@ -21,7 +21,12 @@
 #define ARCH_HAS_PREFETCH
 static inline void prefetch(const void *addr)
 {
-	__asm__("ldw 0(%0), %%r0" : : "r" (addr));
+	__asm__(
+#ifndef CONFIG_PA20
+		/* Need to avoid prefetch of NULL on PA7300LC */
+		"	extrw,u,= %0,31,32,%%r0\n"
+#endif
+		"	ldw 0(%0), %%r0" : : "r" (addr));
 }
 
 /* LDD is a PA2.0 addition. */
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 804aa28..3516e0b 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -1,6 +1,8 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
+#include <asm/barrier.h>
+#include <asm/ldcw.h>
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 6f05944..5350342 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -581,7 +581,11 @@
 	 */
 	cmpiclr,=	0x01,\tmp,%r0
 	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
+#ifdef CONFIG_64BIT
 	depd,z		\prot,8,7,\prot
+#else
+	depw,z		\prot,8,7,\prot
+#endif
 	/*
 	 * OK, it is in the temp alias region, check whether "from" or "to".
 	 * Check "subtle" note in pacache.S re: r23/r26.
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 93ff3d9..5d7218a 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -692,7 +692,7 @@
 
 	/* Purge any old translation */
 
-	pitlb		(%sr0,%r28)
+	pitlb		(%sr4,%r28)
 
 	ldil		L%icache_stride, %r1
 	ldw		R%icache_stride(%r1), %r1
@@ -706,27 +706,29 @@
 	sub		%r25, %r1, %r25
 
 
-1:      fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
-	fic,m		%r1(%r28)
+	/* fic only has the type 26 form on PA1.1, requiring an
+	 * explicit space specification, so use %sr4 */
+1:      fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
+	fic,m		%r1(%sr4,%r28)
 	cmpb,COND(<<)		%r28, %r25,1b
-	fic,m		%r1(%r28)
+	fic,m		%r1(%sr4,%r28)
 
 	sync
 	bv		%r0(%r2)
-	pitlb		(%sr0,%r25)
+	pitlb		(%sr4,%r25)
 	.exit
 
 	.procend
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 4f00459..47341aa 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -50,6 +50,7 @@
 #include <linux/init.h>
 #include <linux/major.h>
 #include <linux/tty.h>
+#include <asm/page.h>		/* for PAGE0 */
 #include <asm/pdc.h>		/* for iodc_call() proto and friends */
 
 static DEFINE_SPINLOCK(pdc_console_lock);
@@ -104,7 +105,7 @@
 
 static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
 {
-	if (!tty->count) {
+	if (tty->count == 1) {
 		del_timer_sync(&pdc_console_timer);
 		tty_port_tty_set(&tty_port, NULL);
 	}
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 0bb1d63..4dc7b79 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/bitops.h>
 #include <linux/ftrace.h>
+#include <linux/cpu.h>
 
 #include <linux/atomic.h>
 #include <asm/current.h>
@@ -295,8 +296,13 @@
 
 		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
 		machine_halt();
-	}  
+	}
+
+	notify_cpu_starting(cpunum);
+
+	ipi_call_lock();
 	set_cpu_online(cpunum, true);
+	ipi_call_unlock();
 
 	/* Initialise the idle task for this CPU */
 	atomic_inc(&init_mm.mm_count);
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 7c07743..70e105d 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -29,6 +29,7 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/page.h>
 #include <asm/param.h>
 #include <asm/pdc.h>
 #include <asm/led.h>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index feab3ba..73ec039 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -141,7 +141,7 @@
 	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_BPF_JIT if (PPC64 && NET)
+	select HAVE_BPF_JIT if PPC64
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 548da3a..d58fc4e 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -288,13 +288,6 @@
 /* Exception addition: Hard disable interrupts */
 #define DISABLE_INTS	SOFT_DISABLE_INTS(r10,r11)
 
-/* Exception addition: Keep interrupt state */
-#define ENABLE_INTS				\
-	ld	r11,PACAKMSR(r13);		\
-	ld	r12,_MSR(r1);			\
-	rlwimi	r11,r12,0,MSR_EE;		\
-	mtmsrd	r11,1
-
 #define ADD_NVGPRS				\
 	bl	.save_nvgprs
 
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index e648af9..0e40843 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -18,10 +18,6 @@
 #include <linux/atomic.h>
 
 
-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
-	for ((i) = 0; (i) < NR_IRQS; ++(i))
-
 extern atomic_t ppc_n_lost_interrupts;
 
 /* This number is used when no interrupt has been assigned */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index aa795cc..fd07f43 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -81,12 +81,13 @@
 	u64 sdr1;
 	u64 hior;
 	u64 msr_mask;
-	u64 vsid_next;
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32 vsid_pool[VSID_POOL_SIZE];
+	u32 vsid_next;
 #else
-	u64 vsid_first;
-	u64 vsid_max;
+	u64 proto_vsid_first;
+	u64 proto_vsid_max;
+	u64 proto_vsid_next;
 #endif
 	int context_id[SID_CONTEXTS];
 
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f8a7a1a..ef2074c 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -588,23 +588,19 @@
 fast_exc_return_irq:
 restore:
 	/*
-	 * This is the main kernel exit path, we first check if we
-	 * have to change our interrupt state.
+	 * This is the main kernel exit path. First we check if we
+	 * are about to re-enable interrupts
 	 */
 	ld	r5,SOFTE(r1)
 	lbz	r6,PACASOFTIRQEN(r13)
-	cmpwi	cr1,r5,0
-	cmpw	cr0,r5,r6
-	beq	cr0,4f
+	cmpwi	cr0,r5,0
+	beq	restore_irq_off
 
-	/* We do, handle disable first, which is easy */
-	bne	cr1,3f;
- 	li	r0,0
-	stb	r0,PACASOFTIRQEN(r13);
-	TRACE_DISABLE_INTS
-	b	4f
+	/* We are enabling, were we already enabled ? Yes, just return */
+	cmpwi	cr0,r6,1
+	beq	cr0,do_restore
 
-3:	/*
+	/*
 	 * We are about to soft-enable interrupts (we are hard disabled
 	 * at this point). We check if there's anything that needs to
 	 * be replayed first.
@@ -626,7 +622,7 @@
 	/*
 	 * Final return path. BookE is handled in a different file
 	 */
-4:
+do_restore:
 #ifdef CONFIG_PPC_BOOK3E
 	b	.exception_return_book3e
 #else
@@ -700,6 +696,25 @@
 #endif /* CONFIG_PPC_BOOK3E */
 
 	/*
+	 * We are returning to a context with interrupts soft disabled.
+	 *
+	 * However, we may also be about to hard enable, so we need to
+	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+	 * or that bit can get out of sync and bad things will happen
+	 */
+restore_irq_off:
+	ld	r3,_MSR(r1)
+	lbz	r7,PACAIRQHAPPENED(r13)
+	andi.	r0,r3,MSR_EE
+	beq	1f
+	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
+	stb	r7,PACAIRQHAPPENED(r13)
+1:	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13);
+	TRACE_DISABLE_INTS
+	b	do_restore
+
+	/*
 	 * Something did happen, check if a re-emit is needed
 	 * (this also clears paca->irq_happened)
 	 */
@@ -748,6 +763,9 @@
 #endif /* CONFIG_PPC_BOOK3E */
 1:	b	.ret_from_except /* What else to do here ? */
  
+
+
+3:
 do_work:
 #ifdef CONFIG_PREEMPT
 	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
@@ -767,16 +785,6 @@
 	SOFT_DISABLE_INTS(r3,r4)
 1:	bl	.preempt_schedule_irq
 
-	/* Hard-disable interrupts again (and update PACA) */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
-#else
-	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-	li	r0,PACA_IRQ_HARD_DIS
-	stb	r0,PACAIRQHAPPENED(r13)
-
 	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
@@ -787,14 +795,6 @@
 user_work:
 #endif /* CONFIG_PREEMPT */
 
-	/* Enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-#else
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	1f
 	bl	.restore_interrupts
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index cb705fd..8f880bc 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -768,8 +768,8 @@
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 	bl	.save_nvgprs
+	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
 	bl	.alignment_exception
 	b	.ret_from_except
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5ec1b23..641da9e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,6 +229,19 @@
 	 */
 	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
 		__hard_irq_disable();
+#ifdef CONFIG_TRACE_IRQFLAGS
+	else {
+		/*
+		 * We should already be hard disabled here. We had bugs
+		 * where that wasn't the case so let's dbl check it and
+		 * warn if we are wrong. Only do that when IRQ tracing
+		 * is enabled as mfmsr() can be costly.
+		 */
+		if (WARN_ON(mfmsr() & MSR_EE))
+			__hard_irq_disable();
+	}
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
 	set_soft_enabled(0);
 
 	/*
@@ -260,11 +273,17 @@
  * if they are currently disabled. This is typically called before
  * schedule() or do_signal() when returning to userspace. We do it
  * in C to avoid the burden of dealing with lockdep etc...
+ *
+ * NOTE: This is called with interrupts hard disabled but not marked
+ * as such in paca->irq_happened, so we need to resync this.
  */
 void restore_interrupts(void)
 {
-	if (irqs_disabled())
+	if (irqs_disabled()) {
+		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 		local_irq_enable();
+	} else
+		__hard_irq_enable();
 }
 
 #endif /* CONFIG_PPC64 */
@@ -330,14 +349,10 @@
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
-	for_each_irq(irq) {
+	for_each_irq_desc(irq, desc) {
 		struct irq_data *data;
 		struct irq_chip *chip;
 
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-
 		data = irq_desc_get_irq_data(desc);
 		if (irqd_is_per_cpu(data))
 			continue;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index c957b12..5df7777 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -23,14 +23,11 @@
 
 void machine_kexec_mask_interrupts(void) {
 	unsigned int i;
+	struct irq_desc *desc;
 
-	for_each_irq(i) {
-		struct irq_desc *desc = irq_to_desc(i);
+	for_each_irq_desc(i, desc) {
 		struct irq_chip *chip;
 
-		if (!desc)
-			continue;
-
 		chip = irq_desc_get_chip(desc);
 		if (!chip)
 			continue;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6aa0c66..1589723 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -248,7 +248,7 @@
 				   addr, regs->nip, regs->link, code);
 	}
 
-	if (!arch_irq_disabled_regs(regs))
+	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
 	memset(&info, 0, sizeof(info));
@@ -1019,7 +1019,9 @@
 		return;
 	}
 
-	local_irq_enable();
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
 
 #ifdef CONFIG_MATH_EMULATION
 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
@@ -1069,6 +1071,10 @@
 {
 	int sig, code, fixed = 0;
 
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
+
 	/* we don't implement logging of alignment exceptions */
 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
 		fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 6f87f39..10fc8ec 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -194,14 +194,14 @@
 	backwards_map = !backwards_map;
 
 	/* Uh-oh ... out of mappings. Let's flush! */
-	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
-		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
+		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
 		memset(vcpu_book3s->sid_map, 0,
 		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
 		kvmppc_mmu_flush_segments(vcpu);
 	}
-	map->host_vsid = vcpu_book3s->vsid_next++;
+	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);
 
 	map->guest_vsid = gvsid;
 	map->valid = true;
@@ -319,9 +319,10 @@
 		return -1;
 	vcpu3s->context_id[0] = err;
 
-	vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
-	vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
-	vcpu3s->vsid_next = vcpu3s->vsid_first;
+	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
+				  << USER_ESID_BITS) - 1;
+	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
 
 	kvmppc_mmu_hpte_init(vcpu);
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ddc485a..c3beaee 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -258,6 +258,8 @@
 			    !(memslot->userspace_addr & (s - 1))) {
 				start &= ~(s - 1);
 				pgsize = s;
+				get_page(hpage);
+				put_page(page);
 				page = hpage;
 			}
 		}
@@ -281,11 +283,8 @@
 	err = 0;
 
  out:
-	if (got) {
-		if (PageHuge(page))
-			page = compound_head(page);
+	if (got)
 		put_page(page);
-	}
 	return err;
 
  up_err:
@@ -678,8 +677,15 @@
 		SetPageDirty(page);
 
  out_put:
-	if (page)
-		put_page(page);
+	if (page) {
+		/*
+		 * We drop pages[0] here, not page, because page might
+		 * have been set to the head page of a compound page, but
+		 * we have to drop the reference on the correct tail
+		 * page to match the get inside gup().
+		 */
+		put_page(pages[0]);
+	}
 	return ret;
 
  out_unlock:
@@ -979,6 +985,7 @@
 			pa = *physp;
 		}
 		page = pfn_to_page(pa >> PAGE_SHIFT);
+		get_page(page);
 	} else {
 		hva = gfn_to_hva_memslot(memslot, gfn);
 		npages = get_user_pages_fast(hva, 1, 1, pages);
@@ -991,8 +998,6 @@
 		page = compound_head(page);
 		psize <<= compound_order(page);
 	}
-	if (!kvm->arch.using_mmu_notifiers)
-		get_page(page);
 	offset = gpa & (psize - 1);
 	if (nb_ret)
 		*nb_ret = psize - offset;
@@ -1003,7 +1008,6 @@
 {
 	struct page *page = virt_to_page(va);
 
-	page = compound_head(page);
 	put_page(page);
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 01294a5..108d1f5 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1192,8 +1192,6 @@
 				continue;
 			pfn = physp[j] >> PAGE_SHIFT;
 			page = pfn_to_page(pfn);
-			if (PageHuge(page))
-				page = compound_head(page);
 			SetPageDirty(page);
 			put_page(page);
 		}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index def880a..cec4dad 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -463,6 +463,7 @@
 				/* insert R and C bits from PTE */
 				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
 				args[j] |= rcbits << (56 - 5);
+				hp[0] = 0;
 				continue;
 			}
 
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 0676ae2..6e6e9ce 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -197,7 +197,8 @@
 	/* Save guest PC and MSR */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
-	andi.	r0,r12,0x2
+	andi.	r0, r12, 0x2
+	cmpwi	cr1, r0, 0
 	beq	1f
 	mfspr	r3,SPRN_HSRR0
 	mfspr	r4,SPRN_HSRR1
@@ -250,6 +251,12 @@
 	beq	ld_last_prev_inst
 	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
 	beq-	ld_last_inst
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
+	beq-	ld_last_inst
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
 
 	b	no_ld_last_inst
 
@@ -316,23 +323,17 @@
 	 * Having set up SRR0/1 with the address where we want
 	 * to continue with relocation on (potentially in module
 	 * space), we either just go straight there with rfi[d],
-	 * or we jump to an interrupt handler with bctr if there
-	 * is an interrupt to be handled first.  In the latter
-	 * case, the rfi[d] at the end of the interrupt handler
-	 * will get us back to where we want to continue.
+	 * or we jump to an interrupt handler if there is an
+	 * interrupt to be handled first.  In the latter case,
+	 * the rfi[d] at the end of the interrupt handler will
+	 * get us back to where we want to continue.
 	 */
 
-	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
-	beq	1f
-	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
-	beq	1f
-	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
-1:	mtctr	r12
-
 	/* Register usage at this point:
 	 *
 	 * R1       = host R1
 	 * R2       = host R2
+	 * R10      = raw exit handler id
 	 * R12      = exit handler id
 	 * R13      = shadow vcpu (32-bit) or PACA (64-bit)
 	 * SVCPU.*  = guest *
@@ -342,12 +343,25 @@
 	PPC_LL	r6, HSTATE_HOST_MSR(r13)
 	PPC_LL	r8, HSTATE_VMHANDLER(r13)
 
-	/* Restore host msr -> SRR1 */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+	beq	cr1, 1f
+	mtspr	SPRN_HSRR1, r6
+	mtspr	SPRN_HSRR0, r8
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
+1:	/* Restore host msr -> SRR1 */
 	mtsrr1	r6
 	/* Load highmem handler address */
 	mtsrr0	r8
 
 	/* RFI into the highmem handler, or jump to interrupt handler */
-	beqctr
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+	beqa	BOOK3S_INTERRUPT_EXTERNAL
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
+	beqa	BOOK3S_INTERRUPT_DECREMENTER
+	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
+	beqa	BOOK3S_INTERRUPT_PERFMON
+
 	RFI
 kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index af1ab5e..5c3cf2d 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -48,7 +48,13 @@
 /*
  * Assembly helpers from arch/powerpc/net/bpf_jit.S:
  */
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+#define DECLARE_LOAD_FUNC(func)	\
+	extern u8 func[], func##_negative_offset[], func##_positive_offset[]
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+DECLARE_LOAD_FUNC(sk_load_byte_msh);
 
 #define FUNCTION_DESCR_SIZE	24
 
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index ff4506e..55ba385 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -31,14 +31,13 @@
  * then branch directly to slow_path_XXX if required.  (In fact, could
  * load a spare GPR with the address of slow_path_generic and pass size
  * as an argument, making the call site a mtlr, li and bllr.)
- *
- * Technically, the "is addr < 0" check is unnecessary & slowing down
- * the ABS path, as it's statically checked on generation.
  */
 	.globl	sk_load_word
 sk_load_word:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_word_neg
+	.globl	sk_load_word_positive_offset
+sk_load_word_positive_offset:
 	/* Are we accessing past headlen? */
 	subi	r_scratch1, r_HL, 4
 	cmpd	r_scratch1, r_addr
@@ -51,7 +50,9 @@
 	.globl	sk_load_half
 sk_load_half:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_half_neg
+	.globl	sk_load_half_positive_offset
+sk_load_half_positive_offset:
 	subi	r_scratch1, r_HL, 2
 	cmpd	r_scratch1, r_addr
 	blt	bpf_slow_path_half
@@ -61,7 +62,9 @@
 	.globl	sk_load_byte
 sk_load_byte:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_byte_neg
+	.globl	sk_load_byte_positive_offset
+sk_load_byte_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte
 	lbzx	r_A, r_D, r_addr
@@ -69,22 +72,20 @@
 
 /*
  * BPF_S_LDX_B_MSH: ldxb  4*([offset]&0xf)
- * r_addr is the offset value, already known positive
+ * r_addr is the offset value
  */
 	.globl sk_load_byte_msh
 sk_load_byte_msh:
+	cmpdi	r_addr, 0
+	blt	bpf_slow_path_byte_msh_neg
+	.globl sk_load_byte_msh_positive_offset
+sk_load_byte_msh_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte_msh
 	lbzx	r_X, r_D, r_addr
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
 
-bpf_error:
-	/* Entered with cr0 = lt */
-	li	r3, 0
-	/* Generated code will 'blt epilogue', returning 0. */
-	blr
-
 /* Call out to skb_copy_bits:
  * We'll need to back up our volatile regs first; we have
  * local variable space at r1+(BPF_PPC_STACK_BASIC).
@@ -136,3 +137,84 @@
 	lbz	r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
+
+/* Call out to bpf_internal_load_pointer_neg_helper:
+ * We'll need to back up our volatile regs first; we have
+ * local variable space at r1+(BPF_PPC_STACK_BASIC).
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define sk_negative_common(SIZE)				\
+	mflr	r0;						\
+	std	r0, 16(r1);					\
+	/* R3 goes in parameter space of caller's frame */	\
+	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1);		\
+	/* R3 = r_skb, as passed */				\
+	mr	r4, r_addr;					\
+	li	r5, SIZE;					\
+	bl	bpf_internal_load_pointer_neg_helper;		\
+	/* R3 != 0 on success */				\
+	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
+	ld	r0, 16(r1);					\
+	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	mtlr	r0;						\
+	cmpldi	r3, 0;						\
+	beq	bpf_error_slow;	/* cr0 = EQ */			\
+	mr	r_addr, r3;					\
+	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	/* Great success! */
+
+bpf_slow_path_word_neg:
+	lis     r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_word_negative_offset
+sk_load_word_negative_offset:
+	sk_negative_common(4)
+	lwz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_half_neg:
+	lis     r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_half_negative_offset
+sk_load_half_negative_offset:
+	sk_negative_common(2)
+	lhz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_neg:
+	lis     r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_negative_offset
+sk_load_byte_negative_offset:
+	sk_negative_common(1)
+	lbz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_msh_neg:
+	lis     r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_msh_negative_offset
+sk_load_byte_msh_negative_offset:
+	sk_negative_common(1)
+	lbz	r_X, 0(r_addr)
+	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
+	blr
+
+bpf_error_slow:
+	/* fabricate a cr0 = lt */
+	li	r_scratch1, -1
+	cmpdi	r_scratch1, 0
+bpf_error:
+	/* Entered with cr0 = lt */
+	li	r3, 0
+	/* Generated code will 'blt epilogue', returning 0. */
+	blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 73619d3..2dc8b14 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -127,6 +127,9 @@
 	PPC_BLR();
 }
 
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+
 /* Assemble the body code between the prologue & epilogue. */
 static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			      struct codegen_context *ctx,
@@ -391,21 +394,16 @@
 
 			/*** Absolute loads from packet header/data ***/
 		case BPF_S_LD_W_ABS:
-			func = sk_load_word;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 			goto common_load;
 		case BPF_S_LD_H_ABS:
-			func = sk_load_half;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 			goto common_load;
 		case BPF_S_LD_B_ABS:
-			func = sk_load_byte;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 		common_load:
-			/*
-			 * Load from [K].  Reference with the (negative)
-			 * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
-			 */
+			/* Load from [K]. */
 			ctx->seen |= SEEN_DATAREF;
-			if ((int)K < 0)
-				return -ENOTSUPP;
 			PPC_LI64(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_LI32(r_addr, K);
@@ -429,7 +427,7 @@
 		common_load_ind:
 			/*
 			 * Load from [X + K].  Negative offsets are tested for
-			 * in the helper functions, and result in a 'ret 0'.
+			 * in the helper functions.
 			 */
 			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
 			PPC_LI64(r_scratch1, func);
@@ -443,13 +441,7 @@
 			break;
 
 		case BPF_S_LDX_B_MSH:
-			/*
-			 * x86 version drops packet (RET 0) when K<0, whereas
-			 * interpreter does allow K<0 (__load_pointer, special
-			 * ancillary data).  common_load returns ENOTSUPP if K<0,
-			 * so we fall back to interpreter & filter works.
-			 */
-			func = sk_load_byte_msh;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 			goto common_load;
 			break;
 
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index d09f3e8..85825b5 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -114,7 +114,7 @@
 		pr_devel("axon_msi: woff %x roff %x msi %x\n",
 			  write_offset, msic->read_offset, msi);
 
-		if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
+		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
 			generic_handle_irq(msi);
 			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
 		} else {
@@ -276,9 +276,6 @@
 	if (rc)
 		return rc;
 
-	/* We rely on being able to stash a virq in a u16 */
-	BUILD_BUG_ON(NR_IRQS > 65536);
-
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		virq = irq_create_direct_mapping(msic->irq_domain);
 		if (virq == NO_IRQ) {
@@ -392,7 +389,8 @@
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
+	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
+	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index f9a48af..8c6dc42 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -248,6 +248,6 @@
 {
 	int	i;
 
-	for (i = 1; i < NR_IRQS; i++)
+	for (i = 1; i < nr_irqs; i++)
 		beat_destruct_irq_plug(i);
 }
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 66ad93d..c4e6305 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -57,9 +57,9 @@
 
 static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
 
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+/* The max irq number this driver deals with is 128; see max_irqs */
+static DECLARE_BITMAP(ppc_lost_interrupts, 128);
+static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
 static int pmac_irq_cascade = -1;
 static struct irq_domain *pmac_pic_host;
 
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index aadbe4f..178a5f3 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -30,9 +30,9 @@
 	  two or more partitions.
 
 config EEH
-	bool "PCI Extended Error Handling (EEH)" if EXPERT
+	bool
 	depends on PPC_PSERIES && PCI
-	default y if !EXPERT
+	default y
 
 config PSERIES_MSI
        bool
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index d3be961..10386b6 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -51,8 +51,7 @@
 static intctl_cpm2_t __iomem *cpm2_intctl;
 
 static struct irq_domain *cpm2_pic_host;
-#define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
 
 static const u_char irq_to_siureg[] = {
 	1, 1, 1, 1, 1, 1, 1, 1,
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index d5f5416..b724622 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -18,69 +18,45 @@
 extern int cpm_get_irq(struct pt_regs *regs);
 
 static struct irq_domain *mpc8xx_pic_host;
-#define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long mpc8xx_cached_irq_mask;
 static sysconf8xx_t __iomem *siu_reg;
 
-int cpm_get_irq(struct pt_regs *regs);
+static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d)
+{
+	return 0x80000000 >> irqd_to_hwirq(d);
+}
 
 static void mpc8xx_unmask_irq(struct irq_data *d)
 {
-	int	bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static void mpc8xx_mask_irq(struct irq_data *d)
 {
-	int	bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static void mpc8xx_ack(struct irq_data *d)
 {
-	int	bit;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
+	out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d));
 }
 
 static void mpc8xx_end_irq(struct irq_data *d)
 {
-	int bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	if (flow_type & IRQ_TYPE_EDGE_FALLING) {
-		irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
+	/* only external IRQ senses are programmable */
+	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) {
 		unsigned int siel = in_be32(&siu_reg->sc_siel);
-
-		/* only external IRQ senses are programmable */
-		if ((hw & 1) == 0) {
-			siel |= (0x80000000 >> hw);
-			out_be32(&siu_reg->sc_siel, siel);
-			__irq_set_handler_locked(d->irq, handle_edge_irq);
-		}
+		siel |= mpc8xx_irqd_to_bit(d);
+		out_be32(&siu_reg->sc_siel, siel);
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
 	}
 	return 0;
 }
@@ -132,6 +108,9 @@
 		IRQ_TYPE_EDGE_FALLING,
 	};
 
+	if (intspec[0] > 0x1f)
+		return 0;
+
 	*out_hwirq = intspec[0];
 	if (intsize > 1 && intspec[1] < 4)
 		*out_flags = map_pic_senses[intspec[1]];
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
index 49a3ece..702256a 100644
--- a/arch/powerpc/sysdev/scom.c
+++ b/arch/powerpc/sysdev/scom.c
@@ -22,6 +22,7 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <asm/debug.h>
 #include <asm/prom.h>
 #include <asm/scom.h>
 
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index ea5e204..cd1d18d 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -188,6 +188,7 @@
 {
 	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
 	unsigned int irq, virq;
+	struct irq_desc *desc;
 
 	/* If we used to be the default server, move to the new "boot_cpuid" */
 	if (hw_cpu == xics_default_server)
@@ -202,8 +203,7 @@
 	/* Allow IPIs again... */
 	icp_ops->set_priority(DEFAULT_PRIORITY);
 
-	for_each_irq(virq) {
-		struct irq_desc *desc;
+	for_each_irq_desc(virq, desc) {
 		struct irq_chip *chip;
 		long server;
 		unsigned long flags;
@@ -212,9 +212,8 @@
 		/* We can't set affinity on ISA interrupts */
 		if (virq < NUM_ISA_INTERRUPTS)
 			continue;
-		desc = irq_to_desc(virq);
 		/* We only need to migrate enabled IRQS */
-		if (!desc || !desc->action)
+		if (!desc->action)
 			continue;
 		if (desc->irq_data.domain != xics_host)
 			continue;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9015060..b42f286 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -217,7 +217,7 @@
 	def_bool y
 	prompt "Kernel support for 31 bit emulation"
 	depends on 64BIT
-	select COMPAT_BINFMT_ELF
+	select COMPAT_BINFMT_ELF if BINFMT_ELF
 	select ARCH_WANT_OLD_COMPAT_IPC
 	help
 	  Select this option if you want to enable your system kernel to
@@ -234,6 +234,25 @@
 config AUDIT_ARCH
 	def_bool y
 
+config HAVE_MARCH_Z900_FEATURES
+	def_bool n
+
+config HAVE_MARCH_Z990_FEATURES
+	def_bool n
+	select HAVE_MARCH_Z900_FEATURES
+
+config HAVE_MARCH_Z9_109_FEATURES
+	def_bool n
+	select HAVE_MARCH_Z990_FEATURES
+
+config HAVE_MARCH_Z10_FEATURES
+	def_bool n
+	select HAVE_MARCH_Z9_109_FEATURES
+
+config HAVE_MARCH_Z196_FEATURES
+	def_bool n
+	select HAVE_MARCH_Z10_FEATURES
+
 comment "Code generation options"
 
 choice
@@ -249,6 +268,7 @@
 
 config MARCH_Z900
 	bool "IBM zSeries model z800 and z900"
+	select HAVE_MARCH_Z900_FEATURES if 64BIT
 	help
 	  Select this to enable optimizations for model z800/z900 (2064 and
 	  2066 series). This will enable some optimizations that are not
@@ -256,6 +276,7 @@
 
 config MARCH_Z990
 	bool "IBM zSeries model z890 and z990"
+	select HAVE_MARCH_Z990_FEATURES if 64BIT
 	help
 	  Select this to enable optimizations for model z890/z990 (2084 and
 	  2086 series). The kernel will be slightly faster but will not work
@@ -263,6 +284,7 @@
 
 config MARCH_Z9_109
 	bool "IBM System z9"
+	select HAVE_MARCH_Z9_109_FEATURES if 64BIT
 	help
 	  Select this to enable optimizations for IBM System z9 (2094 and
 	  2096 series). The kernel will be slightly faster but will not work
@@ -270,6 +292,7 @@
 
 config MARCH_Z10
 	bool "IBM System z10"
+	select HAVE_MARCH_Z10_FEATURES if 64BIT
 	help
 	  Select this to enable optimizations for IBM System z10 (2097 and
 	  2098 series). The kernel will be slightly faster but will not work
@@ -277,6 +300,7 @@
 
 config MARCH_Z196
 	bool "IBM zEnterprise 114 and 196"
+	select HAVE_MARCH_Z196_FEATURES if 64BIT
 	help
 	  Select this to enable optimizations for IBM zEnterprise 114 and 196
 	  (2818 and 2817 series). The kernel will be slightly faster but will
@@ -406,33 +430,6 @@
 
 comment "Misc"
 
-config IPL
-	def_bool y
-	prompt "Builtin IPL record support"
-	help
-	  If you want to use the produced kernel to IPL directly from a
-	  device, you have to merge a bootsector specific to the device
-	  into the first bytes of the kernel. You will have to select the
-	  IPL device.
-
-choice
-	prompt "IPL method generated into head.S"
-	depends on IPL
-	default IPL_VM
-	help
-	  Select "tape" if you want to IPL the image from a Tape.
-
-	  Select "vm_reader" if you are running under VM/ESA and want
-	  to IPL the image from the emulated card reader.
-
-config IPL_TAPE
-	bool "tape"
-
-config IPL_VM
-	bool "vm_reader"
-
-endchoice
-
 source "fs/Kconfig.binfmt"
 
 config FORCE_MAX_ZONEORDER
@@ -569,7 +566,7 @@
 
 config CRASH_DUMP
 	bool "kernel crash dumps"
-	depends on 64BIT
+	depends on 64BIT && SMP
 	select KEXEC
 	help
 	  Generate crash dump after being started by kexec.
diff --git a/arch/s390/boot/.gitignore b/arch/s390/boot/.gitignore
new file mode 100644
index 0000000..017d591
--- /dev/null
+++ b/arch/s390/boot/.gitignore
@@ -0,0 +1,2 @@
+image
+bzImage
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
new file mode 100644
index 0000000..ae06b9b
--- /dev/null
+++ b/arch/s390/boot/compressed/.gitignore
@@ -0,0 +1,3 @@
+sizes.h
+vmlinux
+vmlinux.lds
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 1957a9d..37d2bf2 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -155,7 +155,6 @@
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 451273a..10a5088 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -11,25 +11,28 @@
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
  * to devices.
- *
- * This is very similar to the ppc eieio/sync instruction in that is
- * does a checkpoint syncronisation & makes sure that 
- * all memory ops have completed wrt other CPU's ( see 7-15 POP  DJB ).
  */
 
-#define eieio()	asm volatile("bcr 15,0" : : : "memory")
-#define SYNC_OTHER_CORES(x)   eieio()
-#define mb()    eieio()
-#define rmb()   eieio()
-#define wmb()   eieio()
-#define read_barrier_depends() do { } while(0)
-#define smp_mb()       mb()
-#define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
-#define smp_read_barrier_depends()    read_barrier_depends()
-#define smp_mb__before_clear_bit()     smp_mb()
-#define smp_mb__after_clear_bit()      smp_mb()
+static inline void mb(void)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	/* Fast-BCR without checkpoint synchronization */
+	asm volatile("bcr 14,0" : : : "memory");
+#else
+	asm volatile("bcr 15,0" : : : "memory");
+#endif
+}
 
-#define set_mb(var, value)      do { var = value; mb(); } while (0)
+#define rmb()				mb()
+#define wmb()				mb()
+#define read_barrier_depends()		do { } while(0)
+#define smp_mb()			mb()
+#define smp_rmb()			rmb()
+#define smp_wmb()			wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
+
+#define set_mb(var, value)		do { var = value; mb(); } while (0)
 
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index f2ea2c5..f2ef34f 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -29,9 +29,7 @@
 
 /**
  * struct ccwgroup_driver - driver for ccw group devices
- * @max_slaves: maximum number of slave devices
- * @driver_id: unique id
- * @probe: function called on probe
+ * @setup: function called during device creation to set up the device
  * @remove: function called on remove
  * @set_online: function called when device is set online
  * @set_offline: function called when device is set offline
@@ -44,10 +42,7 @@
  * @driver: embedded driver structure
  */
 struct ccwgroup_driver {
-	int max_slaves;
-	unsigned long driver_id;
-
-	int (*probe) (struct ccwgroup_device *);
+	int (*setup) (struct ccwgroup_device *);
 	void (*remove) (struct ccwgroup_device *);
 	int (*set_online) (struct ccwgroup_device *);
 	int (*set_offline) (struct ccwgroup_device *);
@@ -63,9 +58,8 @@
 
 extern int  ccwgroup_driver_register   (struct ccwgroup_driver *cdriver);
 extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
-int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
-				struct ccw_driver *cdrv, int num_devices,
-				const char *buf);
+int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
+			int num_devices, const char *buf);
 
 extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
 extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index b7ff6af..27216d3 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -38,11 +38,8 @@
 	return (void *) address;
 }
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)	__va(p)
+void *xlate_dev_mem_ptr(unsigned long phys);
+void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 /*
  * Convert a virtual cached pointer to an uncached pointer
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index d75c8e7..f039d86 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -258,11 +258,6 @@
 	u8 val[QDIO_MAX_BUFFERS_PER_Q];
 } __attribute__ ((packed, aligned(256)));
 
-#define CHSC_AC2_MULTI_BUFFER_AVAILABLE	0x0080
-#define CHSC_AC2_MULTI_BUFFER_ENABLED	0x0040
-#define CHSC_AC2_DATA_DIV_AVAILABLE	0x0010
-#define CHSC_AC2_DATA_DIV_ENABLED	0x0002
-
 /**
  * struct qdio_outbuf_state - SBAL related asynchronous operation information
  *   (for communication with upper layer programs)
@@ -293,6 +288,8 @@
 #define AC1_SC_QEBSM_AVAILABLE		0x02	/* available for subchannel */
 #define AC1_SC_QEBSM_ENABLED		0x01	/* enabled for subchannel */
 
+#define CHSC_AC2_MULTI_BUFFER_AVAILABLE	0x0080
+#define CHSC_AC2_MULTI_BUFFER_ENABLED	0x0040
 #define CHSC_AC2_DATA_DIV_AVAILABLE	0x0010
 #define CHSC_AC2_DATA_DIV_ENABLED	0x0002
 
@@ -328,11 +325,13 @@
 			    int, int, unsigned long);
 
 /* qdio errors reported to the upper-layer program */
-#define QDIO_ERROR_SIGA_TARGET			0x02
-#define QDIO_ERROR_SIGA_ACCESS_EXCEPTION	0x10
-#define QDIO_ERROR_SIGA_BUSY			0x20
-#define QDIO_ERROR_ACTIVATE_CHECK_CONDITION	0x40
-#define QDIO_ERROR_SLSB_STATE			0x80
+#define QDIO_ERROR_ACTIVATE			0x0001
+#define QDIO_ERROR_GET_BUF_STATE		0x0002
+#define QDIO_ERROR_SET_BUF_STATE		0x0004
+#define QDIO_ERROR_SLSB_STATE			0x0100
+
+#define QDIO_ERROR_FATAL			0x00ff
+#define QDIO_ERROR_TEMPORARY			0xff00
 
 /* for qdio_cleanup */
 #define QDIO_FLAG_CLEANUP_USING_CLEAR		0x01
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b21e46e..7244e1f 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -82,7 +82,6 @@
 #define MACHINE_FLAG_LPAR	(1UL << 12)
 #define MACHINE_FLAG_SPP	(1UL << 13)
 #define MACHINE_FLAG_TOPOLOGY	(1UL << 14)
-#define MACHINE_FLAG_STCKF	(1UL << 15)
 
 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -101,7 +100,6 @@
 #define MACHINE_HAS_PFMF	(0)
 #define MACHINE_HAS_SPP		(0)
 #define MACHINE_HAS_TOPOLOGY	(0)
-#define MACHINE_HAS_STCKF	(0)
 #else /* __s390x__ */
 #define MACHINE_HAS_IEEE	(1)
 #define MACHINE_HAS_CSP		(1)
@@ -113,7 +111,6 @@
 #define MACHINE_HAS_PFMF	(S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
 #define MACHINE_HAS_SPP		(S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
-#define MACHINE_HAS_STCKF	(S390_lowcore.machine_flags & MACHINE_FLAG_STCKF)
 #endif /* __s390x__ */
 
 #define ZFCPDUMP_HSA_SIZE	(32UL<<20)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index a730381..003b04e 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -95,7 +95,6 @@
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
 #define TIF_SECCOMP		10	/* secure computing */
 #define TIF_SYSCALL_TRACEPOINT	11	/* syscall tracepoint instrumentation */
-#define TIF_SIE			12	/* guest execution active */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling
 					   TIF_NEED_RESCHED */
 #define TIF_31BIT		17	/* 32bit process */
@@ -114,7 +113,6 @@
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
-#define _TIF_SIE		(1<<TIF_SIE)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
 #define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index c447a27..239ece9 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -73,11 +73,15 @@
 
 typedef unsigned long long cycles_t;
 
-static inline unsigned long long get_clock (void)
+static inline unsigned long long get_clock(void)
 {
 	unsigned long long clk;
 
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+	asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
+#else
 	asm volatile("stck %0" : "=Q" (clk) : : "cc");
+#endif
 	return clk;
 }
 
@@ -86,17 +90,6 @@
 	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
-static inline unsigned long long get_clock_fast(void)
-{
-	unsigned long long clk;
-
-	if (MACHINE_HAS_STCKF)
-		asm volatile(".insn	s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
-	else
-		clk = get_clock();
-	return clk;
-}
-
 static inline unsigned long long get_clock_xt(void)
 {
 	unsigned char clk[16];
diff --git a/arch/s390/kernel/.gitignore b/arch/s390/kernel/.gitignore
new file mode 100644
index 0000000..c5f676c
--- /dev/null
+++ b/arch/s390/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 28040fd..377c096 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -437,13 +437,6 @@
 			sp = current->sas_ss_sp + current->sas_ss_size;
 	}
 
-	/* This is the legacy signal stack switching. */
-	else if (!user_mode(regs) &&
-		 !(ka->sa.sa_flags & SA_RESTORER) &&
-		 ka->sa.sa_restorer) {
-		sp = (unsigned long) ka->sa.sa_restorer;
-	}
-
 	return (void __user *)((sp - frame_size) & -8ul);
 }
 
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 9475e68..d84181f 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -374,8 +374,6 @@
 		S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
 	if (test_facility(40))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
-	if (test_facility(25))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_STCKF;
 #endif
 }
 
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 74ee563..1ae93b5 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -145,22 +145,23 @@
  *  gpr2 = prev
  */
 ENTRY(__switch_to)
+	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
+	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
 	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
 	l	%r5,__THREAD_info(%r3)		# get thread_info of next
+	lr	%r15,%r5
+	ahi	%r15,STACK_SIZE			# end of kernel stack of next
+	st	%r3,__LC_CURRENT		# store task struct of next
+	st	%r5,__LC_THREAD_INFO		# store thread info of next
+	st	%r15,__LC_KERNEL_STACK		# store end of kernel stack
+	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
+	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
+	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
 	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
 	jz	0f
 	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
 	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
-0:	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
-	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
-	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
-	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
-	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
-	st	%r3,__LC_CURRENT		# store task struct of next
-	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
-	st	%r5,__LC_THREAD_INFO		# store thread info of next
-	ahi	%r5,STACK_SIZE			# end of kernel stack of next
-	st	%r5,__LC_KERNEL_STACK		# store end of kernel stack
+0:	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
 
 __critical_start:
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 4e1c292..229fe1d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -81,16 +81,14 @@
 
 	.macro	HANDLE_SIE_INTERCEPT scratch
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-	tm	__TI_flags+6(%r12),_TIF_SIE>>8
-	jz	.+42
-	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_SPP
-	jz	.+8
-	.insn	s,0xb2800000,BASED(.Lhost_id)	# set host id
+	tmhh	%r8,0x0001		# interrupting from user ?
+	jnz	.+42
 	lgr	\scratch,%r9
 	slg	\scratch,BASED(.Lsie_loop)
 	clg	\scratch,BASED(.Lsie_length)
-	jhe	.+10
+	jhe	.+22
 	lg	%r9,BASED(.Lsie_loop)
+	SPP	BASED(.Lhost_id)	# set host id
 #endif
 	.endm
 
@@ -148,6 +146,14 @@
 	ssm	__LC_RETURN_PSW
 	.endm
 
+	.macro STCK savearea
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+	.insn	s,0xb27c0000,\savearea		# store clock fast
+#else
+	.insn	s,0xb2050000,\savearea		# store clock
+#endif
+	.endm
+
 	.section .kprobes.text, "ax"
 
 /*
@@ -158,22 +164,23 @@
  *  gpr2 = prev
  */
 ENTRY(__switch_to)
+	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
+	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
 	lg	%r4,__THREAD_info(%r2)		# get thread_info of prev
 	lg	%r5,__THREAD_info(%r3)		# get thread_info of next
+	lgr	%r15,%r5
+	aghi	%r15,STACK_SIZE			# end of kernel stack of next
+	stg	%r3,__LC_CURRENT		# store task struct of next
+	stg	%r5,__LC_THREAD_INFO		# store thread info of next
+	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
+	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
+	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
+	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
 	tm	__TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
 	jz	0f
 	ni	__TI_flags+7(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
 	oi	__TI_flags+7(%r5),_TIF_MCCK_PENDING	# set it in next
-0:	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
-	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
-	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
-	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
-	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
-	stg	%r3,__LC_CURRENT		# store task struct of next
-	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
-	stg	%r5,__LC_THREAD_INFO		# store thread info of next
-	aghi	%r5,STACK_SIZE			# end of kernel stack of next
-	stg	%r5,__LC_KERNEL_STACK		# store end of kernel stack
+0:	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
 
 __critical_start:
@@ -458,7 +465,7 @@
  * IO interrupt handler routine
  */
 ENTRY(io_int_handler)
-	stck	__LC_INT_CLOCK
+	STCK	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
@@ -604,7 +611,7 @@
  * External interrupt handler routine
  */
 ENTRY(ext_int_handler)
-	stck	__LC_INT_CLOCK
+	STCK	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
@@ -622,6 +629,7 @@
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	TRACE_IRQS_OFF
+	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	lghi	%r1,4096
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	llgf	%r3,__LC_EXT_CPU_ADDR	# get cpu address + interruption code
@@ -638,7 +646,7 @@
 	larl	%r1,psw_idle_lpsw+4
 	stg	%r1,__SF_EMPTY+8(%r15)
 	larl	%r1,.Lvtimer_max
-	stck	__IDLE_ENTER(%r2)
+	STCK	__IDLE_ENTER(%r2)
 	ltr	%r5,%r5
 	stpt	__VQ_IDLE_ENTER(%r3)
 	jz	psw_idle_lpsw
@@ -654,7 +662,7 @@
  * Machine check handler routines
  */
 ENTRY(mcck_int_handler)
-	stck	__LC_MCCK_CLOCK
+	STCK	__LC_MCCK_CLOCK
 	la	%r1,4095		# revalidate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
@@ -967,7 +975,6 @@
 	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
 	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
-	oi	__TI_flags+6(%r14),_TIF_SIE>>8
 sie_loop:
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 	tm	__TI_flags+7(%r14),_TIF_EXIT_SIE
@@ -985,7 +992,6 @@
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 sie_exit:
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-	ni	__TI_flags+6(%r14),255-(_TIF_SIE>>8)
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
 	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
@@ -994,7 +1000,6 @@
 sie_fault:
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
-	ni	__TI_flags+6(%r14),255-(_TIF_SIE>>8)
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
 	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index adccd90..4939d15 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -34,125 +34,7 @@
 #endif
 
 __HEAD
-#ifndef CONFIG_IPL
-	.org   0
-	.long  0x00080000,0x80000000+startup	# Just a restart PSW
-#else
-#ifdef CONFIG_IPL_TAPE
-#define IPL_BS 1024
-	.org   0
-	.long  0x00080000,0x80000000+iplstart	# The first 24 bytes are loaded
-	.long  0x27000000,0x60000001		# by ipl to addresses 0-23.
-	.long  0x02000000,0x20000000+IPL_BS	# (a PSW and two CCWs).
-	.long  0x00000000,0x00000000		# external old psw
-	.long  0x00000000,0x00000000		# svc old psw
-	.long  0x00000000,0x00000000		# program check old psw
-	.long  0x00000000,0x00000000		# machine check old psw
-	.long  0x00000000,0x00000000		# io old psw
-	.long  0x00000000,0x00000000
-	.long  0x00000000,0x00000000
-	.long  0x00000000,0x00000000
-	.long  0x000a0000,0x00000058		# external new psw
-	.long  0x000a0000,0x00000060		# svc new psw
-	.long  0x000a0000,0x00000068		# program check new psw
-	.long  0x000a0000,0x00000070		# machine check new psw
-	.long  0x00080000,0x80000000+.Lioint	# io new psw
 
-	.org   0x100
-#
-# subroutine for loading from tape
-# Parameters:
-#  R1 = device number
-#  R2 = load address
-.Lloader:
-	st	%r14,.Lldret
-	la	%r3,.Lorbread		# r3 = address of orb
-	la	%r5,.Lirb		# r5 = address of irb
-	st	%r2,.Lccwread+4 	# initialize CCW data addresses
-	lctl	%c6,%c6,.Lcr6
-	slr	%r2,%r2
-.Lldlp:
-	la	%r6,3			# 3 retries
-.Lssch:
-	ssch	0(%r3)			# load chunk of IPL_BS bytes
-	bnz	.Llderr
-.Lw4end:
-	bas	%r14,.Lwait4io
-	tm	8(%r5),0x82		# do we have a problem ?
-	bnz	.Lrecov
-	slr	%r7,%r7
-	icm	%r7,3,10(%r5)		# get residual count
-	lcr	%r7,%r7
-	la	%r7,IPL_BS(%r7) 	# IPL_BS-residual=#bytes read
-	ar	%r2,%r7 		# add to total size
-	tm	8(%r5),0x01		# found a tape mark ?
-	bnz	.Ldone
-	l	%r0,.Lccwread+4 	# update CCW data addresses
-	ar	%r0,%r7
-	st	%r0,.Lccwread+4
-	b	.Lldlp
-.Ldone:
-	l	%r14,.Lldret
-	br	%r14			# r2 contains the total size
-.Lrecov:
-	bas	%r14,.Lsense		# do the sensing
-	bct	%r6,.Lssch		# dec. retry count & branch
-	b	.Llderr
-#
-# Sense subroutine
-#
-.Lsense:
-	st	%r14,.Lsnsret
-	la	%r7,.Lorbsense
-	ssch	0(%r7)			# start sense command
-	bnz	.Llderr
-	bas	%r14,.Lwait4io
-	l	%r14,.Lsnsret
-	tm	8(%r5),0x82		# do we have a problem ?
-	bnz	.Llderr
-	br	%r14
-#
-# Wait for interrupt subroutine
-#
-.Lwait4io:
-	lpsw	.Lwaitpsw
-.Lioint:
-	c	%r1,0xb8		# compare subchannel number
-	bne	.Lwait4io
-	tsch	0(%r5)
-	slr	%r0,%r0
-	tm	8(%r5),0x82		# do we have a problem ?
-	bnz	.Lwtexit
-	tm	8(%r5),0x04		# got device end ?
-	bz	.Lwait4io
-.Lwtexit:
-	br	%r14
-.Llderr:
-	lpsw	.Lcrash
-
-	.align	8
-.Lorbread:
-	.long	0x00000000,0x0080ff00,.Lccwread
-	.align	8
-.Lorbsense:
-	.long	0x00000000,0x0080ff00,.Lccwsense
-	.align	8
-.Lccwread:
-	.long	0x02200000+IPL_BS,0x00000000
-.Lccwsense:
-	.long	0x04200001,0x00000000
-.Lwaitpsw:
-	.long	0x020a0000,0x80000000+.Lioint
-
-.Lirb:	.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-.Lcr6:	.long	0xff000000
-	.align	8
-.Lcrash:.long	0x000a0000,0x00000000
-.Lldret:.long	0
-.Lsnsret: .long 0
-#endif	/* CONFIG_IPL_TAPE */
-
-#ifdef CONFIG_IPL_VM
 #define IPL_BS	0x730
 	.org	0
 	.long	0x00080000,0x80000000+iplstart	# The first 24 bytes are loaded
@@ -256,7 +138,6 @@
 	.long	0x02600050,0x00000000
 	.endr
 	.long	0x02200050,0x00000000
-#endif	/* CONFIG_IPL_VM */
 
 iplstart:
 	lh	%r1,0xb8		# test if subchannel number
@@ -325,7 +206,6 @@
 	clc	0(3,%r2),.L_eof
 	bz	.Lagain2
 
-#ifdef CONFIG_IPL_VM
 #
 # reset files in VM reader
 #
@@ -358,7 +238,6 @@
 	.long	0x00080000,0x80000000+.Lrdrint
 .Lrdrwaitpsw:
 	.long	0x020a0000,0x80000000+.Lrdrint
-#endif
 
 #
 # everything loaded, go for it
@@ -376,8 +255,6 @@
 .L_eof: .long	0xc5d6c600	 /* C'EOF' */
 .L_hdr: .long	0xc8c4d900	 /* C'HDR' */
 
-#endif	/* CONFIG_IPL */
-
 #
 # SALIPL loader support. Based on a patch by Rob van der Heij.
 # This entry point is called directly from the SALIPL loader and
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index f7582b2..8a4e2b7 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -235,13 +235,6 @@
 			sp = current->sas_ss_sp + current->sas_ss_size;
 	}
 
-	/* This is the legacy signal stack switching. */
-	else if (!user_mode(regs) &&
-		 !(ka->sa.sa_flags & SA_RESTORER) &&
-		 ka->sa.sa_restorer) {
-		sp = (unsigned long) ka->sa.sa_restorer;
-	}
-
 	return (void __user *)((sp - frame_size) & -8ul);
 }
 
@@ -414,15 +407,6 @@
 	struct k_sigaction ka;
 	sigset_t *oldset;
 
-	/*
-	 * We want the common case to go fast, which
-	 * is why we may in certain cases get here from
-	 * kernel mode. Just return without doing anything
-	 * if so.
-	 */
-	if (!user_mode(regs))
-		return;
-
 	if (test_thread_flag(TIF_RESTORE_SIGMASK))
 		oldset = &current->saved_sigmask;
 	else
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1f77227..e505458 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -226,6 +226,8 @@
 	return -ENOMEM;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
 static void pcpu_free_lowcore(struct pcpu *pcpu)
 {
 	pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
@@ -247,6 +249,8 @@
 	}
 }
 
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 {
 	struct _lowcore *lc = pcpu->lowcore;
diff --git a/arch/s390/kernel/vdso32/.gitignore b/arch/s390/kernel/vdso32/.gitignore
new file mode 100644
index 0000000..e45fba9
--- /dev/null
+++ b/arch/s390/kernel/vdso32/.gitignore
@@ -0,0 +1 @@
+vdso32.lds
diff --git a/arch/s390/kernel/vdso64/.gitignore b/arch/s390/kernel/vdso64/.gitignore
new file mode 100644
index 0000000..3fd18cf
--- /dev/null
+++ b/arch/s390/kernel/vdso64/.gitignore
@@ -0,0 +1 @@
+vdso64.lds
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 46ef3fd..72cec9e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -294,7 +294,7 @@
 	down_read(&mm->mmap_sem);
 
 #ifdef CONFIG_PGSTE
-	if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
+	if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
 		address = __gmap_fault(address,
 				     (struct gmap *) S390_lowcore.gmap);
 		if (address == -EFAULT) {
@@ -549,19 +549,15 @@
 	if ((subcode & 0xff00) != __SUBCODE_MASK)
 		return;
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
-	if (subcode & 0x0080) {
-		/* Get the token (= pid of the affected task). */
-		pid = sizeof(void *) == 4 ? param32 : param64;
-		rcu_read_lock();
-		tsk = find_task_by_pid_ns(pid, &init_pid_ns);
-		if (tsk)
-			get_task_struct(tsk);
-		rcu_read_unlock();
-		if (!tsk)
-			return;
-	} else {
-		tsk = current;
-	}
+	/* Get the token (= pid of the affected task). */
+	pid = sizeof(void *) == 4 ? param32 : param64;
+	rcu_read_lock();
+	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+	if (tsk)
+		get_task_struct(tsk);
+	rcu_read_unlock();
+	if (!tsk)
+		return;
 	spin_lock(&pfault_lock);
 	if (subcode & 0x0080) {
 		/* signal bit is set -> a page has been swapped in by VM */
@@ -574,6 +570,7 @@
 			tsk->thread.pfault_wait = 0;
 			list_del(&tsk->thread.list);
 			wake_up_process(tsk);
+			put_task_struct(tsk);
 		} else {
 			/* Completion interrupt was faster than initial
 			 * interrupt. Set pfault_wait to -1 so the initial
@@ -585,24 +582,35 @@
 			if (tsk->state == TASK_RUNNING)
 				tsk->thread.pfault_wait = -1;
 		}
-		put_task_struct(tsk);
 	} else {
 		/* signal bit not set -> a real page is missing. */
-		if (tsk->thread.pfault_wait == -1) {
+		if (WARN_ON_ONCE(tsk != current))
+			goto out;
+		if (tsk->thread.pfault_wait == 1) {
+			/* Already on the list with a reference: put to sleep */
+			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+			set_tsk_need_resched(tsk);
+		} else if (tsk->thread.pfault_wait == -1) {
 			/* Completion interrupt was faster than the initial
 			 * interrupt (pfault_wait == -1). Set pfault_wait
 			 * back to zero and exit. */
 			tsk->thread.pfault_wait = 0;
 		} else {
 			/* Initial interrupt arrived before completion
-			 * interrupt. Let the task sleep. */
+			 * interrupt. Let the task sleep.
+			 * An extra task reference is needed since a different
+			 * cpu may set the task state to TASK_RUNNING again
+			 * before the scheduler is reached. */
+			get_task_struct(tsk);
 			tsk->thread.pfault_wait = 1;
 			list_add(&tsk->thread.list, &pfault_list);
-			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 			set_tsk_need_resched(tsk);
 		}
 	}
+out:
 	spin_unlock(&pfault_lock);
+	put_task_struct(tsk);
 }
 
 static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
@@ -620,6 +628,7 @@
 			list_del(&thread->list);
 			tsk = container_of(thread, struct task_struct, thread);
 			wake_up_process(tsk);
+			put_task_struct(tsk);
 		}
 		spin_unlock_irq(&pfault_lock);
 		break;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 597bb2d..900de2b 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -58,6 +58,8 @@
 	ptep = (pte_t *) page[1].index;
 	if (!ptep)
 		return;
+	clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
+		    PTRS_PER_PTE * sizeof(pte_t));
 	page_table_free(&init_mm, (unsigned long *) ptep);
 	page[1].index = 0;
 }
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index e1335dc..795a0a9 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/gfp.h>
+#include <linux/cpu.h>
 #include <asm/ctl_reg.h>
 
 /*
@@ -166,3 +167,69 @@
 	free_page((unsigned long) buf);
 	return rc;
 }
+
+/*
+ * Check if physical address is within prefix or zero page
+ */
+static int is_swapped(unsigned long addr)
+{
+	unsigned long lc;
+	int cpu;
+
+	if (addr < sizeof(struct _lowcore))
+		return 1;
+	for_each_online_cpu(cpu) {
+		lc = (unsigned long) lowcore_ptr[cpu];
+		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
+			continue;
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Return swapped prefix or zero page address
+ */
+static unsigned long get_swapped(unsigned long addr)
+{
+	unsigned long prefix = store_prefix();
+
+	if (addr < sizeof(struct _lowcore))
+		return addr + prefix;
+	if (addr >= prefix && addr < prefix + sizeof(struct _lowcore))
+		return addr - prefix;
+	return addr;
+}
+
+/*
+ * Convert a physical pointer for /dev/mem access
+ *
+ * For swapped prefix pages a new buffer is returned that contains a copy of
+ * the absolute memory. The buffer size is at most one page.
+ */
+void *xlate_dev_mem_ptr(unsigned long addr)
+{
+	void *bounce = (void *) addr;
+	unsigned long size;
+
+	get_online_cpus();
+	preempt_disable();
+	if (is_swapped(addr)) {
+		size = PAGE_SIZE - (addr & ~PAGE_MASK);
+		bounce = (void *) __get_free_page(GFP_ATOMIC);
+		if (bounce)
+			memcpy_real(bounce, (void *) get_swapped(addr), size);
+	}
+	preempt_enable();
+	put_online_cpus();
+	return bounce;
+}
+
+/*
+ * Free converted buffer for /dev/mem access (if necessary)
+ */
+void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
+{
+	if ((void *) addr != buf)
+		free_page((unsigned long) buf);
+}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 6e765bf..a3db5a3 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -822,6 +822,8 @@
 
 	/* we copy the mm and let dup_mm create the page tables with_pgstes */
 	tsk->mm->context.alloc_pgste = 1;
+	/* make sure that both mms have a correct rss state */
+	sync_mm_rss(tsk->mm);
 	mm = dup_mm(tsk);
 	tsk->mm->context.alloc_pgste = 0;
 	if (!mm)
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 37f2f4a..f4c1c20 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -11,7 +11,7 @@
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
 
-#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )
+#define ATOMIC_INIT(i)	{ (i) }
 
 #define atomic_read(v)		(*(volatile int *)&(v)->counter)
 #define atomic_set(v,i)		((v)->counter = (i))
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 324eef9..e99b104 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -86,7 +86,7 @@
 	pte_t *pte_k;
 
 	/* Make sure we are in vmalloc/module/P3 area: */
-	if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
+	if (!(address >= P3SEG && address < P3_ADDR_MAX))
 		return -1;
 
 	/*
diff --git a/arch/sparc/Kbuild b/arch/sparc/Kbuild
new file mode 100644
index 0000000..5cd0116
--- /dev/null
+++ b/arch/sparc/Kbuild
@@ -0,0 +1,8 @@
+#
+# core part of the sparc kernel
+#
+
+obj-y += kernel/
+obj-y += mm/
+obj-y += math-emu/
+obj-y += net/
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6c0683d..d176c03 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -30,6 +30,7 @@
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select GENERIC_PCI_IOMAP
 	select HAVE_NMI_WATCHDOG if SPARC64
+	select HAVE_BPF_JIT
 
 config SPARC32
 	def_bool !64BIT
@@ -61,6 +62,7 @@
 	select IRQ_PREFLOW_FASTEOI
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select HAVE_C_RECORDMCOUNT
+	select NO_BOOTMEM
 
 config ARCH_DEFCONFIG
 	string
@@ -73,17 +75,12 @@
 	default 32 if SPARC32
 	default 64 if SPARC64
 
-config ARCH_USES_GETTIMEOFFSET
-	bool
-	default y if SPARC32
-
 config GENERIC_CMOS_UPDATE
 	bool
 	default y
 
 config GENERIC_CLOCKEVENTS
-	bool
-	default y if SPARC64
+	def_bool y
 
 config IOMMU_HELPER
 	bool
@@ -154,7 +151,7 @@
 menu "Processor type and features"
 
 config SMP
-	bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
+	bool "Symmetric multi-processing support"
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N. If you have a system with more
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index eddcfb3..b9a72e2 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -19,39 +19,27 @@
 # sparc32
 #
 
-#
-# Uncomment the first KBUILD_CFLAGS if you are doing kgdb source level
-# debugging of the kernel to get the proper debugging information.
-
-AS             := $(AS) -32
-LDFLAGS        := -m elf32_sparc
 CHECKFLAGS     += -D__sparc__
+LDFLAGS        := -m elf32_sparc
 export BITS    := 32
 UTS_MACHINE    := sparc
 
-#KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7
-KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
-KBUILD_AFLAGS += -m32 -Wa,-Av8
-
-#LDFLAGS_vmlinux = -N -Ttext 0xf0004000
-#  Since 2.5.40, the first stage is left not btfix-ed.
-#  Actual linking is done with "make image".
-LDFLAGS_vmlinux = -r
+KBUILD_CFLAGS  += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
+KBUILD_AFLAGS  += -m32 -Wa,-Av8
 
 else
 #####
 # sparc64
 #
 
-CHECKFLAGS      += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64
+CHECKFLAGS    += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64
+LDFLAGS       := -m elf64_sparc
+export BITS   := 64
+UTS_MACHINE   := sparc64
 
-LDFLAGS              := -m elf64_sparc
-export BITS          := 64
-UTS_MACHINE          := sparc64
-
-KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow   \
-                 -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \
-                 -Wa,--undeclared-regs
+KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow
+KBUILD_CFLAGS += -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare
+KBUILD_CFLAGS += -Wa,--undeclared-regs
 KBUILD_CFLAGS += $(call cc-option,-mtune=ultrasparc3)
 KBUILD_AFLAGS += -m64 -mcpu=ultrasparc -Wa,--undeclared-regs
 
@@ -64,25 +52,14 @@
 head-y                 := arch/sparc/kernel/head_$(BITS).o
 head-y                 += arch/sparc/kernel/init_task.o
 
-core-y                 += arch/sparc/kernel/
-core-y                 += arch/sparc/mm/ arch/sparc/math-emu/
+# See arch/sparc/Kbuild for the core part of the kernel
+core-y                 += arch/sparc/
 
 libs-y                 += arch/sparc/prom/
 libs-y                 += arch/sparc/lib/
 
 drivers-$(CONFIG_OPROFILE)	+= arch/sparc/oprofile/
 
-# Export what is needed by arch/sparc/boot/Makefile
-export VMLINUX_INIT VMLINUX_MAIN
-VMLINUX_INIT := $(head-y) $(init-y)
-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
-VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
-VMLINUX_MAIN += $(drivers-y) $(net-y)
-
-ifdef CONFIG_KALLSYMS
-export kallsyms.o := .tmp_kallsyms2.o
-endif
-
 boot := arch/sparc/boot
 
 # Default target
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index d56d199..6e63afb 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -6,8 +6,8 @@
 ROOT_IMG	:= /usr/src/root.img
 ELFTOAOUT	:= elftoaout
 
-hostprogs-y	:= piggyback btfixupprep
-targets		:= tftpboot.img btfix.o btfix.S image zImage vmlinux.aout
+hostprogs-y	:= piggyback
+targets		:= tftpboot.img image zImage vmlinux.aout
 clean-files	:= System.map
 
 quiet_cmd_elftoaout	= ELFTOAOUT $@
@@ -17,58 +17,9 @@
 quiet_cmd_strip		= STRIP   $@
       cmd_strip		= $(STRIP) -R .comment -R .note -K sun4u_init -K _end -K _start $< -o $@
 
-ifeq ($(CONFIG_SPARC32),y)
-quiet_cmd_btfix		= BTFIX   $@
-      cmd_btfix		= $(OBJDUMP) -x vmlinux | $(obj)/btfixupprep > $@
-quiet_cmd_sysmap        = SYSMAP  $(obj)/System.map
-      cmd_sysmap        = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
-quiet_cmd_image = LD      $@
-      cmd_image = $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) $(LDFLAGS_$(@F)) -o $@
-
-define rule_image
-	$(if $($(quiet)cmd_image),               \
-	  echo '  $($(quiet)cmd_image)' &&)      \
-	  $(cmd_image);                          \
-	$(if $($(quiet)cmd_sysmap),              \
-	  echo '  $($(quiet)cmd_sysmap)' &&)  \
-	$(cmd_sysmap) $@ $(obj)/System.map;      \
-	if [ $$? -ne 0 ]; then                   \
-		rm -f $@;                        \
-		/bin/false;                      \
-	fi;                                      \
-	echo 'cmd_$@ := $(cmd_image)' > $(@D)/.$(@F).cmd
-endef
-
-BTOBJS := $(patsubst %/, %/built-in.o, $(VMLINUX_INIT))
-BTLIBS := $(patsubst %/, %/built-in.o, $(VMLINUX_MAIN))
-LDFLAGS_image := -T arch/sparc/kernel/vmlinux.lds $(BTOBJS) \
-                  --start-group $(BTLIBS) --end-group \
-                  $(kallsyms.o) $(obj)/btfix.o
-
-# Link the final image including btfixup'ed symbols.
-# This is a replacement for the link done in the top-level Makefile.
-# Note: No dependency on the prerequisite files since that would require
-# make to try check if they are updated - and due to changes
-# in gcc options (path for example) this would result in
-# these files being recompiled for each build.
-$(obj)/image: $(obj)/btfix.o FORCE
-	$(call if_changed_rule,image)
-
-$(obj)/zImage: $(obj)/image
-	$(call if_changed,strip)
-	@echo '  kernel: $@ is ready'
-
-$(obj)/btfix.S: $(obj)/btfixupprep vmlinux FORCE
-	$(call if_changed,btfix)
-
-endif
-
 ifeq ($(CONFIG_SPARC64),y)
 
 # Actual linking
-$(obj)/image: vmlinux FORCE
-	$(call if_changed,strip)
-	@echo '  kernel: $@ is ready'
 
 $(obj)/zImage: $(obj)/image
 	$(call if_changed,gzip)
@@ -79,6 +30,10 @@
 	@echo '  kernel: $@ is ready'
 else
 
+$(obj)/zImage: $(obj)/image
+	$(call if_changed,strip)
+	@echo '  kernel: $@ is ready'
+
 # The following lines make a readable image for U-Boot.
 #  uImage   - Binary file read by U-boot
 #  uImage.o - object file of uImage for loading with a
@@ -107,6 +62,10 @@
 
 endif
 
+$(obj)/image: vmlinux FORCE
+	$(call if_changed,strip)
+	@echo '  kernel: $@ is ready'
+
 $(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE
 	$(call if_changed,elftoaout)
 	$(call if_changed,piggy)
diff --git a/arch/sparc/boot/btfixupprep.c b/arch/sparc/boot/btfixupprep.c
deleted file mode 100644
index da03115..0000000
--- a/arch/sparc/boot/btfixupprep.c
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
-   Simple utility to prepare vmlinux image for sparc.
-   Resolves all BTFIXUP uses and settings and creates
-   a special .s object to link to the image.
-   
-   Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
-   
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
-   (at your option) any later version.
-   
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  */
-   
-#include <stdio.h>
-#include <string.h>
-#include <ctype.h>
-#include <errno.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <malloc.h>
-
-#define MAXSYMS 1024
-
-static char *symtab = "SYMBOL TABLE:";
-static char *relrec = "RELOCATION RECORDS FOR [";
-static int rellen;
-static int symlen;
-int mode;
-
-struct _btfixup;
-
-typedef struct _btfixuprel {
-	char *sect;
-	unsigned long offset;
-	struct _btfixup *f;
-	int frel;
-	struct _btfixuprel *next;
-} btfixuprel;
-
-typedef struct _btfixup {
-	int type;
-	int setinitval;
-	unsigned int initval;
-	char *initvalstr;
-	char *name;
-	btfixuprel *rel;
-} btfixup;
-
-btfixup array[MAXSYMS];
-int last = 0;
-char buffer[1024];
-unsigned long lastfoffset = -1;
-unsigned long lastfrelno;
-btfixup *lastf;
-
-static void fatal(void) __attribute__((noreturn));
-static void fatal(void)
-{
-	fprintf(stderr, "Malformed output from objdump\n%s\n", buffer);
-	exit(1);
-}
-
-static btfixup *find(int type, char *name)
-{
-	int i;
-	for (i = 0; i < last; i++) {
-		if (array[i].type == type && !strcmp(array[i].name, name))
-			return array + i;
-	}
-	array[last].type = type;
-	array[last].name = strdup(name);
-	array[last].setinitval = 0;
-	if (!array[last].name) fatal();
-	array[last].rel = NULL;
-	last++;
-	if (last >= MAXSYMS) {
-		fprintf(stderr, "Ugh. Something strange. More than %d different BTFIXUP symbols\n", MAXSYMS);
-		exit(1);
-	}
-	return array + last - 1;
-}
-
-static void set_mode (char *buffer)
-{
-  	for (mode = 0;; mode++)
-		if (buffer[mode] < '0' || buffer[mode] > '9')
-			break;
-	if (mode != 8 && mode != 16)
-		fatal();
-}
-
-
-int main(int argc,char **argv)
-{
-	char *p, *q;
-	char *sect;
-	int i, j, k;
-	unsigned int initval;
-	int shift;
-	btfixup *f;
-	btfixuprel *r, **rr;
-	unsigned long offset;
-	char *initvalstr;
-
-	symlen = strlen(symtab);
-	while (fgets (buffer, 1024, stdin) != NULL)
-		if (!strncmp (buffer, symtab, symlen))
-			goto main0;
-	fatal();
-main0:
-	rellen = strlen(relrec);
-	while (fgets (buffer, 1024, stdin) != NULL)
-		if (!strncmp (buffer, relrec, rellen))
-			goto main1;
-	fatal();
-main1:
-	sect = malloc(strlen (buffer + rellen) + 1);
-	if (!sect) fatal();
-	strcpy (sect, buffer + rellen);
-	p = strchr (sect, ']');
-	if (!p) fatal();
-	*p = 0;
-	if (fgets (buffer, 1024, stdin) == NULL)
-		fatal();
-	while (fgets (buffer, 1024, stdin) != NULL) {
-		int nbase;
-		if (!strncmp (buffer, relrec, rellen))
-			goto main1;
-		if (mode == 0)
-			set_mode (buffer);
-		p = strchr (buffer, '\n');
-		if (p) *p = 0;
-		if (strlen (buffer) < 22+mode)
-			continue;
-		if (strncmp (buffer + mode, " R_SPARC_", 9))
-			continue;
-		nbase = 27 - 8 + mode;
-		if (buffer[nbase] != '_' || buffer[nbase+1] != '_' || buffer[nbase+2] != '_')
-			continue;
-		switch (buffer[nbase+3]) {
-			case 'f':	/* CALL */
-			case 'b':	/* BLACKBOX */
-			case 's':	/* SIMM13 */
-			case 'a':	/* HALF */
-			case 'h':	/* SETHI */
-			case 'i':	/* INT */
-				break;
-			default:
-				continue;
-		}
-		p = strchr (buffer + nbase+5, '+');
-		if (p) *p = 0;
-		shift = nbase + 5;
-		if (buffer[nbase+4] == 's' && buffer[nbase+5] == '_') {
-			shift = nbase + 6;
-			if (strcmp (sect, ".init.text")) {
-				fprintf(stderr,
-				    "Wrong use of '%s' BTFIXUPSET in '%s' section.\n"
-				    "BTFIXUPSET_CALL can be used only in"
-				    " __init sections\n",
-				    buffer + shift, sect);
-				exit(1);
-			}
-		} else if (buffer[nbase+4] != '_')
-			continue;
-		if (!strcmp (sect, ".text.exit"))
-			continue;
-		if (strcmp (sect, ".text") &&
-		    strcmp (sect, ".init.text") &&
-		    strcmp (sect, ".fixup") &&
-		    (strcmp (sect, "__ksymtab") || buffer[nbase+3] != 'f')) {
-			if (buffer[nbase+3] == 'f')
-				fprintf(stderr,
-				    "Wrong use of '%s' in '%s' section.\n"
-				    " It can be used only in .text, .init.text,"
-				    " .fixup and __ksymtab\n",
-				    buffer + shift, sect);
-			else
-				fprintf(stderr,
-				    "Wrong use of '%s' in '%s' section.\n"
-				    " It can be only used in .text, .init.text,"
-				    " and .fixup\n", buffer + shift, sect);
-			exit(1);
-		}
-		p = strstr (buffer + shift, "__btset_");
-		if (p && buffer[nbase+4] == 's') {
-			fprintf(stderr, "__btset_ in BTFIXUP name can only be used when defining the variable, not for setting\n%s\n", buffer);
-			exit(1);
-		}
-		initval = 0;
-		initvalstr = NULL;
-		if (p) {
-			if (p[8] != '0' || p[9] != 'x') {
-				fprintf(stderr, "Pre-initialized values can be only initialized with hexadecimal constants starting 0x\n%s\n", buffer);
-				exit(1);
-			}
-			initval = strtoul(p + 10, &q, 16);
-			if (*q || !initval) {
-				fprintf(stderr, "Pre-initialized values can be only in the form name__btset_0xXXXXXXXX where X are hex digits.\nThey cannot be name__btset_0x00000000 though. Use BTFIXUPDEF_XX instead of BTFIXUPDEF_XX_INIT then.\n%s\n", buffer);
-				exit(1);
-			}
-			initvalstr = p + 10;
-			*p = 0;
-		}
-		f = find(buffer[nbase+3], buffer + shift);
-		if (buffer[nbase+4] == 's')
-			continue;
-		switch (buffer[nbase+3]) {
-		case 'f':
-			if (initval) {
-				fprintf(stderr, "Cannot use pre-initialized fixups for calls\n%s\n", buffer);
-				exit(1);
-			}
-			if (!strcmp (sect, "__ksymtab")) {
-				if (strncmp (buffer + mode+9, "32        ", 10)) {
-					fprintf(stderr, "BTFIXUP_CALL in EXPORT_SYMBOL results in relocation other than R_SPARC_32\n\%s\n", buffer);
-					exit(1);
-				}
-			} else if (strncmp (buffer + mode+9, "WDISP30   ", 10) &&
-				   strncmp (buffer + mode+9, "HI22      ", 10) &&
-				   strncmp (buffer + mode+9, "LO10      ", 10)) {
-				fprintf(stderr, "BTFIXUP_CALL results in relocation other than R_SPARC_WDISP30, R_SPARC_HI22 or R_SPARC_LO10\n%s\n", buffer);
-				exit(1);
-			}
-			break;
-		case 'b':
-			if (initval) {
-				fprintf(stderr, "Cannot use pre-initialized fixups for blackboxes\n%s\n", buffer);
-				exit(1);
-			}
-			if (strncmp (buffer + mode+9, "HI22      ", 10)) {
-				fprintf(stderr, "BTFIXUP_BLACKBOX results in relocation other than R_SPARC_HI22\n%s\n", buffer);
-				exit(1);
-			}
-			break;
-		case 's':
-			if (initval + 0x1000 >= 0x2000) {
-				fprintf(stderr, "Wrong initializer for SIMM13. Has to be from $fffff000 to $00000fff\n%s\n", buffer);
-				exit(1);
-			}
-			if (strncmp (buffer + mode+9, "13        ", 10)) {
-				fprintf(stderr, "BTFIXUP_SIMM13 results in relocation other than R_SPARC_13\n%s\n", buffer);
-				exit(1);
-			}
-			break;
-		case 'a':
-			if (initval + 0x1000 >= 0x2000 && (initval & 0x3ff)) {
-				fprintf(stderr, "Wrong initializer for HALF.\n%s\n", buffer);
-				exit(1);
-			}
-			if (strncmp (buffer + mode+9, "13        ", 10)) {
-				fprintf(stderr, "BTFIXUP_HALF results in relocation other than R_SPARC_13\n%s\n", buffer);
-				exit(1);
-			}
-			break;
-		case 'h':
-			if (initval & 0x3ff) {
-				fprintf(stderr, "Wrong initializer for SETHI. Cannot have set low 10 bits\n%s\n", buffer);
-				exit(1);
-			}
-			if (strncmp (buffer + mode+9, "HI22      ", 10)) {
-				fprintf(stderr, "BTFIXUP_SETHI results in relocation other than R_SPARC_HI22\n%s\n", buffer);
-				exit(1);
-			}
-			break;
-		case 'i':
-			if (initval) {
-				fprintf(stderr, "Cannot use pre-initialized fixups for INT\n%s\n", buffer);
-				exit(1);
-			}
-			if (strncmp (buffer + mode+9, "HI22      ", 10) && strncmp (buffer + mode+9, "LO10      ", 10)) {
-				fprintf(stderr, "BTFIXUP_INT results in relocation other than R_SPARC_HI22 and R_SPARC_LO10\n%s\n", buffer);
-				exit(1);
-			}
-			break;
-		}
-		if (!f->setinitval) {
-			f->initval = initval;
-			if (initvalstr) {
-				f->initvalstr = strdup(initvalstr);
-				if (!f->initvalstr) fatal();
-			}
-			f->setinitval = 1;
-		} else if (f->initval != initval) {
-			fprintf(stderr, "Btfixup %s previously used with initializer %s which doesn't match with current initializer\n%s\n",
-					f->name, f->initvalstr ? : "0x00000000", buffer);
-			exit(1);
-		} else if (initval && strcmp(f->initvalstr, initvalstr)) {
-			fprintf(stderr, "Btfixup %s previously used with initializer %s which doesn't match with current initializer.\n"
-					"Initializers have to match literally as well.\n%s\n",
-					f->name, f->initvalstr, buffer);
-			exit(1);
-		}
-		offset = strtoul(buffer, &q, 16);
-		if (q != buffer + mode || (!offset && (mode == 8 ? strncmp (buffer, "00000000 ", 9) : strncmp (buffer, "0000000000000000 ", 17)))) {
-			fprintf(stderr, "Malformed relocation address in\n%s\n", buffer);
-			exit(1);
-		}
-		for (k = 0, r = f->rel, rr = &f->rel; r; rr = &r->next, r = r->next, k++)
-			if (r->offset == offset && !strcmp(r->sect, sect)) {
-				fprintf(stderr, "Ugh. One address has two relocation records\n");
-				exit(1);
-			}
-		*rr = malloc(sizeof(btfixuprel));
-		if (!*rr) fatal();
-		(*rr)->offset = offset;
-		(*rr)->f = NULL;
-		if (buffer[nbase+3] == 'f') {
-			lastf = f;
-			lastfoffset = offset;
-			lastfrelno = k;
-		} else if (lastfoffset + 4 == offset) {
-			(*rr)->f = lastf;
-			(*rr)->frel = lastfrelno;
-		}
-		(*rr)->sect = sect;
-		(*rr)->next = NULL;
-	}
-	printf("! Generated by btfixupprep. Do not edit.\n\n");
-	printf("\t.section\t\".data..init\",#alloc,#write\n\t.align\t4\n\n");
-	printf("\t.global\t___btfixup_start\n___btfixup_start:\n\n");
-	for (i = 0; i < last; i++) {
-		f = array + i;
-		printf("\t.global\t___%cs_%s\n", f->type, f->name);
-		if (f->type == 'f')
-			printf("___%cs_%s:\n\t.word 0x%08x,0,0,", f->type, f->name, f->type << 24);
-		else
-			printf("___%cs_%s:\n\t.word 0x%08x,0,", f->type, f->name, f->type << 24);
-		for (j = 0, r = f->rel; r != NULL; j++, r = r->next);
-		if (j)
-			printf("%d\n\t.word\t", j * 2);
-		else
-			printf("0\n");
-		for (r = f->rel, j--; r != NULL; j--, r = r->next) {
-			if (!strcmp (r->sect, ".text"))
-				printf ("_stext+0x%08lx", r->offset);
-			else if (!strcmp (r->sect, ".init.text"))
-				printf ("__init_begin+0x%08lx", r->offset);
-			else if (!strcmp (r->sect, "__ksymtab"))
-				printf ("__start___ksymtab+0x%08lx", r->offset);
-			else if (!strcmp (r->sect, ".fixup"))
-				printf ("__start___fixup+0x%08lx", r->offset);
-			else
-				fatal();
-			if (f->type == 'f' || !r->f)
-				printf (",0");
-			else
-				printf (",___fs_%s+0x%08x", r->f->name, (4 + r->frel*2)*4 + 4);
-			if (j) printf (",");
-			else printf ("\n");
-		}
-		printf("\n");
-	}
-	printf("\n\t.global\t___btfixup_end\n___btfixup_end:\n");
-	printf("\n\n! Define undefined references\n\n");
-	for (i = 0; i < last; i++) {
-		f = array + i;
-		if (f->type == 'f') {
-			printf("\t.global\t___f_%s\n", f->name);
-			printf("___f_%s:\n", f->name);
-		}
-	}
-	printf("\tretl\n\t nop\n\n");
-	for (i = 0; i < last; i++) {
-		f = array + i;
-		if (f->type != 'f') {
-			if (!f->initval) {
-				printf("\t.global\t___%c_%s\n", f->type, f->name);
-				printf("___%c_%s = 0\n", f->type, f->name);
-			} else {
-				printf("\t.global\t___%c_%s__btset_0x%s\n", f->type, f->name, f->initvalstr);
-				printf("___%c_%s__btset_0x%s = 0x%08x\n", f->type, f->name, f->initvalstr, f->initval);
-			}
-		}
-	}
-	printf("\n\n");
-    	exit(0);
-}
diff --git a/arch/sparc/include/asm/asi.h b/arch/sparc/include/asm/asi.h
index b2e3db6..cbb93e5 100644
--- a/arch/sparc/include/asm/asi.h
+++ b/arch/sparc/include/asm/asi.h
@@ -112,6 +112,20 @@
 
 #define ASI_M_ACTION       0x4c   /* Breakpoint Action Register (GNU/Viking) */
 
+/* LEON ASI */
+#define ASI_LEON_NOCACHE        0x01
+
+#define ASI_LEON_DCACHE_MISS    0x01
+
+#define ASI_LEON_CACHEREGS      0x02
+#define ASI_LEON_IFLUSH         0x10
+#define ASI_LEON_DFLUSH         0x11
+
+#define ASI_LEON_MMUFLUSH       0x18
+#define ASI_LEON_MMUREGS        0x19
+#define ASI_LEON_BYPASS         0x1c
+#define ASI_LEON_FLUSH_PAGE     0x10
+
 /* V9 Architecture mandary ASIs. */
 #define ASI_N			0x04 /* Nucleus				*/
 #define ASI_NL			0x0c /* Nucleus, little endian		*/
diff --git a/arch/sparc/include/asm/asmmacro.h b/arch/sparc/include/asm/asmmacro.h
index a995bf8..02a172f 100644
--- a/arch/sparc/include/asm/asmmacro.h
+++ b/arch/sparc/include/asm/asmmacro.h
@@ -6,17 +6,6 @@
 #ifndef _SPARC_ASMMACRO_H
 #define _SPARC_ASMMACRO_H
 
-#include <asm/btfixup.h>
-#include <asm/asi.h>
-
-#define GET_PROCESSOR4M_ID(reg) \
-	rd	%tbr, %reg; \
-	srl	%reg, 12, %reg; \
-	and	%reg, 3, %reg;
-
-#define GET_PROCESSOR4D_ID(reg) \
-	lda	[%g0] ASI_M_VIKING_TMP1, %reg;
-
 /* All trap entry points _must_ begin with this macro or else you
  * lose.  It makes sure the kernel has a proper window so that
  * c-code can be called.
@@ -31,10 +20,4 @@
 /* All traps low-level code here must end with this macro. */
 #define RESTORE_ALL b ret_trap_entry; clr %l6;
 
-/* sun4 probably wants half word accesses to ASI_SEGMAP, while sun4c+
-   likes byte accesses. These are to avoid ifdef mania. */
-
-#define lduXa	lduba
-#define stXa	stba
-
 #endif /* !(_SPARC_ASMMACRO_H) */
diff --git a/arch/sparc/include/asm/btfixup.h b/arch/sparc/include/asm/btfixup.h
deleted file mode 100644
index 797722c..0000000
--- a/arch/sparc/include/asm/btfixup.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- *  asm/btfixup.h:    Macros for boot time linking.
- *
- *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
- 
-#ifndef _SPARC_BTFIXUP_H
-#define _SPARC_BTFIXUP_H
-
-#include <linux/init.h>
-
-#ifndef __ASSEMBLY__
-
-#ifdef MODULE
-extern unsigned int ___illegal_use_of_BTFIXUP_SIMM13_in_module(void);
-extern unsigned int ___illegal_use_of_BTFIXUP_SETHI_in_module(void);
-extern unsigned int ___illegal_use_of_BTFIXUP_HALF_in_module(void);
-extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
-
-#define BTFIXUP_SIMM13(__name) ___illegal_use_of_BTFIXUP_SIMM13_in_module()
-#define BTFIXUP_HALF(__name) ___illegal_use_of_BTFIXUP_HALF_in_module()
-#define BTFIXUP_SETHI(__name) ___illegal_use_of_BTFIXUP_SETHI_in_module()
-#define BTFIXUP_INT(__name) ___illegal_use_of_BTFIXUP_INT_in_module()
-#define BTFIXUP_BLACKBOX(__name) ___illegal_use_of_BTFIXUP_BLACKBOX_in_module
-
-#else
-
-#define BTFIXUP_SIMM13(__name) ___sf_##__name()
-#define BTFIXUP_HALF(__name) ___af_##__name()
-#define BTFIXUP_SETHI(__name) ___hf_##__name()
-#define BTFIXUP_INT(__name) ((unsigned int)&___i_##__name)
-/* This must be written in assembly and present in a sethi */
-#define BTFIXUP_BLACKBOX(__name) ___b_##__name
-#endif /* MODULE */
-
-/* Fixup call xx */
-
-#define BTFIXUPDEF_CALL(__type, __name, __args...) 					\
-	extern __type ___f_##__name(__args);						\
-	extern unsigned ___fs_##__name[3];
-#define BTFIXUPDEF_CALL_CONST(__type, __name, __args...) 				\
-	extern __type ___f_##__name(__args) __attribute_const__;			\
-	extern unsigned ___fs_##__name[3];
-#define BTFIXUP_CALL(__name) ___f_##__name
-
-#define BTFIXUPDEF_BLACKBOX(__name)							\
-	extern unsigned ___bs_##__name[2];
-
-/* Put bottom 13bits into some register variable */
-
-#define BTFIXUPDEF_SIMM13(__name)							\
-	static inline unsigned int ___sf_##__name(void) __attribute_const__;		\
-	extern unsigned ___ss_##__name[2];						\
-	static inline unsigned int ___sf_##__name(void) {				\
-		unsigned int ret;							\
-		__asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret));			\
-		return ret;								\
-	}
-#define BTFIXUPDEF_SIMM13_INIT(__name,__val)						\
-	static inline unsigned int ___sf_##__name(void) __attribute_const__;		\
-	extern unsigned ___ss_##__name[2];						\
-	static inline unsigned int ___sf_##__name(void) {				\
-		unsigned int ret;							\
-		__asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
-		return ret;								\
-	}
-
-/* Put either bottom 13 bits, or upper 22 bits into some register variable
- * (depending on the value, this will lead into sethi FIX, reg; or
- * mov FIX, reg; )
- */
-
-#define BTFIXUPDEF_HALF(__name)								\
-	static inline unsigned int ___af_##__name(void) __attribute_const__;		\
-	extern unsigned ___as_##__name[2];						\
-	static inline unsigned int ___af_##__name(void) {				\
-		unsigned int ret;							\
-		__asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret));			\
-		return ret;								\
-	}
-#define BTFIXUPDEF_HALF_INIT(__name,__val)						\
-	static inline unsigned int ___af_##__name(void) __attribute_const__;		\
-	extern unsigned ___as_##__name[2];						\
-	static inline unsigned int ___af_##__name(void) {				\
-		unsigned int ret;							\
-		__asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
-		return ret;								\
-	}
-
-/* Put upper 22 bits into some register variable */
-
-#define BTFIXUPDEF_SETHI(__name)							\
-	static inline unsigned int ___hf_##__name(void) __attribute_const__;		\
-	extern unsigned ___hs_##__name[2];						\
-	static inline unsigned int ___hf_##__name(void) {				\
-		unsigned int ret;							\
-		__asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret));		\
-		return ret;								\
-	}
-#define BTFIXUPDEF_SETHI_INIT(__name,__val)						\
-	static inline unsigned int ___hf_##__name(void) __attribute_const__;		\
-	extern unsigned ___hs_##__name[2];						\
-	static inline unsigned int ___hf_##__name(void) {				\
-		unsigned int ret;							\
-		__asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : 	\
-			 "=r"(ret));							\
-		return ret;								\
-	}
-
-/* Put a full 32bit integer into some register variable */
-
-#define BTFIXUPDEF_INT(__name)								\
-	extern unsigned char ___i_##__name;						\
-	extern unsigned ___is_##__name[2];
-
-#define BTFIXUPCALL_NORM	0x00000000			/* Always call */
-#define BTFIXUPCALL_NOP		0x01000000			/* Possibly optimize to nop */
-#define BTFIXUPCALL_RETINT(i)	(0x90102000|((i) & 0x1fff))	/* Possibly optimize to mov i, %o0 */
-#define BTFIXUPCALL_ORINT(i)	(0x90122000|((i) & 0x1fff))	/* Possibly optimize to or %o0, i, %o0 */
-#define BTFIXUPCALL_RETO0	0x01000000			/* Return first parameter, actually a nop */
-#define BTFIXUPCALL_ANDNINT(i)	(0x902a2000|((i) & 0x1fff))	/* Possibly optimize to andn %o0, i, %o0 */
-#define BTFIXUPCALL_SWAPO0O1	0xd27a0000			/* Possibly optimize to swap [%o0],%o1 */
-#define BTFIXUPCALL_SWAPO0G0	0xc07a0000			/* Possibly optimize to swap [%o0],%g0 */
-#define BTFIXUPCALL_SWAPG1G2	0xc4784000			/* Possibly optimize to swap [%g1],%g2 */
-#define BTFIXUPCALL_STG0O0	0xc0220000			/* Possibly optimize to st %g0,[%o0] */
-#define BTFIXUPCALL_STO1O0	0xd2220000			/* Possibly optimize to st %o1,[%o0] */
-
-#define BTFIXUPSET_CALL(__name, __addr, __insn)						\
-	do {										\
-		___fs_##__name[0] |= 1;							\
-		___fs_##__name[1] = (unsigned long)__addr;				\
-		___fs_##__name[2] = __insn;						\
-	} while (0)
-	
-#define BTFIXUPSET_BLACKBOX(__name, __func)						\
-	do {										\
-		___bs_##__name[0] |= 1;							\
-		___bs_##__name[1] = (unsigned long)__func;				\
-	} while (0)
-	
-#define BTFIXUPCOPY_CALL(__name, __from)						\
-	do {										\
-		___fs_##__name[0] |= 1;							\
-		___fs_##__name[1] = ___fs_##__from[1];					\
-		___fs_##__name[2] = ___fs_##__from[2];					\
-	} while (0)
-		
-#define BTFIXUPSET_SIMM13(__name, __val)						\
-	do {										\
-		___ss_##__name[0] |= 1;							\
-		___ss_##__name[1] = (unsigned)__val;					\
-	} while (0)
-	
-#define BTFIXUPCOPY_SIMM13(__name, __from)						\
-	do {										\
-		___ss_##__name[0] |= 1;							\
-		___ss_##__name[1] = ___ss_##__from[1];					\
-	} while (0)
-		
-#define BTFIXUPSET_HALF(__name, __val)							\
-	do {										\
-		___as_##__name[0] |= 1;							\
-		___as_##__name[1] = (unsigned)__val;					\
-	} while (0)
-	
-#define BTFIXUPCOPY_HALF(__name, __from)						\
-	do {										\
-		___as_##__name[0] |= 1;							\
-		___as_##__name[1] = ___as_##__from[1];					\
-	} while (0)
-		
-#define BTFIXUPSET_SETHI(__name, __val)							\
-	do {										\
-		___hs_##__name[0] |= 1;							\
-		___hs_##__name[1] = (unsigned)__val;					\
-	} while (0)
-	
-#define BTFIXUPCOPY_SETHI(__name, __from)						\
-	do {										\
-		___hs_##__name[0] |= 1;							\
-		___hs_##__name[1] = ___hs_##__from[1];					\
-	} while (0)
-		
-#define BTFIXUPSET_INT(__name, __val)							\
-	do {										\
-		___is_##__name[0] |= 1;							\
-		___is_##__name[1] = (unsigned)__val;					\
-	} while (0)
-	
-#define BTFIXUPCOPY_INT(__name, __from)							\
-	do {										\
-		___is_##__name[0] |= 1;							\
-		___is_##__name[1] = ___is_##__from[1];					\
-	} while (0)
-	
-#define BTFIXUPVAL_CALL(__name)								\
-	((unsigned long)___fs_##__name[1])
-	
-extern void btfixup(void);
-
-#else /* __ASSEMBLY__ */
-
-#define BTFIXUP_SETHI(__name)			%hi(___h_ ## __name)
-#define BTFIXUP_SETHI_INIT(__name,__val)	%hi(___h_ ## __name ## __btset_ ## __val)
-
-#endif /* __ASSEMBLY__ */
-	
-#endif /* !(_SPARC_BTFIXUP_H) */
diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
index 69358b5..5bb6991 100644
--- a/arch/sparc/include/asm/cache.h
+++ b/arch/sparc/include/asm/cache.h
@@ -22,118 +22,4 @@
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
-#ifdef CONFIG_SPARC32
-#include <asm/asi.h>
-
-/* Direct access to the instruction cache is provided through and
- * alternate address space.  The IDC bit must be off in the ICCR on
- * HyperSparcs for these accesses to work.  The code below does not do
- * any checking, the caller must do so.  These routines are for
- * diagnostics only, but could end up being useful.  Use with care.
- * Also, you are asking for trouble if you execute these in one of the
- * three instructions following a %asr/%psr access or modification.
- */
-
-/* First, cache-tag access. */
-static inline unsigned int get_icache_tag(int setnum, int tagnum)
-{
-	unsigned int vaddr, retval;
-
-	vaddr = ((setnum&1) << 12) | ((tagnum&0x7f) << 5);
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (vaddr), "i" (ASI_M_TXTC_TAG));
-	return retval;
-}
-
-static inline void put_icache_tag(int setnum, int tagnum, unsigned int entry)
-{
-	unsigned int vaddr;
-
-	vaddr = ((setnum&1) << 12) | ((tagnum&0x7f) << 5);
-	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-			     "r" (entry), "r" (vaddr), "i" (ASI_M_TXTC_TAG) :
-			     "memory");
-}
-
-/* Second cache-data access.  The data is returned two-32bit quantities
- * at a time.
- */
-static inline void get_icache_data(int setnum, int tagnum, int subblock,
-				       unsigned int *data)
-{
-	unsigned int value1, value2, vaddr;
-
-	vaddr = ((setnum&0x1) << 12) | ((tagnum&0x7f) << 5) |
-		((subblock&0x3) << 3);
-	__asm__ __volatile__("ldda [%2] %3, %%g2\n\t"
-			     "or %%g0, %%g2, %0\n\t"
-			     "or %%g0, %%g3, %1\n\t" :
-			     "=r" (value1), "=r" (value2) :
-			     "r" (vaddr), "i" (ASI_M_TXTC_DATA) :
-			     "g2", "g3");
-	data[0] = value1; data[1] = value2;
-}
-
-static inline void put_icache_data(int setnum, int tagnum, int subblock,
-				       unsigned int *data)
-{
-	unsigned int value1, value2, vaddr;
-
-	vaddr = ((setnum&0x1) << 12) | ((tagnum&0x7f) << 5) |
-		((subblock&0x3) << 3);
-	value1 = data[0]; value2 = data[1];
-	__asm__ __volatile__("or %%g0, %0, %%g2\n\t"
-			     "or %%g0, %1, %%g3\n\t"
-			     "stda %%g2, [%2] %3\n\t" : :
-			     "r" (value1), "r" (value2), 
-			     "r" (vaddr), "i" (ASI_M_TXTC_DATA) :
-			     "g2", "g3", "memory" /* no joke */);
-}
-
-/* Different types of flushes with the ICACHE.  Some of the flushes
- * affect both the ICACHE and the external cache.  Others only clear
- * the ICACHE entries on the cpu itself.  V8's (most) allow
- * granularity of flushes on the packet (element in line), whole line,
- * and entire cache (ie. all lines) level.  The ICACHE only flushes are
- * ROSS HyperSparc specific and are in ross.h
- */
-
-/* Flushes which clear out both the on-chip and external caches */
-static inline void flush_ei_page(unsigned int addr)
-{
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-			     "r" (addr), "i" (ASI_M_FLUSH_PAGE) :
-			     "memory");
-}
-
-static inline void flush_ei_seg(unsigned int addr)
-{
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-			     "r" (addr), "i" (ASI_M_FLUSH_SEG) :
-			     "memory");
-}
-
-static inline void flush_ei_region(unsigned int addr)
-{
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-			     "r" (addr), "i" (ASI_M_FLUSH_REGION) :
-			     "memory");
-}
-
-static inline void flush_ei_ctx(unsigned int addr)
-{
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-			     "r" (addr), "i" (ASI_M_FLUSH_CTX) :
-			     "memory");
-}
-
-static inline void flush_ei_user(unsigned int addr)
-{
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-			     "r" (addr), "i" (ASI_M_FLUSH_USER) :
-			     "memory");
-}
-#endif /* CONFIG_SPARC32 */
-
 #endif /* !(_SPARC_CACHE_H) */
diff --git a/arch/sparc/include/asm/cacheflush.h b/arch/sparc/include/asm/cacheflush.h
index 0491680..f6c4839 100644
--- a/arch/sparc/include/asm/cacheflush.h
+++ b/arch/sparc/include/asm/cacheflush.h
@@ -1,5 +1,9 @@
 #ifndef ___ASM_SPARC_CACHEFLUSH_H
 #define ___ASM_SPARC_CACHEFLUSH_H
+
+/* flush addr - to allow use of self-modifying code */
+#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/cacheflush_64.h>
 #else
diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index 68431b4..bb014c2 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -1,56 +1,18 @@
 #ifndef _SPARC_CACHEFLUSH_H
 #define _SPARC_CACHEFLUSH_H
 
-#include <linux/mm.h>		/* Common for other includes */
-// #include <linux/kernel.h> from pgalloc.h
-// #include <linux/sched.h>  from pgalloc.h
+#include <asm/cachetlb_32.h>
 
-// #include <asm/page.h>
-#include <asm/btfixup.h>
-
-/*
- * Fine grained cache flushing.
- */
-#ifdef CONFIG_SMP
-
-BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
-BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
-BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
-BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
-
-#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
-#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
-#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
-#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
-
-BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
-BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
-
-#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
-#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
-
-extern void smp_flush_cache_all(void);
-extern void smp_flush_cache_mm(struct mm_struct *mm);
-extern void smp_flush_cache_range(struct vm_area_struct *vma,
-				  unsigned long start,
-				  unsigned long end);
-extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
-
-extern void smp_flush_page_to_ram(unsigned long page);
-extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
-
-#endif /* CONFIG_SMP */
-
-BTFIXUPDEF_CALL(void, flush_cache_all, void)
-BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
-BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
-BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
-
-#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
-#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
-#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
-#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
-#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
+#define flush_cache_all() \
+	sparc32_cachetlb_ops->cache_all()
+#define flush_cache_mm(mm) \
+	sparc32_cachetlb_ops->cache_mm(mm)
+#define flush_cache_dup_mm(mm) \
+	sparc32_cachetlb_ops->cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+	sparc32_cachetlb_ops->cache_range(vma, start, end)
+#define flush_cache_page(vma,addr,pfn) \
+	sparc32_cachetlb_ops->cache_page(vma, addr)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma, pg)		do { } while (0)
 
@@ -67,11 +29,12 @@
 		memcpy(dst, src, len);				\
 	} while (0)
 
-BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
-BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
-
-#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
-#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
+#define __flush_page_to_ram(addr) \
+	sparc32_cachetlb_ops->page_to_ram(addr)
+#define flush_sig_insns(mm,insn_addr) \
+	sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
+#define flush_page_for_dma(addr) \
+	sparc32_cachetlb_ops->page_for_dma(addr)
 
 extern void sparc_flush_page_to_ram(struct page *page);
 
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index 2efea2f..301736d 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -8,9 +8,6 @@
 #include <linux/mm.h>
 
 /* Cache flush operations. */
-
-
-#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
 #define flushw_all()	__asm__ __volatile__("flushw")
 
 extern void __flushw_user(void);
diff --git a/arch/sparc/include/asm/cachetlb_32.h b/arch/sparc/include/asm/cachetlb_32.h
new file mode 100644
index 0000000..efb1988
--- /dev/null
+++ b/arch/sparc/include/asm/cachetlb_32.h
@@ -0,0 +1,29 @@
+#ifndef _SPARC_CACHETLB_H
+#define _SPARC_CACHETLB_H
+
+struct mm_struct;
+struct vm_area_struct;
+
+struct sparc32_cachetlb_ops {
+	void (*cache_all)(void);
+	void (*cache_mm)(struct mm_struct *);
+	void (*cache_range)(struct vm_area_struct *, unsigned long,
+			    unsigned long);
+	void (*cache_page)(struct vm_area_struct *, unsigned long);
+
+	void (*tlb_all)(void);
+	void (*tlb_mm)(struct mm_struct *);
+	void (*tlb_range)(struct vm_area_struct *, unsigned long,
+			  unsigned long);
+	void (*tlb_page)(struct vm_area_struct *, unsigned long);
+
+	void (*page_to_ram)(unsigned long);
+	void (*sig_insns)(struct mm_struct *, unsigned long);
+	void (*page_for_dma)(unsigned long);
+};
+extern const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
+#ifdef CONFIG_SMP
+extern const struct sparc32_cachetlb_ops *local_ops;
+#endif
+
+#endif /* SPARC_CACHETLB_H */
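
A compact userspace sketch of the ops-table pattern this header introduces
(viking_ops and the helpers are illustrative, not the kernel's): instead of
boot-time patching of every call site (BTFIXUP), one pointer is set to the ops
structure of the detected CPU and the flush operations become ordinary
indirect calls behind unchanged macros.

        /* Userspace sketch, assuming the dispatch scheme described above. */
        #include <stdio.h>

        struct cachetlb_ops {
                void (*cache_all)(void);
                void (*tlb_all)(void);
        };

        static void viking_cache_all(void) { puts("viking: flush caches"); }
        static void viking_tlb_all(void)   { puts("viking: flush TLB"); }

        static const struct cachetlb_ops viking_ops = {
                .cache_all = viking_cache_all,
                .tlb_all   = viking_tlb_all,
        };

        /* Set once during "boot", then only read. */
        static const struct cachetlb_ops *cachetlb_ops;

        #define flush_cache_all()       cachetlb_ops->cache_all()
        #define flush_tlb_all()         cachetlb_ops->tlb_all()

        int main(void)
        {
                cachetlb_ops = &viking_ops;     /* e.g. chosen from a CPU probe */

                flush_cache_all();
                flush_tlb_all();
                return 0;
        }
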
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index c786b0a..1fae1a0 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -11,40 +11,13 @@
 #ifndef __ARCH_SPARC_CMPXCHG__
 #define __ARCH_SPARC_CMPXCHG__
 
-#include <asm/btfixup.h>
-
-/* This has special calling conventions */
-#ifndef CONFIG_SMP
-BTFIXUPDEF_CALL(void, ___xchg32, void)
-#endif
-
 static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
 {
-#ifdef CONFIG_SMP
 	__asm__ __volatile__("swap [%2], %0"
 			     : "=&r" (val)
 			     : "0" (val), "r" (m)
 			     : "memory");
 	return val;
-#else
-	register unsigned long *ptr asm("g1");
-	register unsigned long ret asm("g2");
-
-	ptr = (unsigned long *) m;
-	ret = val;
-
-	/* Note: this is magic and the nop there is
-	   really needed. */
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___f____xchg32\n\t"
-	" nop\n\t"
-	: "=&r" (ret)
-	: "0" (ret), "r" (ptr)
-	: "g3", "g4", "g7", "memory", "cc");
-
-	return ret;
-#endif
 }
 
 extern void __xchg_called_with_bad_pointer(void);
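
With the boot-time-patched helper gone, xchg_u32() always uses the hardware
swap instruction. As a portable analogue (not sparc code), C11
atomic_exchange() expresses the same read-and-replace semantics:

        /* Userspace sketch of atomic exchange semantics. */
        #include <stdatomic.h>
        #include <stdio.h>

        static _Atomic unsigned long lockword;

        int main(void)
        {
                /* Try-lock style use: an old value of 0 means we got it. */
                unsigned long old = atomic_exchange(&lockword, 1UL);

                printf("old value: %lu (lock %staken)\n",
                       old, old ? "already " : "");
                return 0;
        }
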
diff --git a/arch/sparc/include/asm/contregs.h b/arch/sparc/include/asm/contregs.h
index 48fa8a4..b8abdfc 100644
--- a/arch/sparc/include/asm/contregs.h
+++ b/arch/sparc/include/asm/contregs.h
@@ -7,28 +7,6 @@
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
  */
 
-/* 3=sun3
-   4=sun4 (as in sun4 sysmaint student book)
-   c=sun4c (according to davem) */
-
-#define AC_IDPROM     0x00000000    /* 34  ID PROM, R/O, byte, 32 bytes      */
-#define AC_PAGEMAP    0x10000000    /* 3   Pagemap R/W, long                 */
-#define AC_SEGMAP     0x20000000    /* 3   Segment map, byte                 */
-#define AC_CONTEXT    0x30000000    /* 34c current mmu-context               */
-#define AC_SENABLE    0x40000000    /* 34c system dvma/cache/reset enable reg*/
-#define AC_UDVMA_ENB  0x50000000    /* 34  Not used on Sun boards, byte      */
-#define AC_BUS_ERROR  0x60000000    /* 34  Not cleared on read, byte.        */
-#define AC_SYNC_ERR   0x60000000    /*  c fault type                         */
-#define AC_SYNC_VA    0x60000004    /*  c fault virtual address              */
-#define AC_ASYNC_ERR  0x60000008    /*  c asynchronous fault type            */
-#define AC_ASYNC_VA   0x6000000c    /*  c async fault virtual address        */
-#define AC_LEDS       0x70000000    /* 34  Zero turns on LEDs, byte          */
-#define AC_CACHETAGS  0x80000000    /* 34c direct access to the VAC tags     */
-#define AC_CACHEDDATA 0x90000000    /* 3 c direct access to the VAC data     */
-#define AC_UDVMA_MAP  0xD0000000    /* 4  Not used on Sun boards, byte       */
-#define AC_VME_VECTOR 0xE0000000    /* 4  For non-Autovector VME, byte       */
-#define AC_BOOT_SCC   0xF0000000    /* 34  bypass to access Zilog 8530. byte.*/
-
 /* s=Swift, h=Ross_HyperSPARC, v=TI_Viking, t=Tsunami, r=Ross_Cypress        */
 #define AC_M_PCR      0x0000        /* shv Processor Control Reg             */
 #define AC_M_CTPR     0x0100        /* shv Context Table Pointer Reg         */
diff --git a/arch/sparc/include/asm/cpu_type.h b/arch/sparc/include/asm/cpu_type.h
index 4ca184d..84d7d83 100644
--- a/arch/sparc/include/asm/cpu_type.h
+++ b/arch/sparc/include/asm/cpu_type.h
@@ -5,30 +5,24 @@
  * Sparc (general) CPU types
  */
 enum sparc_cpu {
-  sun4        = 0x00,
-  sun4c       = 0x01,
-  sun4m       = 0x02,
-  sun4d       = 0x03,
-  sun4e       = 0x04,
-  sun4u       = 0x05, /* V8 ploos ploos */
-  sun_unknown = 0x06,
-  ap1000      = 0x07, /* almost a sun4m */
-  sparc_leon  = 0x08, /* Leon SoC */
+  sun4m       = 0x00,
+  sun4d       = 0x01,
+  sun4e       = 0x02,
+  sun4u       = 0x03, /* V8 ploos ploos */
+  sun_unknown = 0x04,
+  ap1000      = 0x05, /* almost a sun4m */
+  sparc_leon  = 0x06, /* Leon SoC */
 };
 
 #ifdef CONFIG_SPARC32
 extern enum sparc_cpu sparc_cpu_model;
 
-#define ARCH_SUN4C (sparc_cpu_model==sun4c)
-
 #define SUN4M_NCPUS            4              /* Architectural limit of sun4m. */
 
 #else
 
 #define sparc_cpu_model sun4u
 
-/* This cannot ever be a sun4c :) That's just history. */
-#define ARCH_SUN4C 0
 #endif
 
 #endif /* __ASM_CPU_TYPE_H */
diff --git a/arch/sparc/include/asm/cpudata_32.h b/arch/sparc/include/asm/cpudata_32.h
index a4c5a93..0300d94 100644
--- a/arch/sparc/include/asm/cpudata_32.h
+++ b/arch/sparc/include/asm/cpudata_32.h
@@ -14,7 +14,6 @@
 typedef struct {
 	unsigned long udelay_val;
 	unsigned long clock_tick;
-	unsigned int multiplier;
 	unsigned int counter;
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
diff --git a/arch/sparc/include/asm/cypress.h b/arch/sparc/include/asm/cypress.h
deleted file mode 100644
index 95e9772..0000000
--- a/arch/sparc/include/asm/cypress.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * cypress.h: Cypress module specific definitions and defines.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _SPARC_CYPRESS_H
-#define _SPARC_CYPRESS_H
-
-/* Cypress chips have %psr 'impl' of '0001' and 'vers' of '0001'. */
-
-/* The MMU control register fields on the Sparc Cypress 604/605 MMU's.
- *
- * ---------------------------------------------------------------
- * |implvers| MCA | MCM |MV| MID |BM| C|RSV|MR|CM|CL|CE|RSV|NF|ME|
- * ---------------------------------------------------------------
- *  31    24 23-22 21-20 19 18-15 14 13  12 11 10  9  8 7-2  1  0
- *
- * MCA: MultiChip Access -- Used for configuration of multiple
- *      CY7C604/605 cache units.
- * MCM: MultiChip Mask -- Again, for multiple cache unit config.
- * MV: MultiChip Valid -- Indicates MCM and MCA have valid settings.
- * MID: ModuleID -- Unique processor ID for MBus transactions. (605 only)
- * BM: Boot Mode -- 0 = not in boot mode, 1 = in boot mode
- * C: Cacheable -- Indicates whether accesses are cacheable while
- *    the MMU is off.  0=no 1=yes
- * MR: MemoryReflection -- Indicates whether the bus attached to the
- *     MBus supports memory reflection. 0=no 1=yes (605 only)
- * CM: CacheMode -- Indicates whether the cache is operating in write
- *     through or copy-back mode. 0=write-through 1=copy-back
- * CL: CacheLock -- Indicates if the entire cache is locked or not.
- *     0=not-locked 1=locked  (604 only)
- * CE: CacheEnable -- Is the virtual cache on? 0=no 1=yes
- * NF: NoFault -- Do faults generate traps? 0=yes 1=no
- * ME: MmuEnable -- Is the MMU doing translations? 0=no 1=yes
- */
-
-#define CYPRESS_MCA       0x00c00000
-#define CYPRESS_MCM       0x00300000
-#define CYPRESS_MVALID    0x00080000
-#define CYPRESS_MIDMASK   0x00078000   /* Only on 605 */
-#define CYPRESS_BMODE     0x00004000
-#define CYPRESS_ACENABLE  0x00002000
-#define CYPRESS_MRFLCT    0x00000800   /* Only on 605 */
-#define CYPRESS_CMODE     0x00000400
-#define CYPRESS_CLOCK     0x00000200   /* Only on 604 */
-#define CYPRESS_CENABLE   0x00000100
-#define CYPRESS_NFAULT    0x00000002
-#define CYPRESS_MENABLE   0x00000001
-
-static inline void cypress_flush_page(unsigned long page)
-{
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-			     "r" (page), "i" (ASI_M_FLUSH_PAGE));
-}
-
-static inline void cypress_flush_segment(unsigned long addr)
-{
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-			     "r" (addr), "i" (ASI_M_FLUSH_SEG));
-}
-
-static inline void cypress_flush_region(unsigned long addr)
-{
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-			     "r" (addr), "i" (ASI_M_FLUSH_REGION));
-}
-
-static inline void cypress_flush_context(void)
-{
-	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
-			     "i" (ASI_M_FLUSH_CTX));
-}
-
-/* XXX Displacement flushes for buggy chips and initial testing
- * XXX go here.
- */
-
-#endif /* !(_SPARC_CYPRESS_H) */
diff --git a/arch/sparc/include/asm/dma.h b/arch/sparc/include/asm/dma.h
index b554927..3d434ef 100644
--- a/arch/sparc/include/asm/dma.h
+++ b/arch/sparc/include/asm/dma.h
@@ -92,27 +92,31 @@
 #ifdef CONFIG_SPARC32
 
 /* Routines for data transfer buffers. */
-BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
-BTFIXUPDEF_CALL(void,   mmu_unlockarea, char *, unsigned long)
-
-#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
-#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
-
-struct page;
 struct device;
 struct scatterlist;
 
-/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
-BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, struct device *, char *, unsigned long)
-BTFIXUPDEF_CALL(void,  mmu_get_scsi_sgl, struct device *, struct scatterlist *, int)
-BTFIXUPDEF_CALL(void,  mmu_release_scsi_one, struct device *, __u32, unsigned long)
-BTFIXUPDEF_CALL(void,  mmu_release_scsi_sgl, struct device *, struct scatterlist *, int)
+struct sparc32_dma_ops {
+	__u32 (*get_scsi_one)(struct device *, char *, unsigned long);
+	void (*get_scsi_sgl)(struct device *, struct scatterlist *, int);
+	void (*release_scsi_one)(struct device *, __u32, unsigned long);
+	void (*release_scsi_sgl)(struct device *, struct scatterlist *,int);
+#ifdef CONFIG_SBUS
+	int (*map_dma_area)(struct device *, dma_addr_t *, unsigned long, unsigned long, int);
+	void (*unmap_dma_area)(struct device *, unsigned long, int);
+#endif
+};
+extern const struct sparc32_dma_ops *sparc32_dma_ops;
 
-#define mmu_get_scsi_one(dev,vaddr,len) BTFIXUP_CALL(mmu_get_scsi_one)(dev,vaddr,len)
-#define mmu_get_scsi_sgl(dev,sg,sz) BTFIXUP_CALL(mmu_get_scsi_sgl)(dev,sg,sz)
-#define mmu_release_scsi_one(dev,vaddr,len) BTFIXUP_CALL(mmu_release_scsi_one)(dev,vaddr,len)
-#define mmu_release_scsi_sgl(dev,sg,sz) BTFIXUP_CALL(mmu_release_scsi_sgl)(dev,sg,sz)
+#define mmu_get_scsi_one(dev,vaddr,len) \
+	sparc32_dma_ops->get_scsi_one(dev, vaddr, len)
+#define mmu_get_scsi_sgl(dev,sg,sz) \
+	sparc32_dma_ops->get_scsi_sgl(dev, sg, sz)
+#define mmu_release_scsi_one(dev,vaddr,len) \
+	sparc32_dma_ops->release_scsi_one(dev, vaddr,len)
+#define mmu_release_scsi_sgl(dev,sg,sz) \
+	sparc32_dma_ops->release_scsi_sgl(dev, sg, sz)
 
+#ifdef CONFIG_SBUS
 /*
  * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
  *
@@ -123,17 +127,17 @@
  * Second mapping is for device visible address, or "bus" address.
  * The bus address is returned at '*pba'.
  *
- * These functions seem distinct, but are hard to split. On sun4c,
- * at least for now, 'a' is equal to bus address, and retured in *pba.
+ * These functions seem distinct, but are hard to split.
  * On sun4m, page attributes depend on the CPU type, so we have to
  * know if we are mapping RAM or I/O, so it has to be an additional argument
  * to a separate mapping function for CPU visible mappings.
  */
-BTFIXUPDEF_CALL(int, mmu_map_dma_area, struct device *, dma_addr_t *, unsigned long, unsigned long, int len)
-BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, struct device *, unsigned long busa, int len)
+#define sbus_map_dma_area(dev,pba,va,a,len) \
+	sparc32_dma_ops->map_dma_area(dev, pba, va, a, len)
+#define sbus_unmap_dma_area(dev,ba,len) \
+	sparc32_dma_ops->unmap_dma_area(dev, ba, len)
+#endif /* CONFIG_SBUS */
 
-#define mmu_map_dma_area(dev,pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(dev,pba,va,a,len)
-#define mmu_unmap_dma_area(dev,ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(dev,ba,len)
 #endif
 
 #endif /* !(_ASM_SPARC_DMA_H) */
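
One detail worth noting in the sparc32_dma_ops conversion: the SBUS mapping
hooks are only present in the structure when CONFIG_SBUS is set. A small
userspace sketch of that shape (FAKE_CONFIG_SBUS and the demo functions are
stand-ins, not kernel names):

        /* Userspace sketch of conditionally-present ops members. */
        #define FAKE_CONFIG_SBUS 1

        #include <stdio.h>

        struct dma_ops {
                unsigned int (*get_one)(const char *buf, unsigned long len);
        #if FAKE_CONFIG_SBUS
                int (*map_dma_area)(unsigned long va, int len);
        #endif
        };

        static unsigned int demo_get_one(const char *buf, unsigned long len)
        {
                (void)buf;
                return (unsigned int)len;       /* pretend bus address */
        }

        #if FAKE_CONFIG_SBUS
        static int demo_map_dma_area(unsigned long va, int len)
        {
                printf("map va=%#lx len=%d\n", va, len);
                return 0;
        }
        #endif

        static const struct dma_ops ops = {
                .get_one = demo_get_one,
        #if FAKE_CONFIG_SBUS
                .map_dma_area = demo_map_dma_area,
        #endif
        };

        int main(void)
        {
                printf("bus=%u\n", ops.get_one("abc", 3));
        #if FAKE_CONFIG_SBUS
                ops.map_dma_area(0x1000, 4096);
        #endif
                return 0;
        }
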
diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
index 4269ca6..2d4d755 100644
--- a/arch/sparc/include/asm/elf_32.h
+++ b/arch/sparc/include/asm/elf_32.h
@@ -118,16 +118,9 @@
    instruction set this cpu supports.  This can NOT be done in userspace
    on Sparc.  */
 
-/* Sun4c has none of the capabilities, most sun4m's have them all.
- * XXX This is gross, set some global variable at boot time. -DaveM
- */
-#define ELF_HWCAP	((ARCH_SUN4C) ? 0 : \
-			 (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
-			  HWCAP_SPARC_SWAP | \
-			  ((srmmu_modtype != Cypress && \
-			    srmmu_modtype != Cypress_vE && \
-			    srmmu_modtype != Cypress_vD) ? \
-			   HWCAP_SPARC_MULDIV : 0)))
+/* Most sun4m's have them all.  */
+#define ELF_HWCAP	(HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
+			 HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV)
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index 698d955..fb3f169 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -12,7 +12,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/idprom.h>
-#include <asm/machines.h>
 #include <asm/oplib.h>
 #include <asm/auxio.h>
 #include <asm/irq.h>
@@ -103,25 +102,13 @@
 /* Routines unique to each controller type on a Sun. */
 static void sun_set_dor(unsigned char value, int fdc_82077)
 {
-	if (sparc_cpu_model == sun4c) {
-		unsigned int bits = 0;
-		if (value & 0x10)
-			bits |= AUXIO_FLPY_DSEL;
-		if ((value & 0x80) == 0)
-			bits |= AUXIO_FLPY_EJCT;
-		set_auxio(bits, (~bits) & (AUXIO_FLPY_DSEL|AUXIO_FLPY_EJCT));
-	}
-	if (fdc_82077) {
+	if (fdc_82077)
 		sun_fdc->dor_82077 = value;
-	}
 }
 
 static unsigned char sun_read_dir(void)
 {
-	if (sparc_cpu_model == sun4c)
-		return (get_auxio() & AUXIO_FLPY_DCHG) ? 0x80 : 0;
-	else
-		return sun_fdc->dir_82077;
+	return sun_fdc->dir_82077;
 }
 
 static unsigned char sun_82072_fd_inb(int port)
@@ -242,10 +229,7 @@
 static inline void sun_fd_disable_dma(void)
 {
 	doing_pdma = 0;
-	if (pdma_base) {
-		mmu_unlockarea(pdma_base, pdma_areasize);
-		pdma_base = NULL;
-	}
+	pdma_base = NULL;
 }
 
 static inline void sun_fd_set_dma_mode(int mode)
@@ -275,7 +259,6 @@
 
 static inline void sun_fd_enable_dma(void)
 {
-	pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
 	pdma_base = pdma_vaddr;
 	pdma_areasize = pdma_size;
 }
@@ -301,38 +284,36 @@
 {
 	struct platform_device *op;
 	struct device_node *dp;
-	char state[128];
-	phandle tnode, fd_node;
-	int num_regs;
 	struct resource r;
+	char state[128];
+	phandle fd_node;
+	phandle tnode;
+	int num_regs;
 
 	use_virtual_dma = 1;
 
 	/* Forget it if we aren't on a machine that could possibly
 	 * ever have a floppy drive.
 	 */
-	if((sparc_cpu_model != sun4c && sparc_cpu_model != sun4m) ||
-	   ((idprom->id_machtype == (SM_SUN4C | SM_4C_SLC)) ||
-	    (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC)))) {
+	if (sparc_cpu_model != sun4m) {
 		/* We certainly don't have a floppy controller. */
 		goto no_sun_fdc;
 	}
 	/* Well, try to find one. */
 	tnode = prom_getchild(prom_root_node);
 	fd_node = prom_searchsiblings(tnode, "obio");
-	if(fd_node != 0) {
+	if (fd_node != 0) {
 		tnode = prom_getchild(fd_node);
 		fd_node = prom_searchsiblings(tnode, "SUNW,fdtwo");
 	} else {
 		fd_node = prom_searchsiblings(tnode, "fd");
 	}
-	if(fd_node == 0) {
+	if (fd_node == 0) {
 		goto no_sun_fdc;
 	}
 
 	/* The sun4m lets us know if the controller is actually usable. */
-	if(sparc_cpu_model == sun4m &&
-	   prom_getproperty(fd_node, "status", state, sizeof(state)) != -1) {
+	if (prom_getproperty(fd_node, "status", state, sizeof(state)) != -1) {
 		if(!strcmp(state, "disabled")) {
 			goto no_sun_fdc;
 		}
@@ -343,12 +324,12 @@
 	memset(&r, 0, sizeof(r));
 	r.flags = fd_regs[0].which_io;
 	r.start = fd_regs[0].phys_addr;
-	sun_fdc = (struct sun_flpy_controller *)
-	    of_ioremap(&r, 0, fd_regs[0].reg_size, "floppy");
+	sun_fdc = of_ioremap(&r, 0, fd_regs[0].reg_size, "floppy");
 
 	/* Look up irq in platform_device.
 	 * We try "SUNW,fdtwo" and "fd"
 	 */
+	op = NULL;
 	for_each_node_by_name(dp, "SUNW,fdtwo") {
 		op = of_find_device_by_node(dp);
 		if (op)
@@ -367,7 +348,7 @@
 	FLOPPY_IRQ = op->archdata.irqs[0];
 
 	/* Last minute sanity check... */
-	if(sun_fdc->status_82072 == 0xff) {
+	if (sun_fdc->status_82072 == 0xff) {
 		sun_fdc = NULL;
 		goto no_sun_fdc;
 	}
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index bcef1f5..e204f90 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -161,10 +161,7 @@
 static void sun_fd_disable_dma(void)
 {
 	doing_pdma = 0;
-	if (pdma_base) {
-		mmu_unlockarea(pdma_base, pdma_areasize);
-		pdma_base = NULL;
-	}
+	pdma_base = NULL;
 }
 
 static void sun_fd_set_dma_mode(int mode)
@@ -194,7 +191,6 @@
 
 static void sun_fd_enable_dma(void)
 {
-	pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
 	pdma_base = pdma_vaddr;
 	pdma_areasize = pdma_size;
 }
diff --git a/arch/sparc/include/asm/head_32.h b/arch/sparc/include/asm/head_32.h
index 7c35491..a768748 100644
--- a/arch/sparc/include/asm/head_32.h
+++ b/arch/sparc/include/asm/head_32.h
@@ -2,15 +2,8 @@
 #define __SPARC_HEAD_H
 
 #define KERNBASE        0xf0000000  /* First address the kernel will eventually be */
-#define LOAD_ADDR       0x4000      /* prom jumps to us here unless this is elf /boot */
-#define SUN4C_SEGSZ     (1 << 18)
-#define SRMMU_L1_KBASE_OFFSET ((KERNBASE>>24)<<2)  /* Used in boot remapping. */
-#define INTS_ENAB        0x01           /* entry.S uses this. */
-
-#define SUN4_PROM_VECTOR 0xFFE81000     /* SUN4 PROM needs to be hardwired */
 
 #define WRITE_PAUSE      nop; nop; nop; /* Have to do this after %wim/%psr chg */
-#define NOP_INSN         0x01000000     /* Used to patch sparc_save_state */
 
 /* Here are some trap goodies */
 
@@ -18,9 +11,7 @@
 #define TRAP_ENTRY(type, label) \
 	rd %psr, %l0; b label; rd %wim, %l3; nop;
 
-/* Data/text faults. Defaults to sun4c version at boot time. */
-#define SPARC_TFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 1, %l7;
-#define SPARC_DFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 0, %l7;
+/* Data/text faults */
 #define SRMMU_TFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 1, %l7;
 #define SRMMU_DFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 0, %l7;
 
@@ -80,16 +71,6 @@
 #define TRAP_ENTRY_INTERRUPT(int_level) \
         mov int_level, %l7; rd %psr, %l0; b real_irq_entry; rd %wim, %l3;
 
-/* NMI's (Non Maskable Interrupts) are special, you can't keep them
- * from coming in, and basically if you get one, the shows over. ;(
- * On the sun4c they are usually asynchronous memory errors, on the
- * the sun4m they could be either due to mem errors or a software
- * initiated interrupt from the prom/kern on an SMP box saying "I
- * command you to do CPU tricks, read your mailbox for more info."
- */
-#define NMI_TRAP \
-        rd %wim, %l3; b linux_trap_nmi_sun4c; mov %psr, %l0; nop;
-
 /* Window overflows/underflows are special and we need to try to be as
  * efficient as possible here....
  */
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index a4e457f..24f802b 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -10,19 +10,6 @@
 
 #ifdef CONFIG_SPARC_LEON
 
-#define ASI_LEON_NOCACHE	0x01
-
-#define ASI_LEON_DCACHE_MISS	0x1
-
-#define ASI_LEON_CACHEREGS	0x02
-#define ASI_LEON_IFLUSH		0x10
-#define ASI_LEON_DFLUSH		0x11
-
-#define ASI_LEON_MMUFLUSH	0x18
-#define ASI_LEON_MMUREGS	0x19
-#define ASI_LEON_BYPASS		0x1c
-#define ASI_LEON_FLUSH_PAGE	0x10
-
 /* mmu register access, ASI_LEON_MMUREGS */
 #define LEON_CNR_CTRL		0x000
 #define LEON_CNR_CTXP		0x100
@@ -57,29 +44,6 @@
 #define LEON_IRQMASK_R		0x0000fffe	/* bit 15- 1 of lregs.irqmask */
 #define LEON_IRQPRIO_R		0xfffe0000	/* bit 31-17 of lregs.irqmask */
 
-/* leon uart register definitions */
-#define LEON_OFF_UDATA	0x0
-#define LEON_OFF_USTAT	0x4
-#define LEON_OFF_UCTRL	0x8
-#define LEON_OFF_USCAL	0xc
-
-#define LEON_UCTRL_RE	0x01
-#define LEON_UCTRL_TE	0x02
-#define LEON_UCTRL_RI	0x04
-#define LEON_UCTRL_TI	0x08
-#define LEON_UCTRL_PS	0x10
-#define LEON_UCTRL_PE	0x20
-#define LEON_UCTRL_FL	0x40
-#define LEON_UCTRL_LB	0x80
-
-#define LEON_USTAT_DR	0x01
-#define LEON_USTAT_TS	0x02
-#define LEON_USTAT_TH	0x04
-#define LEON_USTAT_BR	0x08
-#define LEON_USTAT_OV	0x10
-#define LEON_USTAT_PE	0x20
-#define LEON_USTAT_FE	0x40
-
 #define LEON_MCFG2_SRAMDIS		0x00002000
 #define LEON_MCFG2_SDRAMEN		0x00004000
 #define LEON_MCFG2_SRAMBANKSZ		0x00001e00	/* [12-9] */
@@ -89,8 +53,6 @@
 
 #define LEON_TCNT0_MASK	0x7fffff
 
-#define LEON_USTAT_ERROR (LEON_USTAT_OV | LEON_USTAT_PE | LEON_USTAT_FE)
-/* no break yet */
 
 #define ASI_LEON3_SYSCTRL		0x02
 #define ASI_LEON3_SYSCTRL_ICFG		0x08
@@ -278,6 +240,8 @@
 #define LEON2_CFG_SSIZE_MASK 0x00007000UL
 
 #ifndef __ASSEMBLY__
+struct vm_area_struct;
+
 extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
 extern void leon_flush_icache_all(void);
 extern void leon_flush_dcache_all(void);
@@ -285,15 +249,6 @@
 extern void leon_flush_tlb_all(void);
 extern int leon_flush_during_switch;
 extern int leon_flush_needed(void);
-
-struct vm_area_struct;
-extern void leon_flush_icache_all(void);
-extern void leon_flush_dcache_all(void);
-extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
-extern void leon_flush_cache_all(void);
-extern void leon_flush_tlb_all(void);
-extern int leon_flush_during_switch;
-extern int leon_flush_needed(void);
 extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
 
 /* struct that hold LEON3 cache configuration registers */
@@ -321,22 +276,12 @@
 extern void leon_update_virq_handling(unsigned int virq,
 			      irq_flow_handler_t flow_handler,
 			      const char *name, int do_ack);
-extern void leon_clear_clock_irq(void);
-extern void leon_load_profile_irq(int cpu, unsigned int limit);
-extern void leon_init_timers(irq_handler_t counter_fn);
-extern void leon_clear_clock_irq(void);
-extern void leon_load_profile_irq(int cpu, unsigned int limit);
+extern void leon_init_timers(void);
 extern void leon_trans_init(struct device_node *dp);
 extern void leon_node_init(struct device_node *dp, struct device_node ***nextp);
-extern void leon_init_IRQ(void);
-extern void leon_init(void);
-extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
 extern void init_leon(void);
 extern void poke_leonsparc(void);
 extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
-extern int leon_flush_needed(void);
-extern void leon_switch_mm(void);
-extern int srmmu_swprobe_trace;
 extern int leon3_ticker_irq;
 
 #ifdef CONFIG_SMP
diff --git a/arch/sparc/include/asm/machines.h b/arch/sparc/include/asm/machines.h
index cd9c099..fd6ddb0 100644
--- a/arch/sparc/include/asm/machines.h
+++ b/arch/sparc/include/asm/machines.h
@@ -12,11 +12,6 @@
 	unsigned char id_machtype;
 };
 
-/* Current number of machines we know about that has an IDPROM
- * machtype entry including one entry for the 0x80 OBP machines.
- */
-#define NUM_SUN_MACHINES   16
-
 /* The machine type in the idprom area looks like this:
  *
  * ---------------
@@ -24,36 +19,20 @@
  * ---------------
  *  7    4 3    0
  *
- * The ARCH field determines the architecture line (sun4, sun4c, etc).
+ * The ARCH field determines the architecture line (sun4m, etc).
  * The MACH field determines the machine make within that architecture.
  */
 
 #define SM_ARCH_MASK  0xf0
-#define SM_SUN4       0x20
 #define  M_LEON       0x30
-#define SM_SUN4C      0x50
 #define SM_SUN4M      0x70
 #define SM_SUN4M_OBP  0x80
 
 #define SM_TYP_MASK   0x0f
-/* Sun4 machines */
-#define SM_4_260      0x01    /* Sun 4/200 series */
-#define SM_4_110      0x02    /* Sun 4/100 series */
-#define SM_4_330      0x03    /* Sun 4/300 series */
-#define SM_4_470      0x04    /* Sun 4/400 series */
 
 /* Leon machines */
 #define M_LEON3_SOC   0x02    /* Leon3 SoC */
 
-/* Sun4c machines                Full Name              - PROM NAME */
-#define SM_4C_SS1     0x01    /* Sun4c SparcStation 1   - Sun 4/60  */
-#define SM_4C_IPC     0x02    /* Sun4c SparcStation IPC - Sun 4/40  */
-#define SM_4C_SS1PLUS 0x03    /* Sun4c SparcStation 1+  - Sun 4/65  */
-#define SM_4C_SLC     0x04    /* Sun4c SparcStation SLC - Sun 4/20  */
-#define SM_4C_SS2     0x05    /* Sun4c SparcStation 2   - Sun 4/75  */
-#define SM_4C_ELC     0x06    /* Sun4c SparcStation ELC - Sun 4/25  */
-#define SM_4C_IPX     0x07    /* Sun4c SparcStation IPX - Sun 4/50  */
-
 /* Sun4m machines, these predate the OpenBoot.  These values only mean
  * something if the value in the ARCH field is SM_SUN4M, if it is
  * SM_SUN4M_OBP then you have the following situation:
diff --git a/arch/sparc/include/asm/mbus.h b/arch/sparc/include/asm/mbus.h
index 69f07a0..14128bc 100644
--- a/arch/sparc/include/asm/mbus.h
+++ b/arch/sparc/include/asm/mbus.h
@@ -8,14 +8,10 @@
 #define _SPARC_MBUS_H
 
 #include <asm/ross.h>    /* HyperSparc stuff */
-#include <asm/cypress.h> /* Cypress Chips */
 #include <asm/viking.h>  /* Ugh, bug city... */
 
 enum mbus_module {
 	HyperSparc        = 0,
-	Cypress           = 1,
-	Cypress_vE        = 2,
-	Cypress_vD        = 3,
 	Swift_ok          = 4,
 	Swift_bad_c       = 5,
 	Swift_lots_o_bugs = 6,
diff --git a/arch/sparc/include/asm/memreg.h b/arch/sparc/include/asm/memreg.h
deleted file mode 100644
index 845ad2b..0000000
--- a/arch/sparc/include/asm/memreg.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _SPARC_MEMREG_H
-#define _SPARC_MEMREG_H
-/* memreg.h:  Definitions of the values found in the synchronous
- *            and asynchronous memory error registers when a fault
- *            occurs on the sun4c.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-/* First the synchronous error codes, these are usually just
- * normal page faults.
- */
-
-#define SUN4C_SYNC_WDRESET   0x0001  /* watchdog reset */
-#define SUN4C_SYNC_SIZE      0x0002  /* bad access size? whuz this? */
-#define SUN4C_SYNC_PARITY    0x0008  /* bad ram chips caused a parity error */
-#define SUN4C_SYNC_SBUS      0x0010  /* the SBUS had some problems... */
-#define SUN4C_SYNC_NOMEM     0x0020  /* translation to non-existent ram */
-#define SUN4C_SYNC_PROT      0x0040  /* access violated pte protections */
-#define SUN4C_SYNC_NPRESENT  0x0080  /* pte said that page was not present */
-#define SUN4C_SYNC_BADWRITE  0x8000  /* while writing something went bogus */
-
-#define SUN4C_SYNC_BOLIXED  \
-        (SUN4C_SYNC_WDRESET | SUN4C_SYNC_SIZE | SUN4C_SYNC_SBUS | \
-         SUN4C_SYNC_NOMEM | SUN4C_SYNC_PARITY)
-
-/* Now the asynchronous error codes, these are almost always produced
- * by the cache writing things back to memory and getting a bad translation.
- * Bad DVMA transactions can cause these faults too.
- */
-
-#define SUN4C_ASYNC_BADDVMA 0x0010  /* error during DVMA access */
-#define SUN4C_ASYNC_NOMEM   0x0020  /* write back pointed to bad phys addr */
-#define SUN4C_ASYNC_BADWB   0x0080  /* write back points to non-present page */
-
-/* Memory parity error register with associated bit constants. */
-#ifndef __ASSEMBLY__
-extern __volatile__ unsigned long __iomem *sun4c_memerr_reg;
-#endif
-
-#define	SUN4C_MPE_ERROR	0x80	/* Parity error detected. (ro) */
-#define	SUN4C_MPE_MULTI	0x40	/* Multiple parity errors detected. (ro) */
-#define	SUN4C_MPE_TEST	0x20	/* Write inverse parity. (rw) */
-#define	SUN4C_MPE_CHECK	0x10	/* Enable parity checking. (rw) */
-#define	SUN4C_MPE_ERR00	0x08	/* Parity error in bits 0-7. (ro) */
-#define	SUN4C_MPE_ERR08	0x04	/* Parity error in bits 8-15. (ro) */
-#define	SUN4C_MPE_ERR16	0x02	/* Parity error in bits 16-23. (ro) */
-#define	SUN4C_MPE_ERR24	0x01	/* Parity error in bits 24-31. (ro) */
-#define	SUN4C_MPE_ERRS	0x0F	/* Bit mask for the error bits. (ro) */
-
-#endif /* !(_SPARC_MEMREG_H) */
diff --git a/arch/sparc/include/asm/mmu_context_32.h b/arch/sparc/include/asm/mmu_context_32.h
index 671a997b..01456c9 100644
--- a/arch/sparc/include/asm/mmu_context_32.h
+++ b/arch/sparc/include/asm/mmu_context_32.h
@@ -1,8 +1,6 @@
 #ifndef __SPARC_MMU_CONTEXT_H
 #define __SPARC_MMU_CONTEXT_H
 
-#include <asm/btfixup.h>
-
 #ifndef __ASSEMBLY__
 
 #include <asm-generic/mm_hooks.h>
@@ -23,14 +21,11 @@
  * all the page tables have been flushed.  Our job is to destroy
  * any remaining processor-specific state.
  */
-BTFIXUPDEF_CALL(void, destroy_context, struct mm_struct *)
-
-#define destroy_context(mm) BTFIXUP_CALL(destroy_context)(mm)
+void destroy_context(struct mm_struct *mm);
 
 /* Switch the current MM context. */
-BTFIXUPDEF_CALL(void, switch_mm, struct mm_struct *, struct mm_struct *, struct task_struct *)
-
-#define switch_mm(old_mm, mm, tsk) BTFIXUP_CALL(switch_mm)(old_mm, mm, tsk)
+void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
+	       struct task_struct *tsk);
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
diff --git a/arch/sparc/include/asm/obio.h b/arch/sparc/include/asm/obio.h
index 4ade0c8..910c1d9 100644
--- a/arch/sparc/include/asm/obio.h
+++ b/arch/sparc/include/asm/obio.h
@@ -220,19 +220,6 @@
 			      "i" (ASI_M_MXCC));
 }
 
-/* +-------+-------------+-----------+------------------------------------+
- * | bcast |  devid      |   sid     |              levels mask           |
- * +-------+-------------+-----------+------------------------------------+
- *  31      30         23 22       15 14                                 0
- */
-#define IGEN_MESSAGE(bcast, devid, sid, levels) \
-	(((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))
-            
-static inline void sun4d_send_ipi(int cpu, int level)
-{
-	cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
-}
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* !(_SPARC_OBIO_H) */
diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h
index 71e5e9a..2751787 100644
--- a/arch/sparc/include/asm/oplib_32.h
+++ b/arch/sparc/include/asm/oplib_32.h
@@ -105,14 +105,6 @@
 extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
 			 int context, char *program_counter);
 
-/* Sun4/sun4c specific memory-management startup hook. */
-
-/* Map the passed segment in the given context at the passed
- * virtual address.
- */
-extern void prom_putsegment(int context, unsigned long virt_addr,
-			    int physical_segment);
-
 /* Initialize the memory lists based upon the prom version. */
 void prom_meminit(void);
 
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index bb5c2ac..fab78a3 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -14,8 +14,6 @@
 #define PAGE_SIZE    (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK    (~(PAGE_SIZE-1))
 
-#include <asm/btfixup.h>
-
 #ifndef __ASSEMBLY__
 
 #define clear_page(page)	 memset((void *)(page), 0, PAGE_SIZE)
@@ -45,12 +43,6 @@
 
 extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
 
-/* Cache alias structure.  Entry is valid if context != -1. */
-struct cache_palias {
-	unsigned long vaddr;
-	int context;
-};
-
 /* passing structs on the Sparc slow us down tremendously... */
 
 /* #define STRICT_MM_TYPECHECKS */
@@ -116,10 +108,7 @@
 typedef struct page *pgtable_t;
 
 extern unsigned long sparc_unmapped_base;
-
-BTFIXUPDEF_SETHI(sparc_unmapped_base)
-
-#define TASK_UNMAPPED_BASE	BTFIXUP_SETHI(sparc_unmapped_base)
+#define TASK_UNMAPPED_BASE	sparc_unmapped_base
 
 #else /* !(__ASSEMBLY__) */
 
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index ca2b344..e5b169b46 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -4,8 +4,10 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
+#include <asm/pgtsrmmu.h>
+#include <asm/pgtable.h>
+#include <asm/vaddrs.h>
 #include <asm/page.h>
-#include <asm/btfixup.h>
 
 struct page;
 
@@ -15,54 +17,74 @@
 	unsigned long pgtable_cache_sz;
 	unsigned long pgd_cache_sz;
 } pgt_quicklists;
+
+unsigned long srmmu_get_nocache(int size, int align);
+void srmmu_free_nocache(unsigned long vaddr, int size);
+
 #define pgd_quicklist           (pgt_quicklists.pgd_cache)
 #define pmd_quicklist           ((unsigned long *)0)
 #define pte_quicklist           (pgt_quicklists.pte_cache)
 #define pgtable_cache_size      (pgt_quicklists.pgtable_cache_sz)
 #define pgd_cache_size		(pgt_quicklists.pgd_cache_sz)
 
-extern void check_pgt_cache(void);
-BTFIXUPDEF_CALL(void,	 do_check_pgt_cache, int, int)
-#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high)
+#define check_pgt_cache()	do { } while (0)
 
-BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
-#define get_pgd_fast()		BTFIXUP_CALL(get_pgd_fast)()
-
-BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
-#define free_pgd_fast(pgd)	BTFIXUP_CALL(free_pgd_fast)(pgd)
+pgd_t *get_pgd_fast(void);
+static inline void free_pgd_fast(pgd_t *pgd)
+{
+	srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
+}
 
 #define pgd_free(mm, pgd)	free_pgd_fast(pgd)
 #define pgd_alloc(mm)	get_pgd_fast()
 
-BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
-#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
+static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+{
+	unsigned long pa = __nocache_pa((unsigned long)pmdp);
+
+	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (pa >> 4)));
+}
+
 #define pgd_populate(MM, PGD, PMD)      pgd_set(PGD, PMD)
 
-BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
-#define pmd_alloc_one(mm, address)	BTFIXUP_CALL(pmd_alloc_one)(mm, address)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
+				   unsigned long address)
+{
+	return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
+					  SRMMU_PMD_TABLE_SIZE);
+}
 
-BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
-#define free_pmd_fast(pmd)	BTFIXUP_CALL(free_pmd_fast)(pmd)
+static inline void free_pmd_fast(pmd_t * pmd)
+{
+	srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
+}
 
 #define pmd_free(mm, pmd)		free_pmd_fast(pmd)
 #define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
 
-BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
-#define pmd_populate(MM, PMD, PTE)        BTFIXUP_CALL(pmd_populate)(PMD, PTE)
+void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
 #define pmd_pgtable(pmd) pmd_page(pmd)
-BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
-#define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
 
-BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long)
-#define pte_alloc_one(mm, address)	BTFIXUP_CALL(pte_alloc_one)(mm, address)
-BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
-#define pte_alloc_one_kernel(mm, addr)	BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
+void pmd_set(pmd_t *pmdp, pte_t *ptep);
+#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
 
-BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
-#define pte_free_kernel(mm, pte)	BTFIXUP_CALL(free_pte_fast)(pte)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
 
-BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
-#define pte_free(mm, pte)		BTFIXUP_CALL(pte_free)(pte)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long address)
+{
+	return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+}
+
+
+static inline void free_pte_fast(pte_t *pte)
+{
+	srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
+}
+
+#define pte_free_kernel(mm, pte)	free_pte_fast(pte)
+
+void pte_free(struct mm_struct * mm, pgtable_t pte);
 #define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
 
 #endif /* _SPARC_PGALLOC_H */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 3d71018..cbbbed5 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -16,11 +16,9 @@
 #include <linux/spinlock.h>
 #include <linux/swap.h>
 #include <asm/types.h>
-#include <asm/pgtsun4c.h>
 #include <asm/pgtsrmmu.h>
-#include <asm/vac-ops.h>
+#include <asm/vaddrs.h>
 #include <asm/oplib.h>
-#include <asm/btfixup.h>
 #include <asm/cpu_type.h>
 
 
@@ -30,87 +28,55 @@
 extern void load_mmu(void);
 extern unsigned long calc_highpages(void);
 
-BTFIXUPDEF_SIMM13(pgdir_shift)
-BTFIXUPDEF_SETHI(pgdir_size)
-BTFIXUPDEF_SETHI(pgdir_mask)
-
-BTFIXUPDEF_SIMM13(ptrs_per_pmd)
-BTFIXUPDEF_SIMM13(ptrs_per_pgd)
-BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
-
 #define pte_ERROR(e)   __builtin_trap()
 #define pmd_ERROR(e)   __builtin_trap()
 #define pgd_ERROR(e)   __builtin_trap()
 
-BTFIXUPDEF_INT(page_none)
-BTFIXUPDEF_INT(page_copy)
-BTFIXUPDEF_INT(page_readonly)
-BTFIXUPDEF_INT(page_kernel)
-
-#define PMD_SHIFT		SUN4C_PMD_SHIFT
+#define PMD_SHIFT		22
 #define PMD_SIZE        	(1UL << PMD_SHIFT)
 #define PMD_MASK        	(~(PMD_SIZE-1))
 #define PMD_ALIGN(__addr) 	(((__addr) + ~PMD_MASK) & PMD_MASK)
-#define PGDIR_SHIFT     	BTFIXUP_SIMM13(pgdir_shift)
-#define PGDIR_SIZE      	BTFIXUP_SETHI(pgdir_size)
-#define PGDIR_MASK      	BTFIXUP_SETHI(pgdir_mask)
+#define PGDIR_SHIFT     	SRMMU_PGDIR_SHIFT
+#define PGDIR_SIZE      	SRMMU_PGDIR_SIZE
+#define PGDIR_MASK      	SRMMU_PGDIR_MASK
 #define PTRS_PER_PTE    	1024
-#define PTRS_PER_PMD    	BTFIXUP_SIMM13(ptrs_per_pmd)
-#define PTRS_PER_PGD    	BTFIXUP_SIMM13(ptrs_per_pgd)
-#define USER_PTRS_PER_PGD	BTFIXUP_SIMM13(user_ptrs_per_pgd)
+#define PTRS_PER_PMD    	SRMMU_PTRS_PER_PMD
+#define PTRS_PER_PGD    	SRMMU_PTRS_PER_PGD
+#define USER_PTRS_PER_PGD	PAGE_OFFSET / SRMMU_PGDIR_SIZE
 #define FIRST_USER_ADDRESS	0
 #define PTE_SIZE		(PTRS_PER_PTE*4)
 
-#define PAGE_NONE      __pgprot(BTFIXUP_INT(page_none))
-extern pgprot_t PAGE_SHARED;
-#define PAGE_COPY      __pgprot(BTFIXUP_INT(page_copy))
-#define PAGE_READONLY  __pgprot(BTFIXUP_INT(page_readonly))
-
-extern unsigned long page_kernel;
-
-#ifdef MODULE
-#define PAGE_KERNEL	page_kernel
-#else
-#define PAGE_KERNEL    __pgprot(BTFIXUP_INT(page_kernel))
-#endif
+#define PAGE_NONE	SRMMU_PAGE_NONE
+#define PAGE_SHARED	SRMMU_PAGE_SHARED
+#define PAGE_COPY	SRMMU_PAGE_COPY
+#define PAGE_READONLY	SRMMU_PAGE_RDONLY
+#define PAGE_KERNEL	SRMMU_PAGE_KERNEL
 
 /* Top-level page directory */
 extern pgd_t swapper_pg_dir[1024];
 
 extern void paging_init(void);
 
-/* Page table for 0-4MB for everybody, on the Sparc this
- * holds the same as on the i386.
- */
-extern pte_t pg0[1024];
-extern pte_t pg1[1024];
-extern pte_t pg2[1024];
-extern pte_t pg3[1024];
-
 extern unsigned long ptr_in_current_pgd;
 
-/* Here is a trick, since mmap.c need the initializer elements for
- * protection_map[] to be constant at compile time, I set the following
- * to all zeros.  I set it to the real values after I link in the
- * appropriate MMU page table routines at boot time.
- */
-#define __P000  __pgprot(0)
-#define __P001  __pgprot(0)
-#define __P010  __pgprot(0)
-#define __P011  __pgprot(0)
-#define __P100  __pgprot(0)
-#define __P101  __pgprot(0)
-#define __P110  __pgprot(0)
-#define __P111  __pgprot(0)
+/*         xwr */
+#define __P000  PAGE_NONE
+#define __P001  PAGE_READONLY
+#define __P010  PAGE_COPY
+#define __P011  PAGE_COPY
+#define __P100  PAGE_READONLY
+#define __P101  PAGE_READONLY
+#define __P110  PAGE_COPY
+#define __P111  PAGE_COPY
 
-#define __S000	__pgprot(0)
-#define __S001	__pgprot(0)
-#define __S010	__pgprot(0)
-#define __S011	__pgprot(0)
-#define __S100	__pgprot(0)
-#define __S101	__pgprot(0)
-#define __S110	__pgprot(0)
-#define __S111	__pgprot(0)
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_READONLY
+#define __S101	PAGE_READONLY
+#define __S110	PAGE_SHARED
+#define __S111	PAGE_SHARED
 
 extern int num_contexts;
 
@@ -137,82 +103,137 @@
 #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
 
 /*
+ * In general all page table modifications should use the V8 atomic
+ * swap instruction.  This ensures the mmu and the cpu are in sync
+ * with respect to ref/mod bits in the page tables.
  */
-BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
-BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
+static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
+{
+	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
+	return value;
+}
 
-#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
-#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
 
-BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
-BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
+}
+
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+static inline int srmmu_device_memory(unsigned long x)
+{
+	return ((x & 0xF0000000) != 0);
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+	if (srmmu_device_memory(pmd_val(pmd)))
+		BUG();
+	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
+}
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+	if (srmmu_device_memory(pgd_val(pgd))) {
+		return ~0;
+	} else {
+		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
+		return (unsigned long)__nocache_va(v << 4);
+	}
+}
+
+static inline int pte_present(pte_t pte)
+{
+	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
+}
 
 static inline int pte_none(pte_t pte)
 {
 	return !pte_val(pte);
 }
 
-#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
-#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte)
+static inline void __pte_clear(pte_t *ptep)
+{
+	set_pte(ptep, __pte(0));
+}
 
-BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
-BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
-BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	__pte_clear(ptep);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
+}
 
 static inline int pmd_none(pmd_t pmd)
 {
 	return !pmd_val(pmd);
 }
 
-#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
-#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
-#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+	int i;
+	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
+}
 
-BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
-BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
-BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
-BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)
+static inline int pgd_none(pgd_t pgd)          
+{
+	return !(pgd_val(pgd) & 0xFFFFFFF);
+}
 
-#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
-#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
-#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
-#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)
+static inline int pgd_bad(pgd_t pgd)
+{
+	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+	set_pte((pte_t *)pgdp, __pte(0));
+}
 
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-BTFIXUPDEF_HALF(pte_writei)
-BTFIXUPDEF_HALF(pte_dirtyi)
-BTFIXUPDEF_HALF(pte_youngi)
-
-static int pte_write(pte_t pte) __attribute_const__;
 static inline int pte_write(pte_t pte)
 {
-	return pte_val(pte) & BTFIXUP_HALF(pte_writei);
+	return pte_val(pte) & SRMMU_WRITE;
 }
 
-static int pte_dirty(pte_t pte) __attribute_const__;
 static inline int pte_dirty(pte_t pte)
 {
-	return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
+	return pte_val(pte) & SRMMU_DIRTY;
 }
 
-static int pte_young(pte_t pte) __attribute_const__;
 static inline int pte_young(pte_t pte)
 {
-	return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
+	return pte_val(pte) & SRMMU_REF;
 }
 
 /*
  * The following only work if pte_present() is not true.
  */
-BTFIXUPDEF_HALF(pte_filei)
-
-static int pte_file(pte_t pte) __attribute_const__;
 static inline int pte_file(pte_t pte)
 {
-	return pte_val(pte) & BTFIXUP_HALF(pte_filei);
+	return pte_val(pte) & SRMMU_FILE;
 }
 
 static inline int pte_special(pte_t pte)
@@ -220,68 +241,85 @@
 	return 0;
 }
 
-/*
- */
-BTFIXUPDEF_HALF(pte_wrprotecti)
-BTFIXUPDEF_HALF(pte_mkcleani)
-BTFIXUPDEF_HALF(pte_mkoldi)
-
-static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
+	return __pte(pte_val(pte) & ~SRMMU_WRITE);
 }
 
-static pte_t pte_mkclean(pte_t pte) __attribute_const__;
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
+	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
 }
 
-static pte_t pte_mkold(pte_t pte) __attribute_const__;
 static inline pte_t pte_mkold(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
+	return __pte(pte_val(pte) & ~SRMMU_REF);
 }
 
-BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
-BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
-BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return __pte(pte_val(pte) | SRMMU_WRITE);
+}
 
-#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
-#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
-#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | SRMMU_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	return __pte(pte_val(pte) | SRMMU_REF);
+}
 
 #define pte_mkspecial(pte)    (pte)
 
 #define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)
 
-BTFIXUPDEF_CALL(unsigned long,	 pte_pfn, pte_t)
-#define pte_pfn(pte) BTFIXUP_CALL(pte_pfn)(pte)
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	if (srmmu_device_memory(pte_val(pte))) {
+		/* Just return something that will cause
+		 * pfn_valid() to return false.  This makes
+		 * copy_one_pte() just copy the PTE
+		 * over directly.
+		 */
+		return ~0UL;
+	}
+	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
+}
+
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
-BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)
+static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+{
+	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
+}
 
-BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
-BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
-BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t)
+static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
+{
+	return __pte(((page) >> 4) | pgprot_val(pgprot));
+}
 
-#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
-#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
-#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
+static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
+{
+	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
+}
 
-#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot)
-
-BTFIXUPDEF_INT(pte_modify_mask)
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+	prot &= ~__pgprot(SRMMU_CACHE);
+	return prot;
+}
 
 static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
+	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
 		pgprot_val(newprot));
 }
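
The PTE helpers above hard-code the SRMMU layout: the physical address is stored shifted right by 4 bits, with the protection bits in the low byte. A minimal worked sketch (illustrative only, not part of the patch; it assumes PAGE_SHIFT == 12 and that SRMMU_PTE_PMASK masks off the protection bits) shows that pte_pfn() simply undoes mk_pte_phys() for a page-aligned address:

/* Illustrative sketch: round-trip a page-aligned physical address
 * through the helpers defined above.  Assumes PAGE_SHIFT == 12 and
 * that SRMMU_PTE_PMASK strips the low protection bits.
 */
static inline void srmmu_pte_pack_example(void)
{
	unsigned long phys = 0x12345000UL;           /* page-aligned */
	pte_t pte = mk_pte_phys(phys, PAGE_KERNEL);  /* val = (phys >> 4) | prot */
	unsigned long pfn = pte_pfn(pte);            /* (val & PMASK) >> (PAGE_SHIFT - 4) */

	/* pfn == 0x12345 == phys >> PAGE_SHIFT */
	(void)pfn;
}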
 
@@ -294,74 +332,69 @@
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 /* Find an entry in the second-level page table.. */
-BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
-#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)
+static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
+{
+	return (pmd_t *) pgd_page_vaddr(*dir) +
+		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+}
 
 /* Find an entry in the third-level page table.. */
-BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
-#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
+pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);
 
 /*
- * This shortcut works on sun4m (and sun4d) because the nocache area is static,
- * and sun4c is guaranteed to have no highmem anyway.
+ * This shortcut works on sun4m (and sun4d) because the nocache area is static.
  */
 #define pte_offset_map(d, a)		pte_offset_kernel(d,a)
 #define pte_unmap(pte)		do{}while(0)
 
-/* Certain architectures need to do special things when pte's
- * within a page table are directly modified.  Thus, the following
- * hook is made available.
- */
-
-BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)
-
-#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
 struct seq_file;
-BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
-
-#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)
+void mmu_info(struct seq_file *m);
 
 /* Fault handler stuff... */
 #define FAULT_CODE_PROT     0x1
 #define FAULT_CODE_WRITE    0x2
 #define FAULT_CODE_USER     0x4
 
-BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t *)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
 
-#define update_mmu_cache(vma,addr,ptep) BTFIXUP_CALL(update_mmu_cache)(vma,addr,ptep)
-
-BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
-    unsigned long, unsigned int)
-BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
-#define sparc_mapiorange(bus,pa,va,len) BTFIXUP_CALL(sparc_mapiorange)(bus,pa,va,len)
-#define sparc_unmapiorange(va,len) BTFIXUP_CALL(sparc_unmapiorange)(va,len)
-
-extern int invalid_segment;
+void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
+                      unsigned long xva, unsigned int len);
+void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);
 
 /* Encode and de-code a swap entry */
-BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
-BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
-BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)
+static inline unsigned long __swp_type(swp_entry_t entry)
+{
+	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
+}
 
-#define __swp_type(__x)			BTFIXUP_CALL(__swp_type)(__x)
-#define __swp_offset(__x)		BTFIXUP_CALL(__swp_offset)(__x)
-#define __swp_entry(__type,__off)	BTFIXUP_CALL(__swp_entry)(__type,__off)
+static inline unsigned long __swp_offset(swp_entry_t entry)
+{
+	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
+}
+
+static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
+{
+	return (swp_entry_t) {
+		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
+		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
+}
 
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 
 /* file-offset-in-pte helpers */
-BTFIXUPDEF_CALL(unsigned long, pte_to_pgoff, pte_t pte);
-BTFIXUPDEF_CALL(pte_t, pgoff_to_pte, unsigned long pgoff);
+static inline unsigned long pte_to_pgoff(pte_t pte)
+{
+	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
+}
 
-#define pte_to_pgoff(pte) BTFIXUP_CALL(pte_to_pgoff)(pte)
-#define pgoff_to_pte(off) BTFIXUP_CALL(pgoff_to_pte)(off)
+static inline pte_t pgoff_to_pte(unsigned long pgoff)
+{
+	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
+}
 
 /*
  * This is made a constant because mm/fremap.c required a constant.
- * Note that layout of these bits is different between sun4c.c and srmmu.c.
  */
 #define PTE_FILE_MAX_BITS 24
 
@@ -399,9 +432,6 @@
 __get_phys (unsigned long addr)
 {
 	switch (sparc_cpu_model){
-	case sun4:
-	case sun4c:
-		return sun4c_get_pte (addr) << PAGE_SHIFT;
 	case sun4m:
 	case sun4d:
 		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
@@ -414,9 +444,6 @@
 __get_iospace (unsigned long addr)
 {
 	switch (sparc_cpu_model){
-	case sun4:
-	case sun4c:
-		return -1; /* Don't check iospace on sun4c */
 	case sun4m:
 	case sun4d:
 		return (srmmu_get_pte (addr) >> 28);
@@ -463,7 +490,7 @@
 		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
 		flush_tlb_page(__vma, __address);			  \
 	}								  \
-	(sparc_cpu_model == sun4c) || __changed;			  \
+	__changed;							  \
 })
 
 #include <asm-generic/pgtable.h>
@@ -471,10 +498,8 @@
 #endif /* !(__ASSEMBLY__) */
 
 #define VMALLOC_START           _AC(0xfe600000,UL)
-/* XXX Alter this when I get around to fixing sun4c - Anton */
 #define VMALLOC_END             _AC(0xffc00000,UL)
 
-
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
 #define HAVE_ARCH_UNMAPPED_AREA
 
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 76e4a52..61210db 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -717,10 +717,6 @@
 struct seq_file;
 extern void mmu_info(struct seq_file *);
 
-/* These do nothing with the way I have things setup. */
-#define mmu_lockarea(vaddr, len)		(vaddr)
-#define mmu_unlockarea(vaddr, len)		do { } while(0)
-
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index f6ae2b2..cb82870 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -173,17 +173,6 @@
 			     "memory");
 }
 
-static inline unsigned long srmmu_get_ctable_ptr(void)
-{
-	unsigned int retval;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (retval) :
-			     "r" (SRMMU_CTXTBL_PTR),
-			     "i" (ASI_M_MMUREGS));
-	return (retval & SRMMU_CTX_PMASK) << 4;
-}
-
 static inline void srmmu_set_context(int context)
 {
 	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
@@ -231,42 +220,6 @@
 }
 
 /* These flush types are not available on all chips... */
-static inline void srmmu_flush_tlb_ctx(void)
-{
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
-			     "r" (0x300),        /* Flush TLB ctx.. */
-			     "i" (ASI_M_FLUSH_PROBE) : "memory");
-
-}
-
-static inline void srmmu_flush_tlb_region(unsigned long addr)
-{
-	addr &= SRMMU_PGDIR_MASK;
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
-			     "r" (addr | 0x200), /* Flush TLB region.. */
-			     "i" (ASI_M_FLUSH_PROBE) : "memory");
-
-}
-
-
-static inline void srmmu_flush_tlb_segment(unsigned long addr)
-{
-	addr &= SRMMU_REAL_PMD_MASK;
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
-			     "r" (addr | 0x100), /* Flush TLB segment.. */
-			     "i" (ASI_M_FLUSH_PROBE) : "memory");
-
-}
-
-static inline void srmmu_flush_tlb_page(unsigned long page)
-{
-	page &= PAGE_MASK;
-	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
-			     "r" (page),        /* Flush TLB page.. */
-			     "i" (ASI_M_FLUSH_PROBE) : "memory");
-
-}
-
 #ifndef CONFIG_SPARC_LEON
 static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
 {
@@ -294,9 +247,6 @@
 	return entry;
 }
 
-extern unsigned long (*srmmu_read_physical)(unsigned long paddr);
-extern void (*srmmu_write_physical)(unsigned long paddr, unsigned long word);
-
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(_SPARC_PGTSRMMU_H) */
diff --git a/arch/sparc/include/asm/pgtsun4c.h b/arch/sparc/include/asm/pgtsun4c.h
deleted file mode 100644
index aeb25e9..0000000
--- a/arch/sparc/include/asm/pgtsun4c.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * pgtsun4c.h:  Sun4c specific pgtable.h defines and code.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-#ifndef _SPARC_PGTSUN4C_H
-#define _SPARC_PGTSUN4C_H
-
-#include <asm/contregs.h>
-
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
-#define SUN4C_PMD_SHIFT       22
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define SUN4C_PGDIR_SHIFT       22
-#define SUN4C_PGDIR_SIZE        (1UL << SUN4C_PGDIR_SHIFT)
-#define SUN4C_PGDIR_MASK        (~(SUN4C_PGDIR_SIZE-1))
-#define SUN4C_PGDIR_ALIGN(addr) (((addr)+SUN4C_PGDIR_SIZE-1)&SUN4C_PGDIR_MASK)
-
-/* To represent how the sun4c mmu really lays things out. */
-#define SUN4C_REAL_PGDIR_SHIFT       18
-#define SUN4C_REAL_PGDIR_SIZE        (1UL << SUN4C_REAL_PGDIR_SHIFT)
-#define SUN4C_REAL_PGDIR_MASK        (~(SUN4C_REAL_PGDIR_SIZE-1))
-#define SUN4C_REAL_PGDIR_ALIGN(addr) (((addr)+SUN4C_REAL_PGDIR_SIZE-1)&SUN4C_REAL_PGDIR_MASK)
-
-/* 16 bit PFN on sun4c */
-#define SUN4C_PFN_MASK 0xffff
-
-/* Don't increase these unless the structures in sun4c.c are fixed */
-#define SUN4C_MAX_SEGMAPS 256
-#define SUN4C_MAX_CONTEXTS 16
-
-/*
- * To be efficient, and not have to worry about allocating such
- * a huge pgd, we make the kernel sun4c tables each hold 1024
- * entries and the pgd similarly just like the i386 tables.
- */
-#define SUN4C_PTRS_PER_PTE    1024
-#define SUN4C_PTRS_PER_PMD    1
-#define SUN4C_PTRS_PER_PGD    1024
-
-/*
- * Sparc SUN4C pte fields.
- */
-#define _SUN4C_PAGE_VALID        0x80000000
-#define _SUN4C_PAGE_SILENT_READ  0x80000000   /* synonym */
-#define _SUN4C_PAGE_DIRTY        0x40000000
-#define _SUN4C_PAGE_SILENT_WRITE 0x40000000   /* synonym */
-#define _SUN4C_PAGE_PRIV         0x20000000   /* privileged page */
-#define _SUN4C_PAGE_NOCACHE      0x10000000   /* non-cacheable page */
-#define _SUN4C_PAGE_PRESENT      0x08000000   /* implemented in software */
-#define _SUN4C_PAGE_IO           0x04000000   /* I/O page */
-#define _SUN4C_PAGE_FILE         0x02000000   /* implemented in software */
-#define _SUN4C_PAGE_READ         0x00800000   /* implemented in software */
-#define _SUN4C_PAGE_WRITE        0x00400000   /* implemented in software */
-#define _SUN4C_PAGE_ACCESSED     0x00200000   /* implemented in software */
-#define _SUN4C_PAGE_MODIFIED     0x00100000   /* implemented in software */
-
-#define _SUN4C_READABLE		(_SUN4C_PAGE_READ|_SUN4C_PAGE_SILENT_READ|\
-				 _SUN4C_PAGE_ACCESSED)
-#define _SUN4C_WRITEABLE	(_SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE|\
-				 _SUN4C_PAGE_MODIFIED)
-
-#define _SUN4C_PAGE_CHG_MASK	(0xffff|_SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_MODIFIED)
-
-#define SUN4C_PAGE_NONE		__pgprot(_SUN4C_PAGE_PRESENT)
-#define SUN4C_PAGE_SHARED	__pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE|\
-					 _SUN4C_PAGE_WRITE)
-#define SUN4C_PAGE_COPY		__pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE)
-#define SUN4C_PAGE_READONLY	__pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE)
-#define SUN4C_PAGE_KERNEL	__pgprot(_SUN4C_READABLE|_SUN4C_WRITEABLE|\
-					 _SUN4C_PAGE_DIRTY|_SUN4C_PAGE_PRIV)
-
-/* SUN4C swap entry encoding
- *
- * We use 5 bits for the type and 19 for the offset.  This gives us
- * 32 swapfiles of 4GB each.  Encoding looks like:
- *
- * RRRRRRRRooooooooooooooooooottttt
- * fedcba9876543210fedcba9876543210
- *
- * The top 8 bits are reserved for protection and status bits, especially
- * FILE and PRESENT.
- */
-#define SUN4C_SWP_TYPE_MASK	0x1f
-#define SUN4C_SWP_OFF_MASK	0x7ffff
-#define SUN4C_SWP_OFF_SHIFT	5
-
-#ifndef __ASSEMBLY__
-
-static inline unsigned long sun4c_get_synchronous_error(void)
-{
-	unsigned long sync_err;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (sync_err) :
-			     "r" (AC_SYNC_ERR), "i" (ASI_CONTROL));
-	return sync_err;
-}
-
-static inline unsigned long sun4c_get_synchronous_address(void)
-{
-	unsigned long sync_addr;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (sync_addr) :
-			     "r" (AC_SYNC_VA), "i" (ASI_CONTROL));
-	return sync_addr;
-}
-
-/* SUN4C pte, segmap, and context manipulation */
-static inline unsigned long sun4c_get_segmap(unsigned long addr)
-{
-  register unsigned long entry;
-
-  __asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" : 
-		       "=r" (entry) :
-		       "r" (addr), "i" (ASI_SEGMAP));
-
-  return entry;
-}
-
-static inline void sun4c_put_segmap(unsigned long addr, unsigned long entry)
-{
-
-  __asm__ __volatile__("\n\tstba %1, [%0] %2; nop; nop; nop;\n\t" : :
-		       "r" (addr), "r" (entry),
-		       "i" (ASI_SEGMAP)
-		       : "memory");
-}
-
-static inline unsigned long sun4c_get_pte(unsigned long addr)
-{
-  register unsigned long entry;
-
-  __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" : 
-		       "=r" (entry) :
-		       "r" (addr), "i" (ASI_PTE));
-  return entry;
-}
-
-static inline void sun4c_put_pte(unsigned long addr, unsigned long entry)
-{
-  __asm__ __volatile__("\n\tsta %1, [%0] %2; nop; nop; nop;\n\t" : :
-		       "r" (addr), 
-		       "r" ((entry & ~(_SUN4C_PAGE_PRESENT))), "i" (ASI_PTE)
-		       : "memory");
-}
-
-static inline int sun4c_get_context(void)
-{
-  register int ctx;
-
-  __asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" :
-		       "=r" (ctx) :
-		       "r" (AC_CONTEXT), "i" (ASI_CONTROL));
-
-  return ctx;
-}
-
-static inline int sun4c_set_context(int ctx)
-{
-  __asm__ __volatile__("\n\tstba %0, [%1] %2; nop; nop; nop;\n\t" : :
-		       "r" (ctx), "r" (AC_CONTEXT), "i" (ASI_CONTROL)
-		       : "memory");
-
-  return ctx;
-}
-
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* !(_SPARC_PGTSUN4C_H) */
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 09521c6..9cbd854 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -16,7 +16,6 @@
 #include <asm/ptrace.h>
 #include <asm/head.h>
 #include <asm/signal.h>
-#include <asm/btfixup.h>
 #include <asm/page.h>
 
 /*
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 00497ab..8a83699 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -20,10 +20,7 @@
  * Only sun4d + leon may have boot_cpu_id != 0
  */
 extern unsigned char boot_cpu_id;
-extern unsigned char boot_cpu_id4;
 
-extern unsigned long empty_bad_page;
-extern unsigned long empty_bad_page_table;
 extern unsigned long empty_zero_page;
 
 extern int serial_console;
diff --git a/arch/sparc/include/asm/shmparam_32.h b/arch/sparc/include/asm/shmparam_32.h
index 59a1243..142825c 100644
--- a/arch/sparc/include/asm/shmparam_32.h
+++ b/arch/sparc/include/asm/shmparam_32.h
@@ -4,8 +4,6 @@
 #define __ARCH_FORCE_SHMLBA 	1
 
 extern int vac_cache_size;
-#define SHMLBA (vac_cache_size ? vac_cache_size : \
-		(sparc_cpu_model == sun4c ? (64 * 1024) : \
-		 (sparc_cpu_model == sun4 ? (128 * 1024) : PAGE_SIZE)))
+#define SHMLBA (vac_cache_size ? vac_cache_size : PAGE_SIZE)
 
 #endif /* _ASMSPARC_SHMPARAM_H */
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index 01c51c7..b73da3c 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -8,7 +8,6 @@
 
 #include <linux/threads.h>
 #include <asm/head.h>
-#include <asm/btfixup.h>
 
 #ifndef __ASSEMBLY__
 
@@ -58,27 +57,43 @@
 void smp_bogo(struct seq_file *);
 void smp_info(struct seq_file *);
 
-BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
-BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
-BTFIXUPDEF_CALL(void, smp_ipi_resched, int);
-BTFIXUPDEF_CALL(void, smp_ipi_single, int);
-BTFIXUPDEF_CALL(void, smp_ipi_mask_one, int);
-BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
-BTFIXUPDEF_BLACKBOX(load_current)
+struct sparc32_ipi_ops {
+	void (*cross_call)(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+			   unsigned long arg2, unsigned long arg3,
+			   unsigned long arg4);
+	void (*resched)(int cpu);
+	void (*single)(int cpu);
+	void (*mask_one)(int cpu);
+};
+extern const struct sparc32_ipi_ops *sparc32_ipi_ops;
 
-#define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)
+static inline void xc0(smpfunc_t func)
+{
+	sparc32_ipi_ops->cross_call(func, *cpu_online_mask, 0, 0, 0, 0);
+}
 
-static inline void xc0(smpfunc_t func) { smp_cross_call(func, *cpu_online_mask, 0, 0, 0, 0); }
 static inline void xc1(smpfunc_t func, unsigned long arg1)
-{ smp_cross_call(func, *cpu_online_mask, arg1, 0, 0, 0); }
+{
+	sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, 0, 0, 0);
+}
 static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
-{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0); }
+{
+	sparc32_ipi_ops->cross_call(func, *cpu_online_mask, arg1, arg2, 0, 0);
+}
+
 static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
-			   unsigned long arg3)
-{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, 0); }
+		       unsigned long arg3)
+{
+	sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
+				    arg1, arg2, arg3, 0);
+}
+
 static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
-			   unsigned long arg3, unsigned long arg4)
-{ smp_cross_call(func, *cpu_online_mask, arg1, arg2, arg3, arg4); }
+		       unsigned long arg3, unsigned long arg4)
+{
+	sparc32_ipi_ops->cross_call(func, *cpu_online_mask,
+				    arg1, arg2, arg3, arg4);
+}
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
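
With the BTFIXUP indirection gone, each platform publishes its IPI primitives through the sparc32_ipi_ops table declared above, and the xc0()-xc4() wrappers call through it. A sketch of the registration pattern follows; the leon_* callback names are hypothetical placeholders, not symbols taken from this patch:

/* Hypothetical platform glue: only struct sparc32_ipi_ops and the
 * sparc32_ipi_ops pointer come from the header above; the leon_*
 * callbacks stand in for a platform's real IPI routines.
 */
extern void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long a1,
			    unsigned long a2, unsigned long a3, unsigned long a4);
extern void leon_ipi_resched(int cpu);
extern void leon_ipi_single(int cpu);
extern void leon_ipi_mask_one(int cpu);

static const struct sparc32_ipi_ops leon_ipi_ops_sketch = {
	.cross_call	= leon_cross_call,
	.resched	= leon_ipi_resched,
	.single		= leon_ipi_single,
	.mask_one	= leon_ipi_mask_one,
};

static void __init leon_setup_ipi_sketch(void)
{
	sparc32_ipi_ops = &leon_ipi_ops_sketch;
}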
@@ -88,74 +103,7 @@
 	return cpu;
 }
 
-static inline int hard_smp4m_processor_id(void)
-{
-	int cpuid;
-
-	__asm__ __volatile__("rd %%tbr, %0\n\t"
-			     "srl %0, 12, %0\n\t"
-			     "and %0, 3, %0\n\t" :
-			     "=&r" (cpuid));
-	return cpuid;
-}
-
-static inline int hard_smp4d_processor_id(void)
-{
-	int cpuid;
-
-	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
-			     "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
-	return cpuid;
-}
-
-extern inline int hard_smpleon_processor_id(void)
-{
-	int cpuid;
-	__asm__ __volatile__("rd     %%asr17,%0\n\t"
-			     "srl    %0,28,%0" :
-			     "=&r" (cpuid) : );
-	return cpuid;
-}
-
-#ifndef MODULE
-static inline int hard_smp_processor_id(void)
-{
-	int cpuid;
-
-	/* Black box - sun4m
-		__asm__ __volatile__("rd %%tbr, %0\n\t"
-				     "srl %0, 12, %0\n\t"
-				     "and %0, 3, %0\n\t" :
-				     "=&r" (cpuid));
-	             - sun4d
-		__asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
-				     "nop; nop" :
-				     "=&r" (cpuid));
-		     - leon
-		__asm__ __volatile__(	"rd %asr17, %0\n\t"
-					"srl %0, 0x1c, %0\n\t"
-					"nop\n\t" :
-					"=&r" (cpuid));
-	   See btfixup.h and btfixupprep.c to understand how a blackbox works.
-	 */
-	__asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
-			     "sethi %%hi(boot_cpu_id), %0\n\t"
-			     "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
-			     "=&r" (cpuid));
-	return cpuid;
-}
-#else
-static inline int hard_smp_processor_id(void)
-{
-	int cpuid;
-
-	__asm__ __volatile__("mov %%o7, %%g1\n\t"
-			     "call ___f___hard_smp_processor_id\n\t"
-			     " nop\n\t"
-			     "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
-	return cpuid;
-}
-#endif
+extern int hard_smp_processor_id(void);
 
 #define raw_smp_processor_id()		(current_thread_info()->cpu)
 
diff --git a/arch/sparc/include/asm/smpprim.h b/arch/sparc/include/asm/smpprim.h
deleted file mode 100644
index eb849d8..0000000
--- a/arch/sparc/include/asm/smpprim.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *  smpprim.h:  SMP locking primitives on the Sparc
- *
- *  God knows we won't be actually using this code for some time
- *  but I thought I'd write it since I knew how.
- *
- *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef __SPARC_SMPPRIM_H
-#define __SPARC_SMPPRIM_H
-
-/* Test and set the unsigned byte at ADDR to 1.  Returns the previous
- * value.  On the Sparc we use the ldstub instruction since it is
- * atomic.
- */
-
-static inline __volatile__ char test_and_set(void *addr)
-{
-	char state = 0;
-
-	__asm__ __volatile__("ldstub [%0], %1         ! test_and_set\n\t"
-			     "=r" (addr), "=r" (state) :
-			     "0" (addr), "1" (state) : "memory");
-
-	return state;
-}
-
-/* Initialize a spin-lock. */
-static inline __volatile__ smp_initlock(void *spinlock)
-{
-	/* Unset the lock. */
-	*((unsigned char *) spinlock) = 0;
-
-	return;
-}
-
-/* This routine spins until it acquires the lock at ADDR. */
-static inline __volatile__ smp_lock(void *addr)
-{
-	while(test_and_set(addr) == 0xff)
-		;
-
-	/* We now have the lock */
-	return;
-}
-
-/* This routine releases the lock at ADDR. */
-static inline __volatile__ smp_unlock(void *addr)
-{
-	*((unsigned char *) addr) = 0;
-}
-
-#endif /* !(__SPARC_SMPPRIM_H) */
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index edf196e..12f6785 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -61,68 +61,7 @@
 extern __kernel_size_t strlen(const char *);
 
 #define __HAVE_ARCH_STRNCMP
-
-extern int __strncmp(const char *, const char *, __kernel_size_t);
-
-static inline int __constant_strncmp(const char *src, const char *dest, __kernel_size_t count)
-{
-	register int retval;
-	switch(count) {
-	case 0: return 0;
-	case 1: return (src[0] - dest[0]);
-	case 2: retval = (src[0] - dest[0]);
-		if(!retval && src[0])
-		  retval = (src[1] - dest[1]);
-		return retval;
-	case 3: retval = (src[0] - dest[0]);
-		if(!retval && src[0]) {
-		  retval = (src[1] - dest[1]);
-		  if(!retval && src[1])
-		    retval = (src[2] - dest[2]);
-		}
-		return retval;
-	case 4: retval = (src[0] - dest[0]);
-		if(!retval && src[0]) {
-		  retval = (src[1] - dest[1]);
-		  if(!retval && src[1]) {
-		    retval = (src[2] - dest[2]);
-		    if (!retval && src[2])
-		      retval = (src[3] - dest[3]);
-		  }
-		}
-		return retval;
-	case 5: retval = (src[0] - dest[0]);
-		if(!retval && src[0]) {
-		  retval = (src[1] - dest[1]);
-		  if(!retval && src[1]) {
-		    retval = (src[2] - dest[2]);
-		    if (!retval && src[2]) {
-		      retval = (src[3] - dest[3]);
-		      if (!retval && src[3])
-		        retval = (src[4] - dest[4]);
-		    }
-		  }
-		}
-		return retval;
-	default:
-		retval = (src[0] - dest[0]);
-		if(!retval && src[0]) {
-		  retval = (src[1] - dest[1]);
-		  if(!retval && src[1]) {
-		    retval = (src[2] - dest[2]);
-		    if(!retval && src[2])
-		      retval = __strncmp(src+3,dest+3,count-3);
-		  }
-		}
-		return retval;
-	}
-}
-
-#undef strncmp
-#define strncmp(__arg0, __arg1, __arg2)	\
-(__builtin_constant_p(__arg2) ?	\
- __constant_strncmp(__arg0, __arg1, __arg2) : \
- __strncmp(__arg0, __arg1, __arg2))
+extern int strncmp(const char *, const char *, __kernel_size_t);
 
 #endif /* !EXPORT_SYMTAB_STROPS */
 
diff --git a/arch/sparc/include/asm/sysen.h b/arch/sparc/include/asm/sysen.h
deleted file mode 100644
index 6af34ab..0000000
--- a/arch/sparc/include/asm/sysen.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * sysen.h:  Bit fields within the "System Enable" register accessed via
- *           the ASI_CONTROL address space at address AC_SYSENABLE.
- *
- * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _SPARC_SYSEN_H
-#define _SPARC_SYSEN_H
-
-#define SENABLE_DVMA   0x20   /* enable dvma transfers */
-#define SENABLE_CACHE  0x10   /* enable VAC cache */
-#define SENABLE_RESET  0x04   /* reset whole machine, danger Will Robinson */
-
-#endif /* _SPARC_SYSEN_H */
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index c2a1080c..cd0b2dc 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -15,7 +15,6 @@
 
 #ifndef __ASSEMBLY__
 
-#include <asm/btfixup.h>
 #include <asm/ptrace.h>
 #include <asm/page.h>
 
@@ -82,11 +81,8 @@
 
 #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
 
-BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info_node, int)
-#define alloc_thread_info_node(tsk, node) BTFIXUP_CALL(alloc_thread_info_node)(node)
-
-BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
-#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
+struct thread_info * alloc_thread_info_node(struct task_struct *tsk, int node);
+void free_thread_info(struct thread_info *);
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/sparc/include/asm/timer_32.h b/arch/sparc/include/asm/timer_32.h
index 1a91e11..72f40a5 100644
--- a/arch/sparc/include/asm/timer_32.h
+++ b/arch/sparc/include/asm/timer_32.h
@@ -8,14 +8,37 @@
 #ifndef _SPARC_TIMER_H
 #define _SPARC_TIMER_H
 
+#include <linux/clocksource.h>
+#include <linux/irqreturn.h>
+
+#include <asm-generic/percpu.h>
+
 #include <asm/cpu_type.h>  /* For SUN4M_NCPUS */
-#include <asm/btfixup.h>
+
+#define SBUS_CLOCK_RATE   2000000 /* 2MHz */
+#define TIMER_VALUE_SHIFT 9
+#define TIMER_VALUE_MASK  0x3fffff
+#define TIMER_LIMIT_BIT   (1 << 31)  /* Bit 31 in Counter-Timer register */
+
+/* The counter timer register has the value offset by 9 bits.
+ * From sun4m manual:
+ * When a counter reaches the value in the corresponding limit register,
+ * the Limit bit is set and the counter is set to 500 nS (i.e. 0x00000200).
+ *
+ * To compensate for this add one to the value.
+ */
+static inline unsigned int timer_value(unsigned int value)
+{
+	return (value + 1) << TIMER_VALUE_SHIFT;
+}
 
 extern __volatile__ unsigned int *master_l10_counter;
 
-/* FIXME: Make do_[gs]ettimeofday btfixup calls */
-struct timespec;
-BTFIXUPDEF_CALL(int, bus_do_settimeofday, struct timespec *tv)
-#define bus_do_settimeofday(tv) BTFIXUP_CALL(bus_do_settimeofday)(tv)
+extern irqreturn_t notrace timer_interrupt(int dummy, void *dev_id);
+
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct clock_event_device, sparc32_clockevent);
+extern void register_percpu_ce(int cpu);
+#endif
 
 #endif /* !(_SPARC_TIMER_H) */
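
SBUS_CLOCK_RATE, TIMER_VALUE_SHIFT and timer_value() above are enough to compute a limit-register value directly. A hedged arithmetic sketch, assuming a 10 ms tick (HZ == 100), which this patch does not itself dictate:

/* Illustrative only: a 10 ms period at the 2 MHz SBUS clock is 20000
 * counts, so the value programmed into a limit register would be
 * timer_value(20000) == (20000 + 1) << TIMER_VALUE_SHIFT.
 */
static inline unsigned int sparc32_tick_limit_example(void)
{
	unsigned int counts = SBUS_CLOCK_RATE / 100;	/* HZ == 100 assumed */

	return timer_value(counts);
}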
diff --git a/arch/sparc/include/asm/timex_32.h b/arch/sparc/include/asm/timex_32.h
index a254750..b6ccdb0 100644
--- a/arch/sparc/include/asm/timex_32.h
+++ b/arch/sparc/include/asm/timex_32.h
@@ -12,5 +12,4 @@
 typedef unsigned long cycles_t;
 #define get_cycles()	(0)
 
-extern u32 (*do_arch_gettimeoffset)(void);
 #endif
diff --git a/arch/sparc/include/asm/tlbflush_32.h b/arch/sparc/include/asm/tlbflush_32.h
index fe0a71a..a5c4142 100644
--- a/arch/sparc/include/asm/tlbflush_32.h
+++ b/arch/sparc/include/asm/tlbflush_32.h
@@ -1,52 +1,16 @@
 #ifndef _SPARC_TLBFLUSH_H
 #define _SPARC_TLBFLUSH_H
 
-#include <linux/mm.h>
-// #include <asm/processor.h>
+#include <asm/cachetlb_32.h>
 
-/*
- * TLB flushing:
- *
- *  - flush_tlb() flushes the current mm struct TLBs	XXX Exists?
- *  - flush_tlb_all() flushes all processes TLBs
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- */
-
-#ifdef CONFIG_SMP
-
-BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
-BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
-BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
-BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)
-
-#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
-#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
-#define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)
-#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)
-
-extern void smp_flush_tlb_all(void);
-extern void smp_flush_tlb_mm(struct mm_struct *mm);
-extern void smp_flush_tlb_range(struct vm_area_struct *vma,
-				  unsigned long start,
-				  unsigned long end);
-extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
-
-#endif /* CONFIG_SMP */
-
-BTFIXUPDEF_CALL(void, flush_tlb_all, void)
-BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
-BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
-BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
-
-#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
-#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
-#define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
-#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)
-
-// #define flush_tlb() flush_tlb_mm(current->active_mm)	/* XXX Sure? */
+#define flush_tlb_all() \
+	sparc32_cachetlb_ops->tlb_all()
+#define flush_tlb_mm(mm) \
+	sparc32_cachetlb_ops->tlb_mm(mm)
+#define flush_tlb_range(vma, start, end) \
+	sparc32_cachetlb_ops->tlb_range(vma, start, end)
+#define flush_tlb_page(vma, addr) \
+	sparc32_cachetlb_ops->tlb_page(vma, addr)
 
 /*
  * This is a kludge, until I know better. --zaitcev XXX
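
The flush_tlb_* macros above dispatch through sparc32_cachetlb_ops from asm/cachetlb_32.h, which this diff does not include. Judging only from the four calls made here, the relevant slice of that ops table presumably looks like the sketch below; the real definition may carry additional cache_* hooks not used in this header:

/* Assumed shape (sketch) of the tlb_* hooks referenced by the macros
 * above; the authoritative definition lives in asm/cachetlb_32.h.
 */
struct mm_struct;
struct vm_area_struct;

struct sparc32_cachetlb_ops_sketch {
	void (*tlb_all)(void);
	void (*tlb_mm)(struct mm_struct *mm);
	void (*tlb_range)(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end);
	void (*tlb_page)(struct vm_area_struct *vma, unsigned long addr);
};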
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 8303ac4..d50c310 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -12,7 +12,6 @@
 #include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/errno.h>
-#include <asm/vac-ops.h>
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/vac-ops.h b/arch/sparc/include/asm/vac-ops.h
deleted file mode 100644
index a63e88e..0000000
--- a/arch/sparc/include/asm/vac-ops.h
+++ /dev/null
@@ -1,127 +0,0 @@
-#ifndef _SPARC_VAC_OPS_H
-#define _SPARC_VAC_OPS_H
-
-/* vac-ops.h: Inline assembly routines to do operations on the Sparc
- *            VAC (virtual address cache) for the sun4c.
- *
- * Copyright (C) 1994, David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <asm/sysen.h>
-#include <asm/contregs.h>
-#include <asm/asi.h>
-
-/* The SUN4C models have a virtually addressed write-through
- * cache.
- *
- * The cache tags are directly accessible through an ASI and
- * each have the form:
- *
- * ------------------------------------------------------------
- * | MBZ | CONTEXT | WRITE | PRIV | VALID | MBZ | TagID | MBZ |
- * ------------------------------------------------------------
- *  31 25  24   22     21     20     19    18 16  15   2  1  0
- *
- * MBZ: These bits are either unused and/or reserved and should
- *      be written as zeroes.
- *
- * CONTEXT: Records the context to which this cache line belongs.
- *
- * WRITE: A copy of the writable bit from the mmu pte access bits.
- *
- * PRIV: A copy of the privileged bit from the pte access bits.
- *
- * VALID: If set, this line is valid, else invalid.
- *
- * TagID: Fourteen bits of tag ID.
- *
- * Every virtual address is seen by the cache like this:
- *
- * ----------------------------------------
- * |  RESV  | TagID | LINE | BYTE-in-LINE |
- * ----------------------------------------
- *  31    30 29   16 15   4 3            0
- *
- * RESV: Unused/reserved.
- *
- * TagID: Used to match the Tag-ID in that vac tags.
- *
- * LINE: Which line within the cache
- *
- * BYTE-in-LINE: Which byte within the cache line.
- */
-
-/* Sun4c VAC Tags */
-#define S4CVACTAG_CID      0x01c00000
-#define S4CVACTAG_W        0x00200000
-#define S4CVACTAG_P        0x00100000
-#define S4CVACTAG_V        0x00080000
-#define S4CVACTAG_TID      0x0000fffc
-
-/* Sun4c VAC Virtual Address */
-/* These aren't used, why bother? (Anton) */
-#if 0
-#define S4CVACVA_TID       0x3fff0000
-#define S4CVACVA_LINE      0x0000fff0
-#define S4CVACVA_BIL       0x0000000f
-#endif
-
-/* The indexing of cache lines creates a problem.  Because the line
- * field of a virtual address extends past the page offset within
- * the virtual address it is possible to have what are called
- * 'bad aliases' which will create inconsistencies.  So we must make
- * sure that within a context that if a physical page is mapped
- * more than once, that 'extra' line bits are the same.  If this is
- * not the case, and thus is a 'bad alias' we must turn off the
- * cacheable bit in the pte's of all such pages.
- */
-
-#define S4CVAC_BADBITS    0x0000f000
-
-/* The following is true if vaddr1 and vaddr2 would cause
- * a 'bad alias'.
- */
-#define S4CVAC_BADALIAS(vaddr1, vaddr2) \
-        ((((unsigned long) (vaddr1)) ^ ((unsigned long) (vaddr2))) & \
-	 (S4CVAC_BADBITS))
-
-/* The following structure describes the characteristics of a sun4c
- * VAC as probed from the prom during boot time.
- */
-struct sun4c_vac_props {
-	unsigned int num_bytes;     /* Size of the cache */
-	unsigned int do_hwflushes;  /* Hardware flushing available? */
-	unsigned int linesize;      /* Size of each line in bytes */
-	unsigned int log2lsize;     /* log2(linesize) */
-	unsigned int on;            /* VAC is enabled */
-};
-
-extern struct sun4c_vac_props sun4c_vacinfo;
-
-/* sun4c_enable_vac() enables the sun4c virtual address cache. */
-static inline void sun4c_enable_vac(void)
-{
-	__asm__ __volatile__("lduba [%0] %1, %%g1\n\t"
-			     "or    %%g1, %2, %%g1\n\t"
-			     "stba  %%g1, [%0] %1\n\t"
-			     : /* no outputs */
-			     : "r" ((unsigned int) AC_SENABLE),
-			     "i" (ASI_CONTROL), "i" (SENABLE_CACHE)
-			     : "g1", "memory");
-	sun4c_vacinfo.on = 1;
-}
-
-/* sun4c_disable_vac() disables the virtual address cache. */
-static inline void sun4c_disable_vac(void)
-{
-	__asm__ __volatile__("lduba [%0] %1, %%g1\n\t"
-			     "andn  %%g1, %2, %%g1\n\t"
-			     "stba  %%g1, [%0] %1\n\t"
-			     : /* no outputs */
-			     : "r" ((unsigned int) AC_SENABLE),
-			     "i" (ASI_CONTROL), "i" (SENABLE_CACHE)
-			     : "g1", "memory");
-	sun4c_vacinfo.on = 0;
-}
-
-#endif /* !(_SPARC_VAC_OPS_H) */
diff --git a/arch/sparc/include/asm/vaddrs.h b/arch/sparc/include/asm/vaddrs.h
index 541e137..da6535d 100644
--- a/arch/sparc/include/asm/vaddrs.h
+++ b/arch/sparc/include/asm/vaddrs.h
@@ -34,22 +34,6 @@
 #define IOBASE_VADDR		0xfe000000
 #define IOBASE_END		0xfe600000
 
-/*
- * On the sun4/4c we need a place
- * to reliably map locked down kernel data.  This includes the
- * task_struct and kernel stack pages of each process plus the
- * scsi buffers during dvma IO transfers, also the floppy buffers
- * during pseudo dma which runs with traps off (no faults allowed).
- * Some quick calculations yield:
- *       NR_TASKS <512> * (3 * PAGE_SIZE) == 0x600000
- * Subtract this from 0xc00000 and you get 0x927C0 of vm left
- * over to map SCSI dvma + floppy pseudo-dma buffers.  So be
- * careful if you change NR_TASKS or else there won't be enough
- * room for it all.
- */
-#define SUN4C_LOCK_VADDR	0xff000000
-#define SUN4C_LOCK_END		0xffc00000
-
 #define KADB_DEBUGGER_BEGVM	0xffc00000 /* Where kern debugger is in virt-mem */
 #define KADB_DEBUGGER_ENDVM	0xffd00000
 #define DEBUG_FIRSTVADDR	KADB_DEBUGGER_BEGVM
diff --git a/arch/sparc/include/asm/winmacro.h b/arch/sparc/include/asm/winmacro.h
index a9be04b..9b7b217 100644
--- a/arch/sparc/include/asm/winmacro.h
+++ b/arch/sparc/include/asm/winmacro.h
@@ -103,37 +103,24 @@
         st       %scratch, [%cur_reg + TI_W_SAVED];
 
 #ifdef CONFIG_SMP
-/* Results of LOAD_CURRENT() after BTFIXUP for SUN4M, SUN4D & LEON (comments) */
-#define LOAD_CURRENT4M(dest_reg, idreg) \
-        rd       %tbr, %idreg; \
-	sethi    %hi(current_set), %dest_reg; \
-        srl      %idreg, 10, %idreg; \
-	or       %dest_reg, %lo(current_set), %dest_reg; \
-	and      %idreg, 0xc, %idreg; \
-	ld       [%idreg + %dest_reg], %dest_reg;
-
-#define LOAD_CURRENT4D(dest_reg, idreg) \
-	lda	 [%g0] ASI_M_VIKING_TMP1, %idreg; \
-	sethi	%hi(C_LABEL(current_set)), %dest_reg; \
-	sll	%idreg, 2, %idreg; \
-	or	%dest_reg, %lo(C_LABEL(current_set)), %dest_reg; \
-	ld	[%idreg + %dest_reg], %dest_reg;
-
-#define LOAD_CURRENT_LEON(dest_reg, idreg)			\
-	rd	%asr17, %idreg;					\
-	sethi	%hi(current_set), %dest_reg;			\
-	srl	%idreg, 0x1c, %idreg;				\
-	or	%dest_reg, %lo(current_set), %dest_reg;		\
-	sll	%idreg, 0x2, %idreg;				\
-	ld	[%idreg + %dest_reg], %dest_reg;
-
-/* Blackbox - take care with this... - check smp4m and smp4d before changing this. */
-#define LOAD_CURRENT(dest_reg, idreg) 					\
-	sethi	 %hi(___b_load_current), %idreg;			\
-	sethi    %hi(current_set), %dest_reg; 			\
-	sethi    %hi(boot_cpu_id4), %idreg; 			\
-	or       %dest_reg, %lo(current_set), %dest_reg; 	\
-	ldub	 [%idreg + %lo(boot_cpu_id4)], %idreg;		\
+#define LOAD_CURRENT(dest_reg, idreg) 			\
+661:	rd	%tbr, %idreg;				\
+	srl	%idreg, 10, %idreg;			\
+	and	%idreg, 0xc, %idreg;			\
+	.section	.cpuid_patch, "ax";		\
+	/* Instruction location. */			\
+	.word		661b;				\
+	/* SUN4D implementation. */			\
+	lda	 [%g0] ASI_M_VIKING_TMP1, %idreg;	\
+	sll	 %idreg, 2, %idreg;			\
+	nop;						\
+	/* LEON implementation. */			\
+	rd 	%asr17, %idreg;				\
+	srl	%idreg, 0x1c, %idreg;			\
+	sll	%idreg, 0x02, %idreg;			\
+	.previous;					\
+	sethi    %hi(current_set), %dest_reg; 		\
+	or       %dest_reg, %lo(current_set), %dest_reg;\
 	ld       [%idreg + %dest_reg], %dest_reg;
 #else
 #define LOAD_CURRENT(dest_reg, idreg) \
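
The new LOAD_CURRENT emits the sun4m sequence inline and records the alternatives in a .cpuid_patch section: one word holding the address of the default three instructions, followed by three-instruction replacement sequences for sun4d and LEON. Boot code can then walk that table once and overwrite the defaults on the other platforms. A hedged C sketch of such a walker (entry layout and names are inferred from the macro above, not taken from the actual setup code):

	struct cpuid_patch_entry {
		unsigned int addr;	/* location of the default (sun4m) insns */
		unsigned int sun4d[3];	/* replacement sequence for sun4d */
		unsigned int leon[3];	/* replacement sequence for LEON */
	};

	/* Section bounds would be provided by the linker script. */
	extern struct cpuid_patch_entry __cpuid_patch_begin, __cpuid_patch_end;

	static void apply_cpuid_patches(int is_sun4d, int is_leon)
	{
		struct cpuid_patch_entry *p;

		for (p = &__cpuid_patch_begin; p < &__cpuid_patch_end; p++) {
			unsigned int *insns = (unsigned int *)(unsigned long)p->addr;
			const unsigned int *src;

			if (is_sun4d)
				src = p->sun4d;
			else if (is_leon)
				src = p->leon;
			else
				continue;	/* sun4m keeps the inline default */

			insns[0] = src[0];
			insns[1] = src[1];
			insns[2] = src[2];
			/* A real implementation must also flush the I-cache here. */
		}
	}
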
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index cb85458..c19dd02 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -28,7 +28,7 @@
 
 # IRQ
 obj-y                   += irq_$(BITS).o
-obj-$(CONFIG_SPARC32)   += sun4m_irq.o sun4c_irq.o sun4d_irq.o
+obj-$(CONFIG_SPARC32)   += sun4m_irq.o sun4d_irq.o
 
 obj-y                   += process_$(BITS).o
 obj-y                   += signal_$(BITS).o
@@ -46,7 +46,6 @@
 obj-y                   += ptrace_$(BITS).o
 obj-y                   += unaligned_$(BITS).o
 obj-y                   += una_asm_$(BITS).o
-obj-$(CONFIG_SPARC32)   += muldiv.o
 obj-y                   += prom_common.o
 obj-y                   += prom_$(BITS).o
 obj-y                   += of_device_common.o
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index 56d0f52..e20cc55 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -32,7 +32,6 @@
 	switch (sparc_cpu_model) {
 	case sparc_leon:
 	case sun4d:
-	case sun4:
 		return;
 	default:
 		break;
@@ -65,9 +64,8 @@
 	r.start = auxregs[0].phys_addr;
 	r.end = auxregs[0].phys_addr + auxregs[0].reg_size - 1;
 	auxio_register = of_ioremap(&r, 0, auxregs[0].reg_size, "auxio");
-	/* Fix the address on sun4m and sun4c. */
-	if((((unsigned long) auxregs[0].phys_addr) & 3) == 3 ||
-	   sparc_cpu_model == sun4c)
+	/* Fix the address on sun4m. */
+	if ((((unsigned long) auxregs[0].phys_addr) & 3) == 3)
 		auxio_register += (3 - ((unsigned long)auxio_register & 3));
 
 	set_auxio(AUXIO_LED, 0);
@@ -86,12 +84,7 @@
 	unsigned char regval;
 	unsigned long flags;
 	spin_lock_irqsave(&auxio_lock, flags);
-	switch(sparc_cpu_model) {
-	case sun4c:
-		regval = sbus_readb(auxio_register);
-		sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN,
-			auxio_register);
-		break;
+	switch (sparc_cpu_model) {
 	case sun4m:
 		if(!auxio_register)
 			break;     /* VME chassis sun4m, no auxio. */
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index 38d48a5..9708851 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -269,4 +269,4 @@
 	return 0;
 }
 
-subsys_initcall(sunfire_init);
+fs_initcall(sunfire_init);
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
index 6b2f56a..3d465e8 100644
--- a/arch/sparc/kernel/devices.c
+++ b/arch/sparc/kernel/devices.c
@@ -21,7 +21,6 @@
 #include <asm/cpu_type.h>
 
 extern void clock_stop_probe(void); /* tadpole.c */
-extern void sun4c_probe_memerr_reg(void);
 
 static char *cpu_mid_prop(void)
 {
@@ -139,7 +138,4 @@
 		auxio_power_probe();
 	}
 	clock_stop_probe();
-
-	if (ARCH_SUN4C)
-		sun4c_probe_memerr_reg();
 }
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index b93c2c9..f09257c 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -868,7 +868,7 @@
 
 static void ds_conn_reset(struct ds_info *dp)
 {
-	printk(KERN_ERR "ds-%llu: ds_conn_reset() from %p\n",
+	printk(KERN_ERR "ds-%llu: ds_conn_reset() from %pf\n",
 	       dp->id, __builtin_return_address(0));
 }
 
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index f445e98..2dbe180 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -7,6 +7,7 @@
  * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
  */
 
+#include <linux/linkage.h>
 #include <linux/errno.h>
 
 #include <asm/head.h>
@@ -17,10 +18,8 @@
 #include <asm/asm-offsets.h>
 #include <asm/psr.h>
 #include <asm/vaddrs.h>
-#include <asm/memreg.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/pgtsun4c.h>
 #include <asm/winmacro.h>
 #include <asm/signal.h>
 #include <asm/obio.h>
@@ -125,22 +124,11 @@
 	set	auxio_register, %l7
 	ld	[%l7], %l7
 
-	set	sparc_cpu_model, %l5
-	ld	[%l5], %l5
-	subcc   %l5, 1, %g0		/* enum { sun4c = 1 }; */
-	be	1f
-	 ldub	[%l7], %l5
+	ldub	[%l7], %l5
 
 	or	%l5, 0xc2, %l5
 	stb	%l5, [%l7]
 	andn    %l5, 0x02, %l5
-	b	2f
-	 nop
-
-1:
-	or      %l5, 0xf4, %l5
-	stb     %l5, [%l7]
-	andn    %l5, 0x04, %l5
 
 2:
 	/* Kill some time so the bits set */
@@ -266,6 +254,11 @@
 	WRITE_PAUSE
 	RESTORE_ALL
 
+#define GET_PROCESSOR4M_ID(reg)	\
+	rd	%tbr, %reg;	\
+	srl	%reg, 12, %reg;	\
+	and	%reg, 3, %reg;
+
 	/* Here is where we check for possible SMP IPI passed to us
 	 * on some level other than 15 which is the NMI and only used
 	 * for cross calls.  That has a separate entry point below.
@@ -328,7 +321,7 @@
 	ld	[%o5 + %o0], %o5
 	ld	[%o5 + 0x00], %o3	! sun4m_irq_percpu[cpu]->pending
 	andcc	%o3, %o2, %g0
-	be	1f			! Must be an NMI async memory error
+	be	sun4m_nmi_error		! Must be an NMI async memory error
 	 st	%o2, [%o5 + 0x04]	! sun4m_irq_percpu[cpu]->clear=0x80000000
 	WRITE_PAUSE
 	ld	[%o5 + 0x00], %g0	! sun4m_irq_percpu[cpu]->pending
@@ -342,27 +335,6 @@
 	 nop
 	b	ret_trap_lockless_ipi
 	 clr	%l6
-1:
-	/* NMI async memory error handling. */
-	sethi	%hi(0x80000000), %l4
-	sethi	%hi(sun4m_irq_global), %o5
-	ld	[%o5 + %lo(sun4m_irq_global)], %l5
-	st	%l4, [%l5 + 0x0c]	! sun4m_irq_global->mask_set=0x80000000
-	WRITE_PAUSE
-	ld	[%l5 + 0x00], %g0	! sun4m_irq_global->pending
-	WRITE_PAUSE
-	or	%l0, PSR_PIL, %l4
-	wr	%l4, 0x0, %psr
-	WRITE_PAUSE
-	wr	%l4, PSR_ET, %psr
-	WRITE_PAUSE
-	call	sun4m_nmi
-	 nop
-	st	%l4, [%l5 + 0x08]	! sun4m_irq_global->mask_clear=0x80000000
-	WRITE_PAUSE
-	ld	[%l5 + 0x00], %g0	! sun4m_irq_global->pending
-	WRITE_PAUSE
-	RESTORE_ALL
 
 	.globl	smp4d_ticker
 	/* SMP per-cpu ticker interrupts are handled specially. */
@@ -760,327 +732,38 @@
 	jmp	%l2		! advance over trap instruction
 	rett	%l2 + 0x4	! like this...
 
-	.align	4
-	.globl	linux_trap_nmi_sun4c
-linux_trap_nmi_sun4c:
-	SAVE_ALL
-
-	/* Ugh, we need to clear the IRQ line.  This is now
-	 * a very sun4c specific trap handler...
-	 */
-	sethi	%hi(interrupt_enable), %l5
-	ld	[%l5 + %lo(interrupt_enable)], %l5
-	ldub	[%l5], %l6
-	andn	%l6, INTS_ENAB, %l6
-	stb	%l6, [%l5]
-
-	/* Now it is safe to re-enable traps without recursion. */
-	or	%l0, PSR_PIL, %l0
-	wr	%l0, PSR_ET, %psr
+sun4m_nmi_error:
+	/* NMI async memory error handling. */
+	sethi	%hi(0x80000000), %l4
+	sethi	%hi(sun4m_irq_global), %o5
+	ld	[%o5 + %lo(sun4m_irq_global)], %l5
+	st	%l4, [%l5 + 0x0c]	! sun4m_irq_global->mask_set=0x80000000
 	WRITE_PAUSE
-
-	/* Now call the c-code with the pt_regs frame ptr and the
-	 * memory error registers as arguments.  The ordering chosen
-	 * here is due to unlatching semantics.
-	 */
-	sethi	%hi(AC_SYNC_ERR), %o0
-	add	%o0, 0x4, %o0
-	lda	[%o0] ASI_CONTROL, %o2	! sync vaddr
-	sub	%o0, 0x4, %o0
-	lda	[%o0] ASI_CONTROL, %o1	! sync error
-	add	%o0, 0xc, %o0
-	lda	[%o0] ASI_CONTROL, %o4	! async vaddr
-	sub	%o0, 0x4, %o0
-	lda	[%o0] ASI_CONTROL, %o3	! async error
-	call	sparc_lvl15_nmi
-	 add	%sp, STACKFRAME_SZ, %o0
-
-	RESTORE_ALL
-
-	.align	4
-	.globl	invalid_segment_patch1_ff
-	.globl	invalid_segment_patch2_ff
-invalid_segment_patch1_ff:	cmp	%l4, 0xff
-invalid_segment_patch2_ff:	mov	0xff, %l3
-
-	.align	4
-	.globl	invalid_segment_patch1_1ff
-	.globl	invalid_segment_patch2_1ff
-invalid_segment_patch1_1ff:	cmp	%l4, 0x1ff
-invalid_segment_patch2_1ff:	mov	0x1ff, %l3
-
-	.align	4
-	.globl	num_context_patch1_16, num_context_patch2_16
-num_context_patch1_16:		mov	0x10, %l7
-num_context_patch2_16:		mov	0x10, %l7
-
-	.align	4
-	.globl	vac_linesize_patch_32
-vac_linesize_patch_32:		subcc	%l7, 32, %l7
-
-	.align	4
-	.globl	vac_hwflush_patch1_on, vac_hwflush_patch2_on
-
-/*
- * Ugly, but we can't use hardware flushing on the sun4 and we'd require
- * two instructions (Anton)
- */
-vac_hwflush_patch1_on:		addcc	%l7, -PAGE_SIZE, %l7
-
-vac_hwflush_patch2_on:		sta	%g0, [%l3 + %l7] ASI_HWFLUSHSEG
-
-	.globl	invalid_segment_patch1, invalid_segment_patch2
-	.globl	num_context_patch1
-	.globl	vac_linesize_patch, vac_hwflush_patch1
-	.globl	vac_hwflush_patch2
-
-	.align	4
-	.globl	sun4c_fault
-
-! %l0 = %psr
-! %l1 = %pc
-! %l2 = %npc
-! %l3 = %wim
-! %l7 = 1 for textfault
-! We want error in %l5, vaddr in %l6
-sun4c_fault:
-	sethi	%hi(AC_SYNC_ERR), %l4
-	add	%l4, 0x4, %l6			! AC_SYNC_VA in %l6
-	lda	[%l6] ASI_CONTROL, %l5		! Address
-	lda	[%l4] ASI_CONTROL, %l6		! Error, retained for a bit
-
-	andn	%l5, 0xfff, %l5			! Encode all info into l7
-	srl	%l6, 14, %l4
-
-	and	%l4, 2, %l4
-	or	%l5, %l4, %l4
-
-	or	%l4, %l7, %l7			! l7 = [addr,write,txtfault]
-
-	andcc	%l0, PSR_PS, %g0
-	be	sun4c_fault_fromuser
-	 andcc	%l7, 1, %g0			! Text fault?
-
-	be	1f
-	 sethi	%hi(KERNBASE), %l4
-
-	mov	%l1, %l5			! PC
-
-1:
-	cmp	%l5, %l4
-	blu	sun4c_fault_fromuser
-	 sethi	%hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4
-
-	/* If the kernel references a bum kernel pointer, or a pte which
-	 * points to a non existent page in ram, we will run this code
-	 * _forever_ and lock up the machine!!!!! So we must check for
-	 * this condition, the AC_SYNC_ERR bits are what we must examine.
-	 * Also a parity error would make this happen as well.  So we just
-	 * check that we are in fact servicing a tlb miss and not some
-	 * other type of fault for the kernel.
-	 */
-	andcc	%l6, 0x80, %g0
-	be	sun4c_fault_fromuser
-	 and	%l5, %l4, %l5
-
-	/* Test for NULL pte_t * in vmalloc area. */
-	sethi   %hi(VMALLOC_START), %l4
-	cmp     %l5, %l4
-	blu,a   invalid_segment_patch1
-	 lduXa	[%l5] ASI_SEGMAP, %l4
-
-	sethi   %hi(swapper_pg_dir), %l4
-	srl     %l5, SUN4C_PGDIR_SHIFT, %l6
-	or      %l4, %lo(swapper_pg_dir), %l4
-	sll     %l6, 2, %l6
-	ld      [%l4 + %l6], %l4
-	andcc   %l4, PAGE_MASK, %g0
-	be      sun4c_fault_fromuser
-	 lduXa  [%l5] ASI_SEGMAP, %l4
-
-invalid_segment_patch1:
-	cmp	%l4, 0x7f
-	bne	1f
-	 sethi	%hi(sun4c_kfree_ring), %l4
-	or	%l4, %lo(sun4c_kfree_ring), %l4
-	ld	[%l4 + 0x18], %l3
-	deccc	%l3			! do we have a free entry?
-	bcs,a	2f			! no, unmap one.
-	 sethi	%hi(sun4c_kernel_ring), %l4
-
-	st	%l3, [%l4 + 0x18]	! sun4c_kfree_ring.num_entries--
-
-	ld	[%l4 + 0x00], %l6	! entry = sun4c_kfree_ring.ringhd.next
-	st	%l5, [%l6 + 0x08]	! entry->vaddr = address
-
-	ld	[%l6 + 0x00], %l3	! next = entry->next
-	ld	[%l6 + 0x04], %l7	! entry->prev
-
-	st	%l7, [%l3 + 0x04]	! next->prev = entry->prev
-	st	%l3, [%l7 + 0x00]	! entry->prev->next = next
-
-	sethi	%hi(sun4c_kernel_ring), %l4
-	or	%l4, %lo(sun4c_kernel_ring), %l4
-					! head = &sun4c_kernel_ring.ringhd
-
-	ld	[%l4 + 0x00], %l7	! head->next
-
-	st	%l4, [%l6 + 0x04]	! entry->prev = head
-	st	%l7, [%l6 + 0x00]	! entry->next = head->next
-	st	%l6, [%l7 + 0x04]	! head->next->prev = entry
-
-	st	%l6, [%l4 + 0x00]	! head->next = entry
-
-	ld	[%l4 + 0x18], %l3
-	inc	%l3			! sun4c_kernel_ring.num_entries++
-	st	%l3, [%l4 + 0x18]
-	b	4f
-	 ld	[%l6 + 0x08], %l5
-
-2:
-	or	%l4, %lo(sun4c_kernel_ring), %l4
-					! head = &sun4c_kernel_ring.ringhd
-
-	ld	[%l4 + 0x04], %l6	! entry = head->prev
-
-	ld	[%l6 + 0x08], %l3	! tmp = entry->vaddr
-
-	! Flush segment from the cache.
-	sethi	%hi((64 * 1024)), %l7
-9:
-vac_hwflush_patch1:
-vac_linesize_patch:
-	subcc	%l7, 16, %l7
-	bne	9b
-vac_hwflush_patch2:
-	 sta	%g0, [%l3 + %l7] ASI_FLUSHSEG
-
-	st	%l5, [%l6 + 0x08]	! entry->vaddr = address
-
-	ld	[%l6 + 0x00], %l5	! next = entry->next
-	ld	[%l6 + 0x04], %l7	! entry->prev
-
-	st	%l7, [%l5 + 0x04]	! next->prev = entry->prev
-	st	%l5, [%l7 + 0x00]	! entry->prev->next = next
-	st	%l4, [%l6 + 0x04]	! entry->prev = head
-
-	ld	[%l4 + 0x00], %l7	! head->next
-
-	st	%l7, [%l6 + 0x00]	! entry->next = head->next
-	st	%l6, [%l7 + 0x04]	! head->next->prev = entry
-	st	%l6, [%l4 + 0x00]	! head->next = entry
-
-	mov	%l3, %l5		! address = tmp
-
-4:
-num_context_patch1:
-	mov	0x08, %l7
-
-	ld	[%l6 + 0x08], %l4
-	ldub	[%l6 + 0x0c], %l3
-	or	%l4, %l3, %l4		! encode new vaddr/pseg into l4
-
-	sethi	%hi(AC_CONTEXT), %l3
-	lduba	[%l3] ASI_CONTROL, %l6
-
-	/* Invalidate old mapping, instantiate new mapping,
-	 * for each context.  Registers l6/l7 are live across
-	 * this loop.
-	 */
-3:	deccc	%l7
-	sethi	%hi(AC_CONTEXT), %l3
-	stba	%l7, [%l3] ASI_CONTROL
-invalid_segment_patch2:
-	mov	0x7f, %l3
-	stXa	%l3, [%l5] ASI_SEGMAP
-	andn	%l4, 0x1ff, %l3
-	bne	3b
-	 stXa	%l4, [%l3] ASI_SEGMAP
-
-	sethi	%hi(AC_CONTEXT), %l3
-	stba	%l6, [%l3] ASI_CONTROL
-
-	andn	%l4, 0x1ff, %l5
-
-1:
-	sethi	%hi(VMALLOC_START), %l4
-	cmp	%l5, %l4
-
-	bgeu	1f
-	 mov	1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7
-
-	sethi	%hi(KERNBASE), %l6
-
-	sub	%l5, %l6, %l4
-	srl	%l4, PAGE_SHIFT, %l4
-	sethi	%hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
-	or	%l3, %l4, %l3
-
-	sethi	%hi(PAGE_SIZE), %l4
-
-2:
-	sta	%l3, [%l5] ASI_PTE
-	deccc	%l7
-	inc	%l3
-	bne	2b
-	 add	%l5, %l4, %l5
-
-	b	7f
-	 sethi	%hi(sun4c_kernel_faults), %l4
-
-1:
-	srl	%l5, SUN4C_PGDIR_SHIFT, %l3
-	sethi	%hi(swapper_pg_dir), %l4
-	or	%l4, %lo(swapper_pg_dir), %l4
-	sll	%l3, 2, %l3
-	ld	[%l4 + %l3], %l4
-	and	%l4, PAGE_MASK, %l4
-
-	srl	%l5, (PAGE_SHIFT - 2), %l6
-	and	%l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
-	add	%l6, %l4, %l6
-
-	sethi	%hi(PAGE_SIZE), %l4
-
-2:
-	ld	[%l6], %l3
-	deccc	%l7
-	sta	%l3, [%l5] ASI_PTE
-	add	%l6, 0x4, %l6
-	bne	2b
-	 add	%l5, %l4, %l5
-
-	sethi	%hi(sun4c_kernel_faults), %l4
-7:
-	ld	[%l4 + %lo(sun4c_kernel_faults)], %l3
-	inc	%l3
-	st	%l3, [%l4 + %lo(sun4c_kernel_faults)]
-
-	/* Restore condition codes */
-	wr	%l0, 0x0, %psr
+	ld	[%l5 + 0x00], %g0	! sun4m_irq_global->pending
 	WRITE_PAUSE
-	jmp	%l1
-	 rett	%l2
-
-sun4c_fault_fromuser:
-	SAVE_ALL
+	or	%l0, PSR_PIL, %l4
+	wr	%l4, 0x0, %psr
+	WRITE_PAUSE
+	wr	%l4, PSR_ET, %psr
+	WRITE_PAUSE
+	call	sun4m_nmi
 	 nop
-	
-	mov	%l7, %o1		! Decode the info from %l7
-	mov	%l7, %o2
-	and	%o1, 1, %o1		! arg2 = text_faultp
-	mov	%l7, %o3
-	and	%o2, 2, %o2		! arg3 = writep
-	andn	%o3, 0xfff, %o3		! arg4 = faulting address
-
-	wr	%l0, PSR_ET, %psr
+	st	%l4, [%l5 + 0x08]	! sun4m_irq_global->mask_clear=0x80000000
 	WRITE_PAUSE
-
-	call	do_sun4c_fault
-	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr
-
+	ld	[%l5 + 0x00], %g0	! sun4m_irq_global->pending
+	WRITE_PAUSE
 	RESTORE_ALL
 
+#ifndef CONFIG_SMP
+	.align	4
+	.globl	linux_trap_ipi15_sun4m
+linux_trap_ipi15_sun4m:
+	SAVE_ALL
+
+	ba	sun4m_nmi_error
+	 nop
+#endif /* CONFIG_SMP */
+
 	.align	4
 	.globl	srmmu_fault
 srmmu_fault:
@@ -1483,11 +1166,13 @@
 	.globl	__ndelay
 __ndelay:
 	save	%sp, -STACKFRAME_SZ, %sp
-	mov	%i0, %o0
-	call	.umul			! round multiplier up so large ns ok
-	 mov	0x1ae, %o1		! 2**32 / (1 000 000 000 / HZ)
-	call	.umul
-	 mov	%i1, %o1		! udelay_val
+	mov	%i0, %o0		! round multiplier up so large ns ok
+	mov	0x1ae, %o1		! 2**32 / (1 000 000 000 / HZ)
+	umul	%o0, %o1, %o0
+	rd	%y, %o1
+	mov	%i1, %o1		! udelay_val
+	umul	%o0, %o1, %o0
+	rd	%y, %o1
 	ba	delay_continue
 	 mov	%o1, %o0		! >>32 later for better resolution
 
@@ -1496,18 +1181,21 @@
 	save	%sp, -STACKFRAME_SZ, %sp
 	mov	%i0, %o0
 	sethi	%hi(0x10c7), %o1	! round multiplier up so large us ok
-	call	.umul
-	 or	%o1, %lo(0x10c7), %o1	! 2**32 / 1 000 000
-	call	.umul
-	 mov	%i1, %o1		! udelay_val
+	or	%o1, %lo(0x10c7), %o1	! 2**32 / 1 000 000
+	umul	%o0, %o1, %o0
+	rd	%y, %o1
+	mov	%i1, %o1		! udelay_val
+	umul	%o0, %o1, %o0
+	rd	%y, %o1
 	sethi	%hi(0x028f4b62), %l0	! Add in rounding constant * 2**32,
 	or	%g0, %lo(0x028f4b62), %l0
 	addcc	%o0, %l0, %o0		! 2**32 * 0.009 999
 	bcs,a	3f
 	 add	%o1, 0x01, %o1
 3:
-	call	.umul
-	 mov	HZ, %o0			! >>32 earlier for wider range
+	mov	HZ, %o0			! >>32 earlier for wider range
+	umul	%o0, %o1, %o0
+	rd	%y, %o1
 
 delay_continue:
 	cmp	%o0, 0x0
@@ -1670,4 +1358,26 @@
 	ret
 	 restore
 
+#ifdef CONFIG_SMP
+ENTRY(hard_smp_processor_id)
+661:	rd		%tbr, %g1
+	srl		%g1, 12, %o0
+	and		%o0, 3, %o0
+	.section	.cpuid_patch, "ax"
+	/* Instruction location. */
+	.word		661b
+	/* SUN4D implementation. */
+	lda		[%g0] ASI_M_VIKING_TMP1, %o0
+	nop
+	nop
+	/* LEON implementation. */
+	rd		%asr17, %o0
+	srl		%o0, 0x1c, %o0
+	nop
+	.previous
+	retl
+	 nop
+ENDPROC(hard_smp_processor_id)
+#endif
+
 /* End of entry.S */
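
The __ndelay/__udelay hunks above stop calling the out-of-line .umul helper and use the hardware umul instruction directly, which is available now that the v7 sun4c parts without a hardware multiplier are gone. Each umul/"rd %y" pair is a plain 32x32->64 multiply with the high half read back from the Y register; a C equivalent, for reference:

	/* What each "umul ...; rd %y, ..." pair computes: a full 32x32->64
	 * multiply, low half in the destination register, high half in %y. */
	static void umul64(unsigned int a, unsigned int b,
			   unsigned int *lo, unsigned int *hi)
	{
		unsigned long long prod = (unsigned long long)a * b;

		*lo = (unsigned int)prod;
		*hi = (unsigned int)(prod >> 32);
	}
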
diff --git a/arch/sparc/kernel/etrap_32.S b/arch/sparc/kernel/etrap_32.S
index e806fcd..84b5f0d 100644
--- a/arch/sparc/kernel/etrap_32.S
+++ b/arch/sparc/kernel/etrap_32.S
@@ -216,9 +216,7 @@
 	/* Call MMU-architecture dependent stack checking
 	 * routine.
 	 */
-	.globl	tsetup_mmu_patchme
-tsetup_mmu_patchme:
-	b	tsetup_sun4c_stackchk
+	b	tsetup_srmmu_stackchk
 	 andcc	%sp, 0x7, %g0
 
 	/* Architecture specific stack checking routines.  When either
@@ -228,52 +226,6 @@
 	 */
 #define glob_tmp     g1
 
-tsetup_sun4c_stackchk:
-	/* Done by caller: andcc %sp, 0x7, %g0 */
-	bne	trap_setup_user_stack_is_bolixed
-	 sra	%sp, 29, %glob_tmp
-
-	add	%glob_tmp, 0x1, %glob_tmp
-	andncc	%glob_tmp, 0x1, %g0
-	bne	trap_setup_user_stack_is_bolixed
-	 and	%sp, 0xfff, %glob_tmp		! delay slot
-
-	/* See if our dump area will be on more than one
-	 * page.
-	 */
-	add	%glob_tmp, 0x38, %glob_tmp
-	andncc	%glob_tmp, 0xff8, %g0
-	be	tsetup_sun4c_onepage		! only one page to check
-	 lda	[%sp] ASI_PTE, %glob_tmp	! have to check first page anyways
-
-tsetup_sun4c_twopages:
-	/* Is first page ok permission wise? */
-	srl	%glob_tmp, 29, %glob_tmp
-	cmp	%glob_tmp, 0x6
-	bne	trap_setup_user_stack_is_bolixed
-	 add	%sp, 0x38, %glob_tmp		/* Is second page in vma hole? */
-
-	sra	%glob_tmp, 29, %glob_tmp
-	add	%glob_tmp, 0x1, %glob_tmp
-	andncc	%glob_tmp, 0x1, %g0
-	bne	trap_setup_user_stack_is_bolixed
-	 add	%sp, 0x38, %glob_tmp
-
-	lda	[%glob_tmp] ASI_PTE, %glob_tmp
-
-tsetup_sun4c_onepage:
-	srl	%glob_tmp, 29, %glob_tmp
-	cmp	%glob_tmp, 0x6				! can user write to it?
-	bne	trap_setup_user_stack_is_bolixed	! failure
-	 nop
-
-	STORE_WINDOW(sp)
-
-	restore %g0, %g0, %g0
-
-	jmpl	%t_retpc + 0x8, %g0
-	 mov	%t_kstack, %sp
-
 	.globl	tsetup_srmmu_stackchk
 tsetup_srmmu_stackchk:
 	/* Check results of callers andcc %sp, 0x7, %g0 */
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 5877857..a0f5c20 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -26,11 +26,9 @@
 #include <asm/pgtsrmmu.h>	/* SRMMU_PGDIR_SHIFT */
 
 	.data
-/* 
- * The following are used with the prom_vector node-ops to figure out
- * the cpu-type 
+/* The following are used with the prom_vector node-ops to figure out
+ * the cpu-type
  */
-
 	.align 4
 cputyp:
         .word   1
@@ -38,384 +36,35 @@
 	.align 4
 	.globl cputypval
 cputypval:
-	.asciz "sun4c"
+	.asciz "sun4m"
 	.ascii "     "
 
-cputypvalend:
-cputypvallen = cputypvar - cputypval
-
+/* Tested on SS-5, SS-10 */
 	.align 4
-/*
- * Sun people can't spell worth damn. "compatability" indeed.
- * At least we *know* we can't spell, and use a spell-checker.
- */
-
-/* Uh, actually Linus it is I who cannot spell. Too much murky
- * Sparc assembly will do this to ya.
- */
 cputypvar:
-	.asciz "compatability"
-
-/* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */
-	.align 4
-cputypvar_sun4m:
 	.asciz "compatible"
 
 	.align 4
 
-sun4_notsup:
-	.asciz	"Sparc-Linux sun4 support does no longer exist.\n\n"
+sun4c_notsup:
+	.asciz	"Sparc-Linux sun4/sun4c support no longer exists.\n\n"
 	.align 4
 
 sun4e_notsup:
         .asciz  "Sparc-Linux sun4e support does not exist\n\n"
 	.align 4
 
-	/* The Sparc trap table, bootloader gives us control at _start. */
-	__HEAD
-	.globl	_stext, _start, __stext
-	.globl  trapbase
-_start:   /* danger danger */
-__stext:
-_stext:
-trapbase:
-#ifdef CONFIG_SMP
-trapbase_cpu0:
-#endif
-/* We get control passed to us here at t_zero. */
-t_zero:	b gokernel; nop; nop; nop;
-t_tflt:	SPARC_TFAULT                        /* Inst. Access Exception        */
-t_bins:	TRAP_ENTRY(0x2, bad_instruction)    /* Illegal Instruction           */
-t_pins:	TRAP_ENTRY(0x3, priv_instruction)   /* Privileged Instruction        */
-t_fpd:	TRAP_ENTRY(0x4, fpd_trap_handler)   /* Floating Point Disabled       */
-t_wovf:	WINDOW_SPILL                        /* Window Overflow               */
-t_wunf:	WINDOW_FILL                         /* Window Underflow              */
-t_mna:	TRAP_ENTRY(0x7, mna_handler)        /* Memory Address Not Aligned    */
-t_fpe:	TRAP_ENTRY(0x8, fpe_trap_handler)   /* Floating Point Exception      */
-t_dflt:	SPARC_DFAULT                        /* Data Miss Exception           */
-t_tio:	TRAP_ENTRY(0xa, do_tag_overflow)    /* Tagged Instruction Ovrflw     */
-t_wpt:	TRAP_ENTRY(0xb, do_watchpoint)      /* Watchpoint Detected           */
-t_badc:	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
-t_irq1:	TRAP_ENTRY_INTERRUPT(1)             /* IRQ Software/SBUS Level 1     */
-t_irq2:	TRAP_ENTRY_INTERRUPT(2)             /* IRQ SBUS Level 2              */
-t_irq3:	TRAP_ENTRY_INTERRUPT(3)             /* IRQ SCSI/DMA/SBUS Level 3     */
-t_irq4:	TRAP_ENTRY_INTERRUPT(4)             /* IRQ Software Level 4          */
-t_irq5:	TRAP_ENTRY_INTERRUPT(5)             /* IRQ SBUS/Ethernet Level 5     */
-t_irq6:	TRAP_ENTRY_INTERRUPT(6)             /* IRQ Software Level 6          */
-t_irq7:	TRAP_ENTRY_INTERRUPT(7)             /* IRQ Video/SBUS Level 5        */
-t_irq8:	TRAP_ENTRY_INTERRUPT(8)             /* IRQ SBUS Level 6              */
-t_irq9:	TRAP_ENTRY_INTERRUPT(9)             /* IRQ SBUS Level 7              */
-t_irq10:TRAP_ENTRY_INTERRUPT(10)            /* IRQ Timer #1 (one we use)     */
-t_irq11:TRAP_ENTRY_INTERRUPT(11)            /* IRQ Floppy Intr.              */
-t_irq12:TRAP_ENTRY_INTERRUPT(12)            /* IRQ Zilog serial chip         */
-t_irq13:TRAP_ENTRY_INTERRUPT(13)            /* IRQ Audio Intr.               */
-t_irq14:TRAP_ENTRY_INTERRUPT(14)            /* IRQ Timer #2                  */
-	.globl	t_nmi
-#ifndef CONFIG_SMP
-t_nmi:	NMI_TRAP                            /* Level 15 (NMI)                */
-#else
-t_nmi:	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
-#endif
-t_racc:	TRAP_ENTRY(0x20, do_reg_access)     /* General Register Access Error */
-t_iacce:BAD_TRAP(0x21)                      /* Instr Access Error            */
-t_bad22:BAD_TRAP(0x22) BAD_TRAP(0x23)
-t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled)    /* Co-Processor Disabled         */
-t_uflsh:SKIP_TRAP(0x25, unimp_flush)        /* Unimplemented FLUSH inst.     */
-t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27)
-t_cpexc:TRAP_ENTRY(0x28, do_cp_exception)   /* Co-Processor Exception        */
-t_dacce:SPARC_DFAULT                        /* Data Access Error             */
-t_hwdz:	TRAP_ENTRY(0x2a, do_hw_divzero)     /* Division by zero, you lose... */
-t_dserr:BAD_TRAP(0x2b)                      /* Data Store Error              */
-t_daccm:BAD_TRAP(0x2c)                      /* Data Access MMU-Miss          */
-t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
-t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
-t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
-t_iaccm:BAD_TRAP(0x3c)                      /* Instr Access MMU-Miss         */
-t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41)
-t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46)
-t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b)
-t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50)
-t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
-t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
-t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
-t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
-t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
-t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
-t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
-t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
-t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
-t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f)
-t_bad80:BAD_TRAP(0x80)                      /* SunOS System Call             */
-t_sbkpt:BREAKPOINT_TRAP                     /* Software Breakpoint/KGDB      */
-t_divz:	TRAP_ENTRY(0x82, do_hw_divzero)     /* Divide by zero trap           */
-t_flwin:TRAP_ENTRY(0x83, do_flush_windows)  /* Flush Windows Trap            */
-t_clwin:BAD_TRAP(0x84)                      /* Clean Windows Trap            */
-t_rchk:	BAD_TRAP(0x85)                      /* Range Check                   */
-t_funal:BAD_TRAP(0x86)                      /* Fix Unaligned Access Trap     */
-t_iovf:	BAD_TRAP(0x87)                      /* Integer Overflow Trap         */
-t_bad88:BAD_TRAP(0x88)                      /* Slowaris System Call          */
-t_bad89:BAD_TRAP(0x89)                      /* Net-B.S. System Call          */
-t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e)
-t_bad8f:BAD_TRAP(0x8f)
-t_linux:LINUX_SYSCALL_TRAP                  /* Linux System Call             */
-t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95)
-t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a)
-t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f)
-t_getcc:GETCC_TRAP                          /* Get Condition Codes           */
-t_setcc:SETCC_TRAP                          /* Set Condition Codes           */
-t_getpsr:GETPSR_TRAP                        /* Get PSR Register              */
-t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
-t_bada7:BAD_TRAP(0xa7)
-t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
-t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
-t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
-t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
-t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
-t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
-t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
-t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
-t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
-t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
-t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
-t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
-t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
-t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
-t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
-t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
-t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
-t_badfc:BAD_TRAP(0xfc)
-t_kgdb:	KGDB_TRAP(0xfd)
-dbtrap:	BAD_TRAP(0xfe)                      /* Debugger/PROM breakpoint #1   */
-dbtrap2:BAD_TRAP(0xff)                      /* Debugger/PROM breakpoint #2   */	
+/* The trap-table - located in the __HEAD section */
+#include "ttable_32.S"
 
-	.globl	end_traptable
-end_traptable:
-
-#ifdef CONFIG_SMP
-	/* Trap tables for the other cpus. */
-	.globl	trapbase_cpu1, trapbase_cpu2, trapbase_cpu3
-trapbase_cpu1:
-	BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
-	TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
-	WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
-	TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
-	TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
-	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
-	TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
-	TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
-	TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
-	TRAP_ENTRY_INTERRUPT(7)	TRAP_ENTRY_INTERRUPT(8)
-	TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
-	TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
-	TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
-	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
-	TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
-	BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
-	BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
-	SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
-	BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
-	BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
-	BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
-	BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
-	BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
-	BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
-	BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
-	BAD_TRAP(0x50)
-	BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
-	BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
-	BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
-	BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
-	BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
-	BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
-	BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
-	BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
-	BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
-	BAD_TRAP(0x7e) BAD_TRAP(0x7f)
-	BAD_TRAP(0x80)
-	BREAKPOINT_TRAP
-	TRAP_ENTRY(0x82, do_hw_divzero)
-	TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
-	BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
-	BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
-	BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
-	LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
-	BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
-	BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
-	BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
-	BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
-	BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
-	BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
-	BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
-	BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
-	BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
-	BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
-	BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
-	BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
-	BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
-	BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
-	BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
-	BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
-	BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
-	BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
-	BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
-	BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
-	BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
-	BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
-
-trapbase_cpu2:
-	BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
-	TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
-	WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
-	TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
-	TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
-	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
-	TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
-	TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
-	TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
-	TRAP_ENTRY_INTERRUPT(7)	TRAP_ENTRY_INTERRUPT(8)
-	TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
-	TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
-	TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
-	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
-	TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
-	BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
-	BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
-	SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
-	BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
-	BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
-	BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
-	BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
-	BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
-	BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
-	BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
-	BAD_TRAP(0x50)
-	BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
-	BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
-	BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
-	BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
-	BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
-	BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
-	BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
-	BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
-	BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
-	BAD_TRAP(0x7e) BAD_TRAP(0x7f)
-	BAD_TRAP(0x80)
-	BREAKPOINT_TRAP
-	TRAP_ENTRY(0x82, do_hw_divzero)
-	TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
-	BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
-	BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
-	BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
-	LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
-	BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
-	BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
-	BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
-	BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
-	BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
-	BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
-	BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
-	BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
-	BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
-	BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
-	BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
-	BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
-	BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
-	BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
-	BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
-	BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
-	BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
-	BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
-	BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
-	BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
-	BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
-	BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
-
-trapbase_cpu3:
-	BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
-	TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
-	WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
-	TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
-	TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
-	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
-	TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
-	TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
-	TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
-	TRAP_ENTRY_INTERRUPT(7)	TRAP_ENTRY_INTERRUPT(8)
-	TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
-	TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
-	TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
-	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
-	TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
-	BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush)
-	BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
-	SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
-	BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
-	BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
-	BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
-	BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
-	BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
-	BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
-	BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
-	BAD_TRAP(0x50)
-	BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
-	BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
-	BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
-	BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
-	BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
-	BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
-	BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
-	BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
-	BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
-	BAD_TRAP(0x7e) BAD_TRAP(0x7f)
-	BAD_TRAP(0x80)
-	BREAKPOINT_TRAP
-	TRAP_ENTRY(0x82, do_hw_divzero)
-	TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
-	BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
-	BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
-	BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
-	LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
-	BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
-	BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
-	BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP
-	BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
-	BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
-	BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
-	BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
-	BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
-	BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
-	BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
-	BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
-	BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
-	BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
-	BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
-	BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
-	BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
-	BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
-	BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
-	BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
-	BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
-	BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
-	BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
-
-#endif
 	.align PAGE_SIZE
 
 /* This was the only reasonable way I could think of to properly align
  * these page-table data structures.
  */
-	.globl pg0, pg1, pg2, pg3
-	.globl empty_bad_page
-	.globl empty_bad_page_table
-	.globl empty_zero_page
 	.globl swapper_pg_dir
 swapper_pg_dir:		.skip PAGE_SIZE
-pg0:			.skip PAGE_SIZE
-pg1:			.skip PAGE_SIZE
-pg2:			.skip PAGE_SIZE
-pg3:			.skip PAGE_SIZE
-empty_bad_page:		.skip PAGE_SIZE
-empty_bad_page_table:	.skip PAGE_SIZE
+	.globl empty_zero_page
 empty_zero_page:	.skip PAGE_SIZE
 
 	.global root_flags
@@ -523,10 +172,10 @@
 		ldd	[%g2 + 0x8], %g4
 		std	%g4, [%g3 + 0x8]	! Copy proms handler
 
-/* Must determine whether we are on a sun4c MMU, SRMMU, or SUN4/400 MUTANT
- * MMU so we can remap ourselves properly.  DON'T TOUCH %l0 thru %l5 in these
- * remapping routines, we need their values afterwards!
+/* DON'T TOUCH %l0 thru %l5 in these remapping routines,
+ * we need their values afterwards!
  */
+
 		/* Now check whether we are already mapped, if we
 		 * are we can skip all this garbage coming up.
 		 */
@@ -535,26 +184,29 @@
 		be	go_to_highmem		! this will be a nop then
 		 nop
 
-		set	LOAD_ADDR, %g6
+		/* Validate that we are in fact running on an
+		 * SRMMU based cpu.
+		 */
+		set	0x4000, %g6
 		cmp	%g7, %g6
-		bne	remap_not_a_sun4	! This is not a Sun4
+		bne	not_a_sun4
 		 nop
 
-		or	%g0, 0x1, %g1
-		lduba	[%g1] ASI_CONTROL, %g1	! Only safe to try on Sun4.
-		subcc	%g1, 0x24, %g0		! Is this a mutant Sun4/400???
-		be	sun4_mutant_remap	! Ugh, it is...
+halt_sun4_or_sun4c:
+		ld	[%g7 + 0x68], %o1
+		set	sun4c_notsup, %o0
+		sub	%o0, %l6, %o0
+		call	%o1
+		 nop
+		ba	halt_me
 		 nop
 
-		b	sun4_normal_remap	! regular sun4, 2 level mmu
+not_a_sun4:
+		lda	[%g0] ASI_M_MMUREGS, %g1
+		andcc	%g1, 1, %g0
+		be	halt_sun4_or_sun4c
 		 nop
 
-remap_not_a_sun4:
-		lda	[%g0] ASI_M_MMUREGS, %g1 ! same as ASI_PTE on sun4c
-		and	%g1, 0x1, %g1		! Test SRMMU Enable bit ;-)
-		cmp	%g1, 0x0
-		be	sun4c_remap		! A sun4c MMU or normal Sun4
-		 nop
 srmmu_remap:
 		/* First, check for a viking (TI) module. */
 		set	0x40000000, %g2
@@ -660,72 +312,6 @@
 		b	go_to_highmem
 		 nop					! wheee....
 
-		/* This remaps the kernel on Sun4/4xx machines
-		 * that have the Sun Mutant Three Level MMU.
-		 * It's like a platypus, Sun didn't have the
-		 * SRMMU in conception so they kludged the three
-		 * level logic in the regular Sun4 MMU probably.
-		 *
-		 * Basically, you take each entry in the top level
-		 * directory that maps the low 3MB starting at
-		 * address zero and put the mapping in the KERNBASE
-		 * slots.  These top level pgd's are called regmaps.
-		 */
-sun4_mutant_remap:
-		or	%g0, %g0, %g3		! source base
-		sethi	%hi(KERNBASE), %g4	! destination base
-		or	%g4, %lo(KERNBASE), %g4
-		sethi	%hi(0x300000), %g5
-		or	%g5, %lo(0x300000), %g5	! upper bound 3MB
-		or	%g0, 0x1, %l6
-		sll	%l6, 24, %l6		! Regmap mapping size
-		add	%g3, 0x2, %g3		! Base magic
-		add	%g4, 0x2, %g4		! Base magic
-
-		/* Main remapping loop on Sun4-Mutant-MMU.
-		 * "I am not an animal..." -Famous Mutant Person
-		 */
-sun4_mutant_loop:
-		lduha	[%g3] ASI_REGMAP, %g2	! Get lower entry
-		stha	%g2, [%g4] ASI_REGMAP	! Store in high entry
-		add	%g4, %l6, %g4		! Move up high memory ptr
-		subcc	%g3, %g5, %g0		! Reached our limit?
-		blu	sun4_mutant_loop	! Nope, loop again
-		 add	%g3, %l6, %g3		! delay, Move up low ptr
-		b	go_to_highmem		! Jump to high memory.
-		 nop
-
-		/* The following is for non-4/4xx sun4 MMU's. */
-sun4_normal_remap:
-		mov	0, %g3			! source base
-		set	KERNBASE, %g4		! destination base
-		set	0x300000, %g5		! upper bound 3MB
-		mov	1, %l6
-		sll	%l6, 18, %l6		! sun4 mmu segmap size
-sun4_normal_loop:
-		lduha	[%g3] ASI_SEGMAP, %g6	! load phys_seg
-		stha	%g6, [%g4] ASI_SEGMAP	! stort new virt mapping
-		add	%g3, %l6, %g3		! increment source pointer
-		subcc	%g3, %g5, %g0		! reached limit?
-		blu	sun4_normal_loop	! nope, loop again
-		 add	%g4, %l6, %g4		! delay, increment dest ptr
-		b	go_to_highmem
-		 nop
-
-		/* The following works for Sun4c MMU's */
-sun4c_remap:
-		mov	0, %g3			! source base
-		set	KERNBASE, %g4		! destination base
-		set	0x300000, %g5		! upper bound 3MB
-		mov	1, %l6
-		sll	%l6, 18, %l6		! sun4c mmu segmap size
-sun4c_remap_loop:
-		lda	[%g3] ASI_SEGMAP, %g6	! load phys_seg
-		sta	%g6, [%g4] ASI_SEGMAP   ! store new virt mapping
-		add	%g3, %l6, %g3		! Increment source ptr
-		subcc	%g3, %g5, %g0		! Reached limit?
-		bl	sun4c_remap_loop	! Nope, loop again
-		 add	%g4, %l6, %g4		! delay, Increment dest ptr
 
 /* Now do a non-relative jump so that PC is in high-memory */
 go_to_highmem:
@@ -750,35 +336,12 @@
 		sethi	%hi(linux_dbvec), %g1
 		st	%o1, [%g1 + %lo(linux_dbvec)]
 
-		ld	[%o0 + 0x4], %o3
-		and	%o3, 0x3, %o5			! get the version
-
-		cmp	%o3, 0x2			! a v2 prom?
-		be	found_version
-		 nop
-
-		/* paul@sfe.com.au */
-		cmp	%o3, 0x3			! a v3 prom?
-		be	found_version
-		 nop
-
-/* Old sun4's pass our load address into %o0 instead of the prom
- * pointer. On sun4's you have to hard code the romvec pointer into
- * your code. Sun probably still does that because they don't even
- * trust their own "OpenBoot" specifications.
- */
-		set	LOAD_ADDR, %g6
-		cmp	%o0, %g6		! an old sun4?
-		be	sun4_init
-		 nop
-
-found_version:
 /* Get the machine type via the mysterious romvec node operations. */
 
-		add	%g7, 0x1c, %l1		
+		add	%g7, 0x1c, %l1
 		ld	[%l1], %l0
 		ld	[%l0], %l0
-		call 	%l0
+		call	%l0
 		 or	%g0, %g0, %o0		! next_node(0) = first_node
 		or	%o0, %g0, %g6
 
@@ -786,28 +349,13 @@
 		or	%o1, %lo(cputypvar), %o1
 		sethi	%hi(cputypval), %o2	! information, the string
 		or	%o2, %lo(cputypval), %o2
-		ld	[%l1], %l0		! 'compatibility' tells
+		ld	[%l1], %l0		! 'compatible' tells
 		ld	[%l0 + 0xc], %l0	! that we want 'sun4x' where
-		call	%l0			! x is one of '', 'c', 'm',
-		 nop				! 'd' or 'e'. %o2 holds pointer
+		call	%l0			! x is one of 'm', 'd' or 'e'.
+		 nop				! %o2 holds pointer
 						! to a buf where above string
 						! will get stored by the prom.
 
-		subcc	%o0, %g0, %g0
-		bpos	got_prop		! Got the property
-		 nop
-
-		or	%g6, %g0, %o0
-		sethi	%hi(cputypvar_sun4m), %o1
-		or	%o1, %lo(cputypvar_sun4m), %o1
-		sethi	%hi(cputypval), %o2
-		or	%o2, %lo(cputypval), %o2
-		ld	[%l1], %l0
-		ld	[%l0 + 0xc], %l0
-		call	%l0
-		 nop
-
-got_prop:
 #ifdef CONFIG_SPARC_LEON
 	        /* no cpu-type check is needed, it is a SPARC-LEON */
 
@@ -826,45 +374,29 @@
 		/* Update boot_cpu_id only on boot cpu */
 		stub	%g1, [%g2 + %lo(boot_cpu_id)]
 
-		ba sun4c_continue_boot
+		ba continue_boot
 		 nop
 #endif
+
+/* Check the cputype. We may be booted on a sun4u (64-bit box),
+ * and sun4d needs special treatment.
+ */
+
 		set	cputypval, %o2
 		ldub	[%o2 + 0x4], %l1
 
-		cmp	%l1, ' '
-		be	1f
-		 cmp	%l1, 'c'
-		be	1f
-		 cmp	%l1, 'm'
-		be	1f
+		cmp	%l1, 'm'
+		be	sun4m_init
 		 cmp	%l1, 's'
-		be	1f
+		be	sun4m_init
 		 cmp	%l1, 'd'
-		be	1f
+		be	sun4d_init
 		 cmp	%l1, 'e'
 		be	no_sun4e_here		! Could be a sun4e.
 		 nop
 		b	no_sun4u_here		! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
 		 nop
 
-1:		set	cputypval, %l1
-		ldub	[%l1 + 0x4], %l1
-		cmp	%l1, 'm'		! Test for sun4d, sun4e ?
-		be	sun4m_init
-		 cmp	%l1, 's'		! Treat sun4s as sun4m
-		be	sun4m_init
-		 cmp	%l1, 'd'		! Let us see how the beast will die
-		be	sun4d_init
-		 nop
-
-		/* Jump into mmu context zero. */
-		set	AC_CONTEXT, %g1
-		stba	%g0, [%g1] ASI_CONTROL
-
-		b	sun4c_continue_boot
-		 nop
-
 /* CPUID in bootbus can be found at PA 0xff0140000 */
 #define SUN4D_BOOTBUS_CPUID     0xf0140000
 
@@ -892,66 +424,6 @@
 	/* Fall through to sun4m_init */
 
 sun4m_init:
-	/* XXX Fucking Cypress... */
-	lda	[%g0] ASI_M_MMUREGS, %g5
-	srl	%g5, 28, %g4
-
-	cmp	%g4, 1
-	bne	1f
-	 srl	%g5, 24, %g4
-
-	and	%g4, 0xf, %g4
-	cmp	%g4, 7		/* This would be a HyperSparc. */
-
-	bne	2f
-	 nop
-
-1:
-
-#define PATCH_IT(dst, src)	\
-	set	(dst), %g5;	\
-	set	(src), %g4;	\
-	ld	[%g4], %g3;	\
-	st	%g3, [%g5];	\
-	ld	[%g4+0x4], %g3;	\
-	st	%g3, [%g5+0x4];
-
-	/* Signed multiply. */
-	PATCH_IT(.mul, .mul_patch)
-	PATCH_IT(.mul+0x08, .mul_patch+0x08)
-
-	/* Signed remainder. */
-	PATCH_IT(.rem, .rem_patch)
-	PATCH_IT(.rem+0x08, .rem_patch+0x08)
-	PATCH_IT(.rem+0x10, .rem_patch+0x10)
-	PATCH_IT(.rem+0x18, .rem_patch+0x18)
-	PATCH_IT(.rem+0x20, .rem_patch+0x20)
-	PATCH_IT(.rem+0x28, .rem_patch+0x28)
-
-	/* Signed division. */
-	PATCH_IT(.div, .div_patch)
-	PATCH_IT(.div+0x08, .div_patch+0x08)
-	PATCH_IT(.div+0x10, .div_patch+0x10)
-	PATCH_IT(.div+0x18, .div_patch+0x18)
-	PATCH_IT(.div+0x20, .div_patch+0x20)
-
-	/* Unsigned multiply. */
-	PATCH_IT(.umul, .umul_patch)
-	PATCH_IT(.umul+0x08, .umul_patch+0x08)
-
-	/* Unsigned remainder. */
-	PATCH_IT(.urem, .urem_patch)
-	PATCH_IT(.urem+0x08, .urem_patch+0x08)
-	PATCH_IT(.urem+0x10, .urem_patch+0x10)
-	PATCH_IT(.urem+0x18, .urem_patch+0x18)
-
-	/* Unsigned division. */
-	PATCH_IT(.udiv, .udiv_patch)
-	PATCH_IT(.udiv+0x08, .udiv_patch+0x08)
-	PATCH_IT(.udiv+0x10, .udiv_patch+0x10)
-
-#undef PATCH_IT
-
 /* Ok, the PROM could have done funny things and apple cider could still
  * be sitting in the fault status/address registers.  Read them all to
  * clear them so we don't get magic faults later on.
@@ -962,7 +434,7 @@
 		srl	%o1, 28, %o1		! Get a type of the CPU
 
 		subcc	%o1, 4, %g0		! TI: Viking or MicroSPARC
-		be	sun4c_continue_boot
+		be	continue_boot
 		 nop
 
 		set	AC_M_SFSR, %o0
@@ -972,7 +444,7 @@
 
 		/* Fujitsu MicroSPARC-II has no asynchronous flavors of FARs */
 		subcc	%o1, 0, %g0
-		be	sun4c_continue_boot
+		be	continue_boot
 		 nop
 
 		set	AC_M_AFSR, %o0
@@ -982,8 +454,7 @@
 		 nop
 
 
-sun4c_continue_boot:
-
+continue_boot:
 
 /* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
  * show-time!
@@ -1026,10 +497,7 @@
 		mov	%g0, %g3
 		stub	%g3, [%g2 + %lo(boot_cpu_id)]
 
-1:		/* boot_cpu_id set. calculate boot_cpu_id4 = boot_cpu_id*4 */
-		sll	%g3, 2, %g3
-		sethi	%hi(boot_cpu_id4), %g2
-		stub	%g3, [%g2 + %lo(boot_cpu_id4)]
+1:		sll	%g3, 2, %g3
 
 		/* Initialize the uwinmask value for init task just in case.
 		 * But first make current_set[boot_cpu_id] point to something useful.
@@ -1165,19 +633,6 @@
 		call	halt_me
 		 nop
 
-sun4_init:
-		sethi   %hi(SUN4_PROM_VECTOR+0x84), %o1
-		ld      [%o1 + %lo(SUN4_PROM_VECTOR+0x84)], %o1
-		set     sun4_notsup, %o0
-		call    %o1	/* printf */
-		 nop
-		sethi   %hi(SUN4_PROM_VECTOR+0xc4), %o1
-		ld      [%o1 + %lo(SUN4_PROM_VECTOR+0xc4)], %o1
-		call    %o1	/* exittomon */
-		 nop
-1:		ba      1b                      ! Cannot exit into KMON
-		 nop
-
 no_sun4e_here:
 		ld	[%g7 + 0x68], %o1
 		set	sun4e_notsup, %o0
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 0d810c2..b42ddbf 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -906,7 +906,7 @@
 	 * error and will instead write junk into the relocation and
 	 * you'll have an unbootable kernel.
 	 */
-#include "ttable.S"
+#include "ttable_64.S"
 
 ! 0x0000000000428000
 
diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c
index 9167db4..6bd7501 100644
--- a/arch/sparc/kernel/idprom.c
+++ b/arch/sparc/kernel/idprom.c
@@ -25,22 +25,9 @@
  * of the Sparc CPU and have a meaningful IDPROM machtype value that we
  * know about.  See asm-sparc/machines.h for empirical constants.
  */
-static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
-/* First, Sun4's */
-{ .name = "Sun 4/100 Series",        .id_machtype = (SM_SUN4 | SM_4_110) },
-{ .name = "Sun 4/200 Series",        .id_machtype = (SM_SUN4 | SM_4_260) },
-{ .name = "Sun 4/300 Series",        .id_machtype = (SM_SUN4 | SM_4_330) },
-{ .name = "Sun 4/400 Series",        .id_machtype = (SM_SUN4 | SM_4_470) },
-/* Now Leon */
+static struct Sun_Machine_Models Sun_Machines[] = {
+/* First, Leon */
 { .name = "Leon3 System-on-a-Chip",  .id_machtype = (M_LEON | M_LEON3_SOC) },
-/* Now, Sun4c's */
-{ .name = "Sun4c SparcStation 1",    .id_machtype = (SM_SUN4C | SM_4C_SS1) },
-{ .name = "Sun4c SparcStation IPC",  .id_machtype = (SM_SUN4C | SM_4C_IPC) },
-{ .name = "Sun4c SparcStation 1+",   .id_machtype = (SM_SUN4C | SM_4C_SS1PLUS) },
-{ .name = "Sun4c SparcStation SLC",  .id_machtype = (SM_SUN4C | SM_4C_SLC) },
-{ .name = "Sun4c SparcStation 2",    .id_machtype = (SM_SUN4C | SM_4C_SS2) },
-{ .name = "Sun4c SparcStation ELC",  .id_machtype = (SM_SUN4C | SM_4C_ELC) },
-{ .name = "Sun4c SparcStation IPX",  .id_machtype = (SM_SUN4C | SM_4C_IPX) },
 /* Finally, early Sun4m's */
 { .name = "Sun4m SparcSystem600",    .id_machtype = (SM_SUN4M | SM_4M_SS60) },
 { .name = "Sun4m SparcStation10/20", .id_machtype = (SM_SUN4M | SM_4M_SS50) },
@@ -53,7 +40,7 @@
 	char sysname[128];
 	register int i;
 
-	for (i = 0; i < NUM_SUN_MACHINES; i++) {
+	for (i = 0; i < ARRAY_SIZE(Sun_Machines); i++) {
 		if (Sun_Machines[i].id_machtype == machtype) {
 			if (machtype != (SM_SUN4M_OBP | 0x00) ||
 			    prom_getproperty(prom_root_node, "banner-name",
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 21bd739..a2846f5 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,6 +50,8 @@
 #include <asm/io-unit.h>
 #include <asm/leon.h>
 
+const struct sparc32_dma_ops *sparc32_dma_ops;
+
 /* This function must make sure that caches and memory are coherent after DMA
  * On LEON systems without cache snooping it flushes the entire D-CACHE.
  */
@@ -229,7 +231,7 @@
 	}
 
 	pa &= PAGE_MASK;
-	sparc_mapiorange(bus, pa, res->start, resource_size(res));
+	srmmu_mapiorange(bus, pa, res->start, resource_size(res));
 
 	return (void __iomem *)(unsigned long)(res->start + offset);
 }
@@ -243,7 +245,7 @@
 
 	plen = resource_size(res);
 	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
-	sparc_unmapiorange(res->start, plen);
+	srmmu_unmapiorange(res->start, plen);
 	release_resource(res);
 }
 
@@ -292,13 +294,13 @@
 		goto err_nova;
 	}
 
-	// XXX The mmu_map_dma_area does this for us below, see comments.
-	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
+	// XXX The sbus_map_dma_area does this for us below, see comments.
+	// srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
 	/*
 	 * XXX That's where sdev would be used. Currently we load
 	 * all iommu tables with the same translations.
 	 */
-	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
+	if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
 		goto err_noiommu;
 
 	res->name = op->dev.of_node->name;
@@ -343,7 +345,7 @@
 	kfree(res);
 
 	pgv = virt_to_page(p);
-	mmu_unmap_dma_area(dev, ba, n);
+	sbus_unmap_dma_area(dev, ba, n);
 
 	__free_pages(pgv, get_order(n));
 }
@@ -381,11 +383,6 @@
 		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	mmu_get_scsi_sgl(dev, sg, n);
-
-	/*
-	 * XXX sparc64 can return a partial length here. sun4c should do this
-	 * but it currently panics if it can't fulfill the request - Anton
-	 */
 	return n;
 }
 
@@ -469,7 +466,7 @@
 		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
 		goto err_nova;
 	}
-	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
+	srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
 	return (void *) res->start;
@@ -514,7 +511,7 @@
 	}
 
 	dma_make_coherent(ba, n);
-	sparc_unmapiorange((unsigned long)p, n);
+	srmmu_unmapiorange((unsigned long)p, n);
 
 	release_resource(res);
 	kfree(res);
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
index 5a021dd..b66b6aa 100644
--- a/arch/sparc/kernel/irq.h
+++ b/arch/sparc/kernel/irq.h
@@ -1,6 +1,5 @@
 #include <linux/platform_device.h>
 
-#include <asm/btfixup.h>
 #include <asm/cpu_type.h>
 
 struct irq_bucket {
@@ -10,6 +9,9 @@
         unsigned int pil;
 };
 
+#define SUN4M_HARD_INT(x)       (0x000000001 << (x))
+#define SUN4M_SOFT_INT(x)       (0x000010000 << (x))
+
 #define SUN4D_MAX_BOARD 10
 #define SUN4D_MAX_IRQ ((SUN4D_MAX_BOARD + 2) << 5)
 
@@ -41,52 +43,46 @@
 extern struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
 extern struct sun4m_irq_global __iomem *sun4m_irq_global;
 
+/* The following definitions describe the individual platform features: */
+#define FEAT_L10_CLOCKSOURCE (1 << 0) /* L10 timer is used as a clocksource */
+#define FEAT_L10_CLOCKEVENT  (1 << 1) /* L10 timer is used as a clockevent */
+#define FEAT_L14_ONESHOT     (1 << 2) /* L14 timer clockevent can oneshot */
+
 /*
- * Platform specific irq configuration
+ * Platform specific configuration
  * The individual platforms assign their platform
  * specifics in their init functions.
  */
-struct sparc_irq_config {
-	void (*init_timers)(irq_handler_t);
+struct sparc_config {
+	void (*init_timers)(void);
 	unsigned int (*build_device_irq)(struct platform_device *op,
 	                                 unsigned int real_irq);
+
+	/* generic clockevent features - see FEAT_* above */
+	int features;
+
+	/* clock rate used for clock event timer */
+	int clock_rate;
+
+	/* one period for clock source timer */
+	unsigned int cs_period;
+
+	/* function to obtain offset within the cs period */
+	unsigned int (*get_cycles_offset)(void);
+
+	void (*clear_clock_irq)(void);
+	void (*load_profile_irq)(int cpu, unsigned int limit);
 };
-extern struct sparc_irq_config sparc_irq_config;
+extern struct sparc_config sparc_config;
 
 unsigned int irq_alloc(unsigned int real_irq, unsigned int pil);
 void irq_link(unsigned int irq);
 void irq_unlink(unsigned int irq);
 void handler_irq(unsigned int pil, struct pt_regs *regs);
 
-/* Dave Redman (djhr@tadpole.co.uk)
- * changed these to function pointers.. it saves cycles and will allow
- * the irq dependencies to be split into different files at a later date
- * sun4c_irq.c, sun4m_irq.c etc so we could reduce the kernel size.
- * Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Changed these to btfixup entities... It saves cycles :)
- */
-
-BTFIXUPDEF_CALL(void, clear_clock_irq, void)
-BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int)
-
-static inline void clear_clock_irq(void)
-{
-	BTFIXUP_CALL(clear_clock_irq)();
-}
-
-static inline void load_profile_irq(int cpu, int limit)
-{
-	BTFIXUP_CALL(load_profile_irq)(cpu, limit);
-}
+unsigned long leon_get_irqmask(unsigned int irq);
 
 #ifdef CONFIG_SMP
-BTFIXUPDEF_CALL(void, set_cpu_int, int, int)
-BTFIXUPDEF_CALL(void, clear_cpu_int, int, int)
-BTFIXUPDEF_CALL(void, set_irq_udt, int)
-
-#define set_cpu_int(cpu,level) BTFIXUP_CALL(set_cpu_int)(cpu,level)
-#define clear_cpu_int(cpu,level) BTFIXUP_CALL(clear_cpu_int)(cpu,level)
-#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
 
 /* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
 #define SUN4D_IPI_IRQ 13
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index b2668af..ae04914 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -23,16 +23,8 @@
 #include "kernel.h"
 #include "irq.h"
 
-#ifdef CONFIG_SMP
-#define SMP_NOP2 "nop; nop;\n\t"
-#define SMP_NOP3 "nop; nop; nop;\n\t"
-#else
-#define SMP_NOP2
-#define SMP_NOP3
-#endif /* SMP */
-
 /* platform specific irq setup */
-struct sparc_irq_config sparc_irq_config;
+struct sparc_config sparc_config;
 
 unsigned long arch_local_irq_save(void)
 {
@@ -41,7 +33,6 @@
 
 	__asm__ __volatile__(
 		"rd	%%psr, %0\n\t"
-		SMP_NOP3	/* Sun4m + Cypress + SMP bug */
 		"or	%0, %2, %1\n\t"
 		"wr	%1, 0, %%psr\n\t"
 		"nop; nop; nop\n"
@@ -59,7 +50,6 @@
 
 	__asm__ __volatile__(
 		"rd	%%psr, %0\n\t"
-		SMP_NOP3	/* Sun4m + Cypress + SMP bug */
 		"andn	%0, %1, %0\n\t"
 		"wr	%0, 0, %%psr\n\t"
 		"nop; nop; nop\n"
@@ -76,7 +66,6 @@
 	__asm__ __volatile__(
 		"rd	%%psr, %0\n\t"
 		"and	%2, %1, %2\n\t"
-		SMP_NOP2	/* Sun4m + Cypress + SMP bug */
 		"andn	%0, %1, %0\n\t"
 		"wr	%0, %2, %%psr\n\t"
 		"nop; nop; nop\n"
@@ -346,11 +335,6 @@
 void __init init_IRQ(void)
 {
 	switch (sparc_cpu_model) {
-	case sun4c:
-	case sun4:
-		sun4c_init_IRQ();
-		break;
-
 	case sun4m:
 		pcic_probe();
 		if (pcic_present())
@@ -371,6 +355,5 @@
 		prom_printf("Cannot initialize IRQs on this Sun machine...");
 		break;
 	}
-	btfixup();
 }
 
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index dff2c3d..9bcbbe2 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -799,7 +799,7 @@
 	prom_limit0 = prom_timers->limit0;
 	prom_limit1 = prom_timers->limit1;
 
-	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
+	/* Just as in sun4c PROM uses timer which ticks at IRQ 14.
 	 * We turn both off here just to be paranoid.
 	 */
 	prom_timers->limit0 = 0;
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index fd6c36b..a86372d 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -32,9 +32,6 @@
 /* traps_32.c */
 extern void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
                               unsigned long npc, unsigned long psr);
-/* muldiv.c */
-extern int do_user_muldiv (struct pt_regs *, unsigned long);
-
 /* irq_32.c */
 extern struct irqaction static_irqaction[];
 extern int static_irq_count;
@@ -43,12 +40,7 @@
 extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
 extern void init_IRQ(void);
 
-/* sun4c_irq.c */
-extern void sun4c_init_IRQ(void);
-
 /* sun4m_irq.c */
-extern unsigned int lvl14_resolution;
-
 extern void sun4m_init_IRQ(void);
 extern void sun4m_unmask_profile_irq(void);
 extern void sun4m_clear_profile_irq(int cpu);
@@ -85,8 +77,6 @@
 extern void floppy_hardint(void);
 
 /* trampoline_32.S */
-extern int __smp4m_processor_id(void);
-extern int __smp4d_processor_id(void);
 extern unsigned long sun4m_cpu_startup;
 extern unsigned long sun4d_cpu_startup;
 
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 35e4367..77c1b91 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -10,6 +10,8 @@
 #include <linux/of_platform.h>
 #include <linux/interrupt.h>
 #include <linux/of_device.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
 
 #include <asm/oplib.h>
 #include <asm/timer.h>
@@ -84,7 +86,7 @@
 	sparc_leon_eirq = eirq;
 }
 
-static inline unsigned long get_irqmask(unsigned int irq)
+unsigned long leon_get_irqmask(unsigned int irq)
 {
 	unsigned long mask;
 
@@ -210,7 +212,7 @@
 	unsigned long mask;
 
 	irq = 0;
-	mask = get_irqmask(real_irq);
+	mask = leon_get_irqmask(real_irq);
 	if (mask == 0)
 		goto out;
 
@@ -250,7 +252,38 @@
 	irq_set_chip_data(virq, (void *)mask);
 }
 
-void __init leon_init_timers(irq_handler_t counter_fn)
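+/* The gptimer counts down from its reload value (rld), so the number of
+ * cycles elapsed in the current clocksource period is rld - val.
+ */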
+static u32 leon_cycles_offset(void)
+{
+	u32 rld, val, off;
+	rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld);
+	val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
+	off = rld - val;
+	return off;
+}
+
+#ifdef CONFIG_SMP
+
+/* smp clockevent irq */
+irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
+{
+	struct clock_event_device *ce;
+	int cpu = smp_processor_id();
+
+	leon_clear_profile_irq(cpu);
+
+	ce = &per_cpu(sparc32_clockevent, cpu);
+
+	irq_enter();
+	if (ce->event_handler)
+		ce->event_handler(ce);
+	irq_exit();
+
+	return IRQ_HANDLED;
+}
+
+#endif /* CONFIG_SMP */
+
+void __init leon_init_timers(void)
 {
 	int irq, eirq;
 	struct device_node *rootnp, *np, *nnp;
@@ -260,6 +293,14 @@
 	int ampopts;
 	int err;
 
+	sparc_config.get_cycles_offset = leon_cycles_offset;
+	sparc_config.cs_period = 1000000 / HZ;
+	sparc_config.features |= FEAT_L10_CLOCKSOURCE;
+
+#ifndef CONFIG_SMP
+	sparc_config.features |= FEAT_L10_CLOCKEVENT;
+#endif
+
 	leondebug_irq_disable = 0;
 	leon_debug_irqout = 0;
 	master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
@@ -369,7 +410,7 @@
 		leon_eirq_setup(eirq);
 
 	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
-	err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
+	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
 	if (err) {
 		printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
 		prom_halt();
@@ -386,7 +427,7 @@
 		 */
 		local_irq_save(flags);
 		patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
-		local_flush_cache_all();
+		local_ops->cache_all();
 		local_irq_restore(flags);
 	}
 #endif
@@ -401,7 +442,7 @@
 	/* Install per-cpu IRQ handler for broadcasted ticker */
 	irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
 				    "per-cpu", 0);
-	err = request_irq(irq, leon_percpu_timer_interrupt,
+	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
 			  IRQF_PERCPU | IRQF_TIMER, "ticker",
 			  NULL);
 	if (err) {
@@ -422,13 +463,12 @@
 	return;
 }
 
-void leon_clear_clock_irq(void)
+static void leon_clear_clock_irq(void)
 {
 }
 
-void leon_load_profile_irq(int cpu, unsigned int limit)
+static void leon_load_profile_irq(int cpu, unsigned int limit)
 {
-	BUG();
 }
 
 void __init leon_trans_init(struct device_node *dp)
@@ -457,25 +497,6 @@
 }
 
 #ifdef CONFIG_SMP
-
-void leon_set_cpu_int(int cpu, int level)
-{
-	unsigned long mask;
-	mask = get_irqmask(level);
-	LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask);
-}
-
-static void leon_clear_ipi(int cpu, int level)
-{
-	unsigned long mask;
-	mask = get_irqmask(level);
-	LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask<<16);
-}
-
-static void leon_set_udt(int cpu)
-{
-}
-
 void leon_clear_profile_irq(int cpu)
 {
 }
@@ -483,7 +504,7 @@
 void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
 {
 	unsigned long mask, flags, *addr;
-	mask = get_irqmask(irq_nr);
+	mask = leon_get_irqmask(irq_nr);
 	spin_lock_irqsave(&leon_irq_lock, flags);
 	addr = (unsigned long *)LEON_IMASK(cpu);
 	LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask));
@@ -494,20 +515,11 @@
 
 void __init leon_init_IRQ(void)
 {
-	sparc_irq_config.init_timers      = leon_init_timers;
-	sparc_irq_config.build_device_irq = _leon_build_device_irq;
-
-	BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq,
-			BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(load_profile_irq, leon_load_profile_irq,
-			BTFIXUPCALL_NOP);
-
-#ifdef CONFIG_SMP
-	BTFIXUPSET_CALL(set_cpu_int, leon_set_cpu_int, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(clear_cpu_int, leon_clear_ipi, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(set_irq_udt, leon_set_udt, BTFIXUPCALL_NORM);
-#endif
-
+	sparc_config.init_timers      = leon_init_timers;
+	sparc_config.build_device_irq = _leon_build_device_irq;
+	sparc_config.clock_rate       = 1000000;
+	sparc_config.clear_clock_irq  = leon_clear_clock_irq;
+	sparc_config.load_profile_irq = leon_load_profile_irq;
 }
 
 void __init leon_init(void)
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 160cac9..29325ba 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -24,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>
 #include <linux/cpu.h>
+#include <linux/clockchips.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -43,6 +44,7 @@
 #include <asm/asi.h>
 #include <asm/leon.h>
 #include <asm/leon_amba.h>
+#include <asm/timer.h>
 
 #include "kernel.h"
 
@@ -69,26 +71,24 @@
 	return val;
 }
 
-static void smp_setup_percpu_timer(void);
-
 void __cpuinit leon_callin(void)
 {
-	int cpuid = hard_smpleon_processor_id();
+	int cpuid = hard_smp_processor_id();
 
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 	leon_configure_cache_smp();
 
 	notify_cpu_starting(cpuid);
 
 	/* Get our local ticker going. */
-	smp_setup_percpu_timer();
+	register_percpu_ce(cpuid);
 
 	calibrate_delay();
 	smp_store_cpu_info(cpuid);
 
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 
 	/*
 	 * Unblock the master CPU _only_ when the scheduler state
@@ -99,8 +99,8 @@
 	 */
 	do_swap(&cpu_callin_map[cpuid], 1);
 
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 
 	/* Fix idle thread fields. */
 	__asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid])
@@ -143,8 +143,8 @@
 		}
 	}
 
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 }
 
 void leon_smp_setbroadcast(unsigned int mask)
@@ -199,8 +199,7 @@
 	leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER);
 
 	leon_configure_cache_smp();
-	smp_setup_percpu_timer();
-	local_flush_cache_all();
+	local_ops->cache_all();
 
 }
 
@@ -227,7 +226,7 @@
 	/* whirrr, whirrr, whirrrrrrrrr... */
 	printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i,
 	       (unsigned int)&leon3_irqctrl_regs->mpstatus);
-	local_flush_cache_all();
+	local_ops->cache_all();
 
 	/* Make sure all IRQs are of from the start for this new CPU */
 	LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0);
@@ -252,7 +251,7 @@
 		leon_enable_irq_cpu(leon_ipi_irq, i);
 	}
 
-	local_flush_cache_all();
+	local_ops->cache_all();
 	return 0;
 }
 
@@ -272,7 +271,7 @@
 		}
 	}
 	*prev = first;
-	local_flush_cache_all();
+	local_ops->cache_all();
 
 	/* Free unneeded trap tables */
 	if (!cpu_present(1)) {
@@ -338,7 +337,7 @@
 	local_irq_save(flags);
 	trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)];
 	trap_table->inst_three += smpleon_ipi - real_irq_entry;
-	local_flush_cache_all();
+	local_ops->cache_all();
 	local_irq_restore(flags);
 
 	for_each_possible_cpu(cpu) {
@@ -347,6 +346,13 @@
 	}
 }
 
+static void leon_send_ipi(int cpu, int level)
+{
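+	/* Force the interrupt at the given level on the target cpu */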
+	unsigned long mask;
+	mask = leon_get_irqmask(level);
+	LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask);
+}
+
 static void leon_ipi_single(int cpu)
 {
 	struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu);
@@ -355,7 +361,7 @@
 	work->single = 1;
 
 	/* Generate IRQ on the CPU */
-	set_cpu_int(cpu, leon_ipi_irq);
+	leon_send_ipi(cpu, leon_ipi_irq);
 }
 
 static void leon_ipi_mask_one(int cpu)
@@ -366,7 +372,7 @@
 	work->msk = 1;
 
 	/* Generate IRQ on the CPU */
-	set_cpu_int(cpu, leon_ipi_irq);
+	leon_send_ipi(cpu, leon_ipi_irq);
 }
 
 static void leon_ipi_resched(int cpu)
@@ -377,7 +383,7 @@
 	work->resched = 1;
 
 	/* Generate IRQ on the CPU (any IRQ will cause resched) */
-	set_cpu_int(cpu, leon_ipi_irq);
+	leon_send_ipi(cpu, leon_ipi_irq);
 }
 
 void leonsmp_ipi_interrupt(void)
@@ -449,7 +455,7 @@
 				if (cpumask_test_cpu(i, &mask)) {
 					ccall_info.processors_in[i] = 0;
 					ccall_info.processors_out[i] = 0;
-					set_cpu_int(i, LEON3_IRQ_CROSS_CALL);
+					leon_send_ipi(i, LEON3_IRQ_CROSS_CALL);
 
 				}
 			}
@@ -492,68 +498,19 @@
 	ccall_info.processors_out[i] = 1;
 }
 
-irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused)
-{
-	int cpu = smp_processor_id();
-
-	leon_clear_profile_irq(cpu);
-
-	profile_tick(CPU_PROFILING);
-
-	if (!--prof_counter(cpu)) {
-		int user = user_mode(get_irq_regs());
-
-		update_process_times(user);
-
-		prof_counter(cpu) = prof_multiplier(cpu);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static void __init smp_setup_percpu_timer(void)
-{
-	int cpu = smp_processor_id();
-
-	prof_counter(cpu) = prof_multiplier(cpu) = 1;
-}
-
-void __init leon_blackbox_id(unsigned *addr)
-{
-	int rd = *addr & 0x3e000000;
-	int rs1 = rd >> 11;
-
-	/* patch places where ___b_hard_smp_processor_id appears */
-	addr[0] = 0x81444000 | rd;	/* rd %asr17, reg */
-	addr[1] = 0x8130201c | rd | rs1;	/* srl reg, 0x1c, reg */
-	addr[2] = 0x01000000;	/* nop */
-}
-
-void __init leon_blackbox_current(unsigned *addr)
-{
-	int rd = *addr & 0x3e000000;
-	int rs1 = rd >> 11;
-
-	/* patch LOAD_CURRENT macro where ___b_load_current appears */
-	addr[0] = 0x81444000 | rd;	/* rd %asr17, reg */
-	addr[2] = 0x8130201c | rd | rs1;	/* srl reg, 0x1c, reg */
-	addr[4] = 0x81282002 | rd | rs1;	/* sll reg, 0x2, reg */
-
-}
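+/* IPI operations registered with the generic sparc32 SMP code in leon_init_smp() */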
+static const struct sparc32_ipi_ops leon_ipi_ops = {
+	.cross_call = leon_cross_call,
+	.resched    = leon_ipi_resched,
+	.single     = leon_ipi_single,
+	.mask_one   = leon_ipi_mask_one,
+};
 
 void __init leon_init_smp(void)
 {
 	/* Patch ipi15 trap table */
 	t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m);
 
-	BTFIXUPSET_BLACKBOX(hard_smp_processor_id, leon_blackbox_id);
-	BTFIXUPSET_BLACKBOX(load_current, leon_blackbox_current);
-	BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id,
-			BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(smp_ipi_resched, leon_ipi_resched, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(smp_ipi_single, leon_ipi_single, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(smp_ipi_mask_one, leon_ipi_mask_one, BTFIXUPCALL_NORM);
+	sparc32_ipi_ops = &leon_ipi_ops;
 }
 
 #endif /* CONFIG_SPARC_LEON */
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 276359e..15e0a16 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -32,26 +32,11 @@
 				GFP_KERNEL, PAGE_KERNEL, -1,
 				__builtin_return_address(0));
 }
-
-static char *dot2underscore(char *name)
-{
-	return name;
-}
 #else
 static void *module_map(unsigned long size)
 {
 	return vmalloc(size);
 }
-
-/* Replace references to .func with _Func */
-static char *dot2underscore(char *name)
-{
-	if (name[0] == '.') {
-		name[0] = '_';
-                name[1] = toupper(name[1]);
-	}
-	return name;
-}
 #endif /* CONFIG_SPARC64 */
 
 void *module_alloc(unsigned long size)
@@ -93,12 +78,8 @@
 
 	for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
 		if (sym[i].st_shndx == SHN_UNDEF) {
-			if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER) {
+			if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER)
 				sym[i].st_shndx = SHN_ABS;
-			} else {
-				char *name = strtab + sym[i].st_name;
-				dot2underscore(name);
-			}
 		}
 	}
 	return 0;
diff --git a/arch/sparc/kernel/muldiv.c b/arch/sparc/kernel/muldiv.c
deleted file mode 100644
index f7db516..0000000
--- a/arch/sparc/kernel/muldiv.c
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * muldiv.c: Hardware multiply/division illegal instruction trap
- *		for sun4c/sun4 (which do not have those instructions)
- *
- * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- *
- * 2004-12-25	Krzysztof Helt (krzysztof.h1@wp.pl) 
- *		- fixed registers constrains in inline assembly declarations
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-
-#include "kernel.h"
-
-/* #define DEBUG_MULDIV */
-
-static inline int has_imm13(int insn)
-{
-	return (insn & 0x2000);
-}
-
-static inline int is_foocc(int insn)
-{
-	return (insn & 0x800000);
-}
-
-static inline int sign_extend_imm13(int imm)
-{
-	return imm << 19 >> 19;
-}
-
-static inline void advance(struct pt_regs *regs)
-{
-	regs->pc   = regs->npc;
-	regs->npc += 4;
-}
-
-static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
-				       unsigned int rd)
-{
-	if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
-		/* Wheee... */
-		__asm__ __volatile__("save %sp, -0x40, %sp\n\t"
-				     "save %sp, -0x40, %sp\n\t"
-				     "save %sp, -0x40, %sp\n\t"
-				     "save %sp, -0x40, %sp\n\t"
-				     "save %sp, -0x40, %sp\n\t"
-				     "save %sp, -0x40, %sp\n\t"
-				     "save %sp, -0x40, %sp\n\t"
-				     "restore; restore; restore; restore;\n\t"
-				     "restore; restore; restore;\n\t");
-	}
-}
-
-#define fetch_reg(reg, regs) ({						\
-	struct reg_window32 __user *win;					\
-	register unsigned long ret;					\
-									\
-	if (!(reg)) ret = 0;						\
-	else if ((reg) < 16) {						\
-		ret = regs->u_regs[(reg)];				\
-	} else {							\
-		/* Ho hum, the slightly complicated case. */		\
-		win = (struct reg_window32 __user *)regs->u_regs[UREG_FP];\
-		if (get_user (ret, &win->locals[(reg) - 16])) return -1;\
-	}								\
-	ret;								\
-})
-
-static inline int
-store_reg(unsigned int result, unsigned int reg, struct pt_regs *regs)
-{
-	struct reg_window32 __user *win;
-
-	if (!reg)
-		return 0;
-	if (reg < 16) {
-		regs->u_regs[reg] = result;
-		return 0;
-	} else {
-		/* need to use put_user() in this case: */
-		win = (struct reg_window32 __user *) regs->u_regs[UREG_FP];
-		return (put_user(result, &win->locals[reg - 16]));
-	}
-}
-
-/* Should return 0 if mul/div emulation succeeded and SIGILL should
- * not be issued.
- */
-int do_user_muldiv(struct pt_regs *regs, unsigned long pc)
-{
-	unsigned int insn;
-	int inst;
-	unsigned int rs1, rs2, rdv;
-
-	if (!pc)
-		return -1; /* This happens to often, I think */
-	if (get_user (insn, (unsigned int __user *)pc))
-		return -1;
-	if ((insn & 0xc1400000) != 0x80400000)
-		return -1;
-	inst = ((insn >> 19) & 0xf);
-	if ((inst & 0xe) != 10 && (inst & 0xe) != 14)
-		return -1;
-
-	/* Now we know we have to do something with umul, smul, udiv or sdiv */
-	rs1 = (insn >> 14) & 0x1f;
-	rs2 = insn & 0x1f;
-	rdv = (insn >> 25) & 0x1f;
-	if (has_imm13(insn)) {
-		maybe_flush_windows(rs1, 0, rdv);
-		rs2 = sign_extend_imm13(insn);
-	} else {
-		maybe_flush_windows(rs1, rs2, rdv);
-		rs2 = fetch_reg(rs2, regs);
-	}
-	rs1 = fetch_reg(rs1, regs);
-	switch (inst) {
-	case 10: /* umul */
-#ifdef DEBUG_MULDIV	
-		printk ("unsigned muldiv: 0x%x * 0x%x = ", rs1, rs2);
-#endif		
-		__asm__ __volatile__ ("\n\t"
-			"mov	%0, %%o0\n\t"
-			"call	.umul\n\t"
-			" mov	%1, %%o1\n\t"
-			"mov	%%o0, %0\n\t"
-			"mov	%%o1, %1\n\t"
-			: "=r" (rs1), "=r" (rs2)
-		        : "0" (rs1), "1" (rs2)
-			: "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc");
-#ifdef DEBUG_MULDIV
-		printk ("0x%x%08x\n", rs2, rs1);
-#endif
-		if (store_reg(rs1, rdv, regs))
-			return -1;
-		regs->y = rs2;
-		break;
-	case 11: /* smul */
-#ifdef DEBUG_MULDIV
-		printk ("signed muldiv: 0x%x * 0x%x = ", rs1, rs2);
-#endif
-		__asm__ __volatile__ ("\n\t"
-			"mov	%0, %%o0\n\t"
-			"call	.mul\n\t"
-			" mov	%1, %%o1\n\t"
-			"mov	%%o0, %0\n\t"
-			"mov	%%o1, %1\n\t"
-			: "=r" (rs1), "=r" (rs2)
-		        : "0" (rs1), "1" (rs2)
-			: "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc");
-#ifdef DEBUG_MULDIV
-		printk ("0x%x%08x\n", rs2, rs1);
-#endif
-		if (store_reg(rs1, rdv, regs))
-			return -1;
-		regs->y = rs2;
-		break;
-	case 14: /* udiv */
-#ifdef DEBUG_MULDIV
-		printk ("unsigned muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2);
-#endif
-		if (!rs2) {
-#ifdef DEBUG_MULDIV
-			printk ("DIVISION BY ZERO\n");
-#endif
-			handle_hw_divzero (regs, pc, regs->npc, regs->psr);
-			return 0;
-		}
-		__asm__ __volatile__ ("\n\t"
-			"mov	%2, %%o0\n\t"
-			"mov	%0, %%o1\n\t"
-			"mov	%%g0, %%o2\n\t"
-			"call	__udivdi3\n\t"
-			" mov	%1, %%o3\n\t"
-			"mov	%%o1, %0\n\t"
-			"mov	%%o0, %1\n\t"
-			: "=r" (rs1), "=r" (rs2)
-			: "r" (regs->y), "0" (rs1), "1" (rs2)
-			: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
-			  "g1", "g2", "g3", "cc");
-#ifdef DEBUG_MULDIV
-		printk ("0x%x\n", rs1);
-#endif
-		if (store_reg(rs1, rdv, regs))
-			return -1;
-		break;
-	case 15: /* sdiv */
-#ifdef DEBUG_MULDIV
-		printk ("signed muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2);
-#endif
-		if (!rs2) {
-#ifdef DEBUG_MULDIV
-			printk ("DIVISION BY ZERO\n");
-#endif
-			handle_hw_divzero (regs, pc, regs->npc, regs->psr);
-			return 0;
-		}
-		__asm__ __volatile__ ("\n\t"
-			"mov	%2, %%o0\n\t"
-			"mov	%0, %%o1\n\t"
-			"mov	%%g0, %%o2\n\t"
-			"call	__divdi3\n\t"
-			" mov	%1, %%o3\n\t"
-			"mov	%%o1, %0\n\t"
-			"mov	%%o0, %1\n\t"
-			: "=r" (rs1), "=r" (rs2)
-			: "r" (regs->y), "0" (rs1), "1" (rs2)
-			: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
-			  "g1", "g2", "g3", "cc");
-#ifdef DEBUG_MULDIV
-		printk ("0x%x\n", rs1);
-#endif
-		if (store_reg(rs1, rdv, regs))
-			return -1;
-		break;
-	}
-	if (is_foocc (insn)) {
-		regs->psr &= ~PSR_ICC;
-		if ((inst & 0xe) == 14) {
-			/* ?div */
-			if (rs2) regs->psr |= PSR_V;
-		}
-		if (!rs1) regs->psr |= PSR_Z;
-		if (((int)rs1) < 0) regs->psr |= PSR_N;
-#ifdef DEBUG_MULDIV
-		printk ("psr muldiv: %08x\n", regs->psr);
-#endif
-	}
-	advance(regs);
-	return 0;
-}
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 4ee8ce0..185aa96 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -356,7 +356,7 @@
 		op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs);
 		for (i = 0; i < op->archdata.num_irqs; i++)
 			op->archdata.irqs[i] =
-			    sparc_irq_config.build_device_irq(op, intr[i].pri);
+			    sparc_config.build_device_irq(op, intr[i].pri);
 	} else {
 		const unsigned int *irq =
 			of_get_property(dp, "interrupts", &len);
@@ -365,7 +365,7 @@
 			op->archdata.num_irqs = len / sizeof(unsigned int);
 			for (i = 0; i < op->archdata.num_irqs; i++)
 				op->archdata.irqs[i] =
-				    sparc_irq_config.build_device_irq(op, irq[i]);
+				    sparc_config.build_device_irq(op, irq[i]);
 		} else {
 			op->archdata.num_irqs = 0;
 		}
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index fcc148e..ded3f60 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -703,31 +703,28 @@
 	pcic_timer_dummy = readl(pcic0.pcic_regs+PCI_SYS_LIMIT);
 }
 
-static irqreturn_t pcic_timer_handler (int irq, void *h)
-{
-	pcic_clear_clock_irq();
-	xtime_update(1);
-#ifndef CONFIG_SMP
-	update_process_times(user_mode(get_irq_regs()));
-#endif
-	return IRQ_HANDLED;
-}
+/* CPU frequency is 100 MHz, timer increments every 4 CPU clocks */
+#define USECS_PER_JIFFY  (1000000 / HZ)
+#define TICK_TIMER_LIMIT ((100 * 1000000 / 4) / HZ)
 
-#define USECS_PER_JIFFY  10000  /* We have 100HZ "standard" timer for sparc */
-#define TICK_TIMER_LIMIT ((100*1000000/4)/100)
-
-u32 pci_gettimeoffset(void)
+static unsigned int pcic_cycles_offset(void)
 {
+	u32 value, count;
+
+	value = readl(pcic0.pcic_regs + PCI_SYS_COUNTER);
+	count = value & ~PCI_SYS_COUNTER_OVERFLOW;
+
+	if (value & PCI_SYS_COUNTER_OVERFLOW)
+		count += TICK_TIMER_LIMIT;
 	/*
-	 * We divide all by 100
+	 * We divide all by HZ
 	 * to have microsecond resolution and to avoid overflow
 	 */
-	unsigned long count =
-	    readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW;
-	count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100);
-	return count * 1000;
-}
+	count = ((count / HZ) * USECS_PER_JIFFY) / (TICK_TIMER_LIMIT / HZ);
 
+	/* Coordinate with the sparc_config.clock_rate setting */
+	return count * 2;
+}
 
 void __init pci_time_init(void)
 {
@@ -736,9 +733,16 @@
 	int timer_irq, irq;
 	int err;
 
-	do_arch_gettimeoffset = pci_gettimeoffset;
-
-	btfixup();
+#ifndef CONFIG_SMP
+	/*
+	 * The clock_rate is expressed in SBUS clock terms.
+	 * We take this into account in pcic_cycles_offset().
+	 */
+	sparc_config.clock_rate = SBUS_CLOCK_RATE / HZ;
+	sparc_config.features |= FEAT_L10_CLOCKEVENT;
+#endif
+	sparc_config.features |= FEAT_L10_CLOCKSOURCE;
+	sparc_config.get_cycles_offset = pcic_cycles_offset;
 
 	writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT);
 	/* PROM should set appropriate irq */
@@ -747,7 +751,7 @@
 	writel (PCI_COUNTER_IRQ_SET(timer_irq, 0),
 		pcic->pcic_regs+PCI_COUNTER_IRQ);
 	irq = pcic_build_device_irq(NULL, timer_irq);
-	err = request_irq(irq, pcic_timer_handler,
+	err = request_irq(irq, timer_interrupt,
 			  IRQF_TIMER, "timer", NULL);
 	if (err) {
 		prom_printf("time_init: unable to attach IRQ%d\n", timer_irq);
@@ -875,10 +879,9 @@
 
 void __init sun4m_pci_init_IRQ(void)
 {
-	sparc_irq_config.build_device_irq = pcic_build_device_irq;
-
-	BTFIXUPSET_CALL(clear_clock_irq, pcic_clear_clock_irq, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(load_profile_irq, pcic_load_profile_irq, BTFIXUPCALL_NORM);
+	sparc_config.build_device_irq = pcic_build_device_irq;
+	sparc_config.clear_clock_irq  = pcic_clear_clock_irq;
+	sparc_config.load_profile_irq = pcic_load_profile_irq;
 }
 
 int pcibios_assign_resource(struct pci_dev *pdev, int resource)
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index efa0754..fe6787c 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -67,8 +67,6 @@
 
 #ifndef CONFIG_SMP
 
-#define SUN4C_FAULT_HIGH 100
-
 /*
  * the idle loop on a Sparc... ;)
  */
@@ -76,36 +74,6 @@
 {
 	/* endless idle loop with no priority at all */
 	for (;;) {
-		if (ARCH_SUN4C) {
-			static int count = HZ;
-			static unsigned long last_jiffies;
-			static unsigned long last_faults;
-			static unsigned long fps;
-			unsigned long now;
-			unsigned long faults;
-
-			extern unsigned long sun4c_kernel_faults;
-			extern void sun4c_grow_kernel_ring(void);
-
-			local_irq_disable();
-			now = jiffies;
-			count -= (now - last_jiffies);
-			last_jiffies = now;
-			if (count < 0) {
-				count += HZ;
-				faults = sun4c_kernel_faults;
-				fps = (fps + (faults - last_faults)) >> 1;
-				last_faults = faults;
-#if 0
-				printk("kernel faults / second = %ld\n", fps);
-#endif
-				if (fps >= SUN4C_FAULT_HIGH) {
-					sun4c_grow_kernel_ring();
-				}
-			}
-			local_irq_enable();
-		}
-
 		if (pm_idle) {
 			while (!need_resched())
 				(*pm_idle)();
@@ -114,7 +82,6 @@
 				cpu_relax();
 		}
 		schedule_preempt_disabled();
-		check_pgt_cache();
 	}
 }
 
@@ -137,7 +104,6 @@
 				cpu_relax();
 		}
 		schedule_preempt_disabled();
-		check_pgt_cache();
 	}
 }
 
@@ -179,88 +145,6 @@
 	machine_halt();
 }
 
-#if 0
-
-static DEFINE_SPINLOCK(sparc_backtrace_lock);
-
-void __show_backtrace(unsigned long fp)
-{
-	struct reg_window32 *rw;
-	unsigned long flags;
-	int cpu = smp_processor_id();
-
-	spin_lock_irqsave(&sparc_backtrace_lock, flags);
-
-	rw = (struct reg_window32 *)fp;
-        while(rw && (((unsigned long) rw) >= PAGE_OFFSET) &&
-            !(((unsigned long) rw) & 0x7)) {
-		printk("CPU[%d]: ARGS[%08lx,%08lx,%08lx,%08lx,%08lx,%08lx] "
-		       "FP[%08lx] CALLER[%08lx]: ", cpu,
-		       rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
-		       rw->ins[4], rw->ins[5],
-		       rw->ins[6],
-		       rw->ins[7]);
-		printk("%pS\n", (void *) rw->ins[7]);
-		rw = (struct reg_window32 *) rw->ins[6];
-	}
-	spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
-}
-
-#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
-#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
-#define __GET_FP(fp) __asm__ __volatile__("mov %%i6, %0" : "=r" (fp))
-
-void show_backtrace(void)
-{
-	unsigned long fp;
-
-	__SAVE; __SAVE; __SAVE; __SAVE;
-	__SAVE; __SAVE; __SAVE; __SAVE;
-	__RESTORE; __RESTORE; __RESTORE; __RESTORE;
-	__RESTORE; __RESTORE; __RESTORE; __RESTORE;
-
-	__GET_FP(fp);
-
-	__show_backtrace(fp);
-}
-
-#ifdef CONFIG_SMP
-void smp_show_backtrace_all_cpus(void)
-{
-	xc0((smpfunc_t) show_backtrace);
-	show_backtrace();
-}
-#endif
-
-void show_stackframe(struct sparc_stackf *sf)
-{
-	unsigned long size;
-	unsigned long *stk;
-	int i;
-
-	printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx "
-	       "l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n",
-	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
-	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
-	printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx "
-	       "i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n",
-	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
-	       sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
-	printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx "
-	       "x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n",
-	       (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
-	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
-	       sf->xxargs[0]);
-	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
-	size -= STACKFRAME_SZ;
-	stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
-	i = 0;
-	do {
-		printk("s%d: %08lx\n", i++, *stk++);
-	} while ((size -= sizeof(unsigned long)));
-}
-#endif
-
 void show_regs(struct pt_regs *r)
 {
 	struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14];
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
index 5f5f74c..7abc24e 100644
--- a/arch/sparc/kernel/rtrap_32.S
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -128,13 +128,12 @@
 
 		wr	%glob_tmp, 0x0, %wim
 
-				/* Here comes the architecture specific 
-				 * branch to the user stack checking routine
-				 * for return from traps.
-				 */
-				.globl	rtrap_mmu_patchme
-rtrap_mmu_patchme:	b	sun4c_rett_stackchk
-				 andcc	%fp, 0x7, %g0	
+	/* Here comes the architecture specific
+	 * branch to the user stack checking routine
+	 * for return from traps.
+	 */
+	b	srmmu_rett_stackchk
+	 andcc	%fp, 0x7, %g0
 
 ret_trap_userwins_ok:
 	LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
@@ -225,69 +224,6 @@
 	b	signal_p
 	 ld	[%curptr + TI_FLAGS], %g2
 
-sun4c_rett_stackchk:
-	be	1f
-	 and	%fp, 0xfff, %g1		! delay slot
-
-	b	ret_trap_user_stack_is_bolixed + 0x4
-	 wr	%t_wim, 0x0, %wim
-
-	/* See if we have to check the sanity of one page or two */
-1:
-	add	%g1, 0x38, %g1
-	sra	%fp, 29, %g2
-	add	%g2, 0x1, %g2
-	andncc	%g2, 0x1, %g0
-	be	1f
-	 andncc	%g1, 0xff8, %g0
-
-	/* %sp is in vma hole, yuck */
-	b	ret_trap_user_stack_is_bolixed + 0x4
-	 wr	%t_wim, 0x0, %wim
-
-1:
-	be	sun4c_rett_onepage	/* Only one page to check */
-	 lda	[%fp] ASI_PTE, %g2
-
-sun4c_rett_twopages:
-	add	%fp, 0x38, %g1
-	sra	%g1, 29, %g2
-	add	%g2, 0x1, %g2
-	andncc	%g2, 0x1, %g0
-	be	1f
-	 lda	[%g1] ASI_PTE, %g2
-
-	/* Second page is in vma hole */
-	b	ret_trap_user_stack_is_bolixed + 0x4
-	 wr	%t_wim, 0x0, %wim
-
-1:
-	srl	%g2, 29, %g2
-	andcc	%g2, 0x4, %g0
-	bne	sun4c_rett_onepage
-	 lda	[%fp] ASI_PTE, %g2
-
-	/* Second page has bad perms */
-	b	ret_trap_user_stack_is_bolixed + 0x4
-	 wr	%t_wim, 0x0, %wim
-
-sun4c_rett_onepage:
-	srl	%g2, 29, %g2
-	andcc	%g2, 0x4, %g0
-	bne,a	1f
-	 restore %g0, %g0, %g0
-
-	/* A page had bad page permissions, losing... */
-	b	ret_trap_user_stack_is_bolixed + 0x4
-	 wr	%t_wim, 0x0, %wim
-
-	/* Whee, things are ok, load the window and continue. */
-1:
-	LOAD_WINDOW(sp)
-
-	b	ret_trap_userwins_ok
-	 save	%g0, %g0, %g0
-
 	.globl	srmmu_rett_stackchk
 srmmu_rett_stackchk:
 	bne	ret_trap_user_stack_is_bolixed
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 9171fc2..afa2a9e 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -73,18 +73,8 @@
 		.globl			rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
 rtrap_irq:
 rtrap:
-#ifndef CONFIG_SMP
-		sethi			%hi(__cpu_data), %l0
-		lduw			[%l0 + %lo(__cpu_data)], %l1
-#else
-		sethi			%hi(__cpu_data), %l0
-		or			%l0, %lo(__cpu_data), %l0
-		lduw			[%l0 + %g5], %l1
-#endif
-		cmp			%l1, 0
-
 		/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
-		 ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+		ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 rtrap_xcall:
 		sethi			%hi(0xf << 20), %l4
 		and			%l1, %l4, %l4
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index d444468..c052313 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -42,7 +42,6 @@
 #include <asm/vaddrs.h>
 #include <asm/mbus.h>
 #include <asm/idprom.h>
-#include <asm/machines.h>
 #include <asm/cpudata.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
@@ -106,7 +105,6 @@
 
 /* which CPU booted us (0xff = not set) */
 unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */
-unsigned char boot_cpu_id4; /* boot_cpu_id << 2 */
 
 static void
 prom_console_write(struct console *con, const char *s, unsigned n)
@@ -182,13 +180,6 @@
 	}
 }
 
-/* This routine will in the future do all the nasty prom stuff
- * to probe for the mmu type and its parameters, etc. This will
- * also be where SMP things happen.
- */
-
-extern void sun4c_probe_vac(void);
-
 extern unsigned short root_flags;
 extern unsigned short root_dev;
 extern unsigned short ram_flags;
@@ -200,6 +191,52 @@
 
 char reboot_command[COMMAND_LINE_SIZE];
 
+struct cpuid_patch_entry {
+	unsigned int	addr;
+	unsigned int	sun4d[3];
+	unsigned int	leon[3];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+
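+/* Rewrite the three-instruction slots recorded in the __cpuid_patch table
+ * at boot: sun4m is the unpatched default, sun4d and LEON get their own
+ * instruction sequences.
+ */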
+static void __init per_cpu_patch(void)
+{
+	struct cpuid_patch_entry *p;
+
+	if (sparc_cpu_model == sun4m) {
+		/* Nothing to do, this is what the unpatched code
+		 * targets.
+		 */
+		return;
+	}
+
+	p = &__cpuid_patch;
+	while (p < &__cpuid_patch_end) {
+		unsigned long addr = p->addr;
+		unsigned int *insns;
+
+		switch (sparc_cpu_model) {
+		case sun4d:
+			insns = &p->sun4d[0];
+			break;
+
+		case sparc_leon:
+			insns = &p->leon[0];
+			break;
+		default:
+			prom_printf("Unknown cpu type, halting.\n");
+			prom_halt();
+		}
+		*(unsigned int *) (addr + 0) = insns[0];
+		flushi(addr + 0);
+		*(unsigned int *) (addr + 4) = insns[1];
+		flushi(addr + 4);
+		*(unsigned int *) (addr + 8) = insns[2];
+		flushi(addr + 8);
+
+		p++;
+	}
+}
+
 enum sparc_cpu sparc_cpu_model;
 EXPORT_SYMBOL(sparc_cpu_model);
 
@@ -225,10 +262,6 @@
 
 	/* Set sparc_cpu_model */
 	sparc_cpu_model = sun_unknown;
-	if (!strcmp(&cputypval[0], "sun4 "))
-		sparc_cpu_model = sun4;
-	if (!strcmp(&cputypval[0], "sun4c"))
-		sparc_cpu_model = sun4c;
 	if (!strcmp(&cputypval[0], "sun4m"))
 		sparc_cpu_model = sun4m;
 	if (!strcmp(&cputypval[0], "sun4s"))
@@ -244,12 +277,6 @@
 
 	printk("ARCH: ");
 	switch(sparc_cpu_model) {
-	case sun4:
-		printk("SUN4\n");
-		break;
-	case sun4c:
-		printk("SUN4C\n");
-		break;
 	case sun4m:
 		printk("SUN4M\n");
 		break;
@@ -275,8 +302,6 @@
 #endif
 
 	idprom_init();
-	if (ARCH_SUN4C)
-		sun4c_probe_vac();
 	load_mmu();
 
 	phys_base = 0xffffffffUL;
@@ -313,6 +338,9 @@
 	init_mm.context = (unsigned long) NO_CONTEXT;
 	init_task.thread.kregs = &fake_swapper_regs;
 
+	/* Run-time patch instructions to match the cpu model */
+	per_cpu_patch();
+
 	paging_init();
 
 	smp_setup_cpu_possible_map();
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 1e750e41..ac8e66b 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -217,12 +217,9 @@
 /* Checks if the fp is valid */
 static inline int invalid_frame_pointer(void __user *fp, int fplen)
 {
-	if ((((unsigned long) fp) & 7) ||
-	    !__access_ok((unsigned long)fp, fplen) ||
-	    ((sparc_cpu_model == sun4 || sparc_cpu_model == sun4c) &&
-	     ((unsigned long) fp < 0xe0000000 && (unsigned long) fp >= 0x20000000)))
+	if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
 		return 1;
-	
+
 	return 0;
 }
 
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index f671e7f..5771375 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -40,6 +40,8 @@
 
 cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
+const struct sparc32_ipi_ops *sparc32_ipi_ops;
+
 /* The only guaranteed locking primitive available on all Sparc
  * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
  * places the current byte at the effective address into dest_reg and
@@ -85,14 +87,6 @@
 		(bogosum/(5000/HZ))%100);
 
 	switch(sparc_cpu_model) {
-	case sun4:
-		printk("SUN4\n");
-		BUG();
-		break;
-	case sun4c:
-		printk("SUN4C\n");
-		BUG();
-		break;
 	case sun4m:
 		smp4m_smp_done();
 		break;
@@ -132,7 +126,7 @@
 	 * a single CPU. The trap handler needs only to do trap entry/return
 	 * to call schedule.
 	 */
-	BTFIXUP_CALL(smp_ipi_resched)(cpu);
+	sparc32_ipi_ops->resched(cpu);
 }
 
 void smp_send_stop(void)
@@ -142,7 +136,7 @@
 void arch_send_call_function_single_ipi(int cpu)
 {
 	/* trigger one IPI single call on one CPU */
-	BTFIXUP_CALL(smp_ipi_single)(cpu);
+	sparc32_ipi_ops->single(cpu);
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -151,7 +145,7 @@
 
 	/* trigger IPI mask call on each CPU */
 	for_each_cpu(cpu, mask)
-		BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
+		sparc32_ipi_ops->mask_one(cpu);
 }
 
 void smp_resched_interrupt(void)
@@ -179,150 +173,9 @@
 	irq_exit();
 }
 
-void smp_flush_cache_all(void)
-{
-	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
-	local_flush_cache_all();
-}
-
-void smp_flush_tlb_all(void)
-{
-	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
-	local_flush_tlb_all();
-}
-
-void smp_flush_cache_mm(struct mm_struct *mm)
-{
-	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask))
-			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
-		local_flush_cache_mm(mm);
-	}
-}
-
-void smp_flush_tlb_mm(struct mm_struct *mm)
-{
-	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask)) {
-			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
-			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
-				cpumask_copy(mm_cpumask(mm),
-					     cpumask_of(smp_processor_id()));
-		}
-		local_flush_tlb_mm(mm);
-	}
-}
-
-void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-			   unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask))
-			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
-		local_flush_cache_range(vma, start, end);
-	}
-}
-
-void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			 unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask))
-			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
-		local_flush_tlb_range(vma, start, end);
-	}
-}
-
-void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask))
-			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
-		local_flush_cache_page(vma, page);
-	}
-}
-
-void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	if(mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask))
-			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
-		local_flush_tlb_page(vma, page);
-	}
-}
-
-void smp_flush_page_to_ram(unsigned long page)
-{
-	/* Current theory is that those who call this are the one's
-	 * who have just dirtied their cache with the pages contents
-	 * in kernel space, therefore we only run this on local cpu.
-	 *
-	 * XXX This experiment failed, research further... -DaveM
-	 */
-#if 1
-	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
-#endif
-	local_flush_page_to_ram(page);
-}
-
-void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
-{
-	cpumask_t cpu_mask;
-	cpumask_copy(&cpu_mask, mm_cpumask(mm));
-	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-	if (!cpumask_empty(&cpu_mask))
-		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
-	local_flush_sig_insns(mm, insn_addr);
-}
-
-extern unsigned int lvl14_resolution;
-
-/* /proc/profile writes can call this, don't __init it please. */
-static DEFINE_SPINLOCK(prof_setup_lock);
-
 int setup_profiling_timer(unsigned int multiplier)
 {
-	int i;
-	unsigned long flags;
-
-	/* Prevent level14 ticker IRQ flooding. */
-	if((!multiplier) || (lvl14_resolution / multiplier) < 500)
-		return -EINVAL;
-
-	spin_lock_irqsave(&prof_setup_lock, flags);
-	for_each_possible_cpu(i) {
-		load_profile_irq(i, lvl14_resolution / multiplier);
-		prof_multiplier(i) = multiplier;
-	}
-	spin_unlock_irqrestore(&prof_setup_lock, flags);
-
-	return 0;
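+	/* Changing the profiling timer multiplier is not supported on sparc32 */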
+	return -EINVAL;
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -345,14 +198,6 @@
 	smp_store_cpu_info(boot_cpu_id);
 
 	switch(sparc_cpu_model) {
-	case sun4:
-		printk("SUN4\n");
-		BUG();
-		break;
-	case sun4c:
-		printk("SUN4C\n");
-		BUG();
-		break;
 	case sun4m:
 		smp4m_boot_cpus();
 		break;
@@ -418,14 +263,6 @@
 	int ret=0;
 
 	switch(sparc_cpu_model) {
-	case sun4:
-		printk("SUN4\n");
-		BUG();
-		break;
-	case sun4c:
-		printk("SUN4C\n");
-		BUG();
-		break;
 	case sun4m:
 		ret = smp4m_boot_one_cpu(cpu);
 		break;
diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c
index baeab87..e521c54 100644
--- a/arch/sparc/kernel/sparc_ksyms_32.c
+++ b/arch/sparc/kernel/sparc_ksyms_32.c
@@ -28,19 +28,5 @@
 EXPORT_SYMBOL(__ret_efault);
 EXPORT_SYMBOL(empty_zero_page);
 
-/* Defined using magic */
-#ifndef CONFIG_SMP
-EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
-#else
-EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
-#endif
-EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
-EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea));
-EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
-EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
-EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
-EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));
-EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached));
-
 /* Exporting a symbol from /init/main.c */
 EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
deleted file mode 100644
index f6bf25a..0000000
--- a/arch/sparc/kernel/sun4c_irq.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * sun4c irq support
- *
- *  djhr: Hacked out of irq.c into a CPU dependent version.
- *
- *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- *  Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
- *  Copyright (C) 1995 Pete A. Zaitcev (zaitcev@yahoo.com)
- *  Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
- */
-
-#include <linux/init.h>
-
-#include <asm/oplib.h>
-#include <asm/timer.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-
-#include "irq.h"
-
-/* Sun4c interrupts are typically laid out as follows:
- *
- *  1 - Software interrupt, SBUS level 1
- *  2 - SBUS level 2
- *  3 - ESP SCSI, SBUS level 3
- *  4 - Software interrupt
- *  5 - Lance ethernet, SBUS level 4
- *  6 - Software interrupt
- *  7 - Graphics card, SBUS level 5
- *  8 - SBUS level 6
- *  9 - SBUS level 7
- * 10 - Counter timer
- * 11 - Floppy
- * 12 - Zilog uart
- * 13 - CS4231 audio
- * 14 - Profiling timer
- * 15 - NMI
- *
- * The interrupt enable bits in the interrupt mask register are
- * really only used to enable/disable the timer interrupts, and
- * for signalling software interrupts.  There is also a master
- * interrupt enable bit in this register.
- *
- * Interrupts are enabled by setting the SUN4C_INT_* bits, they
- * are disabled by clearing those bits.
- */
-
-/*
- * Bit field defines for the interrupt registers on various
- * Sparc machines.
- */
-
-/* The sun4c interrupt register. */
-#define SUN4C_INT_ENABLE  0x01     /* Allow interrupts. */
-#define SUN4C_INT_E14     0x80     /* Enable level 14 IRQ. */
-#define SUN4C_INT_E10     0x20     /* Enable level 10 IRQ. */
-#define SUN4C_INT_E8      0x10     /* Enable level 8 IRQ. */
-#define SUN4C_INT_E6      0x08     /* Enable level 6 IRQ. */
-#define SUN4C_INT_E4      0x04     /* Enable level 4 IRQ. */
-#define SUN4C_INT_E1      0x02     /* Enable level 1 IRQ. */
-
-/*
- * Pointer to the interrupt enable byte
- * Used by entry.S
- */
-unsigned char __iomem *interrupt_enable;
-
-static void sun4c_mask_irq(struct irq_data *data)
-{
-	unsigned long mask = (unsigned long)data->chip_data;
-
-	if (mask) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		mask = sbus_readb(interrupt_enable) & ~mask;
-		sbus_writeb(mask, interrupt_enable);
-		local_irq_restore(flags);
-	}
-}
-
-static void sun4c_unmask_irq(struct irq_data *data)
-{
-	unsigned long mask = (unsigned long)data->chip_data;
-
-	if (mask) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		mask = sbus_readb(interrupt_enable) | mask;
-		sbus_writeb(mask, interrupt_enable);
-		local_irq_restore(flags);
-	}
-}
-
-static unsigned int sun4c_startup_irq(struct irq_data *data)
-{
-	irq_link(data->irq);
-	sun4c_unmask_irq(data);
-
-	return 0;
-}
-
-static void sun4c_shutdown_irq(struct irq_data *data)
-{
-	sun4c_mask_irq(data);
-	irq_unlink(data->irq);
-}
-
-static struct irq_chip sun4c_irq = {
-	.name		= "sun4c",
-	.irq_startup	= sun4c_startup_irq,
-	.irq_shutdown	= sun4c_shutdown_irq,
-	.irq_mask	= sun4c_mask_irq,
-	.irq_unmask	= sun4c_unmask_irq,
-};
-
-static unsigned int sun4c_build_device_irq(struct platform_device *op,
-					   unsigned int real_irq)
-{
-	 unsigned int irq;
-
-	if (real_irq >= 16) {
-		prom_printf("Bogus sun4c IRQ %u\n", real_irq);
-		prom_halt();
-	}
-
-	irq = irq_alloc(real_irq, real_irq);
-	if (irq) {
-		unsigned long mask = 0UL;
-
-		switch (real_irq) {
-		case 1:
-			mask = SUN4C_INT_E1;
-			break;
-		case 8:
-			mask = SUN4C_INT_E8;
-			break;
-		case 10:
-			mask = SUN4C_INT_E10;
-			break;
-		case 14:
-			mask = SUN4C_INT_E14;
-			break;
-		default:
-			/* All the rest are either always enabled,
-			 * or are for signalling software interrupts.
-			 */
-			break;
-		}
-		irq_set_chip_and_handler_name(irq, &sun4c_irq,
-		                              handle_level_irq, "level");
-		irq_set_chip_data(irq, (void *)mask);
-	}
-	return irq;
-}
-
-struct sun4c_timer_info {
-	u32		l10_count;
-	u32		l10_limit;
-	u32		l14_count;
-	u32		l14_limit;
-};
-
-static struct sun4c_timer_info __iomem *sun4c_timers;
-
-static void sun4c_clear_clock_irq(void)
-{
-	sbus_readl(&sun4c_timers->l10_limit);
-}
-
-static void sun4c_load_profile_irq(int cpu, unsigned int limit)
-{
-	/* Errm.. not sure how to do this.. */
-}
-
-static void __init sun4c_init_timers(irq_handler_t counter_fn)
-{
-	const struct linux_prom_irqs *prom_irqs;
-	struct device_node *dp;
-	unsigned int irq;
-	const u32 *addr;
-	int err;
-
-	dp = of_find_node_by_name(NULL, "counter-timer");
-	if (!dp) {
-		prom_printf("sun4c_init_timers: Unable to find counter-timer\n");
-		prom_halt();
-	}
-
-	addr = of_get_property(dp, "address", NULL);
-	if (!addr) {
-		prom_printf("sun4c_init_timers: No address property\n");
-		prom_halt();
-	}
-
-	sun4c_timers = (void __iomem *) (unsigned long) addr[0];
-
-	prom_irqs = of_get_property(dp, "intr", NULL);
-	of_node_put(dp);
-	if (!prom_irqs) {
-		prom_printf("sun4c_init_timers: No intr property\n");
-		prom_halt();
-	}
-
-	/* Have the level 10 timer tick at 100HZ.  We don't touch the
-	 * level 14 timer limit since we are letting the prom handle
-	 * them until we have a real console driver so L1-A works.
-	 */
-	sbus_writel((((1000000/HZ) + 1) << 10), &sun4c_timers->l10_limit);
-
-	master_l10_counter = &sun4c_timers->l10_count;
-
-	irq = sun4c_build_device_irq(NULL, prom_irqs[0].pri);
-	err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
-	if (err) {
-		prom_printf("sun4c_init_timers: request_irq() fails with %d\n", err);
-		prom_halt();
-	}
-
-	/* disable timer interrupt */
-	sun4c_mask_irq(irq_get_irq_data(irq));
-}
-
-#ifdef CONFIG_SMP
-static void sun4c_nop(void)
-{
-}
-#endif
-
-void __init sun4c_init_IRQ(void)
-{
-	struct device_node *dp;
-	const u32 *addr;
-
-	dp = of_find_node_by_name(NULL, "interrupt-enable");
-	if (!dp) {
-		prom_printf("sun4c_init_IRQ: Unable to find interrupt-enable\n");
-		prom_halt();
-	}
-
-	addr = of_get_property(dp, "address", NULL);
-	of_node_put(dp);
-	if (!addr) {
-		prom_printf("sun4c_init_IRQ: No address property\n");
-		prom_halt();
-	}
-
-	interrupt_enable = (void __iomem *) (unsigned long) addr[0];
-
-	BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
-
-	sparc_irq_config.init_timers      = sun4c_init_timers;
-	sparc_irq_config.build_device_irq = sun4c_build_device_irq;
-
-#ifdef CONFIG_SMP
-	BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(set_irq_udt, sun4c_nop, BTFIXUPCALL_NOP);
-#endif
-	sbus_writeb(SUN4C_INT_ENABLE, interrupt_enable);
-	/* Cannot enable interrupts until OBP ticker is disabled. */
-}
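
A side note on the sun4c code removed above, not part of the patch itself: sun4c_mask_irq() and sun4c_unmask_irq() are the classic read-modify-write of a single enable byte, performed with local interrupts disabled so the update cannot be interleaved by an interrupt on the same CPU. A minimal user-space sketch of the same pattern, using a stand-in variable instead of the real interrupt_enable register:

#include <stdint.h>

static volatile uint8_t int_enable;	/* stand-in for *interrupt_enable */

static void mask_level(uint8_t bit)
{
	/* in the kernel this runs under local_irq_save()/local_irq_restore() */
	int_enable = (uint8_t)(int_enable & ~bit);	/* clear the enable bit */
}

static void unmask_level(uint8_t bit)
{
	int_enable = (uint8_t)(int_enable | bit);	/* set the enable bit */
}

int main(void)
{
	unmask_level(0x20);	/* 0x20 is SUN4C_INT_E10, the level-10 timer enable */
	mask_level(0x20);
	return 0;
}
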
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index 1d13c5b..e490ac9 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -15,6 +15,7 @@
 #include <asm/sbi.h>
 #include <asm/cacheflush.h>
 #include <asm/setup.h>
+#include <asm/oplib.h>
 
 #include "kernel.h"
 #include "irq.h"
@@ -243,19 +244,6 @@
 };
 
 #ifdef CONFIG_SMP
-static void sun4d_set_cpu_int(int cpu, int level)
-{
-	sun4d_send_ipi(cpu, level);
-}
-
-static void sun4d_clear_ipi(int cpu, int level)
-{
-}
-
-static void sun4d_set_udt(int cpu)
-{
-}
-
 /* Setup IRQ distribution scheme. */
 void __init sun4d_distribute_irqs(void)
 {
@@ -282,7 +270,8 @@
 
 static void sun4d_load_profile_irq(int cpu, unsigned int limit)
 {
-	bw_set_prof_limit(cpu, limit);
+	unsigned int value = limit ? timer_value(limit) : 0;
+	bw_set_prof_limit(cpu, value);
 }
 
 static void __init sun4d_load_profile_irqs(void)
@@ -418,12 +407,12 @@
 	trap_table->inst_two = lvl14_save[1];
 	trap_table->inst_three = lvl14_save[2];
 	trap_table->inst_four = lvl14_save[3];
-	local_flush_cache_all();
+	local_ops->cache_all();
 	local_irq_restore(flags);
 #endif
 }
 
-static void __init sun4d_init_timers(irq_handler_t counter_fn)
+static void __init sun4d_init_timers(void)
 {
 	struct device_node *dp;
 	struct resource res;
@@ -466,12 +455,20 @@
 		prom_halt();
 	}
 
-	sbus_writel((((1000000/HZ) + 1) << 10), &sun4d_timers->l10_timer_limit);
+#ifdef CONFIG_SMP
+	sparc_config.cs_period = SBUS_CLOCK_RATE * 2;  /* 2 seconds */
+#else
+	sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec  */
+	sparc_config.features |= FEAT_L10_CLOCKEVENT;
+#endif
+	sparc_config.features |= FEAT_L10_CLOCKSOURCE;
+	sbus_writel(timer_value(sparc_config.cs_period),
+		    &sun4d_timers->l10_timer_limit);
 
 	master_l10_counter = &sun4d_timers->l10_cur_count;
 
 	irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ);
-	err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
+	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
 	if (err) {
 		prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
 		             err);
@@ -509,16 +506,11 @@
 {
 	local_irq_disable();
 
-	BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
+	sparc_config.init_timers      = sun4d_init_timers;
+	sparc_config.build_device_irq = sun4d_build_device_irq;
+	sparc_config.clock_rate       = SBUS_CLOCK_RATE;
+	sparc_config.clear_clock_irq  = sun4d_clear_clock_irq;
+	sparc_config.load_profile_irq = sun4d_load_profile_irq;
 
-	sparc_irq_config.init_timers      = sun4d_init_timers;
-	sparc_irq_config.build_device_irq = sun4d_build_device_irq;
-
-#ifdef CONFIG_SMP
-	BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP);
-#endif
 	/* Cannot enable interrupts until OBP ticker is disabled. */
 }
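
For readers following the btfixup removal in the hunks above: the BTFIXUPSET_CALL() runtime patching is replaced by plain assignments into a per-platform config structure that generic code then calls through. The sketch below only approximates that shape; the real sparc_config definition lives in a header outside this diff, and the sun4x_* names here are hypothetical.

#include <stdio.h>

struct sparc_config_like {
	void (*init_timers)(void);
	void (*clear_clock_irq)(void);
	void (*load_profile_irq)(int cpu, unsigned int limit);
	unsigned long clock_rate;
};

static void sun4x_init_timers(void)     { puts("timers up"); }
static void sun4x_clear_clock_irq(void) { puts("ack timer"); }
static void sun4x_load_profile_irq(int cpu, unsigned int limit)
{
	(void)cpu; (void)limit;
}

static struct sparc_config_like cfg;

int main(void)
{
	/* platform init fills the table once ... */
	cfg.init_timers      = sun4x_init_timers;
	cfg.clear_clock_irq  = sun4x_clear_clock_irq;
	cfg.load_profile_irq = sun4x_load_profile_irq;
	cfg.clock_rate       = 4250000;	/* illustrative value */

	/* ... and generic code dispatches through it, no runtime patching */
	cfg.init_timers();
	cfg.clear_clock_irq();
	return 0;
}
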
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 540b2fe..f9a1a33 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -6,16 +6,20 @@
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  */
 
+#include <linux/clockchips.h>
 #include <linux/interrupt.h>
 #include <linux/profile.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
 #include <linux/cpu.h>
 
+#include <asm/cacheflush.h>
+#include <asm/switch_to.h>
+#include <asm/tlbflush.h>
+#include <asm/timer.h>
+#include <asm/oplib.h>
 #include <asm/sbi.h>
 #include <asm/mmu.h>
-#include <asm/tlbflush.h>
-#include <asm/switch_to.h>
-#include <asm/cacheflush.h>
 
 #include "kernel.h"
 #include "irq.h"
@@ -34,7 +38,6 @@
 }
 
 static void smp4d_ipi_init(void);
-static void smp_setup_percpu_timer(void);
 
 static unsigned char cpu_leds[32];
 
@@ -49,7 +52,7 @@
 
 void __cpuinit smp4d_callin(void)
 {
-	int cpuid = hard_smp4d_processor_id();
+	int cpuid = hard_smp_processor_id();
 	unsigned long flags;
 
 	/* Show we are alive */
@@ -59,8 +62,8 @@
 	/* Enable level15 interrupt, disable level14 interrupt for now */
 	cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
 
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 
 	notify_cpu_starting(cpuid);
 	/*
@@ -70,17 +73,17 @@
 	 * to call the scheduler code.
 	 */
 	/* Get our local ticker going. */
-	smp_setup_percpu_timer();
+	register_percpu_ce(cpuid);
 
 	calibrate_delay();
 	smp_store_cpu_info(cpuid);
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 
 	/* Allow master to continue. */
 	sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 
 	while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
 		barrier();
@@ -100,8 +103,8 @@
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
 
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 
 	local_irq_enable();	/* We don't allow PIL 14 yet */
 
@@ -123,8 +126,7 @@
 	smp4d_ipi_init();
 	if (boot_cpu_id)
 		current_set[0] = NULL;
-	smp_setup_percpu_timer();
-	local_flush_cache_all();
+	local_ops->cache_all();
 }
 
 int __cpuinit smp4d_boot_one_cpu(int i)
@@ -150,7 +152,7 @@
 
 	/* whirrr, whirrr, whirrrrrrrrr... */
 	printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
-	local_flush_cache_all();
+	local_ops->cache_all();
 	prom_startcpu(cpu_node,
 		      &smp_penguin_ctable, 0, (char *)entry);
 
@@ -168,7 +170,7 @@
 		return -ENODEV;
 
 	}
-	local_flush_cache_all();
+	local_ops->cache_all();
 	return 0;
 }
 
@@ -185,7 +187,7 @@
 		prev = &cpu_data(i).next;
 	}
 	*prev = first;
-	local_flush_cache_all();
+	local_ops->cache_all();
 
 	/* Ok, they are spinning and ready to go. */
 	smp_processors_ready = 1;
@@ -233,7 +235,20 @@
 	}
 }
 
-static void smp4d_ipi_single(int cpu)
+/* +-------+-------------+-----------+------------------------------------+
+ * | bcast |  devid      |   sid     |              levels mask           |
+ * +-------+-------------+-----------+------------------------------------+
+ *  31      30         23 22       15 14                                 0
+ */
+#define IGEN_MESSAGE(bcast, devid, sid, levels) \
+	(((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))
+
+static void sun4d_send_ipi(int cpu, int level)
+{
+	cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
+}
+
+static void sun4d_ipi_single(int cpu)
 {
 	struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
 
@@ -244,7 +259,7 @@
 	sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
 }
 
-static void smp4d_ipi_mask_one(int cpu)
+static void sun4d_ipi_mask_one(int cpu)
 {
 	struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
 
@@ -255,7 +270,7 @@
 	sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
 }
 
-static void smp4d_ipi_resched(int cpu)
+static void sun4d_ipi_resched(int cpu)
 {
 	struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);
 
@@ -280,7 +295,7 @@
 static DEFINE_SPINLOCK(cross_call_lock);
 
 /* Cross calls must be serialized, at least currently. */
-static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 			     unsigned long arg2, unsigned long arg3,
 			     unsigned long arg4)
 {
@@ -352,7 +367,7 @@
 /* Running cross calls. */
 void smp4d_cross_call_irq(void)
 {
-	int i = hard_smp4d_processor_id();
+	int i = hard_smp_processor_id();
 
 	ccall_info.processors_in[i] = 1;
 	ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
@@ -363,7 +378,8 @@
 void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
-	int cpu = hard_smp4d_processor_id();
+	int cpu = hard_smp_processor_id();
+	struct clock_event_device *ce;
 	static int cpu_tick[NR_CPUS];
 	static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };
 
@@ -379,45 +395,21 @@
 		show_leds(cpu);
 	}
 
-	profile_tick(CPU_PROFILING);
+	ce = &per_cpu(sparc32_clockevent, cpu);
 
-	if (!--prof_counter(cpu)) {
-		int user = user_mode(regs);
+	irq_enter();
+	ce->event_handler(ce);
+	irq_exit();
 
-		irq_enter();
-		update_process_times(user);
-		irq_exit();
-
-		prof_counter(cpu) = prof_multiplier(cpu);
-	}
 	set_irq_regs(old_regs);
 }
 
-static void __cpuinit smp_setup_percpu_timer(void)
-{
-	int cpu = hard_smp4d_processor_id();
-
-	prof_counter(cpu) = prof_multiplier(cpu) = 1;
-	load_profile_irq(cpu, lvl14_resolution);
-}
-
-void __init smp4d_blackbox_id(unsigned *addr)
-{
-	int rd = *addr & 0x3e000000;
-
-	addr[0] = 0xc0800800 | rd;		/* lda [%g0] ASI_M_VIKING_TMP1, reg */
-	addr[1] = 0x01000000;			/* nop */
-	addr[2] = 0x01000000;			/* nop */
-}
-
-void __init smp4d_blackbox_current(unsigned *addr)
-{
-	int rd = *addr & 0x3e000000;
-
-	addr[0] = 0xc0800800 | rd;		/* lda [%g0] ASI_M_VIKING_TMP1, reg */
-	addr[2] = 0x81282002 | rd | (rd >> 11);	/* sll reg, 2, reg */
-	addr[4] = 0x01000000;			/* nop */
-}
+static const struct sparc32_ipi_ops sun4d_ipi_ops = {
+	.cross_call = sun4d_cross_call,
+	.resched    = sun4d_ipi_resched,
+	.single     = sun4d_ipi_single,
+	.mask_one   = sun4d_ipi_mask_one,
+};
 
 void __init sun4d_init_smp(void)
 {
@@ -426,14 +418,7 @@
 	/* Patch ipi15 trap table */
 	t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
 
-	/* And set btfixup... */
-	BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
-	BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
-	BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(smp_ipi_resched, smp4d_ipi_resched, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(smp_ipi_single, smp4d_ipi_single, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(smp_ipi_mask_one, smp4d_ipi_mask_one, BTFIXUPCALL_NORM);
+	sparc32_ipi_ops = &sun4d_ipi_ops;
 
 	for (i = 0; i < NR_CPUS; i++) {
 		ccall_info.processors_in[i] = 1;
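
The IGEN_MESSAGE() layout added in the sun4d_smp.c hunk above packs four fields into one 32-bit write to the interrupt generator. A small user-space sketch, reusing the macro exactly as added and plugging in example cpu/level values that are not taken from the patch, shows where the bits land:

#include <stdio.h>

#define IGEN_MESSAGE(bcast, devid, sid, levels) \
	(((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))

int main(void)
{
	int cpu = 2, level = 13;	/* example values only */
	unsigned int msg = IGEN_MESSAGE(0, cpu << 3,
					6 + ((level >> 1) & 7),
					1 << (level - 1));

	/* bcast=0, devid=0x10, sid=0xc, level bit 12 -> 0x08061000 */
	printf("0x%08x\n", msg);
	return 0;
}
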
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index e611651..c5ade9d 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -112,9 +112,6 @@
 #define SUN4M_INT_E14		0x00000080
 #define SUN4M_INT_E10		0x00080000
 
-#define SUN4M_HARD_INT(x)	(0x000000001 << (x))
-#define SUN4M_SOFT_INT(x)	(0x000010000 << (x))
-
 #define	SUN4M_INT_MASKALL	0x80000000	  /* mask all interrupts */
 #define	SUN4M_INT_MODULE_ERR	0x40000000	  /* module error */
 #define	SUN4M_INT_M2S_WRITE_ERR	0x20000000	  /* write buffer error */
@@ -282,23 +279,6 @@
 	return irq;
 }
 
-#ifdef CONFIG_SMP
-static void sun4m_send_ipi(int cpu, int level)
-{
-	sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
-}
-
-static void sun4m_clear_ipi(int cpu, int level)
-{
-	sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->clear);
-}
-
-static void sun4m_set_udt(int cpu)
-{
-	sbus_writel(cpu, &sun4m_irq_global->interrupt_target);
-}
-#endif
-
 struct sun4m_timer_percpu {
 	u32		l14_limit;
 	u32		l14_count;
@@ -318,9 +298,6 @@
 
 static struct sun4m_timer_global __iomem *timers_global;
 
-
-unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
-
 static void sun4m_clear_clock_irq(void)
 {
 	sbus_readl(&timers_global->l10_limit);
@@ -369,10 +346,11 @@
 
 static void sun4m_load_profile_irq(int cpu, unsigned int limit)
 {
-	sbus_writel(limit, &timers_percpu[cpu]->l14_limit);
+	unsigned int value = limit ? timer_value(limit) : 0;
+	sbus_writel(value, &timers_percpu[cpu]->l14_limit);
 }
 
-static void __init sun4m_init_timers(irq_handler_t counter_fn)
+static void __init sun4m_init_timers(void)
 {
 	struct device_node *dp = of_find_node_by_name(NULL, "counter");
 	int i, err, len, num_cpu_timers;
@@ -402,13 +380,22 @@
 	/* Every per-cpu timer works in timer mode */
 	sbus_writel(0x00000000, &timers_global->timer_config);
 
-	sbus_writel((((1000000/HZ) + 1) << 10), &timers_global->l10_limit);
+#ifdef CONFIG_SMP
+	sparc_config.cs_period = SBUS_CLOCK_RATE * 2;  /* 2 seconds */
+	sparc_config.features |= FEAT_L14_ONESHOT;
+#else
+	sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec  */
+	sparc_config.features |= FEAT_L10_CLOCKEVENT;
+#endif
+	sparc_config.features |= FEAT_L10_CLOCKSOURCE;
+	sbus_writel(timer_value(sparc_config.cs_period),
+	            &timers_global->l10_limit);
 
 	master_l10_counter = &timers_global->l10_count;
 
 	irq = sun4m_build_device_irq(NULL, SUN4M_TIMER_IRQ);
 
-	err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
+	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
 	if (err) {
 		printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n",
 			err);
@@ -434,7 +421,7 @@
 		trap_table->inst_two = lvl14_save[1];
 		trap_table->inst_three = lvl14_save[2];
 		trap_table->inst_four = lvl14_save[3];
-		local_flush_cache_all();
+		local_ops->cache_all();
 		local_irq_restore(flags);
 	}
 #endif
@@ -475,17 +462,12 @@
 	if (num_cpu_iregs == 4)
 		sbus_writel(0, &sun4m_irq_global->interrupt_target);
 
-	BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
+	sparc_config.init_timers      = sun4m_init_timers;
+	sparc_config.build_device_irq = sun4m_build_device_irq;
+	sparc_config.clock_rate       = SBUS_CLOCK_RATE;
+	sparc_config.clear_clock_irq  = sun4m_clear_clock_irq;
+	sparc_config.load_profile_irq = sun4m_load_profile_irq;
 
-	sparc_irq_config.init_timers = sun4m_init_timers;
-	sparc_irq_config.build_device_irq = sun4m_build_device_irq;
-
-#ifdef CONFIG_SMP
-	BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(set_irq_udt, sun4m_set_udt, BTFIXUPCALL_NORM);
-#endif
 
 	/* Cannot enable interrupts until OBP ticker is disabled. */
 }
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 02db9a0..960e8ab 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -4,14 +4,18 @@
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  */
 
+#include <linux/clockchips.h>
 #include <linux/interrupt.h>
 #include <linux/profile.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
 #include <linux/cpu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/switch_to.h>
 #include <asm/tlbflush.h>
+#include <asm/timer.h>
+#include <asm/oplib.h>
 
 #include "irq.h"
 #include "kernel.h"
@@ -30,26 +34,22 @@
 	return val;
 }
 
-static void smp4m_ipi_init(void);
-static void smp_setup_percpu_timer(void);
-
 void __cpuinit smp4m_callin(void)
 {
 	int cpuid = hard_smp_processor_id();
 
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 
 	notify_cpu_starting(cpuid);
 
-	/* Get our local ticker going. */
-	smp_setup_percpu_timer();
+	register_percpu_ce(cpuid);
 
 	calibrate_delay();
 	smp_store_cpu_info(cpuid);
 
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 
 	/*
 	 * Unblock the master CPU _only_ when the scheduler state
@@ -61,8 +61,8 @@
 	swap_ulong(&cpu_callin_map[cpuid], 1);
 
 	/* XXX: What's up with all the flushes? */
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	local_ops->cache_all();
+	local_ops->tlb_all();
 
 	/* Fix idle thread fields. */
 	__asm__ __volatile__("ld [%0], %%g6\n\t"
@@ -86,9 +86,8 @@
  */
 void __init smp4m_boot_cpus(void)
 {
-	smp4m_ipi_init();
-	smp_setup_percpu_timer();
-	local_flush_cache_all();
+	sun4m_unmask_profile_irq();
+	local_ops->cache_all();
 }
 
 int __cpuinit smp4m_boot_one_cpu(int i)
@@ -117,7 +116,7 @@
 
 	/* whirrr, whirrr, whirrrrrrrrr... */
 	printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
-	local_flush_cache_all();
+	local_ops->cache_all();
 	prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
 
 	/* wheee... it's going... */
@@ -132,7 +131,7 @@
 		return -ENODEV;
 	}
 
-	local_flush_cache_all();
+	local_ops->cache_all();
 	return 0;
 }
 
@@ -149,30 +148,29 @@
 		prev = &cpu_data(i).next;
 	}
 	*prev = first;
-	local_flush_cache_all();
+	local_ops->cache_all();
 
 	/* Ok, they are spinning and ready to go. */
 }
 
-
-/* Initialize IPIs on the SUN4M SMP machine */
-static void __init smp4m_ipi_init(void)
+static void sun4m_send_ipi(int cpu, int level)
 {
+	sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
 }
 
-static void smp4m_ipi_resched(int cpu)
+static void sun4m_ipi_resched(int cpu)
 {
-	set_cpu_int(cpu, IRQ_IPI_RESCHED);
+	sun4m_send_ipi(cpu, IRQ_IPI_RESCHED);
 }
 
-static void smp4m_ipi_single(int cpu)
+static void sun4m_ipi_single(int cpu)
 {
-	set_cpu_int(cpu, IRQ_IPI_SINGLE);
+	sun4m_send_ipi(cpu, IRQ_IPI_SINGLE);
 }
 
-static void smp4m_ipi_mask_one(int cpu)
+static void sun4m_ipi_mask_one(int cpu)
 {
-	set_cpu_int(cpu, IRQ_IPI_MASK);
+	sun4m_send_ipi(cpu, IRQ_IPI_MASK);
 }
 
 static struct smp_funcall {
@@ -189,7 +187,7 @@
 static DEFINE_SPINLOCK(cross_call_lock);
 
 /* Cross calls must be serialized, at least currently. */
-static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
+static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
 			     unsigned long arg2, unsigned long arg3,
 			     unsigned long arg4)
 {
@@ -216,7 +214,7 @@
 				if (cpumask_test_cpu(i, &mask)) {
 					ccall_info.processors_in[i] = 0;
 					ccall_info.processors_out[i] = 0;
-					set_cpu_int(i, IRQ_CROSS_CALL);
+					sun4m_send_ipi(i, IRQ_CROSS_CALL);
 				} else {
 					ccall_info.processors_in[i] = 1;
 					ccall_info.processors_out[i] = 1;
@@ -260,64 +258,33 @@
 void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
+	struct clock_event_device *ce;
 	int cpu = smp_processor_id();
 
 	old_regs = set_irq_regs(regs);
 
-	sun4m_clear_profile_irq(cpu);
+	ce = &per_cpu(sparc32_clockevent, cpu);
 
-	profile_tick(CPU_PROFILING);
+	if (ce->mode & CLOCK_EVT_MODE_PERIODIC)
+		sun4m_clear_profile_irq(cpu);
+	else
+		sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */
 
-	if (!--prof_counter(cpu)) {
-		int user = user_mode(regs);
+	irq_enter();
+	ce->event_handler(ce);
+	irq_exit();
 
-		irq_enter();
-		update_process_times(user);
-		irq_exit();
-
-		prof_counter(cpu) = prof_multiplier(cpu);
-	}
 	set_irq_regs(old_regs);
 }
 
-static void __cpuinit smp_setup_percpu_timer(void)
-{
-	int cpu = smp_processor_id();
-
-	prof_counter(cpu) = prof_multiplier(cpu) = 1;
-	load_profile_irq(cpu, lvl14_resolution);
-
-	if (cpu == boot_cpu_id)
-		sun4m_unmask_profile_irq();
-}
-
-static void __init smp4m_blackbox_id(unsigned *addr)
-{
-	int rd = *addr & 0x3e000000;
-	int rs1 = rd >> 11;
-
-	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
-	addr[1] = 0x8130200c | rd | rs1;	/* srl reg, 0xc, reg */
-	addr[2] = 0x80082003 | rd | rs1;	/* and reg, 3, reg */
-}
-
-static void __init smp4m_blackbox_current(unsigned *addr)
-{
-	int rd = *addr & 0x3e000000;
-	int rs1 = rd >> 11;
-
-	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
-	addr[2] = 0x8130200a | rd | rs1;	/* srl reg, 0xa, reg */
-	addr[4] = 0x8008200c | rd | rs1;	/* and reg, 0xc, reg */
-}
+static const struct sparc32_ipi_ops sun4m_ipi_ops = {
+	.cross_call = sun4m_cross_call,
+	.resched    = sun4m_ipi_resched,
+	.single     = sun4m_ipi_single,
+	.mask_one   = sun4m_ipi_mask_one,
+};
 
 void __init sun4m_init_smp(void)
 {
-	BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
-	BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
-	BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(smp_ipi_resched, smp4m_ipi_resched, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(smp_ipi_single, smp4m_ipi_single, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(smp_ipi_mask_one, smp4m_ipi_mask_one, BTFIXUPCALL_NORM);
+	sparc32_ipi_ops = &sun4m_ipi_ops;
 }
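
The sun4m_send_ipi() helper added above raises an IPI by writing the soft-interrupt bit for a given level into the target CPU's "set" register. Using the SUN4M_SOFT_INT definition that appears earlier in this diff (0x10000 shifted by the level), the value written can be checked with a quick sketch; the level used here is hypothetical:

#include <stdio.h>

#define SUN4M_SOFT_INT(x)	(0x000010000 << (x))	/* from the removed define */

int main(void)
{
	int level = 13;		/* hypothetical IPI level */

	/* bit (16 + level) of the per-cpu "set" register: 0x20000000 here */
	printf("0x%08x\n", (unsigned int)SUN4M_SOFT_INT(level));
	return 0;
}
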
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 42b282f..627e89a 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -53,8 +53,6 @@
 	/* See asm-sparc/uaccess.h */
 	if (len > TASK_SIZE - PAGE_SIZE)
 		return -ENOMEM;
-	if (ARCH_SUN4C && len > 0x20000000)
-		return -ENOMEM;
 	if (!addr)
 		addr = TASK_UNMAPPED_BASE;
 
@@ -65,10 +63,6 @@
 
 	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
 		/* At this point:  (!vmm || addr < vmm->vm_end). */
-		if (ARCH_SUN4C && addr < 0xe0000000 && 0x20000000 - len < addr) {
-			addr = PAGE_OFFSET;
-			vmm = find_vma(current->mm, PAGE_OFFSET);
-		}
 		if (TASK_SIZE - PAGE_SIZE - len < addr)
 			return -ENOMEM;
 		if (!vmm || addr + len <= vmm->vm_start)
@@ -99,11 +93,6 @@
 
 int sparc_mmap_check(unsigned long addr, unsigned long len)
 {
-	if (ARCH_SUN4C &&
-	    (len > 0x20000000 ||
-	     (addr < 0xe0000000 && addr + len > 0x20000000)))
-		return -EINVAL;
-
 	/* See asm-sparc/uaccess.h */
 	if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
 		return -EINVAL;
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 7d0c088..9536415 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -26,6 +26,8 @@
 #include <linux/rtc.h>
 #include <linux/rtc/m48t59.h>
 #include <linux/timex.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
@@ -40,13 +42,24 @@
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/idprom.h>
-#include <asm/machines.h>
 #include <asm/page.h>
 #include <asm/pcic.h>
 #include <asm/irq_regs.h>
+#include <asm/setup.h>
 
 #include "irq.h"
 
+static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock);
+static __volatile__ u64 timer_cs_internal_counter = 0;
+static char timer_cs_enabled = 0;
+
+static struct clock_event_device timer_ce;
+static char timer_ce_enabled = 0;
+
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct clock_event_device, sparc32_clockevent);
+#endif
+
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
@@ -55,7 +68,6 @@
 unsigned long profile_pc(struct pt_regs *regs)
 {
 	extern char __copy_user_begin[], __copy_user_end[];
-	extern char __atomic_begin[], __atomic_end[];
 	extern char __bzero_begin[], __bzero_end[];
 
 	unsigned long pc = regs->pc;
@@ -63,8 +75,6 @@
 	if (in_lock_functions(pc) ||
 	    (pc >= (unsigned long) __copy_user_begin &&
 	     pc < (unsigned long) __copy_user_end) ||
-	    (pc >= (unsigned long) __atomic_begin &&
-	     pc < (unsigned long) __atomic_end) ||
 	    (pc >= (unsigned long) __bzero_begin &&
 	     pc < (unsigned long) __bzero_end))
 		pc = regs->u_regs[UREG_RETPC];
@@ -75,36 +85,168 @@
 
 __volatile__ unsigned int *master_l10_counter;
 
-u32 (*do_arch_gettimeoffset)(void);
-
 int update_persistent_clock(struct timespec now)
 {
 	return set_rtc_mmss(now.tv_sec);
 }
 
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
- */
-
-#define TICK_SIZE (tick_nsec / 1000)
-
-static irqreturn_t timer_interrupt(int dummy, void *dev_id)
+irqreturn_t notrace timer_interrupt(int dummy, void *dev_id)
 {
-#ifndef CONFIG_SMP
-	profile_tick(CPU_PROFILING);
-#endif
+	if (timer_cs_enabled) {
+		write_seqlock(&timer_cs_lock);
+		timer_cs_internal_counter++;
+		sparc_config.clear_clock_irq();
+		write_sequnlock(&timer_cs_lock);
+	} else {
+		sparc_config.clear_clock_irq();
+	}
 
-	clear_clock_irq();
+	if (timer_ce_enabled)
+		timer_ce.event_handler(&timer_ce);
 
-	xtime_update(1);
-
-#ifndef CONFIG_SMP
-	update_process_times(user_mode(get_irq_regs()));
-#endif
 	return IRQ_HANDLED;
 }
 
+static void timer_ce_set_mode(enum clock_event_mode mode,
+			      struct clock_event_device *evt)
+{
+	switch (mode) {
+		case CLOCK_EVT_MODE_PERIODIC:
+		case CLOCK_EVT_MODE_RESUME:
+			timer_ce_enabled = 1;
+			break;
+		case CLOCK_EVT_MODE_SHUTDOWN:
+			timer_ce_enabled = 0;
+			break;
+		default:
+			break;
+	}
+	smp_mb();
+}
+
+static __init void setup_timer_ce(void)
+{
+	struct clock_event_device *ce = &timer_ce;
+
+	BUG_ON(smp_processor_id() != boot_cpu_id);
+
+	ce->name     = "timer_ce";
+	ce->rating   = 100;
+	ce->features = CLOCK_EVT_FEAT_PERIODIC;
+	ce->set_mode = timer_ce_set_mode;
+	ce->cpumask  = cpu_possible_mask;
+	ce->shift    = 32;
+	ce->mult     = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
+	                      ce->shift);
+	clockevents_register_device(ce);
+}
+
+static unsigned int sbus_cycles_offset(void)
+{
+	unsigned int val, offset;
+
+	val = *master_l10_counter;
+	offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
+
+	/* Limit hit? */
+	if (val & TIMER_LIMIT_BIT)
+		offset += sparc_config.cs_period;
+
+	return offset;
+}
+
+static cycle_t timer_cs_read(struct clocksource *cs)
+{
+	unsigned int seq, offset;
+	u64 cycles;
+
+	do {
+		seq = read_seqbegin(&timer_cs_lock);
+
+		cycles = timer_cs_internal_counter;
+		offset = sparc_config.get_cycles_offset();
+	} while (read_seqretry(&timer_cs_lock, seq));
+
+	/* Count absolute cycles */
+	cycles *= sparc_config.cs_period;
+	cycles += offset;
+
+	return cycles;
+}
+
+static struct clocksource timer_cs = {
+	.name	= "timer_cs",
+	.rating	= 100,
+	.read	= timer_cs_read,
+	.mask	= CLOCKSOURCE_MASK(64),
+	.shift	= 2,
+	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static __init int setup_timer_cs(void)
+{
+	timer_cs_enabled = 1;
+	timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
+	                                    timer_cs.shift);
+
+	return clocksource_register(&timer_cs);
+}
+
+#ifdef CONFIG_SMP
+static void percpu_ce_setup(enum clock_event_mode mode,
+			struct clock_event_device *evt)
+{
+	int cpu = __first_cpu(evt->cpumask);
+
+	switch (mode) {
+		case CLOCK_EVT_MODE_PERIODIC:
+			sparc_config.load_profile_irq(cpu,
+						      SBUS_CLOCK_RATE / HZ);
+			break;
+		case CLOCK_EVT_MODE_ONESHOT:
+		case CLOCK_EVT_MODE_SHUTDOWN:
+		case CLOCK_EVT_MODE_UNUSED:
+			sparc_config.load_profile_irq(cpu, 0);
+			break;
+		default:
+			break;
+	}
+}
+
+static int percpu_ce_set_next_event(unsigned long delta,
+				    struct clock_event_device *evt)
+{
+	int cpu = __first_cpu(evt->cpumask);
+	unsigned int next = (unsigned int)delta;
+
+	sparc_config.load_profile_irq(cpu, next);
+	return 0;
+}
+
+void register_percpu_ce(int cpu)
+{
+	struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu);
+	unsigned int features = CLOCK_EVT_FEAT_PERIODIC;
+
+	if (sparc_config.features & FEAT_L14_ONESHOT)
+		features |= CLOCK_EVT_FEAT_ONESHOT;
+
+	ce->name           = "percpu_ce";
+	ce->rating         = 200;
+	ce->features       = features;
+	ce->set_mode       = percpu_ce_setup;
+	ce->set_next_event = percpu_ce_set_next_event;
+	ce->cpumask        = cpumask_of(cpu);
+	ce->shift          = 32;
+	ce->mult           = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
+	                            ce->shift);
+	ce->max_delta_ns   = clockevent_delta2ns(sparc_config.clock_rate, ce);
+	ce->min_delta_ns   = clockevent_delta2ns(100, ce);
+
+	clockevents_register_device(ce);
+}
+#endif
+
 static unsigned char mostek_read_byte(struct device *dev, u32 ofs)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -195,38 +337,28 @@
  */
 fs_initcall(clock_init);
 
-
-u32 sbus_do_gettimeoffset(void)
+static void __init sparc32_late_time_init(void)
 {
-	unsigned long val = *master_l10_counter;
-	unsigned long usec = (val >> 10) & 0x1fffff;
-
-	/* Limit hit?  */
-	if (val & 0x80000000)
-		usec += 1000000 / HZ;
-
-	return usec * 1000;
-}
-
-
-u32 arch_gettimeoffset(void)
-{
-	if (unlikely(!do_arch_gettimeoffset))
-		return 0;
-	return do_arch_gettimeoffset();
+	if (sparc_config.features & FEAT_L10_CLOCKEVENT)
+		setup_timer_ce();
+	if (sparc_config.features & FEAT_L10_CLOCKSOURCE)
+		setup_timer_cs();
+#ifdef CONFIG_SMP
+	register_percpu_ce(smp_processor_id());
+#endif
 }
 
 static void __init sbus_time_init(void)
 {
-	do_arch_gettimeoffset = sbus_do_gettimeoffset;
-
-	btfixup();
-
-	sparc_irq_config.init_timers(timer_interrupt);
+	sparc_config.get_cycles_offset = sbus_cycles_offset;
+	sparc_config.init_timers();
 }
 
 void __init time_init(void)
 {
+	sparc_config.features = 0;
+	late_time_init = sparc32_late_time_init;
+
 	if (pcic_present())
 		pci_time_init();
 	else
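
On the new clocksource in time_32.c: timer_cs_read() combines a software tick counter (bumped once per timer interrupt under the seqlock) with the hardware sub-tick offset to produce an absolute cycle count. The arithmetic alone, with made-up numbers since SBUS_CLOCK_RATE and HZ are defined outside this diff, is simply:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* illustrative values only */
	uint64_t cs_period = 4250000 / 100;	/* cycles per tick (cs_period) */
	uint64_t ticks     = 12345;		/* timer_cs_internal_counter   */
	uint64_t offset    = 17;		/* cycles since the last tick  */

	/* same computation as timer_cs_read(): absolute cycles */
	uint64_t cycles = ticks * cs_period + offset;

	printf("%llu\n", (unsigned long long)cycles);
	return 0;
}
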
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index 691f484..7364ddc 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -15,8 +15,8 @@
 #include <asm/contregs.h>
 #include <asm/thread_info.h>
 
-	.globl sun4m_cpu_startup, __smp4m_processor_id, __leon_processor_id
-	.globl sun4d_cpu_startup, __smp4d_processor_id
+	.globl sun4m_cpu_startup
+	.globl sun4d_cpu_startup
 
 	__CPUINIT
 	.align 4
@@ -94,24 +94,6 @@
 	call	cpu_panic
 	 nop
 
-__smp4m_processor_id:
-	rd	%tbr, %g2
-	srl	%g2, 12, %g2
-	and	%g2, 3, %g2
-	retl
-	 mov	%g1, %o7
-
-__smp4d_processor_id:
-	lda	[%g0] ASI_M_VIKING_TMP1, %g2
-	retl
-	 mov	%g1, %o7
-
-__leon_processor_id:
-	rd     %asr17,%g2
-        srl    %g2,28,%g2
-	retl
-	 mov	%g1, %o7
-
 /* CPUID in bootbus can be found at PA 0xff0140000 */
 #define SUN4D_BOOTBUS_CPUID	0xf0140000
 
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index d2de213..a5785ea 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -120,8 +120,6 @@
 	printk("Ill instr. at pc=%08lx instruction is %08lx\n",
 	       regs->pc, *(unsigned long *)regs->pc);
 #endif
-	if (!do_user_muldiv (regs, pc))
-		return;
 
 	info.si_signo = SIGILL;
 	info.si_errno = 0;
diff --git a/arch/sparc/kernel/ttable_32.S b/arch/sparc/kernel/ttable_32.S
new file mode 100644
index 0000000..8a7a96c
--- /dev/null
+++ b/arch/sparc/kernel/ttable_32.S
@@ -0,0 +1,417 @@
+/* The Sparc trap table, bootloader gives us control at _start. */
+        __HEAD
+
+        .globl  _start
+_start:
+
+	.globl _stext
+_stext:
+
+	.globl  trapbase
+trapbase:
+
+#ifdef CONFIG_SMP
+trapbase_cpu0:
+#endif
+/* We get control passed to us here at t_zero. */
+t_zero:	b gokernel; nop; nop; nop;
+t_tflt:	SRMMU_TFAULT                        /* Inst. Access Exception        */
+t_bins:	TRAP_ENTRY(0x2, bad_instruction)    /* Illegal Instruction           */
+t_pins:	TRAP_ENTRY(0x3, priv_instruction)   /* Privileged Instruction        */
+t_fpd:	TRAP_ENTRY(0x4, fpd_trap_handler)   /* Floating Point Disabled       */
+t_wovf:	WINDOW_SPILL                        /* Window Overflow               */
+t_wunf:	WINDOW_FILL                         /* Window Underflow              */
+t_mna:	TRAP_ENTRY(0x7, mna_handler)        /* Memory Address Not Aligned    */
+t_fpe:	TRAP_ENTRY(0x8, fpe_trap_handler)   /* Floating Point Exception      */
+t_dflt:	SRMMU_DFAULT                        /* Data Miss Exception           */
+t_tio:	TRAP_ENTRY(0xa, do_tag_overflow)    /* Tagged Instruction Ovrflw     */
+t_wpt:	TRAP_ENTRY(0xb, do_watchpoint)      /* Watchpoint Detected           */
+t_badc:	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+t_irq1:	TRAP_ENTRY_INTERRUPT(1)             /* IRQ Software/SBUS Level 1     */
+t_irq2:	TRAP_ENTRY_INTERRUPT(2)             /* IRQ SBUS Level 2              */
+t_irq3:	TRAP_ENTRY_INTERRUPT(3)             /* IRQ SCSI/DMA/SBUS Level 3     */
+t_irq4:	TRAP_ENTRY_INTERRUPT(4)             /* IRQ Software Level 4          */
+t_irq5:	TRAP_ENTRY_INTERRUPT(5)             /* IRQ SBUS/Ethernet Level 5     */
+t_irq6:	TRAP_ENTRY_INTERRUPT(6)             /* IRQ Software Level 6          */
+t_irq7:	TRAP_ENTRY_INTERRUPT(7)             /* IRQ Video/SBUS Level 5        */
+t_irq8:	TRAP_ENTRY_INTERRUPT(8)             /* IRQ SBUS Level 6              */
+t_irq9:	TRAP_ENTRY_INTERRUPT(9)             /* IRQ SBUS Level 7              */
+t_irq10:TRAP_ENTRY_INTERRUPT(10)            /* IRQ Timer #1 (one we use)     */
+t_irq11:TRAP_ENTRY_INTERRUPT(11)            /* IRQ Floppy Intr.              */
+t_irq12:TRAP_ENTRY_INTERRUPT(12)            /* IRQ Zilog serial chip         */
+t_irq13:TRAP_ENTRY_INTERRUPT(13)            /* IRQ Audio Intr.               */
+t_irq14:TRAP_ENTRY_INTERRUPT(14)            /* IRQ Timer #2                  */
+
+	.globl	t_nmi
+t_nmi:	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+
+t_racc:	TRAP_ENTRY(0x20, do_reg_access)     /* General Register Access Error */
+t_iacce:BAD_TRAP(0x21)                      /* Instr Access Error            */
+t_bad22:BAD_TRAP(0x22)
+	BAD_TRAP(0x23)
+t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled)    /* Co-Processor Disabled         */
+t_uflsh:SKIP_TRAP(0x25, unimp_flush)        /* Unimplemented FLUSH inst.     */
+t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27)
+t_cpexc:TRAP_ENTRY(0x28, do_cp_exception)   /* Co-Processor Exception        */
+t_dacce:SRMMU_DFAULT                        /* Data Access Error             */
+t_hwdz:	TRAP_ENTRY(0x2a, do_hw_divzero)     /* Division by zero, you lose... */
+t_dserr:BAD_TRAP(0x2b)                      /* Data Store Error              */
+t_daccm:BAD_TRAP(0x2c)                      /* Data Access MMU-Miss          */
+t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+t_iaccm:BAD_TRAP(0x3c)                      /* Instr Access MMU-Miss         */
+t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41)
+t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46)
+t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b)
+t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50)
+t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+t_bad80:BAD_TRAP(0x80)                      /* SunOS System Call             */
+t_sbkpt:BREAKPOINT_TRAP                     /* Software Breakpoint/KGDB      */
+t_divz:	TRAP_ENTRY(0x82, do_hw_divzero)     /* Divide by zero trap           */
+t_flwin:TRAP_ENTRY(0x83, do_flush_windows)  /* Flush Windows Trap            */
+t_clwin:BAD_TRAP(0x84)                      /* Clean Windows Trap            */
+t_rchk:	BAD_TRAP(0x85)                      /* Range Check                   */
+t_funal:BAD_TRAP(0x86)                      /* Fix Unaligned Access Trap     */
+t_iovf:	BAD_TRAP(0x87)                      /* Integer Overflow Trap         */
+t_bad88:BAD_TRAP(0x88)                      /* Slowaris System Call          */
+t_bad89:BAD_TRAP(0x89)                      /* Net-B.S. System Call          */
+t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e)
+t_bad8f:BAD_TRAP(0x8f)
+t_linux:LINUX_SYSCALL_TRAP                  /* Linux System Call             */
+t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95)
+t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a)
+t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f)
+t_getcc:GETCC_TRAP                          /* Get Condition Codes           */
+t_setcc:SETCC_TRAP                          /* Set Condition Codes           */
+t_getpsr:GETPSR_TRAP                        /* Get PSR Register              */
+t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+t_bada7:BAD_TRAP(0xa7)
+t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+t_badfc:BAD_TRAP(0xfc)
+t_kgdb:	KGDB_TRAP(0xfd)
+dbtrap:	BAD_TRAP(0xfe)                      /* Debugger/PROM breakpoint #1   */
+dbtrap2:BAD_TRAP(0xff)                      /* Debugger/PROM breakpoint #2   */
+
+	.globl	end_traptable
+end_traptable:
+
+#ifdef CONFIG_SMP
+	/* Trap tables for the other cpus. */
+	.globl	trapbase_cpu1, trapbase_cpu2, trapbase_cpu3
+trapbase_cpu1:
+	BAD_TRAP(0x0)
+	SRMMU_TFAULT
+	TRAP_ENTRY(0x2, bad_instruction)
+	TRAP_ENTRY(0x3, priv_instruction)
+	TRAP_ENTRY(0x4, fpd_trap_handler)
+	WINDOW_SPILL
+	WINDOW_FILL
+	TRAP_ENTRY(0x7, mna_handler)
+	TRAP_ENTRY(0x8, fpe_trap_handler)
+	SRMMU_DFAULT
+	TRAP_ENTRY(0xa, do_tag_overflow)
+	TRAP_ENTRY(0xb, do_watchpoint)
+	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+	TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
+	TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
+	TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
+	TRAP_ENTRY_INTERRUPT(7)	TRAP_ENTRY_INTERRUPT(8)
+	TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
+	TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
+	TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
+	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+	TRAP_ENTRY(0x20, do_reg_access)
+	BAD_TRAP(0x21)
+	BAD_TRAP(0x22)
+	BAD_TRAP(0x23)
+	TRAP_ENTRY(0x24, do_cp_disabled)
+	SKIP_TRAP(0x25, unimp_flush)
+	BAD_TRAP(0x26)
+	BAD_TRAP(0x27)
+	TRAP_ENTRY(0x28, do_cp_exception)
+	SRMMU_DFAULT
+	TRAP_ENTRY(0x2a, do_hw_divzero)
+	BAD_TRAP(0x2b)
+	BAD_TRAP(0x2c)
+	BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+	BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+	BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+	BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
+	BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
+	BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
+	BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
+	BAD_TRAP(0x50)
+	BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+	BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+	BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+	BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+	BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+	BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+	BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+	BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+	BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+	BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+	BAD_TRAP(0x80)
+	BREAKPOINT_TRAP
+	TRAP_ENTRY(0x82, do_hw_divzero)
+	TRAP_ENTRY(0x83, do_flush_windows)
+	BAD_TRAP(0x84) BAD_TRAP(0x85) BAD_TRAP(0x86)
+	BAD_TRAP(0x87) BAD_TRAP(0x88) BAD_TRAP(0x89)
+	BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
+	BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
+	LINUX_SYSCALL_TRAP BAD_TRAP(0x91)
+	BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
+	BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
+	BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
+	BAD_TRAP(0x9f)
+	GETCC_TRAP
+	SETCC_TRAP
+	GETPSR_TRAP
+	BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+	BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+	BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+	BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+	BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+	BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+	BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+	BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+	BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+	BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+	BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+	BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+	BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+	BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+	BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+	BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+	BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+	BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+	BAD_TRAP(0xfc)
+	KGDB_TRAP(0xfd)
+	BAD_TRAP(0xfe)
+	BAD_TRAP(0xff)
+
+trapbase_cpu2:
+	BAD_TRAP(0x0)
+	SRMMU_TFAULT
+	TRAP_ENTRY(0x2, bad_instruction)
+	TRAP_ENTRY(0x3, priv_instruction)
+	TRAP_ENTRY(0x4, fpd_trap_handler)
+	WINDOW_SPILL
+	WINDOW_FILL
+	TRAP_ENTRY(0x7, mna_handler)
+	TRAP_ENTRY(0x8, fpe_trap_handler)
+	SRMMU_DFAULT
+	TRAP_ENTRY(0xa, do_tag_overflow)
+	TRAP_ENTRY(0xb, do_watchpoint)
+	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+	TRAP_ENTRY_INTERRUPT(1)
+	TRAP_ENTRY_INTERRUPT(2)
+	TRAP_ENTRY_INTERRUPT(3)
+	TRAP_ENTRY_INTERRUPT(4)
+	TRAP_ENTRY_INTERRUPT(5)
+	TRAP_ENTRY_INTERRUPT(6)
+	TRAP_ENTRY_INTERRUPT(7)
+	TRAP_ENTRY_INTERRUPT(8)
+	TRAP_ENTRY_INTERRUPT(9)
+	TRAP_ENTRY_INTERRUPT(10)
+	TRAP_ENTRY_INTERRUPT(11)
+	TRAP_ENTRY_INTERRUPT(12)
+	TRAP_ENTRY_INTERRUPT(13)
+	TRAP_ENTRY_INTERRUPT(14)
+	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+	TRAP_ENTRY(0x20, do_reg_access)
+	BAD_TRAP(0x21)
+	BAD_TRAP(0x22)
+	BAD_TRAP(0x23)
+	TRAP_ENTRY(0x24, do_cp_disabled)
+	SKIP_TRAP(0x25, unimp_flush)
+	BAD_TRAP(0x26)
+	BAD_TRAP(0x27)
+	TRAP_ENTRY(0x28, do_cp_exception)
+	SRMMU_DFAULT
+	TRAP_ENTRY(0x2a, do_hw_divzero)
+	BAD_TRAP(0x2b)
+	BAD_TRAP(0x2c)
+	BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+	BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+	BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+	BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
+	BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
+	BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
+	BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
+	BAD_TRAP(0x50)
+	BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+	BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+	BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+	BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+	BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+	BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+	BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+	BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+	BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+	BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+	BAD_TRAP(0x80)
+	BREAKPOINT_TRAP
+	TRAP_ENTRY(0x82, do_hw_divzero)
+	TRAP_ENTRY(0x83, do_flush_windows)
+	BAD_TRAP(0x84)
+	BAD_TRAP(0x85)
+	BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
+	BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
+	BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
+	LINUX_SYSCALL_TRAP BAD_TRAP(0x91)
+	BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
+	BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
+	BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
+	BAD_TRAP(0x9f)
+	GETCC_TRAP
+	SETCC_TRAP
+	GETPSR_TRAP
+	BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+	BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+	BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+	BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+	BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+	BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+	BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+	BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+	BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+	BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+	BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+	BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+	BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+	BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+	BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+	BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+	BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+	BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+	BAD_TRAP(0xfc)
+	KGDB_TRAP(0xfd)
+	BAD_TRAP(0xfe)
+	BAD_TRAP(0xff)
+
+trapbase_cpu3:
+	BAD_TRAP(0x0)
+	SRMMU_TFAULT
+	TRAP_ENTRY(0x2, bad_instruction)
+	TRAP_ENTRY(0x3, priv_instruction)
+	TRAP_ENTRY(0x4, fpd_trap_handler)
+	WINDOW_SPILL
+	WINDOW_FILL
+	TRAP_ENTRY(0x7, mna_handler)
+	TRAP_ENTRY(0x8, fpe_trap_handler)
+	SRMMU_DFAULT
+	TRAP_ENTRY(0xa, do_tag_overflow)
+	TRAP_ENTRY(0xb, do_watchpoint)
+	BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+	TRAP_ENTRY_INTERRUPT(1)
+	TRAP_ENTRY_INTERRUPT(2)
+	TRAP_ENTRY_INTERRUPT(3)
+	TRAP_ENTRY_INTERRUPT(4)
+	TRAP_ENTRY_INTERRUPT(5)
+	TRAP_ENTRY_INTERRUPT(6)
+	TRAP_ENTRY_INTERRUPT(7)
+	TRAP_ENTRY_INTERRUPT(8)
+	TRAP_ENTRY_INTERRUPT(9)
+	TRAP_ENTRY_INTERRUPT(10)
+	TRAP_ENTRY_INTERRUPT(11)
+	TRAP_ENTRY_INTERRUPT(12)
+	TRAP_ENTRY_INTERRUPT(13)
+	TRAP_ENTRY_INTERRUPT(14)
+	TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+	TRAP_ENTRY(0x20, do_reg_access)
+	BAD_TRAP(0x21)
+	BAD_TRAP(0x22)
+	BAD_TRAP(0x23)
+	TRAP_ENTRY(0x24, do_cp_disabled)
+	SKIP_TRAP(0x25, unimp_flush)
+	BAD_TRAP(0x26)
+	BAD_TRAP(0x27)
+	TRAP_ENTRY(0x28, do_cp_exception)
+	SRMMU_DFAULT
+	TRAP_ENTRY(0x2a, do_hw_divzero)
+	BAD_TRAP(0x2b) BAD_TRAP(0x2c)
+	BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+	BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+	BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+	BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
+	BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
+	BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
+	BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
+	BAD_TRAP(0x50)
+	BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+	BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+	BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+	BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+	BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+	BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+	BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+	BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+	BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+	BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+	BAD_TRAP(0x80)
+	BREAKPOINT_TRAP
+	TRAP_ENTRY(0x82, do_hw_divzero)
+	TRAP_ENTRY(0x83, do_flush_windows)
+	BAD_TRAP(0x84) BAD_TRAP(0x85)
+	BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88)
+	BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
+	BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
+	LINUX_SYSCALL_TRAP
+	BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
+	BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
+	BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
+	BAD_TRAP(0x9f)
+	GETCC_TRAP
+	SETCC_TRAP
+	GETPSR_TRAP
+	BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+	BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+	BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+	BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+	BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+	BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+	BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+	BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+	BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+	BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+	BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+	BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+	BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+	BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+	BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+	BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+	BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+	BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+	BAD_TRAP(0xfc)
+	KGDB_TRAP(0xfd)
+	BAD_TRAP(0xfe)
+	BAD_TRAP(0xff)
+
+#endif
diff --git a/arch/sparc/kernel/ttable.S b/arch/sparc/kernel/ttable_64.S
similarity index 100%
rename from arch/sparc/kernel/ttable.S
rename to arch/sparc/kernel/ttable_64.S
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index dae85bc..f81d038 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -21,7 +21,6 @@
 #include <linux/bitops.h>
 #include <linux/perf_event.h>
 #include <linux/ratelimit.h>
-#include <linux/bitops.h>
 #include <asm/fpumacro.h>
 #include <asm/cacheflush.h>
 
diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S
index 3bbcd8d..4c2de3c 100644
--- a/arch/sparc/kernel/wof.S
+++ b/arch/sparc/kernel/wof.S
@@ -163,9 +163,8 @@
 	 * the label 'spwin_user_stack_is_bolixed' which will take
 	 * care of things at that point.
 	 */
-	.globl	spwin_mmu_patchme
-spwin_mmu_patchme:	b	spwin_sun4c_stackchk
-				 andcc	%sp, 0x7, %g0
+	b	spwin_srmmu_stackchk
+	 andcc	%sp, 0x7, %g0
 
 spwin_good_ustack:
 	/* LOCATION: Window to be saved */
@@ -306,73 +305,6 @@
  * As noted above %curptr cannot be touched by this routine at all.
  */
 
-spwin_sun4c_stackchk:
-	/* LOCATION: Window to be saved on the stack */
-
-	/* See if the stack is in the address space hole but first,
-	 * check results of callers andcc %sp, 0x7, %g0
-	 */
-	be	1f
-	 sra	%sp, 29, %glob_tmp
-
-	rd	%psr, %glob_tmp
-	b	spwin_user_stack_is_bolixed + 0x4
-	 nop
-
-1:
-	add	%glob_tmp, 0x1, %glob_tmp
-	andncc	%glob_tmp, 0x1, %g0
-	be	1f
-	 and	%sp, 0xfff, %glob_tmp		! delay slot
-
-	rd	%psr, %glob_tmp
-	b	spwin_user_stack_is_bolixed + 0x4
-	 nop
-
-	/* See if our dump area will be on more than one
-	 * page.
-	 */
-1:
-	add	%glob_tmp, 0x38, %glob_tmp
-	andncc	%glob_tmp, 0xff8, %g0
-	be	spwin_sun4c_onepage		! only one page to check
-	 lda	[%sp] ASI_PTE, %glob_tmp	! have to check first page anyways
-
-spwin_sun4c_twopages:
-	/* Is first page ok permission wise? */
-	srl	%glob_tmp, 29, %glob_tmp
-	cmp	%glob_tmp, 0x6
-	be	1f
-	 add	%sp, 0x38, %glob_tmp	/* Is second page in vma hole? */
-
-	rd	%psr, %glob_tmp
-	b	spwin_user_stack_is_bolixed + 0x4
-	 nop
-
-1:
-	sra	%glob_tmp, 29, %glob_tmp
-	add	%glob_tmp, 0x1, %glob_tmp
-	andncc	%glob_tmp, 0x1, %g0
-	be	1f
-	 add	%sp, 0x38, %glob_tmp
-
-	rd	%psr, %glob_tmp
-	b	spwin_user_stack_is_bolixed + 0x4
-	 nop
-
-1:
-	lda	[%glob_tmp] ASI_PTE, %glob_tmp
-
-spwin_sun4c_onepage:
-	srl	%glob_tmp, 29, %glob_tmp
-	cmp	%glob_tmp, 0x6				! can user write to it?
-	be	spwin_good_ustack			! success
-	 nop
-
-	rd	%psr, %glob_tmp
-	b	spwin_user_stack_is_bolixed + 0x4
-	 nop
-
 	/* This is a generic SRMMU routine.  As far as I know this
 	 * works for all current v8/srmmu implementations, we'll
 	 * see...
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
index 779ff750..9fde91a 100644
--- a/arch/sparc/kernel/wuf.S
+++ b/arch/sparc/kernel/wuf.S
@@ -131,12 +131,9 @@
 
 	/* LOCATION: Window 'W' */
 
-	/* Branch to the architecture specific stack validation
-	 * routine.  They can be found below...
-	 */
-	.globl	fwin_mmu_patchme
-fwin_mmu_patchme:	b	sun4c_fwin_stackchk
-				 andcc	%sp, 0x7, %g0
+	/* Branch to the stack validation routine */
+	b	srmmu_fwin_stackchk
+	 andcc	%sp, 0x7, %g0
 
 #define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
 
@@ -242,57 +239,6 @@
 	 * 'someone elses' window possibly.
 	 */
 
-	.align	4
-sun4c_fwin_stackchk:
-	/* LOCATION: Window 'W' */
-
-	/* Caller did 'andcc %sp, 0x7, %g0' */
-	be	1f
-	 and	%sp, 0xfff, %l0		! delay slot
-
-	b,a	fwin_user_stack_is_bolixed
-
-	/* See if we have to check the sanity of one page or two */
-1:
-	add	%l0, 0x38, %l0
-	sra	%sp, 29, %l5
-	add	%l5, 0x1, %l5
-	andncc	%l5, 0x1, %g0
-	be	1f
-	 andncc	%l0, 0xff8, %g0
-
-	b,a	fwin_user_stack_is_bolixed	/* %sp is in vma hole, yuck */
-
-1:
-	be	sun4c_fwin_onepage	/* Only one page to check */
-	 lda	[%sp] ASI_PTE, %l1
-sun4c_fwin_twopages:
-	add	%sp, 0x38, %l0
-	sra	%l0, 29, %l5
-	add	%l5, 0x1, %l5
-	andncc	%l5, 0x1, %g0
-	be	1f
-	 lda	[%l0] ASI_PTE, %l1
-
-	b,a	fwin_user_stack_is_bolixed	/* Second page in vma hole */
-
-1:
-	srl	%l1, 29, %l1
-	andcc	%l1, 0x4, %g0
-	bne	sun4c_fwin_onepage
-	 lda	[%sp] ASI_PTE, %l1	
-
-	b,a	fwin_user_stack_is_bolixed	/* Second page has bad perms */
-
-sun4c_fwin_onepage:
-	srl	%l1, 29, %l1
-	andcc	%l1, 0x4, %g0
-	bne	fwin_user_stack_is_ok
-	 nop
-
-	/* A page had bad page permissions, losing... */
-	b,a	fwin_user_stack_is_bolixed
-
 	.globl	srmmu_fwin_stackchk
 srmmu_fwin_stackchk:
 	/* LOCATION: Window 'W' */
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index a3fc437..389628f 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -4,7 +4,7 @@
 asflags-y := -ansi -DST_DIV0=0x02
 ccflags-y := -Werror
 
-lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
+lib-$(CONFIG_SPARC32) += ashrdi3.o
 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
 lib-y                 += strlen.o
 lib-y                 += checksum_$(BITS).o
@@ -13,7 +13,7 @@
 lib-y                 += strncpy_from_user_$(BITS).o strlen_user_$(BITS).o
 lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
 lib-$(CONFIG_SPARC32) += copy_user.o locks.o
-lib-y                 += atomic_$(BITS).o
+lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 
@@ -40,7 +40,7 @@
 lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
 
 obj-y                 += iomap.o
-obj-$(CONFIG_SPARC32) += atomic32.o
+obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
 obj-y                 += ksyms.o
 obj-$(CONFIG_SPARC64) += PeeCeeI.o
 obj-y                 += usercopy.o
diff --git a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S
index 17912e6..86f60de 100644
--- a/arch/sparc/lib/ashldi3.S
+++ b/arch/sparc/lib/ashldi3.S
@@ -5,10 +5,10 @@
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
  */
 
+#include <linux/linkage.h>
+
 	.text
-	.align	4
-	.globl	__ashldi3
-__ashldi3:
+ENTRY(__ashldi3)
 	cmp	%o2, 0
 	be	9f
 	 mov	0x20, %g2
@@ -32,3 +32,4 @@
 9:
 	retl
 	 nop
+ENDPROC(__ashldi3)
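
Most of the assembly hunks in this series make the same mechanical change: the open-coded .globl/.align/.type/.size bookkeeping around each routine is replaced by the generic ENTRY()/ENDPROC() annotations from <linux/linkage.h>. A rough sketch of what those assembler-side macros amount to (paraphrased, not the verbatim header; the real ALIGN and ASM_NL handling is arch- and version-dependent):

/* Approximate assembler-side definitions from <linux/linkage.h>.
 * Illustrative only: the real header also deals with ASM_NL, per-arch
 * ALIGN overrides and the C-side linkage macros.
 */
#ifdef __ASSEMBLY__

#ifndef ENTRY
#define ENTRY(name)	\
	.globl name;	\
	.align 4;	\
	name:
#endif

#ifndef END
#define END(name)	\
	.size name, .-name
#endif

#ifndef ENDPROC
#define ENDPROC(name)		\
	.type name, @function;	\
	END(name)
#endif

#endif /* __ASSEMBLY__ */

The net effect is the same symbol type/size information with less per-file boilerplate, which is why the -/+ pairs in these hunks line up so regularly.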
diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S
index 85398fd6..6eb8ba2 100644
--- a/arch/sparc/lib/ashrdi3.S
+++ b/arch/sparc/lib/ashrdi3.S
@@ -5,10 +5,10 @@
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
  */
 
+#include <linux/linkage.h>
+
 	.text
-	.align	4
-	.globl __ashrdi3
-__ashrdi3:
+ENTRY(__ashrdi3)
 	tst	%o2
 	be	3f
 	 or	%g0, 32, %g2
@@ -34,3 +34,4 @@
 3:
 	jmpl	%o7 + 8, %g0
 	 nop
+ENDPROC(__ashrdi3)
diff --git a/arch/sparc/lib/atomic_32.S b/arch/sparc/lib/atomic_32.S
deleted file mode 100644
index eb6c735..0000000
--- a/arch/sparc/lib/atomic_32.S
+++ /dev/null
@@ -1,44 +0,0 @@
-/* atomic.S: Move this stuff here for better ICACHE hit rates.
- *
- * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
- */
-
-#include <asm/ptrace.h>
-#include <asm/psr.h>
-
-	.text
-	.align	4
-
-	.globl  __atomic_begin
-__atomic_begin:
-
-#ifndef CONFIG_SMP
-	.globl	___xchg32_sun4c
-___xchg32_sun4c:
-	rd	%psr, %g3
-	andcc	%g3, PSR_PIL, %g0
-	bne	1f
-	 nop
-	wr	%g3, PSR_PIL, %psr
-	nop; nop; nop
-1:
-	andcc	%g3, PSR_PIL, %g0
-	ld	[%g1], %g7
-	bne	1f
-	 st	%g2, [%g1]
-	wr	%g3, 0x0, %psr
-	nop; nop; nop
-1:
-	mov	%g7, %g2
-	jmpl	%o7 + 8, %g0
-	 mov	%g4, %o7
-
-	.globl	___xchg32_sun4md
-___xchg32_sun4md:
-	swap	[%g1], %g2
-	jmpl	%o7 + 8, %g0
-	 mov	%g4, %o7
-#endif
-
-	.globl  __atomic_end
-__atomic_end:
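
The deleted atomic_32.S contained nothing but the UP-only ___xchg32_sun4c/___xchg32_sun4md exchange helpers, which go away together with sun4c support. For reference, the sun4c variant is just an interrupt-masked load/store exchange; an illustrative C equivalent (the function name here is made up, this is not kernel code):

#include <linux/irqflags.h>

/* Illustrative sketch of what the deleted ___xchg32_sun4c did on a UP
 * kernel: mask interrupts (raise PIL, as the 'wr %g3, PSR_PIL, %psr'
 * sequence did), then perform a plain load/store exchange.  The
 * ___xchg32_sun4md variant simply used the 'swap' instruction instead.
 */
static inline unsigned long up_xchg32(volatile unsigned long *ptr,
				      unsigned long val)
{
	unsigned long flags, old;

	local_irq_save(flags);
	old = *ptr;
	*ptr = val;
	local_irq_restore(flags);
	return old;
}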
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 59186e0..4d502da 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -3,6 +3,7 @@
  * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
  */
 
+#include <linux/linkage.h>
 #include <asm/asi.h>
 #include <asm/backoff.h>
 
@@ -13,9 +14,7 @@
 	 * memory barriers, and a second which returns
 	 * a value and does the barriers.
 	 */
-	.globl	atomic_add
-	.type	atomic_add,#function
-atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw	[%o1], %g1
 	add	%g1, %o0, %g7
@@ -26,11 +25,9 @@
 	retl
 	 nop
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size	atomic_add, .-atomic_add
+ENDPROC(atomic_add)
 
-	.globl	atomic_sub
-	.type	atomic_sub,#function
-atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw	[%o1], %g1
 	sub	%g1, %o0, %g7
@@ -41,11 +38,9 @@
 	retl
 	 nop
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size	atomic_sub, .-atomic_sub
+ENDPROC(atomic_sub)
 
-	.globl	atomic_add_ret
-	.type	atomic_add_ret,#function
-atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw	[%o1], %g1
 	add	%g1, %o0, %g7
@@ -56,11 +51,9 @@
 	retl
 	 sra	%g1, 0, %o0
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size	atomic_add_ret, .-atomic_add_ret
+ENDPROC(atomic_add_ret)
 
-	.globl	atomic_sub_ret
-	.type	atomic_sub_ret,#function
-atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	lduw	[%o1], %g1
 	sub	%g1, %o0, %g7
@@ -71,11 +64,9 @@
 	retl
 	 sra	%g1, 0, %o0
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size	atomic_sub_ret, .-atomic_sub_ret
+ENDPROC(atomic_sub_ret)
 
-	.globl	atomic64_add
-	.type	atomic64_add,#function
-atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx	[%o1], %g1
 	add	%g1, %o0, %g7
@@ -86,11 +77,9 @@
 	retl
 	 nop
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size	atomic64_add, .-atomic64_add
+ENDPROC(atomic64_add)
 
-	.globl	atomic64_sub
-	.type	atomic64_sub,#function
-atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx	[%o1], %g1
 	sub	%g1, %o0, %g7
@@ -101,11 +90,9 @@
 	retl
 	 nop
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size	atomic64_sub, .-atomic64_sub
+ENDPROC(atomic64_sub)
 
-	.globl	atomic64_add_ret
-	.type	atomic64_add_ret,#function
-atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx	[%o1], %g1
 	add	%g1, %o0, %g7
@@ -116,11 +103,9 @@
 	retl
 	 add	%g1, %o0, %o0
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size	atomic64_add_ret, .-atomic64_add_ret
+ENDPROC(atomic64_add_ret)
 
-	.globl	atomic64_sub_ret
-	.type	atomic64_sub_ret,#function
-atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
 1:	ldx	[%o1], %g1
 	sub	%g1, %o0, %g7
@@ -131,4 +116,4 @@
 	retl
 	 sub	%g1, %o0, %o0
 2:	BACKOFF_SPIN(%o2, %o3, 1b)
-	.size	atomic64_sub_ret, .-atomic64_sub_ret
+ENDPROC(atomic64_sub_ret)
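
Beyond the ENTRY()/ENDPROC() conversion, the bodies above show the pattern every sparc64 atomic shares: load the old value, compute the new one, try to cas it in, and spin (with backoff) if another CPU changed the word in between. A minimal C sketch of the same retry loop, using a GCC builtin in place of the hand-written cas/BACKOFF_SPIN sequence:

/* Sketch of the load/compute/compare-and-swap retry loop behind
 * atomic_add_ret() and friends.  Not the kernel implementation: there is
 * no exponential backoff, and the builtin stands in for the cas insn.
 */
static int sketch_atomic_add_ret(int increment, volatile int *ptr)
{
	int old, new;

	do {
		old = *ptr;			/* lduw [%o1], %g1    */
		new = old + increment;		/* add  %g1, %o0, %g7 */
	} while (__sync_val_compare_and_swap(ptr, old, new) != old);

	return new;		/* the _ret variants hand back the result */
}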
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S
index 3dc61d5..36f72cc 100644
--- a/arch/sparc/lib/bitops.S
+++ b/arch/sparc/lib/bitops.S
@@ -3,14 +3,13 @@
  * Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net)
  */
 
+#include <linux/linkage.h>
 #include <asm/asi.h>
 #include <asm/backoff.h>
 
 	.text
 
-	.globl	test_and_set_bit
-	.type	test_and_set_bit,#function
-test_and_set_bit:	/* %o0=nr, %o1=addr */
+ENTRY(test_and_set_bit)	/* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx	%o0, 6, %g1
 	mov	1, %o2
@@ -29,11 +28,9 @@
 	retl
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size	test_and_set_bit, .-test_and_set_bit
+ENDPROC(test_and_set_bit)
 
-	.globl	test_and_clear_bit
-	.type	test_and_clear_bit,#function
-test_and_clear_bit:	/* %o0=nr, %o1=addr */
+ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx	%o0, 6, %g1
 	mov	1, %o2
@@ -52,11 +49,9 @@
 	retl
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size	test_and_clear_bit, .-test_and_clear_bit
+ENDPROC(test_and_clear_bit)
 
-	.globl	test_and_change_bit
-	.type	test_and_change_bit,#function
-test_and_change_bit:	/* %o0=nr, %o1=addr */
+ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx	%o0, 6, %g1
 	mov	1, %o2
@@ -75,11 +70,9 @@
 	retl
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size	test_and_change_bit, .-test_and_change_bit
+ENDPROC(test_and_change_bit)
 
-	.globl	set_bit
-	.type	set_bit,#function
-set_bit:		/* %o0=nr, %o1=addr */
+ENTRY(set_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx	%o0, 6, %g1
 	mov	1, %o2
@@ -96,11 +89,9 @@
 	retl
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size	set_bit, .-set_bit
+ENDPROC(set_bit)
 
-	.globl	clear_bit
-	.type	clear_bit,#function
-clear_bit:		/* %o0=nr, %o1=addr */
+ENTRY(clear_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx	%o0, 6, %g1
 	mov	1, %o2
@@ -117,11 +108,9 @@
 	retl
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size	clear_bit, .-clear_bit
+ENDPROC(clear_bit)
 
-	.globl	change_bit
-	.type	change_bit,#function
-change_bit:		/* %o0=nr, %o1=addr */
+ENTRY(change_bit) /* %o0=nr, %o1=addr */
 	BACKOFF_SETUP(%o3)
 	srlx	%o0, 6, %g1
 	mov	1, %o2
@@ -138,4 +127,4 @@
 	retl
 	 nop
 2:	BACKOFF_SPIN(%o3, %o4, 1b)
-	.size	change_bit, .-change_bit
+ENDPROC(change_bit)
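
The bitops use the same cas loop, just with the target word and mask derived from the bit number first. Roughly, in C (illustrative sketch, assuming 64-bit longs as on sparc64):

/* Sketch of test_and_set_bit(): index the 64-bit word (nr / 64, the
 * 'srlx %o0, 6, %g1' above), build the mask (1UL << (nr % 64)), then
 * CAS until the update sticks.  Returns the previous state of the bit.
 */
static int sketch_test_and_set_bit(unsigned long nr,
				   volatile unsigned long *addr)
{
	volatile unsigned long *word = addr + (nr >> 6);
	unsigned long mask = 1UL << (nr & 63);
	unsigned long old;

	do {
		old = *word;
	} while (__sync_val_compare_and_swap(word, old, old | mask) != old);

	return (old & mask) != 0;
}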
diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S
index 804be87..3c77101 100644
--- a/arch/sparc/lib/blockops.S
+++ b/arch/sparc/lib/blockops.S
@@ -4,6 +4,7 @@
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  */
 
+#include <linux/linkage.h>
 #include <asm/page.h>
 
 	/* Zero out 64 bytes of memory at (buf + offset).
@@ -44,10 +45,7 @@
 	 */
 
 	.text
-	.align	4
-	.globl	bzero_1page, __copy_1page
-
-bzero_1page:
+ENTRY(bzero_1page)
 /* NOTE: If you change the number of insns of this routine, please check
  * arch/sparc/mm/hypersparc.S */
 	/* %o0 = buf */
@@ -65,8 +63,9 @@
 
 	retl
 	 nop
+ENDPROC(bzero_1page)
 
-__copy_1page:
+ENTRY(__copy_1page)
 /* NOTE: If you change the number of insns of this routine, please check
  * arch/sparc/mm/hypersparc.S */
 	/* %o0 = dst, %o1 = src */
@@ -87,3 +86,4 @@
 
 	retl
 	 nop
+ENDPROC(__copy_1page)
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index 615f401..8c05811 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -4,11 +4,11 @@
  * Copyright (C) 2005 David S. Miller <davem@davemloft.net>
  */
 
+#include <linux/linkage.h>
+
 	.text
 
-	.globl	memset
-	.type	memset, #function
-memset:			/* %o0=buf, %o1=pat, %o2=len */
+ENTRY(memset) /* %o0=buf, %o1=pat, %o2=len */
 	and		%o1, 0xff, %o3
 	mov		%o2, %o1
 	sllx		%o3, 8, %g1
@@ -19,9 +19,7 @@
 	ba,pt		%xcc, 1f
 	 or		%g1, %o2, %o2
 
-	.globl	__bzero
-	.type	__bzero, #function
-__bzero:		/* %o0=buf, %o1=len */
+ENTRY(__bzero) /* %o0=buf, %o1=len */
 	clr		%o2
 1:	mov		%o0, %o3
 	brz,pn		%o1, __bzero_done
@@ -78,8 +76,8 @@
 __bzero_done:
 	retl
 	 mov		%o3, %o0
-	.size		__bzero, .-__bzero
-	.size		memset, .-memset
+ENDPROC(__bzero)
+ENDPROC(memset)
 
 #define EX_ST(x,y)		\
 98:	x,y;			\
@@ -89,9 +87,7 @@
 	.text;			\
 	.align 4;
 
-	.globl	__clear_user
-	.type	__clear_user, #function
-__clear_user:		/* %o0=buf, %o1=len */
+ENTRY(__clear_user) /* %o0=buf, %o1=len */
 	brz,pn		%o1, __clear_user_done
 	 cmp		%o1, 16
 	bl,pn		%icc, __clear_user_tiny
@@ -146,4 +142,4 @@
 __clear_user_done:
 	retl
 	 clr		%o0
-	.size		__clear_user, .-__clear_user
+ENDPROC(__clear_user)
diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S
index d74bc09..9614b48 100644
--- a/arch/sparc/lib/divdi3.S
+++ b/arch/sparc/lib/divdi3.S
@@ -19,7 +19,6 @@
 
 	.text
 	.align 4
-	.global .udiv
 	.globl __divdi3
 __divdi3:
 	save %sp,-104,%sp
@@ -83,8 +82,9 @@
 	bne .LL85
 	mov %i0,%o2
 	mov 1,%o0
-	call .udiv,0
 	mov 0,%o1
+	wr %g0, 0, %y
+	udiv %o0, %o1, %o0
 	mov %o0,%o4
 	mov %i0,%o2
 .LL85:
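
The interesting part of this hunk is the replacement of the 'call .udiv' into the now-deleted software divider with the hardware udiv instruction, preceded by 'wr %g0, 0, %y'. On SPARC V8, udiv divides the 64-bit value formed by the Y register (upper half) and rs1 (lower half) by rs2, so Y has to be zeroed first to get a plain 32-bit unsigned divide. In C terms (sketch only; the divide-by-zero/overflow traps are not modelled):

#include <stdint.h>

/* Model of the 'wr %g0, 0, %y; udiv %o0, %o1, %o0' sequence: a 32-bit
 * unsigned divide, expressed via the 64-bit {Y, rs1} dividend that the
 * V8 udiv instruction actually uses.
 */
static uint32_t v8_udiv_y_zero(uint32_t rs1, uint32_t rs2)
{
	uint64_t dividend = ((uint64_t)0 << 32) | rs1;	/* Y == 0 */

	return (uint32_t)(dividend / rs2);
}

The same substitution shows up again in udivdi3.S further down.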
diff --git a/arch/sparc/lib/ipcsum.S b/arch/sparc/lib/ipcsum.S
index 58ca5b9..4742d59 100644
--- a/arch/sparc/lib/ipcsum.S
+++ b/arch/sparc/lib/ipcsum.S
@@ -1,8 +1,7 @@
+#include <linux/linkage.h>
+
 	.text
-	.align	32
-	.globl	ip_fast_csum
-	.type	ip_fast_csum,#function
-ip_fast_csum:	/* %o0 = iph, %o1 = ihl */
+ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
 	sub	%o1, 4, %g7
 	lduw	[%o0 + 0x00], %o2
 	lduw	[%o0 + 0x04], %g2
@@ -31,4 +30,4 @@
 	set	0xffff, %o1
 	retl
 	 and	%o2, %o1, %o0
-	.size	ip_fast_csum, .-ip_fast_csum
+ENDPROC(ip_fast_csum)
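
For reference, ip_fast_csum() computes the standard 16-bit one's-complement checksum over an IPv4 header of ihl 32-bit words; the assembly above is tuned for the common ihl == 5 case. A plain C model of the same computation (illustrative, not the kernel's optimised version):

#include <stdint.h>

/* One's-complement checksum of an IPv4 header: sum the 16-bit words,
 * fold the carries back in, and return the complement.
 */
static uint16_t ip_fast_csum_model(const void *iph, unsigned int ihl)
{
	const uint16_t *p = iph;
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < ihl * 2; i++)	/* ihl 32-bit words = 2*ihl halfwords */
		sum += p[i];

	while (sum >> 16)		/* end-around carry fold */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}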
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index f73c224..2dc3087 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -56,23 +56,11 @@
 extern void (*__copy_1page)(void *, const void *);
 extern void (*bzero_1page)(void *);
 
-extern int __strncmp(const char *, const char *, __kernel_size_t);
-
 extern void ___rw_read_enter(void);
 extern void ___rw_read_try(void);
 extern void ___rw_read_exit(void);
 extern void ___rw_write_enter(void);
 
-/* Alias functions whose names begin with "." and export the aliases.
- * The module references will be fixed up by module_frob_arch_sections.
- */
-extern int _Div(int, int);
-extern int _Mul(int, int);
-extern int _Rem(int, int);
-extern unsigned _Udiv(unsigned, unsigned);
-extern unsigned _Umul(unsigned, unsigned);
-extern unsigned _Urem(unsigned, unsigned);
-
 /* Networking helper routines. */
 EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
 
@@ -81,9 +69,6 @@
 EXPORT_SYMBOL(__memmove);
 EXPORT_SYMBOL(bzero_1page);
 
-/* string functions */
-EXPORT_SYMBOL(__strncmp);
-
 /* Moving data to/from/in userspace. */
 EXPORT_SYMBOL(__copy_user);
 
@@ -100,13 +85,6 @@
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__muldi3);
 EXPORT_SYMBOL(__divdi3);
-
-EXPORT_SYMBOL(_Rem);
-EXPORT_SYMBOL(_Urem);
-EXPORT_SYMBOL(_Mul);
-EXPORT_SYMBOL(_Umul);
-EXPORT_SYMBOL(_Div);
-EXPORT_SYMBOL(_Udiv);
 #endif
 
 /*
diff --git a/arch/sparc/lib/lshrdi3.S b/arch/sparc/lib/lshrdi3.S
index 47a1354..60ebc7c 100644
--- a/arch/sparc/lib/lshrdi3.S
+++ b/arch/sparc/lib/lshrdi3.S
@@ -1,6 +1,6 @@
+#include <linux/linkage.h>
 
-	.globl	__lshrdi3
-__lshrdi3:
+ENTRY(__lshrdi3)
 	cmp	%o2, 0
 	be	3f
 	 mov	0x20, %g2
@@ -24,3 +24,4 @@
 3:
 	retl 
 	 nop 
+ENDPROC(__lshrdi3)
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index 9739580..b7f6334 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -4,11 +4,10 @@
  * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
  */
 
+#include <linux/linkage.h>
+
 	.text
-	.align		32
-	.globl		memmove
-	.type		memmove,#function
-memmove:		/* o0=dst o1=src o2=len */
+ENTRY(memmove) /* o0=dst o1=src o2=len */
 	mov		%o0, %g1
 	cmp		%o0, %o1
 	bleu,pt		%xcc, memcpy
@@ -28,4 +27,4 @@
 
 	retl
 	 mov		%g1, %o0
-	.size		memmove, .-memmove
+ENDPROC(memmove)
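
The context lines show memmove()'s whole overlap policy: when the destination does not start above the source a forward memcpy() is safe, otherwise the copy runs backwards byte by byte. An equivalent sketch in C (the cross-object pointer comparison is as informal here as it is in the assembly):

#include <stddef.h>
#include <string.h>

/* Sketch of the sparc memmove() policy: forward memcpy() when dst <= src,
 * otherwise copy tail-first so an overlapping region is not clobbered.
 */
static void *memmove_model(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (d <= s)
		return memcpy(dst, src, len);

	while (len--)
		d[len] = s[len];

	return dst;
}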
diff --git a/arch/sparc/lib/mul.S b/arch/sparc/lib/mul.S
deleted file mode 100644
index c45470d..0000000
--- a/arch/sparc/lib/mul.S
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * mul.S:       This routine was taken from glibc-1.09 and is covered
- *              by the GNU Library General Public License Version 2.
- */
-
-/*
- * Signed multiply, from Appendix E of the Sparc Version 8
- * Architecture Manual.
- */
-
-/*
- * Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the upper 32 bits of
- * the 64-bit product).
- *
- * This code optimizes short (less than 13-bit) multiplies.
- */
-
-	.globl .mul
-	.globl _Mul
-.mul:
-_Mul:	/* needed for export */
-	mov	%o0, %y		! multiplier -> Y
-	andncc	%o0, 0xfff, %g0	! test bits 12..31
-	be	Lmul_shortway	! if zero, can do it the short way
-	 andcc	%g0, %g0, %o4	! zero the partial product and clear N and V
-
-	/*
-	 * Long multiply.  32 steps, followed by a final shift step.
-	 */
-	mulscc	%o4, %o1, %o4	! 1
-	mulscc	%o4, %o1, %o4	! 2
-	mulscc	%o4, %o1, %o4	! 3
-	mulscc	%o4, %o1, %o4	! 4
-	mulscc	%o4, %o1, %o4	! 5
-	mulscc	%o4, %o1, %o4	! 6
-	mulscc	%o4, %o1, %o4	! 7
-	mulscc	%o4, %o1, %o4	! 8
-	mulscc	%o4, %o1, %o4	! 9
-	mulscc	%o4, %o1, %o4	! 10
-	mulscc	%o4, %o1, %o4	! 11
-	mulscc	%o4, %o1, %o4	! 12
-	mulscc	%o4, %o1, %o4	! 13
-	mulscc	%o4, %o1, %o4	! 14
-	mulscc	%o4, %o1, %o4	! 15
-	mulscc	%o4, %o1, %o4	! 16
-	mulscc	%o4, %o1, %o4	! 17
-	mulscc	%o4, %o1, %o4	! 18
-	mulscc	%o4, %o1, %o4	! 19
-	mulscc	%o4, %o1, %o4	! 20
-	mulscc	%o4, %o1, %o4	! 21
-	mulscc	%o4, %o1, %o4	! 22
-	mulscc	%o4, %o1, %o4	! 23
-	mulscc	%o4, %o1, %o4	! 24
-	mulscc	%o4, %o1, %o4	! 25
-	mulscc	%o4, %o1, %o4	! 26
-	mulscc	%o4, %o1, %o4	! 27
-	mulscc	%o4, %o1, %o4	! 28
-	mulscc	%o4, %o1, %o4	! 29
-	mulscc	%o4, %o1, %o4	! 30
-	mulscc	%o4, %o1, %o4	! 31
-	mulscc	%o4, %o1, %o4	! 32
-	mulscc	%o4, %g0, %o4	! final shift
-
-	! If %o0 was negative, the result is
-	!	(%o0 * %o1) + (%o1 << 32))
-	! We fix that here.
-
-#if 0
-	tst	%o0
-	bge	1f
-	 rd	%y, %o0
-
-	! %o0 was indeed negative; fix upper 32 bits of result by subtracting 
-	! %o1 (i.e., return %o4 - %o1 in %o1).
-	retl
-	 sub	%o4, %o1, %o1
-
-1:
-	retl
-	 mov	%o4, %o1
-#else
-	/* Faster code adapted from tege@sics.se's code for umul.S.  */
-	sra	%o0, 31, %o2	! make mask from sign bit
-	and	%o1, %o2, %o2	! %o2 = 0 or %o1, depending on sign of %o0
-	rd	%y, %o0		! get lower half of product
-	retl
-	 sub	%o4, %o2, %o1	! subtract compensation 
-				!  and put upper half in place
-#endif
-
-Lmul_shortway:
-	/*
-	 * Short multiply.  12 steps, followed by a final shift step.
-	 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
-	 * but there is no problem with %o0 being negative (unlike above).
-	 */
-	mulscc	%o4, %o1, %o4	! 1
-	mulscc	%o4, %o1, %o4	! 2
-	mulscc	%o4, %o1, %o4	! 3
-	mulscc	%o4, %o1, %o4	! 4
-	mulscc	%o4, %o1, %o4	! 5
-	mulscc	%o4, %o1, %o4	! 6
-	mulscc	%o4, %o1, %o4	! 7
-	mulscc	%o4, %o1, %o4	! 8
-	mulscc	%o4, %o1, %o4	! 9
-	mulscc	%o4, %o1, %o4	! 10
-	mulscc	%o4, %o1, %o4	! 11
-	mulscc	%o4, %o1, %o4	! 12
-	mulscc	%o4, %g0, %o4	! final shift
-
-	/*
-	 *  %o4 has 20 of the bits that should be in the low part of the
-	 * result; %y has the bottom 12 (as %y's top 12).  That is:
-	 *
-	 *	  %o4		    %y
-	 * +----------------+----------------+
-	 * | -12- |   -20-  | -12- |   -20-  |
-	 * +------(---------+------)---------+
-	 *  --hi-- ----low-part----
-	 *
-	 * The upper 12 bits of %o4 should be sign-extended to form the
-	 * high part of the product (i.e., highpart = %o4 >> 20).
-	 */
-
-	rd	%y, %o5
-	sll	%o4, 12, %o0	! shift middle bits left 12
-	srl	%o5, 20, %o5	! shift low bits right 20, zero fill at left
-	or	%o5, %o0, %o0	! construct low part of result
-	retl
-	 sra	%o4, 20, %o1	! ... and extract high part of result
-
-	.globl	.mul_patch
-.mul_patch:
-	smul	%o0, %o1, %o0
-	retl
-	 rd	%y, %o1
-	nop
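
The comments in the deleted .mul are worth a note: after the mulscc shift-and-add loop, a negative multiplier leaves the upper word off by %o1 << 32, which the routine fixed with a single subtract (the multiplicand's sign is already absorbed by the N-xor-V shift). The general identity relating the unsigned 64-bit product to the signed one, shown here as an illustration rather than a model of mulscc itself, is easy to check in C:

#include <stdint.h>

/* a * b (signed) recovered from the unsigned 32x32->64 product by
 * subtracting (other operand << 32) for each negative input, modulo 2^64.
 * The deleted .mul only needed the first correction because mulscc's
 * N-xor-V shift already accounts for the multiplicand's sign.
 */
static int64_t signed_mul_via_unsigned(int32_t a, int32_t b)
{
	uint64_t prod = (uint64_t)(uint32_t)a * (uint32_t)b;

	if (a < 0)
		prod -= (uint64_t)(uint32_t)b << 32;
	if (b < 0)
		prod -= (uint64_t)(uint32_t)a << 32;

	return (int64_t)prod;	/* e.g. signed_mul_via_unsigned(-3, 7) == -21 */
}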
diff --git a/arch/sparc/lib/muldi3.S b/arch/sparc/lib/muldi3.S
index 7f17872..9794939 100644
--- a/arch/sparc/lib/muldi3.S
+++ b/arch/sparc/lib/muldi3.S
@@ -63,12 +63,12 @@
 	rd  %y, %o1
 	mov  %o1, %l3
 	mov  %i1, %o0
-	call  .umul
 	mov  %i2, %o1
+	umul %o0, %o1, %o0
 	mov  %o0, %l0
 	mov  %i0, %o0
-	call  .umul
 	mov  %i3, %o1
+	umul %o0, %o1, %o0
 	add  %l0, %o0, %l0
 	mov  %l2, %i0
 	add  %l2, %l0, %i0
diff --git a/arch/sparc/lib/rem.S b/arch/sparc/lib/rem.S
deleted file mode 100644
index 42fb862..0000000
--- a/arch/sparc/lib/rem.S
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- * rem.S:       This routine was taken from glibc-1.09 and is covered
- *              by the GNU Library General Public License Version 2.
- */
-
-
-/* This file is generated from divrem.m4; DO NOT EDIT! */
-/*
- * Division and remainder, from Appendix E of the Sparc Version 8
- * Architecture Manual, with fixes from Gordon Irlam.
- */
-
-/*
- * Input: dividend and divisor in %o0 and %o1 respectively.
- *
- * m4 parameters:
- *  .rem	name of function to generate
- *  rem		rem=div => %o0 / %o1; rem=rem => %o0 % %o1
- *  true		true=true => signed; true=false => unsigned
- *
- * Algorithm parameters:
- *  N		how many bits per iteration we try to get (4)
- *  WORDSIZE	total number of bits (32)
- *
- * Derived constants:
- *  TOPBITS	number of bits in the top decade of a number
- *
- * Important variables:
- *  Q		the partial quotient under development (initially 0)
- *  R		the remainder so far, initially the dividend
- *  ITER	number of main division loop iterations required;
- *		equal to ceil(log2(quotient) / N).  Note that this
- *		is the log base (2^N) of the quotient.
- *  V		the current comparand, initially divisor*2^(ITER*N-1)
- *
- * Cost:
- *  Current estimate for non-large dividend is
- *	ceil(log2(quotient) / N) * (10 + 7N/2) + C
- *  A large dividend is one greater than 2^(31-TOPBITS) and takes a
- *  different path, as the upper bits of the quotient must be developed
- *  one bit at a time.
- */
-
-
-	.globl .rem
-	.globl _Rem
-.rem:
-_Rem:	/* needed for export */
-	! compute sign of result; if neither is negative, no problem
-	orcc	%o1, %o0, %g0	! either negative?
-	bge	2f			! no, go do the divide
-	 mov	%o0, %g2	! compute sign in any case
-
-	tst	%o1
-	bge	1f
-	 tst	%o0
-	! %o1 is definitely negative; %o0 might also be negative
-	bge	2f			! if %o0 not negative...
-	 sub	%g0, %o1, %o1	! in any case, make %o1 nonneg
-1:	! %o0 is negative, %o1 is nonnegative
-	sub	%g0, %o0, %o0	! make %o0 nonnegative
-2:
-
-	! Ready to divide.  Compute size of quotient; scale comparand.
-	orcc	%o1, %g0, %o5
-	bne	1f
-	 mov	%o0, %o3
-
-		! Divide by zero trap.  If it returns, return 0 (about as
-		! wrong as possible, but that is what SunOS does...).
-		ta	ST_DIV0
-		retl
-		 clr	%o0
-
-1:
-	cmp	%o3, %o5			! if %o1 exceeds %o0, done
-	blu	Lgot_result		! (and algorithm fails otherwise)
-	 clr	%o2
-
-	sethi	%hi(1 << (32 - 4 - 1)), %g1
-
-	cmp	%o3, %g1
-	blu	Lnot_really_big
-	 clr	%o4
-
-	! Here the dividend is >= 2**(31-N) or so.  We must be careful here,
-	! as our usual N-at-a-shot divide step will cause overflow and havoc.
-	! The number of bits in the result here is N*ITER+SC, where SC <= N.
-	! Compute ITER in an unorthodox manner: know we need to shift V into
-	! the top decade: so do not even bother to compare to R.
-	1:
-		cmp	%o5, %g1
-		bgeu	3f
-		 mov	1, %g7
-
-		sll	%o5, 4, %o5
-
-		b	1b
-		 add	%o4, 1, %o4
-
-	! Now compute %g7.
-	2:
-		addcc	%o5, %o5, %o5
-
-		bcc	Lnot_too_big
-		 add	%g7, 1, %g7
-
-		! We get here if the %o1 overflowed while shifting.
-		! This means that %o3 has the high-order bit set.
-		! Restore %o5 and subtract from %o3.
-		sll	%g1, 4, %g1	! high order bit
-		srl	%o5, 1, %o5		! rest of %o5
-		add	%o5, %g1, %o5
-
-		b	Ldo_single_div
-		 sub	%g7, 1, %g7
-
-	Lnot_too_big:
-	3:
-		cmp	%o5, %o3
-		blu	2b
-		 nop
-
-		be	Ldo_single_div
-		 nop
-	/* NB: these are commented out in the V8-Sparc manual as well */
-	/* (I do not understand this) */
-	! %o5 > %o3: went too far: back up 1 step
-	!	srl	%o5, 1, %o5
-	!	dec	%g7
-	! do single-bit divide steps
-	!
-	! We have to be careful here.  We know that %o3 >= %o5, so we can do the
-	! first divide step without thinking.  BUT, the others are conditional,
-	! and are only done if %o3 >= 0.  Because both %o3 and %o5 may have the high-
-	! order bit set in the first step, just falling into the regular
-	! division loop will mess up the first time around.
-	! So we unroll slightly...
-	Ldo_single_div:
-		subcc	%g7, 1, %g7
-		bl	Lend_regular_divide
-		 nop
-
-		sub	%o3, %o5, %o3
-		mov	1, %o2
-
-		b	Lend_single_divloop
-		 nop
-	Lsingle_divloop:
-		sll	%o2, 1, %o2
-
-		bl	1f
-		 srl	%o5, 1, %o5
-		! %o3 >= 0
-		sub	%o3, %o5, %o3
-
-		b	2f
-		 add	%o2, 1, %o2
-	1:	! %o3 < 0
-		add	%o3, %o5, %o3
-		sub	%o2, 1, %o2
-	2:
-	Lend_single_divloop:
-		subcc	%g7, 1, %g7
-		bge	Lsingle_divloop
-		 tst	%o3
-
-		b,a	Lend_regular_divide
-
-Lnot_really_big:
-1:
-	sll	%o5, 4, %o5
-	cmp	%o5, %o3
-	bleu	1b
-	 addcc	%o4, 1, %o4
-	be	Lgot_result
-	 sub	%o4, 1, %o4
-
-	tst	%o3	! set up for initial iteration
-Ldivloop:
-	sll	%o2, 4, %o2
-		! depth 1, accumulated bits 0
-	bl	L.1.16
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 2, accumulated bits 1
-	bl	L.2.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 3, accumulated bits 3
-	bl	L.3.19
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 7
-	bl	L.4.23
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-
-	b	9f
-	 add	%o2, (7*2+1), %o2
-	
-L.4.23:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (7*2-1), %o2
-	
-L.3.19:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 5
-	bl	L.4.21
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (5*2+1), %o2
-	
-L.4.21:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (5*2-1), %o2
-	
-L.2.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 3, accumulated bits 1
-	bl	L.3.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 3
-	bl	L.4.19
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (3*2+1), %o2
-
-L.4.19:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (3*2-1), %o2
-
-L.3.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 1
-	bl	L.4.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (1*2+1), %o2
-
-L.4.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (1*2-1), %o2
-
-L.1.16:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 2, accumulated bits -1
-	bl	L.2.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 3, accumulated bits -1
-	bl	L.3.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -1
-	bl	L.4.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-1*2+1), %o2
-
-L.4.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-1*2-1), %o2
-
-L.3.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -3
-	bl	L.4.13
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-3*2+1), %o2
-
-L.4.13:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-3*2-1), %o2
-
-L.2.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 3, accumulated bits -3
-	bl	L.3.13
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -5
-	bl	L.4.11
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-5*2+1), %o2
-
-L.4.11:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-5*2-1), %o2
-
-
-L.3.13:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -7
-	bl	L.4.9
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-7*2+1), %o2
-
-L.4.9:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-7*2-1), %o2
-
-	9:
-Lend_regular_divide:
-	subcc	%o4, 1, %o4
-	bge	Ldivloop
-	 tst	%o3
-
-	bl,a	Lgot_result
-	! non-restoring fixup here (one instruction only!)
-	add	%o3, %o1, %o3
-
-Lgot_result:
-	! check to see if answer should be < 0
-	tst	%g2
-	bl,a	1f
-	 sub %g0, %o3, %o3
-1:
-	retl
-	 mov %o3, %o0
-
-	.globl	.rem_patch
-.rem_patch:
-	sra	%o0, 0x1f, %o4
-	wr	%o4, 0x0, %y
-	nop
-	nop
-	nop
-	sdivcc	%o0, %o1, %o2
-	bvs,a	1f
-	 xnor	%o2, %g0, %o2
-1:	smul	%o2, %o1, %o2
-	retl
-	 sub	%o0, %o2, %o0
-	nop
diff --git a/arch/sparc/lib/sdiv.S b/arch/sparc/lib/sdiv.S
deleted file mode 100644
index f0a0d4e..0000000
--- a/arch/sparc/lib/sdiv.S
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
- * sdiv.S:      This routine was taken from glibc-1.09 and is covered
- *              by the GNU Library General Public License Version 2.
- */
-
-
-/* This file is generated from divrem.m4; DO NOT EDIT! */
-/*
- * Division and remainder, from Appendix E of the Sparc Version 8
- * Architecture Manual, with fixes from Gordon Irlam.
- */
-
-/*
- * Input: dividend and divisor in %o0 and %o1 respectively.
- *
- * m4 parameters:
- *  .div	name of function to generate
- *  div		div=div => %o0 / %o1; div=rem => %o0 % %o1
- *  true		true=true => signed; true=false => unsigned
- *
- * Algorithm parameters:
- *  N		how many bits per iteration we try to get (4)
- *  WORDSIZE	total number of bits (32)
- *
- * Derived constants:
- *  TOPBITS	number of bits in the top decade of a number
- *
- * Important variables:
- *  Q		the partial quotient under development (initially 0)
- *  R		the remainder so far, initially the dividend
- *  ITER	number of main division loop iterations required;
- *		equal to ceil(log2(quotient) / N).  Note that this
- *		is the log base (2^N) of the quotient.
- *  V		the current comparand, initially divisor*2^(ITER*N-1)
- *
- * Cost:
- *  Current estimate for non-large dividend is
- *	ceil(log2(quotient) / N) * (10 + 7N/2) + C
- *  A large dividend is one greater than 2^(31-TOPBITS) and takes a
- *  different path, as the upper bits of the quotient must be developed
- *  one bit at a time.
- */
-
-
-	.globl .div
-	.globl _Div
-.div:
-_Div:	/* needed for export */
-	! compute sign of result; if neither is negative, no problem
-	orcc	%o1, %o0, %g0	! either negative?
-	bge	2f			! no, go do the divide
-	 xor	%o1, %o0, %g2	! compute sign in any case
-
-	tst	%o1
-	bge	1f
-	 tst	%o0
-	! %o1 is definitely negative; %o0 might also be negative
-	bge	2f			! if %o0 not negative...
-	 sub	%g0, %o1, %o1	! in any case, make %o1 nonneg
-1:	! %o0 is negative, %o1 is nonnegative
-	sub	%g0, %o0, %o0	! make %o0 nonnegative
-2:
-
-	! Ready to divide.  Compute size of quotient; scale comparand.
-	orcc	%o1, %g0, %o5
-	bne	1f
-	 mov	%o0, %o3
-
-		! Divide by zero trap.  If it returns, return 0 (about as
-		! wrong as possible, but that is what SunOS does...).
-		ta	ST_DIV0
-		retl
-		 clr	%o0
-
-1:
-	cmp	%o3, %o5			! if %o1 exceeds %o0, done
-	blu	Lgot_result		! (and algorithm fails otherwise)
-	 clr	%o2
-
-	sethi	%hi(1 << (32 - 4 - 1)), %g1
-
-	cmp	%o3, %g1
-	blu	Lnot_really_big
-	 clr	%o4
-
-	! Here the dividend is >= 2**(31-N) or so.  We must be careful here,
-	! as our usual N-at-a-shot divide step will cause overflow and havoc.
-	! The number of bits in the result here is N*ITER+SC, where SC <= N.
-	! Compute ITER in an unorthodox manner: know we need to shift V into
-	! the top decade: so do not even bother to compare to R.
-	1:
-		cmp	%o5, %g1
-		bgeu	3f
-		 mov	1, %g7
-
-		sll	%o5, 4, %o5
-
-		b	1b
-		 add	%o4, 1, %o4
-
-	! Now compute %g7.
-	2:
-		addcc	%o5, %o5, %o5
-		bcc	Lnot_too_big
-		 add	%g7, 1, %g7
-
-		! We get here if the %o1 overflowed while shifting.
-		! This means that %o3 has the high-order bit set.
-		! Restore %o5 and subtract from %o3.
-		sll	%g1, 4, %g1	! high order bit
-		srl	%o5, 1, %o5		! rest of %o5
-		add	%o5, %g1, %o5
-
-		b	Ldo_single_div
-		 sub	%g7, 1, %g7
-
-	Lnot_too_big:
-	3:
-		cmp	%o5, %o3
-		blu	2b
-		 nop
-
-		be	Ldo_single_div
-		 nop
-	/* NB: these are commented out in the V8-Sparc manual as well */
-	/* (I do not understand this) */
-	! %o5 > %o3: went too far: back up 1 step
-	!	srl	%o5, 1, %o5
-	!	dec	%g7
-	! do single-bit divide steps
-	!
-	! We have to be careful here.  We know that %o3 >= %o5, so we can do the
-	! first divide step without thinking.  BUT, the others are conditional,
-	! and are only done if %o3 >= 0.  Because both %o3 and %o5 may have the high-
-	! order bit set in the first step, just falling into the regular
-	! division loop will mess up the first time around.
-	! So we unroll slightly...
-	Ldo_single_div:
-		subcc	%g7, 1, %g7
-		bl	Lend_regular_divide
-		 nop
-
-		sub	%o3, %o5, %o3
-		mov	1, %o2
-
-		b	Lend_single_divloop
-		 nop
-	Lsingle_divloop:
-		sll	%o2, 1, %o2
-
-		bl	1f
-		 srl	%o5, 1, %o5
-		! %o3 >= 0
-		sub	%o3, %o5, %o3
-
-		b	2f
-		 add	%o2, 1, %o2
-	1:	! %o3 < 0
-		add	%o3, %o5, %o3
-		sub	%o2, 1, %o2
-	2:
-	Lend_single_divloop:
-		subcc	%g7, 1, %g7
-		bge	Lsingle_divloop
-		 tst	%o3
-
-		b,a	Lend_regular_divide
-
-Lnot_really_big:
-1:
-	sll	%o5, 4, %o5
-	cmp	%o5, %o3
-	bleu	1b
-	 addcc	%o4, 1, %o4
-
-	be	Lgot_result
-	 sub	%o4, 1, %o4
-
-	tst	%o3	! set up for initial iteration
-Ldivloop:
-	sll	%o2, 4, %o2
-		! depth 1, accumulated bits 0
-	bl	L.1.16
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 2, accumulated bits 1
-	bl	L.2.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 3, accumulated bits 3
-	bl	L.3.19
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 7
-	bl	L.4.23
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (7*2+1), %o2
-
-L.4.23:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (7*2-1), %o2
-
-L.3.19:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 5
-	bl	L.4.21
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (5*2+1), %o2
-
-L.4.21:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (5*2-1), %o2
-
-L.2.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 3, accumulated bits 1
-	bl	L.3.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 3
-	bl	L.4.19
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (3*2+1), %o2
-
-L.4.19:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (3*2-1), %o2
-	
-	
-L.3.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 1
-	bl	L.4.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (1*2+1), %o2
-
-L.4.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (1*2-1), %o2
-
-L.1.16:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 2, accumulated bits -1
-	bl	L.2.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 3, accumulated bits -1
-	bl	L.3.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -1
-	bl	L.4.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-1*2+1), %o2
-
-L.4.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-1*2-1), %o2
-
-L.3.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -3
-	bl	L.4.13
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-3*2+1), %o2
-
-L.4.13:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-3*2-1), %o2
-
-L.2.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 3, accumulated bits -3
-	bl	L.3.13
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -5
-	bl	L.4.11
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-5*2+1), %o2
-
-L.4.11:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-5*2-1), %o2
-
-L.3.13:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -7
-	bl	L.4.9
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-7*2+1), %o2
-
-L.4.9:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-7*2-1), %o2
-
-	9:
-Lend_regular_divide:
-	subcc	%o4, 1, %o4
-	bge	Ldivloop
-	 tst	%o3
-
-	bl,a	Lgot_result
-	! non-restoring fixup here (one instruction only!)
-	sub	%o2, 1, %o2
-
-Lgot_result:
-	! check to see if answer should be < 0
-	tst	%g2
-	bl,a	1f
-	 sub %g0, %o2, %o2
-1:
-	retl
-	 mov %o2, %o0
-
-	.globl	.div_patch
-.div_patch:
-	sra	%o0, 0x1f, %o2
-	wr	%o2, 0x0, %y
-	nop
-	nop
-	nop
-	sdivcc	%o0, %o1, %o0
-	bvs,a	1f
-	 xnor	%o0, %g0, %o0
-1:	retl
-	 nop
diff --git a/arch/sparc/lib/strlen_user_64.S b/arch/sparc/lib/strlen_user_64.S
index 114ed11..c3df71f 100644
--- a/arch/sparc/lib/strlen_user_64.S
+++ b/arch/sparc/lib/strlen_user_64.S
@@ -8,16 +8,16 @@
  * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
+#include <linux/linkage.h>
 #include <asm/asi.h>
 
 #define LO_MAGIC 0x01010101
 #define HI_MAGIC 0x80808080
 
 	.align 4
-	.global __strlen_user, __strnlen_user
-__strlen_user:
+ENTRY(__strlen_user)
 	sethi	%hi(32768), %o1
-__strnlen_user:	
+ENTRY(__strnlen_user)
 	mov	%o1, %g1
 	mov	%o0, %o1
 	andcc	%o0, 3, %g0
@@ -78,6 +78,8 @@
 	 mov	2, %o0
 23:	retl
 	 mov	3, %o0
+ENDPROC(__strlen_user)
+ENDPROC(__strnlen_user)
 
         .section .fixup,#alloc,#execinstr
         .align  4
diff --git a/arch/sparc/lib/strncmp_32.S b/arch/sparc/lib/strncmp_32.S
index 494ec66..c0d1b56 100644
--- a/arch/sparc/lib/strncmp_32.S
+++ b/arch/sparc/lib/strncmp_32.S
@@ -3,11 +3,10 @@
  *            generic strncmp routine.
  */
 
+#include <linux/linkage.h>
+
 	.text
-	.align 4
-	.global __strncmp, strncmp
-__strncmp:
-strncmp:
+ENTRY(strncmp)
 	mov	%o0, %g3
 	mov	0, %o3
 
@@ -116,3 +115,4 @@
 	and	%g2, 0xff, %o0
 	retl
 	 sub	%o3, %o0, %o0
+ENDPROC(strncmp)
diff --git a/arch/sparc/lib/strncmp_64.S b/arch/sparc/lib/strncmp_64.S
index 980e837..0656627 100644
--- a/arch/sparc/lib/strncmp_64.S
+++ b/arch/sparc/lib/strncmp_64.S
@@ -4,13 +4,11 @@
  * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
+#include <linux/linkage.h>
 #include <asm/asi.h>
 
 	.text
-	.align	32
-	.globl	strncmp
-	.type	strncmp,#function
-strncmp:
+ENTRY(strncmp)
 	brlez,pn %o2, 3f
 	 lduba	[%o0] (ASI_PNF), %o3
 1:
@@ -29,4 +27,4 @@
 3:
 	retl
 	 clr	%o0
-	.size	strncmp, .-strncmp
+ENDPROC(strncmp)
diff --git a/arch/sparc/lib/strncpy_from_user_32.S b/arch/sparc/lib/strncpy_from_user_32.S
index d771989..db0ed29 100644
--- a/arch/sparc/lib/strncpy_from_user_32.S
+++ b/arch/sparc/lib/strncpy_from_user_32.S
@@ -3,11 +3,11 @@
  *  Copyright(C) 1996 David S. Miller
  */
 
+#include <linux/linkage.h>
 #include <asm/ptrace.h>
 #include <asm/errno.h>
 
 	.text
-	.align	4
 
 	/* Must return:
 	 *
@@ -16,8 +16,7 @@
 	 * bytes copied		if we hit a null byte
 	 */
 
-	.globl	__strncpy_from_user
-__strncpy_from_user:
+ENTRY(__strncpy_from_user)
 	/* %o0=dest, %o1=src, %o2=count */
 	mov	%o2, %o3
 1:
@@ -35,6 +34,7 @@
 	add	%o2, 1, %o0
 	retl
 	 sub	%o3, %o0, %o0
+ENDPROC(__strncpy_from_user)
 
 	.section .fixup,#alloc,#execinstr
 	.align	4
diff --git a/arch/sparc/lib/strncpy_from_user_64.S b/arch/sparc/lib/strncpy_from_user_64.S
index 511c8f1..d1246b7 100644
--- a/arch/sparc/lib/strncpy_from_user_64.S
+++ b/arch/sparc/lib/strncpy_from_user_64.S
@@ -4,6 +4,7 @@
  *  Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
  */
 
+#include <linux/linkage.h>
 #include <asm/asi.h>
 #include <asm/errno.h>
 
@@ -12,7 +13,6 @@
 0:	.xword	0x0101010101010101
 
 	.text
-	.align	32
 
 	/* Must return:
 	 *
@@ -30,9 +30,7 @@
 	 * and average length is 18 or so.
 	 */
 
-	.globl	__strncpy_from_user
-	.type	__strncpy_from_user,#function
-__strncpy_from_user:
+ENTRY(__strncpy_from_user)
 	/* %o0=dest, %o1=src, %o2=count */
 	andcc	%o1, 7, %g0		! IEU1	Group
 	bne,pn	%icc, 30f		! CTI
@@ -123,7 +121,7 @@
 	 mov	%o2, %o0
 2:	retl
 	 add	%o2, %o3, %o0
-	.size	__strncpy_from_user, .-__strncpy_from_user
+ENDPROC(__strncpy_from_user)
 
 	.section __ex_table,"a"
 	.align	4
diff --git a/arch/sparc/lib/ucmpdi2.c b/arch/sparc/lib/ucmpdi2.c
new file mode 100644
index 0000000..1e06ed5
--- /dev/null
+++ b/arch/sparc/lib/ucmpdi2.c
@@ -0,0 +1,19 @@
+#include <linux/module.h>
+#include "libgcc.h"
+
+word_type __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+	const DWunion au = {.ll = a};
+	const DWunion bu = {.ll = b};
+
+	if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
+		return 0;
+	else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
+		return 2;
+	if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
+		return 0;
+	else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
+		return 2;
+	return 1;
+}
+EXPORT_SYMBOL(__ucmpdi2);
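
__ucmpdi2 follows the libgcc convention for three-way unsigned 64-bit comparison: 0 for a < b, 1 for equal, 2 for a > b. It is added (and exported) because 32-bit sparc object code can end up referencing the helper; whether a particular comparison is lowered to the call depends on the compiler version and options, so the example below is only indicative:

/* Code like this can make a 32-bit build reference __ucmpdi2; the
 * function name here is illustrative, not anything from the tree.
 */
static int cmp_u64(unsigned long long a, unsigned long long b)
{
	if (a < b)	/* may become __ucmpdi2(a, b) == 0 */
		return -1;
	if (a > b)	/* may become __ucmpdi2(a, b) == 2 */
		return 1;
	return 0;	/* __ucmpdi2(a, b) == 1 */
}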
diff --git a/arch/sparc/lib/udiv.S b/arch/sparc/lib/udiv.S
deleted file mode 100644
index 2101405..0000000
--- a/arch/sparc/lib/udiv.S
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * udiv.S:      This routine was taken from glibc-1.09 and is covered
- *              by the GNU Library General Public License Version 2.
- */
-
-
-/* This file is generated from divrem.m4; DO NOT EDIT! */
-/*
- * Division and remainder, from Appendix E of the Sparc Version 8
- * Architecture Manual, with fixes from Gordon Irlam.
- */
-
-/*
- * Input: dividend and divisor in %o0 and %o1 respectively.
- *
- * m4 parameters:
- *  .udiv	name of function to generate
- *  div		div=div => %o0 / %o1; div=rem => %o0 % %o1
- *  false		false=true => signed; false=false => unsigned
- *
- * Algorithm parameters:
- *  N		how many bits per iteration we try to get (4)
- *  WORDSIZE	total number of bits (32)
- *
- * Derived constants:
- *  TOPBITS	number of bits in the top decade of a number
- *
- * Important variables:
- *  Q		the partial quotient under development (initially 0)
- *  R		the remainder so far, initially the dividend
- *  ITER	number of main division loop iterations required;
- *		equal to ceil(log2(quotient) / N).  Note that this
- *		is the log base (2^N) of the quotient.
- *  V		the current comparand, initially divisor*2^(ITER*N-1)
- *
- * Cost:
- *  Current estimate for non-large dividend is
- *	ceil(log2(quotient) / N) * (10 + 7N/2) + C
- *  A large dividend is one greater than 2^(31-TOPBITS) and takes a
- *  different path, as the upper bits of the quotient must be developed
- *  one bit at a time.
- */
-
-
-	.globl .udiv
-	.globl _Udiv
-.udiv:
-_Udiv:	/* needed for export */
-
-	! Ready to divide.  Compute size of quotient; scale comparand.
-	orcc	%o1, %g0, %o5
-	bne	1f
-	 mov	%o0, %o3
-
-		! Divide by zero trap.  If it returns, return 0 (about as
-		! wrong as possible, but that is what SunOS does...).
-		ta	ST_DIV0
-		retl
-		 clr	%o0
-
-1:
-	cmp	%o3, %o5			! if %o1 exceeds %o0, done
-	blu	Lgot_result		! (and algorithm fails otherwise)
-	 clr	%o2
-
-	sethi	%hi(1 << (32 - 4 - 1)), %g1
-
-	cmp	%o3, %g1
-	blu	Lnot_really_big
-	 clr	%o4
-
-	! Here the dividend is >= 2**(31-N) or so.  We must be careful here,
-	! as our usual N-at-a-shot divide step will cause overflow and havoc.
-	! The number of bits in the result here is N*ITER+SC, where SC <= N.
-	! Compute ITER in an unorthodox manner: know we need to shift V into
-	! the top decade: so do not even bother to compare to R.
-	1:
-		cmp	%o5, %g1
-		bgeu	3f
-		 mov	1, %g7
-
-		sll	%o5, 4, %o5
-
-		b	1b
-		 add	%o4, 1, %o4
-
-	! Now compute %g7.
-	2:
-		addcc	%o5, %o5, %o5
-		bcc	Lnot_too_big
-		 add	%g7, 1, %g7
-
-		! We get here if the %o1 overflowed while shifting.
-		! This means that %o3 has the high-order bit set.
-		! Restore %o5 and subtract from %o3.
-		sll	%g1, 4, %g1	! high order bit
-		srl	%o5, 1, %o5		! rest of %o5
-		add	%o5, %g1, %o5
-
-		b	Ldo_single_div
-		 sub	%g7, 1, %g7
-
-	Lnot_too_big:
-	3:
-		cmp	%o5, %o3
-		blu	2b
-		 nop
-
-		be	Ldo_single_div
-		 nop
-	/* NB: these are commented out in the V8-Sparc manual as well */
-	/* (I do not understand this) */
-	! %o5 > %o3: went too far: back up 1 step
-	!	srl	%o5, 1, %o5
-	!	dec	%g7
-	! do single-bit divide steps
-	!
-	! We have to be careful here.  We know that %o3 >= %o5, so we can do the
-	! first divide step without thinking.  BUT, the others are conditional,
-	! and are only done if %o3 >= 0.  Because both %o3 and %o5 may have the high-
-	! order bit set in the first step, just falling into the regular
-	! division loop will mess up the first time around.
-	! So we unroll slightly...
-	Ldo_single_div:
-		subcc	%g7, 1, %g7
-		bl	Lend_regular_divide
-		 nop
-
-		sub	%o3, %o5, %o3
-		mov	1, %o2
-
-		b	Lend_single_divloop
-		 nop
-	Lsingle_divloop:
-		sll	%o2, 1, %o2
-		bl	1f
-		 srl	%o5, 1, %o5
-		! %o3 >= 0
-		sub	%o3, %o5, %o3
-		b	2f
-		 add	%o2, 1, %o2
-	1:	! %o3 < 0
-		add	%o3, %o5, %o3
-		sub	%o2, 1, %o2
-	2:
-	Lend_single_divloop:
-		subcc	%g7, 1, %g7
-		bge	Lsingle_divloop
-		 tst	%o3
-
-		b,a	Lend_regular_divide
-
-Lnot_really_big:
-1:
-	sll	%o5, 4, %o5
-
-	cmp	%o5, %o3
-	bleu	1b
-	 addcc	%o4, 1, %o4
-
-	be	Lgot_result
-	 sub	%o4, 1, %o4
-
-	tst	%o3	! set up for initial iteration
-Ldivloop:
-	sll	%o2, 4, %o2
-		! depth 1, accumulated bits 0
-	bl	L.1.16
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 2, accumulated bits 1
-	bl	L.2.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 3, accumulated bits 3
-	bl	L.3.19
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 7
-	bl	L.4.23
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (7*2+1), %o2
-
-L.4.23:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (7*2-1), %o2
-
-L.3.19:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 5
-	bl	L.4.21
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (5*2+1), %o2
-
-L.4.21:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (5*2-1), %o2
-
-L.2.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 3, accumulated bits 1
-	bl	L.3.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 3
-	bl	L.4.19
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (3*2+1), %o2
-
-L.4.19:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (3*2-1), %o2
-
-L.3.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 1
-	bl	L.4.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (1*2+1), %o2
-
-L.4.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (1*2-1), %o2
-
-L.1.16:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 2, accumulated bits -1
-	bl	L.2.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 3, accumulated bits -1
-	bl	L.3.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -1
-	bl	L.4.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-1*2+1), %o2
-
-L.4.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-1*2-1), %o2
-
-L.3.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -3
-	bl	L.4.13
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-3*2+1), %o2
-
-L.4.13:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-3*2-1), %o2
-
-L.2.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 3, accumulated bits -3
-	bl	L.3.13
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -5
-	bl	L.4.11
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-5*2+1), %o2
-
-L.4.11:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-5*2-1), %o2
-
-L.3.13:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -7
-	bl	L.4.9
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-7*2+1), %o2
-
-L.4.9:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-7*2-1), %o2
-
-	9:
-Lend_regular_divide:
-	subcc	%o4, 1, %o4
-	bge	Ldivloop
-	 tst	%o3
-
-	bl,a	Lgot_result
-	! non-restoring fixup here (one instruction only!)
-	sub	%o2, 1, %o2
-
-Lgot_result:
-
-	retl
-	 mov %o2, %o0
-
-	.globl	.udiv_patch
-.udiv_patch:
-	wr	%g0, 0x0, %y
-	nop
-	nop
-	retl
-	 udiv	%o0, %o1, %o0
-	nop
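
All of the deleted divrem.m4 outputs (.rem, .div, .udiv, .urem) implement the same thing: software unsigned division that develops the quotient four bits per loop iteration with a non-restoring fixup at the end, plus sign handling in the signed variants. A one-bit-at-a-time restoring divider in C produces the same quotient and remainder and is easier to follow (sketch only; divisor == 0 is not handled here, the assembly trapped with 'ta ST_DIV0'):

#include <stdint.h>

/* Reference model for the deleted software dividers: classic restoring
 * shift-subtract division, one quotient bit per step.
 */
static uint32_t soft_udiv(uint32_t dividend, uint32_t divisor,
			  uint32_t *rem_out)
{
	uint32_t quotient = 0, rem = 0;
	int bit;

	for (bit = 31; bit >= 0; bit--) {
		rem = (rem << 1) | ((dividend >> bit) & 1);
		if (rem >= divisor) {
			rem -= divisor;
			quotient |= 1U << bit;
		}
	}
	if (rem_out)
		*rem_out = rem;	/* .urem/.rem returned this value instead */
	return quotient;
}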
diff --git a/arch/sparc/lib/udivdi3.S b/arch/sparc/lib/udivdi3.S
index b430f1f..24e0a35 100644
--- a/arch/sparc/lib/udivdi3.S
+++ b/arch/sparc/lib/udivdi3.S
@@ -60,8 +60,9 @@
 	bne .LL77
 	mov %i0,%o2
 	mov 1,%o0
-	call .udiv,0
 	mov 0,%o1
+	wr %g0, 0, %y
+	udiv %o0, %o1, %o0
 	mov %o0,%o3
 	mov %i0,%o2
 .LL77:
diff --git a/arch/sparc/lib/umul.S b/arch/sparc/lib/umul.S
deleted file mode 100644
index 1f36ae6..0000000
--- a/arch/sparc/lib/umul.S
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * umul.S:      This routine was taken from glibc-1.09 and is covered
- *              by the GNU Library General Public License Version 2.
- */
-
-
-/*
- * Unsigned multiply.  Returns %o0 * %o1 in %o1%o0 (i.e., %o1 holds the
- * upper 32 bits of the 64-bit product).
- *
- * This code optimizes short (less than 13-bit) multiplies.  Short
- * multiplies require 25 instruction cycles, and long ones require
- * 45 instruction cycles.
- *
- * On return, overflow has occurred (%o1 is not zero) if and only if
- * the Z condition code is clear, allowing, e.g., the following:
- *
- *	call	.umul
- *	nop
- *	bnz	overflow	(or tnz)
- */
-
-	.globl .umul
-	.globl _Umul
-.umul:
-_Umul:	/* needed for export */
-	or	%o0, %o1, %o4
-	mov	%o0, %y		! multiplier -> Y
-
-	andncc	%o4, 0xfff, %g0	! test bits 12..31 of *both* args
-	be	Lmul_shortway	! if zero, can do it the short way
-	 andcc	%g0, %g0, %o4	! zero the partial product and clear N and V
-
-	/*
-	 * Long multiply.  32 steps, followed by a final shift step.
-	 */
-	mulscc	%o4, %o1, %o4	! 1
-	mulscc	%o4, %o1, %o4	! 2
-	mulscc	%o4, %o1, %o4	! 3
-	mulscc	%o4, %o1, %o4	! 4
-	mulscc	%o4, %o1, %o4	! 5
-	mulscc	%o4, %o1, %o4	! 6
-	mulscc	%o4, %o1, %o4	! 7
-	mulscc	%o4, %o1, %o4	! 8
-	mulscc	%o4, %o1, %o4	! 9
-	mulscc	%o4, %o1, %o4	! 10
-	mulscc	%o4, %o1, %o4	! 11
-	mulscc	%o4, %o1, %o4	! 12
-	mulscc	%o4, %o1, %o4	! 13
-	mulscc	%o4, %o1, %o4	! 14
-	mulscc	%o4, %o1, %o4	! 15
-	mulscc	%o4, %o1, %o4	! 16
-	mulscc	%o4, %o1, %o4	! 17
-	mulscc	%o4, %o1, %o4	! 18
-	mulscc	%o4, %o1, %o4	! 19
-	mulscc	%o4, %o1, %o4	! 20
-	mulscc	%o4, %o1, %o4	! 21
-	mulscc	%o4, %o1, %o4	! 22
-	mulscc	%o4, %o1, %o4	! 23
-	mulscc	%o4, %o1, %o4	! 24
-	mulscc	%o4, %o1, %o4	! 25
-	mulscc	%o4, %o1, %o4	! 26
-	mulscc	%o4, %o1, %o4	! 27
-	mulscc	%o4, %o1, %o4	! 28
-	mulscc	%o4, %o1, %o4	! 29
-	mulscc	%o4, %o1, %o4	! 30
-	mulscc	%o4, %o1, %o4	! 31
-	mulscc	%o4, %o1, %o4	! 32
-	mulscc	%o4, %g0, %o4	! final shift
-
-
-	/*
-	 * Normally, with the shift-and-add approach, if both numbers are
-	 * positive you get the correct result.  With 32-bit two's-complement
-	 * numbers, -x is represented as
-	 *
-	 *		  x		    32
-	 *	( 2  -  ------ ) mod 2  *  2
-	 *		   32
-	 *		  2
-	 *
-	 * (the `mod 2' subtracts 1 from 1.bbbb).  To avoid lots of 2^32s,
-	 * we can treat this as if the radix point were just to the left
-	 * of the sign bit (multiply by 2^32), and get
-	 *
-	 *	-x  =  (2 - x) mod 2
-	 *
-	 * Then, ignoring the `mod 2's for convenience:
-	 *
-	 *   x *  y	= xy
-	 *  -x *  y	= 2y - xy
-	 *   x * -y	= 2x - xy
-	 *  -x * -y	= 4 - 2x - 2y + xy
-	 *
-	 * For signed multiplies, we subtract (x << 32) from the partial
-	 * product to fix this problem for negative multipliers (see mul.s).
-	 * Because of the way the shift into the partial product is calculated
-	 * (N xor V), this term is automatically removed for the multiplicand,
-	 * so we don't have to adjust.
-	 *
-	 * But for unsigned multiplies, the high order bit wasn't a sign bit,
-	 * and the correction is wrong.  So for unsigned multiplies where the
-	 * high order bit is one, we end up with xy - (y << 32).  To fix it
-	 * we add y << 32.
-	 */
-#if 0
-	tst	%o1
-	bl,a	1f		! if %o1 < 0 (high order bit = 1),
-	 add	%o4, %o0, %o4	! %o4 += %o0 (add y to upper half)
-
-1:
-	rd	%y, %o0		! get lower half of product
-	retl
-	 addcc	%o4, %g0, %o1	! put upper half in place and set Z for %o1==0
-#else
-	/* Faster code from tege@sics.se.  */
-	sra	%o1, 31, %o2	! make mask from sign bit
-	and	%o0, %o2, %o2	! %o2 = 0 or %o0, depending on sign of %o1
-	rd	%y, %o0		! get lower half of product
-	retl
-	 addcc	%o4, %o2, %o1	! add compensation and put upper half in place
-#endif
-
-Lmul_shortway:
-	/*
-	 * Short multiply.  12 steps, followed by a final shift step.
-	 * The resulting bits are off by 12 and (32-12) = 20 bit positions,
-	 * but there is no problem with %o0 being negative (unlike above),
-	 * and overflow is impossible (the answer is at most 24 bits long).
-	 */
-	mulscc	%o4, %o1, %o4	! 1
-	mulscc	%o4, %o1, %o4	! 2
-	mulscc	%o4, %o1, %o4	! 3
-	mulscc	%o4, %o1, %o4	! 4
-	mulscc	%o4, %o1, %o4	! 5
-	mulscc	%o4, %o1, %o4	! 6
-	mulscc	%o4, %o1, %o4	! 7
-	mulscc	%o4, %o1, %o4	! 8
-	mulscc	%o4, %o1, %o4	! 9
-	mulscc	%o4, %o1, %o4	! 10
-	mulscc	%o4, %o1, %o4	! 11
-	mulscc	%o4, %o1, %o4	! 12
-	mulscc	%o4, %g0, %o4	! final shift
-
-	/*
-	 * %o4 has 20 of the bits that should be in the result; %y has
-	 * the bottom 12 (as %y's top 12).  That is:
-	 *
-	 *	  %o4		    %y
-	 * +----------------+----------------+
-	 * | -12- |   -20-  | -12- |   -20-  |
-	 * +------(---------+------)---------+
-	 *	   -----result-----
-	 *
-	 * The 12 bits of %o4 left of the `result' area are all zero;
-	 * in fact, all top 20 bits of %o4 are zero.
-	 */
-
-	rd	%y, %o5
-	sll	%o4, 12, %o0	! shift middle bits left 12
-	srl	%o5, 20, %o5	! shift low bits right 20
-	or	%o5, %o0, %o0
-	retl
-	 addcc	%g0, %g0, %o1	! %o1 = zero, and set Z
-
-	.globl	.umul_patch
-.umul_patch:
-	umul	%o0, %o1, %o0
-	retl
-	 rd	%y, %o1
-	nop
diff --git a/arch/sparc/lib/urem.S b/arch/sparc/lib/urem.S
deleted file mode 100644
index 77123eb..0000000
--- a/arch/sparc/lib/urem.S
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * urem.S:      This routine was taken from glibc-1.09 and is covered
- *              by the GNU Library General Public License Version 2.
- */
-
-/* This file is generated from divrem.m4; DO NOT EDIT! */
-/*
- * Division and remainder, from Appendix E of the Sparc Version 8
- * Architecture Manual, with fixes from Gordon Irlam.
- */
-
-/*
- * Input: dividend and divisor in %o0 and %o1 respectively.
- *
- * m4 parameters:
- *  .urem	name of function to generate
- *  rem		rem=div => %o0 / %o1; rem=rem => %o0 % %o1
- *  false		false=true => signed; false=false => unsigned
- *
- * Algorithm parameters:
- *  N		how many bits per iteration we try to get (4)
- *  WORDSIZE	total number of bits (32)
- *
- * Derived constants:
- *  TOPBITS	number of bits in the top decade of a number
- *
- * Important variables:
- *  Q		the partial quotient under development (initially 0)
- *  R		the remainder so far, initially the dividend
- *  ITER	number of main division loop iterations required;
- *		equal to ceil(log2(quotient) / N).  Note that this
- *		is the log base (2^N) of the quotient.
- *  V		the current comparand, initially divisor*2^(ITER*N-1)
- *
- * Cost:
- *  Current estimate for non-large dividend is
- *	ceil(log2(quotient) / N) * (10 + 7N/2) + C
- *  A large dividend is one greater than 2^(31-TOPBITS) and takes a
- *  different path, as the upper bits of the quotient must be developed
- *  one bit at a time.
- */
-
-	.globl .urem
-	.globl _Urem
-.urem:
-_Urem:	/* needed for export */
-
-	! Ready to divide.  Compute size of quotient; scale comparand.
-	orcc	%o1, %g0, %o5
-	bne	1f
-	 mov	%o0, %o3
-
-		! Divide by zero trap.  If it returns, return 0 (about as
-		! wrong as possible, but that is what SunOS does...).
-		ta	ST_DIV0
-		retl
-		 clr	%o0
-
-1:
-	cmp	%o3, %o5			! if %o1 exceeds %o0, done
-	blu	Lgot_result		! (and algorithm fails otherwise)
-	 clr	%o2
-
-	sethi	%hi(1 << (32 - 4 - 1)), %g1
-
-	cmp	%o3, %g1
-	blu	Lnot_really_big
-	 clr	%o4
-
-	! Here the dividend is >= 2**(31-N) or so.  We must be careful here,
-	! as our usual N-at-a-shot divide step will cause overflow and havoc.
-	! The number of bits in the result here is N*ITER+SC, where SC <= N.
-	! Compute ITER in an unorthodox manner: know we need to shift V into
-	! the top decade: so do not even bother to compare to R.
-	1:
-		cmp	%o5, %g1
-		bgeu	3f
-		 mov	1, %g7
-
-		sll	%o5, 4, %o5
-
-		b	1b
-		 add	%o4, 1, %o4
-
-	! Now compute %g7.
-	2:
-		addcc	%o5, %o5, %o5
-		bcc	Lnot_too_big
-		 add	%g7, 1, %g7
-
-		! We get here if the %o1 overflowed while shifting.
-		! This means that %o3 has the high-order bit set.
-		! Restore %o5 and subtract from %o3.
-		sll	%g1, 4, %g1	! high order bit
-		srl	%o5, 1, %o5		! rest of %o5
-		add	%o5, %g1, %o5
-
-		b	Ldo_single_div
-		 sub	%g7, 1, %g7
-
-	Lnot_too_big:
-	3:
-		cmp	%o5, %o3
-		blu	2b
-		 nop
-
-		be	Ldo_single_div
-		 nop
-	/* NB: these are commented out in the V8-Sparc manual as well */
-	/* (I do not understand this) */
-	! %o5 > %o3: went too far: back up 1 step
-	!	srl	%o5, 1, %o5
-	!	dec	%g7
-	! do single-bit divide steps
-	!
-	! We have to be careful here.  We know that %o3 >= %o5, so we can do the
-	! first divide step without thinking.  BUT, the others are conditional,
-	! and are only done if %o3 >= 0.  Because both %o3 and %o5 may have the high-
-	! order bit set in the first step, just falling into the regular
-	! division loop will mess up the first time around.
-	! So we unroll slightly...
-	Ldo_single_div:
-		subcc	%g7, 1, %g7
-		bl	Lend_regular_divide
-		 nop
-
-		sub	%o3, %o5, %o3
-		mov	1, %o2
-
-		b	Lend_single_divloop
-		 nop
-	Lsingle_divloop:
-		sll	%o2, 1, %o2
-		bl	1f
-		 srl	%o5, 1, %o5
-		! %o3 >= 0
-		sub	%o3, %o5, %o3
-		b	2f
-		 add	%o2, 1, %o2
-	1:	! %o3 < 0
-		add	%o3, %o5, %o3
-		sub	%o2, 1, %o2
-	2:
-	Lend_single_divloop:
-		subcc	%g7, 1, %g7
-		bge	Lsingle_divloop
-		 tst	%o3
-
-		b,a	Lend_regular_divide
-
-Lnot_really_big:
-1:
-	sll	%o5, 4, %o5
-
-	cmp	%o5, %o3
-	bleu	1b
-	 addcc	%o4, 1, %o4
-
-	be	Lgot_result
-	 sub	%o4, 1, %o4
-
-	tst	%o3	! set up for initial iteration
-Ldivloop:
-	sll	%o2, 4, %o2
-		! depth 1, accumulated bits 0
-	bl	L.1.16
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 2, accumulated bits 1
-	bl	L.2.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 3, accumulated bits 3
-	bl	L.3.19
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 7
-	bl	L.4.23
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (7*2+1), %o2
-
-L.4.23:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (7*2-1), %o2
-
-L.3.19:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 5
-	bl	L.4.21
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (5*2+1), %o2
-
-L.4.21:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (5*2-1), %o2
-
-L.2.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 3, accumulated bits 1
-	bl	L.3.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 3
-	bl	L.4.19
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (3*2+1), %o2
-
-L.4.19:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (3*2-1), %o2
-
-L.3.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits 1
-	bl	L.4.17
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (1*2+1), %o2
-	
-L.4.17:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (1*2-1), %o2
-
-L.1.16:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 2, accumulated bits -1
-	bl	L.2.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 3, accumulated bits -1
-	bl	L.3.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -1
-	bl	L.4.15
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-1*2+1), %o2
-
-L.4.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-1*2-1), %o2
-
-L.3.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -3
-	bl	L.4.13
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-3*2+1), %o2
-
-L.4.13:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-3*2-1), %o2
-
-L.2.15:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 3, accumulated bits -3
-	bl	L.3.13
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -5
-	bl	L.4.11
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-5*2+1), %o2
-	
-L.4.11:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-5*2-1), %o2
-
-L.3.13:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-			! depth 4, accumulated bits -7
-	bl	L.4.9
-	 srl	%o5,1,%o5
-	! remainder is positive
-	subcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-7*2+1), %o2
-
-L.4.9:
-	! remainder is negative
-	addcc	%o3,%o5,%o3
-	b	9f
-	 add	%o2, (-7*2-1), %o2
-
-	9:
-Lend_regular_divide:
-	subcc	%o4, 1, %o4
-	bge	Ldivloop
-	 tst	%o3
-
-	bl,a	Lgot_result
-	! non-restoring fixup here (one instruction only!)
-	add	%o3, %o1, %o3
-
-Lgot_result:
-
-	retl
-	 mov %o3, %o0
-
-	.globl	.urem_patch
-.urem_patch:
-	wr	%g0, 0x0, %y
-	nop
-	nop
-	nop
-	udiv	%o0, %o1, %o2
-	umul	%o2, %o1, %o2
-	retl
-	 sub	%o0, %o2, %o0
diff --git a/arch/sparc/lib/xor.S b/arch/sparc/lib/xor.S
index f44f58f..2c05641 100644
--- a/arch/sparc/lib/xor.S
+++ b/arch/sparc/lib/xor.S
@@ -8,6 +8,7 @@
  * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  */
 
+#include <linux/linkage.h>
 #include <asm/visasm.h>
 #include <asm/asi.h>
 #include <asm/dcu.h>
@@ -19,12 +20,9 @@
  *	!(len & 127) && len >= 256
  */
 	.text
-	.align	32
 
 	/* VIS versions. */
-	.globl	xor_vis_2
-	.type	xor_vis_2,#function
-xor_vis_2:
+ENTRY(xor_vis_2)
 	rd	%fprs, %o5
 	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
 	be,pt	%icc, 0f
@@ -91,11 +89,9 @@
 	wr	%g1, %g0, %asi
 	retl
 	  wr	%g0, 0, %fprs
-	.size	xor_vis_2, .-xor_vis_2
+ENDPROC(xor_vis_2)
 
-	.globl	xor_vis_3
-	.type	xor_vis_3,#function
-xor_vis_3:
+ENTRY(xor_vis_3)
 	rd	%fprs, %o5
 	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
 	be,pt	%icc, 0f
@@ -159,11 +155,9 @@
 	wr	%g1, %g0, %asi
 	retl
 	 wr	%g0, 0, %fprs
-	.size	xor_vis_3, .-xor_vis_3
+ENDPROC(xor_vis_3)
 
-	.globl	xor_vis_4
-	.type	xor_vis_4,#function
-xor_vis_4:
+ENTRY(xor_vis_4)
 	rd	%fprs, %o5
 	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
 	be,pt	%icc, 0f
@@ -246,11 +240,9 @@
 	wr	%g1, %g0, %asi
 	retl
 	 wr	%g0, 0, %fprs
-	.size	xor_vis_4, .-xor_vis_4
+ENDPROC(xor_vis_4)
 
-	.globl	xor_vis_5
-	.type	xor_vis_5,#function
-xor_vis_5:
+ENTRY(xor_vis_5)
 	save	%sp, -192, %sp
 	rd	%fprs, %o5
 	andcc	%o5, FPRS_FEF|FPRS_DU, %g0
@@ -354,12 +346,10 @@
 	wr	%g0, 0, %fprs
 	ret
 	 restore
-	.size	xor_vis_5, .-xor_vis_5
+ENDPROC(xor_vis_5)
 
 	/* Niagara versions. */
-	.globl		xor_niagara_2
-	.type		xor_niagara_2,#function
-xor_niagara_2:		/* %o0=bytes, %o1=dest, %o2=src */
+ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
 	save		%sp, -192, %sp
 	prefetch	[%i1], #n_writes
 	prefetch	[%i2], #one_read
@@ -402,11 +392,9 @@
 	wr		%g7, 0x0, %asi
 	ret
 	 restore
-	.size		xor_niagara_2, .-xor_niagara_2
+ENDPROC(xor_niagara_2)
 
-	.globl		xor_niagara_3
-	.type		xor_niagara_3,#function
-xor_niagara_3:		/* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
+ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
 	save		%sp, -192, %sp
 	prefetch	[%i1], #n_writes
 	prefetch	[%i2], #one_read
@@ -465,11 +453,9 @@
 	wr		%g7, 0x0, %asi
 	ret
 	 restore
-	.size		xor_niagara_3, .-xor_niagara_3
+ENDPROC(xor_niagara_3)
 
-	.globl		xor_niagara_4
-	.type		xor_niagara_4,#function
-xor_niagara_4:		/* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
+ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
 	save		%sp, -192, %sp
 	prefetch	[%i1], #n_writes
 	prefetch	[%i2], #one_read
@@ -549,11 +535,9 @@
 	wr		%g7, 0x0, %asi
 	ret
 	 restore
-	.size		xor_niagara_4, .-xor_niagara_4
+ENDPROC(xor_niagara_4)
 
-	.globl		xor_niagara_5
-	.type		xor_niagara_5,#function
-xor_niagara_5:		/* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
+ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
 	save		%sp, -192, %sp
 	prefetch	[%i1], #n_writes
 	prefetch	[%i2], #one_read
@@ -649,4 +633,4 @@
 	wr		%g7, 0x0, %asi
 	ret
 	 restore
-	.size		xor_niagara_5, .-xor_niagara_5
+ENDPROC(xor_niagara_5)
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 301421c..69ffd31 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -7,8 +7,7 @@
 obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
-obj-$(CONFIG_SPARC32)   += loadmmu.o
-obj-$(CONFIG_SPARC32)   += extable.o btfixup.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
 
@@ -17,9 +16,3 @@
 
 # Only used by sparc32
 obj-$(CONFIG_HIGHMEM)   += highmem.o
-
-ifdef CONFIG_SMP
-obj-$(CONFIG_SPARC32) += nosun4c.o
-else
-obj-$(CONFIG_SPARC32) += sun4c.o
-endif
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
deleted file mode 100644
index 09d6af2..0000000
--- a/arch/sparc/mm/btfixup.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/* btfixup.c: Boot time code fixup and relocator, so that
- * we can get rid of most indirect calls to achieve single
- * image sun4c and srmmu kernel.
- *
- * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/btfixup.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/oplib.h>
-#include <asm/cacheflush.h>
-
-#define BTFIXUP_OPTIMIZE_NOP
-#define BTFIXUP_OPTIMIZE_OTHER
-
-extern char *srmmu_name;
-static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for ";
-static char str_sun4c[] __initdata = "sun4c\n";
-static char str_srmmu[] __initdata = "srmmu[%s]/";
-static char str_iommu[] __initdata = "iommu\n";
-static char str_iounit[] __initdata = "io-unit\n";
-
-static int visited __initdata = 0;
-extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[];
-extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[];
-static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n";
-static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n";
-static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n";
-static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n";
-static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n";
-static char wrong[] __initdata = "Wrong address for %c fixup %p\n";
-static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n";
-static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n";
-static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n";
-static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
-static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
-static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
-static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
-static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
-
-#ifdef BTFIXUP_OPTIMIZE_OTHER
-static void __init set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
-{
-	if (!fmangled)
-		*addr = value;
-	else {
-		unsigned int *q = (unsigned int *)q1;
-		if (*addr == 0x01000000) {
-			/* Noped */
-			*q = value;
-		} else if (addr[-1] == *q) {
-			/* Moved */
-			addr[-1] = value;
-			*q = value;
-		} else {
-			prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value);
-			prom_halt();
-		}
-	}
-}
-#else
-static inline void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
-{
-	*addr = value;
-}
-#endif
-
-void __init btfixup(void)
-{
-	unsigned int *p, *q;
-	int type, count;
-	unsigned insn;
-	unsigned *addr;
-	int fmangled = 0;
-	void (*flush_cacheall)(void);
-	
-	if (!visited) {
-		visited++;
-		printk(version);
-		if (ARCH_SUN4C)
-			printk(str_sun4c);
-		else {
-			printk(str_srmmu, srmmu_name);
-			if (sparc_cpu_model == sun4d)
-				printk(str_iounit);
-			else
-				printk(str_iommu);
-		}
-	}
-	for (p = ___btfixup_start; p < ___btfixup_end; ) {
-		count = p[2];
-		q = p + 3;
-		switch (type = *(unsigned char *)p) {
-		case 'f': 
-			count = p[3];
-			q = p + 4;
-			if (((p[0] & 1) || p[1]) 
-			    && ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) {
-				prom_printf(wrong_f, p, p[1]);
-				prom_halt();
-			}
-			break;
-		case 'b':
-			if (p[1] < (unsigned long)__init_begin || p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) {
-				prom_printf(wrong_b, p, p[1]);
-				prom_halt();
-			}
-			break;
-		case 's':
-			if (p[1] + 0x1000 >= 0x2000) {
-				prom_printf(wrong_s, p, p[1]);
-				prom_halt();
-			}
-			break;
-		case 'h':
-			if (p[1] & 0x3ff) {
-				prom_printf(wrong_h, p, p[1]);
-				prom_halt();
-			}
-			break;
-		case 'a':
-			if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) {
-				prom_printf(wrong_a, p, p[1]);
-				prom_halt();
-			}
-			break;
-		}
-		if (p[0] & 1) {
-			p[0] &= ~1;
-			while (count) {
-				fmangled = 0;
-				addr = (unsigned *)*q;
-				if (addr < _stext || addr >= _end) {
-					prom_printf(wrong, type, p);
-					prom_halt();
-				}
-				insn = *addr;
-#ifdef BTFIXUP_OPTIMIZE_OTHER				
-				if (type != 'f' && q[1]) {
-					insn = *(unsigned int *)q[1];
-					if (!insn || insn == 1)
-						insn = *addr;
-					else
-						fmangled = 1;
-				}
-#endif
-				switch (type) {
-				case 'f':	/* CALL */
-					if (addr >= __start___ksymtab && addr < __stop___ksymtab) {
-						*addr = p[1];
-						break;
-					} else if (!q[1]) {
-						if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */
-							*addr = (insn & 0xffc00000) | (p[1] >> 10); break;
-						} else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */
-							*addr = (insn & 0xffffe000) | (p[1] & 0x3ff); break;
-						} else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */
-				bad_f:
-							prom_printf(insn_f, p, addr, insn, addr[1]);
-							prom_halt();
-						}
-					} else if (q[1] != 1)
-						addr[1] = q[1];
-					if (p[2] == BTFIXUPCALL_NORM) {
-				norm_f:	
-						*addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2);
-						q[1] = 0;
-						break;
-					}
-#ifndef BTFIXUP_OPTIMIZE_NOP
-					goto norm_f;
-#else
-					if (!(addr[1] & 0x80000000)) {
-						if ((addr[1] & 0xc1c00000) != 0x01000000)	/* !SETHI */
-							goto bad_f; /* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */
-					} else {
-						if ((addr[1] & 0x01800000) == 0x01800000) {
-							if ((addr[1] & 0x01f80000) == 0x01e80000) {
-								/* RESTORE */
-								goto norm_f; /* It is dangerous to patch that */
-							}
-							goto bad_f;
-						}
-						if ((addr[1] & 0xffffe003) == 0x9e03e000) {
-							/* ADD %O7, XX, %o7 */
-							int displac = (addr[1] << 19);
-							
-							displac = (displac >> 21) + 2;
-							*addr = (0x10800000) + (displac & 0x3fffff);
-							q[1] = addr[1];
-							addr[1] = p[2];
-							break;
-						}
-						if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000)
-							goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */
-						if ((addr[1] & 0x3e000000) == 0x1e000000)
-							goto norm_f; /* rd is %o7. We'd better take care. */
-					}
-					if (p[2] == BTFIXUPCALL_NOP) {
-						*addr = 0x01000000;
-						q[1] = 1;
-						break;
-					}
-#ifndef BTFIXUP_OPTIMIZE_OTHER
-					goto norm_f;
-#else
-					if (addr[1] == 0x01000000) {	/* NOP in the delay slot */
-						q[1] = addr[1];
-						*addr = p[2];
-						break;
-					}
-					if ((addr[1] & 0xc0000000) != 0xc0000000) {
-						/* Not a memory operation */
-						if ((addr[1] & 0x30000000) == 0x10000000) {
-							/* Ok, non-memory op with rd %oX */
-							if ((addr[1] & 0x3e000000) == 0x1c000000)
-								goto bad_f; /* Aiee. Someone is playing strange %sp tricks */
-							if ((addr[1] & 0x3e000000) > 0x12000000 ||
-							    ((addr[1] & 0x3e000000) == 0x12000000 &&
-							     p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) ||
-							    ((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) {
-								/* Nobody uses the result. We can nop it out. */
-								*addr = p[2];
-								q[1] = addr[1];
-								addr[1] = 0x01000000;
-								break;
-							}
-							if ((addr[1] & 0xf1ffffe0) == 0x90100000) {
-								/* MOV %reg, %Ox */
-								if ((addr[1] & 0x3e000000) == 0x10000000 &&
-								    (p[2] & 0x7c000) == 0x20000) {
-								    	/* Ok, it is call xx; mov reg, %o0 and call optimizes
-								    	   to doing something on %o0. Patch the patch. */
-									*addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14);
-									q[1] = addr[1];
-									addr[1] = 0x01000000;
-									break;
-								}
-								if ((addr[1] & 0x3e000000) == 0x12000000 &&
-								    p[2] == BTFIXUPCALL_STO1O0) {
-								    	*addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25);
-								    	q[1] = addr[1];
-								    	addr[1] = 0x01000000;
-								    	break;
-								}
-							}
-						}
-					}
-					*addr = addr[1];
-					q[1] = addr[1];
-					addr[1] = p[2];
-					break;
-#endif /* BTFIXUP_OPTIMIZE_OTHER */
-#endif /* BTFIXUP_OPTIMIZE_NOP */
-				case 'b':	/* BLACKBOX */
-					/* Has to be sethi i, xx */
-					if ((insn & 0xc1c00000) != 0x01000000) {
-						prom_printf(insn_b, p, addr, insn);
-						prom_halt();
-					} else {
-						void (*do_fixup)(unsigned *);
-						
-						do_fixup = (void (*)(unsigned *))p[1];
-						do_fixup(addr);
-					}
-					break;
-				case 's':	/* SIMM13 */
-					/* Has to be or %g0, i, xx */
-					if ((insn & 0xc1ffe000) != 0x80102000) {
-						prom_printf(insn_s, p, addr, insn);
-						prom_halt();
-					}
-					set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff));
-					break;
-				case 'h':	/* SETHI */
-					/* Has to be sethi i, xx */
-					if ((insn & 0xc1c00000) != 0x01000000) {
-						prom_printf(insn_h, p, addr, insn);
-						prom_halt();
-					}
-					set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-					break;
-				case 'a':	/* HALF */
-					/* Has to be sethi i, xx or or %g0, i, xx */
-					if ((insn & 0xc1c00000) != 0x01000000 &&
-					    (insn & 0xc1ffe000) != 0x80102000) {
-						prom_printf(insn_a, p, addr, insn);
-						prom_halt();
-					}
-					if (p[1] & 0x3ff)
-						set_addr(addr, q[1], fmangled, 
-							(insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff));
-					else
-						set_addr(addr, q[1], fmangled, 
-							(insn & 0x3e000000) | 0x01000000 | (p[1] >> 10));
-					break;
-				case 'i':	/* INT */
-					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
-						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-					else if ((insn & 0x80002000) == 0x80002000) /* %LO */
-						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
-					else {
-						prom_printf(insn_i, p, addr, insn);
-						prom_halt();
-					}
-					break;
-				}
-				count -= 2;
-				q += 2;
-			}
-		} else
-			p = q + count;
-	}
-#ifdef CONFIG_SMP
-	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
-#else
-	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
-#endif
-	if (!flush_cacheall) {
-		prom_printf(fca_und);
-		prom_halt();
-	}
-	(*flush_cacheall)();
-}
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index df3155a..f46cf6b 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -24,29 +24,19 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/memreg.h>
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 #include <asm/smp.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
 
-extern int prom_node_root;
-
 int show_unhandled_signals = 1;
 
 /* At boot time we determine these two values necessary for setting
  * up the segment maps and page table entries (pte's).
  */
 
-int num_segmaps, num_contexts;
-int invalid_segment;
-
-/* various Virtual Address Cache parameters we find at boot time... */
-
-int vac_size, vac_linesize, vac_do_hw_vac_flushes;
-int vac_entries_per_context, vac_entries_per_segment;
-int vac_entries_per_page;
+int num_contexts;
 
 /* Return how much physical memory we have.  */
 unsigned long probe_memory(void)
@@ -60,55 +50,36 @@
 	return total;
 }
 
-extern void sun4c_complete_all_stores(void);
-
-/* Whee, a level 15 NMI interrupt memory error.  Let's have fun... */
-asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
-				unsigned long svaddr, unsigned long aerr,
-				unsigned long avaddr)
-{
-	sun4c_complete_all_stores();
-	printk("FAULT: NMI received\n");
-	printk("SREGS: Synchronous Error %08lx\n", serr);
-	printk("       Synchronous Vaddr %08lx\n", svaddr);
-	printk("      Asynchronous Error %08lx\n", aerr);
-	printk("      Asynchronous Vaddr %08lx\n", avaddr);
-	if (sun4c_memerr_reg)
-		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
-	printk("REGISTER DUMP:\n");
-	show_regs(regs);
-	prom_halt();
-}
-
 static void unhandled_fault(unsigned long, struct task_struct *,
 		struct pt_regs *) __attribute__ ((noreturn));
 
-static void unhandled_fault(unsigned long address, struct task_struct *tsk,
-                     struct pt_regs *regs)
+static void __noreturn unhandled_fault(unsigned long address,
+				       struct task_struct *tsk,
+				       struct pt_regs *regs)
 {
-	if((unsigned long) address < PAGE_SIZE) {
+	if ((unsigned long) address < PAGE_SIZE) {
 		printk(KERN_ALERT
 		    "Unable to handle kernel NULL pointer dereference\n");
 	} else {
-		printk(KERN_ALERT "Unable to handle kernel paging request "
-		       "at virtual address %08lx\n", address);
+		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
+		       address);
 	}
 	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
 		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
 	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
 		(tsk->mm ? (unsigned long) tsk->mm->pgd :
-		 	(unsigned long) tsk->active_mm->pgd));
+			(unsigned long) tsk->active_mm->pgd));
 	die_if_kernel("Oops", regs);
 }
 
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc, 
+asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
 			    unsigned long address)
 {
 	struct pt_regs regs;
 	unsigned long g2;
 	unsigned int insn;
 	int i;
-	
+
 	i = search_extables_range(ret_pc, &g2);
 	switch (i) {
 	case 3:
@@ -128,14 +99,14 @@
 		/* for _from_ macros */
 		insn = *((unsigned int *) pc);
 		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
-			return 2; 
-		break; 
+			return 2;
+		break;
 
 	default:
 		break;
 	}
 
-	memset(&regs, 0, sizeof (regs));
+	memset(&regs, 0, sizeof(regs));
 	regs.pc = pc;
 	regs.npc = pc + 4;
 	__asm__ __volatile__(
@@ -198,11 +169,10 @@
 	if (text_fault)
 		return regs->pc;
 
-	if (regs->psr & PSR_PS) {
+	if (regs->psr & PSR_PS)
 		insn = *(unsigned int *) regs->pc;
-	} else {
+	else
 		__get_user(insn, (unsigned int *) regs->pc);
-	}
 
 	return safe_compute_effective_address(regs, insn);
 }
@@ -228,7 +198,7 @@
 	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
 			      (write ? FAULT_FLAG_WRITE : 0));
 
-	if(text_fault)
+	if (text_fault)
 		address = regs->pc;
 
 	/*
@@ -241,36 +211,32 @@
 	 * nothing more.
 	 */
 	code = SEGV_MAPERR;
-	if (!ARCH_SUN4C && address >= TASK_SIZE)
+	if (address >= TASK_SIZE)
 		goto vmalloc_fault;
 
 	/*
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-        if (in_atomic() || !mm)
-                goto no_context;
+	if (in_atomic() || !mm)
+		goto no_context;
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
 	down_read(&mm->mmap_sem);
 
-	/*
-	 * The kernel referencing a bad kernel pointer can lock up
-	 * a sun4c machine completely, so we must attempt recovery.
-	 */
-	if(!from_user && address >= PAGE_OFFSET)
+	if (!from_user && address >= PAGE_OFFSET)
 		goto bad_area;
 
 	vma = find_vma(mm, address);
-	if(!vma)
+	if (!vma)
 		goto bad_area;
-	if(vma->vm_start <= address)
+	if (vma->vm_start <= address)
 		goto good_area;
-	if(!(vma->vm_flags & VM_GROWSDOWN))
+	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
-	if(expand_stack(vma, address))
+	if (expand_stack(vma, address))
 		goto bad_area;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
@@ -278,12 +244,12 @@
 	 */
 good_area:
 	code = SEGV_ACCERR;
-	if(write) {
-		if(!(vma->vm_flags & VM_WRITE))
+	if (write) {
+		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
 		/* Allow reads even for write-only mappings */
-		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
 
@@ -349,14 +315,16 @@
 	g2 = regs->u_regs[UREG_G2];
 	if (!from_user) {
 		fixup = search_extables_range(regs->pc, &g2);
-		if (fixup > 10) { /* Values below are reserved for other things */
+		/* Values below 10 are reserved for other things */
+		if (fixup > 10) {
 			extern const unsigned __memset_start[];
 			extern const unsigned __memset_end[];
 			extern const unsigned __csum_partial_copy_start[];
 			extern const unsigned __csum_partial_copy_end[];
 
 #ifdef DEBUG_EXCEPTIONS
-			printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
+			printk("Exception: PC<%08lx> faddr<%08lx>\n",
+			       regs->pc, address);
 			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
 				regs->pc, fixup, g2);
 #endif
@@ -364,7 +332,7 @@
 			     regs->pc < (unsigned long)__memset_end) ||
 			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
 			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
-			        regs->u_regs[UREG_I4] = address;
+				regs->u_regs[UREG_I4] = address;
 				regs->u_regs[UREG_I5] = regs->pc;
 			}
 			regs->u_regs[UREG_G2] = g2;
@@ -373,8 +341,8 @@
 			return;
 		}
 	}
-	
-	unhandled_fault (address, tsk, regs);
+
+	unhandled_fault(address, tsk, regs);
 	do_exit(SIGKILL);
 
 /*
@@ -420,97 +388,12 @@
 
 		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
 			goto bad_area_nosemaphore;
+
 		*pmd = *pmd_k;
 		return;
 	}
 }
 
-asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
-			       unsigned long address)
-{
-	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
-					   unsigned long,pte_t *);
-	extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
-	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->mm;
-	pgd_t *pgdp;
-	pte_t *ptep;
-
-	if (text_fault) {
-		address = regs->pc;
-	} else if (!write &&
-		   !(regs->psr & PSR_PS)) {
-		unsigned int insn, __user *ip;
-
-		ip = (unsigned int __user *)regs->pc;
-		if (!get_user(insn, ip)) {
-			if ((insn & 0xc1680000) == 0xc0680000)
-				write = 1;
-		}
-	}
-
-	if (!mm) {
-		/* We are oopsing. */
-		do_sparc_fault(regs, text_fault, write, address);
-		BUG();	/* P3 Oops already, you bitch */
-	}
-
-	pgdp = pgd_offset(mm, address);
-	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);
-
-	if (pgd_val(*pgdp)) {
-	    if (write) {
-		if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
-				   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
-			unsigned long flags;
-
-			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
-				      _SUN4C_PAGE_MODIFIED |
-				      _SUN4C_PAGE_VALID |
-				      _SUN4C_PAGE_DIRTY);
-
-			local_irq_save(flags);
-			if (sun4c_get_segmap(address) != invalid_segment) {
-				sun4c_put_pte(address, pte_val(*ptep));
-				local_irq_restore(flags);
-				return;
-			}
-			local_irq_restore(flags);
-		}
-	    } else {
-		if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
-				   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
-			unsigned long flags;
-
-			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
-				      _SUN4C_PAGE_VALID);
-
-			local_irq_save(flags);
-			if (sun4c_get_segmap(address) != invalid_segment) {
-				sun4c_put_pte(address, pte_val(*ptep));
-				local_irq_restore(flags);
-				return;
-			}
-			local_irq_restore(flags);
-		}
-	    }
-	}
-
-	/* This conditional is 'interesting'. */
-	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
-	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
-		/* Note: It is safe to not grab the MMAP semaphore here because
-		 *       we know that update_mmu_cache() will not sleep for
-		 *       any reason (at least not in the current implementation)
-		 *       and therefore there is no danger of another thread getting
-		 *       on the CPU and doing a shrink_mmap() on this vma.
-		 */
-		sun4c_update_mmu_cache (find_vma(current->mm, address), address,
-					ptep);
-	else
-		do_sparc_fault(regs, text_fault, write, address);
-}
-
 /* This always deals with user addresses. */
 static void force_user_fault(unsigned long address, int write)
 {
@@ -523,21 +406,21 @@
 
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
-	if(!vma)
+	if (!vma)
 		goto bad_area;
-	if(vma->vm_start <= address)
+	if (vma->vm_start <= address)
 		goto good_area;
-	if(!(vma->vm_flags & VM_GROWSDOWN))
+	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
-	if(expand_stack(vma, address))
+	if (expand_stack(vma, address))
 		goto bad_area;
 good_area:
 	code = SEGV_ACCERR;
-	if(write) {
-		if(!(vma->vm_flags & VM_WRITE))
+	if (write) {
+		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
-		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
 	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
@@ -568,7 +451,7 @@
 	unsigned long sp;
 
 	sp = current_thread_info()->rwbuf_stkptrs[0];
-	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
 		force_user_fault(sp + 0x38, 1);
 	force_user_fault(sp, 1);
 
@@ -577,7 +460,7 @@
 
 void window_underflow_fault(unsigned long sp)
 {
-	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
 		force_user_fault(sp + 0x38, 0);
 	force_user_fault(sp, 0);
 
@@ -589,7 +472,7 @@
 	unsigned long sp;
 
 	sp = regs->u_regs[UREG_FP];
-	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
 		force_user_fault(sp + 0x38, 0);
 	force_user_fault(sp, 0);
 
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index c5f9021..ef5c779 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -27,7 +27,6 @@
 #include <linux/gfp.h>
 
 #include <asm/sections.h>
-#include <asm/vac-ops.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/vaddrs.h>
@@ -45,9 +44,6 @@
 unsigned long pfn_base;
 EXPORT_SYMBOL(pfn_base);
 
-unsigned long page_kernel;
-EXPORT_SYMBOL(page_kernel);
-
 struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
 unsigned long sparc_unmapped_base;
 
@@ -287,44 +283,16 @@
 }
 
 /*
- * check_pgt_cache
- *
- * This is called at the end of unmapping of VMA (zap_page_range),
- * to rescan the page cache for architecture specific things,
- * presumably something like sun4/sun4c PMEGs. Most architectures
- * define check_pgt_cache empty.
- *
- * We simply copy the 2.4 implementation for now.
- */
-static int pgt_cache_water[2] = { 25, 50 };
-
-void check_pgt_cache(void)
-{
-	do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
-}
-
-/*
  * paging_init() sets up the page tables: We call the MMU specific
  * init routine based upon the Sun model type on the Sparc.
  *
  */
-extern void sun4c_paging_init(void);
 extern void srmmu_paging_init(void);
 extern void device_scan(void);
 
-pgprot_t PAGE_SHARED __read_mostly;
-EXPORT_SYMBOL(PAGE_SHARED);
-
 void __init paging_init(void)
 {
 	switch(sparc_cpu_model) {
-	case sun4c:
-	case sun4e:
-	case sun4:
-		sun4c_paging_init();
-		sparc_unmapped_base = 0xe0000000;
-		BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
-		break;
 	case sparc_leon:
 		leon_init();
 		/* fall through */
@@ -332,7 +300,6 @@
 	case sun4d:
 		srmmu_paging_init();
 		sparc_unmapped_base = 0x50000000;
-		BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
 		break;
 	default:
 		prom_printf("paging_init: Cannot init paging on this Sparc\n");
@@ -341,24 +308,6 @@
 		prom_halt();
 	}
 
-	/* Initialize the protection map with non-constant, MMU dependent values. */
-	protection_map[0] = PAGE_NONE;
-	protection_map[1] = PAGE_READONLY;
-	protection_map[2] = PAGE_COPY;
-	protection_map[3] = PAGE_COPY;
-	protection_map[4] = PAGE_READONLY;
-	protection_map[5] = PAGE_READONLY;
-	protection_map[6] = PAGE_COPY;
-	protection_map[7] = PAGE_COPY;
-	protection_map[8] = PAGE_NONE;
-	protection_map[9] = PAGE_READONLY;
-	protection_map[10] = PAGE_SHARED;
-	protection_map[11] = PAGE_SHARED;
-	protection_map[12] = PAGE_READONLY;
-	protection_map[13] = PAGE_READONLY;
-	protection_map[14] = PAGE_SHARED;
-	protection_map[15] = PAGE_SHARED;
-	btfixup();
 	prom_build_devicetree();
 	of_fill_in_cpu_data();
 	device_scan();
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 21faaee..6026fdd 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -741,7 +741,6 @@
 struct node_mem_mask {
 	unsigned long mask;
 	unsigned long val;
-	unsigned long bootmem_paddr;
 };
 static struct node_mem_mask node_masks[MAX_NUMNODES];
 static int num_node_masks;
@@ -806,12 +805,6 @@
 
 	return start;
 }
-#else
-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
-{
-	*nid = 0;
-	return end;
-}
 #endif
 
 /* This must be invoked after performing all of the necessary
@@ -820,10 +813,11 @@
  */
 static void __init allocate_node_data(int nid)
 {
-	unsigned long paddr, num_pages, start_pfn, end_pfn;
 	struct pglist_data *p;
-
+	unsigned long start_pfn, end_pfn;
 #ifdef CONFIG_NEED_MULTIPLE_NODES
+	unsigned long paddr;
+
 	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
 	if (!paddr) {
 		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
@@ -832,7 +826,7 @@
 	NODE_DATA(nid) = __va(paddr);
 	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
-	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+	NODE_DATA(nid)->node_id = nid;
 #endif
 
 	p = NODE_DATA(nid);
@@ -840,18 +834,6 @@
 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 	p->node_start_pfn = start_pfn;
 	p->node_spanned_pages = end_pfn - start_pfn;
-
-	if (p->node_spanned_pages) {
-		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
-
-		paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid);
-		if (!paddr) {
-			prom_printf("Cannot allocate bootmap for nid[%d]\n",
-				  nid);
-			prom_halt();
-		}
-		node_masks[nid].bootmem_paddr = paddr;
-	}
 }
 
 static void init_node_masks_nonnuma(void)
@@ -1292,75 +1274,9 @@
 	node_set_online(0);
 }
 
-static void __init reserve_range_in_node(int nid, unsigned long start,
-					 unsigned long end)
-{
-	numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
-		nid, start, end);
-	while (start < end) {
-		unsigned long this_end;
-		int n;
-
-		this_end = memblock_nid_range(start, end, &n);
-		if (n == nid) {
-			numadbg("      MATCH reserving range [%lx:%lx]\n",
-				start, this_end);
-			reserve_bootmem_node(NODE_DATA(nid), start,
-					     (this_end - start), BOOTMEM_DEFAULT);
-		} else
-			numadbg("      NO MATCH, advancing start to %lx\n",
-				this_end);
-
-		start = this_end;
-	}
-}
-
-static void __init trim_reserved_in_node(int nid)
-{
-	struct memblock_region *reg;
-
-	numadbg("  trim_reserved_in_node(%d)\n", nid);
-
-	for_each_memblock(reserved, reg)
-		reserve_range_in_node(nid, reg->base, reg->base + reg->size);
-}
-
-static void __init bootmem_init_one_node(int nid)
-{
-	struct pglist_data *p;
-
-	numadbg("bootmem_init_one_node(%d)\n", nid);
-
-	p = NODE_DATA(nid);
-
-	if (p->node_spanned_pages) {
-		unsigned long paddr = node_masks[nid].bootmem_paddr;
-		unsigned long end_pfn;
-
-		end_pfn = p->node_start_pfn + p->node_spanned_pages;
-
-		numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n",
-			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
-
-		init_bootmem_node(p, paddr >> PAGE_SHIFT,
-				  p->node_start_pfn, end_pfn);
-
-		numadbg("  free_bootmem_with_active_regions(%d, %lx)\n",
-			nid, end_pfn);
-		free_bootmem_with_active_regions(nid, end_pfn);
-
-		trim_reserved_in_node(nid);
-
-		numadbg("  sparse_memory_present_with_active_regions(%d)\n",
-			nid);
-		sparse_memory_present_with_active_regions(nid);
-	}
-}
-
 static unsigned long __init bootmem_init(unsigned long phys_base)
 {
 	unsigned long end_pfn;
-	int nid;
 
 	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	max_pfn = max_low_pfn = end_pfn;
@@ -1369,11 +1285,12 @@
 	if (bootmem_init_numa() < 0)
 		bootmem_init_nonnuma();
 
+	/* Dump memblock with node info. */
+	memblock_dump_all();
+
 	/* XXX cpu notifier XXX */
 
-	for_each_online_node(nid)
-		bootmem_init_one_node(nid);
-
+	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
 
 	return end_pfn;
@@ -1701,6 +1618,7 @@
 {
 	unsigned long end_pfn, shift, phys_base;
 	unsigned long real_end, i;
+	int node;
 
 	/* These build time checkes make sure that the dcache_dirty_cpu()
 	 * page->flags usage will work.
@@ -1826,22 +1744,24 @@
 #endif
 	}
 
+	/* Setup bootmem... */
+	last_valid_pfn = end_pfn = bootmem_init(phys_base);
+
 	/* Once the OF device tree and MDESC have been setup, we know
 	 * the list of possible cpus.  Therefore we can allocate the
 	 * IRQ stacks.
 	 */
 	for_each_possible_cpu(i) {
-		/* XXX Use node local allocations... XXX */
-		softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
-		hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+		node = cpu_to_node(i);
+
+		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+							THREAD_SIZE,
+							THREAD_SIZE, 0);
+		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+							THREAD_SIZE,
+							THREAD_SIZE, 0);
 	}
 
-	/* Setup bootmem... */
-	last_valid_pfn = end_pfn = bootmem_init(phys_base);
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-	max_mapnr = last_valid_pfn;
-#endif
 	kernel_physical_mapping_init();
 
 	{
@@ -1973,6 +1893,7 @@
 					free_all_bootmem_node(NODE_DATA(i));
 			}
 		}
+		totalram_pages += free_low_memory_core_early(MAX_NUMNODES);
 	}
 #else
 	totalram_pages = free_all_bootmem();
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index fc58c3e..eb99862 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -197,7 +197,7 @@
 }
 
 #ifdef CONFIG_SBUS
-static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
+static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
 {
 	struct iounit_struct *iounit = dev->archdata.iommu;
 	unsigned long page, end;
@@ -242,29 +242,18 @@
 }
 #endif
 
-static char *iounit_lockarea(char *vaddr, unsigned long len)
-{
-/* FIXME: Write this */
-	return vaddr;
-}
-
-static void iounit_unlockarea(char *vaddr, unsigned long len)
-{
-/* FIXME: Write this */
-}
+static const struct sparc32_dma_ops iounit_dma_ops = {
+	.get_scsi_one		= iounit_get_scsi_one,
+	.get_scsi_sgl		= iounit_get_scsi_sgl,
+	.release_scsi_one	= iounit_release_scsi_one,
+	.release_scsi_sgl	= iounit_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area		= iounit_map_dma_area,
+	.unmap_dma_area		= iounit_unmap_dma_area,
+#endif
+};
 
 void __init ld_mmu_iounit(void)
 {
-	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
-	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);
-
-	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);
-
-#ifdef CONFIG_SBUS
-	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
-#endif
+	sparc32_dma_ops = &iounit_dma_ops;
 }
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 07fc6a6..a8a58ca 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -39,8 +39,6 @@
 
 /* srmmu.c */
 extern int viking_mxcc_present;
-BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
-#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
 extern int flush_page_for_dma_global;
 static int viking_flush;
 /* viking.S */
@@ -143,7 +141,6 @@
 
 subsys_initcall(iommu_init);
 
-/* This begs to be btfixup-ed by srmmu. */
 /* Flush the iotlb entries to ram. */
 /* This could be better if we didn't have to flush whole pages. */
 static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
@@ -216,11 +213,6 @@
 	return busa + off;
 }
 
-static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
-{
-	return iommu_get_scsi_one(dev, vaddr, len);
-}
-
 static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
 {
 	flush_page_for_dma(0);
@@ -238,19 +230,6 @@
 	return iommu_get_scsi_one(dev, vaddr, len);
 }
 
-static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
-{
-	int n;
-
-	while (sz != 0) {
-		--sz;
-		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-		sg->dma_length = sg->length;
-		sg = sg_next(sg);
-	}
-}
-
 static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
 {
 	int n;
@@ -426,40 +405,36 @@
 }
 #endif
 
-static char *iommu_lockarea(char *vaddr, unsigned long len)
-{
-	return vaddr;
-}
+static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
+	.get_scsi_one		= iommu_get_scsi_one_gflush,
+	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
+	.release_scsi_one	= iommu_release_scsi_one,
+	.release_scsi_sgl	= iommu_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area		= iommu_map_dma_area,
+	.unmap_dma_area		= iommu_unmap_dma_area,
+#endif
+};
 
-static void iommu_unlockarea(char *vaddr, unsigned long len)
-{
-}
+static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
+	.get_scsi_one		= iommu_get_scsi_one_pflush,
+	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
+	.release_scsi_one	= iommu_release_scsi_one,
+	.release_scsi_sgl	= iommu_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area		= iommu_map_dma_area,
+	.unmap_dma_area		= iommu_unmap_dma_area,
+#endif
+};
 
 void __init ld_mmu_iommu(void)
 {
-	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
-	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
-	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);
-
-	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
-		/* IO coherent chip */
-		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
-		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
-	} else if (flush_page_for_dma_global) {
+	if (flush_page_for_dma_global) {
 		/* flush_page_for_dma flushes everything, no matter of what page is it */
-		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
+		sparc32_dma_ops = &iommu_dma_gflush_ops;
 	} else {
-		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
+		sparc32_dma_ops = &iommu_dma_pflush_ops;
 	}
-	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);
-
-#ifdef CONFIG_SBUS
-	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
-#endif
 
 	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
 		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index 13c2169..4c67ae6 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -15,9 +15,23 @@
 #include <asm/leon.h>
 #include <asm/tlbflush.h>
 
+#include "srmmu.h"
+
 int leon_flush_during_switch = 1;
 int srmmu_swprobe_trace;
 
+static inline unsigned long leon_get_ctable_ptr(void)
+{
+	unsigned int retval;
+
+	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+			     "=r" (retval) :
+			     "r" (SRMMU_CTXTBL_PTR),
+			     "i" (ASI_LEON_MMUREGS));
+	return (retval & SRMMU_CTX_PMASK) << 4;
+}
+
+
 unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
 {
 
@@ -33,10 +47,10 @@
 	if (srmmu_swprobe_trace)
 		printk(KERN_INFO "swprobe: trace on\n");
 
-	ctxtbl = srmmu_get_ctable_ptr();
+	ctxtbl = leon_get_ctable_ptr();
 	if (!(ctxtbl)) {
 		if (srmmu_swprobe_trace)
-			printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n");
+			printk(KERN_INFO "swprobe: leon_get_ctable_ptr returned 0=>0\n");
 		return 0;
 	}
 	if (!_pfn_valid(PFN(ctxtbl))) {
@@ -258,3 +272,80 @@
 	if (leon_flush_during_switch)
 		leon_flush_cache_all();
 }
+
+static void leon_flush_cache_mm(struct mm_struct *mm)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+	leon_flush_pcache_all(vma, page);
+}
+
+static void leon_flush_cache_range(struct vm_area_struct *vma,
+				   unsigned long start,
+				   unsigned long end)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_tlb_mm(struct mm_struct *mm)
+{
+	leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_page(struct vm_area_struct *vma,
+				unsigned long page)
+{
+	leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_range(struct vm_area_struct *vma,
+				 unsigned long start,
+				 unsigned long end)
+{
+	leon_flush_tlb_all();
+}
+
+static void leon_flush_page_to_ram(unsigned long page)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_page_for_dma(unsigned long page)
+{
+	leon_flush_dcache_all();
+}
+
+void __init poke_leonsparc(void)
+{
+}
+
+static const struct sparc32_cachetlb_ops leon_ops = {
+	.cache_all	= leon_flush_cache_all,
+	.cache_mm	= leon_flush_cache_mm,
+	.cache_page	= leon_flush_cache_page,
+	.cache_range	= leon_flush_cache_range,
+	.tlb_all	= leon_flush_tlb_all,
+	.tlb_mm		= leon_flush_tlb_mm,
+	.tlb_page	= leon_flush_tlb_page,
+	.tlb_range	= leon_flush_tlb_range,
+	.page_to_ram	= leon_flush_page_to_ram,
+	.sig_insns	= leon_flush_sig_insns,
+	.page_for_dma	= leon_flush_page_for_dma,
+};
+
+void __init init_leon(void)
+{
+	srmmu_name = "LEON";
+	sparc32_cachetlb_ops = &leon_ops;
+	poke_srmmu = poke_leonsparc;
+
+	leon_flush_during_switch = leon_flush_needed();
+}
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
deleted file mode 100644
index c5bf2a6..0000000
--- a/arch/sparc/mm/loadmmu.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * loadmmu.c:  This code loads up all the mm function pointers once the
- *             machine type has been determined.  It also sets the static
- *             mmu values such as PAGE_NONE, etc.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/oplib.h>
-
-struct ctx_list *ctx_list_pool;
-struct ctx_list ctx_free;
-struct ctx_list ctx_used;
-
-extern void ld_mmu_sun4c(void);
-extern void ld_mmu_srmmu(void);
-
-void __init load_mmu(void)
-{
-	switch(sparc_cpu_model) {
-	case sun4c:
-	case sun4:
-		ld_mmu_sun4c();
-		break;
-	case sun4m:
-	case sun4d:
-	case sparc_leon:
-		ld_mmu_srmmu();
-		break;
-	default:
-		prom_printf("load_mmu: %d unsupported\n", (int)sparc_cpu_model);
-		prom_halt();
-	}
-	btfixup();
-}
diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c
deleted file mode 100644
index 4e62c27..0000000
--- a/arch/sparc/mm/nosun4c.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * nosun4c.c: This file is a bunch of dummies for SMP compiles, 
- *         so that it does not need sun4c and avoid ifdefs.
- *
- * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <asm/pgtable.h>
-
-static char shouldnothappen[] __initdata = "32bit SMP kernel only supports sun4m and sun4d\n";
-
-/* Dummies */
-struct sun4c_mmu_ring {
-	unsigned long xxx1[3];
-	unsigned char xxx2[2];
-	int xxx3;
-};
-struct sun4c_mmu_ring sun4c_kernel_ring;
-struct sun4c_mmu_ring sun4c_kfree_ring;
-unsigned long sun4c_kernel_faults;
-unsigned long *sun4c_memerr_reg;
-
-static void __init should_not_happen(void)
-{
-	prom_printf(shouldnothappen);
-	prom_halt();
-}
-
-unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
-{
-	should_not_happen();
-	return 0;
-}
-
-void __init ld_mmu_sun4c(void)
-{
-	should_not_happen();
-}
-
-void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
-{
-}
-
-void sun4c_unmapioaddr(unsigned long virt_addr)
-{
-}
-
-void sun4c_complete_all_stores(void)
-{
-}
-
-pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
-{
-	return NULL;
-}
-
-pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address)
-{
-	return NULL;
-}
-
-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
-{
-}
-
-void __init sun4c_probe_vac(void)
-{
-	should_not_happen();
-}
-
-void __init sun4c_probe_memerr_reg(void)
-{
-	should_not_happen();
-}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index cbef74e..8e97e03 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -48,39 +48,37 @@
 #include <asm/turbosparc.h>
 #include <asm/leon.h>
 
-#include <asm/btfixup.h>
+#include "srmmu.h"
 
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
 int vac_line_size;
 
+struct ctx_list *ctx_list_pool;
+struct ctx_list ctx_free;
+struct ctx_list ctx_used;
+
 extern struct resource sparc_iomap;
 
 extern unsigned long last_valid_pfn;
 
-extern unsigned long page_kernel;
-
 static pgd_t *srmmu_swapper_pg_dir;
 
+const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
+
 #ifdef CONFIG_SMP
+const struct sparc32_cachetlb_ops *local_ops;
+
 #define FLUSH_BEGIN(mm)
 #define FLUSH_END
 #else
-#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
+#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
 #define FLUSH_END	}
 #endif
 
-BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
-#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
-
 int flush_page_for_dma_global = 1;
 
-#ifdef CONFIG_SMP
-BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
-#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
-#endif
-
 char *srmmu_name;
 
 ctxd_t *srmmu_ctx_table_phys;
@@ -91,28 +89,6 @@
 
 static int is_hypersparc;
 
-/*
- * In general all page table modifications should use the V8 atomic
- * swap instruction.  This insures the mmu and the cpu are in sync
- * with respect to ref/mod bits in the page tables.
- */
-static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
-{
-	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
-	return value;
-}
-
-static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
-{
-	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
-}
-
-/* The very generic SRMMU page table operations. */
-static inline int srmmu_device_memory(unsigned long x)
-{
-	return ((x & 0xF0000000) != 0);
-}
-
 static int srmmu_cache_pagetables;
 
 /* these will be initialized in srmmu_nocache_calcsize() */
@@ -129,145 +105,39 @@
 void *srmmu_nocache_bitmap;
 static struct bit_map srmmu_nocache_map;
 
-static unsigned long srmmu_pte_pfn(pte_t pte)
-{
-	if (srmmu_device_memory(pte_val(pte))) {
-		/* Just return something that will cause
-		 * pfn_valid() to return false.  This makes
-		 * copy_one_pte() to just directly copy to
-		 * PTE over.
-		 */
-		return ~0UL;
-	}
-	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
-}
-
-static struct page *srmmu_pmd_page(pmd_t pmd)
-{
-
-	if (srmmu_device_memory(pmd_val(pmd)))
-		BUG();
-	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
-}
-
-static inline unsigned long srmmu_pgd_page(pgd_t pgd)
-{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
-
-
-static inline int srmmu_pte_none(pte_t pte)
-{ return !(pte_val(pte) & 0xFFFFFFF); }
-
-static inline int srmmu_pte_present(pte_t pte)
-{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
-
-static inline void srmmu_pte_clear(pte_t *ptep)
-{ srmmu_set_pte(ptep, __pte(0)); }
-
 static inline int srmmu_pmd_none(pmd_t pmd)
 { return !(pmd_val(pmd) & 0xFFFFFFF); }
 
-static inline int srmmu_pmd_bad(pmd_t pmd)
-{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
-
-static inline int srmmu_pmd_present(pmd_t pmd)
-{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-
-static inline void srmmu_pmd_clear(pmd_t *pmdp) {
-	int i;
-	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
-		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
-}
-
-static inline int srmmu_pgd_none(pgd_t pgd)          
-{ return !(pgd_val(pgd) & 0xFFFFFFF); }
-
-static inline int srmmu_pgd_bad(pgd_t pgd)
-{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
-
-static inline int srmmu_pgd_present(pgd_t pgd)
-{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-
-static inline void srmmu_pgd_clear(pgd_t * pgdp)
-{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }
-
-static inline pte_t srmmu_pte_wrprotect(pte_t pte)
-{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
-
-static inline pte_t srmmu_pte_mkclean(pte_t pte)
-{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
-
-static inline pte_t srmmu_pte_mkold(pte_t pte)
-{ return __pte(pte_val(pte) & ~SRMMU_REF);}
-
-static inline pte_t srmmu_pte_mkwrite(pte_t pte)
-{ return __pte(pte_val(pte) | SRMMU_WRITE);}
-
-static inline pte_t srmmu_pte_mkdirty(pte_t pte)
-{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
-
-static inline pte_t srmmu_pte_mkyoung(pte_t pte)
-{ return __pte(pte_val(pte) | SRMMU_REF);}
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
-{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
-
-static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
-{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
-
-static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
-{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }
-
 /* XXX should we hyper_flush_whole_icache here - Anton */
 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
+{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
 
-static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
-{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
-
-static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
+void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
 	unsigned long ptp;	/* Physical address, shifted right by 4 */
 	int i;
 
 	ptp = __nocache_pa((unsigned long) ptep) >> 4;
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
+		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
 		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
 	}
 }
 
-static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
+void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 {
 	unsigned long ptp;	/* Physical address, shifted right by 4 */
 	int i;
 
 	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
+		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
 		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
 	}
 }
 
-static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
-{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
-
-/* to find an entry in a top-level page table... */
-static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
-{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
-
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
-{
-	return (pmd_t *) srmmu_pgd_page(*dir) +
-	    ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-}
-
 /* Find an entry in the third-level page table.. */ 
-static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
+pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
 {
 	void *pte;
 
@@ -276,23 +146,6 @@
 	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 }
 
-static unsigned long srmmu_swp_type(swp_entry_t entry)
-{
-	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
-}
-
-static unsigned long srmmu_swp_offset(swp_entry_t entry)
-{
-	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
-}
-
-static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
-{
-	return (swp_entry_t) {
-		  (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
-		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
-}
-
 /*
  * size: bytes to allocate in the nocache area.
  * align: bytes, number to align at.
@@ -325,7 +178,7 @@
 	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
 }
 
-static unsigned long srmmu_get_nocache(int size, int align)
+unsigned long srmmu_get_nocache(int size, int align)
 {
 	unsigned long tmp;
 
@@ -337,7 +190,7 @@
 	return tmp;
 }
 
-static void srmmu_free_nocache(unsigned long vaddr, int size)
+void srmmu_free_nocache(unsigned long vaddr, int size)
 {
 	int offset;
 
@@ -429,15 +282,15 @@
 
 	while (vaddr < srmmu_nocache_end) {
 		pgd = pgd_offset_k(vaddr);
-		pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
-		pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);
+		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
+		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
 
 		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
 
 		if (srmmu_cache_pagetables)
 			pteval |= SRMMU_CACHE;
 
-		srmmu_set_pte(__nocache_fix(pte), __pte(pteval));
+		set_pte(__nocache_fix(pte), __pte(pteval));
 
 		vaddr += PAGE_SIZE;
 		paddr += PAGE_SIZE;
@@ -447,7 +300,7 @@
 	flush_tlb_all();
 }
 
-static inline pgd_t *srmmu_get_pgd_fast(void)
+pgd_t *get_pgd_fast(void)
 {
 	pgd_t *pgd = NULL;
 
@@ -462,21 +315,6 @@
 	return pgd;
 }
 
-static void srmmu_free_pgd_fast(pgd_t *pgd)
-{
-	srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
-}
-
-static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
-}
-
-static void srmmu_pmd_free(pmd_t * pmd)
-{
-	srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
-}
-
 /*
  * Hardware needs alignment to 256 only, but we align to whole page size
  * to reduce fragmentation problems due to the buddy principle.
@@ -485,31 +323,19 @@
  * Alignments up to the page size are the same for physical and virtual
  * addresses of the nocache area.
  */
-static pte_t *
-srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
-}
-
-static pgtable_t
-srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	unsigned long pte;
 	struct page *page;
 
-	if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
+	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
 		return NULL;
 	page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
 	pgtable_page_ctor(page);
 	return page;
 }
 
-static void srmmu_free_pte_fast(pte_t *pte)
-{
-	srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
-}
-
-static void srmmu_pte_free(pgtable_t pte)
+void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
 	unsigned long p;
 
@@ -560,8 +386,8 @@
 }
 
 
-static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
-    struct task_struct *tsk, int cpu)
+void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
+	       struct task_struct *tsk)
 {
 	if(mm->context == NO_CONTEXT) {
 		spin_lock(&srmmu_context_spinlock);
@@ -590,8 +416,8 @@
 
 	physaddr &= PAGE_MASK;
 	pgdp = pgd_offset_k(virt_addr);
-	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
-	ptep = srmmu_pte_offset(pmdp, virt_addr);
+	pmdp = pmd_offset(pgdp, virt_addr);
+	ptep = pte_offset_kernel(pmdp, virt_addr);
 	tmp = (physaddr >> 4) | SRMMU_ET_PTE;
 
 	/*
@@ -602,11 +428,11 @@
 	tmp |= (bus_type << 28);
 	tmp |= SRMMU_PRIV;
 	__flush_page_to_ram(virt_addr);
-	srmmu_set_pte(ptep, __pte(tmp));
+	set_pte(ptep, __pte(tmp));
 }
 
-static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
-    unsigned long xva, unsigned int len)
+void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
+		      unsigned long xva, unsigned int len)
 {
 	while (len != 0) {
 		len -= PAGE_SIZE;
@@ -624,14 +450,14 @@
 	pte_t *ptep;
 
 	pgdp = pgd_offset_k(virt_addr);
-	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
-	ptep = srmmu_pte_offset(pmdp, virt_addr);
+	pmdp = pmd_offset(pgdp, virt_addr);
+	ptep = pte_offset_kernel(pmdp, virt_addr);
 
 	/* No need to flush uncacheable page. */
-	srmmu_pte_clear(ptep);
+	__pte_clear(ptep);
 }
 
-static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
+void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
 {
 	while (len != 0) {
 		len -= PAGE_SIZE;
@@ -647,10 +473,9 @@
  * pool.  As a side effect we are putting a little too much pressure
  * on the gfp() subsystem.  This setup also makes the logic of the
  * iommu mapping code a lot easier as we can transparently handle
- * mappings on the kernel stack without any special code as we did
- * need on the sun4c.
+ * mappings on the kernel stack without any special code.
  */
-static struct thread_info *srmmu_alloc_thread_info_node(int node)
+struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
 {
 	struct thread_info *ret;
 
@@ -664,7 +489,7 @@
 	return ret;
 }
 
-static void srmmu_free_thread_info(struct thread_info *ti)
+void free_thread_info(struct thread_info *ti)
 {
 	free_pages((unsigned long)ti, THREAD_INFO_ORDER);
 }
@@ -683,38 +508,6 @@
 extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 extern void tsunami_setup_blockops(void);
 
-/*
- * Workaround, until we find what's going on with Swift. When low on memory,
- * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
- * out it is already in page tables/ fault again on the same instruction.
- * I really don't understand it, have checked it and contexts
- * are right, flush_tlb_all is done as well, and it faults again...
- * Strange. -jj
- *
- * The following code is a deadwood that may be necessary when
- * we start to make precise page flushes again. --zaitcev
- */
-static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
-{
-#if 0
-	static unsigned long last;
-	unsigned int val;
-	/* unsigned int n; */
-
-	if (address == last) {
-		val = srmmu_hwprobe(address);
-		if (val != 0 && pte_val(*ptep) != val) {
-			printk("swift_update_mmu_cache: "
-			    "addr %lx put %08x probed %08x from %p\n",
-			    address, pte_val(*ptep), val,
-			    __builtin_return_address(0));
-			srmmu_flush_whole_tlb();
-		}
-	}
-	last = address;
-#endif
-}
-
 /* swift.S */
 extern void swift_flush_cache_all(void);
 extern void swift_flush_cache_mm(struct mm_struct *mm);
@@ -767,244 +560,6 @@
  * with respect to cache coherency.
  */
 
-/* Cypress flushes. */
-static void cypress_flush_cache_all(void)
-{
-	volatile unsigned long cypress_sucks;
-	unsigned long faddr, tagval;
-
-	flush_user_windows();
-	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
-		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
-				     "=r" (tagval) :
-				     "r" (faddr), "r" (0x40000),
-				     "i" (ASI_M_DATAC_TAG));
-
-		/* If modified and valid, kick it. */
-		if((tagval & 0x60) == 0x60)
-			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
-	}
-}
-
-static void cypress_flush_cache_mm(struct mm_struct *mm)
-{
-	register unsigned long a, b, c, d, e, f, g;
-	unsigned long flags, faddr;
-	int octx;
-
-	FLUSH_BEGIN(mm)
-	flush_user_windows();
-	local_irq_save(flags);
-	octx = srmmu_get_context();
-	srmmu_set_context(mm->context);
-	a = 0x20; b = 0x40; c = 0x60;
-	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-
-	faddr = (0x10000 - 0x100);
-	goto inside;
-	do {
-		faddr -= 0x100;
-	inside:
-		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-				     "sta %%g0, [%0 + %2] %1\n\t"
-				     "sta %%g0, [%0 + %3] %1\n\t"
-				     "sta %%g0, [%0 + %4] %1\n\t"
-				     "sta %%g0, [%0 + %5] %1\n\t"
-				     "sta %%g0, [%0 + %6] %1\n\t"
-				     "sta %%g0, [%0 + %7] %1\n\t"
-				     "sta %%g0, [%0 + %8] %1\n\t" : :
-				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
-				     "r" (a), "r" (b), "r" (c), "r" (d),
-				     "r" (e), "r" (f), "r" (g));
-	} while(faddr);
-	srmmu_set_context(octx);
-	local_irq_restore(flags);
-	FLUSH_END
-}
-
-static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	register unsigned long a, b, c, d, e, f, g;
-	unsigned long flags, faddr;
-	int octx;
-
-	FLUSH_BEGIN(mm)
-	flush_user_windows();
-	local_irq_save(flags);
-	octx = srmmu_get_context();
-	srmmu_set_context(mm->context);
-	a = 0x20; b = 0x40; c = 0x60;
-	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-
-	start &= SRMMU_REAL_PMD_MASK;
-	while(start < end) {
-		faddr = (start + (0x10000 - 0x100));
-		goto inside;
-		do {
-			faddr -= 0x100;
-		inside:
-			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-					     "sta %%g0, [%0 + %2] %1\n\t"
-					     "sta %%g0, [%0 + %3] %1\n\t"
-					     "sta %%g0, [%0 + %4] %1\n\t"
-					     "sta %%g0, [%0 + %5] %1\n\t"
-					     "sta %%g0, [%0 + %6] %1\n\t"
-					     "sta %%g0, [%0 + %7] %1\n\t"
-					     "sta %%g0, [%0 + %8] %1\n\t" : :
-					     "r" (faddr),
-					     "i" (ASI_M_FLUSH_SEG),
-					     "r" (a), "r" (b), "r" (c), "r" (d),
-					     "r" (e), "r" (f), "r" (g));
-		} while (faddr != start);
-		start += SRMMU_REAL_PMD_SIZE;
-	}
-	srmmu_set_context(octx);
-	local_irq_restore(flags);
-	FLUSH_END
-}
-
-static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
-{
-	register unsigned long a, b, c, d, e, f, g;
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long flags, line;
-	int octx;
-
-	FLUSH_BEGIN(mm)
-	flush_user_windows();
-	local_irq_save(flags);
-	octx = srmmu_get_context();
-	srmmu_set_context(mm->context);
-	a = 0x20; b = 0x40; c = 0x60;
-	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-
-	page &= PAGE_MASK;
-	line = (page + PAGE_SIZE) - 0x100;
-	goto inside;
-	do {
-		line -= 0x100;
-	inside:
-			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-					     "sta %%g0, [%0 + %2] %1\n\t"
-					     "sta %%g0, [%0 + %3] %1\n\t"
-					     "sta %%g0, [%0 + %4] %1\n\t"
-					     "sta %%g0, [%0 + %5] %1\n\t"
-					     "sta %%g0, [%0 + %6] %1\n\t"
-					     "sta %%g0, [%0 + %7] %1\n\t"
-					     "sta %%g0, [%0 + %8] %1\n\t" : :
-					     "r" (line),
-					     "i" (ASI_M_FLUSH_PAGE),
-					     "r" (a), "r" (b), "r" (c), "r" (d),
-					     "r" (e), "r" (f), "r" (g));
-	} while(line != page);
-	srmmu_set_context(octx);
-	local_irq_restore(flags);
-	FLUSH_END
-}
-
-/* Cypress is copy-back, at least that is how we configure it. */
-static void cypress_flush_page_to_ram(unsigned long page)
-{
-	register unsigned long a, b, c, d, e, f, g;
-	unsigned long line;
-
-	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
-	page &= PAGE_MASK;
-	line = (page + PAGE_SIZE) - 0x100;
-	goto inside;
-	do {
-		line -= 0x100;
-	inside:
-		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
-				     "sta %%g0, [%0 + %2] %1\n\t"
-				     "sta %%g0, [%0 + %3] %1\n\t"
-				     "sta %%g0, [%0 + %4] %1\n\t"
-				     "sta %%g0, [%0 + %5] %1\n\t"
-				     "sta %%g0, [%0 + %6] %1\n\t"
-				     "sta %%g0, [%0 + %7] %1\n\t"
-				     "sta %%g0, [%0 + %8] %1\n\t" : :
-				     "r" (line),
-				     "i" (ASI_M_FLUSH_PAGE),
-				     "r" (a), "r" (b), "r" (c), "r" (d),
-				     "r" (e), "r" (f), "r" (g));
-	} while(line != page);
-}
-
-/* Cypress is also IO cache coherent. */
-static void cypress_flush_page_for_dma(unsigned long page)
-{
-}
-
-/* Cypress has unified L2 VIPT, from which both instructions and data
- * are stored.  It does not have an onboard icache of any sort, therefore
- * no flush is necessary.
- */
-static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
-{
-}
-
-static void cypress_flush_tlb_all(void)
-{
-	srmmu_flush_whole_tlb();
-}
-
-static void cypress_flush_tlb_mm(struct mm_struct *mm)
-{
-	FLUSH_BEGIN(mm)
-	__asm__ __volatile__(
-	"lda	[%0] %3, %%g5\n\t"
-	"sta	%2, [%0] %3\n\t"
-	"sta	%%g0, [%1] %4\n\t"
-	"sta	%%g5, [%0] %3\n"
-	: /* no outputs */
-	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
-	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
-	: "g5");
-	FLUSH_END
-}
-
-static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long size;
-
-	FLUSH_BEGIN(mm)
-	start &= SRMMU_PGDIR_MASK;
-	size = SRMMU_PGDIR_ALIGN(end) - start;
-	__asm__ __volatile__(
-		"lda	[%0] %5, %%g5\n\t"
-		"sta	%1, [%0] %5\n"
-		"1:\n\t"
-		"subcc	%3, %4, %3\n\t"
-		"bne	1b\n\t"
-		" sta	%%g0, [%2 + %3] %6\n\t"
-		"sta	%%g5, [%0] %5\n"
-	: /* no outputs */
-	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
-	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
-	  "i" (ASI_M_FLUSH_PROBE)
-	: "g5", "cc");
-	FLUSH_END
-}
-
-static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	struct mm_struct *mm = vma->vm_mm;
-
-	FLUSH_BEGIN(mm)
-	__asm__ __volatile__(
-	"lda	[%0] %3, %%g5\n\t"
-	"sta	%1, [%0] %3\n\t"
-	"sta	%%g0, [%2] %4\n\t"
-	"sta	%%g5, [%0] %3\n"
-	: /* no outputs */
-	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
-	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
-	: "g5");
-	FLUSH_END
-}
-
 /* viking.S */
 extern void viking_flush_cache_all(void);
 extern void viking_flush_cache_mm(struct mm_struct *mm);
@@ -1065,21 +620,21 @@
 
 	while(start < end) {
 		pgdp = pgd_offset_k(start);
-		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
 			pmdp = (pmd_t *) __srmmu_get_nocache(
 			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 			if (pmdp == NULL)
 				early_pgtable_allocfail("pmd");
 			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
-			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
+			pgd_set(__nocache_fix(pgdp), pmdp);
 		}
-		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
+		pmdp = pmd_offset(__nocache_fix(pgdp), start);
 		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
 			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
 			memset(__nocache_fix(ptep), 0, PTE_SIZE);
-			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
+			pmd_set(__nocache_fix(pmdp), ptep);
 		}
 		if (start > (0xffffffffUL - PMD_SIZE))
 			break;
@@ -1096,21 +651,21 @@
 
 	while(start < end) {
 		pgdp = pgd_offset_k(start);
-		if(srmmu_pgd_none(*pgdp)) {
+		if (pgd_none(*pgdp)) {
 			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 			if (pmdp == NULL)
 				early_pgtable_allocfail("pmd");
 			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
-			srmmu_pgd_set(pgdp, pmdp);
+			pgd_set(pgdp, pmdp);
 		}
-		pmdp = srmmu_pmd_offset(pgdp, start);
+		pmdp = pmd_offset(pgdp, start);
 		if(srmmu_pmd_none(*pmdp)) {
 			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
 							     PTE_SIZE);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
 			memset(ptep, 0, PTE_SIZE);
-			srmmu_pmd_set(pmdp, ptep);
+			pmd_set(pmdp, ptep);
 		}
 		if (start > (0xffffffffUL - PMD_SIZE))
 			break;
@@ -1162,21 +717,21 @@
 			start += SRMMU_PGDIR_SIZE;
 			continue;
 		}
-		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
 			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 			if (pmdp == NULL)
 				early_pgtable_allocfail("pmd");
 			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
-			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
+			pgd_set(__nocache_fix(pgdp), pmdp);
 		}
-		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
+		pmdp = pmd_offset(__nocache_fix(pgdp), start);
 		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
 			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
 							     PTE_SIZE);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
 			memset(__nocache_fix(ptep), 0, PTE_SIZE);
-			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
+			pmd_set(__nocache_fix(pmdp), ptep);
 		}
 		if(what == 1) {
 			/*
@@ -1190,7 +745,7 @@
 			start += SRMMU_REAL_PMD_SIZE;
 			continue;
 		}
-		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
+		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
 		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
 		start += PAGE_SIZE;
 	}
@@ -1231,13 +786,6 @@
 	return vstart;
 }
 
-static inline void memprobe_error(char *msg)
-{
-	prom_printf(msg);
-	prom_printf("Halting now...\n");
-	prom_halt();
-}
-
 static inline void map_kernel(void)
 {
 	int i;
@@ -1249,8 +797,6 @@
 	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
 		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
 	}
-
-	BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
 }
 
 /* Paging initialization on the Sparc Reference MMU. */
@@ -1312,7 +858,7 @@
 	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
 #ifdef CONFIG_SMP
 	/* Stop from hanging here... */
-	local_flush_tlb_all();
+	local_ops->tlb_all();
 #else
 	flush_tlb_all();
 #endif
@@ -1326,8 +872,8 @@
 	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
 
 	pgd = pgd_offset_k(PKMAP_BASE);
-	pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
-	pte = srmmu_pte_offset(pmd, PKMAP_BASE);
+	pmd = pmd_offset(pgd, PKMAP_BASE);
+	pte = pte_offset_kernel(pmd, PKMAP_BASE);
 	pkmap_page_table = pte;
 
 	flush_cache_all();
@@ -1359,7 +905,7 @@
 	}
 }
 
-static void srmmu_mmu_info(struct seq_file *m)
+void mmu_info(struct seq_file *m)
 {
 	seq_printf(m, 
 		   "MMU type\t: %s\n"
@@ -1372,11 +918,7 @@
 		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
 }
 
-static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
-{
-}
-
-static void srmmu_destroy_context(struct mm_struct *mm)
+void destroy_context(struct mm_struct *mm)
 {
 
 	if(mm->context != NO_CONTEXT) {
@@ -1474,6 +1016,20 @@
 	clear = srmmu_get_fstatus();
 }
 
+static const struct sparc32_cachetlb_ops hypersparc_ops = {
+	.cache_all	= hypersparc_flush_cache_all,
+	.cache_mm	= hypersparc_flush_cache_mm,
+	.cache_page	= hypersparc_flush_cache_page,
+	.cache_range	= hypersparc_flush_cache_range,
+	.tlb_all	= hypersparc_flush_tlb_all,
+	.tlb_mm		= hypersparc_flush_tlb_mm,
+	.tlb_page	= hypersparc_flush_tlb_page,
+	.tlb_range	= hypersparc_flush_tlb_range,
+	.page_to_ram	= hypersparc_flush_page_to_ram,
+	.sig_insns	= hypersparc_flush_sig_insns,
+	.page_for_dma	= hypersparc_flush_page_for_dma,
+};
+
 static void __init init_hypersparc(void)
 {
 	srmmu_name = "ROSS HyperSparc";
@@ -1482,118 +1038,13 @@
 	init_vac_layout();
 
 	is_hypersparc = 1;
-
-	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
-
+	sparc32_cachetlb_ops = &hypersparc_ops;
 
 	poke_srmmu = poke_hypersparc;
 
 	hypersparc_setup_blockops();
 }
 
-static void __cpuinit poke_cypress(void)
-{
-	unsigned long mreg = srmmu_get_mmureg();
-	unsigned long faddr, tagval;
-	volatile unsigned long cypress_sucks;
-	volatile unsigned long clear;
-
-	clear = srmmu_get_faddr();
-	clear = srmmu_get_fstatus();
-
-	if (!(mreg & CYPRESS_CENABLE)) {
-		for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
-			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
-					     "sta %%g0, [%0] %2\n\t" : :
-					     "r" (faddr), "r" (0x40000),
-					     "i" (ASI_M_DATAC_TAG));
-		}
-	} else {
-		for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
-			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
-					     "=r" (tagval) :
-					     "r" (faddr), "r" (0x40000),
-					     "i" (ASI_M_DATAC_TAG));
-
-			/* If modified and valid, kick it. */
-			if((tagval & 0x60) == 0x60)
-				cypress_sucks = *(unsigned long *)
-							(0xf0020000 + faddr);
-		}
-	}
-
-	/* And one more, for our good neighbor, Mr. Broken Cypress. */
-	clear = srmmu_get_faddr();
-	clear = srmmu_get_fstatus();
-
-	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
-	srmmu_set_mmureg(mreg);
-}
-
-static void __init init_cypress_common(void)
-{
-	init_vac_layout();
-
-	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
-
-
-	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
-
-	poke_srmmu = poke_cypress;
-}
-
-static void __init init_cypress_604(void)
-{
-	srmmu_name = "ROSS Cypress-604(UP)";
-	srmmu_modtype = Cypress;
-	init_cypress_common();
-}
-
-static void __init init_cypress_605(unsigned long mrev)
-{
-	srmmu_name = "ROSS Cypress-605(MP)";
-	if(mrev == 0xe) {
-		srmmu_modtype = Cypress_vE;
-		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
-	} else {
-		if(mrev == 0xd) {
-			srmmu_modtype = Cypress_vD;
-			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
-		} else {
-			srmmu_modtype = Cypress;
-		}
-	}
-	init_cypress_common();
-}
-
 static void __cpuinit poke_swift(void)
 {
 	unsigned long mreg;
@@ -1617,6 +1068,20 @@
 	srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops swift_ops = {
+	.cache_all	= swift_flush_cache_all,
+	.cache_mm	= swift_flush_cache_mm,
+	.cache_page	= swift_flush_cache_page,
+	.cache_range	= swift_flush_cache_range,
+	.tlb_all	= swift_flush_tlb_all,
+	.tlb_mm		= swift_flush_tlb_mm,
+	.tlb_page	= swift_flush_tlb_page,
+	.tlb_range	= swift_flush_tlb_range,
+	.page_to_ram	= swift_flush_page_to_ram,
+	.sig_insns	= swift_flush_sig_insns,
+	.page_for_dma	= swift_flush_page_for_dma,
+};
+
 #define SWIFT_MASKID_ADDR  0x10003018
 static void __init init_swift(void)
 {
@@ -1667,23 +1132,7 @@
 		break;
 	}
 
-	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
-
-
-	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);
-
+	sparc32_cachetlb_ops = &swift_ops;
 	flush_page_for_dma_global = 0;
 
 	/*
@@ -1816,26 +1265,25 @@
 	srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops turbosparc_ops = {
+	.cache_all	= turbosparc_flush_cache_all,
+	.cache_mm	= turbosparc_flush_cache_mm,
+	.cache_page	= turbosparc_flush_cache_page,
+	.cache_range	= turbosparc_flush_cache_range,
+	.tlb_all	= turbosparc_flush_tlb_all,
+	.tlb_mm		= turbosparc_flush_tlb_mm,
+	.tlb_page	= turbosparc_flush_tlb_page,
+	.tlb_range	= turbosparc_flush_tlb_range,
+	.page_to_ram	= turbosparc_flush_page_to_ram,
+	.sig_insns	= turbosparc_flush_sig_insns,
+	.page_for_dma	= turbosparc_flush_page_for_dma,
+};
+
 static void __init init_turbosparc(void)
 {
 	srmmu_name = "Fujitsu TurboSparc";
 	srmmu_modtype = TurboSparc;
-
-	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
-
+	sparc32_cachetlb_ops = &turbosparc_ops;
 	poke_srmmu = poke_turbosparc;
 }
 
@@ -1850,6 +1298,20 @@
 	srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops tsunami_ops = {
+	.cache_all	= tsunami_flush_cache_all,
+	.cache_mm	= tsunami_flush_cache_mm,
+	.cache_page	= tsunami_flush_cache_page,
+	.cache_range	= tsunami_flush_cache_range,
+	.tlb_all	= tsunami_flush_tlb_all,
+	.tlb_mm		= tsunami_flush_tlb_mm,
+	.tlb_page	= tsunami_flush_tlb_page,
+	.tlb_range	= tsunami_flush_tlb_range,
+	.page_to_ram	= tsunami_flush_page_to_ram,
+	.sig_insns	= tsunami_flush_sig_insns,
+	.page_for_dma	= tsunami_flush_page_for_dma,
+};
+
 static void __init init_tsunami(void)
 {
 	/*
@@ -1860,22 +1322,7 @@
 
 	srmmu_name = "TI Tsunami";
 	srmmu_modtype = Tsunami;
-
-	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
-
-
-	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
-
+	sparc32_cachetlb_ops = &tsunami_ops;
 	poke_srmmu = poke_tsunami;
 
 	tsunami_setup_blockops();
@@ -1886,7 +1333,7 @@
 	unsigned long mreg = srmmu_get_mmureg();
 	static int smp_catch;
 
-	if(viking_mxcc_present) {
+	if (viking_mxcc_present) {
 		unsigned long mxcc_control = mxcc_get_creg();
 
 		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
@@ -1923,6 +1370,52 @@
 	srmmu_set_mmureg(mreg);
 }
 
+static struct sparc32_cachetlb_ops viking_ops = {
+	.cache_all	= viking_flush_cache_all,
+	.cache_mm	= viking_flush_cache_mm,
+	.cache_page	= viking_flush_cache_page,
+	.cache_range	= viking_flush_cache_range,
+	.tlb_all	= viking_flush_tlb_all,
+	.tlb_mm		= viking_flush_tlb_mm,
+	.tlb_page	= viking_flush_tlb_page,
+	.tlb_range	= viking_flush_tlb_range,
+	.page_to_ram	= viking_flush_page_to_ram,
+	.sig_insns	= viking_flush_sig_insns,
+	.page_for_dma	= viking_flush_page_for_dma,
+};
+
+#ifdef CONFIG_SMP
+/* On sun4d the cpu broadcasts local TLB flushes, so we can just
+ * perform the local TLB flush and all the other cpus will see it.
+ * But, unfortunately, there is a bug in the sun4d XBUS backplane
+ * that requires that we add some synchronization to these flushes.
+ *
+ * The bug is that the fifo which keeps track of all the pending TLB
+ * broadcasts in the system is an entry or two too small, so if we
+ * have too many going at once we'll overflow that fifo and lose a TLB
+ * flush resulting in corruption.
+ *
+ * Our workaround is to take a global spinlock around the TLB flushes,
+ * which guarantees we won't ever have too many pending.  It's a big
+ * hammer, but a semaphore-like system to make sure we only have N TLB
+ * flushes going at once will require SMP locking anyway, so there's
+ * no real value in trying any harder than this.
+ */
+static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
+	.cache_all	= viking_flush_cache_all,
+	.cache_mm	= viking_flush_cache_mm,
+	.cache_page	= viking_flush_cache_page,
+	.cache_range	= viking_flush_cache_range,
+	.tlb_all	= sun4dsmp_flush_tlb_all,
+	.tlb_mm		= sun4dsmp_flush_tlb_mm,
+	.tlb_page	= sun4dsmp_flush_tlb_page,
+	.tlb_range	= sun4dsmp_flush_tlb_range,
+	.page_to_ram	= viking_flush_page_to_ram,
+	.sig_insns	= viking_flush_sig_insns,
+	.page_for_dma	= viking_flush_page_for_dma,
+};
+#endif
+
 static void __init init_viking(void)
 {
 	unsigned long mreg = srmmu_get_mmureg();
@@ -1933,10 +1426,6 @@
 		viking_mxcc_present = 0;
 		msi_set_sync();
 
-		BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
-
 		/*
 		 * We need this to make sure old viking takes no hits
 		 * on it's cache for dma snoops to workaround the
@@ -1944,84 +1433,28 @@
 		 * This is only necessary because of the new way in
 		 * which we use the IOMMU.
 		 */
-		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
-
+		viking_ops.page_for_dma = viking_flush_page;
+#ifdef CONFIG_SMP
+		viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
+#endif
 		flush_page_for_dma_global = 0;
 	} else {
 		srmmu_name = "TI Viking/MXCC";
 		viking_mxcc_present = 1;
-
 		srmmu_cache_pagetables = 1;
-
-		/* MXCC vikings lack the DMA snooping bug. */
-		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
 	}
 
-	BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
-
+	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+		&viking_ops;
 #ifdef CONFIG_SMP
-	if (sparc_cpu_model == sun4d) {
-		BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
-	} else
+	if (sparc_cpu_model == sun4d)
+		sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+			&viking_sun4d_smp_ops;
 #endif
-	{
-		BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
-	}
-
-	BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
 
 	poke_srmmu = poke_viking;
 }
 
-#ifdef CONFIG_SPARC_LEON
-
-void __init poke_leonsparc(void)
-{
-}
-
-void __init init_leon(void)
-{
-
-	srmmu_name = "LEON";
-
-	BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
-			BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
-			BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
-			BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
-			BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
-			BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
-			BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
-
-	poke_srmmu = poke_leonsparc;
-
-	srmmu_cache_pagetables = 0;
-
-	leon_flush_during_switch = leon_flush_needed();
-}
-#endif
-
 /* Probe for the srmmu chip version. */
 static void __init get_srmmu_type(void)
 {
@@ -2052,22 +1485,15 @@
 			break;
 		case 0:
 		case 2:
-			/* Uniprocessor Cypress */
-			init_cypress_604();
-			break;
 		case 10:
 		case 11:
 		case 12:
-			/* _REALLY OLD_ Cypress MP chips... */
 		case 13:
 		case 14:
 		case 15:
-			/* MP Cypress mmu/cache-controller */
-			init_cypress_605(mod_rev);
-			break;
 		default:
-			/* Some other Cypress revision, assume a 605. */
-			init_cypress_605(mod_rev);
+			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
+			prom_halt();
 			break;
 		}
 		return;
@@ -2123,203 +1549,193 @@
 	srmmu_is_bad();
 }
 
-/* don't laugh, static pagetables */
-static void srmmu_check_pgt_cache(int low, int high)
-{
-}
-
-extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
-	tsetup_mmu_patchme, rtrap_mmu_patchme;
-
-extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
-	tsetup_srmmu_stackchk, srmmu_rett_stackchk;
-
-extern unsigned long srmmu_fault;
-
-#define PATCH_BRANCH(insn, dest) do { \
-		iaddr = &(insn); \
-		daddr = &(dest); \
-		*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
-	} while(0)
-
-static void __init patch_window_trap_handlers(void)
-{
-	unsigned long *iaddr, *daddr;
-	
-	PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
-	PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
-	PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
-	PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
-	PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
-	PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
-	PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
-}
-
 #ifdef CONFIG_SMP
 /* Local cross-calls. */
 static void smp_flush_page_for_dma(unsigned long page)
 {
-	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
-	local_flush_page_for_dma(page);
+	xc1((smpfunc_t) local_ops->page_for_dma, page);
+	local_ops->page_for_dma(page);
 }
 
+static void smp_flush_cache_all(void)
+{
+	xc0((smpfunc_t) local_ops->cache_all);
+	local_ops->cache_all();
+}
+
+static void smp_flush_tlb_all(void)
+{
+	xc0((smpfunc_t) local_ops->tlb_all);
+	local_ops->tlb_all();
+}
+
+static void smp_flush_cache_mm(struct mm_struct *mm)
+{
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
+			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
+		local_ops->cache_mm(mm);
+	}
+}
+
+static void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask)) {
+			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
+			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
+				cpumask_copy(mm_cpumask(mm),
+					     cpumask_of(smp_processor_id()));
+		}
+		local_ops->tlb_mm(mm);
+	}
+}
+
+static void smp_flush_cache_range(struct vm_area_struct *vma,
+				  unsigned long start,
+				  unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
+			xc3((smpfunc_t) local_ops->cache_range,
+			    (unsigned long) vma, start, end);
+		local_ops->cache_range(vma, start, end);
+	}
+}
+
+static void smp_flush_tlb_range(struct vm_area_struct *vma,
+				unsigned long start,
+				unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
+			xc3((smpfunc_t) local_ops->tlb_range,
+			    (unsigned long) vma, start, end);
+		local_ops->tlb_range(vma, start, end);
+	}
+}
+
+static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
+			xc2((smpfunc_t) local_ops->cache_page,
+			    (unsigned long) vma, page);
+		local_ops->cache_page(vma, page);
+	}
+}
+
+static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
+			xc2((smpfunc_t) local_ops->tlb_page,
+			    (unsigned long) vma, page);
+		local_ops->tlb_page(vma, page);
+	}
+}
+
+static void smp_flush_page_to_ram(unsigned long page)
+{
+	/* Current theory is that those who call this are the ones
+	 * who have just dirtied their cache with the page's contents
+	 * in kernel space, therefore we only run this on the local cpu.
+	 *
+	 * XXX This experiment failed, research further... -DaveM
+	 */
+#if 1
+	xc1((smpfunc_t) local_ops->page_to_ram, page);
+#endif
+	local_ops->page_to_ram(page);
+}
+
+static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+	cpumask_t cpu_mask;
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+	if (!cpumask_empty(&cpu_mask))
+		xc2((smpfunc_t) local_ops->sig_insns,
+		    (unsigned long) mm, insn_addr);
+	local_ops->sig_insns(mm, insn_addr);
+}
+
+static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
+	.cache_all	= smp_flush_cache_all,
+	.cache_mm	= smp_flush_cache_mm,
+	.cache_page	= smp_flush_cache_page,
+	.cache_range	= smp_flush_cache_range,
+	.tlb_all	= smp_flush_tlb_all,
+	.tlb_mm		= smp_flush_tlb_mm,
+	.tlb_page	= smp_flush_tlb_page,
+	.tlb_range	= smp_flush_tlb_range,
+	.page_to_ram	= smp_flush_page_to_ram,
+	.sig_insns	= smp_flush_sig_insns,
+	.page_for_dma	= smp_flush_page_for_dma,
+};
 #endif
 
-static pte_t srmmu_pgoff_to_pte(unsigned long pgoff)
-{
-	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
-}
-
-static unsigned long srmmu_pte_to_pgoff(pte_t pte)
-{
-	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
-}
-
-static pgprot_t srmmu_pgprot_noncached(pgprot_t prot)
-{
-	prot &= ~__pgprot(SRMMU_CACHE);
-
-	return prot;
-}
-
 /* Load up routines and constants for sun4m and sun4d mmu */
-void __init ld_mmu_srmmu(void)
+void __init load_mmu(void)
 {
 	extern void ld_mmu_iommu(void);
 	extern void ld_mmu_iounit(void);
-	extern void ___xchg32_sun4md(void);
-
-	BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
-	BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
-	BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
-
-	BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
-	BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
-
-	BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
-	PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
-	BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
-	BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
-	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
-	page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
 
 	/* Functions */
-	BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM);
-#ifndef CONFIG_SMP	
-	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
-#endif
-	BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP);
-
-	BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
-	BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
-
-	BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);
-
-	BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);
-
-	BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);
-	
-	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
-	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
-	BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
-	BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
-	BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
-	BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
-	BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
-	BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
-	BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
-	BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
-	BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
-	BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM);
-
 	get_srmmu_type();
-	patch_window_trap_handlers();
 
 #ifdef CONFIG_SMP
 	/* El switcheroo... */
+	local_ops = sparc32_cachetlb_ops;
 
-	BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
-	BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
-	BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
-	BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
-	BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
-	BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
-	BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
-	BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
-	BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
-	BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
-	BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
-
-	BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
-	if (sparc_cpu_model != sun4d &&
-	    sparc_cpu_model != sparc_leon) {
-		BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
+	if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
+		smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
+		smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
+		smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
+		smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
 	}
-	BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
 
 	if (poke_srmmu == poke_viking) {
 		/* Avoid unnecessary cross calls. */
-		BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
-		BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
-		BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
-		BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
-		BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
-		BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
-		BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
+		smp_cachetlb_ops.cache_all = local_ops->cache_all;
+		smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
+		smp_cachetlb_ops.cache_range = local_ops->cache_range;
+		smp_cachetlb_ops.cache_page = local_ops->cache_page;
+
+		smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
+		smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
+		smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
 	}
+
+	/* It really is const after this point. */
+	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+		&smp_cachetlb_ops;
 #endif
 
 	if (sparc_cpu_model == sun4d)
diff --git a/arch/sparc/mm/srmmu.h b/arch/sparc/mm/srmmu.h
new file mode 100644
index 0000000..5703274
--- /dev/null
+++ b/arch/sparc/mm/srmmu.h
@@ -0,0 +1,4 @@
+/* srmmu.c */
+extern char *srmmu_name;
+
+extern void (*poke_srmmu)(void);
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
deleted file mode 100644
index 1cf4f19..0000000
--- a/arch/sparc/mm/sun4c.c
+++ /dev/null
@@ -1,2166 +0,0 @@
-/* sun4c.c: Doing in software what should be done in hardware.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
- * Copyright (C) 1997-2000 Anton Blanchard (anton@samba.org)
- * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#define NR_TASK_BUCKETS 512
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/fs.h>
-#include <linux/seq_file.h>
-#include <linux/scatterlist.h>
-#include <linux/bitmap.h>
-
-#include <asm/sections.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/vaddrs.h>
-#include <asm/idprom.h>
-#include <asm/machines.h>
-#include <asm/memreg.h>
-#include <asm/processor.h>
-#include <asm/auxio.h>
-#include <asm/io.h>
-#include <asm/oplib.h>
-#include <asm/openprom.h>
-#include <asm/mmu_context.h>
-#include <asm/highmem.h>
-#include <asm/btfixup.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-/* Because of our dynamic kernel TLB miss strategy, and how
- * our DVMA mapping allocation works, you _MUST_:
- *
- * 1) Disable interrupts _and_ not touch any dynamic kernel
- *    memory while messing with kernel MMU state.  By
- *    dynamic memory I mean any object which is not in
- *    the kernel image itself or a thread_union (both of
- *    which are locked into the MMU).
- * 2) Disable interrupts while messing with user MMU state.
- */
-
-extern int num_segmaps, num_contexts;
-
-extern unsigned long page_kernel;
-
-/* That's it, we prom_halt() on sun4c if the cache size is something other than 65536.
- * So let's save some cycles and just use that everywhere except for that bootup
- * sanity check.
- */
-#define SUN4C_VAC_SIZE 65536
-
-#define SUN4C_KERNEL_BUCKETS 32
-
-/* Flushing the cache. */
-struct sun4c_vac_props sun4c_vacinfo;
-unsigned long sun4c_kernel_faults;
-
-/* Invalidate every sun4c cache line tag. */
-static void __init sun4c_flush_all(void)
-{
-	unsigned long begin, end;
-
-	if (sun4c_vacinfo.on)
-		panic("SUN4C: AIEEE, trying to invalidate vac while it is on.");
-
-	/* Clear 'valid' bit in all cache line tags */
-	begin = AC_CACHETAGS;
-	end = (AC_CACHETAGS + SUN4C_VAC_SIZE);
-	while (begin < end) {
-		__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-				     "r" (begin), "i" (ASI_CONTROL));
-		begin += sun4c_vacinfo.linesize;
-	}
-}
-
-static void sun4c_flush_context_hw(void)
-{
-	unsigned long end = SUN4C_VAC_SIZE;
-
-	__asm__ __volatile__(
-		"1:	addcc	%0, -4096, %0\n\t"
-		"	bne	1b\n\t"
-		"	 sta	%%g0, [%0] %2"
-	: "=&r" (end)
-	: "0" (end), "i" (ASI_HWFLUSHCONTEXT)
-	: "cc");
-}
-
-/* Must be called minimally with IRQs disabled. */
-static void sun4c_flush_segment_hw(unsigned long addr)
-{
-	if (sun4c_get_segmap(addr) != invalid_segment) {
-		unsigned long vac_size = SUN4C_VAC_SIZE;
-
-		__asm__ __volatile__(
-			"1:	addcc	%0, -4096, %0\n\t"
-			"	bne	1b\n\t"
-			"	 sta	%%g0, [%2 + %0] %3"
-			: "=&r" (vac_size)
-			: "0" (vac_size), "r" (addr), "i" (ASI_HWFLUSHSEG)
-			: "cc");
-	}
-}
-
-/* File local boot time fixups. */
-BTFIXUPDEF_CALL(void, sun4c_flush_page, unsigned long)
-BTFIXUPDEF_CALL(void, sun4c_flush_segment, unsigned long)
-BTFIXUPDEF_CALL(void, sun4c_flush_context, void)
-
-#define sun4c_flush_page(addr) BTFIXUP_CALL(sun4c_flush_page)(addr)
-#define sun4c_flush_segment(addr) BTFIXUP_CALL(sun4c_flush_segment)(addr)
-#define sun4c_flush_context() BTFIXUP_CALL(sun4c_flush_context)()
-
-/* Must be called minimally with interrupts disabled. */
-static void sun4c_flush_page_hw(unsigned long addr)
-{
-	addr &= PAGE_MASK;
-	if ((int)sun4c_get_pte(addr) < 0)
-		__asm__ __volatile__("sta %%g0, [%0] %1"
-				     : : "r" (addr), "i" (ASI_HWFLUSHPAGE));
-}
-
-/* Don't inline the software version as it eats too many cache lines if expanded. */
-static void sun4c_flush_context_sw(void)
-{
-	unsigned long nbytes = SUN4C_VAC_SIZE;
-	unsigned long lsize = sun4c_vacinfo.linesize;
-
-	__asm__ __volatile__(
-	"add	%2, %2, %%g1\n\t"
-	"add	%2, %%g1, %%g2\n\t"
-	"add	%2, %%g2, %%g3\n\t"
-	"add	%2, %%g3, %%g4\n\t"
-	"add	%2, %%g4, %%g5\n\t"
-	"add	%2, %%g5, %%o4\n\t"
-	"add	%2, %%o4, %%o5\n"
-	"1:\n\t"
-	"subcc	%0, %%o5, %0\n\t"
-	"sta	%%g0, [%0] %3\n\t"
-	"sta	%%g0, [%0 + %2] %3\n\t"
-	"sta	%%g0, [%0 + %%g1] %3\n\t"
-	"sta	%%g0, [%0 + %%g2] %3\n\t"
-	"sta	%%g0, [%0 + %%g3] %3\n\t"
-	"sta	%%g0, [%0 + %%g4] %3\n\t"
-	"sta	%%g0, [%0 + %%g5] %3\n\t"
-	"bg	1b\n\t"
-	" sta	%%g0, [%1 + %%o4] %3\n"
-	: "=&r" (nbytes)
-	: "0" (nbytes), "r" (lsize), "i" (ASI_FLUSHCTX)
-	: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
-}
-
-/* Don't inline the software version as it eats too many cache lines if expanded. */
-static void sun4c_flush_segment_sw(unsigned long addr)
-{
-	if (sun4c_get_segmap(addr) != invalid_segment) {
-		unsigned long nbytes = SUN4C_VAC_SIZE;
-		unsigned long lsize = sun4c_vacinfo.linesize;
-
-		__asm__ __volatile__(
-		"add	%2, %2, %%g1\n\t"
-		"add	%2, %%g1, %%g2\n\t"
-		"add	%2, %%g2, %%g3\n\t"
-		"add	%2, %%g3, %%g4\n\t"
-		"add	%2, %%g4, %%g5\n\t"
-		"add	%2, %%g5, %%o4\n\t"
-		"add	%2, %%o4, %%o5\n"
-		"1:\n\t"
-		"subcc	%1, %%o5, %1\n\t"
-		"sta	%%g0, [%0] %6\n\t"
-		"sta	%%g0, [%0 + %2] %6\n\t"
-		"sta	%%g0, [%0 + %%g1] %6\n\t"
-		"sta	%%g0, [%0 + %%g2] %6\n\t"
-		"sta	%%g0, [%0 + %%g3] %6\n\t"
-		"sta	%%g0, [%0 + %%g4] %6\n\t"
-		"sta	%%g0, [%0 + %%g5] %6\n\t"
-		"sta	%%g0, [%0 + %%o4] %6\n\t"
-		"bg	1b\n\t"
-		" add	%0, %%o5, %0\n"
-		: "=&r" (addr), "=&r" (nbytes), "=&r" (lsize)
-		: "0" (addr), "1" (nbytes), "2" (lsize),
-		  "i" (ASI_FLUSHSEG)
-		: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
-	}
-}
-
-/* Don't inline the software version as it eats too many cache lines if expanded. */
-static void sun4c_flush_page_sw(unsigned long addr)
-{
-	addr &= PAGE_MASK;
-	if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
-	    _SUN4C_PAGE_VALID) {
-		unsigned long left = PAGE_SIZE;
-		unsigned long lsize = sun4c_vacinfo.linesize;
-
-		__asm__ __volatile__(
-		"add	%2, %2, %%g1\n\t"
-		"add	%2, %%g1, %%g2\n\t"
-		"add	%2, %%g2, %%g3\n\t"
-		"add	%2, %%g3, %%g4\n\t"
-		"add	%2, %%g4, %%g5\n\t"
-		"add	%2, %%g5, %%o4\n\t"
-		"add	%2, %%o4, %%o5\n"
-		"1:\n\t"
-		"subcc	%1, %%o5, %1\n\t"
-		"sta	%%g0, [%0] %6\n\t"
-		"sta	%%g0, [%0 + %2] %6\n\t"
-		"sta	%%g0, [%0 + %%g1] %6\n\t"
-		"sta	%%g0, [%0 + %%g2] %6\n\t"
-		"sta	%%g0, [%0 + %%g3] %6\n\t"
-		"sta	%%g0, [%0 + %%g4] %6\n\t"
-		"sta	%%g0, [%0 + %%g5] %6\n\t"
-		"sta	%%g0, [%0 + %%o4] %6\n\t"
-		"bg	1b\n\t"
-		" add	%0, %%o5, %0\n"
-		: "=&r" (addr), "=&r" (left), "=&r" (lsize)
-		: "0" (addr), "1" (left), "2" (lsize),
-		  "i" (ASI_FLUSHPG)
-		: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
-	}
-}
-
-/* The sun4c's do have an on chip store buffer.  And the way you
- * clear them out isn't so obvious.  The only way I can think of
- * to accomplish this is to read the current context register,
- * store the same value there, then read an external hardware
- * register.
- */
-void sun4c_complete_all_stores(void)
-{
-	volatile int _unused;
-
-	_unused = sun4c_get_context();
-	sun4c_set_context(_unused);
-	_unused = get_auxio();
-}
-
-/* Bootup utility functions. */
-static inline void sun4c_init_clean_segmap(unsigned char pseg)
-{
-	unsigned long vaddr;
-
-	sun4c_put_segmap(0, pseg);
-	for (vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr += PAGE_SIZE)
-		sun4c_put_pte(vaddr, 0);
-	sun4c_put_segmap(0, invalid_segment);
-}
-
-static inline void sun4c_init_clean_mmu(unsigned long kernel_end)
-{
-	unsigned long vaddr;
-	unsigned char savectx, ctx;
-
-	savectx = sun4c_get_context();
-	for (ctx = 0; ctx < num_contexts; ctx++) {
-		sun4c_set_context(ctx);
-		for (vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
-			sun4c_put_segmap(vaddr, invalid_segment);
-		for (vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
-			sun4c_put_segmap(vaddr, invalid_segment);
-		for (vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
-			sun4c_put_segmap(vaddr, invalid_segment);
-		for (vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
-			sun4c_put_segmap(vaddr, invalid_segment);
-	}
-	sun4c_set_context(savectx);
-}
-
-void __init sun4c_probe_vac(void)
-{
-	sun4c_disable_vac();
-
-	if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
-	    (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
-		/* The PROM on the SS1 lacks this info; to be super safe we
-		 * hard-code it here since this arch is cast in stone.
-		 */
-		sun4c_vacinfo.num_bytes = 65536;
-		sun4c_vacinfo.linesize = 16;
-	} else {
-		sun4c_vacinfo.num_bytes =
-		 prom_getintdefault(prom_root_node, "vac-size", 65536);
-		sun4c_vacinfo.linesize =
-		 prom_getintdefault(prom_root_node, "vac-linesize", 16);
-	}
-	sun4c_vacinfo.do_hwflushes =
-	 prom_getintdefault(prom_root_node, "vac-hwflush", 0);
-
-	if (sun4c_vacinfo.do_hwflushes == 0)
-		sun4c_vacinfo.do_hwflushes =
-		 prom_getintdefault(prom_root_node, "vac_hwflush", 0);
-
-	if (sun4c_vacinfo.num_bytes != 65536) {
-		prom_printf("WEIRD Sun4C VAC cache size, "
-			    "tell sparclinux@vger.kernel.org");
-		prom_halt();
-	}
-
-	switch (sun4c_vacinfo.linesize) {
-	case 16:
-		sun4c_vacinfo.log2lsize = 4;
-		break;
-	case 32:
-		sun4c_vacinfo.log2lsize = 5;
-		break;
-	default:
-		prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
-			    sun4c_vacinfo.linesize);
-		prom_halt();
-	}
-
-	sun4c_flush_all();
-	sun4c_enable_vac();
-}
-
-/* Patch instructions for the low level kernel fault handler. */
-extern unsigned long invalid_segment_patch1, invalid_segment_patch1_ff;
-extern unsigned long invalid_segment_patch2, invalid_segment_patch2_ff;
-extern unsigned long invalid_segment_patch1_1ff, invalid_segment_patch2_1ff;
-extern unsigned long num_context_patch1, num_context_patch1_16;
-extern unsigned long num_context_patch2_16;
-extern unsigned long vac_linesize_patch, vac_linesize_patch_32;
-extern unsigned long vac_hwflush_patch1, vac_hwflush_patch1_on;
-extern unsigned long vac_hwflush_patch2, vac_hwflush_patch2_on;
-
-#define PATCH_INSN(src, dst) do {	\
-		daddr = &(dst);		\
-		iaddr = &(src);		\
-		*daddr = *iaddr;	\
-	} while (0)
-
-static void __init patch_kernel_fault_handler(void)
-{
-	unsigned long *iaddr, *daddr;
-
-	switch (num_segmaps) {
-		case 128:
-			/* Default, nothing to do. */
-			break;
-		case 256:
-			PATCH_INSN(invalid_segment_patch1_ff,
-				   invalid_segment_patch1);
-			PATCH_INSN(invalid_segment_patch2_ff,
-				   invalid_segment_patch2);
-			break;
-		case 512:
-			PATCH_INSN(invalid_segment_patch1_1ff,
-				   invalid_segment_patch1);
-			PATCH_INSN(invalid_segment_patch2_1ff,
-				   invalid_segment_patch2);
-			break;
-		default:
-			prom_printf("Unhandled number of segmaps: %d\n",
-				    num_segmaps);
-			prom_halt();
-	}
-	switch (num_contexts) {
-		case 8:
-			/* Default, nothing to do. */
-			break;
-		case 16:
-			PATCH_INSN(num_context_patch1_16,
-				   num_context_patch1);
-			break;
-		default:
-			prom_printf("Unhandled number of contexts: %d\n",
-				    num_contexts);
-			prom_halt();
-	}
-
-	if (sun4c_vacinfo.do_hwflushes != 0) {
-		PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
-		PATCH_INSN(vac_hwflush_patch2_on, vac_hwflush_patch2);
-	} else {
-		switch (sun4c_vacinfo.linesize) {
-		case 16:
-			/* Default, nothing to do. */
-			break;
-		case 32:
-			PATCH_INSN(vac_linesize_patch_32, vac_linesize_patch);
-			break;
-		default:
-			prom_printf("Impossible VAC linesize %d, halting...\n",
-				    sun4c_vacinfo.linesize);
-			prom_halt();
-		}
-	}
-}
-
-static void __init sun4c_probe_mmu(void)
-{
-	if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
-	    (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
-		/* Hardcode these just to be safe; the PROM on the SS1 does
-		 * not have this info available in the root node.
-		 */
-		num_segmaps = 128;
-		num_contexts = 8;
-	} else {
-		num_segmaps =
-		    prom_getintdefault(prom_root_node, "mmu-npmg", 128);
-		num_contexts =
-		    prom_getintdefault(prom_root_node, "mmu-nctx", 0x8);
-	}
-	patch_kernel_fault_handler();
-}
-
-volatile unsigned long __iomem *sun4c_memerr_reg = NULL;
-
-void __init sun4c_probe_memerr_reg(void)
-{
-	phandle node;
-	struct linux_prom_registers regs[1];
-
-	node = prom_getchild(prom_root_node);
-	node = prom_searchsiblings(prom_root_node, "memory-error");
-	if (!node)
-		return;
-	if (prom_getproperty(node, "reg", (char *)regs, sizeof(regs)) <= 0)
-		return;
-	/* hmm I think regs[0].which_io is zero here anyways */
-	sun4c_memerr_reg = ioremap(regs[0].phys_addr, regs[0].reg_size);
-}
-
-static inline void sun4c_init_ss2_cache_bug(void)
-{
-	if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
-	    (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
-	    (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
-		/* Whee.. */
-		printk("SS2 cache bug detected, uncaching trap table page\n");
-		sun4c_flush_page((unsigned int) &_start);
-		sun4c_put_pte(((unsigned long) &_start),
-			(sun4c_get_pte((unsigned long) &_start) | _SUN4C_PAGE_NOCACHE));
-	}
-}
-
-/* Addr is always aligned on a page boundary for us already. */
-static int sun4c_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
-			      unsigned long addr, int len)
-{
-	unsigned long page, end;
-
-	*pba = addr;
-
-	end = PAGE_ALIGN((addr + len));
-	while (addr < end) {
-		page = va;
-		sun4c_flush_page(page);
-		page -= PAGE_OFFSET;
-		page >>= PAGE_SHIFT;
-		page |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_DIRTY |
-			 _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV);
-		sun4c_put_pte(addr, page);
-		addr += PAGE_SIZE;
-		va += PAGE_SIZE;
-	}
-
-	return 0;
-}
-
-static void sun4c_unmap_dma_area(struct device *dev, unsigned long busa, int len)
-{
-	/* Fortunately for us, bus_addr == uncached_virt in sun4c. */
-	/* XXX Implement this */
-}
-
-/* TLB management. */
-
-/* Don't change this struct without changing entry.S. This is used
- * in the in-window kernel fault handler, and you don't want to mess
- * with that. (See sun4c_fault in entry.S).
- */
-struct sun4c_mmu_entry {
-	struct sun4c_mmu_entry *next;
-	struct sun4c_mmu_entry *prev;
-	unsigned long vaddr;
-	unsigned char pseg;
-	unsigned char locked;
-
-	/* For user mappings only, and completely hidden from kernel
-	 * TLB miss code.
-	 */
-	unsigned char ctx;
-	struct sun4c_mmu_entry *lru_next;
-	struct sun4c_mmu_entry *lru_prev;
-};
-
-static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS];
-
-static void __init sun4c_init_mmu_entry_pool(void)
-{
-	int i;
-
-	for (i=0; i < SUN4C_MAX_SEGMAPS; i++) {
-		mmu_entry_pool[i].pseg = i;
-		mmu_entry_pool[i].next = NULL;
-		mmu_entry_pool[i].prev = NULL;
-		mmu_entry_pool[i].vaddr = 0;
-		mmu_entry_pool[i].locked = 0;
-		mmu_entry_pool[i].ctx = 0;
-		mmu_entry_pool[i].lru_next = NULL;
-		mmu_entry_pool[i].lru_prev = NULL;
-	}
-	mmu_entry_pool[invalid_segment].locked = 1;
-}
-
-static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on,
-				   unsigned long bits_off)
-{
-	unsigned long start, end;
-
-	end = vaddr + SUN4C_REAL_PGDIR_SIZE;
-	for (start = vaddr; start < end; start += PAGE_SIZE)
-		if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
-			sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) &
-				      ~bits_off);
-}
-
-static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
-{
-	unsigned long vaddr;
-	unsigned char pseg, ctx;
-
-	for (vaddr = KADB_DEBUGGER_BEGVM;
-	     vaddr < LINUX_OPPROM_ENDVM;
-	     vaddr += SUN4C_REAL_PGDIR_SIZE) {
-		pseg = sun4c_get_segmap(vaddr);
-		if (pseg != invalid_segment) {
-			mmu_entry_pool[pseg].locked = 1;
-			for (ctx = 0; ctx < num_contexts; ctx++)
-				prom_putsegment(ctx, vaddr, pseg);
-			fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0);
-		}
-	}
-
-	for (vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
-		pseg = sun4c_get_segmap(vaddr);
-		mmu_entry_pool[pseg].locked = 1;
-		for (ctx = 0; ctx < num_contexts; ctx++)
-			prom_putsegment(ctx, vaddr, pseg);
-		fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE);
-	}
-}
-
-static void __init sun4c_init_lock_area(unsigned long start, unsigned long end)
-{
-	int i, ctx;
-
-	while (start < end) {
-		for (i = 0; i < invalid_segment; i++)
-			if (!mmu_entry_pool[i].locked)
-				break;
-		mmu_entry_pool[i].locked = 1;
-		sun4c_init_clean_segmap(i);
-		for (ctx = 0; ctx < num_contexts; ctx++)
-			prom_putsegment(ctx, start, mmu_entry_pool[i].pseg);
-		start += SUN4C_REAL_PGDIR_SIZE;
-	}
-}
-
-/* Don't change this struct without changing entry.S. This is used
- * in the in-window kernel fault handler, and you don't want to mess
- * with that. (See sun4c_fault in entry.S).
- */
-struct sun4c_mmu_ring {
-	struct sun4c_mmu_entry ringhd;
-	int num_entries;
-};
-
-static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */
-static struct sun4c_mmu_ring sun4c_ufree_ring;       /* free user entries */
-static struct sun4c_mmu_ring sun4c_ulru_ring;	     /* LRU user entries */
-struct sun4c_mmu_ring sun4c_kernel_ring;      /* used kernel entries */
-struct sun4c_mmu_ring sun4c_kfree_ring;       /* free kernel entries */
-
-static inline void sun4c_init_rings(void)
-{
-	int i;
-
-	for (i = 0; i < SUN4C_MAX_CONTEXTS; i++) {
-		sun4c_context_ring[i].ringhd.next =
-			sun4c_context_ring[i].ringhd.prev =
-			&sun4c_context_ring[i].ringhd;
-		sun4c_context_ring[i].num_entries = 0;
-	}
-	sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev =
-		&sun4c_ufree_ring.ringhd;
-	sun4c_ufree_ring.num_entries = 0;
-	sun4c_ulru_ring.ringhd.lru_next = sun4c_ulru_ring.ringhd.lru_prev =
-		&sun4c_ulru_ring.ringhd;
-	sun4c_ulru_ring.num_entries = 0;
-	sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev =
-		&sun4c_kernel_ring.ringhd;
-	sun4c_kernel_ring.num_entries = 0;
-	sun4c_kfree_ring.ringhd.next = sun4c_kfree_ring.ringhd.prev =
-		&sun4c_kfree_ring.ringhd;
-	sun4c_kfree_ring.num_entries = 0;
-}
-
-static void add_ring(struct sun4c_mmu_ring *ring,
-		     struct sun4c_mmu_entry *entry)
-{
-	struct sun4c_mmu_entry *head = &ring->ringhd;
-
-	entry->prev = head;
-	(entry->next = head->next)->prev = entry;
-	head->next = entry;
-	ring->num_entries++;
-}
-
-static inline void add_lru(struct sun4c_mmu_entry *entry)
-{
-	struct sun4c_mmu_ring *ring = &sun4c_ulru_ring;
-	struct sun4c_mmu_entry *head = &ring->ringhd;
-
-	entry->lru_next = head;
-	(entry->lru_prev = head->lru_prev)->lru_next = entry;
-	head->lru_prev = entry;
-}
-
-static void add_ring_ordered(struct sun4c_mmu_ring *ring,
-			     struct sun4c_mmu_entry *entry)
-{
-	struct sun4c_mmu_entry *head = &ring->ringhd;
-	unsigned long addr = entry->vaddr;
-
-	while ((head->next != &ring->ringhd) && (head->next->vaddr < addr))
-		head = head->next;
-
-	entry->prev = head;
-	(entry->next = head->next)->prev = entry;
-	head->next = entry;
-	ring->num_entries++;
-
-	add_lru(entry);
-}
-
-static inline void remove_ring(struct sun4c_mmu_ring *ring,
-				   struct sun4c_mmu_entry *entry)
-{
-	struct sun4c_mmu_entry *next = entry->next;
-
-	(next->prev = entry->prev)->next = next;
-	ring->num_entries--;
-}
-
-static void remove_lru(struct sun4c_mmu_entry *entry)
-{
-	struct sun4c_mmu_entry *next = entry->lru_next;
-
-	(next->lru_prev = entry->lru_prev)->lru_next = next;
-}
-
-static void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
-{
-        remove_ring(sun4c_context_ring+ctx, entry);
-	remove_lru(entry);
-        add_ring(&sun4c_ufree_ring, entry);
-}
-
-static void free_kernel_entry(struct sun4c_mmu_entry *entry,
-			      struct sun4c_mmu_ring *ring)
-{
-        remove_ring(ring, entry);
-        add_ring(&sun4c_kfree_ring, entry);
-}
-
-static void __init sun4c_init_fill_kernel_ring(int howmany)
-{
-	int i;
-
-	while (howmany) {
-		for (i = 0; i < invalid_segment; i++)
-			if (!mmu_entry_pool[i].locked)
-				break;
-		mmu_entry_pool[i].locked = 1;
-		sun4c_init_clean_segmap(i);
-		add_ring(&sun4c_kfree_ring, &mmu_entry_pool[i]);
-		howmany--;
-	}
-}
-
-static void __init sun4c_init_fill_user_ring(void)
-{
-	int i;
-
-	for (i = 0; i < invalid_segment; i++) {
-		if (mmu_entry_pool[i].locked)
-			continue;
-		sun4c_init_clean_segmap(i);
-		add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]);
-	}
-}
-
-static void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
-{
-	int savectx, ctx;
-
-	savectx = sun4c_get_context();
-	for (ctx = 0; ctx < num_contexts; ctx++) {
-		sun4c_set_context(ctx);
-		sun4c_put_segmap(kentry->vaddr, invalid_segment);
-	}
-	sun4c_set_context(savectx);
-}
-
-static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
-{
-	int savectx, ctx;
-
-	savectx = sun4c_get_context();
-	for (ctx = 0; ctx < num_contexts; ctx++) {
-		sun4c_set_context(ctx);
-		sun4c_put_segmap(kentry->vaddr, kentry->pseg);
-	}
-	sun4c_set_context(savectx);
-}
-
-#define sun4c_user_unmap(__entry) \
-	sun4c_put_segmap((__entry)->vaddr, invalid_segment)
-
-static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
-{
-	struct sun4c_mmu_entry *head = &crp->ringhd;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	if (head->next != head) {
-		struct sun4c_mmu_entry *entry = head->next;
-		int savectx = sun4c_get_context();
-
-		flush_user_windows();
-		sun4c_set_context(ctx);
-		sun4c_flush_context();
-		do {
-			struct sun4c_mmu_entry *next = entry->next;
-
-			sun4c_user_unmap(entry);
-			free_user_entry(ctx, entry);
-
-			entry = next;
-		} while (entry != head);
-		sun4c_set_context(savectx);
-	}
-	local_irq_restore(flags);
-}
-
-static int sun4c_user_taken_entries;  /* This is how much we have.             */
-static int max_user_taken_entries;    /* This limits us and prevents deadlock. */
-
-static struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
-{
-	struct sun4c_mmu_entry *this_entry;
-
-	/* If some are free, return first one. */
-	if (sun4c_kfree_ring.num_entries) {
-		this_entry = sun4c_kfree_ring.ringhd.next;
-		return this_entry;
-	}
-
-	/* Else free one up. */
-	this_entry = sun4c_kernel_ring.ringhd.prev;
-	sun4c_flush_segment(this_entry->vaddr);
-	sun4c_kernel_unmap(this_entry);
-	free_kernel_entry(this_entry, &sun4c_kernel_ring);
-	this_entry = sun4c_kfree_ring.ringhd.next;
-
-	return this_entry;
-}
-
-/* Using this method to free up mmu entries eliminates a lot of
- * potential races since we have a kernel that incurs tlb
- * replacement faults.  There may be performance penalties.
- *
- * NOTE: Must be called with interrupts disabled.
- */
-static struct sun4c_mmu_entry *sun4c_user_strategy(void)
-{
-	struct sun4c_mmu_entry *entry;
-	unsigned char ctx;
-	int savectx;
-
-	/* If some are free, return first one. */
-	if (sun4c_ufree_ring.num_entries) {
-		entry = sun4c_ufree_ring.ringhd.next;
-		goto unlink_out;
-	}
-
-	if (sun4c_user_taken_entries) {
-		entry = sun4c_kernel_strategy();
-		sun4c_user_taken_entries--;
-		goto kunlink_out;
-	}
-
-	/* Grab from the beginning of the LRU list. */
-	entry = sun4c_ulru_ring.ringhd.lru_next;
-	ctx = entry->ctx;
-
-	savectx = sun4c_get_context();
-	flush_user_windows();
-	sun4c_set_context(ctx);
-	sun4c_flush_segment(entry->vaddr);
-	sun4c_user_unmap(entry);
-	remove_ring(sun4c_context_ring + ctx, entry);
-	remove_lru(entry);
-	sun4c_set_context(savectx);
-
-	return entry;
-
-unlink_out:
-	remove_ring(&sun4c_ufree_ring, entry);
-	return entry;
-kunlink_out:
-	remove_ring(&sun4c_kfree_ring, entry);
-	return entry;
-}
-
-/* NOTE: Must be called with interrupts disabled. */
-void sun4c_grow_kernel_ring(void)
-{
-	struct sun4c_mmu_entry *entry;
-
-	/* Prevent deadlock condition. */
-	if (sun4c_user_taken_entries >= max_user_taken_entries)
-		return;
-
-	if (sun4c_ufree_ring.num_entries) {
-		entry = sun4c_ufree_ring.ringhd.next;
-        	remove_ring(&sun4c_ufree_ring, entry);
-		add_ring(&sun4c_kfree_ring, entry);
-		sun4c_user_taken_entries++;
-	}
-}
-
-/* 2 page buckets for task struct and kernel stack allocation.
- *
- * TASK_STACK_BEGIN
- * bucket[0]
- * bucket[1]
- *   [ ... ]
- * bucket[NR_TASK_BUCKETS-1]
- * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASK_BUCKETS)
- *
- * Each slot looks like:
- *
- *  page 1 --  task struct + beginning of kernel stack
- *  page 2 --  rest of kernel stack
- */
-
-union task_union *sun4c_bucket[NR_TASK_BUCKETS];
-
-static int sun4c_lowbucket_avail;
-
-#define BUCKET_EMPTY     ((union task_union *) 0)
-#define BUCKET_SHIFT     (PAGE_SHIFT + 1)        /* log2(sizeof(struct task_bucket)) */
-#define BUCKET_SIZE      (1 << BUCKET_SHIFT)
-#define BUCKET_NUM(addr) ((((addr) - SUN4C_LOCK_VADDR) >> BUCKET_SHIFT))
-#define BUCKET_ADDR(num) (((num) << BUCKET_SHIFT) + SUN4C_LOCK_VADDR)
-#define BUCKET_PTE(page)       \
-        ((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL))
-#define BUCKET_PTE_PAGE(pte)   \
-        (PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT))
-
-static void get_locked_segment(unsigned long addr)
-{
-	struct sun4c_mmu_entry *stolen;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	addr &= SUN4C_REAL_PGDIR_MASK;
-	stolen = sun4c_user_strategy();
-	max_user_taken_entries--;
-	stolen->vaddr = addr;
-	flush_user_windows();
-	sun4c_kernel_map(stolen);
-	local_irq_restore(flags);
-}
-
-static void free_locked_segment(unsigned long addr)
-{
-	struct sun4c_mmu_entry *entry;
-	unsigned long flags;
-	unsigned char pseg;
-
-	local_irq_save(flags);
-	addr &= SUN4C_REAL_PGDIR_MASK;
-	pseg = sun4c_get_segmap(addr);
-	entry = &mmu_entry_pool[pseg];
-
-	flush_user_windows();
-	sun4c_flush_segment(addr);
-	sun4c_kernel_unmap(entry);
-	add_ring(&sun4c_ufree_ring, entry);
-	max_user_taken_entries++;
-	local_irq_restore(flags);
-}
-
-static inline void garbage_collect(int entry)
-{
-	int start, end;
-
-	/* 32 buckets per segment... */
-	entry &= ~31;
-	start = entry;
-	for (end = (start + 32); start < end; start++)
-		if (sun4c_bucket[start] != BUCKET_EMPTY)
-			return;
-
-	/* Entire segment empty, release it. */
-	free_locked_segment(BUCKET_ADDR(entry));
-}
-
-static struct thread_info *sun4c_alloc_thread_info_node(int node)
-{
-	unsigned long addr, pages;
-	int entry;
-
-	pages = __get_free_pages(GFP_KERNEL, THREAD_INFO_ORDER);
-	if (!pages)
-		return NULL;
-
-	for (entry = sun4c_lowbucket_avail; entry < NR_TASK_BUCKETS; entry++)
-		if (sun4c_bucket[entry] == BUCKET_EMPTY)
-			break;
-	if (entry == NR_TASK_BUCKETS) {
-		free_pages(pages, THREAD_INFO_ORDER);
-		return NULL;
-	}
-	if (entry >= sun4c_lowbucket_avail)
-		sun4c_lowbucket_avail = entry + 1;
-
-	addr = BUCKET_ADDR(entry);
-	sun4c_bucket[entry] = (union task_union *) addr;
-	if(sun4c_get_segmap(addr) == invalid_segment)
-		get_locked_segment(addr);
-
-	/* We are changing the virtual color of the page(s)
-	 * so we must flush the cache to guarantee consistency.
-	 */
-	sun4c_flush_page(pages);
-	sun4c_flush_page(pages + PAGE_SIZE);
-
-	sun4c_put_pte(addr, BUCKET_PTE(pages));
-	sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE));
-
-#ifdef CONFIG_DEBUG_STACK_USAGE
-	memset((void *)addr, 0, PAGE_SIZE << THREAD_INFO_ORDER);
-#endif /* DEBUG_STACK_USAGE */
-
-	return (struct thread_info *) addr;
-}
-
-static void sun4c_free_thread_info(struct thread_info *ti)
-{
-	unsigned long tiaddr = (unsigned long) ti;
-	unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tiaddr));
-	int entry = BUCKET_NUM(tiaddr);
-
-	/* We are deleting a mapping, so the flush here is mandatory. */
-	sun4c_flush_page(tiaddr);
-	sun4c_flush_page(tiaddr + PAGE_SIZE);
-
-	sun4c_put_pte(tiaddr, 0);
-	sun4c_put_pte(tiaddr + PAGE_SIZE, 0);
-
-	sun4c_bucket[entry] = BUCKET_EMPTY;
-	if (entry < sun4c_lowbucket_avail)
-		sun4c_lowbucket_avail = entry;
-
-	free_pages(pages, THREAD_INFO_ORDER);
-	garbage_collect(entry);
-}
-
-static void __init sun4c_init_buckets(void)
-{
-	int entry;
-
-	if (sizeof(union thread_union) != (PAGE_SIZE << THREAD_INFO_ORDER)) {
-		extern void thread_info_size_is_bolixed_pete(void);
-		thread_info_size_is_bolixed_pete();
-	}
-
-	for (entry = 0; entry < NR_TASK_BUCKETS; entry++)
-		sun4c_bucket[entry] = BUCKET_EMPTY;
-	sun4c_lowbucket_avail = 0;
-}
-
-static unsigned long sun4c_iobuffer_start;
-static unsigned long sun4c_iobuffer_end;
-static unsigned long sun4c_iobuffer_high;
-static unsigned long *sun4c_iobuffer_map;
-static int iobuffer_map_size;
-
-/*
- * Alias our pages so they do not cause a trap.
- * Note that one page may be aliased into several I/O areas, and each
- * of those I/Os may complete separately.
- */
-static char *sun4c_lockarea(char *vaddr, unsigned long size)
-{
-	unsigned long base, scan;
-	unsigned long npages;
-	unsigned long vpage;
-	unsigned long pte;
-	unsigned long apage;
-	unsigned long high;
-	unsigned long flags;
-
-	npages = (((unsigned long)vaddr & ~PAGE_MASK) +
-		  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
-
-	local_irq_save(flags);
-	base = bitmap_find_next_zero_area(sun4c_iobuffer_map, iobuffer_map_size,
-						0, npages, 0);
-	if (base >= iobuffer_map_size)
-		goto abend;
-
-	high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
-	high = SUN4C_REAL_PGDIR_ALIGN(high);
-	while (high > sun4c_iobuffer_high) {
-		get_locked_segment(sun4c_iobuffer_high);
-		sun4c_iobuffer_high += SUN4C_REAL_PGDIR_SIZE;
-	}
-
-	vpage = ((unsigned long) vaddr) & PAGE_MASK;
-	for (scan = base; scan < base+npages; scan++) {
-		pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT);
- 		pte |= pgprot_val(SUN4C_PAGE_KERNEL);
-		pte |= _SUN4C_PAGE_NOCACHE;
-		set_bit(scan, sun4c_iobuffer_map);
-		apage = (scan << PAGE_SHIFT) + sun4c_iobuffer_start;
-
-		/* Flush original mapping so we see the right things later. */
-		sun4c_flush_page(vpage);
-
-		sun4c_put_pte(apage, pte);
-		vpage += PAGE_SIZE;
-	}
-	local_irq_restore(flags);
-	return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
-			 (((unsigned long) vaddr) & ~PAGE_MASK));
-
-abend:
-	local_irq_restore(flags);
-	printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
-	panic("Out of iobuffer table");
-	return NULL;
-}
-
-static void sun4c_unlockarea(char *vaddr, unsigned long size)
-{
-	unsigned long vpage, npages;
-	unsigned long flags;
-	int scan, high;
-
-	vpage = (unsigned long)vaddr & PAGE_MASK;
-	npages = (((unsigned long)vaddr & ~PAGE_MASK) +
-		  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
-
-	local_irq_save(flags);
-	while (npages != 0) {
-		--npages;
-
-		/* This mapping is marked non-cachable, no flush necessary. */
-		sun4c_put_pte(vpage, 0);
-		clear_bit((vpage - sun4c_iobuffer_start) >> PAGE_SHIFT,
-			  sun4c_iobuffer_map);
-		vpage += PAGE_SIZE;
-	}
-
-	/* garbage collect */
-	scan = (sun4c_iobuffer_high - sun4c_iobuffer_start) >> PAGE_SHIFT;
-	while (scan >= 0 && !sun4c_iobuffer_map[scan >> 5])
-		scan -= 32;
-	scan += 32;
-	high = sun4c_iobuffer_start + (scan << PAGE_SHIFT);
-	high = SUN4C_REAL_PGDIR_ALIGN(high) + SUN4C_REAL_PGDIR_SIZE;
-	while (high < sun4c_iobuffer_high) {
-		sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE;
-		free_locked_segment(sun4c_iobuffer_high);
-	}
-	local_irq_restore(flags);
-}
-
-/* Note: at init time the SCSI code passes us buffers which sit
- * on the kernel stack.  Those are already locked by implication
- * and would fool the page locking code above if handed to it
- * by mistake.
- */
-static __u32 sun4c_get_scsi_one(struct device *dev, char *bufptr, unsigned long len)
-{
-	unsigned long page;
-
-	page = ((unsigned long)bufptr) & PAGE_MASK;
-	if (!virt_addr_valid(page)) {
-		sun4c_flush_page(page);
-		return (__u32)bufptr; /* already locked */
-	}
-	return (__u32)sun4c_lockarea(bufptr, len);
-}
-
-static void sun4c_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
-{
-	while (sz != 0) {
-		--sz;
-		sg->dma_address = (__u32)sun4c_lockarea(sg_virt(sg), sg->length);
-		sg->dma_length = sg->length;
-		sg = sg_next(sg);
-	}
-}
-
-static void sun4c_release_scsi_one(struct device *dev, __u32 bufptr, unsigned long len)
-{
-	if (bufptr < sun4c_iobuffer_start)
-		return; /* On kernel stack or similar, see above */
-	sun4c_unlockarea((char *)bufptr, len);
-}
-
-static void sun4c_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
-{
-	while (sz != 0) {
-		--sz;
-		sun4c_unlockarea((char *)sg->dma_address, sg->length);
-		sg = sg_next(sg);
-	}
-}
-
-#define TASK_ENTRY_SIZE    BUCKET_SIZE /* see above */
-#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-
-struct vm_area_struct sun4c_kstack_vma;
-
-static void __init sun4c_init_lock_areas(void)
-{
-	unsigned long sun4c_taskstack_start;
-	unsigned long sun4c_taskstack_end;
-	int bitmap_size;
-
-	sun4c_init_buckets();
-	sun4c_taskstack_start = SUN4C_LOCK_VADDR;
-	sun4c_taskstack_end = (sun4c_taskstack_start +
-			       (TASK_ENTRY_SIZE * NR_TASK_BUCKETS));
-	if (sun4c_taskstack_end >= SUN4C_LOCK_END) {
-		prom_printf("Too many tasks, decrease NR_TASK_BUCKETS please.\n");
-		prom_halt();
-	}
-
-	sun4c_iobuffer_start = sun4c_iobuffer_high =
-				SUN4C_REAL_PGDIR_ALIGN(sun4c_taskstack_end);
-	sun4c_iobuffer_end = SUN4C_LOCK_END;
-	bitmap_size = (sun4c_iobuffer_end - sun4c_iobuffer_start) >> PAGE_SHIFT;
-	bitmap_size = (bitmap_size + 7) >> 3;
-	bitmap_size = LONG_ALIGN(bitmap_size);
-	iobuffer_map_size = bitmap_size << 3;
-	sun4c_iobuffer_map = __alloc_bootmem(bitmap_size, SMP_CACHE_BYTES, 0UL);
-	memset((void *) sun4c_iobuffer_map, 0, bitmap_size);
-
-	sun4c_kstack_vma.vm_mm = &init_mm;
-	sun4c_kstack_vma.vm_start = sun4c_taskstack_start;
-	sun4c_kstack_vma.vm_end = sun4c_taskstack_end;
-	sun4c_kstack_vma.vm_page_prot = PAGE_SHARED;
-	sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC;
-	insert_vm_struct(&init_mm, &sun4c_kstack_vma);
-}
-
-/* Cache flushing on the sun4c. */
-static void sun4c_flush_cache_all(void)
-{
-	unsigned long begin, end;
-
-	flush_user_windows();
-	begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE);
-	end = (begin + SUN4C_VAC_SIZE);
-
-	if (sun4c_vacinfo.linesize == 32) {
-		while (begin < end) {
-			__asm__ __volatile__(
-			"ld	[%0 + 0x00], %%g0\n\t"
-			"ld	[%0 + 0x20], %%g0\n\t"
-			"ld	[%0 + 0x40], %%g0\n\t"
-			"ld	[%0 + 0x60], %%g0\n\t"
-			"ld	[%0 + 0x80], %%g0\n\t"
-			"ld	[%0 + 0xa0], %%g0\n\t"
-			"ld	[%0 + 0xc0], %%g0\n\t"
-			"ld	[%0 + 0xe0], %%g0\n\t"
-			"ld	[%0 + 0x100], %%g0\n\t"
-			"ld	[%0 + 0x120], %%g0\n\t"
-			"ld	[%0 + 0x140], %%g0\n\t"
-			"ld	[%0 + 0x160], %%g0\n\t"
-			"ld	[%0 + 0x180], %%g0\n\t"
-			"ld	[%0 + 0x1a0], %%g0\n\t"
-			"ld	[%0 + 0x1c0], %%g0\n\t"
-			"ld	[%0 + 0x1e0], %%g0\n"
-			: : "r" (begin));
-			begin += 512;
-		}
-	} else {
-		while (begin < end) {
-			__asm__ __volatile__(
-			"ld	[%0 + 0x00], %%g0\n\t"
-			"ld	[%0 + 0x10], %%g0\n\t"
-			"ld	[%0 + 0x20], %%g0\n\t"
-			"ld	[%0 + 0x30], %%g0\n\t"
-			"ld	[%0 + 0x40], %%g0\n\t"
-			"ld	[%0 + 0x50], %%g0\n\t"
-			"ld	[%0 + 0x60], %%g0\n\t"
-			"ld	[%0 + 0x70], %%g0\n\t"
-			"ld	[%0 + 0x80], %%g0\n\t"
-			"ld	[%0 + 0x90], %%g0\n\t"
-			"ld	[%0 + 0xa0], %%g0\n\t"
-			"ld	[%0 + 0xb0], %%g0\n\t"
-			"ld	[%0 + 0xc0], %%g0\n\t"
-			"ld	[%0 + 0xd0], %%g0\n\t"
-			"ld	[%0 + 0xe0], %%g0\n\t"
-			"ld	[%0 + 0xf0], %%g0\n"
-			: : "r" (begin));
-			begin += 256;
-		}
-	}
-}
-
-static void sun4c_flush_cache_mm(struct mm_struct *mm)
-{
-	int new_ctx = mm->context;
-
-	if (new_ctx != NO_CONTEXT) {
-		flush_user_windows();
-
-		if (sun4c_context_ring[new_ctx].num_entries) {
-			struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
-			unsigned long flags;
-
-			local_irq_save(flags);
-			if (head->next != head) {
-				struct sun4c_mmu_entry *entry = head->next;
-				int savectx = sun4c_get_context();
-
-				sun4c_set_context(new_ctx);
-				sun4c_flush_context();
-				do {
-					struct sun4c_mmu_entry *next = entry->next;
-
-					sun4c_user_unmap(entry);
-					free_user_entry(new_ctx, entry);
-
-					entry = next;
-				} while (entry != head);
-				sun4c_set_context(savectx);
-			}
-			local_irq_restore(flags);
-		}
-	}
-}
-
-static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int new_ctx = mm->context;
-
-	if (new_ctx != NO_CONTEXT) {
-		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
-		struct sun4c_mmu_entry *entry;
-		unsigned long flags;
-
-		flush_user_windows();
-
-		local_irq_save(flags);
-		/* All user segmap chains are ordered on entry->vaddr. */
-		for (entry = head->next;
-		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
-		     entry = entry->next)
-			;
-
-		/* Tracing various job mixtures showed that this conditional
-		 * only passes ~35% of the time in most worst-case situations,
-		 * so we avoid all of this gross overhead ~65% of the time.
-		 */
-		if ((entry != head) && (entry->vaddr < end)) {
-			int octx = sun4c_get_context();
-			sun4c_set_context(new_ctx);
-
-			/* At this point, always, (start >= entry->vaddr) and
-			 * (entry->vaddr < end), once the latter condition
-			 * ceases to hold, or we hit the end of the list, we
-			 * exit the loop.  The ordering of all user allocated
-			 * segmaps makes this all work out so beautifully.
-			 */
-			do {
-				struct sun4c_mmu_entry *next = entry->next;
-				unsigned long realend;
-
-				/* "realstart" is always >= entry->vaddr */
-				realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
-				if (end < realend)
-					realend = end;
-				if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
-					unsigned long page = entry->vaddr;
-					while (page < realend) {
-						sun4c_flush_page(page);
-						page += PAGE_SIZE;
-					}
-				} else {
-					sun4c_flush_segment(entry->vaddr);
-					sun4c_user_unmap(entry);
-					free_user_entry(new_ctx, entry);
-				}
-				entry = next;
-			} while ((entry != head) && (entry->vaddr < end));
-			sun4c_set_context(octx);
-		}
-		local_irq_restore(flags);
-	}
-}
-
-static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int new_ctx = mm->context;
-
-	/* Sun4c has no separate I/D caches, so we cannot optimize away
-	 * flushes for non-text pages.
-	 */
-	if (new_ctx != NO_CONTEXT) {
-		int octx = sun4c_get_context();
-		unsigned long flags;
-
-		flush_user_windows();
-		local_irq_save(flags);
-		sun4c_set_context(new_ctx);
-		sun4c_flush_page(page);
-		sun4c_set_context(octx);
-		local_irq_restore(flags);
-	}
-}
-
-static void sun4c_flush_page_to_ram(unsigned long page)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	sun4c_flush_page(page);
-	local_irq_restore(flags);
-}
-
-/* Sun4c cache is unified, both instructions and data live there, so
- * no need to flush the on-stack instructions for new signal handlers.
- */
-static void sun4c_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
-{
-}
-
-/* TLB flushing on the sun4c.  These routines count on the cache
- * flushing code to flush the user register windows so that we need
- * not do so when we get here.
- */
-
-static void sun4c_flush_tlb_all(void)
-{
-	struct sun4c_mmu_entry *this_entry, *next_entry;
-	unsigned long flags;
-	int savectx, ctx;
-
-	local_irq_save(flags);
-	this_entry = sun4c_kernel_ring.ringhd.next;
-	savectx = sun4c_get_context();
-	flush_user_windows();
-	while (sun4c_kernel_ring.num_entries) {
-		next_entry = this_entry->next;
-		sun4c_flush_segment(this_entry->vaddr);
-		for (ctx = 0; ctx < num_contexts; ctx++) {
-			sun4c_set_context(ctx);
-			sun4c_put_segmap(this_entry->vaddr, invalid_segment);
-		}
-		free_kernel_entry(this_entry, &sun4c_kernel_ring);
-		this_entry = next_entry;
-	}
-	sun4c_set_context(savectx);
-	local_irq_restore(flags);
-}
-
-static void sun4c_flush_tlb_mm(struct mm_struct *mm)
-{
-	int new_ctx = mm->context;
-
-	if (new_ctx != NO_CONTEXT) {
-		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
-		unsigned long flags;
-
-		local_irq_save(flags);
-		if (head->next != head) {
-			struct sun4c_mmu_entry *entry = head->next;
-			int savectx = sun4c_get_context();
-
-			sun4c_set_context(new_ctx);
-			sun4c_flush_context();
-			do {
-				struct sun4c_mmu_entry *next = entry->next;
-
-				sun4c_user_unmap(entry);
-				free_user_entry(new_ctx, entry);
-
-				entry = next;
-			} while (entry != head);
-			sun4c_set_context(savectx);
-		}
-		local_irq_restore(flags);
-	}
-}
-
-static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int new_ctx = mm->context;
-
-	if (new_ctx != NO_CONTEXT) {
-		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
-		struct sun4c_mmu_entry *entry;
-		unsigned long flags;
-
-		local_irq_save(flags);
-		/* See commentary in sun4c_flush_cache_range(). */
-		for (entry = head->next;
-		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
-		     entry = entry->next)
-			;
-
-		if ((entry != head) && (entry->vaddr < end)) {
-			int octx = sun4c_get_context();
-
-			sun4c_set_context(new_ctx);
-			do {
-				struct sun4c_mmu_entry *next = entry->next;
-
-				sun4c_flush_segment(entry->vaddr);
-				sun4c_user_unmap(entry);
-				free_user_entry(new_ctx, entry);
-
-				entry = next;
-			} while ((entry != head) && (entry->vaddr < end));
-			sun4c_set_context(octx);
-		}
-		local_irq_restore(flags);
-	}
-}
-
-static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int new_ctx = mm->context;
-
-	if (new_ctx != NO_CONTEXT) {
-		int savectx = sun4c_get_context();
-		unsigned long flags;
-
-		local_irq_save(flags);
-		sun4c_set_context(new_ctx);
-		page &= PAGE_MASK;
-		sun4c_flush_page(page);
-		sun4c_put_pte(page, 0);
-		sun4c_set_context(savectx);
-		local_irq_restore(flags);
-	}
-}
-
-static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr)
-{
-	unsigned long page_entry, pg_iobits;
-
-	pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
-		    _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
-
-	page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
-	page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
-	sun4c_put_pte(virt_addr, page_entry);
-}
-
-static void sun4c_mapiorange(unsigned int bus, unsigned long xpa,
-    unsigned long xva, unsigned int len)
-{
-	while (len != 0) {
-		len -= PAGE_SIZE;
-		sun4c_mapioaddr(xpa, xva);
-		xva += PAGE_SIZE;
-		xpa += PAGE_SIZE;
-	}
-}
-
-static void sun4c_unmapiorange(unsigned long virt_addr, unsigned int len)
-{
-	while (len != 0) {
-		len -= PAGE_SIZE;
-		sun4c_put_pte(virt_addr, 0);
-		virt_addr += PAGE_SIZE;
-	}
-}
-
-static void sun4c_alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
-{
-	struct ctx_list *ctxp;
-
-	ctxp = ctx_free.next;
-	if (ctxp != &ctx_free) {
-		remove_from_ctx_list(ctxp);
-		add_to_used_ctxlist(ctxp);
-		mm->context = ctxp->ctx_number;
-		ctxp->ctx_mm = mm;
-		return;
-	}
-	ctxp = ctx_used.next;
-	if (ctxp->ctx_mm == old_mm)
-		ctxp = ctxp->next;
-	remove_from_ctx_list(ctxp);
-	add_to_used_ctxlist(ctxp);
-	ctxp->ctx_mm->context = NO_CONTEXT;
-	ctxp->ctx_mm = mm;
-	mm->context = ctxp->ctx_number;
-	sun4c_demap_context(&sun4c_context_ring[ctxp->ctx_number],
-			       ctxp->ctx_number);
-}
-
-/* Switch the current MM context. */
-static void sun4c_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
-{
-	struct ctx_list *ctx;
-	int dirty = 0;
-
-	if (mm->context == NO_CONTEXT) {
-		dirty = 1;
-		sun4c_alloc_context(old_mm, mm);
-	} else {
-		/* Update the LRU ring of contexts. */
-		ctx = ctx_list_pool + mm->context;
-		remove_from_ctx_list(ctx);
-		add_to_used_ctxlist(ctx);
-	}
-	if (dirty || old_mm != mm)
-		sun4c_set_context(mm->context);
-}
-
-static void sun4c_destroy_context(struct mm_struct *mm)
-{
-	struct ctx_list *ctx_old;
-
-	if (mm->context != NO_CONTEXT) {
-		sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context);
-		ctx_old = ctx_list_pool + mm->context;
-		remove_from_ctx_list(ctx_old);
-		add_to_free_ctxlist(ctx_old);
-		mm->context = NO_CONTEXT;
-	}
-}
-
-static void sun4c_mmu_info(struct seq_file *m)
-{
-	int used_user_entries, i;
-
-	used_user_entries = 0;
-	for (i = 0; i < num_contexts; i++)
-		used_user_entries += sun4c_context_ring[i].num_entries;
-
-	seq_printf(m, 
-		   "vacsize\t\t: %d bytes\n"
-		   "vachwflush\t: %s\n"
-		   "vaclinesize\t: %d bytes\n"
-		   "mmuctxs\t\t: %d\n"
-		   "mmupsegs\t: %d\n"
-		   "kernelpsegs\t: %d\n"
-		   "kfreepsegs\t: %d\n"
-		   "usedpsegs\t: %d\n"
-		   "ufreepsegs\t: %d\n"
-		   "user_taken\t: %d\n"
-		   "max_taken\t: %d\n",
-		   sun4c_vacinfo.num_bytes,
-		   (sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
-		   sun4c_vacinfo.linesize,
-		   num_contexts,
-		   (invalid_segment + 1),
-		   sun4c_kernel_ring.num_entries,
-		   sun4c_kfree_ring.num_entries,
-		   used_user_entries,
-		   sun4c_ufree_ring.num_entries,
-		   sun4c_user_taken_entries,
-		   max_user_taken_entries);
-}
-
-/* Nothing below here should touch the mmu hardware nor the mmu_entry
- * data structures.
- */
-
-/* First the functions which the mid-level code uses to directly
- * manipulate the software page tables.  Some defines since we are
- * emulating the i386 page directory layout.
- */
-#define PGD_PRESENT  0x001
-#define PGD_RW       0x002
-#define PGD_USER     0x004
-#define PGD_ACCESSED 0x020
-#define PGD_DIRTY    0x040
-#define PGD_TABLE    (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)
-
-static void sun4c_set_pte(pte_t *ptep, pte_t pte)
-{
-	*ptep = pte;
-}
-
-static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
-{
-}
-
-static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep)
-{
-	pmdp->pmdv[0] = PGD_TABLE | (unsigned long) ptep;
-}
-
-static void sun4c_pmd_populate(pmd_t * pmdp, struct page * ptep)
-{
-	if (page_address(ptep) == NULL) BUG();	/* No highmem on sun4c */
-	pmdp->pmdv[0] = PGD_TABLE | (unsigned long) page_address(ptep);
-}
-
-static int sun4c_pte_present(pte_t pte)
-{
-	return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
-}
-static void sun4c_pte_clear(pte_t *ptep)	{ *ptep = __pte(0); }
-
-static int sun4c_pmd_bad(pmd_t pmd)
-{
-	return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
-		(!virt_addr_valid(pmd_val(pmd))));
-}
-
-static int sun4c_pmd_present(pmd_t pmd)
-{
-	return ((pmd_val(pmd) & PGD_PRESENT) != 0);
-}
-
-#if 0 /* if PMD takes one word */
-static void sun4c_pmd_clear(pmd_t *pmdp)	{ *pmdp = __pmd(0); }
-#else /* if pmd_t is a longish aggregate */
-static void sun4c_pmd_clear(pmd_t *pmdp) {
-	memset((void *)pmdp, 0, sizeof(pmd_t));
-}
-#endif
-
-static int sun4c_pgd_none(pgd_t pgd)		{ return 0; }
-static int sun4c_pgd_bad(pgd_t pgd)		{ return 0; }
-static int sun4c_pgd_present(pgd_t pgd)	        { return 1; }
-static void sun4c_pgd_clear(pgd_t * pgdp)	{ }
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static pte_t sun4c_pte_mkwrite(pte_t pte)
-{
-	pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE);
-	if (pte_val(pte) & _SUN4C_PAGE_MODIFIED)
-		pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
-	return pte;
-}
-
-static pte_t sun4c_pte_mkdirty(pte_t pte)
-{
-	pte = __pte(pte_val(pte) | _SUN4C_PAGE_MODIFIED);
-	if (pte_val(pte) & _SUN4C_PAGE_WRITE)
-		pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
-	return pte;
-}
-
-static pte_t sun4c_pte_mkyoung(pte_t pte)
-{
-	pte = __pte(pte_val(pte) | _SUN4C_PAGE_ACCESSED);
-	if (pte_val(pte) & _SUN4C_PAGE_READ)
-		pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_READ);
-	return pte;
-}
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot)
-{
-	return __pte(page_to_pfn(page) | pgprot_val(pgprot));
-}
-
-static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
-{
-	return __pte((phys_page >> PAGE_SHIFT) | pgprot_val(pgprot));
-}
-
-static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
-{
-	return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
-}
-
-static unsigned long sun4c_pte_pfn(pte_t pte)
-{
-	return pte_val(pte) & SUN4C_PFN_MASK;
-}
-
-static pte_t sun4c_pgoff_to_pte(unsigned long pgoff)
-{
-	return __pte(pgoff | _SUN4C_PAGE_FILE);
-}
-
-static unsigned long sun4c_pte_to_pgoff(pte_t pte)
-{
-	return pte_val(pte) & ((1UL << PTE_FILE_MAX_BITS) - 1);
-}
-
-
-static inline unsigned long sun4c_pmd_page_v(pmd_t pmd)
-{
-	return (pmd_val(pmd) & PAGE_MASK);
-}
-
-static struct page *sun4c_pmd_page(pmd_t pmd)
-{
-	return virt_to_page(sun4c_pmd_page_v(pmd));
-}
-
-static unsigned long sun4c_pgd_page(pgd_t pgd) { return 0; }
-
-/* to find an entry in a page-table-directory */
-static inline pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
-{
-	return mm->pgd + (address >> SUN4C_PGDIR_SHIFT);
-}
-
-/* Find an entry in the second-level page table.. */
-static pmd_t *sun4c_pmd_offset(pgd_t * dir, unsigned long address)
-{
-	return (pmd_t *) dir;
-}
-
-/* Find an entry in the third-level page table.. */ 
-pte_t *sun4c_pte_offset_kernel(pmd_t * dir, unsigned long address)
-{
-	return (pte_t *) sun4c_pmd_page_v(*dir) +
-			((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
-}
-
-static unsigned long sun4c_swp_type(swp_entry_t entry)
-{
-	return (entry.val & SUN4C_SWP_TYPE_MASK);
-}
-
-static unsigned long sun4c_swp_offset(swp_entry_t entry)
-{
-	return (entry.val >> SUN4C_SWP_OFF_SHIFT) & SUN4C_SWP_OFF_MASK;
-}
-
-static swp_entry_t sun4c_swp_entry(unsigned long type, unsigned long offset)
-{
-	return (swp_entry_t) {
-		  (offset & SUN4C_SWP_OFF_MASK) << SUN4C_SWP_OFF_SHIFT
-		| (type & SUN4C_SWP_TYPE_MASK) };
-}
-
-static void sun4c_free_pte_slow(pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static void sun4c_free_pgd_slow(pgd_t *pgd)
-{
-	free_page((unsigned long)pgd);
-}
-
-static pgd_t *sun4c_get_pgd_fast(void)
-{
-	unsigned long *ret;
-
-	if ((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = ret[1];
-		pgtable_cache_size--;
-	} else {
-		pgd_t *init;
-		
-		ret = (unsigned long *)__get_free_page(GFP_KERNEL);
-		memset (ret, 0, (KERNBASE / SUN4C_PGDIR_SIZE) * sizeof(pgd_t));
-		init = sun4c_pgd_offset(&init_mm, 0);
-		memcpy (((pgd_t *)ret) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
-			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-	}
-	return (pgd_t *)ret;
-}
-
-static void sun4c_free_pgd_fast(pgd_t *pgd)
-{
-	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-	pgd_quicklist = (unsigned long *) pgd;
-	pgtable_cache_size++;
-}
-
-
-static inline pte_t *
-sun4c_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
-{
-	unsigned long *ret;
-
-	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = ret[1];
-		pgtable_cache_size--;
-	}
-	return (pte_t *)ret;
-}
-
-static pte_t *sun4c_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	pte_t *pte;
-
-	if ((pte = sun4c_pte_alloc_one_fast(mm, address)) != NULL)
-		return pte;
-
-	pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-	return pte;
-}
-
-static pgtable_t sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	pte_t *pte;
-	struct page *page;
-
-	pte = sun4c_pte_alloc_one_kernel(mm, address);
-	if (pte == NULL)
-		return NULL;
-	page = virt_to_page(pte);
-	pgtable_page_ctor(page);
-	return page;
-}
-
-static inline void sun4c_free_pte_fast(pte_t *pte)
-{
-	*(unsigned long *)pte = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pte;
-	pgtable_cache_size++;
-}
-
-static void sun4c_pte_free(pgtable_t pte)
-{
-	pgtable_page_dtor(pte);
-	sun4c_free_pte_fast(page_address(pte));
-}
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-static pmd_t *sun4c_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	BUG();
-	return NULL;
-}
-
-static void sun4c_free_pmd_fast(pmd_t * pmd) { }
-
-static void sun4c_check_pgt_cache(int low, int high)
-{
-	if (pgtable_cache_size > high) {
-		do {
-			if (pgd_quicklist)
-				sun4c_free_pgd_slow(sun4c_get_pgd_fast());
-			if (pte_quicklist)
-				sun4c_free_pte_slow(sun4c_pte_alloc_one_fast(NULL, 0));
-		} while (pgtable_cache_size > low);
-	}
-}
-
-/* An experiment, turn off by default for now... -DaveM */
-#define SUN4C_PRELOAD_PSEG
-
-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
-{
-	unsigned long flags;
-	int pseg;
-
-	if (vma->vm_mm->context == NO_CONTEXT)
-		return;
-
-	local_irq_save(flags);
-	address &= PAGE_MASK;
-	if ((pseg = sun4c_get_segmap(address)) == invalid_segment) {
-		struct sun4c_mmu_entry *entry = sun4c_user_strategy();
-		struct mm_struct *mm = vma->vm_mm;
-		unsigned long start, end;
-
-		entry->vaddr = start = (address & SUN4C_REAL_PGDIR_MASK);
-		entry->ctx = mm->context;
-		add_ring_ordered(sun4c_context_ring + mm->context, entry);
-		sun4c_put_segmap(entry->vaddr, entry->pseg);
-		end = start + SUN4C_REAL_PGDIR_SIZE;
-		while (start < end) {
-#ifdef SUN4C_PRELOAD_PSEG
-			pgd_t *pgdp = sun4c_pgd_offset(mm, start);
-			pte_t *ptep;
-
-			if (!pgdp)
-				goto no_mapping;
-			ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, start);
-			if (!ptep || !(pte_val(*ptep) & _SUN4C_PAGE_PRESENT))
-				goto no_mapping;
-			sun4c_put_pte(start, pte_val(*ptep));
-			goto next;
-
-		no_mapping:
-#endif
-			sun4c_put_pte(start, 0);
-#ifdef SUN4C_PRELOAD_PSEG
-		next:
-#endif
-			start += PAGE_SIZE;
-		}
-#ifndef SUN4C_PRELOAD_PSEG
-		sun4c_put_pte(address, pte_val(*ptep));
-#endif
-		local_irq_restore(flags);
-		return;
-	} else {
-		struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg];
-
-		remove_lru(entry);
-		add_lru(entry);
-	}
-
-	sun4c_put_pte(address, pte_val(*ptep));
-	local_irq_restore(flags);
-}
-
-extern void sparc_context_init(int);
-extern unsigned long bootmem_init(unsigned long *pages_avail);
-extern unsigned long last_valid_pfn;
-
-void __init sun4c_paging_init(void)
-{
-	int i, cnt;
-	unsigned long kernel_end, vaddr;
-	extern struct resource sparc_iomap;
-	unsigned long end_pfn, pages_avail;
-
-	kernel_end = (unsigned long) &_end;
-	kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
-
-	pages_avail = 0;
-	last_valid_pfn = bootmem_init(&pages_avail);
-	end_pfn = last_valid_pfn;
-
-	sun4c_probe_mmu();
-	invalid_segment = (num_segmaps - 1);
-	sun4c_init_mmu_entry_pool();
-	sun4c_init_rings();
-	sun4c_init_map_kernelprom(kernel_end);
-	sun4c_init_clean_mmu(kernel_end);
-	sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS);
-	sun4c_init_lock_area(sparc_iomap.start, IOBASE_END);
-	sun4c_init_lock_area(DVMA_VADDR, DVMA_END);
-	sun4c_init_lock_areas();
-	sun4c_init_fill_user_ring();
-
-	sun4c_set_context(0);
-	memset(swapper_pg_dir, 0, PAGE_SIZE);
-	memset(pg0, 0, PAGE_SIZE);
-	memset(pg1, 0, PAGE_SIZE);
-	memset(pg2, 0, PAGE_SIZE);
-	memset(pg3, 0, PAGE_SIZE);
-
-	/* Save work later. */
-	vaddr = VMALLOC_START;
-	swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg0);
-	vaddr += SUN4C_PGDIR_SIZE;
-	swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg1);
-	vaddr += SUN4C_PGDIR_SIZE;
-	swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg2);
-	vaddr += SUN4C_PGDIR_SIZE;
-	swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg3);
-	sun4c_init_ss2_cache_bug();
-	sparc_context_init(num_contexts);
-
-	{
-		unsigned long zones_size[MAX_NR_ZONES];
-		unsigned long zholes_size[MAX_NR_ZONES];
-		unsigned long npages;
-		int znum;
-
-		for (znum = 0; znum < MAX_NR_ZONES; znum++)
-			zones_size[znum] = zholes_size[znum] = 0;
-
-		npages = max_low_pfn - pfn_base;
-
-		zones_size[ZONE_DMA] = npages;
-		zholes_size[ZONE_DMA] = npages - pages_avail;
-
-		npages = highend_pfn - max_low_pfn;
-		zones_size[ZONE_HIGHMEM] = npages;
-		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
-
-		free_area_init_node(0, zones_size, pfn_base, zholes_size);
-	}
-
-	cnt = 0;
-	for (i = 0; i < num_segmaps; i++)
-		if (mmu_entry_pool[i].locked)
-			cnt++;
-
-	max_user_taken_entries = num_segmaps - cnt - 40 - 1;
-
-	printk("SUN4C: %d mmu entries for the kernel\n", cnt);
-}
-
-static pgprot_t sun4c_pgprot_noncached(pgprot_t prot)
-{
-	prot |= __pgprot(_SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE);
-
-	return prot;
-}
-
-/* Load up routines and constants for sun4c mmu */
-void __init ld_mmu_sun4c(void)
-{
-	extern void ___xchg32_sun4c(void);
-	
-	printk("Loading sun4c MMU routines\n");
-
-	/* First the constants */
-	BTFIXUPSET_SIMM13(pgdir_shift, SUN4C_PGDIR_SHIFT);
-	BTFIXUPSET_SETHI(pgdir_size, SUN4C_PGDIR_SIZE);
-	BTFIXUPSET_SETHI(pgdir_mask, SUN4C_PGDIR_MASK);
-
-	BTFIXUPSET_SIMM13(ptrs_per_pmd, SUN4C_PTRS_PER_PMD);
-	BTFIXUPSET_SIMM13(ptrs_per_pgd, SUN4C_PTRS_PER_PGD);
-	BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE);
-
-	BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE));
-	PAGE_SHARED = pgprot_val(SUN4C_PAGE_SHARED);
-	BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY));
-	BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
-	BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
-	page_kernel = pgprot_val(SUN4C_PAGE_KERNEL);
-
-	/* Functions */
-	BTFIXUPSET_CALL(pgprot_noncached, sun4c_pgprot_noncached, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);
-	
-	BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM);
-
-	if (sun4c_vacinfo.do_hwflushes) {
-		BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_hw, BTFIXUPCALL_NORM);
-	} else {
-		BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_sw, BTFIXUPCALL_NORM);
-	}
-
-	BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(__flush_page_to_ram, sun4c_flush_page_to_ram, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(flush_sig_insns, sun4c_flush_sig_insns, BTFIXUPCALL_NOP);
-
-	BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
-
-	BTFIXUPSET_CALL(pte_pfn, sun4c_pte_pfn, BTFIXUPCALL_NORM);
-#if 0 /* PAGE_SHIFT <= 12 */ /* Eek. Investigate. XXX */
-	BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
-#else
-	BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM);
-#endif
-	BTFIXUPSET_CALL(pmd_set, sun4c_pmd_set, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_populate, sun4c_pmd_populate, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0);
-
-	BTFIXUPSET_CALL(pmd_bad, sun4c_pmd_bad, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_present, sun4c_pmd_present, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_clear, sun4c_pmd_clear, BTFIXUPCALL_STG0O0);
-
-	BTFIXUPSET_CALL(pgd_none, sun4c_pgd_none, BTFIXUPCALL_RETINT(0));
-	BTFIXUPSET_CALL(pgd_bad, sun4c_pgd_bad, BTFIXUPCALL_RETINT(0));
-	BTFIXUPSET_CALL(pgd_present, sun4c_pgd_present, BTFIXUPCALL_RETINT(1));
-	BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP);
-
-	BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
-	BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_offset_kernel, sun4c_pte_offset_kernel, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(free_pte_fast, sun4c_free_pte_fast, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_free, sun4c_pte_free, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_alloc_one_kernel, sun4c_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_alloc_one, sun4c_pte_alloc_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(free_pmd_fast, sun4c_free_pmd_fast, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(pmd_alloc_one, sun4c_pmd_alloc_one, BTFIXUPCALL_RETO0);
-	BTFIXUPSET_CALL(free_pgd_fast, sun4c_free_pgd_fast, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(get_pgd_fast, sun4c_get_pgd_fast, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_HALF(pte_writei, _SUN4C_PAGE_WRITE);
-	BTFIXUPSET_HALF(pte_dirtyi, _SUN4C_PAGE_MODIFIED);
-	BTFIXUPSET_HALF(pte_youngi, _SUN4C_PAGE_ACCESSED);
-	BTFIXUPSET_HALF(pte_filei, _SUN4C_PAGE_FILE);
-	BTFIXUPSET_HALF(pte_wrprotecti, _SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE);
-	BTFIXUPSET_HALF(pte_mkcleani, _SUN4C_PAGE_MODIFIED|_SUN4C_PAGE_SILENT_WRITE);
-	BTFIXUPSET_HALF(pte_mkoldi, _SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_SILENT_READ);
-	BTFIXUPSET_CALL(pte_mkwrite, sun4c_pte_mkwrite, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_mkdirty, sun4c_pte_mkdirty, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pte_mkyoung, sun4c_pte_mkyoung, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(update_mmu_cache, sun4c_update_mmu_cache, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(pte_to_pgoff, sun4c_pte_to_pgoff, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pgoff_to_pte, sun4c_pgoff_to_pte, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(mmu_lockarea, sun4c_lockarea, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_unlockarea, sun4c_unlockarea, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(mmu_get_scsi_one, sun4c_get_scsi_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_get_scsi_sgl, sun4c_get_scsi_sgl, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_release_scsi_one, sun4c_release_scsi_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(sparc_mapiorange, sun4c_mapiorange, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(sparc_unmapiorange, sun4c_unmapiorange, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(__swp_type, sun4c_swp_type, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(alloc_thread_info_node, sun4c_alloc_thread_info_node, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);
-
-	BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);
-
-	/* These should _never_ get called with two level tables. */
-	BTFIXUPSET_CALL(pgd_set, sun4c_pgd_set, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(pgd_page_vaddr, sun4c_pgd_page, BTFIXUPCALL_RETO0);
-}
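
The pseg bookkeeping removed above (add_ring(), add_ring_ordered(), remove_ring(), add_lru(), remove_lru()) is built on sentinel-headed doubly linked rings, which is also why entry.S must not drift out of sync with struct sun4c_mmu_entry. Below is a minimal, self-contained C sketch of that ring idiom, written here purely for illustration: struct ring, ring_add() and the rest are made-up names, not kernel code.

#include <stdio.h>

/* Sentinel-headed doubly linked ring, in the style of the sun4c pseg rings. */
struct ring_entry {
	struct ring_entry *next;
	struct ring_entry *prev;
	int pseg;			/* payload: segment number */
};

struct ring {
	struct ring_entry head;		/* sentinel; never carries a payload */
	int num_entries;
};

static void ring_init(struct ring *r)
{
	r->head.next = r->head.prev = &r->head;
	r->num_entries = 0;
}

/* Insert right after the sentinel, mirroring add_ring() above. */
static void ring_add(struct ring *r, struct ring_entry *e)
{
	struct ring_entry *head = &r->head;

	e->prev = head;
	(e->next = head->next)->prev = e;
	head->next = e;
	r->num_entries++;
}

/* Unlink an entry, mirroring remove_ring() above. */
static void ring_remove(struct ring *r, struct ring_entry *e)
{
	struct ring_entry *next = e->next;

	(next->prev = e->prev)->next = next;
	r->num_entries--;
}

int main(void)
{
	struct ring r;
	struct ring_entry a = { .pseg = 1 }, b = { .pseg = 2 };

	ring_init(&r);
	ring_add(&r, &a);
	ring_add(&r, &b);
	ring_remove(&r, &a);
	/* The ring now holds one entry: pseg 2. */
	printf("entries=%d first=%d\n", r.num_entries, r.head.next->pseg);
	return 0;
}

The sentinel keeps both insertion and removal branch-free, which matters because these paths run with interrupts disabled inside the TLB replacement code.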
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index b57a594..874162a 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -495,11 +495,11 @@
 	stx		%o7, [%g1 + GR_SNAP_O7]
 	stx		%i7, [%g1 + GR_SNAP_I7]
 	/* Don't try this at home kids... */
-	rdpr		%cwp, %g2
-	sub		%g2, 1, %g7
+	rdpr		%cwp, %g3
+	sub		%g3, 1, %g7
 	wrpr		%g7, %cwp
 	mov		%i7, %g7
-	wrpr		%g2, %cwp
+	wrpr		%g3, %cwp
 	stx		%g7, [%g1 + GR_SNAP_RPC]
 	sethi		%hi(trap_block), %g7
 	or		%g7, %lo(trap_block), %g7
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index 6dfcc13..bf8ee06 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -14,7 +14,6 @@
 #include <asm/page.h>
 #include <asm/pgtsrmmu.h>
 #include <asm/viking.h>
-#include <asm/btfixup.h>
 
 #ifdef CONFIG_SMP
 	.data
diff --git a/arch/sparc/net/Makefile b/arch/sparc/net/Makefile
new file mode 100644
index 0000000..1306a58
--- /dev/null
+++ b/arch/sparc/net/Makefile
@@ -0,0 +1,4 @@
+#
+# Arch-specific network modules
+#
+obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
diff --git a/arch/sparc/net/bpf_jit.h b/arch/sparc/net/bpf_jit.h
new file mode 100644
index 0000000..33d6b37
--- /dev/null
+++ b/arch/sparc/net/bpf_jit.h
@@ -0,0 +1,68 @@
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+/* Conventions:
+ *  %g1 : temporary
+ *  %g2 : Secondary temporary used by SKB data helper stubs.
+ *  %g3 : packet offset passed into SKB data helper stubs.
+ *  %o0 : pointer to skb (first argument given to JIT function)
+ *  %o1 : BPF A accumulator
+ *  %o2 : BPF X accumulator
+ *  %o3 : Holds saved %o7 so we can call helper functions without needing
+ *        to allocate a register window.
+ *  %o4 : skb->len - skb->data_len
+ *  %o5 : skb->data
+ */
+
+#ifndef __ASSEMBLER__
+#define G0		0x00
+#define G1		0x01
+#define G3		0x03
+#define G6		0x06
+#define O0		0x08
+#define O1		0x09
+#define O2		0x0a
+#define O3		0x0b
+#define O4		0x0c
+#define O5		0x0d
+#define SP		0x0e
+#define O7		0x0f
+#define FP		0x1e
+
+#define r_SKB		O0
+#define r_A		O1
+#define r_X		O2
+#define r_saved_O7	O3
+#define r_HEADLEN	O4
+#define r_SKB_DATA	O5
+#define r_TMP		G1
+#define r_TMP2		G2
+#define r_OFF		G3
+
+/* assembly code in arch/sparc/net/bpf_jit_asm.S */
+extern u32 bpf_jit_load_word[];
+extern u32 bpf_jit_load_half[];
+extern u32 bpf_jit_load_byte[];
+extern u32 bpf_jit_load_byte_msh[];
+extern u32 bpf_jit_load_word_positive_offset[];
+extern u32 bpf_jit_load_half_positive_offset[];
+extern u32 bpf_jit_load_byte_positive_offset[];
+extern u32 bpf_jit_load_byte_msh_positive_offset[];
+extern u32 bpf_jit_load_word_negative_offset[];
+extern u32 bpf_jit_load_half_negative_offset[];
+extern u32 bpf_jit_load_byte_negative_offset[];
+extern u32 bpf_jit_load_byte_msh_negative_offset[];
+
+#else
+#define r_SKB		%o0
+#define r_A		%o1
+#define r_X		%o2
+#define r_saved_O7	%o3
+#define r_HEADLEN	%o4
+#define r_SKB_DATA	%o5
+#define r_TMP		%g1
+#define r_TMP2		%g2
+#define r_OFF		%g3
+#endif
+
+#endif /* _BPF_JIT_H */
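
The register conventions above line up with the SPARC C calling convention because the JIT image is entered like an ordinary C function whose first argument is the skb, which is why %o0 carries the skb pointer on entry. A minimal sketch of the call site, assuming the existing sk_filter layout (bpf_func falls back to sk_run_filter() when no JIT image was produced), would be:

	/* Hedged sketch, not part of this patch: how the generated code is
	 * entered.  fp->bpf_func points at the JIT image after a successful
	 * bpf_jit_compile(), and at sk_run_filter() otherwise, so callers do
	 * not need to care which one they got.
	 */
	static unsigned int run_filter(const struct sk_filter *fp,
				       const struct sk_buff *skb)
	{
		return fp->bpf_func(skb, fp->insns);
	}
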
diff --git a/arch/sparc/net/bpf_jit_asm.S b/arch/sparc/net/bpf_jit_asm.S
new file mode 100644
index 0000000..9d016c7
--- /dev/null
+++ b/arch/sparc/net/bpf_jit_asm.S
@@ -0,0 +1,205 @@
+#include <asm/ptrace.h>
+
+#include "bpf_jit.h"
+
+#ifdef CONFIG_SPARC64
+#define SAVE_SZ		176
+#define SCRATCH_OFF	STACK_BIAS + 128
+#define BE_PTR(label)	be,pn %xcc, label
+#else
+#define SAVE_SZ		96
+#define SCRATCH_OFF	72
+#define BE_PTR(label)	be label
+#endif
+
+#define SKF_MAX_NEG_OFF	(-0x200000) /* SKF_LL_OFF from filter.h */
+
+	.text
+	.globl	bpf_jit_load_word
+bpf_jit_load_word:
+	cmp	r_OFF, 0
+	bl	bpf_slow_path_word_neg
+	 nop
+	.globl	bpf_jit_load_word_positive_offset
+bpf_jit_load_word_positive_offset:
+	sub	r_HEADLEN, r_OFF, r_TMP
+	cmp	r_TMP, 3
+	ble	bpf_slow_path_word
+	 add	r_SKB_DATA, r_OFF, r_TMP
+	andcc	r_TMP, 3, %g0
+	bne	load_word_unaligned
+	 nop
+	retl
+	 ld	[r_TMP], r_A
+load_word_unaligned:
+	ldub	[r_TMP + 0x0], r_OFF
+	ldub	[r_TMP + 0x1], r_TMP2
+	sll	r_OFF, 8, r_OFF
+	or	r_OFF, r_TMP2, r_OFF
+	ldub	[r_TMP + 0x2], r_TMP2
+	sll	r_OFF, 8, r_OFF
+	or	r_OFF, r_TMP2, r_OFF
+	ldub	[r_TMP + 0x3], r_TMP2
+	sll	r_OFF, 8, r_OFF
+	retl
+	 or	r_OFF, r_TMP2, r_A
+
+	.globl	bpf_jit_load_half
+bpf_jit_load_half:
+	cmp	r_OFF, 0
+	bl	bpf_slow_path_half_neg
+	 nop
+	.globl	bpf_jit_load_half_positive_offset
+bpf_jit_load_half_positive_offset:
+	sub	r_HEADLEN, r_OFF, r_TMP
+	cmp	r_TMP, 1
+	ble	bpf_slow_path_half
+	 add	r_SKB_DATA, r_OFF, r_TMP
+	andcc	r_TMP, 1, %g0
+	bne	load_half_unaligned
+	 nop
+	retl
+	 lduh	[r_TMP], r_A
+load_half_unaligned:
+	ldub	[r_TMP + 0x0], r_OFF
+	ldub	[r_TMP + 0x1], r_TMP2
+	sll	r_OFF, 8, r_OFF
+	retl
+	 or	r_OFF, r_TMP2, r_A
+
+	.globl	bpf_jit_load_byte
+bpf_jit_load_byte:
+	cmp	r_OFF, 0
+	bl	bpf_slow_path_byte_neg
+	 nop
+	.globl	bpf_jit_load_byte_positive_offset
+bpf_jit_load_byte_positive_offset:
+	cmp	r_OFF, r_HEADLEN
+	bge	bpf_slow_path_byte
+	 nop
+	retl
+	 ldub	[r_SKB_DATA + r_OFF], r_A
+
+	.globl	bpf_jit_load_byte_msh
+bpf_jit_load_byte_msh:
+	cmp	r_OFF, 0
+	bl	bpf_slow_path_byte_msh_neg
+	 nop
+	.globl	bpf_jit_load_byte_msh_positive_offset
+bpf_jit_load_byte_msh_positive_offset:
+	cmp	r_OFF, r_HEADLEN
+	bge	bpf_slow_path_byte_msh
+	 nop
+	ldub	[r_SKB_DATA + r_OFF], r_OFF
+	and	r_OFF, 0xf, r_OFF
+	retl
+	 sll	r_OFF, 2, r_X
+
+#define bpf_slow_path_common(LEN)	\
+	save	%sp, -SAVE_SZ, %sp;	\
+	mov	%i0, %o0;		\
+	mov	r_OFF, %o1;		\
+	add	%fp, SCRATCH_OFF, %o2;	\
+	call	skb_copy_bits;		\
+	 mov	(LEN), %o3;		\
+	cmp	%o0, 0;			\
+	restore;
+
+bpf_slow_path_word:
+	bpf_slow_path_common(4)
+	bl	bpf_error
+	 ld	[%sp + SCRATCH_OFF], r_A
+	retl
+	 nop
+bpf_slow_path_half:
+	bpf_slow_path_common(2)
+	bl	bpf_error
+	 lduh	[%sp + SCRATCH_OFF], r_A
+	retl
+	 nop
+bpf_slow_path_byte:
+	bpf_slow_path_common(1)
+	bl	bpf_error
+	 ldub	[%sp + SCRATCH_OFF], r_A
+	retl
+	 nop
+bpf_slow_path_byte_msh:
+	bpf_slow_path_common(1)
+	bl	bpf_error
+	 ldub	[%sp + SCRATCH_OFF], r_A
+	and	r_OFF, 0xf, r_OFF
+	retl
+	 sll	r_OFF, 2, r_X
+
+#define bpf_negative_common(LEN)			\
+	save	%sp, -SAVE_SZ, %sp;			\
+	mov	%i0, %o0;				\
+	mov	r_OFF, %o1;				\
+	call	bpf_internal_load_pointer_neg_helper;	\
+	 mov	(LEN), %o2;				\
+	mov	%o0, r_TMP;				\
+	cmp	%o0, 0;					\
+	BE_PTR(bpf_error);				\
+	 restore;
+
+bpf_slow_path_word_neg:
+	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
+	cmp	r_OFF, r_TMP
+	bl	bpf_error
+	 nop
+	.globl	bpf_jit_load_word_negative_offset
+bpf_jit_load_word_negative_offset:
+	bpf_negative_common(4)
+	andcc	r_TMP, 3, %g0
+	bne	load_word_unaligned
+	 nop
+	retl
+	 ld	[r_TMP], r_A
+
+bpf_slow_path_half_neg:
+	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
+	cmp	r_OFF, r_TMP
+	bl	bpf_error
+	 nop
+	.globl	bpf_jit_load_half_negative_offset
+bpf_jit_load_half_negative_offset:
+	bpf_negative_common(2)
+	andcc	r_TMP, 1, %g0
+	bne	load_half_unaligned
+	 nop
+	retl
+	 lduh	[r_TMP], r_A
+
+bpf_slow_path_byte_neg:
+	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
+	cmp	r_OFF, r_TMP
+	bl	bpf_error
+	 nop
+	.globl	bpf_jit_load_byte_negative_offset
+bpf_jit_load_byte_negative_offset:
+	bpf_negative_common(1)
+	retl
+	 ldub	[r_TMP], r_A
+
+bpf_slow_path_byte_msh_neg:
+	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
+	cmp	r_OFF, r_TMP
+	bl	bpf_error
+	 nop
+	.globl	bpf_jit_load_byte_msh_negative_offset
+bpf_jit_load_byte_msh_negative_offset:
+	bpf_negative_common(1)
+	ldub	[r_TMP], r_OFF
+	and	r_OFF, 0xf, r_OFF
+	retl
+	 sll	r_OFF, 2, r_X
+
+bpf_error:
+	/* Make the JIT program return zero.  The JIT prologue
+	 * stores away the original %o7 into r_saved_O7.  The
+	 * normal leaf function return is to use "retl" which
+	 * would evaluate to "jmpl %o7 + 8, %g0" but we want to
+	 * use the saved value thus the sequence you see here.
+	 */
+	jmpl	r_saved_O7 + 8, %g0
+	 clr	%o0
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
new file mode 100644
index 0000000..1a69244
--- /dev/null
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -0,0 +1,802 @@
+#include <linux/moduleloader.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/cache.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ptrace.h>
+
+#include "bpf_jit.h"
+
+int bpf_jit_enable __read_mostly;
+
+static inline bool is_simm13(unsigned int value)
+{
+	return value + 0x1000 < 0x2000;
+}
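
A signed 13-bit immediate spans [-4096, 4095]; adding the 0x1000 bias maps that interval onto [0, 0x1fff], so the single unsigned comparison above tests both bounds at once. A stand-alone check of the equivalence (plain user-space C, not part of the patch):

	#include <assert.h>

	static int fits_simm13_ref(int v)
	{
		return v >= -4096 && v <= 4095;	/* range of a signed 13-bit field */
	}

	static int fits_simm13(unsigned int value)
	{
		return value + 0x1000 < 0x2000;	/* the biased, single-compare form */
	}

	int main(void)
	{
		long v;

		for (v = -0x10000; v <= 0x10000; v++)
			assert(fits_simm13((unsigned int)v) == fits_simm13_ref((int)v));
		return 0;
	}
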
+
+static void bpf_flush_icache(void *start_, void *end_)
+{
+#ifdef CONFIG_SPARC64
+	/* Cheetah's I-cache is fully coherent.  */
+	if (tlb_type == spitfire) {
+		unsigned long start = (unsigned long) start_;
+		unsigned long end = (unsigned long) end_;
+
+		start &= ~7UL;
+		end = (end + 7UL) & ~7UL;
+		while (start < end) {
+			flushi(start);
+			start += 32;
+		}
+	}
+#endif
+}
+
+#define SEEN_DATAREF 1 /* might call external helpers */
+#define SEEN_XREG    2 /* X register (r_X) is used */
+#define SEEN_MEM     4 /* use mem[] for temporary storage */
+
+#define S13(X)		((X) & 0x1fff)
+#define IMMED		0x00002000
+#define RD(X)		((X) << 25)
+#define RS1(X)		((X) << 14)
+#define RS2(X)		((X))
+#define OP(X)		((X) << 30)
+#define OP2(X)		((X) << 22)
+#define OP3(X)		((X) << 19)
+#define COND(X)		((X) << 25)
+#define F1(X)		OP(X)
+#define F2(X, Y)	(OP(X) | OP2(Y))
+#define F3(X, Y)	(OP(X) | OP3(Y))
+
+#define CONDN		COND(0x0)
+#define CONDE		COND(0x1)
+#define CONDLE		COND(0x2)
+#define CONDL		COND(0x3)
+#define CONDLEU		COND(0x4)
+#define CONDCS		COND(0x5)
+#define CONDNEG		COND(0x6)
+#define CONDVC		COND(0x7)
+#define CONDA		COND(0x8)
+#define CONDNE		COND(0x9)
+#define CONDG		COND(0xa)
+#define CONDGE		COND(0xb)
+#define CONDGU		COND(0xc)
+#define CONDCC		COND(0xd)
+#define CONDPOS		COND(0xe)
+#define CONDVS		COND(0xf)
+
+#define CONDGEU		CONDCC
+#define CONDLU		CONDCS
+
+#define WDISP22(X)	(((X) >> 2) & 0x3fffff)
+
+#define BA		(F2(0, 2) | CONDA)
+#define BGU		(F2(0, 2) | CONDGU)
+#define BLEU		(F2(0, 2) | CONDLEU)
+#define BGEU		(F2(0, 2) | CONDGEU)
+#define BLU		(F2(0, 2) | CONDLU)
+#define BE		(F2(0, 2) | CONDE)
+#define BNE		(F2(0, 2) | CONDNE)
+
+#ifdef CONFIG_SPARC64
+#define BNE_PTR		(F2(0, 1) | CONDNE | (2 << 20))
+#else
+#define BNE_PTR		BNE
+#endif
+
+#define SETHI(K, REG)	\
+	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
+#define OR_LO(K, REG)	\
+	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))
+
+#define ADD		F3(2, 0x00)
+#define AND		F3(2, 0x01)
+#define ANDCC		F3(2, 0x11)
+#define OR		F3(2, 0x02)
+#define SUB		F3(2, 0x04)
+#define SUBCC		F3(2, 0x14)
+#define MUL		F3(2, 0x0a)	/* umul */
+#define DIV		F3(2, 0x0e)	/* udiv */
+#define SLL		F3(2, 0x25)
+#define SRL		F3(2, 0x26)
+#define JMPL		F3(2, 0x38)
+#define CALL		F1(1)
+#define BR		F2(0, 0x01)
+#define RD_Y		F3(2, 0x28)
+#define WR_Y		F3(2, 0x30)
+
+#define LD32		F3(3, 0x00)
+#define LD8		F3(3, 0x01)
+#define LD16		F3(3, 0x02)
+#define LD64		F3(3, 0x0b)
+#define ST32		F3(3, 0x04)
+
+#ifdef CONFIG_SPARC64
+#define LDPTR		LD64
+#define BASE_STACKFRAME	176
+#else
+#define LDPTR		LD32
+#define BASE_STACKFRAME	96
+#endif
+
+#define LD32I		(LD32 | IMMED)
+#define LD8I		(LD8 | IMMED)
+#define LD16I		(LD16 | IMMED)
+#define LD64I		(LD64 | IMMED)
+#define LDPTRI		(LDPTR | IMMED)
+#define ST32I		(ST32 | IMMED)
+
+#define emit_nop()		\
+do {				\
+	*prog++ = SETHI(0, G0);	\
+} while (0)
+
+#define emit_neg()					\
+do {	/* sub %g0, r_A, r_A */				\
+	*prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A);	\
+} while (0)
+
+#define emit_reg_move(FROM, TO)				\
+do {	/* or %g0, FROM, TO */				\
+	*prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO);	\
+} while (0)
+
+#define emit_clear(REG)					\
+do {	/* or %g0, %g0, REG */				\
+	*prog++ = OR | RS1(G0) | RS2(G0) | RD(REG);	\
+} while (0)
+
+#define emit_set_const(K, REG)					\
+do {	/* sethi %hi(K), REG */					\
+	*prog++ = SETHI(K, REG);				\
+	/* or REG, %lo(K), REG */				\
+	*prog++ = OR_LO(K, REG);				\
+} while (0)
+
+	/* Emit
+	 *
+	 *	OP	r_A, r_X, r_A
+	 */
+#define emit_alu_X(OPCODE)					\
+do {								\
+	seen |= SEEN_XREG;					\
+	*prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A);	\
+} while (0)
+
+	/* Emit either:
+	 *
+	 *	OP	r_A, K, r_A
+	 *
+	 * or
+	 *
+	 *	sethi	%hi(K), r_TMP
+	 *	or	r_TMP, %lo(K), r_TMP
+	 *	OP	r_A, r_TMP, r_A
+	 *
+	 * depending upon whether K fits in a signed 13-bit
+	 * immediate instruction field.  Emit nothing if K
+	 * is zero.
+	 */
+#define emit_alu_K(OPCODE, K)					\
+do {								\
+	if (K) {						\
+		unsigned int _insn = OPCODE;			\
+		_insn |= RS1(r_A) | RD(r_A);			\
+		if (is_simm13(K)) {				\
+			*prog++ = _insn | IMMED | S13(K);	\
+		} else {					\
+			emit_set_const(K, r_TMP);		\
+			*prog++ = _insn | RS2(r_TMP);		\
+		}						\
+	}							\
+} while (0)
+
+#define emit_loadimm(K, DEST)						\
+do {									\
+	if (is_simm13(K)) {						\
+		/* or %g0, K, DEST */					\
+		*prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST);	\
+	} else {							\
+		emit_set_const(K, DEST);				\
+	}								\
+} while (0)
+
+#define emit_loadptr(BASE, STRUCT, FIELD, DEST)				\
+do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
+	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *));	\
+	*prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST);		\
+} while (0)
+
+#define emit_load32(BASE, STRUCT, FIELD, DEST)				\
+do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
+	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32));	\
+	*prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST);		\
+} while (0)
+
+#define emit_load16(BASE, STRUCT, FIELD, DEST)				\
+do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
+	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16));	\
+	*prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST);		\
+} while (0)
+
+#define __emit_load8(BASE, STRUCT, FIELD, DEST)				\
+do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
+	*prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST);		\
+} while (0)
+
+#define emit_load8(BASE, STRUCT, FIELD, DEST)				\
+do {	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8));	\
+	__emit_load8(BASE, STRUCT, FIELD, DEST);			\
+} while (0)
+
+#define emit_ldmem(OFF, DEST)					\
+do {	*prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST);	\
+} while (0)
+
+#define emit_stmem(OFF, SRC)					\
+do {	*prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC);	\
+} while (0)
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_SPARC64
+#define emit_load_cpu(REG)						\
+	emit_load16(G6, struct thread_info, cpu, REG)
+#else
+#define emit_load_cpu(REG)						\
+	emit_load32(G6, struct thread_info, cpu, REG)
+#endif
+#else
+#define emit_load_cpu(REG)	emit_clear(REG)
+#endif
+
+#define emit_skb_loadptr(FIELD, DEST) \
+	emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST)
+#define emit_skb_load32(FIELD, DEST) \
+	emit_load32(r_SKB, struct sk_buff, FIELD, DEST)
+#define emit_skb_load16(FIELD, DEST) \
+	emit_load16(r_SKB, struct sk_buff, FIELD, DEST)
+#define __emit_skb_load8(FIELD, DEST) \
+	__emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
+#define emit_skb_load8(FIELD, DEST) \
+	emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
+
+#define emit_jmpl(BASE, IMM_OFF, LREG) \
+	*prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG))
+
+#define emit_call(FUNC)					\
+do {	void *_here = image + addrs[i] - 8;		\
+	unsigned int _off = (void *)(FUNC) - _here;	\
+	*prog++ = CALL | (((_off) >> 2) & 0x3fffffff);	\
+	emit_nop();					\
+} while (0)
+
+#define emit_branch(BR_OPC, DEST)			\
+do {	unsigned int _here = addrs[i] - 8;		\
+	*prog++ = BR_OPC | WDISP22((DEST) - _here);	\
+} while (0)
+
+#define emit_branch_off(BR_OPC, OFF)			\
+do {	*prog++ = BR_OPC | WDISP22(OFF);		\
+} while (0)
+
+#define emit_jump(DEST)		emit_branch(BA, DEST)
+
+#define emit_read_y(REG)	*prog++ = RD_Y | RD(REG)
+#define emit_write_y(REG)	*prog++ = WR_Y | IMMED | RS1(REG) | S13(0)
+
+#define emit_cmp(R1, R2) \
+	*prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0))
+
+#define emit_cmpi(R1, IMM) \
+	*prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
+
+#define emit_btst(R1, R2) \
+	*prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0))
+
+#define emit_btsti(R1, IMM) \
+	*prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
+
+#define emit_sub(R1, R2, R3) \
+	*prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3))
+
+#define emit_subi(R1, IMM, R3) \
+	*prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))
+
+#define emit_add(R1, R2, R3) \
+	*prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3))
+
+#define emit_addi(R1, IMM, R3) \
+	*prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))
+
+#define emit_alloc_stack(SZ) \
+	*prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))
+
+#define emit_release_stack(SZ) \
+	*prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))
+
+/* A note about branch offset calculations.  The addrs[] array,
+ * indexed by BPF instruction, records the address after all the
+ * sparc instructions emitted for that BPF instruction.
+ *
+ * The most common case is to emit a branch at the end of such
+ * a code sequence.  So this would be two instructions, the
+ * branch and its delay slot.
+ *
+ * Therefore by default the branch emitters calculate the branch
+ * offset field as:
+ *
+ *	destination - (addrs[i] - 8)
+ *
+ * This "addrs[i] - 8" is the address of the branch itself or
+ * what "." would be in assembler notation.  The "8" part is
+ * how we take into consideration the branch and its delay
+ * slot mentioned above.
+ *
+ * Sometimes we need to emit a branch earlier in the code
+ * sequence.  And in these situations we adjust "destination"
+ * to accommodate this difference.  For example, if we needed
+ * to emit a branch (and its delay slot) right before the
+ * final instruction emitted for a BPF opcode, we'd use
+ * "destination + 4" instead of just plain "destination" above.
+ *
+ * This is why you see all of these funny emit_branch() and
+ * emit_jump() calls with adjusted offsets.
+ */
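
A worked example of the arithmetic described above, with made-up numbers: if the code for BPF instruction i ends at addrs[i] = 40 and finishes with a branch plus delay slot, the branch itself sits at 40 - 8 = 32, so a jump to byte offset 120 is emitted with displacement 120 - 32 = 88 (22 instruction words after WDISP22). The small stand-alone check below also verifies the "destination + 4" adjustment for a branch emitted one instruction earlier:

	#include <assert.h>

	int main(void)
	{
		unsigned int addrs_i = 40;	/* hypothetical end of BPF insn i's code */
		unsigned int dest = 120;	/* hypothetical branch target */

		/* Default case: branch + delay slot are the last two instructions. */
		assert(dest - (addrs_i - 8) == 88);

		/* Branch emitted one instruction earlier, i.e. at addrs[i] - 12:
		 * passing dest + 4 to the same helper yields the true displacement.
		 */
		assert((dest + 4) - (addrs_i - 8) == dest - (addrs_i - 12));
		return 0;
	}
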
+
+void bpf_jit_compile(struct sk_filter *fp)
+{
+	unsigned int cleanup_addr, proglen, oldproglen = 0;
+	u32 temp[8], *prog, *func, seen = 0, pass;
+	const struct sock_filter *filter = fp->insns;
+	int i, flen = fp->len, pc_ret0 = -1;
+	unsigned int *addrs;
+	void *image;
+
+	if (!bpf_jit_enable)
+		return;
+
+	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
+	if (addrs == NULL)
+		return;
+
+	/* Before the first pass, make a rough estimate of addrs[]:
+	 * each BPF instruction is translated to less than 64 bytes.
+	 */
+	for (proglen = 0, i = 0; i < flen; i++) {
+		proglen += 64;
+		addrs[i] = proglen;
+	}
+	cleanup_addr = proglen; /* epilogue address */
+	image = NULL;
+	for (pass = 0; pass < 10; pass++) {
+		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
+
+		/* no prologue/epilogue for trivial filters (RET something) */
+		proglen = 0;
+		prog = temp;
+
+		/* Prologue */
+		if (seen_or_pass0) {
+			if (seen_or_pass0 & SEEN_MEM) {
+				unsigned int sz = BASE_STACKFRAME;
+				sz += BPF_MEMWORDS * sizeof(u32);
+				emit_alloc_stack(sz);
+			}
+
+			/* Make sure we don't leak kernel memory. */
+			if (seen_or_pass0 & SEEN_XREG)
+				emit_clear(r_X);
+
+			/* If this filter needs to access skb data,
+			 * load %o4 and %o5 with:
+			 *  %o4 = skb->len - skb->data_len
+			 *  %o5 = skb->data
+			 * And also back up %o7 into r_saved_O7 so we can
+			 * invoke the stubs using 'call'.
+			 */
+			if (seen_or_pass0 & SEEN_DATAREF) {
+				emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
+				emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
+				emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
+				emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
+			}
+		}
+		emit_reg_move(O7, r_saved_O7);
+
+		switch (filter[0].code) {
+		case BPF_S_RET_K:
+		case BPF_S_LD_W_LEN:
+		case BPF_S_ANC_PROTOCOL:
+		case BPF_S_ANC_PKTTYPE:
+		case BPF_S_ANC_IFINDEX:
+		case BPF_S_ANC_MARK:
+		case BPF_S_ANC_RXHASH:
+		case BPF_S_ANC_CPU:
+		case BPF_S_ANC_QUEUE:
+		case BPF_S_LD_W_ABS:
+		case BPF_S_LD_H_ABS:
+		case BPF_S_LD_B_ABS:
+			/* The first instruction sets the A register (or is
+			 * a "RET 'constant'")
+			 */
+			break;
+		default:
+			/* Make sure we don't leak kernel information to the
+			 * user.
+			 */
+			emit_clear(r_A); /* A = 0 */
+		}
+
+		for (i = 0; i < flen; i++) {
+			unsigned int K = filter[i].k;
+			unsigned int t_offset;
+			unsigned int f_offset;
+			u32 t_op, f_op;
+			int ilen;
+
+			switch (filter[i].code) {
+			case BPF_S_ALU_ADD_X:	/* A += X; */
+				emit_alu_X(ADD);
+				break;
+			case BPF_S_ALU_ADD_K:	/* A += K; */
+				emit_alu_K(ADD, K);
+				break;
+			case BPF_S_ALU_SUB_X:	/* A -= X; */
+				emit_alu_X(SUB);
+				break;
+			case BPF_S_ALU_SUB_K:	/* A -= K */
+				emit_alu_K(SUB, K);
+				break;
+			case BPF_S_ALU_AND_X:	/* A &= X */
+				emit_alu_X(AND);
+				break;
+			case BPF_S_ALU_AND_K:	/* A &= K */
+				emit_alu_K(AND, K);
+				break;
+			case BPF_S_ALU_OR_X:	/* A |= X */
+				emit_alu_X(OR);
+				break;
+			case BPF_S_ALU_OR_K:	/* A |= K */
+				emit_alu_K(OR, K);
+				break;
+			case BPF_S_ALU_LSH_X:	/* A <<= X */
+				emit_alu_X(SLL);
+				break;
+			case BPF_S_ALU_LSH_K:	/* A <<= K */
+				emit_alu_K(SLL, K);
+				break;
+			case BPF_S_ALU_RSH_X:	/* A >>= X */
+				emit_alu_X(SRL);
+				break;
+			case BPF_S_ALU_RSH_K:	/* A >>= K */
+				emit_alu_K(SRL, K);
+				break;
+			case BPF_S_ALU_MUL_X:	/* A *= X; */
+				emit_alu_X(MUL);
+				break;
+			case BPF_S_ALU_MUL_K:	/* A *= K */
+				emit_alu_K(MUL, K);
+				break;
+			case BPF_S_ALU_DIV_K:	/* A /= K */
+				emit_alu_K(MUL, K);
+				emit_read_y(r_A);
+				break;
+			case BPF_S_ALU_DIV_X:	/* A /= X; */
+				emit_cmpi(r_X, 0);
+				if (pc_ret0 > 0) {
+					t_offset = addrs[pc_ret0 - 1];
+#ifdef CONFIG_SPARC32
+					emit_branch(BE, t_offset + 20);
+#else
+					emit_branch(BE, t_offset + 8);
+#endif
+					emit_nop(); /* delay slot */
+				} else {
+					emit_branch_off(BNE, 16);
+					emit_nop();
+#ifdef CONFIG_SPARC32
+					emit_jump(cleanup_addr + 20);
+#else
+					emit_jump(cleanup_addr + 8);
+#endif
+					emit_clear(r_A);
+				}
+				emit_write_y(G0);
+#ifdef CONFIG_SPARC32
+				/* The Sparc v8 architecture requires
+				 * three instructions between a %y
+				 * register write and the first use.
+				 */
+				emit_nop();
+				emit_nop();
+				emit_nop();
+#endif
+				emit_alu_X(DIV);
+				break;
+			case BPF_S_ALU_NEG:
+				emit_neg();
+				break;
+			case BPF_S_RET_K:
+				if (!K) {
+					if (pc_ret0 == -1)
+						pc_ret0 = i;
+					emit_clear(r_A);
+				} else {
+					emit_loadimm(K, r_A);
+				}
+				/* Fallthrough */
+			case BPF_S_RET_A:
+				if (seen_or_pass0) {
+					if (i != flen - 1) {
+						emit_jump(cleanup_addr);
+						emit_nop();
+						break;
+					}
+					if (seen_or_pass0 & SEEN_MEM) {
+						unsigned int sz = BASE_STACKFRAME;
+						sz += BPF_MEMWORDS * sizeof(u32);
+						emit_release_stack(sz);
+					}
+				}
+				/* jmpl %r_saved_O7 + 8, %g0 */
+				emit_jmpl(r_saved_O7, 8, G0);
+				emit_reg_move(r_A, O0); /* delay slot */
+				break;
+			case BPF_S_MISC_TAX:
+				seen |= SEEN_XREG;
+				emit_reg_move(r_A, r_X);
+				break;
+			case BPF_S_MISC_TXA:
+				seen |= SEEN_XREG;
+				emit_reg_move(r_X, r_A);
+				break;
+			case BPF_S_ANC_CPU:
+				emit_load_cpu(r_A);
+				break;
+			case BPF_S_ANC_PROTOCOL:
+				emit_skb_load16(protocol, r_A);
+				break;
+#if 0
+				/* GCC won't let us take the address of
+				 * a bit field even though we very much
+				 * know what we are doing here.
+				 */
+			case BPF_S_ANC_PKTTYPE:
+				__emit_skb_load8(pkt_type, r_A);
+				emit_alu_K(SRL, 5);
+				break;
+#endif
+			case BPF_S_ANC_IFINDEX:
+				emit_skb_loadptr(dev, r_A);
+				emit_cmpi(r_A, 0);
+				emit_branch(BNE_PTR, cleanup_addr + 4);
+				emit_nop();
+				emit_load32(r_A, struct net_device, ifindex, r_A);
+				break;
+			case BPF_S_ANC_MARK:
+				emit_skb_load32(mark, r_A);
+				break;
+			case BPF_S_ANC_QUEUE:
+				emit_skb_load16(queue_mapping, r_A);
+				break;
+			case BPF_S_ANC_HATYPE:
+				emit_skb_loadptr(dev, r_A);
+				emit_cmpi(r_A, 0);
+				emit_branch(BNE_PTR, cleanup_addr + 4);
+				emit_nop();
+				emit_load16(r_A, struct net_device, type, r_A);
+				break;
+			case BPF_S_ANC_RXHASH:
+				emit_skb_load32(rxhash, r_A);
+				break;
+
+			case BPF_S_LD_IMM:
+				emit_loadimm(K, r_A);
+				break;
+			case BPF_S_LDX_IMM:
+				emit_loadimm(K, r_X);
+				break;
+			case BPF_S_LD_MEM:
+				emit_ldmem(K * 4, r_A);
+				break;
+			case BPF_S_LDX_MEM:
+				emit_ldmem(K * 4, r_X);
+				break;
+			case BPF_S_ST:
+				emit_stmem(K * 4, r_A);
+				break;
+			case BPF_S_STX:
+				emit_stmem(K * 4, r_X);
+				break;
+
+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+
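
CHOOSE_LOAD_FUNC() picks the helper stub at JIT time from the sign of the constant offset K: non-negative offsets go straight to the positive-offset stub, offsets in the negative range down to SKF_LL_OFF (-0x200000 in linux/filter.h, the same value as SKF_MAX_NEG_OFF in bpf_jit_asm.S) use the negative-offset stub, and anything below that falls back to the generic stub, which re-checks the offset at run time (the generic stubs are also what the X-relative loads below always use). A stand-alone illustration of the selection, using the word-load stub names declared in bpf_jit.h; the program is only a demonstration:

	#include <stdio.h>

	#define SKF_LL_OFF	(-0x200000)	/* same value as SKF_MAX_NEG_OFF */

	static const char *chosen_stub(int k)
	{
		if (k >= 0)
			return "bpf_jit_load_word_positive_offset";
		if (k >= SKF_LL_OFF)
			return "bpf_jit_load_word_negative_offset";
		return "bpf_jit_load_word";	/* generic stub, run-time range check */
	}

	int main(void)
	{
		printf("K = 14        -> %s\n", chosen_stub(14));
		printf("K = -0x1000   -> %s\n", chosen_stub(-0x1000));
		printf("K = -0x300000 -> %s\n", chosen_stub(-0x300000));
		return 0;
	}
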
+			case BPF_S_LD_W_ABS:
+				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
+common_load:			seen |= SEEN_DATAREF;
+				emit_loadimm(K, r_OFF);
+				emit_call(func);
+				break;
+			case BPF_S_LD_H_ABS:
+				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
+				goto common_load;
+			case BPF_S_LD_B_ABS:
+				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
+				goto common_load;
+			case BPF_S_LDX_B_MSH:
+				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
+				goto common_load;
+			case BPF_S_LD_W_IND:
+				func = bpf_jit_load_word;
+common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
+				if (K) {
+					if (is_simm13(K)) {
+						emit_addi(r_X, K, r_OFF);
+					} else {
+						emit_loadimm(K, r_TMP);
+						emit_add(r_X, r_TMP, r_OFF);
+					}
+				} else {
+					emit_reg_move(r_X, r_OFF);
+				}
+				emit_call(func);
+				break;
+			case BPF_S_LD_H_IND:
+				func = bpf_jit_load_half;
+				goto common_load_ind;
+			case BPF_S_LD_B_IND:
+				func = bpf_jit_load_byte;
+				goto common_load_ind;
+			case BPF_S_JMP_JA:
+				emit_jump(addrs[i + K]);
+				emit_nop();
+				break;
+
+#define COND_SEL(CODE, TOP, FOP)	\
+	case CODE:			\
+		t_op = TOP;		\
+		f_op = FOP;		\
+		goto cond_branch
+
+			COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU);
+			COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU);
+			COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE);
+			COND_SEL(BPF_S_JMP_JSET_K, BNE, BE);
+			COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU);
+			COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU);
+			COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE);
+			COND_SEL(BPF_S_JMP_JSET_X, BNE, BE);
+
+cond_branch:			f_offset = addrs[i + filter[i].jf];
+				t_offset = addrs[i + filter[i].jt];
+
+				/* same targets, can avoid doing the test :) */
+				if (filter[i].jt == filter[i].jf) {
+					emit_jump(t_offset);
+					emit_nop();
+					break;
+				}
+
+				switch (filter[i].code) {
+				case BPF_S_JMP_JGT_X:
+				case BPF_S_JMP_JGE_X:
+				case BPF_S_JMP_JEQ_X:
+					seen |= SEEN_XREG;
+					emit_cmp(r_A, r_X);
+					break;
+				case BPF_S_JMP_JSET_X:
+					seen |= SEEN_XREG;
+					emit_btst(r_A, r_X);
+					break;
+				case BPF_S_JMP_JEQ_K:
+				case BPF_S_JMP_JGT_K:
+				case BPF_S_JMP_JGE_K:
+					if (is_simm13(K)) {
+						emit_cmpi(r_A, K);
+					} else {
+						emit_loadimm(K, r_TMP);
+						emit_cmp(r_A, r_TMP);
+					}
+					break;
+				case BPF_S_JMP_JSET_K:
+					if (is_simm13(K)) {
+						emit_btsti(r_A, K);
+					} else {
+						emit_loadimm(K, r_TMP);
+						emit_btst(r_A, r_TMP);
+					}
+					break;
+				}
+				if (filter[i].jt != 0) {
+					if (filter[i].jf)
+						t_offset += 8;
+					emit_branch(t_op, t_offset);
+					emit_nop(); /* delay slot */
+					if (filter[i].jf) {
+						emit_jump(f_offset);
+						emit_nop();
+					}
+					break;
+				}
+				emit_branch(f_op, f_offset);
+				emit_nop(); /* delay slot */
+				break;
+
+			default:
+				/* hmm, too complex filter, give up with jit compiler */
+				goto out;
+			}
+			ilen = (void *) prog - (void *) temp;
+			if (image) {
+				if (unlikely(proglen + ilen > oldproglen)) {
+				pr_err("bpf_jit_compile fatal error\n");
+					kfree(addrs);
+					module_free(NULL, image);
+					return;
+				}
+				memcpy(image + proglen, temp, ilen);
+			}
+			proglen += ilen;
+			addrs[i] = proglen;
+			prog = temp;
+		}
+		/* The last BPF instruction is always a RET:
+		 * use it to give the cleanup instruction(s) address.
+		 */
+		cleanup_addr = proglen - 8; /* jmpl; mov r_A,%o0; */
+		if (seen_or_pass0 & SEEN_MEM)
+			cleanup_addr -= 4; /* add %sp, X, %sp; */
+
+		if (image) {
+			if (proglen != oldproglen)
+				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n",
+				       proglen, oldproglen);
+			break;
+		}
+		if (proglen == oldproglen) {
+			image = module_alloc(max_t(unsigned int,
+						   proglen,
+						   sizeof(struct work_struct)));
+			if (!image)
+				goto out;
+		}
+		oldproglen = proglen;
+	}
+
+	if (bpf_jit_enable > 1)
+		pr_err("flen=%d proglen=%u pass=%d image=%p\n",
+		       flen, proglen, pass, image);
+
+	if (image) {
+		if (bpf_jit_enable > 1)
+			print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
+				       16, 1, image, proglen, false);
+		bpf_flush_icache(image, image + proglen);
+		fp->bpf_func = (void *)image;
+	}
+out:
+	kfree(addrs);
+	return;
+}
+
+static void jit_free_defer(struct work_struct *arg)
+{
+	module_free(NULL, arg);
+}
+
+/* Since this runs from softirq context, we must use a work_struct
+ * to call module_free() from process context.
+ */
+void bpf_jit_free(struct sk_filter *fp)
+{
+	if (fp->bpf_func != sk_run_filter) {
+		struct work_struct *work = (struct work_struct *)fp->bpf_func;
+
+		INIT_WORK(work, jit_free_defer);
+		schedule_work(work);
+	}
+}
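
Two details above are easy to miss: bpf_jit_enable is the net.core.bpf_jit_enable sysctl (a value greater than 1 additionally dumps the generated image through print_hex_dump()), and module_alloc() is sized with max_t(unsigned int, proglen, sizeof(struct work_struct)) precisely so that bpf_jit_free() can later overlay a work_struct on the start of the image. By the time the filter is freed the generated code is no longer executed, so clobbering its first bytes is harmless. Condensed from the two sites above (kernel context, same identifiers as in the file):

	/* allocation side, in bpf_jit_compile(): never smaller than a work item */
	image = module_alloc(max_t(unsigned int, proglen,
				   sizeof(struct work_struct)));

	/* free side, in bpf_jit_free(), running in softirq context: overlay a
	 * work_struct on the image and let process context do module_free()
	 */
	work = (struct work_struct *)fp->bpf_func;
	INIT_WORK(work, jit_free_defer);
	schedule_work(work);
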
diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile
index 8287bbe..020300b 100644
--- a/arch/sparc/prom/Makefile
+++ b/arch/sparc/prom/Makefile
@@ -10,7 +10,6 @@
 lib-y                 += misc_$(BITS).o
 lib-$(CONFIG_SPARC32) += mp.o
 lib-$(CONFIG_SPARC32) += ranges.o
-lib-$(CONFIG_SPARC32) += segment.o
 lib-y                 += console_$(BITS).o
 lib-y                 += printf.o
 lib-y                 += tree_$(BITS).o
diff --git a/arch/sparc/prom/segment.c b/arch/sparc/prom/segment.c
deleted file mode 100644
index 86a663f..0000000
--- a/arch/sparc/prom/segment.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * segment.c:  Prom routine to map segments in other contexts before
- *             a standalone is completely mapped.  This is for sun4 and
- *             sun4c architectures only.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-
-extern void restore_current(void);
-
-/* Set physical segment 'segment' at virtual address 'vaddr' in
- * context 'ctx'.
- */
-void
-prom_putsegment(int ctx, unsigned long vaddr, int segment)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&prom_lock, flags);
-	(*(romvec->pv_setctxt))(ctx, (char *) vaddr, segment);
-	restore_current();
-	spin_unlock_irqrestore(&prom_lock, flags);
-}
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 96033e2..74239dd 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -11,6 +11,7 @@
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
 	select GENERIC_IRQ_SHOW
+	select HAVE_SYSCALL_WRAPPERS if TILEGX
 	select SYS_HYPERVISOR
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
index 5d5a635..32e6cbe 100644
--- a/arch/tile/include/asm/pci.h
+++ b/arch/tile/include/asm/pci.h
@@ -47,8 +47,8 @@
  */
 #define PCI_DMA_BUS_IS_PHYS     1
 
-int __devinit tile_pci_init(void);
-int __devinit pcibios_init(void);
+int __init tile_pci_init(void);
+int __init pcibios_init(void);
 
 static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
 
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index bc4f562..7594764 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -100,9 +100,14 @@
 
 #else /* __ASSEMBLY__ */
 
-/* how to get the thread information struct from ASM */
+/*
+ * How to get the thread information struct from assembly.
+ * Note that we use different macros since different architectures
+ * have different semantics in their "mm" instruction and we would
+ * like to guarantee that the macro expands to exactly one instruction.
+ */
 #ifdef __tilegx__
-#define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63
+#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63
 #else
 #define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31
 #endif
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 77763cc..cdef6e5 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -403,19 +403,17 @@
 	 * Set up registers for signal handler.
 	 * Registers that we don't modify keep the value they had from
 	 * user-space at the time we took the signal.
+	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+	 * since some things rely on this (e.g. glibc's debug/segfault.c).
 	 */
 	regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
 	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
 	regs->sp = ptr_to_compat_reg(frame);
 	regs->lr = restorer;
 	regs->regs[0] = (unsigned long) usig;
-
-	if (ka->sa.sa_flags & SA_SIGINFO) {
-		/* Need extra arguments, so mark to restore caller-saves. */
-		regs->regs[1] = ptr_to_compat_reg(&frame->info);
-		regs->regs[2] = ptr_to_compat_reg(&frame->uc);
-		regs->flags |= PT_FLAGS_CALLER_SAVES;
-	}
+	regs->regs[1] = ptr_to_compat_reg(&frame->info);
+	regs->regs[2] = ptr_to_compat_reg(&frame->uc);
+	regs->flags |= PT_FLAGS_CALLER_SAVES;
 
 	/*
 	 * Notify any tracer that was single-stepping it.
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 5d56a1e..6943515 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -839,6 +839,18 @@
 	FEEDBACK_REENTER(interrupt_return)
 
 	/*
+	 * Use r33 to hold whether we have already loaded the callee-saves
+	 * into ptregs.  We don't want to do it twice in this loop, since
+	 * then we'd clobber whatever changes are made by ptrace, etc.
+	 * Get base of stack in r32.
+	 */
+	{
+	 GET_THREAD_INFO(r32)
+	 movei  r33, 0
+	}
+
+.Lretry_work_pending:
+	/*
 	 * Disable interrupts so as to make sure we don't
 	 * miss an interrupt that sets any of the thread flags (like
 	 * need_resched or sigpending) between sampling and the iret.
@@ -848,9 +860,6 @@
 	IRQ_DISABLE(r20, r21)
 	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-	/* Get base of stack in r32; note r30/31 are used as arguments here. */
-	GET_THREAD_INFO(r32)
-
 
 	/* Check to see if there is any work to do before returning to user. */
 	{
@@ -866,16 +875,18 @@
 
 	/*
 	 * Make sure we have all the registers saved for signal
-	 * handling or single-step.  Call out to C code to figure out
-	 * exactly what we need to do for each flag bit, then if
-	 * necessary, reload the flags and recheck.
+	 * handling, notify-resume, or single-step.  Call out to C
+	 * code to figure out exactly what we need to do for each flag bit,
+	 * then if necessary, reload the flags and recheck.
 	 */
-	push_extra_callee_saves r0
 	{
 	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 jal    do_work_pending
+	 bnz    r33, 1f
 	}
-	bnz     r0, .Lresume_userspace
+	push_extra_callee_saves r0
+	movei   r33, 1
+1:	jal     do_work_pending
+	bnz     r0, .Lretry_work_pending
 
 	/*
 	 * In the NMI case we
@@ -1180,10 +1191,12 @@
 	add     r20, r20, tp
 	lw      r21, r20
 	addi    r21, r21, 1
-	sw      r20, r21
+	{
+	 sw     r20, r21
+	 GET_THREAD_INFO(r31)
+	}
 
 	/* Trace syscalls, if requested. */
-	GET_THREAD_INFO(r31)
 	addi	r31, r31, THREAD_INFO_FLAGS_OFFSET
 	lw	r30, r31
 	andi    r30, r30, _TIF_SYSCALL_TRACE
@@ -1362,7 +1375,10 @@
 3:
 	/* set PC and continue */
 	lw      r26, r24
-	sw      r28, r26
+	{
+	 sw     r28, r26
+	 GET_THREAD_INFO(r0)
+	}
 
 	/*
 	 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
@@ -1370,7 +1386,6 @@
 	 * need to clear it here and can't really impose on all other arches.
 	 * So what's another write between friends?
 	 */
-	GET_THREAD_INFO(r0)
 
 	addi    r1, r0, THREAD_INFO_FLAGS_OFFSET
 	{
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 49d9d66..30ae76e 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -647,6 +647,20 @@
 	FEEDBACK_REENTER(interrupt_return)
 
 	/*
+	 * Use r33 to hold whether we have already loaded the callee-saves
+	 * into ptregs.  We don't want to do it twice in this loop, since
+	 * then we'd clobber whatever changes are made by ptrace, etc.
+	 */
+	{
+	 movei  r33, 0
+	 move   r32, sp
+	}
+
+	/* Get base of stack in r32. */
+	EXTRACT_THREAD_INFO(r32)
+
+.Lretry_work_pending:
+	/*
 	 * Disable interrupts so as to make sure we don't
 	 * miss an interrupt that sets any of the thread flags (like
 	 * need_resched or sigpending) between sampling and the iret.
@@ -656,9 +670,6 @@
 	IRQ_DISABLE(r20, r21)
 	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 
-	/* Get base of stack in r32; note r30/31 are used as arguments here. */
-	GET_THREAD_INFO(r32)
-
 
 	/* Check to see if there is any work to do before returning to user. */
 	{
@@ -674,16 +685,18 @@
 
 	/*
 	 * Make sure we have all the registers saved for signal
-	 * handling or single-step.  Call out to C code to figure out
+	 * handling or notify-resume.  Call out to C code to figure out
 	 * exactly what we need to do for each flag bit, then if
 	 * necessary, reload the flags and recheck.
 	 */
-	push_extra_callee_saves r0
 	{
 	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
-	 jal    do_work_pending
+	 bnez   r33, 1f
 	}
-	bnez    r0, .Lresume_userspace
+	push_extra_callee_saves r0
+	movei   r33, 1
+1:	jal     do_work_pending
+	bnez    r0, .Lretry_work_pending
 
 	/*
 	 * In the NMI case we
@@ -968,11 +981,16 @@
 	shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
 	add     r20, r20, tp
 	ld4s    r21, r20
-	addi    r21, r21, 1
-	st4     r20, r21
+	{
+	 addi   r21, r21, 1
+	 move   r31, sp
+	}
+	{
+	 st4    r20, r21
+	 EXTRACT_THREAD_INFO(r31)
+	}
 
 	/* Trace syscalls, if requested. */
-	GET_THREAD_INFO(r31)
 	addi	r31, r31, THREAD_INFO_FLAGS_OFFSET
 	ld	r30, r31
 	andi    r30, r30, _TIF_SYSCALL_TRACE
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index a1bb59e..b56d12b 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -141,7 +141,7 @@
  *
  * Returns the number of controllers discovered.
  */
-int __devinit tile_pci_init(void)
+int __init tile_pci_init(void)
 {
 	int i;
 
@@ -287,7 +287,7 @@
  * The controllers have been set up by the time we get here, by a call to
  * tile_pci_init.
  */
-int __devinit pcibios_init(void)
+int __init pcibios_init(void)
 {
 	int i;
 
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 2d5ef61..54e6c64 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -567,6 +567,10 @@
  */
 int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
 {
+	/* If we entered from kernel mode, do nothing and exit the caller's loop. */
+	if (!user_mode(regs))
+		return 0;
+
 	if (thread_info_flags & _TIF_NEED_RESCHED) {
 		schedule();
 		return 1;
@@ -589,8 +593,7 @@
 		return 1;
 	}
 	if (thread_info_flags & _TIF_SINGLESTEP) {
-		if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0)
-			single_step_once(regs);
+		single_step_once(regs);
 		return 0;
 	}
 	panic("work_pending: bad flags %#x\n", thread_info_flags);
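
Together with the intvec_32.S/intvec_64.S changes above, the return-to-user path is now a loop: do_work_pending() handles one pending item per call and returns nonzero when the caller must re-sample the flags with interrupts disabled, while the extra callee-saves are pushed into pt_regs at most once (tracked in r33). A hedged C rendering of that assembly loop, for orientation only; WORK_MASK and push_extra_callee_saves() stand in for names that are not visible in these hunks:

	/* Pseudocode rendering of .Lretry_work_pending; not compilable as-is. */
	bool pushed_callee_saves = false;		/* r33 in the assembly */

	for (;;) {
		local_irq_disable();			/* IRQ_DISABLE + TRACE_IRQS_OFF */
		flags = current_thread_info()->flags;
		if (!(flags & WORK_MASK))		/* "any work to do?" check */
			break;				/* no: return to user space */
		if (!pushed_callee_saves) {
			push_extra_callee_saves(regs);	/* done at most once per return */
			pushed_callee_saves = true;
		}
		if (!do_work_pending(regs, flags))	/* 0: this item is finished */
			break;
	}
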
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1d14cc6..25f87bc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -77,11 +77,11 @@
 	select GENERIC_CLOCKEVENTS_MIN_ADJUST
 	select IRQ_FORCED_THREADING
 	select USE_GENERIC_SMP_HELPERS if SMP
-	select HAVE_BPF_JIT if (X86_64 && NET)
+	select HAVE_BPF_JIT if X86_64
 	select CLKEVT_I8253
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
-	select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC
+	select DCACHE_WORD_ACCESS
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 41a7237..94e91e4 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -134,6 +134,9 @@
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)
 
+archscripts:
+	$(Q)$(MAKE) $(build)=arch/x86/tools relocs
+
 ###
 # Syscall table generation
 
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index fd55a2f..e398bb5 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -40,13 +40,12 @@
 $(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
 
+targets += vmlinux.bin.all vmlinux.relocs
 
-targets += vmlinux.bin.all vmlinux.relocs relocs
-hostprogs-$(CONFIG_X86_NEED_RELOCS) += relocs
-
+CMD_RELOCS = arch/x86/tools/relocs
 quiet_cmd_relocs = RELOCS  $@
-      cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
-$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
+      cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $<
+$(obj)/vmlinux.relocs: vmlinux FORCE
 	$(call if_changed,relocs)
 
 vmlinux.bin.all-y := $(obj)/vmlinux.bin
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index a055993..c85e3ac 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -33,6 +33,9 @@
 	__HEAD
 ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
+	jmp	preferred_addr
+
+	.balign	0x10
 	/*
 	 * We don't need the return address, so set up the stack so
 	 * efi_main() can find its arguments.
@@ -41,12 +44,17 @@
 
 	call	efi_main
 	cmpl	$0, %eax
-	je	preferred_addr
 	movl	%eax, %esi
-	call	1f
+	jne	2f
 1:
+	/* EFI init failed, so hang. */
+	hlt
+	jmp	1b
+2:
+	call	3f
+3:
 	popl	%eax
-	subl	$1b, %eax
+	subl	$3b, %eax
 	subl	BP_pref_address(%esi), %eax
 	add	BP_code32_start(%esi), %eax
 	leal	preferred_addr(%eax), %eax
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 558d76c..87e03a1 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -200,18 +200,28 @@
 	 * entire text+data+bss and hopefully all of memory.
 	 */
 #ifdef CONFIG_EFI_STUB
-	pushq	%rsi
+	/*
+	 * The entry point for the PE/COFF executable is 0x210, so only
+	 * legacy boot loaders will execute this jmp.
+	 */
+	jmp	preferred_addr
+
+	.org 0x210
 	mov	%rcx, %rdi
 	mov	%rdx, %rsi
 	call	efi_main
-	popq	%rsi
-	cmpq	$0,%rax
-	je	preferred_addr
 	movq	%rax,%rsi
-	call	1f
+	cmpq	$0,%rax
+	jne	2f
 1:
+	/* EFI init failed, so hang. */
+	hlt
+	jmp	1b
+2:
+	call	3f
+3:
 	popq	%rax
-	subq	$1b, %rax
+	subq	$3b, %rax
 	subq	BP_pref_address(%rsi), %rax
 	add	BP_code32_start(%esi), %eax
 	leaq	preferred_addr(%rax), %rax
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index ed54976..24443a3 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -205,8 +205,13 @@
 	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
 
 #ifdef CONFIG_X86_32
-	/* Address of entry point */
-	put_unaligned_le32(i, &buf[pe_header + 0x28]);
+	/*
+	 * Address of entry point.
+	 *
+	 * The EFI stub entry point is +16 bytes from the start of
+	 * the .text section.
+	 */
+	put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
 
 	/* .text size */
 	put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
@@ -217,9 +222,11 @@
 	/*
 	 * Address of entry point. startup_32 is at the beginning and
 	 * the 64-bit entry point (startup_64) is always 512 bytes
-	 * after.
+	 * after. The EFI stub entry point is 16 bytes after that, as
+	 * the first instruction allows legacy loaders to jump over
+	 * the EFI stub initialisation.
 	 */
-	put_unaligned_le32(i + 512, &buf[pe_header + 0x28]);
+	put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
 
 	/* .text size */
 	put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
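
Worked numbers tying this file to the head_*.S changes above: for the 32-bit image the EFI stub entry is the .balign 0x10 slot just after startup_32's initial jmp, hence i + 16; for the 64-bit image startup_64 sits at i + 512 and the stub entry at head_64.S's .org 0x210, i.e. 528 = 512 + 16 bytes from startup_32, hence i + 528.
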
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 4824fb4..07b3a68 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -294,8 +294,7 @@
 
 	/* OK, This is the point of no return */
 	set_personality(PER_LINUX);
-	set_thread_flag(TIF_IA32);
-	current->mm->context.ia32_compat = 1;
+	set_personality_ia32(false);
 
 	setup_new_exec(bprm);
 
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 47d9993..5fb9bbb 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -1,45 +1,101 @@
-#ifndef _ASM_X86_IRQ_REMAPPING_H
-#define _ASM_X86_IRQ_REMAPPING_H
+/*
+ * Copyright (C) 2012 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * This header file contains the interface of the interrupt remapping code to
+ * the x86 interrupt management code.
+ */
 
-#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
+#ifndef __X86_IRQ_REMAPPING_H
+#define __X86_IRQ_REMAPPING_H
+
+#include <asm/io_apic.h>
 
 #ifdef CONFIG_IRQ_REMAP
-static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
-static inline void prepare_irte(struct irte *irte, int vector,
-			        unsigned int dest)
-{
-	memset(irte, 0, sizeof(*irte));
 
-	irte->present = 1;
-	irte->dst_mode = apic->irq_dest_mode;
-	/*
-	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
-	 * actual level or edge trigger will be setup in the IO-APIC
-	 * RTE. This will help simplify level triggered irq migration.
-	 * For more details, see the comments (in io_apic.c) explainig IO-APIC
-	 * irq migration in the presence of interrupt-remapping.
-	*/
-	irte->trigger_mode = 0;
-	irte->dlvry_mode = apic->irq_delivery_mode;
-	irte->vector = vector;
-	irte->dest_id = IRTE_DEST(dest);
-	irte->redir_hint = 1;
-}
-static inline bool irq_remapped(struct irq_cfg *cfg)
-{
-	return cfg->irq_2_iommu.iommu != NULL;
-}
-#else
-static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
-{
-}
-static inline bool irq_remapped(struct irq_cfg *cfg)
-{
-	return false;
-}
-static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
-{
-}
-#endif
+extern int irq_remapping_enabled;
 
-#endif	/* _ASM_X86_IRQ_REMAPPING_H */
+extern void setup_irq_remapping_ops(void);
+extern int irq_remapping_supported(void);
+extern int irq_remapping_prepare(void);
+extern int irq_remapping_enable(void);
+extern void irq_remapping_disable(void);
+extern int irq_remapping_reenable(int);
+extern int irq_remap_enable_fault_handling(void);
+extern int setup_ioapic_remapped_entry(int irq,
+				       struct IO_APIC_route_entry *entry,
+				       unsigned int destination,
+				       int vector,
+				       struct io_apic_irq_attr *attr);
+extern int set_remapped_irq_affinity(struct irq_data *data,
+				     const struct cpumask *mask,
+				     bool force);
+extern void free_remapped_irq(int irq);
+extern void compose_remapped_msi_msg(struct pci_dev *pdev,
+				     unsigned int irq, unsigned int dest,
+				     struct msi_msg *msg, u8 hpet_id);
+extern int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
+extern int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
+				  int index, int sub_handle);
+extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
+
+#else  /* CONFIG_IRQ_REMAP */
+
+#define irq_remapping_enabled	0
+
+static inline void setup_irq_remapping_ops(void) { }
+static inline int irq_remapping_supported(void) { return 0; }
+static inline int irq_remapping_prepare(void) { return -ENODEV; }
+static inline int irq_remapping_enable(void) { return -ENODEV; }
+static inline void irq_remapping_disable(void) { }
+static inline int irq_remapping_reenable(int eim) { return -ENODEV; }
+static inline int irq_remap_enable_fault_handling(void) { return -ENODEV; }
+static inline int setup_ioapic_remapped_entry(int irq,
+					      struct IO_APIC_route_entry *entry,
+					      unsigned int destination,
+					      int vector,
+					      struct io_apic_irq_attr *attr)
+{
+	return -ENODEV;
+}
+static inline int set_remapped_irq_affinity(struct irq_data *data,
+					    const struct cpumask *mask,
+					    bool force)
+{
+	return 0;
+}
+static inline void free_remapped_irq(int irq) { }
+static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
+					    unsigned int irq, unsigned int dest,
+					    struct msi_msg *msg, u8 hpet_id)
+{
+}
+static inline int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
+{
+	return -ENODEV;
+}
+static inline int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
+					 int index, int sub_handle)
+{
+	return -ENODEV;
+}
+static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+#endif /* __X86_IRQ_REMAPPING_H */
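
The apic.c hunks later in this patch show how the x86 side consumes this interface; condensed into one place (kernel context, error handling and the x2apic interactions trimmed), the sequence is roughly:

	setup_irq_remapping_ops();		/* install the platform irq_remap_ops */

	if (irq_remapping_prepare() == 0 &&	/* replaces the direct dmar_table_init() */
	    irq_remapping_supported())
		irq_remapping_enable();		/* replaces enable_intr_remapping() */

	if (irq_remapping_enabled)
		irq_remap_enable_fault_handling();

	/* local APIC suspend/resume */
	irq_remapping_disable();
	irq_remapping_reenable(x2apic_mode);
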
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 734c376..183922e 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -170,6 +170,9 @@
 	unsigned int eax, ebx, ecx, edx;
 	char signature[13];
 
+	if (boot_cpu_data.cpuid_level < 0)
+		return 0;	/* So we don't blow up on old processors */
+
 	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
 	memcpy(signature + 0, &ebx, 4);
 	memcpy(signature + 4, &ecx, 4);
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index 3427b77..7ef7c30 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -7,9 +7,9 @@
 #else
 # ifdef __i386__
 #  include "posix_types_32.h"
-# elif defined(__LP64__)
-#  include "posix_types_64.h"
-# else
+# elif defined(__ILP32__)
 #  include "posix_types_x32.h"
+# else
+#  include "posix_types_64.h"
 # endif
 #endif
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 4a08538..5ca71c0 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -257,7 +257,7 @@
 	__u64 oldmask;
 	__u64 cr2;
 	struct _fpstate __user *fpstate;	/* zero when no FPU context */
-#ifndef __LP64__
+#ifdef __ILP32__
 	__u32 __fpstate_pad;
 #endif
 	__u64 reserved1[8];
diff --git a/arch/x86/include/asm/siginfo.h b/arch/x86/include/asm/siginfo.h
index fc1aa55..34c47b3 100644
--- a/arch/x86/include/asm/siginfo.h
+++ b/arch/x86/include/asm/siginfo.h
@@ -2,7 +2,13 @@
 #define _ASM_X86_SIGINFO_H
 
 #ifdef __x86_64__
-# define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+# ifdef __ILP32__ /* x32 */
+typedef long long __kernel_si_clock_t __attribute__((aligned(4)));
+#  define __ARCH_SI_CLOCK_T		__kernel_si_clock_t
+#  define __ARCH_SI_ATTRIBUTES		__attribute__((aligned(8)))
+# else /* x86-64 */
+#  define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
+# endif
 #endif
 
 #include <asm-generic/siginfo.h>
diff --git a/arch/x86/include/asm/stat.h b/arch/x86/include/asm/stat.h
index e0b1d9b..7b3ddc3 100644
--- a/arch/x86/include/asm/stat.h
+++ b/arch/x86/include/asm/stat.h
@@ -25,6 +25,12 @@
 	unsigned long  __unused5;
 };
 
+/* We don't need to memset the whole thing just to initialize the padding */
+#define INIT_STRUCT_STAT_PADDING(st) do {	\
+	st.__unused4 = 0;			\
+	st.__unused5 = 0;			\
+} while (0)
+
 #define STAT64_HAS_BROKEN_ST_INO	1
 
 /* This matches struct stat64 in glibc2.1, hence the absolutely
@@ -63,6 +69,12 @@
 	unsigned long long	st_ino;
 };
 
+/* We don't need to memset the whole thing just to initialize the padding */
+#define INIT_STRUCT_STAT64_PADDING(st) do {		\
+	memset(&st.__pad0, 0, sizeof(st.__pad0));	\
+	memset(&st.__pad3, 0, sizeof(st.__pad3));	\
+} while (0)
+
 #else /* __i386__ */
 
 struct stat {
@@ -87,6 +99,15 @@
 	unsigned long   st_ctime_nsec;
 	long		__unused[3];
 };
+
+/* We don't need to memset the whole thing just to initialize the padding */
+#define INIT_STRUCT_STAT_PADDING(st) do {	\
+	st.__pad0 = 0;				\
+	st.__unused[0] = 0;			\
+	st.__unused[1] = 0;			\
+	st.__unused[2] = 0;			\
+} while (0)
+
 #endif
 
 /* for 32bit emulation and 32 bit kernels */
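
These INIT_STRUCT_STAT*_PADDING macros let the stat copy-out helpers zero only the pad/unused fields instead of memset()ing the whole structure before filling it in. A hedged sketch of the intended call pattern, modelled loosely on fs/stat.c; the field assignments are abbreviated and the helper name is illustrative:

	static int copy_stat_to_user(struct kstat *stat, struct stat __user *statbuf)
	{
		struct stat tmp;

		INIT_STRUCT_STAT_PADDING(tmp);	/* zero only the padding fields */
		tmp.st_ino  = stat->ino;
		tmp.st_mode = stat->mode;
		tmp.st_uid  = stat->uid;
		/* ... every remaining real field is assigned explicitly ... */
		return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
	}
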
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 37cdc9d..4437001 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -63,10 +63,10 @@
 #else
 # ifdef __i386__
 #  include <asm/unistd_32.h>
-# elif defined(__LP64__)
-#  include <asm/unistd_64.h>
-# else
+# elif defined(__ILP32__)
 #  include <asm/unistd_x32.h>
+# else
+#  include <asm/unistd_64.h>
 # endif
 #endif
 
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index 6fe6767..e58f03b 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -43,4 +43,37 @@
 	return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
 }
 
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+	unsigned long ret, dummy;
+
+	asm(
+		"1:\tmov %2,%0\n"
+		"2:\n"
+		".section .fixup,\"ax\"\n"
+		"3:\t"
+		"lea %2,%1\n\t"
+		"and %3,%1\n\t"
+		"mov (%1),%0\n\t"
+		"leal %2,%%ecx\n\t"
+		"andl %4,%%ecx\n\t"
+		"shll $3,%%ecx\n\t"
+		"shr %%cl,%0\n\t"
+		"jmp 2b\n"
+		".previous\n"
+		_ASM_EXTABLE(1b, 3b)
+		:"=&r" (ret),"=&c" (dummy)
+		:"m" (*(unsigned long *)addr),
+		 "i" (-sizeof(unsigned long)),
+		 "i" (sizeof(unsigned long)-1));
+	return ret;
+}
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
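
load_unaligned_zeropad() exists for the word-at-a-time users enabled by DCACHE_WORD_ACCESS (now selected unconditionally in the x86 Kconfig hunk above): a scan may read a whole word that runs past the end of a string, and if that word crosses into an unmapped page the fixup supplies zeroes instead of faulting. A hedged sketch of the usage pattern in kernel context; has_zero() is presumably the helper whose body is visible at the top of this hunk, and the loop itself is illustrative rather than the actual dcache code:

	/* Length of a NUL-terminated name, one word at a time.  Reading the
	 * final partial word with load_unaligned_zeropad() is safe even when
	 * it crosses into an unmapped page: the missing bytes come back as
	 * zeroes, and has_zero() then finds the terminator.  x86 is
	 * little-endian, so the low byte of each word is the first character.
	 */
	static size_t word_at_a_time_strlen(const char *name)
	{
		size_t len = 0;
		unsigned long c;

		for (;;) {
			c = load_unaligned_zeropad(name + len);
			if (has_zero(c))
				break;
			len += sizeof(unsigned long);
		}
		while (c & 0xff) {		/* locate the NUL inside the last word */
			c >>= 8;
			len++;
		}
		return len;
	}
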
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index baaca8d..764b66a 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -195,6 +195,5 @@
 
 extern void x86_init_noop(void);
 extern void x86_init_uint_noop(unsigned int unused);
-extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node);
 
 #endif
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a415b1f..7c439fe 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -593,7 +593,7 @@
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 103b6ab..146a49c 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -24,6 +24,10 @@
 static char temp_stack[4096];
 #endif
 
+asmlinkage void acpi_enter_s3(void)
+{
+	acpi_enter_sleep_state(3, wake_sleep_flags);
+}
 /**
  * acpi_suspend_lowlevel - save kernel state
  *
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 416d4be..d68677a 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -3,12 +3,16 @@
  */
 
 #include <asm/trampoline.h>
+#include <linux/linkage.h>
 
 extern unsigned long saved_video_mode;
 extern long saved_magic;
 
 extern int wakeup_pmode_return;
 
+extern u8 wake_sleep_flags;
+extern asmlinkage void acpi_enter_s3(void);
+
 extern unsigned long acpi_copy_wakeup_routine(unsigned long);
 extern void wakeup_long64(void);
 
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 13ab7205..7261083 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,9 +74,7 @@
 ENTRY(do_suspend_lowlevel)
 	call	save_processor_state
 	call	save_registers
-	pushl	$3
-	call	acpi_enter_sleep_state
-	addl	$4, %esp
+	call	acpi_enter_s3
 
 #	In case of S3 failure, we'll emerge here.  Jump
 # 	to ret_point to recover
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8ea5164..014d1d2 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,9 +71,7 @@
 	movq	%rsi, saved_rsi
 
 	addq	$8, %rsp
-	movl	$3, %edi
-	xorl	%eax, %eax
-	call	acpi_enter_sleep_state
+	call	acpi_enter_s3
 	/* in case something went wrong, restore the machine status and go on */
 	jmp	resume_point
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 11544d8..3722179a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -35,6 +35,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 
+#include <asm/irq_remapping.h>
 #include <asm/perf_event.h>
 #include <asm/x86_init.h>
 #include <asm/pgalloc.h>
@@ -1441,8 +1442,8 @@
 	 * Now that local APIC setup is completed for BP, configure the fault
 	 * handling for interrupt remapping.
 	 */
-	if (intr_remapping_enabled)
-		enable_drhd_fault_handling();
+	if (irq_remapping_enabled)
+		irq_remap_enable_fault_handling();
 
 }
 
@@ -1517,7 +1518,7 @@
 int __init enable_IR(void)
 {
 #ifdef CONFIG_IRQ_REMAP
-	if (!intr_remapping_supported()) {
+	if (!irq_remapping_supported()) {
 		pr_debug("intr-remapping not supported\n");
 		return -1;
 	}
@@ -1528,7 +1529,7 @@
 		return -1;
 	}
 
-	return enable_intr_remapping();
+	return irq_remapping_enable();
 #endif
 	return -1;
 }
@@ -1537,10 +1538,13 @@
 {
 	unsigned long flags;
 	int ret, x2apic_enabled = 0;
-	int dmar_table_init_ret;
+	int hardware_init_ret;
 
-	dmar_table_init_ret = dmar_table_init();
-	if (dmar_table_init_ret && !x2apic_supported())
+	/* Make sure irq_remap_ops are initialized */
+	setup_irq_remapping_ops();
+
+	hardware_init_ret = irq_remapping_prepare();
+	if (hardware_init_ret && !x2apic_supported())
 		return;
 
 	ret = save_ioapic_entries();
@@ -1556,7 +1560,7 @@
 	if (x2apic_preenabled && nox2apic)
 		disable_x2apic();
 
-	if (dmar_table_init_ret)
+	if (hardware_init_ret)
 		ret = -1;
 	else
 		ret = enable_IR();
@@ -1637,9 +1641,11 @@
 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 
 	/* The BIOS may have set up the APIC at some other address */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	if (l & MSR_IA32_APICBASE_ENABLE)
-		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		if (l & MSR_IA32_APICBASE_ENABLE)
+			mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+	}
 
 	pr_info("Found and enabled local APIC!\n");
 	return 0;
@@ -1657,13 +1663,15 @@
 	 * MSR. This can only be done in software for Intel P6 or later
 	 * and AMD K7 (Model > 1) or later.
 	 */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	if (!(l & MSR_IA32_APICBASE_ENABLE)) {
-		pr_info("Local APIC disabled by BIOS -- reenabling.\n");
-		l &= ~MSR_IA32_APICBASE_BASE;
-		l |= MSR_IA32_APICBASE_ENABLE | addr;
-		wrmsr(MSR_IA32_APICBASE, l, h);
-		enabled_via_apicbase = 1;
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+			pr_info("Local APIC disabled by BIOS -- reenabling.\n");
+			l &= ~MSR_IA32_APICBASE_BASE;
+			l |= MSR_IA32_APICBASE_ENABLE | addr;
+			wrmsr(MSR_IA32_APICBASE, l, h);
+			enabled_via_apicbase = 1;
+		}
 	}
 	return apic_verify();
 }
@@ -2172,8 +2180,8 @@
 	local_irq_save(flags);
 	disable_local_APIC();
 
-	if (intr_remapping_enabled)
-		disable_intr_remapping();
+	if (irq_remapping_enabled)
+		irq_remapping_disable();
 
 	local_irq_restore(flags);
 	return 0;
@@ -2189,7 +2197,7 @@
 		return;
 
 	local_irq_save(flags);
-	if (intr_remapping_enabled) {
+	if (irq_remapping_enabled) {
 		/*
 		 * IO-APIC and PIC have their own resume routines.
 		 * We just mask them here to make sure the interrupt
@@ -2209,10 +2217,12 @@
 		 * FIXME! This will be wrong if we ever support suspend on
 		 * SMP! We'll need to do this as part of the CPU restore!
 		 */
-		rdmsr(MSR_IA32_APICBASE, l, h);
-		l &= ~MSR_IA32_APICBASE_BASE;
-		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-		wrmsr(MSR_IA32_APICBASE, l, h);
+		if (boot_cpu_data.x86 >= 6) {
+			rdmsr(MSR_IA32_APICBASE, l, h);
+			l &= ~MSR_IA32_APICBASE_BASE;
+			l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+			wrmsr(MSR_IA32_APICBASE, l, h);
+		}
 	}
 
 	maxlvt = lapic_get_maxlvt();
@@ -2239,8 +2249,8 @@
 	apic_write(APIC_ESR, 0);
 	apic_read(APIC_ESR);
 
-	if (intr_remapping_enabled)
-		reenable_intr_remapping(x2apic_mode);
+	if (irq_remapping_enabled)
+		irq_remapping_reenable(x2apic_mode);
 
 	local_irq_restore(flags);
 }
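
The hunks above swap the Intel-specific entry points (enable_intr_remapping(), disable_intr_remapping(), ...) for vendor-neutral irq_remapping_*() wrappers. A rough sketch of the dispatch layer this implies; the field names are illustrative and the real ops table lives in the IOMMU code under drivers/iommu/:

    /* Illustrative only: a small ops table lets Intel (and later AMD) IOMMU
     * drivers provide the remapping hooks behind one generic API. */
    struct irq_remap_ops {
            int  (*supported)(void);
            int  (*prepare)(void);          /* e.g. parse the DMAR tables */
            int  (*enable)(void);
            void (*disable)(void);
            int  (*reenable)(int eim);
            int  (*enable_faulting)(void);
    };

    static struct irq_remap_ops *remap_ops;

    int irq_remapping_enable(void)
    {
            if (!remap_ops || !remap_ops->enable)
                    return -ENODEV;
            return remap_ops->enable();
    }

Under this scheme setup_irq_remapping_ops(), called early in enable_IR_x2apic() above, would simply point remap_ops at the vendor implementation that is present.
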
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 899803e..23e7542 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -207,8 +207,11 @@
 
 static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 {
-	c->phys_proc_id = node;
-	per_cpu(cpu_llc_id, smp_processor_id()) = node;
+
+	if (c->phys_proc_id != node) {
+		c->phys_proc_id = node;
+		per_cpu(cpu_llc_id, smp_processor_id()) = node;
+	}
 }
 
 static int __init numachip_system_init(void)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e88300d..ef0648c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -86,6 +86,22 @@
 	io_apic_ops = *ops;
 }
 
+#ifdef CONFIG_IRQ_REMAP
+static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
+static inline bool irq_remapped(struct irq_cfg *cfg)
+{
+	return cfg->irq_2_iommu.iommu != NULL;
+}
+#else
+static inline bool irq_remapped(struct irq_cfg *cfg)
+{
+	return false;
+}
+static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+{
+}
+#endif
+
 /*
  *      Is the SiS APIC rmw bug present ?
  *      -1 = don't know, 0 = no, 1 = yes
@@ -1361,77 +1377,13 @@
 				      fasteoi ? "fasteoi" : "edge");
 }
 
-
-static int setup_ir_ioapic_entry(int irq,
-			      struct IR_IO_APIC_route_entry *entry,
-			      unsigned int destination, int vector,
-			      struct io_apic_irq_attr *attr)
-{
-	int index;
-	struct irte irte;
-	int ioapic_id = mpc_ioapic_id(attr->ioapic);
-	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
-
-	if (!iommu) {
-		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
-		return -ENODEV;
-	}
-
-	index = alloc_irte(iommu, irq, 1);
-	if (index < 0) {
-		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
-		return -ENOMEM;
-	}
-
-	prepare_irte(&irte, vector, destination);
-
-	/* Set source-id of interrupt request */
-	set_ioapic_sid(&irte, ioapic_id);
-
-	modify_irte(irq, &irte);
-
-	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
-		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
-		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
-		"Avail:%X Vector:%02X Dest:%08X "
-		"SID:%04X SQ:%X SVT:%X)\n",
-		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
-		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
-		irte.avail, irte.vector, irte.dest_id,
-		irte.sid, irte.sq, irte.svt);
-
-	memset(entry, 0, sizeof(*entry));
-
-	entry->index2	= (index >> 15) & 0x1;
-	entry->zero	= 0;
-	entry->format	= 1;
-	entry->index	= (index & 0x7fff);
-	/*
-	 * IO-APIC RTE will be configured with virtual vector.
-	 * irq handler will do the explicit EOI to the io-apic.
-	 */
-	entry->vector	= attr->ioapic_pin;
-	entry->mask	= 0;			/* enable IRQ */
-	entry->trigger	= attr->trigger;
-	entry->polarity	= attr->polarity;
-
-	/* Mask level triggered irqs.
-	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
-	 */
-	if (attr->trigger)
-		entry->mask = 1;
-
-	return 0;
-}
-
 static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 			       unsigned int destination, int vector,
 			       struct io_apic_irq_attr *attr)
 {
-	if (intr_remapping_enabled)
-		return setup_ir_ioapic_entry(irq,
-			 (struct IR_IO_APIC_route_entry *)entry,
-			 destination, vector, attr);
+	if (irq_remapping_enabled)
+		return setup_ioapic_remapped_entry(irq, entry, destination,
+						   vector, attr);
 
 	memset(entry, 0, sizeof(*entry));
 
@@ -1588,7 +1540,7 @@
 {
 	struct IO_APIC_route_entry entry;
 
-	if (intr_remapping_enabled)
+	if (irq_remapping_enabled)
 		return;
 
 	memset(&entry, 0, sizeof(entry));
@@ -1674,7 +1626,7 @@
 
 	printk(KERN_DEBUG ".... IRQ redirection table:\n");
 
-	if (intr_remapping_enabled) {
+	if (irq_remapping_enabled) {
 		printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
 			" Pol Stat Indx2 Zero Vect:\n");
 	} else {
@@ -1683,7 +1635,7 @@
 	}
 
 	for (i = 0; i <= reg_01.bits.entries; i++) {
-		if (intr_remapping_enabled) {
+		if (irq_remapping_enabled) {
 			struct IO_APIC_route_entry entry;
 			struct IR_IO_APIC_route_entry *ir_entry;
 
@@ -2050,7 +2002,7 @@
 	 * IOAPIC RTE as well as interrupt-remapping table entry).
 	 * As this gets called during crash dump, keep this simple for now.
 	 */
-	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
+	if (ioapic_i8259.pin != -1 && !irq_remapping_enabled) {
 		struct IO_APIC_route_entry entry;
 
 		memset(&entry, 0, sizeof(entry));
@@ -2074,7 +2026,7 @@
 	 * Use virtual wire A mode when interrupt remapping is enabled.
 	 */
 	if (cpu_has_apic || apic_from_smp_config())
-		disconnect_bsp_APIC(!intr_remapping_enabled &&
+		disconnect_bsp_APIC(!irq_remapping_enabled &&
 				ioapic_i8259.pin != -1);
 }
 
@@ -2390,71 +2342,6 @@
 	return ret;
 }
 
-#ifdef CONFIG_IRQ_REMAP
-
-/*
- * Migrate the IO-APIC irq in the presence of intr-remapping.
- *
- * For both level and edge triggered, irq migration is a simple atomic
- * update(of vector and cpu destination) of IRTE and flush the hardware cache.
- *
- * For level triggered, we eliminate the io-apic RTE modification (with the
- * updated vector information), by using a virtual vector (io-apic pin number).
- * Real vector that is used for interrupting cpu will be coming from
- * the interrupt-remapping table entry.
- *
- * As the migration is a simple atomic update of IRTE, the same mechanism
- * is used to migrate MSI irq's in the presence of interrupt-remapping.
- */
-static int
-ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-		       bool force)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned int dest, irq = data->irq;
-	struct irte irte;
-
-	if (!cpumask_intersects(mask, cpu_online_mask))
-		return -EINVAL;
-
-	if (get_irte(irq, &irte))
-		return -EBUSY;
-
-	if (assign_irq_vector(irq, cfg, mask))
-		return -EBUSY;
-
-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
-
-	irte.vector = cfg->vector;
-	irte.dest_id = IRTE_DEST(dest);
-
-	/*
-	 * Atomically updates the IRTE with the new destination, vector
-	 * and flushes the interrupt entry cache.
-	 */
-	modify_irte(irq, &irte);
-
-	/*
-	 * After this point, all the interrupts will start arriving
-	 * at the new destination. So, time to cleanup the previous
-	 * vector allocation.
-	 */
-	if (cfg->move_in_progress)
-		send_cleanup_vector(cfg);
-
-	cpumask_copy(data->affinity, mask);
-	return 0;
-}
-
-#else
-static inline int
-ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-		       bool force)
-{
-	return 0;
-}
-#endif
-
 asmlinkage void smp_irq_move_cleanup_interrupt(void)
 {
 	unsigned vector, me;
@@ -2699,7 +2586,7 @@
 	chip->irq_eoi = ir_ack_apic_level;
 
 #ifdef CONFIG_SMP
-	chip->irq_set_affinity = ir_ioapic_set_affinity;
+	chip->irq_set_affinity = set_remapped_irq_affinity;
 #endif
 }
 #endif /* CONFIG_IRQ_REMAP */
@@ -2912,7 +2799,7 @@
 	 * 8259A.
 	 */
 	if (pin1 == -1) {
-		if (intr_remapping_enabled)
+		if (irq_remapping_enabled)
 			panic("BIOS bug: timer not connected to IO-APIC");
 		pin1 = pin2;
 		apic1 = apic2;
@@ -2945,7 +2832,7 @@
 				clear_IO_APIC_pin(0, pin1);
 			goto out;
 		}
-		if (intr_remapping_enabled)
+		if (irq_remapping_enabled)
 			panic("timer doesn't work through Interrupt-remapped IO-APIC");
 		local_irq_disable();
 		clear_IO_APIC_pin(apic1, pin1);
@@ -3169,7 +3056,7 @@
 	irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
 
 	if (irq_remapped(cfg))
-		free_irte(irq);
+		free_remapped_irq(irq);
 	raw_spin_lock_irqsave(&vector_lock, flags);
 	__clear_irq_vector(irq, cfg);
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
@@ -3198,54 +3085,34 @@
 	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
 
 	if (irq_remapped(cfg)) {
-		struct irte irte;
-		int ir_index;
-		u16 sub_handle;
-
-		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
-		BUG_ON(ir_index == -1);
-
-		prepare_irte(&irte, cfg->vector, dest);
-
-		/* Set source-id of interrupt request */
-		if (pdev)
-			set_msi_sid(&irte, pdev);
-		else
-			set_hpet_sid(&irte, hpet_id);
-
-		modify_irte(irq, &irte);
-
-		msg->address_hi = MSI_ADDR_BASE_HI;
-		msg->data = sub_handle;
-		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
-				  MSI_ADDR_IR_SHV |
-				  MSI_ADDR_IR_INDEX1(ir_index) |
-				  MSI_ADDR_IR_INDEX2(ir_index);
-	} else {
-		if (x2apic_enabled())
-			msg->address_hi = MSI_ADDR_BASE_HI |
-					  MSI_ADDR_EXT_DEST_ID(dest);
-		else
-			msg->address_hi = MSI_ADDR_BASE_HI;
-
-		msg->address_lo =
-			MSI_ADDR_BASE_LO |
-			((apic->irq_dest_mode == 0) ?
-				MSI_ADDR_DEST_MODE_PHYSICAL:
-				MSI_ADDR_DEST_MODE_LOGICAL) |
-			((apic->irq_delivery_mode != dest_LowestPrio) ?
-				MSI_ADDR_REDIRECTION_CPU:
-				MSI_ADDR_REDIRECTION_LOWPRI) |
-			MSI_ADDR_DEST_ID(dest);
-
-		msg->data =
-			MSI_DATA_TRIGGER_EDGE |
-			MSI_DATA_LEVEL_ASSERT |
-			((apic->irq_delivery_mode != dest_LowestPrio) ?
-				MSI_DATA_DELIVERY_FIXED:
-				MSI_DATA_DELIVERY_LOWPRI) |
-			MSI_DATA_VECTOR(cfg->vector);
+		compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
+		return err;
 	}
+
+	if (x2apic_enabled())
+		msg->address_hi = MSI_ADDR_BASE_HI |
+				  MSI_ADDR_EXT_DEST_ID(dest);
+	else
+		msg->address_hi = MSI_ADDR_BASE_HI;
+
+	msg->address_lo =
+		MSI_ADDR_BASE_LO |
+		((apic->irq_dest_mode == 0) ?
+			MSI_ADDR_DEST_MODE_PHYSICAL:
+			MSI_ADDR_DEST_MODE_LOGICAL) |
+		((apic->irq_delivery_mode != dest_LowestPrio) ?
+			MSI_ADDR_REDIRECTION_CPU:
+			MSI_ADDR_REDIRECTION_LOWPRI) |
+		MSI_ADDR_DEST_ID(dest);
+
+	msg->data =
+		MSI_DATA_TRIGGER_EDGE |
+		MSI_DATA_LEVEL_ASSERT |
+		((apic->irq_delivery_mode != dest_LowestPrio) ?
+			MSI_DATA_DELIVERY_FIXED:
+			MSI_DATA_DELIVERY_LOWPRI) |
+		MSI_DATA_VECTOR(cfg->vector);
+
 	return err;
 }
 
@@ -3288,33 +3155,6 @@
 	.irq_retrigger		= ioapic_retrigger_irq,
 };
 
-/*
- * Map the PCI dev to the corresponding remapping hardware unit
- * and allocate 'nvec' consecutive interrupt-remapping table entries
- * in it.
- */
-static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
-{
-	struct intel_iommu *iommu;
-	int index;
-
-	iommu = map_dev_to_ir(dev);
-	if (!iommu) {
-		printk(KERN_ERR
-		       "Unable to map PCI %s to iommu\n", pci_name(dev));
-		return -ENOENT;
-	}
-
-	index = alloc_irte(iommu, irq, nvec);
-	if (index < 0) {
-		printk(KERN_ERR
-		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
-		       pci_name(dev));
-		return -ENOSPC;
-	}
-	return index;
-}
-
 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
 {
 	struct irq_chip *chip = &msi_chip;
@@ -3345,7 +3185,6 @@
 	int node, ret, sub_handle, index = 0;
 	unsigned int irq, irq_want;
 	struct msi_desc *msidesc;
-	struct intel_iommu *iommu = NULL;
 
 	/* x86 doesn't support multiple MSI yet */
 	if (type == PCI_CAP_ID_MSI && nvec > 1)
@@ -3359,7 +3198,7 @@
 		if (irq == 0)
 			return -1;
 		irq_want = irq + 1;
-		if (!intr_remapping_enabled)
+		if (!irq_remapping_enabled)
 			goto no_ir;
 
 		if (!sub_handle) {
@@ -3367,23 +3206,16 @@
 			 * allocate the consecutive block of IRTE's
 			 * for 'nvec'
 			 */
-			index = msi_alloc_irte(dev, irq, nvec);
+			index = msi_alloc_remapped_irq(dev, irq, nvec);
 			if (index < 0) {
 				ret = index;
 				goto error;
 			}
 		} else {
-			iommu = map_dev_to_ir(dev);
-			if (!iommu) {
-				ret = -ENOENT;
+			ret = msi_setup_remapped_irq(dev, irq, index,
+						     sub_handle);
+			if (ret < 0)
 				goto error;
-			}
-			/*
-			 * setup the mapping between the irq and the IRTE
-			 * base index, the sub_handle pointing to the
-			 * appropriate interrupt remap table entry.
-			 */
-			set_irte_irq(irq, iommu, index, sub_handle);
 		}
 no_ir:
 		ret = setup_msi_irq(dev, msidesc, irq);
@@ -3501,15 +3333,8 @@
 	struct msi_msg msg;
 	int ret;
 
-	if (intr_remapping_enabled) {
-		struct intel_iommu *iommu = map_hpet_to_ir(id);
-		int index;
-
-		if (!iommu)
-			return -1;
-
-		index = alloc_irte(iommu, irq, 1);
-		if (index < 0)
+	if (irq_remapping_enabled) {
+		if (!setup_hpet_msi_remapped(irq, id))
 			return -1;
 	}
 
@@ -3888,8 +3713,8 @@
 		else
 			mask = apic->target_cpus();
 
-		if (intr_remapping_enabled)
-			ir_ioapic_set_affinity(idata, mask, false);
+		if (irq_remapping_enabled)
+			set_remapped_irq_affinity(idata, mask, false);
 		else
 			ioapic_set_affinity(idata, mask, false);
 	}
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 8a778db..991e315 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -24,6 +24,12 @@
 {
 	if (x2apic_phys)
 		return x2apic_enabled();
+	else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
+		x2apic_enabled()) {
+		printk(KERN_DEBUG "System requires x2apic physical mode\n");
+		return 1;
+	}
 	else
 		return 0;
 }
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 0a44b90..146bb62 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -26,7 +26,8 @@
  *	contact AMD for precise details and a CPU swap.
  *
  *	See	http://www.multimania.com/poulot/k6bug.html
- *		http://www.amd.com/K6/k6docs/revgd.html
+ *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
+ *		(Publication # 21266  Issue Date: August 1998)
  *
  *	The following test is erm.. interesting. AMD neglected to up
  *	the chip setting when fixing the bug but they also tweaked some
@@ -94,7 +95,6 @@
 				"system stability may be impaired when more than 32 MB are used.\n");
 		else
 			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
-		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
 	}
 
 	/* K6 with old style WHCR */
@@ -353,10 +353,11 @@
 		node = per_cpu(cpu_llc_id, cpu);
 
 	/*
-	 * If core numbers are inconsistent, it's likely a multi-fabric platform,
-	 * so invoke platform-specific handler
+	 * On multi-fabric platform (e.g. Numascale NumaChip) a
+	 * platform-specific handler needs to be called to fixup some
+	 * IDs of the CPU.
 	 */
-	if (c->phys_proc_id != node)
+	if (x86_cpuinit.fixup_cpu_id)
 		x86_cpuinit.fixup_cpu_id(c, node);
 
 	if (!node_online(node)) {
@@ -579,6 +580,24 @@
 		}
 	}
 
+	/* re-enable TopologyExtensions if switched off by BIOS */
+	if ((c->x86 == 0x15) &&
+	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+		u64 val;
+
+		if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+			val |= 1ULL << 54;
+			wrmsrl_amd_safe(0xc0011005, val);
+			rdmsrl(0xc0011005, val);
+			if (val & (1ULL << 54)) {
+				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
+				  "disabled Topology Extensions Support\n");
+			}
+		}
+	}
+
 	cpu_detect_cache_sizes(c);
 
 	/* Multi core CPU? */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 67e2583..cf79302 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1163,15 +1163,6 @@
 #endif /* ! CONFIG_KGDB */
 
 /*
- * Prints an error where the NUMA and configured core-number mismatch and the
- * platform didn't override this to fix it up
- */
-void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
-{
-	pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
-}
-
-/*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
  * and IDT. We reload them nevertheless, this function acts as a
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 73d08ed..b8f3653 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -433,14 +433,14 @@
 	/*  check if @slot is already used or the index is already disabled */
 	ret = amd_get_l3_disable_slot(nb, slot);
 	if (ret >= 0)
-		return -EINVAL;
+		return -EEXIST;
 
 	if (index > nb->l3_cache.indices)
 		return -EINVAL;
 
 	/* check whether the other slot has disabled the same index already */
 	if (index == amd_get_l3_disable_slot(nb, !slot))
-		return -EINVAL;
+		return -EEXIST;
 
 	amd_l3_disable_index(nb, cpu, slot, index);
 
@@ -468,8 +468,8 @@
 	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
 	if (err) {
 		if (err == -EEXIST)
-			printk(KERN_WARNING "L3 disable slot %d in use!\n",
-					    slot);
+			pr_warning("L3 slot %d in use/index already disabled!\n",
+				   slot);
 		return err;
 	}
 	return count;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index d086a09..11c9166 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -945,9 +945,10 @@
 	atomic_t		inuse;
 	struct task_struct	*t;
 	__u64			paddr;
+	int			restartable;
 } mce_info[MCE_INFO_MAX];
 
-static void mce_save_info(__u64 addr)
+static void mce_save_info(__u64 addr, int c)
 {
 	struct mce_info *mi;
 
@@ -955,6 +956,7 @@
 		if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
 			mi->t = current;
 			mi->paddr = addr;
+			mi->restartable = c;
 			return;
 		}
 	}
@@ -1130,7 +1132,7 @@
 			mce_panic("Fatal machine check on current CPU", &m, msg);
 		if (worst == MCE_AR_SEVERITY) {
 			/* schedule action before return to userland */
-			mce_save_info(m.addr);
+			mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
 			set_thread_flag(TIF_MCE_NOTIFY);
 		} else if (kill_it) {
 			force_sig(SIGBUS, current);
@@ -1179,7 +1181,13 @@
 
 	pr_err("Uncorrected hardware memory error in user-access at %llx",
 		 mi->paddr);
-	if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0) {
+	/*
+	 * We must call memory_failure() here even if the current process is
+	 * doomed. We still need to mark the page as poisoned and alert any
+	 * other users of the page.
+	 */
+	if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
+			   mi->restartable == 0) {
 		pr_err("Memory error not recovered");
 		force_sig(SIGBUS, current);
 	}
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 7734bcb..2d6e649 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -235,6 +235,7 @@
 	if (tsk_used_math(tsk)) {
 		if (HAVE_HWFP && tsk == current)
 			unlazy_fpu(tsk);
+		tsk->thread.fpu.last_cpu = ~0;
 		return 0;
 	}
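
Resetting fpu.last_cpu to ~0 here invalidates the "registers are still live on this CPU" shortcut, so the next context switch performs a real restore. For context, a sketch of the lazy-restore check this defeats (the fpu_owner_task per-cpu variable name is from memory and may differ):

    /* Sketch: the restore is skipped only when this task was the last FPU
     * user on this CPU; last_cpu == ~0 can never equal a CPU number, so the
     * FPU state is reloaded from memory the next time the task runs. */
    static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
    {
            return new->thread.fpu.last_cpu == cpu &&
                   this_cpu_read(fpu_owner_task) == new;
    }
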
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b8ba6e4..e554e5a 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -79,7 +79,6 @@
 	u32 token;
 	int cpu;
 	bool halted;
-	struct mm_struct *mm;
 };
 
 static struct kvm_task_sleep_head {
@@ -126,9 +125,7 @@
 
 	n.token = token;
 	n.cpu = smp_processor_id();
-	n.mm = current->active_mm;
 	n.halted = idle || preempt_count() > 1;
-	atomic_inc(&n.mm->mm_count);
 	init_waitqueue_head(&n.wq);
 	hlist_add_head(&n.link, &b->list);
 	spin_unlock(&b->lock);
@@ -161,9 +158,6 @@
 static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 {
 	hlist_del_init(&n->link);
-	if (!n->mm)
-		return;
-	mmdrop(n->mm);
 	if (n->halted)
 		smp_send_reschedule(n->cpu);
 	else if (waitqueue_active(&n->wq))
@@ -207,7 +201,7 @@
 		 * async PF was not yet handled.
 		 * Add dummy entry for the token.
 		 */
-		n = kmalloc(sizeof(*n), GFP_ATOMIC);
+		n = kzalloc(sizeof(*n), GFP_ATOMIC);
 		if (!n) {
 			/*
 			 * Allocation failed! Busy wait while other cpu
@@ -219,7 +213,6 @@
 		}
 		n->token = token;
 		n->cpu = smp_processor_id();
-		n->mm = NULL;
 		init_waitqueue_head(&n->wq);
 		hlist_add_head(&n->link, &b->list);
 	} else
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 73465aa..8a2ce8f 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -82,11 +82,6 @@
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
-		pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
-		return -1;
-	}
-
 	csig->rev = c->microcode;
 	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
 
@@ -380,6 +375,13 @@
 
 struct microcode_ops * __init init_amd_microcode(void)
 {
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
+		pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
+		return NULL;
+	}
+
 	patch = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!patch)
 		return NULL;
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 87a0f86..c9bda6d 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -419,10 +419,8 @@
 	if (err)
 		return err;
 
-	if (microcode_init_cpu(cpu) == UCODE_ERROR) {
-		sysfs_remove_group(&dev->kobj, &mc_attr_group);
+	if (microcode_init_cpu(cpu) == UCODE_ERROR)
 		return -EINVAL;
-	}
 
 	return err;
 }
@@ -528,11 +526,11 @@
 		microcode_ops = init_intel_microcode();
 	else if (c->x86_vendor == X86_VENDOR_AMD)
 		microcode_ops = init_amd_microcode();
-
-	if (!microcode_ops) {
+	else
 		pr_err("no support for this CPU vendor\n");
+
+	if (!microcode_ops)
 		return -ENODEV;
-	}
 
 	microcode_pdev = platform_device_register_simple("microcode", -1,
 							 NULL, 0);
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 3ca42d0..0327e2b 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -147,12 +147,6 @@
 
 	memset(csig, 0, sizeof(*csig));
 
-	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
-	    cpu_has(c, X86_FEATURE_IA64)) {
-		pr_err("CPU%d not a capable Intel processor\n", cpu_num);
-		return -1;
-	}
-
 	csig->sig = cpuid_eax(0x00000001);
 
 	if ((c->x86_model >= 5) || (c->x86 > 6)) {
@@ -463,6 +457,14 @@
 
 struct microcode_ops * __init init_intel_microcode(void)
 {
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+	    cpu_has(c, X86_FEATURE_IA64)) {
+		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
+		return NULL;
+	}
+
 	return &microcode_intel_ops;
 }
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 733ca39..43d8b48 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -423,6 +423,7 @@
 		current_thread_info()->status |= TS_COMPAT;
 	}
 }
+EXPORT_SYMBOL_GPL(set_personality_ia32);
 
 unsigned long get_wchan(struct task_struct *p)
 {
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 71f4727..5a98aa2 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -185,10 +185,22 @@
 #endif
 	rc = -EINVAL;
 	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
-		const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
 		const size_t dyn_size = PERCPU_MODULE_RESERVE +
 			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
+		size_t atom_size;
 
+		/*
+		 * On 64bit, use PMD_SIZE for atom_size so that embedded
+		 * percpu areas are aligned to PMD.  This, in the future,
+		 * can also allow using PMD mappings in vmalloc area.  Use
+		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
+		 * and large vmalloc area allocs can easily fail.
+		 */
+#ifdef CONFIG_X86_64
+		atom_size = PMD_SIZE;
+#else
+		atom_size = PAGE_SIZE;
+#endif
 		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
 					    dyn_size, atom_size,
 					    pcpu_cpu_distance,
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index e9f265f..9cf71d0 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -93,7 +93,6 @@
 struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 	.early_percpu_clock_init	= x86_init_noop,
 	.setup_percpu_clockev		= setup_secondary_APIC_clock,
-	.fixup_cpu_id			= x86_default_fixup_cpu_id,
 };
 
 static void default_nmi_init(void) { };
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 91a5e98..185a2b8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6581,6 +6581,7 @@
 		kvm_inject_page_fault(vcpu, &fault);
 	}
 	vcpu->arch.apf.halted = false;
+	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index d6ae30b..2e4e4b0 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -44,13 +44,6 @@
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
 
-static inline unsigned long count_bytes(unsigned long mask)
-{
-	mask = (mask - 1) & ~mask;
-	mask >>= 7;
-	return count_masked_bytes(mask);
-}
-
 /*
  * Do a strncpy, return length of string without final '\0'.
  * 'count' is the user-supplied count (return 'count' if we
@@ -69,16 +62,19 @@
 		max = count;
 
 	while (max >= sizeof(unsigned long)) {
-		unsigned long c;
+		unsigned long c, mask;
 
 		/* Fall back to byte-at-a-time if we get a page fault */
 		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
 			break;
-		/* This can write a few bytes past the NUL character, but that's ok */
+		mask = has_zero(c);
+		if (mask) {
+			mask = (mask - 1) & ~mask;
+			mask >>= 7;
+			*(unsigned long *)(dst+res) = c & mask;
+			return res + count_masked_bytes(mask);
+		}
 		*(unsigned long *)(dst+res) = c;
-		c = has_zero(c);
-		if (c)
-			return res + count_bytes(c);
 		res += sizeof(unsigned long);
 		max -= sizeof(unsigned long);
 	}
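
The rewritten inner loop leans on the word-at-a-time helpers from <asm/word-at-a-time.h>. A sketch of the idea behind has_zero() and the mask arithmetic above, assuming a little-endian word (the kernel's helpers are per-architecture and may differ in detail):

    #define REP8_01 (~0ul / 0xff)           /* 0x0101...01 */
    #define REP8_80 (REP8_01 << 7)          /* 0x8080...80 */

    /* A 0x00 byte borrows into its own bit 7 when REP8_01 is subtracted, and
     * "& ~v" filters out bytes that merely had bit 7 set already.  Markers can
     * also appear in bytes above the first NUL, which is harmless here because
     * only the lowest one is used. */
    static inline unsigned long has_zero(unsigned long v)
    {
            return (v - REP8_01) & ~v & REP8_80;
    }

    /*
     * In the hunk above, (mask - 1) & ~mask keeps only the bits below the
     * lowest 0x80 marker, and ">> 7" turns those into 0xff bytes covering
     * exactly the bytes before the NUL.  So "c & mask" keeps the valid bytes
     * and zeroes the rest (writing the terminator as a side effect), and
     * count_masked_bytes(mask) is the number of string bytes in this word.
     */
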
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index ed2835e..fc09c27 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -9,11 +9,11 @@
 
 struct pci_root_info {
 	struct acpi_device *bridge;
-	char *name;
+	char name[16];
 	unsigned int res_num;
 	struct resource *res;
-	struct list_head *resources;
 	int busnum;
+	struct pci_sysdata sd;
 };
 
 static bool pci_use_crs = true;
@@ -245,13 +245,6 @@
 	return AE_OK;
 }
 
-static bool resource_contains(struct resource *res, resource_size_t point)
-{
-	if (res->start <= point && point <= res->end)
-		return true;
-	return false;
-}
-
 static void coalesce_windows(struct pci_root_info *info, unsigned long type)
 {
 	int i, j;
@@ -272,10 +265,7 @@
 			 * our resources no longer match the ACPI _CRS, but
 			 * the kernel resource tree doesn't allow overlaps.
 			 */
-			if (resource_contains(res1, res2->start) ||
-			    resource_contains(res1, res2->end) ||
-			    resource_contains(res2, res1->start) ||
-			    resource_contains(res2, res1->end)) {
+			if (resource_overlaps(res1, res2)) {
 				res1->start = min(res1->start, res2->start);
 				res1->end = max(res1->end, res2->end);
 				dev_info(&info->bridge->dev,
@@ -287,7 +277,8 @@
 	}
 }
 
-static void add_resources(struct pci_root_info *info)
+static void add_resources(struct pci_root_info *info,
+			  struct list_head *resources)
 {
 	int i;
 	struct resource *res, *root, *conflict;
@@ -311,53 +302,74 @@
 				 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
 				 res, conflict->name, conflict);
 		else
-			pci_add_resource(info->resources, res);
+			pci_add_resource(resources, res);
 	}
 }
 
-static void
-get_current_resources(struct acpi_device *device, int busnum,
-		      int domain, struct list_head *resources)
+static void free_pci_root_info_res(struct pci_root_info *info)
 {
-	struct pci_root_info info;
-	size_t size;
+	kfree(info->res);
+	info->res = NULL;
+	info->res_num = 0;
+}
 
-	info.bridge = device;
-	info.res_num = 0;
-	info.resources = resources;
-	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
-				&info);
-	if (!info.res_num)
-		return;
+static void __release_pci_root_info(struct pci_root_info *info)
+{
+	int i;
+	struct resource *res;
 
-	size = sizeof(*info.res) * info.res_num;
-	info.res = kmalloc(size, GFP_KERNEL);
-	if (!info.res)
-		return;
+	for (i = 0; i < info->res_num; i++) {
+		res = &info->res[i];
 
-	info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
-	if (!info.name)
-		goto name_alloc_fail;
+		if (!res->parent)
+			continue;
 
-	info.res_num = 0;
-	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
-				&info);
+		if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
+			continue;
 
-	if (pci_use_crs) {
-		add_resources(&info);
-
-		return;
+		release_resource(res);
 	}
 
-	kfree(info.name);
+	free_pci_root_info_res(info);
 
-name_alloc_fail:
-	kfree(info.res);
+	kfree(info);
+}
+static void release_pci_root_info(struct pci_host_bridge *bridge)
+{
+	struct pci_root_info *info = bridge->release_data;
+
+	__release_pci_root_info(info);
+}
+
+static void
+probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
+		    int busnum, int domain)
+{
+	size_t size;
+
+	info->bridge = device;
+	info->res_num = 0;
+	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
+				info);
+	if (!info->res_num)
+		return;
+
+	size = sizeof(*info->res) * info->res_num;
+	info->res_num = 0;
+	info->res = kmalloc(size, GFP_KERNEL);
+	if (!info->res)
+		return;
+
+	sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
+
+	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
+				info);
 }
 
 struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
 {
 	struct acpi_device *device = root->device;
+	struct pci_root_info *info = NULL;
 	int domain = root->segment;
 	int busnum = root->secondary.start;
 	LIST_HEAD(resources);
@@ -389,17 +401,14 @@
 	if (node != -1 && !node_online(node))
 		node = -1;
 
-	/* Allocate per-root-bus (not per bus) arch-specific data.
-	 * TODO: leak; this memory is never freed.
-	 * It's arguable whether it's worth the trouble to care.
-	 */
-	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
-	if (!sd) {
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
 		printk(KERN_WARNING "pci_bus %04x:%02x: "
 		       "ignored (out of memory)\n", domain, busnum);
 		return NULL;
 	}
 
+	sd = &info->sd;
 	sd->domain = domain;
 	sd->node = node;
 	/*
@@ -413,22 +422,32 @@
 		 * be replaced by sd.
 		 */
 		memcpy(bus->sysdata, sd, sizeof(*sd));
-		kfree(sd);
+		kfree(info);
 	} else {
-		get_current_resources(device, busnum, domain, &resources);
+		probe_pci_root_info(info, device, busnum, domain);
 
 		/*
 		 * _CRS with no apertures is normal, so only fall back to
 		 * defaults or native bridge info if we're ignoring _CRS.
 		 */
-		if (!pci_use_crs)
+		if (pci_use_crs)
+			add_resources(info, &resources);
+		else {
+			free_pci_root_info_res(info);
 			x86_pci_root_bus_resources(busnum, &resources);
+		}
+
 		bus = pci_create_root_bus(NULL, busnum, &pci_root_ops, sd,
 					  &resources);
-		if (bus)
+		if (bus) {
 			bus->subordinate = pci_scan_child_bus(bus);
-		else
+			pci_set_host_bridge_release(
+				to_pci_host_bridge(bus->bridge),
+				release_pci_root_info, info);
+		} else {
 			pci_free_resource_list(&resources);
+			__release_pci_root_info(info);
+		}
 	}
 
 	/* After the PCI-E bus has been walked and all devices discovered,
@@ -445,9 +464,6 @@
 		}
 	}
 
-	if (!bus)
-		kfree(sd);
-
 	if (bus && node != -1) {
 #ifdef CONFIG_ACPI_NUMA
 		if (pxm >= 0)
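
The "TODO: leak" removed above is addressed by handing ownership of the per-root info to the PCI core: pci_set_host_bridge_release() registers a destructor that runs when the host bridge is torn down. The general shape of the pattern, as a sketch (struct pci_host_bridge field names may vary between kernel versions; the real release_pci_root_info() above also releases the claimed resources, not just the allocation):

    /* Sketch: free the arch-private root-bus data when the host bridge goes
     * away; the probe path passed the same pointer as release_data (third
     * argument of pci_set_host_bridge_release() in the hunk above). */
    static void release_info(struct pci_host_bridge *bridge)
    {
            kfree(bridge->release_data);
    }
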
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 0567df3..5aed49b 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -32,6 +32,27 @@
 
 #define RANGE_NUM 16
 
+static struct pci_root_info __init *find_pci_root_info(int node, int link)
+{
+	struct pci_root_info *info;
+
+	/* find the position */
+	list_for_each_entry(info, &pci_root_infos, list)
+		if (info->node == node && info->link == link)
+			return info;
+
+	return NULL;
+}
+
+static void __init set_mp_bus_range_to_node(int min_bus, int max_bus, int node)
+{
+#ifdef CONFIG_NUMA
+	int j;
+
+	for (j = min_bus; j <= max_bus; j++)
+		set_mp_bus_to_node(j, node);
+#endif
+}
 /**
  * early_fill_mp_bus_to_node()
  * called before pcibios_scan_root and pci_scan_bus
@@ -41,7 +62,6 @@
 static int __init early_fill_mp_bus_info(void)
 {
 	int i;
-	int j;
 	unsigned bus;
 	unsigned slot;
 	int node;
@@ -50,7 +70,6 @@
 	int def_link;
 	struct pci_root_info *info;
 	u32 reg;
-	struct resource *res;
 	u64 start;
 	u64 end;
 	struct range range[RANGE_NUM];
@@ -86,7 +105,6 @@
 	if (!found)
 		return 0;
 
-	pci_root_num = 0;
 	for (i = 0; i < 4; i++) {
 		int min_bus;
 		int max_bus;
@@ -99,19 +117,11 @@
 		min_bus = (reg >> 16) & 0xff;
 		max_bus = (reg >> 24) & 0xff;
 		node = (reg >> 4) & 0x07;
-#ifdef CONFIG_NUMA
-		for (j = min_bus; j <= max_bus; j++)
-			set_mp_bus_to_node(j, node);
-#endif
+		set_mp_bus_range_to_node(min_bus, max_bus, node);
 		link = (reg >> 8) & 0x03;
 
-		info = &pci_root_info[pci_root_num];
-		info->bus_min = min_bus;
-		info->bus_max = max_bus;
-		info->node = node;
-		info->link = link;
+		info = alloc_pci_root_info(min_bus, max_bus, node, link);
 		sprintf(info->name, "PCI Bus #%02x", min_bus);
-		pci_root_num++;
 	}
 
 	/* get the default node and link for left over res */
@@ -134,16 +144,10 @@
 		link = (reg >> 4) & 0x03;
 		end = (reg & 0xfff000) | 0xfff;
 
-		/* find the position */
-		for (j = 0; j < pci_root_num; j++) {
-			info = &pci_root_info[j];
-			if (info->node == node && info->link == link)
-				break;
-		}
-		if (j == pci_root_num)
+		info = find_pci_root_info(node, link);
+		if (!info)
 			continue; /* not found */
 
-		info = &pci_root_info[j];
 		printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n",
 		       node, link, start, end);
 
@@ -155,13 +159,8 @@
 	}
 	/* add left over io port range to def node/link, [0, 0xffff] */
 	/* find the position */
-	for (j = 0; j < pci_root_num; j++) {
-		info = &pci_root_info[j];
-		if (info->node == def_node && info->link == def_link)
-			break;
-	}
-	if (j < pci_root_num) {
-		info = &pci_root_info[j];
+	info = find_pci_root_info(def_node, def_link);
+	if (info) {
 		for (i = 0; i < RANGE_NUM; i++) {
 			if (!range[i].end)
 				continue;
@@ -214,16 +213,10 @@
 		end <<= 8;
 		end |= 0xffff;
 
-		/* find the position */
-		for (j = 0; j < pci_root_num; j++) {
-			info = &pci_root_info[j];
-			if (info->node == node && info->link == link)
-				break;
-		}
-		if (j == pci_root_num)
-			continue; /* not found */
+		info = find_pci_root_info(node, link);
 
-		info = &pci_root_info[j];
+		if (!info)
+			continue;
 
 		printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]",
 		       node, link, start, end);
@@ -291,14 +284,8 @@
 	 * add left over mmio range to def node/link ?
 	 * that is tricky, just record range in from start_min to 4G
 	 */
-	for (j = 0; j < pci_root_num; j++) {
-		info = &pci_root_info[j];
-		if (info->node == def_node && info->link == def_link)
-			break;
-	}
-	if (j < pci_root_num) {
-		info = &pci_root_info[j];
-
+	info = find_pci_root_info(def_node, def_link);
+	if (info) {
 		for (i = 0; i < RANGE_NUM; i++) {
 			if (!range[i].end)
 				continue;
@@ -309,20 +296,16 @@
 		}
 	}
 
-	for (i = 0; i < pci_root_num; i++) {
-		int res_num;
+	list_for_each_entry(info, &pci_root_infos, list) {
 		int busnum;
+		struct pci_root_res *root_res;
 
-		info = &pci_root_info[i];
-		res_num = info->res_num;
 		busnum = info->bus_min;
 		printk(KERN_DEBUG "bus: [%02x, %02x] on node %x link %x\n",
 		       info->bus_min, info->bus_max, info->node, info->link);
-		for (j = 0; j < res_num; j++) {
-			res = &info->res[j];
-			printk(KERN_DEBUG "bus: %02x index %x %pR\n",
-				       busnum, j, res);
-		}
+		list_for_each_entry(root_res, &info->resources, list)
+			printk(KERN_DEBUG "bus: %02x %pR\n",
+				       busnum, &root_res->res);
 	}
 
 	return 0;
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index f3a7c56..614392c 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -22,19 +22,15 @@
 static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
 {
 	struct pci_root_info *info;
+	struct pci_root_res *root_res;
 	struct resource res;
 	u16 word1, word2;
 	u8 fbus, lbus;
-	int i;
-
-	info = &pci_root_info[pci_root_num];
-	pci_root_num++;
 
 	/* read the PCI bus numbers */
 	fbus = read_pci_config_byte(bus, slot, func, 0x44);
 	lbus = read_pci_config_byte(bus, slot, func, 0x45);
-	info->bus_min = fbus;
-	info->bus_max = lbus;
+	info = alloc_pci_root_info(fbus, lbus, 0, 0);
 
 	/*
 	 * Add the legacy IDE ports on bus 0
@@ -86,8 +82,8 @@
 	res.flags = IORESOURCE_BUS;
 	printk(KERN_INFO "CNB20LE PCI Host Bridge (domain 0000 %pR)\n", &res);
 
-	for (i = 0; i < info->res_num; i++)
-		printk(KERN_INFO "host bridge window %pR\n", &info->res[i]);
+	list_for_each_entry(root_res, &info->resources, list)
+		printk(KERN_INFO "host bridge window %pR\n", &root_res->res);
 }
 
 static int __init broadcom_postcore_init(void)
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index fd3f655..306579f 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -4,35 +4,38 @@
 
 #include "bus_numa.h"
 
-int pci_root_num;
-struct pci_root_info pci_root_info[PCI_ROOT_NR];
+LIST_HEAD(pci_root_infos);
+
+static struct pci_root_info *x86_find_pci_root_info(int bus)
+{
+	struct pci_root_info *info;
+
+	if (list_empty(&pci_root_infos))
+		return NULL;
+
+	list_for_each_entry(info, &pci_root_infos, list)
+		if (info->bus_min == bus)
+			return info;
+
+	return NULL;
+}
 
 void x86_pci_root_bus_resources(int bus, struct list_head *resources)
 {
-	int i;
-	int j;
-	struct pci_root_info *info;
+	struct pci_root_info *info = x86_find_pci_root_info(bus);
+	struct pci_root_res *root_res;
 
-	if (!pci_root_num)
-		goto default_resources;
-
-	for (i = 0; i < pci_root_num; i++) {
-		if (pci_root_info[i].bus_min == bus)
-			break;
-	}
-
-	if (i == pci_root_num)
+	if (!info)
 		goto default_resources;
 
 	printk(KERN_DEBUG "PCI: root bus %02x: hardware-probed resources\n",
 	       bus);
 
-	info = &pci_root_info[i];
-	for (j = 0; j < info->res_num; j++) {
+	list_for_each_entry(root_res, &info->resources, list) {
 		struct resource *res;
 		struct resource *root;
 
-		res = &info->res[j];
+		res = &root_res->res;
 		pci_add_resource(resources, res);
 		if (res->flags & IORESOURCE_IO)
 			root = &ioport_resource;
@@ -53,11 +56,32 @@
 	pci_add_resource(resources, &iomem_resource);
 }
 
+struct pci_root_info __init *alloc_pci_root_info(int bus_min, int bus_max,
+						 int node, int link)
+{
+	struct pci_root_info *info;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+	if (!info)
+		return info;
+
+	INIT_LIST_HEAD(&info->resources);
+	info->bus_min = bus_min;
+	info->bus_max = bus_max;
+	info->node = node;
+	info->link = link;
+
+	list_add_tail(&info->list, &pci_root_infos);
+
+	return info;
+}
+
 void __devinit update_res(struct pci_root_info *info, resource_size_t start,
 			  resource_size_t end, unsigned long flags, int merge)
 {
-	int i;
 	struct resource *res;
+	struct pci_root_res *root_res;
 
 	if (start > end)
 		return;
@@ -69,11 +93,11 @@
 		goto addit;
 
 	/* try to merge it with old one */
-	for (i = 0; i < info->res_num; i++) {
+	list_for_each_entry(root_res, &info->resources, list) {
 		resource_size_t final_start, final_end;
 		resource_size_t common_start, common_end;
 
-		res = &info->res[i];
+		res = &root_res->res;
 		if (res->flags != flags)
 			continue;
 
@@ -93,14 +117,15 @@
 addit:
 
 	/* need to add that */
-	if (info->res_num >= RES_NUM)
+	root_res = kzalloc(sizeof(*root_res), GFP_KERNEL);
+	if (!root_res)
 		return;
 
-	res = &info->res[info->res_num];
+	res = &root_res->res;
 	res->name = info->name;
 	res->flags = flags;
 	res->start = start;
 	res->end = end;
-	res->child = NULL;
-	info->res_num++;
+
+	list_add_tail(&root_res->list, &info->resources);
 }
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h
index 804a4b4..226a466 100644
--- a/arch/x86/pci/bus_numa.h
+++ b/arch/x86/pci/bus_numa.h
@@ -4,22 +4,24 @@
  * sub bus (transparent) will use entres from 3 to store extra from
  * root, so need to make sure we have enough slot there.
  */
-#define RES_NUM 16
+struct pci_root_res {
+	struct list_head list;
+	struct resource res;
+};
+
 struct pci_root_info {
+	struct list_head list;
 	char name[12];
-	unsigned int res_num;
-	struct resource res[RES_NUM];
+	struct list_head resources;
 	int bus_min;
 	int bus_max;
 	int node;
 	int link;
 };
 
-/* 4 at this time, it may become to 32 */
-#define PCI_ROOT_NR 4
-extern int pci_root_num;
-extern struct pci_root_info pci_root_info[PCI_ROOT_NR];
-
+extern struct list_head pci_root_infos;
+struct pci_root_info *alloc_pci_root_info(int bus_min, int bus_max,
+						int node, int link);
 extern void update_res(struct pci_root_info *info, resource_size_t start,
 		      resource_size_t end, unsigned long flags, int merge);
 #endif
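
With the fixed pci_root_info[PCI_ROOT_NR] array gone, callers allocate one descriptor per host bridge and append windows to its list. Roughly, using the helpers declared above (the variables and the I/O flag here are purely illustrative):

            struct pci_root_info *info;
            struct pci_root_res *root_res;

            info = alloc_pci_root_info(min_bus, max_bus, node, link);
            if (!info)
                    return;

            /* add an I/O window; the final argument asks update_res() to
             * merge it with an overlapping window of the same flags */
            update_res(info, start, end, IORESOURCE_IO, 1);

            list_for_each_entry(root_res, &info->resources, list)
                    printk(KERN_DEBUG "window %pR\n", &root_res->res);
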
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 323481e..0ad990a 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -11,6 +11,7 @@
 #include <linux/dmi.h>
 #include <linux/slab.h>
 
+#include <asm-generic/pci-bridge.h>
 #include <asm/acpi.h>
 #include <asm/segment.h>
 #include <asm/io.h>
@@ -229,6 +230,14 @@
 }
 #endif
 
+static int __devinit set_scan_all(const struct dmi_system_id *d)
+{
+	printk(KERN_INFO "PCI: %s detected, enabling pci=pcie_scan_all\n",
+	       d->ident);
+	pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
+	return 0;
+}
+
 static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
 #ifdef __i386__
 /*
@@ -420,6 +429,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
 		},
 	},
+	{
+		.callback = set_scan_all,
+		.ident = "Stratus/NEC ftServer",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ftServer"),
+		},
+	},
 	{}
 };
 
@@ -430,9 +446,7 @@
 
 struct pci_bus * __devinit pcibios_scan_root(int busnum)
 {
-	LIST_HEAD(resources);
 	struct pci_bus *bus = NULL;
-	struct pci_sysdata *sd;
 
 	while ((bus = pci_find_next_bus(bus)) != NULL) {
 		if (bus->number == busnum) {
@@ -441,28 +455,10 @@
 		}
 	}
 
-	/* Allocate per-root-bus (not per bus) arch-specific data.
-	 * TODO: leak; this memory is never freed.
-	 * It's arguable whether it's worth the trouble to care.
-	 */
-	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
-	if (!sd) {
-		printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
-		return NULL;
-	}
-
-	sd->node = get_mp_bus_to_node(busnum);
-
-	printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
-	x86_pci_root_bus_resources(busnum, &resources);
-	bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, sd, &resources);
-	if (!bus) {
-		pci_free_resource_list(&resources);
-		kfree(sd);
-	}
-
-	return bus;
+	return pci_scan_bus_on_node(busnum, &pci_root_ops,
+					get_mp_bus_to_node(busnum));
 }
+
 void __init pcibios_set_cache_line_size(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -656,6 +652,7 @@
 	}
 	sd->node = node;
 	x86_pci_root_bus_resources(busno, &resources);
+	printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busno);
 	bus = pci_scan_root_bus(NULL, busno, ops, sd, &resources);
 	if (!bus) {
 		pci_free_resource_list(&resources);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 831971e..dd8ca6f 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -57,7 +57,7 @@
 {
 	struct pcibios_fwaddrmap *map;
 
-	WARN_ON(!spin_is_locked(&pcibios_fwaddrmap_lock));
+	WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
 
 	list_for_each_entry(map, &pcibios_fwaddrmappings, list)
 		if (map->dev == dev)
diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c
index 66d377e..646e3b5 100644
--- a/arch/x86/platform/geode/net5501.c
+++ b/arch/x86/platform/geode/net5501.c
@@ -63,7 +63,7 @@
 		.name = "net5501:1",
 		.gpio = 6,
 		.default_trigger = "default-on",
-		.active_low = 1,
+		.active_low = 0,
 	},
 };
 
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index e0a3723..e31bcd8 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -805,7 +805,7 @@
 		} else
 			i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
 	}
-	intel_scu_notifier_post(SCU_AVAILABLE, 0L);
+	intel_scu_notifier_post(SCU_AVAILABLE, NULL);
 }
 EXPORT_SYMBOL_GPL(intel_scu_devices_create);
 
@@ -814,7 +814,7 @@
 {
 	int i;
 
-	intel_scu_notifier_post(SCU_DOWN, 0L);
+	intel_scu_notifier_post(SCU_DOWN, NULL);
 
 	for (i = 0; i < ipc_next_dev; i++)
 		platform_device_del(ipc_devs[i]);
diff --git a/arch/x86/tools/.gitignore b/arch/x86/tools/.gitignore
new file mode 100644
index 0000000..be0ed06
--- /dev/null
+++ b/arch/x86/tools/.gitignore
@@ -0,0 +1 @@
+relocs
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index d511aa9..733057b 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -36,3 +36,7 @@
 $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
 
 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+hostprogs-y	+= relocs
+relocs: $(obj)/relocs
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/tools/relocs.c
similarity index 76%
rename from arch/x86/boot/compressed/relocs.c
rename to arch/x86/tools/relocs.c
index d3c0b02..b43cfcd 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -18,6 +18,8 @@
 static Elf32_Ehdr ehdr;
 static unsigned long reloc_count, reloc_idx;
 static unsigned long *relocs;
+static unsigned long reloc16_count, reloc16_idx;
+static unsigned long *relocs16;
 
 struct section {
 	Elf32_Shdr     shdr;
@@ -28,52 +30,86 @@
 };
 static struct section *secs;
 
+enum symtype {
+	S_ABS,
+	S_REL,
+	S_SEG,
+	S_LIN,
+	S_NSYMTYPES
+};
+
+static const char * const sym_regex_kernel[S_NSYMTYPES] = {
 /*
  * Following symbols have been audited. There values are constant and do
  * not change if bzImage is loaded at a different physical address than
  * the address for which it has been compiled. Don't warn user about
  * absolute relocations present w.r.t these symbols.
  */
-static const char abs_sym_regex[] =
+	[S_ABS] =
 	"^(xen_irq_disable_direct_reloc$|"
 	"xen_save_fl_direct_reloc$|"
 	"VDSO|"
-	"__crc_)";
-static regex_t abs_sym_regex_c;
-static int is_abs_reloc(const char *sym_name)
-{
-	return !regexec(&abs_sym_regex_c, sym_name, 0, NULL, 0);
-}
+	"__crc_)",
 
 /*
  * These symbols are known to be relative, even if the linker marks them
  * as absolute (typically defined outside any section in the linker script.)
  */
-static const char rel_sym_regex[] =
-	"^_end$";
-static regex_t rel_sym_regex_c;
-static int is_rel_reloc(const char *sym_name)
+	[S_REL] =
+	"^(__init_(begin|end)|"
+	"__x86_cpu_dev_(start|end)|"
+	"(__parainstructions|__alt_instructions)(|_end)|"
+	"(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
+	"_end)$"
+};
+
+
+static const char * const sym_regex_realmode[S_NSYMTYPES] = {
+/*
+ * These are 16-bit segment symbols when compiling 16-bit code.
+ */
+	[S_SEG] =
+	"^real_mode_seg$",
+
+/*
+ * These are offsets belonging to segments, as opposed to linear addresses,
+ * when compiling 16-bit code.
+ */
+	[S_LIN] =
+	"^pa_",
+};
+
+static const char * const *sym_regex;
+
+static regex_t sym_regex_c[S_NSYMTYPES];
+static int is_reloc(enum symtype type, const char *sym_name)
 {
-	return !regexec(&rel_sym_regex_c, sym_name, 0, NULL, 0);
+	return sym_regex[type] &&
+		!regexec(&sym_regex_c[type], sym_name, 0, NULL, 0);
 }
 
-static void regex_init(void)
+static void regex_init(int use_real_mode)
 {
         char errbuf[128];
         int err;
-	
-        err = regcomp(&abs_sym_regex_c, abs_sym_regex,
-                      REG_EXTENDED|REG_NOSUB);
-        if (err) {
-                regerror(err, &abs_sym_regex_c, errbuf, sizeof errbuf);
-                die("%s", errbuf);
-        }
+	int i;
 
-        err = regcomp(&rel_sym_regex_c, rel_sym_regex,
-                      REG_EXTENDED|REG_NOSUB);
-        if (err) {
-                regerror(err, &rel_sym_regex_c, errbuf, sizeof errbuf);
-                die("%s", errbuf);
+	if (use_real_mode)
+		sym_regex = sym_regex_realmode;
+	else
+		sym_regex = sym_regex_kernel;
+
+	for (i = 0; i < S_NSYMTYPES; i++) {
+		if (!sym_regex[i])
+			continue;
+
+		err = regcomp(&sym_regex_c[i], sym_regex[i],
+			      REG_EXTENDED|REG_NOSUB);
+
+		if (err) {
+			regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf);
+			die("%s", errbuf);
+		}
         }
 }
 
@@ -154,6 +190,10 @@
 		REL_TYPE(R_386_RELATIVE),
 		REL_TYPE(R_386_GOTOFF),
 		REL_TYPE(R_386_GOTPC),
+		REL_TYPE(R_386_8),
+		REL_TYPE(R_386_PC8),
+		REL_TYPE(R_386_16),
+		REL_TYPE(R_386_PC16),
 #undef REL_TYPE
 	};
 	const char *name = "unknown type rel type name";
@@ -189,7 +229,7 @@
 		name = sym_strtab + sym->st_name;
 	}
 	else {
-		name = sec_name(secs[sym->st_shndx].shdr.sh_name);
+		name = sec_name(sym->st_shndx);
 	}
 	return name;
 }
@@ -403,13 +443,11 @@
 	for (i = 0; i < ehdr.e_shnum; i++) {
 		struct section *sec = &secs[i];
 		char *sym_strtab;
-		Elf32_Sym *sh_symtab;
 		int j;
 
 		if (sec->shdr.sh_type != SHT_SYMTAB) {
 			continue;
 		}
-		sh_symtab = sec->symtab;
 		sym_strtab = sec->link->strtab;
 		for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
 			Elf32_Sym *sym;
@@ -474,7 +512,7 @@
 			 * Before warning check if this absolute symbol
 			 * relocation is harmless.
 			 */
-			if (is_abs_reloc(name) || is_rel_reloc(name))
+			if (is_reloc(S_ABS, name) || is_reloc(S_REL, name))
 				continue;
 
 			if (!printed) {
@@ -498,7 +536,8 @@
 		printf("\n");
 }
 
-static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
+			int use_real_mode)
 {
 	int i;
 	/* Walk through the relocations */
@@ -523,30 +562,67 @@
 			Elf32_Rel *rel;
 			Elf32_Sym *sym;
 			unsigned r_type;
+			const char *symname;
+			int shn_abs;
+
 			rel = &sec->reltab[j];
 			sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
 			r_type = ELF32_R_TYPE(rel->r_info);
-			/* Don't visit relocations to absolute symbols */
-			if (sym->st_shndx == SHN_ABS &&
-			    !is_rel_reloc(sym_name(sym_strtab, sym))) {
-				continue;
-			}
+
+			shn_abs = sym->st_shndx == SHN_ABS;
+
 			switch (r_type) {
 			case R_386_NONE:
 			case R_386_PC32:
+			case R_386_PC16:
+			case R_386_PC8:
 				/*
 				 * NONE can be ignored and and PC relative
 				 * relocations don't need to be adjusted.
 				 */
 				break;
+
+			case R_386_16:
+				symname = sym_name(sym_strtab, sym);
+				if (!use_real_mode)
+					goto bad;
+				if (shn_abs) {
+					if (is_reloc(S_ABS, symname))
+						break;
+					else if (!is_reloc(S_SEG, symname))
+						goto bad;
+				} else {
+					if (is_reloc(S_LIN, symname))
+						goto bad;
+					else
+						break;
+				}
+				visit(rel, sym);
+				break;
+
 			case R_386_32:
-				/* Visit relocations that need to be adjusted */
+				symname = sym_name(sym_strtab, sym);
+				if (shn_abs) {
+					if (is_reloc(S_ABS, symname))
+						break;
+					else if (!is_reloc(S_REL, symname))
+						goto bad;
+				} else {
+					if (use_real_mode &&
+					    !is_reloc(S_LIN, symname))
+						break;
+				}
 				visit(rel, sym);
 				break;
 			default:
 				die("Unsupported relocation type: %s (%d)\n",
 				    rel_type(r_type), r_type);
 				break;
+			bad:
+				symname = sym_name(sym_strtab, sym);
+				die("Invalid %s %s relocation: %s\n",
+				    shn_abs ? "absolute" : "relative",
+				    rel_type(r_type), symname);
 			}
 		}
 	}
@@ -554,13 +630,19 @@
 
 static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
 {
-	reloc_count += 1;
+	if (ELF32_R_TYPE(rel->r_info) == R_386_16)
+		reloc16_count++;
+	else
+		reloc_count++;
 }
 
 static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
 {
 	/* Remember the address that needs to be adjusted. */
-	relocs[reloc_idx++] = rel->r_offset;
+	if (ELF32_R_TYPE(rel->r_info) == R_386_16)
+		relocs16[reloc16_idx++] = rel->r_offset;
+	else
+		relocs[reloc_idx++] = rel->r_offset;
 }
 
 static int cmp_relocs(const void *va, const void *vb)
@@ -570,23 +652,41 @@
 	return (*a == *b)? 0 : (*a > *b)? 1 : -1;
 }
 
-static void emit_relocs(int as_text)
+static int write32(unsigned int v, FILE *f)
+{
+	unsigned char buf[4];
+
+	put_unaligned_le32(v, buf);
+	return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
+}
+
+static void emit_relocs(int as_text, int use_real_mode)
 {
 	int i;
 	/* Count how many relocations I have and allocate space for them. */
 	reloc_count = 0;
-	walk_relocs(count_reloc);
+	walk_relocs(count_reloc, use_real_mode);
 	relocs = malloc(reloc_count * sizeof(relocs[0]));
 	if (!relocs) {
 		die("malloc of %d entries for relocs failed\n",
 			reloc_count);
 	}
+
+	relocs16 = malloc(reloc16_count * sizeof(relocs[0]));
+	if (!relocs16) {
+		die("malloc of %d entries for relocs16 failed\n",
+			reloc16_count);
+	}
 	/* Collect up the relocations */
 	reloc_idx = 0;
-	walk_relocs(collect_reloc);
+	walk_relocs(collect_reloc, use_real_mode);
+
+	if (reloc16_count && !use_real_mode)
+		die("Segment relocations found but --realmode not specified\n");
 
 	/* Order the relocations for more efficient processing */
 	qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
+	qsort(relocs16, reloc16_count, sizeof(relocs16[0]), cmp_relocs);
 
 	/* Print the relocations */
 	if (as_text) {
@@ -595,58 +695,83 @@
 		 */
 		printf(".section \".data.reloc\",\"a\"\n");
 		printf(".balign 4\n");
-		for (i = 0; i < reloc_count; i++) {
-			printf("\t .long 0x%08lx\n", relocs[i]);
+		if (use_real_mode) {
+			printf("\t.long %lu\n", reloc16_count);
+			for (i = 0; i < reloc16_count; i++)
+				printf("\t.long 0x%08lx\n", relocs16[i]);
+			printf("\t.long %lu\n", reloc_count);
+			for (i = 0; i < reloc_count; i++) {
+				printf("\t.long 0x%08lx\n", relocs[i]);
+			}
+		} else {
+			/* Print a stop */
+			printf("\t.long 0x%08lx\n", (unsigned long)0);
+			for (i = 0; i < reloc_count; i++) {
+				printf("\t.long 0x%08lx\n", relocs[i]);
+			}
 		}
+
 		printf("\n");
 	}
 	else {
-		unsigned char buf[4];
-		/* Print a stop */
-		fwrite("\0\0\0\0", 4, 1, stdout);
-		/* Now print each relocation */
-		for (i = 0; i < reloc_count; i++) {
-			put_unaligned_le32(relocs[i], buf);
-			fwrite(buf, 4, 1, stdout);
+		if (use_real_mode) {
+			write32(reloc16_count, stdout);
+			for (i = 0; i < reloc16_count; i++)
+				write32(relocs16[i], stdout);
+			write32(reloc_count, stdout);
+
+			/* Now print each relocation */
+			for (i = 0; i < reloc_count; i++)
+				write32(relocs[i], stdout);
+		} else {
+			/* Print a stop */
+			write32(0, stdout);
+
+			/* Now print each relocation */
+			for (i = 0; i < reloc_count; i++) {
+				write32(relocs[i], stdout);
+			}
 		}
 	}
 }
 
 static void usage(void)
 {
-	die("relocs [--abs-syms |--abs-relocs | --text] vmlinux\n");
+	die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n");
 }
 
 int main(int argc, char **argv)
 {
 	int show_absolute_syms, show_absolute_relocs;
-	int as_text;
+	int as_text, use_real_mode;
 	const char *fname;
 	FILE *fp;
 	int i;
 
-	regex_init();
-
 	show_absolute_syms = 0;
 	show_absolute_relocs = 0;
 	as_text = 0;
+	use_real_mode = 0;
 	fname = NULL;
 	for (i = 1; i < argc; i++) {
 		char *arg = argv[i];
 		if (*arg == '-') {
-			if (strcmp(argv[1], "--abs-syms") == 0) {
+			if (strcmp(arg, "--abs-syms") == 0) {
 				show_absolute_syms = 1;
 				continue;
 			}
-
-			if (strcmp(argv[1], "--abs-relocs") == 0) {
+			if (strcmp(arg, "--abs-relocs") == 0) {
 				show_absolute_relocs = 1;
 				continue;
 			}
-			else if (strcmp(argv[1], "--text") == 0) {
+			if (strcmp(arg, "--text") == 0) {
 				as_text = 1;
 				continue;
 			}
+			if (strcmp(arg, "--realmode") == 0) {
+				use_real_mode = 1;
+				continue;
+			}
 		}
 		else if (!fname) {
 			fname = arg;
@@ -657,6 +782,7 @@
 	if (!fname) {
 		usage();
 	}
+	regex_init(use_real_mode);
 	fp = fopen(fname, "r");
 	if (!fp) {
 		die("Cannot open %s: %s\n",
@@ -675,6 +801,6 @@
 		print_absolute_relocs();
 		return 0;
 	}
-	emit_relocs(as_text);
+	emit_relocs(as_text, use_real_mode);
 	return 0;
 }
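
With --realmode, the binary stream emitted above therefore becomes: a 32-bit little-endian count of 16-bit segment relocations, that many offsets (each stored as a 32-bit word by write32()), then a count of ordinary 32-bit relocations followed by their offsets; without --realmode the old layout (a zero stop word, then the relocations) is unchanged. A minimal user-space reader for the new layout, assuming a hypothetical dump file relocs.bin captured from the tool's standard output:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Read one 32-bit little-endian word, mirroring write32() in relocs.c. */
static uint32_t read32(FILE *f)
{
	unsigned char buf[4];

	if (fread(buf, 1, 4, f) != 4) {
		perror("fread");
		exit(1);
	}
	return buf[0] | (buf[1] << 8) | ((uint32_t)buf[2] << 16) |
	       ((uint32_t)buf[3] << 24);
}

int main(void)
{
	FILE *f = fopen("relocs.bin", "rb");	/* hypothetical dump of the tool's output */
	uint32_t i, n;

	if (!f)
		return 1;

	n = read32(f);				/* count of 16-bit segment relocations */
	for (i = 0; i < n; i++)
		printf("seg reloc at 0x%08x\n", read32(f));

	n = read32(f);				/* count of 32-bit relocations */
	for (i = 0; i < n; i++)
		printf("reloc at 0x%08x\n", read32(f));

	fclose(f);
	return 0;
}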
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 4f51beb..95dccce 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -63,6 +63,7 @@
 #include <asm/stackprotector.h>
 #include <asm/hypervisor.h>
 #include <asm/mwait.h>
+#include <asm/pci_x86.h>
 
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
@@ -261,7 +262,8 @@
 
 static bool __init xen_check_mwait(void)
 {
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \
+	!defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
 	struct xen_platform_op op = {
 		.cmd			= XENPF_set_processor_pminfo,
 		.u.set_pminfo.id	= -1,
@@ -349,7 +351,6 @@
 	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
 	if ((cx & xsave_mask) != xsave_mask)
 		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
-
 	if (xen_check_mwait())
 		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
 }
@@ -809,9 +810,40 @@
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
+static unsigned long xen_set_apic_id(unsigned int x)
+{
+	WARN_ON(1);
+	return x;
+}
+static unsigned int xen_get_apic_id(unsigned long x)
+{
+	return ((x)>>24) & 0xFFu;
+}
 static u32 xen_apic_read(u32 reg)
 {
-	return 0;
+	struct xen_platform_op op = {
+		.cmd = XENPF_get_cpuinfo,
+		.interface_version = XENPF_INTERFACE_VERSION,
+		.u.pcpu_info.xen_cpuid = 0,
+	};
+	int ret = 0;
+
+	/* Shouldn't need this as APIC is turned off for PV, and we only
+	 * get called on the bootup processor. But just in case. */
+	if (!xen_initial_domain() || smp_processor_id())
+		return 0;
+
+	if (reg == APIC_LVR)
+		return 0x10;
+
+	if (reg != APIC_ID)
+		return 0;
+
+	ret = HYPERVISOR_dom0_op(&op);
+	if (ret)
+		return 0;
+
+	return op.u.pcpu_info.apic_id << 24;
 }
 
 static void xen_apic_write(u32 reg, u32 val)
@@ -849,6 +881,8 @@
 	apic->icr_write = xen_apic_icr_write;
 	apic->wait_icr_idle = xen_apic_wait_icr_idle;
 	apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
+	apic->set_apic_id = xen_set_apic_id;
+	apic->get_apic_id = xen_get_apic_id;
 }
 
 #endif
@@ -1365,8 +1399,10 @@
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
 	}
-		
-
+#ifdef CONFIG_PCI
+	/* PCI BIOS service won't work from a PV guest. */
+	pci_probe &= ~PCI_PROBE_BIOS;
+#endif
 	xen_raw_console_write("about to get started...\n");
 
 	xen_setup_runstate_info(0);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b8e2794..69f5857 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -353,8 +353,13 @@
 {
 	if (val & _PAGE_PRESENT) {
 		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+		unsigned long pfn = mfn_to_pfn(mfn);
+
 		pteval_t flags = val & PTE_FLAGS_MASK;
-		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+		if (unlikely(pfn == ~0))
+			val = flags & ~_PAGE_PRESENT;
+		else
+			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
 	}
 
 	return val;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 5fac691..0503c0c 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -178,6 +178,7 @@
 static void __init xen_filter_cpu_maps(void)
 {
 	int i, rc;
+	unsigned int subtract = 0;
 
 	if (!xen_initial_domain())
 		return;
@@ -192,8 +193,22 @@
 		} else {
 			set_cpu_possible(i, false);
 			set_cpu_present(i, false);
+			subtract++;
 		}
 	}
+#ifdef CONFIG_HOTPLUG_CPU
+	/* This is akin to using 'nr_cpus' on the Linux command line.
+	 * Which is OK as when we use 'dom0_max_vcpus=X' we can only
+	 * have up to X, while nr_cpu_ids is greater than X. This
+	 * normally is not a problem, except when CPU hotplugging
+	 * is involved and then there might be more than X CPUs
+	 * in the guest - which will not work as there is no
+	 * hypercall to expand the max number of VCPUs an already
+	 * running guest has. So cap it up to X. */
+	if (subtract)
+		nr_cpu_ids = nr_cpu_ids - subtract;
+#endif
+
 }
 
 static void __init xen_smp_prepare_boot_cpu(void)
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 79d7362..3e45aa0 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -96,7 +96,7 @@
 
 	/* check for unmasked and pending */
 	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-	jz 1f
+	jnz 1f
 2:	call check_events
 1:
 ENDPATCH(xen_restore_fl_direct)
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
index b90038e..a182a4e 100644
--- a/arch/xtensa/configs/common_defconfig
+++ b/arch/xtensa/configs/common_defconfig
@@ -333,11 +333,6 @@
 # CONFIG_S2IO is not set
 
 #
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
 # Wireless LAN (non-hamradio)
 #
 CONFIG_NET_RADIO=y
diff --git a/arch/xtensa/include/asm/hardirq.h b/arch/xtensa/include/asm/hardirq.h
index 26664ce..91695a1 100644
--- a/arch/xtensa/include/asm/hardirq.h
+++ b/arch/xtensa/include/asm/hardirq.h
@@ -11,9 +11,6 @@
 #ifndef _XTENSA_HARDIRQ_H
 #define _XTENSA_HARDIRQ_H
 
-void ack_bad_irq(unsigned int irq);
-#define ack_bad_irq ack_bad_irq
-
 #include <asm-generic/hardirq.h>
 
 #endif	/* _XTENSA_HARDIRQ_H */
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index d04cd3a..4beb43c 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -14,6 +14,7 @@
 #ifdef __KERNEL__
 #include <asm/byteorder.h>
 #include <asm/page.h>
+#include <linux/bug.h>
 #include <linux/kernel.h>
 
 #include <linux/types.h>
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index b69b000..d78869a 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -496,6 +496,7 @@
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 
 	if (signr > 0) {
+		int ret;
 
 		/* Are we from a system call? */
 
diff --git a/block/genhd.c b/block/genhd.c
index df9816e..9cf5583 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -743,7 +743,7 @@
 		struct hd_struct *part;
 		char name_buf[BDEVNAME_SIZE];
 		char devt_buf[BDEVT_SIZE];
-		u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1];
+		char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5];
 
 		/*
 		 * Don't show empty devices or things that have been
@@ -762,14 +762,16 @@
 		while ((part = disk_part_iter_next(&piter))) {
 			bool is_part0 = part == &disk->part0;
 
-			uuid[0] = 0;
+			uuid_buf[0] = '\0';
 			if (part->info)
-				part_unpack_uuid(part->info->uuid, uuid);
+				snprintf(uuid_buf, sizeof(uuid_buf), "%pU",
+					 part->info->uuid);
 
 			printk("%s%s %10llu %s %s", is_part0 ? "" : "  ",
 			       bdevt_str(part_devt(part), devt_buf),
 			       (unsigned long long)part->nr_sects >> 1,
-			       disk_name(disk, part->partno, name_buf), uuid);
+			       disk_name(disk, part->partno, name_buf),
+			       uuid_buf);
 			if (is_part0) {
 				if (disk->driverfs_dev != NULL &&
 				    disk->driverfs_dev->driver != NULL)
diff --git a/block/partitions/ibm.c b/block/partitions/ibm.c
index d513a07..1104aca 100644
--- a/block/partitions/ibm.c
+++ b/block/partitions/ibm.c
@@ -253,7 +253,7 @@
 				/* Are we not supposed to report this ? */
 				goto out_readerr;
 		} else
-			printk(KERN_WARNING "Warning, expected Label VOL1 not "
+			printk(KERN_INFO "Expected Label VOL1 not "
 			       "found, treating as CDL formatted Disk");
 
 	}
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 8d3a056..533de95 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -397,9 +397,9 @@
 	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
 	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
-		sizeof(struct crypto_report_blkcipher), &rblkcipher);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -478,9 +478,9 @@
 	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
 	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
-		sizeof(struct crypto_report_blkcipher), &rblkcipher);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/aead.c b/crypto/aead.c
index e4cb351..0b8121e 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -125,9 +125,9 @@
 	raead.maxauthsize = aead->maxauthsize;
 	raead.ivsize = aead->ivsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
-		sizeof(struct crypto_report_aead), &raead);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
+		    sizeof(struct crypto_report_aead), &raead))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -210,9 +210,9 @@
 	raead.maxauthsize = aead->maxauthsize;
 	raead.ivsize = aead->ivsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD,
-		sizeof(struct crypto_report_aead), &raead);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
+		    sizeof(struct crypto_report_aead), &raead))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 33bc9b6..3887856 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -409,9 +409,9 @@
 	rhash.blocksize = alg->cra_blocksize;
 	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
-		sizeof(struct crypto_report_hash), &rhash);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
+		    sizeof(struct crypto_report_hash), &rhash))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 4dd80c7..a8d85a1 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -508,9 +508,9 @@
 	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
 	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
-		sizeof(struct crypto_report_blkcipher), &rblkcipher);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index f1ea0a0..5a37ead 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -81,9 +81,9 @@
 	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
 	rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_CIPHER,
-		sizeof(struct crypto_report_cipher), &rcipher);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
+		    sizeof(struct crypto_report_cipher), &rcipher))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -96,9 +96,9 @@
 
 	snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
-		sizeof(struct crypto_report_comp), &rcomp);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+		    sizeof(struct crypto_report_comp), &rcomp))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -117,16 +117,16 @@
 	ualg->cru_flags = alg->cra_flags;
 	ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
 
-	NLA_PUT_U32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority);
-
+	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
+		goto nla_put_failure;
 	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
 		struct crypto_report_larval rl;
 
 		snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
 
-		NLA_PUT(skb, CRYPTOCFGA_REPORT_LARVAL,
-			sizeof(struct crypto_report_larval), &rl);
-
+		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
+			    sizeof(struct crypto_report_larval), &rl))
+			goto nla_put_failure;
 		goto out;
 	}
 
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index 2e458e5..04e083f 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -55,9 +55,9 @@
 
 	snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
-		sizeof(struct crypto_report_comp), &rpcomp);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+		    sizeof(struct crypto_report_comp), &rpcomp))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/rng.c b/crypto/rng.c
index 64f864f..f3b7894 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -69,9 +69,9 @@
 
 	rrng.seedsize = alg->cra_rng.seedsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_RNG,
-		sizeof(struct crypto_report_rng), &rrng);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_RNG,
+		    sizeof(struct crypto_report_rng), &rrng))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/crypto/shash.c b/crypto/shash.c
index 21fc12e..32067f4 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -534,9 +534,9 @@
 	rhash.blocksize = alg->cra_blocksize;
 	rhash.digestsize = salg->digestsize;
 
-	NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
-		sizeof(struct crypto_report_hash), &rhash);
-
+	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
+		    sizeof(struct crypto_report_hash), &rhash))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
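
All of the crypto hunks above apply the same mechanical conversion: the old NLA_PUT() macro hid a goto nla_put_failure behind the caller's back, while the replacement checks nla_put() explicitly. As a generic sketch of the resulting shape — CRYPTOCFGA_REPORT_FOO and struct crypto_report_foo are placeholders, not real kernel symbols:

#include <linux/crypto.h>
#include <net/netlink.h>

#define CRYPTOCFGA_REPORT_FOO	0x7f	/* hypothetical attribute type */

struct crypto_report_foo {
	char type[CRYPTO_MAX_ALG_NAME];
};

/* Report callback in the new style: the failure path is spelled out. */
static int crypto_report_foo(struct sk_buff *skb)
{
	struct crypto_report_foo rfoo;

	snprintf(rfoo.type, sizeof(rfoo.type), "%s", "foo");

	if (nla_put(skb, CRYPTOCFGA_REPORT_FOO, sizeof(rfoo), &rfoo))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}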
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3263b68..3188da3 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -250,6 +250,10 @@
 		return -ENODEV;
 	}
 
+	/* For D3cold we should execute _PS3, not _PS4. */
+	if (state == ACPI_STATE_D3_COLD)
+		object_name[3] = '3';
+
 	/*
 	 * Transition Power
 	 * ----------------
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 7049a7d..0500f71 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -631,7 +631,7 @@
 	 * We know a device's inferred power state when all the resources
 	 * required for a given D-state are 'on'.
 	 */
-	for (i = ACPI_STATE_D0; i < ACPI_STATE_D3; i++) {
+	for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
 		list = &device->power.states[i].resources;
 		if (list->count < 1)
 			continue;
@@ -660,7 +660,7 @@
 
 int acpi_power_transition(struct acpi_device *device, int state)
 {
-	int result;
+	int result = 0;
 
 	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
 		return -EINVAL;
@@ -679,8 +679,11 @@
 	 * (e.g. so the device doesn't lose power while transitioning).  Then,
 	 * we dereference all power resources used in the current list.
 	 */
-	result = acpi_power_on_list(&device->power.states[state].resources);
-	if (!result)
+	if (state < ACPI_STATE_D3_COLD)
+		result = acpi_power_on_list(
+			&device->power.states[state].resources);
+
+	if (!result && device->power.state < ACPI_STATE_D3_COLD)
 		acpi_power_off_list(
 			&device->power.states[device->power.state].resources);
 
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 767e2dc..85cbfdc 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -869,7 +869,7 @@
 	/*
 	 * Enumerate supported power management states
 	 */
-	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
+	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
 		struct acpi_device_power_state *ps = &device->power.states[i];
 		char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
 
@@ -884,21 +884,18 @@
 				acpi_bus_add_power_resource(ps->resources.handles[j]);
 		}
 
-		/* The exist of _PR3 indicates D3Cold support */
-		if (i == ACPI_STATE_D3) {
-			status = acpi_get_handle(device->handle, object_name, &handle);
-			if (ACPI_SUCCESS(status))
-				device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
-		}
-
 		/* Evaluate "_PSx" to see if we can do explicit sets */
 		object_name[2] = 'S';
 		status = acpi_get_handle(device->handle, object_name, &handle);
 		if (ACPI_SUCCESS(status))
 			ps->flags.explicit_set = 1;
 
-		/* State is valid if we have some power control */
-		if (ps->resources.count || ps->flags.explicit_set)
+		/*
+		 * State is valid if there are means to put the device into it.
+		 * D3hot is only valid if _PR3 present.
+		 */
+		if (ps->resources.count ||
+		    (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT))
 			ps->flags.valid = 1;
 
 		ps->power = -1;	/* Unknown - driver assigned */
@@ -911,6 +908,10 @@
 	device->power.states[ACPI_STATE_D3].flags.valid = 1;
 	device->power.states[ACPI_STATE_D3].power = 0;
 
+	/* Set D3cold's explicit_set flag if _PS3 exists. */
+	if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
+		device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;
+
 	acpi_bus_init_power(device);
 
 	return 0;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 1d661b5..eb6fd23 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,24 +28,34 @@
 #include "internal.h"
 #include "sleep.h"
 
+u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
 static unsigned int gts, bfs;
-module_param(gts, uint, 0644);
-module_param(bfs, uint, 0644);
+static int set_param_wake_flag(const char *val, struct kernel_param *kp)
+{
+	int ret = param_set_int(val, kp);
+
+	if (ret)
+		return ret;
+
+	if (kp->arg == (const char *)&gts) {
+		if (gts)
+			wake_sleep_flags |= ACPI_EXECUTE_GTS;
+		else
+			wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
+	}
+	if (kp->arg == (const char *)&bfs) {
+		if (bfs)
+			wake_sleep_flags |= ACPI_EXECUTE_BFS;
+		else
+			wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
+	}
+	return ret;
+}
+module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
+module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
 MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
 MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
 
-static u8 wake_sleep_flags(void)
-{
-	u8 flags = ACPI_NO_OPTIONAL_METHODS;
-
-	if (gts)
-		flags |= ACPI_EXECUTE_GTS;
-	if (bfs)
-		flags |= ACPI_EXECUTE_BFS;
-
-	return flags;
-}
-
 static u8 sleep_states[ACPI_S_STATE_COUNT];
 
 static void acpi_sleep_tts_switch(u32 acpi_state)
@@ -263,7 +273,6 @@
 {
 	acpi_status status = AE_OK;
 	u32 acpi_state = acpi_target_sleep_state;
-	u8 flags = wake_sleep_flags();
 	int error;
 
 	ACPI_FLUSH_CPU_CACHE();
@@ -271,7 +280,7 @@
 	switch (acpi_state) {
 	case ACPI_STATE_S1:
 		barrier();
-		status = acpi_enter_sleep_state(acpi_state, flags);
+		status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags);
 		break;
 
 	case ACPI_STATE_S3:
@@ -286,7 +295,7 @@
 	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
 
 	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(acpi_state, flags);
+	acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags);
 
 	/* ACPI 3.0 specs (P62) says that it's the responsibility
 	 * of the OSPM to clear the status bit [ implying that the
@@ -550,30 +559,27 @@
 
 static int acpi_hibernation_enter(void)
 {
-	u8 flags = wake_sleep_flags();
 	acpi_status status = AE_OK;
 
 	ACPI_FLUSH_CPU_CACHE();
 
 	/* This shouldn't return.  If it returns, we have a problem */
-	status = acpi_enter_sleep_state(ACPI_STATE_S4, flags);
+	status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags);
 	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
 
 	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
 }
 
 static void acpi_hibernation_leave(void)
 {
-	u8 flags = wake_sleep_flags();
-
 	/*
 	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
 	 * enable it here.
 	 */
 	acpi_enable();
 	/* Reprogram control registers and execute _BFS */
-	acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
+	acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
 	/* Check the hardware signature */
 	if (facs && s4_hardware_signature != facs->hardware_signature) {
 		printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -828,12 +834,10 @@
 
 static void acpi_power_off(void)
 {
-	u8 flags = wake_sleep_flags();
-
 	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
 	printk(KERN_DEBUG "%s called\n", __func__);
 	local_irq_disable();
-	acpi_enter_sleep_state(ACPI_STATE_S5, flags);
+	acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags);
 }
 
 /*
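
The sleep.c change above turns gts and bfs from values that were folded into a flags byte on every suspend into parameters whose setter updates wake_sleep_flags once, at write time, via module_param_call(). A stripped-down sketch of that pattern, with invented my_opt/my_flags names:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>

#define MY_FEATURE_FLAG	0x01		/* hypothetical flag bit */

static u8 my_flags;			/* consumed elsewhere, like wake_sleep_flags */
static unsigned int my_opt;		/* raw parameter value */

/* Parse the value as usual, then fold the result into the flags word once. */
static int set_my_opt(const char *val, struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);

	if (ret)
		return ret;

	if (my_opt)
		my_flags |= MY_FEATURE_FLAG;
	else
		my_flags &= ~MY_FEATURE_FLAG;
	return 0;
}
module_param_call(my_opt, set_my_opt, param_get_int, &my_opt, 0644);
MODULE_PARM_DESC(my_opt, "Enable the hypothetical feature flag.");

Writing the parameter on the kernel command line or through /sys/module/.../parameters then updates the flag word immediately, so nothing needs to be recomputed in the suspend path.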
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index cc27322..b7e7285 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -527,9 +527,9 @@
 	if (ret)
 		goto err_release;
 
-	if (dev->irq[0] && dev->irq[0] != NO_IRQ)
+	if (dev->irq[0])
 		ret = device_create_file(&dev->dev, &dev_attr_irq0);
-	if (ret == 0 && dev->irq[1] && dev->irq[1] != NO_IRQ)
+	if (ret == 0 && dev->irq[1])
 		ret = device_create_file(&dev->dev, &dev_attr_irq1);
 	if (ret == 0)
 		return ret;
@@ -543,6 +543,55 @@
 }
 EXPORT_SYMBOL_GPL(amba_device_add);
 
+static struct amba_device *
+amba_aphb_device_add(struct device *parent, const char *name,
+		     resource_size_t base, size_t size, int irq1, int irq2,
+		     void *pdata, unsigned int periphid, u64 dma_mask)
+{
+	struct amba_device *dev;
+	int ret;
+
+	dev = amba_device_alloc(name, base, size);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	dev->dma_mask = dma_mask;
+	dev->dev.coherent_dma_mask = dma_mask;
+	dev->irq[0] = irq1;
+	dev->irq[1] = irq2;
+	dev->periphid = periphid;
+	dev->dev.platform_data = pdata;
+	dev->dev.parent = parent;
+
+	ret = amba_device_add(dev, &iomem_resource);
+	if (ret) {
+		amba_device_put(dev);
+		return ERR_PTR(ret);
+	}
+
+	return dev;
+}
+
+struct amba_device *
+amba_apb_device_add(struct device *parent, const char *name,
+		    resource_size_t base, size_t size, int irq1, int irq2,
+		    void *pdata, unsigned int periphid)
+{
+	return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
+				    periphid, 0);
+}
+EXPORT_SYMBOL_GPL(amba_apb_device_add);
+
+struct amba_device *
+amba_ahb_device_add(struct device *parent, const char *name,
+		    resource_size_t base, size_t size, int irq1, int irq2,
+		    void *pdata, unsigned int periphid)
+{
+	return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
+				    periphid, ~0ULL);
+}
+EXPORT_SYMBOL_GPL(amba_ahb_device_add);
+
 static void amba_device_initialize(struct amba_device *dev, const char *name)
 {
 	device_initialize(&dev->dev);
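
The new amba_apb_device_add()/amba_ahb_device_add() helpers wrap the allocate/configure/add sequence (differing only in the DMA mask) for callers that used to open-code it. A board-code style sketch; the base address, size, IRQ and peripheral ID below are invented for illustration:

#include <linux/amba/bus.h>
#include <linux/err.h>
#include <linux/init.h>

static int __init board_add_uart(struct device *parent)
{
	struct amba_device *adev;

	/* Illustrative numbers only: 4K APB window, one IRQ, PL011-like periphid. */
	adev = amba_apb_device_add(parent, "uart0", 0x10009000, 0x1000,
				   32, 0, NULL, 0x00041011);
	if (IS_ERR(adev))
		return PTR_ERR(adev);

	return 0;
}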
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 79a1e9d..ebaf67e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -394,6 +394,8 @@
 	  .driver_data = board_ahci_yes_fbs },			/* 88se9128 */
 	{ PCI_DEVICE(0x1b4b, 0x9125),
 	  .driver_data = board_ahci_yes_fbs },			/* 88se9125 */
+	{ PCI_DEVICE(0x1b4b, 0x917a),
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 */
 	{ PCI_DEVICE(0x1b4b, 0x91a3),
 	  .driver_data = board_ahci_yes_fbs },
 
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 0c86c77..9e419e1 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -280,6 +280,7 @@
 
 static const struct of_device_id ahci_of_match[] = {
 	{ .compatible = "calxeda,hb-ahci", },
+	{ .compatible = "snps,spear-ahci", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 28db50b..23763a1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -95,7 +95,7 @@
 static void ata_dev_xfermask(struct ata_device *dev);
 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
 
-atomic_t ata_print_id = ATOMIC_INIT(1);
+atomic_t ata_print_id = ATOMIC_INIT(0);
 
 struct ata_force_param {
 	const char	*name;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c61316e..d1fbd59 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3501,7 +3501,8 @@
 	u64 now = get_jiffies_64();
 	int *trials = void_arg;
 
-	if (ent->timestamp < now - min(now, interval))
+	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
+	    (ent->timestamp < now - min(now, interval)))
 		return -1;
 
 	(*trials)++;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 93dabdc..2222635 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3399,7 +3399,8 @@
 		 */
 		shost->max_host_blocked = 1;
 
-		rc = scsi_add_host(ap->scsi_host, &ap->tdev);
+		rc = scsi_add_host_with_dma(ap->scsi_host,
+						&ap->tdev, ap->host->dev);
 		if (rc)
 			goto err_add;
 	}
@@ -3838,18 +3839,25 @@
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_stop);
 
-int ata_sas_async_port_init(struct ata_port *ap)
+/**
+ * ata_sas_async_probe - simply schedule probing and return
+ * @ap: Port to probe
+ *
+ * For batch scheduling of probe for sas attached ata devices, assumes
+ * the port has already been through ata_sas_port_init()
+ */
+void ata_sas_async_probe(struct ata_port *ap)
 {
-	int rc = ap->ops->port_start(ap);
-
-	if (!rc) {
-		ap->print_id = atomic_inc_return(&ata_print_id);
-		__ata_port_probe(ap);
-	}
-
-	return rc;
+	__ata_port_probe(ap);
 }
-EXPORT_SYMBOL_GPL(ata_sas_async_port_init);
+EXPORT_SYMBOL_GPL(ata_sas_async_probe);
+
+int ata_sas_sync_probe(struct ata_port *ap)
+{
+	return ata_port_probe(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
+
 
 /**
  *	ata_sas_port_init - Initialize a SATA device
@@ -3866,12 +3874,10 @@
 {
 	int rc = ap->ops->port_start(ap);
 
-	if (!rc) {
-		ap->print_id = atomic_inc_return(&ata_print_id);
-		rc = ata_port_probe(ap);
-	}
-
-	return rc;
+	if (rc)
+		return rc;
+	ap->print_id = atomic_inc_return(&ata_print_id);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_init);
 
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index fc2db2a..3239517 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -943,9 +943,9 @@
 
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
-#endif
 
 static struct platform_driver arasan_cf_driver = {
 	.probe		= arasan_cf_probe,
@@ -953,9 +953,7 @@
 	.driver		= {
 		.name	= DRIVER_NAME,
 		.owner	= THIS_MODULE,
-#ifdef CONFIG_PM
 		.pm	= &arasan_cf_pm_ops,
-#endif
 	},
 };
 
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index f8f41e0..89b30f3 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -802,7 +802,7 @@
     }
     // cast needed as there is no %? for pointer differences
     PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
-	    skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
+	    skb, skb->head, (long) skb_end_offset(skb));
     rx.handle = virt_to_bus (skb);
     rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
     if (rx_give (dev, &rx, pool))
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 75fd691..7d01c2a 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2182,7 +2182,6 @@
     default:
       PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
       return -EINVAL;
-      break;
   }
   
   // TX traffic parameters
@@ -2357,7 +2356,6 @@
       default: {
 	PRINTD (DBG_QOS, "unsupported TX traffic class");
 	return -EINVAL;
-	break;
       }
     }
   }
@@ -2433,7 +2431,6 @@
       default: {
 	PRINTD (DBG_QOS, "unsupported RX traffic class");
 	return -EINVAL;
-	break;
       }
     }
   }
@@ -2581,7 +2578,6 @@
 //	  break;
 	default:
 	  return -ENOPROTOOPT;
-	  break;
       };
       break;
   }
@@ -2601,7 +2597,6 @@
 //	  break;
 	default:
 	  return -ENOPROTOOPT;
-	  break;
       };
       break;
   }
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1c05212..8974bd2b 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1258,7 +1258,7 @@
 	tail = readl(SAR_REG_RAWCT);
 
 	pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
-				    skb_end_pointer(queue) - queue->head - 16,
+				    skb_end_offset(queue) - 16,
 				    PCI_DMA_FROMDEVICE);
 
 	while (head != tail) {
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 0f6c7fb..9ef0a53 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -14,5 +14,8 @@
 config REGMAP_SPI
 	tristate
 
+config REGMAP_MMIO
+	tristate
+
 config REGMAP_IRQ
 	bool
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index defd579..5e75d1b 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -3,4 +3,5 @@
 obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
 obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
 obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
 obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index fcafc5b..b986b86 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -26,21 +26,30 @@
 	size_t val_bytes;
 	void (*format_write)(struct regmap *map,
 			     unsigned int reg, unsigned int val);
-	void (*format_reg)(void *buf, unsigned int reg);
-	void (*format_val)(void *buf, unsigned int val);
+	void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
+	void (*format_val)(void *buf, unsigned int val, unsigned int shift);
 	unsigned int (*parse_val)(void *buf);
 };
 
+typedef void (*regmap_lock)(struct regmap *map);
+typedef void (*regmap_unlock)(struct regmap *map);
+
 struct regmap {
-	struct mutex lock;
+	struct mutex mutex;
+	spinlock_t spinlock;
+	regmap_lock lock;
+	regmap_unlock unlock;
 
 	struct device *dev; /* Device we do I/O on */
 	void *work_buf;     /* Scratch buffer used to format I/O */
 	struct regmap_format format;  /* Buffer format */
 	const struct regmap_bus *bus;
+	void *bus_context;
+	const char *name;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs;
+	const char *debugfs_name;
 #endif
 
 	unsigned int max_register;
@@ -52,6 +61,10 @@
 	u8 read_flag_mask;
 	u8 write_flag_mask;
 
+	/* number of bits to (left) shift the reg value when formatting*/
+	int reg_shift;
+	int reg_stride;
+
 	/* regcache specific members */
 	const struct regcache_ops *cache_ops;
 	enum regcache_type cache_type;
@@ -79,6 +92,9 @@
 
 	struct reg_default *patch;
 	int patch_regs;
+
+	/* if set, converts bulk rw to single rw */
+	bool use_single_rw;
 };
 
 struct regcache_ops {
@@ -101,11 +117,11 @@
 
 #ifdef CONFIG_DEBUG_FS
 extern void regmap_debugfs_initcall(void);
-extern void regmap_debugfs_init(struct regmap *map);
+extern void regmap_debugfs_init(struct regmap *map, const char *name);
 extern void regmap_debugfs_exit(struct regmap *map);
 #else
 static inline void regmap_debugfs_initcall(void) { }
-static inline void regmap_debugfs_init(struct regmap *map) { }
+static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
 static inline void regmap_debugfs_exit(struct regmap *map) { }
 #endif
 
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 483b06d..afd6aa9 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -108,7 +108,7 @@
 static inline int regcache_lzo_get_blkindex(struct regmap *map,
 					    unsigned int reg)
 {
-	return (reg * map->cache_word_size) /
+	return ((reg / map->reg_stride) * map->cache_word_size) /
 		DIV_ROUND_UP(map->cache_size_raw,
 			     regcache_lzo_block_count(map));
 }
@@ -116,9 +116,10 @@
 static inline int regcache_lzo_get_blkpos(struct regmap *map,
 					  unsigned int reg)
 {
-	return reg % (DIV_ROUND_UP(map->cache_size_raw,
-				   regcache_lzo_block_count(map)) /
-		      map->cache_word_size);
+	return (reg / map->reg_stride) %
+		    (DIV_ROUND_UP(map->cache_size_raw,
+				  regcache_lzo_block_count(map)) /
+		     map->cache_word_size);
 }
 
 static inline int regcache_lzo_get_blksize(struct regmap *map)
@@ -322,7 +323,7 @@
 	}
 
 	/* set the bit so we know we have to sync this register */
-	set_bit(reg, lzo_block->sync_bmp);
+	set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
 	kfree(tmp_dst);
 	kfree(lzo_block->src);
 	return 0;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 92b779e..e6732cf 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -39,11 +39,12 @@
 };
 
 static inline void regcache_rbtree_get_base_top_reg(
+	struct regmap *map,
 	struct regcache_rbtree_node *rbnode,
 	unsigned int *base, unsigned int *top)
 {
 	*base = rbnode->base_reg;
-	*top = rbnode->base_reg + rbnode->blklen - 1;
+	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
 }
 
 static unsigned int regcache_rbtree_get_register(
@@ -70,7 +71,8 @@
 
 	rbnode = rbtree_ctx->cached_rbnode;
 	if (rbnode) {
-		regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
+		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+						 &top_reg);
 		if (reg >= base_reg && reg <= top_reg)
 			return rbnode;
 	}
@@ -78,7 +80,8 @@
 	node = rbtree_ctx->root.rb_node;
 	while (node) {
 		rbnode = container_of(node, struct regcache_rbtree_node, node);
-		regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
+		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+						 &top_reg);
 		if (reg >= base_reg && reg <= top_reg) {
 			rbtree_ctx->cached_rbnode = rbnode;
 			return rbnode;
@@ -92,7 +95,7 @@
 	return NULL;
 }
 
-static int regcache_rbtree_insert(struct rb_root *root,
+static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
 				  struct regcache_rbtree_node *rbnode)
 {
 	struct rb_node **new, *parent;
@@ -106,7 +109,7 @@
 		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
 					  node);
 		/* base and top registers of the current rbnode */
-		regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
+		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
 						 &top_reg_tmp);
 		/* base register of the rbnode to be added */
 		base_reg = rbnode->base_reg;
@@ -138,19 +141,20 @@
 	unsigned int base, top;
 	int nodes = 0;
 	int registers = 0;
-	int average;
+	int this_registers, average;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	for (node = rb_first(&rbtree_ctx->root); node != NULL;
 	     node = rb_next(node)) {
 		n = container_of(node, struct regcache_rbtree_node, node);
 
-		regcache_rbtree_get_base_top_reg(n, &base, &top);
-		seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);
+		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
+		this_registers = ((top - base) / map->reg_stride) + 1;
+		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);
 
 		nodes++;
-		registers += top - base + 1;
+		registers += this_registers;
 	}
 
 	if (nodes)
@@ -161,7 +165,7 @@
 	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
 		   nodes, registers, average);
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return 0;
 }
@@ -255,7 +259,7 @@
 
 	rbnode = regcache_rbtree_lookup(map, reg);
 	if (rbnode) {
-		reg_tmp = reg - rbnode->base_reg;
+		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
 		*value = regcache_rbtree_get_register(rbnode, reg_tmp,
 						      map->cache_word_size);
 	} else {
@@ -310,7 +314,7 @@
 	 */
 	rbnode = regcache_rbtree_lookup(map, reg);
 	if (rbnode) {
-		reg_tmp = reg - rbnode->base_reg;
+		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
 		val = regcache_rbtree_get_register(rbnode, reg_tmp,
 						   map->cache_word_size);
 		if (val == value)
@@ -321,13 +325,15 @@
 		/* look for an adjacent register to the one we are about to add */
 		for (node = rb_first(&rbtree_ctx->root); node;
 		     node = rb_next(node)) {
-			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node);
+			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
+					      node);
 			for (i = 0; i < rbnode_tmp->blklen; i++) {
-				reg_tmp = rbnode_tmp->base_reg + i;
-				if (abs(reg_tmp - reg) != 1)
+				reg_tmp = rbnode_tmp->base_reg +
+						(i * map->reg_stride);
+				if (abs(reg_tmp - reg) != map->reg_stride)
 					continue;
 				/* decide where in the block to place our register */
-				if (reg_tmp + 1 == reg)
+				if (reg_tmp + map->reg_stride == reg)
 					pos = i + 1;
 				else
 					pos = i;
@@ -357,7 +363,7 @@
 			return -ENOMEM;
 		}
 		regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
-		regcache_rbtree_insert(&rbtree_ctx->root, rbnode);
+		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
 		rbtree_ctx->cached_rbnode = rbnode;
 	}
 
@@ -397,7 +403,7 @@
 			end = rbnode->blklen;
 
 		for (i = base; i < end; i++) {
-			regtmp = rbnode->base_reg + i;
+			regtmp = rbnode->base_reg + (i * map->reg_stride);
 			val = regcache_rbtree_get_register(rbnode, i,
 							   map->cache_word_size);
 
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 74b6909..835883b 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -59,7 +59,7 @@
 	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
 		val = regcache_get_val(map->reg_defaults_raw,
 				       i, map->cache_word_size);
-		if (regmap_volatile(map, i))
+		if (regmap_volatile(map, i * map->reg_stride))
 			continue;
 		count++;
 	}
@@ -76,9 +76,9 @@
 	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
 		val = regcache_get_val(map->reg_defaults_raw,
 				       i, map->cache_word_size);
-		if (regmap_volatile(map, i))
+		if (regmap_volatile(map, i * map->reg_stride))
 			continue;
-		map->reg_defaults[j].reg = i;
+		map->reg_defaults[j].reg = i * map->reg_stride;
 		map->reg_defaults[j].def = val;
 		j++;
 	}
@@ -98,6 +98,10 @@
 	int i;
 	void *tmp_buf;
 
+	for (i = 0; i < config->num_reg_defaults; i++)
+		if (config->reg_defaults[i].reg % map->reg_stride)
+			return -EINVAL;
+
 	if (map->cache_type == REGCACHE_NONE) {
 		map->cache_bypass = true;
 		return 0;
@@ -264,7 +268,7 @@
 
 	BUG_ON(!map->cache_ops || !map->cache_ops->sync);
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 	/* Remember the initial bypass state */
 	bypass = map->cache_bypass;
 	dev_dbg(map->dev, "Syncing %s cache\n",
@@ -278,6 +282,10 @@
 	/* Apply any patch first */
 	map->cache_bypass = 1;
 	for (i = 0; i < map->patch_regs; i++) {
+		if (map->patch[i].reg % map->reg_stride) {
+			ret = -EINVAL;
+			goto out;
+		}
 		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
 		if (ret != 0) {
 			dev_err(map->dev, "Failed to write %x = %x: %d\n",
@@ -296,7 +304,7 @@
 	trace_regcache_sync(map->dev, name, "stop");
 	/* Restore the bypass state */
 	map->cache_bypass = bypass;
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -323,7 +331,7 @@
 
 	BUG_ON(!map->cache_ops || !map->cache_ops->sync);
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	/* Remember the initial bypass state */
 	bypass = map->cache_bypass;
@@ -342,7 +350,7 @@
 	trace_regcache_sync(map->dev, name, "stop region");
 	/* Restore the bypass state */
 	map->cache_bypass = bypass;
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -362,11 +370,11 @@
  */
 void regcache_cache_only(struct regmap *map, bool enable)
 {
-	mutex_lock(&map->lock);
+	map->lock(map);
 	WARN_ON(map->cache_bypass && enable);
 	map->cache_only = enable;
 	trace_regmap_cache_only(map->dev, enable);
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
 
@@ -381,9 +389,9 @@
  */
 void regcache_mark_dirty(struct regmap *map)
 {
-	mutex_lock(&map->lock);
+	map->lock(map);
 	map->cache_dirty = true;
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 }
 EXPORT_SYMBOL_GPL(regcache_mark_dirty);
 
@@ -400,11 +408,11 @@
  */
 void regcache_cache_bypass(struct regmap *map, bool enable)
 {
-	mutex_lock(&map->lock);
+	map->lock(map);
 	WARN_ON(map->cache_only && enable);
 	map->cache_bypass = enable;
 	trace_regmap_cache_bypass(map->dev, enable);
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
 
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 251eb70..bb1ff17 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -80,7 +80,7 @@
 	val_len = 2 * map->format.val_bytes;
 	tot_len = reg_len + val_len + 3;      /* : \n */
 
-	for (i = 0; i < map->max_register + 1; i++) {
+	for (i = 0; i <= map->max_register; i += map->reg_stride) {
 		if (!regmap_readable(map, i))
 			continue;
 
@@ -197,7 +197,7 @@
 	reg_len = regmap_calc_reg_len(map->max_register, buf, count);
 	tot_len = reg_len + 10; /* ': R W V P\n' */
 
-	for (i = 0; i < map->max_register + 1; i++) {
+	for (i = 0; i <= map->max_register; i += map->reg_stride) {
 		/* Ignore registers which are neither readable nor writable */
 		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
 			continue;
@@ -242,10 +242,17 @@
 	.llseek = default_llseek,
 };
 
-void regmap_debugfs_init(struct regmap *map)
+void regmap_debugfs_init(struct regmap *map, const char *name)
 {
-	map->debugfs = debugfs_create_dir(dev_name(map->dev),
-					  regmap_debugfs_root);
+	if (name) {
+		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+					      dev_name(map->dev), name);
+		name = map->debugfs_name;
+	} else {
+		name = dev_name(map->dev);
+	}
+
+	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
 	if (!map->debugfs) {
 		dev_warn(map->dev, "Failed to create debugfs directory\n");
 		return;
@@ -274,6 +281,7 @@
 void regmap_debugfs_exit(struct regmap *map)
 {
 	debugfs_remove_recursive(map->debugfs);
+	kfree(map->debugfs_name);
 }
 
 void regmap_debugfs_initcall(void)
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 9a3a8c5..5f6b247 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -15,8 +15,9 @@
 #include <linux/module.h>
 #include <linux/init.h>
 
-static int regmap_i2c_write(struct device *dev, const void *data, size_t count)
+static int regmap_i2c_write(void *context, const void *data, size_t count)
 {
+	struct device *dev = context;
 	struct i2c_client *i2c = to_i2c_client(dev);
 	int ret;
 
@@ -29,10 +30,11 @@
 		return -EIO;
 }
 
-static int regmap_i2c_gather_write(struct device *dev,
+static int regmap_i2c_gather_write(void *context,
 				   const void *reg, size_t reg_size,
 				   const void *val, size_t val_size)
 {
+	struct device *dev = context;
 	struct i2c_client *i2c = to_i2c_client(dev);
 	struct i2c_msg xfer[2];
 	int ret;
@@ -62,10 +64,11 @@
 		return -EIO;
 }
 
-static int regmap_i2c_read(struct device *dev,
+static int regmap_i2c_read(void *context,
 			   const void *reg, size_t reg_size,
 			   void *val, size_t val_size)
 {
+	struct device *dev = context;
 	struct i2c_client *i2c = to_i2c_client(dev);
 	struct i2c_msg xfer[2];
 	int ret;
@@ -107,7 +110,7 @@
 struct regmap *regmap_init_i2c(struct i2c_client *i2c,
 			       const struct regmap_config *config)
 {
-	return regmap_init(&i2c->dev, &regmap_i2c, config);
+	return regmap_init(&i2c->dev, &regmap_i2c, &i2c->dev, config);
 }
 EXPORT_SYMBOL_GPL(regmap_init_i2c);
 
@@ -124,7 +127,7 @@
 struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
 				    const struct regmap_config *config)
 {
-	return devm_regmap_init(&i2c->dev, &regmap_i2c, config);
+	return devm_regmap_init(&i2c->dev, &regmap_i2c, &i2c->dev, config);
 }
 EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
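
As the I2C conversion shows, the bus callbacks now receive an opaque context (here simply &i2c->dev) instead of a struct device, and regmap_init()/devm_regmap_init() grow a bus_context argument to carry it. A hedged sketch of a custom bus threading its own state through that pointer — my_bus, my_bus_ctx and the MMIO-style copy helpers are invented for illustration and roughly mirror what the new regmap-mmio bus below does natively:

#include <linux/io.h>
#include <linux/regmap.h>

/* Hypothetical transport: a device window reached through 'context'. */
struct my_bus_ctx {
	void __iomem *base;
};

static int my_bus_write(void *context, const void *data, size_t count)
{
	struct my_bus_ctx *ctx = context;
	u32 offset = be32_to_cpup(data);	/* first 4 bytes: formatted register */

	memcpy_toio(ctx->base + offset, data + 4, count - 4);
	return 0;
}

static int my_bus_read(void *context, const void *reg, size_t reg_size,
		       void *val, size_t val_size)
{
	struct my_bus_ctx *ctx = context;
	u32 offset = be32_to_cpup(reg);

	memcpy_fromio(val, ctx->base + offset, val_size);
	return 0;
}

static struct regmap_bus my_bus = {
	.write	= my_bus_write,
	.read	= my_bus_read,
};

A caller then passes its state as the third argument, e.g. map = regmap_init(dev, &my_bus, my_ctx, &my_config), and the same pointer comes back in every read/write callback.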
 
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1befaa7..4fac4b9 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -15,6 +15,7 @@
 #include <linux/regmap.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/slab.h>
 
 #include "internal.h"
@@ -26,18 +27,20 @@
 	struct regmap_irq_chip *chip;
 
 	int irq_base;
+	struct irq_domain *domain;
 
-	void *status_reg_buf;
 	unsigned int *status_buf;
 	unsigned int *mask_buf;
 	unsigned int *mask_buf_def;
+
+	unsigned int irq_reg_stride;
 };
 
 static inline const
 struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
 				     int irq)
 {
-	return &data->chip->irqs[irq - data->irq_base];
+	return &data->chip->irqs[irq];
 }
 
 static void regmap_irq_lock(struct irq_data *data)
@@ -50,6 +53,7 @@
 static void regmap_irq_sync_unlock(struct irq_data *data)
 {
 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	struct regmap *map = d->map;
 	int i, ret;
 
 	/*
@@ -58,11 +62,13 @@
 	 * suppress pointless writes.
 	 */
 	for (i = 0; i < d->chip->num_regs; i++) {
-		ret = regmap_update_bits(d->map, d->chip->mask_base + i,
+		ret = regmap_update_bits(d->map, d->chip->mask_base +
+						(i * map->reg_stride *
+						d->irq_reg_stride),
 					 d->mask_buf_def[i], d->mask_buf[i]);
 		if (ret != 0)
 			dev_err(d->map->dev, "Failed to sync masks in %x\n",
-				d->chip->mask_base + i);
+				d->chip->mask_base + (i * map->reg_stride));
 	}
 
 	mutex_unlock(&d->lock);
@@ -71,17 +77,19 @@
 static void regmap_irq_enable(struct irq_data *data)
 {
 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
-	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+	struct regmap *map = d->map;
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
 
-	d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask;
+	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
 }
 
 static void regmap_irq_disable(struct irq_data *data)
 {
 	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
-	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+	struct regmap *map = d->map;
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
 
-	d->mask_buf[irq_data->reg_offset] |= irq_data->mask;
+	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
 }
 
 static struct irq_chip regmap_irq_chip = {
@@ -98,18 +106,8 @@
 	struct regmap_irq_chip *chip = data->chip;
 	struct regmap *map = data->map;
 	int ret, i;
-	u8 *buf8 = data->status_reg_buf;
-	u16 *buf16 = data->status_reg_buf;
-	u32 *buf32 = data->status_reg_buf;
 	bool handled = false;
 
-	ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf,
-			       chip->num_regs);
-	if (ret != 0) {
-		dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
-		return IRQ_NONE;
-	}
-
 	/*
 	 * Ignore masked IRQs and ack if we need to; we ack early so
 	 * there is no race between handling and acknowledging the
@@ -118,36 +116,34 @@
 	 * doing a write per register.
 	 */
 	for (i = 0; i < data->chip->num_regs; i++) {
-		switch (map->format.val_bytes) {
-		case 1:
-			data->status_buf[i] = buf8[i];
-			break;
-		case 2:
-			data->status_buf[i] = buf16[i];
-			break;
-		case 4:
-			data->status_buf[i] = buf32[i];
-			break;
-		default:
-			BUG();
+		ret = regmap_read(map, chip->status_base + (i * map->reg_stride
+				   * data->irq_reg_stride),
+				   &data->status_buf[i]);
+
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to read IRQ status: %d\n",
+					ret);
 			return IRQ_NONE;
 		}
 
 		data->status_buf[i] &= ~data->mask_buf[i];
 
 		if (data->status_buf[i] && chip->ack_base) {
-			ret = regmap_write(map, chip->ack_base + i,
+			ret = regmap_write(map, chip->ack_base +
+						(i * map->reg_stride *
+						data->irq_reg_stride),
 					   data->status_buf[i]);
 			if (ret != 0)
 				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
-					chip->ack_base + i, ret);
+					chip->ack_base + (i * map->reg_stride),
+					ret);
 		}
 	}
 
 	for (i = 0; i < chip->num_irqs; i++) {
-		if (data->status_buf[chip->irqs[i].reg_offset] &
-		    chip->irqs[i].mask) {
-			handle_nested_irq(data->irq_base + i);
+		if (data->status_buf[chip->irqs[i].reg_offset /
+				     map->reg_stride] & chip->irqs[i].mask) {
+			handle_nested_irq(irq_find_mapping(data->domain, i));
 			handled = true;
 		}
 	}
@@ -158,6 +154,31 @@
 		return IRQ_NONE;
 }
 
+static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
+			  irq_hw_number_t hw)
+{
+	struct regmap_irq_chip_data *data = h->host_data;
+
+	irq_set_chip_data(virq, data);
+	irq_set_chip_and_handler(virq, &regmap_irq_chip, handle_edge_irq);
+	irq_set_nested_thread(virq, 1);
+
+	/* ARM needs us to explicitly flag the IRQ as valid
+	 * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+	set_irq_flags(virq, IRQF_VALID);
+#else
+	irq_set_noprobe(virq);
+#endif
+
+	return 0;
+}
+
+static struct irq_domain_ops regmap_domain_ops = {
+	.map	= regmap_irq_map,
+	.xlate	= irq_domain_xlate_twocell,
+};
+
 /**
  * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
  *
@@ -178,30 +199,37 @@
 			struct regmap_irq_chip_data **data)
 {
 	struct regmap_irq_chip_data *d;
-	int cur_irq, i;
+	int i;
 	int ret = -ENOMEM;
 
-	irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
-	if (irq_base < 0) {
-		dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
-			 irq_base);
-		return irq_base;
+	for (i = 0; i < chip->num_irqs; i++) {
+		if (chip->irqs[i].reg_offset % map->reg_stride)
+			return -EINVAL;
+		if (chip->irqs[i].reg_offset / map->reg_stride >=
+		    chip->num_regs)
+			return -EINVAL;
+	}
+
+	if (irq_base) {
+		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
+		if (irq_base < 0) {
+			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
+				 irq_base);
+			return irq_base;
+		}
 	}
 
 	d = kzalloc(sizeof(*d), GFP_KERNEL);
 	if (!d)
 		return -ENOMEM;
 
+	*data = d;
+
 	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
 				GFP_KERNEL);
 	if (!d->status_buf)
 		goto err_alloc;
 
-	d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
-				    GFP_KERNEL);
-	if (!d->status_reg_buf)
-		goto err_alloc;
-
 	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
 			      GFP_KERNEL);
 	if (!d->mask_buf)
@@ -215,54 +243,59 @@
 	d->map = map;
 	d->chip = chip;
 	d->irq_base = irq_base;
+
+	if (chip->irq_reg_stride)
+		d->irq_reg_stride = chip->irq_reg_stride;
+	else
+		d->irq_reg_stride = 1;
+
 	mutex_init(&d->lock);
 
 	for (i = 0; i < chip->num_irqs; i++)
-		d->mask_buf_def[chip->irqs[i].reg_offset]
+		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
 			|= chip->irqs[i].mask;
 
 	/* Mask all the interrupts by default */
 	for (i = 0; i < chip->num_regs; i++) {
 		d->mask_buf[i] = d->mask_buf_def[i];
-		ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]);
+		ret = regmap_write(map, chip->mask_base + (i * map->reg_stride
+				   * d->irq_reg_stride),
+				   d->mask_buf[i]);
 		if (ret != 0) {
 			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
-				chip->mask_base + i, ret);
+				chip->mask_base + (i * map->reg_stride), ret);
 			goto err_alloc;
 		}
 	}
 
-	/* Register them with genirq */
-	for (cur_irq = irq_base;
-	     cur_irq < chip->num_irqs + irq_base;
-	     cur_irq++) {
-		irq_set_chip_data(cur_irq, d);
-		irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
-					 handle_edge_irq);
-		irq_set_nested_thread(cur_irq, 1);
-
-		/* ARM needs us to explicitly flag the IRQ as valid
-		 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-		set_irq_flags(cur_irq, IRQF_VALID);
-#else
-		irq_set_noprobe(cur_irq);
-#endif
+	if (irq_base)
+		d->domain = irq_domain_add_legacy(map->dev->of_node,
+						  chip->num_irqs, irq_base, 0,
+						  &regmap_domain_ops, d);
+	else
+		d->domain = irq_domain_add_linear(map->dev->of_node,
+						  chip->num_irqs,
+						  &regmap_domain_ops, d);
+	if (!d->domain) {
+		dev_err(map->dev, "Failed to create IRQ domain\n");
+		ret = -ENOMEM;
+		goto err_alloc;
 	}
 
 	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
 				   chip->name, d);
 	if (ret != 0) {
 		dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
-		goto err_alloc;
+		goto err_domain;
 	}
 
 	return 0;
 
+err_domain:
+	/* Should really dispose of the domain but... */
 err_alloc:
 	kfree(d->mask_buf_def);
 	kfree(d->mask_buf);
-	kfree(d->status_reg_buf);
 	kfree(d->status_buf);
 	kfree(d);
 	return ret;
@@ -281,9 +314,9 @@
 		return;
 
 	free_irq(irq, d);
+	/* We should unmap the domain but... */
 	kfree(d->mask_buf_def);
 	kfree(d->mask_buf);
-	kfree(d->status_reg_buf);
 	kfree(d->status_buf);
 	kfree(d);
 }
@@ -298,6 +331,21 @@
  */
 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
 {
+	WARN_ON(!data->irq_base);
 	return data->irq_base;
 }
 EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
+
+/**
+ * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
+ *
+ * Useful for drivers to request their own IRQs.
+ *
+ * @data: regmap_irq controller to operate on.
+ * @irq: index of the interrupt requested in the chip IRQs
+ */
+int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
+{
+	return irq_create_mapping(data->domain, irq);
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
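
With the chip behind an irq_domain, a client can pass irq_base = 0, let the core create a linear domain, and translate chip-local interrupt indexes to Linux IRQ numbers on demand with regmap_irq_get_virq(). A sketch under the assumption that irq_data came from an earlier regmap_add_irq_chip() call; MY_CHIP_IRQ_ALERT is an invented index into the chip's irqs[] table:

#include <linux/interrupt.h>
#include <linux/regmap.h>

#define MY_CHIP_IRQ_ALERT	3	/* hypothetical index into the chip's irqs[] */

static int my_chip_request_alert(struct regmap_irq_chip_data *irq_data,
				 irq_handler_t handler, void *ctx)
{
	int virq = regmap_irq_get_virq(irq_data, MY_CHIP_IRQ_ALERT);

	if (virq <= 0)
		return -EINVAL;

	/* The regmap IRQ is nested/threaded, so only a threaded handler fits. */
	return request_threaded_irq(virq, NULL, handler, IRQF_ONESHOT,
				    "my-chip-alert", ctx);
}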
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
new file mode 100644
index 0000000..febd6de
--- /dev/null
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -0,0 +1,224 @@
+/*
+ * Register map access API - MMIO support
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+struct regmap_mmio_context {
+	void __iomem *regs;
+	unsigned val_bytes;
+};
+
+static int regmap_mmio_gather_write(void *context,
+				    const void *reg, size_t reg_size,
+				    const void *val, size_t val_size)
+{
+	struct regmap_mmio_context *ctx = context;
+	u32 offset;
+
+	BUG_ON(reg_size != 4);
+
+	offset = be32_to_cpup(reg);
+
+	while (val_size) {
+		switch (ctx->val_bytes) {
+		case 1:
+			writeb(*(u8 *)val, ctx->regs + offset);
+			break;
+		case 2:
+			writew(be16_to_cpup(val), ctx->regs + offset);
+			break;
+		case 4:
+			writel(be32_to_cpup(val), ctx->regs + offset);
+			break;
+#ifdef CONFIG_64BIT
+		case 8:
+			writeq(be64_to_cpup(val), ctx->regs + offset);
+			break;
+#endif
+		default:
+			/* Should be caught by regmap_mmio_gen_context() */
+			BUG();
+		}
+		val_size -= ctx->val_bytes;
+		val += ctx->val_bytes;
+		offset += ctx->val_bytes;
+	}
+
+	return 0;
+}
+
+static int regmap_mmio_write(void *context, const void *data, size_t count)
+{
+	BUG_ON(count < 4);
+
+	return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4);
+}
+
+static int regmap_mmio_read(void *context,
+			    const void *reg, size_t reg_size,
+			    void *val, size_t val_size)
+{
+	struct regmap_mmio_context *ctx = context;
+	u32 offset;
+
+	BUG_ON(reg_size != 4);
+
+	offset = be32_to_cpup(reg);
+
+	while (val_size) {
+		switch (ctx->val_bytes) {
+		case 1:
+			*(u8 *)val = readb(ctx->regs + offset);
+			break;
+		case 2:
+			*(u16 *)val = cpu_to_be16(readw(ctx->regs + offset));
+			break;
+		case 4:
+			*(u32 *)val = cpu_to_be32(readl(ctx->regs + offset));
+			break;
+#ifdef CONFIG_64BIT
+		case 8:
+			*(u64 *)val = cpu_to_be64(readq(ctx->regs + offset));
+			break;
+#endif
+		default:
+			/* Should be caught by regmap_mmio_gen_context() */
+			BUG();
+		}
+		val_size -= ctx->val_bytes;
+		val += ctx->val_bytes;
+		offset += ctx->val_bytes;
+	}
+
+	return 0;
+}
+
+static void regmap_mmio_free_context(void *context)
+{
+	kfree(context);
+}
+
+static struct regmap_bus regmap_mmio = {
+	.fast_io = true,
+	.write = regmap_mmio_write,
+	.gather_write = regmap_mmio_gather_write,
+	.read = regmap_mmio_read,
+	.free_context = regmap_mmio_free_context,
+};
+
+struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
+					const struct regmap_config *config)
+{
+	struct regmap_mmio_context *ctx;
+	int min_stride;
+
+	if (config->reg_bits != 32)
+		return ERR_PTR(-EINVAL);
+
+	if (config->pad_bits)
+		return ERR_PTR(-EINVAL);
+
+	switch (config->val_bits) {
+	case 8:
+		/* The core treats 0 as 1 */
+		min_stride = 0;
+		break;
+	case 16:
+		min_stride = 2;
+		break;
+	case 32:
+		min_stride = 4;
+		break;
+#ifdef CONFIG_64BIT
+	case 64:
+		min_stride = 8;
+		break;
+#endif
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (config->reg_stride < min_stride)
+		return ERR_PTR(-EINVAL);
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ctx->regs = regs;
+	ctx->val_bytes = config->val_bits / 8;
+
+	return ctx;
+}
+
+/**
+ * regmap_init_mmio(): Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_mmio(struct device *dev,
+				void __iomem *regs,
+				const struct regmap_config *config)
+{
+	struct regmap_mmio_context *ctx;
+
+	ctx = regmap_mmio_gen_context(regs, config);
+	if (IS_ERR(ctx))
+		return ERR_CAST(ctx);
+
+	return regmap_init(dev, &regmap_mmio, ctx, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_mmio);
+
+/**
+ * devm_regmap_init_mmio(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_mmio(struct device *dev,
+				     void __iomem *regs,
+				     const struct regmap_config *config)
+{
+	struct regmap_mmio_context *ctx;
+
+	ctx = regmap_mmio_gen_context(regs, config);
+	if (IS_ERR(ctx))
+		return ERR_CAST(ctx);
+
+	return devm_regmap_init(dev, &regmap_mmio, ctx, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_mmio);
+
+MODULE_LICENSE("GPL v2");
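
For orientation, here is a minimal usage sketch for the MMIO regmap bus added above. The driver, device and register names (foo_*, FOO_CTRL) are invented for illustration; only the regmap calls are real API, and the config mirrors the constraints enforced by regmap_mmio_gen_context() (32-bit registers, stride at least the value size).

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Hypothetical register layout, for illustration only */
#define FOO_CTRL	0x00
#define FOO_CTRL_EN	BIT(0)

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 32,	/* required by the MMIO bus */
	.val_bits	= 32,
	.reg_stride	= 4,	/* at least the value size */
};

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct regmap *map;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* Managed variant; the map is freed automatically on detach */
	map = devm_regmap_init_mmio(&pdev->dev, base, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_update_bits(map, FOO_CTRL, FOO_CTRL_EN, FOO_CTRL_EN);
}
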
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 7c0c35a..ffa46a9 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -15,17 +15,19 @@
 #include <linux/init.h>
 #include <linux/module.h>
 
-static int regmap_spi_write(struct device *dev, const void *data, size_t count)
+static int regmap_spi_write(void *context, const void *data, size_t count)
 {
+	struct device *dev = context;
 	struct spi_device *spi = to_spi_device(dev);
 
 	return spi_write(spi, data, count);
 }
 
-static int regmap_spi_gather_write(struct device *dev,
+static int regmap_spi_gather_write(void *context,
 				   const void *reg, size_t reg_len,
 				   const void *val, size_t val_len)
 {
+	struct device *dev = context;
 	struct spi_device *spi = to_spi_device(dev);
 	struct spi_message m;
 	struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
@@ -38,10 +40,11 @@
 	return spi_sync(spi, &m);
 }
 
-static int regmap_spi_read(struct device *dev,
+static int regmap_spi_read(void *context,
 			   const void *reg, size_t reg_size,
 			   void *val, size_t val_size)
 {
+	struct device *dev = context;
 	struct spi_device *spi = to_spi_device(dev);
 
 	return spi_write_then_read(spi, reg, reg_size, val, val_size);
@@ -66,7 +69,7 @@
 struct regmap *regmap_init_spi(struct spi_device *spi,
 			       const struct regmap_config *config)
 {
-	return regmap_init(&spi->dev, &regmap_spi, config);
+	return regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
 }
 EXPORT_SYMBOL_GPL(regmap_init_spi);
 
@@ -83,7 +86,7 @@
 struct regmap *devm_regmap_init_spi(struct spi_device *spi,
 				    const struct regmap_config *config)
 {
-	return devm_regmap_init(&spi->dev, &regmap_spi, config);
+	return devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
 }
 EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
 
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7a3f535..0bcda48 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -112,25 +112,36 @@
 	out[0] = reg >> 2;
 }
 
-static void regmap_format_8(void *buf, unsigned int val)
+static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
 {
 	u8 *b = buf;
 
-	b[0] = val;
+	b[0] = val << shift;
 }
 
-static void regmap_format_16(void *buf, unsigned int val)
+static void regmap_format_16(void *buf, unsigned int val, unsigned int shift)
 {
 	__be16 *b = buf;
 
-	b[0] = cpu_to_be16(val);
+	b[0] = cpu_to_be16(val << shift);
 }
 
-static void regmap_format_32(void *buf, unsigned int val)
+static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
+{
+	u8 *b = buf;
+
+	val <<= shift;
+
+	b[0] = val >> 16;
+	b[1] = val >> 8;
+	b[2] = val;
+}
+
+static void regmap_format_32(void *buf, unsigned int val, unsigned int shift)
 {
 	__be32 *b = buf;
 
-	b[0] = cpu_to_be32(val);
+	b[0] = cpu_to_be32(val << shift);
 }
 
 static unsigned int regmap_parse_8(void *buf)
@@ -149,6 +160,16 @@
 	return b[0];
 }
 
+static unsigned int regmap_parse_24(void *buf)
+{
+	u8 *b = buf;
+	unsigned int ret = b[2];
+	ret |= ((unsigned int)b[1]) << 8;
+	ret |= ((unsigned int)b[0]) << 16;
+
+	return ret;
+}
+
 static unsigned int regmap_parse_32(void *buf)
 {
 	__be32 *b = buf;
@@ -158,11 +179,41 @@
 	return b[0];
 }
 
+static void regmap_lock_mutex(struct regmap *map)
+{
+	mutex_lock(&map->mutex);
+}
+
+static void regmap_unlock_mutex(struct regmap *map)
+{
+	mutex_unlock(&map->mutex);
+}
+
+static void regmap_lock_spinlock(struct regmap *map)
+{
+	spin_lock(&map->spinlock);
+}
+
+static void regmap_unlock_spinlock(struct regmap *map)
+{
+	spin_unlock(&map->spinlock);
+}
+
+static void dev_get_regmap_release(struct device *dev, void *res)
+{
+	/*
+	 * We don't actually have anything to do here; the goal here
+	 * is not to manage the regmap but to provide a simple way to
+	 * get the regmap back given a struct device.
+	 */
+}
+
 /**
  * regmap_init(): Initialise register map
  *
  * @dev: Device that will be interacted with
  * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
  * @config: Configuration for register map
  *
  * The return value will be an ERR_PTR() on error or a valid pointer to
@@ -171,9 +222,10 @@
  */
 struct regmap *regmap_init(struct device *dev,
 			   const struct regmap_bus *bus,
+			   void *bus_context,
 			   const struct regmap_config *config)
 {
-	struct regmap *map;
+	struct regmap *map, **m;
 	int ret = -EINVAL;
 
 	if (!bus || !config)
@@ -185,20 +237,36 @@
 		goto err;
 	}
 
-	mutex_init(&map->lock);
+	if (bus->fast_io) {
+		spin_lock_init(&map->spinlock);
+		map->lock = regmap_lock_spinlock;
+		map->unlock = regmap_unlock_spinlock;
+	} else {
+		mutex_init(&map->mutex);
+		map->lock = regmap_lock_mutex;
+		map->unlock = regmap_unlock_mutex;
+	}
 	map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
 	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
 	map->format.pad_bytes = config->pad_bits / 8;
 	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
 	map->format.buf_size += map->format.pad_bytes;
+	map->reg_shift = config->pad_bits % 8;
+	if (config->reg_stride)
+		map->reg_stride = config->reg_stride;
+	else
+		map->reg_stride = 1;
+	map->use_single_rw = config->use_single_rw;
 	map->dev = dev;
 	map->bus = bus;
+	map->bus_context = bus_context;
 	map->max_register = config->max_register;
 	map->writeable_reg = config->writeable_reg;
 	map->readable_reg = config->readable_reg;
 	map->volatile_reg = config->volatile_reg;
 	map->precious_reg = config->precious_reg;
 	map->cache_type = config->cache_type;
+	map->name = config->name;
 
 	if (config->read_flag_mask || config->write_flag_mask) {
 		map->read_flag_mask = config->read_flag_mask;
@@ -207,7 +275,7 @@
 		map->read_flag_mask = bus->read_flag_mask;
 	}
 
-	switch (config->reg_bits) {
+	switch (config->reg_bits + map->reg_shift) {
 	case 2:
 		switch (config->val_bits) {
 		case 6:
@@ -273,12 +341,19 @@
 		map->format.format_val = regmap_format_16;
 		map->format.parse_val = regmap_parse_16;
 		break;
+	case 24:
+		map->format.format_val = regmap_format_24;
+		map->format.parse_val = regmap_parse_24;
+		break;
 	case 32:
 		map->format.format_val = regmap_format_32;
 		map->format.parse_val = regmap_parse_32;
 		break;
 	}
 
+	if (map->format.format_write)
+		map->use_single_rw = true;
+
 	if (!map->format.format_write &&
 	    !(map->format.format_reg && map->format.format_val))
 		goto err_map;
@@ -289,14 +364,25 @@
 		goto err_map;
 	}
 
-	regmap_debugfs_init(map);
+	regmap_debugfs_init(map, config->name);
 
 	ret = regcache_init(map, config);
 	if (ret < 0)
 		goto err_free_workbuf;
 
+	/* Add a devres resource for dev_get_regmap() */
+	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
+	if (!m) {
+		ret = -ENOMEM;
+		goto err_cache;
+	}
+	*m = map;
+	devres_add(dev, m);
+
 	return map;
 
+err_cache:
+	regcache_exit(map);
 err_free_workbuf:
 	kfree(map->work_buf);
 err_map:
@@ -316,6 +402,7 @@
  *
  * @dev: Device that will be interacted with
  * @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
  * @config: Configuration for register map
  *
  * The return value will be an ERR_PTR() on error or a valid pointer
@@ -325,6 +412,7 @@
  */
 struct regmap *devm_regmap_init(struct device *dev,
 				const struct regmap_bus *bus,
+				void *bus_context,
 				const struct regmap_config *config)
 {
 	struct regmap **ptr, *regmap;
@@ -333,7 +421,7 @@
 	if (!ptr)
 		return ERR_PTR(-ENOMEM);
 
-	regmap = regmap_init(dev, bus, config);
+	regmap = regmap_init(dev, bus, bus_context, config);
 	if (!IS_ERR(regmap)) {
 		*ptr = regmap;
 		devres_add(dev, ptr);
@@ -360,7 +448,7 @@
 {
 	int ret;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	regcache_exit(map);
 	regmap_debugfs_exit(map);
@@ -372,14 +460,14 @@
 	map->precious_reg = config->precious_reg;
 	map->cache_type = config->cache_type;
 
-	regmap_debugfs_init(map);
+	regmap_debugfs_init(map, config->name);
 
 	map->cache_bypass = false;
 	map->cache_only = false;
 
 	ret = regcache_init(map, config);
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -391,11 +479,51 @@
 {
 	regcache_exit(map);
 	regmap_debugfs_exit(map);
+	if (map->bus->free_context)
+		map->bus->free_context(map->bus_context);
 	kfree(map->work_buf);
 	kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
 
+static int dev_get_regmap_match(struct device *dev, void *res, void *data)
+{
+	struct regmap **r = res;
+	if (!r || !*r) {
+		WARN_ON(!r || !*r);
+		return 0;
+	}
+
+	/* If the user didn't specify a name, match any */
+	if (data)
+		return (*r)->name == data;
+	else
+		return 1;
+}
+
+/**
+ * dev_get_regmap(): Obtain the regmap (if any) for a device
+ *
+ * @dev: Device to retrieve the map for
+ * @name: Optional name for the register map, usually NULL.
+ *
+ * Returns the regmap for the device if one is present, or NULL.  If
+ * name is specified then it must match the name given in the
+ * regmap_config when the map was registered; if it is NULL then the
+ * first regmap found will be used.  Devices with multiple register
+ * maps are very rare, so generic code should normally not need to
+ * specify a name.
+ */
+struct regmap *dev_get_regmap(struct device *dev, const char *name)
+{
+	struct regmap **r = devres_find(dev, dev_get_regmap_release,
+					dev_get_regmap_match, (void *)name);
+
+	if (!r)
+		return NULL;
+	return *r;
+}
+EXPORT_SYMBOL_GPL(dev_get_regmap);
+
 static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 			     const void *val, size_t val_len)
 {
@@ -408,7 +536,8 @@
 	/* Check for unwritable registers before we start */
 	if (map->writeable_reg)
 		for (i = 0; i < val_len / map->format.val_bytes; i++)
-			if (!map->writeable_reg(map->dev, reg + i))
+			if (!map->writeable_reg(map->dev,
+						reg + (i * map->reg_stride)))
 				return -EINVAL;
 
 	if (!map->cache_bypass && map->format.parse_val) {
@@ -417,7 +546,8 @@
 		for (i = 0; i < val_len / val_bytes; i++) {
 			memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
 			ival = map->format.parse_val(map->work_buf);
-			ret = regcache_write(map, reg + i, ival);
+			ret = regcache_write(map, reg + (i * map->reg_stride),
+					     ival);
 			if (ret) {
 				dev_err(map->dev,
 				   "Error in caching of register: %u ret: %d\n",
@@ -431,7 +561,7 @@
 		}
 	}
 
-	map->format.format_reg(map->work_buf, reg);
+	map->format.format_reg(map->work_buf, reg, map->reg_shift);
 
 	u8[0] |= map->write_flag_mask;
 
@@ -444,12 +574,12 @@
 	 */
 	if (val == (map->work_buf + map->format.pad_bytes +
 		    map->format.reg_bytes))
-		ret = map->bus->write(map->dev, map->work_buf,
+		ret = map->bus->write(map->bus_context, map->work_buf,
 				      map->format.reg_bytes +
 				      map->format.pad_bytes +
 				      val_len);
 	else if (map->bus->gather_write)
-		ret = map->bus->gather_write(map->dev, map->work_buf,
+		ret = map->bus->gather_write(map->bus_context, map->work_buf,
 					     map->format.reg_bytes +
 					     map->format.pad_bytes,
 					     val, val_len);
@@ -464,7 +594,7 @@
 		memcpy(buf, map->work_buf, map->format.reg_bytes);
 		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
 		       val, val_len);
-		ret = map->bus->write(map->dev, buf, len);
+		ret = map->bus->write(map->bus_context, buf, len);
 
 		kfree(buf);
 	}
@@ -498,7 +628,7 @@
 
 		trace_regmap_hw_write_start(map->dev, reg, 1);
 
-		ret = map->bus->write(map->dev, map->work_buf,
+		ret = map->bus->write(map->bus_context, map->work_buf,
 				      map->format.buf_size);
 
 		trace_regmap_hw_write_done(map->dev, reg, 1);
@@ -506,7 +636,7 @@
 		return ret;
 	} else {
 		map->format.format_val(map->work_buf + map->format.reg_bytes
-				       + map->format.pad_bytes, val);
+				       + map->format.pad_bytes, val, 0);
 		return _regmap_raw_write(map, reg,
 					 map->work_buf +
 					 map->format.reg_bytes +
@@ -529,11 +659,14 @@
 {
 	int ret;
 
-	mutex_lock(&map->lock);
+	if (reg % map->reg_stride)
+		return -EINVAL;
+
+	map->lock(map);
 
 	ret = _regmap_write(map, reg, val);
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -560,11 +693,16 @@
 {
 	int ret;
 
-	mutex_lock(&map->lock);
+	if (val_len % map->format.val_bytes)
+		return -EINVAL;
+	if (reg % map->reg_stride)
+		return -EINVAL;
+
+	map->lock(map);
 
 	ret = _regmap_raw_write(map, reg, val, val_len);
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -593,8 +731,10 @@
 
 	if (!map->format.parse_val)
 		return -EINVAL;
+	if (reg % map->reg_stride)
+		return -EINVAL;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	/* No formatting is require if val_byte is 1 */
 	if (val_bytes == 1) {
@@ -609,13 +749,28 @@
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
 			map->format.parse_val(wval + i);
 	}
-	ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+	/*
+	 * Some devices do not support bulk write; for them we issue a
+	 * series of single write operations instead.  Note that the map
+	 * lock is already held here, so the unlocked helper must be used
+	 * and errors must fall through to the unlock path below.
+	 */
+	if (map->use_single_rw) {
+		for (i = 0; i < val_count; i++) {
+			ret = _regmap_raw_write(map,
+						reg + (i * map->reg_stride),
+						val + (i * val_bytes),
+						val_bytes);
+			if (ret != 0)
+				break;
+		}
+	} else {
+		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+	}
 
 	if (val_bytes != 1)
 		kfree(wval);
 
 out:
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_bulk_write);
@@ -626,7 +781,7 @@
 	u8 *u8 = map->work_buf;
 	int ret;
 
-	map->format.format_reg(map->work_buf, reg);
+	map->format.format_reg(map->work_buf, reg, map->reg_shift);
 
 	/*
 	 * Some buses or devices flag reads by setting the high bits in the
@@ -639,7 +794,7 @@
 	trace_regmap_hw_read_start(map->dev, reg,
 				   val_len / map->format.val_bytes);
 
-	ret = map->bus->read(map->dev, map->work_buf,
+	ret = map->bus->read(map->bus_context, map->work_buf,
 			     map->format.reg_bytes + map->format.pad_bytes,
 			     val, val_len);
 
@@ -672,6 +827,9 @@
 		trace_regmap_reg_read(map->dev, reg, *val);
 	}
 
+	if (ret == 0 && !map->cache_bypass)
+		regcache_write(map, reg, *val);
+
 	return ret;
 }
 
@@ -689,11 +847,14 @@
 {
 	int ret;
 
-	mutex_lock(&map->lock);
+	if (reg % map->reg_stride)
+		return -EINVAL;
+
+	map->lock(map);
 
 	ret = _regmap_read(map, reg, val);
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -718,7 +879,12 @@
 	unsigned int v;
 	int ret, i;
 
-	mutex_lock(&map->lock);
+	if (val_len % map->format.val_bytes)
+		return -EINVAL;
+	if (reg % map->reg_stride)
+		return -EINVAL;
+
+	map->lock(map);
 
 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
 	    map->cache_type == REGCACHE_NONE) {
@@ -730,16 +896,17 @@
 		 * cost as we expect to hit the cache.
 		 */
 		for (i = 0; i < val_count; i++) {
-			ret = _regmap_read(map, reg + i, &v);
+			ret = _regmap_read(map, reg + (i * map->reg_stride),
+					   &v);
 			if (ret != 0)
 				goto out;
 
-			map->format.format_val(val + (i * val_bytes), v);
+			map->format.format_val(val + (i * val_bytes), v, 0);
 		}
 	}
 
  out:
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -765,19 +932,40 @@
 
 	if (!map->format.parse_val)
 		return -EINVAL;
+	if (reg % map->reg_stride)
+		return -EINVAL;
 
 	if (vol || map->cache_type == REGCACHE_NONE) {
-		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
-		if (ret != 0)
-			return ret;
+		/*
+		 * Some devices do not support bulk read; for them we
+		 * issue a series of single read operations instead.
+		 */
+		if (map->use_single_rw) {
+			for (i = 0; i < val_count; i++) {
+				ret = regmap_raw_read(map,
+						reg + (i * map->reg_stride),
+						val + (i * val_bytes),
+						val_bytes);
+				if (ret != 0)
+					return ret;
+			}
+		} else {
+			ret = regmap_raw_read(map, reg, val,
+					      val_bytes * val_count);
+			if (ret != 0)
+				return ret;
+		}
 
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
 			map->format.parse_val(val + i);
 	} else {
 		for (i = 0; i < val_count; i++) {
-			ret = regmap_read(map, reg + i, val + (i * val_bytes));
+			unsigned int ival;
+			ret = regmap_read(map, reg + (i * map->reg_stride),
+					  &ival);
 			if (ret != 0)
 				return ret;
+			memcpy(val + (i * val_bytes), &ival, val_bytes);
 		}
 	}
 
@@ -792,7 +980,7 @@
 	int ret;
 	unsigned int tmp, orig;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	ret = _regmap_read(map, reg, &orig);
 	if (ret != 0)
@@ -809,7 +997,7 @@
 	}
 
 out:
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -876,7 +1064,7 @@
 	if (map->patch)
 		return -EBUSY;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	bypass = map->cache_bypass;
 
@@ -904,7 +1092,7 @@
 out:
 	map->cache_bypass = bypass;
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
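
A short sketch of how the new dev_get_regmap() lookup might be used by a child driver of an MFD-style parent; the BAR_STATUS offset and the bar_* names are assumptions made for illustration only.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Hypothetical register offset, for illustration only */
#define BAR_STATUS	0x04

static int bar_child_probe(struct platform_device *pdev)
{
	struct regmap *map;
	unsigned int status;
	int ret;

	/* NULL name: take the first (usually only) regmap of the parent */
	map = dev_get_regmap(pdev->dev.parent, NULL);
	if (!map)
		return -ENODEV;

	ret = regmap_read(map, BAR_STATUS, &status);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "parent status: %#x\n", status);
	return 0;
}
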
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 8db9089..9a13e88 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6580,24 +6580,21 @@
 
 static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
 {
-	struct proc_dir_entry *StatusProcEntry;
 	struct proc_dir_entry *ControllerProcEntry;
-	struct proc_dir_entry *UserCommandProcEntry;
 
 	if (DAC960_ProcDirectoryEntry == NULL) {
-  		DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
-  		StatusProcEntry = proc_create("status", 0,
-					   DAC960_ProcDirectoryEntry,
-					   &dac960_proc_fops);
+		DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
+		proc_create("status", 0, DAC960_ProcDirectoryEntry,
+			    &dac960_proc_fops);
 	}
 
-      sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
-      ControllerProcEntry = proc_mkdir(Controller->ControllerName,
-				       DAC960_ProcDirectoryEntry);
-      proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
-      proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
-      UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
-      Controller->ControllerProcEntry = ControllerProcEntry;
+	sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
+	ControllerProcEntry = proc_mkdir(Controller->ControllerName,
+					 DAC960_ProcDirectoryEntry);
+	proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
+	proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
+	proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
+	Controller->ControllerProcEntry = ControllerProcEntry;
 }
 
 
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index abfaaca..946166e 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2297,7 +2297,7 @@
 		return;
 	}
 
-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
+	if (!capable(CAP_SYS_ADMIN)) {
 		retcode = ERR_PERM;
 		goto fail;
 	}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 43beaca..436f519 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -664,7 +664,7 @@
 	timeo = mdev->net_conf->try_connect_int * HZ;
 	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
 
-	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
+	s_listen->sk->sk_reuse    = SK_CAN_REUSE; /* SO_REUSEADDR */
 	s_listen->sk->sk_rcvtimeo = timeo;
 	s_listen->sk->sk_sndtimeo = timeo;
 	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
@@ -841,8 +841,8 @@
 		}
 	} while (1);
 
-	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
-	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
+	msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
+	sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
 
 	sock->sk->sk_allocation = GFP_NOIO;
 	msock->sk->sk_allocation = GFP_NOIO;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 00f9fc9..304000c 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2510,8 +2510,10 @@
 		up(&dd->port->cmd_slot);
 		return NULL;
 	}
-	if (unlikely(*tag < 0))
+	if (unlikely(*tag < 0)) {
+		up(&dd->port->cmd_slot);
 		return NULL;
+	}
 
 	return dd->port->commands[*tag].sg;
 }
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index ae9edca..57fd867 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -75,6 +75,8 @@
 	{ USB_DEVICE(0x0CF3, 0x311D) },
 	{ USB_DEVICE(0x13d3, 0x3375) },
 	{ USB_DEVICE(0x04CA, 0x3005) },
+	{ USB_DEVICE(0x13d3, 0x3362) },
+	{ USB_DEVICE(0x0CF3, 0xE004) },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE02C) },
@@ -94,6 +96,8 @@
 	{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 
 	{ }	/* Terminating entry */
 };
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3311b81..9217121 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -101,12 +101,16 @@
 	{ USB_DEVICE(0x0c10, 0x0000) },
 
 	/* Broadcom BCM20702A0 */
+	{ USB_DEVICE(0x0489, 0xe042) },
 	{ USB_DEVICE(0x0a5c, 0x21e3) },
 	{ USB_DEVICE(0x0a5c, 0x21e6) },
 	{ USB_DEVICE(0x0a5c, 0x21e8) },
 	{ USB_DEVICE(0x0a5c, 0x21f3) },
 	{ USB_DEVICE(0x413c, 0x8197) },
 
+	/* Foxconn - Hon Hai */
+	{ USB_DEVICE(0x0489, 0xe033) },
+
 	{ }	/* Terminating entry */
 };
 
@@ -133,6 +137,8 @@
 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ddf86b6..cdf2f54 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1895,6 +1895,13 @@
 
 		/* Get port open/close status on the host */
 		send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+		/*
+		 * If a port was open at the time of suspending, we
+		 * have to let the host know that it's still open.
+		 */
+		if (port->guest_connected)
+			send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
 	}
 	return 0;
 }
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 6db161f..c535cf8 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -35,7 +35,12 @@
 static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
 {
 	struct clk_lookup *p, *cl = NULL;
-	int match, best = 0;
+	int match, best_found = 0, best_possible = 0;
+
+	if (dev_id)
+		best_possible += 2;
+	if (con_id)
+		best_possible += 1;
 
 	list_for_each_entry(p, &clocks, node) {
 		match = 0;
@@ -50,10 +55,10 @@
 			match += 1;
 		}
 
-		if (match > best) {
+		if (match > best_found) {
 			cl = p;
-			if (match != 3)
-				best = match;
+			if (match != best_possible)
+				best_found = match;
 			else
 				break;
 		}
@@ -89,6 +94,51 @@
 }
 EXPORT_SYMBOL(clk_put);
 
+static void devm_clk_release(struct device *dev, void *res)
+{
+	clk_put(*(struct clk **)res);
+}
+
+struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+	struct clk **ptr, *clk;
+
+	ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	clk = clk_get(dev, id);
+	if (!IS_ERR(clk)) {
+		*ptr = clk;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return clk;
+}
+EXPORT_SYMBOL(devm_clk_get);
+
+static int devm_clk_match(struct device *dev, void *res, void *data)
+{
+	struct clk **c = res;
+	if (!c || !*c) {
+		WARN_ON(!c || !*c);
+		return 0;
+	}
+	return *c == data;
+}
+
+void devm_clk_put(struct device *dev, struct clk *clk)
+{
+	int ret;
+
+	ret = devres_destroy(dev, devm_clk_release, devm_clk_match, clk);
+
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_clk_put);
+
 void clkdev_add(struct clk_lookup *cl)
 {
 	mutex_lock(&clocks_mutex);
@@ -116,8 +166,9 @@
 	char	con_id[MAX_CON_ID];
 };
 
-struct clk_lookup * __init_refok
-clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
+static struct clk_lookup * __init_refok
+vclkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt,
+	va_list ap)
 {
 	struct clk_lookup_alloc *cla;
 
@@ -132,16 +183,25 @@
 	}
 
 	if (dev_fmt) {
-		va_list ap;
-
-		va_start(ap, dev_fmt);
 		vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
 		cla->cl.dev_id = cla->dev_id;
-		va_end(ap);
 	}
 
 	return &cla->cl;
 }
+
+struct clk_lookup * __init_refok
+clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
+{
+	struct clk_lookup *cl;
+	va_list ap;
+
+	va_start(ap, dev_fmt);
+	cl = vclkdev_alloc(clk, con_id, dev_fmt, ap);
+	va_end(ap);
+
+	return cl;
+}
 EXPORT_SYMBOL(clkdev_alloc);
 
 int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
@@ -173,3 +233,65 @@
 	kfree(cl);
 }
 EXPORT_SYMBOL(clkdev_drop);
+
+/**
+ * clk_register_clkdev - register one clock lookup for a struct clk
+ * @clk: struct clk to associate with the clk_lookup
+ * @con_id: connection ID string on device
+ * @dev_fmt: format string describing device name
+ *
+ * con_id or dev_fmt may be NULL as a wildcard, just as in the rest of
+ * clkdev.
+ *
+ * To make things easier for mass registration, we detect error clks
+ * from a previous clk_register() call, and return the error code for
+ * those.  This is to permit this function to be called immediately
+ * after clk_register().
+ */
+int clk_register_clkdev(struct clk *clk, const char *con_id,
+	const char *dev_fmt, ...)
+{
+	struct clk_lookup *cl;
+	va_list ap;
+
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	va_start(ap, dev_fmt);
+	cl = vclkdev_alloc(clk, con_id, dev_fmt, ap);
+	va_end(ap);
+
+	if (!cl)
+		return -ENOMEM;
+
+	clkdev_add(cl);
+
+	return 0;
+}
+
+/**
+ * clk_register_clkdevs - register a set of clk_lookup for a struct clk
+ * @clk: struct clk to associate with all clk_lookups
+ * @cl: array of clk_lookup structures with con_id and dev_id pre-initialized
+ * @num: number of clk_lookup structures to register
+ *
+ * To make things easier for mass registration, we detect error clks
+ * from a previous clk_register() call, and return the error code for
+ * those.  This is to permit this function to be called immediately
+ * after clk_register().
+ */
+int clk_register_clkdevs(struct clk *clk, struct clk_lookup *cl, size_t num)
+{
+	unsigned i;
+
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	for (i = 0; i < num; i++, cl++) {
+		cl->clk = clk;
+		clkdev_add(cl);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(clk_register_clkdevs);
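
A hedged usage sketch for the clkdev helpers added above; the clock, lookup and device names are invented, while devm_clk_get(), clk_register_clkdev() and clk_register_clkdevs() are the interfaces introduced by this patch.

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* Provider side: attach lookups to a freshly registered struct clk. */
static int foo_register_uart_clk(struct clk *clk)
{
	static struct clk_lookup lookups[] = {
		{ .dev_id = "serial8250.0", .con_id = "uartclk" },
		{ .dev_id = "serial8250.1", .con_id = "uartclk" },
	};

	/*
	 * The single-lookup variant takes a printf-style device name:
	 * clk_register_clkdev(clk, "uartclk", "serial8250.%d", 2);
	 * Both helpers propagate an ERR_PTR left over from clk_register().
	 */
	return clk_register_clkdevs(clk, lookups, ARRAY_SIZE(lookups));
}

/* Consumer side: managed get, so no clk_put() is needed in remove() */
static int foo_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, "uartclk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}
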
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index ab9abb4..371f13c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -111,6 +111,7 @@
 	depends on S390
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
 	help
 	  This is the s390 hardware accelerated implementation of the
 	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
@@ -164,6 +165,7 @@
 	select CRYPTO_ALGAPI
 	select CRYPTO_AES
 	select CRYPTO_BLKCIPHER2
+	select CRYPTO_HASH
 	help
 	  This driver allows you to utilize the Cryptographic Engines and
 	  Security Accelerator (CESA) which can be found on the Marvell Orion
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index c301a8e..3d704ab 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1429,6 +1429,7 @@
 			 * signal
 			 */
 			release_phy_channel(plchan);
+			plchan->phychan_hold = 0;
 		}
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7aa58d2..bf0d7e4 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -221,10 +221,6 @@
 
 	vdbg_dump_regs(atchan);
 
-	/* clear any pending interrupt */
-	while (dma_readl(atdma, EBCISR))
-		cpu_relax();
-
 	channel_writel(atchan, SADDR, 0);
 	channel_writel(atchan, DADDR, 0);
 	channel_writel(atchan, CTRLA, 0);
@@ -249,7 +245,9 @@
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
 
-	dma_cookie_complete(txd);
+	/* mark the descriptor as complete for non-cyclic cases only */
+	if (!atc_chan_is_cyclic(atchan))
+		dma_cookie_complete(txd);
 
 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index e6f133b..f6e9b57 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -703,7 +703,9 @@
 	desc = ep93xx_dma_get_active(edmac);
 	if (desc) {
 		if (desc->complete) {
-			dma_cookie_complete(&desc->txd);
+			/* mark descriptor complete for non-cyclic case only */
+			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+				dma_cookie_complete(&desc->txd);
 			list_splice_init(&edmac->active, &list);
 		}
 		callback = desc->txd.callback;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a45b5d2..bb787d8 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -571,11 +571,14 @@
 	if (desc->desc.callback)
 		desc->desc.callback(desc->desc.callback_param);
 
-	dma_cookie_complete(&desc->desc);
-
-	/* If we are dealing with a cyclic descriptor keep it on ld_active */
+	/* If we are dealing with a cyclic descriptor, keep it on ld_active
+	 * and don't mark the descriptor as complete.
+	 * Only in non-cyclic cases is it marked as complete.
+	 */
 	if (imxdma_chan_is_doing_cyclic(imxdmac))
 		goto out;
+	else
+		dma_cookie_complete(&desc->desc);
 
 	/* Free 2D slot if it was an interleaved transfer */
 	if (imxdmac->enabled_2d) {
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index c81ef7e..655d4ce6 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -201,10 +201,6 @@
 
 static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
-
-	mxs_dma_enable_chan(mxs_chan);
-
 	return dma_cookie_assign(tx);
 }
 
@@ -558,9 +554,9 @@
 
 static void mxs_dma_issue_pending(struct dma_chan *chan)
 {
-	/*
-	 * Nothing to do. We only have a single descriptor.
-	 */
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+
+	mxs_dma_enable_chan(mxs_chan);
 }
 
 static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 282caf1..fa3fb21 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2225,12 +2225,9 @@
 {
 	struct dma_pl330_dmac *pdmac;
 	struct dma_pl330_desc *desc;
-	struct dma_pl330_chan *pch;
+	struct dma_pl330_chan *pch = NULL;
 	unsigned long flags;
 
-	if (list_empty(list))
-		return;
-
 	/* Finish off the work list */
 	list_for_each_entry(desc, list, node) {
 		dma_async_tx_callback callback;
@@ -2247,6 +2244,10 @@
 		desc->pchan = NULL;
 	}
 
+	/* pch will be unset if list was empty */
+	if (!pch)
+		return;
+
 	pdmac = pch->dmac;
 
 	spin_lock_irqsave(&pdmac->pool_lock, flags);
@@ -2257,12 +2258,9 @@
 static inline void handle_cyclic_desc_list(struct list_head *list)
 {
 	struct dma_pl330_desc *desc;
-	struct dma_pl330_chan *pch;
+	struct dma_pl330_chan *pch = NULL;
 	unsigned long flags;
 
-	if (list_empty(list))
-		return;
-
 	list_for_each_entry(desc, list, node) {
 		dma_async_tx_callback callback;
 
@@ -2274,6 +2272,10 @@
 			callback(desc->txd.callback_param);
 	}
 
+	/* pch will be unset if list was empty */
+	if (!pch)
+		return;
+
 	spin_lock_irqsave(&pch->lock, flags);
 	list_splice_tail_init(list, &pch->work_list);
 	spin_unlock_irqrestore(&pch->lock, flags);
@@ -2320,7 +2322,8 @@
 	/* Pick up ripe tomatoes */
 	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
 		if (desc->status == DONE) {
-			dma_cookie_complete(&desc->txd);
+			if (pch->cyclic)
+				dma_cookie_complete(&desc->txd);
 			list_move_tail(&desc->node, &list);
 		}
 
@@ -2926,8 +2929,11 @@
 	INIT_LIST_HEAD(&pd->channels);
 
 	/* Initialize channel parameters */
-	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
-			(u8)pi->pcfg.num_chan);
+	if (pdat)
+		num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
+	else
+		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+
 	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
 
 	for (i = 0; i < num_chan; i++) {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index bdd41d4..2ed1ac3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -18,6 +18,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>
+#include <linux/regulator/consumer.h>
 
 #include <plat/ste_dma40.h>
 
@@ -69,6 +70,22 @@
 };
 
 /*
+ * enum d40_events - The different Event Enables for the event lines.
+ *
+ * @D40_DEACTIVATE_EVENTLINE: De-activate the event line, stopping the logical channel.
+ * @D40_ACTIVATE_EVENTLINE: Activate the event line, to start a logical channel.
+ * @D40_SUSPEND_REQ_EVENTLINE: Request suspension of an event line.
+ * @D40_ROUND_EVENTLINE: Status check for event line.
+ */
+
+enum d40_events {
+	D40_DEACTIVATE_EVENTLINE	= 0,
+	D40_ACTIVATE_EVENTLINE		= 1,
+	D40_SUSPEND_REQ_EVENTLINE	= 2,
+	D40_ROUND_EVENTLINE		= 3
+};
+
+/*
  * These are the registers that has to be saved and later restored
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
@@ -870,8 +887,8 @@
 }
 #endif
 
-static int d40_channel_execute_command(struct d40_chan *d40c,
-				       enum d40_command command)
+static int __d40_execute_command_phy(struct d40_chan *d40c,
+				     enum d40_command command)
 {
 	u32 status;
 	int i;
@@ -880,6 +897,12 @@
 	unsigned long flags;
 	u32 wmask;
 
+	if (command == D40_DMA_STOP) {
+		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
+		if (ret)
+			return ret;
+	}
+
 	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
 
 	if (d40c->phy_chan->num % 2 == 0)
@@ -973,67 +996,109 @@
 		}
 
 	d40c->pending_tx = 0;
-	d40c->busy = false;
 }
 
-static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
-				   u32 event, int reg)
+static void __d40_config_set_event(struct d40_chan *d40c,
+				   enum d40_events event_type, u32 event,
+				   int reg)
 {
 	void __iomem *addr = chan_base(d40c) + reg;
 	int tries;
+	u32 status;
 
-	if (!enable) {
+	switch (event_type) {
+
+	case D40_DEACTIVATE_EVENTLINE:
+
 		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
 		       | ~D40_EVENTLINE_MASK(event), addr);
-		return;
-	}
+		break;
 
+	case D40_SUSPEND_REQ_EVENTLINE:
+		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+			  D40_EVENTLINE_POS(event);
+
+		if (status == D40_DEACTIVATE_EVENTLINE ||
+		    status == D40_SUSPEND_REQ_EVENTLINE)
+			break;
+
+		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
+		       | ~D40_EVENTLINE_MASK(event), addr);
+
+		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
+
+			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+				  D40_EVENTLINE_POS(event);
+
+			cpu_relax();
+			/*
+			 * Reduce the number of bus accesses while
+			 * waiting for the DMA to suspend.
+			 */
+			udelay(3);
+
+			if (status == D40_DEACTIVATE_EVENTLINE)
+				break;
+		}
+
+		if (tries == D40_SUSPEND_MAX_IT) {
+			chan_err(d40c,
+				"unable to stop the event_line chl %d (log: %d) "
+				"status %x\n", d40c->phy_chan->num,
+				 d40c->log_num, status);
+		}
+		break;
+
+	case D40_ACTIVATE_EVENTLINE:
 	/*
 	 * The hardware sometimes doesn't register the enable when src and dst
 	 * event lines are active on the same logical channel.  Retry to ensure
 	 * it does.  Usually only one retry is sufficient.
 	 */
-	tries = 100;
-	while (--tries) {
-		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
-		       | ~D40_EVENTLINE_MASK(event), addr);
+		tries = 100;
+		while (--tries) {
+			writel((D40_ACTIVATE_EVENTLINE <<
+				D40_EVENTLINE_POS(event)) |
+				~D40_EVENTLINE_MASK(event), addr);
 
-		if (readl(addr) & D40_EVENTLINE_MASK(event))
-			break;
+			if (readl(addr) & D40_EVENTLINE_MASK(event))
+				break;
+		}
+
+		if (tries != 99)
+			dev_dbg(chan2dev(d40c),
+				"[%s] workaround enable S%cLNK (%d tries)\n",
+				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
+				100 - tries);
+
+		WARN_ON(!tries);
+		break;
+
+	case D40_ROUND_EVENTLINE:
+		BUG();
+		break;
+
 	}
-
-	if (tries != 99)
-		dev_dbg(chan2dev(d40c),
-			"[%s] workaround enable S%cLNK (%d tries)\n",
-			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
-			100 - tries);
-
-	WARN_ON(!tries);
 }
 
-static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
+static void d40_config_set_event(struct d40_chan *d40c,
+				 enum d40_events event_type)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
-
 	/* Enable event line connected to device (or memcpy) */
 	if ((d40c->dma_cfg.dir ==  STEDMA40_PERIPH_TO_MEM) ||
 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
 
-		__d40_config_set_event(d40c, do_enable, event,
+		__d40_config_set_event(d40c, event_type, event,
 				       D40_CHAN_REG_SSLNK);
 	}
 
 	if (d40c->dma_cfg.dir !=  STEDMA40_PERIPH_TO_MEM) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
 
-		__d40_config_set_event(d40c, do_enable, event,
+		__d40_config_set_event(d40c, event_type, event,
 				       D40_CHAN_REG_SDLNK);
 	}
-
-	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
 }
 
 static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1047,6 +1112,64 @@
 	return val;
 }
 
+static int
+__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 active_status;
+	void __iomem *active_reg;
+
+	if (d40c->phy_chan->num % 2 == 0)
+		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+	else
+		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
+	switch (command) {
+	case D40_DMA_STOP:
+	case D40_DMA_SUSPEND_REQ:
+
+		active_status = (readl(active_reg) &
+				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+				 D40_CHAN_POS(d40c->phy_chan->num);
+
+		if (active_status == D40_DMA_RUN)
+			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
+		else
+			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
+
+		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
+			ret = __d40_execute_command_phy(d40c, command);
+
+		break;
+
+	case D40_DMA_RUN:
+
+		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
+		ret = __d40_execute_command_phy(d40c, command);
+		break;
+
+	case D40_DMA_SUSPENDED:
+		BUG();
+		break;
+	}
+
+	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
+	return ret;
+}
+
+static int d40_channel_execute_command(struct d40_chan *d40c,
+				       enum d40_command command)
+{
+	if (chan_is_logical(d40c))
+		return __d40_execute_command_log(d40c, command);
+	else
+		return __d40_execute_command_phy(d40c, command);
+}
+
 static u32 d40_get_prmo(struct d40_chan *d40c)
 {
 	static const unsigned int phy_map[] = {
@@ -1149,15 +1272,7 @@
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
-	if (res == 0) {
-		if (chan_is_logical(d40c)) {
-			d40_config_set_event(d40c, false);
-			/* Resume the other logical channels if any */
-			if (d40_chan_has_events(d40c))
-				res = d40_channel_execute_command(d40c,
-								  D40_DMA_RUN);
-		}
-	}
+
 	pm_runtime_mark_last_busy(d40c->base->dev);
 	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1174,45 +1289,17 @@
 
 	spin_lock_irqsave(&d40c->lock, flags);
 	pm_runtime_get_sync(d40c->base->dev);
-	if (d40c->base->rev == 0)
-		if (chan_is_logical(d40c)) {
-			res = d40_channel_execute_command(d40c,
-							  D40_DMA_SUSPEND_REQ);
-			goto no_suspend;
-		}
 
 	/* If bytes left to transfer or linked tx resume job */
-	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
-
-		if (chan_is_logical(d40c))
-			d40_config_set_event(d40c, true);
-
+	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
 		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
-	}
 
-no_suspend:
 	pm_runtime_mark_last_busy(d40c->base->dev);
 	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
 
-static int d40_terminate_all(struct d40_chan *chan)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	ret = d40_pause(chan);
-	if (!ret && chan_is_physical(chan))
-		ret = d40_channel_execute_command(chan, D40_DMA_STOP);
-
-	spin_lock_irqsave(&chan->lock, flags);
-	d40_term_all(chan);
-	spin_unlock_irqrestore(&chan->lock, flags);
-
-	return ret;
-}
-
 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct d40_chan *d40c = container_of(tx->chan,
@@ -1232,20 +1319,6 @@
 
 static int d40_start(struct d40_chan *d40c)
 {
-	if (d40c->base->rev == 0) {
-		int err;
-
-		if (chan_is_logical(d40c)) {
-			err = d40_channel_execute_command(d40c,
-							  D40_DMA_SUSPEND_REQ);
-			if (err)
-				return err;
-		}
-	}
-
-	if (chan_is_logical(d40c))
-		d40_config_set_event(d40c, true);
-
 	return d40_channel_execute_command(d40c, D40_DMA_RUN);
 }
 
@@ -1258,10 +1331,10 @@
 	d40d = d40_first_queued(d40c);
 
 	if (d40d != NULL) {
-		if (!d40c->busy)
+		if (!d40c->busy) {
 			d40c->busy = true;
-
-		pm_runtime_get_sync(d40c->base->dev);
+			pm_runtime_get_sync(d40c->base->dev);
+		}
 
 		/* Remove from queue */
 		d40_desc_remove(d40d);
@@ -1388,8 +1461,8 @@
 
 	return;
 
- err:
-	/* Rescue manoeuvre if receiving double interrupts */
+err:
+	/* Rescue manoeuvre if receiving double interrupts */
 	if (d40c->pending_tx > 0)
 		d40c->pending_tx--;
 	spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1770,7 +1843,6 @@
 	return 0;
 }
 
-
 static int d40_free_dma(struct d40_chan *d40c)
 {
 
@@ -1806,44 +1878,19 @@
 	}
 
 	pm_runtime_get_sync(d40c->base->dev);
-	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
-	if (res) {
-		chan_err(d40c, "suspend failed\n");
-		goto out;
-	}
-
-	if (chan_is_logical(d40c)) {
-		/* Release logical channel, deactivate the event line */
-
-		d40_config_set_event(d40c, false);
-		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
-
-		/*
-		 * Check if there are more logical allocation
-		 * on this phy channel.
-		 */
-		if (!d40_alloc_mask_free(phy, is_src, event)) {
-			/* Resume the other logical channels if any */
-			if (d40_chan_has_events(d40c)) {
-				res = d40_channel_execute_command(d40c,
-								  D40_DMA_RUN);
-				if (res)
-					chan_err(d40c,
-						"Executing RUN command\n");
-			}
-			goto out;
-		}
-	} else {
-		(void) d40_alloc_mask_free(phy, is_src, 0);
-	}
-
-	/* Release physical channel */
 	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
 	if (res) {
-		chan_err(d40c, "Failed to stop channel\n");
+		chan_err(d40c, "stop failed\n");
 		goto out;
 	}
 
+	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
+
+	if (chan_is_logical(d40c))
+		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
+	else
+		d40c->base->lookup_phy_chans[phy->num] = NULL;
+
 	if (d40c->busy) {
 		pm_runtime_mark_last_busy(d40c->base->dev);
 		pm_runtime_put_autosuspend(d40c->base->dev);
@@ -1852,7 +1899,6 @@
 	d40c->busy = false;
 	d40c->phy_chan = NULL;
 	d40c->configured = false;
-	d40c->base->lookup_phy_chans[phy->num] = NULL;
 out:
 
 	pm_runtime_mark_last_busy(d40c->base->dev);
@@ -2070,7 +2116,7 @@
 	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
 		desc->cyclic = true;
 
-	if (direction != DMA_NONE) {
+	if (direction != DMA_TRANS_NONE) {
 		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
 
 		if (direction == DMA_DEV_TO_MEM)
@@ -2371,6 +2417,31 @@
 	spin_unlock_irqrestore(&d40c->lock, flags);
 }
 
+static void d40_terminate_all(struct dma_chan *chan)
+{
+	unsigned long flags;
+	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+	int ret;
+
+	spin_lock_irqsave(&d40c->lock, flags);
+
+	pm_runtime_get_sync(d40c->base->dev);
+	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
+	if (ret)
+		chan_err(d40c, "Failed to stop channel\n");
+
+	d40_term_all(d40c);
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
+	if (d40c->busy) {
+		pm_runtime_mark_last_busy(d40c->base->dev);
+		pm_runtime_put_autosuspend(d40c->base->dev);
+	}
+	d40c->busy = false;
+
+	spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
 static int
 dma40_config_to_halfchannel(struct d40_chan *d40c,
 			    struct stedma40_half_channel_info *info,
@@ -2551,7 +2622,8 @@
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
-		return d40_terminate_all(d40c);
+		d40_terminate_all(chan);
+		return 0;
 	case DMA_PAUSE:
 		return d40_pause(d40c);
 	case DMA_RESUME:
@@ -2908,6 +2980,12 @@
 	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
 		 rev, res->start);
 
+	if (rev < 2) {
+		d40_err(&pdev->dev, "hardware revision: %d is not supported\n",
+			rev);
+		goto failure;
+	}
+
 	plat_data = pdev->dev.platform_data;
 
 	/* Count the number of logical channels in use */
@@ -2998,6 +3076,7 @@
 
 	if (base) {
 		kfree(base->lcla_pool.alloc_map);
+		kfree(base->reg_val_backup_chan);
 		kfree(base->lookup_log_chans);
 		kfree(base->lookup_phy_chans);
 		kfree(base->phy_res);
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 8d3d490..51e8e53 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -62,8 +62,6 @@
 #define D40_SREG_ELEM_LOG_LIDX_MASK	(0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
 
 /* Link register */
-#define D40_DEACTIVATE_EVENTLINE	0x0
-#define D40_ACTIVATE_EVENTLINE		0x1
 #define D40_EVENTLINE_POS(i)		(2 * i)
 #define D40_EVENTLINE_MASK(i)		(0x3 << D40_EVENTLINE_POS(i))
 
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index d25599f..47408e8 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -191,6 +191,190 @@
 	}
 }
 
+static bool
+validate_device_path(struct efi_variable *var, int match, u8 *buffer,
+		     unsigned long len)
+{
+	struct efi_generic_dev_path *node;
+	int offset = 0;
+
+	node = (struct efi_generic_dev_path *)buffer;
+
+	if (len < sizeof(*node))
+		return false;
+
+	while (offset <= len - sizeof(*node) &&
+	       node->length >= sizeof(*node) &&
+	       node->length <= len - offset) {
+		offset += node->length;
+
+		if ((node->type == EFI_DEV_END_PATH ||
+		     node->type == EFI_DEV_END_PATH2) &&
+		    node->sub_type == EFI_DEV_END_ENTIRE)
+			return true;
+
+		node = (struct efi_generic_dev_path *)(buffer + offset);
+	}
+
+	/*
+	 * If we're here then either node->length pointed past the end
+	 * of the buffer or we reached the end of the buffer without
+	 * finding a device path end node.
+	 */
+	return false;
+}
+
+static bool
+validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
+		    unsigned long len)
+{
+	/* An array of 16-bit integers */
+	if ((len % 2) != 0)
+		return false;
+
+	return true;
+}
+
+static bool
+validate_load_option(struct efi_variable *var, int match, u8 *buffer,
+		     unsigned long len)
+{
+	u16 filepathlength;
+	int i, desclength = 0, namelen;
+
+	namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
+
+	/* Either "Boot" or "Driver" followed by four digits of hex */
+	for (i = match; i < match+4; i++) {
+		if (var->VariableName[i] > 127 ||
+		    hex_to_bin(var->VariableName[i] & 0xff) < 0)
+			return true;
+	}
+
+	/* Reject it if there's 4 digits of hex and then further content */
+	if (namelen > match + 4)
+		return false;
+
+	/* A valid entry must be at least 8 bytes */
+	if (len < 8)
+		return false;
+
+	filepathlength = buffer[4] | buffer[5] << 8;
+
+	/*
+	 * There's no stored length for the description, so it has to be
+	 * found by hand
+	 */
+	desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+
+	/* Each boot entry must have a descriptor */
+	if (!desclength)
+		return false;
+
+	/*
+	 * If the sum of the length of the description, the claimed filepath
+	 * length and the original header is greater than the length of the
+	 * variable, it's malformed.
+	 */
+	if ((desclength + filepathlength + 6) > len)
+		return false;
+
+	/*
+	 * And, finally, check the filepath
+	 */
+	return validate_device_path(var, match, buffer + desclength + 6,
+				    filepathlength);
+}
+
+static bool
+validate_uint16(struct efi_variable *var, int match, u8 *buffer,
+		unsigned long len)
+{
+	/* A single 16-bit integer */
+	if (len != 2)
+		return false;
+
+	return true;
+}
+
+static bool
+validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
+		      unsigned long len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		if (buffer[i] > 127)
+			return false;
+
+		if (buffer[i] == 0)
+			return true;
+	}
+
+	return false;
+}
+
+struct variable_validate {
+	char *name;
+	bool (*validate)(struct efi_variable *var, int match, u8 *data,
+			 unsigned long len);
+};
+
+static const struct variable_validate variable_validate[] = {
+	{ "BootNext", validate_uint16 },
+	{ "BootOrder", validate_boot_order },
+	{ "DriverOrder", validate_boot_order },
+	{ "Boot*", validate_load_option },
+	{ "Driver*", validate_load_option },
+	{ "ConIn", validate_device_path },
+	{ "ConInDev", validate_device_path },
+	{ "ConOut", validate_device_path },
+	{ "ConOutDev", validate_device_path },
+	{ "ErrOut", validate_device_path },
+	{ "ErrOutDev", validate_device_path },
+	{ "Timeout", validate_uint16 },
+	{ "Lang", validate_ascii_string },
+	{ "PlatformLang", validate_ascii_string },
+	{ "", NULL },
+};
+
+static bool
+validate_var(struct efi_variable *var, u8 *data, unsigned long len)
+{
+	int i;
+	u16 *unicode_name = var->VariableName;
+
+	for (i = 0; variable_validate[i].validate != NULL; i++) {
+		const char *name = variable_validate[i].name;
+		int match;
+
+		for (match = 0; ; match++) {
+			char c = name[match];
+			u16 u = unicode_name[match];
+
+			/* All special variables are plain ascii */
+			if (u > 127)
+				return true;
+
+			/* Wildcard in the matching name means we've matched */
+			if (c == '*')
+				return variable_validate[i].validate(var,
+							     match, data, len);
+
+			/* Case sensitive match */
+			if (c != u)
+				break;
+
+			/* Reached the end of the string while matching */
+			if (!c)
+				return variable_validate[i].validate(var,
+							     match, data, len);
+		}
+	}
+
+	return true;
+}
+
 static efi_status_t
 get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
 {
@@ -324,6 +508,12 @@
 		return -EINVAL;
 	}
 
+	if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+	    validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
+		printk(KERN_ERR "efivars: Malformed variable content\n");
+		return -EINVAL;
+	}
+
 	spin_lock(&efivars->lock);
 	status = efivars->ops->set_variable(new_var->VariableName,
 					    &new_var->VendorGuid,
@@ -626,6 +816,12 @@
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
+	if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+	    validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
+		printk(KERN_ERR "efivars: Malformed variable content\n");
+		return -EINVAL;
+	}
+
 	spin_lock(&efivars->lock);
 
 	/*
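
The name matching performed by validate_var() can be illustrated with a small stand-alone sketch (plain user-space C, not kernel code): a '*' in a table entry matches the remainder of the variable name, anything else must match exactly and case sensitively.

#include <stdbool.h>
#include <stdio.h>

static bool name_matches(const char *pattern, const char *name)
{
	int i;

	for (i = 0; ; i++) {
		char c = pattern[i];

		if (c == '*')		/* wildcard: prefix matched */
			return true;
		if (c != name[i])	/* case sensitive mismatch */
			return false;
		if (!c)			/* both strings ended together */
			return true;
	}
}

int main(void)
{
	printf("%d\n", name_matches("Boot*", "Boot0001"));	/* 1 */
	printf("%d\n", name_matches("BootOrder", "BootOrder"));	/* 1 */
	printf("%d\n", name_matches("Boot*", "Timeout"));	/* 0 */
	return 0;
}
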
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 1adc2ec..4461540 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -965,18 +965,15 @@
 	}
 
 	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
-	_gpio_rmw(base, bank->regs->irqstatus, l,
-					bank->regs->irqenable_inv == false);
-	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0);
-	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0);
+	_gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
 	if (bank->regs->debounce_en)
-		_gpio_rmw(base, bank->regs->debounce_en, 0, 1);
+		__raw_writel(0, base + bank->regs->debounce_en);
 
 	/* Save OE default value (0xffffffff) in the context */
 	bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
 	 /* Initialize interface clk ungated, module enabled */
 	if (bank->regs->ctrl)
-		_gpio_rmw(base, bank->regs->ctrl, 0, 1);
+		__raw_writel(0, base + bank->regs->ctrl);
 }
 
 static __devinit void
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index e8729cc..2cd958e 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -230,16 +230,12 @@
 
 static int pch_irq_type(struct irq_data *d, unsigned int type)
 {
-	u32 im;
-	u32 __iomem *im_reg;
-	u32 ien;
-	u32 im_pos;
-	int ch;
-	unsigned long flags;
-	u32 val;
-	int irq = d->irq;
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct pch_gpio *chip = gc->private;
+	u32 im, im_pos, val;
+	u32 __iomem *im_reg;
+	unsigned long flags;
+	int ch, irq = d->irq;
 
 	ch = irq - chip->irq_base;
 	if (irq <= chip->irq_base + 7) {
@@ -270,30 +266,22 @@
 	case IRQ_TYPE_LEVEL_LOW:
 		val = PCH_LEVEL_L;
 		break;
-	case IRQ_TYPE_PROBE:
-		goto end;
 	default:
-		dev_warn(chip->dev, "%s: unknown type(%dd)",
-			__func__, type);
-		goto end;
+		goto unlock;
 	}
 
 	/* Set interrupt mode */
 	im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4));
 	iowrite32(im | (val << (im_pos * 4)), im_reg);
 
-	/* iclr */
-	iowrite32(BIT(ch), &chip->reg->iclr);
+	/* And the handler */
+	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+		__irq_set_handler_locked(d->irq, handle_level_irq);
+	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
 
-	/* IMASKCLR */
-	iowrite32(BIT(ch), &chip->reg->imaskclr);
-
-	/* Enable interrupt */
-	ien = ioread32(&chip->reg->ien);
-	iowrite32(ien | BIT(ch), &chip->reg->ien);
-end:
+unlock:
 	spin_unlock_irqrestore(&chip->spinlock, flags);
-
 	return 0;
 }
 
@@ -313,18 +301,24 @@
 	iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask);
 }
 
+static void pch_irq_ack(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct pch_gpio *chip = gc->private;
+
+	iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->iclr);
+}
+
 static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
 {
 	struct pch_gpio *chip = dev_id;
 	u32 reg_val = ioread32(&chip->reg->istatus);
-	int i;
-	int ret = IRQ_NONE;
+	int i, ret = IRQ_NONE;
 
 	for (i = 0; i < gpio_pins[chip->ioh]; i++) {
 		if (reg_val & BIT(i)) {
 			dev_dbg(chip->dev, "%s:[%d]:irq=%d  status=0x%x\n",
 				__func__, i, irq, reg_val);
-			iowrite32(BIT(i), &chip->reg->iclr);
 			generic_handle_irq(chip->irq_base + i);
 			ret = IRQ_HANDLED;
 		}
@@ -343,6 +337,7 @@
 	gc->private = chip;
 	ct = gc->chip_types;
 
+	ct->chip.irq_ack = pch_irq_ack;
 	ct->chip.irq_mask = pch_irq_mask;
 	ct->chip.irq_unmask = pch_irq_unmask;
 	ct->chip.irq_set_type = pch_irq_type;
@@ -357,6 +352,7 @@
 	s32 ret;
 	struct pch_gpio *chip;
 	int irq_base;
+	u32 msk;
 
 	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
 	if (chip == NULL)
@@ -408,8 +404,13 @@
 	}
 	chip->irq_base = irq_base;
 
+	/* Mask all interrupts, but enable them */
+	msk = (1 << gpio_pins[chip->ioh]) - 1;
+	iowrite32(msk, &chip->reg->imask);
+	iowrite32(msk, &chip->reg->ien);
+
 	ret = request_irq(pdev->irq, pch_gpio_handler,
-			     IRQF_SHARED, KBUILD_MODNAME, chip);
+			  IRQF_SHARED, KBUILD_MODNAME, chip);
 	if (ret != 0) {
 		dev_err(&pdev->dev,
 			"%s request_irq failed\n", __func__);
@@ -418,8 +419,6 @@
 
 	pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
 
-	/* Initialize interrupt ien register */
-	iowrite32(0, &chip->reg->ien);
 end:
 	return 0;
 
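The pch_irq_type() hunk above keeps the read-modify-write of the interrupt-mode register, where each pin owns a 4-bit mode field (hence the im_pos * 4 shifts). A minimal userspace sketch of that field update follows; FIELD_MASK and the sample values are assumptions, not taken from the driver headers.

/* Sketch of the per-pin 4-bit mode-field update used in pch_irq_type().
 * FIELD_MASK and the register contents are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK 0xF	/* assumed width of one pin's mode field */

static uint32_t set_mode_field(uint32_t reg, unsigned int pin_pos, uint32_t mode)
{
	reg &= ~(FIELD_MASK << (pin_pos * 4));		/* clear this pin's field */
	reg |= (mode & FIELD_MASK) << (pin_pos * 4);	/* write the new mode */
	return reg;
}

int main(void)
{
	uint32_t im = 0x00003210;	/* pretend register contents */

	im = set_mode_field(im, 2, 0x5);
	printf("im = 0x%08x\n", im);	/* 0x00003510: pin 2's field is now 5 */
	return 0;
}
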
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 5689ce6..fc3ace3 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -64,6 +64,7 @@
 	unsigned long	irq_mask;
 	unsigned long	irq_edge_rise;
 	unsigned long	irq_edge_fall;
+	int (*set_wake)(unsigned int gpio, unsigned int on);
 
 #ifdef CONFIG_PM
 	unsigned long	saved_gplr;
@@ -269,7 +270,8 @@
 				(value ? GPSR_OFFSET : GPCR_OFFSET));
 }
 
-static int __devinit pxa_init_gpio_chip(int gpio_end)
+static int __devinit pxa_init_gpio_chip(int gpio_end,
+					int (*set_wake)(unsigned int, unsigned int))
 {
 	int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
 	struct pxa_gpio_chip *chips;
@@ -285,6 +287,7 @@
 
 		sprintf(chips[i].label, "gpio-%d", i);
 		chips[i].regbase = gpio_reg_base + BANK_OFF(i);
+		chips[i].set_wake = set_wake;
 
 		c->base  = gpio;
 		c->label = chips[i].label;
@@ -412,6 +415,17 @@
 	writel_relaxed(gfer, c->regbase + GFER_OFFSET);
 }
 
+static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
+{
+	int gpio = pxa_irq_to_gpio(d->irq);
+	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
+
+	if (c->set_wake)
+		return c->set_wake(gpio, on);
+	else
+		return 0;
+}
+
 static void pxa_unmask_muxed_gpio(struct irq_data *d)
 {
 	int gpio = pxa_irq_to_gpio(d->irq);
@@ -427,6 +441,7 @@
 	.irq_mask	= pxa_mask_muxed_gpio,
 	.irq_unmask	= pxa_unmask_muxed_gpio,
 	.irq_set_type	= pxa_gpio_irq_type,
+	.irq_set_wake	= pxa_gpio_set_wake,
 };
 
 static int pxa_gpio_nums(void)
@@ -471,6 +486,7 @@
 	struct pxa_gpio_chip *c;
 	struct resource *res;
 	struct clk *clk;
+	struct pxa_gpio_platform_data *info;
 	int gpio, irq, ret;
 	int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
 
@@ -516,7 +532,8 @@
 	}
 
 	/* Initialize GPIO chips */
-	pxa_init_gpio_chip(pxa_last_gpio);
+	info = dev_get_platdata(&pdev->dev);
+	pxa_init_gpio_chip(pxa_last_gpio, info ? info->gpio_set_wake : NULL);
 
 	/* clear all GPIO edge detects */
 	for_each_gpio_chip(gpio, c) {
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 19d6fc0..e991d91 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -452,12 +452,14 @@
 };
 #endif
 
+#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
 static struct samsung_gpio_cfg exynos_gpio_cfg = {
 	.set_pull	= exynos_gpio_setpull,
 	.get_pull	= exynos_gpio_getpull,
 	.set_config	= samsung_gpio_setcfg_4bit,
 	.get_config	= samsung_gpio_getcfg_4bit,
 };
+#endif
 
 #if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450)
 static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = {
@@ -2123,8 +2125,8 @@
  * uses the above macro and depends on the banks being listed in order here.
  */
 
-static struct samsung_gpio_chip exynos4_gpios_1[] = {
 #ifdef CONFIG_ARCH_EXYNOS4
+static struct samsung_gpio_chip exynos4_gpios_1[] = {
 	{
 		.chip	= {
 			.base	= EXYNOS4_GPA0(0),
@@ -2222,11 +2224,11 @@
 			.label	= "GPF3",
 		},
 	},
-#endif
 };
+#endif
 
-static struct samsung_gpio_chip exynos4_gpios_2[] = {
 #ifdef CONFIG_ARCH_EXYNOS4
+static struct samsung_gpio_chip exynos4_gpios_2[] = {
 	{
 		.chip	= {
 			.base	= EXYNOS4_GPJ0(0),
@@ -2367,11 +2369,11 @@
 			.to_irq	= samsung_gpiolib_to_irq,
 		},
 	},
-#endif
 };
+#endif
 
-static struct samsung_gpio_chip exynos4_gpios_3[] = {
 #ifdef CONFIG_ARCH_EXYNOS4
+static struct samsung_gpio_chip exynos4_gpios_3[] = {
 	{
 		.chip	= {
 			.base	= EXYNOS4_GPZ(0),
@@ -2379,8 +2381,8 @@
 			.label	= "GPZ",
 		},
 	},
-#endif
 };
+#endif
 
 #ifdef CONFIG_ARCH_EXYNOS5
 static struct samsung_gpio_chip exynos5_gpios_1[] = {
@@ -2719,7 +2721,9 @@
 {
 	struct samsung_gpio_chip *chip;
 	int i, nr_chips;
+#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
 	void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4;
+#endif
 	int group = 0;
 
 	samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
@@ -2971,6 +2975,7 @@
 
 	return 0;
 
+#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
 err_ioremap4:
 	iounmap(gpio_base3);
 err_ioremap3:
@@ -2979,6 +2984,7 @@
 	iounmap(gpio_base1);
 err_ioremap1:
 	return -ENOMEM;
+#endif
 }
 core_initcall(samsung_gpiolib_init);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 392ce71..1dffa83 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -149,22 +149,12 @@
 	unsigned long pfn;
 
 	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		unsigned long usize = buf->size;
-
 		if (!buf->pages)
 			return -EINTR;
 
-		while (usize > 0) {
-			pfn = page_to_pfn(buf->pages[page_offset++]);
-			vm_insert_mixed(vma, f_vaddr, pfn);
-			f_vaddr += PAGE_SIZE;
-			usize -= PAGE_SIZE;
-		}
-
-		return 0;
-	}
-
-	pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
+		pfn = page_to_pfn(buf->pages[page_offset++]);
+	} else
+		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
 
 	return vm_insert_mixed(vma, f_vaddr, pfn);
 }
@@ -524,6 +514,8 @@
 		if (!buffer->pages)
 			return -EINVAL;
 
+		vma->vm_flags |= VM_MIXEDMAP;
+
 		do {
 			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
 			if (ret) {
@@ -710,7 +702,6 @@
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 	struct drm_device *dev = obj->dev;
 	unsigned long f_vaddr;
 	pgoff_t page_offset;
@@ -722,21 +713,10 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	/*
-	 * allocate all pages as desired size if user wants to allocate
-	 * physically non-continuous memory.
-	 */
-	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-		ret = exynos_drm_gem_get_pages(obj);
-		if (ret < 0)
-			goto err;
-	}
-
 	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
 	if (ret < 0)
 		DRM_ERROR("failed to map pages.\n");
 
-err:
 	mutex_unlock(&dev->struct_mutex);
 
 	return convert_to_vm_err_msg(ret);
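The reworked fault handler computes one pfn per fault: page_to_pfn() of the backing page for non-contiguous buffers, or an offset from the DMA address for contiguous ones. A rough illustration of the contiguous-case arithmetic, with an invented DMA address and the usual 4 KiB page size:

/* Illustration of pfn = (dma_addr >> PAGE_SHIFT) + page_offset for a
 * physically contiguous buffer.  Addresses here are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumes 4 KiB pages */

int main(void)
{
	uint64_t dma_addr = 0x40100000;	/* start of the contiguous buffer */
	uint64_t page_offset = 3;	/* faulting page within the buffer */
	uint64_t pfn = (dma_addr >> PAGE_SHIFT) + page_offset;

	printf("pfn = 0x%llx\n", (unsigned long long)pfn);
	/* 0x40100000 >> 12 = 0x40100, + 3 = 0x40103 */
	return 0;
}
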
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b505b70..e6162a1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1224,6 +1224,9 @@
 	unsigned long temp, chipset, gfx;
 	int ret;
 
+	if (!IS_GEN5(dev))
+		return -ENODEV;
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 785f67f..ba60f3c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1701,6 +1701,9 @@
 	unsigned long diffms;
 	u32 count;
 
+	if (dev_priv->info->gen != 5)
+		return;
+
 	getrawmonotonic(&now);
 	diff1 = timespec_sub(now, dev_priv->last_time2);
 
@@ -2121,12 +2124,14 @@
 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
 
-	spin_lock(&mchdev_lock);
-	i915_mch_dev = dev_priv;
-	dev_priv->mchdev_lock = &mchdev_lock;
-	spin_unlock(&mchdev_lock);
+	if (IS_GEN5(dev)) {
+		spin_lock(&mchdev_lock);
+		i915_mch_dev = dev_priv;
+		dev_priv->mchdev_lock = &mchdev_lock;
+		spin_unlock(&mchdev_lock);
 
-	ips_ping_for_i915_load();
+		ips_ping_for_i915_load();
+	}
 
 	return 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f51a696..de43194 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1133,6 +1133,11 @@
 			return -EINVAL;
 		}
 
+		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+			DRM_DEBUG("execbuf with %u cliprects\n",
+				  args->num_cliprects);
+			return -EINVAL;
+		}
 		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
 				    GFP_KERNEL);
 		if (cliprects == NULL) {
@@ -1404,7 +1409,8 @@
 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
 	int ret;
 
-	if (args->buffer_count < 1) {
+	if (args->buffer_count < 1 ||
+	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
 		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
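Both new checks above guard the count * sizeof(element) multiplications fed to kmalloc(): if count exceeds UINT_MAX / sizeof(element), the product can wrap and a too-small buffer gets allocated. A standalone demonstration of the idiom:

/* Overflow-safe sizing check, the same idiom as the execbuffer patch.
 * The wrap matters wherever the multiplication is done in 32 bits.
 */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

struct clip { int x1, y1, x2, y2; };	/* stand-in element type */

static void *alloc_array(unsigned int count)
{
	if (count > UINT_MAX / sizeof(struct clip))
		return NULL;	/* count * size would overflow, reject it */
	return malloc(count * sizeof(struct clip));
}

int main(void)
{
	void *ok = alloc_array(16);		/* small request: succeeds */
	void *bad = alloc_array(UINT_MAX / 2);	/* rejected before malloc: NULL */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}
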
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b4bb1ef..9d24d65 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -568,6 +568,7 @@
 #define   CM0_MASK_SHIFT          16
 #define   CM0_IZ_OPT_DISABLE      (1<<6)
 #define   CM0_ZR_OPT_DISABLE      (1<<5)
+#define	  CM0_STC_EVICT_DISABLE_LRA_SNB	(1<<5)
 #define   CM0_DEPTH_EVICT_DISABLE (1<<4)
 #define   CM0_COLOR_EVICT_DISABLE (1<<3)
 #define   CM0_DEPTH_WRITE_DISABLE (1<<1)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4d3d736..90b9793 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -430,8 +430,8 @@
 {
 	struct drm_device *dev = connector->dev;
 	struct intel_crt *crt = intel_attached_crt(connector);
-	struct drm_crtc *crtc;
 	enum drm_connector_status status;
+	struct intel_load_detect_pipe tmp;
 
 	if (I915_HAS_HOTPLUG(dev)) {
 		if (intel_crt_detect_hotplug(connector)) {
@@ -450,23 +450,16 @@
 		return connector->status;
 
 	/* for pre-945g platforms use load detect */
-	crtc = crt->base.base.crtc;
-	if (crtc && crtc->enabled) {
-		status = intel_crt_load_detect(crt);
-	} else {
-		struct intel_load_detect_pipe tmp;
-
-		if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
-					       &tmp)) {
-			if (intel_crt_detect_ddc(connector))
-				status = connector_status_connected;
-			else
-				status = intel_crt_load_detect(crt);
-			intel_release_load_detect_pipe(&crt->base, connector,
-						       &tmp);
-		} else
-			status = connector_status_unknown;
-	}
+	if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
+				       &tmp)) {
+		if (intel_crt_detect_ddc(connector))
+			status = connector_status_connected;
+		else
+			status = intel_crt_load_detect(crt);
+		intel_release_load_detect_pipe(&crt->base, connector,
+					       &tmp);
+	} else
+		status = connector_status_unknown;
 
 	return status;
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5908cd5..1b1cf3b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7072,9 +7072,6 @@
 	struct drm_device *dev = crtc->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int dpll_reg = DPLL(pipe);
-	int dpll = I915_READ(dpll_reg);
 
 	if (HAS_PCH_SPLIT(dev))
 		return;
@@ -7087,10 +7084,15 @@
 	 * the manual case.
 	 */
 	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+		int pipe = intel_crtc->pipe;
+		int dpll_reg = DPLL(pipe);
+		u32 dpll;
+
 		DRM_DEBUG_DRIVER("downclocking LVDS\n");
 
 		assert_panel_unlocked(dev_priv, pipe);
 
+		dpll = I915_READ(dpll_reg);
 		dpll |= DISPLAY_RATE_SELECT_FPA1;
 		I915_WRITE(dpll_reg, dpll);
 		intel_wait_for_vblank(dev, pipe);
@@ -7098,7 +7100,6 @@
 		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
 			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
 	}
-
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index cae3e5f..2d7f47b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -136,7 +136,7 @@
 
 	val &= ~VIDEO_DIP_SELECT_MASK;
 
-	I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
+	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
 
 	for (i = 0; i < len; i += 4) {
 		I915_WRITE(VIDEO_DIP_DATA, *data);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30e2c82..9c71183 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -750,7 +750,7 @@
 		.ident = "Hewlett-Packard t5745",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-			DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
 		},
 	},
 	{
@@ -758,7 +758,7 @@
 		.ident = "Hewlett-Packard st5747",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-			DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
 		},
 	},
 	{
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f75806e..62892a8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -398,6 +398,17 @@
 			return ret;
 	}
 
+
+	if (IS_GEN6(dev)) {
+		/* From the Sandybridge PRM, volume 1 part 3, page 24:
+		 * "If this bit is set, STCunit will have LRA as replacement
+		 *  policy. [...] This bit must be reset.  LRA replacement
+		 *  policy is not supported."
+		 */
+		I915_WRITE(CACHE_MODE_0,
+			   CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+	}
+
 	if (INTEL_INFO(dev)->gen >= 6) {
 		I915_WRITE(INSTPM,
 			   INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
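CACHE_MODE_0 and INSTPM are masked registers on these GPUs: the upper 16 bits of a write select which of the lower 16 bits take effect, so bit << 16 | bit sets a bit while bit << 16 alone (as the new CACHE_MODE_0 write does, with CM0_MASK_SHIFT = 16) clears it. A small sketch of that encoding; the helper names are invented:

/* Masked-register write encoding: upper 16 bits = write-enable mask,
 * lower 16 bits = new values.  Helper names are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t masked_bit_set(uint32_t bit)
{
	return (bit << 16) | bit;	/* enable writing the bit, set it to 1 */
}

static uint32_t masked_bit_clear(uint32_t bit)
{
	return bit << 16;		/* enable writing the bit, leave it 0 */
}

int main(void)
{
	uint32_t stc_evict_lra = 1 << 5;	/* CM0_STC_EVICT_DISABLE_LRA_SNB */

	printf("set:   0x%08x\n", masked_bit_set(stc_evict_lra));	/* 0x00200020 */
	printf("clear: 0x%08x\n", masked_bit_clear(stc_evict_lra));	/* 0x00200000 */
	return 0;
}
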
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e36b171..ae5e748 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -731,6 +731,7 @@
 	uint16_t width, height;
 	uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
 	uint16_t h_sync_offset, v_sync_offset;
+	int mode_clock;
 
 	width = mode->crtc_hdisplay;
 	height = mode->crtc_vdisplay;
@@ -745,7 +746,11 @@
 	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
 	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
 
-	dtd->part1.clock = mode->clock / 10;
+	mode_clock = mode->clock;
+	mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
+	mode_clock /= 10;
+	dtd->part1.clock = mode_clock;
+
 	dtd->part1.h_active = width & 0xff;
 	dtd->part1.h_blank = h_blank_len & 0xff;
 	dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
@@ -996,7 +1001,7 @@
 	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
 	u32 sdvox;
 	struct intel_sdvo_in_out_map in_out;
-	struct intel_sdvo_dtd input_dtd;
+	struct intel_sdvo_dtd input_dtd, output_dtd;
 	int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 	int rate;
 
@@ -1021,20 +1026,13 @@
 					  intel_sdvo->attached_output))
 		return;
 
-	/* We have tried to get input timing in mode_fixup, and filled into
-	 * adjusted_mode.
-	 */
-	if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
-		input_dtd = intel_sdvo->input_dtd;
-	} else {
-		/* Set the output timing to the screen */
-		if (!intel_sdvo_set_target_output(intel_sdvo,
-						  intel_sdvo->attached_output))
-			return;
-
-		intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
-		(void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
-	}
+	/* lvds has a special fixed output timing. */
+	if (intel_sdvo->is_lvds)
+		intel_sdvo_get_dtd_from_mode(&output_dtd,
+					     intel_sdvo->sdvo_lvds_fixed_mode);
+	else
+		intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+	(void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
 
 	/* Set the input timing to the screen. Assume always input 0. */
 	if (!intel_sdvo_set_target_input(intel_sdvo))
@@ -1052,6 +1050,10 @@
 	    !intel_sdvo_set_tv_format(intel_sdvo))
 		return;
 
+	/* We have tried to get input timing in mode_fixup, and filled into
+	 * adjusted_mode.
+	 */
+	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
 	(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
 
 	switch (pixel_multiplier) {
@@ -1218,8 +1220,14 @@
 
 static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
 {
+	struct drm_device *dev = intel_sdvo->base.base.dev;
 	u8 response[2];
 
+	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+	 * on the line. */
+	if (IS_I945G(dev) || IS_I945GM(dev))
+		return false;
+
 	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
 				    &response, 2) && response[0];
 }
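The DTD clock fix divides the mode clock by the pixel multiplier before converting to the DTD's 10 kHz units, with x ?: 1 guarding a zero multiplier. A rough worked example of the arithmetic, using made-up numbers:

/* Worked example of dtd clock = mode_clock / pixel_multiplier / 10.
 * mode->clock is in kHz; the DTD clock field ends up in 10 kHz units.
 * The values below are invented for illustration.
 */
#include <stdio.h>

static int dtd_clock(int mode_clock_khz, int pixel_multiplier)
{
	int clock = mode_clock_khz;

	clock /= pixel_multiplier ? pixel_multiplier : 1;	/* same as "?: 1" */
	clock /= 10;
	return clock;
}

int main(void)
{
	/* e.g. a 25175 kHz dot clock sent with a 2x pixel multiplier */
	printf("dtd clock = %d (x 10 kHz)\n", dtd_clock(25175, 2));	/* 1258 */
	printf("dtd clock = %d (x 10 kHz)\n", dtd_clock(25175, 0));	/* 2517 */
	return 0;
}
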
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 7814a76..284bd25 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -270,7 +270,7 @@
 	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
 	struct pci_dev *pdev = NULL;
 	int has_dsm = 0;
-	int has_optimus;
+	int has_optimus = 0;
 	int vga_count = 0;
 	bool guid_valid;
 	int retval;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 80963d0..0be4a81 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6156,10 +6156,14 @@
 
 	/* heuristic: if we ever get a non-zero connector field, assume
 	 * that all the indices are valid and we don't need fake them.
+	 *
+	 * and, as usual, a blacklist of boards with bad bios data..
 	 */
-	for (i = 0; i < dcbt->entries; i++) {
-		if (dcbt->entry[i].connector)
-			return;
+	if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) {
+		for (i = 0; i < dcbt->entries; i++) {
+			if (dcbt->entry[i].connector)
+				return;
+		}
 	}
 
 	/* no useful connector info available, we need to make it up
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
index 59ea1c1..c3de363 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -32,7 +32,9 @@
 hdmi_sor(struct drm_encoder *encoder)
 {
 	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
-	if (dev_priv->chipset < 0xa3)
+	if (dev_priv->chipset <  0xa3 ||
+	    dev_priv->chipset == 0xaa ||
+	    dev_priv->chipset == 0xac)
 		return false;
 	return true;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index e2be95a..77e5646 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -29,10 +29,6 @@
 #include "nouveau_i2c.h"
 #include "nouveau_hw.h"
 
-#define T_TIMEOUT  2200000
-#define T_RISEFALL 1000
-#define T_HOLD     5000
-
 static void
 i2c_drive_scl(void *data, int state)
 {
@@ -113,175 +109,6 @@
 	return 0;
 }
 
-static void
-i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
-{
-	udelay((nsec + 500) / 1000);
-}
-
-static bool
-i2c_raise_scl(struct nouveau_i2c_chan *port)
-{
-	u32 timeout = T_TIMEOUT / T_RISEFALL;
-
-	i2c_drive_scl(port, 1);
-	do {
-		i2c_delay(port, T_RISEFALL);
-	} while (!i2c_sense_scl(port) && --timeout);
-
-	return timeout != 0;
-}
-
-static int
-i2c_start(struct nouveau_i2c_chan *port)
-{
-	int ret = 0;
-
-	port->state  = i2c_sense_scl(port);
-	port->state |= i2c_sense_sda(port) << 1;
-	if (port->state != 3) {
-		i2c_drive_scl(port, 0);
-		i2c_drive_sda(port, 1);
-		if (!i2c_raise_scl(port))
-			ret = -EBUSY;
-	}
-
-	i2c_drive_sda(port, 0);
-	i2c_delay(port, T_HOLD);
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
-	return ret;
-}
-
-static void
-i2c_stop(struct nouveau_i2c_chan *port)
-{
-	i2c_drive_scl(port, 0);
-	i2c_drive_sda(port, 0);
-	i2c_delay(port, T_RISEFALL);
-
-	i2c_drive_scl(port, 1);
-	i2c_delay(port, T_HOLD);
-	i2c_drive_sda(port, 1);
-	i2c_delay(port, T_HOLD);
-}
-
-static int
-i2c_bitw(struct nouveau_i2c_chan *port, int sda)
-{
-	i2c_drive_sda(port, sda);
-	i2c_delay(port, T_RISEFALL);
-
-	if (!i2c_raise_scl(port))
-		return -ETIMEDOUT;
-	i2c_delay(port, T_HOLD);
-
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
-	return 0;
-}
-
-static int
-i2c_bitr(struct nouveau_i2c_chan *port)
-{
-	int sda;
-
-	i2c_drive_sda(port, 1);
-	i2c_delay(port, T_RISEFALL);
-
-	if (!i2c_raise_scl(port))
-		return -ETIMEDOUT;
-	i2c_delay(port, T_HOLD);
-
-	sda = i2c_sense_sda(port);
-
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
-	return sda;
-}
-
-static int
-i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
-{
-	int i, bit;
-
-	*byte = 0;
-	for (i = 7; i >= 0; i--) {
-		bit = i2c_bitr(port);
-		if (bit < 0)
-			return bit;
-		*byte |= bit << i;
-	}
-
-	return i2c_bitw(port, last ? 1 : 0);
-}
-
-static int
-i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
-{
-	int i, ret;
-	for (i = 7; i >= 0; i--) {
-		ret = i2c_bitw(port, !!(byte & (1 << i)));
-		if (ret < 0)
-			return ret;
-	}
-
-	ret = i2c_bitr(port);
-	if (ret == 1) /* nack */
-		ret = -EIO;
-	return ret;
-}
-
-static int
-i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
-{
-	u32 addr = msg->addr << 1;
-	if (msg->flags & I2C_M_RD)
-		addr |= 1;
-	return i2c_put_byte(port, addr);
-}
-
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
-	struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
-	struct i2c_msg *msg = msgs;
-	int ret = 0, mcnt = num;
-
-	while (!ret && mcnt--) {
-		u8 remaining = msg->len;
-		u8 *ptr = msg->buf;
-
-		ret = i2c_start(port);
-		if (ret == 0)
-			ret = i2c_addr(port, msg);
-
-		if (msg->flags & I2C_M_RD) {
-			while (!ret && remaining--)
-				ret = i2c_get_byte(port, ptr++, !remaining);
-		} else {
-			while (!ret && remaining--)
-				ret = i2c_put_byte(port, *ptr++);
-		}
-
-		msg++;
-	}
-
-	i2c_stop(port);
-	return (ret < 0) ? ret : num;
-}
-
-static u32
-i2c_bit_func(struct i2c_adapter *adap)
-{
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-const struct i2c_algorithm nouveau_i2c_bit_algo = {
-	.master_xfer = i2c_bit_xfer,
-	.functionality = i2c_bit_func
-};
-
 static const uint32_t nv50_i2c_port[] = {
 	0x00e138, 0x00e150, 0x00e168, 0x00e180,
 	0x00e254, 0x00e274, 0x00e764, 0x00e780,
@@ -384,12 +211,10 @@
 		case 0: /* NV04:NV50 */
 			port->drive = entry[0];
 			port->sense = entry[1];
-			port->adapter.algo = &nouveau_i2c_bit_algo;
 			break;
 		case 4: /* NV4E */
 			port->drive = 0x600800 + entry[1];
 			port->sense = port->drive;
-			port->adapter.algo = &nouveau_i2c_bit_algo;
 			break;
 		case 5: /* NV50- */
 			port->drive = entry[0] & 0x0f;
@@ -402,7 +227,6 @@
 				port->drive = 0x00d014 + (port->drive * 0x20);
 				port->sense = port->drive;
 			}
-			port->adapter.algo = &nouveau_i2c_bit_algo;
 			break;
 		case 6: /* NV50- DP AUX */
 			port->drive = entry[0];
@@ -413,7 +237,7 @@
 			break;
 		}
 
-		if (!port->adapter.algo) {
+		if (!port->adapter.algo && !port->drive) {
 			NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
 				 i, port->type, port->drive, port->sense);
 			kfree(port);
@@ -429,7 +253,26 @@
 		port->dcb = ROM32(entry[0]);
 		i2c_set_adapdata(&port->adapter, i2c);
 
-		ret = i2c_add_adapter(&port->adapter);
+		if (port->adapter.algo != &nouveau_dp_i2c_algo) {
+			port->adapter.algo_data = &port->bit;
+			port->bit.udelay = 10;
+			port->bit.timeout = usecs_to_jiffies(2200);
+			port->bit.data = port;
+			port->bit.setsda = i2c_drive_sda;
+			port->bit.setscl = i2c_drive_scl;
+			port->bit.getsda = i2c_sense_sda;
+			port->bit.getscl = i2c_sense_scl;
+
+			i2c_drive_scl(port, 0);
+			i2c_drive_sda(port, 1);
+			i2c_drive_scl(port, 1);
+
+			ret = i2c_bit_add_bus(&port->adapter);
+		} else {
+			port->adapter.algo = &nouveau_dp_i2c_algo;
+			ret = i2c_add_adapter(&port->adapter);
+		}
+
 		if (ret) {
 			NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
 			kfree(port);
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index 4d2e4e9..1d08389 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -34,6 +34,7 @@
 struct nouveau_i2c_chan {
 	struct i2c_adapter adapter;
 	struct drm_device *dev;
+	struct i2c_algo_bit_data bit;
 	struct list_head head;
 	u8  index;
 	u8  type;
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 550ad3f..9d79180 100644
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -65,7 +65,7 @@
 	if (line < 10) {
 		line = (line - 2) * 4;
 		reg  = NV_PCRTC_GPIO_EXT;
-		mask = 0x00000003 << ((line - 2) * 4);
+		mask = 0x00000003;
 		data = (dir << 1) | out;
 	} else
 	if (line < 14) {
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 5bf5503..f704e94 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -54,6 +54,11 @@
 			nvc0_mfb_subp_isr(dev, unit, subp);
 		units &= ~(1 << unit);
 	}
+
+	/* we do something horribly wrong and upset PMFB a lot, so mask off
+	 * interrupts from it after the first one until it's fixed
+	 */
+	nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
 }
 
 static void
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b5ff1f7..af1054f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -575,6 +575,9 @@
 
 		if (rdev->family < CHIP_RV770)
 			pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+		/* use frac fb div on APUs */
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+			pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 	} else {
 		pll->flags |= RADEON_PLL_LEGACY;
 
@@ -955,8 +958,8 @@
 		break;
 	}
 
-	if (radeon_encoder->active_device &
-	    (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+	if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
 		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 		struct drm_connector *connector =
 			radeon_get_connector_for_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ea7df16e2..5992502 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -241,8 +241,8 @@
 				rdev->wb.use_event = true;
 		}
 	}
-	/* always use writeback/events on NI */
-	if (ASIC_IS_DCE5(rdev)) {
+	/* always use writeback/events on NI, APUs */
+	if (rdev->family >= CHIP_PALM) {
 		rdev->wb.enabled = true;
 		rdev->wb.use_event = true;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8086c96..0a1d4bd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@
 		radeon_legacy_init_crtc(dev, radeon_crtc);
 }
 
-static const char *encoder_names[36] = {
+static const char *encoder_names[37] = {
 	"NONE",
 	"INTERNAL_LVDS",
 	"INTERNAL_TMDS1",
@@ -570,6 +570,7 @@
 	"INTERNAL_UNIPHY2",
 	"NUTMEG",
 	"TRAVIS",
+	"INTERNAL_VCE"
 };
 
 static const char *connector_names[15] = {
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 8af25a0..7233c88 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -30,37 +30,6 @@
 #include "hyperv_vmbus.h"
 
 
-/* #defines */
-
-
-/* Amount of space to write to */
-#define BYTES_AVAIL_TO_WRITE(r, w, z) \
-	((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w))
-
-
-/*
- *
- * hv_get_ringbuffer_availbytes()
- *
- * Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static inline void
-hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
-			  u32 *read, u32 *write)
-{
-	u32 read_loc, write_loc;
-
-	smp_read_barrier_depends();
-
-	/* Capture the read/write indices before they changed */
-	read_loc = rbi->ring_buffer->read_index;
-	write_loc = rbi->ring_buffer->write_index;
-
-	*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
-	*read = rbi->ring_datasize - *write;
-}
-
 /*
  * hv_get_next_write_location()
  *
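The removed BYTES_AVAIL_TO_WRITE macro computed the usual circular-buffer free space from the read and write indices, with the readable bytes being the data size minus the writable bytes. A standalone version of that arithmetic for reference:

/* The ring-buffer space calculation the removed macro expressed:
 * writable = (w >= r) ? size - (w - r) : r - w;  readable = size - writable.
 */
#include <stdio.h>
#include <stdint.h>

static void ring_avail(uint32_t read_idx, uint32_t write_idx, uint32_t size,
		       uint32_t *avail_rd, uint32_t *avail_wr)
{
	*avail_wr = (write_idx >= read_idx) ?
		    size - (write_idx - read_idx) : read_idx - write_idx;
	*avail_rd = size - *avail_wr;
}

int main(void)
{
	uint32_t avail_rd, avail_wr;

	ring_avail(100, 300, 4096, &avail_rd, &avail_wr);
	printf("read=%u write=%u\n", avail_rd, avail_wr);	/* read=200 write=3896 */

	ring_avail(300, 100, 4096, &avail_rd, &avail_wr);
	printf("read=%u write=%u\n", avail_rd, avail_wr);	/* read=3896 write=200 */
	return 0;
}
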
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index ce43642..f85ce70 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -47,7 +47,7 @@
 	u16 rx ____cacheline_aligned;
 };
 
-static int ad7314_spi_read(struct ad7314_data *chip, s16 *data)
+static int ad7314_spi_read(struct ad7314_data *chip)
 {
 	int ret;
 
@@ -57,9 +57,7 @@
 		return ret;
 	}
 
-	*data = be16_to_cpu(chip->rx);
-
-	return ret;
+	return be16_to_cpu(chip->rx);
 }
 
 static ssize_t ad7314_show_temperature(struct device *dev,
@@ -70,12 +68,12 @@
 	s16 data;
 	int ret;
 
-	ret = ad7314_spi_read(chip, &data);
+	ret = ad7314_spi_read(chip);
 	if (ret < 0)
 		return ret;
 	switch (spi_get_device_id(chip->spi_dev)->driver_data) {
 	case ad7314:
-		data = (data & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET;
+		data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET;
 		data = (data << 6) >> 6;
 
 		return sprintf(buf, "%d\n", 250 * data);
@@ -86,7 +84,7 @@
 		 * with a sign bit - which is a 14 bit 2's complement
 		 * register.  1lsb - 31.25 milli degrees centigrade
 		 */
-		data &= ADT7301_TEMP_MASK;
+		data = ret & ADT7301_TEMP_MASK;
 		data = (data << 2) >> 2;
 
 		return sprintf(buf, "%d\n",
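The AD7314 branch above extracts a 10-bit two's-complement reading and scales it by 250 millidegrees per lsb. A standalone sketch of that conversion; the sign extension here is done with an explicit 16-bit cast around the shift pair, and the raw codes are invented:

/* 10-bit two's-complement temperature code -> millidegrees C, 0.25 C/lsb,
 * mirroring the AD7314 branch.  Raw codes below are invented.
 */
#include <stdio.h>
#include <stdint.h>

static int ad7314_code_to_mdegc(uint16_t code10)
{
	/* keep the shift pair in 16-bit arithmetic so bit 9 lands on the sign bit */
	int16_t t = (int16_t)(code10 << 6);

	t >>= 6;		/* assumes arithmetic right shift on signed types */
	return 250 * t;		/* 1 lsb = 0.25 degC = 250 mdegC */
}

int main(void)
{
	printf("%d mC\n", ad7314_code_to_mdegc(0x064));	/* +100 -> 25000 */
	printf("%d mC\n", ad7314_code_to_mdegc(0x3D8));	/* -40  -> -10000 */
	return 0;
}
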
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 0d3141f..b9d5123 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -52,7 +52,7 @@
 MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 
 #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES		16	/* Number of Real cores per cpu */
+#define NUM_REAL_CORES		32	/* Number of Real cores per cpu */
 #define CORETEMP_NAME_LENGTH	17	/* String Length of attrs */
 #define MAX_CORE_ATTRS		4	/* Maximum no of basic attrs */
 #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
@@ -709,6 +709,10 @@
 
 	indx = TO_ATTR_NO(cpu);
 
+	/* The core id is too big, just return */
+	if (indx > MAX_CORE_DATA - 1)
+		return;
+
 	if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
 		coretemp_remove_core(pdata, &pdev->dev, indx);
 
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 37a8fc9..e8e18ca 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -128,17 +128,20 @@
  * counter saturations resulting in bogus power readings.
  * We correct this value ourselves to cope with older BIOSes.
  */
+static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+	{ 0 }
+};
+
 static void __devinit tweak_runavg_range(struct pci_dev *pdev)
 {
 	u32 val;
-	const struct pci_device_id affected_device = {
-		PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) };
 
 	/*
 	 * let this quirk apply only to the current version of the
 	 * northbridge, since future versions may change the behavior
 	 */
-	if (!pci_match_id(&affected_device, pdev))
+	if (!pci_match_id(affected_device, pdev))
 		return;
 
 	pci_bus_read_config_dword(pdev->bus,
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index f086131..c811289b 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -324,7 +324,7 @@
 {
 	long ret;
 	ret = wait_event_timeout(pch_event,
-			(adap->pch_event_flag != 0), msecs_to_jiffies(50));
+			(adap->pch_event_flag != 0), msecs_to_jiffies(1000));
 
 	if (ret == 0) {
 		pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
@@ -1063,6 +1063,6 @@
 
 MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.lapis-semi.com>");
+MODULE_AUTHOR("Tomoya MORINAGA. <tomoya.rohm@gmail.com>");
 module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
 module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 3d471d5..76b8af4 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -227,6 +227,7 @@
 		return -EINVAL;
 
 	init_completion(&i2c->cmd_complete);
+	i2c->cmd_err = 0;
 
 	flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
 
@@ -252,6 +253,9 @@
 
 	if (i2c->cmd_err == -ENXIO)
 		mxs_i2c_reset(i2c);
+	else
+		writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
+				i2c->regs + MXS_I2C_QUEUECTRL_CLR);
 
 	dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
 
@@ -299,8 +303,6 @@
 		    MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ))
 		/* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */
 		i2c->cmd_err = -EIO;
-	else
-		i2c->cmd_err = 0;
 
 	is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) &
 		MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0;
@@ -384,8 +386,6 @@
 	if (ret)
 		return -EBUSY;
 
-	writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
-			i2c->regs + MXS_I2C_QUEUECTRL_CLR);
 	writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
 
 	platform_set_drvdata(pdev, NULL);
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 04be9f8..eb8ad53 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -546,8 +546,7 @@
 {
 	struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
 
-	/* FIXME: shouldn't this be clk_disable? */
-	clk_enable(alg_data->clk);
+	clk_disable(alg_data->clk);
 
 	return 0;
 }
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index e978635..55e5ea6 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -516,6 +516,14 @@
 	if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
 		return 0;
 
+	/*
+	 * NACK interrupt is generated before the I2C controller generates the
+	 * STOP condition on the bus. So wait for 2 clock periods before resetting
+	 * the controller so that STOP condition has been delivered properly.
+	 */
+	if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
+		udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+
 	tegra_i2c_init(i2c_dev);
 	if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
 		if (msg->flags & I2C_M_IGNORE_NAK)
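The added delay gives the controller two I2C clock periods to put the STOP condition on the bus before it is reset; DIV_ROUND_UP(2 * 1000000, bus_clk_rate) is that interval in microseconds, rounded up. A quick check of the values for common bus speeds:

/* Two I2C clock periods, in microseconds, rounded up: the argument the
 * patch passes to udelay().
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned long rates[] = { 100000, 400000, 1000000 };	/* Hz */
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%lu Hz -> %lu us\n", rates[i],
		       DIV_ROUND_UP(2UL * 1000000, rates[i]));
	/* 100 kHz -> 20 us, 400 kHz -> 5 us, 1 MHz -> 2 us */
	return 0;
}
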
diff --git a/drivers/ieee802154/Kconfig b/drivers/ieee802154/Kconfig
index 9b9f43a..15c0640 100644
--- a/drivers/ieee802154/Kconfig
+++ b/drivers/ieee802154/Kconfig
@@ -19,4 +19,12 @@
 
           This driver can also be built as a module. To do so say M here.
 	  The module will be called 'fakehard'.
+config IEEE802154_FAKELB
+	depends on IEEE802154_DRIVERS && MAC802154
+	tristate "IEEE 802.15.4 loopback driver"
+	---help---
+	  Say Y here to enable the fake driver that can emulate a net
+	  of several interconnected radio devices.
 
+	  This driver can also be built as a module. To do so say M here.
+	  The module will be called 'fakelb'.
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
index 800a389..ea784ea 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/ieee802154/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
+obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
diff --git a/drivers/ieee802154/fakelb.c b/drivers/ieee802154/fakelb.c
new file mode 100644
index 0000000..e7456fc
--- /dev/null
+++ b/drivers/ieee802154/fakelb.c
@@ -0,0 +1,294 @@
+/*
+ * Loopback IEEE 802.15.4 interface
+ *
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+static int numlbs = 1;
+
+struct fakelb_dev_priv {
+	struct ieee802154_dev *dev;
+
+	struct list_head list;
+	struct fakelb_priv *fake;
+
+	spinlock_t lock;
+	bool working;
+};
+
+struct fakelb_priv {
+	struct list_head list;
+	rwlock_t lock;
+};
+
+static int
+fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level)
+{
+	might_sleep();
+	BUG_ON(!level);
+	*level = 0xbe;
+
+	return 0;
+}
+
+static int
+fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel)
+{
+	pr_debug("set channel to %d\n", channel);
+
+	might_sleep();
+	dev->phy->current_page = page;
+	dev->phy->current_channel = channel;
+
+	return 0;
+}
+
+static void
+fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
+{
+	struct sk_buff *newskb;
+
+	spin_lock(&priv->lock);
+	if (priv->working) {
+		newskb = pskb_copy(skb, GFP_ATOMIC);
+		ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc);
+	}
+	spin_unlock(&priv->lock);
+}
+
+static int
+fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+	struct fakelb_dev_priv *priv = dev->priv;
+	struct fakelb_priv *fake = priv->fake;
+
+	might_sleep();
+
+	read_lock_bh(&fake->lock);
+	if (priv->list.next == priv->list.prev) {
+		/* we are the only one device */
+		fakelb_hw_deliver(priv, skb);
+	} else {
+		struct fakelb_dev_priv *dp;
+		list_for_each_entry(dp, &priv->fake->list, list) {
+			if (dp != priv &&
+			    (dp->dev->phy->current_channel ==
+			     priv->dev->phy->current_channel))
+				fakelb_hw_deliver(dp, skb);
+		}
+	}
+	read_unlock_bh(&fake->lock);
+
+	return 0;
+}
+
+static int
+fakelb_hw_start(struct ieee802154_dev *dev) {
+	struct fakelb_dev_priv *priv = dev->priv;
+	int ret = 0;
+
+	spin_lock(&priv->lock);
+	if (priv->working)
+		ret = -EBUSY;
+	else
+		priv->working = 1;
+	spin_unlock(&priv->lock);
+
+	return ret;
+}
+
+static void
+fakelb_hw_stop(struct ieee802154_dev *dev) {
+	struct fakelb_dev_priv *priv = dev->priv;
+
+	spin_lock(&priv->lock);
+	priv->working = 0;
+	spin_unlock(&priv->lock);
+}
+
+static struct ieee802154_ops fakelb_ops = {
+	.owner = THIS_MODULE,
+	.xmit = fakelb_hw_xmit,
+	.ed = fakelb_hw_ed,
+	.set_channel = fakelb_hw_channel,
+	.start = fakelb_hw_start,
+	.stop = fakelb_hw_stop,
+};
+
+/* Number of dummy devices to be set up by this module. */
+module_param(numlbs, int, 0);
+MODULE_PARM_DESC(numlbs, " number of pseudo devices");
+
+static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
+{
+	struct fakelb_dev_priv *priv;
+	int err;
+	struct ieee802154_dev *ieee;
+
+	ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops);
+	if (!ieee)
+		return -ENOMEM;
+
+	priv = ieee->priv;
+	priv->dev = ieee;
+
+	/* 868 MHz BPSK	802.15.4-2003 */
+	ieee->phy->channels_supported[0] |= 1;
+	/* 915 MHz BPSK	802.15.4-2003 */
+	ieee->phy->channels_supported[0] |= 0x7fe;
+	/* 2.4 GHz O-QPSK 802.15.4-2003 */
+	ieee->phy->channels_supported[0] |= 0x7FFF800;
+	/* 868 MHz ASK 802.15.4-2006 */
+	ieee->phy->channels_supported[1] |= 1;
+	/* 915 MHz ASK 802.15.4-2006 */
+	ieee->phy->channels_supported[1] |= 0x7fe;
+	/* 868 MHz O-QPSK 802.15.4-2006 */
+	ieee->phy->channels_supported[2] |= 1;
+	/* 915 MHz O-QPSK 802.15.4-2006 */
+	ieee->phy->channels_supported[2] |= 0x7fe;
+	/* 2.4 GHz CSS 802.15.4a-2007 */
+	ieee->phy->channels_supported[3] |= 0x3fff;
+	/* UWB Sub-gigahertz 802.15.4a-2007 */
+	ieee->phy->channels_supported[4] |= 1;
+	/* UWB Low band 802.15.4a-2007 */
+	ieee->phy->channels_supported[4] |= 0x1e;
+	/* UWB High band 802.15.4a-2007 */
+	ieee->phy->channels_supported[4] |= 0xffe0;
+	/* 750 MHz O-QPSK 802.15.4c-2009 */
+	ieee->phy->channels_supported[5] |= 0xf;
+	/* 750 MHz MPSK 802.15.4c-2009 */
+	ieee->phy->channels_supported[5] |= 0xf0;
+	/* 950 MHz BPSK 802.15.4d-2009 */
+	ieee->phy->channels_supported[6] |= 0x3ff;
+	/* 950 MHz GFSK 802.15.4d-2009 */
+	ieee->phy->channels_supported[6] |= 0x3ffc00;
+
+	INIT_LIST_HEAD(&priv->list);
+	priv->fake = fake;
+
+	spin_lock_init(&priv->lock);
+
+	ieee->parent = dev;
+
+	err = ieee802154_register_device(ieee);
+	if (err)
+		goto err_reg;
+
+	write_lock_bh(&fake->lock);
+	list_add_tail(&priv->list, &fake->list);
+	write_unlock_bh(&fake->lock);
+
+	return 0;
+
+err_reg:
+	ieee802154_free_device(priv->dev);
+	return err;
+}
+
+static void fakelb_del(struct fakelb_dev_priv *priv)
+{
+	write_lock_bh(&priv->fake->lock);
+	list_del(&priv->list);
+	write_unlock_bh(&priv->fake->lock);
+
+	ieee802154_unregister_device(priv->dev);
+	ieee802154_free_device(priv->dev);
+}
+
+static int __devinit fakelb_probe(struct platform_device *pdev)
+{
+	struct fakelb_priv *priv;
+	struct fakelb_dev_priv *dp;
+	int err = -ENOMEM;
+	int i;
+
+	priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL);
+	if (!priv)
+		goto err_alloc;
+
+	INIT_LIST_HEAD(&priv->list);
+	rwlock_init(&priv->lock);
+
+	for (i = 0; i < numlbs; i++) {
+		err = fakelb_add_one(&pdev->dev, priv);
+		if (err < 0)
+			goto err_slave;
+	}
+
+	platform_set_drvdata(pdev, priv);
+	dev_info(&pdev->dev, "added ieee802154 hardware\n");
+	return 0;
+
+err_slave:
+	list_for_each_entry(dp, &priv->list, list)
+		fakelb_del(dp);
+	kfree(priv);
+err_alloc:
+	return err;
+}
+
+static int __devexit fakelb_remove(struct platform_device *pdev)
+{
+	struct fakelb_priv *priv = platform_get_drvdata(pdev);
+	struct fakelb_dev_priv *dp, *temp;
+
+	list_for_each_entry_safe(dp, temp, &priv->list, list)
+		fakelb_del(dp);
+	kfree(priv);
+
+	return 0;
+}
+
+static struct platform_device *ieee802154fake_dev;
+
+static struct platform_driver ieee802154fake_driver = {
+	.probe = fakelb_probe,
+	.remove = __devexit_p(fakelb_remove),
+	.driver = {
+			.name = "ieee802154fakelb",
+			.owner = THIS_MODULE,
+	},
+};
+
+static __init int fakelb_init_module(void)
+{
+	ieee802154fake_dev = platform_device_register_simple(
+			     "ieee802154fakelb", -1, NULL, 0);
+	return platform_driver_register(&ieee802154fake_driver);
+}
+
+static __exit void fake_remove_module(void)
+{
+	platform_driver_unregister(&ieee802154fake_driver);
+	platform_device_unregister(ieee802154fake_dev);
+}
+
+module_init(fakelb_init_module);
+module_exit(fake_remove_module);
+MODULE_LICENSE("GPL");
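The channels_supported[] masks in fakelb_add_one() are per-page bitmaps where bit N means channel N, so 0x7fe covers channels 1-10 and 0x7FFF800 covers channels 11-26. A small decoder for such a mask, independent of the driver:

/* Decode an IEEE 802.15.4 channels_supported bitmap: bit N == channel N. */
#include <stdio.h>
#include <stdint.h>

static void print_channels(const char *label, uint32_t mask)
{
	int ch;

	printf("%s:", label);
	for (ch = 0; ch < 32; ch++)
		if (mask & (1u << ch))
			printf(" %d", ch);
	printf("\n");
}

int main(void)
{
	print_channels("915 MHz BPSK (0x7fe)", 0x7fe);		/* channels 1..10 */
	print_channels("2.4 GHz O-QPSK (0x7FFF800)", 0x7FFF800);	/* channels 11..26 */
	return 0;
}
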
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index eb0add31..a0f29c1 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -51,6 +51,7 @@
 source "drivers/infiniband/hw/cxgb4/Kconfig"
 source "drivers/infiniband/hw/mlx4/Kconfig"
 source "drivers/infiniband/hw/nes/Kconfig"
+source "drivers/infiniband/hw/ocrdma/Kconfig"
 
 source "drivers/infiniband/ulp/ipoib/Kconfig"
 
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index a3b2d8e..bf846a1 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_INFINIBAND_CXGB4)		+= hw/cxgb4/
 obj-$(CONFIG_MLX4_INFINIBAND)		+= hw/mlx4/
 obj-$(CONFIG_INFINIBAND_NES)		+= hw/nes/
+obj-$(CONFIG_INFINIBAND_OCRDMA)		+= hw/ocrdma/
 obj-$(CONFIG_INFINIBAND_IPOIB)		+= ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP)		+= ulp/srp/
 obj-$(CONFIG_INFINIBAND_SRPT)		+= ulp/srpt/
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e3e470f..55d5642 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -42,6 +42,7 @@
 #include <linux/inetdevice.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <net/route.h>
 
 #include <net/tcp.h>
 #include <net/ipv6.h>
@@ -1218,13 +1219,13 @@
 	}
 	if (!conn_id) {
 		ret = -ENOMEM;
-		goto out;
+		goto err1;
 	}
 
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	ret = cma_acquire_dev(conn_id);
 	if (ret)
-		goto release_conn_id;
+		goto err2;
 
 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -1236,31 +1237,33 @@
 	 */
 	atomic_inc(&conn_id->refcount);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
-	if (!ret) {
-		/*
-		 * Acquire mutex to prevent user executing rdma_destroy_id()
-		 * while we're accessing the cm_id.
-		 */
-		mutex_lock(&lock);
-		if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
-			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
-		mutex_unlock(&lock);
-		mutex_unlock(&conn_id->handler_mutex);
-		cma_deref_id(conn_id);
-		goto out;
-	}
-	cma_deref_id(conn_id);
+	if (ret)
+		goto err3;
 
+	/*
+	 * Acquire mutex to prevent user executing rdma_destroy_id()
+	 * while we're accessing the cm_id.
+	 */
+	mutex_lock(&lock);
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+	mutex_unlock(&lock);
+	mutex_unlock(&conn_id->handler_mutex);
+	mutex_unlock(&listen_id->handler_mutex);
+	cma_deref_id(conn_id);
+	return 0;
+
+err3:
+	cma_deref_id(conn_id);
 	/* Destroy the CM ID by returning a non-zero value. */
 	conn_id->cm_id.ib = NULL;
-
-release_conn_id:
+err2:
 	cma_exch(conn_id, RDMA_CM_DESTROYING);
 	mutex_unlock(&conn_id->handler_mutex);
-	rdma_destroy_id(&conn_id->id);
-
-out:
+err1:
 	mutex_unlock(&listen_id->handler_mutex);
+	if (conn_id)
+		rdma_destroy_id(&conn_id->id);
 	return ret;
 }
 
@@ -1826,7 +1829,10 @@
 	route->path_rec->reversible = 1;
 	route->path_rec->pkey = cpu_to_be16(0xffff);
 	route->path_rec->mtu_selector = IB_SA_EQ;
-	route->path_rec->sl = id_priv->tos >> 5;
+	route->path_rec->sl = netdev_get_prio_tc_map(
+			ndev->priv_flags & IFF_802_1Q_VLAN ?
+				vlan_dev_real_dev(ndev) : ndev,
+			rt_tos2priority(id_priv->tos));
 
 	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
 	route->path_rec->rate_selector = IB_SA_EQ;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 426bb76..b0d0bc8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1854,6 +1854,8 @@
 		response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
 		response->mad.mad.mad_hdr.status =
 			cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
+		if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+			response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
 
 		return true;
 	} else {
@@ -1869,6 +1871,7 @@
 	struct ib_mad_list_head *mad_list;
 	struct ib_mad_agent_private *mad_agent;
 	int port_num;
+	int ret = IB_MAD_RESULT_SUCCESS;
 
 	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
 	qp_info = mad_list->mad_queue->qp_info;
@@ -1952,8 +1955,6 @@
 local:
 	/* Give driver "right of first refusal" on incoming MAD */
 	if (port_priv->device->process_mad) {
-		int ret;
-
 		ret = port_priv->device->process_mad(port_priv->device, 0,
 						     port_priv->port_num,
 						     wc, &recv->grh,
@@ -1981,7 +1982,8 @@
 		 * or via recv_handler in ib_mad_complete_recv()
 		 */
 		recv = NULL;
-	} else if (generate_unmatched_resp(recv, response)) {
+	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
+		   generate_unmatched_resp(recv, response)) {
 		agent_send_response(&response->mad.mad, &recv->grh, wc,
 				    port_priv->device, port_num, qp_info->qp->qp_num);
 	}
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 396e293..e497dfbe 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -125,7 +125,8 @@
 	unsigned char *prev_tail;
 
 	prev_tail = skb_tail_pointer(skb);
-	NLA_PUT(skb, type, len, data);
+	if (nla_put(skb, type, len, data))
+		goto nla_put_failure;
 	nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
 	return 0;
 
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5861cdb..8002ae6 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -66,12 +66,6 @@
 	{ }
 };
 
-static struct ctl_path ucma_ctl_path[] = {
-	{ .procname = "net" },
-	{ .procname = "rdma_ucm" },
-	{ }
-};
-
 struct ucma_file {
 	struct mutex		mut;
 	struct file		*filp;
@@ -1392,7 +1386,7 @@
 		goto err1;
 	}
 
-	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
+	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
 	if (!ucma_ctl_table_hdr) {
 		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
 		ret = -ENOMEM;
@@ -1408,7 +1402,7 @@
 
 static void __exit ucma_cleanup(void)
 {
-	unregister_sysctl_table(ucma_ctl_table_hdr);
+	unregister_net_sysctl_table(ucma_ctl_table_hdr);
 	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
 	misc_deregister(&ucma_misc);
 	idr_destroy(&ctx_idr);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 71f0c0f..a841123 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -269,7 +269,7 @@
 	} else
 		down_write(&mm->mmap_sem);
 
-	current->mm->locked_vm -= diff;
+	current->mm->pinned_vm -= diff;
 	up_write(&mm->mmap_sem);
 	mmput(mm);
 	kfree(umem);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4d27e4c3..f9d0d7c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -41,13 +41,18 @@
 
 #include "uverbs.h"
 
-static struct lock_class_key pd_lock_key;
-static struct lock_class_key mr_lock_key;
-static struct lock_class_key cq_lock_key;
-static struct lock_class_key qp_lock_key;
-static struct lock_class_key ah_lock_key;
-static struct lock_class_key srq_lock_key;
-static struct lock_class_key xrcd_lock_key;
+struct uverbs_lock_class {
+	struct lock_class_key	key;
+	char			name[16];
+};
+
+static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
+static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
+static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
+static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
+static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
+static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
+static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
 	do {								\
@@ -83,13 +88,13 @@
  */
 
 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
-		      struct ib_ucontext *context, struct lock_class_key *key)
+		      struct ib_ucontext *context, struct uverbs_lock_class *c)
 {
 	uobj->user_handle = user_handle;
 	uobj->context     = context;
 	kref_init(&uobj->ref);
 	init_rwsem(&uobj->mutex);
-	lockdep_set_class(&uobj->mutex, key);
+	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
 	uobj->live        = 0;
 }
 
@@ -522,7 +527,7 @@
 	if (!uobj)
 		return -ENOMEM;
 
-	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
+	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
 	down_write(&uobj->mutex);
 
 	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
@@ -750,7 +755,7 @@
 		goto err_tree_mutex_unlock;
 	}
 
-	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_key);
+	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
 
 	down_write(&obj->uobject.mutex);
 
@@ -947,7 +952,7 @@
 	if (!uobj)
 		return -ENOMEM;
 
-	init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
+	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
 	down_write(&uobj->mutex);
 
 	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1115,7 +1120,7 @@
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
+	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
 	down_write(&obj->uobject.mutex);
 
 	if (cmd.comp_channel >= 0) {
@@ -1399,6 +1404,9 @@
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
+	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
+		return -EPERM;
+
 	INIT_UDATA(&udata, buf + sizeof cmd,
 		   (unsigned long) cmd.response + sizeof resp,
 		   in_len - sizeof cmd, out_len - sizeof resp);
@@ -1407,7 +1415,7 @@
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
+	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
 	down_write(&obj->uevent.uobject.mutex);
 
 	if (cmd.qp_type == IB_QPT_XRC_TGT) {
@@ -1418,13 +1426,6 @@
 		}
 		device = xrcd->device;
 	} else {
-		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
-		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
-		if (!pd || !scq) {
-			ret = -EINVAL;
-			goto err_put;
-		}
-
 		if (cmd.qp_type == IB_QPT_XRC_INI) {
 			cmd.max_recv_wr = cmd.max_recv_sge = 0;
 		} else {
@@ -1435,13 +1436,24 @@
 					goto err_put;
 				}
 			}
-			rcq = (cmd.recv_cq_handle == cmd.send_cq_handle) ?
-			       scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
-			if (!rcq) {
-				ret = -EINVAL;
-				goto err_put;
+
+			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
+				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
+				if (!rcq) {
+					ret = -EINVAL;
+					goto err_put;
+				}
 			}
 		}
+
+		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
+		rcq = rcq ?: scq;
+		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
+		if (!pd || !scq) {
+			ret = -EINVAL;
+			goto err_put;
+		}
+
 		device = pd->device;
 	}
 
@@ -1585,7 +1597,7 @@
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
+	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
 	down_write(&obj->uevent.uobject.mutex);
 
 	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
@@ -2272,7 +2284,7 @@
 	if (!uobj)
 		return -ENOMEM;
 
-	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
+	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
 	down_write(&uobj->mutex);
 
 	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -2476,30 +2488,30 @@
 	if (!obj)
 		return -ENOMEM;
 
-	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_key);
+	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
 	down_write(&obj->uevent.uobject.mutex);
 
-	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
-	if (!pd) {
-		ret = -EINVAL;
-		goto err;
-	}
-
 	if (cmd->srq_type == IB_SRQT_XRC) {
-		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
-		if (!attr.ext.xrc.cq) {
-			ret = -EINVAL;
-			goto err_put_pd;
-		}
-
 		attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
 		if (!attr.ext.xrc.xrcd) {
 			ret = -EINVAL;
-			goto err_put_cq;
+			goto err;
 		}
 
 		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
 		atomic_inc(&obj->uxrcd->refcnt);
+
+		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
+		if (!attr.ext.xrc.cq) {
+			ret = -EINVAL;
+			goto err_put_xrcd;
+		}
+	}
+
+	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
+	if (!pd) {
+		ret = -EINVAL;
+		goto err_put_cq;
 	}
 
 	attr.event_handler  = ib_uverbs_srq_event_handler;
@@ -2576,17 +2588,17 @@
 	ib_destroy_srq(srq);
 
 err_put:
-	if (cmd->srq_type == IB_SRQT_XRC) {
-		atomic_dec(&obj->uxrcd->refcnt);
-		put_uobj_read(xrcd_uobj);
-	}
+	put_pd_read(pd);
 
 err_put_cq:
 	if (cmd->srq_type == IB_SRQT_XRC)
 		put_cq_read(attr.ext.xrc.cq);
 
-err_put_pd:
-	put_pd_read(pd);
+err_put_xrcd:
+	if (cmd->srq_type == IB_SRQT_XRC) {
+		atomic_dec(&obj->uxrcd->refcnt);
+		put_uobj_read(xrcd_uobj);
+	}
 
 err:
 	put_uobj_write(&obj->uevent.uobject);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 575b780..30f199e 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -479,6 +479,7 @@
 				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
 						IB_QP_PORT			|
 						IB_QP_QKEY),
+				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
 				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
 						IB_QP_PORT			|
 						IB_QP_ACCESS_FLAGS),
@@ -1183,23 +1184,33 @@
 
 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 {
+	int ret;
+
 	if (!qp->device->attach_mcast)
 		return -ENOSYS;
 	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
 		return -EINVAL;
 
-	return qp->device->attach_mcast(qp, gid, lid);
+	ret = qp->device->attach_mcast(qp, gid, lid);
+	if (!ret)
+		atomic_inc(&qp->usecnt);
+	return ret;
 }
 EXPORT_SYMBOL(ib_attach_mcast);
 
 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 {
+	int ret;
+
 	if (!qp->device->detach_mcast)
 		return -ENOSYS;
 	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
 		return -EINVAL;
 
-	return qp->device->detach_mcast(qp, gid, lid);
+	ret = qp->device->detach_mcast(qp, gid, lid);
+	if (!ret)
+		atomic_dec(&qp->usecnt);
+	return ret;
 }
 EXPORT_SYMBOL(ib_detach_mcast);
 
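ib_attach_mcast() and ib_detach_mcast() now adjust qp->usecnt only when the driver hook succeeds, keeping the count balanced across failed attach or detach attempts. A generic sketch of that pattern; the types and names are invented, not part of the verbs API:

/* Usage-count bookkeeping tied to the success of the wrapped operation,
 * mirroring the attach/detach change above.  Types and names are invented.
 */
#include <stdio.h>

struct obj {
	int usecnt;
};

static int backend_attach(struct obj *o, int fail)
{
	(void)o;
	return fail ? -1 : 0;	/* pretend driver hook */
}

static int attach(struct obj *o, int fail)
{
	int ret = backend_attach(o, fail);

	if (!ret)
		o->usecnt++;	/* count the reference only on success */
	return ret;
}

int main(void)
{
	struct obj o = { 0 };

	attach(&o, 0);	/* succeeds: usecnt -> 1 */
	attach(&o, 1);	/* fails: usecnt unchanged */
	printf("usecnt = %d\n", o.usecnt);	/* 1 */
	return 0;
}
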
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index 46b878c..e11cf72 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -2,4 +2,4 @@
 
 obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
 
-iw_cxgb4-y :=  device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
+iw_cxgb4-y :=  device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 92b4c2b..55ab284e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1362,7 +1362,10 @@
 
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	BUG_ON(!ep);
+	if (!ep) {
+		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
+		return 0;
+	}
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case ABORTING:
@@ -1410,6 +1413,24 @@
 		return 0;
 	}
 
+	/*
+	 * Log interesting failures.
+	 */
+	switch (status) {
+	case CPL_ERR_CONN_RESET:
+	case CPL_ERR_CONN_TIMEDOUT:
+		break;
+	default:
+		printk(KERN_INFO MOD "Active open failure - "
+		       "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
+		       atid, status, status2errno(status),
+		       &ep->com.local_addr.sin_addr.s_addr,
+		       ntohs(ep->com.local_addr.sin_port),
+		       &ep->com.remote_addr.sin_addr.s_addr,
+		       ntohs(ep->com.remote_addr.sin_port));
+		break;
+	}
+
 	connect_reply_upcall(ep, status2errno(status));
 	state_set(&ep->com, DEAD);
 
@@ -1593,7 +1614,7 @@
 					n, n->dev, 0);
 		if (!ep->l2t)
 			goto out;
-		ep->mtu = dst_mtu(ep->dst);
+		ep->mtu = dst_mtu(dst);
 		ep->tx_chan = cxgb4_port_chan(n->dev);
 		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
 		step = cdev->rdev.lldi.ntxq /
@@ -2656,6 +2677,12 @@
 	unsigned int tid = GET_TID(req);
 
 	ep = lookup_tid(t, tid);
+	if (!ep) {
+		printk(KERN_WARNING MOD
+		       "Abort on non-existent endpoint, tid %d\n", tid);
+		kfree_skb(skb);
+		return 0;
+	}
 	if (is_neg_adv_abort(req->status)) {
 		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
 		     ep->hwtid);
@@ -2667,11 +2694,8 @@
 
 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
-	 * However, this is not needed if com state is just
-	 * MPA_REQ_SENT
 	 */
-	if (ep->com.state != MPA_REQ_SENT)
-		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 	sched(dev, skb);
 	return 0;
 }
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 6d0df6e..cb4ecd7 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/debugfs.h>
+#include <linux/vmalloc.h>
 
 #include <rdma/ib_verbs.h>
 
@@ -44,6 +45,12 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct c4iw_dev *dev;
+};
+
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 
@@ -115,7 +122,7 @@
 		printk(KERN_INFO "%s null qpd?\n", __func__);
 		return 0;
 	}
-	kfree(qpd->buf);
+	vfree(qpd->buf);
 	kfree(qpd);
 	return 0;
 }
@@ -139,7 +146,7 @@
 	spin_unlock_irq(&qpd->devp->lock);
 
 	qpd->bufsize = count * 128;
-	qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
+	qpd->buf = vmalloc(qpd->bufsize);
 	if (!qpd->buf) {
 		ret = -ENOMEM;
 		goto err1;
@@ -240,6 +247,81 @@
 	.llseek  = default_llseek,
 };
 
+static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};
+
+static int stats_show(struct seq_file *seq, void *v)
+{
+	struct c4iw_dev *dev = seq->private;
+
+	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
+		   "Max", "Fail");
+	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
+			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
+	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
+			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
+	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
+			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
+	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
+			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
+	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
+			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
+	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
+			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
+			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
+	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
+	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
+	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
+	seq_printf(seq, " DB State: %s Transitions %llu\n",
+		   db_state_str[dev->db_state],
+		   dev->rdev.stats.db_state_transitions);
+	return 0;
+}
+
+static int stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, stats_show, inode->i_private);
+}
+
+static ssize_t stats_clear(struct file *file, const char __user *buf,
+		size_t count, loff_t *pos)
+{
+	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
+
+	mutex_lock(&dev->rdev.stats.lock);
+	dev->rdev.stats.pd.max = 0;
+	dev->rdev.stats.pd.fail = 0;
+	dev->rdev.stats.qid.max = 0;
+	dev->rdev.stats.qid.fail = 0;
+	dev->rdev.stats.stag.max = 0;
+	dev->rdev.stats.stag.fail = 0;
+	dev->rdev.stats.pbl.max = 0;
+	dev->rdev.stats.pbl.fail = 0;
+	dev->rdev.stats.rqt.max = 0;
+	dev->rdev.stats.rqt.fail = 0;
+	dev->rdev.stats.ocqp.max = 0;
+	dev->rdev.stats.ocqp.fail = 0;
+	dev->rdev.stats.db_full = 0;
+	dev->rdev.stats.db_empty = 0;
+	dev->rdev.stats.db_drop = 0;
+	dev->rdev.stats.db_state_transitions = 0;
+	mutex_unlock(&dev->rdev.stats.lock);
+	return count;
+}
+
+static const struct file_operations stats_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = stats_open,
+	.release = single_release,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.write   = stats_clear,
+};
+
 static int setup_debugfs(struct c4iw_dev *devp)
 {
 	struct dentry *de;
@@ -256,6 +338,12 @@
 				 (void *)devp, &stag_debugfs_fops);
 	if (de && de->d_inode)
 		de->d_inode->i_size = 4096;
+
+	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
+			(void *)devp, &stats_debugfs_fops);
+	if (de && de->d_inode)
+		de->d_inode->i_size = 4096;
+
 	return 0;
 }
 
@@ -269,9 +357,13 @@
 	list_for_each_safe(pos, nxt, &uctx->qpids) {
 		entry = list_entry(pos, struct c4iw_qid_list, entry);
 		list_del_init(&entry->entry);
-		if (!(entry->qid & rdev->qpmask))
-			c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
-					  &rdev->resource.qid_fifo_lock);
+		if (!(entry->qid & rdev->qpmask)) {
+			c4iw_put_resource(&rdev->resource.qid_table,
+					  entry->qid);
+			mutex_lock(&rdev->stats.lock);
+			rdev->stats.qid.cur -= rdev->qpmask + 1;
+			mutex_unlock(&rdev->stats.lock);
+		}
 		kfree(entry);
 	}
 
@@ -332,6 +424,13 @@
 		goto err1;
 	}
 
+	rdev->stats.pd.total = T4_MAX_NUM_PD;
+	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
+	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
+	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
+	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
+	rdev->stats.qid.total = rdev->lldi.vr->qp.size;
+
 	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
 	if (err) {
 		printk(KERN_ERR MOD "error %d initializing resources\n", err);
@@ -370,12 +469,6 @@
 	c4iw_destroy_resource(&rdev->resource);
 }
 
-struct uld_ctx {
-	struct list_head entry;
-	struct cxgb4_lld_info lldi;
-	struct c4iw_dev *dev;
-};
-
 static void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);
@@ -440,6 +533,8 @@
 	idr_init(&devp->qpidr);
 	idr_init(&devp->mmidr);
 	spin_lock_init(&devp->lock);
+	mutex_init(&devp->rdev.stats.lock);
+	mutex_init(&devp->db_mutex);
 
 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
@@ -585,11 +680,234 @@
 	return 0;
 }
 
+static int disable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_disable_wq_db(&qp->wq);
+	return 0;
+}
+
+static void stop_queues(struct uld_ctx *ctx)
+{
+	spin_lock_irq(&ctx->dev->lock);
+	if (ctx->dev->db_state == NORMAL) {
+		ctx->dev->rdev.stats.db_state_transitions++;
+		ctx->dev->db_state = FLOW_CONTROL;
+		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
+	}
+	spin_unlock_irq(&ctx->dev->lock);
+}
+
+static int enable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_enable_wq_db(&qp->wq);
+	return 0;
+}
+
+static void resume_queues(struct uld_ctx *ctx)
+{
+	spin_lock_irq(&ctx->dev->lock);
+	if (ctx->dev->qpcnt <= db_fc_threshold &&
+	    ctx->dev->db_state == FLOW_CONTROL) {
+		ctx->dev->db_state = NORMAL;
+		ctx->dev->rdev.stats.db_state_transitions++;
+		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
+	}
+	spin_unlock_irq(&ctx->dev->lock);
+}
+
+struct qp_list {
+	unsigned idx;
+	struct c4iw_qp **qps;
+};
+
+static int add_and_ref_qp(int id, void *p, void *data)
+{
+	struct qp_list *qp_listp = data;
+	struct c4iw_qp *qp = p;
+
+	c4iw_qp_add_ref(&qp->ibqp);
+	qp_listp->qps[qp_listp->idx++] = qp;
+	return 0;
+}
+
+static int count_qps(int id, void *p, void *data)
+{
+	unsigned *countp = data;
+	(*countp)++;
+	return 0;
+}
+
+static void deref_qps(struct qp_list qp_list)
+{
+	int idx;
+
+	for (idx = 0; idx < qp_list.idx; idx++)
+		c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
+}
+
+static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
+{
+	int idx;
+	int ret;
+
+	for (idx = 0; idx < qp_list->idx; idx++) {
+		struct c4iw_qp *qp = qp_list->qps[idx];
+
+		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
+					  qp->wq.sq.qid,
+					  t4_sq_host_wq_pidx(&qp->wq),
+					  t4_sq_wq_size(&qp->wq));
+		if (ret) {
+			printk(KERN_ERR MOD "%s: Fatal error - "
+			       "DB overflow recovery failed - "
+			       "error syncing SQ qid %u\n",
+			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
+			return;
+		}
+
+		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
+					  qp->wq.rq.qid,
+					  t4_rq_host_wq_pidx(&qp->wq),
+					  t4_rq_wq_size(&qp->wq));
+
+		if (ret) {
+			printk(KERN_ERR MOD "%s: Fatal error - "
+			       "DB overflow recovery failed - "
+			       "error syncing RQ qid %u\n",
+			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
+			return;
+		}
+
+		/* Wait for the dbfifo to drain */
+		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(usecs_to_jiffies(10));
+		}
+	}
+}
+
+static void recover_queues(struct uld_ctx *ctx)
+{
+	int count = 0;
+	struct qp_list qp_list;
+	int ret;
+
+	/* lock out kernel db ringers */
+	mutex_lock(&ctx->dev->db_mutex);
+
+	/* put all queues into recovery mode */
+	spin_lock_irq(&ctx->dev->lock);
+	ctx->dev->db_state = RECOVERY;
+	ctx->dev->rdev.stats.db_state_transitions++;
+	idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
+	spin_unlock_irq(&ctx->dev->lock);
+
+	/* slow everybody down */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(usecs_to_jiffies(1000));
+
+	/* Wait for the dbfifo to completely drain. */
+	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(10));
+	}
+
+	/* flush the SGE contexts */
+	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
+	if (ret) {
+		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
+		       pci_name(ctx->lldi.pdev));
+		goto out;
+	}
+
+	/* Count active queues so we can build a list of queues to recover */
+	spin_lock_irq(&ctx->dev->lock);
+	idr_for_each(&ctx->dev->qpidr, count_qps, &count);
+
+	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
+	if (!qp_list.qps) {
+		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
+		       pci_name(ctx->lldi.pdev));
+		spin_unlock_irq(&ctx->dev->lock);
+		goto out;
+	}
+	qp_list.idx = 0;
+
+	/* add and ref each qp so it doesn't get freed */
+	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
+
+	spin_unlock_irq(&ctx->dev->lock);
+
+	/* now traverse the list in a safe context to recover the db state */
+	recover_lost_dbs(ctx, &qp_list);
+
+	/* we're almost done!  deref the qps and clean up */
+	deref_qps(qp_list);
+	kfree(qp_list.qps);
+
+	/* Wait for the dbfifo to completely drain again */
+	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(10));
+	}
+
+	/* resume the queues */
+	spin_lock_irq(&ctx->dev->lock);
+	if (ctx->dev->qpcnt > db_fc_threshold)
+		ctx->dev->db_state = FLOW_CONTROL;
+	else {
+		ctx->dev->db_state = NORMAL;
+		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
+	}
+	ctx->dev->rdev.stats.db_state_transitions++;
+	spin_unlock_irq(&ctx->dev->lock);
+
+out:
+	/* start up kernel db ringers again */
+	mutex_unlock(&ctx->dev->db_mutex);
+}
+
+static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
+{
+	struct uld_ctx *ctx = handle;
+
+	switch (control) {
+	case CXGB4_CONTROL_DB_FULL:
+		stop_queues(ctx);
+		mutex_lock(&ctx->dev->rdev.stats.lock);
+		ctx->dev->rdev.stats.db_full++;
+		mutex_unlock(&ctx->dev->rdev.stats.lock);
+		break;
+	case CXGB4_CONTROL_DB_EMPTY:
+		resume_queues(ctx);
+		mutex_lock(&ctx->dev->rdev.stats.lock);
+		ctx->dev->rdev.stats.db_empty++;
+		mutex_unlock(&ctx->dev->rdev.stats.lock);
+		break;
+	case CXGB4_CONTROL_DB_DROP:
+		recover_queues(ctx);
+		mutex_lock(&ctx->dev->rdev.stats.lock);
+		ctx->dev->rdev.stats.db_drop++;
+		mutex_unlock(&ctx->dev->rdev.stats.lock);
+		break;
+	default:
+		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
+		       pci_name(ctx->lldi.pdev), control);
+		break;
+	}
+	return 0;
+}
+
 static struct cxgb4_uld_info c4iw_uld_info = {
 	.name = DRV_NAME,
 	.add = c4iw_uld_add,
 	.rx_handler = c4iw_uld_rx_handler,
 	.state_change = c4iw_uld_state_change,
+	.control = c4iw_uld_control,
 };
 
 static int __init c4iw_init_module(void)
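[Editor's sketch] The device.c changes above add a doorbell-overflow scheme driven by three new ULD control events (CXGB4_CONTROL_DB_FULL, _DB_EMPTY, _DB_DROP) acting on a small state machine (NORMAL, FLOW_CONTROL, RECOVERY). The stand-alone C sketch below only mirrors those transitions for illustration; db_fsm_step() and enum db_event are hypothetical names, not part of the driver, and 2000 is the db_fc_threshold module default from the qp.c hunk later in this series.

#include <stdio.h>

enum db_state { NORMAL, FLOW_CONTROL, RECOVERY };
enum db_event { DB_FULL, DB_EMPTY, DB_DROP };

static enum db_state db_fsm_step(enum db_state cur, enum db_event ev,
				 int qpcnt, int db_fc_threshold)
{
	switch (ev) {
	case DB_FULL:
		/* stop_queues() only moves out of NORMAL */
		return cur == NORMAL ? FLOW_CONTROL : cur;
	case DB_EMPTY:
		/* resume_queues() re-enables DBs only below the QP threshold */
		return (cur == FLOW_CONTROL && qpcnt <= db_fc_threshold) ?
			NORMAL : cur;
	case DB_DROP:
		/* recover_queues() passes through RECOVERY and lands in
		 * FLOW_CONTROL or NORMAL depending on the active QP count */
		return qpcnt > db_fc_threshold ? FLOW_CONTROL : NORMAL;
	}
	return cur;
}

int main(void)
{
	enum db_state s = NORMAL;

	s = db_fsm_step(s, DB_FULL, 10, 2000);	/* NORMAL -> FLOW_CONTROL */
	s = db_fsm_step(s, DB_EMPTY, 10, 2000);	/* back to NORMAL */
	printf("final state: %d\n", s);
	return 0;
}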
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 397cb36..cf2f6b4 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -84,7 +84,7 @@
 	struct c4iw_qp *qhp;
 	u32 cqid;
 
-	spin_lock(&dev->lock);
+	spin_lock_irq(&dev->lock);
 	qhp = get_qhp(dev, CQE_QPID(err_cqe));
 	if (!qhp) {
 		printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
@@ -93,7 +93,7 @@
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock(&dev->lock);
+		spin_unlock_irq(&dev->lock);
 		goto out;
 	}
 
@@ -109,13 +109,13 @@
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock(&dev->lock);
+		spin_unlock_irq(&dev->lock);
 		goto out;
 	}
 
 	c4iw_qp_add_ref(&qhp->ibqp);
 	atomic_inc(&chp->refcnt);
-	spin_unlock(&dev->lock);
+	spin_unlock_irq(&dev->lock);
 
 	/* Bad incoming write */
 	if (RQ_TYPE(err_cqe) &&
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
new file mode 100644
index 0000000..f95e5df
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011 Chelsio Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include "iw_cxgb4.h"
+
+#define RANDOM_SKIP 16
+
+/*
+ * Trivial bitmap-based allocator. If the random flag is set, the
+ * allocator is designed to:
+ * - pseudo-randomize the id returned such that it is not trivially predictable.
+ * - avoid reuse of recently used id (at the expense of predictability)
+ */
+u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
+{
+	unsigned long flags;
+	u32 obj;
+
+	spin_lock_irqsave(&alloc->lock, flags);
+
+	obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
+	if (obj >= alloc->max)
+		obj = find_first_zero_bit(alloc->table, alloc->max);
+
+	if (obj < alloc->max) {
+		if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
+			alloc->last += random32() % RANDOM_SKIP;
+		else
+			alloc->last = obj + 1;
+		if (alloc->last >= alloc->max)
+			alloc->last = 0;
+		set_bit(obj, alloc->table);
+		obj += alloc->start;
+	} else
+		obj = -1;
+
+	spin_unlock_irqrestore(&alloc->lock, flags);
+	return obj;
+}
+
+void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
+{
+	unsigned long flags;
+
+	obj -= alloc->start;
+	BUG_ON((int)obj < 0);
+
+	spin_lock_irqsave(&alloc->lock, flags);
+	clear_bit(obj, alloc->table);
+	spin_unlock_irqrestore(&alloc->lock, flags);
+}
+
+int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
+			u32 reserved, u32 flags)
+{
+	int i;
+
+	alloc->start = start;
+	alloc->flags = flags;
+	if (flags & C4IW_ID_TABLE_F_RANDOM)
+		alloc->last = random32() % RANDOM_SKIP;
+	else
+		alloc->last = 0;
+	alloc->max  = num;
+	spin_lock_init(&alloc->lock);
+	alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof(long),
+				GFP_KERNEL);
+	if (!alloc->table)
+		return -ENOMEM;
+
+	bitmap_zero(alloc->table, num);
+	if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
+		for (i = 0; i < reserved; ++i)
+			set_bit(i, alloc->table);
+
+	return 0;
+}
+
+void c4iw_id_table_free(struct c4iw_id_table *alloc)
+{
+	kfree(alloc->table);
+}
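[Editor's sketch] id_table.c above replaces the old kfifo-based resource pools with a bitmap allocator whose search starts at a moving hint; in random mode the hint also jumps ahead pseudo-randomly so recently freed ids are not handed straight back out. A minimal user-space analogue of the random mode follows — the struct and function names are hypothetical, a byte array stands in for the kernel bitmap, and libc rand() stands in for random32().

#include <stdio.h>
#include <stdlib.h>

#define RANDOM_SKIP 16

struct id_table {
	unsigned int start;	/* logical minimum id */
	unsigned int last;	/* search hint */
	unsigned int max;
	unsigned char *used;	/* one byte per id, instead of a bitmap */
};

static int id_table_init(struct id_table *t, unsigned int start,
			 unsigned int num)
{
	t->start = start;
	t->last = rand() % RANDOM_SKIP;
	t->max = num;
	t->used = calloc(num, 1);
	return t->used ? 0 : -1;
}

static int id_alloc(struct id_table *t)
{
	unsigned int i, obj = t->max;

	/* scan from the hint first, then wrap, like find_next_zero_bit()
	 * followed by find_first_zero_bit() in c4iw_id_alloc() */
	for (i = t->last; i < t->max; i++)
		if (!t->used[i]) {
			obj = i;
			break;
		}
	if (obj == t->max)
		for (i = 0; i < t->last; i++)
			if (!t->used[i]) {
				obj = i;
				break;
			}
	if (obj == t->max)
		return -1;

	/* bump the hint by a random amount so freed ids are not reused
	 * immediately (the C4IW_ID_TABLE_F_RANDOM behaviour) */
	t->last += rand() % RANDOM_SKIP;
	if (t->last >= t->max)
		t->last = 0;
	t->used[obj] = 1;
	return (int)(obj + t->start);
}

static void id_free(struct id_table *t, unsigned int id)
{
	t->used[id - t->start] = 0;
}

int main(void)
{
	struct id_table t;
	int a, b;

	if (id_table_init(&t, 0x100, 64))
		return 1;
	a = id_alloc(&t);
	b = id_alloc(&t);
	printf("allocated 0x%x and 0x%x\n", a, b);
	id_free(&t, (unsigned int)a);
	free(t.used);
	return 0;
}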
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 1357c5b..9beb3a9 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -45,7 +45,6 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
-#include <linux/kfifo.h>
 
 #include <asm/byteorder.h>
 
@@ -79,13 +78,22 @@
 	return skb->data;
 }
 
+#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the id's returned */
+#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */
+
+struct c4iw_id_table {
+	u32 flags;
+	u32 start;              /* logical minimal id */
+	u32 last;               /* hint for find */
+	u32 max;
+	spinlock_t lock;
+	unsigned long *table;
+};
+
 struct c4iw_resource {
-	struct kfifo tpt_fifo;
-	spinlock_t tpt_fifo_lock;
-	struct kfifo qid_fifo;
-	spinlock_t qid_fifo_lock;
-	struct kfifo pdid_fifo;
-	spinlock_t pdid_fifo_lock;
+	struct c4iw_id_table tpt_table;
+	struct c4iw_id_table qid_table;
+	struct c4iw_id_table pdid_table;
 };
 
 struct c4iw_qid_list {
@@ -103,6 +111,27 @@
 	T4_FATAL_ERROR = (1<<0),
 };
 
+struct c4iw_stat {
+	u64 total;
+	u64 cur;
+	u64 max;
+	u64 fail;
+};
+
+struct c4iw_stats {
+	struct mutex lock;
+	struct c4iw_stat qid;
+	struct c4iw_stat pd;
+	struct c4iw_stat stag;
+	struct c4iw_stat pbl;
+	struct c4iw_stat rqt;
+	struct c4iw_stat ocqp;
+	u64  db_full;
+	u64  db_empty;
+	u64  db_drop;
+	u64  db_state_transitions;
+};
+
 struct c4iw_rdev {
 	struct c4iw_resource resource;
 	unsigned long qpshift;
@@ -117,6 +146,7 @@
 	struct cxgb4_lld_info lldi;
 	unsigned long oc_mw_pa;
 	void __iomem *oc_mw_kva;
+	struct c4iw_stats stats;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -175,6 +205,12 @@
 	return wr_waitp->ret;
 }
 
+enum db_state {
+	NORMAL = 0,
+	FLOW_CONTROL = 1,
+	RECOVERY = 2
+};
+
 struct c4iw_dev {
 	struct ib_device ibdev;
 	struct c4iw_rdev rdev;
@@ -183,7 +219,10 @@
 	struct idr qpidr;
 	struct idr mmidr;
 	spinlock_t lock;
+	struct mutex db_mutex;
 	struct dentry *debugfs_root;
+	enum db_state db_state;
+	int qpcnt;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -211,29 +250,57 @@
 	return idr_find(&rhp->mmidr, mmid);
 }
 
-static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
-				void *handle, u32 id)
+static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+				 void *handle, u32 id, int lock)
 {
 	int ret;
 	int newid;
 
 	do {
-		if (!idr_pre_get(idr, GFP_KERNEL))
+		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
 			return -ENOMEM;
-		spin_lock_irq(&rhp->lock);
+		if (lock)
+			spin_lock_irq(&rhp->lock);
 		ret = idr_get_new_above(idr, handle, id, &newid);
-		BUG_ON(newid != id);
-		spin_unlock_irq(&rhp->lock);
+		BUG_ON(!ret && newid != id);
+		if (lock)
+			spin_unlock_irq(&rhp->lock);
 	} while (ret == -EAGAIN);
 
 	return ret;
 }
 
+static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+				void *handle, u32 id)
+{
+	return _insert_handle(rhp, idr, handle, id, 1);
+}
+
+static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
+				       void *handle, u32 id)
+{
+	return _insert_handle(rhp, idr, handle, id, 0);
+}
+
+static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
+				   u32 id, int lock)
+{
+	if (lock)
+		spin_lock_irq(&rhp->lock);
+	idr_remove(idr, id);
+	if (lock)
+		spin_unlock_irq(&rhp->lock);
+}
+
 static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
 {
-	spin_lock_irq(&rhp->lock);
-	idr_remove(idr, id);
-	spin_unlock_irq(&rhp->lock);
+	_remove_handle(rhp, idr, id, 1);
+}
+
+static inline void remove_handle_nolock(struct c4iw_dev *rhp,
+					 struct idr *idr, u32 id)
+{
+	_remove_handle(rhp, idr, id, 0);
 }
 
 struct c4iw_pd {
@@ -353,6 +420,8 @@
 	struct c4iw_ep *llp_stream_handle;
 	u8 layer_etype;
 	u8 ecode;
+	u16 sq_db_inc;
+	u16 rq_db_inc;
 };
 
 struct c4iw_qp {
@@ -427,6 +496,8 @@
 
 enum c4iw_qp_attr_mask {
 	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
+	C4IW_QP_ATTR_SQ_DB = 1<<1,
+	C4IW_QP_ATTR_RQ_DB = 1<<2,
 	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
 	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
 	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
@@ -480,6 +551,23 @@
 	}
 }
 
+static inline int to_ib_qp_state(int c4iw_qp_state)
+{
+	switch (c4iw_qp_state) {
+	case C4IW_QP_STATE_IDLE:
+		return IB_QPS_INIT;
+	case C4IW_QP_STATE_RTS:
+		return IB_QPS_RTS;
+	case C4IW_QP_STATE_CLOSING:
+		return IB_QPS_SQD;
+	case C4IW_QP_STATE_TERMINATE:
+		return IB_QPS_SQE;
+	case C4IW_QP_STATE_ERROR:
+		return IB_QPS_ERR;
+	}
+	return IB_QPS_ERR;
+}
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
 	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -693,14 +781,20 @@
 	return wscale;
 }
 
+u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
+void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
+int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
+			u32 reserved, u32 flags);
+void c4iw_id_table_free(struct c4iw_id_table *alloc);
+
 typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
 
 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
 		     struct l2t_entry *l2t);
 void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
 		   struct c4iw_dev_ucontext *uctx);
-u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
-void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
+u32 c4iw_get_resource(struct c4iw_id_table *id_table);
+void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
 int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
 int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
 int c4iw_pblpool_create(struct c4iw_rdev *rdev);
@@ -769,6 +863,8 @@
 			     struct ib_udata *udata);
 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 				 int attr_mask, struct ib_udata *udata);
+int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_qp_init_attr *init_attr);
 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
 u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
@@ -797,5 +893,7 @@
 extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 extern int c4iw_max_read_depth;
+extern int db_fc_threshold;
+
 
 #endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 40c8353..57e07c6 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -131,10 +131,14 @@
 	stag_idx = (*stag) >> 8;
 
 	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
-		stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
-					     &rdev->resource.tpt_fifo_lock);
+		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
 		if (!stag_idx)
 			return -ENOMEM;
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.stag.cur += 32;
+		if (rdev->stats.stag.cur > rdev->stats.stag.max)
+			rdev->stats.stag.max = rdev->stats.stag.cur;
+		mutex_unlock(&rdev->stats.lock);
 		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
 	}
 	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
@@ -165,9 +169,12 @@
 				(rdev->lldi.vr->stag.start >> 5),
 				sizeof(tpt), &tpt);
 
-	if (reset_tpt_entry)
-		c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
-				  &rdev->resource.tpt_fifo_lock);
+	if (reset_tpt_entry) {
+		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.stag.cur -= 32;
+		mutex_unlock(&rdev->stats.lock);
+	}
 	return err;
 }
 
@@ -686,8 +693,8 @@
 	mhp = to_c4iw_mw(mw);
 	rhp = mhp->rhp;
 	mmid = (mw->rkey) >> 8;
-	deallocate_window(&rhp->rdev, mhp->attr.stag);
 	remove_handle(rhp, &rhp->mmidr, mmid);
+	deallocate_window(&rhp->rdev, mhp->attr.stag);
 	kfree(mhp);
 	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
 	return 0;
@@ -789,12 +796,12 @@
 	mhp = to_c4iw_mr(ib_mr);
 	rhp = mhp->rhp;
 	mmid = mhp->attr.stag >> 8;
+	remove_handle(rhp, &rhp->mmidr, mmid);
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
 		       mhp->attr.pbl_addr);
 	if (mhp->attr.pbl_size)
 		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 				  mhp->attr.pbl_size << 3);
-	remove_handle(rhp, &rhp->mmidr, mmid);
 	if (mhp->kva)
 		kfree((void *) (unsigned long) mhp->kva);
 	if (mhp->umem)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index be1c18f..e084fdc 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -188,8 +188,10 @@
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
 	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
-	c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid,
-			  &rhp->rdev.resource.pdid_fifo_lock);
+	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
+	mutex_lock(&rhp->rdev.stats.lock);
+	rhp->rdev.stats.pd.cur--;
+	mutex_unlock(&rhp->rdev.stats.lock);
 	kfree(php);
 	return 0;
 }
@@ -204,14 +206,12 @@
 
 	PDBG("%s ibdev %p\n", __func__, ibdev);
 	rhp = (struct c4iw_dev *) ibdev;
-	pdid =  c4iw_get_resource(&rhp->rdev.resource.pdid_fifo,
-				  &rhp->rdev.resource.pdid_fifo_lock);
+	pdid =  c4iw_get_resource(&rhp->rdev.resource.pdid_table);
 	if (!pdid)
 		return ERR_PTR(-EINVAL);
 	php = kzalloc(sizeof(*php), GFP_KERNEL);
 	if (!php) {
-		c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid,
-				  &rhp->rdev.resource.pdid_fifo_lock);
+		c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
 		return ERR_PTR(-ENOMEM);
 	}
 	php->pdid = pdid;
@@ -222,6 +222,11 @@
 			return ERR_PTR(-EFAULT);
 		}
 	}
+	mutex_lock(&rhp->rdev.stats.lock);
+	rhp->rdev.stats.pd.cur++;
+	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
+		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
+	mutex_unlock(&rhp->rdev.stats.lock);
 	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
 	return &php->ibpd;
 }
@@ -438,6 +443,7 @@
 	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
 	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
 	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
 	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
 	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
 	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
@@ -460,6 +466,7 @@
 	dev->ibdev.destroy_ah = c4iw_ah_destroy;
 	dev->ibdev.create_qp = c4iw_create_qp;
 	dev->ibdev.modify_qp = c4iw_ib_modify_qp;
+	dev->ibdev.query_qp = c4iw_ib_query_qp;
 	dev->ibdev.destroy_qp = c4iw_destroy_qp;
 	dev->ibdev.create_cq = c4iw_create_cq;
 	dev->ibdev.destroy_cq = c4iw_destroy_cq;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5f940ae..45aedf1 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -34,10 +34,19 @@
 
 #include "iw_cxgb4.h"
 
+static int db_delay_usecs = 1;
+module_param(db_delay_usecs, int, 0644);
+MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");
+
 static int ocqp_support = 1;
 module_param(ocqp_support, int, 0644);
 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
 
+int db_fc_threshold = 2000;
+module_param(db_fc_threshold, int, 0644);
+MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
+		 "db flow control mode (default = 2000)");
+
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
 	unsigned long flag;
@@ -1128,6 +1137,35 @@
 	return ret;
 }
 
+/*
+ * Called by the library when the qp has user dbs disabled due to
+ * a DB_FULL condition.  This function will single-thread all user
+ * DB rings to avoid overflowing the hw db-fifo.
+ */
+static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
+{
+	int delay = db_delay_usecs;
+
+	mutex_lock(&qhp->rhp->db_mutex);
+	do {
+
+		/*
+		 * The interrupt threshold is dbfifo_int_thresh << 6. So
+		 * make sure we don't cross that and generate an interrupt.
+		 */
+		if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
+		    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
+			writel(V_QID(qid) | V_PIDX(inc), qhp->wq.db);
+			break;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(delay));
+		delay = min(delay << 1, 2000);
+	} while (1);
+	mutex_unlock(&qhp->rhp->db_mutex);
+	return 0;
+}
+
 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		   enum c4iw_qp_attr_mask mask,
 		   struct c4iw_qp_attributes *attrs,
@@ -1176,6 +1214,15 @@
 		qhp->attr = newattr;
 	}
 
+	if (mask & C4IW_QP_ATTR_SQ_DB) {
+		ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
+		goto out;
+	}
+	if (mask & C4IW_QP_ATTR_RQ_DB) {
+		ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
+		goto out;
+	}
+
 	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
 		goto out;
 	if (qhp->attr.state == attrs->next_state)
@@ -1352,6 +1399,14 @@
 	return ret;
 }
 
+static int enable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_enable_wq_db(&qp->wq);
+	return 0;
+}
+
 int c4iw_destroy_qp(struct ib_qp *ib_qp)
 {
 	struct c4iw_dev *rhp;
@@ -1369,7 +1424,16 @@
 		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
 	wait_event(qhp->wait, !qhp->ep);
 
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+	spin_lock_irq(&rhp->lock);
+	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+	rhp->qpcnt--;
+	BUG_ON(rhp->qpcnt < 0);
+	if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
+		rhp->rdev.stats.db_state_transitions++;
+		rhp->db_state = NORMAL;
+		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
+	}
+	spin_unlock_irq(&rhp->lock);
 	atomic_dec(&qhp->refcnt);
 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
@@ -1383,6 +1447,14 @@
 	return 0;
 }
 
+static int disable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_disable_wq_db(&qp->wq);
+	return 0;
+}
+
 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 			     struct ib_udata *udata)
 {
@@ -1469,7 +1541,16 @@
 	init_waitqueue_head(&qhp->wait);
 	atomic_set(&qhp->refcnt, 1);
 
-	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+	spin_lock_irq(&rhp->lock);
+	if (rhp->db_state != NORMAL)
+		t4_disable_wq_db(&qhp->wq);
+	if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
+		rhp->rdev.stats.db_state_transitions++;
+		rhp->db_state = FLOW_CONTROL;
+		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
+	}
+	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+	spin_unlock_irq(&rhp->lock);
 	if (ret)
 		goto err2;
 
@@ -1613,6 +1694,15 @@
 			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
 			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
 
+	/*
+	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
+	 * ringing the queue db when we're in DB_FULL mode.
+	 */
+	attrs.sq_db_inc = attr->sq_psn;
+	attrs.rq_db_inc = attr->rq_psn;
+	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
+	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+
 	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }
 
@@ -1621,3 +1711,14 @@
 	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
 	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
 }
+
+int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
+
+	memset(attr, 0, sizeof *attr);
+	memset(init_attr, 0, sizeof *init_attr);
+	attr->qp_state = to_ib_qp_state(qhp->attr.state);
+	return 0;
+}
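[Editor's sketch] The qp.c hunks above let the user library hand doorbell IDX_INC values back to the kernel through the otherwise unused SQ_PSN/RQ_PSN fields once user doorbells are disabled, and ring_kernel_db() then single-threads the actual writes, backing off exponentially (starting at db_delay_usecs and capped at 2000 usecs) while the hardware doorbell FIFO drains. A trivial stand-alone sketch of that backoff schedule; fifo_nearly_full() is a made-up predicate standing in for the cxgb4_dbfifo_count() threshold check.

#include <stdio.h>

/* pretend the FIFO stays above the interrupt threshold for 5 polls */
static int fifo_nearly_full(int poll)
{
	return poll < 5;
}

int main(void)
{
	int delay = 1;	/* db_delay_usecs module parameter, default 1 */
	int poll = 0;

	while (fifo_nearly_full(poll++)) {
		printf("poll %d: sleep %d usecs\n", poll, delay);
		delay = (delay << 1 < 2000) ? delay << 1 : 2000; /* min(delay << 1, 2000) */
	}
	printf("fifo below threshold: ring doorbell\n");
	return 0;
}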
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 407ff39..cdef4d7 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -30,96 +30,25 @@
  * SOFTWARE.
  */
 /* Crude resource management */
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
 #include <linux/spinlock.h>
-#include <linux/errno.h>
 #include <linux/genalloc.h>
 #include <linux/ratelimit.h>
 #include "iw_cxgb4.h"
 
-#define RANDOM_SIZE 16
-
-static int __c4iw_init_resource_fifo(struct kfifo *fifo,
-				   spinlock_t *fifo_lock,
-				   u32 nr, u32 skip_low,
-				   u32 skip_high,
-				   int random)
-{
-	u32 i, j, entry = 0, idx;
-	u32 random_bytes;
-	u32 rarray[16];
-	spin_lock_init(fifo_lock);
-
-	if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
-		return -ENOMEM;
-
-	for (i = 0; i < skip_low + skip_high; i++)
-		kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
-	if (random) {
-		j = 0;
-		random_bytes = random32();
-		for (i = 0; i < RANDOM_SIZE; i++)
-			rarray[i] = i + skip_low;
-		for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
-			if (j >= RANDOM_SIZE) {
-				j = 0;
-				random_bytes = random32();
-			}
-			idx = (random_bytes >> (j * 2)) & 0xF;
-			kfifo_in(fifo,
-				(unsigned char *) &rarray[idx],
-				sizeof(u32));
-			rarray[idx] = i;
-			j++;
-		}
-		for (i = 0; i < RANDOM_SIZE; i++)
-			kfifo_in(fifo,
-				(unsigned char *) &rarray[i],
-				sizeof(u32));
-	} else
-		for (i = skip_low; i < nr - skip_high; i++)
-			kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
-
-	for (i = 0; i < skip_low + skip_high; i++)
-		if (kfifo_out_locked(fifo, (unsigned char *) &entry,
-				     sizeof(u32), fifo_lock))
-			break;
-	return 0;
-}
-
-static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
-				   u32 nr, u32 skip_low, u32 skip_high)
-{
-	return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
-					  skip_high, 0);
-}
-
-static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
-				   spinlock_t *fifo_lock,
-				   u32 nr, u32 skip_low, u32 skip_high)
-{
-	return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
-					  skip_high, 1);
-}
-
-static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
+static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
 {
 	u32 i;
 
-	spin_lock_init(&rdev->resource.qid_fifo_lock);
-
-	if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size *
-			sizeof(u32), GFP_KERNEL))
+	if (c4iw_id_table_alloc(&rdev->resource.qid_table,
+				rdev->lldi.vr->qp.start,
+				rdev->lldi.vr->qp.size,
+				rdev->lldi.vr->qp.size, 0))
 		return -ENOMEM;
 
 	for (i = rdev->lldi.vr->qp.start;
-	     i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
+		i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
 		if (!(i & rdev->qpmask))
-			kfifo_in(&rdev->resource.qid_fifo,
-				    (unsigned char *) &i, sizeof(u32));
+			c4iw_id_free(&rdev->resource.qid_table, i);
 	return 0;
 }
 
@@ -127,44 +56,42 @@
 int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
 {
 	int err = 0;
-	err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
-					     &rdev->resource.tpt_fifo_lock,
-					     nr_tpt, 1, 0);
+	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
+					C4IW_ID_TABLE_F_RANDOM);
 	if (err)
 		goto tpt_err;
-	err = c4iw_init_qid_fifo(rdev);
+	err = c4iw_init_qid_table(rdev);
 	if (err)
 		goto qid_err;
-	err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
-				      &rdev->resource.pdid_fifo_lock,
-				      nr_pdid, 1, 0);
+	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
+					nr_pdid, 1, 0);
 	if (err)
 		goto pdid_err;
 	return 0;
-pdid_err:
-	kfifo_free(&rdev->resource.qid_fifo);
-qid_err:
-	kfifo_free(&rdev->resource.tpt_fifo);
-tpt_err:
+ pdid_err:
+	c4iw_id_table_free(&rdev->resource.qid_table);
+ qid_err:
+	c4iw_id_table_free(&rdev->resource.tpt_table);
+ tpt_err:
 	return -ENOMEM;
 }
 
 /*
  * returns 0 if no resource available
  */
-u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
+u32 c4iw_get_resource(struct c4iw_id_table *id_table)
 {
 	u32 entry;
-	if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
-		return entry;
-	else
+	entry = c4iw_id_alloc(id_table);
+	if (entry == (u32)(-1))
 		return 0;
+	return entry;
 }
 
-void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
+void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
 {
 	PDBG("%s entry 0x%x\n", __func__, entry);
-	kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
+	c4iw_id_free(id_table, entry);
 }
 
 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
@@ -181,10 +108,12 @@
 		qid = entry->qid;
 		kfree(entry);
 	} else {
-		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
-					&rdev->resource.qid_fifo_lock);
+		qid = c4iw_get_resource(&rdev->resource.qid_table);
 		if (!qid)
 			goto out;
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.qid.cur += rdev->qpmask + 1;
+		mutex_unlock(&rdev->stats.lock);
 		for (i = qid+1; i & rdev->qpmask; i++) {
 			entry = kmalloc(sizeof *entry, GFP_KERNEL);
 			if (!entry)
@@ -213,6 +142,10 @@
 out:
 	mutex_unlock(&uctx->lock);
 	PDBG("%s qid 0x%x\n", __func__, qid);
+	mutex_lock(&rdev->stats.lock);
+	if (rdev->stats.qid.cur > rdev->stats.qid.max)
+		rdev->stats.qid.max = rdev->stats.qid.cur;
+	mutex_unlock(&rdev->stats.lock);
 	return qid;
 }
 
@@ -245,10 +178,12 @@
 		qid = entry->qid;
 		kfree(entry);
 	} else {
-		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
-					&rdev->resource.qid_fifo_lock);
+		qid = c4iw_get_resource(&rdev->resource.qid_table);
 		if (!qid)
 			goto out;
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.qid.cur += rdev->qpmask + 1;
+		mutex_unlock(&rdev->stats.lock);
 		for (i = qid+1; i & rdev->qpmask; i++) {
 			entry = kmalloc(sizeof *entry, GFP_KERNEL);
 			if (!entry)
@@ -277,6 +212,10 @@
 out:
 	mutex_unlock(&uctx->lock);
 	PDBG("%s qid 0x%x\n", __func__, qid);
+	mutex_lock(&rdev->stats.lock);
+	if (rdev->stats.qid.cur > rdev->stats.qid.max)
+		rdev->stats.qid.max = rdev->stats.qid.cur;
+	mutex_unlock(&rdev->stats.lock);
 	return qid;
 }
 
@@ -297,9 +236,9 @@
 
 void c4iw_destroy_resource(struct c4iw_resource *rscp)
 {
-	kfifo_free(&rscp->tpt_fifo);
-	kfifo_free(&rscp->qid_fifo);
-	kfifo_free(&rscp->pdid_fifo);
+	c4iw_id_table_free(&rscp->tpt_table);
+	c4iw_id_table_free(&rscp->qid_table);
+	c4iw_id_table_free(&rscp->pdid_table);
 }
 
 /*
@@ -312,15 +251,23 @@
 {
 	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
 	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
-	if (!addr)
-		printk_ratelimited(KERN_WARNING MOD "%s: Out of PBL memory\n",
-		       pci_name(rdev->lldi.pdev));
+	mutex_lock(&rdev->stats.lock);
+	if (addr) {
+		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
+		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
+			rdev->stats.pbl.max = rdev->stats.pbl.cur;
+	} else
+		rdev->stats.pbl.fail++;
+	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+	mutex_lock(&rdev->stats.lock);
+	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
+	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
 }
 
@@ -377,12 +324,23 @@
 	if (!addr)
 		printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
 		       pci_name(rdev->lldi.pdev));
+	mutex_lock(&rdev->stats.lock);
+	if (addr) {
+		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
+		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
+			rdev->stats.rqt.max = rdev->stats.rqt.cur;
+	} else
+		rdev->stats.rqt.fail++;
+	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+	mutex_lock(&rdev->stats.lock);
+	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
+	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
 }
 
@@ -433,12 +391,22 @@
 {
 	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
 	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+	if (addr) {
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
+		if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
+			rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
+		mutex_unlock(&rdev->stats.lock);
+	}
 	return (u32)addr;
 }
 
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+	mutex_lock(&rdev->stats.lock);
+	rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
+	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
 }
 
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index c0221ee..16f26ab 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -62,6 +62,10 @@
 	__be16 pidx;
 	u8 qp_err;	/* flit 1 - sw owns */
 	u8 db_off;
+	u8 pad;
+	u16 host_wq_pidx;
+	u16 host_cidx;
+	u16 host_pidx;
 };
 
 #define T4_EQ_ENTRY_SIZE 64
@@ -375,6 +379,16 @@
 		wq->rq.cidx = 0;
 }
 
+static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
+{
+	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
+}
+
+static inline u16 t4_rq_wq_size(struct t4_wq *wq)
+{
+		return wq->rq.size * T4_RQ_NUM_SLOTS;
+}
+
 static inline int t4_sq_onchip(struct t4_sq *sq)
 {
 	return sq->flags & T4_SQ_ONCHIP;
@@ -412,6 +426,16 @@
 		wq->sq.cidx = 0;
 }
 
+static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
+{
+	return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
+}
+
+static inline u16 t4_sq_wq_size(struct t4_wq *wq)
+{
+		return wq->sq.size * T4_SQ_NUM_SLOTS;
+}
+
 static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
 {
 	wmb();
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h
index e6669d5..32b754c 100644
--- a/drivers/infiniband/hw/cxgb4/user.h
+++ b/drivers/infiniband/hw/cxgb4/user.h
@@ -32,7 +32,7 @@
 #ifndef __C4IW_USER_H__
 #define __C4IW_USER_H__
 
-#define C4IW_UVERBS_ABI_VERSION	1
+#define C4IW_UVERBS_ABI_VERSION	2
 
 /*
  * Make sure that all structs defined in this file remain laid out so
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 1d7aea1..7cc3054 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -596,8 +596,7 @@
 
 	ipath_format_hwerrors(hwerrs,
 			      ipath_6110_hwerror_msgs,
-			      sizeof(ipath_6110_hwerror_msgs) /
-			      sizeof(ipath_6110_hwerror_msgs[0]),
+			      ARRAY_SIZE(ipath_6110_hwerror_msgs),
 			      msg, msgl);
 
 	if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index c0a03ac..26dfbc8 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -209,8 +209,7 @@
 {
 	int i;
 	const int glen =
-	    sizeof(ipath_generic_hwerror_msgs) /
-	    sizeof(ipath_generic_hwerror_msgs[0]);
+	    ARRAY_SIZE(ipath_generic_hwerror_msgs);
 
 	for (i=0; i<glen; i++) {
 		if (hwerrs & ipath_generic_hwerror_msgs[i].mask) {
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 77c8cb4..6d4ef71c 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -50,7 +50,7 @@
 	struct ib_cq *ibcq;
 
 	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
-		printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+		pr_warn("Unexpected event type %d "
 		       "on CQ %06x\n", type, cq->cqn);
 		return;
 	}
@@ -222,6 +222,9 @@
 		uar = &dev->priv_uar;
 	}
 
+	if (dev->eq_table)
+		vector = dev->eq_table[vector % ibdev->num_comp_vectors];
+
 	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
 			    cq->db.dma, &cq->mcq, vector, 0);
 	if (err)
@@ -463,7 +466,7 @@
 {
 	__be32 *buf = cqe;
 
-	printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
+	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
 	       be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
 	       be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
 	       be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
@@ -473,7 +476,7 @@
 				     struct ib_wc *wc)
 {
 	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
-		printk(KERN_DEBUG "local QP operation err "
+		pr_debug("local QP operation err "
 		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
 		       "opcode = %02x)\n",
 		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
@@ -576,7 +579,7 @@
 
 	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
 		     is_send)) {
-		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
+		pr_warn("Completion for NOP opcode detected!\n");
 		return -EINVAL;
 	}
 
@@ -606,7 +609,7 @@
 		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
 				       be32_to_cpu(cqe->vlan_my_qpn));
 		if (unlikely(!mqp)) {
-			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
+			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
 			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
 			return -EINVAL;
 		}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 669673e..ee1c577 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -247,7 +247,7 @@
 		err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
 				   NULL, NULL, in_mad, out_mad);
 		if (err)
-			return err;
+			goto out;
 
 		/* Checking LinkSpeedActive for FDR-10 */
 		if (out_mad->data[15] & 0x1)
@@ -789,7 +789,7 @@
 		list_del(&ge->list);
 		kfree(ge);
 	} else
-		printk(KERN_WARNING "could not find mgid entry\n");
+		pr_warn("could not find mgid entry\n");
 
 	mutex_unlock(&mqp->mutex);
 
@@ -902,7 +902,7 @@
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
-		printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
+		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
 		return;
 	}
 
@@ -913,7 +913,7 @@
 		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
 		       MLX4_CMD_NATIVE);
 	if (err)
-		printk(KERN_WARNING "set port command failed\n");
+		pr_warn("set port command failed\n");
 	else {
 		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
 		event.device = &gw->dev->ib_dev;
@@ -1076,18 +1076,98 @@
 	return NOTIFY_DONE;
 }
 
+static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
+{
+	char name[32];
+	int eq_per_port = 0;
+	int added_eqs = 0;
+	int total_eqs = 0;
+	int i, j, eq;
+
+	/* Init eq table */
+	ibdev->eq_table = NULL;
+	ibdev->eq_added = 0;
+
+	/* Legacy mode? */
+	if (dev->caps.comp_pool == 0)
+		return;
+
+	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
+					dev->caps.num_ports);
+
+	/* Init eq table */
+	added_eqs = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+		added_eqs += eq_per_port;
+
+	total_eqs = dev->caps.num_comp_vectors + added_eqs;
+
+	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+	if (!ibdev->eq_table)
+		return;
+
+	ibdev->eq_added = added_eqs;
+
+	eq = 0;
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
+		for (j = 0; j < eq_per_port; j++) {
+			sprintf(name, "mlx4-ib-%d-%d@%s",
+				i, j, dev->pdev->bus->name);
+			/* Set IRQ for specific name (per ring) */
+			if (mlx4_assign_eq(dev, name, &ibdev->eq_table[eq])) {
+				/* Use legacy (same as mlx4_en driver) */
+				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
+				ibdev->eq_table[eq] =
+					(eq % dev->caps.num_comp_vectors);
+			}
+			eq++;
+		}
+	}
+
+	/* Fill the rest of the vector with legacy EQs */
+	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
+		ibdev->eq_table[eq++] = i;
+
+	/* Advertise the new number of EQs to clients */
+	ibdev->ib_dev.num_comp_vectors = total_eqs;
+}
+
+static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
+{
+	int i;
+	int total_eqs;
+
+	/* Reset the advertised EQ number */
+	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+
+	/* Free only the added eqs */
+	for (i = 0; i < ibdev->eq_added; i++) {
+		/* Don't free legacy eqs if used */
+		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
+			continue;
+		mlx4_release_eq(dev, ibdev->eq_table[i]);
+	}
+
+	total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
+	memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
+	kfree(ibdev->eq_table);
+
+	ibdev->eq_table = NULL;
+	ibdev->eq_added = 0;
+}
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
 	struct mlx4_ib_dev *ibdev;
 	int num_ports = 0;
-	int i;
+	int i, j;
 	int err;
 	struct mlx4_ib_iboe *iboe;
 
-	printk_once(KERN_INFO "%s", mlx4_ib_version);
+	pr_info_once("%s", mlx4_ib_version);
 
 	if (mlx4_is_mfunc(dev)) {
-		printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+		pr_warn("IB not yet supported in SRIOV\n");
 		return NULL;
 	}
 
@@ -1210,6 +1290,8 @@
 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
 	}
 
+	mlx4_ib_alloc_eqs(dev, ibdev);
+
 	spin_lock_init(&iboe->lock);
 
 	if (init_node_data(ibdev))
@@ -1241,9 +1323,9 @@
 			goto err_reg;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
+	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
 		if (device_create_file(&ibdev->ib_dev.dev,
-				       mlx4_class_attributes[i]))
+				       mlx4_class_attributes[j]))
 			goto err_notif;
 	}
 
@@ -1253,7 +1335,7 @@
 
 err_notif:
 	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-		printk(KERN_WARNING "failure unregistering notifier\n");
+		pr_warn("failure unregistering notifier\n");
 	flush_workqueue(wq);
 
 err_reg:
@@ -1288,7 +1370,7 @@
 	ib_unregister_device(&ibdev->ib_dev);
 	if (ibdev->iboe.nb.notifier_call) {
 		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-			printk(KERN_WARNING "failure unregistering notifier\n");
+			pr_warn("failure unregistering notifier\n");
 		ibdev->iboe.nb.notifier_call = NULL;
 	}
 	iounmap(ibdev->uar_map);
@@ -1298,6 +1380,8 @@
 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
 		mlx4_CLOSE_PORT(dev, p);
 
+	mlx4_ib_free_eqs(dev, ibdev);
+
 	mlx4_uar_free(dev, &ibdev->priv_uar);
 	mlx4_pd_free(dev, ibdev->priv_pdn);
 	ib_dealloc_device(&ibdev->ib_dev);
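[Editor's sketch] mlx4_ib_alloc_eqs() above carves rounddown_pow_of_two(comp_pool / num_ports) dedicated EQs out of the completion-vector pool for each IB port, appends the legacy vectors behind them, and advertises the enlarged count; the cq.c hunk then remaps a requested completion vector through that table. The arithmetic is sketched below with made-up numbers (both ports assumed to be IB ports); rounddown_pow2() is a local helper standing in for the kernel's rounddown_pow_of_two().

#include <stdio.h>

/* largest power of two <= n, for n >= 1 */
static int rounddown_pow2(int n)
{
	int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	/* example values only, not from any real HCA */
	int comp_pool = 12, num_ports = 2, num_comp_vectors = 4;
	int eq_per_port, added_eqs, total_eqs, requested;

	eq_per_port = rounddown_pow2(comp_pool / num_ports);	/* 4 */
	added_eqs = eq_per_port * num_ports;			/* 8 */
	total_eqs = num_comp_vectors + added_eqs;		/* 12 */
	printf("eq_per_port=%d added=%d total=%d\n",
	       eq_per_port, added_eqs, total_eqs);

	/* create_cq indexes the table with the caller's vector modulo the
	 * advertised num_comp_vectors (which is now total_eqs) */
	requested = 9;
	printf("comp vector %d -> eq_table[%d]\n",
	       requested, requested % total_eqs);
	return 0;
}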
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index ed80345..e62297c 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -202,6 +202,8 @@
 	bool			ib_active;
 	struct mlx4_ib_iboe	iboe;
 	int			counters[MLX4_MAX_PORTS];
+	int		       *eq_table;
+	int			eq_added;
 };
 
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index dca55b1..bbaf617 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -338,7 +338,7 @@
 
 	err = mlx4_SYNC_TPT(mdev);
 	if (err)
-		printk(KERN_WARNING "mlx4_ib: SYNC_TPT error %d when "
+		pr_warn("SYNC_TPT error %d when "
 		       "unmapping FMRs\n", err);
 
 	return 0;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 3a78489..ceb3332 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -84,6 +84,11 @@
 	MLX4_IB_CACHE_LINE_SIZE	= 64,
 };
 
+enum {
+	MLX4_RAW_QP_MTU		= 7,
+	MLX4_RAW_QP_MSGMAX	= 31,
+};
+
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
 	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
@@ -256,7 +261,7 @@
 			event.event = IB_EVENT_QP_ACCESS_ERR;
 			break;
 		default:
-			printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+			pr_warn("Unexpected event type %d "
 			       "on QP %06x\n", type, qp->qpn);
 			return;
 		}
@@ -573,7 +578,12 @@
 	if (sqpn) {
 		qpn = sqpn;
 	} else {
-		err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+		/* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
+		 * BlueFlame setup flow wrongly causes VLAN insertion. */
+		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
+			err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
+		else
+			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
 		if (err)
 			goto err_wrid;
 	}
@@ -715,7 +725,7 @@
 	if (qp->state != IB_QPS_RESET)
 		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
 				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
-			printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
+			pr_warn("modify QP %06x to RESET failed.\n",
 			       qp->mqp.qpn);
 
 	get_cqs(qp, &send_cq, &recv_cq);
@@ -791,6 +801,7 @@
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_UD:
+	case IB_QPT_RAW_PACKET:
 	{
 		qp = kzalloc(sizeof *qp, GFP_KERNEL);
 		if (!qp)
@@ -872,7 +883,8 @@
 	case IB_QPT_XRC_INI:
 	case IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
 	case IB_QPT_SMI:
-	case IB_QPT_GSI:	return MLX4_QP_ST_MLX;
+	case IB_QPT_GSI:
+	case IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;
 	default:		return -1;
 	}
 }
@@ -946,7 +958,7 @@
 
 	if (ah->ah_flags & IB_AH_GRH) {
 		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
-			printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
+			pr_err("sgid_index (%u) too large. max is %d\n",
 			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
 			return -1;
 		}
@@ -1042,6 +1054,8 @@
 
 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+	else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+		context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
 	else if (ibqp->qp_type == IB_QPT_UD) {
 		if (qp->flags & MLX4_IB_QP_LSO)
 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
@@ -1050,7 +1064,7 @@
 			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
 	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
-			printk(KERN_ERR "path MTU (%u) is invalid\n",
+			pr_err("path MTU (%u) is invalid\n",
 			       attr->path_mtu);
 			goto out;
 		}
@@ -1200,7 +1214,8 @@
 	if (cur_state == IB_QPS_INIT &&
 	    new_state == IB_QPS_RTR  &&
 	    (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-	     ibqp->qp_type == IB_QPT_UD)) {
+	     ibqp->qp_type == IB_QPT_UD ||
+	     ibqp->qp_type == IB_QPT_RAW_PACKET)) {
 		context->pri_path.sched_queue = (qp->port - 1) << 6;
 		if (is_qp0(dev, qp))
 			context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
@@ -1266,7 +1281,7 @@
 	if (is_qp0(dev, qp)) {
 		if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
 			if (mlx4_INIT_PORT(dev->dev, qp->port))
-				printk(KERN_WARNING "INIT_PORT failed for port %d\n",
+				pr_warn("INIT_PORT failed for port %d\n",
 				       qp->port);
 
 		if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
@@ -1319,6 +1334,11 @@
 		goto out;
 	}
 
+	if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
+	    (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
+	     IB_LINK_LAYER_ETHERNET))
+		goto out;
+
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 		if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
@@ -1424,6 +1444,9 @@
 
 	if (is_eth) {
 		u8 *smac;
+		u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
+
+		mlx->sched_prio = cpu_to_be16(pcp);
 
 		memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
 		/* FIXME: cache smac value? */
@@ -1434,10 +1457,7 @@
 		if (!is_vlan) {
 			sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
 		} else {
-			u16 pcp;
-
 			sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
-			pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
 			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
 		}
 	} else {
@@ -1460,16 +1480,16 @@
 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
 
 	if (0) {
-		printk(KERN_ERR "built UD header of size %d:\n", header_size);
+		pr_err("built UD header of size %d:\n", header_size);
 		for (i = 0; i < header_size / 4; ++i) {
 			if (i % 8 == 0)
-				printk("  [%02x] ", i * 4);
-			printk(" %08x",
-			       be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
+				pr_err("  [%02x] ", i * 4);
+			pr_cont(" %08x",
+				be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
 			if ((i + 1) % 8 == 0)
-				printk("\n");
+				pr_cont("\n");
 		}
-		printk("\n");
+		pr_err("\n");
 	}
 
 	/*
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 39542f3..60c5fb0 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -59,7 +59,7 @@
 			event.event = IB_EVENT_SRQ_ERR;
 			break;
 		default:
-			printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+			pr_warn("Unexpected event type %d "
 			       "on SRQ %06x\n", type, srq->srqn);
 			return;
 		}
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 71edfbb..020e95c 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2884,7 +2884,8 @@
 			ibevent.device = nesqp->ibqp.device;
 			ibevent.event = nesqp->terminate_eventtype;
 			ibevent.element.qp = &nesqp->ibqp;
-			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+			if (nesqp->ibqp.event_handler)
+				nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
 		}
 	}
 
@@ -3320,6 +3321,10 @@
 
 	nesqp->private_data_len = conn_param->private_data_len;
 	nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
+	/* space for rdma0 read msg */
+	if (conn_param->ord == 0)
+		nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(1);
+
 	nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
 	nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
 		  conn_param->private_data_len);
diff --git a/drivers/infiniband/hw/ocrdma/Kconfig b/drivers/infiniband/hw/ocrdma/Kconfig
new file mode 100644
index 0000000..b5b6056
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/Kconfig
@@ -0,0 +1,8 @@
+config INFINIBAND_OCRDMA
+	tristate "Emulex One Connect HCA support"
+	depends on ETHERNET && NETDEVICES && PCI && (IPV6 || IPV6=n)
+	select NET_VENDOR_EMULEX
+	select BE2NET
+	---help---
+	  This driver provides low-level InfiniBand over Ethernet
+	  support for Emulex One Connect host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/ocrdma/Makefile b/drivers/infiniband/hw/ocrdma/Makefile
new file mode 100644
index 0000000..06a5bed
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Idrivers/net/ethernet/emulex/benet
+
+obj-$(CONFIG_INFINIBAND_OCRDMA)	+= ocrdma.o
+
+ocrdma-y :=	ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
new file mode 100644
index 0000000..85a69c9
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -0,0 +1,393 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) adapters.                   *
+ * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#ifndef __OCRDMA_H__
+#define __OCRDMA_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include <be_roce.h>
+#include "ocrdma_sli.h"
+
+#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
+#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
+
+#define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg)
+
+#define OCRDMA_MAX_AH 512
+
+#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
+struct ocrdma_dev_attr {
+	u8 fw_ver[32];
+	u32 vendor_id;
+	u32 device_id;
+	u16 max_pd;
+	u16 max_cq;
+	u16 max_cqe;
+	u16 max_qp;
+	u16 max_wqe;
+	u16 max_rqe;
+	u32 max_inline_data;
+	int max_send_sge;
+	int max_recv_sge;
+	int max_mr;
+	u64 max_mr_size;
+	u32 max_num_mr_pbl;
+	int max_fmr;
+	int max_map_per_fmr;
+	int max_pages_per_frmr;
+	u16 max_ord_per_qp;
+	u16 max_ird_per_qp;
+
+	int device_cap_flags;
+	u8 cq_overflow_detect;
+	u8 srq_supported;
+
+	u32 wqe_size;
+	u32 rqe_size;
+	u32 ird_page_size;
+	u8 local_ca_ack_delay;
+	u8 ird;
+	u8 num_ird_pages;
+};
+
+struct ocrdma_pbl {
+	void *va;
+	dma_addr_t pa;
+};
+
+struct ocrdma_queue_info {
+	void *va;
+	dma_addr_t dma;
+	u32 size;
+	u16 len;
+	u16 entry_size;		/* Size of an element in the queue */
+	u16 id;			/* qid, where to ring the doorbell. */
+	u16 head, tail;
+	bool created;
+	atomic_t used;		/* Number of valid elements in the queue */
+};
+
+struct ocrdma_eq {
+	struct ocrdma_queue_info q;
+	u32 vector;
+	int cq_cnt;
+	struct ocrdma_dev *dev;
+	char irq_name[32];
+};
+
+struct ocrdma_mq {
+	struct ocrdma_queue_info sq;
+	struct ocrdma_queue_info cq;
+	bool rearm_cq;
+};
+
+struct mqe_ctx {
+	struct mutex lock; /* for serializing mailbox commands on MQ */
+	wait_queue_head_t cmd_wait;
+	u32 tag;
+	u16 cqe_status;
+	u16 ext_status;
+	bool cmd_done;
+};
+
+struct ocrdma_dev {
+	struct ib_device ibdev;
+	struct ocrdma_dev_attr attr;
+
+	struct mutex dev_lock; /* provides synchronized access to device data */
+	spinlock_t flush_q_lock ____cacheline_aligned;
+
+	struct ocrdma_cq **cq_tbl;
+	struct ocrdma_qp **qp_tbl;
+
+	struct ocrdma_eq meq;
+	struct ocrdma_eq *qp_eq_tbl;
+	int eq_cnt;
+	u16 base_eqid;
+	u16 max_eq;
+
+	union ib_gid *sgid_tbl;
+	/* provides synchronization to the sgid table for
+	 * updating gid entries triggered by notifier.
+	 */
+	spinlock_t sgid_lock;
+
+	int gsi_qp_created;
+	struct ocrdma_cq *gsi_sqcq;
+	struct ocrdma_cq *gsi_rqcq;
+
+	struct {
+		struct ocrdma_av *va;
+		dma_addr_t pa;
+		u32 size;
+		u32 num_ah;
+		/* provide synchronization for av
+		 * entry allocations.
+		 */
+		spinlock_t lock;
+		u32 ahid;
+		struct ocrdma_pbl pbl;
+	} av_tbl;
+
+	void *mbx_cmd;
+	struct ocrdma_mq mq;
+	struct mqe_ctx mqe_ctx;
+
+	struct be_dev_info nic_info;
+
+	struct list_head entry;
+	struct rcu_head rcu;
+	int id;
+};
+
+struct ocrdma_cq {
+	struct ib_cq ibcq;
+	struct ocrdma_dev *dev;
+	struct ocrdma_cqe *va;
+	u32 phase;
+	u32 getp;	/* pointer to pending wrs to
+			 * return to stack, wraps around
+			 * at max_hw_cqe
+			 */
+	u32 max_hw_cqe;
+	bool phase_change;
+	bool armed, solicited;
+	bool arm_needed;
+
+	spinlock_t cq_lock ____cacheline_aligned; /* provides synchronization
+						   * for cq polling
+						   */
+	/* synchronizes the cq completion handler invoked from multiple contexts */
+	spinlock_t comp_handler_lock ____cacheline_aligned;
+	u16 id;
+	u16 eqn;
+
+	struct ocrdma_ucontext *ucontext;
+	dma_addr_t pa;
+	u32 len;
+	atomic_t use_cnt;
+
+	/* head of all qp's sq and rq for which cqes need to be flushed
+	 * by the software.
+	 */
+	struct list_head sq_head, rq_head;
+};
+
+struct ocrdma_pd {
+	struct ib_pd ibpd;
+	struct ocrdma_dev *dev;
+	struct ocrdma_ucontext *uctx;
+	atomic_t use_cnt;
+	u32 id;
+	int num_dpp_qp;
+	u32 dpp_page;
+	bool dpp_enabled;
+};
+
+struct ocrdma_ah {
+	struct ib_ah ibah;
+	struct ocrdma_dev *dev;
+	struct ocrdma_av *av;
+	u16 sgid_index;
+	u32 id;
+};
+
+struct ocrdma_qp_hwq_info {
+	u8 *va;			/* virtual address */
+	u32 max_sges;
+	u32 head, tail;
+	u32 entry_size;
+	u32 max_cnt;
+	u32 max_wqe_idx;
+	u32 free_delta;
+	u16 dbid;		/* qid, where to ring the doorbell. */
+	u32 len;
+	dma_addr_t pa;
+};
+
+struct ocrdma_srq {
+	struct ib_srq ibsrq;
+	struct ocrdma_dev *dev;
+	u8 __iomem *db;
+	/* provide synchronization to multiple context(s) posting rqe */
+	spinlock_t q_lock ____cacheline_aligned;
+
+	struct ocrdma_qp_hwq_info rq;
+	struct ocrdma_pd *pd;
+	atomic_t use_cnt;
+	u32 id;
+	u64 *rqe_wr_id_tbl;
+	u32 *idx_bit_fields;
+	u32 bit_fields_len;
+};
+
+struct ocrdma_qp {
+	struct ib_qp ibqp;
+	struct ocrdma_dev *dev;
+
+	u8 __iomem *sq_db;
+	/* provide synchronization to multiple context(s) posting wqe, rqe */
+	spinlock_t q_lock ____cacheline_aligned;
+	struct ocrdma_qp_hwq_info sq;
+	struct {
+		uint64_t wrid;
+		uint16_t dpp_wqe_idx;
+		uint16_t dpp_wqe;
+		uint8_t  signaled;
+		uint8_t  rsvd[3];
+	} *wqe_wr_id_tbl;
+	u32 max_inline_data;
+	struct ocrdma_cq *sq_cq;
+	/* list maintained per CQ to flush SQ errors */
+	struct list_head sq_entry;
+
+	u8 __iomem *rq_db;
+	struct ocrdma_qp_hwq_info rq;
+	u64 *rqe_wr_id_tbl;
+	struct ocrdma_cq *rq_cq;
+	struct ocrdma_srq *srq;
+	/* list maintained per CQ to flush RQ errors */
+	struct list_head rq_entry;
+
+	enum ocrdma_qp_state state;	/*  QP state */
+	int cap_flags;
+	u32 max_ord, max_ird;
+
+	u32 id;
+	struct ocrdma_pd *pd;
+
+	enum ib_qp_type qp_type;
+
+	int sgid_idx;
+	u32 qkey;
+	bool dpp_enabled;
+	u8 *ird_q_va;
+};
+
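+/* Bit shift for the number-of-entries-posted field of a work queue doorbell
+ * (as the macro name suggests): 24 on GEN2 family devices for QP ids below
+ * 64, 16 otherwise.
+ */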
+#define OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp) \
+	(((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) && \
+		(qp->id < 64)) ? 24 : 16)
+
+struct ocrdma_hw_mr {
+	struct ocrdma_dev *dev;
+	u32 lkey;
+	u8 fr_mr;
+	u8 remote_atomic;
+	u8 remote_rd;
+	u8 remote_wr;
+	u8 local_rd;
+	u8 local_wr;
+	u8 mw_bind;
+	u8 rsvd;
+	u64 len;
+	struct ocrdma_pbl *pbl_table;
+	u32 num_pbls;
+	u32 num_pbes;
+	u32 pbl_size;
+	u32 pbe_size;
+	u64 fbo;
+	u64 va;
+};
+
+struct ocrdma_mr {
+	struct ib_mr ibmr;
+	struct ib_umem *umem;
+	struct ocrdma_hw_mr hwmr;
+	struct ocrdma_pd *pd;
+};
+
+struct ocrdma_ucontext {
+	struct ib_ucontext ibucontext;
+	struct ocrdma_dev *dev;
+
+	struct list_head mm_head;
+	struct mutex mm_list_lock; /* protects list entries of mm type */
+	struct {
+		u32 *va;
+		dma_addr_t pa;
+		u32 len;
+	} ah_tbl;
+};
+
+struct ocrdma_mm {
+	struct {
+		u64 phy_addr;
+		unsigned long len;
+	} key;
+	struct list_head entry;
+};
+
+static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
+{
+	return container_of(ibdev, struct ocrdma_dev, ibdev);
+}
+
+static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
+							  *ibucontext)
+{
+	return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
+}
+
+static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
+{
+	return container_of(ibpd, struct ocrdma_pd, ibpd);
+}
+
+static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
+{
+	return container_of(ibcq, struct ocrdma_cq, ibcq);
+}
+
+static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
+{
+	return container_of(ibqp, struct ocrdma_qp, ibqp);
+}
+
+static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
+{
+	return container_of(ibmr, struct ocrdma_mr, ibmr);
+}
+
+static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
+{
+	return container_of(ibah, struct ocrdma_ah, ibah);
+}
+
+static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
+{
+	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
+}
+
+#endif
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
new file mode 100644
index 0000000..a411a4e
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -0,0 +1,134 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) adapters.                   *
+ * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#ifndef __OCRDMA_ABI_H__
+#define __OCRDMA_ABI_H__
+
+struct ocrdma_alloc_ucontext_resp {
+	u32 dev_id;
+	u32 wqe_size;
+	u32 max_inline_data;
+	u32 dpp_wqe_size;
+	u64 ah_tbl_page;
+	u32 ah_tbl_len;
+	u32 rsvd;
+	u8 fw_ver[32];
+	u32 rqe_size;
+	u64 rsvd1;
+} __packed;
+
+/* user kernel communication data structures. */
+struct ocrdma_alloc_pd_ureq {
+	u64 rsvd1;
+} __packed;
+
+struct ocrdma_alloc_pd_uresp {
+	u32 id;
+	u32 dpp_enabled;
+	u32 dpp_page_addr_hi;
+	u32 dpp_page_addr_lo;
+	u64 rsvd1;
+} __packed;
+
+struct ocrdma_create_cq_ureq {
+	u32 dpp_cq;
+	u32 rsvd;
+} __packed;
+
+#define MAX_CQ_PAGES 8
+struct ocrdma_create_cq_uresp {
+	u32 cq_id;
+	u32 page_size;
+	u32 num_pages;
+	u32 max_hw_cqe;
+	u64 page_addr[MAX_CQ_PAGES];
+	u64 db_page_addr;
+	u32 db_page_size;
+	u32 phase_change;
+	u64 rsvd1;
+	u64 rsvd2;
+} __packed;
+
+#define MAX_QP_PAGES 8
+#define MAX_UD_AV_PAGES 8
+
+struct ocrdma_create_qp_ureq {
+	u8 enable_dpp_cq;
+	u8 rsvd;
+	u16 dpp_cq_id;
+	u32 rsvd1;
+};
+
+struct ocrdma_create_qp_uresp {
+	u16 qp_id;
+	u16 sq_dbid;
+	u16 rq_dbid;
+	u16 resv0;
+	u32 sq_page_size;
+	u32 rq_page_size;
+	u32 num_sq_pages;
+	u32 num_rq_pages;
+	u64 sq_page_addr[MAX_QP_PAGES];
+	u64 rq_page_addr[MAX_QP_PAGES];
+	u64 db_page_addr;
+	u32 db_page_size;
+	u32 dpp_credit;
+	u32 dpp_offset;
+	u32 rsvd1;
+	u32 num_wqe_allocated;
+	u32 num_rqe_allocated;
+	u32 free_wqe_delta;
+	u32 free_rqe_delta;
+	u32 db_sq_offset;
+	u32 db_rq_offset;
+	u32 db_shift;
+	u64 rsvd2;
+	u64 rsvd3;
+} __packed;
+
+struct ocrdma_create_srq_uresp {
+	u16 rq_dbid;
+	u16 resv0;
+	u32 resv1;
+
+	u32 rq_page_size;
+	u32 num_rq_pages;
+
+	u64 rq_page_addr[MAX_QP_PAGES];
+	u64 db_page_addr;
+
+	u32 db_page_size;
+	u32 num_rqe_allocated;
+	u32 db_rq_offset;
+	u32 db_shift;
+
+	u32 free_rqe_delta;
+	u32 rsvd2;
+	u64 rsvd3;
+} __packed;
+
+#endif				/* __OCRDMA_ABI_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
new file mode 100644
index 0000000..a877a8e
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -0,0 +1,172 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) adapters.                   *
+ * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#include <net/neighbour.h>
+#include <net/netevent.h>
+
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "ocrdma.h"
+#include "ocrdma_verbs.h"
+#include "ocrdma_ah.h"
+#include "ocrdma_hw.h"
+
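+/* Fill in the address handle's AV entry: build the RoCE Ethernet header
+ * (VLAN-tagged when the destination GID carries a VLAN id), resolve the
+ * destination MAC, look up the source GID and place the GRH right after
+ * the L2 header.
+ */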
+static inline int set_av_attr(struct ocrdma_ah *ah,
+				struct ib_ah_attr *attr, int pdid)
+{
+	int status = 0;
+	u16 vlan_tag;
+	bool vlan_enabled = false;
+	struct ocrdma_dev *dev = ah->dev;
+	struct ocrdma_eth_vlan eth;
+	struct ocrdma_grh grh;
+	int eth_sz;
+
+	memset(&eth, 0, sizeof(eth));
+	memset(&grh, 0, sizeof(grh));
+
+	ah->sgid_index = attr->grh.sgid_index;
+
+	vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);
+	if (vlan_tag && (vlan_tag < 0x1000)) {
+		eth.eth_type = cpu_to_be16(0x8100);
+		eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
+		vlan_tag |= (attr->sl & 7) << 13;
+		eth.vlan_tag = cpu_to_be16(vlan_tag);
+		eth_sz = sizeof(struct ocrdma_eth_vlan);
+		vlan_enabled = true;
+	} else {
+		eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
+		eth_sz = sizeof(struct ocrdma_eth_basic);
+	}
+	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
+	status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, &eth.dmac[0]);
+	if (status)
+		return status;
+	status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
+			(union ib_gid *)&grh.sgid[0]);
+	if (status)
+		return status;
+
+	grh.tclass_flow = cpu_to_be32((6 << 28) |
+			(attr->grh.traffic_class << 24) |
+			attr->grh.flow_label);
+	/* 0x1b is next header value in GRH */
+	grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
+			(0x1b << 8) | attr->grh.hop_limit);
+
+	memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
+	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
+	memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
+	if (vlan_enabled)
+		ah->av->valid |= OCRDMA_AV_VLAN_VALID;
+	return status;
+}
+
+struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+{
+	u32 *ahid_addr;
+	int status;
+	struct ocrdma_ah *ah;
+	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+	struct ocrdma_dev *dev = pd->dev;
+
+	if (!(attr->ah_flags & IB_AH_GRH))
+		return ERR_PTR(-EINVAL);
+
+	ah = kzalloc(sizeof *ah, GFP_ATOMIC);
+	if (!ah)
+		return ERR_PTR(-ENOMEM);
+	ah->dev = pd->dev;
+
+	status = ocrdma_alloc_av(dev, ah);
+	if (status)
+		goto av_err;
+	status = set_av_attr(ah, attr, pd->id);
+	if (status)
+		goto av_conf_err;
+
+	/* if pd is for the user process, pass the ah_id to user space */
+	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
+		ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
+		*ahid_addr = ah->id;
+	}
+	return &ah->ibah;
+
+av_conf_err:
+	ocrdma_free_av(dev, ah);
+av_err:
+	kfree(ah);
+	return ERR_PTR(status);
+}
+
+int ocrdma_destroy_ah(struct ib_ah *ibah)
+{
+	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
+	ocrdma_free_av(ah->dev, ah);
+	kfree(ah);
+	return 0;
+}
+
+int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
+{
+	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
+	struct ocrdma_av *av = ah->av;
+	struct ocrdma_grh *grh;
+	attr->ah_flags |= IB_AH_GRH;
+	if (ah->av->valid & Bit(1)) {
+		grh = (struct ocrdma_grh *)((u8 *)ah->av +
+				sizeof(struct ocrdma_eth_vlan));
+		attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
+	} else {
+		grh = (struct ocrdma_grh *)((u8 *)ah->av +
+					sizeof(struct ocrdma_eth_basic));
+		attr->sl = 0;
+	}
+	memcpy(&attr->grh.dgid.raw[0], &grh->dgid[0], sizeof(grh->dgid));
+	attr->grh.sgid_index = ah->sgid_index;
+	attr->grh.hop_limit = be32_to_cpu(grh->pdid_hoplimit) & 0xff;
+	attr->grh.traffic_class = be32_to_cpu(grh->tclass_flow) >> 24;
+	attr->grh.flow_label = be32_to_cpu(grh->tclass_flow) & 0x00ffffffff;
+	return 0;
+}
+
+int ocrdma_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
+{
+	/* modify_ah is unsupported */
+	return -ENOSYS;
+}
+
+int ocrdma_process_mad(struct ib_device *ibdev,
+		       int process_mad_flags,
+		       u8 port_num,
+		       struct ib_wc *in_wc,
+		       struct ib_grh *in_grh,
+		       struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+	return IB_MAD_RESULT_SUCCESS;
+}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
new file mode 100644
index 0000000..8ac49e7
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -0,0 +1,42 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) adapters.                   *
+ * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#ifndef __OCRDMA_AH_H__
+#define __OCRDMA_AH_H__
+
+struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
+int ocrdma_destroy_ah(struct ib_ah *);
+int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
+int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *);
+
+int ocrdma_process_mad(struct ib_device *,
+		       int process_mad_flags,
+		       u8 port_num,
+		       struct ib_wc *in_wc,
+		       struct ib_grh *in_grh,
+		       struct ib_mad *in_mad, struct ib_mad *out_mad);
+#endif				/* __OCRDMA_AH_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
new file mode 100644
index 0000000..9b204b1
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -0,0 +1,2640 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) CNA Adapters.              *
+ * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/dma-mapping.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+
+#include "ocrdma.h"
+#include "ocrdma_hw.h"
+#include "ocrdma_verbs.h"
+#include "ocrdma_ah.h"
+
+enum mbx_status {
+	OCRDMA_MBX_STATUS_FAILED		= 1,
+	OCRDMA_MBX_STATUS_ILLEGAL_FIELD		= 3,
+	OCRDMA_MBX_STATUS_OOR			= 100,
+	OCRDMA_MBX_STATUS_INVALID_PD		= 101,
+	OCRDMA_MBX_STATUS_PD_INUSE		= 102,
+	OCRDMA_MBX_STATUS_INVALID_CQ		= 103,
+	OCRDMA_MBX_STATUS_INVALID_QP		= 104,
+	OCRDMA_MBX_STATUS_INVALID_LKEY		= 105,
+	OCRDMA_MBX_STATUS_ORD_EXCEEDS		= 106,
+	OCRDMA_MBX_STATUS_IRD_EXCEEDS		= 107,
+	OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS	= 108,
+	OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS	= 109,
+	OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS	= 110,
+	OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS	= 111,
+	OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS	= 112,
+	OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE	= 113,
+	OCRDMA_MBX_STATUS_MW_BOUND		= 114,
+	OCRDMA_MBX_STATUS_INVALID_VA		= 115,
+	OCRDMA_MBX_STATUS_INVALID_LENGTH	= 116,
+	OCRDMA_MBX_STATUS_INVALID_FBO		= 117,
+	OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS	= 118,
+	OCRDMA_MBX_STATUS_INVALID_PBE_SIZE	= 119,
+	OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY	= 120,
+	OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT	= 121,
+	OCRDMA_MBX_STATUS_INVALID_SRQ_ID	= 129,
+	OCRDMA_MBX_STATUS_SRQ_ERROR		= 133,
+	OCRDMA_MBX_STATUS_RQE_EXCEEDS		= 134,
+	OCRDMA_MBX_STATUS_MTU_EXCEEDS		= 135,
+	OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS	= 136,
+	OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS	= 137,
+	OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS	= 138,
+	OCRDMA_MBX_STATUS_QP_BOUND		= 130,
+	OCRDMA_MBX_STATUS_INVALID_CHANGE	= 139,
+	OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP	= 140,
+	OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER	= 141,
+	OCRDMA_MBX_STATUS_MW_STILL_BOUND	= 142,
+	OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID	= 143,
+	OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS	= 144
+};
+
+enum additional_status {
+	OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
+};
+
+enum cqe_status {
+	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES	= 1,
+	OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER		= 2,
+	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES	= 3,
+	OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING		= 4,
+	OCRDMA_MBX_CQE_STATUS_DMA_FAILED		= 5
+};
+
+static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
+{
+	return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
+}
+
+static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
+{
+	eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
+}
+
+static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
+{
+	struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
+	    ((u8 *) dev->mq.cq.va +
+	     (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
+
+	if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
+		return NULL;
+	return cqe;
+}
+
+static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
+{
+	dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
+}
+
+static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
+{
+	return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va +
+				     (dev->mq.sq.head *
+				      sizeof(struct ocrdma_mqe)));
+}
+
+static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
+{
+	dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
+	atomic_inc(&dev->mq.sq.used);
+}
+
+static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
+{
+	return (void *)((u8 *) dev->mq.sq.va +
+			(dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)));
+}
+
+enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
+{
+	switch (qps) {
+	case OCRDMA_QPS_RST:
+		return IB_QPS_RESET;
+	case OCRDMA_QPS_INIT:
+		return IB_QPS_INIT;
+	case OCRDMA_QPS_RTR:
+		return IB_QPS_RTR;
+	case OCRDMA_QPS_RTS:
+		return IB_QPS_RTS;
+	case OCRDMA_QPS_SQD:
+	case OCRDMA_QPS_SQ_DRAINING:
+		return IB_QPS_SQD;
+	case OCRDMA_QPS_SQE:
+		return IB_QPS_SQE;
+	case OCRDMA_QPS_ERR:
+		return IB_QPS_ERR;
+	};
+	return IB_QPS_ERR;
+}
+
+static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
+{
+	switch (qps) {
+	case IB_QPS_RESET:
+		return OCRDMA_QPS_RST;
+	case IB_QPS_INIT:
+		return OCRDMA_QPS_INIT;
+	case IB_QPS_RTR:
+		return OCRDMA_QPS_RTR;
+	case IB_QPS_RTS:
+		return OCRDMA_QPS_RTS;
+	case IB_QPS_SQD:
+		return OCRDMA_QPS_SQD;
+	case IB_QPS_SQE:
+		return OCRDMA_QPS_SQE;
+	case IB_QPS_ERR:
+		return OCRDMA_QPS_ERR;
+	};
+	return OCRDMA_QPS_ERR;
+}
+
+static int ocrdma_get_mbx_errno(u32 status)
+{
+	int err_num = -EFAULT;
+	u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
+					OCRDMA_MBX_RSP_STATUS_SHIFT;
+	u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
+					OCRDMA_MBX_RSP_ASTATUS_SHIFT;
+
+	switch (mbox_status) {
+	case OCRDMA_MBX_STATUS_OOR:
+	case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
+		err_num = -EAGAIN;
+		break;
+
+	case OCRDMA_MBX_STATUS_INVALID_PD:
+	case OCRDMA_MBX_STATUS_INVALID_CQ:
+	case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
+	case OCRDMA_MBX_STATUS_INVALID_QP:
+	case OCRDMA_MBX_STATUS_INVALID_CHANGE:
+	case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
+	case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
+	case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
+	case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
+	case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
+	case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
+	case OCRDMA_MBX_STATUS_INVALID_LKEY:
+	case OCRDMA_MBX_STATUS_INVALID_VA:
+	case OCRDMA_MBX_STATUS_INVALID_LENGTH:
+	case OCRDMA_MBX_STATUS_INVALID_FBO:
+	case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
+	case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
+	case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
+	case OCRDMA_MBX_STATUS_SRQ_ERROR:
+	case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
+		err_num = -EINVAL;
+		break;
+
+	case OCRDMA_MBX_STATUS_PD_INUSE:
+	case OCRDMA_MBX_STATUS_QP_BOUND:
+	case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
+	case OCRDMA_MBX_STATUS_MW_BOUND:
+		err_num = -EBUSY;
+		break;
+
+	case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
+	case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
+	case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
+	case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
+	case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
+	case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
+	case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
+	case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
+	case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
+		err_num = -ENOBUFS;
+		break;
+
+	case OCRDMA_MBX_STATUS_FAILED:
+		switch (add_status) {
+		case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
+			err_num = -EAGAIN;
+			break;
+		}
+	default:
+		err_num = -EFAULT;
+	}
+	return err_num;
+}
+
+static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
+{
+	int err_num = -EINVAL;
+
+	switch (cqe_status) {
+	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
+		err_num = -EPERM;
+		break;
+	case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
+		err_num = -EINVAL;
+		break;
+	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
+	case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
+		err_num = -EAGAIN;
+		break;
+	case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
+		err_num = -EIO;
+		break;
+	}
+	return err_num;
+}
+
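+/* Compose and post a CQ doorbell: the CQ id (plus its extension bits), the
+ * optional re-arm and solicit-only flags, and the number of CQEs consumed
+ * (cqe_popped).
+ */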
+void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
+		       bool solicited, u16 cqe_popped)
+{
+	u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;
+
+	val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
+	     OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);
+
+	if (armed)
+		val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
+	if (solicited)
+		val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
+	val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
+	iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
+}
+
+static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
+{
+	u32 val = 0;
+
+	val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
+	val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
+	iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
+}
+
+static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
+			      bool arm, bool clear_int, u16 num_eqe)
+{
+	u32 val = 0;
+
+	val |= eq_id & OCRDMA_EQ_ID_MASK;
+	val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);
+	if (arm)
+		val |= (1 << OCRDMA_REARM_SHIFT);
+	if (clear_int)
+		val |= (1 << OCRDMA_EQ_CLR_SHIFT);
+	val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
+	val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
+	iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
+}
+
+static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
+			    u8 opcode, u8 subsys, u32 cmd_len)
+{
+	cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
+	cmd_hdr->timeout = 20; /* seconds */
+	cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
+}
+
+static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
+{
+	struct ocrdma_mqe *mqe;
+
+	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
+	if (!mqe)
+		return NULL;
+	mqe->hdr.spcl_sge_cnt_emb |=
+		(OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
+					OCRDMA_MQE_HDR_EMB_MASK;
+	mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);
+
+	ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
+			mqe->hdr.pyld_len);
+	return mqe;
+}
+
+static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
+{
+	dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
+}
+
+static int ocrdma_alloc_q(struct ocrdma_dev *dev,
+			  struct ocrdma_queue_info *q, u16 len, u16 entry_size)
+{
+	memset(q, 0, sizeof(*q));
+	q->len = len;
+	q->entry_size = entry_size;
+	q->size = len * entry_size;
+	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
+				   &q->dma, GFP_KERNEL);
+	if (!q->va)
+		return -ENOMEM;
+	memset(q->va, 0, q->size);
+	return 0;
+}
+
+static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
+					dma_addr_t host_pa, int hw_page_size)
+{
+	int i;
+
+	for (i = 0; i < cnt; i++) {
+		q_pa[i].lo = (u32) (host_pa & 0xffffffff);
+		q_pa[i].hi = (u32) upper_32_bits(host_pa);
+		host_pa += hw_page_size;
+	}
+}
+
+static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,
+				       struct ocrdma_eq *eq)
+{
+	/* assign vector and update vector id for next EQ */
+	eq->vector = dev->nic_info.msix.start_vector;
+	dev->nic_info.msix.start_vector += 1;
+}
+
+static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)
+{
+	/* this assumes that EQs are freed in exactly the reverse
+	 * order of their allocation.
+	 */
+	dev->nic_info.msix.start_vector -= 1;
+}
+
+static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
+			       int queue_type)
+{
+	u8 opcode = 0;
+	int status;
+	struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;
+
+	switch (queue_type) {
+	case QTYPE_MCCQ:
+		opcode = OCRDMA_CMD_DELETE_MQ;
+		break;
+	case QTYPE_CQ:
+		opcode = OCRDMA_CMD_DELETE_CQ;
+		break;
+	case QTYPE_EQ:
+		opcode = OCRDMA_CMD_DELETE_EQ;
+		break;
+	default:
+		BUG();
+	}
+	memset(cmd, 0, sizeof(*cmd));
+	ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+	cmd->id = q->id;
+
+	status = be_roce_mcc_cmd(dev->nic_info.netdev,
+				 cmd, sizeof(*cmd), NULL, NULL);
+	if (!status)
+		q->created = false;
+	return status;
+}
+
+static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+{
+	int status;
+	struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
+	struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;
+
+	memset(cmd, 0, sizeof(*cmd));
+	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
+			sizeof(*cmd));
+	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
+		cmd->req.rsvd_version = 0;
+	else
+		cmd->req.rsvd_version = 2;
+
+	cmd->num_pages = 4;
+	cmd->valid = OCRDMA_CREATE_EQ_VALID;
+	cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;
+
+	ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
+			     PAGE_SIZE_4K);
+	status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
+				 NULL);
+	if (!status) {
+		eq->q.id = rsp->vector_eqid & 0xffff;
+		if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
+			ocrdma_assign_eq_vect_gen2(dev, eq);
+		else {
+			eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
+			dev->nic_info.msix.start_vector += 1;
+		}
+		eq->q.created = true;
+	}
+	return status;
+}
+
+static int ocrdma_create_eq(struct ocrdma_dev *dev,
+			    struct ocrdma_eq *eq, u16 q_len)
+{
+	int status;
+
+	status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
+				sizeof(struct ocrdma_eqe));
+	if (status)
+		return status;
+
+	status = ocrdma_mbx_create_eq(dev, eq);
+	if (status)
+		goto mbx_err;
+	eq->dev = dev;
+	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
+
+	return 0;
+mbx_err:
+	ocrdma_free_q(dev, &eq->q);
+	return status;
+}
+
+static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+{
+	int irq;
+
+	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
+		irq = dev->nic_info.pdev->irq;
+	else
+		irq = dev->nic_info.msix.vector_list[eq->vector];
+	return irq;
+}
+
+static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+{
+	if (eq->q.created) {
+		ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
+		if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
+			ocrdma_free_eq_vect_gen2(dev);
+		ocrdma_free_q(dev, &eq->q);
+	}
+}
+
+static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+{
+	int irq;
+
+	/* disarm the EQ so that no interrupts are generated while
+	 * it is being freed and the EQ delete is in progress.
+	 */
+	ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);
+
+	irq = ocrdma_get_irq(dev, eq);
+	free_irq(irq, eq);
+	_ocrdma_destroy_eq(dev, eq);
+}
+
+static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)
+{
+	int i;
+
+	/* deallocate the data path eqs */
+	for (i = 0; i < dev->eq_cnt; i++)
+		ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
+}
+
+static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
+				   struct ocrdma_queue_info *cq,
+				   struct ocrdma_queue_info *eq)
+{
+	struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
+	struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
+	int status;
+
+	memset(cmd, 0, sizeof(*cmd));
+	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
+			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+	cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size);
+	cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
+	cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT);
+
+	ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt,
+			     cq->dma, PAGE_SIZE_4K);
+	status = be_roce_mcc_cmd(dev->nic_info.netdev,
+				 cmd, sizeof(*cmd), NULL, NULL);
+	if (!status) {
+		cq->id = (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
+		cq->created = true;
+	}
+	return status;
+}
+
+static u32 ocrdma_encoded_q_len(int q_len)
+{
+	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */
+
+	if (len_encoded == 16)
+		len_encoded = 0;
+	return len_encoded;
+}
+
+static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
+				struct ocrdma_queue_info *mq,
+				struct ocrdma_queue_info *cq)
+{
+	int num_pages, status;
+	struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
+	struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
+	struct ocrdma_pa *pa;
+
+	memset(cmd, 0, sizeof(*cmd));
+	num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
+
+	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+		ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ,
+				OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+		cmd->v0.pages = num_pages;
+		cmd->v0.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
+		cmd->v0.async_cqid_valid = (cq->id << 1);
+		cmd->v0.cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
+					     OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
+		cmd->v0.cqid_ringsize |=
+			(cq->id << OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT);
+		cmd->v0.valid = OCRDMA_CREATE_MQ_VALID;
+		pa = &cmd->v0.pa[0];
+	} else {
+		ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
+				OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+		cmd->req.rsvd_version = 1;
+		cmd->v1.cqid_pages = num_pages;
+		cmd->v1.cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
+		cmd->v1.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
+		cmd->v1.async_event_bitmap = Bit(20);
+		cmd->v1.async_cqid_ringsize = cq->id;
+		cmd->v1.async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
+					     OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
+		cmd->v1.valid = OCRDMA_CREATE_MQ_VALID;
+		pa = &cmd->v1.pa[0];
+	}
+	ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
+	status = be_roce_mcc_cmd(dev->nic_info.netdev,
+				 cmd, sizeof(*cmd), NULL, NULL);
+	if (!status) {
+		mq->id = rsp->id;
+		mq->created = true;
+	}
+	return status;
+}
+
+static int ocrdma_create_mq(struct ocrdma_dev *dev)
+{
+	int status;
+
+	/* Alloc completion queue for Mailbox queue */
+	status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
+				sizeof(struct ocrdma_mcqe));
+	if (status)
+		goto alloc_err;
+
+	status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);
+	if (status)
+		goto mbx_cq_free;
+
+	memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
+	init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
+	mutex_init(&dev->mqe_ctx.lock);
+
+	/* Alloc Mailbox queue */
+	status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
+				sizeof(struct ocrdma_mqe));
+	if (status)
+		goto mbx_cq_destroy;
+	status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
+	if (status)
+		goto mbx_q_free;
+	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
+	return 0;
+
+mbx_q_free:
+	ocrdma_free_q(dev, &dev->mq.sq);
+mbx_cq_destroy:
+	ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
+mbx_cq_free:
+	ocrdma_free_q(dev, &dev->mq.cq);
+alloc_err:
+	return status;
+}
+
+static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
+{
+	struct ocrdma_queue_info *mbxq, *cq;
+
+	/* mqe_ctx lock synchronizes with any other pending cmds. */
+	mutex_lock(&dev->mqe_ctx.lock);
+	mbxq = &dev->mq.sq;
+	if (mbxq->created) {
+		ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
+		ocrdma_free_q(dev, mbxq);
+	}
+	mutex_unlock(&dev->mqe_ctx.lock);
+
+	cq = &dev->mq.cq;
+	if (cq->created) {
+		ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
+		ocrdma_free_q(dev, cq);
+	}
+}
+
+static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
+				       struct ocrdma_qp *qp)
+{
+	enum ib_qp_state new_ib_qps = IB_QPS_ERR;
+	enum ib_qp_state old_ib_qps;
+
+	if (qp == NULL)
+		BUG();
+	ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
+}
+
+static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
+				    struct ocrdma_ae_mcqe *cqe)
+{
+	struct ocrdma_qp *qp = NULL;
+	struct ocrdma_cq *cq = NULL;
+	struct ib_event ib_evt;
+	int cq_event = 0;
+	int qp_event = 1;
+	int srq_event = 0;
+	int dev_event = 0;
+	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
+	    OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
+
+	if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
+		qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
+	if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
+		cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];
+
+	ib_evt.device = &dev->ibdev;
+
+	switch (type) {
+	case OCRDMA_CQ_ERROR:
+		ib_evt.element.cq = &cq->ibcq;
+		ib_evt.event = IB_EVENT_CQ_ERR;
+		cq_event = 1;
+		qp_event = 0;
+		break;
+	case OCRDMA_CQ_OVERRUN_ERROR:
+		ib_evt.element.cq = &cq->ibcq;
+		ib_evt.event = IB_EVENT_CQ_ERR;
+		break;
+	case OCRDMA_CQ_QPCAT_ERROR:
+		ib_evt.element.qp = &qp->ibqp;
+		ib_evt.event = IB_EVENT_QP_FATAL;
+		ocrdma_process_qpcat_error(dev, qp);
+		break;
+	case OCRDMA_QP_ACCESS_ERROR:
+		ib_evt.element.qp = &qp->ibqp;
+		ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
+		break;
+	case OCRDMA_QP_COMM_EST_EVENT:
+		ib_evt.element.qp = &qp->ibqp;
+		ib_evt.event = IB_EVENT_COMM_EST;
+		break;
+	case OCRDMA_SQ_DRAINED_EVENT:
+		ib_evt.element.qp = &qp->ibqp;
+		ib_evt.event = IB_EVENT_SQ_DRAINED;
+		break;
+	case OCRDMA_DEVICE_FATAL_EVENT:
+		ib_evt.element.port_num = 1;
+		ib_evt.event = IB_EVENT_DEVICE_FATAL;
+		qp_event = 0;
+		dev_event = 1;
+		break;
+	case OCRDMA_SRQCAT_ERROR:
+		ib_evt.element.srq = &qp->srq->ibsrq;
+		ib_evt.event = IB_EVENT_SRQ_ERR;
+		srq_event = 1;
+		qp_event = 0;
+		break;
+	case OCRDMA_SRQ_LIMIT_EVENT:
+		ib_evt.element.srq = &qp->srq->ibsrq;
+		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
+		srq_event = 1;
+		qp_event = 0;
+		break;
+	case OCRDMA_QP_LAST_WQE_EVENT:
+		ib_evt.element.qp = &qp->ibqp;
+		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
+		break;
+	default:
+		cq_event = 0;
+		qp_event = 0;
+		srq_event = 0;
+		dev_event = 0;
+		ocrdma_err("%s() unknown type=0x%x\n", __func__, type);
+		break;
+	}
+
+	if (qp_event) {
+		if (qp->ibqp.event_handler)
+			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
+	} else if (cq_event) {
+		if (cq->ibcq.event_handler)
+			cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
+	} else if (srq_event) {
+		if (qp->srq->ibsrq.event_handler)
+			qp->srq->ibsrq.event_handler(&ib_evt,
+						     qp->srq->ibsrq.
+						     srq_context);
+	} else if (dev_event)
+		ib_dispatch_event(&ib_evt);
+
+}
+
+static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
+{
+	/* async CQE processing */
+	struct ocrdma_ae_mcqe *cqe = ae_cqe;
+	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
+			OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
+
+	if (evt_code == OCRDMA_ASYNC_EVE_CODE)
+		ocrdma_dispatch_ibevent(dev, cqe);
+	else
+		ocrdma_err("%s(%d) invalid evt code=0x%x\n",
+			   __func__, dev->id, evt_code);
+}
+
+static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
+{
+	if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
+		dev->mqe_ctx.cqe_status = (cqe->status &
+		     OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
+		dev->mqe_ctx.ext_status =
+		    (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
+		    >> OCRDMA_MCQE_ESTATUS_SHIFT;
+		dev->mqe_ctx.cmd_done = true;
+		wake_up(&dev->mqe_ctx.cmd_wait);
+	} else
+		ocrdma_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
+			   __func__, cqe->tag_lo, dev->mqe_ctx.tag);
+}
+
+static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
+{
+	u16 cqe_popped = 0;
+	struct ocrdma_mcqe *cqe;
+
+	while (1) {
+		cqe = ocrdma_get_mcqe(dev);
+		if (cqe == NULL)
+			break;
+		ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
+		cqe_popped += 1;
+		if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
+			ocrdma_process_acqe(dev, cqe);
+		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
+			ocrdma_process_mcqe(dev, cqe);
+		else
+			ocrdma_err("%s() cqe->compl is not set.\n", __func__);
+		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
+		ocrdma_mcq_inc_tail(dev);
+	}
+	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
+	return 0;
+}
+
+static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+				       struct ocrdma_cq *cq)
+{
+	unsigned long flags;
+	struct ocrdma_qp *qp;
+	bool buddy_cq_found = false;
+	/* Go through the list of QPs in error state which are using this CQ
+	 * and invoke the buddy CQ's completion handler to trigger CQE
+	 * processing for error/flushed CQEs. It is rare to find more than a
+	 * few entries in this list as most consumers stop after getting an
+	 * error CQE. The list is only traversed until a matching buddy cq is found.
+	 */
+	spin_lock_irqsave(&dev->flush_q_lock, flags);
+	list_for_each_entry(qp, &cq->sq_head, sq_entry) {
+		if (qp->srq)
+			continue;
+		/* if wq and rq share the same cq, then the comp_handler
+		 * has already been invoked.
+		 */
+		if (qp->sq_cq == qp->rq_cq)
+			continue;
+		/* if completion came on sq, rq's cq is buddy cq.
+		 * if completion came on rq, sq's cq is buddy cq.
+		 */
+		if (qp->sq_cq == cq)
+			cq = qp->rq_cq;
+		else
+			cq = qp->sq_cq;
+		buddy_cq_found = true;
+		break;
+	}
+	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
+	if (buddy_cq_found == false)
+		return;
+	if (cq->ibcq.comp_handler) {
+		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+	}
+}
+
+static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
+{
+	unsigned long flags;
+	struct ocrdma_cq *cq;
+
+	if (cq_idx >= OCRDMA_MAX_CQ)
+		BUG();
+
+	cq = dev->cq_tbl[cq_idx];
+	if (cq == NULL) {
+		ocrdma_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
+		return;
+	}
+	spin_lock_irqsave(&cq->cq_lock, flags);
+	cq->armed = false;
+	cq->solicited = false;
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+	ocrdma_ring_cq_db(dev, cq->id, false, false, 0);
+
+	if (cq->ibcq.comp_handler) {
+		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+	}
+	ocrdma_qp_buddy_cq_handler(dev, cq);
+}
+
+static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
+{
+	/* process the MQ-CQE. */
+	if (cq_id == dev->mq.cq.id)
+		ocrdma_mq_cq_handler(dev, cq_id);
+	else
+		ocrdma_qp_cq_handler(dev, cq_id);
+}
+
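+/* EQ interrupt handler: drain all valid EQEs, dispatch CQ events to the MQ
+ * or QP CQ handler as appropriate, then re-arm the EQ with the number of
+ * EQEs popped (and once more with zero popped in INTx mode to re-enable
+ * interrupts).
+ */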
+static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
+{
+	struct ocrdma_eq *eq = handle;
+	struct ocrdma_dev *dev = eq->dev;
+	struct ocrdma_eqe eqe;
+	struct ocrdma_eqe *ptr;
+	u16 eqe_popped = 0;
+	u16 cq_id;
+	while (1) {
+		ptr = ocrdma_get_eqe(eq);
+		eqe = *ptr;
+		ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
+		if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
+			break;
+		eqe_popped += 1;
+		ptr->id_valid = 0;
+		/* check whether it is a CQE or not. */
+		if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
+			cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
+			ocrdma_cq_handler(dev, cq_id);
+		}
+		ocrdma_eq_inc_tail(eq);
+	}
+	ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
+	/* Ring EQ doorbell with num_popped to 0 to enable interrupts again. */
+	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
+		ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
+	return IRQ_HANDLED;
+}
+
+static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
+{
+	struct ocrdma_mqe *mqe;
+
+	dev->mqe_ctx.tag = dev->mq.sq.head;
+	dev->mqe_ctx.cmd_done = false;
+	mqe = ocrdma_get_mqe(dev);
+	cmd->hdr.tag_lo = dev->mq.sq.head;
+	ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
+	/* make sure descriptor is written before ringing doorbell */
+	wmb();
+	ocrdma_mq_inc_head(dev);
+	ocrdma_ring_mq_db(dev);
+}
+
+static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
+{
+	long status;
+	/* 30 sec timeout */
+	status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
+				    (dev->mqe_ctx.cmd_done != false),
+				    msecs_to_jiffies(30000));
+	if (status)
+		return 0;
+	else
+		return -1;
+}
+
+/* issue a mailbox command on the MQ */
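+/* The MQE is copied into the next MQ slot and the MQ doorbell is rung; the
+ * MQ-CQ handler records the CQE/extended status and wakes the waiter, after
+ * which the response is copied back and any error is translated to an errno.
+ */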
+static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
+{
+	int status = 0;
+	u16 cqe_status, ext_status;
+	struct ocrdma_mqe *rsp;
+
+	mutex_lock(&dev->mqe_ctx.lock);
+	ocrdma_post_mqe(dev, mqe);
+	status = ocrdma_wait_mqe_cmpl(dev);
+	if (status)
+		goto mbx_err;
+	cqe_status = dev->mqe_ctx.cqe_status;
+	ext_status = dev->mqe_ctx.ext_status;
+	rsp = ocrdma_get_mqe_rsp(dev);
+	ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
+	if (cqe_status || ext_status) {
+		ocrdma_err
+		    ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
+		     __func__,
+		     (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
+		     OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
+		status = ocrdma_get_mbx_cqe_errno(cqe_status);
+		goto mbx_err;
+	}
+	if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
+		status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
+mbx_err:
+	mutex_unlock(&dev->mqe_ctx.lock);
+	return status;
+}
+
+static void ocrdma_get_attr(struct ocrdma_dev *dev,
+			      struct ocrdma_dev_attr *attr,
+			      struct ocrdma_mbx_query_config *rsp)
+{
+	int max_q_mem;
+
+	attr->max_pd =
+	    (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+	attr->max_qp =
+	    (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
+	attr->max_send_sge = ((rsp->max_write_send_sge &
+			       OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
+			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
+	attr->max_recv_sge = (rsp->max_write_send_sge &
+			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
+				OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
+	attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
+				OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
+	attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
+				    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
+	attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
+			       OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
+	attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
+				    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
+	attr->max_mr = rsp->max_mr;
+	attr->max_mr_size = ~0ull;
+	attr->max_fmr = 0;
+	attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
+	attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
+	attr->max_cqe = rsp->max_cq_cqes_per_cq &
+			OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
+	attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
+		OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
+		OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
+		OCRDMA_WQE_STRIDE;
+	attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
+		OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
+		OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
+		OCRDMA_WQE_STRIDE;
+	attr->max_inline_data =
+	    attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
+			      sizeof(struct ocrdma_sge));
+	max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
+	/* hw can queue one less than the configured size,
+	 * so publish one less to the stack.
+	 */
+	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+		dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
+		attr->ird = 1;
+		attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
+		attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
+	} else
+		dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
+	dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
+}
+
+static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
+				   struct ocrdma_fw_conf_rsp *conf)
+{
+	u32 fn_mode;
+
+	fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
+	if (fn_mode != OCRDMA_FN_MODE_RDMA)
+		return -EINVAL;
+	dev->base_eqid = conf->base_eqid;
+	dev->max_eq = conf->max_eq;
+	dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
+	return 0;
+}
+
+/* can be issued only during init time. */
+static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
+{
+	int status = -ENOMEM;
+	struct ocrdma_mqe *cmd;
+	struct ocrdma_fw_ver_rsp *rsp;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+			OCRDMA_CMD_GET_FW_VER,
+			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_fw_ver_rsp *)cmd;
+	memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
+	memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
+	       sizeof(rsp->running_ver));
+	ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+/* can be issued only during init time. */
+static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
+{
+	int status = -ENOMEM;
+	struct ocrdma_mqe *cmd;
+	struct ocrdma_fw_conf_rsp *rsp;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+			OCRDMA_CMD_GET_FW_CONFIG,
+			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_fw_conf_rsp *)cmd;
+	status = ocrdma_check_fw_config(dev, rsp);
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
+{
+	int status = -ENOMEM;
+	struct ocrdma_mbx_query_config *rsp;
+	struct ocrdma_mqe *cmd;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_mbx_query_config *)cmd;
+	ocrdma_get_attr(dev, &dev->attr, rsp);
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+{
+	int status = -ENOMEM;
+	struct ocrdma_alloc_pd *cmd;
+	struct ocrdma_alloc_pd_rsp *rsp;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	if (pd->dpp_enabled)
+		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
+	pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
+	if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
+		pd->dpp_enabled = true;
+		pd->dpp_page = rsp->dpp_page_pdid >>
+				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+	} else {
+		pd->dpp_enabled = false;
+		pd->num_dpp_qp = 0;
+	}
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+{
+	int status = -ENOMEM;
+	struct ocrdma_dealloc_pd *cmd;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	cmd->id = pd->id;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	kfree(cmd);
+	return status;
+}
+
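+/* Translate a requested queue depth into a hardware page layout: round
+ * *num_entries up to a power of two, pick the smallest supported queue
+ * size that holds it and return the resulting page count and page size.
+ * *num_entries is updated to the depth the chosen layout actually holds.
+ */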
+static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
+			       int *num_pages, int *page_size)
+{
+	int i;
+	int mem_size;
+
+	*num_entries = roundup_pow_of_two(*num_entries);
+	mem_size = *num_entries * entry_size;
+	/* find the lowest possible multiplier */
+	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
+		if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
+			break;
+	}
+	if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
+		return -EINVAL;
+	mem_size = roundup(mem_size,
+		       ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
+	*num_pages =
+	    mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
+	*page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
+	*num_entries = mem_size / entry_size;
+	return 0;
+}
+
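+/* Allocate the address handle (AV) table and a one-page PBL describing
+ * it, then issue CREATE_AH_TBL so the device knows where the table
+ * lives; the table id (ahid) from the response is saved for later use.
+ */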
+static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
+{
+	int i;
+	int status = 0;
+	int max_ah;
+	struct ocrdma_create_ah_tbl *cmd;
+	struct ocrdma_create_ah_tbl_rsp *rsp;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	dma_addr_t pa;
+	struct ocrdma_pbe *pbes;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
+	if (!cmd)
+		return status;
+
+	max_ah = OCRDMA_MAX_AH;
+	dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;
+
+	/* number of PBEs in PBL */
+	cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
+				OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
+				OCRDMA_CREATE_AH_NUM_PAGES_MASK;
+
+	/* page size */
+	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
+		if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
+			break;
+	}
+	cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
+				OCRDMA_CREATE_AH_PAGE_SIZE_MASK;
+
+	/* ah_entry size */
+	cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
+				OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
+				OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;
+
+	dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+						&dev->av_tbl.pbl.pa,
+						GFP_KERNEL);
+	if (dev->av_tbl.pbl.va == NULL)
+		goto mem_err;
+
+	dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
+					    &pa, GFP_KERNEL);
+	if (dev->av_tbl.va == NULL)
+		goto mem_err_ah;
+	dev->av_tbl.pa = pa;
+	dev->av_tbl.num_ah = max_ah;
+	memset(dev->av_tbl.va, 0, dev->av_tbl.size);
+
+	pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
+	for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
+		pbes[i].pa_lo = (u32) (pa & 0xffffffff);
+		pbes[i].pa_hi = (u32) upper_32_bits(pa);
+		pa += PAGE_SIZE;
+	}
+	cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
+	cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
+	dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
+	kfree(cmd);
+	return 0;
+
+mbx_err:
+	dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
+			  dev->av_tbl.pa);
+	dev->av_tbl.va = NULL;
+mem_err_ah:
+	dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
+			  dev->av_tbl.pbl.pa);
+	dev->av_tbl.pbl.va = NULL;
+	dev->av_tbl.size = 0;
+mem_err:
+	kfree(cmd);
+	return status;
+}
+
+static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
+{
+	struct ocrdma_delete_ah_tbl *cmd;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+
+	if (dev->av_tbl.va == NULL)
+		return;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
+	if (!cmd)
+		return;
+	cmd->ahid = dev->av_tbl.ahid;
+
+	ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
+			  dev->av_tbl.pa);
+	dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
+			  dev->av_tbl.pbl.pa);
+	kfree(cmd);
+}
+
+/* Multiple CQs share an EQ. This routine returns the least used EQ
+ * with which to associate a CQ, distributing interrupt processing and
+ * CPU load across the EQs, their vectors and hence the CPUs.
+ */
+static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
+{
+	int i, selected_eq = 0, cq_cnt = 0;
+	u16 eq_id;
+
+	mutex_lock(&dev->dev_lock);
+	cq_cnt = dev->qp_eq_tbl[0].cq_cnt;
+	eq_id = dev->qp_eq_tbl[0].q.id;
+	/* find the EQ which has the least number of
+	 * CQs associated with it.
+	 */
+	for (i = 0; i < dev->eq_cnt; i++) {
+		if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
+			cq_cnt = dev->qp_eq_tbl[i].cq_cnt;
+			eq_id = dev->qp_eq_tbl[i].q.id;
+			selected_eq = i;
+		}
+	}
+	dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
+	mutex_unlock(&dev->dev_lock);
+	return eq_id;
+}
+
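+/* Undo ocrdma_bind_eq(): drop the CQ count of the EQ matching eq_id. */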
+static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
+{
+	int i;
+
+	mutex_lock(&dev->dev_lock);
+	for (i = 0; i < dev->eq_cnt; i++) {
+		if (dev->qp_eq_tbl[i].q.id != eq_id)
+			continue;
+		dev->qp_eq_tbl[i].cq_cnt -= 1;
+		break;
+	}
+	mutex_unlock(&dev->dev_lock);
+}
+
+int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
+			 int entries, int dpp_cq)
+{
+	int status = -ENOMEM, max_hw_cqe;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	struct ocrdma_create_cq *cmd;
+	struct ocrdma_create_cq_rsp *rsp;
+	u32 hw_pages, cqe_size, page_size, cqe_count;
+
+	if (dpp_cq)
+		return -EINVAL;
+	if (entries > dev->attr.max_cqe) {
+		ocrdma_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
+			   __func__, dev->id, dev->attr.max_cqe, entries);
+		return -EINVAL;
+	}
+	if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
+		return -EINVAL;
+
+	if (dpp_cq) {
+		cq->max_hw_cqe = 1;
+		max_hw_cqe = 1;
+		cqe_size = OCRDMA_DPP_CQE_SIZE;
+		hw_pages = 1;
+	} else {
+		cq->max_hw_cqe = dev->attr.max_cqe;
+		max_hw_cqe = dev->attr.max_cqe;
+		cqe_size = sizeof(struct ocrdma_cqe);
+		hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
+	}
+
+	cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
+			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+	if (!cq->va) {
+		status = -ENOMEM;
+		goto mem_err;
+	}
+	memset(cq->va, 0, cq->len);
+	page_size = cq->len / hw_pages;
+	cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
+					OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
+	cmd->cmd.pgsz_pgcnt |= hw_pages;
+	cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
+
+	if (dev->eq_cnt < 0)
+		goto eq_err;
+	cq->eqn = ocrdma_bind_eq(dev);
+	cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
+	cqe_count = cq->len / cqe_size;
+	if (cqe_count > 1024)
+		/* Set cnt to 3 to indicate more than 1024 cq entries */
+		cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
+	else {
+		u8 count = 0;
+		switch (cqe_count) {
+		case 256:
+			count = 0;
+			break;
+		case 512:
+			count = 1;
+			break;
+		case 1024:
+			count = 2;
+			break;
+		default:
+			goto mbx_err;
+		}
+		cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
+	}
+	/* shared eq between all the consumer cqs. */
+	cmd->cmd.eqn = cq->eqn;
+	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+		if (dpp_cq)
+			cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
+				OCRDMA_CREATE_CQ_TYPE_SHIFT;
+		cq->phase_change = false;
+		cmd->cmd.cqe_count = (cq->len / cqe_size);
+	} else {
+		cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
+		cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
+		cq->phase_change = true;
+	}
+
+	ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+
+	rsp = (struct ocrdma_create_cq_rsp *)cmd;
+	cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
+	kfree(cmd);
+	return 0;
+mbx_err:
+	ocrdma_unbind_eq(dev, cq->eqn);
+eq_err:
+	dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
+mem_err:
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
+{
+	int status = -ENOMEM;
+	struct ocrdma_destroy_cq *cmd;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
+			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+	cmd->bypass_flush_qid |=
+	    (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
+	    OCRDMA_DESTROY_CQ_QID_MASK;
+
+	ocrdma_unbind_eq(dev, cq->eqn);
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
+			  u32 pdid, int addr_check)
+{
+	int status = -ENOMEM;
+	struct ocrdma_alloc_lkey *cmd;
+	struct ocrdma_alloc_lkey_rsp *rsp;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	cmd->pdid = pdid;
+	cmd->pbl_sz_flags |= addr_check;
+	cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
+	cmd->pbl_sz_flags |=
+	    (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
+	cmd->pbl_sz_flags |=
+	    (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
+	cmd->pbl_sz_flags |=
+	    (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
+	cmd->pbl_sz_flags |=
+	    (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
+	cmd->pbl_sz_flags |=
+	    (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);
+
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
+	hwmr->lkey = rsp->lrkey;
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
+{
+	int status = -ENOMEM;
+	struct ocrdma_dealloc_lkey *cmd;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+	cmd->lkey = lkey;
+	cmd->rsvd_frmr = fr_mr ? 1 : 0;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
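+/* Issue a REGISTER_NSMR command carrying the access flags, FBO, VA,
+ * length and up to pbl_cnt PBL addresses for this memory region; the
+ * lkey returned by the device is stored in hwmr.
+ */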
+static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
+			     u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
+{
+	int status = -ENOMEM;
+	int i;
+	struct ocrdma_reg_nsmr *cmd;
+	struct ocrdma_reg_nsmr_rsp *rsp;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+	cmd->num_pbl_pdid =
+	    pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
+
+	cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
+				    OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
+	cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
+				    OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
+	cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
+				    OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
+	cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
+				    OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
+	cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
+				    OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
+	cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);
+
+	cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
+	cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
+					OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
+	cmd->totlen_low = hwmr->len;
+	cmd->totlen_high = upper_32_bits(hwmr->len);
+	cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
+	cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
+	cmd->va_loaddr = (u32) hwmr->va;
+	cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
+
+	for (i = 0; i < pbl_cnt; i++) {
+		cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
+		cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
+	}
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
+	hwmr->lkey = rsp->lrkey;
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
+				  struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
+				  u32 pbl_offset, u32 last)
+{
+	int status = -ENOMEM;
+	int i;
+	struct ocrdma_reg_nsmr_cont *cmd;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
+	if (!cmd)
+		return -ENOMEM;
+	cmd->lrkey = hwmr->lkey;
+	cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
+	    (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
+	cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;
+
+	for (i = 0; i < pbl_cnt; i++) {
+		cmd->pbl[i].lo =
+		    (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
+		cmd->pbl[i].hi =
+		    upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
+	}
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
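+/* Register a memory region whose PBL list may not fit in a single
+ * mailbox command: the first chunk goes in REGISTER_NSMR and the rest
+ * in REGISTER_NSMR_CONT commands, with 'last' set on the final chunk.
+ */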
+int ocrdma_reg_mr(struct ocrdma_dev *dev,
+		  struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
+{
+	int status;
+	u32 last = 0;
+	u32 cur_pbl_cnt, pbl_offset;
+	u32 pending_pbl_cnt = hwmr->num_pbls;
+
+	pbl_offset = 0;
+	cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
+	if (cur_pbl_cnt == pending_pbl_cnt)
+		last = 1;
+
+	status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
+				   cur_pbl_cnt, hwmr->pbe_size, last);
+	if (status) {
+		ocrdma_err("%s() status=%d\n", __func__, status);
+		return status;
+	}
+	/* if there are no more pbls to register, then exit. */
+	if (last)
+		return 0;
+
+	while (!last) {
+		pbl_offset += cur_pbl_cnt;
+		pending_pbl_cnt -= cur_pbl_cnt;
+		cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
+		/* if we reach the end of the pbls, set the last bit to
+		 * indicate no more pbls to register for this memory key.
+		 */
+		if (cur_pbl_cnt == pending_pbl_cnt)
+			last = 1;
+
+		status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
+						pbl_offset, last);
+		if (status)
+			break;
+	}
+	if (status)
+		ocrdma_err("%s() err. status=%d\n", __func__, status);
+
+	return status;
+}
+
+bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
+{
+	struct ocrdma_qp *tmp;
+	bool found = false;
+	list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
+		if (qp == tmp) {
+			found = true;
+			break;
+		}
+	}
+	return found;
+}
+
+bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
+{
+	struct ocrdma_qp *tmp;
+	bool found = false;
+	list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
+		if (qp == tmp) {
+			found = true;
+			break;
+		}
+	}
+	return found;
+}
+
+void ocrdma_flush_qp(struct ocrdma_qp *qp)
+{
+	bool found;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
+	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
+	if (!found)
+		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
+	if (!qp->srq) {
+		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
+		if (!found)
+			list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
+	}
+	spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
+}
+
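+/* Validate the requested QP state transition against the current state
+ * and apply it when legal. Returns 1 if the QP is already in the
+ * requested state, -EINVAL for an illegal transition and 0 on success;
+ * transitions into the error state also queue the QP for CQ flushing.
+ */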
+int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
+			    enum ib_qp_state *old_ib_state)
+{
+	unsigned long flags;
+	int status = 0;
+	enum ocrdma_qp_state new_state;
+	new_state = get_ocrdma_qp_state(new_ib_state);
+
+	/* sync with wqe and rqe posting */
+	spin_lock_irqsave(&qp->q_lock, flags);
+
+	if (old_ib_state)
+		*old_ib_state = get_ibqp_state(qp->state);
+	if (new_state == qp->state) {
+		spin_unlock_irqrestore(&qp->q_lock, flags);
+		return 1;
+	}
+
+	switch (qp->state) {
+	case OCRDMA_QPS_RST:
+		switch (new_state) {
+		case OCRDMA_QPS_RST:
+		case OCRDMA_QPS_INIT:
+			break;
+		default:
+			status = -EINVAL;
+			break;
+		}
+		break;
+	case OCRDMA_QPS_INIT:
+		/* qps: INIT->XXX */
+		switch (new_state) {
+		case OCRDMA_QPS_INIT:
+		case OCRDMA_QPS_RTR:
+			break;
+		case OCRDMA_QPS_ERR:
+			ocrdma_flush_qp(qp);
+			break;
+		default:
+			status = -EINVAL;
+			break;
+		}
+		break;
+	case OCRDMA_QPS_RTR:
+		/* qps: RTR->XXX */
+		switch (new_state) {
+		case OCRDMA_QPS_RTS:
+			break;
+		case OCRDMA_QPS_ERR:
+			ocrdma_flush_qp(qp);
+			break;
+		default:
+			status = -EINVAL;
+			break;
+		}
+		break;
+	case OCRDMA_QPS_RTS:
+		/* qps: RTS->XXX */
+		switch (new_state) {
+		case OCRDMA_QPS_SQD:
+		case OCRDMA_QPS_SQE:
+			break;
+		case OCRDMA_QPS_ERR:
+			ocrdma_flush_qp(qp);
+			break;
+		default:
+			status = -EINVAL;
+			break;
+		}
+		break;
+	case OCRDMA_QPS_SQD:
+		/* qps: SQD->XXX */
+		switch (new_state) {
+		case OCRDMA_QPS_RTS:
+		case OCRDMA_QPS_SQE:
+		case OCRDMA_QPS_ERR:
+			break;
+		default:
+			status = -EINVAL;
+			break;
+		}
+		break;
+	case OCRDMA_QPS_SQE:
+		switch (new_state) {
+		case OCRDMA_QPS_RTS:
+		case OCRDMA_QPS_ERR:
+			break;
+		default:
+			status = -EINVAL;
+			break;
+		}
+		break;
+	case OCRDMA_QPS_ERR:
+		/* qps: ERR->XXX */
+		switch (new_state) {
+		case OCRDMA_QPS_RST:
+			break;
+		default:
+			status = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		status = -EINVAL;
+		break;
+	}
+	if (!status)
+		qp->state = new_state;
+
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+	return status;
+}
+
+static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
+{
+	u32 flags = 0;
+	if (qp->cap_flags & OCRDMA_QP_INB_RD)
+		flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
+	if (qp->cap_flags & OCRDMA_QP_INB_WR)
+		flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
+	if (qp->cap_flags & OCRDMA_QP_MW_BIND)
+		flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
+	if (qp->cap_flags & OCRDMA_QP_LKEY0)
+		flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
+	if (qp->cap_flags & OCRDMA_QP_FAST_REG)
+		flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
+	return flags;
+}
+
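+/* Allocate DMA-coherent memory for the send queue and fill in the SQ
+ * related fields of the CREATE_QP request: page list, page size, WQE
+ * size and the SGE/WQE limits.
+ */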
+static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
+					struct ib_qp_init_attr *attrs,
+					struct ocrdma_qp *qp)
+{
+	int status;
+	u32 len, hw_pages, hw_page_size;
+	dma_addr_t pa;
+	struct ocrdma_dev *dev = qp->dev;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	u32 max_wqe_allocated;
+	u32 max_sges = attrs->cap.max_send_sge;
+
+	max_wqe_allocated = attrs->cap.max_send_wr;
+	/* need to allocate one extra entry for the GEN1 family */
+	if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)
+		max_wqe_allocated += 1;
+
+	status = ocrdma_build_q_conf(&max_wqe_allocated,
+		dev->attr.wqe_size, &hw_pages, &hw_page_size);
+	if (status) {
+		ocrdma_err("%s() req. max_send_wr=0x%x\n", __func__,
+			   max_wqe_allocated);
+		return -EINVAL;
+	}
+	qp->sq.max_cnt = max_wqe_allocated;
+	len = (hw_pages * hw_page_size);
+
+	qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	if (!qp->sq.va)
+		return -EINVAL;
+	memset(qp->sq.va, 0, len);
+	qp->sq.len = len;
+	qp->sq.pa = pa;
+	qp->sq.entry_size = dev->attr.wqe_size;
+	ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);
+
+	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
+				<< OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
+	cmd->num_wq_rq_pages |= (hw_pages <<
+				 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
+	    OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
+	cmd->max_sge_send_write |= (max_sges <<
+				    OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
+	    OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
+	cmd->max_sge_send_write |= (max_sges <<
+				    OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
+					OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
+	cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
+			     OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
+				OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
+	cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
+			      OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
+				OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
+	return 0;
+}
+
+static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
+					struct ib_qp_init_attr *attrs,
+					struct ocrdma_qp *qp)
+{
+	int status;
+	u32 len, hw_pages, hw_page_size;
+	dma_addr_t pa = 0;
+	struct ocrdma_dev *dev = qp->dev;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
+
+	status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
+				     &hw_pages, &hw_page_size);
+	if (status) {
+		ocrdma_err("%s() req. max_recv_wr=0x%x\n", __func__,
+			   attrs->cap.max_recv_wr + 1);
+		return status;
+	}
+	qp->rq.max_cnt = max_rqe_allocated;
+	len = (hw_pages * hw_page_size);
+
+	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	if (!qp->rq.va)
+		return status;
+	memset(qp->rq.va, 0, len);
+	qp->rq.pa = pa;
+	qp->rq.len = len;
+	qp->rq.entry_size = dev->attr.rqe_size;
+
+	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
+	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
+		OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
+	cmd->num_wq_rq_pages |=
+	    (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
+	    OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
+	cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
+				OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
+				OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
+	cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
+				OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
+				OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
+	cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
+			OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
+			OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
+	return 0;
+}
+
+static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
+					 struct ocrdma_pd *pd,
+					 struct ocrdma_qp *qp,
+					 u8 enable_dpp_cq, u16 dpp_cq_id)
+{
+	pd->num_dpp_qp--;
+	qp->dpp_enabled = true;
+	cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
+	if (!enable_dpp_cq)
+		return;
+	cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
+	cmd->dpp_credits_cqid = dpp_cq_id;
+	cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
+					OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
+}
+
+static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
+					struct ocrdma_qp *qp)
+{
+	struct ocrdma_dev *dev = qp->dev;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	dma_addr_t pa = 0;
+	int ird_page_size = dev->attr.ird_page_size;
+	int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
+
+	if (dev->attr.ird == 0)
+		return 0;
+
+	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
+					&pa, GFP_KERNEL);
+	if (!qp->ird_q_va)
+		return -ENOMEM;
+	memset(qp->ird_q_va, 0, ird_q_len);
+	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
+			     pa, ird_page_size);
+	return 0;
+}
+
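+/* Parse the CREATE_QP response: record the QP/SQ/RQ ids, the ORD/IRD
+ * limits, the DPP offset and credits when DPP was granted, and the
+ * WQE/RQE depths actually allocated by the device.
+ */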
+static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
+				     struct ocrdma_qp *qp,
+				     struct ib_qp_init_attr *attrs,
+				     u16 *dpp_offset, u16 *dpp_credit_lmt)
+{
+	u32 max_wqe_allocated, max_rqe_allocated;
+	qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
+	qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
+	qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
+	qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
+	qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
+	qp->dpp_enabled = false;
+	if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
+		qp->dpp_enabled = true;
+		*dpp_credit_lmt = (rsp->dpp_response &
+				OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
+				OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
+		*dpp_offset = (rsp->dpp_response &
+				OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
+				OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
+	}
+	max_wqe_allocated =
+		rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
+	max_wqe_allocated = 1 << max_wqe_allocated;
+	max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
+
+	if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+		qp->sq.free_delta = 0;
+		qp->rq.free_delta = 1;
+	} else
+		qp->sq.free_delta = 1;
+
+	qp->sq.max_cnt = max_wqe_allocated;
+	qp->sq.max_wqe_idx = max_wqe_allocated - 1;
+
+	if (!attrs->srq) {
+		qp->rq.max_cnt = max_rqe_allocated;
+		qp->rq.max_wqe_idx = max_rqe_allocated - 1;
+		qp->rq.free_delta = 1;
+	}
+}
+
+int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
+			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
+			 u16 *dpp_credit_lmt)
+{
+	int status = -ENOMEM;
+	u32 flags = 0;
+	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	struct ocrdma_cq *cq;
+	struct ocrdma_create_qp_req *cmd;
+	struct ocrdma_create_qp_rsp *rsp;
+	int qptype;
+
+	switch (attrs->qp_type) {
+	case IB_QPT_GSI:
+		qptype = OCRDMA_QPT_GSI;
+		break;
+	case IB_QPT_RC:
+		qptype = OCRDMA_QPT_RC;
+		break;
+	case IB_QPT_UD:
+		qptype = OCRDMA_QPT_UD;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
+						OCRDMA_CREATE_QP_REQ_QPT_MASK;
+	status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
+	if (status)
+		goto sq_err;
+
+	if (attrs->srq) {
+		struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
+		cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
+		cmd->rq_addr[0].lo = srq->id;
+		qp->srq = srq;
+	} else {
+		status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
+		if (status)
+			goto rq_err;
+	}
+
+	status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
+	if (status)
+		goto mbx_err;
+
+	cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
+				OCRDMA_CREATE_QP_REQ_PD_ID_MASK;
+
+	flags = ocrdma_set_create_qp_mbx_access_flags(qp);
+
+	cmd->max_sge_recv_flags |= flags;
+	cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
+			     OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
+				OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
+	cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
+			     OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
+				OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
+	cq = get_ocrdma_cq(attrs->send_cq);
+	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
+				OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
+	qp->sq_cq = cq;
+	cq = get_ocrdma_cq(attrs->recv_cq);
+	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
+				OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
+	qp->rq_cq = cq;
+
+	if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
+	    (attrs->cap.max_inline_data <= dev->attr.max_inline_data))
+		ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
+					     dpp_cq_id);
+
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_create_qp_rsp *)cmd;
+	ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
+	qp->state = OCRDMA_QPS_RST;
+	kfree(cmd);
+	return 0;
+mbx_err:
+	if (qp->rq.va)
+		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
+rq_err:
+	ocrdma_err("%s(%d) rq_err\n", __func__, dev->id);
+	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
+sq_err:
+	ocrdma_err("%s(%d) sq_err\n", __func__, dev->id);
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
+			struct ocrdma_qp_params *param)
+{
+	int status = -ENOMEM;
+	struct ocrdma_query_qp *cmd;
+	struct ocrdma_query_qp_rsp *rsp;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	cmd->qp_id = qp->id;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_query_qp_rsp *)cmd;
+	memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
+			u8 *mac_addr)
+{
+	struct in6_addr in6;
+
+	memcpy(&in6, dgid, sizeof in6);
+	if (rdma_is_multicast_addr(&in6))
+		rdma_get_mcast_mac(&in6, mac_addr);
+	else if (rdma_link_local_addr(&in6))
+		rdma_get_ll_mac(&in6, mac_addr);
+	else {
+		ocrdma_err("%s() fail to resolve mac_addr.\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
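+/* Fill the address vector fields of the MODIFY_QP command from the GRH:
+ * traffic class, flow label, hop limit, SGID/DGID (converted to LE),
+ * the destination MAC resolved from the DGID and, when the SGID carries
+ * one, the VLAN id.
+ */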
+static void ocrdma_set_av_params(struct ocrdma_qp *qp,
+				struct ocrdma_modify_qp *cmd,
+				struct ib_qp_attr *attrs)
+{
+	struct ib_ah_attr *ah_attr = &attrs->ah_attr;
+	union ib_gid sgid;
+	u32 vlan_id;
+	u8 mac_addr[6];
+	if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
+		return;
+	cmd->params.tclass_sq_psn |=
+	    (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
+	cmd->params.rnt_rc_sl_fl |=
+	    (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
+	cmd->params.hop_lmt_rq_psn |=
+	    (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
+	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
+	memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
+	       sizeof(cmd->params.dgid));
+	ocrdma_query_gid(&qp->dev->ibdev, 1,
+			 ah_attr->grh.sgid_index, &sgid);
+	qp->sgid_idx = ah_attr->grh.sgid_index;
+	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
+	ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
+	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
+				(mac_addr[2] << 16) | (mac_addr[3] << 24);
+	/* convert them to LE format. */
+	ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
+	ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
+	cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
+	vlan_id = rdma_get_vlan_id(&sgid);
+	if (vlan_id && (vlan_id < 0x1000)) {
+		cmd->params.vlan_dmac_b4_to_b5 |=
+		    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
+		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
+	}
+}
+
+static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
+				struct ocrdma_modify_qp *cmd,
+				struct ib_qp_attr *attrs, int attr_mask,
+				enum ib_qp_state old_qps)
+{
+	int status = 0;
+	struct net_device *netdev = qp->dev->nic_info.netdev;
+	int eth_mtu = iboe_get_mtu(netdev->mtu);
+
+	if (attr_mask & IB_QP_PKEY_INDEX) {
+		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
+					    OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
+		cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
+	}
+	if (attr_mask & IB_QP_QKEY) {
+		qp->qkey = attrs->qkey;
+		cmd->params.qkey = attrs->qkey;
+		cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
+	}
+	if (attr_mask & IB_QP_AV)
+		ocrdma_set_av_params(qp, cmd, attrs);
+	else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
+		/* set the default mac address for UD, GSI QPs */
+		cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
+			(qp->dev->nic_info.mac_addr[1] << 8) |
+			(qp->dev->nic_info.mac_addr[2] << 16) |
+			(qp->dev->nic_info.mac_addr[3] << 24);
+		cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
+					(qp->dev->nic_info.mac_addr[5] << 8);
+	}
+	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
+	    attrs->en_sqd_async_notify) {
+		cmd->params.max_sge_recv_flags |=
+			OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
+		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
+	}
+	if (attr_mask & IB_QP_DEST_QPN) {
+		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
+				OCRDMA_QP_PARAMS_DEST_QPN_MASK);
+		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
+	}
+	if (attr_mask & IB_QP_PATH_MTU) {
+		if (ib_mtu_enum_to_int(eth_mtu) <
+		    ib_mtu_enum_to_int(attrs->path_mtu)) {
+			status = -EINVAL;
+			goto pmtu_err;
+		}
+		cmd->params.path_mtu_pkey_indx |=
+		    (ib_mtu_enum_to_int(attrs->path_mtu) <<
+		     OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
+		    OCRDMA_QP_PARAMS_PATH_MTU_MASK;
+		cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
+	}
+	if (attr_mask & IB_QP_TIMEOUT) {
+		cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
+		    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
+		cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
+	}
+	if (attr_mask & IB_QP_RETRY_CNT) {
+		cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
+				      OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
+		    OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
+		cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
+	}
+	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+		cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
+				      OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
+		    OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
+		cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
+	}
+	if (attr_mask & IB_QP_RNR_RETRY) {
+		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
+			OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
+			& OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
+		cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
+	}
+	if (attr_mask & IB_QP_SQ_PSN) {
+		cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
+		cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
+	}
+	if (attr_mask & IB_QP_RQ_PSN) {
+		cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
+		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
+	}
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+		if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
+			status = -EINVAL;
+			goto pmtu_err;
+		}
+		qp->max_ord = attrs->max_rd_atomic;
+		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
+	}
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
+			status = -EINVAL;
+			goto pmtu_err;
+		}
+		qp->max_ird = attrs->max_dest_rd_atomic;
+		cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
+	}
+	cmd->params.max_ord_ird = (qp->max_ord <<
+				OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
+				(qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
+pmtu_err:
+	return status;
+}
+
+int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
+			 struct ib_qp_attr *attrs, int attr_mask,
+			 enum ib_qp_state old_qps)
+{
+	int status = -ENOMEM;
+	struct ocrdma_modify_qp *cmd;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
+	if (!cmd)
+		return status;
+
+	cmd->params.id = qp->id;
+	cmd->flags = 0;
+	if (attr_mask & IB_QP_STATE) {
+		cmd->params.max_sge_recv_flags |=
+		    (get_ocrdma_qp_state(attrs->qp_state) <<
+		     OCRDMA_QP_PARAMS_STATE_SHIFT) &
+		    OCRDMA_QP_PARAMS_STATE_MASK;
+		cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
+	} else
+		cmd->params.max_sge_recv_flags |=
+		    (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
+		    OCRDMA_QP_PARAMS_STATE_MASK;
+	status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
+	if (status)
+		goto mbx_err;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+
+mbx_err:
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
+{
+	int status = -ENOMEM;
+	struct ocrdma_destroy_qp *cmd;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	cmd->qp_id = qp->id;
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+
+mbx_err:
+	kfree(cmd);
+	if (qp->sq.va)
+		dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
+	if (!qp->srq && qp->rq.va)
+		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
+	if (qp->dpp_enabled)
+		qp->pd->num_dpp_qp++;
+	return status;
+}
+
+int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
+			  struct ib_srq_init_attr *srq_attr,
+			  struct ocrdma_pd *pd)
+{
+	int status = -ENOMEM;
+	int hw_pages, hw_page_size;
+	int len;
+	struct ocrdma_create_srq_rsp *rsp;
+	struct ocrdma_create_srq *cmd;
+	dma_addr_t pa;
+	struct ocrdma_dev *dev = srq->dev;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	u32 max_rqe_allocated;
+
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
+	if (!cmd)
+		return status;
+
+	cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
+	max_rqe_allocated = srq_attr->attr.max_wr + 1;
+	status = ocrdma_build_q_conf(&max_rqe_allocated,
+				dev->attr.rqe_size,
+				&hw_pages, &hw_page_size);
+	if (status) {
+		ocrdma_err("%s() req. max_wr=0x%x\n", __func__,
+			   srq_attr->attr.max_wr);
+		status = -EINVAL;
+		goto ret;
+	}
+	len = hw_pages * hw_page_size;
+	srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	if (!srq->rq.va) {
+		status = -ENOMEM;
+		goto ret;
+	}
+	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
+
+	srq->rq.entry_size = dev->attr.rqe_size;
+	srq->rq.pa = pa;
+	srq->rq.len = len;
+	srq->rq.max_cnt = max_rqe_allocated;
+
+	cmd->max_sge_rqe = ilog2(max_rqe_allocated);
+	cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
+				OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;
+
+	cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
+		<< OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
+	cmd->pages_rqe_sz |= (dev->attr.rqe_size
+		<< OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
+		& OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
+	cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;
+
+	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	if (status)
+		goto mbx_err;
+	rsp = (struct ocrdma_create_srq_rsp *)cmd;
+	srq->id = rsp->id;
+	srq->rq.dbid = rsp->id;
+	max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
+		OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
+		OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
+	max_rqe_allocated = (1 << max_rqe_allocated);
+	srq->rq.max_cnt = max_rqe_allocated;
+	srq->rq.max_wqe_idx = max_rqe_allocated - 1;
+	srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
+		OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
+		OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
+	goto ret;
+mbx_err:
+	dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
+ret:
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
+{
+	int status = -ENOMEM;
+	struct ocrdma_modify_srq *cmd;
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	cmd->id = srq->id;
+	cmd->limit_max_rqe |= srq_attr->srq_limit <<
+	    OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
+	status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
+{
+	int status = -ENOMEM;
+	struct ocrdma_query_srq *cmd;
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	cmd->id = srq->rq.dbid;
+	status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+	if (status == 0) {
+		struct ocrdma_query_srq_rsp *rsp =
+		    (struct ocrdma_query_srq_rsp *)cmd;
+		srq_attr->max_sge =
+		    rsp->srq_lmt_max_sge &
+		    OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
+		srq_attr->max_wr =
+		    rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
+		srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
+		    OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
+	}
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
+{
+	int status = -ENOMEM;
+	struct ocrdma_destroy_srq *cmd;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
+	if (!cmd)
+		return status;
+	cmd->id = srq->id;
+	status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+	if (srq->rq.va)
+		dma_free_coherent(&pdev->dev, srq->rq.len,
+				  srq->rq.va, srq->rq.pa);
+	kfree(cmd);
+	return status;
+}
+
+int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
+{
+	int i;
+	int status = -EINVAL;
+	struct ocrdma_av *av;
+	unsigned long flags;
+
+	av = dev->av_tbl.va;
+	spin_lock_irqsave(&dev->av_tbl.lock, flags);
+	for (i = 0; i < dev->av_tbl.num_ah; i++) {
+		if (av->valid == 0) {
+			av->valid = OCRDMA_AV_VALID;
+			ah->av = av;
+			ah->id = i;
+			status = 0;
+			break;
+		}
+		av++;
+	}
+	if (i == dev->av_tbl.num_ah)
+		status = -EAGAIN;
+	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
+	return status;
+}
+
+int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&dev->av_tbl.lock, flags);
+	ah->av->valid = 0;
+	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
+	return 0;
+}
+
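+/* Create the EQ used by the mailbox (control path) and register its
+ * interrupt handler; the IRQ is shared when running in INTx mode.
+ */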
+static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
+{
+	int status;
+	int irq;
+	unsigned long flags = 0;
+	int num_eq = 0;
+
+	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
+		flags = IRQF_SHARED;
+	else {
+		num_eq = dev->nic_info.msix.num_vectors -
+				dev->nic_info.msix.start_vector;
+		/* a minimum of two vectors/EQs is required for RDMA to work:
+		 * one for the control path and one for the data path.
+		 */
+		if (num_eq < 2)
+			return -EBUSY;
+	}
+
+	status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN);
+	if (status)
+		return status;
+	sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id);
+	irq = ocrdma_get_irq(dev, &dev->meq);
+	status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name,
+			     &dev->meq);
+	if (status)
+		_ocrdma_destroy_eq(dev, &dev->meq);
+	return status;
+}
+
+static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
+{
+	int num_eq, i, status = 0;
+	int irq;
+	unsigned long flags = 0;
+
+	num_eq = dev->nic_info.msix.num_vectors -
+			dev->nic_info.msix.start_vector;
+	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
+		num_eq = 1;
+		flags = IRQF_SHARED;
+	} else
+		num_eq = min_t(u32, num_eq, num_online_cpus());
+	dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
+	if (!dev->qp_eq_tbl)
+		return -ENOMEM;
+
+	for (i = 0; i < num_eq; i++) {
+		status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],
+					  OCRDMA_EQ_LEN);
+		if (status) {
+			status = -EINVAL;
+			break;
+		}
+		sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d",
+			dev->id, i);
+		irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);
+		status = request_irq(irq, ocrdma_irq_handler, flags,
+				     dev->qp_eq_tbl[i].irq_name,
+				     &dev->qp_eq_tbl[i]);
+		if (status) {
+			_ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
+			status = -EINVAL;
+			break;
+		}
+		dev->eq_cnt += 1;
+	}
+	/* one eq is sufficient for data path to work */
+	if (dev->eq_cnt >= 1)
+		return 0;
+	if (status)
+		ocrdma_destroy_qp_eqs(dev);
+	return status;
+}
+
+int ocrdma_init_hw(struct ocrdma_dev *dev)
+{
+	int status;
+	/* set up control path eq */
+	status = ocrdma_create_mq_eq(dev);
+	if (status)
+		return status;
+	/* set up data path eq */
+	status = ocrdma_create_qp_eqs(dev);
+	if (status)
+		goto qpeq_err;
+	status = ocrdma_create_mq(dev);
+	if (status)
+		goto mq_err;
+	status = ocrdma_mbx_query_fw_config(dev);
+	if (status)
+		goto conf_err;
+	status = ocrdma_mbx_query_dev(dev);
+	if (status)
+		goto conf_err;
+	status = ocrdma_mbx_query_fw_ver(dev);
+	if (status)
+		goto conf_err;
+	status = ocrdma_mbx_create_ah_tbl(dev);
+	if (status)
+		goto conf_err;
+	return 0;
+
+conf_err:
+	ocrdma_destroy_mq(dev);
+mq_err:
+	ocrdma_destroy_qp_eqs(dev);
+qpeq_err:
+	ocrdma_destroy_eq(dev, &dev->meq);
+	ocrdma_err("%s() status=%d\n", __func__, status);
+	return status;
+}
+
+void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
+{
+	ocrdma_mbx_delete_ah_tbl(dev);
+
+	/* cleanup the data path eqs */
+	ocrdma_destroy_qp_eqs(dev);
+
+	/* cleanup the control path */
+	ocrdma_destroy_mq(dev);
+	ocrdma_destroy_eq(dev, &dev->meq);
+}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
new file mode 100644
index 0000000..be5db77
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -0,0 +1,132 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) CNA Adapters.              *
+ * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#ifndef __OCRDMA_HW_H__
+#define __OCRDMA_HW_H__
+
+#include "ocrdma_sli.h"
+
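+/* Endianness helpers: the in-place and copying conversions below are
+ * only needed on big-endian hosts; on little-endian builds they reduce
+ * to no-ops or a plain memcpy.
+ */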
+static inline void ocrdma_cpu_to_le32(void *dst, u32 len)
+{
+#ifdef __BIG_ENDIAN
+	int i = 0;
+	u32 *src_ptr = dst;
+	u32 *dst_ptr = dst;
+	for (; i < (len / 4); i++)
+		*(dst_ptr + i) = cpu_to_le32p(src_ptr + i);
+#endif
+}
+
+static inline void ocrdma_le32_to_cpu(void *dst, u32 len)
+{
+#ifdef __BIG_ENDIAN
+	int i = 0;
+	u32 *src_ptr = dst;
+	u32 *dst_ptr = dst;
+	for (; i < (len / sizeof(u32)); i++)
+		*(dst_ptr + i) = le32_to_cpu(*(src_ptr + i));
+#endif
+}
+
+static inline void ocrdma_copy_cpu_to_le32(void *dst, void *src, u32 len)
+{
+#ifdef __BIG_ENDIAN
+	int i = 0;
+	u32 *src_ptr = src;
+	u32 *dst_ptr = dst;
+	for (; i < (len / sizeof(u32)); i++)
+		*(dst_ptr + i) = cpu_to_le32p(src_ptr + i);
+#else
+	memcpy(dst, src, len);
+#endif
+}
+
+static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len)
+{
+#ifdef __BIG_ENDIAN
+	int i = 0;
+	u32 *src_ptr = src;
+	u32 *dst_ptr = dst;
+	for (; i < len / sizeof(u32); i++)
+		*(dst_ptr + i) = le32_to_cpu(*(src_ptr + i));
+#else
+	memcpy(dst, src, len);
+#endif
+}
+
+int ocrdma_init_hw(struct ocrdma_dev *);
+void ocrdma_cleanup_hw(struct ocrdma_dev *);
+
+enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps);
+void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
+		       bool solicited, u16 cqe_popped);
+
+/* verbs specific mailbox commands */
+int ocrdma_query_config(struct ocrdma_dev *,
+			struct ocrdma_mbx_query_config *config);
+int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr);
+
+int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
+int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
+
+int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
+			  u32 pd_id, int addr_check);
+int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);
+
+int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
+			u32 pd_id, int acc);
+int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,
+				int entries, int dpp_cq);
+int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *);
+
+int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
+			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
+			 u16 *dpp_credit_lmt);
+int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,
+			 struct ib_qp_attr *attrs, int attr_mask,
+			 enum ib_qp_state old_qps);
+int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,
+			struct ocrdma_qp_params *param);
+int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);
+
+int ocrdma_mbx_create_srq(struct ocrdma_srq *,
+			  struct ib_srq_init_attr *,
+			  struct ocrdma_pd *);
+int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
+int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *);
+int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);
+
+int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);
+int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);
+
+int ocrdma_qp_state_machine(struct ocrdma_qp *, enum ib_qp_state new_state,
+			    enum ib_qp_state *old_ib_state);
+bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
+bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
+void ocrdma_flush_qp(struct ocrdma_qp *);
+
+#endif				/* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
new file mode 100644
index 0000000..a20d16e
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -0,0 +1,577 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) adapters.                   *
+ * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/idr.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+
+#include <linux/netdevice.h>
+#include <net/addrconf.h>
+
+#include "ocrdma.h"
+#include "ocrdma_verbs.h"
+#include "ocrdma_ah.h"
+#include "be_roce.h"
+#include "ocrdma_hw.h"
+
+MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);
+MODULE_DESCRIPTION("Emulex RoCE HCA Driver");
+MODULE_AUTHOR("Emulex Corporation");
+MODULE_LICENSE("GPL");
+
+static LIST_HEAD(ocrdma_dev_list);
+static DEFINE_SPINLOCK(ocrdma_devlist_lock);
+static DEFINE_IDR(ocrdma_dev_id);
+
+static union ib_gid ocrdma_zero_sgid;
+
+static int ocrdma_get_instance(void)
+{
+	int instance = 0;
+
+	/* Assign an unused number */
+	if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL))
+		return -1;
+	if (idr_get_new(&ocrdma_dev_id, NULL, &instance))
+		return -1;
+	return instance;
+}
+
+void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
+{
+	u8 mac_addr[6];
+
+	memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
+	guid[0] = mac_addr[0] ^ 2;
+	guid[1] = mac_addr[1];
+	guid[2] = mac_addr[2];
+	guid[3] = 0xff;
+	guid[4] = 0xfe;
+	guid[5] = mac_addr[3];
+	guid[6] = mac_addr[4];
+	guid[7] = mac_addr[5];
+}
+
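+/* Build a link-local RoCE GID from a MAC address: the usual EUI-64
+ * expansion under the fe80::/64 prefix, except that for VLAN devices
+ * the ff:fe filler bytes carry the VLAN id instead.
+ */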
+static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
+				  bool is_vlan, u16 vlan_id)
+{
+	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+	sgid->raw[8] = mac_addr[0] ^ 2;
+	sgid->raw[9] = mac_addr[1];
+	sgid->raw[10] = mac_addr[2];
+	if (is_vlan) {
+		sgid->raw[11] = vlan_id >> 8;
+		sgid->raw[12] = vlan_id & 0xff;
+	} else {
+		sgid->raw[11] = 0xff;
+		sgid->raw[12] = 0xfe;
+	}
+	sgid->raw[13] = mac_addr[3];
+	sgid->raw[14] = mac_addr[4];
+	sgid->raw[15] = mac_addr[5];
+}
+
+static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
+			    bool is_vlan, u16 vlan_id)
+{
+	int i;
+	bool found = false;
+	union ib_gid new_sgid;
+	int free_idx = OCRDMA_MAX_SGID;
+	unsigned long flags;
+
+	memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
+
+	ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id);
+
+	spin_lock_irqsave(&dev->sgid_lock, flags);
+	for (i = 0; i < OCRDMA_MAX_SGID; i++) {
+		if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
+			    sizeof(union ib_gid))) {
+			/* found free entry */
+			if (!found) {
+				free_idx = i;
+				found = true;
+				break;
+			}
+		} else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
+				   sizeof(union ib_gid))) {
+			/* entry already present, no addition is required. */
+			spin_unlock_irqrestore(&dev->sgid_lock, flags);
+			return;
+		}
+	}
+	/* if the entry doesn't exist and the table has a free slot, add it */
+	if (found)
+		memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
+		       sizeof(union ib_gid));
+	spin_unlock_irqrestore(&dev->sgid_lock, flags);
+}
+
+static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
+			    bool is_vlan, u16 vlan_id)
+{
+	int found = false;
+	int i;
+	union ib_gid sgid;
+	unsigned long flags;
+
+	ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id);
+
+	spin_lock_irqsave(&dev->sgid_lock, flags);
+	/* first is default sgid, which cannot be deleted. */
+	for (i = 1; i < OCRDMA_MAX_SGID; i++) {
+		if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) {
+			/* found matching entry */
+			memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid));
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->sgid_lock, flags);
+	return found;
+}
+
+static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
+{
+	/* GID Index 0 - Invariant manufacturer-assigned EUI-64 */
+	union ib_gid *sgid = &dev->sgid_tbl[0];
+
+	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+	ocrdma_get_guid(dev, &sgid->raw[8]);
+}
+
+static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+{
+	struct net_device *netdev, *tmp;
+	u16 vlan_id;
+	bool is_vlan;
+
+	netdev = dev->nic_info.netdev;
+
+	ocrdma_add_default_sgid(dev);
+
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, tmp) {
+		if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
+			if (!netif_running(tmp) || !netif_oper_up(tmp))
+				continue;
+			if (netdev != tmp) {
+				vlan_id = vlan_dev_vlan_id(tmp);
+				is_vlan = true;
+			} else {
+				is_vlan = false;
+				vlan_id = 0;
+				tmp = netdev;
+			}
+			ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id);
+		}
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+static int ocrdma_inet6addr_event(struct notifier_block *notifier,
+				  unsigned long event, void *ptr)
+{
+	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+	struct net_device *event_netdev = ifa->idev->dev;
+	struct net_device *netdev = NULL;
+	struct ib_event gid_event;
+	struct ocrdma_dev *dev;
+	bool found = false;
+	bool is_vlan = false;
+	u16 vid = 0;
+
+	netdev = vlan_dev_real_dev(event_netdev);
+	if (netdev != event_netdev) {
+		is_vlan = true;
+		vid = vlan_dev_vlan_id(event_netdev);
+	}
+	rcu_read_lock();
+	list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
+		if (dev->nic_info.netdev == netdev) {
+			found = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	if (!found)
+		return NOTIFY_DONE;
+	if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr))
+		return NOTIFY_DONE;
+
+	mutex_lock(&dev->dev_lock);
+	switch (event) {
+	case NETDEV_UP:
+		ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+		break;
+	case NETDEV_DOWN:
+		found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
+		if (found) {
+			/* found the matching entry, notify
+			 * the consumers about it
+			 */
+			gid_event.device = &dev->ibdev;
+			gid_event.element.port_num = 1;
+			gid_event.event = IB_EVENT_GID_CHANGE;
+			ib_dispatch_event(&gid_event);
+		}
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&dev->dev_lock);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ocrdma_inet6addr_notifier = {
+	.notifier_call = ocrdma_inet6addr_event
+};
+
+#endif /* IPV6 */
+
+static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
+					      u8 port_num)
+{
+	return IB_LINK_LAYER_ETHERNET;
+}
+
+static int ocrdma_register_device(struct ocrdma_dev *dev)
+{
+	strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
+	ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
+	memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
+	       sizeof(OCRDMA_NODE_DESC));
+	dev->ibdev.owner = THIS_MODULE;
+	dev->ibdev.uverbs_cmd_mask =
+	    OCRDMA_UVERBS(GET_CONTEXT) |
+	    OCRDMA_UVERBS(QUERY_DEVICE) |
+	    OCRDMA_UVERBS(QUERY_PORT) |
+	    OCRDMA_UVERBS(ALLOC_PD) |
+	    OCRDMA_UVERBS(DEALLOC_PD) |
+	    OCRDMA_UVERBS(REG_MR) |
+	    OCRDMA_UVERBS(DEREG_MR) |
+	    OCRDMA_UVERBS(CREATE_COMP_CHANNEL) |
+	    OCRDMA_UVERBS(CREATE_CQ) |
+	    OCRDMA_UVERBS(RESIZE_CQ) |
+	    OCRDMA_UVERBS(DESTROY_CQ) |
+	    OCRDMA_UVERBS(REQ_NOTIFY_CQ) |
+	    OCRDMA_UVERBS(CREATE_QP) |
+	    OCRDMA_UVERBS(MODIFY_QP) |
+	    OCRDMA_UVERBS(QUERY_QP) |
+	    OCRDMA_UVERBS(DESTROY_QP) |
+	    OCRDMA_UVERBS(POLL_CQ) |
+	    OCRDMA_UVERBS(POST_SEND) |
+	    OCRDMA_UVERBS(POST_RECV);
+
+	dev->ibdev.uverbs_cmd_mask |=
+	    OCRDMA_UVERBS(CREATE_AH) |
+	     OCRDMA_UVERBS(MODIFY_AH) |
+	     OCRDMA_UVERBS(QUERY_AH) |
+	     OCRDMA_UVERBS(DESTROY_AH);
+
+	dev->ibdev.node_type = RDMA_NODE_IB_CA;
+	dev->ibdev.phys_port_cnt = 1;
+	dev->ibdev.num_comp_vectors = 1;
+
+	/* mandatory verbs. */
+	dev->ibdev.query_device = ocrdma_query_device;
+	dev->ibdev.query_port = ocrdma_query_port;
+	dev->ibdev.modify_port = ocrdma_modify_port;
+	dev->ibdev.query_gid = ocrdma_query_gid;
+	dev->ibdev.get_link_layer = ocrdma_link_layer;
+	dev->ibdev.alloc_pd = ocrdma_alloc_pd;
+	dev->ibdev.dealloc_pd = ocrdma_dealloc_pd;
+
+	dev->ibdev.create_cq = ocrdma_create_cq;
+	dev->ibdev.destroy_cq = ocrdma_destroy_cq;
+	dev->ibdev.resize_cq = ocrdma_resize_cq;
+
+	dev->ibdev.create_qp = ocrdma_create_qp;
+	dev->ibdev.modify_qp = ocrdma_modify_qp;
+	dev->ibdev.query_qp = ocrdma_query_qp;
+	dev->ibdev.destroy_qp = ocrdma_destroy_qp;
+
+	dev->ibdev.query_pkey = ocrdma_query_pkey;
+	dev->ibdev.create_ah = ocrdma_create_ah;
+	dev->ibdev.destroy_ah = ocrdma_destroy_ah;
+	dev->ibdev.query_ah = ocrdma_query_ah;
+	dev->ibdev.modify_ah = ocrdma_modify_ah;
+
+	dev->ibdev.poll_cq = ocrdma_poll_cq;
+	dev->ibdev.post_send = ocrdma_post_send;
+	dev->ibdev.post_recv = ocrdma_post_recv;
+	dev->ibdev.req_notify_cq = ocrdma_arm_cq;
+
+	dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;
+	dev->ibdev.dereg_mr = ocrdma_dereg_mr;
+	dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;
+
+	/* mandatory to support user space verbs consumer. */
+	dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;
+	dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;
+	dev->ibdev.mmap = ocrdma_mmap;
+	dev->ibdev.dma_device = &dev->nic_info.pdev->dev;
+
+	dev->ibdev.process_mad = ocrdma_process_mad;
+
+	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+		dev->ibdev.uverbs_cmd_mask |=
+		     OCRDMA_UVERBS(CREATE_SRQ) |
+		     OCRDMA_UVERBS(MODIFY_SRQ) |
+		     OCRDMA_UVERBS(QUERY_SRQ) |
+		     OCRDMA_UVERBS(DESTROY_SRQ) |
+		     OCRDMA_UVERBS(POST_SRQ_RECV);
+
+		dev->ibdev.create_srq = ocrdma_create_srq;
+		dev->ibdev.modify_srq = ocrdma_modify_srq;
+		dev->ibdev.query_srq = ocrdma_query_srq;
+		dev->ibdev.destroy_srq = ocrdma_destroy_srq;
+		dev->ibdev.post_srq_recv = ocrdma_post_srq_recv;
+	}
+	return ib_register_device(&dev->ibdev, NULL);
+}
+
+static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
+{
+	mutex_init(&dev->dev_lock);
+	dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
+				OCRDMA_MAX_SGID, GFP_KERNEL);
+	if (!dev->sgid_tbl)
+		goto alloc_err;
+	spin_lock_init(&dev->sgid_lock);
+
+	dev->cq_tbl = kzalloc(sizeof(struct ocrdma_cq *) *
+			      OCRDMA_MAX_CQ, GFP_KERNEL);
+	if (!dev->cq_tbl)
+		goto alloc_err;
+
+	if (dev->attr.max_qp) {
+		dev->qp_tbl = kzalloc(sizeof(struct ocrdma_qp *) *
+				      OCRDMA_MAX_QP, GFP_KERNEL);
+		if (!dev->qp_tbl)
+			goto alloc_err;
+	}
+	spin_lock_init(&dev->av_tbl.lock);
+	spin_lock_init(&dev->flush_q_lock);
+	return 0;
+alloc_err:
+	ocrdma_err("%s(%d) error.\n", __func__, dev->id);
+	return -ENOMEM;
+}
+
+static void ocrdma_free_resources(struct ocrdma_dev *dev)
+{
+	kfree(dev->qp_tbl);
+	kfree(dev->cq_tbl);
+	kfree(dev->sgid_tbl);
+}
+
+static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
+{
+	int status = 0;
+	struct ocrdma_dev *dev;
+
+	dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
+	if (!dev) {
+		ocrdma_err("Unable to allocate ib device\n");
+		return NULL;
+	}
+	dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
+	if (!dev->mbx_cmd)
+		goto idr_err;
+
+	memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
+	dev->id = ocrdma_get_instance();
+	if (dev->id < 0)
+		goto idr_err;
+
+	status = ocrdma_init_hw(dev);
+	if (status)
+		goto init_err;
+
+	status = ocrdma_alloc_resources(dev);
+	if (status)
+		goto alloc_err;
+
+	status = ocrdma_build_sgid_tbl(dev);
+	if (status)
+		goto alloc_err;
+
+	status = ocrdma_register_device(dev);
+	if (status)
+		goto alloc_err;
+
+	spin_lock(&ocrdma_devlist_lock);
+	list_add_tail_rcu(&dev->entry, &ocrdma_dev_list);
+	spin_unlock(&ocrdma_devlist_lock);
+	return dev;
+
+alloc_err:
+	ocrdma_free_resources(dev);
+	ocrdma_cleanup_hw(dev);
+init_err:
+	idr_remove(&ocrdma_dev_id, dev->id);
+idr_err:
+	kfree(dev->mbx_cmd);
+	ib_dealloc_device(&dev->ibdev);
+	ocrdma_err("%s() leaving. ret=%d\n", __func__, status);
+	return NULL;
+}
+
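+/* RCU callback: release device resources only after all readers that may
+ * still be traversing ocrdma_dev_list have finished.
+ */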
+static void ocrdma_remove_free(struct rcu_head *rcu)
+{
+	struct ocrdma_dev *dev = container_of(rcu, struct ocrdma_dev, rcu);
+
+	ocrdma_free_resources(dev);
+	ocrdma_cleanup_hw(dev);
+
+	idr_remove(&ocrdma_dev_id, dev->id);
+	kfree(dev->mbx_cmd);
+	ib_dealloc_device(&dev->ibdev);
+}
+
+static void ocrdma_remove(struct ocrdma_dev *dev)
+{
+	/* First unregister with the IB stack so that all active traffic
+	 * from the registered clients is stopped.
+	 */
+	ib_unregister_device(&dev->ibdev);
+
+	spin_lock(&ocrdma_devlist_lock);
+	list_del_rcu(&dev->entry);
+	spin_unlock(&ocrdma_devlist_lock);
+	call_rcu(&dev->rcu, ocrdma_remove_free);
+}
+
+static int ocrdma_open(struct ocrdma_dev *dev)
+{
+	struct ib_event port_event;
+
+	port_event.event = IB_EVENT_PORT_ACTIVE;
+	port_event.element.port_num = 1;
+	port_event.device = &dev->ibdev;
+	ib_dispatch_event(&port_event);
+	return 0;
+}
+
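+/* On link down, move every active QP to the error state and notify
+ * consumers through QP fatal and port error events.
+ */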
+static int ocrdma_close(struct ocrdma_dev *dev)
+{
+	int i;
+	struct ocrdma_qp *qp, **cur_qp;
+	struct ib_event err_event;
+	struct ib_qp_attr attrs;
+	int attr_mask = IB_QP_STATE;
+
+	attrs.qp_state = IB_QPS_ERR;
+	mutex_lock(&dev->dev_lock);
+	if (dev->qp_tbl) {
+		cur_qp = dev->qp_tbl;
+		for (i = 0; i < OCRDMA_MAX_QP; i++) {
+			qp = cur_qp[i];
+			if (qp) {
+				/* change the QP state to ERROR */
+				_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
+
+				err_event.event = IB_EVENT_QP_FATAL;
+				err_event.element.qp = &qp->ibqp;
+				err_event.device = &dev->ibdev;
+				ib_dispatch_event(&err_event);
+			}
+		}
+	}
+	mutex_unlock(&dev->dev_lock);
+
+	err_event.event = IB_EVENT_PORT_ERR;
+	err_event.element.port_num = 1;
+	err_event.device = &dev->ibdev;
+	ib_dispatch_event(&err_event);
+	return 0;
+}
+
+/* Event handling via the NIC driver ensures that all NIC-specific
+ * initialization is done before the RoCE driver notifies the
+ * event to the stack.
+ */
+static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
+{
+	switch (event) {
+	case BE_DEV_UP:
+		ocrdma_open(dev);
+		break;
+	case BE_DEV_DOWN:
+		ocrdma_close(dev);
+		break;
+	}
+}
+
+static struct ocrdma_driver ocrdma_drv = {
+	.name			= "ocrdma_driver",
+	.add			= ocrdma_add,
+	.remove			= ocrdma_remove,
+	.state_change_handler	= ocrdma_event_handler,
+};
+
+static void ocrdma_unregister_inet6addr_notifier(void)
+{
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
+#endif
+}
+
+static int __init ocrdma_init_module(void)
+{
+	int status;
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
+	if (status)
+		return status;
+#endif
+
+	status = be_roce_register_driver(&ocrdma_drv);
+	if (status)
+		ocrdma_unregister_inet6addr_notifier();
+
+	return status;
+}
+
+static void __exit ocrdma_exit_module(void)
+{
+	be_roce_unregister_driver(&ocrdma_drv);
+	ocrdma_unregister_inet6addr_notifier();
+}
+
+module_init(ocrdma_init_module);
+module_exit(ocrdma_exit_module);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
new file mode 100644
index 0000000..7fd80cc
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -0,0 +1,1672 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) adapters.                   *
+ * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#ifndef __OCRDMA_SLI_H__
+#define __OCRDMA_SLI_H__
+
+#define Bit(_b) (1 << (_b))
+
+#define OCRDMA_GEN1_FAMILY	0xB
+#define OCRDMA_GEN2_FAMILY	0x2
+
+#define OCRDMA_SUBSYS_ROCE 10
+enum {
+	OCRDMA_CMD_QUERY_CONFIG = 1,
+	OCRDMA_CMD_ALLOC_PD,
+	OCRDMA_CMD_DEALLOC_PD,
+
+	OCRDMA_CMD_CREATE_AH_TBL,
+	OCRDMA_CMD_DELETE_AH_TBL,
+
+	OCRDMA_CMD_CREATE_QP,
+	OCRDMA_CMD_QUERY_QP,
+	OCRDMA_CMD_MODIFY_QP,
+	OCRDMA_CMD_DELETE_QP,
+
+	OCRDMA_CMD_RSVD1,
+	OCRDMA_CMD_ALLOC_LKEY,
+	OCRDMA_CMD_DEALLOC_LKEY,
+	OCRDMA_CMD_REGISTER_NSMR,
+	OCRDMA_CMD_REREGISTER_NSMR,
+	OCRDMA_CMD_REGISTER_NSMR_CONT,
+	OCRDMA_CMD_QUERY_NSMR,
+	OCRDMA_CMD_ALLOC_MW,
+	OCRDMA_CMD_QUERY_MW,
+
+	OCRDMA_CMD_CREATE_SRQ,
+	OCRDMA_CMD_QUERY_SRQ,
+	OCRDMA_CMD_MODIFY_SRQ,
+	OCRDMA_CMD_DELETE_SRQ,
+
+	OCRDMA_CMD_ATTACH_MCAST,
+	OCRDMA_CMD_DETACH_MCAST,
+
+	OCRDMA_CMD_MAX
+};
+
+#define OCRDMA_SUBSYS_COMMON 1
+enum {
+	OCRDMA_CMD_CREATE_CQ		= 12,
+	OCRDMA_CMD_CREATE_EQ		= 13,
+	OCRDMA_CMD_CREATE_MQ		= 21,
+	OCRDMA_CMD_GET_FW_VER		= 35,
+	OCRDMA_CMD_DELETE_MQ		= 53,
+	OCRDMA_CMD_DELETE_CQ		= 54,
+	OCRDMA_CMD_DELETE_EQ		= 55,
+	OCRDMA_CMD_GET_FW_CONFIG	= 58,
+	OCRDMA_CMD_CREATE_MQ_EXT	= 90
+};
+
+enum {
+	QTYPE_EQ	= 1,
+	QTYPE_CQ	= 2,
+	QTYPE_MCCQ	= 3
+};
+
+#define OCRDMA_MAX_SGID (8)
+
+#define OCRDMA_MAX_QP    2048
+#define OCRDMA_MAX_CQ    2048
+
+enum {
+	OCRDMA_DB_RQ_OFFSET		= 0xE0,
+	OCRDMA_DB_GEN2_RQ1_OFFSET	= 0x100,
+	OCRDMA_DB_GEN2_RQ2_OFFSET	= 0xC0,
+	OCRDMA_DB_SQ_OFFSET		= 0x60,
+	OCRDMA_DB_GEN2_SQ_OFFSET	= 0x1C0,
+	OCRDMA_DB_SRQ_OFFSET		= OCRDMA_DB_RQ_OFFSET,
+	OCRDMA_DB_GEN2_SRQ_OFFSET	= OCRDMA_DB_GEN2_RQ1_OFFSET,
+	OCRDMA_DB_CQ_OFFSET		= 0x120,
+	OCRDMA_DB_EQ_OFFSET		= OCRDMA_DB_CQ_OFFSET,
+	OCRDMA_DB_MQ_OFFSET		= 0x140
+};
+
+#define OCRDMA_DB_CQ_RING_ID_MASK       0x3FF	/* bits 0 - 9 */
+#define OCRDMA_DB_CQ_RING_ID_EXT_MASK  0x0C00	/* bits 10-11 of qid at 12-11 */
+/* qid #2 msbits at 12-11 */
+#define OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT  0x1
+#define OCRDMA_DB_CQ_NUM_POPPED_SHIFT       (16)	/* bits 16 - 28 */
+/* Rearm bit */
+#define OCRDMA_DB_CQ_REARM_SHIFT        (29)	/* bit 29 */
+/* solicited bit */
+#define OCRDMA_DB_CQ_SOLICIT_SHIFT   (31)	/* bit 31 */
+
+#define OCRDMA_EQ_ID_MASK		0x1FF	/* bits 0 - 8 */
+#define OCRDMA_EQ_ID_EXT_MASK		0x3e00	/* bits 9-13 */
+#define OCRDMA_EQ_ID_EXT_MASK_SHIFT	(2)	/* qid bits 9-13 at 11-15 */
+
+/* Clear the interrupt for this eq */
+#define OCRDMA_EQ_CLR_SHIFT			(9)	/* bit 9 */
+/* Must be 1 */
+#define OCRDMA_EQ_TYPE_SHIFT		(10)	/* bit 10 */
+/* Number of event entries processed */
+#define OCRDMA_NUM_EQE_SHIFT		(16)	/* bits 16 - 28 */
+/* Rearm bit */
+#define OCRDMA_REARM_SHIFT		(29)	/* bit 29 */
+
+#define OCRDMA_MQ_ID_MASK		0x7FF	/* bits 0 - 10 */
+/* Number of entries posted */
+#define OCRDMA_MQ_NUM_MQE_SHIFT	(16)	/* bits 16 - 29 */
+
+#define OCRDMA_MIN_HPAGE_SIZE (4096)
+
+#define OCRDMA_MIN_Q_PAGE_SIZE (4096)
+#define OCRDMA_MAX_Q_PAGES     (8)
+
+/*
+ * Queue page size encodings:
+ * 0: 4K Bytes
+ * 1: 8K Bytes
+ * 2: 16K Bytes
+ * 3: 32K Bytes
+ * 4: 64K Bytes
+ */
+#define OCRDMA_MAX_Q_PAGE_SIZE_CNT (5)
+#define OCRDMA_Q_PAGE_BASE_SIZE (OCRDMA_MIN_Q_PAGE_SIZE * OCRDMA_MAX_Q_PAGES)
+
+#define MAX_OCRDMA_QP_PAGES      (8)
+#define OCRDMA_MAX_WQE_MEM_SIZE (MAX_OCRDMA_QP_PAGES * OCRDMA_MIN_HPAGE_SIZE)
+
+#define OCRDMA_CREATE_CQ_MAX_PAGES (4)
+#define OCRDMA_DPP_CQE_SIZE (4)
+
+#define OCRDMA_GEN2_MAX_CQE 1024
+#define OCRDMA_GEN2_CQ_PAGE_SIZE 4096
+#define OCRDMA_GEN2_WQE_SIZE 256
+#define OCRDMA_MAX_CQE  4095
+#define OCRDMA_CQ_PAGE_SIZE 16384
+#define OCRDMA_WQE_SIZE 128
+#define OCRDMA_WQE_STRIDE 8
+#define OCRDMA_WQE_ALIGN_BYTES 16
+
+#define MAX_OCRDMA_SRQ_PAGES MAX_OCRDMA_QP_PAGES
+
+enum {
+	OCRDMA_MCH_OPCODE_SHIFT	= 0,
+	OCRDMA_MCH_OPCODE_MASK	= 0xFF,
+	OCRDMA_MCH_SUBSYS_SHIFT	= 8,
+	OCRDMA_MCH_SUBSYS_MASK	= 0xFF00
+};
+
+/* mailbox cmd header */
+struct ocrdma_mbx_hdr {
+	u32 subsys_op;
+	u32 timeout;		/* in seconds */
+	u32 cmd_len;
+	u32 rsvd_version;
+} __packed;
+
+enum {
+	OCRDMA_MBX_RSP_OPCODE_SHIFT	= 0,
+	OCRDMA_MBX_RSP_OPCODE_MASK	= 0xFF,
+	OCRDMA_MBX_RSP_SUBSYS_SHIFT	= 8,
+	OCRDMA_MBX_RSP_SUBSYS_MASK	= 0xFF << OCRDMA_MBX_RSP_SUBSYS_SHIFT,
+
+	OCRDMA_MBX_RSP_STATUS_SHIFT	= 0,
+	OCRDMA_MBX_RSP_STATUS_MASK	= 0xFF,
+	OCRDMA_MBX_RSP_ASTATUS_SHIFT	= 8,
+	OCRDMA_MBX_RSP_ASTATUS_MASK	= 0xFF << OCRDMA_MBX_RSP_ASTATUS_SHIFT
+};
+
+/* mailbox cmd response */
+struct ocrdma_mbx_rsp {
+	u32 subsys_op;
+	u32 status;
+	u32 rsp_len;
+	u32 add_rsp_len;
+} __packed;
+
+enum {
+	OCRDMA_MQE_EMBEDDED	= 1,
+	OCRDMA_MQE_NONEMBEDDED	= 0
+};
+
+struct ocrdma_mqe_sge {
+	u32 pa_lo;
+	u32 pa_hi;
+	u32 len;
+} __packed;
+
+enum {
+	OCRDMA_MQE_HDR_EMB_SHIFT	= 0,
+	OCRDMA_MQE_HDR_EMB_MASK		= Bit(0),
+	OCRDMA_MQE_HDR_SGE_CNT_SHIFT	= 3,
+	OCRDMA_MQE_HDR_SGE_CNT_MASK	= 0x1F << OCRDMA_MQE_HDR_SGE_CNT_SHIFT,
+	OCRDMA_MQE_HDR_SPECIAL_SHIFT	= 24,
+	OCRDMA_MQE_HDR_SPECIAL_MASK	= 0xFF << OCRDMA_MQE_HDR_SPECIAL_SHIFT
+};
+
+struct ocrdma_mqe_hdr {
+	u32 spcl_sge_cnt_emb;
+	u32 pyld_len;
+	u32 tag_lo;
+	u32 tag_hi;
+	u32 rsvd3;
+} __packed;
+
+struct ocrdma_mqe_emb_cmd {
+	struct ocrdma_mbx_hdr mch;
+	u8 pyld[220];
+} __packed;
+
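+/* Mailbox queue entry: a 256-byte command carrying either an embedded
+ * request/response or a list of SGEs pointing to a non-embedded payload.
+ */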
+struct ocrdma_mqe {
+	struct ocrdma_mqe_hdr hdr;
+	union {
+		struct ocrdma_mqe_emb_cmd emb_req;
+		struct {
+			struct ocrdma_mqe_sge sge[19];
+		} nonemb_req;
+		u8 cmd[236];
+		struct ocrdma_mbx_rsp rsp;
+	} u;
+} __packed;
+
+#define OCRDMA_EQ_LEN       4096
+#define OCRDMA_MQ_CQ_LEN    256
+#define OCRDMA_MQ_LEN       128
+
+#define PAGE_SHIFT_4K		12
+#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)
+
+/* Returns number of pages spanned by the data starting at the given addr */
+#define PAGES_4K_SPANNED(_address, size) \
+	((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +	\
+			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
+
+struct ocrdma_delete_q_req {
+	struct ocrdma_mbx_hdr req;
+	u32 id;
+} __packed;
+
+struct ocrdma_pa {
+	u32 lo;
+	u32 hi;
+} __packed;
+
+#define MAX_OCRDMA_EQ_PAGES (8)
+struct ocrdma_create_eq_req {
+	struct ocrdma_mbx_hdr req;
+	u32 num_pages;
+	u32 valid;
+	u32 cnt;
+	u32 delay;
+	u32 rsvd;
+	struct ocrdma_pa pa[MAX_OCRDMA_EQ_PAGES];
+} __packed;
+
+enum {
+	OCRDMA_CREATE_EQ_VALID	= Bit(29),
+	OCRDMA_CREATE_EQ_CNT_SHIFT	= 26,
+	OCRDMA_CREATE_CQ_DELAY_SHIFT	= 13,
+};
+
+struct ocrdma_create_eq_rsp {
+	struct ocrdma_mbx_rsp rsp;
+	u32 vector_eqid;
+};
+
+#define OCRDMA_EQ_MINOR_OTHER (0x1)
+
+enum {
+	OCRDMA_MCQE_STATUS_SHIFT	= 0,
+	OCRDMA_MCQE_STATUS_MASK		= 0xFFFF,
+	OCRDMA_MCQE_ESTATUS_SHIFT	= 16,
+	OCRDMA_MCQE_ESTATUS_MASK	= 0xFFFF << OCRDMA_MCQE_ESTATUS_SHIFT,
+	OCRDMA_MCQE_CONS_SHIFT		= 27,
+	OCRDMA_MCQE_CONS_MASK		= Bit(27),
+	OCRDMA_MCQE_CMPL_SHIFT		= 28,
+	OCRDMA_MCQE_CMPL_MASK		= Bit(28),
+	OCRDMA_MCQE_AE_SHIFT		= 30,
+	OCRDMA_MCQE_AE_MASK		= Bit(30),
+	OCRDMA_MCQE_VALID_SHIFT		= 31,
+	OCRDMA_MCQE_VALID_MASK		= Bit(31)
+};
+
+struct ocrdma_mcqe {
+	u32 status;
+	u32 tag_lo;
+	u32 tag_hi;
+	u32 valid_ae_cmpl_cons;
+} __packed;
+
+enum {
+	OCRDMA_AE_MCQE_QPVALID		= Bit(31),
+	OCRDMA_AE_MCQE_QPID_MASK	= 0xFFFF,
+
+	OCRDMA_AE_MCQE_CQVALID		= Bit(31),
+	OCRDMA_AE_MCQE_CQID_MASK	= 0xFFFF,
+	OCRDMA_AE_MCQE_VALID		= Bit(31),
+	OCRDMA_AE_MCQE_AE		= Bit(30),
+	OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT	= 16,
+	OCRDMA_AE_MCQE_EVENT_TYPE_MASK	=
+					0xFF << OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT,
+	OCRDMA_AE_MCQE_EVENT_CODE_SHIFT	= 8,
+	OCRDMA_AE_MCQE_EVENT_CODE_MASK	=
+					0xFF << OCRDMA_AE_MCQE_EVENT_CODE_SHIFT
+};
+struct ocrdma_ae_mcqe {
+	u32 qpvalid_qpid;
+	u32 cqvalid_cqid;
+	u32 evt_tag;
+	u32 valid_ae_event;
+} __packed;
+
+enum {
+	OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT		= 16,
+	OCRDMA_AE_MPA_MCQE_REQ_ID_MASK		= 0xFFFF <<
+					OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT,
+
+	OCRDMA_AE_MPA_MCQE_EVENT_CODE_SHIFT	= 8,
+	OCRDMA_AE_MPA_MCQE_EVENT_CODE_MASK	= 0xFF <<
+					OCRDMA_AE_MPA_MCQE_EVENT_CODE_SHIFT,
+	OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT	= 16,
+	OCRDMA_AE_MPA_MCQE_EVENT_TYPE_MASK	= 0xFF <<
+					OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT,
+	OCRDMA_AE_MPA_MCQE_EVENT_AE_SHIFT	= 30,
+	OCRDMA_AE_MPA_MCQE_EVENT_AE_MASK	= Bit(30),
+	OCRDMA_AE_MPA_MCQE_EVENT_VALID_SHIFT	= 31,
+	OCRDMA_AE_MPA_MCQE_EVENT_VALID_MASK	= Bit(31)
+};
+
+struct ocrdma_ae_mpa_mcqe {
+	u32 req_id;
+	u32 w1;
+	u32 w2;
+	u32 valid_ae_event;
+} __packed;
+
+enum {
+	OCRDMA_AE_QP_MCQE_NEW_QP_STATE_SHIFT	= 0,
+	OCRDMA_AE_QP_MCQE_NEW_QP_STATE_MASK	= 0xFFFF,
+	OCRDMA_AE_QP_MCQE_QP_ID_SHIFT		= 16,
+	OCRDMA_AE_QP_MCQE_QP_ID_MASK		= 0xFFFF <<
+						OCRDMA_AE_QP_MCQE_QP_ID_SHIFT,
+
+	OCRDMA_AE_QP_MCQE_EVENT_CODE_SHIFT	= 8,
+	OCRDMA_AE_QP_MCQE_EVENT_CODE_MASK	= 0xFF <<
+				OCRDMA_AE_QP_MCQE_EVENT_CODE_SHIFT,
+	OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT	= 16,
+	OCRDMA_AE_QP_MCQE_EVENT_TYPE_MASK	= 0xFF <<
+				OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT,
+	OCRDMA_AE_QP_MCQE_EVENT_AE_SHIFT	= 30,
+	OCRDMA_AE_QP_MCQE_EVENT_AE_MASK		= Bit(30),
+	OCRDMA_AE_QP_MCQE_EVENT_VALID_SHIFT	= 31,
+	OCRDMA_AE_QP_MCQE_EVENT_VALID_MASK	= Bit(31)
+};
+
+struct ocrdma_ae_qp_mcqe {
+	u32 qp_id_state;
+	u32 w1;
+	u32 w2;
+	u32 valid_ae_event;
+} __packed;
+
+#define OCRDMA_ASYNC_EVE_CODE 0x14
+
+enum OCRDMA_ASYNC_EVENT_TYPE {
+	OCRDMA_CQ_ERROR			= 0x00,
+	OCRDMA_CQ_OVERRUN_ERROR		= 0x01,
+	OCRDMA_CQ_QPCAT_ERROR		= 0x02,
+	OCRDMA_QP_ACCESS_ERROR		= 0x03,
+	OCRDMA_QP_COMM_EST_EVENT	= 0x04,
+	OCRDMA_SQ_DRAINED_EVENT		= 0x05,
+	OCRDMA_DEVICE_FATAL_EVENT	= 0x08,
+	OCRDMA_SRQCAT_ERROR		= 0x0E,
+	OCRDMA_SRQ_LIMIT_EVENT		= 0x0F,
+	OCRDMA_QP_LAST_WQE_EVENT	= 0x10
+};
+
+/* mailbox command request and responses */
+enum {
+	OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT		= 2,
+	OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK		= Bit(2),
+	OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT	= 3,
+	OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK		= Bit(3),
+	OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT		= 8,
+	OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK		= 0xFFFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT,
+
+	OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT		= 16,
+	OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK		= 0xFFFF <<
+					OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT,
+	OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT		= 8,
+	OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK		= 0xFF <<
+				OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT,
+
+	OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT		= 0,
+	OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK		= 0xFFFF,
+
+	OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT	= 0,
+	OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK	= 0xFFFF,
+	OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT	= 16,
+	OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK	= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT,
+
+	OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET	= 24,
+	OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK		= 0xFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET,
+	OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET	= 16,
+	OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK		= 0xFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET,
+	OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_OFFSET	= 0,
+	OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_MASK		= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_OFFSET,
+
+	OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET		= 16,
+	OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK		= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET,
+	OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_OFFSET	= 0,
+	OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_MASK		= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_OFFSET,
+
+	OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET		= 16,
+	OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK		= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET,
+	OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_OFFSET	= 0,
+	OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_MASK	= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_OFFSET,
+
+	OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_OFFSET		= 0,
+	OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_MASK		= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_OFFSET,
+
+	OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET	= 16,
+	OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_MASK	= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
+	OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET	= 0,
+	OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK	= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
+
+	OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET		= 16,
+	OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK		= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET,
+	OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_OFFSET	= 0,
+	OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK	= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_OFFSET,
+
+	OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_OFFSET		= 16,
+	OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_MASK		= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_OFFSET,
+	OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET		= 0,
+	OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK		= 0xFFFF <<
+				OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET,
+};
+
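+/* QUERY_CONFIG mailbox response; most fields pack several sub-fields that
+ * are extracted with the offset/mask definitions above.
+ */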
+struct ocrdma_mbx_query_config {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+	u32 qp_srq_cq_ird_ord;
+	u32 max_pd_ca_ack_delay;
+	u32 max_write_send_sge;
+	u32 max_ird_ord_per_qp;
+	u32 max_shared_ird_ord;
+	u32 max_mr;
+	u64 max_mr_size;
+	u32 max_num_mr_pbl;
+	u32 max_mw;
+	u32 max_fmr;
+	u32 max_pages_per_frmr;
+	u32 max_mcast_group;
+	u32 max_mcast_qp_attach;
+	u32 max_total_mcast_qp_attach;
+	u32 wqe_rqe_stride_max_dpp_cqs;
+	u32 max_srq_rpir_qps;
+	u32 max_dpp_pds_credits;
+	u32 max_dpp_credits_pds_per_pd;
+	u32 max_wqes_rqes_per_q;
+	u32 max_cq_cqes_per_cq;
+	u32 max_srq_rqe_sge;
+} __packed;
+
+struct ocrdma_fw_ver_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+
+	u8 running_ver[32];
+} __packed;
+
+struct ocrdma_fw_conf_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+
+	u32 config_num;
+	u32 asic_revision;
+	u32 phy_port;
+	u32 fn_mode;
+	struct {
+		u32 mode;
+		u32 nic_wqid_base;
+		u32 nic_wq_tot;
+		u32 prot_wqid_base;
+		u32 prot_wq_tot;
+		u32 prot_rqid_base;
+		u32 prot_rqid_tot;
+		u32 rsvd[6];
+	} ulp[2];
+	u32 fn_capabilities;
+	u32 rsvd1;
+	u32 rsvd2;
+	u32 base_eqid;
+	u32 max_eq;
+
+} __packed;
+
+enum {
+	OCRDMA_FN_MODE_RDMA	= 0x4
+};
+
+enum {
+	OCRDMA_CREATE_CQ_VER2			= 2,
+
+	OCRDMA_CREATE_CQ_PAGE_CNT_MASK		= 0xFFFF,
+	OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT	= 16,
+	OCRDMA_CREATE_CQ_PAGE_SIZE_MASK		= 0xFF,
+
+	OCRDMA_CREATE_CQ_COALESCWM_SHIFT	= 12,
+	OCRDMA_CREATE_CQ_COALESCWM_MASK		= Bit(13) | Bit(12),
+	OCRDMA_CREATE_CQ_FLAGS_NODELAY		= Bit(14),
+	OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID	= Bit(15),
+
+	OCRDMA_CREATE_CQ_EQ_ID_MASK		= 0xFFFF,
+	OCRDMA_CREATE_CQ_CQE_COUNT_MASK		= 0xFFFF
+};
+
+enum {
+	OCRDMA_CREATE_CQ_VER0			= 0,
+	OCRDMA_CREATE_CQ_DPP			= 1,
+	OCRDMA_CREATE_CQ_TYPE_SHIFT		= 24,
+	OCRDMA_CREATE_CQ_EQID_SHIFT		= 22,
+
+	OCRDMA_CREATE_CQ_CNT_SHIFT		= 27,
+	OCRDMA_CREATE_CQ_FLAGS_VALID		= Bit(29),
+	OCRDMA_CREATE_CQ_FLAGS_EVENTABLE	= Bit(31),
+	OCRDMA_CREATE_CQ_DEF_FLAGS		= OCRDMA_CREATE_CQ_FLAGS_VALID |
+					OCRDMA_CREATE_CQ_FLAGS_EVENTABLE |
+					OCRDMA_CREATE_CQ_FLAGS_NODELAY
+};
+
+struct ocrdma_create_cq_cmd {
+	struct ocrdma_mbx_hdr req;
+	u32 pgsz_pgcnt;
+	u32 ev_cnt_flags;
+	u32 eqn;
+	u32 cqe_count;
+	u32 rsvd6;
+	struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES];
+};
+
+struct ocrdma_create_cq {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_create_cq_cmd cmd;
+} __packed;
+
+enum {
+	OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK	= 0xFFFF
+};
+
+struct ocrdma_create_cq_cmd_rsp {
+	struct ocrdma_mbx_rsp rsp;
+	u32 cq_id;
+} __packed;
+
+struct ocrdma_create_cq_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_create_cq_cmd_rsp rsp;
+} __packed;
+
+enum {
+	OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT		= 22,
+	OCRDMA_CREATE_MQ_CQ_ID_SHIFT		= 16,
+	OCRDMA_CREATE_MQ_RING_SIZE_SHIFT	= 16,
+	OCRDMA_CREATE_MQ_VALID			= Bit(31),
+	OCRDMA_CREATE_MQ_ASYNC_CQ_VALID		= Bit(0)
+};
+
+struct ocrdma_create_mq_v0 {
+	u32 pages;
+	u32 cqid_ringsize;
+	u32 valid;
+	u32 async_cqid_valid;
+	u32 rsvd;
+	struct ocrdma_pa pa[8];
+} __packed;
+
+struct ocrdma_create_mq_v1 {
+	u32 cqid_pages;
+	u32 async_event_bitmap;
+	u32 async_cqid_ringsize;
+	u32 valid;
+	u32 async_cqid_valid;
+	u32 rsvd;
+	struct ocrdma_pa pa[8];
+} __packed;
+
+struct ocrdma_create_mq_req {
+	struct ocrdma_mbx_hdr req;
+	union {
+		struct ocrdma_create_mq_v0 v0;
+		struct ocrdma_create_mq_v1 v1;
+	};
+} __packed;
+
+struct ocrdma_create_mq_rsp {
+	struct ocrdma_mbx_rsp rsp;
+	u32 id;
+} __packed;
+
+enum {
+	OCRDMA_DESTROY_CQ_QID_SHIFT			= 0,
+	OCRDMA_DESTROY_CQ_QID_MASK			= 0xFFFF,
+	OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_SHIFT	= 16,
+	OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_MASK		= 0xFFFF <<
+				OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_SHIFT
+};
+
+struct ocrdma_destroy_cq {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+
+	u32 bypass_flush_qid;
+} __packed;
+
+struct ocrdma_destroy_cq_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+} __packed;
+
+enum {
+	OCRDMA_QPT_GSI	= 1,
+	OCRDMA_QPT_RC	= 2,
+	OCRDMA_QPT_UD	= 4,
+};
+
+enum {
+	OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT	= 0,
+	OCRDMA_CREATE_QP_REQ_PD_ID_MASK		= 0xFFFF,
+	OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT	= 16,
+	OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT	= 19,
+	OCRDMA_CREATE_QP_REQ_QPT_SHIFT		= 29,
+	OCRDMA_CREATE_QP_REQ_QPT_MASK		= Bit(31) | Bit(30) | Bit(29),
+
+	OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT	= 0,
+	OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK	= 0xFFFF,
+	OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT	= 16,
+	OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK	= 0xFFFF <<
+					OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT,
+
+	OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT	= 0,
+	OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK		= 0xFFFF,
+	OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT		= 16,
+	OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK		= 0xFFFF <<
+					OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT,
+
+	OCRDMA_CREATE_QP_REQ_FMR_EN_SHIFT		= 0,
+	OCRDMA_CREATE_QP_REQ_FMR_EN_MASK		= Bit(0),
+	OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_SHIFT		= 1,
+	OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK		= Bit(1),
+	OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_SHIFT		= 2,
+	OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK		= Bit(2),
+	OCRDMA_CREATE_QP_REQ_INB_WREN_SHIFT		= 3,
+	OCRDMA_CREATE_QP_REQ_INB_WREN_MASK		= Bit(3),
+	OCRDMA_CREATE_QP_REQ_INB_RDEN_SHIFT		= 4,
+	OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK		= Bit(4),
+	OCRDMA_CREATE_QP_REQ_USE_SRQ_SHIFT		= 5,
+	OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK		= Bit(5),
+	OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_SHIFT		= 6,
+	OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_MASK		= Bit(6),
+	OCRDMA_CREATE_QP_REQ_ENABLE_DPP_SHIFT		= 7,
+	OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK		= Bit(7),
+	OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_SHIFT	= 8,
+	OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_MASK		= Bit(8),
+	OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT		= 16,
+	OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT,
+
+	OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT		= 0,
+	OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK		= 0xFFFF,
+	OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT		= 16,
+	OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT,
+
+	OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT		= 0,
+	OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK		= 0xFFFF,
+	OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT		= 16,
+	OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT,
+
+	OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT		= 0,
+	OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK		= 0xFFFF,
+	OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT		= 16,
+	OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT,
+
+	OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT		= 0,
+	OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK		= 0xFFFF,
+	OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT		= 16,
+	OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT,
+
+	OCRDMA_CREATE_QP_REQ_DPP_CQPID_SHIFT		= 0,
+	OCRDMA_CREATE_QP_REQ_DPP_CQPID_MASK		= 0xFFFF,
+	OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT		= 16,
+	OCRDMA_CREATE_QP_REQ_DPP_CREDIT_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT
+};
+
+enum {
+	OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT	= 16,
+	OCRDMA_CREATE_QP_RSP_DPP_PAGE_SHIFT	= 1
+};
+
+#define MAX_OCRDMA_IRD_PAGES 4
+
+enum ocrdma_qp_flags {
+	OCRDMA_QP_MW_BIND	= 1,
+	OCRDMA_QP_LKEY0		= (1 << 1),
+	OCRDMA_QP_FAST_REG	= (1 << 2),
+	OCRDMA_QP_INB_RD	= (1 << 6),
+	OCRDMA_QP_INB_WR	= (1 << 7),
+};
+
+enum ocrdma_qp_state {
+	OCRDMA_QPS_RST		= 0,
+	OCRDMA_QPS_INIT		= 1,
+	OCRDMA_QPS_RTR		= 2,
+	OCRDMA_QPS_RTS		= 3,
+	OCRDMA_QPS_SQE		= 4,
+	OCRDMA_QPS_SQ_DRAINING	= 5,
+	OCRDMA_QPS_ERR		= 6,
+	OCRDMA_QPS_SQD		= 7
+};
+
+struct ocrdma_create_qp_req {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+
+	u32 type_pgsz_pdn;
+	u32 max_wqe_rqe;
+	u32 max_sge_send_write;
+	u32 max_sge_recv_flags;
+	u32 max_ord_ird;
+	u32 num_wq_rq_pages;
+	u32 wqe_rqe_size;
+	u32 wq_rq_cqid;
+	struct ocrdma_pa wq_addr[MAX_OCRDMA_QP_PAGES];
+	struct ocrdma_pa rq_addr[MAX_OCRDMA_QP_PAGES];
+	u32 dpp_credits_cqid;
+	u32 rpir_lkey;
+	struct ocrdma_pa ird_addr[MAX_OCRDMA_IRD_PAGES];
+} __packed;
+
+enum {
+	OCRDMA_CREATE_QP_RSP_QP_ID_SHIFT		= 0,
+	OCRDMA_CREATE_QP_RSP_QP_ID_MASK			= 0xFFFF,
+
+	OCRDMA_CREATE_QP_RSP_MAX_RQE_SHIFT		= 0,
+	OCRDMA_CREATE_QP_RSP_MAX_RQE_MASK		= 0xFFFF,
+	OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT		= 16,
+	OCRDMA_CREATE_QP_RSP_MAX_WQE_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT,
+
+	OCRDMA_CREATE_QP_RSP_MAX_SGE_WRITE_SHIFT	= 0,
+	OCRDMA_CREATE_QP_RSP_MAX_SGE_WRITE_MASK		= 0xFFFF,
+	OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_SHIFT		= 16,
+	OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_SHIFT,
+
+	OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_SHIFT		= 16,
+	OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_SHIFT,
+
+	OCRDMA_CREATE_QP_RSP_MAX_IRD_SHIFT		= 0,
+	OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK		= 0xFFFF,
+	OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT		= 16,
+	OCRDMA_CREATE_QP_RSP_MAX_ORD_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT,
+
+	OCRDMA_CREATE_QP_RSP_RQ_ID_SHIFT		= 0,
+	OCRDMA_CREATE_QP_RSP_RQ_ID_MASK			= 0xFFFF,
+	OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT		= 16,
+	OCRDMA_CREATE_QP_RSP_SQ_ID_MASK			= 0xFFFF <<
+				OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT,
+
+	OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK		= Bit(0),
+	OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT	= 1,
+	OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK	= 0x7FFF <<
+				OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT,
+	OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT		= 16,
+	OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK		= 0xFFFF <<
+				OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT,
+};
+
+struct ocrdma_create_qp_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+
+	u32 qp_id;
+	u32 max_wqe_rqe;
+	u32 max_sge_send_write;
+	u32 max_sge_recv;
+	u32 max_ord_ird;
+	u32 sq_rq_id;
+	u32 dpp_response;
+} __packed;
+
+struct ocrdma_destroy_qp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+	u32 qp_id;
+} __packed;
+
+struct ocrdma_destroy_qp_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+} __packed;
+
+enum {
+	OCRDMA_MODIFY_QP_ID_SHIFT	= 0,
+	OCRDMA_MODIFY_QP_ID_MASK	= 0xFFFF,
+
+	OCRDMA_QP_PARA_QPS_VALID	= Bit(0),
+	OCRDMA_QP_PARA_SQD_ASYNC_VALID	= Bit(1),
+	OCRDMA_QP_PARA_PKEY_VALID	= Bit(2),
+	OCRDMA_QP_PARA_QKEY_VALID	= Bit(3),
+	OCRDMA_QP_PARA_PMTU_VALID	= Bit(4),
+	OCRDMA_QP_PARA_ACK_TO_VALID	= Bit(5),
+	OCRDMA_QP_PARA_RETRY_CNT_VALID	= Bit(6),
+	OCRDMA_QP_PARA_RRC_VALID	= Bit(7),
+	OCRDMA_QP_PARA_RQPSN_VALID	= Bit(8),
+	OCRDMA_QP_PARA_MAX_IRD_VALID	= Bit(9),
+	OCRDMA_QP_PARA_MAX_ORD_VALID	= Bit(10),
+	OCRDMA_QP_PARA_RNT_VALID	= Bit(11),
+	OCRDMA_QP_PARA_SQPSN_VALID	= Bit(12),
+	OCRDMA_QP_PARA_DST_QPN_VALID	= Bit(13),
+	OCRDMA_QP_PARA_MAX_WQE_VALID	= Bit(14),
+	OCRDMA_QP_PARA_MAX_RQE_VALID	= Bit(15),
+	OCRDMA_QP_PARA_SGE_SEND_VALID	= Bit(16),
+	OCRDMA_QP_PARA_SGE_RECV_VALID	= Bit(17),
+	OCRDMA_QP_PARA_SGE_WR_VALID	= Bit(18),
+	OCRDMA_QP_PARA_INB_RDEN_VALID	= Bit(19),
+	OCRDMA_QP_PARA_INB_WREN_VALID	= Bit(20),
+	OCRDMA_QP_PARA_FLOW_LBL_VALID	= Bit(21),
+	OCRDMA_QP_PARA_BIND_EN_VALID	= Bit(22),
+	OCRDMA_QP_PARA_ZLKEY_EN_VALID	= Bit(23),
+	OCRDMA_QP_PARA_FMR_EN_VALID	= Bit(24),
+	OCRDMA_QP_PARA_INBAT_EN_VALID	= Bit(25),
+	OCRDMA_QP_PARA_VLAN_EN_VALID	= Bit(26),
+
+	OCRDMA_MODIFY_QP_FLAGS_RD	= Bit(0),
+	OCRDMA_MODIFY_QP_FLAGS_WR	= Bit(1),
+	OCRDMA_MODIFY_QP_FLAGS_SEND	= Bit(2),
+	OCRDMA_MODIFY_QP_FLAGS_ATOMIC	= Bit(3)
+};
+
+enum {
+	OCRDMA_QP_PARAMS_SRQ_ID_SHIFT		= 0,
+	OCRDMA_QP_PARAMS_SRQ_ID_MASK		= 0xFFFF,
+
+	OCRDMA_QP_PARAMS_MAX_RQE_SHIFT		= 0,
+	OCRDMA_QP_PARAMS_MAX_RQE_MASK		= 0xFFFF,
+	OCRDMA_QP_PARAMS_MAX_WQE_SHIFT		= 16,
+	OCRDMA_QP_PARAMS_MAX_WQE_MASK		= 0xFFFF <<
+	    OCRDMA_QP_PARAMS_MAX_WQE_SHIFT,
+
+	OCRDMA_QP_PARAMS_MAX_SGE_WRITE_SHIFT	= 0,
+	OCRDMA_QP_PARAMS_MAX_SGE_WRITE_MASK	= 0xFFFF,
+	OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT	= 16,
+	OCRDMA_QP_PARAMS_MAX_SGE_SEND_MASK	= 0xFFFF <<
+					OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT,
+
+	OCRDMA_QP_PARAMS_FLAGS_FMR_EN		= Bit(0),
+	OCRDMA_QP_PARAMS_FLAGS_LKEY_0_EN	= Bit(1),
+	OCRDMA_QP_PARAMS_FLAGS_BIND_MW_EN	= Bit(2),
+	OCRDMA_QP_PARAMS_FLAGS_INBWR_EN		= Bit(3),
+	OCRDMA_QP_PARAMS_FLAGS_INBRD_EN		= Bit(4),
+	OCRDMA_QP_PARAMS_STATE_SHIFT		= 5,
+	OCRDMA_QP_PARAMS_STATE_MASK		= Bit(5) | Bit(6) | Bit(7),
+	OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC	= Bit(8),
+	OCRDMA_QP_PARAMS_FLAGS_INB_ATEN		= Bit(9),
+	OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT	= 16,
+	OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK	= 0xFFFF <<
+					OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT,
+
+	OCRDMA_QP_PARAMS_MAX_IRD_SHIFT		= 0,
+	OCRDMA_QP_PARAMS_MAX_IRD_MASK		= 0xFFFF,
+	OCRDMA_QP_PARAMS_MAX_ORD_SHIFT		= 16,
+	OCRDMA_QP_PARAMS_MAX_ORD_MASK		= 0xFFFF <<
+					OCRDMA_QP_PARAMS_MAX_ORD_SHIFT,
+
+	OCRDMA_QP_PARAMS_RQ_CQID_SHIFT		= 0,
+	OCRDMA_QP_PARAMS_RQ_CQID_MASK		= 0xFFFF,
+	OCRDMA_QP_PARAMS_WQ_CQID_SHIFT		= 16,
+	OCRDMA_QP_PARAMS_WQ_CQID_MASK		= 0xFFFF <<
+					OCRDMA_QP_PARAMS_WQ_CQID_SHIFT,
+
+	OCRDMA_QP_PARAMS_RQ_PSN_SHIFT		= 0,
+	OCRDMA_QP_PARAMS_RQ_PSN_MASK		= 0xFFFFFF,
+	OCRDMA_QP_PARAMS_HOP_LMT_SHIFT		= 24,
+	OCRDMA_QP_PARAMS_HOP_LMT_MASK		= 0xFF <<
+					OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
+
+	OCRDMA_QP_PARAMS_SQ_PSN_SHIFT		= 0,
+	OCRDMA_QP_PARAMS_SQ_PSN_MASK		= 0xFFFFFF,
+	OCRDMA_QP_PARAMS_TCLASS_SHIFT		= 24,
+	OCRDMA_QP_PARAMS_TCLASS_MASK		= 0xFF <<
+					OCRDMA_QP_PARAMS_TCLASS_SHIFT,
+
+	OCRDMA_QP_PARAMS_DEST_QPN_SHIFT		= 0,
+	OCRDMA_QP_PARAMS_DEST_QPN_MASK		= 0xFFFFFF,
+	OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT	= 24,
+	OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK	= 0x7 <<
+					OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT,
+	OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT	= 27,
+	OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK	= 0x1F <<
+					OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT,
+
+	OCRDMA_QP_PARAMS_PKEY_IDNEX_SHIFT	= 0,
+	OCRDMA_QP_PARAMS_PKEY_INDEX_MASK	= 0xFFFF,
+	OCRDMA_QP_PARAMS_PATH_MTU_SHIFT		= 18,
+	OCRDMA_QP_PARAMS_PATH_MTU_MASK		= 0x3FFF <<
+					OCRDMA_QP_PARAMS_PATH_MTU_SHIFT,
+
+	OCRDMA_QP_PARAMS_FLOW_LABEL_SHIFT	= 0,
+	OCRDMA_QP_PARAMS_FLOW_LABEL_MASK	= 0xFFFFF,
+	OCRDMA_QP_PARAMS_SL_SHIFT		= 20,
+	OCRDMA_QP_PARAMS_SL_MASK		= 0xF <<
+					OCRDMA_QP_PARAMS_SL_SHIFT,
+	OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT	= 24,
+	OCRDMA_QP_PARAMS_RETRY_CNT_MASK		= 0x7 <<
+					OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT,
+	OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT	= 27,
+	OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK	= 0x1F <<
+					OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT,
+
+	OCRDMA_QP_PARAMS_DMAC_B4_TO_B5_SHIFT	= 0,
+	OCRDMA_QP_PARAMS_DMAC_B4_TO_B5_MASK	= 0xFFFF,
+	OCRDMA_QP_PARAMS_VLAN_SHIFT		= 16,
+	OCRDMA_QP_PARAMS_VLAN_MASK		= 0xFFFF <<
+					OCRDMA_QP_PARAMS_VLAN_SHIFT
+};
+
+struct ocrdma_qp_params {
+	u32 id;
+	u32 max_wqe_rqe;
+	u32 max_sge_send_write;
+	u32 max_sge_recv_flags;
+	u32 max_ord_ird;
+	u32 wq_rq_cqid;
+	u32 hop_lmt_rq_psn;
+	u32 tclass_sq_psn;
+	u32 ack_to_rnr_rtc_dest_qpn;
+	u32 path_mtu_pkey_indx;
+	u32 rnt_rc_sl_fl;
+	u8 sgid[16];
+	u8 dgid[16];
+	u32 dmac_b0_to_b3;
+	u32 vlan_dmac_b4_to_b5;
+	u32 qkey;
+} __packed;
+
+
+struct ocrdma_modify_qp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+
+	struct ocrdma_qp_params params;
+	u32 flags;
+	u32 rdma_flags;
+	u32 num_outstanding_atomic_rd;
+} __packed;
+
+enum {
+	OCRDMA_MODIFY_QP_RSP_MAX_RQE_SHIFT	= 0,
+	OCRDMA_MODIFY_QP_RSP_MAX_RQE_MASK	= 0xFFFF,
+	OCRDMA_MODIFY_QP_RSP_MAX_WQE_SHIFT	= 16,
+	OCRDMA_MODIFY_QP_RSP_MAX_WQE_MASK	= 0xFFFF <<
+					OCRDMA_MODIFY_QP_RSP_MAX_WQE_SHIFT,
+
+	OCRDMA_MODIFY_QP_RSP_MAX_IRD_SHIFT	= 0,
+	OCRDMA_MODIFY_QP_RSP_MAX_IRD_MASK	= 0xFFFF,
+	OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT	= 16,
+	OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK	= 0xFFFF <<
+					OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT
+};
+struct ocrdma_modify_qp_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+
+	u32 max_wqe_rqe;
+	u32 max_ord_ird;
+} __packed;
+
+struct ocrdma_query_qp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+
+#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
+#define OCRDMA_QUERY_UP_QP_ID_MASK   0xFFFFFF
+	u32 qp_id;
+} __packed;
+
+struct ocrdma_query_qp_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+	struct ocrdma_qp_params params;
+} __packed;
+
+enum {
+	OCRDMA_CREATE_SRQ_PD_ID_SHIFT		= 0,
+	OCRDMA_CREATE_SRQ_PD_ID_MASK		= 0xFFFF,
+	OCRDMA_CREATE_SRQ_PG_SZ_SHIFT		= 16,
+	OCRDMA_CREATE_SRQ_PG_SZ_MASK		= 0x3 <<
+					OCRDMA_CREATE_SRQ_PG_SZ_SHIFT,
+
+	OCRDMA_CREATE_SRQ_MAX_RQE_SHIFT		= 0,
+	OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT	= 16,
+	OCRDMA_CREATE_SRQ_MAX_SGE_RECV_MASK	= 0xFFFF <<
+					OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT,
+
+	OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT	= 0,
+	OCRDMA_CREATE_SRQ_RQE_SIZE_MASK		= 0xFFFF,
+	OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT	= 16,
+	OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_MASK	= 0xFFFF <<
+					OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT
+};
+
+struct ocrdma_create_srq {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+
+	u32 pgsz_pdid;
+	u32 max_sge_rqe;
+	u32 pages_rqe_sz;
+	struct ocrdma_pa rq_addr[MAX_OCRDMA_SRQ_PAGES];
+} __packed;
+
+enum {
+	OCRDMA_CREATE_SRQ_RSP_SRQ_ID_SHIFT			= 0,
+	OCRDMA_CREATE_SRQ_RSP_SRQ_ID_MASK			= 0xFFFFFF,
+
+	OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT		= 0,
+	OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK		= 0xFFFF,
+	OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT	= 16,
+	OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK	= 0xFFFF <<
+			OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT
+};
+
+struct ocrdma_create_srq_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+
+	u32 id;
+	u32 max_sge_rqe_allocated;
+} __packed;
+
+enum {
+	OCRDMA_MODIFY_SRQ_ID_SHIFT	= 0,
+	OCRDMA_MODIFY_SRQ_ID_MASK	= 0xFFFFFF,
+
+	OCRDMA_MODIFY_SRQ_MAX_RQE_SHIFT	= 0,
+	OCRDMA_MODIFY_SRQ_MAX_RQE_MASK	= 0xFFFF,
+	OCRDMA_MODIFY_SRQ_LIMIT_SHIFT	= 16,
+	OCRDMA_MODIFY_SRQ__LIMIT_MASK	= 0xFFFF <<
+					OCRDMA_MODIFY_SRQ_LIMIT_SHIFT
+};
+
+struct ocrdma_modify_srq {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rep;
+
+	u32 id;
+	u32 limit_max_rqe;
+} __packed;
+
+enum {
+	OCRDMA_QUERY_SRQ_ID_SHIFT	= 0,
+	OCRDMA_QUERY_SRQ_ID_MASK	= 0xFFFFFF
+};
+
+struct ocrdma_query_srq {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp req;
+
+	u32 id;
+} __packed;
+
+enum {
+	OCRDMA_QUERY_SRQ_RSP_PD_ID_SHIFT	= 0,
+	OCRDMA_QUERY_SRQ_RSP_PD_ID_MASK		= 0xFFFF,
+	OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT	= 16,
+	OCRDMA_QUERY_SRQ_RSP_MAX_RQE_MASK	= 0xFFFF <<
+					OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT,
+
+	OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_SHIFT	= 0,
+	OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK	= 0xFFFF,
+	OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT	= 16,
+	OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_MASK	= 0xFFFF <<
+					OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT
+};
+
+struct ocrdma_query_srq_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp req;
+
+	u32 max_rqe_pdid;
+	u32 srq_lmt_max_sge;
+} __packed;
+
+enum {
+	OCRDMA_DESTROY_SRQ_ID_SHIFT	= 0,
+	OCRDMA_DESTROY_SRQ_ID_MASK	= 0xFFFFFF
+};
+
+struct ocrdma_destroy_srq {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp req;
+
+	u32 id;
+} __packed;
+
+enum {
+	OCRDMA_ALLOC_PD_ENABLE_DPP	= BIT(16),
+	OCRDMA_PD_MAX_DPP_ENABLED_QP	= 8,
+	OCRDMA_DPP_PAGE_SIZE		= 4096
+};
+
+struct ocrdma_alloc_pd {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+	u32 enable_dpp_rsvd;
+} __packed;
+
+enum {
+	OCRDMA_ALLOC_PD_RSP_DPP			= Bit(16),
+	OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT	= 20,
+	OCRDMA_ALLOC_PD_RSP_PDID_MASK		= 0xFFFF,
+};
+
+struct ocrdma_alloc_pd_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+	u32 dpp_page_pdid;
+} __packed;
+
+struct ocrdma_dealloc_pd {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+	u32 id;
+} __packed;
+
+struct ocrdma_dealloc_pd_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+} __packed;
+
+enum {
+	OCRDMA_ADDR_CHECK_ENABLE	= 1,
+	OCRDMA_ADDR_CHECK_DISABLE	= 0
+};
+
+enum {
+	OCRDMA_ALLOC_LKEY_PD_ID_SHIFT		= 0,
+	OCRDMA_ALLOC_LKEY_PD_ID_MASK		= 0xFFFF,
+
+	OCRDMA_ALLOC_LKEY_ADDR_CHECK_SHIFT	= 0,
+	OCRDMA_ALLOC_LKEY_ADDR_CHECK_MASK	= Bit(0),
+	OCRDMA_ALLOC_LKEY_FMR_SHIFT		= 1,
+	OCRDMA_ALLOC_LKEY_FMR_MASK		= Bit(1),
+	OCRDMA_ALLOC_LKEY_REMOTE_INV_SHIFT	= 2,
+	OCRDMA_ALLOC_LKEY_REMOTE_INV_MASK	= Bit(2),
+	OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT	= 3,
+	OCRDMA_ALLOC_LKEY_REMOTE_WR_MASK	= Bit(3),
+	OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT	= 4,
+	OCRDMA_ALLOC_LKEY_REMOTE_RD_MASK	= Bit(4),
+	OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT	= 5,
+	OCRDMA_ALLOC_LKEY_LOCAL_WR_MASK		= Bit(5),
+	OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_MASK	= Bit(6),
+	OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT	= 6,
+	OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT	= 16,
+	OCRDMA_ALLOC_LKEY_PBL_SIZE_MASK		= 0xFFFF <<
+						OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT
+};
+
+struct ocrdma_alloc_lkey {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+
+	u32 pdid;
+	u32 pbl_sz_flags;
+} __packed;
+
+struct ocrdma_alloc_lkey_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+
+	u32 lrkey;
+	u32 num_pbl_rsvd;
+} __packed;
+
+struct ocrdma_dealloc_lkey {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+
+	u32 lkey;
+	u32 rsvd_frmr;
+} __packed;
+
+struct ocrdma_dealloc_lkey_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+} __packed;
+
+#define MAX_OCRDMA_NSMR_PBL    (u32)22
+#define MAX_OCRDMA_PBL_SIZE     65536
+#define MAX_OCRDMA_PBL_PER_LKEY	32767
+
+enum {
+	OCRDMA_REG_NSMR_LRKEY_INDEX_SHIFT	= 0,
+	OCRDMA_REG_NSMR_LRKEY_INDEX_MASK	= 0xFFFFFF,
+	OCRDMA_REG_NSMR_LRKEY_SHIFT		= 24,
+	OCRDMA_REG_NSMR_LRKEY_MASK		= 0xFF <<
+					OCRDMA_REG_NSMR_LRKEY_SHIFT,
+
+	OCRDMA_REG_NSMR_PD_ID_SHIFT		= 0,
+	OCRDMA_REG_NSMR_PD_ID_MASK		= 0xFFFF,
+	OCRDMA_REG_NSMR_NUM_PBL_SHIFT		= 16,
+	OCRDMA_REG_NSMR_NUM_PBL_MASK		= 0xFFFF <<
+					OCRDMA_REG_NSMR_NUM_PBL_SHIFT,
+
+	OCRDMA_REG_NSMR_PBE_SIZE_SHIFT		= 0,
+	OCRDMA_REG_NSMR_PBE_SIZE_MASK		= 0xFFFF,
+	OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT	= 16,
+	OCRDMA_REG_NSMR_HPAGE_SIZE_MASK		= 0xFF <<
+					OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT,
+	OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT	= 24,
+	OCRDMA_REG_NSMR_BIND_MEMWIN_MASK	= Bit(24),
+	OCRDMA_REG_NSMR_ZB_SHIFT		= 25,
+	OCRDMA_REG_NSMR_ZB_SHIFT_MASK		= Bit(25),
+	OCRDMA_REG_NSMR_REMOTE_INV_SHIFT	= 26,
+	OCRDMA_REG_NSMR_REMOTE_INV_MASK		= Bit(26),
+	OCRDMA_REG_NSMR_REMOTE_WR_SHIFT		= 27,
+	OCRDMA_REG_NSMR_REMOTE_WR_MASK		= Bit(27),
+	OCRDMA_REG_NSMR_REMOTE_RD_SHIFT		= 28,
+	OCRDMA_REG_NSMR_REMOTE_RD_MASK		= Bit(28),
+	OCRDMA_REG_NSMR_LOCAL_WR_SHIFT		= 29,
+	OCRDMA_REG_NSMR_LOCAL_WR_MASK		= Bit(29),
+	OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT	= 30,
+	OCRDMA_REG_NSMR_REMOTE_ATOMIC_MASK	= Bit(30),
+	OCRDMA_REG_NSMR_LAST_SHIFT		= 31,
+	OCRDMA_REG_NSMR_LAST_MASK		= Bit(31)
+};
+
+struct ocrdma_reg_nsmr {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr cmd;
+
+	u32 lrkey_key_index;
+	u32 num_pbl_pdid;
+	u32 flags_hpage_pbe_sz;
+	u32 totlen_low;
+	u32 totlen_high;
+	u32 fbo_low;
+	u32 fbo_high;
+	u32 va_loaddr;
+	u32 va_hiaddr;
+	struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
+} __packed;
+
+enum {
+	OCRDMA_REG_NSMR_CONT_PBL_SHIFT		= 0,
+	OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK	= 0xFFFF,
+	OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT	= 16,
+	OCRDMA_REG_NSMR_CONT_NUM_PBL_MASK	= 0xFFFF <<
+					OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT,
+
+	OCRDMA_REG_NSMR_CONT_LAST_SHIFT		= 31,
+	OCRDMA_REG_NSMR_CONT_LAST_MASK		= Bit(31)
+};
+
+struct ocrdma_reg_nsmr_cont {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr cmd;
+
+	u32 lrkey;
+	u32 num_pbl_offset;
+	u32 last;
+
+	struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
+} __packed;
+
+struct ocrdma_pbe {
+	u32 pa_hi;
+	u32 pa_lo;
+} __packed;
+
+enum {
+	OCRDMA_REG_NSMR_RSP_NUM_PBL_SHIFT	= 16,
+	OCRDMA_REG_NSMR_RSP_NUM_PBL_MASK	= 0xFFFF0000
+};
+struct ocrdma_reg_nsmr_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+
+	u32 lrkey;
+	u32 num_pbl;
+} __packed;
+
+enum {
+	OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_SHIFT	= 0,
+	OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_MASK	= 0xFFFFFF,
+	OCRDMA_REG_NSMR_CONT_RSP_LRKEY_SHIFT		= 24,
+	OCRDMA_REG_NSMR_CONT_RSP_LRKEY_MASK		= 0xFF <<
+					OCRDMA_REG_NSMR_CONT_RSP_LRKEY_SHIFT,
+
+	OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_SHIFT		= 16,
+	OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_MASK		= 0xFFFF <<
+					OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_SHIFT
+};
+
+struct ocrdma_reg_nsmr_cont_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+
+	u32 lrkey_key_index;
+	u32 num_pbl;
+} __packed;
+
+enum {
+	OCRDMA_ALLOC_MW_PD_ID_SHIFT	= 0,
+	OCRDMA_ALLOC_MW_PD_ID_MASK	= 0xFFFF
+};
+
+struct ocrdma_alloc_mw {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+
+	u32 pdid;
+} __packed;
+
+enum {
+	OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_SHIFT	= 0,
+	OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_MASK	= 0xFFFFFF
+};
+
+struct ocrdma_alloc_mw_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+
+	u32 lrkey_index;
+} __packed;
+
+struct ocrdma_attach_mcast {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+	u32 qp_id;
+	u8 mgid[16];
+	u32 mac_b0_to_b3;
+	u32 vlan_mac_b4_to_b5;
+} __packed;
+
+struct ocrdma_attach_mcast_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+} __packed;
+
+struct ocrdma_detach_mcast {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+	u32 qp_id;
+	u8 mgid[16];
+	u32 mac_b0_to_b3;
+	u32 vlan_mac_b4_to_b5;
+} __packed;
+
+struct ocrdma_detach_mcast_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+} __packed;
+
+enum {
+	OCRDMA_CREATE_AH_NUM_PAGES_SHIFT	= 19,
+	OCRDMA_CREATE_AH_NUM_PAGES_MASK		= 0xF <<
+					OCRDMA_CREATE_AH_NUM_PAGES_SHIFT,
+
+	OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT	= 16,
+	OCRDMA_CREATE_AH_PAGE_SIZE_MASK		= 0x7 <<
+					OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT,
+
+	OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT	= 23,
+	OCRDMA_CREATE_AH_ENTRY_SIZE_MASK	= 0x1FF <<
+					OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT,
+};
+
+#define OCRDMA_AH_TBL_PAGES 8
+
+struct ocrdma_create_ah_tbl {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+
+	u32 ah_conf;
+	struct ocrdma_pa tbl_addr[8];
+} __packed;
+
+struct ocrdma_create_ah_tbl_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+	u32 ahid;
+} __packed;
+
+struct ocrdma_delete_ah_tbl {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_hdr req;
+	u32 ahid;
+} __packed;
+
+struct ocrdma_delete_ah_tbl_rsp {
+	struct ocrdma_mqe_hdr hdr;
+	struct ocrdma_mbx_rsp rsp;
+} __packed;
+
+enum {
+	OCRDMA_EQE_VALID_SHIFT		= 0,
+	OCRDMA_EQE_VALID_MASK		= Bit(0),
+	OCRDMA_EQE_FOR_CQE_MASK		= 0xFFFE,
+	OCRDMA_EQE_RESOURCE_ID_SHIFT	= 16,
+	OCRDMA_EQE_RESOURCE_ID_MASK	= 0xFFFF <<
+				OCRDMA_EQE_RESOURCE_ID_SHIFT,
+};
+
+struct ocrdma_eqe {
+	u32 id_valid;
+} __packed;
+
+enum OCRDMA_CQE_STATUS {
+	OCRDMA_CQE_SUCCESS = 0,
+	OCRDMA_CQE_LOC_LEN_ERR,
+	OCRDMA_CQE_LOC_QP_OP_ERR,
+	OCRDMA_CQE_LOC_EEC_OP_ERR,
+	OCRDMA_CQE_LOC_PROT_ERR,
+	OCRDMA_CQE_WR_FLUSH_ERR,
+	OCRDMA_CQE_MW_BIND_ERR,
+	OCRDMA_CQE_BAD_RESP_ERR,
+	OCRDMA_CQE_LOC_ACCESS_ERR,
+	OCRDMA_CQE_REM_INV_REQ_ERR,
+	OCRDMA_CQE_REM_ACCESS_ERR,
+	OCRDMA_CQE_REM_OP_ERR,
+	OCRDMA_CQE_RETRY_EXC_ERR,
+	OCRDMA_CQE_RNR_RETRY_EXC_ERR,
+	OCRDMA_CQE_LOC_RDD_VIOL_ERR,
+	OCRDMA_CQE_REM_INV_RD_REQ_ERR,
+	OCRDMA_CQE_REM_ABORT_ERR,
+	OCRDMA_CQE_INV_EECN_ERR,
+	OCRDMA_CQE_INV_EEC_STATE_ERR,
+	OCRDMA_CQE_FATAL_ERR,
+	OCRDMA_CQE_RESP_TIMEOUT_ERR,
+	OCRDMA_CQE_GENERAL_ERR
+};
+
+enum {
+	/* w0 */
+	OCRDMA_CQE_WQEIDX_SHIFT		= 0,
+	OCRDMA_CQE_WQEIDX_MASK		= 0xFFFF,
+
+	/* w1 */
+	OCRDMA_CQE_UD_XFER_LEN_SHIFT	= 16,
+	OCRDMA_CQE_PKEY_SHIFT		= 0,
+	OCRDMA_CQE_PKEY_MASK		= 0xFFFF,
+
+	/* w2 */
+	OCRDMA_CQE_QPN_SHIFT		= 0,
+	OCRDMA_CQE_QPN_MASK		= 0x0000FFFF,
+
+	OCRDMA_CQE_BUFTAG_SHIFT		= 16,
+	OCRDMA_CQE_BUFTAG_MASK		= 0xFFFF << OCRDMA_CQE_BUFTAG_SHIFT,
+
+	/* w3 */
+	OCRDMA_CQE_UD_STATUS_SHIFT	= 24,
+	OCRDMA_CQE_UD_STATUS_MASK	= 0x7 << OCRDMA_CQE_UD_STATUS_SHIFT,
+	OCRDMA_CQE_STATUS_SHIFT		= 16,
+	OCRDMA_CQE_STATUS_MASK		= 0xFF << OCRDMA_CQE_STATUS_SHIFT,
+	OCRDMA_CQE_VALID		= Bit(31),
+	OCRDMA_CQE_INVALIDATE		= Bit(30),
+	OCRDMA_CQE_QTYPE		= Bit(29),
+	OCRDMA_CQE_IMM			= Bit(28),
+	OCRDMA_CQE_WRITE_IMM		= Bit(27),
+	OCRDMA_CQE_QTYPE_SQ		= 0,
+	OCRDMA_CQE_QTYPE_RQ		= 1,
+	OCRDMA_CQE_SRCQP_MASK		= 0xFFFFFF
+};
+
+struct ocrdma_cqe {
+	union {
+		/* w0 to w2 */
+		struct {
+			u32 wqeidx;
+			u32 bytes_xfered;
+			u32 qpn;
+		} wq;
+		struct {
+			u32 lkey_immdt;
+			u32 rxlen;
+			u32 buftag_qpn;
+		} rq;
+		struct {
+			u32 lkey_immdt;
+			u32 rxlen_pkey;
+			u32 buftag_qpn;
+		} ud;
+		struct {
+			u32 word_0;
+			u32 word_1;
+			u32 qpn;
+		} cmn;
+	};
+	u32 flags_status_srcqpn;	/* w3 */
+} __packed;
+
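+/* CQE decode helpers: ownership is determined by comparing the CQE valid
+ * bit with the current CQ phase; QTYPE distinguishes SQ from RQ
+ * completions.
+ */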
+#define is_cqe_valid(cq, cqe) \
+	(((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID)\
+	== cq->phase) ? 1 : 0)
+#define is_cqe_for_sq(cqe) \
+	((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 0 : 1)
+#define is_cqe_for_rq(cqe) \
+	((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_QTYPE) ? 1 : 0)
+#define is_cqe_invalidated(cqe) \
+	((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_INVALIDATE) ? \
+	1 : 0)
+#define is_cqe_imm(cqe) \
+	((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_IMM) ? 1 : 0)
+#define is_cqe_wr_imm(cqe) \
+	((le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_WRITE_IMM) ? 1 : 0)
+
+struct ocrdma_sge {
+	u32 addr_hi;
+	u32 addr_lo;
+	u32 lrkey;
+	u32 len;
+} __packed;
+
+enum {
+	OCRDMA_FLAG_SIG		= 0x1,
+	OCRDMA_FLAG_INV		= 0x2,
+	OCRDMA_FLAG_FENCE_L	= 0x4,
+	OCRDMA_FLAG_FENCE_R	= 0x8,
+	OCRDMA_FLAG_SOLICIT	= 0x10,
+	OCRDMA_FLAG_IMM		= 0x20,
+
+	/* Stag flags */
+	OCRDMA_LKEY_FLAG_LOCAL_WR	= 0x1,
+	OCRDMA_LKEY_FLAG_REMOTE_RD	= 0x2,
+	OCRDMA_LKEY_FLAG_REMOTE_WR	= 0x4,
+	OCRDMA_LKEY_FLAG_VATO		= 0x8,
+};
+
+enum OCRDMA_WQE_OPCODE {
+	OCRDMA_WRITE		= 0x06,
+	OCRDMA_READ		= 0x0C,
+	OCRDMA_RESV0		= 0x02,
+	OCRDMA_SEND		= 0x00,
+	OCRDMA_CMP_SWP		= 0x14,
+	OCRDMA_BIND_MW		= 0x10,
+	OCRDMA_RESV1		= 0x0A,
+	OCRDMA_LKEY_INV		= 0x15,
+	OCRDMA_FETCH_ADD	= 0x13,
+	OCRDMA_POST_RQ		= 0x12
+};
+
+enum {
+	OCRDMA_TYPE_INLINE	= 0x0,
+	OCRDMA_TYPE_LKEY	= 0x1,
+};
+
+enum {
+	OCRDMA_WQE_OPCODE_SHIFT		= 0,
+	OCRDMA_WQE_OPCODE_MASK		= 0x0000001F,
+	OCRDMA_WQE_FLAGS_SHIFT		= 5,
+	OCRDMA_WQE_TYPE_SHIFT		= 16,
+	OCRDMA_WQE_TYPE_MASK		= 0x00030000,
+	OCRDMA_WQE_SIZE_SHIFT		= 18,
+	OCRDMA_WQE_SIZE_MASK		= 0xFF,
+	OCRDMA_WQE_NXT_WQE_SIZE_SHIFT	= 25,
+
+	OCRDMA_WQE_LKEY_FLAGS_SHIFT	= 0,
+	OCRDMA_WQE_LKEY_FLAGS_MASK	= 0xF
+};
+
+/* header WQE for all the SQ and RQ operations */
+struct ocrdma_hdr_wqe {
+	u32 cw;
+	union {
+		u32 rsvd_tag;
+		u32 rsvd_lkey_flags;
+	};
+	union {
+		u32 immdt;
+		u32 lkey;
+	};
+	u32 total_len;
+} __packed;
+
+struct ocrdma_ewqe_ud_hdr {
+	u32 rsvd_dest_qpn;
+	u32 qkey;
+	u32 rsvd_ahid;
+	u32 rsvd;
+} __packed;
+
+struct ocrdma_eth_basic {
+	u8 dmac[6];
+	u8 smac[6];
+	__be16 eth_type;
+} __packed;
+
+struct ocrdma_eth_vlan {
+	u8 dmac[6];
+	u8 smac[6];
+	__be16 eth_type;
+	__be16 vlan_tag;
+#define OCRDMA_ROCE_ETH_TYPE 0x8915
+	__be16 roce_eth_type;
+} __packed;
+
+struct ocrdma_grh {
+	__be32	tclass_flow;
+	__be32	pdid_hoplimit;
+	u8	sgid[16];
+	u8	dgid[16];
+	u16	rsvd;
+} __packed;
+
+#define OCRDMA_AV_VALID		Bit(0)
+#define OCRDMA_AV_VLAN_VALID	Bit(1)
+
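+/* Address vector: prebuilt Ethernet (optionally VLAN-tagged) header,
+ * GRH, and a valid flag.
+ */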
+struct ocrdma_av {
+	struct ocrdma_eth_vlan eth_hdr;
+	struct ocrdma_grh grh;
+	u32 valid;
+} __packed;
+
+#endif				/* __OCRDMA_SLI_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
new file mode 100644
index 0000000..e9f74d1
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -0,0 +1,2537 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) adapters.                   *
+ * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#include <linux/dma-mapping.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+
+#include "ocrdma.h"
+#include "ocrdma_hw.h"
+#include "ocrdma_verbs.h"
+#include "ocrdma_abi.h"
+
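+/* RoCE exposes a single pkey table entry containing the default pkey. */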
+int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+	if (index > 0)
+		return -EINVAL;
+
+	*pkey = 0xffff;
+	return 0;
+}
+
+int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
+		     int index, union ib_gid *sgid)
+{
+	struct ocrdma_dev *dev;
+
+	dev = get_ocrdma_dev(ibdev);
+	memset(sgid, 0, sizeof(*sgid));
+	if (index > OCRDMA_MAX_SGID)
+		return -EINVAL;
+
+	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
+
+	return 0;
+}
+
+int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
+{
+	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+
+	memset(attr, 0, sizeof *attr);
+	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
+	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
+	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
+	attr->max_mr_size = ~0ull;
+	attr->page_size_cap = 0xffff000;
+	attr->vendor_id = dev->nic_info.pdev->vendor;
+	attr->vendor_part_id = dev->nic_info.pdev->device;
+	attr->hw_ver = 0;
+	attr->max_qp = dev->attr.max_qp;
+	attr->max_ah = dev->attr.max_qp;
+	attr->max_qp_wr = dev->attr.max_wqe;
+
+	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
+					IB_DEVICE_RC_RNR_NAK_GEN |
+					IB_DEVICE_SHUTDOWN_PORT |
+					IB_DEVICE_SYS_IMAGE_GUID |
+					IB_DEVICE_LOCAL_DMA_LKEY;
+	attr->max_sge = dev->attr.max_send_sge;
+	attr->max_sge_rd = dev->attr.max_send_sge;
+	attr->max_cq = dev->attr.max_cq;
+	attr->max_cqe = dev->attr.max_cqe;
+	attr->max_mr = dev->attr.max_mr;
+	attr->max_mw = 0;
+	attr->max_pd = dev->attr.max_pd;
+	attr->atomic_cap = 0;
+	attr->max_fmr = 0;
+	attr->max_map_per_fmr = 0;
+	attr->max_qp_rd_atom =
+	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
+	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
+	attr->max_srq = (dev->attr.max_qp - 1);
+	attr->max_srq_sge = attr->max_sge;
+	attr->max_srq_wr = dev->attr.max_rqe;
+	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
+	attr->max_fast_reg_page_list_len = 0;
+	attr->max_pkeys = 1;
+	return 0;
+}
+
+int ocrdma_query_port(struct ib_device *ibdev,
+		      u8 port, struct ib_port_attr *props)
+{
+	enum ib_port_state port_state;
+	struct ocrdma_dev *dev;
+	struct net_device *netdev;
+
+	dev = get_ocrdma_dev(ibdev);
+	if (port > 1) {
+		ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
+			   dev->id, port);
+		return -EINVAL;
+	}
+	netdev = dev->nic_info.netdev;
+	if (netif_running(netdev) && netif_oper_up(netdev)) {
+		port_state = IB_PORT_ACTIVE;
+		props->phys_state = 5;
+	} else {
+		port_state = IB_PORT_DOWN;
+		props->phys_state = 3;
+	}
+	props->max_mtu = IB_MTU_4096;
+	props->active_mtu = iboe_get_mtu(netdev->mtu);
+	props->lid = 0;
+	props->lmc = 0;
+	props->sm_lid = 0;
+	props->sm_sl = 0;
+	props->state = port_state;
+	props->port_cap_flags =
+	    IB_PORT_CM_SUP |
+	    IB_PORT_REINIT_SUP |
+	    IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+	props->gid_tbl_len = OCRDMA_MAX_SGID;
+	props->pkey_tbl_len = 1;
+	props->bad_pkey_cntr = 0;
+	props->qkey_viol_cntr = 0;
+	props->active_width = IB_WIDTH_1X;
+	props->active_speed = 4;
+	props->max_msg_sz = 0x80000000;
+	props->max_vl_num = 4;
+	return 0;
+}
+
+int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
+		       struct ib_port_modify *props)
+{
+	struct ocrdma_dev *dev;
+
+	dev = get_ocrdma_dev(ibdev);
+	if (port > 1) {
+		ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
+			   dev->id, port);
+		return -EINVAL;
+	}
+	return 0;
+}
+
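+/* track the physical address ranges user space is allowed to mmap();
+ * ocrdma_mmap() only remaps ranges previously registered here.
+ */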
+static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
+			   unsigned long len)
+{
+	struct ocrdma_mm *mm;
+
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+	if (mm == NULL)
+		return -ENOMEM;
+	mm->key.phy_addr = phy_addr;
+	mm->key.len = len;
+	INIT_LIST_HEAD(&mm->entry);
+
+	mutex_lock(&uctx->mm_list_lock);
+	list_add_tail(&mm->entry, &uctx->mm_head);
+	mutex_unlock(&uctx->mm_list_lock);
+	return 0;
+}
+
+static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
+			    unsigned long len)
+{
+	struct ocrdma_mm *mm, *tmp;
+
+	mutex_lock(&uctx->mm_list_lock);
+	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
+		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+			continue;
+
+		list_del(&mm->entry);
+		kfree(mm);
+		break;
+	}
+	mutex_unlock(&uctx->mm_list_lock);
+}
+
+static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
+			      unsigned long len)
+{
+	bool found = false;
+	struct ocrdma_mm *mm;
+
+	mutex_lock(&uctx->mm_list_lock);
+	list_for_each_entry(mm, &uctx->mm_head, entry) {
+		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+			continue;
+
+		found = true;
+		break;
+	}
+	mutex_unlock(&uctx->mm_list_lock);
+	return found;
+}
+
+struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
+					  struct ib_udata *udata)
+{
+	int status;
+	struct ocrdma_ucontext *ctx;
+	struct ocrdma_alloc_ucontext_resp resp;
+	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
+
+	if (!udata)
+		return ERR_PTR(-EFAULT);
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+	ctx->dev = dev;
+	INIT_LIST_HEAD(&ctx->mm_head);
+	mutex_init(&ctx->mm_list_lock);
+
+	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
+					    &ctx->ah_tbl.pa, GFP_KERNEL);
+	if (!ctx->ah_tbl.va) {
+		kfree(ctx);
+		return ERR_PTR(-ENOMEM);
+	}
+	memset(ctx->ah_tbl.va, 0, map_len);
+	ctx->ah_tbl.len = map_len;
+
+	resp.ah_tbl_len = ctx->ah_tbl.len;
+	resp.ah_tbl_page = ctx->ah_tbl.pa;
+
+	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
+	if (status)
+		goto map_err;
+	resp.dev_id = dev->id;
+	resp.max_inline_data = dev->attr.max_inline_data;
+	resp.wqe_size = dev->attr.wqe_size;
+	resp.rqe_size = dev->attr.rqe_size;
+	resp.dpp_wqe_size = dev->attr.wqe_size;
+	resp.rsvd = 0;
+
+	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
+	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
+	if (status)
+		goto cpy_err;
+	return &ctx->ibucontext;
+
+cpy_err:
+	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
+map_err:
+	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
+			  ctx->ah_tbl.pa);
+	kfree(ctx);
+	return ERR_PTR(status);
+}
+
+int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+	struct ocrdma_mm *mm, *tmp;
+	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
+	struct pci_dev *pdev = uctx->dev->nic_info.pdev;
+
+	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
+	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
+			  uctx->ah_tbl.pa);
+
+	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
+		list_del(&mm->entry);
+		kfree(mm);
+	}
+	kfree(uctx);
+	return 0;
+}
+
+int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
+	struct ocrdma_dev *dev = ucontext->dev;
+	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
+	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
+	unsigned long len = (vma->vm_end - vma->vm_start);
+	int status = 0;
+	bool found;
+
+	if (vma->vm_start & (PAGE_SIZE - 1))
+		return -EINVAL;
+	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
+	if (!found)
+		return -EINVAL;
+
+	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
+		dev->nic_info.db_total_size)) &&
+		(len <=	dev->nic_info.db_page_size)) {
+		/* doorbell mapping */
+		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+					    len, vma->vm_page_prot);
+	} else if (dev->nic_info.dpp_unmapped_len &&
+		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
+		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
+			dev->nic_info.dpp_unmapped_len)) &&
+		(len <= dev->nic_info.dpp_unmapped_len)) {
+		/* dpp area mapping */
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+					    len, vma->vm_page_prot);
+	} else {
+		/* queue memory mapping */
+		status = remap_pfn_range(vma, vma->vm_start,
+					 vma->vm_pgoff, len, vma->vm_page_prot);
+	}
+	return status;
+}
+
+static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
+				struct ib_ucontext *ib_ctx,
+				struct ib_udata *udata)
+{
+	int status;
+	u64 db_page_addr;
+	u64 dpp_page_addr = 0;
+	u32 db_page_size;
+	struct ocrdma_alloc_pd_uresp rsp;
+	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+
+	rsp.id = pd->id;
+	rsp.dpp_enabled = pd->dpp_enabled;
+	db_page_addr = pd->dev->nic_info.unmapped_db +
+			(pd->id * pd->dev->nic_info.db_page_size);
+	db_page_size = pd->dev->nic_info.db_page_size;
+
+	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
+	if (status)
+		return status;
+
+	if (pd->dpp_enabled) {
+		dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr +
+				(pd->id * OCRDMA_DPP_PAGE_SIZE);
+		status = ocrdma_add_mmap(uctx, dpp_page_addr,
+				 OCRDMA_DPP_PAGE_SIZE);
+		if (status)
+			goto dpp_map_err;
+		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
+		rsp.dpp_page_addr_lo = dpp_page_addr;
+	}
+
+	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
+	if (status)
+		goto ucopy_err;
+
+	pd->uctx = uctx;
+	return 0;
+
+ucopy_err:
+	if (pd->dpp_enabled)
+		ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE);
+dpp_map_err:
+	ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
+	return status;
+}
+
+struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
+			      struct ib_ucontext *context,
+			      struct ib_udata *udata)
+{
+	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+	struct ocrdma_pd *pd;
+	int status;
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+	pd->dev = dev;
+	if (udata && context) {
+		pd->dpp_enabled = (dev->nic_info.dev_family ==
+					OCRDMA_GEN2_FAMILY) ? true : false;
+		pd->num_dpp_qp =
+			pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
+	}
+	status = ocrdma_mbx_alloc_pd(dev, pd);
+	if (status) {
+		kfree(pd);
+		return ERR_PTR(status);
+	}
+	atomic_set(&pd->use_cnt, 0);
+
+	if (udata && context) {
+		status = ocrdma_copy_pd_uresp(pd, context, udata);
+		if (status)
+			goto err;
+	}
+	return &pd->ibpd;
+
+err:
+	ocrdma_dealloc_pd(&pd->ibpd);
+	return ERR_PTR(status);
+}
+
+int ocrdma_dealloc_pd(struct ib_pd *ibpd)
+{
+	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+	struct ocrdma_dev *dev = pd->dev;
+	int status;
+	u64 usr_db;
+
+	if (atomic_read(&pd->use_cnt)) {
+		ocrdma_err("%s(%d) pd=0x%x is in use.\n",
+			   __func__, dev->id, pd->id);
+		status = -EFAULT;
+		goto dealloc_err;
+	}
+	status = ocrdma_mbx_dealloc_pd(dev, pd);
+	if (pd->uctx) {
+		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
+		    (pd->id * OCRDMA_DPP_PAGE_SIZE);
+		if (pd->dpp_enabled)
+			ocrdma_del_mmap(pd->uctx, dpp_db, OCRDMA_DPP_PAGE_SIZE);
+		usr_db = dev->nic_info.unmapped_db +
+		    (pd->id * dev->nic_info.db_page_size);
+		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
+	}
+	kfree(pd);
+dealloc_err:
+	return status;
+}
+
+static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
+					   int acc, u32 num_pbls,
+					   u32 addr_check)
+{
+	int status;
+	struct ocrdma_mr *mr;
+	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+	struct ocrdma_dev *dev = pd->dev;
+
+	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
+		ocrdma_err("%s(%d) leaving err, invalid access rights\n",
+			   __func__, dev->id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+	mr->hwmr.dev = dev;
+	mr->hwmr.fr_mr = 0;
+	mr->hwmr.local_rd = 1;
+	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
+	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+	mr->hwmr.num_pbls = num_pbls;
+
+	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check);
+	if (status) {
+		kfree(mr);
+		return ERR_PTR(-ENOMEM);
+	}
+	mr->pd = pd;
+	atomic_inc(&pd->use_cnt);
+	mr->ibmr.lkey = mr->hwmr.lkey;
+	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
+		mr->ibmr.rkey = mr->hwmr.lkey;
+	return mr;
+}
+
+struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+	struct ocrdma_mr *mr;
+
+	mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE);
+	if (IS_ERR(mr))
+		return ERR_CAST(mr);
+
+	return &mr->ibmr;
+}
+
+static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
+				   struct ocrdma_hw_mr *mr)
+{
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	int i = 0;
+
+	if (mr->pbl_table) {
+		for (i = 0; i < mr->num_pbls; i++) {
+			if (!mr->pbl_table[i].va)
+				continue;
+			dma_free_coherent(&pdev->dev, mr->pbl_size,
+					  mr->pbl_table[i].va,
+					  mr->pbl_table[i].pa);
+		}
+		kfree(mr->pbl_table);
+		mr->pbl_table = NULL;
+	}
+}
+
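+/* pick the smallest pbl size (a power-of-two multiple of
+ * OCRDMA_MIN_HPAGE_SIZE) for which the number of pbls needed to hold
+ * num_pbes page addresses stays below the device's max_num_mr_pbl limit.
+ */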
+static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
+{
+	u32 num_pbls = 0;
+	u32 idx = 0;
+	int status = 0;
+	u32 pbl_size;
+
+	do {
+		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
+		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
+			status = -EFAULT;
+			break;
+		}
+		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
+		num_pbls = num_pbls / (pbl_size / sizeof(u64));
+		idx++;
+	} while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);
+
+	mr->hwmr.num_pbes = num_pbes;
+	mr->hwmr.num_pbls = num_pbls;
+	mr->hwmr.pbl_size = pbl_size;
+	return status;
+}
+
+static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
+{
+	int status = 0;
+	int i;
+	u32 dma_len = mr->pbl_size;
+	struct pci_dev *pdev = dev->nic_info.pdev;
+	void *va;
+	dma_addr_t pa;
+
+	mr->pbl_table = kzalloc(sizeof(struct ocrdma_pbl) *
+				mr->num_pbls, GFP_KERNEL);
+
+	if (!mr->pbl_table)
+		return -ENOMEM;
+
+	for (i = 0; i < mr->num_pbls; i++) {
+		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+		if (!va) {
+			ocrdma_free_mr_pbl_tbl(dev, mr);
+			status = -ENOMEM;
+			break;
+		}
+		memset(va, 0, dma_len);
+		mr->pbl_table[i].va = va;
+		mr->pbl_table[i].pa = pa;
+	}
+	return status;
+}
+
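+/* walk the umem chunks and store each page's dma address in the pbes,
+ * moving to the next pbl once the current one is full.
+ */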
+static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+			    u32 num_pbes)
+{
+	struct ocrdma_pbe *pbe;
+	struct ib_umem_chunk *chunk;
+	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
+	struct ib_umem *umem = mr->umem;
+	int i, shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+
+	if (!mr->hwmr.num_pbes)
+		return;
+
+	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+	pbe_cnt = 0;
+
+	shift = ilog2(umem->page_size);
+
+	list_for_each_entry(chunk, &umem->chunk_list, list) {
+		/* get all the dma regions from the chunk. */
+		for (i = 0; i < chunk->nmap; i++) {
+			pages = sg_dma_len(&chunk->page_list[i]) >> shift;
+			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+				/* store the page address in pbe */
+				pbe->pa_lo =
+				    cpu_to_le32(sg_dma_address
+						(&chunk->page_list[i]) +
+						(umem->page_size * pg_cnt));
+				pbe->pa_hi =
+				    cpu_to_le32(upper_32_bits
+						((sg_dma_address
+						  (&chunk->page_list[i]) +
+						  umem->page_size * pg_cnt)));
+				pbe_cnt += 1;
+				total_num_pbes += 1;
+				pbe++;
+
+				/* done building pbes; the mbx cmd is issued by the caller. */
+				if (total_num_pbes == num_pbes)
+					return;
+
+				/* if the current pbl is full of pbes,
+				 * move to the next pbl.
+				 */
+				if (pbe_cnt ==
+					(mr->hwmr.pbl_size / sizeof(u64))) {
+					pbl_tbl++;
+					pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+					pbe_cnt = 0;
+				}
+			}
+		}
+	}
+}
+
+struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+				 u64 usr_addr, int acc, struct ib_udata *udata)
+{
+	int status = -ENOMEM;
+	struct ocrdma_dev *dev;
+	struct ocrdma_mr *mr;
+	struct ocrdma_pd *pd;
+	u32 num_pbes;
+
+	pd = get_ocrdma_pd(ibpd);
+	dev = pd->dev;
+
+	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
+		return ERR_PTR(-EINVAL);
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(status);
+	mr->hwmr.dev = dev;
+	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+	if (IS_ERR(mr->umem)) {
+		status = -EFAULT;
+		goto umem_err;
+	}
+	num_pbes = ib_umem_page_count(mr->umem);
+	status = ocrdma_get_pbl_info(mr, num_pbes);
+	if (status)
+		goto umem_err;
+
+	mr->hwmr.pbe_size = mr->umem->page_size;
+	mr->hwmr.fbo = mr->umem->offset;
+	mr->hwmr.va = usr_addr;
+	mr->hwmr.len = len;
+	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+	mr->hwmr.local_rd = 1;
+	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
+	if (status)
+		goto umem_err;
+	build_user_pbes(dev, mr, num_pbes);
+	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
+	if (status)
+		goto mbx_err;
+	mr->pd = pd;
+	atomic_inc(&pd->use_cnt);
+	mr->ibmr.lkey = mr->hwmr.lkey;
+	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
+		mr->ibmr.rkey = mr->hwmr.lkey;
+
+	return &mr->ibmr;
+
+mbx_err:
+	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+umem_err:
+	kfree(mr);
+	return ERR_PTR(status);
+}
+
+int ocrdma_dereg_mr(struct ib_mr *ib_mr)
+{
+	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
+	struct ocrdma_dev *dev = mr->hwmr.dev;
+	int status;
+
+	status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
+
+	if (mr->hwmr.fr_mr == 0)
+		ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+
+	atomic_dec(&mr->pd->use_cnt);
+	/* it could be user registered memory. */
+	if (mr->umem)
+		ib_umem_release(mr->umem);
+	kfree(mr);
+	return status;
+}
+
+static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
+				struct ib_ucontext *ib_ctx)
+{
+	int status;
+	struct ocrdma_ucontext *uctx;
+	struct ocrdma_create_cq_uresp uresp;
+
+	uresp.cq_id = cq->id;
+	uresp.page_size = cq->len;
+	uresp.num_pages = 1;
+	uresp.max_hw_cqe = cq->max_hw_cqe;
+	uresp.page_addr[0] = cq->pa;
+	uresp.db_page_addr = cq->dev->nic_info.unmapped_db;
+	uresp.db_page_size = cq->dev->nic_info.db_page_size;
+	uresp.phase_change = cq->phase_change ? 1 : 0;
+	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	if (status) {
+		ocrdma_err("%s(%d) copy error cqid=0x%x.\n",
+			   __func__, cq->dev->id, cq->id);
+		goto err;
+	}
+	uctx = get_ocrdma_ucontext(ib_ctx);
+	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
+	if (status)
+		goto err;
+	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
+	if (status) {
+		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
+		goto err;
+	}
+	cq->ucontext = uctx;
+err:
+	return status;
+}
+
+struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
+			       struct ib_ucontext *ib_ctx,
+			       struct ib_udata *udata)
+{
+	struct ocrdma_cq *cq;
+	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+	int status;
+	struct ocrdma_create_cq_ureq ureq;
+
+	if (udata) {
+		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+			return ERR_PTR(-EFAULT);
+	} else
+		ureq.dpp_cq = 0;
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&cq->cq_lock);
+	spin_lock_init(&cq->comp_handler_lock);
+	atomic_set(&cq->use_cnt, 0);
+	INIT_LIST_HEAD(&cq->sq_head);
+	INIT_LIST_HEAD(&cq->rq_head);
+	cq->dev = dev;
+
+	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
+	if (status) {
+		kfree(cq);
+		return ERR_PTR(status);
+	}
+	if (ib_ctx) {
+		status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx);
+		if (status)
+			goto ctx_err;
+	}
+	cq->phase = OCRDMA_CQE_VALID;
+	cq->arm_needed = true;
+	dev->cq_tbl[cq->id] = cq;
+
+	return &cq->ibcq;
+
+ctx_err:
+	ocrdma_mbx_destroy_cq(dev, cq);
+	kfree(cq);
+	return ERR_PTR(status);
+}
+
+int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
+		     struct ib_udata *udata)
+{
+	int status = 0;
+	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+
+	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
+		status = -EINVAL;
+		return status;
+	}
+	ibcq->cqe = new_cnt;
+	return status;
+}
+
+int ocrdma_destroy_cq(struct ib_cq *ibcq)
+{
+	int status;
+	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+	struct ocrdma_dev *dev = cq->dev;
+
+	if (atomic_read(&cq->use_cnt))
+		return -EINVAL;
+
+	status = ocrdma_mbx_destroy_cq(dev, cq);
+
+	if (cq->ucontext) {
+		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len);
+		ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
+				dev->nic_info.db_page_size);
+	}
+	dev->cq_tbl[cq->id] = NULL;
+
+	kfree(cq);
+	return status;
+}
+
+static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
+{
+	int status = -EINVAL;
+
+	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
+		dev->qp_tbl[qp->id] = qp;
+		status = 0;
+	}
+	return status;
+}
+
+static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
+{
+	dev->qp_tbl[qp->id] = NULL;
+}
+
+static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
+				  struct ib_qp_init_attr *attrs)
+{
+	if (attrs->qp_type != IB_QPT_GSI &&
+	    attrs->qp_type != IB_QPT_RC &&
+	    attrs->qp_type != IB_QPT_UD) {
+		ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n",
+			   __func__, dev->id, attrs->qp_type);
+		return -EINVAL;
+	}
+	if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
+		ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n",
+			   __func__, dev->id, attrs->cap.max_send_wr);
+		ocrdma_err("%s(%d) supported send_wr=0x%x\n",
+			   __func__, dev->id, dev->attr.max_wqe);
+		return -EINVAL;
+	}
+	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
+		ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n",
+			   __func__, dev->id, attrs->cap.max_recv_wr);
+		ocrdma_err("%s(%d) supported recv_wr=0x%x\n",
+			   __func__, dev->id, dev->attr.max_rqe);
+		return -EINVAL;
+	}
+	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
+		ocrdma_err("%s(%d) unsupported inline data size=0x%x"
+			   " requested\n", __func__, dev->id,
+			   attrs->cap.max_inline_data);
+		ocrdma_err("%s(%d) supported inline data size=0x%x\n",
+			   __func__, dev->id, dev->attr.max_inline_data);
+		return -EINVAL;
+	}
+	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
+		ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n",
+			   __func__, dev->id, attrs->cap.max_send_sge);
+		ocrdma_err("%s(%d) supported send_sge=0x%x\n",
+			   __func__, dev->id, dev->attr.max_send_sge);
+		return -EINVAL;
+	}
+	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
+		ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n",
+			   __func__, dev->id, attrs->cap.max_recv_sge);
+		ocrdma_err("%s(%d) supported recv_sge=0x%x\n",
+			   __func__, dev->id, dev->attr.max_recv_sge);
+		return -EINVAL;
+	}
+	/* unprivileged user space cannot create special QP */
+	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+		ocrdma_err
+		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
+		     __func__, dev->id, attrs->qp_type);
+		return -EINVAL;
+	}
+	/* allow creating only one GSI type of QP */
+	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
+		ocrdma_err("%s(%d) GSI special QPs already created.\n",
+			   __func__, dev->id);
+		return -EINVAL;
+	}
+	/* verify consumer QPs are not trying to use GSI QP's CQ */
+	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
+		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
+		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
+			ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
+				   __func__, dev->id);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
+				struct ib_udata *udata, int dpp_offset,
+				int dpp_credit_lmt, int srq)
+{
+	int status = 0;
+	u64 usr_db;
+	struct ocrdma_create_qp_uresp uresp;
+	struct ocrdma_dev *dev = qp->dev;
+	struct ocrdma_pd *pd = qp->pd;
+
+	memset(&uresp, 0, sizeof(uresp));
+	usr_db = dev->nic_info.unmapped_db +
+			(pd->id * dev->nic_info.db_page_size);
+	uresp.qp_id = qp->id;
+	uresp.sq_dbid = qp->sq.dbid;
+	uresp.num_sq_pages = 1;
+	uresp.sq_page_size = qp->sq.len;
+	uresp.sq_page_addr[0] = qp->sq.pa;
+	uresp.num_wqe_allocated = qp->sq.max_cnt;
+	if (!srq) {
+		uresp.rq_dbid = qp->rq.dbid;
+		uresp.num_rq_pages = 1;
+		uresp.rq_page_size = qp->rq.len;
+		uresp.rq_page_addr[0] = qp->rq.pa;
+		uresp.num_rqe_allocated = qp->rq.max_cnt;
+	}
+	uresp.db_page_addr = usr_db;
+	uresp.db_page_size = dev->nic_info.db_page_size;
+	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+		uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
+		uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
+			OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET;
+		uresp.db_shift = (qp->id < 128) ? 24 : 16;
+	} else {
+		uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
+		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
+		uresp.db_shift = 16;
+	}
+	uresp.free_wqe_delta = qp->sq.free_delta;
+	uresp.free_rqe_delta = qp->rq.free_delta;
+
+	if (qp->dpp_enabled) {
+		uresp.dpp_credit = dpp_credit_lmt;
+		uresp.dpp_offset = dpp_offset;
+	}
+	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	if (status) {
+		ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id);
+		goto err;
+	}
+	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
+				 uresp.sq_page_size);
+	if (status)
+		goto err;
+
+	if (!srq) {
+		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
+					 uresp.rq_page_size);
+		if (status)
+			goto rq_map_err;
+	}
+	return status;
+rq_map_err:
+	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
+err:
+	return status;
+}
+
+static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
+			     struct ocrdma_pd *pd)
+{
+	if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+		qp->sq_db = dev->nic_info.db +
+			(pd->id * dev->nic_info.db_page_size) +
+			OCRDMA_DB_GEN2_SQ_OFFSET;
+		qp->rq_db = dev->nic_info.db +
+			(pd->id * dev->nic_info.db_page_size) +
+			((qp->id < 128) ?
+			OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET);
+	} else {
+		qp->sq_db = dev->nic_info.db +
+			(pd->id * dev->nic_info.db_page_size) +
+			OCRDMA_DB_SQ_OFFSET;
+		qp->rq_db = dev->nic_info.db +
+			(pd->id * dev->nic_info.db_page_size) +
+			OCRDMA_DB_RQ_OFFSET;
+	}
+}
+
+static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
+{
+	qp->wqe_wr_id_tbl =
+	    kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt,
+		    GFP_KERNEL);
+	if (qp->wqe_wr_id_tbl == NULL)
+		return -ENOMEM;
+	qp->rqe_wr_id_tbl =
+	    kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL);
+	if (qp->rqe_wr_id_tbl == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
+				      struct ocrdma_pd *pd,
+				      struct ib_qp_init_attr *attrs)
+{
+	qp->pd = pd;
+	spin_lock_init(&qp->q_lock);
+	INIT_LIST_HEAD(&qp->sq_entry);
+	INIT_LIST_HEAD(&qp->rq_entry);
+
+	qp->qp_type = attrs->qp_type;
+	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
+	qp->max_inline_data = attrs->cap.max_inline_data;
+	qp->sq.max_sges = attrs->cap.max_send_sge;
+	qp->rq.max_sges = attrs->cap.max_recv_sge;
+	qp->state = OCRDMA_QPS_RST;
+}
+
+static void ocrdma_set_qp_use_cnt(struct ocrdma_qp *qp, struct ocrdma_pd *pd)
+{
+	atomic_inc(&pd->use_cnt);
+	atomic_inc(&qp->sq_cq->use_cnt);
+	atomic_inc(&qp->rq_cq->use_cnt);
+	if (qp->srq)
+		atomic_inc(&qp->srq->use_cnt);
+	qp->ibqp.qp_num = qp->id;
+}
+
+static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
+				   struct ib_qp_init_attr *attrs)
+{
+	if (attrs->qp_type == IB_QPT_GSI) {
+		dev->gsi_qp_created = 1;
+		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
+		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
+	}
+}
+
+struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
+			       struct ib_qp_init_attr *attrs,
+			       struct ib_udata *udata)
+{
+	int status;
+	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+	struct ocrdma_qp *qp;
+	struct ocrdma_dev *dev = pd->dev;
+	struct ocrdma_create_qp_ureq ureq;
+	u16 dpp_credit_lmt, dpp_offset;
+
+	status = ocrdma_check_qp_params(ibpd, dev, attrs);
+	if (status)
+		goto gen_err;
+
+	memset(&ureq, 0, sizeof(ureq));
+	if (udata) {
+		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+			return ERR_PTR(-EFAULT);
+	}
+	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+	if (!qp) {
+		status = -ENOMEM;
+		goto gen_err;
+	}
+	qp->dev = dev;
+	ocrdma_set_qp_init_params(qp, pd, attrs);
+
+	mutex_lock(&dev->dev_lock);
+	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
+					ureq.dpp_cq_id,
+					&dpp_offset, &dpp_credit_lmt);
+	if (status)
+		goto mbx_err;
+
+	/* user-space QPs' wr_id tables are managed by the library */
+	if (udata == NULL) {
+		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
+				  OCRDMA_QP_FAST_REG);
+		status = ocrdma_alloc_wr_id_tbl(qp);
+		if (status)
+			goto map_err;
+	}
+
+	status = ocrdma_add_qpn_map(dev, qp);
+	if (status)
+		goto map_err;
+	ocrdma_set_qp_db(dev, qp, pd);
+	if (udata) {
+		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
+					      dpp_credit_lmt,
+					      (attrs->srq != NULL));
+		if (status)
+			goto cpy_err;
+	}
+	ocrdma_store_gsi_qp_cq(dev, attrs);
+	ocrdma_set_qp_use_cnt(qp, pd);
+	mutex_unlock(&dev->dev_lock);
+	return &qp->ibqp;
+
+cpy_err:
+	ocrdma_del_qpn_map(dev, qp);
+map_err:
+	ocrdma_mbx_destroy_qp(dev, qp);
+mbx_err:
+	mutex_unlock(&dev->dev_lock);
+	kfree(qp->wqe_wr_id_tbl);
+	kfree(qp->rqe_wr_id_tbl);
+	kfree(qp);
+	ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status);
+gen_err:
+	return ERR_PTR(status);
+}
+
+int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		      int attr_mask)
+{
+	int status = 0;
+	struct ocrdma_qp *qp;
+	struct ocrdma_dev *dev;
+	enum ib_qp_state old_qps;
+
+	qp = get_ocrdma_qp(ibqp);
+	dev = qp->dev;
+	if (attr_mask & IB_QP_STATE)
+		status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps);
+	/* if the new and previous states are the same, the hw doesn't
+	 * need to be told about it.
+	 */
+	if (status < 0)
+		return status;
+	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
+	return status;
+}
+
+int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_udata *udata)
+{
+	unsigned long flags;
+	int status = -EINVAL;
+	struct ocrdma_qp *qp;
+	struct ocrdma_dev *dev;
+	enum ib_qp_state old_qps, new_qps;
+
+	qp = get_ocrdma_qp(ibqp);
+	dev = qp->dev;
+
+	/* synchronize with multiple contexts trying to change/retrieve qps */
+	mutex_lock(&dev->dev_lock);
+	/* synchronize with wqe, rqe posting and cqe processing contexts */
+	spin_lock_irqsave(&qp->q_lock, flags);
+	old_qps = get_ibqp_state(qp->state);
+	if (attr_mask & IB_QP_STATE)
+		new_qps = attr->qp_state;
+	else
+		new_qps = old_qps;
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+
+	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
+		ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for "
+			   "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
+			   __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
+			   old_qps, new_qps);
+		goto param_err;
+	}
+
+	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
+	if (status > 0)
+		status = 0;
+param_err:
+	mutex_unlock(&dev->dev_lock);
+	return status;
+}
+
+static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
+{
+	switch (mtu) {
+	case 256:
+		return IB_MTU_256;
+	case 512:
+		return IB_MTU_512;
+	case 1024:
+		return IB_MTU_1024;
+	case 2048:
+		return IB_MTU_2048;
+	case 4096:
+		return IB_MTU_4096;
+	default:
+		return IB_MTU_1024;
+	}
+}
+
+static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
+{
+	int ib_qp_acc_flags = 0;
+
+	if (qp_cap_flags & OCRDMA_QP_INB_WR)
+		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
+	if (qp_cap_flags & OCRDMA_QP_INB_RD)
+		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
+	return ib_qp_acc_flags;
+}
+
+int ocrdma_query_qp(struct ib_qp *ibqp,
+		    struct ib_qp_attr *qp_attr,
+		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+	int status;
+	u32 qp_state;
+	struct ocrdma_qp_params params;
+	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
+	struct ocrdma_dev *dev = qp->dev;
+
+	memset(&params, 0, sizeof(params));
+	mutex_lock(&dev->dev_lock);
+	status = ocrdma_mbx_query_qp(dev, qp, &params);
+	mutex_unlock(&dev->dev_lock);
+	if (status)
+		goto mbx_err;
+	qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
+	qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
+	qp_attr->path_mtu =
+		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
+				OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
+				OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
+	qp_attr->path_mig_state = IB_MIG_MIGRATED;
+	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
+	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
+	qp_attr->dest_qp_num =
+	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
+
+	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
+	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
+	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
+	qp_attr->cap.max_send_sge = qp->sq.max_sges;
+	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+	qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
+	qp_init_attr->cap = qp_attr->cap;
+	memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
+	       sizeof(params.dgid));
+	qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
+	    OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
+	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
+	qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
+					  OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
+						OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
+	qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
+					      OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+						OCRDMA_QP_PARAMS_TCLASS_SHIFT;
+
+	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
+	qp_attr->ah_attr.port_num = 1;
+	qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
+			       OCRDMA_QP_PARAMS_SL_MASK) >>
+				OCRDMA_QP_PARAMS_SL_SHIFT;
+	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
+			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
+				OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
+	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
+			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
+				OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
+	qp_attr->retry_cnt =
+	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
+		OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
+	qp_attr->min_rnr_timer = 0;
+	qp_attr->pkey_index = 0;
+	qp_attr->port_num = 1;
+	qp_attr->ah_attr.src_path_bits = 0;
+	qp_attr->ah_attr.static_rate = 0;
+	qp_attr->alt_pkey_index = 0;
+	qp_attr->alt_port_num = 0;
+	qp_attr->alt_timeout = 0;
+	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
+	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
+		    OCRDMA_QP_PARAMS_STATE_SHIFT;
+	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
+	qp_attr->max_dest_rd_atomic =
+	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
+	qp_attr->max_rd_atomic =
+	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
+	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
+				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
+mbx_err:
+	return status;
+}
+
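+/* a set bit in idx_bit_fields marks a free slot in the srq's shadow
+ * wr_id table; toggling bit 'idx' claims or releases that slot.
+ */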
+static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
+{
+	int i = idx / 32;
+	unsigned int mask = (1 << (idx % 32));
+
+	if (srq->idx_bit_fields[i] & mask)
+		srq->idx_bit_fields[i] &= ~mask;
+	else
+		srq->idx_bit_fields[i] |= mask;
+}
+
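+/* number of entries that can still be posted to a circular hw queue;
+ * free_delta entries are always excluded from the count.
+ */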
+static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
+{
+	int free_cnt;
+	if (q->head >= q->tail)
+		free_cnt = (q->max_cnt - q->head) + q->tail;
+	else
+		free_cnt = q->tail - q->head;
+	if (q->free_delta)
+		free_cnt -= q->free_delta;
+	return free_cnt;
+}
+
+static int is_hw_sq_empty(struct ocrdma_qp *qp)
+{
+	return (qp->sq.tail == qp->sq.head &&
+		ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
+}
+
+static int is_hw_rq_empty(struct ocrdma_qp *qp)
+{
+	return (qp->rq.tail == qp->rq.head) ? 1 : 0;
+}
+
+static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
+{
+	return q->va + (q->head * q->entry_size);
+}
+
+static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
+				      u32 idx)
+{
+	return q->va + (idx * q->entry_size);
+}
+
+static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
+{
+	q->head = (q->head + 1) & q->max_wqe_idx;
+}
+
+static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
+{
+	q->tail = (q->tail + 1) & q->max_wqe_idx;
+}
+
+/* discard the cqe for a given QP */
+static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
+{
+	unsigned long cq_flags;
+	unsigned long flags;
+	int discard_cnt = 0;
+	u32 cur_getp, stop_getp;
+	struct ocrdma_cqe *cqe;
+	u32 qpn = 0;
+
+	spin_lock_irqsave(&cq->cq_lock, cq_flags);
+
+	/* traverse the CQEs in the hw CQ, find those matching the given
+	 * qp and mark them discarded by clearing qpn.
+	 * The doorbell is rung in poll_cq() since we don't complete
+	 * cqes out of order.
+	 */
+
+	cur_getp = cq->getp;
+	/* find up to where we reap the cq. */
+	stop_getp = cur_getp;
+	do {
+		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
+			break;
+
+		cqe = cq->va + cur_getp;
+		/* exit if (a) we are done reaping the whole hw cq, or
+		 * (b) the qp's sq/rq becomes empty.
+		 */
+		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
+		/* if previously discarded cqe found, skip that too. */
+		/* check for matching qp */
+		if (qpn == 0 || qpn != qp->id)
+			goto skip_cqe;
+
+		/* mark cqe discarded so that it is not picked up later
+		 * in the poll_cq().
+		 */
+		discard_cnt += 1;
+		cqe->cmn.qpn = 0;
+		if (is_cqe_for_sq(cqe))
+			ocrdma_hwq_inc_tail(&qp->sq);
+		else {
+			if (qp->srq) {
+				spin_lock_irqsave(&qp->srq->q_lock, flags);
+				ocrdma_hwq_inc_tail(&qp->srq->rq);
+				ocrdma_srq_toggle_bit(qp->srq, cur_getp);
+				spin_unlock_irqrestore(&qp->srq->q_lock, flags);
+
+			} else
+				ocrdma_hwq_inc_tail(&qp->rq);
+		}
+skip_cqe:
+		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
+	} while (cur_getp != stop_getp);
+	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
+}
+
+static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
+{
+	int found = false;
+	unsigned long flags;
+	struct ocrdma_dev *dev = qp->dev;
+	/* sync with any active CQ poll */
+
+	spin_lock_irqsave(&dev->flush_q_lock, flags);
+	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
+	if (found)
+		list_del(&qp->sq_entry);
+	if (!qp->srq) {
+		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
+		if (found)
+			list_del(&qp->rq_entry);
+	}
+	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
+}
+
+int ocrdma_destroy_qp(struct ib_qp *ibqp)
+{
+	int status;
+	struct ocrdma_pd *pd;
+	struct ocrdma_qp *qp;
+	struct ocrdma_dev *dev;
+	struct ib_qp_attr attrs;
+	int attr_mask = IB_QP_STATE;
+	unsigned long flags;
+
+	qp = get_ocrdma_qp(ibqp);
+	dev = qp->dev;
+
+	attrs.qp_state = IB_QPS_ERR;
+	pd = qp->pd;
+
+	/* change the QP state to ERROR */
+	_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
+
+	/* ensure that CQEs for a newly created QP (whose id may be the same
+	 * as that of the QP just being destroyed) don't get discarded until
+	 * the old CQEs are discarded.
+	 */
+	mutex_lock(&dev->dev_lock);
+	status = ocrdma_mbx_destroy_qp(dev, qp);
+
+	/*
+	 * acquire CQ lock while destroy is in progress, in order to
+	 * protect against processing in-flight CQEs for this QP.
+	 */
+	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
+	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
+		spin_lock(&qp->rq_cq->cq_lock);
+
+	ocrdma_del_qpn_map(dev, qp);
+
+	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
+		spin_unlock(&qp->rq_cq->cq_lock);
+	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
+
+	if (!pd->uctx) {
+		ocrdma_discard_cqes(qp, qp->sq_cq);
+		ocrdma_discard_cqes(qp, qp->rq_cq);
+	}
+	mutex_unlock(&dev->dev_lock);
+
+	if (pd->uctx) {
+		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
+		if (!qp->srq)
+			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
+	}
+
+	ocrdma_del_flush_qp(qp);
+
+	atomic_dec(&qp->pd->use_cnt);
+	atomic_dec(&qp->sq_cq->use_cnt);
+	atomic_dec(&qp->rq_cq->use_cnt);
+	if (qp->srq)
+		atomic_dec(&qp->srq->use_cnt);
+	kfree(qp->wqe_wr_id_tbl);
+	kfree(qp->rqe_wr_id_tbl);
+	kfree(qp);
+	return status;
+}
+
+static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
+{
+	int status;
+	struct ocrdma_create_srq_uresp uresp;
+
+	uresp.rq_dbid = srq->rq.dbid;
+	uresp.num_rq_pages = 1;
+	uresp.rq_page_addr[0] = srq->rq.pa;
+	uresp.rq_page_size = srq->rq.len;
+	uresp.db_page_addr = srq->dev->nic_info.unmapped_db +
+	    (srq->pd->id * srq->dev->nic_info.db_page_size);
+	uresp.db_page_size = srq->dev->nic_info.db_page_size;
+	uresp.num_rqe_allocated = srq->rq.max_cnt;
+	uresp.free_rqe_delta = 1;
+	if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
+		uresp.db_shift = 24;
+	} else {
+		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
+		uresp.db_shift = 16;
+	}
+
+	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	if (status)
+		return status;
+	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
+				 uresp.rq_page_size);
+	if (status)
+		return status;
+	return status;
+}
+
+struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
+				 struct ib_srq_init_attr *init_attr,
+				 struct ib_udata *udata)
+{
+	int status = -ENOMEM;
+	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+	struct ocrdma_dev *dev = pd->dev;
+	struct ocrdma_srq *srq;
+
+	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
+		return ERR_PTR(-EINVAL);
+	if (init_attr->attr.max_wr > dev->attr.max_rqe)
+		return ERR_PTR(-EINVAL);
+
+	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+	if (!srq)
+		return ERR_PTR(status);
+
+	spin_lock_init(&srq->q_lock);
+	srq->dev = dev;
+	srq->pd = pd;
+	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
+	status = ocrdma_mbx_create_srq(srq, init_attr, pd);
+	if (status)
+		goto err;
+
+	if (udata == NULL) {
+		srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
+			    GFP_KERNEL);
+		if (srq->rqe_wr_id_tbl == NULL)
+			goto arm_err;
+
+		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
+		    (srq->rq.max_cnt % 32 ? 1 : 0);
+		srq->idx_bit_fields =
+		    kmalloc(srq->bit_fields_len * sizeof(u32), GFP_KERNEL);
+		if (srq->idx_bit_fields == NULL)
+			goto arm_err;
+		memset(srq->idx_bit_fields, 0xff,
+		       srq->bit_fields_len * sizeof(u32));
+	}
+
+	if (init_attr->attr.srq_limit) {
+		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
+		if (status)
+			goto arm_err;
+	}
+
+	atomic_set(&srq->use_cnt, 0);
+	if (udata) {
+		status = ocrdma_copy_srq_uresp(srq, udata);
+		if (status)
+			goto arm_err;
+	}
+
+	atomic_inc(&pd->use_cnt);
+	return &srq->ibsrq;
+
+arm_err:
+	ocrdma_mbx_destroy_srq(dev, srq);
+err:
+	kfree(srq->rqe_wr_id_tbl);
+	kfree(srq->idx_bit_fields);
+	kfree(srq);
+	return ERR_PTR(status);
+}
+
+int ocrdma_modify_srq(struct ib_srq *ibsrq,
+		      struct ib_srq_attr *srq_attr,
+		      enum ib_srq_attr_mask srq_attr_mask,
+		      struct ib_udata *udata)
+{
+	int status = 0;
+	struct ocrdma_srq *srq;
+
+	srq = get_ocrdma_srq(ibsrq);
+	if (srq_attr_mask & IB_SRQ_MAX_WR)
+		status = -EINVAL;
+	else
+		status = ocrdma_mbx_modify_srq(srq, srq_attr);
+	return status;
+}
+
+int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+	int status;
+	struct ocrdma_srq *srq;
+
+	srq = get_ocrdma_srq(ibsrq);
+	status = ocrdma_mbx_query_srq(srq, srq_attr);
+	return status;
+}
+
+int ocrdma_destroy_srq(struct ib_srq *ibsrq)
+{
+	int status;
+	struct ocrdma_srq *srq;
+	struct ocrdma_dev *dev;
+
+	srq = get_ocrdma_srq(ibsrq);
+	dev = srq->dev;
+	if (atomic_read(&srq->use_cnt)) {
+		ocrdma_err("%s(%d) err, srq=0x%x in use\n",
+			   __func__, dev->id, srq->id);
+		return -EAGAIN;
+	}
+
+	status = ocrdma_mbx_destroy_srq(dev, srq);
+
+	if (srq->pd->uctx)
+		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);
+
+	atomic_dec(&srq->pd->use_cnt);
+	kfree(srq->idx_bit_fields);
+	kfree(srq->rqe_wr_id_tbl);
+	kfree(srq);
+	return status;
+}
+
+/* unprivileged verbs and their support functions. */
+static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
+				struct ocrdma_hdr_wqe *hdr,
+				struct ib_send_wr *wr)
+{
+	struct ocrdma_ewqe_ud_hdr *ud_hdr =
+		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
+	struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
+
+	ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
+	if (qp->qp_type == IB_QPT_GSI)
+		ud_hdr->qkey = qp->qkey;
+	else
+		ud_hdr->qkey = wr->wr.ud.remote_qkey;
+	ud_hdr->rsvd_ahid = ah->id;
+}
+
+static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
+			      struct ocrdma_sge *sge, int num_sge,
+			      struct ib_sge *sg_list)
+{
+	int i;
+
+	for (i = 0; i < num_sge; i++) {
+		sge[i].lrkey = sg_list[i].lkey;
+		sge[i].addr_lo = sg_list[i].addr;
+		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
+		sge[i].len = sg_list[i].length;
+		hdr->total_len += sg_list[i].length;
+	}
+	if (num_sge == 0)
+		memset(sge, 0, sizeof(*sge));
+}
+
+static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
+				    struct ocrdma_hdr_wqe *hdr,
+				    struct ocrdma_sge *sge,
+				    struct ib_send_wr *wr, u32 wqe_size)
+{
+	if (wr->send_flags & IB_SEND_INLINE) {
+		if (wr->sg_list[0].length > qp->max_inline_data) {
+			ocrdma_err("%s() supported_len=0x%x,"
+				" unsupported len req=0x%x\n", __func__,
+				qp->max_inline_data, wr->sg_list[0].length);
+			return -EINVAL;
+		}
+		memcpy(sge,
+		       (void *)(unsigned long)wr->sg_list[0].addr,
+		       wr->sg_list[0].length);
+		hdr->total_len = wr->sg_list[0].length;
+		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
+		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
+	} else {
+		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
+		if (wr->num_sge)
+			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
+		else
+			wqe_size += sizeof(struct ocrdma_sge);
+		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
+	}
+	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
+	return 0;
+}
+
+static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+			     struct ib_send_wr *wr)
+{
+	int status;
+	struct ocrdma_sge *sge;
+	u32 wqe_size = sizeof(*hdr);
+
+	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
+		ocrdma_build_ud_hdr(qp, hdr, wr);
+		sge = (struct ocrdma_sge *)(hdr + 2);
+		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
+	} else
+		sge = (struct ocrdma_sge *)(hdr + 1);
+
+	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
+	return status;
+}
+
+static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+			      struct ib_send_wr *wr)
+{
+	int status;
+	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
+	struct ocrdma_sge *sge = ext_rw + 1;
+	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
+
+	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
+	if (status)
+		return status;
+	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
+	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
+	ext_rw->lrkey = wr->wr.rdma.rkey;
+	ext_rw->len = hdr->total_len;
+	return 0;
+}
+
+static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+			      struct ib_send_wr *wr)
+{
+	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
+	struct ocrdma_sge *sge = ext_rw + 1;
+	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
+	    sizeof(struct ocrdma_hdr_wqe);
+
+	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
+	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
+	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
+	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
+
+	ext_rw->addr_lo = wr->wr.rdma.remote_addr;
+	ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
+	ext_rw->lrkey = wr->wr.rdma.rkey;
+	ext_rw->len = hdr->total_len;
+}
+
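+/* SQ doorbell value: sq dbid in the low 16 bits, bit 16 set to report
+ * one newly posted wqe.
+ */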
+static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
+{
+	u32 val = qp->sq.dbid | (1 << 16);
+
+	iowrite32(val, qp->sq_db);
+}
+
+int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+		     struct ib_send_wr **bad_wr)
+{
+	int status = 0;
+	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
+	struct ocrdma_hdr_wqe *hdr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
+		spin_unlock_irqrestore(&qp->q_lock, flags);
+		return -EINVAL;
+	}
+
+	while (wr) {
+		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
+		    wr->num_sge > qp->sq.max_sges) {
+			status = -ENOMEM;
+			break;
+		}
+		hdr = ocrdma_hwq_head(&qp->sq);
+		hdr->cw = 0;
+		if (wr->send_flags & IB_SEND_SIGNALED)
+			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
+		if (wr->send_flags & IB_SEND_FENCE)
+			hdr->cw |=
+			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
+		if (wr->send_flags & IB_SEND_SOLICITED)
+			hdr->cw |=
+			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
+		hdr->total_len = 0;
+		switch (wr->opcode) {
+		case IB_WR_SEND_WITH_IMM:
+			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
+			hdr->immdt = ntohl(wr->ex.imm_data);
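+			/* fall through */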
+		case IB_WR_SEND:
+			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
+			status = ocrdma_build_send(qp, hdr, wr);
+			break;
+		case IB_WR_SEND_WITH_INV:
+			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
+			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
+			hdr->lkey = wr->ex.invalidate_rkey;
+			status = ocrdma_build_send(qp, hdr, wr);
+			break;
+		case IB_WR_RDMA_WRITE_WITH_IMM:
+			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
+			hdr->immdt = ntohl(wr->ex.imm_data);
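+			/* fall through */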
+		case IB_WR_RDMA_WRITE:
+			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
+			status = ocrdma_build_write(qp, hdr, wr);
+			break;
+		case IB_WR_RDMA_READ_WITH_INV:
+			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
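+			/* fall through */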
+		case IB_WR_RDMA_READ:
+			ocrdma_build_read(qp, hdr, wr);
+			break;
+		case IB_WR_LOCAL_INV:
+			hdr->cw |=
+			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
+			hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) /
+				OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
+			hdr->lkey = wr->ex.invalidate_rkey;
+			break;
+		default:
+			status = -EINVAL;
+			break;
+		}
+		if (status) {
+			*bad_wr = wr;
+			break;
+		}
+		if (wr->send_flags & IB_SEND_SIGNALED)
+			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
+		else
+			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
+		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
+		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
+				   OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
+		/* make sure wqe is written before adapter can access it */
+		wmb();
+		/* inform hw to start processing it */
+		ocrdma_ring_sq_db(qp);
+
+		/* update pointer, counter for next wr */
+		ocrdma_hwq_inc_head(&qp->sq);
+		wr = wr->next;
+	}
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+	return status;
+}
+
+static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
+{
+	u32 val = qp->rq.dbid | (1 << OCRDMA_GET_NUM_POSTED_SHIFT_VAL(qp));
+
+	iowrite32(val, qp->rq_db);
+}
+
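+/* fill a receive wqe from the ib_recv_wr; 'tag' is the shadow-table
+ * index echoed back in the cqe (allocated from the srq index pool for
+ * srqs, 0 for ordinary rqs).
+ */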
+static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
+			     u16 tag)
+{
+	u32 wqe_size = 0;
+	struct ocrdma_sge *sge;
+	if (wr->num_sge)
+		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
+	else
+		wqe_size = sizeof(*sge) + sizeof(*rqe);
+
+	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
+				OCRDMA_WQE_SIZE_SHIFT);
+	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
+	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
+	rqe->total_len = 0;
+	rqe->rsvd_tag = tag;
+	sge = (struct ocrdma_sge *)(rqe + 1);
+	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
+	ocrdma_cpu_to_le32(rqe, wqe_size);
+}
+
+int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+		     struct ib_recv_wr **bad_wr)
+{
+	int status = 0;
+	unsigned long flags;
+	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
+	struct ocrdma_hdr_wqe *rqe;
+
+	spin_lock_irqsave(&qp->q_lock, flags);
+	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
+		spin_unlock_irqrestore(&qp->q_lock, flags);
+		*bad_wr = wr;
+		return -EINVAL;
+	}
+	while (wr) {
+		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
+		    wr->num_sge > qp->rq.max_sges) {
+			*bad_wr = wr;
+			status = -ENOMEM;
+			break;
+		}
+		rqe = ocrdma_hwq_head(&qp->rq);
+		ocrdma_build_rqe(rqe, wr, 0);
+
+		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
+		/* make sure rqe is written before adapter can access it */
+		wmb();
+
+		/* inform hw to start processing it */
+		ocrdma_ring_rq_db(qp);
+
+		/* update pointer, counter for next wr */
+		ocrdma_hwq_inc_head(&qp->rq);
+		wr = wr->next;
+	}
+	spin_unlock_irqrestore(&qp->q_lock, flags);
+	return status;
+}
+
+/* cqes for an srq's rqes can potentially arrive out of order.
+ * 'index' gives the entry in the shadow table where the wr_id is
+ * stored; the tag/index is returned in the cqe to refer back to
+ * the corresponding rqe.
+ */
+static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
+{
+	int row = 0;
+	int indx = 0;
+
+	for (row = 0; row < srq->bit_fields_len; row++) {
+		if (srq->idx_bit_fields[row]) {
+			indx = ffs(srq->idx_bit_fields[row]);
+			indx = (row * 32) + (indx - 1);
+			if (indx >= srq->rq.max_cnt)
+				BUG();
+			ocrdma_srq_toggle_bit(srq, indx);
+			break;
+		}
+	}
+
+	if (row == srq->bit_fields_len)
+		BUG();
+	return indx;
+}
+
+static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
+{
+	u32 val = srq->rq.dbid | (1 << 16);
+
+	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
+}
+
+int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+			 struct ib_recv_wr **bad_wr)
+{
+	int status = 0;
+	unsigned long flags;
+	struct ocrdma_srq *srq;
+	struct ocrdma_hdr_wqe *rqe;
+	u16 tag;
+
+	srq = get_ocrdma_srq(ibsrq);
+
+	spin_lock_irqsave(&srq->q_lock, flags);
+	while (wr) {
+		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
+		    wr->num_sge > srq->rq.max_sges) {
+			status = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+		tag = ocrdma_srq_get_idx(srq);
+		rqe = ocrdma_hwq_head(&srq->rq);
+		ocrdma_build_rqe(rqe, wr, tag);
+
+		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
+		/* make sure rqe is written before adapter can perform DMA */
+		wmb();
+		/* inform hw to start processing it */
+		ocrdma_ring_srq_db(srq);
+		/* update pointer, counter for next wr */
+		ocrdma_hwq_inc_head(&srq->rq);
+		wr = wr->next;
+	}
+	spin_unlock_irqrestore(&srq->q_lock, flags);
+	return status;
+}
+
+static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
+{
+	enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR;
+
+	switch (status) {
+	case OCRDMA_CQE_GENERAL_ERR:
+		ibwc_status = IB_WC_GENERAL_ERR;
+		break;
+	case OCRDMA_CQE_LOC_LEN_ERR:
+		ibwc_status = IB_WC_LOC_LEN_ERR;
+		break;
+	case OCRDMA_CQE_LOC_QP_OP_ERR:
+		ibwc_status = IB_WC_LOC_QP_OP_ERR;
+		break;
+	case OCRDMA_CQE_LOC_EEC_OP_ERR:
+		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
+		break;
+	case OCRDMA_CQE_LOC_PROT_ERR:
+		ibwc_status = IB_WC_LOC_PROT_ERR;
+		break;
+	case OCRDMA_CQE_WR_FLUSH_ERR:
+		ibwc_status = IB_WC_WR_FLUSH_ERR;
+		break;
+	case OCRDMA_CQE_MW_BIND_ERR:
+		ibwc_status = IB_WC_MW_BIND_ERR;
+		break;
+	case OCRDMA_CQE_BAD_RESP_ERR:
+		ibwc_status = IB_WC_BAD_RESP_ERR;
+		break;
+	case OCRDMA_CQE_LOC_ACCESS_ERR:
+		ibwc_status = IB_WC_LOC_ACCESS_ERR;
+		break;
+	case OCRDMA_CQE_REM_INV_REQ_ERR:
+		ibwc_status = IB_WC_REM_INV_REQ_ERR;
+		break;
+	case OCRDMA_CQE_REM_ACCESS_ERR:
+		ibwc_status = IB_WC_REM_ACCESS_ERR;
+		break;
+	case OCRDMA_CQE_REM_OP_ERR:
+		ibwc_status = IB_WC_REM_OP_ERR;
+		break;
+	case OCRDMA_CQE_RETRY_EXC_ERR:
+		ibwc_status = IB_WC_RETRY_EXC_ERR;
+		break;
+	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
+		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
+		break;
+	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
+		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
+		break;
+	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
+		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
+		break;
+	case OCRDMA_CQE_REM_ABORT_ERR:
+		ibwc_status = IB_WC_REM_ABORT_ERR;
+		break;
+	case OCRDMA_CQE_INV_EECN_ERR:
+		ibwc_status = IB_WC_INV_EECN_ERR;
+		break;
+	case OCRDMA_CQE_INV_EEC_STATE_ERR:
+		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
+		break;
+	case OCRDMA_CQE_FATAL_ERR:
+		ibwc_status = IB_WC_FATAL_ERR;
+		break;
+	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
+		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
+		break;
+	default:
+		ibwc_status = IB_WC_GENERAL_ERR;
+		break;
+	}
+	return ibwc_status;
+}
+
+static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
+		      u32 wqe_idx)
+{
+	struct ocrdma_hdr_wqe *hdr;
+	struct ocrdma_sge *rw;
+	int opcode;
+
+	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
+
+	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
+	/* Undo the hdr->cw swap */
+	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
+	switch (opcode) {
+	case OCRDMA_WRITE:
+		ibwc->opcode = IB_WC_RDMA_WRITE;
+		break;
+	case OCRDMA_READ:
+		rw = (struct ocrdma_sge *)(hdr + 1);
+		ibwc->opcode = IB_WC_RDMA_READ;
+		ibwc->byte_len = rw->len;
+		break;
+	case OCRDMA_SEND:
+		ibwc->opcode = IB_WC_SEND;
+		break;
+	case OCRDMA_LKEY_INV:
+		ibwc->opcode = IB_WC_LOCAL_INV;
+		break;
+	default:
+		ibwc->status = IB_WC_GENERAL_ERR;
+		ocrdma_err("%s() invalid opcode received = 0x%x\n",
+			   __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
+		break;
+	}
+}
+
+static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
+						struct ocrdma_cqe *cqe)
+{
+	if (is_cqe_for_sq(cqe)) {
+		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+				cqe->flags_status_srcqpn) &
+					~OCRDMA_CQE_STATUS_MASK);
+		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+				cqe->flags_status_srcqpn) |
+				(OCRDMA_CQE_WR_FLUSH_ERR <<
+					OCRDMA_CQE_STATUS_SHIFT));
+	} else {
+		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
+			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+					cqe->flags_status_srcqpn) &
+						~OCRDMA_CQE_UD_STATUS_MASK);
+			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+					cqe->flags_status_srcqpn) |
+					(OCRDMA_CQE_WR_FLUSH_ERR <<
+						OCRDMA_CQE_UD_STATUS_SHIFT));
+		} else {
+			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+					cqe->flags_status_srcqpn) &
+						~OCRDMA_CQE_STATUS_MASK);
+			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+					cqe->flags_status_srcqpn) |
+					(OCRDMA_CQE_WR_FLUSH_ERR <<
+						OCRDMA_CQE_STATUS_SHIFT));
+		}
+	}
+}
+
+static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
+				  struct ocrdma_qp *qp, int status)
+{
+	bool expand = false;
+
+	ibwc->byte_len = 0;
+	ibwc->qp = &qp->ibqp;
+	ibwc->status = ocrdma_to_ibwc_err(status);
+
+	ocrdma_flush_qp(qp);
+	ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL);
+
+	/* if a wqe/rqe is still pending for which a cqe must be returned,
+	 * trigger expanding it.
+	 */
+	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
+		expand = true;
+		ocrdma_set_cqe_status_flushed(qp, cqe);
+	}
+	return expand;
+}
+
+static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
+				  struct ocrdma_qp *qp, int status)
+{
+	ibwc->opcode = IB_WC_RECV;
+	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
+	ocrdma_hwq_inc_tail(&qp->rq);
+
+	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
+}
+
+static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
+				  struct ocrdma_qp *qp, int status)
+{
+	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
+	ocrdma_hwq_inc_tail(&qp->sq);
+
+	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
+}
+
+
+static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
+				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
+				 bool *polled, bool *stop)
+{
+	bool expand;
+	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
+		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+
+	/* The hw SQ is empty but the RQ is not, so keep the cqe
+	 * in order to get the cq event again.
+	 */
+	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
+		/* when cq for rq and sq is same, it is safe to return
+		 * flush cqe for RQEs.
+		 */
+		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
+			*polled = true;
+			status = OCRDMA_CQE_WR_FLUSH_ERR;
+			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+		} else {
+			/* stop processing further cqe as this cqe is used for
+			 * triggering cq event on buddy cq of RQ.
+			 * When QP is destroyed, this cqe will be removed
+			 * from the cq's hardware q.
+			 */
+			*polled = false;
+			*stop = true;
+			expand = false;
+		}
+	} else {
+		*polled = true;
+		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
+	}
+	return expand;
+}
+
+static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
+				     struct ocrdma_cqe *cqe,
+				     struct ib_wc *ibwc, bool *polled)
+{
+	bool expand = false;
+	int tail = qp->sq.tail;
+	u32 wqe_idx;
+
+	if (!qp->wqe_wr_id_tbl[tail].signaled) {
+		expand = true;	/* CQE cannot be consumed yet */
+		*polled = false;    /* WC cannot be consumed yet */
+	} else {
+		ibwc->status = IB_WC_SUCCESS;
+		ibwc->wc_flags = 0;
+		ibwc->qp = &qp->ibqp;
+		ocrdma_update_wc(qp, ibwc, tail);
+		*polled = true;
+		wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
+		if (tail != wqe_idx)
+			expand = true; /* Coalesced CQE can't be consumed yet */
+	}
+	ocrdma_hwq_inc_tail(&qp->sq);
+	return expand;
+}
+
+static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
+			     struct ib_wc *ibwc, bool *polled, bool *stop)
+{
+	int status;
+	bool expand;
+
+	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
+		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+
+	if (status == OCRDMA_CQE_SUCCESS)
+		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
+	else
+		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
+	return expand;
+}
+
+static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
+{
+	int status;
+
+	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
+		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
+	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
+						OCRDMA_CQE_SRCQP_MASK;
+	ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
+						OCRDMA_CQE_PKEY_MASK;
+	ibwc->wc_flags = IB_WC_GRH;
+	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
+					OCRDMA_CQE_UD_XFER_LEN_SHIFT);
+	return status;
+}
+
+static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
+				       struct ocrdma_cqe *cqe,
+				       struct ocrdma_qp *qp)
+{
+	unsigned long flags;
+	struct ocrdma_srq *srq;
+	u32 wqe_idx;
+
+	srq = get_ocrdma_srq(qp->ibqp.srq);
+	wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT;
+	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
+	spin_lock_irqsave(&srq->q_lock, flags);
+	ocrdma_srq_toggle_bit(srq, wqe_idx);
+	spin_unlock_irqrestore(&srq->q_lock, flags);
+	ocrdma_hwq_inc_tail(&srq->rq);
+}
+
+static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
+				struct ib_wc *ibwc, bool *polled, bool *stop,
+				int status)
+{
+	bool expand;
+
+	/* The hw RQ is empty but the SQ is not, so keep the cqe
+	 * to get the cq event again.
+	 */
+	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
+		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
+			*polled = true;
+			status = OCRDMA_CQE_WR_FLUSH_ERR;
+			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
+		} else {
+			*polled = false;
+			*stop = true;
+			expand = false;
+		}
+	} else
+		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+	return expand;
+}
+
+static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
+				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
+{
+	ibwc->opcode = IB_WC_RECV;
+	ibwc->qp = &qp->ibqp;
+	ibwc->status = IB_WC_SUCCESS;
+
+	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
+		ocrdma_update_ud_rcqe(ibwc, cqe);
+	else
+		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
+
+	if (is_cqe_imm(cqe)) {
+		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
+		ibwc->wc_flags |= IB_WC_WITH_IMM;
+	} else if (is_cqe_wr_imm(cqe)) {
+		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
+		ibwc->wc_flags |= IB_WC_WITH_IMM;
+	} else if (is_cqe_invalidated(cqe)) {
+		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
+		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
+	}
+	if (qp->ibqp.srq)
+		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
+	else {
+		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
+		ocrdma_hwq_inc_tail(&qp->rq);
+	}
+}
+
+static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
+			     struct ib_wc *ibwc, bool *polled, bool *stop)
+{
+	int status;
+	bool expand = false;
+
+	ibwc->wc_flags = 0;
+	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
+		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
+					OCRDMA_CQE_UD_STATUS_MASK) >>
+					OCRDMA_CQE_UD_STATUS_SHIFT;
+	else
+		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
+			     OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+
+	if (status == OCRDMA_CQE_SUCCESS) {
+		*polled = true;
+		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
+	} else {
+		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
+					      status);
+	}
+	return expand;
+}
+
+static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
+				   u16 cur_getp)
+{
+	if (cq->phase_change) {
+		if (cur_getp == 0)
+			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
+	} else
+		/* clear valid bit */
+		cqe->flags_status_srcqpn = 0;
+}
+
+static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
+			    struct ib_wc *ibwc)
+{
+	u16 qpn = 0;
+	int i = 0;
+	bool expand = false;
+	int polled_hw_cqes = 0;
+	struct ocrdma_qp *qp = NULL;
+	struct ocrdma_dev *dev = cq->dev;
+	struct ocrdma_cqe *cqe;
+	u16 cur_getp;
+	bool polled = false;
+	bool stop = false;
+
+	cur_getp = cq->getp;
+	while (num_entries) {
+		cqe = cq->va + cur_getp;
+		/* check whether valid cqe or not */
+		if (!is_cqe_valid(cq, cqe))
+			break;
+		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
+		/* ignore discarded cqe */
+		if (qpn == 0)
+			goto skip_cqe;
+		qp = dev->qp_tbl[qpn];
+		BUG_ON(qp == NULL);
+
+		if (is_cqe_for_sq(cqe)) {
+			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
+						  &stop);
+		} else {
+			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
+						  &stop);
+		}
+		if (expand)
+			goto expand_cqe;
+		if (stop)
+			goto stop_cqe;
+		/* clear qpn to avoid duplicate processing by discard_cqe() */
+		cqe->cmn.qpn = 0;
+skip_cqe:
+		polled_hw_cqes += 1;
+		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
+		ocrdma_change_cq_phase(cq, cqe, cur_getp);
+expand_cqe:
+		if (polled) {
+			num_entries -= 1;
+			i += 1;
+			ibwc = ibwc + 1;
+			polled = false;
+		}
+	}
+stop_cqe:
+	cq->getp = cur_getp;
+	if (polled_hw_cqes || expand || stop) {
+		ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited,
+				  polled_hw_cqes);
+	}
+	return i;
+}
+
+/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
+static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
+			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
+{
+	int err_cqes = 0;
+
+	while (num_entries) {
+		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
+			break;
+		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
+			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
+			ocrdma_hwq_inc_tail(&qp->sq);
+		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
+			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
+			ocrdma_hwq_inc_tail(&qp->rq);
+		} else
+			return err_cqes;
+		ibwc->byte_len = 0;
+		ibwc->status = IB_WC_WR_FLUSH_ERR;
+		ibwc = ibwc + 1;
+		err_cqes += 1;
+		num_entries -= 1;
+	}
+	return err_cqes;
+}
+
+int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+	int cqes_to_poll = num_entries;
+	struct ocrdma_cq *cq = NULL;
+	unsigned long flags;
+	struct ocrdma_dev *dev;
+	int num_os_cqe = 0, err_cqes = 0;
+	struct ocrdma_qp *qp;
+
+	cq = get_ocrdma_cq(ibcq);
+	dev = cq->dev;
+
+	/* poll cqes from adapter CQ */
+	spin_lock_irqsave(&cq->cq_lock, flags);
+	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+	cqes_to_poll -= num_os_cqe;
+
+	if (cqes_to_poll) {
+		wc = wc + num_os_cqe;
+		/* The adapter returns a single error cqe when the qp moves to
+		 * the error state. So insert error cqes with wc_status
+		 * FLUSHED for the pending WQEs and RQEs of each QP's SQ and
+		 * RQ that use this CQ.
+		 */
+		spin_lock_irqsave(&dev->flush_q_lock, flags);
+		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
+			if (cqes_to_poll == 0)
+				break;
+			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
+			cqes_to_poll -= err_cqes;
+			num_os_cqe += err_cqes;
+			wc = wc + err_cqes;
+		}
+		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
+	}
+	return num_os_cqe;
+}
+
+int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
+{
+	struct ocrdma_cq *cq;
+	unsigned long flags;
+	struct ocrdma_dev *dev;
+	u16 cq_id;
+	u16 cur_getp;
+	struct ocrdma_cqe *cqe;
+
+	cq = get_ocrdma_cq(ibcq);
+	cq_id = cq->id;
+	dev = cq->dev;
+
+	spin_lock_irqsave(&cq->cq_lock, flags);
+	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
+		cq->armed = true;
+	if (cq_flags & IB_CQ_SOLICITED)
+		cq->solicited = true;
+
+	cur_getp = cq->getp;
+	cqe = cq->va + cur_getp;
+
+	/* Check whether any valid cqe exists; if not, it is safe to arm.
+	 * If a cqe is not yet consumed, let it be consumed first and arm
+	 * afterwards to avoid false interrupts.
+	 */
+	if (!is_cqe_valid(cq, cqe) || cq->arm_needed) {
+		cq->arm_needed = false;
+		ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0);
+	}
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+	return 0;
+}
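
The two functions above implement the provider side of the poll_cq/req_notify_cq pair. As a hedged userspace illustration (again not part of this patch), the usual consumer pattern drains the CQ, re-arms it, and polls once more to close the race with completions that arrive between the empty poll and the arm:

#include <infiniband/verbs.h>

static int drain_and_rearm(struct ibv_cq *cq)
{
	struct ibv_wc wc[16];
	int n, i;

	for (;;) {
		n = ibv_poll_cq(cq, 16, wc);
		if (n < 0)
			return n;
		if (n == 0) {
			if (ibv_req_notify_cq(cq, 0))
				return -1;
			/* catch completions that slipped in before the arm */
			n = ibv_poll_cq(cq, 16, wc);
			if (n <= 0)
				return n;	/* armed and idle, or error */
		}
		for (i = 0; i < n; i++)
			if (wc[i].status != IBV_WC_SUCCESS)
				return -1;	/* flushed or error completion */
	}
}
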
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
new file mode 100644
index 0000000..e648343
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -0,0 +1,94 @@
+/*******************************************************************
+ * This file is part of the Emulex RoCE Device Driver for          *
+ * RoCE (RDMA over Converged Ethernet) adapters.                   *
+ * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ *******************************************************************/
+
+#ifndef __OCRDMA_VERBS_H__
+#define __OCRDMA_VERBS_H__
+
+#include <linux/version.h>
+int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
+		     struct ib_send_wr **bad_wr);
+int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
+		     struct ib_recv_wr **bad_wr);
+
+int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
+int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
+
+int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props);
+int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
+		       struct ib_port_modify *props);
+
+void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
+int ocrdma_query_gid(struct ib_device *, u8 port,
+		     int index, union ib_gid *gid);
+int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+
+struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
+					  struct ib_udata *);
+int ocrdma_dealloc_ucontext(struct ib_ucontext *);
+
+int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+
+struct ib_pd *ocrdma_alloc_pd(struct ib_device *,
+			      struct ib_ucontext *, struct ib_udata *);
+int ocrdma_dealloc_pd(struct ib_pd *pd);
+
+struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector,
+			       struct ib_ucontext *, struct ib_udata *);
+int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+int ocrdma_destroy_cq(struct ib_cq *);
+
+struct ib_qp *ocrdma_create_qp(struct ib_pd *,
+			       struct ib_qp_init_attr *attrs,
+			       struct ib_udata *);
+int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+		      int attr_mask);
+int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_udata *udata);
+int ocrdma_query_qp(struct ib_qp *,
+		    struct ib_qp_attr *qp_attr,
+		    int qp_attr_mask, struct ib_qp_init_attr *);
+int ocrdma_destroy_qp(struct ib_qp *);
+
+struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
+				 struct ib_udata *);
+int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
+		      enum ib_srq_attr_mask, struct ib_udata *);
+int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
+int ocrdma_destroy_srq(struct ib_srq *);
+int ocrdma_post_srq_recv(struct ib_srq *, struct ib_recv_wr *,
+			 struct ib_recv_wr **bad_recv_wr);
+
+int ocrdma_dereg_mr(struct ib_mr *);
+struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
+struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,
+				   struct ib_phys_buf *buffer_list,
+				   int num_phys_buf, int acc, u64 *iova_start);
+struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
+				 u64 virt, int acc, struct ib_udata *);
+
+#endif				/* __OCRDMA_VERBS_H__ */
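
These prototypes are the handlers the driver plugs into its struct ib_device before registering with the RDMA core. The hookup itself lives in the driver's main file, which is not part of this hunk; the sketch below only illustrates the 3.x-era model of filling the function pointers directly, and the wrapper name is hypothetical:

#include <rdma/ib_verbs.h>
#include "ocrdma_verbs.h"

static int ocrdma_wire_up_verbs(struct ib_device *ibdev)	/* hypothetical */
{
	ibdev->query_device  = ocrdma_query_device;
	ibdev->query_port    = ocrdma_query_port;
	ibdev->alloc_pd      = ocrdma_alloc_pd;
	ibdev->dealloc_pd    = ocrdma_dealloc_pd;
	ibdev->create_cq     = ocrdma_create_cq;
	ibdev->destroy_cq    = ocrdma_destroy_cq;
	ibdev->poll_cq       = ocrdma_poll_cq;
	ibdev->req_notify_cq = ocrdma_arm_cq;
	ibdev->post_send     = ocrdma_post_send;
	ibdev->post_recv     = ocrdma_post_recv;
	ibdev->post_srq_recv = ocrdma_post_srq_recv;

	return ib_register_device(ibdev, NULL);
}
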
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 6b811e3..7e62f41 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -530,8 +530,6 @@
 	/* qib_lflags driver is waiting for */
 	u32 state_wanted;
 	spinlock_t lflags_lock;
-	/* number of (port-specific) interrupts for this port -- saturates... */
-	u32 int_counter;
 
 	/* ref count for each pkey */
 	atomic_t pkeyrefs[4];
@@ -543,24 +541,26 @@
 	u64 *statusp;
 
 	/* SendDMA related entries */
-	spinlock_t            sdma_lock;
-	struct qib_sdma_state sdma_state;
-	unsigned long         sdma_buf_jiffies;
-	struct qib_sdma_desc *sdma_descq;
-	u64                   sdma_descq_added;
-	u64                   sdma_descq_removed;
-	u16                   sdma_descq_cnt;
-	u16                   sdma_descq_tail;
-	u16                   sdma_descq_head;
-	u16                   sdma_next_intr;
-	u16                   sdma_reset_wait;
-	u8                    sdma_generation;
-	struct tasklet_struct sdma_sw_clean_up_task;
-	struct list_head      sdma_activelist;
 
+	/* read mostly */
+	struct qib_sdma_desc *sdma_descq;
+	struct qib_sdma_state sdma_state;
 	dma_addr_t       sdma_descq_phys;
 	volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
 	dma_addr_t       sdma_head_phys;
+	u16                   sdma_descq_cnt;
+
+	/* read/write using lock */
+	spinlock_t            sdma_lock ____cacheline_aligned_in_smp;
+	struct list_head      sdma_activelist;
+	u64                   sdma_descq_added;
+	u64                   sdma_descq_removed;
+	u16                   sdma_descq_tail;
+	u16                   sdma_descq_head;
+	u8                    sdma_generation;
+
+	struct tasklet_struct sdma_sw_clean_up_task
+		____cacheline_aligned_in_smp;
 
 	wait_queue_head_t state_wait; /* for state_wanted */
 
@@ -873,7 +873,14 @@
 	 * pio_writing.
 	 */
 	spinlock_t pioavail_lock;
-
+	/*
+	 * index of last buffer to optimize search for next
+	 */
+	u32 last_pio;
+	/*
+	 * min kernel pio buffer to optimize search
+	 */
+	u32 min_kernel_pio;
 	/*
 	 * Shadow copies of registers; size indicates read access size.
 	 * Most of them are readonly, but some are write-only register,
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 6fc9365..8895cfe 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -38,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
+#include <linux/prefetch.h>
 
 #include "qib.h"
 
@@ -481,8 +482,10 @@
 			etail = qib_hdrget_index(rhf_addr);
 			updegr = 1;
 			if (tlen > sizeof(*hdr) ||
-			    etype >= RCVHQ_RCV_TYPE_NON_KD)
+			    etype >= RCVHQ_RCV_TYPE_NON_KD) {
 				ebuf = qib_get_egrbuf(rcd, etail);
+				prefetch_range(ebuf, tlen - sizeof(*hdr));
+			}
 		}
 		if (!eflags) {
 			u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index d0c64d5..4d352b9 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3132,6 +3132,7 @@
 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
 	dd->piobcnt2k = val & ~0U;
 	dd->piobcnt4k = val >> 32;
+	dd->last_pio = dd->piobcnt4k + dd->piobcnt2k - 1;
 	/* these may be adjusted in init_chip_wc_pat() */
 	dd->pio2kbase = (u32 __iomem *)
 		(((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 3c722f7..86a0ba7 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -4157,6 +4157,7 @@
 		dd->cspec->sdmabufcnt;
 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
+	dd->last_pio = dd->cspec->lastbuf_for_pio;
 	dd->pbufsctxt = dd->lastctxt_piobuf /
 		(dd->cfgctxts - dd->first_user_ctxt);
 
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 060b960..c881e74 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6379,6 +6379,7 @@
 		dd->cspec->sdmabufcnt;
 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
+	dd->last_pio = dd->cspec->lastbuf_for_pio;
 	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
 		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
 
@@ -7708,7 +7709,7 @@
 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
 	msleep(20);
 	/*       Set Frequency Loop Bandwidth */
-	ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
+	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
 	/*       Enable Frequency Loop */
 	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
 	/*       Set Timing Loop Bandwidth */
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index cf0cd30..dc14e10 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -102,6 +102,8 @@
 		dd->cfgctxts = qib_cfgctxts;
 	else
 		dd->cfgctxts = dd->ctxtcnt;
+	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
+		dd->cfgctxts - dd->first_user_ctxt;
 }
 
 /*
@@ -402,7 +404,6 @@
 		if (rcd)
 			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
 	}
-	dd->freectxts = dd->cfgctxts - dd->first_user_ctxt;
 }
 
 static void verify_interrupt(unsigned long opaque)
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index c4ff788..4339021 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -396,6 +396,7 @@
 
 static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
 {
+	int valid_mkey = 0;
 	int ret = 0;
 
 	/* Is the mkey in the process of expiring? */
@@ -406,23 +407,36 @@
 		ibp->mkeyprot = 0;
 	}
 
-	/* M_Key checking depends on Portinfo:M_Key_protect_bits */
-	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
-	    ibp->mkey != smp->mkey &&
-	    (smp->method == IB_MGMT_METHOD_SET ||
-	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
-	     (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
-		if (ibp->mkey_violations != 0xFFFF)
-			++ibp->mkey_violations;
-		if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
-			ibp->mkey_lease_timeout = jiffies +
-				ibp->mkey_lease_period * HZ;
-		/* Generate a trap notice. */
-		qib_bad_mkey(ibp, smp);
-		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-	} else if (ibp->mkey_lease_timeout)
+	if ((mad_flags & IB_MAD_IGNORE_MKEY) ||  ibp->mkey == 0 ||
+	    ibp->mkey == smp->mkey)
+		valid_mkey = 1;
+
+	/* Unset lease timeout on any valid Get/Set/TrapRepress */
+	if (valid_mkey && ibp->mkey_lease_timeout &&
+	    (smp->method == IB_MGMT_METHOD_GET ||
+	     smp->method == IB_MGMT_METHOD_SET ||
+	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
 		ibp->mkey_lease_timeout = 0;
 
+	if (!valid_mkey) {
+		switch (smp->method) {
+		case IB_MGMT_METHOD_GET:
+			/* Bad mkey not a violation below level 2 */
+			if (ibp->mkeyprot < 2)
+				break;
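+			/* fall through: treated as a violation at level >= 2 */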
+		case IB_MGMT_METHOD_SET:
+		case IB_MGMT_METHOD_TRAP_REPRESS:
+			if (ibp->mkey_violations != 0xFFFF)
+				++ibp->mkey_violations;
+			if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
+				ibp->mkey_lease_timeout = jiffies +
+					ibp->mkey_lease_period * HZ;
+			/* Generate a trap notice. */
+			qib_bad_mkey(ibp, smp);
+			ret = 1;
+		}
+	}
+
 	return ret;
 }
 
@@ -450,6 +464,7 @@
 			ibp = to_iport(ibdev, port_num);
 			ret = check_mkey(ibp, smp, 0);
 			if (ret)
+				ret = IB_MAD_RESULT_FAILURE;
 				goto bail;
 		}
 	}
@@ -631,7 +646,7 @@
 	struct qib_devdata *dd;
 	struct qib_pportdata *ppd;
 	struct qib_ibport *ibp;
-	char clientrereg = 0;
+	u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
 	unsigned long flags;
 	u16 lid, smlid;
 	u8 lwe;
@@ -781,12 +796,6 @@
 
 	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
 
-	if (pip->clientrereg_resv_subnetto & 0x80) {
-		clientrereg = 1;
-		event.event = IB_EVENT_CLIENT_REREGISTER;
-		ib_dispatch_event(&event);
-	}
-
 	/*
 	 * Do the port state change now that the other link parameters
 	 * have been set.
@@ -844,10 +853,15 @@
 		smp->status |= IB_SMP_INVALID_FIELD;
 	}
 
+	if (clientrereg) {
+		event.event = IB_EVENT_CLIENT_REREGISTER;
+		ib_dispatch_event(&event);
+	}
+
 	ret = subn_get_portinfo(smp, ibdev, port);
 
-	if (clientrereg)
-		pip->clientrereg_resv_subnetto |= 0x80;
+	/* restore re-reg bit per o14-12.2.1 */
+	pip->clientrereg_resv_subnetto |= clientrereg;
 
 	goto get_only;
 
@@ -1835,6 +1849,7 @@
 		    port_num && port_num <= ibdev->phys_port_cnt &&
 		    port != port_num)
 			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
+		ret = IB_MAD_RESULT_FAILURE;
 		goto bail;
 	}
 
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 7e7e16f..1ce56b5 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -1038,6 +1038,11 @@
 			goto bail_swq;
 		}
 		RCU_INIT_POINTER(qp->next, NULL);
+		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
+		if (!qp->s_hdr) {
+			ret = ERR_PTR(-ENOMEM);
+			goto bail_qp;
+		}
 		qp->timeout_jiffies =
 			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
 				1000UL);
@@ -1159,6 +1164,7 @@
 		vfree(qp->r_rq.wq);
 	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
 bail_qp:
+	kfree(qp->s_hdr);
 	kfree(qp);
 bail_swq:
 	vfree(swq);
@@ -1214,6 +1220,7 @@
 	else
 		vfree(qp->r_rq.wq);
 	vfree(qp->s_wq);
+	kfree(qp->s_hdr);
 	kfree(qp);
 	return 0;
 }
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 765b4cb..b641416 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -244,9 +244,9 @@
 	int ret = 0;
 	int delta;
 
-	ohdr = &qp->s_hdr.u.oth;
+	ohdr = &qp->s_hdr->u.oth;
 	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-		ohdr = &qp->s_hdr.u.l.oth;
+		ohdr = &qp->s_hdr->u.l.oth;
 
 	/*
 	 * The lock is needed to synchronize between the sending tasklet,
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index b4b37e4..c0ee7e0 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -688,17 +688,17 @@
 	nwords = (qp->s_cur_size + extra_bytes) >> 2;
 	lrh0 = QIB_LRH_BTH;
 	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
+		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
 					       &qp->remote_ah_attr.grh,
 					       qp->s_hdrwords, nwords);
 		lrh0 = QIB_LRH_GRH;
 	}
 	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
 		qp->remote_ah_attr.sl << 4;
-	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
-	qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
+	qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
+	qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+	qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+	qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
 				       qp->remote_ah_attr.src_path_bits);
 	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
 	bth0 |= extra_bytes << 20;
@@ -758,7 +758,7 @@
 			 * If the packet cannot be sent now, return and
 			 * the send tasklet will be woken up later.
 			 */
-			if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
+			if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
 					   qp->s_cur_sge, qp->s_cur_size))
 				break;
 			/* Record that s_hdr is empty. */
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index dae5160..dd9cd49 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -503,8 +503,11 @@
 	struct qib_devdata *dd = dd_from_dev(dev);
 
 	/* Return the number of user ports (contexts) available. */
-	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
-		dd->first_user_ctxt);
+	/* The calculation below deals with a special case where
+	 * cfgctxts is set to 1 on a single-port board. */
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			(dd->first_user_ctxt > dd->cfgctxts) ? 0 :
+			(dd->cfgctxts - dd->first_user_ctxt));
 }
 
 static ssize_t show_nfreectxts(struct device *device,
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index 1bf626c..31d3561 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -295,6 +295,7 @@
 
 	nbufs = last - first + 1; /* number in range to check */
 	if (dd->upd_pio_shadow) {
+update_shadow:
 		/*
 		 * Minor optimization.  If we had no buffers on last call,
 		 * start out by doing the update; continue and do scan even
@@ -304,37 +305,39 @@
 		updated++;
 	}
 	i = first;
-rescan:
 	/*
 	 * While test_and_set_bit() is atomic, we do that and then the
 	 * change_bit(), and the pair is not.  See if this is the cause
 	 * of the remaining armlaunch errors.
 	 */
 	spin_lock_irqsave(&dd->pioavail_lock, flags);
+	if (dd->last_pio >= first && dd->last_pio <= last)
+		i = dd->last_pio + 1;
+	if (!first)
+		/* adjust to min possible  */
+		nbufs = last - dd->min_kernel_pio + 1;
 	for (j = 0; j < nbufs; j++, i++) {
 		if (i > last)
-			i = first;
+			i = !first ? dd->min_kernel_pio : first;
 		if (__test_and_set_bit((2 * i) + 1, shadow))
 			continue;
 		/* flip generation bit */
 		__change_bit(2 * i, shadow);
 		/* remember that the buffer can be written to now */
 		__set_bit(i, dd->pio_writing);
+		if (!first && first != last) /* first == last on VL15, avoid */
+			dd->last_pio = i;
 		break;
 	}
 	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
 
 	if (j == nbufs) {
-		if (!updated) {
+		if (!updated)
 			/*
 			 * First time through; shadow exhausted, but may be
 			 * buffers available, try an update and then rescan.
 			 */
-			update_send_bufs(dd);
-			updated++;
-			i = first;
-			goto rescan;
-		}
+			goto update_shadow;
 		no_send_bufs(dd);
 		buf = NULL;
 	} else {
@@ -422,14 +425,20 @@
 				__clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
 					    + start, dd->pioavailshadow);
 			__set_bit(start, dd->pioavailkernel);
+			if ((start >> 1) < dd->min_kernel_pio)
+				dd->min_kernel_pio = start >> 1;
 		} else {
 			__set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
 				  dd->pioavailshadow);
 			__clear_bit(start, dd->pioavailkernel);
+			if ((start >> 1) > dd->min_kernel_pio)
+				dd->min_kernel_pio = start >> 1;
 		}
 		start += 2;
 	}
 
+	if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
+		dd->last_pio = dd->min_kernel_pio - 1;
 	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
 
 	dd->f_txchk_change(dd, ostart, len, avail, rcd);
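
The qib_tx.c changes above replace the rescan goto with an update_shadow label and start each search at dd->last_pio (bounded below by dd->min_kernel_pio), so repeated allocations skip the low-numbered buffers that are usually busy. A standalone sketch of that search-start idea, simplified, without locking and with illustrative names only:

#include <stdbool.h>

#define NSLOTS 128

static bool busy[NSLOTS];
static int last_alloc = -1;

/* Circular scan over [first, last]; resume just past the previous hit. */
static int alloc_slot(int first, int last)
{
	int nslots = last - first + 1;
	int i = first, j;

	if (last_alloc >= first && last_alloc <= last)
		i = last_alloc + 1;

	for (j = 0; j < nslots; j++, i++) {
		if (i > last)
			i = first;
		if (!busy[i]) {
			busy[i] = true;
			last_alloc = i;
			return i;
		}
	}
	return -1;	/* nothing free in the range */
}
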
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 7ce2ac2..ce7387f 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -72,9 +72,9 @@
 		goto done;
 	}
 
-	ohdr = &qp->s_hdr.u.oth;
+	ohdr = &qp->s_hdr->u.oth;
 	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-		ohdr = &qp->s_hdr.u.l.oth;
+		ohdr = &qp->s_hdr->u.l.oth;
 
 	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
 	hwords = 5;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 828609f..a468bf2 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -321,11 +321,11 @@
 
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		/* Header size in 32-bit words. */
-		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
+		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
 					       &ah_attr->grh,
 					       qp->s_hdrwords, nwords);
 		lrh0 = QIB_LRH_GRH;
-		ohdr = &qp->s_hdr.u.l.oth;
+		ohdr = &qp->s_hdr->u.l.oth;
 		/*
 		 * Don't worry about sending to locally attached multicast
 		 * QPs.  It is unspecified by the spec. what happens.
@@ -333,7 +333,7 @@
 	} else {
 		/* Header size in 32-bit words. */
 		lrh0 = QIB_LRH_BTH;
-		ohdr = &qp->s_hdr.u.oth;
+		ohdr = &qp->s_hdr->u.oth;
 	}
 	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
 		qp->s_hdrwords++;
@@ -346,15 +346,15 @@
 		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
 	else
 		lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
-	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
-	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+	qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
+	qp->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
+	qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
 	lid = ppd->lid;
 	if (lid) {
 		lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
-		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
+		qp->s_hdr->lrh[3] = cpu_to_be16(lid);
 	} else
-		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
+		qp->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
 	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 		bth0 |= IB_BTH_SOLICITED;
 	bth0 |= extra_bytes << 20;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 0c19ef0..4876060 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -367,9 +367,10 @@
 
 struct qib_rq {
 	struct qib_rwq *wq;
-	spinlock_t lock; /* protect changes in this struct */
 	u32 size;               /* size of RWQE array */
 	u8 max_sge;
+	spinlock_t lock /* protect changes in this struct */
+		____cacheline_aligned_in_smp;
 };
 
 struct qib_srq {
@@ -412,31 +413,75 @@
  */
 struct qib_qp {
 	struct ib_qp ibqp;
-	struct qib_qp *next;            /* link list for QPN hash table */
-	struct qib_qp *timer_next;      /* link list for qib_ib_timer() */
-	struct list_head iowait;        /* link for wait PIO buf */
-	struct list_head rspwait;       /* link for waititing to respond */
+	/* read mostly fields above and below */
 	struct ib_ah_attr remote_ah_attr;
 	struct ib_ah_attr alt_ah_attr;
-	struct qib_ib_header s_hdr;     /* next packet header to send */
-	atomic_t refcount;
-	wait_queue_head_t wait;
-	wait_queue_head_t wait_dma;
-	struct timer_list s_timer;
-	struct work_struct s_work;
+	struct qib_qp *next;            /* link list for QPN hash table */
+	struct qib_swqe *s_wq;  /* send work queue */
 	struct qib_mmap_info *ip;
-	struct qib_sge_state *s_cur_sge;
-	struct qib_verbs_txreq *s_tx;
-	struct qib_mregion *s_rdma_mr;
-	struct qib_sge_state s_sge;     /* current send request data */
-	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1];
-	struct qib_sge_state s_ack_rdma_sge;
+	struct qib_ib_header *s_hdr;     /* next packet header to send */
+	unsigned long timeout_jiffies;  /* computed from timeout */
+
+	enum ib_mtu path_mtu;
+	u32 remote_qpn;
+	u32 pmtu;		/* decoded from path_mtu */
+	u32 qkey;               /* QKEY for this QP (for UD or RD) */
+	u32 s_size;             /* send work queue size */
+	u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */
+
+	u8 state;               /* QP state */
+	u8 qp_access_flags;
+	u8 alt_timeout;         /* Alternate path timeout for this QP */
+	u8 timeout;             /* Timeout for this QP */
+	u8 s_srate;
+	u8 s_mig_state;
+	u8 port_num;
+	u8 s_pkey_index;        /* PKEY index to use */
+	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
+	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
+	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
+	u8 s_retry_cnt;         /* number of times to retry */
+	u8 s_rnr_retry_cnt;
+	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
+	u8 s_max_sge;           /* size of s_wq->sg_list */
+	u8 s_draining;
+
+	/* start of read/write fields */
+
+	atomic_t refcount ____cacheline_aligned_in_smp;
+	wait_queue_head_t wait;
+
+
+	struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
+		____cacheline_aligned_in_smp;
 	struct qib_sge_state s_rdma_read_sge;
+
+	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
+	unsigned long r_aflags;
+	u64 r_wr_id;            /* ID for current receive WQE */
+	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
+	u32 r_len;              /* total length of r_sge */
+	u32 r_rcv_len;          /* receive data len processed */
+	u32 r_psn;              /* expected rcv packet sequence number */
+	u32 r_msn;              /* message sequence number */
+
+	u8 r_state;             /* opcode of last packet received */
+	u8 r_flags;
+	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
+
+	struct list_head rspwait;       /* link for waiting to respond */
+
 	struct qib_sge_state r_sge;     /* current receive data */
-	spinlock_t r_lock;      /* used for APM */
-	spinlock_t s_lock;
-	atomic_t s_dma_busy;
+	struct qib_rq r_rq;             /* receive work queue */
+
+	spinlock_t s_lock ____cacheline_aligned_in_smp;
+	struct qib_sge_state *s_cur_sge;
 	u32 s_flags;
+	struct qib_verbs_txreq *s_tx;
+	struct qib_swqe *s_wqe;
+	struct qib_sge_state s_sge;     /* current send request data */
+	struct qib_mregion *s_rdma_mr;
+	atomic_t s_dma_busy;
 	u32 s_cur_size;         /* size of send packet in bytes */
 	u32 s_len;              /* total length of s_sge */
 	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
@@ -447,48 +492,6 @@
 	u32 s_psn;              /* current packet sequence number */
 	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
 	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
-	u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */
-	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
-	u64 r_wr_id;            /* ID for current receive WQE */
-	unsigned long r_aflags;
-	u32 r_len;              /* total length of r_sge */
-	u32 r_rcv_len;          /* receive data len processed */
-	u32 r_psn;              /* expected rcv packet sequence number */
-	u32 r_msn;              /* message sequence number */
-	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
-	u16 s_rdma_ack_cnt;
-	u8 state;               /* QP state */
-	u8 s_state;             /* opcode of last packet sent */
-	u8 s_ack_state;         /* opcode of packet to ACK */
-	u8 s_nak_state;         /* non-zero if NAK is pending */
-	u8 r_state;             /* opcode of last packet received */
-	u8 r_nak_state;         /* non-zero if NAK is pending */
-	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
-	u8 r_flags;
-	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
-	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
-	u8 qp_access_flags;
-	u8 s_max_sge;           /* size of s_wq->sg_list */
-	u8 s_retry_cnt;         /* number of times to retry */
-	u8 s_rnr_retry_cnt;
-	u8 s_retry;             /* requester retry counter */
-	u8 s_rnr_retry;         /* requester RNR retry counter */
-	u8 s_pkey_index;        /* PKEY index to use */
-	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
-	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
-	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
-	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
-	u8 s_srate;
-	u8 s_draining;
-	u8 s_mig_state;
-	u8 timeout;             /* Timeout for this QP */
-	u8 alt_timeout;         /* Alternate path timeout for this QP */
-	u8 port_num;
-	enum ib_mtu path_mtu;
-	u32 pmtu;		/* decoded from path_mtu */
-	u32 remote_qpn;
-	u32 qkey;               /* QKEY for this QP (for UD or RD) */
-	u32 s_size;             /* send work queue size */
 	u32 s_head;             /* new entries added here */
 	u32 s_tail;             /* next entry to process */
 	u32 s_cur;              /* current work queue entry */
@@ -496,11 +499,27 @@
 	u32 s_last;             /* last completed entry */
 	u32 s_ssn;              /* SSN of tail entry */
 	u32 s_lsn;              /* limit sequence number (credit) */
-	unsigned long timeout_jiffies;  /* computed from timeout */
-	struct qib_swqe *s_wq;  /* send work queue */
-	struct qib_swqe *s_wqe;
-	struct qib_rq r_rq;             /* receive work queue */
-	struct qib_sge r_sg_list[0];    /* verified SGEs */
+	u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
+	u16 s_rdma_ack_cnt;
+	u8 s_state;             /* opcode of last packet sent */
+	u8 s_ack_state;         /* opcode of packet to ACK */
+	u8 s_nak_state;         /* non-zero if NAK is pending */
+	u8 r_nak_state;         /* non-zero if NAK is pending */
+	u8 s_retry;             /* requester retry counter */
+	u8 s_rnr_retry;         /* requester RNR retry counter */
+	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
+	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
+
+	struct qib_sge_state s_ack_rdma_sge;
+	struct timer_list s_timer;
+	struct list_head iowait;        /* link for wait PIO buf */
+
+	struct work_struct s_work;
+
+	wait_queue_head_t wait_dma;
+
+	struct qib_sge r_sg_list[0] /* verified SGEs */
+		____cacheline_aligned_in_smp;
 };
 
 /*
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index db43b31..0ab8c9c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -573,10 +573,9 @@
 
 	err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
 			   non_blocking);
-	if (err) {
-		iscsi_destroy_endpoint(ep);
+	if (err)
 		return ERR_PTR(err);
-	}
+
 	return ep;
 }
 
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 14224ba..2dddabd 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -613,8 +613,9 @@
 	ib_conn->cma_id = NULL;
 addr_failure:
 	ib_conn->state = ISER_CONN_DOWN;
+	iser_conn_put(ib_conn, 1); /* deref ib conn's cma id */
 connect_failure:
-	iser_conn_release(ib_conn, 1);
+	iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
 	return err;
 }
 
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index daf21b8..5f6b7f6 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1099,9 +1099,8 @@
 	dir = cmd->data_direction;
 	BUG_ON(dir == DMA_NONE);
 
-	transport_do_task_sg_chain(cmd);
-	ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
-	ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
+	ioctx->sg = sg = sg_orig = cmd->t_data_sg;
+	ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
 
 	count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
 			      opposite_dma_dir(dir));
@@ -1769,7 +1768,7 @@
 		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
 		goto send_sense;
 	}
-	ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
+	ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
 	if (ret < 0) {
 		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
 		if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
@@ -4004,9 +4003,6 @@
 
 	srpt_target->tf_ops = srpt_template;
 
-	/* Enable SG chaining */
-	srpt_target->tf_ops.task_sg_chaining = true;
-
 	/*
 	 * Set up default attribute lists.
 	 */
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 8081a0a..a4b14a4 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -274,7 +274,8 @@
 	static unsigned char param = 0xc8;
 	struct synaptics_data *priv = psmouse->private;
 
-	if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
+	if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+	      SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)))
 		return 0;
 
 	if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 7ad7a3b..3e5e82a 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -4,7 +4,7 @@
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
-obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
+obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 35c1e17..3a74e44 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -36,6 +36,7 @@
 #include <linux/tboot.h>
 #include <linux/dmi.h>
 #include <linux/slab.h>
+#include <asm/irq_remapping.h>
 #include <asm/iommu_table.h>
 
 #define PREFIX "DMAR: "
@@ -555,7 +556,7 @@
 
 		dmar = (struct acpi_table_dmar *) dmar_tbl;
 
-		if (ret && intr_remapping_enabled && cpu_has_x2apic &&
+		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
 		    dmar->flags & 0x1)
 			printk(KERN_INFO
 			       "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
@@ -1041,7 +1042,7 @@
 	"non-zero reserved fields in PTE",
 };
 
-static const char *intr_remap_fault_reasons[] =
+static const char *irq_remap_fault_reasons[] =
 {
 	"Detected reserved fields in the decoded interrupt-remapped request",
 	"Interrupt index exceeded the interrupt-remapping table size",
@@ -1056,10 +1057,10 @@
 
 const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
 {
-	if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
-				     ARRAY_SIZE(intr_remap_fault_reasons))) {
+	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
+					ARRAY_SIZE(irq_remap_fault_reasons))) {
 		*fault_type = INTR_REMAP;
-		return intr_remap_fault_reasons[fault_reason - 0x20];
+		return irq_remap_fault_reasons[fault_reason - 0x20];
 	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
 		*fault_type = DMA_REMAP;
 		return dma_remap_fault_reasons[fault_reason];
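
The bounds fix above replaces "fault_reason <= 0x20 + ARRAY_SIZE(...)", which let an index equal to the array size slip through, with the rebased check "fault_reason - 0x20 < ARRAY_SIZE(...)". The same idiom in standalone form, with an illustrative table and base value only:

#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define FAULT_BASE 0x20

static const char *fault_names[] = { "reason A", "reason B", "reason C" };

static const char *fault_name(unsigned int reason)
{
	if (reason >= FAULT_BASE &&
	    reason - FAULT_BASE < ARRAY_SIZE(fault_names))
		return fault_names[reason - FAULT_BASE];
	return "unknown";
}
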
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f93d5ac..bf2fbaa 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -42,6 +42,7 @@
 #include <linux/dmi.h>
 #include <linux/pci-ats.h>
 #include <linux/memblock.h>
+#include <asm/irq_remapping.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 
@@ -4082,7 +4083,7 @@
 	if (cap == IOMMU_CAP_CACHE_COHERENCY)
 		return dmar_domain->iommu_snooping;
 	if (cap == IOMMU_CAP_INTR_REMAP)
-		return intr_remapping_enabled;
+		return irq_remapping_enabled;
 
 	return 0;
 }
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intel_irq_remapping.c
similarity index 66%
rename from drivers/iommu/intr_remapping.c
rename to drivers/iommu/intel_irq_remapping.c
index 6777ca0..6d34706 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -10,49 +10,33 @@
 #include <asm/smp.h>
 #include <asm/cpu.h>
 #include <linux/intel-iommu.h>
-#include "intr_remapping.h"
 #include <acpi/acpi.h>
+#include <asm/irq_remapping.h>
 #include <asm/pci-direct.h>
+#include <asm/msidef.h>
+
+#include "irq_remapping.h"
+
+struct ioapic_scope {
+	struct intel_iommu *iommu;
+	unsigned int id;
+	unsigned int bus;	/* PCI bus number */
+	unsigned int devfn;	/* PCI devfn number */
+};
+
+struct hpet_scope {
+	struct intel_iommu *iommu;
+	u8 id;
+	unsigned int bus;
+	unsigned int devfn;
+};
+
+#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
+#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
 
 static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static struct hpet_scope ir_hpet[MAX_HPET_TBS];
 static int ir_ioapic_num, ir_hpet_num;
-int intr_remapping_enabled;
-
-static int disable_intremap;
-static int disable_sourceid_checking;
-static int no_x2apic_optout;
-
-static __init int setup_nointremap(char *str)
-{
-	disable_intremap = 1;
-	return 0;
-}
-early_param("nointremap", setup_nointremap);
-
-static __init int setup_intremap(char *str)
-{
-	if (!str)
-		return -EINVAL;
-
-	while (*str) {
-		if (!strncmp(str, "on", 2))
-			disable_intremap = 0;
-		else if (!strncmp(str, "off", 3))
-			disable_intremap = 1;
-		else if (!strncmp(str, "nosid", 5))
-			disable_sourceid_checking = 1;
-		else if (!strncmp(str, "no_x2apic_optout", 16))
-			no_x2apic_optout = 1;
-
-		str += strcspn(str, ",");
-		while (*str == ',')
-			str++;
-	}
-
-	return 0;
-}
-early_param("intremap", setup_intremap);
 
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
@@ -80,7 +64,7 @@
 	return 0;
 }
 
-int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
@@ -152,7 +136,7 @@
 	return qi_submit_sync(&desc, iommu);
 }
 
-int map_irq_to_irte_handle(int irq, u16 *sub_handle)
+static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
@@ -168,7 +152,7 @@
 	return index;
 }
 
-int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
+static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
@@ -188,7 +172,7 @@
 	return 0;
 }
 
-int modify_irte(int irq, struct irte *irte_modified)
+static int modify_irte(int irq, struct irte *irte_modified)
 {
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct intel_iommu *iommu;
@@ -216,7 +200,7 @@
 	return rc;
 }
 
-struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
+static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
 {
 	int i;
 
@@ -226,7 +210,7 @@
 	return NULL;
 }
 
-struct intel_iommu *map_ioapic_to_ir(int apic)
+static struct intel_iommu *map_ioapic_to_ir(int apic)
 {
 	int i;
 
@@ -236,7 +220,7 @@
 	return NULL;
 }
 
-struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
+static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
 {
 	struct dmar_drhd_unit *drhd;
 
@@ -270,7 +254,7 @@
 	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 }
 
-int free_irte(int irq)
+static int free_irte(int irq)
 {
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
@@ -328,7 +312,7 @@
 	irte->sid = sid;
 }
 
-int set_ioapic_sid(struct irte *irte, int apic)
+static int set_ioapic_sid(struct irte *irte, int apic)
 {
 	int i;
 	u16 sid = 0;
@@ -353,7 +337,7 @@
 	return 0;
 }
 
-int set_hpet_sid(struct irte *irte, u8 id)
+static int set_hpet_sid(struct irte *irte, u8 id)
 {
 	int i;
 	u16 sid = 0;
@@ -383,7 +367,7 @@
 	return 0;
 }
 
-int set_msi_sid(struct irte *irte, struct pci_dev *dev)
+static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 {
 	struct pci_dev *bridge;
 
@@ -410,7 +394,7 @@
 	return 0;
 }
 
-static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
+static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
 {
 	u64 addr;
 	u32 sts;
@@ -450,7 +434,7 @@
 }
 
 
-static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
+static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 {
 	struct ir_table *ir_table;
 	struct page *pages;
@@ -473,14 +457,14 @@
 
 	ir_table->base = page_address(pages);
 
-	iommu_set_intr_remapping(iommu, mode);
+	iommu_set_irq_remapping(iommu, mode);
 	return 0;
 }
 
 /*
  * Disable Interrupt Remapping.
  */
-static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
+static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
 {
 	unsigned long flags;
 	u32 sts;
@@ -519,11 +503,11 @@
 	return dmar->flags & DMAR_X2APIC_OPT_OUT;
 }
 
-int __init intr_remapping_supported(void)
+static int __init intel_irq_remapping_supported(void)
 {
 	struct dmar_drhd_unit *drhd;
 
-	if (disable_intremap)
+	if (disable_irq_remap)
 		return 0;
 
 	if (!dmar_ir_support())
@@ -539,7 +523,7 @@
 	return 1;
 }
 
-int __init enable_intr_remapping(void)
+static int __init intel_enable_irq_remapping(void)
 {
 	struct dmar_drhd_unit *drhd;
 	int setup = 0;
@@ -577,7 +561,7 @@
 		 * Disable intr remapping and queued invalidation, if already
 		 * enabled prior to OS handover.
 		 */
-		iommu_disable_intr_remapping(iommu);
+		iommu_disable_irq_remapping(iommu);
 
 		dmar_disable_qi(iommu);
 	}
@@ -623,7 +607,7 @@
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
-		if (setup_intr_remapping(iommu, eim))
+		if (intel_setup_irq_remapping(iommu, eim))
 			goto error;
 
 		setup = 1;
@@ -632,7 +616,7 @@
 	if (!setup)
 		goto error;
 
-	intr_remapping_enabled = 1;
+	irq_remapping_enabled = 1;
 	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");
 
 	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
@@ -775,14 +759,14 @@
 
 int __init ir_dev_scope_init(void)
 {
-	if (!intr_remapping_enabled)
+	if (!irq_remapping_enabled)
 		return 0;
 
 	return dmar_dev_scope_init();
 }
 rootfs_initcall(ir_dev_scope_init);
 
-void disable_intr_remapping(void)
+static void disable_irq_remapping(void)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu = NULL;
@@ -794,11 +778,11 @@
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
-		iommu_disable_intr_remapping(iommu);
+		iommu_disable_irq_remapping(iommu);
 	}
 }
 
-int reenable_intr_remapping(int eim)
+static int reenable_irq_remapping(int eim)
 {
 	struct dmar_drhd_unit *drhd;
 	int setup = 0;
@@ -816,7 +800,7 @@
 			continue;
 
 		/* Set up interrupt remapping for iommu.*/
-		iommu_set_intr_remapping(iommu, eim);
+		iommu_set_irq_remapping(iommu, eim);
 		setup = 1;
 	}
 
@@ -832,3 +816,254 @@
 	return -1;
 }
 
+static void prepare_irte(struct irte *irte, int vector,
+			 unsigned int dest)
+{
+	memset(irte, 0, sizeof(*irte));
+
+	irte->present = 1;
+	irte->dst_mode = apic->irq_dest_mode;
+	/*
+	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
+	 * actual level or edge trigger will be set up in the IO-APIC
+	 * RTE. This will help simplify level triggered irq migration.
+	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
+	 * irq migration in the presence of interrupt-remapping.
+	*/
+	irte->trigger_mode = 0;
+	irte->dlvry_mode = apic->irq_delivery_mode;
+	irte->vector = vector;
+	irte->dest_id = IRTE_DEST(dest);
+	irte->redir_hint = 1;
+}
+
+static int intel_setup_ioapic_entry(int irq,
+				    struct IO_APIC_route_entry *route_entry,
+				    unsigned int destination, int vector,
+				    struct io_apic_irq_attr *attr)
+{
+	int ioapic_id = mpc_ioapic_id(attr->ioapic);
+	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
+	struct IR_IO_APIC_route_entry *entry;
+	struct irte irte;
+	int index;
+
+	if (!iommu) {
+		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
+		return -ENODEV;
+	}
+
+	entry = (struct IR_IO_APIC_route_entry *)route_entry;
+
+	index = alloc_irte(iommu, irq, 1);
+	if (index < 0) {
+		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
+		return -ENOMEM;
+	}
+
+	prepare_irte(&irte, vector, destination);
+
+	/* Set source-id of interrupt request */
+	set_ioapic_sid(&irte, ioapic_id);
+
+	modify_irte(irq, &irte);
+
+	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
+		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
+		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
+		"Avail:%X Vector:%02X Dest:%08X "
+		"SID:%04X SQ:%X SVT:%X)\n",
+		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
+		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
+		irte.avail, irte.vector, irte.dest_id,
+		irte.sid, irte.sq, irte.svt);
+
+	memset(entry, 0, sizeof(*entry));
+
+	entry->index2	= (index >> 15) & 0x1;
+	entry->zero	= 0;
+	entry->format	= 1;
+	entry->index	= (index & 0x7fff);
+	/*
+	 * IO-APIC RTE will be configured with virtual vector.
+	 * irq handler will do the explicit EOI to the io-apic.
+	 */
+	entry->vector	= attr->ioapic_pin;
+	entry->mask	= 0;			/* enable IRQ */
+	entry->trigger	= attr->trigger;
+	entry->polarity	= attr->polarity;
+
+	/* Mask level triggered irqs.
+	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
+	 */
+	if (attr->trigger)
+		entry->mask = 1;
+
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Migrate the IO-APIC irq in the presence of intr-remapping.
+ *
+ * For both level and edge triggered, irq migration is a simple atomic update
+ * (of vector and cpu destination) of the IRTE and a flush of the hardware cache.
+ *
+ * For level triggered, we eliminate the io-apic RTE modification (with the
+ * updated vector information) by using a virtual vector (io-apic pin number).
+ * The real vector used for interrupting the cpu comes from the
+ * interrupt-remapping table entry.
+ *
+ * As the migration is a simple atomic update of the IRTE, the same mechanism
+ * is used to migrate MSI irqs in the presence of interrupt-remapping.
+ */
+static int
+intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+			  bool force)
+{
+	struct irq_cfg *cfg = data->chip_data;
+	unsigned int dest, irq = data->irq;
+	struct irte irte;
+
+	if (!cpumask_intersects(mask, cpu_online_mask))
+		return -EINVAL;
+
+	if (get_irte(irq, &irte))
+		return -EBUSY;
+
+	if (assign_irq_vector(irq, cfg, mask))
+		return -EBUSY;
+
+	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
+
+	irte.vector = cfg->vector;
+	irte.dest_id = IRTE_DEST(dest);
+
+	/*
+	 * Atomically update the IRTE with the new destination and
+	 * vector, and flush the interrupt entry cache.
+	 */
+	modify_irte(irq, &irte);
+
+	/*
+	 * After this point, all the interrupts will start arriving
+	 * at the new destination. So, time to clean up the previous
+	 * vector allocation.
+	 */
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
+
+	cpumask_copy(data->affinity, mask);
+	return 0;
+}
+#endif
+
+static void intel_compose_msi_msg(struct pci_dev *pdev,
+				  unsigned int irq, unsigned int dest,
+				  struct msi_msg *msg, u8 hpet_id)
+{
+	struct irq_cfg *cfg;
+	struct irte irte;
+	u16 sub_handle = 0;
+	int ir_index;
+
+	cfg = irq_get_chip_data(irq);
+
+	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
+	BUG_ON(ir_index == -1);
+
+	prepare_irte(&irte, cfg->vector, dest);
+
+	/* Set source-id of interrupt request */
+	if (pdev)
+		set_msi_sid(&irte, pdev);
+	else
+		set_hpet_sid(&irte, hpet_id);
+
+	modify_irte(irq, &irte);
+
+	msg->address_hi = MSI_ADDR_BASE_HI;
+	msg->data = sub_handle;
+	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
+			  MSI_ADDR_IR_SHV |
+			  MSI_ADDR_IR_INDEX1(ir_index) |
+			  MSI_ADDR_IR_INDEX2(ir_index);
+}
+
+/*
+ * Map the PCI dev to the corresponding remapping hardware unit
+ * and allocate 'nvec' consecutive interrupt-remapping table entries
+ * in it.
+ */
+static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
+{
+	struct intel_iommu *iommu;
+	int index;
+
+	iommu = map_dev_to_ir(dev);
+	if (!iommu) {
+		printk(KERN_ERR
+		       "Unable to map PCI %s to iommu\n", pci_name(dev));
+		return -ENOENT;
+	}
+
+	index = alloc_irte(iommu, irq, nvec);
+	if (index < 0) {
+		printk(KERN_ERR
+		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
+		       pci_name(dev));
+		return -ENOSPC;
+	}
+	return index;
+}
+
+static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
+			       int index, int sub_handle)
+{
+	struct intel_iommu *iommu;
+
+	iommu = map_dev_to_ir(pdev);
+	if (!iommu)
+		return -ENOENT;
+	/*
+	 * Set up the mapping between the irq and the IRTE base
+	 * index, with the sub_handle pointing to the appropriate
+	 * interrupt remap table entry.
+	 */
+	set_irte_irq(irq, iommu, index, sub_handle);
+
+	return 0;
+}
+
+static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
+{
+	struct intel_iommu *iommu = map_hpet_to_ir(id);
+	int index;
+
+	if (!iommu)
+		return -1;
+
+	index = alloc_irte(iommu, irq, 1);
+	if (index < 0)
+		return -1;
+
+	return 0;
+}
+
+struct irq_remap_ops intel_irq_remap_ops = {
+	.supported		= intel_irq_remapping_supported,
+	.prepare		= dmar_table_init,
+	.enable			= intel_enable_irq_remapping,
+	.disable		= disable_irq_remapping,
+	.reenable		= reenable_irq_remapping,
+	.enable_faulting	= enable_drhd_fault_handling,
+	.setup_ioapic_entry	= intel_setup_ioapic_entry,
+#ifdef CONFIG_SMP
+	.set_affinity		= intel_ioapic_set_affinity,
+#endif
+	.free_irq		= free_irte,
+	.compose_msi_msg	= intel_compose_msi_msg,
+	.msi_alloc_irq		= intel_msi_alloc_irq,
+	.msi_setup_irq		= intel_msi_setup_irq,
+	.setup_hpet_msi		= intel_setup_hpet_msi,
+};
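
The remap-format IO-APIC RTE written by intel_setup_ioapic_entry() above cannot hold the allocated IRTE index in a single field, so the 16-bit index is split into a 15-bit 'index' and a 1-bit 'index2'; the RTE's vector field, meanwhile, carries the IO-APIC pin number while the real vector lives only in the IRTE. A minimal sketch of the index split (illustration only, not part of the patch; helper names are made up):

static inline void split_irte_index(unsigned int index,
				    unsigned int *lo15, unsigned int *hi1)
{
	*lo15 = index & 0x7fff;		/* bits 0..14 -> entry->index  */
	*hi1  = (index >> 15) & 0x1;	/* bit 15     -> entry->index2 */
}

static inline unsigned int join_irte_index(unsigned int lo15, unsigned int hi1)
{
	return (hi1 << 15) | lo15;
}
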
diff --git a/drivers/iommu/intr_remapping.h b/drivers/iommu/intr_remapping.h
deleted file mode 100644
index 5662fec..0000000
--- a/drivers/iommu/intr_remapping.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#include <linux/intel-iommu.h>
-
-struct ioapic_scope {
-	struct intel_iommu *iommu;
-	unsigned int id;
-	unsigned int bus;	/* PCI bus number */
-	unsigned int devfn;	/* PCI devfn number */
-};
-
-struct hpet_scope {
-	struct intel_iommu *iommu;
-	u8 id;
-	unsigned int bus;
-	unsigned int devfn;
-};
-
-#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
new file mode 100644
index 0000000..40cda8e
--- /dev/null
+++ b/drivers/iommu/irq_remapping.c
@@ -0,0 +1,166 @@
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include "irq_remapping.h"
+
+int irq_remapping_enabled;
+
+int disable_irq_remap;
+int disable_sourceid_checking;
+int no_x2apic_optout;
+
+static struct irq_remap_ops *remap_ops;
+
+static __init int setup_nointremap(char *str)
+{
+	disable_irq_remap = 1;
+	return 0;
+}
+early_param("nointremap", setup_nointremap);
+
+static __init int setup_irqremap(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	while (*str) {
+		if (!strncmp(str, "on", 2))
+			disable_irq_remap = 0;
+		else if (!strncmp(str, "off", 3))
+			disable_irq_remap = 1;
+		else if (!strncmp(str, "nosid", 5))
+			disable_sourceid_checking = 1;
+		else if (!strncmp(str, "no_x2apic_optout", 16))
+			no_x2apic_optout = 1;
+
+		str += strcspn(str, ",");
+		while (*str == ',')
+			str++;
+	}
+
+	return 0;
+}
+early_param("intremap", setup_irqremap);
+
+void __init setup_irq_remapping_ops(void)
+{
+	remap_ops = &intel_irq_remap_ops;
+}
+
+int irq_remapping_supported(void)
+{
+	if (disable_irq_remap)
+		return 0;
+
+	if (!remap_ops || !remap_ops->supported)
+		return 0;
+
+	return remap_ops->supported();
+}
+
+int __init irq_remapping_prepare(void)
+{
+	if (!remap_ops || !remap_ops->prepare)
+		return -ENODEV;
+
+	return remap_ops->prepare();
+}
+
+int __init irq_remapping_enable(void)
+{
+	if (!remap_ops || !remap_ops->enable)
+		return -ENODEV;
+
+	return remap_ops->enable();
+}
+
+void irq_remapping_disable(void)
+{
+	if (!remap_ops || !remap_ops->disable)
+		return;
+
+	remap_ops->disable();
+}
+
+int irq_remapping_reenable(int mode)
+{
+	if (!remap_ops || !remap_ops->reenable)
+		return 0;
+
+	return remap_ops->reenable(mode);
+}
+
+int __init irq_remap_enable_fault_handling(void)
+{
+	if (!remap_ops || !remap_ops->enable_faulting)
+		return -ENODEV;
+
+	return remap_ops->enable_faulting();
+}
+
+int setup_ioapic_remapped_entry(int irq,
+				struct IO_APIC_route_entry *entry,
+				unsigned int destination, int vector,
+				struct io_apic_irq_attr *attr)
+{
+	if (!remap_ops || !remap_ops->setup_ioapic_entry)
+		return -ENODEV;
+
+	return remap_ops->setup_ioapic_entry(irq, entry, destination,
+					     vector, attr);
+}
+
+#ifdef CONFIG_SMP
+int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
+			      bool force)
+{
+	if (!remap_ops || !remap_ops->set_affinity)
+		return 0;
+
+	return remap_ops->set_affinity(data, mask, force);
+}
+#endif
+
+void free_remapped_irq(int irq)
+{
+	if (!remap_ops || !remap_ops->free_irq)
+		return;
+
+	remap_ops->free_irq(irq);
+}
+
+void compose_remapped_msi_msg(struct pci_dev *pdev,
+			      unsigned int irq, unsigned int dest,
+			      struct msi_msg *msg, u8 hpet_id)
+{
+	if (!remap_ops || !remap_ops->compose_msi_msg)
+		return;
+
+	remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+}
+
+int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
+{
+	if (!remap_ops || !remap_ops->msi_alloc_irq)
+		return -ENODEV;
+
+	return remap_ops->msi_alloc_irq(pdev, irq, nvec);
+}
+
+int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
+			   int index, int sub_handle)
+{
+	if (!remap_ops || !remap_ops->msi_setup_irq)
+		return -ENODEV;
+
+	return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle);
+}
+
+int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
+{
+	if (!remap_ops || !remap_ops->setup_hpet_msi)
+		return -ENODEV;
+
+	return remap_ops->setup_hpet_msi(irq, id);
+}
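
The file above is the new generic dispatch layer: every entry point checks for a registered ops table and a non-NULL callback, then forwards to it, so callers never reference Intel symbols directly. A rough, schematic sketch of the intended calling order (the function name below is made up and the real x86 call sites are outside this excerpt):

static void example_setup_irq_remapping(void)
{
	setup_irq_remapping_ops();	/* currently selects intel_irq_remap_ops */

	if (!irq_remapping_supported())
		return;			/* hardware or command line says no */

	if (irq_remapping_prepare())
		return;			/* e.g. DMAR table parsing failed */

	if (irq_remapping_enable() < 0)
		pr_info("IRQ remapping could not be enabled\n");
}
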
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
new file mode 100644
index 0000000..be9d729
--- /dev/null
+++ b/drivers/iommu/irq_remapping.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2012 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ * This header file contains declarations that are shared between the
+ * interrupt remapping drivers but need not be visible outside the IOMMU layer.
+ */
+
+#ifndef __IRQ_REMAPPING_H
+#define __IRQ_REMAPPING_H
+
+#ifdef CONFIG_IRQ_REMAP
+
+struct IO_APIC_route_entry;
+struct io_apic_irq_attr;
+struct irq_data;
+struct cpumask;
+struct pci_dev;
+struct msi_msg;
+
+extern int disable_irq_remap;
+extern int disable_sourceid_checking;
+extern int no_x2apic_optout;
+
+struct irq_remap_ops {
+	/* Check whether Interrupt Remapping is supported */
+	int (*supported)(void);
+
+	/* Initializes hardware and makes it ready for remapping interrupts */
+	int  (*prepare)(void);
+
+	/* Enables the remapping hardware */
+	int  (*enable)(void);
+
+	/* Disables the remapping hardware */
+	void (*disable)(void);
+
+	/* Reenables the remapping hardware */
+	int  (*reenable)(int);
+
+	/* Enable fault handling */
+	int  (*enable_faulting)(void);
+
+	/* IO-APIC setup routine */
+	int (*setup_ioapic_entry)(int irq, struct IO_APIC_route_entry *,
+				  unsigned int, int,
+				  struct io_apic_irq_attr *);
+
+#ifdef CONFIG_SMP
+	/* Set the CPU affinity of a remapped interrupt */
+	int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
+			    bool force);
+#endif
+
+	/* Free an IRQ */
+	int (*free_irq)(int);
+
+	/* Create MSI msg to use for interrupt remapping */
+	void (*compose_msi_msg)(struct pci_dev *,
+				unsigned int, unsigned int,
+				struct msi_msg *, u8);
+
+	/* Allocate remapping resources for MSI */
+	int (*msi_alloc_irq)(struct pci_dev *, int, int);
+
+	/* Setup the remapped MSI irq */
+	int (*msi_setup_irq)(struct pci_dev *, unsigned int, int, int);
+
+	/* Setup interrupt remapping for an HPET MSI */
+	int (*setup_hpet_msi)(unsigned int, unsigned int);
+};
+
+extern struct irq_remap_ops intel_irq_remap_ops;
+
+#endif /* CONFIG_IRQ_REMAP */
+
+#endif /* __IRQ_REMAPPING_H */
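
The ops table defined above is what makes the layer driver-agnostic: a second remapping driver only needs to fill in its own struct irq_remap_ops and be selected in setup_irq_remapping_ops(). A hypothetical sketch (every name below is made up for illustration):

#include "irq_remapping.h"

static int example_supported(void)	{ return 1; }
static int example_prepare(void)	{ return 0; }
static int example_enable(void)		{ return 0; }
static void example_disable(void)	{ }

struct irq_remap_ops example_irq_remap_ops = {
	.supported	= example_supported,
	.prepare	= example_prepare,
	.enable		= example_enable,
	.disable	= example_disable,
	/* remaining callbacks are optional; the wrappers check for NULL */
};
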
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index b902794..38c4bd8 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -336,11 +336,6 @@
 capincci_alloc_minor(struct capidev *cdev, struct capincci *np) { }
 static inline void capincci_free_minor(struct capincci *np) { }
 
-static inline unsigned int capincci_minor_opencount(struct capincci *np)
-{
-	return 0;
-}
-
 #endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
 
 static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci)
@@ -372,6 +367,7 @@
 		}
 }
 
+#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
 static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
 {
 	struct capincci *np;
@@ -382,7 +378,6 @@
 	return NULL;
 }
 
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
 /* -------- handle data queue --------------------------------------- */
 
 static struct sk_buff *
@@ -578,8 +573,8 @@
 	struct tty_struct *tty;
 	struct capiminor *mp;
 	u16 datahandle;
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
 	struct capincci *np;
+#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
 
 	mutex_lock(&cdev->lock);
 
@@ -597,6 +592,12 @@
 		goto unlock_out;
 	}
 
+#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
+	skb_queue_tail(&cdev->recvqueue, skb);
+	wake_up_interruptible(&cdev->recvwait);
+
+#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+
 	np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data));
 	if (!np) {
 		printk(KERN_ERR "BUG: capi_signal: ncci not found\n");
@@ -605,12 +606,6 @@
 		goto unlock_out;
 	}
 
-#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
-	skb_queue_tail(&cdev->recvqueue, skb);
-	wake_up_interruptible(&cdev->recvwait);
-
-#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
-
 	mp = np->minorp;
 	if (!mp) {
 		skb_queue_tail(&cdev->recvqueue, skb);
@@ -786,7 +781,6 @@
 		return retval;
 
 	case CAPI_GET_VERSION:
-	{
 		if (copy_from_user(&data.contr, argp,
 				   sizeof(data.contr)))
 			return -EFAULT;
@@ -796,11 +790,9 @@
 		if (copy_to_user(argp, &data.version,
 				 sizeof(data.version)))
 			return -EFAULT;
-	}
-	return 0;
+		return 0;
 
 	case CAPI_GET_SERIAL:
-	{
 		if (copy_from_user(&data.contr, argp,
 				   sizeof(data.contr)))
 			return -EFAULT;
@@ -810,10 +802,9 @@
 		if (copy_to_user(argp, data.serial,
 				 sizeof(data.serial)))
 			return -EFAULT;
-	}
-	return 0;
+		return 0;
+
 	case CAPI_GET_PROFILE:
-	{
 		if (copy_from_user(&data.contr, argp,
 				   sizeof(data.contr)))
 			return -EFAULT;
@@ -837,11 +828,9 @@
 		}
 		if (retval)
 			return -EFAULT;
-	}
-	return 0;
+		return 0;
 
 	case CAPI_GET_MANUFACTURER:
-	{
 		if (copy_from_user(&data.contr, argp,
 				   sizeof(data.contr)))
 			return -EFAULT;
@@ -853,8 +842,8 @@
 				 sizeof(data.manufacturer)))
 			return -EFAULT;
 
-	}
-	return 0;
+		return 0;
+
 	case CAPI_GET_ERRCODE:
 		data.errcode = cdev->errcode;
 		cdev->errcode = CAPI_NOERROR;
@@ -870,8 +859,7 @@
 			return 0;
 		return -ENXIO;
 
-	case CAPI_MANUFACTURER_CMD:
-	{
+	case CAPI_MANUFACTURER_CMD: {
 		struct capi_manufacturer_cmd mcmd;
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
@@ -879,8 +867,6 @@
 			return -EFAULT;
 		return capi20_manufacturer(mcmd.cmd, mcmd.data);
 	}
-	return 0;
-
 	case CAPI_SET_FLAGS:
 	case CAPI_CLR_FLAGS: {
 		unsigned userflags;
@@ -902,6 +888,11 @@
 			return -EFAULT;
 		return 0;
 
+#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
+	case CAPI_NCCI_OPENCOUNT:
+		return 0;
+
+#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
 	case CAPI_NCCI_OPENCOUNT: {
 		struct capincci *nccip;
 		unsigned ncci;
@@ -918,7 +909,6 @@
 		return count;
 	}
 
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
 	case CAPI_NCCI_GETUNIT: {
 		struct capincci *nccip;
 		struct capiminor *mp;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 6f5016b..832bc80 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -1593,7 +1593,7 @@
 		return capidrv_ioctl(c, card);
 
 	switch (c->command) {
-	case ISDN_CMD_DIAL:{
+	case ISDN_CMD_DIAL: {
 		u8 calling[ISDN_MSNLEN + 3];
 		u8 called[ISDN_MSNLEN + 2];
 
@@ -2072,7 +2072,8 @@
 	card->interface.writebuf_skb = if_sendbuf;
 	card->interface.writecmd = NULL;
 	card->interface.readstat = if_readstat;
-	card->interface.features = ISDN_FEATURE_L2_HDLC |
+	card->interface.features =
+		ISDN_FEATURE_L2_HDLC |
 		ISDN_FEATURE_L2_TRANS |
 		ISDN_FEATURE_L3_TRANS |
 		ISDN_FEATURE_P_UNKNOWN |
@@ -2080,7 +2081,8 @@
 		ISDN_FEATURE_L2_X75UI |
 		ISDN_FEATURE_L2_X75BUI;
 	if (profp->support1 & (1 << 2))
-		card->interface.features |= ISDN_FEATURE_L2_V11096 |
+		card->interface.features |=
+			ISDN_FEATURE_L2_V11096 |
 			ISDN_FEATURE_L2_V11019 |
 			ISDN_FEATURE_L2_V11038;
 	if (profp->support1 & (1 << 8))
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index afa0802..3b9278b 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -410,10 +410,10 @@
 		if (!(ucs->basstate & BS_RESETTING))
 			ucs->pending = 0;
 		break;
-		/*
-		 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
-		 * and should never end up here
-		 */
+	/*
+	 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
+	 * and should never end up here
+	 */
 	default:
 		dev_warn(&ucs->interface->dev,
 			 "unknown pending request 0x%02x cleared\n",
@@ -877,8 +877,7 @@
 		for (i = 0; i < BAS_NUMFRAMES; i++) {
 			ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
 			if (unlikely(urb->iso_frame_desc[i].status != 0 &&
-				     urb->iso_frame_desc[i].status !=
-				     -EINPROGRESS))
+				     urb->iso_frame_desc[i].status != -EINPROGRESS))
 				ubc->loststatus = urb->iso_frame_desc[i].status;
 			urb->iso_frame_desc[i].status = 0;
 			urb->iso_frame_desc[i].actual_length = 0;
@@ -2078,16 +2077,14 @@
 /* Free hardware dependent part of the B channel structure
  * parameter:
  *	bcs	B channel structure
- * return value:
- *	!=0 on success
  */
-static int gigaset_freebcshw(struct bc_state *bcs)
+static void gigaset_freebcshw(struct bc_state *bcs)
 {
 	struct bas_bc_state *ubc = bcs->hw.bas;
 	int i;
 
 	if (!ubc)
-		return 0;
+		return;
 
 	/* kill URBs and tasklets before freeing - better safe than sorry */
 	ubc->running = 0;
@@ -2105,14 +2102,13 @@
 	kfree(ubc->isooutbuf);
 	kfree(ubc);
 	bcs->hw.bas = NULL;
-	return 1;
 }
 
 /* Initialize hardware dependent part of the B channel structure
  * parameter:
  *	bcs	B channel structure
  * return value:
- *	!=0 on success
+ *	0 on success, error code < 0 on failure
  */
 static int gigaset_initbcshw(struct bc_state *bcs)
 {
@@ -2122,7 +2118,7 @@
 	bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
 	if (!ubc) {
 		pr_err("out of memory\n");
-		return 0;
+		return -ENOMEM;
 	}
 
 	ubc->running = 0;
@@ -2139,7 +2135,7 @@
 		pr_err("out of memory\n");
 		kfree(ubc);
 		bcs->hw.bas = NULL;
-		return 0;
+		return -ENOMEM;
 	}
 	tasklet_init(&ubc->sent_tasklet,
 		     write_iso_tasklet, (unsigned long) bcs);
@@ -2164,7 +2160,7 @@
 	ubc->stolen0s = 0;
 	tasklet_init(&ubc->rcvd_tasklet,
 		     read_iso_tasklet, (unsigned long) bcs);
-	return 1;
+	return 0;
 }
 
 static void gigaset_reinitbcshw(struct bc_state *bcs)
@@ -2187,6 +2183,12 @@
 	cs->hw.bas = NULL;
 }
 
+/* Initialize hardware dependent part of the cardstate structure
+ * parameter:
+ *	cs	cardstate structure
+ * return value:
+ *	0 on success, error code < 0 on failure
+ */
 static int gigaset_initcshw(struct cardstate *cs)
 {
 	struct bas_cardstate *ucs;
@@ -2194,13 +2196,13 @@
 	cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL);
 	if (!ucs) {
 		pr_err("out of memory\n");
-		return 0;
+		return -ENOMEM;
 	}
 	ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
 	if (!ucs->int_in_buf) {
 		kfree(ucs);
 		pr_err("out of memory\n");
-		return 0;
+		return -ENOMEM;
 	}
 
 	ucs->urb_cmd_in = NULL;
@@ -2219,7 +2221,7 @@
 	init_waitqueue_head(&ucs->waitqueue);
 	INIT_WORK(&ucs->int_in_wq, int_in_work);
 
-	return 1;
+	return 0;
 }
 
 /* freeurbs
@@ -2379,18 +2381,20 @@
 	/* save address of controller structure */
 	usb_set_intfdata(interface, cs);
 
-	if (!gigaset_start(cs))
+	rc = gigaset_start(cs);
+	if (rc < 0)
 		goto error;
 
 	return 0;
 
 allocerr:
 	dev_err(cs->dev, "could not allocate URBs\n");
+	rc = -ENOMEM;
 error:
 	freeurbs(cs);
 	usb_set_intfdata(interface, NULL);
 	gigaset_freecs(cs);
-	return -ENODEV;
+	return rc;
 }
 
 /* gigaset_disconnect
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 343b5c8..27e4a3e 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -14,6 +14,7 @@
 #include "gigaset.h"
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/ratelimit.h>
 #include <linux/isdn/capilli.h>
 #include <linux/isdn/capicmd.h>
 #include <linux/isdn/capiutil.h>
@@ -108,51 +109,35 @@
 	u8 *bc;
 	u8 *hlc;
 } cip2bchlc[] = {
-	[1] = { "8090A3", NULL },
-	/* Speech (A-law) */
-	[2] = { "8890", NULL },
-	/* Unrestricted digital information */
-	[3] = { "8990", NULL },
-	/* Restricted digital information */
-	[4] = { "9090A3", NULL },
-	/* 3,1 kHz audio (A-law) */
-	[5] = { "9190", NULL },
-	/* 7 kHz audio */
-	[6] = { "9890", NULL },
-	/* Video */
-	[7] = { "88C0C6E6", NULL },
-	/* Packet mode */
-	[8] = { "8890218F", NULL },
-	/* 56 kbit/s rate adaptation */
-	[9] = { "9190A5", NULL },
-	/* Unrestricted digital information with tones/announcements */
-	[16] = { "8090A3", "9181" },
-	/* Telephony */
-	[17] = { "9090A3", "9184" },
-	/* Group 2/3 facsimile */
-	[18] = { "8890", "91A1" },
-	/* Group 4 facsimile Class 1 */
-	[19] = { "8890", "91A4" },
-	/* Teletex service basic and mixed mode
-	   and Group 4 facsimile service Classes II and III */
-	[20] = { "8890", "91A8" },
-	/* Teletex service basic and processable mode */
-	[21] = { "8890", "91B1" },
-	/* Teletex service basic mode */
-	[22] = { "8890", "91B2" },
-	/* International interworking for Videotex */
-	[23] = { "8890", "91B5" },
-	/* Telex */
-	[24] = { "8890", "91B8" },
-	/* Message Handling Systems in accordance with X.400 */
-	[25] = { "8890", "91C1" },
-	/* OSI application in accordance with X.200 */
-	[26] = { "9190A5", "9181" },
-	/* 7 kHz telephony */
-	[27] = { "9190A5", "916001" },
-	/* Video telephony, first connection */
-	[28] = { "8890", "916002" },
-	/* Video telephony, second connection */
+	[1] = { "8090A3", NULL },	/* Speech (A-law) */
+	[2] = { "8890", NULL },		/* Unrestricted digital information */
+	[3] = { "8990", NULL },		/* Restricted digital information */
+	[4] = { "9090A3", NULL },	/* 3,1 kHz audio (A-law) */
+	[5] = { "9190", NULL },		/* 7 kHz audio */
+	[6] = { "9890", NULL },		/* Video */
+	[7] = { "88C0C6E6", NULL },	/* Packet mode */
+	[8] = { "8890218F", NULL },	/* 56 kbit/s rate adaptation */
+	[9] = { "9190A5", NULL },	/* Unrestricted digital information
+					 * with tones/announcements */
+	[16] = { "8090A3", "9181" },	/* Telephony */
+	[17] = { "9090A3", "9184" },	/* Group 2/3 facsimile */
+	[18] = { "8890", "91A1" },	/* Group 4 facsimile Class 1 */
+	[19] = { "8890", "91A4" },	/* Teletex service basic and mixed mode
+					 * and Group 4 facsimile service
+					 * Classes II and III */
+	[20] = { "8890", "91A8" },	/* Teletex service basic and
+					 * processable mode */
+	[21] = { "8890", "91B1" },	/* Teletex service basic mode */
+	[22] = { "8890", "91B2" },	/* International interworking for
+					 * Videotex */
+	[23] = { "8890", "91B5" },	/* Telex */
+	[24] = { "8890", "91B8" },	/* Message Handling Systems
+					 * in accordance with X.400 */
+	[25] = { "8890", "91C1" },	/* OSI application
+					 * in accordance with X.200 */
+	[26] = { "9190A5", "9181" },	/* 7 kHz telephony */
+	[27] = { "9190A5", "916001" },	/* Video telephony, first connection */
+	[28] = { "8890", "916002" },	/* Video telephony, second connection */
 };
 
 /*
@@ -223,10 +208,14 @@
 static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
 {
 #ifdef CONFIG_GIGASET_DEBUG
+	/* dump at most 20 messages in 20 secs */
+	static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20);
 	_cdebbuf *cdb;
 
 	if (!(gigaset_debuglevel & level))
 		return;
+	if (!___ratelimit(&msg_dump_ratelimit, tag))
+		return;
 
 	cdb = capi_cmsg2str(p);
 	if (cdb) {
@@ -1192,7 +1181,9 @@
 			confparam[3] = 2;	/* length */
 			capimsg_setu16(confparam, 4, CapiSuccess);
 			break;
-			/* ToDo: add supported services */
+
+		/* ToDo: add supported services */
+
 		default:
 			dev_notice(cs->dev,
 				   "%s: unsupported supplementary service function 0x%04x\n",
@@ -1766,7 +1757,8 @@
 
 	/* NCPI parameter: not applicable for B3 Transparent */
 	ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
-	send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
+	send_conf(iif, ap, skb,
+		  (cmsg->NCPI && cmsg->NCPI[0]) ?
 		  CapiNcpiNotSupportedByProtocol : CapiSuccess);
 }
 
@@ -1882,6 +1874,9 @@
 
 	/* check for active logical connection */
 	if (bcs->apconnstate >= APCONN_ACTIVE) {
+		/* clear it */
+		bcs->apconnstate = APCONN_SETUP;
+
 		/*
 		 * emit DISCONNECT_B3_IND with cause 0x3301
 		 * use separate cmsg structure, as the content of iif->acmsg
@@ -1906,6 +1901,7 @@
 		}
 		capi_cmsg2message(b3cmsg,
 				  __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN));
+		dump_cmsg(DEBUG_CMD, __func__, b3cmsg);
 		kfree(b3cmsg);
 		capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
 	}
@@ -1966,7 +1962,8 @@
 	/* NCPI parameter: not applicable for B3 Transparent */
 	ignore_cstruct_param(cs, cmsg->NCPI,
 			     "DISCONNECT_B3_REQ", "NCPI");
-	send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
+	send_conf(iif, ap, skb,
+		  (cmsg->NCPI && cmsg->NCPI[0]) ?
 		  CapiNcpiNotSupportedByProtocol : CapiSuccess);
 }
 
@@ -2059,12 +2056,6 @@
 }
 
 /*
- * dump unsupported/ignored messages at most twice per minute,
- * some apps send those very frequently
- */
-static unsigned long ignored_msg_dump_time;
-
-/*
  * unsupported CAPI message handler
  */
 static void do_unsupported(struct gigaset_capi_ctr *iif,
@@ -2073,8 +2064,7 @@
 {
 	/* decode message */
 	capi_message2cmsg(&iif->acmsg, skb->data);
-	if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000))
-		dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
 	send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
 }
 
@@ -2085,11 +2075,9 @@
 		       struct gigaset_capi_appl *ap,
 		       struct sk_buff *skb)
 {
-	if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) {
-		/* decode message */
-		capi_message2cmsg(&iif->acmsg, skb->data);
-		dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
-	}
+	/* decode message */
+	capi_message2cmsg(&iif->acmsg, skb->data);
+	dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
 	dev_kfree_skb_any(skb);
 }
 
@@ -2358,7 +2346,7 @@
  * @cs:		device descriptor structure.
  * @isdnid:	device name.
  *
- * Return value: 1 for success, 0 for failure
+ * Return value: 0 on success, error code < 0 on failure
  */
 int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
@@ -2368,7 +2356,7 @@
 	iif = kmalloc(sizeof(*iif), GFP_KERNEL);
 	if (!iif) {
 		pr_err("%s: out of memory\n", __func__);
-		return 0;
+		return -ENOMEM;
 	}
 
 	/* prepare controller structure */
@@ -2392,12 +2380,12 @@
 	if (rc) {
 		pr_err("attach_capi_ctr failed (%d)\n", rc);
 		kfree(iif);
-		return 0;
+		return rc;
 	}
 
 	cs->iif = iif;
 	cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
-	return 1;
+	return 0;
 }
 
 /**
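
The dump_cmsg() change above replaces the driver's single global printk_timed_ratelimit() timestamp with a per-callsite ratelimit state, which allows a burst (here 20 dumps per 20 seconds) instead of one message per interval. The underlying pattern, as a standalone sketch (the function name is made up):

#include <linux/kernel.h>
#include <linux/ratelimit.h>

static void example_debug_dump(const char *tag)
{
	/* allow at most 20 dumps in any 20 second window */
	static DEFINE_RATELIMIT_STATE(rs, 20 * HZ, 20);

	if (!___ratelimit(&rs, tag))
		return;		/* over budget: silently skip this dump */

	pr_debug("%s: dumping CAPI message\n", tag);
}
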
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 7679270..aa41485 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -194,13 +194,13 @@
 		gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d",
 			bcs->channel);
 		spin_unlock_irqrestore(&bcs->cs->lock, flags);
-		return 0;
+		return -EBUSY;
 	}
 	++bcs->use_count;
 	bcs->busy = 1;
 	gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel);
 	spin_unlock_irqrestore(&bcs->cs->lock, flags);
-	return 1;
+	return 0;
 }
 
 struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
@@ -258,7 +258,7 @@
 			spin_unlock_irqrestore(&cs->lock, flags);
 			gig_dbg(DEBUG_CHANNEL,
 				"could not allocate all channels");
-			return 0;
+			return -EBUSY;
 		}
 	for (i = 0; i < cs->channels; ++i)
 		++cs->bcs[i].use_count;
@@ -266,7 +266,7 @@
 
 	gig_dbg(DEBUG_CHANNEL, "allocated all channels");
 
-	return 1;
+	return 0;
 }
 
 void gigaset_free_channels(struct cardstate *cs)
@@ -362,7 +362,7 @@
 }
 EXPORT_SYMBOL_GPL(gigaset_add_event);
 
-static void free_strings(struct at_state_t *at_state)
+static void clear_at_state(struct at_state_t *at_state)
 {
 	int i;
 
@@ -372,18 +372,13 @@
 	}
 }
 
-static void clear_at_state(struct at_state_t *at_state)
-{
-	free_strings(at_state);
-}
-
-static void dealloc_at_states(struct cardstate *cs)
+static void dealloc_temp_at_states(struct cardstate *cs)
 {
 	struct at_state_t *cur, *next;
 
 	list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
 		list_del(&cur->list);
-		free_strings(cur);
+		clear_at_state(cur);
 		kfree(cur);
 	}
 }
@@ -393,8 +388,7 @@
 	int i;
 
 	gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
-	if (!bcs->cs->ops->freebcshw(bcs))
-		gig_dbg(DEBUG_INIT, "failed");
+	bcs->cs->ops->freebcshw(bcs);
 
 	gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
 	clear_at_state(&bcs->at_state);
@@ -512,7 +506,7 @@
 	case 1: /* error when registering to LL */
 		gig_dbg(DEBUG_INIT, "clearing at_state");
 		clear_at_state(&cs->at_state);
-		dealloc_at_states(cs);
+		dealloc_temp_at_states(cs);
 
 		/* fall through */
 	case 0:	/* error in basic setup */
@@ -571,6 +565,8 @@
  * @inbuf:	buffer structure.
  * @src:	received data.
  * @numbytes:	number of bytes received.
+ *
+ * Return value: !=0 if some data was appended
  */
 int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
 		       unsigned numbytes)
@@ -614,8 +610,8 @@
 EXPORT_SYMBOL_GPL(gigaset_fill_inbuf);
 
 /* Initialize the b-channel structure */
-static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
-					struct cardstate *cs, int channel)
+static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs,
+			   int channel)
 {
 	int i;
 
@@ -654,11 +650,7 @@
 	bcs->apconnstate = 0;
 
 	gig_dbg(DEBUG_INIT, "  setting up bcs[%d]->hw", channel);
-	if (cs->ops->initbcshw(bcs))
-		return bcs;
-
-	gig_dbg(DEBUG_INIT, "  failed");
-	return NULL;
+	return cs->ops->initbcshw(bcs);
 }
 
 /**
@@ -757,7 +749,7 @@
 	cs->cmdbytes = 0;
 
 	gig_dbg(DEBUG_INIT, "setting up iif");
-	if (!gigaset_isdn_regdev(cs, modulename)) {
+	if (gigaset_isdn_regdev(cs, modulename) < 0) {
 		pr_err("error registering ISDN device\n");
 		goto error;
 	}
@@ -765,7 +757,7 @@
 	make_valid(cs, VALID_ID);
 	++cs->cs_init;
 	gig_dbg(DEBUG_INIT, "setting up hw");
-	if (!cs->ops->initcshw(cs))
+	if (cs->ops->initcshw(cs) < 0)
 		goto error;
 
 	++cs->cs_init;
@@ -779,7 +771,7 @@
 	/* set up channel data structures */
 	for (i = 0; i < channels; ++i) {
 		gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
-		if (!gigaset_initbcs(cs->bcs + i, cs, i)) {
+		if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) {
 			pr_err("could not allocate channel %d data\n", i);
 			goto error;
 		}
@@ -848,8 +840,7 @@
 	cs->mstate = MS_UNINITIALIZED;
 
 	clear_at_state(&cs->at_state);
-	dealloc_at_states(cs);
-	free_strings(&cs->at_state);
+	dealloc_temp_at_states(cs);
 	gigaset_at_init(&cs->at_state, NULL, cs, 0);
 
 	cs->inbuf->inputstate = INS_command;
@@ -875,7 +866,7 @@
 
 	for (i = 0; i < cs->channels; ++i) {
 		gigaset_freebcs(cs->bcs + i);
-		if (!gigaset_initbcs(cs->bcs + i, cs, i))
+		if (gigaset_initbcs(cs->bcs + i, cs, i) < 0)
 			pr_err("could not allocate channel %d data\n", i);
 	}
 
@@ -896,14 +887,14 @@
  * waiting for completion of the initialization.
  *
  * Return value:
- *	1 - success, 0 - error
+ *	0 on success, error code < 0 on failure
  */
 int gigaset_start(struct cardstate *cs)
 {
 	unsigned long flags;
 
 	if (mutex_lock_interruptible(&cs->mutex))
-		return 0;
+		return -EBUSY;
 
 	spin_lock_irqsave(&cs->lock, flags);
 	cs->connected = 1;
@@ -927,11 +918,11 @@
 	wait_event(cs->waitqueue, !cs->waiting);
 
 	mutex_unlock(&cs->mutex);
-	return 1;
+	return 0;
 
 error:
 	mutex_unlock(&cs->mutex);
-	return 0;
+	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(gigaset_start);
 
@@ -943,7 +934,7 @@
  * waiting for completion of the shutdown.
  *
  * Return value:
- *	0 - success, -1 - error (no device associated)
+ *	0 - success, -ENODEV - error (no device associated)
  */
 int gigaset_shutdown(struct cardstate *cs)
 {
@@ -951,7 +942,7 @@
 
 	if (!(cs->flags & VALID_MINOR)) {
 		mutex_unlock(&cs->mutex);
-		return -1;
+		return -ENODEV;
 	}
 
 	cs->waiting = 1;
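
The common.c hunks above are part of a conversion that runs through the whole gigaset update: helpers that used to return 1 for success and 0 for failure now return 0 or a negative errno, so call sites test for "< 0" and can propagate the exact error code. The caller-side pattern, as a sketch (the wrapper function is made up; the callee is the real gigaset_isdn_regdev()):

static int example_register(struct cardstate *cs, const char *modulename)
{
	int rc;

	rc = gigaset_isdn_regdev(cs, modulename);	/* now 0 or -errno */
	if (rc < 0)
		return rc;	/* propagate instead of collapsing to 0/1 */

	return 0;
}
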
diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c
index 19b1c77..570c2d5 100644
--- a/drivers/isdn/gigaset/dummyll.c
+++ b/drivers/isdn/gigaset/dummyll.c
@@ -60,7 +60,7 @@
 
 int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
-	return 1;
+	return 0;
 }
 
 void gigaset_isdn_unregdev(struct cardstate *cs)
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 624a825..2e6963d 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -153,103 +153,104 @@
  * action, command */
 
 /* initialize device, set cid mode if possible */
-	{RSP_INIT,	 -1,  -1, SEQ_INIT,		100,  1, {ACT_TIMEOUT} },
+	{RSP_INIT,	 -1,  -1, SEQ_INIT,	100,  1, {ACT_TIMEOUT} },
 
-	{EV_TIMEOUT,	100, 100, -1,			101,  3, {0},	"Z\r"},
-	{RSP_OK,	101, 103, -1,			120,  5, {ACT_GETSTRING},
-	 "+GMR\r"},
+	{EV_TIMEOUT,	100, 100, -1,		101,  3, {0},	"Z\r"},
+	{RSP_OK,	101, 103, -1,		120,  5, {ACT_GETSTRING},
+								"+GMR\r"},
 
-	{EV_TIMEOUT,	101, 101, -1,			102,  5, {0},	"Z\r"},
-	{RSP_ERROR,	101, 101, -1,			102,  5, {0},	"Z\r"},
+	{EV_TIMEOUT,	101, 101, -1,		102,  5, {0},	"Z\r"},
+	{RSP_ERROR,	101, 101, -1,		102,  5, {0},	"Z\r"},
 
-	{EV_TIMEOUT,	102, 102, -1,			108,  5, {ACT_SETDLE1},
-	 "^SDLE=0\r"},
-	{RSP_OK,	108, 108, -1,			104, -1},
-	{RSP_ZDLE,	104, 104,  0,			103,  5, {0},	"Z\r"},
-	{EV_TIMEOUT,	104, 104, -1,			  0,  0, {ACT_FAILINIT} },
-	{RSP_ERROR,	108, 108, -1,			  0,  0, {ACT_FAILINIT} },
+	{EV_TIMEOUT,	102, 102, -1,		108,  5, {ACT_SETDLE1},
+								"^SDLE=0\r"},
+	{RSP_OK,	108, 108, -1,		104, -1},
+	{RSP_ZDLE,	104, 104,  0,		103,  5, {0},	"Z\r"},
+	{EV_TIMEOUT,	104, 104, -1,		  0,  0, {ACT_FAILINIT} },
+	{RSP_ERROR,	108, 108, -1,		  0,  0, {ACT_FAILINIT} },
 
-	{EV_TIMEOUT,	108, 108, -1,			105,  2, {ACT_SETDLE0,
-								  ACT_HUPMODEM,
-								  ACT_TIMEOUT} },
-	{EV_TIMEOUT,	105, 105, -1,			103,  5, {0},	"Z\r"},
+	{EV_TIMEOUT,	108, 108, -1,		105,  2, {ACT_SETDLE0,
+							  ACT_HUPMODEM,
+							  ACT_TIMEOUT} },
+	{EV_TIMEOUT,	105, 105, -1,		103,  5, {0},	"Z\r"},
 
-	{RSP_ERROR,	102, 102, -1,			107,  5, {0},	"^GETPRE\r"},
-	{RSP_OK,	107, 107, -1,			  0,  0, {ACT_CONFIGMODE} },
-	{RSP_ERROR,	107, 107, -1,			  0,  0, {ACT_FAILINIT} },
-	{EV_TIMEOUT,	107, 107, -1,			  0,  0, {ACT_FAILINIT} },
+	{RSP_ERROR,	102, 102, -1,		107,  5, {0},	"^GETPRE\r"},
+	{RSP_OK,	107, 107, -1,		  0,  0, {ACT_CONFIGMODE} },
+	{RSP_ERROR,	107, 107, -1,		  0,  0, {ACT_FAILINIT} },
+	{EV_TIMEOUT,	107, 107, -1,		  0,  0, {ACT_FAILINIT} },
 
-	{RSP_ERROR,	103, 103, -1,			  0,  0, {ACT_FAILINIT} },
-	{EV_TIMEOUT,	103, 103, -1,			  0,  0, {ACT_FAILINIT} },
+	{RSP_ERROR,	103, 103, -1,		  0,  0, {ACT_FAILINIT} },
+	{EV_TIMEOUT,	103, 103, -1,		  0,  0, {ACT_FAILINIT} },
 
-	{RSP_STRING,	120, 120, -1,			121, -1, {ACT_SETVER} },
+	{RSP_STRING,	120, 120, -1,		121, -1, {ACT_SETVER} },
 
-	{EV_TIMEOUT,	120, 121, -1,			  0,  0, {ACT_FAILVER,
-								  ACT_INIT} },
-	{RSP_ERROR,	120, 121, -1,			  0,  0, {ACT_FAILVER,
-								  ACT_INIT} },
-	{RSP_OK,	121, 121, -1,			  0,  0, {ACT_GOTVER,
-								  ACT_INIT} },
+	{EV_TIMEOUT,	120, 121, -1,		  0,  0, {ACT_FAILVER,
+							  ACT_INIT} },
+	{RSP_ERROR,	120, 121, -1,		  0,  0, {ACT_FAILVER,
+							  ACT_INIT} },
+	{RSP_OK,	121, 121, -1,		  0,  0, {ACT_GOTVER,
+							  ACT_INIT} },
+	{RSP_NONE,	121, 121, -1,		120,  0, {ACT_GETSTRING} },
 
 /* leave dle mode */
-	{RSP_INIT,	  0,   0, SEQ_DLE0,		201,  5, {0},	"^SDLE=0\r"},
-	{RSP_OK,	201, 201, -1,			202, -1},
-	{RSP_ZDLE,	202, 202,  0,			  0,  0, {ACT_DLE0} },
-	{RSP_NODEV,	200, 249, -1,			  0,  0, {ACT_FAKEDLE0} },
-	{RSP_ERROR,	200, 249, -1,			  0,  0, {ACT_FAILDLE0} },
-	{EV_TIMEOUT,	200, 249, -1,			  0,  0, {ACT_FAILDLE0} },
+	{RSP_INIT,	  0,   0, SEQ_DLE0,	201,  5, {0},	"^SDLE=0\r"},
+	{RSP_OK,	201, 201, -1,		202, -1},
+	{RSP_ZDLE,	202, 202,  0,		  0,  0, {ACT_DLE0} },
+	{RSP_NODEV,	200, 249, -1,		  0,  0, {ACT_FAKEDLE0} },
+	{RSP_ERROR,	200, 249, -1,		  0,  0, {ACT_FAILDLE0} },
+	{EV_TIMEOUT,	200, 249, -1,		  0,  0, {ACT_FAILDLE0} },
 
 /* enter dle mode */
-	{RSP_INIT,	  0,   0, SEQ_DLE1,		251,  5, {0},	"^SDLE=1\r"},
-	{RSP_OK,	251, 251, -1,			252, -1},
-	{RSP_ZDLE,	252, 252,  1,			  0,  0, {ACT_DLE1} },
-	{RSP_ERROR,	250, 299, -1,			  0,  0, {ACT_FAILDLE1} },
-	{EV_TIMEOUT,	250, 299, -1,			  0,  0, {ACT_FAILDLE1} },
+	{RSP_INIT,	  0,   0, SEQ_DLE1,	251,  5, {0},	"^SDLE=1\r"},
+	{RSP_OK,	251, 251, -1,		252, -1},
+	{RSP_ZDLE,	252, 252,  1,		  0,  0, {ACT_DLE1} },
+	{RSP_ERROR,	250, 299, -1,		  0,  0, {ACT_FAILDLE1} },
+	{EV_TIMEOUT,	250, 299, -1,		  0,  0, {ACT_FAILDLE1} },
 
 /* incoming call */
-	{RSP_RING,	 -1,  -1, -1,			 -1, -1, {ACT_RING} },
+	{RSP_RING,	 -1,  -1, -1,		 -1, -1, {ACT_RING} },
 
 /* get cid */
-	{RSP_INIT,	  0,   0, SEQ_CID,		301,  5, {0},	"^SGCI?\r"},
-	{RSP_OK,	301, 301, -1,			302, -1},
-	{RSP_ZGCI,	302, 302, -1,			  0,  0, {ACT_CID} },
-	{RSP_ERROR,	301, 349, -1,			  0,  0, {ACT_FAILCID} },
-	{EV_TIMEOUT,	301, 349, -1,			  0,  0, {ACT_FAILCID} },
+	{RSP_INIT,	  0,   0, SEQ_CID,	301,  5, {0},	"^SGCI?\r"},
+	{RSP_OK,	301, 301, -1,		302, -1},
+	{RSP_ZGCI,	302, 302, -1,		  0,  0, {ACT_CID} },
+	{RSP_ERROR,	301, 349, -1,		  0,  0, {ACT_FAILCID} },
+	{EV_TIMEOUT,	301, 349, -1,		  0,  0, {ACT_FAILCID} },
 
 /* enter cid mode */
-	{RSP_INIT,	  0,   0, SEQ_CIDMODE,		150,  5, {0},	"^SGCI=1\r"},
-	{RSP_OK,	150, 150, -1,			  0,  0, {ACT_CMODESET} },
-	{RSP_ERROR,	150, 150, -1,			  0,  0, {ACT_FAILCMODE} },
-	{EV_TIMEOUT,	150, 150, -1,			  0,  0, {ACT_FAILCMODE} },
+	{RSP_INIT,	  0,   0, SEQ_CIDMODE,	150,  5, {0},	"^SGCI=1\r"},
+	{RSP_OK,	150, 150, -1,		  0,  0, {ACT_CMODESET} },
+	{RSP_ERROR,	150, 150, -1,		  0,  0, {ACT_FAILCMODE} },
+	{EV_TIMEOUT,	150, 150, -1,		  0,  0, {ACT_FAILCMODE} },
 
 /* leave cid mode */
-	{RSP_INIT,	  0,   0, SEQ_UMMODE,		160,  5, {0},	"Z\r"},
-	{RSP_OK,	160, 160, -1,			  0,  0, {ACT_UMODESET} },
-	{RSP_ERROR,	160, 160, -1,			  0,  0, {ACT_FAILUMODE} },
-	{EV_TIMEOUT,	160, 160, -1,			  0,  0, {ACT_FAILUMODE} },
+	{RSP_INIT,	  0,   0, SEQ_UMMODE,	160,  5, {0},	"Z\r"},
+	{RSP_OK,	160, 160, -1,		  0,  0, {ACT_UMODESET} },
+	{RSP_ERROR,	160, 160, -1,		  0,  0, {ACT_FAILUMODE} },
+	{EV_TIMEOUT,	160, 160, -1,		  0,  0, {ACT_FAILUMODE} },
 
 /* abort getting cid */
-	{RSP_INIT,	  0,   0, SEQ_NOCID,		  0,  0, {ACT_ABORTCID} },
+	{RSP_INIT,	  0,   0, SEQ_NOCID,	  0,  0, {ACT_ABORTCID} },
 
 /* reset */
-	{RSP_INIT,	  0,   0, SEQ_SHUTDOWN,		504,  5, {0},	"Z\r"},
-	{RSP_OK,	504, 504, -1,			  0,  0, {ACT_SDOWN} },
-	{RSP_ERROR,	501, 599, -1,			  0,  0, {ACT_FAILSDOWN} },
-	{EV_TIMEOUT,	501, 599, -1,			  0,  0, {ACT_FAILSDOWN} },
-	{RSP_NODEV,	501, 599, -1,			  0,  0, {ACT_FAKESDOWN} },
+	{RSP_INIT,	  0,   0, SEQ_SHUTDOWN,	504,  5, {0},	"Z\r"},
+	{RSP_OK,	504, 504, -1,		  0,  0, {ACT_SDOWN} },
+	{RSP_ERROR,	501, 599, -1,		  0,  0, {ACT_FAILSDOWN} },
+	{EV_TIMEOUT,	501, 599, -1,		  0,  0, {ACT_FAILSDOWN} },
+	{RSP_NODEV,	501, 599, -1,		  0,  0, {ACT_FAKESDOWN} },
 
-	{EV_PROC_CIDMODE, -1, -1, -1,			 -1, -1, {ACT_PROC_CIDMODE} },
-	{EV_IF_LOCK,	 -1,  -1, -1,			 -1, -1, {ACT_IF_LOCK} },
-	{EV_IF_VER,	 -1,  -1, -1,			 -1, -1, {ACT_IF_VER} },
-	{EV_START,	 -1,  -1, -1,			 -1, -1, {ACT_START} },
-	{EV_STOP,	 -1,  -1, -1,			 -1, -1, {ACT_STOP} },
-	{EV_SHUTDOWN,	 -1,  -1, -1,			 -1, -1, {ACT_SHUTDOWN} },
+	{EV_PROC_CIDMODE, -1, -1, -1,		 -1, -1, {ACT_PROC_CIDMODE} },
+	{EV_IF_LOCK,	 -1,  -1, -1,		 -1, -1, {ACT_IF_LOCK} },
+	{EV_IF_VER,	 -1,  -1, -1,		 -1, -1, {ACT_IF_VER} },
+	{EV_START,	 -1,  -1, -1,		 -1, -1, {ACT_START} },
+	{EV_STOP,	 -1,  -1, -1,		 -1, -1, {ACT_STOP} },
+	{EV_SHUTDOWN,	 -1,  -1, -1,		 -1, -1, {ACT_SHUTDOWN} },
 
 /* misc. */
-	{RSP_ERROR,	 -1,  -1, -1,			 -1, -1, {ACT_ERROR} },
-	{RSP_ZCAU,	 -1,  -1, -1,			 -1, -1, {ACT_ZCAU} },
-	{RSP_NONE,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
-	{RSP_ANY,	 -1,  -1, -1,			 -1, -1, {ACT_WARN} },
+	{RSP_ERROR,	 -1,  -1, -1,		 -1, -1, {ACT_ERROR} },
+	{RSP_ZCAU,	 -1,  -1, -1,		 -1, -1, {ACT_ZCAU} },
+	{RSP_NONE,	 -1,  -1, -1,		 -1, -1, {ACT_DEBUG} },
+	{RSP_ANY,	 -1,  -1, -1,		 -1, -1, {ACT_WARN} },
 	{RSP_LAST}
 };
 
@@ -261,90 +262,90 @@
  * action, command */
 
 /* dial */
-	{EV_DIAL,	 -1,  -1, -1,			 -1, -1, {ACT_DIAL} },
-	{RSP_INIT,	  0,   0, SEQ_DIAL,		601,  5, {ACT_CMD + AT_BC} },
-	{RSP_OK,	601, 601, -1,			603,  5, {ACT_CMD + AT_PROTO} },
-	{RSP_OK,	603, 603, -1,			604,  5, {ACT_CMD + AT_TYPE} },
-	{RSP_OK,	604, 604, -1,			605,  5, {ACT_CMD + AT_MSN} },
-	{RSP_NULL,	605, 605, -1,			606,  5, {ACT_CMD + AT_CLIP} },
-	{RSP_OK,	605, 605, -1,			606,  5, {ACT_CMD + AT_CLIP} },
-	{RSP_NULL,	606, 606, -1,			607,  5, {ACT_CMD + AT_ISO} },
-	{RSP_OK,	606, 606, -1,			607,  5, {ACT_CMD + AT_ISO} },
-	{RSP_OK,	607, 607, -1,			608,  5, {0},	"+VLS=17\r"},
-	{RSP_OK,	608, 608, -1,			609, -1},
-	{RSP_ZSAU,	609, 609, ZSAU_PROCEEDING,	610,  5, {ACT_CMD + AT_DIAL} },
-	{RSP_OK,	610, 610, -1,			650,  0, {ACT_DIALING} },
+	{EV_DIAL,	 -1,  -1, -1,		 -1, -1, {ACT_DIAL} },
+	{RSP_INIT,	  0,   0, SEQ_DIAL,	601,  5, {ACT_CMD + AT_BC} },
+	{RSP_OK,	601, 601, -1,		603,  5, {ACT_CMD + AT_PROTO} },
+	{RSP_OK,	603, 603, -1,		604,  5, {ACT_CMD + AT_TYPE} },
+	{RSP_OK,	604, 604, -1,		605,  5, {ACT_CMD + AT_MSN} },
+	{RSP_NULL,	605, 605, -1,		606,  5, {ACT_CMD + AT_CLIP} },
+	{RSP_OK,	605, 605, -1,		606,  5, {ACT_CMD + AT_CLIP} },
+	{RSP_NULL,	606, 606, -1,		607,  5, {ACT_CMD + AT_ISO} },
+	{RSP_OK,	606, 606, -1,		607,  5, {ACT_CMD + AT_ISO} },
+	{RSP_OK,	607, 607, -1,		608,  5, {0},	"+VLS=17\r"},
+	{RSP_OK,	608, 608, -1,		609, -1},
+	{RSP_ZSAU,	609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} },
+	{RSP_OK,	610, 610, -1,		650,  0, {ACT_DIALING} },
 
-	{RSP_ERROR,	601, 610, -1,			  0,  0, {ACT_ABORTDIAL} },
-	{EV_TIMEOUT,	601, 610, -1,			  0,  0, {ACT_ABORTDIAL} },
+	{RSP_ERROR,	601, 610, -1,		  0,  0, {ACT_ABORTDIAL} },
+	{EV_TIMEOUT,	601, 610, -1,		  0,  0, {ACT_ABORTDIAL} },
 
 /* optional dialing responses */
-	{EV_BC_OPEN,	650, 650, -1,			651, -1},
-	{RSP_ZVLS,	609, 651, 17,			 -1, -1, {ACT_DEBUG} },
-	{RSP_ZCTP,	610, 651, -1,			 -1, -1, {ACT_DEBUG} },
-	{RSP_ZCPN,	610, 651, -1,			 -1, -1, {ACT_DEBUG} },
-	{RSP_ZSAU,	650, 651, ZSAU_CALL_DELIVERED,	 -1, -1, {ACT_DEBUG} },
+	{EV_BC_OPEN,	650, 650, -1,		651, -1},
+	{RSP_ZVLS,	609, 651, 17,		 -1, -1, {ACT_DEBUG} },
+	{RSP_ZCTP,	610, 651, -1,		 -1, -1, {ACT_DEBUG} },
+	{RSP_ZCPN,	610, 651, -1,		 -1, -1, {ACT_DEBUG} },
+	{RSP_ZSAU,	650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },
 
 /* connect */
-	{RSP_ZSAU,	650, 650, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT} },
-	{RSP_ZSAU,	651, 651, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT,
-								  ACT_NOTIFY_BC_UP} },
-	{RSP_ZSAU,	750, 750, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT} },
-	{RSP_ZSAU,	751, 751, ZSAU_ACTIVE,		800, -1, {ACT_CONNECT,
-								  ACT_NOTIFY_BC_UP} },
-	{EV_BC_OPEN,	800, 800, -1,			800, -1, {ACT_NOTIFY_BC_UP} },
+	{RSP_ZSAU,	650, 650, ZSAU_ACTIVE,	800, -1, {ACT_CONNECT} },
+	{RSP_ZSAU,	651, 651, ZSAU_ACTIVE,	800, -1, {ACT_CONNECT,
+							  ACT_NOTIFY_BC_UP} },
+	{RSP_ZSAU,	750, 750, ZSAU_ACTIVE,	800, -1, {ACT_CONNECT} },
+	{RSP_ZSAU,	751, 751, ZSAU_ACTIVE,	800, -1, {ACT_CONNECT,
+							  ACT_NOTIFY_BC_UP} },
+	{EV_BC_OPEN,	800, 800, -1,		800, -1, {ACT_NOTIFY_BC_UP} },
 
 /* remote hangup */
-	{RSP_ZSAU,	650, 651, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEREJECT} },
-	{RSP_ZSAU,	750, 751, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEHUP} },
-	{RSP_ZSAU,	800, 800, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_REMOTEHUP} },
+	{RSP_ZSAU,	650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
+	{RSP_ZSAU,	750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
+	{RSP_ZSAU,	800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
 
 /* hangup */
-	{EV_HUP,	 -1,  -1, -1,			 -1, -1, {ACT_HUP} },
-	{RSP_INIT,	 -1,  -1, SEQ_HUP,		401,  5, {0},	"+VLS=0\r"},
-	{RSP_OK,	401, 401, -1,			402,  5},
-	{RSP_ZVLS,	402, 402,  0,			403,  5},
-	{RSP_ZSAU,	403, 403, ZSAU_DISCONNECT_REQ,	 -1, -1, {ACT_DEBUG} },
-	{RSP_ZSAU,	403, 403, ZSAU_NULL,		  0,  0, {ACT_DISCONNECT} },
-	{RSP_NODEV,	401, 403, -1,			  0,  0, {ACT_FAKEHUP} },
-	{RSP_ERROR,	401, 401, -1,			  0,  0, {ACT_ABORTHUP} },
-	{EV_TIMEOUT,	401, 403, -1,			  0,  0, {ACT_ABORTHUP} },
+	{EV_HUP,	 -1,  -1, -1,		 -1, -1, {ACT_HUP} },
+	{RSP_INIT,	 -1,  -1, SEQ_HUP,	401,  5, {0},	"+VLS=0\r"},
+	{RSP_OK,	401, 401, -1,		402,  5},
+	{RSP_ZVLS,	402, 402,  0,		403,  5},
+	{RSP_ZSAU,	403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
+	{RSP_ZSAU,	403, 403, ZSAU_NULL,	  0,  0, {ACT_DISCONNECT} },
+	{RSP_NODEV,	401, 403, -1,		  0,  0, {ACT_FAKEHUP} },
+	{RSP_ERROR,	401, 401, -1,		  0,  0, {ACT_ABORTHUP} },
+	{EV_TIMEOUT,	401, 403, -1,		  0,  0, {ACT_ABORTHUP} },
 
-	{EV_BC_CLOSED,	  0,   0, -1,			  0, -1, {ACT_NOTIFY_BC_DOWN} },
+	{EV_BC_CLOSED,	  0,   0, -1,		  0, -1, {ACT_NOTIFY_BC_DOWN} },
 
 /* ring */
-	{RSP_ZBC,	700, 700, -1,			 -1, -1, {0} },
-	{RSP_ZHLC,	700, 700, -1,			 -1, -1, {0} },
-	{RSP_NMBR,	700, 700, -1,			 -1, -1, {0} },
-	{RSP_ZCPN,	700, 700, -1,			 -1, -1, {0} },
-	{RSP_ZCTP,	700, 700, -1,			 -1, -1, {0} },
-	{EV_TIMEOUT,	700, 700, -1,			720, 720, {ACT_ICALL} },
-	{EV_BC_CLOSED,	720, 720, -1,			  0, -1, {ACT_NOTIFY_BC_DOWN} },
+	{RSP_ZBC,	700, 700, -1,		 -1, -1, {0} },
+	{RSP_ZHLC,	700, 700, -1,		 -1, -1, {0} },
+	{RSP_NMBR,	700, 700, -1,		 -1, -1, {0} },
+	{RSP_ZCPN,	700, 700, -1,		 -1, -1, {0} },
+	{RSP_ZCTP,	700, 700, -1,		 -1, -1, {0} },
+	{EV_TIMEOUT,	700, 700, -1,		720, 720, {ACT_ICALL} },
+	{EV_BC_CLOSED,	720, 720, -1,		  0, -1, {ACT_NOTIFY_BC_DOWN} },
 
 /*accept icall*/
-	{EV_ACCEPT,	 -1,  -1, -1,			 -1, -1, {ACT_ACCEPT} },
-	{RSP_INIT,	720, 720, SEQ_ACCEPT,		721,  5, {ACT_CMD + AT_PROTO} },
-	{RSP_OK,	721, 721, -1,			722,  5, {ACT_CMD + AT_ISO} },
-	{RSP_OK,	722, 722, -1,			723,  5, {0},	"+VLS=17\r"},
-	{RSP_OK,	723, 723, -1,			724,  5, {0} },
-	{RSP_ZVLS,	724, 724, 17,			750, 50, {ACT_ACCEPTED} },
-	{RSP_ERROR,	721, 729, -1,			  0,  0, {ACT_ABORTACCEPT} },
-	{EV_TIMEOUT,	721, 729, -1,			  0,  0, {ACT_ABORTACCEPT} },
-	{RSP_ZSAU,	700, 729, ZSAU_NULL,		  0,  0, {ACT_ABORTACCEPT} },
-	{RSP_ZSAU,	700, 729, ZSAU_ACTIVE,		  0,  0, {ACT_ABORTACCEPT} },
-	{RSP_ZSAU,	700, 729, ZSAU_DISCONNECT_IND,	  0,  0, {ACT_ABORTACCEPT} },
+	{EV_ACCEPT,	 -1,  -1, -1,		 -1, -1, {ACT_ACCEPT} },
+	{RSP_INIT,	720, 720, SEQ_ACCEPT,	721,  5, {ACT_CMD + AT_PROTO} },
+	{RSP_OK,	721, 721, -1,		722,  5, {ACT_CMD + AT_ISO} },
+	{RSP_OK,	722, 722, -1,		723,  5, {0},	"+VLS=17\r"},
+	{RSP_OK,	723, 723, -1,		724,  5, {0} },
+	{RSP_ZVLS,	724, 724, 17,		750, 50, {ACT_ACCEPTED} },
+	{RSP_ERROR,	721, 729, -1,		  0,  0, {ACT_ABORTACCEPT} },
+	{EV_TIMEOUT,	721, 729, -1,		  0,  0, {ACT_ABORTACCEPT} },
+	{RSP_ZSAU,	700, 729, ZSAU_NULL,	  0,  0, {ACT_ABORTACCEPT} },
+	{RSP_ZSAU,	700, 729, ZSAU_ACTIVE,	  0,  0, {ACT_ABORTACCEPT} },
+	{RSP_ZSAU,	700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },
 
-	{EV_BC_OPEN,	750, 750, -1,			751, -1},
-	{EV_TIMEOUT,	750, 751, -1,			  0,  0, {ACT_CONNTIMEOUT} },
+	{EV_BC_OPEN,	750, 750, -1,		751, -1},
+	{EV_TIMEOUT,	750, 751, -1,		  0,  0, {ACT_CONNTIMEOUT} },
 
 /* B channel closed (general case) */
-	{EV_BC_CLOSED,	 -1,  -1, -1,			 -1, -1, {ACT_NOTIFY_BC_DOWN} },
+	{EV_BC_CLOSED,	 -1,  -1, -1,		 -1, -1, {ACT_NOTIFY_BC_DOWN} },
 
 /* misc. */
-	{RSP_ZCON,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
-	{RSP_ZCAU,	 -1,  -1, -1,			 -1, -1, {ACT_ZCAU} },
-	{RSP_NONE,	 -1,  -1, -1,			 -1, -1, {ACT_DEBUG} },
-	{RSP_ANY,	 -1,  -1, -1,			 -1, -1, {ACT_WARN} },
+	{RSP_ZCON,	 -1,  -1, -1,		 -1, -1, {ACT_DEBUG} },
+	{RSP_ZCAU,	 -1,  -1, -1,		 -1, -1, {ACT_ZCAU} },
+	{RSP_NONE,	 -1,  -1, -1,		 -1, -1, {ACT_DEBUG} },
+	{RSP_ANY,	 -1,  -1, -1,		 -1, -1, {ACT_WARN} },
 	{RSP_LAST}
 };
 
@@ -648,16 +649,16 @@
 static inline struct at_state_t *get_free_channel(struct cardstate *cs,
 						  int cid)
 /* cids: >0: siemens-cid
-   0: without cid
-   -1: no cid assigned yet
-*/
+ *        0: without cid
+ *       -1: no cid assigned yet
+ */
 {
 	unsigned long flags;
 	int i;
 	struct at_state_t *ret;
 
 	for (i = 0; i < cs->channels; ++i)
-		if (gigaset_get_channel(cs->bcs + i)) {
+		if (gigaset_get_channel(cs->bcs + i) >= 0) {
 			ret = &cs->bcs[i].at_state;
 			ret->cid = cid;
 			return ret;
@@ -922,18 +923,18 @@
  * channel >= 0: getting cid for the channel failed
  * channel < 0:  entering cid mode failed
  *
- * returns 0 on failure
+ * returns 0 on success, <0 on failure
  */
 static int reinit_and_retry(struct cardstate *cs, int channel)
 {
 	int i;
 
 	if (--cs->retry_count <= 0)
-		return 0;
+		return -EFAULT;
 
 	for (i = 0; i < cs->channels; ++i)
 		if (cs->bcs[i].at_state.cid > 0)
-			return 0;
+			return -EBUSY;
 
 	if (channel < 0)
 		dev_warn(cs->dev,
@@ -944,7 +945,7 @@
 		cs->bcs[channel].at_state.pending_commands |= PC_CID;
 	}
 	schedule_init(cs, MS_INIT);
-	return 1;
+	return 0;
 }
 
 static int at_state_invalid(struct cardstate *cs,
@@ -1015,7 +1016,7 @@
 			if (cs->bcs[i].at_state.pending_commands)
 				return -EBUSY;
 
-		if (!gigaset_get_channels(cs))
+		if (gigaset_get_channels(cs) < 0)
 			return -EBUSY;
 
 		break;
@@ -1124,7 +1125,7 @@
 			init_failed(cs, M_UNKNOWN);
 			break;
 		}
-		if (!reinit_and_retry(cs, -1))
+		if (reinit_and_retry(cs, -1) < 0)
 			schedule_init(cs, MS_RECOVER);
 		break;
 	case ACT_FAILUMODE:
@@ -1267,7 +1268,7 @@
 	case ACT_FAILCID:
 		cs->cur_at_seq = SEQ_NONE;
 		channel = cs->curchannel;
-		if (!reinit_and_retry(cs, channel)) {
+		if (reinit_and_retry(cs, channel) < 0) {
 			dev_warn(cs->dev,
 				 "Could not get a call ID. Cannot dial.\n");
 			at_state2 = &cs->bcs[channel].at_state;
@@ -1314,8 +1315,9 @@
 		s = ev->ptr;
 
 		if (!strcmp(s, "OK")) {
+			/* OK without version string: assume old response */
 			*p_genresp = 1;
-			*p_resp_code = RSP_ERROR;
+			*p_resp_code = RSP_NONE;
 			break;
 		}
 
@@ -1372,7 +1374,8 @@
 			 ev->parameter, at_state->ConState);
 		break;
 
-		/* events from the LL */
+	/* events from the LL */
+
 	case ACT_DIAL:
 		start_dial(at_state, ev->ptr, ev->parameter);
 		break;
@@ -1385,7 +1388,8 @@
 		cs->commands_pending = 1;
 		break;
 
-		/* hotplug events */
+	/* hotplug events */
+
 	case ACT_STOP:
 		do_stop(cs);
 		break;
@@ -1393,7 +1397,8 @@
 		do_start(cs);
 		break;
 
-		/* events from the interface */
+	/* events from the interface */
+
 	case ACT_IF_LOCK:
 		cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
 		cs->waiting = 0;
@@ -1412,7 +1417,8 @@
 		wake_up(&cs->waitqueue);
 		break;
 
-		/* events from the proc file system */
+	/* events from the proc file system */
+
 	case ACT_PROC_CIDMODE:
 		spin_lock_irqsave(&cs->lock, flags);
 		if (ev->parameter != cs->cidmode) {
@@ -1431,7 +1437,8 @@
 		wake_up(&cs->waitqueue);
 		break;
 
-		/* events from the hardware drivers */
+	/* events from the hardware drivers */
+
 	case ACT_NOTIFY_BC_DOWN:
 		bchannel_down(bcs);
 		break;
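
For reference when reading the re-indented state tables earlier in this file, each row has the shape sketched below (column meanings inferred from the surrounding code). The one genuinely new entry, {RSP_NONE, 121, 121, -1, 120, 0, {ACT_GETSTRING} }, pairs with the change above that reports RSP_NONE instead of RSP_ERROR when the device answers "OK" without a version string: the intent appears to be to drop back to state 120 and retry the version query rather than fail.

/*
 *   { resp_code/event, min ConState, max ConState, parameter,
 *     new ConState, timeout, { actions... }, AT command string }
 *
 * e.g. {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"} means: on timeout
 * while in state 100, go to state 101, set the timeout counter to 3 and
 * send the AT command "Z\r".
 */
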
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 1dc2513..8e2fc8f 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -163,8 +163,8 @@
 #define BAS_LOWFRAME	5	/* "    "    with negative flow control */
 #define BAS_CORRFRAMES	4	/* flow control multiplicator */
 
-#define BAS_INBUFSIZE	(BAS_MAXFRAME * BAS_NUMFRAMES)
-/* size of isoc in buf per URB */
+#define BAS_INBUFSIZE	(BAS_MAXFRAME * BAS_NUMFRAMES)	/* size of isoc in buf
+							 * per URB */
 #define BAS_OUTBUFSIZE	4096		/* size of common isoc out buffer */
 #define BAS_OUTBUFPAD	BAS_MAXFRAME	/* size of pad area for isoc out buf */
 
@@ -471,18 +471,18 @@
 					   for */
 	int commands_pending;		/* flag(s) in xxx.commands_pending have
 					   been set */
-	struct tasklet_struct event_tasklet;
-	/* tasklet for serializing AT commands.
-	 * Scheduled
-	 *   -> for modem reponses (and
-	 *      incoming data for M10x)
-	 *   -> on timeout
-	 *   -> after setting bits in
-	 *      xxx.at_state.pending_command
-	 *      (e.g. command from LL) */
-	struct tasklet_struct write_tasklet;
-	/* tasklet for serial output
-	 * (not used in base driver) */
+	struct tasklet_struct
+		event_tasklet;		/* tasklet for serializing AT commands.
+					 * Scheduled
+					 *   -> for modem responses (and
+					 *      incoming data for M10x)
+					 *   -> on timeout
+					 *   -> after setting bits in
+					 *      xxx.at_state.pending_command
+					 *      (e.g. command from LL) */
+	struct tasklet_struct
+		write_tasklet;		/* tasklet for serial output
+					 * (not used in base driver) */
 
 	/* event queue */
 	struct event_t events[MAX_EVENTS];
@@ -583,7 +583,7 @@
 	int (*initbcshw)(struct bc_state *bcs);
 
 	/* Called by gigaset_freecs() for freeing bcs->hw.xxx */
-	int (*freebcshw)(struct bc_state *bcs);
+	void (*freebcshw)(struct bc_state *bcs);
 
 	/* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */
 	void (*reinitbcshw)(struct bc_state *bcs);
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 0f13eb1..2d753290 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -229,7 +229,7 @@
 			return -EINVAL;
 		}
 		bcs = cs->bcs + ch;
-		if (!gigaset_get_channel(bcs)) {
+		if (gigaset_get_channel(bcs) < 0) {
 			dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
 			return -EBUSY;
 		}
@@ -618,7 +618,7 @@
  * @cs:		device descriptor structure.
  * @isdnid:	device name.
  *
- * Return value: 1 for success, 0 for failure
+ * Return value: 0 on success, error code < 0 on failure
  */
 int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
@@ -627,14 +627,14 @@
 	iif = kmalloc(sizeof *iif, GFP_KERNEL);
 	if (!iif) {
 		pr_err("out of memory\n");
-		return 0;
+		return -ENOMEM;
 	}
 
 	if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
 	    >= sizeof iif->id) {
 		pr_err("ID too long: %s\n", isdnid);
 		kfree(iif);
-		return 0;
+		return -EINVAL;
 	}
 
 	iif->owner = THIS_MODULE;
@@ -656,13 +656,13 @@
 	if (!register_isdn(iif)) {
 		pr_err("register_isdn failed\n");
 		kfree(iif);
-		return 0;
+		return -EINVAL;
 	}
 
 	cs->iif = iif;
 	cs->myid = iif->channels;		/* Set my device id */
 	cs->hw_hdr_len = HW_HDR_LEN;
-	return 1;
+	return 0;
 }
 
 /**
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index a351c16..bc29f1d 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -56,7 +56,7 @@
 
 /* start writing
  * acquire the write semaphore
- * return true if acquired, false if busy
+ * return 0 if acquired, <0 if busy
  */
 static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
 {
@@ -64,12 +64,12 @@
 		atomic_inc(&iwb->writesem);
 		gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore",
 			__func__);
-		return 0;
+		return -EBUSY;
 	}
 	gig_dbg(DEBUG_ISO,
 		"%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
 		__func__, iwb->data[iwb->write], iwb->wbits);
-	return 1;
+	return 0;
 }
 
 /* finish writing
@@ -158,7 +158,7 @@
 		/* no wraparound in valid data */
 		if (limit >= write) {
 			/* append idle frame */
-			if (!isowbuf_startwrite(iwb))
+			if (isowbuf_startwrite(iwb) < 0)
 				return -EBUSY;
 			/* write position could have changed */
 			write = iwb->write;
@@ -403,7 +403,7 @@
 	unsigned char c;
 
 	if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
-	    !isowbuf_startwrite(iwb)) {
+	    isowbuf_startwrite(iwb) < 0) {
 		gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
 			__func__, isowbuf_freebytes(iwb));
 		return -EAGAIN;
@@ -457,7 +457,7 @@
 		return iwb->write;
 
 	if (isowbuf_freebytes(iwb) < count ||
-	    !isowbuf_startwrite(iwb)) {
+	    isowbuf_startwrite(iwb) < 0) {
 		gig_dbg(DEBUG_ISO, "can't put %d bytes", count);
 		return -EAGAIN;
 	}
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 6f3fd4c..8c91fd5eb 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -340,17 +340,16 @@
 {
 	/* unused */
 	bcs->hw.ser = NULL;
-	return 1;
+	return 0;
 }
 
 /*
  * Free B channel structure
  * Called by "gigaset_freebcs" in common.c
  */
-static int gigaset_freebcshw(struct bc_state *bcs)
+static void gigaset_freebcshw(struct bc_state *bcs)
 {
 	/* unused */
-	return 1;
 }
 
 /*
@@ -398,7 +397,7 @@
 	scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
 	if (!scs) {
 		pr_err("out of memory\n");
-		return 0;
+		return -ENOMEM;
 	}
 	cs->hw.ser = scs;
 
@@ -410,13 +409,13 @@
 		pr_err("error %d registering platform device\n", rc);
 		kfree(cs->hw.ser);
 		cs->hw.ser = NULL;
-		return 0;
+		return rc;
 	}
 	dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
 
 	tasklet_init(&cs->write_tasklet,
 		     gigaset_modem_fill, (unsigned long) cs);
-	return 1;
+	return 0;
 }
 
 /*
@@ -503,6 +502,7 @@
 gigaset_tty_open(struct tty_struct *tty)
 {
 	struct cardstate *cs;
+	int rc;
 
 	gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101");
 
@@ -515,8 +515,10 @@
 
 	/* allocate memory for our device state and initialize it */
 	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
-	if (!cs)
+	if (!cs) {
+		rc = -ENODEV;
 		goto error;
+	}
 
 	cs->dev = &cs->hw.ser->dev.dev;
 	cs->hw.ser->tty = tty;
@@ -530,7 +532,8 @@
 	 */
 	if (startmode == SM_LOCKED)
 		cs->mstate = MS_LOCKED;
-	if (!gigaset_start(cs)) {
+	rc = gigaset_start(cs);
+	if (rc < 0) {
 		tasklet_kill(&cs->write_tasklet);
 		goto error;
 	}
@@ -542,7 +545,7 @@
 	gig_dbg(DEBUG_INIT, "Startup of HLL failed");
 	tty->disc_data = NULL;
 	gigaset_freecs(cs);
-	return -ENODEV;
+	return rc;
 }
 
 /*
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 049da67..bb12d80 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -549,10 +549,9 @@
 			       0, 0, &buf, 6, 2000);
 }
 
-static int gigaset_freebcshw(struct bc_state *bcs)
+static void gigaset_freebcshw(struct bc_state *bcs)
 {
 	/* unused */
-	return 1;
 }
 
 /* Initialize the b-channel structure */
@@ -560,7 +559,7 @@
 {
 	/* unused */
 	bcs->hw.usb = NULL;
-	return 1;
+	return 0;
 }
 
 static void gigaset_reinitbcshw(struct bc_state *bcs)
@@ -582,7 +581,7 @@
 		kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
 	if (!ucs) {
 		pr_err("out of memory\n");
-		return 0;
+		return -ENOMEM;
 	}
 
 	ucs->bchars[0] = 0;
@@ -597,7 +596,7 @@
 	tasklet_init(&cs->write_tasklet,
 		     gigaset_modem_fill, (unsigned long) cs);
 
-	return 1;
+	return 0;
 }
 
 /* Send data from current skb to the device. */
@@ -766,9 +765,9 @@
 	if (startmode == SM_LOCKED)
 		cs->mstate = MS_LOCKED;
 
-	if (!gigaset_start(cs)) {
+	retval = gigaset_start(cs);
+	if (retval < 0) {
 		tasklet_kill(&cs->write_tasklet);
-		retval = -ENODEV;
 		goto error;
 	}
 	return 0;
@@ -898,8 +897,10 @@
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 				    GIGASET_MODULENAME, GIGASET_DEVNAME,
 				    &ops, THIS_MODULE);
-	if (driver == NULL)
+	if (driver == NULL) {
+		result = -ENOMEM;
 		goto error;
+	}
 
 	/* register this driver with the USB subsystem */
 	result = usb_register(&gigaset_usb_driver);
@@ -915,7 +916,7 @@
 	if (driver)
 		gigaset_freedriver(driver);
 	driver = NULL;
-	return -1;
+	return result;
 }
 
 /*
diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
index a576f32..7a0bdbd 100644
--- a/drivers/isdn/hardware/eicon/capifunc.c
+++ b/drivers/isdn/hardware/eicon/capifunc.c
@@ -1120,7 +1120,7 @@
 /*
  * init (alloc) main structures
  */
-static int DIVA_INIT_FUNCTION init_main_structs(void)
+static int __init init_main_structs(void)
 {
 	if (!(mapped_msg = (CAPI_MSG *) diva_os_malloc(0, MAX_MSG_SIZE))) {
 		DBG_ERR(("init: failed alloc mapped_msg."))
@@ -1181,7 +1181,7 @@
 /*
  * init
  */
-int DIVA_INIT_FUNCTION init_capifunc(void)
+int __init init_capifunc(void)
 {
 	diva_os_initialize_spin_lock(&api_lock, "capifunc");
 	memset(ControllerMap, 0, MAX_DESCRIPTORS + 1);
@@ -1209,7 +1209,7 @@
 /*
  * finit
  */
-void DIVA_EXIT_FUNCTION finit_capifunc(void)
+void __exit finit_capifunc(void)
 {
 	do_api_remove_start();
 	divacapi_disconnect_didd();
diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c
index eabe0fa..997d46a 100644
--- a/drivers/isdn/hardware/eicon/capimain.c
+++ b/drivers/isdn/hardware/eicon/capimain.c
@@ -118,7 +118,7 @@
 /*
  * module init
  */
-static int DIVA_INIT_FUNCTION divacapi_init(void)
+static int __init divacapi_init(void)
 {
 	char tmprev[32];
 	int ret = 0;
@@ -144,7 +144,7 @@
 /*
  * module exit
  */
-static void DIVA_EXIT_FUNCTION divacapi_exit(void)
+static void __exit divacapi_exit(void)
 {
 	finit_capifunc();
 	printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
index c4c8220..b0b23ed 100644
--- a/drivers/isdn/hardware/eicon/diddfunc.c
+++ b/drivers/isdn/hardware/eicon/diddfunc.c
@@ -47,7 +47,7 @@
 /*
  * connect to didd
  */
-static int DIVA_INIT_FUNCTION connect_didd(void)
+static int __init connect_didd(void)
 {
 	int x = 0;
 	int dadapter = 0;
@@ -79,7 +79,7 @@
 /*
  * disconnect from didd
  */
-static void DIVA_EXIT_FUNCTION disconnect_didd(void)
+static void __exit disconnect_didd(void)
 {
 	IDI_SYNC_REQ req;
 
@@ -92,7 +92,7 @@
 /*
  * init
  */
-int DIVA_INIT_FUNCTION diddfunc_init(void)
+int __init diddfunc_init(void)
 {
 	diva_didd_load_time_init();
 
@@ -107,7 +107,7 @@
 /*
  * finit
  */
-void DIVA_EXIT_FUNCTION diddfunc_finit(void)
+void __exit diddfunc_finit(void)
 {
 	DbgDeregister();
 	disconnect_didd();
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
index d1d3de0..fab6ccf 100644
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ b/drivers/isdn/hardware/eicon/diva_didd.c
@@ -91,7 +91,7 @@
 	.release	= single_release,
 };
 
-static int DIVA_INIT_FUNCTION create_proc(void)
+static int __init create_proc(void)
 {
 	proc_net_eicon = proc_mkdir("eicon", init_net.proc_net);
 
@@ -109,7 +109,7 @@
 	remove_proc_entry("eicon", init_net.proc_net);
 }
 
-static int DIVA_INIT_FUNCTION divadidd_init(void)
+static int __init divadidd_init(void)
 {
 	char tmprev[32];
 	int ret = 0;
@@ -141,7 +141,7 @@
 	return (ret);
 }
 
-static void DIVA_EXIT_FUNCTION divadidd_exit(void)
+static void __exit divadidd_exit(void)
 {
 	diddfunc_finit();
 	remove_proc();
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index ffa0c31..48db08d 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -184,7 +184,7 @@
 	unregister_chrdev(major, DEVNAME);
 }
 
-static int DIVA_INIT_FUNCTION divas_maint_register_chrdev(void)
+static int __init divas_maint_register_chrdev(void)
 {
 	if ((major = register_chrdev(0, DEVNAME, &divas_maint_fops)) < 0)
 	{
@@ -207,7 +207,7 @@
 /*
  *  Driver Load
  */
-static int DIVA_INIT_FUNCTION maint_init(void)
+static int __init maint_init(void)
 {
 	char tmprev[50];
 	int ret = 0;
@@ -245,7 +245,7 @@
 /*
 **  Driver Unload
 */
-static void DIVA_EXIT_FUNCTION maint_exit(void)
+static void __exit maint_exit(void)
 {
 	divas_maint_unregister_chrdev();
 	mntfunc_finit();
diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
index 60aaf95..4be5f88 100644
--- a/drivers/isdn/hardware/eicon/divasfunc.c
+++ b/drivers/isdn/hardware/eicon/divasfunc.c
@@ -153,7 +153,7 @@
 /*
  * connect to didd
  */
-static int DIVA_INIT_FUNCTION connect_didd(void)
+static int __init connect_didd(void)
 {
 	int x = 0;
 	int dadapter = 0;
@@ -209,7 +209,7 @@
 /*
  * init
  */
-int DIVA_INIT_FUNCTION divasfunc_init(int dbgmask)
+int __init divasfunc_init(int dbgmask)
 {
 	char *version;
 
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index a5c8f90..4103a8c 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -114,7 +114,7 @@
 	.release	= single_release,
 };
 
-static int DIVA_INIT_FUNCTION create_um_idi_proc(void)
+static int __init create_um_idi_proc(void)
 {
 	um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
 					&um_idi_proc_fops);
@@ -146,7 +146,7 @@
 	unregister_chrdev(major, DEVNAME);
 }
 
-static int DIVA_INIT_FUNCTION divas_idi_register_chrdev(void)
+static int __init divas_idi_register_chrdev(void)
 {
 	if ((major = register_chrdev(0, DEVNAME, &divas_idi_fops)) < 0)
 	{
@@ -161,7 +161,7 @@
 /*
 ** Driver Load
 */
-static int DIVA_INIT_FUNCTION divasi_init(void)
+static int __init divasi_init(void)
 {
 	char tmprev[50];
 	int ret = 0;
@@ -202,7 +202,7 @@
 /*
 ** Driver Unload
 */
-static void DIVA_EXIT_FUNCTION divasi_exit(void)
+static void __exit divasi_exit(void)
 {
 	idifunc_finit();
 	remove_um_idi_proc();
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 7eaab06..ca6d276 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -673,7 +673,7 @@
 	unregister_chrdev(major, DEVNAME);
 }
 
-static int DIVA_INIT_FUNCTION divas_register_chrdev(void)
+static int __init divas_register_chrdev(void)
 {
 	if ((major = register_chrdev(0, DEVNAME, &divas_fops)) < 0)
 	{
@@ -767,7 +767,7 @@
 /* --------------------------------------------------------------------------
    Driver Load / Startup
    -------------------------------------------------------------------------- */
-static int DIVA_INIT_FUNCTION divas_init(void)
+static int __init divas_init(void)
 {
 	char tmprev[50];
 	int ret = 0;
@@ -831,7 +831,7 @@
 /* --------------------------------------------------------------------------
    Driver Unload
    -------------------------------------------------------------------------- */
-static void DIVA_EXIT_FUNCTION divas_exit(void)
+static void __exit divas_exit(void)
 {
 	pci_unregister_driver(&diva_pci_driver);
 	remove_divas_proc();
diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
index d153e3c..fef6586 100644
--- a/drivers/isdn/hardware/eicon/idifunc.c
+++ b/drivers/isdn/hardware/eicon/idifunc.c
@@ -133,7 +133,7 @@
 /*
  * remove all adapter
  */
-static void DIVA_EXIT_FUNCTION remove_all_idi_proc(void)
+static void __exit remove_all_idi_proc(void)
 {
 	udiva_card *card;
 	diva_os_spin_lock_magic_t old_irql;
@@ -181,7 +181,7 @@
 /*
  * connect DIDD
  */
-static int DIVA_INIT_FUNCTION connect_didd(void)
+static int __init connect_didd(void)
 {
 	int x = 0;
 	int dadapter = 0;
@@ -225,7 +225,7 @@
 /*
  *  Disconnect from DIDD
  */
-static void DIVA_EXIT_FUNCTION disconnect_didd(void)
+static void __exit disconnect_didd(void)
 {
 	IDI_SYNC_REQ req;
 
@@ -240,7 +240,7 @@
 /*
  * init
  */
-int DIVA_INIT_FUNCTION idifunc_init(void)
+int __init idifunc_init(void)
 {
 	diva_os_initialize_spin_lock(&ll_lock, "idifunc");
 
@@ -260,7 +260,7 @@
 /*
  * finit
  */
-void DIVA_EXIT_FUNCTION idifunc_finit(void)
+void __exit idifunc_finit(void)
 {
 	diva_user_mode_idi_finit();
 	disconnect_didd();
diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
index d607260..1cd9aff 100644
--- a/drivers/isdn/hardware/eicon/mntfunc.c
+++ b/drivers/isdn/hardware/eicon/mntfunc.c
@@ -72,7 +72,7 @@
 /*
  * connect to didd
  */
-static int DIVA_INIT_FUNCTION connect_didd(void)
+static int __init connect_didd(void)
 {
 	int x = 0;
 	int dadapter = 0;
@@ -114,7 +114,7 @@
 /*
  * disconnect from didd
  */
-static void DIVA_EXIT_FUNCTION disconnect_didd(void)
+static void __exit disconnect_didd(void)
 {
 	IDI_SYNC_REQ req;
 
@@ -300,7 +300,7 @@
 /*
  *  init
  */
-int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer,
+int __init mntfunc_init(int *buffer_length, void **buffer,
 				    unsigned long diva_dbg_mem)
 {
 	if (*buffer_length < 64) {
@@ -348,7 +348,7 @@
 /*
  *  exit
  */
-void DIVA_EXIT_FUNCTION mntfunc_finit(void)
+void __exit mntfunc_finit(void)
 {
 	void *buffer;
 	int i = 100;
diff --git a/drivers/isdn/hardware/eicon/platform.h b/drivers/isdn/hardware/eicon/platform.h
index 7331c3b..b2edb75 100644
--- a/drivers/isdn/hardware/eicon/platform.h
+++ b/drivers/isdn/hardware/eicon/platform.h
@@ -38,9 +38,6 @@
 #define DIVA_NO_DEBUGLIB
 #endif
 
-#define DIVA_INIT_FUNCTION  __init
-#define DIVA_EXIT_FUNCTION  __exit
-
 #define DIVA_USER_MODE_CARD_CONFIG 1
 #define	USE_EXTENDED_DEBUGS 1
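DIVA_INIT_FUNCTION and DIVA_EXIT_FUNCTION were plain aliases for the kernel's standard section annotations, so the conversion throughout the eicon files is mechanical. For reference, the generic pattern those files now use directly (a generic sketch, not code taken from this series):

	#include <linux/module.h>

	static int __init example_init(void)	/* init section, freed after load */
	{
		return 0;
	}

	static void __exit example_exit(void)	/* dropped entirely when built in */
	{
	}

	module_init(example_init);
	module_exit(example_exit);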
 
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index c0b8c96..c08fc60 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -30,7 +30,7 @@
 #include "ipac.h"
 
 
-#define AVMFRITZ_REV	"2.1"
+#define AVMFRITZ_REV	"2.3"
 
 static int AVM_cnt;
 static int debug;
@@ -69,6 +69,7 @@
 #define HDLC_MODE_TRANS		0x02
 #define HDLC_MODE_CCR_7		0x04
 #define HDLC_MODE_CCR_16	0x08
+#define HDLC_FIFO_SIZE_128	0x20
 #define HDLC_MODE_TESTLOOP	0x80
 
 #define HDLC_INT_XPR		0x80
@@ -80,13 +81,16 @@
 #define HDLC_STAT_RDO		0x10
 #define HDLC_STAT_CRCVFRRAB	0x0E
 #define HDLC_STAT_CRCVFR	0x06
-#define HDLC_STAT_RML_MASK	0x3f00
+#define HDLC_STAT_RML_MASK_V1	0x3f00
+#define HDLC_STAT_RML_MASK_V2	0x7f00
 
 #define HDLC_CMD_XRS		0x80
 #define HDLC_CMD_XME		0x01
 #define HDLC_CMD_RRS		0x20
 #define HDLC_CMD_XML_MASK	0x3f00
-#define HDLC_FIFO_SIZE		32
+
+#define HDLC_FIFO_SIZE_V1	32
+#define HDLC_FIFO_SIZE_V2	128
 
 /* Fritz PCI v2.0 */
 
@@ -346,11 +350,14 @@
 {
 	struct fritzcard *fc = bch->hw;
 	struct hdlc_hw *hdlc;
+	u8 mode;
 
 	hdlc = &fc->hdlc[(bch->nr - 1) & 1];
 	pr_debug("%s: hdlc %c protocol %x-->%x ch %d\n", fc->name,
 		 '@' + bch->nr, bch->state, protocol, bch->nr);
 	hdlc->ctrl.ctrl = 0;
+	mode = (fc->type == AVM_FRITZ_PCIV2) ? HDLC_FIFO_SIZE_128 : 0;
+
 	switch (protocol) {
 	case -1: /* used for init */
 		bch->state = -1;
@@ -358,7 +365,7 @@
 		if (bch->state == ISDN_P_NONE)
 			break;
 		hdlc->ctrl.sr.cmd  = HDLC_CMD_XRS | HDLC_CMD_RRS;
-		hdlc->ctrl.sr.mode = HDLC_MODE_TRANS;
+		hdlc->ctrl.sr.mode = mode | HDLC_MODE_TRANS;
 		write_ctrl(bch, 5);
 		bch->state = ISDN_P_NONE;
 		test_and_clear_bit(FLG_HDLC, &bch->Flags);
@@ -367,7 +374,7 @@
 	case ISDN_P_B_RAW:
 		bch->state = protocol;
 		hdlc->ctrl.sr.cmd  = HDLC_CMD_XRS | HDLC_CMD_RRS;
-		hdlc->ctrl.sr.mode = HDLC_MODE_TRANS;
+		hdlc->ctrl.sr.mode = mode | HDLC_MODE_TRANS;
 		write_ctrl(bch, 5);
 		hdlc->ctrl.sr.cmd = HDLC_CMD_XRS;
 		write_ctrl(bch, 1);
@@ -377,7 +384,7 @@
 	case ISDN_P_B_HDLC:
 		bch->state = protocol;
 		hdlc->ctrl.sr.cmd  = HDLC_CMD_XRS | HDLC_CMD_RRS;
-		hdlc->ctrl.sr.mode = HDLC_MODE_ITF_FLG;
+		hdlc->ctrl.sr.mode = mode | HDLC_MODE_ITF_FLG;
 		write_ctrl(bch, 5);
 		hdlc->ctrl.sr.cmd = HDLC_CMD_XRS;
 		write_ctrl(bch, 1);
@@ -397,39 +404,40 @@
 	u32 *ptr;
 	u8 *p;
 	u32  val, addr;
-	int cnt = 0;
+	int cnt;
 	struct fritzcard *fc = bch->hw;
 
 	pr_debug("%s: %s %d\n", fc->name, __func__, count);
-	if (!bch->rx_skb) {
-		bch->rx_skb = mI_alloc_skb(bch->maxlen, GFP_ATOMIC);
-		if (!bch->rx_skb) {
-			pr_info("%s: B receive out of memory\n",
-				fc->name);
+	if (test_bit(FLG_RX_OFF, &bch->Flags)) {
+		p = NULL;
+		bch->dropcnt += count;
+	} else {
+		cnt = bchannel_get_rxbuf(bch, count);
+		if (cnt < 0) {
+			pr_warning("%s.B%d: No bufferspace for %d bytes\n",
+				   fc->name, bch->nr, count);
 			return;
 		}
+		p = skb_put(bch->rx_skb, count);
 	}
-	if ((bch->rx_skb->len + count) > bch->maxlen) {
-		pr_debug("%s: overrun %d\n", fc->name,
-			 bch->rx_skb->len + count);
-		return;
-	}
-	p = skb_put(bch->rx_skb, count);
 	ptr = (u32 *)p;
-	if (AVM_FRITZ_PCIV2 == fc->type)
+	if (fc->type == AVM_FRITZ_PCIV2)
 		addr = fc->addr + (bch->nr == 2 ?
 				   AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1);
 	else {
 		addr = fc->addr + CHIP_WINDOW;
 		outl(bch->nr == 2 ? AVM_HDLC_2 : AVM_HDLC_1, fc->addr);
 	}
+	cnt = 0;
 	while (cnt < count) {
 		val = le32_to_cpu(inl(addr));
-		put_unaligned(val, ptr);
-		ptr++;
+		if (p) {
+			put_unaligned(val, ptr);
+			ptr++;
+		}
 		cnt += 4;
 	}
-	if (debug & DEBUG_HW_BFIFO) {
+	if (p && (debug & DEBUG_HW_BFIFO)) {
 		snprintf(fc->log, LOG_SIZE, "B%1d-recv %s %d ",
 			 bch->nr, fc->name, count);
 		print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count);
@@ -441,30 +449,43 @@
 {
 	struct fritzcard *fc = bch->hw;
 	struct hdlc_hw *hdlc;
-	int count, cnt = 0;
+	int count, fs, cnt = 0, idx, fillempty = 0;
 	u8 *p;
 	u32 *ptr, val, addr;
 
-	hdlc = &fc->hdlc[(bch->nr - 1) & 1];
-	if (!bch->tx_skb)
-		return;
-	count = bch->tx_skb->len - bch->tx_idx;
-	if (count <= 0)
-		return;
-	p = bch->tx_skb->data + bch->tx_idx;
+	idx = (bch->nr - 1) & 1;
+	hdlc = &fc->hdlc[idx];
+	fs = (fc->type == AVM_FRITZ_PCIV2) ?
+		HDLC_FIFO_SIZE_V2 : HDLC_FIFO_SIZE_V1;
+	if (!bch->tx_skb) {
+		if (!test_bit(FLG_TX_EMPTY, &bch->Flags))
+			return;
+		count = fs;
+		p = bch->fill;
+		fillempty = 1;
+	} else {
+		count = bch->tx_skb->len - bch->tx_idx;
+		if (count <= 0)
+			return;
+		p = bch->tx_skb->data + bch->tx_idx;
+	}
 	hdlc->ctrl.sr.cmd &= ~HDLC_CMD_XME;
-	if (count > HDLC_FIFO_SIZE) {
-		count = HDLC_FIFO_SIZE;
+	if (count > fs) {
+		count = fs;
 	} else {
 		if (test_bit(FLG_HDLC, &bch->Flags))
 			hdlc->ctrl.sr.cmd |= HDLC_CMD_XME;
 	}
-	pr_debug("%s: %s %d/%d/%d", fc->name, __func__, count,
-		 bch->tx_idx, bch->tx_skb->len);
 	ptr = (u32 *)p;
-	bch->tx_idx += count;
-	hdlc->ctrl.sr.xml = ((count == HDLC_FIFO_SIZE) ? 0 : count);
-	if (AVM_FRITZ_PCIV2 == fc->type) {
+	if (!fillempty) {
+		pr_debug("%s.B%d: %d/%d/%d", fc->name, bch->nr, count,
+			 bch->tx_idx, bch->tx_skb->len);
+		bch->tx_idx += count;
+	} else {
+		pr_debug("%s.B%d: fillempty %d\n", fc->name, bch->nr, count);
+	}
+	hdlc->ctrl.sr.xml = ((count == fs) ? 0 : count);
+	if (fc->type == AVM_FRITZ_PCIV2) {
 		__write_ctrl_pciv2(fc, hdlc, bch->nr);
 		addr = fc->addr + (bch->nr == 2 ?
 				   AVM_HDLC_FIFO_2 : AVM_HDLC_FIFO_1);
@@ -472,13 +493,21 @@
 		__write_ctrl_pci(fc, hdlc, bch->nr);
 		addr = fc->addr + CHIP_WINDOW;
 	}
-	while (cnt < count) {
-		val = get_unaligned(ptr);
-		outl(cpu_to_le32(val), addr);
-		ptr++;
-		cnt += 4;
+	if (fillempty) {
+		while (cnt < count) {
+			/* all bytes the same - no worry about endian */
+			outl(*ptr, addr);
+			cnt += 4;
+		}
+	} else {
+		while (cnt < count) {
+			val = get_unaligned(ptr);
+			outl(cpu_to_le32(val), addr);
+			ptr++;
+			cnt += 4;
+		}
 	}
-	if (debug & DEBUG_HW_BFIFO) {
+	if ((debug & DEBUG_HW_BFIFO) && !fillempty) {
 		snprintf(fc->log, LOG_SIZE, "B%1d-send %s %d ",
 			 bch->nr, fc->name, count);
 		print_hex_dump_bytes(fc->log, DUMP_PREFIX_OFFSET, p, count);
@@ -488,17 +517,17 @@
 static void
 HDLC_irq_xpr(struct bchannel *bch)
 {
-	if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
+	if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) {
 		hdlc_fill_fifo(bch);
-	else {
-		if (bch->tx_skb) {
-			/* send confirm, on trans, free on hdlc. */
-			if (test_bit(FLG_TRANSPARENT, &bch->Flags))
-				confirm_Bsend(bch);
+	} else {
+		if (bch->tx_skb)
 			dev_kfree_skb(bch->tx_skb);
-		}
-		if (get_next_bframe(bch))
+		if (get_next_bframe(bch)) {
 			hdlc_fill_fifo(bch);
+			test_and_clear_bit(FLG_TX_EMPTY, &bch->Flags);
+		} else if (test_bit(FLG_TX_EMPTY, &bch->Flags)) {
+			hdlc_fill_fifo(bch);
+		}
 	}
 }
 
@@ -506,13 +535,23 @@
 HDLC_irq(struct bchannel *bch, u32 stat)
 {
 	struct fritzcard *fc = bch->hw;
-	int		len;
+	int		len, fs;
+	u32		rmlMask;
 	struct hdlc_hw	*hdlc;
 
 	hdlc = &fc->hdlc[(bch->nr - 1) & 1];
 	pr_debug("%s: ch%d stat %#x\n", fc->name, bch->nr, stat);
+	if (fc->type == AVM_FRITZ_PCIV2) {
+		rmlMask = HDLC_STAT_RML_MASK_V2;
+		fs = HDLC_FIFO_SIZE_V2;
+	} else {
+		rmlMask = HDLC_STAT_RML_MASK_V1;
+		fs = HDLC_FIFO_SIZE_V1;
+	}
 	if (stat & HDLC_INT_RPR) {
 		if (stat & HDLC_STAT_RDO) {
+			pr_warning("%s: ch%d stat %x RDO\n",
+				   fc->name, bch->nr, stat);
 			hdlc->ctrl.sr.xml = 0;
 			hdlc->ctrl.sr.cmd |= HDLC_CMD_RRS;
 			write_ctrl(bch, 1);
@@ -521,21 +560,21 @@
 			if (bch->rx_skb)
 				skb_trim(bch->rx_skb, 0);
 		} else {
-			len = (stat & HDLC_STAT_RML_MASK) >> 8;
+			len = (stat & rmlMask) >> 8;
 			if (!len)
-				len = 32;
+				len = fs;
 			hdlc_empty_fifo(bch, len);
 			if (!bch->rx_skb)
 				goto handle_tx;
-			if ((stat & HDLC_STAT_RME) || test_bit(FLG_TRANSPARENT,
-							       &bch->Flags)) {
-				if (((stat & HDLC_STAT_CRCVFRRAB) ==
-				     HDLC_STAT_CRCVFR) ||
-				    test_bit(FLG_TRANSPARENT, &bch->Flags)) {
-					recv_Bchannel(bch, 0);
+			if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
+				recv_Bchannel(bch, 0, false);
+			} else if (stat & HDLC_STAT_RME) {
+				if ((stat & HDLC_STAT_CRCVFRRAB) ==
+				    HDLC_STAT_CRCVFR) {
+					recv_Bchannel(bch, 0, false);
 				} else {
-					pr_debug("%s: got invalid frame\n",
-						 fc->name);
+					pr_warning("%s: got invalid frame\n",
+						   fc->name);
 					skb_trim(bch->rx_skb, 0);
 				}
 			}
@@ -547,16 +586,13 @@
 		 * restart transmitting the whole frame on HDLC
 		 * in transparent mode we send the next data
 		 */
-		if (bch->tx_skb)
-			pr_debug("%s: ch%d XDU len(%d) idx(%d) Flags(%lx)\n",
-				 fc->name, bch->nr, bch->tx_skb->len,
-				 bch->tx_idx, bch->Flags);
-		else
-			pr_debug("%s: ch%d XDU no tx_skb Flags(%lx)\n",
-				 fc->name, bch->nr, bch->Flags);
+		pr_warning("%s: ch%d stat %x XDU %s\n", fc->name, bch->nr,
+			   stat, bch->tx_skb ? "tx_skb" : "no tx_skb");
 		if (bch->tx_skb && bch->tx_skb->len) {
 			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
 				bch->tx_idx = 0;
+		} else if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
+			test_and_set_bit(FLG_TX_EMPTY, &bch->Flags);
 		}
 		hdlc->ctrl.sr.xml = 0;
 		hdlc->ctrl.sr.cmd |= HDLC_CMD_XRS;
@@ -659,22 +695,17 @@
 	struct fritzcard *fc = bch->hw;
 	int ret = -EINVAL;
 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
-	u32 id;
-	u_long flags;
+	unsigned long flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
 		spin_lock_irqsave(&fc->lock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			hdlc_fill_fifo(bch);
 			ret = 0;
-			spin_unlock_irqrestore(&fc->lock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(&fc->lock, flags);
+		}
+		spin_unlock_irqrestore(&fc->lock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(&fc->lock, flags);
@@ -783,7 +814,7 @@
 		inithdlc(fc);
 		enable_hwirq(fc);
 		/* RESET Receiver and Transmitter */
-		if (AVM_FRITZ_PCIV2 == fc->type) {
+		if (fc->type == AVM_FRITZ_PCIV2) {
 			WriteISAC_V2(fc, ISACX_MASK, 0);
 			WriteISAC_V2(fc, ISACX_CMDRD, 0x41);
 		} else {
@@ -810,21 +841,7 @@
 static int
 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
 {
-	int ret = 0;
-	struct fritzcard *fc = bch->hw;
-
-	switch (cq->op) {
-	case MISDN_CTRL_GETOP:
-		cq->op = 0;
-		break;
-		/* Nothing implemented yet */
-	case MISDN_CTRL_FILL_EMPTY:
-	default:
-		pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	return mISDN_ctrl_bchannel(bch, cq);
 }
 
 static int
@@ -839,14 +856,10 @@
 	switch (cmd) {
 	case CLOSE_CHANNEL:
 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
-		if (test_bit(FLG_ACTIVE, &bch->Flags)) {
-			spin_lock_irqsave(&fc->lock, flags);
-			mISDN_freebchannel(bch);
-			test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
-			test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
-			modehdlc(bch, ISDN_P_NONE);
-			spin_unlock_irqrestore(&fc->lock, flags);
-		}
+		spin_lock_irqsave(&fc->lock, flags);
+		mISDN_freebchannel(bch);
+		modehdlc(bch, ISDN_P_NONE);
+		spin_unlock_irqrestore(&fc->lock, flags);
 		ch->protocol = ISDN_P_NONE;
 		ch->peer = NULL;
 		module_put(THIS_MODULE);
@@ -868,7 +881,7 @@
 
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
-		cq->op = MISDN_CTRL_LOOP;
+		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
 		break;
 	case MISDN_CTRL_LOOP:
 		/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -878,6 +891,9 @@
 		}
 		ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel);
 		break;
+	case MISDN_CTRL_L1_TIMER3:
+		ret = fc->isac.ctrl(&fc->isac, HW_TIMER3_VALUE, cq->p1);
+		break;
 	default:
 		pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op);
 		ret = -EINVAL;
@@ -898,7 +914,6 @@
 	bch = &fc->bch[rq->adr.channel - 1];
 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
 		return -EBUSY; /* b-channel can be only open once */
-	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
 	bch->ch.protocol = rq->protocol;
 	rq->ch = &bch->ch;
 	return 0;
@@ -1021,6 +1036,7 @@
 setup_instance(struct fritzcard *card)
 {
 	int i, err;
+	unsigned short minsize;
 	u_long flags;
 
 	snprintf(card->name, MISDN_MAX_IDLEN - 1, "AVM.%d", AVM_cnt + 1);
@@ -1040,7 +1056,11 @@
 	for (i = 0; i < 2; i++) {
 		card->bch[i].nr = i + 1;
 		set_channelmap(i + 1, card->isac.dch.dev.channelmap);
-		mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM);
+		if (AVM_FRITZ_PCIV2 == card->type)
+			minsize = HDLC_FIFO_SIZE_V2;
+		else
+			minsize = HDLC_FIFO_SIZE_V1;
+		mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, minsize);
 		card->bch[i].hw = card;
 		card->bch[i].ch.send = avm_l2l1B;
 		card->bch[i].ch.ctrl = avm_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi.h b/drivers/isdn/hardware/mISDN/hfc_multi.h
index b0588ac..c601f88 100644
--- a/drivers/isdn/hardware/mISDN/hfc_multi.h
+++ b/drivers/isdn/hardware/mISDN/hfc_multi.h
@@ -205,18 +205,22 @@
 
 	u_int		slots;	/* number of PCM slots */
 	u_int		leds;	/* type of leds */
-	u_int		ledcount; /* used to animate leds */
 	u_long		ledstate; /* save last state of leds */
 	int		opticalsupport; /* has the e1 board */
 					/* an optical Interface */
-	int		dslot;	/* channel # of d-channel (E1) default 16 */
+
+	u_int		bmask[32]; /* bitmask of bchannels for port */
+	u_char		dnum[32]; /* array of used dchannel numbers for port */
+	u_char		created[32]; /* what port is created */
+	u_int		activity_tx; /* if there is data TX / RX */
+	u_int		activity_rx; /* bitmask according to port number */
+				     /* (will be cleared after */
+				     /* showing led-states) */
+	u_int		flash[8]; /* counter for flashing 8 leds on activity */
 
 	u_long		wdcount;	/* every 500 ms we need to */
 					/* send the watchdog a signal */
 	u_char		wdbyte; /* watchdog toggle byte */
-	u_int		activity[8];	/* if there is any action on this */
-					/* port (will be cleared after */
-					/* showing led-states) */
 	int		e1_state; /* keep track of last state */
 	int		e1_getclock; /* if sync is retrieved from interface */
 	int		syncronized; /* keep track of existing sync interface */
@@ -233,7 +237,6 @@
 	 * the bch->channel is equvalent to the hfc-channel
 	 */
 	struct hfc_chan	chan[32];
-	u_char		created[8]; /* what port is created */
 	signed char	slot_owner[256]; /* owner channel of slot */
 };
 
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 0332231..5e402cf2 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -103,14 +103,26 @@
  *	Omit this value, if all cards are interconnected or none is connected.
  *	If unsure, don't give this parameter.
  *
- * dslot:
- *	NOTE: only one dslot value must be given for every card.
- *	Also this value must be given for non-E1 cards. If omitted, the E1
- *	card has D-channel on time slot 16, which is default.
- *	If 1..15 or 17..31, an alternate time slot is used for D-channel.
- *	In this case, the application must be able to handle this.
- *	If -1 is given, the D-channel is disabled and all 31 slots can be used
- *	for B-channel. (only for specific applications)
+ * dmask and bmask:
+ *	NOTE: One dmask value must be given for every HFC-E1 card.
+ *	If omitted, the E1 card has D-channel on time slot 16, which is default.
+ *	dmask is a 32 bit mask. The bit must be set for an alternate time slot.
+ *	If multiple bits are set, multiple virtual card fragments are created.
+ *	For each bit set, a bmask value must be given. Each bit on the bmask
+ *	value stands for a B-channel. The bmask may not overlap with dmask or
+ *	with other bmask values for that card.
+ *	Example: dmask=0x00020002 bmask=0x0000fffc,0xfffc0000
+ *		This will create one fragment with D-channel on slot 1 with
+ *		B-channels on slots 2..15, and a second fragment with D-channel
+ *		on slot 17 with B-channels on slots 18..31. Slot 16 is unused.
+ *	If bit 0 is set (dmask=0x00000001) the D-channel is on slot 0 and will
+ *	not function.
+ *	Example: dmask=0x00000001 bmask=0xfffffffe
+ *		This will create a port with all 31 usable timeslots as
+ *		B-channels.
+ *	If no bits are set on bmask, no B-channel is created for that fragment.
+ *	Example: dmask=0xfffffffe bmask=0,0,0,0.... (31 0-values for bmask)
+ *		This will create 31 ports with one D-channel only.
  *	If you don't know how to use it, you don't need it!
  *
  * iomode:
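To make the bit layout described under "dmask and bmask" concrete: bit n of either mask selects E1 time slot n, so the default layout (D-channel on slot 16, every other usable slot a B-channel) corresponds to dmask=0x00010000 and bmask=0xfffefffe. A hypothetical helper, shown only to illustrate the arithmetic and not part of the driver:

	#include <stdint.h>

	/*
	 * Build dmask/bmask for a single fragment with the D-channel on
	 * time slot `dslot` and all remaining slots 1..31 as B-channels
	 * (slot 0 carries the E1 framing and cannot carry payload).
	 */
	static void e1_masks(unsigned int dslot, uint32_t *dmask, uint32_t *bmask)
	{
		*dmask = 1u << dslot;			/* bit n == time slot n */
		*bmask = 0xfffffffeu & ~*dmask;		/* slots 1..31 minus the D slot */
	}

e1_masks(16, ...) reproduces the default; splitting a card into several fragments, as in the dmask=0x00020002 example above, means setting one bit per fragment in dmask and giving each fragment its own non-overlapping bmask.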
@@ -172,6 +184,7 @@
 
 #define	MAX_CARDS	8
 #define	MAX_PORTS	(8 * MAX_CARDS)
+#define	MAX_FRAGS	(32 * MAX_CARDS)
 
 static LIST_HEAD(HFClist);
 static spinlock_t HFClock; /* global hfc list lock */
@@ -203,7 +216,8 @@
 
 static uint	type[MAX_CARDS];
 static int	pcm[MAX_CARDS];
-static int	dslot[MAX_CARDS];
+static uint	dmask[MAX_CARDS];
+static uint	bmask[MAX_FRAGS];
 static uint	iomode[MAX_CARDS];
 static uint	port[MAX_PORTS];
 static uint	debug;
@@ -218,7 +232,7 @@
 #define HWID_MINIP16	3
 static uint	hwid = HWID_NONE;
 
-static int	HFC_cnt, Port_cnt, PCM_cnt = 99;
+static int	HFC_cnt, E1_cnt, bmask_cnt, Port_cnt, PCM_cnt = 99;
 
 MODULE_AUTHOR("Andreas Eversberg");
 MODULE_LICENSE("GPL");
@@ -231,7 +245,8 @@
 module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR);
 module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
 module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR);
-module_param_array(dslot, int, NULL, S_IRUGO | S_IWUSR);
+module_param_array(dmask, uint, NULL, S_IRUGO | S_IWUSR);
+module_param_array(bmask, uint, NULL, S_IRUGO | S_IWUSR);
 module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR);
 module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
 module_param(hwid, uint, S_IRUGO | S_IWUSR); /* The hardware ID */
@@ -1156,7 +1171,7 @@
 	hc->DTMFbase = 0x1000;
 	if (test_bit(HFC_CHIP_EXRAM_128, &hc->chip)) {
 		if (debug & DEBUG_HFCMULTI_INIT)
-			printk(KERN_DEBUG "%s: changing to 128K extenal RAM\n",
+			printk(KERN_DEBUG "%s: changing to 128K external RAM\n",
 			       __func__);
 		hc->hw.r_ctrl |= V_EXT_RAM;
 		hc->hw.r_ram_sz = 1;
@@ -1167,7 +1182,7 @@
 	}
 	if (test_bit(HFC_CHIP_EXRAM_512, &hc->chip)) {
 		if (debug & DEBUG_HFCMULTI_INIT)
-			printk(KERN_DEBUG "%s: changing to 512K extenal RAM\n",
+			printk(KERN_DEBUG "%s: changing to 512K external RAM\n",
 			       __func__);
 		hc->hw.r_ctrl |= V_EXT_RAM;
 		hc->hw.r_ram_sz = 2;
@@ -1607,40 +1622,46 @@
 	struct dchannel *dch;
 	int led[4];
 
-	hc->ledcount += poll;
-	if (hc->ledcount > 4096) {
-		hc->ledcount -= 4096;
-		hc->ledstate = 0xAFFEAFFE;
-	}
-
 	switch (hc->leds) {
 	case 1: /* HFC-E1 OEM */
-		/* 2 red blinking: NT mode deactivate
-		 * 2 red steady:   TE mode deactivate
-		 * left green:     L1 active
-		 * left red:       frame sync, but no L1
-		 * right green:    L2 active
+		/* 2 red steady:       LOS
+		 * 1 red steady:       L1 not active
+		 * 2 green steady:     L1 active
+		 * 1st green flashing: activity on TX
+		 * 2nd green flashing: activity on RX
 		 */
-		if (hc->chan[hc->dslot].sync != 2) { /* no frame sync */
-			if (hc->chan[hc->dslot].dch->dev.D.protocol
-			    != ISDN_P_NT_E1) {
-				led[0] = 1;
+		led[0] = 0;
+		led[1] = 0;
+		led[2] = 0;
+		led[3] = 0;
+		dch = hc->chan[hc->dnum[0]].dch;
+		if (dch) {
+			if (hc->chan[hc->dnum[0]].los)
 				led[1] = 1;
-			} else if (hc->ledcount >> 11) {
+			if (hc->e1_state != 1) {
 				led[0] = 1;
-				led[1] = 1;
+				hc->flash[2] = 0;
+				hc->flash[3] = 0;
 			} else {
-				led[0] = 0;
-				led[1] = 0;
+				led[2] = 1;
+				led[3] = 1;
+				if (!hc->flash[2] && hc->activity_tx)
+					hc->flash[2] = poll;
+				if (!hc->flash[3] && hc->activity_rx)
+					hc->flash[3] = poll;
+				if (hc->flash[2] && hc->flash[2] < 1024)
+					led[2] = 0;
+				if (hc->flash[3] && hc->flash[3] < 1024)
+					led[3] = 0;
+				if (hc->flash[2] >= 2048)
+					hc->flash[2] = 0;
+				if (hc->flash[3] >= 2048)
+					hc->flash[3] = 0;
+				if (hc->flash[2])
+					hc->flash[2] += poll;
+				if (hc->flash[3])
+					hc->flash[3] += poll;
 			}
-			led[2] = 0;
-			led[3] = 0;
-		} else { /* with frame sync */
-			/* TODO make it work */
-			led[0] = 0;
-			led[1] = 0;
-			led[2] = 0;
-			led[3] = 1;
 		}
 		leds = (led[0] | (led[1]<<2) | (led[2]<<1) | (led[3]<<3))^0xF;
 		/* leds are inverted */
@@ -1651,9 +1672,9 @@
 		break;
 
 	case 2: /* HFC-4S OEM */
-		/* red blinking = PH_DEACTIVATE NT Mode
-		 * red steady   = PH_DEACTIVATE TE Mode
-		 * green steady = PH_ACTIVATE
+		/* red steady:     PH_DEACTIVATE
+		 * green steady:   PH_ACTIVATE
+		 * green flashing: activity on TX
 		 */
 		for (i = 0; i < 4; i++) {
 			state = 0;
@@ -1669,17 +1690,20 @@
 			if (state) {
 				if (state == active) {
 					led[i] = 1; /* led green */
-				} else
-					if (dch->dev.D.protocol == ISDN_P_TE_S0)
-						/* TE mode: led red */
-						led[i] = 2;
-					else
-						if (hc->ledcount >> 11)
-							/* led red */
-							led[i] = 2;
-						else
-							/* led off */
-							led[i] = 0;
+					hc->activity_tx |= hc->activity_rx;
+					if (!hc->flash[i] &&
+						(hc->activity_tx & (1 << i)))
+							hc->flash[i] = poll;
+					if (hc->flash[i] && hc->flash[i] < 1024)
+						led[i] = 0; /* led off */
+					if (hc->flash[i] >= 2048)
+						hc->flash[i] = 0;
+					if (hc->flash[i])
+						hc->flash[i] += poll;
+				} else {
+					led[i] = 2; /* led red */
+					hc->flash[i] = 0;
+				}
 			} else
 				led[i] = 0; /* led off */
 		}
@@ -1712,9 +1736,9 @@
 		break;
 
 	case 3: /* HFC 1S/2S Beronet */
-		/* red blinking = PH_DEACTIVATE NT Mode
-		 * red steady   = PH_DEACTIVATE TE Mode
-		 * green steady = PH_ACTIVATE
+		/* red steady:     PH_DEACTIVATE
+		 * green steady:   PH_ACTIVATE
+		 * green flashing: activity on TX
 		 */
 		for (i = 0; i < 2; i++) {
 			state = 0;
@@ -1730,22 +1754,23 @@
 			if (state) {
 				if (state == active) {
 					led[i] = 1; /* led green */
-				} else
-					if (dch->dev.D.protocol == ISDN_P_TE_S0)
-						/* TE mode: led red */
-						led[i] = 2;
-					else
-						if (hc->ledcount >> 11)
-							/* led red */
-							led[i] = 2;
-						else
-							/* led off */
-							led[i] = 0;
+					hc->activity_tx |= hc->activity_rx;
+					if (!hc->flash[i] &&
+						(hc->activity_tx & (1 << i)))
+							hc->flash[i] = poll;
+					if (hc->flash[i] < 1024)
+						led[i] = 0; /* led off */
+					if (hc->flash[i] >= 2048)
+						hc->flash[i] = 0;
+					if (hc->flash[i])
+						hc->flash[i] += poll;
+				} else {
+					led[i] = 2; /* led red */
+					hc->flash[i] = 0;
+				}
 			} else
 				led[i] = 0; /* led off */
 		}
-
-
 		leds = (led[0] > 0) | ((led[1] > 0) << 1) | ((led[0]&1) << 2)
 			| ((led[1]&1) << 3);
 		if (leds != (int)hc->ledstate) {
@@ -1757,8 +1782,11 @@
 		}
 		break;
 	case 8: /* HFC 8S+ Beronet */
-		lled = 0;
-
+		/* off:      PH_DEACTIVATE
+		 * steady:   PH_ACTIVATE
+		 * flashing: activity on TX
+		 */
+		lled = 0xff; /* leds off */
 		for (i = 0; i < 8; i++) {
 			state = 0;
 			active = -1;
@@ -1772,14 +1800,20 @@
 			}
 			if (state) {
 				if (state == active) {
-					lled |= 0 << i;
+					lled &= ~(1 << i); /* led on */
+					hc->activity_tx |= hc->activity_rx;
+					if (!hc->flash[i] &&
+						(hc->activity_tx & (1 << i)))
+							hc->flash[i] = poll;
+					if (hc->flash[i] < 1024)
+						lled |= 1 << i; /* led off */
+					if (hc->flash[i] >= 2048)
+						hc->flash[i] = 0;
+					if (hc->flash[i])
+						hc->flash[i] += poll;
 				} else
-					if (hc->ledcount >> 11)
-						lled |= 0 << i;
-					else
-						lled |= 1 << i;
-			} else
-				lled |= 1 << i;
+					hc->flash[i] = 0;
+			}
 		}
 		leddw = lled << 24 | lled << 16 | lled << 8 | lled;
 		if (leddw != hc->ledstate) {
@@ -1794,6 +1828,8 @@
 		}
 		break;
 	}
+	hc->activity_tx = 0;
+	hc->activity_rx = 0;
 }
 /*
  * read dtmf coefficients
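The LED rework above replaces the old free-running ledcount blink with a per-LED flash cycle driven by activity. Roughly the scheme used in the cases above, condensed into one hypothetical helper (assuming the LED routine runs once per poll interval and the counters are kept in samples at 8 kHz, so 1024 is about 128 ms):

	/* decide whether an active port's LED is currently lit */
	static int led_flash(unsigned int *flash, int activity, unsigned int poll)
	{
		int on = 1;			/* steady on while the port is active */

		if (!*flash && activity)
			*flash = poll;		/* activity starts a flash cycle */
		if (*flash && *flash < 1024)
			on = 0;			/* dark phase, about 128 ms */
		if (*flash >= 2048)
			*flash = 0;		/* cycle over; wait for new activity */
		if (*flash)
			*flash += poll;		/* advance a running cycle */
		return on;
	}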
@@ -2093,7 +2129,8 @@
 	*txpending = 1;
 
 	/* show activity */
-	hc->activity[hc->chan[ch].port] = 1;
+	if (dch)
+		hc->activity_tx |= 1 << hc->chan[ch].port;
 
 	/* fill fifo to what we have left */
 	ii = len;
@@ -2129,13 +2166,9 @@
 		HFC_wait_nodebug(hc);
 	}
 
-	/* send confirm, since get_net_bframe will not do it with trans */
-	if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
-		confirm_Bsend(bch);
-
-	/* check for next frame */
 	dev_kfree_skb(*sp);
-	if (bch && get_next_bframe(bch)) { /* hdlc is confirmed here */
+	/* check for next frame */
+	if (bch && get_next_bframe(bch)) {
 		len = (*sp)->len;
 		goto next_frame;
 	}
@@ -2163,24 +2196,20 @@
 	int f1 = 0, f2 = 0; /* = 0, to make GCC happy */
 	int again = 0;
 	struct	bchannel *bch;
-	struct  dchannel *dch;
+	struct  dchannel *dch = NULL;
 	struct sk_buff	*skb, **sp = NULL;
 	int	maxlen;
 
 	bch = hc->chan[ch].bch;
-	dch = hc->chan[ch].dch;
-	if ((!dch) && (!bch))
-		return;
-	if (dch) {
-		if (!test_bit(FLG_ACTIVE, &dch->Flags))
-			return;
-		sp = &dch->rx_skb;
-		maxlen = dch->maxlen;
-	} else {
+	if (bch) {
 		if (!test_bit(FLG_ACTIVE, &bch->Flags))
 			return;
-		sp = &bch->rx_skb;
-		maxlen = bch->maxlen;
+	} else if (hc->chan[ch].dch) {
+		dch = hc->chan[ch].dch;
+		if (!test_bit(FLG_ACTIVE, &dch->Flags))
+			return;
+	} else {
+		return;
 	}
 next_frame:
 	/* on first AND before getting next valid frame, R_FIFO must be written
@@ -2195,8 +2224,11 @@
 	HFC_wait_nodebug(hc);
 
 	/* ignore if rx is off BUT change fifo (above) to start pending TX */
-	if (hc->chan[ch].rx_off)
+	if (hc->chan[ch].rx_off) {
+		if (bch)
+			bch->dropcnt += poll; /* not exact but fair enough */
 		return;
+	}
 
 	if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
 		f1 = HFC_inb_nodebug(hc, A_F1);
@@ -2227,16 +2259,30 @@
 	if (Zsize <= 0)
 		return;
 
-	if (*sp == NULL) {
-		*sp = mI_alloc_skb(maxlen + 3, GFP_ATOMIC);
-		if (*sp == NULL) {
-			printk(KERN_DEBUG "%s: No mem for rx_skb\n",
-			       __func__);
+	if (bch) {
+		maxlen = bchannel_get_rxbuf(bch, Zsize);
+		if (maxlen < 0) {
+			pr_warning("card%d.B%d: No bufferspace for %d bytes\n",
+				   hc->id + 1, bch->nr, Zsize);
 			return;
 		}
+		sp = &bch->rx_skb;
+		maxlen = bch->maxlen;
+	} else { /* Dchannel */
+		sp = &dch->rx_skb;
+		maxlen = dch->maxlen + 3;
+		if (*sp == NULL) {
+			*sp = mI_alloc_skb(maxlen, GFP_ATOMIC);
+			if (*sp == NULL) {
+				pr_warning("card%d: No mem for dch rx_skb\n",
+					   hc->id + 1);
+				return;
+			}
+		}
 	}
 	/* show activity */
-	hc->activity[hc->chan[ch].port] = 1;
+	if (dch)
+		hc->activity_rx |= 1 << hc->chan[ch].port;
 
 	/* empty fifo with what we have */
 	if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
@@ -2247,7 +2293,7 @@
 			       Zsize, z1, z2, (f1 == f2) ? "fragment" : "COMPLETE",
 			       f1, f2, Zsize + (*sp)->len, again);
 		/* HDLC */
-		if ((Zsize + (*sp)->len) > (maxlen + 3)) {
+		if ((Zsize + (*sp)->len) > maxlen) {
 			if (debug & DEBUG_HFCMULTI_FIFO)
 				printk(KERN_DEBUG
 				       "%s(card %d): hdlc-frame too large.\n",
@@ -2309,7 +2355,7 @@
 			if (dch)
 				recv_Dchannel(dch);
 			else
-				recv_Bchannel(bch, MISDN_ID_ANY);
+				recv_Bchannel(bch, MISDN_ID_ANY, false);
 			*sp = skb;
 			again++;
 			goto next_frame;
@@ -2317,32 +2363,14 @@
 		/* there is an incomplete frame */
 	} else {
 		/* transparent */
-		if (Zsize > skb_tailroom(*sp))
-			Zsize = skb_tailroom(*sp);
 		hc->read_fifo(hc, skb_put(*sp, Zsize), Zsize);
-		if (((*sp)->len) < MISDN_COPY_SIZE) {
-			skb = *sp;
-			*sp = mI_alloc_skb(skb->len, GFP_ATOMIC);
-			if (*sp) {
-				memcpy(skb_put(*sp, skb->len),
-				       skb->data, skb->len);
-				skb_trim(skb, 0);
-			} else {
-				printk(KERN_DEBUG "%s: No mem\n", __func__);
-				*sp = skb;
-				skb = NULL;
-			}
-		} else {
-			skb = NULL;
-		}
 		if (debug & DEBUG_HFCMULTI_FIFO)
 			printk(KERN_DEBUG
 			       "%s(card %d): fifo(%d) reading %d bytes "
 			       "(z1=%04x, z2=%04x) TRANS\n",
 			       __func__, hc->id + 1, ch, Zsize, z1, z2);
 		/* only bch is transparent */
-		recv_Bchannel(bch, hc->chan[ch].Zfill);
-		*sp = skb;
+		recv_Bchannel(bch, hc->chan[ch].Zfill, false);
 	}
 }
 
@@ -2430,55 +2458,55 @@
 			}
 		}
 	if (hc->ctype == HFC_TYPE_E1 && hc->created[0]) {
-		dch = hc->chan[hc->dslot].dch;
-		if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) {
-			/* LOS */
-			temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS;
-			if (!temp && hc->chan[hc->dslot].los)
+		dch = hc->chan[hc->dnum[0]].dch;
+		/* LOS */
+		temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS;
+		hc->chan[hc->dnum[0]].los = temp;
+		if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dnum[0]].cfg)) {
+			if (!temp && hc->chan[hc->dnum[0]].los)
 				signal_state_up(dch, L1_SIGNAL_LOS_ON,
 						"LOS detected");
-			if (temp && !hc->chan[hc->dslot].los)
+			if (temp && !hc->chan[hc->dnum[0]].los)
 				signal_state_up(dch, L1_SIGNAL_LOS_OFF,
 						"LOS gone");
-			hc->chan[hc->dslot].los = temp;
 		}
-		if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dslot].cfg)) {
+		if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dnum[0]].cfg)) {
 			/* AIS */
 			temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_AIS;
-			if (!temp && hc->chan[hc->dslot].ais)
+			if (!temp && hc->chan[hc->dnum[0]].ais)
 				signal_state_up(dch, L1_SIGNAL_AIS_ON,
 						"AIS detected");
-			if (temp && !hc->chan[hc->dslot].ais)
+			if (temp && !hc->chan[hc->dnum[0]].ais)
 				signal_state_up(dch, L1_SIGNAL_AIS_OFF,
 						"AIS gone");
-			hc->chan[hc->dslot].ais = temp;
+			hc->chan[hc->dnum[0]].ais = temp;
 		}
-		if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dslot].cfg)) {
+		if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dnum[0]].cfg)) {
 			/* SLIP */
 			temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_RX;
-			if (!temp && hc->chan[hc->dslot].slip_rx)
+			if (!temp && hc->chan[hc->dnum[0]].slip_rx)
 				signal_state_up(dch, L1_SIGNAL_SLIP_RX,
 						" bit SLIP detected RX");
-			hc->chan[hc->dslot].slip_rx = temp;
+			hc->chan[hc->dnum[0]].slip_rx = temp;
 			temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_TX;
-			if (!temp && hc->chan[hc->dslot].slip_tx)
+			if (!temp && hc->chan[hc->dnum[0]].slip_tx)
 				signal_state_up(dch, L1_SIGNAL_SLIP_TX,
 						" bit SLIP detected TX");
-			hc->chan[hc->dslot].slip_tx = temp;
+			hc->chan[hc->dnum[0]].slip_tx = temp;
 		}
-		if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dslot].cfg)) {
+		if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dnum[0]].cfg)) {
 			/* RDI */
 			temp = HFC_inb_nodebug(hc, R_RX_SL0_0) & V_A;
-			if (!temp && hc->chan[hc->dslot].rdi)
+			if (!temp && hc->chan[hc->dnum[0]].rdi)
 				signal_state_up(dch, L1_SIGNAL_RDI_ON,
 						"RDI detected");
-			if (temp && !hc->chan[hc->dslot].rdi)
+			if (temp && !hc->chan[hc->dnum[0]].rdi)
 				signal_state_up(dch, L1_SIGNAL_RDI_OFF,
 						"RDI gone");
-			hc->chan[hc->dslot].rdi = temp;
+			hc->chan[hc->dnum[0]].rdi = temp;
 		}
 		temp = HFC_inb_nodebug(hc, R_JATT_DIR);
-		switch (hc->chan[hc->dslot].sync) {
+		switch (hc->chan[hc->dnum[0]].sync) {
 		case 0:
 			if ((temp & 0x60) == 0x60) {
 				if (debug & DEBUG_HFCMULTI_SYNC)
@@ -2487,10 +2515,10 @@
 					       "in clock sync\n",
 					       __func__, hc->id);
 				HFC_outb(hc, R_RX_OFF,
-					 hc->chan[hc->dslot].jitter | V_RX_INIT);
+				    hc->chan[hc->dnum[0]].jitter | V_RX_INIT);
 				HFC_outb(hc, R_TX_OFF,
-					 hc->chan[hc->dslot].jitter | V_RX_INIT);
-				hc->chan[hc->dslot].sync = 1;
+				    hc->chan[hc->dnum[0]].jitter | V_RX_INIT);
+				hc->chan[hc->dnum[0]].sync = 1;
 				goto check_framesync;
 			}
 			break;
@@ -2501,7 +2529,7 @@
 					       "%s: (id=%d) E1 "
 					       "lost clock sync\n",
 					       __func__, hc->id);
-				hc->chan[hc->dslot].sync = 0;
+				hc->chan[hc->dnum[0]].sync = 0;
 				break;
 			}
 		check_framesync:
@@ -2512,7 +2540,7 @@
 					       "%s: (id=%d) E1 "
 					       "now in frame sync\n",
 					       __func__, hc->id);
-				hc->chan[hc->dslot].sync = 2;
+				hc->chan[hc->dnum[0]].sync = 2;
 			}
 			break;
 		case 2:
@@ -2522,7 +2550,7 @@
 					       "%s: (id=%d) E1 lost "
 					       "clock & frame sync\n",
 					       __func__, hc->id);
-				hc->chan[hc->dslot].sync = 0;
+				hc->chan[hc->dnum[0]].sync = 0;
 				break;
 			}
 			temp = HFC_inb_nodebug(hc, R_SYNC_STA);
@@ -2532,7 +2560,7 @@
 					       "%s: (id=%d) E1 "
 					       "lost frame sync\n",
 					       __func__, hc->id);
-				hc->chan[hc->dslot].sync = 1;
+				hc->chan[hc->dnum[0]].sync = 1;
 			}
 			break;
 		}
@@ -2673,7 +2701,7 @@
 	int			i;
 	void __iomem		*plx_acc;
 	u_short			wval;
-	u_char			e1_syncsta, temp;
+	u_char			e1_syncsta, temp, temp2;
 	u_long			flags;
 
 	if (!hc) {
@@ -2748,7 +2776,7 @@
 		if (r_irq_misc & V_STA_IRQ) {
 			if (hc->ctype == HFC_TYPE_E1) {
 				/* state machine */
-				dch = hc->chan[hc->dslot].dch;
+				dch = hc->chan[hc->dnum[0]].dch;
 				e1_syncsta = HFC_inb_nodebug(hc, R_SYNC_STA);
 				if (test_bit(HFC_CHIP_PLXSD, &hc->chip)
 				    && hc->e1_getclock) {
@@ -2758,23 +2786,26 @@
 						hc->syncronized = 0;
 				}
 				/* undocumented: status changes during read */
-				dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA);
-				while (dch->state != (temp =
+				temp = HFC_inb_nodebug(hc, R_E1_RD_STA);
+				while (temp != (temp2 =
 						      HFC_inb_nodebug(hc, R_E1_RD_STA))) {
 					if (debug & DEBUG_HFCMULTI_STATE)
 						printk(KERN_DEBUG "%s: reread "
 						       "STATE because %d!=%d\n",
-						       __func__, temp,
-						       dch->state);
-					dch->state = temp; /* repeat */
+						    __func__, temp, temp2);
+					temp = temp2; /* repeat */
 				}
-				dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA)
-					& 0x7;
-				schedule_event(dch, FLG_PHCHANGE);
+				/* broadcast state change to all fragments */
 				if (debug & DEBUG_HFCMULTI_STATE)
 					printk(KERN_DEBUG
 					       "%s: E1 (id=%d) newstate %x\n",
-					       __func__, hc->id, dch->state);
+					    __func__, hc->id, temp & 0x7);
+				for (i = 0; i < hc->ports; i++) {
+					dch = hc->chan[hc->dnum[i]].dch;
+					dch->state = temp & 0x7;
+					schedule_event(dch, FLG_PHCHANGE);
+				}
+
 				if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
 					plxsd_checksync(hc, 0);
 			}
@@ -3018,8 +3049,10 @@
 			HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
 			HFC_outb(hc, A_SUBCH_CFG, 0);
 			HFC_outb(hc, A_IRQ_MSK, 0);
-			HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
-			HFC_wait(hc);
+			if (hc->chan[ch].protocol != protocol) {
+				HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+				HFC_wait(hc);
+			}
 			HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) +
 					       ((ch % 4) * 4) + 1) << 1) | 1);
 			HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1) | 1);
@@ -3039,8 +3072,10 @@
 			HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
 			HFC_outb(hc, A_SUBCH_CFG, 0);
 			HFC_outb(hc, A_IRQ_MSK, 0);
-			HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
-			HFC_wait(hc);
+			if (hc->chan[ch].protocol != protocol) {
+				HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+				HFC_wait(hc);
+			}
 			/* tx silence */
 			HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
 			HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
@@ -3059,8 +3094,10 @@
 					 V_HDLC_TRP | V_IFF);
 			HFC_outb(hc, A_SUBCH_CFG, 0);
 			HFC_outb(hc, A_IRQ_MSK, 0);
-			HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
-			HFC_wait(hc);
+			if (hc->chan[ch].protocol != protocol) {
+				HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+				HFC_wait(hc);
+			}
 			/* tx silence */
 			HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
 			/* enable RX fifo */
@@ -3075,8 +3112,10 @@
 					 V_HDLC_TRP);
 			HFC_outb(hc, A_SUBCH_CFG, 0);
 			HFC_outb(hc, A_IRQ_MSK, 0);
-			HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
-			HFC_wait(hc);
+			if (hc->chan[ch].protocol != protocol) {
+				HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
+				HFC_wait(hc);
+			}
 		}
 		if (hc->ctype != HFC_TYPE_E1) {
 			hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
@@ -3433,8 +3472,7 @@
 	struct hfc_multi	*hc = bch->hw;
 	int			ret = -EINVAL;
 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
-	unsigned int		id;
-	u_long			flags;
+	unsigned long		flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
@@ -3443,19 +3481,13 @@
 		spin_lock_irqsave(&hc->lock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			hfcmulti_tx(hc, bch->slot);
 			ret = 0;
 			/* start fifo */
 			HFC_outb_nodebug(hc, R_FIFO, 0);
 			HFC_wait_nodebug(hc);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) {
-				spin_unlock_irqrestore(&hc->lock, flags);
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-			} else
-				spin_unlock_irqrestore(&hc->lock, flags);
-		} else
-			spin_unlock_irqrestore(&hc->lock, flags);
+		}
+		spin_unlock_irqrestore(&hc->lock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		if (debug & DEBUG_HFCMULTI_MSG)
@@ -3545,10 +3577,11 @@
 
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
-		cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP
-			| MISDN_CTRL_RX_OFF | MISDN_CTRL_FILL_EMPTY;
+		ret = mISDN_ctrl_bchannel(bch, cq);
+		cq->op |= MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP;
 		break;
 	case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */
+		ret = mISDN_ctrl_bchannel(bch, cq);
 		hc->chan[bch->slot].rx_off = !!cq->p1;
 		if (!hc->chan[bch->slot].rx_off) {
 			/* reset fifo on rx on */
@@ -3561,11 +3594,10 @@
 			printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n",
 			       __func__, bch->nr, hc->chan[bch->slot].rx_off);
 		break;
-	case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
-		test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
-		if (debug & DEBUG_HFCMULTI_MSG)
-			printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
-			       "off=%d)\n", __func__, bch->nr, !!cq->p1);
+	case MISDN_CTRL_FILL_EMPTY:
+		ret = mISDN_ctrl_bchannel(bch, cq);
+		hc->silence = bch->fill[0];
+		memset(hc->silence_data, hc->silence, sizeof(hc->silence_data));
 		break;
 	case MISDN_CTRL_HW_FEATURES: /* fill features structure */
 		if (debug & DEBUG_HFCMULTI_MSG)
@@ -3654,9 +3686,7 @@
 			ret = -EINVAL;
 		break;
 	default:
-		printk(KERN_WARNING "%s: unknown Op %x\n",
-		       __func__, cq->op);
-		ret = -EINVAL;
+		ret = mISDN_ctrl_bchannel(bch, cq);
 		break;
 	}
 	return ret;
@@ -3676,8 +3706,7 @@
 	switch (cmd) {
 	case CLOSE_CHANNEL:
 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
-		if (test_bit(FLG_ACTIVE, &bch->Flags))
-			deactivate_bchannel(bch); /* locked there */
+		deactivate_bchannel(bch); /* locked there */
 		ch->protocol = ISDN_P_NONE;
 		ch->peer = NULL;
 		module_put(THIS_MODULE);
@@ -3839,31 +3868,37 @@
 	if (debug & DEBUG_HFCMULTI_INIT)
 		printk(KERN_DEBUG "%s: entered\n", __func__);
 
+	i = dch->slot;
+	pt = hc->chan[i].port;
 	if (hc->ctype == HFC_TYPE_E1) {
-		hc->chan[hc->dslot].slot_tx = -1;
-		hc->chan[hc->dslot].slot_rx = -1;
-		hc->chan[hc->dslot].conf = -1;
-		if (hc->dslot) {
-			mode_hfcmulti(hc, hc->dslot, dch->dev.D.protocol,
+		/* E1 */
+		hc->chan[hc->dnum[pt]].slot_tx = -1;
+		hc->chan[hc->dnum[pt]].slot_rx = -1;
+		hc->chan[hc->dnum[pt]].conf = -1;
+		if (hc->dnum[pt]) {
+			mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
 				      -1, 0, -1, 0);
 			dch->timer.function = (void *) hfcmulti_dbusy_timer;
 			dch->timer.data = (long) dch;
 			init_timer(&dch->timer);
 		}
 		for (i = 1; i <= 31; i++) {
-			if (i == hc->dslot)
+			if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
 				continue;
 			hc->chan[i].slot_tx = -1;
 			hc->chan[i].slot_rx = -1;
 			hc->chan[i].conf = -1;
 			mode_hfcmulti(hc, i, ISDN_P_NONE, -1, 0, -1, 0);
 		}
-		/* E1 */
-		if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) {
+	}
+	if (hc->ctype == HFC_TYPE_E1 && pt == 0) {
+		/* E1, port 0 */
+		dch = hc->chan[hc->dnum[0]].dch;
+		if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dnum[0]].cfg)) {
 			HFC_outb(hc, R_LOS0, 255); /* 2 ms */
 			HFC_outb(hc, R_LOS1, 255); /* 512 ms */
 		}
-		if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dslot].cfg)) {
+		if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dnum[0]].cfg)) {
 			HFC_outb(hc, R_RX0, 0);
 			hc->hw.r_tx0 = 0 | V_OUT_EN;
 		} else {
@@ -3876,12 +3911,12 @@
 		HFC_outb(hc, R_TX_FR0, 0x00);
 		HFC_outb(hc, R_TX_FR1, 0xf8);
 
-		if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg))
+		if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dnum[0]].cfg))
 			HFC_outb(hc, R_TX_FR2, V_TX_MF | V_TX_E | V_NEG_E);
 
 		HFC_outb(hc, R_RX_FR0, V_AUTO_RESYNC | V_AUTO_RECO | 0);
 
-		if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg))
+		if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dnum[0]].cfg))
 			HFC_outb(hc, R_RX_FR1, V_RX_MF | V_RX_MF_SYNC);
 
 		if (dch->dev.D.protocol == ISDN_P_NT_E1) {
@@ -3944,13 +3979,14 @@
 			hc->syncronized = 0;
 			plxsd_checksync(hc, 0);
 		}
-	} else {
-		i = dch->slot;
+	}
+	if (hc->ctype != HFC_TYPE_E1) {
+		/* ST */
 		hc->chan[i].slot_tx = -1;
 		hc->chan[i].slot_rx = -1;
 		hc->chan[i].conf = -1;
 		mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
-		dch->timer.function = (void *)hfcmulti_dbusy_timer;
+		dch->timer.function = (void *) hfcmulti_dbusy_timer;
 		dch->timer.data = (long) dch;
 		init_timer(&dch->timer);
 		hc->chan[i - 2].slot_tx = -1;
@@ -3961,8 +3997,6 @@
 		hc->chan[i - 1].slot_rx = -1;
 		hc->chan[i - 1].conf = -1;
 		mode_hfcmulti(hc, i - 1, ISDN_P_NONE, -1, 0, -1, 0);
-		/* ST */
-		pt = hc->chan[i].port;
 		/* select interface */
 		HFC_outb(hc, R_ST_SEL, pt);
 		/* undocumented: delay after R_ST_SEL */
@@ -4054,14 +4088,9 @@
 		hfcmulti_initmode(dch);
 		spin_unlock_irqrestore(&hc->lock, flags);
 	}
-
-	if (((rq->protocol == ISDN_P_NT_S0) && (dch->state == 3)) ||
-	    ((rq->protocol == ISDN_P_TE_S0) && (dch->state == 7)) ||
-	    ((rq->protocol == ISDN_P_NT_E1) && (dch->state == 1)) ||
-	    ((rq->protocol == ISDN_P_TE_E1) && (dch->state == 1))) {
+	if (test_bit(FLG_ACTIVE, &dch->Flags))
 		_queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY,
 			    0, NULL, GFP_KERNEL);
-	}
 	rq->ch = &dch->dev.D;
 	if (!try_module_get(THIS_MODULE))
 		printk(KERN_WARNING "%s:cannot get module\n", __func__);
@@ -4091,7 +4120,6 @@
 	}
 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
 		return -EBUSY; /* b-channel can be only open once */
-	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
 	bch->ch.protocol = rq->protocol;
 	hc->chan[ch].rx_off = 0;
 	rq->ch = &bch->ch;
@@ -4112,7 +4140,7 @@
 
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
-		cq->op = MISDN_CTRL_HFC_OP;
+		cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_L1_TIMER3;
 		break;
 	case MISDN_CTRL_HFC_WD_INIT: /* init the watchdog */
 		wd_cnt = cq->p1 & 0xf;
@@ -4142,6 +4170,9 @@
 			       __func__);
 		HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES);
 		break;
+	case MISDN_CTRL_L1_TIMER3:
+		ret = l1_event(dch->l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
+		break;
 	default:
 		printk(KERN_WARNING "%s: unknown Op %x\n",
 		       __func__, cq->op);
@@ -4545,6 +4576,8 @@
 		}
 		/* free channels */
 		for (i = 0; i <= 31; i++) {
+			if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
+				continue;
 			if (hc->chan[i].bch) {
 				if (debug & DEBUG_HFCMULTI_INIT)
 					printk(KERN_DEBUG
@@ -4600,7 +4633,8 @@
 	spin_unlock_irqrestore(&hc->lock, flags);
 
 	if (debug & DEBUG_HFCMULTI_INIT)
-		printk(KERN_DEBUG "%s: free port %d channel D\n", __func__, pt);
+		printk(KERN_DEBUG "%s: free port %d channel D(%d)\n", __func__,
+			pt+1, ci);
 	mISDN_freedchannel(dch);
 	kfree(dch);
 
@@ -4622,15 +4656,19 @@
 	if (hc->iclock)
 		mISDN_unregister_clock(hc->iclock);
 
-	/* disable irq */
+	/* disable and free irq */
 	spin_lock_irqsave(&hc->lock, flags);
 	disable_hwirq(hc);
 	spin_unlock_irqrestore(&hc->lock, flags);
 	udelay(1000);
+	if (hc->irq) {
+		if (debug & DEBUG_HFCMULTI_INIT)
+			printk(KERN_DEBUG "%s: free irq %d (hc=%p)\n",
+			    __func__, hc->irq, hc);
+		free_irq(hc->irq, hc);
+		hc->irq = 0;
 
-	/* dimm leds */
-	if (hc->leds)
-		hfcmulti_leds(hc);
+	}
 
 	/* disable D-channels & B-channels */
 	if (debug & DEBUG_HFCMULTI_INIT)
@@ -4641,15 +4679,11 @@
 			release_port(hc, hc->chan[ch].dch);
 	}
 
-	/* release hardware & irq */
-	if (hc->irq) {
-		if (debug & DEBUG_HFCMULTI_INIT)
-			printk(KERN_DEBUG "%s: free irq %d\n",
-			       __func__, hc->irq);
-		free_irq(hc->irq, hc);
-		hc->irq = 0;
+	/* dimm leds */
+	if (hc->leds)
+		hfcmulti_leds(hc);
 
-	}
+	/* release hardware */
 	release_io_hfcmulti(hc);
 
 	if (debug & DEBUG_HFCMULTI_INIT)
@@ -4667,61 +4701,9 @@
 		       __func__);
 }
 
-static int
-init_e1_port(struct hfc_multi *hc, struct hm_map *m)
+static void
+init_e1_port_hw(struct hfc_multi *hc, struct hm_map *m)
 {
-	struct dchannel	*dch;
-	struct bchannel	*bch;
-	int		ch, ret = 0;
-	char		name[MISDN_MAX_IDLEN];
-
-	dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
-	if (!dch)
-		return -ENOMEM;
-	dch->debug = debug;
-	mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
-	dch->hw = hc;
-	dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
-	dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
-		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
-	dch->dev.D.send = handle_dmsg;
-	dch->dev.D.ctrl = hfcm_dctrl;
-	dch->dev.nrbchan = (hc->dslot) ? 30 : 31;
-	dch->slot = hc->dslot;
-	hc->chan[hc->dslot].dch = dch;
-	hc->chan[hc->dslot].port = 0;
-	hc->chan[hc->dslot].nt_timer = -1;
-	for (ch = 1; ch <= 31; ch++) {
-		if (ch == hc->dslot) /* skip dchannel */
-			continue;
-		bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
-		if (!bch) {
-			printk(KERN_ERR "%s: no memory for bchannel\n",
-			       __func__);
-			ret = -ENOMEM;
-			goto free_chan;
-		}
-		hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL);
-		if (!hc->chan[ch].coeff) {
-			printk(KERN_ERR "%s: no memory for coeffs\n",
-			       __func__);
-			ret = -ENOMEM;
-			kfree(bch);
-			goto free_chan;
-		}
-		bch->nr = ch;
-		bch->slot = ch;
-		bch->debug = debug;
-		mISDN_initbchannel(bch, MAX_DATA_MEM);
-		bch->hw = hc;
-		bch->ch.send = handle_bmsg;
-		bch->ch.ctrl = hfcm_bctrl;
-		bch->ch.nr = ch;
-		list_add(&bch->ch.list, &dch->dev.bchannels);
-		hc->chan[ch].bch = bch;
-		hc->chan[ch].port = 0;
-		set_channelmap(bch->nr, dch->dev.channelmap);
-	}
 	/* set optical line type */
 	if (port[Port_cnt] & 0x001) {
 		if (!m->opticalsupport)  {
@@ -4737,7 +4719,7 @@
 				       __func__,
 				       HFC_cnt + 1, 1);
 			test_and_set_bit(HFC_CFG_OPTICAL,
-					 &hc->chan[hc->dslot].cfg);
+			    &hc->chan[hc->dnum[0]].cfg);
 		}
 	}
 	/* set LOS report */
@@ -4747,7 +4729,7 @@
 			       "LOS report: card(%d) port(%d)\n",
 			       __func__, HFC_cnt + 1, 1);
 		test_and_set_bit(HFC_CFG_REPORT_LOS,
-				 &hc->chan[hc->dslot].cfg);
+		    &hc->chan[hc->dnum[0]].cfg);
 	}
 	/* set AIS report */
 	if (port[Port_cnt] & 0x008) {
@@ -4756,7 +4738,7 @@
 			       "AIS report: card(%d) port(%d)\n",
 			       __func__, HFC_cnt + 1, 1);
 		test_and_set_bit(HFC_CFG_REPORT_AIS,
-				 &hc->chan[hc->dslot].cfg);
+		    &hc->chan[hc->dnum[0]].cfg);
 	}
 	/* set SLIP report */
 	if (port[Port_cnt] & 0x010) {
@@ -4766,7 +4748,7 @@
 			       "card(%d) port(%d)\n",
 			       __func__, HFC_cnt + 1, 1);
 		test_and_set_bit(HFC_CFG_REPORT_SLIP,
-				 &hc->chan[hc->dslot].cfg);
+		    &hc->chan[hc->dnum[0]].cfg);
 	}
 	/* set RDI report */
 	if (port[Port_cnt] & 0x020) {
@@ -4776,7 +4758,7 @@
 			       "card(%d) port(%d)\n",
 			       __func__, HFC_cnt + 1, 1);
 		test_and_set_bit(HFC_CFG_REPORT_RDI,
-				 &hc->chan[hc->dslot].cfg);
+		    &hc->chan[hc->dnum[0]].cfg);
 	}
 	/* set CRC-4 Mode */
 	if (!(port[Port_cnt] & 0x100)) {
@@ -4785,7 +4767,7 @@
 			       " card(%d) port(%d)\n",
 			       __func__, HFC_cnt + 1, 1);
 		test_and_set_bit(HFC_CFG_CRC4,
-				 &hc->chan[hc->dslot].cfg);
+		    &hc->chan[hc->dnum[0]].cfg);
 	} else {
 		if (debug & DEBUG_HFCMULTI_INIT)
 			printk(KERN_DEBUG "%s: PORT turn off CRC4"
@@ -4817,20 +4799,85 @@
 	}
 	/* set elastic jitter buffer */
 	if (port[Port_cnt] & 0x3000) {
-		hc->chan[hc->dslot].jitter = (port[Port_cnt]>>12) & 0x3;
+		hc->chan[hc->dnum[0]].jitter = (port[Port_cnt]>>12) & 0x3;
 		if (debug & DEBUG_HFCMULTI_INIT)
 			printk(KERN_DEBUG
 			       "%s: PORT set elastic "
 			       "buffer to %d: card(%d) port(%d)\n",
-			       __func__, hc->chan[hc->dslot].jitter,
+			    __func__, hc->chan[hc->dnum[0]].jitter,
 			       HFC_cnt + 1, 1);
 	} else
-		hc->chan[hc->dslot].jitter = 2; /* default */
-	snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1);
+		hc->chan[hc->dnum[0]].jitter = 2; /* default */
+}
+
+static int
+init_e1_port(struct hfc_multi *hc, struct hm_map *m, int pt)
+{
+	struct dchannel	*dch;
+	struct bchannel	*bch;
+	int		ch, ret = 0;
+	char		name[MISDN_MAX_IDLEN];
+	int		bcount = 0;
+
+	dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
+	if (!dch)
+		return -ENOMEM;
+	dch->debug = debug;
+	mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
+	dch->hw = hc;
+	dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
+	dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
+	    (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
+	dch->dev.D.send = handle_dmsg;
+	dch->dev.D.ctrl = hfcm_dctrl;
+	dch->slot = hc->dnum[pt];
+	hc->chan[hc->dnum[pt]].dch = dch;
+	hc->chan[hc->dnum[pt]].port = pt;
+	hc->chan[hc->dnum[pt]].nt_timer = -1;
+	for (ch = 1; ch <= 31; ch++) {
+		if (!((1 << ch) & hc->bmask[pt])) /* skip unused channel */
+			continue;
+		bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
+		if (!bch) {
+			printk(KERN_ERR "%s: no memory for bchannel\n",
+			    __func__);
+			ret = -ENOMEM;
+			goto free_chan;
+		}
+		hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL);
+		if (!hc->chan[ch].coeff) {
+			printk(KERN_ERR "%s: no memory for coeffs\n",
+			    __func__);
+			ret = -ENOMEM;
+			kfree(bch);
+			goto free_chan;
+		}
+		bch->nr = ch;
+		bch->slot = ch;
+		bch->debug = debug;
+		mISDN_initbchannel(bch, MAX_DATA_MEM, poll >> 1);
+		bch->hw = hc;
+		bch->ch.send = handle_bmsg;
+		bch->ch.ctrl = hfcm_bctrl;
+		bch->ch.nr = ch;
+		list_add(&bch->ch.list, &dch->dev.bchannels);
+		hc->chan[ch].bch = bch;
+		hc->chan[ch].port = pt;
+		set_channelmap(bch->nr, dch->dev.channelmap);
+		bcount++;
+	}
+	dch->dev.nrbchan = bcount;
+	if (pt == 0)
+		init_e1_port_hw(hc, m);
+	if (hc->ports > 1)
+		snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d-%d",
+				HFC_cnt + 1, pt+1);
+	else
+		snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1);
 	ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
 	if (ret)
 		goto free_chan;
-	hc->created[0] = 1;
+	hc->created[pt] = 1;
 	return ret;
 free_chan:
 	release_port(hc, dch);
@@ -4881,7 +4928,7 @@
 		bch->nr = ch + 1;
 		bch->slot = i + ch;
 		bch->debug = debug;
-		mISDN_initbchannel(bch, MAX_DATA_MEM);
+		mISDN_initbchannel(bch, MAX_DATA_MEM, poll >> 1);
 		bch->hw = hc;
 		bch->ch.send = handle_bmsg;
 		bch->ch.ctrl = hfcm_bctrl;
@@ -4963,7 +5010,8 @@
 	struct hfc_multi	*hc;
 	u_long		flags;
 	u_char		dips = 0, pmj = 0; /* dip settings, port mode Jumpers */
-	int		i;
+	int		i, ch;
+	u_int		maskcheck;
 
 	if (HFC_cnt >= MAX_CARDS) {
 		printk(KERN_ERR "too many cards (max=%d).\n",
@@ -4997,18 +5045,36 @@
 	hc->id = HFC_cnt;
 	hc->pcm = pcm[HFC_cnt];
 	hc->io_mode = iomode[HFC_cnt];
-	if (dslot[HFC_cnt] < 0 && hc->ctype == HFC_TYPE_E1) {
-		hc->dslot = 0;
-		printk(KERN_INFO "HFC-E1 card has disabled D-channel, but "
-		       "31 B-channels\n");
+	if (hc->ctype == HFC_TYPE_E1 && dmask[E1_cnt]) {
+		/* fragment card */
+		pt = 0;
+		maskcheck = 0;
+		for (ch = 0; ch <= 31; ch++) {
+			if (!((1 << ch) & dmask[E1_cnt]))
+				continue;
+			hc->dnum[pt] = ch;
+			hc->bmask[pt] = bmask[bmask_cnt++];
+			if ((maskcheck & hc->bmask[pt])
+			 || (dmask[E1_cnt] & hc->bmask[pt])) {
+				printk(KERN_INFO
+				       "HFC-E1 #%d has overlapping B-channels on fragment #%d\n",
+				       E1_cnt + 1, pt);
+				return -EINVAL;
+			}
+			maskcheck |= hc->bmask[pt];
+			printk(KERN_INFO
+			       "HFC-E1 #%d uses D-channel on slot %d and a B-channel map of 0x%08x\n",
+				E1_cnt + 1, ch, hc->bmask[pt]);
+			pt++;
+		}
+		hc->ports = pt;
 	}
-	if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32
-	    && hc->ctype == HFC_TYPE_E1) {
-		hc->dslot = dslot[HFC_cnt];
-		printk(KERN_INFO "HFC-E1 card has alternating D-channel on "
-		       "time slot %d\n", dslot[HFC_cnt]);
-	} else
-		hc->dslot = 16;
+	if (hc->ctype == HFC_TYPE_E1 && !dmask[E1_cnt]) {
+		/* default card layout */
+		hc->dnum[0] = 16;
+		hc->bmask[0] = 0xfffefffe;
+		hc->ports = 1;
+	}
 
 	/* set chip specific features */
 	hc->masterclk = -1;
@@ -5091,23 +5157,33 @@
 			goto free_card;
 		}
 		if (hc->ctype == HFC_TYPE_E1)
-			ret_err = init_e1_port(hc, m);
+			ret_err = init_e1_port(hc, m, pt);
 		else
 			ret_err = init_multi_port(hc, pt);
 		if (debug & DEBUG_HFCMULTI_INIT)
 			printk(KERN_DEBUG
-			       "%s: Registering D-channel, card(%d) port(%d)"
+			    "%s: Registering D-channel, card(%d) port(%d) "
 			       "result %d\n",
-			       __func__, HFC_cnt + 1, pt, ret_err);
+			    __func__, HFC_cnt + 1, pt + 1, ret_err);
 
 		if (ret_err) {
 			while (pt) { /* release already registered ports */
 				pt--;
-				release_port(hc, hc->chan[(pt << 2) + 2].dch);
+				if (hc->ctype == HFC_TYPE_E1)
+					release_port(hc,
+						hc->chan[hc->dnum[pt]].dch);
+				else
+					release_port(hc,
+						hc->chan[(pt << 2) + 2].dch);
 			}
 			goto free_card;
 		}
-		Port_cnt++;
+		if (hc->ctype != HFC_TYPE_E1)
+			Port_cnt++; /* for each S0 port */
+	}
+	if (hc->ctype == HFC_TYPE_E1) {
+		Port_cnt++; /* for each E1 port */
+		E1_cnt++;
 	}
 
 	/* disp switches */
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e2c83a2..81363ff 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -453,7 +453,7 @@
 		}
 		bz->za[new_f2].z2 = cpu_to_le16(new_z2);
 		bz->f2 = new_f2;	/* next buffer */
-		recv_Bchannel(bch, MISDN_ID_ANY);
+		recv_Bchannel(bch, MISDN_ID_ANY, false);
 	}
 }
 
@@ -565,11 +565,6 @@
 	if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
 		new_z2 -= B_FIFO_SIZE;	/* buffer wrap */
 
-	if (fcnt_rx > MAX_DATA_SIZE) {	/* flush, if oversized */
-		*z2r = cpu_to_le16(new_z2);		/* new position */
-		return;
-	}
-
 	fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
 	if (fcnt_tx <= 0)
 		fcnt_tx += B_FIFO_SIZE;
@@ -577,8 +572,16 @@
 	fcnt_tx = B_FIFO_SIZE - fcnt_tx;
 	/* remaining bytes to send (bytes in tx-fifo) */
 
-	bch->rx_skb = mI_alloc_skb(fcnt_rx, GFP_ATOMIC);
-	if (bch->rx_skb) {
+	if (test_bit(FLG_RX_OFF, &bch->Flags)) {
+		bch->dropcnt += fcnt_rx;
+		*z2r = cpu_to_le16(new_z2);
+		return;
+	}
+	maxlen = bchannel_get_rxbuf(bch, fcnt_rx);
+	if (maxlen < 0) {
+		pr_warning("B%d: No bufferspace for %d bytes\n",
+			   bch->nr, fcnt_rx);
+	} else {
 		ptr = skb_put(bch->rx_skb, fcnt_rx);
 		if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
 			maxlen = fcnt_rx;	/* complete transfer */
@@ -596,10 +599,8 @@
 			ptr1 = bdata;	/* start of buffer */
 			memcpy(ptr, ptr1, fcnt_rx);	/* rest */
 		}
-		recv_Bchannel(bch, fcnt_tx); /* bch, id */
-	} else
-		printk(KERN_WARNING "HFCPCI: receive out of memory\n");
-
+		recv_Bchannel(bch, fcnt_tx, false); /* bch, id, !force */
+	}
 	*z2r = cpu_to_le16(new_z2);		/* new position */
 }
 
@@ -760,9 +761,14 @@
 
 	if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
 		printk(KERN_DEBUG "%s\n", __func__);
-	if ((!bch->tx_skb) || bch->tx_skb->len <= 0)
-		return;
-	count = bch->tx_skb->len - bch->tx_idx;
+	if ((!bch->tx_skb) || bch->tx_skb->len == 0) {
+		if (!test_bit(FLG_FILLEMPTY, &bch->Flags) &&
+		    !test_bit(FLG_TRANSPARENT, &bch->Flags))
+			return;
+		count = HFCPCI_FILLEMPTY;
+	} else {
+		count = bch->tx_skb->len - bch->tx_idx;
+	}
 	if ((bch->nr & 2) && (!hc->hw.bswapped)) {
 		bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
 		bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
@@ -781,16 +787,10 @@
 		fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
 		if (fcnt <= 0)
 			fcnt += B_FIFO_SIZE;
-		/* fcnt contains available bytes in fifo */
-		fcnt = B_FIFO_SIZE - fcnt;
-		/* remaining bytes to send (bytes in fifo) */
-
-		/* "fill fifo if empty" feature */
-		if (test_bit(FLG_FILLEMPTY, &bch->Flags) && !fcnt) {
-			/* printk(KERN_DEBUG "%s: buffer empty, so we have "
-			   "underrun\n", __func__); */
-			/* fill buffer, to prevent future underrun */
-			count = HFCPCI_FILLEMPTY;
+		if (test_bit(FLG_FILLEMPTY, &bch->Flags)) {
+			/* fcnt contains available bytes in fifo */
+			if (count > fcnt)
+				count = fcnt;
 			new_z1 = le16_to_cpu(*z1t) + count;
 			/* new buffer Position */
 			if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
@@ -802,17 +802,20 @@
 				printk(KERN_DEBUG "hfcpci_FFt fillempty "
 				       "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
 				       fcnt, maxlen, new_z1, dst);
-			fcnt += count;
 			if (maxlen > count)
 				maxlen = count;		/* limit size */
-			memset(dst, 0x2a, maxlen);	/* first copy */
+			memset(dst, bch->fill[0], maxlen); /* first copy */
 			count -= maxlen;		/* remaining bytes */
 			if (count) {
 				dst = bdata;		/* start of buffer */
-				memset(dst, 0x2a, count);
+				memset(dst, bch->fill[0], count);
 			}
 			*z1t = cpu_to_le16(new_z1);	/* now send data */
+			return;
 		}
+		/* fcnt contains available bytes in fifo */
+		fcnt = B_FIFO_SIZE - fcnt;
+		/* remaining bytes to send (bytes in fifo) */
 
 	next_t_frame:
 		count = bch->tx_skb->len - bch->tx_idx;
@@ -849,9 +852,6 @@
 		*z1t = cpu_to_le16(new_z1);	/* now send data */
 		if (bch->tx_idx < bch->tx_skb->len)
 			return;
-		/* send confirm, on trans, free on hdlc. */
-		if (test_bit(FLG_TRANSPARENT, &bch->Flags))
-			confirm_Bsend(bch);
 		dev_kfree_skb(bch->tx_skb);
 		if (get_next_bframe(bch))
 			goto next_t_frame;
@@ -1533,24 +1533,7 @@
 static int
 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
 {
-	int	ret = 0;
-
-	switch (cq->op) {
-	case MISDN_CTRL_GETOP:
-		cq->op = MISDN_CTRL_FILL_EMPTY;
-		break;
-	case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
-		test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
-		if (debug & DEBUG_HW_OPEN)
-			printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
-			       "off=%d)\n", __func__, bch->nr, !!cq->p1);
-		break;
-	default:
-		printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	return mISDN_ctrl_bchannel(bch, cq);
 }
 static int
 hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
@@ -1581,8 +1564,7 @@
 		break;
 	case CLOSE_CHANNEL:
 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
-		if (test_bit(FLG_ACTIVE, &bch->Flags))
-			deactivate_bchannel(bch);
+		deactivate_bchannel(bch);
 		ch->protocol = ISDN_P_NONE;
 		ch->peer = NULL;
 		module_put(THIS_MODULE);
@@ -1692,22 +1674,17 @@
 	struct hfc_pci		*hc = bch->hw;
 	int			ret = -EINVAL;
 	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
-	unsigned int		id;
-	u_long			flags;
+	unsigned long		flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
 		spin_lock_irqsave(&hc->lock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			hfcpci_fill_fifo(bch);
 			ret = 0;
-			spin_unlock_irqrestore(&hc->lock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(&hc->lock, flags);
+		}
+		spin_unlock_irqrestore(&hc->lock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(&hc->lock, flags);
@@ -1819,7 +1796,7 @@
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
 		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
-			MISDN_CTRL_DISCONNECT;
+			 MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3;
 		break;
 	case MISDN_CTRL_LOOP:
 		/* channel 0 disabled loop */
@@ -1896,6 +1873,9 @@
 		Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
 		hc->hw.trm &= 0x7f;	/* disable IOM-loop */
 		break;
+	case MISDN_CTRL_L1_TIMER3:
+		ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
+		break;
 	default:
 		printk(KERN_WARNING "%s: unknown Op %x\n",
 		       __func__, cq->op);
@@ -1969,7 +1949,6 @@
 	bch = &hc->bch[rq->adr.channel - 1];
 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
 		return -EBUSY; /* b-channel can be only open once */
-	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
 	bch->ch.protocol = rq->protocol;
 	rq->ch = &bch->ch; /* TODO: E-channel */
 	if (!try_module_get(THIS_MODULE))
@@ -2121,7 +2100,7 @@
 		card->bch[i].nr = i + 1;
 		set_channelmap(i + 1, card->dch.dev.channelmap);
 		card->bch[i].debug = debug;
-		mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM);
+		mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM, poll >> 1);
 		card->bch[i].hw = card;
 		card->bch[i].ch.send = hfcpci_l2l1B;
 		card->bch[i].ch.ctrl = hfc_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 8cde2a0..83206e4 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -226,19 +226,12 @@
 		if (debug & DBG_HFC_CALL_TRACE)
 			printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n",
 			       hw->name, __func__, ret);
-		if (ret > 0) {
-			/*
-			 * other l1 drivers don't send early confirms on
-			 * transp data, but hfcsusb does because tx_next
-			 * skb is needed in tx_iso_complete()
-			 */
-			queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
+		if (ret > 0)
 			ret = 0;
-		}
 		return ret;
 	case PH_ACTIVATE_REQ:
 		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
-			hfcsusb_start_endpoint(hw, bch->nr);
+			hfcsusb_start_endpoint(hw, bch->nr - 1);
 			ret = hfcsusb_setup_bch(bch, ch->protocol);
 		} else
 			ret = 0;
@@ -498,16 +491,9 @@
 	bch = &hw->bch[rq->adr.channel - 1];
 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
 		return -EBUSY; /* b-channel can be only open once */
-	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
 	bch->ch.protocol = rq->protocol;
 	rq->ch = &bch->ch;
 
-	/* start USB endpoint for bchannel */
-	if (rq->adr.channel  == 1)
-		hfcsusb_start_endpoint(hw, HFC_CHAN_B1);
-	else
-		hfcsusb_start_endpoint(hw, HFC_CHAN_B2);
-
 	if (!try_module_get(THIS_MODULE))
 		printk(KERN_WARNING "%s: %s:cannot get module\n",
 		       hw->name, __func__);
@@ -819,24 +805,7 @@
 static int
 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
 {
-	int	ret = 0;
-
-	switch (cq->op) {
-	case MISDN_CTRL_GETOP:
-		cq->op = MISDN_CTRL_FILL_EMPTY;
-		break;
-	case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
-		test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
-		if (debug & DEBUG_HW_OPEN)
-			printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
-			       "off=%d)\n", __func__, bch->nr, !!cq->p1);
-		break;
-	default:
-		printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	return mISDN_ctrl_bchannel(bch, cq);
 }
 
 /* collect data from incoming interrupt or isochron USB data */
@@ -873,7 +842,21 @@
 		hdlc = 1;
 	}
 	if (fifo->bch) {
+		if (test_bit(FLG_RX_OFF, &fifo->bch->Flags)) {
+			fifo->bch->dropcnt += len;
+			spin_unlock(&hw->lock);
+			return;
+		}
+		maxlen = bchannel_get_rxbuf(fifo->bch, len);
 		rx_skb = fifo->bch->rx_skb;
+		if (maxlen < 0) {
+			if (rx_skb)
+				skb_trim(rx_skb, 0);
+			pr_warning("%s.B%d: No bufferspace for %d bytes\n",
+				   hw->name, fifo->bch->nr, len);
+			spin_unlock(&hw->lock);
+			return;
+		}
 		maxlen = fifo->bch->maxlen;
 		hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
 	}
@@ -883,25 +866,22 @@
 		hdlc = 1;
 	}
 
-	if (!rx_skb) {
-		rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC);
-		if (rx_skb) {
-			if (fifo->dch)
-				fifo->dch->rx_skb = rx_skb;
-			if (fifo->bch)
-				fifo->bch->rx_skb = rx_skb;
-			if (fifo->ech)
-				fifo->ech->rx_skb = rx_skb;
-			skb_trim(rx_skb, 0);
-		} else {
-			printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
-			       hw->name, __func__);
-			spin_unlock(&hw->lock);
-			return;
-		}
-	}
-
 	if (fifo->dch || fifo->ech) {
+		if (!rx_skb) {
+			rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC);
+			if (rx_skb) {
+				if (fifo->dch)
+					fifo->dch->rx_skb = rx_skb;
+				if (fifo->ech)
+					fifo->ech->rx_skb = rx_skb;
+				skb_trim(rx_skb, 0);
+			} else {
+				printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
+				       hw->name, __func__);
+				spin_unlock(&hw->lock);
+				return;
+			}
+		}
 		/* D/E-Channel SKB range check */
 		if ((rx_skb->len + len) >= MAX_DFRAME_LEN_L1) {
 			printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
@@ -911,16 +891,6 @@
 			spin_unlock(&hw->lock);
 			return;
 		}
-	} else if (fifo->bch) {
-		/* B-Channel SKB range check */
-		if ((rx_skb->len + len) >= (MAX_BCH_SIZE + 3)) {
-			printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
-			       "for fifo(%d) HFCUSB_B_RX\n",
-			       hw->name, __func__, fifon);
-			skb_trim(rx_skb, 0);
-			spin_unlock(&hw->lock);
-			return;
-		}
 	}
 
 	memcpy(skb_put(rx_skb, len), data, len);
@@ -948,7 +918,8 @@
 				if (fifo->dch)
 					recv_Dchannel(fifo->dch);
 				if (fifo->bch)
-					recv_Bchannel(fifo->bch, MISDN_ID_ANY);
+					recv_Bchannel(fifo->bch, MISDN_ID_ANY,
+						      0);
 				if (fifo->ech)
 					recv_Echannel(fifo->ech,
 						      &hw->dch);
@@ -969,8 +940,7 @@
 		}
 	} else {
 		/* deliver transparent data to layer2 */
-		if (rx_skb->len >= poll)
-			recv_Bchannel(fifo->bch, MISDN_ID_ANY);
+		recv_Bchannel(fifo->bch, MISDN_ID_ANY, false);
 	}
 	spin_unlock(&hw->lock);
 }
@@ -1200,8 +1170,8 @@
 	int k, tx_offset, num_isoc_packets, sink, remain, current_len,
 		errcode, hdlc, i;
 	int *tx_idx;
-	int frame_complete, fifon, status;
-	__u8 threshbit;
+	int frame_complete, fifon, status, fillempty = 0;
+	__u8 threshbit, *p;
 
 	spin_lock(&hw->lock);
 	if (fifo->stop_gracefull) {
@@ -1219,6 +1189,9 @@
 		tx_skb = fifo->bch->tx_skb;
 		tx_idx = &fifo->bch->tx_idx;
 		hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
+		if (!tx_skb && !hdlc &&
+		    test_bit(FLG_FILLEMPTY, &fifo->bch->Flags))
+			fillempty = 1;
 	} else {
 		printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
 		       hw->name, __func__);
@@ -1277,6 +1250,8 @@
 			/* Generate next ISO Packets */
 			if (tx_skb)
 				remain = tx_skb->len - *tx_idx;
+			else if (fillempty)
+				remain = 15; /* > not complete */
 			else
 				remain = 0;
 
@@ -1307,15 +1282,20 @@
 				}
 
 				/* copy tx data to iso-urb buffer */
-				memcpy(context_iso_urb->buffer + tx_offset + 1,
-				       (tx_skb->data + *tx_idx), current_len);
-				*tx_idx += current_len;
-
+				p = context_iso_urb->buffer + tx_offset + 1;
+				if (fillempty) {
+					memset(p, fifo->bch->fill[0],
+					       current_len);
+				} else {
+					memcpy(p, (tx_skb->data + *tx_idx),
+					       current_len);
+					*tx_idx += current_len;
+				}
 				urb->iso_frame_desc[k].offset = tx_offset;
 				urb->iso_frame_desc[k].length = current_len + 1;
 
 				/* USB data log for every D ISO out */
-				if ((fifon == HFCUSB_D_RX) &&
+				if ((fifon == HFCUSB_D_RX) && !fillempty &&
 				    (debug & DBG_HFC_USB_VERBOSE)) {
 					printk(KERN_DEBUG
 					       "%s: %s (%d/%d) offs(%d) len(%d) ",
@@ -1365,12 +1345,8 @@
 				if (fifo->dch && get_next_dframe(fifo->dch))
 					tx_skb = fifo->dch->tx_skb;
 				else if (fifo->bch &&
-					 get_next_bframe(fifo->bch)) {
-					if (test_bit(FLG_TRANSPARENT,
-						     &fifo->bch->Flags))
-						confirm_Bsend(fifo->bch);
+					 get_next_bframe(fifo->bch))
 					tx_skb = fifo->bch->tx_skb;
-				}
 			}
 		}
 		errcode = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1812,7 +1788,7 @@
 	mISDN_clear_bchannel(bch);
 	spin_unlock_irqrestore(&hw->lock, flags);
 	hfcsusb_setup_bch(bch, ISDN_P_NONE);
-	hfcsusb_stop_endpoint(hw, bch->nr);
+	hfcsusb_stop_endpoint(hw, bch->nr - 1);
 }
 
 /*
@@ -1836,8 +1812,7 @@
 
 	case CLOSE_CHANNEL:
 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
-		if (test_bit(FLG_ACTIVE, &bch->Flags))
-			deactivate_bchannel(bch);
+		deactivate_bchannel(bch);
 		ch->protocol = ISDN_P_NONE;
 		ch->peer = NULL;
 		module_put(THIS_MODULE);
@@ -1883,7 +1858,7 @@
 		hw->bch[i].nr = i + 1;
 		set_channelmap(i + 1, hw->dch.dev.channelmap);
 		hw->bch[i].debug = debug;
-		mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM);
+		mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM, poll >> 1);
 		hw->bch[i].hw = hw;
 		hw->bch[i].ch.send = hfcusb_l2l1B;
 		hw->bch[i].ch.ctrl = hfc_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 884369f..752e082 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -603,10 +603,11 @@
 }
 
 static int
-isac_ctrl(struct isac_hw *isac, u32 cmd, u_long para)
+isac_ctrl(struct isac_hw *isac, u32 cmd, unsigned long para)
 {
 	u8 tl = 0;
-	u_long flags;
+	unsigned long flags;
+	int ret = 0;
 
 	switch (cmd) {
 	case HW_TESTLOOP:
@@ -626,12 +627,15 @@
 		}
 		spin_unlock_irqrestore(isac->hwlock, flags);
 		break;
+	case HW_TIMER3_VALUE:
+		ret = l1_event(isac->dch.l1, HW_TIMER3_VALUE | (para & 0xff));
+		break;
 	default:
 		pr_debug("%s: %s unknown command %x %lx\n", isac->name,
 			 __func__, cmd, para);
-		return -1;
+		ret = -1;
 	}
-	return 0;
+	return ret;
 }
 
 static int
@@ -929,24 +933,23 @@
 hscx_empty_fifo(struct hscx_hw *hscx, u8 count)
 {
 	u8 *p;
+	int maxlen;
 
 	pr_debug("%s: B%1d %d\n", hscx->ip->name, hscx->bch.nr, count);
-	if (!hscx->bch.rx_skb) {
-		hscx->bch.rx_skb = mI_alloc_skb(hscx->bch.maxlen, GFP_ATOMIC);
-		if (!hscx->bch.rx_skb) {
-			pr_info("%s: B receive out of memory\n",
-				hscx->ip->name);
-			hscx_cmdr(hscx, 0x80); /* RMC */
-			return;
-		}
-	}
-	if ((hscx->bch.rx_skb->len + count) > hscx->bch.maxlen) {
-		pr_debug("%s: overrun %d\n", hscx->ip->name,
-			 hscx->bch.rx_skb->len + count);
-		skb_trim(hscx->bch.rx_skb, 0);
+	if (test_bit(FLG_RX_OFF, &hscx->bch.Flags)) {
+		hscx->bch.dropcnt += count;
 		hscx_cmdr(hscx, 0x80); /* RMC */
 		return;
 	}
+	maxlen = bchannel_get_rxbuf(&hscx->bch, count);
+	if (maxlen < 0) {
+		hscx_cmdr(hscx, 0x80); /* RMC */
+		if (hscx->bch.rx_skb)
+			skb_trim(hscx->bch.rx_skb, 0);
+		pr_warning("%s.B%d: No bufferspace for %d bytes\n",
+			   hscx->ip->name, hscx->bch.nr, count);
+		return;
+	}
 	p = skb_put(hscx->bch.rx_skb, count);
 
 	if (hscx->ip->type & IPAC_TYPE_IPACX)
@@ -971,22 +974,28 @@
 	int count, more;
 	u8 *p;
 
-	if (!hscx->bch.tx_skb)
-		return;
-	count = hscx->bch.tx_skb->len - hscx->bch.tx_idx;
-	if (count <= 0)
-		return;
-	p = hscx->bch.tx_skb->data + hscx->bch.tx_idx;
-
-	more = test_bit(FLG_TRANSPARENT, &hscx->bch.Flags) ? 1 : 0;
-	if (count > hscx->fifo_size) {
+	if (!hscx->bch.tx_skb) {
+		if (!test_bit(FLG_TX_EMPTY, &hscx->bch.Flags))
+			return;
 		count = hscx->fifo_size;
 		more = 1;
-	}
-	pr_debug("%s: B%1d %d/%d/%d\n", hscx->ip->name, hscx->bch.nr, count,
-		 hscx->bch.tx_idx, hscx->bch.tx_skb->len);
-	hscx->bch.tx_idx += count;
+		p = hscx->log;
+		memset(p, hscx->bch.fill[0], count);
+	} else {
+		count = hscx->bch.tx_skb->len - hscx->bch.tx_idx;
+		if (count <= 0)
+			return;
+		p = hscx->bch.tx_skb->data + hscx->bch.tx_idx;
 
+		more = test_bit(FLG_TRANSPARENT, &hscx->bch.Flags) ? 1 : 0;
+		if (count > hscx->fifo_size) {
+			count = hscx->fifo_size;
+			more = 1;
+		}
+		pr_debug("%s: B%1d %d/%d/%d\n", hscx->ip->name, hscx->bch.nr,
+			 count, hscx->bch.tx_idx, hscx->bch.tx_skb->len);
+		hscx->bch.tx_idx += count;
+	}
 	if (hscx->ip->type & IPAC_TYPE_IPACX)
 		hscx->ip->write_fifo(hscx->ip->hw,
 				     hscx->off + IPACX_XFIFOB, p, count);
@@ -997,7 +1006,7 @@
 	}
 	hscx_cmdr(hscx, more ? 0x08 : 0x0a);
 
-	if (hscx->bch.debug & DEBUG_HW_BFIFO) {
+	if (hscx->bch.tx_skb && (hscx->bch.debug & DEBUG_HW_BFIFO)) {
 		snprintf(hscx->log, 64, "B%1d-send %s %d ",
 			 hscx->bch.nr, hscx->ip->name, count);
 		print_hex_dump_bytes(hscx->log, DUMP_PREFIX_OFFSET, p, count);
@@ -1007,17 +1016,17 @@
 static void
 hscx_xpr(struct hscx_hw *hx)
 {
-	if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len)
+	if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len) {
 		hscx_fill_fifo(hx);
-	else {
-		if (hx->bch.tx_skb) {
-			/* send confirm, on trans, free on hdlc. */
-			if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags))
-				confirm_Bsend(&hx->bch);
+	} else {
+		if (hx->bch.tx_skb)
 			dev_kfree_skb(hx->bch.tx_skb);
-		}
-		if (get_next_bframe(&hx->bch))
+		if (get_next_bframe(&hx->bch)) {
 			hscx_fill_fifo(hx);
+			test_and_clear_bit(FLG_TX_EMPTY, &hx->bch.Flags);
+		} else if (test_bit(FLG_TX_EMPTY, &hx->bch.Flags)) {
+			hscx_fill_fifo(hx);
+		}
 	}
 }
 
@@ -1069,7 +1078,7 @@
 		skb_trim(hx->bch.rx_skb, 0);
 	} else {
 		skb_trim(hx->bch.rx_skb, hx->bch.rx_skb->len - 1);
-		recv_Bchannel(&hx->bch, 0);
+		recv_Bchannel(&hx->bch, 0, false);
 	}
 }
 
@@ -1120,11 +1129,8 @@
 
 	if (istab & IPACX_B_RPF) {
 		hscx_empty_fifo(hx, hx->fifo_size);
-		if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags)) {
-			/* receive transparent audio data */
-			if (hx->bch.rx_skb)
-				recv_Bchannel(&hx->bch, 0);
-		}
+		if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags))
+			recv_Bchannel(&hx->bch, 0, false);
 	}
 
 	if (istab & IPACX_B_RFO) {
@@ -1137,7 +1143,9 @@
 
 	if (istab & IPACX_B_XDU) {
 		if (test_bit(FLG_TRANSPARENT, &hx->bch.Flags)) {
-			hscx_fill_fifo(hx);
+			if (test_bit(FLG_FILLEMPTY, &hx->bch.Flags))
+				test_and_set_bit(FLG_TX_EMPTY, &hx->bch.Flags);
+			hscx_xpr(hx);
 			return;
 		}
 		pr_debug("%s: B%1d XDU error at len %d\n", hx->ip->name,
@@ -1338,22 +1346,17 @@
 	struct hscx_hw	*hx = container_of(bch, struct hscx_hw, bch);
 	int ret = -EINVAL;
 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
-	u32 id;
-	u_long flags;
+	unsigned long flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
 		spin_lock_irqsave(hx->ip->hwlock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			ret = 0;
 			hscx_fill_fifo(hx);
-			spin_unlock_irqrestore(hx->ip->hwlock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(hx->ip->hwlock, flags);
+		}
+		spin_unlock_irqrestore(hx->ip->hwlock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(hx->ip->hwlock, flags);
@@ -1388,20 +1391,7 @@
 static int
 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
 {
-	int	ret = 0;
-
-	switch (cq->op) {
-	case MISDN_CTRL_GETOP:
-		cq->op = 0;
-		break;
-		/* Nothing implemented yet */
-	case MISDN_CTRL_FILL_EMPTY:
-	default:
-		pr_info("%s: unknown Op %x\n", __func__, cq->op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	return mISDN_ctrl_bchannel(bch, cq);
 }
 
 static int
@@ -1416,15 +1406,10 @@
 	switch (cmd) {
 	case CLOSE_CHANNEL:
 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
-		if (test_bit(FLG_ACTIVE, &bch->Flags)) {
-			spin_lock_irqsave(hx->ip->hwlock, flags);
-			mISDN_freebchannel(bch);
-			hscx_mode(hx, ISDN_P_NONE);
-			spin_unlock_irqrestore(hx->ip->hwlock, flags);
-		} else {
-			skb_queue_purge(&bch->rqueue);
-			bch->rcount = 0;
-		}
+		spin_lock_irqsave(hx->ip->hwlock, flags);
+		mISDN_freebchannel(bch);
+		hscx_mode(hx, ISDN_P_NONE);
+		spin_unlock_irqrestore(hx->ip->hwlock, flags);
 		ch->protocol = ISDN_P_NONE;
 		ch->peer = NULL;
 		module_put(hx->ip->owner);
@@ -1526,7 +1511,7 @@
 
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
-		cq->op = MISDN_CTRL_LOOP;
+		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
 		break;
 	case MISDN_CTRL_LOOP:
 		/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -1536,6 +1521,9 @@
 		}
 		ret = ipac->ctrl(ipac, HW_TESTLOOP, cq->channel);
 		break;
+	case MISDN_CTRL_L1_TIMER3:
+		ret = ipac->isac.ctrl(&ipac->isac, HW_TIMER3_VALUE, cq->p1);
+		break;
 	default:
 		pr_info("%s: unknown CTRL OP %x\n", ipac->name, cq->op);
 		ret = -EINVAL;
@@ -1621,7 +1609,8 @@
 		set_channelmap(i + 1, ipac->isac.dch.dev.channelmap);
 		list_add(&ipac->hscx[i].bch.ch.list,
 			 &ipac->isac.dch.dev.bchannels);
-		mISDN_initbchannel(&ipac->hscx[i].bch, MAX_DATA_MEM);
+		mISDN_initbchannel(&ipac->hscx[i].bch, MAX_DATA_MEM,
+				   ipac->hscx[i].fifo_size);
 		ipac->hscx[i].bch.ch.nr = i + 1;
 		ipac->hscx[i].bch.ch.send = &hscx_l2l1;
 		ipac->hscx[i].bch.ch.ctrl = hscx_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 9a6da6e..be5973d 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -421,13 +421,19 @@
 static inline void
 isar_rcv_frame(struct isar_ch *ch)
 {
-	u8		*ptr;
+	u8	*ptr;
+	int	maxlen;
 
 	if (!ch->is->clsb) {
 		pr_debug("%s; ISAR zero len frame\n", ch->is->name);
 		ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
 		return;
 	}
+	if (test_bit(FLG_RX_OFF, &ch->bch.Flags)) {
+		ch->bch.dropcnt += ch->is->clsb;
+		ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
+		return;
+	}
 	switch (ch->bch.state) {
 	case ISDN_P_NONE:
 		pr_debug("%s: ISAR protocol 0 spurious IIS_RDATA %x/%x/%x\n",
@@ -437,36 +443,22 @@
 	case ISDN_P_B_RAW:
 	case ISDN_P_B_L2DTMF:
 	case ISDN_P_B_MODEM_ASYNC:
-		if (!ch->bch.rx_skb) {
-			ch->bch.rx_skb = mI_alloc_skb(ch->bch.maxlen,
-						      GFP_ATOMIC);
-			if (unlikely(!ch->bch.rx_skb)) {
-				pr_info("%s: B receive out of memory\n",
-					ch->is->name);
-				ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
-				break;
-			}
+		maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb);
+		if (maxlen < 0) {
+			pr_warning("%s.B%d: No bufferspace for %d bytes\n",
+				   ch->is->name, ch->bch.nr, ch->is->clsb);
+			ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
+			break;
 		}
 		rcv_mbox(ch->is, skb_put(ch->bch.rx_skb, ch->is->clsb));
-		recv_Bchannel(&ch->bch, 0);
+		recv_Bchannel(&ch->bch, 0, false);
 		break;
 	case ISDN_P_B_HDLC:
-		if (!ch->bch.rx_skb) {
-			ch->bch.rx_skb = mI_alloc_skb(ch->bch.maxlen,
-						      GFP_ATOMIC);
-			if (unlikely(!ch->bch.rx_skb)) {
-				pr_info("%s: B receive out of memory\n",
-					ch->is->name);
-				ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
-				break;
-			}
-		}
-		if ((ch->bch.rx_skb->len + ch->is->clsb) >
-		    (ch->bch.maxlen + 2)) {
-			pr_debug("%s: incoming packet too large\n",
-				 ch->is->name);
+		maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb);
+		if (maxlen < 0) {
+			pr_warning("%s.B%d: No bufferspace for %d bytes\n",
+				   ch->is->name, ch->bch.nr, ch->is->clsb);
 			ch->is->write_reg(ch->is->hw, ISAR_IIA, 0);
-			skb_trim(ch->bch.rx_skb, 0);
 			break;
 		}
 		if (ch->is->cmsb & HDLC_ERROR) {
@@ -494,7 +486,7 @@
 				break;
 			}
 			skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2);
-			recv_Bchannel(&ch->bch, 0);
+			recv_Bchannel(&ch->bch, 0, false);
 		}
 		break;
 	case ISDN_P_B_T30_FAX:
@@ -530,7 +522,7 @@
 				ch->state = STFAX_ESCAPE;
 				/* set_skb_flag(skb, DF_NOMOREDATA); */
 			}
-			recv_Bchannel(&ch->bch, 0);
+			recv_Bchannel(&ch->bch, 0, false);
 			if (ch->is->cmsb & SART_NMD)
 				deliver_status(ch, HW_MOD_NOCARR);
 			break;
@@ -570,7 +562,7 @@
 				break;
 			}
 			skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2);
-			recv_Bchannel(&ch->bch, 0);
+			recv_Bchannel(&ch->bch, 0, false);
 		}
 		if (ch->is->cmsb & SART_NMD) { /* ABORT */
 			pr_debug("%s: isar_rcv_frame: no more data\n",
@@ -598,16 +590,25 @@
 	u8 msb;
 	u8 *ptr;
 
-	pr_debug("%s: ch%d  tx_skb %p tx_idx %d\n",
-		 ch->is->name, ch->bch.nr, ch->bch.tx_skb, ch->bch.tx_idx);
-	if (!ch->bch.tx_skb)
-		return;
-	count = ch->bch.tx_skb->len - ch->bch.tx_idx;
-	if (count <= 0)
-		return;
+	pr_debug("%s: ch%d  tx_skb %d tx_idx %d\n", ch->is->name, ch->bch.nr,
+		 ch->bch.tx_skb ? ch->bch.tx_skb->len : -1, ch->bch.tx_idx);
 	if (!(ch->is->bstat &
 	      (ch->dpath == 1 ? BSTAT_RDM1 : BSTAT_RDM2)))
 		return;
+	if (!ch->bch.tx_skb) {
+		if (!test_bit(FLG_TX_EMPTY, &ch->bch.Flags) ||
+		    (ch->bch.state != ISDN_P_B_RAW))
+			return;
+		count = ch->mml;
+		/* use the card buffer */
+		memset(ch->is->buf, ch->bch.fill[0], count);
+		send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA,
+			  0, count, ch->is->buf);
+		return;
+	}
+	count = ch->bch.tx_skb->len - ch->bch.tx_idx;
+	if (count <= 0)
+		return;
 	if (count > ch->mml) {
 		msb = 0;
 		count = ch->mml;
@@ -686,9 +687,9 @@
 static void
 send_next(struct isar_ch *ch)
 {
-	pr_debug("%s: %s ch%d tx_skb %p tx_idx %d\n",
-		 ch->is->name, __func__, ch->bch.nr,
-		 ch->bch.tx_skb, ch->bch.tx_idx);
+	pr_debug("%s: %s ch%d tx_skb %d tx_idx %d\n", ch->is->name, __func__,
+		 ch->bch.nr, ch->bch.tx_skb ? ch->bch.tx_skb->len : -1,
+		 ch->bch.tx_idx);
 	if (ch->bch.state == ISDN_P_B_T30_FAX) {
 		if (ch->cmd == PCTRL_CMD_FTH) {
 			if (test_bit(FLG_LASTDATA, &ch->bch.Flags)) {
@@ -702,15 +703,14 @@
 			}
 		}
 	}
-	if (ch->bch.tx_skb) {
-		/* send confirm, on trans, free on hdlc. */
-		if (test_bit(FLG_TRANSPARENT, &ch->bch.Flags))
-			confirm_Bsend(&ch->bch);
+	if (ch->bch.tx_skb)
 		dev_kfree_skb(ch->bch.tx_skb);
-	}
-	if (get_next_bframe(&ch->bch))
+	if (get_next_bframe(&ch->bch)) {
 		isar_fill_fifo(ch);
-	else {
+		test_and_clear_bit(FLG_TX_EMPTY, &ch->bch.Flags);
+	} else if (test_bit(FLG_TX_EMPTY, &ch->bch.Flags)) {
+		isar_fill_fifo(ch);
+	} else {
 		if (test_and_clear_bit(FLG_DLEETX, &ch->bch.Flags)) {
 			if (test_and_clear_bit(FLG_LASTDATA,
 					       &ch->bch.Flags)) {
@@ -724,6 +724,8 @@
 			} else {
 				deliver_status(ch, HW_MOD_CONNECT);
 			}
+		} else if (test_bit(FLG_FILLEMPTY, &ch->bch.Flags)) {
+			test_and_set_bit(FLG_TX_EMPTY, &ch->bch.Flags);
 		}
 	}
 }
@@ -1487,14 +1489,10 @@
 		spin_lock_irqsave(ich->is->hwlock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			ret = 0;
 			isar_fill_fifo(ich);
-			spin_unlock_irqrestore(ich->is->hwlock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(ich->is->hwlock, flags);
+		}
+		spin_unlock_irqrestore(ich->is->hwlock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(ich->is->hwlock, flags);
@@ -1575,20 +1573,7 @@
 static int
 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
 {
-	int	ret = 0;
-
-	switch (cq->op) {
-	case MISDN_CTRL_GETOP:
-		cq->op = 0;
-		break;
-		/* Nothing implemented yet */
-	case MISDN_CTRL_FILL_EMPTY:
-	default:
-		pr_info("%s: unknown Op %x\n", __func__, cq->op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	return mISDN_ctrl_bchannel(bch, cq);
 }
 
 static int
@@ -1603,15 +1588,10 @@
 	switch (cmd) {
 	case CLOSE_CHANNEL:
 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
-		if (test_bit(FLG_ACTIVE, &bch->Flags)) {
-			spin_lock_irqsave(ich->is->hwlock, flags);
-			mISDN_freebchannel(bch);
-			modeisar(ich, ISDN_P_NONE);
-			spin_unlock_irqrestore(ich->is->hwlock, flags);
-		} else {
-			skb_queue_purge(&bch->rqueue);
-			bch->rcount = 0;
-		}
+		spin_lock_irqsave(ich->is->hwlock, flags);
+		mISDN_freebchannel(bch);
+		modeisar(ich, ISDN_P_NONE);
+		spin_unlock_irqrestore(ich->is->hwlock, flags);
 		ch->protocol = ISDN_P_NONE;
 		ch->peer = NULL;
 		module_put(ich->is->owner);
@@ -1677,7 +1657,6 @@
 	bch = &isar->ch[rq->adr.channel - 1].bch;
 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
 		return -EBUSY; /* b-channel can be only open once */
-	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
 	bch->ch.protocol = rq->protocol;
 	rq->ch = &bch->ch;
 	return 0;
@@ -1691,7 +1670,7 @@
 	isar->hw = hw;
 	for (i = 0; i < 2; i++) {
 		isar->ch[i].bch.nr = i + 1;
-		mISDN_initbchannel(&isar->ch[i].bch, MAX_DATA_MEM);
+		mISDN_initbchannel(&isar->ch[i].bch, MAX_DATA_MEM, 32);
 		isar->ch[i].bch.ch.nr = i + 1;
 		isar->ch[i].bch.ch.send = &isar_l2l1;
 		isar->ch[i].bch.ch.ctrl = isar_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index c726e09..c3e3e76 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -386,24 +386,20 @@
 			bc->bch.nr, idx);
 	}
 	bc->lastrx = idx;
-	if (!bc->bch.rx_skb) {
-		bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen, GFP_ATOMIC);
-		if (!bc->bch.rx_skb) {
-			pr_info("%s: B%1d receive out of memory\n",
-				card->name, bc->bch.nr);
-			return;
-		}
+	if (test_bit(FLG_RX_OFF, &bc->bch.Flags)) {
+		bc->bch.dropcnt += cnt;
+		return;
 	}
-
-	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
-		if ((bc->bch.rx_skb->len + cnt) > bc->bch.maxlen) {
-			pr_debug("%s: B%1d overrun %d\n", card->name,
-				 bc->bch.nr, bc->bch.rx_skb->len + cnt);
-			skb_trim(bc->bch.rx_skb, 0);
-			return;
-		}
+	stat = bchannel_get_rxbuf(&bc->bch, cnt);
+	/* only transparent mode uses the count; HDLC overrun is detected later */
+	if (stat == -ENOMEM) {
+		pr_warning("%s.B%d: No memory for %d bytes\n",
+			   card->name, bc->bch.nr, cnt);
+		return;
+	}
+	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
 		p = skb_put(bc->bch.rx_skb, cnt);
-	} else
+	else
 		p = bc->hrbuf;
 
 	for (i = 0; i < cnt; i++) {
@@ -414,48 +410,45 @@
 			idx = 0;
 		p[i] = val & 0xff;
 	}
+
+	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
+		recv_Bchannel(&bc->bch, 0, false);
+		return;
+	}
+
 	pn = bc->hrbuf;
-next_frame:
-	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
+	while (cnt > 0) {
 		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
 				       bc->bch.rx_skb->data, bc->bch.maxlen);
-		if (stat > 0) /* valid frame received */
+		if (stat > 0) { /* valid frame received */
 			p = skb_put(bc->bch.rx_skb, stat);
-		else if (stat == -HDLC_CRC_ERROR)
-			pr_info("%s: B%1d receive frame CRC error\n",
-				card->name, bc->bch.nr);
-		else if (stat == -HDLC_FRAMING_ERROR)
-			pr_info("%s: B%1d receive framing error\n",
-				card->name, bc->bch.nr);
-		else if (stat == -HDLC_LENGTH_ERROR)
-			pr_info("%s: B%1d receive frame too long (> %d)\n",
-				card->name, bc->bch.nr, bc->bch.maxlen);
-	} else
-		stat = cnt;
-
-	if (stat > 0) {
-		if (debug & DEBUG_HW_BFIFO) {
-			snprintf(card->log, LOG_SIZE, "B%1d-recv %s %d ",
-				 bc->bch.nr, card->name, stat);
-			print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET,
-					     p, stat);
-		}
-		recv_Bchannel(&bc->bch, 0);
-	}
-	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
-		pn += i;
-		cnt -= i;
-		if (!bc->bch.rx_skb) {
-			bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen,
-						      GFP_ATOMIC);
-			if (!bc->bch.rx_skb) {
-				pr_info("%s: B%1d receive out of memory\n",
-					card->name, bc->bch.nr);
+			if (debug & DEBUG_HW_BFIFO) {
+				snprintf(card->log, LOG_SIZE,
+					 "B%1d-recv %s %d ", bc->bch.nr,
+					 card->name, stat);
+				print_hex_dump_bytes(card->log,
+						     DUMP_PREFIX_OFFSET, p,
+						     stat);
+			}
+			recv_Bchannel(&bc->bch, 0, false);
+			stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
+			if (stat < 0) {
+				pr_warning("%s.B%d: No memory for %d bytes\n",
+					   card->name, bc->bch.nr, cnt);
 				return;
 			}
+		} else if (stat == -HDLC_CRC_ERROR) {
+			pr_info("%s: B%1d receive frame CRC error\n",
+				card->name, bc->bch.nr);
+		} else if (stat == -HDLC_FRAMING_ERROR) {
+			pr_info("%s: B%1d receive framing error\n",
+				card->name, bc->bch.nr);
+		} else if (stat == -HDLC_LENGTH_ERROR) {
+			pr_info("%s: B%1d receive frame too long (> %d)\n",
+				card->name, bc->bch.nr, bc->bch.maxlen);
 		}
-		if (cnt > 0)
-			goto next_frame;
+		pn += i;
+		cnt -= i;
 	}
 }
 
@@ -544,22 +537,31 @@
 fill_dma(struct tiger_ch *bc)
 {
 	struct tiger_hw *card = bc->bch.hw;
-	int count, i;
-	u32 m, v;
+	int count, i, fillempty = 0;
+	u32 m, v, n = 0;
 	u8  *p;
 
 	if (bc->free == 0)
 		return;
-	count = bc->bch.tx_skb->len - bc->bch.tx_idx;
-	if (count <= 0)
-		return;
-	pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n", card->name,
-		 __func__, bc->bch.nr, count, bc->free, bc->bch.tx_idx,
-		 bc->bch.tx_skb->len, bc->txstate, bc->idx, card->send.idx);
+	if (!bc->bch.tx_skb) {
+		if (!test_bit(FLG_TX_EMPTY, &bc->bch.Flags))
+			return;
+		fillempty = 1;
+		count = card->send.size >> 1;
+		p = bc->bch.fill;
+	} else {
+		count = bc->bch.tx_skb->len - bc->bch.tx_idx;
+		if (count <= 0)
+			return;
+		pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n",
+			 card->name, __func__, bc->bch.nr, count, bc->free,
+			 bc->bch.tx_idx, bc->bch.tx_skb->len, bc->txstate,
+			 bc->idx, card->send.idx);
+		p = bc->bch.tx_skb->data + bc->bch.tx_idx;
+	}
 	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
 		resync(bc, card);
-	p = bc->bch.tx_skb->data + bc->bch.tx_idx;
-	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
+	if (test_bit(FLG_HDLC, &bc->bch.Flags) && !fillempty) {
 		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
 					bc->hsbuf, bc->free);
 		pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
@@ -570,17 +572,33 @@
 	} else {
 		if (count > bc->free)
 			count = bc->free;
-		bc->bch.tx_idx += count;
+		if (!fillempty)
+			bc->bch.tx_idx += count;
 		bc->free -= count;
 	}
 	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
-	for (i = 0; i < count; i++) {
-		if (bc->idx >= card->send.size)
-			bc->idx = 0;
-		v = card->send.start[bc->idx];
-		v &= m;
-		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
-		card->send.start[bc->idx++] = v;
+	if (fillempty) {
+		n = p[0];
+		if (!(bc->bch.nr & 1))
+			n <<= 8;
+		for (i = 0; i < count; i++) {
+			if (bc->idx >= card->send.size)
+				bc->idx = 0;
+			v = card->send.start[bc->idx];
+			v &= m;
+			v |= n;
+			card->send.start[bc->idx++] = v;
+		}
+	} else {
+		for (i = 0; i < count; i++) {
+			if (bc->idx >= card->send.size)
+				bc->idx = 0;
+			v = card->send.start[bc->idx];
+			v &= m;
+			n = p[i];
+			v |= (bc->bch.nr & 1) ? n : n << 8;
+			card->send.start[bc->idx++] = v;
+		}
 	}
 	if (debug & DEBUG_HW_BFIFO) {
 		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
@@ -595,21 +613,26 @@
 static int
 bc_next_frame(struct tiger_ch *bc)
 {
-	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len)
+	int ret = 1;
+
+	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
 		fill_dma(bc);
-	else {
-		if (bc->bch.tx_skb) {
-			/* send confirm, on trans, free on hdlc. */
-			if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
-				confirm_Bsend(&bc->bch);
+	} else {
+		if (bc->bch.tx_skb)
 			dev_kfree_skb(bc->bch.tx_skb);
-		}
-		if (get_next_bframe(&bc->bch))
+		if (get_next_bframe(&bc->bch)) {
 			fill_dma(bc);
-		else
-			return 0;
+			test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags);
+		} else if (test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) {
+			fill_dma(bc);
+		} else if (test_bit(FLG_FILLEMPTY, &bc->bch.Flags)) {
+			test_and_set_bit(FLG_TX_EMPTY, &bc->bch.Flags);
+			ret = 0;
+		} else {
+			ret = 0;
+		}
 	}
-	return 1;
+	return ret;
 }
 
 static void
@@ -732,22 +755,17 @@
 	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
 	struct tiger_hw *card = bch->hw;
 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
-	u32 id;
-	u_long flags;
+	unsigned long flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
 		spin_lock_irqsave(&card->lock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			fill_dma(bc);
 			ret = 0;
-			spin_unlock_irqrestore(&card->lock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(&card->lock, flags);
+		}
+		spin_unlock_irqrestore(&card->lock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(&card->lock, flags);
@@ -778,21 +796,7 @@
 static int
 channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
 {
-	int ret = 0;
-	struct tiger_hw *card  = bc->bch.hw;
-
-	switch (cq->op) {
-	case MISDN_CTRL_GETOP:
-		cq->op = 0;
-		break;
-		/* Nothing implemented yet */
-	case MISDN_CTRL_FILL_EMPTY:
-	default:
-		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	return mISDN_ctrl_bchannel(&bc->bch, cq);
 }
 
 static int
@@ -808,14 +812,10 @@
 	switch (cmd) {
 	case CLOSE_CHANNEL:
 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
-		if (test_bit(FLG_ACTIVE, &bch->Flags)) {
-			spin_lock_irqsave(&card->lock, flags);
-			mISDN_freebchannel(bch);
-			test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
-			test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
-			mode_tiger(bc, ISDN_P_NONE);
-			spin_unlock_irqrestore(&card->lock, flags);
-		}
+		spin_lock_irqsave(&card->lock, flags);
+		mISDN_freebchannel(bch);
+		mode_tiger(bc, ISDN_P_NONE);
+		spin_unlock_irqrestore(&card->lock, flags);
 		ch->protocol = ISDN_P_NONE;
 		ch->peer = NULL;
 		module_put(THIS_MODULE);
@@ -837,7 +837,7 @@
 
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
-		cq->op = MISDN_CTRL_LOOP;
+		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
 		break;
 	case MISDN_CTRL_LOOP:
 		/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -847,6 +847,9 @@
 		}
 		ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
 		break;
+	case MISDN_CTRL_L1_TIMER3:
+		ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
+		break;
 	default:
 		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
 		ret = -EINVAL;
@@ -1027,7 +1030,8 @@
 	for (i = 0; i < 2; i++) {
 		card->bc[i].bch.nr = i + 1;
 		set_channelmap(i + 1, card->isac.dch.dev.channelmap);
-		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM);
+		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
+				   NJ_DMA_RXSIZE >> 1);
 		card->bc[i].bch.hw = card;
 		card->bc[i].bch.ch.send = nj_l2l1B;
 		card->bc[i].bch.ch.ctrl = nj_bctrl;
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index 0468993..93f344d 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -224,7 +224,7 @@
 
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
-		cq->op = MISDN_CTRL_LOOP;
+		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
 		break;
 	case MISDN_CTRL_LOOP:
 		/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -234,6 +234,9 @@
 		}
 		ret = sf->isac.ctrl(&sf->isac, HW_TESTLOOP, cq->channel);
 		break;
+	case MISDN_CTRL_L1_TIMER3:
+		ret = sf->isac.ctrl(&sf->isac, HW_TIMER3_VALUE, cq->p1);
+		break;
 	default:
 		pr_info("%s: unknown Op %x\n", sf->name, cq->op);
 		ret = -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 2183357..26a86b8 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -465,6 +465,7 @@
 {
 	struct w6692_hw *card = wch->bch.hw;
 	u8 *ptr;
+	int maxlen;
 
 	pr_debug("%s: empty_Bfifo %d\n", card->name, count);
 	if (unlikely(wch->bch.state == ISDN_P_NONE)) {
@@ -474,20 +475,18 @@
 			skb_trim(wch->bch.rx_skb, 0);
 		return;
 	}
-	if (!wch->bch.rx_skb) {
-		wch->bch.rx_skb = mI_alloc_skb(wch->bch.maxlen, GFP_ATOMIC);
-		if (unlikely(!wch->bch.rx_skb)) {
-			pr_info("%s: B receive out of memory\n", card->name);
-			WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK |
-				    W_B_CMDR_RACT);
-			return;
-		}
-	}
-	if (wch->bch.rx_skb->len + count > wch->bch.maxlen) {
-		pr_debug("%s: empty_Bfifo incoming packet too large\n",
-			 card->name);
+	if (test_bit(FLG_RX_OFF, &wch->bch.Flags)) {
+		wch->bch.dropcnt += count;
 		WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
-		skb_trim(wch->bch.rx_skb, 0);
+		return;
+	}
+	maxlen = bchannel_get_rxbuf(&wch->bch, count);
+	if (maxlen < 0) {
+		WriteW6692B(wch, W_B_CMDR, W_B_CMDR_RACK | W_B_CMDR_RACT);
+		if (wch->bch.rx_skb)
+			skb_trim(wch->bch.rx_skb, 0);
+		pr_warning("%s.B%d: No bufferspace for %d bytes\n",
+			   card->name, wch->bch.nr, count);
 		return;
 	}
 	ptr = skb_put(wch->bch.rx_skb, count);
@@ -504,16 +503,22 @@
 W6692_fill_Bfifo(struct w6692_ch *wch)
 {
 	struct w6692_hw *card = wch->bch.hw;
-	int count;
+	int count, fillempty = 0;
 	u8 *ptr, cmd = W_B_CMDR_RACT | W_B_CMDR_XMS;
 
 	pr_debug("%s: fill Bfifo\n", card->name);
-	if (!wch->bch.tx_skb)
-		return;
-	count = wch->bch.tx_skb->len - wch->bch.tx_idx;
-	if (count <= 0)
-		return;
-	ptr = wch->bch.tx_skb->data + wch->bch.tx_idx;
+	if (!wch->bch.tx_skb) {
+		if (!test_bit(FLG_TX_EMPTY, &wch->bch.Flags))
+			return;
+		ptr = wch->bch.fill;
+		count = W_B_FIFO_THRESH;
+		fillempty = 1;
+	} else {
+		count = wch->bch.tx_skb->len - wch->bch.tx_idx;
+		if (count <= 0)
+			return;
+		ptr = wch->bch.tx_skb->data + wch->bch.tx_idx;
+	}
 	if (count > W_B_FIFO_THRESH)
 		count = W_B_FIFO_THRESH;
 	else if (test_bit(FLG_HDLC, &wch->bch.Flags))
@@ -522,9 +527,16 @@
 	pr_debug("%s: fill Bfifo%d/%d\n", card->name,
 		 count, wch->bch.tx_idx);
 	wch->bch.tx_idx += count;
-	outsb(wch->addr + W_B_XFIFO, ptr, count);
+	if (fillempty) {
+		while (count > 0) {
+			outsb(wch->addr + W_B_XFIFO, ptr, MISDN_BCH_FILL_SIZE);
+			count -= MISDN_BCH_FILL_SIZE;
+		}
+	} else {
+		outsb(wch->addr + W_B_XFIFO, ptr, count);
+	}
 	WriteW6692B(wch, W_B_CMDR, cmd);
-	if (debug & DEBUG_HW_DFIFO) {
+	if ((debug & DEBUG_HW_BFIFO) && !fillempty) {
 		snprintf(card->log, 63, "B%1d-send %s %d ",
 			 wch->bch.nr, card->name, count);
 		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, ptr, count);
@@ -638,17 +650,17 @@
 static void
 send_next(struct w6692_ch *wch)
 {
-	if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len)
+	if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len) {
 		W6692_fill_Bfifo(wch);
-	else {
-		if (wch->bch.tx_skb) {
-			/* send confirm, on trans, free on hdlc. */
-			if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
-				confirm_Bsend(&wch->bch);
+	} else {
+		if (wch->bch.tx_skb)
 			dev_kfree_skb(wch->bch.tx_skb);
-		}
-		if (get_next_bframe(&wch->bch))
+		if (get_next_bframe(&wch->bch)) {
 			W6692_fill_Bfifo(wch);
+			test_and_clear_bit(FLG_TX_EMPTY, &wch->bch.Flags);
+		} else if (test_bit(FLG_TX_EMPTY, &wch->bch.Flags)) {
+			W6692_fill_Bfifo(wch);
+		}
 	}
 }
 
@@ -698,7 +710,7 @@
 			if (count == 0)
 				count = W_B_FIFO_THRESH;
 			W6692_empty_Bfifo(wch, count);
-			recv_Bchannel(&wch->bch, 0);
+			recv_Bchannel(&wch->bch, 0, false);
 		}
 	}
 	if (stat & W_B_EXI_RMR) {
@@ -714,9 +726,8 @@
 				    W_B_CMDR_RRST | W_B_CMDR_RACT);
 		} else {
 			W6692_empty_Bfifo(wch, W_B_FIFO_THRESH);
-			if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags) &&
-			    wch->bch.rx_skb && (wch->bch.rx_skb->len > 0))
-				recv_Bchannel(&wch->bch, 0);
+			if (test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
+				recv_Bchannel(&wch->bch, 0, false);
 		}
 	}
 	if (stat & W_B_EXI_RDOV) {
@@ -738,8 +749,8 @@
 				 wch->bch.nr, star);
 		}
 		if (star & W_B_STAR_XDOW) {
-			pr_debug("%s: B%d XDOW proto=%x\n", card->name,
-				 wch->bch.nr, wch->bch.state);
+			pr_warning("%s: B%d XDOW proto=%x\n", card->name,
+				   wch->bch.nr, wch->bch.state);
 #ifdef ERROR_STATISTIC
 			wch->bch.err_xdu++;
 #endif
@@ -752,20 +763,21 @@
 			}
 		}
 		send_next(wch);
-		if (stat & W_B_EXI_XDUN)
+		if (star & W_B_STAR_XDOW)
 			return; /* handle XDOW only once */
 	}
 	if (stat & W_B_EXI_XDUN) {
-		pr_debug("%s: B%d XDUN proto=%x\n", card->name,
-			 wch->bch.nr, wch->bch.state);
+		pr_warning("%s: B%d XDUN proto=%x\n", card->name,
+			   wch->bch.nr, wch->bch.state);
 #ifdef ERROR_STATISTIC
 		wch->bch.err_xdu++;
 #endif
-		WriteW6692B(wch, W_B_CMDR, W_B_CMDR_XRST | W_B_CMDR_RACT);
-		/* resend */
+		/* resend - no XRST needed */
 		if (wch->bch.tx_skb) {
 			if (!test_bit(FLG_TRANSPARENT, &wch->bch.Flags))
 				wch->bch.tx_idx = 0;
+		} else if (test_bit(FLG_FILLEMPTY, &wch->bch.Flags)) {
+			test_and_set_bit(FLG_TX_EMPTY, &wch->bch.Flags);
 		}
 		send_next(wch);
 	}
@@ -944,22 +956,17 @@
 	struct w6692_hw *card = bch->hw;
 	int ret = -EINVAL;
 	struct mISDNhead *hh = mISDN_HEAD_P(skb);
-	u32 id;
-	u_long flags;
+	unsigned long flags;
 
 	switch (hh->prim) {
 	case PH_DATA_REQ:
 		spin_lock_irqsave(&card->lock, flags);
 		ret = bchannel_senddata(bch, skb);
 		if (ret > 0) { /* direct TX */
-			id = hh->id; /* skb can be freed */
 			ret = 0;
 			W6692_fill_Bfifo(bc);
-			spin_unlock_irqrestore(&card->lock, flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
-		} else
-			spin_unlock_irqrestore(&card->lock, flags);
+		}
+		spin_unlock_irqrestore(&card->lock, flags);
 		return ret;
 	case PH_ACTIVATE_REQ:
 		spin_lock_irqsave(&card->lock, flags);
@@ -994,20 +1001,7 @@
 static int
 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
 {
-	int	ret = 0;
-
-	switch (cq->op) {
-	case MISDN_CTRL_GETOP:
-		cq->op = 0;
-		break;
-		/* Nothing implemented yet */
-	case MISDN_CTRL_FILL_EMPTY:
-	default:
-		pr_info("%s: unknown Op %x\n", __func__, cq->op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	return mISDN_ctrl_bchannel(bch, cq);
 }
 
 static int
@@ -1022,7 +1016,6 @@
 	bch = &card->bc[rq->adr.channel - 1].bch;
 	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
 		return -EBUSY; /* b-channel can be only open once */
-	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
 	bch->ch.protocol = rq->protocol;
 	rq->ch = &bch->ch;
 	return 0;
@@ -1035,7 +1028,10 @@
 
 	switch (cq->op) {
 	case MISDN_CTRL_GETOP:
-		cq->op = 0;
+		cq->op = MISDN_CTRL_L1_TIMER3;
+		break;
+	case MISDN_CTRL_L1_TIMER3:
+		ret = l1_event(card->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
 		break;
 	default:
 		pr_info("%s: unknown CTRL OP %x\n", card->name, cq->op);
@@ -1058,15 +1054,10 @@
 	switch (cmd) {
 	case CLOSE_CHANNEL:
 		test_and_clear_bit(FLG_OPEN, &bch->Flags);
-		if (test_bit(FLG_ACTIVE, &bch->Flags)) {
-			spin_lock_irqsave(&card->lock, flags);
-			mISDN_freebchannel(bch);
-			w6692_mode(bc, ISDN_P_NONE);
-			spin_unlock_irqrestore(&card->lock, flags);
-		} else {
-			skb_queue_purge(&bch->rqueue);
-			bch->rcount = 0;
-		}
+		spin_lock_irqsave(&card->lock, flags);
+		mISDN_freebchannel(bch);
+		w6692_mode(bc, ISDN_P_NONE);
+		spin_unlock_irqrestore(&card->lock, flags);
 		ch->protocol = ISDN_P_NONE;
 		ch->peer = NULL;
 		module_put(THIS_MODULE);
@@ -1320,7 +1311,8 @@
 	card->dch.hw = card;
 	card->dch.dev.nrbchan = 2;
 	for (i = 0; i < 2; i++) {
-		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM);
+		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
+				   W_B_FIFO_THRESH);
 		card->bc[i].bch.hw = card;
 		card->bc[i].bch.nr = i + 1;
 		card->bc[i].bch.ch.nr = i + 1;
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index ba91333..88e4f0e 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -156,17 +156,9 @@
 hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
 {
 	int rc;
-	unsigned char valbuf[128];
 	hysdn_card *card = file->private_data;
 
-	if (count > (sizeof(valbuf) - 1))
-		count = sizeof(valbuf) - 1;	/* limit length */
-	if (copy_from_user(valbuf, buf, count))
-		return (-EFAULT);	/* copy failed */
-
-	valbuf[count] = 0;	/* terminating 0 */
-
-	rc = kstrtoul(valbuf, 0, &card->debug_flags);
+	rc = kstrtoul_from_user(buf, count, 0, &card->debug_flags);
 	if (rc < 0)
 		return rc;
 	hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
diff --git a/drivers/isdn/i4l/isdn_bsdcomp.c b/drivers/isdn/i4l/isdn_bsdcomp.c
index c59e8d2..8837ac5 100644
--- a/drivers/isdn/i4l/isdn_bsdcomp.c
+++ b/drivers/isdn/i4l/isdn_bsdcomp.c
@@ -612,7 +612,7 @@
 		db->n_bits++;
 
 	/* If output length is too large then this is an incompressible frame. */
-	if (!skb_out || (skb_out && skb_out->len >= skb_in->len)) {
+	if (!skb_out || skb_out->len >= skb_in->len) {
 		++db->incomp_count;
 		db->incomp_bytes += isize;
 		return 0;
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index a24530f..c401634 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -355,6 +355,22 @@
 }
 EXPORT_SYMBOL(mISDN_unregister_Bprotocol);
 
+static const char *msg_no_channel = "<no channel>";
+static const char *msg_no_stack = "<no stack>";
+static const char *msg_no_stackdev = "<no stack device>";
+
+const char *mISDNDevName4ch(struct mISDNchannel *ch)
+{
+	if (!ch)
+		return msg_no_channel;
+	if (!ch->st)
+		return msg_no_stack;
+	if (!ch->st->dev)
+		return msg_no_stackdev;
+	return dev_name(&ch->st->dev->dev);
+}
+EXPORT_SYMBOL(mISDNDevName4ch);
+
 static int
 mISDNInit(void)
 {
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
index afe4173..fc1733a 100644
--- a/drivers/isdn/mISDN/dsp.h
+++ b/drivers/isdn/mISDN/dsp.h
@@ -76,7 +76,9 @@
 #define MAX_SECONDS_JITTER_CHECK 5
 
 extern struct timer_list dsp_spl_tl;
-extern u32 dsp_spl_jiffies;
+
+/* the datatype needs to match the jiffies datatype */
+extern unsigned long dsp_spl_jiffies;
 
 /* the structure of conferences:
  *
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 334feab..a4f05c5 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -742,8 +742,8 @@
 					       member->dsp->pcm_slot_tx,
 					       member->dsp->pcm_bank_tx,
 					       member->dsp->pcm_bank_rx);
-				conf->hardware = 0;
-				conf->software = 1;
+				conf->hardware = 1;
+				conf->software = tx_data;
 				return;
 			}
 			/* find a new slot */
@@ -834,8 +834,8 @@
 					       nextm->dsp->name,
 					       member->dsp->pcm_slot_tx,
 					       member->dsp->pcm_slot_rx);
-				conf->hardware = 0;
-				conf->software = 1;
+				conf->hardware = 1;
+				conf->software = tx_data;
 				return;
 			}
 			/* find two new slot */
@@ -939,8 +939,11 @@
 	/* for more than two members.. */
 
 	/* if all members already have the same conference */
-	if (all_conf)
+	if (all_conf) {
+		conf->hardware = 1;
+		conf->software = tx_data;
 		return;
+	}
 
 	/*
 	 * if there is an existing conference, but not all members have joined
@@ -1013,6 +1016,8 @@
 			dsp_cmx_hw_message(member->dsp,
 					   MISDN_CTRL_HFC_CONF_JOIN, current_conf, 0, 0, 0);
 		}
+		conf->hardware = 1;
+		conf->software = tx_data;
 		return;
 	}
 
@@ -1328,7 +1333,7 @@
 		}
 		if (dsp->conf && dsp->conf->software && dsp->conf->hardware)
 			tx_data_only = 1;
-		if (dsp->conf->software && dsp->echo.hardware)
+		if (dsp->echo.software && dsp->echo.hardware)
 			tx_data_only = 1;
 	}
 
@@ -1619,7 +1624,7 @@
 
 static u32	jittercount; /* counter for jitter check */
 struct timer_list dsp_spl_tl;
-u32	dsp_spl_jiffies; /* calculate the next time to fire */
+unsigned long	dsp_spl_jiffies; /* calculate the next time to fire */
 static u16	dsp_count; /* last sample count */
 static int	dsp_count_valid; /* if we have last sample count */
 
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 2ac2d7a..28c99c6 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -268,6 +268,7 @@
 	}
 	cq.op = MISDN_CTRL_FILL_EMPTY;
 	cq.p1 = 1;
+	cq.p2 = dsp_silence;
 	if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
 		printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
 		       __func__);
diff --git a/drivers/isdn/mISDN/dsp_dtmf.c b/drivers/isdn/mISDN/dsp_dtmf.c
index 887860b..642f30b 100644
--- a/drivers/isdn/mISDN/dsp_dtmf.c
+++ b/drivers/isdn/mISDN/dsp_dtmf.c
@@ -222,16 +222,25 @@
 		goto storedigit;
 	}
 
-	if (dsp_debug & DEBUG_DSP_DTMFCOEFF)
+	if (dsp_debug & DEBUG_DSP_DTMFCOEFF) {
+		s32 tresh_100 = tresh/100;
+
+		if (tresh_100 == 0) {
+			tresh_100 = 1;
+			printk(KERN_DEBUG
+				"tresh(%d) too small set tresh/100 to 1\n",
+				tresh);
+		}
 		printk(KERN_DEBUG "a %3d %3d %3d %3d %3d %3d %3d %3d"
 		       " tr:%3d r %3d %3d %3d %3d %3d %3d %3d %3d\n",
 		       result[0] / 10000, result[1] / 10000, result[2] / 10000,
 		       result[3] / 10000, result[4] / 10000, result[5] / 10000,
 		       result[6] / 10000, result[7] / 10000, tresh / 10000,
-		       result[0] / (tresh / 100), result[1] / (tresh / 100),
-		       result[2] / (tresh / 100), result[3] / (tresh / 100),
-		       result[4] / (tresh / 100), result[5] / (tresh / 100),
-		       result[6] / (tresh / 100), result[7] / (tresh / 100));
+		       result[0] / (tresh_100), result[1] / (tresh_100),
+		       result[2] / (tresh_100), result[3] / (tresh_100),
+		       result[4] / (tresh_100), result[5] / (tresh_100),
+		       result[6] / (tresh_100), result[7] / (tresh_100));
+	}
 
 	/* calc digit (lowgroup/highgroup) */
 	lowgroup = -1;
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index c74c363..ef34fd4 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -81,10 +81,16 @@
 EXPORT_SYMBOL(mISDN_initdchannel);
 
 int
-mISDN_initbchannel(struct bchannel *ch, int maxlen)
+mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
+		   unsigned short minlen)
 {
 	ch->Flags = 0;
+	ch->minlen = minlen;
+	ch->next_minlen = minlen;
+	ch->init_minlen = minlen;
 	ch->maxlen = maxlen;
+	ch->next_maxlen = maxlen;
+	ch->init_maxlen = maxlen;
 	ch->hw = NULL;
 	ch->rx_skb = NULL;
 	ch->tx_skb = NULL;
@@ -134,6 +140,14 @@
 	test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
 	test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
 	test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
+	test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags);
+	test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags);
+	test_and_clear_bit(FLG_RX_OFF, &ch->Flags);
+	ch->dropcnt = 0;
+	ch->minlen = ch->init_minlen;
+	ch->next_minlen = ch->init_minlen;
+	ch->maxlen = ch->init_maxlen;
+	ch->next_maxlen = ch->init_maxlen;
 }
 EXPORT_SYMBOL(mISDN_clear_bchannel);
 
@@ -148,6 +162,51 @@
 }
 EXPORT_SYMBOL(mISDN_freebchannel);
 
+int
+mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
+{
+	int ret = 0;
+
+	switch (cq->op) {
+	case MISDN_CTRL_GETOP:
+		cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
+			 MISDN_CTRL_RX_OFF;
+		break;
+	case MISDN_CTRL_FILL_EMPTY:
+		if (cq->p1) {
+			memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
+			test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
+		} else {
+			test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
+		}
+		break;
+	case MISDN_CTRL_RX_OFF:
+		/* read back dropped byte count */
+		cq->p2 = bch->dropcnt;
+		if (cq->p1)
+			test_and_set_bit(FLG_RX_OFF, &bch->Flags);
+		else
+			test_and_clear_bit(FLG_RX_OFF, &bch->Flags);
+		bch->dropcnt = 0;
+		break;
+	case MISDN_CTRL_RX_BUFFER:
+		if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
+			bch->next_maxlen = cq->p2;
+		if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
+			bch->next_minlen = cq->p1;
+		/* we return the old values */
+		cq->p1 = bch->minlen;
+		cq->p2 = bch->maxlen;
+		break;
+	default:
+		pr_info("mISDN unhandled control %x operation\n", cq->op);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(mISDN_ctrl_bchannel);
+
 static inline u_int
 get_sapi_tei(u_char *p)
 {
@@ -197,24 +256,37 @@
 EXPORT_SYMBOL(recv_Echannel);
 
 void
-recv_Bchannel(struct bchannel *bch, unsigned int id)
+recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
 {
 	struct mISDNhead *hh;
 
-	hh = mISDN_HEAD_P(bch->rx_skb);
-	hh->prim = PH_DATA_IND;
-	hh->id = id;
-	if (bch->rcount >= 64) {
-		printk(KERN_WARNING "B-channel %p receive queue overflow, "
-		       "flushing!\n", bch);
-		skb_queue_purge(&bch->rqueue);
-		bch->rcount = 0;
+	/* if the allocation failed, upper functions may still call us */
+	if (unlikely(!bch->rx_skb))
 		return;
+	if (unlikely(!bch->rx_skb->len)) {
+		/* we have no data to deliver - this may happen after recovery
+		 * from an overflow or a too-small allocation.
+		 * We need to free the buffer here. */
+		dev_kfree_skb(bch->rx_skb);
+		bch->rx_skb = NULL;
+	} else {
+		if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
+		    (bch->rx_skb->len < bch->minlen) && !force)
+			return;
+		hh = mISDN_HEAD_P(bch->rx_skb);
+		hh->prim = PH_DATA_IND;
+		hh->id = id;
+		if (bch->rcount >= 64) {
+			printk(KERN_WARNING
+			       "B%d receive queue overflow - flushing!\n",
+			       bch->nr);
+			skb_queue_purge(&bch->rqueue);
+		}
+		bch->rcount++;
+		skb_queue_tail(&bch->rqueue, bch->rx_skb);
+		bch->rx_skb = NULL;
+		schedule_event(bch, FLG_RECVQUEUE);
 	}
-	bch->rcount++;
-	skb_queue_tail(&bch->rqueue, bch->rx_skb);
-	bch->rx_skb = NULL;
-	schedule_event(bch, FLG_RECVQUEUE);
 }
 EXPORT_SYMBOL(recv_Bchannel);
 
@@ -272,7 +344,7 @@
 }
 EXPORT_SYMBOL(get_next_dframe);
 
-void
+static void
 confirm_Bsend(struct bchannel *bch)
 {
 	struct sk_buff	*skb;
@@ -294,7 +366,6 @@
 	skb_queue_tail(&bch->rqueue, skb);
 	schedule_event(bch, FLG_RECVQUEUE);
 }
-EXPORT_SYMBOL(confirm_Bsend);
 
 int
 get_next_bframe(struct bchannel *bch)
@@ -305,8 +376,8 @@
 		if (bch->tx_skb) {
 			bch->next_skb = NULL;
 			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
-			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
-				confirm_Bsend(bch); /* not for transparent */
+			/* confirm immediately to allow the next data */
+			confirm_Bsend(bch);
 			return 1;
 		} else {
 			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
@@ -395,7 +466,62 @@
 		/* write to fifo */
 		ch->tx_skb = skb;
 		ch->tx_idx = 0;
+		confirm_Bsend(ch);
 		return 1;
 	}
 }
 EXPORT_SYMBOL(bchannel_senddata);
+
+/* The function allocates a new receive skb on demand, sized to the
+ * requirements of the current protocol. It returns the tailroom of the
+ * receive skb or a negative error code.
+ */
+int
+bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
+{
+	int len;
+
+	if (bch->rx_skb) {
+		len = skb_tailroom(bch->rx_skb);
+		if (len < reqlen) {
+			pr_warning("B%d no space for %d (only %d) bytes\n",
+				   bch->nr, reqlen, len);
+			if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
+				/* send what we have now and try a new buffer */
+				recv_Bchannel(bch, 0, true);
+			} else {
+				/* on HDLC we have to drop too big frames */
+				return -EMSGSIZE;
+			}
+		} else {
+			return len;
+		}
+	}
+	/* update current min/max length first */
+	if (unlikely(bch->maxlen != bch->next_maxlen))
+		bch->maxlen = bch->next_maxlen;
+	if (unlikely(bch->minlen != bch->next_minlen))
+		bch->minlen = bch->next_minlen;
+	if (unlikely(reqlen > bch->maxlen))
+		return -EMSGSIZE;
+	if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
+		if (reqlen >= bch->minlen) {
+			len = reqlen;
+		} else {
+			len = 2 * bch->minlen;
+			if (len > bch->maxlen)
+				len = bch->maxlen;
+		}
+	} else {
+		/* with HDLC we do not know the length yet */
+		len = bch->maxlen;
+	}
+	bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
+	if (!bch->rx_skb) {
+		pr_warning("B%d receive no memory for %d bytes\n",
+			   bch->nr, len);
+		len = -ENOMEM;
+	}
+	return len;
+}
+EXPORT_SYMBOL(bchannel_get_rxbuf);
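/*
 * Illustrative sketch, not part of the patch: how a card driver's
 * B-channel receive path might use the helpers exported above.  The
 * "mydrv_" names and the FIFO copy are hypothetical placeholders; the
 * mISDN calls (bchannel_get_rxbuf, recv_Bchannel, mISDN_ctrl_bchannel)
 * are the interfaces added or changed in this series.
 */
static void mydrv_recv_bfifo(struct bchannel *bch, int count)
{
	if (test_bit(FLG_RX_OFF, &bch->Flags)) {
		/* reception disabled via MISDN_CTRL_RX_OFF: only count drops */
		bch->dropcnt += count;
		return;
	}
	if (bchannel_get_rxbuf(bch, count) < 0)
		return;	/* helper already warned; the data is dropped */
	/* ... card specific read of 'count' bytes into
	 *     skb_put(bch->rx_skb, count) goes here ... */
	/* in transparent mode the skb is only queued once minlen is reached */
	recv_Bchannel(bch, 0, false);
}

/* the per-driver bctrl callback can now simply delegate */
static int mydrv_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
	return mISDN_ctrl_bchannel(bch, cq);
}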
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 0f88acf..db50f78 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -1420,7 +1420,7 @@
 		bch->nr = i + ch;
 		bch->slot = i + ch;
 		bch->debug = debug;
-		mISDN_initbchannel(bch, MAX_DATA_MEM);
+		mISDN_initbchannel(bch, MAX_DATA_MEM, 0);
 		bch->hw = hc;
 		bch->ch.send = handle_bmsg;
 		bch->ch.ctrl = l1oip_bctrl;
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index 0fc49b3..bebc57b 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -28,13 +28,15 @@
 struct layer1 {
 	u_long Flags;
 	struct FsmInst l1m;
-	struct FsmTimer timer;
+	struct FsmTimer timer3;
+	struct FsmTimer timerX;
 	int delay;
+	int t3_value;
 	struct dchannel *dch;
 	dchannel_l1callback *dcb;
 };
 
-#define TIMER3_VALUE 7000
+#define TIMER3_DEFAULT_VALUE	7000
 
 static
 struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL};
@@ -134,7 +136,7 @@
 	struct layer1 *l1 = fi->userdata;
 
 	mISDN_FsmChangeState(fi, ST_L1_F3);
-	mISDN_FsmRestartTimer(&l1->timer, 550, EV_TIMER_DEACT, NULL, 2);
+	mISDN_FsmRestartTimer(&l1->timerX, 550, EV_TIMER_DEACT, NULL, 2);
 	test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags);
 }
 
@@ -179,11 +181,11 @@
 	mISDN_FsmChangeState(fi, ST_L1_F7);
 	l1->dcb(l1->dch, INFO3_P8);
 	if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags))
-		mISDN_FsmDelTimer(&l1->timer, 4);
+		mISDN_FsmDelTimer(&l1->timerX, 4);
 	if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) {
 		if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags))
-			mISDN_FsmDelTimer(&l1->timer, 3);
-		mISDN_FsmRestartTimer(&l1->timer, 110, EV_TIMER_ACT, NULL, 2);
+			mISDN_FsmDelTimer(&l1->timer3, 3);
+		mISDN_FsmRestartTimer(&l1->timerX, 110, EV_TIMER_ACT, NULL, 2);
 		test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags);
 	}
 }
@@ -201,7 +203,7 @@
 	}
 	if (l1->l1m.state != ST_L1_F6) {
 		mISDN_FsmChangeState(fi, ST_L1_F3);
-		l1->dcb(l1->dch, HW_POWERUP_REQ);
+		/* do not force anything here, we need to send INFO 0 */
 	}
 }
 
@@ -233,8 +235,9 @@
 {
 	struct layer1 *l1 = fi->userdata;
 
-	mISDN_FsmRestartTimer(&l1->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
+	mISDN_FsmRestartTimer(&l1->timer3, l1->t3_value, EV_TIMER3, NULL, 2);
 	test_and_set_bit(FLG_L1_T3RUN, &l1->Flags);
+	/* Tell HW to send INFO 1 */
 	l1->dcb(l1->dch, HW_RESET_REQ);
 }
 
@@ -302,7 +305,8 @@
 
 static void
 release_l1(struct layer1 *l1) {
-	mISDN_FsmDelTimer(&l1->timer, 0);
+	mISDN_FsmDelTimer(&l1->timerX, 0);
+	mISDN_FsmDelTimer(&l1->timer3, 0);
 	if (l1->dch)
 		l1->dch->l1 = NULL;
 	module_put(THIS_MODULE);
@@ -356,6 +360,16 @@
 		release_l1(l1);
 		break;
 	default:
+		if ((event & ~HW_TIMER3_VMASK) == HW_TIMER3_VALUE) {
+			int val = event & HW_TIMER3_VMASK;
+
+			if (val < 5)
+				val = 5;
+			if (val > 30)
+				val = 30;
+			l1->t3_value = val;
+			break;
+		}
 		if (*debug & DEBUG_L1)
 			printk(KERN_DEBUG "%s %x unhandled\n",
 			       __func__, event);
@@ -377,13 +391,15 @@
 	nl1->l1m.fsm = &l1fsm_s;
 	nl1->l1m.state = ST_L1_F3;
 	nl1->Flags = 0;
+	nl1->t3_value = TIMER3_DEFAULT_VALUE;
 	nl1->l1m.debug = *debug & DEBUG_L1_FSM;
 	nl1->l1m.userdata = nl1;
 	nl1->l1m.userint = 0;
 	nl1->l1m.printdebug = l1m_debug;
 	nl1->dch = dch;
 	nl1->dcb = dcb;
-	mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer);
+	mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer3);
+	mISDN_FsmInitTimer(&nl1->l1m, &nl1->timerX);
 	__module_get(THIS_MODULE);
 	dch->l1 = nl1;
 	return 0;
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 39d7375..0dc8abc 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -58,6 +58,8 @@
 	EV_L1_DEACTIVATE,
 	EV_L2_T200,
 	EV_L2_T203,
+	EV_L2_T200I,
+	EV_L2_T203I,
 	EV_L2_SET_OWN_BUSY,
 	EV_L2_CLEAR_OWN_BUSY,
 	EV_L2_FRAME_ERROR,
@@ -86,6 +88,8 @@
 	"EV_L1_DEACTIVATE",
 	"EV_L2_T200",
 	"EV_L2_T203",
+	"EV_L2_T200I",
+	"EV_L2_T203I",
 	"EV_L2_SET_OWN_BUSY",
 	"EV_L2_CLEAR_OWN_BUSY",
 	"EV_L2_FRAME_ERROR",
@@ -106,8 +110,8 @@
 	vaf.fmt = fmt;
 	vaf.va = &va;
 
-	printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
-	       l2->sapi, l2->tei, &vaf);
+	printk(KERN_DEBUG "%s l2 (sapi %d tei %d): %pV\n",
+	       mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, &vaf);
 
 	va_end(va);
 }
@@ -150,7 +154,8 @@
 	mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
 	err = l2->up->send(l2->up, skb);
 	if (err) {
-		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
+		printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
+		       mISDNDevName4ch(&l2->ch), err);
 		dev_kfree_skb(skb);
 	}
 }
@@ -174,7 +179,8 @@
 		memcpy(skb_put(skb, len), arg, len);
 	err = l2->up->send(l2->up, skb);
 	if (err) {
-		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
+		printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
+		       mISDNDevName4ch(&l2->ch), err);
 		dev_kfree_skb(skb);
 	}
 }
@@ -185,7 +191,8 @@
 
 	ret = l2->ch.recv(l2->ch.peer, skb);
 	if (ret && (*debug & DEBUG_L2_RECV))
-		printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
+		printk(KERN_DEBUG "l2down_skb: dev %s ret(%d)\n",
+		       mISDNDevName4ch(&l2->ch), ret);
 	return ret;
 }
 
@@ -276,12 +283,37 @@
 	return ret;
 }
 
+static void
+l2_timeout(struct FsmInst *fi, int event, void *arg)
+{
+	struct layer2 *l2 = fi->userdata;
+	struct sk_buff *skb;
+	struct mISDNhead *hh;
+
+	skb = mI_alloc_skb(0, GFP_ATOMIC);
+	if (!skb) {
+		printk(KERN_WARNING "%s: L2(%d,%d) nr:%x timer %s no skb\n",
+		       mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
+		       l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
+		return;
+	}
+	hh = mISDN_HEAD_P(skb);
+	hh->prim = event == EV_L2_T200 ? DL_TIMER200_IND : DL_TIMER203_IND;
+	hh->id = l2->ch.nr;
+	if (*debug & DEBUG_TIMER)
+		printk(KERN_DEBUG "%s: L2(%d,%d) nr:%x timer %s expired\n",
+		       mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
+		       l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
+	if (l2->ch.st)
+		l2->ch.st->own.recv(&l2->ch.st->own, skb);
+}
+
 static int
 l2mgr(struct layer2 *l2, u_int prim, void *arg) {
 	long c = (long)arg;
 
-	printk(KERN_WARNING
-	       "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
+	printk(KERN_WARNING "l2mgr: dev %s addr:%x prim %x %c\n",
+	       mISDNDevName4ch(&l2->ch), l2->id, prim, (char)c);
 	if (test_bit(FLG_LAPD, &l2->flag) &&
 	    !test_bit(FLG_FIXED_TEI, &l2->flag)) {
 		switch (c) {
@@ -603,8 +635,8 @@
 	else {
 		skb = mI_alloc_skb(i, GFP_ATOMIC);
 		if (!skb) {
-			printk(KERN_WARNING "%s: can't alloc skbuff\n",
-			       __func__);
+			printk(KERN_WARNING "%s: can't alloc skbuff in %s\n",
+			       mISDNDevName4ch(&l2->ch), __func__);
 			return;
 		}
 	}
@@ -1089,8 +1121,8 @@
 		tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
 	skb = mI_alloc_skb(i, GFP_ATOMIC);
 	if (!skb) {
-		printk(KERN_WARNING
-		       "isdnl2 can't alloc sbbuff for enquiry_cr\n");
+		printk(KERN_WARNING "%s: isdnl2 can't alloc sbbuff in %s\n",
+		       mISDNDevName4ch(&l2->ch), __func__);
 		return;
 	}
 	memcpy(skb_put(skb, i), tmp, i);
@@ -1150,7 +1182,7 @@
 			else
 				printk(KERN_WARNING
 				       "%s: windowar[%d] is NULL\n",
-				       __func__, p1);
+				       mISDNDevName4ch(&l2->ch), p1);
 			l2->windowar[p1] = NULL;
 		}
 		mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
@@ -1461,8 +1493,8 @@
 		p1 = (l2->vs - l2->va) % 8;
 	p1 = (p1 + l2->sow) % l2->window;
 	if (l2->windowar[p1]) {
-		printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
-		       p1);
+		printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
+		       mISDNDevName4ch(&l2->ch), p1);
 		dev_kfree_skb(l2->windowar[p1]);
 	}
 	l2->windowar[p1] = skb;
@@ -1482,12 +1514,14 @@
 		memcpy(skb_push(nskb, i), header, i);
 	else {
 		printk(KERN_WARNING
-		       "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
+		       "%s: L2 pull_iqueue skb header(%d/%d) too short\n",
+		       mISDNDevName4ch(&l2->ch), i, p1);
 		oskb = nskb;
 		nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
 		if (!nskb) {
 			dev_kfree_skb(oskb);
-			printk(KERN_WARNING "%s: no skb mem\n", __func__);
+			printk(KERN_WARNING "%s: no skb mem in %s\n",
+			       mISDNDevName4ch(&l2->ch), __func__);
 			return;
 		}
 		memcpy(skb_put(nskb, i), header, i);
@@ -1814,11 +1848,16 @@
 	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
 	{ST_L2_7, EV_L2_I, l2_got_iframe},
 	{ST_L2_8, EV_L2_I, l2_got_iframe},
-	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
-	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
-	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
-	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
-	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
+	{ST_L2_5, EV_L2_T200, l2_timeout},
+	{ST_L2_6, EV_L2_T200, l2_timeout},
+	{ST_L2_7, EV_L2_T200, l2_timeout},
+	{ST_L2_8, EV_L2_T200, l2_timeout},
+	{ST_L2_7, EV_L2_T203, l2_timeout},
+	{ST_L2_5, EV_L2_T200I, l2_st5_tout_200},
+	{ST_L2_6, EV_L2_T200I, l2_st6_tout_200},
+	{ST_L2_7, EV_L2_T200I, l2_st7_tout_200},
+	{ST_L2_8, EV_L2_T200I, l2_st8_tout_200},
+	{ST_L2_7, EV_L2_T203I, l2_st7_tout_203},
 	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
 	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
 	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
@@ -1858,7 +1897,8 @@
 		ptei = *datap++;
 		if ((psapi & 1) || !(ptei & 1)) {
 			printk(KERN_WARNING
-			       "l2 D-channel frame wrong EA0/EA1\n");
+			       "%s l2 D-channel frame wrong EA0/EA1\n",
+			       mISDNDevName4ch(&l2->ch));
 			return ret;
 		}
 		psapi >>= 2;
@@ -1867,7 +1907,8 @@
 			/* not our business */
 			if (*debug & DEBUG_L2)
 				printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
-				       __func__, psapi, l2->sapi);
+				       mISDNDevName4ch(&l2->ch), psapi,
+				       l2->sapi);
 			dev_kfree_skb(skb);
 			return 0;
 		}
@@ -1875,7 +1916,7 @@
 			/* not our business */
 			if (*debug & DEBUG_L2)
 				printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
-				       __func__, ptei, l2->tei);
+				       mISDNDevName4ch(&l2->ch), ptei, l2->tei);
 			dev_kfree_skb(skb);
 			return 0;
 		}
@@ -1916,7 +1957,8 @@
 	} else
 		c = 'L';
 	if (c) {
-		printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
+		printk(KERN_WARNING "%s:l2 D-channel frame error %c\n",
+		       mISDNDevName4ch(&l2->ch), c);
 		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
 	}
 	return ret;
@@ -1930,8 +1972,17 @@
 	int			ret = -EINVAL;
 
 	if (*debug & DEBUG_L2_RECV)
-		printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n",
-		       __func__, hh->prim, hh->id, l2->sapi, l2->tei);
+		printk(KERN_DEBUG "%s: %s prim(%x) id(%x) sapi(%d) tei(%d)\n",
+		       __func__, mISDNDevName4ch(&l2->ch), hh->prim, hh->id,
+		       l2->sapi, l2->tei);
+	if (hh->prim == DL_INTERN_MSG) {
+		struct mISDNhead *chh = hh + 1; /* saved copy */
+
+		*hh = *chh;
+		if (*debug & DEBUG_L2_RECV)
+			printk(KERN_DEBUG "%s: prim(%x) id(%x) internal msg\n",
+				mISDNDevName4ch(&l2->ch), hh->prim, hh->id);
+	}
 	switch (hh->prim) {
 	case PH_DATA_IND:
 		ret = ph_data_indication(l2, hh, skb);
@@ -1987,6 +2038,12 @@
 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
 				     skb);
 		break;
+	case DL_TIMER200_IND:
+		mISDN_FsmEvent(&l2->l2m, EV_L2_T200I, NULL);
+		break;
+	case DL_TIMER203_IND:
+		mISDN_FsmEvent(&l2->l2m, EV_L2_T203I, NULL);
+		break;
 	default:
 		if (*debug & DEBUG_L2)
 			l2m_debug(&l2->l2m, "l2 unknown pr %04x",
@@ -2005,7 +2062,8 @@
 	int		ret = -EINVAL;
 
 	if (*debug & DEBUG_L2_TEI)
-		printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
+		printk(KERN_DEBUG "%s: cmd(%x) in %s\n",
+		       mISDNDevName4ch(&l2->ch), cmd, __func__);
 	switch (cmd) {
 	case (MDL_ASSIGN_REQ):
 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
@@ -2018,7 +2076,8 @@
 		break;
 	case (MDL_ERROR_RSP):
 		/* ETS 300-125 5.3.2.1 Test: TC13010 */
-		printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
+		printk(KERN_NOTICE "%s: MDL_ERROR|REQ (tei_l2)\n",
+		       mISDNDevName4ch(&l2->ch));
 		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
 		break;
 	}
@@ -2050,7 +2109,8 @@
 	u_int			info;
 
 	if (*debug & DEBUG_L2_CTRL)
-		printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
+		printk(KERN_DEBUG "%s: %s cmd(%x)\n",
+		       mISDNDevName4ch(ch), __func__, cmd);
 
 	switch (cmd) {
 	case OPEN_CHANNEL:
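/*
 * Illustrative note, not part of the patch: with the hunks above,
 * T200/T203 expiry no longer runs the layer-2 state machine from timer
 * context.  l2_timeout() only builds a DL_TIMER200_IND/DL_TIMER203_IND
 * message and hands it to the stack's own receive hook; when that
 * message comes back to layer2 it is mapped onto the internal
 * EV_L2_T200I/EV_L2_T203I events, so the original timeout handlers run
 * from the stack's normal message-processing path.
 */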
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index ba2bc0c..be88728 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -790,18 +790,23 @@
 static struct layer2 *
 create_new_tei(struct manager *mgr, int tei, int sapi)
 {
-	u_long		opt = 0;
-	u_long		flags;
-	int		id;
-	struct layer2	*l2;
+	unsigned long		opt = 0;
+	unsigned long		flags;
+	int			id;
+	struct layer2		*l2;
+	struct channel_req	rq;
 
 	if (!mgr->up)
 		return NULL;
 	if ((tei >= 0) && (tei < 64))
 		test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
-	if (mgr->ch.st->dev->Dprotocols
-	    & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
+	if (mgr->ch.st->dev->Dprotocols & ((1 << ISDN_P_TE_E1) |
+	    (1 << ISDN_P_NT_E1))) {
 		test_and_set_bit(OPTION_L2_PMX, &opt);
+		rq.protocol = ISDN_P_NT_E1;
+	} else {
+		rq.protocol = ISDN_P_NT_S0;
+	}
 	l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, opt, tei, sapi);
 	if (!l2) {
 		printk(KERN_WARNING "%s:no memory for layer2\n", __func__);
@@ -836,6 +841,14 @@
 		l2->ch.recv = mgr->ch.recv;
 		l2->ch.peer = mgr->ch.peer;
 		l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL);
+		/* We also need to open L1 here for the manager (refcounting) */
+		rq.adr.dev = mgr->ch.st->dev->id;
+		id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL, &rq);
+		if (id < 0) {
+			printk(KERN_WARNING "%s: cannot open L1\n", __func__);
+			l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
+			l2 = NULL;
+		}
 	}
 	return l2;
 }
@@ -978,10 +991,11 @@
 static int
 create_teimgr(struct manager *mgr, struct channel_req *crq)
 {
-	struct layer2	*l2;
-	u_long		opt = 0;
-	u_long		flags;
-	int		id;
+	struct layer2		*l2;
+	unsigned long		opt = 0;
+	unsigned long		flags;
+	int			id;
+	struct channel_req	l1rq;
 
 	if (*debug & DEBUG_L2_TEI)
 		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
@@ -1016,6 +1030,7 @@
 		if (crq->protocol == ISDN_P_LAPD_TE)
 			test_and_set_bit(MGR_OPT_USER, &mgr->options);
 	}
+	l1rq.adr = crq->adr;
 	if (mgr->ch.st->dev->Dprotocols
 	    & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
 		test_and_set_bit(OPTION_L2_PMX, &opt);
@@ -1023,6 +1038,8 @@
 		mgr->up = crq->ch;
 		id = DL_INFO_L2_CONNECT;
 		teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id);
+		if (test_bit(MGR_PH_ACTIVE, &mgr->options))
+			teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL);
 		crq->ch = NULL;
 		if (!list_empty(&mgr->layer2)) {
 			read_lock_irqsave(&mgr->lock, flags);
@@ -1053,24 +1070,34 @@
 		l2->tm->tei_m.fsm = &teifsmu;
 		l2->tm->tei_m.state = ST_TEI_NOP;
 		l2->tm->tval = 1000; /* T201  1 sec */
+		if (test_bit(OPTION_L2_PMX, &opt))
+			l1rq.protocol = ISDN_P_TE_E1;
+		else
+			l1rq.protocol = ISDN_P_TE_S0;
 	} else {
 		l2->tm->tei_m.fsm = &teifsmn;
 		l2->tm->tei_m.state = ST_TEI_NOP;
 		l2->tm->tval = 2000; /* T202  2 sec */
+		if (test_bit(OPTION_L2_PMX, &opt))
+			l1rq.protocol = ISDN_P_NT_E1;
+		else
+			l1rq.protocol = ISDN_P_NT_S0;
 	}
 	mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer);
 	write_lock_irqsave(&mgr->lock, flags);
 	id = get_free_id(mgr);
 	list_add_tail(&l2->list, &mgr->layer2);
 	write_unlock_irqrestore(&mgr->lock, flags);
-	if (id < 0) {
-		l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
-	} else {
+	if (id >= 0) {
 		l2->ch.nr = id;
 		l2->up->nr = id;
 		crq->ch = &l2->ch;
-		id = 0;
+		/* We also need to open L1 here for the manager (refcounting) */
+		id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL,
+					  &l1rq);
 	}
+	if (id < 0)
+		l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
 	return id;
 }
 
@@ -1096,12 +1123,16 @@
 		break;
 	case PH_ACTIVATE_IND:
 		test_and_set_bit(MGR_PH_ACTIVE, &mgr->options);
+		if (mgr->up)
+			teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL);
 		mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL);
 		do_send(mgr);
 		ret = 0;
 		break;
 	case PH_DEACTIVATE_IND:
 		test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options);
+		if (mgr->up)
+			teiup_create(mgr, PH_DEACTIVATE_IND, 0, NULL);
 		mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL);
 		ret = 0;
 		break;
@@ -1263,7 +1294,7 @@
 mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
 {
 	struct manager		*mgr = container_of(ch, struct manager, bcast);
-	struct mISDNhead	*hh = mISDN_HEAD_P(skb);
+	struct mISDNhead	*hhc, *hh = mISDN_HEAD_P(skb);
 	struct sk_buff		*cskb = NULL;
 	struct layer2		*l2;
 	u_long			flags;
@@ -1278,10 +1309,17 @@
 				skb = NULL;
 			} else {
 				if (!cskb)
-					cskb = skb_copy(skb, GFP_KERNEL);
+					cskb = skb_copy(skb, GFP_ATOMIC);
 			}
 			if (cskb) {
-				ret = l2->ch.send(&l2->ch, cskb);
+				hhc = mISDN_HEAD_P(cskb);
+				/* save original header behind normal header */
+				hhc++;
+				*hhc = *hh;
+				hhc--;
+				hhc->prim = DL_INTERN_MSG;
+				hhc->id = l2->ch.nr;
+				ret = ch->st->own.recv(&ch->st->own, cskb);
 				if (ret) {
 					if (*debug & DEBUG_SEND_ERR)
 						printk(KERN_DEBUG
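/*
 * Illustrative note, not part of the patch: mgr_bcast() no longer calls
 * each layer2's send directly.  The original mISDNhead is stashed in the
 * skb's control block right behind the visible header (*(hhc + 1)), the
 * visible header is rewritten to DL_INTERN_MSG with the target channel
 * number in hh->id, and the copy is fed back through the stack's own
 * receive hook.  The layer2 receive path shown earlier restores the
 * saved header ("*hh = *chh") before normal processing.
 */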
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index d8433f2..73973fd 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -112,7 +112,7 @@
 	return err;
 }
 
-static void __devexit gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
+static void gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
 {
 	int i;
 
@@ -294,7 +294,7 @@
 
 static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store);
 
-static void __devexit delete_netxbig_led(struct netxbig_led_data *led_dat)
+static void delete_netxbig_led(struct netxbig_led_data *led_dat)
 {
 	if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
 		device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 2f0a144..01cf89e 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -255,7 +255,7 @@
 	return ret;
 }
 
-static void __devexit delete_ns2_led(struct ns2_led_data *led_dat)
+static void delete_ns2_led(struct ns2_led_data *led_dat)
 {
 	device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
 	led_classdev_unregister(&led_dat->cdev);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 97e73e5..17e2b47 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1727,8 +1727,7 @@
 	bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize)
 			      - BITMAP_BLOCK_SHIFT);
 
-	/* now that chunksize and chunkshift are set, we can use these macros */
-	chunks = (blocks + bitmap->chunkshift - 1) >>
+	chunks = (blocks + (1 << bitmap->chunkshift) - 1) >>
 			bitmap->chunkshift;
 	pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
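/*
 * Illustrative arithmetic, not part of the patch: the removed expression
 * rounded up by the shift *count* instead of the chunk size.  With
 * chunkshift = 10 (1024 blocks per chunk) and blocks = 1500:
 *
 *   old: (1500 + 10 - 1)   >> 10 = 1 chunk   (under-counts)
 *   new: (1500 + 1024 - 1) >> 10 = 2 chunks  (correct round-up)
 */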
 
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 55ca5ae..b44b0aba 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -101,9 +101,6 @@
 
 #define BITMAP_BLOCK_SHIFT 9
 
-/* how many blocks per chunk? (this is variable) */
-#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->mddev->bitmap_info.chunksize >> BITMAP_BLOCK_SHIFT)
-
 #endif
 
 /*
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 1f23e04..08d9a20 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -134,7 +134,7 @@
 {
 	struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
 
-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN))
 		return;
 
 	spin_lock(&receiving_list_lock);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 922a338..754f38f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -718,8 +718,8 @@
 		return 0;
 
 	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
-	request_module("scsi_dh_%s", m->hw_handler_name);
-	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
+	if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
+				     "scsi_dh_%s", m->hw_handler_name)) {
 		ti->error = "unknown hardware handler type";
 		ret = -EINVAL;
 		goto fail;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 213ae32..eb3d138 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -279,8 +279,10 @@
 
 	hlist_del(&cell->list);
 
-	bio_list_add(inmates, cell->holder);
-	bio_list_merge(inmates, &cell->bios);
+	if (inmates) {
+		bio_list_add(inmates, cell->holder);
+		bio_list_merge(inmates, &cell->bios);
+	}
 
 	mempool_free(cell, prison->cell_pool);
 }
@@ -303,9 +305,10 @@
  */
 static void __cell_release_singleton(struct cell *cell, struct bio *bio)
 {
-	hlist_del(&cell->list);
 	BUG_ON(cell->holder != bio);
 	BUG_ON(!bio_list_empty(&cell->bios));
+
+	__cell_release(cell, NULL);
 }
 
 static void cell_release_singleton(struct cell *cell, struct bio *bio)
@@ -1177,6 +1180,7 @@
 static void process_discard(struct thin_c *tc, struct bio *bio)
 {
 	int r;
+	unsigned long flags;
 	struct pool *pool = tc->pool;
 	struct cell *cell, *cell2;
 	struct cell_key key, key2;
@@ -1218,7 +1222,9 @@
 			m->bio = bio;
 
 			if (!ds_add_work(&pool->all_io_ds, &m->list)) {
+				spin_lock_irqsave(&pool->lock, flags);
 				list_add(&m->list, &pool->prepared_discards);
+				spin_unlock_irqrestore(&pool->lock, flags);
 				wake_worker(pool);
 			}
 		} else {
@@ -1626,6 +1632,21 @@
 	pool->low_water_blocks = pt->low_water_blocks;
 	pool->pf = pt->pf;
 
+	/*
+	 * If discard_passdown was enabled verify that the data device
+	 * supports discards.  Disable discard_passdown if not; otherwise
+	 * -EOPNOTSUPP will be returned.
+	 */
+	if (pt->pf.discard_passdown) {
+		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+		if (!q || !blk_queue_discard(q)) {
+			char buf[BDEVNAME_SIZE];
+			DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
+			       bdevname(pt->data_dev->bdev, buf));
+			pool->pf.discard_passdown = 0;
+		}
+	}
+
 	return 0;
 }
 
@@ -1982,19 +2003,6 @@
 		goto out_flags_changed;
 	}
 
-	/*
-	 * If discard_passdown was enabled verify that the data device
-	 * supports discards.  Disable discard_passdown if not; otherwise
-	 * -EOPNOTSUPP will be returned.
-	 */
-	if (pf.discard_passdown) {
-		struct request_queue *q = bdev_get_queue(data_dev->bdev);
-		if (!q || !blk_queue_discard(q)) {
-			DMWARN("Discard unsupported by data device: Disabling discard passdown.");
-			pf.discard_passdown = 0;
-		}
-	}
-
 	pt->pool = pool;
 	pt->ti = ti;
 	pt->metadata_dev = metadata_dev;
@@ -2379,7 +2387,7 @@
 		       (unsigned long long)pt->low_water_blocks);
 
 		count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
-			!pool->pf.discard_passdown;
+			!pt->pf.discard_passdown;
 		DMEMIT("%u ", count);
 
 		if (!pool->pf.zero_new_blocks)
@@ -2388,7 +2396,7 @@
 		if (!pool->pf.discard_enabled)
 			DMEMIT("ignore_discard ");
 
-		if (!pool->pf.discard_passdown)
+		if (!pt->pf.discard_passdown)
 			DMEMIT("no_discard_passdown ");
 
 		break;
@@ -2626,8 +2634,10 @@
 	if (h->all_io_entry) {
 		INIT_LIST_HEAD(&work);
 		ds_dec(h->all_io_entry, &work);
+		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list)
 			list_add(&m->list, &pool->prepared_discards);
+		spin_unlock_irqrestore(&pool->lock, flags);
 	}
 
 	mempool_free(h, pool->endio_hook_pool);
@@ -2759,6 +2769,6 @@
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
-MODULE_DESCRIPTION(DM_NAME "device-mapper thin provisioning target");
+MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 477eb2e..01233d8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -391,6 +391,8 @@
 	synchronize_rcu();
 	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
 	mddev->pers->quiesce(mddev, 1);
+
+	del_timer_sync(&mddev->safemode_timer);
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c8dbb84..3f91c2e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3164,12 +3164,40 @@
 	return size << conf->chunk_shift;
 }
 
+static void calc_sectors(struct r10conf *conf, sector_t size)
+{
+	/* Calculate the number of sectors-per-device that will
+	 * actually be used, and set conf->dev_sectors and
+	 * conf->stride
+	 */
+
+	size = size >> conf->chunk_shift;
+	sector_div(size, conf->far_copies);
+	size = size * conf->raid_disks;
+	sector_div(size, conf->near_copies);
+	/* 'size' is now the number of chunks in the array */
+	/* calculate "used chunks per device" */
+	size = size * conf->copies;
+
+	/* We need to round up when dividing by raid_disks to
+	 * get the stride size.
+	 */
+	size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
+
+	conf->dev_sectors = size << conf->chunk_shift;
+
+	if (conf->far_offset)
+		conf->stride = 1 << conf->chunk_shift;
+	else {
+		sector_div(size, conf->far_copies);
+		conf->stride = size << conf->chunk_shift;
+	}
+}
 
 static struct r10conf *setup_conf(struct mddev *mddev)
 {
 	struct r10conf *conf = NULL;
 	int nc, fc, fo;
-	sector_t stride, size;
 	int err = -EINVAL;
 
 	if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
@@ -3219,28 +3247,7 @@
 	if (!conf->r10bio_pool)
 		goto out;
 
-	size = mddev->dev_sectors >> conf->chunk_shift;
-	sector_div(size, fc);
-	size = size * conf->raid_disks;
-	sector_div(size, nc);
-	/* 'size' is now the number of chunks in the array */
-	/* calculate "used chunks per device" in 'stride' */
-	stride = size * conf->copies;
-
-	/* We need to round up when dividing by raid_disks to
-	 * get the stride size.
-	 */
-	stride += conf->raid_disks - 1;
-	sector_div(stride, conf->raid_disks);
-
-	conf->dev_sectors = stride << conf->chunk_shift;
-
-	if (fo)
-		stride = 1;
-	else
-		sector_div(stride, fc);
-	conf->stride = stride << conf->chunk_shift;
-
+	calc_sectors(conf, mddev->dev_sectors);
 
 	spin_lock_init(&conf->device_lock);
 	INIT_LIST_HEAD(&conf->retry_list);
@@ -3468,7 +3475,8 @@
 		mddev->recovery_cp = oldsize;
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	}
-	mddev->dev_sectors = sectors;
+	calc_sectors(conf, sectors);
+	mddev->dev_sectors = conf->dev_sectors;
 	mddev->resync_max_sectors = size;
 	return 0;
 }
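/*
 * Illustrative walk-through of calc_sectors(), not part of the patch.
 * Hypothetical numbers: chunk_shift = 7 (64 KiB chunks of 512 B sectors),
 * raid_disks = 4, near_copies = 2, far_copies = 1 (so copies = 2),
 * far_offset = 0, size = 1000000 sectors per device:
 *
 *   1000000 >> 7            = 7812   chunks per device
 *   7812 / far_copies       = 7812
 *   7812 * raid_disks       = 31248
 *   31248 / near_copies     = 15624  chunks in the array
 *   15624 * copies          = 31248  used chunks
 *   DIV_ROUND_UP(31248, 4)  = 7812   used chunks per device
 *   dev_sectors = 7812 << 7 = 999936 sectors
 *   stride = (7812 / far_copies) << 7 = 999936 sectors
 */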
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 0f64d71..cb888d8 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -1921,6 +1921,10 @@
 	} else {
 		/* default values */
 		switch (c->delivery_system) {
+		case SYS_DVBS:
+		case SYS_DVBS2:
+		case SYS_ISDBS:
+		case SYS_TURBO:
 		case SYS_DVBC_ANNEX_A:
 		case SYS_DVBC_ANNEX_C:
 			fepriv->min_delay = HZ / 20;
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 860c112..bef5296 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1018,22 +1018,6 @@
 
 	spin_lock_init(&dev->hw_lock);
 
-	/* claim the resources */
-	error = -EBUSY;
-	dev->hw_io = pnp_port_start(pnp_dev, 0);
-	if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
-		dev->hw_io = -1;
-		dev->irq = -1;
-		goto error;
-	}
-
-	dev->irq = pnp_irq(pnp_dev, 0);
-	if (request_irq(dev->irq, ene_isr,
-			IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
-		dev->irq = -1;
-		goto error;
-	}
-
 	pnp_set_drvdata(pnp_dev, dev);
 	dev->pnp_dev = pnp_dev;
 
@@ -1086,6 +1070,22 @@
 	device_set_wakeup_capable(&pnp_dev->dev, true);
 	device_set_wakeup_enable(&pnp_dev->dev, true);
 
+	/* claim the resources */
+	error = -EBUSY;
+	dev->hw_io = pnp_port_start(pnp_dev, 0);
+	if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
+		dev->hw_io = -1;
+		dev->irq = -1;
+		goto error;
+	}
+
+	dev->irq = pnp_irq(pnp_dev, 0);
+	if (request_irq(dev->irq, ene_isr,
+			IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
+		dev->irq = -1;
+		goto error;
+	}
+
 	error = rc_register_device(rdev);
 	if (error < 0)
 		goto error;
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 392d4be..4a3a238 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -197,7 +197,7 @@
 	/*
 	 * Newer reviews of this chipset uses port 8 instead of 5
 	 */
-	if ((chip != 0x0408) || (chip != 0x0804))
+	if ((chip != 0x0408) && (chip != 0x0804))
 		fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV2;
 	else
 		fintek->logical_dev_cir = LOGICAL_DEV_CIR_REV1;
@@ -514,16 +514,6 @@
 
 	spin_lock_init(&fintek->fintek_lock);
 
-	ret = -EBUSY;
-	/* now claim resources */
-	if (!request_region(fintek->cir_addr,
-			    fintek->cir_port_len, FINTEK_DRIVER_NAME))
-		goto failure;
-
-	if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
-			FINTEK_DRIVER_NAME, (void *)fintek))
-		goto failure;
-
 	pnp_set_drvdata(pdev, fintek);
 	fintek->pdev = pdev;
 
@@ -558,6 +548,16 @@
 	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
 	rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
 
+	ret = -EBUSY;
+	/* now claim resources */
+	if (!request_region(fintek->cir_addr,
+			    fintek->cir_port_len, FINTEK_DRIVER_NAME))
+		goto failure;
+
+	if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
+			FINTEK_DRIVER_NAME, (void *)fintek))
+		goto failure;
+
 	ret = rc_register_device(rdev);
 	if (ret)
 		goto failure;
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 682009d..0e49c99 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1515,16 +1515,6 @@
 	/* initialize raw event */
 	init_ir_raw_event(&itdev->rawir);
 
-	ret = -EBUSY;
-	/* now claim resources */
-	if (!request_region(itdev->cir_addr,
-				dev_desc->io_region_size, ITE_DRIVER_NAME))
-		goto failure;
-
-	if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
-			ITE_DRIVER_NAME, (void *)itdev))
-		goto failure;
-
 	/* set driver data into the pnp device */
 	pnp_set_drvdata(pdev, itdev);
 	itdev->pdev = pdev;
@@ -1600,6 +1590,16 @@
 	rdev->driver_name = ITE_DRIVER_NAME;
 	rdev->map_name = RC_MAP_RC6_MCE;
 
+	ret = -EBUSY;
+	/* now claim resources */
+	if (!request_region(itdev->cir_addr,
+				dev_desc->io_region_size, ITE_DRIVER_NAME))
+		goto failure;
+
+	if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
+			ITE_DRIVER_NAME, (void *)itdev))
+		goto failure;
+
 	ret = rc_register_device(rdev);
 	if (ret)
 		goto failure;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 144f3f5..8b2c071 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -1021,24 +1021,6 @@
 	spin_lock_init(&nvt->nvt_lock);
 	spin_lock_init(&nvt->tx.lock);
 
-	ret = -EBUSY;
-	/* now claim resources */
-	if (!request_region(nvt->cir_addr,
-			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
-		goto failure;
-
-	if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
-			NVT_DRIVER_NAME, (void *)nvt))
-		goto failure;
-
-	if (!request_region(nvt->cir_wake_addr,
-			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
-		goto failure;
-
-	if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
-			NVT_DRIVER_NAME, (void *)nvt))
-		goto failure;
-
 	pnp_set_drvdata(pdev, nvt);
 	nvt->pdev = pdev;
 
@@ -1085,6 +1067,24 @@
 	rdev->tx_resolution = XYZ;
 #endif
 
+	ret = -EBUSY;
+	/* now claim resources */
+	if (!request_region(nvt->cir_addr,
+			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+		goto failure;
+
+	if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
+			NVT_DRIVER_NAME, (void *)nvt))
+		goto failure;
+
+	if (!request_region(nvt->cir_wake_addr,
+			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+		goto failure;
+
+	if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
+			NVT_DRIVER_NAME, (void *)nvt))
+		goto failure;
+
 	ret = rc_register_device(rdev);
 	if (ret)
 		goto failure;
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index af52658..342c2c8 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -991,39 +991,10 @@
 		"(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
 		data->wbase, data->ebase, data->sbase, data->irq);
 
-	if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
-		dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
-			data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
-		err = -EBUSY;
-		goto exit_free_data;
-	}
-
-	if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
-		dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
-			data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
-		err = -EBUSY;
-		goto exit_release_wbase;
-	}
-
-	if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
-		dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
-			data->sbase, data->sbase + SP_IOMEM_LEN - 1);
-		err = -EBUSY;
-		goto exit_release_ebase;
-	}
-
-	err = request_irq(data->irq, wbcir_irq_handler,
-			  IRQF_DISABLED, DRVNAME, device);
-	if (err) {
-		dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
-		err = -EBUSY;
-		goto exit_release_sbase;
-	}
-
 	led_trigger_register_simple("cir-tx", &data->txtrigger);
 	if (!data->txtrigger) {
 		err = -ENOMEM;
-		goto exit_free_irq;
+		goto exit_free_data;
 	}
 
 	led_trigger_register_simple("cir-rx", &data->rxtrigger);
@@ -1062,9 +1033,38 @@
 	data->dev->priv = data;
 	data->dev->dev.parent = &device->dev;
 
+	if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
+		dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+			data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
+		err = -EBUSY;
+		goto exit_free_rc;
+	}
+
+	if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
+		dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+			data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
+		err = -EBUSY;
+		goto exit_release_wbase;
+	}
+
+	if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
+		dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+			data->sbase, data->sbase + SP_IOMEM_LEN - 1);
+		err = -EBUSY;
+		goto exit_release_ebase;
+	}
+
+	err = request_irq(data->irq, wbcir_irq_handler,
+			  IRQF_DISABLED, DRVNAME, device);
+	if (err) {
+		dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
+		err = -EBUSY;
+		goto exit_release_sbase;
+	}
+
 	err = rc_register_device(data->dev);
 	if (err)
-		goto exit_free_rc;
+		goto exit_free_irq;
 
 	device_init_wakeup(&device->dev, 1);
 
@@ -1072,14 +1072,6 @@
 
 	return 0;
 
-exit_free_rc:
-	rc_free_device(data->dev);
-exit_unregister_led:
-	led_classdev_unregister(&data->led);
-exit_unregister_rxtrigger:
-	led_trigger_unregister_simple(data->rxtrigger);
-exit_unregister_txtrigger:
-	led_trigger_unregister_simple(data->txtrigger);
 exit_free_irq:
 	free_irq(data->irq, device);
 exit_release_sbase:
@@ -1088,6 +1080,14 @@
 	release_region(data->ebase, EHFUNC_IOMEM_LEN);
 exit_release_wbase:
 	release_region(data->wbase, WAKEUP_IOMEM_LEN);
+exit_free_rc:
+	rc_free_device(data->dev);
+exit_unregister_led:
+	led_classdev_unregister(&data->led);
+exit_unregister_rxtrigger:
+	led_trigger_unregister_simple(data->rxtrigger);
+exit_unregister_txtrigger:
+	led_trigger_unregister_simple(data->txtrigger);
 exit_free_data:
 	kfree(data);
 	pnp_set_drvdata(device, NULL);
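/*
 * Illustrative sketch, not part of the patch: the ene_ir, fintek-cir,
 * ite-cir, nuvoton-cir and winbond-cir hunks above all move
 * request_region()/request_irq() from early in probe to just before
 * rc_register_device(), so an interrupt on a (possibly shared) line that
 * fires right after request_irq() only ever sees fully initialised
 * driver state.  All "example_*" names are hypothetical; needs
 * <linux/pnp.h>, <linux/interrupt.h>, <linux/slab.h>, <media/rc-core.h>.
 */
#define EXAMPLE_IO_SIZE 8

struct example_dev {			/* hypothetical driver state */
	unsigned long hw_io;
	int irq;
	spinlock_t lock;
	struct rc_dev *rdev;
};

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_dev *dev = data;

	/* safe: every field used here was set up before request_irq() */
	spin_lock(&dev->lock);
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}

static int example_probe(struct pnp_dev *pdev,
			 const struct pnp_device_id *dev_id)
{
	struct example_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	spin_lock_init(&dev->lock);
	pnp_set_drvdata(pdev, dev);
	dev->hw_io = pnp_port_start(pdev, 0);
	dev->irq = pnp_irq(pdev, 0);
	/* ... allocate dev->rdev with rc_allocate_device() and fill it in ... */

	/* claim hardware resources last: the ISR may run immediately */
	ret = -EBUSY;
	if (!request_region(dev->hw_io, EXAMPLE_IO_SIZE, "example-cir"))
		goto err_free;
	if (request_irq(dev->irq, example_isr, IRQF_SHARED,
			"example-cir", dev))
		goto err_region;

	ret = rc_register_device(dev->rdev);
	if (ret)
		goto err_irq;
	return 0;

err_irq:
	free_irq(dev->irq, dev);
err_region:
	release_region(dev->hw_io, EXAMPLE_IO_SIZE);
err_free:
	pnp_set_drvdata(pdev, NULL);
	kfree(dev);
	return ret;
}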
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index db8e508..863c755 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -2923,6 +2923,10 @@
 	 * not the JPEG end of frame ('ff d9').
 	 */
 
+	/* count the packets and their size */
+	sd->npkt++;
+	sd->pktsz += len;
+
 /*fixme: assumption about the following code:
  *	- there can be only one marker in a packet
  */
@@ -2945,10 +2949,6 @@
 		data += i;
 	}
 
-	/* count the packets and their size */
-	sd->npkt++;
-	sd->pktsz += len;
-
 	/* search backwards if there is a marker in the packet */
 	for (i = len - 1; --i >= 0; ) {
 		if (data[i] != 0xff) {
diff --git a/drivers/media/video/marvell-ccic/mmp-driver.c b/drivers/media/video/marvell-ccic/mmp-driver.c
index d235523..c4c17fe 100644
--- a/drivers/media/video/marvell-ccic/mmp-driver.c
+++ b/drivers/media/video/marvell-ccic/mmp-driver.c
@@ -181,7 +181,6 @@
 	INIT_LIST_HEAD(&cam->devlist);
 
 	mcam = &cam->mcam;
-	mcam->platform = MHP_Armada610;
 	mcam->plat_power_up = mmpcam_power_up;
 	mcam->plat_power_down = mmpcam_power_down;
 	mcam->dev = &pdev->dev;
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index b06efd2..7e9b2c6 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -246,28 +246,37 @@
 
 }
 
-static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane)
-{
-	if (!fr || plane >= fr->fmt->memplanes)
-		return 0;
-	return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8;
-}
-
-static int queue_setup(struct vb2_queue *vq,  const struct v4l2_format *pfmt,
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
 		       unsigned int *num_buffers, unsigned int *num_planes,
 		       unsigned int sizes[], void *allocators[])
 {
+	const struct v4l2_pix_format_mplane *pixm = NULL;
 	struct fimc_ctx *ctx = vq->drv_priv;
-	struct fimc_fmt *fmt = ctx->d_frame.fmt;
+	struct fimc_frame *frame = &ctx->d_frame;
+	struct fimc_fmt *fmt = frame->fmt;
+	unsigned long wh;
 	int i;
 
-	if (!fmt)
+	if (pfmt) {
+		pixm = &pfmt->fmt.pix_mp;
+		fmt = fimc_find_format(&pixm->pixelformat, NULL,
+				       FMT_FLAGS_CAM | FMT_FLAGS_M2M, -1);
+		wh = pixm->width * pixm->height;
+	} else {
+		wh = frame->f_width * frame->f_height;
+	}
+
+	if (fmt == NULL)
 		return -EINVAL;
 
 	*num_planes = fmt->memplanes;
 
 	for (i = 0; i < fmt->memplanes; i++) {
-		sizes[i] = get_plane_size(&ctx->d_frame, i);
+		unsigned int size = (wh * fmt->depth[i]) / 8;
+		if (pixm)
+			sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
+		else
+			sizes[i] = size;
 		allocators[i] = ctx->fimc_dev->alloc_ctx;
 	}
 
@@ -1383,7 +1392,7 @@
 	fimc_capture_try_crop(ctx, r, crop->pad);
 
 	if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
-		mutex_lock(&fimc->lock);
+		mutex_unlock(&fimc->lock);
 		*v4l2_subdev_get_try_crop(fh, crop->pad) = *r;
 		return 0;
 	}
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index e184e65..e09ba7b 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -1048,14 +1048,14 @@
  * @mask: the color flags to match
  * @index: offset in the fimc_formats array, ignored if negative
  */
-struct fimc_fmt *fimc_find_format(u32 *pixelformat, u32 *mbus_code,
+struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
 				  unsigned int mask, int index)
 {
 	struct fimc_fmt *fmt, *def_fmt = NULL;
 	unsigned int i;
 	int id = 0;
 
-	if (index >= ARRAY_SIZE(fimc_formats))
+	if (index >= (int)ARRAY_SIZE(fimc_formats))
 		return NULL;
 
 	for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index a18291e..84fd835 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -718,7 +718,7 @@
 int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f);
 void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
 			       struct v4l2_pix_format_mplane *pix);
-struct fimc_fmt *fimc_find_format(u32 *pixelformat, u32 *mbus_code,
+struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
 				  unsigned int mask, int index);
 
 int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index eb25756..aedb970 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -530,7 +530,10 @@
 		if (icl->reset)
 			icl->reset(icd->pdev);
 
+		/* Don't mess with the host during probe */
+		mutex_lock(&ici->host_lock);
 		ret = ici->ops->add(icd);
+		mutex_unlock(&ici->host_lock);
 		if (ret < 0) {
 			dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
 			goto eiciadd;
@@ -956,7 +959,7 @@
 {
 	struct soc_camera_device *icd;
 
-	mutex_lock(&list_lock);
+	mutex_lock(&ici->host_lock);
 
 	list_for_each_entry(icd, &devices, list) {
 		if (icd->iface == ici->nr) {
@@ -967,7 +970,7 @@
 		}
 	}
 
-	mutex_unlock(&list_lock);
+	mutex_unlock(&ici->host_lock);
 }
 
 #ifdef CONFIG_I2C_BOARDINFO
@@ -1313,6 +1316,7 @@
 	list_add_tail(&ici->list, &hosts);
 	mutex_unlock(&list_lock);
 
+	mutex_init(&ici->host_lock);
 	scan_add_host(ici);
 
 	return 0;
diff --git a/drivers/media/video/videobuf2-dma-contig.c b/drivers/media/video/videobuf2-dma-contig.c
index f17ad98..4b71326 100644
--- a/drivers/media/video/videobuf2-dma-contig.c
+++ b/drivers/media/video/videobuf2-dma-contig.c
@@ -15,6 +15,7 @@
 #include <linux/dma-mapping.h>
 
 #include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
 #include <media/videobuf2-memops.h>
 
 struct vb2_dc_conf {
@@ -85,7 +86,7 @@
 {
 	struct vb2_dc_buf *buf = buf_priv;
 	if (!buf)
-		return 0;
+		return NULL;
 
 	return buf->vaddr;
 }
diff --git a/drivers/media/video/videobuf2-memops.c b/drivers/media/video/videobuf2-memops.c
index c41cb60..504cd4c 100644
--- a/drivers/media/video/videobuf2-memops.c
+++ b/drivers/media/video/videobuf2-memops.c
@@ -55,6 +55,7 @@
 
 	return vma_copy;
 }
+EXPORT_SYMBOL_GPL(vb2_get_vma);
 
 /**
  * vb2_put_userptr() - release a userspace virtual memory area
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index c171afa..69e9d54 100644
--- a/drivers/message/fusion/mptlan.h
+++ b/drivers/message/fusion/mptlan.h
@@ -69,7 +69,6 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
-// #include <linux/trdevice.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 11e44386..b5a0032 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -162,6 +162,7 @@
 	bool "TPS6586x Power Management chips"
 	depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
 	select MFD_CORE
+	depends on REGULATOR
 	help
 	  If you say yes here you get support for the TPS6586X series of
 	  Power Management chips.
@@ -376,6 +377,7 @@
 
 config MFD_DA9052_SPI
 	bool "Support Dialog Semiconductor DA9052/53 PMIC variants with SPI"
+	select IRQ_DOMAIN
 	select REGMAP_SPI
 	select REGMAP_IRQ
 	select PMIC_DA9052
@@ -388,6 +390,7 @@
 
 config MFD_DA9052_I2C
 	bool "Support Dialog Semiconductor DA9052/53 PMIC variants with I2C"
+	select IRQ_DOMAIN
 	select REGMAP_I2C
 	select REGMAP_IRQ
 	select PMIC_DA9052
@@ -558,6 +561,7 @@
 	bool "Support Wolfson Microelectronics WM8994"
 	select MFD_CORE
 	select REGMAP_I2C
+	select IRQ_DOMAIN
 	select REGMAP_IRQ
 	depends on I2C=y && GENERIC_HARDIRQS
 	help
@@ -888,6 +892,16 @@
 	  MFD controller. This controller embeds regulator and
 	  thermal devices for Freescale i.MX platforms.
 
+config MFD_PALMAS
+	bool "Support for the TI Palmas series chips"
+	select MFD_CORE
+	select REGMAP_I2C
+	select REGMAP_IRQ
+	depends on I2C=y
+	help
+	  If you say yes here you get support for the Palmas
+	  series of PMIC chips from Texas Instruments.
+
 endmenu
 endif
 
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 05fa538..77293e1 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -113,6 +113,8 @@
 obj-$(CONFIG_MFD_TPS65090)	+= tps65090.o
 obj-$(CONFIG_MFD_AAT2870_CORE)	+= aat2870-core.o
 obj-$(CONFIG_MFD_INTEL_MSIC)	+= intel_msic.o
+obj-$(CONFIG_MFD_PALMAS)	+= palmas.o
 obj-$(CONFIG_MFD_RC5T583)	+= rc5t583.o rc5t583-irq.o
 obj-$(CONFIG_MFD_S5M_CORE)	+= s5m-core.o s5m-irq.o
 obj-$(CONFIG_MFD_ANATOP)	+= anatop-mfd.o
+obj-$(CONFIG_MFD_LM3533)	+= lm3533-core.o lm3533-ctrlbank.o
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 7ff313f..7776aff 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -659,12 +659,11 @@
 	ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
 				  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 				  da9052->irq_base, &da9052_regmap_irq_chip,
-				  NULL);
+				  &da9052->irq_data);
 	if (ret < 0)
 		goto regmap_err;
 
-	desc = irq_to_desc(da9052->chip_irq);
-	da9052->irq_base = regmap_irq_chip_get_base(desc->action->dev_id);
+	da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
 
 	ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
 			      ARRAY_SIZE(da9052_subdev_info), NULL, 0);
@@ -681,8 +680,7 @@
 
 void da9052_device_exit(struct da9052 *da9052)
 {
-	regmap_del_irq_chip(da9052->chip_irq,
-			    irq_get_irq_data(da9052->irq_base)->chip_data);
+	regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
 	mfd_remove_devices(da9052->dev);
 }
 
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index c8aae66..7e96bb2 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,6 +25,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
+#include <plat/cpu.h>
 #include <plat/usb.h>
 #include <linux/pm_runtime.h>
 
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
new file mode 100644
index 0000000..00c0aba
--- /dev/null
+++ b/drivers/mfd/palmas.c
@@ -0,0 +1,509 @@
+/*
+ * TI Palmas MFD Driver
+ *
+ * Copyright 2011-2012 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/palmas.h>
+
+static const struct resource gpadc_resource[] = {
+	{
+		.name = "EOC_SW",
+		.start = PALMAS_GPADC_EOC_SW_IRQ,
+		.end = PALMAS_GPADC_EOC_SW_IRQ,
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+static const struct resource usb_resource[] = {
+	{
+		.name = "ID",
+		.start = PALMAS_ID_OTG_IRQ,
+		.end = PALMAS_ID_OTG_IRQ,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "ID_WAKEUP",
+		.start = PALMAS_ID_IRQ,
+		.end = PALMAS_ID_IRQ,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS",
+		.start = PALMAS_VBUS_OTG_IRQ,
+		.end = PALMAS_VBUS_OTG_IRQ,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_WAKEUP",
+		.start = PALMAS_VBUS_IRQ,
+		.end = PALMAS_VBUS_IRQ,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static const struct resource rtc_resource[] = {
+	{
+		.name = "RTC_ALARM",
+		.start = PALMAS_RTC_ALARM_IRQ,
+		.end = PALMAS_RTC_ALARM_IRQ,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static const struct resource pwron_resource[] = {
+	{
+		.name = "PWRON_BUTTON",
+		.start = PALMAS_PWRON_IRQ,
+		.end = PALMAS_PWRON_IRQ,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+enum palmas_ids {
+	PALMAS_PMIC_ID,
+	PALMAS_GPIO_ID,
+	PALMAS_LEDS_ID,
+	PALMAS_WDT_ID,
+	PALMAS_RTC_ID,
+	PALMAS_PWRBUTTON_ID,
+	PALMAS_GPADC_ID,
+	PALMAS_RESOURCE_ID,
+	PALMAS_CLK_ID,
+	PALMAS_PWM_ID,
+	PALMAS_USB_ID,
+};
+
+static const struct mfd_cell palmas_children[] = {
+	{
+		.name = "palmas-pmic",
+		.id = PALMAS_PMIC_ID,
+	},
+	{
+		.name = "palmas-gpio",
+		.id = PALMAS_GPIO_ID,
+	},
+	{
+		.name = "palmas-leds",
+		.id = PALMAS_LEDS_ID,
+	},
+	{
+		.name = "palmas-wdt",
+		.id = PALMAS_WDT_ID,
+	},
+	{
+		.name = "palmas-rtc",
+		.num_resources = ARRAY_SIZE(rtc_resource),
+		.resources = rtc_resource,
+		.id = PALMAS_RTC_ID,
+	},
+	{
+		.name = "palmas-pwrbutton",
+		.num_resources = ARRAY_SIZE(pwron_resource),
+		.resources = pwron_resource,
+		.id = PALMAS_PWRBUTTON_ID,
+	},
+	{
+		.name = "palmas-gpadc",
+		.num_resources = ARRAY_SIZE(gpadc_resource),
+		.resources = gpadc_resource,
+		.id = PALMAS_GPADC_ID,
+	},
+	{
+		.name = "palmas-resource",
+		.id = PALMAS_RESOURCE_ID,
+	},
+	{
+		.name = "palmas-clk",
+		.id = PALMAS_CLK_ID,
+	},
+	{
+		.name = "palmas-pwm",
+		.id = PALMAS_PWM_ID,
+	},
+	{
+		.name = "palmas-usb",
+		.num_resources = ARRAY_SIZE(usb_resource),
+		.resources = usb_resource,
+		.id = PALMAS_USB_ID,
+	}
+};
+
+static const struct regmap_config palmas_regmap_config[PALMAS_NUM_CLIENTS] = {
+	{
+		.reg_bits = 8,
+		.val_bits = 8,
+		.max_register = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
+					PALMAS_PRIMARY_SECONDARY_PAD3),
+	},
+	{
+		.reg_bits = 8,
+		.val_bits = 8,
+		.max_register = PALMAS_BASE_TO_REG(PALMAS_GPADC_BASE,
+					PALMAS_GPADC_SMPS_VSEL_MONITORING),
+	},
+	{
+		.reg_bits = 8,
+		.val_bits = 8,
+		.max_register = PALMAS_BASE_TO_REG(PALMAS_TRIM_GPADC_BASE,
+					PALMAS_GPADC_TRIM16),
+	},
+};
+
+static const struct regmap_irq palmas_irqs[] = {
+	/* INT1 IRQs */
+	[PALMAS_CHARG_DET_N_VBUS_OVV_IRQ] = {
+		.mask = PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV,
+	},
+	[PALMAS_PWRON_IRQ] = {
+		.mask = PALMAS_INT1_STATUS_PWRON,
+	},
+	[PALMAS_LONG_PRESS_KEY_IRQ] = {
+		.mask = PALMAS_INT1_STATUS_LONG_PRESS_KEY,
+	},
+	[PALMAS_RPWRON_IRQ] = {
+		.mask = PALMAS_INT1_STATUS_RPWRON,
+	},
+	[PALMAS_PWRDOWN_IRQ] = {
+		.mask = PALMAS_INT1_STATUS_PWRDOWN,
+	},
+	[PALMAS_HOTDIE_IRQ] = {
+		.mask = PALMAS_INT1_STATUS_HOTDIE,
+	},
+	[PALMAS_VSYS_MON_IRQ] = {
+		.mask = PALMAS_INT1_STATUS_VSYS_MON,
+	},
+	[PALMAS_VBAT_MON_IRQ] = {
+		.mask = PALMAS_INT1_STATUS_VBAT_MON,
+	},
+	/* INT2 IRQs*/
+	[PALMAS_RTC_ALARM_IRQ] = {
+		.mask = PALMAS_INT2_STATUS_RTC_ALARM,
+		.reg_offset = 1,
+	},
+	[PALMAS_RTC_TIMER_IRQ] = {
+		.mask = PALMAS_INT2_STATUS_RTC_TIMER,
+		.reg_offset = 1,
+	},
+	[PALMAS_WDT_IRQ] = {
+		.mask = PALMAS_INT2_STATUS_WDT,
+		.reg_offset = 1,
+	},
+	[PALMAS_BATREMOVAL_IRQ] = {
+		.mask = PALMAS_INT2_STATUS_BATREMOVAL,
+		.reg_offset = 1,
+	},
+	[PALMAS_RESET_IN_IRQ] = {
+		.mask = PALMAS_INT2_STATUS_RESET_IN,
+		.reg_offset = 1,
+	},
+	[PALMAS_FBI_BB_IRQ] = {
+		.mask = PALMAS_INT2_STATUS_FBI_BB,
+		.reg_offset = 1,
+	},
+	[PALMAS_SHORT_IRQ] = {
+		.mask = PALMAS_INT2_STATUS_SHORT,
+		.reg_offset = 1,
+	},
+	[PALMAS_VAC_ACOK_IRQ] = {
+		.mask = PALMAS_INT2_STATUS_VAC_ACOK,
+		.reg_offset = 1,
+	},
+	/* INT3 IRQs */
+	[PALMAS_GPADC_AUTO_0_IRQ] = {
+		.mask = PALMAS_INT3_STATUS_GPADC_AUTO_0,
+		.reg_offset = 2,
+	},
+	[PALMAS_GPADC_AUTO_1_IRQ] = {
+		.mask = PALMAS_INT3_STATUS_GPADC_AUTO_1,
+		.reg_offset = 2,
+	},
+	[PALMAS_GPADC_EOC_SW_IRQ] = {
+		.mask = PALMAS_INT3_STATUS_GPADC_EOC_SW,
+		.reg_offset = 2,
+	},
+	[PALMAS_GPADC_EOC_RT_IRQ] = {
+		.mask = PALMAS_INT3_STATUS_GPADC_EOC_RT,
+		.reg_offset = 2,
+	},
+	[PALMAS_ID_OTG_IRQ] = {
+		.mask = PALMAS_INT3_STATUS_ID_OTG,
+		.reg_offset = 2,
+	},
+	[PALMAS_ID_IRQ] = {
+		.mask = PALMAS_INT3_STATUS_ID,
+		.reg_offset = 2,
+	},
+	[PALMAS_VBUS_OTG_IRQ] = {
+		.mask = PALMAS_INT3_STATUS_VBUS_OTG,
+		.reg_offset = 2,
+	},
+	[PALMAS_VBUS_IRQ] = {
+		.mask = PALMAS_INT3_STATUS_VBUS,
+		.reg_offset = 2,
+	},
+	/* INT4 IRQs */
+	[PALMAS_GPIO_0_IRQ] = {
+		.mask = PALMAS_INT4_STATUS_GPIO_0,
+		.reg_offset = 3,
+	},
+	[PALMAS_GPIO_1_IRQ] = {
+		.mask = PALMAS_INT4_STATUS_GPIO_1,
+		.reg_offset = 3,
+	},
+	[PALMAS_GPIO_2_IRQ] = {
+		.mask = PALMAS_INT4_STATUS_GPIO_2,
+		.reg_offset = 3,
+	},
+	[PALMAS_GPIO_3_IRQ] = {
+		.mask = PALMAS_INT4_STATUS_GPIO_3,
+		.reg_offset = 3,
+	},
+	[PALMAS_GPIO_4_IRQ] = {
+		.mask = PALMAS_INT4_STATUS_GPIO_4,
+		.reg_offset = 3,
+	},
+	[PALMAS_GPIO_5_IRQ] = {
+		.mask = PALMAS_INT4_STATUS_GPIO_5,
+		.reg_offset = 3,
+	},
+	[PALMAS_GPIO_6_IRQ] = {
+		.mask = PALMAS_INT4_STATUS_GPIO_6,
+		.reg_offset = 3,
+	},
+	[PALMAS_GPIO_7_IRQ] = {
+		.mask = PALMAS_INT4_STATUS_GPIO_7,
+		.reg_offset = 3,
+	},
+};
+
+static struct regmap_irq_chip palmas_irq_chip = {
+	.name = "palmas",
+	.irqs = palmas_irqs,
+	.num_irqs = ARRAY_SIZE(palmas_irqs),
+
+	.num_regs = 4,
+	.irq_reg_stride = 5,
+	.status_base = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE,
+			PALMAS_INT1_STATUS),
+	.mask_base = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE,
+			PALMAS_INT1_MASK),
+};
+
+static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
+			    const struct i2c_device_id *id)
+{
+	struct palmas *palmas;
+	struct palmas_platform_data *pdata;
+	int ret = 0, i;
+	unsigned int reg, addr;
+	int slave;
+	struct mfd_cell *children;
+
+	pdata = dev_get_platdata(&i2c->dev);
+	if (!pdata)
+		return -EINVAL;
+
+	palmas = devm_kzalloc(&i2c->dev, sizeof(struct palmas), GFP_KERNEL);
+	if (palmas == NULL)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, palmas);
+	palmas->dev = &i2c->dev;
+	palmas->id = id->driver_data;
+	palmas->irq = i2c->irq;
+
+	for (i = 0; i < PALMAS_NUM_CLIENTS; i++) {
+		if (i == 0)
+			palmas->i2c_clients[i] = i2c;
+		else {
+			palmas->i2c_clients[i] =
+					i2c_new_dummy(i2c->adapter,
+							i2c->addr + i);
+			if (!palmas->i2c_clients[i]) {
+				dev_err(palmas->dev,
+					"can't attach client %d\n", i);
+				ret = -ENOMEM;
+				goto err;
+			}
+		}
+		palmas->regmap[i] = devm_regmap_init_i2c(palmas->i2c_clients[i],
+				&palmas_regmap_config[i]);
+		if (IS_ERR(palmas->regmap[i])) {
+			ret = PTR_ERR(palmas->regmap[i]);
+			dev_err(palmas->dev,
+				"Failed to allocate regmap %d, err: %d\n",
+				i, ret);
+			goto err;
+		}
+	}
+
+	ret = regmap_add_irq_chip(palmas->regmap[1], palmas->irq,
+			IRQF_ONESHOT | IRQF_TRIGGER_LOW, -1, &palmas_irq_chip,
+			&palmas->irq_data);
+	if (ret < 0)
+		goto err;
+
+	slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
+	addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
+			PALMAS_PRIMARY_SECONDARY_PAD1);
+
+	if (pdata->mux_from_pdata) {
+		reg = pdata->pad1;
+		ret = regmap_write(palmas->regmap[slave], addr, reg);
+		if (ret)
+			goto err;
+	} else {
+		ret = regmap_read(palmas->regmap[slave], addr, &reg);
+		if (ret)
+			goto err;
+	}
+
+	if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0))
+		palmas->gpio_muxed |= PALMAS_GPIO_0_MUXED;
+	if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK))
+		palmas->gpio_muxed |= PALMAS_GPIO_1_MUXED;
+	else if ((reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK) ==
+			(2 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT))
+		palmas->led_muxed |= PALMAS_LED1_MUXED;
+	else if ((reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK) ==
+			(3 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT))
+		palmas->pwm_muxed |= PALMAS_PWM1_MUXED;
+	if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_MASK))
+		palmas->gpio_muxed |= PALMAS_GPIO_2_MUXED;
+	else if ((reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_MASK) ==
+			(2 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT))
+		palmas->led_muxed |= PALMAS_LED2_MUXED;
+	else if ((reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_MASK) ==
+			(3 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT))
+		palmas->pwm_muxed |= PALMAS_PWM2_MUXED;
+	if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3))
+		palmas->gpio_muxed |= PALMAS_GPIO_3_MUXED;
+
+	addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
+			PALMAS_PRIMARY_SECONDARY_PAD2);
+
+	if (pdata->mux_from_pdata) {
+		reg = pdata->pad2;
+		ret = regmap_write(palmas->regmap[slave], addr, reg);
+		if (ret)
+			goto err;
+	} else {
+		ret = regmap_read(palmas->regmap[slave], addr, &reg);
+		if (ret)
+			goto err;
+	}
+
+	if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4))
+		palmas->gpio_muxed |= PALMAS_GPIO_4_MUXED;
+	if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK))
+		palmas->gpio_muxed |= PALMAS_GPIO_5_MUXED;
+	if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6))
+		palmas->gpio_muxed |= PALMAS_GPIO_6_MUXED;
+	if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK))
+		palmas->gpio_muxed |= PALMAS_GPIO_7_MUXED;
+
+	dev_info(palmas->dev, "Muxing GPIO %x, PWM %x, LED %x\n",
+			palmas->gpio_muxed, palmas->pwm_muxed,
+			palmas->led_muxed);
+
+	reg = pdata->power_ctrl;
+
+	slave = PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE);
+	addr = PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_POWER_CTRL);
+
+	ret = regmap_write(palmas->regmap[slave], addr, reg);
+	if (ret)
+		goto err;
+
+	children = kmemdup(palmas_children, sizeof(palmas_children),
+			   GFP_KERNEL);
+	if (!children) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = mfd_add_devices(palmas->dev, -1,
+			      children, ARRAY_SIZE(palmas_children),
+			      NULL, regmap_irq_chip_get_base(palmas->irq_data));
+	kfree(children);
+
+	if (ret < 0)
+		goto err;
+
+	return ret;
+
+err:
+	mfd_remove_devices(palmas->dev);
+	kfree(palmas);
+	return ret;
+}
+
+static int palmas_i2c_remove(struct i2c_client *i2c)
+{
+	struct palmas *palmas = i2c_get_clientdata(i2c);
+
+	mfd_remove_devices(palmas->dev);
+	regmap_del_irq_chip(palmas->irq, palmas->irq_data);
+
+	return 0;
+}
+
+static const struct i2c_device_id palmas_i2c_id[] = {
+	{ "palmas", },
+	{ "twl6035", },
+	{ "twl6037", },
+	{ "tps65913", },
+};
+MODULE_DEVICE_TABLE(i2c, palmas_i2c_id);
+
+static struct of_device_id __devinitdata of_palmas_match_tbl[] = {
+	{ .compatible = "ti,palmas", },
+	{ /* end */ }
+};
+
+static struct i2c_driver palmas_i2c_driver = {
+	.driver = {
+		   .name = "palmas",
+		   .of_match_table = of_palmas_match_tbl,
+		   .owner = THIS_MODULE,
+	},
+	.probe = palmas_i2c_probe,
+	.remove = palmas_i2c_remove,
+	.id_table = palmas_i2c_id,
+};
+
+static int __init palmas_i2c_init(void)
+{
+	return i2c_add_driver(&palmas_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(palmas_i2c_init);
+
+static void __exit palmas_i2c_exit(void)
+{
+	i2c_del_driver(&palmas_i2c_driver);
+}
+module_exit(palmas_i2c_exit);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_DESCRIPTION("Palmas chip family multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index a66d4df..47f802b 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -78,17 +78,6 @@
 	},
 };
 
-struct tps65090 {
-	struct mutex		lock;
-	struct device		*dev;
-	struct i2c_client	*client;
-	struct regmap		*rmap;
-	struct irq_chip		irq_chip;
-	struct mutex		irq_lock;
-	int			irq_base;
-	unsigned int		id;
-};
-
 int tps65090_write(struct device *dev, int reg, uint8_t val)
 {
 	struct tps65090 *tps = dev_get_drvdata(dev);
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index a5ddf31..c84b550 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/gpio.h>
 #include <linux/i2c.h>
+#include <linux/regulator/of_regulator.h>
 
 #include <linux/mfd/core.h>
 #include <linux/mfd/tps6586x.h>
@@ -460,6 +461,7 @@
 
 		pdev->dev.parent = tps6586x->dev;
 		pdev->dev.platform_data = subdev->platform_data;
+		pdev->dev.of_node = subdev->of_node;
 
 		ret = platform_device_add(pdev);
 		if (ret) {
@@ -474,6 +476,86 @@
 	return ret;
 }
 
+#ifdef CONFIG_OF
+static struct of_regulator_match tps6586x_matches[] = {
+	{ .name = "sm0",     .driver_data = (void *)TPS6586X_ID_SM_0    },
+	{ .name = "sm1",     .driver_data = (void *)TPS6586X_ID_SM_1    },
+	{ .name = "sm2",     .driver_data = (void *)TPS6586X_ID_SM_2    },
+	{ .name = "ldo0",    .driver_data = (void *)TPS6586X_ID_LDO_0   },
+	{ .name = "ldo1",    .driver_data = (void *)TPS6586X_ID_LDO_1   },
+	{ .name = "ldo2",    .driver_data = (void *)TPS6586X_ID_LDO_2   },
+	{ .name = "ldo3",    .driver_data = (void *)TPS6586X_ID_LDO_3   },
+	{ .name = "ldo4",    .driver_data = (void *)TPS6586X_ID_LDO_4   },
+	{ .name = "ldo5",    .driver_data = (void *)TPS6586X_ID_LDO_5   },
+	{ .name = "ldo6",    .driver_data = (void *)TPS6586X_ID_LDO_6   },
+	{ .name = "ldo7",    .driver_data = (void *)TPS6586X_ID_LDO_7   },
+	{ .name = "ldo8",    .driver_data = (void *)TPS6586X_ID_LDO_8   },
+	{ .name = "ldo9",    .driver_data = (void *)TPS6586X_ID_LDO_9   },
+	{ .name = "ldo_rtc", .driver_data = (void *)TPS6586X_ID_LDO_RTC },
+};
+
+static struct tps6586x_platform_data *tps6586x_parse_dt(struct i2c_client *client)
+{
+	const unsigned int num = ARRAY_SIZE(tps6586x_matches);
+	struct device_node *np = client->dev.of_node;
+	struct tps6586x_platform_data *pdata;
+	struct tps6586x_subdev_info *devs;
+	struct device_node *regs;
+	unsigned int count;
+	unsigned int i, j;
+	int err;
+
+	regs = of_find_node_by_name(np, "regulators");
+	if (!regs)
+		return NULL;
+
+	err = of_regulator_match(&client->dev, regs, tps6586x_matches, num);
+	if (err < 0) {
+		of_node_put(regs);
+		return NULL;
+	}
+
+	of_node_put(regs);
+	count = err;
+
+	devs = devm_kzalloc(&client->dev, count * sizeof(*devs), GFP_KERNEL);
+	if (!devs)
+		return NULL;
+
+	for (i = 0, j = 0; i < num && j < count; i++) {
+		if (!tps6586x_matches[i].init_data)
+			continue;
+
+		devs[j].name = "tps6586x-regulator";
+		devs[j].platform_data = tps6586x_matches[i].init_data;
+		devs[j].id = (int)tps6586x_matches[i].driver_data;
+		devs[j].of_node = tps6586x_matches[i].of_node;
+		j++;
+	}
+
+	pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	pdata->num_subdevs = count;
+	pdata->subdevs = devs;
+	pdata->gpio_base = -1;
+	pdata->irq_base = -1;
+
+	return pdata;
+}
+
+static struct of_device_id tps6586x_of_match[] = {
+	{ .compatible = "ti,tps6586x", },
+	{ },
+};
+#else
+static struct tps6586x_platform_data *tps6586x_parse_dt(struct i2c_client *client)
+{
+	return NULL;
+}
+#endif
+
 static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
 					const struct i2c_device_id *id)
 {
@@ -481,6 +563,9 @@
 	struct tps6586x *tps6586x;
 	int ret;
 
+	if (!pdata && client->dev.of_node)
+		pdata = tps6586x_parse_dt(client);
+
 	if (!pdata) {
 		dev_err(&client->dev, "tps6586x requires platform data\n");
 		return -ENOTSUPP;
@@ -573,6 +658,7 @@
 	.driver	= {
 		.name	= "tps6586x",
 		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(tps6586x_of_match),
 	},
 	.probe		= tps6586x_i2c_probe,
 	.remove		= __devexit_p(tps6586x_i2c_remove),
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 7c2267e..6fc90be 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -224,13 +224,6 @@
 #define HIGH_PERF_SQ			(1 << 3)
 #define CK32K_LOWPWR_EN			(1 << 7)
 
-
-/* chip-specific feature flags, for i2c_device_id.driver_data */
-#define TWL4030_VAUX2		BIT(0)	/* pre-5030 voltage ranges */
-#define TPS_SUBSET		BIT(1)	/* tps659[23]0 have fewer LDOs */
-#define TWL5031			BIT(2)  /* twl5031 has different registers */
-#define TWL6030_CLASS		BIT(3)	/* TWL6030 class */
-
 /*----------------------------------------------------------------------*/
 
 /* is driver active, bound to a chip? */
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 46b20c4..f1837f6 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -147,12 +147,6 @@
 		return 0;
 	}
 
-	if (!wm8994->irq_base) {
-		dev_err(wm8994->dev,
-			"No interrupt base specified, no interrupts\n");
-		return 0;
-	}
-
 	ret = regmap_add_irq_chip(wm8994->regmap, wm8994->irq,
 				  IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
 				  wm8994->irq_base, &wm8994_irq_chip,
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 032b847..b6f3842 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -94,6 +94,17 @@
 	.signal_direction	= true,
 };
 
+static struct variant_data variant_nomadik = {
+	.fifosize		= 16 * 4,
+	.fifohalfsize		= 8 * 4,
+	.clkreg			= MCI_CLK_ENABLE,
+	.datalength_bits	= 24,
+	.sdio			= true,
+	.st_clkdiv		= true,
+	.pwrreg_powerup		= MCI_PWR_ON,
+	.signal_direction	= true,
+};
+
 static struct variant_data variant_ux500 = {
 	.fifosize		= 30 * 4,
 	.fifohalfsize		= 8 * 4,
@@ -1397,7 +1408,7 @@
 	if (ret)
 		goto unmap;
 
-	if (dev->irq[1] == NO_IRQ || !dev->irq[1])
+	if (!dev->irq[1])
 		host->singleirq = true;
 	else {
 		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
@@ -1569,6 +1580,11 @@
 		.data	= &variant_u300,
 	},
 	{
+		.id     = 0x10180180,
+		.mask   = 0xf0ffffff,
+		.data	= &variant_nomadik,
+	},
+	{
 		.id     = 0x00280180,
 		.mask   = 0x00ffffff,
 		.data	= &variant_u300,
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index b0f2ef9..e3f5af9 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -363,6 +363,7 @@
 		goto out;
 
 	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dmach);
 	return;
 
 out:
@@ -403,6 +404,7 @@
 		goto out;
 
 	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dmach);
 	return;
 
 out:
@@ -531,6 +533,7 @@
 		goto out;
 
 	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dmach);
 	return;
 out:
 	dev_warn(mmc_dev(host->mmc),
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 58fc65f..f2f482b 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -376,7 +376,7 @@
 	 * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
 	 * operations are supported.
 	 */
-	if (mtd_read_fact_prot_reg(mtd, -1, -1, &retlen, NULL) == -EOPNOTSUPP)
+	if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP)
 		return -EOPNOTSUPP;
 
 	switch (mode) {
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 7341695..861ca8f 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -212,18 +212,17 @@
 	/* Link the private data with the MTD structure */
 	ams_delta_mtd->priv = this;
 
-	if (!request_mem_region(res->start, resource_size(res),
-			dev_name(&pdev->dev))) {
-		dev_err(&pdev->dev, "request_mem_region failed\n");
-		err = -EBUSY;
-		goto out_free;
-	}
+	/*
+	 * Don't try to request the memory region from here,
+	 * it should already have been requested by the
+	 * gpio-omap driver, and requesting it again would fail.
+	 */
 
 	io_base = ioremap(res->start, resource_size(res));
 	if (io_base == NULL) {
 		dev_err(&pdev->dev, "ioremap failed\n");
 		err = -EIO;
-		goto out_release_io;
+		goto out_free;
 	}
 
 	this->priv = io_base;
@@ -271,8 +270,6 @@
 	platform_set_drvdata(pdev, NULL);
 	gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
 	iounmap(io_base);
-out_release_io:
-	release_mem_region(res->start, resource_size(res));
 out_free:
 	kfree(ams_delta_mtd);
  out:
@@ -285,7 +282,6 @@
 static int __devexit ams_delta_cleanup(struct platform_device *pdev)
 {
 	void __iomem *io_base = platform_get_drvdata(pdev);
-	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	/* Release resources, unregister device */
 	nand_release(ams_delta_mtd);
@@ -293,7 +289,6 @@
 	gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
 	gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
 	iounmap(io_base);
-	release_mem_region(res->start, resource_size(res));
 
 	/* Free the MTD device structure */
 	kfree(ams_delta_mtd);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 75b1dde..9ec51ce 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -266,6 +266,7 @@
 	desc->callback		= dma_irq_callback;
 	desc->callback_param	= this;
 	dmaengine_submit(desc);
+	dma_async_issue_pending(get_dma_chan(this));
 
 	/* Wait for the interrupt from the DMA block. */
 	err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b982854..0c2bd80 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -66,10 +66,7 @@
 	  <http://www.tldp.org/docs.html#guide>.
 
 	  To compile this driver as a module, choose M here: the module
-	  will be called dummy.  If you want to use more than one dummy
-	  device at a time, you need to compile this driver as a module.
-	  Instead of 'dummy', the devices will then be called 'dummy0',
-	  'dummy1' etc.
+	  will be called dummy.
 
 config EQUALIZER
 	tristate "EQL (serial line load balancing) support"
@@ -285,8 +282,6 @@
 
 source "drivers/s390/net/Kconfig"
 
-source "drivers/net/tokenring/Kconfig"
-
 source "drivers/net/usb/Kconfig"
 
 source "drivers/net/wireless/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a6b8ce1..3d375ca 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -50,7 +50,6 @@
 obj-$(CONFIG_SLHC) += slip/
 obj-$(CONFIG_NET_SB1000) += sb1000.o
 obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
-obj-$(CONFIG_TR) += tokenring/
 obj-$(CONFIG_WAN) += wan/
 obj-$(CONFIG_WLAN) += wireless/
 obj-$(CONFIG_WIMAX) += wimax/
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 88bbd8f..e3f0fac 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -29,7 +29,6 @@
  */
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/trdevice.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/netlink.h>
@@ -134,22 +133,9 @@
 	{NULL, 0},
 };
 
-static struct devprobe2 mca_probes[] __initdata = {
-#ifdef CONFIG_NE2_MCA
-	{ne2_probe, 0},
-#endif
-#ifdef CONFIG_ELMC		/* 3c523 */
-	{elmc_probe, 0},
-#endif
-#ifdef CONFIG_ELMC_II		/* 3c527 */
-	{mc32_probe, 0},
-#endif
-	{NULL, 0},
-};
-
 /*
  * ISA probes that touch addresses < 0x400 (including those that also
- * look for EISA/PCI/MCA cards in addition to ISA cards).
+ * look for EISA/PCI cards in addition to ISA cards).
  */
 static struct devprobe2 isa_probes[] __initdata = {
 #if defined(CONFIG_HP100) && defined(CONFIG_ISA)	/* ISA, EISA */
@@ -279,51 +265,10 @@
 
 	(void)(	probe_list2(unit, m68k_probes, base_addr == 0) &&
 		probe_list2(unit, eisa_probes, base_addr == 0) &&
-		probe_list2(unit, mca_probes, base_addr == 0) &&
 		probe_list2(unit, isa_probes, base_addr == 0) &&
 		probe_list2(unit, parport_probes, base_addr == 0));
 }
 
-#ifdef CONFIG_TR
-/* Token-ring device probe */
-extern int ibmtr_probe_card(struct net_device *);
-extern struct net_device *smctr_probe(int unit);
-
-static struct devprobe2 tr_probes2[] __initdata = {
-#ifdef CONFIG_SMCTR
-	{smctr_probe, 0},
-#endif
-	{NULL, 0},
-};
-
-static __init int trif_probe(int unit)
-{
-	int err = -ENODEV;
-#ifdef CONFIG_IBMTR
-	struct net_device *dev = alloc_trdev(0);
-	if (!dev)
-		return -ENOMEM;
-
-	sprintf(dev->name, "tr%d", unit);
-	netdev_boot_setup_check(dev);
-	err = ibmtr_probe_card(dev);
-	if (err)
-		free_netdev(dev);
-#endif
-	return err;
-}
-
-static void __init trif_probe2(int unit)
-{
-	unsigned long base_addr = netdev_boot_base("tr", unit);
-
-	if (base_addr == 1)
-		return;
-	probe_list2(unit, tr_probes2, base_addr == 0);
-}
-#endif
-
-
 /*  Statically configured drivers -- order matters here. */
 static int __init net_olddevs_init(void)
 {
@@ -333,11 +278,6 @@
 	for (num = 0; num < 8; ++num)
 		sbni_probe(num);
 #endif
-#ifdef CONFIG_TR
-	for (num = 0; num < 8; ++num)
-		if (!trif_probe(num))
-			trif_probe2(num);
-#endif
 	for (num = 0; num < 8; ++num)
 		ethif_probe2(num);
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 793b001..3463b46 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2173,9 +2173,10 @@
  * received frames (loopback). Since only the payload is given to this
  * function, it checks for loopback.
  */
-static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
+static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
 {
 	struct port *port;
+	int ret = RX_HANDLER_ANOTHER;
 
 	if (length >= sizeof(struct lacpdu)) {
 
@@ -2184,11 +2185,12 @@
 		if (!port->slave) {
 			pr_warning("%s: Warning: port of slave %s is uninitialized\n",
 				   slave->dev->name, slave->dev->master->name);
-			return;
+			return ret;
 		}
 
 		switch (lacpdu->subtype) {
 		case AD_TYPE_LACPDU:
+			ret = RX_HANDLER_CONSUMED;
 			pr_debug("Received LACPDU on port %d\n",
 				 port->actor_port_number);
 			/* Protect against concurrent state machines */
@@ -2198,6 +2200,7 @@
 			break;
 
 		case AD_TYPE_MARKER:
+			ret = RX_HANDLER_CONSUMED;
 			// No need to convert fields to Little Endian since we don't use the marker's fields.
 
 			switch (((struct bond_marker *)lacpdu)->tlv_type) {
@@ -2219,6 +2222,7 @@
 			}
 		}
 	}
+	return ret;
 }
 
 /**
@@ -2456,18 +2460,20 @@
 	return NETDEV_TX_OK;
 }
 
-void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
 			  struct slave *slave)
 {
+	int ret = RX_HANDLER_ANOTHER;
 	if (skb->protocol != PKT_TYPE_LACPDU)
-		return;
+		return ret;
 
 	if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
-		return;
+		return ret;
 
 	read_lock(&bond->lock);
-	bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
+	ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
 	read_unlock(&bond->lock);
+	return ret;
 }
 
 /*
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 235b2cc..5ee7e3c 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -274,7 +274,7 @@
 void bond_3ad_handle_link_change(struct slave *slave, char link);
 int  bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
-void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
 			  struct slave *slave);
 int bond_3ad_set_carrier(struct bonding *bond);
 void bond_3ad_update_lacp_rate(struct bonding *bond);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9abfde4..0f59c15 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -332,7 +332,7 @@
 	if ((client_info->assigned) &&
 	    (client_info->ip_src == arp->ip_dst) &&
 	    (client_info->ip_dst == arp->ip_src) &&
-	    (compare_ether_addr_64bits(client_info->mac_dst, arp->mac_src))) {
+	    (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
 		/* update the clients MAC address */
 		memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
 		client_info->ntt = 1;
@@ -342,26 +342,26 @@
 	_unlock_rx_hashtbl_bh(bond);
 }
 
-static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
+static int rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
 			 struct slave *slave)
 {
 	struct arp_pkt *arp;
 
 	if (skb->protocol != cpu_to_be16(ETH_P_ARP))
-		return;
+		goto out;
 
 	arp = (struct arp_pkt *) skb->data;
 	if (!arp) {
 		pr_debug("Packet has no ARP data\n");
-		return;
+		goto out;
 	}
 
 	if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
-		return;
+		goto out;
 
 	if (skb->len < sizeof(struct arp_pkt)) {
 		pr_debug("Packet is too small to be an ARP\n");
-		return;
+		goto out;
 	}
 
 	if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -369,6 +369,8 @@
 		rlb_update_entry_from_arp(bond, arp);
 		pr_debug("Server received an ARP Reply from client\n");
 	}
+out:
+	return RX_HANDLER_ANOTHER;
 }
 
 /* Caller must hold bond lock for read */
@@ -448,8 +450,8 @@
 
 			if (assigned_slave) {
 				rx_hash_table[index].slave = assigned_slave;
-				if (compare_ether_addr_64bits(rx_hash_table[index].mac_dst,
-							      mac_bcast)) {
+				if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst,
+							     mac_bcast)) {
 					bond_info->rx_hashtbl[index].ntt = 1;
 					bond_info->rx_ntt = 1;
 					/* A slave has been removed from the
@@ -561,7 +563,7 @@
 		client_info = &(bond_info->rx_hashtbl[hash_index]);
 
 		if ((client_info->slave == slave) &&
-		    compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) {
+		    !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
 			client_info->ntt = 1;
 			ntt = 1;
 		}
@@ -600,9 +602,9 @@
 		 * unicast mac address.
 		 */
 		if ((client_info->ip_src == src_ip) &&
-		    compare_ether_addr_64bits(client_info->slave->dev->dev_addr,
-			   bond->dev->dev_addr) &&
-		    compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) {
+		    !ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
+					     bond->dev->dev_addr) &&
+		    !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
 			client_info->ntt = 1;
 			bond_info->rx_ntt = 1;
 		}
@@ -629,7 +631,7 @@
 		if ((client_info->ip_src == arp->ip_src) &&
 		    (client_info->ip_dst == arp->ip_dst)) {
 			/* the entry is already assigned to this client */
-			if (compare_ether_addr_64bits(arp->mac_dst, mac_bcast)) {
+			if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
 				/* update mac address from arp */
 				memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
 			}
@@ -664,7 +666,7 @@
 		memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
 		client_info->slave = assigned_slave;
 
-		if (compare_ether_addr_64bits(client_info->mac_dst, mac_bcast)) {
+		if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
 			client_info->ntt = 1;
 			bond->alb_info.rx_ntt = 1;
 		} else {
@@ -1009,18 +1011,18 @@
 	int perm_curr_diff;
 	int perm_bond_diff;
 
-	perm_curr_diff = compare_ether_addr_64bits(slave->perm_hwaddr,
-						   slave->dev->dev_addr);
-	perm_bond_diff = compare_ether_addr_64bits(slave->perm_hwaddr,
-						   bond->dev->dev_addr);
+	perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
+						  slave->dev->dev_addr);
+	perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
+						  bond->dev->dev_addr);
 
 	if (perm_curr_diff && perm_bond_diff) {
 		struct slave *tmp_slave;
 		int i, found = 0;
 
 		bond_for_each_slave(bond, tmp_slave, i) {
-			if (!compare_ether_addr_64bits(slave->perm_hwaddr,
-						       tmp_slave->dev->dev_addr)) {
+			if (ether_addr_equal_64bits(slave->perm_hwaddr,
+						    tmp_slave->dev->dev_addr)) {
 				found = 1;
 				break;
 			}
@@ -1074,10 +1076,10 @@
 	 * check uniqueness of slave's mac address against the other
 	 * slaves in the bond.
 	 */
-	if (compare_ether_addr_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
+	if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
 		bond_for_each_slave(bond, tmp_slave1, i) {
-			if (!compare_ether_addr_64bits(tmp_slave1->dev->dev_addr,
-						       slave->dev->dev_addr)) {
+			if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
+						    slave->dev->dev_addr)) {
 				found = 1;
 				break;
 			}
@@ -1099,8 +1101,8 @@
 	bond_for_each_slave(bond, tmp_slave1, i) {
 		found = 0;
 		bond_for_each_slave(bond, tmp_slave2, j) {
-			if (!compare_ether_addr_64bits(tmp_slave1->perm_hwaddr,
-						       tmp_slave2->dev->dev_addr)) {
+			if (ether_addr_equal_64bits(tmp_slave1->perm_hwaddr,
+						    tmp_slave2->dev->dev_addr)) {
 				found = 1;
 				break;
 			}
@@ -1115,8 +1117,8 @@
 		}
 
 		if (!has_bond_addr) {
-			if (!compare_ether_addr_64bits(tmp_slave1->dev->dev_addr,
-						       bond->dev->dev_addr)) {
+			if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
+						    bond->dev->dev_addr)) {
 
 				has_bond_addr = tmp_slave1;
 			}
@@ -1257,7 +1259,7 @@
 	case ETH_P_IP: {
 		const struct iphdr *iph = ip_hdr(skb);
 
-		if (!compare_ether_addr_64bits(eth_data->h_dest, mac_bcast) ||
+		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
 		    (iph->daddr == ip_bcast) ||
 		    (iph->protocol == IPPROTO_IGMP)) {
 			do_tx_balance = 0;
@@ -1271,7 +1273,7 @@
 		/* IPv6 doesn't really use broadcast mac address, but leave
 		 * that here just in case.
 		 */
-		if (!compare_ether_addr_64bits(eth_data->h_dest, mac_bcast)) {
+		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
 			do_tx_balance = 0;
 			break;
 		}
@@ -1279,7 +1281,7 @@
 		/* IPv6 uses all-nodes multicast as an equivalent to
 		 * broadcasts in IPv4.
 		 */
-		if (!compare_ether_addr_64bits(eth_data->h_dest, mac_v6_allmcast)) {
+		if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
 			do_tx_balance = 0;
 			break;
 		}
@@ -1603,8 +1605,8 @@
 		struct slave *tmp_slave;
 		/* find slave that is holding the bond's mac address */
 		bond_for_each_slave(bond, tmp_slave, i) {
-			if (!compare_ether_addr_64bits(tmp_slave->dev->dev_addr,
-						       bond->dev->dev_addr)) {
+			if (ether_addr_equal_64bits(tmp_slave->dev->dev_addr,
+						    bond->dev->dev_addr)) {
 				swap_slave = tmp_slave;
 				break;
 			}
@@ -1681,8 +1683,8 @@
 	swap_slave = NULL;
 
 	bond_for_each_slave(bond, slave, i) {
-		if (!compare_ether_addr_64bits(slave->dev->dev_addr,
-					       bond_dev->dev_addr)) {
+		if (ether_addr_equal_64bits(slave->dev->dev_addr,
+					    bond_dev->dev_addr)) {
 			swap_slave = slave;
 			break;
 		}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 62d2409..2ee8cf9 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -549,9 +549,9 @@
  * Get link speed and duplex from the slave's base driver
  * using ethtool. If for some reason the call fails or the
  * values are invalid, set speed and duplex to -1,
- * and return error.
+ * and return.
  */
-static int bond_update_speed_duplex(struct slave *slave)
+static void bond_update_speed_duplex(struct slave *slave)
 {
 	struct net_device *slave_dev = slave->dev;
 	struct ethtool_cmd ecmd;
@@ -563,24 +563,24 @@
 
 	res = __ethtool_get_settings(slave_dev, &ecmd);
 	if (res < 0)
-		return -1;
+		return;
 
 	slave_speed = ethtool_cmd_speed(&ecmd);
 	if (slave_speed == 0 || slave_speed == ((__u32) -1))
-		return -1;
+		return;
 
 	switch (ecmd.duplex) {
 	case DUPLEX_FULL:
 	case DUPLEX_HALF:
 		break;
 	default:
-		return -1;
+		return;
 	}
 
 	slave->speed = slave_speed;
 	slave->duplex = ecmd.duplex;
 
-	return 0;
+	return;
 }
 
 /*
@@ -1444,8 +1444,9 @@
 	struct sk_buff *skb = *pskb;
 	struct slave *slave;
 	struct bonding *bond;
-	void (*recv_probe)(struct sk_buff *, struct bonding *,
+	int (*recv_probe)(struct sk_buff *, struct bonding *,
 				struct slave *);
+	int ret = RX_HANDLER_ANOTHER;
 
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
@@ -1464,8 +1465,12 @@
 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 
 		if (likely(nskb)) {
-			recv_probe(nskb, bond, slave);
+			ret = recv_probe(nskb, bond, slave);
 			dev_kfree_skb(nskb);
+			if (ret == RX_HANDLER_CONSUMED) {
+				consume_skb(skb);
+				return ret;
+			}
 		}
 	}
 
@@ -1487,7 +1492,7 @@
 		memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
 	}
 
-	return RX_HANDLER_ANOTHER;
+	return ret;
 }
 
 /* enslave device <slave> to bond device <master> */
@@ -1726,7 +1731,8 @@
 
 	read_lock(&bond->lock);
 
-	new_slave->last_arp_rx = jiffies;
+	new_slave->last_arp_rx = jiffies -
+		(msecs_to_jiffies(bond->params.arp_interval) + 1);
 
 	if (bond->params.miimon && !bond->params.use_carrier) {
 		link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1751,22 +1757,30 @@
 	}
 
 	/* check for initial state */
-	if (!bond->params.miimon ||
-	    (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) {
-		if (bond->params.updelay) {
-			pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n");
-			new_slave->link  = BOND_LINK_BACK;
-			new_slave->delay = bond->params.updelay;
+	if (bond->params.miimon) {
+		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
+			if (bond->params.updelay) {
+				new_slave->link = BOND_LINK_BACK;
+				new_slave->delay = bond->params.updelay;
+			} else {
+				new_slave->link = BOND_LINK_UP;
+			}
 		} else {
-			pr_debug("Initial state of slave_dev is BOND_LINK_UP\n");
-			new_slave->link  = BOND_LINK_UP;
+			new_slave->link = BOND_LINK_DOWN;
 		}
-		new_slave->jiffies = jiffies;
+	} else if (bond->params.arp_interval) {
+		new_slave->link = (netif_carrier_ok(slave_dev) ?
+			BOND_LINK_UP : BOND_LINK_DOWN);
 	} else {
-		pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n");
-		new_slave->link  = BOND_LINK_DOWN;
+		new_slave->link = BOND_LINK_UP;
 	}
 
+	if (new_slave->link != BOND_LINK_DOWN)
+		new_slave->jiffies = jiffies;
+	pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
+		new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+			(new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+
 	bond_update_speed_duplex(new_slave);
 
 	if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
@@ -1952,7 +1966,7 @@
 	write_lock_bh(&bond->lock);
 
 	if (!bond->params.fail_over_mac) {
-		if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
+		if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
 		    bond->slave_cnt > 1)
 			pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
 				   bond_dev->name, slave_dev->name,
@@ -2723,7 +2737,7 @@
 	}
 }
 
-static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
+static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
 			 struct slave *slave)
 {
 	struct arphdr *arp;
@@ -2731,7 +2745,7 @@
 	__be32 sip, tip;
 
 	if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
-		return;
+		return RX_HANDLER_ANOTHER;
 
 	read_lock(&bond->lock);
 
@@ -2776,6 +2790,7 @@
 
 out_unlock:
 	read_unlock(&bond->lock);
+	return RX_HANDLER_ANOTHER;
 }
 
 /*
@@ -4820,12 +4835,9 @@
 	return 0;
 }
 
-static int bond_get_tx_queues(struct net *net, struct nlattr *tb[],
-			      unsigned int *num_queues,
-			      unsigned int *real_num_queues)
+static int bond_get_tx_queues(struct net *net, struct nlattr *tb[])
 {
-	*num_queues = tx_queues;
-	return 0;
+	return tx_queues;
 }
 
 static struct rtnl_link_ops bond_link_ops __read_mostly = {
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 9f2bae66..4581aa5 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -218,7 +218,7 @@
 	struct   slave *primary_slave;
 	bool     force_primary;
 	s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
-	void     (*recv_probe)(struct sk_buff *, struct bonding *,
+	int     (*recv_probe)(struct sk_buff *, struct bonding *,
 			       struct slave *);
 	rwlock_t lock;
 	rwlock_t curr_slave_lock;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 9c1c8cd..1520814 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -6,6 +6,8 @@
  * License terms: GNU General Public License (GPL) version 2.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/device.h>
@@ -19,6 +21,7 @@
 #include <linux/if_arp.h>
 #include <linux/timer.h>
 #include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/caif_hsi.h>
 
@@ -34,6 +37,10 @@
 module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
 
+static int aggregation_timeout = 1;
+module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
+
 /*
  * HSI padding options.
  * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
@@ -86,24 +93,84 @@
 		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
 }
 
+static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
+					   const struct sk_buff *skb,
+					   int direction)
+{
+	struct caif_payload_info *info;
+	int hpad, tpad, len;
+
+	info = (struct caif_payload_info *)&skb->cb;
+	hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
+	tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+	len = skb->len + hpad + tpad;
+
+	if (direction > 0)
+		cfhsi->aggregation_len += len;
+	else if (direction < 0)
+		cfhsi->aggregation_len -= len;
+}
+
+static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
+{
+	int i;
+
+	if (cfhsi->aggregation_timeout < 0)
+		return true;
+
+	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
+		if (cfhsi->qhead[i].qlen)
+			return true;
+	}
+
+	/* TODO: Use aggregation_len instead */
+	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
+		return true;
+
+	return false;
+}
+
+static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
+{
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
+		skb = skb_dequeue(&cfhsi->qhead[i]);
+		if (skb)
+			break;
+	}
+
+	return skb;
+}
+
+static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
+{
+	int i, len = 0;
+	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
+		len += skb_queue_len(&cfhsi->qhead[i]);
+	return len;
+}
+
 static void cfhsi_abort_tx(struct cfhsi *cfhsi)
 {
 	struct sk_buff *skb;
 
 	for (;;) {
 		spin_lock_bh(&cfhsi->lock);
-		skb = skb_dequeue(&cfhsi->qhead);
+		skb = cfhsi_dequeue(cfhsi);
 		if (!skb)
 			break;
 
 		cfhsi->ndev->stats.tx_errors++;
 		cfhsi->ndev->stats.tx_dropped++;
+		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
 		spin_unlock_bh(&cfhsi->lock);
 		kfree_skb(skb);
 	}
 	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
-		mod_timer(&cfhsi->timer,
+		mod_timer(&cfhsi->inactivity_timer,
 			jiffies + cfhsi->inactivity_timeout);
 	spin_unlock_bh(&cfhsi->lock);
 }
@@ -169,7 +236,7 @@
 	struct sk_buff *skb;
 	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
 
-	skb = skb_dequeue(&cfhsi->qhead);
+	skb = cfhsi_dequeue(cfhsi);
 	if (!skb)
 		return 0;
 
@@ -196,11 +263,16 @@
 			pemb += hpad;
 
 			/* Update network statistics. */
+			spin_lock_bh(&cfhsi->lock);
 			cfhsi->ndev->stats.tx_packets++;
 			cfhsi->ndev->stats.tx_bytes += skb->len;
+			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
+			spin_unlock_bh(&cfhsi->lock);
 
 			/* Copy in embedded CAIF frame. */
 			skb_copy_bits(skb, 0, pemb, skb->len);
+
+			/* Consume the SKB */
 			consume_skb(skb);
 			skb = NULL;
 		}
@@ -214,7 +286,7 @@
 		int tpad = 0;
 
 		if (!skb)
-			skb = skb_dequeue(&cfhsi->qhead);
+			skb = cfhsi_dequeue(cfhsi);
 
 		if (!skb)
 			break;
@@ -233,8 +305,11 @@
 		pfrm += hpad;
 
 		/* Update network statistics. */
+		spin_lock_bh(&cfhsi->lock);
 		cfhsi->ndev->stats.tx_packets++;
 		cfhsi->ndev->stats.tx_bytes += skb->len;
+		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
+		spin_unlock_bh(&cfhsi->lock);
 
 		/* Copy in CAIF frame. */
 		skb_copy_bits(skb, 0, pfrm, skb->len);
@@ -244,6 +319,8 @@
 
 		/* Update frame pointer. */
 		pfrm += skb->len + tpad;
+
+		/* Consume the SKB */
 		consume_skb(skb);
 		skb = NULL;
 
@@ -258,8 +335,7 @@
 	}
 
 	/* Check if we can piggy-back another descriptor. */
-	skb = skb_peek(&cfhsi->qhead);
-	if (skb)
+	if (cfhsi_can_send_aggregate(cfhsi))
 		desc->header |= CFHSI_PIGGY_DESC;
 	else
 		desc->header &= ~CFHSI_PIGGY_DESC;
@@ -267,61 +343,71 @@
 	return CFHSI_DESC_SZ + pld_len;
 }
 
-static void cfhsi_tx_done(struct cfhsi *cfhsi)
+static void cfhsi_start_tx(struct cfhsi *cfhsi)
 {
-	struct cfhsi_desc *desc = NULL;
-	int len = 0;
-	int res;
+	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
+	int len, res;
 
 	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
 
 	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
 		return;
 
-	desc = (struct cfhsi_desc *)cfhsi->tx_buf;
-
 	do {
-		/*
-		 * Send flow on if flow off has been previously signalled
-		 * and number of packets is below low water mark.
-		 */
-		spin_lock_bh(&cfhsi->lock);
-		if (cfhsi->flow_off_sent &&
-				cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
-				cfhsi->cfdev.flowctrl) {
-
-			cfhsi->flow_off_sent = 0;
-			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
-		}
-		spin_unlock_bh(&cfhsi->lock);
-
 		/* Create HSI frame. */
-		do {
-			len = cfhsi_tx_frm(desc, cfhsi);
-			if (!len) {
-				spin_lock_bh(&cfhsi->lock);
-				if (unlikely(skb_peek(&cfhsi->qhead))) {
-					spin_unlock_bh(&cfhsi->lock);
-					continue;
-				}
-				cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
-				/* Start inactivity timer. */
-				mod_timer(&cfhsi->timer,
-					jiffies + cfhsi->inactivity_timeout);
+		len = cfhsi_tx_frm(desc, cfhsi);
+		if (!len) {
+			spin_lock_bh(&cfhsi->lock);
+			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
 				spin_unlock_bh(&cfhsi->lock);
-				goto done;
+				res = -EAGAIN;
+				continue;
 			}
-		} while (!len);
+			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+			/* Start inactivity timer. */
+			mod_timer(&cfhsi->inactivity_timer,
+				jiffies + cfhsi->inactivity_timeout);
+			spin_unlock_bh(&cfhsi->lock);
+			break;
+		}
 
 		/* Set up new transfer. */
 		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
-		if (WARN_ON(res < 0)) {
+		if (WARN_ON(res < 0))
 			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
 				__func__, res);
-		}
 	} while (res < 0);
+}
 
-done:
+static void cfhsi_tx_done(struct cfhsi *cfhsi)
+{
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+
+	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+		return;
+
+	/*
+	 * Send flow on if flow off has been previously signalled
+	 * and number of packets is below low water mark.
+	 */
+	spin_lock_bh(&cfhsi->lock);
+	if (cfhsi->flow_off_sent &&
+			cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
+			cfhsi->cfdev.flowctrl) {
+
+		cfhsi->flow_off_sent = 0;
+		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
+	}
+
+	if (cfhsi_can_send_aggregate(cfhsi)) {
+		spin_unlock_bh(&cfhsi->lock);
+		cfhsi_start_tx(cfhsi);
+	} else {
+		mod_timer(&cfhsi->aggregation_timer,
+			jiffies + cfhsi->aggregation_timeout);
+		spin_unlock_bh(&cfhsi->lock);
+	}
+
 	return;
 }
 
@@ -560,7 +646,7 @@
 
 	/* Update inactivity timer if pending. */
 	spin_lock_bh(&cfhsi->lock);
-	mod_timer_pending(&cfhsi->timer,
+	mod_timer_pending(&cfhsi->inactivity_timer,
 			jiffies + cfhsi->inactivity_timeout);
 	spin_unlock_bh(&cfhsi->lock);
 
@@ -793,12 +879,12 @@
 
 	spin_lock_bh(&cfhsi->lock);
 
-	/* Resume transmit if queue is not empty. */
-	if (!skb_peek(&cfhsi->qhead)) {
+	/* Resume transmit if queues are not empty. */
+	if (!cfhsi_tx_queue_len(cfhsi)) {
 		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
 			__func__);
 		/* Start inactivity timer. */
-		mod_timer(&cfhsi->timer,
+		mod_timer(&cfhsi->inactivity_timer,
 				jiffies + cfhsi->inactivity_timeout);
 		spin_unlock_bh(&cfhsi->lock);
 		return;
@@ -934,20 +1020,53 @@
 	wake_up_interruptible(&cfhsi->wake_down_wait);
 }
 
+static void cfhsi_aggregation_tout(unsigned long arg)
+{
+	struct cfhsi *cfhsi = (struct cfhsi *)arg;
+
+	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+		__func__);
+
+	cfhsi_start_tx(cfhsi);
+}
+
 static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct cfhsi *cfhsi = NULL;
 	int start_xfer = 0;
 	int timer_active;
+	int prio;
 
 	if (!dev)
 		return -EINVAL;
 
 	cfhsi = netdev_priv(dev);
 
+	switch (skb->priority) {
+	case TC_PRIO_BESTEFFORT:
+	case TC_PRIO_FILLER:
+	case TC_PRIO_BULK:
+		prio = CFHSI_PRIO_BEBK;
+		break;
+	case TC_PRIO_INTERACTIVE_BULK:
+		prio = CFHSI_PRIO_VI;
+		break;
+	case TC_PRIO_INTERACTIVE:
+		prio = CFHSI_PRIO_VO;
+		break;
+	case TC_PRIO_CONTROL:
+	default:
+		prio = CFHSI_PRIO_CTL;
+		break;
+	}
+
 	spin_lock_bh(&cfhsi->lock);
 
-	skb_queue_tail(&cfhsi->qhead, skb);
+	/* Update aggregation statistics */
+	cfhsi_update_aggregation_stats(cfhsi, skb, 1);
+
+	/* Queue the SKB */
+	skb_queue_tail(&cfhsi->qhead[prio], skb);
 
 	/* Sanity check; xmit should not be called after unregister_netdev */
 	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
@@ -958,7 +1077,7 @@
 
 	/* Send flow off if number of packets is above high water mark. */
 	if (!cfhsi->flow_off_sent &&
-		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
+		cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
 		cfhsi->cfdev.flowctrl) {
 		cfhsi->flow_off_sent = 1;
 		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -970,12 +1089,18 @@
 	}
 
 	if (!start_xfer) {
+		/* Send aggregate if it is possible */
+		bool aggregate_ready =
+			cfhsi_can_send_aggregate(cfhsi) &&
+			del_timer(&cfhsi->aggregation_timer) > 0;
 		spin_unlock_bh(&cfhsi->lock);
+		if (aggregate_ready)
+			cfhsi_start_tx(cfhsi);
 		return 0;
 	}
 
 	/* Delete inactivity timer if started. */
-	timer_active = del_timer_sync(&cfhsi->timer);
+	timer_active = del_timer_sync(&cfhsi->inactivity_timer);
 
 	spin_unlock_bh(&cfhsi->lock);
 
@@ -1004,28 +1129,11 @@
 	return 0;
 }
 
-static int cfhsi_open(struct net_device *dev)
-{
-	netif_wake_queue(dev);
-
-	return 0;
-}
-
-static int cfhsi_close(struct net_device *dev)
-{
-	netif_stop_queue(dev);
-
-	return 0;
-}
-
-static const struct net_device_ops cfhsi_ops = {
-	.ndo_open = cfhsi_open,
-	.ndo_stop = cfhsi_close,
-	.ndo_start_xmit = cfhsi_xmit
-};
+static const struct net_device_ops cfhsi_ops;
 
 static void cfhsi_setup(struct net_device *dev)
 {
+	int i;
 	struct cfhsi *cfhsi = netdev_priv(dev);
 	dev->features = 0;
 	dev->netdev_ops = &cfhsi_ops;
@@ -1034,7 +1142,8 @@
 	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
 	dev->tx_queue_len = 0;
 	dev->destructor = free_netdev;
-	skb_queue_head_init(&cfhsi->qhead);
+	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
+		skb_queue_head_init(&cfhsi->qhead[i]);
 	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
 	cfhsi->cfdev.use_frag = false;
 	cfhsi->cfdev.use_stx = false;
@@ -1046,7 +1155,7 @@
 {
 	struct cfhsi *cfhsi = NULL;
 	struct net_device *ndev;
-	struct cfhsi_dev *dev;
+
 	int res;
 
 	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
@@ -1057,6 +1166,34 @@
 	cfhsi->ndev = ndev;
 	cfhsi->pdev = pdev;
 
+	/* Assign the HSI device. */
+	cfhsi->dev = pdev->dev.platform_data;
+
+	/* Assign the driver to this HSI device. */
+	cfhsi->dev->drv = &cfhsi->drv;
+
+	/* Register network device. */
+	res = register_netdev(ndev);
+	if (res) {
+		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
+			__func__, res);
+		free_netdev(ndev);
+	}
+	/* Add CAIF HSI device to list. */
+	spin_lock(&cfhsi_list_lock);
+	list_add_tail(&cfhsi->list, &cfhsi_list);
+	spin_unlock(&cfhsi_list_lock);
+
+	return res;
+}
+
+static int cfhsi_open(struct net_device *ndev)
+{
+	struct cfhsi *cfhsi = netdev_priv(ndev);
+	int res;
+
+	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
+
 	/* Initialize state variables. */
 	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
 	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
@@ -1066,12 +1203,6 @@
 	cfhsi->q_low_mark = LOW_WATER_MARK;
 	cfhsi->q_high_mark = HIGH_WATER_MARK;
 
-	/* Assign the HSI device. */
-	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
-	cfhsi->dev = dev;
-
-	/* Assign the driver to this HSI device. */
-	dev->drv = &cfhsi->drv;
 
 	/*
 	 * Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1111,6 +1242,9 @@
 		cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
 	}
 
+	/* Initialize aggregation timeout */
+	cfhsi->aggregation_timeout = aggregation_timeout;
+
 	/* Initialize receive variables. */
 	cfhsi->rx_ptr = cfhsi->rx_buf;
 	cfhsi->rx_len = CFHSI_DESC_SZ;
@@ -1136,9 +1270,9 @@
 	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
 
 	/* Create work thread. */
-	cfhsi->wq = create_singlethread_workqueue(pdev->name);
+	cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
 	if (!cfhsi->wq) {
-		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
+		dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n",
 			__func__);
 		res = -ENODEV;
 		goto err_create_wq;
@@ -1150,18 +1284,17 @@
 	init_waitqueue_head(&cfhsi->flush_fifo_wait);
 
 	/* Setup the inactivity timer. */
-	init_timer(&cfhsi->timer);
-	cfhsi->timer.data = (unsigned long)cfhsi;
-	cfhsi->timer.function = cfhsi_inactivity_tout;
+	init_timer(&cfhsi->inactivity_timer);
+	cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
+	cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
 	/* Setup the slowpath RX timer. */
 	init_timer(&cfhsi->rx_slowpath_timer);
 	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
 	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
-
-	/* Add CAIF HSI device to list. */
-	spin_lock(&cfhsi_list_lock);
-	list_add_tail(&cfhsi->list, &cfhsi_list);
-	spin_unlock(&cfhsi_list_lock);
+	/* Setup the aggregation timer. */
+	init_timer(&cfhsi->aggregation_timer);
+	cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
+	cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
 
 	/* Activate HSI interface. */
 	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
@@ -1175,21 +1308,10 @@
 	/* Flush FIFO */
 	res = cfhsi_flush_fifo(cfhsi);
 	if (res) {
-		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
+		dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n",
 			__func__, res);
 		goto err_net_reg;
 	}
-
-	/* Register network device. */
-	res = register_netdev(ndev);
-	if (res) {
-		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
-			__func__, res);
-		goto err_net_reg;
-	}
-
-	netif_stop_queue(ndev);
-
 	return res;
 
  err_net_reg:
@@ -1203,18 +1325,14 @@
  err_alloc_rx:
 	kfree(cfhsi->tx_buf);
  err_alloc_tx:
-	free_netdev(ndev);
-
 	return res;
 }
 
-static void cfhsi_shutdown(struct cfhsi *cfhsi)
+static int cfhsi_close(struct net_device *ndev)
 {
+	struct cfhsi *cfhsi = netdev_priv(ndev);
 	u8 *tx_buf, *rx_buf, *flip_buf;
 
-	/* Stop TXing */
-	netif_tx_stop_all_queues(cfhsi->ndev);
-
 	/* going to shutdown driver */
 	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
 
@@ -1222,8 +1340,9 @@
 	flush_workqueue(cfhsi->wq);
 
 	/* Delete timers if pending */
-	del_timer_sync(&cfhsi->timer);
+	del_timer_sync(&cfhsi->inactivity_timer);
 	del_timer_sync(&cfhsi->rx_slowpath_timer);
+	del_timer_sync(&cfhsi->aggregation_timer);
 
 	/* Cancel pending RX request (if any) */
 	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
@@ -1241,15 +1360,19 @@
 	/* Deactivate interface */
 	cfhsi->dev->cfhsi_down(cfhsi->dev);
 
-	/* Finally unregister the network device. */
-	unregister_netdev(cfhsi->ndev);
-
 	/* Free buffers. */
 	kfree(tx_buf);
 	kfree(rx_buf);
 	kfree(flip_buf);
+	return 0;
 }
 
+static const struct net_device_ops cfhsi_ops = {
+	.ndo_open = cfhsi_open,
+	.ndo_stop = cfhsi_close,
+	.ndo_start_xmit = cfhsi_xmit
+};
+
 int cfhsi_remove(struct platform_device *pdev)
 {
 	struct list_head *list_node;
@@ -1266,10 +1389,6 @@
 			/* Remove from list. */
 			list_del(list_node);
 			spin_unlock(&cfhsi_list_lock);
-
-			/* Shutdown driver. */
-			cfhsi_shutdown(cfhsi);
-
 			return 0;
 		}
 	}
@@ -1300,8 +1419,7 @@
 		list_del(list_node);
 		spin_unlock(&cfhsi_list_lock);
 
-		/* Shutdown driver. */
-		cfhsi_shutdown(cfhsi);
+		unregister_netdevice(cfhsi->ndev);
 
 		spin_lock(&cfhsi_list_lock);
 	}
@@ -1326,8 +1444,6 @@
 		goto err_dev_register;
 	}
 
-	return result;
-
  err_dev_register:
 	return result;
 }
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 5b20413..bc497d7 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -13,6 +13,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
+#include <linux/io.h>
 
 #include <net/caif/caif_device.h>
 #include <net/caif/caif_shm.h>
@@ -647,6 +648,9 @@
 		if (pshm_dev->shm_loopback)
 			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
 		else
+			/*
+			 * FIXME: the result of ioremap is not a pointer - arnd
+			 */
 			tx_buf->desc_vptr =
 					ioremap(tx_buf->phy_addr, TX_BUF_SZ);
 
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c5fe3a3..f03d7a4 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -687,18 +687,19 @@
 
 	if (priv->do_get_state)
 		priv->do_get_state(dev, &state);
-	NLA_PUT_U32(skb, IFLA_CAN_STATE, state);
-	NLA_PUT(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm);
-	NLA_PUT_U32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms);
-	NLA_PUT(skb, IFLA_CAN_BITTIMING,
-		sizeof(priv->bittiming), &priv->bittiming);
-	NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock);
-	if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec))
-		NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec);
-	if (priv->bittiming_const)
-		NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST,
-			sizeof(*priv->bittiming_const), priv->bittiming_const);
-
+	if (nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+	    nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+	    nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+	    nla_put(skb, IFLA_CAN_BITTIMING,
+		    sizeof(priv->bittiming), &priv->bittiming) ||
+	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
+	    (priv->do_get_berr_counter &&
+	     !priv->do_get_berr_counter(dev, &bec) &&
+	     nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+	    (priv->bittiming_const &&
+	     nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+		     sizeof(*priv->bittiming_const), priv->bittiming_const)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -714,9 +715,9 @@
 {
 	struct can_priv *priv = netdev_priv(dev);
 
-	NLA_PUT(skb, IFLA_INFO_XSTATS,
-		sizeof(priv->can_stats), &priv->can_stats);
-
+	if (nla_put(skb, IFLA_INFO_XSTATS,
+		    sizeof(priv->can_stats), &priv->can_stats))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
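Both dev.c hunks follow the tree-wide move away from the NLA_PUT*() macros, which jumped to nla_put_failure from inside the macro body, to the plain nla_put*() helpers whose return values the caller checks explicitly. Reduced to a minimal sketch using attributes from the hunk above:

	if (nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
	    nla_put(skb, IFLA_CAN_BITTIMING,
		    sizeof(priv->bittiming), &priv->bittiming))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
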
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 2bb215e..1226297 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1274,17 +1274,7 @@
 	.resume = pch_can_resume,
 };
 
-static int __init pch_can_pci_init(void)
-{
-	return pci_register_driver(&pch_can_pci_driver);
-}
-module_init(pch_can_pci_init);
-
-static void __exit pch_can_pci_exit(void)
-{
-	pci_unregister_driver(&pch_can_pci_driver);
-}
-module_exit(pch_can_pci_exit);
+module_pci_driver(pch_can_pci_driver);
 
 MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
 MODULE_LICENSE("GPL v2");
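module_pci_driver() (from <linux/pci.h>) generates the registration boilerplate removed here and in the sja1000 drivers that follow; for this driver it expands to roughly:

	static int __init pch_can_pci_driver_init(void)
	{
		return pci_register_driver(&pch_can_pci_driver);
	}
	module_init(pch_can_pci_driver_init);

	static void __exit pch_can_pci_driver_exit(void)
	{
		pci_unregister_driver(&pch_can_pci_driver);
	}
	module_exit(pch_can_pci_driver_exit);
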
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 36f4f97..5c6d412 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -371,16 +371,4 @@
 	.remove = ems_pci_del_card,
 };
 
-static int __init ems_pci_init(void)
-{
-	return pci_register_driver(&ems_pci_driver);
-}
-
-static void __exit ems_pci_exit(void)
-{
-	pci_unregister_driver(&ems_pci_driver);
-}
-
-module_init(ems_pci_init);
-module_exit(ems_pci_exit);
-
+module_pci_driver(ems_pci_driver);
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index ed004ce..23ed6ea 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -397,15 +397,4 @@
 	.remove = __devexit_p(kvaser_pci_remove_one),
 };
 
-static int __init kvaser_pci_init(void)
-{
-	return pci_register_driver(&kvaser_pci_driver);
-}
-
-static void __exit kvaser_pci_exit(void)
-{
-	pci_unregister_driver(&kvaser_pci_driver);
-}
-
-module_init(kvaser_pci_init);
-module_exit(kvaser_pci_exit);
+module_pci_driver(kvaser_pci_driver);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 5f92b86..f0a1296 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -749,14 +749,4 @@
 	.remove = __devexit_p(peak_pci_remove),
 };
 
-static int __init peak_pci_init(void)
-{
-	return pci_register_driver(&peak_pci_driver);
-}
-module_init(peak_pci_init);
-
-static void __exit peak_pci_exit(void)
-{
-	pci_unregister_driver(&peak_pci_driver);
-}
-module_exit(peak_pci_exit);
+module_pci_driver(peak_pci_driver);
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index a227586..8bc9598 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -609,15 +609,4 @@
 	.remove = plx_pci_del_card,
 };
 
-static int __init plx_pci_init(void)
-{
-	return pci_register_driver(&plx_pci_driver);
-}
-
-static void __exit plx_pci_exit(void)
-{
-	pci_unregister_driver(&plx_pci_driver);
-}
-
-module_init(plx_pci_init);
-module_exit(plx_pci_exit);
+module_pci_driver(plx_pci_driver);
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 41719da..1a8eef2 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -69,7 +69,6 @@
 #define TX_TIMEOUT  (400*HZ/1000)
 
 #include <linux/module.h>
-#include <linux/mca.h>
 #include <linux/isa.h>
 #include <linux/pnp.h>
 #include <linux/string.h>
@@ -102,7 +101,7 @@
 #endif
 
 /* Used to do a global count of all the cards in the system.  Must be
- * a global variable so that the mca/eisa probe routines can increment
+ * a global variable so that the eisa probe routines can increment
  * it */
 static int el3_cards = 0;
 #define EL3_MAX_CARDS 8
@@ -163,7 +162,7 @@
  */
 #define SKB_QUEUE_SIZE	64
 
-enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA };
+enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_EISA };
 
 struct el3_private {
 	spinlock_t lock;
@@ -505,41 +504,6 @@
 static int eisa_registered;
 #endif
 
-#ifdef CONFIG_MCA
-static int el3_mca_probe(struct device *dev);
-
-static short el3_mca_adapter_ids[] __initdata = {
-		0x627c,
-		0x627d,
-		0x62db,
-		0x62f6,
-		0x62f7,
-		0x0000
-};
-
-static char *el3_mca_adapter_names[] __initdata = {
-		"3Com 3c529 EtherLink III (10base2)",
-		"3Com 3c529 EtherLink III (10baseT)",
-		"3Com 3c529 EtherLink III (test mode)",
-		"3Com 3c529 EtherLink III (TP or coax)",
-		"3Com 3c529 EtherLink III (TP)",
-		NULL
-};
-
-static struct mca_driver el3_mca_driver = {
-		.id_table = el3_mca_adapter_ids,
-		.driver = {
-				.name = "3c529",
-				.bus = &mca_bus_type,
-				.probe = el3_mca_probe,
-				.remove = __devexit_p(el3_device_remove),
-				.suspend = el3_suspend,
-				.resume  = el3_resume,
-		},
-};
-static int mca_registered;
-#endif /* CONFIG_MCA */
-
 static const struct net_device_ops netdev_ops = {
 	.ndo_open 		= el3_open,
 	.ndo_stop	 	= el3_close,
@@ -600,76 +564,6 @@
 	free_netdev (dev);
 }
 
-#ifdef CONFIG_MCA
-static int __init el3_mca_probe(struct device *device)
-{
-	/* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
-	 * heavily modified by Chris Beauregard
-	 * (cpbeaure@csclub.uwaterloo.ca) to support standard MCA
-	 * probing.
-	 *
-	 * redone for multi-card detection by ZP Gu (zpg@castle.net)
-	 * now works as a module */
-
-	short i;
-	int ioaddr, irq, if_port;
-	__be16 phys_addr[3];
-	struct net_device *dev = NULL;
-	u_char pos4, pos5;
-	struct mca_device *mdev = to_mca_device(device);
-	int slot = mdev->slot;
-	int err;
-
-	pos4 = mca_device_read_stored_pos(mdev, 4);
-	pos5 = mca_device_read_stored_pos(mdev, 5);
-
-	ioaddr = ((short)((pos4&0xfc)|0x02)) << 8;
-	irq = pos5 & 0x0f;
-
-
-	pr_info("3c529: found %s at slot %d\n",
-		el3_mca_adapter_names[mdev->index], slot + 1);
-
-	/* claim the slot */
-	strncpy(mdev->name, el3_mca_adapter_names[mdev->index],
-			sizeof(mdev->name));
-	mca_device_set_claim(mdev, 1);
-
-	if_port = pos4 & 0x03;
-
-	irq = mca_device_transform_irq(mdev, irq);
-	ioaddr = mca_device_transform_ioport(mdev, ioaddr);
-	if (el3_debug > 2) {
-		pr_debug("3c529: irq %d  ioaddr 0x%x  ifport %d\n", irq, ioaddr, if_port);
-	}
-	EL3WINDOW(0);
-	for (i = 0; i < 3; i++)
-		phys_addr[i] = htons(read_eeprom(ioaddr, i));
-
-	dev = alloc_etherdev(sizeof (struct el3_private));
-	if (dev == NULL) {
-		release_region(ioaddr, EL3_IO_EXTENT);
-		return -ENOMEM;
-	}
-
-	netdev_boot_setup_check(dev);
-
-	el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA);
-	dev_set_drvdata(device, dev);
-	err = el3_common_init(dev);
-
-	if (err) {
-		dev_set_drvdata(device, NULL);
-		free_netdev(dev);
-		return -ENOMEM;
-	}
-
-	el3_devs[el3_cards++] = dev;
-	return 0;
-}
-
-#endif /* CONFIG_MCA */
-
 #ifdef CONFIG_EISA
 static int __init el3_eisa_probe (struct device *device)
 {
@@ -1547,11 +1441,6 @@
 	if (!ret)
 		eisa_registered = 1;
 #endif
-#ifdef CONFIG_MCA
-	ret = mca_register_driver(&el3_mca_driver);
-	if (!ret)
-		mca_registered = 1;
-#endif
 
 #ifdef CONFIG_PNP
 	if (pnp_registered)
@@ -1563,10 +1452,6 @@
 	if (eisa_registered)
 		ret = 0;
 #endif
-#ifdef CONFIG_MCA
-	if (mca_registered)
-		ret = 0;
-#endif
 	return ret;
 }
 
@@ -1584,10 +1469,6 @@
 	if (eisa_registered)
 		eisa_driver_unregister(&el3_eisa_driver);
 #endif
-#ifdef CONFIG_MCA
-	if (mca_registered)
-		mca_unregister_driver(&el3_mca_driver);
-#endif
 }
 
 module_init (el3_init_module);
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index e04ade4..2e53867 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -60,6 +60,7 @@
 config AX88796
 	tristate "ASIX AX88796 NE2000 clone support"
 	depends on (ARM || MIPS || SUPERH)
+	select CRC32
 	select PHYLIB
 	select MDIO_BITBANG
 	---help---
@@ -181,18 +182,6 @@
 	  To compile this driver as a module, choose M here. The module
 	  will be called ne.
 
-config NE2_MCA
-	tristate "NE/2 (ne2000 MCA version) support"
-	depends on MCA_LEGACY
-	select CRC32
-	---help---
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
-
-	  To compile this driver as a module, choose M here. The module
-	  will be called ne2.
-
 config NE2K_PCI
 	tristate "PCI NE2000 and clones support (see help)"
 	depends on PCI
@@ -266,18 +255,6 @@
 
 	  If unsure, say N.
 
-config ULTRAMCA
-	tristate "SMC Ultra MCA support"
-	depends on MCA
-	select CRC32
-	---help---
-	  If you have a network (Ethernet) card of this type and are running
-	  an MCA based system (PS/2), say Y and read the Ethernet-HOWTO,
-	  available from <http://www.tldp.org/docs.html#howto>.
-
-	  To compile this driver as a module, choose M here. The module
-	  will be called smc-mca.
-
 config ULTRA
 	tristate "SMC Ultra support"
 	depends on ISA
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index 3337d7f..d13790b 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -24,6 +24,5 @@
 obj-$(CONFIG_STNIC) += stnic.o 8390.o
 obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
 obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
-obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
 obj-$(CONFIG_WD80x3) += wd.o 8390.o
 obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 11476ca..203ff9d 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -501,6 +501,7 @@
 	.get_settings		= ax_get_settings,
 	.set_settings		= ax_set_settings,
 	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 #ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index dbefd56..8322c54 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -635,6 +635,7 @@
 	.get_settings	= etherh_get_settings,
 	.set_settings	= etherh_set_settings,
 	.get_drvinfo	= etherh_get_drvinfo,
+	.get_ts_info	= ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops etherh_netdev_ops = {
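ethtool_op_get_ts_info() is the generic fallback for the new get_ts_info ethtool operation wired up by the ax88796 and etherh hunks; roughly speaking, the core helper reports software timestamping only and no PTP hardware clock:

	/* Paraphrase of the net/core/ethtool.c helper, not driver code. */
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	return 0;
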
diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c
deleted file mode 100644
index ef85839..0000000
--- a/drivers/net/ethernet/8390/ne2.c
+++ /dev/null
@@ -1,798 +0,0 @@
-/* ne2.c: A NE/2 Ethernet Driver for Linux. */
-/*
-   Based on the NE2000 driver written by Donald Becker (1992-94).
-   modified by Wim Dumon (Apr 1996)
-
-   This software may be used and distributed according to the terms
-   of the GNU General Public License, incorporated herein by reference.
-
-   The author may be reached as wimpie@linux.cc.kuleuven.ac.be
-
-   Currently supported: NE/2
-   This patch was never tested on other MCA-ethernet adapters, but it
-   might work. Just give it a try and let me know if you have problems.
-   Also mail me if it really works, please!
-
-   Changelog:
-   Mon Feb  3 16:26:02 MET 1997
-   - adapted the driver to work with the 2.1.25 kernel
-   - multiple ne2 support (untested)
-   - module support (untested)
-
-   Fri Aug 28 00:18:36 CET 1998 (David Weinehall)
-   - fixed a few minor typos
-   - made the MODULE_PARM conditional (it only works with the v2.1.x kernels)
-   - fixed the module support (Now it's working...)
-
-   Mon Sep  7 19:01:44 CET 1998 (David Weinehall)
-   - added support for Arco Electronics AE/2-card (experimental)
-
-   Mon Sep 14 09:53:42 CET 1998 (David Weinehall)
-   - added support for Compex ENET-16MC/P (experimental)
-
-   Tue Sep 15 16:21:12 CET 1998 (David Weinehall, Magnus Jonsson, Tomas Ogren)
-   - Miscellaneous bugfixes
-
-   Tue Sep 19 16:21:12 CET 1998 (Magnus Jonsson)
-   - Cleanup
-
-   Wed Sep 23 14:33:34 CET 1998 (David Weinehall)
-   - Restructuring and rewriting for v2.1.x compliance
-
-   Wed Oct 14 17:19:21 CET 1998 (David Weinehall)
-   - Added code that unregisters irq and proc-info
-   - Version# bump
-
-   Mon Nov 16 15:28:23 CET 1998 (Wim Dumon)
-   - pass 'dev' as last parameter of request_irq in stead of 'NULL'
-
-   Wed Feb  7 21:24:00 CET 2001 (Alfred Arnold)
-   - added support for the D-Link DE-320CT
-
-   *    WARNING
-	-------
-	This is alpha-test software.  It is not guaranteed to work. As a
-	matter of fact, I'm quite sure there are *LOTS* of bugs in here. I
-	would like to hear from you if you use this driver, even if it works.
-	If it doesn't work, be sure to send me a mail with the problems !
-*/
-
-static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.org>\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/mca-legacy.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "8390.h"
-
-#define DRV_NAME "ne2"
-
-/* Some defines that people can play with if so inclined. */
-
-/* Do we perform extra sanity checks on stuff ? */
-/* #define NE_SANITY_CHECK */
-
-/* Do we implement the read before write bugfix ? */
-/* #define NE_RW_BUGFIX */
-
-/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
-/* #define PACKETBUF_MEMSIZE	0x40 */
-
-
-/* ---- No user-serviceable parts below ---- */
-
-#define NE_BASE	 (dev->base_addr)
-#define NE_CMD	 	0x00
-#define NE_DATAPORT	0x10	/* NatSemi-defined port window offset. */
-#define NE_RESET	0x20	/* Issue a read to reset, a write to clear. */
-#define NE_IO_EXTENT	0x30
-
-#define NE1SM_START_PG	0x20	/* First page of TX buffer */
-#define NE1SM_STOP_PG 	0x40	/* Last page +1 of RX ring */
-#define NESM_START_PG	0x40	/* First page of TX buffer */
-#define NESM_STOP_PG	0x80	/* Last page +1 of RX ring */
-
-/* From the .ADF file: */
-static unsigned int addresses[7] __initdata =
-		{0x1000, 0x2020, 0x8020, 0xa0a0, 0xb0b0, 0xc0c0, 0xc3d0};
-static int irqs[4] __initdata = {3, 4, 5, 9};
-
-/* From the D-Link ADF file: */
-static unsigned int dlink_addresses[4] __initdata =
-                {0x300, 0x320, 0x340, 0x360};
-static int dlink_irqs[8] __initdata = {3, 4, 5, 9, 10, 11, 14, 15};
-
-struct ne2_adapters_t {
-	unsigned int	id;
-	char		*name;
-};
-
-static struct ne2_adapters_t ne2_adapters[] __initdata = {
-	{ 0x6354, "Arco Ethernet Adapter AE/2" },
-	{ 0x70DE, "Compex ENET-16 MC/P" },
-	{ 0x7154, "Novell Ethernet Adapter NE/2" },
-        { 0x56ea, "D-Link DE-320CT" },
-	{ 0x0000, NULL }
-};
-
-extern int netcard_probe(struct net_device *dev);
-
-static int ne2_probe1(struct net_device *dev, int slot);
-
-static void ne_reset_8390(struct net_device *dev);
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
-		int ring_page);
-static void ne_block_input(struct net_device *dev, int count,
-		struct sk_buff *skb, int ring_offset);
-static void ne_block_output(struct net_device *dev, const int count,
-		const unsigned char *buf, const int start_page);
-
-
-/*
- * special code to read the DE-320's MAC address EEPROM.  In contrast to a
- * standard NE design, this is a serial EEPROM (93C46) that has to be read
- * bit by bit.  The EEPROM cotrol port at base + 0x1e has the following
- * layout:
- *
- * Bit 0 = Data out (read from EEPROM)
- * Bit 1 = Data in  (write to EEPROM)
- * Bit 2 = Clock
- * Bit 3 = Chip Select
- * Bit 7 = ~50 kHz clock for defined delays
- *
- */
-
-static void __init dlink_put_eeprom(unsigned char value, unsigned int addr)
-{
-	int z;
-	unsigned char v1, v2;
-
-	/* write the value to the NIC EEPROM register */
-
-	outb(value, addr + 0x1e);
-
-	/* now wait the clock line to toggle twice.  Effectively, we are
-	   waiting (at least) for one clock cycle */
-
-	for (z = 0; z < 2; z++) {
-		do {
-			v1 = inb(addr + 0x1e);
-			v2 = inb(addr + 0x1e);
-		}
-		while (!((v1 ^ v2) & 0x80));
-	}
-}
-
-static void __init dlink_send_eeprom_bit(unsigned int bit, unsigned int addr)
-{
-	/* shift data bit into correct position */
-
-	bit = bit << 1;
-
-	/* write value, keep clock line high for two cycles */
-
-	dlink_put_eeprom(0x09 | bit, addr);
-	dlink_put_eeprom(0x0d | bit, addr);
-	dlink_put_eeprom(0x0d | bit, addr);
-	dlink_put_eeprom(0x09 | bit, addr);
-}
-
-static void __init dlink_send_eeprom_word(unsigned int value, unsigned int len, unsigned int addr)
-{
-	int z;
-
-	/* adjust bits so that they are left-aligned in a 16-bit-word */
-
-	value = value << (16 - len);
-
-	/* shift bits out to the EEPROM */
-
-	for (z = 0; z < len; z++) {
-		dlink_send_eeprom_bit((value & 0x8000) >> 15, addr);
-		value = value << 1;
-	}
-}
-
-static unsigned int __init dlink_get_eeprom(unsigned int eeaddr, unsigned int addr)
-{
-	int z;
-	unsigned int value = 0;
-
-	/* pull the CS line low for a moment.  This resets the EEPROM-
-	   internal logic, and makes it ready for a new command. */
-
-	dlink_put_eeprom(0x01, addr);
-	dlink_put_eeprom(0x09, addr);
-
-	/* send one start bit, read command (1 - 0), plus the address to
-           the EEPROM */
-
-	dlink_send_eeprom_word(0x0180 | (eeaddr & 0x3f), 9, addr);
-
-	/* get the data word.  We clock by sending 0s to the EEPROM, which
-	   get ignored during the read process */
-
-	for (z = 0; z < 16; z++) {
-		dlink_send_eeprom_bit(0, addr);
-		value = (value << 1) | (inb(addr + 0x1e) & 0x01);
-	}
-
-	return value;
-}
-
-/*
- * Note that at boot, this probe only picks up one card at a time.
- */
-
-static int __init do_ne2_probe(struct net_device *dev)
-{
-	static int current_mca_slot = -1;
-	int i;
-	int adapter_found = 0;
-
-	/* Do not check any supplied i/o locations.
-	   POS registers usually don't fail :) */
-
-	/* MCA cards have POS registers.
-	   Autodetecting MCA cards is extremely simple.
-	   Just search for the card. */
-
-	for(i = 0; (ne2_adapters[i].name != NULL) && !adapter_found; i++) {
-		current_mca_slot =
-			mca_find_unused_adapter(ne2_adapters[i].id, 0);
-
-		if((current_mca_slot != MCA_NOTFOUND) && !adapter_found) {
-			int res;
-			mca_set_adapter_name(current_mca_slot,
-					ne2_adapters[i].name);
-			mca_mark_as_used(current_mca_slot);
-
-			res = ne2_probe1(dev, current_mca_slot);
-			if (res)
-				mca_mark_as_unused(current_mca_slot);
-			return res;
-		}
-	}
-	return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init ne2_probe(int unit)
-{
-	struct net_device *dev = alloc_eip_netdev();
-	int err;
-
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	sprintf(dev->name, "eth%d", unit);
-	netdev_boot_setup_check(dev);
-
-	err = do_ne2_probe(dev);
-	if (err)
-		goto out;
-	return dev;
-out:
-	free_netdev(dev);
-	return ERR_PTR(err);
-}
-#endif
-
-static int ne2_procinfo(char *buf, int slot, struct net_device *dev)
-{
-	int len=0;
-
-	len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" );
-	len += sprintf(buf+len, "Driver written by Wim Dumon ");
-	len += sprintf(buf+len, "<wimpie@kotnet.org>\n");
-	len += sprintf(buf+len, "Modified by ");
-	len += sprintf(buf+len, "David Weinehall <tao@acc.umu.se>\n");
-	len += sprintf(buf+len, "and by Magnus Jonsson <bigfoot@acc.umu.se>\n");
-	len += sprintf(buf+len, "Based on the original NE2000 drivers\n" );
-	len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr);
-	len += sprintf(buf+len, "IRQ    : %d\n", dev->irq);
-	len += sprintf(buf+len, "HW addr : %pM\n", dev->dev_addr);
-
-	return len;
-}
-
-static int __init ne2_probe1(struct net_device *dev, int slot)
-{
-	int i, base_addr, irq, retval;
-	unsigned char POS;
-	unsigned char SA_prom[32];
-	const char *name = "NE/2";
-	int start_page, stop_page;
-	static unsigned version_printed;
-
-	if (ei_debug && version_printed++ == 0)
-		printk(version);
-
-	printk("NE/2 ethercard found in slot %d:", slot);
-
-	/* Read base IO and IRQ from the POS-registers */
-	POS = mca_read_stored_pos(slot, 2);
-	if(!(POS % 2)) {
-		printk(" disabled.\n");
-		return -ENODEV;
-	}
-
-	/* handle different POS register structure for D-Link card */
-
-	if (mca_read_stored_pos(slot, 0) == 0xea) {
-		base_addr = dlink_addresses[(POS >> 5) & 0x03];
-		irq = dlink_irqs[(POS >> 2) & 0x07];
-	}
-        else {
-		i = (POS & 0xE)>>1;
-		/* printk("Halleluja sdog, als er na de pijl een 1 staat is 1 - 1 == 0"
-	   	" en zou het moeten werken -> %d\n", i);
-	   	The above line was for remote testing, thanx to sdog ... */
-		base_addr = addresses[i - 1];
-		irq = irqs[(POS & 0x60)>>5];
-	}
-
-	if (!request_region(base_addr, NE_IO_EXTENT, DRV_NAME))
-		return -EBUSY;
-
-#ifdef DEBUG
-	printk("POS info : pos 2 = %#x ; base = %#x ; irq = %ld\n", POS,
-			base_addr, irq);
-#endif
-
-#ifndef CRYNWR_WAY
-	/* Reset the card the way they do it in the Crynwr packet driver */
-	for (i=0; i<8; i++)
-		outb(0x0, base_addr + NE_RESET);
-	inb(base_addr + NE_RESET);
-	outb(0x21, base_addr + NE_CMD);
-	if (inb(base_addr + NE_CMD) != 0x21) {
-		printk("NE/2 adapter not responding\n");
-		retval = -ENODEV;
-		goto out;
-	}
-
-	/* In the crynwr sources they do a RAM-test here. I skip it. I suppose
-	   my RAM is okay.  Suppose your memory is broken.  Then this test
-	   should fail and you won't be able to use your card.  But if I do not
-	   test, you won't be able to use your card, neither.  So this test
-	   won't help you. */
-
-#else  /* _I_ never tested it this way .. Go ahead and try ...*/
-	/* Reset card. Who knows what dain-bramaged state it was left in. */
-	{
-		unsigned long reset_start_time = jiffies;
-
-		/* DON'T change these to inb_p/outb_p or reset will fail on
-		   clones.. */
-		outb(inb(base_addr + NE_RESET), base_addr + NE_RESET);
-
-		while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0)
-			if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
-				printk(" not found (no reset ack).\n");
-				retval = -ENODEV;
-				goto out;
-			}
-
-		outb_p(0xff, base_addr + EN0_ISR);         /* Ack all intr. */
-	}
-#endif
-
-
-	/* Read the 16 bytes of station address PROM.
-	   We must first initialize registers, similar to
-	   NS8390p_init(eifdev, 0).
-	   We can't reliably read the SAPROM address without this.
-	   (I learned the hard way!). */
-	{
-		struct {
-			unsigned char value, offset;
-		} program_seq[] = {
-						/* Select page 0 */
-			{E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD},
-			{0x49,	EN0_DCFG},  /* Set WORD-wide (0x49) access. */
-			{0x00,	EN0_RCNTLO},  /* Clear the count regs. */
-			{0x00,	EN0_RCNTHI},
-			{0x00,	EN0_IMR},  /* Mask completion irq. */
-			{0xFF,	EN0_ISR},
-			{E8390_RXOFF, EN0_RXCR},  /* 0x20  Set to monitor */
-			{E8390_TXOFF, EN0_TXCR},  /* 0x02  and loopback mode. */
-			{32,	EN0_RCNTLO},
-			{0x00,	EN0_RCNTHI},
-			{0x00,	EN0_RSARLO},  /* DMA starting at 0x0000. */
-			{0x00,	EN0_RSARHI},
-			{E8390_RREAD+E8390_START, E8390_CMD},
-		};
-
-		for (i = 0; i < ARRAY_SIZE(program_seq); i++)
-			outb_p(program_seq[i].value, base_addr +
-				program_seq[i].offset);
-
-	}
-	for(i = 0; i < 6 /*sizeof(SA_prom)*/; i+=1) {
-		SA_prom[i] = inb(base_addr + NE_DATAPORT);
-	}
-
-	/* I don't know whether the previous sequence includes the general
-           board reset procedure, so better don't omit it and just overwrite
-           the garbage read from a DE-320 with correct stuff. */
-
-	if (mca_read_stored_pos(slot, 0) == 0xea) {
-		unsigned int v;
-
-		for (i = 0; i < 3; i++) {
- 			v = dlink_get_eeprom(i, base_addr);
-			SA_prom[(i << 1)    ] = v & 0xff;
-			SA_prom[(i << 1) + 1] = (v >> 8) & 0xff;
-		}
-	}
-
-	start_page = NESM_START_PG;
-	stop_page = NESM_STOP_PG;
-
-	dev->irq=irq;
-
-	/* Snarf the interrupt now.  There's no point in waiting since we cannot
-	   share and the board will usually be enabled. */
-	retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev);
-	if (retval) {
-		printk (" unable to get IRQ %d (irqval=%d).\n",
-				dev->irq, retval);
-		goto out;
-	}
-
-	dev->base_addr = base_addr;
-
-	for (i = 0; i < ETH_ALEN; i++)
-		dev->dev_addr[i] = SA_prom[i];
-
-	printk(" %pM\n", dev->dev_addr);
-
-	printk("%s: %s found at %#x, using IRQ %d.\n",
-			dev->name, name, base_addr, dev->irq);
-
-	mca_set_adapter_procfn(slot, (MCA_ProcFn) ne2_procinfo, dev);
-
-	ei_status.name = name;
-	ei_status.tx_start_page = start_page;
-	ei_status.stop_page = stop_page;
-	ei_status.word16 = (2 == 2);
-
-	ei_status.rx_start_page = start_page + TX_PAGES;
-#ifdef PACKETBUF_MEMSIZE
-	/* Allow the packet buffer size to be overridden by know-it-alls. */
-	ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
-#endif
-
-	ei_status.reset_8390 = &ne_reset_8390;
-	ei_status.block_input = &ne_block_input;
-	ei_status.block_output = &ne_block_output;
-	ei_status.get_8390_hdr = &ne_get_8390_hdr;
-
-	ei_status.priv = slot;
-
-	dev->netdev_ops = &eip_netdev_ops;
-	NS8390p_init(dev, 0);
-
-	retval = register_netdev(dev);
-	if (retval)
-		goto out1;
-	return 0;
-out1:
-	mca_set_adapter_procfn( ei_status.priv, NULL, NULL);
-	free_irq(dev->irq, dev);
-out:
-	release_region(base_addr, NE_IO_EXTENT);
-	return retval;
-}
-
-/* Hard reset the card.  This used to pause for the same period that a
-   8390 reset command required, but that shouldn't be necessary. */
-static void ne_reset_8390(struct net_device *dev)
-{
-	unsigned long reset_start_time = jiffies;
-
-	if (ei_debug > 1)
-		printk("resetting the 8390 t=%ld...", jiffies);
-
-	/* DON'T change these to inb_p/outb_p or reset will fail on clones. */
-	outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
-
-	ei_status.txing = 0;
-	ei_status.dmaing = 0;
-
-	/* This check _should_not_ be necessary, omit eventually. */
-	while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
-		if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
-			printk("%s: ne_reset_8390() did not complete.\n",
-					dev->name);
-			break;
-		}
-	outb_p(ENISR_RESET, NE_BASE + EN0_ISR);	/* Ack intr. */
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
-   we don't need to be concerned with ring wrap as the header will be at
-   the start of a page, so we optimize accordingly. */
-
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
-		int ring_page)
-{
-
-	int nic_base = dev->base_addr;
-
-	/* This *shouldn't* happen.
-	   If it does, it's the last thing you'll see */
-	if (ei_status.dmaing) {
-		printk("%s: DMAing conflict in ne_get_8390_hdr "
-				"[DMAstat:%d][irqlock:%d].\n",
-				dev->name, ei_status.dmaing, ei_status.irqlock);
-		return;
-	}
-
-	ei_status.dmaing |= 0x01;
-	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
-	outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
-	outb_p(0, nic_base + EN0_RCNTHI);
-	outb_p(0, nic_base + EN0_RSARLO);		/* On page boundary */
-	outb_p(ring_page, nic_base + EN0_RSARHI);
-	outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
-
-	if (ei_status.word16)
-		insw(NE_BASE + NE_DATAPORT, hdr,
-				sizeof(struct e8390_pkt_hdr)>>1);
-	else
-		insb(NE_BASE + NE_DATAPORT, hdr,
-				sizeof(struct e8390_pkt_hdr));
-
-	outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
-	ei_status.dmaing &= ~0x01;
-}
-
-/* Block input and output, similar to the Crynwr packet driver.  If you
-   are porting to a new ethercard, look at the packet driver source for
-   hints. The NEx000 doesn't share the on-board packet memory -- you have
-   to put the packet out through the "remote DMA" dataport using outb. */
-
-static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb,
-		int ring_offset)
-{
-#ifdef NE_SANITY_CHECK
-	int xfer_count = count;
-#endif
-	int nic_base = dev->base_addr;
-	char *buf = skb->data;
-
-	/* This *shouldn't* happen.
-	   If it does, it's the last thing you'll see */
-	if (ei_status.dmaing) {
-		printk("%s: DMAing conflict in ne_block_input "
-				"[DMAstat:%d][irqlock:%d].\n",
-				dev->name, ei_status.dmaing, ei_status.irqlock);
-		return;
-	}
-	ei_status.dmaing |= 0x01;
-	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
-	outb_p(count & 0xff, nic_base + EN0_RCNTLO);
-	outb_p(count >> 8, nic_base + EN0_RCNTHI);
-	outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
-	outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
-	outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
-	if (ei_status.word16) {
-		insw(NE_BASE + NE_DATAPORT,buf,count>>1);
-		if (count & 0x01) {
-			buf[count-1] = inb(NE_BASE + NE_DATAPORT);
-#ifdef NE_SANITY_CHECK
-			xfer_count++;
-#endif
-		}
-	} else {
-		insb(NE_BASE + NE_DATAPORT, buf, count);
-	}
-
-#ifdef NE_SANITY_CHECK
-	/* This was for the ALPHA version only, but enough people have
-	   been encountering problems so it is still here.  If you see
-	   this message you either 1) have a slightly incompatible clone
-	   or 2) have noise/speed problems with your bus. */
-	if (ei_debug > 1) {	/* DMA termination address check... */
-		int addr, tries = 20;
-		do {
-			/* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
-			   -- it's broken for Rx on some cards! */
-			int high = inb_p(nic_base + EN0_RSARHI);
-			int low = inb_p(nic_base + EN0_RSARLO);
-			addr = (high << 8) + low;
-			if (((ring_offset + xfer_count) & 0xff) == low)
-				break;
-		} while (--tries > 0);
-		if (tries <= 0)
-			printk("%s: RX transfer address mismatch,"
-				"%#4.4x (expected) vs. %#4.4x (actual).\n",
-				dev->name, ring_offset + xfer_count, addr);
-	}
-#endif
-	outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
-	ei_status.dmaing &= ~0x01;
-}
-
-static void ne_block_output(struct net_device *dev, int count,
-		const unsigned char *buf, const int start_page)
-{
-	int nic_base = NE_BASE;
-	unsigned long dma_start;
-#ifdef NE_SANITY_CHECK
-	int retries = 0;
-#endif
-
-	/* Round the count up for word writes. Do we need to do this?
-	   What effect will an odd byte count have on the 8390?
-	   I should check someday. */
-	if (ei_status.word16 && (count & 0x01))
-		count++;
-
-	/* This *shouldn't* happen.
-	   If it does, it's the last thing you'll see */
-	if (ei_status.dmaing) {
-		printk("%s: DMAing conflict in ne_block_output."
-				"[DMAstat:%d][irqlock:%d]\n",
-				dev->name, ei_status.dmaing, ei_status.irqlock);
-		return;
-	}
-	ei_status.dmaing |= 0x01;
-	/* We should already be in page 0, but to be safe... */
-	outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
-
-#ifdef NE_SANITY_CHECK
-retry:
-#endif
-
-#ifdef NE8390_RW_BUGFIX
-	/* Handle the read-before-write bug the same way as the
-	   Crynwr packet driver -- the NatSemi method doesn't work.
-	   Actually this doesn't always work either, but if you have
-	   problems with your NEx000 this is better than nothing! */
-	outb_p(0x42, nic_base + EN0_RCNTLO);
-	outb_p(0x00, nic_base + EN0_RCNTHI);
-	outb_p(0x42, nic_base + EN0_RSARLO);
-	outb_p(0x00, nic_base + EN0_RSARHI);
-	outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
-	/* Make certain that the dummy read has occurred. */
-	SLOW_DOWN_IO;
-	SLOW_DOWN_IO;
-	SLOW_DOWN_IO;
-#endif
-
-	outb_p(ENISR_RDC, nic_base + EN0_ISR);
-
-	/* Now the normal output. */
-	outb_p(count & 0xff, nic_base + EN0_RCNTLO);
-	outb_p(count >> 8,   nic_base + EN0_RCNTHI);
-	outb_p(0x00, nic_base + EN0_RSARLO);
-	outb_p(start_page, nic_base + EN0_RSARHI);
-
-	outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
-	if (ei_status.word16) {
-		outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
-	} else {
-		outsb(NE_BASE + NE_DATAPORT, buf, count);
-	}
-
-	dma_start = jiffies;
-
-#ifdef NE_SANITY_CHECK
-	/* This was for the ALPHA version only, but enough people have
-	   been encountering problems so it is still here. */
-
-	if (ei_debug > 1) {		/* DMA termination address check... */
-		int addr, tries = 20;
-		do {
-			int high = inb_p(nic_base + EN0_RSARHI);
-			int low = inb_p(nic_base + EN0_RSARLO);
-			addr = (high << 8) + low;
-			if ((start_page << 8) + count == addr)
-				break;
-		} while (--tries > 0);
-		if (tries <= 0) {
-			printk("%s: Tx packet transfer address mismatch,"
-					"%#4.4x (expected) vs. %#4.4x (actual).\n",
-					dev->name, (start_page << 8) + count, addr);
-			if (retries++ == 0)
-				goto retry;
-		}
-	}
-#endif
-
-	while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
-		if (time_after(jiffies, dma_start + 2*HZ/100)) {		/* 20ms */
-			printk("%s: timeout waiting for Tx RDC.\n", dev->name);
-			ne_reset_8390(dev);
-			NS8390p_init(dev, 1);
-			break;
-		}
-
-	outb_p(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
-	ei_status.dmaing &= ~0x01;
-}
-
-
-#ifdef MODULE
-#define MAX_NE_CARDS	4	/* Max number of NE cards per module */
-static struct net_device *dev_ne[MAX_NE_CARDS];
-static int io[MAX_NE_CARDS];
-static int irq[MAX_NE_CARDS];
-static int bad[MAX_NE_CARDS];	/* 0xbad = bad sig or no reset ack */
-MODULE_LICENSE("GPL");
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(bad, int, NULL, 0);
-MODULE_PARM_DESC(io, "(ignored)");
-MODULE_PARM_DESC(irq, "(ignored)");
-MODULE_PARM_DESC(bad, "(ignored)");
-
-/* Module code fixed by David Weinehall */
-
-int __init init_module(void)
-{
-	struct net_device *dev;
-	int this_dev, found = 0;
-
-	for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-		dev = alloc_eip_netdev();
-		if (!dev)
-			break;
-		dev->irq = irq[this_dev];
-		dev->mem_end = bad[this_dev];
-		dev->base_addr = io[this_dev];
-		if (do_ne2_probe(dev) == 0) {
-			dev_ne[found++] = dev;
-			continue;
-		}
-		free_netdev(dev);
-		break;
-	}
-	if (found)
-		return 0;
-	printk(KERN_WARNING "ne2.c: No NE/2 card found\n");
-	return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
-	mca_mark_as_unused(ei_status.priv);
-	mca_set_adapter_procfn( ei_status.priv, NULL, NULL);
-	free_irq(dev->irq, dev);
-	release_region(dev->base_addr, NE_IO_EXTENT);
-}
-
-void __exit cleanup_module(void)
-{
-	int this_dev;
-
-	for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-		struct net_device *dev = dev_ne[this_dev];
-		if (dev) {
-			unregister_netdev(dev);
-			cleanup_card(dev);
-			free_netdev(dev);
-		}
-	}
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/smc-mca.c b/drivers/net/ethernet/8390/smc-mca.c
deleted file mode 100644
index 7a68590..0000000
--- a/drivers/net/ethernet/8390/smc-mca.c
+++ /dev/null
@@ -1,575 +0,0 @@
-/* smc-mca.c: A SMC Ultra ethernet driver for linux. */
-/*
-    Most of this driver, except for ultramca_probe is nearly
-    verbatim from smc-ultra.c by Donald Becker. The rest is
-    written and copyright 1996 by David Weis, weisd3458@uni.edu
-
-    This is a driver for the SMC Ultra and SMC EtherEZ ethercards.
-
-    This driver uses the cards in the 8390-compatible, shared memory mode.
-    Most of the run-time complexity is handled by the generic code in
-    8390.c.
-
-    This driver enables the shared memory only when doing the actual data
-    transfers to avoid a bug in early version of the card that corrupted
-    data transferred by a AHA1542.
-
-    This driver does not support the programmed-I/O data transfer mode of
-    the EtherEZ.  That support (if available) is smc-ez.c.  Nor does it
-    use the non-8390-compatible "Altego" mode. (No support currently planned.)
-
-    Changelog:
-
-    Paul Gortmaker	 : multiple card support for module users.
-    David Weis		 : Micro Channel-ized it.
-    Tom Sightler	 : Added support for IBM PS/2 Ethernet Adapter/A
-    Christopher Turcksin : Changed MCA-probe so that multiple adapters are
-			   found correctly (Jul 16, 1997)
-    Chris Beauregard	 : Tried to merge the two changes above (Dec 15, 1997)
-    Tom Sightler	 : Fixed minor detection bug caused by above merge
-    Tom Sightler	 : Added support for three more Western Digital
-			   MCA-adapters
-    Tom Sightler	 : Added support for 2.2.x mca_find_unused_adapter
-    Hartmut Schmidt	 : - Modified parameter detection to handle each
-			     card differently depending on a switch-list
-			   - 'card_ver' removed from the adapter list
-			   - Some minor bug fixes
-*/
-
-#include <linux/mca.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "smc-mca"
-
-static int ultramca_open(struct net_device *dev);
-static void ultramca_reset_8390(struct net_device *dev);
-static void ultramca_get_8390_hdr(struct net_device *dev,
-                                  struct e8390_pkt_hdr *hdr,
-                                  int ring_page);
-static void ultramca_block_input(struct net_device *dev, int count,
-                                 struct sk_buff *skb,
-                                 int ring_offset);
-static void ultramca_block_output(struct net_device *dev, int count,
-                                  const unsigned char *buf,
-                                  const int start_page);
-static int ultramca_close_card(struct net_device *dev);
-
-#define START_PG        0x00    /* First page of TX buffer */
-
-#define ULTRA_CMDREG 0      /* Offset to ASIC command register. */
-#define ULTRA_RESET  0x80   /* Board reset, in ULTRA_CMDREG. */
-#define ULTRA_MEMENB 0x40   /* Enable the shared memory. */
-#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
-#define ULTRA_IO_EXTENT 32
-#define EN0_ERWCNT      0x08  /* Early receive warning count. */
-
-#define _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A            0
-#define _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A            1
-#define _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A              2
-#define _6fc1_WD_Starcard_PLUS_A_WD8003ST_A                            3
-#define _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A                        4
-#define _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A        5
-#define _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A        6
-#define _efe5_IBM_PS2_Adapter_A_for_Ethernet                           7
-
-struct smc_mca_adapters_t {
-	unsigned int id;
-	char *name;
-};
-
-#define MAX_ULTRAMCA_CARDS 4	/* Max number of Ultra cards per module */
-
-static int ultra_io[MAX_ULTRAMCA_CARDS];
-static int ultra_irq[MAX_ULTRAMCA_CARDS];
-MODULE_LICENSE("GPL");
-
-module_param_array(ultra_io, int, NULL, 0);
-module_param_array(ultra_irq, int, NULL, 0);
-MODULE_PARM_DESC(ultra_io, "SMC Ultra/EtherEZ MCA I/O base address(es)");
-MODULE_PARM_DESC(ultra_irq, "SMC Ultra/EtherEZ MCA IRQ number(s)");
-
-static const struct {
-  unsigned int base_addr;
-} addr_table[] = {
-    { 0x0800 },
-    { 0x1800 },
-    { 0x2800 },
-    { 0x3800 },
-    { 0x4800 },
-    { 0x5800 },
-    { 0x6800 },
-    { 0x7800 },
-    { 0x8800 },
-    { 0x9800 },
-    { 0xa800 },
-    { 0xb800 },
-    { 0xc800 },
-    { 0xd800 },
-    { 0xe800 },
-    { 0xf800 }
-};
-
-#define MEM_MASK 64
-
-static const struct {
-  unsigned char mem_index;
-  unsigned long mem_start;
-  unsigned char num_pages;
-} mem_table[] = {
-    { 16, 0x0c0000, 40 },
-    { 18, 0x0c4000, 40 },
-    { 20, 0x0c8000, 40 },
-    { 22, 0x0cc000, 40 },
-    { 24, 0x0d0000, 40 },
-    { 26, 0x0d4000, 40 },
-    { 28, 0x0d8000, 40 },
-    { 30, 0x0dc000, 40 },
-    {144, 0xfc0000, 40 },
-    {148, 0xfc8000, 40 },
-    {154, 0xfd0000, 40 },
-    {156, 0xfd8000, 40 },
-    {  0, 0x0c0000, 20 },
-    {  1, 0x0c2000, 20 },
-    {  2, 0x0c4000, 20 },
-    {  3, 0x0c6000, 20 }
-};
-
-#define IRQ_MASK 243
-static const struct {
-   unsigned char new_irq;
-   unsigned char old_irq;
-} irq_table[] = {
-   {  3,  3 },
-   {  4,  4 },
-   { 10, 10 },
-   { 14, 15 }
-};
-
-static short smc_mca_adapter_ids[] __initdata = {
-	0x61c8,
-	0x61c9,
-	0x6fc0,
-	0x6fc1,
-	0x6fc2,
-	0xefd4,
-	0xefd5,
-	0xefe5,
-	0x0000
-};
-
-static char *smc_mca_adapter_names[] __initdata = {
-	"SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
-	"SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
-	"WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
-	"WD Starcard PLUS/A (WD8003ST/A)",
-	"WD Ethercard PLUS 10T/A (WD8003W/A)",
-	"IBM PS/2 Adapter/A for Ethernet UTP/AUI (WD8013WP/A)",
-	"IBM PS/2 Adapter/A for Ethernet BNC/AUI (WD8013EP/A)",
-	"IBM PS/2 Adapter/A for Ethernet",
-	NULL
-};
-
-static int ultra_found = 0;
-
-
-static const struct net_device_ops ultramca_netdev_ops = {
-	.ndo_open		= ultramca_open,
-	.ndo_stop		= ultramca_close_card,
-
-	.ndo_start_xmit		= ei_start_xmit,
-	.ndo_tx_timeout		= ei_tx_timeout,
-	.ndo_get_stats		= ei_get_stats,
-	.ndo_set_rx_mode	= ei_set_multicast_list,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address 	= eth_mac_addr,
-	.ndo_change_mtu		= eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller 	= ei_poll,
-#endif
-};
-
-static int __init ultramca_probe(struct device *gen_dev)
-{
-	unsigned short ioaddr;
-	struct net_device *dev;
-	unsigned char reg4, num_pages;
-	struct mca_device *mca_dev = to_mca_device(gen_dev);
-	char slot = mca_dev->slot;
-	unsigned char pos2 = 0xff, pos3 = 0xff, pos4 = 0xff, pos5 = 0xff;
-	int i, rc;
-	int adapter = mca_dev->index;
-	int tbase = 0;
-	int tirq = 0;
-	int base_addr = ultra_io[ultra_found];
-	int irq = ultra_irq[ultra_found];
-
-	if (base_addr || irq) {
-		printk(KERN_INFO "Probing for SMC MCA adapter");
-		if (base_addr) {
-			printk(KERN_INFO " at I/O address 0x%04x%c",
-			       base_addr, irq ? ' ' : '\n');
-		}
-		if (irq) {
-			printk(KERN_INFO "using irq %d\n", irq);
-		}
-	}
-
-	tirq = 0;
-	tbase = 0;
-
-	/* If we're trying to match a specificied irq or io address,
-	 * we'll reject the adapter found unless it's the one we're
-	 * looking for */
-
-	pos2 = mca_device_read_stored_pos(mca_dev, 2); /* io_addr */
-	pos3 = mca_device_read_stored_pos(mca_dev, 3); /* shared mem */
-	pos4 = mca_device_read_stored_pos(mca_dev, 4); /* ROM bios addr range */
-	pos5 = mca_device_read_stored_pos(mca_dev, 5); /* irq, media and RIPL */
-
-	/* Test the following conditions:
-	 * - If an irq parameter is supplied, compare it
-	 *   with the irq of the adapter we found
-	 * - If a base_addr paramater is given, compare it
-	 *   with the base_addr of the adapter we found
-	 * - Check that the irq and the base_addr of the
-	 *   adapter we found is not already in use by
-	 *   this driver
-	 */
-
-	switch (mca_dev->index) {
-	case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
-	case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
-	case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
-	case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
-		{
-			tbase = addr_table[(pos2 & 0xf0) >> 4].base_addr;
-			tirq  = irq_table[(pos5 & 0xc) >> 2].new_irq;
-			break;
-		}
-	case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
-	case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
-	case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
-	case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
-		{
-			tbase = ((pos2 & 0x0fe) * 0x10);
-			tirq  = irq_table[(pos5 & 3)].old_irq;
-			break;
-		}
-	}
-
-	if(!tirq || !tbase ||
-	   (irq && irq != tirq) ||
-	   (base_addr && tbase != base_addr))
-		/* FIXME: we're trying to force the ordering of the
-		 * devices here, there should be a way of getting this
-		 * to happen */
-		return -ENXIO;
-
-        /* Adapter found. */
-	dev  = alloc_ei_netdev();
-	if(!dev)
-		return -ENODEV;
-
-	SET_NETDEV_DEV(dev, gen_dev);
-	mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]);
-	mca_device_set_claim(mca_dev, 1);
-
-	printk(KERN_INFO "smc_mca: %s found in slot %d\n",
-		       smc_mca_adapter_names[adapter], slot + 1);
-
-	ultra_found++;
-
-	dev->base_addr = ioaddr = mca_device_transform_ioport(mca_dev, tbase);
-	dev->irq       = mca_device_transform_irq(mca_dev, tirq);
-	dev->mem_start = 0;
-	num_pages      = 40;
-
-	switch (adapter) {	/* card-# in const array above [hs] */
-		case _61c8_SMC_Ethercard_PLUS_Elite_A_BNC_AUI_WD8013EP_A:
-		case _61c9_SMC_Ethercard_PLUS_Elite_A_UTP_AUI_WD8013EP_A:
-		{
-			for (i = 0; i < 16; i++) { /* taking 16 counts
-						    * up to 15 [hs] */
-				if (mem_table[i].mem_index == (pos3 & ~MEM_MASK)) {
-					dev->mem_start = (unsigned long)
-					  mca_device_transform_memory(mca_dev, (void *)mem_table[i].mem_start);
-					num_pages = mem_table[i].num_pages;
-				}
-			}
-			break;
-		}
-		case _6fc0_WD_Ethercard_PLUS_A_WD8003E_A_OR_WD8003ET_A:
-		case _6fc1_WD_Starcard_PLUS_A_WD8003ST_A:
-		case _6fc2_WD_Ethercard_PLUS_10T_A_WD8003W_A:
-		case _efe5_IBM_PS2_Adapter_A_for_Ethernet:
-		{
-			dev->mem_start = (unsigned long)
-			  mca_device_transform_memory(mca_dev, (void *)((pos3 & 0xfc) * 0x1000));
-			num_pages = 0x40;
-			break;
-		}
-		case _efd4_IBM_PS2_Adapter_A_for_Ethernet_UTP_AUI_WD8013WP_A:
-		case _efd5_IBM_PS2_Adapter_A_for_Ethernet_BNC_AUI_WD8013WP_A:
-		{
-			/* courtesy of gamera@quartz.ocn.ne.jp, pos3 indicates
-			 * the index of the 0x2000 step.
-			 * beware different number of pages [hs]
-			 */
-			dev->mem_start = (unsigned long)
-			  mca_device_transform_memory(mca_dev, (void *)(0xc0000 + (0x2000 * (pos3 & 0xf))));
-			num_pages = 0x20 + (2 * (pos3 & 0x10));
-			break;
-		}
-	}
-
-	/* sanity check, shouldn't happen */
-	if (dev->mem_start == 0) {
-		rc = -ENODEV;
-		goto err_unclaim;
-	}
-
-	if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) {
-		rc = -ENODEV;
-		goto err_unclaim;
-	}
-
-	reg4 = inb(ioaddr + 4) & 0x7f;
-	outb(reg4, ioaddr + 4);
-
-	for (i = 0; i < 6; i++)
-		dev->dev_addr[i] = inb(ioaddr + 8 + i);
-
-	printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %pM",
-	       slot + 1, ioaddr, dev->dev_addr);
-
-	/* Switch from the station address to the alternate register set
-	 * and read the useful registers there.
-	 */
-
-	outb(0x80 | reg4, ioaddr + 4);
-
-	/* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot.
-	 */
-
-	outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
-
-	/* Switch back to the station address register set so that
-	 * the MS-DOS driver can find the card after a warm boot.
-	 */
-
-	outb(reg4, ioaddr + 4);
-
-	dev_set_drvdata(gen_dev, dev);
-
-	/* The 8390 isn't at the base address, so fake the offset
-	 */
-
-	dev->base_addr = ioaddr + ULTRA_NIC_OFFSET;
-
-	ei_status.name = "SMC Ultra MCA";
-	ei_status.word16 = 1;
-	ei_status.tx_start_page = START_PG;
-	ei_status.rx_start_page = START_PG + TX_PAGES;
-	ei_status.stop_page = num_pages;
-
-	ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG) * 256);
-	if (!ei_status.mem) {
-		rc = -ENOMEM;
-		goto err_release_region;
-	}
-
-	dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG) * 256;
-
-	printk(", IRQ %d memory %#lx-%#lx.\n",
-	dev->irq, dev->mem_start, dev->mem_end - 1);
-
-	ei_status.reset_8390 = &ultramca_reset_8390;
-	ei_status.block_input = &ultramca_block_input;
-	ei_status.block_output = &ultramca_block_output;
-	ei_status.get_8390_hdr = &ultramca_get_8390_hdr;
-
-	ei_status.priv = slot;
-
-	dev->netdev_ops = &ultramca_netdev_ops;
-
-	NS8390_init(dev, 0);
-
-	rc = register_netdev(dev);
-	if (rc)
-		goto err_unmap;
-
-	return 0;
-
-err_unmap:
-	iounmap(ei_status.mem);
-err_release_region:
-	release_region(ioaddr, ULTRA_IO_EXTENT);
-err_unclaim:
-	mca_device_set_claim(mca_dev, 0);
-	free_netdev(dev);
-	return rc;
-}
-
-static int ultramca_open(struct net_device *dev)
-{
-	int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
-	int retval;
-
-	if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
-		return retval;
-
-	outb(ULTRA_MEMENB, ioaddr); /* Enable memory */
-	outb(0x80, ioaddr + 5);     /* ??? */
-	outb(0x01, ioaddr + 6);     /* Enable interrupts and memory. */
-	outb(0x04, ioaddr + 5);     /* ??? */
-
-	/* Set the early receive warning level in window 0 high enough not
-	 * to receive ERW interrupts.
-	 */
-
-	/* outb_p(E8390_NODMA + E8390_PAGE0, dev->base_addr);
-	 * outb(0xff, dev->base_addr + EN0_ERWCNT);
-	 */
-
-	ei_open(dev);
-	return 0;
-}
-
-static void ultramca_reset_8390(struct net_device *dev)
-{
-	int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
-
-	outb(ULTRA_RESET, ioaddr);
-	if (ei_debug > 1)
-		printk("resetting Ultra, t=%ld...", jiffies);
-	ei_status.txing = 0;
-
-	outb(0x80, ioaddr + 5);     /* ??? */
-	outb(0x01, ioaddr + 6);     /* Enable interrupts and memory. */
-
-	if (ei_debug > 1)
-		printk("reset done\n");
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- * we don't need to be concerned with ring wrap as the header will be at
- * the start of a page, so we optimize accordingly.
- */
-
-static void ultramca_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
-	void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG) << 8);
-
-#ifdef notdef
-	/* Officially this is what we are doing, but the readl() is faster */
-	memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
-#else
-	((unsigned int*)hdr)[0] = readl(hdr_start);
-#endif
-}
-
-/* Block input and output are easy on shared memory ethercards, the only
- * complication is when the ring buffer wraps.
- */
-
-static void ultramca_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
-	void __iomem *xfer_start = ei_status.mem + ring_offset - START_PG * 256;
-
-	if (ring_offset + count > ei_status.stop_page * 256) {
-		/* We must wrap the input move. */
-		int semi_count = ei_status.stop_page * 256 - ring_offset;
-		memcpy_fromio(skb->data, xfer_start, semi_count);
-		count -= semi_count;
-		memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
-	} else {
-		memcpy_fromio(skb->data, xfer_start, count);
-	}
-
-}
-
-static void ultramca_block_output(struct net_device *dev, int count, const unsigned char *buf,
-                int start_page)
-{
-	void __iomem *shmem = ei_status.mem + ((start_page - START_PG) << 8);
-
-	memcpy_toio(shmem, buf, count);
-}
-
-static int ultramca_close_card(struct net_device *dev)
-{
-	int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
-
-	netif_stop_queue(dev);
-
-	if (ei_debug > 1)
-		printk("%s: Shutting down ethercard.\n", dev->name);
-
-	outb(0x00, ioaddr + 6);     /* Disable interrupts. */
-	free_irq(dev->irq, dev);
-
-	NS8390_init(dev, 0);
-	/* We should someday disable shared memory and change to 8-bit mode
-         * "just in case"...
-	 */
-
-	return 0;
-}
-
-static int ultramca_remove(struct device *gen_dev)
-{
-	struct mca_device *mca_dev = to_mca_device(gen_dev);
-	struct net_device *dev = dev_get_drvdata(gen_dev);
-
-	if (dev) {
-		/* NB: ultra_close_card() does free_irq */
-		int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET;
-
-		unregister_netdev(dev);
-		mca_device_set_claim(mca_dev, 0);
-		release_region(ioaddr, ULTRA_IO_EXTENT);
-		iounmap(ei_status.mem);
-		free_netdev(dev);
-	}
-	return 0;
-}
-
-
-static struct mca_driver ultra_driver = {
-	.id_table = smc_mca_adapter_ids,
-	.driver = {
-		.name = "smc-mca",
-		.bus = &mca_bus_type,
-		.probe = ultramca_probe,
-		.remove = ultramca_remove,
-	}
-};
-
-static int __init ultramca_init_module(void)
-{
-	if(!MCA_bus)
-		return -ENXIO;
-
-	mca_register_driver(&ultra_driver);
-
-	return ultra_found ? 0 : -ENXIO;
-}
-
-static void __exit ultramca_cleanup_module(void)
-{
-	mca_unregister_driver(&ultra_driver);
-}
-module_init(ultramca_init_module);
-module_exit(ultramca_cleanup_module);
-
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c63a64c..a11af5c 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -174,6 +174,7 @@
 source "drivers/net/ethernet/toshiba/Kconfig"
 source "drivers/net/ethernet/tundra/Kconfig"
 source "drivers/net/ethernet/via/Kconfig"
+source "drivers/net/ethernet/wiznet/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"
 
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 9676a51..878ad32 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -73,5 +73,6 @@
 obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
 obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
 obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
 obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index d896816..d920a52 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -114,15 +114,6 @@
 #define DMA_BURST_SIZE 128
 #endif
 
-/* Used to pass the media type, etc.
-   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
-   The media type is usually passed in 'options[]'.
-   These variables are deprecated, use ethtool instead. -Ion
-*/
-#define MAX_UNITS 8		/* More are supported, limit only on options */
-static int options[MAX_UNITS] = {0, };
-static int full_duplex[MAX_UNITS] = {0, };
-
 /* Operational parameters that are set at compile time. */
 
 /* The "native" ring sizes are either 256 or 2048.
@@ -192,8 +183,6 @@
 module_param(rx_copybreak, int, 0);
 module_param(intr_latency, int, 0);
 module_param(small_frames, int, 0);
-module_param_array(options, int, NULL, 0);
-module_param_array(full_duplex, int, NULL, 0);
 module_param(enable_hw_cksum, int, 0);
 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
 MODULE_PARM_DESC(mtu, "MTU (all boards)");
@@ -201,8 +190,6 @@
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
-MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
-MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
 
 /*
@@ -657,10 +644,10 @@
 static int __devinit starfire_init_one(struct pci_dev *pdev,
 				       const struct pci_device_id *ent)
 {
+	struct device *d = &pdev->dev;
 	struct netdev_private *np;
-	int i, irq, option, chip_idx = ent->driver_data;
+	int i, irq, chip_idx = ent->driver_data;
 	struct net_device *dev;
-	static int card_idx = -1;
 	long ioaddr;
 	void __iomem *base;
 	int drv_flags, io_size;
@@ -673,15 +660,13 @@
 		printk(version);
 #endif
 
-	card_idx++;
-
 	if (pci_enable_device (pdev))
 		return -EIO;
 
 	ioaddr = pci_resource_start(pdev, 0);
 	io_size = pci_resource_len(pdev, 0);
 	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
-		printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
+		dev_err(d, "no PCI MEM resources, aborting\n");
 		return -ENODEV;
 	}
 
@@ -694,14 +679,14 @@
 	irq = pdev->irq;
 
 	if (pci_request_regions (pdev, DRV_NAME)) {
-		printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
+		dev_err(d, "cannot reserve PCI resources, aborting\n");
 		goto err_out_free_netdev;
 	}
 
 	base = ioremap(ioaddr, io_size);
 	if (!base) {
-		printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
-			card_idx, io_size, ioaddr);
+		dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
+			io_size, ioaddr);
 		goto err_out_free_res;
 	}
 
@@ -753,9 +738,6 @@
 	/* wait a little longer */
 	udelay(1000);
 
-	dev->base_addr = (unsigned long)base;
-	dev->irq = irq;
-
 	np = netdev_priv(dev);
 	np->dev = dev;
 	np->base = base;
@@ -772,21 +754,6 @@
 
 	drv_flags = netdrv_tbl[chip_idx].drv_flags;
 
-	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
-	if (dev->mem_start)
-		option = dev->mem_start;
-
-	/* The lower four bits are the media type. */
-	if (option & 0x200)
-		np->mii_if.full_duplex = 1;
-
-	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
-		np->mii_if.full_duplex = 1;
-
-	if (np->mii_if.full_duplex)
-		np->mii_if.force_media = 1;
-	else
-		np->mii_if.force_media = 0;
 	np->speed100 = 1;
 
 	/* timer resolution is 128 * 0.8us */
@@ -909,13 +876,14 @@
 	const __be32 *fw_rx_data, *fw_tx_data;
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base;
+	const int irq = np->pci_dev->irq;
 	int i, retval;
 	size_t tx_size, rx_size;
 	size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
 
 	/* Do we ever need to reset the chip??? */
 
-	retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+	retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (retval)
 		return retval;
 
@@ -924,7 +892,7 @@
 	writel(1, ioaddr + PCIDeviceConfig);
 	if (debug > 1)
 		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-		       dev->name, dev->irq);
+		       dev->name, irq);
 
 	/* Allocate the various queues. */
 	if (!np->queue_mem) {
@@ -935,7 +903,7 @@
 		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
 		np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
 		if (np->queue_mem == NULL) {
-			free_irq(dev->irq, dev);
+			free_irq(irq, dev);
 			return -ENOMEM;
 		}
 
@@ -1962,7 +1930,7 @@
 		}
 	}
 
-	free_irq(dev->irq, dev);
+	free_irq(np->pci_dev->irq, dev);
 
 	/* Free all the skbuffs in the Rx queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
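
The starfire conversion above drops the cached dev->irq and the hand-rolled card_idx counter in favour of reading the IRQ straight from struct pci_dev and letting dev_err() tag messages with the device name. A minimal sketch of that pattern, with hypothetical names (not the driver's actual code):

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* Hypothetical open-time helper: the IRQ comes from the PCI core via
 * pdev->irq, and failures are reported with dev_err(), which already
 * prefixes the message with the PCI device name. */
static int example_request_irq(struct pci_dev *pdev, struct net_device *ndev,
			       irq_handler_t handler)
{
	int err;

	err = request_irq(pdev->irq, handler, IRQF_SHARED, ndev->name, ndev);
	if (err)
		dev_err(&pdev->dev, "cannot get IRQ %d, aborting\n", pdev->irq);
	return err;
}
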
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index ab4daec..f816426 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -548,6 +548,25 @@
 	return 0;
 }
 
+static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
+	struct ethtool_ts_info *info)
+{
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_SYS_HARDWARE;
+	info->phc_index = -1;
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+	return 0;
+}
+
 static const struct ethtool_ops bfin_mac_ethtool_ops = {
 	.get_settings = bfin_mac_ethtool_getsettings,
 	.set_settings = bfin_mac_ethtool_setsettings,
@@ -555,6 +574,7 @@
 	.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
 	.get_wol = bfin_mac_ethtool_getwol,
 	.set_wol = bfin_mac_ethtool_setwol,
+	.get_ts_info = bfin_mac_ethtool_get_ts_info,
 };
 
 /**************************************************************************/
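
The get_ts_info hook added above is what user space queries (as ethtool -T does) for hardware timestamping capabilities. A user-space sketch that reads it back through the standard ETHTOOL_GET_TS_INFO ioctl; the interface name "eth0" is an assumption:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface */
	ifr.ifr_data = (char *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GET_TS_INFO");
		close(fd);
		return 1;
	}
	printf("so_timestamping=0x%x phc_index=%d\n",
	       info.so_timestamping, info.phc_index);
	close(fd);
	return 0;
}
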
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index f4c228e..f2958df 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -213,10 +213,10 @@
 						(const void *)priv->rx_buff[entry],
 						pkt_len);
 			skb->protocol = eth_type_trans(skb, dev);
-			netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n",
+			netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data %p len %u\n",
 				   ((u_short *)skb->data)[6],
 				   skb->data + 6, skb->data,
-				   (int)skb->data, (int)skb->len);
+				   skb->data, skb->len);
 
 			netif_rx(skb);
 			dev->stats.rx_packets++;
@@ -566,10 +566,10 @@
 
 	/* Fill in a Tx ring entry */
 
-	netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n",
+	netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data %p len %u\n",
 		   ((u_short *)skb->data)[6],
 		   skb->data + 6, skb->data,
-		   (int)skb->data, (int)skb->len);
+		   skb->data, skb->len);
 
 	local_irq_save(flags);
 
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 70ed79c..84219df 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -558,21 +558,18 @@
 			printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
 			return 0;
 		}
-		dev->irq = (unsigned short)IRQ_AUTO_5;
+		dev->irq = IRQ_AUTO_5;
 	}
 	else {
-		/* For VME-RieblCards, request a free VME int;
-		 * (This must be unsigned long, since dev->irq is short and the
-		 * IRQ_MACHSPEC bit would be cut off...)
-		 */
-		unsigned long irq = atari_register_vme_int();
+		/* For VME-RieblCards, request a free VME int */
+		unsigned int irq = atari_register_vme_int();
 		if (!irq) {
 			printk( "Lance: request for VME interrupt failed\n" );
 			return 0;
 		}
 		if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
 		            "Riebl-VME Ethernet", dev)) {
-			printk( "Lance: request for irq %ld failed\n", irq );
+			printk( "Lance: request for irq %u failed\n", irq );
 			return 0;
 		}
 		dev->irq = irq;
diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c
index 86dd957..c771de7 100644
--- a/drivers/net/ethernet/amd/depca.c
+++ b/drivers/net/ethernet/amd/depca.c
@@ -155,23 +155,10 @@
     2 depca's in a PC).
 
     ************************************************************************
-    Support for MCA EtherWORKS cards added 11-3-98.
+    Support for MCA EtherWORKS cards added 11-3-98. (MCA since deleted)
     Verified to work with up to 2 DE212 cards in a system (although not
       fully stress-tested).
 
-    Currently known bugs/limitations:
-
-    Note:  with the MCA stuff as a module, it trusts the MCA configuration,
-           not the command line for IRQ and memory address.  You can
-           specify them if you want, but it will throw your values out.
-           You still have to pass the IO address it was configured as
-           though.
-
-    ************************************************************************
-    TO DO:
-    ------
-
-
     Revision History
     ----------------
 
@@ -261,10 +248,6 @@
 #include <asm/io.h>
 #include <asm/dma.h>
 
-#ifdef CONFIG_MCA
-#include <linux/mca.h>
-#endif
-
 #ifdef CONFIG_EISA
 #include <linux/eisa.h>
 #endif
@@ -360,44 +343,6 @@
 };
 #endif
 
-#ifdef CONFIG_MCA
-/*
-** Adapter ID for the MCA EtherWORKS DE210/212 adapter
-*/
-#define DE210_ID 0x628d
-#define DE212_ID 0x6def
-
-static short depca_mca_adapter_ids[] = {
-	DE210_ID,
-	DE212_ID,
-	0x0000
-};
-
-static char *depca_mca_adapter_name[] = {
-	"DEC EtherWORKS MC Adapter (DE210)",
-	"DEC EtherWORKS MC Adapter (DE212)",
-	NULL
-};
-
-static enum depca_type depca_mca_adapter_type[] = {
-	de210,
-	de212,
-	0
-};
-
-static int depca_mca_probe (struct device *);
-
-static struct mca_driver depca_mca_driver = {
-	.id_table = depca_mca_adapter_ids,
-	.driver   = {
-		.name   = depca_string,
-		.bus    = &mca_bus_type,
-		.probe  = depca_mca_probe,
-		.remove = __devexit_p(depca_device_remove),
-	},
-};
-#endif
-
 static int depca_isa_probe (struct platform_device *);
 
 static int __devexit depca_isa_remove(struct platform_device *pdev)
@@ -464,8 +409,7 @@
 	char adapter_name[DEPCA_STRLEN];	/* /proc/ioports string                  */
 	enum depca_type adapter;		/* Adapter type */
 	enum {
-                DEPCA_BUS_MCA = 1,
-                DEPCA_BUS_ISA,
+                DEPCA_BUS_ISA = 1,
                 DEPCA_BUS_EISA,
         } depca_bus;	        /* type of bus */
 	struct depca_init init_block;	/* Shadow Initialization block            */
@@ -624,12 +568,6 @@
 	       dev_name(device), depca_signature[lp->adapter], ioaddr);
 
 	switch (lp->depca_bus) {
-#ifdef CONFIG_MCA
-	case DEPCA_BUS_MCA:
-		printk(" (MCA slot %d)", to_mca_device(device)->slot + 1);
-		break;
-#endif
-
 #ifdef CONFIG_EISA
 	case DEPCA_BUS_EISA:
 		printk(" (EISA slot %d)", to_eisa_device(device)->slot);
@@ -661,10 +599,7 @@
 	if (nicsr & BUF) {
 		nicsr &= ~BS;	/* DEPCA RAM in top 32k */
 		netRAM -= 32;
-
-		/* Only EISA/ISA needs start address to be re-computed */
-		if (lp->depca_bus != DEPCA_BUS_MCA)
-			mem_start += 0x8000;
+		mem_start += 0x8000;
 	}
 
 	if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init)))
@@ -1079,7 +1014,8 @@
 						} else {
 							lp->pktStats.multicast++;
 						}
-					} else if (compare_ether_addr(buf, dev->dev_addr) == 0) {
+					} else if (ether_addr_equal(buf,
+								    dev->dev_addr)) {
 						lp->pktStats.unicast++;
 					}
 
@@ -1324,130 +1260,6 @@
 	return status;
 }
 
-#ifdef CONFIG_MCA
-/*
-** Microchannel bus I/O device probe
-*/
-static int __init depca_mca_probe(struct device *device)
-{
-	unsigned char pos[2];
-	unsigned char where;
-	unsigned long iobase, mem_start;
-	int irq, err;
-	struct mca_device *mdev = to_mca_device (device);
-	struct net_device *dev;
-	struct depca_private *lp;
-
-	/*
-	** Search for the adapter.  If an address has been given, search
-	** specifically for the card at that address.  Otherwise find the
-	** first card in the system.
-	*/
-
-	pos[0] = mca_device_read_stored_pos(mdev, 2);
-	pos[1] = mca_device_read_stored_pos(mdev, 3);
-
-	/*
-	** IO of card is handled by bits 1 and 2 of pos0.
-	**
-	**    bit2 bit1    IO
-	**       0    0    0x2c00
-	**       0    1    0x2c10
-	**       1    0    0x2c20
-	**       1    1    0x2c30
-	*/
-	where = (pos[0] & 6) >> 1;
-	iobase = 0x2c00 + (0x10 * where);
-
-	/*
-	** Found the adapter we were looking for. Now start setting it up.
-	**
-	** First work on decoding the IRQ.  It's stored in the lower 4 bits
-	** of pos1.  Bits are as follows (from the ADF file):
-	**
-	**      Bits
-	**   3   2   1   0    IRQ
-	**   --------------------
-	**   0   0   1   0     5
-	**   0   0   0   1     9
-	**   0   1   0   0    10
-	**   1   0   0   0    11
-	*/
-	where = pos[1] & 0x0f;
-	switch (where) {
-	case 1:
-		irq = 9;
-		break;
-	case 2:
-		irq = 5;
-		break;
-	case 4:
-		irq = 10;
-		break;
-	case 8:
-		irq = 11;
-		break;
-	default:
-		printk("%s: mca_probe IRQ error.  You should never get here (%d).\n", mdev->name, where);
-		return -EINVAL;
-	}
-
-	/*
-	** Shared memory address of adapter is stored in bits 3-5 of pos0.
-	** They are mapped as follows:
-	**
-	**    Bit
-	**   5  4  3       Memory Addresses
-	**   0  0  0       C0000-CFFFF (64K)
-	**   1  0  0       C8000-CFFFF (32K)
-	**   0  0  1       D0000-DFFFF (64K)
-	**   1  0  1       D8000-DFFFF (32K)
-	**   0  1  0       E0000-EFFFF (64K)
-	**   1  1  0       E8000-EFFFF (32K)
-	*/
-	where = (pos[0] & 0x18) >> 3;
-	mem_start = 0xc0000 + (where * 0x10000);
-	if (pos[0] & 0x20) {
-		mem_start += 0x8000;
-	}
-
-	/* claim the slot */
-	strncpy(mdev->name, depca_mca_adapter_name[mdev->index],
-		sizeof(mdev->name));
-	mca_device_set_claim(mdev, 1);
-
-        /*
-	** Get everything allocated and initialized...  (almost just
-	** like the ISA and EISA probes)
-	*/
-	irq = mca_device_transform_irq(mdev, irq);
-	iobase = mca_device_transform_ioport(mdev, iobase);
-
-	if ((err = depca_common_init (iobase, &dev)))
-		goto out_unclaim;
-
-	dev->irq = irq;
-	dev->base_addr = iobase;
-	lp = netdev_priv(dev);
-	lp->depca_bus = DEPCA_BUS_MCA;
-	lp->adapter = depca_mca_adapter_type[mdev->index];
-	lp->mem_start = mem_start;
-
-	if ((err = depca_hw_init(dev, device)))
-		goto out_free;
-
-	return 0;
-
- out_free:
-	free_netdev (dev);
-	release_region (iobase, DEPCA_TOTAL_SIZE);
- out_unclaim:
-	mca_device_set_claim(mdev, 0);
-
-	return err;
-}
-#endif
-
 /*
 ** ISA bus I/O device probe
 */
@@ -2058,15 +1870,10 @@
 {
 	int err = 0;
 
-#ifdef CONFIG_MCA
-	err = mca_register_driver(&depca_mca_driver);
-	if (err)
-		goto err;
-#endif
 #ifdef CONFIG_EISA
 	err = eisa_driver_register(&depca_eisa_driver);
 	if (err)
-		goto err_mca;
+		goto err_eisa;
 #endif
 	err = platform_driver_register(&depca_isa_driver);
 	if (err)
@@ -2078,11 +1885,6 @@
 err_eisa:
 #ifdef CONFIG_EISA
 	eisa_driver_unregister(&depca_eisa_driver);
-err_mca:
-#endif
-#ifdef CONFIG_MCA
-	mca_unregister_driver(&depca_mca_driver);
-err:
 #endif
 	return err;
 }
@@ -2090,9 +1892,6 @@
 static void __exit depca_module_exit (void)
 {
 	int i;
-#ifdef CONFIG_MCA
-        mca_unregister_driver (&depca_mca_driver);
-#endif
 #ifdef CONFIG_EISA
         eisa_driver_unregister (&depca_eisa_driver);
 #endif
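
With MCA gone, depca_module_init is left registering only the EISA and platform drivers and unwinding on failure through the renamed err_eisa label. A generic sketch of that register-then-unwind pattern, with hypothetical driver objects (demo_eisa_driver, demo_isa_driver):

#include <linux/eisa.h>
#include <linux/platform_device.h>

static struct eisa_driver demo_eisa_driver;		/* hypothetical */
static struct platform_driver demo_isa_driver;		/* hypothetical */

static int __init demo_module_init(void)
{
	int err;

	err = eisa_driver_register(&demo_eisa_driver);
	if (err)
		return err;

	err = platform_driver_register(&demo_isa_driver);
	if (err)
		eisa_driver_unregister(&demo_eisa_driver);	/* unwind */
	return err;
}
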
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index ca70e16..b2bf324 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -74,8 +74,6 @@
 
 #define AT_RX_BUF_SIZE		(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
 #define MAX_JUMBO_FRAME_SIZE	(6*1024)
-#define MAX_TSO_FRAME_SIZE      (7*1024)
-#define MAX_TX_OFFLOAD_THRESH	(9*1024)
 
 #define AT_MAX_RECEIVE_QUEUE    4
 #define AT_DEF_RECEIVE_QUEUE	1
@@ -100,7 +98,7 @@
 #define ATL1C_ASPM_L0s_ENABLE		0x0001
 #define ATL1C_ASPM_L1_ENABLE		0x0002
 
-#define AT_REGS_LEN	(75 * sizeof(u32))
+#define AT_REGS_LEN	(74 * sizeof(u32))
 #define AT_EEPROM_LEN 	512
 
 #define ATL1C_GET_DESC(R, i, type)	(&(((type *)((R)->desc))[i]))
@@ -297,20 +295,6 @@
 	atl1c_dma_req_4096 = 5
 };
 
-enum atl1c_rss_mode {
-	atl1c_rss_mode_disable = 0,
-	atl1c_rss_sig_que = 1,
-	atl1c_rss_mul_que_sig_int = 2,
-	atl1c_rss_mul_que_mul_int = 4,
-};
-
-enum atl1c_rss_type {
-	atl1c_rss_disable = 0,
-	atl1c_rss_ipv4 = 1,
-	atl1c_rss_ipv4_tcp = 2,
-	atl1c_rss_ipv6 = 4,
-	atl1c_rss_ipv6_tcp = 8
-};
 
 enum atl1c_nic_type {
 	athr_l1c = 0,
@@ -388,7 +372,6 @@
 	enum atl1c_dma_order dma_order;
 	enum atl1c_dma_rcb   rcb_value;
 	enum atl1c_dma_req_block dmar_block;
-	enum atl1c_dma_req_block dmaw_block;
 
 	u16 device_id;
 	u16 vendor_id;
@@ -399,8 +382,6 @@
 	u16 phy_id2;
 
 	u32 intr_mask;
-	u8 dmaw_dly_cnt;
-	u8 dmar_dly_cnt;
 
 	u8 preamble_len;
 	u16 max_frame_size;
@@ -440,10 +421,6 @@
 #define ATL1C_FPGA_VERSION              0x8000
 	u16 link_cap_flags;
 #define ATL1C_LINK_CAP_1000M		0x0001
-	u16 cmb_tpd;
-	u16 cmb_rrd;
-	u16 cmb_rx_timer; /* 2us resolution */
-	u16 cmb_tx_timer;
 	u32 smb_timer;
 
 	u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
@@ -451,9 +428,6 @@
 	u16 tpd_thresh;
 	u8 tpd_burst;   /* Number of TPD to prefetch in cache-aligned burst. */
 	u8 rfd_burst;
-	enum atl1c_rss_type rss_type;
-	enum atl1c_rss_mode rss_mode;
-	u8 rss_hash_bits;
 	u32 base_cpu;
 	u32 indirect_tab;
 	u8 mac_addr[ETH_ALEN];
@@ -462,12 +436,12 @@
 	bool phy_configured;
 	bool re_autoneg;
 	bool emi_ca;
+	bool msi_lnkpatch;	/* link patch for specific platforms */
 };
 
 /*
  * atl1c_ring_header represents a single, contiguous block of DMA space
- * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
- * message blocks (cmb, smb) described below
+ * mapped for the three descriptor rings (tpd, rfd, rrd) described below
  */
 struct atl1c_ring_header {
 	void *desc;		/* virtual address */
@@ -541,16 +515,6 @@
 	u16 next_to_clean;
 };
 
-struct atl1c_cmb {
-	void *cmb;
-	dma_addr_t dma;
-};
-
-struct atl1c_smb {
-	void *smb;
-	dma_addr_t dma;
-};
-
 /* board specific private data structure */
 struct atl1c_adapter {
 	struct net_device   *netdev;
@@ -586,11 +550,8 @@
 	/* All Descriptor memory */
 	struct atl1c_ring_header ring_header;
 	struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE];
-	struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE];
-	struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE];
-	struct atl1c_cmb cmb;
-	struct atl1c_smb smb;
-	int num_rx_queues;
+	struct atl1c_rfd_ring rfd_ring;
+	struct atl1c_rrd_ring rrd_ring;
 	u32 bd_number;     /* board number;*/
 };
 
@@ -618,8 +579,14 @@
 #define AT_WRITE_REGW(a, reg, value) (\
 		writew((value), ((a)->hw_addr + reg)))
 
-#define AT_READ_REGW(a, reg) (\
-		readw((a)->hw_addr + reg))
+#define AT_READ_REGW(a, reg, pdata) do {				\
+		if (unlikely((a)->hibernate)) {				\
+			readw((a)->hw_addr + reg);			\
+			*(u16 *)pdata = readw((a)->hw_addr + reg);	\
+		} else {						\
+			*(u16 *)pdata = readw((a)->hw_addr + reg);	\
+		}							\
+	} while (0)
 
 #define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
 		writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
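
The reworked AT_READ_REGW above issues an extra readw() and discards it when the adapter is hibernating, so only the second read is returned. What appears to be the intent, a discard-the-first-read workaround for stale values in low-power state, can be sketched as a plain helper (hypothetical, not the driver's macro):

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper: if the chip may be in a low-power state, read the
 * 16-bit register once and throw the value away, then trust the second
 * readw() result. */
static u16 read_regw_careful(void __iomem *base, u32 reg, bool hibernating)
{
	if (hibernating)
		(void)readw(base + reg);	/* dummy read, value discarded */
	return readw(base + reg);
}
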
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 0a9326a..859ea84 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -141,8 +141,7 @@
 
 	memset(p, 0, AT_REGS_LEN);
 
-	regs->version = 0;
-	AT_READ_REG(hw, REG_VPD_CAP, 		  p++);
+	regs->version = 1;
 	AT_READ_REG(hw, REG_PM_CTRL, 		  p++);
 	AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL,  p++);
 	AT_READ_REG(hw, REG_TWSI_CTRL, 		  p++);
@@ -154,7 +153,7 @@
 	AT_READ_REG(hw, REG_LINK_CTRL, 		  p++);
 	AT_READ_REG(hw, REG_IDLE_STATUS, 	  p++);
 	AT_READ_REG(hw, REG_MDIO_CTRL, 		  p++);
-	AT_READ_REG(hw, REG_SERDES_LOCK, 	  p++);
+	AT_READ_REG(hw, REG_SERDES,		  p++);
 	AT_READ_REG(hw, REG_MAC_CTRL, 		  p++);
 	AT_READ_REG(hw, REG_MAC_IPG_IFG, 	  p++);
 	AT_READ_REG(hw, REG_MAC_STA_ADDR, 	  p++);
@@ -167,9 +166,9 @@
 	AT_READ_REG(hw, REG_WOL_CTRL, 		  p++);
 
 	atl1c_read_phy_reg(hw, MII_BMCR, &phy_data);
-	regs_buff[73] =	(u32) phy_data;
+	regs_buff[AT_REGS_LEN/sizeof(u32) - 2] = (u32) phy_data;
 	atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
-	regs_buff[74] = (u32) phy_data;
+	regs_buff[AT_REGS_LEN/sizeof(u32) - 1] = (u32) phy_data;
 }
 
 static int atl1c_get_eeprom_len(struct net_device *netdev)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index bd1667c..ff9c738 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -43,7 +43,7 @@
 	return 0;
 }
 
-void atl1c_hw_set_mac_addr(struct atl1c_hw *hw)
+void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr)
 {
 	u32 value;
 	/*
@@ -51,35 +51,48 @@
 	 * 0:  6AF600DC 1: 000B
 	 * low dword
 	 */
-	value = (((u32)hw->mac_addr[2]) << 24) |
-		(((u32)hw->mac_addr[3]) << 16) |
-		(((u32)hw->mac_addr[4]) << 8)  |
-		(((u32)hw->mac_addr[5])) ;
+	value = mac_addr[2] << 24 |
+		mac_addr[3] << 16 |
+		mac_addr[4] << 8  |
+		mac_addr[5];
 	AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
 	/* high dword */
-	value = (((u32)hw->mac_addr[0]) << 8) |
-		(((u32)hw->mac_addr[1])) ;
+	value = mac_addr[0] << 8 |
+		mac_addr[1];
 	AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
 }
 
+/* read mac address from hardware register */
+static bool atl1c_read_current_addr(struct atl1c_hw *hw, u8 *eth_addr)
+{
+	u32 addr[2];
+
+	AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]);
+	AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]);
+
+	*(u32 *) &eth_addr[2] = htonl(addr[0]);
+	*(u16 *) &eth_addr[0] = htons((u16)addr[1]);
+
+	return is_valid_ether_addr(eth_addr);
+}
+
 /*
  * atl1c_get_permanent_address
  * return 0 if get valid mac address,
  */
 static int atl1c_get_permanent_address(struct atl1c_hw *hw)
 {
-	u32 addr[2];
 	u32 i;
 	u32 otp_ctrl_data;
 	u32 twsi_ctrl_data;
-	u32 ltssm_ctrl_data;
-	u32 wol_data;
-	u8  eth_addr[ETH_ALEN];
 	u16 phy_data;
 	bool raise_vol = false;
 
+	/* MAC-address from BIOS is the 1st priority */
+	if (atl1c_read_current_addr(hw, hw->perm_mac_addr))
+		return 0;
+
 	/* init */
-	addr[0] = addr[1] = 0;
 	AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
 	if (atl1c_check_eeprom_exist(hw)) {
 		if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
@@ -91,33 +104,17 @@
 				msleep(1);
 			}
 		}
-
-		if (hw->nic_type == athr_l2c_b ||
-		    hw->nic_type == athr_l2c_b2 ||
-		    hw->nic_type == athr_l1d) {
-			atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
-			if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
-				goto out;
-			phy_data &= 0xFF7F;
-			atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
-
-			atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
-			if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
-				goto out;
-			phy_data |= 0x8;
-			atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+		/* raise voltage temporarily for l2cb */
+		if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) {
+			atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data);
+			phy_data &= ~ANACTRL_HB_EN;
+			atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data);
+			atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
+			phy_data |= VOLT_CTRL_SWLOWEST;
+			atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
 			udelay(20);
 			raise_vol = true;
 		}
-		/* close open bit of ReadOnly*/
-		AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &ltssm_ctrl_data);
-		ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO;
-		AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data);
-
-		/* clear any WOL settings */
-		AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
-		AT_READ_REG(hw, REG_WOL_CTRL, &wol_data);
-
 
 		AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
 		twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
@@ -138,37 +135,18 @@
 		msleep(1);
 	}
 	if (raise_vol) {
-		if (hw->nic_type == athr_l2c_b ||
-		    hw->nic_type == athr_l2c_b2 ||
-		    hw->nic_type == athr_l1d ||
-		    hw->nic_type == athr_l1d_2) {
-			atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
-			if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
-				goto out;
-			phy_data |= 0x80;
-			atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
-
-			atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
-			if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
-				goto out;
-			phy_data &= 0xFFF7;
-			atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
-			udelay(20);
-		}
+		atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data);
+		phy_data |= ANACTRL_HB_EN;
+		atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data);
+		atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
+		phy_data &= ~VOLT_CTRL_SWLOWEST;
+		atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
+		udelay(20);
 	}
 
-	/* maybe MAC-address is from BIOS */
-	AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]);
-	AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]);
-	*(u32 *) &eth_addr[2] = swab32(addr[0]);
-	*(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]);
-
-	if (is_valid_ether_addr(eth_addr)) {
-		memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+	if (atl1c_read_current_addr(hw, hw->perm_mac_addr))
 		return 0;
-	}
 
-out:
 	return -1;
 }
 
@@ -278,33 +256,158 @@
 }
 
 /*
+ * wait for the MDIO module to become idle
+ * return true: idle
+ *        false: still busy
+ */
+bool atl1c_wait_mdio_idle(struct atl1c_hw *hw)
+{
+	u32 val;
+	int i;
+
+	for (i = 0; i < MDIO_MAX_AC_TO; i++) {
+		AT_READ_REG(hw, REG_MDIO_CTRL, &val);
+		if (!(val & (MDIO_CTRL_BUSY | MDIO_CTRL_START)))
+			break;
+		udelay(10);
+	}
+
+	return i != MDIO_MAX_AC_TO;
+}
+
+void atl1c_stop_phy_polling(struct atl1c_hw *hw)
+{
+	if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
+		return;
+
+	AT_WRITE_REG(hw, REG_MDIO_CTRL, 0);
+	atl1c_wait_mdio_idle(hw);
+}
+
+void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel)
+{
+	u32 val;
+
+	if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
+		return;
+
+	val = MDIO_CTRL_SPRES_PRMBL |
+		FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
+		FIELDX(MDIO_CTRL_REG, 1) |
+		MDIO_CTRL_START |
+		MDIO_CTRL_OP_READ;
+	AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+	atl1c_wait_mdio_idle(hw);
+	val |= MDIO_CTRL_AP_EN;
+	val &= ~MDIO_CTRL_START;
+	AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+	udelay(30);
+}
+
+
+/*
+ * atl1c_read_phy_core
+ * core function to read a register in the PHY via the MDIO control register.
+ * ext: extension register (see IEEE 802.3)
+ * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
+ * reg: reg to read
+ */
+int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
+			u16 reg, u16 *phy_data)
+{
+	u32 val;
+	u16 clk_sel = MDIO_CTRL_CLK_25_4;
+
+	atl1c_stop_phy_polling(hw);
+
+	*phy_data = 0;
+
+	/* only l2c_b2 & l1d_2 could use slow clock */
+	if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) &&
+		hw->hibernate)
+		clk_sel = MDIO_CTRL_CLK_25_128;
+	if (ext) {
+		val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg);
+		AT_WRITE_REG(hw, REG_MDIO_EXTN, val);
+		val = MDIO_CTRL_SPRES_PRMBL |
+			FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
+			MDIO_CTRL_START |
+			MDIO_CTRL_MODE_EXT |
+			MDIO_CTRL_OP_READ;
+	} else {
+		val = MDIO_CTRL_SPRES_PRMBL |
+			FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
+			FIELDX(MDIO_CTRL_REG, reg) |
+			MDIO_CTRL_START |
+			MDIO_CTRL_OP_READ;
+	}
+	AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+	if (!atl1c_wait_mdio_idle(hw))
+		return -1;
+
+	AT_READ_REG(hw, REG_MDIO_CTRL, &val);
+	*phy_data = (u16)FIELD_GETX(val, MDIO_CTRL_DATA);
+
+	atl1c_start_phy_polling(hw, clk_sel);
+
+	return 0;
+}
+
+/*
+ * atl1c_write_phy_core
+ * core function to write to a register in the PHY via the MDIO control register.
+ * ext: extension register (see IEEE 802.3)
+ * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
+ * reg: reg to write
+ */
+int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
+			u16 reg, u16 phy_data)
+{
+	u32 val;
+	u16 clk_sel = MDIO_CTRL_CLK_25_4;
+
+	atl1c_stop_phy_polling(hw);
+
+
+	/* only l2c_b2 & l1d_2 could use slow clock */
+	if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) &&
+		hw->hibernate)
+		clk_sel = MDIO_CTRL_CLK_25_128;
+
+	if (ext) {
+		val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg);
+		AT_WRITE_REG(hw, REG_MDIO_EXTN, val);
+		val = MDIO_CTRL_SPRES_PRMBL |
+			FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
+			FIELDX(MDIO_CTRL_DATA, phy_data) |
+			MDIO_CTRL_START |
+			MDIO_CTRL_MODE_EXT;
+	} else {
+		val = MDIO_CTRL_SPRES_PRMBL |
+			FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
+			FIELDX(MDIO_CTRL_DATA, phy_data) |
+			FIELDX(MDIO_CTRL_REG, reg) |
+			MDIO_CTRL_START;
+	}
+	AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+	if (!atl1c_wait_mdio_idle(hw))
+		return -1;
+
+	atl1c_start_phy_polling(hw, clk_sel);
+
+	return 0;
+}
+
+/*
  * Reads the value from a PHY register
  * hw - Struct containing variables accessed by shared code
  * reg_addr - address of the PHY register to read
  */
 int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
 {
-	u32 val;
-	int i;
-
-	val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
-		MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
-		MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
-
-	AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
-
-	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
-		udelay(2);
-		AT_READ_REG(hw, REG_MDIO_CTRL, &val);
-		if (!(val & (MDIO_START | MDIO_BUSY)))
-			break;
-	}
-	if (!(val & (MDIO_START | MDIO_BUSY))) {
-		*phy_data = (u16)val;
-		return 0;
-	}
-
-	return -1;
+	return atl1c_read_phy_core(hw, false, 0, reg_addr, phy_data);
 }
 
 /*
@@ -315,27 +418,47 @@
  */
 int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
 {
-	int i;
-	u32 val;
+	return atl1c_write_phy_core(hw, false, 0, reg_addr, phy_data);
+}
 
-	val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT   |
-	       (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
-	       MDIO_SUP_PREAMBLE | MDIO_START |
-	       MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+/* read from PHY extension register */
+int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
+			u16 reg_addr, u16 *phy_data)
+{
+	return atl1c_read_phy_core(hw, true, dev_addr, reg_addr, phy_data);
+}
 
-	AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+/* write to PHY extension register */
+int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
+			u16 reg_addr, u16 phy_data)
+{
+	return atl1c_write_phy_core(hw, true, dev_addr, reg_addr, phy_data);
+}
 
-	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
-		udelay(2);
-		AT_READ_REG(hw, REG_MDIO_CTRL, &val);
-		if (!(val & (MDIO_START | MDIO_BUSY)))
-			break;
-	}
+int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+	int err;
 
-	if (!(val & (MDIO_START | MDIO_BUSY)))
-		return 0;
+	err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr);
+	if (unlikely(err))
+		return err;
+	else
+		err = atl1c_read_phy_reg(hw, MII_DBG_DATA, phy_data);
 
-	return -1;
+	return err;
+}
+
+int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data)
+{
+	int err;
+
+	err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr);
+	if (unlikely(err))
+		return err;
+	else
+		err = atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+
+	return err;
 }
 
 /*
@@ -380,119 +503,100 @@
 
 void atl1c_phy_disable(struct atl1c_hw *hw)
 {
-	AT_WRITE_REGW(hw, REG_GPHY_CTRL,
-			GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
+	atl1c_power_saving(hw, 0);
 }
 
-static void atl1c_phy_magic_data(struct atl1c_hw *hw)
-{
-	u16 data;
-
-	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
-		((1 & ANA_INTERVAL_SEL_TIMER_MASK) <<
-		ANA_INTERVAL_SEL_TIMER_SHIFT);
-
-	atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_18);
-	atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
-
-	data = (2 & ANA_SERDES_CDR_BW_MASK) | ANA_MS_PAD_DBG |
-		ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
-		ANA_SERDES_EN_LCKDT;
-
-	atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_5);
-	atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
-
-	data = (44 & ANA_LONG_CABLE_TH_100_MASK) |
-		((33 & ANA_SHORT_CABLE_TH_100_MASK) <<
-		ANA_SHORT_CABLE_TH_100_SHIFT) | ANA_BP_BAD_LINK_ACCUM |
-		ANA_BP_SMALL_BW;
-
-	atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_54);
-	atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
-
-	data = (11 & ANA_IECHO_ADJ_MASK) | ((11 & ANA_IECHO_ADJ_MASK) <<
-		ANA_IECHO_ADJ_2_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
-		ANA_IECHO_ADJ_1_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
-		ANA_IECHO_ADJ_0_SHIFT);
-
-	atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_4);
-	atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
-
-	data = ANA_RESTART_CAL | ((7 & ANA_MANUL_SWICH_ON_MASK) <<
-		ANA_MANUL_SWICH_ON_SHIFT) | ANA_MAN_ENABLE |
-		ANA_SEL_HSP | ANA_EN_HB | ANA_OEN_125M;
-
-	atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_0);
-	atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
-
-	if (hw->ctrl_flags & ATL1C_HIB_DISABLE) {
-		atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_41);
-		if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
-			return;
-		data &= ~ANA_TOP_PS_EN;
-		atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
-
-		atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_11);
-		if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
-			return;
-		data &= ~ANA_PS_HIB_EN;
-		atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
-	}
-}
 
 int atl1c_phy_reset(struct atl1c_hw *hw)
 {
 	struct atl1c_adapter *adapter = hw->adapter;
 	struct pci_dev *pdev = adapter->pdev;
 	u16 phy_data;
-	u32 phy_ctrl_data = GPHY_CTRL_DEFAULT;
-	u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN;
+	u32 phy_ctrl_data, lpi_ctrl;
 	int err;
 
-	if (hw->ctrl_flags & ATL1C_HIB_DISABLE)
-		phy_ctrl_data &= ~GPHY_CTRL_HIB_EN;
-
+	/* reset PHY core */
+	AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl_data);
+	phy_ctrl_data &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_PHY_IDDQ |
+		GPHY_CTRL_GATE_25M_EN | GPHY_CTRL_PWDOWN_HW | GPHY_CTRL_CLS);
+	phy_ctrl_data |= GPHY_CTRL_SEL_ANA_RST;
+	if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE))
+		phy_ctrl_data |= (GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE);
+	else
+		phy_ctrl_data &= ~(GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE);
 	AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
 	AT_WRITE_FLUSH(hw);
-	msleep(40);
-	phy_ctrl_data |= GPHY_CTRL_EXT_RESET;
-	AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
+	udelay(10);
+	AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data | GPHY_CTRL_EXT_RESET);
 	AT_WRITE_FLUSH(hw);
-	msleep(10);
+	udelay(10 * GPHY_CTRL_EXT_RST_TO);	/* delay 800us */
 
+	/* switch clock */
 	if (hw->nic_type == athr_l2c_b) {
-		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x0A);
-		atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
-		atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xDFFF);
+		atl1c_read_phy_dbg(hw, MIIDBG_CFGLPSPD, &phy_data);
+		atl1c_write_phy_dbg(hw, MIIDBG_CFGLPSPD,
+			phy_data & ~CFGLPSPD_RSTCNT_CLK125SW);
 	}
 
-	if (hw->nic_type == athr_l2c_b ||
-	    hw->nic_type == athr_l2c_b2 ||
-	    hw->nic_type == athr_l1d ||
-	    hw->nic_type == athr_l1d_2) {
-		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
-		atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
-		atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
-		msleep(20);
+	/* tx-half amplitude issue fix */
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) {
+		atl1c_read_phy_dbg(hw, MIIDBG_CABLE1TH_DET, &phy_data);
+		phy_data |= CABLE1TH_DET_EN;
+		atl1c_write_phy_dbg(hw, MIIDBG_CABLE1TH_DET, phy_data);
 	}
-	if (hw->nic_type == athr_l1d) {
-		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
-		atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
+
+	/* clear bit3 of dbgport 3B to lower voltage */
+	if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) {
+		if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) {
+			atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
+			phy_data &= ~VOLT_CTRL_SWLOWEST;
+			atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
+		}
+		/* power saving config */
+		phy_data =
+			hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ?
+			L1D_LEGCYPS_DEF : L1C_LEGCYPS_DEF;
+		atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, phy_data);
+		/* hib */
+		atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL,
+			SYSMODCTRL_IECHOADJ_DEF);
+	} else {
+		/* disable pws */
+		atl1c_read_phy_dbg(hw, MIIDBG_LEGCYPS, &phy_data);
+		atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS,
+			phy_data & ~LEGCYPS_EN);
+		/* disable hibernate */
+		atl1c_read_phy_dbg(hw, MIIDBG_HIBNEG, &phy_data);
+		atl1c_write_phy_dbg(hw, MIIDBG_HIBNEG,
+			phy_data & HIBNEG_PSHIB_EN);
 	}
-	if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2
-		|| hw->nic_type == athr_l2c) {
-		atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
-		atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
+	/* disable AZ(EEE) by default */
+	if (hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ||
+	    hw->nic_type == athr_l2c_b2) {
+		AT_READ_REG(hw, REG_LPI_CTRL, &lpi_ctrl);
+		AT_WRITE_REG(hw, REG_LPI_CTRL, lpi_ctrl & ~LPI_CTRL_EN);
+		atl1c_write_phy_ext(hw, MIIEXT_ANEG, MIIEXT_LOCAL_EEEADV, 0);
+		atl1c_write_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL3,
+			L2CB_CLDCTRL3);
 	}
-	err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
+
+	/* other debug port to set */
+	atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, ANACTRL_DEF);
+	atl1c_write_phy_dbg(hw, MIIDBG_SRDSYSMOD, SRDSYSMOD_DEF);
+	atl1c_write_phy_dbg(hw, MIIDBG_TST10BTCFG, TST10BTCFG_DEF);
+	/* UNH-IOL test issue, set bit7 */
+	atl1c_write_phy_dbg(hw, MIIDBG_TST100BTCFG,
+		TST100BTCFG_DEF | TST100BTCFG_LITCH_EN);
+
+	/* set phy interrupt mask */
+	phy_data = IER_LINK_UP | IER_LINK_DOWN;
+	err = atl1c_write_phy_reg(hw, MII_IER, phy_data);
 	if (err) {
 		if (netif_msg_hw(adapter))
 			dev_err(&pdev->dev,
 				"Error enable PHY linkChange Interrupt\n");
 		return err;
 	}
-	if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
-		atl1c_phy_magic_data(hw);
 	return 0;
 }
 
@@ -589,7 +693,8 @@
 	return 0;
 }
 
-int atl1c_phy_power_saving(struct atl1c_hw *hw)
+/* select one link mode to get lower power consumption */
+int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
 {
 	struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
 	struct pci_dev *pdev = adapter->pdev;
@@ -660,3 +765,101 @@
 
 	return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
 }
+
+int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
+{
+	struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+	struct pci_dev *pdev = adapter->pdev;
+	u32 master_ctrl, mac_ctrl, phy_ctrl;
+	u32 wol_ctrl, speed;
+	u16 phy_data;
+
+	wol_ctrl = 0;
+	speed = adapter->link_speed == SPEED_1000 ?
+		MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100;
+
+	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl);
+	AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl);
+	AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl);
+
+	master_ctrl &= ~MASTER_CTRL_CLK_SEL_DIS;
+	mac_ctrl = FIELD_SETX(mac_ctrl, MAC_CTRL_SPEED, speed);
+	mac_ctrl &= ~(MAC_CTRL_DUPLX | MAC_CTRL_RX_EN | MAC_CTRL_TX_EN);
+	if (adapter->link_duplex == FULL_DUPLEX)
+		mac_ctrl |= MAC_CTRL_DUPLX;
+	phy_ctrl &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_CLS);
+	phy_ctrl |= GPHY_CTRL_SEL_ANA_RST | GPHY_CTRL_HIB_PULSE |
+		GPHY_CTRL_HIB_EN;
+	if (!wufc) { /* without WoL */
+		master_ctrl |= MASTER_CTRL_CLK_SEL_DIS;
+		phy_ctrl |= GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PWDOWN_HW;
+		AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl);
+		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl);
+		AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl);
+		AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+		hw->phy_configured = false; /* re-init PHY when resume */
+		return 0;
+	}
+	phy_ctrl |= GPHY_CTRL_EXT_RESET;
+	if (wufc & AT_WUFC_MAG) {
+		mac_ctrl |= MAC_CTRL_RX_EN | MAC_CTRL_BC_EN;
+		wol_ctrl |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+		if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V11)
+			wol_ctrl |= WOL_PATTERN_EN | WOL_PATTERN_PME_EN;
+	}
+	if (wufc & AT_WUFC_LNKC) {
+		wol_ctrl |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
+		if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
+			dev_dbg(&pdev->dev, "%s: write phy MII_IER failed.\n",
+				atl1c_driver_name);
+		}
+	}
+	/* clear PHY interrupt */
+	atl1c_read_phy_reg(hw, MII_ISR, &phy_data);
+
+	dev_dbg(&pdev->dev, "%s: suspend MAC=%x,MASTER=%x,PHY=0x%x,WOL=%x\n",
+		atl1c_driver_name, mac_ctrl, master_ctrl, phy_ctrl, wol_ctrl);
+	AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl);
+	AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl);
+	AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl);
+	AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl);
+
+	return 0;
+}
+
+
+/* configure phy after Link change Event */
+void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed)
+{
+	u16 phy_val;
+	bool adj_thresh = false;
+
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ||
+	    hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2)
+		adj_thresh = true;
+
+	if (link_speed != SPEED_0) { /* link up */
+		/* az with brcm, half-amp */
+		if (hw->nic_type == athr_l1d_2) {
+			atl1c_read_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL6,
+				&phy_val);
+			phy_val = FIELD_GETX(phy_val, CLDCTRL6_CAB_LEN);
+			phy_val = phy_val > CLDCTRL6_CAB_LEN_SHORT ?
+				AZ_ANADECT_LONG : AZ_ANADECT_DEF;
+			atl1c_write_phy_dbg(hw, MIIDBG_AZ_ANADECT, phy_val);
+		}
+		/* threshold adjust */
+		if (adj_thresh && link_speed == SPEED_100 && hw->msi_lnkpatch) {
+			atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, L1D_MSE16DB_UP);
+			atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL,
+				L1D_SYSMODCTRL_IECHOADJ_DEF);
+		}
+	} else { /* link down */
+		if (adj_thresh && hw->msi_lnkpatch) {
+			atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL,
+				SYSMODCTRL_IECHOADJ_DEF);
+			atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB,
+				L1D_MSE16DB_DOWN);
+		}
+	}
+}
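
The MDIO helpers above assemble control words with the FIELDX()/FIELD_GETX()/FIELD_SETX() macros introduced in the header change below, where every register field is described by a _MASK/_SHIFT pair. A small illustration with a made-up DEMO_SPEED field (the field name is hypothetical; the macro semantics follow the atl1c_hw.h definitions):

#include <linux/types.h>
/* FIELDX/FIELD_GETX/FIELD_SETX come from atl1c_hw.h */

#define DEMO_SPEED_MASK		0x3UL	/* hypothetical 2-bit field ... */
#define DEMO_SPEED_SHIFT	20	/* ... living at bits 21:20 */

static u32 demo_field_macros(void)
{
	u32 reg = 0, spd;

	reg |= FIELDX(DEMO_SPEED, 2);			/* pack: bits 21:20 <- 2 */
	spd = FIELD_GETX(reg, DEMO_SPEED);		/* extract: spd == 2 */
	reg = FIELD_SETX(reg, DEMO_SPEED, spd - 1);	/* field rewritten to 1 */
	return reg;
}
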
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index 655fc6c..17d935b 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -25,12 +25,18 @@
 #include <linux/types.h>
 #include <linux/mii.h>
 
+#define FIELD_GETX(_x, _name)   ((_x) >> (_name##_SHIFT) & (_name##_MASK))
+#define FIELD_SETX(_x, _name, _v) \
+(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\
+(((_v) & (_name##_MASK)) << (_name##_SHIFT)))
+#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT))
+
 struct atl1c_adapter;
 struct atl1c_hw;
 
 /* function prototype */
 void atl1c_phy_disable(struct atl1c_hw *hw);
-void atl1c_hw_set_mac_addr(struct atl1c_hw *hw);
+void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr);
 int atl1c_phy_reset(struct atl1c_hw *hw);
 int atl1c_read_mac_addr(struct atl1c_hw *hw);
 int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex);
@@ -42,47 +48,45 @@
 int atl1c_phy_init(struct atl1c_hw *hw);
 int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
 int atl1c_restart_autoneg(struct atl1c_hw *hw);
-int atl1c_phy_power_saving(struct atl1c_hw *hw);
+int atl1c_phy_to_ps_link(struct atl1c_hw *hw);
+int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc);
+bool atl1c_wait_mdio_idle(struct atl1c_hw *hw);
+void atl1c_stop_phy_polling(struct atl1c_hw *hw);
+void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel);
+int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
+			u16 reg, u16 *phy_data);
+int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
+			u16 reg, u16 phy_data);
+int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
+			u16 reg_addr, u16 *phy_data);
+int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
+			u16 reg_addr, u16 phy_data);
+int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data);
+int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data);
+void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
+
+/* hw-ids */
+#define PCI_DEVICE_ID_ATTANSIC_L2C      0x1062
+#define PCI_DEVICE_ID_ATTANSIC_L1C      0x1063
+#define PCI_DEVICE_ID_ATHEROS_L2C_B	0x2060 /* AR8152 v1.1 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L2C_B2	0x2062 /* AR8152 v2.0 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L1D	0x1073 /* AR8151 v1.0 Gigabit 1000 */
+#define PCI_DEVICE_ID_ATHEROS_L1D_2_0	0x1083 /* AR8151 v2.0 Gigabit 1000 */
+#define L2CB_V10			0xc0
+#define L2CB_V11			0xc1
+
 /* register definition */
 #define REG_DEVICE_CAP              	0x5C
 #define DEVICE_CAP_MAX_PAYLOAD_MASK     0x7
 #define DEVICE_CAP_MAX_PAYLOAD_SHIFT    0
 
-#define REG_DEVICE_CTRL			0x60
-#define DEVICE_CTRL_MAX_PAYLOAD_MASK    0x7
-#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT   5
-#define DEVICE_CTRL_MAX_RREQ_SZ_MASK    0x7
-#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT   12
+#define DEVICE_CTRL_MAXRRS_MIN		2
 
 #define REG_LINK_CTRL			0x68
 #define LINK_CTRL_L0S_EN		0x01
 #define LINK_CTRL_L1_EN			0x02
 #define LINK_CTRL_EXT_SYNC		0x80
 
-#define REG_VPD_CAP			0x6C
-#define VPD_CAP_ID_MASK                 0xff
-#define VPD_CAP_ID_SHIFT                0
-#define VPD_CAP_NEXT_PTR_MASK           0xFF
-#define VPD_CAP_NEXT_PTR_SHIFT          8
-#define VPD_CAP_VPD_ADDR_MASK           0x7FFF
-#define VPD_CAP_VPD_ADDR_SHIFT          16
-#define VPD_CAP_VPD_FLAG                0x80000000
-
-#define REG_VPD_DATA                	0x70
-
-#define REG_PCIE_UC_SEVERITY		0x10C
-#define PCIE_UC_SERVRITY_TRN		0x00000001
-#define PCIE_UC_SERVRITY_DLP		0x00000010
-#define PCIE_UC_SERVRITY_PSN_TLP	0x00001000
-#define PCIE_UC_SERVRITY_FCP		0x00002000
-#define PCIE_UC_SERVRITY_CPL_TO		0x00004000
-#define PCIE_UC_SERVRITY_CA		0x00008000
-#define PCIE_UC_SERVRITY_UC		0x00010000
-#define PCIE_UC_SERVRITY_ROV		0x00020000
-#define PCIE_UC_SERVRITY_MLFP		0x00040000
-#define PCIE_UC_SERVRITY_ECRC		0x00080000
-#define PCIE_UC_SERVRITY_UR		0x00100000
-
 #define REG_DEV_SERIALNUM_CTRL		0x200
 #define REG_DEV_MAC_SEL_MASK		0x0 /* 0:EUI; 1:MAC */
 #define REG_DEV_MAC_SEL_SHIFT		0
@@ -90,25 +94,17 @@
 #define REG_DEV_SERIAL_NUM_EN_SHIFT	1
 
 #define REG_TWSI_CTRL               	0x218
+#define TWSI_CTLR_FREQ_MASK		0x3UL
+#define TWSI_CTRL_FREQ_SHIFT		24
+#define TWSI_CTRL_FREQ_100K		0
+#define TWSI_CTRL_FREQ_200K		1
+#define TWSI_CTRL_FREQ_300K		2
+#define TWSI_CTRL_FREQ_400K		3
+#define TWSI_CTRL_LD_EXIST		BIT(23)
+#define TWSI_CTRL_HW_LDSTAT		BIT(12)	/* 0:finish,1:in progress */
+#define TWSI_CTRL_SW_LDSTART            BIT(11)
 #define TWSI_CTRL_LD_OFFSET_MASK        0xFF
 #define TWSI_CTRL_LD_OFFSET_SHIFT       0
-#define TWSI_CTRL_LD_SLV_ADDR_MASK      0x7
-#define TWSI_CTRL_LD_SLV_ADDR_SHIFT     8
-#define TWSI_CTRL_SW_LDSTART            0x800
-#define TWSI_CTRL_HW_LDSTART            0x1000
-#define TWSI_CTRL_SMB_SLV_ADDR_MASK     0x7F
-#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT    15
-#define TWSI_CTRL_LD_EXIST              0x400000
-#define TWSI_CTRL_READ_FREQ_SEL_MASK    0x3
-#define TWSI_CTRL_READ_FREQ_SEL_SHIFT   23
-#define TWSI_CTRL_FREQ_SEL_100K         0
-#define TWSI_CTRL_FREQ_SEL_200K         1
-#define TWSI_CTRL_FREQ_SEL_300K         2
-#define TWSI_CTRL_FREQ_SEL_400K         3
-#define TWSI_CTRL_SMB_SLV_ADDR
-#define TWSI_CTRL_WRITE_FREQ_SEL_MASK   0x3
-#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT  24
-
 
 #define REG_PCIE_DEV_MISC_CTRL      	0x21C
 #define PCIE_DEV_MISC_EXT_PIPE     	0x2
@@ -118,16 +114,23 @@
 #define PCIE_DEV_MISC_SERDES_SEL_DIN   	0x10
 
 #define REG_PCIE_PHYMISC	    	0x1000
-#define PCIE_PHYMISC_FORCE_RCV_DET	0x4
+#define PCIE_PHYMISC_FORCE_RCV_DET	BIT(2)
+#define PCIE_PHYMISC_NFTS_MASK		0xFFUL
+#define PCIE_PHYMISC_NFTS_SHIFT		16
 
 #define REG_PCIE_PHYMISC2		0x1004
-#define PCIE_PHYMISC2_SERDES_CDR_MASK	0x3
-#define PCIE_PHYMISC2_SERDES_CDR_SHIFT	16
-#define PCIE_PHYMISC2_SERDES_TH_MASK	0x3
-#define PCIE_PHYMISC2_SERDES_TH_SHIFT	18
+#define PCIE_PHYMISC2_L0S_TH_MASK	0x3UL
+#define PCIE_PHYMISC2_L0S_TH_SHIFT	18
+#define L2CB1_PCIE_PHYMISC2_L0S_TH	3
+#define PCIE_PHYMISC2_CDR_BW_MASK	0x3UL
+#define PCIE_PHYMISC2_CDR_BW_SHIFT	16
+#define L2CB1_PCIE_PHYMISC2_CDR_BW	3
 
 #define REG_TWSI_DEBUG			0x1108
-#define TWSI_DEBUG_DEV_EXIST		0x20000000
+#define TWSI_DEBUG_DEV_EXIST		BIT(29)
+
+#define REG_DMA_DBG			0x1114
+#define DMA_DBG_VENDOR_MSG		BIT(0)
 
 #define REG_EEPROM_CTRL			0x12C0
 #define EEPROM_CTRL_DATA_HI_MASK	0xFFFF
@@ -140,56 +143,81 @@
 #define REG_EEPROM_DATA_LO		0x12C4
 
 #define REG_OTP_CTRL			0x12F0
-#define OTP_CTRL_CLK_EN			0x0002
+#define OTP_CTRL_CLK_EN			BIT(1)
 
 #define REG_PM_CTRL			0x12F8
-#define PM_CTRL_SDES_EN			0x00000001
-#define PM_CTRL_RBER_EN			0x00000002
-#define PM_CTRL_CLK_REQ_EN		0x00000004
-#define PM_CTRL_ASPM_L1_EN		0x00000008
-#define PM_CTRL_SERDES_L1_EN		0x00000010
-#define PM_CTRL_SERDES_PLL_L1_EN	0x00000020
-#define PM_CTRL_SERDES_PD_EX_L1		0x00000040
-#define PM_CTRL_SERDES_BUDS_RX_L1_EN	0x00000080
-#define PM_CTRL_L0S_ENTRY_TIMER_MASK	0xF
-#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT	8
-#define PM_CTRL_ASPM_L0S_EN		0x00001000
-#define PM_CTRL_CLK_SWH_L1		0x00002000
-#define PM_CTRL_CLK_PWM_VER1_1		0x00004000
-#define PM_CTRL_RCVR_WT_TIMER		0x00008000
-#define PM_CTRL_L1_ENTRY_TIMER_MASK	0xF
-#define PM_CTRL_L1_ENTRY_TIMER_SHIFT	16
-#define PM_CTRL_PM_REQ_TIMER_MASK	0xF
-#define PM_CTRL_PM_REQ_TIMER_SHIFT	20
-#define PM_CTRL_LCKDET_TIMER_MASK	0xF
+#define PM_CTRL_HOTRST			BIT(31)
+#define PM_CTRL_MAC_ASPM_CHK		BIT(30)	/* L0s/L1 dis by MAC based on
+						 * throughput (setting in 15A0) */
+#define PM_CTRL_SA_DLY_EN		BIT(29)
+#define PM_CTRL_L0S_BUFSRX_EN		BIT(28)
+#define PM_CTRL_LCKDET_TIMER_MASK	0xFUL
 #define PM_CTRL_LCKDET_TIMER_SHIFT	24
-#define PM_CTRL_EN_BUFS_RX_L0S		0x10000000
-#define PM_CTRL_SA_DLY_EN		0x20000000
-#define PM_CTRL_MAC_ASPM_CHK		0x40000000
-#define PM_CTRL_HOTRST			0x80000000
+#define PM_CTRL_LCKDET_TIMER_DEF	0xC
+#define PM_CTRL_PM_REQ_TIMER_MASK	0xFUL
+#define PM_CTRL_PM_REQ_TIMER_SHIFT	20	/* pm_request_l1 time > @
+						 * ->L0s not L1 */
+#define PM_CTRL_PM_REQ_TO_DEF		0xF
+#define PMCTRL_TXL1_AFTER_L0S		BIT(19)	/* l1dv2.0+ */
+#define L1D_PMCTRL_L1_ENTRY_TM_MASK	7UL	/* l1dv2.0+, 3bits */
+#define L1D_PMCTRL_L1_ENTRY_TM_SHIFT	16
+#define L1D_PMCTRL_L1_ENTRY_TM_DIS	0
+#define L1D_PMCTRL_L1_ENTRY_TM_2US	1
+#define L1D_PMCTRL_L1_ENTRY_TM_4US	2
+#define L1D_PMCTRL_L1_ENTRY_TM_8US	3
+#define L1D_PMCTRL_L1_ENTRY_TM_16US	4
+#define L1D_PMCTRL_L1_ENTRY_TM_24US	5
+#define L1D_PMCTRL_L1_ENTRY_TM_32US	6
+#define L1D_PMCTRL_L1_ENTRY_TM_63US	7
+#define PM_CTRL_L1_ENTRY_TIMER_MASK	0xFUL  /* l1C 4bits */
+#define PM_CTRL_L1_ENTRY_TIMER_SHIFT	16
+#define L2CB1_PM_CTRL_L1_ENTRY_TM	7
+#define L1C_PM_CTRL_L1_ENTRY_TM		0xF
+#define PM_CTRL_RCVR_WT_TIMER		BIT(15)	/* 1:1us, 0:2ms */
+#define PM_CTRL_CLK_PWM_VER1_1		BIT(14)	/* 0:1.0a,1:1.1 */
+#define PM_CTRL_CLK_SWH_L1		BIT(13)	/* en pcie clk sw in L1 */
+#define PM_CTRL_ASPM_L0S_EN		BIT(12)
+#define PM_CTRL_RXL1_AFTER_L0S		BIT(11)	/* l1dv2.0+ */
+#define L1D_PMCTRL_L0S_TIMER_MASK	7UL	/* l1d2.0+, 3bits*/
+#define L1D_PMCTRL_L0S_TIMER_SHIFT	8
+#define PM_CTRL_L0S_ENTRY_TIMER_MASK	0xFUL	/* l1c, 4bits */
+#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT	8
+#define PM_CTRL_SERDES_BUFS_RX_L1_EN	BIT(7)
+#define PM_CTRL_SERDES_PD_EX_L1		BIT(6)	/* power down serdes rx */
+#define PM_CTRL_SERDES_PLL_L1_EN	BIT(5)
+#define PM_CTRL_SERDES_L1_EN		BIT(4)
+#define PM_CTRL_ASPM_L1_EN		BIT(3)
+#define PM_CTRL_CLK_REQ_EN		BIT(2)
+#define PM_CTRL_RBER_EN			BIT(1)
+#define PM_CTRL_SPRSDWER_EN		BIT(0)
 
 #define REG_LTSSM_ID_CTRL		0x12FC
 #define LTSSM_ID_EN_WRO			0x1000
+
+
 /* Selene Master Control Register */
 #define REG_MASTER_CTRL			0x1400
-#define MASTER_CTRL_SOFT_RST            0x1
-#define MASTER_CTRL_TEST_MODE_MASK	0x3
-#define MASTER_CTRL_TEST_MODE_SHIFT	2
-#define MASTER_CTRL_BERT_START		0x10
-#define MASTER_CTRL_OOB_DIS_OFF		0x40
-#define MASTER_CTRL_SA_TIMER_EN		0x80
-#define MASTER_CTRL_MTIMER_EN           0x100
-#define MASTER_CTRL_MANUAL_INT          0x200
-#define MASTER_CTRL_TX_ITIMER_EN	0x400
-#define MASTER_CTRL_RX_ITIMER_EN	0x800
-#define MASTER_CTRL_CLK_SEL_DIS		0x1000
-#define MASTER_CTRL_CLK_SWH_MODE	0x2000
-#define MASTER_CTRL_INT_RDCLR		0x4000
-#define MASTER_CTRL_REV_NUM_SHIFT	16
-#define MASTER_CTRL_REV_NUM_MASK	0xff
-#define MASTER_CTRL_DEV_ID_SHIFT	24
-#define MASTER_CTRL_DEV_ID_MASK		0x7f
-#define MASTER_CTRL_OTP_SEL		0x80000000
+#define MASTER_CTRL_OTP_SEL		BIT(31)
+#define MASTER_DEV_NUM_MASK		0x7FUL
+#define MASTER_DEV_NUM_SHIFT		24
+#define MASTER_REV_NUM_MASK		0xFFUL
+#define MASTER_REV_NUM_SHIFT		16
+#define MASTER_CTRL_INT_RDCLR		BIT(14)
+#define MASTER_CTRL_CLK_SEL_DIS		BIT(12)	/* 1:always sel pclk from
+						 * serdes, not sw to 25M */
+#define MASTER_CTRL_RX_ITIMER_EN	BIT(11)	/* IRQ MODULATION FOR RX */
+#define MASTER_CTRL_TX_ITIMER_EN	BIT(10)	/* MODULATION FOR TX/RX */
+#define MASTER_CTRL_MANU_INT		BIT(9)	/* SOFT MANUAL INT */
+#define MASTER_CTRL_MANUTIMER_EN	BIT(8)
+#define MASTER_CTRL_SA_TIMER_EN		BIT(7)	/* SYS ALIVE TIMER EN */
+#define MASTER_CTRL_OOB_DIS		BIT(6)	/* OUT OF BOX DIS */
+#define MASTER_CTRL_WAKEN_25M		BIT(5)	/* WAKE WO. PCIE CLK */
+#define MASTER_CTRL_BERT_START		BIT(4)
+#define MASTER_PCIE_TSTMOD_MASK		3UL
+#define MASTER_PCIE_TSTMOD_SHIFT	2
+#define MASTER_PCIE_RST			BIT(1)
+#define MASTER_CTRL_SOFT_RST		BIT(0)	/* RST MAC & DMA */
+#define DMA_MAC_RST_TO			50
 
 /* Timer Initial Value Register */
 #define REG_MANUAL_TIMER_INIT       	0x1404
@@ -201,87 +229,85 @@
 #define IRQ_MODRT_RX_TIMER_SHIFT	16
 
 #define REG_GPHY_CTRL               	0x140C
-#define GPHY_CTRL_EXT_RESET         	0x1
-#define GPHY_CTRL_RTL_MODE		0x2
-#define GPHY_CTRL_LED_MODE		0x4
-#define GPHY_CTRL_ANEG_NOW		0x8
-#define GPHY_CTRL_REV_ANEG		0x10
-#define GPHY_CTRL_GATE_25M_EN       	0x20
-#define GPHY_CTRL_LPW_EXIT          	0x40
-#define GPHY_CTRL_PHY_IDDQ          	0x80
-#define GPHY_CTRL_PHY_IDDQ_DIS      	0x100
-#define GPHY_CTRL_GIGA_DIS		0x200
-#define GPHY_CTRL_HIB_EN            	0x400
-#define GPHY_CTRL_HIB_PULSE         	0x800
-#define GPHY_CTRL_SEL_ANA_RST       	0x1000
-#define GPHY_CTRL_PHY_PLL_ON        	0x2000
-#define GPHY_CTRL_PWDOWN_HW		0x4000
-#define GPHY_CTRL_PHY_PLL_BYPASS	0x8000
+#define GPHY_CTRL_ADDR_MASK		0x1FUL
+#define GPHY_CTRL_ADDR_SHIFT		19
+#define GPHY_CTRL_BP_VLTGSW		BIT(18)
+#define GPHY_CTRL_100AB_EN		BIT(17)
+#define GPHY_CTRL_10AB_EN		BIT(16)
+#define GPHY_CTRL_PHY_PLL_BYPASS	BIT(15)
+#define GPHY_CTRL_PWDOWN_HW		BIT(14)	/* affect MAC&PHY, to low pw */
+#define GPHY_CTRL_PHY_PLL_ON		BIT(13)	/* 1:pll always on, 0:can sw */
+#define GPHY_CTRL_SEL_ANA_RST		BIT(12)
+#define GPHY_CTRL_HIB_PULSE		BIT(11)
+#define GPHY_CTRL_HIB_EN		BIT(10)
+#define GPHY_CTRL_GIGA_DIS		BIT(9)
+#define GPHY_CTRL_PHY_IDDQ_DIS		BIT(8)	/* pw on RST */
+#define GPHY_CTRL_PHY_IDDQ		BIT(7)	/* bit8 affect bit7 while rb */
+#define GPHY_CTRL_LPW_EXIT		BIT(6)
+#define GPHY_CTRL_GATE_25M_EN		BIT(5)
+#define GPHY_CTRL_REV_ANEG		BIT(4)
+#define GPHY_CTRL_ANEG_NOW		BIT(3)
+#define GPHY_CTRL_LED_MODE		BIT(2)
+#define GPHY_CTRL_RTL_MODE		BIT(1)
+#define GPHY_CTRL_EXT_RESET		BIT(0)	/* 1:out of DSP RST status */
+#define GPHY_CTRL_EXT_RST_TO		80	/* 800us at most */
+#define GPHY_CTRL_CLS			(\
+	GPHY_CTRL_LED_MODE		|\
+	GPHY_CTRL_100AB_EN		|\
+	GPHY_CTRL_PHY_PLL_ON)
 
-#define GPHY_CTRL_DEFAULT (		 \
-		GPHY_CTRL_SEL_ANA_RST	|\
-		GPHY_CTRL_HIB_PULSE	|\
-		GPHY_CTRL_HIB_EN)
-
-#define GPHY_CTRL_PW_WOL_DIS (		 \
-		GPHY_CTRL_SEL_ANA_RST	|\
-		GPHY_CTRL_HIB_PULSE	|\
-		GPHY_CTRL_HIB_EN	|\
-		GPHY_CTRL_PWDOWN_HW	|\
-		GPHY_CTRL_PHY_IDDQ)
-
-#define GPHY_CTRL_POWER_SAVING (	\
-		GPHY_CTRL_SEL_ANA_RST	|\
-		GPHY_CTRL_HIB_EN	|\
-		GPHY_CTRL_HIB_PULSE	|\
-		GPHY_CTRL_PWDOWN_HW	|\
-		GPHY_CTRL_PHY_IDDQ)
 /* Block IDLE Status Register */
-#define REG_IDLE_STATUS  		0x1410
-#define IDLE_STATUS_MASK		0x00FF
-#define IDLE_STATUS_RXMAC_NO_IDLE      	0x1
-#define IDLE_STATUS_TXMAC_NO_IDLE      	0x2
-#define IDLE_STATUS_RXQ_NO_IDLE        	0x4
-#define IDLE_STATUS_TXQ_NO_IDLE        	0x8
-#define IDLE_STATUS_DMAR_NO_IDLE       	0x10
-#define IDLE_STATUS_DMAW_NO_IDLE       	0x20
-#define IDLE_STATUS_SMB_NO_IDLE        	0x40
-#define IDLE_STATUS_CMB_NO_IDLE        	0x80
+#define REG_IDLE_STATUS			0x1410
+#define IDLE_STATUS_SFORCE_MASK		0xFUL
+#define IDLE_STATUS_SFORCE_SHIFT	14
+#define IDLE_STATUS_CALIB_DONE		BIT(13)
+#define IDLE_STATUS_CALIB_RES_MASK	0x1FUL
+#define IDLE_STATUS_CALIB_RES_SHIFT	8
+#define IDLE_STATUS_CALIBERR_MASK	0xFUL
+#define IDLE_STATUS_CALIBERR_SHIFT	4
+#define IDLE_STATUS_TXQ_BUSY		BIT(3)
+#define IDLE_STATUS_RXQ_BUSY		BIT(2)
+#define IDLE_STATUS_TXMAC_BUSY		BIT(1)
+#define IDLE_STATUS_RXMAC_BUSY		BIT(0)
+#define IDLE_STATUS_MASK		(\
+	IDLE_STATUS_TXQ_BUSY		|\
+	IDLE_STATUS_RXQ_BUSY		|\
+	IDLE_STATUS_TXMAC_BUSY		|\
+	IDLE_STATUS_RXMAC_BUSY)
 
 /* MDIO Control Register */
 #define REG_MDIO_CTRL           	0x1414
-#define MDIO_DATA_MASK          	0xffff  /* On MDIO write, the 16-bit
-						 * control data to write to PHY
-						 * MII management register */
-#define MDIO_DATA_SHIFT         	0       /* On MDIO read, the 16-bit
-						 * status data that was read
-						 * from the PHY MII management register */
-#define MDIO_REG_ADDR_MASK      	0x1f    /* MDIO register address */
-#define MDIO_REG_ADDR_SHIFT     	16
-#define MDIO_RW                 	0x200000  /* 1: read, 0: write */
-#define MDIO_SUP_PREAMBLE       	0x400000  /* Suppress preamble */
-#define MDIO_START              	0x800000  /* Write 1 to initiate the MDIO
-						   * master. And this bit is self
-						   * cleared after one cycle */
-#define MDIO_CLK_SEL_SHIFT      	24
-#define MDIO_CLK_25_4           	0
-#define MDIO_CLK_25_6           	2
-#define MDIO_CLK_25_8           	3
-#define MDIO_CLK_25_10          	4
-#define MDIO_CLK_25_14          	5
-#define MDIO_CLK_25_20          	6
-#define MDIO_CLK_25_28          	7
-#define MDIO_BUSY               	0x8000000
-#define MDIO_AP_EN              	0x10000000
-#define MDIO_WAIT_TIMES         	10
+#define MDIO_CTRL_MODE_EXT		BIT(30)
+#define MDIO_CTRL_POST_READ		BIT(29)
+#define MDIO_CTRL_AP_EN			BIT(28)
+#define MDIO_CTRL_BUSY			BIT(27)
+#define MDIO_CTRL_CLK_SEL_MASK		0x7UL
+#define MDIO_CTRL_CLK_SEL_SHIFT		24
+#define MDIO_CTRL_CLK_25_4		0	/* 25MHz divide 4 */
+#define MDIO_CTRL_CLK_25_6		2
+#define MDIO_CTRL_CLK_25_8		3
+#define MDIO_CTRL_CLK_25_10		4
+#define MDIO_CTRL_CLK_25_32		5
+#define MDIO_CTRL_CLK_25_64		6
+#define MDIO_CTRL_CLK_25_128		7
+#define MDIO_CTRL_START			BIT(23)
+#define MDIO_CTRL_SPRES_PRMBL		BIT(22)
+#define MDIO_CTRL_OP_READ		BIT(21)	/* 1:read, 0:write */
+#define MDIO_CTRL_REG_MASK		0x1FUL
+#define MDIO_CTRL_REG_SHIFT		16
+#define MDIO_CTRL_DATA_MASK		0xFFFFUL
+#define MDIO_CTRL_DATA_SHIFT		0
+#define MDIO_MAX_AC_TO			120	/* 1.2ms timeout for slow clk */
 
-/* MII PHY Status Register */
-#define REG_PHY_STATUS           	0x1418
-#define PHY_GENERAL_STATUS_MASK		0xFFFF
-#define PHY_STATUS_RECV_ENABLE		0x0001
-#define PHY_OE_PWSP_STATUS_MASK		0x07FF
-#define PHY_OE_PWSP_STATUS_SHIFT	16
-#define PHY_STATUS_LPW_STATE		0x80000000
+/* for extension reg access */
+#define REG_MDIO_EXTN			0x1448
+#define MDIO_EXTN_PORTAD_MASK		0x1FUL
+#define MDIO_EXTN_PORTAD_SHIFT		21
+#define MDIO_EXTN_DEVAD_MASK		0x1FUL
+#define MDIO_EXTN_DEVAD_SHIFT		16
+#define MDIO_EXTN_REG_MASK		0xFFFFUL
+#define MDIO_EXTN_REG_SHIFT		0
+
 /* BIST Control and Status Register0 (for the Packet Memory) */
 #define REG_BIST0_CTRL              	0x141c
 #define BIST0_NOW                   	0x1
@@ -299,50 +325,81 @@
 #define BIST1_FUSE_FLAG             	0x4
 
 /* SerDes Lock Detect Control and Status Register */
-#define REG_SERDES_LOCK            	0x1424
-#define SERDES_LOCK_DETECT          	0x1  /* SerDes lock detected. This signal
-					      * comes from Analog SerDes */
-#define SERDES_LOCK_DETECT_EN       	0x2  /* 1: Enable SerDes Lock detect function */
-#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE
-#define SERDES_LOCK_STS_SELFB_PLL_MASK  0x3
-#define SERDES_OVCLK_18_25		0x0
-#define SERDES_OVCLK_12_18		0x1
-#define SERDES_OVCLK_0_4		0x2
-#define SERDES_OVCLK_4_12		0x3
-#define SERDES_MAC_CLK_SLOWDOWN		0x20000
-#define SERDES_PYH_CLK_SLOWDOWN		0x40000
+#define REG_SERDES			0x1424
+#define SERDES_PHY_CLK_SLOWDOWN		BIT(18)
+#define SERDES_MAC_CLK_SLOWDOWN		BIT(17)
+#define SERDES_SELFB_PLL_MASK		0x3UL
+#define SERDES_SELFB_PLL_SHIFT		14
+#define SERDES_PHYCLK_SEL_GTX		BIT(13)	/* 1:gtx_clk, 0:25M */
+#define SERDES_PCIECLK_SEL_SRDS		BIT(12)	/* 1:serdes,0:25M */
+#define SERDES_BUFS_RX_EN		BIT(11)
+#define SERDES_PD_RX			BIT(10)
+#define SERDES_PLL_EN			BIT(9)
+#define SERDES_EN			BIT(8)
+#define SERDES_SELFB_PLL_SEL_CSR	BIT(6)	/* 0:state-machine,1:csr */
+#define SERDES_SELFB_PLL_CSR_MASK	0x3UL
+#define SERDES_SELFB_PLL_CSR_SHIFT	4
+#define SERDES_SELFB_PLL_CSR_4		3	/* 4-12% OV-CLK */
+#define SERDES_SELFB_PLL_CSR_0		2	/* 0-4% OV-CLK */
+#define SERDES_SELFB_PLL_CSR_12		1	/* 12-18% OV-CLK */
+#define SERDES_SELFB_PLL_CSR_18		0	/* 18-25% OV-CLK */
+#define SERDES_VCO_SLOW			BIT(3)
+#define SERDES_VCO_FAST			BIT(2)
+#define SERDES_LOCK_DETECT_EN		BIT(1)
+#define SERDES_LOCK_DETECT		BIT(0)
+
+#define REG_LPI_DECISN_TIMER            0x143C
+#define L2CB_LPI_DESISN_TIMER		0x7D00
+
+#define REG_LPI_CTRL                    0x1440
+#define LPI_CTRL_CHK_DA			BIT(31)
+#define LPI_CTRL_ENH_TO_MASK		0x1FFFUL
+#define LPI_CTRL_ENH_TO_SHIFT		12
+#define LPI_CTRL_ENH_TH_MASK		0x1FUL
+#define LPI_CTRL_ENH_TH_SHIFT		6
+#define LPI_CTRL_ENH_EN			BIT(5)
+#define LPI_CTRL_CHK_RX			BIT(4)
+#define LPI_CTRL_CHK_STATE		BIT(3)
+#define LPI_CTRL_GMII			BIT(2)
+#define LPI_CTRL_TO_PHY			BIT(1)
+#define LPI_CTRL_EN			BIT(0)
+
+#define REG_LPI_WAIT			0x1444
+#define LPI_WAIT_TIMER_MASK		0xFFFFUL
+#define LPI_WAIT_TIMER_SHIFT		0
 
 /* MAC Control Register  */
 #define REG_MAC_CTRL         		0x1480
-#define MAC_CTRL_TX_EN			0x1
-#define MAC_CTRL_RX_EN			0x2
-#define MAC_CTRL_TX_FLOW		0x4
-#define MAC_CTRL_RX_FLOW            	0x8
-#define MAC_CTRL_LOOPBACK          	0x10
-#define MAC_CTRL_DUPLX              	0x20
-#define MAC_CTRL_ADD_CRC            	0x40
-#define MAC_CTRL_PAD                	0x80
-#define MAC_CTRL_LENCHK             	0x100
-#define MAC_CTRL_HUGE_EN            	0x200
-#define MAC_CTRL_PRMLEN_SHIFT       	10
-#define MAC_CTRL_PRMLEN_MASK        	0xf
-#define MAC_CTRL_RMV_VLAN           	0x4000
-#define MAC_CTRL_PROMIS_EN          	0x8000
-#define MAC_CTRL_TX_PAUSE           	0x10000
-#define MAC_CTRL_SCNT               	0x20000
-#define MAC_CTRL_SRST_TX            	0x40000
-#define MAC_CTRL_TX_SIMURST         	0x80000
-#define MAC_CTRL_SPEED_SHIFT        	20
-#define MAC_CTRL_SPEED_MASK         	0x3
-#define MAC_CTRL_DBG_TX_BKPRESURE   	0x400000
-#define MAC_CTRL_TX_HUGE            	0x800000
-#define MAC_CTRL_RX_CHKSUM_EN       	0x1000000
-#define MAC_CTRL_MC_ALL_EN          	0x2000000
-#define MAC_CTRL_BC_EN              	0x4000000
-#define MAC_CTRL_DBG                	0x8000000
-#define MAC_CTRL_SINGLE_PAUSE_EN	0x10000000
-#define MAC_CTRL_HASH_ALG_CRC32		0x20000000
-#define MAC_CTRL_SPEED_MODE_SW		0x40000000
+#define MAC_CTRL_SPEED_MODE_SW		BIT(30) /* 0:phy,1:sw */
+#define MAC_CTRL_HASH_ALG_CRC32		BIT(29) /* 1:legacy,0:lw_5b */
+#define MAC_CTRL_SINGLE_PAUSE_EN	BIT(28)
+#define MAC_CTRL_DBG			BIT(27)
+#define MAC_CTRL_BC_EN			BIT(26)
+#define MAC_CTRL_MC_ALL_EN		BIT(25)
+#define MAC_CTRL_RX_CHKSUM_EN		BIT(24)
+#define MAC_CTRL_TX_HUGE		BIT(23)
+#define MAC_CTRL_DBG_TX_BKPRESURE	BIT(22)
+#define MAC_CTRL_SPEED_MASK		3UL
+#define MAC_CTRL_SPEED_SHIFT		20
+#define MAC_CTRL_SPEED_10_100		1
+#define MAC_CTRL_SPEED_1000		2
+#define MAC_CTRL_TX_SIMURST		BIT(19)
+#define MAC_CTRL_SCNT			BIT(17)
+#define MAC_CTRL_TX_PAUSE		BIT(16)
+#define MAC_CTRL_PROMIS_EN		BIT(15)
+#define MAC_CTRL_RMV_VLAN		BIT(14)
+#define MAC_CTRL_PRMLEN_MASK		0xFUL
+#define MAC_CTRL_PRMLEN_SHIFT		10
+#define MAC_CTRL_HUGE_EN		BIT(9)
+#define MAC_CTRL_LENCHK			BIT(8)
+#define MAC_CTRL_PAD			BIT(7)
+#define MAC_CTRL_ADD_CRC		BIT(6)
+#define MAC_CTRL_DUPLX			BIT(5)
+#define MAC_CTRL_LOOPBACK		BIT(4)
+#define MAC_CTRL_RX_FLOW		BIT(3)
+#define MAC_CTRL_TX_FLOW		BIT(2)
+#define MAC_CTRL_RX_EN			BIT(1)
+#define MAC_CTRL_TX_EN			BIT(0)
 
 /* MAC IPG/IFG Control Register  */
 #define REG_MAC_IPG_IFG             	0x1484
@@ -386,34 +443,53 @@
 
 /* Wake-On-Lan control register */
 #define REG_WOL_CTRL                	0x14a0
-#define WOL_PATTERN_EN              	0x00000001
-#define WOL_PATTERN_PME_EN              0x00000002
-#define WOL_MAGIC_EN                    0x00000004
-#define WOL_MAGIC_PME_EN                0x00000008
-#define WOL_LINK_CHG_EN                 0x00000010
-#define WOL_LINK_CHG_PME_EN             0x00000020
-#define WOL_PATTERN_ST                  0x00000100
-#define WOL_MAGIC_ST                    0x00000200
-#define WOL_LINKCHG_ST                  0x00000400
-#define WOL_CLK_SWITCH_EN               0x00008000
-#define WOL_PT0_EN                      0x00010000
-#define WOL_PT1_EN                      0x00020000
-#define WOL_PT2_EN                      0x00040000
-#define WOL_PT3_EN                      0x00080000
-#define WOL_PT4_EN                      0x00100000
-#define WOL_PT5_EN                      0x00200000
-#define WOL_PT6_EN                      0x00400000
+#define WOL_PT7_MATCH			BIT(31)
+#define WOL_PT6_MATCH			BIT(30)
+#define WOL_PT5_MATCH			BIT(29)
+#define WOL_PT4_MATCH			BIT(28)
+#define WOL_PT3_MATCH			BIT(27)
+#define WOL_PT2_MATCH			BIT(26)
+#define WOL_PT1_MATCH			BIT(25)
+#define WOL_PT0_MATCH			BIT(24)
+#define WOL_PT7_EN			BIT(23)
+#define WOL_PT6_EN			BIT(22)
+#define WOL_PT5_EN			BIT(21)
+#define WOL_PT4_EN			BIT(20)
+#define WOL_PT3_EN			BIT(19)
+#define WOL_PT2_EN			BIT(18)
+#define WOL_PT1_EN			BIT(17)
+#define WOL_PT0_EN			BIT(16)
+#define WOL_LNKCHG_ST			BIT(10)
+#define WOL_MAGIC_ST			BIT(9)
+#define WOL_PATTERN_ST			BIT(8)
+#define WOL_OOB_EN			BIT(6)
+#define WOL_LINK_CHG_PME_EN		BIT(5)
+#define WOL_LINK_CHG_EN			BIT(4)
+#define WOL_MAGIC_PME_EN		BIT(3)
+#define WOL_MAGIC_EN			BIT(2)
+#define WOL_PATTERN_PME_EN		BIT(1)
+#define WOL_PATTERN_EN			BIT(0)
 
 /* WOL Length ( 2 DWORD ) */
-#define REG_WOL_PATTERN_LEN         	0x14a4
-#define WOL_PT_LEN_MASK                 0x7f
-#define WOL_PT0_LEN_SHIFT               0
-#define WOL_PT1_LEN_SHIFT               8
-#define WOL_PT2_LEN_SHIFT               16
-#define WOL_PT3_LEN_SHIFT               24
-#define WOL_PT4_LEN_SHIFT               0
-#define WOL_PT5_LEN_SHIFT               8
-#define WOL_PT6_LEN_SHIFT               16
+#define REG_WOL_PTLEN1			0x14A4
+#define WOL_PTLEN1_3_MASK		0xFFUL
+#define WOL_PTLEN1_3_SHIFT		24
+#define WOL_PTLEN1_2_MASK		0xFFUL
+#define WOL_PTLEN1_2_SHIFT		16
+#define WOL_PTLEN1_1_MASK		0xFFUL
+#define WOL_PTLEN1_1_SHIFT		8
+#define WOL_PTLEN1_0_MASK		0xFFUL
+#define WOL_PTLEN1_0_SHIFT		0
+
+#define REG_WOL_PTLEN2			0x14A8
+#define WOL_PTLEN2_7_MASK		0xFFUL
+#define WOL_PTLEN2_7_SHIFT		24
+#define WOL_PTLEN2_6_MASK		0xFFUL
+#define WOL_PTLEN2_6_SHIFT		16
+#define WOL_PTLEN2_5_MASK		0xFFUL
+#define WOL_PTLEN2_5_SHIFT		8
+#define WOL_PTLEN2_4_MASK		0xFFUL
+#define WOL_PTLEN2_4_SHIFT		0
 
 /* Internal SRAM Partition Register */
 #define RFDX_HEAD_ADDR_MASK		0x03FF
@@ -458,66 +534,50 @@
  */
 #define REG_RX_BASE_ADDR_HI		0x1540
 #define REG_TX_BASE_ADDR_HI		0x1544
-#define REG_SMB_BASE_ADDR_HI		0x1548
-#define REG_SMB_BASE_ADDR_LO		0x154C
 #define REG_RFD0_HEAD_ADDR_LO		0x1550
-#define REG_RFD1_HEAD_ADDR_LO		0x1554
-#define REG_RFD2_HEAD_ADDR_LO		0x1558
-#define REG_RFD3_HEAD_ADDR_LO		0x155C
 #define REG_RFD_RING_SIZE		0x1560
 #define RFD_RING_SIZE_MASK		0x0FFF
 #define REG_RX_BUF_SIZE			0x1564
 #define RX_BUF_SIZE_MASK		0xFFFF
 #define REG_RRD0_HEAD_ADDR_LO		0x1568
-#define REG_RRD1_HEAD_ADDR_LO		0x156C
-#define REG_RRD2_HEAD_ADDR_LO		0x1570
-#define REG_RRD3_HEAD_ADDR_LO		0x1574
 #define REG_RRD_RING_SIZE		0x1578
 #define RRD_RING_SIZE_MASK		0x0FFF
-#define REG_HTPD_HEAD_ADDR_LO		0x157C
-#define REG_NTPD_HEAD_ADDR_LO		0x1580
+#define REG_TPD_PRI1_ADDR_LO		0x157C
+#define REG_TPD_PRI0_ADDR_LO		0x1580
 #define REG_TPD_RING_SIZE		0x1584
 #define TPD_RING_SIZE_MASK		0xFFFF
-#define REG_CMB_BASE_ADDR_LO		0x1588
-
-/* RSS about */
-#define REG_RSS_KEY0                    0x14B0
-#define REG_RSS_KEY1                    0x14B4
-#define REG_RSS_KEY2                    0x14B8
-#define REG_RSS_KEY3                    0x14BC
-#define REG_RSS_KEY4                    0x14C0
-#define REG_RSS_KEY5                    0x14C4
-#define REG_RSS_KEY6                    0x14C8
-#define REG_RSS_KEY7                    0x14CC
-#define REG_RSS_KEY8                    0x14D0
-#define REG_RSS_KEY9                    0x14D4
-#define REG_IDT_TABLE0                	0x14E0
-#define REG_IDT_TABLE1                  0x14E4
-#define REG_IDT_TABLE2                  0x14E8
-#define REG_IDT_TABLE3                  0x14EC
-#define REG_IDT_TABLE4                  0x14F0
-#define REG_IDT_TABLE5                  0x14F4
-#define REG_IDT_TABLE6                  0x14F8
-#define REG_IDT_TABLE7                  0x14FC
-#define REG_IDT_TABLE                   REG_IDT_TABLE0
-#define REG_RSS_HASH_VALUE              0x15B0
-#define REG_RSS_HASH_FLAG               0x15B4
-#define REG_BASE_CPU_NUMBER             0x15B8
 
 /* TXQ Control Register */
-#define REG_TXQ_CTRL                	0x1590
-#define	TXQ_NUM_TPD_BURST_MASK     	0xF
-#define TXQ_NUM_TPD_BURST_SHIFT    	0
-#define TXQ_CTRL_IP_OPTION_EN		0x10
-#define TXQ_CTRL_EN                     0x20
-#define TXQ_CTRL_ENH_MODE               0x40
-#define TXQ_CTRL_LS_8023_EN		0x80
-#define TXQ_TXF_BURST_NUM_SHIFT    	16
-#define TXQ_TXF_BURST_NUM_MASK     	0xFFFF
+#define REG_TXQ_CTRL			0x1590
+#define TXQ_TXF_BURST_NUM_MASK          0xFFFFUL
+#define TXQ_TXF_BURST_NUM_SHIFT		16
+#define L1C_TXQ_TXF_BURST_PREF          0x200
+#define L2CB_TXQ_TXF_BURST_PREF         0x40
+#define TXQ_CTRL_PEDING_CLR             BIT(8)
+#define TXQ_CTRL_LS_8023_EN             BIT(7)
+#define TXQ_CTRL_ENH_MODE               BIT(6)
+#define TXQ_CTRL_EN                     BIT(5)
+#define TXQ_CTRL_IP_OPTION_EN           BIT(4)
+#define TXQ_NUM_TPD_BURST_MASK          0xFUL
+#define TXQ_NUM_TPD_BURST_SHIFT         0
+#define TXQ_NUM_TPD_BURST_DEF           5
+#define TXQ_CFGV			(\
+	FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) |\
+	TXQ_CTRL_ENH_MODE |\
+	TXQ_CTRL_LS_8023_EN |\
+	TXQ_CTRL_IP_OPTION_EN)
+#define L1C_TXQ_CFGV			(\
+	TXQ_CFGV |\
+	FIELDX(TXQ_TXF_BURST_NUM, L1C_TXQ_TXF_BURST_PREF))
+#define L2CB_TXQ_CFGV			(\
+	TXQ_CFGV |\
+	FIELDX(TXQ_TXF_BURST_NUM, L2CB_TXQ_TXF_BURST_PREF))
+
 
 /* Jumbo packet Threshold for task offload */
 #define REG_TX_TSO_OFFLOAD_THRESH	0x1594 /* In 8-bytes */
 #define TX_TSO_OFFLOAD_THRESH_MASK	0x07FF
+#define MAX_TSO_FRAME_SIZE		(7*1024)
 
 #define	REG_TXF_WATER_MARK		0x1598 /* In 8-bytes */
 #define TXF_WATER_MARK_MASK		0x0FFF
@@ -537,26 +597,21 @@
 #define ASPM_THRUPUT_LIMIT_NO		0x00
 #define ASPM_THRUPUT_LIMIT_1M		0x01
 #define ASPM_THRUPUT_LIMIT_10M		0x02
-#define ASPM_THRUPUT_LIMIT_100M		0x04
-#define RXQ1_CTRL_EN			0x10
-#define RXQ2_CTRL_EN			0x20
-#define RXQ3_CTRL_EN			0x40
-#define IPV6_CHKSUM_CTRL_EN		0x80
-#define RSS_HASH_BITS_MASK		0x00FF
-#define RSS_HASH_BITS_SHIFT		8
-#define RSS_HASH_IPV4			0x10000
-#define RSS_HASH_IPV4_TCP		0x20000
-#define RSS_HASH_IPV6			0x40000
-#define RSS_HASH_IPV6_TCP		0x80000
+#define ASPM_THRUPUT_LIMIT_100M		0x03
+#define IPV6_CHKSUM_CTRL_EN		BIT(7)
 #define RXQ_RFD_BURST_NUM_MASK		0x003F
 #define RXQ_RFD_BURST_NUM_SHIFT		20
-#define RSS_MODE_MASK			0x0003
+#define RXQ_NUM_RFD_PREF_DEF		8
+#define RSS_MODE_MASK			3UL
 #define RSS_MODE_SHIFT			26
-#define RSS_NIP_QUEUE_SEL_MASK		0x1
-#define RSS_NIP_QUEUE_SEL_SHIFT		28
-#define RRS_HASH_CTRL_EN		0x20000000
-#define RX_CUT_THRU_EN			0x40000000
-#define RXQ_CTRL_EN			0x80000000
+#define RSS_MODE_DIS			0
+#define RSS_MODE_SQSI			1
+#define RSS_MODE_MQSI			2
+#define RSS_MODE_MQMI			3
+#define RSS_NIP_QUEUE_SEL		BIT(28) /* 0:q0, 1:table */
+#define RRS_HASH_CTRL_EN		BIT(29)
+#define RX_CUT_THRU_EN			BIT(30)
+#define RXQ_CTRL_EN			BIT(31)
 
 #define REG_RFD_FREE_THRESH		0x15A4
 #define RFD_FREE_THRESH_MASK		0x003F
@@ -577,57 +632,45 @@
 #define RXD_DMA_DOWN_TIMER_SHIFT	16
 
 /* DMA Engine Control Register */
-#define REG_DMA_CTRL                	0x15C0
-#define DMA_CTRL_DMAR_IN_ORDER          0x1
-#define DMA_CTRL_DMAR_ENH_ORDER         0x2
-#define DMA_CTRL_DMAR_OUT_ORDER         0x4
-#define DMA_CTRL_RCB_VALUE              0x8
-#define DMA_CTRL_DMAR_BURST_LEN_MASK    0x0007
-#define DMA_CTRL_DMAR_BURST_LEN_SHIFT   4
-#define DMA_CTRL_DMAW_BURST_LEN_MASK    0x0007
-#define DMA_CTRL_DMAW_BURST_LEN_SHIFT   7
-#define DMA_CTRL_DMAR_REQ_PRI           0x400
-#define DMA_CTRL_DMAR_DLY_CNT_MASK      0x001F
-#define DMA_CTRL_DMAR_DLY_CNT_SHIFT     11
-#define DMA_CTRL_DMAW_DLY_CNT_MASK      0x000F
-#define DMA_CTRL_DMAW_DLY_CNT_SHIFT     16
-#define DMA_CTRL_CMB_EN               	0x100000
-#define DMA_CTRL_SMB_EN			0x200000
-#define DMA_CTRL_CMB_NOW		0x400000
-#define MAC_CTRL_SMB_DIS		0x1000000
-#define DMA_CTRL_SMB_NOW		0x80000000
+#define REG_DMA_CTRL			0x15C0
+#define DMA_CTRL_SMB_NOW                BIT(31)
+#define DMA_CTRL_WPEND_CLR              BIT(30)
+#define DMA_CTRL_RPEND_CLR              BIT(29)
+#define DMA_CTRL_WDLY_CNT_MASK          0xFUL
+#define DMA_CTRL_WDLY_CNT_SHIFT         16
+#define DMA_CTRL_WDLY_CNT_DEF           4
+#define DMA_CTRL_RDLY_CNT_MASK          0x1FUL
+#define DMA_CTRL_RDLY_CNT_SHIFT         11
+#define DMA_CTRL_RDLY_CNT_DEF           15
+#define DMA_CTRL_RREQ_PRI_DATA          BIT(10)      /* 0:tpd, 1:data */
+#define DMA_CTRL_WREQ_BLEN_MASK         7UL
+#define DMA_CTRL_WREQ_BLEN_SHIFT        7
+#define DMA_CTRL_RREQ_BLEN_MASK         7UL
+#define DMA_CTRL_RREQ_BLEN_SHIFT        4
+#define L1C_CTRL_DMA_RCB_LEN128         BIT(3)   /* 0:64bytes,1:128bytes */
+#define DMA_CTRL_RORDER_MODE_MASK       7UL
+#define DMA_CTRL_RORDER_MODE_SHIFT      0
+#define DMA_CTRL_RORDER_MODE_OUT        4
+#define DMA_CTRL_RORDER_MODE_ENHANCE    2
+#define DMA_CTRL_RORDER_MODE_IN         1
 
-/* CMB/SMB Control Register */
+/* INT-trigger/SMB Control Register */
 #define REG_SMB_STAT_TIMER		0x15C4	/* 2us resolution */
 #define SMB_STAT_TIMER_MASK		0xFFFFFF
-#define REG_CMB_TPD_THRESH		0x15C8
-#define CMB_TPD_THRESH_MASK		0xFFFF
-#define REG_CMB_TX_TIMER		0x15CC	/* 2us resolution */
-#define CMB_TX_TIMER_MASK		0xFFFF
+#define REG_TINT_TPD_THRESH             0x15C8 /* tpd threshold to trigger interrupt */
 
 /* Mail box */
 #define MB_RFDX_PROD_IDX_MASK		0xFFFF
 #define REG_MB_RFD0_PROD_IDX		0x15E0
-#define REG_MB_RFD1_PROD_IDX		0x15E4
-#define REG_MB_RFD2_PROD_IDX		0x15E8
-#define REG_MB_RFD3_PROD_IDX		0x15EC
 
-#define MB_PRIO_PROD_IDX_MASK		0xFFFF
-#define REG_MB_PRIO_PROD_IDX		0x15F0
-#define MB_HTPD_PROD_IDX_SHIFT		0
-#define MB_NTPD_PROD_IDX_SHIFT		16
-
-#define MB_PRIO_CONS_IDX_MASK		0xFFFF
-#define REG_MB_PRIO_CONS_IDX		0x15F4
-#define MB_HTPD_CONS_IDX_SHIFT		0
-#define MB_NTPD_CONS_IDX_SHIFT		16
+#define REG_TPD_PRI1_PIDX               0x15F0	/* 16bit,hi-tpd producer idx */
+#define REG_TPD_PRI0_PIDX		0x15F2	/* 16bit,lo-tpd producer idx */
+#define REG_TPD_PRI1_CIDX		0x15F4	/* 16bit,hi-tpd consumer idx */
+#define REG_TPD_PRI0_CIDX		0x15F6	/* 16bit,lo-tpd consumer idx */
 
 #define REG_MB_RFD01_CONS_IDX		0x15F8
 #define MB_RFD0_CONS_IDX_MASK		0x0000FFFF
 #define MB_RFD1_CONS_IDX_MASK		0xFFFF0000
-#define REG_MB_RFD23_CONS_IDX		0x15FC
-#define MB_RFD2_CONS_IDX_MASK		0x0000FFFF
-#define MB_RFD3_CONS_IDX_MASK		0xFFFF0000
 
 /* Interrupt Status Register */
 #define REG_ISR    			0x1600
@@ -705,13 +748,6 @@
 #define REG_INT_RETRIG_TIMER		0x1608
 #define INT_RETRIG_TIMER_MASK		0xFFFF
 
-#define REG_HDS_CTRL			0x160C
-#define HDS_CTRL_EN			0x0001
-#define HDS_CTRL_BACKFILLSIZE_SHIFT	8
-#define HDS_CTRL_BACKFILLSIZE_MASK	0x0FFF
-#define HDS_CTRL_MAX_HDRSIZE_SHIFT	20
-#define HDS_CTRL_MAC_HDRSIZE_MASK	0x0FFF
-
 #define REG_MAC_RX_STATUS_BIN 		0x1700
 #define REG_MAC_RX_STATUS_END 		0x175c
 #define REG_MAC_TX_STATUS_BIN 		0x1760
@@ -796,73 +832,188 @@
 #define MII_DBG_ADDR			0x1D
 #define MII_DBG_DATA			0x1E
 
-#define MII_ANA_CTRL_0			0x0
-#define ANA_RESTART_CAL			0x0001
-#define ANA_MANUL_SWICH_ON_SHIFT	0x1
-#define ANA_MANUL_SWICH_ON_MASK		0xF
-#define ANA_MAN_ENABLE			0x0020
-#define ANA_SEL_HSP			0x0040
-#define ANA_EN_HB			0x0080
-#define ANA_EN_HBIAS			0x0100
-#define ANA_OEN_125M			0x0200
-#define ANA_EN_LCKDT			0x0400
-#define ANA_LCKDT_PHY			0x0800
-#define ANA_AFE_MODE			0x1000
-#define ANA_VCO_SLOW			0x2000
-#define ANA_VCO_FAST			0x4000
-#define ANA_SEL_CLK125M_DSP		0x8000
+/***************************** debug port *************************************/
 
-#define MII_ANA_CTRL_4			0x4
-#define ANA_IECHO_ADJ_MASK		0xF
-#define ANA_IECHO_ADJ_3_SHIFT		0
-#define ANA_IECHO_ADJ_2_SHIFT		4
-#define ANA_IECHO_ADJ_1_SHIFT		8
-#define ANA_IECHO_ADJ_0_SHIFT		12
+#define MIIDBG_ANACTRL                  0x00
+#define ANACTRL_CLK125M_DELAY_EN        0x8000
+#define ANACTRL_VCO_FAST                0x4000
+#define ANACTRL_VCO_SLOW                0x2000
+#define ANACTRL_AFE_MODE_EN             0x1000
+#define ANACTRL_LCKDET_PHY              0x800
+#define ANACTRL_LCKDET_EN               0x400
+#define ANACTRL_OEN_125M                0x200
+#define ANACTRL_HBIAS_EN                0x100
+#define ANACTRL_HB_EN                   0x80
+#define ANACTRL_SEL_HSP                 0x40
+#define ANACTRL_CLASSA_EN               0x20
+#define ANACTRL_MANUSWON_SWR_MASK       3U
+#define ANACTRL_MANUSWON_SWR_SHIFT      2
+#define ANACTRL_MANUSWON_SWR_2V         0
+#define ANACTRL_MANUSWON_SWR_1P9V       1
+#define ANACTRL_MANUSWON_SWR_1P8V       2
+#define ANACTRL_MANUSWON_SWR_1P7V       3
+#define ANACTRL_MANUSWON_BW3_4M         0x2
+#define ANACTRL_RESTART_CAL             0x1
+#define ANACTRL_DEF                     0x02EF
 
-#define MII_ANA_CTRL_5			0x5
-#define ANA_SERDES_CDR_BW_SHIFT		0
-#define ANA_SERDES_CDR_BW_MASK		0x3
-#define ANA_MS_PAD_DBG			0x0004
-#define ANA_SPEEDUP_DBG			0x0008
-#define ANA_SERDES_TH_LOS_SHIFT		4
-#define ANA_SERDES_TH_LOS_MASK		0x3
-#define ANA_SERDES_EN_DEEM		0x0040
-#define ANA_SERDES_TXELECIDLE		0x0080
-#define ANA_SERDES_BEACON		0x0100
-#define ANA_SERDES_HALFTXDR		0x0200
-#define ANA_SERDES_SEL_HSP		0x0400
-#define ANA_SERDES_EN_PLL		0x0800
-#define ANA_SERDES_EN			0x1000
-#define ANA_SERDES_EN_LCKDT		0x2000
+#define MIIDBG_SYSMODCTRL               0x04
+#define SYSMODCTRL_IECHOADJ_PFMH_PHY    0x8000
+#define SYSMODCTRL_IECHOADJ_BIASGEN     0x4000
+#define SYSMODCTRL_IECHOADJ_PFML_PHY    0x2000
+#define SYSMODCTRL_IECHOADJ_PS_MASK     3U
+#define SYSMODCTRL_IECHOADJ_PS_SHIFT    10
+#define SYSMODCTRL_IECHOADJ_PS_40       3
+#define SYSMODCTRL_IECHOADJ_PS_20       2
+#define SYSMODCTRL_IECHOADJ_PS_0        1
+#define SYSMODCTRL_IECHOADJ_10BT_100MV  0x40 /* 1:100mv, 0:200mv */
+#define SYSMODCTRL_IECHOADJ_HLFAP_MASK  3U
+#define SYSMODCTRL_IECHOADJ_HLFAP_SHIFT 4
+#define SYSMODCTRL_IECHOADJ_VDFULBW     0x8
+#define SYSMODCTRL_IECHOADJ_VDBIASHLF   0x4
+#define SYSMODCTRL_IECHOADJ_VDAMPHLF    0x2
+#define SYSMODCTRL_IECHOADJ_VDLANSW     0x1
+#define SYSMODCTRL_IECHOADJ_DEF         0x88BB /* ???? */
 
-#define MII_ANA_CTRL_11			0xB
-#define ANA_PS_HIB_EN			0x8000
+/* for l1d & l2cb */
+#define SYSMODCTRL_IECHOADJ_CUR_ADD     0x8000
+#define SYSMODCTRL_IECHOADJ_CUR_MASK    7U
+#define SYSMODCTRL_IECHOADJ_CUR_SHIFT   12
+#define SYSMODCTRL_IECHOADJ_VOL_MASK    0xFU
+#define SYSMODCTRL_IECHOADJ_VOL_SHIFT   8
+#define SYSMODCTRL_IECHOADJ_VOL_17ALL   3
+#define SYSMODCTRL_IECHOADJ_VOL_100M15  1
+#define SYSMODCTRL_IECHOADJ_VOL_10M17   0
+#define SYSMODCTRL_IECHOADJ_BIAS1_MASK  0xFU
+#define SYSMODCTRL_IECHOADJ_BIAS1_SHIFT 4
+#define SYSMODCTRL_IECHOADJ_BIAS2_MASK  0xFU
+#define SYSMODCTRL_IECHOADJ_BIAS2_SHIFT 0
+#define L1D_SYSMODCTRL_IECHOADJ_DEF     0x4FBB
 
-#define MII_ANA_CTRL_18			0x12
-#define ANA_TEST_MODE_10BT_01SHIFT	0
-#define ANA_TEST_MODE_10BT_01MASK	0x3
-#define ANA_LOOP_SEL_10BT		0x0004
-#define ANA_RGMII_MODE_SW		0x0008
-#define ANA_EN_LONGECABLE		0x0010
-#define ANA_TEST_MODE_10BT_2		0x0020
-#define ANA_EN_10BT_IDLE		0x0400
-#define ANA_EN_MASK_TB			0x0800
-#define ANA_TRIGGER_SEL_TIMER_SHIFT	12
-#define ANA_TRIGGER_SEL_TIMER_MASK	0x3
-#define ANA_INTERVAL_SEL_TIMER_SHIFT	14
-#define ANA_INTERVAL_SEL_TIMER_MASK	0x3
+#define MIIDBG_SRDSYSMOD                0x05
+#define SRDSYSMOD_LCKDET_EN             0x2000
+#define SRDSYSMOD_PLL_EN                0x800
+#define SRDSYSMOD_SEL_HSP               0x400
+#define SRDSYSMOD_HLFTXDR               0x200
+#define SRDSYSMOD_TXCLK_DELAY_EN        0x100
+#define SRDSYSMOD_TXELECIDLE            0x80
+#define SRDSYSMOD_DEEMP_EN              0x40
+#define SRDSYSMOD_MS_PAD                0x4
+#define SRDSYSMOD_CDR_ADC_VLTG          0x2
+#define SRDSYSMOD_CDR_DAC_1MA           0x1
+#define SRDSYSMOD_DEF                   0x2C46
 
-#define MII_ANA_CTRL_41			0x29
-#define ANA_TOP_PS_EN			0x8000
+#define MIIDBG_CFGLPSPD                 0x0A
+#define CFGLPSPD_RSTCNT_MASK            3U
+#define CFGLPSPD_RSTCNT_SHIFT           14
+#define CFGLPSPD_RSTCNT_CLK125SW        0x2000
 
-#define MII_ANA_CTRL_54			0x36
-#define ANA_LONG_CABLE_TH_100_SHIFT	0
-#define ANA_LONG_CABLE_TH_100_MASK	0x3F
-#define ANA_DESERVED			0x0040
-#define ANA_EN_LIT_CH			0x0080
-#define ANA_SHORT_CABLE_TH_100_SHIFT	8
-#define ANA_SHORT_CABLE_TH_100_MASK	0x3F
-#define ANA_BP_BAD_LINK_ACCUM		0x4000
-#define ANA_BP_SMALL_BW			0x8000
+#define MIIDBG_HIBNEG                   0x0B
+#define HIBNEG_PSHIB_EN                 0x8000
+#define HIBNEG_WAKE_BOTH                0x4000
+#define HIBNEG_ONOFF_ANACHG_SUDEN       0x2000
+#define HIBNEG_HIB_PULSE                0x1000
+#define HIBNEG_GATE_25M_EN              0x800
+#define HIBNEG_RST_80U                  0x400
+#define HIBNEG_RST_TIMER_MASK           3U
+#define HIBNEG_RST_TIMER_SHIFT          8
+#define HIBNEG_GTX_CLK_DELAY_MASK       3U
+#define HIBNEG_GTX_CLK_DELAY_SHIFT      5
+#define HIBNEG_BYPSS_BRKTIMER           0x10
+#define HIBNEG_DEF                      0xBC40
+
+#define MIIDBG_TST10BTCFG               0x12
+#define TST10BTCFG_INTV_TIMER_MASK      3U
+#define TST10BTCFG_INTV_TIMER_SHIFT     14
+#define TST10BTCFG_TRIGER_TIMER_MASK    3U
+#define TST10BTCFG_TRIGER_TIMER_SHIFT   12
+#define TST10BTCFG_DIV_MAN_MLT3_EN      0x800
+#define TST10BTCFG_OFF_DAC_IDLE         0x400
+#define TST10BTCFG_LPBK_DEEP            0x4 /* 1:deep,0:shallow */
+#define TST10BTCFG_DEF                  0x4C04
+
+#define MIIDBG_AZ_ANADECT		0x15
+#define AZ_ANADECT_10BTRX_TH		0x8000
+#define AZ_ANADECT_BOTH_01CHNL		0x4000
+#define AZ_ANADECT_INTV_MASK		0x3FU
+#define AZ_ANADECT_INTV_SHIFT		8
+#define AZ_ANADECT_THRESH_MASK		0xFU
+#define AZ_ANADECT_THRESH_SHIFT		4
+#define AZ_ANADECT_CHNL_MASK		0xFU
+#define AZ_ANADECT_CHNL_SHIFT		0
+#define AZ_ANADECT_DEF			0x3220
+#define AZ_ANADECT_LONG                 0xb210
+
+#define MIIDBG_MSE16DB			0x18	/* l1d */
+#define L1D_MSE16DB_UP			0x05EA
+#define L1D_MSE16DB_DOWN		0x02EA
+
+#define MIIDBG_LEGCYPS                  0x29
+#define LEGCYPS_EN                      0x8000
+#define LEGCYPS_DAC_AMP1000_MASK        7U
+#define LEGCYPS_DAC_AMP1000_SHIFT       12
+#define LEGCYPS_DAC_AMP100_MASK         7U
+#define LEGCYPS_DAC_AMP100_SHIFT        9
+#define LEGCYPS_DAC_AMP10_MASK          7U
+#define LEGCYPS_DAC_AMP10_SHIFT         6
+#define LEGCYPS_UNPLUG_TIMER_MASK       7U
+#define LEGCYPS_UNPLUG_TIMER_SHIFT      3
+#define LEGCYPS_UNPLUG_DECT_EN          0x4
+#define LEGCYPS_ECNC_PS_EN              0x1
+#define L1D_LEGCYPS_DEF                 0x129D
+#define L1C_LEGCYPS_DEF                 0x36DD
+
+#define MIIDBG_TST100BTCFG              0x36
+#define TST100BTCFG_NORMAL_BW_EN        0x8000
+#define TST100BTCFG_BADLNK_BYPASS       0x4000
+#define TST100BTCFG_SHORTCABL_TH_MASK   0x3FU
+#define TST100BTCFG_SHORTCABL_TH_SHIFT  8
+#define TST100BTCFG_LITCH_EN            0x80
+#define TST100BTCFG_VLT_SW              0x40
+#define TST100BTCFG_LONGCABL_TH_MASK    0x3FU
+#define TST100BTCFG_LONGCABL_TH_SHIFT   0
+#define TST100BTCFG_DEF                 0xE12C
+
+#define MIIDBG_VOLT_CTRL                0x3B	/* only for l2cb 1 & 2 */
+#define VOLT_CTRL_CABLE1TH_MASK         0x1FFU
+#define VOLT_CTRL_CABLE1TH_SHIFT        7
+#define VOLT_CTRL_AMPCTRL_MASK          3U
+#define VOLT_CTRL_AMPCTRL_SHIFT         5
+#define VOLT_CTRL_SW_BYPASS             0x10
+#define VOLT_CTRL_SWLOWEST              0x8
+#define VOLT_CTRL_DACAMP10_MASK         7U
+#define VOLT_CTRL_DACAMP10_SHIFT        0
+
+#define MIIDBG_CABLE1TH_DET             0x3E
+#define CABLE1TH_DET_EN                 0x8000
+
+
+/******* dev 3 *********/
+#define MIIEXT_PCS                      3
+
+#define MIIEXT_CLDCTRL3                 0x8003
+#define CLDCTRL3_BP_CABLE1TH_DET_GT     0x8000
+#define CLDCTRL3_AZ_DISAMP              0x1000
+#define L2CB_CLDCTRL3                   0x4D19
+#define L1D_CLDCTRL3                    0xDD19
+
+#define MIIEXT_CLDCTRL6			0x8006
+#define CLDCTRL6_CAB_LEN_MASK		0x1FFU
+#define CLDCTRL6_CAB_LEN_SHIFT          0
+#define CLDCTRL6_CAB_LEN_SHORT          0x50
+
+/********* dev 7 **********/
+#define MIIEXT_ANEG                     7
+
+#define MIIEXT_LOCAL_EEEADV             0x3C
+#define LOCAL_EEEADV_1000BT             0x4
+#define LOCAL_EEEADV_100BT              0x2
+
+#define MIIEXT_REMOTE_EEEADV            0x3D
+#define REMOTE_EEEADV_1000BT            0x4
+#define REMOTE_EEEADV_100BT             0x2
+
+#define MIIEXT_EEE_ANEG                 0x8000
+#define EEE_ANEG_1000M                  0x4
+#define EEE_ANEG_100M                   0x2
 
 #endif /*_ATL1C_HW_H_*/
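
The reworked register defines above follow a <NAME>_MASK / <NAME>_SHIFT convention so that fields can be packed and updated through the driver's field helpers (FIELDX()/FIELD_SETX(), used for example in TXQ_CFGV and in atl1c_pcie_patch() below). Those helpers are not part of this excerpt; the following is only a sketch, assuming each field indeed defines both a <NAME>_MASK and a <NAME>_SHIFT:

/* sketch only -- assumes <NAME>_MASK and <NAME>_SHIFT pairs as defined above */
#define FIELDX(_name, _v)	(((_v) & (_name ## _MASK)) << (_name ## _SHIFT))
#define FIELD_GETX(_x, _name)	(((_x) >> (_name ## _SHIFT)) & (_name ## _MASK))
#define FIELD_SETX(_x, _name, _v) \
	(((_x) & ~((_name ## _MASK) << (_name ## _SHIFT))) | FIELDX(_name, _v))

Under that reading, FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) places the value 5 into bits 3:0 of the TXQ control word, and FIELD_SETX() clears a field before writing the new value.
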
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1ef0c927..9cc1570 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -24,14 +24,6 @@
 #define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
 char atl1c_driver_name[] = "atl1c";
 char atl1c_driver_version[] = ATL1C_DRV_VERSION;
-#define PCI_DEVICE_ID_ATTANSIC_L2C      0x1062
-#define PCI_DEVICE_ID_ATTANSIC_L1C      0x1063
-#define PCI_DEVICE_ID_ATHEROS_L2C_B	0x2060 /* AR8152 v1.1 Fast 10/100 */
-#define PCI_DEVICE_ID_ATHEROS_L2C_B2	0x2062 /* AR8152 v2.0 Fast 10/100 */
-#define PCI_DEVICE_ID_ATHEROS_L1D	0x1073 /* AR8151 v1.0 Gigabit 1000 */
-#define PCI_DEVICE_ID_ATHEROS_L1D_2_0	0x1083 /* AR8151 v2.0 Gigabit 1000 */
-#define L2CB_V10			0xc0
-#define L2CB_V11			0xc1
 
 /*
  * atl1c_pci_tbl - PCI Device ID Table
@@ -54,70 +46,72 @@
 };
 MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
 
-MODULE_AUTHOR("Jie Yang <jie.yang@atheros.com>");
-MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
+MODULE_AUTHOR("Jie Yang");
+MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>");
+MODULE_DESCRIPTION("Qualcom Atheros 100/1000M Ethernet Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(ATL1C_DRV_VERSION);
 
 static int atl1c_stop_mac(struct atl1c_hw *hw);
-static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw);
-static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw);
 static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
-static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
-static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
-static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
+static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
+static void atl1c_start_mac(struct atl1c_adapter *adapter);
+static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
 		   int *work_done, int work_to_do);
 static int atl1c_up(struct atl1c_adapter *adapter);
 static void atl1c_down(struct atl1c_adapter *adapter);
+static int atl1c_reset_mac(struct atl1c_hw *hw);
+static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
+static int atl1c_configure(struct atl1c_adapter *adapter);
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter);
 
 static const u16 atl1c_pay_load_size[] = {
 	128, 256, 512, 1024, 2048, 4096,
 };
 
-static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] =
-{
-	REG_MB_RFD0_PROD_IDX,
-	REG_MB_RFD1_PROD_IDX,
-	REG_MB_RFD2_PROD_IDX,
-	REG_MB_RFD3_PROD_IDX
-};
-
-static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
-{
-	REG_RFD0_HEAD_ADDR_LO,
-	REG_RFD1_HEAD_ADDR_LO,
-	REG_RFD2_HEAD_ADDR_LO,
-	REG_RFD3_HEAD_ADDR_LO
-};
-
-static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
-{
-	REG_RRD0_HEAD_ADDR_LO,
-	REG_RRD1_HEAD_ADDR_LO,
-	REG_RRD2_HEAD_ADDR_LO,
-	REG_RRD3_HEAD_ADDR_LO
-};
 
 static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
 static void atl1c_pcie_patch(struct atl1c_hw *hw)
 {
-	u32 data;
+	u32 mst_data, data;
 
-	AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
-	data |= PCIE_PHYMISC_FORCE_RCV_DET;
-	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
+	/* pclk sel could switch to 25M */
+	AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data);
+	mst_data &= ~MASTER_CTRL_CLK_SEL_DIS;
+	AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data);
 
+	/* WoL/PCIE related settings */
+	if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
+		AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
+		data |= PCIE_PHYMISC_FORCE_RCV_DET;
+		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
+	} else { /* newer devices set bit 5 of MASTER */
+		if (!(mst_data & MASTER_CTRL_WAKEN_25M))
+			AT_WRITE_REG(hw, REG_MASTER_CTRL,
+				mst_data | MASTER_CTRL_WAKEN_25M);
+	}
+	/* aspm/PCIE setting only for l2cb 1.0 */
 	if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
 		AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
-
-		data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK <<
-			PCIE_PHYMISC2_SERDES_CDR_SHIFT);
-		data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
-		data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
-			PCIE_PHYMISC2_SERDES_TH_SHIFT);
-		data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
+		data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW,
+			L2CB1_PCIE_PHYMISC2_CDR_BW);
+		data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH,
+			L2CB1_PCIE_PHYMISC2_L0S_TH);
 		AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
+		/* extend L1 sync timer */
+		AT_READ_REG(hw, REG_LINK_CTRL, &data);
+		data |= LINK_CTRL_EXT_SYNC;
+		AT_WRITE_REG(hw, REG_LINK_CTRL, data);
+	}
+	/* l2cb 1.x & l1d 1.x */
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) {
+		AT_READ_REG(hw, REG_PM_CTRL, &data);
+		data |= PM_CTRL_L0S_BUFSRX_EN;
+		AT_WRITE_REG(hw, REG_PM_CTRL, data);
+		/* clear vendor msg */
+		AT_READ_REG(hw, REG_DMA_DBG, &data);
+		AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG);
 	}
 }
 
@@ -130,6 +124,7 @@
 	u32 data;
 	u32 pci_cmd;
 	struct pci_dev *pdev = hw->adapter->pdev;
+	int pos;
 
 	AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
 	pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
@@ -142,14 +137,23 @@
 	 */
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
+	/* wol sts read-clear */
+	AT_READ_REG(hw, REG_WOL_CTRL, &data);
+	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
 
 	/*
 	 * Mask some pcie error bits
 	 */
-	AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data);
-	data &= ~PCIE_UC_SERVRITY_DLP;
-	data &= ~PCIE_UC_SERVRITY_FCP;
-	AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data);
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+	data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+	/* clear error status */
+	pci_write_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
+			PCI_EXP_DEVSTA_NFED |
+			PCI_EXP_DEVSTA_FED |
+			PCI_EXP_DEVSTA_CED |
+			PCI_EXP_DEVSTA_URD);
 
 	AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
 	data &= ~LTSSM_ID_EN_WRO;
@@ -158,11 +162,6 @@
 	atl1c_pcie_patch(hw);
 	if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
 		atl1c_disable_l0s_l1(hw);
-	if (flag & ATL1C_PCIE_PHY_RESET)
-		AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
-	else
-		AT_WRITE_REG(hw, REG_GPHY_CTRL,
-			GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
 
 	msleep(5);
 }
@@ -207,14 +206,14 @@
  * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
  * of the idle status register until the device is actually idle
  */
-static u32 atl1c_wait_until_idle(struct atl1c_hw *hw)
+static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
 {
 	int timeout;
 	u32 data;
 
 	for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
 		AT_READ_REG(hw, REG_IDLE_STATUS, &data);
-		if ((data & IDLE_STATUS_MASK) == 0)
+		if ((data & modu_ctrl) == 0)
 			return 0;
 		msleep(1);
 	}
@@ -261,15 +260,16 @@
 
 	if ((phy_data & BMSR_LSTATUS) == 0) {
 		/* link down */
-		hw->hibernate = true;
-		if (atl1c_stop_mac(hw) != 0)
-			if (netif_msg_hw(adapter))
-				dev_warn(&pdev->dev, "stop mac failed\n");
-		atl1c_set_aspm(hw, false);
 		netif_carrier_off(netdev);
 		netif_stop_queue(netdev);
-		atl1c_phy_reset(hw);
-		atl1c_phy_init(&adapter->hw);
+		hw->hibernate = true;
+		if (atl1c_reset_mac(hw) != 0)
+			if (netif_msg_hw(adapter))
+				dev_warn(&pdev->dev, "reset mac failed\n");
+		atl1c_set_aspm(hw, SPEED_0);
+		atl1c_post_phy_linkchg(hw, SPEED_0);
+		atl1c_reset_dma_ring(adapter);
+		atl1c_configure(adapter);
 	} else {
 		/* Link Up */
 		hw->hibernate = false;
@@ -283,10 +283,9 @@
 		    adapter->link_duplex != duplex) {
 			adapter->link_speed  = speed;
 			adapter->link_duplex = duplex;
-			atl1c_set_aspm(hw, true);
-			atl1c_enable_tx_ctrl(hw);
-			atl1c_enable_rx_ctrl(hw);
-			atl1c_setup_mac_ctrl(adapter);
+			atl1c_set_aspm(hw, speed);
+			atl1c_post_phy_linkchg(hw, speed);
+			atl1c_start_mac(adapter);
 			if (netif_msg_link(adapter))
 				dev_info(&pdev->dev,
 					"%s: %s NIC Link is Up<%d Mbps %s>\n",
@@ -337,6 +336,9 @@
 	adapter = container_of(work, struct atl1c_adapter, common_task);
 	netdev = adapter->netdev;
 
+	if (test_bit(__AT_DOWN, &adapter->flags))
+		return;
+
 	if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
 		netif_device_detach(netdev);
 		atl1c_down(adapter);
@@ -345,8 +347,11 @@
 	}
 
 	if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
-		&adapter->work_event))
+		&adapter->work_event)) {
+		atl1c_irq_disable(adapter);
 		atl1c_check_link_status(adapter);
+		atl1c_irq_enable(adapter);
+	}
 }
 
 
@@ -470,7 +475,7 @@
 	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
 	netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
 
-	atl1c_hw_set_mac_addr(&adapter->hw);
+	atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
 
 	return 0;
 }
@@ -523,11 +528,16 @@
 static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
+	struct atl1c_hw *hw = &adapter->hw;
 	int old_mtu   = netdev->mtu;
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 
-	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
-			(max_frame > MAX_JUMBO_FRAME_SIZE)) {
+	/* Fast Ethernet controllers don't support jumbo packets */
+	if (((hw->nic_type == athr_l2c ||
+	      hw->nic_type == athr_l2c_b ||
+	      hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) ||
+	      max_frame < ETH_ZLEN + ETH_FCS_LEN ||
+	      max_frame > MAX_JUMBO_FRAME_SIZE) {
 		if (netif_msg_link(adapter))
 			dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
 		return -EINVAL;
@@ -543,14 +553,6 @@
 		netdev_update_features(netdev);
 		atl1c_up(adapter);
 		clear_bit(__AT_RESETTING, &adapter->flags);
-		if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
-			u32 phy_data;
-
-			AT_READ_REG(&adapter->hw, 0x1414, &phy_data);
-			phy_data |= 0x10000000;
-			AT_WRITE_REG(&adapter->hw, 0x1414, phy_data);
-		}
-
 	}
 	return 0;
 }
@@ -563,7 +565,7 @@
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 	u16 result;
 
-	atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
+	atl1c_read_phy_reg(&adapter->hw, reg_num, &result);
 	return result;
 }
 
@@ -572,7 +574,7 @@
 {
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 
-	atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
+	atl1c_write_phy_reg(&adapter->hw, reg_num, val);
 }
 
 /*
@@ -687,21 +689,15 @@
 
 static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
 {
-	u32 phy_status_data;
 	u32 link_ctrl_data;
 
 	atl1c_set_mac_type(hw);
-	AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
 	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
 
 	hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE  |
 			 ATL1C_TXQ_MODE_ENHANCE;
-	if (link_ctrl_data & LINK_CTRL_L0S_EN)
-		hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
-	if (link_ctrl_data & LINK_CTRL_L1_EN)
-		hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
-	if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
-		hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
+	hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT |
+			  ATL1C_ASPM_L1_SUPPORT;
 	hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
 
 	if (hw->nic_type == athr_l1c ||
@@ -710,6 +706,55 @@
 		hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
 	return 0;
 }
+
+struct atl1c_platform_patch {
+	u16 pci_did;
+	u8  pci_revid;
+	u16 subsystem_vid;
+	u16 subsystem_did;
+	u32 patch_flag;
+#define ATL1C_LINK_PATCH	0x1
+};
+static const struct atl1c_platform_patch plats[] __devinitdata = {
+{0x2060, 0xC1, 0x1019, 0x8152, 0x1},
+{0x2060, 0xC1, 0x1019, 0x2060, 0x1},
+{0x2060, 0xC1, 0x1019, 0xE000, 0x1},
+{0x2062, 0xC0, 0x1019, 0x8152, 0x1},
+{0x2062, 0xC0, 0x1019, 0x2062, 0x1},
+{0x2062, 0xC0, 0x1458, 0xE000, 0x1},
+{0x2062, 0xC1, 0x1019, 0x8152, 0x1},
+{0x2062, 0xC1, 0x1019, 0x2062, 0x1},
+{0x2062, 0xC1, 0x1458, 0xE000, 0x1},
+{0x2062, 0xC1, 0x1565, 0x2802, 0x1},
+{0x2062, 0xC1, 0x1565, 0x2801, 0x1},
+{0x1073, 0xC0, 0x1019, 0x8151, 0x1},
+{0x1073, 0xC0, 0x1019, 0x1073, 0x1},
+{0x1073, 0xC0, 0x1458, 0xE000, 0x1},
+{0x1083, 0xC0, 0x1458, 0xE000, 0x1},
+{0x1083, 0xC0, 0x1019, 0x8151, 0x1},
+{0x1083, 0xC0, 0x1019, 0x1083, 0x1},
+{0x1083, 0xC0, 0x1462, 0x7680, 0x1},
+{0x1083, 0xC0, 0x1565, 0x2803, 0x1},
+{0},
+};
+
+static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
+{
+	int i = 0;
+
+	hw->msi_lnkpatch = false;
+
+	while (plats[i].pci_did != 0) {
+		if (plats[i].pci_did == hw->device_id &&
+		    plats[i].pci_revid == hw->revision_id &&
+		    plats[i].subsystem_vid == hw->subsystem_vendor_id &&
+		    plats[i].subsystem_did == hw->subsystem_id) {
+			if (plats[i].patch_flag & ATL1C_LINK_PATCH)
+				hw->msi_lnkpatch = true;
+		}
+		i++;
+	}
+}
 /*
  * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
  * @adapter: board private structure to initialize
@@ -729,9 +774,8 @@
 	device_set_wakeup_enable(&pdev->dev, false);
 	adapter->link_speed = SPEED_0;
 	adapter->link_duplex = FULL_DUPLEX;
-	adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
 	adapter->tpd_ring[0].count = 1024;
-	adapter->rfd_ring[0].count = 512;
+	adapter->rfd_ring.count = 512;
 
 	hw->vendor_id = pdev->vendor;
 	hw->device_id = pdev->device;
@@ -746,26 +790,18 @@
 		dev_err(&pdev->dev, "set mac function pointers failed\n");
 		return -1;
 	}
+	atl1c_patch_assign(hw);
+
 	hw->intr_mask = IMR_NORMAL_MASK;
 	hw->phy_configured = false;
 	hw->preamble_len = 7;
 	hw->max_frame_size = adapter->netdev->mtu;
-	if (adapter->num_rx_queues < 2) {
-		hw->rss_type = atl1c_rss_disable;
-		hw->rss_mode = atl1c_rss_mode_disable;
-	} else {
-		hw->rss_type = atl1c_rss_ipv4;
-		hw->rss_mode = atl1c_rss_mul_que_mul_int;
-		hw->rss_hash_bits = 16;
-	}
 	hw->autoneg_advertised = ADVERTISED_Autoneg;
 	hw->indirect_tab = 0xE4E4E4E4;
 	hw->base_cpu = 0;
 
 	hw->ict = 50000;		/* 100ms */
 	hw->smb_timer = 200000;	  	/* 400ms */
-	hw->cmb_tpd = 4;
-	hw->cmb_tx_timer = 1;		/* 2 us  */
 	hw->rx_imt = 200;
 	hw->tx_imt = 1000;
 
@@ -773,9 +809,6 @@
 	hw->rfd_burst = 8;
 	hw->dma_order = atl1c_dma_ord_out;
 	hw->dmar_block = atl1c_dma_req_1024;
-	hw->dmaw_block = atl1c_dma_req_1024;
-	hw->dmar_dly_cnt = 15;
-	hw->dmaw_dly_cnt = 4;
 
 	if (atl1c_alloc_queues(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
@@ -851,24 +884,22 @@
  */
 static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
 {
-	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
-	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
 	struct atl1c_buffer *buffer_info;
 	struct pci_dev *pdev = adapter->pdev;
-	int i, j;
+	int j;
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		for (j = 0; j < rfd_ring[i].count; j++) {
-			buffer_info = &rfd_ring[i].buffer_info[j];
-			atl1c_clean_buffer(pdev, buffer_info, 0);
-		}
-		/* zero out the descriptor ring */
-		memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
-		rfd_ring[i].next_to_clean = 0;
-		rfd_ring[i].next_to_use = 0;
-		rrd_ring[i].next_to_use = 0;
-		rrd_ring[i].next_to_clean = 0;
+	for (j = 0; j < rfd_ring->count; j++) {
+		buffer_info = &rfd_ring->buffer_info[j];
+		atl1c_clean_buffer(pdev, buffer_info, 0);
 	}
+	/* zero out the descriptor ring */
+	memset(rfd_ring->desc, 0, rfd_ring->size);
+	rfd_ring->next_to_clean = 0;
+	rfd_ring->next_to_use = 0;
+	rrd_ring->next_to_use = 0;
+	rrd_ring->next_to_clean = 0;
 }
 
 /*
@@ -877,8 +908,8 @@
 static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
 {
 	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
-	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
-	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
 	struct atl1c_buffer *buffer_info;
 	int i, j;
 
@@ -890,15 +921,13 @@
 			ATL1C_SET_BUFFER_STATE(&buffer_info[i],
 					ATL1C_BUFFER_FREE);
 	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rfd_ring[i].next_to_use = 0;
-		rfd_ring[i].next_to_clean = 0;
-		rrd_ring[i].next_to_use = 0;
-		rrd_ring[i].next_to_clean = 0;
-		for (j = 0; j < rfd_ring[i].count; j++) {
-			buffer_info = &rfd_ring[i].buffer_info[j];
-			ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
-		}
+	rfd_ring->next_to_use = 0;
+	rfd_ring->next_to_clean = 0;
+	rrd_ring->next_to_use = 0;
+	rrd_ring->next_to_clean = 0;
+	for (j = 0; j < rfd_ring->count; j++) {
+		buffer_info = &rfd_ring->buffer_info[j];
+		ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
 	}
 }
 
@@ -935,27 +964,23 @@
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
-	struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
-	struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
 	struct atl1c_ring_header *ring_header = &adapter->ring_header;
-	int num_rx_queues = adapter->num_rx_queues;
 	int size;
 	int i;
 	int count = 0;
 	int rx_desc_count = 0;
 	u32 offset = 0;
 
-	rrd_ring[0].count = rfd_ring[0].count;
+	rrd_ring->count = rfd_ring->count;
 	for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
 		tpd_ring[i].count = tpd_ring[0].count;
 
-	for (i = 1; i < adapter->num_rx_queues; i++)
-		rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count;
-
 	/* 2 tpd queue, one high priority queue,
 	 * another normal priority queue */
 	size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
-		rfd_ring->count * num_rx_queues);
+		rfd_ring->count);
 	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
 	if (unlikely(!tpd_ring->buffer_info)) {
 		dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
@@ -968,12 +993,11 @@
 		count += tpd_ring[i].count;
 	}
 
-	for (i = 0; i < num_rx_queues; i++) {
-		rfd_ring[i].buffer_info =
-			(struct atl1c_buffer *) (tpd_ring->buffer_info + count);
-		count += rfd_ring[i].count;
-		rx_desc_count += rfd_ring[i].count;
-	}
+	rfd_ring->buffer_info =
+		(struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+	count += rfd_ring->count;
+	rx_desc_count += rfd_ring->count;
+
 	/*
 	 * real ring DMA buffer
 	 * each ring/block may need up to 8 bytes for alignment, hence the
@@ -983,8 +1007,7 @@
 		sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
 		sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
 		sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
-		sizeof(struct atl1c_hw_stats) +
-		8 * 4 + 8 * 2 * num_rx_queues;
+		8 * 4;
 
 	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
 				&ring_header->dma);
@@ -1005,25 +1028,18 @@
 		offset += roundup(tpd_ring[i].size, 8);
 	}
 	/* init RFD ring */
-	for (i = 0; i < num_rx_queues; i++) {
-		rfd_ring[i].dma = ring_header->dma + offset;
-		rfd_ring[i].desc = (u8 *) ring_header->desc + offset;
-		rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) *
-				rfd_ring[i].count;
-		offset += roundup(rfd_ring[i].size, 8);
-	}
+	rfd_ring->dma = ring_header->dma + offset;
+	rfd_ring->desc = (u8 *) ring_header->desc + offset;
+	rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count;
+	offset += roundup(rfd_ring->size, 8);
 
 	/* init RRD ring */
-	for (i = 0; i < num_rx_queues; i++) {
-		rrd_ring[i].dma = ring_header->dma + offset;
-		rrd_ring[i].desc = (u8 *) ring_header->desc + offset;
-		rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) *
-				rrd_ring[i].count;
-		offset += roundup(rrd_ring[i].size, 8);
-	}
+	rrd_ring->dma = ring_header->dma + offset;
+	rrd_ring->desc = (u8 *) ring_header->desc + offset;
+	rrd_ring->size = sizeof(struct atl1c_recv_ret_status) *
+		rrd_ring->count;
+	offset += roundup(rrd_ring->size, 8);
 
-	adapter->smb.dma = ring_header->dma + offset;
-	adapter->smb.smb = (u8 *)ring_header->desc + offset;
 	return 0;
 
 err_nomem:
@@ -1034,26 +1050,20 @@
 static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
 {
 	struct atl1c_hw *hw = &adapter->hw;
-	struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *)
-				adapter->rfd_ring;
-	struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *)
-				adapter->rrd_ring;
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
 	struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
 				adapter->tpd_ring;
-	struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
-	struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
-	int i;
-	u32 data;
 
 	/* TPD */
 	AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
 			(u32)((tpd_ring[atl1c_trans_normal].dma &
 				AT_DMA_HI_ADDR_MASK) >> 32));
 	/* just enable normal priority TX queue */
-	AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO,
+	AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO,
 			(u32)(tpd_ring[atl1c_trans_normal].dma &
 				AT_DMA_LO_ADDR_MASK));
-	AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO,
+	AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO,
 			(u32)(tpd_ring[atl1c_trans_high].dma &
 				AT_DMA_LO_ADDR_MASK));
 	AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
@@ -1062,31 +1072,21 @@
 
 	/* RFD */
 	AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
-			(u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32));
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i],
-			(u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+			(u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
+	AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO,
+			(u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK));
 
 	AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
-			rfd_ring[0].count & RFD_RING_SIZE_MASK);
+			rfd_ring->count & RFD_RING_SIZE_MASK);
 	AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
 			adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
 
 	/* RRD */
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i],
-			(u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+	AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO,
+			(u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK));
 	AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
-			(rrd_ring[0].count & RRD_RING_SIZE_MASK));
+			(rrd_ring->count & RRD_RING_SIZE_MASK));
 
-	/* CMB */
-	AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK);
-
-	/* SMB */
-	AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI,
-			(u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
-	AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
-			(u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
 	if (hw->nic_type == athr_l2c_b) {
 		AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
 		AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
@@ -1097,13 +1097,6 @@
 		AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0);	/* TX watermark, to enter l1 state.*/
 		AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0);		/* RXD threshold.*/
 	}
-	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) {
-			/* Power Saving for L2c_B */
-		AT_READ_REG(hw, REG_SERDES_LOCK, &data);
-		data |= SERDES_MAC_CLK_SLOWDOWN;
-		data |= SERDES_PYH_CLK_SLOWDOWN;
-		AT_WRITE_REG(hw, REG_SERDES_LOCK, data);
-	}
 	/* Load all of base address above */
 	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
 }
@@ -1111,32 +1104,26 @@
 static void atl1c_configure_tx(struct atl1c_adapter *adapter)
 {
 	struct atl1c_hw *hw = &adapter->hw;
-	u32 dev_ctrl_data;
-	u32 max_pay_load;
+	int max_pay_load;
 	u16 tx_offload_thresh;
 	u32 txq_ctrl_data;
-	u32 max_pay_load_data;
 
-	tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
+	tx_offload_thresh = MAX_TSO_FRAME_SIZE;
 	AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
 		(tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
-	AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
-	max_pay_load  = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
-			DEVICE_CTRL_MAX_PAYLOAD_MASK;
-	hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
-	max_pay_load  = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
-			DEVICE_CTRL_MAX_RREQ_SZ_MASK;
+	max_pay_load = pcie_get_readrq(adapter->pdev) >> 8;
 	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
-
-	txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
-			TXQ_NUM_TPD_BURST_SHIFT;
-	if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE)
-		txq_ctrl_data |= TXQ_CTRL_ENH_MODE;
-	max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] &
-			TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT;
-	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2)
-		max_pay_load_data >>= 1;
-	txq_ctrl_data |= max_pay_load_data;
+	/*
+	 * if the BIOS changed the dma-read-max-length to an invalid value,
+	 * restore it to the default value
+	 */
+	if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) {
+		pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN);
+		hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN;
+	}
+	txq_ctrl_data =
+		hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ?
+		L2CB_TXQ_CFGV : L1C_TXQ_CFGV;
 
 	AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
 }
@@ -1151,71 +1138,25 @@
 
 	if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
 		rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
-	if (hw->rss_type == atl1c_rss_ipv4)
-		rxq_ctrl_data |= RSS_HASH_IPV4;
-	if (hw->rss_type == atl1c_rss_ipv4_tcp)
-		rxq_ctrl_data |= RSS_HASH_IPV4_TCP;
-	if (hw->rss_type == atl1c_rss_ipv6)
-		rxq_ctrl_data |= RSS_HASH_IPV6;
-	if (hw->rss_type == atl1c_rss_ipv6_tcp)
-		rxq_ctrl_data |= RSS_HASH_IPV6_TCP;
-	if (hw->rss_type != atl1c_rss_disable)
-		rxq_ctrl_data |= RRS_HASH_CTRL_EN;
 
-	rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) <<
-			RSS_MODE_SHIFT;
-	rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
-			RSS_HASH_BITS_SHIFT;
-	if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
-		rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
-			ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
+	/* aspm for gigabit */
+	if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0)
+		rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT,
+			ASPM_THRUPUT_LIMIT_100M);
 
 	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
 }
 
-static void atl1c_configure_rss(struct atl1c_adapter *adapter)
-{
-	struct atl1c_hw *hw = &adapter->hw;
-
-	AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
-	AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
-}
-
 static void atl1c_configure_dma(struct atl1c_adapter *adapter)
 {
 	struct atl1c_hw *hw = &adapter->hw;
 	u32 dma_ctrl_data;
 
-	dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI;
-	if (hw->ctrl_flags & ATL1C_CMB_ENABLE)
-		dma_ctrl_data |= DMA_CTRL_CMB_EN;
-	if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
-		dma_ctrl_data |= DMA_CTRL_SMB_EN;
-	else
-		dma_ctrl_data |= MAC_CTRL_SMB_DIS;
-
-	switch (hw->dma_order) {
-	case atl1c_dma_ord_in:
-		dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER;
-		break;
-	case atl1c_dma_ord_enh:
-		dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER;
-		break;
-	case atl1c_dma_ord_out:
-		dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER;
-		break;
-	default:
-		break;
-	}
-
-	dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
-		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT;
-	dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
-		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT;
-	dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
-		<< DMA_CTRL_DMAR_DLY_CNT_SHIFT;
-	dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
-		<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;
+	dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) |
+		DMA_CTRL_RREQ_PRI_DATA |
+		FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) |
+		FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) |
+		FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF);
 
 	AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
 }
@@ -1230,52 +1171,53 @@
 	u32 data;
 
 	AT_READ_REG(hw, REG_RXQ_CTRL, &data);
-	data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN |
-		  RXQ3_CTRL_EN | RXQ_CTRL_EN);
+	data &= ~RXQ_CTRL_EN;
 	AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
 
 	AT_READ_REG(hw, REG_TXQ_CTRL, &data);
 	data &= ~TXQ_CTRL_EN;
-	AT_WRITE_REG(hw, REG_TWSI_CTRL, data);
+	AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
 
-	atl1c_wait_until_idle(hw);
+	atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY);
 
 	AT_READ_REG(hw, REG_MAC_CTRL, &data);
 	data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
 	AT_WRITE_REG(hw, REG_MAC_CTRL, data);
 
-	return (int)atl1c_wait_until_idle(hw);
+	return (int)atl1c_wait_until_idle(hw,
+		IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY);
 }
 
-static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
+static void atl1c_start_mac(struct atl1c_adapter *adapter)
 {
-	u32 data;
+	struct atl1c_hw *hw = &adapter->hw;
+	u32 mac, txq, rxq;
 
-	AT_READ_REG(hw, REG_RXQ_CTRL, &data);
-	switch (hw->adapter->num_rx_queues) {
-	case 4:
-		data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN);
-		break;
-	case 3:
-		data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN);
-		break;
-	case 2:
-		data |= RXQ1_CTRL_EN;
-		break;
-	default:
-		break;
-	}
-	data |= RXQ_CTRL_EN;
-	AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
-}
+	hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false;
+	hw->mac_speed = adapter->link_speed == SPEED_1000 ?
+		atl1c_mac_speed_1000 : atl1c_mac_speed_10_100;
 
-static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw)
-{
-	u32 data;
+	AT_READ_REG(hw, REG_TXQ_CTRL, &txq);
+	AT_READ_REG(hw, REG_RXQ_CTRL, &rxq);
+	AT_READ_REG(hw, REG_MAC_CTRL, &mac);
 
-	AT_READ_REG(hw, REG_TXQ_CTRL, &data);
-	data |= TXQ_CTRL_EN;
-	AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
+	txq |= TXQ_CTRL_EN;
+	rxq |= RXQ_CTRL_EN;
+	mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW |
+	       MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW |
+	       MAC_CTRL_ADD_CRC | MAC_CTRL_PAD |
+	       MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN |
+	       MAC_CTRL_HASH_ALG_CRC32;
+	if (hw->mac_duplex)
+		mac |= MAC_CTRL_DUPLX;
+	else
+		mac &= ~MAC_CTRL_DUPLX;
+	mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed);
+	mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len);
+
+	AT_WRITE_REG(hw, REG_TXQ_CTRL, txq);
+	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq);
+	AT_WRITE_REG(hw, REG_MAC_CTRL, mac);
 }
 
 /*
@@ -1287,10 +1229,7 @@
 {
 	struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
 	struct pci_dev *pdev = adapter->pdev;
-	u32 master_ctrl_data = 0;
-
-	AT_WRITE_REG(hw, REG_IMR, 0);
-	AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
+	u32 ctrl_data = 0;
 
 	atl1c_stop_mac(hw);
 	/*
@@ -1299,194 +1238,148 @@
 	 * the current PCI configuration.  The global reset bit is self-
 	 * clearing, and should clear within a microsecond.
 	 */
-	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
-	master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF;
-	AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST)
-			& 0xFFFF));
+	AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data);
+	ctrl_data |= MASTER_CTRL_OOB_DIS;
+	AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST);
 
 	AT_WRITE_FLUSH(hw);
 	msleep(10);
 	/* Wait at least 10ms for All module to be Idle */
 
-	if (atl1c_wait_until_idle(hw)) {
+	if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) {
 		dev_err(&pdev->dev,
 			"MAC state machine can't be idle since"
 			" disabled for 10ms second\n");
 		return -1;
 	}
+	AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data);
+
+	/* driver controls speed/duplex */
+	AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data);
+	AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW);
+
+	/* clk switch setting */
+	AT_READ_REG(hw, REG_SERDES, &ctrl_data);
+	switch (hw->nic_type) {
+	case athr_l2c_b:
+		ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN |
+				SERDES_MAC_CLK_SLOWDOWN);
+		AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
+		break;
+	case athr_l2c_b2:
+	case athr_l1d_2:
+		ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN;
+		AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
+		break;
+	default:
+		break;
+	}
+
 	return 0;
 }
 
 static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
 {
-	u32 pm_ctrl_data;
+	u16 ctrl_flags = hw->ctrl_flags;
 
-	AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
-	pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
-			PM_CTRL_L1_ENTRY_TIMER_SHIFT);
-	pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
-	pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-	pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-	pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK;
-	pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
-
-	pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
-	pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
-	pm_ctrl_data |=	PM_CTRL_SERDES_L1_EN;
-	AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
+	hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT);
+	atl1c_set_aspm(hw, SPEED_0);
+	hw->ctrl_flags = ctrl_flags;
 }
 
 /*
  * Set ASPM state.
  * Enable/disable L0s/L1 depend on link state.
  */
-static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
+static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
 {
 	u32 pm_ctrl_data;
-	u32 link_ctrl_data;
-	u32 link_l1_timer = 0xF;
+	u32 link_l1_timer;
 
 	AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
-	AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
-
-	pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
-	pm_ctrl_data &=  ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
-			PM_CTRL_L1_ENTRY_TIMER_SHIFT);
-	pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
-			PM_CTRL_LCKDET_TIMER_SHIFT);
-	pm_ctrl_data |= AT_LCKDET_TIMER	<< PM_CTRL_LCKDET_TIMER_SHIFT;
-
-	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
-		hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
-		link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
-		if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
-			if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10)
-				link_ctrl_data |= LINK_CTRL_EXT_SYNC;
-		}
-
-		AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
-
-		pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER;
-		pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK <<
-			PM_CTRL_PM_REQ_TIMER_SHIFT);
-		pm_ctrl_data |= AT_ASPM_L1_TIMER <<
-			PM_CTRL_PM_REQ_TIMER_SHIFT;
-		pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
-		pm_ctrl_data &= ~PM_CTRL_HOTRST;
-		pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
-		pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
-	}
-	pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
-	if (linkup) {
-		pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-		pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-		if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
-			pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
-		if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
-			pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
-
-		if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
-			hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
-			if (hw->nic_type == athr_l2c_b)
-				if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
-					pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-			pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
-			pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
-			pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
-			pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
-		if (hw->adapter->link_speed == SPEED_100 ||
-				hw->adapter->link_speed == SPEED_1000) {
-				pm_ctrl_data &=  ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
-					PM_CTRL_L1_ENTRY_TIMER_SHIFT);
-				if (hw->nic_type == athr_l2c_b)
-					link_l1_timer = 7;
-				else if (hw->nic_type == athr_l2c_b2 ||
-					hw->nic_type == athr_l1d_2)
-					link_l1_timer = 4;
-				pm_ctrl_data |= link_l1_timer <<
-					PM_CTRL_L1_ENTRY_TIMER_SHIFT;
-			}
-		} else {
-			pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
-			pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
-			pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
-			pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
-			pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-			pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
-
-		}
+	pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN |
+			  PM_CTRL_ASPM_L0S_EN |
+			  PM_CTRL_MAC_ASPM_CHK);
+	/* L1 timer */
+	if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
+		pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S;
+		link_l1_timer =
+			link_speed == SPEED_1000 || link_speed == SPEED_100 ?
+			L1D_PMCTRL_L1_ENTRY_TM_16US : 1;
+		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
+			L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer);
 	} else {
-		pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
-		pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
-		pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
-		pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
+		link_l1_timer = hw->nic_type == athr_l2c_b ?
+			L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM;
+		if (link_speed != SPEED_1000 && link_speed != SPEED_100)
+			link_l1_timer = 1;
+		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
+			PM_CTRL_L1_ENTRY_TIMER, link_l1_timer);
+	}
 
-		if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
-			pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
-		else
-			pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+	/* L0S/L1 enable */
+	if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0)
+		pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK;
+	if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
+		pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK;
+
+	/* l2cb & l1d & l2cb2 & l1d2 */
+	if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
+	    hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
+		pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
+			PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF);
+		pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER |
+				PM_CTRL_SERDES_PD_EX_L1 |
+				PM_CTRL_CLK_SWH_L1;
+		pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
+				  PM_CTRL_SERDES_PLL_L1_EN |
+				  PM_CTRL_SERDES_BUFS_RX_L1_EN |
+				  PM_CTRL_SA_DLY_EN |
+				  PM_CTRL_HOTRST);
+		/* disable l0s if link down or l2cb */
+		if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b)
+			pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+	} else { /* l1c */
+		pm_ctrl_data =
+			FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0);
+		if (link_speed != SPEED_0) {
+			pm_ctrl_data |= PM_CTRL_SERDES_L1_EN |
+					PM_CTRL_SERDES_PLL_L1_EN |
+					PM_CTRL_SERDES_BUFS_RX_L1_EN;
+			pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 |
+					  PM_CTRL_CLK_SWH_L1 |
+					  PM_CTRL_ASPM_L0S_EN |
+					  PM_CTRL_ASPM_L1_EN);
+		} else { /* link down */
+			pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
+			pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
+					  PM_CTRL_SERDES_PLL_L1_EN |
+					  PM_CTRL_SERDES_BUFS_RX_L1_EN |
+					  PM_CTRL_ASPM_L0S_EN);
+		}
 	}
 	AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
 
 	return;
 }
 
-static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
-{
-	struct atl1c_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	u32 mac_ctrl_data;
-
-	mac_ctrl_data = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
-	mac_ctrl_data |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
-
-	if (adapter->link_duplex == FULL_DUPLEX) {
-		hw->mac_duplex = true;
-		mac_ctrl_data |= MAC_CTRL_DUPLX;
-	}
-
-	if (adapter->link_speed == SPEED_1000)
-		hw->mac_speed = atl1c_mac_speed_1000;
-	else
-		hw->mac_speed = atl1c_mac_speed_10_100;
-
-	mac_ctrl_data |= (hw->mac_speed & MAC_CTRL_SPEED_MASK) <<
-			MAC_CTRL_SPEED_SHIFT;
-
-	mac_ctrl_data |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
-	mac_ctrl_data |= ((hw->preamble_len & MAC_CTRL_PRMLEN_MASK) <<
-			MAC_CTRL_PRMLEN_SHIFT);
-
-	__atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
-
-	mac_ctrl_data |= MAC_CTRL_BC_EN;
-	if (netdev->flags & IFF_PROMISC)
-		mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
-	if (netdev->flags & IFF_ALLMULTI)
-		mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
-
-	mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
-	if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 ||
-	    hw->nic_type == athr_l1d_2) {
-		mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
-		mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
-	}
-	AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
-}
-
 /*
  * atl1c_configure - Configure Transmit&Receive Unit after Reset
  * @adapter: board private structure
  *
  * Configure the Tx /Rx unit of the MAC after a reset.
  */
-static int atl1c_configure(struct atl1c_adapter *adapter)
+static int atl1c_configure_mac(struct atl1c_adapter *adapter)
 {
 	struct atl1c_hw *hw = &adapter->hw;
 	u32 master_ctrl_data = 0;
 	u32 intr_modrt_data;
 	u32 data;
 
+	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
+	master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN |
+			      MASTER_CTRL_RX_ITIMER_EN |
+			      MASTER_CTRL_INT_RDCLR);
 	/* clear interrupt status */
 	AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
 	/*  Clear any WOL status */
@@ -1525,30 +1418,39 @@
 	master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
 	AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
 
-	if (hw->ctrl_flags & ATL1C_CMB_ENABLE) {
-		AT_WRITE_REG(hw, REG_CMB_TPD_THRESH,
-			hw->cmb_tpd & CMB_TPD_THRESH_MASK);
-		AT_WRITE_REG(hw, REG_CMB_TX_TIMER,
-			hw->cmb_tx_timer & CMB_TX_TIMER_MASK);
-	}
+	AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
+		hw->smb_timer & SMB_STAT_TIMER_MASK);
 
-	if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
-		AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
-			hw->smb_timer & SMB_STAT_TIMER_MASK);
 	/* set MTU */
 	AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
 			VLAN_HLEN + ETH_FCS_LEN);
-	/* HDS, disable */
-	AT_WRITE_REG(hw, REG_HDS_CTRL, 0);
 
 	atl1c_configure_tx(adapter);
 	atl1c_configure_rx(adapter);
-	atl1c_configure_rss(adapter);
 	atl1c_configure_dma(adapter);
 
 	return 0;
 }
 
+static int atl1c_configure(struct atl1c_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int num;
+
+	atl1c_init_ring_ptrs(adapter);
+	atl1c_set_multi(netdev);
+	atl1c_restore_vlan(adapter);
+
+	num = atl1c_alloc_rx_buffer(adapter);
+	if (unlikely(num == 0))
+		return -ENOMEM;
+
+	if (atl1c_configure_mac(adapter))
+		return -EIO;
+
+	return 0;
+}
+
 static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
 {
 	u16 hw_reg_addr = 0;
@@ -1635,16 +1537,11 @@
 	struct pci_dev *pdev = adapter->pdev;
 	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
 	u16 hw_next_to_clean;
-	u16 shift;
-	u32 data;
+	u16 reg;
 
-	if (type == atl1c_trans_high)
-		shift = MB_HTPD_CONS_IDX_SHIFT;
-	else
-		shift = MB_NTPD_CONS_IDX_SHIFT;
+	reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
 
-	AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data);
-	hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK;
+	AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean);
 
 	while (next_to_clean != hw_next_to_clean) {
 		buffer_info = &tpd_ring->buffer_info[next_to_clean];
@@ -1746,9 +1643,9 @@
 	skb_checksum_none_assert(skb);
 }
 
-static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid)
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
 {
-	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid];
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
 	struct pci_dev *pdev = adapter->pdev;
 	struct atl1c_buffer *buffer_info, *next_info;
 	struct sk_buff *skb;
@@ -1800,7 +1697,7 @@
 		/* TODO: update mailbox here */
 		wmb();
 		rfd_ring->next_to_use = rfd_next_to_use;
-		AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid],
+		AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX,
 			rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
 	}
 
@@ -1839,7 +1736,7 @@
 	rfd_ring->next_to_clean = rfd_index;
 }
 
-static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
+static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
 		   int *work_done, int work_to_do)
 {
 	u16 rfd_num, rfd_index;
@@ -1847,8 +1744,8 @@
 	u16 length;
 	struct pci_dev *pdev = adapter->pdev;
 	struct net_device *netdev  = adapter->netdev;
-	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que];
-	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que];
+	struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+	struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
 	struct sk_buff *skb;
 	struct atl1c_recv_ret_status *rrs;
 	struct atl1c_buffer *buffer_info;
@@ -1914,7 +1811,7 @@
 		count++;
 	}
 	if (count)
-		atl1c_alloc_rx_buffer(adapter, que);
+		atl1c_alloc_rx_buffer(adapter);
 }
 
 /*
@@ -1931,7 +1828,7 @@
 	if (!netif_carrier_ok(adapter->netdev))
 		goto quit_polling;
 	/* just enable one RXQ */
-	atl1c_clean_rx_irq(adapter, 0, &work_done, budget);
+	atl1c_clean_rx_irq(adapter, &work_done, budget);
 
 	if (work_done < budget) {
 quit_polling:
@@ -2206,23 +2103,10 @@
 			   struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
 {
 	struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
-	u32 prod_data;
+	u16 reg;
 
-	AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data);
-	switch (type) {
-	case atl1c_trans_high:
-		prod_data &= 0xFFFF0000;
-		prod_data |= tpd_ring->next_to_use & 0xFFFF;
-		break;
-	case atl1c_trans_normal:
-		prod_data &= 0x0000FFFF;
-		prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16;
-		break;
-	default:
-		break;
-	}
-	wmb();
-	AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data);
+	reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX;
+	AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use);
 }
 
 static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
@@ -2307,8 +2191,7 @@
 				"Unable to allocate MSI interrupt Error: %d\n",
 				err);
 		adapter->have_msi = false;
-	} else
-		netdev->irq = pdev->irq;
+	}
 
 	if (!adapter->have_msi)
 		flags |= IRQF_SHARED;
@@ -2328,44 +2211,38 @@
 	return err;
 }
 
+
+static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter)
+{
+	/* release tx-pending skbs and reset tx/rx ring index */
+	atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
+	atl1c_clean_tx_ring(adapter, atl1c_trans_high);
+	atl1c_clean_rx_ring(adapter);
+}
+
 static int atl1c_up(struct atl1c_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int num;
 	int err;
-	int i;
 
 	netif_carrier_off(netdev);
-	atl1c_init_ring_ptrs(adapter);
-	atl1c_set_multi(netdev);
-	atl1c_restore_vlan(adapter);
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		num = atl1c_alloc_rx_buffer(adapter, i);
-		if (unlikely(num == 0)) {
-			err = -ENOMEM;
-			goto err_alloc_rx;
-		}
-	}
-
-	if (atl1c_configure(adapter)) {
-		err = -EIO;
+	err = atl1c_configure(adapter);
+	if (unlikely(err))
 		goto err_up;
-	}
 
 	err = atl1c_request_irq(adapter);
 	if (unlikely(err))
 		goto err_up;
 
+	atl1c_check_link_status(adapter);
 	clear_bit(__AT_DOWN, &adapter->flags);
 	napi_enable(&adapter->napi);
 	atl1c_irq_enable(adapter);
-	atl1c_check_link_status(adapter);
 	netif_start_queue(netdev);
 	return err;
 
 err_up:
-err_alloc_rx:
 	atl1c_clean_rx_ring(adapter);
 	return err;
 }
@@ -2383,15 +2260,15 @@
 	napi_disable(&adapter->napi);
 	atl1c_irq_disable(adapter);
 	atl1c_free_irq(adapter);
+	/* disable ASPM if device inactive */
+	atl1c_disable_l0s_l1(&adapter->hw);
 	/* reset MAC to disable all RX/TX */
 	atl1c_reset_mac(&adapter->hw);
 	msleep(1);
 
 	adapter->link_speed = SPEED_0;
 	adapter->link_duplex = -1;
-	atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
-	atl1c_clean_tx_ring(adapter, atl1c_trans_high);
-	atl1c_clean_rx_ring(adapter);
+	atl1c_reset_dma_ring(adapter);
 }
 
 /*
@@ -2424,13 +2301,6 @@
 	if (unlikely(err))
 		goto err_up;
 
-	if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
-		u32 phy_data;
-
-		AT_READ_REG(&adapter->hw, REG_MDIO_CTRL, &phy_data);
-		phy_data |= MDIO_AP_EN;
-		AT_WRITE_REG(&adapter->hw, REG_MDIO_CTRL, phy_data);
-	}
 	return 0;
 
 err_up:
@@ -2456,6 +2326,8 @@
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 
 	WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
+	set_bit(__AT_DOWN, &adapter->flags);
+	cancel_work_sync(&adapter->common_task);
 	atl1c_down(adapter);
 	atl1c_free_ring_resources(adapter);
 	return 0;
@@ -2467,10 +2339,6 @@
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 	struct atl1c_hw *hw = &adapter->hw;
-	u32 mac_ctrl_data = 0;
-	u32 master_ctrl_data = 0;
-	u32 wol_ctrl_data = 0;
-	u16 mii_intr_status_data = 0;
 	u32 wufc = adapter->wol;
 
 	atl1c_disable_l0s_l1(hw);
@@ -2481,75 +2349,10 @@
 	netif_device_detach(netdev);
 
 	if (wufc)
-		if (atl1c_phy_power_saving(hw) != 0)
+		if (atl1c_phy_to_ps_link(hw) != 0)
 			dev_dbg(&pdev->dev, "phy power saving failed");
 
-	AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
-	AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
-
-	master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
-	mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
-	mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
-			MAC_CTRL_PRMLEN_MASK) <<
-			MAC_CTRL_PRMLEN_SHIFT);
-	mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
-	mac_ctrl_data &= ~MAC_CTRL_DUPLX;
-
-	if (wufc) {
-		mac_ctrl_data |= MAC_CTRL_RX_EN;
-		if (adapter->link_speed == SPEED_1000 ||
-			adapter->link_speed == SPEED_0) {
-			mac_ctrl_data |= atl1c_mac_speed_1000 <<
-					MAC_CTRL_SPEED_SHIFT;
-			mac_ctrl_data |= MAC_CTRL_DUPLX;
-		} else
-			mac_ctrl_data |= atl1c_mac_speed_10_100 <<
-					MAC_CTRL_SPEED_SHIFT;
-
-		if (adapter->link_duplex == DUPLEX_FULL)
-			mac_ctrl_data |= MAC_CTRL_DUPLX;
-
-		/* turn on magic packet wol */
-		if (wufc & AT_WUFC_MAG)
-			wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
-
-		if (wufc & AT_WUFC_LNKC) {
-			wol_ctrl_data |=  WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
-			/* only link up can wake up */
-			if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
-				dev_dbg(&pdev->dev, "%s: read write phy "
-						  "register failed.\n",
-						  atl1c_driver_name);
-			}
-		}
-		/* clear phy interrupt */
-		atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
-		/* Config MAC Ctrl register */
-		__atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
-
-		/* magic packet maybe Broadcast&multicast&Unicast frame */
-		if (wufc & AT_WUFC_MAG)
-			mac_ctrl_data |= MAC_CTRL_BC_EN;
-
-		dev_dbg(&pdev->dev,
-			"%s: suspend MAC=0x%x\n",
-			atl1c_driver_name, mac_ctrl_data);
-		AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
-		AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
-		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
-
-		AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
-			GPHY_CTRL_EXT_RESET);
-	} else {
-		AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
-		master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
-		mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
-		mac_ctrl_data |= MAC_CTRL_DUPLX;
-		AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
-		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
-		AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
-		hw->phy_configured = false; /* re-init PHY when resume */
-	}
+	atl1c_power_saving(hw, wufc);
 
 	return 0;
 }
@@ -2562,8 +2365,7 @@
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 
 	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
-	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
-			ATL1C_PCIE_PHY_RESET);
+	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
 
 	atl1c_phy_reset(&adapter->hw);
 	atl1c_reset_mac(&adapter->hw);
@@ -2616,7 +2418,6 @@
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 	pci_set_drvdata(pdev, netdev);
 
-	netdev->irq  = pdev->irq;
 	netdev->netdev_ops = &atl1c_netdev_ops;
 	netdev->watchdog_timeo = AT_TX_WATCHDOG;
 	atl1c_set_ethtool_ops(netdev);
@@ -2706,14 +2507,13 @@
 		dev_err(&pdev->dev, "cannot map device registers\n");
 		goto err_ioremap;
 	}
-	netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
 
 	/* init mii data */
 	adapter->mii.dev = netdev;
 	adapter->mii.mdio_read  = atl1c_mdio_read;
 	adapter->mii.mdio_write = atl1c_mdio_write;
 	adapter->mii.phy_id_mask = 0x1f;
-	adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
+	adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
 	netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
 	setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
 			(unsigned long)adapter);
@@ -2723,8 +2523,7 @@
 		dev_err(&pdev->dev, "net device private data init failed\n");
 		goto err_sw_init;
 	}
-	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
-			ATL1C_PCIE_PHY_RESET);
+	atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
 
 	/* Init GPHY as early as possible due to power saving issue  */
 	atl1c_phy_reset(&adapter->hw);
@@ -2752,7 +2551,7 @@
 		dev_dbg(&pdev->dev, "mac address : %pM\n",
 			adapter->hw.mac_addr);
 
-	atl1c_hw_set_mac_addr(&adapter->hw);
+	atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
 	INIT_WORK(&adapter->common_task, atl1c_common_task);
 	adapter->work_event = 0;
 	err = register_netdev(netdev);
@@ -2796,6 +2595,8 @@
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 
 	unregister_netdev(netdev);
+	/* restore permanent address */
+	atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr);
 	atl1c_phy_disable(&adapter->hw);
 
 	iounmap(adapter->hw.hw_addr);
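
The reworked atl1c paths above (atl1c_start_mac(), atl1c_set_aspm()) update multi-bit register fields through a FIELD_SETX() helper instead of the open-coded mask/shift arithmetic the removed atl1c_setup_mac_ctrl() used. The driver's own definition lives in its hardware header; the sketch below is only a plausible reconstruction of such a helper, assuming every field FOO comes with FOO_MASK and FOO_SHIFT constants.

/* Hedged sketch of a FIELD_SETX()-style helper; the real atl1c macros may
 * differ in detail.  Assumes <name>_MASK / <name>_SHIFT exist per field. */
#define FIELDX(name, v)		(((v) & (name ## _MASK)) << (name ## _SHIFT))
#define FIELD_GETX(x, name)	(((x) >> (name ## _SHIFT)) & (name ## _MASK))
#define FIELD_SETX(x, name, v)	\
	(((x) & ~((name ## _MASK) << (name ## _SHIFT))) | FIELDX(name, v))

With that shape, mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed) clears the old speed bits and inserts the new value in a single expression, which is how atl1c_start_mac() above composes the MAC control word.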
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 93ff2b2..1220e51 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1883,27 +1883,24 @@
 	int err = 0;
 
 	adapter->have_msi = true;
-	err = pci_enable_msi(adapter->pdev);
+	err = pci_enable_msi(pdev);
 	if (err) {
-		netdev_dbg(adapter->netdev,
+		netdev_dbg(netdev,
 			   "Unable to allocate MSI interrupt Error: %d\n", err);
 		adapter->have_msi = false;
-	} else
-		netdev->irq = pdev->irq;
-
+	}
 
 	if (!adapter->have_msi)
 		flags |= IRQF_SHARED;
-	err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
-			netdev->name, netdev);
+	err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
 	if (err) {
 		netdev_dbg(adapter->netdev,
 			   "Unable to allocate interrupt Error: %d\n", err);
 		if (adapter->have_msi)
-			pci_disable_msi(adapter->pdev);
+			pci_disable_msi(pdev);
 		return err;
 	}
-	netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
+	netdev_dbg(netdev, "atl1e_request_irq OK\n");
 	return err;
 }
 
@@ -2233,7 +2230,6 @@
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 	pci_set_drvdata(pdev, netdev);
 
-	netdev->irq  = pdev->irq;
 	netdev->netdev_ops = &atl1e_netdev_ops;
 
 	netdev->watchdog_timeo = AT_TX_WATCHDOG;
@@ -2319,7 +2315,6 @@
 		netdev_err(netdev, "cannot map device registers\n");
 		goto err_ioremap;
 	}
-	netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
 
 	/* init mii data */
 	adapter->mii.dev = netdev;
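
Both the atl1c and atl1e hunks above drop the netdev->irq bookkeeping but keep the same request pattern: try MSI, fall back to a shared legacy interrupt if MSI is unavailable, and undo the MSI enable when request_irq() fails. A condensed sketch of that pattern; the function and variable names here are illustrative, not the driver's.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

/* Illustrative MSI-then-INTx request helper mirroring the flow above. */
static int example_request_irq(struct pci_dev *pdev, struct net_device *netdev,
			       irq_handler_t handler, bool *have_msi)
{
	unsigned long flags = 0;
	int err;

	*have_msi = !pci_enable_msi(pdev);	/* MSI is optional */
	if (!*have_msi)
		flags |= IRQF_SHARED;		/* legacy INTx may be shared */

	err = request_irq(pdev->irq, handler, flags, netdev->name, netdev);
	if (err && *have_msi)
		pci_disable_msi(pdev);		/* roll back on failure */
	return err;
}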
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index c926857..5d10884 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -266,7 +266,7 @@
 	 * interrupts & Clear any pending interrupt events
 	 */
 	/*
-	 * iowrite32(0, hw->hw_addr + REG_IMR);
+	 * atlx_irq_disable(adapter);
 	 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
 	 */
 
@@ -1917,7 +1917,7 @@
 	return num_alloc;
 }
 
-static void atl1_intr_rx(struct atl1_adapter *adapter)
+static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
 {
 	int i, count;
 	u16 length;
@@ -1933,7 +1933,7 @@
 
 	rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
 
-	while (1) {
+	while (count < budget) {
 		rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
 		i = 1;
 		if (likely(rrd->xsz.valid)) {	/* packet valid */
@@ -2032,7 +2032,7 @@
 
 			__vlan_hwaccel_put_tag(skb, vlan_tag);
 		}
-		netif_rx(skb);
+		netif_receive_skb(skb);
 
 		/* let protocol layer free skb */
 		buffer_info->skb = NULL;
@@ -2065,14 +2065,17 @@
 		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
 		spin_unlock(&adapter->mb_lock);
 	}
+
+	return count;
 }
 
-static void atl1_intr_tx(struct atl1_adapter *adapter)
+static int atl1_intr_tx(struct atl1_adapter *adapter)
 {
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
 	struct atl1_buffer *buffer_info;
 	u16 sw_tpd_next_to_clean;
 	u16 cmb_tpd_next_to_clean;
+	int count = 0;
 
 	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
 	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
@@ -2092,12 +2095,16 @@
 
 		if (++sw_tpd_next_to_clean == tpd_ring->count)
 			sw_tpd_next_to_clean = 0;
+
+		count++;
 	}
 	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
 
 	if (netif_queue_stopped(adapter->netdev) &&
 	    netif_carrier_ok(adapter->netdev))
 		netif_wake_queue(adapter->netdev);
+
+	return count;
 }
 
 static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
@@ -2439,6 +2446,49 @@
 	return NETDEV_TX_OK;
 }
 
+static int atl1_rings_clean(struct napi_struct *napi, int budget)
+{
+	struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi);
+	int work_done = atl1_intr_rx(adapter, budget);
+
+	if (atl1_intr_tx(adapter))
+		work_done = budget;
+
+	/* Let's come again to process some more packets */
+	if (work_done >= budget)
+		return work_done;
+
+	napi_complete(napi);
+	/* re-enable Interrupt */
+	if (likely(adapter->int_enabled))
+		atlx_imr_set(adapter, IMR_NORMAL_MASK);
+	return work_done;
+}
+
+static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
+{
+	if (!napi_schedule_prep(&adapter->napi))
+		/* Even when the RX/TX ints are disabled via the IMR register,
+		 * the ISR bits may be set anyway (without producing an IRQ).
+		 * To handle that, the napi functions are used to check whether
+		 * something is already scheduled.
+		 */
+		return 0;
+
+	__napi_schedule(&adapter->napi);
+
+	/*
+	 * Disable RX/TX ints via the IMR register if allowed.
+	 * The NAPI handler must re-enable them in the same way.
+	 */
+	if (!adapter->int_enabled)
+		return 1;
+
+	atlx_imr_set(adapter, IMR_NORXTX_MASK);
+	return 1;
+}
+
 /*
  * atl1_intr - Interrupt Handler
  * @irq: interrupt number
@@ -2449,78 +2499,74 @@
 {
 	struct atl1_adapter *adapter = netdev_priv(data);
 	u32 status;
-	int max_ints = 10;
 
 	status = adapter->cmb.cmb->int_stats;
 	if (!status)
 		return IRQ_NONE;
 
-	do {
-		/* clear CMB interrupt status at once */
-		adapter->cmb.cmb->int_stats = 0;
+	/* clear CMB interrupt status at once, but leave the rx/tx
+	 * interrupt status bits set: they may only be dropped once rx/tx
+	 * processing has been queued, otherwise an interrupt can be lost.
+	 */
+	adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX);
 
-		if (status & ISR_GPHY)	/* clear phy status */
-			atlx_clear_phy_int(adapter);
+	if (status & ISR_GPHY)	/* clear phy status */
+		atlx_clear_phy_int(adapter);
 
-		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
-		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
+	/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
+	iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
 
-		/* check if SMB intr */
-		if (status & ISR_SMB)
-			atl1_inc_smb(adapter);
+	/* check if SMB intr */
+	if (status & ISR_SMB)
+		atl1_inc_smb(adapter);
 
-		/* check if PCIE PHY Link down */
-		if (status & ISR_PHY_LINKDOWN) {
-			if (netif_msg_intr(adapter))
-				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-					"pcie phy link down %x\n", status);
-			if (netif_running(adapter->netdev)) {	/* reset MAC */
-				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-				schedule_work(&adapter->reset_dev_task);
-				return IRQ_HANDLED;
-			}
-		}
-
-		/* check if DMA read/write error ? */
-		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
-			if (netif_msg_intr(adapter))
-				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-					"pcie DMA r/w error (status = 0x%x)\n",
-					status);
-			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+	/* check if PCIE PHY Link down */
+	if (status & ISR_PHY_LINKDOWN) {
+		if (netif_msg_intr(adapter))
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"pcie phy link down %x\n", status);
+		if (netif_running(adapter->netdev)) {	/* reset MAC */
+			atlx_irq_disable(adapter);
 			schedule_work(&adapter->reset_dev_task);
 			return IRQ_HANDLED;
 		}
+	}
 
-		/* link event */
-		if (status & ISR_GPHY) {
-			adapter->soft_stats.tx_carrier_errors++;
-			atl1_check_for_link(adapter);
-		}
+	/* check if DMA read/write error ? */
+	if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+		if (netif_msg_intr(adapter))
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"pcie DMA r/w error (status = 0x%x)\n",
+				status);
+		atlx_irq_disable(adapter);
+		schedule_work(&adapter->reset_dev_task);
+		return IRQ_HANDLED;
+	}
 
-		/* transmit event */
-		if (status & ISR_CMB_TX)
-			atl1_intr_tx(adapter);
+	/* link event */
+	if (status & ISR_GPHY) {
+		adapter->soft_stats.tx_carrier_errors++;
+		atl1_check_for_link(adapter);
+	}
 
-		/* rx exception */
-		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
-			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
-			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
-			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
-				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
-				ISR_HOST_RRD_OV))
-				if (netif_msg_intr(adapter))
-					dev_printk(KERN_DEBUG,
-						&adapter->pdev->dev,
-						"rx exception, ISR = 0x%x\n",
-						status);
-			atl1_intr_rx(adapter);
-		}
+	/* transmit or receive event */
+	if (status & (ISR_CMB_TX | ISR_CMB_RX) &&
+	    atl1_sched_rings_clean(adapter))
+		adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats &
+					      ~(ISR_CMB_TX | ISR_CMB_RX);
 
-		if (--max_ints < 0)
-			break;
-
-	} while ((status = adapter->cmb.cmb->int_stats));
+	/* rx exception */
+	if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
+		ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+		ISR_HOST_RRD_OV))) {
+		if (netif_msg_intr(adapter))
+			dev_printk(KERN_DEBUG,
+				&adapter->pdev->dev,
+				"rx exception, ISR = 0x%x\n",
+				status);
+		atl1_sched_rings_clean(adapter);
+	}
 
 	/* re-enable Interrupt */
 	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
@@ -2599,6 +2645,7 @@
 	if (unlikely(err))
 		goto err_up;
 
+	napi_enable(&adapter->napi);
 	atlx_irq_enable(adapter);
 	atl1_check_link(adapter);
 	netif_start_queue(netdev);
@@ -2615,6 +2662,7 @@
 {
 	struct net_device *netdev = adapter->netdev;
 
+	napi_disable(&adapter->napi);
 	netif_stop_queue(netdev);
 	del_timer_sync(&adapter->phy_config_timer);
 	adapter->phy_timer_pending = false;
@@ -2971,6 +3019,7 @@
 
 	netdev->netdev_ops = &atl1_netdev_ops;
 	netdev->watchdog_timeo = 5 * HZ;
+	netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
 
 	netdev->ethtool_ops = &atl1_ethtool_ops;
 	adapter->bd_number = cards_found;
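
The atl1 hunks above move ring cleanup out of the hard interrupt handler and into NAPI: the ISR only schedules the poll and masks the RX/TX sources (IMR_NORXTX_MASK), while atl1_rings_clean() does the work under a budget and restores IMR_NORMAL_MASK when finished. A minimal poll skeleton in the same shape; the example_* names stand in for driver helpers and are hypothetical.

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct example_adapter {
	struct napi_struct napi;
	/* ... device state ... */
};

int example_rx_clean(struct example_adapter *adapter, int budget);
int example_tx_clean(struct example_adapter *adapter);
void example_irq_unmask(struct example_adapter *adapter);

/* Skeleton of a combined RX/TX NAPI poll, as in atl1_rings_clean() above. */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *adapter =
		container_of(napi, struct example_adapter, napi);
	int work_done = example_rx_clean(adapter, budget);

	if (example_tx_clean(adapter))	/* TX progress: ask to be polled again */
		work_done = budget;

	if (work_done < budget) {
		napi_complete(napi);		/* leave polled mode */
		example_irq_unmask(adapter);	/* let RX/TX interrupt again */
	}
	return work_done;
}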
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index e04bf4d..3bf79a5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -275,13 +275,17 @@
 #define ISR_DIS_SMB				0x20000000
 #define ISR_DIS_DMA				0x40000000
 
-/* Normal Interrupt mask  */
-#define IMR_NORMAL_MASK	(\
+/* Normal Interrupt mask without RX/TX enabled */
+#define IMR_NORXTX_MASK	(\
 	ISR_SMB		|\
 	ISR_GPHY	|\
 	ISR_PHY_LINKDOWN|\
 	ISR_DMAR_TO_RST	|\
-	ISR_DMAW_TO_RST	|\
+	ISR_DMAW_TO_RST)
+
+/* Normal Interrupt mask  */
+#define IMR_NORMAL_MASK	(\
+	IMR_NORXTX_MASK	|\
 	ISR_CMB_TX	|\
 	ISR_CMB_RX)
 
@@ -758,6 +762,7 @@
 	u16 link_speed;
 	u16 link_duplex;
 	spinlock_t lock;
+	struct napi_struct napi;
 	struct work_struct reset_dev_task;
 	struct work_struct link_chg_task;
 
@@ -781,6 +786,12 @@
 	u16 ict;		/* interrupt clear timer (2us resolution */
 	struct mii_if_info mii;	/* MII interface info */
 
+	/*
+	 * Use this value to check whether the napi handler is
+	 * allowed to re-enable ints or not
+	 */
+	bool int_enabled;
+
 	u32 bd_number;		/* board number */
 	bool pci_using_64;
 	struct atl1_hw hw;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index c9e9dc5..b4f3aa4 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -155,14 +155,21 @@
 	}
 }
 
+static inline void atlx_imr_set(struct atlx_adapter *adapter,
+				unsigned int imr)
+{
+	iowrite32(imr, adapter->hw.hw_addr + REG_IMR);
+	ioread32(adapter->hw.hw_addr + REG_IMR);
+}
+
 /*
  * atlx_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
  */
 static void atlx_irq_enable(struct atlx_adapter *adapter)
 {
-	iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
-	ioread32(adapter->hw.hw_addr + REG_IMR);
+	atlx_imr_set(adapter, IMR_NORMAL_MASK);
+	adapter->int_enabled = true;
 }
 
 /*
@@ -171,8 +178,8 @@
  */
 static void atlx_irq_disable(struct atlx_adapter *adapter)
 {
-	iowrite32(0, adapter->hw.hw_addr + REG_IMR);
-	ioread32(adapter->hw.hw_addr + REG_IMR);
+	adapter->int_enabled = false;
+	atlx_imr_set(adapter, 0);
 	synchronize_irq(adapter->pdev->irq);
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8297e28..ac7b744 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3006,7 +3006,7 @@
 
 	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
 			 PCI_DMA_FROMDEVICE);
-	skb = build_skb(data);
+	skb = build_skb(data, 0);
 	if (!skb) {
 		kfree(data);
 		goto error;
@@ -7343,8 +7343,7 @@
 	{ "rx_fw_discards" },
 };
 
-#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
-			sizeof(bnx2_stats_str_arr[0]))
+#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
 
 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
 
@@ -7976,7 +7975,6 @@
 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
 	struct bnx2 *bp;
-	unsigned long mem_len;
 	int rc, i, j;
 	u32 reg;
 	u64 dma_mask, persist_dma_mask;
@@ -8036,13 +8034,8 @@
 #endif
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
-	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
-	dev->mem_end = dev->mem_start + mem_len;
-	dev->irq = pdev->irq;
-
-	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
-
+	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
+							 TX_MAX_TSS_RINGS + 1));
 	if (!bp->regview) {
 		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
 		rc = -ENOMEM;
@@ -8346,10 +8339,8 @@
 		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
 	}
 
-	if (bp->regview) {
-		iounmap(bp->regview);
-		bp->regview = NULL;
-	}
+	pci_iounmap(pdev, bp->regview);
+	bp->regview = NULL;
 
 err_out_release:
 	pci_release_regions(pdev);
@@ -8432,7 +8423,7 @@
 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int version_printed = 0;
-	struct net_device *dev = NULL;
+	struct net_device *dev;
 	struct bnx2 *bp;
 	int rc;
 	char str[40];
@@ -8442,15 +8433,12 @@
 
 	/* dev zeroed in init_etherdev */
 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
-
 	if (!dev)
 		return -ENOMEM;
 
 	rc = bnx2_init_board(pdev, dev);
-	if (rc < 0) {
-		free_netdev(dev);
-		return rc;
-	}
+	if (rc < 0)
+		goto err_free;
 
 	dev->netdev_ops = &bnx2_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
@@ -8480,22 +8468,21 @@
 		goto error;
 	}
 
-	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
-		    board_info[ent->driver_data].name,
+	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
+		    "node addr %pM\n", board_info[ent->driver_data].name,
 		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
 		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
-		    bnx2_bus_string(bp, str),
-		    dev->base_addr,
-		    bp->pdev->irq, dev->dev_addr);
+		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
+		    pdev->irq, dev->dev_addr);
 
 	return 0;
 
 error:
-	if (bp->regview)
-		iounmap(bp->regview);
+	iounmap(bp->regview);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
+err_free:
 	free_netdev(dev);
 	return rc;
 }
@@ -8511,8 +8498,7 @@
 	del_timer_sync(&bp->timer);
 	cancel_work_sync(&bp->reset_task);
 
-	if (bp->regview)
-		iounmap(bp->regview);
+	pci_iounmap(bp->pdev, bp->regview);
 
 	kfree(bp->temp_stats_blk);
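
Two themes run through the bnx2 hunks above: register mapping switches from ioremap_nocache() on a hand-computed base address to pci_iomap()/pci_iounmap(), and the RX path passes a second argument to build_skb() to match its newer two-argument signature (0 indicating a kmalloc()-backed head rather than a page-fragment size). A sketch of the mapping side; the length argument is illustrative.

#include <linux/pci.h>

/* Map BAR 0 the way the probe path above now does (passing 0 as the
 * length maps the whole BAR). */
static void __iomem *example_map_bar0(struct pci_dev *pdev, unsigned long len)
{
	void __iomem *regs = pci_iomap(pdev, 0, len);

	if (!regs)
		dev_err(&pdev->dev, "cannot map register space\n");
	return regs;		/* unmap later with pci_iounmap(pdev, regs) */
}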
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2c9ee55..e30e2a2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,13 +23,17 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.72.10-0"
-#define DRV_MODULE_RELDATE      "2012/02/20"
+#define DRV_MODULE_VERSION      "1.72.50-0"
+#define DRV_MODULE_RELDATE      "2012/04/23"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
 #define BCM_DCBNL
 #endif
+
+
+#include "bnx2x_hsi.h"
+
 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 #define BCM_CNIC 1
 #include "../cnic_if.h"
@@ -345,7 +349,6 @@
 #define SGE_PAGE_SIZE		PAGE_SIZE
 #define SGE_PAGE_SHIFT		PAGE_SHIFT
 #define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
-#define SGE_PAGES		(SGE_PAGE_SIZE * PAGES_PER_SGE)
 
 /* SGE ring related macros */
 #define NUM_RX_SGE_PAGES	2
@@ -815,6 +818,8 @@
 #define CHIP_NUM_57800_MF		0x16a5
 #define CHIP_NUM_57810			0x168e
 #define CHIP_NUM_57810_MF		0x16ae
+#define CHIP_NUM_57811			0x163d
+#define CHIP_NUM_57811_MF		0x163e
 #define CHIP_NUM_57840			0x168d
 #define CHIP_NUM_57840_MF		0x16ab
 #define CHIP_IS_E1(bp)			(CHIP_NUM(bp) == CHIP_NUM_57710)
@@ -826,6 +831,8 @@
 #define CHIP_IS_57800_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800_MF)
 #define CHIP_IS_57810(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810)
 #define CHIP_IS_57810_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57811(bp)		(CHIP_NUM(bp) == CHIP_NUM_57811)
+#define CHIP_IS_57811_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57811_MF)
 #define CHIP_IS_57840(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840)
 #define CHIP_IS_57840_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840_MF)
 #define CHIP_IS_E1H(bp)			(CHIP_IS_57711(bp) || \
@@ -836,6 +843,8 @@
 					 CHIP_IS_57800_MF(bp) || \
 					 CHIP_IS_57810(bp) || \
 					 CHIP_IS_57810_MF(bp) || \
+					 CHIP_IS_57811(bp) || \
+					 CHIP_IS_57811_MF(bp) || \
 					 CHIP_IS_57840(bp) || \
 					 CHIP_IS_57840_MF(bp))
 #define CHIP_IS_E1x(bp)			(CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
@@ -1053,6 +1062,13 @@
 		struct flow_control_configuration pfc_config;
 	} func_rdata;
 
+	/* afex ramrod data cannot be part of the func_rdata union because
+	 * these events might arrive in parallel to other events from
+	 * func_rdata.  If they were defined in the same union, the data
+	 * could get corrupted.
+	 */
+	struct afex_vif_list_ramrod_data func_afex_rdata;
+
 	/* used by dmae command executer */
 	struct dmae_command		dmae[MAX_DMAE_C];
 
@@ -1169,6 +1185,7 @@
 enum {
 	BNX2X_SP_RTNL_SETUP_TC,
 	BNX2X_SP_RTNL_TX_TIMEOUT,
+	BNX2X_SP_RTNL_AFEX_F_UPDATE,
 	BNX2X_SP_RTNL_FAN_FAILURE,
 };
 
@@ -1222,7 +1239,6 @@
 #define ETH_MAX_JUMBO_PACKET_SIZE	9600
 /* TCP with Timestamp Option (32) + IPv6 (40) */
 #define ETH_MAX_TPA_HEADER_SIZE		72
-#define ETH_MIN_TPA_HEADER_SIZE		40
 
 	/* Max supported alignment is 256 (8 shift) */
 #define BNX2X_RX_ALIGN_SHIFT		min(8, L1_CACHE_SHIFT)
@@ -1300,6 +1316,7 @@
 #define NO_ISCSI_FLAG			(1 << 14)
 #define NO_FCOE_FLAG			(1 << 15)
 #define BC_SUPPORTS_PFC_STATS		(1 << 17)
+#define USING_SINGLE_MSIX_FLAG		(1 << 20)
 
 #define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
 #define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1329,21 +1346,20 @@
 	struct bnx2x_common	common;
 	struct bnx2x_port	port;
 
-	struct cmng_struct_per_port cmng;
-	u32			vn_weight_sum;
+	struct cmng_init	cmng;
+
 	u32			mf_config[E1HVN_MAX];
-	u32			mf2_config[E2_FUNC_MAX];
+	u32			mf_ext_config;
 	u32			path_has_ovlan; /* E3 */
 	u16			mf_ov;
 	u8			mf_mode;
 #define IS_MF(bp)		(bp->mf_mode != 0)
 #define IS_MF_SI(bp)		(bp->mf_mode == MULTI_FUNCTION_SI)
 #define IS_MF_SD(bp)		(bp->mf_mode == MULTI_FUNCTION_SD)
+#define IS_MF_AFEX(bp)		(bp->mf_mode == MULTI_FUNCTION_AFEX)
 
 	u8			wol;
 
-	bool			gro_check;
-
 	int			rx_ring_size;
 
 	u16			tx_quick_cons_trip_int;
@@ -1371,7 +1387,6 @@
 #define BNX2X_STATE_DIAG		0xe000
 #define BNX2X_STATE_ERROR		0xf000
 
-	int			multi_mode;
 #define BNX2X_MAX_PRIORITY		8
 #define BNX2X_MAX_ENTRIES_PER_PRI	16
 #define BNX2X_MAX_COS			3
@@ -1582,6 +1597,9 @@
 	struct dcbx_features			dcbx_remote_feat;
 	u32					dcbx_remote_flags;
 #endif
+	/* AFEX: store default vlan used */
+	int					afex_def_vlan_tag;
+	enum mf_cfg_afex_vlan_mode		afex_vlan_mode;
 	u32					pending_max;
 
 	/* multiple tx classes of service */
@@ -2138,9 +2156,16 @@
 #define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
 #define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
 
+#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp)  ((bp)->mf_ext_config & \
+					 MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+
+#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp))
 #define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
 				(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
 				 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+#else
+#define IS_MF_FCOE_AFEX(bp)	false
 #endif
 
+
 #endif /* bnx2x.h */
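
The new func_afex_rdata member sits next to, not inside, the func_rdata union for the reason the comment above gives: union members share storage, so an AFEX ramrod prepared while another func_rdata event is in flight would overwrite it. A reduced illustration of the layout difference; all types here are placeholders, not the real HSI structures.

#include <linux/types.h>

struct example_rdata_a { u32 words[4]; };
struct example_rdata_b { u32 words[8]; };
struct example_afex_rdata { u32 words[4]; };

struct example_slowpath {
	union {
		struct example_rdata_a a;
		struct example_rdata_b b;	/* shares bytes with 'a' */
	} func_rdata;				/* one event at a time only */

	struct example_afex_rdata afex;		/* own storage: safe to fill
						 * while func_rdata is in use */
};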
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4b05481..ad0743b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -23,7 +23,6 @@
 #include <linux/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
-#include <linux/firmware.h>
 #include <linux/prefetch.h>
 #include "bnx2x_cmn.h"
 #include "bnx2x_init.h"
@@ -329,16 +328,6 @@
 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
 		tpa_info->full_page =
 			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
-		/*
-		 * FW 7.2.16 BUG workaround:
-		 * if SGE size is (exactly) multiple gro_size
-		 * fw will place one less frag on SGE.
-		 * the calculation is done only for potentially
-		 * dangerous MTUs.
-		 */
-		if (unlikely(bp->gro_check))
-			if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
-				tpa_info->full_page -= gro_size;
 		tpa_info->gro_size = gro_size;
 	}
 
@@ -369,8 +358,8 @@
  * Approximate value of the MSS for this aggregation calculated using
  * the first packet of it.
  */
-static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
-				    u16 len_on_bd)
+static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+			     u16 len_on_bd)
 {
 	/*
 	 * TPA arrgregation won't have either IP options or TCP options
@@ -396,6 +385,36 @@
 	return len_on_bd - hdrs_len;
 }
 
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
+			      struct bnx2x_fastpath *fp, u16 index)
+{
+	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+	dma_addr_t mapping;
+
+	if (unlikely(page == NULL)) {
+		BNX2X_ERR("Can't alloc sge\n");
+		return -ENOMEM;
+	}
+
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		__free_pages(page, PAGES_PER_SGE_SHIFT);
+		BNX2X_ERR("Can't map sge\n");
+		return -ENOMEM;
+	}
+
+	sw_buf->page = page;
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
+
+	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
+	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			       struct bnx2x_agg_info *tpa_info,
 			       u16 pages,
@@ -494,11 +513,11 @@
 	return 0;
 }
 
-static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-				  struct bnx2x_agg_info *tpa_info,
-				  u16 pages,
-				  struct eth_end_agg_rx_cqe *cqe,
-				  u16 cqe_idx)
+static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			   struct bnx2x_agg_info *tpa_info,
+			   u16 pages,
+			   struct eth_end_agg_rx_cqe *cqe,
+			   u16 cqe_idx)
 {
 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 	u8 pad = tpa_info->placement_offset;
@@ -524,7 +543,7 @@
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 			 fp->rx_buf_size, DMA_FROM_DEVICE);
 	if (likely(new_data))
-		skb = build_skb(data);
+		skb = build_skb(data, 0);
 
 	if (likely(skb)) {
 #ifdef BNX2X_STOP_ON_ERROR
@@ -568,6 +587,36 @@
 	fp->eth_q_stats.rx_skb_alloc_failed++;
 }
 
+static int bnx2x_alloc_rx_data(struct bnx2x *bp,
+			       struct bnx2x_fastpath *fp, u16 index)
+{
+	u8 *data;
+	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
+	dma_addr_t mapping;
+
+	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+	if (unlikely(data == NULL))
+		return -ENOMEM;
+
+	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+				 fp->rx_buf_size,
+				 DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		kfree(data);
+		BNX2X_ERR("Can't map rx data\n");
+		return -ENOMEM;
+	}
+
+	rx_buf->data = data;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
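
bnx2x_alloc_rx_sge() and bnx2x_alloc_rx_data() above (moved out of the header into the .c file) follow the same allocate-map-verify sequence: get the buffer, DMA-map it, and treat a mapping failure like an allocation failure, freeing the buffer instead of handing the device a bogus address. The same sequence in isolation, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Allocate a receive buffer and map it for the device; returns NULL and
 * leaks nothing if either step fails. */
static void *example_alloc_mapped(struct device *dev, size_t len,
				  dma_addr_t *mapping)
{
	void *buf = kmalloc(len, GFP_ATOMIC);	/* atomic: NAPI/IRQ context */

	if (!buf)
		return NULL;

	*mapping = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *mapping)) {
		kfree(buf);			/* mapping failed: give it back */
		return NULL;
	}
	return buf;
}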
 {
@@ -732,7 +781,7 @@
 						 dma_unmap_addr(rx_buf, mapping),
 						 fp->rx_buf_size,
 						 DMA_FROM_DEVICE);
-				skb = build_skb(data);
+				skb = build_skb(data, 0);
 				if (unlikely(!skb)) {
 					kfree(data);
 					fp->eth_q_stats.rx_skb_alloc_failed++;
@@ -881,8 +930,8 @@
  *
  * It uses a none-atomic bit operations because is called under the mutex.
  */
-static inline void bnx2x_fill_report_data(struct bnx2x *bp,
-					  struct bnx2x_link_report_data *data)
+static void bnx2x_fill_report_data(struct bnx2x *bp,
+				   struct bnx2x_link_report_data *data)
 {
 	u16 line_speed = bnx2x_get_mf_speed(bp);
 
@@ -1000,6 +1049,47 @@
 	}
 }
 
+static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
+{
+	int i;
+
+	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+		struct eth_rx_sge *sge;
+
+		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
+		sge->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
+			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+
+		sge->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
+			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+	}
+}
+
+static void bnx2x_free_tpa_pool(struct bnx2x *bp,
+				struct bnx2x_fastpath *fp, int last)
+{
+	int i;
+
+	for (i = 0; i < last; i++) {
+		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+		u8 *data = first_buf->data;
+
+		if (data == NULL) {
+			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
+			continue;
+		}
+		if (tpa_info->tpa_state == BNX2X_TPA_START)
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(first_buf, mapping),
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
+		kfree(data);
+		first_buf->data = NULL;
+	}
+}
+
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
@@ -1212,16 +1302,15 @@
 
 void bnx2x_free_irq(struct bnx2x *bp)
 {
-	if (bp->flags & USING_MSIX_FLAG)
+	if (bp->flags & USING_MSIX_FLAG &&
+	    !(bp->flags & USING_SINGLE_MSIX_FLAG))
 		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
 				     CNIC_PRESENT + 1);
-	else if (bp->flags & USING_MSI_FLAG)
-		free_irq(bp->pdev->irq, bp->dev);
 	else
-		free_irq(bp->pdev->irq, bp->dev);
+		free_irq(bp->dev->irq, bp->dev);
 }
 
-int bnx2x_enable_msix(struct bnx2x *bp)
+int __devinit bnx2x_enable_msix(struct bnx2x *bp)
 {
 	int msix_vec = 0, i, rc, req_cnt;
 
@@ -1261,8 +1350,8 @@
 		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
 
 		if (rc) {
-			BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
-			return rc;
+			BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+			goto no_msix;
 		}
 		/*
 		 * decrease number of queues by number of unallocated entries
@@ -1270,18 +1359,34 @@
 		bp->num_queues -= diff;
 
 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
-				  bp->num_queues);
-	} else if (rc) {
-		/* fall to INTx if not enough memory */
-		if (rc == -ENOMEM)
-			bp->flags |= DISABLE_MSI_FLAG;
+			       bp->num_queues);
+	} else if (rc > 0) {
+		/* Get by with single vector */
+		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
+		if (rc) {
+			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
+				       rc);
+			goto no_msix;
+		}
+
+		BNX2X_DEV_INFO("Using single MSI-X vector\n");
+		bp->flags |= USING_SINGLE_MSIX_FLAG;
+
+	} else if (rc < 0) {
 		BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
-		return rc;
+		goto no_msix;
 	}
 
 	bp->flags |= USING_MSIX_FLAG;
 
 	return 0;
+
+no_msix:
+	/* fall to INTx if not enough memory */
+	if (rc == -ENOMEM)
+		bp->flags |= DISABLE_MSI_FLAG;
+
+	return rc;
 }
 
 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
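
bnx2x_enable_msix() above grows a two-step fallback built on the old pci_enable_msix() return convention (0 on success, a positive count of vectors the platform could actually provide, negative on hard error): first retry with the reduced count, then settle for a single vector and set USING_SINGLE_MSIX_FLAG, and only then give up on MSI-X entirely. A compressed, hedged sketch of that ladder; the names and the minimum-count policy are illustrative.

#include <linux/errno.h>
#include <linux/pci.h>

/* Returns the number of MSI-X vectors enabled, or a negative errno when
 * the caller should fall back to MSI/INTx.  'want' and 'min' are policy
 * knobs, not driver constants. */
static int example_enable_msix(struct pci_dev *pdev, struct msix_entry *tbl,
			       int want, int min)
{
	int rc = pci_enable_msix(pdev, tbl, want);

	if (rc > 0 && rc >= min) {
		/* fewer vectors available: retry with what is on offer */
		if (!pci_enable_msix(pdev, tbl, rc))
			return rc;
	} else if (rc > 0) {
		/* not even 'min': try to get by with a single vector */
		if (!pci_enable_msix(pdev, tbl, 1))
			return 1;
	} else if (rc == 0) {
		return want;
	}
	return rc < 0 ? rc : -ENOSPC;	/* give up on MSI-X */
}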
@@ -1343,22 +1448,26 @@
 static int bnx2x_req_irq(struct bnx2x *bp)
 {
 	unsigned long flags;
-	int rc;
+	unsigned int irq;
 
-	if (bp->flags & USING_MSI_FLAG)
+	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
 		flags = 0;
 	else
 		flags = IRQF_SHARED;
 
-	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
-			 bp->dev->name, bp->dev);
-	return rc;
+	if (bp->flags & USING_MSIX_FLAG)
+		irq = bp->msix_table[0].vector;
+	else
+		irq = bp->pdev->irq;
+
+	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
 }
 
-static inline int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
 {
 	int rc = 0;
-	if (bp->flags & USING_MSIX_FLAG) {
+	if (bp->flags & USING_MSIX_FLAG &&
+	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
 		rc = bnx2x_req_msix_irqs(bp);
 		if (rc)
 			return rc;
@@ -1371,15 +1480,20 @@
 		}
 		if (bp->flags & USING_MSI_FLAG) {
 			bp->dev->irq = bp->pdev->irq;
-			netdev_info(bp->dev, "using MSI  IRQ %d\n",
-			       bp->pdev->irq);
+			netdev_info(bp->dev, "using MSI IRQ %d\n",
+				    bp->dev->irq);
+		}
+		if (bp->flags & USING_MSIX_FLAG) {
+			bp->dev->irq = bp->msix_table[0].vector;
+			netdev_info(bp->dev, "using MSIX IRQ %d\n",
+				    bp->dev->irq);
 		}
 	}
 
 	return 0;
 }
 
-static inline void bnx2x_napi_enable(struct bnx2x *bp)
+static void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;
 
@@ -1387,7 +1501,7 @@
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
-static inline void bnx2x_napi_disable(struct bnx2x *bp)
+static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
@@ -1437,24 +1551,15 @@
 	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
 }
 
+
 void bnx2x_set_num_queues(struct bnx2x *bp)
 {
-	switch (bp->multi_mode) {
-	case ETH_RSS_MODE_DISABLED:
-		bp->num_queues = 1;
-		break;
-	case ETH_RSS_MODE_REGULAR:
-		bp->num_queues = bnx2x_calc_num_queues(bp);
-		break;
-
-	default:
-		bp->num_queues = 1;
-		break;
-	}
+	/* RSS queues */
+	bp->num_queues = bnx2x_calc_num_queues(bp);
 
 #ifdef BCM_CNIC
-	/* override in STORAGE SD mode */
-	if (IS_MF_STORAGE_SD(bp))
+	/* override in STORAGE SD modes */
+	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
 		bp->num_queues = 1;
 #endif
 	/* Add special queues */
@@ -1483,7 +1588,7 @@
  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
  */
-static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+static int bnx2x_set_real_num_queues(struct bnx2x *bp)
 {
 	int rc, tx, rx;
 
@@ -1515,7 +1620,7 @@
 	return rc;
 }
 
-static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 {
 	int i;
 
@@ -1543,22 +1648,19 @@
 	}
 }
 
-static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
+static int bnx2x_init_rss_pf(struct bnx2x *bp)
 {
 	int i;
 	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
 
-	/*
-	 * Prepare the inital contents fo the indirection table if RSS is
+	/* Prepare the initial contents of the indirection table if RSS is
 	 * enabled
 	 */
-	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
-		for (i = 0; i < sizeof(ind_table); i++)
-			ind_table[i] =
-				bp->fp->cl_id +
-				ethtool_rxfh_indir_default(i, num_eth_queues);
-	}
+	for (i = 0; i < sizeof(ind_table); i++)
+		ind_table[i] =
+			bp->fp->cl_id +
+			ethtool_rxfh_indir_default(i, num_eth_queues);
 
 	/*
 	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
@@ -1568,11 +1670,12 @@
 	 * For 57712 and newer on the other hand it's a per-function
 	 * configuration.
 	 */
-	return bnx2x_config_rss_pf(bp, ind_table,
-				   bp->port.pmf || !CHIP_IS_E1x(bp));
+	return bnx2x_config_rss_eth(bp, ind_table,
+				    bp->port.pmf || !CHIP_IS_E1x(bp));
 }
 
-int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
+int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+			u8 *ind_table, bool config_hash)
 {
 	struct bnx2x_config_rss_params params = {NULL};
 	int i;
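
The indirection-table fill above is now unconditional because the multi_mode knob is gone and RSS is always configured in the regular mode. ethtool_rxfh_indir_default(i, n) is simply i modulo the number of RX rings, so the default table spreads slots round-robin over the queues, offset by the function's base client id. Written out longhand with illustrative names:

#include <linux/types.h>

/* Longhand equivalent of the default indirection-table fill above. */
static void example_fill_indir(u8 *table, size_t len,
			       u8 base_cl_id, u8 num_rx_queues)
{
	size_t i;

	for (i = 0; i < len; i++)
		table[i] = base_cl_id + (i % num_rx_queues);
}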
@@ -1584,58 +1687,35 @@
 	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
 	 */
 
-	params.rss_obj = &bp->rss_conf_obj;
+	params.rss_obj = rss_obj;
 
 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 
-	/* RSS mode */
-	switch (bp->multi_mode) {
-	case ETH_RSS_MODE_DISABLED:
-		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
-		break;
-	case ETH_RSS_MODE_REGULAR:
-		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
-		break;
-	case ETH_RSS_MODE_VLAN_PRI:
-		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
-		break;
-	case ETH_RSS_MODE_E1HOV_PRI:
-		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
-		break;
-	case ETH_RSS_MODE_IP_DSCP:
-		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
-		break;
-	default:
-		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
-		return -EINVAL;
-	}
+	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 
-	/* If RSS is enabled */
-	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
-		/* RSS configuration */
-		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
-		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
-		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
-		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+	/* RSS configuration */
+	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
 
-		/* Hash bits */
-		params.rss_result_mask = MULTI_MASK;
+	/* Hash bits */
+	params.rss_result_mask = MULTI_MASK;
 
-		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+	memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
 
-		if (config_hash) {
-			/* RSS keys */
-			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
-				params.rss_key[i] = random32();
+	if (config_hash) {
+		/* RSS keys */
+		for (i = 0; i < sizeof(params.rss_key) / 4; i++)
+			params.rss_key[i] = random32();
 
-			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
-		}
+		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
 	}
 
 	return bnx2x_config_rss(bp, &params);
 }
 
-static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 {
 	struct bnx2x_func_state_params func_params = {NULL};
 
@@ -1744,6 +1824,87 @@
 	return true;
 }
 
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @index:	fastpath index to be zeroed
+ *
+ * Makes sure the contents of bp->fp[index].napi are kept
+ * intact.
+ */
+static void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct napi_struct orig_napi = fp->napi;
+	/* bzero bnx2x_fastpath contents */
+	if (bp->stats_init)
+		memset(fp, 0, sizeof(*fp));
+	else {
+		/* Keep Queue statistics */
+		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
+		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
+
+		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
+					  GFP_KERNEL);
+		if (tmp_eth_q_stats)
+			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+			       sizeof(struct bnx2x_eth_q_stats));
+
+		tmp_eth_q_stats_old =
+			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
+				GFP_KERNEL);
+		if (tmp_eth_q_stats_old)
+			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+			       sizeof(struct bnx2x_eth_q_stats_old));
+
+		memset(fp, 0, sizeof(*fp));
+
+		if (tmp_eth_q_stats) {
+			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
+				   sizeof(struct bnx2x_eth_q_stats));
+			kfree(tmp_eth_q_stats);
+		}
+
+		if (tmp_eth_q_stats_old) {
+			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+			       sizeof(struct bnx2x_eth_q_stats_old));
+			kfree(tmp_eth_q_stats_old);
+		}
+
+	}
+
+	/* Restore the NAPI object as it has been already initialized */
+	fp->napi = orig_napi;
+
+	fp->bp = bp;
+	fp->index = index;
+	if (IS_ETH_FP(fp))
+		fp->max_cos = bp->max_cos;
+	else
+		/* Special queues support only one CoS */
+		fp->max_cos = 1;
+
+	/*
+	 * set the tpa flag for each queue. The tpa flag determines the queue
+	 * minimal size so it must be set prior to queue memory allocation
+	 */
+	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
+				  (bp->flags & GRO_ENABLE_FLAG &&
+				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
+	if (bp->flags & TPA_ENABLE_FLAG)
+		fp->mode = TPA_MODE_LRO;
+	else if (bp->flags & GRO_ENABLE_FLAG)
+		fp->mode = TPA_MODE_GRO;
+
+#ifdef BCM_CNIC
+	/* We don't want TPA on an FCoE L2 ring */
+	if (IS_FCOE_FP(fp))
+		fp->disable_tpa = 1;
+#endif
+}
+
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -1911,8 +2072,14 @@
 			SHMEM2_WR(bp, dcc_support,
 				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
 				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+		if (SHMEM2_HAS(bp, afex_driver_support))
+			SHMEM2_WR(bp, afex_driver_support,
+				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
 	}
 
+	/* Set AFEX default VLAN tag to an invalid value */
+	bp->afex_def_vlan_tag = -1;
+
 	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
 	rc = bnx2x_func_start(bp);
 	if (rc) {
@@ -2968,6 +3135,8 @@
 
 	netdev_tx_sent_queue(txq, skb->len);
 
+	skb_tx_timestamp(skb);
+
 	txdata->tx_pkt_prod++;
 	/*
 	 * Make sure that the BD data is updated before updating the producer
@@ -3084,7 +3253,8 @@
 	}
 
 #ifdef BCM_CNIC
-	if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) {
+	if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
+	    !is_zero_ether_addr(addr->sa_data)) {
 		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
 		return -EINVAL;
 	}
@@ -3181,7 +3351,7 @@
 		bnx2x_free_fp_mem_at(bp, i);
 }
 
-static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
+static void set_sb_shortcuts(struct bnx2x *bp, int index)
 {
 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
 	if (!CHIP_IS_E1x(bp)) {
@@ -3197,6 +3367,63 @@
 	}
 }
 
+/* Returns the number of actually allocated BDs */
+static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+			      int rx_ring_size)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 ring_prod, cqe_ring_prod;
+	int i, failure_cnt = 0;
+
+	fp->rx_comp_cons = 0;
+	cqe_ring_prod = ring_prod = 0;
+
+	/* This routine is called only during init, so
+	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
+	 */
+	for (i = 0; i < rx_ring_size; i++) {
+		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
+			failure_cnt++;
+			continue;
+		}
+		ring_prod = NEXT_RX_IDX(ring_prod);
+		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+		WARN_ON(ring_prod <= (i - failure_cnt));
+	}
+
+	if (failure_cnt)
+		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
+			  i - failure_cnt, fp->index);
+
+	fp->rx_bd_prod = ring_prod;
+	/* Limit the CQE producer by the CQE ring size */
+	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+			       cqe_ring_prod);
+	fp->rx_pkt = fp->rx_calls = 0;
+
+	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+
+	return i - failure_cnt;
+}
+
+static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
+{
+	int i;
+
+	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+		struct eth_rx_cqe_next_page *nextpg;
+
+		nextpg = (struct eth_rx_cqe_next_page *)
+			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+		nextpg->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+		nextpg->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+	}
+}
+
 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 {
 	union host_hc_status_block *sb;
@@ -3206,7 +3433,8 @@
 	int rx_ring_size = 0;
 
 #ifdef BCM_CNIC
-	if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) {
+	if (!bp->rx_ring_size &&
+	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
 		rx_ring_size = MIN_RX_SIZE_NONTPA;
 		bp->rx_ring_size = rx_ring_size;
 	} else
@@ -3528,8 +3756,6 @@
 	 */
 	dev->mtu = new_mtu;
 
-	bp->gro_check = bnx2x_need_gro_check(new_mtu);
-
 	return bnx2x_reload_if_running(dev);
 }
 
@@ -3687,9 +3913,9 @@
 			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
 }
 
-static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
-					     u8 fw_sb_id, u8 sb_index,
-					     u8 ticks)
+static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+				    u8 fw_sb_id, u8 sb_index,
+				    u8 ticks)
 {
 
 	u32 addr = BAR_CSTRORM_INTMEM +
@@ -3700,9 +3926,9 @@
 	   port, fw_sb_id, sb_index, ticks);
 }
 
-static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
-					     u16 fw_sb_id, u8 sb_index,
-					     u8 disable)
+static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+				    u16 fw_sb_id, u8 sb_index,
+				    u8 disable)
 {
 	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
 	u32 addr = BAR_CSTRORM_INTMEM +
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 5c27454..7cd99b7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -86,13 +86,15 @@
 void bnx2x_send_unload_done(struct bnx2x *bp);
 
 /**
- * bnx2x_config_rss_pf - configure RSS parameters.
+ * bnx2x_config_rss_pf - configure RSS parameters in a PF.
  *
  * @bp:			driver handle
+ * @rss_obj:		RSS object to use
  * @ind_table:		indirection table to configure
  * @config_hash:	re-configure RSS hash keys configuration
  */
-int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
+int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+			u8 *ind_table, bool config_hash);
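With the added rss_obj argument a caller now names the RSS object to program. For
the regular Ethernet path this patch also adds a bnx2x_config_rss_eth() wrapper
further down in this header that binds bp->rss_conf_obj, so a typical call site
(as in the ethtool indirection-table path later in this patch) reduces to:

	/* update the indirection table only, keep the current hash keys */
	return bnx2x_config_rss_eth(bp, ind_table, false);

Passing config_hash as true additionally re-randomizes the rss_key words, as the
bnx2x_config_rss_pf() body at the top of this patch shows.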
 
 /**
  * bnx2x__init_func_obj - init function object
@@ -485,7 +487,7 @@
  * fills msix_table, requests vectors, updates num_queues
  * according to number of available vectors.
  */
-int bnx2x_enable_msix(struct bnx2x *bp);
+int __devinit bnx2x_enable_msix(struct bnx2x *bp);
 
 /**
  * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -610,53 +612,6 @@
 	barrier();
 }
 
-static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
-					  u8 idu_sb_id, bool is_Pf)
-{
-	u32 data, ctl, cnt = 100;
-	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
-	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
-	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
-	u32 sb_bit =  1 << (idu_sb_id%32);
-	u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
-	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
-
-	/* Not supported in BC mode */
-	if (CHIP_INT_MODE_IS_BC(bp))
-		return;
-
-	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
-			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT)	|
-		IGU_REGULAR_CLEANUP_SET				|
-		IGU_REGULAR_BCLEANUP;
-
-	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
-	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
-	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
-
-	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
-			 data, igu_addr_data);
-	REG_WR(bp, igu_addr_data, data);
-	mmiowb();
-	barrier();
-	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
-			  ctl, igu_addr_ctl);
-	REG_WR(bp, igu_addr_ctl, ctl);
-	mmiowb();
-	barrier();
-
-	/* wait for clean up to finish */
-	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
-		msleep(20);
-
-
-	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
-		DP(NETIF_MSG_HW,
-		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
-			  idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
-	}
-}
-
 static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
 				   u8 storm, u16 index, u8 op, u8 update)
 {
@@ -843,7 +798,7 @@
 {
 	if (bp->flags & USING_MSIX_FLAG) {
 		pci_disable_msix(bp->pdev);
-		bp->flags &= ~USING_MSIX_FLAG;
+		bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
 	} else if (bp->flags & USING_MSI_FLAG) {
 		pci_disable_msi(bp->pdev);
 		bp->flags &= ~USING_MSI_FLAG;
@@ -883,66 +838,6 @@
 	bnx2x_clear_sge_mask_next_elems(fp);
 }
 
-static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
-				     struct bnx2x_fastpath *fp, u16 index)
-{
-	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
-	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
-	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
-	dma_addr_t mapping;
-
-	if (unlikely(page == NULL)) {
-		BNX2X_ERR("Can't alloc sge\n");
-		return -ENOMEM;
-	}
-
-	mapping = dma_map_page(&bp->pdev->dev, page, 0,
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		__free_pages(page, PAGES_PER_SGE_SHIFT);
-		BNX2X_ERR("Can't map sge\n");
-		return -ENOMEM;
-	}
-
-	sw_buf->page = page;
-	dma_unmap_addr_set(sw_buf, mapping, mapping);
-
-	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
-	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-	return 0;
-}
-
-static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
-				      struct bnx2x_fastpath *fp, u16 index)
-{
-	u8 *data;
-	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
-	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
-	dma_addr_t mapping;
-
-	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
-	if (unlikely(data == NULL))
-		return -ENOMEM;
-
-	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
-				 fp->rx_buf_size,
-				 DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		kfree(data);
-		BNX2X_ERR("Can't map rx data\n");
-		return -ENOMEM;
-	}
-
-	rx_buf->data = data;
-	dma_unmap_addr_set(rx_buf, mapping, mapping);
-
-	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-	return 0;
-}
-
 /* note that we are not allocating a new buffer,
  * we are just moving one from cons to prod
  * we are not creating a new mapping,
@@ -964,6 +859,19 @@
 
 /************************* Init ******************************************/
 
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+	return 2 * vn + BP_PORT(bp);
+}
+
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
+				       bool config_hash)
+{
+	return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
+				   config_hash);
+}
+
 /**
  * bnx2x_func_start - init function
  *
@@ -1027,66 +935,6 @@
 		bnx2x_free_rx_sge(bp, fp, i);
 }
 
-static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
-				       struct bnx2x_fastpath *fp, int last)
-{
-	int i;
-
-	for (i = 0; i < last; i++) {
-		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
-		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
-		u8 *data = first_buf->data;
-
-		if (data == NULL) {
-			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
-			continue;
-		}
-		if (tpa_info->tpa_state == BNX2X_TPA_START)
-			dma_unmap_single(&bp->pdev->dev,
-					 dma_unmap_addr(first_buf, mapping),
-					 fp->rx_buf_size, DMA_FROM_DEVICE);
-		kfree(data);
-		first_buf->data = NULL;
-	}
-}
-
-static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
-{
-	int i;
-
-	for (i = 1; i <= NUM_TX_RINGS; i++) {
-		struct eth_tx_next_bd *tx_next_bd =
-			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
-
-		tx_next_bd->addr_hi =
-			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
-				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
-		tx_next_bd->addr_lo =
-			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
-				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
-	}
-
-	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
-	txdata->tx_db.data.zero_fill1 = 0;
-	txdata->tx_db.data.prod = 0;
-
-	txdata->tx_pkt_prod = 0;
-	txdata->tx_pkt_cons = 0;
-	txdata->tx_bd_prod = 0;
-	txdata->tx_bd_cons = 0;
-	txdata->tx_pkt = 0;
-}
-
-static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
-{
-	int i;
-	u8 cos;
-
-	for_each_tx_queue(bp, i)
-		for_each_cos_in_tx_queue(&bp->fp[i], cos)
-			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
-}
-
 static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
 {
 	int i;
@@ -1104,80 +952,6 @@
 	}
 }
 
-static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
-{
-	int i;
-
-	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
-		struct eth_rx_sge *sge;
-
-		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
-		sge->addr_hi =
-			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
-			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-
-		sge->addr_lo =
-			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
-			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-	}
-}
-
-static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
-{
-	int i;
-	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
-		struct eth_rx_cqe_next_page *nextpg;
-
-		nextpg = (struct eth_rx_cqe_next_page *)
-			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
-		nextpg->addr_hi =
-			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
-				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-		nextpg->addr_lo =
-			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
-				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-	}
-}
-
-/* Returns the number of actually allocated BDs */
-static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
-				      int rx_ring_size)
-{
-	struct bnx2x *bp = fp->bp;
-	u16 ring_prod, cqe_ring_prod;
-	int i, failure_cnt = 0;
-
-	fp->rx_comp_cons = 0;
-	cqe_ring_prod = ring_prod = 0;
-
-	/* This routine is called only during fo init so
-	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
-	 */
-	for (i = 0; i < rx_ring_size; i++) {
-		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
-			failure_cnt++;
-			continue;
-		}
-		ring_prod = NEXT_RX_IDX(ring_prod);
-		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
-		WARN_ON(ring_prod <= (i - failure_cnt));
-	}
-
-	if (failure_cnt)
-		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
-			  i - failure_cnt, fp->index);
-
-	fp->rx_bd_prod = ring_prod;
-	/* Limit the CQE producer by the CQE ring size */
-	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
-			       cqe_ring_prod);
-	fp->rx_pkt = fp->rx_calls = 0;
-
-	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
-
-	return i - failure_cnt;
-}
-
 /* Statistics ID are global per chip/path, while Client IDs for E1x are per
  * port.
  */
@@ -1406,30 +1180,6 @@
 		REG_WR(bp, addr + (i * 4), data[i]);
 }
 
-static inline void storm_memset_func_cfg(struct bnx2x *bp,
-				struct tstorm_eth_function_common_config *tcfg,
-				u16 abs_fid)
-{
-	size_t size = sizeof(struct tstorm_eth_function_common_config);
-
-	u32 addr = BAR_TSTRORM_INTMEM +
-			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
-}
-
-static inline void storm_memset_cmng(struct bnx2x *bp,
-				struct cmng_struct_per_port *cmng,
-				u8 port)
-{
-	size_t size = sizeof(struct cmng_struct_per_port);
-
-	u32 addr = BAR_XSTRORM_INTMEM +
-			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
-}
-
 /**
  * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
  *
@@ -1512,93 +1262,6 @@
 	 */
 	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
 }
-
-static inline bool bnx2x_need_gro_check(int mtu)
-{
-	return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
-		(SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
-}
-
-/**
- * bnx2x_bz_fp - zero content of the fastpath structure.
- *
- * @bp:		driver handle
- * @index:	fastpath index to be zeroed
- *
- * Makes sure the contents of the bp->fp[index].napi is kept
- * intact.
- */
-static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
-{
-	struct bnx2x_fastpath *fp = &bp->fp[index];
-	struct napi_struct orig_napi = fp->napi;
-	/* bzero bnx2x_fastpath contents */
-	if (bp->stats_init)
-		memset(fp, 0, sizeof(*fp));
-	else {
-		/* Keep Queue statistics */
-		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
-		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
-
-		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
-					  GFP_KERNEL);
-		if (tmp_eth_q_stats)
-			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
-			       sizeof(struct bnx2x_eth_q_stats));
-
-		tmp_eth_q_stats_old =
-			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
-				GFP_KERNEL);
-		if (tmp_eth_q_stats_old)
-			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
-			       sizeof(struct bnx2x_eth_q_stats_old));
-
-		memset(fp, 0, sizeof(*fp));
-
-		if (tmp_eth_q_stats) {
-			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
-				   sizeof(struct bnx2x_eth_q_stats));
-			kfree(tmp_eth_q_stats);
-		}
-
-		if (tmp_eth_q_stats_old) {
-			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
-			       sizeof(struct bnx2x_eth_q_stats_old));
-			kfree(tmp_eth_q_stats_old);
-		}
-
-	}
-
-	/* Restore the NAPI object as it has been already initialized */
-	fp->napi = orig_napi;
-
-	fp->bp = bp;
-	fp->index = index;
-	if (IS_ETH_FP(fp))
-		fp->max_cos = bp->max_cos;
-	else
-		/* Special queues support only one CoS */
-		fp->max_cos = 1;
-
-	/*
-	 * set the tpa flag for each queue. The tpa flag determines the queue
-	 * minimal size so it must be set prior to queue memory allocation
-	 */
-	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
-				  (bp->flags & GRO_ENABLE_FLAG &&
-				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
-	if (bp->flags & TPA_ENABLE_FLAG)
-		fp->mode = TPA_MODE_LRO;
-	else if (bp->flags & GRO_ENABLE_FLAG)
-		fp->mode = TPA_MODE_GRO;
-
-#ifdef BCM_CNIC
-	/* We don't want TPA on an FCoE L2 ring */
-	if (IS_FCOE_FP(fp))
-		fp->disable_tpa = 1;
-#endif
-}
-
 #ifdef BCM_CNIC
 /**
  * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
@@ -1608,11 +1271,6 @@
  */
 void bnx2x_get_iscsi_info(struct bnx2x *bp);
 #endif
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
-	return 2 * vn + BP_PORT(bp);
-}
 
 /**
  * bnx2x_link_sync_notify - send notification to other functions.
@@ -1667,7 +1325,8 @@
 	if (is_valid_ether_addr(addr))
 		return true;
 #ifdef BCM_CNIC
-	if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp))
+	if (is_zero_ether_addr(addr) &&
+	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
 		return true;
 #endif
 	return false;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 2cc0a17..ddc18ee 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -22,13 +22,10 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/crc32.h>
-
-
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 #include "bnx2x_dump.h"
 #include "bnx2x_init.h"
-#include "bnx2x_sp.h"
 
 /* Note: in the format strings below %s is replaced by the queue-name which is
  * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -595,8 +592,8 @@
 #define IS_E3_ONLINE(info)	(((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
 #define IS_E3B0_ONLINE(info)	(((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
 
-static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
-				       const struct reg_addr *reg_info)
+static bool bnx2x_is_reg_online(struct bnx2x *bp,
+				const struct reg_addr *reg_info)
 {
 	if (CHIP_IS_E1(bp))
 		return IS_E1_ONLINE(reg_info->info);
@@ -613,7 +610,7 @@
 }
 
 /******* Paged registers info selectors ********/
-static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
+static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return page_vals_e2;
@@ -623,7 +620,7 @@
 		return NULL;
 }
 
-static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
+static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return PAGE_MODE_VALUES_E2;
@@ -633,7 +630,7 @@
 		return 0;
 }
 
-static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
+static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return page_write_regs_e2;
@@ -643,7 +640,7 @@
 		return NULL;
 }
 
-static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
+static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return PAGE_WRITE_REGS_E2;
@@ -653,7 +650,7 @@
 		return 0;
 }
 
-static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
+static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return page_read_regs_e2;
@@ -663,7 +660,7 @@
 		return NULL;
 }
 
-static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
+static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return PAGE_READ_REGS_E2;
@@ -673,7 +670,7 @@
 		return 0;
 }
 
-static inline int __bnx2x_get_regs_len(struct bnx2x *bp)
+static int __bnx2x_get_regs_len(struct bnx2x *bp)
 {
 	int num_pages = __bnx2x_get_page_reg_num(bp);
 	int page_write_num = __bnx2x_get_page_write_num(bp);
@@ -718,7 +715,7 @@
  * ("read address"). There may be more than one write address per "page" and
  * more than one read address per write address.
  */
-static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
+static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
 {
 	u32 i, j, k, n;
 	/* addresses of the paged registers */
@@ -747,7 +744,7 @@
 	}
 }
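The comment above describes the paged-register scheme, but this hunk only shows
the selector helpers changing; the walk itself is roughly the following sketch
(not the in-tree body verbatim, and the struct reg_addr field names used here
are assumptions):

static void example_read_pages_regs(struct bnx2x *bp, u32 *p)
{
	const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
	u32 num_pages = __bnx2x_get_page_reg_num(bp);
	const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
	u32 write_num = __bnx2x_get_page_write_num(bp);
	const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
	u32 read_num = __bnx2x_get_page_read_num(bp);
	u32 i, j, k, n;

	for (i = 0; i < num_pages; i++)
		for (j = 0; j < write_num; j++) {
			/* select page i on this write address */
			REG_WR(bp, write_addr[j], page_addr[i]);
			/* dump every read range that is online on this chip */
			for (k = 0; k < read_num; k++)
				if (bnx2x_is_reg_online(bp, &read_addr[k]))
					for (n = 0; n < read_addr[k].size; n++)
						*p++ = REG_RD(bp,
							read_addr[k].addr + n * 4);
		}
}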
 
-static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
 {
 	u32 i, j;
 
@@ -1433,7 +1430,7 @@
 	else
 		ering->rx_pending = MAX_RX_AVAIL;
 
-	ering->tx_max_pending = MAX_TX_AVAIL;
+	ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
 	ering->tx_pending = bp->tx_ring_size;
 }
 
@@ -1451,7 +1448,7 @@
 	if ((ering->rx_pending > MAX_RX_AVAIL) ||
 	    (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
 						    MIN_RX_SIZE_TPA)) ||
-	    (ering->tx_pending > MAX_TX_AVAIL) ||
+	    (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
 	    (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
 		DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
 		return -EINVAL;
@@ -2212,7 +2209,7 @@
 /* ethtool statistics are displayed for all regular ethernet queues and the
  * fcoe L2 queue if not disabled
  */
-static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
+static int bnx2x_num_stat_queues(struct bnx2x *bp)
 {
 	return BNX2X_NUM_ETH_QUEUES(bp);
 }
@@ -2396,10 +2393,7 @@
 
 static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
 {
-	struct bnx2x *bp = netdev_priv(dev);
-
-	return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
-		0 : T_ETH_INDIRECTION_TABLE_SIZE);
+	return T_ETH_INDIRECTION_TABLE_SIZE;
 }
 
 static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
@@ -2445,7 +2439,7 @@
 		ind_table[i] = indir[i] + bp->fp->cl_id;
 	}
 
-	return bnx2x_config_rss_pf(bp, ind_table, false);
+	return bnx2x_config_rss_eth(bp, ind_table, false);
 }
 
 static const struct ethtool_ops bnx2x_ethtool_ops = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index b9b2633..426f77a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -387,7 +387,7 @@
 
 #define STATS_QUERY_CMD_COUNT 16
 
-#define NIV_LIST_TABLE_SIZE 4096
+#define AFEX_LIST_TABLE_SIZE 4096
 
 #define INVALID_VNIC_ID	0xFF
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index dbff591..a440a8b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -833,6 +833,7 @@
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF      0x00000100
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4          0x00000200
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT  0x00000300
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE      0x00000400
 
 	/* The interval in seconds between sending LLDP packets. Set to zero
 	   to disable the feature */
@@ -1235,6 +1236,8 @@
 	#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL     0x00050006
 	#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL  0xa1000000
 	#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL  0x00050234
+	#define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED        0xa2000000
+	#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED        0x00070002
 	#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
 	#define REQ_BC_VER_4_PFC_STATS_SUPPORTED        0x00070201
 
@@ -1242,6 +1245,13 @@
 	#define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
 
 	#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+
+	#define DRV_MSG_CODE_AFEX_DRIVER_SETMAC         0xd0000000
+	#define DRV_MSG_CODE_AFEX_LISTGET_ACK           0xd1000000
+	#define DRV_MSG_CODE_AFEX_LISTSET_ACK           0xd2000000
+	#define DRV_MSG_CODE_AFEX_STATSGET_ACK          0xd3000000
+	#define DRV_MSG_CODE_AFEX_VIFSET_ACK            0xd4000000
+
 	#define DRV_MSG_CODE_DRV_INFO_ACK               0xd8000000
 	#define DRV_MSG_CODE_DRV_INFO_NACK              0xd9000000
 
@@ -1299,6 +1309,14 @@
 	#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG      0xa0200000
 	#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED     0xa0300000
 	#define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+	#define FW_MSG_CODE_HW_SET_INVALID_IMAGE        0xb0100000
+
+	#define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE     0xd0100000
+	#define FW_MSG_CODE_AFEX_LISTGET_ACK            0xd1100000
+	#define FW_MSG_CODE_AFEX_LISTSET_ACK            0xd2100000
+	#define FW_MSG_CODE_AFEX_STATSGET_ACK           0xd3100000
+	#define FW_MSG_CODE_AFEX_VIFSET_ACK             0xd4100000
+
 	#define FW_MSG_CODE_DRV_INFO_ACK                0xd8100000
 	#define FW_MSG_CODE_DRV_INFO_NACK               0xd9100000
 
@@ -1357,6 +1375,12 @@
 
 	#define DRV_STATUS_DCBX_EVENT_MASK              0x000f0000
 	#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS     0x00010000
+	#define DRV_STATUS_AFEX_EVENT_MASK              0x03f00000
+	#define DRV_STATUS_AFEX_LISTGET_REQ             0x00100000
+	#define DRV_STATUS_AFEX_LISTSET_REQ             0x00200000
+	#define DRV_STATUS_AFEX_STATSGET_REQ            0x00400000
+	#define DRV_STATUS_AFEX_VIFSET_REQ              0x00800000
+
 	#define DRV_STATUS_DRV_INFO_REQ                 0x04000000
 
 	u32 virt_mac_upper;
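The new bits describe a request/acknowledge handshake with the MCP: management
raises one of the DRV_STATUS_AFEX_*_REQ bits, the driver services the request and
acknowledges it over the driver/firmware mailbox with the matching
DRV_MSG_CODE_AFEX_*_ACK code added above (the firmware answers with the
FW_MSG_CODE_AFEX_*_ACK values). A hedged sketch of such a dispatcher, assuming the
driver's existing bnx2x_fw_command() mailbox helper and with placeholder handler
bodies:

/* Sketch only: dispatch AFEX requests raised by the MCP; drv_status is the
 * value the attention handler read from the function mailbox in shmem.
 */
static void example_afex_handle_mcp_req(struct bnx2x *bp, u32 drv_status)
{
	if (!(drv_status & DRV_STATUS_AFEX_EVENT_MASK))
		return;

	if (drv_status & DRV_STATUS_AFEX_LISTGET_REQ)
		/* ... build the VIF list reply, then ack ... */
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, 0);

	if (drv_status & DRV_STATUS_AFEX_VIFSET_REQ)
		/* ... apply the new VIF settings, then ack ... */
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
}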
@@ -1448,7 +1472,26 @@
 	#define FUNC_MF_CFG_E1HOV_TAG_SHIFT             0
 	#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT         FUNC_MF_CFG_E1HOV_TAG_MASK
 
-	u32 reserved[2];
+	/* afex default VLAN ID - 12 bits */
+	#define FUNC_MF_CFG_AFEX_VLAN_MASK              0x0fff0000
+	#define FUNC_MF_CFG_AFEX_VLAN_SHIFT             16
+
+	u32 afex_config;
+	#define FUNC_MF_CFG_AFEX_COS_FILTER_MASK                     0x000000ff
+	#define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT                    0
+	#define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK                    0x0000ff00
+	#define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT                   8
+	#define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL                     0x00000100
+	#define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK                      0x000f0000
+	#define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT                     16
+
+	u32 reserved;
+};
+
+enum mf_cfg_afex_vlan_mode {
+	FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
+	FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
+	FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
 };
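FUNC_MF_CFG_AFEX_VLAN_MASK/SHIFT carve a 12-bit default VLAN out of the
per-function mf-cfg word; decoding it is a plain mask-and-shift. A small
illustrative helper (not part of this patch; which shmem field the word is read
from is left to the driver):

/* illustrative only: decode the 12-bit AFEX default VLAN */
static inline u16 example_mf_cfg_afex_vlan(u32 mf_cfg_word)
{
	return (mf_cfg_word & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
	       FUNC_MF_CFG_AFEX_VLAN_SHIFT;
}

Until the MCP hands out a real value, the load path in this patch parks
bp->afex_def_vlan_tag at -1 (see the bnx2x_nic_load hunk earlier).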
 
 /* This structure is not applicable and should not be accessed on 57711 */
@@ -1945,18 +1988,29 @@
 
 	u32 nvm_retain_bitmap_addr;			/* 0x0070 */
 
-	u32	reserved1;	/* 0x0074 */
+	/* afex support of that driver */
+	u32 afex_driver_support;			/* 0x0074 */
+	#define SHMEM_AFEX_VERSION_MASK                  0x100f
+	#define SHMEM_AFEX_SUPPORTED_VERSION_ONE         0x1001
+	#define SHMEM_AFEX_REDUCED_DRV_LOADED            0x8000
 
-	u32	reserved2[E2_FUNC_MAX];
+	/* driver receives addr in scratchpad to which it should respond */
+	u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX];
 
-	u32	reserved3[E2_FUNC_MAX];/* 0x0088 */
-	u32	reserved4[E2_FUNC_MAX];/* 0x0098 */
+	/* generic params from MCP to driver (value depends on the msg sent
+	 * to the driver)
+	 */
+	u32 afex_param1_to_driver[E2_FUNC_MAX];		/* 0x0088 */
+	u32 afex_param2_to_driver[E2_FUNC_MAX];		/* 0x0098 */
 
 	u32 swim_base_addr;				/* 0x0108 */
 	u32 swim_funcs;
 	u32 swim_main_cb;
 
-	u32	reserved5[2];
+	/* bitmap notifying which VIF profiles stored in nvram are enabled by
+	 * switch
+	 */
+	u32 afex_profiles_enabled[2];
 
 	/* generic flags controlled by the driver */
 	u32 drv_flags;
@@ -2696,10 +2750,51 @@
 	struct fcoe_stats_info	fcoe_stat;
 	struct iscsi_stats_info	iscsi_stat;
 };
+
+/* stats collected for afex.
+ * NOTE: structure is exactly as expected to be received by the switch.
+ *       order must remain exactly as is unless protocol changes !
+ */
+struct afex_stats {
+	u32 tx_unicast_frames_hi;
+	u32 tx_unicast_frames_lo;
+	u32 tx_unicast_bytes_hi;
+	u32 tx_unicast_bytes_lo;
+	u32 tx_multicast_frames_hi;
+	u32 tx_multicast_frames_lo;
+	u32 tx_multicast_bytes_hi;
+	u32 tx_multicast_bytes_lo;
+	u32 tx_broadcast_frames_hi;
+	u32 tx_broadcast_frames_lo;
+	u32 tx_broadcast_bytes_hi;
+	u32 tx_broadcast_bytes_lo;
+	u32 tx_frames_discarded_hi;
+	u32 tx_frames_discarded_lo;
+	u32 tx_frames_dropped_hi;
+	u32 tx_frames_dropped_lo;
+
+	u32 rx_unicast_frames_hi;
+	u32 rx_unicast_frames_lo;
+	u32 rx_unicast_bytes_hi;
+	u32 rx_unicast_bytes_lo;
+	u32 rx_multicast_frames_hi;
+	u32 rx_multicast_frames_lo;
+	u32 rx_multicast_bytes_hi;
+	u32 rx_multicast_bytes_lo;
+	u32 rx_broadcast_frames_hi;
+	u32 rx_broadcast_frames_lo;
+	u32 rx_broadcast_bytes_hi;
+	u32 rx_broadcast_bytes_lo;
+	u32 rx_frames_discarded_hi;
+	u32 rx_frames_discarded_lo;
+	u32 rx_frames_dropped_hi;
+	u32 rx_frames_dropped_lo;
+};
+
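Every counter in struct afex_stats is split into _hi/_lo 32-bit halves so the
layout matches the switch-facing wire format noted above; consumers recombine the
halves into 64-bit values. A minimal illustration (the helper name is ours, not
the driver's):

static inline u64 example_afex_stat64(u32 hi, u32 lo)
{
	/* fold a hi/lo pair from struct afex_stats into one 64-bit counter */
	return ((u64)hi << 32) | lo;
}

/* usage, with stats being a struct afex_stats *:
 *	u64 tx_ucast = example_afex_stat64(stats->tx_unicast_frames_hi,
 *					   stats->tx_unicast_frames_lo);
 */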
 #define BCM_5710_FW_MAJOR_VERSION			7
 #define BCM_5710_FW_MINOR_VERSION			2
-#define BCM_5710_FW_REVISION_VERSION		16
-#define BCM_5710_FW_ENGINEERING_VERSION		0
+#define BCM_5710_FW_REVISION_VERSION			51
+#define BCM_5710_FW_ENGINEERING_VERSION			0
 #define BCM_5710_FW_COMPILE_FLAGS			1
 
 
@@ -3389,7 +3484,7 @@
 #define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
 #define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
 	u8 default_vlan_flg;
-	u8 reserved2;
+	u8 force_default_pri_flg;
 	__le32 reserved3;
 };
 
@@ -4375,8 +4470,21 @@
 
 
 /*
+ * The data the afex vif list ramrod needs
+ */
+struct afex_vif_list_ramrod_data {
+	u8 afex_vif_list_command;
+	u8 func_bit_map;
+	__le16 vif_list_index;
+	u8 func_to_clear;
+	u8 echo;
+	__le16 reserved1;
+};
+
+
+/*
  * cfc delete event data
-*/
+ */
 struct cfc_del_event_data {
 	u32 cid;
 	u32 reserved0;
@@ -4448,6 +4556,65 @@
 	struct cmng_flags_per_port flags;
 };
 
+/*
+ * a single rate shaping counter. can be used as protocol or vnic counter
+ */
+struct rate_shaping_counter {
+	u32 quota;
+#if defined(__BIG_ENDIAN)
+	u16 __reserved0;
+	u16 rate;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rate;
+	u16 __reserved0;
+#endif
+};
+
+/*
+ * per-vnic rate shaping variables
+ */
+struct rate_shaping_vars_per_vn {
+	struct rate_shaping_counter vn_counter;
+};
+
+/*
+ * per-vnic fairness variables
+ */
+struct fairness_vars_per_vn {
+	u32 cos_credit_delta[MAX_COS_NUMBER];
+	u32 vn_credit_delta;
+	u32 __reserved0;
+};
+
+/*
+ * cmng vnic init state
+ */
+struct cmng_vnic {
+	struct rate_shaping_vars_per_vn vnic_max_rate[4];
+	struct fairness_vars_per_vn vnic_min_rate[4];
+};
+
+/*
+ * cmng port init state
+ */
+struct cmng_init {
+	struct cmng_struct_per_port port;
+	struct cmng_vnic vnic;
+};
+
+
+/*
+ * driver parameters for congestion management init, all rates are in Mbps
+ */
+struct cmng_init_input {
+	u32 port_rate;
+	u16 vnic_min_rate[4];
+	u16 vnic_max_rate[4];
+	u16 cos_min_rate[MAX_COS_NUMBER];
+	u16 cos_to_pause_mask[MAX_COS_NUMBER];
+	struct cmng_flags_per_port flags;
+};
+
 
 /*
  * Protocol-common command ID for slow path elements
@@ -4462,7 +4629,7 @@
 	RAMROD_CMD_ID_COMMON_STAT_QUERY,
 	RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
 	RAMROD_CMD_ID_COMMON_START_TRAFFIC,
-	RAMROD_CMD_ID_COMMON_RESERVED1,
+	RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
 	MAX_COMMON_SPQE_CMD_ID
 };
 
@@ -4670,6 +4837,17 @@
 };
 
 /*
+ * vif list event data
+ */
+struct vif_list_event_data {
+	u8 func_bit_map;
+	u8 echo;
+	__le16 reserved0;
+	__le32 reserved1;
+	__le32 reserved2;
+};
+
+/*
  * union for all event ring message types
  */
 union event_data {
@@ -4678,6 +4856,7 @@
 	struct cfc_del_event_data cfc_del_event;
 	struct vf_flr_event_data vf_flr_event;
 	struct malicious_vf_event_data malicious_vf_event;
+	struct vif_list_event_data vif_list_event;
 };
 
 
@@ -4743,7 +4922,7 @@
 	EVENT_RING_OPCODE_FORWARD_SETUP,
 	EVENT_RING_OPCODE_RSS_UPDATE_RULES,
 	EVENT_RING_OPCODE_FUNCTION_UPDATE,
-	EVENT_RING_OPCODE_RESERVED1,
+	EVENT_RING_OPCODE_AFEX_VIF_LISTS,
 	EVENT_RING_OPCODE_SET_MAC,
 	EVENT_RING_OPCODE_CLASSIFICATION_RULES,
 	EVENT_RING_OPCODE_FILTERS_RULES,
@@ -4763,16 +4942,6 @@
 
 
 /*
- * per-vnic fairness variables
- */
-struct fairness_vars_per_vn {
-	u32 cos_credit_delta[MAX_COS_NUMBER];
-	u32 vn_credit_delta;
-	u32 __reserved0;
-};
-
-
-/*
  * Priority and cos
  */
 struct priority_cos {
@@ -4800,12 +4969,27 @@
 struct function_start_data {
 	__le16 function_mode;
 	__le16 sd_vlan_tag;
-	u16 reserved;
+	__le16 vif_id;
 	u8 path_id;
 	u8 network_cos_mode;
 };
 
 
+struct function_update_data {
+	u8 vif_id_change_flg;
+	u8 afex_default_vlan_change_flg;
+	u8 allowed_priorities_change_flg;
+	u8 network_cos_mode_change_flg;
+	__le16 vif_id;
+	__le16 afex_default_vlan;
+	u8 allowed_priorities;
+	u8 network_cos_mode;
+	u8 lb_mode_en;
+	u8 reserved0;
+	__le32 reserved1;
+};
+
+
 /*
  * FW version stored in the Xstorm RAM
  */
@@ -5003,7 +5187,7 @@
 	SINGLE_FUNCTION,
 	MULTI_FUNCTION_SD,
 	MULTI_FUNCTION_SI,
-	MULTI_FUNCTION_RESERVED,
+	MULTI_FUNCTION_AFEX,
 	MAX_MF_MODE
 };
 
@@ -5128,6 +5312,7 @@
 	u8 protocol_data[8];
 	struct regpair phy_address;
 	struct regpair mac_config_addr;
+	struct afex_vif_list_ramrod_data afex_vif_list_data;
 };
 
 /*
@@ -5140,29 +5325,6 @@
 
 
 /*
- * a single rate shaping counter. can be used as protocol or vnic counter
- */
-struct rate_shaping_counter {
-	u32 quota;
-#if defined(__BIG_ENDIAN)
-	u16 __reserved0;
-	u16 rate;
-#elif defined(__LITTLE_ENDIAN)
-	u16 rate;
-	u16 __reserved0;
-#endif
-};
-
-
-/*
- * per-vnic rate shaping variables
- */
-struct rate_shaping_vars_per_vn {
-	struct rate_shaping_counter vn_counter;
-};
-
-
-/*
  * The send queue element
  */
 struct slow_path_element {
@@ -5330,6 +5492,18 @@
 
 
 /*
+ * vif_list_rule_kind
+ */
+enum vif_list_rule_kind {
+	VIF_LIST_RULE_SET,
+	VIF_LIST_RULE_GET,
+	VIF_LIST_RULE_CLEAR_ALL,
+	VIF_LIST_RULE_CLEAR_FUNC,
+	MAX_VIF_LIST_RULE_KIND
+};
+
+
+/*
  * zone A per-queue data
  */
 struct xstorm_queue_zone_data {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 29f5c3c..559c396 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -125,7 +125,7 @@
 	MODE_MF                        = 0x00000100,
 	MODE_MF_SD                     = 0x00000200,
 	MODE_MF_SI                     = 0x00000400,
-	MODE_MF_NIV                    = 0x00000800,
+	MODE_MF_AFEX                   = 0x00000800,
 	MODE_E3_A0                     = 0x00001000,
 	MODE_E3_B0                     = 0x00002000,
 	MODE_COS3                      = 0x00004000,
@@ -241,7 +241,8 @@
 			REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
 
 			/* set/clear queue bit in command-queue bit map
-			(E2/E3A0 only, valid COS values are 0/1) */
+			 * (E2/E3A0 only, valid COS values are 0/1)
+			 */
 			if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
 				reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
 				reg_bit_map = REG_RD(bp, reg_addr);
@@ -277,7 +278,215 @@
 }
 
 
-/* Returns the index of start or end of a specific block stage in ops array*/
+/* congestion management port init API description
+ * the API works as follows:
+ * the driver fills a cmng_init_input struct; the port init function prepares
+ * the required internal ram structure (cmng_init) and passes it back to the
+ * driver, which then writes it into the internal ram.
+ *
+ * IMPORTANT REMARKS:
+ * 1. the cmng_init struct does not represent the contiguous internal ram
+ *    structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
+ *    offset in order to write the port sub struct and the
+ *    PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
+ *    words - don't use memcpy!).
+ * 2. although the cmng_init struct is filled for the maximal vnic number
+ *    possible, the driver should only write the valid vnics into the internal
+ *    ram according to the appropriate port mode.
+ */
+#define BITS_TO_BYTES(x) ((x)/8)
+
+/* CMNG constants, as derived from system spec calculations */
+
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
+
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer
+ */
+#define QM_ARB_BYTES 160000
+
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+
+/* how many bytes above threshold for
+ * the minimal credit of Min algorithm
+ */
+#define MIN_ABOVE_THRESH 32768
+
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair
+ */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+
+/* Memory of fairness algorithm - 2 cycles */
+#define FAIR_MEM 2
+#define SAFC_TIMEOUT_USEC 52
+
+#define SDM_TICKS 4
+
+
+static inline void bnx2x_init_max(const struct cmng_init_input *input_data,
+				  u32 r_param, struct cmng_init *ram_data)
+{
+	u32 vnic;
+	struct cmng_vnic *vdata = &ram_data->vnic;
+	struct cmng_struct_per_port *pdata = &ram_data->port;
+	/* rate shaping per-port variables
+	 * 100 micro seconds in SDM ticks = 25
+	 * since each tick is 4 microSeconds
+	 */
+
+	pdata->rs_vars.rs_periodic_timeout =
+	RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
+
+	/* this is the threshold below which no timer arming will occur.
+	 * the 1.25 coefficient makes the threshold a little bigger
+	 * than the real time to compensate for timer inaccuracy
+	 */
+	pdata->rs_vars.rs_threshold =
+	(5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
+
+	/* rate shaping per-vnic variables */
+	for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+		/* global vnic counter */
+		vdata->vnic_max_rate[vnic].vn_counter.rate =
+		input_data->vnic_max_rate[vnic];
+		/* maximal Mbps for this vnic
+		 * the quota in each timer period - number of bytes
+		 * transmitted in this period
+		 */
+		vdata->vnic_max_rate[vnic].vn_counter.quota =
+			RS_PERIODIC_TIMEOUT_USEC *
+			(u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
+	}
+
+}
+
+static inline void bnx2x_init_min(const struct cmng_init_input *input_data,
+				  u32 r_param, struct cmng_init *ram_data)
+{
+	u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
+	struct cmng_vnic *vdata = &ram_data->vnic;
+	struct cmng_struct_per_port *pdata = &ram_data->port;
+
+	/* this is the resolution of the fairness timer */
+	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+
+	/* fairness per-port variables
+	 * for 10G it is 1000usec. for 1G it is 10000usec.
+	 */
+	tFair = T_FAIR_COEF / input_data->port_rate;
+
+	/* this is the threshold below which we won't arm the timer anymore */
+	pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
+
+	/* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
+	 * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
+	 */
+	pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
+
+	/* since each tick is 4 microSeconds */
+	pdata->fair_vars.fairness_timeout =
+				fair_periodic_timeout_usec / SDM_TICKS;
+
+	/* calculate sum of weights */
+	vnicWeightSum = 0;
+
+	for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++)
+		vnicWeightSum += input_data->vnic_min_rate[vnic];
+
+	/* global vnic counter */
+	if (vnicWeightSum > 0) {
+		/* fairness per-vnic variables */
+		for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+			/* this is the credit for each period of the fairness
+			 * algorithm - number of bytes in T_FAIR (this vnic
+			 * share of the port rate)
+			 */
+			vdata->vnic_min_rate[vnic].vn_credit_delta =
+				(u32)input_data->vnic_min_rate[vnic] * 100 *
+				(T_FAIR_COEF / (8 * 100 * vnicWeightSum));
+			if (vdata->vnic_min_rate[vnic].vn_credit_delta <
+			    pdata->fair_vars.fair_threshold +
+			    MIN_ABOVE_THRESH) {
+				vdata->vnic_min_rate[vnic].vn_credit_delta =
+					pdata->fair_vars.fair_threshold +
+					MIN_ABOVE_THRESH;
+			}
+		}
+	}
+}
+
+static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
+				     u32 r_param, struct cmng_init *ram_data)
+{
+	u32 vnic, cos;
+	u32 cosWeightSum = 0;
+	struct cmng_vnic *vdata = &ram_data->vnic;
+	struct cmng_struct_per_port *pdata = &ram_data->port;
+
+	for (cos = 0; cos < MAX_COS_NUMBER; cos++)
+		cosWeightSum += input_data->cos_min_rate[cos];
+
+	if (cosWeightSum > 0) {
+
+		for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
+			/* Since cos and vnic shouldn't work together, the rate
+			 * to divide between the coses is the port rate.
+			 */
+			u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
+			for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
+				/* this is the credit for each period of
+				 * the fairness algorithm - number of bytes
+				 * in T_FAIR (this cos share of the vnic rate)
+				 */
+				ccd[cos] =
+				    (u32)input_data->cos_min_rate[cos] * 100 *
+				    (T_FAIR_COEF / (8 * 100 * cosWeightSum));
+				 if (ccd[cos] < pdata->fair_vars.fair_threshold
+						+ MIN_ABOVE_THRESH) {
+					ccd[cos] =
+					    pdata->fair_vars.fair_threshold +
+					    MIN_ABOVE_THRESH;
+				}
+			}
+		}
+	}
+}
+
+static inline void bnx2x_init_safc(const struct cmng_init_input *input_data,
+				   struct cmng_init *ram_data)
+{
+	/* in microSeconds */
+	ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
+}
+
+/* Congestion management port init */
+static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data,
+				   struct cmng_init *ram_data)
+{
+	u32 r_param;
+	memset(ram_data, 0, sizeof(struct cmng_init));
+
+	ram_data->port.flags = input_data->flags;
+
+	/* number of bytes transmitted at a rate of 10Gbps
+	 * in one usec = 1.25KB.
+	 */
+	r_param = BITS_TO_BYTES(input_data->port_rate);
+	bnx2x_init_max(input_data, r_param, ram_data);
+	bnx2x_init_min(input_data, r_param, ram_data);
+	bnx2x_init_fw_wrr(input_data, r_param, ram_data);
+	bnx2x_init_safc(input_data, ram_data);
+}
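Putting the pieces together, the API description at the top of this block boils
down to: fill a cmng_init_input, let bnx2x_init_cmng() compute the RAM image, then
write the port and vnic sub-structs at their own internal-RAM offsets rather than
memcpy()ing struct cmng_init as a whole. A hedged usage sketch follows; the
function name is a placeholder, __storm_memset_struct(), BAR_XSTRORM_INTMEM and
XSTORM_CMNG_PER_PORT_VARS_OFFSET() appear elsewhere in this patch, and for a 10G
port BITS_TO_BYTES(10000) gives r_param = 1250, i.e. the 1.25KB per usec the
comment above mentions.

/* Sketch: build the cmng RAM image and write the port sub-struct */
static void example_cmng_port_init(struct bnx2x *bp, u32 link_speed_mbps)
{
	struct cmng_init_input input;
	struct cmng_init ram_data;
	int vn;

	memset(&input, 0, sizeof(input));
	input.port_rate = link_speed_mbps;
	for (vn = 0; vn < BNX2X_PORT2_MODE_NUM_VNICS; vn++) {
		input.vnic_min_rate[vn] = DEF_MIN_RATE;	/* Mbps */
		input.vnic_max_rate[vn] = link_speed_mbps;
	}

	/* compute the RAM image: port + per-vnic rate/fairness vars */
	bnx2x_init_cmng(&input, &ram_data);

	/* write the port sub-struct at its own offset ... */
	__storm_memset_struct(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)),
			      sizeof(ram_data.port), (u32 *)&ram_data.port);

	/* ... and each valid vnic's sub-structs at their per-VN offsets
	 * (omitted here); never a single memcpy of ram_data
	 */
}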
+
+
+
+/* Returns the index of start or end of a specific block stage in ops array */
 #define BLOCK_OPS_IDX(block, stage, end) \
 			(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
 
@@ -499,9 +708,7 @@
 	bnx2x_set_mcp_parity(bp, false);
 }
 
-/**
- * Clear the parity error status registers.
- */
+/* Clear the parity error status registers. */
 static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
 {
 	int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 64392ec..a3fb721 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -138,7 +138,6 @@
 
 
 
-/* */
 #define SFP_EEPROM_CON_TYPE_ADDR		0x2
 	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
 	#define SFP_EEPROM_CON_TYPE_VAL_COPPER	0x21
@@ -404,8 +403,7 @@
 
 	DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
 
-	/*
-	 * mapping between entry  priority to client number (0,1,2 -debug and
+	/* Mapping between entry priority and client number (0,1,2 - debug and
 	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
 	 * 3bits client num.
 	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
@@ -413,8 +411,7 @@
 	 */
 
 	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
-	/*
-	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
 	 * COS0 entry, 4 - COS1 entry.
 	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -425,13 +422,11 @@
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
 	/* defines which entries (clients) are subjected to WFQ arbitration */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-	/*
-	 * For strict priority entries defines the number of consecutive
+	/* For strict priority entries defines the number of consecutive
 	 * slots for the highest priority.
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/*
-	 * mapping between the CREDIT_WEIGHT registers and actual client
+	/* mapping between the CREDIT_WEIGHT registers and actual client
 	 * numbers
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
@@ -443,8 +438,7 @@
 	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
 	/* ETS mode disable */
 	REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
-	/*
-	 * If ETS mode is enabled (there is no strict priority) defines a WFQ
+	/* If ETS mode is enabled (there is no strict priority) defines a WFQ
 	 * weight for COS0/COS1.
 	 */
 	REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
@@ -471,10 +465,9 @@
 			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
 	} else
 		min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
-	/**
-	 *  If the link isn't up (static configuration for example ) The
-	 *  link will be according to 20GBPS.
-	*/
+	/* If the link isn't up (static configuration for example ) The
+	 * link will be according to 20GBPS.
+	 */
 	return min_w_val;
 }
 /******************************************************************************
@@ -538,8 +531,7 @@
 	struct bnx2x *bp = params->bp;
 	const u8 port = params->port;
 	const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
-	/**
-	 * mapping between entry  priority to client number (0,1,2 -debug and
+	/* Mapping between entry priority and client number (0,1,2 - debug and
 	 * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
 	 * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by
 	 * reset value or init tool
@@ -551,18 +543,14 @@
 		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
 		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
 	}
-	/**
-	* For strict priority entries defines the number of consecutive
-	* slots for the highest priority.
-	*/
-	/* TODO_ETS - Should be done by reset value or init tool */
+	/* For strict priority entries defines the number of consecutive
+	 * slots for the highest priority.
+	 */
 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
 		   NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/**
-	 * mapping between the CREDIT_WEIGHT registers and actual client
+	/* Mapping between the CREDIT_WEIGHT registers and actual client
 	 * numbers
 	 */
-	/* TODO_ETS - Should be done by reset value or init tool */
 	if (port) {
 		/*Port 1 has 6 COS*/
 		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
@@ -574,8 +562,7 @@
 		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
 	}
 
-	/**
-	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
 	 * COS0 entry, 4 - COS1 entry.
 	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -590,13 +577,12 @@
 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
 		   NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
 
-	/**
-	* Please notice the register address are note continuous and a
-	* for here is note appropriate.In 2 port mode port0 only COS0-5
-	* can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4
-	* port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT
-	* are never used for WFQ
-	*/
+	/* Please notice the register addresses are not continuous and a
+	 * for loop here is not appropriate. In 2 port mode port0 only COS0-5
+	 * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ. In 4
+	 * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT
+	 * are never used for WFQ
+	 */
 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
 		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
@@ -633,10 +619,9 @@
 	u32 base_upper_bound = 0;
 	u8 max_cos = 0;
 	u8 i = 0;
-	/**
-	* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
-	* port mode port1 has COS0-2 that can be used for WFQ.
-	*/
+	/* In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
+	 * port mode port1 has COS0-2 that can be used for WFQ.
+	 */
 	if (!port) {
 		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
 		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -666,8 +651,7 @@
 	u32 base_weight = 0;
 	u8 max_cos = 0;
 
-	/**
-	 * mapping between entry  priority to client number 0 - COS0
+	/* Mapping between entry  priority to client number 0 - COS0
 	 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
 	 * TODO_ETS - Should be done by reset value or init tool
 	 */
@@ -695,10 +679,9 @@
 
 	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
 		   PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
-	/**
-	* In 2 port mode port0 has COS0-5 that can be used for WFQ.
-	* In 4 port mode port1 has COS0-2 that can be used for WFQ.
-	*/
+	/* In 2 port mode port0 has COS0-5 that can be used for WFQ.
+	 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+	 */
 	if (!port) {
 		base_weight = PBF_REG_COS0_WEIGHT_P0;
 		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -738,7 +721,7 @@
 /******************************************************************************
 * Description:
 *	Disable will return basically the values to init values.
-*.
+*
 ******************************************************************************/
 int bnx2x_ets_disabled(struct link_params *params,
 		      struct link_vars *vars)
@@ -867,7 +850,7 @@
 /******************************************************************************
 * Description:
 *	Calculate the total BW. A value of 0 isn't legal.
-*.
+*
 ******************************************************************************/
 static int bnx2x_ets_e3b0_get_total_bw(
 	const struct link_params *params,
@@ -879,7 +862,6 @@
 	u8 is_bw_cos_exist = 0;
 
 	*total_bw = 0 ;
-
 	/* Calculate total BW requested */
 	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
 		if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
@@ -887,10 +869,9 @@
 			if (!ets_params->cos[cos_idx].params.bw_params.bw) {
 				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
 						   "was set to 0\n");
-				/*
-				 * This is to prevent a state when ramrods
+				/* This is to prevent a state when ramrods
 				 * can't be sent
-				*/
+				 */
 				ets_params->cos[cos_idx].params.bw_params.bw
 					 = 1;
 			}
@@ -908,8 +889,7 @@
 		}
 		DP(NETIF_MSG_LINK,
 		   "bnx2x_ets_E3B0_config total BW should be 100\n");
-		/*
-		 * We can handle a case whre the BW isn't 100 this can happen
+		/* We can handle a case where the BW isn't 100; this can happen
 		 * if the TC are joined.
 		 */
 	}
@@ -919,7 +899,7 @@
 /******************************************************************************
 * Description:
 *	Invalidate all the sp_pri_to_cos.
-*.
+*
 ******************************************************************************/
 static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
 {
@@ -931,7 +911,7 @@
 * Description:
 *	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
 *	according to sp_pri_to_cos.
-*.
+*
 ******************************************************************************/
 static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
 					    u8 *sp_pri_to_cos, const u8 pri,
@@ -964,7 +944,7 @@
 * Description:
 *	Returns the correct value according to COS and priority in
 *	the sp_pri_cli register.
-*.
+*
 ******************************************************************************/
 static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
 					 const u8 pri_set,
@@ -981,7 +961,7 @@
 * Description:
 *	Returns the correct value according to COS and priority in the
 *	sp_pri_cli register for NIG.
-*.
+*
 ******************************************************************************/
 static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
 {
@@ -997,7 +977,7 @@
 * Description:
 *	Returns the correct value according to COS and priority in the
 *	sp_pri_cli register for PBF.
-*.
+*
 ******************************************************************************/
 static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
 {
@@ -1013,7 +993,7 @@
 * Description:
 *	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
 *	according to sp_pri_to_cos.(which COS has higher priority)
-*.
+*
 ******************************************************************************/
 static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
 					     u8 *sp_pri_to_cos)
@@ -1149,8 +1129,7 @@
 		return -EINVAL;
 	}
 
-	/*
-	 * Upper bound is set according to current link speed (min_w_val
+	/* Upper bound is set according to current link speed (min_w_val
 	 * should be the same for upper bound and COS credit val).
 	 */
 	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
@@ -1160,8 +1139,7 @@
 	for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
 		if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
 			cos_bw_bitmap |= (1 << cos_entry);
-			/*
-			 * The function also sets the BW in HW(not the mappin
+			/* The function also sets the BW in HW (not the mapping
 			 * yet)
 			 */
 			bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
@@ -1217,14 +1195,12 @@
 	/* ETS disabled configuration */
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
-	/*
-	 * defines which entries (clients) are subjected to WFQ arbitration
+	/* Defines which entries (clients) are subjected to WFQ arbitration
 	 * COS0 0x8
 	 * COS1 0x10
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
-	/*
-	 * mapping between the ARB_CREDIT_WEIGHT registers and actual
+	/* Mapping between the ARB_CREDIT_WEIGHT registers and actual
 	 * client numbers (WEIGHT_0 does not actually have to represent
 	 * client 0)
 	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
@@ -1242,8 +1218,7 @@
 
 	/* Defines the number of consecutive slots for the strict priority */
 	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-	/*
-	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
 	 * entry, 4 - COS1 entry.
 	 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1298,8 +1273,7 @@
 	u32 val	= 0;
 
 	DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
-	/*
-	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	/* Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries,
 	 * 3 - COS0 entry, 4 - COS1 entry.
 	 *  COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1307,8 +1281,7 @@
 	 * MCP and debug are strict
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
-	/*
-	 * For strict priority entries defines the number of consecutive slots
+	/* For strict priority entries defines the number of consecutive slots
 	 * for the highest priority.
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
@@ -1320,8 +1293,7 @@
 	/* Defines the number of consecutive slots for the strict priority */
 	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
 
-	/*
-	 * mapping between entry  priority to client number (0,1,2 -debug and
+	/* Mapping between entry  priority to client number (0,1,2 -debug and
 	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
 	 * 3bits client num.
 	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
@@ -1356,15 +1328,12 @@
 	if (!(params->feature_config_flags &
 	      FEATURE_CONFIG_PFC_ENABLED)) {
 
-		/*
-		 * RX flow control - Process pause frame in receive direction
+		/* RX flow control - Process pause frame in receive direction
 		 */
 		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
 			pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
 
-		/*
-		 * TX flow control - Send pause packet when buffer is full
-		 */
+		/* TX flow control - Send pause packet when buffer is full */
 		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
 			pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
 	} else {/* PFC support */
@@ -1457,8 +1426,7 @@
 static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
 {
 	u32 mode, emac_base;
-	/**
-	 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+	/* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
 	 * (a value of 49==0x31) and make sure that the AUTO poll is off
 	 */
 
@@ -1578,15 +1546,6 @@
 
 	DP(NETIF_MSG_LINK, "enabling UMAC\n");
 
-	/**
-	 * This register determines on which events the MAC will assert
-	 * error on the i/f to the NIG along w/ EOP.
-	 */
-
-	/**
-	 * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
-	 * params->port*0x14,      0xfffff.
-	 */
 	/* This register opens the gate for the UMAC despite its name */
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
 
@@ -1649,8 +1608,7 @@
 		val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
 	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
 
-	/*
-	 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+	/* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
 	 * length used by the MAC receive logic to check frames.
 	 */
 	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -1666,8 +1624,7 @@
 	struct bnx2x *bp = params->bp;
 	u32 is_port4mode = bnx2x_is_4_port_mode(bp);
 
-	/*
-	 * In 4-port mode, need to set the mode only once, so if XMAC is
+	/* In 4-port mode, need to set the mode only once, so if XMAC is
 	 * already out of reset, it means the mode has already been set,
 	 * and it must not* reset the XMAC again, since it controls both
 	 * ports of the path
@@ -1691,13 +1648,13 @@
 	if (is_port4mode) {
 		DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
 
-		/*  Set the number of ports on the system side to up to 2 */
+		/* Set the number of ports on the system side to up to 2 */
 		REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
 
 		/* Set the number of ports on the Warp Core to 10G */
 		REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
 	} else {
-		/*  Set the number of ports on the system side to 1 */
+		/* Set the number of ports on the system side to 1 */
 		REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
 		if (max_speed == SPEED_10000) {
 			DP(NETIF_MSG_LINK,
@@ -1729,8 +1686,7 @@
 
 	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
 	    MISC_REGISTERS_RESET_REG_2_XMAC) {
-		/*
-		 * Send an indication to change the state in the NIG back to XON
+		/* Send an indication to change the state in the NIG back to XON
 		 * Clearing this bit enables the next set of this bit to get
 		 * rising edge
 		 */
@@ -1755,13 +1711,11 @@
 
 	bnx2x_xmac_init(params, vars->line_speed);
 
-	/*
-	 * This register determines on which events the MAC will assert
+	/* This register determines on which events the MAC will assert
 	 * error on the i/f to the NIG along w/ EOP.
 	 */
 
-	/*
-	 * This register tells the NIG whether to send traffic to UMAC
+	/* This register tells the NIG whether to send traffic to UMAC
 	 * or XMAC
 	 */
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
@@ -1863,8 +1817,7 @@
 	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
 	val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
 
-	/*
-	 * Setting this bit causes MAC control frames (except for pause
+	/* Setting this bit causes MAC control frames (except for pause
 	 * frames) to be passed on for processing. This setting has no
 	 * effect on the operation of the pause frames. This bit affects
 	 * all packets regardless of RX Parser packet sorting logic.
@@ -1963,8 +1916,7 @@
 				   struct link_vars *vars,
 				   u8 is_lb)
 {
-	/*
-	 * Set rx control: Strip CRC and enable BigMAC to relay
+	/* Set rx control: Strip CRC and enable BigMAC to relay
 	 * control packets to the system as well
 	 */
 	u32 wb_data[2];
@@ -2016,8 +1968,7 @@
 
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
 
-	/*
-	 * Set Time (based unit is 512 bit time) between automatic
+	/* Set time (base unit is 512 bit time) between automatic
 	 * re-sending of PP packets and enable automatic re-send of
 	 * Per-Priority Packet as long as pp_gen is asserted and
 	 * pp_disable is low.
@@ -2086,7 +2037,7 @@
 	config_val->default_class1.full_xon = 0;
 
 	if (CHIP_IS_E2(bp)) {
-		/*  class0 defaults */
+		/* Class0 defaults */
 		config_val->default_class0.pause_xoff =
 			DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
 		config_val->default_class0.pause_xon =
@@ -2095,7 +2046,7 @@
 			DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
 		config_val->default_class0.full_xon =
 			DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
-		/*  pause able*/
+		/* Pause able */
 		config_val->pauseable_th.pause_xoff =
 			PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 		config_val->pauseable_th.pause_xon =
@@ -2114,7 +2065,7 @@
 		config_val->non_pauseable_th.full_xon =
 			PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
 	} else if (CHIP_IS_E3A0(bp)) {
-		/*  class0 defaults */
+		/* Class0 defaults */
 		config_val->default_class0.pause_xoff =
 			DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
 		config_val->default_class0.pause_xon =
@@ -2123,7 +2074,7 @@
 			DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
 		config_val->default_class0.full_xon =
 			DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
-		/*  pause able */
+		/* Pause able */
 		config_val->pauseable_th.pause_xoff =
 			PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
 		config_val->pauseable_th.pause_xon =
@@ -2142,7 +2093,7 @@
 		config_val->non_pauseable_th.full_xon =
 			PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
 	} else if (CHIP_IS_E3B0(bp)) {
-		/*  class0 defaults */
+		/* Class0 defaults */
 		config_val->default_class0.pause_xoff =
 			DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
 		config_val->default_class0.pause_xon =
@@ -2305,27 +2256,23 @@
 			reg_th_config = &config_val.non_pauseable_th;
 	} else
 		reg_th_config = &config_val.default_class0;
-	/*
-	 * The number of free blocks below which the pause signal to class 0
+	/* The number of free blocks below which the pause signal to class 0
 	 * of MAC #n is asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
 	       BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
 	       reg_th_config->pause_xoff);
-	/*
-	 * The number of free blocks above which the pause signal to class 0
+	/* The number of free blocks above which the pause signal to class 0
 	 * of MAC #n is de-asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
 	       BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
-	/*
-	 * The number of free blocks below which the full signal to class 0
+	/* The number of free blocks below which the full signal to class 0
 	 * of MAC #n is asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
 	       BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
-	/*
-	 * The number of free blocks above which the full signal to class 0
+	/* The number of free blocks above which the full signal to class 0
 	 * of MAC #n is de-asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
@@ -2339,30 +2286,26 @@
 			reg_th_config = &config_val.non_pauseable_th;
 	} else
 		reg_th_config = &config_val.default_class1;
-	/*
-	 * The number of free blocks below which the pause signal to
+	/* The number of free blocks below which the pause signal to
 	 * class 1 of MAC #n is asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
 	       BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
 	       reg_th_config->pause_xoff);
 
-	/*
-	 * The number of free blocks above which the pause signal to
+	/* The number of free blocks above which the pause signal to
 	 * class 1 of MAC #n is de-asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
 	       BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
 	       reg_th_config->pause_xon);
-	/*
-	 * The number of free blocks below which the full signal to
+	/* The number of free blocks below which the full signal to
 	 * class 1 of MAC #n is asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
 	       BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
 	       reg_th_config->full_xoff);
-	/*
-	 * The number of free blocks above which the full signal to
+	/* The number of free blocks above which the full signal to
 	 * class 1 of MAC #n is de-asserted. n=0,1
 	 */
 	REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
@@ -2379,49 +2322,41 @@
 		REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
 			   e3b0_val.per_class_guaranty_mode);
 
-		/*
-		 * The hysteresis on the guarantied buffer space for the Lb
+		/* The hysteresis on the guarantied buffer space for the Lb
 		 * port before signaling XON.
 		 */
 		REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
 			   e3b0_val.lb_guarantied_hyst);
 
-		/*
-		 * The number of free blocks below which the full signal to the
+		/* The number of free blocks below which the full signal to the
 		 * LB port is asserted.
 		 */
 		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
 		       e3b0_val.full_lb_xoff_th);
-		/*
-		 * The number of free blocks above which the full signal to the
+		/* The number of free blocks above which the full signal to the
 		 * LB port is de-asserted.
 		 */
 		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
 		       e3b0_val.full_lb_xon_threshold);
-		/*
-		 * The number of blocks guarantied for the MAC #n port. n=0,1
+		/* The number of blocks guarantied for the MAC #n port. n=0,1
 		 */
 
-		/* The number of blocks guarantied for the LB port.*/
+		/* The number of blocks guarantied for the LB port. */
 		REG_WR(bp, BRB1_REG_LB_GUARANTIED,
 		       e3b0_val.lb_guarantied);
 
-		/*
-		 * The number of blocks guarantied for the MAC #n port.
-		 */
+		/* The number of blocks guarantied for the MAC #n port. */
 		REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
 		       2 * e3b0_val.mac_0_class_t_guarantied);
 		REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
 		       2 * e3b0_val.mac_1_class_t_guarantied);
-		/*
-		 * The number of blocks guarantied for class #t in MAC0. t=0,1
+		/* The number of blocks guarantied for class #t in MAC0. t=0,1
 		 */
 		REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
 		       e3b0_val.mac_0_class_t_guarantied);
 		REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
 		       e3b0_val.mac_0_class_t_guarantied);
-		/*
-		 * The hysteresis on the guarantied buffer space for class in
+		/* The hysteresis on the guarantied buffer space for class in
 		 * MAC0.  t=0,1
 		 */
 		REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
@@ -2429,15 +2364,13 @@
 		REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
 		       e3b0_val.mac_0_class_t_guarantied_hyst);
 
-		/*
-		 * The number of blocks guarantied for class #t in MAC1.t=0,1
+		/* The number of blocks guarantied for class #t in MAC1. t=0,1
 		 */
 		REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
 		       e3b0_val.mac_1_class_t_guarantied);
 		REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
 		       e3b0_val.mac_1_class_t_guarantied);
-		/*
-		 * The hysteresis on the guarantied buffer space for class #t
+		/* The hysteresis on the guarantied buffer space for class #t
 		 * in MAC1.  t=0,1
 		 */
 		REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
@@ -2520,15 +2453,13 @@
 		FEATURE_CONFIG_PFC_ENABLED;
 	DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
 
-	/*
-	 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+	/* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
 	 * MAC control frames (that are not pause packets)
 	 * will be forwarded to the XCM.
 	 */
 	xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
 			  NIG_REG_LLH0_XCM_MASK);
-	/*
-	 * nig params will override non PFC params, since it's possible to
+	/* NIG params will override non PFC params, since it's possible to
 	 * do transition from PFC to SAFC
 	 */
 	if (set_pfc) {
@@ -2548,7 +2479,7 @@
 			llfc_out_en = nig_params->llfc_out_en;
 			llfc_enable = nig_params->llfc_enable;
 			pause_enable = nig_params->pause_enable;
-		} else  /*defaul non PFC mode - PAUSE */
+		} else  /* Default non PFC mode - PAUSE */
 			pause_enable = 1;
 
 		xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
@@ -2608,8 +2539,7 @@
 		      struct link_vars *vars,
 		      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
 {
-	/*
-	 * The PFC and pause are orthogonal to one another, meaning when
+	/* The PFC and pause are orthogonal to one another, meaning when
 	 * PFC is enabled, the pause are disabled, and when PFC is
 	 * disabled, pause are set according to the pause result.
 	 */
@@ -3148,7 +3078,6 @@
 			      EMAC_MDIO_STATUS_10MB);
 
 	/* address */
-
 	tmp = ((phy->addr << 21) | (devad << 16) | reg |
 	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
 	       EMAC_MDIO_COMM_START_BUSY);
@@ -3337,8 +3266,7 @@
 		   u8 devad, u16 reg, u16 *ret_val)
 {
 	u8 phy_index;
-	/*
-	 * Probe for the phy according to the given phy_addr, and execute
+	/* Probe for the phy according to the given phy_addr, and execute
 	 * the read request on it
 	 */
 	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3355,8 +3283,7 @@
 		    u8 devad, u16 reg, u16 val)
 {
 	u8 phy_index;
-	/*
-	 * Probe for the phy according to the given phy_addr, and execute
+	/* Probe for the phy according to the given phy_addr, and execute
 	 * the write request on it
 	 */
 	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3382,7 +3309,7 @@
 	if (bnx2x_is_4_port_mode(bp)) {
 		u32 port_swap, port_swap_ovr;
 
-		/*figure out path swap value */
+		/* Figure out path swap value */
 		path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
 		if (path_swap_ovr & 0x1)
 			path_swap = (path_swap_ovr & 0x2);
@@ -3392,7 +3319,7 @@
 		if (path_swap)
 			path = path ^ 1;
 
-		/*figure out port swap value */
+		/* Figure out port swap value */
 		port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
 		if (port_swap_ovr & 0x1)
 			port_swap = (port_swap_ovr & 0x2);
@@ -3405,7 +3332,7 @@
 		lane = (port<<1) + path;
 	} else { /* two port mode - no port swap */
 
-		/*figure out path swap value */
+		/* Figure out path swap value */
 		path_swap_ovr =
 			REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
 		if (path_swap_ovr & 0x1) {
@@ -3437,8 +3364,7 @@
 
 	if (USES_WARPCORE(bp)) {
 		aer_val = bnx2x_get_warpcore_lane(phy, params);
-		/*
-		 * In Dual-lane mode, two lanes are joined together,
+		/* In Dual-lane mode, two lanes are joined together,
 		 * so in order to configure them, the AER broadcast method is
 		 * used here.
 		 * 0x200 is the broadcast address for lanes 0,1
@@ -3518,8 +3444,7 @@
 {
 	struct bnx2x *bp = params->bp;
 	*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
-	/**
-	 * resolve pause mode and advertisement Please refer to Table
+	/* Resolve pause mode and advertisement. Please refer to Table
 	 * 28B-3 of the 802.3ab-1999 spec
 	 */
 
@@ -3642,6 +3567,7 @@
 		vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
 	if (pause_result & (1<<1))
 		vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+
 }
 
 static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
@@ -3698,6 +3624,7 @@
 	bnx2x_pause_resolve(vars, pause_result);
 
 }
+
 static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
 				   struct link_params *params,
 				   struct link_vars *vars)
@@ -3819,9 +3746,7 @@
 
 	/* Advertise pause */
 	bnx2x_ext_phy_set_pause(params, phy, vars);
-
-	/*
-	 * Set KR Autoneg Work-Around flag for Warpcore version older than D108
+	/* Set KR Autoneg Work-Around flag for Warpcore version older than D108
 	 */
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 			MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
@@ -3829,7 +3754,6 @@
 		DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
 		vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
 	}
-
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 			MDIO_WC_REG_DIGITAL5_MISC7, &val16);
 
@@ -3903,7 +3827,7 @@
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
 			 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
 
-	/*Enable encoded forced speed */
+	/* Enable encoded forced speed */
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
 
@@ -4265,8 +4189,7 @@
 				PORT_HW_CFG_E3_MOD_ABS_MASK) >>
 				PORT_HW_CFG_E3_MOD_ABS_SHIFT;
 
-		/*
-		 * Should not happen. This function called upon interrupt
+		/* Should not happen. This function is called upon interrupt
 		 * triggered by GPIO ( since EPIO can only generate interrupts
 		 * to MCP).
 		 * So if this function was called and none of the GPIOs was set,
@@ -4366,7 +4289,7 @@
 					"link up, rx_tx_asic_rst 0x%x\n",
 					vars->rx_tx_asic_rst);
 			} else {
-				/*reset the lane to see if link comes up.*/
+				/* Reset the lane to see if link comes up. */
 				bnx2x_warpcore_reset_lane(bp, phy, 1);
 				bnx2x_warpcore_reset_lane(bp, phy, 0);
 
@@ -4387,7 +4310,6 @@
 	} /*params->rx_tx_asic_rst*/
 
 }
-
 static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
 				       struct link_params *params,
 				       struct link_vars *vars)
@@ -4545,7 +4467,7 @@
 	/* Update those 1-copy registers */
 	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 			  MDIO_AER_BLOCK_AER_REG, 0);
-		/* Enable 1G MDIO (1-copy) */
+	/* Enable 1G MDIO (1-copy) */
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 			MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
 			&val16);
@@ -4624,43 +4546,43 @@
 		vars->duplex = DUPLEX_FULL;
 		switch (vars->link_status &
 			LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
-			case LINK_10THD:
-				vars->duplex = DUPLEX_HALF;
-				/* fall thru */
-			case LINK_10TFD:
-				vars->line_speed = SPEED_10;
-				break;
+		case LINK_10THD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_10TFD:
+			vars->line_speed = SPEED_10;
+			break;
 
-			case LINK_100TXHD:
-				vars->duplex = DUPLEX_HALF;
-				/* fall thru */
-			case LINK_100T4:
-			case LINK_100TXFD:
-				vars->line_speed = SPEED_100;
-				break;
+		case LINK_100TXHD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_100T4:
+		case LINK_100TXFD:
+			vars->line_speed = SPEED_100;
+			break;
 
-			case LINK_1000THD:
-				vars->duplex = DUPLEX_HALF;
-				/* fall thru */
-			case LINK_1000TFD:
-				vars->line_speed = SPEED_1000;
-				break;
+		case LINK_1000THD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_1000TFD:
+			vars->line_speed = SPEED_1000;
+			break;
 
-			case LINK_2500THD:
-				vars->duplex = DUPLEX_HALF;
-				/* fall thru */
-			case LINK_2500TFD:
-				vars->line_speed = SPEED_2500;
-				break;
+		case LINK_2500THD:
+			vars->duplex = DUPLEX_HALF;
+			/* Fall thru */
+		case LINK_2500TFD:
+			vars->line_speed = SPEED_2500;
+			break;
 
-			case LINK_10GTFD:
-				vars->line_speed = SPEED_10000;
-				break;
-			case LINK_20GTFD:
-				vars->line_speed = SPEED_20000;
-				break;
-			default:
-				break;
+		case LINK_10GTFD:
+			vars->line_speed = SPEED_10000;
+			break;
+		case LINK_20GTFD:
+			vars->line_speed = SPEED_20000;
+			break;
+		default:
+			break;
 		}
 		vars->flow_ctrl = 0;
 		if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
@@ -4835,9 +4757,8 @@
 				 struct bnx2x_phy *phy)
 {
 	struct bnx2x *bp = params->bp;
-	/*
-	 *  Each two bits represents a lane number:
-	 *  No swap is 0123 => 0x1b no need to enable the swap
+	/* Each two bits represents a lane number:
+	 * No swap is 0123 => 0x1b no need to enable the swap
 	 */
 	u16 rx_lane_swap, tx_lane_swap;
 
@@ -5051,8 +4972,7 @@
 			  MDIO_REG_BANK_COMBO_IEEE0,
 			  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
 
-	/*
-	 * program speed
+	/* Program speed
 	 *  - needed only if the speed is greater than 1G (2.5G or 10G)
 	 */
 	CL22_RD_OVER_CL45(bp, phy,
@@ -5087,8 +5007,6 @@
 	struct bnx2x *bp = params->bp;
 	u16 val = 0;
 
-	/* configure the 48 bits for BAM AN */
-
 	/* set extended capabilities */
 	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
 		val |= MDIO_OVER_1G_UP1_2_5G;
@@ -5234,11 +5152,8 @@
 	}
 }
 
-
-/*
- * link management
+/* Link management
  */
-
 static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
 					     struct link_params *params)
 {
@@ -5383,8 +5298,7 @@
 			     "ustat_val(0x8371) = 0x%x\n", ustat_val);
 		return;
 	}
-	/*
-	 * Step 3: Check CL37 Message Pages received to indicate LP
+	/* Step 3: Check CL37 Message Pages received to indicate LP
 	 * supports only CL37
 	 */
 	CL22_RD_OVER_CL45(bp, phy,
@@ -5401,8 +5315,7 @@
 			 cl37_fsm_received);
 		return;
 	}
-	/*
-	 * The combined cl37/cl73 fsm state information indicating that
+	/* The combined cl37/cl73 fsm state information indicating that
 	 * we are connected to a device which does not support cl73, but
 	 * does support cl37 BAM. In this case we disable cl73 and
 	 * restart cl37 auto-neg
@@ -5973,8 +5886,7 @@
 {
 	u32 latch_status = 0;
 
-	/*
-	 * Disable the MI INT ( external phy int ) by writing 1 to the
+	/* Disable the MI INT ( external phy int ) by writing 1 to the
 	 * status register. Link down indication is high-active-signal,
 	 * so in this case we need to write the status to clear the XOR
 	 */
@@ -6009,8 +5921,7 @@
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
 	u32 mask;
-	/*
-	 * First reset all status we assume only one line will be
+	/* First reset all status; we assume only one line will be
 	 * changed at a time
 	 */
 	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -6024,8 +5935,7 @@
 			if (is_10g_plus)
 				mask = NIG_STATUS_XGXS0_LINK10G;
 			else if (params->switch_cfg == SWITCH_CFG_10G) {
-				/*
-				 * Disable the link interrupt by writing 1 to
+				/* Disable the link interrupt by writing 1 to
 				 * the relevant lane in the status register
 				 */
 				u32 ser_lane =
@@ -6227,8 +6137,7 @@
 		break;
 
 	case LED_MODE_OPER:
-		/*
-		 * For all other phys, OPER mode is same as ON, so in case
+		/* For all other phys, OPER mode is same as ON, so in case
 		 * link is down, do nothing
 		 */
 		if (!vars->link_up)
@@ -6239,9 +6148,7 @@
 			 (params->phy[EXT_PHY1].type ==
 			  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
 		    CHIP_IS_E2(bp) && params->num_phys == 2) {
-			/*
-			 * This is a work-around for E2+8727 Configurations
-			 */
+			/* This is a work-around for E2+8727 Configurations */
 			if (mode == LED_MODE_ON ||
 				speed == SPEED_10000){
 				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -6250,8 +6157,7 @@
 				tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
 				EMAC_WR(bp, EMAC_REG_EMAC_LED,
 					(tmp | EMAC_LED_OVERRIDE));
-				/*
-				 * return here without enabling traffic
+				/* Return here without enabling traffic
 				 * LED blink and setting rate in ON mode.
 				 * In oper mode, enabling LED blink
 				 * and setting rate is needed.
@@ -6260,8 +6166,7 @@
 					return rc;
 			}
 		} else if (SINGLE_MEDIA_DIRECT(params)) {
-			/*
-			 * This is a work-around for HW issue found when link
+			/* This is a work-around for HW issue found when link
 			 * is up in CL73
 			 */
 			if ((!CHIP_IS_E3(bp)) ||
@@ -6310,10 +6215,7 @@
 		     (speed == SPEED_1000) ||
 		     (speed == SPEED_100) ||
 		     (speed == SPEED_10))) {
-			/*
-			 * On Everest 1 Ax chip versions for speeds less than
-			 * 10G LED scheme is different
-			 */
+			/* For speeds less than 10G LED scheme is different */
 			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
 			       + port*4, 1);
 			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
@@ -6333,8 +6235,7 @@
 
 }
 
-/*
- * This function comes to reflect the actual link state read DIRECTLY from the
+/* This function comes to reflect the actual link state read DIRECTLY from the
  * HW
  */
 int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
@@ -6422,16 +6323,14 @@
 	int rc = 0;
 	u8 phy_index, non_ext_phy;
 	struct bnx2x *bp = params->bp;
-	/*
-	 * In case of external phy existence, the line speed would be the
+	/* In case of external phy existence, the line speed would be the
 	 * line speed linked up by the external phy. In case it is direct
 	 * only, then the line_speed during initialization will be
 	 * equal to the req_line_speed
 	 */
 	vars->line_speed = params->phy[INT_PHY].req_line_speed;
 
-	/*
-	 * Initialize the internal phy in case this is a direct board
+	/* Initialize the internal phy in case this is a direct board
 	 * (no external phys), or this board has external phy which requires
 	 * to be initialized first.
 	 */
@@ -6463,8 +6362,7 @@
 	} else {
 		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 		      phy_index++) {
-			/*
-			 * No need to initialize second phy in case of first
+			/* No need to initialize second phy in case of first
 			 * phy only selection. In case of second phy, we do
 			 * need to initialize the first phy, since they are
 			 * connected.
@@ -6492,7 +6390,6 @@
 			NIG_STATUS_XGXS0_LINK_STATUS |
 			NIG_STATUS_SERDES0_LINK_STATUS |
 			NIG_MASK_MI_INT));
-	bnx2x_update_mng(params, vars->link_status);
 	return rc;
 }
 
@@ -6577,7 +6474,7 @@
 				u8 link_10g)
 {
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
+	u8 phy_idx, port = params->port;
 	int rc = 0;
 
 	vars->link_status |= (LINK_STATUS_LINK_UP |
@@ -6641,11 +6538,18 @@
 
 	/* update shared memory */
 	bnx2x_update_mng(params, vars->link_status);
+
+	/* Check remote fault */
+	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+		if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+			bnx2x_check_half_open_conn(params, vars, 0);
+			break;
+		}
+	}
 	msleep(20);
 	return rc;
 }
-/*
- * The bnx2x_link_update function should be called upon link
+/* The bnx2x_link_update function should be called upon link
  * interrupt.
  * Link is considered up as follows:
  * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
@@ -6702,8 +6606,7 @@
 	if (!CHIP_IS_E3(bp))
 		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 
-	/*
-	 * Step 1:
+	/* Step 1:
 	 * Check external link change only for external phys, and apply
 	 * priority selection between them in case the link on both phys
 	 * is up. Note that instead of the common vars, a temporary
@@ -6734,23 +6637,20 @@
 			switch (bnx2x_phy_selection(params)) {
 			case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
 			case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
-			/*
-			 * In this option, the first PHY makes sure to pass the
+			/* In this option, the first PHY makes sure to pass the
 			 * traffic through itself only.
 			 * It's not clear how to reset the link on the second phy
 			 */
 				active_external_phy = EXT_PHY1;
 				break;
 			case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
-			/*
-			 * In this option, the first PHY makes sure to pass the
+			/* In this option, the first PHY makes sure to pass the
 			 * traffic through the second PHY.
 			 */
 				active_external_phy = EXT_PHY2;
 				break;
 			default:
-			/*
-			 * Link indication on both PHYs with the following cases
+			/* Link indication on both PHYs with the following cases
 			 * is invalid:
 			 * - FIRST_PHY means that second phy wasn't initialized,
 			 * hence its link is expected to be down
@@ -6767,8 +6667,7 @@
 		}
 	}
 	prev_line_speed = vars->line_speed;
-	/*
-	 * Step 2:
+	/* Step 2:
 	 * Read the status of the internal phy. In case of
 	 * DIRECT_SINGLE_MEDIA board, this link is the external link,
 	 * otherwise this is the link between the 577xx and the first
@@ -6778,8 +6677,7 @@
 		params->phy[INT_PHY].read_status(
 			&params->phy[INT_PHY],
 			params, vars);
-	/*
-	 * The INT_PHY flow control reside in the vars. This include the
+	/* The INT_PHY flow control resides in the vars. This includes the
 	 * case where the speed or flow control are not set to AUTO.
 	 * Otherwise, the active external phy flow control result is set
 	 * to the vars. The ext_phy_line_speed is needed to check if the
@@ -6788,14 +6686,12 @@
 	 */
 	if (active_external_phy > INT_PHY) {
 		vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
-		/*
-		 * Link speed is taken from the XGXS. AN and FC result from
+		/* Link speed is taken from the XGXS. AN and FC result from
 		 * the external phy.
 		 */
 		vars->link_status |= phy_vars[active_external_phy].link_status;
 
-		/*
-		 * if active_external_phy is first PHY and link is up - disable
+		/* If active_external_phy is first PHY and link is up, then
 		 * disable TX on second external PHY
 		 */
 		if (active_external_phy == EXT_PHY1) {
@@ -6832,8 +6728,7 @@
 	DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
 		   " ext_phy_line_speed = %d\n", vars->flow_ctrl,
 		   vars->link_status, ext_phy_line_speed);
-	/*
-	 * Upon link speed change set the NIG into drain mode. Comes to
+	/* Upon link speed change set the NIG into drain mode. Comes to
 	 * deal with possible FIFO glitch due to clk change when speed
 	 * is decreased without link down indicator
 	 */
@@ -6858,8 +6753,7 @@
 
 	bnx2x_link_int_ack(params, vars, link_10g_plus);
 
-	/*
-	 * In case external phy link is up, and internal link is down
+	/* In case external phy link is up, and internal link is down
 	 * (not initialized yet, probably after link initialization), it
 	 * needs to be initialized.
 	 * Note that after link down-up as result of cable plug, the xgxs
@@ -6887,8 +6781,7 @@
 						vars);
 		}
 	}
-	/*
-	 * Link is up only if both local phy and external phy (in case of
+	/* Link is up only if both local phy and external phy (in case of
 	 * non-direct board) are up and no fault detected on active PHY.
 	 */
 	vars->link_up = (vars->phy_link_up &&
@@ -6907,6 +6800,10 @@
 	else
 		rc = bnx2x_update_link_down(params, vars);
 
+	/* Update MCP link status was changed */
+	if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
+		bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
+
 	return rc;
 }
 
@@ -7120,8 +7017,7 @@
 	}
 	/* XAUI workaround in 8073 A0: */
 
-	/*
-	 * After loading the boot ROM and restarting Autoneg, poll
+	/* After loading the boot ROM and restarting Autoneg, poll
 	 * Dev1, Reg $C820:
 	 */
 
@@ -7130,8 +7026,7 @@
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
 				&val);
-		  /*
-		   * If bit [14] = 0 or bit [13] = 0, continue on with
+		  /* If bit [14] = 0 or bit [13] = 0, continue on with
 		   * system initialization (XAUI work-around not required, as
 		   * these bits indicate 2.5G or 1G link up).
 		   */
@@ -7140,8 +7035,7 @@
 			return 0;
 		} else if (!(val & (1<<15))) {
 			DP(NETIF_MSG_LINK, "bit 15 went off\n");
-			/*
-			 * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
+			/* If bit 15 is 0, then poll Dev1, Reg $C841 until its
 			 * MSB (bit15) goes to 1 (indicating that the XAUI
 			 * workaround has completed), then continue on with
 			 * system initialization.
@@ -7291,8 +7185,7 @@
 			val = (1<<7);
 		} else if (phy->req_line_speed ==  SPEED_2500) {
 			val = (1<<5);
-			/*
-			 * Note that 2.5G works only when used with 1G
+			/* Note that 2.5G works only when used with 1G
 			 * advertisement
 			 */
 		} else
@@ -7343,8 +7236,7 @@
 	/* Add support for CL37 (passive mode) III */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 
-	/*
-	 * The SNR will improve about 2db by changing BW and FEE main
+	/* The SNR will improve about 2dB by changing BW and FFE main
 	 * tap. Rest commands are executed after link is up
 	 * Change FFE main cursor to 5 in EDC register
 	 */
@@ -7431,8 +7323,7 @@
 
 	link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
 	if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
-		/*
-		 * The SNR will improve about 2dbby changing the BW and FEE main
+		/* The SNR will improve about 2dB by changing the BW and FFE main
 		 * tap. The 1st write to change FFE main tap is set before
 		 * restart AN. Change PLL Bandwidth in EDC register
 		 */
@@ -7479,8 +7370,7 @@
 			bnx2x_cl45_read(bp, phy,
 					MDIO_XS_DEVAD,
 					MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
-			/*
-			 * Set bit 3 to invert Rx in 1G mode and clear this bit
+			/* Set bit 3 to invert Rx in 1G mode and clear this bit
 			 * when it's in 10G mode.
 			 */
 			if (vars->line_speed == SPEED_1000) {
@@ -7602,8 +7492,7 @@
 					   u8 pmd_dis)
 {
 	struct bnx2x *bp = params->bp;
-	/*
-	 * Disable transmitter only for bootcodes which can enable it afterwards
+	/* Disable transmitter only for bootcodes which can enable it afterwards
 	 * (for D3 link)
 	 */
 	if (pmd_dis) {
@@ -7780,9 +7669,6 @@
 	u32 data_array[4];
 	u16 addr32;
 	struct bnx2x *bp = params->bp;
-	/*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
-					" addr %d, cnt %d\n",
-					addr, byte_cnt);*/
 	if (byte_cnt > 16) {
 		DP(NETIF_MSG_LINK,
 		   "Reading from eeprom is limited to 16 bytes\n");
@@ -7847,8 +7733,7 @@
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
 			 0x8002);
-	/*
-	 * Wait appropriate time for two-wire command to finish before
+	/* Wait appropriate time for two-wire command to finish before
 	 * polling the status register
 	 */
 	msleep(1);
@@ -7941,8 +7826,7 @@
 	{
 		u8 copper_module_type;
 		phy->media_type = ETH_PHY_DA_TWINAX;
-		/*
-		 * Check if its active cable (includes SFP+ module)
+		/* Check if it's an active cable (includes SFP+ module)
 		 * or a passive cable
 		 */
 		if (bnx2x_read_sfp_module_eeprom(phy,
@@ -8019,8 +7903,7 @@
 	DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
 	return 0;
 }
-/*
- * This function read the relevant field from the module (SFP+), and verify it
+/* This function read the relevant field from the module (SFP+), and verify it
  * is compliant with this board
  */
 static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
@@ -8102,8 +7985,7 @@
 	u8 val;
 	struct bnx2x *bp = params->bp;
 	u16 timeout;
-	/*
-	 * Initialization time after hot-plug may take up to 300ms for
+	/* Initialization time after hot-plug may take up to 300ms for
 	 * some phys type ( e.g. JDSU )
 	 */
 
@@ -8125,8 +8007,7 @@
 				    u8 is_power_up) {
 	/* Make sure GPIOs are not using for LED mode */
 	u16 val;
-	/*
-	 * In the GPIO register, bit 4 is use to determine if the GPIOs are
+	/* In the GPIO register, bit 4 is used to determine if the GPIOs are
 	 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
 	 * output
 	 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
@@ -8142,8 +8023,7 @@
 	if (is_power_up)
 		val = (1<<4);
 	else
-		/*
-		 * Set GPIO control to OUTPUT, and set the power bit
+		/* Set GPIO control to OUTPUT, and set the power bit
 		 * according to the is_power_up
 		 */
 		val = (1<<1);
@@ -8177,8 +8057,7 @@
 
 		DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
 
-		/*
-		 * Changing to LRM mode takes quite few seconds. So do it only
+		/* Changing to LRM mode takes quite a few seconds. So do it only
 		 * if current mode is limiting (default is LRM)
 		 */
 		if (cur_limiting_mode != EDC_MODE_LIMITING)
@@ -8313,8 +8192,7 @@
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
 	if (CHIP_IS_E3(bp)) {
-		/*
-		 * Low ==> if SFP+ module is supported otherwise
+		/* Low ==> if SFP+ module is supported otherwise
 		 * High ==> if SFP+ module is not on the approved vendor list
 		 */
 		bnx2x_set_e3_module_fault_led(params, gpio_mode);
@@ -8339,8 +8217,7 @@
 		return;
 	DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
 		       power, pin_cfg);
-	/*
-	 * Low ==> corresponding SFP+ module is powered
+	/* Low ==> corresponding SFP+ module is powered
 	 * high ==> the SFP+ module is powered down
 	 */
 	bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
@@ -8474,14 +8351,12 @@
 		bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
 	}
 
-	/*
-	 * Check and set limiting mode / LRM mode on 8726. On 8727 it
+	/* Check and set limiting mode / LRM mode on 8726. On 8727 it
 	 * is done automatically
 	 */
 	bnx2x_set_limiting_mode(params, phy, edc_mode);
 
-	/*
-	 * Enable transmit for this module if the module is approved, or
+	/* Enable transmit for this module if the module is approved, or
 	 * if unapproved modules should also enable the Tx laser
 	 */
 	if (rc == 0 ||
@@ -8536,8 +8411,7 @@
 		bnx2x_set_gpio_int(bp, gpio_num,
 				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
 				   gpio_port);
-		/*
-		 * Module was plugged out.
+		/* Module was plugged out.
 		 * Disable transmit for this module
 		 */
 		phy->media_type = ETH_PHY_NOT_PRESENT;
@@ -8607,8 +8481,7 @@
 
 	DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
 			" link_status 0x%x\n", rx_sd, pcs_status, val2);
-	/*
-	 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+	/* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
 	 * are set, or if the autoneg bit 1 is set
 	 */
 	link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
@@ -8722,8 +8595,7 @@
 	}
 	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
 
-	/*
-	 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+	/* If TX Laser is controlled by GPIO_0, do not let PHY go into low
 	 * power mode, if TX Laser is disabled
 	 */
 
@@ -8833,8 +8705,7 @@
 
 	bnx2x_8726_external_rom_boot(phy, params);
 
-	/*
-	 * Need to call module detected on initialization since the module
+	/* Need to call module detected on initialization since the module
 	 * detection triggered by actual module insertion might occur before
 	 * driver is loaded, and when driver is loaded, it resets all
 	 * registers, including the transmitter
@@ -8871,8 +8742,7 @@
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 		bnx2x_cl45_write(bp, phy,
 				MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
-		/*
-		 * Enable RX-ALARM control to receive interrupt for 1G speed
+		/* Enable RX-ALARM control to receive interrupt for 1G speed
 		 * change
 		 */
 		bnx2x_cl45_write(bp, phy,
@@ -8973,8 +8843,7 @@
 				struct link_params *params) {
 	u32 swap_val, swap_override;
 	u8 port;
-	/*
-	 * The PHY reset is controlled by GPIO 1. Fake the port number
+	/* The PHY reset is controlled by GPIO 1. Fake the port number
 	 * to cancel the swap done in set_gpio()
 	 */
 	struct bnx2x *bp = params->bp;
@@ -9012,14 +8881,12 @@
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
 
-	/*
-	 * Initially configure MOD_ABS to interrupt when module is
+	/* Initially configure MOD_ABS to interrupt when module is
 	 * present (bit 8)
 	 */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-	/*
-	 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+	/* Set EDC off by setting OPTXLOS signal input to low (bit 9).
 	 * When the EDC is off it locks onto a reference clock and avoids
 	 * becoming 'lost'
 	 */
@@ -9040,8 +8907,7 @@
 	if (phy->flags & FLAGS_NOC)
 		val |= (3<<5);
 
-	/*
-	 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
+	/* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
 	 * status which reflect SFP+ module over-current
 	 */
 	if (!(phy->flags & FLAGS_NOC))
@@ -9067,8 +8933,7 @@
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
 		DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-		/*
-		 * Power down the XAUI until link is up in case of dual-media
+		/* Power down the XAUI until link is up in case of dual-media
 		 * and 1G
 		 */
 		if (DUAL_MEDIA(params)) {
@@ -9093,8 +8958,7 @@
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
 	} else {
-		/*
-		 * Since the 8727 has only single reset pin, need to set the 10G
+		/* Since the 8727 has only single reset pin, need to set the 10G
 		 * registers although it is default
 		 */
 		bnx2x_cl45_write(bp, phy,
@@ -9109,8 +8973,7 @@
 				 0x0008);
 	}
 
-	/*
-	 * Set 2-wire transfer rate of SFP+ module EEPROM
+	/* Set 2-wire transfer rate of SFP+ module EEPROM
 	 * to 100Khz since some DACs(direct attached cables) do
 	 * not work at 400Khz.
 	 */
@@ -9133,8 +8996,7 @@
 				 phy->tx_preemphasis[1]);
 	}
 
-	/*
-	 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+	/* If TX Laser is controlled by GPIO_0, do not let PHY go into low
 	 * power mode, if TX Laser is disabled
 	 */
 	tx_en_mode = REG_RD(bp, params->shmem_base +
@@ -9180,8 +9042,7 @@
 		DP(NETIF_MSG_LINK,
 		   "MOD_ABS indication show module is absent\n");
 		phy->media_type = ETH_PHY_NOT_PRESENT;
-		/*
-		 * 1. Set mod_abs to detect next module
+		/* 1. Set mod_abs to detect next module
 		 *    presence event
 		 * 2. Set EDC off by setting OPTXLOS signal input to low
 		 *    (bit 9).
@@ -9195,8 +9056,7 @@
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-		/*
-		 * Clear RX alarm since it stays up as long as
+		/* Clear RX alarm since it stays up as long as
 		 * the mod_abs wasn't changed
 		 */
 		bnx2x_cl45_read(bp, phy,
@@ -9207,8 +9067,7 @@
 		/* Module is present */
 		DP(NETIF_MSG_LINK,
 		   "MOD_ABS indication show module is present\n");
-		/*
-		 * First disable transmitter, and if the module is ok, the
+		/* First disable transmitter, and if the module is ok, the
 		 * module_detection will enable it
 		 * 1. Set mod_abs to detect next module absent event ( bit 8)
 		 * 2. Restore the default polarity of the OPRXLOS signal and
@@ -9222,8 +9081,7 @@
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-		/*
-		 * Clear RX alarm since it stays up as long as the mod_abs
+		/* Clear RX alarm since it stays up as long as the mod_abs
 		 * wasn't changed. This needs to be done before calling the
 		 * module detection, otherwise it will clear the link update
 		 * alarm
@@ -9284,8 +9142,7 @@
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 
-	/*
+	/* If a module is present and there is a need to check
+	/* If a module is present and there is need to check
 	 * for over current
 	 */
 	if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
@@ -9350,8 +9207,7 @@
 			MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
 
-	/*
-	 * Bits 0..2 --> speed detected,
+	/* Bits 0..2 --> speed detected,
 	 * Bits 13..15--> link is down
 	 */
 	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
@@ -9394,8 +9250,7 @@
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_8727_PCS_GP, &val1);
-		/*
-		 * In case of dual-media board and 1G, power up the XAUI side,
+		/* In case of dual-media board and 1G, power up the XAUI side,
 		 * otherwise power it down. For 10G it is done automatically
 		 */
 		if (link_up)
@@ -9561,8 +9416,7 @@
 		/* Save spirom version */
 		bnx2x_save_848xx_spirom_version(phy, bp, params->port);
 	}
-	/*
-	 * This phy uses the NIG latch mechanism since link indication
+	/* This phy uses the NIG latch mechanism since link indication
 	 * arrives through its LED4 and not via its LASI signal, so we
 	 * get steady signal instead of clear on read
 	 */
@@ -9667,8 +9521,7 @@
 	if (phy->req_duplex == DUPLEX_FULL)
 		autoneg_val |= (1<<8);
 
-	/*
-	 * Always write this if this is not 84833.
+	/* Always write this if this is not 84833.
 	 * For 84833, write it only when it's a forced speed.
 	 */
 	if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
@@ -9916,8 +9769,7 @@
 	/* Wait for GPHY to come out of reset */
 	msleep(50);
 	if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
-		/*
-		 * BCM84823 requires that XGXS links up first @ 10G for normal
+		/* BCM84823 requires that XGXS links up first @ 10G for normal
 		 * behavior.
 		 */
 		u16 temp;
@@ -10393,8 +10245,7 @@
 		break;
 	}
 
-	/*
-	 * This is a workaround for E3+84833 until autoneg
+	/* This is a workaround for E3+84833 until autoneg
 	 * restart is fixed in f/w
 	 */
 	if (CHIP_IS_E3(bp)) {
@@ -10418,8 +10269,7 @@
 	DP(NETIF_MSG_LINK, "54618SE cfg init\n");
 	usleep_range(1000, 1000);
 
-	/*
-	 * This works with E3 only, no need to check the chip
+	/* This works with E3 only, no need to check the chip
 	 * before determining the port.
 	 */
 	port = params->port;
@@ -10441,7 +10291,7 @@
 			 MDIO_PMA_REG_CTRL, 0x8000);
 	bnx2x_wait_reset_complete(bp, phy, params);
 
-	/*wait for GPHY to reset */
+	/* Wait for GPHY to reset */
 	msleep(50);
 
 	/* Configure LED4: set to INTR (0x6). */
@@ -10647,13 +10497,11 @@
 	u32 cfg_pin;
 	u8 port;
 
-	/*
-	 * In case of no EPIO routed to reset the GPHY, put it
+	/* In case of no EPIO routed to reset the GPHY, put it
 	 * in low power mode.
 	 */
 	bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
-	/*
-	 * This works with E3 only, no need to check the chip
+	/* This works with E3 only, no need to check the chip
 	 * before determining the port.
 	 */
 	port = params->port;
@@ -10762,7 +10610,7 @@
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
 
 		if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
-			/* report LP advertised speeds */
+			/* Report LP advertised speeds */
 			bnx2x_cl22_read(bp, phy, 0x5, &val);
 
 			if (val & (1<<5))
@@ -10827,8 +10675,7 @@
 	/* This register opens the gate for the UMAC despite its name */
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
 
-	/*
-	 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+	/* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
 	 * length used by the MAC receive logic to check frames.
 	 */
 	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -11101,22 +10948,23 @@
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= FLAGS_HW_LOCK_REQUIRED,
+	.flags		= (FLAGS_HW_LOCK_REQUIRED |
+			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
 	.supported	= (SUPPORTED_10baseT_Half |
-			     SUPPORTED_10baseT_Full |
-			     SUPPORTED_100baseT_Half |
-			     SUPPORTED_100baseT_Full |
-			     SUPPORTED_1000baseT_Full |
-			     SUPPORTED_10000baseT_Full |
-			     SUPPORTED_20000baseKR2_Full |
-			     SUPPORTED_20000baseMLD2_Full |
-			     SUPPORTED_FIBRE |
-			     SUPPORTED_Autoneg |
-			     SUPPORTED_Pause |
-			     SUPPORTED_Asym_Pause),
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_20000baseKR2_Full |
+			   SUPPORTED_20000baseMLD2_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
 	.media_type	= ETH_PHY_UNSPECIFIED,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
@@ -11258,7 +11106,8 @@
 	.addr		= 0xff,
 	.def_md_devad	= 0,
 	.flags		= (FLAGS_HW_LOCK_REQUIRED |
-			   FLAGS_INIT_XGXS_FIRST),
+			   FLAGS_INIT_XGXS_FIRST |
+			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -11289,7 +11138,8 @@
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= FLAGS_FAN_FAILURE_DET_REQ,
+	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
+			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -11354,8 +11204,9 @@
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
-			  FLAGS_REARM_LATCH_SIGNAL,
+	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
+			   FLAGS_REARM_LATCH_SIGNAL |
+			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -11390,8 +11241,9 @@
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
 	.addr		= 0xff,
 	.def_md_devad	= 0,
-	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
-			    FLAGS_REARM_LATCH_SIGNAL,
+	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
+			   FLAGS_REARM_LATCH_SIGNAL |
+			   FLAGS_TX_ERROR_CHECK),
 	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
 	.mdio_ctrl	= 0,
@@ -11466,9 +11318,8 @@
 	/* Get the 4 lanes xgxs config rx and tx */
 	u32 rx = 0, tx = 0, i;
 	for (i = 0; i < 2; i++) {
-		/*
-		 * INT_PHY and EXT_PHY1 share the same value location in the
-		 * shmem. When num_phys is greater than 1, than this value
+		/* INT_PHY and EXT_PHY1 share the same value location in
+		 * the shmem. When num_phys is greater than 1, then this value
 		 * applies only to EXT_PHY1
 		 */
 		if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
@@ -11546,8 +11397,7 @@
 					offsetof(struct shmem_region, dev_info.
 					port_hw_config[port].default_cfg)) &
 				 PORT_HW_CFG_NET_SERDES_IF_MASK);
-		/*
-		 * Set the appropriate supported and flags indications per
+		/* Set the appropriate supported and flags indications per
 		 * interface type of the chip
 		 */
 		switch (serdes_net_if) {
@@ -11605,8 +11455,7 @@
 			break;
 		}
 
-		/*
-		 * Enable MDC/MDIO work-around for E3 A0 since free running MDC
+		/* Enable MDC/MDIO work-around for E3 A0 since free running MDC
 		 * was not set as expected. For B0, ECO will be enabled so there
 		 * won't be an issue there
 		 */
@@ -11719,8 +11568,7 @@
 	phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
 	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
 
-	/*
-	 * The shmem address of the phy version is located on different
+	/* The shmem address of the phy version is located on different
 	 * structures. In case this structure is too old, do not set
 	 * the address
 	 */
@@ -11754,8 +11602,7 @@
 
 	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
 	    (phy->ver_addr)) {
-		/*
+		/* Remove 100Mb link support for BCM84833 when phy fw
+		/* Remove 100Mb link supported for BCM84833 when phy fw
 		 * version lower than or equal to 1.39
 		 */
 		u32 raw_ver = REG_RD(bp, phy->ver_addr);
@@ -11765,8 +11612,7 @@
 					    SUPPORTED_100baseT_Full);
 	}
 
-	/*
-	 * In case mdc/mdio_access of the external phy is different than the
+	/* In case mdc/mdio_access of the external phy is different than the
 	 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
 	 * to prevent one port interfering with another port's CL45 operations.
 	 */
@@ -11936,13 +11782,16 @@
 		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
 			break;
 
+		if (params->feature_config_flags &
+		    FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
+			phy->flags &= ~FLAGS_TX_ERROR_CHECK;
+
 		sync_offset = params->shmem_base +
 			offsetof(struct shmem_region,
 			dev_info.port_hw_config[params->port].media_type);
 		media_types = REG_RD(bp, sync_offset);
 
-		/*
-		 * Update media type for non-PMF sync only for the first time
+		/* Update media type for non-PMF sync only for the first time
 		 * In case the media type changes afterwards, it will be updated
 		 * using the update_status function
 		 */
@@ -12016,8 +11865,7 @@
 	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 	vars->mac_type = MAC_TYPE_XMAC;
 	vars->phy_flags = PHY_XGXS_FLAG;
-	/*
-	 * Set WC to loopback mode since link is required to provide clock
+	/* Set WC to loopback mode since link is required to provide clock
 	 * to the XMAC in 20G mode
 	 */
 	bnx2x_set_aer_mmd(params, &params->phy[0]);
@@ -12162,6 +12010,7 @@
 		bnx2x_link_int_enable(params);
 		break;
 	}
+	bnx2x_update_mng(params, vars->link_status);
 	return 0;
 }
 
@@ -12302,7 +12151,8 @@
 				NIG_MASK_MI_INT));
 
 		/* Need to take the phy out of low power mode in order
-			to write to access its registers */
+		 * to access its registers
+		 */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 			       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 			       port);
@@ -12350,8 +12200,7 @@
 				 (val | 1<<10));
 	}
 
-	/*
-	 * Toggle Transmitter: Power down and then up with 600ms delay
+	/* Toggle Transmitter: Power down and then up with 600ms delay
 	 * between
 	 */
 	msleep(600);
@@ -12494,8 +12343,7 @@
 	reset_gpio = MISC_REGISTERS_GPIO_1;
 	port = 1;
 
-	/*
-	 * Retrieve the reset gpio/port which control the reset.
+	/* Retrieve the reset gpio/port which controls the reset.
 	 * Default is GPIO1, PORT1
 	 */
 	bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
@@ -12670,8 +12518,7 @@
 		break;
 
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-		/*
-		 * GPIO1 affects both ports, so there's need to pull
+		/* GPIO1 affects both ports, so there's need to pull
 		 * it for single port alone
 		 */
 		rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
@@ -12679,8 +12526,7 @@
 						phy_index, chip_id);
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
-		/*
-		 * GPIO3's are linked, and so both need to be toggled
+		/* GPIO3's are linked, and so both need to be toggled
 		 * to obtain required 2us pulse.
 		 */
 		rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
@@ -12779,7 +12625,8 @@
 }
 
 static void bnx2x_analyze_link_error(struct link_params *params,
-				     struct link_vars *vars, u32 lss_status)
+				     struct link_vars *vars, u32 lss_status,
+				     u8 notify)
 {
 	struct bnx2x *bp = params->bp;
 	/* Compare new value with previous value */
@@ -12793,8 +12640,7 @@
 	DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
 		       half_open_conn, lss_status);
 
-	/*
-	 * a. Update shmem->link_status accordingly
+	/* a. Update shmem->link_status accordingly
 	 * b. Update link_vars->link_up
 	 */
 	if (lss_status) {
@@ -12802,8 +12648,10 @@
 		vars->link_status &= ~LINK_STATUS_LINK_UP;
 		vars->link_up = 0;
 		vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
-		/*
-		 * Set LED mode to off since the PHY doesn't know about these
+
+		/* activate nig drain */
+		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
+		/* Set LED mode to off since the PHY doesn't know about these
 		 * errors
 		 */
 		led_mode = LED_MODE_OFF;
@@ -12813,7 +12661,11 @@
 		vars->link_up = 1;
 		vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
 		led_mode = LED_MODE_OPER;
+
+		/* Clear nig drain */
+		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 	}
+	bnx2x_sync_link(params, vars);
 	/* Update the LED according to the link state */
 	bnx2x_set_led(params, vars, led_mode, SPEED_10000);
 
@@ -12822,7 +12674,8 @@
 
 	/* C. Trigger General Attention */
 	vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
-	bnx2x_notify_link_changed(bp);
+	if (notify)
+		bnx2x_notify_link_changed(bp);
 }
 
 /******************************************************************************
@@ -12834,22 +12687,23 @@
 *	a fault, for example, due to break in the TX side of fiber.
 *
 ******************************************************************************/
-static void bnx2x_check_half_open_conn(struct link_params *params,
-				       struct link_vars *vars)
+int bnx2x_check_half_open_conn(struct link_params *params,
+				struct link_vars *vars,
+				u8 notify)
 {
 	struct bnx2x *bp = params->bp;
 	u32 lss_status = 0;
 	u32 mac_base;
 	/* In case link status is physically up @ 10G do */
-	if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
-		return;
+	if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) ||
+	    (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4)))
+		return 0;
 
 	if (CHIP_IS_E3(bp) &&
 	    (REG_RD(bp, MISC_REG_RESET_REG_2) &
 	      (MISC_REGISTERS_RESET_REG_2_XMAC))) {
 		/* Check E3 XMAC */
-		/*
-		 * Note that link speed cannot be queried here, since it may be
+		/* Note that link speed cannot be queried here, since it may be
 		 * zero while link is down. In case UMAC is active, LSS will
 		 * simply not be set
 		 */
@@ -12863,7 +12717,7 @@
 		if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
 			lss_status = 1;
 
-		bnx2x_analyze_link_error(params, vars, lss_status);
+		bnx2x_analyze_link_error(params, vars, lss_status, notify);
 	} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
 		   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
 		/* Check E1X / E2 BMAC */
@@ -12880,18 +12734,21 @@
 		REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
 		lss_status = (wb_data[0] > 0);
 
-		bnx2x_analyze_link_error(params, vars, lss_status);
+		bnx2x_analyze_link_error(params, vars, lss_status, notify);
 	}
+	return 0;
 }
 
 void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
 {
-	struct bnx2x *bp = params->bp;
 	u16 phy_idx;
+	struct bnx2x *bp = params->bp;
 	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
 		if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
 			bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
-			bnx2x_check_half_open_conn(params, vars);
+			if (bnx2x_check_half_open_conn(params, vars, 1) !=
+			    0)
+				DP(NETIF_MSG_LINK, "Fault detection failed\n");
 			break;
 		}
 	}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 763535e..ea4371f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -254,8 +254,10 @@
 #define FEATURE_CONFIG_PFC_ENABLED			(1<<1)
 #define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY		(1<<2)
 #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY	(1<<3)
+#define FEATURE_CONFIG_BC_SUPPORTS_AFEX			(1<<8)
 #define FEATURE_CONFIG_AUTOGREEEN_ENABLED			(1<<9)
 #define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED		(1<<10)
+#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET		(1<<11)
 	/* Will be populated during common init */
 	struct bnx2x_phy phy[MAX_PHYS];
 
@@ -495,4 +497,6 @@
 
 void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
 
+int bnx2x_check_half_open_conn(struct link_params *params,
+			       struct link_vars *vars, u8 notify);
 #endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e077d25..f755a66 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -39,7 +39,6 @@
 #include <linux/time.h>
 #include <linux/ethtool.h>
 #include <linux/mii.h>
-#include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -93,15 +92,11 @@
 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
 MODULE_FIRMWARE(FW_FILE_NAME_E2);
 
-static int multi_mode = 1;
-module_param(multi_mode, int, 0);
-MODULE_PARM_DESC(multi_mode, " Multi queue mode "
-			     "(0 Disable; 1 Enable (default))");
 
 int num_queues;
 module_param(num_queues, int, 0);
-MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
-				" (default is as a number of CPUs)");
+MODULE_PARM_DESC(num_queues,
+		 " Set number of queues (default is as a number of CPUs)");
 
 static int disable_tpa;
 module_param(disable_tpa, int, 0);
@@ -141,7 +136,9 @@
 	BCM57810,
 	BCM57810_MF,
 	BCM57840,
-	BCM57840_MF
+	BCM57840_MF,
+	BCM57811,
+	BCM57811_MF
 };
 
 /* indexed by board_type, above */
@@ -158,8 +155,9 @@
 	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
 	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
 	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
-	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
-						"Ethernet Multi Function"}
+	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
+	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
+	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
 };
 
 #ifndef PCI_DEVICE_ID_NX2_57710
@@ -195,6 +193,12 @@
 #ifndef PCI_DEVICE_ID_NX2_57840_MF
 #define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
 #endif
+#ifndef PCI_DEVICE_ID_NX2_57811
+#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_MF
+#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
+#endif
 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
@@ -207,6 +211,8 @@
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
 	{ 0 }
 };
 
@@ -220,15 +226,15 @@
 * General service functions
 ****************************************************************************/
 
-static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
+static void __storm_memset_dma_mapping(struct bnx2x *bp,
 				       u32 addr, dma_addr_t mapping)
 {
 	REG_WR(bp,  addr, U64_LO(mapping));
 	REG_WR(bp,  addr + 4, U64_HI(mapping));
 }
 
-static inline void storm_memset_spq_addr(struct bnx2x *bp,
-					 dma_addr_t mapping, u16 abs_fid)
+static void storm_memset_spq_addr(struct bnx2x *bp,
+				  dma_addr_t mapping, u16 abs_fid)
 {
 	u32 addr = XSEM_REG_FAST_MEMORY +
 			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
@@ -236,8 +242,8 @@
 	__storm_memset_dma_mapping(bp, addr, mapping);
 }
 
-static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
-					 u16 pf_id)
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+				  u16 pf_id)
 {
 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
 		pf_id);
@@ -249,8 +255,8 @@
 		pf_id);
 }
 
-static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
-					u8 enable)
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+				 u8 enable)
 {
 	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
 		enable);
@@ -262,8 +268,8 @@
 		enable);
 }
 
-static inline void storm_memset_eq_data(struct bnx2x *bp,
-				struct event_ring_data *eq_data,
+static void storm_memset_eq_data(struct bnx2x *bp,
+				 struct event_ring_data *eq_data,
 				u16 pfid)
 {
 	size_t size = sizeof(struct event_ring_data);
@@ -273,8 +279,8 @@
 	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
 }
 
-static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
-					u16 pfid)
+static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+				 u16 pfid)
 {
 	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
 	REG_WR16(bp, addr, eq_prod);
@@ -309,67 +315,6 @@
 #define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
 #define DMAE_DP_DST_NONE	"dst_addr [none]"
 
-static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
-			  int msglvl)
-{
-	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
-
-	switch (dmae->opcode & DMAE_COMMAND_DST) {
-	case DMAE_CMD_DST_PCI:
-		if (src_type == DMAE_CMD_SRC_PCI)
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
-			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
-			   dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		else
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
-			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_lo >> 2,
-			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
-			   dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		break;
-	case DMAE_CMD_DST_GRC:
-		if (src_type == DMAE_CMD_SRC_PCI)
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
-			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-			   dmae->len, dmae->dst_addr_lo >> 2,
-			   dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		else
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src [%08x], len [%d*4], dst [%08x]\n"
-			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_lo >> 2,
-			   dmae->len, dmae->dst_addr_lo >> 2,
-			   dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		break;
-	default:
-		if (src_type == DMAE_CMD_SRC_PCI)
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
-			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		else
-			DP(msglvl, "DMAE: opcode 0x%08x\n"
-			   "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
-			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-			   dmae->opcode, dmae->src_addr_lo >> 2,
-			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
-			   dmae->comp_val);
-		break;
-	}
-
-}
 
 /* copy command into DMAE command memory and set DMAE command go */
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -506,8 +451,6 @@
 	dmae.dst_addr_hi = 0;
 	dmae.len = len32;
 
-	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
-
 	/* issue the command and wait for completion */
 	bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
@@ -540,8 +483,6 @@
 	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
 	dmae.len = len32;
 
-	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
-
 	/* issue the command and wait for completion */
 	bnx2x_issue_dmae_with_comp(bp, &dmae);
 }
@@ -562,27 +503,6 @@
 	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
 }
 
-/* used only for slowpath so not inlined */
-static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
-{
-	u32 wb_write[2];
-
-	wb_write[0] = val_hi;
-	wb_write[1] = val_lo;
-	REG_WR_DMAE(bp, reg, wb_write, 2);
-}
-
-#ifdef USE_WB_RD
-static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
-{
-	u32 wb_data[2];
-
-	REG_RD_DMAE(bp, reg, wb_data, 2);
-
-	return HILO_U64(wb_data[0], wb_data[1]);
-}
-#endif
-
 static int bnx2x_mc_assert(struct bnx2x *bp)
 {
 	char last_idx;
@@ -756,7 +676,7 @@
 	printk("%s" "end of fw dump\n", lvl);
 }
 
-static inline void bnx2x_fw_dump(struct bnx2x *bp)
+static void bnx2x_fw_dump(struct bnx2x *bp)
 {
 	bnx2x_fw_dump_lvl(bp, KERN_ERR);
 }
@@ -1076,8 +996,8 @@
 	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
 }
 
-static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
-				     u32 expected, u32 poll_count)
+static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+				    u32 expected, u32 poll_count)
 {
 	u32 cur_cnt = poll_count;
 	u32 val;
@@ -1088,8 +1008,8 @@
 	return val;
 }
 
-static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
-						  char *msg, u32 poll_cnt)
+static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+					   char *msg, u32 poll_cnt)
 {
 	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
 	if (val != 0) {
@@ -1186,7 +1106,7 @@
 	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
 
 
-static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
+static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
 					 u32 poll_cnt)
 {
 	struct sdm_op_gen op_gen = {0};
@@ -1220,7 +1140,7 @@
 	return ret;
 }
 
-static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
 {
 	int pos;
 	u16 status;
@@ -1361,14 +1281,17 @@
 	int port = BP_PORT(bp);
 	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
 	u32 val = REG_RD(bp, addr);
-	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
-	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
+	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
+	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
 
 	if (msix) {
 		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 			 HC_CONFIG_0_REG_INT_LINE_EN_0);
 		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+		if (single_msix)
+			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
 	} else if (msi) {
 		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
 		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
@@ -1425,8 +1348,9 @@
 static void bnx2x_igu_int_enable(struct bnx2x *bp)
 {
 	u32 val;
-	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
-	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
+	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
+	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
 
 	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 
@@ -1436,6 +1360,9 @@
 		val |= (IGU_PF_CONF_FUNC_EN |
 			IGU_PF_CONF_MSI_MSIX_EN |
 			IGU_PF_CONF_ATTN_BIT_EN);
+
+		if (single_msix)
+			val |= IGU_PF_CONF_SINGLE_ISR_EN;
 	} else if (msi) {
 		val &= ~IGU_PF_CONF_INT_LINE_EN;
 		val |= (IGU_PF_CONF_FUNC_EN |
@@ -1455,6 +1382,9 @@
 
 	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 
+	if (val & IGU_PF_CONF_INT_LINE_EN)
+		pci_intx(bp->pdev, true);
+
 	barrier();
 
 	/* init leading/trailing edge */
@@ -1623,7 +1553,7 @@
  * Returns the recovery leader resource id according to the engine this function
  * belongs to. Currently only 2 engines are supported.
  */
-static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
 {
 	if (BP_PATH(bp))
 		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
@@ -1636,9 +1566,9 @@
  *
  * @bp: driver handle
  *
- * Tries to aquire a leader lock for cuurent engine.
+ * Tries to acquire a leader lock for the current engine.
  */
-static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
 {
 	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
 }
@@ -1719,6 +1649,27 @@
 
 	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 
+	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
+	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
+		/* If the Q update ramrod completed for the last Q in the AFEX
+		 * VIF-set flow, ACK the MCP at the end.
+		 *
+		 * Mark the pending-ACK-to-MCP bit first, to prevent the case
+		 * where both bits are clear; at the end of load/unload the
+		 * driver checks that sp_state is cleared, and this ordering
+		 * prevents races.
+		 */
+		smp_mb__before_clear_bit();
+		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
+		wmb();
+		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+		smp_mb__after_clear_bit();
+
+		/* schedule workqueue to send ack to MCP */
+		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+	}
+
 	return;
 }
 
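
The completion handler above sets the pending-MCP-ACK bit before clearing the FCoE-queue-pending bit, with barriers in between, so the slow-path task never sees both bits clear while an ACK is still owed. A hedged sketch of that two-flag handshake using C11 atomics; the flag names and ordering choices below are illustrative, not the driver's code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Two flags shared between a completion handler and a worker task. */
static atomic_bool q_update_pending = true;	/* set when the VIF-set flow starts */
static atomic_bool mcp_ack_pending;

/* Completion side: publish the ACK obligation before retiring the
 * update-pending flag, so the pair is never both-clear mid-flow. */
static void on_last_queue_update_done(void)
{
	atomic_store_explicit(&mcp_ack_pending, true, memory_order_release);
	atomic_store_explicit(&q_update_pending, false, memory_order_release);
}

/* Worker side: if it sees the ACK bit, it sends the ACK exactly once. */
static void sp_task(void)
{
	if (atomic_exchange(&mcp_ack_pending, false))
		printf("sending VIFSET ACK to management firmware\n");
}

int main(void)
{
	on_last_queue_update_done();
	sp_task();
	return 0;
}
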
@@ -2229,40 +2180,6 @@
 	return rc;
 }
 
-static void bnx2x_init_port_minmax(struct bnx2x *bp)
-{
-	u32 r_param = bp->link_vars.line_speed / 8;
-	u32 fair_periodic_timeout_usec;
-	u32 t_fair;
-
-	memset(&(bp->cmng.rs_vars), 0,
-	       sizeof(struct rate_shaping_vars_per_port));
-	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
-
-	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
-	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
-
-	/* this is the threshold below which no timer arming will occur
-	   1.25 coefficient is for the threshold to be a little bigger
-	   than the real time, to compensate for timer in-accuracy */
-	bp->cmng.rs_vars.rs_threshold =
-				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
-
-	/* resolution of fairness timer */
-	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
-	/* for 10G it is 1000usec. for 1G it is 10000usec. */
-	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
-
-	/* this is the threshold below which we won't arm the timer anymore */
-	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
-
-	/* we multiply by 1e3/8 to get bytes/msec.
-	   We don't want the credits to pass a credit
-	   of the t_fair*FAIR_MEM (algorithm resolution) */
-	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
-	/* since each tick is 4 usec */
-	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
-}
 
 /* Calculates the sum of vn_min_rates.
    It's needed for further normalizing of the min_rates.
@@ -2273,12 +2190,12 @@
     In the latter case fairness algorithm should be deactivated.
      If not all min_rates are zero then those that are zeroes will be set to 1.
  */
-static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
+static void bnx2x_calc_vn_min(struct bnx2x *bp,
+				      struct cmng_init_input *input)
 {
 	int all_zero = 1;
 	int vn;
 
-	bp->vn_weight_sum = 0;
 	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
 		u32 vn_cfg = bp->mf_config[vn];
 		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
@@ -2286,106 +2203,56 @@
 
 		/* Skip hidden vns */
 		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
-			continue;
-
+			vn_min_rate = 0;
 		/* If min rate is zero - set it to 1 */
-		if (!vn_min_rate)
+		else if (!vn_min_rate)
 			vn_min_rate = DEF_MIN_RATE;
 		else
 			all_zero = 0;
 
-		bp->vn_weight_sum += vn_min_rate;
+		input->vnic_min_rate[vn] = vn_min_rate;
 	}
 
 	/* if ETS or all min rates are zeros - disable fairness */
 	if (BNX2X_IS_ETS_ENABLED(bp)) {
-		bp->cmng.flags.cmng_enables &=
+		input->flags.cmng_enables &=
 					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
 	} else if (all_zero) {
-		bp->cmng.flags.cmng_enables &=
+		input->flags.cmng_enables &=
 					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
-		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-		   "  fairness will be disabled\n");
+		DP(NETIF_MSG_IFUP,
+		   "All MIN values are zeroes fairness will be disabled\n");
 	} else
-		bp->cmng.flags.cmng_enables |=
+		input->flags.cmng_enables |=
 					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
-static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
+static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
+				    struct cmng_init_input *input)
 {
-	struct rate_shaping_vars_per_vn m_rs_vn;
-	struct fairness_vars_per_vn m_fair_vn;
+	u16 vn_max_rate;
 	u32 vn_cfg = bp->mf_config[vn];
-	int func = func_by_vn(bp, vn);
-	u16 vn_min_rate, vn_max_rate;
-	int i;
 
-	/* If function is hidden - set min and max to zeroes */
-	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
-		vn_min_rate = 0;
+	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
 		vn_max_rate = 0;
-
-	} else {
+	else {
 		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
 
-		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
-				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-		/* If fairness is enabled (not all min rates are zeroes) and
-		   if current min rate is zero - set it to 1.
-		   This is a requirement of the algorithm. */
-		if (bp->vn_weight_sum && (vn_min_rate == 0))
-			vn_min_rate = DEF_MIN_RATE;
-
-		if (IS_MF_SI(bp))
+		if (IS_MF_SI(bp)) {
 			/* maxCfg in percents of linkspeed */
 			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
-		else
+		} else /* SD modes */
 			/* maxCfg is absolute in 100Mb units */
 			vn_max_rate = maxCfg * 100;
 	}
 
-	DP(NETIF_MSG_IFUP,
-	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
-	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
+	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
 
-	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
-	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
-
-	/* global vn counter - maximal Mbps for this vn */
-	m_rs_vn.vn_counter.rate = vn_max_rate;
-
-	/* quota - number of bytes transmitted in this period */
-	m_rs_vn.vn_counter.quota =
-				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
-
-	if (bp->vn_weight_sum) {
-		/* credit for each period of the fairness algorithm:
-		   number of bytes in T_FAIR (the vn share the port rate).
-		   vn_weight_sum should not be larger than 10000, thus
-		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
-		   than zero */
-		m_fair_vn.vn_credit_delta =
-			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
-						   (8 * bp->vn_weight_sum))),
-			      (bp->cmng.fair_vars.fair_threshold +
-							MIN_ABOVE_THRESH));
-		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
-		   m_fair_vn.vn_credit_delta);
-	}
-
-	/* Store it to internal memory */
-	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
-		REG_WR(bp, BAR_XSTRORM_INTMEM +
-		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
-		       ((u32 *)(&m_rs_vn))[i]);
-
-	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
-		REG_WR(bp, BAR_XSTRORM_INTMEM +
-		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
-		       ((u32 *)(&m_fair_vn))[i]);
+	input->vnic_max_rate[vn] = vn_max_rate;
 }
 
+
 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
 {
 	if (CHIP_REV_IS_SLOW(bp))
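
The refactor above stops writing per-VN rate-shaping structures directly and instead fills a cmng_init_input-style structure: hidden VNs get a zero minimum and maximum, an all-zero minimum (or ETS) disables fairness, and the maximum is either a percentage of line speed (SI mode) or absolute 100 Mb units (SD mode). A simplified, hypothetical version of that calculation with made-up types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VN		4
#define DEF_MIN_RATE	100

struct cmng_input {
	uint16_t vnic_min_rate[MAX_VN];
	uint16_t vnic_max_rate[MAX_VN];
	bool fairness_enabled;
};

struct vn_cfg {
	bool hidden;
	uint16_t min_bw;	/* raw config field, 0..100 */
	uint16_t max_bw;	/* raw config field */
	bool si_mode;		/* switch-independent: max is % of line speed */
};

static void calc_min_max(const struct vn_cfg *cfg, int nvn,
			 uint32_t line_speed, struct cmng_input *in)
{
	bool all_zero = true;

	for (int vn = 0; vn < nvn; vn++) {
		uint16_t min = cfg[vn].hidden ? 0 : cfg[vn].min_bw * 100;

		if (!cfg[vn].hidden && min == 0)
			min = DEF_MIN_RATE;	/* algorithm requires nonzero */
		else if (min)
			all_zero = false;
		in->vnic_min_rate[vn] = min;

		if (cfg[vn].hidden)
			in->vnic_max_rate[vn] = 0;
		else if (cfg[vn].si_mode)
			in->vnic_max_rate[vn] = line_speed * cfg[vn].max_bw / 100;
		else
			in->vnic_max_rate[vn] = cfg[vn].max_bw * 100;
	}
	in->fairness_enabled = !all_zero;
}

int main(void)
{
	struct vn_cfg cfg[2] = {
		{ .min_bw = 20, .max_bw = 50, .si_mode = true },
		{ .hidden = true },
	};
	struct cmng_input in = { 0 };

	calc_min_max(cfg, 2, 10000, &in);
	printf("vn0 min %u max %u, fairness %d\n",
	       (unsigned)in.vnic_min_rate[0], (unsigned)in.vnic_max_rate[0],
	       in.fairness_enabled);
	return 0;
}
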
@@ -2423,38 +2290,42 @@
 		bp->mf_config[vn] =
 			MF_CFG_RD(bp, func_mf_config[func].config);
 	}
+	if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
+		bp->flags |= MF_FUNC_DIS;
+	} else {
+		DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+		bp->flags &= ~MF_FUNC_DIS;
+	}
 }
 
 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 {
+	struct cmng_init_input input;
+	memset(&input, 0, sizeof(struct cmng_init_input));
+
+	input.port_rate = bp->link_vars.line_speed;
 
 	if (cmng_type == CMNG_FNS_MINMAX) {
 		int vn;
 
-		/* clear cmng_enables */
-		bp->cmng.flags.cmng_enables = 0;
-
 		/* read mf conf from shmem */
 		if (read_cfg)
 			bnx2x_read_mf_cfg(bp);
 
-		/* Init rate shaping and fairness contexts */
-		bnx2x_init_port_minmax(bp);
-
 		/* vn_weight_sum and enable fairness if not 0 */
-		bnx2x_calc_vn_weight_sum(bp);
+		bnx2x_calc_vn_min(bp, &input);
 
 		/* calculate and set min-max rate for each vn */
 		if (bp->port.pmf)
 			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
-				bnx2x_init_vn_minmax(bp, vn);
+				bnx2x_calc_vn_max(bp, vn, &input);
 
 		/* always enable rate shaping and fairness */
-		bp->cmng.flags.cmng_enables |=
+		input.flags.cmng_enables |=
 					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
-		if (!bp->vn_weight_sum)
-			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-				   "  fairness will be disabled\n");
+
+		bnx2x_init_cmng(&input, &bp->cmng);
 		return;
 	}
 
@@ -2463,6 +2334,35 @@
 	   "rate shaping and fairness are disabled\n");
 }
 
+static void storm_memset_cmng(struct bnx2x *bp,
+			      struct cmng_init *cmng,
+			      u8 port)
+{
+	int vn;
+	size_t size = sizeof(struct cmng_struct_per_port);
+
+	u32 addr = BAR_XSTRORM_INTMEM +
+			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
+
+	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+		int func = func_by_vn(bp, vn);
+
+		addr = BAR_XSTRORM_INTMEM +
+		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
+		size = sizeof(struct rate_shaping_vars_per_vn);
+		__storm_memset_struct(bp, addr, size,
+				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);
+
+		addr = BAR_XSTRORM_INTMEM +
+		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
+		size = sizeof(struct fairness_vars_per_vn);
+		__storm_memset_struct(bp, addr, size,
+				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
+	}
+}
+
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
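
storm_memset_cmng above copies the per-port and per-VN congestion-management structures into the chip's XSTORM internal memory dword by dword via __storm_memset_struct. A toy illustration of that "treat the struct as an array of u32 and write each word at addr + i*4" idiom; the struct layout and register write are simulated, not the firmware's real interface:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rate_vars {		/* illustrative stand-in for the FW struct */
	uint32_t rate;
	uint32_t quota;
};

/* stand-in for REG_WR(bp, addr, val) */
static void reg_wr(uint32_t addr, uint32_t val)
{
	printf("REG_WR 0x%08x <- 0x%08x\n", (unsigned)addr, (unsigned)val);
}

/* mirrors the shape of __storm_memset_struct: write the struct as dwords */
static void storm_memset_struct(uint32_t addr, const void *p, size_t size)
{
	uint32_t words[16];

	memcpy(words, p, size);		/* avoid strict-aliasing issues */
	for (size_t i = 0; i < size / 4; i++)
		reg_wr(addr + i * 4, words[i]);
}

int main(void)
{
	struct rate_vars rs = { .rate = 10000, .quota = 10000 * 25 / 8 };

	storm_memset_struct(0x28000, &rs, sizeof(rs));
	return 0;
}
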
@@ -2535,6 +2435,190 @@
 	bnx2x_link_report(bp);
 }
 
+static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
+				  u16 vlan_val, u8 allowed_prio)
+{
+	struct bnx2x_func_state_params func_params = {0};
+	struct bnx2x_func_afex_update_params *f_update_params =
+		&func_params.params.afex_update;
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
+
+	/* no need to wait for RAMROD completion, so don't
+	 * set RAMROD_COMP_WAIT flag
+	 */
+
+	f_update_params->vif_id = vifid;
+	f_update_params->afex_default_vlan = vlan_val;
+	f_update_params->allowed_priorities = allowed_prio;
+
+	/* if ramrod cannot be sent, respond to MCP immediately */
+	if (bnx2x_func_state_change(bp, &func_params) < 0)
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+
+	return 0;
+}
+
+static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
+					  u16 vif_index, u8 func_bit_map)
+{
+	struct bnx2x_func_state_params func_params = {0};
+	struct bnx2x_func_afex_viflists_params *update_params =
+		&func_params.params.afex_viflists;
+	int rc;
+	u32 drv_msg_code;
+
+	/* validate only LIST_SET and LIST_GET are received from switch */
+	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
+		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
+			  cmd_type);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
+
+	/* set parameters according to cmd_type */
+	update_params->afex_vif_list_command = cmd_type;
+	update_params->vif_list_index = cpu_to_le16(vif_index);
+	update_params->func_bit_map =
+		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
+	update_params->func_to_clear = 0;
+	drv_msg_code =
+		(cmd_type == VIF_LIST_RULE_GET) ?
+		DRV_MSG_CODE_AFEX_LISTGET_ACK :
+		DRV_MSG_CODE_AFEX_LISTSET_ACK;
+
+	/* if ramrod cannot be sent, respond to MCP immediately for
+	 * SET and GET requests (others are not triggered from MCP)
+	 */
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc < 0)
+		bnx2x_fw_command(bp, drv_msg_code, 0);
+
+	return 0;
+}
+
+static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
+{
+	struct afex_stats afex_stats;
+	u32 func = BP_ABS_FUNC(bp);
+	u32 mf_config;
+	u16 vlan_val;
+	u32 vlan_prio;
+	u16 vif_id;
+	u8 allowed_prio;
+	u8 vlan_mode;
+	u32 addr_to_write, vifid, addrs, stats_type, i;
+
+	if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
+		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
+		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
+	}
+
+	if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
+		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+		addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
+		   vifid, addrs);
+		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
+					       addrs);
+	}
+
+	if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
+		addr_to_write = SHMEM2_RD(bp,
+			afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
+		stats_type = SHMEM2_RD(bp,
+			afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
+
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
+		   addr_to_write);
+
+		bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
+
+		/* write response to scratchpad, for MCP */
+		for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
+			REG_WR(bp, addr_to_write + i*sizeof(u32),
+			       *(((u32 *)(&afex_stats))+i));
+
+		/* send ack message to MCP */
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
+	}
+
+	if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
+		mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
+		bp->mf_config[BP_VN(bp)] = mf_config;
+		DP(BNX2X_MSG_MCP,
+		   "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
+		   mf_config);
+
+		/* if VIF_SET is "enabled" */
+		if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
+			/* set rate limit directly to internal RAM */
+			struct cmng_init_input cmng_input;
+			struct rate_shaping_vars_per_vn m_rs_vn;
+			size_t size = sizeof(struct rate_shaping_vars_per_vn);
+			u32 addr = BAR_XSTRORM_INTMEM +
+			    XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
+
+			bp->mf_config[BP_VN(bp)] = mf_config;
+
+			bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
+			m_rs_vn.vn_counter.rate =
+				cmng_input.vnic_max_rate[BP_VN(bp)];
+			m_rs_vn.vn_counter.quota =
+				(m_rs_vn.vn_counter.rate *
+				 RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+			__storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
+
+			/* read relevant values from mf_cfg struct in shmem */
+			vif_id =
+				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+				 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
+				FUNC_MF_CFG_E1HOV_TAG_SHIFT;
+			vlan_val =
+				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+				 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
+				FUNC_MF_CFG_AFEX_VLAN_SHIFT;
+			vlan_prio = (mf_config &
+				     FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
+				    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
+			vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
+			vlan_mode =
+				(MF_CFG_RD(bp,
+					   func_mf_config[func].afex_config) &
+				 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
+				FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
+			allowed_prio =
+				(MF_CFG_RD(bp,
+					   func_mf_config[func].afex_config) &
+				 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
+				FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
+
+			/* send ramrod to FW, return in case of failure */
+			if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
+						   allowed_prio))
+				return;
+
+			bp->afex_def_vlan_tag = vlan_val;
+			bp->afex_vlan_mode = vlan_mode;
+		} else {
+			/* notify link down: the function is disabled in mf_config */
+			bnx2x_link_report(bp);
+
+			/* send INVALID VIF ramrod to FW */
+			bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
+
+			/* Reset the default afex VLAN */
+			bp->afex_def_vlan_tag = -1;
+		}
+	}
+}
+
 static void bnx2x_pmf_update(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
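
bnx2x_handle_afex_cmd above is essentially a dispatcher keyed on status bits from the management firmware: for each set bit it reads the request parameters from shared memory (SHMEM2_RD), performs the operation, and acknowledges through a firmware mailbox command (bnx2x_fw_command). A hedged sketch of that request/ack pattern; the bit values and helpers below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative request bits, loosely modeled on DRV_STATUS_AFEX_* */
#define REQ_LISTGET	(1u << 0)
#define REQ_LISTSET	(1u << 1)
#define REQ_STATSGET	(1u << 2)
#define REQ_VIFSET	(1u << 3)

static uint32_t shmem_read_param(int idx)
{
	/* stand-in for SHMEM2_RD(...) of the per-function parameter */
	return 0x10u + idx;
}

static void fw_ack(const char *what)
{
	/* stand-in for bnx2x_fw_command(bp, DRV_MSG_CODE_..._ACK, 0) */
	printf("ACK %s to management firmware\n", what);
}

static void handle_afex_cmd(uint32_t cmd)
{
	if (cmd & REQ_LISTGET) {
		uint32_t vifid = shmem_read_param(0);
		printf("LISTGET for vif 0x%x\n", (unsigned)vifid);
		fw_ack("LISTGET");
	}
	if (cmd & REQ_LISTSET) {
		uint32_t vifid = shmem_read_param(0);
		uint32_t addrs = shmem_read_param(1);
		printf("LISTSET for vif 0x%x addrs 0x%x\n",
		       (unsigned)vifid, (unsigned)addrs);
		fw_ack("LISTSET");
	}
	if (cmd & REQ_STATSGET) {
		printf("collect stats, write to scratchpad\n");
		fw_ack("STATSGET");
	}
	if (cmd & REQ_VIFSET) {
		printf("reconfigure default VLAN / rate limit\n");
		/* VIFSET is acked later, once all queue updates complete */
	}
}

int main(void)
{
	handle_afex_cmd(REQ_LISTGET | REQ_VIFSET);
	return 0;
}
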
@@ -2619,6 +2703,18 @@
 }
 
 
+static void storm_memset_func_cfg(struct bnx2x *bp,
+				 struct tstorm_eth_function_common_config *tcfg,
+				 u16 abs_fid)
+{
+	size_t size = sizeof(struct tstorm_eth_function_common_config);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
+}
+
 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
 	if (CHIP_IS_E1x(bp)) {
@@ -2648,9 +2744,9 @@
  *
  * Return the flags that are common for the Tx-only and not normal connections.
  */
-static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
-						   struct bnx2x_fastpath *fp,
-						   bool zero_stats)
+static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+					    struct bnx2x_fastpath *fp,
+					    bool zero_stats)
 {
 	unsigned long flags = 0;
 
@@ -2670,9 +2766,9 @@
 	return flags;
 }
 
-static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
-					      struct bnx2x_fastpath *fp,
-					      bool leading)
+static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+				       struct bnx2x_fastpath *fp,
+				       bool leading)
 {
 	unsigned long flags = 0;
 
@@ -2680,8 +2776,11 @@
 	if (IS_MF_SD(bp))
 		__set_bit(BNX2X_Q_FLG_OV, &flags);
 
-	if (IS_FCOE_FP(fp))
+	if (IS_FCOE_FP(fp)) {
 		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
+		/* For FCoE - force usage of default priority (for afex) */
+		__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
+	}
 
 	if (!fp->disable_tpa) {
 		__set_bit(BNX2X_Q_FLG_TPA, &flags);
@@ -2698,6 +2797,10 @@
 	/* Always set HW VLAN stripping */
 	__set_bit(BNX2X_Q_FLG_VLAN, &flags);
 
+	/* configure silent vlan removal */
+	if (IS_MF_AFEX(bp))
+		__set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
+
 
 	return flags | bnx2x_get_common_flags(bp, fp, true);
 }
@@ -2800,6 +2903,13 @@
 		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
 	else
 		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+	/* configure silent vlan removal
+	 * if multi function mode is afex, then mask default vlan
+	 */
+	if (IS_MF_AFEX(bp)) {
+		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
+		rxq_init->silent_removal_mask = VLAN_VID_MASK;
+	}
 }
 
 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
@@ -3051,7 +3161,7 @@
  *	configure FW
  *	notify others function about the change
  */
-static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
+static void bnx2x_config_mf_bw(struct bnx2x *bp)
 {
 	if (bp->link_vars.link_up) {
 		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
@@ -3060,7 +3170,7 @@
 	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 }
 
-static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
+static void bnx2x_set_mf_bw(struct bnx2x *bp)
 {
 	bnx2x_config_mf_bw(bp);
 	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
@@ -3147,7 +3257,7 @@
 }
 
 /* must be called under the spq lock */
-static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
 {
 	struct eth_spe *next_spe = bp->spq_prod_bd;
 
@@ -3163,7 +3273,7 @@
 }
 
 /* must be called under the spq lock */
-static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+static void bnx2x_sp_prod_update(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
 
@@ -3185,7 +3295,7 @@
  * @cmd:	command to check
  * @cmd_type:	command type
  */
-static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
 {
 	if ((cmd_type == NONE_CONNECTION_TYPE) ||
 	    (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
@@ -3319,7 +3429,7 @@
 #define BNX2X_DEF_SB_ATT_IDX	0x0001
 #define BNX2X_DEF_SB_IDX	0x0002
 
-static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
 {
 	struct host_sp_status_block *def_sb = bp->def_status_blk;
 	u16 rc = 0;
@@ -3451,7 +3561,7 @@
 	}
 }
 
-static inline void bnx2x_fan_failure(struct bnx2x *bp)
+static void bnx2x_fan_failure(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
 	u32 ext_phy_config;
@@ -3481,7 +3591,7 @@
 
 }
 
-static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 {
 	int port = BP_PORT(bp);
 	int reg_offset;
@@ -3521,7 +3631,7 @@
 	}
 }
 
-static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
 {
 	u32 val;
 
@@ -3552,7 +3662,7 @@
 	}
 }
 
-static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
 {
 	u32 val;
 
@@ -3596,7 +3706,7 @@
 	}
 }
 
-static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 {
 	u32 val;
 
@@ -3606,6 +3716,7 @@
 			int func = BP_FUNC(bp);
 
 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+			bnx2x_read_mf_cfg(bp);
 			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
 					func_mf_config[BP_ABS_FUNC(bp)].config);
 			val = SHMEM_RD(bp,
@@ -3628,6 +3739,9 @@
 				/* start dcbx state machine */
 				bnx2x_dcbx_set_params(bp,
 					BNX2X_DCBX_STATE_NEG_RECEIVED);
+			if (val & DRV_STATUS_AFEX_EVENT_MASK)
+				bnx2x_handle_afex_cmd(bp,
+					val & DRV_STATUS_AFEX_EVENT_MASK);
 			if (bp->link_vars.periodic_flags &
 			    PERIODIC_FLAGS_LINK_EVENT) {
 				/*  sync with link */
@@ -3722,7 +3836,7 @@
  *
  * Should be run under rtnl lock
  */
-static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
+static void bnx2x_clear_reset_global(struct bnx2x *bp)
 {
 	u32 val;
 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
@@ -3736,7 +3850,7 @@
  *
  * should be run under rtnl lock
  */
-static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
+static bool bnx2x_reset_is_global(struct bnx2x *bp)
 {
 	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 
@@ -3749,7 +3863,7 @@
  *
  * Should be run under rtnl lock
  */
-static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+static void bnx2x_set_reset_done(struct bnx2x *bp)
 {
 	u32 val;
 	u32 bit = BP_PATH(bp) ?
@@ -3874,7 +3988,7 @@
  *
  * should be run under rtnl lock
  */
-static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
+static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
 {
 	u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
 			     BNX2X_PATH0_LOAD_CNT_MASK);
@@ -3895,7 +4009,7 @@
 /*
  * Reset the load status for the current engine.
  */
-static inline void bnx2x_clear_load_status(struct bnx2x *bp)
+static void bnx2x_clear_load_status(struct bnx2x *bp)
 {
 	u32 val;
 	u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
@@ -3906,13 +4020,13 @@
 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
 }
 
-static inline void _print_next_block(int idx, const char *blk)
+static void _print_next_block(int idx, const char *blk)
 {
 	pr_cont("%s%s", idx ? ", " : "", blk);
 }
 
-static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
-						  bool print)
+static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
+					   bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -3959,8 +4073,8 @@
 	return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
-						  bool *global, bool print)
+static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
+					   bool *global, bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -4045,8 +4159,8 @@
 	return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
-						  bool print)
+static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
+					   bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -4097,8 +4211,8 @@
 	return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
-						  bool *global, bool print)
+static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
+					   bool *global, bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -4139,8 +4253,8 @@
 	return par_num;
 }
 
-static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
-						  bool print)
+static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
+					   bool print)
 {
 	int i = 0;
 	u32 cur_bit = 0;
@@ -4166,8 +4280,8 @@
 	return par_num;
 }
 
-static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
-				     u32 *sig)
+static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+			      u32 *sig)
 {
 	if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
 	    (sig[1] & HW_PRTY_ASSERT_SET_1) ||
@@ -4238,7 +4352,7 @@
 }
 
 
-static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
+static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
 {
 	u32 val;
 	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
@@ -4430,7 +4544,7 @@
 			     igu_addr);
 }
 
-static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
 {
 	/* No memory barriers */
 	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
@@ -4461,7 +4575,7 @@
 }
 #endif
 
-static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
 {
 	struct bnx2x_mcast_ramrod_params rparam;
 	int rc;
@@ -4486,8 +4600,8 @@
 	netif_addr_unlock_bh(bp->dev);
 }
 
-static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
-						   union event_ring_elem *elem)
+static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+					    union event_ring_elem *elem)
 {
 	unsigned long ramrod_flags = 0;
 	int rc = 0;
@@ -4534,7 +4648,7 @@
 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
 #endif
 
-static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
 {
 	netif_addr_lock_bh(bp->dev);
 
@@ -4555,7 +4669,94 @@
 	netif_addr_unlock_bh(bp->dev);
 }
 
-static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
+					      union event_ring_elem *elem)
+{
+	if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
+		DP(BNX2X_MSG_SP,
+		   "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
+		   elem->message.data.vif_list_event.func_bit_map);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
+			elem->message.data.vif_list_event.func_bit_map);
+	} else if (elem->message.data.vif_list_event.echo ==
+		   VIF_LIST_RULE_SET) {
+		DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
+	}
+}
+
+/* called with rtnl_lock */
+static void bnx2x_after_function_update(struct bnx2x *bp)
+{
+	int q, rc;
+	struct bnx2x_fastpath *fp;
+	struct bnx2x_queue_state_params queue_params = {NULL};
+	struct bnx2x_queue_update_params *q_update_params =
+		&queue_params.params.update;
+
+	/* Send Q update command with afex vlan removal values for all Qs */
+	queue_params.cmd = BNX2X_Q_CMD_UPDATE;
+
+	/* set silent vlan removal values according to vlan mode */
+	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+		  &q_update_params->update_flags);
+	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+		  &q_update_params->update_flags);
+	__set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+	/* in access mode, mask and value are 0 to strip all vlans */
+	if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
+		q_update_params->silent_removal_value = 0;
+		q_update_params->silent_removal_mask = 0;
+	} else {
+		q_update_params->silent_removal_value =
+			(bp->afex_def_vlan_tag & VLAN_VID_MASK);
+		q_update_params->silent_removal_mask = VLAN_VID_MASK;
+	}
+
+	for_each_eth_queue(bp, q) {
+		/* Set the appropriate Queue object */
+		fp = &bp->fp[q];
+		queue_params.q_obj = &fp->q_obj;
+
+		/* send the ramrod */
+		rc = bnx2x_queue_state_change(bp, &queue_params);
+		if (rc < 0)
+			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+				  q);
+	}
+
+#ifdef BCM_CNIC
+	if (!NO_FCOE(bp)) {
+		fp = &bp->fp[FCOE_IDX];
+		queue_params.q_obj = &fp->q_obj;
+
+		/* clear pending completion bit */
+		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
+
+		/* mark latest Q bit */
+		smp_mb__before_clear_bit();
+		set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
+		smp_mb__after_clear_bit();
+
+		/* send Q update ramrod for FCoE Q */
+		rc = bnx2x_queue_state_change(bp, &queue_params);
+		if (rc < 0)
+			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
+				  q);
+	} else {
+		/* If no FCoE ring - ACK MCP now */
+		bnx2x_link_report(bp);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+	}
+#else
+	/* If no FCoE ring - ACK MCP now */
+	bnx2x_link_report(bp);
+	bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+#endif /* BCM_CNIC */
+}
+
+static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
 	struct bnx2x *bp, u32 cid)
 {
 	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
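
bnx2x_after_function_update above chooses the silent-VLAN-removal parameters per mode: access mode strips every VLAN (mask and value zero), otherwise only the default AFEX VLAN is silently removed, and the same parameters are sent to every ethernet queue. A minimal sketch of that parameter selection with hypothetical types:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK	0x0fff

enum vlan_mode { VLAN_ACCESS_MODE, VLAN_TRUNK_MODE };

struct q_update {
	uint16_t silent_removal_value;
	uint16_t silent_removal_mask;
};

static void prep_silent_vlan_update(enum vlan_mode mode, uint16_t def_vlan,
				    struct q_update *u)
{
	if (mode == VLAN_ACCESS_MODE) {
		/* strip all VLANs: match anything */
		u->silent_removal_value = 0;
		u->silent_removal_mask = 0;
	} else {
		/* strip only the default AFEX VLAN */
		u->silent_removal_value = def_vlan & VLAN_VID_MASK;
		u->silent_removal_mask = VLAN_VID_MASK;
	}
}

int main(void)
{
	struct q_update u;

	prep_silent_vlan_update(VLAN_TRUNK_MODE, 100, &u);
	printf("value 0x%x mask 0x%x\n",
	       (unsigned)u.silent_removal_value, (unsigned)u.silent_removal_mask);
	return 0;
}
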
@@ -4653,6 +4854,28 @@
 				break;
 			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
 			goto next_spqe;
+		case EVENT_RING_OPCODE_FUNCTION_UPDATE:
+			DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
+			   "AFEX: ramrod completed FUNCTION_UPDATE\n");
+			f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
+
+			/* We will perform the Queues update from sp_rtnl task
+			 * as all Queue SP operations should run under
+			 * rtnl_lock.
+			 */
+			smp_mb__before_clear_bit();
+			set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
+				&bp->sp_rtnl_state);
+			smp_mb__after_clear_bit();
+
+			schedule_delayed_work(&bp->sp_rtnl_task, 0);
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
+			f_obj->complete_cmd(bp, f_obj,
+					    BNX2X_F_CMD_AFEX_VIFLISTS);
+			bnx2x_after_afex_vif_lists(bp, elem);
+			goto next_spqe;
 		case EVENT_RING_OPCODE_FUNCTION_START:
 			DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
 			   "got FUNC_START ramrod\n");
@@ -4784,6 +5007,13 @@
 
 	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
 	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+
+	/* afex - poll to check if VIFSET_ACK should be sent to MFW */
+	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
+			       &bp->sp_state)) {
+		bnx2x_link_report(bp);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
+	}
 }
 
 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -4870,7 +5100,7 @@
  * nic init service functions
  */
 
-static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
 {
 	u32 i;
 	if (!(len%4) && !(addr%4))
@@ -4883,10 +5113,10 @@
 }
 
 /* helper: writes FP SP data to FW - data_size in dwords */
-static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
-				       int fw_sb_id,
-				       u32 *sb_data_p,
-				       u32 data_size)
+static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
+				int fw_sb_id,
+				u32 *sb_data_p,
+				u32 data_size)
 {
 	int index;
 	for (index = 0; index < data_size; index++)
@@ -4896,7 +5126,7 @@
 			*(sb_data_p + index));
 }
 
-static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
+static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
 {
 	u32 *sb_data_p;
 	u32 data_size = 0;
@@ -4929,7 +5159,7 @@
 }
 
 /* helper:  writes SP SB data to FW */
-static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
+static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
 		struct hc_sp_status_block_data *sp_sb_data)
 {
 	int func = BP_FUNC(bp);
@@ -4941,7 +5171,7 @@
 			*((u32 *)sp_sb_data + i));
 }
 
-static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
+static void bnx2x_zero_sp_sb(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
 	struct hc_sp_status_block_data sp_sb_data;
@@ -4962,8 +5192,7 @@
 }
 
 
-static inline
-void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
+static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
 					   int igu_sb_id, int igu_seg_id)
 {
 	hc_sm->igu_sb_id = igu_sb_id;
@@ -4974,8 +5203,7 @@
 
 
 /* allocates state machine ids. */
-static inline
-void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
 {
 	/* zero out state machine indices */
 	/* rx indices */
@@ -5383,7 +5611,7 @@
 	return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
 }
 
-static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
 {
 	if (CHIP_IS_E1x(fp->bp))
 		return BP_L_ID(fp->bp) + fp->index;
@@ -5444,6 +5672,43 @@
 	bnx2x_update_fpsb_idx(fp);
 }
 
+static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
+{
+	int i;
+
+	for (i = 1; i <= NUM_TX_RINGS; i++) {
+		struct eth_tx_next_bd *tx_next_bd =
+			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+
+		tx_next_bd->addr_hi =
+			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+		tx_next_bd->addr_lo =
+			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+	}
+
+	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
+	txdata->tx_db.data.zero_fill1 = 0;
+	txdata->tx_db.data.prod = 0;
+
+	txdata->tx_pkt_prod = 0;
+	txdata->tx_pkt_cons = 0;
+	txdata->tx_bd_prod = 0;
+	txdata->tx_bd_cons = 0;
+	txdata->tx_pkt = 0;
+}
+
+static void bnx2x_init_tx_rings(struct bnx2x *bp)
+{
+	int i;
+	u8 cos;
+
+	for_each_tx_queue(bp, i)
+		for_each_cos_in_tx_queue(&bp->fp[i], cos)
+			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
+}
+
 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 {
 	int i;
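
bnx2x_init_tx_ring_one above chains the pages of a transmit descriptor ring: the last BD on each page is a "next" pointer holding the DMA address of the following page, with the final page wrapping back to the first (i % NUM_TX_RINGS). A small sketch of that chaining arithmetic; the page and descriptor sizes are illustrative, not the driver's constants:

#include <stdint.h>
#include <stdio.h>

#define NUM_TX_RINGS	4	/* pages per ring (illustrative) */
#define DESC_PER_PAGE	256
#define PAGE_BYTES	4096

int main(void)
{
	uint64_t ring_base = 0x100000;	/* pretend DMA address of page 0 */

	/* For page i (1-based as in the driver loop), the last descriptor
	 * points at page (i % NUM_TX_RINGS), so the ring wraps around. */
	for (int i = 1; i <= NUM_TX_RINGS; i++) {
		int last_desc = DESC_PER_PAGE * i - 1;
		uint64_t next = ring_base + (uint64_t)PAGE_BYTES * (i % NUM_TX_RINGS);

		printf("desc %4d -> next page at 0x%llx\n",
		       last_desc, (unsigned long long)next);
	}
	return 0;
}
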
@@ -5968,7 +6233,7 @@
 	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
 }
 
-static inline void bnx2x__common_init_phy(struct bnx2x *bp)
+static void bnx2x__common_init_phy(struct bnx2x *bp)
 {
 	u32 shmem_base[2], shmem2_base[2];
 	shmem_base[0] =  bp->common.shmem_base;
@@ -6255,12 +6520,24 @@
 	if (!CHIP_IS_E1(bp))
 		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
 
-	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
-		/* Bit-map indicating which L2 hdrs may appear
-		 * after the basic Ethernet header
-		 */
-		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
-		       bp->path_has_ovlan ? 7 : 6);
+	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
+		if (IS_MF_AFEX(bp)) {
+			/* configure that VNTag and VLAN headers must be
+			 * received in afex mode
+			 */
+			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
+			REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
+			REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
+			REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
+			REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
+		} else {
+			/* Bit-map indicating which L2 hdrs may appear
+			 * after the basic Ethernet header
+			 */
+			REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+			       bp->path_has_ovlan ? 7 : 6);
+		}
+	}
 
 	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
 	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
@@ -6294,9 +6571,21 @@
 	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
 	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
 
-	if (!CHIP_IS_E1x(bp))
-		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
-		       bp->path_has_ovlan ? 7 : 6);
+	if (!CHIP_IS_E1x(bp)) {
+		if (IS_MF_AFEX(bp)) {
+			/* configure that VNTag and VLAN headers must be
+			 * sent in afex mode
+			 */
+			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
+			REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
+			REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
+			REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
+			REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
+		} else {
+			REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+			       bp->path_has_ovlan ? 7 : 6);
+		}
+	}
 
 	REG_WR(bp, SRC_REG_SOFT_RST, 1);
 
@@ -6514,15 +6803,29 @@
 
 
 	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
-	if (CHIP_IS_E3B0(bp))
-		/* Ovlan exists only if we are in multi-function +
-		 * switch-dependent mode, in switch-independent there
-		 * is no ovlan headers
-		 */
-		REG_WR(bp, BP_PORT(bp) ?
-		       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
-		       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
-		       (bp->path_has_ovlan ? 7 : 6));
+	if (CHIP_IS_E3B0(bp)) {
+		if (IS_MF_AFEX(bp)) {
+			/* configure headers for AFEX mode */
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
+			       PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_MUST_HAVE_HDRS_PORT_1 :
+			       PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
+		} else {
+			/* Ovlan exists only if we are in multi-function +
+			 * switch-dependent mode, in switch-independent there
+			 * is no ovlan headers
+			 */
+			REG_WR(bp, BP_PORT(bp) ?
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+			       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+			       (bp->path_has_ovlan ? 7 : 6));
+		}
+	}
 
 	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
 	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
@@ -6584,10 +6887,15 @@
 		/* Bit-map indicating which L2 hdrs may appear after the
 		 * basic Ethernet header
 		 */
-		REG_WR(bp, BP_PORT(bp) ?
-			   NIG_REG_P1_HDRS_AFTER_BASIC :
-			   NIG_REG_P0_HDRS_AFTER_BASIC,
-			   IS_MF_SD(bp) ? 7 : 6);
+		if (IS_MF_AFEX(bp))
+			REG_WR(bp, BP_PORT(bp) ?
+			       NIG_REG_P1_HDRS_AFTER_BASIC :
+			       NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
+		else
+			REG_WR(bp, BP_PORT(bp) ?
+			       NIG_REG_P1_HDRS_AFTER_BASIC :
+			       NIG_REG_P0_HDRS_AFTER_BASIC,
+			       IS_MF_SD(bp) ? 7 : 6);
 
 		if (CHIP_IS_E3(bp))
 			REG_WR(bp, BP_PORT(bp) ?
@@ -6609,6 +6917,7 @@
 				val = 1;
 				break;
 			case MULTI_FUNCTION_SI:
+			case MULTI_FUNCTION_AFEX:
 				val = 2;
 				break;
 			}
@@ -6640,21 +6949,71 @@
 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
 {
 	int reg;
+	u32 wb_write[2];
 
 	if (CHIP_IS_E1(bp))
 		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
 	else
 		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
 
-	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
+	wb_write[0] = ONCHIP_ADDR1(addr);
+	wb_write[1] = ONCHIP_ADDR2(addr);
+	REG_WR_DMAE(bp, reg, wb_write, 2);
 }
 
-static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
+static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
+				   u8 idu_sb_id, bool is_Pf)
+{
+	u32 data, ctl, cnt = 100;
+	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
+	u32 sb_bit =  1 << (idu_sb_id%32);
+	u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
+
+	/* Not supported in BC mode */
+	if (CHIP_INT_MODE_IS_BC(bp))
+		return;
+
+	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
+			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT)	|
+		IGU_REGULAR_CLEANUP_SET				|
+		IGU_REGULAR_BCLEANUP;
+
+	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
+	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
+	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+			 data, igu_addr_data);
+	REG_WR(bp, igu_addr_data, data);
+	mmiowb();
+	barrier();
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+			  ctl, igu_addr_ctl);
+	REG_WR(bp, igu_addr_ctl, ctl);
+	mmiowb();
+	barrier();
+
+	/* wait for clean up to finish */
+	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
+		msleep(20);
+
+
+	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
+		DP(NETIF_MSG_HW,
+		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
+			  idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
+	}
+}
+
+static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
 {
 	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
 }
 
-static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
+static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
 {
 	u32 i, base = FUNC_ILT_BASE(func);
 	for (i = base; i < base + ILT_PER_FUNC; i++)
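
bnx2x_igu_clear_sb_gen above follows a common device-command idiom: write the data and control words, then poll an acknowledge register for the status-block bit with a bounded retry count, warning if the bit never appears. A generic sketch of that write-then-poll loop; the register layout is simulated and the command encodings are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_ack_reg;

static void reg_wr(const char *name, uint32_t val)
{
	printf("write 0x%08x to %s\n", (unsigned)val, name);
	/* simulate the device latching the cleanup bit after the command */
	fake_ack_reg |= 1u << 5;
}

static uint32_t reg_rd(void) { return fake_ack_reg; }

static bool igu_clear_sb(unsigned sb_id)
{
	uint32_t sb_bit = 1u << (sb_id % 32);
	int cnt = 100;

	reg_wr("IGU data", 0xdead0000u | sb_id);
	reg_wr("IGU ctl", 0xbeef0000u | sb_id);

	/* poll for the acknowledge bit with a bounded retry count */
	while (!(reg_rd() & sb_bit) && --cnt)
		;	/* the driver sleeps ~20ms per iteration here */

	if (!(reg_rd() & sb_bit)) {
		fprintf(stderr, "IGU cleanup for SB %u did not finish\n", sb_id);
		return false;
	}
	return true;
}

int main(void)
{
	return igu_clear_sb(5) ? 0 : 1;
}
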
@@ -7005,7 +7364,7 @@
 		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
 }
 
-static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 {
 	int num_groups;
 	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
@@ -7192,7 +7551,8 @@
 	unsigned long ramrod_flags = 0;
 
 #ifdef BCM_CNIC
-	if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) {
+	if (is_zero_ether_addr(bp->dev->dev_addr) &&
+	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
 		DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
 		   "Ignoring Zero MAC for STORAGE SD mode\n");
 		return 0;
@@ -7230,7 +7590,7 @@
 		BNX2X_DEV_INFO("set number of queues to 1\n");
 		break;
 	default:
-		/* Set number of queues according to bp->multi_mode value */
+		/* Set number of queues for MSI-X mode */
 		bnx2x_set_num_queues(bp);
 
 		BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
@@ -7239,15 +7599,17 @@
 		 * so try to enable MSI-X with the requested number of fp's
 		 * and fallback to MSI or legacy INTx with one fp
 		 */
-		if (bnx2x_enable_msix(bp)) {
-			/* failed to enable MSI-X */
-			BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n",
+		if (bnx2x_enable_msix(bp) ||
+		    bp->flags & USING_SINGLE_MSIX_FLAG) {
+			/* failed to enable multiple MSI-X */
+			BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
 				       bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
 
 			bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
 
 			/* Try to enable MSI */
-			if (!(bp->flags & DISABLE_MSI_FLAG))
+			if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
+			    !(bp->flags & DISABLE_MSI_FLAG))
 				bnx2x_enable_msi(bp);
 		}
 		break;
@@ -7368,7 +7730,7 @@
  *      - HC configuration
  *      - Queue's CDU context
  */
-static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
 	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
 {
 
@@ -7718,7 +8080,7 @@
 	/* TODO: Close Doorbell port? */
 }
 
-static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
+static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
 {
 	struct bnx2x_func_state_params func_params = {NULL};
 
@@ -7733,7 +8095,7 @@
 	return bnx2x_func_state_change(bp, &func_params);
 }
 
-static inline int bnx2x_func_stop(struct bnx2x *bp)
+static int bnx2x_func_stop(struct bnx2x *bp)
 {
 	struct bnx2x_func_state_params func_params = {NULL};
 	int rc;
@@ -7848,7 +8210,7 @@
 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
 }
 
-static inline int bnx2x_func_wait_started(struct bnx2x *bp)
+static int bnx2x_func_wait_started(struct bnx2x *bp)
 {
 	int tout = 50;
 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -8158,7 +8520,7 @@
  *
  * @bp:	driver handle
  */
-static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+static void bnx2x_mcp_wait_one(struct bnx2x *bp)
 {
 	/* special handling for emulation and FPGA,
 	   wait 10 times longer */
@@ -8494,7 +8856,7 @@
 	return rc;
 }
 
-static inline void bnx2x_recovery_failed(struct bnx2x *bp)
+static void bnx2x_recovery_failed(struct bnx2x *bp)
 {
 	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
 
@@ -8727,7 +9089,8 @@
 #endif
 	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
 		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
-
+	if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
+		bnx2x_after_function_update(bp);
 	/*
 	 * in case of fan failure we need to reset id if the "stop on error"
 	 * debug flag is set, since we trying to prevent permanent overheating
@@ -9122,13 +9485,34 @@
 	return bnx2x_prev_mcp_done(bp);
 }
 
+/* A previous driver DMAE transaction may have been interrupted when the
+ * pre-boot stage ended and boot began, or when a kdump kernel was loaded.
+ * Either case invalidates the addresses of the transaction, leaving the
+ * was-error bit set in the PCI block and causing all hw-to-host PCIe
+ * transactions to time out. If this happened, clear both the interrupt
+ * that detected it in the PGLUE block and the was-done bit.
+ */
+static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+	if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+		BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+	}
+}
+
 static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
 {
 	int time_counter = 10;
 	u32 rc, fw, hw_lock_reg, hw_lock_val;
 	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
 
-       /* Release previously held locks */
+	/* Clear HW errors which may have resulted from an interrupted
+	 * DMAE transaction.
+	 */
+	bnx2x_prev_interrupted_dmae(bp);
+
+	/* Release previously held locks */
 	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
 		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
 		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
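
The new bnx2x_prev_interrupted_dmae helper checks a latched was-error status bit left behind by an interrupted pre-boot or kdump DMAE transaction and clears it for the current function before the new driver instance proceeds. A sketch of that check-and-clear pattern; the register names and bit positions here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define WAS_ERROR_ATTN	(1u << 7)

static uint32_t int_sts = WAS_ERROR_ATTN;	/* pretend latched status */
static uint32_t was_error_clr;

static void clear_interrupted_dmae(unsigned func_id)
{
	if (int_sts & WAS_ERROR_ATTN) {
		fprintf(stderr,
			"was-error bit set on startup, clearing for func %u\n",
			func_id);
		was_error_clr |= 1u << func_id;	/* write-1-to-clear style */
		int_sts &= ~WAS_ERROR_ATTN;
	}
}

int main(void)
{
	clear_interrupted_dmae(0);
	return 0;
}
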
@@ -9201,6 +9585,17 @@
 	id |= (val & 0xf);
 	bp->common.chip_id = id;
 
+	/* force 57811 according to MISC register */
+	if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
+		if (CHIP_IS_57810(bp))
+			bp->common.chip_id = (CHIP_NUM_57811 << 16) |
+				(bp->common.chip_id & 0x0000FFFF);
+		else if (CHIP_IS_57810_MF(bp))
+			bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
+				(bp->common.chip_id & 0x0000FFFF);
+		bp->common.chip_id |= 0x1;
+	}
+
 	/* Set doorbell size */
 	bp->db_size = (1 << BNX2X_DB_SHIFT);
 
@@ -9293,7 +9688,9 @@
 	bp->link_params.feature_config_flags |=
 		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
 		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
-
+	bp->link_params.feature_config_flags |=
+		(val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
+		FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
 	bp->link_params.feature_config_flags |=
 		(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
 		FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
@@ -9925,6 +10322,9 @@
 
 			} else
 				bp->flags |= NO_FCOE_FLAG;
+
+			bp->mf_ext_config = cfg;
+
 		} else { /* SD MODE */
 			if (IS_MF_STORAGE_SD(bp)) {
 				if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
@@ -9946,6 +10346,11 @@
 				memset(bp->dev->dev_addr, 0, ETH_ALEN);
 			}
 		}
+
+		if (IS_MF_FCOE_AFEX(bp))
+			/* use FIP MAC as primary MAC */
+			memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+
 #endif
 	} else {
 		/* in SF read MACs from port configuration */
@@ -10118,6 +10523,19 @@
 				} else
 					BNX2X_DEV_INFO("illegal MAC address for SI\n");
 				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
+				if ((!CHIP_IS_E1x(bp)) &&
+				    (MF_CFG_RD(bp, func_mf_config[func].
+					       mac_upper) != 0xffff) &&
+				    (SHMEM2_HAS(bp,
+						afex_driver_support))) {
+					bp->mf_mode = MULTI_FUNCTION_AFEX;
+					bp->mf_config[vn] = MF_CFG_RD(bp,
+						func_mf_config[func].config);
+				} else {
+					BNX2X_DEV_INFO("can not configure afex mode\n");
+				}
+				break;
 			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
 				/* get OV configuration */
 				val = MF_CFG_RD(bp,
@@ -10158,6 +10576,9 @@
 				return -EPERM;
 			}
 			break;
+		case MULTI_FUNCTION_AFEX:
+			BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
+			break;
 		case MULTI_FUNCTION_SI:
 			BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
 				       func);
@@ -10325,6 +10746,9 @@
 		case MULTI_FUNCTION_SI:
 			SET_FLAGS(flags, MODE_MF_SI);
 			break;
+		case MULTI_FUNCTION_AFEX:
+			SET_FLAGS(flags, MODE_MF_AFEX);
+			break;
 		}
 	} else
 		SET_FLAGS(flags, MODE_SF);
@@ -10384,12 +10808,10 @@
 	if (BP_NOMCP(bp) && (func == 0))
 		dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
 
-	bp->multi_mode = multi_mode;
-
 	bp->disable_tpa = disable_tpa;
 
 #ifdef BCM_CNIC
-	bp->disable_tpa |= IS_MF_STORAGE_SD(bp);
+	bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
 #endif
 
 	/* Set TPA flags */
@@ -10408,7 +10830,7 @@
 
 	bp->mrrs = mrrs;
 
-	bp->tx_ring_size = MAX_TX_AVAIL;
+	bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
 
 	/* make sure that the numbers are in the right granularity */
 	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -10439,8 +10861,6 @@
 	if (CHIP_IS_E3B0(bp))
 		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
 
-	bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu);
-
 	return rc;
 }
 
@@ -10530,8 +10950,8 @@
 	return 0;
 }
 
-static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
-					 struct bnx2x_mcast_ramrod_params *p)
+static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+				      struct bnx2x_mcast_ramrod_params *p)
 {
 	int mc_count = netdev_mc_count(bp->dev);
 	struct bnx2x_mcast_list_elem *mc_mac =
@@ -10554,7 +10974,7 @@
 	return 0;
 }
 
-static inline void bnx2x_free_mcast_macs_list(
+static void bnx2x_free_mcast_macs_list(
 	struct bnx2x_mcast_ramrod_params *p)
 {
 	struct bnx2x_mcast_list_elem *mc_mac =
@@ -10572,7 +10992,7 @@
  *
  * We will use zero (0) as a MAC type for these MACs.
  */
-static inline int bnx2x_set_uc_list(struct bnx2x *bp)
+static int bnx2x_set_uc_list(struct bnx2x *bp)
 {
 	int rc;
 	struct net_device *dev = bp->dev;
@@ -10603,7 +11023,7 @@
 				 BNX2X_UC_LIST_MAC, &ramrod_flags);
 }
 
-static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+static int bnx2x_set_mc_list(struct bnx2x *bp)
 {
 	struct net_device *dev = bp->dev;
 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
@@ -10789,7 +11209,7 @@
 #endif
 };
 
-static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
+static int bnx2x_set_coherency_mask(struct bnx2x *bp)
 {
 	struct device *dev = &bp->pdev->dev;
 
@@ -11055,7 +11475,7 @@
 	return 0;
 }
 
-static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 {
 	const __be32 *source = (const __be32 *)_source;
 	u32 *target = (u32 *)_target;
@@ -11069,7 +11489,7 @@
    Ops array is stored in the following format:
    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
  */
-static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
 {
 	const __be32 *source = (const __be32 *)_source;
 	struct raw_op *target = (struct raw_op *)_target;
@@ -11087,7 +11507,7 @@
  * IRO array is stored in the following format:
  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
  */
-static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
 {
 	const __be32 *source = (const __be32 *)_source;
 	struct iro *target = (struct iro *)_target;
@@ -11107,7 +11527,7 @@
 	}
 }
 
-static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
 {
 	const __be16 *source = (const __be16 *)_source;
 	u16 *target = (u16 *)_target;
@@ -11244,11 +11664,13 @@
 	bnx2x_init_func_obj(bp, &bp->func_obj,
 			    bnx2x_sp(bp, func_rdata),
 			    bnx2x_sp_mapping(bp, func_rdata),
+			    bnx2x_sp(bp, func_afex_rdata),
+			    bnx2x_sp_mapping(bp, func_afex_rdata),
 			    &bnx2x_func_sp_drv);
 }
 
 /* must be called after sriov-enable */
-static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
 {
 	int cid_count = BNX2X_L2_CID_COUNT(bp);
 
@@ -11264,7 +11686,7 @@
  * @dev:	pci device
  *
  */
-static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
 {
 	int pos;
 	u16 control;
@@ -11325,6 +11747,8 @@
 	case BCM57810_MF:
 	case BCM57840:
 	case BCM57840_MF:
+	case BCM57811:
+	case BCM57811_MF:
 		max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
 		break;
 
@@ -11738,7 +12162,7 @@
  * This function will wait until the ramrod completion returns.
  * Return 0 if success, -ENODEV if ramrod doesn't return.
  */
-static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
 {
 	unsigned long ramrod_flags = 0;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index c25803b..bbd3874 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1483,6 +1483,11 @@
    starts at 0x0 for the A0 tape-out and increments by one for each
    all-layer tape-out. */
 #define MISC_REG_CHIP_REV					 0xa40c
+/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11-
+ * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72];
+ * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
+#define MISC_REG_CHIP_TYPE					 0xac60
+#define MISC_REG_CHIP_TYPE_57811_MASK				 (1<<1)
 /* [RW 32] The following driver registers(1...16) represent 16 drivers and
    32 clients. Each client can be controlled by one driver only. One in each
    bit represent that this driver control the appropriate client (Ex: bit 5
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 5135733..6c14b4a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -633,14 +633,17 @@
 }
 
 
-static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
-				 bool add, unsigned char *dev_addr, int index)
+void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+			  bool add, unsigned char *dev_addr, int index)
 {
 	u32 wb_data[2];
 	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
 			 NIG_REG_LLH0_FUNC_MEM;
 
-	if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE)
+	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
+		return;
+
+	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
 		return;
 
 	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
@@ -4090,12 +4093,6 @@
 		rss_mode = ETH_RSS_MODE_DISABLED;
 	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
 		rss_mode = ETH_RSS_MODE_REGULAR;
-	else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
-		rss_mode = ETH_RSS_MODE_VLAN_PRI;
-	else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
-		rss_mode = ETH_RSS_MODE_E1HOV_PRI;
-	else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
-		rss_mode = ETH_RSS_MODE_IP_DSCP;
 
 	data->rss_mode = rss_mode;
 
@@ -4404,6 +4401,9 @@
 		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
 	tx_data->anti_spoofing_flg =
 		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
+	tx_data->force_default_pri_flg =
+		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+
 	tx_data->tx_status_block_id = params->fw_sb_id;
 	tx_data->tx_sb_index_number = params->sb_cq_index;
 	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5331,6 +5331,17 @@
 	case BNX2X_F_STATE_STARTED:
 		if (cmd == BNX2X_F_CMD_STOP)
 			next_state = BNX2X_F_STATE_INITIALIZED;
+		/* afex ramrods can be sent only in started mode, and only
+		 * if a function_stop ramrod completion is not pending;
+		 * for these events the next state remains STARTED.
+		 */
+		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
+
+		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
 		else if (cmd == BNX2X_F_CMD_TX_STOP)
 			next_state = BNX2X_F_STATE_TX_STOPPED;
 
@@ -5618,6 +5629,83 @@
 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
 }
 
+static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
+					 struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct function_update_data *rdata =
+		(struct function_update_data *)o->afex_rdata;
+	dma_addr_t data_mapping = o->afex_rdata_mapping;
+	struct bnx2x_func_afex_update_params *afex_update_params =
+		&params->params.afex_update;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->vif_id_change_flg = 1;
+	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
+	rdata->afex_default_vlan_change_flg = 1;
+	rdata->afex_default_vlan =
+		cpu_to_le16(afex_update_params->afex_default_vlan);
+	rdata->allowed_priorities_change_flg = 1;
+	rdata->allowed_priorities = afex_update_params->allowed_priorities;
+
+	/*  No need for an explicit memory barrier here: we would anyway
+	 *  have to ensure the ordering of writing to the SPQ element
+	 *  and updating of the SPQ producer, which involves a memory
+	 *  read, and the full memory barrier for that is already placed
+	 *  inside bnx2x_sp_post().
+	 */
+	DP(BNX2X_MSG_SP,
+	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
+	   rdata->vif_id,
+	   rdata->afex_default_vlan, rdata->allowed_priorities);
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static
+inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
+					 struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct afex_vif_list_ramrod_data *rdata =
+		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
+	struct bnx2x_func_afex_viflists_params *afex_viflist_params =
+		&params->params.afex_viflists;
+	u64 *p_rdata = (u64 *)rdata;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->vif_list_index = afex_viflist_params->vif_list_index;
+	rdata->func_bit_map = afex_viflist_params->func_bit_map;
+	rdata->afex_vif_list_command =
+		afex_viflist_params->afex_vif_list_command;
+	rdata->func_to_clear = afex_viflist_params->func_to_clear;
+
+	/* send the sub-command type in the echo field */
+	rdata->echo = afex_viflist_params->afex_vif_list_command;
+
+	/*  No need for an explicit memory barrier here: we would anyway
+	 *  have to ensure the ordering of writing to the SPQ element
+	 *  and updating of the SPQ producer, which involves a memory
+	 *  read, and the full memory barrier for that is already placed
+	 *  inside bnx2x_sp_post().
+	 */
+
+	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
+	   rdata->afex_vif_list_command, rdata->vif_list_index,
+	   rdata->func_bit_map, rdata->func_to_clear);
+
+	/* this ramrod sends data directly and not through DMA mapping */
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
+			     U64_HI(*p_rdata), U64_LO(*p_rdata),
+			     NONE_CONNECTION_TYPE);
+}
+
 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
 				       struct bnx2x_func_state_params *params)
 {
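
The two AFEX send helpers above are only reached through the function state
machine; a minimal caller-side sketch, assuming the usual bnx2x slow-path flow
(vif_id, dvlan and prios are placeholder values, not names from this patch,
and error handling is omitted):

	struct bnx2x_func_state_params func_params = {NULL};
	int rc;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
	func_params.params.afex_update.vif_id = vif_id;
	func_params.params.afex_update.afex_default_vlan = dvlan;
	func_params.params.afex_update.allowed_priorities = prios;

	/* The transition check keeps the state at STARTED for AFEX commands
	 * and ends up in bnx2x_func_send_afex_update() above.
	 */
	rc = bnx2x_func_state_change(bp, &func_params);

BNX2X_F_CMD_AFEX_VIFLISTS would be issued the same way, with
params.afex_viflists filled in instead.
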
@@ -5669,6 +5757,10 @@
 		return bnx2x_func_send_stop(bp, params);
 	case BNX2X_F_CMD_HW_RESET:
 		return bnx2x_func_hw_reset(bp, params);
+	case BNX2X_F_CMD_AFEX_UPDATE:
+		return bnx2x_func_send_afex_update(bp, params);
+	case BNX2X_F_CMD_AFEX_VIFLISTS:
+		return bnx2x_func_send_afex_viflists(bp, params);
 	case BNX2X_F_CMD_TX_STOP:
 		return bnx2x_func_send_tx_stop(bp, params);
 	case BNX2X_F_CMD_TX_START:
@@ -5682,6 +5774,7 @@
 void bnx2x_init_func_obj(struct bnx2x *bp,
 			 struct bnx2x_func_sp_obj *obj,
 			 void *rdata, dma_addr_t rdata_mapping,
+			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
 			 struct bnx2x_func_sp_drv_ops *drv_iface)
 {
 	memset(obj, 0, sizeof(*obj));
@@ -5690,7 +5783,8 @@
 
 	obj->rdata = rdata;
 	obj->rdata_mapping = rdata_mapping;
-
+	obj->afex_rdata = afex_rdata;
+	obj->afex_rdata_mapping = afex_rdata_mapping;
 	obj->send_cmd = bnx2x_func_send_cmd;
 	obj->check_transition = bnx2x_func_chk_transition;
 	obj->complete_cmd = bnx2x_func_comp_cmd;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 61a7670..efd80bd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -62,6 +62,8 @@
 	BNX2X_FILTER_MCAST_PENDING,
 	BNX2X_FILTER_MCAST_SCHED,
 	BNX2X_FILTER_RSS_CONF_PENDING,
+	BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
+	BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
 };
 
 struct bnx2x_raw_obj {
@@ -432,6 +434,8 @@
 	BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
 };
 
+void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+			  bool add, unsigned char *dev_addr, int index);
 
 /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 
@@ -685,9 +689,6 @@
 	/* RSS_MODE bits are mutually exclusive */
 	BNX2X_RSS_MODE_DISABLED,
 	BNX2X_RSS_MODE_REGULAR,
-	BNX2X_RSS_MODE_VLAN_PRI,
-	BNX2X_RSS_MODE_E1HOV_PRI,
-	BNX2X_RSS_MODE_IP_DSCP,
 
 	BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
 
@@ -801,7 +802,8 @@
 	BNX2X_Q_FLG_TX_SWITCH,
 	BNX2X_Q_FLG_TX_SEC,
 	BNX2X_Q_FLG_ANTI_SPOOF,
-	BNX2X_Q_FLG_SILENT_VLAN_REM
+	BNX2X_Q_FLG_SILENT_VLAN_REM,
+	BNX2X_Q_FLG_FORCE_DEFAULT_PRI
 };
 
 /* Queue type options: queue type may be a combination of below. */
@@ -963,6 +965,11 @@
 	} params;
 };
 
+struct bnx2x_viflist_params {
+	u8 echo_res;
+	u8 func_bit_map_res;
+};
+
 struct bnx2x_queue_sp_obj {
 	u32		cids[BNX2X_MULTI_TX_COS];
 	u8		cl_id;
@@ -1045,6 +1052,8 @@
 	BNX2X_F_CMD_START,
 	BNX2X_F_CMD_STOP,
 	BNX2X_F_CMD_HW_RESET,
+	BNX2X_F_CMD_AFEX_UPDATE,
+	BNX2X_F_CMD_AFEX_VIFLISTS,
 	BNX2X_F_CMD_TX_STOP,
 	BNX2X_F_CMD_TX_START,
 	BNX2X_F_CMD_MAX,
@@ -1089,6 +1098,18 @@
 	u8 network_cos_mode;
 };
 
+struct bnx2x_func_afex_update_params {
+	u16 vif_id;
+	u16 afex_default_vlan;
+	u8 allowed_priorities;
+};
+
+struct bnx2x_func_afex_viflists_params {
+	u16 vif_list_index;
+	u8 func_bit_map;
+	u8 afex_vif_list_command;
+	u8 func_to_clear;
+};
 struct bnx2x_func_tx_start_params {
 	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
 	u8 dcb_enabled;
@@ -1110,6 +1131,8 @@
 		struct bnx2x_func_hw_init_params hw_init;
 		struct bnx2x_func_hw_reset_params hw_reset;
 		struct bnx2x_func_start_params start;
+		struct bnx2x_func_afex_update_params afex_update;
+		struct bnx2x_func_afex_viflists_params afex_viflists;
 		struct bnx2x_func_tx_start_params tx_start;
 	} params;
 };
@@ -1154,6 +1177,13 @@
 	void			*rdata;
 	dma_addr_t		rdata_mapping;
 
+	/* Buffer to use as afex ramrod data and its mapping.
+	 * This can't be the same rdata as above because afex ramrod requests
+	 * can arrive at the object in parallel with other ramrod requests.
+	 */
+	void			*afex_rdata;
+	dma_addr_t		afex_rdata_mapping;
+
 	/* this mutex validates that when pending flag is taken, the next
 	 * ramrod to be sent will be the one set the pending bit
 	 */
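
The comment above is the rationale for the second buffer; a brief hedged
illustration of the hazard it avoids (the interleaving is assumed, not spelled
out in the patch):

	/* With a single shared buffer, this interleaving would corrupt the
	 * first request while the firmware may still be reading it via DMA:
	 *
	 *   bnx2x_func_send_start():        memset(o->rdata); fill; sp_post()
	 *   bnx2x_func_send_afex_update():  memset(o->rdata); ...  too early
	 *
	 * hence the AFEX senders fill o->afex_rdata / afex_rdata_mapping.
	 */
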
@@ -1197,6 +1227,7 @@
 void bnx2x_init_func_obj(struct bnx2x *bp,
 			 struct bnx2x_func_sp_obj *obj,
 			 void *rdata, dma_addr_t rdata_mapping,
+			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
 			 struct bnx2x_func_sp_drv_ops *drv_iface);
 
 int bnx2x_func_state_change(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index e1c9310..1e2785c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1316,7 +1316,7 @@
  *
  * @param bp
  */
-static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 {
 	int i;
 	int first_queue_query_index;
@@ -1561,3 +1561,274 @@
 		UPDATE_FW_STAT_OLD(mac_discard);
 	}
 }
+
+void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
+			      u32 stats_type)
+{
+	int i;
+	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct per_queue_stats *fcoe_q_stats =
+		&bp->fw_stats_data->queue_stats[FCOE_IDX];
+
+	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+		&fcoe_q_stats->tstorm_queue_statistics;
+
+	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
+		&fcoe_q_stats->ustorm_queue_statistics;
+
+	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+		&fcoe_q_stats->xstorm_queue_statistics;
+
+	struct fcoe_statistics_params *fw_fcoe_stat =
+		&bp->fw_stats_data->fcoe;
+
+	memset(afex_stats, 0, sizeof(struct afex_stats));
+
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+		ADD_64(afex_stats->rx_unicast_bytes_hi,
+		       qstats->total_unicast_bytes_received_hi,
+		       afex_stats->rx_unicast_bytes_lo,
+		       qstats->total_unicast_bytes_received_lo);
+
+		ADD_64(afex_stats->rx_broadcast_bytes_hi,
+		       qstats->total_broadcast_bytes_received_hi,
+		       afex_stats->rx_broadcast_bytes_lo,
+		       qstats->total_broadcast_bytes_received_lo);
+
+		ADD_64(afex_stats->rx_multicast_bytes_hi,
+		       qstats->total_multicast_bytes_received_hi,
+		       afex_stats->rx_multicast_bytes_lo,
+		       qstats->total_multicast_bytes_received_lo);
+
+		ADD_64(afex_stats->rx_unicast_frames_hi,
+		       qstats->total_unicast_packets_received_hi,
+		       afex_stats->rx_unicast_frames_lo,
+		       qstats->total_unicast_packets_received_lo);
+
+		ADD_64(afex_stats->rx_broadcast_frames_hi,
+		       qstats->total_broadcast_packets_received_hi,
+		       afex_stats->rx_broadcast_frames_lo,
+		       qstats->total_broadcast_packets_received_lo);
+
+		ADD_64(afex_stats->rx_multicast_frames_hi,
+		       qstats->total_multicast_packets_received_hi,
+		       afex_stats->rx_multicast_frames_lo,
+		       qstats->total_multicast_packets_received_lo);
+
+		/* sum into rx_frames_discarded all packets discarded
+		 * due to size, ttl0 and checksum
+		 */
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       qstats->total_packets_received_checksum_discarded_hi,
+		       afex_stats->rx_frames_discarded_lo,
+		       qstats->total_packets_received_checksum_discarded_lo);
+
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       qstats->total_packets_received_ttl0_discarded_hi,
+		       afex_stats->rx_frames_discarded_lo,
+		       qstats->total_packets_received_ttl0_discarded_lo);
+
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       qstats->etherstatsoverrsizepkts_hi,
+		       afex_stats->rx_frames_discarded_lo,
+		       qstats->etherstatsoverrsizepkts_lo);
+
+		ADD_64(afex_stats->rx_frames_dropped_hi,
+		       qstats->no_buff_discard_hi,
+		       afex_stats->rx_frames_dropped_lo,
+		       qstats->no_buff_discard_lo);
+
+		ADD_64(afex_stats->tx_unicast_bytes_hi,
+		       qstats->total_unicast_bytes_transmitted_hi,
+		       afex_stats->tx_unicast_bytes_lo,
+		       qstats->total_unicast_bytes_transmitted_lo);
+
+		ADD_64(afex_stats->tx_broadcast_bytes_hi,
+		       qstats->total_broadcast_bytes_transmitted_hi,
+		       afex_stats->tx_broadcast_bytes_lo,
+		       qstats->total_broadcast_bytes_transmitted_lo);
+
+		ADD_64(afex_stats->tx_multicast_bytes_hi,
+		       qstats->total_multicast_bytes_transmitted_hi,
+		       afex_stats->tx_multicast_bytes_lo,
+		       qstats->total_multicast_bytes_transmitted_lo);
+
+		ADD_64(afex_stats->tx_unicast_frames_hi,
+		       qstats->total_unicast_packets_transmitted_hi,
+		       afex_stats->tx_unicast_frames_lo,
+		       qstats->total_unicast_packets_transmitted_lo);
+
+		ADD_64(afex_stats->tx_broadcast_frames_hi,
+		       qstats->total_broadcast_packets_transmitted_hi,
+		       afex_stats->tx_broadcast_frames_lo,
+		       qstats->total_broadcast_packets_transmitted_lo);
+
+		ADD_64(afex_stats->tx_multicast_frames_hi,
+		       qstats->total_multicast_packets_transmitted_hi,
+		       afex_stats->tx_multicast_frames_lo,
+		       qstats->total_multicast_packets_transmitted_lo);
+
+		ADD_64(afex_stats->tx_frames_dropped_hi,
+		       qstats->total_transmitted_dropped_packets_error_hi,
+		       afex_stats->tx_frames_dropped_lo,
+		       qstats->total_transmitted_dropped_packets_error_lo);
+	}
+
+	/* now add FCoE statistics which are collected separately
+	 * (both offloaded and non-offloaded)
+	 */
+	if (!NO_FCOE(bp)) {
+		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
+			  LE32_0,
+			  afex_stats->rx_unicast_bytes_lo,
+			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+			  afex_stats->rx_unicast_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+			  afex_stats->rx_broadcast_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
+			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+			  afex_stats->rx_multicast_bytes_lo,
+			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_unicast_frames_lo,
+			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_unicast_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_broadcast_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
+			  LE32_0,
+			  afex_stats->rx_multicast_frames_lo,
+			  fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_discarded_lo,
+			  fcoe_q_tstorm_stats->checksum_discard);
+
+		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_discarded_lo,
+			  fcoe_q_tstorm_stats->pkts_too_big_discard);
+
+		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_discarded_lo,
+			  fcoe_q_tstorm_stats->ttl0_discard);
+
+		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
+			    LE16_0,
+			    afex_stats->rx_frames_dropped_lo,
+			    fcoe_q_tstorm_stats->no_buff_discard);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
+
+		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->rx_frames_dropped_lo,
+			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
+
+		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
+			  LE32_0,
+			  afex_stats->tx_unicast_bytes_lo,
+			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
+			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+			  afex_stats->tx_unicast_bytes_lo,
+			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
+			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+			  afex_stats->tx_broadcast_bytes_lo,
+			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
+			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+			  afex_stats->tx_multicast_bytes_lo,
+			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_unicast_frames_lo,
+			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_unicast_frames_lo,
+			  fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_broadcast_frames_lo,
+			  fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
+			  LE32_0,
+			  afex_stats->tx_multicast_frames_lo,
+			  fcoe_q_xstorm_stats->mcast_pkts_sent);
+
+		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
+			  LE32_0,
+			  afex_stats->tx_frames_dropped_lo,
+			  fcoe_q_xstorm_stats->error_drop_pkts);
+	}
+
+	/* if port stats are requested, add them to the PMF
+	 * stats, as they will anyway be accumulated by the
+	 * MCP before being sent to the switch
+	 */
+	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
+		ADD_64(afex_stats->rx_frames_dropped_hi,
+		       0,
+		       afex_stats->rx_frames_dropped_lo,
+		       estats->mac_filter_discard);
+		ADD_64(afex_stats->rx_frames_dropped_hi,
+		       0,
+		       afex_stats->rx_frames_dropped_lo,
+		       estats->brb_truncate_discard);
+		ADD_64(afex_stats->rx_frames_discarded_hi,
+		       0,
+		       afex_stats->rx_frames_discarded_lo,
+		       estats->mac_discard);
+	}
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 2b46e1e..93e689fd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -338,6 +338,18 @@
 		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
 	} while (0)
 
+#define LE32_0 ((__force __le32) 0)
+#define LE16_0 ((__force __le16) 0)
+
+/* The __force casts in LE32_0/LE16_0 are for cases where the high value is 0 */
+#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
+		ADD_64(s_hi, le32_to_cpu(a_hi_le), \
+		       s_lo, le32_to_cpu(a_lo_le))
+
+#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
+		ADD_64(s_hi, le16_to_cpu(a_hi_le), \
+		       s_lo, le16_to_cpu(a_lo_le))
+
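
A small worked example of the hi/lo accumulation that ADD_64_LE/ADD_64_LE16
wrap, assuming the ADD_64 definition earlier in this header (the low words are
added first and a carry is folded into the high word when the low word wraps);
the values are illustrative only:

	u32 s_hi = 0, s_lo = 0xfffffff0;
	__le32 fw_cnt = cpu_to_le32(0x20);	/* FW counters are little-endian */

	ADD_64_LE(s_hi, LE32_0, s_lo, fw_cnt);
	/* s_lo wrapped to 0x10, so the carry made s_hi == 1 */
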
 /* difference = minuend - subtrahend */
 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
 	do { \
@@ -529,4 +541,7 @@
  * @bp:		driver handle
  */
 void bnx2x_save_statistics(struct bnx2x *bp);
+
+void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
+			      u32 stats_type);
 #endif /* BNX2X_STATS_H */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 062ac33..d55df32 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -879,8 +879,13 @@
 		if (sblk->status & SD_STATUS_LINK_CHG)
 			work_exists = 1;
 	}
-	/* check for RX/TX work to do */
-	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
+
+	/* check for TX work to do */
+	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
+		work_exists = 1;
+
+	/* check for RX work to do */
+	if (tnapi->rx_rcb_prod_idx &&
 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
 		work_exists = 1;
 
@@ -5617,17 +5622,29 @@
 	}
 }
 
+static void tg3_frag_free(bool is_frag, void *data)
+{
+	if (is_frag)
+		put_page(virt_to_head_page(data));
+	else
+		kfree(data);
+}
+
 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
 {
+	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
 	if (!ri->data)
 		return;
 
 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
 			 map_sz, PCI_DMA_FROMDEVICE);
-	kfree(ri->data);
+	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
 	ri->data = NULL;
 }
 
+
 /* Returns size of skb allocated or < 0 on error.
  *
  * We only need to fill in the address because the other members
@@ -5640,7 +5657,8 @@
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
-			    u32 opaque_key, u32 dest_idx_unmasked)
+			     u32 opaque_key, u32 dest_idx_unmasked,
+			     unsigned int *frag_size)
 {
 	struct tg3_rx_buffer_desc *desc;
 	struct ring_info *map;
@@ -5675,7 +5693,13 @@
 	 */
 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	data = kmalloc(skb_size, GFP_ATOMIC);
+	if (skb_size <= PAGE_SIZE) {
+		data = netdev_alloc_frag(skb_size);
+		*frag_size = skb_size;
+	} else {
+		data = kmalloc(skb_size, GFP_ATOMIC);
+		*frag_size = 0;
+	}
 	if (!data)
 		return -ENOMEM;
 
@@ -5683,8 +5707,8 @@
 				 data + TG3_RX_OFFSET(tp),
 				 data_size,
 				 PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(tp->pdev, mapping)) {
-		kfree(data);
+	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
+		tg3_frag_free(skb_size <= PAGE_SIZE, data);
 		return -EIO;
 	}
 
@@ -5835,18 +5859,19 @@
 
 		if (len > TG3_RX_COPY_THRESH(tp)) {
 			int skb_size;
+			unsigned int frag_size;
 
 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
-						    *post_ptr);
+						    *post_ptr, &frag_size);
 			if (skb_size < 0)
 				goto drop_it;
 
 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
 					 PCI_DMA_FROMDEVICE);
 
-			skb = build_skb(data);
+			skb = build_skb(data, frag_size);
 			if (!skb) {
-				kfree(data);
+				tg3_frag_free(frag_size != 0, data);
 				goto drop_it_no_recycle;
 			}
 			skb_reserve(skb, TG3_RX_OFFSET(tp));
@@ -6124,6 +6149,9 @@
 			return work_done;
 	}
 
+	if (!tnapi->rx_rcb_prod_idx)
+		return work_done;
+
 	/* run RX thread, within the bounds set by NAPI.
 	 * All RX "locking" is done by ensuring outside
 	 * code synchronizes with tg3->napi.poll()
@@ -7279,7 +7307,10 @@
 
 	/* Now allocate fresh SKBs for each rx ring. */
 	for (i = 0; i < tp->rx_pending; i++) {
-		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+		unsigned int frag_size;
+
+		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
+				      &frag_size) < 0) {
 			netdev_warn(tp->dev,
 				    "Using a smaller RX standard ring. Only "
 				    "%d out of %d buffers were allocated "
@@ -7311,7 +7342,10 @@
 	}
 
 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
-		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+		unsigned int frag_size;
+
+		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
+				      &frag_size) < 0) {
 			netdev_warn(tp->dev,
 				    "Using a smaller RX jumbo ring. Only %d "
 				    "out of %d buffers were allocated "
@@ -7567,6 +7601,12 @@
 		 */
 		switch (i) {
 		default:
+			if (tg3_flag(tp, ENABLE_RSS)) {
+				tnapi->rx_rcb_prod_idx = NULL;
+				break;
+			}
+			/* Fall through */
+		case 1:
 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
 			break;
 		case 2:
@@ -12234,6 +12274,7 @@
 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
 	.get_rxfh_indir		= tg3_get_rxfh_indir,
 	.set_rxfh_indir		= tg3_set_rxfh_indir,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 77977d7..0b640fa 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -70,7 +70,6 @@
 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
 static void bfa_ioc_recover(struct bfa_ioc *ioc);
-static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
@@ -346,8 +345,6 @@
 	switch (event) {
 	case IOC_E_FWRSP_GETATTR:
 		del_timer(&ioc->ioc_timer);
-		bfa_ioc_check_attr_wwns(ioc);
-		bfa_ioc_hb_monitor(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
 		break;
 
@@ -380,6 +377,7 @@
 {
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
 	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
+	bfa_ioc_hb_monitor(ioc);
 }
 
 static void
@@ -1207,27 +1205,62 @@
 	writel(1, sem_reg);
 }
 
+/* Clear fwver hdr */
+static void
+bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
+{
+	u32 pgnum, pgoff, loff = 0;
+	int i;
+
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
+		writel(0, ioc->ioc_regs.smem_page_start + loff);
+		loff += sizeof(u32);
+	}
+}
+
+
 static void
 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
 {
 	struct bfi_ioc_image_hdr fwhdr;
-	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	u32 fwstate, r32;
 
-	if (fwstate == BFI_IOC_UNINIT)
+	/* Spin on init semaphore to serialize. */
+	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
+	while (r32 & 0x1) {
+		udelay(20);
+		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
+	}
+
+	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+	if (fwstate == BFI_IOC_UNINIT) {
+		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 		return;
+	}
 
 	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
 
-	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
+		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 		return;
+	}
 
+	bfa_ioc_fwver_clear(ioc);
 	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
 
 	/*
 	 * Try to lock and then unlock the semaphore.
 	 */
 	readl(ioc->ioc_regs.ioc_sem_reg);
 	writel(1, ioc->ioc_regs.ioc_sem_reg);
+
+	/* Unlock init semaphore */
+	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 }
 
 static void
@@ -1585,11 +1618,6 @@
 	u32 i;
 	u32 asicmode;
 
-	/**
-	 * Initialize LMEM first before code download
-	 */
-	bfa_ioc_lmem_init(ioc);
-
 	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
 
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
@@ -1914,6 +1942,10 @@
 	bfa_ioc_pll_init_asic(ioc);
 
 	ioc->pllinit = true;
+
+	/* Initialize LMEM */
+	bfa_ioc_lmem_init(ioc);
+
 	/*
 	 *  release semaphore.
 	 */
@@ -2513,13 +2545,6 @@
 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
 
-static void
-bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
-{
-	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
-		return;
-}
-
 /**
  * @dg hal_iocpf_pvt BFA IOC PF private functions
  * @{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 348479b..b6b036a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -199,9 +199,9 @@
  * Host to LPU mailbox message addresses
  */
 static const struct {
-	u32 	hfn_mbox;
-	u32 	lpu_mbox;
-	u32 	hfn_pgn;
+	u32	hfn_mbox;
+	u32	lpu_mbox;
+	u32	hfn_pgn;
 } ct_fnreg[] = {
 	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
 	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
@@ -803,17 +803,72 @@
 }
 
 #define CT2_NFC_MAX_DELAY       1000
+#define CT2_NFC_VER_VALID       0x143
+#define BFA_IOC_PLL_POLL        1000000
+
+static bool
+bfa_ioc_ct2_nfc_halted(void __iomem *rb)
+{
+	volatile u32 r32;
+
+	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+	if (r32 & __NFC_CONTROLLER_HALTED)
+		return true;
+
+	return false;
+}
+
+static void
+bfa_ioc_ct2_nfc_resume(void __iomem *rb)
+{
+	volatile u32 r32;
+	int i;
+
+	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
+	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+		if (!(r32 & __NFC_CONTROLLER_HALTED))
+			return;
+		udelay(1000);
+	}
+	BUG_ON(1);
+}
+
 static enum bfa_status
 bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
 {
 	volatile u32 wgn, r32;
-	int i;
+	u32 nfc_ver, i;
 
-	/*
-	 * Initialize PLL if not already done by NFC
-	 */
 	wgn = readl(rb + CT2_WGN_STATUS);
-	if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+
+	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
+
+	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
+		(nfc_ver >= CT2_NFC_VER_VALID)) {
+		if (bfa_ioc_ct2_nfc_halted(rb))
+			bfa_ioc_ct2_nfc_resume(rb);
+		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
+				rb + CT2_CSI_FW_CTL_SET_REG);
+
+		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
+				break;
+		}
+		BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
+
+		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
+			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
+			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
+				break;
+		}
+		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+		udelay(1000);
+
+		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
+		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
+	} else {
 		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
 		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
 			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -821,53 +876,48 @@
 				break;
 			udelay(1000);
 		}
+
+		bfa_ioc_ct2_mac_reset(rb);
+		bfa_ioc_ct2_sclk_init(rb);
+		bfa_ioc_ct2_lclk_init(rb);
+
+		/* release soft reset on s_clk & l_clk */
+		r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+				rb + CT2_APP_PLL_SCLK_CTL_REG);
+		r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+				rb + CT2_APP_PLL_LCLK_CTL_REG);
+	}
+
+	/* Announce flash device presence, if flash was corrupted. */
+	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+		r32 = readl((rb + PSS_GPIO_OUT_REG));
+		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
+		r32 = readl((rb + PSS_GPIO_OE_REG));
+		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
 	}
 
 	/*
 	 * Mask the interrupts and clear any
 	 * pending interrupts left by BIOS/EFI
 	 */
-
 	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
 	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
 
-	r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
-	if (r32 == 1) {
-		writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
-		readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
-	}
-	r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
-	if (r32 == 1) {
-		writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
-		readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
-	}
-
-	bfa_ioc_ct2_mac_reset(rb);
-	bfa_ioc_ct2_sclk_init(rb);
-	bfa_ioc_ct2_lclk_init(rb);
-
-	/*
-	 * release soft reset on s_clk & l_clk
-	 */
-	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
-	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
-			(rb + CT2_APP_PLL_SCLK_CTL_REG));
-
-	/*
-	 * release soft reset on s_clk & l_clk
-	 */
-	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
-	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
-		      (rb + CT2_APP_PLL_LCLK_CTL_REG));
-
-	/*
-	 * Announce flash device presence, if flash was corrupted.
-	 */
-	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
-		r32 = readl((rb + PSS_GPIO_OUT_REG));
-		writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
-		r32 = readl((rb + PSS_GPIO_OE_REG));
-		writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
+	/* For first time initialization, no need to clear interrupts */
+	r32 = readl(rb + HOST_SEM5_REG);
+	if (r32 & 0x1) {
+		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+		if (r32 == 1) {
+			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
+			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+		}
+		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+		if (r32 == 1) {
+			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+		}
 	}
 
 	bfa_ioc_ct2_mem_init(rb);
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index efacff3..0e094fe 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -339,10 +339,16 @@
 #define __A2T_AHB_LOAD			0x00000800
 #define __WGN_READY			0x00000400
 #define __GLBL_PF_VF_CFG_RDY		0x00000200
+#define CT2_NFC_CSR_CLR_REG             0x00027420
 #define CT2_NFC_CSR_SET_REG		0x00027424
 #define __HALT_NFC_CONTROLLER		0x00000002
 #define __NFC_CONTROLLER_HALTED		0x00001000
 
+#define CT2_RSC_GPR15_REG		0x0002765c
+#define CT2_CSI_FW_CTL_REG              0x00027080
+#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
+#define CT2_CSI_FW_CTL_SET_REG          0x00027088
+
 #define CT2_CSI_MAC0_CONTROL_REG	0x000270d0
 #define __CSI_MAC_RESET			0x00000010
 #define __CSI_MAC_AHB_RESET		0x00000008
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ff78f77..25c4e7f 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -80,8 +80,6 @@
 	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
 } while (0)
 
-#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
-
 static void
 bnad_add_to_list(struct bnad *bnad)
 {
@@ -103,7 +101,7 @@
  * Reinitialize completions in CQ, once Rx is taken down
  */
 static void
-bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
+bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bna_cq_entry *cmpl, *next_cmpl;
 	unsigned int wi_range, wis = 0, ccb_prod = 0;
@@ -141,7 +139,8 @@
 
 	for (j = 0; j < frag; j++) {
 		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
-			  skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
+			  skb_frag_size(&skb_shinfo(skb)->frags[j]),
+						DMA_TO_DEVICE);
 		dma_unmap_addr_set(&array[index], dma_addr, 0);
 		BNA_QE_INDX_ADD(index, 1, depth);
 	}
@@ -155,7 +154,7 @@
  * so DMA unmap & freeing is fine.
  */
 static void
-bnad_free_all_txbufs(struct bnad *bnad,
+bnad_txq_cleanup(struct bnad *bnad,
 		 struct bna_tcb *tcb)
 {
 	u32		unmap_cons;
@@ -183,13 +182,12 @@
 /* Data Path Handlers */
 
 /*
- * bnad_free_txbufs : Frees the Tx bufs on Tx completion
+ * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
  * Can be called in a) Interrupt context
  *		    b) Sending context
- *		    c) Tasklet context
  */
 static u32
-bnad_free_txbufs(struct bnad *bnad,
+bnad_txcmpl_process(struct bnad *bnad,
 		 struct bna_tcb *tcb)
 {
 	u32		unmap_cons, sent_packets = 0, sent_bytes = 0;
@@ -198,13 +196,7 @@
 	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff		*skb;
 
-	/*
-	 * Just return if TX is stopped. This check is useful
-	 * when bnad_free_txbufs() runs out of a tasklet scheduled
-	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
-	 * but this routine runs actually after the cleanup has been
-	 * executed.
-	 */
+	/* Just return if TX is stopped */
 	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 		return 0;
 
@@ -243,57 +235,8 @@
 	return sent_packets;
 }
 
-/* Tx Free Tasklet function */
-/* Frees for all the tcb's in all the Tx's */
-/*
- * Scheduled from sending context, so that
- * the fat Tx lock is not held for too long
- * in the sending context.
- */
-static void
-bnad_tx_free_tasklet(unsigned long bnad_ptr)
-{
-	struct bnad *bnad = (struct bnad *)bnad_ptr;
-	struct bna_tcb *tcb;
-	u32		acked = 0;
-	int			i, j;
-
-	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++) {
-			tcb = bnad->tx_info[i].tcb[j];
-			if (!tcb)
-				continue;
-			if (((u16) (*tcb->hw_consumer_index) !=
-				tcb->consumer_index) &&
-				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
-						  &tcb->flags))) {
-				acked = bnad_free_txbufs(bnad, tcb);
-				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
-					&tcb->flags)))
-					bna_ib_ack(tcb->i_dbell, acked);
-				smp_mb__before_clear_bit();
-				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
-			}
-			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
-						&tcb->flags)))
-				continue;
-			if (netif_queue_stopped(bnad->netdev)) {
-				if (acked && netif_carrier_ok(bnad->netdev) &&
-					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
-						BNAD_NETIF_WAKE_THRESHOLD) {
-					netif_wake_queue(bnad->netdev);
-					/* TODO */
-					/* Counters for individual TxQs? */
-					BNAD_UPDATE_CTR(bnad,
-						netif_queue_wakeup);
-				}
-			}
-		}
-	}
-}
-
 static u32
-bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
+bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
 {
 	struct net_device *netdev = bnad->netdev;
 	u32 sent = 0;
@@ -301,7 +244,7 @@
 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
 		return 0;
 
-	sent = bnad_free_txbufs(bnad, tcb);
+	sent = bnad_txcmpl_process(bnad, tcb);
 	if (sent) {
 		if (netif_queue_stopped(netdev) &&
 		    netif_carrier_ok(netdev) &&
@@ -330,13 +273,13 @@
 	struct bna_tcb *tcb = (struct bna_tcb *)data;
 	struct bnad *bnad = tcb->bnad;
 
-	bnad_tx(bnad, tcb);
+	bnad_tx_complete(bnad, tcb);
 
 	return IRQ_HANDLED;
 }
 
 static void
-bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
 
@@ -348,7 +291,7 @@
 }
 
 static void
-bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
 	struct bnad_skb_unmap *unmap_array;
@@ -369,11 +312,11 @@
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb(skb);
 	}
-	bnad_reset_rcb(bnad, rcb);
+	bnad_rcb_cleanup(bnad, rcb);
 }
 
 static void
-bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	u16 to_alloc, alloced, unmap_prod, wi_range;
 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
@@ -434,14 +377,14 @@
 	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
 		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
 			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
-			bnad_alloc_n_post_rxbufs(bnad, rcb);
+			bnad_rxq_post(bnad, rcb);
 		smp_mb__before_clear_bit();
 		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
 	}
 }
 
 static u32
-bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 {
 	struct bna_cq_entry *cmpl, *next_cmpl;
 	struct bna_rcb *rcb = NULL;
@@ -453,12 +396,8 @@
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
 
-	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
-
-	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
-		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
+	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
 		return 0;
-	}
 
 	prefetch(bnad->netdev);
 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
@@ -533,9 +472,8 @@
 
 		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 			napi_gro_receive(&rx_ctrl->napi, skb);
-		else {
+		else
 			netif_receive_skb(skb);
-		}
 
 next:
 		cmpl->valid = 0;
@@ -646,7 +584,7 @@
 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
 			tcb = bnad->tx_info[i].tcb[j];
 			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
-				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
 		}
 	}
 	/* Rx processing */
@@ -839,20 +777,9 @@
 {
 	struct bnad_tx_info *tx_info =
 			(struct bnad_tx_info *)tcb->txq->tx->priv;
-	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
-
-	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-		cpu_relax();
-
-	bnad_free_all_txbufs(bnad, tcb);
-
-	unmap_q->producer_index = 0;
-	unmap_q->consumer_index = 0;
-
-	smp_mb__before_clear_bit();
-	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
 	tx_info->tcb[tcb->id] = NULL;
+	tcb->priv = NULL;
 }
 
 static void
@@ -866,12 +793,6 @@
 }
 
 static void
-bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
-{
-	bnad_free_all_rxbufs(bnad, rcb);
-}
-
-static void
 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bnad_rx_info *rx_info =
@@ -916,7 +837,6 @@
 {
 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
 	struct bna_tcb *tcb;
-	struct bnad_unmap_q *unmap_q;
 	u32 txq_id;
 	int i;
 
@@ -926,23 +846,9 @@
 			continue;
 		txq_id = tcb->id;
 
-		unmap_q = tcb->unmap_q;
-
-		if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
-			continue;
-
-		while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-			cpu_relax();
-
-		bnad_free_all_txbufs(bnad, tcb);
-
-		unmap_q->producer_index = 0;
-		unmap_q->consumer_index = 0;
-
-		smp_mb__before_clear_bit();
-		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
-
+		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
 		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+		BUG_ON(*(tcb->hw_consumer_index) != 0);
 
 		if (netif_carrier_ok(bnad->netdev)) {
 			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
@@ -963,6 +869,54 @@
 	}
 }
 
+/*
+ * Free all TxQ buffers and then notify TX_E_CLEANUP_DONE to the Tx fsm.
+ */
+static void
+bnad_tx_cleanup(struct delayed_work *work)
+{
+	struct bnad_tx_info *tx_info =
+		container_of(work, struct bnad_tx_info, tx_cleanup_work);
+	struct bnad *bnad = NULL;
+	struct bnad_unmap_q *unmap_q;
+	struct bna_tcb *tcb;
+	unsigned long flags;
+	uint32_t i, pending = 0;
+
+	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
+		tcb = tx_info->tcb[i];
+		if (!tcb)
+			continue;
+
+		bnad = tcb->bnad;
+
+		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
+			pending++;
+			continue;
+		}
+
+		bnad_txq_cleanup(bnad, tcb);
+
+		unmap_q = tcb->unmap_q;
+		unmap_q->producer_index = 0;
+		unmap_q->consumer_index = 0;
+
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+	}
+
+	if (pending) {
+		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
+			msecs_to_jiffies(1));
+		return;
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bna_tx_cleanup_complete(tx_info->tx);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+
 static void
 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
 {
@@ -976,8 +930,7 @@
 			continue;
 	}
 
-	mdelay(BNAD_TXRX_SYNC_MDELAY);
-	bna_tx_cleanup_complete(tx);
+	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
 }
 
 static void
@@ -1001,6 +954,44 @@
 	}
 }
 
+/*
+ * Free all RxQ buffers and then notify RX_E_CLEANUP_DONE to the Rx fsm.
+ */
+static void
+bnad_rx_cleanup(void *work)
+{
+	struct bnad_rx_info *rx_info =
+		container_of(work, struct bnad_rx_info, rx_cleanup_work);
+	struct bnad_rx_ctrl *rx_ctrl;
+	struct bnad *bnad = NULL;
+	unsigned long flags;
+	uint32_t i;
+
+	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
+		rx_ctrl = &rx_info->rx_ctrl[i];
+
+		if (!rx_ctrl->ccb)
+			continue;
+
+		bnad = rx_ctrl->ccb->bnad;
+
+		/*
+		 * Wait till the poll handler has exited
+		 * and nothing can be scheduled anymore
+		 */
+		napi_disable(&rx_ctrl->napi);
+
+		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
+		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
+		if (rx_ctrl->ccb->rcb[1])
+			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bna_rx_cleanup_complete(rx_info->rx);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
 static void
 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
 {
@@ -1009,8 +1000,6 @@
 	struct bnad_rx_ctrl *rx_ctrl;
 	int i;
 
-	mdelay(BNAD_TXRX_SYNC_MDELAY);
-
 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
 		rx_ctrl = &rx_info->rx_ctrl[i];
 		ccb = rx_ctrl->ccb;
@@ -1021,12 +1010,9 @@
 
 		if (ccb->rcb[1])
 			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
-
-		while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
-			cpu_relax();
 	}
 
-	bna_rx_cleanup_complete(rx);
+	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
 }
 
 static void
@@ -1046,13 +1032,12 @@
 		if (!ccb)
 			continue;
 
-		bnad_cq_cmpl_init(bnad, ccb);
+		napi_enable(&rx_ctrl->napi);
 
 		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
 			rcb = ccb->rcb[j];
 			if (!rcb)
 				continue;
-			bnad_free_all_rxbufs(bnad, rcb);
 
 			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
 			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
@@ -1063,7 +1048,7 @@
 			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
 				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
 					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
-					bnad_alloc_n_post_rxbufs(bnad, rcb);
+					bnad_rxq_post(bnad, rcb);
 					smp_mb__before_clear_bit();
 				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
 			}
@@ -1687,7 +1672,7 @@
 	if (!netif_carrier_ok(bnad->netdev))
 		goto poll_exit;
 
-	rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
+	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
 	if (rcvd >= budget)
 		return rcvd;
 
@@ -1704,7 +1689,7 @@
 
 #define BNAD_NAPI_POLL_QUOTA		64
 static void
-bnad_napi_init(struct bnad *bnad, u32 rx_id)
+bnad_napi_add(struct bnad *bnad, u32 rx_id)
 {
 	struct bnad_rx_ctrl *rx_ctrl;
 	int i;
@@ -1718,34 +1703,18 @@
 }
 
 static void
-bnad_napi_enable(struct bnad *bnad, u32 rx_id)
-{
-	struct bnad_rx_ctrl *rx_ctrl;
-	int i;
-
-	/* Initialize & enable NAPI */
-	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
-		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
-
-		napi_enable(&rx_ctrl->napi);
-	}
-}
-
-static void
-bnad_napi_disable(struct bnad *bnad, u32 rx_id)
+bnad_napi_delete(struct bnad *bnad, u32 rx_id)
 {
 	int i;
 
 	/* First disable and then clean up */
-	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
-		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
+	for (i = 0; i < bnad->num_rxp_per_rx; i++)
 		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
-	}
 }
 
 /* Should be held with conf_lock held */
 void
-bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
+bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
 {
 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
@@ -1764,9 +1733,6 @@
 		bnad_tx_msix_unregister(bnad, tx_info,
 			bnad->num_txq_per_tx);
 
-	if (0 == tx_id)
-		tasklet_kill(&bnad->tx_free_tasklet);
-
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bna_tx_destroy(tx_info->tx);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1832,6 +1798,9 @@
 		goto err_return;
 	tx_info->tx = tx;
 
+	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
+			(work_func_t)bnad_tx_cleanup);
+
 	/* Register ISR for the Tx object */
 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
 		err = bnad_tx_msix_register(bnad, tx_info,
@@ -1896,7 +1865,7 @@
 
 /* Called with mutex_lock(&bnad->conf_mutex) held */
 void
-bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
+bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
 {
 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
@@ -1928,7 +1897,7 @@
 	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
 		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
 
-	bnad_napi_disable(bnad, rx_id);
+	bnad_napi_delete(bnad, rx_id);
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 	bna_rx_destroy(rx_info->rx);
@@ -1952,7 +1921,7 @@
 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
 	static const struct bna_rx_event_cbfn rx_cbfn = {
 		.rcb_setup_cbfn = bnad_cb_rcb_setup,
-		.rcb_destroy_cbfn = bnad_cb_rcb_destroy,
+		.rcb_destroy_cbfn = NULL,
 		.ccb_setup_cbfn = bnad_cb_ccb_setup,
 		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
 		.rx_stall_cbfn = bnad_cb_rx_stall,
@@ -1998,11 +1967,14 @@
 	rx_info->rx = rx;
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
+	INIT_WORK(&rx_info->rx_cleanup_work,
+			(work_func_t)(bnad_rx_cleanup));
+
 	/*
 	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
 	 * so that IRQ handler cannot schedule NAPI at this point.
 	 */
-	bnad_napi_init(bnad, rx_id);
+	bnad_napi_add(bnad, rx_id);
 
 	/* Register ISR for the Rx object */
 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2028,13 +2000,10 @@
 	bna_rx_enable(rx);
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-	/* Enable scheduling of NAPI */
-	bnad_napi_enable(bnad, rx_id);
-
 	return 0;
 
 err_return:
-	bnad_cleanup_rx(bnad, rx_id);
+	bnad_destroy_rx(bnad, rx_id);
 	return err;
 }
 
@@ -2519,7 +2488,7 @@
 	return 0;
 
 cleanup_tx:
-	bnad_cleanup_tx(bnad, 0);
+	bnad_destroy_tx(bnad, 0);
 
 err_return:
 	mutex_unlock(&bnad->conf_mutex);
@@ -2546,8 +2515,8 @@
 
 	wait_for_completion(&bnad->bnad_completions.enet_comp);
 
-	bnad_cleanup_tx(bnad, 0);
-	bnad_cleanup_rx(bnad, 0);
+	bnad_destroy_tx(bnad, 0);
+	bnad_destroy_rx(bnad, 0);
 
 	/* Synchronize mailbox IRQ */
 	bnad_mbox_irq_sync(bnad);
@@ -2620,7 +2589,7 @@
 		if ((u16) (*tcb->hw_consumer_index) !=
 		    tcb->consumer_index &&
 		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
-			acked = bnad_free_txbufs(bnad, tcb);
+			acked = bnad_txcmpl_process(bnad, tcb);
 			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 				bna_ib_ack(tcb->i_dbell, acked);
 			smp_mb__before_clear_bit();
@@ -2843,9 +2812,6 @@
 	bna_txq_prod_indx_doorbell(tcb);
 	smp_mb();
 
-	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
-		tasklet_schedule(&bnad->tx_free_tasklet);
-
 	return NETDEV_TX_OK;
 }
 
@@ -3127,8 +3093,8 @@
 /*
  * 1. Initialize the bnad structure
  * 2. Setup netdev pointer in pci_dev
- * 3. Initialze Tx free tasklet
- * 4. Initialize no. of TxQ & CQs & MSIX vectors
+ * 3. Initialize no. of TxQ & CQs & MSIX vectors
+ * 4. Initialize work queue.
  */
 static int
 bnad_init(struct bnad *bnad,
@@ -3171,8 +3137,11 @@
 	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
 	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
 
-	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
-		     (unsigned long)bnad);
+	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
+	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
+
+	if (!bnad->work_q)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -3185,6 +3154,12 @@
 static void
 bnad_uninit(struct bnad *bnad)
 {
+	if (bnad->work_q) {
+		flush_workqueue(bnad->work_q);
+		destroy_workqueue(bnad->work_q);
+		bnad->work_q = NULL;
+	}
+
 	if (bnad->bar0)
 		iounmap(bnad->bar0);
 	pci_set_drvdata(bnad->pcidev, NULL);
@@ -3304,7 +3279,6 @@
 	/*
 	 * Initialize bnad structure
 	 * Setup relation between pci_dev & netdev
-	 * Init Tx free tasklet
 	 */
 	err = bnad_init(bnad, pdev, netdev);
 	if (err)
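
The bnad changes above retire the Tx-free tasklet in favour of a driver-private single-threaded workqueue plus per-queue work items. A minimal sketch of that conversion pattern, using hypothetical foo_* names rather than the bnad types:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct workqueue_struct *work_q;	/* replaces the old tasklet */
	struct work_struct cleanup_work;
};

/* Runs in process context, so it may sleep while reclaiming descriptors. */
static void foo_cleanup(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv, cleanup_work);

	/* reclaim completed Tx/Rx descriptors for priv here */
	(void)priv;
}

static int foo_init(struct foo_priv *priv)
{
	priv->work_q = create_singlethread_workqueue("foo_wq");
	if (!priv->work_q)
		return -ENOMEM;
	INIT_WORK(&priv->cleanup_work, foo_cleanup);
	return 0;
}

static void foo_uninit(struct foo_priv *priv)
{
	if (priv->work_q) {
		flush_workqueue(priv->work_q);
		destroy_workqueue(priv->work_q);
		priv->work_q = NULL;
	}
}

The hot path then calls queue_work(priv->work_q, &priv->cleanup_work) where it previously called tasklet_schedule().
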
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 55824d9..72742be 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@
 #define BNAD_NAME			"bna"
 #define BNAD_NAME_LEN			64
 
-#define BNAD_VERSION			"3.0.2.2"
+#define BNAD_VERSION			"3.0.23.0"
 
 #define BNAD_MAILBOX_MSIX_INDEX		0
 #define BNAD_MAILBOX_MSIX_VECTORS	1
@@ -210,6 +210,7 @@
 	struct bna_tx *tx; /* 1:1 between tx_info & tx */
 	struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
 	u32 tx_id;
+	struct delayed_work tx_cleanup_work;
 } ____cacheline_aligned;
 
 struct bnad_rx_info {
@@ -217,6 +218,7 @@
 
 	struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
 	u32 rx_id;
+	struct work_struct rx_cleanup_work;
 } ____cacheline_aligned;
 
 /* Unmap queues for Tx / Rx cleanup */
@@ -318,7 +320,7 @@
 	/* Burnt in MAC address */
 	mac_t			perm_addr;
 
-	struct tasklet_struct	tx_free_tasklet;
+	struct workqueue_struct *work_q;
 
 	/* Statistics */
 	struct bnad_stats stats;
@@ -328,6 +330,7 @@
 	char			adapter_name[BNAD_NAME_LEN];
 	char			port_name[BNAD_NAME_LEN];
 	char			mbox_irq_name[BNAD_NAME_LEN];
+	char			wq_name[BNAD_NAME_LEN];
 
 	/* debugfs specific data */
 	char	*regdata;
@@ -370,8 +373,8 @@
 
 extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
 extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id);
+extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
+extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
 
 /* Timer start/stop protos */
 extern void bnad_dim_timer_start(struct bnad *bnad);
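
bnad.h embeds the new work items directly in the per-queue info structures, which lets the handler recover its queue context with container_of(). A small sketch of the idiomatic handler shape (hypothetical foo_* names; it avoids the work_func_t casts used in the patch):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_tx_info {
	u32 tx_id;
	struct delayed_work tx_cleanup_work;
};

static void foo_tx_cleanup(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct foo_tx_info *tx_info =
		container_of(dwork, struct foo_tx_info, tx_cleanup_work);

	/* free completed Tx buffers for tx_info->tx_id, rearm if more remain */
	(void)tx_info;
}

static void foo_tx_info_init(struct foo_tx_info *tx_info)
{
	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, foo_tx_cleanup);
}
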
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index ab753d7..40e1e84 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -464,7 +464,7 @@
 		for (i = 0; i < bnad->num_rx; i++) {
 			if (!bnad->rx_info[i].rx)
 				continue;
-			bnad_cleanup_rx(bnad, i);
+			bnad_destroy_rx(bnad, i);
 			current_err = bnad_setup_rx(bnad, i);
 			if (current_err && !err)
 				err = current_err;
@@ -492,7 +492,7 @@
 		for (i = 0; i < bnad->num_tx; i++) {
 			if (!bnad->tx_info[i].tx)
 				continue;
-			bnad_cleanup_tx(bnad, i);
+			bnad_destroy_tx(bnad, i);
 			current_err = bnad_setup_tx(bnad, i);
 			if (current_err && !err)
 				err = current_err;
@@ -539,7 +539,7 @@
 }
 
 static void
-bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
+bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
 {
 	struct bnad *bnad = netdev_priv(netdev);
 	int i, j, q_num;
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 9061170..7788419 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -30,6 +30,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/gfp.h>
+#include <linux/phy.h>
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -51,21 +52,17 @@
 /*
 * Read from an EMAC register.
  */
-static inline unsigned long at91_emac_read(unsigned int reg)
+static inline unsigned long at91_emac_read(struct at91_private *lp, unsigned int reg)
 {
-	void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC;
-
-	return __raw_readl(emac_base + reg);
+	return __raw_readl(lp->emac_base + reg);
 }
 
 /*
 * Write to an EMAC register.
  */
-static inline void at91_emac_write(unsigned int reg, unsigned long value)
+static inline void at91_emac_write(struct at91_private *lp, unsigned int reg, unsigned long value)
 {
-	void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC;
-
-	__raw_writel(value, emac_base + reg);
+	__raw_writel(value, lp->emac_base + reg);
 }
 
 /* ........................... PHY INTERFACE ........................... */
@@ -75,32 +72,33 @@
  * When not called from an interrupt-handler, access to the PHY must be
  *  protected by a spinlock.
  */
-static void enable_mdi(void)
+static void enable_mdi(struct at91_private *lp)
 {
 	unsigned long ctl;
 
-	ctl = at91_emac_read(AT91_EMAC_CTL);
-	at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_MPE);	/* enable management port */
+	ctl = at91_emac_read(lp, AT91_EMAC_CTL);
+	at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_MPE);	/* enable management port */
 }
 
 /*
  * Disable the MDIO bit in the MAC control register
  */
-static void disable_mdi(void)
+static void disable_mdi(struct at91_private *lp)
 {
 	unsigned long ctl;
 
-	ctl = at91_emac_read(AT91_EMAC_CTL);
-	at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE);	/* disable management port */
+	ctl = at91_emac_read(lp, AT91_EMAC_CTL);
+	at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE);	/* disable management port */
 }
 
 /*
  * Wait until the PHY operation is complete.
  */
-static inline void at91_phy_wait(void) {
+static inline void at91_phy_wait(struct at91_private *lp)
+{
 	unsigned long timeout = jiffies + 2;
 
-	while (!(at91_emac_read(AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
+	while (!(at91_emac_read(lp, AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
 		if (time_after(jiffies, timeout)) {
 			printk("at91_ether: MIO timeout\n");
 			break;
@@ -113,28 +111,28 @@
  * Write value to the a PHY register
  * Note: MDI interface is assumed to already have been enabled.
  */
-static void write_phy(unsigned char phy_addr, unsigned char address, unsigned int value)
+static void write_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int value)
 {
-	at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
+	at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
 		| ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
 
 	/* Wait until IDLE bit in Network Status register is cleared */
-	at91_phy_wait();
+	at91_phy_wait(lp);
 }
 
 /*
  * Read value stored in a PHY register.
  * Note: MDI interface is assumed to already have been enabled.
  */
-static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int *value)
+static void read_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int *value)
 {
-	at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
+	at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
 		| ((phy_addr & 0x1f) << 23) | (address << 18));
 
 	/* Wait until IDLE bit in Network Status register is cleared */
-	at91_phy_wait();
+	at91_phy_wait(lp);
 
-	*value = at91_emac_read(AT91_EMAC_MAN) & AT91_EMAC_DATA;
+	*value = at91_emac_read(lp, AT91_EMAC_MAN) & AT91_EMAC_DATA;
 }
 
 /* ........................... PHY MANAGEMENT .......................... */
@@ -158,13 +156,13 @@
 	}
 
 	/* Link up, or auto-negotiation still in progress */
-	read_phy(lp->phy_address, MII_BMSR, &bmsr);
-	read_phy(lp->phy_address, MII_BMCR, &bmcr);
+	read_phy(lp, lp->phy_address, MII_BMSR, &bmsr);
+	read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
 	if (bmcr & BMCR_ANENABLE) {				/* AutoNegotiation is enabled */
 		if (!(bmsr & BMSR_ANEGCOMPLETE))
 			return;			/* Do nothing - another interrupt generated when negotiation complete */
 
-		read_phy(lp->phy_address, MII_LPA, &lpa);
+		read_phy(lp, lp->phy_address, MII_LPA, &lpa);
 		if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
 		else speed = SPEED_10;
 		if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
@@ -175,7 +173,7 @@
 	}
 
 	/* Update the MAC */
-	mac_cfg = at91_emac_read(AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
+	mac_cfg = at91_emac_read(lp, AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
 	if (speed == SPEED_100) {
 		if (duplex == DUPLEX_FULL)		/* 100 Full Duplex */
 			mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
@@ -186,7 +184,7 @@
 			mac_cfg |= AT91_EMAC_FD;
 		else {}					/* 10 Half Duplex */
 	}
-	at91_emac_write(AT91_EMAC_CFG, mac_cfg);
+	at91_emac_write(lp, AT91_EMAC_CFG, mac_cfg);
 
 	if (!silent)
 		printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
@@ -207,34 +205,34 @@
 	 * level-triggering.  We therefore have to check if the PHY actually has
 	 * an IRQ pending.
 	 */
-	enable_mdi();
+	enable_mdi(lp);
 	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
-		read_phy(lp->phy_address, MII_DSINTR_REG, &phy);	/* ack interrupt in Davicom PHY */
+		read_phy(lp, lp->phy_address, MII_DSINTR_REG, &phy);	/* ack interrupt in Davicom PHY */
 		if (!(phy & (1 << 0)))
 			goto done;
 	}
 	else if (lp->phy_type == MII_LXT971A_ID) {
-		read_phy(lp->phy_address, MII_ISINTS_REG, &phy);	/* ack interrupt in Intel PHY */
+		read_phy(lp, lp->phy_address, MII_ISINTS_REG, &phy);	/* ack interrupt in Intel PHY */
 		if (!(phy & (1 << 2)))
 			goto done;
 	}
 	else if (lp->phy_type == MII_BCM5221_ID) {
-		read_phy(lp->phy_address, MII_BCMINTR_REG, &phy);	/* ack interrupt in Broadcom PHY */
+		read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &phy);	/* ack interrupt in Broadcom PHY */
 		if (!(phy & (1 << 0)))
 			goto done;
 	}
 	else if (lp->phy_type == MII_KS8721_ID) {
-		read_phy(lp->phy_address, MII_TPISTATUS, &phy);		/* ack interrupt in Micrel PHY */
+		read_phy(lp, lp->phy_address, MII_TPISTATUS, &phy);		/* ack interrupt in Micrel PHY */
 		if (!(phy & ((1 << 2) | 1)))
 			goto done;
 	}
-	else if (lp->phy_type == MII_T78Q21x3_ID) {			/* ack interrupt in Teridian PHY */
-		read_phy(lp->phy_address, MII_T78Q21INT_REG, &phy);
+	else if (lp->phy_type == MII_T78Q21x3_ID) {					/* ack interrupt in Teridian PHY */
+		read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &phy);
 		if (!(phy & ((1 << 2) | 1)))
 			goto done;
 	}
 	else if (lp->phy_type == MII_DP83848_ID) {
-		read_phy(lp->phy_address, MII_DPPHYSTS_REG, &phy);	/* ack interrupt in DP83848 PHY */
+		read_phy(lp, lp->phy_address, MII_DPPHYSTS_REG, &phy);	/* ack interrupt in DP83848 PHY */
 		if (!(phy & (1 << 7)))
 			goto done;
 	}
@@ -242,7 +240,7 @@
 	update_linkspeed(dev, 0);
 
 done:
-	disable_mdi();
+	disable_mdi(lp);
 
 	return IRQ_HANDLED;
 }
@@ -265,7 +263,7 @@
 		return;
 	}
 
-	irq_number = lp->board_data.phy_irq_pin;
+	irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
 	status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
 	if (status) {
 		printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
@@ -273,41 +271,41 @@
 	}
 
 	spin_lock_irq(&lp->lock);
-	enable_mdi();
+	enable_mdi(lp);
 
 	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {	/* for Davicom PHY */
-		read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr);
+		read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
 		dsintr = dsintr & ~0xf00;		/* clear bits 8..11 */
-		write_phy(lp->phy_address, MII_DSINTR_REG, dsintr);
+		write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
 	}
 	else if (lp->phy_type == MII_LXT971A_ID) {	/* for Intel PHY */
-		read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr);
+		read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
 		dsintr = dsintr | 0xf2;			/* set bits 1, 4..7 */
-		write_phy(lp->phy_address, MII_ISINTE_REG, dsintr);
+		write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
 	}
 	else if (lp->phy_type == MII_BCM5221_ID) {	/* for Broadcom PHY */
 		dsintr = (1 << 15) | ( 1 << 14);
-		write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr);
+		write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
 	}
 	else if (lp->phy_type == MII_KS8721_ID) {	/* for Micrel PHY */
 		dsintr = (1 << 10) | ( 1 << 8);
-		write_phy(lp->phy_address, MII_TPISTATUS, dsintr);
+		write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
 	}
 	else if (lp->phy_type == MII_T78Q21x3_ID) {	/* for Teridian PHY */
-		read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr);
+		read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
 		dsintr = dsintr | 0x500;		/* set bits 8, 10 */
-		write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr);
+		write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
 	}
 	else if (lp->phy_type == MII_DP83848_ID) {	/* National Semiconductor DP83848 PHY */
-		read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr);
+		read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
 		dsintr = dsintr | 0x3c;			/* set bits 2..5 */
-		write_phy(lp->phy_address, MII_DPMISR_REG, dsintr);
-		read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr);
+		write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
+		read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
 		dsintr = dsintr | 0x3;			/* set bits 0,1 */
-		write_phy(lp->phy_address, MII_DPMICR_REG, dsintr);
+		write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
 	}
 
-	disable_mdi();
+	disable_mdi(lp);
 	spin_unlock_irq(&lp->lock);
 }
 
@@ -326,46 +324,46 @@
 	}
 
 	spin_lock_irq(&lp->lock);
-	enable_mdi();
+	enable_mdi(lp);
 
 	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {	/* for Davicom PHY */
-		read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr);
+		read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
 		dsintr = dsintr | 0xf00;			/* set bits 8..11 */
-		write_phy(lp->phy_address, MII_DSINTR_REG, dsintr);
+		write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
 	}
 	else if (lp->phy_type == MII_LXT971A_ID) {	/* for Intel PHY */
-		read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr);
+		read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
 		dsintr = dsintr & ~0xf2;			/* clear bits 1, 4..7 */
-		write_phy(lp->phy_address, MII_ISINTE_REG, dsintr);
+		write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
 	}
 	else if (lp->phy_type == MII_BCM5221_ID) {	/* for Broadcom PHY */
-		read_phy(lp->phy_address, MII_BCMINTR_REG, &dsintr);
+		read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &dsintr);
 		dsintr = ~(1 << 14);
-		write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr);
+		write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
 	}
 	else if (lp->phy_type == MII_KS8721_ID) {	/* for Micrel PHY */
-		read_phy(lp->phy_address, MII_TPISTATUS, &dsintr);
+		read_phy(lp, lp->phy_address, MII_TPISTATUS, &dsintr);
 		dsintr = ~((1 << 10) | (1 << 8));
-		write_phy(lp->phy_address, MII_TPISTATUS, dsintr);
+		write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
 	}
 	else if (lp->phy_type == MII_T78Q21x3_ID) {	/* for Teridian PHY */
-		read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr);
+		read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
 		dsintr = dsintr & ~0x500;			/* clear bits 8, 10 */
-		write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr);
+		write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
 	}
 	else if (lp->phy_type == MII_DP83848_ID) {	/* National Semiconductor DP83848 PHY */
-		read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr);
+		read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
 		dsintr = dsintr & ~0x3;				/* clear bits 0, 1 */
-		write_phy(lp->phy_address, MII_DPMICR_REG, dsintr);
-		read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr);
+		write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
+		read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
 		dsintr = dsintr & ~0x3c;			/* clear bits 2..5 */
-		write_phy(lp->phy_address, MII_DPMISR_REG, dsintr);
+		write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
 	}
 
-	disable_mdi();
+	disable_mdi(lp);
 	spin_unlock_irq(&lp->lock);
 
-	irq_number = lp->board_data.phy_irq_pin;
+	irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
 	free_irq(irq_number, dev);			/* Free interrupt handler */
 }
 
@@ -379,17 +377,17 @@
 	unsigned int bmcr;
 
 	spin_lock_irq(&lp->lock);
-	enable_mdi();
+	enable_mdi(lp);
 
 	/* Perform PHY reset */
-	write_phy(lp->phy_address, MII_BMCR, BMCR_RESET);
+	write_phy(lp, lp->phy_address, MII_BMCR, BMCR_RESET);
 
 	/* Wait until PHY reset is complete */
 	do {
-		read_phy(lp->phy_address, MII_BMCR, &bmcr);
+		read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
 	} while (!(bmcr & BMCR_RESET));
 
-	disable_mdi();
+	disable_mdi(lp);
 	spin_unlock_irq(&lp->lock);
 }
 #endif
@@ -399,13 +397,37 @@
 	struct net_device *dev = (struct net_device *) dev_id;
 	struct at91_private *lp = netdev_priv(dev);
 
-	enable_mdi();
+	enable_mdi(lp);
 	update_linkspeed(dev, 1);
-	disable_mdi();
+	disable_mdi(lp);
 
 	mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
 }
 
+/*
+ * Perform any PHY-specific initialization.
+ */
+static void __init initialize_phy(struct at91_private *lp)
+{
+	unsigned int val;
+
+	spin_lock_irq(&lp->lock);
+	enable_mdi(lp);
+
+	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
+		read_phy(lp, lp->phy_address, MII_DSCR_REG, &val);
+		if ((val & (1 << 10)) == 0)			/* DSCR bit 10 is 0 -- fiber mode */
+			lp->phy_media = PORT_FIBRE;
+	} else if (machine_is_csb337()) {
+		/* mix link activity status into LED2 link state */
+		write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x0d22);
+	} else if (machine_is_ecbat91())
+		write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x156A);
+
+	disable_mdi(lp);
+	spin_unlock_irq(&lp->lock);
+}
+
 /* ......................... ADDRESS MANAGEMENT ........................ */
 
 /*
@@ -454,17 +476,19 @@
  */
 static void __init get_mac_address(struct net_device *dev)
 {
+	struct at91_private *lp = netdev_priv(dev);
+
 	/* Check Specific-Address 1 */
-	if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA1H), at91_emac_read(AT91_EMAC_SA1L)))
+	if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA1H), at91_emac_read(lp, AT91_EMAC_SA1L)))
 		return;
 	/* Check Specific-Address 2 */
-	if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA2H), at91_emac_read(AT91_EMAC_SA2L)))
+	if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA2H), at91_emac_read(lp, AT91_EMAC_SA2L)))
 		return;
 	/* Check Specific-Address 3 */
-	if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA3H), at91_emac_read(AT91_EMAC_SA3L)))
+	if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA3H), at91_emac_read(lp, AT91_EMAC_SA3L)))
 		return;
 	/* Check Specific-Address 4 */
-	if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA4H), at91_emac_read(AT91_EMAC_SA4L)))
+	if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA4H), at91_emac_read(lp, AT91_EMAC_SA4L)))
 		return;
 
 	printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
@@ -475,11 +499,13 @@
  */
 static void update_mac_address(struct net_device *dev)
 {
-	at91_emac_write(AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
-	at91_emac_write(AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
+	struct at91_private *lp = netdev_priv(dev);
 
-	at91_emac_write(AT91_EMAC_SA2L, 0);
-	at91_emac_write(AT91_EMAC_SA2H, 0);
+	at91_emac_write(lp, AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
+	at91_emac_write(lp, AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
+
+	at91_emac_write(lp, AT91_EMAC_SA2L, 0);
+	at91_emac_write(lp, AT91_EMAC_SA2H, 0);
 }
 
 /*
@@ -559,6 +585,7 @@
  */
 static void at91ether_sethashtable(struct net_device *dev)
 {
+	struct at91_private *lp = netdev_priv(dev);
 	struct netdev_hw_addr *ha;
 	unsigned long mc_filter[2];
 	unsigned int bitnr;
@@ -570,8 +597,8 @@
 		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
 	}
 
-	at91_emac_write(AT91_EMAC_HSL, mc_filter[0]);
-	at91_emac_write(AT91_EMAC_HSH, mc_filter[1]);
+	at91_emac_write(lp, AT91_EMAC_HSL, mc_filter[0]);
+	at91_emac_write(lp, AT91_EMAC_HSH, mc_filter[1]);
 }
 
 /*
@@ -579,9 +606,10 @@
  */
 static void at91ether_set_multicast_list(struct net_device *dev)
 {
+	struct at91_private *lp = netdev_priv(dev);
 	unsigned long cfg;
 
-	cfg = at91_emac_read(AT91_EMAC_CFG);
+	cfg = at91_emac_read(lp, AT91_EMAC_CFG);
 
 	if (dev->flags & IFF_PROMISC)			/* Enable promiscuous mode */
 		cfg |= AT91_EMAC_CAF;
@@ -589,34 +617,37 @@
 		cfg &= ~AT91_EMAC_CAF;
 
 	if (dev->flags & IFF_ALLMULTI) {		/* Enable all multicast mode */
-		at91_emac_write(AT91_EMAC_HSH, -1);
-		at91_emac_write(AT91_EMAC_HSL, -1);
+		at91_emac_write(lp, AT91_EMAC_HSH, -1);
+		at91_emac_write(lp, AT91_EMAC_HSL, -1);
 		cfg |= AT91_EMAC_MTI;
 	} else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
 		at91ether_sethashtable(dev);
 		cfg |= AT91_EMAC_MTI;
 	} else if (dev->flags & (~IFF_ALLMULTI)) {	/* Disable all multicast mode */
-		at91_emac_write(AT91_EMAC_HSH, 0);
-		at91_emac_write(AT91_EMAC_HSL, 0);
+		at91_emac_write(lp, AT91_EMAC_HSH, 0);
+		at91_emac_write(lp, AT91_EMAC_HSL, 0);
 		cfg &= ~AT91_EMAC_MTI;
 	}
 
-	at91_emac_write(AT91_EMAC_CFG, cfg);
+	at91_emac_write(lp, AT91_EMAC_CFG, cfg);
 }
 
 /* ......................... ETHTOOL SUPPORT ........................... */
 
 static int mdio_read(struct net_device *dev, int phy_id, int location)
 {
+	struct at91_private *lp = netdev_priv(dev);
 	unsigned int value;
 
-	read_phy(phy_id, location, &value);
+	read_phy(lp, phy_id, location, &value);
 	return value;
 }
 
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 {
-	write_phy(phy_id, location, value);
+	struct at91_private *lp = netdev_priv(dev);
+
+	write_phy(lp, phy_id, location, value);
 }
 
 static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -625,11 +656,11 @@
 	int ret;
 
 	spin_lock_irq(&lp->lock);
-	enable_mdi();
+	enable_mdi(lp);
 
 	ret = mii_ethtool_gset(&lp->mii, cmd);
 
-	disable_mdi();
+	disable_mdi(lp);
 	spin_unlock_irq(&lp->lock);
 
 	if (lp->phy_media == PORT_FIBRE) {		/* override media type since mii.c doesn't know */
@@ -646,11 +677,11 @@
 	int ret;
 
 	spin_lock_irq(&lp->lock);
-	enable_mdi();
+	enable_mdi(lp);
 
 	ret = mii_ethtool_sset(&lp->mii, cmd);
 
-	disable_mdi();
+	disable_mdi(lp);
 	spin_unlock_irq(&lp->lock);
 
 	return ret;
@@ -662,11 +693,11 @@
 	int ret;
 
 	spin_lock_irq(&lp->lock);
-	enable_mdi();
+	enable_mdi(lp);
 
 	ret = mii_nway_restart(&lp->mii);
 
-	disable_mdi();
+	disable_mdi(lp);
 	spin_unlock_irq(&lp->lock);
 
 	return ret;
@@ -696,9 +727,9 @@
 		return -EINVAL;
 
 	spin_lock_irq(&lp->lock);
-	enable_mdi();
+	enable_mdi(lp);
 	res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
-	disable_mdi();
+	disable_mdi(lp);
 	spin_unlock_irq(&lp->lock);
 
 	return res;
@@ -731,11 +762,11 @@
 	lp->rxBuffIndex = 0;
 
 	/* Program address of descriptor list in Rx Buffer Queue register */
-	at91_emac_write(AT91_EMAC_RBQP, (unsigned long) dlist_phys);
+	at91_emac_write(lp, AT91_EMAC_RBQP, (unsigned long) dlist_phys);
 
 	/* Enable Receive and Transmit */
-	ctl = at91_emac_read(AT91_EMAC_CTL);
-	at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
+	ctl = at91_emac_read(lp, AT91_EMAC_CTL);
+	at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
 }
 
 /*
@@ -752,8 +783,8 @@
 	clk_enable(lp->ether_clk);		/* Re-enable Peripheral clock */
 
 	/* Clear internal statistics */
-	ctl = at91_emac_read(AT91_EMAC_CTL);
-	at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);
+	ctl = at91_emac_read(lp, AT91_EMAC_CTL);
+	at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);
 
 	/* Update the MAC address (in case the user has changed it) */
 	update_mac_address(dev);
@@ -762,15 +793,15 @@
 	enable_phyirq(dev);
 
 	/* Enable MAC interrupts */
-	at91_emac_write(AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
+	at91_emac_write(lp, AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
 				| AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
 				| AT91_EMAC_ROVR | AT91_EMAC_ABT);
 
 	/* Determine current link speed */
 	spin_lock_irq(&lp->lock);
-	enable_mdi();
+	enable_mdi(lp);
 	update_linkspeed(dev, 0);
-	disable_mdi();
+	disable_mdi(lp);
 	spin_unlock_irq(&lp->lock);
 
 	at91ether_start(dev);
@@ -787,14 +818,14 @@
 	unsigned long ctl;
 
 	/* Disable Receiver and Transmitter */
-	ctl = at91_emac_read(AT91_EMAC_CTL);
-	at91_emac_write(AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));
+	ctl = at91_emac_read(lp, AT91_EMAC_CTL);
+	at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));
 
 	/* Disable PHY interrupt */
 	disable_phyirq(dev);
 
 	/* Disable MAC interrupts */
-	at91_emac_write(AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
+	at91_emac_write(lp, AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
 				| AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
 				| AT91_EMAC_ROVR | AT91_EMAC_ABT);
 
@@ -812,7 +843,7 @@
 {
 	struct at91_private *lp = netdev_priv(dev);
 
-	if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
+	if (at91_emac_read(lp, AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
 		netif_stop_queue(dev);
 
 		/* Store packet information (to free when Tx completed) */
@@ -822,9 +853,9 @@
 		dev->stats.tx_bytes += skb->len;
 
 		/* Set address of the data in the Transmit Address register */
-		at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr);
+		at91_emac_write(lp, AT91_EMAC_TAR, lp->skb_physaddr);
 		/* Set length of the packet in the Transmit Control register */
-		at91_emac_write(AT91_EMAC_TCR, skb->len);
+		at91_emac_write(lp, AT91_EMAC_TCR, skb->len);
 
 	} else {
 		printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
@@ -841,31 +872,32 @@
  */
 static struct net_device_stats *at91ether_stats(struct net_device *dev)
 {
+	struct at91_private *lp = netdev_priv(dev);
 	int ale, lenerr, seqe, lcol, ecol;
 
 	if (netif_running(dev)) {
-		dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK);		/* Good frames received */
-		ale = at91_emac_read(AT91_EMAC_ALE);
+		dev->stats.rx_packets += at91_emac_read(lp, AT91_EMAC_OK);	/* Good frames received */
+		ale = at91_emac_read(lp, AT91_EMAC_ALE);
 		dev->stats.rx_frame_errors += ale;				/* Alignment errors */
-		lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF);
+		lenerr = at91_emac_read(lp, AT91_EMAC_ELR) + at91_emac_read(lp, AT91_EMAC_USF);
 		dev->stats.rx_length_errors += lenerr;				/* Excessive Length or Undersize Frame error */
-		seqe = at91_emac_read(AT91_EMAC_SEQE);
+		seqe = at91_emac_read(lp, AT91_EMAC_SEQE);
 		dev->stats.rx_crc_errors += seqe;				/* CRC error */
-		dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC);	/* Receive buffer not available */
+		dev->stats.rx_fifo_errors += at91_emac_read(lp, AT91_EMAC_DRFC);/* Receive buffer not available */
 		dev->stats.rx_errors += (ale + lenerr + seqe
-			+ at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB));
+			+ at91_emac_read(lp, AT91_EMAC_CDE) + at91_emac_read(lp, AT91_EMAC_RJB));
 
-		dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA);		/* Frames successfully transmitted */
-		dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE);	/* Transmit FIFO underruns */
-		dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE);	/* Carrier Sense errors */
-		dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */
+		dev->stats.tx_packets += at91_emac_read(lp, AT91_EMAC_FRA);	/* Frames successfully transmitted */
+		dev->stats.tx_fifo_errors += at91_emac_read(lp, AT91_EMAC_TUE);	/* Transmit FIFO underruns */
+		dev->stats.tx_carrier_errors += at91_emac_read(lp, AT91_EMAC_CSE);	/* Carrier Sense errors */
+		dev->stats.tx_heartbeat_errors += at91_emac_read(lp, AT91_EMAC_SQEE);/* Heartbeat error */
 
-		lcol = at91_emac_read(AT91_EMAC_LCOL);
-		ecol = at91_emac_read(AT91_EMAC_ECOL);
+		lcol = at91_emac_read(lp, AT91_EMAC_LCOL);
+		ecol = at91_emac_read(lp, AT91_EMAC_ECOL);
 		dev->stats.tx_window_errors += lcol;			/* Late collisions */
 		dev->stats.tx_aborted_errors += ecol;			/* 16 collisions */
 
-		dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol);
+		dev->stats.collisions += (at91_emac_read(lp, AT91_EMAC_SCOL) + at91_emac_read(lp, AT91_EMAC_MCOL) + lcol + ecol);
 	}
 	return &dev->stats;
 }
@@ -922,7 +954,7 @@
 
 	/* MAC Interrupt Status register indicates what interrupts are pending.
 	   It is automatically cleared once read. */
-	intstatus = at91_emac_read(AT91_EMAC_ISR);
+	intstatus = at91_emac_read(lp, AT91_EMAC_ISR);
 
 	if (intstatus & AT91_EMAC_RCOM)		/* Receive complete */
 		at91ether_rx(dev);
@@ -942,9 +974,9 @@
 
 	/* Work-around for Errata #11 */
 	if (intstatus & AT91_EMAC_RBNA) {
-		ctl = at91_emac_read(AT91_EMAC_CTL);
-		at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
-		at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
+		ctl = at91_emac_read(lp, AT91_EMAC_CTL);
+		at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
+		at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
 	}
 
 	if (intstatus & AT91_EMAC_ROVR)
@@ -980,165 +1012,20 @@
 };
 
 /*
- * Initialize the ethernet interface
+ * Detect the PHY type, and its address.
  */
-static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address,
-			struct platform_device *pdev, struct clk *ether_clk)
-{
-	struct macb_platform_data *board_data = pdev->dev.platform_data;
-	struct net_device *dev;
-	struct at91_private *lp;
-	unsigned int val;
-	int res;
-
-	dev = alloc_etherdev(sizeof(struct at91_private));
-	if (!dev)
-		return -ENOMEM;
-
-	dev->base_addr = AT91_VA_BASE_EMAC;
-	dev->irq = AT91RM9200_ID_EMAC;
-
-	/* Install the interrupt handler */
-	if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
-		free_netdev(dev);
-		return -EBUSY;
-	}
-
-	/* Allocate memory for DMA Receive descriptors */
-	lp = netdev_priv(dev);
-	lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
-	if (lp->dlist == NULL) {
-		free_irq(dev->irq, dev);
-		free_netdev(dev);
-		return -ENOMEM;
-	}
-	lp->board_data = *board_data;
-	lp->ether_clk = ether_clk;
-	platform_set_drvdata(pdev, dev);
-
-	spin_lock_init(&lp->lock);
-
-	ether_setup(dev);
-	dev->netdev_ops = &at91ether_netdev_ops;
-	dev->ethtool_ops = &at91ether_ethtool_ops;
-
-	SET_NETDEV_DEV(dev, &pdev->dev);
-
-	get_mac_address(dev);		/* Get ethernet address and store it in dev->dev_addr */
-	update_mac_address(dev);	/* Program ethernet address into MAC */
-
-	at91_emac_write(AT91_EMAC_CTL, 0);
-
-	if (lp->board_data.is_rmii)
-		at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
-	else
-		at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);
-
-	/* Perform PHY-specific initialization */
-	spin_lock_irq(&lp->lock);
-	enable_mdi();
-	if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
-		read_phy(phy_address, MII_DSCR_REG, &val);
-		if ((val & (1 << 10)) == 0)			/* DSCR bit 10 is 0 -- fiber mode */
-			lp->phy_media = PORT_FIBRE;
-	} else if (machine_is_csb337()) {
-		/* mix link activity status into LED2 link state */
-		write_phy(phy_address, MII_LEDCTRL_REG, 0x0d22);
-	} else if (machine_is_ecbat91())
-		write_phy(phy_address, MII_LEDCTRL_REG, 0x156A);
-
-	disable_mdi();
-	spin_unlock_irq(&lp->lock);
-
-	lp->mii.dev = dev;		/* Support for ethtool */
-	lp->mii.mdio_read = mdio_read;
-	lp->mii.mdio_write = mdio_write;
-	lp->mii.phy_id = phy_address;
-	lp->mii.phy_id_mask = 0x1f;
-	lp->mii.reg_num_mask = 0x1f;
-
-	lp->phy_type = phy_type;	/* Type of PHY connected */
-	lp->phy_address = phy_address;	/* MDI address of PHY */
-
-	/* Register the network interface */
-	res = register_netdev(dev);
-	if (res) {
-		free_irq(dev->irq, dev);
-		free_netdev(dev);
-		dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
-		return res;
-	}
-
-	/* Determine current link speed */
-	spin_lock_irq(&lp->lock);
-	enable_mdi();
-	update_linkspeed(dev, 0);
-	disable_mdi();
-	spin_unlock_irq(&lp->lock);
-	netif_carrier_off(dev);		/* will be enabled in open() */
-
-	/* If board has no PHY IRQ, use a timer to poll the PHY */
-	if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
-		init_timer(&lp->check_timer);
-		lp->check_timer.data = (unsigned long)dev;
-		lp->check_timer.function = at91ether_check_link;
-	} else if (lp->board_data.phy_irq_pin >= 32)
-		gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
-
-	/* Display ethernet banner */
-	printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
-	       dev->name, (uint) dev->base_addr, dev->irq,
-	       at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
-	       at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
-	       dev->dev_addr);
-	if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
-		printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
-	else if (phy_type == MII_LXT971A_ID)
-		printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
-	else if (phy_type == MII_RTL8201_ID)
-		printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
-	else if (phy_type == MII_BCM5221_ID)
-		printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
-	else if (phy_type == MII_DP83847_ID)
-		printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
-	else if (phy_type == MII_DP83848_ID)
-		printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
-	else if (phy_type == MII_AC101L_ID)
-		printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
-	else if (phy_type == MII_KS8721_ID)
-		printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
-	else if (phy_type == MII_T78Q21x3_ID)
-		printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
-	else if (phy_type == MII_LAN83C185_ID)
-		printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
-
-	return 0;
-}
-
-/*
- * Detect MAC and PHY and perform initialization
- */
-static int __init at91ether_probe(struct platform_device *pdev)
+static int __init at91ether_phy_detect(struct at91_private *lp)
 {
 	unsigned int phyid1, phyid2;
-	int detected = -1;
 	unsigned long phy_id;
 	unsigned short phy_address = 0;
-	struct clk *ether_clk;
 
-	ether_clk = clk_get(&pdev->dev, "ether_clk");
-	if (IS_ERR(ether_clk)) {
-		printk(KERN_ERR "at91_ether: no clock defined\n");
-		return -ENODEV;
-	}
-	clk_enable(ether_clk);					/* Enable Peripheral clock */
-
-	while ((detected != 0) && (phy_address < 32)) {
+	while (phy_address < PHY_MAX_ADDR) {
 		/* Read the PHY ID registers */
-		enable_mdi();
-		read_phy(phy_address, MII_PHYSID1, &phyid1);
-		read_phy(phy_address, MII_PHYSID2, &phyid2);
-		disable_mdi();
+		enable_mdi(lp);
+		read_phy(lp, phy_address, MII_PHYSID1, &phyid1);
+		read_phy(lp, phy_address, MII_PHYSID2, &phyid2);
+		disable_mdi(lp);
 
 		phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
 		switch (phy_id) {
@@ -1153,16 +1040,171 @@
 			case MII_KS8721_ID:		/* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
 			case MII_T78Q21x3_ID:		/* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
 			case MII_LAN83C185_ID:		/* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
-				detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk);
-				break;
+				/* store detected values */
+				lp->phy_type = phy_id;		/* Type of PHY connected */
+				lp->phy_address = phy_address;	/* MDI address of PHY */
+				return 1;
 		}
 
 		phy_address++;
 	}
 
-	clk_disable(ether_clk);					/* Disable Peripheral clock */
+	return 0;		/* not detected */
+}
 
-	return detected;
+
+/*
+ * Detect MAC & PHY and perform ethernet interface initialization
+ */
+static int __init at91ether_probe(struct platform_device *pdev)
+{
+	struct macb_platform_data *board_data = pdev->dev.platform_data;
+	struct resource *regs;
+	struct net_device *dev;
+	struct at91_private *lp;
+	int res;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENOENT;
+
+	dev = alloc_etherdev(sizeof(struct at91_private));
+	if (!dev)
+		return -ENOMEM;
+
+	lp = netdev_priv(dev);
+	lp->board_data = *board_data;
+	spin_lock_init(&lp->lock);
+
+	dev->base_addr = regs->start;		/* physical base address */
+	lp->emac_base = ioremap(regs->start, regs->end - regs->start + 1);
+	if (!lp->emac_base) {
+		res = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	/* Clock */
+	lp->ether_clk = clk_get(&pdev->dev, "ether_clk");
+	if (IS_ERR(lp->ether_clk)) {
+		res = -ENODEV;
+		goto err_iounmap;
+	}
+	clk_enable(lp->ether_clk);
+
+	/* Install the interrupt handler */
+	dev->irq = platform_get_irq(pdev, 0);
+	if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
+		res = -EBUSY;
+		goto err_disable_clock;
+	}
+
+	/* Allocate memory for DMA Receive descriptors */
+	lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
+	if (lp->dlist == NULL) {
+		res = -ENOMEM;
+		goto err_free_irq;
+	}
+
+	ether_setup(dev);
+	dev->netdev_ops = &at91ether_netdev_ops;
+	dev->ethtool_ops = &at91ether_ethtool_ops;
+	platform_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	get_mac_address(dev);		/* Get ethernet address and store it in dev->dev_addr */
+	update_mac_address(dev);	/* Program ethernet address into MAC */
+
+	at91_emac_write(lp, AT91_EMAC_CTL, 0);
+
+	if (board_data->is_rmii)
+		at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
+	else
+		at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);
+
+	/* Detect PHY */
+	if (!at91ether_phy_detect(lp)) {
+		printk(KERN_ERR "at91_ether: Could not detect ethernet PHY\n");
+		res = -ENODEV;
+		goto err_free_dmamem;
+	}
+
+	initialize_phy(lp);
+
+	lp->mii.dev = dev;		/* Support for ethtool */
+	lp->mii.mdio_read = mdio_read;
+	lp->mii.mdio_write = mdio_write;
+	lp->mii.phy_id = lp->phy_address;
+	lp->mii.phy_id_mask = 0x1f;
+	lp->mii.reg_num_mask = 0x1f;
+
+	/* Register the network interface */
+	res = register_netdev(dev);
+	if (res)
+		goto err_free_dmamem;
+
+	/* Determine current link speed */
+	spin_lock_irq(&lp->lock);
+	enable_mdi(lp);
+	update_linkspeed(dev, 0);
+	disable_mdi(lp);
+	spin_unlock_irq(&lp->lock);
+	netif_carrier_off(dev);		/* will be enabled in open() */
+
+	/* Use the PHY IRQ if the board provides one, otherwise poll with a timer */
+	if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
+		gpio_request(board_data->phy_irq_pin, "ethernet_phy");
+	} else {
+		/* If board has no PHY IRQ, use a timer to poll the PHY */
+		init_timer(&lp->check_timer);
+		lp->check_timer.data = (unsigned long)dev;
+		lp->check_timer.function = at91ether_check_link;
+	}
+
+	/* Display ethernet banner */
+	printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
+	       dev->name, (uint) dev->base_addr, dev->irq,
+	       at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
+	       at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
+	       dev->dev_addr);
+	if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
+		printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
+	else if (lp->phy_type == MII_LXT971A_ID)
+		printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
+	else if (lp->phy_type == MII_RTL8201_ID)
+		printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
+	else if (lp->phy_type == MII_BCM5221_ID)
+		printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
+	else if (lp->phy_type == MII_DP83847_ID)
+		printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
+	else if (lp->phy_type == MII_DP83848_ID)
+		printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
+	else if (lp->phy_type == MII_AC101L_ID)
+		printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
+	else if (lp->phy_type == MII_KS8721_ID)
+		printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
+	else if (lp->phy_type == MII_T78Q21x3_ID)
+		printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
+	else if (lp->phy_type == MII_LAN83C185_ID)
+		printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
+
+	clk_disable(lp->ether_clk);					/* Disable Peripheral clock */
+
+	return 0;
+
+
+err_free_dmamem:
+	platform_set_drvdata(pdev, NULL);
+	dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
+err_free_irq:
+	free_irq(dev->irq, dev);
+err_disable_clock:
+	clk_disable(lp->ether_clk);
+	clk_put(lp->ether_clk);
+err_iounmap:
+	iounmap(lp->emac_base);
+err_free_dev:
+	free_netdev(dev);
+	return res;
 }
 
 static int __devexit at91ether_remove(struct platform_device *pdev)
@@ -1170,8 +1212,7 @@
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct at91_private *lp = netdev_priv(dev);
 
-	if (gpio_is_valid(lp->board_data.phy_irq_pin) &&
-	    lp->board_data.phy_irq_pin >= 32)
+	if (gpio_is_valid(lp->board_data.phy_irq_pin))
 		gpio_free(lp->board_data.phy_irq_pin);
 
 	unregister_netdev(dev);
@@ -1193,7 +1234,7 @@
 
 	if (netif_running(net_dev)) {
 		if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
-			int phy_irq = lp->board_data.phy_irq_pin;
+			int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
 			disable_irq(phy_irq);
 		}
 
@@ -1217,7 +1258,7 @@
 		netif_start_queue(net_dev);
 
 		if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
-			int phy_irq = lp->board_data.phy_irq_pin;
+			int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
 			enable_irq(phy_irq);
 		}
 	}
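
Throughout at91_ether.c the PHY interrupt pin is now treated as a GPIO number and mapped with gpio_to_irq() at every request/free/enable/disable site. A condensed sketch of that pattern (foo_* names and the board structure are illustrative only):

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>

struct foo_board_info {
	int phy_irq_pin;	/* a GPIO number, not an IRQ number */
};

static int foo_request_phy_irq(struct foo_board_info *bd,
			       irq_handler_t handler, void *dev_id)
{
	if (!gpio_is_valid(bd->phy_irq_pin))
		return -ENXIO;	/* no PHY IRQ: caller falls back to a poll timer */

	return request_irq(gpio_to_irq(bd->phy_irq_pin), handler, 0,
			   "foo-phy", dev_id);
}

static void foo_free_phy_irq(struct foo_board_info *bd, void *dev_id)
{
	if (gpio_is_valid(bd->phy_irq_pin))
		free_irq(gpio_to_irq(bd->phy_irq_pin), dev_id);
}
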
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
index 3725fbb0..0ef6328 100644
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ b/drivers/net/ethernet/cadence/at91_ether.h
@@ -88,6 +88,7 @@
 	struct macb_platform_data board_data;	/* board-specific
 						 * configuration (shared with
 						 * macb for common data */
+	void __iomem *emac_base;		/* base register address */
 	struct clk *ether_clk;			/* clock */
 
 	/* PHY */
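
With emac_base stored in the private structure, register access no longer depends on a fixed virtual address; the base is ioremap()ed from the platform MEM resource and handed to small accessor helpers. A sketch of that shape, with hypothetical foo_* names:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

struct foo_priv {
	void __iomem *regs;	/* mapped register window */
};

static inline u32 foo_read(struct foo_priv *priv, unsigned int reg)
{
	return __raw_readl(priv->regs + reg);
}

static inline void foo_write(struct foo_priv *priv, unsigned int reg, u32 val)
{
	__raw_writel(val, priv->regs + reg);
}

static int foo_map_regs(struct foo_priv *priv, struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENOENT;
	priv->regs = ioremap(res->start, resource_size(res));
	return priv->regs ? 0 : -ENOMEM;
}
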
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c4834c2..1466bc4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1213,6 +1213,7 @@
 	.set_settings		= macb_set_settings,
 	.get_drvinfo		= macb_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
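
For macb the new get_ts_info hook is simply pointed at the stock helper, which reports software timestamping capabilities only. A driver with no hardware timestamping would typically wire it up the same way (sketch, hypothetical foo_* ops):

#include <linux/ethtool.h>

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	/* no PTP hardware: report SW timestamping via the generic helper */
	.get_ts_info	= ethtool_op_get_ts_info,
};
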
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 63bfdd1..abb6ce7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1150,6 +1150,48 @@
 }
 
 /**
+ * t3_synchronize_rx - wait for current Rx processing on a port to complete
+ * @adap: the adapter
+ * @p: the port
+ *
+ * Ensures that current Rx processing on any of the queues associated with
+ * the given port completes before returning.  We do this by acquiring and
+ * releasing the locks of the response queues associated with the port.
+ */
+static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
+{
+	int i;
+
+	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+		struct sge_rspq *q = &adap->sge.qs[i].rspq;
+
+		spin_lock_irq(&q->lock);
+		spin_unlock_irq(&q->lock);
+	}
+}
+
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	if (adapter->params.rev > 0) {
+		t3_set_vlan_accel(adapter, 1 << pi->port_id,
+				  features & NETIF_F_HW_VLAN_RX);
+	} else {
+		/* single control for all ports */
+		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
+
+		for_each_port(adapter, i)
+			have_vlans |=
+				adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
+
+		t3_set_vlan_accel(adapter, 1, have_vlans);
+	}
+	t3_synchronize_rx(adapter, pi);
+}
+
+/**
  *	cxgb_up - enable the adapter
  *	@adapter: adapter being enabled
  *
@@ -1161,7 +1203,7 @@
  */
 static int cxgb_up(struct adapter *adap)
 {
-	int err;
+	int i, err;
 
 	if (!(adap->flags & FULL_INIT_DONE)) {
 		err = t3_check_fw_version(adap);
@@ -1198,6 +1240,9 @@
 		if (err)
 			goto out;
 
+		for_each_port(adap, i)
+			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
+
 		setup_rss(adap);
 		if (!(adap->flags & NAPI_INIT))
 			init_napi(adap);
@@ -2508,48 +2553,6 @@
 	return 0;
 }
 
-/**
- * t3_synchronize_rx - wait for current Rx processing on a port to complete
- * @adap: the adapter
- * @p: the port
- *
- * Ensures that current Rx processing on any of the queues associated with
- * the given port completes before returning.  We do this by acquiring and
- * releasing the locks of the response queues associated with the port.
- */
-static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
-{
-	int i;
-
-	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
-		struct sge_rspq *q = &adap->sge.qs[i].rspq;
-
-		spin_lock_irq(&q->lock);
-		spin_unlock_irq(&q->lock);
-	}
-}
-
-static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
-{
-	struct port_info *pi = netdev_priv(dev);
-	struct adapter *adapter = pi->adapter;
-
-	if (adapter->params.rev > 0) {
-		t3_set_vlan_accel(adapter, 1 << pi->port_id,
-				  features & NETIF_F_HW_VLAN_RX);
-	} else {
-		/* single control for all ports */
-		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
-
-		for_each_port(adapter, i)
-			have_vlans |=
-				adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
-
-		t3_set_vlan_accel(adapter, 1, have_vlans);
-	}
-	t3_synchronize_rx(adapter, pi);
-}
-
 static netdev_features_t cxgb_fix_features(struct net_device *dev,
 	netdev_features_t features)
 {
@@ -3353,9 +3356,6 @@
 	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
 				 &cxgb3_attr_group);
 
-	for_each_port(adapter, i)
-		cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features);
-
 	print_port_info(adapter, ai);
 	return 0;
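
The cxgb3 hunks move cxgb_vlan_mode() into cxgb_up() so VLAN acceleration is reprogrammed on every bring-up, and t3_synchronize_rx() quiesces receive processing simply by cycling each response queue's lock. That lock-cycling idiom in isolation (hypothetical foo_* types):

#include <linux/spinlock.h>

struct foo_rspq {
	spinlock_t lock;
	/* ... response ring state ... */
};

/*
 * Wait for any Rx processing that currently holds q->lock to complete.
 * Acquiring and immediately releasing the lock is sufficient: once we
 * own it, the previous critical section must have finished.
 */
static void foo_synchronize_rx(struct foo_rspq *q)
{
	spin_lock_irq(&q->lock);
	spin_unlock_irq(&q->lock);
}
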
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 0fe1885..ec2dafe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -51,6 +51,8 @@
 #define FW_VERSION_MINOR 1
 #define FW_VERSION_MICRO 0
 
+#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
+
 enum {
 	MAX_NPORTS = 4,     /* max # of ports */
 	SERNUM_LEN = 24,    /* Serial # length */
@@ -64,6 +66,15 @@
 	MEM_MC
 };
 
+enum {
+	MEMWIN0_APERTURE = 65536,
+	MEMWIN0_BASE     = 0x30000,
+	MEMWIN1_APERTURE = 32768,
+	MEMWIN1_BASE     = 0x28000,
+	MEMWIN2_APERTURE = 2048,
+	MEMWIN2_BASE     = 0x1b800,
+};
+
 enum dev_master {
 	MASTER_CANT,
 	MASTER_MAY,
@@ -403,6 +414,9 @@
 	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
 	struct sge_qstat *stat;     /* queue status entry */
 	dma_addr_t    phys_addr;    /* physical address of the ring */
+	spinlock_t db_lock;
+	int db_disabled;
+	unsigned short db_pidx;
 };
 
 struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
@@ -475,6 +489,7 @@
 	void __iomem *regs;
 	struct pci_dev *pdev;
 	struct device *pdev_dev;
+	unsigned int mbox;
 	unsigned int fn;
 	unsigned int flags;
 
@@ -504,6 +519,8 @@
 	void **tid_release_head;
 	spinlock_t tid_release_lock;
 	struct work_struct tid_release_task;
+	struct work_struct db_full_task;
+	struct work_struct db_drop_task;
 	bool tid_release_task_busy;
 
 	struct dentry *debugfs_root;
@@ -605,6 +622,7 @@
 void t4_sge_init(struct adapter *adap);
 void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
+extern int dbfifo_int_thresh;
 
 #define for_each_port(adapter, iter) \
 	for (iter = 0; iter < (adapter)->params.nports; ++iter)
@@ -719,4 +737,9 @@
 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int eqid);
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+void t4_db_full(struct adapter *adapter);
+void t4_db_dropped(struct adapter *adapter);
+int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
+int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
+			 u32 addr, u32 val);
 #endif /* __CXGB4_H__ */
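
cxgb4.h grows per-Tx-queue doorbell state (a lock, a disabled flag and the last producer index written), which the recovery code added in cxgb4_main.c toggles around FIFO drain and resync. Roughly (hypothetical foo_* names):

#include <linux/spinlock.h>

struct foo_txq {
	/* ... descriptor ring pointers ... */
	spinlock_t db_lock;	/* serializes doorbell writes vs. recovery */
	int db_disabled;	/* set while the doorbell FIFO is being recovered */
	unsigned short db_pidx;	/* producer index last pushed to hardware */
};

static void foo_txq_db_disable(struct foo_txq *q)
{
	spin_lock_irq(&q->db_lock);
	q->db_disabled = 1;
	spin_unlock_irq(&q->db_lock);
}

static void foo_txq_db_enable(struct foo_txq *q)
{
	spin_lock_irq(&q->db_lock);
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

The transmit-side counterpart (the ring_tx_db() change that begins at the end of this section) records db_pidx under db_lock and skips the hardware write while db_disabled is set.
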
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index b126b98..e1f96fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -149,15 +149,6 @@
 #endif
 
 enum {
-	MEMWIN0_APERTURE = 65536,
-	MEMWIN0_BASE     = 0x30000,
-	MEMWIN1_APERTURE = 32768,
-	MEMWIN1_BASE     = 0x28000,
-	MEMWIN2_APERTURE = 2048,
-	MEMWIN2_BASE     = 0x1b800,
-};
-
-enum {
 	MAX_TXQ_ENTRIES      = 16384,
 	MAX_CTRL_TXQ_ENTRIES = 1024,
 	MAX_RSPQ_ENTRIES     = 16384,
@@ -371,6 +362,15 @@
 				uhash | mhash, sleep);
 }
 
+int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
+module_param(dbfifo_int_thresh, int, 0644);
+MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
+
+int dbfifo_drain_delay = 1000; /* usecs to sleep while draining the dbfifo */
+module_param(dbfifo_drain_delay, int, 0644);
+MODULE_PARM_DESC(dbfifo_drain_delay,
+		 "usecs to sleep while draining the dbfifo");
+
 /*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
  * If @mtu is -1 it is left unchanged.
@@ -389,6 +389,8 @@
 	return ret;
 }
 
+static struct workqueue_struct *workq;
+
 /**
  *	link_start - enable a port
  *	@dev: the port to enable
@@ -2196,7 +2198,7 @@
 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
 	if (!adap->tid_release_task_busy) {
 		adap->tid_release_task_busy = true;
-		schedule_work(&adap->tid_release_task);
+		queue_work(workq, &adap->tid_release_task);
 	}
 	spin_unlock_bh(&adap->tid_release_lock);
 }
@@ -2366,6 +2368,16 @@
 }
 EXPORT_SYMBOL(cxgb4_port_chan);
 
+unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
+{
+	struct adapter *adap = netdev2adap(dev);
+	u32 v;
+
+	v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+	return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
+}
+EXPORT_SYMBOL(cxgb4_dbfifo_count);
+
 /**
  *	cxgb4_port_viid - get the VI id of a port
  *	@dev: the net device for the port
@@ -2413,6 +2425,59 @@
 }
 EXPORT_SYMBOL(cxgb4_iscsi_init);
 
+int cxgb4_flush_eq_cache(struct net_device *dev)
+{
+	struct adapter *adap = netdev2adap(dev);
+	int ret;
+
+	ret = t4_fwaddrspace_write(adap, adap->mbox,
+				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
+	return ret;
+}
+EXPORT_SYMBOL(cxgb4_flush_eq_cache);
+
+static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
+{
+	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
+	__be64 indices;
+	int ret;
+
+	ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
+	if (!ret) {
+		indices = be64_to_cpu(indices);
+		*cidx = (indices >> 25) & 0xffff;
+		*pidx = (indices >> 9) & 0xffff;
+	}
+	return ret;
+}
+
+int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
+			u16 size)
+{
+	struct adapter *adap = netdev2adap(dev);
+	u16 hw_pidx, hw_cidx;
+	int ret;
+
+	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
+	if (ret)
+		goto out;
+
+	if (pidx != hw_pidx) {
+		u16 delta;
+
+		if (pidx >= hw_pidx)
+			delta = pidx - hw_pidx;
+		else
+			delta = size - hw_pidx + pidx;
+		wmb();
+		t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+			     V_QID(qid) | V_PIDX(delta));
+	}
+out:
+	return ret;
+}
+EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
+
 static struct pci_driver cxgb4_driver;
 
 static void check_neigh_update(struct neighbour *neigh)
@@ -2446,6 +2511,144 @@
 	.notifier_call = netevent_cb
 };
 
+static void drain_db_fifo(struct adapter *adap, int usecs)
+{
+	u32 v;
+
+	do {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(usecs));
+		v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+		if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
+			break;
+	} while (1);
+}
+
+static void disable_txq_db(struct sge_txq *q)
+{
+	spin_lock_irq(&q->db_lock);
+	q->db_disabled = 1;
+	spin_unlock_irq(&q->db_lock);
+}
+
+static void enable_txq_db(struct sge_txq *q)
+{
+	spin_lock_irq(&q->db_lock);
+	q->db_disabled = 0;
+	spin_unlock_irq(&q->db_lock);
+}
+
+static void disable_dbs(struct adapter *adap)
+{
+	int i;
+
+	for_each_ethrxq(&adap->sge, i)
+		disable_txq_db(&adap->sge.ethtxq[i].q);
+	for_each_ofldrxq(&adap->sge, i)
+		disable_txq_db(&adap->sge.ofldtxq[i].q);
+	for_each_port(adap, i)
+		disable_txq_db(&adap->sge.ctrlq[i].q);
+}
+
+static void enable_dbs(struct adapter *adap)
+{
+	int i;
+
+	for_each_ethrxq(&adap->sge, i)
+		enable_txq_db(&adap->sge.ethtxq[i].q);
+	for_each_ofldrxq(&adap->sge, i)
+		enable_txq_db(&adap->sge.ofldtxq[i].q);
+	for_each_port(adap, i)
+		enable_txq_db(&adap->sge.ctrlq[i].q);
+}
+
+static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
+{
+	u16 hw_pidx, hw_cidx;
+	int ret;
+
+	spin_lock_bh(&q->db_lock);
+	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
+	if (ret)
+		goto out;
+	if (q->db_pidx != hw_pidx) {
+		u16 delta;
+
+		if (q->db_pidx >= hw_pidx)
+			delta = q->db_pidx - hw_pidx;
+		else
+			delta = q->size - hw_pidx + q->db_pidx;
+		wmb();
+		t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+				V_QID(q->cntxt_id) | V_PIDX(delta));
+	}
+out:
+	q->db_disabled = 0;
+	spin_unlock_bh(&q->db_lock);
+	if (ret)
+		CH_WARN(adap, "DB drop recovery failed.\n");
+}
+static void recover_all_queues(struct adapter *adap)
+{
+	int i;
+
+	for_each_ethrxq(&adap->sge, i)
+		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
+	for_each_ofldrxq(&adap->sge, i)
+		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
+	for_each_port(adap, i)
+		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
+}
+
+static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
+{
+	mutex_lock(&uld_mutex);
+	if (adap->uld_handle[CXGB4_ULD_RDMA])
+		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
+				cmd);
+	mutex_unlock(&uld_mutex);
+}
+
+static void process_db_full(struct work_struct *work)
+{
+	struct adapter *adap;
+
+	adap = container_of(work, struct adapter, db_full_task);
+
+	notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
+	drain_db_fifo(adap, dbfifo_drain_delay);
+	t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
+			F_DBFIFO_HP_INT | F_DBFIFO_LP_INT,
+			F_DBFIFO_HP_INT | F_DBFIFO_LP_INT);
+	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
+}
+
+static void process_db_drop(struct work_struct *work)
+{
+	struct adapter *adap;
+
+	adap = container_of(work, struct adapter, db_drop_task);
+
+	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
+	disable_dbs(adap);
+	notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
+	drain_db_fifo(adap, 1);
+	recover_all_queues(adap);
+	enable_dbs(adap);
+}
+
+void t4_db_full(struct adapter *adap)
+{
+	t4_set_reg_field(adap, A_SGE_INT_ENABLE3,
+			F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0);
+	queue_work(workq, &adap->db_full_task);
+}
+
+void t4_db_dropped(struct adapter *adap)
+{
+	queue_work(workq, &adap->db_drop_task);
+}
+
 static void uld_attach(struct adapter *adap, unsigned int uld)
 {
 	void *handle;
@@ -2479,6 +2682,7 @@
 	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
 	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
 	lli.fw_vers = adap->params.fw_vers;
+	lli.dbfifo_int_thresh = dbfifo_int_thresh;
 
 	handle = ulds[uld].add(&lli);
 	if (IS_ERR(handle)) {
@@ -2649,6 +2853,8 @@
 {
 	t4_intr_disable(adapter);
 	cancel_work_sync(&adapter->tid_release_task);
+	cancel_work_sync(&adapter->db_full_task);
+	cancel_work_sync(&adapter->db_drop_task);
 	adapter->tid_release_task_busy = false;
 	adapter->tid_release_head = NULL;
 
@@ -3593,6 +3799,7 @@
 
 	adapter->pdev = pdev;
 	adapter->pdev_dev = &pdev->dev;
+	adapter->mbox = func;
 	adapter->fn = func;
 	adapter->msg_enable = dflt_msg_enable;
 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -3601,6 +3808,8 @@
 	spin_lock_init(&adapter->tid_release_lock);
 
 	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+	INIT_WORK(&adapter->db_full_task, process_db_full);
+	INIT_WORK(&adapter->db_drop_task, process_db_drop);
 
 	err = t4_prep_adapter(adapter);
 	if (err)
@@ -3788,6 +3997,10 @@
 {
 	int ret;
 
+	workq = create_singlethread_workqueue("cxgb4");
+	if (!workq)
+		return -ENOMEM;
+
 	/* Debugfs support is optional, just warn if this fails */
 	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
 	if (!cxgb4_debugfs_root)
@@ -3803,6 +4016,8 @@
 {
 	pci_unregister_driver(&cxgb4_driver);
 	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
+	flush_workqueue(workq);
+	destroy_workqueue(workq);
 }
 
 module_init(cxgb4_init_module);
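The doorbell-drop recovery path above resynchronizes each TX queue with the hardware: sync_txq_pidx() reads back the hardware producer index and rings the doorbell with however many descriptors the hardware missed, taking the wrap of the descriptor ring into account. A minimal standalone sketch of that wrap-around arithmetic (plain userspace C; the names here are chosen for illustration only, not driver code):

#include <stdio.h>

/*
 * Same delta rule as sync_txq_pidx(): the queue is a ring of 'size'
 * descriptors, so if the driver's cached producer index has wrapped
 * past the value the hardware last saw, the difference is taken
 * modulo the ring size.
 */
static unsigned int db_delta(unsigned int db_pidx, unsigned int hw_pidx,
			     unsigned int size)
{
	if (db_pidx >= hw_pidx)
		return db_pidx - hw_pidx;
	return size - hw_pidx + db_pidx;
}

int main(void)
{
	/* hardware stalled at index 1020, software wrapped around to 4 */
	printf("%u\n", db_delta(4, 1020, 1024));	/* prints 8 */
	return 0;
}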
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b1d39b8..d79980c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -163,6 +163,12 @@
 	CXGB4_STATE_DETACH
 };
 
+enum cxgb4_control {
+	CXGB4_CONTROL_DB_FULL,
+	CXGB4_CONTROL_DB_EMPTY,
+	CXGB4_CONTROL_DB_DROP,
+};
+
 struct pci_dev;
 struct l2t_data;
 struct net_device;
@@ -212,6 +218,7 @@
 	unsigned short ucq_density;          /* # of user CQs/page */
 	void __iomem *gts_reg;               /* address of GTS register */
 	void __iomem *db_reg;                /* address of kernel doorbell */
+	int dbfifo_int_thresh;		     /* doorbell fifo int threshold */
 };
 
 struct cxgb4_uld_info {
@@ -220,11 +227,13 @@
 	int (*rx_handler)(void *handle, const __be64 *rsp,
 			  const struct pkt_gl *gl);
 	int (*state_change)(void *handle, enum cxgb4_state new_state);
+	int (*control)(void *handle, enum cxgb4_control control, ...);
 };
 
 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
 int cxgb4_unregister_uld(enum cxgb4_uld type);
 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
 unsigned int cxgb4_port_chan(const struct net_device *dev);
 unsigned int cxgb4_port_viid(const struct net_device *dev);
 unsigned int cxgb4_port_idx(const struct net_device *dev);
@@ -236,4 +245,6 @@
 		      const unsigned int *pgsz_order);
 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
 				   unsigned int skb_len, unsigned int pull_len);
+int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
+int cxgb4_flush_eq_cache(struct net_device *dev);
 #endif  /* !__CXGB4_OFLD_H */
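The new control hook lets cxgb4 tell its upper-layer drivers (ULDs) when the doorbell FIFO fills up, empties, or drops writes. A hedged sketch of what a ULD-side implementation could look like; everything prefixed my_ below is hypothetical, and only enum cxgb4_control and the cxgb4_uld_info.control signature come from this header:

#include "cxgb4_uld.h"		/* enum cxgb4_control, struct cxgb4_uld_info */

struct my_uld_dev {		/* hypothetical ULD private state */
	int db_state;
};

static int my_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct my_uld_dev *dev = handle;   /* the pointer our .add() returned */

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		dev->db_state = 1;	/* stop posting new work requests */
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		dev->db_state = 0;	/* FIFO drained, resume posting */
		break;
	case CXGB4_CONTROL_DB_DROP:
		dev->db_state = 2;	/* resync, e.g. via cxgb4_sync_txq_pidx() */
		break;
	}
	return 0;
}

static const struct cxgb4_uld_info my_uld_info = {
	/* .add, .rx_handler, .state_change as before ... */
	.control = my_uld_control,
};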
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 2dae795..e111d97 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -767,8 +767,13 @@
 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 {
 	wmb();            /* write descriptors before telling HW */
-	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
-		     QID(q->cntxt_id) | PIDX(n));
+	spin_lock(&q->db_lock);
+	if (!q->db_disabled) {
+		t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+			     V_QID(q->cntxt_id) | V_PIDX(n));
+	}
+	q->db_pidx = q->pidx;
+	spin_unlock(&q->db_lock);
 }
 
 /**
@@ -2081,6 +2086,7 @@
 	q->stops = q->restarts = 0;
 	q->stat = (void *)&q->desc[q->size];
 	q->cntxt_id = id;
+	spin_lock_init(&q->db_lock);
 	adap->sge.egr_map[id - adap->sge.egr_start] = q;
 }
 
@@ -2415,6 +2421,18 @@
 			 RXPKTCPLMODE |
 			 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
 
+	/*
+	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
+	 * and generate an interrupt when this occurs so we can recover.
+	 */
+	t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+			V_HP_INT_THRESH(M_HP_INT_THRESH) |
+			V_LP_INT_THRESH(M_LP_INT_THRESH),
+			V_HP_INT_THRESH(dbfifo_int_thresh) |
+			V_LP_INT_THRESH(dbfifo_int_thresh));
+	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
+			F_ENABLE_DROP);
+
 	for (i = v = 0; i < 32; i += 4)
 		v |= (PAGE_SHIFT - 10) << i;
 	t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index d1ec111..32e1dd5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -868,11 +868,14 @@
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
+typedef void (*int_handler_t)(struct adapter *adap);
+
 struct intr_info {
 	unsigned int mask;       /* bits to check in interrupt status */
 	const char *msg;         /* message to print or NULL */
 	short stat_idx;          /* stat counter to increment or -1 */
 	unsigned short fatal;    /* whether the condition reported is fatal */
+	int_handler_t int_handler; /* platform-specific int handler */
 };
 
 /**
@@ -905,6 +908,8 @@
 		} else if (acts->msg && printk_ratelimit())
 			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
 				 status & acts->mask);
+		if (acts->int_handler)
+			acts->int_handler(adapter);
 		mask |= acts->mask;
 	}
 	status &= mask;
@@ -1013,7 +1018,9 @@
 		{ ERR_INVALID_CIDX_INC,
 		  "SGE GTS CIDX increment too large", -1, 0 },
 		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
-		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
+		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
+		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
+		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
 		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
 		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
@@ -1034,10 +1041,10 @@
 	};
 
 	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
-	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
+		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
 	if (v) {
 		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
-			 (unsigned long long)v);
+				(unsigned long long)v);
 		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
 		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
 	}
@@ -1513,6 +1520,7 @@
 		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
 		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
 		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
+		     F_DBFIFO_HP_INT | F_DBFIFO_LP_INT |
 		     EGRESS_SIZE_ERR);
 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
 	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
@@ -1986,6 +1994,54 @@
 	(var).retval_len16 = htonl(FW_LEN16(var)); \
 } while (0)
 
+int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
+			  u32 addr, u32 val)
+{
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
+			    F_FW_CMD_WRITE |
+			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
+	c.cycles_to_len16 = htonl(FW_LEN16(c));
+	c.u.addrval.addr = htonl(addr);
+	c.u.addrval.val = htonl(val);
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/*
+ *     t4_mem_win_read_len - read memory through PCIE memory window
+ *     @adap: the adapter
+ *     @addr: address of first byte requested aligned on 32b.
+ *     @data: len bytes to hold the data read
+ *     @len: amount of data to read from window.  Must be <=
+ *            MEMWIN0_APERTURE after adjusting for 16B alignment
+ *            requirements of the memory window.
+ *
+ *     Read len bytes of data from MC starting at @addr.
+ */
+int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
+{
+	int i;
+	int off;
+
+	/*
+	 * Align on a 16B boundary.
+	 */
+	off = addr & 15;
+	if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
+		return -EINVAL;
+
+	t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15);
+	t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET);
+
+	for (i = 0; i < len; i += 4)
+		*data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i));
+
+	return 0;
+}
+
 /**
  *	t4_mdio_rd - read a PHY register through MDIO
  *	@adap: the adapter
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 0adc5bc..111fc32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -190,6 +190,59 @@
 #define SGE_DEBUG_DATA_LOW 0x10d4
 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
 
+#define S_LP_INT_THRESH    12
+#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
+#define S_HP_INT_THRESH    28
+#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
+#define A_SGE_DBFIFO_STATUS 0x10a4
+
+#define S_ENABLE_DROP    13
+#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
+#define F_ENABLE_DROP    V_ENABLE_DROP(1U)
+#define A_SGE_DOORBELL_CONTROL 0x10a8
+
+#define A_SGE_CTXT_CMD 0x11fc
+#define A_SGE_DBQ_CTXT_BADDR 0x1084
+
+#define A_SGE_PF_KDOORBELL 0x0
+
+#define S_QID 15
+#define V_QID(x) ((x) << S_QID)
+
+#define S_PIDX 0
+#define V_PIDX(x) ((x) << S_PIDX)
+
+#define M_LP_COUNT 0x7ffU
+#define S_LP_COUNT 0
+#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
+
+#define M_HP_COUNT 0x7ffU
+#define S_HP_COUNT 16
+#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
+
+#define A_SGE_INT_ENABLE3 0x1040
+
+#define S_DBFIFO_HP_INT 8
+#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
+#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)
+
+#define S_DBFIFO_LP_INT 7
+#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
+#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)
+
+#define S_DROPPED_DB 0
+#define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
+#define F_DROPPED_DB V_DROPPED_DB(1U)
+
+#define S_ERR_DROPPED_DB 18
+#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB)
+#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)
+
+#define A_PCIE_MEM_ACCESS_OFFSET 0x306c
+
+#define M_HP_INT_THRESH 0xfU
+#define M_LP_INT_THRESH 0xfU
+
 #define PCIE_PF_CLI 0x44
 #define PCIE_INT_CAUSE 0x3004
 #define  UNXSPLCPLERR  0x20000000U
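The additions to t4_regs.h follow the Chelsio S_/M_/V_/F_/G_ naming convention: S_ is a field's bit offset, M_ its mask, V_(x) places a value into the field, F_ is the single-bit form, and G_(x) extracts the field from a raw register value. A short sketch, assuming only the macros defined above and kernel types, of how the doorbell FIFO interrupt thresholds are composed and how the occupancy counters are read back (drain_db_fifo() does the latter after t4_read_reg()):

/*
 * Illustration of the macro convention only; 'status' stands in for a
 * raw A_SGE_DBFIFO_STATUS readout and 'thresh' for dbfifo_int_thresh.
 */
static inline u32 dbfifo_thresh_fields(unsigned int thresh)
{
	/* value programmed into the HP/LP interrupt-threshold fields */
	return V_HP_INT_THRESH(thresh & M_HP_INT_THRESH) |
	       V_LP_INT_THRESH(thresh & M_LP_INT_THRESH);
}

static inline void dbfifo_counts(u32 status, unsigned int *lp, unsigned int *hp)
{
	*lp = G_LP_COUNT(status);	/* (status >> 0)  & 0x7ff */
	*hp = G_HP_COUNT(status);	/* (status >> 16) & 0x7ff */
}

/* e.g. status 0x00200005 decodes to hp_count 0x20 and lp_count 0x5 */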
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index edcfd7e..ad53f79 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1620,4 +1620,19 @@
 #define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
 #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
 #define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
+
+#define S_FW_CMD_OP 24
+#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP)
+
+#define S_FW_CMD_REQUEST 23
+#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
+#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
+
+#define S_FW_CMD_WRITE 21
+#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
+#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
+
+#define S_FW_LDST_CMD_ADDRSPACE 0
+#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
+
 #endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index b9406cb..845b202 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1,105 +1,27 @@
 /* cs89x0.c: A Crystal Semiconductor (Now Cirrus Logic) CS89[02]0
- *  driver for linux.
+ *           driver for linux.
+ * Written 1996 by Russell Nelson, with reference to skeleton.c
+ * written 1993-1994 by Donald Becker.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * The author may be reached at nelson@crynwr.com, Crynwr
+ * Software, 521 Pleasant Valley Rd., Potsdam, NY 13676
+ *
+ * Other contributors:
+ * Mike Cruse        : mcruse@cti-ltd.com
+ * Russ Nelson
+ * Melody Lee        : ethernet@crystal.cirrus.com
+ * Alan Cox
+ * Andrew Morton
+ * Oskar Schirmer    : oskar@scara.com
+ * Deepak Saxena     : dsaxena@plexity.net
+ * Dmitry Pervushin  : dpervushin@ru.mvista.com
+ * Deepak Saxena     : dsaxena@plexity.net
+ * Domenico Andreoli : cavokz@gmail.com
  */
 
-/*
-	Written 1996 by Russell Nelson, with reference to skeleton.c
-	written 1993-1994 by Donald Becker.
-
-	This software may be used and distributed according to the terms
-	of the GNU General Public License, incorporated herein by reference.
-
-        The author may be reached at nelson@crynwr.com, Crynwr
-        Software, 521 Pleasant Valley Rd., Potsdam, NY 13676
-
-  Changelog:
-
-  Mike Cruse        : mcruse@cti-ltd.com
-                    : Changes for Linux 2.0 compatibility.
-                    : Added dev_id parameter in net_interrupt(),
-                    : request_irq() and free_irq(). Just NULL for now.
-
-  Mike Cruse        : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros
-                    : in net_open() and net_close() so kerneld would know
-                    : that the module is in use and wouldn't eject the
-                    : driver prematurely.
-
-  Mike Cruse        : Rewrote init_module() and cleanup_module using 8390.c
-                    : as an example. Disabled autoprobing in init_module(),
-                    : not a good thing to do to other devices while Linux
-                    : is running from all accounts.
-
-  Russ Nelson       : Jul 13 1998.  Added RxOnly DMA support.
-
-  Melody Lee        : Aug 10 1999.  Changes for Linux 2.2.5 compatibility.
-                    : email: ethernet@crystal.cirrus.com
-
-  Alan Cox          : Removed 1.2 support, added 2.1 extra counters.
-
-  Andrew Morton     : Kernel 2.3.48
-                    : Handle kmalloc() failures
-                    : Other resource allocation fixes
-                    : Add SMP locks
-                    : Integrate Russ Nelson's ALLOW_DMA functionality back in.
-                    : If ALLOW_DMA is true, make DMA runtime selectable
-                    : Folded in changes from Cirrus (Melody Lee
-                    : <klee@crystal.cirrus.com>)
-                    : Don't call netif_wake_queue() in net_send_packet()
-                    : Fixed an out-of-mem bug in dma_rx()
-                    : Updated Documentation/networking/cs89x0.txt
-
-  Andrew Morton     : Kernel 2.3.99-pre1
-                    : Use skb_reserve to longword align IP header (two places)
-                    : Remove a delay loop from dma_rx()
-                    : Replace '100' with HZ
-                    : Clean up a couple of skb API abuses
-                    : Added 'cs89x0_dma=N' kernel boot option
-                    : Correctly initialise lp->lock in non-module compile
-
-  Andrew Morton     : Kernel 2.3.99-pre4-1
-                    : MOD_INC/DEC race fix (see
-                    : http://www.uwsg.indiana.edu/hypermail/linux/kernel/0003.3/1532.html)
-
-  Andrew Morton     : Kernel 2.4.0-test7-pre2
-                    : Enhanced EEPROM support to cover more devices,
-                    :   abstracted IRQ mapping to support CONFIG_ARCH_CLPS7500 arch
-                    :   (Jason Gunthorpe <jgg@ualberta.ca>)
-
-  Andrew Morton     : Kernel 2.4.0-test11-pre4
-                    : Use dev->name in request_*() (Andrey Panin)
-                    : Fix an error-path memleak in init_module()
-                    : Preserve return value from request_irq()
-                    : Fix type of `media' module parm (Keith Owens)
-                    : Use SET_MODULE_OWNER()
-                    : Tidied up strange request_irq() abuse in net_open().
-
-  Andrew Morton     : Kernel 2.4.3-pre1
-                    : Request correct number of pages for DMA (Hugh Dickens)
-                    : Select PP_ChipID _after_ unregister_netdev in cleanup_module()
-                    :  because unregister_netdev() calls get_stats.
-                    : Make `version[]' __initdata
-                    : Uninlined the read/write reg/word functions.
-
-  Oskar Schirmer    : oskar@scara.com
-                    : HiCO.SH4 (superh) support added (irq#1, cs89x0_media=)
-
-  Deepak Saxena     : dsaxena@plexity.net
-                    : Intel IXDP2x01 (XScale ixp2x00 NPU) platform support
-
-  Dmitry Pervushin  : dpervushin@ru.mvista.com
-                    : PNX010X platform support
-
-  Deepak Saxena     : dsaxena@plexity.net
-                    : Intel IXDP2351 platform support
-
-  Dmitry Pervushin  : dpervushin@ru.mvista.com
-                    : PNX010X platform support
-
-  Domenico Andreoli : cavokz@gmail.com
-                    : QQ2440 platform support
-
-*/
-
 
 /*
  * Set this to zero to disable DMA code
@@ -119,14 +41,12 @@
  */
 #define DEBUGGING	1
 
-/*
-  Sources:
+/* Sources:
+ *	Crynwr packet driver epktisa.
+ *	Crystal Semiconductor data sheets.
+ */
 
-	Crynwr packet driver epktisa.
-
-	Crystal Semiconductor data sheets.
-
-*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/printk.h>
@@ -147,8 +67,8 @@
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
 #include <asm/irq.h>
 #include <linux/atomic.h>
 #if ALLOW_DMA
@@ -157,35 +77,55 @@
 
 #include "cs89x0.h"
 
+#define cs89_dbg(val, level, fmt, ...)				\
+do {								\
+	if (val <= net_debug)					\
+		pr_##level(fmt, ##__VA_ARGS__);			\
+} while (0)
+
 static char version[] __initdata =
-"cs89x0.c: v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton\n";
+	"v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton";
 
 #define DRV_NAME "cs89x0"
 
 /* First, a few definitions that the brave might change.
-   A zero-terminated list of I/O addresses to be probed. Some special flags..
-      Addr & 1 = Read back the address port, look for signature and reset
-                 the page window before probing
-      Addr & 3 = Reset the page window and probe
-   The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space,
-   but it is possible that a Cirrus board could be plugged into the ISA
-   slots. */
+ * A zero-terminated list of I/O addresses to be probed. Some special flags..
+ * Addr & 1 = Read back the address port, look for signature and reset
+ * the page window before probing
+ * Addr & 3 = Reset the page window and probe
+ * The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space,
+ * but it is possible that a Cirrus board could be plugged into the ISA
+ * slots.
+ */
 /* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps
-   them to system IRQ numbers. This mapping is card specific and is set to
-   the configuration of the Cirrus Eval board for this chip. */
+ * them to system IRQ numbers. This mapping is card specific and is set to
+ * the configuration of the Cirrus Eval board for this chip.
+ */
 #if defined(CONFIG_MACH_IXDP2351)
 #define CS89x0_NONISA_IRQ
-static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0};
-static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
+static unsigned int netcard_portlist[] __used __initdata = {
+	IXDP2351_VIRT_CS8900_BASE, 0
+};
+static unsigned int cs8900_irq_map[] = {
+	IRQ_IXDP2351_CS8900, 0, 0, 0
+};
 #elif defined(CONFIG_ARCH_IXDP2X01)
 #define CS89x0_NONISA_IRQ
-static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
-static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
+static unsigned int netcard_portlist[] __used __initdata = {
+	IXDP2X01_CS8900_VIRT_BASE, 0
+};
+static unsigned int cs8900_irq_map[] = {
+	IRQ_IXDP2X01_CS8900, 0, 0, 0
+};
 #else
 #ifndef CONFIG_CS89x0_PLATFORM
-static unsigned int netcard_portlist[] __used __initdata =
-   { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
-static unsigned int cs8900_irq_map[] = {10,11,12,5};
+static unsigned int netcard_portlist[] __used __initdata = {
+	0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240,
+	0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0
+};
+static unsigned int cs8900_irq_map[] = {
+	10, 11, 12, 5
+};
 #endif
 #endif
 
@@ -222,6 +162,8 @@
 	int send_underrun;	/* keep track of how many underruns in a row we get */
 	int force;		/* force various values; see FORCE* above. */
 	spinlock_t lock;
+	void __iomem *virt_addr;/* CS89x0 virtual address. */
+	unsigned long size;	/* Length of CS89x0 memory region. */
 #if ALLOW_DMA
 	int use_dma;		/* Flag: we're using dma */
 	int dma;		/* DMA channel */
@@ -230,119 +172,42 @@
 	unsigned char *end_dma_buff;	/* points to the end of the buffer */
 	unsigned char *rx_dma_ptr;	/* points to the next packet  */
 #endif
-#ifdef CONFIG_CS89x0_PLATFORM
-	void __iomem *virt_addr;/* Virtual address for accessing the CS89x0. */
-	unsigned long phys_addr;/* Physical address for accessing the CS89x0. */
-	unsigned long size;	/* Length of CS89x0 memory region. */
-#endif
 };
 
-/* Index to functions, as function prototypes. */
-
-static int cs89x0_probe1(struct net_device *dev, unsigned long ioaddr, int modular);
-static int net_open(struct net_device *dev);
-static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t net_interrupt(int irq, void *dev_id);
-static void set_multicast_list(struct net_device *dev);
-static void net_timeout(struct net_device *dev);
-static void net_rx(struct net_device *dev);
-static int net_close(struct net_device *dev);
-static struct net_device_stats *net_get_stats(struct net_device *dev);
-static void reset_chip(struct net_device *dev);
-static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer);
-static int get_eeprom_cksum(int off, int len, int *buffer);
-static int set_mac_address(struct net_device *dev, void *addr);
-static void count_rx_errors(int status, struct net_device *dev);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void net_poll_controller(struct net_device *dev);
-#endif
-#if ALLOW_DMA
-static void get_dma_channel(struct net_device *dev);
-static void release_dma_buff(struct net_local *lp);
-#endif
-
 /* Example routines you must write ;->. */
 #define tx_done(dev) 1
 
 /*
  * Permit 'cs89x0_dma=N' in the kernel boot environment
  */
-#if !defined(MODULE) && (ALLOW_DMA != 0)
+#if !defined(MODULE)
+#if ALLOW_DMA
 static int g_cs89x0_dma;
 
 static int __init dma_fn(char *str)
 {
-	g_cs89x0_dma = simple_strtol(str,NULL,0);
+	g_cs89x0_dma = simple_strtol(str, NULL, 0);
 	return 1;
 }
 
 __setup("cs89x0_dma=", dma_fn);
-#endif	/* !defined(MODULE) && (ALLOW_DMA != 0) */
+#endif	/* ALLOW_DMA */
 
-#ifndef MODULE
 static int g_cs89x0_media__force;
 
 static int __init media_fn(char *str)
 {
-	if (!strcmp(str, "rj45")) g_cs89x0_media__force = FORCE_RJ45;
-	else if (!strcmp(str, "aui")) g_cs89x0_media__force = FORCE_AUI;
-	else if (!strcmp(str, "bnc")) g_cs89x0_media__force = FORCE_BNC;
+	if (!strcmp(str, "rj45"))
+		g_cs89x0_media__force = FORCE_RJ45;
+	else if (!strcmp(str, "aui"))
+		g_cs89x0_media__force = FORCE_AUI;
+	else if (!strcmp(str, "bnc"))
+		g_cs89x0_media__force = FORCE_BNC;
+
 	return 1;
 }
 
 __setup("cs89x0_media=", media_fn);
-
-
-#ifndef CONFIG_CS89x0_PLATFORM
-/* Check for a network adaptor of this type, and return '0' iff one exists.
-   If dev->base_addr == 0, probe all likely locations.
-   If dev->base_addr == 1, always return failure.
-   If dev->base_addr == 2, allocate space for the device and return success
-   (detachable devices only).
-   Return 0 on success.
-   */
-
-struct net_device * __init cs89x0_probe(int unit)
-{
-	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
-	unsigned *port;
-	int err = 0;
-	int irq;
-	int io;
-
-	if (!dev)
-		return ERR_PTR(-ENODEV);
-
-	sprintf(dev->name, "eth%d", unit);
-	netdev_boot_setup_check(dev);
-	io = dev->base_addr;
-	irq = dev->irq;
-
-	if (net_debug)
-		printk("cs89x0:cs89x0_probe(0x%x)\n", io);
-
-	if (io > 0x1ff)	{	/* Check a single specified location. */
-		err = cs89x0_probe1(dev, io, 0);
-	} else if (io != 0) {	/* Don't probe at all. */
-		err = -ENXIO;
-	} else {
-		for (port = netcard_portlist; *port; port++) {
-			if (cs89x0_probe1(dev, *port, 0) == 0)
-				break;
-			dev->irq = irq;
-		}
-		if (!*port)
-			err = -ENODEV;
-	}
-	if (err)
-		goto out;
-	return dev;
-out:
-	free_netdev(dev);
-	printk(KERN_WARNING "cs89x0: no cs8900 or cs8920 detected.  Be sure to disable PnP with SETUP\n");
-	return ERR_PTR(err);
-}
-#endif
 #endif
 
 #if defined(CONFIG_MACH_IXDP2351)
@@ -369,36 +234,22 @@
 {
 	__raw_writel(value, base_addr + (portno << 1));
 }
-#else
-static u16
-readword(unsigned long base_addr, int portno)
-{
-	return inw(base_addr + portno);
-}
-
-static void
-writeword(unsigned long base_addr, int portno, u16 value)
-{
-	outw(value, base_addr + portno);
-}
 #endif
 
-static void
-readwords(unsigned long base_addr, int portno, void *buf, int length)
+static void readwords(struct net_local *lp, int portno, void *buf, int length)
 {
 	u8 *buf8 = (u8 *)buf;
 
 	do {
 		u16 tmp16;
 
-		tmp16 = readword(base_addr, portno);
+		tmp16 = ioread16(lp->virt_addr + portno);
 		*buf8++ = (u8)tmp16;
 		*buf8++ = (u8)(tmp16 >> 8);
 	} while (--length);
 }
 
-static void
-writewords(unsigned long base_addr, int portno, void *buf, int length)
+static void writewords(struct net_local *lp, int portno, void *buf, int length)
 {
 	u8 *buf8 = (u8 *)buf;
 
@@ -407,32 +258,37 @@
 
 		tmp16 = *buf8++;
 		tmp16 |= (*buf8++) << 8;
-		writeword(base_addr, portno, tmp16);
+		iowrite16(tmp16, lp->virt_addr + portno);
 	} while (--length);
 }
 
 static u16
 readreg(struct net_device *dev, u16 regno)
 {
-	writeword(dev->base_addr, ADD_PORT, regno);
-	return readword(dev->base_addr, DATA_PORT);
+	struct net_local *lp = netdev_priv(dev);
+
+	iowrite16(regno, lp->virt_addr + ADD_PORT);
+	return ioread16(lp->virt_addr + DATA_PORT);
 }
 
 static void
 writereg(struct net_device *dev, u16 regno, u16 value)
 {
-	writeword(dev->base_addr, ADD_PORT, regno);
-	writeword(dev->base_addr, DATA_PORT, value);
+	struct net_local *lp = netdev_priv(dev);
+
+	iowrite16(regno, lp->virt_addr + ADD_PORT);
+	iowrite16(value, lp->virt_addr + DATA_PORT);
 }
 
 static int __init
 wait_eeprom_ready(struct net_device *dev)
 {
 	int timeout = jiffies;
-	/* check to see if the EEPROM is ready, a timeout is used -
-	   just in case EEPROM is ready when SI_BUSY in the
-	   PP_SelfST is clear */
-	while(readreg(dev, PP_SelfST) & SI_BUSY)
+	/* check to see if the EEPROM is ready,
+	 * a timeout is used just in case EEPROM is ready when
+	 * SI_BUSY in the PP_SelfST is clear
+	 */
+	while (readreg(dev, PP_SelfST) & SI_BUSY)
 		if (jiffies - timeout >= 40)
 			return -1;
 	return 0;
@@ -443,17 +299,19 @@
 {
 	int i;
 
-	if (net_debug > 3) printk("EEPROM data from %x for %x:\n",off,len);
+	cs89_dbg(3, info, "EEPROM data from %x for %x:", off, len);
 	for (i = 0; i < len; i++) {
-		if (wait_eeprom_ready(dev) < 0) return -1;
+		if (wait_eeprom_ready(dev) < 0)
+			return -1;
 		/* Now send the EEPROM read command and EEPROM location to read */
 		writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD);
-		if (wait_eeprom_ready(dev) < 0) return -1;
+		if (wait_eeprom_ready(dev) < 0)
+			return -1;
 		buffer[i] = readreg(dev, PP_EEData);
-		if (net_debug > 3) printk("%04x ", buffer[i]);
+		cs89_dbg(3, cont, " %04x", buffer[i]);
 	}
-	if (net_debug > 3) printk("\n");
-        return 0;
+	cs89_dbg(3, cont, "\n");
+	return 0;
 }
 
 static int  __init
@@ -470,341 +328,52 @@
 	return -1;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling receive - used by netconsole and other diagnostic tools
- * to allow network i/o with interrupts disabled.
- */
-static void net_poll_controller(struct net_device *dev)
+static void
+write_irq(struct net_device *dev, int chip_type, int irq)
 {
-	disable_irq(dev->irq);
-	net_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
-}
-#endif
-
-static const struct net_device_ops net_ops = {
-	.ndo_open		= net_open,
-	.ndo_stop		= net_close,
-	.ndo_tx_timeout		= net_timeout,
-	.ndo_start_xmit 	= net_send_packet,
-	.ndo_get_stats		= net_get_stats,
-	.ndo_set_rx_mode	= set_multicast_list,
-	.ndo_set_mac_address 	= set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= net_poll_controller,
-#endif
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-/* This is the real probe routine.  Linux has a history of friendly device
-   probes on the ISA bus.  A good device probes avoids doing writes, and
-   verifies that the correct device exists and functions.
-   Return 0 on success.
- */
-
-static int __init
-cs89x0_probe1(struct net_device *dev, unsigned long ioaddr, int modular)
-{
-	struct net_local *lp = netdev_priv(dev);
-	static unsigned version_printed;
 	int i;
-	int tmp;
-	unsigned rev_type = 0;
-	int eeprom_buff[CHKSUM_LEN];
-	int retval;
 
-	/* Initialize the device structure. */
-	if (!modular) {
-		memset(lp, 0, sizeof(*lp));
-		spin_lock_init(&lp->lock);
-#ifndef MODULE
-#if ALLOW_DMA
-		if (g_cs89x0_dma) {
-			lp->use_dma = 1;
-			lp->dma = g_cs89x0_dma;
-			lp->dmasize = 16;	/* Could make this an option... */
-		}
-#endif
-		lp->force = g_cs89x0_media__force;
-#endif
-
-        }
-
-	/* Grab the region so we can find another board if autoIRQ fails. */
-	/* WTF is going on here? */
-	if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) {
-		printk(KERN_ERR "%s: request_region(0x%lx, 0x%x) failed\n",
-				DRV_NAME, ioaddr, NETCARD_IO_EXTENT);
-		retval = -EBUSY;
-		goto out1;
-	}
-
-	/* if they give us an odd I/O address, then do ONE write to
-           the address port, to get it back to address zero, where we
-           expect to find the EISA signature word. An IO with a base of 0x3
-	   will skip the test for the ADD_PORT. */
-	if (ioaddr & 1) {
-		if (net_debug > 1)
-			printk(KERN_INFO "%s: odd ioaddr 0x%lx\n", dev->name, ioaddr);
-	        if ((ioaddr & 2) != 2)
-	        	if ((readword(ioaddr & ~3, ADD_PORT) & ADD_MASK) != ADD_SIG) {
-				printk(KERN_ERR "%s: bad signature 0x%x\n",
-					dev->name, readword(ioaddr & ~3, ADD_PORT));
-		        	retval = -ENODEV;
-				goto out2;
-			}
-	}
-
-	ioaddr &= ~3;
-	printk(KERN_DEBUG "PP_addr at %lx[%x]: 0x%x\n",
-			ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT));
-	writeword(ioaddr, ADD_PORT, PP_ChipID);
-
-	tmp = readword(ioaddr, DATA_PORT);
-	if (tmp != CHIP_EISA_ID_SIG) {
-		printk(KERN_DEBUG "%s: incorrect signature at %lx[%x]: 0x%x!="
-			CHIP_EISA_ID_SIG_STR "\n",
-			dev->name, ioaddr, DATA_PORT, tmp);
-  		retval = -ENODEV;
-  		goto out2;
-	}
-
-	/* Fill in the 'dev' fields. */
-	dev->base_addr = ioaddr;
-
-	/* get the chip type */
-	rev_type = readreg(dev, PRODUCT_ID_ADD);
-	lp->chip_type = rev_type &~ REVISON_BITS;
-	lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';
-
-	/* Check the chip type and revision in order to set the correct send command
-	CS8920 revision C and CS8900 revision F can use the faster send. */
-	lp->send_cmd = TX_AFTER_381;
-	if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
-		lp->send_cmd = TX_NOW;
-	if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
-		lp->send_cmd = TX_NOW;
-
-	if (net_debug  &&  version_printed++ == 0)
-		printk(version);
-
-	printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#3lx ",
-	       dev->name,
-	       lp->chip_type==CS8900?'0':'2',
-	       lp->chip_type==CS8920M?"M":"",
-	       lp->chip_revision,
-	       dev->base_addr);
-
-	reset_chip(dev);
-
-        /* Here we read the current configuration of the chip. If there
-	   is no Extended EEPROM then the idea is to not disturb the chip
-	   configuration, it should have been correctly setup by automatic
-	   EEPROM read on reset. So, if the chip says it read the EEPROM
-	   the driver will always do *something* instead of complain that
-	   adapter_cnf is 0. */
-
-
-        if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
-	      (EEPROM_OK|EEPROM_PRESENT)) {
-	        /* Load the MAC. */
-		for (i=0; i < ETH_ALEN/2; i++) {
-	                unsigned int Addr;
-			Addr = readreg(dev, PP_IA+i*2);
-		        dev->dev_addr[i*2] = Addr & 0xFF;
-		        dev->dev_addr[i*2+1] = Addr >> 8;
-		}
-
-	   	/* Load the Adapter Configuration.
-		   Note:  Barring any more specific information from some
-		   other source (ie EEPROM+Schematics), we would not know
-		   how to operate a 10Base2 interface on the AUI port.
-		   However, since we  do read the status of HCB1 and use
-		   settings that always result in calls to control_dc_dc(dev,0)
-		   a BNC interface should work if the enable pin
-		   (dc/dc converter) is on HCB1. It will be called AUI
-		   however. */
-
-		lp->adapter_cnf = 0;
-		i = readreg(dev, PP_LineCTL);
-		/* Preserve the setting of the HCB1 pin. */
-		if ((i & (HCB1 | HCB1_ENBL)) ==  (HCB1 | HCB1_ENBL))
-			lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;
-		/* Save the sqelch bit */
-		if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH)
-			lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;
-		/* Check if the card is in 10Base-t only mode */
-		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
-			lp->adapter_cnf |=  A_CNF_10B_T | A_CNF_MEDIA_10B_T;
-		/* Check if the card is in AUI only mode */
-		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
-			lp->adapter_cnf |=  A_CNF_AUI | A_CNF_MEDIA_AUI;
-		/* Check if the card is in Auto mode. */
-		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
-			lp->adapter_cnf |=  A_CNF_AUI | A_CNF_10B_T |
-			A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;
-
-		if (net_debug > 1)
-			printk(KERN_INFO "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
-					dev->name, i, lp->adapter_cnf);
-
-		/* IRQ. Other chips already probe, see below. */
-		if (lp->chip_type == CS8900)
-			lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;
-
-		printk( "[Cirrus EEPROM] ");
-	}
-
-        printk("\n");
-
-	/* First check to see if an EEPROM is attached. */
-
-	if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
-		printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n");
-	else if (get_eeprom_data(dev, START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
-		printk(KERN_WARNING "\ncs89x0: EEPROM read failed, relying on command line.\n");
-        } else if (get_eeprom_cksum(START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
-		/* Check if the chip was able to read its own configuration starting
-		   at 0 in the EEPROM*/
-		if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
-		    (EEPROM_OK|EEPROM_PRESENT))
-                	printk(KERN_WARNING "cs89x0: Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");
-
-        } else {
-		/* This reads an extended EEPROM that is not documented
-		   in the CS8900 datasheet. */
-
-                /* get transmission control word  but keep the autonegotiation bits */
-                if (!lp->auto_neg_cnf) lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET/2];
-                /* Store adapter configuration */
-                if (!lp->adapter_cnf) lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET/2];
-                /* Store ISA configuration */
-                lp->isa_config = eeprom_buff[ISA_CNF_OFFSET/2];
-                dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET/2] << 8;
-
-                /* eeprom_buff has 32-bit ints, so we can't just memcpy it */
-                /* store the initial memory base address */
-                for (i = 0; i < ETH_ALEN/2; i++) {
-                        dev->dev_addr[i*2] = eeprom_buff[i];
-                        dev->dev_addr[i*2+1] = eeprom_buff[i] >> 8;
-                }
-		if (net_debug > 1)
-			printk(KERN_DEBUG "%s: new adapter_cnf: 0x%x\n",
-				dev->name, lp->adapter_cnf);
-        }
-
-        /* allow them to force multiple transceivers.  If they force multiple, autosense */
-        {
-		int count = 0;
-		if (lp->force & FORCE_RJ45)	{lp->adapter_cnf |= A_CNF_10B_T; count++; }
-		if (lp->force & FORCE_AUI) 	{lp->adapter_cnf |= A_CNF_AUI; count++; }
-		if (lp->force & FORCE_BNC)	{lp->adapter_cnf |= A_CNF_10B_2; count++; }
-		if (count > 1)			{lp->adapter_cnf |= A_CNF_MEDIA_AUTO; }
-		else if (lp->force & FORCE_RJ45){lp->adapter_cnf |= A_CNF_MEDIA_10B_T; }
-		else if (lp->force & FORCE_AUI)	{lp->adapter_cnf |= A_CNF_MEDIA_AUI; }
-		else if (lp->force & FORCE_BNC)	{lp->adapter_cnf |= A_CNF_MEDIA_10B_2; }
-        }
-
-	if (net_debug > 1)
-		printk(KERN_DEBUG "%s: after force 0x%x, adapter_cnf=0x%x\n",
-			dev->name, lp->force, lp->adapter_cnf);
-
-        /* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */
-
-        /* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */
-
-        /* FIXME: we don't set the Ethernet address on the command line.  Use
-           ifconfig IFACE hw ether AABBCCDDEEFF */
-
-	printk(KERN_INFO "cs89x0 media %s%s%s",
-	       (lp->adapter_cnf & A_CNF_10B_T)?"RJ-45,":"",
-	       (lp->adapter_cnf & A_CNF_AUI)?"AUI,":"",
-	       (lp->adapter_cnf & A_CNF_10B_2)?"BNC,":"");
-
-	lp->irq_map = 0xffff;
-
-	/* If this is a CS8900 then no pnp soft */
-	if (lp->chip_type != CS8900 &&
-	    /* Check if the ISA IRQ has been set  */
-		(i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
-		 (i != 0 && i < CS8920_NO_INTS))) {
-		if (!dev->irq)
-			dev->irq = i;
-	} else {
-		i = lp->isa_config & INT_NO_MASK;
+	if (chip_type == CS8900) {
 #ifndef CONFIG_CS89x0_PLATFORM
-		if (lp->chip_type == CS8900) {
-#ifdef CS89x0_NONISA_IRQ
-		        i = cs8900_irq_map[0];
+		/* Search the mapping table for the corresponding IRQ pin. */
+		for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
+			if (cs8900_irq_map[i] == irq)
+				break;
+		/* Not found */
+		if (i == ARRAY_SIZE(cs8900_irq_map))
+			i = 3;
 #else
-			/* Translate the IRQ using the IRQ mapping table. */
-			if (i >= ARRAY_SIZE(cs8900_irq_map))
-				printk("\ncs89x0: invalid ISA interrupt number %d\n", i);
-			else
-				i = cs8900_irq_map[i];
-
-			lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */
-		} else {
-			int irq_map_buff[IRQ_MAP_LEN/2];
-
-			if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
-					    IRQ_MAP_LEN/2,
-					    irq_map_buff) >= 0) {
-				if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
-					lp->irq_map = (irq_map_buff[0]>>8) | (irq_map_buff[1] << 8);
-			}
+		/* INTRQ0 pin is used for interrupt generation. */
+		i = 0;
 #endif
-		}
-#endif
-		if (!dev->irq)
-			dev->irq = i;
+		writereg(dev, PP_CS8900_ISAINT, i);
+	} else {
+		writereg(dev, PP_CS8920_ISAINT, irq);
 	}
-
-	printk(" IRQ %d", dev->irq);
-
-#if ALLOW_DMA
-	if (lp->use_dma) {
-		get_dma_channel(dev);
-		printk(", DMA %d", dev->dma);
-	}
-	else
-#endif
-	{
-		printk(", programmed I/O");
-	}
-
-	/* print the ethernet address. */
-	printk(", MAC %pM", dev->dev_addr);
-
-	dev->netdev_ops	= &net_ops;
-	dev->watchdog_timeo = HZ;
-
-	printk("\n");
-	if (net_debug)
-		printk("cs89x0_probe1() successful\n");
-
-	retval = register_netdev(dev);
-	if (retval)
-		goto out3;
-	return 0;
-out3:
-	writeword(dev->base_addr, ADD_PORT, PP_ChipID);
-out2:
-	release_region(ioaddr & ~3, NETCARD_IO_EXTENT);
-out1:
-	return retval;
 }
 
+static void
+count_rx_errors(int status, struct net_device *dev)
+{
+	dev->stats.rx_errors++;
+	if (status & RX_RUNT)
+		dev->stats.rx_length_errors++;
+	if (status & RX_EXTRA_DATA)
+		dev->stats.rx_length_errors++;
+	if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA | RX_RUNT)))
+		/* per str 172 */
+		dev->stats.rx_crc_errors++;
+	if (status & RX_DRIBBLE)
+		dev->stats.rx_frame_errors++;
+}
 
 /*********************************
  * This page contains DMA routines
-**********************************/
+ *********************************/
 
 #if ALLOW_DMA
 
-#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
+#define dma_page_eq(ptr1, ptr2) ((long)(ptr1) >> 17 == (long)(ptr2) >> 17)
 
 static void
 get_dma_channel(struct net_device *dev)
@@ -833,11 +402,10 @@
 	struct net_local *lp = netdev_priv(dev);
 	if ((lp->isa_config & ANY_ISA_DMA) == 0)
 		return;
-	if (chip_type == CS8900) {
-		writereg(dev, PP_CS8900_ISADMA, dma-5);
-	} else {
+	if (chip_type == CS8900)
+		writereg(dev, PP_CS8900_ISADMA, dma - 5);
+	else
 		writereg(dev, PP_CS8920_ISADMA, dma);
-	}
 }
 
 static void
@@ -847,18 +415,15 @@
 
 	if (lp->use_dma) {
 		if ((lp->isa_config & ANY_ISA_DMA) == 0) {
-			if (net_debug > 3)
-				printk("set_dma_cfg(): no DMA\n");
+			cs89_dbg(3, err, "set_dma_cfg(): no DMA\n");
 			return;
 		}
 		if (lp->isa_config & ISA_RxDMA) {
 			lp->curr_rx_cfg |= RX_DMA_ONLY;
-			if (net_debug > 3)
-				printk("set_dma_cfg(): RX_DMA_ONLY\n");
+			cs89_dbg(3, info, "set_dma_cfg(): RX_DMA_ONLY\n");
 		} else {
 			lp->curr_rx_cfg |= AUTO_RX_DMA;	/* not that we support it... */
-			if (net_debug > 3)
-				printk("set_dma_cfg(): AUTO_RX_DMA\n");
+			cs89_dbg(3, info, "set_dma_cfg(): AUTO_RX_DMA\n");
 		}
 	}
 }
@@ -868,7 +433,7 @@
 {
 	struct net_local *lp = netdev_priv(dev);
 	if (lp->use_dma)
-		return (lp->isa_config & ANY_ISA_DMA)? RX_DMA_ENBL : 0;
+		return (lp->isa_config & ANY_ISA_DMA) ? RX_DMA_ENBL : 0;
 	else
 		return 0;
 }
@@ -898,13 +463,13 @@
 	int status, length;
 	unsigned char *bp = lp->rx_dma_ptr;
 
-	status = bp[0] + (bp[1]<<8);
-	length = bp[2] + (bp[3]<<8);
+	status = bp[0] + (bp[1] << 8);
+	length = bp[2] + (bp[3] << 8);
 	bp += 4;
-	if (net_debug > 5) {
-		printk(	"%s: receiving DMA packet at %lx, status %x, length %x\n",
-			dev->name, (unsigned long)bp, status, length);
-	}
+
+	cs89_dbg(5, debug, "%s: receiving DMA packet at %lx, status %x, length %x\n",
+		 dev->name, (unsigned long)bp, status, length);
+
 	if ((status & RX_OK) == 0) {
 		count_rx_errors(status, dev);
 		goto skip_this_frame;
@@ -913,14 +478,16 @@
 	/* Malloc up new buffer. */
 	skb = netdev_alloc_skb(dev, length + 2);
 	if (skb == NULL) {
-		if (net_debug)	/* I don't think we want to do this to a stressed system */
-			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
+		/* I don't think we want to do this to a stressed system */
+		cs89_dbg(0, err, "%s: Memory squeeze, dropping packet\n",
+			 dev->name);
 		dev->stats.rx_dropped++;
 
 		/* AKPM: advance bp to the next frame */
 skip_this_frame:
 		bp += (length + 3) & ~3;
-		if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024;
+		if (bp >= lp->end_dma_buff)
+			bp -= lp->dmasize * 1024;
 		lp->rx_dma_ptr = bp;
 		return;
 	}
@@ -928,63 +495,38 @@
 
 	if (bp + length > lp->end_dma_buff) {
 		int semi_cnt = lp->end_dma_buff - bp;
-		memcpy(skb_put(skb,semi_cnt), bp, semi_cnt);
-		memcpy(skb_put(skb,length - semi_cnt), lp->dma_buff,
+		memcpy(skb_put(skb, semi_cnt), bp, semi_cnt);
+		memcpy(skb_put(skb, length - semi_cnt), lp->dma_buff,
 		       length - semi_cnt);
 	} else {
-		memcpy(skb_put(skb,length), bp, length);
+		memcpy(skb_put(skb, length), bp, length);
 	}
 	bp += (length + 3) & ~3;
-	if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024;
+	if (bp >= lp->end_dma_buff)
+		bp -= lp->dmasize * 1024;
 	lp->rx_dma_ptr = bp;
 
-	if (net_debug > 3) {
-		printk(	"%s: received %d byte DMA packet of type %x\n",
-			dev->name, length,
-			(skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
-	}
-        skb->protocol=eth_type_trans(skb,dev);
+	cs89_dbg(3, info, "%s: received %d byte DMA packet of type %x\n",
+		 dev->name, length,
+		 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
+		  skb->data[ETH_ALEN + ETH_ALEN + 1]));
+
+	skb->protocol = eth_type_trans(skb, dev);
 	netif_rx(skb);
 	dev->stats.rx_packets++;
 	dev->stats.rx_bytes += length;
 }
 
-#endif	/* ALLOW_DMA */
-
-static void __init reset_chip(struct net_device *dev)
+static void release_dma_buff(struct net_local *lp)
 {
-#if !defined(CONFIG_MACH_MX31ADS)
-#if !defined(CS89x0_NONISA_IRQ)
-	struct net_local *lp = netdev_priv(dev);
-	int ioaddr = dev->base_addr;
-#endif /* CS89x0_NONISA_IRQ */
-	int reset_start_time;
-
-	writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
-
-	/* wait 30 ms */
-	msleep(30);
-
-#if !defined(CS89x0_NONISA_IRQ)
-	if (lp->chip_type != CS8900) {
-		/* Hardware problem requires PNP registers to be reconfigured after a reset */
-		writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
-		outb(dev->irq, ioaddr + DATA_PORT);
-		outb(0,      ioaddr + DATA_PORT + 1);
-
-		writeword(ioaddr, ADD_PORT, PP_CS8920_ISAMemB);
-		outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
-		outb((dev->mem_start >> 8) & 0xff,   ioaddr + DATA_PORT + 1);
+	if (lp->dma_buff) {
+		free_pages((unsigned long)(lp->dma_buff),
+			   get_order(lp->dmasize * 1024));
+		lp->dma_buff = NULL;
 	}
-#endif /* CS89x0_NONISA_IRQ */
-
-	/* Wait until the chip is reset */
-	reset_start_time = jiffies;
-	while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
-		;
-#endif /* !CONFIG_MACH_MX31ADS */
 }
 
+#endif	/* ALLOW_DMA */
 
 static void
 control_dc_dc(struct net_device *dev, int on_not_off)
@@ -993,8 +535,9 @@
 	unsigned int selfcontrol;
 	int timenow = jiffies;
 	/* control the DC to DC convertor in the SelfControl register.
-	   Note: This is hooked up to a general purpose pin, might not
-	   always be a DC to DC convertor. */
+	 * Note: This is hooked up to a general purpose pin, might not
+	 * always be a DC to DC convertor.
+	 */
 
 	selfcontrol = HCB1_ENBL; /* Enable the HCB1 bit as an output */
 	if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off)
@@ -1008,6 +551,49 @@
 		;
 }
 
+/* send a test packet - return true if carrier bits are ok */
+static int
+send_test_pkt(struct net_device *dev)
+{
+	struct net_local *lp = netdev_priv(dev);
+	char test_packet[] = {
+		0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0,
+		0, 46,		/* A 46 in network order */
+		0, 0,		/* DSAP=0 & SSAP=0 fields */
+		0xf3, 0		/* Control (Test Req + P bit set) */
+	};
+	long timenow = jiffies;
+
+	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
+
+	memcpy(test_packet,            dev->dev_addr, ETH_ALEN);
+	memcpy(test_packet + ETH_ALEN, dev->dev_addr, ETH_ALEN);
+
+	iowrite16(TX_AFTER_ALL, lp->virt_addr + TX_CMD_PORT);
+	iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT);
+
+	/* Test to see if the chip has allocated memory for the packet */
+	while (jiffies - timenow < 5)
+		if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
+			break;
+	if (jiffies - timenow >= 5)
+		return 0;	/* this shouldn't happen */
+
+	/* Write the contents of the packet */
+	writewords(lp, TX_FRAME_PORT, test_packet, (ETH_ZLEN + 1) >> 1);
+
+	cs89_dbg(1, debug, "Sending test packet ");
+	/* wait a couple of jiffies for packet to be received */
+	for (timenow = jiffies; jiffies - timenow < 3;)
+		;
+	if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
+		cs89_dbg(1, cont, "succeeded\n");
+		return 1;
+	}
+	cs89_dbg(1, cont, "failed\n");
+	return 0;
+}
+
 #define DETECTED_NONE  0
 #define DETECTED_RJ45H 1
 #define DETECTED_RJ45F 2
@@ -1021,40 +607,46 @@
 	int timenow = jiffies;
 	int fdx;
 
-	if (net_debug > 1) printk("%s: Attempting TP\n", dev->name);
+	cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name);
 
-        /* If connected to another full duplex capable 10-Base-T card the link pulses
-           seem to be lost when the auto detect bit in the LineCTL is set.
-           To overcome this the auto detect bit will be cleared whilst testing the
-           10-Base-T interface.  This would not be necessary for the sparrow chip but
-           is simpler to do it anyway. */
-	writereg(dev, PP_LineCTL, lp->linectl &~ AUI_ONLY);
+	/* If connected to another full duplex capable 10-Base-T card
+	 * the link pulses seem to be lost when the auto detect bit in
+	 * the LineCTL is set.  To overcome this the auto detect bit will
+	 * be cleared whilst testing the 10-Base-T interface.  This would
+	 * not be necessary for the sparrow chip but is simpler to do it
+	 * anyway.
+	 */
+	writereg(dev, PP_LineCTL, lp->linectl & ~AUI_ONLY);
 	control_dc_dc(dev, 0);
 
-        /* Delay for the hardware to work out if the TP cable is present - 150ms */
-	for (timenow = jiffies; jiffies - timenow < 15; )
-                ;
+	/* Delay for the hardware to work out if the TP cable is present
+	 * - 150ms
+	 */
+	for (timenow = jiffies; jiffies - timenow < 15;)
+		;
 	if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
 		return DETECTED_NONE;
 
 	if (lp->chip_type == CS8900) {
-                switch (lp->force & 0xf0) {
+		switch (lp->force & 0xf0) {
 #if 0
-                case FORCE_AUTO:
-			printk("%s: cs8900 doesn't autonegotiate\n",dev->name);
-                        return DETECTED_NONE;
+		case FORCE_AUTO:
+			pr_info("%s: cs8900 doesn't autonegotiate\n",
+				dev->name);
+			return DETECTED_NONE;
 #endif
-		/* CS8900 doesn't support AUTO, change to HALF*/
-                case FORCE_AUTO:
+			/* CS8900 doesn't support AUTO, change to HALF*/
+		case FORCE_AUTO:
 			lp->force &= ~FORCE_AUTO;
-                        lp->force |= FORCE_HALF;
+			lp->force |= FORCE_HALF;
 			break;
 		case FORCE_HALF:
 			break;
-                case FORCE_FULL:
-			writereg(dev, PP_TestCTL, readreg(dev, PP_TestCTL) | FDX_8900);
+		case FORCE_FULL:
+			writereg(dev, PP_TestCTL,
+				 readreg(dev, PP_TestCTL) | FDX_8900);
 			break;
-                }
+		}
 		fdx = readreg(dev, PP_TestCTL) & FDX_8900;
 	} else {
 		switch (lp->force & 0xf0) {
@@ -1067,15 +659,15 @@
 		case FORCE_FULL:
 			lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX;
 			break;
-                }
+		}
 
 		writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK);
 
 		if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
-			printk(KERN_INFO "%s: negotiating duplex...\n",dev->name);
+			pr_info("%s: negotiating duplex...\n", dev->name);
 			while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
 				if (jiffies - timenow > 4000) {
-					printk(KERN_ERR "**** Full / half duplex auto-negotiation timed out ****\n");
+					pr_err("**** Full / half duplex auto-negotiation timed out ****\n");
 					break;
 				}
 			}
@@ -1088,72 +680,15 @@
 		return DETECTED_RJ45H;
 }
 
-/* send a test packet - return true if carrier bits are ok */
-static int
-send_test_pkt(struct net_device *dev)
-{
-	char test_packet[] = { 0,0,0,0,0,0, 0,0,0,0,0,0,
-				 0, 46, /* A 46 in network order */
-				 0, 0, /* DSAP=0 & SSAP=0 fields */
-				 0xf3, 0 /* Control (Test Req + P bit set) */ };
-	long timenow = jiffies;
-
-	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
-
-	memcpy(test_packet,          dev->dev_addr, ETH_ALEN);
-	memcpy(test_packet+ETH_ALEN, dev->dev_addr, ETH_ALEN);
-
-        writeword(dev->base_addr, TX_CMD_PORT, TX_AFTER_ALL);
-        writeword(dev->base_addr, TX_LEN_PORT, ETH_ZLEN);
-
-	/* Test to see if the chip has allocated memory for the packet */
-	while (jiffies - timenow < 5)
-		if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
-			break;
-	if (jiffies - timenow >= 5)
-		return 0;	/* this shouldn't happen */
-
-	/* Write the contents of the packet */
-	writewords(dev->base_addr, TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1);
-
-	if (net_debug > 1) printk("Sending test packet ");
-	/* wait a couple of jiffies for packet to be received */
-	for (timenow = jiffies; jiffies - timenow < 3; )
-                ;
-        if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
-                if (net_debug > 1) printk("succeeded\n");
-                return 1;
-        }
-	if (net_debug > 1) printk("failed\n");
-	return 0;
-}
-
-
-static int
-detect_aui(struct net_device *dev)
-{
-	struct net_local *lp = netdev_priv(dev);
-
-	if (net_debug > 1) printk("%s: Attempting AUI\n", dev->name);
-	control_dc_dc(dev, 0);
-
-	writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY);
-
-	if (send_test_pkt(dev))
-		return DETECTED_AUI;
-	else
-		return DETECTED_NONE;
-}
-
 static int
 detect_bnc(struct net_device *dev)
 {
 	struct net_local *lp = netdev_priv(dev);
 
-	if (net_debug > 1) printk("%s: Attempting BNC\n", dev->name);
+	cs89_dbg(1, debug, "%s: Attempting BNC\n", dev->name);
 	control_dc_dc(dev, 1);
 
-	writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY);
+	writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY);
 
 	if (send_test_pkt(dev))
 		return DETECTED_BNC;
@@ -1161,361 +696,89 @@
 		return DETECTED_NONE;
 }
 
-
-static void
-write_irq(struct net_device *dev, int chip_type, int irq)
-{
-	int i;
-
-	if (chip_type == CS8900) {
-#ifndef CONFIG_CS89x0_PLATFORM
-		/* Search the mapping table for the corresponding IRQ pin. */
-		for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
-			if (cs8900_irq_map[i] == irq)
-				break;
-		/* Not found */
-		if (i == ARRAY_SIZE(cs8900_irq_map))
-			i = 3;
-#else
-		/* INTRQ0 pin is used for interrupt generation. */
-		i = 0;
-#endif
-		writereg(dev, PP_CS8900_ISAINT, i);
-	} else {
-		writereg(dev, PP_CS8920_ISAINT, irq);
-	}
-}
-
-/* Open/initialize the board.  This is called (in the current kernel)
-   sometime after booting when the 'ifconfig' program is run.
-
-   This routine should set everything up anew at each open, even
-   registers that "should" only need to be set once at boot, so that
-   there is non-reboot way to recover if something goes wrong.
-   */
-
-/* AKPM: do we need to do any locking here? */
-
 static int
-net_open(struct net_device *dev)
+detect_aui(struct net_device *dev)
 {
 	struct net_local *lp = netdev_priv(dev);
-	int result = 0;
-	int i;
-	int ret;
 
-	if (dev->irq < 2) {
-		/* Allow interrupts to be generated by the chip */
-/* Cirrus' release had this: */
-#if 0
-		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ );
-#endif
-/* And 2.3.47 had this: */
-		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
+	cs89_dbg(1, debug, "%s: Attempting AUI\n", dev->name);
+	control_dc_dc(dev, 0);
 
-		for (i = 2; i < CS8920_NO_INTS; i++) {
-			if ((1 << i) & lp->irq_map) {
-				if (request_irq(i, net_interrupt, 0, dev->name, dev) == 0) {
-					dev->irq = i;
-					write_irq(dev, lp->chip_type, i);
-					/* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */
-					break;
-				}
-			}
-		}
+	writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY);
 
-		if (i >= CS8920_NO_INTS) {
-			writereg(dev, PP_BusCTL, 0);	/* disable interrupts. */
-			printk(KERN_ERR "cs89x0: can't get an interrupt\n");
-			ret = -EAGAIN;
-			goto bad_out;
-		}
-	}
+	if (send_test_pkt(dev))
+		return DETECTED_AUI;
 	else
-	{
-#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM)
-		if (((1 << dev->irq) & lp->irq_map) == 0) {
-			printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
-                               dev->name, dev->irq, lp->irq_map);
-			ret = -EAGAIN;
-			goto bad_out;
-		}
-#endif
-/* FIXME: Cirrus' release had this: */
-		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ );
-/* And 2.3.47 had this: */
-#if 0
-		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
-#endif
-		write_irq(dev, lp->chip_type, dev->irq);
-		ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
-		if (ret) {
-			printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq);
-			goto bad_out;
-		}
-	}
-
-#if ALLOW_DMA
-	if (lp->use_dma) {
-		if (lp->isa_config & ANY_ISA_DMA) {
-			unsigned long flags;
-			lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
-							get_order(lp->dmasize * 1024));
-
-			if (!lp->dma_buff) {
-				printk(KERN_ERR "%s: cannot get %dK memory for DMA\n", dev->name, lp->dmasize);
-				goto release_irq;
-			}
-			if (net_debug > 1) {
-				printk(	"%s: dma %lx %lx\n",
-					dev->name,
-					(unsigned long)lp->dma_buff,
-					(unsigned long)isa_virt_to_bus(lp->dma_buff));
-			}
-			if ((unsigned long) lp->dma_buff >= MAX_DMA_ADDRESS ||
-			    !dma_page_eq(lp->dma_buff, lp->dma_buff+lp->dmasize*1024-1)) {
-				printk(KERN_ERR "%s: not usable as DMA buffer\n", dev->name);
-				goto release_irq;
-			}
-			memset(lp->dma_buff, 0, lp->dmasize * 1024);	/* Why? */
-			if (request_dma(dev->dma, dev->name)) {
-				printk(KERN_ERR "%s: cannot get dma channel %d\n", dev->name, dev->dma);
-				goto release_irq;
-			}
-			write_dma(dev, lp->chip_type, dev->dma);
-			lp->rx_dma_ptr = lp->dma_buff;
-			lp->end_dma_buff = lp->dma_buff + lp->dmasize*1024;
-			spin_lock_irqsave(&lp->lock, flags);
-			disable_dma(dev->dma);
-			clear_dma_ff(dev->dma);
-			set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */
-			set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
-			set_dma_count(dev->dma, lp->dmasize*1024);
-			enable_dma(dev->dma);
-			spin_unlock_irqrestore(&lp->lock, flags);
-		}
-	}
-#endif	/* ALLOW_DMA */
-
-	/* set the Ethernet address */
-	for (i=0; i < ETH_ALEN/2; i++)
-		writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
-
-	/* while we're testing the interface, leave interrupts disabled */
-	writereg(dev, PP_BusCTL, MEMORY_ON);
-
-	/* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */
-	if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) && (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH))
-                lp->linectl = LOW_RX_SQUELCH;
-	else
-                lp->linectl = 0;
-
-        /* check to make sure that they have the "right" hardware available */
-	switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
-	case A_CNF_MEDIA_10B_T: result = lp->adapter_cnf & A_CNF_10B_T; break;
-	case A_CNF_MEDIA_AUI:   result = lp->adapter_cnf & A_CNF_AUI; break;
-	case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break;
-        default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2);
-        }
-        if (!result) {
-                printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name);
-release_dma:
-#if ALLOW_DMA
-		free_dma(dev->dma);
-release_irq:
-		release_dma_buff(lp);
-#endif
-                writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
-                free_irq(dev->irq, dev);
-		ret = -EAGAIN;
-		goto bad_out;
-	}
-
-        /* set the hardware to the configured choice */
-	switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
-	case A_CNF_MEDIA_10B_T:
-                result = detect_tp(dev);
-                if (result==DETECTED_NONE) {
-                        printk(KERN_WARNING "%s: 10Base-T (RJ-45) has no cable\n", dev->name);
-                        if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
-                                result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */
-                }
-		break;
-	case A_CNF_MEDIA_AUI:
-                result = detect_aui(dev);
-                if (result==DETECTED_NONE) {
-                        printk(KERN_WARNING "%s: 10Base-5 (AUI) has no cable\n", dev->name);
-                        if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
-                                result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */
-                }
-		break;
-	case A_CNF_MEDIA_10B_2:
-                result = detect_bnc(dev);
-                if (result==DETECTED_NONE) {
-                        printk(KERN_WARNING "%s: 10Base-2 (BNC) has no cable\n", dev->name);
-                        if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
-                                result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */
-                }
-		break;
-	case A_CNF_MEDIA_AUTO:
-		writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET);
-		if (lp->adapter_cnf & A_CNF_10B_T)
-			if ((result = detect_tp(dev)) != DETECTED_NONE)
-				break;
-		if (lp->adapter_cnf & A_CNF_AUI)
-			if ((result = detect_aui(dev)) != DETECTED_NONE)
-				break;
-		if (lp->adapter_cnf & A_CNF_10B_2)
-			if ((result = detect_bnc(dev)) != DETECTED_NONE)
-				break;
-		printk(KERN_ERR "%s: no media detected\n", dev->name);
-		goto release_dma;
-	}
-	switch(result) {
-	case DETECTED_NONE:
-		printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name);
-		goto release_dma;
-	case DETECTED_RJ45H:
-		printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
-		break;
-	case DETECTED_RJ45F:
-		printk(KERN_INFO "%s: using full-duplex 10Base-T (RJ-45)\n", dev->name);
-		break;
-	case DETECTED_AUI:
-		printk(KERN_INFO "%s: using 10Base-5 (AUI)\n", dev->name);
-		break;
-	case DETECTED_BNC:
-		printk(KERN_INFO "%s: using 10Base-2 (BNC)\n", dev->name);
-		break;
-	}
-
-	/* Turn on both receive and transmit operations */
-	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);
-
-	/* Receive only error free packets addressed to this card */
-	lp->rx_mode = 0;
-	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);
-
-	lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;
-
-	if (lp->isa_config & STREAM_TRANSFER)
-		lp->curr_rx_cfg |= RX_STREAM_ENBL;
-#if ALLOW_DMA
-	set_dma_cfg(dev);
-#endif
-	writereg(dev, PP_RxCFG, lp->curr_rx_cfg);
-
-	writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL |
-		TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL);
-
-	writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL |
-#if ALLOW_DMA
-		dma_bufcfg(dev) |
-#endif
-		TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL);
-
-	/* now that we've got our act together, enable everything */
-	writereg(dev, PP_BusCTL, ENABLE_IRQ
-		 | (dev->mem_start?MEMORY_ON : 0) /* turn memory on */
-#if ALLOW_DMA
-		 | dma_busctl(dev)
-#endif
-                 );
-        netif_start_queue(dev);
-	if (net_debug > 1)
-		printk("cs89x0: net_open() succeeded\n");
-	return 0;
-bad_out:
-	return ret;
+		return DETECTED_NONE;
 }
 
-static void net_timeout(struct net_device *dev)
-{
-	/* If we get here, some higher level has decided we are broken.
-	   There should really be a "kick me" function call instead. */
-	if (net_debug > 0) printk("%s: transmit timed out, %s?\n", dev->name,
-		   tx_done(dev) ? "IRQ conflict ?" : "network cable problem");
-	/* Try to restart the adaptor. */
-	netif_wake_queue(dev);
-}
-
-static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev)
+/* We have a good packet(s), get it/them out of the buffers. */
+static void
+net_rx(struct net_device *dev)
 {
 	struct net_local *lp = netdev_priv(dev);
-	unsigned long flags;
+	struct sk_buff *skb;
+	int status, length;
 
-	if (net_debug > 3) {
-		printk("%s: sent %d byte packet of type %x\n",
-			dev->name, skb->len,
-			(skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
+	status = ioread16(lp->virt_addr + RX_FRAME_PORT);
+	length = ioread16(lp->virt_addr + RX_FRAME_PORT);
+
+	if ((status & RX_OK) == 0) {
+		count_rx_errors(status, dev);
+		return;
 	}
 
-	/* keep the upload from being interrupted, since we
-                  ask the chip to start transmitting before the
-                  whole packet has been completely uploaded. */
-
-	spin_lock_irqsave(&lp->lock, flags);
-	netif_stop_queue(dev);
-
-	/* initiate a transmit sequence */
-	writeword(dev->base_addr, TX_CMD_PORT, lp->send_cmd);
-	writeword(dev->base_addr, TX_LEN_PORT, skb->len);
-
-	/* Test to see if the chip has allocated memory for the packet */
-	if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
-		/*
-		 * Gasp!  It hasn't.  But that shouldn't happen since
-		 * we're waiting for TxOk, so return 1 and requeue this packet.
-		 */
-
-		spin_unlock_irqrestore(&lp->lock, flags);
-		if (net_debug) printk("cs89x0: Tx buffer not free!\n");
-		return NETDEV_TX_BUSY;
+	/* Malloc up new buffer. */
+	skb = netdev_alloc_skb(dev, length + 2);
+	if (skb == NULL) {
+#if 0		/* Again, this seems a cruel thing to do */
+		pr_warn("%s: Memory squeeze, dropping packet\n", dev->name);
+#endif
+		dev->stats.rx_dropped++;
+		return;
 	}
-	/* Write the contents of the packet */
-	writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
-	spin_unlock_irqrestore(&lp->lock, flags);
-	dev->stats.tx_bytes += skb->len;
-	dev_kfree_skb (skb);
+	skb_reserve(skb, 2);	/* longword align L3 header */
 
-	/*
-	 * We DO NOT call netif_wake_queue() here.
-	 * We also DO NOT call netif_start_queue().
-	 *
-	 * Either of these would cause another bottom half run through
-	 * net_send_packet() before this packet has fully gone out.  That causes
-	 * us to hit the "Gasp!" above and the send is rescheduled.  it runs like
-	 * a dog.  We just return and wait for the Tx completion interrupt handler
-	 * to restart the netdevice layer
-	 */
+	readwords(lp, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
+	if (length & 1)
+		skb->data[length-1] = ioread16(lp->virt_addr + RX_FRAME_PORT);
 
-	return NETDEV_TX_OK;
+	cs89_dbg(3, debug, "%s: received %d byte packet of type %x\n",
+		 dev->name, length,
+		 (skb->data[ETH_ALEN + ETH_ALEN] << 8) |
+		 skb->data[ETH_ALEN + ETH_ALEN + 1]);
+
+	skb->protocol = eth_type_trans(skb, dev);
+	netif_rx(skb);
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += length;
 }
 
 /* The typical workload of the driver:
-   Handle the network interface interrupts. */
+ * Handle the network interface interrupts.
+ */
 
 static irqreturn_t net_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct net_local *lp;
-	int ioaddr, status;
- 	int handled = 0;
+	int status;
+	int handled = 0;
 
-	ioaddr = dev->base_addr;
 	lp = netdev_priv(dev);
 
 	/* we MUST read all the events out of the ISQ, otherwise we'll never
-           get interrupted again.  As a consequence, we can't have any limit
-           on the number of times we loop in the interrupt handler.  The
-           hardware guarantees that eventually we'll run out of events.  Of
-           course, if you're on a slow machine, and packets are arriving
-           faster than you can read them off, you're screwed.  Hasta la
-           vista, baby!  */
-	while ((status = readword(dev->base_addr, ISQ_PORT))) {
-		if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status);
+	 * get interrupted again.  As a consequence, we can't have any limit
+	 * on the number of times we loop in the interrupt handler.  The
+	 * hardware guarantees that eventually we'll run out of events.  Of
+	 * course, if you're on a slow machine, and packets are arriving
+	 * faster than you can read them off, you're screwed.  Hasta la
+	 * vista, baby!
+	 */
+	while ((status = ioread16(lp->virt_addr + ISQ_PORT))) {
+		cs89_dbg(4, debug, "%s: event=%04x\n", dev->name, status);
 		handled = 1;
-		switch(status & ISQ_EVENT_MASK) {
+		switch (status & ISQ_EVENT_MASK) {
 		case ISQ_RECEIVER_EVENT:
 			/* Got a packet(s). */
 			net_rx(dev);
@@ -1523,11 +786,11 @@
 		case ISQ_TRANSMITTER_EVENT:
 			dev->stats.tx_packets++;
 			netif_wake_queue(dev);	/* Inform upper layers. */
-			if ((status & (	TX_OK |
-					TX_LOST_CRS |
-					TX_SQE_ERROR |
-					TX_LATE_COL |
-					TX_16_COL)) != TX_OK) {
+			if ((status & (TX_OK |
+				       TX_LOST_CRS |
+				       TX_SQE_ERROR |
+				       TX_LATE_COL |
+				       TX_16_COL)) != TX_OK) {
 				if ((status & TX_OK) == 0)
 					dev->stats.tx_errors++;
 				if (status & TX_LOST_CRS)
@@ -1543,37 +806,47 @@
 		case ISQ_BUFFER_EVENT:
 			if (status & READY_FOR_TX) {
 				/* we tried to transmit a packet earlier,
-                                   but inexplicably ran out of buffers.
-                                   That shouldn't happen since we only ever
-                                   load one packet.  Shrug.  Do the right
-                                   thing anyway. */
+				 * but inexplicably ran out of buffers.
+				 * That shouldn't happen since we only ever
+				 * load one packet.  Shrug.  Do the right
+				 * thing anyway.
+				 */
 				netif_wake_queue(dev);	/* Inform upper layers. */
 			}
 			if (status & TX_UNDERRUN) {
-				if (net_debug > 0) printk("%s: transmit underrun\n", dev->name);
-                                lp->send_underrun++;
-                                if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381;
-                                else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL;
+				cs89_dbg(0, err, "%s: transmit underrun\n",
+					 dev->name);
+				lp->send_underrun++;
+				if (lp->send_underrun == 3)
+					lp->send_cmd = TX_AFTER_381;
+				else if (lp->send_underrun == 6)
+					lp->send_cmd = TX_AFTER_ALL;
 				/* transmit cycle is done, although
-				   frame wasn't transmitted - this
-				   avoids having to wait for the upper
-				   layers to timeout on us, in the
-				   event of a tx underrun */
+				 * frame wasn't transmitted - this
+				 * avoids having to wait for the upper
+				 * layers to timeout on us, in the
+				 * event of a tx underrun
+				 */
 				netif_wake_queue(dev);	/* Inform upper layers. */
-                        }
+			}
 #if ALLOW_DMA
 			if (lp->use_dma && (status & RX_DMA)) {
 				int count = readreg(dev, PP_DmaFrameCnt);
-				while(count) {
-					if (net_debug > 5)
-						printk("%s: receiving %d DMA frames\n", dev->name, count);
-					if (net_debug > 2 && count >1)
-						printk("%s: receiving %d DMA frames\n", dev->name, count);
+				while (count) {
+					cs89_dbg(5, debug,
+						 "%s: receiving %d DMA frames\n",
+						 dev->name, count);
+					if (count > 1)
+						cs89_dbg(2, debug,
+							 "%s: receiving %d DMA frames\n",
+							 dev->name, count);
 					dma_rx(dev);
 					if (--count == 0)
 						count = readreg(dev, PP_DmaFrameCnt);
-					if (net_debug > 2 && count > 0)
-						printk("%s: continuing with %d DMA frames\n", dev->name, count);
+					if (count > 0)
+						cs89_dbg(2, debug,
+							 "%s: continuing with %d DMA frames\n",
+							 dev->name, count);
 				}
 			}
 #endif
@@ -1589,73 +862,274 @@
 	return IRQ_RETVAL(handled);
 }
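
The comment in net_interrupt() above states the contract plainly: keep reading the ISQ until it returns zero, and report a handled interrupt only if at least one event was consumed. A stand-alone sketch of that drain-until-empty shape, written as user-space C so it actually runs (fake_isq and read_fake_isq() are invented for the illustration, not part of the driver):

#include <stdio.h>

/* Simulated ISQ: a zero entry means "queue empty", as on the CS89x0. */
static const unsigned short fake_isq[] = { 0x0104, 0x0208, 0x030c, 0x0000 };

static unsigned short read_fake_isq(void)
{
	static unsigned int idx;
	return fake_isq[idx++];
}

int main(void)
{
	unsigned short status;
	int handled = 0;

	/* Drain until the queue reads back zero; never cap the loop. */
	while ((status = read_fake_isq()) != 0) {
		printf("event=%04x\n", status);	/* dispatch would go here */
		handled = 1;
	}
	printf("%s\n", handled ? "IRQ_HANDLED" : "IRQ_NONE");
	return 0;
}
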
 
-static void
-count_rx_errors(int status, struct net_device *dev)
+/* Open/initialize the board.  This is called (in the current kernel)
+   sometime after booting when the 'ifconfig' program is run.
+
+   This routine should set everything up anew at each open, even
+   registers that "should" only need to be set once at boot, so that
+   there is a non-reboot way to recover if something goes wrong.
+*/
+
+/* AKPM: do we need to do any locking here? */
+
+static int
+net_open(struct net_device *dev)
 {
-	dev->stats.rx_errors++;
-	if (status & RX_RUNT)
-		dev->stats.rx_length_errors++;
-	if (status & RX_EXTRA_DATA)
-		dev->stats.rx_length_errors++;
-	if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA|RX_RUNT)))
-		/* per str 172 */
-		dev->stats.rx_crc_errors++;
-	if (status & RX_DRIBBLE)
-		dev->stats.rx_frame_errors++;
-}
+	struct net_local *lp = netdev_priv(dev);
+	int result = 0;
+	int i;
+	int ret;
 
-/* We have a good packet(s), get it/them out of the buffers. */
-static void
-net_rx(struct net_device *dev)
-{
-	struct sk_buff *skb;
-	int status, length;
-
-	int ioaddr = dev->base_addr;
-	status = readword(ioaddr, RX_FRAME_PORT);
-	length = readword(ioaddr, RX_FRAME_PORT);
-
-	if ((status & RX_OK) == 0) {
-		count_rx_errors(status, dev);
-		return;
-	}
-
-	/* Malloc up new buffer. */
-	skb = netdev_alloc_skb(dev, length + 2);
-	if (skb == NULL) {
-#if 0		/* Again, this seems a cruel thing to do */
-		printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
+	if (dev->irq < 2) {
+		/* Allow interrupts to be generated by the chip */
+/* Cirrus' release had this: */
+#if 0
+		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ);
 #endif
-		dev->stats.rx_dropped++;
-		return;
+/* And 2.3.47 had this: */
+		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
+
+		for (i = 2; i < CS8920_NO_INTS; i++) {
+			if ((1 << i) & lp->irq_map) {
+				if (request_irq(i, net_interrupt, 0, dev->name,
+						dev) == 0) {
+					dev->irq = i;
+					write_irq(dev, lp->chip_type, i);
+					/* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */
+					break;
+				}
+			}
+		}
+
+		if (i >= CS8920_NO_INTS) {
+			writereg(dev, PP_BusCTL, 0);	/* disable interrupts. */
+			pr_err("can't get an interrupt\n");
+			ret = -EAGAIN;
+			goto bad_out;
+		}
+	} else {
+#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM)
+		if (((1 << dev->irq) & lp->irq_map) == 0) {
+			pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
+			       dev->name, dev->irq, lp->irq_map);
+			ret = -EAGAIN;
+			goto bad_out;
+		}
+#endif
+/* FIXME: Cirrus' release had this: */
+		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ);
+/* And 2.3.47 had this: */
+#if 0
+		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
+#endif
+		write_irq(dev, lp->chip_type, dev->irq);
+		ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
+		if (ret) {
+			pr_err("request_irq(%d) failed\n", dev->irq);
+			goto bad_out;
+		}
 	}
-	skb_reserve(skb, 2);	/* longword align L3 header */
-
-	readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
-	if (length & 1)
-		skb->data[length-1] = readword(ioaddr, RX_FRAME_PORT);
-
-	if (net_debug > 3) {
-		printk(	"%s: received %d byte packet of type %x\n",
-			dev->name, length,
-			(skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
-	}
-
-        skb->protocol=eth_type_trans(skb,dev);
-	netif_rx(skb);
-	dev->stats.rx_packets++;
-	dev->stats.rx_bytes += length;
-}
 
 #if ALLOW_DMA
-static void release_dma_buff(struct net_local *lp)
-{
-	if (lp->dma_buff) {
-		free_pages((unsigned long)(lp->dma_buff), get_order(lp->dmasize * 1024));
-		lp->dma_buff = NULL;
+	if (lp->use_dma && (lp->isa_config & ANY_ISA_DMA)) {
+		unsigned long flags;
+		lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
+								get_order(lp->dmasize * 1024));
+		if (!lp->dma_buff) {
+			pr_err("%s: cannot get %dK memory for DMA\n",
+			       dev->name, lp->dmasize);
+			goto release_irq;
+		}
+		cs89_dbg(1, debug, "%s: dma %lx %lx\n",
+			 dev->name,
+			 (unsigned long)lp->dma_buff,
+			 (unsigned long)isa_virt_to_bus(lp->dma_buff));
+		if ((unsigned long)lp->dma_buff >= MAX_DMA_ADDRESS ||
+		    !dma_page_eq(lp->dma_buff,
+				 lp->dma_buff + lp->dmasize * 1024 - 1)) {
+			pr_err("%s: not usable as DMA buffer\n", dev->name);
+			goto release_irq;
+		}
+		memset(lp->dma_buff, 0, lp->dmasize * 1024);	/* Why? */
+		if (request_dma(dev->dma, dev->name)) {
+			pr_err("%s: cannot get dma channel %d\n",
+			       dev->name, dev->dma);
+			goto release_irq;
+		}
+		write_dma(dev, lp->chip_type, dev->dma);
+		lp->rx_dma_ptr = lp->dma_buff;
+		lp->end_dma_buff = lp->dma_buff + lp->dmasize * 1024;
+		spin_lock_irqsave(&lp->lock, flags);
+		disable_dma(dev->dma);
+		clear_dma_ff(dev->dma);
+		set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */
+		set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
+		set_dma_count(dev->dma, lp->dmasize * 1024);
+		enable_dma(dev->dma);
+		spin_unlock_irqrestore(&lp->lock, flags);
 	}
-}
+#endif	/* ALLOW_DMA */
+
+	/* set the Ethernet address */
+	for (i = 0; i < ETH_ALEN / 2; i++)
+		writereg(dev, PP_IA + i * 2,
+			 (dev->dev_addr[i * 2] |
+			  (dev->dev_addr[i * 2 + 1] << 8)));
+
+	/* while we're testing the interface, leave interrupts disabled */
+	writereg(dev, PP_BusCTL, MEMORY_ON);
+
+	/* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */
+	if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) &&
+	    (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH))
+		lp->linectl = LOW_RX_SQUELCH;
+	else
+		lp->linectl = 0;
+
+	/* check to make sure that they have the "right" hardware available */
+	switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
+	case A_CNF_MEDIA_10B_T:
+		result = lp->adapter_cnf & A_CNF_10B_T;
+		break;
+	case A_CNF_MEDIA_AUI:
+		result = lp->adapter_cnf & A_CNF_AUI;
+		break;
+	case A_CNF_MEDIA_10B_2:
+		result = lp->adapter_cnf & A_CNF_10B_2;
+		break;
+	default:
+		result = lp->adapter_cnf & (A_CNF_10B_T |
+					    A_CNF_AUI |
+					    A_CNF_10B_2);
+	}
+	if (!result) {
+		pr_err("%s: EEPROM is configured for unavailable media\n",
+		       dev->name);
+release_dma:
+#if ALLOW_DMA
+		free_dma(dev->dma);
+release_irq:
+		release_dma_buff(lp);
 #endif
+		writereg(dev, PP_LineCTL,
+			 readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
+		free_irq(dev->irq, dev);
+		ret = -EAGAIN;
+		goto bad_out;
+	}
+
+	/* set the hardware to the configured choice */
+	switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
+	case A_CNF_MEDIA_10B_T:
+		result = detect_tp(dev);
+		if (result == DETECTED_NONE) {
+			pr_warn("%s: 10Base-T (RJ-45) has no cable\n",
+				dev->name);
+			if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
+				result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */
+		}
+		break;
+	case A_CNF_MEDIA_AUI:
+		result = detect_aui(dev);
+		if (result == DETECTED_NONE) {
+			pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name);
+			if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
+				result = DETECTED_AUI; /* Yes! I don't care if I see a carrier */
+		}
+		break;
+	case A_CNF_MEDIA_10B_2:
+		result = detect_bnc(dev);
+		if (result == DETECTED_NONE) {
+			pr_warn("%s: 10Base-2 (BNC) has no cable\n", dev->name);
+			if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
+				result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */
+		}
+		break;
+	case A_CNF_MEDIA_AUTO:
+		writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET);
+		if (lp->adapter_cnf & A_CNF_10B_T) {
+			result = detect_tp(dev);
+			if (result != DETECTED_NONE)
+				break;
+		}
+		if (lp->adapter_cnf & A_CNF_AUI) {
+			result = detect_aui(dev);
+			if (result != DETECTED_NONE)
+				break;
+		}
+		if (lp->adapter_cnf & A_CNF_10B_2) {
+			result = detect_bnc(dev);
+			if (result != DETECTED_NONE)
+				break;
+		}
+		pr_err("%s: no media detected\n", dev->name);
+		goto release_dma;
+	}
+	switch (result) {
+	case DETECTED_NONE:
+		pr_err("%s: no network cable attached to configured media\n",
+		       dev->name);
+		goto release_dma;
+	case DETECTED_RJ45H:
+		pr_info("%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
+		break;
+	case DETECTED_RJ45F:
+		pr_info("%s: using full-duplex 10Base-T (RJ-45)\n", dev->name);
+		break;
+	case DETECTED_AUI:
+		pr_info("%s: using 10Base-5 (AUI)\n", dev->name);
+		break;
+	case DETECTED_BNC:
+		pr_info("%s: using 10Base-2 (BNC)\n", dev->name);
+		break;
+	}
+
+	/* Turn on both receive and transmit operations */
+	writereg(dev, PP_LineCTL,
+		 readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);
+
+	/* Receive only error free packets addressed to this card */
+	lp->rx_mode = 0;
+	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);
+
+	lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;
+
+	if (lp->isa_config & STREAM_TRANSFER)
+		lp->curr_rx_cfg |= RX_STREAM_ENBL;
+#if ALLOW_DMA
+	set_dma_cfg(dev);
+#endif
+	writereg(dev, PP_RxCFG, lp->curr_rx_cfg);
+
+	writereg(dev, PP_TxCFG, (TX_LOST_CRS_ENBL |
+				 TX_SQE_ERROR_ENBL |
+				 TX_OK_ENBL |
+				 TX_LATE_COL_ENBL |
+				 TX_JBR_ENBL |
+				 TX_ANY_COL_ENBL |
+				 TX_16_COL_ENBL));
+
+	writereg(dev, PP_BufCFG, (READY_FOR_TX_ENBL |
+				  RX_MISS_COUNT_OVRFLOW_ENBL |
+#if ALLOW_DMA
+				  dma_bufcfg(dev) |
+#endif
+				  TX_COL_COUNT_OVRFLOW_ENBL |
+				  TX_UNDERRUN_ENBL));
+
+	/* now that we've got our act together, enable everything */
+	writereg(dev, PP_BusCTL, (ENABLE_IRQ
+				  | (dev->mem_start ? MEMORY_ON : 0) /* turn memory on */
+#if ALLOW_DMA
+				  | dma_busctl(dev)
+#endif
+			 ));
+	netif_start_queue(dev);
+	cs89_dbg(1, debug, "net_open() succeeded\n");
+	return 0;
+bad_out:
+	return ret;
+}
 
 /* The inverse routine to net_open(). */
 static int
@@ -1685,8 +1159,9 @@
 	return 0;
 }
 
-/* Get the current statistics.	This may be called with the card open or
-   closed. */
+/* Get the current statistics.
+ * This may be called with the card open or closed.
+ */
 static struct net_device_stats *
 net_get_stats(struct net_device *dev)
 {
@@ -1702,34 +1177,97 @@
 	return &dev->stats;
 }
 
+static void net_timeout(struct net_device *dev)
+{
+	/* If we get here, some higher level has decided we are broken.
+	   There should really be a "kick me" function call instead. */
+	cs89_dbg(0, err, "%s: transmit timed out, %s?\n",
+		 dev->name,
+		 tx_done(dev) ? "IRQ conflict" : "network cable problem");
+	/* Try to restart the adaptor. */
+	netif_wake_queue(dev);
+}
+
+static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+	struct net_local *lp = netdev_priv(dev);
+	unsigned long flags;
+
+	cs89_dbg(3, debug, "%s: sent %d byte packet of type %x\n",
+		 dev->name, skb->len,
+		 ((skb->data[ETH_ALEN + ETH_ALEN] << 8) |
+		  skb->data[ETH_ALEN + ETH_ALEN + 1]));
+
+	/* keep the upload from being interrupted, since we
+	 * ask the chip to start transmitting before the
+	 * whole packet has been completely uploaded.
+	 */
+
+	spin_lock_irqsave(&lp->lock, flags);
+	netif_stop_queue(dev);
+
+	/* initiate a transmit sequence */
+	iowrite16(lp->send_cmd, lp->virt_addr + TX_CMD_PORT);
+	iowrite16(skb->len, lp->virt_addr + TX_LEN_PORT);
+
+	/* Test to see if the chip has allocated memory for the packet */
+	if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
+		/* Gasp!  It hasn't.  But that shouldn't happen since
+		 * we're waiting for TxOk, so return 1 and requeue this packet.
+		 */
+
+		spin_unlock_irqrestore(&lp->lock, flags);
+		cs89_dbg(0, err, "Tx buffer not free!\n");
+		return NETDEV_TX_BUSY;
+	}
+	/* Write the contents of the packet */
+	writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
+	spin_unlock_irqrestore(&lp->lock, flags);
+	dev->stats.tx_bytes += skb->len;
+	dev_kfree_skb(skb);
+
+	/* We DO NOT call netif_wake_queue() here.
+	 * We also DO NOT call netif_start_queue().
+	 *
+	 * Either of these would cause another bottom half run through
+	 * net_send_packet() before this packet has fully gone out.
+	 * That causes us to hit the "Gasp!" above and the send is rescheduled.
+	 * it runs like a dog.  We just return and wait for the Tx completion
+	 * interrupt handler to restart the netdevice layer
+	 */
+
+	return NETDEV_TX_OK;
+}
+
 static void set_multicast_list(struct net_device *dev)
 {
 	struct net_local *lp = netdev_priv(dev);
 	unsigned long flags;
 
 	spin_lock_irqsave(&lp->lock, flags);
-	if(dev->flags&IFF_PROMISC)
-	{
+	if (dev->flags & IFF_PROMISC)
 		lp->rx_mode = RX_ALL_ACCEPT;
-	}
 	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
-	{
-		/* The multicast-accept list is initialized to accept-all, and we
-		   rely on higher-level filtering for now. */
+		/* The multicast-accept list is initialized to accept-all,
+		 * and we rely on higher-level filtering for now.
+		 */
 		lp->rx_mode = RX_MULTCAST_ACCEPT;
-	}
 	else
 		lp->rx_mode = 0;
 
 	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);
 
-	/* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */
-	writereg(dev, PP_RxCFG, lp->curr_rx_cfg |
-	     (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0));
+	/* in promiscuous mode, we accept errored packets,
+	 * so we have to enable interrupts on them also
+	 */
+	writereg(dev, PP_RxCFG,
+		 (lp->curr_rx_cfg |
+		  (lp->rx_mode == RX_ALL_ACCEPT ?
+		   (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL) :
+		   0)));
 	spin_unlock_irqrestore(&lp->lock, flags);
 }
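
A note on the grouping in the PP_RxCFG write above: in C, '|' binds more tightly than '?:', so writing 'curr_rx_cfg | (mode == RX_ALL_ACCEPT) ? extra : 0' would evaluate as '(curr_rx_cfg | 0-or-1) ? extra : 0' and silently drop curr_rx_cfg from the value written. A quick stand-alone check of the two readings (values are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int base = 0x10, extra = 0x03;
	int promisc = 0;

	/* '|' binds tighter than '?:', so this is (base | promisc) ? extra : 0 */
	unsigned int collapsed = base | promisc ? extra : 0;
	/* the intended reading keeps 'base' in the result */
	unsigned int intended = base | (promisc ? extra : 0);

	printf("collapsed=0x%x intended=0x%x\n", collapsed, intended); /* 0x3 vs 0x10 */
	return 0;
}
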
 
-
 static int set_mac_address(struct net_device *dev, void *p)
 {
 	int i;
@@ -1740,23 +1278,476 @@
 
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 
-	if (net_debug)
-		printk("%s: Setting MAC address to %pM.\n",
-		       dev->name, dev->dev_addr);
+	cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n",
+		 dev->name, dev->dev_addr);
 
 	/* set the Ethernet address */
-	for (i=0; i < ETH_ALEN/2; i++)
-		writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
+	for (i = 0; i < ETH_ALEN / 2; i++)
+		writereg(dev, PP_IA + i * 2,
+			 (dev->dev_addr[i * 2] |
+			  (dev->dev_addr[i * 2 + 1] << 8)));
 
 	return 0;
 }
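
The PP_IA loop in set_mac_address() (and the identical one in net_open()) packs the six address bytes into three 16-bit registers, low byte first. A stand-alone illustration of the packing, with a sample address chosen arbitrarily:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x4b, 0x12, 0x34, 0x56 };
	int i;

	/* Same packing as writereg(dev, PP_IA + i * 2, ...) above. */
	for (i = 0; i < 6 / 2; i++) {
		uint16_t word = mac[i * 2] | (mac[i * 2 + 1] << 8);
		printf("PP_IA+%d <= 0x%04x\n", i * 2, word);
	}
	return 0;
}
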
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void net_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	net_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+static const struct net_device_ops net_ops = {
+	.ndo_open		= net_open,
+	.ndo_stop		= net_close,
+	.ndo_tx_timeout		= net_timeout,
+	.ndo_start_xmit		= net_send_packet,
+	.ndo_get_stats		= net_get_stats,
+	.ndo_set_rx_mode	= set_multicast_list,
+	.ndo_set_mac_address	= set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= net_poll_controller,
+#endif
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static void __init reset_chip(struct net_device *dev)
+{
+#if !defined(CONFIG_MACH_MX31ADS)
+#if !defined(CS89x0_NONISA_IRQ)
+	struct net_local *lp = netdev_priv(dev);
+#endif /* CS89x0_NONISA_IRQ */
+	int reset_start_time;
+
+	writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
+
+	/* wait 30 ms */
+	msleep(30);
+
+#if !defined(CS89x0_NONISA_IRQ)
+	if (lp->chip_type != CS8900) {
+		/* Hardware problem requires PNP registers to be reconfigured after a reset */
+		iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT);
+		iowrite8(dev->irq, lp->virt_addr + DATA_PORT);
+		iowrite8(0, lp->virt_addr + DATA_PORT + 1);
+
+		iowrite16(PP_CS8920_ISAMemB, lp->virt_addr + ADD_PORT);
+		iowrite8((dev->mem_start >> 16) & 0xff,
+			 lp->virt_addr + DATA_PORT);
+		iowrite8((dev->mem_start >> 8) & 0xff,
+			 lp->virt_addr + DATA_PORT + 1);
+	}
+#endif /* CS89x0_NONISA_IRQ */
+
+	/* Wait until the chip is reset */
+	reset_start_time = jiffies;
+	while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 &&
+	       jiffies - reset_start_time < 2)
+		;
+#endif /* !CONFIG_MACH_MX31ADS */
+}
+
+/* This is the real probe routine.
+ * Linux has a history of friendly device probes on the ISA bus.
+ * A good device probes avoids doing writes, and
+ * verifies that the correct device exists and functions.
+ * Return 0 on success.
+ */
+static int __init
+cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
+{
+	struct net_local *lp = netdev_priv(dev);
+	int i;
+	int tmp;
+	unsigned rev_type = 0;
+	int eeprom_buff[CHKSUM_LEN];
+	int retval;
+
+	/* Initialize the device structure. */
+	if (!modular) {
+		memset(lp, 0, sizeof(*lp));
+		spin_lock_init(&lp->lock);
+#ifndef MODULE
+#if ALLOW_DMA
+		if (g_cs89x0_dma) {
+			lp->use_dma = 1;
+			lp->dma = g_cs89x0_dma;
+			lp->dmasize = 16;	/* Could make this an option... */
+		}
+#endif
+		lp->force = g_cs89x0_media__force;
+#endif
+	}
+
+	pr_debug("PP_addr at %p[%x]: 0x%x\n",
+		 ioaddr, ADD_PORT, ioread16(ioaddr + ADD_PORT));
+	iowrite16(PP_ChipID, ioaddr + ADD_PORT);
+
+	tmp = ioread16(ioaddr + DATA_PORT);
+	if (tmp != CHIP_EISA_ID_SIG) {
+		pr_debug("%s: incorrect signature at %p[%x]: 0x%x!="
+			 CHIP_EISA_ID_SIG_STR "\n",
+			 dev->name, ioaddr, DATA_PORT, tmp);
+		retval = -ENODEV;
+		goto out1;
+	}
+
+	lp->virt_addr = ioaddr;
+
+	/* get the chip type */
+	rev_type = readreg(dev, PRODUCT_ID_ADD);
+	lp->chip_type = rev_type & ~REVISON_BITS;
+	lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';
+
+	/* Check the chip type and revision in order to set the correct
+	 * send command.  CS8920 revision C and CS8900 revision F can use
+	 * the faster send.
+	 */
+	lp->send_cmd = TX_AFTER_381;
+	if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
+		lp->send_cmd = TX_NOW;
+	if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
+		lp->send_cmd = TX_NOW;
+
+	pr_info_once("%s\n", version);
+
+	pr_info("%s: cs89%c0%s rev %c found at %p ",
+		dev->name,
+		lp->chip_type == CS8900  ? '0' : '2',
+		lp->chip_type == CS8920M ? "M" : "",
+		lp->chip_revision,
+		lp->virt_addr);
+
+	reset_chip(dev);
+
+	/* Here we read the current configuration of the chip.
+	 * If there is no Extended EEPROM then the idea is to not disturb
+	 * the chip configuration; it should have been correctly set up by
+	 * the automatic EEPROM read on reset. So, if the chip says it read
+	 * the EEPROM, the driver will always do *something* instead of
+	 * complaining that adapter_cnf is 0.
+	 */
+
+	if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
+	    (EEPROM_OK | EEPROM_PRESENT)) {
+		/* Load the MAC. */
+		for (i = 0; i < ETH_ALEN / 2; i++) {
+			unsigned int Addr;
+			Addr = readreg(dev, PP_IA + i * 2);
+			dev->dev_addr[i * 2] = Addr & 0xFF;
+			dev->dev_addr[i * 2 + 1] = Addr >> 8;
+		}
+
+		/* Load the Adapter Configuration.
+		 * Note:  Barring any more specific information from some
+		 * other source (ie EEPROM+Schematics), we would not know
+		 * how to operate a 10Base2 interface on the AUI port.
+		 * However, since we do read the status of HCB1 and use
+		 * settings that always result in calls to control_dc_dc(dev, 0),
+		 * a BNC interface should work if the enable pin
+		 * (dc/dc converter) is on HCB1.
+		 * It will be called AUI however.
+		 */
+
+		lp->adapter_cnf = 0;
+		i = readreg(dev, PP_LineCTL);
+		/* Preserve the setting of the HCB1 pin. */
+		if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL))
+			lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;
+		/* Save the sqelch bit */
+		/* Save the squelch bit */
+			lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;
+		/* Check if the card is in 10Base-t only mode */
+		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
+			lp->adapter_cnf |=  A_CNF_10B_T | A_CNF_MEDIA_10B_T;
+		/* Check if the card is in AUI only mode */
+		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
+			lp->adapter_cnf |=  A_CNF_AUI | A_CNF_MEDIA_AUI;
+		/* Check if the card is in Auto mode. */
+		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
+			lp->adapter_cnf |=  A_CNF_AUI | A_CNF_10B_T |
+				A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;
+
+		cs89_dbg(1, info, "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
+			 dev->name, i, lp->adapter_cnf);
+
+		/* IRQ. Other chips already probe, see below. */
+		if (lp->chip_type == CS8900)
+			lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;
+
+		pr_cont("[Cirrus EEPROM] ");
+	}
+
+	pr_cont("\n");
+
+	/* First check to see if an EEPROM is attached. */
+
+	if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
+		pr_warn("No EEPROM, relying on command line....\n");
+	else if (get_eeprom_data(dev, START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) {
+		pr_warn("EEPROM read failed, relying on command line\n");
+	} else if (get_eeprom_cksum(START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) {
+		/* Check if the chip was able to read its own configuration
+		 * starting at 0 in the EEPROM */
+		if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
+		    (EEPROM_OK | EEPROM_PRESENT))
+			pr_warn("Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");
+
+	} else {
+		/* This reads an extended EEPROM that is not documented
+		 * in the CS8900 datasheet.
+		 */
+
+		/* get transmission control word  but keep the autonegotiation bits */
+		if (!lp->auto_neg_cnf)
+			lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET / 2];
+		/* Store adapter configuration */
+		if (!lp->adapter_cnf)
+			lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET / 2];
+		/* Store ISA configuration */
+		lp->isa_config = eeprom_buff[ISA_CNF_OFFSET / 2];
+		dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET / 2] << 8;
+
+		/* eeprom_buff has 32-bit ints, so we can't just memcpy it */
+		/* store the initial memory base address */
+		for (i = 0; i < ETH_ALEN / 2; i++) {
+			dev->dev_addr[i * 2] = eeprom_buff[i];
+			dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8;
+		}
+		cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n",
+			 dev->name, lp->adapter_cnf);
+	}
+
+	/* allow them to force multiple transceivers.  If they force multiple, autosense */
+	{
+		int count = 0;
+		if (lp->force & FORCE_RJ45) {
+			lp->adapter_cnf |= A_CNF_10B_T;
+			count++;
+		}
+		if (lp->force & FORCE_AUI) {
+			lp->adapter_cnf |= A_CNF_AUI;
+			count++;
+		}
+		if (lp->force & FORCE_BNC) {
+			lp->adapter_cnf |= A_CNF_10B_2;
+			count++;
+		}
+		if (count > 1)
+			lp->adapter_cnf |= A_CNF_MEDIA_AUTO;
+		else if (lp->force & FORCE_RJ45)
+			lp->adapter_cnf |= A_CNF_MEDIA_10B_T;
+		else if (lp->force & FORCE_AUI)
+			lp->adapter_cnf |= A_CNF_MEDIA_AUI;
+		else if (lp->force & FORCE_BNC)
+			lp->adapter_cnf |= A_CNF_MEDIA_10B_2;
+	}
+
+	cs89_dbg(1, debug, "%s: after force 0x%x, adapter_cnf=0x%x\n",
+		 dev->name, lp->force, lp->adapter_cnf);
+
+	/* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */
+
+	/* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */
+
+	/* FIXME: we don't set the Ethernet address on the command line.  Use
+	 * ifconfig IFACE hw ether AABBCCDDEEFF
+	 */
+
+	pr_info("media %s%s%s",
+		(lp->adapter_cnf & A_CNF_10B_T) ? "RJ-45," : "",
+		(lp->adapter_cnf & A_CNF_AUI) ? "AUI," : "",
+		(lp->adapter_cnf & A_CNF_10B_2) ? "BNC," : "");
+
+	lp->irq_map = 0xffff;
+
+	/* If this is a CS8900 then no pnp soft */
+	if (lp->chip_type != CS8900 &&
+	    /* Check if the ISA IRQ has been set  */
+	    (i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
+	     (i != 0 && i < CS8920_NO_INTS))) {
+		if (!dev->irq)
+			dev->irq = i;
+	} else {
+		i = lp->isa_config & INT_NO_MASK;
+#ifndef CONFIG_CS89x0_PLATFORM
+		if (lp->chip_type == CS8900) {
+#ifdef CS89x0_NONISA_IRQ
+			i = cs8900_irq_map[0];
+#else
+			/* Translate the IRQ using the IRQ mapping table. */
+			if (i >= ARRAY_SIZE(cs8900_irq_map))
+				pr_err("invalid ISA interrupt number %d\n", i);
+			else
+				i = cs8900_irq_map[i];
+
+			lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */
+		} else {
+			int irq_map_buff[IRQ_MAP_LEN/2];
+
+			if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
+					    IRQ_MAP_LEN / 2,
+					    irq_map_buff) >= 0) {
+				if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
+					lp->irq_map = ((irq_map_buff[0] >> 8) |
+						       (irq_map_buff[1] << 8));
+			}
+#endif
+		}
+#endif
+		if (!dev->irq)
+			dev->irq = i;
+	}
+
+	pr_cont(" IRQ %d", dev->irq);
+
+#if ALLOW_DMA
+	if (lp->use_dma) {
+		get_dma_channel(dev);
+		pr_cont(", DMA %d", dev->dma);
+	} else
+#endif
+		pr_cont(", programmed I/O");
+
+	/* print the ethernet address. */
+	pr_cont(", MAC %pM\n", dev->dev_addr);
+
+	dev->netdev_ops	= &net_ops;
+	dev->watchdog_timeo = HZ;
+
+	cs89_dbg(0, info, "cs89x0_probe1() successful\n");
+
+	retval = register_netdev(dev);
+	if (retval)
+		goto out2;
+	return 0;
+out2:
+	iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT);
+out1:
+	return retval;
+}
+
+#ifndef CONFIG_CS89x0_PLATFORM
+/*
+ * This function converts the I/O port address used by the cs89x0_probe() and
+ * init_module() functions to the I/O memory address used by the
+ * cs89x0_probe1() function.
+ */
+static int __init
+cs89x0_ioport_probe(struct net_device *dev, unsigned long ioport, int modular)
+{
+	struct net_local *lp = netdev_priv(dev);
+	int ret;
+	void __iomem *io_mem;
+
+	if (!lp)
+		return -ENOMEM;
+
+	dev->base_addr = ioport;
+
+	if (!request_region(ioport, NETCARD_IO_EXTENT, DRV_NAME)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	io_mem = ioport_map(ioport & ~3, NETCARD_IO_EXTENT);
+	if (!io_mem) {
+		ret = -ENOMEM;
+		goto release;
+	}
+
+	/* if they give us an odd I/O address, then do ONE write to
+	 * the address port, to get it back to address zero, where we
+	 * expect to find the EISA signature word. An IO with a base of 0x3
+	 * will skip the test for the ADD_PORT.
+	 */
+	if (ioport & 1) {
+		cs89_dbg(1, info, "%s: odd ioaddr 0x%lx\n", dev->name, ioport);
+		if ((ioport & 2) != 2) {
+			if ((ioread16(io_mem + ADD_PORT) & ADD_MASK) !=
+			    ADD_SIG) {
+				pr_err("%s: bad signature 0x%x\n",
+				       dev->name, ioread16(io_mem + ADD_PORT));
+				ret = -ENODEV;
+				goto unmap;
+			}
+		}
+	}
+
+	ret = cs89x0_probe1(dev, io_mem, modular);
+	if (!ret)
+		goto out;
+unmap:
+	ioport_unmap(io_mem);
+release:
+	release_region(ioport, NETCARD_IO_EXTENT);
+out:
+	return ret;
+}
+
+#ifndef MODULE
+/* Check for a network adaptor of this type, and return '0' iff one exists.
+ * If dev->base_addr == 0, probe all likely locations.
+ * If dev->base_addr == 1, always return failure.
+ * If dev->base_addr == 2, allocate space for the device and return success
+ * (detachable devices only).
+ * Return 0 on success.
+ */
+
+struct net_device * __init cs89x0_probe(int unit)
+{
+	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
+	unsigned *port;
+	int err = 0;
+	int irq;
+	int io;
+
+	if (!dev)
+		return ERR_PTR(-ENODEV);
+
+	sprintf(dev->name, "eth%d", unit);
+	netdev_boot_setup_check(dev);
+	io = dev->base_addr;
+	irq = dev->irq;
+
+	cs89_dbg(0, info, "cs89x0_probe(0x%x)\n", io);
+
+	if (io > 0x1ff)	{	/* Check a single specified location. */
+		err = cs89x0_ioport_probe(dev, io, 0);
+	} else if (io != 0) {	/* Don't probe at all. */
+		err = -ENXIO;
+	} else {
+		for (port = netcard_portlist; *port; port++) {
+			if (cs89x0_ioport_probe(dev, *port, 0) == 0)
+				break;
+			dev->irq = irq;
+		}
+		if (!*port)
+			err = -ENODEV;
+	}
+	if (err)
+		goto out;
+	return dev;
+out:
+	free_netdev(dev);
+	pr_warn("no cs8900 or cs8920 detected.  Be sure to disable PnP with SETUP\n");
+	return ERR_PTR(err);
+}
+#endif
+#endif
+
 #if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM)
 
 static struct net_device *dev_cs89x0;
 
-/*
- * Support the 'debug' module parm even if we're compiled for non-debug to
+/* Support the 'debug' module parm even if we're compiled for non-debug to
  * avoid breaking someone's startup scripts
  */
 
@@ -1764,11 +1755,11 @@
 static int irq;
 static int debug;
 static char media[8];
-static int duplex=-1;
+static int duplex = -1;
 
 static int use_dma;			/* These generate unused var warnings if ALLOW_DMA = 0 */
 static int dma;
-static int dmasize=16;			/* or 64 */
+static int dmasize = 16;		/* or 64 */
 
 module_param(io, int, 0);
 module_param(irq, int, 0);
@@ -1801,32 +1792,28 @@
 MODULE_AUTHOR("Mike Cruse, Russwll Nelson <nelson@crynwr.com>, Andrew Morton");
 MODULE_LICENSE("GPL");
 
-
 /*
-* media=t             - specify media type
-   or media=2
-   or media=aui
-   or medai=auto
-* duplex=0            - specify forced half/full/autonegotiate duplex
-* debug=#             - debug level
-
-
-* Default Chip Configuration:
-  * DMA Burst = enabled
-  * IOCHRDY Enabled = enabled
-    * UseSA = enabled
-    * CS8900 defaults to half-duplex if not specified on command-line
-    * CS8920 defaults to autoneg if not specified on command-line
-    * Use reset defaults for other config parameters
-
-* Assumptions:
-  * media type specified is supported (circuitry is present)
-  * if memory address is > 1MB, then required mem decode hw is present
-  * if 10B-2, then agent other than driver will enable DC/DC converter
-    (hw or software util)
-
-
-*/
+ * media=t             - specify media type
+ * or media=2
+ * or media=aui
+ * or media=auto
+ * duplex=0            - specify forced half/full/autonegotiate duplex
+ * debug=#             - debug level
+ *
+ * Default Chip Configuration:
+ * DMA Burst = enabled
+ * IOCHRDY Enabled = enabled
+ * UseSA = enabled
+ * CS8900 defaults to half-duplex if not specified on command-line
+ * CS8920 defaults to autoneg if not specified on command-line
+ * Use reset defaults for other config parameters
+ *
+ * Assumptions:
+ * media type specified is supported (circuitry is present)
+ * if memory address is > 1MB, then required mem decode hw is present
+ * if 10B-2, then agent other than driver will enable DC/DC converter
+ * (hw or software util)
+ */
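
For instance, with the parameters documented above and declared by this module, an ISA board might be brought up with something like 'insmod cs89x0.ko io=0x300 irq=10 media=rj45 duplex=0 debug=1'. The io/irq values here are illustrative and must match the board's configuration; autoprobing is refused when io=0, as init_module() below enforces.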
 
 int __init init_module(void)
 {
@@ -1856,8 +1843,8 @@
 
 	spin_lock_init(&lp->lock);
 
-        /* boy, they'd better get these right */
-        if (!strcmp(media, "rj45"))
+	/* boy, they'd better get these right */
+	if (!strcmp(media, "rj45"))
 		lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
 	else if (!strcmp(media, "aui"))
 		lp->adapter_cnf = A_CNF_MEDIA_AUI   | A_CNF_AUI;
@@ -1866,27 +1853,28 @@
 	else
 		lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
 
-        if (duplex==-1)
+	if (duplex == -1)
 		lp->auto_neg_cnf = AUTO_NEG_ENABLE;
 
-        if (io == 0) {
-                printk(KERN_ERR "cs89x0.c: Module autoprobing not allowed.\n");
-                printk(KERN_ERR "cs89x0.c: Append io=0xNNN\n");
-                ret = -EPERM;
+	if (io == 0) {
+		pr_err("Module autoprobing not allowed\n");
+		pr_err("Append io=0xNNN\n");
+		ret = -EPERM;
 		goto out;
-        } else if (io <= 0x1ff) {
+	} else if (io <= 0x1ff) {
 		ret = -ENXIO;
 		goto out;
 	}
 
 #if ALLOW_DMA
 	if (use_dma && dmasize != 16 && dmasize != 64) {
-		printk(KERN_ERR "cs89x0.c: dma size must be either 16K or 64K, not %dK\n", dmasize);
+		pr_err("dma size must be either 16K or 64K, not %dK\n",
+		       dmasize);
 		ret = -EPERM;
 		goto out;
 	}
 #endif
-	ret = cs89x0_probe1(dev, io, 1);
+	ret = cs89x0_ioport_probe(dev, io, 1);
 	if (ret)
 		goto out;
 
@@ -1900,8 +1888,11 @@
 void __exit
 cleanup_module(void)
 {
+	struct net_local *lp = netdev_priv(dev_cs89x0);
+
 	unregister_netdev(dev_cs89x0);
-	writeword(dev_cs89x0->base_addr, ADD_PORT, PP_ChipID);
+	iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT);
+	ioport_unmap(lp->virt_addr);
 	release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
 	free_netdev(dev_cs89x0);
 }
@@ -1913,6 +1904,7 @@
 	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
 	struct net_local *lp;
 	struct resource *mem_res;
+	void __iomem *virt_addr;
 	int err;
 
 	if (!dev)
@@ -1923,29 +1915,28 @@
 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dev->irq = platform_get_irq(pdev, 0);
 	if (mem_res == NULL || dev->irq <= 0) {
-		dev_warn(&dev->dev, "memory/interrupt resource missing.\n");
+		dev_warn(&dev->dev, "memory/interrupt resource missing\n");
 		err = -ENXIO;
 		goto free;
 	}
 
-	lp->phys_addr = mem_res->start;
 	lp->size = resource_size(mem_res);
-	if (!request_mem_region(lp->phys_addr, lp->size, DRV_NAME)) {
-		dev_warn(&dev->dev, "request_mem_region() failed.\n");
+	if (!request_mem_region(mem_res->start, lp->size, DRV_NAME)) {
+		dev_warn(&dev->dev, "request_mem_region() failed\n");
 		err = -EBUSY;
 		goto free;
 	}
 
-	lp->virt_addr = ioremap(lp->phys_addr, lp->size);
-	if (!lp->virt_addr) {
-		dev_warn(&dev->dev, "ioremap() failed.\n");
+	virt_addr = ioremap(mem_res->start, lp->size);
+	if (!virt_addr) {
+		dev_warn(&dev->dev, "ioremap() failed\n");
 		err = -ENOMEM;
 		goto release;
 	}
 
-	err = cs89x0_probe1(dev, (unsigned long)lp->virt_addr, 0);
+	err = cs89x0_probe1(dev, virt_addr, 0);
 	if (err) {
-		dev_warn(&dev->dev, "no cs8900 or cs8920 detected.\n");
+		dev_warn(&dev->dev, "no cs8900 or cs8920 detected\n");
 		goto unmap;
 	}
 
@@ -1953,9 +1944,9 @@
 	return 0;
 
 unmap:
-	iounmap(lp->virt_addr);
+	iounmap(virt_addr);
 release:
-	release_mem_region(lp->phys_addr, lp->size);
+	release_mem_region(mem_res->start, lp->size);
 free:
 	free_netdev(dev);
 	return err;
@@ -1965,10 +1956,16 @@
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct net_local *lp = netdev_priv(dev);
+	struct resource *mem_res;
 
+	/* This platform_get_resource() call will not return NULL, because
+	 * the same call in cs89x0_platform_probe() has returned a non-NULL
+	 * value.
+	 */
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	unregister_netdev(dev);
 	iounmap(lp->virt_addr);
-	release_mem_region(lp->phys_addr, lp->size);
+	release_mem_region(mem_res->start, lp->size);
 	free_netdev(dev);
 	return 0;
 }
@@ -1996,13 +1993,3 @@
 module_exit(cs89x0_cleanup);
 
 #endif /* CONFIG_CS89x0_PLATFORM */
-
-/*
- * Local variables:
- *  version-control: t
- *  kept-new-versions: 5
- *  c-indent-level: 8
- *  tab-width: 8
- * End:
- *
- */
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 77b4e87..8132c78 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -944,8 +944,7 @@
 
 	for (i = 0; i < enic->mc_count; i++) {
 		for (j = 0; j < mc_count; j++)
-			if (compare_ether_addr(enic->mc_addr[i],
-				mc_addr[j]) == 0)
+			if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
 				break;
 		if (j == mc_count)
 			enic_dev_del_addr(enic, enic->mc_addr[i]);
@@ -953,8 +952,7 @@
 
 	for (i = 0; i < mc_count; i++) {
 		for (j = 0; j < enic->mc_count; j++)
-			if (compare_ether_addr(mc_addr[i],
-				enic->mc_addr[j]) == 0)
+			if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
 				break;
 		if (j == enic->mc_count)
 			enic_dev_add_addr(enic, mc_addr[i]);
@@ -999,8 +997,7 @@
 
 	for (i = 0; i < enic->uc_count; i++) {
 		for (j = 0; j < uc_count; j++)
-			if (compare_ether_addr(enic->uc_addr[i],
-				uc_addr[j]) == 0)
+			if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
 				break;
 		if (j == uc_count)
 			enic_dev_del_addr(enic, enic->uc_addr[i]);
@@ -1008,8 +1005,7 @@
 
 	for (i = 0; i < uc_count; i++) {
 		for (j = 0; j < enic->uc_count; j++)
-			if (compare_ether_addr(uc_addr[i],
-				enic->uc_addr[j]) == 0)
+			if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
 				break;
 		if (j == enic->uc_count)
 			enic_dev_add_addr(enic, uc_addr[i]);
@@ -1193,18 +1189,16 @@
 	if (err)
 		return err;
 
-	NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request);
-	NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
-	if (pp->set & ENIC_SET_NAME)
-		NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
-			pp->name);
-	if (pp->set & ENIC_SET_INSTANCE)
-		NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
-			pp->instance_uuid);
-	if (pp->set & ENIC_SET_HOST)
-		NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
-			pp->host_uuid);
-
+	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
+	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
+	    ((pp->set & ENIC_SET_NAME) &&
+	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
+	    ((pp->set & ENIC_SET_INSTANCE) &&
+	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
+		     pp->instance_uuid)) ||
+	    ((pp->set & ENIC_SET_HOST) &&
+	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index dafea1e..43464f0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -184,7 +184,7 @@
 };
 
 static const int enic_pp_handlers_count =
-			sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers);
+			ARRAY_SIZE(enic_pp_handlers);
 
 static int enic_pp_preassociate(struct enic *enic, int vf,
 	struct enic_port_profile *prev_pp, int *restore_pp)
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 972b62b..9745fe5 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -4,7 +4,7 @@
 
 config DM9000
 	tristate "DM9000 support"
-	depends on ARM || BLACKFIN || MIPS
+	depends on ARM || BLACKFIN || MIPS || COLDFIRE
 	select CRC32
 	select NET_CORE
 	select MII
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
index 1879f84..17ae8c6 100644
--- a/drivers/net/ethernet/dec/ewrk3.c
+++ b/drivers/net/ethernet/dec/ewrk3.c
@@ -1016,7 +1016,8 @@
 							} else {
 								lp->pktStats.multicast++;
 							}
-						} else if (compare_ether_addr(p, dev->dev_addr) == 0) {
+						} else if (ether_addr_equal(p,
+									    dev->dev_addr)) {
 							lp->pktStats.unicast++;
 						}
 						lp->pktStats.bins[0]++;		/* Duplicates stats.rx_packets */
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 68f1c39..61cc093 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1380,6 +1380,7 @@
 static int de_open (struct net_device *dev)
 {
 	struct de_private *de = netdev_priv(dev);
+	const int irq = de->pdev->irq;
 	int rc;
 
 	netif_dbg(de, ifup, dev, "enabling interface\n");
@@ -1394,10 +1395,9 @@
 
 	dw32(IntrMask, 0);
 
-	rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
+	rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
 	if (rc) {
-		netdev_err(dev, "IRQ %d request failure, err=%d\n",
-			   dev->irq, rc);
+		netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
 		goto err_out_free;
 	}
 
@@ -1413,7 +1413,7 @@
 	return 0;
 
 err_out_free_irq:
-	free_irq(dev->irq, dev);
+	free_irq(irq, dev);
 err_out_free:
 	de_free_rings(de);
 	return rc;
@@ -1434,7 +1434,7 @@
 	netif_carrier_off(dev);
 	spin_unlock_irqrestore(&de->lock, flags);
 
-	free_irq(dev->irq, dev);
+	free_irq(de->pdev->irq, dev);
 
 	de_free_rings(de);
 	de_adapter_sleep(de);
@@ -1444,6 +1444,7 @@
 static void de_tx_timeout (struct net_device *dev)
 {
 	struct de_private *de = netdev_priv(dev);
+	const int irq = de->pdev->irq;
 
 	netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
 		   dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
@@ -1451,7 +1452,7 @@
 
 	del_timer_sync(&de->media_timer);
 
-	disable_irq(dev->irq);
+	disable_irq(irq);
 	spin_lock_irq(&de->lock);
 
 	de_stop_hw(de);
@@ -1459,12 +1460,12 @@
 	netif_carrier_off(dev);
 
 	spin_unlock_irq(&de->lock);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 
 	/* Update the error counts. */
 	__de_get_stats(de);
 
-	synchronize_irq(dev->irq);
+	synchronize_irq(irq);
 	de_clean_rings(de);
 
 	de_init_rings(de);
@@ -2024,8 +2025,6 @@
 		goto err_out_res;
 	}
 
-	dev->irq = pdev->irq;
-
 	/* obtain and check validity of PCI I/O address */
 	pciaddr = pci_resource_start(pdev, 1);
 	if (!pciaddr) {
@@ -2050,7 +2049,6 @@
 		       pciaddr, pci_name(pdev));
 		goto err_out_res;
 	}
-	dev->base_addr = (unsigned long) regs;
 	de->regs = regs;
 
 	de_adapter_wake(de);
@@ -2078,11 +2076,9 @@
 		goto err_out_iomap;
 
 	/* print info about board and interface just registered */
-	netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+	netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
 		    de->de21040 ? "21040" : "21041",
-		    dev->base_addr,
-		    dev->dev_addr,
-		    dev->irq);
+		    regs, dev->dev_addr, pdev->irq);
 
 	pci_set_drvdata(pdev, dev);
 
@@ -2130,9 +2126,11 @@
 
 	rtnl_lock();
 	if (netif_running (dev)) {
+		const int irq = pdev->irq;
+
 		del_timer_sync(&de->media_timer);
 
-		disable_irq(dev->irq);
+		disable_irq(irq);
 		spin_lock_irq(&de->lock);
 
 		de_stop_hw(de);
@@ -2141,12 +2139,12 @@
 		netif_carrier_off(dev);
 
 		spin_unlock_irq(&de->lock);
-		enable_irq(dev->irq);
+		enable_irq(irq);
 
 		/* Update the error counts. */
 		__de_get_stats(de);
 
-		synchronize_irq(dev->irq);
+		synchronize_irq(irq);
 		de_clean_rings(de);
 
 		de_adapter_sleep(de);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 18b106c..d3cd489 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1874,7 +1874,7 @@
 	} else {
 	    lp->pktStats.multicast++;
 	}
-    } else if (compare_ether_addr(buf, dev->dev_addr) == 0) {
+    } else if (ether_addr_equal(buf, dev->dev_addr)) {
         lp->pktStats.unicast++;
     }
 
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 1eccf494..4d6fe60 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -150,6 +150,12 @@
 #define DMFE_TX_TIMEOUT ((3*HZ)/2)	/* tx packet time-out time 1.5 s" */
 #define DMFE_TX_KICK 	(HZ/2)	/* tx packet Kick-out time 0.5 s" */
 
+#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
+#define dr32(reg)	ioread32(ioaddr + (reg))
+#define dr16(reg)	ioread16(ioaddr + (reg))
+#define dr8(reg)	ioread8(ioaddr + (reg))
+
 #define DMFE_DBUG(dbug_now, msg, value)			\
 	do {						\
 		if (dmfe_debug || (dbug_now))		\
@@ -178,14 +184,6 @@
 
 #define SROM_V41_CODE   0x14
 
-#define SROM_CLK_WRITE(data, ioaddr) \
-	outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
-	udelay(5); \
-	outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
-	udelay(5); \
-	outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
-	udelay(5);
-
 #define __CHK_IO_SIZE(pci_id, dev_rev) \
  (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
 	DM9102A_IO_SIZE: DM9102_IO_SIZE)
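
The dw32()/dw16()/dr32()/dr16()/dr8() helpers added above expand against whatever 'ioaddr' variable is in scope at the call site, which is what lets the rest of this patch turn every outl()/inl() pair into a one-line register access. A user-space mock of the idiom, just to show the shape (fake_bar, the *_mock helpers and the register indices are all invented for this demo, and the real macros take byte offsets on a void __iomem pointer):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_bar[64];			/* pretend MMIO window */

static void iowrite32_mock(uint32_t val, uint32_t *addr) { *addr = val; }
static uint32_t ioread32_mock(uint32_t *addr) { return *addr; }

/* Same shape as the driver's macros: 'ioaddr' is resolved at the call site. */
#define dw32(reg, val)	iowrite32_mock(val, ioaddr + (reg))
#define dr32(reg)	ioread32_mock(ioaddr + (reg))

#define DCR0	0	/* register indices, arbitrary for the demo */
#define DCR12	12

int main(void)
{
	uint32_t *ioaddr = fake_bar;		/* stands in for pci_iomap() */

	dw32(DCR0, 0x1);
	dw32(DCR12, 0x180);
	printf("DCR0=0x%x DCR12=0x%x\n",
	       (unsigned int)dr32(DCR0), (unsigned int)dr32(DCR12));
	return 0;
}
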
@@ -213,11 +211,11 @@
 struct dmfe_board_info {
 	u32 chip_id;			/* Chip vendor/Device ID */
 	u8 chip_revision;		/* Chip revision */
-	struct DEVICE *next_dev;	/* next device */
+	struct net_device *next_dev;	/* next device */
 	struct pci_dev *pdev;		/* PCI device */
 	spinlock_t lock;
 
-	long ioaddr;			/* I/O base address */
+	void __iomem *ioaddr;		/* I/O base address */
 	u32 cr0_data;
 	u32 cr5_data;
 	u32 cr6_data;
@@ -320,20 +318,20 @@
 static int dmfe_stop(struct DEVICE *);
 static void dmfe_set_filter_mode(struct DEVICE *);
 static const struct ethtool_ops netdev_ethtool_ops;
-static u16 read_srom_word(long ,int);
+static u16 read_srom_word(void __iomem *, int);
 static irqreturn_t dmfe_interrupt(int , void *);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void poll_dmfe (struct net_device *dev);
 #endif
-static void dmfe_descriptor_init(struct net_device *, unsigned long);
+static void dmfe_descriptor_init(struct net_device *);
 static void allocate_rx_buffer(struct net_device *);
-static void update_cr6(u32, unsigned long);
+static void update_cr6(u32, void __iomem *);
 static void send_filter_frame(struct DEVICE *);
 static void dm9132_id_table(struct DEVICE *);
-static u16 phy_read(unsigned long, u8, u8, u32);
-static void phy_write(unsigned long, u8, u8, u16, u32);
-static void phy_write_1bit(unsigned long, u32);
-static u16 phy_read_1bit(unsigned long);
+static u16 phy_read(void __iomem *, u8, u8, u32);
+static void phy_write(void __iomem *, u8, u8, u16, u32);
+static void phy_write_1bit(void __iomem *, u32);
+static u16 phy_read_1bit(void __iomem *);
 static u8 dmfe_sense_speed(struct dmfe_board_info *);
 static void dmfe_process_mode(struct dmfe_board_info *);
 static void dmfe_timer(unsigned long);
@@ -462,14 +460,16 @@
 	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
 
 	db->chip_id = ent->driver_data;
-	db->ioaddr = pci_resource_start(pdev, 0);
+	/* IO type range. */
+	db->ioaddr = pci_iomap(pdev, 0, 0);
+	if (!db->ioaddr)
+		goto err_out_free_buf;
+
 	db->chip_revision = pdev->revision;
 	db->wol_mode = 0;
 
 	db->pdev = pdev;
 
-	dev->base_addr = db->ioaddr;
-	dev->irq = pdev->irq;
 	pci_set_drvdata(pdev, dev);
 	dev->netdev_ops = &netdev_ops;
 	dev->ethtool_ops = &netdev_ethtool_ops;
@@ -484,9 +484,10 @@
 		db->chip_type = 0;
 
 	/* read 64 word srom data */
-	for (i = 0; i < 64; i++)
+	for (i = 0; i < 64; i++) {
 		((__le16 *) db->srom)[i] =
 			cpu_to_le16(read_srom_word(db->ioaddr, i));
+	}
 
 	/* Set Node address */
 	for (i = 0; i < 6; i++)
@@ -494,16 +495,18 @@
 
 	err = register_netdev (dev);
 	if (err)
-		goto err_out_free_buf;
+		goto err_out_unmap;
 
 	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
 		 ent->driver_data >> 16,
-		 pci_name(pdev), dev->dev_addr, dev->irq);
+		 pci_name(pdev), dev->dev_addr, pdev->irq);
 
 	pci_set_master(pdev);
 
 	return 0;
 
+err_out_unmap:
+	pci_iounmap(pdev, db->ioaddr);
 err_out_free_buf:
 	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
 			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
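
The probe path now maps BAR 0 with pci_iomap() and gains an err_out_unmap label, so a register_netdev() failure releases the mapping before the DMA pools are freed; each label undoes exactly one acquisition, in reverse order. A condensed sketch of that shape with hypothetical my_probe()/my_priv names (region request and DMA allocations omitted for brevity).

	/* Sketch: reverse-order unwinding in a PCI probe path. */
	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/pci.h>

	struct my_priv { void __iomem *ioaddr; };

	static int my_probe(struct pci_dev *pdev)
	{
		struct net_device *dev;
		struct my_priv *priv;
		int err;

		err = pci_enable_device(pdev);
		if (err)
			return err;

		dev = alloc_etherdev(sizeof(*priv));
		if (!dev) {
			err = -ENOMEM;
			goto err_disable;
		}
		priv = netdev_priv(dev);

		priv->ioaddr = pci_iomap(pdev, 0, 0);	/* map the whole of BAR 0 */
		if (!priv->ioaddr) {
			err = -ENOMEM;
			goto err_free_netdev;
		}

		err = register_netdev(dev);
		if (err)
			goto err_unmap;

		pci_set_drvdata(pdev, dev);
		return 0;

	err_unmap:				/* undo in reverse order of acquisition */
		pci_iounmap(pdev, priv->ioaddr);
	err_free_netdev:
		free_netdev(dev);
	err_disable:
		pci_disable_device(pdev);
		return err;
	}
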
@@ -532,7 +535,7 @@
  	if (dev) {
 
 		unregister_netdev(dev);
-
+		pci_iounmap(db->pdev, db->ioaddr);
 		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
 					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
  					db->desc_pool_dma_ptr);
@@ -555,13 +558,13 @@
 
 static int dmfe_open(struct DEVICE *dev)
 {
-	int ret;
 	struct dmfe_board_info *db = netdev_priv(dev);
+	const int irq = db->pdev->irq;
+	int ret;
 
 	DMFE_DBUG(0, "dmfe_open", 0);
 
-	ret = request_irq(dev->irq, dmfe_interrupt,
-			  IRQF_SHARED, dev->name, dev);
+	ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
 	if (ret)
 		return ret;
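
dmfe_open() now takes the vector from db->pdev->irq; the handler is installed in ndo_open and torn down in ndo_stop, so it is registered only while the interface is up. A minimal sketch of that pairing with placeholder my_* names.

	/* Sketch: request the IRQ in ndo_open, release it in ndo_stop. */
	#include <linux/interrupt.h>
	#include <linux/netdevice.h>
	#include <linux/pci.h>

	struct my_priv { struct pci_dev *pdev; };

	static irqreturn_t my_isr(int irq, void *dev_id) { return IRQ_HANDLED; }

	static int my_open(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);
		int err;

		err = request_irq(priv->pdev->irq, my_isr, IRQF_SHARED,
				  dev->name, dev);
		if (err)
			return err;

		netif_start_queue(dev);
		return 0;
	}

	static int my_stop(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		netif_stop_queue(dev);
		free_irq(priv->pdev->irq, dev);	/* same cookie as request_irq() */
		return 0;
	}
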
 
@@ -615,14 +618,14 @@
 static void dmfe_init_dm910x(struct DEVICE *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = db->ioaddr;
+	void __iomem *ioaddr = db->ioaddr;
 
 	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
 
 	/* Reset DM910x MAC controller */
-	outl(DM910X_RESET, ioaddr + DCR0);	/* RESET MAC */
+	dw32(DCR0, DM910X_RESET);	/* RESET MAC */
 	udelay(100);
-	outl(db->cr0_data, ioaddr + DCR0);
+	dw32(DCR0, db->cr0_data);
 	udelay(5);
 
 	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
@@ -633,12 +636,12 @@
 	db->media_mode = dmfe_media_mode;
 
 	/* RESET Phyxcer Chip by GPR port bit 7 */
-	outl(0x180, ioaddr + DCR12);		/* Let bit 7 output port */
+	dw32(DCR12, 0x180);		/* Let bit 7 output port */
 	if (db->chip_id == PCI_DM9009_ID) {
-		outl(0x80, ioaddr + DCR12);	/* Issue RESET signal */
+		dw32(DCR12, 0x80);	/* Issue RESET signal */
 		mdelay(300);			/* Delay 300 ms */
 	}
-	outl(0x0, ioaddr + DCR12);	/* Clear RESET signal */
+	dw32(DCR12, 0x0);	/* Clear RESET signal */
 
 	/* Process Phyxcer Media Mode */
 	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
@@ -649,7 +652,7 @@
 		db->op_mode = db->media_mode; 	/* Force Mode */
 
 	/* Initialize Transmit/Receive decriptor and CR3/4 */
-	dmfe_descriptor_init(dev, ioaddr);
+	dmfe_descriptor_init(dev);
 
 	/* Init CR6 to program DM910x operation */
 	update_cr6(db->cr6_data, ioaddr);
@@ -662,10 +665,10 @@
 
 	/* Init CR7, interrupt active bit */
 	db->cr7_data = CR7_DEFAULT;
-	outl(db->cr7_data, ioaddr + DCR7);
+	dw32(DCR7, db->cr7_data);
 
 	/* Init CR15, Tx jabber and Rx watchdog timer */
-	outl(db->cr15_data, ioaddr + DCR15);
+	dw32(DCR15, db->cr15_data);
 
 	/* Enable DM910X Tx/Rx function */
 	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
@@ -682,6 +685,7 @@
 					 struct DEVICE *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	struct tx_desc *txptr;
 	unsigned long flags;
 
@@ -707,7 +711,7 @@
 	}
 
 	/* Disable NIC interrupt */
-	outl(0, dev->base_addr + DCR7);
+	dw32(DCR7, 0);
 
 	/* transmit this packet */
 	txptr = db->tx_insert_ptr;
@@ -721,11 +725,11 @@
 	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
 		db->tx_packet_cnt++;			/* Ready to send */
-		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
+		dw32(DCR1, 0x1);			/* Issue Tx polling */
 		dev->trans_start = jiffies;		/* saved time stamp */
 	} else {
 		db->tx_queue_cnt++;			/* queue TX packet */
-		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
+		dw32(DCR1, 0x1);			/* Issue Tx polling */
 	}
 
 	/* Tx resource check */
@@ -734,7 +738,7 @@
 
 	/* Restore CR7 to enable interrupt */
 	spin_unlock_irqrestore(&db->lock, flags);
-	outl(db->cr7_data, dev->base_addr + DCR7);
+	dw32(DCR7, db->cr7_data);
 
 	/* free this SKB */
 	dev_kfree_skb(skb);
@@ -751,7 +755,7 @@
 static int dmfe_stop(struct DEVICE *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = db->ioaddr;
 
 	DMFE_DBUG(0, "dmfe_stop", 0);
 
@@ -762,12 +766,12 @@
 	del_timer_sync(&db->timer);
 
 	/* Reset & stop DM910X board */
-	outl(DM910X_RESET, ioaddr + DCR0);
-	udelay(5);
-	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+	dw32(DCR0, DM910X_RESET);
+	udelay(100);
+	phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
 
 	/* free interrupt */
-	free_irq(dev->irq, dev);
+	free_irq(db->pdev->irq, dev);
 
 	/* free allocated rx buffer */
 	dmfe_free_rxbuffer(db);
@@ -794,7 +798,7 @@
 {
 	struct DEVICE *dev = dev_id;
 	struct dmfe_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = db->ioaddr;
 	unsigned long flags;
 
 	DMFE_DBUG(0, "dmfe_interrupt()", 0);
@@ -802,15 +806,15 @@
 	spin_lock_irqsave(&db->lock, flags);
 
 	/* Got DM910X status */
-	db->cr5_data = inl(ioaddr + DCR5);
-	outl(db->cr5_data, ioaddr + DCR5);
+	db->cr5_data = dr32(DCR5);
+	dw32(DCR5, db->cr5_data);
 	if ( !(db->cr5_data & 0xc1) ) {
 		spin_unlock_irqrestore(&db->lock, flags);
 		return IRQ_HANDLED;
 	}
 
 	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
-	outl(0, ioaddr + DCR7);
+	dw32(DCR7, 0);
 
 	/* Check system status */
 	if (db->cr5_data & 0x2000) {
@@ -838,11 +842,11 @@
 	if (db->dm910x_chk_mode & 0x2) {
 		db->dm910x_chk_mode = 0x4;
 		db->cr6_data |= 0x100;
-		update_cr6(db->cr6_data, db->ioaddr);
+		update_cr6(db->cr6_data, ioaddr);
 	}
 
 	/* Restore CR7 to enable interrupt mask */
-	outl(db->cr7_data, ioaddr + DCR7);
+	dw32(DCR7, db->cr7_data);
 
 	spin_unlock_irqrestore(&db->lock, flags);
 	return IRQ_HANDLED;
@@ -858,11 +862,14 @@
 
 static void poll_dmfe (struct net_device *dev)
 {
+	struct dmfe_board_info *db = netdev_priv(dev);
+	const int irq = db->pdev->irq;
+
 	/* disable_irq here is not very nice, but with the lockless
 	   interrupt handler we have no other choice. */
-	disable_irq(dev->irq);
-	dmfe_interrupt (dev->irq, dev);
-	enable_irq(dev->irq);
+	disable_irq(irq);
+	dmfe_interrupt (irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -873,7 +880,7 @@
 static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
 {
 	struct tx_desc *txptr;
-	unsigned long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = db->ioaddr;
 	u32 tdes0;
 
 	txptr = db->tx_remove_ptr;
@@ -897,7 +904,7 @@
 					db->tx_fifo_underrun++;
 					if ( !(db->cr6_data & CR6_SFT) ) {
 						db->cr6_data = db->cr6_data | CR6_SFT;
-						update_cr6(db->cr6_data, db->ioaddr);
+						update_cr6(db->cr6_data, ioaddr);
 					}
 				}
 				if (tdes0 & 0x0100)
@@ -924,7 +931,7 @@
 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
 		db->tx_packet_cnt++;			/* Ready to send */
 		db->tx_queue_cnt--;
-		outl(0x1, ioaddr + DCR1);		/* Issue Tx polling */
+		dw32(DCR1, 0x1);			/* Issue Tx polling */
 		dev->trans_start = jiffies;		/* saved time stamp */
 	}
 
@@ -1087,12 +1094,7 @@
 
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-	if (np->pdev)
-		strlcpy(info->bus_info, pci_name(np->pdev),
-			sizeof(info->bus_info));
-	else
-		sprintf(info->bus_info, "EISA 0x%lx %d",
-			dev->base_addr, dev->irq);
+	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
 static int dmfe_ethtool_set_wol(struct net_device *dev,
@@ -1132,10 +1134,11 @@
 
 static void dmfe_timer(unsigned long data)
 {
+	struct net_device *dev = (struct net_device *)data;
+	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	u32 tmp_cr8;
 	unsigned char tmp_cr12;
-	struct DEVICE *dev = (struct DEVICE *) data;
-	struct dmfe_board_info *db = netdev_priv(dev);
  	unsigned long flags;
 
 	int link_ok, link_ok_phy;
@@ -1148,11 +1151,10 @@
 		db->first_in_callback = 1;
 		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
 			db->cr6_data &= ~0x40000;
-			update_cr6(db->cr6_data, db->ioaddr);
-			phy_write(db->ioaddr,
-				  db->phy_addr, 0, 0x1000, db->chip_id);
+			update_cr6(db->cr6_data, ioaddr);
+			phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
 			db->cr6_data |= 0x40000;
-			update_cr6(db->cr6_data, db->ioaddr);
+			update_cr6(db->cr6_data, ioaddr);
 			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
 			add_timer(&db->timer);
 			spin_unlock_irqrestore(&db->lock, flags);
@@ -1167,7 +1169,7 @@
 		db->dm910x_chk_mode = 0x4;
 
 	/* Dynamic reset DM910X : system error or transmit time-out */
-	tmp_cr8 = inl(db->ioaddr + DCR8);
+	tmp_cr8 = dr32(DCR8);
 	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
 		db->reset_cr8++;
 		db->wait_reset = 1;
@@ -1177,7 +1179,7 @@
 	/* TX polling kick monitor */
 	if ( db->tx_packet_cnt &&
 	     time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
-		outl(0x1, dev->base_addr + DCR1);   /* Tx polling again */
+		dw32(DCR1, 0x1);   /* Tx polling again */
 
 		/* TX Timeout */
 		if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
@@ -1200,9 +1202,9 @@
 
 	/* Link status check, Dynamic media type change */
 	if (db->chip_id == PCI_DM9132_ID)
-		tmp_cr12 = inb(db->ioaddr + DCR9 + 3);	/* DM9132 */
+		tmp_cr12 = dr8(DCR9 + 3);	/* DM9132 */
 	else
-		tmp_cr12 = inb(db->ioaddr + DCR12);	/* DM9102/DM9102A */
+		tmp_cr12 = dr8(DCR12);		/* DM9102/DM9102A */
 
 	if ( ((db->chip_id == PCI_DM9102_ID) &&
 		(db->chip_revision == 0x30)) ||
@@ -1251,7 +1253,7 @@
 			/* 10/100M link failed, used 1M Home-Net */
 			db->cr6_data|=0x00040000;	/* bit18=1, MII */
 			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
-			update_cr6(db->cr6_data, db->ioaddr);
+			update_cr6(db->cr6_data, ioaddr);
 		}
 	} else if (!netif_carrier_ok(dev)) {
 
@@ -1288,17 +1290,18 @@
  *	Re-initialize DM910X board
  */
 
-static void dmfe_dynamic_reset(struct DEVICE *dev)
+static void dmfe_dynamic_reset(struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 
 	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
 
 	/* Sopt MAC controller */
 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
-	update_cr6(db->cr6_data, dev->base_addr);
-	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
-	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+	update_cr6(db->cr6_data, ioaddr);
+	dw32(DCR7, 0);				/* Disable Interrupt */
+	dw32(DCR5, dr32(DCR5));
 
 	/* Disable upper layer interface */
 	netif_stop_queue(dev);
@@ -1364,9 +1367,10 @@
  *	Using Chain structure, and allocate Tx/Rx buffer
  */
 
-static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
+static void dmfe_descriptor_init(struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	struct tx_desc *tmp_tx;
 	struct rx_desc *tmp_rx;
 	unsigned char *tmp_buf;
@@ -1379,7 +1383,7 @@
 	/* tx descriptor start pointer */
 	db->tx_insert_ptr = db->first_tx_desc;
 	db->tx_remove_ptr = db->first_tx_desc;
-	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
+	dw32(DCR4, db->first_tx_desc_dma);     /* TX DESC address */
 
 	/* rx descriptor start pointer */
 	db->first_rx_desc = (void *)db->first_tx_desc +
@@ -1389,7 +1393,7 @@
 			sizeof(struct tx_desc) * TX_DESC_CNT;
 	db->rx_insert_ptr = db->first_rx_desc;
 	db->rx_ready_ptr = db->first_rx_desc;
-	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */
+	dw32(DCR3, db->first_rx_desc_dma);		/* RX DESC address */
 
 	/* Init Transmit chain */
 	tmp_buf = db->buf_pool_start;
@@ -1431,14 +1435,14 @@
  *	Firstly stop DM910X , then written value and start
  */
 
-static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
 {
 	u32 cr6_tmp;
 
 	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
-	outl(cr6_tmp, ioaddr + DCR6);
+	dw32(DCR6, cr6_tmp);
 	udelay(5);
-	outl(cr6_data, ioaddr + DCR6);
+	dw32(DCR6, cr6_data);
 	udelay(5);
 }
 
@@ -1448,24 +1452,19 @@
  *	This setup frame initialize DM910X address filter mode
 */
 
-static void dm9132_id_table(struct DEVICE *dev)
+static void dm9132_id_table(struct net_device *dev)
 {
+	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr + 0xc0;
+	u16 *addrptr = (u16 *)dev->dev_addr;
 	struct netdev_hw_addr *ha;
-	u16 * addrptr;
-	unsigned long ioaddr = dev->base_addr+0xc0;		/* ID Table */
-	u32 hash_val;
 	u16 i, hash_table[4];
 
-	DMFE_DBUG(0, "dm9132_id_table()", 0);
-
 	/* Node address */
-	addrptr = (u16 *) dev->dev_addr;
-	outw(addrptr[0], ioaddr);
-	ioaddr += 4;
-	outw(addrptr[1], ioaddr);
-	ioaddr += 4;
-	outw(addrptr[2], ioaddr);
-	ioaddr += 4;
+	for (i = 0; i < 3; i++) {
+		dw16(0, addrptr[i]);
+		ioaddr += 4;
+	}
 
 	/* Clear Hash Table */
 	memset(hash_table, 0, sizeof(hash_table));
@@ -1475,13 +1474,14 @@
 
 	/* the multicast address in Hash Table : 64 bits */
 	netdev_for_each_mc_addr(ha, dev) {
-		hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
+		u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
+
 		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
 	}
 
 	/* Write the hash table to MAC MD table */
 	for (i = 0; i < 4; i++, ioaddr += 4)
-		outw(hash_table[i], ioaddr);
+		dw16(0, hash_table[i]);
 }
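
dm9132_id_table() builds a 64-bin multicast hash: the CRC of each address selects one of 64 bit positions, stored across four 16-bit words that are then written to the chip through dw16(). A sketch of the bit-placement arithmetic; ether_crc_le() stands in for the driver's private cal_CRC(), and the broadcast entry follows the driver's convention of reserving bit 63.

	/* Sketch: 64-bin multicast hash placement as in dm9132_id_table(). */
	#include <linux/crc32.h>
	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	static void build_mc_hash(struct net_device *dev, u16 hash_table[4])
	{
		struct netdev_hw_addr *ha;

		memset(hash_table, 0, 4 * sizeof(u16));
		hash_table[3] = 0x8000;		/* broadcast frames map to bit 63 */

		netdev_for_each_mc_addr(ha, dev) {
			u32 hash_val = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;

			hash_table[hash_val / 16] |= (u16)1 << (hash_val % 16);
		}
	}
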
 
 
@@ -1490,7 +1490,7 @@
  *	This setup frame initialize DM910X address filter mode
  */
 
-static void send_filter_frame(struct DEVICE *dev)
+static void send_filter_frame(struct net_device *dev)
 {
 	struct dmfe_board_info *db = netdev_priv(dev);
 	struct netdev_hw_addr *ha;
@@ -1535,12 +1535,14 @@
 
 	/* Resource Check and Send the setup packet */
 	if (!db->tx_packet_cnt) {
+		void __iomem *ioaddr = db->ioaddr;
+
 		/* Resource Empty */
 		db->tx_packet_cnt++;
 		txptr->tdes0 = cpu_to_le32(0x80000000);
-		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
-		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
-		update_cr6(db->cr6_data, dev->base_addr);
+		update_cr6(db->cr6_data | 0x2000, ioaddr);
+		dw32(DCR1, 0x1);	/* Issue Tx polling */
+		update_cr6(db->cr6_data, ioaddr);
 		dev->trans_start = jiffies;
 	} else
 		db->tx_queue_cnt++;	/* Put in TX queue */
@@ -1575,43 +1577,59 @@
 	db->rx_insert_ptr = rxptr;
 }
 
+static void srom_clk_write(void __iomem *ioaddr, u32 data)
+{
+	static const u32 cmd[] = {
+		CR9_SROM_READ | CR9_SRCS,
+		CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
+		CR9_SROM_READ | CR9_SRCS
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
+		dw32(DCR9, data | cmd[i]);
+		udelay(5);
+	}
+}
 
 /*
  *	Read one word data from the serial ROM
  */
-
-static u16 read_srom_word(long ioaddr, int offset)
+static u16 read_srom_word(void __iomem *ioaddr, int offset)
 {
+	u16 srom_data;
 	int i;
-	u16 srom_data = 0;
-	long cr9_ioaddr = ioaddr + DCR9;
 
-	outl(CR9_SROM_READ, cr9_ioaddr);
-	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+	dw32(DCR9, CR9_SROM_READ);
+	udelay(5);
+	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
+	udelay(5);
 
 	/* Send the Read Command 110b */
-	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+	srom_clk_write(ioaddr, SROM_DATA_1);
+	srom_clk_write(ioaddr, SROM_DATA_1);
+	srom_clk_write(ioaddr, SROM_DATA_0);
 
 	/* Send the offset */
 	for (i = 5; i >= 0; i--) {
 		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
-		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+		srom_clk_write(ioaddr, srom_data);
 	}
 
-	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+	dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
+	udelay(5);
 
 	for (i = 16; i > 0; i--) {
-		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+		dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
 		udelay(5);
 		srom_data = (srom_data << 1) |
-				((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
-		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+				((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
+		dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 		udelay(5);
 	}
 
-	outl(CR9_SROM_READ, cr9_ioaddr);
+	dw32(DCR9, CR9_SROM_READ);
+	udelay(5);
 	return srom_data;
 }
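
read_srom_word() bit-bangs a 93C46-style serial EEPROM through CR9: assert chip select, send the read opcode 110, shift out six address bits MSB-first, then clock in sixteen data bits, one per dw32()/udelay() pair. A compact sketch of the same shift sequence against abstract pin helpers; srom_clock_bit_out()/srom_clock_bit_in() are placeholders for the DCR9 pokes, and chip-select handling is omitted.

	/* Sketch: MSB-first serial EEPROM read as done by read_srom_word(). */
	#include <linux/types.h>

	void srom_clock_bit_out(void *ctx, int bit);	/* drive DI, pulse the clock */
	int  srom_clock_bit_in(void *ctx);		/* pulse the clock, sample DO */

	static u16 srom_read_word(void *ctx, unsigned int offset)
	{
		u16 word = 0;
		int i;

		/* read opcode: 1 1 0 */
		srom_clock_bit_out(ctx, 1);
		srom_clock_bit_out(ctx, 1);
		srom_clock_bit_out(ctx, 0);

		/* six address bits, MSB first */
		for (i = 5; i >= 0; i--)
			srom_clock_bit_out(ctx, (offset >> i) & 1);

		/* sixteen data bits, MSB arrives first */
		for (i = 0; i < 16; i++)
			word = (word << 1) | (srom_clock_bit_in(ctx) & 1);

		return word;
	}
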
 
@@ -1620,13 +1638,14 @@
  *	Auto sense the media mode
  */
 
-static u8 dmfe_sense_speed(struct dmfe_board_info * db)
+static u8 dmfe_sense_speed(struct dmfe_board_info *db)
 {
+	void __iomem *ioaddr = db->ioaddr;
 	u8 ErrFlag = 0;
 	u16 phy_mode;
 
 	/* CR6 bit18=0, select 10/100M */
-	update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
+	update_cr6(db->cr6_data & ~0x40000, ioaddr);
 
 	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
 	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
@@ -1665,11 +1684,12 @@
 
 static void dmfe_set_phyxcer(struct dmfe_board_info *db)
 {
+	void __iomem *ioaddr = db->ioaddr;
 	u16 phy_reg;
 
 	/* Select 10/100M phyxcer */
 	db->cr6_data &= ~0x40000;
-	update_cr6(db->cr6_data, db->ioaddr);
+	update_cr6(db->cr6_data, ioaddr);
 
 	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
 	if (db->chip_id == PCI_DM9009_ID) {
@@ -1765,18 +1785,15 @@
  *	Write a word to Phy register
  */
 
-static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
+static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
 		      u16 phy_data, u32 chip_id)
 {
 	u16 i;
-	unsigned long ioaddr;
 
 	if (chip_id == PCI_DM9132_ID) {
-		ioaddr = iobase + 0x80 + offset * 4;
-		outw(phy_data, ioaddr);
+		dw16(0x80 + offset * 4, phy_data);
 	} else {
 		/* DM9102/DM9102A Chip */
-		ioaddr = iobase + DCR9;
 
 		/* Send 33 synchronization clock to Phy controller */
 		for (i = 0; i < 35; i++)
@@ -1816,19 +1833,16 @@
  *	Read a word data from phy register
  */
 
-static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
 {
 	int i;
 	u16 phy_data;
-	unsigned long ioaddr;
 
 	if (chip_id == PCI_DM9132_ID) {
 		/* DM9132 Chip */
-		ioaddr = iobase + 0x80 + offset * 4;
-		phy_data = inw(ioaddr);
+		phy_data = dr16(0x80 + offset * 4);
 	} else {
 		/* DM9102/DM9102A Chip */
-		ioaddr = iobase + DCR9;
 
 		/* Send 33 synchronization clock to Phy controller */
 		for (i = 0; i < 35; i++)
@@ -1870,13 +1884,13 @@
  *	Write one bit data to Phy Controller
  */
 
-static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
+static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
 {
-	outl(phy_data, ioaddr);			/* MII Clock Low */
+	dw32(DCR9, phy_data);		/* MII Clock Low */
 	udelay(1);
-	outl(phy_data | MDCLKH, ioaddr);	/* MII Clock High */
+	dw32(DCR9, phy_data | MDCLKH);	/* MII Clock High */
 	udelay(1);
-	outl(phy_data, ioaddr);			/* MII Clock Low */
+	dw32(DCR9, phy_data);		/* MII Clock Low */
 	udelay(1);
 }
 
@@ -1885,14 +1899,14 @@
  *	Read one bit phy data from PHY controller
  */
 
-static u16 phy_read_1bit(unsigned long ioaddr)
+static u16 phy_read_1bit(void __iomem *ioaddr)
 {
 	u16 phy_data;
 
-	outl(0x50000, ioaddr);
+	dw32(DCR9, 0x50000);
 	udelay(1);
-	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
-	outl(0x40000, ioaddr);
+	phy_data = (dr32(DCR9) >> 19) & 0x1;
+	dw32(DCR9, 0x40000);
 	udelay(1);
 
 	return phy_data;
@@ -1978,7 +1992,7 @@
 
 	/* Check DM9801 or DM9802 present or not */
 	db->HPNA_present = 0;
-	update_cr6(db->cr6_data|0x40000, db->ioaddr);
+	update_cr6(db->cr6_data | 0x40000, db->ioaddr);
 	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
 	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
 		/* DM9801 or DM9802 present */
@@ -2095,6 +2109,7 @@
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
 	struct dmfe_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	u32 tmp;
 
 	/* Disable upper layer interface */
@@ -2102,11 +2117,11 @@
 
 	/* Disable Tx/Rx */
 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
-	update_cr6(db->cr6_data, dev->base_addr);
+	update_cr6(db->cr6_data, ioaddr);
 
 	/* Disable Interrupt */
-	outl(0, dev->base_addr + DCR7);
-	outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);
+	dw32(DCR7, 0);
+	dw32(DCR5, dr32(DCR5));
 
 	/* Fre RX buffers */
 	dmfe_free_rxbuffer(db);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fea3641..c4f37ac 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -328,7 +328,7 @@
 	udelay(100);
 
 	if (tulip_debug > 1)
-		netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq);
+		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
 
 	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
 	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -515,11 +515,13 @@
 static int
 tulip_open(struct net_device *dev)
 {
+	struct tulip_private *tp = netdev_priv(dev);
 	int retval;
 
 	tulip_init_ring (dev);
 
-	retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
+	retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
+			     dev->name, dev);
 	if (retval)
 		goto free_ring;
 
@@ -841,7 +843,7 @@
 		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
 			   ioread32 (ioaddr + CSR5));
 
-	free_irq (dev->irq, dev);
+	free_irq (tp->pdev->irq, dev);
 
 	tulip_free_ring (dev);
 
@@ -1489,8 +1491,6 @@
 
 	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
 
-	dev->base_addr = (unsigned long)ioaddr;
-
 #ifdef CONFIG_TULIP_MWI
 	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
 		tulip_mwi_config (pdev, dev);
@@ -1650,7 +1650,6 @@
 	for (i = 0; i < 6; i++)
 		last_phys_addr[i] = dev->dev_addr[i];
 	last_irq = irq;
-	dev->irq = irq;
 
 	/* The lower four bits are the media type. */
 	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
@@ -1858,7 +1857,8 @@
 	tulip_down(dev);
 
 	netif_device_detach(dev);
-	free_irq(dev->irq, dev);
+	/* FIXME: it needlessly adds an error path. */
+	free_irq(tp->pdev->irq, dev);
 
 save_state:
 	pci_save_state(pdev);
@@ -1900,7 +1900,9 @@
 		return retval;
 	}
 
-	if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
+	retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
+			     dev->name, dev);
+	if (retval) {
 		pr_err("request_irq failed in resume\n");
 		return retval;
 	}
@@ -1960,11 +1962,14 @@
 
 static void poll_tulip (struct net_device *dev)
 {
+	struct tulip_private *tp = netdev_priv(dev);
+	const int irq = tp->pdev->irq;
+
 	/* disable_irq here is not very nice, but with the lockless
 	   interrupt handler we have no other choice. */
-	disable_irq(dev->irq);
-	tulip_interrupt (dev->irq, dev);
-	enable_irq(dev->irq);
+	disable_irq(irq);
+	tulip_interrupt (irq, dev);
+	enable_irq(irq);
 }
 #endif
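
poll_dmfe() and poll_tulip() implement the netpoll controller by masking the line with disable_irq(), calling the normal ISR directly, and re-enabling it; these hunks only change where the vector comes from. A sketch of wiring such a poll controller into net_device_ops, with my_* placeholder names.

	/* Sketch: a netpoll controller that reuses the regular ISR. */
	#include <linux/interrupt.h>
	#include <linux/netdevice.h>
	#include <linux/pci.h>

	struct my_priv { struct pci_dev *pdev; };

	static irqreturn_t my_isr(int irq, void *dev_id) { return IRQ_HANDLED; }

	#ifdef CONFIG_NET_POLL_CONTROLLER
	static void my_poll_controller(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);
		const int irq = priv->pdev->irq;

		disable_irq(irq);	/* lockless ISR: keep the real interrupt out */
		my_isr(irq, dev);
		enable_irq(irq);
	}
	#endif

	static const struct net_device_ops my_netdev_ops = {
	#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller	= my_poll_controller,
	#endif
	};
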
 
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index fc4001f..75d45f8 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -42,6 +42,8 @@
 #include <asm/dma.h>
 #include <asm/uaccess.h>
 
+#define uw32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define ur32(reg)	ioread32(ioaddr + (reg))
 
 /* Board/System/Debug information/definition ---------------- */
 #define PCI_ULI5261_ID  0x526110B9	/* ULi M5261 ID*/
@@ -110,14 +112,6 @@
 
 #define SROM_V41_CODE   0x14
 
-#define SROM_CLK_WRITE(data, ioaddr)					\
-		outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);		\
-		udelay(5);						\
-		outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);	\
-		udelay(5);						\
-		outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);		\
-		udelay(5);
-
 /* Structure/enum declaration ------------------------------- */
 struct tx_desc {
         __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -132,12 +126,15 @@
 } __attribute__(( aligned(32) ));
 
 struct uli526x_board_info {
-	u32 chip_id;			/* Chip vendor/Device ID */
+	struct uli_phy_ops {
+		void (*write)(struct uli526x_board_info *, u8, u8, u16);
+		u16 (*read)(struct uli526x_board_info *, u8, u8);
+	} phy;
 	struct net_device *next_dev;	/* next device */
 	struct pci_dev *pdev;		/* PCI device */
 	spinlock_t lock;
 
-	long ioaddr;			/* I/O base address */
+	void __iomem *ioaddr;		/* I/O base address */
 	u32 cr0_data;
 	u32 cr5_data;
 	u32 cr6_data;
@@ -227,21 +224,21 @@
 static int uli526x_stop(struct net_device *);
 static void uli526x_set_filter_mode(struct net_device *);
 static const struct ethtool_ops netdev_ethtool_ops;
-static u16 read_srom_word(long, int);
+static u16 read_srom_word(struct uli526x_board_info *, int);
 static irqreturn_t uli526x_interrupt(int, void *);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void uli526x_poll(struct net_device *dev);
 #endif
-static void uli526x_descriptor_init(struct net_device *, unsigned long);
+static void uli526x_descriptor_init(struct net_device *, void __iomem *);
 static void allocate_rx_buffer(struct net_device *);
-static void update_cr6(u32, unsigned long);
+static void update_cr6(u32, void __iomem *);
 static void send_filter_frame(struct net_device *, int);
-static u16 phy_read(unsigned long, u8, u8, u32);
-static u16 phy_readby_cr10(unsigned long, u8, u8);
-static void phy_write(unsigned long, u8, u8, u16, u32);
-static void phy_writeby_cr10(unsigned long, u8, u8, u16);
-static void phy_write_1bit(unsigned long, u32, u32);
-static u16 phy_read_1bit(unsigned long, u32);
+static u16 phy_readby_cr9(struct uli526x_board_info *, u8, u8);
+static u16 phy_readby_cr10(struct uli526x_board_info *, u8, u8);
+static void phy_writeby_cr9(struct uli526x_board_info *, u8, u8, u16);
+static void phy_writeby_cr10(struct uli526x_board_info *, u8, u8, u16);
+static void phy_write_1bit(struct uli526x_board_info *db, u32);
+static u16 phy_read_1bit(struct uli526x_board_info *db);
 static u8 uli526x_sense_speed(struct uli526x_board_info *);
 static void uli526x_process_mode(struct uli526x_board_info *);
 static void uli526x_timer(unsigned long);
@@ -253,6 +250,18 @@
 static void uli526x_init(struct net_device *);
 static void uli526x_set_phyxcer(struct uli526x_board_info *);
 
+static void srom_clk_write(struct uli526x_board_info *db, u32 data)
+{
+	void __iomem *ioaddr = db->ioaddr;
+
+	uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
+	udelay(5);
+	uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
+	udelay(5);
+	uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
+	udelay(5);
+}
+
 /* ULI526X network board routine ---------------------------- */
 
 static const struct net_device_ops netdev_ops = {
@@ -277,6 +286,7 @@
 {
 	struct uli526x_board_info *db;	/* board information structure */
 	struct net_device *dev;
+	void __iomem *ioaddr;
 	int i, err;
 
 	ULI526X_DBUG(0, "uli526x_init_one()", 0);
@@ -313,9 +323,9 @@
 		goto err_out_disable;
 	}
 
-	if (pci_request_regions(pdev, DRV_NAME)) {
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err < 0) {
 		pr_err("Failed to request PCI regions\n");
-		err = -ENODEV;
 		goto err_out_disable;
 	}
 
@@ -323,32 +333,41 @@
 	db = netdev_priv(dev);
 
 	/* Allocate Tx/Rx descriptor memory */
+	err = -ENOMEM;
+
 	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
-	if(db->desc_pool_ptr == NULL)
-	{
-		err = -ENOMEM;
-		goto err_out_nomem;
-	}
+	if (!db->desc_pool_ptr)
+		goto err_out_release;
+
 	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
-	if(db->buf_pool_ptr == NULL)
-	{
-		err = -ENOMEM;
-		goto err_out_nomem;
-	}
+	if (!db->buf_pool_ptr)
+		goto err_out_free_tx_desc;
 
 	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
 	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
 	db->buf_pool_start = db->buf_pool_ptr;
 	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
 
-	db->chip_id = ent->driver_data;
-	db->ioaddr = pci_resource_start(pdev, 0);
+	switch (ent->driver_data) {
+	case PCI_ULI5263_ID:
+		db->phy.write	= phy_writeby_cr10;
+		db->phy.read	= phy_readby_cr10;
+		break;
+	default:
+		db->phy.write	= phy_writeby_cr9;
+		db->phy.read	= phy_readby_cr9;
+		break;
+	}
 
+	/* IO region. */
+	ioaddr = pci_iomap(pdev, 0, 0);
+	if (!ioaddr)
+		goto err_out_free_tx_buf;
+
+	db->ioaddr = ioaddr;
 	db->pdev = pdev;
 	db->init = 1;
 
-	dev->base_addr = db->ioaddr;
-	dev->irq = pdev->irq;
 	pci_set_drvdata(pdev, dev);
 
 	/* Register some necessary functions */
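
Instead of passing chip_id into every phy_read()/phy_write() and branching on PCI_ULI5263_ID inside, the probe hunk above binds a pair of function pointers (struct uli_phy_ops) once, and all later MDIO accesses go through db->phy.read()/db->phy.write(). A minimal sketch of that dispatch pattern with illustrative names.

	/* Sketch: selecting MDIO accessors once instead of branching per call. */
	#include <linux/types.h>

	struct my_board;

	struct my_phy_ops {
		void (*write)(struct my_board *, u8 phy, u8 reg, u16 val);
		u16  (*read)(struct my_board *, u8 phy, u8 reg);
	};

	struct my_board {
		struct my_phy_ops phy;
		/* ... */
	};

	/* Two register-level implementations, e.g. bit-banged vs. CR10-based. */
	void cr9_write(struct my_board *, u8, u8, u16);
	u16  cr9_read(struct my_board *, u8, u8);
	void cr10_write(struct my_board *, u8, u8, u16);
	u16  cr10_read(struct my_board *, u8, u8);

	static void bind_phy_ops(struct my_board *db, bool has_cr10_mdio)
	{
		if (has_cr10_mdio) {
			db->phy.write = cr10_write;
			db->phy.read  = cr10_read;
		} else {
			db->phy.write = cr9_write;
			db->phy.read  = cr9_read;
		}
	}

	/* Callers then stay chip-agnostic:
	 *	u16 bmcr = db->phy.read(db, phy_addr, 0);
	 *	db->phy.write(db, phy_addr, 0, bmcr | 0x8000);
	 */
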
@@ -360,24 +379,24 @@
 
 	/* read 64 word srom data */
 	for (i = 0; i < 64; i++)
-		((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
+		((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db, i));
 
 	/* Set Node address */
 	if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0)		/* SROM absent, so read MAC address from ID Table */
 	{
-		outl(0x10000, db->ioaddr + DCR0);	//Diagnosis mode
-		outl(0x1c0, db->ioaddr + DCR13);	//Reset dianostic pointer port
-		outl(0, db->ioaddr + DCR14);		//Clear reset port
-		outl(0x10, db->ioaddr + DCR14);		//Reset ID Table pointer
-		outl(0, db->ioaddr + DCR14);		//Clear reset port
-		outl(0, db->ioaddr + DCR13);		//Clear CR13
-		outl(0x1b0, db->ioaddr + DCR13);	//Select ID Table access port
+		uw32(DCR0, 0x10000);	//Diagnosis mode
+		uw32(DCR13, 0x1c0);	//Reset diagnostic pointer port
+		uw32(DCR14, 0);		//Clear reset port
+		uw32(DCR14, 0x10);	//Reset ID Table pointer
+		uw32(DCR14, 0);		//Clear reset port
+		uw32(DCR13, 0);		//Clear CR13
+		uw32(DCR13, 0x1b0);	//Select ID Table access port
 		//Read MAC address from CR14
 		for (i = 0; i < 6; i++)
-			dev->dev_addr[i] = inl(db->ioaddr + DCR14);
+			dev->dev_addr[i] = ur32(DCR14);
 		//Read end
-		outl(0, db->ioaddr + DCR13);	//Clear CR13
-		outl(0, db->ioaddr + DCR0);		//Clear CR0
+		uw32(DCR13, 0);		//Clear CR13
+		uw32(DCR0, 0);		//Clear CR0
 		udelay(10);
 	}
 	else		/*Exist SROM*/
@@ -387,26 +406,26 @@
 	}
 	err = register_netdev (dev);
 	if (err)
-		goto err_out_res;
+		goto err_out_unmap;
 
 	netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
 		    ent->driver_data >> 16, pci_name(pdev),
-		    dev->dev_addr, dev->irq);
+		    dev->dev_addr, pdev->irq);
 
 	pci_set_master(pdev);
 
 	return 0;
 
-err_out_res:
+err_out_unmap:
+	pci_iounmap(pdev, db->ioaddr);
+err_out_free_tx_buf:
+	pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+			    db->buf_pool_ptr, db->buf_pool_dma_ptr);
+err_out_free_tx_desc:
+	pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+			    db->desc_pool_ptr, db->desc_pool_dma_ptr);
+err_out_release:
 	pci_release_regions(pdev);
-err_out_nomem:
-	if(db->desc_pool_ptr)
-		pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
-			db->desc_pool_ptr, db->desc_pool_dma_ptr);
-
-	if(db->buf_pool_ptr != NULL)
-		pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
-			db->buf_pool_ptr, db->buf_pool_dma_ptr);
 err_out_disable:
 	pci_disable_device(pdev);
 err_out_free:
@@ -422,19 +441,17 @@
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct uli526x_board_info *db = netdev_priv(dev);
 
-	ULI526X_DBUG(0, "uli526x_remove_one()", 0);
-
+	unregister_netdev(dev);
+	pci_iounmap(pdev, db->ioaddr);
 	pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
 				DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
  				db->desc_pool_dma_ptr);
 	pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
 				db->buf_pool_ptr, db->buf_pool_dma_ptr);
-	unregister_netdev(dev);
 	pci_release_regions(pdev);
-	free_netdev(dev);	/* free board information */
-	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
-	ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(dev);
 }
 
 
@@ -468,7 +485,8 @@
 	/* Initialize ULI526X board */
 	uli526x_init(dev);
 
-	ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev);
+	ret = request_irq(db->pdev->irq, uli526x_interrupt, IRQF_SHARED,
+			  dev->name, dev);
 	if (ret)
 		return ret;
 
@@ -496,57 +514,57 @@
 static void uli526x_init(struct net_device *dev)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = db->ioaddr;
+	struct uli_phy_ops *phy = &db->phy;
+	void __iomem *ioaddr = db->ioaddr;
 	u8	phy_tmp;
 	u8	timeout;
-	u16	phy_value;
 	u16 phy_reg_reset;
 
 
 	ULI526X_DBUG(0, "uli526x_init()", 0);
 
 	/* Reset M526x MAC controller */
-	outl(ULI526X_RESET, ioaddr + DCR0);	/* RESET MAC */
+	uw32(DCR0, ULI526X_RESET);	/* RESET MAC */
 	udelay(100);
-	outl(db->cr0_data, ioaddr + DCR0);
+	uw32(DCR0, db->cr0_data);
 	udelay(5);
 
 	/* Phy addr : In some boards,M5261/M5263 phy address != 1 */
 	db->phy_addr = 1;
-	for(phy_tmp=0;phy_tmp<32;phy_tmp++)
-	{
-		phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add
-		if(phy_value != 0xffff&&phy_value!=0)
-		{
+	for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) {
+		u16 phy_value;
+
+		phy_value = phy->read(db, phy_tmp, 3);	//peer add
+		if (phy_value != 0xffff && phy_value != 0) {
 			db->phy_addr = phy_tmp;
 			break;
 		}
 	}
-	if(phy_tmp == 32)
+
+	if (phy_tmp == 32)
 		pr_warn("Can not find the phy address!!!\n");
 	/* Parser SROM and media mode */
 	db->media_mode = uli526x_media_mode;
 
 	/* phyxcer capability setting */
-	phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
+	phy_reg_reset = phy->read(db, db->phy_addr, 0);
 	phy_reg_reset = (phy_reg_reset | 0x8000);
-	phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
+	phy->write(db, db->phy_addr, 0, phy_reg_reset);
 
 	/* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
 	 * functions") or phy data sheet for details on phy reset
 	 */
 	udelay(500);
 	timeout = 10;
-	while (timeout-- &&
-		phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000)
-			udelay(100);
+	while (timeout-- && phy->read(db, db->phy_addr, 0) & 0x8000)
+		udelay(100);
 
 	/* Process Phyxcer Media Mode */
 	uli526x_set_phyxcer(db);
 
 	/* Media Mode Process */
 	if ( !(db->media_mode & ULI526X_AUTO) )
-		db->op_mode = db->media_mode; 	/* Force Mode */
+		db->op_mode = db->media_mode;		/* Force Mode */
 
 	/* Initialize Transmit/Receive decriptor and CR3/4 */
 	uli526x_descriptor_init(dev, ioaddr);
@@ -559,10 +577,10 @@
 
 	/* Init CR7, interrupt active bit */
 	db->cr7_data = CR7_DEFAULT;
-	outl(db->cr7_data, ioaddr + DCR7);
+	uw32(DCR7, db->cr7_data);
 
 	/* Init CR15, Tx jabber and Rx watchdog timer */
-	outl(db->cr15_data, ioaddr + DCR15);
+	uw32(DCR15, db->cr15_data);
 
 	/* Enable ULI526X Tx/Rx function */
 	db->cr6_data |= CR6_RXSC | CR6_TXSC;
@@ -579,6 +597,7 @@
 					    struct net_device *dev)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	struct tx_desc *txptr;
 	unsigned long flags;
 
@@ -604,7 +623,7 @@
 	}
 
 	/* Disable NIC interrupt */
-	outl(0, dev->base_addr + DCR7);
+	uw32(DCR7, 0);
 
 	/* transmit this packet */
 	txptr = db->tx_insert_ptr;
@@ -615,10 +634,10 @@
 	db->tx_insert_ptr = txptr->next_tx_desc;
 
 	/* Transmit Packet Process */
-	if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
+	if (db->tx_packet_cnt < TX_DESC_CNT) {
 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
 		db->tx_packet_cnt++;			/* Ready to send */
-		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
+		uw32(DCR1, 0x1);			/* Issue Tx polling */
 		dev->trans_start = jiffies;		/* saved time stamp */
 	}
 
@@ -628,7 +647,7 @@
 
 	/* Restore CR7 to enable interrupt */
 	spin_unlock_irqrestore(&db->lock, flags);
-	outl(db->cr7_data, dev->base_addr + DCR7);
+	uw32(DCR7, db->cr7_data);
 
 	/* free this SKB */
 	dev_kfree_skb(skb);
@@ -645,9 +664,7 @@
 static int uli526x_stop(struct net_device *dev)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
-
-	ULI526X_DBUG(0, "uli526x_stop", 0);
+	void __iomem *ioaddr = db->ioaddr;
 
 	/* disable system */
 	netif_stop_queue(dev);
@@ -656,12 +673,12 @@
 	del_timer_sync(&db->timer);
 
 	/* Reset & stop ULI526X board */
-	outl(ULI526X_RESET, ioaddr + DCR0);
+	uw32(DCR0, ULI526X_RESET);
 	udelay(5);
-	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+	db->phy.write(db, db->phy_addr, 0, 0x8000);
 
 	/* free interrupt */
-	free_irq(dev->irq, dev);
+	free_irq(db->pdev->irq, dev);
 
 	/* free allocated rx buffer */
 	uli526x_free_rxbuffer(db);
@@ -679,18 +696,18 @@
 {
 	struct net_device *dev = dev_id;
 	struct uli526x_board_info *db = netdev_priv(dev);
-	unsigned long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = db->ioaddr;
 	unsigned long flags;
 
 	spin_lock_irqsave(&db->lock, flags);
-	outl(0, ioaddr + DCR7);
+	uw32(DCR7, 0);
 
 	/* Got ULI526X status */
-	db->cr5_data = inl(ioaddr + DCR5);
-	outl(db->cr5_data, ioaddr + DCR5);
+	db->cr5_data = ur32(DCR5);
+	uw32(DCR5, db->cr5_data);
 	if ( !(db->cr5_data & 0x180c1) ) {
 		/* Restore CR7 to enable interrupt mask */
-		outl(db->cr7_data, ioaddr + DCR7);
+		uw32(DCR7, db->cr7_data);
 		spin_unlock_irqrestore(&db->lock, flags);
 		return IRQ_HANDLED;
 	}
@@ -718,7 +735,7 @@
 		uli526x_free_tx_pkt(dev, db);
 
 	/* Restore CR7 to enable interrupt mask */
-	outl(db->cr7_data, ioaddr + DCR7);
+	uw32(DCR7, db->cr7_data);
 
 	spin_unlock_irqrestore(&db->lock, flags);
 	return IRQ_HANDLED;
@@ -727,8 +744,10 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void uli526x_poll(struct net_device *dev)
 {
+	struct uli526x_board_info *db = netdev_priv(dev);
+
 	/* ISR grabs the irqsave lock, so this should be safe */
-	uli526x_interrupt(dev->irq, dev);
+	uli526x_interrupt(db->pdev->irq, dev);
 }
 #endif
 
@@ -962,12 +981,7 @@
 
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-	if (np->pdev)
-		strlcpy(info->bus_info, pci_name(np->pdev),
-			sizeof(info->bus_info));
-	else
-		sprintf(info->bus_info, "EISA 0x%lx %d",
-			dev->base_addr, dev->irq);
+	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
@@ -1007,18 +1021,20 @@
 
 static void uli526x_timer(unsigned long data)
 {
-	u32 tmp_cr8;
-	unsigned char tmp_cr12=0;
 	struct net_device *dev = (struct net_device *) data;
 	struct uli526x_board_info *db = netdev_priv(dev);
+	struct uli_phy_ops *phy = &db->phy;
+	void __iomem *ioaddr = db->ioaddr;
  	unsigned long flags;
+	u8 tmp_cr12 = 0;
+	u32 tmp_cr8;
 
 	//ULI526X_DBUG(0, "uli526x_timer()", 0);
 	spin_lock_irqsave(&db->lock, flags);
 
 
 	/* Dynamic reset ULI526X : system error or transmit time-out */
-	tmp_cr8 = inl(db->ioaddr + DCR8);
+	tmp_cr8 = ur32(DCR8);
 	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
 		db->reset_cr8++;
 		db->wait_reset = 1;
@@ -1028,7 +1044,7 @@
 	/* TX polling kick monitor */
 	if ( db->tx_packet_cnt &&
 	     time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
-		outl(0x1, dev->base_addr + DCR1);   // Tx polling again
+		uw32(DCR1, 0x1);   // Tx polling again
 
 		// TX Timeout
 		if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
@@ -1049,7 +1065,7 @@
 	}
 
 	/* Link status check, Dynamic media type change */
-	if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
+	if ((phy->read(db, db->phy_addr, 5) & 0x01e0)!=0)
 		tmp_cr12 = 3;
 
 	if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
@@ -1062,7 +1078,7 @@
 		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
 		/* AUTO don't need */
 		if ( !(db->media_mode & 0x8) )
-			phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+			phy->write(db, db->phy_addr, 0, 0x1000);
 
 		/* AUTO mode, if INT phyxcer link failed, select EXT device */
 		if (db->media_mode & ULI526X_AUTO) {
@@ -1119,12 +1135,13 @@
 static void uli526x_reset_prepare(struct net_device *dev)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 
 	/* Sopt MAC controller */
 	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
-	update_cr6(db->cr6_data, dev->base_addr);
-	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
-	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+	update_cr6(db->cr6_data, ioaddr);
+	uw32(DCR7, 0);				/* Disable Interrupt */
+	uw32(DCR5, ur32(DCR5));
 
 	/* Disable upper layer interface */
 	netif_stop_queue(dev);
@@ -1289,7 +1306,7 @@
  *	Using Chain structure, and allocate Tx/Rx buffer
  */
 
-static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr)
+static void uli526x_descriptor_init(struct net_device *dev, void __iomem *ioaddr)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
 	struct tx_desc *tmp_tx;
@@ -1304,14 +1321,14 @@
 	/* tx descriptor start pointer */
 	db->tx_insert_ptr = db->first_tx_desc;
 	db->tx_remove_ptr = db->first_tx_desc;
-	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
+	uw32(DCR4, db->first_tx_desc_dma);	/* TX DESC address */
 
 	/* rx descriptor start pointer */
 	db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
 	db->first_rx_desc_dma =  db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
 	db->rx_insert_ptr = db->first_rx_desc;
 	db->rx_ready_ptr = db->first_rx_desc;
-	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */
+	uw32(DCR3, db->first_rx_desc_dma);	/* RX DESC address */
 
 	/* Init Transmit chain */
 	tmp_buf = db->buf_pool_start;
@@ -1352,11 +1369,9 @@
  *	Update CR6 value
  *	Firstly stop ULI526X, then written value and start
  */
-
-static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
 {
-
-	outl(cr6_data, ioaddr + DCR6);
+	uw32(DCR6, cr6_data);
 	udelay(5);
 }
 
@@ -1375,6 +1390,7 @@
 static void send_filter_frame(struct net_device *dev, int mc_cnt)
 {
 	struct uli526x_board_info *db = netdev_priv(dev);
+	void __iomem *ioaddr = db->ioaddr;
 	struct netdev_hw_addr *ha;
 	struct tx_desc *txptr;
 	u16 * addrptr;
@@ -1420,9 +1436,9 @@
 		/* Resource Empty */
 		db->tx_packet_cnt++;
 		txptr->tdes0 = cpu_to_le32(0x80000000);
-		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
-		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
-		update_cr6(db->cr6_data, dev->base_addr);
+		update_cr6(db->cr6_data | 0x2000, ioaddr);
+		uw32(DCR1, 0x1);	/* Issue Tx polling */
+		update_cr6(db->cr6_data, ioaddr);
 		dev->trans_start = jiffies;
 	} else
 		netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
@@ -1465,37 +1481,38 @@
  *	Read one word data from the serial ROM
  */
 
-static u16 read_srom_word(long ioaddr, int offset)
+static u16 read_srom_word(struct uli526x_board_info *db, int offset)
 {
-	int i;
+	void __iomem *ioaddr = db->ioaddr;
 	u16 srom_data = 0;
-	long cr9_ioaddr = ioaddr + DCR9;
+	int i;
 
-	outl(CR9_SROM_READ, cr9_ioaddr);
-	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+	uw32(DCR9, CR9_SROM_READ);
+	uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 
 	/* Send the Read Command 110b */
-	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
-	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+	srom_clk_write(db, SROM_DATA_1);
+	srom_clk_write(db, SROM_DATA_1);
+	srom_clk_write(db, SROM_DATA_0);
 
 	/* Send the offset */
 	for (i = 5; i >= 0; i--) {
 		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
-		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+		srom_clk_write(db, srom_data);
 	}
 
-	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+	uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 
 	for (i = 16; i > 0; i--) {
-		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+		uw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
 		udelay(5);
-		srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
-		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+		srom_data = (srom_data << 1) |
+			    ((ur32(DCR9) & CR9_CRDOUT) ? 1 : 0);
+		uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
 		udelay(5);
 	}
 
-	outl(CR9_SROM_READ, cr9_ioaddr);
+	uw32(DCR9, CR9_SROM_READ);
 	return srom_data;
 }
 
@@ -1506,15 +1523,16 @@
 
 static u8 uli526x_sense_speed(struct uli526x_board_info * db)
 {
+	struct uli_phy_ops *phy = &db->phy;
 	u8 ErrFlag = 0;
 	u16 phy_mode;
 
-	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
-	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+	phy_mode = phy->read(db, db->phy_addr, 1);
+	phy_mode = phy->read(db, db->phy_addr, 1);
 
 	if ( (phy_mode & 0x24) == 0x24 ) {
 
-		phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
+		phy_mode = ((phy->read(db, db->phy_addr, 5) & 0x01e0)<<7);
 		if(phy_mode&0x8000)
 			phy_mode = 0x8000;
 		else if(phy_mode&0x4000)
@@ -1549,10 +1567,11 @@
 
 static void uli526x_set_phyxcer(struct uli526x_board_info *db)
 {
+	struct uli_phy_ops *phy = &db->phy;
 	u16 phy_reg;
 
 	/* Phyxcer capability setting */
-	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
+	phy_reg = phy->read(db, db->phy_addr, 4) & ~0x01e0;
 
 	if (db->media_mode & ULI526X_AUTO) {
 		/* AUTO Mode */
@@ -1573,10 +1592,10 @@
 		phy_reg|=db->PHY_reg4;
 		db->media_mode|=ULI526X_AUTO;
 	}
-	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
+	phy->write(db, db->phy_addr, 4, phy_reg);
 
  	/* Restart Auto-Negotiation */
-	phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
+	phy->write(db, db->phy_addr, 0, 0x1200);
 	udelay(50);
 }
 
@@ -1590,6 +1609,7 @@
 
 static void uli526x_process_mode(struct uli526x_board_info *db)
 {
+	struct uli_phy_ops *phy = &db->phy;
 	u16 phy_reg;
 
 	/* Full Duplex Mode Check */
@@ -1601,10 +1621,10 @@
 	update_cr6(db->cr6_data, db->ioaddr);
 
 	/* 10/100M phyxcer force mode need */
-	if ( !(db->media_mode & 0x8)) {
+	if (!(db->media_mode & 0x8)) {
 		/* Forece Mode */
-		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
-		if ( !(phy_reg & 0x1) ) {
+		phy_reg = phy->read(db, db->phy_addr, 6);
+		if (!(phy_reg & 0x1)) {
 			/* parter without N-Way capability */
 			phy_reg = 0x0;
 			switch(db->op_mode) {
@@ -1613,148 +1633,126 @@
 			case ULI526X_100MHF: phy_reg = 0x2000; break;
 			case ULI526X_100MFD: phy_reg = 0x2100; break;
 			}
-			phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+			phy->write(db, db->phy_addr, 0, phy_reg);
 		}
 	}
 }
 
 
-/*
- *	Write a word to Phy register
- */
-
-static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+/* M5261/M5263 Chip */
+static void phy_writeby_cr9(struct uli526x_board_info *db, u8 phy_addr,
+			    u8 offset, u16 phy_data)
 {
 	u16 i;
-	unsigned long ioaddr;
-
-	if(chip_id == PCI_ULI5263_ID)
-	{
-		phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
-		return;
-	}
-	/* M5261/M5263 Chip */
-	ioaddr = iobase + DCR9;
 
 	/* Send 33 synchronization clock to Phy controller */
 	for (i = 0; i < 35; i++)
-		phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+		phy_write_1bit(db, PHY_DATA_1);
 
 	/* Send start command(01) to Phy */
-	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
-	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+	phy_write_1bit(db, PHY_DATA_0);
+	phy_write_1bit(db, PHY_DATA_1);
 
 	/* Send write command(01) to Phy */
-	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
-	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+	phy_write_1bit(db, PHY_DATA_0);
+	phy_write_1bit(db, PHY_DATA_1);
 
 	/* Send Phy address */
 	for (i = 0x10; i > 0; i = i >> 1)
-		phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+		phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
 
 	/* Send register address */
 	for (i = 0x10; i > 0; i = i >> 1)
-		phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+		phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
 
 	/* written trasnition */
-	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
-	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+	phy_write_1bit(db, PHY_DATA_1);
+	phy_write_1bit(db, PHY_DATA_0);
 
 	/* Write a word data to PHY controller */
-	for ( i = 0x8000; i > 0; i >>= 1)
-		phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
-
+	for (i = 0x8000; i > 0; i >>= 1)
+		phy_write_1bit(db, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
 }
 
-
-/*
- *	Read a word data from phy register
- */
-
-static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+static u16 phy_readby_cr9(struct uli526x_board_info *db, u8 phy_addr, u8 offset)
 {
-	int i;
 	u16 phy_data;
-	unsigned long ioaddr;
-
-	if(chip_id == PCI_ULI5263_ID)
-		return phy_readby_cr10(iobase, phy_addr, offset);
-	/* M5261/M5263 Chip */
-	ioaddr = iobase + DCR9;
+	int i;
 
 	/* Send 33 synchronization clock to Phy controller */
 	for (i = 0; i < 35; i++)
-		phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+		phy_write_1bit(db, PHY_DATA_1);
 
 	/* Send start command(01) to Phy */
-	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
-	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+	phy_write_1bit(db, PHY_DATA_0);
+	phy_write_1bit(db, PHY_DATA_1);
 
 	/* Send read command(10) to Phy */
-	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
-	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+	phy_write_1bit(db, PHY_DATA_1);
+	phy_write_1bit(db, PHY_DATA_0);
 
 	/* Send Phy address */
 	for (i = 0x10; i > 0; i = i >> 1)
-		phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+		phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
 
 	/* Send register address */
 	for (i = 0x10; i > 0; i = i >> 1)
-		phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+		phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
 
 	/* Skip transition state */
-	phy_read_1bit(ioaddr, chip_id);
+	phy_read_1bit(db);
 
 	/* read 16bit data */
 	for (phy_data = 0, i = 0; i < 16; i++) {
 		phy_data <<= 1;
-		phy_data |= phy_read_1bit(ioaddr, chip_id);
+		phy_data |= phy_read_1bit(db);
 	}
 
 	return phy_data;
 }
 
-static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
+static u16 phy_readby_cr10(struct uli526x_board_info *db, u8 phy_addr,
+			   u8 offset)
 {
-	unsigned long ioaddr,cr10_value;
+	void __iomem *ioaddr = db->ioaddr;
+	u32 cr10_value = phy_addr;
 
-	ioaddr = iobase + DCR10;
-	cr10_value = phy_addr;
-	cr10_value = (cr10_value<<5) + offset;
-	cr10_value = (cr10_value<<16) + 0x08000000;
-	outl(cr10_value,ioaddr);
+	cr10_value = (cr10_value <<  5) + offset;
+	cr10_value = (cr10_value << 16) + 0x08000000;
+	uw32(DCR10, cr10_value);
 	udelay(1);
-	while(1)
-	{
-		cr10_value = inl(ioaddr);
-		if(cr10_value&0x10000000)
+	while (1) {
+		cr10_value = ur32(DCR10);
+		if (cr10_value & 0x10000000)
 			break;
 	}
 	return cr10_value & 0x0ffff;
 }
 
-static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
+static void phy_writeby_cr10(struct uli526x_board_info *db, u8 phy_addr,
+			     u8 offset, u16 phy_data)
 {
-	unsigned long ioaddr,cr10_value;
+	void __iomem *ioaddr = db->ioaddr;
+	u32 cr10_value = phy_addr;
 
-	ioaddr = iobase + DCR10;
-	cr10_value = phy_addr;
-	cr10_value = (cr10_value<<5) + offset;
-	cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
-	outl(cr10_value,ioaddr);
+	cr10_value = (cr10_value <<  5) + offset;
+	cr10_value = (cr10_value << 16) + 0x04000000 + phy_data;
+	uw32(DCR10, cr10_value);
 	udelay(1);
 }
 /*
  *	Write one bit data to Phy Controller
  */
 
-static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
+static void phy_write_1bit(struct uli526x_board_info *db, u32 data)
 {
-	outl(phy_data , ioaddr);			/* MII Clock Low */
+	void __iomem *ioaddr = db->ioaddr;
+
+	uw32(DCR9, data);		/* MII Clock Low */
 	udelay(1);
-	outl(phy_data  | MDCLKH, ioaddr);	/* MII Clock High */
+	uw32(DCR9, data | MDCLKH);	/* MII Clock High */
 	udelay(1);
-	outl(phy_data , ioaddr);			/* MII Clock Low */
+	uw32(DCR9, data);		/* MII Clock Low */
 	udelay(1);
 }
 
@@ -1763,14 +1761,15 @@
  *	Read one bit phy data from PHY controller
  */
 
-static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
+static u16 phy_read_1bit(struct uli526x_board_info *db)
 {
+	void __iomem *ioaddr = db->ioaddr;
 	u16 phy_data;
 
-	outl(0x50000 , ioaddr);
+	uw32(DCR9, 0x50000);
 	udelay(1);
-	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
-	outl(0x40000 , ioaddr);
+	phy_data = (ur32(DCR9) >> 19) & 0x1;
+	uw32(DCR9, 0x40000);
 	udelay(1);
 
 	return phy_data;
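
phy_write_1bit()/phy_read_1bit() clock one MII management bit per call: the data value is driven with MDC low, MDC is raised and dropped with 1 us pauses, and reads sample bit 19 of DCR9. A clause-22 register read built from such single-bit helpers looks roughly as below; mdio_out()/mdio_in() are placeholders for the DCR9 accesses, and the 32-bit preamble is the standard minimum (the driver's loop sends 35 ones).

	/* Sketch: a clause-22 MDIO read assembled from single-bit helpers,
	 * mirroring phy_readby_cr9().
	 */
	#include <linux/types.h>

	void mdio_out(void *ctx, int bit);	/* MDC low, drive MDIO, pulse MDC */
	int  mdio_in(void *ctx);		/* pulse MDC, sample MDIO */

	static u16 mdio_read_c22(void *ctx, u8 phy_addr, u8 reg)
	{
		u16 val = 0;
		int i;

		for (i = 0; i < 32; i++)	/* preamble: at least 32 ones */
			mdio_out(ctx, 1);

		mdio_out(ctx, 0); mdio_out(ctx, 1);	/* start: 01 */
		mdio_out(ctx, 1); mdio_out(ctx, 0);	/* read opcode: 10 */

		for (i = 4; i >= 0; i--)		/* PHY address, MSB first */
			mdio_out(ctx, (phy_addr >> i) & 1);
		for (i = 4; i >= 0; i--)		/* register address */
			mdio_out(ctx, (reg >> i) & 1);

		mdio_in(ctx);				/* skip the turnaround bit */

		for (i = 0; i < 16; i++)
			val = (val << 1) | (mdio_in(ctx) & 1);

		return val;
	}
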
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 2ac6fff..4d1ffca 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -400,9 +400,6 @@
 	   No hold time required! */
 	iowrite32(0x00000001, ioaddr + PCIBusCfg);
 
-	dev->base_addr = (unsigned long)ioaddr;
-	dev->irq = irq;
-
 	np = netdev_priv(dev);
 	np->pci_dev = pdev;
 	np->chip_id = chip_idx;
@@ -635,17 +632,18 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base_addr;
+	const int irq = np->pci_dev->irq;
 	int i;
 
 	iowrite32(0x00000001, ioaddr + PCIBusCfg);		/* Reset */
 
 	netif_device_detach(dev);
-	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (i)
 		goto out_err;
 
 	if (debug > 1)
-		netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq);
+		netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
 
 	if((i=alloc_ringdesc(dev)))
 		goto out_err;
@@ -932,6 +930,7 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base_addr;
+	const int irq = np->pci_dev->irq;
 
 	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
 		 ioread32(ioaddr + IntrStatus));
@@ -951,7 +950,7 @@
 	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
 	printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
 
-	disable_irq(dev->irq);
+	disable_irq(irq);
 	spin_lock_irq(&np->lock);
 	/*
 	 * Under high load dirty_tx and the internal tx descriptor pointer
@@ -966,7 +965,7 @@
 	init_rxtx_rings(dev);
 	init_registers(dev);
 	spin_unlock_irq(&np->lock);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 
 	netif_wake_queue(dev);
 	dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1500,7 +1499,7 @@
 	iowrite32(0x0000, ioaddr + IntrEnable);
 	spin_unlock_irq(&np->lock);
 
-	free_irq(dev->irq, dev);
+	free_irq(np->pci_dev->irq, dev);
 	wmb();
 	netif_device_attach(dev);
 
@@ -1589,7 +1588,7 @@
 		iowrite32(0, ioaddr + IntrEnable);
 		spin_unlock_irq(&np->lock);
 
-		synchronize_irq(dev->irq);
+		synchronize_irq(np->pci_dev->irq);
 		netif_tx_disable(dev);
 
 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index fdb329f..138bf83 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -41,7 +41,9 @@
 MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
 MODULE_LICENSE("GPL");
 
-
+#define xw32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define xr32(reg)	ioread32(ioaddr + (reg))
+#define xr8(reg)	ioread8(ioaddr + (reg))
 
 /* IO registers on the card, offsets */
 #define CSR0	0x00
@@ -83,7 +85,7 @@
 
 	struct sk_buff *tx_skb[4];
 
-	unsigned long io_port;
+	void __iomem *ioaddr;
 	int open;
 
 	/* transmit_used is the rotating counter that indicates which transmit
@@ -137,7 +139,7 @@
 
 
 static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
-	{0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
+	{ PCI_VDEVICE(XIRCOM, 0x0003), },
 	{0,},
 };
 MODULE_DEVICE_TABLE(pci, xircom_pci_table);
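
The device table entry is rewritten with PCI_VDEVICE(), which fills in the vendor/device pair and PCI_ANY_ID for the subvendor/subdevice fields, so it matches exactly what the open-coded initializer matched. A short illustration; example_pci_table is hypothetical.

	/* Sketch: PCI_VDEVICE() vs. the open-coded table entry it replaces. */
	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id example_pci_table[] = {
		/* open-coded: vendor, device, subvendor, subdevice */
		{ 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID },
		/* the same match, spelled with the helper macro */
		{ PCI_VDEVICE(XIRCOM, 0x0003) },
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, example_pci_table);
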
@@ -146,9 +148,7 @@
 	.name		= "xircom_cb",
 	.id_table	= xircom_pci_table,
 	.probe		= xircom_probe,
-	.remove		= xircom_remove,
-	.suspend =NULL,
-	.resume =NULL
+	.remove		= __devexit_p(xircom_remove),
 };
 
 
@@ -192,15 +192,18 @@
  */
 static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	struct device *d = &pdev->dev;
 	struct net_device *dev = NULL;
 	struct xircom_private *private;
 	unsigned long flags;
 	unsigned short tmp16;
+	int rc;
 
 	/* First do the PCI initialisation */
 
-	if (pci_enable_device(pdev))
-		return -ENODEV;
+	rc = pci_enable_device(pdev);
+	if (rc < 0)
+		goto out;
 
 	/* disable all powermanagement */
 	pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
@@ -211,11 +214,13 @@
 	pci_read_config_word (pdev,PCI_STATUS, &tmp16);
 	pci_write_config_word (pdev, PCI_STATUS,tmp16);
 
-	if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
+	rc = pci_request_regions(pdev, "xircom_cb");
+	if (rc < 0) {
 		pr_err("%s: failed to allocate io-region\n", __func__);
-		return -ENODEV;
+		goto err_disable;
 	}
 
+	rc = -ENOMEM;
 	/*
 	   Before changing the hardware, allocate the memory.
 	   This way, we can fail gracefully if not enough memory
@@ -223,17 +228,21 @@
 	 */
 	dev = alloc_etherdev(sizeof(struct xircom_private));
 	if (!dev)
-		goto device_fail;
+		goto err_release;
 
 	private = netdev_priv(dev);
 
 	/* Allocate the send/receive buffers */
-	private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
+	private->rx_buffer = dma_alloc_coherent(d, 8192,
+						&private->rx_dma_handle,
+						GFP_KERNEL);
 	if (private->rx_buffer == NULL) {
 		pr_err("%s: no memory for rx buffer\n", __func__);
 		goto rx_buf_fail;
 	}
-	private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
+	private->tx_buffer = dma_alloc_coherent(d, 8192,
+						&private->tx_dma_handle,
+						GFP_KERNEL);
 	if (private->tx_buffer == NULL) {
 		pr_err("%s: no memory for tx buffer\n", __func__);
 		goto tx_buf_fail;
@@ -244,10 +253,13 @@
 
 	private->dev = dev;
 	private->pdev = pdev;
-	private->io_port = pci_resource_start(pdev, 0);
+
+	/* IO range. */
+	private->ioaddr = pci_iomap(pdev, 0, 0);
+	if (!private->ioaddr)
+		goto reg_fail;
+
 	spin_lock_init(&private->lock);
-	dev->irq = pdev->irq;
-	dev->base_addr = private->io_port;
 
 	initialize_card(private);
 	read_mac_address(private);
@@ -256,9 +268,10 @@
 	dev->netdev_ops = &netdev_ops;
 	pci_set_drvdata(pdev, dev);
 
-	if (register_netdev(dev)) {
+	rc = register_netdev(dev);
+	if (rc < 0) {
 		pr_err("%s: netdevice registration failed\n", __func__);
-		goto reg_fail;
+		goto err_unmap;
 	}
 
 	netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
@@ -273,17 +286,23 @@
 	spin_unlock_irqrestore(&private->lock,flags);
 
 	trigger_receive(private);
+out:
+	return rc;
 
-	return 0;
-
+err_unmap:
+	pci_iounmap(pdev, private->ioaddr);
 reg_fail:
-	kfree(private->tx_buffer);
+	pci_set_drvdata(pdev, NULL);
+	dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
 tx_buf_fail:
-	kfree(private->rx_buffer);
+	dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
 rx_buf_fail:
 	free_netdev(dev);
-device_fail:
-	return -ENODEV;
+err_release:
+	pci_release_regions(pdev);
+err_disable:
+	pci_disable_device(pdev);
+	goto out;
 }
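
Note: the reworked xircom_probe() above funnels every failure through a reverse-order unwind and a single exit label. A compressed sketch of that shape, under assumed resource names rather than the driver's own:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	rc = pci_request_regions(pdev, "example");
	if (rc < 0)
		goto err_disable;

	/* ... allocate, map, register ... */
	rc = 0;
out:
	return rc;

err_disable:			/* undo in reverse order of acquisition */
	pci_disable_device(pdev);
	goto out;
}
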
 
 
@@ -297,25 +316,28 @@
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct xircom_private *card = netdev_priv(dev);
+	struct device *d = &pdev->dev;
 
-	pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
-	pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
-
-	release_region(dev->base_addr, 128);
 	unregister_netdev(dev);
-	free_netdev(dev);
+	pci_iounmap(pdev, card->ioaddr);
 	pci_set_drvdata(pdev, NULL);
+	dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
+	dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
+	free_netdev(dev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
 }
 
 static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
 {
 	struct net_device *dev = (struct net_device *) dev_instance;
 	struct xircom_private *card = netdev_priv(dev);
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int status;
 	int i;
 
 	spin_lock(&card->lock);
-	status = inl(card->io_port+CSR5);
+	status = xr32(CSR5);
 
 #if defined DEBUG && DEBUG > 1
 	print_binary(status);
@@ -345,7 +367,7 @@
 	/* Clear all remaining interrupts */
 	status |= 0xffffffff; /* FIXME: make this clear only the
 				        real existing bits */
-	outl(status,card->io_port+CSR5);
+	xw32(CSR5, status);
 
 
 	for (i=0;i<NUMDESCRIPTORS;i++)
@@ -423,11 +445,11 @@
 static int xircom_open(struct net_device *dev)
 {
 	struct xircom_private *xp = netdev_priv(dev);
+	const int irq = xp->pdev->irq;
 	int retval;
 
-	netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n",
-		    dev->irq);
-	retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
+	netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", irq);
+	retval = request_irq(irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
 	if (retval)
 		return retval;
 
@@ -459,7 +481,7 @@
 	spin_unlock_irqrestore(&card->lock,flags);
 
 	card->open = 0;
-	free_irq(dev->irq,dev);
+	free_irq(card->pdev->irq, dev);
 
 	return 0;
 
@@ -469,35 +491,39 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xircom_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	xircom_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct xircom_private *xp = netdev_priv(dev);
+	const int irq = xp->pdev->irq;
+
+	disable_irq(irq);
+	xircom_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
 
 static void initialize_card(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned long flags;
+	u32 val;
 
 	spin_lock_irqsave(&card->lock, flags);
 
 	/* First: reset the card */
-	val = inl(card->io_port + CSR0);
+	val = xr32(CSR0);
 	val |= 0x01;		/* Software reset */
-	outl(val, card->io_port + CSR0);
+	xw32(CSR0, val);
 
 	udelay(100);		/* give the card some time to reset */
 
-	val = inl(card->io_port + CSR0);
+	val = xr32(CSR0);
 	val &= ~0x01;		/* disable Software reset */
-	outl(val, card->io_port + CSR0);
+	xw32(CSR0, val);
 
 
 	val = 0;		/* Value 0x00 is a safe and conservative value
 				   for the PCI configuration settings */
-	outl(val, card->io_port + CSR0);
+	xw32(CSR0, val);
 
 
 	disable_all_interrupts(card);
@@ -515,10 +541,9 @@
 */
 static void trigger_transmit(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 
-	val = 0;
-	outl(val, card->io_port + CSR1);
+	xw32(CSR1, 0);
 }
 
 /*
@@ -530,10 +555,9 @@
 */
 static void trigger_receive(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 
-	val = 0;
-	outl(val, card->io_port + CSR2);
+	xw32(CSR2, 0);
 }
 
 /*
@@ -542,6 +566,7 @@
 */
 static void setup_descriptors(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	u32 address;
 	int i;
 
@@ -571,7 +596,7 @@
 	wmb();
 	/* Write the receive descriptor ring address to the card */
 	address = card->rx_dma_handle;
-	outl(address, card->io_port + CSR3);	/* Receive descr list address */
+	xw32(CSR3, address);	/* Receive descr list address */
 
 
 	/* transmit descriptors */
@@ -596,7 +621,7 @@
 	wmb();
 	/* wite the transmit descriptor ring to the card */
 	address = card->tx_dma_handle;
-	outl(address, card->io_port + CSR4);	/* xmit descr list address */
+	xw32(CSR4, address);	/* xmit descr list address */
 }
 
 /*
@@ -605,11 +630,12 @@
 */
 static void remove_descriptors(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
 	val = 0;
-	outl(val, card->io_port + CSR3);	/* Receive descriptor address */
-	outl(val, card->io_port + CSR4);	/* Send descriptor address */
+	xw32(CSR3, val);	/* Receive descriptor address */
+	xw32(CSR4, val);	/* Send descriptor address */
 }
 
 /*
@@ -620,17 +646,17 @@
 */
 static int link_status_changed(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR5);	/* Status register */
-
-	if ((val & (1 << 27)) == 0)		/* no change */
+	val = xr32(CSR5);	/* Status register */
+	if (!(val & (1 << 27)))	/* no change */
 		return 0;
 
 	/* clear the event by writing a 1 to the bit in the
 	   status register. */
 	val = (1 << 27);
-	outl(val, card->io_port + CSR5);
+	xw32(CSR5, val);
 
 	return 1;
 }
@@ -642,11 +668,9 @@
 */
 static int transmit_active(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 
-	val = inl(card->io_port + CSR5);	/* Status register */
-
-	if ((val & (7 << 20)) == 0)		/* transmitter disabled */
+	if (!(xr32(CSR5) & (7 << 20)))	/* transmitter disabled */
 		return 0;
 
 	return 1;
@@ -658,11 +682,9 @@
 */
 static int receive_active(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 
-	val = inl(card->io_port + CSR5);	/* Status register */
-
-	if ((val & (7 << 17)) == 0)		/* receiver disabled */
+	if (!(xr32(CSR5) & (7 << 17)))	/* receiver disabled */
 		return 0;
 
 	return 1;
@@ -680,10 +702,11 @@
 */
 static void activate_receiver(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 	int counter;
 
-	val = inl(card->io_port + CSR6);	/* Operation mode */
+	val = xr32(CSR6);	/* Operation mode */
 
 	/* If the "active" bit is set and the receiver is already
 	   active, no need to do the expensive thing */
@@ -692,7 +715,7 @@
 
 
 	val = val & ~2;		/* disable the receiver */
-	outl(val, card->io_port + CSR6);
+	xw32(CSR6, val);
 
 	counter = 10;
 	while (counter > 0) {
@@ -706,9 +729,9 @@
 	}
 
 	/* enable the receiver */
-	val = inl(card->io_port + CSR6);	/* Operation mode */
-	val = val | 2;				/* enable the receiver */
-	outl(val, card->io_port + CSR6);
+	val = xr32(CSR6);	/* Operation mode */
+	val = val | 2;		/* enable the receiver */
+	xw32(CSR6, val);
 
 	/* now wait for the card to activate again */
 	counter = 10;
@@ -733,12 +756,13 @@
 */
 static void deactivate_receiver(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 	int counter;
 
-	val = inl(card->io_port + CSR6);	/* Operation mode */
-	val = val & ~2;				/* disable the receiver */
-	outl(val, card->io_port + CSR6);
+	val = xr32(CSR6);	/* Operation mode */
+	val = val & ~2;		/* disable the receiver */
+	xw32(CSR6, val);
 
 	counter = 10;
 	while (counter > 0) {
@@ -765,10 +789,11 @@
 */
 static void activate_transmitter(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 	int counter;
 
-	val = inl(card->io_port + CSR6);	/* Operation mode */
+	val = xr32(CSR6);	/* Operation mode */
 
 	/* If the "active" bit is set and the receiver is already
 	   active, no need to do the expensive thing */
@@ -776,7 +801,7 @@
 		return;
 
 	val = val & ~(1 << 13);	/* disable the transmitter */
-	outl(val, card->io_port + CSR6);
+	xw32(CSR6, val);
 
 	counter = 10;
 	while (counter > 0) {
@@ -791,9 +816,9 @@
 	}
 
 	/* enable the transmitter */
-	val = inl(card->io_port + CSR6);	/* Operation mode */
+	val = xr32(CSR6);	/* Operation mode */
 	val = val | (1 << 13);	/* enable the transmitter */
-	outl(val, card->io_port + CSR6);
+	xw32(CSR6, val);
 
 	/* now wait for the card to activate again */
 	counter = 10;
@@ -818,12 +843,13 @@
 */
 static void deactivate_transmitter(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 	int counter;
 
-	val = inl(card->io_port + CSR6);	/* Operation mode */
+	val = xr32(CSR6);	/* Operation mode */
 	val = val & ~2;		/* disable the transmitter */
-	outl(val, card->io_port + CSR6);
+	xw32(CSR6, val);
 
 	counter = 20;
 	while (counter > 0) {
@@ -846,11 +872,12 @@
 */
 static void enable_transmit_interrupt(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR7);	/* Interrupt enable register */
-	val |= 1;				/* enable the transmit interrupt */
-	outl(val, card->io_port + CSR7);
+	val = xr32(CSR7);	/* Interrupt enable register */
+	val |= 1;		/* enable the transmit interrupt */
+	xw32(CSR7, val);
 }
 
 
@@ -861,11 +888,12 @@
 */
 static void enable_receive_interrupt(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR7);	/* Interrupt enable register */
-	val = val | (1 << 6);			/* enable the receive interrupt */
-	outl(val, card->io_port + CSR7);
+	val = xr32(CSR7);	/* Interrupt enable register */
+	val = val | (1 << 6);	/* enable the receive interrupt */
+	xw32(CSR7, val);
 }
 
 /*
@@ -875,11 +903,12 @@
 */
 static void enable_link_interrupt(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR7);	/* Interrupt enable register */
-	val = val | (1 << 27);			/* enable the link status chage interrupt */
-	outl(val, card->io_port + CSR7);
+	val = xr32(CSR7);	/* Interrupt enable register */
+	val = val | (1 << 27);	/* enable the link status change interrupt */
+	xw32(CSR7, val);
 }
 
 
@@ -891,10 +920,9 @@
 */
 static void disable_all_interrupts(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
 
-	val = 0;				/* disable all interrupts */
-	outl(val, card->io_port + CSR7);
+	xw32(CSR7, 0);
 }
 
 /*
@@ -904,9 +932,10 @@
 */
 static void enable_common_interrupts(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR7);	/* Interrupt enable register */
+	val = xr32(CSR7);	/* Interrupt enable register */
 	val |= (1<<16); /* Normal Interrupt Summary */
 	val |= (1<<15); /* Abnormal Interrupt Summary */
 	val |= (1<<13); /* Fatal bus error */
@@ -915,7 +944,7 @@
 	val |= (1<<5);  /* Transmit Underflow */
 	val |= (1<<2);  /* Transmit Buffer Unavailable */
 	val |= (1<<1);  /* Transmit Process Stopped */
-	outl(val, card->io_port + CSR7);
+	xw32(CSR7, val);
 }
 
 /*
@@ -925,11 +954,12 @@
 */
 static int enable_promisc(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned int val;
 
-	val = inl(card->io_port + CSR6);
+	val = xr32(CSR6);
 	val = val | (1 << 6);
-	outl(val, card->io_port + CSR6);
+	xw32(CSR6, val);
 
 	return 1;
 }
@@ -944,13 +974,16 @@
 */
 static int link_status(struct xircom_private *card)
 {
-	unsigned int val;
+	void __iomem *ioaddr = card->ioaddr;
+	u8 val;
 
-	val = inb(card->io_port + CSR12);
+	val = xr8(CSR12);
 
-	if (!(val&(1<<2)))  /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
+	/* bit 2 is 0 for a 10mbit link, 1 for not a 10mbit link */
+	if (!(val & (1 << 2)))
 		return 10;
-	if (!(val&(1<<1)))  /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
+	/* bit 1 is 0 for a 100mbit link, 1 for not a 100mbit link */
+	if (!(val & (1 << 1)))
 		return 100;
 
 	/* If we get here -> no link at all */
@@ -969,29 +1002,31 @@
  */
 static void read_mac_address(struct xircom_private *card)
 {
-	unsigned char j, tuple, link, data_id, data_count;
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned long flags;
+	u8 link;
 	int i;
 
 	spin_lock_irqsave(&card->lock, flags);
 
-	outl(1 << 12, card->io_port + CSR9);	/* enable boot rom access */
+	xw32(CSR9, 1 << 12);	/* enable boot rom access */
 	for (i = 0x100; i < 0x1f7; i += link + 2) {
-		outl(i, card->io_port + CSR10);
-		tuple = inl(card->io_port + CSR9) & 0xff;
-		outl(i + 1, card->io_port + CSR10);
-		link = inl(card->io_port + CSR9) & 0xff;
-		outl(i + 2, card->io_port + CSR10);
-		data_id = inl(card->io_port + CSR9) & 0xff;
-		outl(i + 3, card->io_port + CSR10);
-		data_count = inl(card->io_port + CSR9) & 0xff;
+		u8 tuple, data_id, data_count;
+
+		xw32(CSR10, i);
+		tuple = xr32(CSR9);
+		xw32(CSR10, i + 1);
+		link = xr32(CSR9);
+		xw32(CSR10, i + 2);
+		data_id = xr32(CSR9);
+		xw32(CSR10, i + 3);
+		data_count = xr32(CSR9);
 		if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
-			/*
-			 * This is it.  We have the data we want.
-			 */
+			int j;
+
 			for (j = 0; j < 6; j++) {
-				outl(i + j + 4, card->io_port + CSR10);
-				card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff;
+				xw32(CSR10, i + j + 4);
+				card->dev->dev_addr[j] = xr32(CSR9) & 0xff;
 			}
 			break;
 		} else if (link == 0) {
@@ -1010,6 +1045,7 @@
  */
 static void transceiver_voodoo(struct xircom_private *card)
 {
+	void __iomem *ioaddr = card->ioaddr;
 	unsigned long flags;
 
 	/* disable all powermanagement */
@@ -1019,14 +1055,14 @@
 
 	spin_lock_irqsave(&card->lock, flags);
 
-	outl(0x0008, card->io_port + CSR15);
-        udelay(25);
-        outl(0xa8050000, card->io_port + CSR15);
-        udelay(25);
-        outl(0xa00f0000, card->io_port + CSR15);
-        udelay(25);
+	xw32(CSR15, 0x0008);
+	udelay(25);
+	xw32(CSR15, 0xa8050000);
+	udelay(25);
+	xw32(CSR15, 0xa00f0000);
+	udelay(25);
 
-        spin_unlock_irqrestore(&card->lock, flags);
+	spin_unlock_irqrestore(&card->lock, flags);
 
 	netif_start_queue(card->dev);
 }
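
Note: throughout the xircom_cb conversion the xw32()/xr32()/xr8() macros expand against a local ioaddr cookie obtained from pci_iomap(), so ioread*/iowrite* work whether BAR 0 is port- or memory-mapped. A small sketch of the convention; EXAMPLE_CSR0 is an illustrative offset, not a real register name.

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_CSR0	0x00			/* illustrative register offset */
#define xw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define xr32(reg)	ioread32(ioaddr + (reg))

static void example_soft_reset(void __iomem *ioaddr)
{
	u32 val = xr32(EXAMPLE_CSR0);

	xw32(EXAMPLE_CSR0, val | 0x01);		/* assert software reset */
	udelay(100);
	xw32(EXAMPLE_CSR0, val & ~0x01);	/* release it again */
}
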
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index b2dc2c8..a059f0c 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -16,6 +16,13 @@
 #include "dl2k.h"
 #include <linux/dma-mapping.h>
 
+#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
+#define dw8(reg, val)	iowrite8(val, ioaddr + (reg))
+#define dr32(reg)	ioread32(ioaddr + (reg))
+#define dr16(reg)	ioread16(ioaddr + (reg))
+#define dr8(reg)	ioread8(ioaddr + (reg))
+
 static char version[] __devinitdata =
       KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
 #define MAX_UNITS 8
@@ -49,8 +56,13 @@
 /* Enable the default interrupts */
 #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
        UpdateStats | LinkEvent)
-#define EnableInt() \
-writew(DEFAULT_INTR, ioaddr + IntEnable)
+
+static void dl2k_enable_int(struct netdev_private *np)
+{
+	void __iomem *ioaddr = np->ioaddr;
+
+	dw16(IntEnable, DEFAULT_INTR);
+}
 
 static const int max_intrloop = 50;
 static const int multicast_filter_limit = 0x40;
@@ -73,7 +85,7 @@
 static int rio_close (struct net_device *dev);
 static int find_miiphy (struct net_device *dev);
 static int parse_eeprom (struct net_device *dev);
-static int read_eeprom (long ioaddr, int eep_addr);
+static int read_eeprom (struct netdev_private *, int eep_addr);
 static int mii_wait_link (struct net_device *dev, int wait);
 static int mii_set_media (struct net_device *dev);
 static int mii_get_media (struct net_device *dev);
@@ -106,7 +118,7 @@
 	static int card_idx;
 	int chip_idx = ent->driver_data;
 	int err, irq;
-	long ioaddr;
+	void __iomem *ioaddr;
 	static int version_printed;
 	void *ring_space;
 	dma_addr_t ring_dma;
@@ -124,26 +136,29 @@
 		goto err_out_disable;
 
 	pci_set_master (pdev);
+
+	err = -ENOMEM;
+
 	dev = alloc_etherdev (sizeof (*np));
-	if (!dev) {
-		err = -ENOMEM;
+	if (!dev)
 		goto err_out_res;
-	}
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-#ifdef MEM_MAPPING
-	ioaddr = pci_resource_start (pdev, 1);
-	ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE);
-	if (!ioaddr) {
-		err = -ENOMEM;
-		goto err_out_dev;
-	}
-#else
-	ioaddr = pci_resource_start (pdev, 0);
-#endif
-	dev->base_addr = ioaddr;
-	dev->irq = irq;
 	np = netdev_priv(dev);
+
+	/* IO registers range. */
+	ioaddr = pci_iomap(pdev, 0, 0);
+	if (!ioaddr)
+		goto err_out_dev;
+	np->eeprom_addr = ioaddr;
+
+#ifdef MEM_MAPPING
+	/* MM registers range. */
+	ioaddr = pci_iomap(pdev, 1, 0);
+	if (!ioaddr)
+		goto err_out_iounmap;
+#endif
+	np->ioaddr = ioaddr;
 	np->chip_id = chip_idx;
 	np->pdev = pdev;
 	spin_lock_init (&np->tx_lock);
@@ -239,7 +254,7 @@
 		goto err_out_unmap_rx;
 
 	/* Fiber device? */
-	np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0;
+	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
 	np->link_status = 0;
 	/* Set media and reset PHY */
 	if (np->phy_media) {
@@ -276,22 +291,20 @@
 		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
 	return 0;
 
-      err_out_unmap_rx:
+err_out_unmap_rx:
 	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
-      err_out_unmap_tx:
+err_out_unmap_tx:
 	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
-      err_out_iounmap:
+err_out_iounmap:
 #ifdef MEM_MAPPING
-	iounmap ((void *) ioaddr);
-
-      err_out_dev:
+	pci_iounmap(pdev, np->ioaddr);
 #endif
+	pci_iounmap(pdev, np->eeprom_addr);
+err_out_dev:
 	free_netdev (dev);
-
-      err_out_res:
+err_out_res:
 	pci_release_regions (pdev);
-
-      err_out_disable:
+err_out_disable:
 	pci_disable_device (pdev);
 	return err;
 }
@@ -299,11 +312,9 @@
 static int
 find_miiphy (struct net_device *dev)
 {
+	struct netdev_private *np = netdev_priv(dev);
 	int i, phy_found = 0;
-	struct netdev_private *np;
-	long ioaddr;
 	np = netdev_priv(dev);
-	ioaddr = dev->base_addr;
 	np->phy_addr = 1;
 
 	for (i = 31; i >= 0; i--) {
@@ -323,26 +334,19 @@
 static int
 parse_eeprom (struct net_device *dev)
 {
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	int i, j;
-	long ioaddr = dev->base_addr;
 	u8 sromdata[256];
 	u8 *psib;
 	u32 crc;
 	PSROM_t psrom = (PSROM_t) sromdata;
-	struct netdev_private *np = netdev_priv(dev);
 
 	int cid, next;
 
-#ifdef	MEM_MAPPING
-	ioaddr = pci_resource_start (np->pdev, 0);
-#endif
-	/* Read eeprom */
-	for (i = 0; i < 128; i++) {
-		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
-	}
-#ifdef	MEM_MAPPING
-	ioaddr = dev->base_addr;
-#endif
+	for (i = 0; i < 128; i++)
+		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
+
 	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
 		/* Check CRC */
 		crc = ~ether_crc_le (256 - 4, sromdata);
@@ -378,8 +382,7 @@
 			return 0;
 		case 2:	/* Duplex Polarity */
 			np->duplex_polarity = psib[i];
-			writeb (readb (ioaddr + PhyCtrl) | psib[i],
-				ioaddr + PhyCtrl);
+			dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
 			break;
 		case 3:	/* Wake Polarity */
 			np->wake_polarity = psib[i];
@@ -407,59 +410,57 @@
 rio_open (struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = np->ioaddr;
+	const int irq = np->pdev->irq;
 	int i;
 	u16 macctrl;
 
-	i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
 	if (i)
 		return i;
 
 	/* Reset all logic functions */
-	writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset,
-		ioaddr + ASICCtrl + 2);
+	dw16(ASICCtrl + 2,
+	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
 	mdelay(10);
 
 	/* DebugCtrl bit 4, 5, 9 must set */
-	writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl);
+	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
 
 	/* Jumbo frame */
 	if (np->jumbo != 0)
-		writew (MAX_JUMBO+14, ioaddr + MaxFrameSize);
+		dw16(MaxFrameSize, MAX_JUMBO+14);
 
 	alloc_list (dev);
 
 	/* Get station address */
 	for (i = 0; i < 6; i++)
-		writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);
+		dw8(StationAddr0 + i, dev->dev_addr[i]);
 
 	set_multicast (dev);
 	if (np->coalesce) {
-		writel (np->rx_coalesce | np->rx_timeout << 16,
-			ioaddr + RxDMAIntCtrl);
+		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
 	}
 	/* Set RIO to poll every N*320nsec. */
-	writeb (0x20, ioaddr + RxDMAPollPeriod);
-	writeb (0xff, ioaddr + TxDMAPollPeriod);
-	writeb (0x30, ioaddr + RxDMABurstThresh);
-	writeb (0x30, ioaddr + RxDMAUrgentThresh);
-	writel (0x0007ffff, ioaddr + RmonStatMask);
+	dw8(RxDMAPollPeriod, 0x20);
+	dw8(TxDMAPollPeriod, 0xff);
+	dw8(RxDMABurstThresh, 0x30);
+	dw8(RxDMAUrgentThresh, 0x30);
+	dw32(RmonStatMask, 0x0007ffff);
 	/* clear statistics */
 	clear_stats (dev);
 
 	/* VLAN supported */
 	if (np->vlan) {
 		/* priority field in RxDMAIntCtrl  */
-		writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
-			ioaddr + RxDMAIntCtrl);
+		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
 		/* VLANId */
-		writew (np->vlan, ioaddr + VLANId);
+		dw16(VLANId, np->vlan);
 		/* Length/Type should be 0x8100 */
-		writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag);
+		dw32(VLANTag, 0x8100 << 16 | np->vlan);
 		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
 		   VLAN information tagged by TFC' VID, CFI fields. */
-		writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging,
-			ioaddr + MACCtrl);
+		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
 	}
 
 	init_timer (&np->timer);
@@ -469,20 +470,18 @@
 	add_timer (&np->timer);
 
 	/* Start Tx/Rx */
-	writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
-			ioaddr + MACCtrl);
+	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);
 
 	macctrl = 0;
 	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
 	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
 	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
 	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
-	writew(macctrl,	ioaddr + MACCtrl);
+	dw16(MACCtrl, macctrl);
 
 	netif_start_queue (dev);
 
-	/* Enable default interrupts */
-	EnableInt ();
+	dl2k_enable_int(np);
 	return 0;
 }
 
@@ -533,10 +532,11 @@
 static void
 rio_tx_timeout (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 
 	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
-		dev->name, readl (ioaddr + TxStatus));
+		dev->name, dr32(TxStatus));
 	rio_free_tx(dev, 0);
 	dev->if_port = 0;
 	dev->trans_start = jiffies; /* prevent tx timeout */
@@ -547,6 +547,7 @@
 alloc_list (struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	int i;
 
 	np->cur_rx = np->cur_tx = 0;
@@ -594,24 +595,23 @@
 	}
 
 	/* Set RFDListPtr */
-	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
-	writel (0, dev->base_addr + RFDListPtr1);
+	dw32(RFDListPtr0, np->rx_ring_dma);
+	dw32(RFDListPtr1, 0);
 }
 
 static netdev_tx_t
 start_xmit (struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	struct netdev_desc *txdesc;
 	unsigned entry;
-	u32 ioaddr;
 	u64 tfc_vlan_tag = 0;
 
 	if (np->link_status == 0) {	/* Link Down */
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
-	ioaddr = dev->base_addr;
 	entry = np->cur_tx % TX_RING_SIZE;
 	np->tx_skbuff[entry] = skb;
 	txdesc = &np->tx_ring[entry];
@@ -646,9 +646,9 @@
 					      (1 << FragCountShift));
 
 	/* TxDMAPollNow */
-	writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
+	dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
 	/* Schedule ISR */
-	writel(10000, ioaddr + CountDown);
+	dw32(CountDown, 10000);
 	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
 	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
 			< TX_QUEUE_LEN - 1 && np->speed != 10) {
@@ -658,10 +658,10 @@
 	}
 
 	/* The first TFDListPtr */
-	if (readl (dev->base_addr + TFDListPtr0) == 0) {
-		writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
-			dev->base_addr + TFDListPtr0);
-		writel (0, dev->base_addr + TFDListPtr1);
+	if (!dr32(TFDListPtr0)) {
+		dw32(TFDListPtr0, np->tx_ring_dma +
+		     entry * sizeof (struct netdev_desc));
+		dw32(TFDListPtr1, 0);
 	}
 
 	return NETDEV_TX_OK;
@@ -671,17 +671,15 @@
 rio_interrupt (int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
-	struct netdev_private *np;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	unsigned int_status;
-	long ioaddr;
 	int cnt = max_intrloop;
 	int handled = 0;
 
-	ioaddr = dev->base_addr;
-	np = netdev_priv(dev);
 	while (1) {
-		int_status = readw (ioaddr + IntStatus);
-		writew (int_status, ioaddr + IntStatus);
+		int_status = dr16(IntStatus);
+		dw16(IntStatus, int_status);
 		int_status &= DEFAULT_INTR;
 		if (int_status == 0 || --cnt < 0)
 			break;
@@ -692,7 +690,7 @@
 		/* TxDMAComplete interrupt */
 		if ((int_status & (TxDMAComplete|IntRequested))) {
 			int tx_status;
-			tx_status = readl (ioaddr + TxStatus);
+			tx_status = dr32(TxStatus);
 			if (tx_status & 0x01)
 				tx_error (dev, tx_status);
 			/* Free used tx skbuffs */
@@ -705,7 +703,7 @@
 			rio_error (dev, int_status);
 	}
 	if (np->cur_tx != np->old_tx)
-		writel (100, ioaddr + CountDown);
+		dw32(CountDown, 100);
 	return IRQ_RETVAL(handled);
 }
 
@@ -765,13 +763,11 @@
 static void
 tx_error (struct net_device *dev, int tx_status)
 {
-	struct netdev_private *np;
-	long ioaddr = dev->base_addr;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	int frame_id;
 	int i;
 
-	np = netdev_priv(dev);
-
 	frame_id = (tx_status & 0xffff0000);
 	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
 		dev->name, tx_status, frame_id);
@@ -779,23 +775,21 @@
 	/* Ttransmit Underrun */
 	if (tx_status & 0x10) {
 		np->stats.tx_fifo_errors++;
-		writew (readw (ioaddr + TxStartThresh) + 0x10,
-			ioaddr + TxStartThresh);
+		dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
 		/* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
-		writew (TxReset | DMAReset | FIFOReset | NetworkReset,
-			ioaddr + ASICCtrl + 2);
+		dw16(ASICCtrl + 2,
+		     TxReset | DMAReset | FIFOReset | NetworkReset);
 		/* Wait for ResetBusy bit clear */
 		for (i = 50; i > 0; i--) {
-			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+			if (!(dr16(ASICCtrl + 2) & ResetBusy))
 				break;
 			mdelay (1);
 		}
 		rio_free_tx (dev, 1);
 		/* Reset TFDListPtr */
-		writel (np->tx_ring_dma +
-			np->old_tx * sizeof (struct netdev_desc),
-			dev->base_addr + TFDListPtr0);
-		writel (0, dev->base_addr + TFDListPtr1);
+		dw32(TFDListPtr0, np->tx_ring_dma +
+		     np->old_tx * sizeof (struct netdev_desc));
+		dw32(TFDListPtr1, 0);
 
 		/* Let TxStartThresh stay default value */
 	}
@@ -803,10 +797,10 @@
 	if (tx_status & 0x04) {
 		np->stats.tx_fifo_errors++;
 		/* TxReset and clear FIFO */
-		writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
+		dw16(ASICCtrl + 2, TxReset | FIFOReset);
 		/* Wait reset done */
 		for (i = 50; i > 0; i--) {
-			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+			if (!(dr16(ASICCtrl + 2) & ResetBusy))
 				break;
 			mdelay (1);
 		}
@@ -821,7 +815,7 @@
 		np->stats.collisions++;
 #endif
 	/* Restart the Tx */
-	writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
+	dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
 }
 
 static int
@@ -931,8 +925,8 @@
 static void
 rio_error (struct net_device *dev, int int_status)
 {
-	long ioaddr = dev->base_addr;
 	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	u16 macctrl;
 
 	/* Link change event */
@@ -954,7 +948,7 @@
 				TxFlowControlEnable : 0;
 			macctrl |= (np->rx_flow) ?
 				RxFlowControlEnable : 0;
-			writew(macctrl,	ioaddr + MACCtrl);
+			dw16(MACCtrl, macctrl);
 			np->link_status = 1;
 			netif_carrier_on(dev);
 		} else {
@@ -974,7 +968,7 @@
 	if (int_status & HostError) {
 		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
 			dev->name, int_status);
-		writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
+		dw16(ASICCtrl + 2, GlobalReset | HostReset);
 		mdelay (500);
 	}
 }
@@ -982,8 +976,8 @@
 static struct net_device_stats *
 get_stats (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 #ifdef MEM_MAPPING
 	int i;
 #endif
@@ -992,106 +986,107 @@
 	/* All statistics registers need to be acknowledged,
 	   else statistic overflow could cause problems */
 
-	np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
-	np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
-	np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
-	np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
+	np->stats.rx_packets += dr32(FramesRcvOk);
+	np->stats.tx_packets += dr32(FramesXmtOk);
+	np->stats.rx_bytes += dr32(OctetRcvOk);
+	np->stats.tx_bytes += dr32(OctetXmtOk);
 
-	np->stats.multicast = readl (ioaddr + McstFramesRcvdOk);
-	np->stats.collisions += readl (ioaddr + SingleColFrames)
-			     +  readl (ioaddr + MultiColFrames);
+	np->stats.multicast = dr32(McstFramesRcvdOk);
+	np->stats.collisions += dr32(SingleColFrames)
+			     +  dr32(MultiColFrames);
 
 	/* detailed tx errors */
-	stat_reg = readw (ioaddr + FramesAbortXSColls);
+	stat_reg = dr16(FramesAbortXSColls);
 	np->stats.tx_aborted_errors += stat_reg;
 	np->stats.tx_errors += stat_reg;
 
-	stat_reg = readw (ioaddr + CarrierSenseErrors);
+	stat_reg = dr16(CarrierSenseErrors);
 	np->stats.tx_carrier_errors += stat_reg;
 	np->stats.tx_errors += stat_reg;
 
 	/* Clear all other statistic register. */
-	readl (ioaddr + McstOctetXmtOk);
-	readw (ioaddr + BcstFramesXmtdOk);
-	readl (ioaddr + McstFramesXmtdOk);
-	readw (ioaddr + BcstFramesRcvdOk);
-	readw (ioaddr + MacControlFramesRcvd);
-	readw (ioaddr + FrameTooLongErrors);
-	readw (ioaddr + InRangeLengthErrors);
-	readw (ioaddr + FramesCheckSeqErrors);
-	readw (ioaddr + FramesLostRxErrors);
-	readl (ioaddr + McstOctetXmtOk);
-	readl (ioaddr + BcstOctetXmtOk);
-	readl (ioaddr + McstFramesXmtdOk);
-	readl (ioaddr + FramesWDeferredXmt);
-	readl (ioaddr + LateCollisions);
-	readw (ioaddr + BcstFramesXmtdOk);
-	readw (ioaddr + MacControlFramesXmtd);
-	readw (ioaddr + FramesWEXDeferal);
+	dr32(McstOctetXmtOk);
+	dr16(BcstFramesXmtdOk);
+	dr32(McstFramesXmtdOk);
+	dr16(BcstFramesRcvdOk);
+	dr16(MacControlFramesRcvd);
+	dr16(FrameTooLongErrors);
+	dr16(InRangeLengthErrors);
+	dr16(FramesCheckSeqErrors);
+	dr16(FramesLostRxErrors);
+	dr32(McstOctetXmtOk);
+	dr32(BcstOctetXmtOk);
+	dr32(McstFramesXmtdOk);
+	dr32(FramesWDeferredXmt);
+	dr32(LateCollisions);
+	dr16(BcstFramesXmtdOk);
+	dr16(MacControlFramesXmtd);
+	dr16(FramesWEXDeferal);
 
 #ifdef MEM_MAPPING
 	for (i = 0x100; i <= 0x150; i += 4)
-		readl (ioaddr + i);
+		dr32(i);
 #endif
-	readw (ioaddr + TxJumboFrames);
-	readw (ioaddr + RxJumboFrames);
-	readw (ioaddr + TCPCheckSumErrors);
-	readw (ioaddr + UDPCheckSumErrors);
-	readw (ioaddr + IPCheckSumErrors);
+	dr16(TxJumboFrames);
+	dr16(RxJumboFrames);
+	dr16(TCPCheckSumErrors);
+	dr16(UDPCheckSumErrors);
+	dr16(IPCheckSumErrors);
 	return &np->stats;
 }
 
 static int
 clear_stats (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 #ifdef MEM_MAPPING
 	int i;
 #endif
 
 	/* All statistics registers need to be acknowledged,
 	   else statistic overflow could cause problems */
-	readl (ioaddr + FramesRcvOk);
-	readl (ioaddr + FramesXmtOk);
-	readl (ioaddr + OctetRcvOk);
-	readl (ioaddr + OctetXmtOk);
+	dr32(FramesRcvOk);
+	dr32(FramesXmtOk);
+	dr32(OctetRcvOk);
+	dr32(OctetXmtOk);
 
-	readl (ioaddr + McstFramesRcvdOk);
-	readl (ioaddr + SingleColFrames);
-	readl (ioaddr + MultiColFrames);
-	readl (ioaddr + LateCollisions);
+	dr32(McstFramesRcvdOk);
+	dr32(SingleColFrames);
+	dr32(MultiColFrames);
+	dr32(LateCollisions);
 	/* detailed rx errors */
-	readw (ioaddr + FrameTooLongErrors);
-	readw (ioaddr + InRangeLengthErrors);
-	readw (ioaddr + FramesCheckSeqErrors);
-	readw (ioaddr + FramesLostRxErrors);
+	dr16(FrameTooLongErrors);
+	dr16(InRangeLengthErrors);
+	dr16(FramesCheckSeqErrors);
+	dr16(FramesLostRxErrors);
 
 	/* detailed tx errors */
-	readw (ioaddr + FramesAbortXSColls);
-	readw (ioaddr + CarrierSenseErrors);
+	dr16(FramesAbortXSColls);
+	dr16(CarrierSenseErrors);
 
 	/* Clear all other statistic register. */
-	readl (ioaddr + McstOctetXmtOk);
-	readw (ioaddr + BcstFramesXmtdOk);
-	readl (ioaddr + McstFramesXmtdOk);
-	readw (ioaddr + BcstFramesRcvdOk);
-	readw (ioaddr + MacControlFramesRcvd);
-	readl (ioaddr + McstOctetXmtOk);
-	readl (ioaddr + BcstOctetXmtOk);
-	readl (ioaddr + McstFramesXmtdOk);
-	readl (ioaddr + FramesWDeferredXmt);
-	readw (ioaddr + BcstFramesXmtdOk);
-	readw (ioaddr + MacControlFramesXmtd);
-	readw (ioaddr + FramesWEXDeferal);
+	dr32(McstOctetXmtOk);
+	dr16(BcstFramesXmtdOk);
+	dr32(McstFramesXmtdOk);
+	dr16(BcstFramesRcvdOk);
+	dr16(MacControlFramesRcvd);
+	dr32(McstOctetXmtOk);
+	dr32(BcstOctetXmtOk);
+	dr32(McstFramesXmtdOk);
+	dr32(FramesWDeferredXmt);
+	dr16(BcstFramesXmtdOk);
+	dr16(MacControlFramesXmtd);
+	dr16(FramesWEXDeferal);
 #ifdef MEM_MAPPING
 	for (i = 0x100; i <= 0x150; i += 4)
-		readl (ioaddr + i);
+		dr32(i);
 #endif
-	readw (ioaddr + TxJumboFrames);
-	readw (ioaddr + RxJumboFrames);
-	readw (ioaddr + TCPCheckSumErrors);
-	readw (ioaddr + UDPCheckSumErrors);
-	readw (ioaddr + IPCheckSumErrors);
+	dr16(TxJumboFrames);
+	dr16(RxJumboFrames);
+	dr16(TCPCheckSumErrors);
+	dr16(UDPCheckSumErrors);
+	dr16(IPCheckSumErrors);
 	return 0;
 }
 
@@ -1114,10 +1109,10 @@
 static void
 set_multicast (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	u32 hash_table[2];
 	u16 rx_mode = 0;
-	struct netdev_private *np = netdev_priv(dev);
 
 	hash_table[0] = hash_table[1] = 0;
 	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
@@ -1153,9 +1148,9 @@
 		rx_mode |= ReceiveVLANMatch;
 	}
 
-	writel (hash_table[0], ioaddr + HashTable0);
-	writel (hash_table[1], ioaddr + HashTable1);
-	writew (rx_mode, ioaddr + ReceiveMode);
+	dw32(HashTable0, hash_table[0]);
+	dw32(HashTable1, hash_table[1]);
+	dw16(ReceiveMode, rx_mode);
 }
 
 static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1259,55 +1254,21 @@
 {
 	int phy_addr;
 	struct netdev_private *np = netdev_priv(dev);
-	struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
-
-	struct netdev_desc *desc;
-	int i;
+	struct mii_ioctl_data *miidata = if_mii(rq);
 
 	phy_addr = np->phy_addr;
 	switch (cmd) {
-	case SIOCDEVPRIVATE:
+	case SIOCGMIIPHY:
+		miidata->phy_id = phy_addr;
 		break;
-
-	case SIOCDEVPRIVATE + 1:
-		miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
+	case SIOCGMIIREG:
+		miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
 		break;
-	case SIOCDEVPRIVATE + 2:
-		mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
+	case SIOCSMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
 		break;
-	case SIOCDEVPRIVATE + 3:
-		break;
-	case SIOCDEVPRIVATE + 4:
-		break;
-	case SIOCDEVPRIVATE + 5:
-		netif_stop_queue (dev);
-		break;
-	case SIOCDEVPRIVATE + 6:
-		netif_wake_queue (dev);
-		break;
-	case SIOCDEVPRIVATE + 7:
-		printk
-		    ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
-		     netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
-		     np->old_rx);
-		break;
-	case SIOCDEVPRIVATE + 8:
-		printk("TX ring:\n");
-		for (i = 0; i < TX_RING_SIZE; i++) {
-			desc = &np->tx_ring[i];
-			printk
-			    ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
-			     i,
-			     (u32) (np->tx_ring_dma + i * sizeof (*desc)),
-			     (u32)le64_to_cpu(desc->next_desc),
-			     (u32)le64_to_cpu(desc->status),
-			     (u32)(le64_to_cpu(desc->fraginfo) >> 32),
-			     (u32)le64_to_cpu(desc->fraginfo));
-			printk ("\n");
-		}
-		printk ("\n");
-		break;
-
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -1318,15 +1279,15 @@
 #define EEP_BUSY 0x8000
 /* Read the EEPROM word */
 /* We use I/O instruction to read/write eeprom to avoid fail on some machines */
-static int
-read_eeprom (long ioaddr, int eep_addr)
+static int read_eeprom(struct netdev_private *np, int eep_addr)
 {
+	void __iomem *ioaddr = np->eeprom_addr;
 	int i = 1000;
-	outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl);
+
+	dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
 	while (i-- > 0) {
-		if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) {
-			return inw (ioaddr + EepromData);
-		}
+		if (!(dr16(EepromCtrl) & EEP_BUSY))
+			return dr16(EepromData);
 	}
 	return 0;
 }
@@ -1336,38 +1297,40 @@
 	MII_DUPLEX = 0x08,
 };
 
-#define mii_delay() readb(ioaddr)
+#define mii_delay() dr8(PhyCtrl)
 static void
 mii_sendbit (struct net_device *dev, u32 data)
 {
-	long ioaddr = dev->base_addr + PhyCtrl;
-	data = (data) ? MII_DATA1 : 0;
-	data |= MII_WRITE;
-	data |= (readb (ioaddr) & 0xf8) | MII_WRITE;
-	writeb (data, ioaddr);
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
+
+	data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
+	dw8(PhyCtrl, data);
 	mii_delay ();
-	writeb (data | MII_CLK, ioaddr);
+	dw8(PhyCtrl, data | MII_CLK);
 	mii_delay ();
 }
 
 static int
 mii_getbit (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr + PhyCtrl;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
 	u8 data;
 
-	data = (readb (ioaddr) & 0xf8) | MII_READ;
-	writeb (data, ioaddr);
+	data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
+	dw8(PhyCtrl, data);
 	mii_delay ();
-	writeb (data | MII_CLK, ioaddr);
+	dw8(PhyCtrl, data | MII_CLK);
 	mii_delay ();
-	return ((readb (ioaddr) >> 1) & 1);
+	return (dr8(PhyCtrl) >> 1) & 1;
 }
 
 static void
 mii_send_bits (struct net_device *dev, u32 data, int len)
 {
 	int i;
+
 	for (i = len - 1; i >= 0; i--) {
 		mii_sendbit (dev, data & (1 << i));
 	}
@@ -1721,28 +1684,29 @@
 static int
 rio_close (struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
+
+	struct pci_dev *pdev = np->pdev;
 	struct sk_buff *skb;
 	int i;
 
 	netif_stop_queue (dev);
 
 	/* Disable interrupts */
-	writew (0, ioaddr + IntEnable);
+	dw16(IntEnable, 0);
 
 	/* Stop Tx and Rx logics */
-	writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl);
+	dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
 
-	free_irq (dev->irq, dev);
+	free_irq(pdev->irq, dev);
 	del_timer_sync (&np->timer);
 
 	/* Free all the skbuffs in the queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		skb = np->rx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pdev,
-					 desc_to_dma(&np->rx_ring[i]),
+			pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
 					 skb->len, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
@@ -1753,8 +1717,7 @@
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pdev,
-					 desc_to_dma(&np->tx_ring[i]),
+			pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
 					 skb->len, PCI_DMA_TODEVICE);
 			dev_kfree_skb (skb);
 			np->tx_skbuff[i] = NULL;
@@ -1778,8 +1741,9 @@
 		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
 				     np->tx_ring_dma);
 #ifdef MEM_MAPPING
-		iounmap ((char *) (dev->base_addr));
+		pci_iounmap(pdev, np->ioaddr);
 #endif
+		pci_iounmap(pdev, np->eeprom_addr);
 		free_netdev (dev);
 		pci_release_regions (pdev);
 		pci_disable_device (pdev);
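
Note: the rio_ioctl() rewrite above drops the driver-private SIOCDEVPRIVATE commands in favour of the standard MII ioctls and if_mii(). A hedged sketch of that dispatch; example_mii_read()/example_mii_write() are placeholders for the driver's own PHY accessors, and the fixed phy_id is illustrative only.

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/sockios.h>

static int example_mii_read(struct net_device *dev, int phy, int reg)
{
	return 0;	/* placeholder */
}

static void example_mii_write(struct net_device *dev, int phy, int reg, u16 val)
{
}

static int example_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = 1;	/* real driver reports its probed PHY address */
		break;
	case SIOCGMIIREG:
		data->val_out = example_mii_read(dev, data->phy_id, data->reg_num);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		example_mii_write(dev, data->phy_id, data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
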
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index ba0adca..3699565 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -42,23 +42,6 @@
 #define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
 #define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
 
-/* This driver was written to use PCI memory space, however x86-oriented
-   hardware often uses I/O space accesses. */
-#ifndef MEM_MAPPING
-#undef readb
-#undef readw
-#undef readl
-#undef writeb
-#undef writew
-#undef writel
-#define readb inb
-#define readw inw
-#define readl inl
-#define writeb outb
-#define writew outw
-#define writel outl
-#endif
-
 /* Offsets to the device registers.
    Unlike software-only systems, device drivers interact with complex hardware.
    It's not useful to define symbolic names for every register bit in the
@@ -365,13 +348,6 @@
 	char *data;
 };
 
-struct mii_data {
-	__u16 reserved;
-	__u16 reg_num;
-	__u16 in_value;
-	__u16 out_value;
-};
-
 /* The Rx and Tx buffer descriptors. */
 struct netdev_desc {
 	__le64 next_desc;
@@ -391,6 +367,8 @@
 	dma_addr_t tx_ring_dma;
 	dma_addr_t rx_ring_dma;
 	struct pci_dev *pdev;
+	void __iomem *ioaddr;
+	void __iomem *eeprom_addr;
 	spinlock_t tx_lock;
 	spinlock_t rx_lock;
 	struct net_device_stats stats;
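
Note: dropping the readl/inl remapping from dl2k.h works because pci_iomap() hands back a cookie that ioread*/iowrite* can decode for either port I/O or MMIO. A sketch of the BAR setup this relies on, mirroring the probe rework above with abbreviated names:

#include <linux/errno.h>
#include <linux/pci.h>

/* Sketch only: BAR 0 carries the EEPROM/port registers, BAR 1 the optional
 * memory-mapped window, as in the dl2k probe change above. */
static int example_map_bars(struct pci_dev *pdev,
			    void __iomem **eeprom_addr, void __iomem **ioaddr)
{
	*eeprom_addr = pci_iomap(pdev, 0, 0);	/* I/O-space BAR */
	if (!*eeprom_addr)
		return -ENOMEM;

#ifdef MEM_MAPPING
	*ioaddr = pci_iomap(pdev, 1, 0);	/* MMIO BAR */
	if (!*ioaddr) {
		pci_iounmap(pdev, *eeprom_addr);
		return -ENOMEM;
	}
#else
	*ioaddr = *eeprom_addr;			/* same cookie works either way */
#endif
	return 0;
}
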
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d783f4f..d7bb52a 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -522,9 +522,6 @@
 			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-	dev->base_addr = (unsigned long)ioaddr;
-	dev->irq = irq;
-
 	np = netdev_priv(dev);
 	np->base = ioaddr;
 	np->pci_dev = pdev;
@@ -828,18 +825,19 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->base;
+	const int irq = np->pci_dev->irq;
 	unsigned long flags;
 	int i;
 
 	/* Do we need to reset the chip??? */
 
-	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (i)
 		return i;
 
 	if (netif_msg_ifup(np))
-		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-			   dev->name, dev->irq);
+		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
+
 	init_ring(dev);
 
 	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
@@ -1814,7 +1812,7 @@
 	}
 #endif /* __i386__ debugging only */
 
-	free_irq(dev->irq, dev);
+	free_irq(np->pci_dev->irq, dev);
 
 	del_timer_sync(&np->timer);
 
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index b276469..290b26f 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -815,6 +815,7 @@
 	.set_settings		= dnet_set_settings,
 	.get_drvinfo		= dnet_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops dnet_netdev_ops = {
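
Note: hooking ethtool_op_get_ts_info into dnet's ethtool_ops gives user space a defined answer to timestamping queries (software timestamping only). The minimal wiring, as a sketch for any driver without hardware timestamping support:

#include <linux/ethtool.h>

static const struct ethtool_ops example_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	/* generic helper: reports software timestamping capabilities only */
	.get_ts_info	= ethtool_op_get_ts_info,
};
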
diff --git a/drivers/net/ethernet/emulex/benet/Makefile b/drivers/net/ethernet/emulex/benet/Makefile
index a60cd80..1a91b27 100644
--- a/drivers/net/ethernet/emulex/benet/Makefile
+++ b/drivers/net/ethernet/emulex/benet/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_BE2NET) += be2net.o
 
-be2net-y :=  be_main.o be_cmds.o be_ethtool.o
+be2net-y :=  be_main.o be_cmds.o be_ethtool.o be_roce.o
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 9576ac0..c5c4c0e 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -32,8 +32,9 @@
 #include <linux/u64_stats_sync.h>
 
 #include "be_hw.h"
+#include "be_roce.h"
 
-#define DRV_VER			"4.2.116u"
+#define DRV_VER			"4.2.220u"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
 #define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
@@ -102,7 +103,8 @@
 #define MAX_RX_QS		(MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
 
 #define MAX_TX_QS		8
-#define MAX_MSIX_VECTORS	MAX_RSS_QS
+#define MAX_ROCE_EQS		5
+#define MAX_MSIX_VECTORS	(MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */
 #define BE_TX_BUDGET		256
 #define BE_NAPI_WEIGHT		64
 #define MAX_RX_POST		BE_NAPI_WEIGHT /* Frags posted at a time */
@@ -162,6 +164,11 @@
 	index_inc(&q->head, q->len);
 }
 
+static inline void index_dec(u16 *index, u16 limit)
+{
+	*index = MODULO((*index - 1), limit);
+}
+
 static inline void queue_tail_inc(struct be_queue_info *q)
 {
 	index_inc(&q->tail, q->len);
@@ -308,11 +315,33 @@
 	u32 tx_rate;
 };
 
+enum vf_state {
+	ENABLED = 0,
+	ASSIGNED = 1
+};
+
 #define BE_FLAGS_LINK_STATUS_INIT		1
 #define BE_FLAGS_WORKER_SCHEDULED		(1 << 3)
 #define BE_UC_PMAC_COUNT		30
 #define BE_VF_UC_PMAC_COUNT		2
 
+struct phy_info {
+	u8 transceiver;
+	u8 autoneg;
+	u8 fc_autoneg;
+	u8 port_type;
+	u16 phy_type;
+	u16 interface_type;
+	u32 misc_params;
+	u16 auto_speeds_supported;
+	u16 fixed_speeds_supported;
+	int link_speed;
+	int forced_port_speed;
+	u32 dac_cable_len;
+	u32 advertising;
+	u32 supported;
+};
+
 struct be_adapter {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
@@ -377,29 +406,41 @@
 	u32 rx_fc;		/* Rx flow control */
 	u32 tx_fc;		/* Tx flow control */
 	bool stats_cmd_sent;
-	int link_speed;
-	u8 port_type;
-	u8 transceiver;
-	u8 autoneg;
 	u8 generation;		/* BladeEngine ASIC generation */
+	u32 if_type;
+	struct {
+		u8 __iomem *base;	/* Door Bell */
+		u32 size;
+		u32 total_size;
+		u64 io_addr;
+	} roce_db;
+	u32 num_msix_roce_vec;
+	struct ocrdma_dev *ocrdma_dev;
+	struct list_head entry;
+
 	u32 flash_status;
 	struct completion flash_compl;
 
-	u32 num_vfs;
-	u8 is_virtfn;
+	u32 num_vfs;		/* Number of VFs provisioned by PF driver */
+	u32 dev_num_vfs;	/* Number of VFs supported by HW */
+	u8 virtfn;
 	struct be_vf_cfg *vf_cfg;
 	bool be3_native;
 	u32 sli_family;
 	u8 hba_port_num;
 	u16 pvid;
+	struct phy_info phy;
 	u8 wol_cap;
 	bool wol;
 	u32 max_pmac_cnt;	/* Max secondary UC MACs programmable */
 	u32 uc_macs;		/* Count of secondary UC MAC programmed */
+	u32 msg_enable;
 };
 
-#define be_physfn(adapter) (!adapter->is_virtfn)
+#define be_physfn(adapter)		(!adapter->virtfn)
 #define	sriov_enabled(adapter)		(adapter->num_vfs > 0)
+#define	sriov_want(adapter)		(adapter->dev_num_vfs && num_vfs && \
+					 be_physfn(adapter))
 #define for_all_vfs(adapter, vf_cfg, i)					\
 	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
 		i++, vf_cfg++)
@@ -413,6 +454,10 @@
 #define lancer_chip(adapter)	((adapter->pdev->device == OC_DEVICE_ID3) || \
 				 (adapter->pdev->device == OC_DEVICE_ID4))
 
+#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \
+				adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
+				(adapter->function_mode & RDMA_ENABLED))
+
 extern const struct ethtool_ops be_ethtool_ops;
 
 #define msix_enabled(adapter)		(adapter->num_msix_vec > 0)
@@ -528,14 +573,6 @@
 	return val;
 }
 
-static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
-{
-	u32 sli_intf;
-
-	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
-	adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
-}
-
 static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
 {
 	u32 addr;
@@ -577,10 +614,31 @@
 	}
 }
 
+static inline bool be_type_2_3(struct be_adapter *adapter)
+{
+	return (adapter->if_type == SLI_INTF_TYPE_2 ||
+		adapter->if_type == SLI_INTF_TYPE_3) ? true : false;
+}
+
 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
 		u16 num_popped);
 extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
 extern void be_parse_stats(struct be_adapter *adapter);
 extern int be_load_fw(struct be_adapter *adapter, u8 *func);
 extern bool be_is_wol_supported(struct be_adapter *adapter);
+extern bool be_pause_supported(struct be_adapter *adapter);
+extern u32 be_get_fw_log_level(struct be_adapter *adapter);
+
+/*
+ * internal functions to initialize and clean up the roce device.
+ */
+extern void be_roce_dev_add(struct be_adapter *);
+extern void be_roce_dev_remove(struct be_adapter *);
+
+/*
+ * internal functions to open and close the roce device during ifup/ifdown.
+ */
+extern void be_roce_dev_open(struct be_adapter *);
+extern void be_roce_dev_close(struct be_adapter *);
+
 #endif				/* BE_H */
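
Note: the new index_dec() helper above steps a ring index back one slot with wraparound; be_mcc_notify_wait() later in this diff uses it to locate the most recently posted WRB from the queue head. An equivalent wrap-safe formulation, as a sketch kept separate from the driver's MODULO macro:

#include <linux/types.h>

/* Stepping back from head == 0 must land on limit - 1. */
static inline void example_index_dec(u16 *index, u16 limit)
{
	*index = (*index + limit - 1) % limit;
}
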
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 67b030d..8d06ea3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -15,6 +15,7 @@
  * Costa Mesa, CA 92626
  */
 
+#include <linux/module.h>
 #include "be.h"
 #include "be_cmds.h"
 
@@ -61,10 +62,21 @@
 	compl->flags = 0;
 }
 
+static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
+{
+	unsigned long addr;
+
+	addr = tag1;
+	addr = ((addr << 16) << 16) | tag0;
+	return (void *)addr;
+}
+
 static int be_mcc_compl_process(struct be_adapter *adapter,
-	struct be_mcc_compl *compl)
+				struct be_mcc_compl *compl)
 {
 	u16 compl_status, extd_status;
+	struct be_cmd_resp_hdr *resp_hdr;
+	u8 opcode = 0, subsystem = 0;
 
 	/* Just swap the status to host endian; mcc tag is opaquely copied
 	 * from mcc_wrb */
@@ -73,32 +85,36 @@
 	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
 				CQE_STATUS_COMPL_MASK;
 
-	if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
-		(compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
-		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
+	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+
+	if (resp_hdr) {
+		opcode = resp_hdr->opcode;
+		subsystem = resp_hdr->subsystem;
+	}
+
+	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
+	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
+	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
 		adapter->flash_status = compl_status;
 		complete(&adapter->flash_compl);
 	}
 
 	if (compl_status == MCC_STATUS_SUCCESS) {
-		if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
-			 (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
-			(compl->tag1 == CMD_SUBSYSTEM_ETH)) {
+		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
+		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
+		    (subsystem == CMD_SUBSYSTEM_ETH)) {
 			be_parse_stats(adapter);
 			adapter->stats_cmd_sent = false;
 		}
-		if (compl->tag0 ==
-				OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
-			struct be_mcc_wrb *mcc_wrb =
-				queue_index_node(&adapter->mcc_obj.q,
-						compl->tag1);
+		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
+		    subsystem == CMD_SUBSYSTEM_COMMON) {
 			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
-				embedded_payload(mcc_wrb);
+				(void *)resp_hdr;
 			adapter->drv_stats.be_on_die_temperature =
 				resp->on_die_temperature;
 		}
 	} else {
-		if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
+		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
 			be_get_temp_freq = 0;
 
 		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
@@ -108,13 +124,13 @@
 		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
 			dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
 				"permitted to execute this cmd (opcode %d)\n",
-				compl->tag0);
+				opcode);
 		} else {
 			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
 					CQE_STATUS_EXTD_MASK;
 			dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
 				"status %d, extd-status %d\n",
-				compl->tag0, compl_status, extd_status);
+				opcode, compl_status, extd_status);
 		}
 	}
 done:
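
Note: be_decode_resp_hdr() above rebuilds the request-header pointer that be_wrb_cmd_hdr_prepare() (later in this diff) splits across the two 32-bit completion tags. A sketch of the round trip, using an explicit u64 shift instead of the driver's double 16-bit shift; example_resp_hdr stands in for struct be_cmd_resp_hdr.

#include <linux/kernel.h>
#include <linux/types.h>

struct example_resp_hdr;

static void example_encode_tags(u32 *tag0, u32 *tag1, void *req_hdr)
{
	u64 addr = (unsigned long)req_hdr;

	*tag0 = lower_32_bits(addr);	/* low half of the header address */
	*tag1 = upper_32_bits(addr);	/* high half */
}

static struct example_resp_hdr *example_decode_tags(u32 tag0, u32 tag1)
{
	u64 addr = ((u64)tag1 << 32) | tag0;

	return (struct example_resp_hdr *)(unsigned long)addr;
}
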
@@ -126,7 +142,7 @@
 		struct be_async_event_link_state *evt)
 {
 	/* When link status changes, link speed must be re-queried from FW */
-	adapter->link_speed = -1;
+	adapter->phy.link_speed = -1;
 
 	/* For the initial link status do not rely on the ASYNC event as
 	 * it may not be received in some cases.
@@ -153,7 +169,7 @@
 {
 	if (evt->physical_port == adapter->port_num) {
 		/* qos_link_speed is in units of 10 Mbps */
-		adapter->link_speed = evt->qos_link_speed * 10;
+		adapter->phy.link_speed = evt->qos_link_speed * 10;
 	}
 }
 
@@ -286,7 +302,7 @@
 	if (i == mcc_timeout) {
 		dev_err(&adapter->pdev->dev, "FW not responding\n");
 		adapter->fw_timeout = true;
-		return -1;
+		return -EIO;
 	}
 	return status;
 }
@@ -294,8 +310,26 @@
 /* Notify MCC requests and wait for completion */
 static int be_mcc_notify_wait(struct be_adapter *adapter)
 {
+	int status;
+	struct be_mcc_wrb *wrb;
+	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+	u16 index = mcc_obj->q.head;
+	struct be_cmd_resp_hdr *resp;
+
+	index_dec(&index, mcc_obj->q.len);
+	wrb = queue_index_node(&mcc_obj->q, index);
+
+	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
+
 	be_mcc_notify(adapter);
-	return be_mcc_wait_compl(adapter);
+
+	status = be_mcc_wait_compl(adapter);
+	if (status == -EIO)
+		goto out;
+
+	status = resp->status;
+out:
+	return status;
 }
 
 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
@@ -435,14 +469,17 @@
 				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
 {
 	struct be_sge *sge;
+	unsigned long addr = (unsigned long)req_hdr;
+	u64 req_addr = addr;
 
 	req_hdr->opcode = opcode;
 	req_hdr->subsystem = subsystem;
 	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
 	req_hdr->version = 0;
 
-	wrb->tag0 = opcode;
-	wrb->tag1 = subsystem;
+	wrb->tag0 = req_addr & 0xFFFFFFFF;
+	wrb->tag1 = upper_32_bits(req_addr);
+
 	wrb->payload_length = cmd_len;
 	if (mem) {
 		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -1221,7 +1258,7 @@
 			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
 			nonemb_cmd);
 
-	req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
+	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
 	req->cmd_params.params.reset_stats = 0;
 
 	be_mcc_notify(adapter);
@@ -1283,13 +1320,10 @@
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_cntl_addnl_attribs *req;
-	u16 mccq_index;
 	int status;
 
 	spin_lock_bh(&adapter->mcc_lock);
 
-	mccq_index = adapter->mcc_obj.q.head;
-
 	wrb = wrb_from_mccq(adapter);
 	if (!wrb) {
 		status = -EBUSY;
@@ -1301,8 +1335,6 @@
 		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
 		wrb, NULL);
 
-	wrb->tag1 = mccq_index;
-
 	be_mcc_notify(adapter);
 
 err:
@@ -1824,18 +1856,16 @@
 	spin_unlock_bh(&adapter->mcc_lock);
 
 	if (!wait_for_completion_timeout(&adapter->flash_compl,
-			msecs_to_jiffies(12000)))
+					 msecs_to_jiffies(30000)))
 		status = -1;
 	else
 		status = adapter->flash_status;
 
 	resp = embedded_payload(wrb);
-	if (!status) {
+	if (!status)
 		*data_written = le32_to_cpu(resp->actual_write_len);
-	} else {
+	else
 		*addn_status = resp->additional_status;
-		status = resp->status;
-	}
 
 	return status;
 
@@ -1950,7 +1980,7 @@
 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
 
-	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
+	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
 	req->params.offset = cpu_to_le32(offset);
 	req->params.data_buf_size = cpu_to_le32(0x4);
@@ -2136,8 +2166,7 @@
 	return status;
 }
 
-int be_cmd_get_phy_info(struct be_adapter *adapter,
-				struct be_phy_info *phy_info)
+int be_cmd_get_phy_info(struct be_adapter *adapter)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_get_phy_info *req;
@@ -2170,9 +2199,15 @@
 	if (!status) {
 		struct be_phy_info *resp_phy_info =
 				cmd.va + sizeof(struct be_cmd_req_hdr);
-		phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
-		phy_info->interface_type =
+		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
+		adapter->phy.interface_type =
 			le16_to_cpu(resp_phy_info->interface_type);
+		adapter->phy.auto_speeds_supported =
+			le16_to_cpu(resp_phy_info->auto_speeds_supported);
+		adapter->phy.fixed_speeds_supported =
+			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
+		adapter->phy.misc_params =
+			le32_to_cpu(resp_phy_info->misc_params);
 	}
 	pci_free_consistent(adapter->pdev, cmd.size,
 				cmd.va, cmd.dma);
@@ -2555,4 +2590,98 @@
 	mutex_unlock(&adapter->mbox_lock);
 	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
 	return status;
+
 }
+int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
+				   struct be_dma_mem *cmd)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_ext_fat_caps *req;
+	int status;
+
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;
+
+	wrb = wrb_from_mbox(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = cmd->va;
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
+			       cmd->size, wrb, cmd);
+	req->parameter_type = cpu_to_le32(1);
+
+	status = be_mbox_notify_wait(adapter);
+err:
+	mutex_unlock(&adapter->mbox_lock);
+	return status;
+}
+
+int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+				   struct be_dma_mem *cmd,
+				   struct be_fat_conf_params *configs)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_set_ext_fat_caps *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = cmd->va;
+	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
+			       cmd->size, wrb, cmd);
+
+	status = be_mcc_notify_wait(adapter);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
+			int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+{
+	struct be_adapter *adapter = netdev_priv(netdev_handle);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
+	struct be_cmd_req_hdr *req;
+	struct be_cmd_resp_hdr *resp;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+	resp = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
+			       hdr->opcode, wrb_payload_size, wrb, NULL);
+	memcpy(req, wrb_payload, wrb_payload_size);
+	be_dws_cpu_to_le(req, wrb_payload_size);
+
+	status = be_mcc_notify_wait(adapter);
+	if (cmd_status)
+		*cmd_status = (status & 0xffff);
+	if (ext_status)
+		*ext_status = 0;
+	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
+	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+EXPORT_SYMBOL(be_roce_mcc_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d5b680c..9625bf4 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -189,6 +189,8 @@
 #define OPCODE_COMMON_GET_PHY_DETAILS			102
 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP		103
 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES	121
+#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITES		125
+#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITES		126
 #define OPCODE_COMMON_GET_MAC_LIST			147
 #define OPCODE_COMMON_SET_MAC_LIST			148
 #define OPCODE_COMMON_GET_HSW_CONFIG			152
@@ -225,8 +227,12 @@
 #define RESP_HDR_INFO_OPCODE_SHIFT	0	/* bits 0 - 7 */
 #define RESP_HDR_INFO_SUBSYS_SHIFT	8 	/* bits 8 - 15 */
 struct be_cmd_resp_hdr {
-	u32 info;		/* dword 0 */
-	u32 status;		/* dword 1 */
+	u8 opcode;		/* dword 0 */
+	u8 subsystem;		/* dword 0 */
+	u8 rsvd[2];		/* dword 0 */
+	u8 status;		/* dword 1 */
+	u8 add_status;		/* dword 1 */
+	u8 rsvd1[2];		/* dword 1 */
 	u32 response_length;	/* dword 2 */
 	u32 actual_resp_len;	/* dword 3 */
 };
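
Side note (an annotation, not part of the patch): the new byte-wise be_cmd_resp_hdr layout is equivalent to the old shift/mask view of dword 0 given by RESP_HDR_INFO_OPCODE_SHIFT/RESP_HDR_INFO_SUBSYS_SHIFT on a little-endian host, which is what the hardware format assumes. A minimal user-space check with made-up example values (struct and macro names mirror the patch, the rest is illustrative):

#include <stdint.h>
#include <stdio.h>

#define RESP_HDR_INFO_OPCODE_SHIFT	0	/* bits 0 - 7  */
#define RESP_HDR_INFO_SUBSYS_SHIFT	8	/* bits 8 - 15 */

struct resp_hdr_dword0 {
	uint8_t opcode;		/* dword 0, bits 0 - 7  */
	uint8_t subsystem;	/* dword 0, bits 8 - 15 */
	uint8_t rsvd[2];	/* dword 0, bits 16 - 31 */
};

int main(void)
{
	union {
		uint32_t info;			/* old view */
		struct resp_hdr_dword0 hdr;	/* new view */
	} u = { .info = (121 << RESP_HDR_INFO_OPCODE_SHIFT) |
			(1 << RESP_HDR_INFO_SUBSYS_SHIFT) };

	/* On little-endian both views agree: opcode 121, subsystem 1 */
	printf("opcode=%d subsystem=%d\n", u.hdr.opcode, u.hdr.subsystem);
	return 0;
}
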
@@ -1056,6 +1062,7 @@
 /* The HW can come up in either of the following multi-channel modes
  * based on the skew/IPL.
  */
+#define RDMA_ENABLED				0x4
 #define FLEX10_MODE				0x400
 #define VNIC_MODE				0x20000
 #define UMC_ENABLED				0x1000000
@@ -1309,9 +1316,36 @@
 	PHY_TYPE_KX4_10GB,
 	PHY_TYPE_BASET_10GB,
 	PHY_TYPE_BASET_1GB,
+	PHY_TYPE_BASEX_1GB,
+	PHY_TYPE_SGMII,
 	PHY_TYPE_DISABLED = 255
 };
 
+#define BE_SUPPORTED_SPEED_NONE		0
+#define BE_SUPPORTED_SPEED_10MBPS	1
+#define BE_SUPPORTED_SPEED_100MBPS	2
+#define BE_SUPPORTED_SPEED_1GBPS	4
+#define BE_SUPPORTED_SPEED_10GBPS	8
+
+#define BE_AN_EN			0x2
+#define BE_PAUSE_SYM_EN			0x80
+
+/* MAC speed valid values */
+#define SPEED_DEFAULT  0x0
+#define SPEED_FORCED_10GB  0x1
+#define SPEED_FORCED_1GB  0x2
+#define SPEED_AUTONEG_10GB  0x3
+#define SPEED_AUTONEG_1GB  0x4
+#define SPEED_AUTONEG_100MB  0x5
+#define SPEED_AUTONEG_10GB_1GB 0x6
+#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
+#define SPEED_AUTONEG_1GB_100MB  0x8
+#define SPEED_AUTONEG_10MB  0x9
+#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
+#define SPEED_AUTONEG_100MB_10MB 0xb
+#define SPEED_FORCED_100MB  0xc
+#define SPEED_FORCED_10MB  0xd
+
 struct be_cmd_req_get_phy_info {
 	struct be_cmd_req_hdr hdr;
 	u8 rsvd0[24];
@@ -1321,7 +1355,11 @@
 	u16 phy_type;
 	u16 interface_type;
 	u32 misc_params;
-	u32 future_use[4];
+	u16 ext_phy_details;
+	u16 rsvd;
+	u16 auto_speeds_supported;
+	u16 fixed_speeds_supported;
+	u32 future_use[2];
 };
 
 struct be_cmd_resp_get_phy_info {
@@ -1567,6 +1605,56 @@
 	}
 }
 
+
+/************** get fat capabilites *******************/
+#define MAX_MODULES 27
+#define MAX_MODES 4
+#define MODE_UART 0
+#define FW_LOG_LEVEL_DEFAULT 48
+#define FW_LOG_LEVEL_FATAL 64
+
+struct ext_fat_mode {
+	u8 mode;
+	u8 rsvd0;
+	u16 port_mask;
+	u32 dbg_lvl;
+	u64 fun_mask;
+} __packed;
+
+struct ext_fat_modules {
+	u8 modules_str[32];
+	u32 modules_id;
+	u32 num_modes;
+	struct ext_fat_mode trace_lvl[MAX_MODES];
+} __packed;
+
+struct be_fat_conf_params {
+	u32 max_log_entries;
+	u32 log_entry_size;
+	u8 log_type;
+	u8 max_log_funs;
+	u8 max_log_ports;
+	u8 rsvd0;
+	u32 supp_modes;
+	u32 num_modules;
+	struct ext_fat_modules module[MAX_MODULES];
+} __packed;
+
+struct be_cmd_req_get_ext_fat_caps {
+	struct be_cmd_req_hdr hdr;
+	u32 parameter_type;
+};
+
+struct be_cmd_resp_get_ext_fat_caps {
+	struct be_cmd_resp_hdr hdr;
+	struct be_fat_conf_params get_params;
+};
+
+struct be_cmd_req_set_ext_fat_caps {
+	struct be_cmd_req_hdr hdr;
+	struct be_fat_conf_params set_params;
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1655,8 +1743,7 @@
 				struct be_dma_mem *nonemb_cmd);
 extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 				u8 loopback_type, u8 enable);
-extern int be_cmd_get_phy_info(struct be_adapter *adapter,
-				struct be_phy_info *phy_info);
+extern int be_cmd_get_phy_info(struct be_adapter *adapter);
 extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
 extern void be_detect_dump_ue(struct be_adapter *adapter);
 extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
@@ -1673,4 +1760,9 @@
 extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
 			u32 domain, u16 intf_id);
 extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
+					  struct be_dma_mem *cmd);
+extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+					  struct be_dma_mem *cmd,
+					  struct be_fat_conf_params *cfgs);
 
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index c1ff73c..63e51d4 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -433,102 +433,193 @@
 	}
 }
 
+static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
+{
+	u32 port;
+
+	switch (phy_type) {
+	case PHY_TYPE_BASET_1GB:
+	case PHY_TYPE_BASEX_1GB:
+	case PHY_TYPE_SGMII:
+		port = PORT_TP;
+		break;
+	case PHY_TYPE_SFP_PLUS_10GB:
+		port = dac_cable_len ? PORT_DA : PORT_FIBRE;
+		break;
+	case PHY_TYPE_XFP_10GB:
+	case PHY_TYPE_SFP_1GB:
+		port = PORT_FIBRE;
+		break;
+	case PHY_TYPE_BASET_10GB:
+		port = PORT_TP;
+		break;
+	default:
+		port = PORT_OTHER;
+	}
+
+	return port;
+}
+
+static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
+{
+	u32 val = 0;
+
+	switch (if_type) {
+	case PHY_TYPE_BASET_1GB:
+	case PHY_TYPE_BASEX_1GB:
+	case PHY_TYPE_SGMII:
+		val |= SUPPORTED_TP;
+		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+			val |= SUPPORTED_1000baseT_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
+			val |= SUPPORTED_100baseT_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
+			val |= SUPPORTED_10baseT_Full;
+		break;
+	case PHY_TYPE_KX4_10GB:
+		val |= SUPPORTED_Backplane;
+		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+			val |= SUPPORTED_1000baseKX_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+			val |= SUPPORTED_10000baseKX4_Full;
+		break;
+	case PHY_TYPE_KR_10GB:
+		val |= SUPPORTED_Backplane |
+				SUPPORTED_10000baseKR_Full;
+		break;
+	case PHY_TYPE_SFP_PLUS_10GB:
+	case PHY_TYPE_XFP_10GB:
+	case PHY_TYPE_SFP_1GB:
+		val |= SUPPORTED_FIBRE;
+		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+			val |= SUPPORTED_10000baseT_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+			val |= SUPPORTED_1000baseT_Full;
+		break;
+	case PHY_TYPE_BASET_10GB:
+		val |= SUPPORTED_TP;
+		if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
+			val |= SUPPORTED_10000baseT_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
+			val |= SUPPORTED_1000baseT_Full;
+		if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
+			val |= SUPPORTED_100baseT_Full;
+		break;
+	default:
+		val |= SUPPORTED_TP;
+	}
+
+	return val;
+}
+
+static int convert_to_et_speed(u32 be_speed)
+{
+	int et_speed = SPEED_10000;
+
+	switch (be_speed) {
+	case PHY_LINK_SPEED_10MBPS:
+		et_speed = SPEED_10;
+		break;
+	case PHY_LINK_SPEED_100MBPS:
+		et_speed = SPEED_100;
+		break;
+	case PHY_LINK_SPEED_1GBPS:
+		et_speed = SPEED_1000;
+		break;
+	case PHY_LINK_SPEED_10GBPS:
+		et_speed = SPEED_10000;
+		break;
+	}
+
+	return et_speed;
+}
+
+bool be_pause_supported(struct be_adapter *adapter)
+{
+	return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
+		adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
+		false : true;
+}
+
 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
-	struct be_phy_info phy_info;
-	u8 mac_speed = 0;
+	u8 port_speed = 0;
 	u16 link_speed = 0;
 	u8 link_status;
+	u32 et_speed = 0;
 	int status;
 
-	if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
-		status = be_cmd_link_status_query(adapter, &mac_speed,
-						  &link_speed, &link_status, 0);
-		if (!status)
-			be_link_status_update(adapter, link_status);
-
-		/* link_speed is in units of 10 Mbps */
-		if (link_speed) {
-			ethtool_cmd_speed_set(ecmd, link_speed*10);
+	if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) {
+		if (adapter->phy.forced_port_speed < 0) {
+			status = be_cmd_link_status_query(adapter, &port_speed,
+						&link_speed, &link_status, 0);
+			if (!status)
+				be_link_status_update(adapter, link_status);
+			if (link_speed)
+				et_speed = link_speed * 10;
+			else if (link_status)
+				et_speed = convert_to_et_speed(port_speed);
 		} else {
-			switch (mac_speed) {
-			case PHY_LINK_SPEED_10MBPS:
-				ethtool_cmd_speed_set(ecmd, SPEED_10);
-				break;
-			case PHY_LINK_SPEED_100MBPS:
-				ethtool_cmd_speed_set(ecmd, SPEED_100);
-				break;
-			case PHY_LINK_SPEED_1GBPS:
-				ethtool_cmd_speed_set(ecmd, SPEED_1000);
-				break;
-			case PHY_LINK_SPEED_10GBPS:
-				ethtool_cmd_speed_set(ecmd, SPEED_10000);
-				break;
-			case PHY_LINK_SPEED_ZERO:
-				ethtool_cmd_speed_set(ecmd, 0);
-				break;
-			}
+			et_speed = adapter->phy.forced_port_speed;
 		}
 
-		status = be_cmd_get_phy_info(adapter, &phy_info);
-		if (!status) {
-			switch (phy_info.interface_type) {
-			case PHY_TYPE_XFP_10GB:
-			case PHY_TYPE_SFP_1GB:
-			case PHY_TYPE_SFP_PLUS_10GB:
-				ecmd->port = PORT_FIBRE;
-				break;
-			default:
-				ecmd->port = PORT_TP;
-				break;
-			}
+		ethtool_cmd_speed_set(ecmd, et_speed);
 
-			switch (phy_info.interface_type) {
-			case PHY_TYPE_KR_10GB:
-			case PHY_TYPE_KX4_10GB:
-				ecmd->autoneg = AUTONEG_ENABLE;
+		status = be_cmd_get_phy_info(adapter);
+		if (status)
+			return status;
+
+		ecmd->supported =
+			convert_to_et_setting(adapter->phy.interface_type,
+					adapter->phy.auto_speeds_supported |
+					adapter->phy.fixed_speeds_supported);
+		ecmd->advertising =
+			convert_to_et_setting(adapter->phy.interface_type,
+					adapter->phy.auto_speeds_supported);
+
+		ecmd->port = be_get_port_type(adapter->phy.interface_type,
+					      adapter->phy.dac_cable_len);
+
+		if (adapter->phy.auto_speeds_supported) {
+			ecmd->supported |= SUPPORTED_Autoneg;
+			ecmd->autoneg = AUTONEG_ENABLE;
+			ecmd->advertising |= ADVERTISED_Autoneg;
+		}
+
+		if (be_pause_supported(adapter)) {
+			ecmd->supported |= SUPPORTED_Pause;
+			ecmd->advertising |= ADVERTISED_Pause;
+		}
+
+		switch (adapter->phy.interface_type) {
+		case PHY_TYPE_KR_10GB:
+		case PHY_TYPE_KX4_10GB:
 			ecmd->transceiver = XCVR_INTERNAL;
-				break;
-			default:
-				ecmd->autoneg = AUTONEG_DISABLE;
-				ecmd->transceiver = XCVR_EXTERNAL;
-				break;
-			}
+			break;
+		default:
+			ecmd->transceiver = XCVR_EXTERNAL;
+			break;
 		}
 
 		/* Save for future use */
-		adapter->link_speed = ethtool_cmd_speed(ecmd);
-		adapter->port_type = ecmd->port;
-		adapter->transceiver = ecmd->transceiver;
-		adapter->autoneg = ecmd->autoneg;
+		adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
+		adapter->phy.port_type = ecmd->port;
+		adapter->phy.transceiver = ecmd->transceiver;
+		adapter->phy.autoneg = ecmd->autoneg;
+		adapter->phy.advertising = ecmd->advertising;
+		adapter->phy.supported = ecmd->supported;
 	} else {
-		ethtool_cmd_speed_set(ecmd, adapter->link_speed);
-		ecmd->port = adapter->port_type;
-		ecmd->transceiver = adapter->transceiver;
-		ecmd->autoneg = adapter->autoneg;
+		ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
+		ecmd->port = adapter->phy.port_type;
+		ecmd->transceiver = adapter->phy.transceiver;
+		ecmd->autoneg = adapter->phy.autoneg;
+		ecmd->advertising = adapter->phy.advertising;
+		ecmd->supported = adapter->phy.supported;
 	}
 
-	ecmd->duplex = DUPLEX_FULL;
+	ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
 	ecmd->phy_address = adapter->port_num;
-	switch (ecmd->port) {
-	case PORT_FIBRE:
-		ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-		break;
-	case PORT_TP:
-		ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
-		break;
-	case PORT_AUI:
-		ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
-		break;
-	}
-
-	if (ecmd->autoneg) {
-		ecmd->supported |= SUPPORTED_1000baseT_Full;
-		ecmd->supported |= SUPPORTED_Autoneg;
-		ecmd->advertising |= (ADVERTISED_10000baseT_Full |
-				ADVERTISED_1000baseT_Full);
-	}
 
 	return 0;
 }
@@ -548,7 +639,7 @@
 	struct be_adapter *adapter = netdev_priv(netdev);
 
 	be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
-	ecmd->autoneg = 0;
+	ecmd->autoneg = adapter->phy.fc_autoneg;
 }
 
 static int
@@ -702,7 +793,7 @@
 		}
 	}
 
-	if (be_test_ddr_dma(adapter) != 0) {
+	if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
 		data[3] = 1;
 		test->flags |= ETH_TEST_FL_FAILED;
 	}
@@ -787,6 +878,81 @@
 	return status;
 }
 
+static u32 be_get_msg_level(struct net_device *netdev)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	if (lancer_chip(adapter)) {
+		dev_err(&adapter->pdev->dev, "Operation not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	return adapter->msg_enable;
+}
+
+static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
+{
+	struct be_dma_mem extfat_cmd;
+	struct be_fat_conf_params *cfgs;
+	int status;
+	int i, j;
+
+	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+					     &extfat_cmd.dma);
+	if (!extfat_cmd.va) {
+		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
+			__func__);
+		goto err;
+	}
+	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+	if (!status) {
+		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
+					sizeof(struct be_cmd_resp_hdr));
+		for (i = 0; i < cfgs->num_modules; i++) {
+			for (j = 0; j < cfgs->module[i].num_modes; j++) {
+				if (cfgs->module[i].trace_lvl[j].mode ==
+								MODE_UART)
+					cfgs->module[i].trace_lvl[j].dbg_lvl =
+							cpu_to_le32(level);
+			}
+		}
+		status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd,
+							cfgs);
+		if (status)
+			dev_err(&adapter->pdev->dev,
+				"Message level set failed\n");
+	} else {
+		dev_err(&adapter->pdev->dev, "Message level get failed\n");
+	}
+
+	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+			    extfat_cmd.dma);
+err:
+	return;
+}
+
+static void be_set_msg_level(struct net_device *netdev, u32 level)
+{
+	struct be_adapter *adapter = netdev_priv(netdev);
+
+	if (lancer_chip(adapter)) {
+		dev_err(&adapter->pdev->dev, "Operation not supported\n");
+		return;
+	}
+
+	if (adapter->msg_enable == level)
+		return;
+
+	if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
+		be_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
+				    FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL);
+	adapter->msg_enable = level;
+
+	return;
+}
+
 const struct ethtool_ops be_ethtool_ops = {
 	.get_settings = be_get_settings,
 	.get_drvinfo = be_get_drvinfo,
@@ -802,6 +968,8 @@
 	.set_pauseparam = be_set_pauseparam,
 	.get_strings = be_get_stat_strings,
 	.set_phys_id = be_set_phys_id,
+	.get_msglevel = be_get_msg_level,
+	.set_msglevel = be_set_msg_level,
 	.get_sset_count = be_get_sset_count,
 	.get_ethtool_stats = be_get_ethtool_stats,
 	.get_regs_len = be_get_reg_len,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index f2c89e3..d9fb0c5 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -58,6 +58,8 @@
 
 #define SLI_PORT_CONTROL_IP_MASK	0x08000000
 
+#define PCICFG_CUST_SCRATCHPAD_CSR	0x1EC
+
 /********* Memory BAR register ************/
 #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 	0xfc
 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -98,11 +100,13 @@
 #define SLI_INTF_REV_SHIFT			4
 #define SLI_INTF_FT_MASK			0x00000001
 
+#define SLI_INTF_TYPE_2		2
+#define SLI_INTF_TYPE_3		3
 
 /* SLI family */
 #define BE_SLI_FAMILY		0x0
 #define LANCER_A0_SLI_FAMILY	0xA
-
+#define SKYHAWK_SLI_FAMILY      0x2
 
 /********* ISR0 Register offset **********/
 #define CEV_ISR0_OFFSET 			0xC18
@@ -162,22 +166,23 @@
 #define QUERY_FAT	1
 
 /* Flashrom related descriptors */
+#define MAX_FLASH_COMP			32
 #define IMAGE_TYPE_FIRMWARE		160
 #define IMAGE_TYPE_BOOTCODE		224
 #define IMAGE_TYPE_OPTIONROM		32
 
 #define NUM_FLASHDIR_ENTRIES		32
 
-#define IMG_TYPE_ISCSI_ACTIVE		0
-#define IMG_TYPE_REDBOOT		1
-#define IMG_TYPE_BIOS			2
-#define IMG_TYPE_PXE_BIOS		3
-#define IMG_TYPE_FCOE_BIOS		8
-#define IMG_TYPE_ISCSI_BACKUP		9
-#define IMG_TYPE_FCOE_FW_ACTIVE		10
-#define IMG_TYPE_FCOE_FW_BACKUP 	11
-#define IMG_TYPE_NCSI_FW		13
-#define IMG_TYPE_PHY_FW			99
+#define OPTYPE_ISCSI_ACTIVE		0
+#define OPTYPE_REDBOOT			1
+#define OPTYPE_BIOS			2
+#define OPTYPE_PXE_BIOS			3
+#define OPTYPE_FCOE_BIOS		8
+#define OPTYPE_ISCSI_BACKUP		9
+#define OPTYPE_FCOE_FW_ACTIVE		10
+#define OPTYPE_FCOE_FW_BACKUP		11
+#define OPTYPE_NCSI_FW			13
+#define OPTYPE_PHY_FW			99
 #define TN_8022				13
 
 #define ILLEGAL_IOCTL_REQ		2
@@ -223,6 +228,24 @@
 #define FLASH_REDBOOT_START_g3             (262144)
 #define FLASH_PHY_FW_START_g3		   1310720
 
+#define IMAGE_NCSI			16
+#define IMAGE_OPTION_ROM_PXE		32
+#define IMAGE_OPTION_ROM_FCoE		33
+#define IMAGE_OPTION_ROM_ISCSI		34
+#define IMAGE_FLASHISM_JUMPVECTOR	48
+#define IMAGE_FLASH_ISM			49
+#define IMAGE_JUMP_VECTOR		50
+#define IMAGE_FIRMWARE_iSCSI		160
+#define IMAGE_FIRMWARE_COMP_iSCSI	161
+#define IMAGE_FIRMWARE_FCoE		162
+#define IMAGE_FIRMWARE_COMP_FCoE	163
+#define IMAGE_FIRMWARE_BACKUP_iSCSI	176
+#define IMAGE_FIRMWARE_BACKUP_COMP_iSCSI 177
+#define IMAGE_FIRMWARE_BACKUP_FCoE	178
+#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
+#define IMAGE_FIRMWARE_PHY		192
+#define IMAGE_BOOT_CODE			224
+
 /************* Rx Packet Type Encoding **************/
 #define BE_UNICAST_PACKET		0
 #define BE_MULTICAST_PACKET		1
@@ -445,6 +468,7 @@
 	unsigned long offset;
 	int optype;
 	int size;
+	int img_type;
 };
 
 struct image_hdr {
@@ -481,17 +505,19 @@
 	u32 format_rev;
 	u32 cksum;
 	u32 antidote;
-	u32 build_no;
-	u8 id_string[64];
-	u32 active_entry_mask;
-	u32 valid_entry_mask;
-	u32 org_content_mask;
-	u32 rsvd0;
-	u32 rsvd1;
-	u32 rsvd2;
-	u32 rsvd3;
-	u32 rsvd4;
-};
+	u32 num_images;
+	u8 id_string[128];
+	u32 rsvd[4];
+} __packed;
+
+struct flash_section_hdr_g2 {
+	u32 format_rev;
+	u32 cksum;
+	u32 antidote;
+	u32 build_num;
+	u8 id_string[128];
+	u32 rsvd[8];
+} __packed;
 
 struct flash_section_entry {
 	u32 type;
@@ -503,10 +529,16 @@
 	u32 rsvd0;
 	u32 rsvd1;
 	u8 ver_data[32];
-};
+} __packed;
 
 struct flash_section_info {
 	u8 cookie[32];
 	struct flash_section_hdr fsec_hdr;
 	struct flash_section_entry fsec_entry[32];
-};
+} __packed;
+
+struct flash_section_info_g2 {
+	u8 cookie[32];
+	struct flash_section_hdr_g2 fsec_hdr;
+	struct flash_section_entry fsec_entry[32];
+} __packed;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 528a886..08efd30 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -421,6 +421,9 @@
 		populate_be2_stats(adapter);
 	}
 
+	if (lancer_chip(adapter))
+		goto done;
+
 	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
 	for_all_rx_queues(adapter, rxo, i) {
 		/* below erx HW counter can actually wrap around after
@@ -429,6 +432,8 @@
 		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
 				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
 	}
+done:
+	return;
 }
 
 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
@@ -797,22 +802,30 @@
 	if (adapter->promiscuous)
 		return 0;
 
-	if (adapter->vlans_added <= adapter->max_vlans)  {
-		/* Construct VLAN Table to give to HW */
-		for (i = 0; i < VLAN_N_VID; i++) {
-			if (adapter->vlan_tag[i]) {
-				vtag[ntags] = cpu_to_le16(i);
-				ntags++;
-			}
-		}
-		status = be_cmd_vlan_config(adapter, adapter->if_handle,
-					vtag, ntags, 1, 0);
-	} else {
-		status = be_cmd_vlan_config(adapter, adapter->if_handle,
-					NULL, 0, 1, 1);
+	if (adapter->vlans_added > adapter->max_vlans)
+		goto set_vlan_promisc;
+
+	/* Construct VLAN Table to give to HW */
+	for (i = 0; i < VLAN_N_VID; i++)
+		if (adapter->vlan_tag[i])
+			vtag[ntags++] = cpu_to_le16(i);
+
+	status = be_cmd_vlan_config(adapter, adapter->if_handle,
+				    vtag, ntags, 1, 0);
+
+	/* Set to VLAN promisc mode as setting VLAN filter failed */
+	if (status) {
+		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
+		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
+		goto set_vlan_promisc;
 	}
 
 	return status;
+
+set_vlan_promisc:
+	status = be_cmd_vlan_config(adapter, adapter->if_handle,
+				    NULL, 0, 1, 1);
+	return status;
 }
 
 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
@@ -862,6 +875,7 @@
 static void be_set_rx_mode(struct net_device *netdev)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
+	int status;
 
 	if (netdev->flags & IFF_PROMISC) {
 		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
@@ -908,7 +922,14 @@
 		}
 	}
 
-	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
+	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
+
+	/* Set to MCAST promisc mode if setting MULTICAST address fails */
+	if (status) {
+		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
+		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
+		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+	}
 done:
 	return;
 }
@@ -1028,6 +1049,29 @@
 	return status;
 }
 
+static int be_find_vfs(struct be_adapter *adapter, int vf_state)
+{
+	struct pci_dev *dev, *pdev = adapter->pdev;
+	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
+	u16 offset, stride;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
+	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
+
+	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
+	while (dev) {
+		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
+		if (dev->is_virtfn && dev->devfn == vf_fn) {
+			vfs++;
+			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+				assigned_vfs++;
+		}
+		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
+	}
+	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
+}
+
 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
 {
 	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
@@ -1238,6 +1282,7 @@
 		skb_checksum_none_assert(skb);
 
 	skb->protocol = eth_type_trans(skb, netdev);
+	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
 	if (netdev->features & NETIF_F_RXHASH)
 		skb->rxhash = rxcp->rss_hash;
 
@@ -1294,6 +1339,7 @@
 	skb->len = rxcp->pkt_size;
 	skb->data_len = rxcp->pkt_size;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
 	if (adapter->netdev->features & NETIF_F_RXHASH)
 		skb->rxhash = rxcp->rss_hash;
 
@@ -1555,7 +1601,9 @@
 	if (!num)
 		rearm = true;
 
-	be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
+	if (num || msix_enabled(eqo->adapter))
+		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
+
 	if (num)
 		napi_schedule(&eqo->napi);
 
@@ -1764,9 +1812,9 @@
 
 static int be_num_txqs_want(struct be_adapter *adapter)
 {
-	if (sriov_enabled(adapter) || be_is_mc(adapter) ||
-		lancer_chip(adapter) || !be_physfn(adapter) ||
-		adapter->generation == BE_GEN2)
+	if (sriov_want(adapter) || be_is_mc(adapter) ||
+	    lancer_chip(adapter) || !be_physfn(adapter) ||
+	    adapter->generation == BE_GEN2)
 		return 1;
 	else
 		return MAX_TX_QS;
@@ -2093,7 +2141,7 @@
 static uint be_num_rss_want(struct be_adapter *adapter)
 {
 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-	     adapter->num_vfs == 0 && be_physfn(adapter) &&
+	     !sriov_want(adapter) && be_physfn(adapter) &&
 	     !be_is_mc(adapter))
 		return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
 	else
@@ -2103,10 +2151,17 @@
 static void be_msix_enable(struct be_adapter *adapter)
 {
 #define BE_MIN_MSIX_VECTORS		1
-	int i, status, num_vec;
+	int i, status, num_vec, num_roce_vec = 0;
 
 	/* If RSS queues are not used, need a vec for default RX Q */
 	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
+	if (be_roce_supported(adapter)) {
+		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
+					(num_online_cpus() + 1));
+		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
+		num_vec += num_roce_vec;
+		num_vec = min(num_vec, MAX_MSIX_VECTORS);
+	}
 	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
 
 	for (i = 0; i < num_vec; i++)
@@ -2123,55 +2178,18 @@
 	}
 	return;
 done:
-	adapter->num_msix_vec = num_vec;
-	return;
-}
-
-static int be_sriov_enable(struct be_adapter *adapter)
-{
-	be_check_sriov_fn_type(adapter);
-
-#ifdef CONFIG_PCI_IOV
-	if (be_physfn(adapter) && num_vfs) {
-		int status, pos;
-		u16 dev_vfs;
-
-		pos = pci_find_ext_capability(adapter->pdev,
-						PCI_EXT_CAP_ID_SRIOV);
-		pci_read_config_word(adapter->pdev,
-				     pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
-
-		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
-		if (adapter->num_vfs != num_vfs)
-			dev_info(&adapter->pdev->dev,
-				 "Device supports %d VFs and not %d\n",
-				 adapter->num_vfs, num_vfs);
-
-		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
-		if (status)
-			adapter->num_vfs = 0;
-
-		if (adapter->num_vfs) {
-			adapter->vf_cfg = kcalloc(num_vfs,
-						sizeof(struct be_vf_cfg),
-						GFP_KERNEL);
-			if (!adapter->vf_cfg)
-				return -ENOMEM;
+	if (be_roce_supported(adapter)) {
+		if (num_vec > num_roce_vec) {
+			adapter->num_msix_vec = num_vec - num_roce_vec;
+			adapter->num_msix_roce_vec =
+				num_vec - adapter->num_msix_vec;
+		} else {
+			adapter->num_msix_vec = num_vec;
+			adapter->num_msix_roce_vec = 0;
 		}
-	}
-#endif
-	return 0;
-}
-
-static void be_sriov_disable(struct be_adapter *adapter)
-{
-#ifdef CONFIG_PCI_IOV
-	if (sriov_enabled(adapter)) {
-		pci_disable_sriov(adapter->pdev);
-		kfree(adapter->vf_cfg);
-		adapter->num_vfs = 0;
-	}
-#endif
+	} else
+		adapter->num_msix_vec = num_vec;
+	return;
 }
 
 static inline int be_msix_vec_get(struct be_adapter *adapter,
@@ -2282,6 +2300,8 @@
 	struct be_eq_obj *eqo;
 	int i;
 
+	be_roce_dev_close(adapter);
+
 	be_async_mcc_disable(adapter);
 
 	if (!lancer_chip(adapter))
@@ -2390,6 +2410,7 @@
 	if (!status)
 		be_link_status_update(adapter, link_status);
 
+	be_roce_dev_open(adapter);
 	return 0;
 err:
 	be_close(adapter->netdev);
@@ -2475,6 +2496,11 @@
 	struct be_vf_cfg *vf_cfg;
 	u32 vf;
 
+	if (be_find_vfs(adapter, ASSIGNED)) {
+		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
+		goto done;
+	}
+
 	for_all_vfs(adapter, vf_cfg, vf) {
 		if (lancer_chip(adapter))
 			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
@@ -2484,6 +2510,10 @@
 
 		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
 	}
+	pci_disable_sriov(adapter->pdev);
+done:
+	kfree(adapter->vf_cfg);
+	adapter->num_vfs = 0;
 }
 
 static int be_clear(struct be_adapter *adapter)
@@ -2513,29 +2543,60 @@
 	be_cmd_fw_clean(adapter);
 
 	be_msix_disable(adapter);
-	kfree(adapter->pmac_id);
+	pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
 	return 0;
 }
 
-static void be_vf_setup_init(struct be_adapter *adapter)
+static int be_vf_setup_init(struct be_adapter *adapter)
 {
 	struct be_vf_cfg *vf_cfg;
 	int vf;
 
+	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
+				  GFP_KERNEL);
+	if (!adapter->vf_cfg)
+		return -ENOMEM;
+
 	for_all_vfs(adapter, vf_cfg, vf) {
 		vf_cfg->if_handle = -1;
 		vf_cfg->pmac_id = -1;
 	}
+	return 0;
 }
 
 static int be_vf_setup(struct be_adapter *adapter)
 {
 	struct be_vf_cfg *vf_cfg;
+	struct device *dev = &adapter->pdev->dev;
 	u32 cap_flags, en_flags, vf;
 	u16 def_vlan, lnk_speed;
-	int status;
+	int status, enabled_vfs;
 
-	be_vf_setup_init(adapter);
+	enabled_vfs = be_find_vfs(adapter, ENABLED);
+	if (enabled_vfs) {
+		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
+		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
+		return 0;
+	}
+
+	if (num_vfs > adapter->dev_num_vfs) {
+		dev_warn(dev, "Device supports %d VFs and not %d\n",
+			 adapter->dev_num_vfs, num_vfs);
+		num_vfs = adapter->dev_num_vfs;
+	}
+
+	status = pci_enable_sriov(adapter->pdev, num_vfs);
+	if (!status) {
+		adapter->num_vfs = num_vfs;
+	} else {
+		/* Platform doesn't support SRIOV though device supports it */
+		dev_warn(dev, "SRIOV enable failed\n");
+		return 0;
+	}
+
+	status = be_vf_setup_init(adapter);
+	if (status)
+		goto err;
 
 	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
 				BE_IF_FLAGS_MULTICAST;
@@ -2546,9 +2607,11 @@
 			goto err;
 	}
 
-	status = be_vf_eth_addr_config(adapter);
-	if (status)
-		goto err;
+	if (!enabled_vfs) {
+		status = be_vf_eth_addr_config(adapter);
+		if (status)
+			goto err;
+	}
 
 	for_all_vfs(adapter, vf_cfg, vf) {
 		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
@@ -2571,11 +2634,12 @@
 static void be_setup_init(struct be_adapter *adapter)
 {
 	adapter->vlan_prio_bmap = 0xff;
-	adapter->link_speed = -1;
+	adapter->phy.link_speed = -1;
 	adapter->if_handle = -1;
 	adapter->be3_native = false;
 	adapter->promiscuous = false;
 	adapter->eq_next_idx = 0;
+	adapter->phy.forced_port_speed = -1;
 }
 
 static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
@@ -2604,9 +2668,25 @@
 	return status;
 }
 
+/* Routine to query per function resource limits */
+static int be_get_config(struct be_adapter *adapter)
+{
+	int pos;
+	u16 dev_num_vfs;
+
+	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (pos) {
+		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
+				     &dev_num_vfs);
+		adapter->dev_num_vfs = dev_num_vfs;
+	}
+	return 0;
+}
+
 static int be_setup(struct be_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct device *dev = &adapter->pdev->dev;
 	u32 cap_flags, en_flags;
 	u32 tx_fc, rx_fc;
 	int status;
@@ -2614,6 +2694,8 @@
 
 	be_setup_init(adapter);
 
+	be_get_config(adapter);
+
 	be_cmd_req_native_mode(adapter);
 
 	be_msix_enable(adapter);
@@ -2680,36 +2762,33 @@
 
 	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
 
-	status = be_vid_config(adapter, false, 0);
-	if (status)
-		goto err;
+	be_vid_config(adapter, false, 0);
 
 	be_set_rx_mode(adapter->netdev);
 
-	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
-	/* For Lancer: It is legal for this cmd to fail on VF */
-	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
-		goto err;
+	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
 
-	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
-		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
+	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
+		be_cmd_set_flow_control(adapter, adapter->tx_fc,
 					adapter->rx_fc);
-		/* For Lancer: It is legal for this cmd to fail on VF */
-		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
-			goto err;
-	}
 
 	pcie_set_readrq(adapter->pdev, 4096);
 
-	if (sriov_enabled(adapter)) {
-		status = be_vf_setup(adapter);
-		if (status)
-			goto err;
+	if (be_physfn(adapter) && num_vfs) {
+		if (adapter->dev_num_vfs)
+			be_vf_setup(adapter);
+		else
+			dev_warn(dev, "device doesn't support SRIOV\n");
 	}
 
+	be_cmd_get_phy_info(adapter);
+	if (be_pause_supported(adapter))
+		adapter->phy.fc_autoneg = 1;
+
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
 
+	pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
 	return 0;
 err:
 	be_clear(adapter);
@@ -2731,6 +2810,8 @@
 #endif
 
 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
+char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
+
 static bool be_flash_redboot(struct be_adapter *adapter,
 			const u8 *p, u32 img_start, int image_size,
 			int hdr_size)
@@ -2760,71 +2841,105 @@
 
 static bool phy_flashing_required(struct be_adapter *adapter)
 {
-	int status = 0;
-	struct be_phy_info phy_info;
+	return (adapter->phy.phy_type == TN_8022 &&
+		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
+}
 
-	status = be_cmd_get_phy_info(adapter, &phy_info);
-	if (status)
-		return false;
-	if ((phy_info.phy_type == TN_8022) &&
-		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
-		return true;
+static bool is_comp_in_ufi(struct be_adapter *adapter,
+			   struct flash_section_info *fsec, int type)
+{
+	int i = 0, img_type = 0;
+	struct flash_section_info_g2 *fsec_g2 = NULL;
+
+	if (adapter->generation != BE_GEN3)
+		fsec_g2 = (struct flash_section_info_g2 *)fsec;
+
+	for (i = 0; i < MAX_FLASH_COMP; i++) {
+		if (fsec_g2)
+			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
+		else
+			img_type = le32_to_cpu(fsec->fsec_entry[i].type);
+
+		if (img_type == type)
+			return true;
 	}
 	return false;
+
+}
+
+struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
+					 int header_size,
+					 const struct firmware *fw)
+{
+	struct flash_section_info *fsec = NULL;
+	const u8 *p = fw->data;
+
+	p += header_size;
+	while (p < (fw->data + fw->size)) {
+		fsec = (struct flash_section_info *)p;
+		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
+			return fsec;
+		p += 32;
+	}
+	return NULL;
 }
 
 static int be_flash_data(struct be_adapter *adapter,
-			const struct firmware *fw,
-			struct be_dma_mem *flash_cmd, int num_of_images)
+			 const struct firmware *fw,
+			 struct be_dma_mem *flash_cmd,
+			 int num_of_images)
 
 {
 	int status = 0, i, filehdr_size = 0;
+	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
 	u32 total_bytes = 0, flash_op;
 	int num_bytes;
 	const u8 *p = fw->data;
 	struct be_cmd_write_flashrom *req = flash_cmd->va;
 	const struct flash_comp *pflashcomp;
-	int num_comp;
+	int num_comp, hdr_size;
+	struct flash_section_info *fsec = NULL;
 
-	static const struct flash_comp gen3_flash_types[10] = {
-		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
-			FLASH_IMAGE_MAX_SIZE_g3},
-		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
-			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
-		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
-			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
-		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
-			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
-		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
-			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
-		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
-			FLASH_IMAGE_MAX_SIZE_g3},
-		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
-			FLASH_IMAGE_MAX_SIZE_g3},
-		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
-			FLASH_IMAGE_MAX_SIZE_g3},
-		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
-			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
-		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
-			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
+	struct flash_comp gen3_flash_types[] = {
+		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
+			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
+		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
+			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
+		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
+			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
+		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
+			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
+		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
+			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
+		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
+			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
+		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
+			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
+		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
+			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
+		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
+			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
+		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
+			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
 	};
-	static const struct flash_comp gen2_flash_types[8] = {
-		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
-			FLASH_IMAGE_MAX_SIZE_g2},
-		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
-			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
-		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
-			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
-		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
-			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
-		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
-			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
-		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
-			FLASH_IMAGE_MAX_SIZE_g2},
-		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
-			FLASH_IMAGE_MAX_SIZE_g2},
-		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
-			 FLASH_IMAGE_MAX_SIZE_g2}
+
+	struct flash_comp gen2_flash_types[] = {
+		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
+			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
+		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
+			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
+		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
+			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
+		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
+			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
+		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
+			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
+		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
+			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
+		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
+			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
+		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
+			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
 	};
 
 	if (adapter->generation == BE_GEN3) {
@@ -2836,22 +2951,37 @@
 		filehdr_size = sizeof(struct flash_file_hdr_g2);
 		num_comp = ARRAY_SIZE(gen2_flash_types);
 	}
+	/* Get flash section info*/
+	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
+	if (!fsec) {
+		dev_err(&adapter->pdev->dev,
+			"Invalid Cookie. UFI corrupted ?\n");
+		return -1;
+	}
 	for (i = 0; i < num_comp; i++) {
-		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
-				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
+		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
 			continue;
-		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
+
+		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
+		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
+			continue;
+
+		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
 			if (!phy_flashing_required(adapter))
 				continue;
 		}
-		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
-			(!be_flash_redboot(adapter, fw->data,
-			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
-			(num_of_images * sizeof(struct image_hdr)))))
+
+		hdr_size = filehdr_size +
+			   (num_of_images * sizeof(struct image_hdr));
+
+		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
+		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
+				       pflashcomp[i].size, hdr_size)))
 			continue;
+
+		/* Flash the component */
 		p = fw->data;
-		p += filehdr_size + pflashcomp[i].offset
-			+ (num_of_images * sizeof(struct image_hdr));
+		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
 		if (p + pflashcomp[i].size > fw->data + fw->size)
 			return -1;
 		total_bytes = pflashcomp[i].size;
@@ -2862,12 +2992,12 @@
 				num_bytes = total_bytes;
 			total_bytes -= num_bytes;
 			if (!total_bytes) {
-				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
+				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
 					flash_op = FLASHROM_OPER_PHY_FLASH;
 				else
 					flash_op = FLASHROM_OPER_FLASH;
 			} else {
-				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
+				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
 					flash_op = FLASHROM_OPER_PHY_SAVE;
 				else
 					flash_op = FLASHROM_OPER_SAVE;
@@ -2879,7 +3009,7 @@
 			if (status) {
 				if ((status == ILLEGAL_IOCTL_REQ) &&
 					(pflashcomp[i].optype ==
-						IMG_TYPE_PHY_FW))
+						OPTYPE_PHY_FW))
 					break;
 				dev_err(&adapter->pdev->dev,
 					"cmd to write to flash rom failed.\n");
@@ -3122,6 +3252,24 @@
 		iounmap(adapter->csr);
 	if (adapter->db)
 		iounmap(adapter->db);
+	if (adapter->roce_db.base)
+		pci_iounmap(adapter->pdev, adapter->roce_db.base);
+}
+
+static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	u8 __iomem *addr;
+
+	addr = pci_iomap(pdev, 2, 0);
+	if (addr == NULL)
+		return -ENOMEM;
+
+	adapter->roce_db.base = addr;
+	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
+	adapter->roce_db.size = 8192;
+	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
+	return 0;
 }
 
 static int be_map_pci_bars(struct be_adapter *adapter)
@@ -3130,11 +3278,18 @@
 	int db_reg;
 
 	if (lancer_chip(adapter)) {
-		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
-			pci_resource_len(adapter->pdev, 0));
-		if (addr == NULL)
-			return -ENOMEM;
-		adapter->db = addr;
+		if (be_type_2_3(adapter)) {
+			addr = ioremap_nocache(
+					pci_resource_start(adapter->pdev, 0),
+					pci_resource_len(adapter->pdev, 0));
+			if (addr == NULL)
+				return -ENOMEM;
+			adapter->db = addr;
+		}
+		if (adapter->if_type == SLI_INTF_TYPE_3) {
+			if (lancer_roce_map_pci_bars(adapter))
+				goto pci_map_err;
+		}
 		return 0;
 	}
 
@@ -3159,14 +3314,19 @@
 	if (addr == NULL)
 		goto pci_map_err;
 	adapter->db = addr;
-
+	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
+		adapter->roce_db.size = 4096;
+		adapter->roce_db.io_addr =
+				pci_resource_start(adapter->pdev, db_reg);
+		adapter->roce_db.total_size =
+				pci_resource_len(adapter->pdev, db_reg);
+	}
 	return 0;
 pci_map_err:
 	be_unmap_pci_bars(adapter);
 	return -ENOMEM;
 }
 
-
 static void be_ctrl_cleanup(struct be_adapter *adapter)
 {
 	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
@@ -3272,6 +3432,8 @@
 	if (!adapter)
 		return;
 
+	be_roce_dev_remove(adapter);
+
 	unregister_netdev(adapter->netdev);
 
 	be_clear(adapter);
@@ -3280,8 +3442,6 @@
 
 	be_ctrl_cleanup(adapter);
 
-	be_sriov_disable(adapter);
-
 	pci_set_drvdata(pdev, NULL);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
@@ -3295,9 +3455,43 @@
 		!be_is_wol_excluded(adapter)) ? true : false;
 }
 
-static int be_get_config(struct be_adapter *adapter)
+u32 be_get_fw_log_level(struct be_adapter *adapter)
+{
+	struct be_dma_mem extfat_cmd;
+	struct be_fat_conf_params *cfgs;
+	int status;
+	u32 level = 0;
+	int j;
+
+	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+					     &extfat_cmd.dma);
+
+	if (!extfat_cmd.va) {
+		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
+			__func__);
+		goto err;
+	}
+
+	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
+	if (!status) {
+		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
+						sizeof(struct be_cmd_resp_hdr));
+		for (j = 0; j < cfgs->module[0].num_modes; j++) {
+			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
+				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
+		}
+	}
+	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+			    extfat_cmd.dma);
+err:
+	return level;
+}
+static int be_get_initial_config(struct be_adapter *adapter)
 {
 	int status;
+	u32 level;
 
 	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
 			&adapter->function_mode, &adapter->function_caps);
@@ -3335,10 +3529,13 @@
 	if (be_is_wol_supported(adapter))
 		adapter->wol = true;
 
+	level = be_get_fw_log_level(adapter);
+	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+
 	return 0;
 }
 
-static int be_dev_family_check(struct be_adapter *adapter)
+static int be_dev_type_check(struct be_adapter *adapter)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	u32 sli_intf = 0, if_type;
@@ -3350,17 +3547,27 @@
 		break;
 	case BE_DEVICE_ID2:
 	case OC_DEVICE_ID2:
-	case OC_DEVICE_ID5:
 		adapter->generation = BE_GEN3;
 		break;
 	case OC_DEVICE_ID3:
 	case OC_DEVICE_ID4:
 		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+						SLI_INTF_IF_TYPE_SHIFT;
 		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
 						SLI_INTF_IF_TYPE_SHIFT;
-
 		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
-			if_type != 0x02) {
+			!be_type_2_3(adapter)) {
+			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
+			return -EINVAL;
+		}
+		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
+					 SLI_INTF_FAMILY_SHIFT);
+		adapter->generation = BE_GEN3;
+		break;
+	case OC_DEVICE_ID5:
+		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
 			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
 			return -EINVAL;
 		}
@@ -3371,6 +3578,9 @@
 	default:
 		adapter->generation = 0;
 	}
+
+	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
 	return 0;
 }
 
@@ -3514,6 +3724,14 @@
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
 
+static bool be_reset_required(struct be_adapter *adapter)
+{
+	u32 reg;
+
+	pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
+	return reg;
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
 			const struct pci_device_id *pdev_id)
 {
@@ -3539,7 +3757,7 @@
 	adapter->pdev = pdev;
 	pci_set_drvdata(pdev, adapter);
 
-	status = be_dev_family_check(adapter);
+	status = be_dev_type_check(adapter);
 	if (status)
 		goto free_netdev;
 
@@ -3557,13 +3775,9 @@
 		}
 	}
 
-	status = be_sriov_enable(adapter);
-	if (status)
-		goto free_netdev;
-
 	status = be_ctrl_init(adapter);
 	if (status)
-		goto disable_sriov;
+		goto free_netdev;
 
 	if (lancer_chip(adapter)) {
 		status = lancer_wait_ready(adapter);
@@ -3590,9 +3804,11 @@
 	if (status)
 		goto ctrl_clean;
 
-	status = be_cmd_reset_function(adapter);
-	if (status)
-		goto ctrl_clean;
+	if (be_reset_required(adapter)) {
+		status = be_cmd_reset_function(adapter);
+		if (status)
+			goto ctrl_clean;
+	}
 
 	/* The INTR bit may be set in the card when probed by a kdump kernel
 	 * after a crash.
@@ -3604,7 +3820,7 @@
 	if (status)
 		goto ctrl_clean;
 
-	status = be_get_config(adapter);
+	status = be_get_initial_config(adapter);
 	if (status)
 		goto stats_clean;
 
@@ -3620,6 +3836,8 @@
 	if (status != 0)
 		goto unsetup;
 
+	be_roce_dev_add(adapter);
+
 	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
 		adapter->port_num);
 
@@ -3633,8 +3851,6 @@
 	be_stats_cleanup(adapter);
 ctrl_clean:
 	be_ctrl_cleanup(adapter);
-disable_sriov:
-	be_sriov_disable(adapter);
 free_netdev:
 	free_netdev(netdev);
 	pci_set_drvdata(pdev, NULL);
@@ -3749,6 +3965,11 @@
 
 	pci_disable_device(pdev);
 
+	/* The error could cause the FW to trigger a flash debug dump.
+	 * Resetting the card while flash dump is in progress
+	 * can cause it not to recover; wait for it to finish
+	 */
+	ssleep(30);
 	return PCI_ERS_RESULT_NEED_RESET;
 }
 
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
new file mode 100644
index 0000000..deecc44
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include "be.h"
+#include "be_cmds.h"
+
+static struct ocrdma_driver *ocrdma_drv;
+static LIST_HEAD(be_adapter_list);
+static DEFINE_MUTEX(be_adapter_list_lock);
+
+static void _be_roce_dev_add(struct be_adapter *adapter)
+{
+	struct be_dev_info dev_info;
+	int i, num_vec;
+	struct pci_dev *pdev = adapter->pdev;
+
+	if (!ocrdma_drv)
+		return;
+	if (pdev->device == OC_DEVICE_ID5) {
+		/* only msix is supported on these devices */
+		if (!msix_enabled(adapter))
+			return;
+		/* DPP region address and length */
+		dev_info.dpp_unmapped_addr = pci_resource_start(pdev, 2);
+		dev_info.dpp_unmapped_len = pci_resource_len(pdev, 2);
+	} else {
+		dev_info.dpp_unmapped_addr = 0;
+		dev_info.dpp_unmapped_len = 0;
+	}
+	dev_info.pdev = adapter->pdev;
+	if (adapter->sli_family == SKYHAWK_SLI_FAMILY)
+		dev_info.db = adapter->db;
+	else
+		dev_info.db = adapter->roce_db.base;
+	dev_info.unmapped_db = adapter->roce_db.io_addr;
+	dev_info.db_page_size = adapter->roce_db.size;
+	dev_info.db_total_size = adapter->roce_db.total_size;
+	dev_info.netdev = adapter->netdev;
+	memcpy(dev_info.mac_addr, adapter->netdev->dev_addr, ETH_ALEN);
+	dev_info.dev_family = adapter->sli_family;
+	if (msix_enabled(adapter)) {
+		/* provide all the vectors, so that EQ creation response
+		 * can decide which one to use.
+		 */
+		num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec;
+		dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX;
+		dev_info.msix.num_vectors = min(num_vec, MAX_ROCE_MSIX_VECTORS);
+		/* provide start index of the vector,
+		 * so in case of linear usage,
+		 * it can use the base as starting point.
+		 */
+		dev_info.msix.start_vector = adapter->num_evt_qs;
+		for (i = 0; i < dev_info.msix.num_vectors; i++) {
+			dev_info.msix.vector_list[i] =
+			    adapter->msix_entries[i].vector;
+		}
+	} else {
+		dev_info.msix.num_vectors = 0;
+		dev_info.intr_mode = BE_INTERRUPT_MODE_INTX;
+	}
+	adapter->ocrdma_dev = ocrdma_drv->add(&dev_info);
+}
+
+void be_roce_dev_add(struct be_adapter *adapter)
+{
+	if (be_roce_supported(adapter)) {
+		INIT_LIST_HEAD(&adapter->entry);
+		mutex_lock(&be_adapter_list_lock);
+		list_add_tail(&adapter->entry, &be_adapter_list);
+
+		/* invoke add() routine of roce driver only if
+		 * valid driver registered with add method and add() is not yet
+		 * invoked on a given adapter.
+		 */
+		_be_roce_dev_add(adapter);
+		mutex_unlock(&be_adapter_list_lock);
+	}
+}
+
+void _be_roce_dev_remove(struct be_adapter *adapter)
+{
+	if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
+		ocrdma_drv->remove(adapter->ocrdma_dev);
+	adapter->ocrdma_dev = NULL;
+}
+
+void be_roce_dev_remove(struct be_adapter *adapter)
+{
+	if (be_roce_supported(adapter)) {
+		mutex_lock(&be_adapter_list_lock);
+		_be_roce_dev_remove(adapter);
+		list_del(&adapter->entry);
+		mutex_unlock(&be_adapter_list_lock);
+	}
+}
+
+void _be_roce_dev_open(struct be_adapter *adapter)
+{
+	if (ocrdma_drv && adapter->ocrdma_dev &&
+	    ocrdma_drv->state_change_handler)
+		ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 0);
+}
+
+void be_roce_dev_open(struct be_adapter *adapter)
+{
+	if (be_roce_supported(adapter)) {
+		mutex_lock(&be_adapter_list_lock);
+		_be_roce_dev_open(adapter);
+		mutex_unlock(&be_adapter_list_lock);
+	}
+}
+
+void _be_roce_dev_close(struct be_adapter *adapter)
+{
+	if (ocrdma_drv && adapter->ocrdma_dev &&
+	    ocrdma_drv->state_change_handler)
+		ocrdma_drv->state_change_handler(adapter->ocrdma_dev, 1);
+}
+
+void be_roce_dev_close(struct be_adapter *adapter)
+{
+	if (be_roce_supported(adapter)) {
+		mutex_lock(&be_adapter_list_lock);
+		_be_roce_dev_close(adapter);
+		mutex_unlock(&be_adapter_list_lock);
+	}
+}
+
+int be_roce_register_driver(struct ocrdma_driver *drv)
+{
+	struct be_adapter *dev;
+
+	mutex_lock(&be_adapter_list_lock);
+	if (ocrdma_drv) {
+		mutex_unlock(&be_adapter_list_lock);
+		return -EINVAL;
+	}
+	ocrdma_drv = drv;
+	list_for_each_entry(dev, &be_adapter_list, entry) {
+		struct net_device *netdev;
+		_be_roce_dev_add(dev);
+		netdev = dev->netdev;
+		if (netif_running(netdev) && netif_oper_up(netdev))
+			_be_roce_dev_open(dev);
+	}
+	mutex_unlock(&be_adapter_list_lock);
+	return 0;
+}
+EXPORT_SYMBOL(be_roce_register_driver);
+
+void be_roce_unregister_driver(struct ocrdma_driver *drv)
+{
+	struct be_adapter *dev;
+
+	mutex_lock(&be_adapter_list_lock);
+	list_for_each_entry(dev, &be_adapter_list, entry) {
+		if (dev->ocrdma_dev)
+			_be_roce_dev_remove(dev);
+	}
+	ocrdma_drv = NULL;
+	mutex_unlock(&be_adapter_list_lock);
+}
+EXPORT_SYMBOL(be_roce_unregister_driver);
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
new file mode 100644
index 0000000..db4ea80
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2005 - 2011 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef BE_ROCE_H
+#define BE_ROCE_H
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+struct ocrdma_dev;
+
+enum be_interrupt_mode {
+	BE_INTERRUPT_MODE_MSIX	= 0,
+	BE_INTERRUPT_MODE_INTX	= 1,
+	BE_INTERRUPT_MODE_MSI	= 2,
+};
+
+#define MAX_ROCE_MSIX_VECTORS   16
+struct be_dev_info {
+	u8 __iomem *db;
+	u64 unmapped_db;
+	u32 db_page_size;
+	u32 db_total_size;
+	u64 dpp_unmapped_addr;
+	u32 dpp_unmapped_len;
+	struct pci_dev *pdev;
+	struct net_device *netdev;
+	u8 mac_addr[ETH_ALEN];
+	u32 dev_family;
+	enum be_interrupt_mode intr_mode;
+	struct {
+		int num_vectors;
+		int start_vector;
+		u32 vector_list[MAX_ROCE_MSIX_VECTORS];
+	} msix;
+};
+
+/* The ocrdma driver registers these callback functions with the NIC driver. */
+struct ocrdma_driver {
+	unsigned char name[32];
+	struct ocrdma_dev *(*add) (struct be_dev_info *dev_info);
+	void (*remove) (struct ocrdma_dev *);
+	void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
+};
+
+enum {
+	BE_DEV_UP	= 0,
+	BE_DEV_DOWN	= 1
+};
+
+/* APIs for the RoCE driver to register callback handlers, which will be
+ * invoked when a device is added or removed, or brought up or down.
+ */
+int be_roce_register_driver(struct ocrdma_driver *drv);
+void be_roce_unregister_driver(struct ocrdma_driver *drv);
+
+/* API for RoCE driver to issue mailbox commands */
+int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
+		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status);
+
+#endif /* BE_ROCE_H */
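
For context, the sketch below shows how an RDMA driver might plug into this
registration interface. It is only a sketch: the example_* names and the
example_alloc_rdma_dev()/example_free_rdma_dev() helpers are hypothetical
placeholders, not part of this patch or of the real ocrdma driver.

/* Hypothetical consumer of the be_roce registration API (sketch only). */
#include <linux/module.h>
#include "be_roce.h"

static struct ocrdma_dev *example_add(struct be_dev_info *dev_info)
{
	/* Allocate an RDMA device and map the doorbell region described by
	 * dev_info (db, db_page_size, msix vector list, ...). */
	return example_alloc_rdma_dev(dev_info);	/* hypothetical helper */
}

static void example_remove(struct ocrdma_dev *dev)
{
	example_free_rdma_dev(dev);			/* hypothetical helper */
}

static void example_state_change(struct ocrdma_dev *dev, u32 new_state)
{
	/* new_state follows the BE_DEV_UP / BE_DEV_DOWN convention above. */
}

static struct ocrdma_driver example_drv = {
	.name			= "example_roce",
	.add			= example_add,
	.remove			= example_remove,
	.state_change_handler	= example_state_change,
};

static int __init example_init(void)
{
	/* Walks the be2net adapters probed so far and calls .add for each. */
	return be_roce_register_driver(&example_drv);
}

static void __exit example_exit(void)
{
	be_roce_unregister_driver(&example_drv);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Because be_roce_dev_add() records every adapter on be_adapter_list and
be_roce_register_driver() walks that list, the NIC and RDMA modules may be
loaded in either order.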
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 1637b98..9d71c9c 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -545,9 +545,6 @@
 	/* Reset the chip to erase previous misconfiguration. */
 	iowrite32(0x00000001, ioaddr + BCR);
 
-	dev->base_addr = (unsigned long)ioaddr;
-	dev->irq = irq;
-
 	/* Make certain the descriptor lists are aligned. */
 	np = netdev_priv(dev);
 	np->mem = ioaddr;
@@ -832,11 +829,13 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem *ioaddr = np->mem;
-	int i;
+	const int irq = np->pci_dev->irq;
+	int rc, i;
 
 	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */
 
-	if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev))
+	rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
+	if (rc)
 		return -EAGAIN;
 
 	for (i = 0; i < 3; i++)
@@ -924,8 +923,7 @@
 	np->reset_timer.data = (unsigned long) dev;
 	np->reset_timer.function = reset_timer;
 	np->reset_timer_armed = 0;
-
-	return 0;
+	return rc;
 }
 
 
@@ -1910,7 +1908,7 @@
 	del_timer_sync(&np->timer);
 	del_timer_sync(&np->reset_timer);
 
-	free_irq(dev->irq, dev);
+	free_irq(np->pci_dev->irq, dev);
 
 	/* Free all the skbuffs in the Rx queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index a12b3f5..7fa0227 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -1161,6 +1161,7 @@
 	.set_settings		= fec_enet_set_settings,
 	.get_drvinfo		= fec_enet_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 7b34d8c..97f947b 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -811,6 +811,7 @@
 	.get_link = ethtool_op_get_link,
 	.get_msglevel = mpc52xx_fec_get_msglevel,
 	.set_msglevel = mpc52xx_fec_set_msglevel,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index e4e6cd2..2b7633f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -963,6 +963,7 @@
 	.get_msglevel = fs_get_msglevel,
 	.set_msglevel = fs_set_msglevel,
 	.get_regs = fs_get_regs,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e7bed53..1adb024 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -136,7 +136,7 @@
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			      int amount_pull);
+			      int amount_pull, struct napi_struct *napi);
 void gfar_halt(struct net_device *dev);
 static void gfar_halt_nodisable(struct net_device *dev);
 void gfar_start(struct net_device *dev);
@@ -2675,12 +2675,12 @@
 /* gfar_process_frame() -- handle one incoming packet if skb
  * isn't NULL.  */
 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			      int amount_pull)
+			      int amount_pull, struct napi_struct *napi)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct rxfcb *fcb = NULL;
 
-	int ret;
+	gro_result_t ret;
 
 	/* fcb is at the beginning if exists */
 	fcb = (struct rxfcb *)skb->data;
@@ -2719,9 +2719,9 @@
 		__vlan_hwaccel_put_tag(skb, fcb->vlctl);
 
 	/* Send the packet up the stack */
-	ret = netif_receive_skb(skb);
+	ret = napi_gro_receive(napi, skb);
 
-	if (NET_RX_DROP == ret)
+	if (GRO_DROP == ret)
 		priv->extra_stats.kernel_dropped++;
 
 	return 0;
@@ -2783,7 +2783,8 @@
 				skb_put(skb, pkt_len);
 				rx_queue->stats.rx_bytes += pkt_len;
 				skb_record_rx_queue(skb, rx_queue->qindex);
-				gfar_process_frame(dev, skb, amount_pull);
+				gfar_process_frame(dev, skb, amount_pull,
+						&rx_queue->grp->napi);
 
 			} else {
 				netif_warn(priv, rx_err, dev, "Missing skb!\n");
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4c9f8d4..2136c7f 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1210,4 +1210,7 @@
 	struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
 };
 
+/* The gianfar_ptp module will set this variable */
+extern int gfar_phc_index;
+
 #endif /* __GIANFAR_H */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8d74efd..8a02557 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
@@ -1739,6 +1740,34 @@
 	return ret;
 }
 
+int gfar_phc_index = -1;
+
+static int gfar_get_ts_info(struct net_device *dev,
+			    struct ethtool_ts_info *info)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
+		info->so_timestamping =
+			SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE;
+		info->phc_index = -1;
+		return 0;
+	}
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = gfar_phc_index;
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_ALL);
+	return 0;
+}
+
 const struct ethtool_ops gfar_ethtool_ops = {
 	.get_settings = gfar_gsettings,
 	.set_settings = gfar_ssettings,
@@ -1761,4 +1790,5 @@
 #endif
 	.set_rxnfc = gfar_set_nfc,
 	.get_rxnfc = gfar_get_nfc,
+	.get_ts_info = gfar_get_ts_info,
 };
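
With .get_ts_info wired up, user space can read the reported capabilities
(including phc_index) through the ETHTOOL_GET_TS_INFO ioctl, which is what
`ethtool -T` does. A minimal user-space sketch follows, assuming kernel
headers that already define struct ethtool_ts_info; the interface name
"eth0" is only an example and error handling is kept to a minimum.

/* User-space sketch: query the timestamping info exposed by .get_ts_info. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example interface */
	ifr.ifr_data = (char *)&info;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("phc_index=%d so_timestamping=0x%x\n",
		       info.phc_index, info.so_timestamping);
	close(fd);
	return 0;
}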
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 5fd620b..c08e5d4 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -515,6 +515,7 @@
 		err = PTR_ERR(etsects->clock);
 		goto no_clock;
 	}
+	gfar_phc_index = ptp_clock_index(etsects->clock);
 
 	dev_set_drvdata(&dev->dev, etsects);
 
@@ -538,6 +539,7 @@
 	gfar_write(&etsects->regs->tmr_temask, 0);
 	gfar_write(&etsects->regs->tmr_ctrl,   0);
 
+	gfar_phc_index = -1;
 	ptp_clock_unregister(etsects->clock);
 	iounmap(etsects->regs);
 	release_resource(etsects->rsrc);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 17a46e7..9ac14f8 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -116,10 +116,10 @@
 	.maxGroupAddrInHash = 4,
 	.maxIndAddrInHash = 4,
 	.prel = 7,
-	.maxFrameLength = 1518,
+	.maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
 	.minFrameLength = 64,
-	.maxD1Length = 1520,
-	.maxD2Length = 1520,
+	.maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
+	.maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */
 	.vlantype = 0x8100,
 	.ecamptr = ((uint32_t) NULL),
 	.eventRegMask = UCCE_OTHER,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 2e395a2..f71b3e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -877,7 +877,7 @@
 
 /* Driver definitions */
 #define TX_BD_RING_LEN                          0x10
-#define RX_BD_RING_LEN                          0x10
+#define RX_BD_RING_LEN                          0x20
 
 #define TX_RING_MOD_MASK(size)                  (size-1)
 #define RX_RING_MOD_MASK(size)                  (size-1)
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index a97257f..37b0353 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -415,6 +415,7 @@
 	.get_ethtool_stats      = uec_get_ethtool_stats,
 	.get_wol		= uec_get_wol,
 	.set_wol		= uec_set_wol,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 void uec_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/fujitsu/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c
index 3d94797..4b80dc4 100644
--- a/drivers/net/ethernet/fujitsu/at1700.c
+++ b/drivers/net/ethernet/fujitsu/at1700.c
@@ -27,7 +27,7 @@
 	ATI provided their EEPROM configuration code header file.
     Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes.
 
-    MCA bus (AT1720) support by Rene Schmit <rene@bss.lu>
+    MCA bus (AT1720) support (now deleted) by Rene Schmit <rene@bss.lu>
 
   Bugs:
 	The MB86965 has a design flaw that makes all probes unreliable.  Not
@@ -38,7 +38,6 @@
 #include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/mca-legacy.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -79,24 +78,6 @@
 	0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
 };
 
-/*
- *	MCA
- */
-#ifdef CONFIG_MCA_LEGACY
-static int at1700_ioaddr_pattern[] __initdata = {
-	0x00, 0x04, 0x01, 0x05, 0x02, 0x06, 0x03, 0x07
-};
-
-static int at1700_mca_probe_list[] __initdata = {
-	0x400, 0x1400, 0x2400, 0x3400, 0x4400, 0x5400, 0x6400, 0x7400, 0
-};
-
-static int at1700_irq_pattern[] __initdata = {
-	0x00, 0x00, 0x00, 0x30, 0x70, 0xb0, 0x00, 0x00,
-	0x00, 0xf0, 0x34, 0x74, 0xb4, 0x00, 0x00, 0xf4, 0x00
-};
-#endif
-
 /* use 0 for production, 1 for verification, >2 for debug */
 #ifndef NET_DEBUG
 #define NET_DEBUG 1
@@ -114,7 +95,6 @@
 	uint tx_queue_ready:1;			/* Tx queue is ready to be sent. */
 	uint rx_started:1;			/* Packets are Rxing. */
 	uchar tx_queue;				/* Number of packet on the Tx queue. */
-	char mca_slot;				/* -1 means ISA */
 	ushort tx_queue_len;			/* Current length of the Tx queue. */
 };
 
@@ -166,21 +146,6 @@
 static void net_tx_timeout (struct net_device *dev);
 
 
-#ifdef CONFIG_MCA_LEGACY
-struct at1720_mca_adapters_struct {
-	char* name;
-	int id;
-};
-/* rEnE : maybe there are others I don't know off... */
-
-static struct at1720_mca_adapters_struct at1720_mca_adapters[] __initdata = {
-	{ "Allied Telesys AT1720AT",	0x6410 },
-	{ "Allied Telesys AT1720BT", 	0x6413 },
-	{ "Allied Telesys AT1720T",	0x6416 },
-	{ NULL, 0 },
-};
-#endif
-
 /* Check for a network adaptor of this type, and return '0' iff one exists.
    If dev->base_addr == 0, probe all likely locations.
    If dev->base_addr == 1, always return failure.
@@ -194,11 +159,6 @@
 
 static void cleanup_card(struct net_device *dev)
 {
-#ifdef CONFIG_MCA_LEGACY
-	struct net_local *lp = netdev_priv(dev);
-	if (lp->mca_slot >= 0)
-		mca_mark_as_unused(lp->mca_slot);
-#endif
 	free_irq(dev->irq, NULL);
 	release_region(dev->base_addr, AT1700_IO_EXTENT);
 }
@@ -273,7 +233,7 @@
 	static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
 	static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
 	unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
-	int slot, ret = -ENODEV;
+	int ret = -ENODEV;
 	struct net_local *lp = netdev_priv(dev);
 
 	if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME))
@@ -288,64 +248,6 @@
 		   ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5),
 		   read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl));
 #endif
-
-#ifdef CONFIG_MCA_LEGACY
-	/* rEnE (rene@bss.lu): got this from 3c509 driver source , adapted for AT1720 */
-
-    /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, heavily
-	modified by Chris Beauregard (cpbeaure@csclub.uwaterloo.ca)
-	to support standard MCA probing. */
-
-	/* redone for multi-card detection by ZP Gu (zpg@castle.net) */
-	/* now works as a module */
-
-	if (MCA_bus) {
-		int j;
-		int l_i;
-		u_char pos3, pos4;
-
-		for (j = 0; at1720_mca_adapters[j].name != NULL; j ++) {
-			slot = 0;
-			while (slot != MCA_NOTFOUND) {
-
-				slot = mca_find_unused_adapter( at1720_mca_adapters[j].id, slot );
-				if (slot == MCA_NOTFOUND) break;
-
-				/* if we get this far, an adapter has been detected and is
-				enabled */
-
-				pos3 = mca_read_stored_pos( slot, 3 );
-				pos4 = mca_read_stored_pos( slot, 4 );
-
-				for (l_i = 0; l_i < 8; l_i++)
-					if (( pos3 & 0x07) == at1700_ioaddr_pattern[l_i])
-						break;
-				ioaddr = at1700_mca_probe_list[l_i];
-
-				for (irq = 0; irq < 0x10; irq++)
-					if (((((pos4>>4) & 0x0f) | (pos3 & 0xf0)) & 0xff) == at1700_irq_pattern[irq])
-						break;
-
-					/* probing for a card at a particular IO/IRQ */
-				if ((dev->irq && dev->irq != irq) ||
-				    (dev->base_addr && dev->base_addr != ioaddr)) {
-				  	slot++;		/* probing next slot */
-				  	continue;
-				}
-
-				dev->irq = irq;
-
-				/* claim the slot */
-				mca_set_adapter_name( slot, at1720_mca_adapters[j].name );
-				mca_mark_as_used(slot);
-
-				goto found;
-			}
-		}
-		/* if we get here, we didn't find an MCA adapter - try ISA */
-	}
-#endif
-	slot = -1;
 	/* We must check for the EEPROM-config boards first, else accessing
 	   IOCONFIG0 will move the board! */
 	if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr &&
@@ -360,11 +262,7 @@
 		goto err_out;
 	}
 
-#ifdef CONFIG_MCA_LEGACY
-found:
-#endif
-
-		/* Reset the internal state machines. */
+	/* Reset the internal state machines. */
 	outb(0, ioaddr + RESET);
 
 	if (is_at1700) {
@@ -380,11 +278,11 @@
 					break;
 			}
 			if (i == 8) {
-				goto err_mca;
+				goto err_out;
 			}
 		} else {
 			if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr)
-				goto err_mca;
+				goto err_out;
 			irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03];
 		}
 	}
@@ -464,23 +362,17 @@
 	spin_lock_init(&lp->lock);
 
 	lp->jumpered = is_fmv18x;
-	lp->mca_slot = slot;
 	/* Snarf the interrupt vector now. */
 	ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev);
 	if (ret) {
 		printk(KERN_ERR "AT1700 at %#3x is unusable due to a "
 		       "conflict on IRQ %d.\n",
 		       ioaddr, irq);
-		goto err_mca;
+		goto err_out;
 	}
 
 	return 0;
 
-err_mca:
-#ifdef CONFIG_MCA_LEGACY
-	if (slot >= 0)
-		mca_mark_as_unused(slot);
-#endif
 err_out:
 	release_region(ioaddr, AT1700_IO_EXTENT);
 	return ret;
diff --git a/drivers/net/ethernet/i825xx/3c523.c b/drivers/net/ethernet/i825xx/3c523.c
deleted file mode 100644
index 8451ecd..0000000
--- a/drivers/net/ethernet/i825xx/3c523.c
+++ /dev/null
@@ -1,1312 +0,0 @@
-/*
-   net-3-driver for the 3c523 Etherlink/MC card (i82586 Ethernet chip)
-
-
-   This is an extension to the Linux operating system, and is covered by the
-   same GNU General Public License that covers that work.
-
-   Copyright 1995, 1996 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca)
-
-   This is basically Michael Hipp's ni52 driver, with a new probing
-   algorithm and some minor changes to the 82586 CA and reset routines.
-   Thanks a lot Michael for a really clean i82586 implementation!  Unless
-   otherwise documented in ni52.c, any bugs are mine.
-
-   Contrary to the Ethernet-HOWTO, this isn't based on the 3c507 driver in
-   any way.  The ni52 is a lot easier to modify.
-
-   sources:
-   ni52.c
-
-   Crynwr packet driver collection was a great reference for my first
-   attempt at this sucker.  The 3c507 driver also helped, until I noticed
-   that ni52.c was a lot nicer.
-
-   EtherLink/MC: Micro Channel Ethernet Adapter Technical Reference
-   Manual, courtesy of 3Com CardFacts, documents the 3c523-specific
-   stuff.  Information on CardFacts is found in the Ethernet HOWTO.
-   Also see <a href="http://www.3com.com/">
-
-   Microprocessor Communications Support Chips, T.J. Byers, ISBN
-   0-444-01224-9, has a section on the i82586.  It tells you just enough
-   to know that you really don't want to learn how to program the chip.
-
-   The original device probe code was stolen from ps2esdi.c
-
-   Known Problems:
-   Since most of the code was stolen from ni52.c, you'll run across the
-   same bugs in the 0.62 version of ni52.c, plus maybe a few because of
-   the 3c523 idiosynchacies.  The 3c523 has 16K of RAM though, so there
-   shouldn't be the overrun problem that the 8K ni52 has.
-
-   This driver is for a 16K adapter.  It should work fine on the 64K
-   adapters, but it will only use one of the 4 banks of RAM.  Modifying
-   this for the 64K version would require a lot of heinous bank
-   switching, which I'm sure not interested in doing.  If you try to
-   implement a bank switching version, you'll basically have to remember
-   what bank is enabled and do a switch every time you access a memory
-   location that's not current.  You'll also have to remap pointers on
-   the driver side, because it only knows about 16K of the memory.
-   Anyone desperate or masochistic enough to try?
-
-   It seems to be stable now when multiple transmit buffers are used.  I
-   can't see any performance difference, but then I'm working on a 386SX.
-
-   Multicast doesn't work.  It doesn't even pretend to work.  Don't use
-   it.  Don't compile your kernel with multicast support.  I don't know
-   why.
-
-   Features:
-   This driver is useable as a loadable module.  If you try to specify an
-   IRQ or a IO address (via insmod 3c523.o irq=xx io=0xyyy), it will
-   search the MCA slots until it finds a 3c523 with the specified
-   parameters.
-
-   This driver does support multiple ethernet cards when used as a module
-   (up to MAX_3C523_CARDS, the default being 4)
-
-   This has been tested with both BNC and TP versions, internal and
-   external transceivers.  Haven't tested with the 64K version (that I
-   know of).
-
-   History:
-   Jan 1st, 1996
-   first public release
-   Feb 4th, 1996
-   update to 1.3.59, incorporated multicast diffs from ni52.c
-   Feb 15th, 1996
-   added shared irq support
-   Apr 1999
-   added support for multiple cards when used as a module
-   added option to disable multicast as is causes problems
-       Ganesh Sittampalam <ganesh.sittampalam@magdalen.oxford.ac.uk>
-       Stuart Adamson <stuart.adamson@compsoc.net>
-   Nov 2001
-   added support for ethtool (jgarzik)
-
-   $Header: /fsys2/home/chrisb/linux-1.3.59-MCA/drivers/net/RCS/3c523.c,v 1.1 1996/02/05 01:53:46 chrisb Exp chrisb $
- */
-
-#define DRV_NAME		"3c523"
-#define DRV_VERSION		"17-Nov-2001"
-
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/mca-legacy.h>
-#include <linux/ethtool.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-#include <asm/io.h>
-
-#include "3c523.h"
-
-/*************************************************************************/
-#define DEBUG			/* debug on */
-#define SYSBUSVAL 0		/* 1 = 8 Bit, 0 = 16 bit - 3c523 only does 16 bit */
-#undef ELMC_MULTICAST		/* Disable multicast support as it is somewhat seriously broken at the moment */
-
-#define make32(ptr16) (p->memtop + (short) (ptr16) )
-#define make24(ptr32) ((char *) (ptr32) - p->base)
-#define make16(ptr32) ((unsigned short) ((unsigned long) (ptr32) - (unsigned long) p->memtop ))
-
-/*************************************************************************/
-/*
-   Tables to which we can map values in the configuration registers.
- */
-static int irq_table[] __initdata = {
-	12, 7, 3, 9
-};
-
-static int csr_table[] __initdata = {
-	0x300, 0x1300, 0x2300, 0x3300
-};
-
-static int shm_table[] __initdata = {
-	0x0c0000, 0x0c8000, 0x0d0000, 0x0d8000
-};
-
-/******************* how to calculate the buffers *****************************
-
-
-  * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
-  * --------------- in a different (more stable?) mode. Only in this mode it's
-  *                 possible to configure the driver with 'NO_NOPCOMMANDS'
-
-sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
-sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
-sizeof(rfd) = 24; sizeof(rbd) = 12;
-sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
-sizeof(nop_cmd) = 8;
-
-  * if you don't know the driver, better do not change this values: */
-
-#define RECV_BUFF_SIZE 1524	/* slightly oversized */
-#define XMIT_BUFF_SIZE 1524	/* slightly oversized */
-#define NUM_XMIT_BUFFS 1	/* config for both, 8K and 16K shmem */
-#define NUM_RECV_BUFFS_8  4	/* config for 8K shared mem */
-#define NUM_RECV_BUFFS_16 9	/* config for 16K shared mem */
-
-#if (NUM_XMIT_BUFFS == 1)
-#define NO_NOPCOMMANDS		/* only possible with NUM_XMIT_BUFFS=1 */
-#endif
-
-/**************************************************************************/
-
-#define DELAY(x) { mdelay(32 * x); }
-
-/* a much shorter delay: */
-#define DELAY_16(); { udelay(16) ; }
-
-/* wait for command with timeout: */
-#define WAIT_4_SCB_CMD() { int i; \
-  for(i=0;i<1024;i++) { \
-    if(!p->scb->cmd) break; \
-    DELAY_16(); \
-    if(i == 1023) { \
-      pr_warning("%s:%d: scb_cmd timed out .. resetting i82586\n",\
-      	dev->name,__LINE__); \
-      elmc_id_reset586(); } } }
-
-static irqreturn_t elmc_interrupt(int irq, void *dev_id);
-static int elmc_open(struct net_device *dev);
-static int elmc_close(struct net_device *dev);
-static netdev_tx_t elmc_send_packet(struct sk_buff *, struct net_device *);
-static struct net_device_stats *elmc_get_stats(struct net_device *dev);
-static void elmc_timeout(struct net_device *dev);
-#ifdef ELMC_MULTICAST
-static void set_multicast_list(struct net_device *dev);
-#endif
-static const struct ethtool_ops netdev_ethtool_ops;
-
-/* helper-functions */
-static int init586(struct net_device *dev);
-static int check586(struct net_device *dev, unsigned long where, unsigned size);
-static void alloc586(struct net_device *dev);
-static void startrecv586(struct net_device *dev);
-static void *alloc_rfa(struct net_device *dev, void *ptr);
-static void elmc_rcv_int(struct net_device *dev);
-static void elmc_xmt_int(struct net_device *dev);
-static void elmc_rnr_int(struct net_device *dev);
-
-struct priv {
-	unsigned long base;
-	char *memtop;
-	unsigned long mapped_start;		/* Start of ioremap */
-	volatile struct rfd_struct *rfd_last, *rfd_top, *rfd_first;
-	volatile struct scp_struct *scp;	/* volatile is important */
-	volatile struct iscp_struct *iscp;	/* volatile is important */
-	volatile struct scb_struct *scb;	/* volatile is important */
-	volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
-#if (NUM_XMIT_BUFFS == 1)
-	volatile struct transmit_cmd_struct *xmit_cmds[2];
-	volatile struct nop_cmd_struct *nop_cmds[2];
-#else
-	volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
-	volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
-#endif
-	volatile int nop_point, num_recv_buffs;
-	volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
-	volatile int xmit_count, xmit_last;
-	volatile int slot;
-};
-
-#define elmc_attn586()  {elmc_do_attn586(dev->base_addr,ELMC_CTRL_INTE);}
-#define elmc_reset586() {elmc_do_reset586(dev->base_addr,ELMC_CTRL_INTE);}
-
-/* with interrupts disabled - this will clear the interrupt bit in the
-   3c523 control register, and won't put it back.  This effectively
-   disables interrupts on the card. */
-#define elmc_id_attn586()  {elmc_do_attn586(dev->base_addr,0);}
-#define elmc_id_reset586() {elmc_do_reset586(dev->base_addr,0);}
-
-/*************************************************************************/
-/*
-   Do a Channel Attention on the 3c523.  This is extremely board dependent.
- */
-static void elmc_do_attn586(int ioaddr, int ints)
-{
-	/* the 3c523 requires a minimum of 500 ns.  The delays here might be
-	   a little too large, and hence they may cut the performance of the
-	   card slightly.  If someone who knows a little more about Linux
-	   timing would care to play with these, I'd appreciate it. */
-
-	/* this bit masking stuff is crap.  I'd rather have separate
-	   registers with strobe triggers for each of these functions.  <sigh>
-	   Ya take what ya got. */
-
-	outb(ELMC_CTRL_RST | 0x3 | ELMC_CTRL_CA | ints, ioaddr + ELMC_CTRL);
-	DELAY_16();		/* > 500 ns */
-	outb(ELMC_CTRL_RST | 0x3 | ints, ioaddr + ELMC_CTRL);
-}
-
-/*************************************************************************/
-/*
-   Reset the 82586 on the 3c523.  Also very board dependent.
- */
-static void elmc_do_reset586(int ioaddr, int ints)
-{
-	/* toggle the RST bit low then high */
-	outb(0x3 | ELMC_CTRL_LBK, ioaddr + ELMC_CTRL);
-	DELAY_16();		/* > 500 ns */
-	outb(ELMC_CTRL_RST | ELMC_CTRL_LBK | 0x3, ioaddr + ELMC_CTRL);
-
-	elmc_do_attn586(ioaddr, ints);
-}
-
-/**********************************************
- * close device
- */
-
-static int elmc_close(struct net_device *dev)
-{
-	netif_stop_queue(dev);
-	elmc_id_reset586();	/* the hard way to stop the receiver */
-	free_irq(dev->irq, dev);
-	return 0;
-}
-
-/**********************************************
- * open device
- */
-
-static int elmc_open(struct net_device *dev)
-{
-	int ret;
-
-	elmc_id_attn586();	/* disable interrupts */
-
-	ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED,
-			  dev->name, dev);
-	if (ret) {
-		pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq);
-		elmc_id_reset586();
-		return ret;
-	}
-	alloc586(dev);
-	init586(dev);
-	startrecv586(dev);
-	netif_start_queue(dev);
-	return 0;		/* most done by init */
-}
-
-/**********************************************
- * Check to see if there's an 82586 out there.
- */
-
-static int __init check586(struct net_device *dev, unsigned long where, unsigned size)
-{
-	struct priv *p = netdev_priv(dev);
-	char *iscp_addrs[2];
-	int i = 0;
-
-	p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000;
-	p->memtop = isa_bus_to_virt((unsigned long)where) + size;
-	p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
-	memset((char *) p->scp, 0, sizeof(struct scp_struct));
-	p->scp->sysbus = SYSBUSVAL;	/* 1 = 8Bit-Bus, 0 = 16 Bit */
-
-	iscp_addrs[0] = isa_bus_to_virt((unsigned long)where);
-	iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct);
-
-	for (i = 0; i < 2; i++) {
-		p->iscp = (struct iscp_struct *) iscp_addrs[i];
-		memset((char *) p->iscp, 0, sizeof(struct iscp_struct));
-
-		p->scp->iscp = make24(p->iscp);
-		p->iscp->busy = 1;
-
-		elmc_id_reset586();
-
-		/* reset586 does an implicit CA */
-
-		/* apparently, you sometimes have to kick the 82586 twice... */
-		elmc_id_attn586();
-		DELAY(1);
-
-		if (p->iscp->busy) {	/* i82586 clears 'busy' after successful init */
-			return 0;
-		}
-	}
-	return 1;
-}
-
-/******************************************************************
- * set iscp at the right place, called by elmc_probe and open586.
- */
-
-static void alloc586(struct net_device *dev)
-{
-	struct priv *p = netdev_priv(dev);
-
-	elmc_id_reset586();
-	DELAY(2);
-
-	p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
-	p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start);
-	p->iscp = (struct iscp_struct *) ((char *) p->scp - sizeof(struct iscp_struct));
-
-	memset((char *) p->iscp, 0, sizeof(struct iscp_struct));
-	memset((char *) p->scp, 0, sizeof(struct scp_struct));
-
-	p->scp->iscp = make24(p->iscp);
-	p->scp->sysbus = SYSBUSVAL;
-	p->iscp->scb_offset = make16(p->scb);
-
-	p->iscp->busy = 1;
-	elmc_id_reset586();
-	elmc_id_attn586();
-
-	DELAY(2);
-
-	if (p->iscp->busy)
-		pr_err("%s: Init-Problems (alloc).\n", dev->name);
-
-	memset((char *) p->scb, 0, sizeof(struct scb_struct));
-}
-
-/*****************************************************************/
-
-static int elmc_getinfo(char *buf, int slot, void *d)
-{
-	int len = 0;
-	struct net_device *dev = d;
-
-	if (dev == NULL)
-		return len;
-
-	len += sprintf(buf + len, "Revision: 0x%x\n",
-		       inb(dev->base_addr + ELMC_REVISION) & 0xf);
-	len += sprintf(buf + len, "IRQ: %d\n", dev->irq);
-	len += sprintf(buf + len, "IO Address: %#lx-%#lx\n", dev->base_addr,
-		       dev->base_addr + ELMC_IO_EXTENT);
-	len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
-		       dev->mem_end - 1);
-	len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ?
-		       "External" : "Internal");
-	len += sprintf(buf + len, "Device: %s\n", dev->name);
-	len += sprintf(buf + len, "Hardware Address: %pM\n",
-		       dev->dev_addr);
-
-	return len;
-}				/* elmc_getinfo() */
-
-static const struct net_device_ops netdev_ops = {
-	.ndo_open 		= elmc_open,
-	.ndo_stop		= elmc_close,
-	.ndo_get_stats		= elmc_get_stats,
-	.ndo_start_xmit		= elmc_send_packet,
-	.ndo_tx_timeout		= elmc_timeout,
-#ifdef ELMC_MULTICAST
-	.ndo_set_rx_mode	= set_multicast_list,
-#endif
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_set_mac_address 	= eth_mac_addr,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-/*****************************************************************/
-
-static int __init do_elmc_probe(struct net_device *dev)
-{
-	static int slot;
-	int base_addr = dev->base_addr;
-	int irq = dev->irq;
-	u_char status = 0;
-	u_char revision = 0;
-	int i = 0;
-	unsigned int size = 0;
-	int retval;
-	struct priv *pr = netdev_priv(dev);
-
-	if (MCA_bus == 0) {
-		return -ENODEV;
-	}
-	/* search through the slots for the 3c523. */
-	slot = mca_find_adapter(ELMC_MCA_ID, 0);
-	while (slot != -1) {
-		status = mca_read_stored_pos(slot, 2);
-
-		dev->irq=irq_table[(status & ELMC_STATUS_IRQ_SELECT) >> 6];
-		dev->base_addr=csr_table[(status & ELMC_STATUS_CSR_SELECT) >> 1];
-
-		/*
-		   If we're trying to match a specified irq or IO address,
-		   we'll reject a match unless it's what we're looking for.
-		   Also reject it if the card is already in use.
-		 */
-
-		if ((irq && irq != dev->irq) ||
-		    (base_addr && base_addr != dev->base_addr)) {
-			slot = mca_find_adapter(ELMC_MCA_ID, slot + 1);
-			continue;
-		}
-		if (!request_region(dev->base_addr, ELMC_IO_EXTENT, DRV_NAME)) {
-			slot = mca_find_adapter(ELMC_MCA_ID, slot + 1);
-			continue;
-		}
-
-		/* found what we're looking for... */
-		break;
-	}
-
-	/* we didn't find any 3c523 in the slots we checked for */
-	if (slot == MCA_NOTFOUND)
-		return (base_addr || irq) ? -ENXIO : -ENODEV;
-
-	mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC");
-	mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev);
-
-	/* if we get this far, adapter has been found - carry on */
-	pr_info("%s: 3c523 adapter found in slot %d\n", dev->name, slot + 1);
-
-	/* Now we extract configuration info from the card.
-	   The 3c523 provides information in two of the POS registers, but
-	   the second one is only needed if we want to tell the card what IRQ
-	   to use.  I suspect that whoever sets the thing up initially would
-	   prefer we don't screw with those things.
-
-	   Note that we read the status info when we found the card...
-
-	   See 3c523.h for more details.
-	 */
-
-	/* revision is stored in the first 4 bits of the revision register */
-	revision = inb(dev->base_addr + ELMC_REVISION) & 0xf;
-
-	/* according to docs, we read the interrupt and write it back to
-	   the IRQ select register, since the POST might not configure the IRQ
-	   properly. */
-	switch (dev->irq) {
-	case 3:
-		mca_write_pos(slot, 3, 0x04);
-		break;
-	case 7:
-		mca_write_pos(slot, 3, 0x02);
-		break;
-	case 9:
-		mca_write_pos(slot, 3, 0x08);
-		break;
-	case 12:
-		mca_write_pos(slot, 3, 0x01);
-		break;
-	}
-
-	pr->slot = slot;
-
-	pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
-	       dev->base_addr);
-
-	/* Determine if we're using the on-board transceiver (i.e. coax) or
-	   an external one.  The information is pretty much useless, but I
-	   guess it's worth brownie points. */
-	dev->if_port = (status & ELMC_STATUS_DISABLE_THIN);
-
-	/* The 3c523 has a 24K chunk of memory.  The first 16K is the
-	   shared memory, while the last 8K is for the EtherStart BIOS ROM.
-	   Which we don't care much about here.  We'll just tell Linux that
-	   we're using 16K.  MCA won't permit address space conflicts caused
-	   by not mapping the other 8K. */
-	dev->mem_start = shm_table[(status & ELMC_STATUS_MEMORY_SELECT) >> 3];
-
-	/* We're using MCA, so it's a given that the information about memory
-	   size is correct.  The Crynwr drivers do something like this. */
-
-	elmc_id_reset586();	/* seems like a good idea before checking it... */
-
-	size = 0x4000;		/* check for 16K mem */
-	if (!check586(dev, dev->mem_start, size)) {
-		pr_err("%s: memprobe, Can't find memory at 0x%lx!\n", dev->name,
-		       dev->mem_start);
-		retval = -ENODEV;
-		goto err_out;
-	}
-	dev->mem_end = dev->mem_start + size;	/* set mem_end showed by 'ifconfig' */
-
-	pr->memtop = isa_bus_to_virt(dev->mem_start) + size;
-	pr->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000;
-	alloc586(dev);
-
-	elmc_id_reset586();	/* make sure it doesn't generate spurious ints */
-
-	/* set number of receive-buffs according to memsize */
-	pr->num_recv_buffs = NUM_RECV_BUFFS_16;
-
-	/* dump all the assorted information */
-	pr_info("%s: IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->name,
-	       dev->irq, dev->if_port ? "ex" : "in",
-	       dev->mem_start, dev->mem_end - 1);
-
-	/* The hardware address for the 3c523 is stored in the first six
-	   bytes of the IO address. */
-	for (i = 0; i < 6; i++)
-		dev->dev_addr[i] = inb(dev->base_addr + i);
-
-	pr_info("%s: hardware address %pM\n",
-	       dev->name, dev->dev_addr);
-
-	dev->netdev_ops = &netdev_ops;
-	dev->watchdog_timeo = HZ;
-	dev->ethtool_ops = &netdev_ethtool_ops;
-
-	/* note that we haven't actually requested the IRQ from the kernel.
-	   That gets done in elmc_open().  I'm not sure that's such a good idea,
-	   but it works, so I'll go with it. */
-
-#ifndef ELMC_MULTICAST
-        dev->flags&=~IFF_MULTICAST;     /* Multicast doesn't work */
-#endif
-
-	retval = register_netdev(dev);
-	if (retval)
-		goto err_out;
-
-	return 0;
-err_out:
-	mca_set_adapter_procfn(slot, NULL, NULL);
-	release_region(dev->base_addr, ELMC_IO_EXTENT);
-	return retval;
-}
-
-#ifdef MODULE
-static void cleanup_card(struct net_device *dev)
-{
-	mca_set_adapter_procfn(((struct priv *)netdev_priv(dev))->slot,
-				NULL, NULL);
-	release_region(dev->base_addr, ELMC_IO_EXTENT);
-}
-#else
-struct net_device * __init elmc_probe(int unit)
-{
-	struct net_device *dev = alloc_etherdev(sizeof(struct priv));
-	int err;
-
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	sprintf(dev->name, "eth%d", unit);
-	netdev_boot_setup_check(dev);
-
-	err = do_elmc_probe(dev);
-	if (err)
-		goto out;
-	return dev;
-out:
-	free_netdev(dev);
-	return ERR_PTR(err);
-}
-#endif
-
-/**********************************************
- * init the chip (elmc-interrupt should be disabled?!)
- * needs a correct 'allocated' memory
- */
-
-static int init586(struct net_device *dev)
-{
-	void *ptr;
-	unsigned long s;
-	int i, result = 0;
-	struct priv *p = netdev_priv(dev);
-	volatile struct configure_cmd_struct *cfg_cmd;
-	volatile struct iasetup_cmd_struct *ias_cmd;
-	volatile struct tdr_cmd_struct *tdr_cmd;
-	volatile struct mcsetup_cmd_struct *mc_cmd;
-	struct netdev_hw_addr *ha;
-	int num_addrs = netdev_mc_count(dev);
-
-	ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
-
-	cfg_cmd = (struct configure_cmd_struct *) ptr;	/* configure-command */
-	cfg_cmd->cmd_status = 0;
-	cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST;
-	cfg_cmd->cmd_link = 0xffff;
-
-	cfg_cmd->byte_cnt = 0x0a;	/* number of cfg bytes */
-	cfg_cmd->fifo = 0x08;	/* fifo-limit (8=tx:32/rx:64) */
-	cfg_cmd->sav_bf = 0x40;	/* hold or discard bad recv frames (bit 7) */
-	cfg_cmd->adr_len = 0x2e;	/* addr_len |!src_insert |pre-len |loopback */
-	cfg_cmd->priority = 0x00;
-	cfg_cmd->ifs = 0x60;
-	cfg_cmd->time_low = 0x00;
-	cfg_cmd->time_high = 0xf2;
-	cfg_cmd->promisc = 0;
-	if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC))
-		cfg_cmd->promisc = 1;
-	cfg_cmd->carr_coll = 0x00;
-
-	p->scb->cbl_offset = make16(cfg_cmd);
-
-	p->scb->cmd = CUC_START;	/* cmd.-unit start */
-	elmc_id_attn586();
-
-	s = jiffies;		/* warning: only active with interrupts on !! */
-	while (!(cfg_cmd->cmd_status & STAT_COMPL)) {
-		if (time_after(jiffies, s + 30*HZ/100))
-			break;
-	}
-
-	if ((cfg_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_COMPL | STAT_OK)) {
-		pr_warning("%s (elmc): configure command failed: %x\n", dev->name, cfg_cmd->cmd_status);
-		return 1;
-	}
-	/*
-	 * individual address setup
-	 */
-	ias_cmd = (struct iasetup_cmd_struct *) ptr;
-
-	ias_cmd->cmd_status = 0;
-	ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST;
-	ias_cmd->cmd_link = 0xffff;
-
-	memcpy((char *) &ias_cmd->iaddr, (char *) dev->dev_addr, ETH_ALEN);
-
-	p->scb->cbl_offset = make16(ias_cmd);
-
-	p->scb->cmd = CUC_START;	/* cmd.-unit start */
-	elmc_id_attn586();
-
-	s = jiffies;
-	while (!(ias_cmd->cmd_status & STAT_COMPL)) {
-		if (time_after(jiffies, s + 30*HZ/100))
-			break;
-	}
-
-	if ((ias_cmd->cmd_status & (STAT_OK | STAT_COMPL)) != (STAT_OK | STAT_COMPL)) {
-		pr_warning("%s (elmc): individual address setup command failed: %04x\n",
-			dev->name, ias_cmd->cmd_status);
-		return 1;
-	}
-	/*
-	 * TDR, wire check .. e.g. no resistor e.t.c
-	 */
-	tdr_cmd = (struct tdr_cmd_struct *) ptr;
-
-	tdr_cmd->cmd_status = 0;
-	tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST;
-	tdr_cmd->cmd_link = 0xffff;
-	tdr_cmd->status = 0;
-
-	p->scb->cbl_offset = make16(tdr_cmd);
-
-	p->scb->cmd = CUC_START;	/* cmd.-unit start */
-	elmc_attn586();
-
-	s = jiffies;
-	while (!(tdr_cmd->cmd_status & STAT_COMPL)) {
-		if (time_after(jiffies, s + 30*HZ/100)) {
-			pr_warning("%s: %d Problems while running the TDR.\n", dev->name, __LINE__);
-			result = 1;
-			break;
-		}
-	}
-
-	if (!result) {
-		DELAY(2);	/* wait for result */
-		result = tdr_cmd->status;
-
-		p->scb->cmd = p->scb->status & STAT_MASK;
-		elmc_id_attn586();	/* ack the interrupts */
-
-		if (result & TDR_LNK_OK) {
-			/* empty */
-		} else if (result & TDR_XCVR_PRB) {
-			pr_warning("%s: TDR: Transceiver problem!\n", dev->name);
-		} else if (result & TDR_ET_OPN) {
-			pr_warning("%s: TDR: No correct termination %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
-		} else if (result & TDR_ET_SRT) {
-			if (result & TDR_TIMEMASK)	/* time == 0 -> strange :-) */
-				pr_warning("%s: TDR: Detected a short circuit %d clocks away.\n", dev->name, result & TDR_TIMEMASK);
-		} else {
-			pr_warning("%s: TDR: Unknown status %04x\n", dev->name, result);
-		}
-	}
-	/*
-	 * ack interrupts
-	 */
-	p->scb->cmd = p->scb->status & STAT_MASK;
-	elmc_id_attn586();
-
-	/*
-	 * alloc nop/xmit-cmds
-	 */
-#if (NUM_XMIT_BUFFS == 1)
-	for (i = 0; i < 2; i++) {
-		p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
-		p->nop_cmds[i]->cmd_cmd = CMD_NOP;
-		p->nop_cmds[i]->cmd_status = 0;
-		p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
-		ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
-	}
-	p->xmit_cmds[0] = (struct transmit_cmd_struct *) ptr;	/* transmit cmd/buff 0 */
-	ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
-#else
-	for (i = 0; i < NUM_XMIT_BUFFS; i++) {
-		p->nop_cmds[i] = (struct nop_cmd_struct *) ptr;
-		p->nop_cmds[i]->cmd_cmd = CMD_NOP;
-		p->nop_cmds[i]->cmd_status = 0;
-		p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
-		ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
-		p->xmit_cmds[i] = (struct transmit_cmd_struct *) ptr;	/*transmit cmd/buff 0 */
-		ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
-	}
-#endif
-
-	ptr = alloc_rfa(dev, (void *) ptr);	/* init receive-frame-area */
-
-	/*
-	 * Multicast setup
-	 */
-
-	if (num_addrs) {
-		/* I don't understand this: do we really need memory after the init? */
-		int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
-		if (len <= 0) {
-			pr_err("%s: Ooooops, no memory for MC-Setup!\n", dev->name);
-		} else {
-			if (len < num_addrs) {
-				num_addrs = len;
-				pr_warning("%s: Sorry, can only apply %d MC-Address(es).\n",
-				       dev->name, num_addrs);
-			}
-			mc_cmd = (struct mcsetup_cmd_struct *) ptr;
-			mc_cmd->cmd_status = 0;
-			mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
-			mc_cmd->cmd_link = 0xffff;
-			mc_cmd->mc_cnt = num_addrs * 6;
-			i = 0;
-			netdev_for_each_mc_addr(ha, dev)
-				memcpy((char *) mc_cmd->mc_list[i++],
-				       ha->addr, 6);
-			p->scb->cbl_offset = make16(mc_cmd);
-			p->scb->cmd = CUC_START;
-			elmc_id_attn586();
-			s = jiffies;
-			while (!(mc_cmd->cmd_status & STAT_COMPL)) {
-				if (time_after(jiffies, s + 30*HZ/100))
-					break;
-			}
-			if (!(mc_cmd->cmd_status & STAT_COMPL)) {
-				pr_warning("%s: Can't apply multicast-address-list.\n", dev->name);
-			}
-		}
-	}
-	/*
-	 * alloc xmit-buffs / init xmit_cmds
-	 */
-	for (i = 0; i < NUM_XMIT_BUFFS; i++) {
-		p->xmit_cbuffs[i] = (char *) ptr;	/* char-buffs */
-		ptr = (char *) ptr + XMIT_BUFF_SIZE;
-		p->xmit_buffs[i] = (struct tbd_struct *) ptr;	/* TBD */
-		ptr = (char *) ptr + sizeof(struct tbd_struct);
-		if ((void *) ptr > (void *) p->iscp) {
-			pr_err("%s: not enough shared-mem for your configuration!\n", dev->name);
-			return 1;
-		}
-		memset((char *) (p->xmit_cmds[i]), 0, sizeof(struct transmit_cmd_struct));
-		memset((char *) (p->xmit_buffs[i]), 0, sizeof(struct tbd_struct));
-		p->xmit_cmds[i]->cmd_status = STAT_COMPL;
-		p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT;
-		p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
-		p->xmit_buffs[i]->next = 0xffff;
-		p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
-	}
-
-	p->xmit_count = 0;
-	p->xmit_last = 0;
-#ifndef NO_NOPCOMMANDS
-	p->nop_point = 0;
-#endif
-
-	/*
-	 * 'start transmitter' (nop-loop)
-	 */
-#ifndef NO_NOPCOMMANDS
-	p->scb->cbl_offset = make16(p->nop_cmds[0]);
-	p->scb->cmd = CUC_START;
-	elmc_id_attn586();
-	WAIT_4_SCB_CMD();
-#else
-	p->xmit_cmds[0]->cmd_link = 0xffff;
-	p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_LAST | CMD_INT;
-#endif
-
-	return 0;
-}
-
-/******************************************************
- * This is a helper routine for elmc_rnr_int() and init586().
- * It sets up the Receive Frame Area (RFA).
- */
-
-static void *alloc_rfa(struct net_device *dev, void *ptr)
-{
-	volatile struct rfd_struct *rfd = (struct rfd_struct *) ptr;
-	volatile struct rbd_struct *rbd;
-	int i;
-	struct priv *p = netdev_priv(dev);
-
-	memset((char *) rfd, 0, sizeof(struct rfd_struct) * p->num_recv_buffs);
-	p->rfd_first = rfd;
-
-	for (i = 0; i < p->num_recv_buffs; i++) {
-		rfd[i].next = make16(rfd + (i + 1) % p->num_recv_buffs);
-	}
-	rfd[p->num_recv_buffs - 1].last = RFD_SUSP;	/* RU suspend */
-
-	ptr = (void *) (rfd + p->num_recv_buffs);
-
-	rbd = (struct rbd_struct *) ptr;
-	ptr = (void *) (rbd + p->num_recv_buffs);
-
-	/* clr descriptors */
-	memset((char *) rbd, 0, sizeof(struct rbd_struct) * p->num_recv_buffs);
-
-	for (i = 0; i < p->num_recv_buffs; i++) {
-		rbd[i].next = make16((rbd + (i + 1) % p->num_recv_buffs));
-		rbd[i].size = RECV_BUFF_SIZE;
-		rbd[i].buffer = make24(ptr);
-		ptr = (char *) ptr + RECV_BUFF_SIZE;
-	}
-
-	p->rfd_top = p->rfd_first;
-	p->rfd_last = p->rfd_first + p->num_recv_buffs - 1;
-
-	p->scb->rfa_offset = make16(p->rfd_first);
-	p->rfd_first->rbd_offset = make16(rbd);
-
-	return ptr;
-}
-
-
-/**************************************************
- * Interrupt Handler ...
- */
-
-static irqreturn_t
-elmc_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev = dev_id;
-	unsigned short stat;
-	struct priv *p;
-
-	if (!netif_running(dev)) {
-		/* The 3c523 has this habit of generating interrupts during the
-		   reset.  I'm not sure if the ni52 has this same problem, but it's
-		   really annoying if we haven't finished initializing it.  I was
-		   hoping all the elmc_id_* commands would disable this, but I
-		   might have missed a few. */
-
-		elmc_id_attn586();	/* ack inter. and disable any more */
-		return IRQ_HANDLED;
-	} else if (!(ELMC_CTRL_INT & inb(dev->base_addr + ELMC_CTRL))) {
-		/* wasn't this device */
-		return IRQ_NONE;
-	}
-	/* reading ELMC_CTRL also clears the INT bit. */
-
-	p = netdev_priv(dev);
-
-	while ((stat = p->scb->status & STAT_MASK))
-	{
-		p->scb->cmd = stat;
-		elmc_attn586();	/* ack inter. */
-
-		if (stat & STAT_CX) {
-			/* command with I-bit set complete */
-			elmc_xmt_int(dev);
-		}
-		if (stat & STAT_FR) {
-			/* received a frame */
-			elmc_rcv_int(dev);
-		}
-#ifndef NO_NOPCOMMANDS
-		if (stat & STAT_CNA) {
-			/* CU went 'not ready' */
-			if (netif_running(dev)) {
-				pr_warning("%s: oops! CU has left active state. stat: %04x/%04x.\n",
-					dev->name, (int) stat, (int) p->scb->status);
-			}
-		}
-#endif
-
-		if (stat & STAT_RNR) {
-			/* RU went 'not ready' */
-
-			if (p->scb->status & RU_SUSPEND) {
-				/* special case: RU_SUSPEND */
-
-				WAIT_4_SCB_CMD();
-				p->scb->cmd = RUC_RESUME;
-				elmc_attn586();
-			} else {
-				pr_warning("%s: Receiver-Unit went 'NOT READY': %04x/%04x.\n",
-					dev->name, (int) stat, (int) p->scb->status);
-				elmc_rnr_int(dev);
-			}
-		}
-		WAIT_4_SCB_CMD();	/* wait for ack. (elmc_xmt_int can be faster than ack!!) */
-		if (p->scb->cmd) {	/* timed out? */
-			break;
-		}
-	}
-	return IRQ_HANDLED;
-}
-
-/*******************************************************
- * receive-interrupt
- */
-
-static void elmc_rcv_int(struct net_device *dev)
-{
-	int status;
-	unsigned short totlen;
-	struct sk_buff *skb;
-	struct rbd_struct *rbd;
-	struct priv *p = netdev_priv(dev);
-
-	for (; (status = p->rfd_top->status) & STAT_COMPL;) {
-		rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
-
-		if (status & STAT_OK) {		/* frame received without error? */
-			if ((totlen = rbd->status) & RBD_LAST) {	/* the first and the last buffer? */
-				totlen &= RBD_MASK;	/* length of this frame */
-				rbd->status = 0;
-				skb = netdev_alloc_skb(dev, totlen + 2);
-				if (skb != NULL) {
-					skb_reserve(skb, 2);	/* 16 byte alignment */
-					skb_put(skb,totlen);
-					skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
-					skb->protocol = eth_type_trans(skb, dev);
-					netif_rx(skb);
-					dev->stats.rx_packets++;
-					dev->stats.rx_bytes += totlen;
-				} else {
-					dev->stats.rx_dropped++;
-				}
-			} else {
-				pr_warning("%s: received oversized frame.\n", dev->name);
-				dev->stats.rx_dropped++;
-			}
-		} else {	/* frame !(ok), only with 'save-bad-frames' */
-			pr_warning("%s: oops! rfd-error-status: %04x\n", dev->name, status);
-			dev->stats.rx_errors++;
-		}
-		p->rfd_top->status = 0;
-		p->rfd_top->last = RFD_SUSP;
-		p->rfd_last->last = 0;	/* delete RU_SUSP  */
-		p->rfd_last = p->rfd_top;
-		p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next);	/* step to next RFD */
-	}
-}
-
-/**********************************************************
- * handle 'Receiver went not ready'.
- */
-
-static void elmc_rnr_int(struct net_device *dev)
-{
-	struct priv *p = netdev_priv(dev);
-
-	dev->stats.rx_errors++;
-
-	WAIT_4_SCB_CMD();	/* wait for the last cmd */
-	p->scb->cmd = RUC_ABORT;	/* usually the RU is in the 'no resource'-state .. abort it now. */
-	elmc_attn586();
-	WAIT_4_SCB_CMD();	/* wait for accept cmd. */
-
-	alloc_rfa(dev, (char *) p->rfd_first);
-	startrecv586(dev);	/* restart RU */
-
-	pr_warning("%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->status);
-
-}
-
-/**********************************************************
- * handle xmit - interrupt
- */
-
-static void elmc_xmt_int(struct net_device *dev)
-{
-	int status;
-	struct priv *p = netdev_priv(dev);
-
-	status = p->xmit_cmds[p->xmit_last]->cmd_status;
-	if (!(status & STAT_COMPL)) {
-		pr_warning("%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
-	}
-	if (status & STAT_OK) {
-		dev->stats.tx_packets++;
-		dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
-	} else {
-		dev->stats.tx_errors++;
-		if (status & TCMD_LATECOLL) {
-			pr_warning("%s: late collision detected.\n", dev->name);
-			dev->stats.collisions++;
-		} else if (status & TCMD_NOCARRIER) {
-			dev->stats.tx_carrier_errors++;
-			pr_warning("%s: no carrier detected.\n", dev->name);
-		} else if (status & TCMD_LOSTCTS) {
-			pr_warning("%s: loss of CTS detected.\n", dev->name);
-		} else if (status & TCMD_UNDERRUN) {
-			dev->stats.tx_fifo_errors++;
-			pr_warning("%s: DMA underrun detected.\n", dev->name);
-		} else if (status & TCMD_MAXCOLL) {
-			pr_warning("%s: Max. collisions exceeded.\n", dev->name);
-			dev->stats.collisions += 16;
-		}
-	}
-
-#if (NUM_XMIT_BUFFS != 1)
-	if ((++p->xmit_last) == NUM_XMIT_BUFFS) {
-		p->xmit_last = 0;
-	}
-#endif
-
-	netif_wake_queue(dev);
-}
-
-/***********************************************************
- * (re)start the receiver
- */
-
-static void startrecv586(struct net_device *dev)
-{
-	struct priv *p = netdev_priv(dev);
-
-	p->scb->rfa_offset = make16(p->rfd_first);
-	p->scb->cmd = RUC_START;
-	elmc_attn586();		/* start cmd. */
-	WAIT_4_SCB_CMD();	/* wait for accept cmd. (no timeout!!) */
-}
-
-/******************************************************
- * timeout
- */
-
-static void elmc_timeout(struct net_device *dev)
-{
-	struct priv *p = netdev_priv(dev);
-	/* COMMAND-UNIT active? */
-	if (p->scb->status & CU_ACTIVE) {
-		pr_debug("%s: strange ... timeout with CU active?!?\n", dev->name);
-		pr_debug("%s: X0: %04x N0: %04x N1: %04x %d\n", dev->name,
-			(int)p->xmit_cmds[0]->cmd_status,
-			(int)p->nop_cmds[0]->cmd_status,
-			(int)p->nop_cmds[1]->cmd_status, (int)p->nop_point);
-		p->scb->cmd = CUC_ABORT;
-		elmc_attn586();
-		WAIT_4_SCB_CMD();
-		p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
-		p->scb->cmd = CUC_START;
-		elmc_attn586();
-		WAIT_4_SCB_CMD();
-		netif_wake_queue(dev);
-	} else {
-		pr_debug("%s: xmitter timed out, try to restart! stat: %04x\n",
-			dev->name, p->scb->status);
-		pr_debug("%s: command-stats: %04x %04x\n", dev->name,
-			p->xmit_cmds[0]->cmd_status, p->xmit_cmds[1]->cmd_status);
-		elmc_close(dev);
-		elmc_open(dev);
-	}
-}
-
-/******************************************************
- * send frame
- */
-
-static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
-	int len;
-	int i;
-#ifndef NO_NOPCOMMANDS
-	int next_nop;
-#endif
-	struct priv *p = netdev_priv(dev);
-
-	netif_stop_queue(dev);
-
-	len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
-
-	if (len != skb->len)
-		memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
-	skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);
-
-#if (NUM_XMIT_BUFFS == 1)
-#ifdef NO_NOPCOMMANDS
-	p->xmit_buffs[0]->size = TBD_LAST | len;
-	for (i = 0; i < 16; i++) {
-		p->scb->cbl_offset = make16(p->xmit_cmds[0]);
-		p->scb->cmd = CUC_START;
-		p->xmit_cmds[0]->cmd_status = 0;
-			elmc_attn586();
-		if (!i) {
-			dev_kfree_skb(skb);
-		}
-		WAIT_4_SCB_CMD();
-		if ((p->scb->status & CU_ACTIVE)) {	/* test it, because CU sometimes doesn't start immediately */
-			break;
-		}
-		if (p->xmit_cmds[0]->cmd_status) {
-			break;
-		}
-		if (i == 15) {
-			pr_warning("%s: Can't start transmit-command.\n", dev->name);
-		}
-	}
-#else
-	next_nop = (p->nop_point + 1) & 0x1;
-	p->xmit_buffs[0]->size = TBD_LAST | len;
-
-	p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
-	    = make16((p->nop_cmds[next_nop]));
-	p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
-
-	p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
-	p->nop_point = next_nop;
-	dev_kfree_skb(skb);
-#endif
-#else
-	p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
-	if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) {
-		next_nop = 0;
-	}
-	p->xmit_cmds[p->xmit_count]->cmd_status = 0;
-	p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link
-	    = make16((p->nop_cmds[next_nop]));
-	p->nop_cmds[next_nop]->cmd_status = 0;
-		p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
-	p->xmit_count = next_nop;
-	if (p->xmit_count != p->xmit_last)
-		netif_wake_queue(dev);
-	dev_kfree_skb(skb);
-#endif
-	return NETDEV_TX_OK;
-}
-
-/*******************************************
- * Someone wanna have the statistics
- */
-
-static struct net_device_stats *elmc_get_stats(struct net_device *dev)
-{
-	struct priv *p = netdev_priv(dev);
-	unsigned short crc, aln, rsc, ovrn;
-
-	crc = p->scb->crc_errs;	/* get error-statistic from the ni82586 */
-	p->scb->crc_errs -= crc;
-	aln = p->scb->aln_errs;
-	p->scb->aln_errs -= aln;
-	rsc = p->scb->rsc_errs;
-	p->scb->rsc_errs -= rsc;
-	ovrn = p->scb->ovrn_errs;
-	p->scb->ovrn_errs -= ovrn;
-
-	dev->stats.rx_crc_errors += crc;
-	dev->stats.rx_fifo_errors += ovrn;
-	dev->stats.rx_frame_errors += aln;
-	dev->stats.rx_dropped += rsc;
-
-	return &dev->stats;
-}
-
-/********************************************************
- * Set MC list ..
- */
-
-#ifdef ELMC_MULTICAST
-static void set_multicast_list(struct net_device *dev)
-{
-	if (!dev->start) {
-		/* without a running interface, promiscuous doesn't work */
-		return;
-	}
-	dev->start = 0;
-	alloc586(dev);
-	init586(dev);
-	startrecv586(dev);
-	dev->start = 1;
-}
-#endif
-
-static void netdev_get_drvinfo(struct net_device *dev,
-			       struct ethtool_drvinfo *info)
-{
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
-	.get_drvinfo		= netdev_get_drvinfo,
-};
-
-#ifdef MODULE
-
-/* Increase if needed ;) */
-#define MAX_3C523_CARDS 4
-
-static struct net_device *dev_elmc[MAX_3C523_CARDS];
-static int irq[MAX_3C523_CARDS];
-static int io[MAX_3C523_CARDS];
-module_param_array(irq, int, NULL, 0);
-module_param_array(io, int, NULL, 0);
-MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
-	int this_dev,found = 0;
-
-	/* Loop until we either can't find any more cards, or we have MAX_3C523_CARDS */
-	for(this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
-		struct net_device *dev = alloc_etherdev(sizeof(struct priv));
-		if (!dev)
-			break;
-		dev->irq=irq[this_dev];
-		dev->base_addr=io[this_dev];
-		if (do_elmc_probe(dev) == 0) {
-			dev_elmc[this_dev] = dev;
-			found++;
-			continue;
-		}
-		free_netdev(dev);
-		if (io[this_dev]==0)
-			break;
-		pr_warning("3c523.c: No 3c523 card found at io=%#x\n",io[this_dev]);
-	}
-
-	if(found==0) {
-		if (io[0]==0)
-			pr_notice("3c523.c: No 3c523 cards found\n");
-		return -ENXIO;
-	} else return 0;
-}
-
-void __exit cleanup_module(void)
-{
-	int this_dev;
-	for (this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
-		struct net_device *dev = dev_elmc[this_dev];
-		if (dev) {
-			unregister_netdev(dev);
-			cleanup_card(dev);
-			free_netdev(dev);
-		}
-	}
-}
-
-#endif				/* MODULE */
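A note on the statistics path above: elmc_get_stats() does not zero the on-card error counters outright. It snapshots each counter and then subtracts exactly the value it read, so increments that land between the read and the write-back are kept for the next poll. A standalone sketch of that read-then-subtract pattern (plain C, illustrative only, not part of the removed driver):

#include <stdio.h>

/* Stand-in for an on-card counter that may keep incrementing underneath us. */
static volatile unsigned short hw_crc_errs;

static unsigned long total_crc_errs;

static void harvest_stats(void)
{
	unsigned short crc = hw_crc_errs;	/* snapshot the counter */

	hw_crc_errs -= crc;		/* clear only what we saw; later increments survive */
	total_crc_errs += crc;		/* fold the snapshot into the running total */
}

int main(void)
{
	hw_crc_errs = 5;
	harvest_stats();
	hw_crc_errs += 3;		/* errors that arrived after the first snapshot */
	harvest_stats();
	printf("total CRC errors: %lu\n", total_crc_errs);	/* prints 8 */
	return 0;
}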
diff --git a/drivers/net/ethernet/i825xx/3c523.h b/drivers/net/ethernet/i825xx/3c523.h
deleted file mode 100644
index 6956441..0000000
--- a/drivers/net/ethernet/i825xx/3c523.h
+++ /dev/null
@@ -1,355 +0,0 @@
-#ifndef _3c523_INCLUDE_
-#define _3c523_INCLUDE_
-/*
-	This is basically a hacked version of ni52.h, for the 3c523
-	Etherlink/MC.
-*/
-
-/*
- * Intel i82586 Ethernet definitions
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers that work.
- *
- * Copyright 1995 by Chris Beauregard (cpbeaure@undergrad.math.uwaterloo.ca)
- *
- * See 3c523.c for details.
- *
- * $Header: /home/chrisb/linux-1.2.13-3c523/drivers/net/RCS/3c523.h,v 1.6 1996/01/20 05:09:00 chrisb Exp chrisb $
- */
-
-/*
- * where to find the System Configuration Pointer (SCP)
- */
-#define SCP_DEFAULT_ADDRESS 0xfffff4
-
-
-/*
- * System Configuration Pointer Struct
- */
-
-struct scp_struct
-{
-  unsigned short zero_dum0;	/* has to be zero */
-  unsigned char  sysbus;	/* 0=16Bit,1=8Bit */
-  unsigned char  zero_dum1;	/* has to be zero for 586 */
-  unsigned short zero_dum2;
-  unsigned short zero_dum3;
-  char          *iscp;		/* pointer to the iscp-block */
-};
-
-
-/*
- * Intermediate System Configuration Pointer (ISCP)
- */
-struct iscp_struct
-{
-  unsigned char  busy;          /* 586 clears after successful init */
-  unsigned char  zero_dummy;    /* has to be zero */
-  unsigned short scb_offset;    /* pointeroffset to the scb_base */
-  char          *scb_base;      /* base-address of all 16-bit offsets */
-};
-
-/*
- * System Control Block (SCB)
- */
-struct scb_struct
-{
-  unsigned short status;        /* status word */
-  unsigned short cmd;           /* command word */
-  unsigned short cbl_offset;    /* pointeroffset, command block list */
-  unsigned short rfa_offset;    /* pointeroffset, receive frame area */
-  unsigned short crc_errs;      /* CRC-Error counter */
-  unsigned short aln_errs;      /* alignment error counter */
-  unsigned short rsc_errs;      /* resource error counter */
-  unsigned short ovrn_errs;     /* overrun error counter */
-};
-
-/*
- * possible command values for the command word
- */
-#define RUC_MASK	0x0070	/* mask for RU commands */
-#define RUC_NOP		0x0000	/* NOP-command */
-#define RUC_START	0x0010	/* start RU */
-#define RUC_RESUME	0x0020	/* resume RU after suspend */
-#define RUC_SUSPEND	0x0030	/* suspend RU */
-#define RUC_ABORT	0x0040	/* abort receiver operation immediately */
-
-#define CUC_MASK	0x0700	/* mask for CU command */
-#define CUC_NOP		0x0000	/* NOP-command */
-#define CUC_START	0x0100	/* start execution of 1. cmd on the CBL */
-#define CUC_RESUME	0x0200	/* resume after suspend */
-#define CUC_SUSPEND	0x0300	/* Suspend CU */
-#define CUC_ABORT	0x0400	/* abort command operation immediately */
-
-#define ACK_MASK	0xf000	/* mask for ACK command */
-#define ACK_CX		0x8000	/* acknowledges STAT_CX */
-#define ACK_FR		0x4000	/* ack. STAT_FR */
-#define ACK_CNA		0x2000	/* ack. STAT_CNA */
-#define ACK_RNR		0x1000	/* ack. STAT_RNR */
-
-/*
- * possible status values for the status word
- */
-#define STAT_MASK	0xf000	/* mask for cause of interrupt */
-#define STAT_CX		0x8000	/* CU finished cmd with its I bit set */
-#define STAT_FR		0x4000	/* RU finished receiving a frame */
-#define STAT_CNA	0x2000	/* CU left active state */
-#define STAT_RNR	0x1000	/* RU left ready state */
-
-#define CU_STATUS	0x700	/* CU status, 0=idle */
-#define CU_SUSPEND	0x100	/* CU is suspended */
-#define CU_ACTIVE	0x200	/* CU is active */
-
-#define RU_STATUS	0x70	/* RU status, 0=idle */
-#define RU_SUSPEND	0x10	/* RU suspended */
-#define RU_NOSPACE	0x20	/* RU no resources */
-#define RU_READY	0x40	/* RU is ready */
-
-/*
- * Receive Frame Descriptor (RFD)
- */
-struct rfd_struct
-{
-  unsigned short status;	/* status word */
-  unsigned short last;		/* Bit15,Last Frame on List / Bit14,suspend */
-  unsigned short next;		/* linkoffset to next RFD */
-  unsigned short rbd_offset;	/* pointeroffset to RBD-buffer */
-  unsigned char  dest[6];	/* ethernet-address, destination */
-  unsigned char  source[6];	/* ethernet-address, source */
-  unsigned short length;	/* 802.3 frame-length */
-  unsigned short zero_dummy;	/* dummy */
-};
-
-#define RFD_LAST     0x8000	/* last: last rfd in the list */
-#define RFD_SUSP     0x4000	/* last: suspend RU after  */
-#define RFD_ERRMASK  0x0fe1     /* status: errormask */
-#define RFD_MATCHADD 0x0002     /* status: destination address does not match IA */
-#define RFD_RNR      0x0200	/* status: receiver out of resources */
-
-/*
- * Receive Buffer Descriptor (RBD)
- */
-struct rbd_struct
-{
-  unsigned short status;	/* status word,number of used bytes in buff */
-  unsigned short next;		/* pointeroffset to next RBD */
-  char          *buffer;	/* receive buffer address pointer */
-  unsigned short size;		/* size of this buffer */
-  unsigned short zero_dummy;    /* dummy */
-};
-
-#define RBD_LAST	0x8000	/* last buffer */
-#define RBD_USED	0x4000	/* this buffer has data */
-#define RBD_MASK	0x3fff	/* size-mask for length */
-
-/*
- * Status values for Commands/RFD
- */
-#define STAT_COMPL   0x8000	/* status: frame/command is complete */
-#define STAT_BUSY    0x4000	/* status: frame/command is busy */
-#define STAT_OK      0x2000	/* status: frame/command is ok */
-
-/*
- * Action-Commands
- */
-#define CMD_NOP		0x0000	/* NOP */
-#define CMD_IASETUP	0x0001	/* initial address setup command */
-#define CMD_CONFIGURE	0x0002	/* configure command */
-#define CMD_MCSETUP	0x0003	/* MC setup command */
-#define CMD_XMIT	0x0004	/* transmit command */
-#define CMD_TDR		0x0005	/* time domain reflectometer (TDR) command */
-#define CMD_DUMP	0x0006	/* dump command */
-#define CMD_DIAGNOSE	0x0007	/* diagnose command */
-
-/*
- * Action command bits
- */
-#define CMD_LAST	0x8000	/* indicates last command in the CBL */
-#define CMD_SUSPEND	0x4000	/* suspend CU after this CB */
-#define CMD_INT		0x2000	/* generate interrupt after execution */
-
-/*
- * NOP - command
- */
-struct nop_cmd_struct
-{
-  unsigned short cmd_status;	/* status of this command */
-  unsigned short cmd_cmd;       /* the command itself (+bits) */
-  unsigned short cmd_link;      /* offsetpointer to next command */
-};
-
-/*
- * IA Setup command
- */
-struct iasetup_cmd_struct
-{
-  unsigned short cmd_status;
-  unsigned short cmd_cmd;
-  unsigned short cmd_link;
-  unsigned char  iaddr[6];
-};
-
-/*
- * Configure command
- */
-struct configure_cmd_struct
-{
-  unsigned short cmd_status;
-  unsigned short cmd_cmd;
-  unsigned short cmd_link;
-  unsigned char  byte_cnt;   /* size of the config-cmd */
-  unsigned char  fifo;       /* fifo/recv monitor */
-  unsigned char  sav_bf;     /* save bad frames (bit7=1)*/
-  unsigned char  adr_len;    /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
-  unsigned char  priority;   /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
-  unsigned char  ifs;        /* inter frame spacing */
-  unsigned char  time_low;   /* slot time low */
-  unsigned char  time_high;  /* slot time high(0-2) and max. retries(4-7) */
-  unsigned char  promisc;    /* promisc-mode(0) , et al (1-7) */
-  unsigned char  carr_coll;  /* carrier(0-3)/collision(4-7) stuff */
-  unsigned char  fram_len;   /* minimal frame len */
-  unsigned char  dummy;	     /* dummy */
-};
-
-/*
- * Multicast Setup command
- */
-struct mcsetup_cmd_struct
-{
-  unsigned short cmd_status;
-  unsigned short cmd_cmd;
-  unsigned short cmd_link;
-  unsigned short mc_cnt;		/* number of bytes in the MC-List */
-  unsigned char  mc_list[0][6];  	/* pointer to 6 bytes entries */
-};
-
-/*
- * transmit command
- */
-struct transmit_cmd_struct
-{
-  unsigned short cmd_status;
-  unsigned short cmd_cmd;
-  unsigned short cmd_link;
-  unsigned short tbd_offset;	/* pointeroffset to TBD */
-  unsigned char  dest[6];       /* destination address of the frame */
-  unsigned short length;	/* user defined: 802.3 length / Ether type */
-};
-
-#define TCMD_ERRMASK     0x0fa0
-#define TCMD_MAXCOLLMASK 0x000f
-#define TCMD_MAXCOLL     0x0020
-#define TCMD_HEARTBEAT   0x0040
-#define TCMD_DEFERRED    0x0080
-#define TCMD_UNDERRUN    0x0100
-#define TCMD_LOSTCTS     0x0200
-#define TCMD_NOCARRIER   0x0400
-#define TCMD_LATECOLL    0x0800
-
-struct tdr_cmd_struct
-{
-  unsigned short cmd_status;
-  unsigned short cmd_cmd;
-  unsigned short cmd_link;
-  unsigned short status;
-};
-
-#define TDR_LNK_OK	0x8000	/* No link problem identified */
-#define TDR_XCVR_PRB	0x4000	/* indicates a transceiver problem */
-#define TDR_ET_OPN	0x2000	/* open, no correct termination */
-#define TDR_ET_SRT	0x1000	/* TDR detected a short circuit */
-#define TDR_TIMEMASK	0x07ff	/* mask for the time field */
-
-/*
- * Transmit Buffer Descriptor (TBD)
- */
-struct tbd_struct
-{
-  unsigned short size;		/* size + EOF-Flag(15) */
-  unsigned short next;          /* pointeroffset to next TBD */
-  char          *buffer;        /* pointer to buffer */
-};
-
-#define TBD_LAST 0x8000         /* EOF-Flag, indicates last buffer in list */
-
-/*************************************************************************/
-/*
-Verbatim from the Crynwyr stuff:
-
-    The 3c523 responds with adapter code 0x6042 at slot
-registers xxx0 and xxx1.  The setup register is at xxx2 and
-contains the following bits:
-
-0: card enable
-2,1: csr address select
-    00 = 0300
-    01 = 1300
-    10 = 2300
-    11 = 3300
-4,3: shared memory address select
-    00 = 0c0000
-    01 = 0c8000
-    10 = 0d0000
-    11 = 0d8000
-5: set to disable on-board thinnet
-7,6: (read-only) shows selected irq
-    00 = 12
-    01 = 7
-    10 = 3
-    11 = 9
-
-The interrupt-select register is at xxx3 and uses one bit per irq.
-
-0: int 12
-1: int 7
-2: int 3
-3: int 9
-
-    Again, the documentation stresses that the setup register
-should never be written.  The interrupt-select register may be
-written with the value corresponding to bits 7,6 in
-the setup register to ensure correct setup.
-*/
-
-/* Offsets from the base I/O address. */
-#define	ELMC_SA		0	/* first 6 bytes are IEEE network address */
-#define ELMC_CTRL	6	/* control & status register */
-#define ELMC_REVISION	7	/* revision register, first 4 bits only */
-#define ELMC_IO_EXTENT  8
-
-/* these are the bit selects for the port register 2 */
-#define ELMC_STATUS_ENABLED	0x01
-#define ELMC_STATUS_CSR_SELECT	0x06
-#define ELMC_STATUS_MEMORY_SELECT	0x18
-#define ELMC_STATUS_DISABLE_THIN	0x20
-#define ELMC_STATUS_IRQ_SELECT	0xc0
-
-/* this is the card id used in the detection code.  You might recognize
-it from @6042.adf */
-#define ELMC_MCA_ID 0x6042
-
-/*
-   The following define the bits for the control & status register
-
-   The bank select registers can be used if more than 16K of memory is
-   on the card.  For some stupid reason, bank 3 is the one for the
-   bottom 16K, and the card defaults to bank 0.  So we have to set the
-   bank to 3 before the card will even think of operating.  To get bank
-   3, set BS0 and BS1 to high (of course...)
-*/
-#define ELMC_CTRL_BS0	0x01	/* RW bank select */
-#define ELMC_CTRL_BS1	0x02	/* RW bank select */
-#define ELMC_CTRL_INTE	0x04	/* RW interrupt enable, assert high */
-#define ELMC_CTRL_INT	0x08	/* R interrupt active, assert high */
-/*#define ELMC_CTRL_*	0x10*/	/* reserved */
-#define ELMC_CTRL_LBK	0x20	/* RW loopback enable, assert high */
-#define ELMC_CTRL_CA	0x40	/* RW channel attention, assert high */
-#define ELMC_CTRL_RST	0x80	/* RW 82586 reset, assert low */
-
-/* some handy compound bits */
-
-/* normal operation should have bank 3 and RST high, ints enabled */
-#define ELMC_NORMAL (ELMC_CTRL_INTE|ELMC_CTRL_RST|0x3)
-
-#endif /* _3c523_INCLUDE_ */
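The setup-register description quoted in the header above maps naturally onto a small decode helper. The sketch below is illustrative only and is not taken from the removed driver: the base-address tables and the IRQ map come straight from the comment, while the struct and function names are invented for the example. It shows how the CSR base, shared-memory base and IRQ could be recovered from a POS setup byte, and how the matching one-hot value for the interrupt-select register would be derived from bits 7,6.

#include <stdio.h>

static const unsigned short csr_bases[4] = { 0x0300, 0x1300, 0x2300, 0x3300 };
static const unsigned long  mem_bases[4] = { 0x0c0000, 0x0c8000, 0x0d0000, 0x0d8000 };
static const int            irq_table[4] = { 12, 7, 3, 9 };

struct elmc_setup {
	int enabled;		/* bit 0: card enable */
	unsigned short csr;	/* bits 2,1: csr address select */
	unsigned long mem;	/* bits 4,3: shared memory address select */
	int thin_disabled;	/* bit 5: on-board thinnet disabled */
	int irq;		/* bits 7,6: selected irq (read-only) */
};

static struct elmc_setup decode_setup(unsigned char pos)
{
	struct elmc_setup s;

	s.enabled       = pos & 0x01;
	s.csr           = csr_bases[(pos >> 1) & 3];
	s.mem           = mem_bases[(pos >> 3) & 3];
	s.thin_disabled = (pos >> 5) & 1;
	s.irq           = irq_table[(pos >> 6) & 3];
	return s;
}

/* The interrupt-select register uses one bit per irq (bit 0 = irq 12,
 * bit 1 = irq 7, bit 2 = irq 3, bit 3 = irq 9), so the bit index is
 * simply the value of setup bits 7,6. */
static unsigned char irq_select_value(unsigned char pos)
{
	return (unsigned char)(1u << ((pos >> 6) & 3));
}

int main(void)
{
	unsigned char pos = 0x49;	/* example: enabled, csr 0x0300, mem 0x0c8000, irq 7 */
	struct elmc_setup s = decode_setup(pos);

	printf("csr 0x%04x mem 0x%05lx irq %d intsel 0x%02x\n",
	       s.csr, s.mem, s.irq, irq_select_value(pos));
	return 0;
}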
diff --git a/drivers/net/ethernet/i825xx/3c527.c b/drivers/net/ethernet/i825xx/3c527.c
deleted file mode 100644
index 278e791..0000000
--- a/drivers/net/ethernet/i825xx/3c527.c
+++ /dev/null
@@ -1,1660 +0,0 @@
-/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
- *
- *	(c) Copyright 1998 Red Hat Software Inc
- *	Written by Alan Cox.
- *	Further debugging by Carl Drougge.
- *      Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
- *      Heavily modified by Richard Procter <rnp@paradise.net.nz>
- *
- *	Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
- *	(for the MCA stuff) written by Wim Dumon.
- *
- *	Thanks to 3Com for making this possible by providing me with the
- *	documentation.
- *
- *	This software may be used and distributed according to the terms
- *	of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#define DRV_NAME		"3c527"
-#define DRV_VERSION		"0.7-SMP"
-#define DRV_RELDATE		"2003/09/21"
-
-static const char *version =
-DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
-
-/**
- * DOC: Traps for the unwary
- *
- *	The diagram (Figure 1-1) and the POS summary disagree with the
- *	"Interrupt Level" section in the manual.
- *
- *	The manual contradicts itself when describing the minimum number
- *	of buffers in the 'configure lists' command.
- *	My card accepts a buffer config of 4/4.
- *
- *	Setting the SAV BP bit does not save bad packets, but
- *	only enables RX on-card stats collection.
- *
- *	The documentation in places seems to miss things. In actual fact
- *	I've always eventually found everything is documented, it just
- *	requires careful study.
- *
- * DOC: Theory Of Operation
- *
- *	The 3Com 3c527 is a 32-bit MCA bus-mastering adapter with a large
- *	amount of on-board intelligence that housekeeps a somewhat dumber
- *	Intel NIC. For performance we want to keep the transmit queue deep
- *	as the card can transmit packets while fetching others from main
- *	memory by bus master DMA. Transmission and reception are driven by
- *	circular buffer queues.
- *
- *	The mailboxes can be used for controlling how the card traverses
- *	its buffer rings, but are used only for initial setup in this
- *	implementation.  The exec mailbox allows a variety of commands to
- *	be executed. Each command must complete before the next is
- *	executed. Primarily we use the exec mailbox for controlling the
- *	multicast lists.  We have to do a certain amount of interesting
- *	hoop jumping as the multicast list changes can occur in interrupt
- *	state when the card has an exec command pending. We defer such
- *	events until the command completion interrupt.
- *
- *	A copy break scheme (taken from 3c59x.c) is employed whereby
- *	received frames exceeding a configurable length are passed
- *	directly to the higher networking layers without incurring a copy,
- *	in what amounts to a time/space trade-off.
- *
- *	The card also keeps a large amount of statistical information
- *	on-board. In a perfect world, these could be used safely at no
- *	cost. However, lacking information to the contrary, processing
- *	them without races would involve so much extra complexity as to
- *	make it not worthwhile to do so. In the end, a hybrid SW/HW
- *	implementation was made necessary --- see mc32_update_stats().
- *
- * DOC: Notes
- *
- *	It should be possible to use two or more cards, but at this stage
- *	only by loading two copies of the same module.
- *
- *	The on-board 82586 NIC has trouble receiving multiple
- *	back-to-back frames and so is likely to drop packets from fast
- *	senders.
-**/
-
-#include <linux/module.h>
-
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/mca-legacy.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/wait.h>
-#include <linux/ethtool.h>
-#include <linux/completion.h>
-#include <linux/bitops.h>
-#include <linux/semaphore.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "3c527.h"
-
-MODULE_LICENSE("GPL");
-
-/*
- * The name of the card. Is used for messages and in the requests for
- * io regions, irqs and dma channels
- */
-static const char* cardname = DRV_NAME;
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 2
-#endif
-
-static unsigned int mc32_debug = NET_DEBUG;
-
-/* The number of low I/O ports used by the ethercard. */
-#define MC32_IO_EXTENT	8
-
-/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
-#define TX_RING_LEN     32       /* Typically the card supports 37  */
-#define RX_RING_LEN     8        /*     "       "        "          */
-
-/* Copy break point, see above for details.
- * Setting to > 1512 effectively disables this feature.	*/
-#define RX_COPYBREAK    200      /* Value from 3c59x.c */
-
-/* Issue the 82586 workaround command - this is for "busy LANs", but
- * that basically means all LANs nowadays - it has a performance (latency)
- * cost, but is best left set. */
-static const int WORKAROUND_82586=1;
-
-/* Pointers to buffers and their on-card records */
-struct mc32_ring_desc
-{
-	volatile struct skb_header *p;
-	struct sk_buff *skb;
-};
-
-/* Information that needs to be kept for each board. */
-struct mc32_local
-{
-	int slot;
-
-	u32 base;
-	volatile struct mc32_mailbox *rx_box;
-	volatile struct mc32_mailbox *tx_box;
-	volatile struct mc32_mailbox *exec_box;
-        volatile struct mc32_stats *stats;    /* Start of on-card statistics */
-        u16 tx_chain;           /* Transmit list start offset */
-	u16 rx_chain;           /* Receive list start offset */
-        u16 tx_len;             /* Transmit list count */
-        u16 rx_len;             /* Receive list count */
-
-	u16 xceiver_desired_state; /* HALTED or RUNNING */
-	u16 cmd_nonblocking;    /* Thread is uninterested in command result */
-	u16 mc_reload_wait;	/* A multicast load request is pending */
-	u32 mc_list_valid;	/* True when the mclist is set */
-
-	struct mc32_ring_desc tx_ring[TX_RING_LEN];	/* Host Transmit ring */
-	struct mc32_ring_desc rx_ring[RX_RING_LEN];	/* Host Receive ring */
-
-	atomic_t tx_count;	/* buffers left */
-	atomic_t tx_ring_head;  /* index to tx en-queue end */
-	u16 tx_ring_tail;       /* index to tx de-queue end */
-
-	u16 rx_ring_tail;       /* index to rx de-queue end */
-
-	struct semaphore cmd_mutex;    /* Serialises issuing of execute commands */
-        struct completion execution_cmd; /* Card has completed an execute command */
-	struct completion xceiver_cmd;   /* Card has completed a tx or rx command */
-};
-
-/* The station (ethernet) address prefix, used for a sanity check. */
-#define SA_ADDR0 0x02
-#define SA_ADDR1 0x60
-#define SA_ADDR2 0xAC
-
-struct mca_adapters_t {
-	unsigned int	id;
-	char		*name;
-};
-
-static const struct mca_adapters_t mc32_adapters[] = {
-	{ 0x0041, "3COM EtherLink MC/32" },
-	{ 0x8EF5, "IBM High Performance Lan Adapter" },
-	{ 0x0000, NULL }
-};
-
-
-/* Macros for ring index manipulations */
-static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
-static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
-
-static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
-
-
-/* Index to functions, as function prototypes. */
-static int	mc32_probe1(struct net_device *dev, int ioaddr);
-static int      mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
-static int	mc32_open(struct net_device *dev);
-static void	mc32_timeout(struct net_device *dev);
-static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
-				    struct net_device *dev);
-static irqreturn_t mc32_interrupt(int irq, void *dev_id);
-static int	mc32_close(struct net_device *dev);
-static struct	net_device_stats *mc32_get_stats(struct net_device *dev);
-static void	mc32_set_multicast_list(struct net_device *dev);
-static void	mc32_reset_multicast_list(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
-
-static void cleanup_card(struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	unsigned slot = lp->slot;
-	mca_mark_as_unused(slot);
-	mca_set_adapter_name(slot, NULL);
-	free_irq(dev->irq, dev);
-	release_region(dev->base_addr, MC32_IO_EXTENT);
-}
-
-/**
- * mc32_probe 	-	Search for supported boards
- * @unit: interface number to use
- *
- * Because the MCA bus is a real bus and we can scan for cards, we could do a
- * single scan for all boards here. Right now we use the passed-in device
- * structure and scan for only one board. This needs fixing for modules
- * in particular.
- */
-
-struct net_device *__init mc32_probe(int unit)
-{
-	struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
-	static int current_mca_slot = -1;
-	int i;
-	int err;
-
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	if (unit >= 0)
-		sprintf(dev->name, "eth%d", unit);
-
-	/* Do not check any supplied i/o locations.
-	   POS registers usually don't fail :) */
-
-	/* MCA cards have POS registers.
-	   Autodetecting MCA cards is extremely simple.
-	   Just search for the card. */
-
-	for(i = 0; (mc32_adapters[i].name != NULL); i++) {
-		current_mca_slot =
-			mca_find_unused_adapter(mc32_adapters[i].id, 0);
-
-		if(current_mca_slot != MCA_NOTFOUND) {
-			if(!mc32_probe1(dev, current_mca_slot))
-			{
-				mca_set_adapter_name(current_mca_slot,
-						mc32_adapters[i].name);
-				mca_mark_as_used(current_mca_slot);
-				err = register_netdev(dev);
-				if (err) {
-					cleanup_card(dev);
-					free_netdev(dev);
-					dev = ERR_PTR(err);
-				}
-				return dev;
-			}
-
-		}
-	}
-	free_netdev(dev);
-	return ERR_PTR(-ENODEV);
-}
-
-static const struct net_device_ops netdev_ops = {
-	.ndo_open		= mc32_open,
-	.ndo_stop		= mc32_close,
-	.ndo_start_xmit		= mc32_send_packet,
-	.ndo_get_stats		= mc32_get_stats,
-	.ndo_set_rx_mode	= mc32_set_multicast_list,
-	.ndo_tx_timeout		= mc32_timeout,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_set_mac_address 	= eth_mac_addr,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-/**
- * mc32_probe1	-	Check a given slot for a board and test the card
- * @dev:  Device structure to fill in
- * @slot: The MCA bus slot being used by this card
- *
- * Decode the slot data and configure the card structures. Having done this we
- * can reset the card and configure it. The card does a full self test cycle
- * in firmware so we have to wait for it to return and post us either a
- * failure case or some addresses we use to find the board internals.
- */
-
-static int __init mc32_probe1(struct net_device *dev, int slot)
-{
-	static unsigned version_printed;
-	int i, err;
-	u8 POS;
-	u32 base;
-	struct mc32_local *lp = netdev_priv(dev);
-	static const u16 mca_io_bases[] = {
-		0x7280,0x7290,
-		0x7680,0x7690,
-		0x7A80,0x7A90,
-		0x7E80,0x7E90
-	};
-	static const u32 mca_mem_bases[] = {
-		0x00C0000,
-		0x00C4000,
-		0x00C8000,
-		0x00CC000,
-		0x00D0000,
-		0x00D4000,
-		0x00D8000,
-		0x00DC000
-	};
-	static const char * const failures[] = {
-		"Processor instruction",
-		"Processor data bus",
-		"Processor data bus",
-		"Processor data bus",
-		"Adapter bus",
-		"ROM checksum",
-		"Base RAM",
-		"Extended RAM",
-		"82586 internal loopback",
-		"82586 initialisation failure",
-		"Adapter list configuration error"
-	};
-
-	/* Time to play MCA games */
-
-	if (mc32_debug  &&  version_printed++ == 0)
-		pr_debug("%s", version);
-
-	pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot);
-
-	POS = mca_read_stored_pos(slot, 2);
-
-	if(!(POS&1))
-	{
-		pr_cont("disabled.\n");
-		return -ENODEV;
-	}
-
-	/* Fill in the 'dev' fields. */
-	dev->base_addr = mca_io_bases[(POS>>1)&7];
-	dev->mem_start = mca_mem_bases[(POS>>4)&7];
-
-	POS = mca_read_stored_pos(slot, 4);
-	if(!(POS&1))
-	{
-		pr_cont("memory window disabled.\n");
-		return -ENODEV;
-	}
-
-	POS = mca_read_stored_pos(slot, 5);
-
-	i=(POS>>4)&3;
-	if(i==3)
-	{
-		pr_cont("invalid memory window.\n");
-		return -ENODEV;
-	}
-
-	i*=16384;
-	i+=16384;
-
-	dev->mem_end=dev->mem_start + i;
-
-	dev->irq = ((POS>>2)&3)+9;
-
-	if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
-	{
-		pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr);
-		return -EBUSY;
-	}
-
-	pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
-		dev->base_addr, dev->irq, dev->mem_start, i/1024);
-
-
-	/* We ought to set the cache line size here.. */
-
-
-	/*
-	 *	Go PROM browsing
-	 */
-
-	/* Retrieve and print the ethernet address. */
-	for (i = 0; i < 6; i++)
-	{
-		mca_write_pos(slot, 6, i+12);
-		mca_write_pos(slot, 7, 0);
-
-		dev->dev_addr[i] = mca_read_pos(slot,3);
-	}
-
-	pr_info("%s: Address %pM ", dev->name, dev->dev_addr);
-
-	mca_write_pos(slot, 6, 0);
-	mca_write_pos(slot, 7, 0);
-
-	POS = mca_read_stored_pos(slot, 4);
-
-	if(POS&2)
-		pr_cont(": BNC port selected.\n");
-	else
-		pr_cont(": AUI port selected.\n");
-
-	POS=inb(dev->base_addr+HOST_CTRL);
-	POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
-	POS&=~HOST_CTRL_INTE;
-	outb(POS, dev->base_addr+HOST_CTRL);
-	/* Reset adapter */
-	udelay(100);
-	/* Reset off */
-	POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
-	outb(POS, dev->base_addr+HOST_CTRL);
-
-	udelay(300);
-
-	/*
-	 *	Grab the IRQ
-	 */
-
-	err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev);
-	if (err) {
-		release_region(dev->base_addr, MC32_IO_EXTENT);
-		pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
-		goto err_exit_ports;
-	}
-
-	memset(lp, 0, sizeof(struct mc32_local));
-	lp->slot = slot;
-
-	i=0;
-
-	base = inb(dev->base_addr);
-
-	while(base == 0xFF)
-	{
-		i++;
-		if(i == 1000)
-		{
-			pr_err("%s: failed to boot adapter.\n", dev->name);
-			err = -ENODEV;
-			goto err_exit_irq;
-		}
-		udelay(1000);
-		if(inb(dev->base_addr+2)&(1<<5))
-			base = inb(dev->base_addr);
-	}
-
-	if(base>0)
-	{
-		if(base < 0x0C)
-			pr_err("%s: %s%s.\n", dev->name, failures[base-1],
-				base<0x0A?" test failure":"");
-		else
-			pr_err("%s: unknown failure %d.\n", dev->name, base);
-		err = -ENODEV;
-		goto err_exit_irq;
-	}
-
-	base=0;
-	for(i=0;i<4;i++)
-	{
-		int n=0;
-
-		while(!(inb(dev->base_addr+2)&(1<<5)))
-		{
-			n++;
-			udelay(50);
-			if(n>100)
-			{
-				pr_err("%s: mailbox read fail (%d).\n", dev->name, i);
-				err = -ENODEV;
-				goto err_exit_irq;
-			}
-		}
-
-		base|=(inb(dev->base_addr)<<(8*i));
-	}
-
-	lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
-
-	base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
-
-	lp->base = dev->mem_start+base;
-
-	lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
-	lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
-
-	lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
-
-	/*
-	 *	Descriptor chains (card relative)
-	 */
-
-	lp->tx_chain 		= lp->exec_box->data[8];   /* Transmit list start offset */
-	lp->rx_chain 		= lp->exec_box->data[10];  /* Receive list start offset */
-	lp->tx_len 		= lp->exec_box->data[9];   /* Transmit list count */
-	lp->rx_len 		= lp->exec_box->data[11];  /* Receive list count */
-
-	sema_init(&lp->cmd_mutex, 0);
-	init_completion(&lp->execution_cmd);
-	init_completion(&lp->xceiver_cmd);
-
-	pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
-		dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
-
-	dev->netdev_ops		= &netdev_ops;
-	dev->watchdog_timeo	= HZ*5;	/* Board does all the work */
-	dev->ethtool_ops	= &netdev_ethtool_ops;
-
-	return 0;
-
-err_exit_irq:
-	free_irq(dev->irq, dev);
-err_exit_ports:
-	release_region(dev->base_addr, MC32_IO_EXTENT);
-	return err;
-}
-
-
-/**
- *	mc32_ready_poll		-	wait until we can feed it a command
- *	@dev:	The device to wait for
- *
- *	Wait until the card becomes ready to accept a command via the
- *	command register. This tells us nothing about the completion
- *	status of any pending commands and takes very little time at all.
- */
-
-static inline void mc32_ready_poll(struct net_device *dev)
-{
-	int ioaddr = dev->base_addr;
-	while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
-}
-
-
-/**
- *	mc32_command_nowait	-	send a command non blocking
- *	@dev: The 3c527 to issue the command to
- *	@cmd: The command word to write to the mailbox
- *	@data: A data block if the command expects one
- *	@len: Length of the data block
- *
- *	Send a command from interrupt state. If there is a command
- *	currently being executed then we return an error of -1. It
- *	simply isn't viable to wait around as commands may be
- *	slow. This can theoretically be starved on SMP, but it's hard
- *	to see a realistic situation.  We do not wait for the command
- *	to complete --- we rely on the interrupt handler to tidy up
- *	after us.
- */
-
-static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	int ioaddr = dev->base_addr;
-	int ret = -1;
-
-	if (down_trylock(&lp->cmd_mutex) == 0)
-	{
-		lp->cmd_nonblocking=1;
-		lp->exec_box->mbox=0;
-		lp->exec_box->mbox=cmd;
-		memcpy((void *)lp->exec_box->data, data, len);
-		barrier();	/* the memcpy forgot the volatile so be sure */
-
-		/* Send the command */
-		mc32_ready_poll(dev);
-		outb(1<<6, ioaddr+HOST_CMD);
-
-		ret = 0;
-
-		/* Interrupt handler will signal mutex on completion */
-	}
-
-	return ret;
-}
-
-
-/**
- *	mc32_command	-	send a command and sleep until completion
- *	@dev: The 3c527 card to issue the command to
- *	@cmd: The command word to write to the mailbox
- *	@data: A data block if the command expects one
- *	@len: Length of the data block
- *
- *	Sends exec commands in a user context. This permits us to wait around
- *	for the replies and also to wait for the command buffer to complete
- *	from a previous command before we execute our command. After our
- *	command completes we will attempt any pending multicast reload
- *	we blocked off by hogging the exec buffer.
- *
- *	You feed the card a command, you wait, it interrupts, you get a
- *	reply. All well and good. The complication arises because you use
- *	commands for filter list changes which come in at bh level from things
- *	like IPV6 group stuff.
- */
-
-static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	int ioaddr = dev->base_addr;
-	int ret = 0;
-
-	down(&lp->cmd_mutex);
-
-	/*
-	 *     My Turn
-	 */
-
-	lp->cmd_nonblocking=0;
-	lp->exec_box->mbox=0;
-	lp->exec_box->mbox=cmd;
-	memcpy((void *)lp->exec_box->data, data, len);
-	barrier();	/* the memcpy forgot the volatile so be sure */
-
-	mc32_ready_poll(dev);
-	outb(1<<6, ioaddr+HOST_CMD);
-
-	wait_for_completion(&lp->execution_cmd);
-
-	if(lp->exec_box->mbox&(1<<13))
-		ret = -1;
-
-	up(&lp->cmd_mutex);
-
-	/*
-	 *	A multicast set got blocked - try it now
-         */
-
-	if(lp->mc_reload_wait)
-	{
-		mc32_reset_multicast_list(dev);
-	}
-
-	return ret;
-}
-
-
-/**
- *	mc32_start_transceiver	-	tell board to restart tx/rx
- *	@dev: The 3c527 card to issue the command to
- *
- *	This may be called from the interrupt state, where it is used
- *	to restart the rx ring if the card runs out of rx buffers.
- *
- * 	We must first check if it's ok to (re)start the transceiver. See
- *      mc32_close for details.
- */
-
-static void mc32_start_transceiver(struct net_device *dev) {
-
-	struct mc32_local *lp = netdev_priv(dev);
-	int ioaddr = dev->base_addr;
-
-	/* Ignore RX overflow on device closure */
-	if (lp->xceiver_desired_state==HALTED)
-		return;
-
-	/* Give the card the offset to the post-EOL-bit RX descriptor */
-	mc32_ready_poll(dev);
-	lp->rx_box->mbox=0;
-	lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
-	outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);
-
-	mc32_ready_poll(dev);
-	lp->tx_box->mbox=0;
-	outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);   /* card ignores this on RX restart */
-
-	/* We are not interrupted on start completion */
-}
-
-
-/**
- *	mc32_halt_transceiver	-	tell board to stop tx/rx
- *	@dev: The 3c527 card to issue the command to
- *
- *	We issue the commands to halt the card's transceiver. In fact,
- *	after some experimenting we now simply tell the card to
- *	suspend. When issuing aborts, odd things occasionally happened.
- *
- *	We then sleep until the card has notified us that both rx and
- *	tx have been suspended.
- */
-
-static void mc32_halt_transceiver(struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	int ioaddr = dev->base_addr;
-
-	mc32_ready_poll(dev);
-	lp->rx_box->mbox=0;
-	outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
-	wait_for_completion(&lp->xceiver_cmd);
-
-	mc32_ready_poll(dev);
-	lp->tx_box->mbox=0;
-	outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
-	wait_for_completion(&lp->xceiver_cmd);
-}
-
-
-/**
- *	mc32_load_rx_ring	-	load the ring of receive buffers
- *	@dev: 3c527 to build the ring for
- *
- *	This initialises the on-card and driver data structures to
- *	the point where mc32_start_transceiver() can be called.
- *
- *	The card sets up the receive ring for us. We are required to use the
- *	ring it provides, although the size of the ring is configurable.
- *
- * 	We allocate an sk_buff for each ring entry in turn and
- * 	initialise its house-keeping info. At the same time, we read
- * 	each 'next' pointer in our rx_ring array. This reduces slow
- * 	shared-memory reads and makes it easy to access predecessor
- * 	descriptors.
- *
- *	We then set the end-of-list bit for the last entry so that the
- * 	card will know when it has run out of buffers.
- */
-
-static int mc32_load_rx_ring(struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	int i;
-	u16 rx_base;
-	volatile struct skb_header *p;
-
-	rx_base=lp->rx_chain;
-
-	for(i=0; i<RX_RING_LEN; i++) {
-		lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
-		if (lp->rx_ring[i].skb==NULL) {
-			for (;i>=0;i--)
-				kfree_skb(lp->rx_ring[i].skb);
-			return -ENOBUFS;
-		}
-		skb_reserve(lp->rx_ring[i].skb, 18);
-
-		p=isa_bus_to_virt(lp->base+rx_base);
-
-		p->control=0;
-		p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
-		p->status=0;
-		p->length=1532;
-
-		lp->rx_ring[i].p=p;
-		rx_base=p->next;
-	}
-
-	lp->rx_ring[i-1].p->control |= CONTROL_EOL;
-
-	lp->rx_ring_tail=0;
-
-	return 0;
-}
-
-
-/**
- *	mc32_flush_rx_ring	-	free the ring of receive buffers
- *	@dev: The 3c527 whose rx ring is to be flushed
- *
- *	Free the buffer for each ring slot. This may be called
- *      before mc32_load_rx_ring(), eg. on error in mc32_open().
- *      Requires rx skb pointers to point to a valid skb, or NULL.
- */
-
-static void mc32_flush_rx_ring(struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	int i;
-
-	for(i=0; i < RX_RING_LEN; i++)
-	{
-		if (lp->rx_ring[i].skb) {
-			dev_kfree_skb(lp->rx_ring[i].skb);
-			lp->rx_ring[i].skb = NULL;
-		}
-		lp->rx_ring[i].p=NULL;
-	}
-}
-
-
-/**
- *	mc32_load_tx_ring	-	load transmit ring
- *	@dev: The 3c527 card to issue the command to
- *
- *	This sets up the host transmit data-structures.
- *
- *	First, we obtain from the card its current position in the tx
- *	ring, so that we will know where to begin transmitting
- *	packets.
- *
- * 	Then, we read the 'next' pointers from the on-card tx ring into
- *  	our tx_ring array to reduce slow shared-mem reads. Finally, we
- *  	initialise the tx house-keeping variables.
- *
- */
-
-static void mc32_load_tx_ring(struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	volatile struct skb_header *p;
-	int i;
-	u16 tx_base;
-
-	tx_base=lp->tx_box->data[0];
-
-	for(i=0 ; i<TX_RING_LEN ; i++)
-	{
-		p=isa_bus_to_virt(lp->base+tx_base);
-		lp->tx_ring[i].p=p;
-		lp->tx_ring[i].skb=NULL;
-
-		tx_base=p->next;
-	}
-
-	/* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
-	/* see mc32_tx_ring */
-
-	atomic_set(&lp->tx_count, TX_RING_LEN-1);
-	atomic_set(&lp->tx_ring_head, 0);
-	lp->tx_ring_tail=0;
-}
-
-
-/**
- *	mc32_flush_tx_ring 	-	free transmit ring
- *	@dev: The 3c527 whose tx ring is to be flushed
- *
- *      If the ring is non-empty, zip over it, freeing any
- *      allocated sk_buffs.  The tx ring house-keeping variables are
- *      then reset. Requires tx skb pointers to point to a valid skb,
- *      or NULL.
- */
-
-static void mc32_flush_tx_ring(struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	int i;
-
-	for (i=0; i < TX_RING_LEN; i++)
-	{
-		if (lp->tx_ring[i].skb)
-		{
-			dev_kfree_skb(lp->tx_ring[i].skb);
-			lp->tx_ring[i].skb = NULL;
-		}
-	}
-
-	atomic_set(&lp->tx_count, 0);
-	atomic_set(&lp->tx_ring_head, 0);
-	lp->tx_ring_tail=0;
-}
-
-
-/**
- *	mc32_open	-	handle 'up' of card
- *	@dev: device to open
- *
- *	The user is trying to bring the card into ready state. This requires
- *	a brief dialogue with the card. Firstly we enable interrupts and then
- *	'indications'. Without these enabled the card doesn't bother telling
- *	us what it has done. This had me puzzled for a week.
- *
- *	We configure the number of card descriptors, then load the network
- *	address and multicast filters. Turn on the workaround mode. This
- *	works around a bug in the 82586 - it asks the firmware to do
- *	so. It has a performance (latency) hit but is needed on busy
- *	[read most] lans. We load the ring with buffers then we kick it
- *	all off.
- */
-
-static int mc32_open(struct net_device *dev)
-{
-	int ioaddr = dev->base_addr;
-	struct mc32_local *lp = netdev_priv(dev);
-	u8 one=1;
-	u8 regs;
-	u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};
-
-	/*
-	 *	Interrupts enabled
-	 */
-
-	regs=inb(ioaddr+HOST_CTRL);
-	regs|=HOST_CTRL_INTE;
-	outb(regs, ioaddr+HOST_CTRL);
-
-	/*
-	 *      Allow ourselves to issue commands
-	 */
-
-	up(&lp->cmd_mutex);
-
-
-	/*
-	 *	Send the indications on command
-	 */
-
-	mc32_command(dev, 4, &one, 2);
-
-	/*
-	 *	Poke it to make sure it's really dead.
-	 */
-
-	mc32_halt_transceiver(dev);
-	mc32_flush_tx_ring(dev);
-
-	/*
-	 *	Ask card to set up on-card descriptors to our spec
-	 */
-
-	if(mc32_command(dev, 8, descnumbuffs, 4)) {
-		pr_info("%s: %s rejected our buffer configuration!\n",
-	 	       dev->name, cardname);
-		mc32_close(dev);
-		return -ENOBUFS;
-	}
-
-	/* Report new configuration */
-	mc32_command(dev, 6, NULL, 0);
-
-	lp->tx_chain 		= lp->exec_box->data[8];   /* Transmit list start offset */
-	lp->rx_chain 		= lp->exec_box->data[10];  /* Receive list start offset */
-	lp->tx_len 		= lp->exec_box->data[9];   /* Transmit list count */
-	lp->rx_len 		= lp->exec_box->data[11];  /* Receive list count */
-
-	/* Set Network Address */
-	mc32_command(dev, 1, dev->dev_addr, 6);
-
-	/* Set the filters */
-	mc32_set_multicast_list(dev);
-
-	if (WORKAROUND_82586) {
-		u16 zero_word=0;
-		mc32_command(dev, 0x0D, &zero_word, 2);   /* 82586 bug workaround on  */
-	}
-
-	mc32_load_tx_ring(dev);
-
-	if(mc32_load_rx_ring(dev))
-	{
-		mc32_close(dev);
-		return -ENOBUFS;
-	}
-
-	lp->xceiver_desired_state = RUNNING;
-
-	/* And finally, set the ball rolling... */
-	mc32_start_transceiver(dev);
-
-	netif_start_queue(dev);
-
-	return 0;
-}
-
-
-/**
- *	mc32_timeout	-	handle a timeout from the network layer
- *	@dev: 3c527 that timed out
- *
- *	Handle a timeout on transmit from the 3c527. This normally means
- *	bad things as the hardware handles cable timeouts and mess for
- *	us.
- *
- */
-
-static void mc32_timeout(struct net_device *dev)
-{
-	pr_warning("%s: transmit timed out?\n", dev->name);
-	/* Try to restart the adaptor. */
-	netif_wake_queue(dev);
-}
-
-
-/**
- *	mc32_send_packet	-	queue a frame for transmit
- *	@skb: buffer to transmit
- *	@dev: 3c527 to send it out of
- *
- *	Transmit a buffer. This normally means throwing the buffer onto
- *	the transmit queue as the queue is quite large. If the queue is
- *	full then we set tx_busy and return. Once the interrupt handler
- *	gets messages telling it to reclaim transmit queue entries, we will
- *	clear tx_busy and the kernel will start calling this again.
- *
- *      We do not disable interrupts or acquire any locks; this can
- *      run concurrently with mc32_tx_ring(), and the function itself
- *      is serialised at a higher layer. However, similarly for the
- *      card itself, we must ensure that we update tx_ring_head only
- *      after we've established a valid packet on the tx ring (and
- *      before we let the card "see" it, to prevent it racing with the
- *      irq handler).
- *
- */
-
-static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
-				    struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	u32 head = atomic_read(&lp->tx_ring_head);
-
-	volatile struct skb_header *p, *np;
-
-	netif_stop_queue(dev);
-
-	if(atomic_read(&lp->tx_count)==0) {
-		return NETDEV_TX_BUSY;
-	}
-
-	if (skb_padto(skb, ETH_ZLEN)) {
-		netif_wake_queue(dev);
-		return NETDEV_TX_OK;
-	}
-
-	atomic_dec(&lp->tx_count);
-
-	/* P is the last sending/sent buffer as a pointer */
-	p=lp->tx_ring[head].p;
-
-	head = next_tx(head);
-
-	/* NP is the buffer we will be loading */
-	np=lp->tx_ring[head].p;
-
-	/* We will need this to flush the buffer out */
-	lp->tx_ring[head].skb=skb;
-
-	np->length      = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
-	np->data	= isa_virt_to_bus(skb->data);
-	np->status	= 0;
-	np->control     = CONTROL_EOP | CONTROL_EOL;
-	wmb();
-
-	/*
-	 * The new frame has been setup; we can now
-	 * let the interrupt handler and card "see" it
-	 */
-
-	atomic_set(&lp->tx_ring_head, head);
-	p->control     &= ~CONTROL_EOL;
-
-	netif_wake_queue(dev);
-	return NETDEV_TX_OK;
-}
-
-
-/**
- *	mc32_update_stats	-	pull off the on board statistics
- *	@dev: 3c527 to service
- *
- *
- *	Query and reset the on-card stats. There's the small possibility
- *	of a race here, which would result in an underestimation of
- *	actual errors. As such, we'd prefer to keep all our stats
- *	collection in software. As a rule, we do. However, it can't be
- *	used for rx errors and collisions as, by default, the card discards
- *	bad rx packets.
- *
- *	Setting the SAV BP in the rx filter command supposedly
- *	stops this behaviour. However, testing shows that it only seems to
- *	enable the collation of on-card rx statistics --- the driver
- *	never sees an RX descriptor with an error status set.
- *
- */
-
-static void mc32_update_stats(struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	volatile struct mc32_stats *st = lp->stats;
-
-	u32 rx_errors=0;
-
-	rx_errors+=dev->stats.rx_crc_errors   +=st->rx_crc_errors;
-	                                           st->rx_crc_errors=0;
-	rx_errors+=dev->stats.rx_fifo_errors  +=st->rx_overrun_errors;
-	                                           st->rx_overrun_errors=0;
-	rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
- 	                                           st->rx_alignment_errors=0;
-	rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
-	                                           st->rx_tooshort_errors=0;
-	rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
-	                                           st->rx_outofresource_errors=0;
-        dev->stats.rx_errors=rx_errors;
-
-	/* Number of packets which saw one collision */
-	dev->stats.collisions+=st->dataC[10];
-	st->dataC[10]=0;
-
-	/* Number of packets which saw 2--15 collisions */
-	dev->stats.collisions+=st->dataC[11];
-	st->dataC[11]=0;
-}
-
-
-/**
- *	mc32_rx_ring	-	process the receive ring
- *	@dev: 3c527 that needs its receive ring processing
- *
- *
- *	We have received one or more indications from the card that a
- *	receive has completed. The buffer ring thus contains dirty
- *	entries. We walk the ring by iterating over the circular rx_ring
- *	array, starting at the next dirty buffer (which happens to be the
- *	one we finished up at last time around).
- *
- *	For each completed packet, we will either copy it and pass it up
- * 	the stack or, if the packet is near MTU sized, we allocate
- *	another buffer and flip the old one up the stack.
- *
- *	We must succeed in keeping a buffer on the ring. If necessary we
- *	will toss a received packet rather than lose a ring entry. Once
- *	the first uncompleted descriptor is found, we move the
- *	End-Of-List bit to include the buffers just processed.
- *
- */
-
-static void mc32_rx_ring(struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	volatile struct skb_header *p;
-	u16 rx_ring_tail;
-	u16 rx_old_tail;
-	int x=0;
-
-	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;
-
-	do
-	{
-		p=lp->rx_ring[rx_ring_tail].p;
-
-		if(!(p->status & (1<<7))) { /* Not COMPLETED */
-			break;
-		}
-		if(p->status & (1<<6)) /* COMPLETED_OK */
-		{
-
-			u16 length=p->length;
-			struct sk_buff *skb;
-			struct sk_buff *newskb;
-
-			/* Try to save time by avoiding a copy on big frames */
-
-			if ((length > RX_COPYBREAK) &&
-			    ((newskb = netdev_alloc_skb(dev, 1532)) != NULL))
-			{
-				skb=lp->rx_ring[rx_ring_tail].skb;
-				skb_put(skb, length);
-
-				skb_reserve(newskb,18);
-				lp->rx_ring[rx_ring_tail].skb=newskb;
-				p->data=isa_virt_to_bus(newskb->data);
-			}
-			else
-			{
-				skb = netdev_alloc_skb(dev, length + 2);
-
-				if(skb==NULL) {
-					dev->stats.rx_dropped++;
-					goto dropped;
-				}
-
-				skb_reserve(skb,2);
-				memcpy(skb_put(skb, length),
-				       lp->rx_ring[rx_ring_tail].skb->data, length);
-			}
-
-			skb->protocol=eth_type_trans(skb,dev);
- 			dev->stats.rx_packets++;
- 			dev->stats.rx_bytes += length;
-			netif_rx(skb);
-		}
-
-	dropped:
-		p->length = 1532;
-		p->status = 0;
-
-		rx_ring_tail=next_rx(rx_ring_tail);
-	}
-        while(x++<48);
-
-	/* If there was actually a frame to be processed, place the EOL bit */
-	/* at the descriptor prior to the one to be filled next */
-
-	if (rx_ring_tail != rx_old_tail)
-	{
-		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL;
-		lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL;
-
-		lp->rx_ring_tail=rx_ring_tail;
-	}
-}
-
-
-/**
- *	mc32_tx_ring	-	process completed transmits
- *	@dev: 3c527 that needs its transmit ring processing
- *
- *
- *	This operates in a similar fashion to mc32_rx_ring. We iterate
- *	over the transmit ring. For each descriptor which has been
- *	processed by the card, we free its associated buffer and note
- *	any errors. This continues until the transmit ring is emptied
- *	or we reach a descriptor that hasn't yet been processed by the
- *	card.
- *
- */
-
-static void mc32_tx_ring(struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	volatile struct skb_header *np;
-
-	/*
-	 * We rely on head==tail to mean 'queue empty'.
-	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
-	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
-	 * condition with 'queue full'
-	 */
-
-	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
-	{
-		u16 t;
-
-		t=next_tx(lp->tx_ring_tail);
-		np=lp->tx_ring[t].p;
-
-		if(!(np->status & (1<<7)))
-		{
-			/* Not COMPLETED */
-			break;
-		}
-		dev->stats.tx_packets++;
-		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
-		{
-			dev->stats.tx_errors++;
-
-			switch(np->status&0x0F)
-			{
-				case 1:
-					dev->stats.tx_aborted_errors++;
-					break; /* Max collisions */
-				case 2:
-					dev->stats.tx_fifo_errors++;
-					break;
-				case 3:
-					dev->stats.tx_carrier_errors++;
-					break;
-				case 4:
-					dev->stats.tx_window_errors++;
-					break;  /* CTS Lost */
-				case 5:
-					dev->stats.tx_aborted_errors++;
-					break; /* Transmit timeout */
-			}
-		}
-		/* Packets are sent in order - this is
-		    basically a FIFO queue of buffers matching
-		    the card ring */
-		dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
-		dev_kfree_skb_irq(lp->tx_ring[t].skb);
-		lp->tx_ring[t].skb=NULL;
-		atomic_inc(&lp->tx_count);
-		netif_wake_queue(dev);
-
-		lp->tx_ring_tail=t;
-	}
-
-}
-
-
-/**
- *	mc32_interrupt		-	handle an interrupt from a 3c527
- *	@irq: Interrupt number
- *	@dev_id: 3c527 that requires servicing
- *	@regs: Registers (unused)
- *
- *
- *	An interrupt is raised whenever the 3c527 writes to the command
- *	register. This register contains the message it wishes to send us
- *	packed into a single byte field. We keep reading status entries
- *	until we have processed all the control items, but simply count
- *	transmit and receive reports. When all reports are in we empty the
- *	transceiver rings as appropriate. This saves the overhead of
- *	multiple command requests.
- *
- *	Because MCA is level-triggered, we shouldn't miss indications.
- *	Therefore, we needn't ask the card to suspend interrupts within
- *	this handler. The card receives an implicit acknowledgment of the
- *	current interrupt when we read the command register.
- *
- */
-
-static irqreturn_t mc32_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev = dev_id;
-	struct mc32_local *lp;
-	int ioaddr, status, boguscount = 0;
-	int rx_event = 0;
-	int tx_event = 0;
-
-	ioaddr = dev->base_addr;
-	lp = netdev_priv(dev);
-
-	/* See what's cooking */
-
-	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
-	{
-		status=inb(ioaddr+HOST_CMD);
-
-		pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
-			(status&7), (status>>3)&7, (status>>6)&1,
-			(status>>7)&1, boguscount);
-
-		switch(status&7)
-		{
-			case 0:
-				break;
-			case 6: /* TX fail */
-			case 2:	/* TX ok */
-				tx_event = 1;
-				break;
-			case 3: /* Halt */
-			case 4: /* Abort */
-				complete(&lp->xceiver_cmd);
-				break;
-			default:
-				pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
-		}
-		status>>=3;
-		switch(status&7)
-		{
-			case 0:
-				break;
-			case 2:	/* RX */
-				rx_event=1;
-				break;
-			case 3: /* Halt */
-			case 4: /* Abort */
-				complete(&lp->xceiver_cmd);
-				break;
-			case 6:
-				/* Out of RX buffers stat */
-				/* Must restart rx */
-				dev->stats.rx_dropped++;
-				mc32_rx_ring(dev);
-				mc32_start_transceiver(dev);
-				break;
-			default:
-				pr_notice("%s: strange rx ack %d\n",
-					dev->name, status&7);
-		}
-		status>>=3;
-		if(status&1)
-		{
-			/*
-			 * No thread is waiting: we need to tidy
-			 * up ourself.
-			 */
-
-			if (lp->cmd_nonblocking) {
-				up(&lp->cmd_mutex);
-				if (lp->mc_reload_wait)
-					mc32_reset_multicast_list(dev);
-			}
-			else complete(&lp->execution_cmd);
-		}
-		if(status&2)
-		{
-			/*
-			 *	We get interrupted once per
-			 *	counter that is about to overflow.
-			 */
-
-			mc32_update_stats(dev);
-		}
-	}
-
-
-	/*
-	 *	Process the transmit and receive rings
-         */
-
-	if(tx_event)
-		mc32_tx_ring(dev);
-
-	if(rx_event)
-		mc32_rx_ring(dev);
-
-	return IRQ_HANDLED;
-}
-
-
-/**
- *	mc32_close	-	user configuring the 3c527 down
- *	@dev: 3c527 card to shut down
- *
- *	The 3c527 is a bus mastering device. We must be careful how we
- *	shut it down. It may also be using a shared interrupt, so we have
- *	to be sure to silence it properly.
- *
- *	We indicate that the card is closing to the rest of the
- *	driver.  Otherwise, it is possible that the card may run out
- *	of receive buffers and restart the transceiver while we're
- *	trying to close it.
- *
- *	We abort any receive and transmits going on and then wait until
- *	any pending exec commands have completed in other code threads.
- *	In theory we can't get here while that is true; in practice I am
- *	paranoid.
- *
- *	We turn off the interrupt enable for the board to be sure it can't
- *	interfere with other devices.
- */
-
-static int mc32_close(struct net_device *dev)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	int ioaddr = dev->base_addr;
-
-	u8 regs;
-	u16 one=1;
-
-	lp->xceiver_desired_state = HALTED;
-	netif_stop_queue(dev);
-
-	/*
-	 *	Send the indications on command (handy debug check)
-	 */
-
-	mc32_command(dev, 4, &one, 2);
-
-	/* Shut down the transceiver */
-
-	mc32_halt_transceiver(dev);
-
-	/* Ensure we issue no more commands beyond this point */
-
-	down(&lp->cmd_mutex);
-
-	/* Ok the card is now stopping */
-
-	regs=inb(ioaddr+HOST_CTRL);
-	regs&=~HOST_CTRL_INTE;
-	outb(regs, ioaddr+HOST_CTRL);
-
-	mc32_flush_rx_ring(dev);
-	mc32_flush_tx_ring(dev);
-
-	mc32_update_stats(dev);
-
-	return 0;
-}
-
-
-/**
- *	mc32_get_stats		-	hand back stats to network layer
- *	@dev: The 3c527 card to handle
- *
- *	We've collected all the stats we can in software already. Now
- *	it's time to update those kept on-card and return the lot.
- *
- */
-
-static struct net_device_stats *mc32_get_stats(struct net_device *dev)
-{
-	mc32_update_stats(dev);
-	return &dev->stats;
-}
-
-
-/**
- *	do_mc32_set_multicast_list	-	attempt to update multicasts
- *	@dev: 3c527 device to load the list on
- *	@retry: indicates this is not the first call.
- *
- *
- * 	Actually set or clear the multicast filter for this adaptor. The
- *	locking issues are handled by this routine. We have to track
- *	state as it may take multiple calls to get the command sequence
- *	completed. We just keep trying to schedule the loads until we
- *	manage to process them all.
- *
- *	num_addrs == -1	Promiscuous mode, receive all packets
- *
- *	num_addrs == 0	Normal mode, clear multicast list
- *
- *	num_addrs > 0	Multicast mode, receive normal and MC packets,
- *			and do best-effort filtering.
- *
- *	See mc32_update_stats() regards setting the SAV BP bit.
- *
- */
-
-static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
-{
-	struct mc32_local *lp = netdev_priv(dev);
-	u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
-
-	if ((dev->flags&IFF_PROMISC) ||
-	    (dev->flags&IFF_ALLMULTI) ||
-	    netdev_mc_count(dev) > 10)
-		/* Enable promiscuous mode */
-		filt |= 1;
-	else if (!netdev_mc_empty(dev))
-	{
-		unsigned char block[62];
-		unsigned char *bp;
-		struct netdev_hw_addr *ha;
-
-		if(retry==0)
-			lp->mc_list_valid = 0;
-		if(!lp->mc_list_valid)
-		{
-			block[1]=0;
-			block[0]=netdev_mc_count(dev);
-			bp=block+2;
-
-			netdev_for_each_mc_addr(ha, dev) {
-				memcpy(bp, ha->addr, 6);
-				bp+=6;
-			}
-			if(mc32_command_nowait(dev, 2, block,
-					       2+6*netdev_mc_count(dev))==-1)
-			{
-				lp->mc_reload_wait = 1;
-				return;
-			}
-			lp->mc_list_valid=1;
-		}
-	}
-
-	if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
-	{
-		lp->mc_reload_wait = 1;
-	}
-	else {
-		lp->mc_reload_wait = 0;
-	}
-}
-
-
-/**
- *	mc32_set_multicast_list	-	queue multicast list update
- *	@dev: The 3c527 to use
- *
- *	Commence loading the multicast list. This is called when the kernel
- *	changes the lists. It will override any pending list we are trying to
- *	load.
- */
-
-static void mc32_set_multicast_list(struct net_device *dev)
-{
-	do_mc32_set_multicast_list(dev,0);
-}
-
-
-/**
- *	mc32_reset_multicast_list	-	reset multicast list
- *	@dev: The 3c527 to use
- *
- *	Attempt the next step in loading the multicast lists. If this attempt
- *	fails to complete then it will be scheduled and this function called
- *	again later from elsewhere.
- */
-
-static void mc32_reset_multicast_list(struct net_device *dev)
-{
-	do_mc32_set_multicast_list(dev,1);
-}
-
-static void netdev_get_drvinfo(struct net_device *dev,
-			       struct ethtool_drvinfo *info)
-{
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
-	return mc32_debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
-	mc32_debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
-	.get_drvinfo		= netdev_get_drvinfo,
-	.get_msglevel		= netdev_get_msglevel,
-	.set_msglevel		= netdev_set_msglevel,
-};
-
-#ifdef MODULE
-
-static struct net_device *this_device;
-
-/**
- *	init_module		-	entry point
- *
- *	Probe and locate a 3c527 card. This really should probe and locate
- *	all the 3c527 cards in the machine, not just one of them. Yes, you can
- *	insmod multiple modules for now, but it's a hack.
- */
-
-int __init init_module(void)
-{
-	this_device = mc32_probe(-1);
-	if (IS_ERR(this_device))
-		return PTR_ERR(this_device);
-	return 0;
-}
-
-/**
- *	cleanup_module	-	free resources for an unload
- *
- *	Unloading time. We release the MCA bus resources and the interrupt
- *	at which point everything is ready to unload. The card must be stopped
- *	at this point or we would not have been called. When we unload we
- *	leave the card stopped but not totally shut down. When the card is
- *	initialized it must be rebooted or the rings reloaded before any
- *	transmit operations are allowed to start scribbling into memory.
- */
-
-void __exit cleanup_module(void)
-{
-	unregister_netdev(this_device);
-	cleanup_card(this_device);
-	free_netdev(this_device);
-}
-
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/3c527.h b/drivers/net/ethernet/i825xx/3c527.h
deleted file mode 100644
index d693b8d..0000000
--- a/drivers/net/ethernet/i825xx/3c527.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *	3COM "EtherLink MC/32" Descriptions
- */
-
-/*
- *	Registers
- */
-
-#define HOST_CMD		0
-#define         HOST_CMD_START_RX   (1<<3)
-#define         HOST_CMD_SUSPND_RX  (3<<3)
-#define         HOST_CMD_RESTRT_RX  (5<<3)
-
-#define         HOST_CMD_SUSPND_TX  3
-#define         HOST_CMD_RESTRT_TX  5
-
-
-#define HOST_STATUS		2
-#define		HOST_STATUS_CRR	(1<<6)
-#define		HOST_STATUS_CWR	(1<<5)
-
-
-#define HOST_CTRL		6
-#define		HOST_CTRL_ATTN	(1<<7)
-#define 	HOST_CTRL_RESET	(1<<6)
-#define 	HOST_CTRL_INTE	(1<<2)
-
-#define HOST_RAMPAGE		8
-
-#define HALTED 0
-#define RUNNING 1
-
-struct mc32_mailbox
-{
- 	u16 mbox;
- 	u16 data[1];
-} __packed;
-
-struct skb_header
-{
-	u8 status;
-	u8 control;
-	u16 next;	/* Do not change! */
-	u16 length;
-	u32 data;
-} __packed;
-
-struct mc32_stats
-{
-	/* RX Errors */
-	u32 rx_crc_errors;
-	u32 rx_alignment_errors;
-	u32 rx_overrun_errors;
-	u32 rx_tooshort_errors;
-	u32 rx_toolong_errors;
-	u32 rx_outofresource_errors;
-
-	u32 rx_discarded;  /* via card pattern match filter */
-
-	/* TX Errors */
-	u32 tx_max_collisions;
-	u32 tx_carrier_errors;
-	u32 tx_underrun_errors;
-	u32 tx_cts_errors;
-	u32 tx_timeout_errors;
-
-	/* various cruft */
-	u32 dataA[6];
-	u16 dataB[5];
-	u32 dataC[14];
-} __packed;
-
-#define STATUS_MASK	0x0F
-#define COMPLETED	(1<<7)
-#define COMPLETED_OK	(1<<6)
-#define BUFFER_BUSY	(1<<5)
-
-#define CONTROL_EOP	(1<<7)	/* End Of Packet */
-#define CONTROL_EOL	(1<<6)	/* End of List */
-
-#define MCA_MC32_ID	0x0041	/* Our MCA ident */
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
index ca1ae98..fed5080 100644
--- a/drivers/net/ethernet/i825xx/Kconfig
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -43,28 +43,6 @@
 	  To compile this driver as a module, choose M here. The module
 	  will be called 3c507.
 
-config ELMC
-	tristate "3c523 \"EtherLink/MC\" support"
-	depends on MCA_LEGACY
-	---help---
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
-
-	  To compile this driver as a module, choose M here. The module
-	  will be called 3c523.
-
-config ELMC_II
-	tristate "3c527 \"EtherLink/MC 32\" support (EXPERIMENTAL)"
-	depends on MCA && MCA_LEGACY
-	---help---
-	  If you have a network (Ethernet) card of this type, say Y and read
-	  the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
-
-	  To compile this driver as a module, choose M here. The module
-	  will be called 3c527.
-
 config ARM_ETHER1
 	tristate "Acorn Ether1 support"
 	depends on ARM && ARCH_ACORN
diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile
index f68a369..6adff85 100644
--- a/drivers/net/ethernet/i825xx/Makefile
+++ b/drivers/net/ethernet/i825xx/Makefile
@@ -7,8 +7,6 @@
 obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
 obj-$(CONFIG_ELPLUS) += 3c505.o
 obj-$(CONFIG_EL16) += 3c507.o
-obj-$(CONFIG_ELMC) += 3c523.o
-obj-$(CONFIG_ELMC_II) += 3c527.o
 obj-$(CONFIG_LP486E) += lp486e.o
 obj-$(CONFIG_NI52) += ni52.o
 obj-$(CONFIG_SUN3_82586) += sun3_82586.o
diff --git a/drivers/net/ethernet/i825xx/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c
index cc2e66a..7a6a2f0 100644
--- a/drivers/net/ethernet/i825xx/eexpress.c
+++ b/drivers/net/ethernet/i825xx/eexpress.c
@@ -9,7 +9,7 @@
  * Many modifications, and currently maintained, by
  *  Philip Blundell <philb@gnu.org>
  * Added the Compaq LTE  Alan Cox <alan@lxorguk.ukuu.org.uk>
- * Added MCA support Adam Fritzler
+ * Added MCA support Adam Fritzler (now deleted)
  *
  * Note - this driver is experimental still - it has problems on faster
  * machines. Someone needs to sit down and go through it line by line with
@@ -111,7 +111,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
-#include <linux/mca-legacy.h>
 #include <linux/spinlock.h>
 #include <linux/bitops.h>
 #include <linux/jiffies.h>
@@ -227,16 +226,6 @@
 /* maps irq number to EtherExpress magic value */
 static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 };
 
-#ifdef CONFIG_MCA_LEGACY
-/* mapping of the first four bits of the second POS register */
-static unsigned short mca_iomap[] = {
-	0x270, 0x260, 0x250, 0x240, 0x230, 0x220, 0x210, 0x200,
-	0x370, 0x360, 0x350, 0x340, 0x330, 0x320, 0x310, 0x300
-};
-/* bits 5-7 of the second POS register */
-static char mca_irqmap[] = { 12, 9, 3, 4, 5, 10, 11, 15 };
-#endif
-
 /*
  * Prototypes for Linux interface
  */
@@ -340,53 +329,6 @@
 
 	dev->if_port = 0xff; /* not set */
 
-#ifdef CONFIG_MCA_LEGACY
-	if (MCA_bus) {
-		int slot = 0;
-
-		/*
-		 * Only find one card at a time.  Subsequent calls
-		 * will find others, however, proper multicard MCA
-		 * probing and setup can't be done with the
-		 * old-style Space.c init routines.  -- ASF
-		 */
-		while (slot != MCA_NOTFOUND) {
-			int pos0, pos1;
-
-			slot = mca_find_unused_adapter(0x628B, slot);
-			if (slot == MCA_NOTFOUND)
-				break;
-
-			pos0 = mca_read_stored_pos(slot, 2);
-			pos1 = mca_read_stored_pos(slot, 3);
-			ioaddr = mca_iomap[pos1&0xf];
-
-			dev->irq = mca_irqmap[(pos1>>4)&0x7];
-
-			/*
-			 * XXX: Transceiver selection is done
-			 * differently on the MCA version.
-			 * How to get it to select something
-			 * other than external/AUI is currently
-			 * unknown.  This code is just for looks. -- ASF
-			 */
-			if ((pos0 & 0x7) == 0x1)
-				dev->if_port = AUI;
-			else if ((pos0 & 0x7) == 0x5) {
-				if (pos1 & 0x80)
-					dev->if_port = BNC;
-				else
-					dev->if_port = TPE;
-			}
-
-			mca_set_adapter_name(slot, "Intel EtherExpress 16 MCA");
-			mca_set_adapter_procfn(slot, NULL, dev);
-			mca_mark_as_used(slot);
-
-			break;
-		}
-	}
-#endif
 	if (ioaddr&0xfe00) {
 		if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress"))
 			return -EBUSY;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3516e17..f4d2da0 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -290,16 +290,18 @@
 
 				arr[i].adh = adapter->handle;
 				arr[i].port_id = port->logical_port_id;
-				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
-						  EHEA_BCMC_MULTICAST |
+				arr[i].reg_type = EHEA_BCMC_MULTICAST |
 						  EHEA_BCMC_UNTAGGED;
+				if (mc_entry->macaddr == 0)
+					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
 				arr[i++].macaddr = mc_entry->macaddr;
 
 				arr[i].adh = adapter->handle;
 				arr[i].port_id = port->logical_port_id;
-				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
-						  EHEA_BCMC_MULTICAST |
+				arr[i].reg_type = EHEA_BCMC_MULTICAST |
 						  EHEA_BCMC_VLANID_ALL;
+				if (mc_entry->macaddr == 0)
+					arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
 				arr[i++].macaddr = mc_entry->macaddr;
 				num_registrations -= 2;
 			}
@@ -1838,8 +1840,9 @@
 	u64 hret;
 	u8 reg_type;
 
-	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
-		 | EHEA_BCMC_UNTAGGED;
+	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
+	if (mc_mac_addr == 0)
+		reg_type |= EHEA_BCMC_SCOPE_ALL;
 
 	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
 				     port->logical_port_id,
@@ -1847,8 +1850,9 @@
 	if (hret)
 		goto out;
 
-	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
-		 | EHEA_BCMC_VLANID_ALL;
+	reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
+	if (mc_mac_addr == 0)
+		reg_type |= EHEA_BCMC_SCOPE_ALL;
 
 	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
 				     port->logical_port_id,
@@ -1898,7 +1902,7 @@
 				netdev_err(dev,
 					   "failed enabling IFF_ALLMULTI\n");
 		}
-	} else
+	} else {
 		if (!enable) {
 			/* Disable ALLMULTI */
 			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
@@ -1908,6 +1912,7 @@
 				netdev_err(dev,
 					   "failed disabling IFF_ALLMULTI\n");
 		}
+	}
 }
 
 static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
@@ -1941,11 +1946,7 @@
 	struct netdev_hw_addr *ha;
 	int ret;
 
-	if (port->promisc) {
-		ehea_promiscuous(dev, 1);
-		return;
-	}
-	ehea_promiscuous(dev, 0);
+	ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
 
 	if (dev->flags & IFF_ALLMULTI) {
 		ehea_allmulti(dev, 1);
@@ -2463,6 +2464,7 @@
 		return 0;
 
 	ehea_drop_multicast_list(dev);
+	ehea_allmulti(dev, 0);
 	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 
 	ehea_free_interrupts(dev);
@@ -3261,6 +3263,7 @@
 	struct ehea_adapter *adapter;
 	const u64 *adapter_handle;
 	int ret;
+	int i;
 
 	if (!dev || !dev->dev.of_node) {
 		pr_err("Invalid ibmebus device probed\n");
@@ -3314,17 +3317,9 @@
 	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
 		     (unsigned long)adapter);
 
-	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
-				  ehea_interrupt_neq, IRQF_DISABLED,
-				  "ehea_neq", adapter);
-	if (ret) {
-		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
-		goto out_kill_eq;
-	}
-
 	ret = ehea_create_device_sysfs(dev);
 	if (ret)
-		goto out_free_irq;
+		goto out_kill_eq;
 
 	ret = ehea_setup_ports(adapter);
 	if (ret) {
@@ -3332,15 +3327,30 @@
 		goto out_rem_dev_sysfs;
 	}
 
+	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
+				  ehea_interrupt_neq, IRQF_DISABLED,
+				  "ehea_neq", adapter);
+	if (ret) {
+		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
+		goto out_shutdown_ports;
+	}
+
+	/* Handle any events that might be pending. */
+	tasklet_hi_schedule(&adapter->neq_tasklet);
+
 	ret = 0;
 	goto out;
 
+out_shutdown_ports:
+	for (i = 0; i < EHEA_MAX_PORTS; i++)
+		if (adapter->port[i]) {
+			ehea_shutdown_single_port(adapter->port[i]);
+			adapter->port[i] = NULL;
+		}
+
 out_rem_dev_sysfs:
 	ehea_remove_device_sysfs(dev);
 
-out_free_irq:
-	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
-
 out_kill_eq:
 	ehea_destroy_eq(adapter->neq);
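
The probe reordering above follows the usual goto-unwind convention: the NEQ IRQ is now requested last, once sysfs and the ports exist (so a pending event can be handled right away via the tasklet), and a new out_shutdown_ports label tears the ports down on failure. A skeletal sketch of that convention, with purely illustrative helper names:

/* Sketch: acquire resources in order, unwind in reverse on error. */
static int probe_sketch(void)
{
	int err;

	err = create_sysfs();		/* illustrative helpers throughout */
	if (err)
		goto out;
	err = setup_ports();
	if (err)
		goto out_sysfs;
	err = request_neq_irq();	/* requested last, once ports can take events */
	if (err)
		goto out_ports;
	return 0;

out_ports:
	shutdown_ports();
out_sysfs:
	remove_sysfs();
out:
	return err;
}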
 
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
index 52c456e..8364815 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -450,7 +450,7 @@
 			    void *cb_addr);
 
 #define H_REGBCMC_PN            EHEA_BMASK_IBM(48, 63)
-#define H_REGBCMC_REGTYPE       EHEA_BMASK_IBM(61, 63)
+#define H_REGBCMC_REGTYPE       EHEA_BMASK_IBM(60, 63)
 #define H_REGBCMC_MACADDR       EHEA_BMASK_IBM(16, 63)
 #define H_REGBCMC_VLANID        EHEA_BMASK_IBM(52, 63)
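
The H_REGBCMC_REGTYPE change above widens the reg_type field from three bits (61-63) to four (60-63) so the EHEA_BCMC_SCOPE_ALL flag set conditionally in ehea_main.c still fits. Assuming the usual IBM big-endian bit numbering (bit 0 is the MSB of a 64-bit value), the mask arithmetic works out as in this illustrative macro, which is not the ehea definition:

/* Illustrative mask builder: bits numbered 0 (MSB) .. 63 (LSB). */
#define BMASK_IBM(from, to) \
	(((1ULL << ((to) - (from) + 1)) - 1) << (63 - (to)))

/* BMASK_IBM(61, 63) == 0x7: only three reg_type bits            */
/* BMASK_IBM(60, 63) == 0xF: room for the extra SCOPE_ALL bit    */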
 
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 7621316..79b07ec 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -7,7 +7,7 @@
 	default y
 	depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
 		   ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
-		   GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \
+		   GSC || BVME6000 || MVME16x || \
 		   (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
 		   EXPERIMENTAL
 	---help---
@@ -120,6 +120,17 @@
 	  driver.  DCA is a method for warming the CPU cache before data
 	  is used, with the intent of lessening the impact of cache misses.
 
+config IGB_PTP
+	bool "PTP Hardware Clock (PHC)"
+	default y
+	depends on IGB && PTP_1588_CLOCK
+	---help---
+	  Say Y here if you want to use PTP Hardware Clock (PHC) in the
+	  driver.  Only the basic clock operations have been implemented.
+
+	  Every timestamp and clock read operation must consult the
+	  overflow counter to form a correct time value.
+
 config IGBVF
 	tristate "Intel(R) 82576 Virtual Function Ethernet support"
 	depends on PCI
@@ -182,6 +193,14 @@
 	  To compile this driver as a module, choose M here. The module
 	  will be called ixgbe.
 
+config IXGBE_HWMON
+	bool "Intel(R) 10GbE PCI Express adapters HWMON support"
+	default y
+	depends on IXGBE && HWMON && !(IXGBE=y && HWMON=m)
+	---help---
+	  Say Y if you want to expose the thermal sensor data on some of
+	  our cards via a hwmon sysfs interface.
+
 config IXGBE_DCA
 	bool "Direct Cache Access (DCA) Support"
 	default y
@@ -201,6 +220,17 @@
 
 	  If unsure, say N.
 
+config IXGBE_PTP
+	bool "PTP Clock Support"
+	default n
+	depends on IXGBE && PTP_1588_CLOCK
+	---help---
+	  Say Y here if you want support for 1588 Timestamping with a
+	  PHC device, using the PTP 1588 Clock support. This is
+	  required to enable timestamping support for the device.
+
+	  If unsure, say N.
+
 config IXGBEVF
 	tristate "Intel(R) 82599 Virtual Function Ethernet support"
 	depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index e498eff..ada720b 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1759,6 +1759,7 @@
 		skb->data, skb->len, PCI_DMA_TODEVICE));
 	/* check for mapping failure? */
 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
+	skb_tx_timestamp(skb);
 }
 
 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
@@ -2733,6 +2734,7 @@
 	.set_phys_id		= e100_set_phys_id,
 	.get_ethtool_stats	= e100_get_ethtool_stats,
 	.get_sset_count		= e100_get_sset_count,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 4348b6f..95731c8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -493,7 +493,11 @@
 static void e1000_down_and_stop(struct e1000_adapter *adapter)
 {
 	set_bit(__E1000_DOWN, &adapter->flags);
-	cancel_work_sync(&adapter->reset_task);
+
+	/* Only kill reset task if adapter is not resetting */
+	if (!test_bit(__E1000_RESETTING, &adapter->flags))
+		cancel_work_sync(&adapter->reset_task);
+
 	cancel_delayed_work_sync(&adapter->watchdog_task);
 	cancel_delayed_work_sync(&adapter->phy_info_task);
 	cancel_delayed_work_sync(&adapter->fifo_stall_task);
@@ -827,9 +831,10 @@
 	if (changed & NETIF_F_HW_VLAN_RX)
 		e1000_vlan_mode(netdev, features);
 
-	if (!(changed & NETIF_F_RXCSUM))
+	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 		return 0;
 
+	netdev->features = features;
 	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 
 	if (netif_running(netdev))
@@ -1074,6 +1079,7 @@
 
 	netdev->features |= netdev->hw_features;
 	netdev->hw_features |= NETIF_F_RXCSUM;
+	netdev->hw_features |= NETIF_F_RXALL;
 	netdev->hw_features |= NETIF_F_RXFCS;
 
 	if (pci_using_dac) {
@@ -1841,6 +1847,22 @@
 			break;
 	}
 
+	/* This is useful for sniffing bad packets. */
+	if (adapter->netdev->features & NETIF_F_RXALL) {
+		/* UPE and MPE will be handled by normal PROMISC logic
+		 * in e1000e_set_rx_mode */
+		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
+			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+			  E1000_RCTL_DPF | /* Allow filtered pause */
+			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+		 * and that breaks VLANs.
+		 */
+	}
+
 	ew32(RCTL, rctl);
 }
 
@@ -3243,6 +3265,8 @@
 	                     nr_frags, mss);
 
 	if (count) {
+		skb_tx_timestamp(skb);
+
 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
 		/* Make sure there is space in the ring for the next send. */
 		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
@@ -3380,7 +3404,7 @@
 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
 		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
-		struct my_u { u64 a; u64 b; };
+		struct my_u { __le64 a; __le64 b; };
 		struct my_u *u = (struct my_u *)tx_desc;
 		const char *type;
 
@@ -3424,7 +3448,7 @@
 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
 		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
-		struct my_u { u64 a; u64 b; };
+		struct my_u { __le64 a; __le64 b; };
 		struct my_u *u = (struct my_u *)rx_desc;
 		const char *type;
 
@@ -4046,7 +4070,11 @@
 		/* errors is only valid for DD + EOP descriptors */
 		if (unlikely((status & E1000_RXD_STAT_EOP) &&
 		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
-			u8 last_byte = *(skb->data + length - 1);
+			u8 *mapped;
+			u8 last_byte;
+
+			mapped = page_address(buffer_info->page);
+			last_byte = *(mapped + length - 1);
 			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
 				       last_byte)) {
 				spin_lock_irqsave(&adapter->stats_lock,
@@ -4057,6 +4085,8 @@
 				                       irq_flags);
 				length--;
 			} else {
+				if (netdev->features & NETIF_F_RXALL)
+					goto process_skb;
 				/* recycle both page and skb */
 				buffer_info->skb = skb;
 				/* an error means any chain goes out the window
@@ -4069,6 +4099,7 @@
 		}
 
 #define rxtop rx_ring->rx_skb_top
+process_skb:
 		if (!(status & E1000_RXD_STAT_EOP)) {
 			/* this descriptor is only the beginning (or middle) */
 			if (!rxtop) {
@@ -4276,12 +4307,15 @@
 				                       flags);
 				length--;
 			} else {
+				if (netdev->features & NETIF_F_RXALL)
+					goto process_skb;
 				/* recycle */
 				buffer_info->skb = skb;
 				goto next_desc;
 			}
 		}
 
+process_skb:
 		total_rx_bytes += (length - 4); /* don't count FCS */
 		total_rx_packets++;
 
@@ -4365,30 +4399,6 @@
 			break;
 		}
 
-		/* Fix for errata 23, can't cross 64kB boundary */
-		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
-			struct sk_buff *oldskb = skb;
-			e_err(rx_err, "skb align check failed: %u bytes at "
-			      "%p\n", bufsz, skb->data);
-			/* Try again, without freeing the previous */
-			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
-			/* Failed allocation, critical failure */
-			if (!skb) {
-				dev_kfree_skb(oldskb);
-				adapter->alloc_rx_buff_failed++;
-				break;
-			}
-
-			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
-				/* give up */
-				dev_kfree_skb(skb);
-				dev_kfree_skb(oldskb);
-				break; /* while (cleaned_count--) */
-			}
-
-			/* Use new allocation */
-			dev_kfree_skb(oldskb);
-		}
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 check_page:
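
The NETIF_F_RXALL hunks above advertise the feature, copy it into netdev->features from e1000_set_features(), relax RCTL so errored frames are no longer filtered out, and make the Rx error paths jump to process_skb instead of recycling the buffer. A reduced sketch of the RCTL adjustment, reusing the bit names from the hunk; the helper itself is illustrative, not driver API:

/* Sketch: what rx-all mode does to the receive control register. */
static u32 rctl_for_rxall(u32 rctl)
{
	/* accept bad, broadcast and MAC control packets */
	rctl |= E1000_RCTL_SBP | E1000_RCTL_BAM | E1000_RCTL_PMCF;
	/* keep VLAN/pause filtering from discarding frames */
	rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_DPF | E1000_RCTL_CFIEN);
	return rctl;
}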
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index bac9dda..4dd18a1 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -228,9 +228,7 @@
 	/* FWSM register */
 	mac->has_fwsm = true;
 	/* ARC supported; valid only if manageability features are enabled. */
-	mac->arc_subsystem_valid =
-	        (er32(FWSM) & E1000_FWSM_MODE_MASK)
-	                ? true : false;
+	mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK);
 	/* Adaptive IFS not supported */
 	mac->adaptive_ifs = false;
 
@@ -766,6 +764,7 @@
 {
 	u32 ctrl;
 	s32 ret_val;
+	u16 kum_reg_data;
 
 	/*
 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -791,6 +790,13 @@
 	ew32(CTRL, ctrl | E1000_CTRL_RST);
 	e1000_release_phy_80003es2lan(hw);
 
+	/* Disable IBIST slave mode (far-end loopback) */
+	e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+					&kum_reg_data);
+	kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+	e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+					 kum_reg_data);
+
 	ret_val = e1000e_get_auto_rd_done(hw);
 	if (ret_val)
 		/* We don't want to continue accessing MAC registers. */
@@ -938,6 +944,14 @@
 	else
 		reg |= (1 << 28);
 	ew32(TARC(1), reg);
+
+	/*
+	 * Disable IPv6 extension header parsing because some malformed
+	 * IPv6 headers can hang the Rx.
+	 */
+	reg = er32(RFCTL);
+	reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+	ew32(RFCTL, reg);
 }
 
 /**
@@ -1433,6 +1447,7 @@
 	/* setup_physical_interface dependent on media type */
 	.setup_led		= e1000e_setup_led_generic,
 	.config_collision_dist	= e1000e_config_collision_dist_generic,
+	.rar_set		= e1000e_rar_set_generic,
 };
 
 static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index b3fdc69..36db4df 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -295,9 +295,8 @@
 		 * ARC supported; valid only if manageability features are
 		 * enabled.
 		 */
-		mac->arc_subsystem_valid =
-			(er32(FWSM) & E1000_FWSM_MODE_MASK)
-			? true : false;
+		mac->arc_subsystem_valid = !!(er32(FWSM) &
+					      E1000_FWSM_MODE_MASK);
 		break;
 	case e1000_82574:
 	case e1000_82583:
@@ -798,7 +797,7 @@
 	/* Check for pending operations. */
 	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
 		usleep_range(1000, 2000);
-		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
+		if (!(er32(EECD) & E1000_EECD_FLUPD))
 			break;
 	}
 
@@ -822,7 +821,7 @@
 
 	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
 		usleep_range(1000, 2000);
-		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
+		if (!(er32(EECD) & E1000_EECD_FLUPD))
 			break;
 	}
 
@@ -1000,7 +999,7 @@
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl, ctrl_ext;
+	u32 ctrl, ctrl_ext, eecd;
 	s32 ret_val;
 
 	/*
@@ -1073,6 +1072,16 @@
 	 */
 
 	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		/*
+		 * REQ and GNT bits need to be cleared when using AUTO_RD
+		 * to access the EEPROM.
+		 */
+		eecd = er32(EECD);
+		eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT);
+		ew32(EECD, eecd);
+		break;
 	case e1000_82573:
 	case e1000_82574:
 	case e1000_82583:
@@ -1280,6 +1289,16 @@
 		ew32(CTRL_EXT, reg);
 	}
 
+	/*
+	 * Disable IPv6 extension header parsing because some malformed
+	 * IPv6 headers can hang the Rx.
+	 */
+	if (hw->mac.type <= e1000_82573) {
+		reg = er32(RFCTL);
+		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+		ew32(RFCTL, reg);
+	}
+
 	/* PCI-Ex Control Registers */
 	switch (hw->mac.type) {
 	case e1000_82574:
@@ -1763,7 +1782,8 @@
 		 * incoming packets directed to this port are dropped.
 		 * Eventually the LAA will be in RAR[0] and RAR[14].
 		 */
-		e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
+		hw->mac.ops.rar_set(hw, hw->mac.addr,
+				    hw->mac.rar_entry_count - 1);
 }
 
 /**
@@ -1927,6 +1947,7 @@
 	.setup_led		= e1000e_setup_led_generic,
 	.config_collision_dist	= e1000e_config_collision_dist_generic,
 	.read_mac_addr		= e1000_read_mac_addr_82571,
+	.rar_set		= e1000e_rar_set_generic,
 };
 
 static const struct e1000_phy_operations e82_phy_ops_igp = {
@@ -2061,9 +2082,11 @@
 				  | FLAG_HAS_SMART_POWER_DOWN
 				  | FLAG_HAS_AMT
 				  | FLAG_HAS_CTRLEXT_ON_LOAD,
-	.flags2			  = FLAG2_CHECK_PHY_HANG
+	.flags2			 = FLAG2_CHECK_PHY_HANG
 				  | FLAG2_DISABLE_ASPM_L0S
-				  | FLAG2_NO_DISABLE_RX,
+				  | FLAG2_DISABLE_ASPM_L1
+				  | FLAG2_NO_DISABLE_RX
+				  | FLAG2_DMA_BURST,
 	.pba			= 32,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 3a50259..351a409 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -74,7 +74,9 @@
 #define E1000_WUS_BC           E1000_WUFC_BC
 
 /* Extended Device Control */
+#define E1000_CTRL_EXT_LPCD  0x00000004     /* LCD Power Cycle Done */
 #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
 #define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
 #define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
 #define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
@@ -573,6 +575,7 @@
 #define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
 
 /* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP 100TX Full Dplx Capable */
 #define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
 #define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
 
@@ -739,6 +742,7 @@
 #define I82577_E_PHY_ID      0x01540050
 #define I82578_E_PHY_ID      0x004DD040
 #define I82579_E_PHY_ID      0x01540090
+#define I217_E_PHY_ID        0x015400A0
 
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
@@ -850,4 +854,8 @@
 /* SerDes Control */
 #define E1000_GEN_POLL_TIMEOUT          640
 
+/* FW Semaphore */
+#define E1000_FWSM_WLOCK_MAC_MASK	0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT	7
+
 #endif /* _E1000_DEFINES_H_ */
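
The new E1000_FWSM_WLOCK_MAC_MASK/SHIFT pair is consumed later in this patch (the ethtool register test and e1000_rar_set_pch_lpt) to learn how many SHRA registers the Manageability Engine has locked. A one-line decode using the values just defined, wrapped in an illustrative helper:

/* Sketch: extract the WLOCK_MAC field (FWSM bits 9:7) defined above. */
static inline u32 fwsm_wlock_mac(u32 fwsm)
{
	return (fwsm & E1000_FWSM_WLOCK_MAC_MASK) >> E1000_FWSM_WLOCK_MAC_SHIFT;
}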
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index b83897f..6e6fffb 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -206,6 +206,7 @@
 	board_ich10lan,
 	board_pchlan,
 	board_pch2lan,
+	board_pch_lpt,
 };
 
 struct e1000_ps_page {
@@ -528,6 +529,7 @@
 extern const struct e1000_info e1000_ich10_info;
 extern const struct e1000_info e1000_pch_info;
 extern const struct e1000_info e1000_pch2_info;
+extern const struct e1000_info e1000_pch_lpt_info;
 extern const struct e1000_info e1000_es2_info;
 
 extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
@@ -576,7 +578,7 @@
 extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
 					       u8 *mc_addr_list,
 					       u32 mc_addr_count);
-extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
 extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
 extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
 extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
@@ -673,11 +675,21 @@
 	return hw->phy.ops.read_reg(hw, offset, data);
 }
 
+static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return hw->phy.ops.read_reg_locked(hw, offset, data);
+}
+
 static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
 {
 	return hw->phy.ops.write_reg(hw, offset, data);
 }
 
+static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return hw->phy.ops.write_reg_locked(hw, offset, data);
+}
+
 static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
 {
 	return hw->phy.ops.get_cable_length(hw);
@@ -735,9 +747,46 @@
 	return readl(hw->hw_addr + reg);
 }
 
+#define er32(reg)	__er32(hw, E1000_##reg)
+
+/**
+ * __ew32_prepare - prepare to write to MAC CSR register on certain parts
+ * @hw: pointer to the HW structure
+ *
+ * When updating the MAC CSR registers, the Manageability Engine (ME) could
+ * be accessing the registers at the same time.  Normally, this is handled in
+ * h/w by an arbiter, but some parts have a bug that acknowledges Host
+ * accesses later than it should, which can leave the register with an
+ * incorrect value.  Work around this by checking the FWSM register, which
+ * has bit 24 set while the ME is accessing MAC CSR registers; if it is set,
+ * wait and try again a number of times.
+ **/
+static inline s32 __ew32_prepare(struct e1000_hw *hw)
+{
+	s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
+
+	while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
+		udelay(50);
+
+	return i;
+}
+
 static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
 {
+	if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+		__ew32_prepare(hw);
+
 	writel(val, hw->hw_addr + reg);
 }
 
+#define ew32(reg, val)	__ew32(hw, E1000_##reg, (val))
+
+#define e1e_flush()	er32(STATUS)
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
+	(__ew32((a), (reg + ((offset) << 2)), (value)))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) \
+	(readl((a)->hw_addr + reg + ((offset) << 2)))
+
 #endif /* _E1000_H_ */
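
The __ew32_prepare()/__ew32() pair added above implements the PCIM2PCI workaround: before writing a MAC CSR, poll the FWSM busy bit for a bounded number of 50 microsecond intervals so the write cannot race the Manageability Engine. A stand-alone sketch of the same pattern; FWSM_OFFSET, FW_BUSY_BIT and MAX_TRIES are placeholders, not e1000e symbols:

/* Sketch only: bounded poll of a firmware "busy" bit before an MMIO write. */
static void guarded_csr_write(void __iomem *base, unsigned long reg, u32 val)
{
	int tries = MAX_TRIES;		/* placeholder retry budget */

	while ((readl(base + FWSM_OFFSET) & FW_BUSY_BIT) && --tries)
		udelay(50);		/* back off while firmware owns the arbiter */

	writel(val, base + reg);	/* write once idle (or after timing out) */
}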
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index db35dd5..d863075 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -259,8 +259,7 @@
 	 * cannot be changed
 	 */
 	if (hw->phy.ops.check_reset_block(hw)) {
-		e_err("Cannot change link characteristics when SoL/IDER is "
-		      "active.\n");
+		e_err("Cannot change link characteristics when SoL/IDER is active.\n");
 		return -EINVAL;
 	}
 
@@ -403,15 +402,15 @@
 	regs_buff[1]  = er32(STATUS);
 
 	regs_buff[2]  = er32(RCTL);
-	regs_buff[3]  = er32(RDLEN);
-	regs_buff[4]  = er32(RDH);
-	regs_buff[5]  = er32(RDT);
+	regs_buff[3]  = er32(RDLEN(0));
+	regs_buff[4]  = er32(RDH(0));
+	regs_buff[5]  = er32(RDT(0));
 	regs_buff[6]  = er32(RDTR);
 
 	regs_buff[7]  = er32(TCTL);
-	regs_buff[8]  = er32(TDLEN);
-	regs_buff[9]  = er32(TDH);
-	regs_buff[10] = er32(TDT);
+	regs_buff[8]  = er32(TDLEN(0));
+	regs_buff[9]  = er32(TDH(0));
+	regs_buff[10] = er32(TDT(0));
 	regs_buff[11] = er32(TIDV);
 
 	regs_buff[12] = adapter->hw.phy.type;  /* PHY type (IGP=1, M88=0) */
@@ -727,9 +726,8 @@
 				      (test[pat] & write));
 		val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
 		if (val != (test[pat] & write & mask)) {
-			e_err("pattern test reg %04X failed: got 0x%08X "
-			      "expected 0x%08X\n", reg + offset, val,
-			      (test[pat] & write & mask));
+			e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
+			      reg + offset, val, (test[pat] & write & mask));
 			*data = reg;
 			return 1;
 		}
@@ -744,8 +742,8 @@
 	__ew32(&adapter->hw, reg, write & mask);
 	val = __er32(&adapter->hw, reg);
 	if ((write & mask) != (val & mask)) {
-		e_err("set/check reg %04X test failed: got 0x%08X "
-		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
+		e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+		      reg, (val & mask), (write & mask));
 		*data = reg;
 		return 1;
 	}
@@ -775,6 +773,7 @@
 	u32 i;
 	u32 toggle;
 	u32 mask;
+	u32 wlock_mac = 0;
 
 	/*
 	 * The status register is Read Only, so a write should fail.
@@ -797,8 +796,8 @@
 	ew32(STATUS, toggle);
 	after = er32(STATUS) & toggle;
 	if (value != after) {
-		e_err("failed STATUS register test got: 0x%08X expected: "
-		      "0x%08X\n", after, value);
+		e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+		      after, value);
 		*data = 1;
 		return 1;
 	}
@@ -813,15 +812,15 @@
 	}
 
 	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
-	REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
-	REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF);
-	REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF);
-	REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
+	REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
 	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
 	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
 	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
-	REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
-	REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF);
+	REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
 
 	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
 
@@ -830,29 +829,41 @@
 	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
 
 	REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
-	REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
 	if (!(adapter->flags & FLAG_IS_ICH))
 		REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
-	REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
 	REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
 	mask = 0x8003FFFF;
 	switch (mac->type) {
 	case e1000_ich10lan:
 	case e1000_pchlan:
 	case e1000_pch2lan:
+	case e1000_pch_lpt:
 		mask |= (1 << 18);
 		break;
 	default:
 		break;
 	}
-	for (i = 0; i < mac->rar_entry_count; i++)
+
+	if (mac->type == e1000_pch_lpt)
+		wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
+		    E1000_FWSM_WLOCK_MAC_SHIFT;
+
+	for (i = 0; i < mac->rar_entry_count; i++) {
+		/* Cannot test write-protected SHRAL[n] registers */
+		if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
+			continue;
+
 		REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
-		                       mask, 0xFFFFFFFF);
+				       mask, 0xFFFFFFFF);
+	}
 
 	for (i = 0; i < mac->mta_reg_count; i++)
 		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
 
 	*data = 0;
+
 	return 0;
 }
 
@@ -1104,11 +1115,11 @@
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 
-	ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
-	ew32(TDBAH, ((u64) tx_ring->dma >> 32));
-	ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
-	ew32(TDH, 0);
-	ew32(TDT, 0);
+	ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+	ew32(TDBAH(0), ((u64) tx_ring->dma >> 32));
+	ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
+	ew32(TDH(0), 0);
+	ew32(TDT(0), 0);
 	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
 	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
 	     E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
@@ -1168,11 +1179,11 @@
 	rctl = er32(RCTL);
 	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
-	ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
-	ew32(RDBAH, ((u64) rx_ring->dma >> 32));
-	ew32(RDLEN, rx_ring->size);
-	ew32(RDH, 0);
-	ew32(RDT, 0);
+	ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
+	ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
+	ew32(RDLEN(0), rx_ring->size);
+	ew32(RDH(0), 0);
+	ew32(RDT(0), 0);
 	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
 		E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
 		E1000_RCTL_SBP | E1000_RCTL_SECRC |
@@ -1534,7 +1545,7 @@
 	int ret_val = 0;
 	unsigned long time;
 
-	ew32(RDT, rx_ring->count - 1);
+	ew32(RDT(0), rx_ring->count - 1);
 
 	/*
 	 * Calculate the loop count based on the largest descriptor ring
@@ -1561,7 +1572,7 @@
 			if (k == tx_ring->count)
 				k = 0;
 		}
-		ew32(TDT, k);
+		ew32(TDT(0), k);
 		e1e_flush();
 		msleep(200);
 		time = jiffies; /* set the start time for the receive */
@@ -1791,8 +1802,7 @@
 		wol->supported &= ~WAKE_UCAST;
 
 		if (adapter->wol & E1000_WUFC_EX)
-			e_err("Interface does not support directed (unicast) "
-			      "frame wake-up packets\n");
+			e_err("Interface does not support directed (unicast) frame wake-up packets\n");
 	}
 
 	if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index f82ecf5..ed5b409 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -36,16 +36,6 @@
 
 #include "defines.h"
 
-#define er32(reg)	__er32(hw, E1000_##reg)
-#define ew32(reg,val)	__ew32(hw, E1000_##reg, (val))
-#define e1e_flush()	er32(STATUS)
-
-#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
-	(writel((value), ((a)->hw_addr + reg + ((offset) << 2))))
-
-#define E1000_READ_REG_ARRAY(a, reg, offset) \
-	(readl((a)->hw_addr + reg + ((offset) << 2)))
-
 enum e1e_registers {
 	E1000_CTRL     = 0x00000, /* Device Control - RW */
 	E1000_STATUS   = 0x00008, /* Device Status - RO */
@@ -61,6 +51,7 @@
 	E1000_FEXTNVM  = 0x00028, /* Future Extended NVM - RW */
 	E1000_FCT      = 0x00030, /* Flow Control Type - RW */
 	E1000_VET      = 0x00038, /* VLAN Ether Type - RW */
+	E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */
 	E1000_ICR      = 0x000C0, /* Interrupt Cause Read - R/clr */
 	E1000_ITR      = 0x000C4, /* Interrupt Throttling Rate - RW */
 	E1000_ICS      = 0x000C8, /* Interrupt Cause Set - WO */
@@ -94,31 +85,40 @@
 	E1000_FCRTL    = 0x02160, /* Flow Control Receive Threshold Low - RW */
 	E1000_FCRTH    = 0x02168, /* Flow Control Receive Threshold High - RW */
 	E1000_PSRCTL   = 0x02170, /* Packet Split Receive Control - RW */
-	E1000_RDBAL    = 0x02800, /* Rx Descriptor Base Address Low - RW */
-	E1000_RDBAH    = 0x02804, /* Rx Descriptor Base Address High - RW */
-	E1000_RDLEN    = 0x02808, /* Rx Descriptor Length - RW */
-	E1000_RDH      = 0x02810, /* Rx Descriptor Head - RW */
-	E1000_RDT      = 0x02818, /* Rx Descriptor Tail - RW */
+/*
+ * Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+	E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
+#define E1000_RDBAL(_n)	(E1000_RDBAL_BASE + (_n << 8))
+	E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
+#define E1000_RDBAH(_n)	(E1000_RDBAH_BASE + (_n << 8))
+	E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
+#define E1000_RDLEN(_n)	(E1000_RDLEN_BASE + (_n << 8))
+	E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
+#define E1000_RDH(_n)	(E1000_RDH_BASE + (_n << 8))
+	E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
+#define E1000_RDT(_n)	(E1000_RDT_BASE + (_n << 8))
 	E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
 	E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
 #define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
 	E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
 
-/* Convenience macros
- *
- * Note: "_n" is the queue number of the register to be written to.
- *
- * Example usage:
- * E1000_RDBAL_REG(current_rx_queue)
- *
- */
-#define E1000_RDBAL_REG(_n)   (E1000_RDBAL + (_n << 8))
 	E1000_KABGTXD  = 0x03004, /* AFE Band Gap Transmit Ref Data */
-	E1000_TDBAL    = 0x03800, /* Tx Descriptor Base Address Low - RW */
-	E1000_TDBAH    = 0x03804, /* Tx Descriptor Base Address High - RW */
-	E1000_TDLEN    = 0x03808, /* Tx Descriptor Length - RW */
-	E1000_TDH      = 0x03810, /* Tx Descriptor Head - RW */
-	E1000_TDT      = 0x03818, /* Tx Descriptor Tail - RW */
+	E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
+#define E1000_TDBAL(_n)	(E1000_TDBAL_BASE + (_n << 8))
+	E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
+#define E1000_TDBAH(_n)	(E1000_TDBAH_BASE + (_n << 8))
+	E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
+#define E1000_TDLEN(_n)	(E1000_TDLEN_BASE + (_n << 8))
+	E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
+#define E1000_TDH(_n)	(E1000_TDH_BASE + (_n << 8))
+	E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
+#define E1000_TDT(_n)	(E1000_TDT_BASE + (_n << 8))
 	E1000_TIDV     = 0x03820, /* Tx Interrupt Delay Value - RW */
 	E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
 #define E1000_TXDCTL(_n)   (E1000_TXDCTL_BASE + (_n << 8))
@@ -200,6 +200,14 @@
 #define E1000_RA        (E1000_RAL(0))
 	E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
 #define E1000_RAH(_n)   (E1000_RAH_BASE + ((_n) * 8))
+	E1000_SHRAL_PCH_LPT_BASE = 0x05408,
+#define E1000_SHRAL_PCH_LPT(_n)   (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
+	E1000_SHRAH_PCH_LTP_BASE = 0x0540C,
+#define E1000_SHRAH_PCH_LPT(_n)   (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8))
+	E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */
+#define E1000_SHRAL(_n)   (E1000_SHRAL_BASE + ((_n) * 8))
+	E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */
+#define E1000_SHRAH(_n)   (E1000_SHRAH_BASE + ((_n) * 8))
 	E1000_VFTA     = 0x05600, /* VLAN Filter Table Array - RW Array */
 	E1000_WUC      = 0x05800, /* Wakeup Control - RW */
 	E1000_WUFC     = 0x05808, /* Wakeup Filter Control - RW */
@@ -402,6 +410,8 @@
 #define E1000_DEV_ID_PCH_D_HV_DC		0x10F0
 #define E1000_DEV_ID_PCH2_LV_LM			0x1502
 #define E1000_DEV_ID_PCH2_LV_V			0x1503
+#define E1000_DEV_ID_PCH_LPT_I217_LM		0x153A
+#define E1000_DEV_ID_PCH_LPT_I217_V		0x153B
 
 #define E1000_REVISION_4 4
 
@@ -422,6 +432,7 @@
 	e1000_ich10lan,
 	e1000_pchlan,
 	e1000_pch2lan,
+	e1000_pch_lpt,
 };
 
 enum e1000_media_type {
@@ -459,6 +470,7 @@
 	e1000_phy_82578,
 	e1000_phy_82577,
 	e1000_phy_82579,
+	e1000_phy_i217,
 };
 
 enum e1000_bus_width {
@@ -782,6 +794,7 @@
 	s32  (*setup_led)(struct e1000_hw *);
 	void (*write_vfta)(struct e1000_hw *, u32, u32);
 	void (*config_collision_dist)(struct e1000_hw *);
+	void (*rar_set)(struct e1000_hw *, u8 *, u32);
 	s32  (*read_mac_addr)(struct e1000_hw *);
 };
 
@@ -966,6 +979,7 @@
 	struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
 	bool nvm_k1_enabled;
 	bool eee_disable;
+	u16 eee_lp_ability;
 };
 
 struct e1000_hw {
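
The *_BASE enums and the E1000_RDBAL(_n)-style macros above encode the per-queue register layout: each Rx/Tx queue's descriptor registers occupy their own 0x100-byte block, so queue n's register is the base offset plus (n << 8). A quick standalone check of that arithmetic, with values copied from the enum:

/* Sketch: per-queue register addressing as done by E1000_RDBAL(_n). */
#define RDBAL_BASE	0x02800
#define RDBAL(n)	(RDBAL_BASE + ((n) << 8))

/* RDBAL(0) == 0x02800, RDBAL(1) == 0x02900: one 0x100 block per queue. */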
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index b461c24..bbf70ba 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -105,6 +105,9 @@
 #define E1000_FEXTNVM_SW_CONFIG		1
 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
 
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK    0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC  0x08000000
+
 #define E1000_FEXTNVM4_BEACON_DURATION_MASK    0x7
 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC   0x7
 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
@@ -112,6 +115,8 @@
 #define PCIE_ICH8_SNOOP_ALL		PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES		7
+#define E1000_PCH2_RAR_ENTRIES		5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES	12 /* RAR[0], SHRA[0-10] */
 
 #define PHY_PAGE_SHIFT 5
 #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
@@ -127,14 +132,22 @@
 
 #define SW_FLAG_TIMEOUT    1000 /* SW Semaphore flag timeout in milliseconds */
 
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL		PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS	0x0001
+
 /* SMBus Address Phy Register */
 #define HV_SMB_ADDR            PHY_REG(768, 26)
 #define HV_SMB_ADDR_MASK       0x007F
 #define HV_SMB_ADDR_PEC_EN     0x0200
 #define HV_SMB_ADDR_VALID      0x0080
+#define HV_SMB_ADDR_FREQ_MASK           0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT      8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT     12
 
 /* PHY Power Management Control */
 #define HV_PM_CTRL		PHY_REG(770, 17)
+#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA	0x100
 
 /* PHY Low Power Idle Control */
 #define I82579_LPI_CTRL				PHY_REG(772, 20)
@@ -147,11 +160,26 @@
 #define I82579_LPI_UPDATE_TIMER 0x4805	/* in 40ns units + 40 ns base value */
 #define I82579_MSE_THRESHOLD    0x084F	/* Mean Square Error Threshold */
 #define I82579_MSE_LINK_DOWN    0x2411	/* MSE count before dropping link */
+#define I217_EEE_ADVERTISEMENT  0x8001	/* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY     0x8002	/* IEEE MMD Register 7.61 */
+#define I217_EEE_100_SUPPORTED  (1 << 1)	/* 100BaseTx EEE supported */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL                 PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE    0x0080
+#define I217_SxCTRL                     PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_MASK                0x1000
+#define I217_CGFREG                     PHY_REG(772, 29)
+#define I217_CGFREG_MASK                0x0002
+#define I217_MEMPWR                     PHY_REG(772, 26)
+#define I217_MEMPWR_MASK                0x0010
 
 /* Strapping Option Register - RO */
 #define E1000_STRAP                     0x0000C
 #define E1000_STRAP_SMBUS_ADDRESS_MASK  0x00FE0000
 #define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+#define E1000_STRAP_SMT_FREQ_MASK       0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT      12
 
 /* OEM Bits Phy Register */
 #define HV_OEM_BITS            PHY_REG(768, 25)
@@ -255,6 +283,8 @@
 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
 
@@ -283,18 +313,161 @@
 #define ew16flash(reg, val)	__ew16flash(hw, (reg), (val))
 #define ew32flash(reg, val)	__ew32flash(hw, (reg), (val))
 
-static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
+/**
+ *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
+ *  @hw: pointer to the HW structure
+ *
+ *  Test access to the PHY registers by reading the PHY ID registers.  If
+ *  the PHY ID is already known (e.g. resume path), compare it with the known ID,
+ *  otherwise assume the read PHY ID is correct if it is valid.
+ *
+ *  Assumes the sw/fw/hw semaphore is already acquired.
+ **/
+static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
-	u32 ctrl;
+	u16 phy_reg;
+	u32 phy_id;
 
-	ctrl = er32(CTRL);
-	ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
-	ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
-	ew32(CTRL, ctrl);
-	e1e_flush();
-	udelay(10);
-	ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
-	ew32(CTRL, ctrl);
+	e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+	phy_id = (u32)(phy_reg << 16);
+	e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+	phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+
+	if (hw->phy.id) {
+		if (hw->phy.id == phy_id)
+			return true;
+	} else {
+		if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
+			hw->phy.id = phy_id;
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
+ *  @hw: pointer to the HW structure
+ *
+ *  Workarounds/flow necessary for PHY initialization during driver load
+ *  and resume paths.
+ **/
+static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
+{
+	u32 mac_reg, fwsm = er32(FWSM);
+	s32 ret_val;
+	u16 phy_reg;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val) {
+		e_dbg("Failed to initialize PHY flow\n");
+		return ret_val;
+	}
+
+	/*
+	 * The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
+	 * inaccessible and resetting the PHY is not blocked, toggle the
+	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
+	 */
+	switch (hw->mac.type) {
+	case e1000_pch_lpt:
+		if (e1000_phy_is_accessible_pchlan(hw))
+			break;
+
+		/*
+		 * Before toggling LANPHYPC, see if PHY is accessible by
+		 * forcing MAC to SMBus mode first.
+		 */
+		mac_reg = er32(CTRL_EXT);
+		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+		ew32(CTRL_EXT, mac_reg);
+
+		/* fall-through */
+	case e1000_pch2lan:
+		/*
+		 * Gate automatic PHY configuration by hardware on
+		 * non-managed 82579
+		 */
+		if ((hw->mac.type == e1000_pch2lan) &&
+		    !(fwsm & E1000_ICH_FWSM_FW_VALID))
+			e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+		if (e1000_phy_is_accessible_pchlan(hw)) {
+			if (hw->mac.type == e1000_pch_lpt) {
+				/* Unforce SMBus mode in PHY */
+				e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+				phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+				e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+
+				/* Unforce SMBus mode in MAC */
+				mac_reg = er32(CTRL_EXT);
+				mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+				ew32(CTRL_EXT, mac_reg);
+			}
+			break;
+		}
+
+		/* fall-through */
+	case e1000_pchlan:
+		if ((hw->mac.type == e1000_pchlan) &&
+		    (fwsm & E1000_ICH_FWSM_FW_VALID))
+			break;
+
+		if (hw->phy.ops.check_reset_block(hw)) {
+			e_dbg("Required LANPHYPC toggle blocked by ME\n");
+			break;
+		}
+
+		e_dbg("Toggling LANPHYPC\n");
+
+		/* Set Phy Config Counter to 50msec */
+		mac_reg = er32(FEXTNVM3);
+		mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+		mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+		ew32(FEXTNVM3, mac_reg);
+
+		/* Toggle LANPHYPC Value bit */
+		mac_reg = er32(CTRL);
+		mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+		mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+		ew32(CTRL, mac_reg);
+		e1e_flush();
+		udelay(10);
+		mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+		ew32(CTRL, mac_reg);
+		e1e_flush();
+		if (hw->mac.type < e1000_pch_lpt) {
+			msleep(50);
+		} else {
+			u16 count = 20;
+			do {
+				usleep_range(5000, 10000);
+			} while (!(er32(CTRL_EXT) &
+				   E1000_CTRL_EXT_LPCD) && count--);
+		}
+		break;
+	default:
+		break;
+	}
+
+	hw->phy.ops.release(hw);
+
+	/*
+	 * Reset the PHY before any access to it.  Doing so ensures
+	 * that the PHY is in a known good state before we read/write
+	 * PHY registers.  The generic reset is sufficient here,
+	 * because we haven't determined the PHY type yet.
+	 */
+	ret_val = e1000e_phy_hw_reset_generic(hw);
+
+	/* Ungate automatic PHY configuration on non-managed 82579 */
+	if ((hw->mac.type == e1000_pch2lan) &&
+	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+		usleep_range(10000, 20000);
+		e1000_gate_hw_phy_config_ich8lan(hw, false);
+	}
+
+	return ret_val;
 }
 
 /**
@@ -324,70 +497,41 @@
 	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
-	if (!hw->phy.ops.check_reset_block(hw)) {
-		u32 fwsm = er32(FWSM);
-
-		/*
-		 * The MAC-PHY interconnect may still be in SMBus mode after
-		 * Sx->S0.  If resetting the PHY is not blocked, toggle the
-		 * LANPHYPC Value bit to force the interconnect to PCIe mode.
-		 */
-		e1000_toggle_lanphypc_value_ich8lan(hw);
-		msleep(50);
-
-		/*
-		 * Gate automatic PHY configuration by hardware on
-		 * non-managed 82579
-		 */
-		if ((hw->mac.type == e1000_pch2lan) &&
-		    !(fwsm & E1000_ICH_FWSM_FW_VALID))
-			e1000_gate_hw_phy_config_ich8lan(hw, true);
-
-		/*
-		 * Reset the PHY before any access to it.  Doing so, ensures
-		 * that the PHY is in a known good state before we read/write
-		 * PHY registers.  The generic reset is sufficient here,
-		 * because we haven't determined the PHY type yet.
-		 */
-		ret_val = e1000e_phy_hw_reset_generic(hw);
-		if (ret_val)
-			return ret_val;
-
-		/* Ungate automatic PHY configuration on non-managed 82579 */
-		if ((hw->mac.type == e1000_pch2lan) &&
-		    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
-			usleep_range(10000, 20000);
-			e1000_gate_hw_phy_config_ich8lan(hw, false);
-		}
-	}
-
 	phy->id = e1000_phy_unknown;
-	switch (hw->mac.type) {
-	default:
-		ret_val = e1000e_get_phy_id(hw);
-		if (ret_val)
-			return ret_val;
-		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+
+	ret_val = e1000_init_phy_workarounds_pchlan(hw);
+	if (ret_val)
+		return ret_val;
+
+	if (phy->id == e1000_phy_unknown)
+		switch (hw->mac.type) {
+		default:
+			ret_val = e1000e_get_phy_id(hw);
+			if (ret_val)
+				return ret_val;
+			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+				break;
+			/* fall-through */
+		case e1000_pch2lan:
+		case e1000_pch_lpt:
+			/*
+			 * In case the PHY needs to be in mdio slow mode,
+			 * set slow mode and try to get the PHY id again.
+			 */
+			ret_val = e1000_set_mdio_slow_mode_hv(hw);
+			if (ret_val)
+				return ret_val;
+			ret_val = e1000e_get_phy_id(hw);
+			if (ret_val)
+				return ret_val;
 			break;
-		/* fall-through */
-	case e1000_pch2lan:
-		/*
-		 * In case the PHY needs to be in mdio slow mode,
-		 * set slow mode and try to get the PHY id again.
-		 */
-		ret_val = e1000_set_mdio_slow_mode_hv(hw);
-		if (ret_val)
-			return ret_val;
-		ret_val = e1000e_get_phy_id(hw);
-		if (ret_val)
-			return ret_val;
-		break;
-	}
+		}
 	phy->type = e1000e_get_phy_type_from_id(phy->id);
 
 	switch (phy->type) {
 	case e1000_phy_82577:
 	case e1000_phy_82579:
+	case e1000_phy_i217:
 		phy->ops.check_polarity = e1000_check_polarity_82577;
 		phy->ops.force_speed_duplex =
 		    e1000_phy_force_speed_duplex_82577;
@@ -572,7 +716,7 @@
 	/* Adaptive IFS supported */
 	mac->adaptive_ifs = true;
 
-	/* LED operations */
+	/* LED and other operations */
 	switch (mac->type) {
 	case e1000_ich8lan:
 	case e1000_ich9lan:
@@ -591,8 +735,12 @@
 		mac->ops.led_on = e1000_led_on_ich8lan;
 		mac->ops.led_off = e1000_led_off_ich8lan;
 		break;
-	case e1000_pchlan:
 	case e1000_pch2lan:
+		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
+		mac->ops.rar_set = e1000_rar_set_pch2lan;
+		/* fall-through */
+	case e1000_pch_lpt:
+	case e1000_pchlan:
 		/* check management mode */
 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
 		/* ID LED init */
@@ -609,12 +757,20 @@
 		break;
 	}
 
+	if (mac->type == e1000_pch_lpt) {
+		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
+		mac->ops.rar_set = e1000_rar_set_pch_lpt;
+	}
+
 	/* Enable PCS Lock-loss workaround for ICH8 */
 	if (mac->type == e1000_ich8lan)
 		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 
-	/* Gate automatic PHY configuration by hardware on managed 82579 */
-	if ((mac->type == e1000_pch2lan) &&
+	/*
+	 * Gate automatic PHY configuration by hardware on managed
+	 * 82579 and i217
+	 */
+	if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
 	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
 		e1000_gate_hw_phy_config_ich8lan(hw, true);
 
@@ -630,22 +786,50 @@
  **/
 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 {
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	s32 ret_val = 0;
 	u16 phy_reg;
 
-	if (hw->phy.type != e1000_phy_82579)
+	if ((hw->phy.type != e1000_phy_82579) &&
+	    (hw->phy.type != e1000_phy_i217))
 		return 0;
 
 	ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
 	if (ret_val)
 		return ret_val;
 
-	if (hw->dev_spec.ich8lan.eee_disable)
+	if (dev_spec->eee_disable)
 		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
 	else
 		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
 
-	return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+	ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+	if (ret_val)
+		return ret_val;
+
+	if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
+		/* Save off link partner's EEE ability */
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+					  I217_EEE_LP_ABILITY);
+		if (ret_val)
+			goto release;
+		e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
+
+		/*
+		 * EEE is not supported in 100Half, so ignore partner's EEE
+		 * in 100 ability if full-duplex is not advertised.
+		 */
+		e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
+		if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
+			dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
+release:
+		hw->phy.ops.release(hw);
+	}
+
+	return 0;
 }
 
 /**
@@ -687,6 +871,9 @@
 			return ret_val;
 	}
 
+	/* Clear link partner's EEE ability */
+	hw->dev_spec.ich8lan.eee_lp_ability = 0;
+
 	if (!link)
 		return 0; /* No link detected */
 
@@ -782,6 +969,7 @@
 		break;
 	case e1000_pchlan:
 	case e1000_pch2lan:
+	case e1000_pch_lpt:
 		rc = e1000_init_phy_params_pchlan(hw);
 		break;
 	default:
@@ -967,6 +1155,145 @@
 }
 
 /**
+ *  e1000_rar_set_pch2lan - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.  For 82579, RAR[0] is the base address register that is to
+ *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
+ *  Use SHRA[0-3] in place of those reserved for ME.
+ **/
+static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+
+	/*
+	 * HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32)addr[0] |
+		   ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+
+	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high)
+		rar_high |= E1000_RAH_AV;
+
+	if (index == 0) {
+		ew32(RAL(index), rar_low);
+		e1e_flush();
+		ew32(RAH(index), rar_high);
+		e1e_flush();
+		return;
+	}
+
+	if (index < hw->mac.rar_entry_count) {
+		s32 ret_val;
+
+		ret_val = e1000_acquire_swflag_ich8lan(hw);
+		if (ret_val)
+			goto out;
+
+		ew32(SHRAL(index - 1), rar_low);
+		e1e_flush();
+		ew32(SHRAH(index - 1), rar_high);
+		e1e_flush();
+
+		e1000_release_swflag_ich8lan(hw);
+
+		/* verify the register updates */
+		if ((er32(SHRAL(index - 1)) == rar_low) &&
+		    (er32(SHRAH(index - 1)) == rar_high))
+			return;
+
+		e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
+		      (index - 1), er32(FWSM));
+	}
+
+out:
+	e_dbg("Failed to write receive address at index %d\n", index);
+}
+
+/**
+ *  e1000_rar_set_pch_lpt - Set receive address registers
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address register array at index to the address passed
+ *  in by addr. For LPT, RAR[0] is the base address register that is to
+ *  contain the MAC address. SHRA[0-10] are the shared receive address
+ *  registers that are shared between the Host and manageability engine (ME).
+ **/
+static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+	u32 wlock_mac;
+
+	/*
+	 * HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+
+	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high)
+		rar_high |= E1000_RAH_AV;
+
+	if (index == 0) {
+		ew32(RAL(index), rar_low);
+		e1e_flush();
+		ew32(RAH(index), rar_high);
+		e1e_flush();
+		return;
+	}
+
+	/*
+	 * The manageability engine (ME) can lock certain SHRAR registers that
+	 * it is using - those registers are unavailable for use.
+	 */
+	if (index < hw->mac.rar_entry_count) {
+		wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+		/* Check if all SHRAR registers are locked */
+		if (wlock_mac == 1)
+			goto out;
+
+		if ((wlock_mac == 0) || (index <= wlock_mac)) {
+			s32 ret_val;
+
+			ret_val = e1000_acquire_swflag_ich8lan(hw);
+
+			if (ret_val)
+				goto out;
+
+			ew32(SHRAL_PCH_LPT(index - 1), rar_low);
+			e1e_flush();
+			ew32(SHRAH_PCH_LPT(index - 1), rar_high);
+			e1e_flush();
+
+			e1000_release_swflag_ich8lan(hw);
+
+			/* verify the register updates */
+			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
+			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
+				return;
+		}
+	}
+
+out:
+	e_dbg("Failed to write receive address at index %d\n", index);
+}
+
+/**
  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
  *  @hw: pointer to the HW structure
  *
@@ -994,6 +1321,8 @@
 {
 	u16 phy_data;
 	u32 strap = er32(STRAP);
+	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
+	    E1000_STRAP_SMT_FREQ_SHIFT;
 	s32 ret_val = 0;
 
 	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
@@ -1006,6 +1335,19 @@
 	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
 
+	if (hw->phy.type == e1000_phy_i217) {
+		/* Restore SMBus frequency */
+		if (freq--) {
+			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
+			phy_data |= (freq & (1 << 0)) <<
+			    HV_SMB_ADDR_FREQ_LOW_SHIFT;
+			phy_data |= (freq & (1 << 1)) <<
+			    (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
+		} else {
+			e_dbg("Unsupported SMB frequency in PHY\n");
+		}
+	}
+
 	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
 }
 
@@ -1043,6 +1385,7 @@
 		/* Fall-thru */
 	case e1000_pchlan:
 	case e1000_pch2lan:
+	case e1000_pch_lpt:
 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
 		break;
 	default:
@@ -1062,10 +1405,9 @@
 	 * extended configuration before SW configuration
 	 */
 	data = er32(EXTCNF_CTRL);
-	if (!(hw->mac.type == e1000_pch2lan)) {
-		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
-			goto release;
-	}
+	if ((hw->mac.type < e1000_pch2lan) &&
+	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
+		goto release;
 
 	cnf_size = er32(EXTCNF_SIZE);
 	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
@@ -1076,9 +1418,9 @@
 	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
 	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
 
-	if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
-	    (hw->mac.type == e1000_pchlan)) ||
-	     (hw->mac.type == e1000_pch2lan)) {
+	if (((hw->mac.type == e1000_pchlan) &&
+	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
+	    (hw->mac.type > e1000_pchlan)) {
 		/*
 		 * HW configures the SMBus address and LEDs when the
 		 * OEM and LCD Write Enable bits are set in the NVM.
@@ -1121,8 +1463,7 @@
 		reg_addr &= PHY_REG_MASK;
 		reg_addr |= phy_page;
 
-		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
-						    reg_data);
+		ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
 		if (ret_val)
 			goto release;
 	}
@@ -1159,8 +1500,8 @@
 	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
 	if (link) {
 		if (hw->phy.type == e1000_phy_82578) {
-			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
-			                                          &status_reg);
+			ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
+						  &status_reg);
 			if (ret_val)
 				goto release;
 
@@ -1175,8 +1516,7 @@
 		}
 
 		if (hw->phy.type == e1000_phy_82577) {
-			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
-			                                          &status_reg);
+			ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
 			if (ret_val)
 				goto release;
 
@@ -1191,15 +1531,13 @@
 		}
 
 		/* Link stall fix for link up */
-		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
-		                                           0x0100);
+		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
 		if (ret_val)
 			goto release;
 
 	} else {
 		/* Link stall fix for link down */
-		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
-		                                           0x4100);
+		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
 		if (ret_val)
 			goto release;
 	}
@@ -1279,14 +1617,14 @@
 	u32 mac_reg;
 	u16 oem_reg;
 
-	if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
+	if (hw->mac.type < e1000_pchlan)
 		return ret_val;
 
 	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
 
-	if (!(hw->mac.type == e1000_pch2lan)) {
+	if (hw->mac.type == e1000_pchlan) {
 		mac_reg = er32(EXTCNF_CTRL);
 		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
 			goto release;
@@ -1298,7 +1636,7 @@
 
 	mac_reg = er32(PHY_CTRL);
 
-	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+	ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
 	if (ret_val)
 		goto release;
 
@@ -1325,7 +1663,7 @@
 	    !hw->phy.ops.check_reset_block(hw))
 		oem_reg |= HV_OEM_BITS_RESTART_AN;
 
-	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
+	ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
 
 release:
 	hw->phy.ops.release(hw);
@@ -1421,11 +1759,10 @@
 	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
-	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
+	ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
 	if (ret_val)
 		goto release;
-	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
-					       phy_data & 0x00FF);
+	ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
 release:
 	hw->phy.ops.release(hw);
 
@@ -1484,7 +1821,7 @@
 	u32 mac_reg;
 	u16 i;
 
-	if (hw->mac.type != e1000_pch2lan)
+	if (hw->mac.type < e1000_pch2lan)
 		return 0;
 
 	/* disable Rx path while enabling/disabling workaround */
@@ -1657,20 +1994,18 @@
 	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
 		return ret_val;
-	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
-					       I82579_MSE_THRESHOLD);
+	ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
 	if (ret_val)
 		goto release;
 	/* set MSE higher to enable link to stay up when noise is high */
-	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034);
+	ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
 	if (ret_val)
 		goto release;
-	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
-					       I82579_MSE_LINK_DOWN);
+	ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
 	if (ret_val)
 		goto release;
 	/* drop link after 5 times MSE threshold was reached */
-	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005);
+	ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
 release:
 	hw->phy.ops.release(hw);
 
@@ -1708,8 +2043,18 @@
 			return ret_val;
 
 		if (status_reg & HV_M_STATUS_SPEED_1000) {
+			u16 pm_phy_reg;
+
 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
 			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+			/* LV 1G Packet drop issue workaround */
+			ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
+			if (ret_val)
+				return ret_val;
+			pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
+			ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
+			if (ret_val)
+				return ret_val;
 		} else {
 			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
 			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
@@ -1733,7 +2078,7 @@
 {
 	u32 extcnf_ctrl;
 
-	if (hw->mac.type != e1000_pch2lan)
+	if (hw->mac.type < e1000_pch2lan)
 		return;
 
 	extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -1835,12 +2180,10 @@
 		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			return ret_val;
-		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
-						       I82579_LPI_UPDATE_TIMER);
+		ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+					  I82579_LPI_UPDATE_TIMER);
 		if (!ret_val)
-			ret_val = hw->phy.ops.write_reg_locked(hw,
-							       I82579_EMI_DATA,
-							       0x1387);
+			ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
 		hw->phy.ops.release(hw);
 	}
 
@@ -2213,7 +2556,7 @@
 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 
 	/* Check if the flash descriptor is valid */
-	if (hsfsts.hsf_status.fldesvalid == 0) {
+	if (!hsfsts.hsf_status.fldesvalid) {
 		e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
 		return -E1000_ERR_NVM;
 	}
@@ -2233,7 +2576,7 @@
 	 * completed.
 	 */
 
-	if (hsfsts.hsf_status.flcinprog == 0) {
+	if (!hsfsts.hsf_status.flcinprog) {
 		/*
 		 * There is no cycle running at present,
 		 * so we can start a cycle.
@@ -2251,7 +2594,7 @@
 		 */
 		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-			if (hsfsts.hsf_status.flcinprog == 0) {
+			if (!hsfsts.hsf_status.flcinprog) {
 				ret_val = 0;
 				break;
 			}
@@ -2293,12 +2636,12 @@
 	/* wait till FDONE bit is set to 1 */
 	do {
 		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-		if (hsfsts.hsf_status.flcdone == 1)
+		if (hsfsts.hsf_status.flcdone)
 			break;
 		udelay(1);
 	} while (i++ < timeout);
 
-	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
+	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
 		return 0;
 
 	return -E1000_ERR_NVM;
@@ -2409,10 +2752,10 @@
 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
 			 */
 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-			if (hsfsts.hsf_status.flcerr == 1) {
+			if (hsfsts.hsf_status.flcerr) {
 				/* Repeat for some time before giving up. */
 				continue;
-			} else if (hsfsts.hsf_status.flcdone == 0) {
+			} else if (!hsfsts.hsf_status.flcdone) {
 				e_dbg("Timeout error - flash cycle did not complete.\n");
 				break;
 			}
@@ -2642,7 +2985,7 @@
 	if (ret_val)
 		return ret_val;
 
-	if ((data & 0x40) == 0) {
+	if (!(data & 0x40)) {
 		data |= 0x40;
 		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
 		if (ret_val)
@@ -2760,10 +3103,10 @@
 		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
 		 */
 		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-		if (hsfsts.hsf_status.flcerr == 1)
+		if (hsfsts.hsf_status.flcerr)
 			/* Repeat for some time before giving up. */
 			continue;
-		if (hsfsts.hsf_status.flcdone == 0) {
+		if (!hsfsts.hsf_status.flcdone) {
 			e_dbg("Timeout error - flash cycle did not complete.\n");
 			break;
 		}
@@ -2915,10 +3258,10 @@
 			 * a few more times else Done
 			 */
 			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
-			if (hsfsts.hsf_status.flcerr == 1)
+			if (hsfsts.hsf_status.flcerr)
 				/* repeat for some time before giving up */
 				continue;
-			else if (hsfsts.hsf_status.flcdone == 0)
+			else if (!hsfsts.hsf_status.flcdone)
 				return ret_val;
 		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
 	}
@@ -3060,8 +3403,8 @@
 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 {
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
-	u16 reg;
-	u32 ctrl, kab;
+	u16 kum_cfg;
+	u32 ctrl, reg;
 	s32 ret_val;
 
 	/*
@@ -3095,12 +3438,12 @@
 	}
 
 	if (hw->mac.type == e1000_pchlan) {
-		/* Save the NVM K1 bit setting*/
-		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
+		/* Save the NVM K1 bit setting */
+		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
 		if (ret_val)
 			return ret_val;
 
-		if (reg & E1000_NVM_K1_ENABLE)
+		if (kum_cfg & E1000_NVM_K1_ENABLE)
 			dev_spec->nvm_k1_enabled = true;
 		else
 			dev_spec->nvm_k1_enabled = false;
@@ -3130,6 +3473,14 @@
 	/* cannot issue a flush here because it hangs the hardware */
 	msleep(20);
 
+	/* Set Phy Config Counter to 50msec */
+	if (hw->mac.type == e1000_pch2lan) {
+		reg = er32(FEXTNVM3);
+		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+		ew32(FEXTNVM3, reg);
+	}
+
 	if (!ret_val)
 		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
 
@@ -3154,9 +3505,9 @@
 	ew32(IMC, 0xffffffff);
 	er32(ICR);
 
-	kab = er32(KABGTXD);
-	kab |= E1000_KABGTXD_BGSQLBIAS;
-	ew32(KABGTXD, kab);
+	reg = er32(KABGTXD);
+	reg |= E1000_KABGTXD_BGSQLBIAS;
+	ew32(KABGTXD, reg);
 
 	return 0;
 }
@@ -3309,6 +3660,13 @@
 	 */
 	reg = er32(RFCTL);
 	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+
+	/*
+	 * Disable IPv6 extension header parsing because some malformed
+	 * IPv6 headers can hang the Rx.
+	 */
+	if (hw->mac.type == e1000_ich8lan)
+		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
 	ew32(RFCTL, reg);
 }
 
@@ -3359,6 +3717,7 @@
 	ew32(FCTTV, hw->fc.pause_time);
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82579) ||
+	    (hw->phy.type == e1000_phy_i217) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		ew32(FCRTV_PCH, hw->fc.refresh_time);
 
@@ -3422,6 +3781,7 @@
 		break;
 	case e1000_phy_82577:
 	case e1000_phy_82579:
+	case e1000_phy_i217:
 		ret_val = e1000_copper_link_setup_82577(hw);
 		if (ret_val)
 			return ret_val;
@@ -3668,14 +4028,88 @@
  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
  *  needs to be written.
+ *  Parts that support (and are linked to a partner which supports) EEE in
+ *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
+ *  than 10Mbps w/o EEE.
  **/
 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 {
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	u32 phy_ctrl;
 	s32 ret_val;
 
 	phy_ctrl = er32(PHY_CTRL);
 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+	if (hw->phy.type == e1000_phy_i217) {
+		u16 phy_reg;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+
+		if (!dev_spec->eee_disable) {
+			u16 eee_advert;
+
+			ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+						  I217_EEE_ADVERTISEMENT);
+			if (ret_val)
+				goto release;
+			e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
+
+			/*
+			 * Disable LPLU if both link partners support 100BaseT
+			 * EEE and 100Full is advertised on both ends of the
+			 * link.
+			 */
+			if ((eee_advert & I217_EEE_100_SUPPORTED) &&
+			    (dev_spec->eee_lp_ability &
+			     I217_EEE_100_SUPPORTED) &&
+			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
+				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
+					      E1000_PHY_CTRL_NOND0A_LPLU);
+		}
+
+		/*
+		 * For i217 Intel Rapid Start Technology support,
+		 * when the system is going into Sx and no manageability engine
+		 * is present, the driver must configure proxy to reset only on
+		 * power good.  LPI (Low Power Idle) state must also reset only
+		 * on power good, as well as the MTA (Multicast table array).
+		 * The SMBus release must also be disabled on LCD reset.
+		 */
+		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+
+			/* Enable proxy to reset only on power good. */
+			e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
+			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
+			e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
+
+			/*
+			 * Set the bit that makes LPI (EEE) reset only on
+			 * power good.
+			 */
+			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
+			phy_reg |= I217_SxCTRL_MASK;
+			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
+
+			/* Disable the SMB release on LCD reset. */
+			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
+			phy_reg &= ~I217_MEMPWR_MASK;
+			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
+		}
+
+		/*
+		 * Enable the MTA to reset for Intel Rapid Start Technology
+		 * support.
+		 */
+		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
+		phy_reg |= I217_CGFREG_MASK;
+		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
+
+release:
+		hw->phy.ops.release(hw);
+	}
+out:
 	ew32(PHY_CTRL, phy_ctrl);
 
 	if (hw->mac.type == e1000_ich8lan)
@@ -3704,44 +4138,61 @@
  *  on which PHY resets are not blocked, if the PHY registers cannot be
  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
  *  the PHY.
+ *  On i217, setup Intel Rapid Start Technology.
  **/
 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
 {
-	u16 phy_id1, phy_id2;
 	s32 ret_val;
 
-	if ((hw->mac.type != e1000_pch2lan) ||
-	    hw->phy.ops.check_reset_block(hw))
+	if (hw->mac.type < e1000_pch2lan)
 		return;
 
-	ret_val = hw->phy.ops.acquire(hw);
+	ret_val = e1000_init_phy_workarounds_pchlan(hw);
 	if (ret_val) {
-		e_dbg("Failed to acquire PHY semaphore in resume\n");
+		e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
 		return;
 	}
 
-	/* Test access to the PHY registers by reading the ID regs */
-	ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
-	if (ret_val)
-		goto release;
-	ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
-	if (ret_val)
-		goto release;
+	/*
+	 * For i217 Intel Rapid Start Technology support, when the system
+	 * is transitioning from Sx and no manageability engine is present,
+	 * configure SMBus to restore on reset, disable proxy, and enable
+	 * the reset on MTA (Multicast table array).
+	 */
+	if (hw->phy.type == e1000_phy_i217) {
+		u16 phy_reg;
 
-	if (hw->phy.id == ((u32)(phy_id1 << 16) |
-			   (u32)(phy_id2 & PHY_REVISION_MASK)))
-		goto release;
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val) {
+			e_dbg("Failed to setup iRST\n");
+			return;
+		}
 
-	e1000_toggle_lanphypc_value_ich8lan(hw);
+		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+			/*
+			 * Restore clear on SMB if no manageability engine
+			 * is present
+			 */
+			ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
+			if (ret_val)
+				goto release;
+			phy_reg |= I217_MEMPWR_MASK;
+			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
 
-	hw->phy.ops.release(hw);
-	msleep(50);
-	e1000_phy_hw_reset(hw);
-	msleep(50);
-	return;
-
+			/* Disable Proxy */
+			e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
+		}
+		/* Enable reset on MTA */
+		ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
+		if (ret_val)
+			goto release;
+		phy_reg &= ~I217_CGFREG_MASK;
+		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 release:
-	hw->phy.ops.release(hw);
+		if (ret_val)
+			e_dbg("Error %d in resume workarounds\n", ret_val);
+		hw->phy.ops.release(hw);
+	}
 }
 
 /**
@@ -3921,7 +4372,7 @@
 
 	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
 	if (hw->mac.type <= e1000_ich9lan) {
-		if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
+		if (!(er32(EECD) & E1000_EECD_PRES) &&
 		    (hw->phy.type == e1000_phy_igp_3)) {
 			e1000e_phy_init_script_igp3(hw);
 		}
@@ -3982,6 +4433,7 @@
 	/* Clear PHY statistics registers */
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82579) ||
+	    (hw->phy.type == e1000_phy_i217) ||
 	    (hw->phy.type == e1000_phy_82577)) {
 		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
@@ -4026,6 +4478,7 @@
 	.setup_physical_interface= e1000_setup_copper_link_ich8lan,
 	/* id_led_init dependent on mac type */
 	.config_collision_dist	= e1000e_config_collision_dist_generic,
+	.rar_set		= e1000e_rar_set_generic,
 };
 
 static const struct e1000_phy_operations ich8_phy_ops = {
@@ -4140,3 +4593,22 @@
 	.phy_ops		= &ich8_phy_ops,
 	.nvm_ops		= &ich8_nvm_ops,
 };
+
+const struct e1000_info e1000_pch_lpt_info = {
+	.mac			= e1000_pch_lpt,
+	.flags			= FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_APME_IN_WUC,
+	.flags2			= FLAG2_HAS_PHY_STATS
+				  | FLAG2_HAS_EEE,
+	.pba			= 26,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
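
The new e1000_rar_set_pch2lan()/e1000_rar_set_pch_lpt() routines above do two things: pack the network-order MAC address into the little-endian RAL/RAH pair, and decide whether a shared receive-address register may still be written while the manageability engine (ME) has locked some of them. The following stand-alone sketch models only that logic as read from the hunk; the helper names, constants and the wlock_mac interpretation are illustrative, not the driver's API.

/*
 * Illustrative user-space sketch (not driver code) of the RAR packing and
 * the FWSM.WLOCK_MAC gating used by the pch_lpt receive-address hunk above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void pack_rar(const uint8_t addr[6], uint32_t *rar_low, uint32_t *rar_high)
{
	/* bytes arrive in network (big-endian) order; registers are LE */
	*rar_low = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
		   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	*rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
}

/* wlock_mac: 0 = nothing locked, 1 = all SHRA locked, N>1 = SHRA[0..N-1] usable */
static bool shra_writable(uint32_t index, uint32_t wlock_mac)
{
	if (index == 0)
		return true;		/* RAR[0] is always host-owned */
	if (wlock_mac == 1)
		return false;		/* ME locked every shared register */
	return wlock_mac == 0 || index <= wlock_mac;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint32_t lo, hi;

	pack_rar(mac, &lo, &hi);
	printf("RAL=0x%08x RAH=0x%08x\n", lo, hi);

	for (uint32_t idx = 0; idx < 6; idx++)
		printf("index %u, wlock_mac=3 -> %s\n", idx,
		       shra_writable(idx, 3) ? "writable" : "locked");
	return 0;
}

Built with gcc -std=c99, this reports RAR indices 1-3 (SHRA[0-2]) as writable for wlock_mac=3 while higher indices come back locked, which is the behaviour the hunk's debug message hints at.
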
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index decad98..026e8b3 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -143,12 +143,12 @@
 	/* Setup the receive address */
 	e_dbg("Programming MAC Address into RAR[0]\n");
 
-	e1000e_rar_set(hw, hw->mac.addr, 0);
+	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
 
 	/* Zero out the other (rar_entry_count - 1) receive addresses */
 	e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
 	for (i = 1; i < rar_count; i++)
-		e1000e_rar_set(hw, mac_addr, i);
+		hw->mac.ops.rar_set(hw, mac_addr, i);
 }
 
 /**
@@ -215,13 +215,13 @@
 	 * same as the normal permanent MAC address stored by the HW into the
 	 * RAR. Do this by mapping this address into RAR0.
 	 */
-	e1000e_rar_set(hw, alt_mac_addr, 0);
+	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
 
 	return 0;
 }
 
 /**
- *  e1000e_rar_set - Set receive address register
+ *  e1000e_rar_set_generic - Set receive address register
  *  @hw: pointer to the HW structure
  *  @addr: pointer to the receive address
  *  @index: receive address array register
@@ -229,7 +229,7 @@
  *  Sets the receive address array register at index to the address passed
  *  in by addr.
  **/
-void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
 {
 	u32 rar_low, rar_high;
 
@@ -681,7 +681,7 @@
 		return ret_val;
 	}
 
-	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+	if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
 		hw->fc.requested_mode = e1000_fc_none;
 	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
 		hw->fc.requested_mode = e1000_fc_tx_pause;
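
The mac.c hunks above stop calling e1000e_rar_set() directly and go through hw->mac.ops.rar_set instead, which is what lets ich8lan.c plug in the 82579/LPT-specific variants registered earlier. A minimal sketch of that ops-table pattern, with made-up types and device families purely for illustration:

#include <stdint.h>
#include <stdio.h>

struct hw;

struct mac_ops {
	void (*rar_set)(struct hw *hw, const uint8_t *addr, uint32_t index);
};

struct hw {
	struct mac_ops ops;
	const char *family;
};

static void rar_set_generic(struct hw *hw, const uint8_t *addr, uint32_t index)
{
	printf("%s: program RAR[%u] with %02x:..:%02x\n",
	       hw->family, index, addr[0], addr[5]);
}

static void rar_set_lpt(struct hw *hw, const uint8_t *addr, uint32_t index)
{
	/* a family-specific variant would add SHRA/ME-lock handling here */
	printf("%s: program SHRA for RAR[%u] ending in %02x\n",
	       hw->family, index, addr[5]);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	struct hw generic = { .ops = { rar_set_generic }, .family = "82574" };
	struct hw lpt = { .ops = { rar_set_lpt }, .family = "pch_lpt" };

	generic.ops.rar_set(&generic, mac, 0);	/* same call site ... */
	lpt.ops.rar_set(&lpt, mac, 1);		/* ... different behaviour */
	return 0;
}
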
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 473f8e7..bacc950 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -85,7 +85,7 @@
 
 	/* Check that the host interface is enabled. */
 	hicr = er32(HICR);
-	if ((hicr & E1000_HICR_EN) == 0) {
+	if (!(hicr & E1000_HICR_EN)) {
 		e_dbg("E1000_HOST_EN bit disabled.\n");
 		return -E1000_ERR_HOST_INTERFACE_COMMAND;
 	}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 19ab215..a4b0435 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION
+#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -79,6 +79,7 @@
 	[board_ich10lan]	= &e1000_ich10_info,
 	[board_pchlan]		= &e1000_pch_info,
 	[board_pch2lan]		= &e1000_pch2_info,
+	[board_pch_lpt]		= &e1000_pch_lpt_info,
 };
 
 struct e1000_reg_info {
@@ -110,14 +111,14 @@
 
 	/* Rx Registers */
 	{E1000_RCTL, "RCTL"},
-	{E1000_RDLEN, "RDLEN"},
-	{E1000_RDH, "RDH"},
-	{E1000_RDT, "RDT"},
+	{E1000_RDLEN(0), "RDLEN"},
+	{E1000_RDH(0), "RDH"},
+	{E1000_RDT(0), "RDT"},
 	{E1000_RDTR, "RDTR"},
 	{E1000_RXDCTL(0), "RXDCTL"},
 	{E1000_ERT, "ERT"},
-	{E1000_RDBAL, "RDBAL"},
-	{E1000_RDBAH, "RDBAH"},
+	{E1000_RDBAL(0), "RDBAL"},
+	{E1000_RDBAH(0), "RDBAH"},
 	{E1000_RDFH, "RDFH"},
 	{E1000_RDFT, "RDFT"},
 	{E1000_RDFHS, "RDFHS"},
@@ -126,11 +127,11 @@
 
 	/* Tx Registers */
 	{E1000_TCTL, "TCTL"},
-	{E1000_TDBAL, "TDBAL"},
-	{E1000_TDBAH, "TDBAH"},
-	{E1000_TDLEN, "TDLEN"},
-	{E1000_TDH, "TDH"},
-	{E1000_TDT, "TDT"},
+	{E1000_TDBAL(0), "TDBAL"},
+	{E1000_TDBAH(0), "TDBAH"},
+	{E1000_TDLEN(0), "TDLEN"},
+	{E1000_TDH(0), "TDH"},
+	{E1000_TDT(0), "TDT"},
 	{E1000_TIDV, "TIDV"},
 	{E1000_TXDCTL(0), "TXDCTL"},
 	{E1000_TADV, "TADV"},
@@ -538,43 +539,15 @@
 	adapter->hw_csum_good++;
 }
 
-/**
- * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
- * @hw: pointer to the HW structure
- * @tail: address of tail descriptor register
- * @i: value to write to tail descriptor register
- *
- * When updating the tail register, the ME could be accessing Host CSR
- * registers at the same time.  Normally, this is handled in h/w by an
- * arbiter but on some parts there is a bug that acknowledges Host accesses
- * later than it should which could result in the descriptor register to
- * have an incorrect value.  Workaround this by checking the FWSM register
- * which has bit 24 set while ME is accessing Host CSR registers, wait
- * if it is set and try again a number of times.
- **/
-static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail,
-					unsigned int i)
-{
-	unsigned int j = 0;
-
-	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
-	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
-		udelay(50);
-
-	writel(i, tail);
-
-	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
-		return E1000_ERR_SWFW_SYNC;
-
-	return 0;
-}
-
 static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
 {
 	struct e1000_adapter *adapter = rx_ring->adapter;
 	struct e1000_hw *hw = &adapter->hw;
+	s32 ret_val = __ew32_prepare(hw);
 
-	if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) {
+	writel(i, rx_ring->tail);
+
+	if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
 		u32 rctl = er32(RCTL);
 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
 		e_err("ME firmware caused invalid RDT - resetting\n");
@@ -586,8 +559,11 @@
 {
 	struct e1000_adapter *adapter = tx_ring->adapter;
 	struct e1000_hw *hw = &adapter->hw;
+	s32 ret_val = __ew32_prepare(hw);
 
-	if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) {
+	writel(i, tx_ring->tail);
+
+	if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
 		u32 tctl = er32(TCTL);
 		ew32(TCTL, tctl & ~E1000_TCTL_EN);
 		e_err("ME firmware caused invalid TDT - resetting\n");
@@ -1053,7 +1029,8 @@
 
 	if (!adapter->tx_hang_recheck &&
 	    (adapter->flags2 & FLAG2_DMA_BURST)) {
-		/* May be block on write-back, flush and detect again
+		/*
+		 * May be blocked on write-back; flush and detect again:
 		 * flush pending descriptor writebacks to memory
 		 */
 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -1108,6 +1085,10 @@
 	      phy_1000t_status,
 	      phy_ext_status,
 	      pci_status);
+
+	/* Suggest workaround for known h/w issue */
+	if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
+		e_err("Try turning off Tx pause (flow control) via ethtool\n");
 }
 
 /**
@@ -1645,7 +1626,10 @@
 	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 
 	writel(0, rx_ring->head);
-	writel(0, rx_ring->tail);
+	if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+		e1000e_update_rdt_wa(rx_ring, 0);
+	else
+		writel(0, rx_ring->tail);
 }
 
 static void e1000e_downshift_workaround(struct work_struct *work)
@@ -2318,7 +2302,10 @@
 	tx_ring->next_to_clean = 0;
 
 	writel(0, tx_ring->head);
-	writel(0, tx_ring->tail);
+	if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+		e1000e_update_tdt_wa(tx_ring, 0);
+	else
+		writel(0, tx_ring->tail);
 }
 
 /**
@@ -2530,33 +2517,31 @@
 }
 
 /**
- * e1000_clean - NAPI Rx polling callback
+ * e1000e_poll - NAPI Rx polling callback
  * @napi: struct associated with this polling callback
- * @budget: amount of packets driver is allowed to process this poll
+ * @weight: number of packets driver is allowed to process this poll
  **/
-static int e1000_clean(struct napi_struct *napi, int budget)
+static int e1000e_poll(struct napi_struct *napi, int weight)
 {
-	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+						     napi);
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *poll_dev = adapter->netdev;
 	int tx_cleaned = 1, work_done = 0;
 
 	adapter = netdev_priv(poll_dev);
 
-	if (adapter->msix_entries &&
-	    !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
-		goto clean_rx;
+	if (!adapter->msix_entries ||
+	    (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
+		tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
 
-	tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
-
-clean_rx:
-	adapter->clean_rx(adapter->rx_ring, &work_done, budget);
+	adapter->clean_rx(adapter->rx_ring, &work_done, weight);
 
 	if (!tx_cleaned)
-		work_done = budget;
+		work_done = weight;
 
-	/* If budget not fully consumed, exit the polling mode */
-	if (work_done < budget) {
+	/* If weight not fully consumed, exit the polling mode */
+	if (work_done < weight) {
 		if (adapter->itr_setting & 3)
 			e1000_set_itr(adapter);
 		napi_complete(napi);
@@ -2800,13 +2785,13 @@
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	tdba = tx_ring->dma;
 	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
-	ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
-	ew32(TDBAH, (tdba >> 32));
-	ew32(TDLEN, tdlen);
-	ew32(TDH, 0);
-	ew32(TDT, 0);
-	tx_ring->head = adapter->hw.hw_addr + E1000_TDH;
-	tx_ring->tail = adapter->hw.hw_addr + E1000_TDT;
+	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
+	ew32(TDBAH(0), (tdba >> 32));
+	ew32(TDLEN(0), tdlen);
+	ew32(TDH(0), 0);
+	ew32(TDT(0), 0);
+	tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
+	tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
 
 	/* Set the Tx Interrupt Delay register */
 	ew32(TIDV, adapter->tx_int_delay);
@@ -2879,8 +2864,8 @@
 	u32 rctl, rfctl;
 	u32 pages = 0;
 
-	/* Workaround Si errata on 82579 - configure jumbo frame flow */
-	if (hw->mac.type == e1000_pch2lan) {
+	/* Workaround Si errata on PCHx - configure jumbo frame flow */
+	if (hw->mac.type >= e1000_pch2lan) {
 		s32 ret_val;
 
 		if (adapter->netdev->mtu > ETH_DATA_LEN)
@@ -2955,6 +2940,7 @@
 	/* Enable Extended Status in all Receive Descriptors */
 	rfctl = er32(RFCTL);
 	rfctl |= E1000_RFCTL_EXTEN;
+	ew32(RFCTL, rfctl);
 
 	/*
 	 * 82571 and greater support packet-split where the protocol
@@ -2980,13 +2966,6 @@
 	if (adapter->rx_ps_pages) {
 		u32 psrctl = 0;
 
-		/*
-		 * disable packet split support for IPv6 extension headers,
-		 * because some malformed IPv6 headers can hang the Rx
-		 */
-		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
-			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
-
 		/* Enable Packet split descriptors */
 		rctl |= E1000_RCTL_DTYP_PS;
 
@@ -3025,7 +3004,6 @@
 		 */
 	}
 
-	ew32(RFCTL, rfctl);
 	ew32(RCTL, rctl);
 	/* just started the receive unit, no need to restart */
 	adapter->flags &= ~FLAG_RX_RESTART_NOW;
@@ -3110,13 +3088,13 @@
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
 	rdba = rx_ring->dma;
-	ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
-	ew32(RDBAH, (rdba >> 32));
-	ew32(RDLEN, rdlen);
-	ew32(RDH, 0);
-	ew32(RDT, 0);
-	rx_ring->head = adapter->hw.hw_addr + E1000_RDH;
-	rx_ring->tail = adapter->hw.hw_addr + E1000_RDT;
+	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
+	ew32(RDBAH(0), (rdba >> 32));
+	ew32(RDLEN(0), rdlen);
+	ew32(RDH(0), 0);
+	ew32(RDT(0), 0);
+	rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
+	rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
 
 	/* Enable Receive Checksum Offload for TCP and UDP */
 	rxcsum = er32(RXCSUM);
@@ -3229,7 +3207,7 @@
 		netdev_for_each_uc_addr(ha, netdev) {
 			if (!rar_entries)
 				break;
-			e1000e_rar_set(hw, ha->addr, rar_entries--);
+			hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
 			count++;
 		}
 	}
@@ -3510,6 +3488,7 @@
 		fc->refresh_time = 0x1000;
 		break;
 	case e1000_pch2lan:
+	case e1000_pch_lpt:
 		fc->high_water = 0x05C20;
 		fc->low_water = 0x05048;
 		fc->pause_time = 0x0650;
@@ -3799,7 +3778,7 @@
 	/* fire an unusual interrupt on the test handler */
 	ew32(ICS, E1000_ICS_RXSEQ);
 	e1e_flush();
-	msleep(50);
+	msleep(100);
 
 	e1000_irq_disable(adapter);
 
@@ -4038,6 +4017,7 @@
 static int e1000_set_mac(struct net_device *netdev, void *p)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
 	struct sockaddr *addr = p;
 
 	if (!is_valid_ether_addr(addr->sa_data))
@@ -4046,7 +4026,7 @@
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
 
-	e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+	hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
 
 	if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
 		/* activate the work around */
@@ -4060,9 +4040,8 @@
 		 * are dropped. Eventually the LAA will be in RAR[0] and
 		 * RAR[14]
 		 */
-		e1000e_rar_set(&adapter->hw,
-			      adapter->hw.mac.addr,
-			      adapter->hw.mac.rar_entry_count - 1);
+		hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
+				    adapter->hw.mac.rar_entry_count - 1);
 	}
 
 	return 0;
@@ -4641,7 +4620,7 @@
 	 * reset from the other port. Set the appropriate LAA in RAR[0]
 	 */
 	if (e1000e_get_laa_state_82571(hw))
-		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
+		hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
 
 	if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
 		e1000e_check_82574_phy_workaround(adapter);
@@ -5151,6 +5130,8 @@
 	/* if count is 0 then mapping error has occurred */
 	count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
 	if (count) {
+		skb_tx_timestamp(skb);
+
 		netdev_sent_queue(netdev, skb->len);
 		e1000_tx_queue(tx_ring, tx_flags, count);
 		/* Make sure there is space in the ring for the next send. */
@@ -5285,22 +5266,14 @@
 		return -EINVAL;
 	}
 
-	/* Jumbo frame workaround on 82579 requires CRC be stripped */
-	if ((adapter->hw.mac.type == e1000_pch2lan) &&
+	/* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
+	if ((adapter->hw.mac.type >= e1000_pch2lan) &&
 	    !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
 	    (new_mtu > ETH_DATA_LEN)) {
-		e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
+		e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
 		return -EINVAL;
 	}
 
-	/* 82573 Errata 17 */
-	if (((adapter->hw.mac.type == e1000_82573) ||
-	     (adapter->hw.mac.type == e1000_82574)) &&
-	    (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
-		adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
-		e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
-	}
-
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
 		usleep_range(1000, 2000);
 	/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -5694,7 +5667,7 @@
 			return err;
 	}
 
-	if (hw->mac.type == e1000_pch2lan)
+	if (hw->mac.type >= e1000_pch2lan)
 		e1000_resume_workarounds_pchlan(&adapter->hw);
 
 	e1000e_power_up_phy(adapter);
@@ -6226,7 +6199,7 @@
 	netdev->netdev_ops		= &e1000e_netdev_ops;
 	e1000e_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo		= 5 * HZ;
-	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+	netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
 	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
 
 	netdev->mem_start = mmio_start;
@@ -6593,6 +6566,9 @@
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
 
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
+
 	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
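
Among the netdev.c changes above, e1000_clean() becomes e1000e_poll() and the budget/weight handling is restructured: Tx is cleaned only when the vector services both rings, unfinished Tx forces the full weight to be reported, and polling mode is left only when less than the full weight was consumed. The following self-contained sketch models just that control flow; the stub rings and the printf standing in for napi_complete() are assumptions of the example, not driver code.

#include <stdbool.h>
#include <stdio.h>

static bool clean_tx(int *tx_backlog)
{
	/* pretend Tx always drains completely in one pass */
	*tx_backlog = 0;
	return true;
}

static int clean_rx(int *rx_backlog, int weight)
{
	int done = (*rx_backlog < weight) ? *rx_backlog : weight;

	*rx_backlog -= done;
	return done;
}

/* returns the work reported to the NAPI core */
static int poll(int *tx_backlog, int *rx_backlog, int weight, bool shared_vector)
{
	bool tx_cleaned = true;
	int work_done;

	if (shared_vector)
		tx_cleaned = clean_tx(tx_backlog);

	work_done = clean_rx(rx_backlog, weight);

	if (!tx_cleaned)
		work_done = weight;	/* force another poll round */

	if (work_done < weight)
		printf("napi_complete(), re-enable interrupts\n");

	return work_done;
}

int main(void)
{
	int tx = 5, rx = 100;

	while (rx || tx)
		printf("poll -> %d\n", poll(&tx, &rx, 64, true));
	return 0;
}
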
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ff796e4..55cc1565b 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -106,7 +106,7 @@
 /*
  * Interrupt Throttle Rate (interrupts/sec)
  *
- * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative
  */
 E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
 #define DEFAULT_ITR 3
@@ -166,8 +166,8 @@
  *
  * Default Value: 1 (enabled)
  */
-E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
-                          "the CRC");
+E1000_PARAM(CrcStripping,
+	    "Enable CRC Stripping, disable if your BMC needs the CRC");
 
 struct e1000_option {
 	enum { enable_option, range_option, list_option } type;
@@ -344,53 +344,60 @@
 
 		if (num_InterruptThrottleRate > bd) {
 			adapter->itr = InterruptThrottleRate[bd];
-			switch (adapter->itr) {
-			case 0:
-				e_info("%s turned off\n", opt.name);
-				break;
-			case 1:
-				e_info("%s set to dynamic mode\n", opt.name);
-				adapter->itr_setting = adapter->itr;
-				adapter->itr = 20000;
-				break;
-			case 3:
-				e_info("%s set to dynamic conservative mode\n",
-					opt.name);
-				adapter->itr_setting = adapter->itr;
-				adapter->itr = 20000;
-				break;
-			case 4:
-				e_info("%s set to simplified (2000-8000 ints) "
-				       "mode\n", opt.name);
-				adapter->itr_setting = 4;
-				break;
-			default:
-				/*
-				 * Save the setting, because the dynamic bits
-				 * change itr.
-				 */
-				if (e1000_validate_option(&adapter->itr, &opt,
-							  adapter) &&
-				    (adapter->itr == 3)) {
-					/*
-					 * In case of invalid user value,
-					 * default to conservative mode.
-					 */
-					adapter->itr_setting = adapter->itr;
-					adapter->itr = 20000;
-				} else {
-					/*
-					 * Clear the lower two bits because
-					 * they are used as control.
-					 */
-					adapter->itr_setting =
-						adapter->itr & ~3;
-				}
-				break;
-			}
+
+			/*
+			 * Make sure a message is printed for non-special
+			 * values.  In case of an invalid option, display a
+			 * warning, use the default and go through the
+			 * itr/itr_setting adjustment logic below.
+			 */
+			if ((adapter->itr > 4) &&
+			    e1000_validate_option(&adapter->itr, &opt, adapter))
+				adapter->itr = opt.def;
 		} else {
-			adapter->itr_setting = opt.def;
+			/*
+			 * If no option was specified, use the default value
+			 * and adjust itr/itr_setting via the logic below.
+			 */
+			adapter->itr = opt.def;
+
+			/*
+			 * Make sure a message is printed for non-special
+			 * default values
+			 */
+			if (adapter->itr > 4)
+				e_info("%s set to default %d\n", opt.name,
+				       adapter->itr);
+		}
+
+		adapter->itr_setting = adapter->itr;
+		switch (adapter->itr) {
+		case 0:
+			e_info("%s turned off\n", opt.name);
+			break;
+		case 1:
+			e_info("%s set to dynamic mode\n", opt.name);
 			adapter->itr = 20000;
+			break;
+		case 3:
+			e_info("%s set to dynamic conservative mode\n",
+			       opt.name);
+			adapter->itr = 20000;
+			break;
+		case 4:
+			e_info("%s set to simplified (2000-8000 ints) mode\n",
+			       opt.name);
+			break;
+		default:
+			/*
+			 * Save the setting, because the dynamic bits
+			 * change itr.
+			 *
+			 * Clear the lower two bits because
+			 * they are used as control.
+			 */
+			adapter->itr_setting &= ~3;
+			break;
 		}
 	}
 	{ /* Interrupt Mode */
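
The reworked InterruptThrottleRate handling above first normalizes adapter->itr (the user's value, or the default), then derives itr_setting and the starting rate in a single switch. Below is a worked, user-space model of that mapping; the simple range check stands in for e1000_validate_option() and is an assumption of the sketch, not the driver's validation code.

#include <stdio.h>

struct itr_state { int itr_setting, itr; };

static struct itr_state apply_itr_option(int requested)
{
	struct itr_state s;

	/* stand-in for e1000_validate_option(): clamp invalid rates to default 3 */
	if (requested > 4 && (requested < 100 || requested > 100000))
		requested = 3;

	s.itr_setting = requested;
	s.itr = requested;

	switch (requested) {
	case 0:				/* off */
		break;
	case 1:				/* dynamic */
	case 3:				/* dynamic conservative */
		s.itr = 20000;
		break;
	case 4:				/* simplified mode */
		break;
	default:			/* fixed rate: low two bits are control flags */
		s.itr_setting &= ~3;
		break;
	}
	return s;
}

int main(void)
{
	const int samples[] = { 0, 1, 3, 4, 10003, 7 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		struct itr_state s = apply_itr_option(samples[i]);

		printf("option %6d -> itr_setting=%5d itr=%5d\n",
		       samples[i], s.itr_setting, s.itr);
	}
	return 0;
}

For example, an option of 10003 keeps a starting rate of 10003 interrupts/sec but records itr_setting=10000, since the low two bits are reserved as control flags; an out-of-range value such as 7 falls back to the conservative default.
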
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 35b4557..0334d01 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -639,6 +639,45 @@
 }
 
 /**
+ *  e1000_set_master_slave_mode - Setup PHY for Master/slave mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Master/slave mode
+ **/
+static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+
+	/* Resolve Master/Slave mode */
+	ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* load defaults for future use */
+	hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
+	    ((phy_data & CR_1000T_MS_VALUE) ?
+	     e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto;
+
+	switch (hw->phy.ms_type) {
+	case e1000_ms_force_master:
+		phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+		break;
+	case e1000_ms_force_slave:
+		phy_data |= CR_1000T_MS_ENABLE;
+		phy_data &= ~(CR_1000T_MS_VALUE);
+		break;
+	case e1000_ms_auto:
+		phy_data &= ~CR_1000T_MS_ENABLE;
+		/* fall-through */
+	default:
+		break;
+	}
+
+	return e1e_wphy(hw, PHY_1000T_CTRL, phy_data);
+}
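
The helper added above centralizes the 1000BASE-T master/slave resolution that was previously open-coded in the m88 copper setup path. A stand-alone sketch of the same bit manipulation follows; the register bit values are written out locally and the enum is invented for the example, so it stands in for, rather than reproduces, the driver's CR_1000T_* definitions.

#include <stdint.h>
#include <stdio.h>

#define MS_ENABLE	0x1000	/* manual master/slave selection */
#define MS_VALUE	0x0800	/* 1 = master, 0 = slave (when manual) */

enum ms_type { MS_AUTO, MS_FORCE_MASTER, MS_FORCE_SLAVE };

static uint16_t apply_ms_type(uint16_t reg, enum ms_type type)
{
	switch (type) {
	case MS_FORCE_MASTER:
		reg |= MS_ENABLE | MS_VALUE;
		break;
	case MS_FORCE_SLAVE:
		reg |= MS_ENABLE;
		reg &= ~MS_VALUE;
		break;
	case MS_AUTO:
	default:
		reg &= ~MS_ENABLE;
		break;
	}
	return reg;
}

int main(void)
{
	uint16_t reg = 0x0200;	/* some pre-existing 1000T control bits */

	printf("master: 0x%04x\n", apply_ms_type(reg, MS_FORCE_MASTER));
	printf("slave : 0x%04x\n", apply_ms_type(reg, MS_FORCE_SLAVE));
	printf("auto  : 0x%04x\n", apply_ms_type(reg, MS_AUTO));
	return 0;
}
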
+
+/**
  *  e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
  *  @hw: pointer to the HW structure
  *
@@ -659,7 +698,11 @@
 	/* Enable downshift */
 	phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
 
-	return e1e_wphy(hw, I82577_CFG_REG, phy_data);
+	ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	return e1000_set_master_slave_mode(hw);
 }
 
 /**
@@ -718,12 +761,28 @@
 	 *   1 - Enabled
 	 */
 	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
-	if (phy->disable_polarity_correction == 1)
+	if (phy->disable_polarity_correction)
 		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
 
 	/* Enable downshift on BM (disabled by default) */
-	if (phy->type == e1000_phy_bm)
+	if (phy->type == e1000_phy_bm) {
+		/* For 82574/82583, first disable then enable downshift */
+		if (phy->id == BME1000_E_PHY_ID_R2) {
+			phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT;
+			ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL,
+					   phy_data);
+			if (ret_val)
+				return ret_val;
+			/* Commit the changes. */
+			ret_val = e1000e_commit_phy(hw);
+			if (ret_val) {
+				e_dbg("Error committing the PHY changes\n");
+				return ret_val;
+			}
+		}
+
 		phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
+	}
 
 	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
 	if (ret_val)
@@ -879,31 +938,7 @@
 				return ret_val;
 		}
 
-		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
-		if (ret_val)
-			return ret_val;
-
-		/* load defaults for future use */
-		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
-			((data & CR_1000T_MS_VALUE) ?
-			e1000_ms_force_master :
-			e1000_ms_force_slave) :
-			e1000_ms_auto;
-
-		switch (phy->ms_type) {
-		case e1000_ms_force_master:
-			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
-			break;
-		case e1000_ms_force_slave:
-			data |= CR_1000T_MS_ENABLE;
-			data &= ~(CR_1000T_MS_VALUE);
-			break;
-		case e1000_ms_auto:
-			data &= ~CR_1000T_MS_ENABLE;
-		default:
-			break;
-		}
-		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
+		ret_val = e1000_set_master_slave_mode(hw);
 	}
 
 	return ret_val;
@@ -1090,7 +1125,7 @@
 	 * If autoneg_advertised is zero, we assume it was not defaulted
 	 * by the calling code so we set to advertise full capability.
 	 */
-	if (phy->autoneg_advertised == 0)
+	if (!phy->autoneg_advertised)
 		phy->autoneg_advertised = phy->autoneg_mask;
 
 	e_dbg("Reconfiguring auto-neg advertisement params\n");
@@ -1596,7 +1631,7 @@
 	ret_val = e1e_rphy(hw, offset, &phy_data);
 
 	if (!ret_val)
-		phy->speed_downgraded = (phy_data & mask);
+		phy->speed_downgraded = !!(phy_data & mask);
 
 	return ret_val;
 }
@@ -1925,8 +1960,8 @@
 	if (ret_val)
 		return ret_val;
 
-	phy->polarity_correction = (phy_data &
-				    M88E1000_PSCR_POLARITY_REVERSAL);
+	phy->polarity_correction = !!(phy_data &
+				      M88E1000_PSCR_POLARITY_REVERSAL);
 
 	ret_val = e1000_check_polarity_m88(hw);
 	if (ret_val)
@@ -1936,7 +1971,7 @@
 	if (ret_val)
 		return ret_val;
 
-	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX);
+	phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
 
 	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
 		ret_val = e1000_get_cable_length(hw);
@@ -1999,7 +2034,7 @@
 	if (ret_val)
 		return ret_val;
 
-	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);
+	phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
 
 	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
 	    IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -2052,8 +2087,7 @@
 	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
 	if (ret_val)
 		return ret_val;
-	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
-	                           ? false : true;
+	phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
 
 	if (phy->polarity_correction) {
 		ret_val = e1000_check_polarity_ife(hw);
@@ -2070,7 +2104,7 @@
 	if (ret_val)
 		return ret_val;
 
-	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;
+	phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
 
 	/* The following parameters are undefined for 10/100 operation. */
 	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
@@ -2320,6 +2354,9 @@
 	case I82579_E_PHY_ID:
 		phy_type = e1000_phy_82579;
 		break;
+	case I217_E_PHY_ID:
+		phy_type = e1000_phy_i217;
+		break;
 	default:
 		phy_type = e1000_phy_unknown;
 		break;
@@ -2979,7 +3016,7 @@
 		if ((hw->phy.type == e1000_phy_82578) &&
 		    (hw->phy.revision >= 1) &&
 		    (hw->phy.addr == 2) &&
-		    ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) {
+		    !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
 			u16 data2 = 0x7EFF;
 			ret_val = e1000_access_phy_debug_regs_hv(hw,
 								 (1 << 6) | 0x3,
@@ -3265,7 +3302,7 @@
 	if (ret_val)
 		return ret_val;
 
-	phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false;
+	phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
 
 	if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
 	    I82577_PHY_STATUS2_SPEED_1000MBPS) {
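
Several phy.c hunks above change flag assignments from "x = (data & MASK)" to "x = !!(data & MASK)". When the destination really is a C99 bool the conversion already normalizes the value, so the change is mostly about making the intent explicit and keeping static checkers quiet; for an integer-typed flag, though, the raw bit pattern can truncate or compare oddly. A small plain-C demonstration (not driver code, with an example bit value):

#include <stdbool.h>
#include <stdio.h>

#define PSSR_MDIX	0x0800	/* example status bit */

int main(void)
{
	unsigned short phy_data = 0x2800;		/* bit set in hardware */
	bool as_bool = phy_data & PSSR_MDIX;		/* bool conversion: 1 */
	unsigned char as_u8 = phy_data & PSSR_MDIX;	/* truncates to 0! */
	unsigned char fixed = !!(phy_data & PSSR_MDIX);	/* normalized to 1 */

	printf("bool=%d raw u8=%d normalized=%d\n", as_bool, as_u8, fixed);
	printf("raw u8 == true ? %s\n", (as_u8 == true) ? "yes" : "no");
	return 0;
}
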
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 6565c46..97c197f 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -33,5 +33,7 @@
 obj-$(CONFIG_IGB) += igb.o
 
 igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
-	    e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
+	    e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
+	    e1000_i210.o
 
+igb-$(CONFIG_IGB_PTP) += igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 08bdc33..e650839 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -36,6 +36,7 @@
 
 #include "e1000_mac.h"
 #include "e1000_82575.h"
+#include "e1000_i210.h"
 
 static s32  igb_get_invariants_82575(struct e1000_hw *);
 static s32  igb_acquire_phy_82575(struct e1000_hw *);
@@ -52,6 +53,8 @@
 static s32  igb_reset_hw_82575(struct e1000_hw *);
 static s32  igb_reset_hw_82580(struct e1000_hw *);
 static s32  igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
+static s32  igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
+static s32  igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
 static s32  igb_setup_copper_link_82575(struct e1000_hw *);
 static s32  igb_setup_serdes_link_82575(struct e1000_hw *);
 static s32  igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
@@ -96,6 +99,8 @@
 		break;
 	case e1000_82580:
 	case e1000_i350:
+	case e1000_i210:
+	case e1000_i211:
 		reg = rd32(E1000_MDICNFG);
 		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
 		break;
@@ -150,6 +155,17 @@
 	case E1000_DEV_ID_I350_SGMII:
 		mac->type = e1000_i350;
 		break;
+	case E1000_DEV_ID_I210_COPPER:
+	case E1000_DEV_ID_I210_COPPER_OEM1:
+	case E1000_DEV_ID_I210_COPPER_IT:
+	case E1000_DEV_ID_I210_FIBER:
+	case E1000_DEV_ID_I210_SERDES:
+	case E1000_DEV_ID_I210_SGMII:
+		mac->type = e1000_i210;
+		break;
+	case E1000_DEV_ID_I211_COPPER:
+		mac->type = e1000_i211;
+		break;
 	default:
 		return -E1000_ERR_MAC_INIT;
 		break;
@@ -182,26 +198,44 @@
 	/* Set mta register count */
 	mac->mta_reg_count = 128;
 	/* Set rar entry count */
-	mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
-	if (mac->type == e1000_82576)
+	switch (mac->type) {
+	case e1000_82576:
 		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
-	if (mac->type == e1000_82580)
+		break;
+	case e1000_82580:
 		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
-	if (mac->type == e1000_i350)
+		break;
+	case e1000_i350:
+	case e1000_i210:
+	case e1000_i211:
 		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+		break;
+	default:
+		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+		break;
+	}
 	/* reset */
 	if (mac->type >= e1000_82580)
 		mac->ops.reset_hw = igb_reset_hw_82580;
 	else
 		mac->ops.reset_hw = igb_reset_hw_82575;
+
+	if (mac->type >= e1000_i210) {
+		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
+		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
+	} else {
+		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
+		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
+	}
+
 	/* Set if part includes ASF firmware */
 	mac->asf_firmware_present = true;
 	/* Set if manageability features are enabled. */
 	mac->arc_subsystem_valid =
 		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
 			? true : false;
-	/* enable EEE on i350 parts */
-	if (mac->type == e1000_i350)
+	/* enable EEE on i350 and later parts */
+	if (mac->type >= e1000_i350)
 		dev_spec->eee_disable = false;
 	else
 		dev_spec->eee_disable = true;
@@ -213,26 +247,6 @@
 
 	/* NVM initialization */
 	eecd = rd32(E1000_EECD);
-
-	nvm->opcode_bits        = 8;
-	nvm->delay_usec         = 1;
-	switch (nvm->override) {
-	case e1000_nvm_override_spi_large:
-		nvm->page_size    = 32;
-		nvm->address_bits = 16;
-		break;
-	case e1000_nvm_override_spi_small:
-		nvm->page_size    = 8;
-		nvm->address_bits = 8;
-		break;
-	default:
-		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
-		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
-		break;
-	}
-
-	nvm->type = e1000_nvm_eeprom_spi;
-
 	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
 		     E1000_EECD_SIZE_EX_SHIFT);
 
@@ -242,6 +256,33 @@
 	 */
 	size += NVM_WORD_SIZE_BASE_SHIFT;
 
+	nvm->word_size = 1 << size;
+	if (hw->mac.type < e1000_i210) {
+		nvm->opcode_bits        = 8;
+		nvm->delay_usec         = 1;
+		switch (nvm->override) {
+		case e1000_nvm_override_spi_large:
+			nvm->page_size    = 32;
+			nvm->address_bits = 16;
+			break;
+		case e1000_nvm_override_spi_small:
+			nvm->page_size    = 8;
+			nvm->address_bits = 8;
+			break;
+		default:
+			nvm->page_size    = eecd
+				& E1000_EECD_ADDR_BITS ? 32 : 8;
+			nvm->address_bits = eecd
+				& E1000_EECD_ADDR_BITS ? 16 : 8;
+			break;
+		}
+		if (nvm->word_size == (1 << 15))
+			nvm->page_size = 128;
+
+		nvm->type = e1000_nvm_eeprom_spi;
+	} else
+		nvm->type = e1000_nvm_flash_hw;
+
 	/*
 	 * Check for invalid size
 	 */
@@ -249,32 +290,60 @@
 		pr_notice("The NVM size is not valid, defaulting to 32K\n");
 		size = 15;
 	}
-	nvm->word_size = 1 << size;
-	if (nvm->word_size == (1 << 15))
-		nvm->page_size = 128;
 
 	/* NVM Function Pointers */
-	nvm->ops.acquire = igb_acquire_nvm_82575;
-	if (nvm->word_size < (1 << 15))
-		nvm->ops.read = igb_read_nvm_eerd;
-	else
-		nvm->ops.read = igb_read_nvm_spi;
-
-	nvm->ops.release = igb_release_nvm_82575;
 	switch (hw->mac.type) {
 	case e1000_82580:
 		nvm->ops.validate = igb_validate_nvm_checksum_82580;
 		nvm->ops.update = igb_update_nvm_checksum_82580;
+		nvm->ops.acquire = igb_acquire_nvm_82575;
+		nvm->ops.release = igb_release_nvm_82575;
+		if (nvm->word_size < (1 << 15))
+			nvm->ops.read = igb_read_nvm_eerd;
+		else
+			nvm->ops.read = igb_read_nvm_spi;
+		nvm->ops.write = igb_write_nvm_spi;
 		break;
 	case e1000_i350:
 		nvm->ops.validate = igb_validate_nvm_checksum_i350;
 		nvm->ops.update = igb_update_nvm_checksum_i350;
+		nvm->ops.acquire = igb_acquire_nvm_82575;
+		nvm->ops.release = igb_release_nvm_82575;
+		if (nvm->word_size < (1 << 15))
+			nvm->ops.read = igb_read_nvm_eerd;
+		else
+			nvm->ops.read = igb_read_nvm_spi;
+		nvm->ops.write = igb_write_nvm_spi;
+		break;
+	case e1000_i210:
+		nvm->ops.validate = igb_validate_nvm_checksum_i210;
+		nvm->ops.update   = igb_update_nvm_checksum_i210;
+		nvm->ops.acquire = igb_acquire_nvm_i210;
+		nvm->ops.release = igb_release_nvm_i210;
+		nvm->ops.read    = igb_read_nvm_srrd_i210;
+		nvm->ops.valid_led_default = igb_valid_led_default_i210;
+		break;
+	case e1000_i211:
+		nvm->ops.acquire  = igb_acquire_nvm_i210;
+		nvm->ops.release  = igb_release_nvm_i210;
+		nvm->ops.read     = igb_read_nvm_i211;
+		nvm->ops.valid_led_default = igb_valid_led_default_i210;
+		nvm->ops.validate = NULL;
+		nvm->ops.update   = NULL;
+		nvm->ops.write    = NULL;
 		break;
 	default:
 		nvm->ops.validate = igb_validate_nvm_checksum;
 		nvm->ops.update = igb_update_nvm_checksum;
+		nvm->ops.acquire = igb_acquire_nvm_82575;
+		nvm->ops.release = igb_release_nvm_82575;
+		if (nvm->word_size < (1 << 15))
+			nvm->ops.read = igb_read_nvm_eerd;
+		else
+			nvm->ops.read = igb_read_nvm_spi;
+		nvm->ops.write = igb_write_nvm_spi;
+		break;
 	}
-	nvm->ops.write = igb_write_nvm_spi;
 
 	/* if part supports SR-IOV then initialize mailbox parameters */
 	switch (mac->type) {
@@ -312,9 +381,13 @@
 	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
 		phy->ops.read_reg   = igb_read_phy_reg_sgmii_82575;
 		phy->ops.write_reg  = igb_write_phy_reg_sgmii_82575;
-	} else if (hw->mac.type >= e1000_82580) {
+	} else if ((hw->mac.type == e1000_82580)
+		|| (hw->mac.type == e1000_i350)) {
 		phy->ops.read_reg   = igb_read_phy_reg_82580;
 		phy->ops.write_reg  = igb_write_phy_reg_82580;
+	} else if (hw->phy.type >= e1000_phy_i210) {
+		phy->ops.read_reg   = igb_read_phy_reg_gs40g;
+		phy->ops.write_reg  = igb_write_phy_reg_gs40g;
 	} else {
 		phy->ops.read_reg   = igb_read_phy_reg_igp;
 		phy->ops.write_reg  = igb_write_phy_reg_igp;
@@ -343,6 +416,14 @@
 		else
 			phy->ops.get_cable_length = igb_get_cable_length_m88;
 
+		if (phy->id == I210_I_PHY_ID) {
+			phy->ops.get_cable_length =
+					 igb_get_cable_length_m88_gen2;
+			phy->ops.set_d0_lplu_state =
+					igb_set_d0_lplu_state_82580;
+			phy->ops.set_d3_lplu_state =
+					igb_set_d3_lplu_state_82580;
+		}
 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
 		break;
 	case IGP03E1000_E_PHY_ID:
@@ -359,6 +440,17 @@
 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
 		phy->ops.get_cable_length   = igb_get_cable_length_82580;
 		phy->ops.get_phy_info       = igb_get_phy_info_82580;
+		phy->ops.set_d0_lplu_state  = igb_set_d0_lplu_state_82580;
+		phy->ops.set_d3_lplu_state  = igb_set_d3_lplu_state_82580;
+		break;
+	case I210_I_PHY_ID:
+		phy->type                   = e1000_phy_i210;
+		phy->ops.get_phy_info       = igb_get_phy_info_m88;
+		phy->ops.check_polarity     = igb_check_polarity_m88;
+		phy->ops.get_cable_length   = igb_get_cable_length_m88_gen2;
+		phy->ops.set_d0_lplu_state  = igb_set_d0_lplu_state_82580;
+		phy->ops.set_d3_lplu_state  = igb_set_d3_lplu_state_82580;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
 		break;
 	default:
 		return -E1000_ERR_PHY;
@@ -385,7 +477,7 @@
 	else if (hw->bus.func == E1000_FUNC_3)
 		mask = E1000_SWFW_PHY3_SM;
 
-	return igb_acquire_swfw_sync_82575(hw, mask);
+	return hw->mac.ops.acquire_swfw_sync(hw, mask);
 }
 
 /**
@@ -406,7 +498,7 @@
 	else if (hw->bus.func == E1000_FUNC_3)
 		mask = E1000_SWFW_PHY3_SM;
 
-	igb_release_swfw_sync_82575(hw, mask);
+	hw->mac.ops.release_swfw_sync(hw, mask);
 }
 
 /**
@@ -510,6 +602,8 @@
 			break;
 		case e1000_82580:
 		case e1000_i350:
+		case e1000_i210:
+		case e1000_i211:
 			mdic = rd32(E1000_MDICNFG);
 			mdic &= E1000_MDICNFG_PHY_MASK;
 			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
@@ -674,6 +768,96 @@
 }
 
 /**
+ *  igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 data;
+
+	data = rd32(E1000_82580_PHY_POWER_MGMT);
+
+	if (active) {
+		data |= E1000_82580_PM_D0_LPLU;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		data &= ~E1000_82580_PM_SPD;
+	} else {
+		data &= ~E1000_82580_PM_D0_LPLU;
+
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on)
+			data |= E1000_82580_PM_SPD;
+		else if (phy->smart_speed == e1000_smart_speed_off)
+			data &= ~E1000_82580_PM_SPD;
+	}
+
+	wr32(E1000_82580_PHY_POWER_MGMT, data);
+	return ret_val;
+}
+
+/**
+ *  igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 data;
+
+	data = rd32(E1000_82580_PHY_POWER_MGMT);
+
+	if (!active) {
+		data &= ~E1000_82580_PM_D3_LPLU;
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on)
+			data |= E1000_82580_PM_SPD;
+		else if (phy->smart_speed == e1000_smart_speed_off)
+			data &= ~E1000_82580_PM_SPD;
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= E1000_82580_PM_D3_LPLU;
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		data &= ~E1000_82580_PM_SPD;
+	}
+
+	wr32(E1000_82580_PHY_POWER_MGMT, data);
+	return ret_val;
+}
+
+/**
  *  igb_acquire_nvm_82575 - Request for access to EEPROM
  *  @hw: pointer to the HW structure
  *
@@ -686,14 +870,14 @@
 {
 	s32 ret_val;
 
-	ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
 	if (ret_val)
 		goto out;
 
 	ret_val = igb_acquire_nvm(hw);
 
 	if (ret_val)
-		igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
 
 out:
 	return ret_val;
@@ -709,7 +893,7 @@
 static void igb_release_nvm_82575(struct e1000_hw *hw)
 {
 	igb_release_nvm(hw);
-	igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
 }
 
 /**
@@ -1080,7 +1264,6 @@
 	 * is no link.
 	 */
 	igb_clear_hw_cntrs_82575(hw);
-
 	return ret_val;
 }
 
@@ -1117,6 +1300,7 @@
 		}
 	}
 	switch (hw->phy.type) {
+	case e1000_phy_i210:
 	case e1000_phy_m88:
 		if (hw->phy.id == I347AT4_E_PHY_ID ||
 		    hw->phy.id == M88E1112_E_PHY_ID)
@@ -1757,7 +1941,7 @@
 
 	/* Determine whether or not a global dev reset is requested */
 	if (global_device_reset &&
-		igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
+		hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
 			global_device_reset = false;
 
 	if (global_device_reset &&
@@ -1803,7 +1987,7 @@
 
 	/* Release semaphore */
 	if (global_device_reset)
-		igb_release_swfw_sync_82575(hw, swmbsw_mask);
+		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
 
 	return ret_val;
 }
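
The two LPLU helpers added above come down to toggling three bits in the 82580 PHY
power-management register, with LPLU and SmartSpeed kept mutually exclusive (the
E1000_82580_PM_* masks appear later in e1000_phy.h). A minimal user-space sketch of
that bit logic, using the same mask values but hypothetical helper names (this is
not the driver code itself):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mask values as defined for E1000_82580_PHY_POWER_MGMT in e1000_phy.h */
#define PM_SPD      0x0001  /* Smart Power Down / SmartSpeed */
#define PM_D0_LPLU  0x0002  /* LPLU for D0a states */
#define PM_D3_LPLU  0x0004  /* LPLU for all other states */

/* Hypothetical helper mirroring the D0 LPLU bit logic: enabling LPLU always
 * clears SmartSpeed; disabling LPLU restores SmartSpeed only if it was
 * configured on. */
static uint16_t set_d0_lplu(uint16_t reg, bool active, bool smart_speed_on)
{
	if (active) {
		reg |= PM_D0_LPLU;
		reg &= ~PM_SPD;
	} else {
		reg &= ~PM_D0_LPLU;
		if (smart_speed_on)
			reg |= PM_SPD;
		else
			reg &= ~PM_SPD;
	}
	return reg;
}

int main(void)
{
	uint16_t reg = 0;

	reg = set_d0_lplu(reg, true, true);
	printf("LPLU on : 0x%04x\n", reg);  /* 0x0002: LPLU set, SPD clear */
	reg = set_d0_lplu(reg, false, true);
	printf("LPLU off: 0x%04x\n", reg);  /* 0x0001: SPD restored */
	return 0;
}

The D3 variant works the same way on PM_D3_LPLU, with the extra condition that LPLU
is only enabled when the advertised speeds match one of the reduced-speed sets.
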
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index b927d79..e85c453 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -55,10 +55,11 @@
 #define E1000_SRRCTL_DROP_EN                            0x80000000
 #define E1000_SRRCTL_TIMESTAMP                          0x40000000
 
+
 #define E1000_MRQC_ENABLE_RSS_4Q            0x00000002
 #define E1000_MRQC_ENABLE_VMDQ              0x00000003
-#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q       0x00000005
 #define E1000_MRQC_RSS_FIELD_IPV4_UDP       0x00400000
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q       0x00000005
 #define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 89eb1f8..ec7e4fe 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -301,6 +301,8 @@
 							* transactions */
 #define E1000_DMACR_DMAC_LX_SHIFT       28
 #define E1000_DMACR_DMAC_EN             0x80000000 /* Enable DMA Coalescing */
+/* DMA Coalescing BMC-to-OS Watchdog Enable */
+#define E1000_DMACR_DC_BMC2OSW_EN	0x00008000
 
 #define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coalescing Transmit
 							* Threshold */
@@ -458,6 +460,7 @@
 #define E1000_ERR_INVALID_ARGUMENT  16
 #define E1000_ERR_NO_SPACE          17
 #define E1000_ERR_NVM_PBA_SECTION   18
+#define E1000_ERR_INVM_VALUE_NOT_FOUND	19
 
 /* Loop limit on how long we wait for auto-negotiation to complete */
 #define COPPER_LINK_UP_LIMIT              10
@@ -595,6 +598,25 @@
 #define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
 #define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
 #define E1000_EECD_SIZE_EX_SHIFT     11
+#define E1000_EECD_FLUPD_I210		0x00800000 /* Update FLASH */
+#define E1000_EECD_FLUDONE_I210		0x04000000 /* Update FLASH done*/
+#define E1000_FLUDONE_ATTEMPTS		20000
+#define E1000_EERD_EEWR_MAX_COUNT	512 /* buffered EEPROM words rw */
+#define E1000_I210_FIFO_SEL_RX		0x00
+#define E1000_I210_FIFO_SEL_TX_QAV(_i)	(0x02 + (_i))
+#define E1000_I210_FIFO_SEL_TX_LEGACY	E1000_I210_FIFO_SEL_TX_QAV(0)
+#define E1000_I210_FIFO_SEL_BMC2OS_TX	0x06
+#define E1000_I210_FIFO_SEL_BMC2OS_RX	0x01
+
 
 /* Offset to data in NVM read/write registers */
 #define E1000_NVM_RW_REG_DATA   16
@@ -613,6 +635,16 @@
 #define NVM_CHECKSUM_REG           0x003F
 #define NVM_COMPATIBILITY_REG_3    0x0003
 #define NVM_COMPATIBILITY_BIT_MASK 0x8000
+#define NVM_MAC_ADDR               0x0000
+#define NVM_SUB_DEV_ID             0x000B
+#define NVM_SUB_VEN_ID             0x000C
+#define NVM_DEV_ID                 0x000D
+#define NVM_VEN_ID                 0x000E
+#define NVM_INIT_CTRL_2            0x000F
+#define NVM_INIT_CTRL_4            0x0013
+#define NVM_LED_1_CFG              0x001C
+#define NVM_LED_0_2_CFG            0x001F
+
 
 #define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
 #define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
@@ -639,6 +671,7 @@
 
 #define NVM_PBA_OFFSET_0           8
 #define NVM_PBA_OFFSET_1           9
+#define NVM_RESERVED_WORD		0xFFFF
 #define NVM_PBA_PTR_GUARD          0xFAFA
 #define NVM_WORD_SIZE_BASE_SHIFT   6
 
@@ -696,6 +729,7 @@
 #define I82580_I_PHY_ID      0x015403A0
 #define I350_I_PHY_ID        0x015403B0
 #define M88_VENDOR           0x0141
+#define I210_I_PHY_ID        0x01410C00
 
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
@@ -815,6 +849,7 @@
 #define E1000_IPCNFG_EEE_100M_AN     0x00000004  /* EEE Enable 100M AN */
 #define E1000_EEER_TX_LPI_EN         0x00010000  /* EEE Tx LPI Enable */
 #define E1000_EEER_RX_LPI_EN         0x00020000  /* EEE Rx LPI Enable */
+#define E1000_EEER_FRC_AN            0x10000000 /* Enable EEE in loopback */
 #define E1000_EEER_LPI_FC            0x00040000  /* EEE Enable on FC */
 
 /* SerDes Control */
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index f67cbd3..c2a51dc 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -63,6 +63,13 @@
 #define E1000_DEV_ID_I350_FIBER               0x1522
 #define E1000_DEV_ID_I350_SERDES              0x1523
 #define E1000_DEV_ID_I350_SGMII               0x1524
+#define E1000_DEV_ID_I210_COPPER		0x1533
+#define E1000_DEV_ID_I210_COPPER_OEM1		0x1534
+#define E1000_DEV_ID_I210_COPPER_IT		0x1535
+#define E1000_DEV_ID_I210_FIBER			0x1536
+#define E1000_DEV_ID_I210_SERDES		0x1537
+#define E1000_DEV_ID_I210_SGMII			0x1538
+#define E1000_DEV_ID_I211_COPPER		0x1539
 
 #define E1000_REVISION_2 2
 #define E1000_REVISION_4 4
@@ -83,6 +90,8 @@
 	e1000_82576,
 	e1000_82580,
 	e1000_i350,
+	e1000_i210,
+	e1000_i211,
 	e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */
 };
 
@@ -117,6 +126,7 @@
 	e1000_phy_igp_3,
 	e1000_phy_ife,
 	e1000_phy_82580,
+	e1000_phy_i210,
 };
 
 enum e1000_bus_type {
@@ -313,6 +323,9 @@
 	void (*rar_set)(struct e1000_hw *, u8 *, u32);
 	s32  (*read_mac_addr)(struct e1000_hw *);
 	s32  (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+	s32  (*acquire_swfw_sync)(struct e1000_hw *, u16);
+	void (*release_swfw_sync)(struct e1000_hw *, u16);
+
 };
 
 struct e1000_phy_operations {
@@ -338,6 +351,7 @@
 	s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
 	s32  (*update)(struct e1000_hw *);
 	s32  (*validate)(struct e1000_hw *);
+	s32  (*valid_led_default)(struct e1000_hw *, u16 *);
 };
 
 struct e1000_info {
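
e1000_mac_operations now carries acquire_swfw_sync/release_swfw_sync hooks, so the
shared 82575 code above can call whichever SW/FW semaphore implementation the probed
part needs (the 82575-era one or the new i210 one) without checking the MAC type at
every call site. A simplified, self-contained sketch of that function-pointer
dispatch pattern; the struct layout, names, and selection logic here are
illustrative only, not the driver's:

#include <stdint.h>
#include <stdio.h>

typedef int32_t s32;
typedef uint16_t u16;

struct hw;

/* Slimmed-down version of the two hooks added to e1000_mac_operations */
struct mac_ops {
	s32  (*acquire_swfw_sync)(struct hw *, u16);
	void (*release_swfw_sync)(struct hw *, u16);
};

struct hw {
	int mac_type;          /* stand-in for hw->mac.type */
	struct mac_ops ops;
};

static s32 acquire_82575(struct hw *hw, u16 mask)
{
	printf("82575-style acquire, mask 0x%x\n", mask);
	return 0;
}

static void release_82575(struct hw *hw, u16 mask)
{
	printf("82575-style release, mask 0x%x\n", mask);
}

static s32 acquire_i210(struct hw *hw, u16 mask)
{
	printf("i210-style acquire, mask 0x%x\n", mask);
	return 0;
}

static void release_i210(struct hw *hw, u16 mask)
{
	printf("i210-style release, mask 0x%x\n", mask);
}

enum { MAC_82575, MAC_I210 };

/* Common code only ever calls through the ops table, mirroring the
 * hw->mac.ops.acquire_swfw_sync() call sites introduced above. */
static void init_ops(struct hw *hw)
{
	if (hw->mac_type == MAC_I210) {
		hw->ops.acquire_swfw_sync = acquire_i210;
		hw->ops.release_swfw_sync = release_i210;
	} else {
		hw->ops.acquire_swfw_sync = acquire_82575;
		hw->ops.release_swfw_sync = release_82575;
	}
}

int main(void)
{
	struct hw hw = { .mac_type = MAC_I210 };

	init_ops(&hw);
	if (hw.ops.acquire_swfw_sync(&hw, 0x1) == 0)
		hw.ops.release_swfw_sync(&hw, 0x1);
	return 0;
}
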
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
new file mode 100644
index 0000000..77a5f93
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -0,0 +1,603 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007-2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+
+/* e1000_i210
+ * e1000_i211
+ */
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "e1000_hw.h"
+#include "e1000_i210.h"
+
+static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw);
+static void igb_put_hw_semaphore_i210(struct e1000_hw *hw);
+static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+				u16 *data);
+static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw);
+
+/**
+ *  igb_acquire_nvm_i210 - Request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the necessary semaphores for exclusive access to the EEPROM.
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
+{
+	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  igb_release_nvm_i210 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ *  then release the semaphores acquired.
+ **/
+void igb_release_nvm_i210(struct e1000_hw *hw)
+{
+	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	s32 ret_val = E1000_SUCCESS;
+	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+	while (i < timeout) {
+		if (igb_get_hw_semaphore_i210(hw)) {
+			ret_val = -E1000_ERR_SWFW_SYNC;
+			goto out;
+		}
+
+		swfw_sync = rd32(E1000_SW_FW_SYNC);
+		if (!(swfw_sync & fwmask))
+			break;
+
+		/*
+		 * Firmware currently using resource (fwmask)
+		 */
+		igb_put_hw_semaphore_i210(hw);
+		mdelay(5);
+		i++;
+	}
+
+	if (i == timeout) {
+		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+		ret_val = -E1000_ERR_SWFW_SYNC;
+		goto out;
+	}
+
+	swfw_sync |= swmask;
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+	igb_put_hw_semaphore_i210(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_release_swfw_sync_i210 - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+
+	while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+		; /* Empty */
+
+	swfw_sync = rd32(E1000_SW_FW_SYNC);
+	swfw_sync &= ~mask;
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+	igb_put_hw_semaphore_i210(hw);
+}
+
+/**
+ *  igb_get_hw_semaphore_i210 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 ret_val = E1000_SUCCESS;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = rd32(E1000_SWSM);
+		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		igb_put_hw_semaphore(hw);
+		hw_dbg("Driver can't access the NVM\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_put_hw_semaphore_i210 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	swsm = rd32(E1000_SWSM);
+
+	swsm &= ~E1000_SWSM_SWESMBI;
+
+	wr32(E1000_SWSM, swsm);
+}
+
+/**
+ *  igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the Shadow Ram to read
+ *  @words: number of words to read
+ *  @data: word read from the Shadow Ram
+ *
+ *  Reads a 16 bit word from the Shadow Ram using the EERD register.
+ *  Uses necessary synchronization semaphores.
+ **/
+s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+			     u16 *data)
+{
+	s32 status = E1000_SUCCESS;
+	u16 i, count;
+
+	/* We cannot hold synchronization semaphores for too long,
+	 * because of the forceful takeover procedure. However, it is more efficient
+	 * to read in bursts than synchronizing access for each word. */
+	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+			E1000_EERD_EEWR_MAX_COUNT : (words - i);
+		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+			status = igb_read_nvm_eerd(hw, offset, count,
+						     data + i);
+			hw->nvm.ops.release(hw);
+		} else {
+			status = E1000_ERR_SWFW_SYNC;
+		}
+
+		if (status != E1000_SUCCESS)
+			break;
+	}
+
+	return status;
+}
+
+/**
+ *  igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the Shadow RAM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ *  Writes data to Shadow RAM at offset using EEWR register.
+ *
+ *  If igb_update_nvm_checksum_i210 is not called after this function, the
+ *  data will not be committed to FLASH and also Shadow RAM will most likely
+ *  contain an invalid checksum.
+ *
+ *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ *  partially written.
+ **/
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+			      u16 *data)
+{
+	s32 status = E1000_SUCCESS;
+	u16 i, count;
+
+	/* We cannot hold synchronization semaphores for too long,
+	 * because of the forceful takeover procedure. However, it is more efficient
+	 * to write in bursts than synchronizing access for each word. */
+	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+			E1000_EERD_EEWR_MAX_COUNT : (words - i);
+		if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+			status = igb_write_nvm_srwr(hw, offset, count,
+						      data + i);
+			hw->nvm.ops.release(hw);
+		} else {
+			status = E1000_ERR_SWFW_SYNC;
+		}
+
+		if (status != E1000_SUCCESS)
+			break;
+	}
+
+	return status;
+}
+
+/**
+ *  igb_write_nvm_srwr - Write to Shadow Ram using EEWR
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the Shadow Ram to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the Shadow Ram
+ *
+ *  Writes data to Shadow Ram at offset using EEWR register.
+ *
+ *  If igb_update_nvm_checksum is not called after this function, the
+ *  Shadow Ram will most likely contain an invalid checksum.
+ **/
+static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+				u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, k, eewr = 0;
+	u32 attempts = 100000;
+	s32 ret_val = E1000_SUCCESS;
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * too many words for the offset, and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+			(data[i] << E1000_NVM_RW_REG_DATA) |
+			E1000_NVM_RW_REG_START;
+
+		wr32(E1000_SRWR, eewr);
+
+		for (k = 0; k < attempts; k++) {
+			if (E1000_NVM_RW_REG_DONE &
+			    rd32(E1000_SRWR)) {
+				ret_val = E1000_SUCCESS;
+				break;
+			}
+			udelay(5);
+		}
+
+		if (ret_val != E1000_SUCCESS) {
+			hw_dbg("Shadow RAM write EEWR timed out\n");
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_read_nvm_i211 - Read NVM wrapper function for I211
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the word(s) to read (aka eeprom address)
+ *  @words: number of 16 bit words to read
+ *  @data: pointer to the data read
+ *
+ *  Wrapper function to return data formerly found in the NVM.
+ **/
+s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
+			       u16 *data)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	/* Only the MAC addr is required to be present in the iNVM */
+	switch (offset) {
+	case NVM_MAC_ADDR:
+		ret_val = igb_read_invm_i211(hw, offset, &data[0]);
+		ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]);
+		ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]);
+		if (ret_val != E1000_SUCCESS)
+			hw_dbg("MAC Addr not found in iNVM\n");
+		break;
+	case NVM_ID_LED_SETTINGS:
+	case NVM_INIT_CTRL_2:
+	case NVM_INIT_CTRL_4:
+	case NVM_LED_1_CFG:
+	case NVM_LED_0_2_CFG:
+		igb_read_invm_i211(hw, offset, data);
+		break;
+	case NVM_COMPAT:
+		*data = ID_LED_DEFAULT_I210;
+		break;
+	case NVM_SUB_DEV_ID:
+		*data = hw->subsystem_device_id;
+		break;
+	case NVM_SUB_VEN_ID:
+		*data = hw->subsystem_vendor_id;
+		break;
+	case NVM_DEV_ID:
+		*data = hw->device_id;
+		break;
+	case NVM_VEN_ID:
+		*data = hw->vendor_id;
+		break;
+	default:
+		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
+		*data = NVM_RESERVED_WORD;
+		break;
+	}
+	return ret_val;
+}
+
+/**
+ *  igb_read_invm_i211 - Reads OTP
+ *  @hw: pointer to the HW structure
+ *  @address: the word address (aka eeprom offset) to read
+ *  @data: pointer to the data read
+ *
+ *  Reads 16-bit words from the OTP. Returns an error when the word is not
+ *  stored in OTP.
+ **/
+s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
+{
+	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+	u32 invm_dword;
+	u16 i;
+	u8 record_type, word_address;
+
+	for (i = 0; i < E1000_INVM_SIZE; i++) {
+		invm_dword = rd32(E1000_INVM_DATA_REG(i));
+		/* Get record type */
+		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+			break;
+		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+			if (word_address == (u8)address) {
+				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+				hw_dbg("Read INVM Word 0x%02x = %x",
+					  address, *data);
+				status = E1000_SUCCESS;
+				break;
+			}
+		}
+	}
+	if (status != E1000_SUCCESS)
+		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
+	return status;
+}
+
+/**
+ *  igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
+{
+	s32 status = E1000_SUCCESS;
+	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
+
+	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+
+		/*
+		 * Replace the NVM read op, which grabs the semaphore,
+		 * with one that skips semaphore handling: the
+		 * semaphore is already held at this point.
+		 */
+		read_op_ptr = hw->nvm.ops.read;
+		hw->nvm.ops.read = igb_read_nvm_eerd;
+
+		status = igb_validate_nvm_checksum(hw);
+
+		/* Revert original read operation. */
+		hw->nvm.ops.read = read_op_ptr;
+
+		hw->nvm.ops.release(hw);
+	} else {
+		status = E1000_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+
+/**
+ *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM, and finally commits the EEPROM data to the flash.
+ **/
+s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	/*
+	 * Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
+	if (ret_val != E1000_SUCCESS) {
+		hw_dbg("EEPROM read failed\n");
+		goto out;
+	}
+
+	if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+		/*
+		 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
+		 * because we do not want to take the synchronization
+		 * semaphores twice here.
+		 */
+
+		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
+			if (ret_val) {
+				hw->nvm.ops.release(hw);
+				hw_dbg("NVM Read Error while updating checksum.\n");
+				goto out;
+			}
+			checksum += nvm_data;
+		}
+		checksum = (u16) NVM_SUM - checksum;
+		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+						&checksum);
+		if (ret_val != E1000_SUCCESS) {
+			hw->nvm.ops.release(hw);
+			hw_dbg("NVM Write Error while updating checksum.\n");
+			goto out;
+		}
+
+		hw->nvm.ops.release(hw);
+
+		ret_val = igb_update_flash_i210(hw);
+	} else {
+		ret_val = -E1000_ERR_SWFW_SYNC;
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_update_flash_i210 - Commit EEPROM to the flash
+ *  @hw: pointer to the HW structure
+ *
+ **/
+s32 igb_update_flash_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u32 flup;
+
+	ret_val = igb_pool_flash_update_done_i210(hw);
+	if (ret_val == -E1000_ERR_NVM) {
+		hw_dbg("Flash update time out\n");
+		goto out;
+	}
+
+	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
+	wr32(E1000_EECD, flup);
+
+	ret_val = igb_pool_flash_update_done_i210(hw);
+	if (ret_val == E1000_SUCCESS)
+		hw_dbg("Flash update complete\n");
+	else
+		hw_dbg("Flash update time out\n");
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_pool_flash_update_done_i210 - Poll for FLUDONE status.
+ *  @hw: pointer to the HW structure
+ *
+ **/
+s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = -E1000_ERR_NVM;
+	u32 i, reg;
+
+	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+		reg = rd32(E1000_EECD);
+		if (reg & E1000_EECD_FLUDONE_I210) {
+			ret_val = E1000_SUCCESS;
+			break;
+		}
+		udelay(5);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_valid_led_default_i210 - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+		switch (hw->phy.media_type) {
+		case e1000_media_type_internal_serdes:
+			*data = ID_LED_DEFAULT_I210_SERDES;
+			break;
+		case e1000_media_type_copper:
+		default:
+			*data = ID_LED_DEFAULT_I210;
+			break;
+		}
+	}
+out:
+	return ret_val;
+}
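
igb_update_nvm_checksum_i210() above follows the usual e1000 NVM rule: the 16-bit
words from offset 0 through the checksum word at NVM_CHECKSUM_REG (0x3F) must sum,
modulo 2^16, to NVM_SUM (0xBABA), so the checksum word is written as NVM_SUM minus
the sum of the preceding words. A small standalone sketch of that arithmetic over an
in-memory word array (the sample data is made up, not a real NVM image):

#include <stdint.h>
#include <stdio.h>

#define NVM_CHECKSUM_REG 0x003F
#define NVM_SUM          0xBABA

/* Recompute the checksum word so that words 0..0x3F sum to NVM_SUM. */
static void update_checksum(uint16_t nvm[NVM_CHECKSUM_REG + 1])
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += nvm[i];
	nvm[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - sum);
}

/* Mirror of the validate step: everything, checksum included, sums to 0xBABA. */
static int checksum_valid(const uint16_t nvm[NVM_CHECKSUM_REG + 1])
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum += nvm[i];
	return sum == NVM_SUM;
}

int main(void)
{
	uint16_t nvm[NVM_CHECKSUM_REG + 1] = { 0x1234, 0xABCD, 0x0042 };

	update_checksum(nvm);
	printf("checksum word = 0x%04x, valid = %d\n",
	       nvm[NVM_CHECKSUM_REG], checksum_valid(nvm));
	return 0;
}
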
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
new file mode 100644
index 0000000..5dc2bd3
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -0,0 +1,76 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007-2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_I210_H_
+#define _E1000_I210_H_
+
+extern s32 igb_update_flash_i210(struct e1000_hw *hw);
+extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
+extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
+extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
+			      u16 words, u16 *data);
+extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
+			     u16 words, u16 *data);
+extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
+extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
+extern void igb_release_nvm_i210(struct e1000_hw *hw);
+extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
+			       u16 *data);
+
+#define E1000_STM_OPCODE		0xDB00
+#define E1000_EEPROM_FLASH_SIZE_WORD	0x11
+
+#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
+	(u8)((invm_dword) & 0x7)
+#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
+	(u8)(((invm_dword) & 0x0000FE00) >> 9)
+#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
+	(u16)(((invm_dword) & 0xFFFF0000) >> 16)
+
+enum E1000_INVM_STRUCTURE_TYPE {
+	E1000_INVM_UNINITIALIZED_STRUCTURE		= 0x00,
+	E1000_INVM_WORD_AUTOLOAD_STRUCTURE		= 0x01,
+	E1000_INVM_CSR_AUTOLOAD_STRUCTURE		= 0x02,
+	E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE	= 0x03,
+	E1000_INVM_RSA_KEY_SHA256_STRUCTURE		= 0x04,
+	E1000_INVM_INVALIDATED_STRUCTURE		= 0x0F,
+};
+
+#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS	8
+#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS	1
+
+#define ID_LED_DEFAULT_I210		((ID_LED_OFF1_ON2  << 8) | \
+					 (ID_LED_OFF1_OFF2 <<  4) | \
+					 (ID_LED_DEF1_DEF2))
+#define ID_LED_DEFAULT_I210_SERDES	((ID_LED_DEF1_DEF2 << 8) | \
+					 (ID_LED_DEF1_DEF2 <<  4) | \
+					 (ID_LED_DEF1_DEF2))
+
+#endif
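
The INVM_DWORD_TO_* macros above define how a word-autoload record is packed into a
single 32-bit OTP dword: bits 2:0 carry the record type, bits 15:9 the word address,
and bits 31:16 the word data. A standalone sketch of the same decode, with a made-up
dword value:

#include <stdint.h>
#include <stdio.h>

/* Same field layout as the INVM_DWORD_TO_* macros in e1000_i210.h */
static uint8_t record_type(uint32_t dword)  { return dword & 0x7; }
static uint8_t word_address(uint32_t dword) { return (dword & 0x0000FE00) >> 9; }
static uint16_t word_data(uint32_t dword)   { return (dword & 0xFFFF0000) >> 16; }

int main(void)
{
	/* Made-up example: word-autoload record (type 1) for word 0x0F
	 * carrying the value 0xBEEF. */
	uint32_t dword = (0xBEEFu << 16) | (0x0Fu << 9) | 0x1;

	printf("type=%u addr=0x%02x data=0x%04x\n",
	       record_type(dword), word_address(dword), word_data(dword));
	return 0;
}
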
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index f57338a..819c145 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -658,6 +658,7 @@
 	ret_val = igb_set_fc_watermarks(hw);
 
 out:
+
 	return ret_val;
 }
 
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index fa2c6ba..aa5fcdf 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -710,4 +710,3 @@
 out:
 	return ret_val;
 }
-
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 789de5b..7be98b6 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -35,6 +35,7 @@
 static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
 					       u16 *phy_ctrl);
 static s32  igb_wait_autoneg(struct e1000_hw *hw);
+static s32  igb_set_master_slave_mode(struct e1000_hw *hw);
 
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] =
@@ -570,6 +571,11 @@
 		hw_dbg("Error committing the PHY changes\n");
 		goto out;
 	}
+	if (phy->type == e1000_phy_i210) {
+		ret_val = igb_set_master_slave_mode(hw);
+		if (ret_val)
+			return ret_val;
+	}
 
 out:
 	return ret_val;
@@ -1213,12 +1219,22 @@
 			goto out;
 
 		if (!link) {
-			if (hw->phy.type != e1000_phy_m88 ||
-			    hw->phy.id == I347AT4_E_PHY_ID ||
-			    hw->phy.id == M88E1112_E_PHY_ID) {
-				hw_dbg("Link taking longer than expected.\n");
-			} else {
+			bool reset_dsp = true;
 
+			switch (hw->phy.id) {
+			case I347AT4_E_PHY_ID:
+			case M88E1112_E_PHY_ID:
+			case I210_I_PHY_ID:
+				reset_dsp = false;
+				break;
+			default:
+				if (hw->phy.type != e1000_phy_m88)
+					reset_dsp = false;
+				break;
+			}
+			if (!reset_dsp)
+				hw_dbg("Link taking longer than expected.\n");
+			else {
 				/*
 				 * We didn't get link.
 				 * Reset the DSP and cross our fingers.
@@ -1243,7 +1259,8 @@
 
 	if (hw->phy.type != e1000_phy_m88 ||
 	    hw->phy.id == I347AT4_E_PHY_ID ||
-	    hw->phy.id == M88E1112_E_PHY_ID)
+	    hw->phy.id == M88E1112_E_PHY_ID ||
+	    hw->phy.id == I210_I_PHY_ID)
 		goto out;
 
 	ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
@@ -1441,6 +1458,7 @@
 	u16 phy_data, offset, mask;
 
 	switch (phy->type) {
+	case e1000_phy_i210:
 	case e1000_phy_m88:
 	case e1000_phy_gg82563:
 		offset	= M88E1000_PHY_SPEC_STATUS;
@@ -1476,7 +1494,7 @@
  *
  *  Polarity is determined based on the PHY specific status register.
  **/
-static s32 igb_check_polarity_m88(struct e1000_hw *hw)
+s32 igb_check_polarity_m88(struct e1000_hw *hw)
 {
 	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
@@ -1665,6 +1683,7 @@
 	u16 phy_data, phy_data2, index, default_page, is_cm;
 
 	switch (hw->phy.id) {
+	case I210_I_PHY_ID:
 	case I347AT4_E_PHY_ID:
 		/* Remember the original page select and set it to 7 */
 		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -2129,10 +2148,16 @@
 void igb_power_up_phy_copper(struct e1000_hw *hw)
 {
 	u16 mii_reg = 0;
+	u16 power_reg = 0;
 
 	/* The PHY will retain its settings across a power down/up cycle */
 	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
 	mii_reg &= ~MII_CR_POWER_DOWN;
+	if (hw->phy.type == e1000_phy_i210) {
+		hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
+		power_reg &= ~GS40G_CS_POWER_DOWN;
+		hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
+	}
 	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
 }
 
@@ -2146,10 +2171,18 @@
 void igb_power_down_phy_copper(struct e1000_hw *hw)
 {
 	u16 mii_reg = 0;
+	u16 power_reg = 0;
 
 	/* The PHY will retain its settings across a power down/up cycle */
 	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
 	mii_reg |= MII_CR_POWER_DOWN;
+
+	/* i210 Phy requires an additional bit for power up/down */
+	if (hw->phy.type == e1000_phy_i210) {
+		hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
+		power_reg |= GS40G_CS_POWER_DOWN;
+		hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
+	}
 	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
 	msleep(1);
 }
@@ -2345,3 +2378,103 @@
 out:
 	return ret_val;
 }
+
+/**
+ *  igb_write_phy_reg_gs40g - Write GS40G PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: lower half is register offset to write to
+ *     upper half is page to use.
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+	u16 page = offset >> GS40G_PAGE_SHIFT;
+
+	offset = offset & GS40G_OFFSET_MASK;
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+	if (ret_val)
+		goto release;
+	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+
+release:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  igb_read_phy_reg_gs40g - Read GS40G  PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: lower half is register offset to read from
+ *     upper half is page to use.
+ *  @data: data to read at register offset
+ *
+ *  Acquires semaphore, if necessary, then reads the data in the PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u16 page = offset >> GS40G_PAGE_SHIFT;
+
+	offset = offset & GS40G_OFFSET_MASK;
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+	if (ret_val)
+		goto release;
+	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+
+release:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  igb_set_master_slave_mode - Setup PHY for Master/slave mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Master/slave mode
+ **/
+static s32 igb_set_master_slave_mode(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+
+	/* Resolve Master/Slave mode */
+	ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* load defaults for future use */
+	hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
+				   ((phy_data & CR_1000T_MS_VALUE) ?
+				    e1000_ms_force_master :
+				    e1000_ms_force_slave) : e1000_ms_auto;
+
+	switch (hw->phy.ms_type) {
+	case e1000_ms_force_master:
+		phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+		break;
+	case e1000_ms_force_slave:
+		phy_data |= CR_1000T_MS_ENABLE;
+		phy_data &= ~(CR_1000T_MS_VALUE);
+		break;
+	case e1000_ms_auto:
+		phy_data &= ~CR_1000T_MS_ENABLE;
+		/* fall-through */
+	default:
+		break;
+	}
+
+	return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
+}
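
igb_read_phy_reg_gs40g()/igb_write_phy_reg_gs40g() above treat the 32-bit offset as
a page number in the upper half and a register offset in the lower half
(GS40G_PAGE_SHIFT is 16, GS40G_OFFSET_MASK is 0xFFFF), writing the page to
GS40G_PAGE_SELECT before the actual MDIC access. A minimal sketch of just that
split, with a made-up combined offset:

#include <stdint.h>
#include <stdio.h>

#define GS40G_PAGE_SHIFT  16
#define GS40G_OFFSET_MASK 0xFFFF

int main(void)
{
	/* Hypothetical combined offset: register 0x15 on page 2, i.e. the
	 * GS40G_PAGE_2 | GS40G_MAC_REG2 style of encoding. */
	uint32_t offset = (2u << GS40G_PAGE_SHIFT) | 0x15;

	uint16_t page = offset >> GS40G_PAGE_SHIFT;  /* written to GS40G_PAGE_SELECT */
	uint16_t reg  = offset & GS40G_OFFSET_MASK;  /* the actual MDIC register */

	printf("page=%u reg=0x%02x\n", page, reg);
	return 0;
}
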
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 4c32ac6..34e4061 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -73,6 +73,9 @@
 s32  igb_get_phy_info_82580(struct e1000_hw *hw);
 s32  igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
 s32  igb_get_cable_length_82580(struct e1000_hw *hw);
+s32  igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_check_polarity_m88(struct e1000_hw *hw);
 
 /* IGP01E1000 Specific Registers */
 #define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
@@ -114,6 +117,13 @@
 /* I82580 PHY Diagnostics Status */
 #define I82580_DSTATUS_CABLE_LENGTH       0x03FC
 #define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* 82580 PHY Power Management */
+#define E1000_82580_PHY_POWER_MGMT	0xE14
+#define E1000_82580_PM_SPD		0x0001 /* Smart Power Down */
+#define E1000_82580_PM_D0_LPLU		0x0002 /* For D0a states */
+#define E1000_82580_PM_D3_LPLU		0x0004 /* For all other states */
+
 /* Enable flexible speed on link-up */
 #define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
 #define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
@@ -133,4 +143,16 @@
 
 #define E1000_CABLE_LENGTH_UNDEFINED      0xFF
 
+/* GS40G - I210 PHY defines */
+#define GS40G_PAGE_SELECT		0x16
+#define GS40G_PAGE_SHIFT		16
+#define GS40G_OFFSET_MASK		0xFFFF
+#define GS40G_PAGE_2			0x20000
+#define GS40G_MAC_REG2			0x15
+#define GS40G_MAC_LB			0x4140
+#define GS40G_MAC_SPEED_1G		0X0006
+#define GS40G_COPPER_SPEC		0x0010
+#define GS40G_CS_POWER_DOWN		0x0002
+#define GS40G_LINE_LB			0x4000
+
 #endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index ccdf36d..35d1e4f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -352,4 +352,18 @@
 #define E1000_O2BGPTC   0x08FE4 /* OS2BMC packets received by BMC */
 #define E1000_O2BSPC    0x0415C /* OS2BMC packets transmitted by host */
 
+#define E1000_SRWR		0x12018  /* Shadow Ram Write Register - RW */
+#define E1000_I210_FLMNGCTL	0x12038
+#define E1000_I210_FLMNGDATA	0x1203C
+#define E1000_I210_FLMNGCNT	0x12040
+
+#define E1000_I210_FLSWCTL	0x12048
+#define E1000_I210_FLSWDATA	0x1204C
+#define E1000_I210_FLSWCNT	0x12050
+
+#define E1000_I210_FLA		0x1201C
+
+#define E1000_INVM_DATA_REG(_n)	(0x12120 + 4*(_n))
+#define E1000_INVM_SIZE		64 /* Number of INVM Data Registers */
+
 #endif
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8e33bdd..ae6d3f3 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -35,8 +35,8 @@
 #include "e1000_82575.h"
 
 #include <linux/clocksource.h>
-#include <linux/timecompare.h>
 #include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
 #include <linux/bitops.h>
 #include <linux/if_vlan.h>
 
@@ -65,10 +65,13 @@
 #define MAX_Q_VECTORS                      8
 
 /* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES                  (adapter->vfs_allocated_count ? 2 : \
-                                           (hw->mac.type > e1000_82575 ? 8 : 4))
+#define IGB_MAX_RX_QUEUES		((adapter->vfs_allocated_count ? 2 : \
+					(hw->mac.type > e1000_82575 ? 8 : 4)))
+#define IGB_MAX_RX_QUEUES_I210             4
+#define IGB_MAX_RX_QUEUES_I211             2
 #define IGB_MAX_TX_QUEUES                  16
-
+#define IGB_MAX_TX_QUEUES_I210             4
+#define IGB_MAX_TX_QUEUES_I211             2
 #define IGB_MAX_VF_MC_ENTRIES              30
 #define IGB_MAX_VF_FUNCTIONS               8
 #define IGB_MAX_VFTA_ENTRIES               128
@@ -328,9 +331,6 @@
 
 	/* OS defined structs */
 	struct pci_dev *pdev;
-	struct cyclecounter cycles;
-	struct timecounter clock;
-	struct timecompare compare;
 	struct hwtstamp_config hwtstamp_config;
 
 	spinlock_t stats64_lock;
@@ -364,6 +364,13 @@
 	u32 wvbr;
 	int node;
 	u32 *shadow_vfta;
+
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info caps;
+	struct delayed_work overflow_work;
+	spinlock_t tmreg_lock;
+	struct cyclecounter cc;
+	struct timecounter tc;
 };
 
 #define IGB_FLAG_HAS_MSI           (1 << 0)
@@ -378,7 +385,6 @@
 #define IGB_DMCTLX_DCFLUSH_DIS     0x80000000  /* Disable DMA Coal Flush */
 
 #define IGB_82576_TSYNC_SHIFT 19
-#define IGB_82580_TSYNC_SHIFT 24
 #define IGB_TS_HDR_LEN        16
 enum e1000_state_t {
 	__IGB_TESTING,
@@ -414,7 +420,15 @@
 extern bool igb_has_link(struct igb_adapter *adapter);
 extern void igb_set_ethtool_ops(struct net_device *);
 extern void igb_power_up_link(struct igb_adapter *);
+#ifdef CONFIG_IGB_PTP
+extern void igb_ptp_init(struct igb_adapter *adapter);
+extern void igb_ptp_remove(struct igb_adapter *adapter);
 
+extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+				   struct skb_shared_hwtstamps *hwtstamps,
+				   u64 systim);
+
+#endif
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
 	if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e10821a..812d4f9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -335,7 +335,7 @@
 
 static int igb_get_regs_len(struct net_device *netdev)
 {
-#define IGB_REGS_LEN 551
+#define IGB_REGS_LEN 739
 	return IGB_REGS_LEN * sizeof(u32);
 }
 
@@ -552,10 +552,49 @@
 	regs_buff[548] = rd32(E1000_TDFT);
 	regs_buff[549] = rd32(E1000_TDFHS);
 	regs_buff[550] = rd32(E1000_TDFPC);
-	regs_buff[551] = adapter->stats.o2bgptc;
-	regs_buff[552] = adapter->stats.b2ospc;
-	regs_buff[553] = adapter->stats.o2bspc;
-	regs_buff[554] = adapter->stats.b2ogprc;
+
+	if (hw->mac.type > e1000_82580) {
+		regs_buff[551] = adapter->stats.o2bgptc;
+		regs_buff[552] = adapter->stats.b2ospc;
+		regs_buff[553] = adapter->stats.o2bspc;
+		regs_buff[554] = adapter->stats.b2ogprc;
+	}
+
+	if (hw->mac.type != e1000_82576)
+		return;
+	for (i = 0; i < 12; i++)
+		regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
+	for (i = 0; i < 4; i++)
+		regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));
+
+	for (i = 0; i < 12; i++)
+		regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
+	for (i = 0; i < 12; i++)
+		regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
 }
 
 static int igb_get_eeprom_len(struct net_device *netdev)
@@ -624,6 +663,9 @@
 	if (eeprom->len == 0)
 		return -EOPNOTSUPP;
 
+	if (hw->mac.type == e1000_i211)
+		return -EOPNOTSUPP;
+
 	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
 		return -EFAULT;
 
@@ -851,6 +893,36 @@
 #define TABLE64_TEST_LO	5
 #define TABLE64_TEST_HI	6
 
+/* i210 reg test */
+static struct igb_reg_test reg_test_i210[] = {
+	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_FCAH,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+	{ E1000_FCT,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+	{ E1000_RDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_RDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	/* RDH is read-only for i210, only test RDT. */
+	{ E1000_RDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_FCRTH,	   0x100, 1,  PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+	{ E1000_FCTTV,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_TIPG,	   0x100, 1,  PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+	{ E1000_TDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_TDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_TDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	{ E1000_TDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+	{ E1000_RA,	   0, 16, TABLE64_TEST_LO,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RA,	   0, 16, TABLE64_TEST_HI,
+						0x900FFFFF, 0xFFFFFFFF },
+	{ E1000_MTA,	   0, 128, TABLE32_TEST,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ 0, 0, 0, 0, 0 }
+};
+
 /* i350 reg test */
 static struct igb_reg_test reg_test_i350[] = {
 	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1073,6 +1145,11 @@
 		test = reg_test_i350;
 		toggle = 0x7FEFF3FF;
 		break;
+	case e1000_i210:
+	case e1000_i211:
+		test = reg_test_i210;
+		toggle = 0x7FEFF3FF;
+		break;
 	case e1000_82580:
 		test = reg_test_82580;
 		toggle = 0x7FEFF3FF;
@@ -1154,23 +1231,13 @@
 
 static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
 {
-	u16 temp;
-	u16 checksum = 0;
-	u16 i;
-
 	*data = 0;
-	/* Read and add up the contents of the EEPROM */
-	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
-		if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
-			*data = 1;
-			break;
-		}
-		checksum += temp;
-	}
 
-	/* If Checksum is not Correct return error else test passed */
-	if ((checksum != (u16) NVM_SUM) && !(*data))
-		*data = 2;
+	/* Validate eeprom on all parts but i211 */
+	if (adapter->hw.mac.type != e1000_i211) {
+		if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
+			*data = 2;
+	}
 
 	return *data;
 }
@@ -1236,6 +1303,8 @@
 		ics_mask = 0x77DCFED5;
 		break;
 	case e1000_i350:
+	case e1000_i210:
+	case e1000_i211:
 		ics_mask = 0x77DCFED5;
 		break;
 	default:
@@ -1402,23 +1471,35 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_reg = 0;
+	u16 phy_reg = 0;
 
 	hw->mac.autoneg = false;
 
-	if (hw->phy.type == e1000_phy_m88) {
+	switch (hw->phy.type) {
+	case e1000_phy_m88:
 		/* Auto-MDI/MDIX Off */
 		igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
 		/* reset to update Auto-MDI/MDIX */
 		igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
 		/* autoneg off */
 		igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
-	} else if (hw->phy.type == e1000_phy_82580) {
+		break;
+	case e1000_phy_82580:
 		/* enable MII loopback */
 		igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
+		break;
+	case e1000_phy_i210:
+		/* set loopback speed in PHY */
+		igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
+					&phy_reg);
+		phy_reg |= GS40G_MAC_SPEED_1G;
+		igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
+					phy_reg);
+		ctrl_reg = rd32(E1000_CTRL_EXT);
+	default:
+		break;
 	}
 
-	ctrl_reg = rd32(E1000_CTRL);
-
 	/* force 1000, set loopback */
 	igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
 
@@ -1431,7 +1512,7 @@
 		     E1000_CTRL_FD |	 /* Force Duplex to FULL */
 		     E1000_CTRL_SLU);	 /* Set link up enable bit */
 
-	if (hw->phy.type == e1000_phy_m88)
+	if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
 		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
 
 	wr32(E1000_CTRL, ctrl_reg);
@@ -1439,7 +1520,7 @@
 	/* Disable the receiver on the PHY so when a cable is plugged in, the
 	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
 	 */
-	if (hw->phy.type == e1000_phy_m88)
+	if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
 		igb_phy_disable_receiver(adapter);
 
 	udelay(500);
@@ -1704,6 +1785,14 @@
 		*data = 0;
 		goto out;
 	}
+	if ((adapter->hw.mac.type == e1000_i210)
+		|| (adapter->hw.mac.type == e1000_i211)) {
+		dev_err(&adapter->pdev->dev,
+			"Loopback test not supported "
+			"on this part at this time.\n");
+		*data = 0;
+		goto out;
+	}
 	*data = igb_setup_desc_rings(adapter);
 	if (*data)
 		goto out;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5ec3159..dd3bfe8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,8 +60,8 @@
 #include "igb.h"
 
 #define MAJ 3
-#define MIN 2
-#define BUILD 10
+#define MIN 4
+#define BUILD 7
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -75,6 +75,11 @@
 };
 
 static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
@@ -114,7 +119,6 @@
 static void igb_setup_mrqc(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
-static void igb_init_hw_timer(struct igb_adapter *adapter);
 static int igb_sw_init(struct igb_adapter *);
 static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
@@ -565,33 +569,6 @@
 	return;
 }
 
-
-/**
- * igb_read_clock - read raw cycle counter (to be used by time counter)
- */
-static cycle_t igb_read_clock(const struct cyclecounter *tc)
-{
-	struct igb_adapter *adapter =
-		container_of(tc, struct igb_adapter, cycles);
-	struct e1000_hw *hw = &adapter->hw;
-	u64 stamp = 0;
-	int shift = 0;
-
-	/*
-	 * The timestamp latches on lowest register read. For the 82580
-	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
-	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
-	 */
-	if (hw->mac.type >= e1000_82580) {
-		stamp = rd32(E1000_SYSTIMR) >> 8;
-		shift = IGB_82580_TSYNC_SHIFT;
-	}
-
-	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
-	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
-	return stamp;
-}
-
 /**
  * igb_get_hw_dev - return device
  * used by hardware layer to print debugging information
@@ -669,6 +646,8 @@
 	case e1000_82575:
 	case e1000_82580:
 	case e1000_i350:
+	case e1000_i210:
+	case e1000_i211:
 	default:
 		for (; i < adapter->num_rx_queues; i++)
 			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -755,8 +734,11 @@
 		if (adapter->hw.mac.type >= e1000_82576)
 			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
 
-		/* On i350, loopback VLAN packets have the tag byte-swapped. */
-		if (adapter->hw.mac.type == e1000_i350)
+		/*
+		 * On i350, i210, and i211, loopback VLAN packets
+		 * have the tag byte-swapped.
+		 */
+		if (adapter->hw.mac.type >= e1000_i350)
 			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
 
 		adapter->rx_ring[i] = ring;
@@ -850,6 +832,8 @@
 		break;
 	case e1000_82580:
 	case e1000_i350:
+	case e1000_i210:
+	case e1000_i211:
 		/*
 		 * On 82580 and newer adapters the scheme is similar to 82576
 		 * however instead of ordering column-major we have things
@@ -916,6 +900,8 @@
 	case e1000_82576:
 	case e1000_82580:
 	case e1000_i350:
+	case e1000_i210:
+	case e1000_i211:
 		/* Turn on MSI-X capability first, or our settings
 		 * won't stick.  And it will take days to debug. */
 		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@ -1062,6 +1048,11 @@
 	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
 		numvecs += adapter->num_tx_queues;
 
+	/* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
+	if ((adapter->hw.mac.type == e1000_i210)
+		|| (adapter->hw.mac.type == e1000_i211))
+		numvecs = 4;
+
 	/* store the number of vectors reserved for queues */
 	adapter->num_q_vectors = numvecs;
 
@@ -1069,6 +1060,7 @@
 	numvecs++;
 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
 					GFP_KERNEL);
+
 	if (!adapter->msix_entries)
 		goto msi_only;
 
@@ -1111,9 +1103,12 @@
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
 	/* Notify the stack of the (possibly) reduced queue counts. */
+	rtnl_lock();
 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
-	return netif_set_real_num_rx_queues(adapter->netdev,
-					    adapter->num_rx_queues);
+	err = netif_set_real_num_rx_queues(adapter->netdev,
+		adapter->num_rx_queues);
+	rtnl_unlock();
+	return err;
 }
 
 /**
@@ -1659,6 +1654,8 @@
 		pba &= E1000_RXPBS_SIZE_MASK_82576;
 		break;
 	case e1000_82575:
+	case e1000_i210:
+	case e1000_i211:
 	default:
 		pba = E1000_PBA_34K;
 		break;
@@ -1743,6 +1740,13 @@
 	if (hw->mac.ops.init_hw(hw))
 		dev_err(&pdev->dev, "Hardware Error\n");
 
+	/*
+	 * Flow control settings reset on hardware reset, so guarantee flow
+	 * control is off when forcing speed.
+	 */
+	if (!hw->mac.autoneg)
+		igb_force_mac_fc(hw);
+
 	igb_init_dmac(adapter, pba);
 	if (!netif_running(adapter->netdev))
 		igb_power_down_link(adapter);
@@ -1847,7 +1851,7 @@
 	 */
 	if (pdev->is_virtfn) {
 		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
-		     pci_name(pdev), pdev->vendor, pdev->device);
+			pci_name(pdev), pdev->vendor, pdev->device);
 		return -EINVAL;
 	}
 
@@ -2001,11 +2005,16 @@
 	 * known good starting state */
 	hw->mac.ops.reset_hw(hw);
 
-	/* make sure the NVM is good */
-	if (hw->nvm.ops.validate(hw) < 0) {
-		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
-		err = -EIO;
-		goto err_eeprom;
+	/*
+	 * make sure the NVM is good; i211 parts have a special NVM that
+	 * doesn't contain a checksum
+	 */
+	if (hw->mac.type != e1000_i211) {
+		if (hw->nvm.ops.validate(hw) < 0) {
+			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
+			err = -EIO;
+			goto err_eeprom;
+		}
 	}
 
 	/* copy the MAC address out of the NVM */
@@ -2110,9 +2119,11 @@
 	}
 
 #endif
+#ifdef CONFIG_IGB_PTP
 	/* do hw tstamp init after resetting */
-	igb_init_hw_timer(adapter);
+	igb_ptp_init(adapter);
 
+#endif
 	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
 	/* print bus type/speed/width info */
 	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2137,6 +2148,8 @@
 		adapter->num_rx_queues, adapter->num_tx_queues);
 	switch (hw->mac.type) {
 	case e1000_i350:
+	case e1000_i210:
+	case e1000_i211:
 		igb_set_eee_i350(hw);
 		break;
 	default:
@@ -2184,7 +2197,10 @@
 	struct e1000_hw *hw = &adapter->hw;
 
 	pm_runtime_get_noresume(&pdev->dev);
+#ifdef CONFIG_IGB_PTP
+	igb_ptp_remove(adapter);
 
+#endif
 	/*
 	 * The watchdog timer may be rescheduled, so explicitly
 	 * disable watchdog from being rescheduled.
@@ -2260,9 +2276,14 @@
 {
 #ifdef CONFIG_PCI_IOV
 	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_hw *hw = &adapter->hw;
 	int old_vfs = igb_find_enabled_vfs(adapter);
 	int i;
 
+	/* Virtualization features not supported on i210 family. */
+	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+		return;
+
 	if (old_vfs) {
 		dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
 			 "max_vfs setting of %d\n", old_vfs, max_vfs);
@@ -2274,6 +2295,7 @@
 
 	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
 				sizeof(struct vf_data_storage), GFP_KERNEL);
+
 	/* if allocation failed then we do not support SR-IOV */
 	if (!adapter->vf_data) {
 		adapter->vfs_allocated_count = 0;
@@ -2304,112 +2326,6 @@
 }
 
 /**
- * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
- * @adapter: board private structure to initialize
- *
- * igb_init_hw_timer initializes the function pointer and values for the hw
- * timer found in hardware.
- **/
-static void igb_init_hw_timer(struct igb_adapter *adapter)
-{
-	struct e1000_hw *hw = &adapter->hw;
-
-	switch (hw->mac.type) {
-	case e1000_i350:
-	case e1000_82580:
-		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-		adapter->cycles.read = igb_read_clock;
-		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-		adapter->cycles.mult = 1;
-		/*
-		 * The 82580 timesync updates the system timer every 8ns by 8ns
-		 * and the value cannot be shifted.  Instead we need to shift
-		 * the registers to generate a 64bit timer value.  As a result
-		 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
-		 * 24 in order to generate a larger value for synchronization.
-		 */
-		adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
-		/* disable system timer temporarily by setting bit 31 */
-		wr32(E1000_TSAUXC, 0x80000000);
-		wrfl();
-
-		/* Set registers so that rollover occurs soon to test this. */
-		wr32(E1000_SYSTIMR, 0x00000000);
-		wr32(E1000_SYSTIML, 0x80000000);
-		wr32(E1000_SYSTIMH, 0x000000FF);
-		wrfl();
-
-		/* enable system timer by clearing bit 31 */
-		wr32(E1000_TSAUXC, 0x0);
-		wrfl();
-
-		timecounter_init(&adapter->clock,
-				 &adapter->cycles,
-				 ktime_to_ns(ktime_get_real()));
-		/*
-		 * Synchronize our NIC clock against system wall clock. NIC
-		 * time stamp reading requires ~3us per sample, each sample
-		 * was pretty stable even under load => only require 10
-		 * samples for each offset comparison.
-		 */
-		memset(&adapter->compare, 0, sizeof(adapter->compare));
-		adapter->compare.source = &adapter->clock;
-		adapter->compare.target = ktime_get_real;
-		adapter->compare.num_samples = 10;
-		timecompare_update(&adapter->compare, 0);
-		break;
-	case e1000_82576:
-		/*
-		 * Initialize hardware timer: we keep it running just in case
-		 * that some program needs it later on.
-		 */
-		memset(&adapter->cycles, 0, sizeof(adapter->cycles));
-		adapter->cycles.read = igb_read_clock;
-		adapter->cycles.mask = CLOCKSOURCE_MASK(64);
-		adapter->cycles.mult = 1;
-		/**
-		 * Scale the NIC clock cycle by a large factor so that
-		 * relatively small clock corrections can be added or
-		 * subtracted at each clock tick. The drawbacks of a large
-		 * factor are a) that the clock register overflows more quickly
-		 * (not such a big deal) and b) that the increment per tick has
-		 * to fit into 24 bits.  As a result we need to use a shift of
-		 * 19 so we can fit a value of 16 into the TIMINCA register.
-		 */
-		adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
-		wr32(E1000_TIMINCA,
-		                (1 << E1000_TIMINCA_16NS_SHIFT) |
-		                (16 << IGB_82576_TSYNC_SHIFT));
-
-		/* Set registers so that rollover occurs soon to test this. */
-		wr32(E1000_SYSTIML, 0x00000000);
-		wr32(E1000_SYSTIMH, 0xFF800000);
-		wrfl();
-
-		timecounter_init(&adapter->clock,
-				 &adapter->cycles,
-				 ktime_to_ns(ktime_get_real()));
-		/*
-		 * Synchronize our NIC clock against system wall clock. NIC
-		 * time stamp reading requires ~3us per sample, each sample
-		 * was pretty stable even under load => only require 10
-		 * samples for each offset comparison.
-		 */
-		memset(&adapter->compare, 0, sizeof(adapter->compare));
-		adapter->compare.source = &adapter->clock;
-		adapter->compare.target = ktime_get_real;
-		adapter->compare.num_samples = 10;
-		timecompare_update(&adapter->compare, 0);
-		break;
-	case e1000_82575:
-		/* 82575 does not support timesync */
-	default:
-		break;
-	}
-
-}
-
-/**
  * igb_sw_init - Initialize general software structures (struct igb_adapter)
  * @adapter: board private structure to initialize
  *
@@ -2454,11 +2370,28 @@
 		} else
 			adapter->vfs_allocated_count = max_vfs;
 		break;
+	case e1000_i210:
+	case e1000_i211:
+		adapter->vfs_allocated_count = 0;
+		break;
 	default:
 		break;
 	}
 #endif /* CONFIG_PCI_IOV */
-	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+	switch (hw->mac.type) {
+	case e1000_i210:
+		adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210,
+			num_online_cpus());
+		break;
+	case e1000_i211:
+		adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211,
+			num_online_cpus());
+		break;
+	default:
+		adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES,
+		num_online_cpus());
+		break;
+	}
 	/* i350 cannot do RSS and SR-IOV at the same time */
 	if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
 		adapter->rss_queues = 1;
@@ -2488,7 +2421,7 @@
 	/* Explicitly disable IRQ since the NIC can be in any state. */
 	igb_irq_disable(adapter);
 
-	if (hw->mac.type == e1000_i350)
+	if (hw->mac.type >= e1000_i350)
 		adapter->flags &= ~IGB_FLAG_DMAC;
 
 	set_bit(__IGB_DOWN, &adapter->state);
@@ -2771,8 +2704,6 @@
 
 	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
 	wr32(E1000_TXDCTL(reg_idx), txdctl);
-
-	netdev_tx_reset_queue(txring_txq(ring));
 }
 
 /**
@@ -2943,6 +2874,17 @@
 
 	/* Don't need to set TUOFL or IPOFL, they default to 1 */
 	wr32(E1000_RXCSUM, rxcsum);
+	/*
+	 * Generate RSS hash based on TCP port numbers and/or
+	 * IPv4/v6 src and dst addresses since UDP cannot be
+	 * hashed reliably due to IP fragmentation
+	 */
+
+	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
+	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
+	       E1000_MRQC_RSS_FIELD_IPV6 |
+	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
+	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
 
 	/* If VMDq is enabled then we set the appropriate mode for that, else
 	 * we default to RSS so that an RSS hash is calculated per packet even
@@ -2958,25 +2900,15 @@
 			wr32(E1000_VT_CTL, vtctl);
 		}
 		if (adapter->rss_queues > 1)
-			mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+			mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
 		else
-			mrqc = E1000_MRQC_ENABLE_VMDQ;
+			mrqc |= E1000_MRQC_ENABLE_VMDQ;
 	} else {
-		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+		if (hw->mac.type != e1000_i211)
+			mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
 	}
 	igb_vmm_control(adapter);
 
-	/*
-	 * Generate RSS hash based on TCP port numbers and/or
-	 * IPv4/v6 src and dst addresses since UDP cannot be
-	 * hashed reliably due to IP fragmentation
-	 */
-	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
-		E1000_MRQC_RSS_FIELD_IPV4_TCP |
-		E1000_MRQC_RSS_FIELD_IPV6 |
-		E1000_MRQC_RSS_FIELD_IPV6_TCP |
-		E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-
 	wr32(E1000_MRQC, mrqc);
 }
 
@@ -3282,6 +3214,8 @@
 		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
 	}
 
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_buffer_info, 0, size);
 
@@ -3576,7 +3510,7 @@
 	 * we will have issues with VLAN tag stripping not being done for frames
 	 * that are only arriving because we are the default pool
 	 */
-	if (hw->mac.type < e1000_82576)
+	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
 		return;
 
 	vmolr |= rd32(E1000_VMOLR(vfn)) &
@@ -3673,7 +3607,7 @@
 	bool ret = false;
 	u32 ctrl_ext, thstat;
 
-	/* check for thermal sensor event on i350, copper only */
+	/* check for thermal sensor event on i350 copper only */
 	if (hw->mac.type == e1000_i350) {
 		thstat = rd32(E1000_THSTAT);
 		ctrl_ext = rd32(E1000_CTRL_EXT);
@@ -5718,35 +5652,7 @@
 	return 0;
 }
 
-/**
- * igb_systim_to_hwtstamp - convert system time value to hw timestamp
- * @adapter: board private structure
- * @shhwtstamps: timestamp structure to update
- * @regval: unsigned 64bit system time value.
- *
- * We need to convert the system time value stored in the RX/TXSTMP registers
- * into a hwtstamp which can be used by the upper level timestamping functions
- */
-static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
-                                   struct skb_shared_hwtstamps *shhwtstamps,
-                                   u64 regval)
-{
-	u64 ns;
-
-	/*
-	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
-	 * 24 to match clock shift we setup earlier.
-	 */
-	if (adapter->hw.mac.type >= e1000_82580)
-		regval <<= IGB_82580_TSYNC_SHIFT;
-
-	ns = timecounter_cyc2time(&adapter->clock, regval);
-	timecompare_update(&adapter->compare, ns);
-	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
-	shhwtstamps->hwtstamp = ns_to_ktime(ns);
-	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
-}
-
+#ifdef CONFIG_IGB_PTP
 /**
  * igb_tx_hwtstamp - utility function which checks for TX time stamp
  * @q_vector: pointer to q_vector containing needed info
@@ -5776,6 +5682,7 @@
 	skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
 }
 
+#endif
 /**
  * igb_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: pointer to q_vector containing needed info
@@ -5819,9 +5726,11 @@
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
 
+#ifdef CONFIG_IGB_PTP
 		/* retrieve hardware timestamp */
 		igb_tx_hwtstamp(q_vector, tx_buffer);
 
+#endif
 		/* free the skb */
 		dev_kfree_skb_any(tx_buffer->skb);
 		tx_buffer->skb = NULL;
@@ -5993,6 +5902,7 @@
 		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 }
 
+#ifdef CONFIG_IGB_PTP
 static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
 			    union e1000_adv_rx_desc *rx_desc,
 			    struct sk_buff *skb)
@@ -6032,6 +5942,7 @@
 	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 }
 
+#endif
 static void igb_rx_vlan(struct igb_ring *ring,
 			union e1000_adv_rx_desc *rx_desc,
 			struct sk_buff *skb)
@@ -6142,7 +6053,9 @@
 			goto next_desc;
 		}
 
+#ifdef CONFIG_IGB_PTP
 		igb_rx_hwtstamp(q_vector, rx_desc, skb);
+#endif
 		igb_rx_hash(rx_ring, rx_desc, skb);
 		igb_rx_checksum(rx_ring, rx_desc, skb);
 		igb_rx_vlan(rx_ring, rx_desc, skb);
@@ -6796,18 +6709,7 @@
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
-	if (!rtnl_is_locked()) {
-		/*
-		 * shut up ASSERT_RTNL() warning in
-		 * netif_set_real_num_tx/rx_queues.
-		 */
-		rtnl_lock();
-		err = igb_init_interrupt_scheme(adapter);
-		rtnl_unlock();
-	} else {
-		err = igb_init_interrupt_scheme(adapter);
-	}
-	if (err) {
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
@@ -7170,6 +7072,8 @@
 
 	switch (hw->mac.type) {
 	case e1000_82575:
+	case e1000_i210:
+	case e1000_i211:
 	default:
 		/* replication is not supported for 82575 */
 		return;
@@ -7243,6 +7147,9 @@
 
 			/* watchdog timer= +-1000 usec in 32usec intervals */
 			reg |= (1000 >> 5);
+
+			/* Clear the BMC-to-OS Watchdog Enable bit */
+			reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
 			wr32(E1000_DMACR, reg);
 
 			/*
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
new file mode 100644
index 0000000..d5ee7fa
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -0,0 +1,385 @@
+/*
+ * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
+ *
+ * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "igb.h"
+
+#define INCVALUE_MASK		0x7fffffff
+#define ISGN			0x80000000
+
+/*
+ * The 82580 timesync updates the system timer every 8ns by 8ns,
+ * and this update value cannot be reprogrammed.
+ *
+ * Neither the 82576 nor the 82580 offer registers wide enough to hold
+ * nanoseconds time values for very long. For the 82580, SYSTIM always
+ * counts nanoseconds, but the upper 24 bits are not available. The
+ * frequency is adjusted by changing the 32 bit fractional nanoseconds
+ * register, TIMINCA.
+ *
+ * For the 82576, the SYSTIM register time unit is affected by the
+ * choice of the 24 bit TIMINCA:IV (incvalue) field. Five bits of this
+ * field are needed to provide the nominal 16 nanosecond period,
+ * leaving 19 bits for fractional nanoseconds.
+ *
+ * We scale the NIC clock cycle by a large factor so that relatively
+ * small clock corrections can be added or subtracted at each clock
+ * tick. The drawbacks of a large factor are a) that the clock
+ * register overflows more quickly (not such a big deal) and b) that
+ * the increment per tick has to fit into 24 bits.  As a result we
+ * need to use a shift of 19 so we can fit a value of 16 into the
+ * TIMINCA register.
+ *
+ *
+ *             SYSTIMH            SYSTIML
+ *        +--------------+   +---+---+------+
+ *  82576 |      32      |   | 8 | 5 |  19  |
+ *        +--------------+   +---+---+------+
+ *         \________ 45 bits _______/  fract
+ *
+ *        +----------+---+   +--------------+
+ *  82580 |    24    | 8 |   |      32      |
+ *        +----------+---+   +--------------+
+ *          reserved  \______ 40 bits _____/
+ *
+ *
+ * The 45 bit 82576 SYSTIM overflows every
+ *   2^45 * 10^-9 / 3600 = 9.77 hours.
+ *
+ * The 40 bit 82580 SYSTIM overflows every
+ *   2^40 * 10^-9 /  60  = 18.3 minutes.
+ */
+
+#define IGB_OVERFLOW_PERIOD	(HZ * 60 * 9)
+#define INCPERIOD_82576		(1 << E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK	((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
+#define INCVALUE_82576		(16 << IGB_82576_TSYNC_SHIFT)
+#define IGB_NBITS_82580		40
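
The wrap times quoted in the comment above, and the choice of IGB_OVERFLOW_PERIOD (HZ * 60 * 9, i.e. nine minutes, which keeps the periodic timecounter read inside half of the ~18.3 minute 82580 wrap), can be sanity-checked with a small standalone program; this is only arithmetic, not driver code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 45 usable SYSTIM bits on 82576, 40 on 82580, counting nanoseconds. */
	double wrap_82576 = (double)(1ULL << 45) * 1e-9;	/* seconds */
	double wrap_82580 = (double)(1ULL << 40) * 1e-9;	/* seconds */

	printf("82576 wrap: %.2f hours\n", wrap_82576 / 3600.0);	/* ~9.77  */
	printf("82580 wrap: %.2f minutes\n", wrap_82580 / 60.0);	/* ~18.33 */
	return 0;
}
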
+
+/*
+ * SYSTIM read access for the 82576
+ */
+
+static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
+{
+	u64 val;
+	u32 lo, hi;
+	struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+	struct e1000_hw *hw = &igb->hw;
+
+	lo = rd32(E1000_SYSTIML);
+	hi = rd32(E1000_SYSTIMH);
+
+	val = ((u64) hi) << 32;
+	val |= lo;
+
+	return val;
+}
+
+/*
+ * SYSTIM read access for the 82580
+ */
+
+static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
+{
+	u64 val;
+	u32 lo, hi, jk;
+	struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
+	struct e1000_hw *hw = &igb->hw;
+
+	/*
+	 * The timestamp is latched on the lowest register read. For the 82580
+	 * the lowest register is SYSTIMR instead of SYSTIML.  However, we only
+	 * need to provide nanosecond resolution, so we just ignore it.
+	 */
+	jk = rd32(E1000_SYSTIMR);
+	lo = rd32(E1000_SYSTIML);
+	hi = rd32(E1000_SYSTIMH);
+
+	val = ((u64) hi) << 32;
+	val |= lo;
+
+	return val;
+}
+
+/*
+ * PTP clock operations
+ */
+
+static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	u64 rate;
+	u32 incvalue;
+	int neg_adj = 0;
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+	struct e1000_hw *hw = &igb->hw;
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
+	rate = ppb;
+	rate <<= 14;
+	rate = div_u64(rate, 1953125);
+
+	incvalue = 16 << IGB_82576_TSYNC_SHIFT;
+
+	if (neg_adj)
+		incvalue -= rate;
+	else
+		incvalue += rate;
+
+	wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
+
+	return 0;
+}
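
The shift-and-divide above is exactly a parts-per-billion scaling of the nominal 82576 increment described in the header comment (16 ns with 19 fractional bits, i.e. 16 << 19): (16 << 19) * ppb / 10^9 = (ppb << 23) / 10^9 = (ppb << 14) / 1953125, since 10^9 = 1953125 * 2^9. A standalone check of that identity (not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int64_t ppb;

	for (ppb = 0; ppb <= 1000000000; ppb += 999983) {
		/* Direct scaling of the nominal increment... */
		uint64_t direct = (16ULL << 19) * (uint64_t)ppb / 1000000000ULL;
		/* ...and the driver's shift/divide form. */
		uint64_t driver = ((uint64_t)ppb << 14) / 1953125ULL;

		assert(direct == driver);
	}
	return 0;
}
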
+
+static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	u64 rate;
+	u32 inca;
+	int neg_adj = 0;
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+	struct e1000_hw *hw = &igb->hw;
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
+	rate = ppb;
+	rate <<= 26;
+	rate = div_u64(rate, 1953125);
+
+	inca = rate & INCVALUE_MASK;
+	if (neg_adj)
+		inca |= ISGN;
+
+	wr32(E1000_TIMINCA, inca);
+
+	return 0;
+}
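
The 82580 branch scales by (ppb << 26) / 1953125 = ppb * 2^35 / 10^9. This is consistent with TIMINCA taking a signed correction in units of 2^-32 ns applied every 8 ns (8 ns * 2^32 = 2^35), and with the max_adj of 62499999 used below in igb_ptp_init(): 62500000 * 2^35 / 10^9 = 2^31, which would no longer fit the 31-bit INCVALUE_MASK field next to the ISGN sign bit. The register interpretation is an inference from this patch, not something it states.
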
+
+static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	s64 now;
+	unsigned long flags;
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+	spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+	now = timecounter_read(&igb->tc);
+	now += delta;
+	timecounter_init(&igb->tc, &igb->cc, now);
+
+	spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+	return 0;
+}
+
+static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	u64 ns;
+	u32 remainder;
+	unsigned long flags;
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+	spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+	ns = timecounter_read(&igb->tc);
+
+	spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+	ts->tv_nsec = remainder;
+
+	return 0;
+}
+
+static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
+{
+	u64 ns;
+	unsigned long flags;
+	struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
+
+	ns = ts->tv_sec * 1000000000ULL;
+	ns += ts->tv_nsec;
+
+	spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+	timecounter_init(&igb->tc, &igb->cc, ns);
+
+	spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+	return 0;
+}
+
+static int ptp_82576_enable(struct ptp_clock_info *ptp,
+			    struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static int ptp_82580_enable(struct ptp_clock_info *ptp,
+			    struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static void igb_overflow_check(struct work_struct *work)
+{
+	struct timespec ts;
+	struct igb_adapter *igb =
+		container_of(work, struct igb_adapter, overflow_work.work);
+
+	igb_gettime(&igb->caps, &ts);
+
+	pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+
+	schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD);
+}
+
+void igb_ptp_init(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+	case e1000_i350:
+	case e1000_82580:
+		adapter->caps.owner	= THIS_MODULE;
+		strcpy(adapter->caps.name, "igb-82580");
+		adapter->caps.max_adj	= 62499999;
+		adapter->caps.n_ext_ts	= 0;
+		adapter->caps.pps	= 0;
+		adapter->caps.adjfreq	= ptp_82580_adjfreq;
+		adapter->caps.adjtime	= igb_adjtime;
+		adapter->caps.gettime	= igb_gettime;
+		adapter->caps.settime	= igb_settime;
+		adapter->caps.enable	= ptp_82580_enable;
+		adapter->cc.read	= igb_82580_systim_read;
+		adapter->cc.mask	= CLOCKSOURCE_MASK(IGB_NBITS_82580);
+		adapter->cc.mult	= 1;
+		adapter->cc.shift	= 0;
+		/* Enable the timer functions by clearing bit 31. */
+		wr32(E1000_TSAUXC, 0x0);
+		break;
+
+	case e1000_82576:
+		adapter->caps.owner	= THIS_MODULE;
+		strcpy(adapter->caps.name, "igb-82576");
+		adapter->caps.max_adj	= 1000000000;
+		adapter->caps.n_ext_ts	= 0;
+		adapter->caps.pps	= 0;
+		adapter->caps.adjfreq	= ptp_82576_adjfreq;
+		adapter->caps.adjtime	= igb_adjtime;
+		adapter->caps.gettime	= igb_gettime;
+		adapter->caps.settime	= igb_settime;
+		adapter->caps.enable	= ptp_82576_enable;
+		adapter->cc.read	= igb_82576_systim_read;
+		adapter->cc.mask	= CLOCKSOURCE_MASK(64);
+		adapter->cc.mult	= 1;
+		adapter->cc.shift	= IGB_82576_TSYNC_SHIFT;
+		/* Dial the nominal frequency. */
+		wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+		break;
+
+	default:
+		adapter->ptp_clock = NULL;
+		return;
+	}
+
+	wrfl();
+
+	timecounter_init(&adapter->tc, &adapter->cc,
+			 ktime_to_ns(ktime_get_real()));
+
+	INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check);
+
+	spin_lock_init(&adapter->tmreg_lock);
+
+	schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD);
+
+	adapter->ptp_clock = ptp_clock_register(&adapter->caps);
+	if (IS_ERR(adapter->ptp_clock)) {
+		adapter->ptp_clock = NULL;
+		dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
+	} else
+		dev_info(&adapter->pdev->dev, "added PHC on %s\n",
+			 adapter->netdev->name);
+}
+
+void igb_ptp_remove(struct igb_adapter *adapter)
+{
+	cancel_delayed_work_sync(&adapter->overflow_work);
+
+	if (adapter->ptp_clock) {
+		ptp_clock_unregister(adapter->ptp_clock);
+		dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
+			 adapter->netdev->name);
+	}
+}
+
+/**
+ * igb_systim_to_hwtstamp - convert system time value to hw timestamp
+ * @adapter: board private structure
+ * @hwtstamps: timestamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * We need to convert the system time value stored in the RX/TXSTMP registers
+ * into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * The 'tmreg_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two (or three) 32 bit registers. The first
+ * read latches the value. Ditto for writing.
+ *
+ * In addition, we have extended the system time with an overflow
+ * counter in software.
+ **/
+void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
+			    struct skb_shared_hwtstamps *hwtstamps,
+			    u64 systim)
+{
+	u64 ns;
+	unsigned long flags;
+
+	switch (adapter->hw.mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+	case e1000_i350:
+	case e1000_82580:
+	case e1000_82576:
+		break;
+	default:
+		return;
+	}
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+	ns = timecounter_cyc2time(&adapter->tc, systim);
+
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	memset(hwtstamps, 0, sizeof(*hwtstamps));
+	hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
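
After ptp_clock_register() succeeds, the clock is exposed through the PTP Hardware Clock character device interface. A minimal userspace sketch of reading it, assuming the device registered as /dev/ptp0 and defining the fd-to-clockid mapping locally the way the kernel's testptp sample does:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Map a /dev/ptpN file descriptor to a dynamic posix clock id. */
#define CLOCKFD			3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts))
		perror("clock_gettime");
	else
		printf("PHC time: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);

	close(fd);
	return 0;
}
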
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d61ca2a..8ec74b0 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2731,14 +2731,14 @@
 			netdev->addr_len);
 	}
 
-	if (!is_valid_ether_addr(netdev->perm_addr)) {
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
 		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
 		        netdev->dev_addr);
 		err = -EIO;
 		goto err_hw_init;
 	}
 
-	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
 
 	setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
 	            (unsigned long) adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 8be1d1b..0bdf06b 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,9 +34,11 @@
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
                               ixgbe_dcb_82599.o ixgbe_dcb_nl.o
 
+ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
+
 ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 74e1921..3ef3c52 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -36,6 +36,12 @@
 #include <linux/aer.h>
 #include <linux/if_vlan.h>
 
+#ifdef CONFIG_IXGBE_PTP
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif /* CONFIG_IXGBE_PTP */
+
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb.h"
@@ -96,6 +102,7 @@
 #define IXGBE_TX_FLAGS_FCOE		(u32)(1 << 5)
 #define IXGBE_TX_FLAGS_FSO		(u32)(1 << 6)
 #define IXGBE_TX_FLAGS_TXSW		(u32)(1 << 7)
+#define IXGBE_TX_FLAGS_TSTAMP		(u32)(1 << 8)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -331,6 +338,26 @@
 	/* for dynamic allocation of rings associated with this q_vector */
 	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
 };
+#ifdef CONFIG_IXGBE_HWMON
+
+#define IXGBE_HWMON_TYPE_LOC		0
+#define IXGBE_HWMON_TYPE_TEMP		1
+#define IXGBE_HWMON_TYPE_CAUTION	2
+#define IXGBE_HWMON_TYPE_MAX		3
+
+struct hwmon_attr {
+	struct device_attribute dev_attr;
+	struct ixgbe_hw *hw;
+	struct ixgbe_thermal_diode_data *sensor;
+	char name[12];
+};
+
+struct hwmon_buff {
+	struct device *device;
+	struct hwmon_attr *hwmon_list;
+	unsigned int n_hwmon;
+};
+#endif /* CONFIG_IXGBE_HWMON */
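
The embedded struct device_attribute in struct hwmon_attr is what lets a sysfs show() handler get back to its per-sensor data with container_of(). A hedged sketch of that pattern; the handler name and the 'temp' field are placeholders, not the actual ixgbe_sysfs.c code:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* Illustrative show() callback, assuming the hwmon_attr declaration above:
 * recover the wrapping hwmon_attr from the embedded device_attribute and
 * report one sensor field. */
static ssize_t example_temp_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct hwmon_attr *ixgbe_attr =
		container_of(attr, struct hwmon_attr, dev_attr);

	/* 'temp' stands in for whatever ixgbe_thermal_diode_data exposes. */
	return sprintf(buf, "%u\n", ixgbe_attr->sensor->temp);
}
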
 
 /*
  * microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -438,6 +465,8 @@
 #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT        (u32)(1 << 7)
 #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 8)
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
+#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED	(u32)(1 << 10)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 11)
 
 	/* Tx fast path data */
 	int num_tx_queues;
@@ -525,6 +554,17 @@
 	u32 interrupt_event;
 	u32 led_reg;
 
+#ifdef CONFIG_IXGBE_PTP
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_caps;
+	unsigned long last_overflow_check;
+	spinlock_t tmreg_lock;
+	struct cyclecounter cc;
+	struct timecounter tc;
+	u32 base_incval;
+	u32 cycle_speed;
+#endif /* CONFIG_IXGBE_PTP */
+
 	/* SR-IOV */
 	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
 	unsigned int num_vfs;
@@ -535,6 +575,10 @@
 
 	u32 timer_event_accumulator;
 	u32 vferr_refcount;
+	struct kobject *info_kobj;
+#ifdef CONFIG_IXGBE_HWMON
+	struct hwmon_buff ixgbe_hwmon_buff;
+#endif /* CONFIG_IXGBE_HWMON */
 };
 
 struct ixgbe_fdir_filter {
@@ -574,9 +618,6 @@
 extern struct ixgbe_info ixgbe_X540_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
-extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
-                              struct ixgbe_dcb_config *dst_dcb_cfg,
-                              int tc_max);
 #endif
 
 extern char ixgbe_driver_name[];
@@ -600,6 +641,8 @@
 				   struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+			       u16 subdevice_id);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
 					 struct ixgbe_adapter *,
@@ -629,10 +672,15 @@
 						 union ixgbe_atr_input *mask);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
+extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
 extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
 #endif
 extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 extern void ixgbe_do_reset(struct net_device *netdev);
+#ifdef CONFIG_IXGBE_HWMON
+extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+#endif /* CONFIG_IXGBE_HWMON */
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
@@ -663,4 +711,18 @@
 	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
 }
 
+#ifdef CONFIG_IXGBE_PTP
+extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
+				  struct sk_buff *skb);
+extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+				  struct sk_buff *skb);
+extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
+				    struct ifreq *ifr, int cmd);
+extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+#endif /* CONFIG_IXGBE_PTP */
+
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 85d2e2c..4253733 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -91,29 +91,6 @@
 	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
 }
 
-/**
- *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
- *  @hw: pointer to hardware structure
- *
- *  Read PCIe configuration space, and get the MSI-X vector count from
- *  the capabilities table.
- **/
-static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
-{
-	struct ixgbe_adapter *adapter = hw->back;
-	u16 msix_count;
-	pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS,
-	                     &msix_count);
-	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
-
-	/* MSI-X count is zero-based in HW, so increment to give proper value */
-	msix_count++;
-
-	return msix_count;
-}
-
-/**
- */
 static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
@@ -126,7 +103,7 @@
 	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
 	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
 	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
-	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
 
 	return 0;
 }
@@ -347,24 +324,33 @@
 /**
  *  ixgbe_fc_enable_82598 - Enable flow control
  *  @hw: pointer to hardware structure
- *  @packetbuf_num: packet buffer number (0-7)
  *
  *  Enable flow control according to the current settings.
  **/
-static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
 {
 	s32 ret_val = 0;
 	u32 fctrl_reg;
 	u32 rmcs_reg;
 	u32 reg;
+	u32 fcrtl, fcrth;
 	u32 link_speed = 0;
+	int i;
 	bool link_up;
 
-#ifdef CONFIG_DCB
-	if (hw->fc.requested_mode == ixgbe_fc_pfc)
+	/*
+	 * Validate the water mark configuration for packet buffer 0.  Zero
+	 * water marks indicate that the packet buffer was not configured
+	 * and the watermarks for packet buffer 0 should always be configured.
+	 */
+	if (!hw->fc.low_water ||
+	    !hw->fc.high_water[0] ||
+	    !hw->fc.pause_time) {
+		hw_dbg(hw, "Invalid water mark configuration\n");
+		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
 		goto out;
+	}
 
-#endif /* CONFIG_DCB */
 	/*
 	 * On 82598 having Rx FC on causes resets while doing 1G
 	 * so if it's on turn it off once we know link_speed. For
@@ -386,9 +372,7 @@
 	}
 
 	/* Negotiate the fc mode to use */
-	ret_val = ixgbe_fc_autoneg(hw);
-	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
-		goto out;
+	ixgbe_fc_autoneg(hw);
 
 	/* Disable any previous flow control settings */
 	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -405,9 +389,6 @@
 	 * 2: Tx flow control is enabled (we can send pause frames but
 	 *     we do not support receiving pause frames).
 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-	 * 4: Priority Flow Control is enabled.
-#endif
 	 * other: Invalid.
 	 */
 	switch (hw->fc.current_mode) {
@@ -440,11 +421,6 @@
 		fctrl_reg |= IXGBE_FCTRL_RFCE;
 		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
 		break;
-#ifdef CONFIG_DCB
-	case ixgbe_fc_pfc:
-		goto out;
-		break;
-#endif /* CONFIG_DCB */
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
 		ret_val = IXGBE_ERR_CONFIG;
@@ -457,29 +433,29 @@
 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
 	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
 
+	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
+
 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
-	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-		reg = hw->fc.low_water << 6;
-		if (hw->fc.send_xon)
-			reg |= IXGBE_FCRTL_XONE;
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+		}
 
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
-
-		reg = hw->fc.high_water[packetbuf_num] << 6;
-		reg |= IXGBE_FCRTH_FCEN;
-
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
 	}
 
 	/* Configure pause time (2 TCs per register) */
-	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
-	if ((packetbuf_num & 1) == 0)
-		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
-	else
-		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
-	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
 
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
 out:
 	return ret_val;
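
The 'pause_time * 0x00010001' idiom above writes the same 16-bit pause time into both halves of each 32-bit FCTTV register, which is what "2 TCs per register" refers to. A standalone check with an arbitrary example value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t pause_time = 0x0680;	/* arbitrary example value */
	uint32_t reg = (uint32_t)pause_time * 0x00010001u;

	assert(reg == 0x06800680u);
	assert((reg & 0xffff) == pause_time);	/* low TC slot  */
	assert((reg >> 16) == pause_time);	/* high TC slot */
	return 0;
}
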
@@ -1300,6 +1276,8 @@
 	.set_fw_drv_ver         = NULL,
 	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
 	.release_swfw_sync      = &ixgbe_release_swfw_sync,
+	.get_thermal_sensor_data = NULL,
+	.init_thermal_sensor_thresh = NULL,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 9c14685..dee64d2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -2119,6 +2119,8 @@
 	.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
 	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
 	.release_swfw_sync      = &ixgbe_release_swfw_sync,
+	.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
+	.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
 
 };
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 49aa41f..77ac41f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -47,13 +47,6 @@
 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
 
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
-			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
 static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
 static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
 					     u16 words, u16 *data);
@@ -64,6 +57,172 @@
 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
 
 /**
+ *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ *  control
+ *  @hw: pointer to hardware structure
+ *
+ *  There are several phys that do not support autoneg flow control. This
+ *  function checks the device id to see if the associated phy supports
+ *  autoneg flow control.
+ **/
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_X540T:
+		return 0;
+	case IXGBE_DEV_ID_82599_T3_LOM:
+		return 0;
+	default:
+		return IXGBE_ERR_FC_NOT_SUPPORTED;
+	}
+}
+
+/**
+ *  ixgbe_setup_fc - Set up flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Called at init time to set up flow control.
+ **/
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 reg = 0, reg_bp = 0;
+	u16 reg_cu = 0;
+
+	/*
+	 * Validate the requested mode.  Strict IEEE mode does not allow
+	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
+	 */
+	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+		goto out;
+	}
+
+	/*
+	 * 10gig parts do not have a word in the EEPROM to determine the
+	 * default flow control setting, so we explicitly set it to full.
+	 */
+	if (hw->fc.requested_mode == ixgbe_fc_default)
+		hw->fc.requested_mode = ixgbe_fc_full;
+
+	/*
+	 * Set up the 1G and 10G flow control advertisement registers so the
+	 * HW will be able to do fc autoneg once the cable is plugged in.  If
+	 * we link at 10G, the 1G advertisement is harmless and vice versa.
+	 */
+	switch (hw->phy.media_type) {
+	case ixgbe_media_type_fiber:
+	case ixgbe_media_type_backplane:
+		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+		break;
+	case ixgbe_media_type_copper:
+		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+					MDIO_MMD_AN, &reg_cu);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * The possible values of fc.requested_mode are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames,
+	 *    but not send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but
+	 *    we do not support receiving pause frames).
+	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
+	 * other: Invalid.
+	 */
+	switch (hw->fc.requested_mode) {
+	case ixgbe_fc_none:
+		/* Flow control completely disabled by software override. */
+		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+		if (hw->phy.media_type == ixgbe_media_type_backplane)
+			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+				    IXGBE_AUTOC_ASM_PAUSE);
+		else if (hw->phy.media_type == ixgbe_media_type_copper)
+			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
+		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
+		if (hw->phy.media_type == ixgbe_media_type_backplane) {
+			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
+			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
+		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
+			reg_cu |= IXGBE_TAF_ASM_PAUSE;
+			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
+		}
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE, as such we fall
+		 * through to the fc_full statement.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+	case ixgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override. */
+		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
+		if (hw->phy.media_type == ixgbe_media_type_backplane)
+			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
+				  IXGBE_AUTOC_ASM_PAUSE;
+		else if (hw->phy.media_type == ixgbe_media_type_copper)
+			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
+		break;
+	default:
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+		ret_val = IXGBE_ERR_CONFIG;
+		goto out;
+		break;
+	}
+
+	if (hw->mac.type != ixgbe_mac_X540) {
+		/*
+		 * Enable auto-negotiation between the MAC & PHY;
+		 * the MAC will advertise clause 37 flow control.
+		 */
+		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+		/* Disable AN timeout */
+		if (hw->fc.strict_ieee)
+			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+		hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+	}
+
+	/*
+	 * AUTOC restart handles negotiation of 1G and 10G on backplane
+	 * and copper. There is no need to set the PCS1GCTL register.
+	 *
+	 */
+	if (hw->phy.media_type == ixgbe_media_type_backplane) {
+		reg_bp |= IXGBE_AUTOC_AN_RESTART;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+		    (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
+				      MDIO_MMD_AN, reg_cu);
+	}
+
+	hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+out:
+	return ret_val;
+}
+
+/**
  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
  *  @hw: pointer to hardware structure
  *
@@ -95,7 +254,7 @@
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Setup flow control */
-	ixgbe_setup_fc(hw, 0);
+	ixgbe_setup_fc(hw);
 
 	/* Clear adapter stopped flag */
 	hw->adapter_stopped = false;
@@ -1923,30 +2082,36 @@
 /**
  *  ixgbe_fc_enable_generic - Enable flow control
  *  @hw: pointer to hardware structure
- *  @packetbuf_num: packet buffer number (0-7)
  *
  *  Enable flow control according to the current settings.
  **/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
 {
 	s32 ret_val = 0;
 	u32 mflcn_reg, fccfg_reg;
 	u32 reg;
 	u32 fcrtl, fcrth;
+	int i;
 
-#ifdef CONFIG_DCB
-	if (hw->fc.requested_mode == ixgbe_fc_pfc)
+	/*
+	 * Validate the water mark configuration for packet buffer 0.  Zero
+	 * water marks indicate that the packet buffer was not configured
+	 * and the watermarks for packet buffer 0 should always be configured.
+	 */
+	if (!hw->fc.low_water ||
+	    !hw->fc.high_water[0] ||
+	    !hw->fc.pause_time) {
+		hw_dbg(hw, "Invalid water mark configuration\n");
+		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
 		goto out;
+	}
 
-#endif /* CONFIG_DCB */
 	/* Negotiate the fc mode to use */
-	ret_val = ixgbe_fc_autoneg(hw);
-	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
-		goto out;
+	ixgbe_fc_autoneg(hw);
 
 	/* Disable any previous flow control settings */
 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-	mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
+	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
 
 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
@@ -1959,9 +2124,6 @@
 	 * 2: Tx flow control is enabled (we can send pause frames but
 	 *    we do not support receiving pause frames).
 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-	 * 4: Priority Flow Control is enabled.
-#endif
 	 * other: Invalid.
 	 */
 	switch (hw->fc.current_mode) {
@@ -1994,11 +2156,6 @@
 		mflcn_reg |= IXGBE_MFLCN_RFCE;
 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
 		break;
-#ifdef CONFIG_DCB
-	case ixgbe_fc_pfc:
-		goto out;
-		break;
-#endif /* CONFIG_DCB */
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
 		ret_val = IXGBE_ERR_CONFIG;
@@ -2011,212 +2168,40 @@
 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
 
-	fcrtl = hw->fc.low_water << 10;
+	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 
-	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-		fcrth = hw->fc.high_water[packetbuf_num] << 10;
-		fcrth |= IXGBE_FCRTH_FCEN;
-		if (hw->fc.send_xon)
-			fcrtl |= IXGBE_FCRTL_XONE;
-	} else {
-		/*
-		 * If Tx flow control is disabled, set our high water mark
-		 * to Rx FIFO size minus 32 in order prevent Tx switch
-		 * loopback from stalling on DMA.
-		 */
-		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)) - 32;
+	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+		} else {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+			/*
+			 * In order to prevent Tx hangs when the internal Tx
+			 * switch is enabled we must set the high water mark
+			 * to the maximum FCRTH value.  This allows the Tx
+			 * switch to function even under heavy Rx workloads.
+			 */
+			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+		}
+
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
 	}
 
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
-
 	/* Configure pause time (2 TCs per register) */
-	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
-	if ((packetbuf_num & 1) == 0)
-		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
-	else
-		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
-	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
 
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
 out:
 	return ret_val;
 }
 
 /**
- *  ixgbe_fc_autoneg - Configure flow control
- *  @hw: pointer to hardware structure
- *
- *  Compares our advertised flow control capabilities to those advertised by
- *  our link partner, and determines the proper flow control mode to use.
- **/
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
-{
-	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-	ixgbe_link_speed speed;
-	bool link_up;
-
-	if (hw->fc.disable_fc_autoneg)
-		goto out;
-
-	/*
-	 * AN should have completed when the cable was plugged in.
-	 * Look for reasons to bail out.  Bail out if:
-	 * - FC autoneg is disabled, or if
-	 * - link is not up.
-	 *
-	 * Since we're being called from an LSC, link is already known to be up.
-	 * So use link_up_wait_to_complete=false.
-	 */
-	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	if (!link_up) {
-		ret_val = IXGBE_ERR_FLOW_CONTROL;
-		goto out;
-	}
-
-	switch (hw->phy.media_type) {
-	/* Autoneg flow control on fiber adapters */
-	case ixgbe_media_type_fiber:
-		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
-			ret_val = ixgbe_fc_autoneg_fiber(hw);
-		break;
-
-	/* Autoneg flow control on backplane adapters */
-	case ixgbe_media_type_backplane:
-		ret_val = ixgbe_fc_autoneg_backplane(hw);
-		break;
-
-	/* Autoneg flow control on copper adapters */
-	case ixgbe_media_type_copper:
-		if (ixgbe_device_supports_autoneg_fc(hw) == 0)
-			ret_val = ixgbe_fc_autoneg_copper(hw);
-		break;
-
-	default:
-		break;
-	}
-
-out:
-	if (ret_val == 0) {
-		hw->fc.fc_was_autonegged = true;
-	} else {
-		hw->fc.fc_was_autonegged = false;
-		hw->fc.current_mode = hw->fc.requested_mode;
-	}
-	return ret_val;
-}
-
-/**
- *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
- *  @hw: pointer to hardware structure
- *
- *  Enable flow control according on 1 gig fiber.
- **/
-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
-{
-	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
-	s32 ret_val;
-
-	/*
-	 * On multispeed fiber at 1g, bail out if
-	 * - link is up but AN did not complete, or if
-	 * - link is up and AN completed but timed out
-	 */
-
-	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
-		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-		goto out;
-	}
-
-	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
-
-	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
-			       pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
-			       IXGBE_PCS1GANA_ASM_PAUSE,
-			       IXGBE_PCS1GANA_SYM_PAUSE,
-			       IXGBE_PCS1GANA_ASM_PAUSE);
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
- *  @hw: pointer to hardware structure
- *
- *  Enable flow control according to IEEE clause 37.
- **/
-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
-{
-	u32 links2, anlp1_reg, autoc_reg, links;
-	s32 ret_val;
-
-	/*
-	 * On backplane, bail out if
-	 * - backplane autoneg was not completed, or if
-	 * - we are 82599 and link partner is not AN enabled
-	 */
-	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
-	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
-		hw->fc.fc_was_autonegged = false;
-		hw->fc.current_mode = hw->fc.requested_mode;
-		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-		goto out;
-	}
-
-	if (hw->mac.type == ixgbe_mac_82599EB) {
-		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
-		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
-			hw->fc.fc_was_autonegged = false;
-			hw->fc.current_mode = hw->fc.requested_mode;
-			ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
-			goto out;
-		}
-	}
-	/*
-	 * Read the 10g AN autoc and LP ability registers and resolve
-	 * local flow control settings accordingly
-	 */
-	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-
-	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
-		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
-		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
-
-out:
-	return ret_val;
-}
-
-/**
- *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
- *  @hw: pointer to hardware structure
- *
- *  Enable flow control according to IEEE clause 37.
- **/
-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
-{
-	u16 technology_ability_reg = 0;
-	u16 lp_technology_ability_reg = 0;
-
-	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
-			     MDIO_MMD_AN,
-			     &technology_ability_reg);
-	hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
-			     MDIO_MMD_AN,
-			     &lp_technology_ability_reg);
-
-	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
-				  (u32)lp_technology_ability_reg,
-				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
-				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
-}
-
-/**
  *  ixgbe_negotiate_fc - Negotiate flow control
  *  @hw: pointer to hardware structure
  *  @adv_reg: flow control advertised settings
@@ -2266,195 +2251,165 @@
 }
 
 /**
- *  ixgbe_setup_fc - Set up flow control
+ *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
  *  @hw: pointer to hardware structure
  *
- *  Called at init time to set up flow control.
+ *  Enable flow control according to autonegotiation on 1 gig fiber.
  **/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
 {
-	s32 ret_val = 0;
-	u32 reg = 0, reg_bp = 0;
-	u16 reg_cu = 0;
-
-#ifdef CONFIG_DCB
-	if (hw->fc.requested_mode == ixgbe_fc_pfc) {
-		hw->fc.current_mode = hw->fc.requested_mode;
-		goto out;
-	}
-
-#endif /* CONFIG_DCB */
-	/* Validate the packetbuf configuration */
-	if (packetbuf_num < 0 || packetbuf_num > 7) {
-		hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
-		       "is 0-7\n", packetbuf_num);
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
+	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
 
 	/*
-	 * Validate the water mark configuration.  Zero water marks are invalid
-	 * because it causes the controller to just blast out fc packets.
+	 * On multispeed fiber at 1g, bail out if
+	 * - link is up but AN did not complete, or if
+	 * - link is up and AN completed but timed out
 	 */
-	if (!hw->fc.low_water ||
-	    !hw->fc.high_water[packetbuf_num] ||
-	    !hw->fc.pause_time) {
-		hw_dbg(hw, "Invalid water mark configuration\n");
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+
+	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
 		goto out;
-	}
 
-	/*
-	 * Validate the requested mode.  Strict IEEE mode does not allow
-	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
-	 */
-	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
-		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
-		       "IEEE mode\n");
-		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-		goto out;
-	}
+	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
 
-	/*
-	 * 10gig parts do not have a word in the EEPROM to determine the
-	 * default flow control setting, so we explicitly set it to full.
-	 */
-	if (hw->fc.requested_mode == ixgbe_fc_default)
-		hw->fc.requested_mode = ixgbe_fc_full;
+	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+			       pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+			       IXGBE_PCS1GANA_ASM_PAUSE,
+			       IXGBE_PCS1GANA_SYM_PAUSE,
+			       IXGBE_PCS1GANA_ASM_PAUSE);
 
-	/*
-	 * Set up the 1G and 10G flow control advertisement registers so the
-	 * HW will be able to do fc autoneg once the cable is plugged in.  If
-	 * we link at 10G, the 1G advertisement is harmless and vice versa.
-	 */
-
-	switch (hw->phy.media_type) {
-	case ixgbe_media_type_fiber:
-	case ixgbe_media_type_backplane:
-		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-		break;
-
-	case ixgbe_media_type_copper:
-		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
-					MDIO_MMD_AN, &reg_cu);
-		break;
-
-	default:
-		;
-	}
-
-	/*
-	 * The possible values of fc.requested_mode are:
-	 * 0: Flow control is completely disabled
-	 * 1: Rx flow control is enabled (we can receive pause frames,
-	 *    but not send pause frames).
-	 * 2: Tx flow control is enabled (we can send pause frames but
-	 *    we do not support receiving pause frames).
-	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
-#ifdef CONFIG_DCB
-	 * 4: Priority Flow Control is enabled.
-#endif
-	 * other: Invalid.
-	 */
-	switch (hw->fc.requested_mode) {
-	case ixgbe_fc_none:
-		/* Flow control completely disabled by software override. */
-		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-		if (hw->phy.media_type == ixgbe_media_type_backplane)
-			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
-				    IXGBE_AUTOC_ASM_PAUSE);
-		else if (hw->phy.media_type == ixgbe_media_type_copper)
-			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
-		break;
-	case ixgbe_fc_rx_pause:
-		/*
-		 * Rx Flow control is enabled and Tx Flow control is
-		 * disabled by software override. Since there really
-		 * isn't a way to advertise that we are capable of RX
-		 * Pause ONLY, we will advertise that we support both
-		 * symmetric and asymmetric Rx PAUSE.  Later, we will
-		 * disable the adapter's ability to send PAUSE frames.
-		 */
-		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-		if (hw->phy.media_type == ixgbe_media_type_backplane)
-			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
-				   IXGBE_AUTOC_ASM_PAUSE);
-		else if (hw->phy.media_type == ixgbe_media_type_copper)
-			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
-		break;
-	case ixgbe_fc_tx_pause:
-		/*
-		 * Tx Flow control is enabled, and Rx Flow control is
-		 * disabled by software override.
-		 */
-		reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
-		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
-		if (hw->phy.media_type == ixgbe_media_type_backplane) {
-			reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
-			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
-		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
-			reg_cu |= (IXGBE_TAF_ASM_PAUSE);
-			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
-		}
-		break;
-	case ixgbe_fc_full:
-		/* Flow control (both Rx and Tx) is enabled by SW override. */
-		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
-		if (hw->phy.media_type == ixgbe_media_type_backplane)
-			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
-				   IXGBE_AUTOC_ASM_PAUSE);
-		else if (hw->phy.media_type == ixgbe_media_type_copper)
-			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
-		break;
-#ifdef CONFIG_DCB
-	case ixgbe_fc_pfc:
-		goto out;
-		break;
-#endif /* CONFIG_DCB */
-	default:
-		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = IXGBE_ERR_CONFIG;
-		goto out;
-		break;
-	}
-
-	if (hw->mac.type != ixgbe_mac_X540) {
-		/*
-		 * Enable auto-negotiation between the MAC & PHY;
-		 * the MAC will advertise clause 37 flow control.
-		 */
-		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
-		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
-
-		/* Disable AN timeout */
-		if (hw->fc.strict_ieee)
-			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
-
-		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
-		hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
-	}
-
-	/*
-	 * AUTOC restart handles negotiation of 1G and 10G on backplane
-	 * and copper. There is no need to set the PCS1GCTL register.
-	 *
-	 */
-	if (hw->phy.media_type == ixgbe_media_type_backplane) {
-		reg_bp |= IXGBE_AUTOC_AN_RESTART;
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
-	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-		    (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
-		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
-				      MDIO_MMD_AN, reg_cu);
-	}
-
-	hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 out:
 	return ret_val;
 }
 
 /**
+ *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+	u32 links2, anlp1_reg, autoc_reg, links;
+	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+	/*
+	 * On backplane, bail out if
+	 * - backplane autoneg was not completed, or if
+	 * - we are 82599 and link partner is not AN enabled
+	 */
+	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
+		goto out;
+
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
+			goto out;
+	}
+	/*
+	 * Read the 10g AN autoc and LP ability registers and resolve
+	 * local flow control settings accordingly
+	 */
+	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+	u16 technology_ability_reg = 0;
+	u16 lp_technology_ability_reg = 0;
+
+	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
+			     MDIO_MMD_AN,
+			     &technology_ability_reg);
+	hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
+			     MDIO_MMD_AN,
+			     &lp_technology_ability_reg);
+
+	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+				  (u32)lp_technology_ability_reg,
+				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ *  ixgbe_fc_autoneg - Configure flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Compares our advertised flow control capabilities to those advertised by
+ *  our link partner, and determines the proper flow control mode to use.
+ **/
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+{
+	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+	ixgbe_link_speed speed;
+	bool link_up;
+
+	/*
+	 * AN should have completed when the cable was plugged in.
+	 * Look for reasons to bail out.  Bail out if:
+	 * - FC autoneg is disabled, or if
+	 * - link is not up.
+	 *
+	 * Since we're being called from an LSC, link is already known to be up.
+	 * So use link_up_wait_to_complete=false.
+	 */
+	if (hw->fc.disable_fc_autoneg)
+		goto out;
+
+	hw->mac.ops.check_link(hw, &speed, &link_up, false);
+	if (!link_up)
+		goto out;
+
+	switch (hw->phy.media_type) {
+	/* Autoneg flow control on fiber adapters */
+	case ixgbe_media_type_fiber:
+		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+			ret_val = ixgbe_fc_autoneg_fiber(hw);
+		break;
+
+	/* Autoneg flow control on backplane adapters */
+	case ixgbe_media_type_backplane:
+		ret_val = ixgbe_fc_autoneg_backplane(hw);
+		break;
+
+	/* Autoneg flow control on copper adapters */
+	case ixgbe_media_type_copper:
+		if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+			ret_val = ixgbe_fc_autoneg_copper(hw);
+		break;
+
+	default:
+		break;
+	}
+
+out:
+	if (ret_val == 0) {
+		hw->fc.fc_was_autonegged = true;
+	} else {
+		hw->fc.fc_was_autonegged = false;
+		hw->fc.current_mode = hw->fc.requested_mode;
+	}
+}
+
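
ixgbe_negotiate_fc() itself is outside this hunk; for reference, the clause 37 symmetric/asymmetric pause resolution it is expected to apply to the local and link-partner ability bits looks roughly like the sketch below (hypothetical helper name, enum values as used elsewhere in the driver):

static enum ixgbe_fc_mode resolve_pause(bool loc_sym, bool loc_asm,
					bool lp_sym, bool lp_asm,
					enum ixgbe_fc_mode requested)
{
	if (loc_sym && lp_sym)
		/* both ends advertise symmetric pause */
		return (requested == ixgbe_fc_full) ? ixgbe_fc_full
						    : ixgbe_fc_rx_pause;
	if (!loc_sym && loc_asm && lp_sym && lp_asm)
		return ixgbe_fc_tx_pause;	/* we may only send pause */
	if (loc_sym && loc_asm && !lp_sym && lp_asm)
		return ixgbe_fc_rx_pause;	/* we may only honor pause */
	return ixgbe_fc_none;
}
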
+/**
  *  ixgbe_disable_pcie_master - Disable PCI-express master access
  *  @hw: pointer to hardware structure
  *
@@ -2606,7 +2561,7 @@
 			break;
 		else
 			/* Use interrupt-safe sleep just in case */
-			udelay(10);
+			udelay(1000);
 	}
 
 	/* For informational purposes only */
@@ -2783,17 +2738,36 @@
  *  Read PCIe configuration space, and get the MSI-X vector count from
  *  the capabilities table.
  **/
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
 {
 	struct ixgbe_adapter *adapter = hw->back;
-	u16 msix_count;
-	pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
-	                     &msix_count);
+	u16 msix_count = 1;
+	u16 max_msix_count;
+	u16 pcie_offset;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
+		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
+		max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+		break;
+	default:
+		return msix_count;
+	}
+
+	pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
 	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
 
-	/* MSI-X count is zero-based in HW, so increment to give proper value */
+	/* MSI-X count is zero-based in HW */
 	msix_count++;
 
+	if (msix_count > max_msix_count)
+		msix_count = max_msix_count;
+
 	return msix_count;
 }
 
@@ -3203,28 +3177,6 @@
 }
 
 /**
- *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
- *  control
- *  @hw: pointer to hardware structure
- *
- *  There are several phys that do not support autoneg flow control. This
- *  function check the device id to see if the associated phy supports
- *  autoneg flow control.
- **/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
-{
-
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_X540T:
-		return 0;
-	case IXGBE_DEV_ID_82599_T3_LOM:
-		return 0;
-	default:
-		return IXGBE_ERR_FC_NOT_SUPPORTED;
-	}
-}
-
-/**
  *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
  *  @hw: pointer to hardware structure
  *  @enable: enable or disable switch for anti-spoofing
@@ -3585,3 +3537,172 @@
 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 }
+
+static const u8 ixgbe_emc_temp_data[4] = {
+	IXGBE_EMC_INTERNAL_DATA,
+	IXGBE_EMC_DIODE1_DATA,
+	IXGBE_EMC_DIODE2_DATA,
+	IXGBE_EMC_DIODE3_DATA
+};
+static const u8 ixgbe_emc_therm_limit[4] = {
+	IXGBE_EMC_INTERNAL_THERM_LIMIT,
+	IXGBE_EMC_DIODE1_THERM_LIMIT,
+	IXGBE_EMC_DIODE2_THERM_LIMIT,
+	IXGBE_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ *  ixgbe_get_ets_data - Extracts the ETS bit data
+ *  @hw: pointer to hardware structure
+ *  @ets_cfg: extracted ETS data
+ *  @ets_offset: offset of ETS data
+ *
+ *  Returns error code.
+ **/
+static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
+			      u16 *ets_offset)
+{
+	s32 status = 0;
+
+	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
+	if (status)
+		goto out;
+
+	if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+	status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
+	if (status)
+		goto out;
+
+	if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+out:
+	return status;
+}
+
+/**
+ *  ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ *  @hw: pointer to hardware structure
+ *
+ *  Reads the thermal sensors and stores the results in
+ *  hw->mac.thermal_sensor_data; returns a status code.
+ **/
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+{
+	s32 status = 0;
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  num_sensors;
+	u8  i;
+	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	/* Only support thermal sensors attached to physical port 0 */
+	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
+	if (status)
+		goto out;
+
+	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > IXGBE_MAX_SENSORS)
+		num_sensors = IXGBE_MAX_SENSORS;
+
+	for (i = 0; i < num_sensors; i++) {
+		u8  sensor_index;
+		u8  sensor_location;
+
+		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
+					     &ets_sensor);
+		if (status)
+			goto out;
+
+		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+				IXGBE_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+				   IXGBE_ETS_DATA_LOC_SHIFT);
+
+		if (sensor_location != 0) {
+			status = hw->phy.ops.read_i2c_byte(hw,
+					ixgbe_emc_temp_data[sensor_index],
+					IXGBE_I2C_THERMAL_SENSOR_ADDR,
+					&data->sensor[i].temp);
+			if (status)
+				goto out;
+		}
+	}
+out:
+	return status;
+}
+
+/**
+ * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Inits the thermal sensor thresholds according to the NVM map
+ * and saves off the threshold and location values into mac.thermal_sensor_data
+ **/
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+{
+	s32 status = 0;
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  low_thresh_delta;
+	u8  num_sensors;
+	u8  therm_limit;
+	u8  i;
+	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
+
+	/* Only support thermal sensors attached to physical port 0 */
+	if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
+		status = IXGBE_NOT_IMPLEMENTED;
+		goto out;
+	}
+
+	status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
+	if (status)
+		goto out;
+
+	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
+			     IXGBE_ETS_LTHRES_DELTA_SHIFT);
+	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > IXGBE_MAX_SENSORS)
+		num_sensors = IXGBE_MAX_SENSORS;
+
+	for (i = 0; i < num_sensors; i++) {
+		u8  sensor_index;
+		u8  sensor_location;
+
+		hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
+		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
+				IXGBE_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
+				   IXGBE_ETS_DATA_LOC_SHIFT);
+		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
+
+		hw->phy.ops.write_i2c_byte(hw,
+			ixgbe_emc_therm_limit[sensor_index],
+			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
+
+		if (sensor_location == 0)
+			continue;
+
+		data->sensor[i].location = sensor_location;
+		data->sensor[i].caution_thresh = therm_limit;
+		data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
+	}
+out:
+	return status;
+}
+
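
Each 16-bit sensor word read from the ETS region above packs three fields; a stand-alone sketch of the decode (hypothetical decode_ets_word() helper, masks and shifts as defined in ixgbe_type.h):

static void decode_ets_word(u16 ets_sensor)
{
	/* which EMC data/limit register pair the sensor maps to */
	u8 index = (ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
		   IXGBE_ETS_DATA_INDEX_SHIFT;
	/* physical location of the diode; the code above skips location 0 */
	u8 location = (ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
		      IXGBE_ETS_DATA_LOC_SHIFT;
	/* high (caution) threshold lives in the low byte */
	u8 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;

	pr_debug("ETS sensor: index=%u location=%u limit=%u\n",
		 index, location, therm_limit);
}
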
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 204f062..6222fdb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -31,7 +31,7 @@
 #include "ixgbe_type.h"
 #include "ixgbe.h"
 
-u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -77,8 +77,8 @@
 s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
-s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
 s32 ixgbe_validate_mac_addr(u8 *mac_addr);
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@@ -107,6 +107,19 @@
 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
 			     u32 headroom, int strategy);
 
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
+#define IXGBE_EMC_INTERNAL_DATA		0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT	0x20
+#define IXGBE_EMC_DIODE1_DATA		0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT	0x19
+#define IXGBE_EMC_DIODE2_DATA		0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT	0x1A
+#define IXGBE_EMC_DIODE3_DATA		0x2A
+#define IXGBE_EMC_DIODE3_THERM_LIMIT	0x30
+
+s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+
 #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
 
 #ifndef writeq
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index d3695ed..87592b4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -191,54 +191,47 @@
  */
 s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 {
-	u32 reg;
+	u32 fcrtl, reg;
 	u8  i;
 
-	if (pfc_en) {
-		/* Enable Transmit Priority Flow Control */
-		reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
-		reg &= ~IXGBE_RMCS_TFCE_802_3X;
-		/* correct the reporting of our flow control status */
-		reg |= IXGBE_RMCS_TFCE_PRIORITY;
-		IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+	/* Enable Transmit Priority Flow Control */
+	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+	reg &= ~IXGBE_RMCS_TFCE_802_3X;
+	reg |= IXGBE_RMCS_TFCE_PRIORITY;
+	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
 
-		/* Enable Receive Priority Flow Control */
-		reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-		reg &= ~IXGBE_FCTRL_RFCE;
+	/* Enable Receive Priority Flow Control */
+	reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
+
+	if (pfc_en)
 		reg |= IXGBE_FCTRL_RPFCE;
-		IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
 
-		/* Configure pause time */
-		for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
-			IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
 
-		/* Configure flow control refresh threshold value */
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);
-	}
-
-	/*
-	 * Configure flow control thresholds and enable priority flow control
-	 * for each traffic class.
-	 */
+	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
+	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		int enabled = pfc_en & (1 << i);
+		if (!(pfc_en & (1 << i))) {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+			continue;
+		}
 
-		reg = hw->fc.low_water << 10;
-
-		if (enabled == pfc_enabled_tx ||
-		    enabled == pfc_enabled_full)
-			reg |= IXGBE_FCRTL_XONE;
-
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
-
-		reg = hw->fc.high_water[i] << 10;
-		if (enabled == pfc_enabled_tx ||
-		    enabled == pfc_enabled_full)
-			reg |= IXGBE_FCRTH_FCEN;
-
+		reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
 	}
 
+	/* Configure pause time */
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+
 	return 0;
 }
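
The FCTTV writes above rely on a small packing trick: each 32-bit FCTTV register holds the pause time for two traffic classes, and multiplying the 16-bit pause time by 0x00010001 replicates it into both halves. A stand-alone illustration with a hypothetical value:

#include <stdio.h>

int main(void)
{
	unsigned int pause_time = 0x6800;		/* example pause quanta */
	unsigned int fcttv = pause_time * 0x00010001u;

	/* prints FCTTV = 0x68006800: same value in both 16-bit halves */
	printf("FCTTV = 0x%08x\n", fcttv);
	return 0;
}
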
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 888a419..4eac80d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -211,24 +211,42 @@
  */
 s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 {
-	u32 i, j, reg;
+	u32 i, j, fcrtl, reg;
 	u8 max_tc = 0;
 
-	for (i = 0; i < MAX_USER_PRIORITY; i++)
+	/* Enable Transmit Priority Flow Control */
+	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
+
+	/* Enable Receive Priority Flow Control */
+	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+	reg |= IXGBE_MFLCN_DPF;
+
+	/*
+	 * X540 supports per TC Rx priority flow control.  So
+	 * clear all TCs and only enable those that should be
+	 * enabled.
+	 */
+	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+
+	if (hw->mac.type == ixgbe_mac_X540)
+		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+
+	if (pfc_en)
+		reg |= IXGBE_MFLCN_RPFCE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+
+	for (i = 0; i < MAX_USER_PRIORITY; i++) {
 		if (prio_tc[i] > max_tc)
 			max_tc = prio_tc[i];
+	}
+
+	fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 
 	/* Configure PFC Tx thresholds per TC */
-	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+	for (i = 0; i <= max_tc; i++) {
 		int enabled = 0;
 
-		if (i > max_tc) {
-			reg = 0;
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
-			continue;
-		}
-
 		for (j = 0; j < MAX_USER_PRIORITY; j++) {
 			if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
 				enabled = 1;
@@ -236,62 +254,30 @@
 			}
 		}
 
-		reg = hw->fc.low_water << 10;
+		if (enabled) {
+			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+		} else {
+			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+		}
 
-		if (enabled)
-			reg |= IXGBE_FCRTL_XONE;
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
-
-		reg = hw->fc.high_water[i] << 10;
-		if (enabled)
-			reg |= IXGBE_FCRTH_FCEN;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
 	}
 
-	if (pfc_en) {
-		/* Configure pause time (2 TCs per register) */
-		reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
-		for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
-			IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
-
-		/* Configure flow control refresh threshold value */
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
-
-
-		reg = IXGBE_FCCFG_TFCE_PRIORITY;
-		IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
-		/*
-		 * Enable Receive PFC
-		 * 82599 will always honor XOFF frames we receive when
-		 * we are in PFC mode however X540 only honors enabled
-		 * traffic classes.
-		 */
-		reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-		reg &= ~IXGBE_MFLCN_RFCE;
-		reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
-
-		if (hw->mac.type == ixgbe_mac_X540) {
-			reg &= ~IXGBE_MFLCN_RPFCE_MASK;
-			reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
-		}
-
-		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
-
-	} else {
-		/* X540 devices have a RX bit that should be cleared
-		 * if PFC is disabled on all TCs but PFC features is
-		 * enabled.
-		 */
-		if (hw->mac.type == ixgbe_mac_X540) {
-			reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-			reg &= ~IXGBE_MFLCN_RPFCE_MASK;
-			IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
-		}
-
-		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-			hw->mac.ops.fc_enable(hw, i);
+	for (; i < MAX_TRAFFIC_CLASS; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
 	}
 
+	/* Configure pause time (2 TCs per register) */
+	reg = hw->fc.pause_time * 0x00010001;
+	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
 	return 0;
 }
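
A traffic class is PFC-enabled when at least one user priority mapped to it has its bit set in pfc_en; the loop above performs that test inline per TC. The same check, pulled out into a hypothetical helper for clarity (u8 and MAX_USER_PRIORITY as in the driver headers):

static u8 pfc_tc_mask(u8 pfc_en, const u8 *prio_tc)
{
	u8 tc_mask = 0;
	int up;

	/* set the bit of every TC that has an enabled priority mapped to it */
	for (up = 0; up < MAX_USER_PRIORITY; up++)
		if (pfc_en & (1 << up))
			tc_mask |= 1 << prio_tc[up];

	return tc_mask;
}
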
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 652e4b0..5164a21 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -44,18 +44,26 @@
 #define DCB_NO_HW_CHG   1  /* DCB configuration did not change */
 #define DCB_HW_CHG      2  /* DCB configuration changed, no reset */
 
-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg,
-		       struct ixgbe_dcb_config *dcfg, int tc_max)
+static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
 {
+	struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg;
+	struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg;
 	struct tc_configuration *src = NULL;
 	struct tc_configuration *dst = NULL;
 	int i, j;
 	int tx = DCB_TX_CONFIG;
 	int rx = DCB_RX_CONFIG;
 	int changes = 0;
+#ifdef IXGBE_FCOE
+	struct dcb_app app = {
+			      .selector = DCB_APP_IDTYPE_ETHTYPE,
+			      .protocol = ETH_P_FCOE,
+			     };
+	u8 up = dcb_getapp(adapter->netdev, &app);
 
-	if (!scfg || !dcfg)
-		return changes;
+	if (up && !(up & (1 << adapter->fcoe.up)))
+		changes |= BIT_APP_UPCHG;
+#endif
 
 	for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
 		src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
@@ -330,60 +338,20 @@
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
+	struct ixgbe_hw *hw = &adapter->hw;
 	int ret = DCB_NO_HW_CHG;
 	int i;
-#ifdef IXGBE_FCOE
-	struct dcb_app app = {
-			      .selector = DCB_APP_IDTYPE_ETHTYPE,
-			      .protocol = ETH_P_FCOE,
-			     };
-	u8 up;
-
-	/* In IEEE mode, use the IEEE Ethertype selector value */
-	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
-		app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
-		up = dcb_ieee_getapp_mask(netdev, &app);
-	} else {
-		up = dcb_getapp(netdev, &app);
-	}
-#endif
 
 	/* Fail command if not in CEE mode */
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return ret;
 
-	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
-						      &adapter->dcb_cfg,
+	adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
 						      MAX_TRAFFIC_CLASS);
 	if (!adapter->dcb_set_bitmap)
 		return ret;
 
-	if (adapter->dcb_cfg.pfc_mode_enable) {
-		switch (adapter->hw.mac.type) {
-		case ixgbe_mac_82599EB:
-		case ixgbe_mac_X540:
-			if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
-				adapter->last_lfc_mode =
-				                  adapter->hw.fc.current_mode;
-			break;
-		default:
-			break;
-		}
-		adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
-	} else {
-		switch (adapter->hw.mac.type) {
-		case ixgbe_mac_82598EB:
-			adapter->hw.fc.requested_mode = ixgbe_fc_none;
-			break;
-		case ixgbe_mac_82599EB:
-		case ixgbe_mac_X540:
-			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
-			break;
-		default:
-			break;
-		}
-	}
-
 	if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
 		u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
 		u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -396,23 +364,19 @@
 			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
 
-		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
-					       max_frame, DCB_TX_CONFIG);
-		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
-					       max_frame, DCB_RX_CONFIG);
+		ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame,
+					       DCB_TX_CONFIG);
+		ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame,
+					       DCB_RX_CONFIG);
 
-		ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
-					DCB_TX_CONFIG, refill);
-		ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
-		ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
-				       DCB_TX_CONFIG, bwg_id);
-		ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
-				      DCB_TX_CONFIG, prio_type);
-		ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
-				     DCB_TX_CONFIG, prio_tc);
+		ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill);
+		ixgbe_dcb_unpack_max(dcb_cfg, max);
+		ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id);
+		ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type);
+		ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc);
 
-		ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
-					bwg_id, prio_type, prio_tc);
+		ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id,
+					prio_type, prio_tc);
 
 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
 			netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
@@ -421,27 +385,34 @@
 	}
 
 	if (adapter->dcb_set_bitmap & BIT_PFC) {
-		u8 pfc_en;
-		u8 prio_tc[MAX_USER_PRIORITY];
+		if (dcb_cfg->pfc_mode_enable) {
+			u8 pfc_en;
+			u8 prio_tc[MAX_USER_PRIORITY];
 
-		ixgbe_dcb_unpack_map(&adapter->dcb_cfg,
-				     DCB_TX_CONFIG, prio_tc);
-		ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
-		ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en, prio_tc);
-		if (ret != DCB_HW_CHG_RST)
-			ret = DCB_HW_CHG;
+			ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc);
+			ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en);
+			ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc);
+		} else {
+			hw->mac.ops.fc_enable(hw);
+		}
+
+		ixgbe_set_rx_drop_en(adapter);
+
+		ret = DCB_HW_CHG;
 	}
 
-	if (adapter->dcb_cfg.pfc_mode_enable)
-		adapter->hw.fc.current_mode = ixgbe_fc_pfc;
-
 #ifdef IXGBE_FCOE
 	/* Reprogam FCoE hardware offloads when the traffic class
 	 * FCoE is using changes. This happens if the APP info
 	 * changes or the up2tc mapping is updated.
 	 */
-	if ((up && !(up & (1 << adapter->fcoe.up))) ||
-	    (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+		struct dcb_app app = {
+				      .selector = DCB_APP_IDTYPE_ETHTYPE,
+				      .protocol = ETH_P_FCOE,
+				     };
+		u8 up = dcb_getapp(netdev, &app);
+
 		adapter->fcoe.up = ffs(up) - 1;
 		ixgbe_dcbnl_devreset(netdev);
 		ret = DCB_HW_CHG_RST;
@@ -650,7 +621,9 @@
 				   struct ieee_pfc *pfc)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_hw *hw = &adapter->hw;
 	u8 *prio_tc;
+	int err;
 
 	if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
 		return -EINVAL;
@@ -664,7 +637,16 @@
 
 	prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
 	memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
-	return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
+
+	/* Enable link flow control parameters if PFC is disabled */
+	if (pfc->pfc_en)
+		err = ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc);
+	else
+		err = hw->mac.ops.fc_enable(hw);
+
+	ixgbe_set_rx_drop_en(adapter);
+
+	return err;
 }
 
 static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
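
dcb_getapp() returns a bitmap of user priorities bound to the FCoE ethertype, and BIT_APP_UPCHG is set above when the priority currently programmed into the FCoE offload has dropped out of that bitmap. The test, shown in isolation (hypothetical helper, not driver code):

/* 'up' is the priority bitmap from dcb_getapp() for ETH_P_FCOE,
 * 'cur_up' the priority the FCoE offload is currently using.
 */
static bool fcoe_app_up_changed(u8 up, u8 cur_up)
{
	return up && !(up & (1 << cur_up));
}

When the check fires, the handler later picks the lowest newly advertised priority with ffs(up) - 1 and resets the device.
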
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 31a2bf7..3178f1e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -391,11 +391,6 @@
 	} else if (hw->fc.current_mode == ixgbe_fc_full) {
 		pause->rx_pause = 1;
 		pause->tx_pause = 1;
-#ifdef CONFIG_DCB
-	} else if (hw->fc.current_mode == ixgbe_fc_pfc) {
-		pause->rx_pause = 0;
-		pause->tx_pause = 0;
-#endif
 	}
 }
 
@@ -404,21 +399,14 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbe_fc_info fc;
+	struct ixgbe_fc_info fc = hw->fc;
 
-#ifdef CONFIG_DCB
-	if (adapter->dcb_cfg.pfc_mode_enable ||
-		((hw->mac.type == ixgbe_mac_82598EB) &&
-		(adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
+	/* 82598 does not support link flow control with DCB enabled */
+	if ((hw->mac.type == ixgbe_mac_82598EB) &&
+	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
 		return -EINVAL;
 
-#endif
-	fc = hw->fc;
-
-	if (pause->autoneg != AUTONEG_ENABLE)
-		fc.disable_fc_autoneg = true;
-	else
-		fc.disable_fc_autoneg = false;
+	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
 
 	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
 		fc.requested_mode = ixgbe_fc_full;
@@ -426,14 +414,8 @@
 		fc.requested_mode = ixgbe_fc_rx_pause;
 	else if (!pause->rx_pause && pause->tx_pause)
 		fc.requested_mode = ixgbe_fc_tx_pause;
-	else if (!pause->rx_pause && !pause->tx_pause)
-		fc.requested_mode = ixgbe_fc_none;
 	else
-		return -EINVAL;
-
-#ifdef CONFIG_DCB
-	adapter->last_lfc_mode = fc.requested_mode;
-#endif
+		fc.requested_mode = ixgbe_fc_none;
 
 	/* if the thing changed then we'll update and use new autoneg */
 	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
@@ -1780,6 +1762,8 @@
 		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
 	}
 
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	/* re-map buffers to ring, store next to clean values */
 	ixgbe_alloc_rx_buffers(rx_ring, count);
 	rx_ring->next_to_clean = rx_ntc;
@@ -1969,53 +1953,12 @@
                                struct ethtool_wolinfo *wol)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int retval = 1;
-	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
+	int retval = 0;
 
-	/* WOL not supported except for the following */
-	switch(hw->device_id) {
-	case IXGBE_DEV_ID_82599_SFP:
-		/* Only these subdevices could supports WOL */
-		switch (hw->subsystem_device_id) {
-		case IXGBE_SUBDEV_ID_82599_560FLR:
-			/* only support first port */
-			if (hw->bus.func != 0) {
-				wol->supported = 0;
-				break;
-			}
-		case IXGBE_SUBDEV_ID_82599_SFP:
-			retval = 0;
-			break;
-		default:
-			wol->supported = 0;
-			break;
-		}
-		break;
-	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
-		/* All except this subdevice support WOL */
-		if (hw->subsystem_device_id ==
-		    IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
-			wol->supported = 0;
-			break;
-		}
-		retval = 0;
-		break;
-	case IXGBE_DEV_ID_82599_KX4:
-		retval = 0;
-		break;
-	case IXGBE_DEV_ID_X540T:
-		/* check eeprom to see if enabled wol */
-		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
-		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
-		     (hw->bus.func == 0))) {
-			retval = 0;
-			break;
-		}
-
-		/* All others not supported */
-		wol->supported = 0;
-		break;
-	default:
+	/* WOL not supported for all devices */
+	if (!ixgbe_wol_supported(adapter, hw->device_id,
+				 hw->subsystem_device_id)) {
+		retval = 1;
 		wol->supported = 0;
 	}
 
@@ -2753,6 +2696,46 @@
 	return ret;
 }
 
+static int ixgbe_get_ts_info(struct net_device *dev,
+			     struct ethtool_ts_info *info)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+	switch (adapter->hw.mac.type) {
+#ifdef CONFIG_IXGBE_PTP
+	case ixgbe_mac_X540:
+	case ixgbe_mac_82599EB:
+		info->so_timestamping =
+			SOF_TIMESTAMPING_TX_HARDWARE |
+			SOF_TIMESTAMPING_RX_HARDWARE |
+			SOF_TIMESTAMPING_RAW_HARDWARE;
+
+		if (adapter->ptp_clock)
+			info->phc_index = ptp_clock_index(adapter->ptp_clock);
+		else
+			info->phc_index = -1;
+
+		info->tx_types =
+			(1 << HWTSTAMP_TX_OFF) |
+			(1 << HWTSTAMP_TX_ON);
+
+		info->rx_filters =
+			(1 << HWTSTAMP_FILTER_NONE) |
+			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+			(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+			(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+			(1 << HWTSTAMP_FILTER_SOME);
+		break;
+#endif /* CONFIG_IXGBE_PTP */
+	default:
+		return ethtool_op_get_ts_info(dev, info);
+	}
+	return 0;
+}
+
 static const struct ethtool_ops ixgbe_ethtool_ops = {
 	.get_settings           = ixgbe_get_settings,
 	.set_settings           = ixgbe_set_settings,
@@ -2781,6 +2764,7 @@
 	.set_coalesce           = ixgbe_set_coalesce,
 	.get_rxnfc		= ixgbe_get_rxnfc,
 	.set_rxnfc		= ixgbe_set_rxnfc,
+	.get_ts_info		= ixgbe_get_ts_info,
 };
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
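
The new get_ts_info hook advertises the timestamping modes that the SIOCSHWTSTAMP handler (added in ixgbe_main.c below) will accept. A minimal user-space sketch of how that path is typically exercised, assuming a PTP-capable port named "eth0" (this is not part of the driver):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* ask the driver to timestamp all transmitted and PTPv2 event frames */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	return 0;
}
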
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 77ea4b7..bc07933 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -437,6 +437,7 @@
 	 */
 	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
 	    (fctl & FC_FC_END_SEQ)) {
+		skb_linearize(skb);
 		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
 		crc->fcoe_eof = FC_EOF_T;
 	}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ed1b47d..af1a531 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -523,11 +523,17 @@
 /**
  * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
  * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
  * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
  *
  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
  **/
-static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
+static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
+				int v_count, int v_idx,
 				int txr_count, int txr_idx,
 				int rxr_count, int rxr_idx)
 {
@@ -598,7 +604,7 @@
 
 		/* update count and index */
 		txr_count--;
-		txr_idx++;
+		txr_idx += v_count;
 
 		/* push pointer to next ring */
 		ring++;
@@ -641,7 +647,7 @@
 
 		/* update count and index */
 		rxr_count--;
-		rxr_idx++;
+		rxr_idx += v_count;
 
 		/* push pointer to next ring */
 		ring++;
@@ -700,24 +706,23 @@
 		q_vectors = 1;
 
 	if (q_vectors >= (rxr_remaining + txr_remaining)) {
-		for (; rxr_remaining; v_idx++, q_vectors--) {
-			int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
-			err = ixgbe_alloc_q_vector(adapter, v_idx,
-						   0, 0, rqpv, rxr_idx);
+		for (; rxr_remaining; v_idx++) {
+			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
+						   0, 0, 1, rxr_idx);
 
 			if (err)
 				goto err_out;
 
 			/* update counts and index */
-			rxr_remaining -= rqpv;
-			rxr_idx += rqpv;
+			rxr_remaining--;
+			rxr_idx++;
 		}
 	}
 
-	for (; q_vectors; v_idx++, q_vectors--) {
-		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
-		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
-		err = ixgbe_alloc_q_vector(adapter, v_idx,
+	for (; v_idx < q_vectors; v_idx++) {
+		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
 					   tqpv, txr_idx,
 					   rqpv, rxr_idx);
 
@@ -726,9 +731,9 @@
 
 		/* update counts and index */
 		rxr_remaining -= rqpv;
-		rxr_idx += rqpv;
 		txr_remaining -= tqpv;
-		txr_idx += tqpv;
+		rxr_idx++;
+		txr_idx++;
 	}
 
 	return 0;
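
With the extra v_count argument, rings are interleaved across vectors: each vector starts at its own index and then steps by the number of vectors (the txr_idx += v_count and rxr_idx += v_count updates above), rather than taking a contiguous block. A stand-alone illustration of the resulting assignment:

#include <stdio.h>

int main(void)
{
	int v_count = 4, ring_count = 10;

	/* vector v owns rings v, v + v_count, v + 2 * v_count, ... */
	for (int v = 0; v < v_count; v++) {
		printf("vector %d:", v);
		for (int r = v; r < ring_count; r += v_count)
			printf(" ring %d", r);
		printf("\n");
	}
	return 0;
}
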
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a7f3cd8..bf20457 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,8 +63,8 @@
 			      "Intel(R) 10 Gigabit Network Connection";
 #endif
 #define MAJ 3
-#define MIN 8
-#define BUILD 21
+#define MIN 9
+#define BUILD 15
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 	__stringify(BUILD) "-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
@@ -133,7 +133,7 @@
 static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
 MODULE_PARM_DESC(max_vfs,
-		 "Maximum number of virtual functions to allocate per physical function");
+		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
 #endif /* CONFIG_PCI_IOV */
 
 static unsigned int allow_unsupported_sfp;
@@ -610,35 +610,50 @@
 	/* tx_buffer must be completely set up in the transmit path */
 }
 
+static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_hw_stats *hwstats = &adapter->stats;
+	int i;
+	u32 data;
+
+	if ((hw->fc.current_mode != ixgbe_fc_full) &&
+	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
+		return;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+		break;
+	default:
+		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+	}
+	hwstats->lxoffrxc += data;
+
+	/* refill credits (no tx hang) if we received xoff */
+	if (!data)
+		return;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		clear_bit(__IXGBE_HANG_CHECK_ARMED,
+			  &adapter->tx_ring[i]->state);
+}
+
 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_hw_stats *hwstats = &adapter->stats;
-	u32 data = 0;
 	u32 xoff[8] = {0};
 	int i;
+	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
 
-	if ((hw->fc.current_mode == ixgbe_fc_full) ||
-	    (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
-		switch (hw->mac.type) {
-		case ixgbe_mac_82598EB:
-			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-			break;
-		default:
-			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
-		}
-		hwstats->lxoffrxc += data;
+	if (adapter->ixgbe_ieee_pfc)
+		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
 
-		/* refill credits (no tx hang) if we received xoff */
-		if (!data)
-			return;
-
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			clear_bit(__IXGBE_HANG_CHECK_ARMED,
-				  &adapter->tx_ring[i]->state);
+	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
+		ixgbe_update_xoff_rx_lfc(adapter);
 		return;
-	} else if (!(adapter->dcb_cfg.pfc_mode_enable))
-		return;
+	}
 
 	/* update stats for each tc, only valid with PFC enabled */
 	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
@@ -774,6 +789,13 @@
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
 
+#ifdef CONFIG_IXGBE_PTP
+		if (unlikely(tx_buffer->tx_flags &
+			     IXGBE_TX_FLAGS_TSTAMP))
+			ixgbe_ptp_tx_hwtstamp(q_vector,
+					      tx_buffer->skb);
+
+#endif
 		/* free the skb */
 		dev_kfree_skb_any(tx_buffer->skb);
 
@@ -1144,7 +1166,7 @@
 	 * there isn't much point in holding memory we can't use
 	 */
 	if (dma_mapping_error(rx_ring->dev, dma)) {
-		put_page(page);
+		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
 		bi->page = NULL;
 
 		rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1374,6 +1396,11 @@
 
 	ixgbe_rx_checksum(rx_ring, rx_desc, skb);
 
+#ifdef CONFIG_IXGBE_PTP
+	if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))
+		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
+#endif
+
 	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
 		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
 		__vlan_hwaccel_put_tag(skb, vid);
@@ -2295,6 +2322,9 @@
 	}
 
 	ixgbe_check_fan_failure(adapter, eicr);
+#ifdef CONFIG_IXGBE_PTP
+	ixgbe_ptp_check_pps_event(adapter, eicr);
+#endif
 
 	/* re-enable the original interrupt state, no lsc, no queues */
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2487,6 +2517,9 @@
 	}
 
 	ixgbe_check_fan_failure(adapter, eicr);
+#ifdef CONFIG_IXGBE_PTP
+	ixgbe_ptp_check_pps_event(adapter, eicr);
+#endif
 
 	/* would disable interrupts here but EIAM disabled it */
 	napi_schedule(&q_vector->napi);
@@ -2671,8 +2704,6 @@
 	/* enable queue */
 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
 
-	netdev_tx_reset_queue(txring_txq(ring));
-
 	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
 	if (hw->mac.type == ixgbe_mac_82598EB &&
 	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -2758,6 +2789,61 @@
 		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
 }
 
+static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
+				 struct ixgbe_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u8 reg_idx = ring->reg_idx;
+	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
+
+	srrctl |= IXGBE_SRRCTL_DROP_EN;
+
+	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
+}
+
+static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
+				  struct ixgbe_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u8 reg_idx = ring->reg_idx;
+	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
+
+	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
+
+	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
+}
+
+#ifdef CONFIG_IXGBE_DCB
+void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
+#else
+static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
+#endif
+{
+	int i;
+	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
+
+	if (adapter->ixgbe_ieee_pfc)
+		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
+
+	/*
+	 * We should set the drop enable bit if:
+	 *  SR-IOV is enabled
+	 *   or
+	 *  Number of Rx queues > 1 and flow control is disabled
+	 *
+	 *  This allows us to avoid head of line blocking for security
+	 *  and performance reasons.
+	 */
+	if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
+	    !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
+	} else {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
+	}
+}
+
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
 
 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
@@ -2904,33 +2990,6 @@
 	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
 }
 
-/**
- *  ixgbe_set_uta - Set unicast filter table address
- *  @adapter: board private structure
- *
- *  The unicast table address is a register array of 32-bit registers.
- *  The table is meant to be used in a way similar to how the MTA is used
- *  however due to certain limitations in the hardware it is necessary to
- *  set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
- *  enable bit to allow vlan tag stripping when promiscuous mode is enabled
- **/
-static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	int i;
-
-	/* The UTA table only exists on 82599 hardware and newer */
-	if (hw->mac.type < ixgbe_mac_82599EB)
-		return;
-
-	/* we only need to do this if VMDq is enabled */
-	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
-		return;
-
-	for (i = 0; i < 128; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
-}
-
 #define IXGBE_MAX_RX_DESC_POLL 10
 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 				       struct ixgbe_ring *ring)
@@ -3216,8 +3275,6 @@
 	/* Program registers for the distribution of queues */
 	ixgbe_setup_mrqc(adapter);
 
-	ixgbe_set_uta(adapter);
-
 	/* set_rx_buffer_len must be called before ring initialization */
 	ixgbe_set_rx_buffer_len(adapter);
 
@@ -3454,16 +3511,17 @@
 		}
 		ixgbe_vlan_filter_enable(adapter);
 		hw->addr_ctrl.user_set_promisc = false;
-		/*
-		 * Write addresses to available RAR registers, if there is not
-		 * sufficient space to store all the addresses then enable
-		 * unicast promiscuous mode
-		 */
-		count = ixgbe_write_uc_addr_list(netdev);
-		if (count < 0) {
-			fctrl |= IXGBE_FCTRL_UPE;
-			vmolr |= IXGBE_VMOLR_ROPE;
-		}
+	}
+
+	/*
+	 * Write addresses to available RAR registers, if there is not
+	 * sufficient space to store all the addresses then enable
+	 * unicast promiscuous mode
+	 */
+	count = ixgbe_write_uc_addr_list(netdev);
+	if (count < 0) {
+		fctrl |= IXGBE_FCTRL_UPE;
+		vmolr |= IXGBE_VMOLR_ROPE;
 	}
 
 	if (adapter->num_vfs) {
@@ -4130,7 +4188,8 @@
 				       DMA_FROM_DEVICE);
 		rx_buffer->dma = 0;
 		if (rx_buffer->page)
-			put_page(rx_buffer->page);
+			__free_pages(rx_buffer->page,
+				     ixgbe_rx_pg_order(rx_ring));
 		rx_buffer->page = NULL;
 	}
 
@@ -4167,6 +4226,8 @@
 		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 	}
 
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
 	memset(tx_ring->tx_buffer_info, 0, size);
 
@@ -4418,17 +4479,14 @@
 	adapter->dcb_cfg.pfc_mode_enable = false;
 	adapter->dcb_set_bitmap = 0x00;
 	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
-	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
-			   MAX_TRAFFIC_CLASS);
+	memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+	       sizeof(adapter->temp_dcb_cfg));
 
 #endif
 
 	/* default flow control settings */
 	hw->fc.requested_mode = ixgbe_fc_full;
 	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
-#ifdef CONFIG_DCB
-	adapter->last_lfc_mode = hw->fc.current_mode;
-#endif
 	ixgbe_pbthresh_setup(adapter);
 	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
 	hw->fc.send_xon = true;
@@ -4866,17 +4924,15 @@
 	netif_device_detach(netdev);
 
 	if (netif_running(netdev)) {
+		rtnl_lock();
 		ixgbe_down(adapter);
 		ixgbe_free_irq(adapter);
 		ixgbe_free_all_tx_resources(adapter);
 		ixgbe_free_all_rx_resources(adapter);
+		rtnl_unlock();
 	}
 
 	ixgbe_clear_interrupt_scheme(adapter);
-#ifdef CONFIG_DCB
-	kfree(adapter->ixgbe_ieee_pfc);
-	kfree(adapter->ixgbe_ieee_ets);
-#endif
 
 #ifdef CONFIG_PM
 	retval = pci_save_state(pdev);
@@ -4995,9 +5051,6 @@
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 		u64 rsc_count = 0;
 		u64 rsc_flush = 0;
-		for (i = 0; i < 16; i++)
-			adapter->hw_rx_no_dma_resources +=
-				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
 			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
@@ -5100,6 +5153,9 @@
 		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
 		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
 	case ixgbe_mac_82599EB:
+		for (i = 0; i < 16; i++)
+			adapter->hw_rx_no_dma_resources +=
+					     IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
 		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
 		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
@@ -5277,7 +5333,7 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 link_speed = adapter->link_speed;
 	bool link_up = adapter->link_up;
-	int i;
+	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
 
 	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
 		return;
@@ -5289,13 +5345,13 @@
 		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
 		link_up = true;
 	}
-	if (link_up) {
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-				hw->mac.ops.fc_enable(hw, i);
-		} else {
-			hw->mac.ops.fc_enable(hw, 0);
-		}
+
+	if (adapter->ixgbe_ieee_pfc)
+		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
+
+	if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
+		hw->mac.ops.fc_enable(hw);
+		ixgbe_set_rx_drop_en(adapter);
 	}
 
 	if (link_up ||
@@ -5349,6 +5405,11 @@
 		flow_rx = false;
 		break;
 	}
+
+#ifdef CONFIG_IXGBE_PTP
+	ixgbe_ptp_start_cyclecounter(adapter);
+#endif
+
 	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
 	       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
 	       "10 Gbps" :
@@ -5386,6 +5447,10 @@
 	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
 		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
 
+#ifdef CONFIG_IXGBE_PTP
+	ixgbe_ptp_start_cyclecounter(adapter);
+#endif
+
 	e_info(drv, "NIC Link is Down\n");
 	netif_carrier_off(netdev);
 }
@@ -5685,6 +5750,9 @@
 	ixgbe_watchdog_subtask(adapter);
 	ixgbe_fdir_reinit_subtask(adapter);
 	ixgbe_check_hang_subtask(adapter);
+#ifdef CONFIG_IXGBE_PTP
+	ixgbe_ptp_overflow_check(adapter);
+#endif
 
 	ixgbe_service_event_complete(adapter);
 }
@@ -5835,6 +5903,11 @@
 	if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
 		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
+#ifdef CONFIG_IXGBE_PTP
+	if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
+#endif
+
 	/* set segmentation enable bits for TSO/FSO */
 #ifdef IXGBE_FCOE
 	if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
@@ -6225,6 +6298,15 @@
 		tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
 	}
 
+	skb_tx_timestamp(skb);
+
+#ifdef CONFIG_IXGBE_PTP
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
+	}
+#endif
+
 #ifdef CONFIG_PCI_IOV
 	/*
 	 * Use the l2switch_enable flag - would be false if the DMA
@@ -6377,7 +6459,14 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
+	switch (cmd) {
+#ifdef CONFIG_IXGBE_PTP
+	case SIOCSHWTSTAMP:
+		return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
+#endif
+	default:
+		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
+	}
 }
 
 /**
@@ -6569,15 +6658,17 @@
 
 	if (tc) {
 		netdev_set_num_tc(dev, tc);
-		adapter->last_lfc_mode = adapter->hw.fc.current_mode;
 		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
 		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 
-		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+			adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
+		}
 	} else {
 		netdev_reset_tc(dev);
-		adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
 
 		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -6626,7 +6717,7 @@
 	/* Turn off LRO if not RSC capable */
 	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
 		features &= ~NETIF_F_LRO;
-	
+
 
 	return features;
 }
@@ -6685,6 +6776,74 @@
 	return 0;
 }
 
+static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
+			     struct net_device *dev,
+			     unsigned char *addr,
+			     u16 flags)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err = -EOPNOTSUPP;
+
+	if (ndm->ndm_state & NUD_PERMANENT) {
+		pr_info("%s: FDB only supports static addresses\n",
+			ixgbe_driver_name);
+		return -EINVAL;
+	}
+
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		if (is_unicast_ether_addr(addr))
+			err = dev_uc_add_excl(dev, addr);
+		else if (is_multicast_ether_addr(addr))
+			err = dev_mc_add_excl(dev, addr);
+		else
+			err = -EINVAL;
+	}
+
+	/* Only return duplicate errors if NLM_F_EXCL is set */
+	if (err == -EEXIST && !(flags & NLM_F_EXCL))
+		err = 0;
+
+	return err;
+}
+
+static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
+			     struct net_device *dev,
+			     unsigned char *addr)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err = -EOPNOTSUPP;
+
+	if (ndm->ndm_state & NUD_PERMANENT) {
+		pr_info("%s: FDB only supports static addresses\n",
+			ixgbe_driver_name);
+		return -EINVAL;
+	}
+
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		if (is_unicast_ether_addr(addr))
+			err = dev_uc_del(dev, addr);
+		else if (is_multicast_ether_addr(addr))
+			err = dev_mc_del(dev, addr);
+		else
+			err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
+			      struct netlink_callback *cb,
+			      struct net_device *dev,
+			      int idx)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+	return idx;
+}
+
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
@@ -6721,6 +6880,9 @@
 #endif /* IXGBE_FCOE */
 	.ndo_set_features = ixgbe_set_features,
 	.ndo_fix_features = ixgbe_fix_features,
+	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
+	.ndo_fdb_del		= ixgbe_ndo_fdb_del,
+	.ndo_fdb_dump		= ixgbe_ndo_fdb_dump,
 };
 
 static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
@@ -6735,14 +6897,66 @@
 	/* The 82599 supports up to 64 VFs per physical function
 	 * but this implementation limits allocation to 63 so that
 	 * basic networking resources are still available to the
-	 * physical function
+	 * physical function.  If the user requests more than
+	 * 63 VFs then it is an error - reset to the default of zero.
 	 */
-	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+	adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
 	ixgbe_enable_sriov(adapter, ii);
 #endif /* CONFIG_PCI_IOV */
 }
 
 /**
+ * ixgbe_wol_supported - Check whether device supports WoL
+ * @adapter: pointer to the adapter structure
+ * @device_id: the device ID
+ * @subdevice_id: the subsystem device ID
+ *
+ * This function is used by probe and ethtool to determine
+ * which devices have WoL support
+ *
+ **/
+int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+			u16 subdevice_id)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
+	int is_wol_supported = 0;
+
+	switch (device_id) {
+	case IXGBE_DEV_ID_82599_SFP:
+		/* Only these subdevices could support WOL */
+		switch (subdevice_id) {
+		case IXGBE_SUBDEV_ID_82599_560FLR:
+			/* only support first port */
+			if (hw->bus.func != 0)
+				break;
+		case IXGBE_SUBDEV_ID_82599_SFP:
+			is_wol_supported = 1;
+			break;
+		}
+		break;
+	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+		/* All except this subdevice support WOL */
+		if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+			is_wol_supported = 1;
+		break;
+	case IXGBE_DEV_ID_82599_KX4:
+		is_wol_supported = 1;
+		break;
+	case IXGBE_DEV_ID_X540T:
+		/* check eeprom to see if enabled wol */
+		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+		     (hw->bus.func == 0))) {
+			is_wol_supported = 1;
+		}
+		break;
+	}
+
+	return is_wol_supported;
+}
+
+/**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
  * @ent: entry in ixgbe_pci_tbl
@@ -6768,7 +6982,6 @@
 	u16 device_caps;
 #endif
 	u32 eec;
-	u16 wol_cap;
 
 	/* Catch broken hardware that put the wrong VF device ID in
 	 * the PCIe SR-IOV capability.
@@ -7032,42 +7245,18 @@
 		netdev->features &= ~NETIF_F_RXHASH;
 	}
 
-	/* WOL not supported for all but the following */
+	/* WOL not supported for all devices */
 	adapter->wol = 0;
-	switch (pdev->device) {
-	case IXGBE_DEV_ID_82599_SFP:
-		/* Only these subdevice supports WOL */
-		switch (pdev->subsystem_device) {
-		case IXGBE_SUBDEV_ID_82599_560FLR:
-			/* only support first port */
-			if (hw->bus.func != 0)
-				break;
-		case IXGBE_SUBDEV_ID_82599_SFP:
-			adapter->wol = IXGBE_WUFC_MAG;
-			break;
-		}
-		break;
-	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
-		/* All except this subdevice support WOL */
-		if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
-			adapter->wol = IXGBE_WUFC_MAG;
-		break;
-	case IXGBE_DEV_ID_82599_KX4:
+	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
+	if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device))
 		adapter->wol = IXGBE_WUFC_MAG;
-		break;
-	case IXGBE_DEV_ID_X540T:
-		/* Check eeprom to see if it is enabled */
-		hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
-		wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
 
-		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
-		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
-		     (hw->bus.func == 0)))
-			adapter->wol = IXGBE_WUFC_MAG;
-		break;
-	}
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
+#ifdef CONFIG_IXGBE_PTP
+	ixgbe_ptp_init(adapter);
+#endif /* CONFIG_IXGBE_PTP*/
+
 	/* save off EEPROM version number */
 	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
 	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
@@ -7154,6 +7343,12 @@
 
 	e_dev_info("%s\n", ixgbe_default_device_descr);
 	cards_found++;
+
+#ifdef CONFIG_IXGBE_HWMON
+	if (ixgbe_sysfs_init(adapter))
+		e_err(probe, "failed to allocate sysfs resources\n");
+#endif /* CONFIG_IXGBE_HWMON */
+
 	return 0;
 
 err_register:
@@ -7192,6 +7387,10 @@
 	set_bit(__IXGBE_DOWN, &adapter->state);
 	cancel_work_sync(&adapter->service_task);
 
+#ifdef CONFIG_IXGBE_PTP
+	ixgbe_ptp_stop(adapter);
+#endif
+
 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
@@ -7200,6 +7399,10 @@
 	}
 
 #endif
+#ifdef CONFIG_IXGBE_HWMON
+	ixgbe_sysfs_exit(adapter);
+#endif /* CONFIG_IXGBE_HWMON */
+
 #ifdef IXGBE_FCOE
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 		ixgbe_cleanup_fcoe(adapter);
@@ -7224,6 +7427,11 @@
 
 	ixgbe_release_hw_control(adapter);
 
+#ifdef CONFIG_DCB
+	kfree(adapter->ixgbe_ieee_pfc);
+	kfree(adapter->ixgbe_ieee_ets);
+
+#endif
 	iounmap(adapter->hw.hw_addr);
 	pci_release_selected_regions(pdev, pci_select_bars(pdev,
 				     IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index bf9f82f..2411770 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1582,13 +1582,21 @@
  **/
 static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
 {
-	*i2cctl |= IXGBE_I2C_CLK_OUT;
+	u32 i = 0;
+	u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
+	u32 i2cctl_r = 0;
 
-	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
-	IXGBE_WRITE_FLUSH(hw);
+	for (i = 0; i < timeout; i++) {
+		*i2cctl |= IXGBE_I2C_CLK_OUT;
+		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+		IXGBE_WRITE_FLUSH(hw);
+		/* SCL rise time (1000ns) */
+		udelay(IXGBE_I2C_T_RISE);
 
-	/* SCL rise time (1000ns) */
-	udelay(IXGBE_I2C_T_RISE);
+		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+		if (i2cctl_r & IXGBE_I2C_CLK_IN)
+			break;
+	}
 }
 
 /**
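
The reworked ixgbe_raise_i2c_clk() above implements I2C clock stretching: after releasing SCL it reads the pin back and retries until the slave stops holding the line low, bounded by IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT iterations. The general pattern, as a hedged sketch with hypothetical scl_release()/scl_read() callbacks and the kernel's udelay():

#include <linux/delay.h>
#include <linux/types.h>

/* Returns true once SCL is observed high, false if the slave kept
 * stretching the clock for the whole timeout window.
 */
static bool wait_for_scl_high(void (*scl_release)(void),
			      bool (*scl_read)(void),
			      unsigned int timeout)
{
	unsigned int i;

	for (i = 0; i < timeout; i++) {
		scl_release();		/* let the open-drain SCL float high */
		udelay(1);		/* allow for the rise time */
		if (scl_read())		/* high: slave released the clock */
			return true;
	}
	return false;
}
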
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
new file mode 100644
index 0000000..ddc6a4d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -0,0 +1,900 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+#include "ixgbe.h"
+#include <linux/export.h>
+
+/*
+ * The 82599 and the X540 do not have true 64bit nanosecond scale
+ * counter registers. Instead, SYSTIME is defined by a fixed point
+ * system which allows the user to define the scale counter increment
+ * value at every level change of the oscillator driving the SYSTIME
+ * value. For both devices the TIMINCA:IV field defines this
+ * increment. The X540 provides 31 bits for it, while the 82599
+ * provides only 24 bits. The time unit is determined by the
+ * clock frequency of the oscillator in combination with the TIMINCA
+ * register. When these devices link at 10Gb the oscillator has a
+ * period of 6.4ns. In order to convert the scale counter into
+ * nanoseconds the cyclecounter and timecounter structures are
+ * used. The SYSTIME registers need to be converted to ns values by use
+ * of only a right shift (division by power of 2). The following math
+ * determines the largest incvalue that will fit into the available
+ * bits in the TIMINCA register.
+ *
+ * PeriodWidth: Number of bits to store the clock period
+ * MaxWidth: The maximum width value of the TIMINCA register
+ * Period: The clock period for the oscillator
+ * round(): discard the fractional portion of the calculation
+ *
+ * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ]
+ *
+ * For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns
+ * For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns
+ *
+ * The period also changes based on the link speed:
+ * At 10Gb link or no link, the period remains the same.
+ * At 1Gb link, the period is multiplied by 10. (64ns)
+ * At 100Mb link, the period is multiplied by 100. (640ns)
+ *
+ * The calculated value allows us to right shift the SYSTIME register
+ * value in order to quickly convert it into a nanosecond clock,
+ * while allowing for the maximum possible adjustment value.
+ *
+ * These diagrams are only for the 10Gb link period
+ *
+ *           SYSTIMEH            SYSTIMEL
+ *       +--------------+  +--------------+
+ * X540  |      32      |  | 1 | 3 |  28  |
+ *       +--------------+  +--------------+
+ *        \________ 36 bits ______/  fract
+ *
+ *       +--------------+  +--------------+
+ * 82599 |      32      |  | 8 | 3 |  21  |
+ *       +--------------+  +--------------+
+ *        \________ 43 bits ______/  fract
+ *
+ * The 36 bit X540 SYSTIME overflows every
+ *   2^36 * 10^-9 / 60 = 1.14 minutes or 69 seconds
+ *
+ * The 43 bit 82599 SYSTIME overflows every
+ *   2^43 * 10^-9 / 3600 = 2.4 hours
+ */
+#define IXGBE_INCVAL_10GB 0x66666666
+#define IXGBE_INCVAL_1GB  0x40000000
+#define IXGBE_INCVAL_100  0x50000000
+
+#define IXGBE_INCVAL_SHIFT_10GB  28
+#define IXGBE_INCVAL_SHIFT_1GB   24
+#define IXGBE_INCVAL_SHIFT_100   21
+
+#define IXGBE_INCVAL_SHIFT_82599 7
+#define IXGBE_INCPER_SHIFT_82599 24
+#define IXGBE_MAX_TIMEADJ_VALUE  0x7FFFFFFFFFFFFFFFULL
+
+#define IXGBE_OVERFLOW_PERIOD    (HZ * 30)
+
+#ifndef NSECS_PER_SEC
+#define NSECS_PER_SEC 1000000000ULL
+#endif
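+
+/*
+ * As a worked example, each INCVAL above is simply the base clock
+ * period scaled by two to the power of the matching shift:
+ *
+ *   10Gb :  6.4 ns * 2^28 = 0x66666666 (fraction discarded)
+ *   1Gb  :   64 ns * 2^24 = 0x40000000
+ *   100Mb: 640 ns * 2^21 = 0x50000000
+ */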
+
+/**
+ * ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc - the cyclecounter structure
+ *
+ * this function reads the cyclecounter registers and is called by the
+ * cyclecounter structure used to construct a ns counter from the
+ * arbitrary fixed point registers
+ */
+static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)
+{
+	struct ixgbe_adapter *adapter =
+		container_of(cc, struct ixgbe_adapter, cc);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 stamp = 0;
+
+	stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+	stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
+
+	return stamp;
+}
+
+/**
+ * ixgbe_ptp_adjfreq
+ * @ptp - the ptp clock structure
+ * @ppb - parts per billion adjustment from base
+ *
+ * adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ */
+static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	struct ixgbe_adapter *adapter =
+		container_of(ptp, struct ixgbe_adapter, ptp_caps);
+	struct ixgbe_hw *hw = &adapter->hw;
+	u64 freq;
+	u32 diff, incval;
+	int neg_adj = 0;
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
+
+	smp_mb();
+	incval = ACCESS_ONCE(adapter->base_incval);
+
+	freq = incval;
+	freq *= ppb;
+	diff = div_u64(freq, 1000000000ULL);
+
+	incval = neg_adj ? (incval - diff) : (incval + diff);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_X540:
+		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
+		break;
+	case ixgbe_mac_82599EB:
+		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
+				(1 << IXGBE_INCPER_SHIFT_82599) |
+				incval);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
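+
+/*
+ * Worked example of the adjustment above: on the X540 at 10Gb link,
+ * base_incval is 0x66666666, so a request of ppb = 1000 (1 ppm)
+ * yields diff = 0x66666666 * 1000 / 10^9 = 1717 and TIMINCA is
+ * rewritten with incval +/- 1717.
+ */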
+
+/**
+ * ixgbe_ptp_adjtime
+ * @ptp - the ptp clock structure
+ * @delta - offset to adjust the cycle counter by
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct ixgbe_adapter *adapter =
+		container_of(ptp, struct ixgbe_adapter, ptp_caps);
+	unsigned long flags;
+	u64 now;
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+	now = timecounter_read(&adapter->tc);
+	now += delta;
+
+	/* reset the timecounter */
+	timecounter_init(&adapter->tc,
+			 &adapter->cc,
+			 now);
+
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+	return 0;
+}
+
+/**
+ * ixgbe_ptp_gettime
+ * @ptp - the ptp clock structure
+ * @ts - timespec structure to hold the current time value
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec.
+ */
+static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	struct ixgbe_adapter *adapter =
+		container_of(ptp, struct ixgbe_adapter, ptp_caps);
+	u64 ns;
+	u32 remainder;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+	ns = timecounter_read(&adapter->tc);
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+	ts->tv_nsec = remainder;
+
+	return 0;
+}
+
+/**
+ * ixgbe_ptp_settime
+ * @ptp - the ptp clock structure
+ * @ts - the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
+			     const struct timespec *ts)
+{
+	struct ixgbe_adapter *adapter =
+		container_of(ptp, struct ixgbe_adapter, ptp_caps);
+	u64 ns;
+	unsigned long flags;
+
+	ns = ts->tv_sec * 1000000000ULL;
+	ns += ts->tv_nsec;
+
+	/* reset the timecounter */
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+	timecounter_init(&adapter->tc, &adapter->cc, ns);
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	return 0;
+}
+
+/**
+ * ixgbe_ptp_enable
+ * @ptp - the ptp clock structure
+ * @rq - the requested feature to change
+ * @on - whether to enable or disable the feature
+ *
+ * enable (or disable) ancillary features of the phc subsystem.
+ * our driver only supports the PPS feature on the X540
+ */
+static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
+			    struct ptp_clock_request *rq, int on)
+{
+	struct ixgbe_adapter *adapter =
+		container_of(ptp, struct ixgbe_adapter, ptp_caps);
+
+	/**
+	 * When PPS is enabled, unmask the interrupt for the ClockOut
+	 * feature, so that the interrupt handler can send the PPS
+	 * event when the clock SDP triggers. Clear mask when PPS is
+	 * disabled
+	 */
+	if (rq->type == PTP_CLK_REQ_PPS) {
+		switch (adapter->hw.mac.type) {
+		case ixgbe_mac_X540:
+			if (on)
+				adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
+			else
+				adapter->flags2 &=
+					~IXGBE_FLAG2_PTP_PPS_ENABLED;
+			return 0;
+		default:
+			break;
+		}
+	}
+
+	return -ENOTSUPP;
+}
+
+/**
+ * ixgbe_ptp_check_pps_event
+ * @adapter - the private adapter structure
+ * @eicr - the interrupt cause register value
+ *
+ * This function is called by the interrupt routine when checking for
+ * interrupts. It will check and handle a pps event.
+ */
+void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ptp_clock_event event;
+
+	event.type = PTP_CLOCK_PPS;
+
+	/* Make sure ptp clock is valid, and PPS event enabled */
+	if (!adapter->ptp_clock ||
+	    !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
+		return;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_X540:
+		if (eicr & IXGBE_EICR_TIMESYNC)
+			ptp_clock_event(adapter->ptp_clock, &event);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * ixgbe_ptp_enable_sdp
+ * @hw - the hardware private structure
+ * @shift - the clock shift for calculating nanoseconds
+ *
+ * this function enables the clock out feature on the sdp0 for the
+ * X540 device. It will create a 1 second periodic output that can be
+ * used as the PPS (via an interrupt).
+ *
+ * It calculates when the systime will be on an exact second, and then
+ * aligns the start of the PPS signal to that value. The shift is
+ * passed in because its value changes with the link speed.
+ */
+static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)
+{
+	u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh;
+	u64 clock_edge = 0;
+	u32 rem;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_X540:
+		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+		/*
+		 * enable the SDP0 pin as output, and connected to the native
+		 * function for Timesync (ClockOut)
+		 */
+		esdp |= (IXGBE_ESDP_SDP0_DIR |
+			 IXGBE_ESDP_SDP0_NATIVE);
+
+		/*
+		 * enable the Clock Out feature on SDP0, and allow interrupts
+		 * to occur when the pin changes
+		 */
+		tsauxc = (IXGBE_TSAUXC_EN_CLK |
+			  IXGBE_TSAUXC_SYNCLK |
+			  IXGBE_TSAUXC_SDP0_INT);
+
+		/* clock period (or pulse length) */
+		clktiml = (u32)(NSECS_PER_SEC << shift);
+		clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);
+
+		clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+		clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
+
+		/*
+		 * account for the fact that we can't do u64 division
+		 * with remainder, by converting the clock values into
+		 * nanoseconds first
+		 */
+		clock_edge >>= shift;
+		div_u64_rem(clock_edge, NSECS_PER_SEC, &rem);
+		clock_edge += (NSECS_PER_SEC - rem);
+		clock_edge <<= shift;
+
+		/* specify the initial clock start time */
+		trgttiml = (u32)clock_edge;
+		trgttimh = (u32)(clock_edge >> 32);
+
+		IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml);
+		IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh);
+		IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
+		IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);
+
+		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+
+		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_TIMESYNC);
+		break;
+	default:
+		break;
+	}
+}
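+
+/*
+ * Worked example of the edge alignment above: if SYSTIME currently
+ * converts to 12.3 s, then rem = 0.3 * 10^9 ns and the first target
+ * edge is rounded up to exactly 13.0 s before being shifted back
+ * into SYSTIME units and written to TRGTTIML0/TRGTTIMH0.
+ */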
+
+/**
+ * ixgbe_ptp_disable_sdp
+ * @hw - the private hardware structure
+ *
+ * this function disables the auxiliary SDP clock out feature
+ */
+static void ixgbe_ptp_disable_sdp(struct ixgbe_hw *hw)
+{
+	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_TIMESYNC);
+	IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0);
+}
+
+/**
+ * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow
+ * @work: structure containing information about this work task
+ *
+ * this work function is scheduled to continue reading the timecounter
+ * so that a wrap of the system time registers is not missed. This
+ * needs to be run approximately twice a minute when no
+ * PTP activity is occurring.
+ */
+void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
+{
+	unsigned long elapsed_jiffies = jiffies - adapter->last_overflow_check;
+	struct timespec ts;
+
+	if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) &&
+	    (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
+		ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
+		adapter->last_overflow_check = jiffies;
+	}
+}
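+
+/*
+ * Note that IXGBE_OVERFLOW_PERIOD (30 seconds) is well below the
+ * roughly 69 seconds in which the 36 bit X540 SYSTIME wraps, so the
+ * timecounter is read at least once per wrap of the registers.
+ */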
+
+/**
+ * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: particular skb to send timestamp with
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
+			   struct sk_buff *skb)
+{
+	struct ixgbe_adapter *adapter;
+	struct ixgbe_hw *hw;
+	struct skb_shared_hwtstamps shhwtstamps;
+	u64 regval = 0, ns;
+	u32 tsynctxctl;
+	unsigned long flags;
+
+	/* we cannot process timestamps on a ring without a q_vector */
+	if (!q_vector || !q_vector->adapter)
+		return;
+
+	adapter = q_vector->adapter;
+	hw = &adapter->hw;
+
+	tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;
+
+	/*
+	 * if TX timestamp is not valid, exit after clearing the
+	 * timestamp registers
+	 */
+	if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
+		return;
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+	ns = timecounter_cyc2time(&adapter->tc, regval);
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+	shhwtstamps.hwtstamp = ns_to_ktime(ns);
+	skb_tstamp_tx(skb, &shhwtstamps);
+}
+
+/**
+ * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: particular skb to send timestamp with
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+			   struct sk_buff *skb)
+{
+	struct ixgbe_adapter *adapter;
+	struct ixgbe_hw *hw;
+	struct skb_shared_hwtstamps *shhwtstamps;
+	u64 regval = 0, ns;
+	u32 tsyncrxctl;
+	unsigned long flags;
+
+	/* we cannot process timestamps on a ring without a q_vector */
+	if (!q_vector || !q_vector->adapter)
+		return;
+
+	adapter = q_vector->adapter;
+	hw = &adapter->hw;
+
+	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
+
+	/*
+	 * If this bit is set, then the RX registers contain the time stamp. No
+	 * other packet will be time stamped until we read these registers, so
+	 * read the registers to make them available again. Because only one
+	 * packet can be time stamped at a time, we know that the register
+	 * values must belong to this one here and therefore we don't need to
+	 * compare any of the additional attributes stored for it.
+	 *
+	 * If nothing went wrong, then it should have a skb_shared_tx that we
+	 * can turn into a skb_shared_hwtstamps.
+	 */
+	if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
+		return;
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+	ns = timecounter_cyc2time(&adapter->tc, regval);
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+	shhwtstamps = skb_hwtstamps(skb);
+	shhwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
+ * @adapter: pointer to adapter struct
+ * @ifreq: ioctl data
+ * @cmd: particular ioctl requested
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ */
+int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
+			     struct ifreq *ifr, int cmd)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct hwtstamp_config config;
+	u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
+	u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
+	u32 tsync_rx_mtrl = 0;
+	bool is_l4 = false;
+	bool is_l2 = false;
+	u32 regval;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		tsync_tx_ctl = 0;
+	case HWTSTAMP_TX_ON:
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		tsync_rx_ctl = 0;
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
+		tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
+		is_l4 = true;
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
+		tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
+		is_l4 = true;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
+		tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG;
+		is_l2 = true;
+		is_l4 = true;
+		config.rx_filter = HWTSTAMP_FILTER_SOME;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
+		tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG;
+		is_l2 = true;
+		is_l4 = true;
+		config.rx_filter = HWTSTAMP_FILTER_SOME;
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		is_l2 = true;
+		is_l4 = true;
+		break;
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_ALL:
+	default:
+		/*
+		 * register RXMTRL must be set, therefore it is not
+		 * possible to time stamp both V1 Sync and Delay_Req messages
+		 * and hardware does not support timestamping all packets
+		 * => return error
+		 */
+		return -ERANGE;
+	}
+
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		if (tsync_rx_ctl | tsync_tx_ctl)
+			return -ERANGE;
+		return 0;
+	}
+
+	/* define ethertype filter for timestamped packets */
+	if (is_l2)
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
+				(IXGBE_ETQF_FILTER_EN | /* enable filter */
+				 IXGBE_ETQF_1588 | /* enable timestamping */
+				 ETH_P_1588));     /* 1588 eth protocol type */
+	else
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0);
+
+#define PTP_PORT 319
+	/* L4 Queue Filter[3]: filter by destination port and protocol */
+	if (is_l4) {
+		u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */
+			    | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */
+			    | IXGBE_FTQF_QUEUE_ENABLE);
+
+		ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */
+			  & IXGBE_FTQF_DEST_PORT_MASK /* dest check */
+			  & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */
+			 << IXGBE_FTQF_5TUPLE_MASK_SHIFT);
+
+		IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3),
+				(3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 |
+				 IXGBE_IMIR_SIZE_BP_82599));
+
+		/* enable port check */
+		IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3),
+				(htons(PTP_PORT) |
+				 htons(PTP_PORT) << 16));
+
+		IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf);
+
+		tsync_rx_mtrl |= PTP_PORT << 16;
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0);
+	}
+
+	/* enable/disable TX */
+	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+	regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
+	regval |= tsync_tx_ctl;
+	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval);
+
+	/* enable/disable RX */
+	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+	regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK);
+	regval |= tsync_rx_ctl;
+	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval);
+
+	/* define which PTP packets are time stamped */
+	IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl);
+
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* clear TX/RX time stamp registers, just to be sure */
+	regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
+	regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
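+
+/*
+ * For reference, user space reaches this handler through the
+ * SIOCSHWTSTAMP ioctl (see linux/net_tstamp.h). A minimal sketch,
+ * where the socket and the interface name "eth0" are only
+ * placeholders and error handling is omitted:
+ *
+ *	struct hwtstamp_config cfg = { 0 };
+ *	struct ifreq ifr = { 0 };
+ *
+ *	cfg.tx_type = HWTSTAMP_TX_ON;
+ *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ *	ifr.ifr_data = (void *)&cfg;
+ *	ioctl(sock, SIOCSHWTSTAMP, &ifr);
+ *
+ * The config written back by copy_to_user() reports the rx_filter
+ * that was actually applied, which may differ from the one requested
+ * (e.g. HWTSTAMP_FILTER_SOME).
+ */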
+
+/**
+ * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
+ * @adapter - pointer to the adapter structure
+ *
+ * this function initializes the timecounter and cyclecounter
+ * structures for use in generating a ns counter from the arbitrary
+ * fixed point cycles registers in the hardware.
+ *
+ * A change in link speed impacts the frequency of the DMA clock on
+ * the device, which is used to generate the cycle counter
+ * registers. Therefore this function is called whenever the link speed
+ * changes.
+ *
+ * This function also turns on the SDP pin for clock out feature (X540
+ * only), because this is where the shift is first calculated.
+ */
+void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 incval = 0;
+	u32 shift = 0;
+	u32 cycle_speed;
+	unsigned long flags;
+
+	/**
+	 * Determine what speed we need to set the cyclecounter
+	 * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
+	 * unknown speeds as 10Gb. (Hence we can't just copy the
+	 * link_speed.)
+	 */
+	switch (adapter->link_speed) {
+	case IXGBE_LINK_SPEED_100_FULL:
+	case IXGBE_LINK_SPEED_1GB_FULL:
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		cycle_speed = adapter->link_speed;
+		break;
+	default:
+		/* cycle speed should be 10Gb when there is no link */
+		cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
+		break;
+	}
+
+	/* Bail if the cycle speed didn't change */
+	if (adapter->cycle_speed == cycle_speed)
+		return;
+
+	/* disable the SDP clock out */
+	ixgbe_ptp_disable_sdp(hw);
+
+	/**
+	 * Scale the NIC cycle counter by a large factor so that
+	 * relatively small corrections to the frequency can be added
+	 * or subtracted. The drawbacks of a large factor include
+	 * (a) the clock register overflows more quickly, (b) the cycle
+	 * counter structure must be able to convert the systime value
+	 * to nanoseconds using only a multiplier and a right-shift,
+	 * and (c) the value must fit within the timinca register space
+	 * => math based on internal DMA clock rate and available bits
+	 */
+	switch (cycle_speed) {
+	case IXGBE_LINK_SPEED_100_FULL:
+		incval = IXGBE_INCVAL_100;
+		shift = IXGBE_INCVAL_SHIFT_100;
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		incval = IXGBE_INCVAL_1GB;
+		shift = IXGBE_INCVAL_SHIFT_1GB;
+		break;
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		incval = IXGBE_INCVAL_10GB;
+		shift = IXGBE_INCVAL_SHIFT_10GB;
+		break;
+	}
+
+	/**
+	 * Modify the calculated values to fit within the correct
+	 * number of bits specified by the hardware. The 82599 doesn't
+	 * have the same space as the X540, so bitshift the calculated
+	 * values to fit.
+	 */
+	switch (hw->mac.type) {
+	case ixgbe_mac_X540:
+		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
+		break;
+	case ixgbe_mac_82599EB:
+		incval >>= IXGBE_INCVAL_SHIFT_82599;
+		shift -= IXGBE_INCVAL_SHIFT_82599;
+		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
+				(1 << IXGBE_INCPER_SHIFT_82599) |
+				incval);
+		break;
+	default:
+		/* other devices aren't supported */
+		return;
+	}
+
+	/* reset the system time registers */
+	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
+	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* now that the shift has been calculated and the systime
+	 * registers reset, (re-)enable the Clock out feature */
+	ixgbe_ptp_enable_sdp(hw, shift);
+
+	/* store the new cycle speed */
+	adapter->cycle_speed = cycle_speed;
+
+	ACCESS_ONCE(adapter->base_incval) = incval;
+	smp_mb();
+
+	/* grab the ptp lock */
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+	memset(&adapter->cc, 0, sizeof(adapter->cc));
+	adapter->cc.read = ixgbe_ptp_read;
+	adapter->cc.mask = CLOCKSOURCE_MASK(64);
+	adapter->cc.shift = shift;
+	adapter->cc.mult = 1;
+
+	/* reset the ns time counter */
+	timecounter_init(&adapter->tc, &adapter->cc,
+			 ktime_to_ns(ktime_get_real()));
+
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+}
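+
+/*
+ * Sanity check of the mult/shift pair chosen above: on the X540 at
+ * 10Gb link SYSTIME advances by 0x66666666 per 6.4 ns clock period,
+ * and the cyclecounter converts that back as 0x66666666 / 2^28 =
+ * 6.4 ns (minus a tiny truncated fraction), so the timecounter
+ * advances in real nanoseconds.
+ */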
+
+/**
+ * ixgbe_ptp_init
+ * @adapter - the ixgbe private adapter structure
+ *
+ * This function performs the required steps for enabling ptp
+ * support. If ptp support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_X540:
+		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+		adapter->ptp_caps.owner = THIS_MODULE;
+		adapter->ptp_caps.max_adj = 250000000;
+		adapter->ptp_caps.n_alarm = 0;
+		adapter->ptp_caps.n_ext_ts = 0;
+		adapter->ptp_caps.n_per_out = 0;
+		adapter->ptp_caps.pps = 1;
+		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
+		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
+		adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
+		adapter->ptp_caps.settime = ixgbe_ptp_settime;
+		adapter->ptp_caps.enable = ixgbe_ptp_enable;
+		break;
+	case ixgbe_mac_82599EB:
+		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+		adapter->ptp_caps.owner = THIS_MODULE;
+		adapter->ptp_caps.max_adj = 250000000;
+		adapter->ptp_caps.n_alarm = 0;
+		adapter->ptp_caps.n_ext_ts = 0;
+		adapter->ptp_caps.n_per_out = 0;
+		adapter->ptp_caps.pps = 0;
+		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq;
+		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
+		adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
+		adapter->ptp_caps.settime = ixgbe_ptp_settime;
+		adapter->ptp_caps.enable = ixgbe_ptp_enable;
+		break;
+	default:
+		adapter->ptp_clock = NULL;
+		return;
+	}
+
+	spin_lock_init(&adapter->tmreg_lock);
+
+	ixgbe_ptp_start_cyclecounter(adapter);
+
+	/* (Re)start the overflow check */
+	adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
+
+	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps);
+	if (IS_ERR(adapter->ptp_clock)) {
+		adapter->ptp_clock = NULL;
+		e_dev_err("ptp_clock_register failed\n");
+	} else
+		e_dev_info("registered PHC device on %s\n", netdev->name);
+
+	return;
+}
+
+/**
+ * ixgbe_ptp_stop - disable ptp device and stop the overflow check
+ * @adapter: pointer to adapter struct
+ *
+ * this function stops the ptp support, and cancels the delayed work.
+ */
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+{
+	ixgbe_ptp_disable_sdp(&adapter->hw);
+
+	/* stop the overflow check task */
+	adapter->flags2 &= ~IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
+
+	if (adapter->ptp_clock) {
+		ptp_clock_unregister(adapter->ptp_clock);
+		adapter->ptp_clock = NULL;
+		e_dev_info("removed PHC on %s\n",
+			   adapter->netdev->name);
+	}
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 88a58cb..2d971d1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -544,13 +544,18 @@
 
 	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
 
-	if (retval)
+	if (retval) {
 		pr_err("Error receiving message from VF\n");
+		return retval;
+	}
 
 	/* this is a message we already processed, do nothing */
 	if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
 		return retval;
 
+	/* flush the ack before we write any messages back */
+	IXGBE_WRITE_FLUSH(hw);
+
 	/*
 	 * until the vf completes a virtual function reset it should not be
 	 * allowed to start any configuration.
@@ -637,6 +642,12 @@
 	case IXGBE_VF_SET_MACVLAN:
 		index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
 			IXGBE_VT_MSGINFO_SHIFT;
+		if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+			e_warn(drv, "VF %d requested MACVLAN filter but is "
+				    "administratively denied\n", vf);
+			retval = -1;
+			break;
+		}
 		/*
 		 * If the VF is allowed to set MAC filters then turn off
 		 * anti-spoofing to avoid false positives.  An index
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
new file mode 100644
index 0000000..1d80b1c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -0,0 +1,245 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "ixgbe.h"
+#include "ixgbe_common.h"
+#include "ixgbe_type.h"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/hwmon.h>
+
+#ifdef CONFIG_IXGBE_HWMON
+/* hwmon callback functions */
+static ssize_t ixgbe_hwmon_show_location(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	return sprintf(buf, "loc%u\n",
+		       ixgbe_attr->sensor->location);
+}
+
+static ssize_t ixgbe_hwmon_show_temp(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	unsigned int value;
+
+	/* reset the temp field */
+	ixgbe_attr->hw->mac.ops.get_thermal_sensor_data(ixgbe_attr->hw);
+
+	value = ixgbe_attr->sensor->temp;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	unsigned int value = ixgbe_attr->sensor->caution_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
+						     dev_attr);
+	unsigned int value = ixgbe_attr->sensor->max_op_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+/*
+ * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute.
+ * This is included in our hwmon_attr struct, which contains the references to
+ * the data structures we need to get the data to display.
+ */
+static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
+				unsigned int offset, int type)
+{
+	int rc;
+	unsigned int n_attr;
+	struct hwmon_attr *ixgbe_attr;
+
+	n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
+	ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];
+
+	switch (type) {
+	case IXGBE_HWMON_TYPE_LOC:
+		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
+		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
+			 "temp%u_label", offset);
+		break;
+	case IXGBE_HWMON_TYPE_TEMP:
+		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
+		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
+			 "temp%u_input", offset);
+		break;
+	case IXGBE_HWMON_TYPE_CAUTION:
+		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
+		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
+			 "temp%u_max", offset);
+		break;
+	case IXGBE_HWMON_TYPE_MAX:
+		ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
+		snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
+			 "temp%u_crit", offset);
+		break;
+	default:
+		rc = -EPERM;
+		return rc;
+	}
+
+	/* These are always the same regardless of type */
+	ixgbe_attr->sensor =
+		&adapter->hw.mac.thermal_sensor_data.sensor[offset];
+	ixgbe_attr->hw = &adapter->hw;
+	ixgbe_attr->dev_attr.store = NULL;
+	ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
+	ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
+
+	rc = device_create_file(&adapter->pdev->dev,
+				&ixgbe_attr->dev_attr);
+
+	if (rc == 0)
+		++adapter->ixgbe_hwmon_buff.n_hwmon;
+
+	return rc;
+}
+
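+/*
+ * Each sensor for which these attributes are created ends up with
+ * four read-only files under the PCI device's sysfs directory:
+ * temp%u_label, temp%u_input, temp%u_max and temp%u_crit, where %u
+ * is the sensor offset.
+ */
+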
+static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
+{
+	int i;
+
+	if (adapter == NULL)
+		return;
+
+	for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
+		device_remove_file(&adapter->pdev->dev,
+			   &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
+	}
+
+	kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
+
+	if (adapter->ixgbe_hwmon_buff.device)
+		hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
+}
+
+/* called from ixgbe_main.c */
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
+{
+	ixgbe_sysfs_del_adapter(adapter);
+}
+
+/* called from ixgbe_main.c */
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
+{
+	struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
+	unsigned int i;
+	int n_attrs;
+	int rc = 0;
+
+	/* If this method isn't defined we don't support thermals */
+	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) {
+		goto exit;
+	}
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
+		goto exit;
+
+	/*
+	 * Allocate space for the maximum number of attributes:
+	 * max num sensors * values (loc, temp, max, caution)
+	 */
+	n_attrs = IXGBE_MAX_SENSORS * 4;
+	ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+					  GFP_KERNEL);
+	if (!ixgbe_hwmon->hwmon_list) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
+	if (IS_ERR(ixgbe_hwmon->device)) {
+		rc = PTR_ERR(ixgbe_hwmon->device);
+		goto err;
+	}
+
+	for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
+		/*
+		 * Only create hwmon sysfs entries for sensors that have
+		 * meaningful data.
+		 */
+		if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+			continue;
+
+		/* Bail if any hwmon attr struct fails to initialize */
+		rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
+		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
+		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
+		rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
+		if (rc)
+			goto err;
+	}
+
+	goto exit;
+
+err:
+	ixgbe_sysfs_del_adapter(adapter);
+exit:
+	return rc;
+}
+#endif /* CONFIG_IXGBE_HWMON */
+
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8636e83..204848d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -110,6 +110,28 @@
 #define IXGBE_I2C_CLK_OUT   0x00000002
 #define IXGBE_I2C_DATA_IN   0x00000004
 #define IXGBE_I2C_DATA_OUT  0x00000008
+#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT	500
+
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
+#define IXGBE_EMC_INTERNAL_DATA		0x00
+#define IXGBE_EMC_INTERNAL_THERM_LIMIT	0x20
+#define IXGBE_EMC_DIODE1_DATA		0x01
+#define IXGBE_EMC_DIODE1_THERM_LIMIT	0x19
+#define IXGBE_EMC_DIODE2_DATA		0x23
+#define IXGBE_EMC_DIODE2_THERM_LIMIT	0x1A
+
+#define IXGBE_MAX_SENSORS		3
+
+struct ixgbe_thermal_diode_data {
+	u8 location;
+	u8 temp;
+	u8 caution_thresh;
+	u8 max_op_thresh;
+};
+
+struct ixgbe_thermal_sensor_data {
+	struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
+};
 
 /* Interrupt Registers */
 #define IXGBE_EICR      0x00800
@@ -802,6 +824,8 @@
 #define IXGBE_TRGTTIMH0  0x08C28 /* Target Time Register 0 High - RW */
 #define IXGBE_TRGTTIML1  0x08C2C /* Target Time Register 1 Low - RW */
 #define IXGBE_TRGTTIMH1  0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_CLKTIML    0x08C34 /* Clock Out Time Register Low - RW */
+#define IXGBE_CLKTIMH    0x08C38 /* Clock Out Time Register High - RW */
 #define IXGBE_FREQOUT0   0x08C34 /* Frequency Out 0 Control register - RW */
 #define IXGBE_FREQOUT1   0x08C38 /* Frequency Out 1 Control register - RW */
 #define IXGBE_AUXSTMPL0  0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
@@ -1287,6 +1311,7 @@
 #define IXGBE_EICR_LINKSEC      0x00200000 /* PN Threshold */
 #define IXGBE_EICR_MNG          0x00400000 /* Manageability Event Interrupt */
 #define IXGBE_EICR_TS           0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_TIMESYNC     0x01000000 /* Timesync Event */
 #define IXGBE_EICR_GPI_SDP0     0x01000000 /* Gen Purpose Interrupt on SDP0 */
 #define IXGBE_EICR_GPI_SDP1     0x02000000 /* Gen Purpose Interrupt on SDP1 */
 #define IXGBE_EICR_GPI_SDP2     0x04000000 /* Gen Purpose Interrupt on SDP2 */
@@ -1304,6 +1329,7 @@
 #define IXGBE_EICS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
 #define IXGBE_EICS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
 #define IXGBE_EICS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
+#define IXGBE_EICS_TIMESYNC     IXGBE_EICR_TIMESYNC  /* Timesync Event */
 #define IXGBE_EICS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
 #define IXGBE_EICS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
 #define IXGBE_EICS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
@@ -1322,6 +1348,7 @@
 #define IXGBE_EIMS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
 #define IXGBE_EIMS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
 #define IXGBE_EIMS_TS           IXGBE_EICR_TS        /* Thermel Sensor Event */
+#define IXGBE_EIMS_TIMESYNC     IXGBE_EICR_TIMESYNC  /* Timesync Event */
 #define IXGBE_EIMS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
 #define IXGBE_EIMS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
 #define IXGBE_EIMS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
@@ -1339,6 +1366,7 @@
 #define IXGBE_EIMC_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
 #define IXGBE_EIMC_LSC          IXGBE_EICR_LSC       /* Link Status Change */
 #define IXGBE_EIMC_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
+#define IXGBE_EIMC_TIMESYNC     IXGBE_EICR_TIMESYNC  /* Timesync Event */
 #define IXGBE_EIMC_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
 #define IXGBE_EIMC_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
 #define IXGBE_EIMC_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
@@ -1479,8 +1507,10 @@
 #define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
 #define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
 #define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
+#define IXGBE_ESDP_SDP0_DIR     0x00000100 /* SDP0 IO direction */
 #define IXGBE_ESDP_SDP4_DIR     0x00000004 /* SDP4 IO direction */
 #define IXGBE_ESDP_SDP5_DIR     0x00002000 /* SDP5 IO direction */
+#define IXGBE_ESDP_SDP0_NATIVE  0x00010000 /* SDP0 Native Function */
 
 /* LEDCTL Bit Masks */
 #define IXGBE_LED_IVRT_BASE      0x00000040
@@ -1677,11 +1707,29 @@
 #define IXGBE_PBANUM0_PTR       0x15
 #define IXGBE_PBANUM1_PTR       0x16
 #define IXGBE_FREE_SPACE_PTR    0X3E
+
+/* External Thermal Sensor Config */
+#define IXGBE_ETS_CFG                   0x26
+#define IXGBE_ETS_LTHRES_DELTA_MASK     0x07C0
+#define IXGBE_ETS_LTHRES_DELTA_SHIFT    6
+#define IXGBE_ETS_TYPE_MASK             0x0038
+#define IXGBE_ETS_TYPE_SHIFT            3
+#define IXGBE_ETS_TYPE_EMC              0x000
+#define IXGBE_ETS_TYPE_EMC_SHIFTED      0x000
+#define IXGBE_ETS_NUM_SENSORS_MASK      0x0007
+#define IXGBE_ETS_DATA_LOC_MASK         0x3C00
+#define IXGBE_ETS_DATA_LOC_SHIFT        10
+#define IXGBE_ETS_DATA_INDEX_MASK       0x0300
+#define IXGBE_ETS_DATA_INDEX_SHIFT      8
+#define IXGBE_ETS_DATA_HTHRESH_MASK     0x00FF
+
 #define IXGBE_SAN_MAC_ADDR_PTR  0x28
 #define IXGBE_DEVICE_CAPS       0x2C
 #define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
 #define IXGBE_PCIE_MSIX_82599_CAPS  0x72
+#define IXGBE_MAX_MSIX_VECTORS_82599	0x40
 #define IXGBE_PCIE_MSIX_82598_CAPS  0x62
+#define IXGBE_MAX_MSIX_VECTORS_82598	0x13
 
 /* MSI-X capability fields masks */
 #define IXGBE_PCIE_MSIX_TBL_SZ_MASK     0x7FF
@@ -1839,6 +1887,40 @@
 #define IXGBE_RXDCTL_RLPML_EN   0x00008000
 #define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */
 
+#define IXGBE_TSAUXC_EN_CLK   0x00000004
+#define IXGBE_TSAUXC_SYNCLK   0x00000008
+#define IXGBE_TSAUXC_SDP0_INT 0x00000040
+
+#define IXGBE_TSYNCTXCTL_VALID		0x00000001 /* Tx timestamp valid */
+#define IXGBE_TSYNCTXCTL_ENABLED	0x00000010 /* Tx timestamping enabled */
+
+#define IXGBE_TSYNCRXCTL_VALID		0x00000001 /* Rx timestamp valid */
+#define IXGBE_TSYNCRXCTL_TYPE_MASK	0x0000000E /* Rx type mask */
+#define IXGBE_TSYNCRXCTL_TYPE_L2_V2	0x00
+#define IXGBE_TSYNCRXCTL_TYPE_L4_V1	0x02
+#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2	0x04
+#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2	0x0A
+#define IXGBE_TSYNCRXCTL_ENABLED	0x00000010 /* Rx Timestamping enabled */
+
+#define IXGBE_RXMTRL_V1_CTRLT_MASK	0x000000FF
+#define IXGBE_RXMTRL_V1_SYNC_MSG	0x00
+#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG	0x01
+#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG	0x02
+#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG	0x03
+#define IXGBE_RXMTRL_V1_MGMT_MSG	0x04
+
+#define IXGBE_RXMTRL_V2_MSGID_MASK		0x0000FF00
+#define IXGBE_RXMTRL_V2_SYNC_MSG		0x0000
+#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG		0x0100
+#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG		0x0200
+#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG		0x0300
+#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG		0x0800
+#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG		0x0900
+#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG	0x0A00
+#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG		0x0B00
+#define IXGBE_RXMTRL_V2_SIGNALING_MSG		0x0C00
+#define IXGBE_RXMTRL_V2_MGMT_MSG		0x0D00
+
 #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
 #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
 #define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
@@ -1852,7 +1934,7 @@
 #define IXGBE_MFLCN_DPF         0x00000002 /* Discard Pause Frame */
 #define IXGBE_MFLCN_RPFCE       0x00000004 /* Receive Priority FC Enable */
 #define IXGBE_MFLCN_RFCE        0x00000008 /* Receive FC Enable */
-#define IXGBE_MFLCN_RPFCE_MASK	0x00000FF0 /* Receive FC Mask */
+#define IXGBE_MFLCN_RPFCE_MASK	0x00000FF4 /* Receive FC Mask */
 
 #define IXGBE_MFLCN_RPFCE_SHIFT		 4
 
@@ -1968,6 +2050,7 @@
 #define IXGBE_RXDADV_STAT_FCSTAT_NODDP  0x00000010 /* 01: Ctxt w/o DDP */
 #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
 #define IXGBE_RXDADV_STAT_FCSTAT_DDP    0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXDADV_STAT_TS		0x00010000 /* IEEE 1588 Time Stamp */
 
 /* PSRTYPE bit definitions */
 #define IXGBE_PSRTYPE_TCPHDR    0x00000010
@@ -2245,6 +2328,7 @@
 /* Adv Transmit Descriptor Config Masks */
 #define IXGBE_ADVTXD_DTALEN_MASK      0x0000FFFF /* Data buf length(bytes) */
 #define IXGBE_ADVTXD_MAC_LINKSEC      0x00040000 /* Insert LinkSec */
+#define IXGBE_ADVTXD_MAC_TSTAMP	      0x00080000 /* IEEE 1588 Time Stamp */
 #define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK   0x000003FF /* IPSec SA index */
 #define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK    0x000001FF /* IPSec ESP length */
 #define IXGBE_ADVTXD_DTYP_MASK  0x00F00000 /* DTYP mask */
@@ -2533,9 +2617,6 @@
 	ixgbe_fc_rx_pause,
 	ixgbe_fc_tx_pause,
 	ixgbe_fc_full,
-#ifdef CONFIG_DCB
-	ixgbe_fc_pfc,
-#endif
 	ixgbe_fc_default
 };
 
@@ -2768,10 +2849,12 @@
 	void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
 
 	/* Flow Control */
-	s32 (*fc_enable)(struct ixgbe_hw *, s32);
+	s32 (*fc_enable)(struct ixgbe_hw *);
 
 	/* Manageability interface */
 	s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+	s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
+	s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
 };
 
 struct ixgbe_phy_operations {
@@ -2813,6 +2896,7 @@
 	u16                             wwnn_prefix;
 	/* prefix for World Wide Port Name (WWPN) */
 	u16                             wwpn_prefix;
+	u16				max_msix_vectors;
 #define IXGBE_MAX_MTA			128
 	u32				mta_shadow[IXGBE_MAX_MTA];
 	s32                             mc_filter_type;
@@ -2823,12 +2907,12 @@
 	u32				rx_pb_size;
 	u32                             max_tx_queues;
 	u32                             max_rx_queues;
-	u32                             max_msix_vectors;
 	u32                             orig_autoc;
 	u32                             orig_autoc2;
 	bool                            orig_link_settings_stored;
 	bool                            autotry_restart;
 	u8                              flags;
+	struct ixgbe_thermal_sensor_data  thermal_sensor_data;
 };
 
 struct ixgbe_phy_info {
@@ -2938,7 +3022,6 @@
 #define IXGBE_ERR_OVERTEMP                      -26
 #define IXGBE_ERR_FC_NOT_NEGOTIATED             -27
 #define IXGBE_ERR_FC_NOT_SUPPORTED              -28
-#define IXGBE_ERR_FLOW_CONTROL                  -29
 #define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE        -30
 #define IXGBE_ERR_PBA_SECTION                   -31
 #define IXGBE_ERR_INVALID_ARGUMENT              -32
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 97a9914..f90ec07 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -849,6 +849,8 @@
 	.release_swfw_sync      = &ixgbe_release_swfw_sync_X540,
 	.disable_rx_buff	= &ixgbe_disable_rx_buff_generic,
 	.enable_rx_buff		= &ixgbe_enable_rx_buff_generic,
+	.get_thermal_sensor_data = NULL,
+	.init_thermal_sensor_thresh = NULL,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 947b5c8..e09a6cc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -40,6 +40,7 @@
 typedef u32 ixgbe_link_speed;
 #define IXGBE_LINK_SPEED_1GB_FULL       0x0020
 #define IXGBE_LINK_SPEED_10GB_FULL      0x0080
+#define IXGBE_LINK_SPEED_100_FULL	0x0008
 
 #define IXGBE_CTRL_RST              0x04000000 /* Reset (SW) */
 #define IXGBE_RXDCTL_ENABLE         0x02000000 /* Enable specific Rx Queue */
@@ -48,6 +49,7 @@
 #define IXGBE_LINKS_SPEED_82599     0x30000000
 #define IXGBE_LINKS_SPEED_10G_82599 0x30000000
 #define IXGBE_LINKS_SPEED_1G_82599  0x20000000
+#define IXGBE_LINKS_SPEED_100_82599 0x10000000
 
 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */
 #define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE  8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 2bfe0d1..e8dddf5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -107,10 +107,20 @@
 	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 
 	if (link_up) {
-		ethtool_cmd_speed_set(
-			ecmd,
-			(link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-			SPEED_10000 : SPEED_1000);
+		__u32 speed = SPEED_10000;
+		switch (link_speed) {
+		case IXGBE_LINK_SPEED_10GB_FULL:
+			speed = SPEED_10000;
+			break;
+		case IXGBE_LINK_SPEED_1GB_FULL:
+			speed = SPEED_1000;
+			break;
+		case IXGBE_LINK_SPEED_100_FULL:
+			speed = SPEED_100;
+			break;
+		}
+
+		ethtool_cmd_speed_set(ecmd, speed);
 		ecmd->duplex = DUPLEX_FULL;
 	} else {
 		ethtool_cmd_speed_set(ecmd, -1);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index dfed420..0a1b992 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -287,7 +287,7 @@
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
 
-extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
 extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
 extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
 extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 307611a..f69ec42 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -57,7 +57,7 @@
 static const char ixgbevf_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "2.2.0-k"
+#define DRV_VERSION "2.6.0-k"
 const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
 	"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -1608,13 +1608,14 @@
 	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
 }
 
-static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
+static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
 	int i, j = 0;
 	int num_rx_rings = adapter->num_rx_queues;
 	u32 txdctl, rxdctl;
+	u32 msg[2];
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		j = adapter->tx_ring[i].reg_idx;
@@ -1653,6 +1654,10 @@
 			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
 	}
 
+	msg[0] = IXGBE_VF_SET_LPE;
+	msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	hw->mbx.ops.write_posted(hw, msg, 2);
+
 	clear_bit(__IXGBEVF_DOWN, &adapter->state);
 	ixgbevf_napi_enable_all(adapter);
 
@@ -1667,24 +1672,20 @@
 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
 	adapter->link_check_timeout = jiffies;
 	mod_timer(&adapter->watchdog_timer, jiffies);
-	return 0;
 }
 
-int ixgbevf_up(struct ixgbevf_adapter *adapter)
+void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
-	int err;
 	struct ixgbe_hw *hw = &adapter->hw;
 
 	ixgbevf_configure(adapter);
 
-	err = ixgbevf_up_complete(adapter);
+	ixgbevf_up_complete(adapter);
 
 	/* clear any pending interrupts, may auto mask */
 	IXGBE_READ_REG(hw, IXGBE_VTEICR);
 
 	ixgbevf_irq_enable(adapter, true, true);
-
-	return err;
 }
 
 /**
@@ -2673,9 +2674,7 @@
 	 */
 	ixgbevf_map_rings_to_vectors(adapter);
 
-	err = ixgbevf_up_complete(adapter);
-	if (err)
-		goto err_up;
+	ixgbevf_up_complete(adapter);
 
 	/* clear any pending interrupts, may auto mask */
 	IXGBE_READ_REG(hw, IXGBE_VTEICR);
@@ -2689,7 +2688,6 @@
 
 err_req_irq:
 	ixgbevf_down(adapter);
-err_up:
 	ixgbevf_free_irq(adapter);
 err_setup_rx:
 	ixgbevf_free_all_rx_resources(adapter);
@@ -3196,9 +3194,11 @@
 	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
 
-	msg[0] = IXGBE_VF_SET_LPE;
-	msg[1] = max_frame;
-	hw->mbx.ops.write_posted(hw, msg, 2);
+	if (!netif_running(netdev)) {
+		msg[0] = IXGBE_VF_SET_LPE;
+		msg[1] = max_frame;
+		hw->mbx.ops.write_posted(hw, msg, 2);
+	}
 
 	if (netif_running(netdev))
 		ixgbevf_reinit_locked(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 74be741..ec89b86 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -404,11 +404,17 @@
 	else
 		*link_up = false;
 
-	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
-	    IXGBE_LINKS_SPEED_10G_82599)
+	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+	case IXGBE_LINKS_SPEED_10G_82599:
 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-	else
+		break;
+	case IXGBE_LINKS_SPEED_1G_82599:
 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
+		break;
+	case IXGBE_LINKS_SPEED_100_82599:
+		*speed = IXGBE_LINK_SPEED_100_FULL;
+		break;
+	}
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5e1ca0f..c8950da 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1665,6 +1665,7 @@
 	.get_strings		= mv643xx_eth_get_strings,
 	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
 	.get_sset_count		= mv643xx_eth_get_sset_count,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index efec6b6..1db023b 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1456,6 +1456,7 @@
 	.set_settings = pxa168_set_settings,
 	.get_drvinfo = pxa168_get_drvinfo,
 	.get_link = ethtool_op_get_link,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops pxa168_eth_netdev_ops = {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index c9b504e..cace36f 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2494,8 +2494,13 @@
 		skb_copy_from_linear_data(re->skb, skb->data, length);
 		skb->ip_summed = re->skb->ip_summed;
 		skb->csum = re->skb->csum;
+		skb->rxhash = re->skb->rxhash;
+		skb->vlan_tci = re->skb->vlan_tci;
+
 		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
 					       length, PCI_DMA_FROMDEVICE);
+		re->skb->vlan_tci = 0;
+		re->skb->rxhash = 0;
 		re->skb->ip_summed = CHECKSUM_NONE;
 		skb_put(skb, length);
 	}
@@ -2580,9 +2585,6 @@
 	struct sk_buff *skb = NULL;
 	u16 count = (status & GMR_FS_LEN) >> 16;
 
-	if (status & GMR_FS_VLAN)
-		count -= VLAN_HLEN;	/* Account for vlan tag */
-
 	netif_printk(sky2, rx_status, KERN_DEBUG, dev,
 		     "rx slot %u status 0x%x len %d\n",
 		     sky2->rx_next, status, length);
@@ -2590,6 +2592,9 @@
 	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
 	prefetch(sky2->rx_ring + sky2->rx_next);
 
+	if (vlan_tx_tag_present(re->skb))
+		count -= VLAN_HLEN;	/* Account for vlan tag */
+
 	/* This chip has hardware problems that generates bogus status.
 	 * So do only marginal checking and expect higher level protocols
 	 * to handle crap frames.
@@ -2647,11 +2652,8 @@
 }
 
 static inline void sky2_skb_rx(const struct sky2_port *sky2,
-			       u32 status, struct sk_buff *skb)
+			       struct sk_buff *skb)
 {
-	if (status & GMR_FS_VLAN)
-		__vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
-
 	if (skb->ip_summed == CHECKSUM_NONE)
 		netif_receive_skb(skb);
 	else
@@ -2705,6 +2707,14 @@
 	}
 }
 
+static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
+{
+	struct sk_buff *skb;
+
+	skb = sky2->rx_ring[sky2->rx_next].skb;
+	__vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
+}
+
 static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
 {
 	struct sk_buff *skb;
@@ -2763,8 +2773,7 @@
 			}
 
 			skb->protocol = eth_type_trans(skb, dev);
-
-			sky2_skb_rx(sky2, status, skb);
+			sky2_skb_rx(sky2, skb);
 
 			/* Stop after net poll weight */
 			if (++work_done >= to_do)
@@ -2772,11 +2781,11 @@
 			break;
 
 		case OP_RXVLAN:
-			sky2->rx_tag = length;
+			sky2_rx_tag(sky2, length);
 			break;
 
 		case OP_RXCHKSVLAN:
-			sky2->rx_tag = length;
+			sky2_rx_tag(sky2, length);
 			/* fall through */
 		case OP_RXCHKS:
 			if (likely(dev->features & NETIF_F_RXCSUM))
@@ -4816,14 +4825,14 @@
 
 	init_waitqueue_head(&hw->msi_wait);
 
-	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
-
 	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
 	if (err) {
 		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
 		return err;
 	}
 
+	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
+
 	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
 	sky2_read8(hw, B0_CTST);
 
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ff6f58b..3c896ce 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2241,7 +2241,6 @@
 	u16		     rx_pending;
 	u16		     rx_data_size;
 	u16		     rx_nfrags;
-	u16		     rx_tag;
 
 	struct {
 		unsigned long last;
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 1bb9353..5f027f9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -11,6 +11,18 @@
 	  This driver supports Mellanox Technologies ConnectX Ethernet
 	  devices.
 
+config MLX4_EN_DCB
+	bool "Data Center Bridging (DCB) Support"
+	default y
+	depends on MLX4_EN && DCB
+	---help---
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
+	  If set to N, you will not be able to configure QoS and rate-limit attributes.
+	  This option depends on the kernel's DCB support.
+
+	  If unsure, set to Y.
+
 config MLX4_CORE
 	tristate
 	depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 4a40ab9..293127d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -7,3 +7,4 @@
 
 mlx4_en-y := 	en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
 		en_resources.o en_netdev.o en_selftest.o
+mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 8be20e7..06fef5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -124,9 +124,6 @@
 
 	spin_lock(&bitmap->lock);
 	bitmap_clear(bitmap->table, obj, cnt);
-	bitmap->last = min(bitmap->last, obj);
-	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
-			& bitmap->mask;
 	bitmap->avail += cnt;
 	spin_unlock(&bitmap->lock);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 773c70e..1bcead1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1254,7 +1254,6 @@
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
 	u32 reply;
-	u32 slave_status = 0;
 	u8 is_going_down = 0;
 	int i;
 
@@ -1274,10 +1273,8 @@
 		}
 		/*check if we are in the middle of FLR process,
 		if so return "retry" status to the slave*/
-		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
-			slave_status = MLX4_DELAY_RESET_SLAVE;
+		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
 			goto inform_slave_state;
-		}
 
 		/* write the version in the event field */
 		reply |= mlx4_comm_get_version();
@@ -1557,7 +1554,7 @@
 	return 0;
 
 err_resource:
-	mlx4_free_resource_tracker(dev);
+	mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
 err_thread:
 	flush_workqueue(priv->mfunc.master.comm_wq);
 	destroy_workqueue(priv->mfunc.master.comm_wq);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 00b8127..908a460 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -124,11 +124,7 @@
 	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
 	cq->mcq.event = mlx4_en_cq_event;
 
-	if (cq->is_tx) {
-		init_timer(&cq->timer);
-		cq->timer.function = mlx4_en_poll_tx_cq;
-		cq->timer.data = (unsigned long) cq;
-	} else {
+	if (!cq->is_tx) {
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
 		napi_enable(&cq->napi);
 	}
@@ -151,16 +147,12 @@
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-
-	if (cq->is_tx)
-		del_timer(&cq->timer);
-	else {
+	if (!cq->is_tx) {
 		napi_disable(&cq->napi);
 		netif_napi_del(&cq->napi);
 	}
 
-	mlx4_cq_free(mdev->dev, &cq->mcq);
+	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
 }
 
 /* Set rx cq moderation parameters */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
new file mode 100644
index 0000000..5d367958
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/dcbnl.h>
+#include <linux/math64.h>
+
+#include "mlx4_en.h"
+
+static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
+				   struct ieee_ets *ets)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct ieee_ets *my_ets = &priv->ets;
+
+	/* No IEEE ETS settings available */
+	if (!my_ets)
+		return -EINVAL;
+
+	ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+	ets->cbs = my_ets->cbs;
+	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+
+	return 0;
+}
+
+static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
+{
+	int i;
+	int total_ets_bw = 0;
+	int has_ets_tc = 0;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
+			en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
+					i, ets->prio_tc[i]);
+			return -EINVAL;
+		}
+
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			has_ets_tc = 1;
+			total_ets_bw += ets->tc_tx_bw[i];
+			break;
+		default:
+			en_err(priv, "TC[%d]: Not supported TSA: %d\n",
+					i, ets->tc_tsa[i]);
+			return -ENOTSUPP;
+		}
+	}
+
+	if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
+		en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
+				total_ets_bw);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
+		struct ieee_ets *ets, u16 *ratelimit)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int num_strict = 0;
+	int i;
+	__u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
+	__u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
+
+	ets = ets ?: &priv->ets;
+	ratelimit = ratelimit ?: priv->maxrate;
+
+	/* higher TC means higher priority => lower pg */
+	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			pg[i] = num_strict++;
+			tc_tx_bw[i] = MLX4_EN_BW_MAX;
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			pg[i] = MLX4_EN_TC_ETS;
+			tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
+			break;
+		}
+	}
+
+	return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
+			ratelimit);
+}
+
+static int
+mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	err = mlx4_en_ets_validate(priv, ets);
+	if (err)
+		return err;
+
+	err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
+	if (err)
+		return err;
+
+	err = mlx4_en_config_port_scheduler(priv, ets, NULL);
+	if (err)
+		return err;
+
+	memcpy(&priv->ets, ets, sizeof(priv->ets));
+
+	return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
+		struct ieee_pfc *pfc)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+
+	pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+	pfc->pfc_en = priv->prof->tx_ppp;
+
+	return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
+		struct ieee_pfc *pfc)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
+	int err;
+
+	en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
+			pfc->pfc_cap,
+			pfc->pfc_en,
+			pfc->mbc,
+			pfc->delay);
+
+	priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
+	priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
+
+	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+				    priv->rx_skb_size + ETH_FCS_LEN,
+				    priv->prof->tx_pause,
+				    priv->prof->tx_ppp,
+				    priv->prof->rx_pause,
+				    priv->prof->rx_ppp);
+	if (err)
+		en_err(priv, "Failed setting pause params\n");
+
+	return err;
+}
+
+static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
+{
+	return DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+	    (mode & DCB_CAP_DCBX_VER_CEE) ||
+	    !(mode & DCB_CAP_DCBX_VER_IEEE) ||
+	    !(mode & DCB_CAP_DCBX_HOST))
+		return 1;
+
+	return 0;
+}
+
+#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
+static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
+				   struct ieee_maxrate *maxrate)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int i;
+
+	if (!priv->maxrate)
+		return -EINVAL;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		maxrate->tc_maxrate[i] =
+			priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
+
+	return 0;
+}
+
+static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
+		struct ieee_maxrate *maxrate)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	u16 tmp[IEEE_8021QAZ_MAX_TCS];
+	int i, err;
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		/* Convert from Kbps into HW units, rounding result up.
+		 * Setting to 0 means unlimited BW.
+		 */
+		tmp[i] = div_u64(maxrate->tc_maxrate[i] +
+				 MLX4_RATELIMIT_UNITS_IN_KB - 1,
+				 MLX4_RATELIMIT_UNITS_IN_KB);
+	}
+
+	err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
+	if (err)
+		return err;
+
+	memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
+
+	return 0;
+}
+
+const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
+	.ieee_getets	= mlx4_en_dcbnl_ieee_getets,
+	.ieee_setets	= mlx4_en_dcbnl_ieee_setets,
+	.ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
+	.ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
+	.ieee_getpfc	= mlx4_en_dcbnl_ieee_getpfc,
+	.ieee_setpfc	= mlx4_en_dcbnl_ieee_setpfc,
+
+	.getdcbx	= mlx4_en_dcbnl_getdcbx,
+	.setdcbx	= mlx4_en_dcbnl_setdcbx,
+};
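
The setmaxrate/getmaxrate pair above converts between Kbps and the hardware's 100 Mbps rate-limit units (MLX4_RATELIMIT_UNITS_IN_KB), rounding up on set. A minimal stand-alone sketch of that round trip, not part of the patch, with an assumed 150 Mbps request:

/* Illustrative only: mirrors the Kbps <-> HW unit rounding in en_dcb_nl.c. */
#include <stdio.h>

#define UNITS_IN_KB 100000ULL	/* MLX4_RATELIMIT_UNITS_IN_KB: one HW unit = 100 Mbps */

static unsigned long long kbps_to_hw_units(unsigned long long kbps)
{
	/* Round up, as the driver does with div_u64(); 0 means unlimited. */
	return (kbps + UNITS_IN_KB - 1) / UNITS_IN_KB;
}

int main(void)
{
	unsigned long long requested = 150000;	/* 150 Mbps, example value only */
	unsigned long long hw = kbps_to_hw_units(requested);	/* stored as u16 in the driver */

	/* getmaxrate reports hw * 100000 Kbps, so the value read back may be
	 * rounded up relative to what was requested. */
	printf("requested %llu Kbps -> %llu HW units -> reported %llu Kbps\n",
	       requested, hw, hw * UNITS_IN_KB);
	return 0;
}

The read-back value is therefore always a multiple of 100 Mbps, never lower than the configured rate.
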
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 70346fd..72901ce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -83,7 +83,7 @@
 #define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
 
 static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
-	"Interupt Test",
+	"Interrupt Test",
 	"Link Test",
 	"Speed Test",
 	"Register Test",
@@ -359,8 +359,8 @@
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
-	coal->tx_coalesce_usecs = 0;
-	coal->tx_max_coalesced_frames = 0;
+	coal->tx_coalesce_usecs = priv->tx_usecs;
+	coal->tx_max_coalesced_frames = priv->tx_frames;
 	coal->rx_coalesce_usecs = priv->rx_usecs;
 	coal->rx_max_coalesced_frames = priv->rx_frames;
 
@@ -388,6 +388,21 @@
 				MLX4_EN_RX_COAL_TIME :
 				coal->rx_coalesce_usecs;
 
+	/* Setting TX coalescing parameters */
+	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
+	    coal->tx_max_coalesced_frames != priv->tx_frames) {
+		priv->tx_usecs = coal->tx_coalesce_usecs;
+		priv->tx_frames = coal->tx_max_coalesced_frames;
+		for (i = 0; i < priv->tx_ring_num; i++) {
+			priv->tx_cq[i].moder_cnt = priv->tx_frames;
+			priv->tx_cq[i].moder_time = priv->tx_usecs;
+			if (mlx4_en_set_cq_moder(priv, &priv->tx_cq[i])) {
+				en_warn(priv, "Failed changing moderation "
+					      "for TX cq %d\n", i);
+			}
+		}
+	}
+
 	/* Set adaptive coalescing params */
 	priv->pkt_rate_low = coal->pkt_rate_low;
 	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
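
With get_coalesce now reporting the stored tx_usecs/tx_frames and set_coalesce applying them to every TX CQ, TX interrupt moderation becomes tunable from user space; for example (interface name assumed), something like `ethtool -C eth2 tx-usecs 128 tx-frames 32` would update priv->tx_usecs/tx_frames and reprogram moderation on each TX completion queue.
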
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 2097a7d..988b242 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -101,6 +101,8 @@
 	int i;
 
 	params->udp_rss = udp_rss;
+	params->num_tx_rings_p_up = min_t(int, num_online_cpus(),
+			MLX4_EN_MAX_TX_RING_P_UP);
 	if (params->udp_rss && !(mdev->dev->caps.flags
 					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
 		mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
@@ -113,8 +115,8 @@
 		params->prof[i].tx_ppp = pfctx;
 		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
 		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
-		params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
-			(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+		params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
+			MLX4_EN_NUM_UP;
 		params->prof[i].rss_rings = 0;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 31b455a..926d8aa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,6 +45,27 @@
 #include "mlx4_en.h"
 #include "en_port.h"
 
+static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
+{
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int i;
+	unsigned int q, offset = 0;
+
+	if (up && up != MLX4_EN_NUM_UP)
+		return -EINVAL;
+
+	netdev_set_num_tc(dev, up);
+
+	/* Partition Tx queues evenly amongst UPs */
+	q = priv->tx_ring_num / up;
+	for (i = 0; i < up; i++) {
+		netdev_set_tc_queue(dev, i, q, offset);
+		offset += q;
+	}
+
+	return 0;
+}
+
 static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
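
mlx4_en_setup_tc() only accepts 0 or MLX4_EN_NUM_UP traffic classes and slices the TX queues into equal per-TC ranges. A stand-alone sketch of that partitioning, not part of the patch, assuming num_tx_rings_p_up = 2 (so tx_ring_num = 16):

/* Illustrative sketch of the even queue split done by mlx4_en_setup_tc(). */
#include <stdio.h>

int main(void)
{
	unsigned int tx_ring_num = 16;	/* num_tx_rings_p_up (2, assumed) * MLX4_EN_NUM_UP (8) */
	unsigned int up = 8;		/* number of traffic classes (MLX4_EN_NUM_UP) */
	unsigned int q = tx_ring_num / up, offset = 0, i;

	for (i = 0; i < up; i++) {
		/* the driver calls netdev_set_tc_queue(dev, i, q, offset) here */
		printf("TC %u -> queues %u..%u\n", i, offset, offset + q - 1);
		offset += q;
	}
	return 0;
}
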
@@ -421,6 +442,8 @@
 	 */
 	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
 	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
+	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
+	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
 	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
 			   "rx_frames:%d rx_usecs:%d\n",
 		 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
@@ -437,8 +460,8 @@
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		cq = &priv->tx_cq[i];
-		cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
-		cq->moder_time = MLX4_EN_TX_COAL_TIME;
+		cq->moder_cnt = priv->tx_frames;
+		cq->moder_time = priv->tx_usecs;
 	}
 
 	/* Reset auto-moderation params */
@@ -650,12 +673,18 @@
 
 		/* Configure ring */
 		tx_ring = &priv->tx_ring[i];
-		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
+		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
+			i / priv->mdev->profile.num_tx_rings_p_up);
 		if (err) {
 			en_err(priv, "Failed allocating Tx ring\n");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
+		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+
+		/* Arm CQ for TX completions */
+		mlx4_en_arm_cq(priv, cq);
+
 		/* Set initial ownership of all Tx TXBBs to SW (1) */
 		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
 			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
@@ -797,12 +826,15 @@
 						 watchdog_task);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
+	int i;
 
 	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
 
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		mlx4_en_stop_port(dev);
+		for (i = 0; i < priv->tx_ring_num; i++)
+			netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
 		if (mlx4_en_start_port(dev))
 			en_err(priv, "Failed restarting port %d\n", priv->port);
 	}
@@ -966,6 +998,10 @@
 	mutex_unlock(&mdev->state_lock);
 
 	mlx4_en_free_resources(priv);
+
+	kfree(priv->tx_ring);
+	kfree(priv->tx_cq);
+
 	free_netdev(dev);
 }
 
@@ -1036,6 +1072,7 @@
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
 	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_setup_tc		= mlx4_en_setup_tc,
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1070,6 +1107,18 @@
 	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
 			MLX4_WQE_CTRL_SOLICITED);
 	priv->tx_ring_num = prof->tx_ring_num;
+	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) *
+			priv->tx_ring_num, GFP_KERNEL);
+	if (!priv->tx_ring) {
+		err = -ENOMEM;
+		goto out;
+	}
+	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * priv->tx_ring_num,
+			GFP_KERNEL);
+	if (!priv->tx_cq) {
+		err = -ENOMEM;
+		goto out;
+	}
 	priv->rx_ring_num = prof->rx_ring_num;
 	priv->mac_index = -1;
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
@@ -1079,6 +1128,10 @@
 	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
 	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
 	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+#ifdef CONFIG_MLX4_EN_DCB
+	if (!mlx4_is_slave(priv->mdev->dev))
+		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
+#endif
 
 	/* Query for default mac and max mtu */
 	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 6934fd7..745090b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,6 +39,8 @@
 #define SET_PORT_PROMISC_SHIFT	31
 #define SET_PORT_MC_PROMISC_SHIFT	30
 
+#define MLX4_EN_NUM_TC		8
+
 #define VLAN_FLTR_SIZE	128
 struct mlx4_set_vlan_fltr_mbox {
 	__be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index bcbc54c..10c24c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -39,7 +39,7 @@
 
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 			     int is_tx, int rss, int qpn, int cqn,
-			     struct mlx4_qp_context *context)
+			     int user_prio, struct mlx4_qp_context *context)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 
@@ -57,6 +57,10 @@
 	context->local_qpn = cpu_to_be32(qpn);
 	context->pri_path.ackto = 1 & 0x07;
 	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
+	if (user_prio >= 0) {
+		context->pri_path.sched_queue |= user_prio << 3;
+		context->pri_path.feup = 1 << 6;
+	}
 	context->pri_path.counter_index = 0xff;
 	context->cqn_send = cpu_to_be32(cqn);
 	context->cqn_recv = cpu_to_be32(cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9adbd53..d49a7ac 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -823,7 +823,7 @@
 
 	memset(context, 0, sizeof *context);
 	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
-				qpn, ring->cqn, context);
+				qpn, ring->cqn, -1, context);
 	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
 
 	/* Cancel FCS removal if FW allows */
@@ -890,7 +890,7 @@
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-				priv->rx_ring[0].cqn, &context);
+				priv->rx_ring[0].cqn, -1, &context);
 
 	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
 		rss_rings = priv->rx_ring_num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1796824..019d856 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -67,8 +67,6 @@
 
 	inline_thold = min(inline_thold, MAX_INLINE);
 
-	spin_lock_init(&ring->comp_lock);
-
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = vmalloc(tmp);
 	if (!ring->tx_info)
@@ -156,7 +154,7 @@
 
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
-			     int cq)
+			     int cq, int user_prio)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
@@ -174,7 +172,7 @@
 	ring->doorbell_qpn = ring->qp.qpn << 8;
 
 	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
-				ring->cqn, &ring->context);
+				ring->cqn, user_prio, &ring->context);
 	if (ring->bf_enabled)
 		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
 
@@ -317,6 +315,8 @@
 	int size = cq->size;
 	u32 size_mask = ring->size_mask;
 	struct mlx4_cqe *buf = cq->buf;
+	u32 packets = 0;
+	u32 bytes = 0;
 
 	if (!priv->port_up)
 		return;
@@ -345,6 +345,8 @@
 					priv, ring, ring_index,
 					!!((ring->cons + txbbs_skipped) &
 							ring->size));
+			packets++;
+			bytes += ring->tx_info[ring_index].nr_bytes;
 		} while (ring_index != new_index);
 
 		++cons_index;
@@ -361,13 +363,14 @@
 	mlx4_cq_set_ci(mcq);
 	wmb();
 	ring->cons += txbbs_skipped;
+	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
 
 	/* Wakeup Tx queue if this ring stopped it */
 	if (unlikely(ring->blocked)) {
 		if ((u32) (ring->prod - ring->cons) <=
 		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
 			ring->blocked = 0;
-			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
+			netif_tx_wake_queue(ring->tx_queue);
 			priv->port_stats.wake_queue++;
 		}
 	}
@@ -377,41 +380,12 @@
 {
 	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
 
-	if (!spin_trylock(&ring->comp_lock))
-		return;
 	mlx4_en_process_tx_cq(cq->dev, cq);
-	mod_timer(&cq->timer, jiffies + 1);
-	spin_unlock(&ring->comp_lock);
+	mlx4_en_arm_cq(priv, cq);
 }
 
 
-void mlx4_en_poll_tx_cq(unsigned long data)
-{
-	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
-	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-	u32 inflight;
-
-	INC_PERF_COUNTER(priv->pstats.tx_poll);
-
-	if (!spin_trylock_irq(&ring->comp_lock)) {
-		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-		return;
-	}
-	mlx4_en_process_tx_cq(cq->dev, cq);
-	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
-
-	/* If there are still packets in flight and the timer has not already
-	 * been scheduled by the Tx routine then schedule it here to guarantee
-	 * completion processing of these packets */
-	if (inflight && priv->port_up)
-		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-
-	spin_unlock_irq(&ring->comp_lock);
-}
-
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 						      struct mlx4_en_tx_ring *ring,
 						      u32 index,
@@ -440,25 +414,6 @@
 	return ring->buf + index * TXBB_SIZE;
 }
 
-static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
-{
-	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
-	unsigned long flags;
-
-	/* If we don't have a pending timer, set one up to catch our recent
-	   post in case the interface becomes idle */
-	if (!timer_pending(&cq->timer))
-		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-
-	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
-	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
-			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock_irqrestore(&ring->comp_lock, flags);
-		}
-}
-
 static int is_inline(struct sk_buff *skb, void **pfrag)
 {
 	void *ptr;
@@ -571,17 +526,16 @@
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	u16 vlan_tag = 0;
+	u16 rings_p_up = priv->mdev->profile.num_tx_rings_p_up;
+	u8 up = 0;
 
-	/* If we support per priority flow control and the packet contains
-	 * a vlan tag, send the packet to the TX ring assigned to that priority
-	 */
-	if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
-		vlan_tag = vlan_tx_tag_get(skb);
-		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
-	}
+	if (dev->num_tc)
+		return skb_tx_hash(dev, skb);
 
-	return skb_tx_hash(dev, skb);
+	if (vlan_tx_tag_present(skb))
+		up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
+
+	return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
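
With the dedicated PPP rings gone, queue selection now groups the regular TX rings by user priority: the 802.1p priority from the VLAN tag selects a block of num_tx_rings_p_up queues and __skb_tx_hash() picks a ring inside that block. A small sketch of the index math, not part of the patch, assuming num_tx_rings_p_up = 2:

/* Illustrative sketch of the queue index math in mlx4_en_select_queue()
 * for the case where multiple TCs are not configured. */
#include <stdio.h>

int main(void)
{
	unsigned int rings_p_up = 2;	/* mdev->profile.num_tx_rings_p_up (assumed) */
	unsigned int hash = 1;		/* stands in for __skb_tx_hash(dev, skb, rings_p_up) */
	unsigned int up;

	/* 'up' is the VLAN priority (vlan_tx_tag_get() >> VLAN_PRIO_SHIFT), 0 if untagged */
	for (up = 0; up < 8; up++)
		printf("UP %u -> queue %u\n", up, hash + up * rings_p_up);
	return 0;
}
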
@@ -594,7 +548,6 @@
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_tx_ring *ring;
-	struct mlx4_en_cq *cq;
 	struct mlx4_en_tx_desc *tx_desc;
 	struct mlx4_wqe_data_seg *data;
 	struct skb_frag_struct *frag;
@@ -638,13 +591,10 @@
 	if (unlikely(((int)(ring->prod - ring->cons)) >
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
 		/* every full Tx ring stops queue */
-		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
+		netif_tx_stop_queue(ring->tx_queue);
 		ring->blocked = 1;
 		priv->port_stats.queue_stopped++;
 
-		/* Use interrupts to find out when queue opened */
-		cq = &priv->tx_cq[tx_ind];
-		mlx4_en_arm_cq(priv, cq);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -707,7 +657,7 @@
 		priv->port_stats.tso_packets++;
 		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
 			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
-		ring->bytes += skb->len + (i - 1) * lso_header_size;
+		tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
 		ring->packets += i;
 	} else {
 		/* Normal (Non LSO) packet */
@@ -715,10 +665,12 @@
 			((ring->prod & ring->size) ?
 			 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
 		data = &tx_desc->data;
-		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
+		tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 		ring->packets++;
 
 	}
+	ring->bytes += tx_info->nr_bytes;
+	netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
 	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
 
@@ -792,9 +744,6 @@
 		iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
 	}
 
-	/* Poll CQ here */
-	mlx4_en_xmit_poll(priv, tx_ind);
-
 	return NETDEV_TX_OK;
 
 tx_drop:
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 2a02ba5..68f5cd6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -118,6 +118,20 @@
 			mlx4_dbg(dev, "    %s\n", fname[i]);
 }
 
+static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
+{
+	static const char * const fname[] = {
+		[0] = "RSS support",
+		[1] = "RSS Toeplitz Hash Function support",
+		[2] = "RSS XOR Hash Function support"
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fname); ++i)
+		if (fname[i] && (flags & (1LL << i)))
+			mlx4_dbg(dev, "    %s\n", fname[i]);
+}
+
 int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
 {
 	struct mlx4_cmd_mailbox *mailbox;
@@ -346,6 +360,7 @@
 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
 #define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
+#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
@@ -390,6 +405,7 @@
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
 
+	dev_cap->flags2 = 0;
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
@@ -439,6 +455,17 @@
 	else
 		dev_cap->max_gso_sz = 1 << field;
 
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
+	if (field & 0x20)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
+	if (field & 0x10)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
+	field &= 0xf;
+	if (field) {
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
+		dev_cap->max_rss_tbl_sz = 1 << field;
+	} else
+		dev_cap->max_rss_tbl_sz = 0;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
 	dev_cap->max_rdma_global = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
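
The new byte read at QUERY_DEV_CAP_RSS_OFFSET encodes the RSS capabilities: bit 0x20 for the XOR hash, bit 0x10 for Toeplitz, and the low nibble as log2 of the maximum RSS table size (0 meaning no RSS). A stand-alone decoder sketch, not part of the patch, with an assumed firmware value of 0x37:

/* Illustrative decode of the QUERY_DEV_CAP RSS byte parsed above. */
#include <stdio.h>

int main(void)
{
	unsigned char field = 0x37;	/* example value only */

	if (field & 0x20)
		printf("RSS XOR hash supported\n");
	if (field & 0x10)
		printf("RSS Toeplitz hash supported\n");
	if (field & 0xf)		/* low nibble = log2(max RSS table size) */
		printf("RSS supported, max table size %u entries\n", 1u << (field & 0xf));
	else
		printf("RSS not supported\n");
	return 0;
}
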
@@ -632,8 +659,10 @@
 		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
 	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
 	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
+	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
 
 	dump_dev_cap_flags(dev, dev_cap->flags);
+	dump_dev_cap_flags2(dev, dev_cap->flags2);
 
 out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1164,9 +1193,8 @@
 			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 		if (err)
 			return err;
-		priv->mfunc.master.slave_state[slave].init_port_mask |=
-			(1 << port);
 	}
+	priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
 	++priv->mfunc.master.init_port_ref[port];
 	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index e1a5fa5..64c0399 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -79,6 +79,7 @@
 	u64 trans_code[MLX4_MAX_PORTS + 1];
 	u16 stat_rate_support;
 	u64 flags;
+	u64 flags2;
 	int reserved_uars;
 	int uar_size;
 	int min_page_sz;
@@ -110,6 +111,7 @@
 	u32 reserved_lkey;
 	u64 max_icm_sz;
 	int max_gso_sz;
+	int max_rss_tbl_sz;
 	u8  supported_port_types[MLX4_MAX_PORTS + 1];
 	u8  suggested_type[MLX4_MAX_PORTS + 1];
 	u8  default_sense[MLX4_MAX_PORTS + 1];
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 8bb05b4..2e024a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -272,10 +272,12 @@
 	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
 	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags		     = dev_cap->flags;
+	dev->caps.flags2	     = dev_cap->flags2;
 	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
 	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
+	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
 
 	/* Sense port always allowed on supported devices for ConnectX1 and 2 */
 	if (dev->pdev->device != 0x1003)
@@ -1306,7 +1308,7 @@
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
 }
 
-int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
+int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
@@ -1319,13 +1321,44 @@
 
 	return 0;
 }
+
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
+{
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
+				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (!err)
+			*idx = get_param_l(&out_param);
+
+		return err;
+	}
+	return __mlx4_counter_alloc(dev, idx);
+}
 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
 
-void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
+void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
 {
 	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
 	return;
 }
+
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
+{
+	u64 in_param;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, idx);
+		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
+			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+			 MLX4_CMD_WRAPPED);
+		return;
+	}
+	__mlx4_counter_free(dev, idx);
+}
 EXPORT_SYMBOL_GPL(mlx4_counter_free);
 
 static int mlx4_setup_hca(struct mlx4_dev *dev)
@@ -1865,7 +1898,6 @@
 				mlx4_err(dev, "Failed to enable sriov,"
 					 "continuing without sriov enabled"
 					 " (err = %d).\n", err);
-				num_vfs = 0;
 				err = 0;
 			} else {
 				mlx4_warn(dev, "Running in master mode\n");
@@ -2022,7 +2054,7 @@
 	mlx4_cmd_cleanup(dev);
 
 err_sriov:
-	if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
+	if (dev->flags & MLX4_FLAG_SRIOV)
 		pci_disable_sriov(pdev);
 
 err_rel_own:
@@ -2070,6 +2102,10 @@
 			mlx4_CLOSE_PORT(dev, p);
 		}
 
+		if (mlx4_is_master(dev))
+			mlx4_free_resource_tracker(dev,
+						   RES_TR_FREE_SLAVES_ONLY);
+
 		mlx4_cleanup_counters_table(dev);
 		mlx4_cleanup_mcg_table(dev);
 		mlx4_cleanup_qp_table(dev);
@@ -2082,7 +2118,8 @@
 		mlx4_cleanup_pd_table(dev);
 
 		if (mlx4_is_master(dev))
-			mlx4_free_resource_tracker(dev);
+			mlx4_free_resource_tracker(dev,
+						   RES_TR_FREE_STRUCTS_ONLY);
 
 		iounmap(priv->kar);
 		mlx4_uar_free(dev, &priv->driver_uar);
@@ -2099,7 +2136,7 @@
 
 		if (dev->flags & MLX4_FLAG_MSI_X)
 			pci_disable_msix(pdev);
-		if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+		if (dev->flags & MLX4_FLAG_SRIOV) {
 			mlx4_warn(dev, "Disabling sriov\n");
 			pci_disable_sriov(pdev);
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 4799e82..f4a8f98 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -357,7 +357,6 @@
 	u32 prot;
 	int i;
 	bool found;
-	int last_index;
 	int err;
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
@@ -419,7 +418,6 @@
 			if (err)
 				goto out_mailbox;
 		}
-		last_index = entry->index;
 	}
 
 	/* add the new qpn to list of promisc qps */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 2a0ff2c..86b6e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -53,6 +53,26 @@
 #define DRV_VERSION	"1.1"
 #define DRV_RELDATE	"Dec, 2011"
 
+#define MLX4_NUM_UP		8
+#define MLX4_NUM_TC		8
+#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
+#define MLX4_RATELIMIT_DEFAULT 0xffff
+
+struct mlx4_set_port_prio2tc_context {
+	u8 prio2tc[4];
+};
+
+struct mlx4_port_scheduler_tc_cfg_be {
+	__be16 pg;
+	__be16 bw_precentage;
+	__be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
+	__be16 max_bw_value;
+};
+
+struct mlx4_set_port_scheduler_context {
+	struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
+};
+
 enum {
 	MLX4_HCR_BASE		= 0x80680,
 	MLX4_HCR_SIZE		= 0x0001c,
@@ -126,6 +146,11 @@
 	RES_OP_MAP_ICM,
 };
 
+enum mlx4_res_tracker_free_type {
+	RES_TR_FREE_ALL,
+	RES_TR_FREE_SLAVES_ONLY,
+	RES_TR_FREE_STRUCTS_ONLY,
+};
 
 /*
  *Virtual HCR structures.
@@ -851,6 +876,10 @@
 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
 int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 		     int start_index, int npages, u64 *page_list);
+int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
+void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
+void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -1007,7 +1036,8 @@
 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
 int mlx4_init_resource_tracker(struct mlx4_dev *dev);
 
-void mlx4_free_resource_tracker(struct mlx4_dev *dev);
+void mlx4_free_resource_tracker(struct mlx4_dev *dev,
+				enum mlx4_res_tracker_free_type type);
 
 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			  struct mlx4_vhcr *vhcr,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d69fee4..6ae3509 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -40,6 +40,9 @@
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#ifdef CONFIG_MLX4_EN_DCB
+#include <linux/dcbnl.h>
+#endif
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
@@ -108,9 +111,8 @@
 #define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
 
 #define MLX4_EN_SMALL_PKT_SIZE		64
-#define MLX4_EN_NUM_TX_RINGS		8
-#define MLX4_EN_NUM_PPP_RINGS		8
-#define MAX_TX_RINGS			(MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
+#define MLX4_EN_MAX_TX_RING_P_UP	32
+#define MLX4_EN_NUM_UP			8
 #define MLX4_EN_DEF_TX_RING_SIZE	512
 #define MLX4_EN_DEF_RX_RING_SIZE  	1024
 
@@ -118,7 +120,7 @@
 #define MLX4_EN_RX_COAL_TARGET	44
 #define MLX4_EN_RX_COAL_TIME	0x10
 
-#define MLX4_EN_TX_COAL_PKTS	5
+#define MLX4_EN_TX_COAL_PKTS	16
 #define MLX4_EN_TX_COAL_TIME	0x80
 
 #define MLX4_EN_RX_RATE_LOW		400000
@@ -196,6 +198,7 @@
 struct mlx4_en_tx_info {
 	struct sk_buff *skb;
 	u32 nr_txbb;
+	u32 nr_bytes;
 	u8 linear;
 	u8 data_offset;
 	u8 inl;
@@ -251,9 +254,9 @@
 	unsigned long bytes;
 	unsigned long packets;
 	unsigned long tx_csum;
-	spinlock_t comp_lock;
 	struct mlx4_bf bf;
 	bool bf_enabled;
+	struct netdev_queue *tx_queue;
 };
 
 struct mlx4_en_rx_desc {
@@ -304,8 +307,6 @@
 	spinlock_t              lock;
 	struct net_device      *dev;
 	struct napi_struct	napi;
-	/* Per-core Tx cq processing support */
-	struct timer_list timer;
 	int size;
 	int buf_size;
 	unsigned vector;
@@ -336,6 +337,7 @@
 	u32 active_ports;
 	u32 small_pkt_int;
 	u8 no_reset;
+	u8 num_tx_rings_p_up;
 	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
 };
 
@@ -411,6 +413,15 @@
 
 };
 
+#ifdef CONFIG_MLX4_EN_DCB
+/* Minimal TC BW - setting to 0 will block traffic */
+#define MLX4_EN_BW_MIN 1
+#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
+
+#define MLX4_EN_TC_ETS 7
+
+#endif
+
 struct mlx4_en_priv {
 	struct mlx4_en_dev *mdev;
 	struct mlx4_en_port_profile *prof;
@@ -465,9 +476,9 @@
 	u16 num_frags;
 	u16 log_rx_info;
 
-	struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
+	struct mlx4_en_tx_ring *tx_ring;
 	struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
-	struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
+	struct mlx4_en_cq *tx_cq;
 	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
 	struct work_struct mcast_task;
 	struct work_struct mac_task;
@@ -484,6 +495,11 @@
 	int vids[128];
 	bool wol;
 	struct device *ddev;
+
+#ifdef CONFIG_MLX4_EN_DCB
+	struct ieee_ets ets;
+	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
+#endif
 };
 
 enum mlx4_en_wol {
@@ -512,7 +528,6 @@
 int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
-void mlx4_en_poll_tx_cq(unsigned long data);
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -522,7 +537,7 @@
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
-			     int cq);
+			     int cq, int user_prio);
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 				struct mlx4_en_tx_ring *ring);
 
@@ -540,8 +555,8 @@
 			  int budget);
 int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
-			     int is_tx, int rss, int qpn, int cqn,
-			     struct mlx4_qp_context *context);
+		int is_tx, int rss, int qpn, int cqn, int user_prio,
+		struct mlx4_qp_context *context);
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
 int mlx4_en_map_buffer(struct mlx4_buf *buf);
 void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
@@ -558,6 +573,10 @@
 int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
 int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
 
+#ifdef CONFIG_MLX4_EN_DCB
+extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
+#endif
+
 #define MLX4_EN_NUM_SELF_TEST	5
 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
 u64 mlx4_en_mac_to_u64(u8 *addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index fe2ac84..af55b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -788,7 +788,6 @@
 		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	u64 mtt_offset;
 	int err = -ENOMEM;
 
 	if (max_maps > dev->caps.max_fmr_maps)
@@ -811,8 +810,6 @@
 	if (err)
 		return err;
 
-	mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
-
 	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
 				    fmr->mr.mtt.offset,
 				    &fmr->dma_handle);
@@ -895,6 +892,6 @@
 int mlx4_SYNC_TPT(struct mlx4_dev *dev)
 {
 	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
-			MLX4_CMD_WRAPPED);
+			MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index db4746d..1ac8863 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -63,7 +63,7 @@
 }
 EXPORT_SYMBOL_GPL(mlx4_pd_free);
 
-int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
+int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
@@ -73,11 +73,46 @@
 
 	return 0;
 }
+
+int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
+{
+	u64 out_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		err = mlx4_cmd_imm(dev, 0, &out_param,
+				   RES_XRCD, RES_OP_RESERVE,
+				   MLX4_CMD_ALLOC_RES,
+				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err)
+			return err;
+
+		*xrcdn = get_param_l(&out_param);
+		return 0;
+	}
+	return __mlx4_xrcd_alloc(dev, xrcdn);
+}
 EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
 
+void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
+{
+	mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
+}
+
 void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
 {
-	mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
+	u64 in_param;
+	int err;
+
+	if (mlx4_is_mfunc(dev)) {
+		set_param_l(&in_param, xrcdn);
+		err = mlx4_cmd(dev, in_param, RES_XRCD,
+			       RES_OP_RESERVE, MLX4_CMD_FREE_RES,
+			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		if (err)
+			mlx4_warn(dev, "Failed to release xrcdn %d\n", xrcdn);
+	} else
+		__mlx4_xrcd_free(dev, xrcdn);
 }
 EXPORT_SYMBOL_GPL(mlx4_xrcd_free);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 77535ff..1fe2c7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -338,13 +338,12 @@
 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
 	u64 out_param;
-	int err;
 
 	if (mlx4_is_mfunc(dev)) {
 		set_param_l(&out_param, port);
-		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
-				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
-				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+		(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+				    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+				    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 		return;
 	}
 	__mlx4_unregister_mac(dev, port, mac);
@@ -834,6 +833,68 @@
 }
 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
 
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_prio2tc_context *context;
+	int err;
+	u32 in_mod;
+	int i;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+	memset(context, 0, sizeof *context);
+
+	for (i = 0; i < MLX4_NUM_UP; i += 2)
+		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
+
+	in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
+
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+		u8 *pg, u16 *ratelimit)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_scheduler_context *context;
+	int err;
+	u32 in_mod;
+	int i;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+	memset(context, 0, sizeof *context);
+
+	for (i = 0; i < MLX4_NUM_TC; i++) {
+		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
+		u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
+			MLX4_RATELIMIT_DEFAULT;
+
+		tc->pg = htons(pg[i]);
+		tc->bw_precentage = htons(tc_tx_bw[i]);
+
+		tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
+		tc->max_bw_value = htons(r);
+	}
+
+	in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
+
 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
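
mlx4_SET_PORT_PRIO2TC() packs the eight priority-to-TC assignments two per mailbox byte (even priority in the high nibble, odd priority in the low nibble), and mlx4_SET_PORT_SCHEDULER() falls back to MLX4_RATELIMIT_DEFAULT (0xffff, in 100 Mbps units) for any TC with no rate limit set. A stand-alone sketch of the nibble packing, not part of the patch, with an assumed mapping:

/* Illustrative sketch of the prio2tc packing done by mlx4_SET_PORT_PRIO2TC(). */
#include <stdio.h>

#define MLX4_NUM_UP 8

int main(void)
{
	unsigned char prio2tc[MLX4_NUM_UP] = { 0, 0, 1, 1, 2, 2, 3, 3 };	/* example mapping */
	unsigned char packed[MLX4_NUM_UP / 2];
	int i;

	for (i = 0; i < MLX4_NUM_UP; i += 2)
		packed[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];

	for (i = 0; i < MLX4_NUM_UP / 2; i++)
		printf("byte %d = 0x%02x\n", i, (unsigned int)packed[i]);
	return 0;
}
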
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 8752e6e..b45d0e7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -89,17 +89,6 @@
 	RES_QP_HW
 };
 
-static inline const char *qp_states_str(enum res_qp_states state)
-{
-	switch (state) {
-	case RES_QP_BUSY: return "RES_QP_BUSY";
-	case RES_QP_RESERVED: return "RES_QP_RESERVED";
-	case RES_QP_MAPPED: return "RES_QP_MAPPED";
-	case RES_QP_HW: return "RES_QP_HW";
-	default: return "Unknown";
-	}
-}
-
 struct res_qp {
 	struct res_common	com;
 	struct res_mtt	       *mtt;
@@ -173,16 +162,6 @@
 	RES_SRQ_HW,
 };
 
-static inline const char *srq_states_str(enum res_srq_states state)
-{
-	switch (state) {
-	case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
-	case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
-	case RES_SRQ_HW: return "RES_SRQ_HW";
-	default: return "Unknown";
-	}
-}
-
 struct res_srq {
 	struct res_common	com;
 	struct res_mtt	       *mtt;
@@ -195,20 +174,21 @@
 	RES_COUNTER_ALLOCATED,
 };
 
-static inline const char *counter_states_str(enum res_counter_states state)
-{
-	switch (state) {
-	case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
-	case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
-	default: return "Unknown";
-	}
-}
-
 struct res_counter {
 	struct res_common	com;
 	int			port;
 };
 
+enum res_xrcdn_states {
+	RES_XRCD_BUSY = RES_ANY_BUSY,
+	RES_XRCD_ALLOCATED,
+};
+
+struct res_xrcdn {
+	struct res_common	com;
+	int			port;
+};
+
 /* For Debug uses */
 static const char *ResourceType(enum mlx4_resource rt)
 {
@@ -221,6 +201,7 @@
 	case RES_MAC: return  "RES_MAC";
 	case RES_EQ: return "RES_EQ";
 	case RES_COUNTER: return "RES_COUNTER";
+	case RES_XRCD: return "RES_XRCD";
 	default: return "Unknown resource type !!!";
 	};
 }
@@ -254,16 +235,23 @@
 	return 0 ;
 }
 
-void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+void mlx4_free_resource_tracker(struct mlx4_dev *dev,
+				enum mlx4_res_tracker_free_type type)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
 
 	if (priv->mfunc.master.res_tracker.slave_list) {
-		for (i = 0 ; i < dev->num_slaves; i++)
-			mlx4_delete_all_resources_for_slave(dev, i);
+		if (type != RES_TR_FREE_STRUCTS_ONLY)
+			for (i = 0 ; i < dev->num_slaves; i++)
+				if (type == RES_TR_FREE_ALL ||
+				    dev->caps.function != i)
+					mlx4_delete_all_resources_for_slave(dev, i);
 
-		kfree(priv->mfunc.master.res_tracker.slave_list);
+		if (type != RES_TR_FREE_SLAVES_ONLY) {
+			kfree(priv->mfunc.master.res_tracker.slave_list);
+			priv->mfunc.master.res_tracker.slave_list = NULL;
+		}
 	}
 }
 
@@ -471,6 +459,20 @@
 	return &ret->com;
 }
 
+static struct res_common *alloc_xrcdn_tr(int id)
+{
+	struct res_xrcdn *ret;
+
+	ret = kzalloc(sizeof *ret, GFP_KERNEL);
+	if (!ret)
+		return NULL;
+
+	ret->com.res_id = id;
+	ret->com.state = RES_XRCD_ALLOCATED;
+
+	return &ret->com;
+}
+
 static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
 				   int extra)
 {
@@ -501,7 +503,9 @@
 	case RES_COUNTER:
 		ret = alloc_counter_tr(id);
 		break;
-
+	case RES_XRCD:
+		ret = alloc_xrcdn_tr(id);
+		break;
 	default:
 		return NULL;
 	}
@@ -624,6 +628,16 @@
 	return 0;
 }
 
+static int remove_xrcdn_ok(struct res_xrcdn *res)
+{
+	if (res->com.state == RES_XRCD_BUSY)
+		return -EBUSY;
+	else if (res->com.state != RES_XRCD_ALLOCATED)
+		return -EPERM;
+
+	return 0;
+}
+
 static int remove_cq_ok(struct res_cq *res)
 {
 	if (res->com.state == RES_CQ_BUSY)
@@ -663,6 +677,8 @@
 		return remove_eq_ok((struct res_eq *)res);
 	case RES_COUNTER:
 		return remove_counter_ok((struct res_counter *)res);
+	case RES_XRCD:
+		return remove_xrcdn_ok((struct res_xrcdn *)res);
 	default:
 		return -EINVAL;
 	}
@@ -1269,6 +1285,50 @@
 	return 0;
 }
 
+static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			     u64 in_param, u64 *out_param)
+{
+	u32 index;
+	int err;
+
+	if (op != RES_OP_RESERVE)
+		return -EINVAL;
+
+	err = __mlx4_counter_alloc(dev, &index);
+	if (err)
+		return err;
+
+	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
+	if (err)
+		__mlx4_counter_free(dev, index);
+	else
+		set_param_l(out_param, index);
+
+	return err;
+}
+
+static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			   u64 in_param, u64 *out_param)
+{
+	u32 xrcdn;
+	int err;
+
+	if (op != RES_OP_RESERVE)
+		return -EINVAL;
+
+	err = __mlx4_xrcd_alloc(dev, &xrcdn);
+	if (err)
+		return err;
+
+	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
+	if (err)
+		__mlx4_xrcd_free(dev, xrcdn);
+	else
+		set_param_l(out_param, xrcdn);
+
+	return err;
+}
+
 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
 			   struct mlx4_vhcr *vhcr,
 			   struct mlx4_cmd_mailbox *inbox,
@@ -1314,6 +1374,16 @@
 				    vhcr->in_param, &vhcr->out_param);
 		break;
 
+	case RES_COUNTER:
+		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
+					vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_XRCD:
+		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
+				      vhcr->in_param, &vhcr->out_param);
+		break;
+
 	default:
 		err = -EINVAL;
 		break;
@@ -1496,6 +1566,44 @@
 	return 0;
 }
 
+static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			    u64 in_param, u64 *out_param)
+{
+	int index;
+	int err;
+
+	if (op != RES_OP_RESERVE)
+		return -EINVAL;
+
+	index = get_param_l(&in_param);
+	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
+	if (err)
+		return err;
+
+	__mlx4_counter_free(dev, index);
+
+	return err;
+}
+
+static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+			  u64 in_param, u64 *out_param)
+{
+	int xrcdn;
+	int err;
+
+	if (op != RES_OP_RESERVE)
+		return -EINVAL;
+
+	xrcdn = get_param_l(&in_param);
+	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
+	if (err)
+		return err;
+
+	__mlx4_xrcd_free(dev, xrcdn);
+
+	return err;
+}
+
 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
 			  struct mlx4_vhcr *vhcr,
 			  struct mlx4_cmd_mailbox *inbox,
@@ -1541,6 +1649,15 @@
 				   vhcr->in_param, &vhcr->out_param);
 		break;
 
+	case RES_COUNTER:
+		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
+				       vhcr->in_param, &vhcr->out_param);
+		break;
+
+	case RES_XRCD:
+		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
+				     vhcr->in_param, &vhcr->out_param);
+
 	default:
 		break;
 	}
@@ -2536,7 +2653,7 @@
 	struct mlx4_qp qp; /* dummy for calling attach/detach */
 	u8 *gid = inbox->buf;
 	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
-	int err, err1;
+	int err;
 	int qpn;
 	struct res_qp *rqp;
 	int attach = vhcr->op_modifier;
@@ -2571,7 +2688,7 @@
 
 ex_rem:
 	/* ignore error return below, already in error */
-	err1 = rem_mcg_res(dev, slave, rqp, gid, prot, type);
+	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
 ex_put:
 	put_res(dev, slave, qpn, RES_QP);
 
@@ -2604,13 +2721,12 @@
 {
 	struct res_gid *rgid;
 	struct res_gid *tmp;
-	int err;
 	struct mlx4_qp qp; /* dummy for calling attach/detach */
 
 	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
 		qp.qpn = rqp->local_qpn;
-		err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
-					    rgid->steer);
+		(void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
+					     rgid->steer);
 		list_del(&rgid->list);
 		kfree(rgid);
 	}
@@ -3036,14 +3152,13 @@
 							   MLX4_CMD_HW2SW_EQ,
 							   MLX4_CMD_TIME_CLASS_A,
 							   MLX4_CMD_NATIVE);
-					mlx4_dbg(dev, "rem_slave_eqs: failed"
-						 " to move slave %d eqs %d to"
-						 " SW ownership\n", slave, eqn);
+					if (err)
+						mlx4_dbg(dev, "rem_slave_eqs: failed"
+							 " to move slave %d eqs %d to"
+							 " SW ownership\n", slave, eqn);
 					mlx4_free_cmd_mailbox(dev, mailbox);
-					if (!err) {
-						atomic_dec(&eq->mtt->ref_count);
-						state = RES_EQ_RESERVED;
-					}
+					atomic_dec(&eq->mtt->ref_count);
+					state = RES_EQ_RESERVED;
 					break;
 
 				default:
@@ -3056,6 +3171,64 @@
 	spin_unlock_irq(mlx4_tlock(dev));
 }
 
+static void rem_slave_counters(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *counter_list =
+		&tracker->slave_list[slave].res_list[RES_COUNTER];
+	struct res_counter *counter;
+	struct res_counter *tmp;
+	int err;
+	int index;
+
+	err = move_all_busy(dev, slave, RES_COUNTER);
+	if (err)
+		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
+			  "busy for slave %d\n", slave);
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+		if (counter->com.owner == slave) {
+			index = counter->com.res_id;
+			radix_tree_delete(&tracker->res_tree[RES_COUNTER], index);
+			list_del(&counter->com.list);
+			kfree(counter);
+			__mlx4_counter_free(dev, index);
+		}
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+	struct list_head *xrcdn_list =
+		&tracker->slave_list[slave].res_list[RES_XRCD];
+	struct res_xrcdn *xrcd;
+	struct res_xrcdn *tmp;
+	int err;
+	int xrcdn;
+
+	err = move_all_busy(dev, slave, RES_XRCD);
+	if (err)
+		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
+			  "busy for slave %d\n", slave);
+
+	spin_lock_irq(mlx4_tlock(dev));
+	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
+		if (xrcd->com.owner == slave) {
+			xrcdn = xrcd->com.res_id;
+			radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn);
+			list_del(&xrcd->com.list);
+			kfree(xrcd);
+			__mlx4_xrcd_free(dev, xrcdn);
+		}
+	}
+	spin_unlock_irq(mlx4_tlock(dev));
+}
+
 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3069,5 +3242,7 @@
 	rem_slave_mrs(dev, slave);
 	rem_slave_eqs(dev, slave);
 	rem_slave_mtts(dev, slave);
+	rem_slave_counters(dev, slave);
+	rem_slave_xrcdns(dev, slave);
 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 }
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f84dd2d..24fb049 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1262,7 +1262,7 @@
 		.owner	= THIS_MODULE,
 	},
 	.probe		= ks8842_probe,
-	.remove		= ks8842_remove,
+	.remove		= __devexit_p(ks8842_remove),
 };
 
 module_platform_driver(ks8842_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index f8dda00..5e313e9 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -618,10 +618,8 @@
 	netif_dbg(ks, intr, ks->netdev,
 		  "%s: status 0x%04x\n", __func__, status);
 
-	if (status & IRQ_LCI) {
-		/* should do something about checking link status */
+	if (status & IRQ_LCI)
 		handled |= IRQ_LCI;
-	}
 
 	if (status & IRQ_LDI) {
 		u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
@@ -684,6 +682,9 @@
 
 	mutex_unlock(&ks->lock);
 
+	if (status & IRQ_LCI)
+		mii_check_link(&ks->mii);
+
 	if (status & IRQ_TXI)
 		netif_wake_queue(ks->netdev);
 
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 27273ae..90153fc 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4033,7 +4033,6 @@
 
 	netdev->netdev_ops = &myri10ge_netdev_ops;
 	netdev->mtu = myri10ge_initial_mtu;
-	netdev->base_addr = mgp->iomem_base;
 	netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
 	netdev->features = netdev->hw_features;
 
@@ -4047,12 +4046,10 @@
 		netdev->vlan_features &= ~NETIF_F_TSO;
 
 	/* make sure we can get an irq, and that MSI can be
-	 * setup (if available).  Also ensure netdev->irq
-	 * is set to correct value if MSI is enabled */
+	 * setup (if available). */
 	status = myri10ge_request_irq(mgp);
 	if (status != 0)
 		goto abort_with_firmware;
-	netdev->irq = pdev->irq;
 	myri10ge_free_irq(mgp);
 
 	/* Save configuration space to be restored if the
@@ -4077,7 +4074,7 @@
 	else
 		dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
 			 mgp->msi_enabled ? "MSI" : "xPIC",
-			 netdev->irq, mgp->tx_boundary, mgp->fw_name,
+			 pdev->irq, mgp->tx_boundary, mgp->fw_name,
 			 (mgp->wc_enabled ? "Enabled" : "Disabled"));
 
 	board_number++;
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index eb836f7..f157334 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -6,9 +6,8 @@
 	bool "National Semi-conductor devices"
 	default y
 	depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \
-		   ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MCA || \
-		   MCA_LEGACY || MIPS || PCI || PCMCIA || SUPERH || \
-		   XTENSA_PLATFORM_XT2000 || ZORRO
+		   ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MIPS || \
+		   PCI || PCMCIA || SUPERH || XTENSA_PLATFORM_XT2000 || ZORRO
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
@@ -21,21 +20,6 @@
 
 if NET_VENDOR_NATSEMI
 
-config IBMLANA
-	tristate "IBM LAN Adapter/A support"
-	depends on MCA
-	---help---
-	  This is a Micro Channel Ethernet adapter.  You need to set
-	  CONFIG_MCA to use this driver.  It is both available as an in-kernel
-	  driver and as a module.
-
-	  To compile this driver as a module, choose M here. The only
-	  currently supported card is the IBM LAN Adapter/A for Ethernet.  It
-	  will both support 16K and 32K memory windows, however a 32K window
-	  gives a better security against packet losses.  Usage of multiple
-	  boards with this driver should be possible, but has not been tested
-	  up to now due to lack of hardware.
-
 config MACSONIC
 	tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)"
 	depends on MAC
diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile
index 9aa5dea..764c532a 100644
--- a/drivers/net/ethernet/natsemi/Makefile
+++ b/drivers/net/ethernet/natsemi/Makefile
@@ -2,7 +2,6 @@
 # Makefile for the National Semi-conductor Sonic devices.
 #
 
-obj-$(CONFIG_IBMLANA) += ibmlana.o
 obj-$(CONFIG_MACSONIC) += macsonic.o
 obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
 obj-$(CONFIG_NATSEMI) += natsemi.o
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index d38e48d..5b61d12 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -547,6 +547,7 @@
 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
 	dma_addr_t tx_dma[TX_RING_SIZE];
 	struct net_device *dev;
+	void __iomem *ioaddr;
 	struct napi_struct napi;
 	/* Media monitoring timer */
 	struct timer_list timer;
@@ -699,7 +700,9 @@
 
 static inline void __iomem *ns_ioaddr(struct net_device *dev)
 {
-	return (void __iomem *) dev->base_addr;
+	struct netdev_private *np = netdev_priv(dev);
+
+	return np->ioaddr;
 }
 
 static inline void natsemi_irq_enable(struct net_device *dev)
@@ -863,10 +866,9 @@
 	/* Store MAC Address in perm_addr */
 	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
 
-	dev->base_addr = (unsigned long __force) ioaddr;
-	dev->irq = irq;
-
 	np = netdev_priv(dev);
+	np->ioaddr = ioaddr;
+
 	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
 	np->dev = dev;
 
@@ -914,9 +916,6 @@
 	}
 
 	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
-	if (dev->mem_start)
-		option = dev->mem_start;
-
 	/* The lower four bits are the media type. */
 	if (option) {
 		if (option & 0x200)
@@ -1532,20 +1531,21 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem * ioaddr = ns_ioaddr(dev);
+	const int irq = np->pci_dev->irq;
 	int i;
 
 	/* Reset the chip, just in case. */
 	natsemi_reset(dev);
 
-	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
+	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 	if (i) return i;
 
 	if (netif_msg_ifup(np))
 		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-			dev->name, dev->irq);
+			dev->name, irq);
 	i = alloc_ring(dev);
 	if (i < 0) {
-		free_irq(dev->irq, dev);
+		free_irq(irq, dev);
 		return i;
 	}
 	napi_enable(&np->napi);
@@ -1794,6 +1794,7 @@
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem * ioaddr = ns_ioaddr(dev);
 	int next_tick = NATSEMI_TIMER_FREQ;
+	const int irq = np->pci_dev->irq;
 
 	if (netif_msg_timer(np)) {
 		/* DO NOT read the IntrStatus register,
@@ -1817,14 +1818,14 @@
 				if (netif_msg_drv(np))
 					printk(KERN_NOTICE "%s: possible phy reset: "
 						"re-initializing\n", dev->name);
-				disable_irq(dev->irq);
+				disable_irq(irq);
 				spin_lock_irq(&np->lock);
 				natsemi_stop_rxtx(dev);
 				dump_ring(dev);
 				reinit_ring(dev);
 				init_registers(dev);
 				spin_unlock_irq(&np->lock);
-				enable_irq(dev->irq);
+				enable_irq(irq);
 			} else {
 				/* hurry back */
 				next_tick = HZ;
@@ -1841,10 +1842,10 @@
 		spin_unlock_irq(&np->lock);
 	}
 	if (np->oom) {
-		disable_irq(dev->irq);
+		disable_irq(irq);
 		np->oom = 0;
 		refill_rx(dev);
-		enable_irq(dev->irq);
+		enable_irq(irq);
 		if (!np->oom) {
 			writel(RxOn, ioaddr + ChipCmd);
 		} else {
@@ -1885,8 +1886,9 @@
 {
 	struct netdev_private *np = netdev_priv(dev);
 	void __iomem * ioaddr = ns_ioaddr(dev);
+	const int irq = np->pci_dev->irq;
 
-	disable_irq(dev->irq);
+	disable_irq(irq);
 	spin_lock_irq(&np->lock);
 	if (!np->hands_off) {
 		if (netif_msg_tx_err(np))
@@ -1905,7 +1907,7 @@
 			dev->name);
 	}
 	spin_unlock_irq(&np->lock);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
 	dev->stats.tx_errors++;
@@ -2470,9 +2472,12 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void natsemi_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	intr_handler(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct netdev_private *np = netdev_priv(dev);
+	const int irq = np->pci_dev->irq;
+
+	disable_irq(irq);
+	intr_handler(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -2523,8 +2528,9 @@
 	if (netif_running(dev)) {
 		struct netdev_private *np = netdev_priv(dev);
 		void __iomem * ioaddr = ns_ioaddr(dev);
+		const int irq = np->pci_dev->irq;
 
-		disable_irq(dev->irq);
+		disable_irq(irq);
 		spin_lock(&np->lock);
 		/* stop engines */
 		natsemi_stop_rxtx(dev);
@@ -2537,7 +2543,7 @@
 		/* restart engines */
 		writel(RxOn | TxOn, ioaddr + ChipCmd);
 		spin_unlock(&np->lock);
-		enable_irq(dev->irq);
+		enable_irq(irq);
 	}
 	return 0;
 }
@@ -3135,6 +3141,7 @@
 {
 	void __iomem * ioaddr = ns_ioaddr(dev);
 	struct netdev_private *np = netdev_priv(dev);
+	const int irq = np->pci_dev->irq;
 
 	if (netif_msg_ifdown(np))
 		printk(KERN_DEBUG
@@ -3156,14 +3163,14 @@
 	 */
 
 	del_timer_sync(&np->timer);
-	disable_irq(dev->irq);
+	disable_irq(irq);
 	spin_lock_irq(&np->lock);
 	natsemi_irq_disable(dev);
 	np->hands_off = 1;
 	spin_unlock_irq(&np->lock);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 
-	free_irq(dev->irq, dev);
+	free_irq(irq, dev);
 
 	/* Interrupt disabled, interrupt handler released,
 	 * queue stopped, timer deleted, rtnl_lock held
@@ -3256,9 +3263,11 @@
 
 	rtnl_lock();
 	if (netif_running (dev)) {
+		const int irq = np->pci_dev->irq;
+
 		del_timer_sync(&np->timer);
 
-		disable_irq(dev->irq);
+		disable_irq(irq);
 		spin_lock_irq(&np->lock);
 
 		natsemi_irq_disable(dev);
@@ -3267,7 +3276,7 @@
 		netif_stop_queue(dev);
 
 		spin_unlock_irq(&np->lock);
-		enable_irq(dev->irq);
+		enable_irq(irq);
 
 		napi_disable(&np->napi);
 
@@ -3307,6 +3316,8 @@
 	if (netif_device_present(dev))
 		goto out;
 	if (netif_running(dev)) {
+		const int irq = np->pci_dev->irq;
+
 		BUG_ON(!np->hands_off);
 		ret = pci_enable_device(pdev);
 		if (ret < 0) {
@@ -3320,13 +3331,13 @@
 
 		natsemi_reset(dev);
 		init_ring(dev);
-		disable_irq(dev->irq);
+		disable_irq(irq);
 		spin_lock_irq(&np->lock);
 		np->hands_off = 0;
 		init_registers(dev);
 		netif_device_attach(dev);
 		spin_unlock_irq(&np->lock);
-		enable_irq(dev->irq);
+		enable_irq(irq);
 
 		mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
 	}
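
The natsemi conversion is the recurring pattern in this series: stop stashing the MMIO base and IRQ number in net_device (base_addr/irq) and instead keep the mapped BAR in the private struct and take the IRQ from the pci_dev at the point of use. A minimal sketch with hypothetical foo_* names and a made-up register offset.

struct foo_priv {
        void __iomem *ioaddr;           /* was dev->base_addr */
        struct pci_dev *pci_dev;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id);

static int foo_open(struct net_device *dev)
{
        struct foo_priv *fp = netdev_priv(dev);
        const int irq = fp->pci_dev->irq;       /* was dev->irq */
        int err;

        err = request_irq(irq, foo_interrupt, IRQF_SHARED, dev->name, dev);
        if (err)
                return err;

        writel(0, fp->ioaddr + 0x40);   /* 0x40 is a made-up mask register */
        return 0;
}
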
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6338ef8..bb36758 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2846,6 +2846,7 @@
 static void s2io_netpoll(struct net_device *dev)
 {
 	struct s2io_nic *nic = netdev_priv(dev);
+	const int irq = nic->pdev->irq;
 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
 	int i;
@@ -2855,7 +2856,7 @@
 	if (pci_channel_offline(nic->pdev))
 		return;
 
-	disable_irq(dev->irq);
+	disable_irq(irq);
 
 	writeq(val64, &bar0->rx_traffic_int);
 	writeq(val64, &bar0->tx_traffic_int);
@@ -2884,7 +2885,7 @@
 			break;
 		}
 	}
-	enable_irq(dev->irq);
+	enable_irq(irq);
 }
 #endif
 
@@ -3897,9 +3898,7 @@
 
 static void remove_inta_isr(struct s2io_nic *sp)
 {
-	struct net_device *dev = sp->dev;
-
-	free_irq(sp->pdev->irq, dev);
+	free_irq(sp->pdev->irq, sp->dev);
 }
 
 /* ********************************************************* *
@@ -7046,7 +7045,7 @@
 		}
 	}
 	if (sp->config.intr_type == INTA) {
-		err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
+		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
 				  sp->name, dev);
 		if (err) {
 			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
@@ -7908,9 +7907,6 @@
 		goto bar1_remap_failed;
 	}
 
-	dev->irq = pdev->irq;
-	dev->base_addr = (unsigned long)sp->bar0;
-
 	/* Initializing the BAR1 address as the start of the FIFO pointer. */
 	for (j = 0; j < MAX_TX_FIFOS; j++) {
 		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index ef76725..51387c3 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1882,25 +1882,24 @@
  */
 static void vxge_netpoll(struct net_device *dev)
 {
-	struct __vxge_hw_device *hldev;
-	struct vxgedev *vdev;
-
-	vdev = netdev_priv(dev);
-	hldev = pci_get_drvdata(vdev->pdev);
+	struct vxgedev *vdev = netdev_priv(dev);
+	struct pci_dev *pdev = vdev->pdev;
+	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
+	const int irq = pdev->irq;
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-	if (pci_channel_offline(vdev->pdev))
+	if (pci_channel_offline(pdev))
 		return;
 
-	disable_irq(dev->irq);
+	disable_irq(irq);
 	vxge_hw_device_clear_tx_rx(hldev);
 
 	vxge_hw_device_clear_tx_rx(hldev);
 	VXGE_COMPLETE_ALL_RX(vdev);
 	VXGE_COMPLETE_ALL_TX(vdev);
 
-	enable_irq(dev->irq);
+	enable_irq(irq);
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d  Exiting...", __func__, __LINE__);
@@ -2860,12 +2859,12 @@
 		vdev->config.rx_pause_enable);
 
 	if (vdev->vp_reset_timer.function == NULL)
-		vxge_os_timer(vdev->vp_reset_timer,
-			vxge_poll_vp_reset, vdev, (HZ/2));
+		vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev,
+			      HZ / 2);
 
 	/* There is no need to check for RxD leak and RxD lookup on Titan1A */
 	if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
-		vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+		vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
 			      HZ / 2);
 
 	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -3424,9 +3423,6 @@
 	ndev->features |= ndev->hw_features |
 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 
-	/*  Driver entry points */
-	ndev->irq = vdev->pdev->irq;
-	ndev->base_addr = (unsigned long) hldev->bar0;
 
 	ndev->netdev_ops = &vxge_netdev_ops;
 
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index f52a42d..35f3e75 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -416,12 +416,15 @@
 	static int p = val; \
 	module_param(p, int, 0)
 
-#define vxge_os_timer(timer, handle, arg, exp) do { \
-		init_timer(&timer); \
-		timer.function = handle; \
-		timer.data = (unsigned long) arg; \
-		mod_timer(&timer, (jiffies + exp)); \
-	} while (0);
+static inline
+void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
+		   struct vxgedev *vdev, unsigned long timeout)
+{
+	init_timer(timer);
+	timer->function = func;
+	timer->data = (unsigned long)vdev;
+	mod_timer(timer, jiffies + timeout);
+}
 
 void vxge_initialize_ethtool_ops(struct net_device *ndev);
 enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
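
Turning vxge_os_timer() from a statement macro into a static inline buys argument type checking and drops the stray semicolon that followed the do/while(0). A standalone toy, not driver code, showing the same macro-to-inline conversion for a timer-like struct.

#include <stdio.h>

struct toy_timer {
        void (*function)(unsigned long data);
        unsigned long data;
};

/*
 * Unlike the old statement macro, the inline version is type checked:
 * 'timer' must be a pointer and 'func' must really take unsigned long.
 */
static inline void toy_setup_timer(struct toy_timer *timer,
                                   void (*func)(unsigned long data),
                                   unsigned long data)
{
        timer->function = func;
        timer->data = data;
}

static void toy_handler(unsigned long data)
{
        printf("timer fired, data=%lu\n", data);
}

int main(void)
{
        struct toy_timer t;

        toy_setup_timer(&t, toy_handler, 42);
        t.function(t.data);
        return 0;
}
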
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index aca1304..928913c 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2279,6 +2279,8 @@
 
 	netdev_sent_queue(np->dev, skb->len);
 
+	skb_tx_timestamp(skb);
+
 	np->put_tx.orig = put_tx;
 
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -2426,6 +2428,8 @@
 
 	netdev_sent_queue(np->dev, skb->len);
 
+	skb_tx_timestamp(skb);
+
 	np->put_tx.ex = put_tx;
 
 	spin_unlock_irqrestore(&np->lock, flags);
@@ -3942,13 +3946,11 @@
 		ret = pci_enable_msi(np->pci_dev);
 		if (ret == 0) {
 			np->msi_flags |= NV_MSI_ENABLED;
-			dev->irq = np->pci_dev->irq;
 			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
 				netdev_info(dev, "request_irq failed %d\n",
 					    ret);
 				pci_disable_msi(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_ENABLED;
-				dev->irq = np->pci_dev->irq;
 				goto out_err;
 			}
 
@@ -5649,9 +5651,6 @@
 	np->base = ioremap(addr, np->register_size);
 	if (!np->base)
 		goto out_relreg;
-	dev->base_addr = (unsigned long)np->base;
-
-	dev->irq = pci_dev->irq;
 
 	np->rx_ring_size = RX_RING_DEFAULT;
 	np->tx_ring_size = TX_RING_DEFAULT;
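
The forcedeth hunks add skb_tx_timestamp() after the descriptors and byte-queue accounting are set up but before the ring pointer is published and the hardware is kicked, i.e. before the completion path could free the skb. A hypothetical ndo_start_xmit outline showing that placement; the foo_* helpers are assumptions, not driver functions.

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct foo_priv *fp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&fp->lock, flags);
        foo_fill_tx_descs(fp, skb);             /* hypothetical: build descriptors */
        netdev_sent_queue(dev, skb->len);

        skb_tx_timestamp(skb);                  /* software TX stamp for SO_TIMESTAMPING */

        foo_publish_tx(fp);                     /* hypothetical: expose ring pointer */
        spin_unlock_irqrestore(&fp->lock, flags);

        foo_kick_tx(fp);                        /* hypothetical: doorbell write */
        return NETDEV_TX_OK;
}
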
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 6dfc26d..d3469d8 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -990,10 +990,10 @@
 			ndev->stats.rx_errors++;
 		} else {
 			/* Packet is good */
-			skb = dev_alloc_skb(len + 8);
-			if (!skb)
+			skb = dev_alloc_skb(len);
+			if (!skb) {
 				ndev->stats.rx_dropped++;
-			else {
+			} else {
 				prdbuf = skb_put(skb, len);
 
 				/* Copy packet from buffer */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index dd14915..b07311e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -584,7 +584,6 @@
 /**
  * struct pch_gbe_adapter - board specific private data structure
  * @stats_lock:	Spinlock structure for status
- * @tx_queue_lock:	Spinlock structure for transmit
  * @ethtool_lock:	Spinlock structure for ethtool
  * @irq_sem:		Semaphore for interrupt
  * @netdev:		Pointer of network device structure
@@ -609,7 +608,6 @@
 
 struct pch_gbe_adapter {
 	spinlock_t stats_lock;
-	spinlock_t tx_queue_lock;
 	spinlock_t ethtool_lock;
 	atomic_t irq_sem;
 	struct net_device *netdev;
@@ -660,6 +658,7 @@
 extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
 extern u64 pch_rx_snap_read(struct pci_dev *pdev);
 extern u64 pch_tx_snap_read(struct pci_dev *pdev);
+extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
 #endif
 
 /* pch_gbe_param.c */
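
The tx_queue_lock removed from the adapter struct here goes away because pch_gbe_main.c (just below) serializes both the xmit path (next_to_use) and the completion path (next_to_clean, queue wake-up) on the ring's own tx_lock, and the xmit path now keeps that lock across pch_gbe_tx_queue(). A hypothetical outline of that single-lock scheme; the foo_* names are made up.

static netdev_tx_t foo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct foo_tx_ring *ring = foo_get_tx_ring(netdev);     /* hypothetical */
        unsigned long flags;

        spin_lock_irqsave(&ring->tx_lock, flags);
        if (foo_ring_full(ring)) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&ring->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }
        foo_queue_tx(ring, skb);        /* advances ring->next_to_use */
        spin_unlock_irqrestore(&ring->tx_lock, flags);
        return NETDEV_TX_OK;
}

static bool foo_clean_tx(struct foo_tx_ring *ring, struct net_device *netdev)
{
        bool cleaned;

        spin_lock(&ring->tx_lock);
        cleaned = foo_reap_descriptors(ring);   /* advances ring->next_to_clean */
        if (cleaned && netif_queue_stopped(netdev))
                netif_wake_queue(netdev);
        spin_unlock(&ring->tx_lock);
        return cleaned;
}
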
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 8035e5f..3787c64 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -79,7 +79,6 @@
 #define	PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
 #define	PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
 
-#define PCH_GBE_ETH_ALEN            6
 
 /* This defines the bits that are set in the Interrupt Mask
  * Set/Read Register.  Each bit is documented below:
@@ -101,18 +100,19 @@
 
 #ifdef CONFIG_PCH_PTP
 /* Macros for ieee1588 */
-#define TICKS_NS_SHIFT  5
-
 /* 0x40 Time Synchronization Channel Control Register Bits */
 #define MASTER_MODE   (1<<0)
-#define SLAVE_MODE    (0<<0)
+#define SLAVE_MODE    (0)
 #define V2_MODE       (1<<31)
-#define CAP_MODE0     (0<<16)
+#define CAP_MODE0     (0)
 #define CAP_MODE2     (1<<17)
 
 /* 0x44 Time Synchronization Channel Event Register Bits */
 #define TX_SNAPSHOT_LOCKED (1<<0)
 #define RX_SNAPSHOT_LOCKED (1<<1)
+
+#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
+#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
 #endif
 
 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
@@ -120,6 +120,7 @@
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 			       int data);
+static void pch_gbe_set_multi(struct net_device *netdev);
 
 #ifdef CONFIG_PCH_PTP
 static struct sock_filter ptp_filter[] = {
@@ -133,10 +134,8 @@
 	u16 *hi, *id;
 	u32 lo;
 
-	if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) &&
-		(sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
+	if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
 		return 0;
-	}
 
 	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
 
@@ -153,8 +152,8 @@
 		seqid  == *id);
 }
 
-static void pch_rx_timestamp(
-			struct pch_gbe_adapter *adapter, struct sk_buff *skb)
+static void
+pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 {
 	struct skb_shared_hwtstamps *shhwtstamps;
 	struct pci_dev *pdev;
@@ -183,7 +182,6 @@
 		goto out;
 
 	ns = pch_rx_snap_read(pdev);
-	ns <<= TICKS_NS_SHIFT;
 
 	shhwtstamps = skb_hwtstamps(skb);
 	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
@@ -192,8 +190,8 @@
 	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
 }
 
-static void pch_tx_timestamp(
-			struct pch_gbe_adapter *adapter, struct sk_buff *skb)
+static void
+pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 {
 	struct skb_shared_hwtstamps shhwtstamps;
 	struct pci_dev *pdev;
@@ -202,17 +200,16 @@
 	u32 cnt, val;
 
 	shtx = skb_shinfo(skb);
-	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en))
-		shtx->tx_flags |= SKBTX_IN_PROGRESS;
-	else
+	if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
 		return;
 
+	shtx->tx_flags |= SKBTX_IN_PROGRESS;
+
 	/* Get ieee1588's dev information */
 	pdev = adapter->ptp_pdev;
 
 	/*
 	 * This really stinks, but we have to poll for the Tx time stamp.
-	 * Usually, the time stamp is ready after 4 to 6 microseconds.
 	 */
 	for (cnt = 0; cnt < 100; cnt++) {
 		val = pch_ch_event_read(pdev);
@@ -226,7 +223,6 @@
 	}
 
 	ns = pch_tx_snap_read(pdev);
-	ns <<= TICKS_NS_SHIFT;
 
 	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 	shhwtstamps.hwtstamp = ns_to_ktime(ns);
@@ -240,6 +236,7 @@
 	struct hwtstamp_config cfg;
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 	struct pci_dev *pdev;
+	u8 station[20];
 
 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
 		return -EFAULT;
@@ -267,15 +264,23 @@
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 		adapter->hwts_rx_en = 0;
-		pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0));
+		pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
 		break;
 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 		adapter->hwts_rx_en = 1;
-		pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0));
+		pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
 		break;
-	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 		adapter->hwts_rx_en = 1;
-		pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2));
+		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
+		strcpy(station, PTP_L4_MULTICAST_SA);
+		pch_set_station_address(station, pdev);
+		break;
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+		adapter->hwts_rx_en = 1;
+		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
+		strcpy(station, PTP_L2_MULTICAST_SA);
+		pch_set_station_address(station, pdev);
 		break;
 	default:
 		return -ERANGE;
@@ -399,18 +404,18 @@
 	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
 #endif
 	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
-	/* Setup the receive address */
+	/* Setup the receive addresses */
 	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 	return;
 }
 
 static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
 {
-	/* Read the MAC address. and store to the private data */
+	/* Read the MAC addresses. and store to the private data */
 	pch_gbe_mac_read_mac_addr(hw);
 	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
 	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
-	/* Setup the MAC address */
+	/* Setup the MAC addresses */
 	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 	return;
 }
@@ -460,7 +465,7 @@
 		if (mc_addr_count) {
 			pch_gbe_mac_mar_set(hw, mc_addr_list, i);
 			mc_addr_count--;
-			mc_addr_list += PCH_GBE_ETH_ALEN;
+			mc_addr_list += ETH_ALEN;
 		} else {
 			/* Clear MAC address mask */
 			adrmask = ioread32(&hw->reg->ADDR_MASK);
@@ -640,14 +645,11 @@
  */
 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
 {
-	int size;
-
-	size = (int)sizeof(struct pch_gbe_tx_ring);
-	adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+	adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
 		return -ENOMEM;
-	size = (int)sizeof(struct pch_gbe_rx_ring);
-	adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+
+	adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
 	if (!adapter->rx_ring) {
 		kfree(adapter->tx_ring);
 		return -ENOMEM;
@@ -778,6 +780,8 @@
 void pch_gbe_reset(struct pch_gbe_adapter *adapter)
 {
 	pch_gbe_mac_reset_hw(&adapter->hw);
+	/* reprogram multicast address register after reset */
+	pch_gbe_set_multi(adapter->netdev);
 	/* Setup the receive address. */
 	pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
 	if (pch_gbe_hal_init_hw(&adapter->hw))
@@ -1162,7 +1166,6 @@
 	struct sk_buff *tmp_skb;
 	unsigned int frame_ctrl;
 	unsigned int ring_num;
-	unsigned long flags;
 
 	/*-- Set frame control --*/
 	frame_ctrl = 0;
@@ -1182,8 +1185,6 @@
 		if (skb->protocol == htons(ETH_P_IP)) {
 			struct iphdr *iph = ip_hdr(skb);
 			unsigned int offset;
-			iph->check = 0;
-			iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
 			offset = skb_transport_offset(skb);
 			if (iph->protocol == IPPROTO_TCP) {
 				skb->csum = 0;
@@ -1211,14 +1212,14 @@
 			}
 		}
 	}
-	spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
 	ring_num = tx_ring->next_to_use;
 	if (unlikely((ring_num + 1) == tx_ring->count))
 		tx_ring->next_to_use = 0;
 	else
 		tx_ring->next_to_use = ring_num + 1;
 
-	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+
 	buffer_info = &tx_ring->buffer_info[ring_num];
 	tmp_skb = buffer_info->skb;
 
@@ -1342,6 +1343,8 @@
 		/* Stop Receive */
 		pch_gbe_mac_reset_rx(hw);
 	}
+	/* reprogram multicast address register after reset */
+	pch_gbe_set_multi(adapter->netdev);
 }
 
 static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
@@ -1518,7 +1521,7 @@
 						&rx_ring->rx_buff_pool_logic,
 						GFP_KERNEL);
 	if (!rx_ring->rx_buff_pool) {
-		pr_err("Unable to allocate memory for the receive poll buffer\n");
+		pr_err("Unable to allocate memory for the receive pool buffer\n");
 		return -ENOMEM;
 	}
 	memset(rx_ring->rx_buff_pool, 0, size);
@@ -1637,15 +1640,17 @@
 	pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
 		 cleaned_count);
 	/* Recover from running out of Tx resources in xmit_frame */
+	spin_lock(&tx_ring->tx_lock);
 	if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
 		netif_wake_queue(adapter->netdev);
 		adapter->stats.tx_restart_count++;
 		pr_debug("Tx wake queue\n");
 	}
-	spin_lock(&adapter->tx_queue_lock);
+
 	tx_ring->next_to_clean = i;
-	spin_unlock(&adapter->tx_queue_lock);
+
 	pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+	spin_unlock(&tx_ring->tx_lock);
 	return cleaned;
 }
 
@@ -1924,7 +1929,6 @@
 }
 
 
-static void pch_gbe_set_multi(struct net_device *netdev);
 /**
  * pch_gbe_up - Up GbE network device
  * @adapter:  Board private structure
@@ -2037,7 +2041,6 @@
 		return -ENOMEM;
 	}
 	spin_lock_init(&adapter->hw.miim_lock);
-	spin_lock_init(&adapter->tx_queue_lock);
 	spin_lock_init(&adapter->stats_lock);
 	spin_lock_init(&adapter->ethtool_lock);
 	atomic_set(&adapter->irq_sem, 0);
@@ -2142,10 +2145,10 @@
 			 tx_ring->next_to_use, tx_ring->next_to_clean);
 		return NETDEV_TX_BUSY;
 	}
-	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 
 	/* CRC,ITAG no support */
 	pch_gbe_tx_queue(adapter, tx_ring, skb);
+	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 0d29f5f..c236715 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -683,8 +683,6 @@
 	}
 
 	hmp->base = ioaddr;
-	dev->base_addr = (unsigned long)ioaddr;
-	dev->irq = irq;
 	pci_set_drvdata(pdev, dev);
 
 	hmp->chip_id = chip_id;
@@ -859,14 +857,11 @@
 	u32 rx_int_var, tx_int_var;
 	u16 fifo_info;
 
-	i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev);
+	i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED,
+			dev->name, dev);
 	if (i)
 		return i;
 
-	if (hamachi_debug > 1)
-		printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
-			   dev->name, dev->irq);
-
 	hamachi_init_ring(dev);
 
 #if ADDRLEN == 64
@@ -1705,7 +1700,7 @@
 	}
 #endif /* __i386__ debugging only */
 
-	free_irq(dev->irq, dev);
+	free_irq(hmp->pci_dev->irq, dev);
 
 	del_timer_sync(&hmp->timer);
 
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 7757b80..04e622f 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -427,9 +427,6 @@
 	/* Reset the chip. */
 	iowrite32(0x80000000, ioaddr + DMACtrl);
 
-	dev->base_addr = (unsigned long)ioaddr;
-	dev->irq = irq;
-
 	pci_set_drvdata(pdev, dev);
 	spin_lock_init(&np->lock);
 
@@ -569,25 +566,20 @@
 static int yellowfin_open(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
+	const int irq = yp->pci_dev->irq;
 	void __iomem *ioaddr = yp->base;
-	int i, ret;
+	int i, rc;
 
 	/* Reset the chip. */
 	iowrite32(0x80000000, ioaddr + DMACtrl);
 
-	ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
-	if (ret)
-		return ret;
+	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+	if (rc)
+		return rc;
 
-	if (yellowfin_debug > 1)
-		netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
-			      __func__, dev->irq);
-
-	ret = yellowfin_init_ring(dev);
-	if (ret) {
-		free_irq(dev->irq, dev);
-		return ret;
-	}
+	rc = yellowfin_init_ring(dev);
+	if (rc < 0)
+		goto err_free_irq;
 
 	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -647,8 +639,12 @@
 	yp->timer.data = (unsigned long)dev;
 	yp->timer.function = yellowfin_timer;				/* timer handler */
 	add_timer(&yp->timer);
+out:
+	return rc;
 
-	return 0;
+err_free_irq:
+	free_irq(irq, dev);
+	goto out;
 }
 
 static void yellowfin_timer(unsigned long data)
@@ -1251,7 +1247,7 @@
 	}
 #endif /* __i386__ debugging only */
 
-	free_irq(dev->irq, dev);
+	free_irq(yp->pci_dev->irq, dev);
 
 	/* Free all the skbuffs in the Rx queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
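
yellowfin_open() is reworked into the usual goto-based unwind: one success exit, labelled error paths that release in reverse order of acquisition. A minimal sketch of the idiom with hypothetical foo_* names.

static irqreturn_t foo_interrupt(int irq, void *dev_id);

static int foo_open(struct net_device *dev)
{
        struct foo_priv *fp = netdev_priv(dev);
        const int irq = fp->pci_dev->irq;
        int rc;

        rc = request_irq(irq, foo_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                return rc;

        rc = foo_init_ring(dev);        /* hypothetical ring setup, 0 on success */
        if (rc < 0)
                goto err_free_irq;

        netif_start_queue(dev);
out:
        return rc;

err_free_irq:
        free_irq(irq, dev);
        goto out;
}
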
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index ddc95b0..e559dfa 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -623,7 +623,7 @@
 	mac->rx = NULL;
 }
 
-static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
+static void pasemi_mac_replenish_rx_ring(struct net_device *dev,
 					 const int limit)
 {
 	const struct pasemi_mac *mac = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index b5de8a7..37ccbe5 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 78
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.78"
+#define _NETXEN_NIC_LINUX_SUBVERSION 79
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.79"
 
 #define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
 #define _major(v)	(((v) >> 24) & 0xff)
@@ -419,6 +419,8 @@
 	(((sts_data) >> 52) & 0x1)
 #define netxen_get_lro_sts_seq_number(sts_data)		\
 	((sts_data) & 0x0FFFFFFFF)
+#define netxen_get_lro_sts_mss(sts_data1)		\
+	((sts_data1 >> 32) & 0x0FFFF)
 
 
 struct status_desc {
@@ -794,6 +796,7 @@
 #define NX_CAP0_JUMBO_CONTIGUOUS	NX_CAP_BIT(0, 7)
 #define NX_CAP0_LRO_CONTIGUOUS		NX_CAP_BIT(0, 8)
 #define NX_CAP0_HW_LRO			NX_CAP_BIT(0, 10)
+#define NX_CAP0_HW_LRO_MSS		NX_CAP_BIT(0, 21)
 
 /*
  * Context state
@@ -1073,6 +1076,8 @@
 #define NX_FW_CAPABILITY_FVLANTX		(1 << 9)
 #define NX_FW_CAPABILITY_HW_LRO			(1 << 10)
 #define NX_FW_CAPABILITY_GBE_LINK_CFG		(1 << 11)
+#define NX_FW_CAPABILITY_MORE_CAPS		(1 << 31)
+#define NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG	(1 << 2)
 
 /* module types */
 #define LINKEVENT_MODULE_NOT_PRESENT			1
@@ -1155,6 +1160,7 @@
 #define NETXEN_NIC_BRIDGE_ENABLED       0X10
 #define NETXEN_NIC_DIAG_ENABLED		0x20
 #define NETXEN_FW_RESET_OWNER           0x40
+#define NETXEN_FW_MSS_CAP	        0x80
 #define NETXEN_IS_MSI_FAMILY(adapter) \
 	((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
 
@@ -1201,6 +1207,9 @@
 #define NX_FORCE_FW_RESET               0xdeaddead
 
 
+/* Fw dump levels */
+static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
+
 /* Flash read/write address */
 #define NX_FW_DUMP_REG1         0x00130060
 #define NX_FW_DUMP_REG2         0x001e0000
@@ -1814,6 +1823,13 @@
 	char short_name[NETXEN_MAX_SHORT_NAME];
 };
 
+struct netxen_dimm_cfg {
+	u8 presence;
+	u8 mem_type;
+	u8 dimm_type;
+	u32 size;
+};
+
 static const struct netxen_brdinfo netxen_boards[] = {
 	{NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"},
 	{NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"},
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index f3c0057..7f556a8 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -229,7 +229,7 @@
 				adapter->mdump.md_template;
 	adapter->mdump.md_capture_buff = NULL;
 	adapter->mdump.fw_supports_md = 1;
-	adapter->mdump.md_enabled = 1;
+	adapter->mdump.md_enabled = 0;
 
 	return err;
 
@@ -328,6 +328,9 @@
 	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
 	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
 
+	if (adapter->flags & NETXEN_FW_MSS_CAP)
+		cap |= NX_CAP0_HW_LRO_MSS;
+
 	prq->capabilities[0] = cpu_to_le32(cap);
 	prq->host_int_crb_mode =
 		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 8c39299..3973040 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -834,7 +834,7 @@
 static int
 netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
 {
-	int ret = 0;
+	int i;
 	struct netxen_adapter *adapter = netdev_priv(netdev);
 	struct netxen_minidump *mdump = &adapter->mdump;
 
@@ -844,7 +844,7 @@
 			mdump->md_enabled = 1;
 		if (adapter->fw_mdump_rdy) {
 			netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
-			return ret;
+			return 0;
 		}
 		netdev_info(netdev, "Forcing a fw dump\n");
 		nx_dev_request_reset(adapter);
@@ -867,19 +867,21 @@
 		adapter->flags &= ~NETXEN_FW_RESET_OWNER;
 		break;
 	default:
-		if (val->flag <= NX_DUMP_MASK_MAX &&
-			val->flag >= NX_DUMP_MASK_MIN) {
-			mdump->md_capture_mask = val->flag & 0xff;
-			netdev_info(netdev, "Driver mask changed to: 0x%x\n",
+		for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
+			if (val->flag == FW_DUMP_LEVELS[i]) {
+				mdump->md_capture_mask = val->flag;
+				netdev_info(netdev,
+					"Driver mask changed to: 0x%x\n",
 					mdump->md_capture_mask);
-			break;
+				return 0;
+			}
 		}
 		netdev_info(netdev,
 			"Invalid dump level: 0x%x\n", val->flag);
 		return -EINVAL;
 	}
 
-	return ret;
+	return 0;
 }
 
 static int
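
netxen_set_dump() now validates the requested capture mask against the discrete levels in FW_DUMP_LEVELS instead of accepting anything inside a min/max range. A standalone restatement of that table-driven check; the sample inputs are arbitrary.

#include <stdio.h>
#include <stdint.h>

static const uint32_t fw_dump_levels[] = {
        0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };

static int valid_dump_level(uint32_t flag)
{
        size_t i;

        for (i = 0; i < sizeof(fw_dump_levels) / sizeof(fw_dump_levels[0]); i++)
                if (flag == fw_dump_levels[i])
                        return 1;
        return 0;
}

int main(void)
{
        /* 0x1f is a defined level; 0x2b would pass a plain min/max range check */
        printf("0x1f -> %d, 0x2b -> %d\n",
               valid_dump_level(0x1f), valid_dump_level(0x2b));
        return 0;
}
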
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
index b1a897c..28e0769 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -776,6 +776,7 @@
 #define CRB_SW_INT_MASK_3		(NETXEN_NIC_REG(0x1e8))
 
 #define CRB_FW_CAPABILITIES_1		(NETXEN_CAM_RAM(0x128))
+#define CRB_FW_CAPABILITIES_2		(NETXEN_CAM_RAM(0x12c))
 #define CRB_MAC_BLOCK_START		(NETXEN_CAM_RAM(0x1c0))
 
 /*
@@ -955,6 +956,31 @@
 #define NX_CRB_DEV_REF_COUNT		(NETXEN_CAM_RAM(0x138))
 #define NX_CRB_DEV_STATE		(NETXEN_CAM_RAM(0x140))
 
+/* MiniDIMM related macros */
+#define NETXEN_DIMM_CAPABILITY		(NETXEN_CAM_RAM(0x258))
+#define NETXEN_DIMM_PRESENT			0x1
+#define NETXEN_DIMM_MEMTYPE_DDR2_SDRAM	0x2
+#define NETXEN_DIMM_SIZE			0x4
+#define NETXEN_DIMM_MEMTYPE(VAL)		((VAL >> 3) & 0xf)
+#define	NETXEN_DIMM_NUMROWS(VAL)		((VAL >> 7) & 0xf)
+#define	NETXEN_DIMM_NUMCOLS(VAL)		((VAL >> 11) & 0xf)
+#define	NETXEN_DIMM_NUMRANKS(VAL)		((VAL >> 15) & 0x3)
+#define NETXEN_DIMM_DATAWIDTH(VAL)		((VAL >> 18) & 0x3)
+#define NETXEN_DIMM_NUMBANKS(VAL)		((VAL >> 21) & 0xf)
+#define NETXEN_DIMM_TYPE(VAL)		((VAL >> 25) & 0x3f)
+#define NETXEN_DIMM_VALID_FLAG		0x80000000
+
+#define NETXEN_DIMM_MEM_DDR2_SDRAM	0x8
+
+#define NETXEN_DIMM_STD_MEM_SIZE	512
+
+#define NETXEN_DIMM_TYPE_RDIMM	0x1
+#define NETXEN_DIMM_TYPE_UDIMM	0x2
+#define NETXEN_DIMM_TYPE_SO_DIMM	0x4
+#define NETXEN_DIMM_TYPE_Micro_DIMM	0x8
+#define NETXEN_DIMM_TYPE_Mini_RDIMM	0x10
+#define NETXEN_DIMM_TYPE_Mini_UDIMM	0x20
+
 /* Device State */
 #define NX_DEV_COLD		1
 #define NX_DEV_INITALIZING	2
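
The NETXEN_DIMM_* field macros above feed the size computation in the new "dimm" sysfs handler later in this patch: bytes = 2^rows * 2^cols * data-width(bits) * banks * ranks, divided by 8 and reported in MB, with the ranks field stored as ranks-1 and the width taken from a small code table. A standalone worked example with made-up field values.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* made-up field values, as netxen_sysfs_read_dimm() decodes them */
        unsigned int rows = 14, cols = 10, banks = 4;
        unsigned int ranks = 1 + 1;     /* register field stores ranks - 1 */
        unsigned int width = 64;        /* data width in bits, from the code table */
        uint64_t size_mb;

        /* bytes = 2^rows * 2^cols * width * banks * ranks / 8, reported in MB */
        size_mb = ((1ULL << rows) * (1ULL << cols) * width * banks * ranks)
                  / 8 / 0x100000;
        printf("DIMM size: %llu MB\n", (unsigned long long)size_mb);    /* 1024 */
        return 0;
}
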
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 718b274..0d725dc 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1131,7 +1131,6 @@
 		 _build(file_fw_ver));
 		return -EINVAL;
 	}
-
 	val = nx_get_bios_version(adapter);
 	netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
 	if ((__force u32)val != bios) {
@@ -1661,6 +1660,9 @@
 
 	length = skb->len;
 
+	if (adapter->flags & NETXEN_FW_MSS_CAP)
+		skb_shinfo(skb)->gso_size  =  netxen_get_lro_sts_mss(sts_data1);
+
 	netif_receive_skb(skb);
 
 	adapter->stats.lro_pkts++;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 65a718f..342b3a7 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1184,6 +1184,7 @@
 	int err, ring;
 	struct nx_host_rds_ring *rds_ring;
 	struct nx_host_tx_ring *tx_ring;
+	u32 capab2;
 
 	if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
 		return 0;
@@ -1192,6 +1193,13 @@
 	if (err)
 		return err;
 
+	adapter->flags &= ~NETXEN_FW_MSS_CAP;
+	if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) {
+		capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2);
+		if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+			adapter->flags |= NETXEN_FW_MSS_CAP;
+	}
+
 	err = netxen_napi_add(adapter, netdev);
 	if (err)
 		return err;
@@ -1810,7 +1818,6 @@
 		flags = FLAGS_VLAN_TAGGED;
 
 	} else if (vlan_tx_tag_present(skb)) {
-
 		flags = FLAGS_VLAN_OOB;
 		vid = vlan_tx_tag_get(skb);
 		netxen_set_tx_vlan_tci(first_desc, vid);
@@ -2926,6 +2933,134 @@
 	.write = netxen_sysfs_write_mem,
 };
 
+static ssize_t
+netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *attr,
+		char *buf, loff_t offset, size_t size)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct netxen_adapter *adapter = dev_get_drvdata(dev);
+	struct net_device *netdev = adapter->netdev;
+	struct netxen_dimm_cfg dimm;
+	u8 dw, rows, cols, banks, ranks;
+	u32 val;
+
+	if (size != sizeof(struct netxen_dimm_cfg)) {
+		netdev_err(netdev, "Invalid size\n");
+		return -1;
+	}
+
+	memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
+	val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY);
+
+	/* Checks if DIMM info is valid. */
+	if (val & NETXEN_DIMM_VALID_FLAG) {
+		netdev_err(netdev, "Invalid DIMM flag\n");
+		dimm.presence = 0xff;
+		goto out;
+	}
+
+	rows = NETXEN_DIMM_NUMROWS(val);
+	cols = NETXEN_DIMM_NUMCOLS(val);
+	ranks = NETXEN_DIMM_NUMRANKS(val);
+	banks = NETXEN_DIMM_NUMBANKS(val);
+	dw = NETXEN_DIMM_DATAWIDTH(val);
+
+	dimm.presence = (val & NETXEN_DIMM_PRESENT);
+
+	/* Checks if DIMM info is present. */
+	if (!dimm.presence) {
+		netdev_err(netdev, "DIMM not present\n");
+		goto out;
+	}
+
+	dimm.dimm_type = NETXEN_DIMM_TYPE(val);
+
+	switch (dimm.dimm_type) {
+	case NETXEN_DIMM_TYPE_RDIMM:
+	case NETXEN_DIMM_TYPE_UDIMM:
+	case NETXEN_DIMM_TYPE_SO_DIMM:
+	case NETXEN_DIMM_TYPE_Micro_DIMM:
+	case NETXEN_DIMM_TYPE_Mini_RDIMM:
+	case NETXEN_DIMM_TYPE_Mini_UDIMM:
+		break;
+	default:
+		netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type);
+		goto out;
+	}
+
+	if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM)
+		dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM;
+	else
+		dimm.mem_type = NETXEN_DIMM_MEMTYPE(val);
+
+	if (val & NETXEN_DIMM_SIZE) {
+		dimm.size = NETXEN_DIMM_STD_MEM_SIZE;
+		goto out;
+	}
+
+	if (!rows) {
+		netdev_err(netdev, "Invalid no of rows %x\n", rows);
+		goto out;
+	}
+
+	if (!cols) {
+		netdev_err(netdev, "Invalid no of columns %x\n", cols);
+		goto out;
+	}
+
+	if (!banks) {
+		netdev_err(netdev, "Invalid no of banks %x\n", banks);
+		goto out;
+	}
+
+	ranks += 1;
+
+	switch (dw) {
+	case 0x0:
+		dw = 32;
+		break;
+	case 0x1:
+		dw = 33;
+		break;
+	case 0x2:
+		dw = 36;
+		break;
+	case 0x3:
+		dw = 64;
+		break;
+	case 0x4:
+		dw = 72;
+		break;
+	case 0x5:
+		dw = 80;
+		break;
+	case 0x6:
+		dw = 128;
+		break;
+	case 0x7:
+		dw = 144;
+		break;
+	default:
+		netdev_err(netdev, "Invalid data-width %x\n", dw);
+		goto out;
+	}
+
+	dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8;
+	/* Size returned in MB. */
+	dimm.size = (dimm.size) / 0x100000;
+out:
+	memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg));
+	return sizeof(struct netxen_dimm_cfg);
+
+}
+
+static struct bin_attribute bin_attr_dimm = {
+	.attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
+	.size = 0,
+	.read = netxen_sysfs_read_dimm,
+};
+
 
 static void
 netxen_create_sysfs_entries(struct netxen_adapter *adapter)
@@ -2963,6 +3098,8 @@
 		dev_info(dev, "failed to create crb sysfs entry\n");
 	if (device_create_bin_file(dev, &bin_attr_mem))
 		dev_info(dev, "failed to create mem sysfs entry\n");
+	if (device_create_bin_file(dev, &bin_attr_dimm))
+		dev_info(dev, "failed to create dimm sysfs entry\n");
 }
 
 
@@ -2975,6 +3112,7 @@
 	device_remove_file(dev, &dev_attr_diag_mode);
 	device_remove_bin_file(dev, &bin_attr_crb);
 	device_remove_bin_file(dev, &bin_attr_mem);
+	device_remove_bin_file(dev, &bin_attr_dimm);
 }
 
 #ifdef CONFIG_INET
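
The new "dimm" bin attribute can be read from user space, but netxen_sysfs_read_dimm() rejects any read whose size is not exactly sizeof(struct netxen_dimm_cfg), so the caller has to issue a single read() of that size (buffered stdio would request more) and keep the same struct layout and padding as the kernel side. A hypothetical user-space reader; the sysfs path is only illustrative and depends on where the adapter is bound.

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

struct netxen_dimm_cfg {        /* mirrors the kernel struct above */
        uint8_t presence;
        uint8_t mem_type;
        uint8_t dimm_type;
        uint32_t size;          /* MB */
};

int main(void)
{
        /* illustrative path; substitute the sysfs dir the adapter is bound to */
        const char *path = "/sys/bus/pci/devices/0000:03:00.0/dimm";
        struct netxen_dimm_cfg dimm;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return 1;
        /* the handler rejects reads that are not exactly sizeof(struct) bytes */
        if (read(fd, &dimm, sizeof(dimm)) != (ssize_t)sizeof(dimm)) {
                close(fd);
                return 1;
        }
        close(fd);
        printf("present=%u mem_type=0x%x dimm_type=0x%x size=%u MB\n",
               dimm.presence, dimm.mem_type, dimm.dimm_type, dimm.size);
        return 0;
}
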
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 385a4d5..8680a5d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 27
-#define QLCNIC_LINUX_VERSIONID  "5.0.27"
+#define _QLCNIC_LINUX_SUBVERSION 28
+#define QLCNIC_LINUX_VERSIONID  "5.0.28"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -607,6 +607,7 @@
 #define QLCNIC_CDRP_CMD_CONFIG_PORT		0x0000002E
 #define QLCNIC_CDRP_CMD_TEMP_SIZE		0x0000002f
 #define QLCNIC_CDRP_CMD_GET_TEMP_HDR		0x00000030
+#define QLCNIC_CDRP_CMD_GET_MAC_STATS		0x00000037
 
 #define QLCNIC_RCODE_SUCCESS		0
 #define QLCNIC_RCODE_NOT_SUPPORTED	9
@@ -1180,18 +1181,62 @@
 #define QLCNIC_STATS_ESWITCH		2
 #define QLCNIC_QUERY_RX_COUNTER		0
 #define QLCNIC_QUERY_TX_COUNTER		1
-#define QLCNIC_ESW_STATS_NOT_AVAIL	0xffffffffffffffffULL
+#define QLCNIC_STATS_NOT_AVAIL	0xffffffffffffffffULL
+#define QLCNIC_FILL_STATS(VAL1) \
+	(((VAL1) == QLCNIC_STATS_NOT_AVAIL) ? 0 : VAL1)
+#define QLCNIC_MAC_STATS 1
+#define QLCNIC_ESW_STATS 2
 
 #define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
 do {	\
-	if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \
-	    ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \
+	if (((VAL1) == QLCNIC_STATS_NOT_AVAIL) && \
+	    ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
 		(VAL1) = (VAL2); \
-	else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \
-		 ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \
+	else if (((VAL1) != QLCNIC_STATS_NOT_AVAIL) && \
+		 ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
 			(VAL1) += (VAL2); \
 } while (0)
 
+struct qlcnic_mac_statistics{
+	__le64	mac_tx_frames;
+	__le64	mac_tx_bytes;
+	__le64	mac_tx_mcast_pkts;
+	__le64	mac_tx_bcast_pkts;
+	__le64	mac_tx_pause_cnt;
+	__le64	mac_tx_ctrl_pkt;
+	__le64	mac_tx_lt_64b_pkts;
+	__le64	mac_tx_lt_127b_pkts;
+	__le64	mac_tx_lt_255b_pkts;
+	__le64	mac_tx_lt_511b_pkts;
+	__le64	mac_tx_lt_1023b_pkts;
+	__le64	mac_tx_lt_1518b_pkts;
+	__le64	mac_tx_gt_1518b_pkts;
+	__le64	rsvd1[3];
+
+	__le64	mac_rx_frames;
+	__le64	mac_rx_bytes;
+	__le64	mac_rx_mcast_pkts;
+	__le64	mac_rx_bcast_pkts;
+	__le64	mac_rx_pause_cnt;
+	__le64	mac_rx_ctrl_pkt;
+	__le64	mac_rx_lt_64b_pkts;
+	__le64	mac_rx_lt_127b_pkts;
+	__le64	mac_rx_lt_255b_pkts;
+	__le64	mac_rx_lt_511b_pkts;
+	__le64	mac_rx_lt_1023b_pkts;
+	__le64	mac_rx_lt_1518b_pkts;
+	__le64	mac_rx_gt_1518b_pkts;
+	__le64	rsvd2[3];
+
+	__le64	mac_rx_length_error;
+	__le64	mac_rx_length_small;
+	__le64	mac_rx_length_large;
+	__le64	mac_rx_jabber;
+	__le64	mac_rx_dropped;
+	__le64	mac_rx_crc_error;
+	__le64	mac_align_error;
+} __packed;
+
 struct __qlcnic_esw_statistics {
 	__le16 context_id;
 	__le16 version;
@@ -1352,6 +1397,8 @@
 #define QLCNIC_ENABLE_FW_DUMP		0xaddfeed
 #define QLCNIC_DISABLE_FW_DUMP		0xbadfeed
 #define QLCNIC_FORCE_FW_RESET		0xdeaddead
+#define QLCNIC_SET_QUIESCENT		0xadd00010
+#define QLCNIC_RESET_QUIESCENT		0xadd00020
 
 struct qlcnic_dump_operations {
 	enum op_codes opcode;
@@ -1510,6 +1557,7 @@
 int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
 					struct __qlcnic_esw_statistics *);
 int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
+int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
 extern int qlcnic_config_tso;
 
 /*
@@ -1559,6 +1607,7 @@
 }
 
 extern const struct ethtool_ops qlcnic_ethtool_ops;
+extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
 
 struct qlcnic_nic_template {
 	int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
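
The rename from QLCNIC_ESW_STATS_NOT_AVAIL to QLCNIC_STATS_NOT_AVAIL keeps the all-ones sentinel meaning "no data yet": QLCNIC_ADD_ESW_STATS() only seeds or accumulates when a real sample arrives, and QLCNIC_FILL_STATS() reports the sentinel as zero. A standalone restatement of that behaviour; the helper name and sample values are made up.

#include <stdio.h>
#include <stdint.h>

#define STATS_NOT_AVAIL 0xffffffffffffffffULL
#define FILL_STATS(v)   (((v) == STATS_NOT_AVAIL) ? 0 : (v))

/* same rules as QLCNIC_ADD_ESW_STATS: the sentinel means "no data yet" */
static void add_stat(uint64_t *acc, uint64_t sample)
{
        if (*acc == STATS_NOT_AVAIL && sample != STATS_NOT_AVAIL)
                *acc = sample;
        else if (*acc != STATS_NOT_AVAIL && sample != STATS_NOT_AVAIL)
                *acc += sample;
}

int main(void)
{
        uint64_t unicast = STATS_NOT_AVAIL;

        add_stat(&unicast, STATS_NOT_AVAIL);    /* a function with no stats yet */
        add_stat(&unicast, 100);
        add_stat(&unicast, 50);
        printf("reported: %llu\n",
               (unsigned long long)FILL_STATS(unicast));        /* 150 */
        return 0;
}
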
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 569a837..8db8524 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -905,6 +905,65 @@
 	return err;
 }
 
+/* This routine will retrieve the MAC statistics from firmware */
+int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
+		struct qlcnic_mac_statistics *mac_stats)
+{
+	struct qlcnic_mac_statistics *stats;
+	struct qlcnic_cmd_args cmd;
+	size_t stats_size = sizeof(struct qlcnic_mac_statistics);
+	dma_addr_t stats_dma_t;
+	void *stats_addr;
+	int err;
+
+	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+			&stats_dma_t, GFP_KERNEL);
+	if (!stats_addr) {
+		dev_err(&adapter->pdev->dev,
+			"%s: Unable to allocate memory.\n", __func__);
+		return -ENOMEM;
+	}
+	memset(stats_addr, 0, stats_size);
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS;
+	cmd.req.arg1 = stats_size << 16;
+	cmd.req.arg2 = MSD(stats_dma_t);
+	cmd.req.arg3 = LSD(stats_dma_t);
+
+	qlcnic_issue_cmd(adapter, &cmd);
+	err = cmd.rsp.cmd;
+
+	if (!err) {
+		stats = stats_addr;
+		mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
+		mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
+		mac_stats->mac_tx_mcast_pkts =
+					le64_to_cpu(stats->mac_tx_mcast_pkts);
+		mac_stats->mac_tx_bcast_pkts =
+					le64_to_cpu(stats->mac_tx_bcast_pkts);
+		mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
+		mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
+		mac_stats->mac_rx_mcast_pkts =
+					le64_to_cpu(stats->mac_rx_mcast_pkts);
+		mac_stats->mac_rx_length_error =
+				le64_to_cpu(stats->mac_rx_length_error);
+		mac_stats->mac_rx_length_small =
+				le64_to_cpu(stats->mac_rx_length_small);
+		mac_stats->mac_rx_length_large =
+				le64_to_cpu(stats->mac_rx_length_large);
+		mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
+		mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
+		mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
+	} else {
+		dev_info(&adapter->pdev->dev,
+			"%s: Get mac stats failed =%d.\n", __func__, err);
+	}
+
+	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+		stats_dma_t);
+	return err;
+}
+
 int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
 		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
 
@@ -920,13 +979,13 @@
 		return -EIO;
 
 	memset(esw_stats, 0, sizeof(u64));
-	esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
-	esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
-	esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
-	esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
-	esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL;
-	esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
-	esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL;
+	esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
+	esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
+	esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
+	esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
+	esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
+	esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
+	esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
 	esw_stats->context_id = eswitch;
 
 	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 89ddf7f..9e9e78a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -78,8 +78,46 @@
 	"tx numbytes",
 };
 
-#define QLCNIC_STATS_LEN	ARRAY_SIZE(qlcnic_gstrings_stats)
+static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
+	"mac_tx_frames",
+	"mac_tx_bytes",
+	"mac_tx_mcast_pkts",
+	"mac_tx_bcast_pkts",
+	"mac_tx_pause_cnt",
+	"mac_tx_ctrl_pkt",
+	"mac_tx_lt_64b_pkts",
+	"mac_tx_lt_127b_pkts",
+	"mac_tx_lt_255b_pkts",
+	"mac_tx_lt_511b_pkts",
+	"mac_tx_lt_1023b_pkts",
+	"mac_tx_lt_1518b_pkts",
+	"mac_tx_gt_1518b_pkts",
+	"mac_rx_frames",
+	"mac_rx_bytes",
+	"mac_rx_mcast_pkts",
+	"mac_rx_bcast_pkts",
+	"mac_rx_pause_cnt",
+	"mac_rx_ctrl_pkt",
+	"mac_rx_lt_64b_pkts",
+	"mac_rx_lt_127b_pkts",
+	"mac_rx_lt_255b_pkts",
+	"mac_rx_lt_511b_pkts",
+	"mac_rx_lt_1023b_pkts",
+	"mac_rx_lt_1518b_pkts",
+	"mac_rx_gt_1518b_pkts",
+	"mac_rx_length_error",
+	"mac_rx_length_small",
+	"mac_rx_length_large",
+	"mac_rx_jabber",
+	"mac_rx_dropped",
+	"mac_rx_crc_error",
+	"mac_align_error",
+};
+
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+#define QLCNIC_MAC_STATS_LEN ARRAY_SIZE(qlcnic_mac_stats_strings)
 #define QLCNIC_DEVICE_STATS_LEN	ARRAY_SIZE(qlcnic_device_gstrings_stats)
+#define QLCNIC_TOTAL_STATS_LEN QLCNIC_STATS_LEN + QLCNIC_MAC_STATS_LEN
 
 static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register_Test_on_offline",
@@ -644,8 +682,8 @@
 		return QLCNIC_TEST_LEN;
 	case ETH_SS_STATS:
 		if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
-			return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
-		return QLCNIC_STATS_LEN;
+			return QLCNIC_TOTAL_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
+		return QLCNIC_TOTAL_STATS_LEN;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -851,7 +889,7 @@
 qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
-	int index, i;
+	int index, i, j;
 
 	switch (stringset) {
 	case ETH_SS_TEST:
@@ -864,6 +902,11 @@
 			       qlcnic_gstrings_stats[index].stat_string,
 			       ETH_GSTRING_LEN);
 		}
+		for (j = 0; j < QLCNIC_MAC_STATS_LEN; index++, j++) {
+			memcpy(data + index * ETH_GSTRING_LEN,
+			       qlcnic_mac_stats_strings[j],
+			       ETH_GSTRING_LEN);
+		}
 		if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
 			return;
 		for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
@@ -874,22 +917,64 @@
 	}
 }
 
-#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \
-	(((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 0 : VAL1)
-
 static void
-qlcnic_fill_device_stats(int *index, u64 *data,
-		struct __qlcnic_esw_statistics *stats)
+qlcnic_fill_stats(int *index, u64 *data, void *stats, int type)
 {
 	int ind = *index;
 
-	data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames);
-	data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames);
-	data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames);
-	data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames);
-	data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors);
-	data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames);
-	data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes);
+	if (type == QLCNIC_MAC_STATS) {
+		struct qlcnic_mac_statistics *mac_stats =
+					(struct qlcnic_mac_statistics *)stats;
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
+		data[ind++] =
+			QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
+		data[ind++] =
+			QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
+		data[ind++] =
+			QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
+		data[ind++] =
+			QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
+		data[ind++] =
+			QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
+		data[ind++] =
+			QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
+		data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
+	} else if (type == QLCNIC_ESW_STATS) {
+		struct __qlcnic_esw_statistics *esw_stats =
+				(struct __qlcnic_esw_statistics *)stats;
+		data[ind++] = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
+		data[ind++] = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
+		data[ind++] = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
+		data[ind++] = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
+		data[ind++] = QLCNIC_FILL_STATS(esw_stats->errors);
+		data[ind++] = QLCNIC_FILL_STATS(esw_stats->local_frames);
+		data[ind++] = QLCNIC_FILL_STATS(esw_stats->numbytes);
+	}
 
 	*index = ind;
 }
@@ -900,6 +985,7 @@
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
 	struct qlcnic_esw_statistics port_stats;
+	struct qlcnic_mac_statistics mac_stats;
 	int index, ret;
 
 	for (index = 0; index < QLCNIC_STATS_LEN; index++) {
@@ -911,6 +997,11 @@
 		     sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
 	}
 
+	/* Retrieve MAC statistics from firmware */
+	memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
+	qlcnic_get_mac_stats(adapter, &mac_stats);
+	qlcnic_fill_stats(&index, data, &mac_stats, QLCNIC_MAC_STATS);
+
 	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
 		return;
 
@@ -920,14 +1011,14 @@
 	if (ret)
 		return;
 
-	qlcnic_fill_device_stats(&index, data, &port_stats.rx);
+	qlcnic_fill_stats(&index, data, &port_stats.rx, QLCNIC_ESW_STATS);
 
 	ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
 			QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
 	if (ret)
 		return;
 
-	qlcnic_fill_device_stats(&index, data, &port_stats.tx);
+	qlcnic_fill_stats(&index, data, &port_stats.tx, QLCNIC_ESW_STATS);
 }
 
 static int qlcnic_set_led(struct net_device *dev,
@@ -1132,11 +1223,21 @@
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
 
+	if (!fw_dump->tmpl_hdr) {
+		netdev_err(adapter->netdev, "FW Dump not supported\n");
+		return -ENOTSUPP;
+	}
+
 	if (fw_dump->clr)
 		dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
 	else
 		dump->len = 0;
-	dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+
+	if (!fw_dump->enable)
+		dump->flag = ETH_FW_DUMP_DISABLE;
+	else
+		dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+
 	dump->version = adapter->fw_version;
 	return 0;
 }
@@ -1150,6 +1251,11 @@
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
 
+	if (!fw_dump->tmpl_hdr) {
+		netdev_err(netdev, "FW Dump not supported\n");
+		return -ENOTSUPP;
+	}
+
 	if (!fw_dump->clr) {
 		netdev_info(netdev, "Dump not available\n");
 		return -EINVAL;
@@ -1177,55 +1283,74 @@
 static int
 qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
 {
-	int ret = 0;
+	int i;
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+	u32 state;
 
 	switch (val->flag) {
 	case QLCNIC_FORCE_FW_DUMP_KEY:
+		if (!fw_dump->tmpl_hdr) {
+			netdev_err(netdev, "FW dump not supported\n");
+			return -ENOTSUPP;
+		}
 		if (!fw_dump->enable) {
 			netdev_info(netdev, "FW dump not enabled\n");
-			return ret;
+			return 0;
 		}
 		if (fw_dump->clr) {
 			netdev_info(netdev,
 			"Previous dump not cleared, not forcing dump\n");
-			return ret;
+			return 0;
 		}
 		netdev_info(netdev, "Forcing a FW dump\n");
 		qlcnic_dev_request_reset(adapter);
 		break;
 	case QLCNIC_DISABLE_FW_DUMP:
-		if (fw_dump->enable) {
+		if (fw_dump->enable && fw_dump->tmpl_hdr) {
 			netdev_info(netdev, "Disabling FW dump\n");
 			fw_dump->enable = 0;
 		}
-		break;
+		return 0;
 	case QLCNIC_ENABLE_FW_DUMP:
-		if (!fw_dump->enable && fw_dump->tmpl_hdr) {
+		if (!fw_dump->tmpl_hdr) {
+			netdev_err(netdev, "FW dump not supported\n");
+			return -ENOTSUPP;
+		}
+		if (!fw_dump->enable) {
 			netdev_info(netdev, "Enabling FW dump\n");
 			fw_dump->enable = 1;
 		}
-		break;
+		return 0;
 	case QLCNIC_FORCE_FW_RESET:
 		netdev_info(netdev, "Forcing a FW reset\n");
 		qlcnic_dev_request_reset(adapter);
 		adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
-		break;
+		return 0;
+	case QLCNIC_SET_QUIESCENT:
+	case QLCNIC_RESET_QUIESCENT:
+		state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+		if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+			netdev_info(netdev, "Device in FAILED state\n");
+		return 0;
 	default:
-		if (val->flag > QLCNIC_DUMP_MASK_MAX ||
-			val->flag < QLCNIC_DUMP_MASK_MIN) {
-				netdev_info(netdev,
-				"Invalid dump level: 0x%x\n", val->flag);
-				ret = -EINVAL;
-				goto out;
+		if (!fw_dump->tmpl_hdr) {
+			netdev_err(netdev, "FW dump not supported\n");
+			return -ENOTSUPP;
 		}
-		fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff;
-		netdev_info(netdev, "Driver mask changed to: 0x%x\n",
-			fw_dump->tmpl_hdr->drv_cap_mask);
+		for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
+			if (val->flag == FW_DUMP_LEVELS[i]) {
+				fw_dump->tmpl_hdr->drv_cap_mask =
+							val->flag;
+				netdev_info(netdev, "Driver mask changed to: 0x%x\n",
+					fw_dump->tmpl_hdr->drv_cap_mask);
+				return 0;
+			}
+		}
+		netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag);
+		return -EINVAL;
 	}
-out:
-	return ret;
+	return 0;
 }
 
 const struct ethtool_ops qlcnic_ethtool_ops = {
@@ -1258,3 +1383,10 @@
 	.get_dump_data = qlcnic_get_dump_data,
 	.set_dump = qlcnic_set_dump,
 };
+
+const struct ethtool_ops qlcnic_ethtool_failed_ops = {
+	.get_settings = qlcnic_get_settings,
+	.get_drvinfo = qlcnic_get_drvinfo,
+	.set_msglevel = qlcnic_set_msglevel,
+	.get_msglevel = qlcnic_get_msglevel,
+};
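
Aside, not part of the patch: qlcnic_set_dump now accepts only the fixed whitelist in FW_DUMP_LEVELS (added to qlcnic_hdr.h just below) instead of the old min/max range check. A minimal standalone C sketch of that validation; the function name and the sample inputs are made up for illustration, only the table values mirror the driver's.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Mirrors the driver's FW_DUMP_LEVELS whitelist of capability masks. */
static const unsigned int fw_dump_levels[] = {
	0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
};

/* Accept flag only if it is one of the known levels; -1 otherwise. */
static int set_dump_level(unsigned int flag, unsigned int *cap_mask)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(fw_dump_levels); i++) {
		if (flag == fw_dump_levels[i]) {
			*cap_mask = flag;
			return 0;
		}
	}
	return -1;		/* the driver returns -EINVAL here */
}

int main(void)
{
	unsigned int mask = 0;

	printf("0x1f -> %d, mask 0x%x\n", set_dump_level(0x1f, &mask), mask);
	printf("0x10 -> %d\n", set_dump_level(0x10, &mask));
	return 0;
}
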
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index a528193..6ced319 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -704,6 +704,8 @@
 #define QLCNIC_DEV_FAILED		0x6
 #define QLCNIC_DEV_QUISCENT		0x7
 
+#define QLCNIC_DEV_BADBAD		0xbad0bad0
+
 #define QLCNIC_DEV_NPAR_NON_OPER	0 /* NON Operational */
 #define QLCNIC_DEV_NPAR_OPER		1 /* NPAR Operational */
 #define QLCNIC_DEV_NPAR_OPER_TIMEO	30 /* Operational time out */
@@ -776,6 +778,10 @@
 #define FLASH_ROM_WINDOW	0x42110030
 #define FLASH_ROM_DATA		0x42150000
 
+
+static const u32 FW_DUMP_LEVELS[] = {
+	0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
+
 static const u32 MIU_TEST_READ_DATA[] = {
 	0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 75c32e8..46e77a2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -338,6 +338,10 @@
 #endif
 };
 
+static const struct net_device_ops qlcnic_netdev_failed_ops = {
+	.ndo_open	   = qlcnic_open,
+};
+
 static struct qlcnic_nic_template qlcnic_ops = {
 	.config_bridged_mode = qlcnic_config_bridged_mode,
 	.config_led = qlcnic_config_led,
@@ -1623,8 +1627,9 @@
 
 	err = adapter->nic_ops->start_firmware(adapter);
 	if (err) {
-		dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
-		goto err_out_decr_ref;
+		dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
+			"\t\tIf reboot doesn't help, try flashing the card\n");
+		goto err_out_maintenance_mode;
 	}
 
 	if (qlcnic_read_mac_addr(adapter))
@@ -1695,6 +1700,18 @@
 	pci_set_drvdata(pdev, NULL);
 	pci_disable_device(pdev);
 	return err;
+
+err_out_maintenance_mode:
+	netdev->netdev_ops = &qlcnic_netdev_failed_ops;
+	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+	err = register_netdev(netdev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to register net device\n");
+		goto err_out_decr_ref;
+	}
+	pci_set_drvdata(pdev, adapter);
+	qlcnic_create_diag_entries(adapter);
+	return 0;
 }
 
 static void __devexit qlcnic_remove(struct pci_dev *pdev)
@@ -1831,8 +1848,14 @@
 static int qlcnic_open(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 	int err;
 
+	if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
+		netdev_err(netdev, "Device in FAILED state\n");
+		return -EIO;
+	}
+
 	netif_carrier_off(netdev);
 
 	err = qlcnic_attach(adapter);
@@ -1942,7 +1965,7 @@
 	__le16 vlan_id = 0;
 	u8 hindex;
 
-	if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
+	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
 		return;
 
 	if (adapter->fhash.fnum >= adapter->fhash.fmax)
@@ -2212,8 +2235,7 @@
 
 	if (adapter->flags & QLCNIC_MACSPOOF) {
 		phdr = (struct ethhdr *)skb->data;
-		if (compare_ether_addr(phdr->h_source,
-					adapter->mac_addr))
+		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
 			goto drop_packet;
 	}
 
@@ -3018,6 +3040,12 @@
 		return;
 
 	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+	if (state  == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
+		netdev_err(adapter->netdev,
+				"Device is in FAILED state, Please Reboot\n");
+		qlcnic_api_unlock(adapter);
+		return;
+	}
 
 	if (state == QLCNIC_DEV_READY) {
 		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
@@ -3061,6 +3089,9 @@
 	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
 		msleep(10);
 
+	if (!adapter->fw_work.work.func)
+		return;
+
 	cancel_delayed_work_sync(&adapter->fw_work);
 }
 
@@ -4280,6 +4311,7 @@
 qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
 {
 	struct device *dev = &adapter->pdev->dev;
+	u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 
 	if (device_create_bin_file(dev, &bin_attr_port_stats))
 		dev_info(dev, "failed to create port stats sysfs entry");
@@ -4288,14 +4320,19 @@
 		return;
 	if (device_create_file(dev, &dev_attr_diag_mode))
 		dev_info(dev, "failed to create diag_mode sysfs entry\n");
-	if (device_create_file(dev, &dev_attr_beacon))
-		dev_info(dev, "failed to create beacon sysfs entry");
 	if (device_create_bin_file(dev, &bin_attr_crb))
 		dev_info(dev, "failed to create crb sysfs entry\n");
 	if (device_create_bin_file(dev, &bin_attr_mem))
 		dev_info(dev, "failed to create mem sysfs entry\n");
+
+	if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+		return;
+
 	if (device_create_bin_file(dev, &bin_attr_pci_config))
 		dev_info(dev, "failed to create pci config sysfs entry");
+	if (device_create_file(dev, &dev_attr_beacon))
+		dev_info(dev, "failed to create beacon sysfs entry");
+
 	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
 		return;
 	if (device_create_bin_file(dev, &bin_attr_esw_config))
@@ -4314,16 +4351,19 @@
 qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
 {
 	struct device *dev = &adapter->pdev->dev;
+	u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 
 	device_remove_bin_file(dev, &bin_attr_port_stats);
 
 	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
 		return;
 	device_remove_file(dev, &dev_attr_diag_mode);
-	device_remove_file(dev, &dev_attr_beacon);
 	device_remove_bin_file(dev, &bin_attr_crb);
 	device_remove_bin_file(dev, &bin_attr_mem);
+	if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
+		return;
 	device_remove_bin_file(dev, &bin_attr_pci_config);
+	device_remove_file(dev, &dev_attr_beacon);
 	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
 		return;
 	device_remove_bin_file(dev, &bin_attr_esw_config);
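
Aside, not part of the patch: when start_firmware fails, probe now still registers the interface, but with the cut-down qlcnic_netdev_failed_ops and qlcnic_ethtool_failed_ops tables so that only diagnostics stay reachable. A toy C sketch of that restricted-ops-table pattern; every name here is invented for illustration, and in the real driver the failed ops still route ndo_open to qlcnic_open, which then rejects a device in the FAILED state.

#include <stdio.h>

struct dev_ops {
	int (*open)(void);		/* NULL: operation not offered */
	void (*get_drvinfo)(void);
};

static int normal_open(void)	{ puts("open: bringing link up"); return 0; }
static void drvinfo(void)	{ puts("drvinfo: basic info only"); }

static const struct dev_ops normal_ops = { .open = normal_open, .get_drvinfo = drvinfo };
static const struct dev_ops failed_ops = { .open = NULL,         .get_drvinfo = drvinfo };

/* Callers always go through the ops table, so a failed device simply offers less. */
static int dev_open(const struct dev_ops *ops)
{
	return ops->open ? ops->open() : -1;
}

int main(void)
{
	printf("normal open -> %d\n", dev_open(&normal_ops));
	printf("failed open -> %d\n", dev_open(&failed_ops));
	return 0;
}
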
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 49343ec..09d8d33 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3845,7 +3845,7 @@
 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
 			WAKE_MCAST | WAKE_BCAST)) {
 		netif_err(qdev, ifdown, qdev->ndev,
-			  "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
+			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
 			  qdev->wol);
 		return -EINVAL;
 	}
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index b96e192..4de7364 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -4,7 +4,7 @@
  * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
  * Copyright (C) 2007
  *	Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
- *	Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -74,9 +74,13 @@
 #define MT_ICR		0x0C	/* TX interrupt control */
 #define MR_ICR		0x10	/* RX interrupt control */
 #define MTPR		0x14	/* TX poll command register */
+#define  TM2TX		0x0001	/* Trigger MAC to transmit */
 #define MR_BSR		0x18	/* RX buffer size */
 #define MR_DCR		0x1A	/* RX descriptor control */
 #define MLSR		0x1C	/* Last status */
+#define  TX_FIFO_UNDR	0x0200	/* TX FIFO under-run */
+#define	 TX_EXCEEDC	0x2000	/* Transmit exceed collision */
+#define  TX_LATEC	0x4000	/* Transmit late collision */
 #define MMDIO		0x20	/* MDIO control register */
 #define  MDIO_WRITE	0x4000	/* MDIO write */
 #define  MDIO_READ	0x2000	/* MDIO read */
@@ -124,6 +128,9 @@
 #define MID_3M		0x82	/* MID3 Medium */
 #define MID_3H		0x84	/* MID3 High */
 #define PHY_CC		0x88	/* PHY status change configuration register */
+#define  SCEN		0x8000	/* PHY status change enable */
+#define  PHYAD_SHIFT	8	/* PHY address shift */
+#define  TMRDIV_SHIFT	0	/* Timer divider shift */
 #define PHY_ST		0x8A	/* PHY status register */
 #define MAC_SM		0xAC	/* MAC status machine */
 #define  MAC_SM_RST	0x0002	/* MAC status machine reset */
@@ -137,6 +144,8 @@
 #define MBCR_DEFAULT	0x012A	/* MAC Bus Control Register */
 #define MCAST_MAX	3	/* Max number multicast addresses to filter */
 
+#define MAC_DEF_TIMEOUT	2048	/* Default MAC read/write operation timeout */
+
 /* Descriptor status */
 #define DSC_OWNER_MAC	0x8000	/* MAC is the owner of this descriptor */
 #define DSC_RX_OK	0x4000	/* RX was successful */
@@ -187,7 +196,7 @@
 	dma_addr_t rx_ring_dma;
 	dma_addr_t tx_ring_dma;
 	u16	tx_free_desc;
-	u16	mcr0, mcr1;
+	u16	mcr0;
 	struct net_device *dev;
 	struct mii_bus *mii_bus;
 	struct napi_struct napi;
@@ -204,7 +213,7 @@
 /* Read a word data from PHY Chip */
 static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
 {
-	int limit = 2048;
+	int limit = MAC_DEF_TIMEOUT;
 	u16 cmd;
 
 	iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
@@ -222,7 +231,7 @@
 static void r6040_phy_write(void __iomem *ioaddr,
 					int phy_addr, int reg, u16 val)
 {
-	int limit = 2048;
+	int limit = MAC_DEF_TIMEOUT;
 	u16 cmd;
 
 	iowrite16(val, ioaddr + MMWD);
@@ -358,27 +367,35 @@
 	return rc;
 }
 
-static void r6040_init_mac_regs(struct net_device *dev)
+static void r6040_reset_mac(struct r6040_private *lp)
 {
-	struct r6040_private *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
-	int limit = 2048;
+	int limit = MAC_DEF_TIMEOUT;
 	u16 cmd;
 
-	/* Mask Off Interrupt */
-	iowrite16(MSK_INT, ioaddr + MIER);
-
-	/* Reset RDC MAC */
 	iowrite16(MAC_RST, ioaddr + MCR1);
 	while (limit--) {
 		cmd = ioread16(ioaddr + MCR1);
 		if (cmd & MAC_RST)
 			break;
 	}
+
 	/* Reset internal state machine */
 	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
 	iowrite16(0, ioaddr + MAC_SM);
 	mdelay(5);
+}
+
+static void r6040_init_mac_regs(struct net_device *dev)
+{
+	struct r6040_private *lp = netdev_priv(dev);
+	void __iomem *ioaddr = lp->base;
+
+	/* Mask Off Interrupt */
+	iowrite16(MSK_INT, ioaddr + MIER);
+
+	/* Reset RDC MAC */
+	r6040_reset_mac(lp);
 
 	/* MAC Bus Control Register */
 	iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
@@ -407,7 +424,7 @@
 	/* Let TX poll the descriptors
 	 * we may got called by r6040_tx_timeout which has left
 	 * some unsent tx buffers */
-	iowrite16(0x01, ioaddr + MTPR);
+	iowrite16(TM2TX, ioaddr + MTPR);
 }
 
 static void r6040_tx_timeout(struct net_device *dev)
@@ -445,18 +462,13 @@
 {
 	struct r6040_private *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
-	int limit = 2048;
 	u16 *adrp;
-	u16 cmd;
 
 	/* Stop MAC */
 	iowrite16(MSK_INT, ioaddr + MIER);	/* Mask Off Interrupt */
-	iowrite16(MAC_RST, ioaddr + MCR1);	/* Reset RDC MAC */
-	while (limit--) {
-		cmd = ioread16(ioaddr + MCR1);
-		if (cmd & MAC_RST)
-			break;
-	}
+
+	/* Reset RDC MAC */
+	r6040_reset_mac(lp);
 
 	/* Restore MAC Address to MIDx */
 	adrp = (u16 *) dev->dev_addr;
@@ -599,9 +611,9 @@
 		/* Check for errors */
 		err = ioread16(ioaddr + MLSR);
 
-		if (err & 0x0200)
-			dev->stats.rx_fifo_errors++;
-		if (err & (0x2000 | 0x4000))
+		if (err & TX_FIFO_UNDR)
+			dev->stats.tx_fifo_errors++;
+		if (err & (TX_EXCEEDC | TX_LATEC))
 			dev->stats.tx_carrier_errors++;
 
 		if (descptr->status & DSC_OWNER_MAC)
@@ -736,11 +748,7 @@
 	u16 *adrp;
 
 	/* Reset MAC */
-	iowrite16(MAC_RST, ioaddr + MCR1);
-	/* Reset internal state machine */
-	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
-	iowrite16(0, ioaddr + MAC_SM);
-	mdelay(5);
+	r6040_reset_mac(lp);
 
 	/* Restore MAC Address */
 	adrp = (u16 *) dev->dev_addr;
@@ -840,7 +848,7 @@
 	skb_tx_timestamp(skb);
 
 	/* Trigger the MAC to check the TX descriptor */
-	iowrite16(0x01, ioaddr + MTPR);
+	iowrite16(TM2TX, ioaddr + MTPR);
 	lp->tx_insert_ptr = descptr->vndescp;
 
 	/* If no tx resource, stop */
@@ -973,6 +981,7 @@
 	.get_settings		= netdev_get_settings,
 	.set_settings		= netdev_set_settings,
 	.get_link		= ethtool_op_get_link,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops r6040_netdev_ops = {
@@ -1126,10 +1135,15 @@
 		err = -EIO;
 		goto err_out_free_res;
 	}
+
 	/* If PHY status change register is still set to zero it means the
-	 * bootloader didn't initialize it */
+	 * bootloader didn't initialize it, so we set it to:
+	 * - enable phy status change
+	 * - enable all phy addresses
+	 * - set to lowest timer divider */
 	if (ioread16(ioaddr + PHY_CC) == 0)
-		iowrite16(0x9f07, ioaddr + PHY_CC);
+		iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
+				7 << TMRDIV_SHIFT, ioaddr + PHY_CC);
 
 	/* Init system & device */
 	lp->base = ioaddr;
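
Aside, not part of the patch: the PHY_CC write above replaces the magic 0x9f07 with named bit fields. A tiny C sketch that just composes and prints the new value; PHY_MAX_ADDR is an assumed value of 32, as in include/linux/phy.h, and with that assumption the composed value is not bit-for-bit identical to the old constant.

#include <stdio.h>

#define SCEN		0x8000	/* PHY status change enable */
#define PHYAD_SHIFT	8	/* PHY address field */
#define TMRDIV_SHIFT	0	/* timer divider field */
#define PHY_MAX_ADDR	32	/* assumed, as in include/linux/phy.h */

int main(void)
{
	unsigned int val = SCEN | PHY_MAX_ADDR << PHYAD_SHIFT | 7 << TMRDIV_SHIFT;

	printf("PHY_CC: 0x%04x (old hard-coded value: 0x9f07)\n", val);
	return 0;
}
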
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index b3287c0..5eef290 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -635,9 +635,12 @@
  */
 static void cp_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	cp_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct cp_private *cp = netdev_priv(dev);
+	const int irq = cp->pdev->irq;
+
+	disable_irq(irq);
+	cp_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -1117,6 +1120,7 @@
 static int cp_open (struct net_device *dev)
 {
 	struct cp_private *cp = netdev_priv(dev);
+	const int irq = cp->pdev->irq;
 	int rc;
 
 	netif_dbg(cp, ifup, dev, "enabling interface\n");
@@ -1129,7 +1133,7 @@
 
 	cp_init_hw(cp);
 
-	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
+	rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
 	if (rc)
 		goto err_out_hw;
 
@@ -1166,7 +1170,7 @@
 
 	spin_unlock_irqrestore(&cp->lock, flags);
 
-	free_irq(dev->irq, dev);
+	free_irq(cp->pdev->irq, dev);
 
 	cp_free_rings(cp);
 	return 0;
@@ -1914,7 +1918,6 @@
 		       (unsigned long long)pciaddr);
 		goto err_out_res;
 	}
-	dev->base_addr = (unsigned long) regs;
 	cp->regs = regs;
 
 	cp_stop_hw(cp);
@@ -1942,14 +1945,12 @@
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 		NETIF_F_HIGHDMA;
 
-	dev->irq = pdev->irq;
-
 	rc = register_netdev(dev);
 	if (rc)
 		goto err_out_iomap;
 
-	netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
-		    dev->base_addr, dev->dev_addr, dev->irq);
+	netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
+		    regs, dev->dev_addr, pdev->irq);
 
 	pci_set_drvdata(pdev, dev);
 
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index df7fd8d..03df076 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -148,9 +148,9 @@
 
 /* Whether to use MMIO or PIO. Default to MMIO. */
 #ifdef CONFIG_8139TOO_PIO
-static int use_io = 1;
+static bool use_io = true;
 #else
-static int use_io = 0;
+static bool use_io = false;
 #endif
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
@@ -620,7 +620,7 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
-module_param(use_io, int, 0);
+module_param(use_io, bool, 0);
 MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
 module_param(multicast_filter_limit, int, 0);
 module_param_array(media, int, NULL, 0);
@@ -750,15 +750,22 @@
 
 static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
 {
+	struct device *d = &pdev->dev;
 	void __iomem *ioaddr;
 	struct net_device *dev;
 	struct rtl8139_private *tp;
 	u8 tmp8;
 	int rc, disable_dev_on_err = 0;
-	unsigned int i;
-	unsigned long pio_start, pio_end, pio_flags, pio_len;
-	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+	unsigned int i, bar;
+	unsigned long io_len;
 	u32 version;
+	static const struct {
+		unsigned long mask;
+		char *type;
+	} res[] = {
+		{ IORESOURCE_IO,  "PIO" },
+		{ IORESOURCE_MEM, "MMIO" }
+	};
 
 	assert (pdev != NULL);
 
@@ -777,78 +784,45 @@
 	if (rc)
 		goto err_out;
 
-	pio_start = pci_resource_start (pdev, 0);
-	pio_end = pci_resource_end (pdev, 0);
-	pio_flags = pci_resource_flags (pdev, 0);
-	pio_len = pci_resource_len (pdev, 0);
-
-	mmio_start = pci_resource_start (pdev, 1);
-	mmio_end = pci_resource_end (pdev, 1);
-	mmio_flags = pci_resource_flags (pdev, 1);
-	mmio_len = pci_resource_len (pdev, 1);
-
-	/* set this immediately, we need to know before
-	 * we talk to the chip directly */
-	pr_debug("PIO region size == 0x%02lX\n", pio_len);
-	pr_debug("MMIO region size == 0x%02lX\n", mmio_len);
-
-retry:
-	if (use_io) {
-		/* make sure PCI base addr 0 is PIO */
-		if (!(pio_flags & IORESOURCE_IO)) {
-			dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
-			rc = -ENODEV;
-			goto err_out;
-		}
-		/* check for weird/broken PCI region reporting */
-		if (pio_len < RTL_MIN_IO_SIZE) {
-			dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
-			rc = -ENODEV;
-			goto err_out;
-		}
-	} else {
-		/* make sure PCI base addr 1 is MMIO */
-		if (!(mmio_flags & IORESOURCE_MEM)) {
-			dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
-			rc = -ENODEV;
-			goto err_out;
-		}
-		if (mmio_len < RTL_MIN_IO_SIZE) {
-			dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
-			rc = -ENODEV;
-			goto err_out;
-		}
-	}
-
 	rc = pci_request_regions (pdev, DRV_NAME);
 	if (rc)
 		goto err_out;
 	disable_dev_on_err = 1;
 
-	/* enable PCI bus-mastering */
 	pci_set_master (pdev);
 
-	if (use_io) {
-		ioaddr = pci_iomap(pdev, 0, 0);
-		if (!ioaddr) {
-			dev_err(&pdev->dev, "cannot map PIO, aborting\n");
-			rc = -EIO;
-			goto err_out;
-		}
-		dev->base_addr = pio_start;
-		tp->regs_len = pio_len;
-	} else {
-		/* ioremap MMIO region */
-		ioaddr = pci_iomap(pdev, 1, 0);
-		if (ioaddr == NULL) {
-			dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n");
-			pci_release_regions(pdev);
-			use_io = 1;
+retry:
+	/* PIO bar register comes first. */
+	bar = !use_io;
+
+	io_len = pci_resource_len(pdev, bar);
+
+	dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len);
+
+	if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) {
+		dev_err(d, "region #%d not a %s resource, aborting\n", bar,
+			res[bar].type);
+		rc = -ENODEV;
+		goto err_out;
+	}
+	if (io_len < RTL_MIN_IO_SIZE) {
+		dev_err(d, "Invalid PCI %s region size(s), aborting\n",
+			res[bar].type);
+		rc = -ENODEV;
+		goto err_out;
+	}
+
+	ioaddr = pci_iomap(pdev, bar, 0);
+	if (!ioaddr) {
+		dev_err(d, "cannot map %s\n", res[bar].type);
+		if (!use_io) {
+			use_io = true;
 			goto retry;
 		}
-		dev->base_addr = (long) ioaddr;
-		tp->regs_len = mmio_len;
+		rc = -ENODEV;
+		goto err_out;
 	}
+	tp->regs_len = io_len;
 	tp->mmio_addr = ioaddr;
 
 	/* Bring old chips out of low-power mode. */
@@ -1035,8 +1009,6 @@
 	dev->hw_features |= NETIF_F_RXALL;
 	dev->hw_features |= NETIF_F_RXFCS;
 
-	dev->irq = pdev->irq;
-
 	/* tp zeroed and aligned in alloc_etherdev */
 	tp = netdev_priv(dev);
 
@@ -1062,9 +1034,9 @@
 
 	pci_set_drvdata (pdev, dev);
 
-	netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+	netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n",
 		    board_info[ent->driver_data].name,
-		    dev->base_addr, dev->dev_addr, dev->irq);
+		    ioaddr, dev->dev_addr, pdev->irq);
 
 	netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
 		   rtl_chip_info[tp->chipset].name);
@@ -1339,10 +1311,11 @@
 static int rtl8139_open (struct net_device *dev)
 {
 	struct rtl8139_private *tp = netdev_priv(dev);
-	int retval;
 	void __iomem *ioaddr = tp->mmio_addr;
+	const int irq = tp->pci_dev->irq;
+	int retval;
 
-	retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
+	retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
 	if (retval)
 		return retval;
 
@@ -1351,7 +1324,7 @@
 	tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
 					   &tp->rx_ring_dma, GFP_KERNEL);
 	if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
-		free_irq(dev->irq, dev);
+		free_irq(irq, dev);
 
 		if (tp->tx_bufs)
 			dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
@@ -1377,7 +1350,7 @@
 		  "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
 		  __func__,
 		  (unsigned long long)pci_resource_start (tp->pci_dev, 1),
-		  dev->irq, RTL_R8 (MediaStatus),
+		  irq, RTL_R8 (MediaStatus),
 		  tp->mii.full_duplex ? "full" : "half");
 
 	rtl8139_start_thread(tp);
@@ -2240,9 +2213,12 @@
  */
 static void rtl8139_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	rtl8139_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct rtl8139_private *tp = netdev_priv(dev);
+	const int irq = tp->pci_dev->irq;
+
+	disable_irq(irq);
+	rtl8139_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -2295,7 +2271,7 @@
 
 	spin_unlock_irqrestore (&tp->lock, flags);
 
-	free_irq (dev->irq, dev);
+	free_irq(tp->pci_dev->irq, dev);
 
 	rtl8139_tx_clear (tp);
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f545093..4f74b97 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -44,6 +44,8 @@
 #define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
 #define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
 #define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
+#define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
+#define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
 
 #ifdef RTL8169_DEBUG
 #define assert(expr) \
@@ -61,8 +63,12 @@
 #define R8169_MSG_DEFAULT \
 	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
 
-#define TX_BUFFS_AVAIL(tp) \
-	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+#define TX_SLOTS_AVAIL(tp) \
+	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+
+/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+#define TX_FRAGS_READY_FOR(tp,nr_frags) \
+	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
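
Aside, not part of the patch: TX_BUFFS_AVAIL implicitly reserved one descriptor, while the new TX_SLOTS_AVAIL/TX_FRAGS_READY_FOR pair makes the "nr_frags + 1 descriptors per skb" rule explicit. A standalone C sketch of the same accounting; the helper names and ring numbers are illustrative only.

#include <stdio.h>

#define NUM_TX_DESC	64

struct tx_ring {
	unsigned int dirty_tx;	/* first descriptor not yet reclaimed */
	unsigned int cur_tx;	/* next descriptor to be used */
};

/* Free descriptors remaining in the ring. */
static unsigned int tx_slots_avail(const struct tx_ring *r)
{
	return r->dirty_tx + NUM_TX_DESC - r->cur_tx;
}

/* An skb with nr_frags fragments needs nr_frags + 1 descriptors. */
static int tx_frags_ready_for(const struct tx_ring *r, unsigned int nr_frags)
{
	return tx_slots_avail(r) >= nr_frags + 1;
}

int main(void)
{
	struct tx_ring r = { .dirty_tx = 10, .cur_tx = 72 };	/* 2 slots free */

	printf("slots free: %u\n", tx_slots_avail(&r));
	printf("room for 1 frag?  %d\n", tx_frags_ready_for(&r, 1));
	printf("room for 2 frags? %d\n", tx_frags_ready_for(&r, 2));
	return 0;
}
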
@@ -133,6 +139,8 @@
 	RTL_GIGA_MAC_VER_34,
 	RTL_GIGA_MAC_VER_35,
 	RTL_GIGA_MAC_VER_36,
+	RTL_GIGA_MAC_VER_37,
+	RTL_GIGA_MAC_VER_38,
 	RTL_GIGA_MAC_NONE   = 0xff,
 };
 
@@ -245,6 +253,12 @@
 	[RTL_GIGA_MAC_VER_36] =
 		_R("RTL8168f/8111f",	RTL_TD_1, FIRMWARE_8168F_2,
 							JUMBO_9K, false),
+	[RTL_GIGA_MAC_VER_37] =
+		_R("RTL8402",		RTL_TD_1, FIRMWARE_8402_1,
+							JUMBO_1K, true),
+	[RTL_GIGA_MAC_VER_38] =
+		_R("RTL8411",		RTL_TD_1, FIRMWARE_8411_1,
+							JUMBO_9K, false),
 };
 #undef _R
 
@@ -315,6 +329,8 @@
 	Config0		= 0x51,
 	Config1		= 0x52,
 	Config2		= 0x53,
+#define PME_SIGNAL			(1 << 5)	/* 8168c and later */
+
 	Config3		= 0x54,
 	Config4		= 0x55,
 	Config5		= 0x56,
@@ -355,6 +371,9 @@
 #define	CSIAR_BYTE_ENABLE		0x0f
 #define	CSIAR_BYTE_ENABLE_SHIFT		12
 #define	CSIAR_ADDR_MASK			0x0fff
+#define CSIAR_FUNC_CARD			0x00000000
+#define CSIAR_FUNC_SDIO			0x00010000
+#define CSIAR_FUNC_NIC			0x00020000
 	PMCH			= 0x6f,
 	EPHYAR			= 0x80,
 #define	EPHYAR_FLAG			0x80000000
@@ -716,6 +735,11 @@
 		void (*disable)(struct rtl8169_private *);
 	} jumbo_ops;
 
+	struct csi_ops {
+		void (*write)(void __iomem *, int, int);
+		u32 (*read)(void __iomem *, int);
+	} csi_ops;
+
 	int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
 	int (*get_settings)(struct net_device *, struct ethtool_cmd *);
 	void (*phy_reset_enable)(struct rtl8169_private *tp);
@@ -768,6 +792,8 @@
 MODULE_FIRMWARE(FIRMWARE_8105E_1);
 MODULE_FIRMWARE(FIRMWARE_8168F_1);
 MODULE_FIRMWARE(FIRMWARE_8168F_2);
+MODULE_FIRMWARE(FIRMWARE_8402_1);
+MODULE_FIRMWARE(FIRMWARE_8411_1);
 
 static void rtl_lock_work(struct rtl8169_private *tp)
 {
@@ -1078,40 +1104,6 @@
 	return value;
 }
 
-static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
-{
-	unsigned int i;
-
-	RTL_W32(CSIDR, value);
-	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
-		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
-
-	for (i = 0; i < 100; i++) {
-		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
-			break;
-		udelay(10);
-	}
-}
-
-static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
-{
-	u32 value = ~0x00;
-	unsigned int i;
-
-	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
-		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
-
-	for (i = 0; i < 100; i++) {
-		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
-			value = RTL_R32(CSIDR);
-			break;
-		}
-		udelay(10);
-	}
-
-	return value;
-}
-
 static
 void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
 {
@@ -1281,7 +1273,8 @@
 	if (!netif_running(dev))
 		return;
 
-	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
 		if (RTL_R8(PHYstatus) & _1000bpsF) {
 			rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
 				      0x00000011, ERIAR_EXGMAC);
@@ -1316,6 +1309,16 @@
 			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
 				      0x0000003f, ERIAR_EXGMAC);
 		}
+	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
+		if (RTL_R8(PHYstatus) & _10bps) {
+			rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
+				      0x4d02, ERIAR_EXGMAC);
+			rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
+				      0x0060, ERIAR_EXGMAC);
+		} else {
+			rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
+				      0x0000, ERIAR_EXGMAC);
+		}
 	}
 }
 
@@ -1396,7 +1399,6 @@
 		u16 reg;
 		u8  mask;
 	} cfg[] = {
-		{ WAKE_ANY,   Config1, PMEnable },
 		{ WAKE_PHY,   Config3, LinkUp },
 		{ WAKE_MAGIC, Config3, MagicPacket },
 		{ WAKE_UCAST, Config5, UWF },
@@ -1404,16 +1406,32 @@
 		{ WAKE_MCAST, Config5, MWF },
 		{ WAKE_ANY,   Config5, LanWake }
 	};
+	u8 options;
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 
 	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
-		u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
 		if (wolopts & cfg[i].opt)
 			options |= cfg[i].mask;
 		RTL_W8(cfg[i].reg, options);
 	}
 
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
+		options = RTL_R8(Config1) & ~PMEnable;
+		if (wolopts)
+			options |= PMEnable;
+		RTL_W8(Config1, options);
+		break;
+	default:
+		options = RTL_R8(Config2) & ~PME_SIGNAL;
+		if (wolopts)
+			options |= PME_SIGNAL;
+		RTL_W8(Config2, options);
+		break;
+	}
+
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 }
 
@@ -1853,6 +1871,7 @@
 	.get_strings		= rtl8169_get_strings,
 	.get_sset_count		= rtl8169_get_sset_count,
 	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1876,6 +1895,7 @@
 		int mac_version;
 	} mac_info[] = {
 		/* 8168F family. */
+		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
 		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
 		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },
 
@@ -1913,6 +1933,7 @@
 		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },
 
 		/* 8101 family. */
+		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
 		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
 		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
 		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
@@ -3013,6 +3034,28 @@
 	rtl_writephy(tp, 0x1f, 0x0000);
 }
 
+static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
+{
+	/* For 4-corner performance improve */
+	rtl_writephy(tp, 0x1f, 0x0005);
+	rtl_writephy(tp, 0x05, 0x8b80);
+	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* PHY auto speed down */
+	rtl_writephy(tp, 0x1f, 0x0007);
+	rtl_writephy(tp, 0x1e, 0x002d);
+	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+	/* Improve 10M EEE waveform */
+	rtl_writephy(tp, 0x1f, 0x0005);
+	rtl_writephy(tp, 0x05, 0x8b86);
+	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+}
+
 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
 {
 	static const struct phy_reg phy_reg_init[] = {
@@ -3054,24 +3097,7 @@
 
 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 
-	/* For 4-corner performance improve */
-	rtl_writephy(tp, 0x1f, 0x0005);
-	rtl_writephy(tp, 0x05, 0x8b80);
-	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
-	rtl_writephy(tp, 0x1f, 0x0000);
-
-	/* PHY auto speed down */
-	rtl_writephy(tp, 0x1f, 0x0007);
-	rtl_writephy(tp, 0x1e, 0x002d);
-	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
-	rtl_writephy(tp, 0x1f, 0x0000);
-	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
-
-	/* Improve 10M EEE waveform */
-	rtl_writephy(tp, 0x1f, 0x0005);
-	rtl_writephy(tp, 0x05, 0x8b86);
-	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
-	rtl_writephy(tp, 0x1f, 0x0000);
+	rtl8168f_hw_phy_config(tp);
 
 	/* Improve 2-pair detection performance */
 	rtl_writephy(tp, 0x1f, 0x0005);
@@ -3084,23 +3110,104 @@
 {
 	rtl_apply_firmware(tp);
 
-	/* For 4-corner performance improve */
+	rtl8168f_hw_phy_config(tp);
+}
+
+static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	static const struct phy_reg phy_reg_init[] = {
+		/* Channel estimation fine tune */
+		{ 0x1f, 0x0003 },
+		{ 0x09, 0xa20f },
+		{ 0x1f, 0x0000 },
+
+		/* Modify green table for giga & fnet */
+		{ 0x1f, 0x0005 },
+		{ 0x05, 0x8b55 },
+		{ 0x06, 0x0000 },
+		{ 0x05, 0x8b5e },
+		{ 0x06, 0x0000 },
+		{ 0x05, 0x8b67 },
+		{ 0x06, 0x0000 },
+		{ 0x05, 0x8b70 },
+		{ 0x06, 0x0000 },
+		{ 0x1f, 0x0000 },
+		{ 0x1f, 0x0007 },
+		{ 0x1e, 0x0078 },
+		{ 0x17, 0x0000 },
+		{ 0x19, 0x00aa },
+		{ 0x1f, 0x0000 },
+
+		/* Modify green table for 10M */
+		{ 0x1f, 0x0005 },
+		{ 0x05, 0x8b79 },
+		{ 0x06, 0xaa00 },
+		{ 0x1f, 0x0000 },
+
+		/* Disable hiimpedance detection (RTCT) */
+		{ 0x1f, 0x0003 },
+		{ 0x01, 0x328a },
+		{ 0x1f, 0x0000 }
+	};
+
+
+	rtl_apply_firmware(tp);
+
+	rtl8168f_hw_phy_config(tp);
+
+	/* Improve 2-pair detection performance */
 	rtl_writephy(tp, 0x1f, 0x0005);
-	rtl_writephy(tp, 0x05, 0x8b80);
-	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
+	rtl_writephy(tp, 0x05, 0x8b85);
+	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
 	rtl_writephy(tp, 0x1f, 0x0000);
 
-	/* PHY auto speed down */
+	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+	/* Modify green table for giga */
+	rtl_writephy(tp, 0x1f, 0x0005);
+	rtl_writephy(tp, 0x05, 0x8b54);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
+	rtl_writephy(tp, 0x05, 0x8b5d);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
+	rtl_writephy(tp, 0x05, 0x8a7c);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+	rtl_writephy(tp, 0x05, 0x8a7f);
+	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
+	rtl_writephy(tp, 0x05, 0x8a82);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+	rtl_writephy(tp, 0x05, 0x8a85);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+	rtl_writephy(tp, 0x05, 0x8a88);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* uc same-seed solution */
+	rtl_writephy(tp, 0x1f, 0x0005);
+	rtl_writephy(tp, 0x05, 0x8b85);
+	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
+	rtl_writephy(tp, 0x1f, 0x0000);
+
+	/* eee setting */
+	rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
+	rtl_writephy(tp, 0x1f, 0x0005);
+	rtl_writephy(tp, 0x05, 0x8b85);
+	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
+	rtl_writephy(tp, 0x1f, 0x0004);
 	rtl_writephy(tp, 0x1f, 0x0007);
-	rtl_writephy(tp, 0x1e, 0x002d);
-	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+	rtl_writephy(tp, 0x1e, 0x0020);
+	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
 	rtl_writephy(tp, 0x1f, 0x0000);
-	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+	rtl_writephy(tp, 0x0d, 0x0007);
+	rtl_writephy(tp, 0x0e, 0x003c);
+	rtl_writephy(tp, 0x0d, 0x4007);
+	rtl_writephy(tp, 0x0e, 0x0000);
+	rtl_writephy(tp, 0x0d, 0x0000);
 
-	/* Improve 10M EEE waveform */
-	rtl_writephy(tp, 0x1f, 0x0005);
-	rtl_writephy(tp, 0x05, 0x8b86);
-	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+	/* Green feature */
+	rtl_writephy(tp, 0x1f, 0x0003);
+	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
+	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
 	rtl_writephy(tp, 0x1f, 0x0000);
 }
 
@@ -3147,6 +3254,25 @@
 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 }
 
+static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	/* Disable ALDPS before setting firmware */
+	rtl_writephy(tp, 0x1f, 0x0000);
+	rtl_writephy(tp, 0x18, 0x0310);
+	msleep(20);
+
+	rtl_apply_firmware(tp);
+
+	/* EEE setting */
+	rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+	rtl_writephy(tp, 0x1f, 0x0004);
+	rtl_writephy(tp, 0x10, 0x401f);
+	rtl_writephy(tp, 0x19, 0x7030);
+	rtl_writephy(tp, 0x1f, 0x0000);
+}
+
 static void rtl_hw_phy_config(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -3235,6 +3361,14 @@
 		rtl8168f_2_hw_phy_config(tp);
 		break;
 
+	case RTL_GIGA_MAC_VER_37:
+		rtl8402_hw_phy_config(tp);
+		break;
+
+	case RTL_GIGA_MAC_VER_38:
+		rtl8411_hw_phy_config(tp);
+		break;
+
 	default:
 		break;
 	}
@@ -3472,6 +3606,8 @@
 	case RTL_GIGA_MAC_VER_32:
 	case RTL_GIGA_MAC_VER_33:
 	case RTL_GIGA_MAC_VER_34:
+	case RTL_GIGA_MAC_VER_37:
+	case RTL_GIGA_MAC_VER_38:
 		RTL_W32(RxConfig, RTL_R32(RxConfig) |
 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
 		break;
@@ -3507,15 +3643,45 @@
 
 static void r810x_pll_power_down(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+
 	if (rtl_wol_pll_power_down(tp))
 		return;
 
 	r810x_phy_power_down(tp);
+
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_07:
+	case RTL_GIGA_MAC_VER_08:
+	case RTL_GIGA_MAC_VER_09:
+	case RTL_GIGA_MAC_VER_10:
+	case RTL_GIGA_MAC_VER_13:
+	case RTL_GIGA_MAC_VER_16:
+		break;
+	default:
+		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
+		break;
+	}
 }
 
 static void r810x_pll_power_up(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+
 	r810x_phy_power_up(tp);
+
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_07:
+	case RTL_GIGA_MAC_VER_08:
+	case RTL_GIGA_MAC_VER_09:
+	case RTL_GIGA_MAC_VER_10:
+	case RTL_GIGA_MAC_VER_13:
+	case RTL_GIGA_MAC_VER_16:
+		break;
+	default:
+		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
+		break;
+	}
 }
 
 static void r8168_phy_power_up(struct rtl8169_private *tp)
@@ -3619,13 +3785,6 @@
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 
-	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
-	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
-	    r8168dp_check_dash(tp)) {
-		return;
-	}
-
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_25:
 	case RTL_GIGA_MAC_VER_26:
@@ -3670,6 +3829,7 @@
 	case RTL_GIGA_MAC_VER_16:
 	case RTL_GIGA_MAC_VER_29:
 	case RTL_GIGA_MAC_VER_30:
+	case RTL_GIGA_MAC_VER_37:
 		ops->down	= r810x_pll_power_down;
 		ops->up		= r810x_pll_power_up;
 		break;
@@ -3694,6 +3854,7 @@
 	case RTL_GIGA_MAC_VER_34:
 	case RTL_GIGA_MAC_VER_35:
 	case RTL_GIGA_MAC_VER_36:
+	case RTL_GIGA_MAC_VER_38:
 		ops->down	= r8168_pll_power_down;
 		ops->up		= r8168_pll_power_up;
 		break;
@@ -3979,7 +4140,9 @@
 			udelay(20);
 	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
 	           tp->mac_version == RTL_GIGA_MAC_VER_35 ||
-	           tp->mac_version == RTL_GIGA_MAC_VER_36) {
+	           tp->mac_version == RTL_GIGA_MAC_VER_36 ||
+	           tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+	           tp->mac_version == RTL_GIGA_MAC_VER_38) {
 		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
 		while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
 			udelay(100);
@@ -4185,22 +4348,141 @@
 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
 }
 
-static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
+static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
+{
+	if (tp->csi_ops.write)
+		tp->csi_ops.write(tp->mmio_addr, addr, value);
+}
+
+static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
+{
+	if (tp->csi_ops.read)
+		return tp->csi_ops.read(tp->mmio_addr, addr);
+	else
+		return ~0;
+}
+
+static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
 {
 	u32 csi;
 
-	csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
-	rtl_csi_write(ioaddr, 0x070c, csi | bits);
+	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
+	rtl_csi_write(tp, 0x070c, csi | bits);
 }
 
-static void rtl_csi_access_enable_1(void __iomem *ioaddr)
+static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable(ioaddr, 0x17000000);
+	rtl_csi_access_enable(tp, 0x17000000);
 }
 
-static void rtl_csi_access_enable_2(void __iomem *ioaddr)
+static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable(ioaddr, 0x27000000);
+	rtl_csi_access_enable(tp, 0x27000000);
+}
+
+static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
+{
+	unsigned int i;
+
+	RTL_W32(CSIDR, value);
+	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+	for (i = 0; i < 100; i++) {
+		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
+			break;
+		udelay(10);
+	}
+}
+
+static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
+{
+	u32 value = ~0x00;
+	unsigned int i;
+
+	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
+		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+	for (i = 0; i < 100; i++) {
+		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
+			value = RTL_R32(CSIDR);
+			break;
+		}
+		udelay(10);
+	}
+
+	return value;
+}
+
+static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
+{
+	unsigned int i;
+
+	RTL_W32(CSIDR, value);
+	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
+		CSIAR_FUNC_NIC);
+
+	for (i = 0; i < 100; i++) {
+		if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
+			break;
+		udelay(10);
+	}
+}
+
+static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
+{
+	u32 value = ~0x00;
+	unsigned int i;
+
+	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
+		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+	for (i = 0; i < 100; i++) {
+		if (RTL_R32(CSIAR) & CSIAR_FLAG) {
+			value = RTL_R32(CSIDR);
+			break;
+		}
+		udelay(10);
+	}
+
+	return value;
+}
+
+static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
+{
+	struct csi_ops *ops = &tp->csi_ops;
+
+	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_01:
+	case RTL_GIGA_MAC_VER_02:
+	case RTL_GIGA_MAC_VER_03:
+	case RTL_GIGA_MAC_VER_04:
+	case RTL_GIGA_MAC_VER_05:
+	case RTL_GIGA_MAC_VER_06:
+	case RTL_GIGA_MAC_VER_10:
+	case RTL_GIGA_MAC_VER_11:
+	case RTL_GIGA_MAC_VER_12:
+	case RTL_GIGA_MAC_VER_13:
+	case RTL_GIGA_MAC_VER_14:
+	case RTL_GIGA_MAC_VER_15:
+	case RTL_GIGA_MAC_VER_16:
+	case RTL_GIGA_MAC_VER_17:
+		ops->write	= NULL;
+		ops->read	= NULL;
+		break;
+
+	case RTL_GIGA_MAC_VER_37:
+	case RTL_GIGA_MAC_VER_38:
+		ops->write	= r8402_csi_write;
+		ops->read	= r8402_csi_read;
+		break;
+
+	default:
+		ops->write	= r8169_csi_write;
+		ops->read	= r8169_csi_read;
+		break;
+	}
 }
 
 struct ephy_info {
@@ -4257,8 +4539,11 @@
 	PktCntrDisable | \
 	Mac_dbgo_sel)
 
-static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
@@ -4267,17 +4552,22 @@
 		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
 }
 
-static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
 {
-	rtl_hw_start_8168bb(ioaddr, pdev);
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	rtl_hw_start_8168bb(tp);
 
 	RTL_W8(MaxTxPacketSize, TxPacketMax);
 
 	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
 }
 
-static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
+static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
 	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
 
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
@@ -4289,8 +4579,9 @@
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
 
-static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168cp[] = {
 		{ 0x01, 0,	0x0001 },
 		{ 0x02, 0x0800,	0x1000 },
@@ -4299,16 +4590,19 @@
 		{ 0x07, 0,	0x2000 }
 	};
 
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
 	rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
 
-	__rtl_hw_start_8168cp(ioaddr, pdev);
+	__rtl_hw_start_8168cp(tp);
 }
 
-static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_2(ioaddr);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	rtl_csi_access_enable_2(tp);
 
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
@@ -4317,9 +4611,12 @@
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
 
-static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_2(ioaddr);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	rtl_csi_access_enable_2(tp);
 
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
@@ -4333,52 +4630,57 @@
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
 
-static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168c_1[] = {
 		{ 0x02, 0x0800,	0x1000 },
 		{ 0x03, 0,	0x0002 },
 		{ 0x06, 0x0080,	0x0000 }
 	};
 
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
 	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
 
 	rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
 
-	__rtl_hw_start_8168cp(ioaddr, pdev);
+	__rtl_hw_start_8168cp(tp);
 }
 
-static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8168c_2[] = {
 		{ 0x01, 0,	0x0001 },
 		{ 0x03, 0x0400,	0x0220 }
 	};
 
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
 	rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
 
-	__rtl_hw_start_8168cp(ioaddr, pdev);
+	__rtl_hw_start_8168cp(tp);
 }
 
-static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
 {
-	rtl_hw_start_8168c_2(ioaddr, pdev);
+	rtl_hw_start_8168c_2(tp);
 }
 
-static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
-	__rtl_hw_start_8168cp(ioaddr, pdev);
+	__rtl_hw_start_8168cp(tp);
 }
 
-static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168d(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_2(ioaddr);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	rtl_csi_access_enable_2(tp);
 
 	rtl_disable_clock_request(pdev);
 
@@ -4389,9 +4691,12 @@
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
 
-static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_1(ioaddr);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	rtl_csi_access_enable_1(tp);
 
 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
@@ -4400,8 +4705,10 @@
 	rtl_disable_clock_request(pdev);
 }
 
-static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8168d_4[] = {
 		{ 0x0b, ~0,	0x48 },
 		{ 0x19, 0x20,	0x50 },
@@ -4409,7 +4716,7 @@
 	};
 	int i;
 
-	rtl_csi_access_enable_1(ioaddr);
+	rtl_csi_access_enable_1(tp);
 
 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
@@ -4426,8 +4733,10 @@
 	rtl_enable_clock_request(pdev);
 }
 
-static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8168e_1[] = {
 		{ 0x00, 0x0200,	0x0100 },
 		{ 0x00, 0x0000,	0x0004 },
@@ -4444,7 +4753,7 @@
 		{ 0x0a, 0x0000,	0x0040 }
 	};
 
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
 	rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
 
@@ -4461,14 +4770,16 @@
 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 }
 
-static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8168e_2[] = {
 		{ 0x09, 0x0000,	0x0080 },
 		{ 0x19, 0x0000,	0x0224 }
 	};
 
-	rtl_csi_access_enable_1(ioaddr);
+	rtl_csi_access_enable_1(tp);
 
 	rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
 
@@ -4499,18 +4810,12 @@
 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 }
 
-static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 {
-	static const struct ephy_info e_info_8168f_1[] = {
-		{ 0x06, 0x00c0,	0x0020 },
-		{ 0x08, 0x0001,	0x0002 },
-		{ 0x09, 0x0000,	0x0080 },
-		{ 0x19, 0x0000,	0x0224 }
-	};
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
 
-	rtl_csi_access_enable_1(ioaddr);
-
-	rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+	rtl_csi_access_enable_2(tp);
 
 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
@@ -4524,8 +4829,6 @@
 	rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
 	rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
 	rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
-	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
-		     ERIAR_EXGMAC);
 
 	RTL_W8(MaxTxPacketSize, EarlySize);
 
@@ -4533,20 +4836,54 @@
 
 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
-
-	/* Adjust EEE LED frequency */
-	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
-
 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 }
 
+static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	static const struct ephy_info e_info_8168f_1[] = {
+		{ 0x06, 0x00c0,	0x0020 },
+		{ 0x08, 0x0001,	0x0002 },
+		{ 0x09, 0x0000,	0x0080 },
+		{ 0x19, 0x0000,	0x0224 }
+	};
+
+	rtl_hw_start_8168f(tp);
+
+	rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+
+	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
+		     ERIAR_EXGMAC);
+
+	/* Adjust EEE LED frequency */
+	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+}
+
+static void rtl_hw_start_8411(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	static const struct ephy_info e_info_8168f_1[] = {
+		{ 0x06, 0x00c0,	0x0020 },
+		{ 0x0f, 0xffff,	0x5200 },
+		{ 0x1e, 0x0000,	0x4000 },
+		{ 0x19, 0x0000,	0x0224 }
+	};
+
+	rtl_hw_start_8168f(tp);
+
+	rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+
+	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000,
+		     ERIAR_EXGMAC);
+}
+
 static void rtl_hw_start_8168(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	struct pci_dev *pdev = tp->pci_dev;
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
 
@@ -4577,67 +4914,71 @@
 
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_11:
-		rtl_hw_start_8168bb(ioaddr, pdev);
+		rtl_hw_start_8168bb(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_12:
 	case RTL_GIGA_MAC_VER_17:
-		rtl_hw_start_8168bef(ioaddr, pdev);
+		rtl_hw_start_8168bef(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_18:
-		rtl_hw_start_8168cp_1(ioaddr, pdev);
+		rtl_hw_start_8168cp_1(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_19:
-		rtl_hw_start_8168c_1(ioaddr, pdev);
+		rtl_hw_start_8168c_1(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_20:
-		rtl_hw_start_8168c_2(ioaddr, pdev);
+		rtl_hw_start_8168c_2(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_21:
-		rtl_hw_start_8168c_3(ioaddr, pdev);
+		rtl_hw_start_8168c_3(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_22:
-		rtl_hw_start_8168c_4(ioaddr, pdev);
+		rtl_hw_start_8168c_4(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_23:
-		rtl_hw_start_8168cp_2(ioaddr, pdev);
+		rtl_hw_start_8168cp_2(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_24:
-		rtl_hw_start_8168cp_3(ioaddr, pdev);
+		rtl_hw_start_8168cp_3(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_25:
 	case RTL_GIGA_MAC_VER_26:
 	case RTL_GIGA_MAC_VER_27:
-		rtl_hw_start_8168d(ioaddr, pdev);
+		rtl_hw_start_8168d(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_28:
-		rtl_hw_start_8168d_4(ioaddr, pdev);
+		rtl_hw_start_8168d_4(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_31:
-		rtl_hw_start_8168dp(ioaddr, pdev);
+		rtl_hw_start_8168dp(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_32:
 	case RTL_GIGA_MAC_VER_33:
-		rtl_hw_start_8168e_1(ioaddr, pdev);
+		rtl_hw_start_8168e_1(tp);
 		break;
 	case RTL_GIGA_MAC_VER_34:
-		rtl_hw_start_8168e_2(ioaddr, pdev);
+		rtl_hw_start_8168e_2(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_35:
 	case RTL_GIGA_MAC_VER_36:
-		rtl_hw_start_8168f_1(ioaddr, pdev);
+		rtl_hw_start_8168f_1(tp);
+		break;
+
+	case RTL_GIGA_MAC_VER_38:
+		rtl_hw_start_8411(tp);
 		break;
 
 	default:
@@ -4664,8 +5005,10 @@
 	PktCntrDisable | \
 	Mac_dbgo_sel)
 
-static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
 	static const struct ephy_info e_info_8102e_1[] = {
 		{ 0x01,	0, 0x6e65 },
 		{ 0x02,	0, 0x091f },
@@ -4678,7 +5021,7 @@
 	};
 	u8 cfg1;
 
-	rtl_csi_access_enable_2(ioaddr);
+	rtl_csi_access_enable_2(tp);
 
 	RTL_W8(DBG_REG, FIX_NAK_1);
 
@@ -4695,9 +5038,12 @@
 	rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
 }
 
-static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
 {
-	rtl_csi_access_enable_2(ioaddr);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct pci_dev *pdev = tp->pci_dev;
+
+	rtl_csi_access_enable_2(tp);
 
 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
@@ -4705,15 +5051,16 @@
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 }
 
-static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
 {
-	rtl_hw_start_8102e_2(ioaddr, pdev);
+	rtl_hw_start_8102e_2(tp);
 
-	rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
+	rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
 }
 
-static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	static const struct ephy_info e_info_8105e_1[] = {
 		{ 0x07,	0, 0x4000 },
 		{ 0x19,	0, 0x0200 },
@@ -4737,12 +5084,44 @@
 	rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
 }
 
-static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
 {
-	rtl_hw_start_8105e_1(ioaddr, pdev);
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	rtl_hw_start_8105e_1(tp);
 	rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
 }
 
+static void rtl_hw_start_8402(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	static const struct ephy_info e_info_8402[] = {
+		{ 0x19,	0xffff, 0xff64 },
+		{ 0x1e,	0, 0x4000 }
+	};
+
+	rtl_csi_access_enable_2(tp);
+
+	/* Force LAN exit from ASPM if Rx/Tx are not idle */
+	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+	rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
+
+	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+	rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
+	rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
+	rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+	rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+	rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+	rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+	rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
+		     ERIAR_EXGMAC);
+}
+
 static void rtl_hw_start_8101(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -4766,22 +5145,26 @@
 
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_07:
-		rtl_hw_start_8102e_1(ioaddr, pdev);
+		rtl_hw_start_8102e_1(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_08:
-		rtl_hw_start_8102e_3(ioaddr, pdev);
+		rtl_hw_start_8102e_3(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_09:
-		rtl_hw_start_8102e_2(ioaddr, pdev);
+		rtl_hw_start_8102e_2(tp);
 		break;
 
 	case RTL_GIGA_MAC_VER_29:
-		rtl_hw_start_8105e_1(ioaddr, pdev);
+		rtl_hw_start_8105e_1(tp);
 		break;
 	case RTL_GIGA_MAC_VER_30:
-		rtl_hw_start_8105e_2(ioaddr, pdev);
+		rtl_hw_start_8105e_2(tp);
+		break;
+
+	case RTL_GIGA_MAC_VER_37:
+		rtl_hw_start_8402(tp);
 		break;
 	}
 
@@ -5115,7 +5498,7 @@
 	u32 opts[2];
 	int frags;
 
-	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
 		goto err_stop_0;
 	}
@@ -5169,7 +5552,7 @@
 
 	mmiowb();
 
-	if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
 		 * not miss a ring update when it notices a stopped queue.
 		 */
@@ -5183,7 +5566,7 @@
 		 * can't.
 		 */
 		smp_mb();
-		if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
 			netif_wake_queue(dev);
 	}
 
@@ -5306,7 +5689,7 @@
 		 */
 		smp_mb();
 		if (netif_queue_stopped(dev) &&
-		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
 			netif_wake_queue(dev);
 		}
 		/*
@@ -6178,6 +6561,7 @@
 	rtl_init_mdio_ops(tp);
 	rtl_init_pll_power_ops(tp);
 	rtl_init_jumbo_ops(tp);
+	rtl_init_csi_ops(tp);
 
 	rtl8169_print_mac_version(tp);
 
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 3fb2355..46df3a0 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -4,11 +4,11 @@
 
 config SH_ETH
 	tristate "Renesas SuperH Ethernet support"
-	depends on SUPERH && \
+	depends on (SUPERH || ARCH_SHMOBILE) && \
 		(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
 		 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
 		 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
-		 CPU_SUBTYPE_SH7757)
+		 CPU_SUBTYPE_SH7757 || ARCH_R8A7740)
 	select CRC32
 	select NET_CORE
 	select MII
@@ -17,4 +17,5 @@
 	---help---
 	  Renesas SuperH Ethernet device driver.
 	  This driver supporting CPUs are:
-		- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763 and SH7757.
+		- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
+		  and R8A7740.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d63e09b..be3c221 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -386,6 +386,114 @@
 		sh_eth_write(ndev, 0x0, CSMR);
 }
 
+#elif defined(CONFIG_ARCH_R8A7740)
+#define SH_ETH_HAS_TSU	1
+static void sh_eth_chip_reset(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long mii;
+
+	/* reset device */
+	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
+	mdelay(1);
+
+	switch (mdp->phy_interface) {
+	case PHY_INTERFACE_MODE_GMII:
+		mii = 2;
+		break;
+	case PHY_INTERFACE_MODE_MII:
+		mii = 1;
+		break;
+	case PHY_INTERFACE_MODE_RMII:
+	default:
+		mii = 0;
+		break;
+	}
+	sh_eth_write(ndev, mii, RMII_MII);
+}
+
+static void sh_eth_reset(struct net_device *ndev)
+{
+	int cnt = 100;
+
+	sh_eth_write(ndev, EDSR_ENALL, EDSR);
+	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
+	while (cnt > 0) {
+		if (!(sh_eth_read(ndev, EDMR) & 0x3))
+			break;
+		mdelay(1);
+		cnt--;
+	}
+	if (cnt == 0)
+		printk(KERN_ERR "Device reset failed\n");
+
+	/* Table Init */
+	sh_eth_write(ndev, 0x0, TDLAR);
+	sh_eth_write(ndev, 0x0, TDFAR);
+	sh_eth_write(ndev, 0x0, TDFXR);
+	sh_eth_write(ndev, 0x0, TDFFR);
+	sh_eth_write(ndev, 0x0, RDLAR);
+	sh_eth_write(ndev, 0x0, RDFAR);
+	sh_eth_write(ndev, 0x0, RDFXR);
+	sh_eth_write(ndev, 0x0, RDFFR);
+}
+
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	if (mdp->duplex) /* Full */
+		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+	else		/* Half */
+		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	switch (mdp->speed) {
+	case 10: /* 10BASE */
+		sh_eth_write(ndev, GECMR_10, GECMR);
+		break;
+	case 100:/* 100BASE */
+		sh_eth_write(ndev, GECMR_100, GECMR);
+		break;
+	case 1000: /* 1000BASE */
+		sh_eth_write(ndev, GECMR_1000, GECMR);
+		break;
+	default:
+		break;
+	}
+}
+
+/* R8A7740 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+	.chip_reset	= sh_eth_chip_reset,
+	.set_duplex	= sh_eth_set_duplex,
+	.set_rate	= sh_eth_set_rate,
+
+	.ecsr_value	= ECSR_ICD | ECSR_MPD,
+	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
+	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+	.tx_check	= EESR_TC1 | EESR_FTC,
+	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+			  EESR_ECI,
+	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+			  EESR_TFE,
+
+	.apr		= 1,
+	.mpr		= 1,
+	.tpauser	= 1,
+	.bculr		= 1,
+	.hw_swap	= 1,
+	.no_trimd	= 1,
+	.no_ade		= 1,
+	.tsu		= 1,
+};
+
 #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
 #define SH_ETH_RESET_DEFAULT	1
 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
@@ -443,7 +551,7 @@
 }
 #endif
 
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static void sh_eth_set_receive_align(struct sk_buff *skb)
 {
 	int reserve;
@@ -919,6 +1027,10 @@
 		desc_status = edmac_to_cpu(mdp, rxdesc->status);
 		pkt_len = rxdesc->frame_length;
 
+#if defined(CONFIG_ARCH_R8A7740)
+		desc_status >>= 16;
+#endif
+
 		if (--boguscnt < 0)
 			break;
 
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 0fa14afc..57b8e1f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -372,7 +372,7 @@
 };
 
 /* Driver's parameters */
-#if defined(CONFIG_CPU_SH4)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 #define SH4_SKB_RX_ALIGN	32
 #else
 #define SH2_SH3_SKB_RX_ALIGN	2
@@ -381,7 +381,8 @@
 /*
  * Register's bits
  */
-#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
+#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\
+    defined(CONFIG_ARCH_R8A7740)
 /* EDSR */
 enum EDSR_BIT {
 	EDSR_ENT = 0x01, EDSR_ENR = 0x02,
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 1895605..8e9fda0 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -937,7 +937,7 @@
 	do {
 		unsigned long flags;
 		spin_lock_irqsave(&pd->lock, flags);
-		for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++)
+		for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
 			pd->stats[i] =
 				pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
 		s6gmac_stats_collect(pd, &statinf[0][0]);
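The s6gmac change above swaps an open-coded element count for ARRAY_SIZE(). A minimal user-space approximation of the idiom (the in-kernel macro in include/linux/kernel.h additionally rejects non-array arguments at compile time):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	unsigned long stats[16];
	size_t i;

	/* Iterates 16 times regardless of the element type, which is the
	 * point of the conversion: the count no longer hard-codes
	 * "unsigned long". */
	for (i = 0; i < ARRAY_SIZE(stats); i++)
		stats[i] = 0;

	printf("%zu entries\n", ARRAY_SIZE(stats));
	return 0;
}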
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3cbfbff..b95f2e1 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -656,25 +656,30 @@
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
+	struct pci_dev *dev = efx->pci_dev;
 	int rc;
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 	BUG_ON(efx->port_enabled);
 
-	rc = efx_nic_flush_queues(efx);
-	if (rc && EFX_WORKAROUND_7803(efx)) {
-		/* Schedule a reset to recover from the flush failure. The
-		 * descriptor caches reference memory we're about to free,
-		 * but falcon_reconfigure_mac_wrapper() won't reconnect
-		 * the MACs because of the pending reset. */
-		netif_err(efx, drv, efx->net_dev,
-			  "Resetting to recover from flush failure\n");
-		efx_schedule_reset(efx, RESET_TYPE_ALL);
-	} else if (rc) {
-		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
-	} else {
-		netif_dbg(efx, drv, efx->net_dev,
-			  "successfully flushed all queues\n");
+	/* Only perform flush if dma is enabled */
+	if (dev->is_busmaster) {
+		rc = efx_nic_flush_queues(efx);
+
+		if (rc && EFX_WORKAROUND_7803(efx)) {
+			/* Schedule a reset to recover from the flush failure. The
+			 * descriptor caches reference memory we're about to free,
+			 * but falcon_reconfigure_mac_wrapper() won't reconnect
+			 * the MACs because of the pending reset. */
+			netif_err(efx, drv, efx->net_dev,
+				  "Resetting to recover from flush failure\n");
+			efx_schedule_reset(efx, RESET_TYPE_ALL);
+		} else if (rc) {
+			netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+		} else {
+			netif_dbg(efx, drv, efx->net_dev,
+				  "successfully flushed all queues\n");
+		}
 	}
 
 	efx_for_each_channel(channel, efx) {
@@ -1349,7 +1354,7 @@
 	}
 
 	/* RSS might be usable on VFs even if it is disabled on the PF */
-	efx->rss_spread = (efx->n_rx_channels > 1 ?
+	efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
 			   efx->n_rx_channels : efx_vf_size(efx));
 
 	return 0;
@@ -2492,8 +2497,8 @@
 	efx_fini_io(efx);
 	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
 
-	pci_set_drvdata(pci_dev, NULL);
 	efx_fini_struct(efx);
+	pci_set_drvdata(pci_dev, NULL);
 	free_netdev(efx->net_dev);
 };
 
@@ -2695,6 +2700,7 @@
  fail2:
 	efx_fini_struct(efx);
  fail1:
+	pci_set_drvdata(pci_dev, NULL);
 	WARN_ON(rc > 0);
 	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
 	free_netdev(net_dev);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f22f45f..03ded36 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1023,7 +1023,7 @@
 			return -EINVAL;
 
 		/* Is it a default UC or MC filter? */
-		if (!compare_ether_addr(mac_mask->h_dest, mac_addr_mc_mask) &&
+		if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) &&
 		    vlan_tag_mask == 0) {
 			if (is_multicast_ether_addr(mac_entry->h_dest))
 				rc = efx_filter_set_mc_def(&spec);
@@ -1108,6 +1108,39 @@
 	return 0;
 }
 
+static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
+					 struct ethtool_eeprom *ee,
+					 u8 *data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int ret;
+
+	if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&efx->mac_lock);
+	ret = efx->phy_op->get_module_eeprom(efx, ee, data);
+	mutex_unlock(&efx->mac_lock);
+
+	return ret;
+}
+
+static int efx_ethtool_get_module_info(struct net_device *net_dev,
+				       struct ethtool_modinfo *modinfo)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int ret;
+
+	if (!efx->phy_op || !efx->phy_op->get_module_info)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&efx->mac_lock);
+	ret = efx->phy_op->get_module_info(efx, modinfo);
+	mutex_unlock(&efx->mac_lock);
+
+	return ret;
+}
+
 const struct ethtool_ops efx_ethtool_ops = {
 	.get_settings		= efx_ethtool_get_settings,
 	.set_settings		= efx_ethtool_set_settings,
@@ -1137,4 +1170,6 @@
 	.get_rxfh_indir_size	= efx_ethtool_get_rxfh_indir_size,
 	.get_rxfh_indir		= efx_ethtool_get_rxfh_indir,
 	.set_rxfh_indir		= efx_ethtool_set_rxfh_indir,
+	.get_module_info	= efx_ethtool_get_module_info,
+	.get_module_eeprom	= efx_ethtool_get_module_eeprom,
 };
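The two new ethtool_ops entries above expose SFP+ module data to user space; recent ethtool binaries reach them via "ethtool -m <iface>". Below is a user-space sketch of the same path through the SIOCETHTOOL ioctl, assuming kernel headers new enough to define ETHTOOL_GMODULEINFO (it is introduced in the same release as this patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_modinfo modinfo = { .cmd = ETHTOOL_GMODULEINFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 || argc < 2)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&modinfo;

	/* The driver's .get_module_info hook fills in type/eeprom_len,
	 * or the ioctl fails with EOPNOTSUPP if the PHY has no hook. */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("module type %u, eeprom length %u bytes\n",
		       modinfo.type, modinfo.eeprom_len);
	else
		perror("ETHTOOL_GMODULEINFO");

	close(fd);
	return 0;
}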
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_phy.c
index 7bcad89..13cb40f 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_phy.c
@@ -739,6 +739,80 @@
 	return NULL;
 }
 
+#define SFP_PAGE_SIZE	128
+#define SFP_NUM_PAGES	2
+static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
+					  struct ethtool_eeprom *ee, u8 *data)
+{
+	u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX];
+	u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
+	size_t outlen;
+	int rc;
+	unsigned int payload_len;
+	unsigned int space_remaining = ee->len;
+	unsigned int page;
+	unsigned int page_off;
+	unsigned int to_copy;
+	u8 *user_data = data;
+
+	BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN);
+
+	page_off = ee->offset % SFP_PAGE_SIZE;
+	page = ee->offset / SFP_PAGE_SIZE;
+
+	while (space_remaining && (page < SFP_NUM_PAGES)) {
+		MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
+
+		rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
+				  inbuf, sizeof(inbuf),
+				  outbuf, sizeof(outbuf),
+				  &outlen);
+		if (rc)
+			return rc;
+
+		if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
+			      SFP_PAGE_SIZE))
+			return -EIO;
+
+		payload_len = MCDI_DWORD(outbuf,
+					 GET_PHY_MEDIA_INFO_OUT_DATALEN);
+		if (payload_len != SFP_PAGE_SIZE)
+			return -EIO;
+
+		/* Copy as much as we can into data */
+		payload_len -= page_off;
+		to_copy = (space_remaining < payload_len) ?
+			space_remaining : payload_len;
+
+		memcpy(user_data,
+		       outbuf + page_off +
+		       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
+		       to_copy);
+
+		space_remaining -= to_copy;
+		user_data += to_copy;
+		page_off = 0;
+		page++;
+	}
+
+	return 0;
+}
+
+static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
+					struct ethtool_modinfo *modinfo)
+{
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+
+	switch (phy_cfg->media) {
+	case MC_CMD_MEDIA_SFP_PLUS:
+		modinfo->type = ETH_MODULE_SFF_8079;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 const struct efx_phy_operations efx_mcdi_phy_ops = {
 	.probe		= efx_mcdi_phy_probe,
 	.init		= efx_port_dummy_op_int,
@@ -751,4 +825,6 @@
 	.test_alive	= efx_mcdi_phy_test_alive,
 	.run_tests	= efx_mcdi_phy_run_tests,
 	.test_name	= efx_mcdi_phy_test_name,
+	.get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
+	.get_module_info = efx_mcdi_phy_get_module_info,
 };
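efx_mcdi_phy_get_module_eeprom() above walks the SFF-8079 image one 128-byte MCDI page at a time, clipping each copy to the caller's offset and remaining length. The same arithmetic in isolation, with an example request (offset 100, length 60) and the MCDI call replaced by a printf; the constants mirror the diff, everything else is illustrative only:

#include <stdio.h>

#define SFP_PAGE_SIZE	128
#define SFP_NUM_PAGES	2

int main(void)
{
	unsigned int offset = 100, len = 60;		/* example ethtool request */
	unsigned int page = offset / SFP_PAGE_SIZE;	/* -> 0 */
	unsigned int page_off = offset % SFP_PAGE_SIZE;	/* -> 100 */
	unsigned int space_remaining = len;

	while (space_remaining && page < SFP_NUM_PAGES) {
		unsigned int avail = SFP_PAGE_SIZE - page_off;
		unsigned int to_copy = space_remaining < avail ?
				       space_remaining : avail;

		printf("page %u: copy %u bytes from offset %u\n",
		       page, to_copy, page_off);
		space_remaining -= to_copy;
		page_off = 0;
		page++;
	}
	/* prints:
	 *   page 0: copy 28 bytes from offset 100
	 *   page 1: copy 32 bytes from offset 0
	 */
	return 0;
}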
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f0385e1..0e57535 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -252,8 +252,6 @@
  * @max_fill: RX descriptor maximum fill level (<= ring size)
  * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
  *	(<= @max_fill)
- * @fast_fill_limit: The level to which a fast fill will fill
- *	(@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
  * @min_fill: RX descriptor minimum non-zero fill level.
  *	This records the minimum fill level observed when a ring
  *	refill was triggered.
@@ -274,7 +272,6 @@
 	int removed_count;
 	unsigned int max_fill;
 	unsigned int fast_fill_trigger;
-	unsigned int fast_fill_limit;
 	unsigned int min_fill;
 	unsigned int min_overfill;
 	unsigned int alloc_page_count;
@@ -522,6 +519,11 @@
 	int (*test_alive) (struct efx_nic *efx);
 	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
 	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
+	int (*get_module_eeprom) (struct efx_nic *efx,
+			       struct ethtool_eeprom *ee,
+			       u8 *data);
+	int (*get_module_info) (struct efx_nic *efx,
+				struct ethtool_modinfo *modinfo);
 };
 
 /**
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 8a7caf88..326a286 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -449,6 +449,37 @@
 	efx->phy_data = NULL;
 }
 
+static int qt202x_phy_get_module_info(struct efx_nic *efx,
+				      struct ethtool_modinfo *modinfo)
+{
+	modinfo->type = ETH_MODULE_SFF_8079;
+	modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+	return 0;
+}
+
+static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
+					struct ethtool_eeprom *ee, u8 *data)
+{
+	int mmd, reg_base, rc, i;
+
+	if (efx->phy_type == PHY_TYPE_QT2025C) {
+		mmd = MDIO_MMD_PCS;
+		reg_base = 0xd000;
+	} else {
+		mmd = MDIO_MMD_PMAPMD;
+		reg_base = 0x8007;
+	}
+
+	for (i = 0; i < ee->len; i++) {
+		rc = efx_mdio_read(efx, mmd, reg_base + ee->offset + i);
+		if (rc < 0)
+			return rc;
+		data[i] = rc;
+	}
+
+	return 0;
+}
+
 const struct efx_phy_operations falcon_qt202x_phy_ops = {
 	.probe		 = qt202x_phy_probe,
 	.init		 = qt202x_phy_init,
@@ -459,4 +490,6 @@
 	.get_settings	 = qt202x_phy_get_settings,
 	.set_settings	 = efx_mdio_set_settings,
 	.test_alive	 = efx_mdio_test_alive,
+	.get_module_eeprom = qt202x_phy_get_module_eeprom,
+	.get_module_info = qt202x_phy_get_module_info,
 };
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 763fa2f..243e91f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -76,12 +76,7 @@
 /* This is the percentage fill level below which new RX descriptors
  * will be added to the RX descriptor ring.
  */
-static unsigned int rx_refill_threshold = 90;
-
-/* This is the percentage fill level to which an RX queue will be refilled
- * when the "RX refill threshold" is reached.
- */
-static unsigned int rx_refill_limit = 95;
+static unsigned int rx_refill_threshold;
 
 /*
  * RX maximum head room required.
@@ -342,7 +337,7 @@
  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
  * @rx_queue:		RX descriptor queue
  * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@fast_fill_limit. If there is insufficient atomic
+ * @rx_queue->@max_fill. If there is insufficient atomic
  * memory to do so, a slow fill will be scheduled.
  *
  * The caller must provide serialisation (none is used here). In practise,
@@ -367,15 +362,14 @@
 			rx_queue->min_fill = fill_level;
 	}
 
-	space = rx_queue->fast_fill_limit - fill_level;
-	if (space < EFX_RX_BATCH)
-		goto out;
+	space = rx_queue->max_fill - fill_level;
+	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);
 
 	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
 		   "RX queue %d fast-filling descriptor ring from"
 		   " level %d to level %d using %s allocation\n",
 		   efx_rx_queue_index(rx_queue), fill_level,
-		   rx_queue->fast_fill_limit,
+		   rx_queue->max_fill,
 		   channel->rx_alloc_push_pages ? "page" : "skb");
 
 	do {
@@ -681,7 +675,7 @@
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	unsigned int max_fill, trigger, limit;
+	unsigned int max_fill, trigger, max_trigger;
 
 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
 		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -694,12 +688,17 @@
 
 	/* Initialise limit fields */
 	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
-	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
-	limit = max_fill * min(rx_refill_limit, 100U) / 100U;
+	max_trigger = max_fill - EFX_RX_BATCH;
+	if (rx_refill_threshold != 0) {
+		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+		if (trigger > max_trigger)
+			trigger = max_trigger;
+	} else {
+		trigger = max_trigger;
+	}
 
 	rx_queue->max_fill = max_fill;
 	rx_queue->fast_fill_trigger = trigger;
-	rx_queue->fast_fill_limit = limit;
 
 	/* Set up RX descriptor ring */
 	rx_queue->enabled = true;
@@ -746,5 +745,5 @@
 
 module_param(rx_refill_threshold, uint, 0444);
 MODULE_PARM_DESC(rx_refill_threshold,
-		 "RX descriptor ring fast/slow fill threshold (%)");
+		 "RX descriptor ring refill threshold (%)");
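The rx.c changes above drop the separate fast_fill_limit: a fast fill now always aims for max_fill, and the refill trigger is clamped so that at least one EFX_RX_BATCH worth of descriptors can be pushed. The clamped-trigger computation on its own, with made-up example values (they are not the driver's defaults):

#include <stdio.h>

int main(void)
{
	unsigned int rxq_entries = 512, headroom = 16, batch = 8;
	unsigned int rx_refill_threshold = 0;	/* 0 = module default: fill to the top */
	unsigned int max_fill = rxq_entries - headroom;
	unsigned int max_trigger = max_fill - batch;
	unsigned int trigger;

	if (rx_refill_threshold != 0) {
		trigger = max_fill * (rx_refill_threshold < 100 ?
				      rx_refill_threshold : 100) / 100;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	printf("max_fill=%u trigger=%u\n", max_fill, trigger);
	return 0;	/* prints "max_fill=496 trigger=488" */
}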
 
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index a284d64..32e5566 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -39,9 +39,7 @@
 #define SC92031_NAME "sc92031"
 
 /* BAR 0 is MMIO, BAR 1 is PIO */
-#ifndef SC92031_USE_BAR
-#define SC92031_USE_BAR 0
-#endif
+#define SC92031_USE_PIO	0
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
 static int multicast_filter_limit = 64;
@@ -366,7 +364,7 @@
 	mmiowb();
 
 	/* wait for any concurrent interrupt/tasklet to finish */
-	synchronize_irq(dev->irq);
+	synchronize_irq(priv->pdev->irq);
 	tasklet_disable(&priv->tasklet);
 }
 
@@ -1114,10 +1112,13 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void sc92031_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
+	struct sc92031_priv *priv = netdev_priv(dev);
+	const int irq = priv->pdev->irq;
+
+	disable_irq(irq);
+	if (sc92031_interrupt(irq, dev) != IRQ_NONE)
 		sc92031_tasklet((unsigned long)dev);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 }
 #endif
 
@@ -1402,7 +1403,6 @@
 	struct net_device *dev;
 	struct sc92031_priv *priv;
 	u32 mac0, mac1;
-	unsigned long base_addr;
 
 	err = pci_enable_device(pdev);
 	if (unlikely(err < 0))
@@ -1422,7 +1422,7 @@
 	if (unlikely(err < 0))
 		goto out_request_regions;
 
-	port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
+	port_base = pci_iomap(pdev, SC92031_USE_PIO, 0);
 	if (unlikely(!port_base)) {
 		err = -EIO;
 		goto out_iomap;
@@ -1437,14 +1437,6 @@
 	pci_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-#if SC92031_USE_BAR == 0
-	dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
-	dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
-#elif SC92031_USE_BAR == 1
-	dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
-#endif
-	dev->irq = pdev->irq;
-
 	/* faked with skb_copy_and_csum_dev */
 	dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -1478,13 +1470,9 @@
 	if (err < 0)
 		goto out_register_netdev;
 
-#if SC92031_USE_BAR == 0
-	base_addr = dev->mem_start;
-#elif SC92031_USE_BAR == 1
-	base_addr = dev->base_addr;
-#endif
 	printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
-			base_addr, dev->dev_addr, dev->irq);
+	       (long)pci_resource_start(pdev, SC92031_USE_PIO), dev->dev_addr,
+	       pdev->irq);
 
 	return 0;
 
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index a9deda8..4613591 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -729,7 +729,7 @@
  * The interrupt handler does all of the Rx thread work and cleans up after
  * the Tx thread.
  */
-static irqreturn_t sis190_interrupt(int irq, void *__dev)
+static irqreturn_t sis190_irq(int irq, void *__dev)
 {
 	struct net_device *dev = __dev;
 	struct sis190_private *tp = netdev_priv(dev);
@@ -772,11 +772,11 @@
 static void sis190_netpoll(struct net_device *dev)
 {
 	struct sis190_private *tp = netdev_priv(dev);
-	struct pci_dev *pdev = tp->pci_dev;
+	const int irq = tp->pci_dev->irq;
 
-	disable_irq(pdev->irq);
-	sis190_interrupt(pdev->irq, dev);
-	enable_irq(pdev->irq);
+	disable_irq(irq);
+	sis190_irq(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -1085,7 +1085,7 @@
 
 	sis190_request_timer(dev);
 
-	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
+	rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
 	if (rc < 0)
 		goto err_release_timer_2;
 
@@ -1097,11 +1097,9 @@
 	sis190_delete_timer(dev);
 	sis190_rx_clear(tp);
 err_free_rx_1:
-	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
-		tp->rx_dma);
+	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
 err_free_tx_0:
-	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
-		tp->tx_dma);
+	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
 	goto out;
 }
 
@@ -1141,7 +1139,7 @@
 
 		spin_unlock_irq(&tp->lock);
 
-		synchronize_irq(dev->irq);
+		synchronize_irq(tp->pci_dev->irq);
 
 		if (!poll_locked)
 			poll_locked++;
@@ -1161,7 +1159,7 @@
 
 	sis190_down(dev);
 
-	free_irq(dev->irq, dev);
+	free_irq(pdev->irq, dev);
 
 	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
 	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
@@ -1884,8 +1882,6 @@
 	dev->netdev_ops = &sis190_netdev_ops;
 
 	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
-	dev->irq = pdev->irq;
-	dev->base_addr = (unsigned long) 0xdead;
 	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
 
 	spin_lock_init(&tp->lock);
@@ -1902,7 +1898,7 @@
 		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
 			    pci_name(pdev),
 			    sis_chip_info[ent->driver_data].name,
-			    ioaddr, dev->irq, dev->dev_addr);
+			    ioaddr, pdev->irq, dev->dev_addr);
 		netdev_info(dev, "%s mode.\n",
 			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
 	}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 5ccf02e..203d9c6 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -168,6 +168,8 @@
 	unsigned int cur_phy;
 	struct mii_if_info mii_info;
 
+	void __iomem	*ioaddr;
+
 	struct timer_list timer; /* Link status detection timer. */
 	u8 autong_complete; /* 1: auto-negotiate complete  */
 
@@ -201,13 +203,18 @@
 MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
 MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");
 
+#define sw32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define sw8(reg, val)	iowrite8(val, ioaddr + (reg))
+#define sr32(reg)	ioread32(ioaddr + (reg))
+#define sr16(reg)	ioread16(ioaddr + (reg))
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void sis900_poll(struct net_device *dev);
 #endif
 static int sis900_open(struct net_device *net_dev);
 static int sis900_mii_probe (struct net_device * net_dev);
 static void sis900_init_rxfilter (struct net_device * net_dev);
-static u16 read_eeprom(long ioaddr, int location);
+static u16 read_eeprom(void __iomem *ioaddr, int location);
 static int mdio_read(struct net_device *net_dev, int phy_id, int location);
 static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
 static void sis900_timer(unsigned long data);
@@ -231,7 +238,7 @@
 static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy);
 static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
 static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
-static void sis900_set_mode (long ioaddr, int speed, int duplex);
+static void sis900_set_mode(struct sis900_private *, int speed, int duplex);
 static const struct ethtool_ops sis900_ethtool_ops;
 
 /**
@@ -246,7 +253,8 @@
 
 static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
 {
-	long ioaddr = pci_resource_start(pci_dev, 0);
+	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u16 signature;
 	int i;
 
@@ -325,29 +333,30 @@
 static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
 					struct net_device *net_dev)
 {
-	long ioaddr = net_dev->base_addr;
+	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u32 rfcrSave;
 	u32 i;
 
-	rfcrSave = inl(rfcr + ioaddr);
+	rfcrSave = sr32(rfcr);
 
-	outl(rfcrSave | RELOAD, ioaddr + cr);
-	outl(0, ioaddr + cr);
+	sw32(cr, rfcrSave | RELOAD);
+	sw32(cr, 0);
 
 	/* disable packet filtering before setting filter */
-	outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+	sw32(rfcr, rfcrSave & ~RFEN);
 
 	/* load MAC addr to filter data register */
 	for (i = 0 ; i < 3 ; i++) {
-		outl((i << RFADDR_shift), ioaddr + rfcr);
-		*( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
+		sw32(rfcr, (i << RFADDR_shift));
+		*( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
 	}
 
 	/* Store MAC Address in perm_addr */
 	memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
 
 	/* enable packet filtering */
-	outl(rfcrSave | RFEN, rfcr + ioaddr);
+	sw32(rfcr, rfcrSave | RFEN);
 
 	return 1;
 }
@@ -371,31 +380,30 @@
 static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
 					struct net_device *net_dev)
 {
-	long ioaddr = net_dev->base_addr;
-	long ee_addr = ioaddr + mear;
-	u32 waittime = 0;
-	int i;
+	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
+	int wait, rc = 0;
 
-	outl(EEREQ, ee_addr);
-	while(waittime < 2000) {
-		if(inl(ee_addr) & EEGNT) {
+	sw32(mear, EEREQ);
+	for (wait = 0; wait < 2000; wait++) {
+		if (sr32(mear) & EEGNT) {
+			u16 *mac = (u16 *)net_dev->dev_addr;
+			int i;
 
 			/* get MAC address from EEPROM */
 			for (i = 0; i < 3; i++)
-			        ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+			        mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
 
 			/* Store MAC Address in perm_addr */
 			memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
 
-			outl(EEDONE, ee_addr);
-			return 1;
-		} else {
-			udelay(1);
-			waittime ++;
+			rc = 1;
+			break;
 		}
+		udelay(1);
 	}
-	outl(EEDONE, ee_addr);
-	return 0;
+	sw32(mear, EEDONE);
+	return rc;
 }
 
 static const struct net_device_ops sis900_netdev_ops = {
@@ -433,7 +441,7 @@
 	struct pci_dev *dev;
 	dma_addr_t ring_dma;
 	void *ring_space;
-	long ioaddr;
+	void __iomem *ioaddr;
 	int i, ret;
 	const char *card_name = card_names[pci_id->driver_data];
 	const char *dev_name = pci_name(pci_dev);
@@ -464,14 +472,17 @@
 	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
 
 	/* We do a request_region() to register /proc/ioports info. */
-	ioaddr = pci_resource_start(pci_dev, 0);
 	ret = pci_request_regions(pci_dev, "sis900");
 	if (ret)
 		goto err_out;
 
+	/* IO region. */
+	ioaddr = pci_iomap(pci_dev, 0, 0);
+	if (!ioaddr)
+		goto err_out_cleardev;
+
 	sis_priv = netdev_priv(net_dev);
-	net_dev->base_addr = ioaddr;
-	net_dev->irq = pci_dev->irq;
+	sis_priv->ioaddr = ioaddr;
 	sis_priv->pci_dev = pci_dev;
 	spin_lock_init(&sis_priv->lock);
 
@@ -480,7 +491,7 @@
 	ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
 	if (!ring_space) {
 		ret = -ENOMEM;
-		goto err_out_cleardev;
+		goto err_out_unmap;
 	}
 	sis_priv->tx_ring = ring_space;
 	sis_priv->tx_ring_dma = ring_dma;
@@ -534,7 +545,7 @@
 
 	/* 630ET : set the mii access mode as software-mode */
 	if (sis_priv->chipset_rev == SIS630ET_900_REV)
-		outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr);
+		sw32(cr, ACCESSMODE | sr32(cr));
 
 	/* probe for mii transceiver */
 	if (sis900_mii_probe(net_dev) == 0) {
@@ -556,25 +567,27 @@
 		goto err_unmap_rx;
 
 	/* print some information about our NIC */
-	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
-	       net_dev->name, card_name, ioaddr, net_dev->irq,
+	printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n",
+	       net_dev->name, card_name, ioaddr, pci_dev->irq,
 	       net_dev->dev_addr);
 
 	/* Detect Wake on Lan support */
-	ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27;
+	ret = (sr32(CFGPMC) & PMESP) >> 27;
 	if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
 		printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
 
 	return 0;
 
- err_unmap_rx:
+err_unmap_rx:
 	pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
 		sis_priv->rx_ring_dma);
- err_unmap_tx:
+err_unmap_tx:
 	pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
 		sis_priv->tx_ring_dma);
- err_out_cleardev:
- 	pci_set_drvdata(pci_dev, NULL);
+err_out_unmap:
+	pci_iounmap(pci_dev, ioaddr);
+err_out_cleardev:
+	pci_set_drvdata(pci_dev, NULL);
 	pci_release_regions(pci_dev);
  err_out:
 	free_netdev(net_dev);
@@ -798,7 +811,7 @@
 
 
 /* Delay between EEPROM clock transitions. */
-#define eeprom_delay()  inl(ee_addr)
+#define eeprom_delay()	sr32(mear)
 
 /**
  *	read_eeprom - Read Serial EEPROM
@@ -809,41 +822,41 @@
  *	Note that location is in word (16 bits) unit
  */
 
-static u16 __devinit read_eeprom(long ioaddr, int location)
+static u16 __devinit read_eeprom(void __iomem *ioaddr, int location)
 {
+	u32 read_cmd = location | EEread;
 	int i;
 	u16 retval = 0;
-	long ee_addr = ioaddr + mear;
-	u32 read_cmd = location | EEread;
 
-	outl(0, ee_addr);
+	sw32(mear, 0);
 	eeprom_delay();
-	outl(EECS, ee_addr);
+	sw32(mear, EECS);
 	eeprom_delay();
 
 	/* Shift the read command (9) bits out. */
 	for (i = 8; i >= 0; i--) {
 		u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
-		outl(dataval, ee_addr);
+
+		sw32(mear, dataval);
 		eeprom_delay();
-		outl(dataval | EECLK, ee_addr);
+		sw32(mear, dataval | EECLK);
 		eeprom_delay();
 	}
-	outl(EECS, ee_addr);
+	sw32(mear, EECS);
 	eeprom_delay();
 
 	/* read the 16-bits data in */
 	for (i = 16; i > 0; i--) {
-		outl(EECS, ee_addr);
+		sw32(mear, EECS);
 		eeprom_delay();
-		outl(EECS | EECLK, ee_addr);
+		sw32(mear, EECS | EECLK);
 		eeprom_delay();
-		retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0);
+		retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0);
 		eeprom_delay();
 	}
 
 	/* Terminate the EEPROM access. */
-	outl(0, ee_addr);
+	sw32(mear, 0);
 	eeprom_delay();
 
 	return retval;
@@ -852,24 +865,27 @@
 /* Read and write the MII management registers using software-generated
    serial MDIO protocol. Note that the command bits and data bits are
    send out separately */
-#define mdio_delay()    inl(mdio_addr)
+#define mdio_delay()	sr32(mear)
 
-static void mdio_idle(long mdio_addr)
+static void mdio_idle(struct sis900_private *sp)
 {
-	outl(MDIO | MDDIR, mdio_addr);
+	void __iomem *ioaddr = sp->ioaddr;
+
+	sw32(mear, MDIO | MDDIR);
 	mdio_delay();
-	outl(MDIO | MDDIR | MDC, mdio_addr);
+	sw32(mear, MDIO | MDDIR | MDC);
 }
 
-/* Syncronize the MII management interface by shifting 32 one bits out. */
-static void mdio_reset(long mdio_addr)
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_reset(struct sis900_private *sp)
 {
+	void __iomem *ioaddr = sp->ioaddr;
 	int i;
 
 	for (i = 31; i >= 0; i--) {
-		outl(MDDIR | MDIO, mdio_addr);
+		sw32(mear, MDDIR | MDIO);
 		mdio_delay();
-		outl(MDDIR | MDIO | MDC, mdio_addr);
+		sw32(mear, MDDIR | MDIO | MDC);
 		mdio_delay();
 	}
 }
@@ -887,31 +903,33 @@
 
 static int mdio_read(struct net_device *net_dev, int phy_id, int location)
 {
-	long mdio_addr = net_dev->base_addr + mear;
 	int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+	struct sis900_private *sp = netdev_priv(net_dev);
+	void __iomem *ioaddr = sp->ioaddr;
 	u16 retval = 0;
 	int i;
 
-	mdio_reset(mdio_addr);
-	mdio_idle(mdio_addr);
+	mdio_reset(sp);
+	mdio_idle(sp);
 
 	for (i = 15; i >= 0; i--) {
 		int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
-		outl(dataval, mdio_addr);
+
+		sw32(mear, dataval);
 		mdio_delay();
-		outl(dataval | MDC, mdio_addr);
+		sw32(mear, dataval | MDC);
 		mdio_delay();
 	}
 
 	/* Read the 16 data bits. */
 	for (i = 16; i > 0; i--) {
-		outl(0, mdio_addr);
+		sw32(mear, 0);
 		mdio_delay();
-		retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0);
-		outl(MDC, mdio_addr);
+		retval = (retval << 1) | ((sr32(mear) & MDIO) ? 1 : 0);
+		sw32(mear, MDC);
 		mdio_delay();
 	}
-	outl(0x00, mdio_addr);
+	sw32(mear, 0x00);
 
 	return retval;
 }
@@ -931,19 +949,21 @@
 static void mdio_write(struct net_device *net_dev, int phy_id, int location,
 			int value)
 {
-	long mdio_addr = net_dev->base_addr + mear;
 	int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+	struct sis900_private *sp = netdev_priv(net_dev);
+	void __iomem *ioaddr = sp->ioaddr;
 	int i;
 
-	mdio_reset(mdio_addr);
-	mdio_idle(mdio_addr);
+	mdio_reset(sp);
+	mdio_idle(sp);
 
 	/* Shift the command bits out. */
 	for (i = 15; i >= 0; i--) {
 		int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
-		outb(dataval, mdio_addr);
+
+		sw8(mear, dataval);
 		mdio_delay();
-		outb(dataval | MDC, mdio_addr);
+		sw8(mear, dataval | MDC);
 		mdio_delay();
 	}
 	mdio_delay();
@@ -951,21 +971,22 @@
 	/* Shift the value bits out. */
 	for (i = 15; i >= 0; i--) {
 		int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
-		outl(dataval, mdio_addr);
+
+		sw32(mear, dataval);
 		mdio_delay();
-		outl(dataval | MDC, mdio_addr);
+		sw32(mear, dataval | MDC);
 		mdio_delay();
 	}
 	mdio_delay();
 
 	/* Clear out extra bits. */
 	for (i = 2; i > 0; i--) {
-		outb(0, mdio_addr);
+		sw8(mear, 0);
 		mdio_delay();
-		outb(MDC, mdio_addr);
+		sw8(mear, MDC);
 		mdio_delay();
 	}
-	outl(0x00, mdio_addr);
+	sw32(mear, 0x00);
 }
 
 
@@ -1000,9 +1021,12 @@
 */
 static void sis900_poll(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	sis900_interrupt(dev->irq, dev);
-	enable_irq(dev->irq);
+	struct sis900_private *sp = netdev_priv(dev);
+	const int irq = sp->pci_dev->irq;
+
+	disable_irq(irq);
+	sis900_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -1018,7 +1042,7 @@
 sis900_open(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	int ret;
 
 	/* Soft reset the chip. */
@@ -1027,8 +1051,8 @@
 	/* Equalizer workaround Rule */
 	sis630_set_eq(net_dev, sis_priv->chipset_rev);
 
-	ret = request_irq(net_dev->irq, sis900_interrupt, IRQF_SHARED,
-						net_dev->name, net_dev);
+	ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED,
+			  net_dev->name, net_dev);
 	if (ret)
 		return ret;
 
@@ -1042,12 +1066,12 @@
 	netif_start_queue(net_dev);
 
 	/* Workaround for EDB */
-	sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+	sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
 	/* Enable all known interrupts by setting the interrupt mask. */
-	outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
-	outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
-	outl(IE, ioaddr + ier);
+	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+	sw32(cr, RxENA | sr32(cr));
+	sw32(ier, IE);
 
 	sis900_check_mode(net_dev, sis_priv->mii);
 
@@ -1074,31 +1098,30 @@
 sis900_init_rxfilter (struct net_device * net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u32 rfcrSave;
 	u32 i;
 
-	rfcrSave = inl(rfcr + ioaddr);
+	rfcrSave = sr32(rfcr);
 
 	/* disable packet filtering before setting filter */
-	outl(rfcrSave & ~RFEN, rfcr + ioaddr);
+	sw32(rfcr, rfcrSave & ~RFEN);
 
 	/* load MAC addr to filter data register */
 	for (i = 0 ; i < 3 ; i++) {
-		u32 w;
+		u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
 
-		w = (u32) *((u16 *)(net_dev->dev_addr)+i);
-		outl((i << RFADDR_shift), ioaddr + rfcr);
-		outl(w, ioaddr + rfdr);
+		sw32(rfcr, i << RFADDR_shift);
+		sw32(rfdr, w);
 
 		if (netif_msg_hw(sis_priv)) {
 			printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n",
-			       net_dev->name, i, inl(ioaddr + rfdr));
+			       net_dev->name, i, sr32(rfdr));
 		}
 	}
 
 	/* enable packet filtering */
-	outl(rfcrSave | RFEN, rfcr + ioaddr);
+	sw32(rfcr, rfcrSave | RFEN);
 }
 
 /**
@@ -1112,7 +1135,7 @@
 sis900_init_tx_ring(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	int i;
 
 	sis_priv->tx_full = 0;
@@ -1128,10 +1151,10 @@
 	}
 
 	/* load Transmit Descriptor Register */
-	outl(sis_priv->tx_ring_dma, ioaddr + txdp);
+	sw32(txdp, sis_priv->tx_ring_dma);
 	if (netif_msg_hw(sis_priv))
 		printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
-		       net_dev->name, inl(ioaddr + txdp));
+		       net_dev->name, sr32(txdp));
 }
 
 /**
@@ -1146,7 +1169,7 @@
 sis900_init_rx_ring(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	int i;
 
 	sis_priv->cur_rx = 0;
@@ -1181,10 +1204,10 @@
 	sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
 
 	/* load Receive Descriptor Register */
-	outl(sis_priv->rx_ring_dma, ioaddr + rxdp);
+	sw32(rxdp, sis_priv->rx_ring_dma);
 	if (netif_msg_hw(sis_priv))
 		printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
-		       net_dev->name, inl(ioaddr + rxdp));
+		       net_dev->name, sr32(rxdp));
 }
 
 /**
@@ -1298,7 +1321,7 @@
 
 		sis900_read_mode(net_dev, &speed, &duplex);
 		if (duplex){
-			sis900_set_mode(net_dev->base_addr, speed, duplex);
+			sis900_set_mode(sis_priv, speed, duplex);
 			sis630_set_eq(net_dev, sis_priv->chipset_rev);
 			netif_start_queue(net_dev);
 		}
@@ -1359,25 +1382,25 @@
 static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	int speed, duplex;
 
 	if (mii_phy->phy_types == LAN) {
-		outl(~EXD & inl(ioaddr + cfg), ioaddr + cfg);
+		sw32(cfg, ~EXD & sr32(cfg));
 		sis900_set_capability(net_dev , mii_phy);
 		sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
 	} else {
-		outl(EXD | inl(ioaddr + cfg), ioaddr + cfg);
+		sw32(cfg, EXD | sr32(cfg));
 		speed = HW_SPEED_HOME;
 		duplex = FDX_CAPABLE_HALF_SELECTED;
-		sis900_set_mode(ioaddr, speed, duplex);
+		sis900_set_mode(sis_priv, speed, duplex);
 		sis_priv->autong_complete = 1;
 	}
 }
 
 /**
  *	sis900_set_mode - Set the media mode of mac register.
- *	@ioaddr: the address of the device
+ *	@sp:     the device private data
  *	@speed : the transmit speed to be determined
  *	@duplex: the duplex mode to be determined
  *
@@ -1388,11 +1411,12 @@
  *	double words.
  */
 
-static void sis900_set_mode (long ioaddr, int speed, int duplex)
+static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
 {
+	void __iomem *ioaddr = sp->ioaddr;
 	u32 tx_flags = 0, rx_flags = 0;
 
-	if (inl(ioaddr + cfg) & EDB_MASTER_EN) {
+	if (sr32(cfg) & EDB_MASTER_EN) {
 		tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
 					(TX_FILL_THRESH << TxFILLT_shift);
 		rx_flags = DMA_BURST_64 << RxMXDMA_shift;
@@ -1420,8 +1444,8 @@
 	rx_flags |= RxAJAB;
 #endif
 
-	outl (tx_flags, ioaddr + txcfg);
-	outl (rx_flags, ioaddr + rxcfg);
+	sw32(txcfg, tx_flags);
+	sw32(rxcfg, rx_flags);
 }
 
 /**
@@ -1528,16 +1552,17 @@
 static void sis900_tx_timeout(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	unsigned long flags;
 	int i;
 
-	if(netif_msg_tx_err(sis_priv))
+	if (netif_msg_tx_err(sis_priv)) {
 		printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
-	       		net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
+			net_dev->name, sr32(cr), sr32(isr));
+	}
 
 	/* Disable interrupts by clearing the interrupt mask. */
-	outl(0x0000, ioaddr + imr);
+	sw32(imr, 0x0000);
 
 	/* use spinlock to prevent interrupt handler accessing buffer ring */
 	spin_lock_irqsave(&sis_priv->lock, flags);
@@ -1566,10 +1591,10 @@
 	net_dev->trans_start = jiffies; /* prevent tx timeout */
 
 	/* load Transmit Descriptor Register */
-	outl(sis_priv->tx_ring_dma, ioaddr + txdp);
+	sw32(txdp, sis_priv->tx_ring_dma);
 
 	/* Enable all known interrupts by setting the interrupt mask. */
-	outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
+	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
 }
 
 /**
@@ -1586,7 +1611,7 @@
 sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	unsigned int  entry;
 	unsigned long flags;
 	unsigned int  index_cur_tx, index_dirty_tx;
@@ -1608,7 +1633,7 @@
 	sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
 		skb->data, skb->len, PCI_DMA_TODEVICE);
 	sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
-	outl(TxENA | inl(ioaddr + cr), ioaddr + cr);
+	sw32(cr, TxENA | sr32(cr));
 
 	sis_priv->cur_tx ++;
 	index_cur_tx = sis_priv->cur_tx;
@@ -1654,14 +1679,14 @@
 	struct net_device *net_dev = dev_instance;
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
 	int boguscnt = max_interrupt_work;
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u32 status;
 	unsigned int handled = 0;
 
 	spin_lock (&sis_priv->lock);
 
 	do {
-		status = inl(ioaddr + isr);
+		status = sr32(isr);
 
 		if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
 			/* nothing intresting happened */
@@ -1696,7 +1721,7 @@
 	if(netif_msg_intr(sis_priv))
 		printk(KERN_DEBUG "%s: exiting interrupt, "
 		       "interrupt status = 0x%#8.8x.\n",
-		       net_dev->name, inl(ioaddr + isr));
+		       net_dev->name, sr32(isr));
 
 	spin_unlock (&sis_priv->lock);
 	return IRQ_RETVAL(handled);
@@ -1715,7 +1740,7 @@
 static int sis900_rx(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
 	u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
 	int rx_work_limit;
@@ -1847,7 +1872,7 @@
 		}
 	}
 	/* re-enable the potentially idle receive state matchine */
-	outl(RxENA | inl(ioaddr + cr), ioaddr + cr );
+	sw32(cr, RxENA | sr32(cr));
 
 	return 0;
 }
@@ -1932,31 +1957,31 @@
 
 static int sis900_close(struct net_device *net_dev)
 {
-	long ioaddr = net_dev->base_addr;
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	struct pci_dev *pdev = sis_priv->pci_dev;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	struct sk_buff *skb;
 	int i;
 
 	netif_stop_queue(net_dev);
 
 	/* Disable interrupts by clearing the interrupt mask. */
-	outl(0x0000, ioaddr + imr);
-	outl(0x0000, ioaddr + ier);
+	sw32(imr, 0x0000);
+	sw32(ier, 0x0000);
 
 	/* Stop the chip's Tx and Rx Status Machine */
-	outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+	sw32(cr, RxDIS | TxDIS | sr32(cr));
 
 	del_timer(&sis_priv->timer);
 
-	free_irq(net_dev->irq, net_dev);
+	free_irq(pdev->irq, net_dev);
 
 	/* Free Tx and RX skbuff */
 	for (i = 0; i < NUM_RX_DESC; i++) {
 		skb = sis_priv->rx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(sis_priv->pci_dev,
-				sis_priv->rx_ring[i].bufptr,
-				RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr,
+					 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(skb);
 			sis_priv->rx_skbuff[i] = NULL;
 		}
@@ -1964,9 +1989,8 @@
 	for (i = 0; i < NUM_TX_DESC; i++) {
 		skb = sis_priv->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(sis_priv->pci_dev,
-				sis_priv->tx_ring[i].bufptr, skb->len,
-				PCI_DMA_TODEVICE);
+			pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr,
+					 skb->len, PCI_DMA_TODEVICE);
 			dev_kfree_skb(skb);
 			sis_priv->tx_skbuff[i] = NULL;
 		}
@@ -2055,14 +2079,14 @@
 static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long pmctrl_addr = net_dev->base_addr + pmctrl;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u32 cfgpmcsr = 0, pmctrl_bits = 0;
 
 	if (wol->wolopts == 0) {
 		pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
 		cfgpmcsr &= ~PME_EN;
 		pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
-		outl(pmctrl_bits, pmctrl_addr);
+		sw32(pmctrl, pmctrl_bits);
 		if (netif_msg_wol(sis_priv))
 			printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
 		return 0;
@@ -2077,7 +2101,7 @@
 	if (wol->wolopts & WAKE_PHY)
 		pmctrl_bits |= LINKON;
 
-	outl(pmctrl_bits, pmctrl_addr);
+	sw32(pmctrl, pmctrl_bits);
 
 	pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
 	cfgpmcsr |= PME_EN;
@@ -2090,10 +2114,11 @@
 
 static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
 {
-	long pmctrl_addr = net_dev->base_addr + pmctrl;
+	struct sis900_private *sp = netdev_priv(net_dev);
+	void __iomem *ioaddr = sp->ioaddr;
 	u32 pmctrl_bits;
 
-	pmctrl_bits = inl(pmctrl_addr);
+	pmctrl_bits = sr32(pmctrl);
 	if (pmctrl_bits & MAGICPKT)
 		wol->wolopts |= WAKE_MAGIC;
 	if (pmctrl_bits & LINKON)
@@ -2279,8 +2304,8 @@
 
 static void set_rx_mode(struct net_device *net_dev)
 {
-	long ioaddr = net_dev->base_addr;
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u16 mc_filter[16] = {0};	/* 256/128 bits multicast hash table */
 	int i, table_entries;
 	u32 rx_mode;
@@ -2322,24 +2347,24 @@
 	/* update Multicast Hash Table in Receive Filter */
 	for (i = 0; i < table_entries; i++) {
                 /* why plus 0x04 ??, That makes the correct value for hash table. */
-		outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr);
-		outl(mc_filter[i], ioaddr + rfdr);
+		sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift);
+		sw32(rfdr, mc_filter[i]);
 	}
 
-	outl(RFEN | rx_mode, ioaddr + rfcr);
+	sw32(rfcr, RFEN | rx_mode);
 
 	/* sis900 is capable of looping back packets at MAC level for
 	 * debugging purpose */
 	if (net_dev->flags & IFF_LOOPBACK) {
 		u32 cr_saved;
 		/* We must disable Tx/Rx before setting loopback mode */
-		cr_saved = inl(ioaddr + cr);
-		outl(cr_saved | TxDIS | RxDIS, ioaddr + cr);
+		cr_saved = sr32(cr);
+		sw32(cr, cr_saved | TxDIS | RxDIS);
 		/* enable loopback */
-		outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg);
-		outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg);
+		sw32(txcfg, sr32(txcfg) | TxMLB);
+		sw32(rxcfg, sr32(rxcfg) | RxATX);
 		/* restore cr */
-		outl(cr_saved, ioaddr + cr);
+		sw32(cr, cr_saved);
 	}
 }
 
@@ -2355,26 +2380,25 @@
 static void sis900_reset(struct net_device *net_dev)
 {
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
-	int i = 0;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 	u32 status = TxRCMP | RxRCMP;
+	int i;
 
-	outl(0, ioaddr + ier);
-	outl(0, ioaddr + imr);
-	outl(0, ioaddr + rfcr);
+	sw32(ier, 0);
+	sw32(imr, 0);
+	sw32(rfcr, 0);
 
-	outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr);
+	sw32(cr, RxRESET | TxRESET | RESET | sr32(cr));
 
 	/* Check that the chip has finished the reset. */
-	while (status && (i++ < 1000)) {
-		status ^= (inl(isr + ioaddr) & status);
-	}
+	for (i = 0; status && (i < 1000); i++)
+		status ^= sr32(isr) & status;
 
-	if( (sis_priv->chipset_rev >= SIS635A_900_REV) ||
-			(sis_priv->chipset_rev == SIS900B_900_REV) )
-		outl(PESEL | RND_CNT, ioaddr + cfg);
+	if (sis_priv->chipset_rev >= SIS635A_900_REV ||
+	    sis_priv->chipset_rev == SIS900B_900_REV)
+		sw32(cfg, PESEL | RND_CNT);
 	else
-		outl(PESEL, ioaddr + cfg);
+		sw32(cfg, PESEL);
 }
 
 /**
@@ -2388,10 +2412,12 @@
 {
 	struct net_device *net_dev = pci_get_drvdata(pci_dev);
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	struct mii_phy *phy = NULL;
+
+	unregister_netdev(net_dev);
 
 	while (sis_priv->first_mii) {
-		phy = sis_priv->first_mii;
+		struct mii_phy *phy = sis_priv->first_mii;
+
 		sis_priv->first_mii = phy->next;
 		kfree(phy);
 	}
@@ -2400,7 +2426,7 @@
 		sis_priv->rx_ring_dma);
 	pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
 		sis_priv->tx_ring_dma);
-	unregister_netdev(net_dev);
+	pci_iounmap(pci_dev, sis_priv->ioaddr);
 	free_netdev(net_dev);
 	pci_release_regions(pci_dev);
 	pci_set_drvdata(pci_dev, NULL);
@@ -2411,7 +2437,8 @@
 static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
 {
 	struct net_device *net_dev = pci_get_drvdata(pci_dev);
-	long ioaddr = net_dev->base_addr;
+	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
 
 	if(!netif_running(net_dev))
 		return 0;
@@ -2420,7 +2447,7 @@
 	netif_device_detach(net_dev);
 
 	/* Stop the chip's Tx and Rx Status Machine */
-	outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr);
+	sw32(cr, RxDIS | TxDIS | sr32(cr));
 
 	pci_set_power_state(pci_dev, PCI_D3hot);
 	pci_save_state(pci_dev);
@@ -2432,7 +2459,7 @@
 {
 	struct net_device *net_dev = pci_get_drvdata(pci_dev);
 	struct sis900_private *sis_priv = netdev_priv(net_dev);
-	long ioaddr = net_dev->base_addr;
+	void __iomem *ioaddr = sis_priv->ioaddr;
 
 	if(!netif_running(net_dev))
 		return 0;
@@ -2453,9 +2480,9 @@
-	sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+	sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
 	/* Enable all known interrupts by setting the interrupt mask. */
-	outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
-	outl(RxENA | inl(ioaddr + cr), ioaddr + cr);
-	outl(IE, ioaddr + ier);
+	sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+	sw32(cr, RxENA | sr32(cr));
+	sw32(ier, IE);
 
 	sis900_check_mode(net_dev, sis_priv->mii);
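The sis900 (and epic100, below) conversions replace port I/O on net_device.base_addr with ioread/iowrite through pci_iomap(), wrapped in sw32()/sr32()-style macros that expand against whatever local "ioaddr" is in scope; that is why every converted function now starts by fetching ioaddr from the private struct. A user-space mock of the pattern, with a plain array standing in for the device registers and made-up register offsets:

#include <stdint.h>
#include <stdio.h>

#define sw32(reg, val)	(*(volatile uint32_t *)((ioaddr) + (reg)) = (val))
#define sr32(reg)	(*(volatile uint32_t *)((ioaddr) + (reg)))

struct mock_priv {
	uint8_t *ioaddr;		/* stands in for void __iomem * */
};

static void mock_enable_rx(struct mock_priv *sp)
{
	uint8_t *ioaddr = sp->ioaddr;	/* the macros pick up this name */
	enum { cr = 0x00, imr = 0x14 };	/* offsets are made up */

	sw32(imr, 0xff);
	sw32(cr, sr32(cr) | 0x4);	/* read-modify-write, as in "RxENA | sr32(cr)" */
}

int main(void)
{
	static uint32_t regs[0x40];	/* mock register file */
	struct mock_priv sp = { .ioaddr = (uint8_t *)regs };
	uint8_t *ioaddr = (uint8_t *)regs;

	mock_enable_rx(&sp);
	printf("cr=%#x imr=%#x\n", sr32(0x00), sr32(0x14));
	return 0;			/* prints "cr=0x4 imr=0xff" */
}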
 
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 2a662e6..d01e59c 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -146,6 +146,12 @@
 #define EPIC_TOTAL_SIZE 0x100
 #define USE_IO_OPS 1
 
+#ifdef USE_IO_OPS
+#define EPIC_BAR	0
+#else
+#define EPIC_BAR	1
+#endif
+
 typedef enum {
 	SMSC_83C170_0,
 	SMSC_83C170,
@@ -176,21 +182,11 @@
 };
 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
 
-
-#ifndef USE_IO_OPS
-#undef inb
-#undef inw
-#undef inl
-#undef outb
-#undef outw
-#undef outl
-#define inb readb
-#define inw readw
-#define inl readl
-#define outb writeb
-#define outw writew
-#define outl writel
-#endif
+#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
+#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
+#define er8(reg)	ioread8(ioaddr + (reg))
+#define er16(reg)	ioread16(ioaddr + (reg))
+#define er32(reg)	ioread32(ioaddr + (reg))
 
 /* Offsets to registers, using the (ugh) SMC names. */
 enum epic_registers {
@@ -275,6 +271,7 @@
 	u32 irq_mask;
 	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 
+	void __iomem *ioaddr;
 	struct pci_dev *pci_dev;			/* PCI bus location. */
 	int chip_id, chip_flags;
 
@@ -290,7 +287,7 @@
 };
 
 static int epic_open(struct net_device *dev);
-static int read_eeprom(long ioaddr, int location);
+static int read_eeprom(struct epic_private *, int);
 static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
 static void epic_restart(struct net_device *dev);
@@ -321,11 +318,11 @@
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-static int __devinit epic_init_one (struct pci_dev *pdev,
-				    const struct pci_device_id *ent)
+static int __devinit epic_init_one(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
 {
 	static int card_idx = -1;
-	long ioaddr;
+	void __iomem *ioaddr;
 	int chip_idx = (int) ent->driver_data;
 	int irq;
 	struct net_device *dev;
@@ -368,19 +365,15 @@
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-#ifdef USE_IO_OPS
-	ioaddr = pci_resource_start (pdev, 0);
-#else
-	ioaddr = pci_resource_start (pdev, 1);
-	ioaddr = (long) pci_ioremap_bar(pdev, 1);
+	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
 	if (!ioaddr) {
 		dev_err(&pdev->dev, "ioremap failed\n");
 		goto err_out_free_netdev;
 	}
-#endif
 
 	pci_set_drvdata(pdev, dev);
 	ep = netdev_priv(dev);
+	ep->ioaddr = ioaddr;
 	ep->mii.dev = dev;
 	ep->mii.mdio_read = mdio_read;
 	ep->mii.mdio_write = mdio_write;
@@ -409,34 +402,31 @@
 			duplex = full_duplex[card_idx];
 	}
 
-	dev->base_addr = ioaddr;
-	dev->irq = irq;
-
 	spin_lock_init(&ep->lock);
 	spin_lock_init(&ep->napi_lock);
 	ep->reschedule_in_poll = 0;
 
 	/* Bring the chip out of low-power mode. */
-	outl(0x4200, ioaddr + GENCTL);
+	ew32(GENCTL, 0x4200);
 	/* Magic?!  If we don't set this bit the MII interface won't work. */
 	/* This magic is documented in SMSC app note 7.15 */
 	for (i = 16; i > 0; i--)
-		outl(0x0008, ioaddr + TEST1);
+		ew32(TEST1, 0x0008);
 
 	/* Turn on the MII transceiver. */
-	outl(0x12, ioaddr + MIICfg);
+	ew32(MIICfg, 0x12);
 	if (chip_idx == 1)
-		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
-	outl(0x0200, ioaddr + GENCTL);
+		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
+	ew32(GENCTL, 0x0200);
 
 	/* Note: the '175 does not have a serial EEPROM. */
 	for (i = 0; i < 3; i++)
-		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4));
+		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
 
 	if (debug > 2) {
 		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
 		for (i = 0; i < 64; i++)
-			printk(" %4.4x%s", read_eeprom(ioaddr, i),
+			printk(" %4.4x%s", read_eeprom(ep, i),
 				   i % 16 == 15 ? "\n" : "");
 	}
 
@@ -481,8 +471,8 @@
 
 	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
 	if (ep->chip_flags & MII_PWRDWN)
-		outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
-	outl(0x0008, ioaddr + GENCTL);
+		ew32(NVCTL, er32(NVCTL) & ~0x483c);
+	ew32(GENCTL, 0x0008);
 
 	/* The lower four bits are the media type. */
 	if (duplex) {
@@ -501,8 +491,9 @@
 	if (ret < 0)
 		goto err_out_unmap_rx;
 
-	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
-	       dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq,
+	printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
+	       dev->name, pci_id_tbl[chip_idx].name,
+	       (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
 	       dev->dev_addr);
 
 out:
@@ -513,10 +504,8 @@
 err_out_unmap_tx:
 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
 err_out_iounmap:
-#ifndef USE_IO_OPS
-	iounmap(ioaddr);
+	pci_iounmap(pdev, ioaddr);
 err_out_free_netdev:
-#endif
 	free_netdev(dev);
 err_out_free_res:
 	pci_release_regions(pdev);
@@ -540,7 +529,7 @@
    This serves to flush the operation to the PCI bus.
  */
 
-#define eeprom_delay()	inl(ee_addr)
+#define eeprom_delay()	er32(EECTL)
 
 /* The EEPROM commands include the alway-set leading bit. */
 #define EE_WRITE_CMD	(5 << 6)
@@ -550,67 +539,67 @@
 
 static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
 {
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 
-	outl(0x00000000, ioaddr + INTMASK);
+	ew32(INTMASK, 0x00000000);
 }
 
-static inline void __epic_pci_commit(long ioaddr)
+static inline void __epic_pci_commit(void __iomem *ioaddr)
 {
 #ifndef USE_IO_OPS
-	inl(ioaddr + INTMASK);
+	er32(INTMASK);
 #endif
 }
 
 static inline void epic_napi_irq_off(struct net_device *dev,
 				     struct epic_private *ep)
 {
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 
-	outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
+	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
 	__epic_pci_commit(ioaddr);
 }
 
 static inline void epic_napi_irq_on(struct net_device *dev,
 				    struct epic_private *ep)
 {
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 
 	/* No need to commit possible posted write */
-	outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
+	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
 }
 
-static int __devinit read_eeprom(long ioaddr, int location)
+static int __devinit read_eeprom(struct epic_private *ep, int location)
 {
+	void __iomem *ioaddr = ep->ioaddr;
 	int i;
 	int retval = 0;
-	long ee_addr = ioaddr + EECTL;
 	int read_cmd = location |
-		(inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
+		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
 
-	outl(EE_ENB & ~EE_CS, ee_addr);
-	outl(EE_ENB, ee_addr);
+	ew32(EECTL, EE_ENB & ~EE_CS);
+	ew32(EECTL, EE_ENB);
 
 	/* Shift the read command bits out. */
 	for (i = 12; i >= 0; i--) {
 		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
-		outl(EE_ENB | dataval, ee_addr);
+		ew32(EECTL, EE_ENB | dataval);
 		eeprom_delay();
-		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
 		eeprom_delay();
 	}
-	outl(EE_ENB, ee_addr);
+	ew32(EECTL, EE_ENB);
 
 	for (i = 16; i > 0; i--) {
-		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
 		eeprom_delay();
-		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
-		outl(EE_ENB, ee_addr);
+		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
+		ew32(EECTL, EE_ENB);
 		eeprom_delay();
 	}
 
 	/* Terminate the EEPROM access. */
-	outl(EE_ENB & ~EE_CS, ee_addr);
+	ew32(EECTL, EE_ENB & ~EE_CS);
 	return retval;
 }
 
@@ -618,22 +607,23 @@
 #define MII_WRITEOP		2
 static int mdio_read(struct net_device *dev, int phy_id, int location)
 {
-	long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
 	int i;
 
-	outl(read_cmd, ioaddr + MIICtrl);
+	ew32(MIICtrl, read_cmd);
 	/* Typical operation takes 25 loops. */
 	for (i = 400; i > 0; i--) {
 		barrier();
-		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
+		if ((er32(MIICtrl) & MII_READOP) == 0) {
 			/* Work around read failure bug. */
 			if (phy_id == 1 && location < 6 &&
-			    inw(ioaddr + MIIData) == 0xffff) {
-				outl(read_cmd, ioaddr + MIICtrl);
+			    er16(MIIData) == 0xffff) {
+				ew32(MIICtrl, read_cmd);
 				continue;
 			}
-			return inw(ioaddr + MIIData);
+			return er16(MIIData);
 		}
 	}
 	return 0xffff;
@@ -641,14 +631,15 @@
 
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
 {
-	long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 	int i;
 
-	outw(value, ioaddr + MIIData);
-	outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
+	ew16(MIIData, value);
+	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
 	for (i = 10000; i > 0; i--) {
 		barrier();
-		if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
+		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
 			break;
 	}
 }
@@ -657,25 +648,26 @@
 static int epic_open(struct net_device *dev)
 {
 	struct epic_private *ep = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
-	int i;
-	int retval;
+	void __iomem *ioaddr = ep->ioaddr;
+	const int irq = ep->pci_dev->irq;
+	int rc, i;
 
 	/* Soft reset the chip. */
-	outl(0x4001, ioaddr + GENCTL);
+	ew32(GENCTL, 0x4001);
 
 	napi_enable(&ep->napi);
-	if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) {
+	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
+	if (rc) {
 		napi_disable(&ep->napi);
-		return retval;
+		return rc;
 	}
 
 	epic_init_ring(dev);
 
-	outl(0x4000, ioaddr + GENCTL);
+	ew32(GENCTL, 0x4000);
 	/* This magic is documented in SMSC app note 7.15 */
 	for (i = 16; i > 0; i--)
-		outl(0x0008, ioaddr + TEST1);
+		ew32(TEST1, 0x0008);
 
 	/* Pull the chip out of low-power mode, enable interrupts, and set for
 	   PCI read multiple.  The MIIcfg setting and strange write order are
@@ -683,29 +675,29 @@
 	   wiring on the Ositech CardBus card.
 	*/
 #if 0
-	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
+	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
 #endif
 	if (ep->chip_flags & MII_PWRDWN)
-		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 
 	/* Tell the chip to byteswap descriptors on big-endian hosts */
 #ifdef __BIG_ENDIAN
-	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
-	inl(ioaddr + GENCTL);
-	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
+	er32(GENCTL);
+	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
 #else
-	outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
-	inl(ioaddr + GENCTL);
-	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
+	er32(GENCTL);
+	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
 #endif
 
 	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
 
 	for (i = 0; i < 3; i++)
-		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
 
 	ep->tx_threshold = TX_FIFO_THRESH;
-	outl(ep->tx_threshold, ioaddr + TxThresh);
+	ew32(TxThresh, ep->tx_threshold);
 
 	if (media2miictl[dev->if_port & 15]) {
 		if (ep->mii_phy_cnt)
@@ -731,26 +723,27 @@
 		}
 	}
 
-	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
-	outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
-	outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
+	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
+	ew32(PRxCDAR, ep->rx_ring_dma);
+	ew32(PTxCDAR, ep->tx_ring_dma);
 
 	/* Start the chip's Rx process. */
 	set_rx_mode(dev);
-	outl(StartRx | RxQueued, ioaddr + COMMAND);
+	ew32(COMMAND, StartRx | RxQueued);
 
 	netif_start_queue(dev);
 
 	/* Enable interrupts by setting the interrupt mask. */
-	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-		 | CntFull | TxUnderrun
-		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
+	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
+	     TxUnderrun);
 
-	if (debug > 1)
-		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
-			   "%s-duplex.\n",
-			   dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
-			   ep->mii.full_duplex ? "full" : "half");
+	if (debug > 1) {
+		printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
+		       "status %4.4x %s-duplex.\n",
+		       dev->name, ioaddr, irq, er32(GENCTL),
+		       ep->mii.full_duplex ? "full" : "half");
+	}
 
 	/* Set the timer to check for link beat and perhaps switch
 	   to an alternate media type. */
@@ -760,27 +753,29 @@
 	ep->timer.function = epic_timer;				/* timer handler */
 	add_timer(&ep->timer);
 
-	return 0;
+	return rc;
 }
 
 /* Reset the chip to recover from a PCI transaction error.
    This may occur at interrupt time. */
 static void epic_pause(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
+	struct net_device_stats *stats = &dev->stats;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 
 	netif_stop_queue (dev);
 
 	/* Disable interrupts by clearing the interrupt mask. */
-	outl(0x00000000, ioaddr + INTMASK);
+	ew32(INTMASK, 0x00000000);
 	/* Stop the chip's Tx and Rx DMA processes. */
-	outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
+	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);
 
 	/* Update the error counts. */
-	if (inw(ioaddr + COMMAND) != 0xffff) {
-		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+	if (er16(COMMAND) != 0xffff) {
+		stats->rx_missed_errors	+= er8(MPCNT);
+		stats->rx_frame_errors	+= er8(ALICNT);
+		stats->rx_crc_errors	+= er8(CRCCNT);
 	}
 
 	/* Remove the packets on the Rx queue. */
@@ -789,12 +784,12 @@
 
 static void epic_restart(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 	int i;
 
 	/* Soft reset the chip. */
-	outl(0x4001, ioaddr + GENCTL);
+	ew32(GENCTL, 0x4001);
 
 	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
 		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
@@ -802,47 +797,46 @@
 
 	/* This magic is documented in SMSC app note 7.15 */
 	for (i = 16; i > 0; i--)
-		outl(0x0008, ioaddr + TEST1);
+		ew32(TEST1, 0x0008);
 
 #ifdef __BIG_ENDIAN
-	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
 #else
-	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
 #endif
-	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
+	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
 	if (ep->chip_flags & MII_PWRDWN)
-		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 
 	for (i = 0; i < 3; i++)
-		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
 
 	ep->tx_threshold = TX_FIFO_THRESH;
-	outl(ep->tx_threshold, ioaddr + TxThresh);
-	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
-	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
-		sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
-	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
-		 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
+	ew32(TxThresh, ep->tx_threshold);
+	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
+	ew32(PRxCDAR, ep->rx_ring_dma +
+	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
+	ew32(PTxCDAR, ep->tx_ring_dma +
+	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));
 
 	/* Start the chip's Rx process. */
 	set_rx_mode(dev);
-	outl(StartRx | RxQueued, ioaddr + COMMAND);
+	ew32(COMMAND, StartRx | RxQueued);
 
 	/* Enable interrupts by setting the interrupt mask. */
-	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-		 | CntFull | TxUnderrun
-		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
+	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
+	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
+	     TxUnderrun);
 
 	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
 		   " interrupt %4.4x.\n",
-		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
-		   (int)inl(ioaddr + INTSTAT));
+		   dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
 }
 
 static void check_media(struct net_device *dev)
 {
 	struct epic_private *ep = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
 	int negotiated = mii_lpa & ep->mii.advertising;
 	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
@@ -856,7 +850,7 @@
 		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
 			   " partner capability of %4.4x.\n", dev->name,
 			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
-		outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
 	}
 }
 
@@ -864,16 +858,15 @@
 {
 	struct net_device *dev = (struct net_device *)data;
 	struct epic_private *ep = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 	int next_tick = 5*HZ;
 
 	if (debug > 3) {
 		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
-			   dev->name, (int)inl(ioaddr + TxSTAT));
+		       dev->name, er32(TxSTAT));
 		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
-			   "IntStatus %4.4x RxStatus %4.4x.\n",
-			   dev->name, (int)inl(ioaddr + INTMASK),
-			   (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
+		       "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
+		       er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
 	}
 
 	check_media(dev);
@@ -885,23 +878,22 @@
 static void epic_tx_timeout(struct net_device *dev)
 {
 	struct epic_private *ep = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 
 	if (debug > 0) {
 		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
-			   "Tx status %4.4x.\n",
-			   dev->name, (int)inw(ioaddr + TxSTAT));
+		       "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
 		if (debug > 1) {
 			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
 				   dev->name, ep->dirty_tx, ep->cur_tx);
 		}
 	}
-	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
+	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
 		dev->stats.tx_fifo_errors++;
-		outl(RestartTx, ioaddr + COMMAND);
+		ew32(COMMAND, RestartTx);
 	} else {
 		epic_restart(dev);
-		outl(TxQueued, dev->base_addr + COMMAND);
+		ew32(COMMAND, TxQueued);
 	}
 
 	dev->trans_start = jiffies; /* prevent tx timeout */
@@ -959,6 +951,7 @@
 static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 	int entry, free_count;
 	u32 ctrl_word;
 	unsigned long flags;
@@ -999,13 +992,12 @@
 
 	spin_unlock_irqrestore(&ep->lock, flags);
 	/* Trigger an immediate transmit demand. */
-	outl(TxQueued, dev->base_addr + COMMAND);
+	ew32(COMMAND, TxQueued);
 
 	if (debug > 4)
 		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
-			   "flag %2.2x Tx status %8.8x.\n",
-			   dev->name, (int)skb->len, entry, ctrl_word,
-			   (int)inl(dev->base_addr + TxSTAT));
+		       "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
+		       entry, ctrl_word, er32(TxSTAT));
 
 	return NETDEV_TX_OK;
 }
@@ -1086,18 +1078,17 @@
 {
 	struct net_device *dev = dev_instance;
 	struct epic_private *ep = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 	unsigned int handled = 0;
 	int status;
 
-	status = inl(ioaddr + INTSTAT);
+	status = er32(INTSTAT);
 	/* Acknowledge all of the current interrupt sources ASAP. */
-	outl(status & EpicNormalEvent, ioaddr + INTSTAT);
+	ew32(INTSTAT, status & EpicNormalEvent);
 
 	if (debug > 4) {
 		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
-				   "intstat=%#8.8x.\n", dev->name, status,
-				   (int)inl(ioaddr + INTSTAT));
+		       "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
 	}
 
 	if ((status & IntrSummary) == 0)
@@ -1118,19 +1109,21 @@
 
 	/* Check uncommon events all at once. */
 	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
+		struct net_device_stats *stats = &dev->stats;
+
 		if (status == EpicRemoved)
 			goto out;
 
 		/* Always update the error counts to avoid overhead later. */
-		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+		stats->rx_missed_errors	+= er8(MPCNT);
+		stats->rx_frame_errors	+= er8(ALICNT);
+		stats->rx_crc_errors	+= er8(CRCCNT);
 
 		if (status & TxUnderrun) { /* Tx FIFO underflow. */
-			dev->stats.tx_fifo_errors++;
-			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
+			stats->tx_fifo_errors++;
+			ew32(TxThresh, ep->tx_threshold += 128);
 			/* Restart the transmit process. */
-			outl(RestartTx, ioaddr + COMMAND);
+			ew32(COMMAND, RestartTx);
 		}
 		if (status & PCIBusErr170) {
 			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
@@ -1139,7 +1132,7 @@
 			epic_restart(dev);
 		}
 		/* Clear all error sources. */
-		outl(status & 0x7f18, ioaddr + INTSTAT);
+		ew32(INTSTAT, status & 0x7f18);
 	}
 
 out:
@@ -1248,17 +1241,17 @@
 
 static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
 {
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 	int status;
 
-	status = inl(ioaddr + INTSTAT);
+	status = er32(INTSTAT);
 
 	if (status == EpicRemoved)
 		return;
 	if (status & RxOverflow) 	/* Missed a Rx frame. */
 		dev->stats.rx_errors++;
 	if (status & (RxOverflow | RxFull))
-		outw(RxQueued, ioaddr + COMMAND);
+		ew16(COMMAND, RxQueued);
 }
 
 static int epic_poll(struct napi_struct *napi, int budget)
@@ -1266,7 +1259,7 @@
 	struct epic_private *ep = container_of(napi, struct epic_private, napi);
 	struct net_device *dev = ep->mii.dev;
 	int work_done = 0;
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = ep->ioaddr;
 
 rx_action:
 
@@ -1287,7 +1280,7 @@
 		more = ep->reschedule_in_poll;
 		if (!more) {
 			__napi_complete(napi);
-			outl(EpicNapiEvent, ioaddr + INTSTAT);
+			ew32(INTSTAT, EpicNapiEvent);
 			epic_napi_irq_on(dev, ep);
 		} else
 			ep->reschedule_in_poll--;
@@ -1303,8 +1296,9 @@
 
 static int epic_close(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct epic_private *ep = netdev_priv(dev);
+	struct pci_dev *pdev = ep->pci_dev;
+	void __iomem *ioaddr = ep->ioaddr;
 	struct sk_buff *skb;
 	int i;
 
@@ -1313,13 +1307,13 @@
 
 	if (debug > 1)
 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
-			   dev->name, (int)inl(ioaddr + INTSTAT));
+		       dev->name, er32(INTSTAT));
 
 	del_timer_sync(&ep->timer);
 
 	epic_disable_int(dev, ep);
 
-	free_irq(dev->irq, dev);
+	free_irq(pdev->irq, dev);
 
 	epic_pause(dev);
 
@@ -1330,7 +1324,7 @@
 		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
 		ep->rx_ring[i].buflength = 0;
 		if (skb) {
-			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
+			pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
 				 	 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(skb);
 		}
@@ -1341,26 +1335,28 @@
 		ep->tx_skbuff[i] = NULL;
 		if (!skb)
 			continue;
-		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
-				 skb->len, PCI_DMA_TODEVICE);
+		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
+				 PCI_DMA_TODEVICE);
 		dev_kfree_skb(skb);
 	}
 
 	/* Green! Leave the chip in low-power mode. */
-	outl(0x0008, ioaddr + GENCTL);
+	ew32(GENCTL, 0x0008);
 
 	return 0;
 }
 
 static struct net_device_stats *epic_get_stats(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 
 	if (netif_running(dev)) {
-		/* Update the error counts. */
-		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
-		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
-		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
+		struct net_device_stats *stats = &dev->stats;
+
+		stats->rx_missed_errors	+= er8(MPCNT);
+		stats->rx_frame_errors	+= er8(ALICNT);
+		stats->rx_crc_errors	+= er8(CRCCNT);
 	}
 
 	return &dev->stats;
@@ -1373,13 +1369,13 @@
 
 static void set_rx_mode(struct net_device *dev)
 {
-	long ioaddr = dev->base_addr;
 	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 	unsigned char mc_filter[8];		 /* Multicast hash filter */
 	int i;
 
 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
-		outl(0x002C, ioaddr + RxCtrl);
+		ew32(RxCtrl, 0x002c);
 		/* Unconditionally log net taps. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
@@ -1387,9 +1383,9 @@
 		   is never enabled. */
 		/* Too many to filter perfectly -- accept all multicasts. */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
-		outl(0x000C, ioaddr + RxCtrl);
+		ew32(RxCtrl, 0x000c);
 	} else if (netdev_mc_empty(dev)) {
-		outl(0x0004, ioaddr + RxCtrl);
+		ew32(RxCtrl, 0x0004);
 		return;
 	} else {					/* Never executed, for now. */
 		struct netdev_hw_addr *ha;
@@ -1404,7 +1400,7 @@
 	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
 	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
 		for (i = 0; i < 4; i++)
-			outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
+			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
 		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
 	}
 }
@@ -1466,22 +1462,26 @@
 
 static int ethtool_begin(struct net_device *dev)
 {
-	unsigned long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
+
 	/* power-up, if interface is down */
-	if (! netif_running(dev)) {
-		outl(0x0200, ioaddr + GENCTL);
-		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+	if (!netif_running(dev)) {
+		ew32(GENCTL, 0x0200);
+		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 	}
 	return 0;
 }
 
 static void ethtool_complete(struct net_device *dev)
 {
-	unsigned long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
+
 	/* power-down, if interface is down */
-	if (! netif_running(dev)) {
-		outl(0x0008, ioaddr + GENCTL);
-		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+	if (!netif_running(dev)) {
+		ew32(GENCTL, 0x0008);
+		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
 	}
 }
 
@@ -1500,14 +1500,14 @@
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct epic_private *np = netdev_priv(dev);
-	long ioaddr = dev->base_addr;
+	void __iomem *ioaddr = np->ioaddr;
 	struct mii_ioctl_data *data = if_mii(rq);
 	int rc;
 
 	/* power-up, if interface is down */
 	if (! netif_running(dev)) {
-		outl(0x0200, ioaddr + GENCTL);
-		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+		ew32(GENCTL, 0x0200);
+		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 	}
 
 	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
@@ -1517,14 +1517,14 @@
 
 	/* power-down, if interface is down */
 	if (! netif_running(dev)) {
-		outl(0x0008, ioaddr + GENCTL);
-		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+		ew32(GENCTL, 0x0008);
+		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
 	}
 	return rc;
 }
 
 
-static void __devexit epic_remove_one (struct pci_dev *pdev)
+static void __devexit epic_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct epic_private *ep = netdev_priv(dev);
@@ -1532,9 +1532,7 @@
 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
 	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
 	unregister_netdev(dev);
-#ifndef USE_IO_OPS
-	iounmap((void*) dev->base_addr);
-#endif
+	pci_iounmap(pdev, ep->ioaddr);
 	pci_release_regions(pdev);
 	free_netdev(dev);
 	pci_disable_device(pdev);
@@ -1548,13 +1546,14 @@
 static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
-	long ioaddr = dev->base_addr;
+	struct epic_private *ep = netdev_priv(dev);
+	void __iomem *ioaddr = ep->ioaddr;
 
 	if (!netif_running(dev))
 		return 0;
 	epic_pause(dev);
 	/* Put the chip into low-power mode. */
-	outl(0x0008, ioaddr + GENCTL);
+	ew32(GENCTL, 0x0008);
 	/* pci_power_off(pdev, -1); */
 	return 0;
 }
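
The er8/er16/er32 and ew16/ew32 helpers used throughout the converted epic100 code above are not shown in these hunks. Presumably they are thin wrappers around ioread*/iowrite* on the locally declared "void __iomem *ioaddr", roughly along these lines (a sketch of the assumed accessor shape, not the driver's actual definitions):

	/* Sketch only: assumes a local "void __iomem *ioaddr" is in scope,
	 * as every converted function above declares. */
	#define er8(reg)	ioread8(ioaddr + (reg))
	#define er16(reg)	ioread16(ioaddr + (reg))
	#define er32(reg)	ioread32(ioaddr + (reg))
	#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
	#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))

This also matches why ioaddr moved from a long in dev->base_addr to a void __iomem * in epic_private: pci_iounmap() and the ioread/iowrite family both operate on the __iomem cookie from the PCI BAR mapping.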
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index cd3defb..dab9c6f 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2066,6 +2066,7 @@
 	.get_eeprom_len = smsc911x_ethtool_get_eeprom_len,
 	.get_eeprom = smsc911x_ethtool_get_eeprom,
 	.set_eeprom = smsc911x_ethtool_set_eeprom,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 static const struct net_device_ops smsc911x_netdev_ops = {
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 3838647..fd33b21 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -54,7 +54,7 @@
 };
 
 struct smsc9420_pdata {
-	void __iomem *base_addr;
+	void __iomem *ioaddr;
 	struct pci_dev *pdev;
 	struct net_device *dev;
 
@@ -114,13 +114,13 @@
 
 static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
 {
-	return ioread32(pd->base_addr + offset);
+	return ioread32(pd->ioaddr + offset);
 }
 
 static inline void
 smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value)
 {
-	iowrite32(value, pd->base_addr + offset);
+	iowrite32(value, pd->ioaddr + offset);
 }
 
 static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
@@ -469,6 +469,7 @@
 	.set_eeprom = smsc9420_ethtool_set_eeprom,
 	.get_regs_len = smsc9420_ethtool_getregslen,
 	.get_regs = smsc9420_ethtool_getregs,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 /* Sets the device MAC address to dev_addr */
@@ -659,7 +660,7 @@
 	ulong flags;
 
 	BUG_ON(!pd);
-	BUG_ON(!pd->base_addr);
+	BUG_ON(!pd->ioaddr);
 
 	int_cfg = smsc9420_reg_read(pd, INT_CFG);
 
@@ -720,9 +721,12 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void smsc9420_poll_controller(struct net_device *dev)
 {
-	disable_irq(dev->irq);
+	struct smsc9420_pdata *pd = netdev_priv(dev);
+	const int irq = pd->pdev->irq;
+
+	disable_irq(irq);
 	smsc9420_isr(0, dev);
-	enable_irq(dev->irq);
+	enable_irq(irq);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
@@ -759,7 +763,7 @@
 	smsc9420_stop_rx(pd);
 	smsc9420_free_rx_ring(pd);
 
-	free_irq(dev->irq, pd);
+	free_irq(pd->pdev->irq, pd);
 
 	smsc9420_dmac_soft_reset(pd);
 
@@ -1331,15 +1335,12 @@
 
 static int smsc9420_open(struct net_device *dev)
 {
-	struct smsc9420_pdata *pd;
+	struct smsc9420_pdata *pd = netdev_priv(dev);
 	u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl;
+	const int irq = pd->pdev->irq;
 	unsigned long flags;
 	int result = 0, timeout;
 
-	BUG_ON(!dev);
-	pd = netdev_priv(dev);
-	BUG_ON(!pd);
-
 	if (!is_valid_ether_addr(dev->dev_addr)) {
 		smsc_warn(IFUP, "dev_addr is not a valid MAC address");
 		result = -EADDRNOTAVAIL;
@@ -1358,9 +1359,10 @@
 	smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
 	smsc9420_pci_flush_write(pd);
 
-	if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
-			DRV_NAME, pd)) {
-		smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq);
+	result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
+			     DRV_NAME, pd);
+	if (result) {
+		smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
 		result = -ENODEV;
 		goto out_0;
 	}
@@ -1395,7 +1397,7 @@
 	smsc9420_pci_flush_write(pd);
 
 	/* test the IRQ connection to the ISR */
-	smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);
+	smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq);
 	pd->software_irq_signal = false;
 
 	spin_lock_irqsave(&pd->int_lock, flags);
@@ -1430,7 +1432,7 @@
 		goto out_free_irq_1;
 	}
 
-	smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq);
+	smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq);
 
 	result = smsc9420_alloc_tx_ring(pd);
 	if (result) {
@@ -1490,7 +1492,7 @@
 out_free_tx_ring_2:
 	smsc9420_free_tx_ring(pd);
 out_free_irq_1:
-	free_irq(dev->irq, pd);
+	free_irq(irq, pd);
 out_0:
 	return result;
 }
@@ -1519,7 +1521,7 @@
 		smsc9420_stop_rx(pd);
 		smsc9420_free_rx_ring(pd);
 
-		free_irq(dev->irq, pd);
+		free_irq(pd->pdev->irq, pd);
 
 		netif_device_detach(dev);
 	}
@@ -1552,6 +1554,7 @@
 		smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
 
 	if (netif_running(dev)) {
+		/* FIXME: gross. It looks like an ancient PM relic. */
 		err = smsc9420_open(dev);
 		netif_device_attach(dev);
 	}
@@ -1625,8 +1628,6 @@
 	/* registers are double mapped with 0 offset for LE and 0x200 for BE */
 	virt_addr += LAN9420_CPSR_ENDIAN_OFFSET;
 
-	dev->base_addr = (ulong)virt_addr;
-
 	pd = netdev_priv(dev);
 
 	/* pci descriptors are created in the PCI consistent area */
@@ -1646,7 +1647,7 @@
 
 	pd->pdev = pdev;
 	pd->dev = dev;
-	pd->base_addr = virt_addr;
+	pd->ioaddr = virt_addr;
 	pd->msg_enable = smsc_debug;
 	pd->rx_csum = true;
 
@@ -1669,7 +1670,6 @@
 
 	dev->netdev_ops = &smsc9420_netdev_ops;
 	dev->ethtool_ops = &smsc9420_ethtool_ops;
-	dev->irq = pdev->irq;
 
 	netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);
 
@@ -1727,7 +1727,7 @@
 	pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
 		(RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
 
-	iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET);
+	iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
 	pci_release_regions(pdev);
 	free_netdev(dev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 0319d64..bcd54d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -97,6 +97,16 @@
 	unsigned long normal_irq_n;
 };
 
+/* CSR Frequency Access Defines*/
+#define CSR_F_35M	35000000
+#define CSR_F_60M	60000000
+#define CSR_F_100M	100000000
+#define CSR_F_150M	150000000
+#define CSR_F_250M	250000000
+#define CSR_F_300M	300000000
+
+#define	MAC_CSR_H_FRQ_MASK	0x20
+
 #define HASH_TABLE_SIZE 64
 #define PAUSE_TIME 0x200
 
@@ -137,6 +147,7 @@
 #define DMA_HW_FEAT_FLEXIPPSEN	0x04000000 /* Flexible PPS Output */
 #define DMA_HW_FEAT_SAVLANINS	0x08000000 /* Source Addr or VLAN Insertion */
 #define DMA_HW_FEAT_ACTPHYIF	0x70000000 /* Active/selected PHY interface */
+#define DEFAULT_DMA_PBL		8
 
 enum rx_frame_status { /* IPC status */
 	good_frame = 0,
@@ -228,7 +239,7 @@
 	int (*get_rx_owner) (struct dma_desc *p);
 	void (*set_rx_owner) (struct dma_desc *p);
 	/* Get the receive frame size */
-	int (*get_rx_frame_len) (struct dma_desc *p);
+	int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type);
 	/* Return the reception status looking at the RDES1 */
 	int (*rx_status) (void *data, struct stmmac_extra_stats *x,
 			  struct dma_desc *p);
@@ -236,7 +247,8 @@
 
 struct stmmac_dma_ops {
 	/* DMA core initialization */
-	int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+	int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
+		     int burst_len, u32 dma_tx, u32 dma_rx);
 	/* Dump DMA registers */
 	void (*dump_regs) (void __iomem *ioaddr);
 	/* Set tx/rx threshold in the csr6 register
@@ -261,14 +273,14 @@
 struct stmmac_ops {
 	/* MAC core initialization */
 	void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
-	/* Support checksum offload engine */
-	int  (*rx_coe) (void __iomem *ioaddr);
+	/* Enable and verify that the IPC module is supported */
+	int (*rx_ipc) (void __iomem *ioaddr);
 	/* Dump MAC registers */
 	void (*dump_regs) (void __iomem *ioaddr);
 	/* Handle extra events on specific interrupts hw dependent */
 	void (*host_irq_status) (void __iomem *ioaddr);
 	/* Multicast filter setting */
-	void (*set_filter) (struct net_device *dev);
+	void (*set_filter) (struct net_device *dev, int id);
 	/* Flow control setting */
 	void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
 			   unsigned int fc, unsigned int pause_time);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index cfcef0e..23478bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -61,9 +61,11 @@
 };
 
 /* GMAC HW ADDR regs */
-#define GMAC_ADDR_HIGH(reg)		(0x00000040+(reg * 8))
-#define GMAC_ADDR_LOW(reg)		(0x00000044+(reg * 8))
-#define GMAC_MAX_UNICAST_ADDRESSES	16
+#define GMAC_ADDR_HIGH(reg)	(((reg > 15) ? 0x00000800 : 0x00000040) + \
+				(reg * 8))
+#define GMAC_ADDR_LOW(reg)	(((reg > 15) ? 0x00000804 : 0x00000044) + \
+				(reg * 8))
+#define GMAC_MAX_PERFECT_ADDRESSES	32
 
 #define GMAC_AN_CTRL	0x000000c0	/* AN control */
 #define GMAC_AN_STATUS	0x000000c4	/* AN status */
@@ -139,10 +141,11 @@
 };
 
 #define DMA_BUS_MODE_FB		0x00010000	/* Fixed burst */
+#define DMA_BUS_MODE_MB		0x04000000	/* Mixed burst */
 #define DMA_BUS_MODE_RPBL_MASK	0x003e0000	/* Rx-Programmable Burst Len */
 #define DMA_BUS_MODE_RPBL_SHIFT	17
 #define DMA_BUS_MODE_USP	0x00800000
-#define DMA_BUS_MODE_4PBL	0x01000000
+#define DMA_BUS_MODE_PBL	0x01000000
 #define DMA_BUS_MODE_AAL	0x02000000
 
 /* DMA CRS Control and Status Register Mapping */
@@ -205,4 +208,7 @@
 #define GMAC_MMC_TX_INTR   0x108
 #define GMAC_MMC_RX_CSUM_OFFLOAD   0x208
 
+/* Synopsys Core versions */
+#define	DWMAC_CORE_3_40	34
+
 extern const struct stmmac_dma_ops dwmac1000_dma_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b1c48b9..b5e4d02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -46,7 +46,7 @@
 #endif
 }
 
-static int dwmac1000_rx_coe_supported(void __iomem *ioaddr)
+static int dwmac1000_rx_ipc_enable(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + GMAC_CONTROL);
 
@@ -84,10 +84,11 @@
 				GMAC_ADDR_LOW(reg_n));
 }
 
-static void dwmac1000_set_filter(struct net_device *dev)
+static void dwmac1000_set_filter(struct net_device *dev, int id)
 {
 	void __iomem *ioaddr = (void __iomem *) dev->base_addr;
 	unsigned int value = 0;
+	unsigned int perfect_addr_number;
 
 	CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
 		 __func__, netdev_mc_count(dev), netdev_uc_count(dev));
@@ -121,8 +122,14 @@
 		writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
 	}
 
+	/* Extra 16 regs are available in cores newer than 3.40. */
+	if (id > DWMAC_CORE_3_40)
+		perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES;
+	else
+		perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES / 2;
+
 	/* Handle multiple unicast addresses (perfect filtering)*/
-	if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES)
+	if (netdev_uc_count(dev) > perfect_addr_number)
 		/* Switch to promiscuous mode if more than 16 addrs
 		   are required */
 		value |= GMAC_FRAME_FILTER_PR;
@@ -211,7 +218,7 @@
 
 static const struct stmmac_ops dwmac1000_ops = {
 	.core_init = dwmac1000_core_init,
-	.rx_coe = dwmac1000_rx_coe_supported,
+	.rx_ipc = dwmac1000_rx_ipc_enable,
 	.dump_regs = dwmac1000_dump_regs,
 	.host_irq_status = dwmac1000_irq_status,
 	.set_filter = dwmac1000_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 4d5402a..0335000 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,8 +30,8 @@
 #include "dwmac1000.h"
 #include "dwmac_dma.h"
 
-static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
-			      u32 dma_rx)
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
+			      int mb, int burst_len, u32 dma_tx, u32 dma_rx)
 {
 	u32 value = readl(ioaddr + DMA_BUS_MODE);
 	int limit;
@@ -48,15 +48,51 @@
 	if (limit < 0)
 		return -EBUSY;
 
-	value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
-	    ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
-	     (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+	/*
+	 * Set the DMA PBL (Programmable Burst Length) mode.
+	 * Before stmmac core 3.50 this mode bit meant 4xPBL;
+	 * post 3.50 it acts as 8xPBL.
+	 * For core rev < 3.5, when the core is set for 4xPBL mode, the
+	 * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats
+	 * depending on the pbl value.
+	 * For core rev > 3.5, when the core is set for 8xPBL mode, the
+	 * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats
+	 * depending on the pbl value.
+	 */
+	value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
+		(pbl << DMA_BUS_MODE_RPBL_SHIFT));
+
+	/* Set the Fixed burst mode */
+	if (fb)
+		value |= DMA_BUS_MODE_FB;
+
+	/* Mixed Burst has no effect when fb is set */
+	if (mb)
+		value |= DMA_BUS_MODE_MB;
 
 #ifdef CONFIG_STMMAC_DA
 	value |= DMA_BUS_MODE_DA;	/* Rx has priority over tx */
 #endif
 	writel(value, ioaddr + DMA_BUS_MODE);
 
+	/* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
+	 * register with the supported bursts.
+	 *
+	 * Note: this is applicable only to revision GMACv3.61a. On
+	 * older versions this register is reserved and has no
+	 * effect.
+	 *
+	 * Note:
+	 *  For Fixed Burst Mode: writing 0xFF to this register with
+	 *  the configuration passed from platform code ensures that
+	 *  all bursts supported by the core are enabled, while those
+	 *  that are not supported simply remain ineffective.
+	 *
+	 *  For Non Fixed Burst Mode: provide the maximum value of the
+	 *  burst length. Any burst equal to or below the provided
+	 *  burst length is then allowed to perform. */
+	writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
+
 	/* Mask interrupts by writing to CSR7 */
 	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
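
As a worked example of the PBL comment above (assuming pbl is the raw value programmed into DMA_BUS_MODE): with pbl = 8, a core older than 3.50 in 4xPBL mode bursts up to 4 * 8 = 32 beats, while a newer core in 8xPBL mode bursts up to 8 * 8 = 64 beats for the same register value; DEFAULT_DMA_PBL is 8, so that is what an unconfigured platform gets.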
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 138fb8dd..19e0f4e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -43,11 +43,6 @@
 #endif
 }
 
-static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
-{
-	return 0;
-}
-
 static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
 {
 	pr_info("\t----------------------------------------------\n"
@@ -72,6 +67,11 @@
 		readl(ioaddr + MAC_VLAN2));
 }
 
+static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
+{
+	return 0;
+}
+
 static void dwmac100_irq_status(void __iomem *ioaddr)
 {
 	return;
@@ -89,7 +89,7 @@
 	stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
 }
 
-static void dwmac100_set_filter(struct net_device *dev)
+static void dwmac100_set_filter(struct net_device *dev, int id)
 {
 	void __iomem *ioaddr = (void __iomem *) dev->base_addr;
 	u32 value = readl(ioaddr + MAC_CONTROL);
@@ -160,7 +160,7 @@
 
 static const struct stmmac_ops dwmac100_ops = {
 	.core_init = dwmac100_core_init,
-	.rx_coe = dwmac100_rx_coe_supported,
+	.rx_ipc = dwmac100_rx_ipc_enable,
 	.dump_regs = dwmac100_dump_mac_regs,
 	.host_irq_status = dwmac100_irq_status,
 	.set_filter = dwmac100_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index bc17fd0..c2b4d55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,8 +32,8 @@
 #include "dwmac100.h"
 #include "dwmac_dma.h"
 
-static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
-			     u32 dma_rx)
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
+			     int mb, int burst_len, u32 dma_tx, u32 dma_rx)
 {
 	u32 value = readl(ioaddr + DMA_BUS_MODE);
 	int limit;
@@ -52,7 +52,7 @@
 
 	/* Enable Application Access by writing to DMA CSR0 */
 	writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
-	       ioaddr + DMA_BUS_MODE);
+			ioaddr + DMA_BUS_MODE);
 
 	/* Mask interrupts by writing to CSR7 */
 	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 437edac..6e0360f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -32,6 +32,7 @@
 #define DMA_CONTROL		0x00001018	/* Ctrl (Operational Mode) */
 #define DMA_INTR_ENA		0x0000101c	/* Interrupt Enable */
 #define DMA_MISSED_FRAME_CTR	0x00001020	/* Missed Frame Counter */
+#define DMA_AXI_BUS_MODE       0x00001028      /* AXI Bus Mode */
 #define DMA_CUR_TX_BUF_ADDR	0x00001050	/* Current Host Tx Buffer */
 #define DMA_CUR_RX_BUF_ADDR	0x00001054	/* Current Host Rx Buffer */
 #define DMA_HW_FEATURE		0x00001058	/* HW Feature Register */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index f20aa12..4e0e18a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -31,6 +31,8 @@
 #define DWMAC_LIB_DBG(fmt, args...)  do { } while (0)
 #endif
 
+#define GMAC_HI_REG_AE		0x80000000
+
 /* CSR1 enables the transmit DMA to check for new descriptor */
 void dwmac_enable_dma_transmission(void __iomem *ioaddr)
 {
@@ -233,7 +235,11 @@
 	unsigned long data;
 
 	data = (addr[5] << 8) | addr[4];
-	writel(data, ioaddr + high);
+	/* For MAC Addr registers we have to set the Address Enable (AE)
+	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+	 * is RO.
+	 */
+	writel(data | GMAC_HI_REG_AE, ioaddr + high);
 	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
 	writel(data, ioaddr + low);
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index ad1b627..2fc8ef9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -22,6 +22,7 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/stmmac.h>
 #include "common.h"
 #include "descs_com.h"
 
@@ -309,9 +310,17 @@
 	p->des01.etx.interrupt = 1;
 }
 
-static int enh_desc_get_rx_frame_len(struct dma_desc *p)
+static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
 {
-	return p->des01.erx.frame_length;
+	/* The type-1 checksum offload engines append the checksum at
+	 * the end of the frame and the two checksum bytes are counted
+	 * in the reported length.
+	 * Adjust the frame length accordingly for type-1 checksum
+	 * offload engines. */
+	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+		return p->des01.erx.frame_length - 2;
+	else
+		return p->des01.erx.frame_length;
 }
 
 const struct stmmac_desc_ops enh_desc_ops = {
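
A quick worked example of the type-1 adjustment above (assuming frame_length otherwise reports the on-wire length): a 64-byte frame arrives with the appended 2-byte checksum counted, so the descriptor reports frame_length = 66 and the helper returns 66 - 2 = 64; for any other rx_coe_type the length is returned unmodified, as the code shows. The same adjustment is repeated for the normal descriptors in norm_desc.c below.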
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 25953bb..68962c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -22,6 +22,7 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/stmmac.h>
 #include "common.h"
 #include "descs_com.h"
 
@@ -201,9 +202,17 @@
 	p->des01.tx.interrupt = 1;
 }
 
-static int ndesc_get_rx_frame_len(struct dma_desc *p)
+static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
 {
-	return p->des01.rx.frame_length;
+	/* The type-1 checksum offload engines append the checksum at
+	 * the end of the frame and the two checksum bytes are counted
+	 * in the reported length.
+	 * Adjust the frame length accordingly for type-1 checksum
+	 * offload engines. */
+	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+		return p->des01.rx.frame_length - 2;
+	else
+		return p->des01.rx.frame_length;
 }
 
 const struct stmmac_desc_ops ndesc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b4b095f..6b5d060 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -21,7 +21,9 @@
 *******************************************************************************/
 
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
-#define DRV_MODULE_VERSION	"Feb_2012"
+#define DRV_MODULE_VERSION	"March_2012"
+
+#include <linux/clk.h>
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 #include "common.h"
@@ -56,8 +58,6 @@
 
 	struct stmmac_extra_stats xstats;
 	struct napi_struct napi;
-
-	int rx_coe;
 	int no_csum_insertion;
 
 	struct phy_device *phydev;
@@ -81,6 +81,11 @@
 	struct stmmac_counters mmc;
 	struct dma_features dma_cap;
 	int hw_cap_support;
+#ifdef CONFIG_HAVE_CLK
+	struct clk *stmmac_clk;
+#endif
+	int clk_csr;
+	int synopsys_id;
 };
 
 extern int phyaddr;
@@ -99,3 +104,42 @@
 struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 				     struct plat_stmmacenet_data *plat_dat,
 				     void __iomem *addr);
+
+#ifdef CONFIG_HAVE_CLK
+static inline int stmmac_clk_enable(struct stmmac_priv *priv)
+{
+	if (!IS_ERR(priv->stmmac_clk))
+		return clk_enable(priv->stmmac_clk);
+
+	return 0;
+}
+
+static inline void stmmac_clk_disable(struct stmmac_priv *priv)
+{
+	if (IS_ERR(priv->stmmac_clk))
+		return;
+
+	clk_disable(priv->stmmac_clk);
+}
+static inline int stmmac_clk_get(struct stmmac_priv *priv)
+{
+	priv->stmmac_clk = clk_get(priv->device, NULL);
+
+	if (IS_ERR(priv->stmmac_clk))
+		return PTR_ERR(priv->stmmac_clk);
+
+	return 0;
+}
+#else
+static inline int stmmac_clk_enable(struct stmmac_priv *priv)
+{
+	return 0;
+}
+static inline void stmmac_clk_disable(struct stmmac_priv *priv)
+{
+}
+static inline int stmmac_clk_get(struct stmmac_priv *priv)
+{
+	return 0;
+}
+#endif /* CONFIG_HAVE_CLK */
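
For reference, the intended call pattern for these clock helpers, as used by the stmmac_main.c hunks later in this patch, is roughly:

	/* probe:   acquire the CSR clock (a missing clock is only warned about) */
	stmmac_clk_get(priv);
	/* open, and resume when PMT wake-up was not used */
	stmmac_clk_enable(priv);
	/* release, and suspend when PMT wake-up is not used */
	stmmac_clk_disable(priv);

The IS_ERR() guards are the design point here: on platforms that provide no CSR clock, clk_get() failure is only warned about at probe time and the enable/disable calls quietly become no-ops, so the driver keeps working unchanged.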
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index f98e151..ce43184 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -481,6 +481,7 @@
 	.get_wol = stmmac_get_wol,
 	.set_wol = stmmac_set_wol,
 	.get_sset_count	= stmmac_get_sset_count,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 48d56da..7096633 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -163,6 +163,38 @@
 		pause = PAUSE_TIME;
 }
 
+static void stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+#ifdef CONFIG_HAVE_CLK
+	u32 clk_rate;
+
+	if (IS_ERR(priv->stmmac_clk))
+		return;
+
+	clk_rate = clk_get_rate(priv->stmmac_clk);
+
+	/* The platform-provided default clk_csr is assumed valid in
+	 * all cases except the ones handled below. */
+	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
+		if (clk_rate < CSR_F_35M)
+			priv->clk_csr = STMMAC_CSR_20_35M;
+		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
+			priv->clk_csr = STMMAC_CSR_35_60M;
+		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
+			priv->clk_csr = STMMAC_CSR_60_100M;
+		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
+			priv->clk_csr = STMMAC_CSR_100_150M;
+		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
+			priv->clk_csr = STMMAC_CSR_150_250M;
+		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+			priv->clk_csr = STMMAC_CSR_250_300M;
+	} /* For values higher than the IEEE 802.3 specified frequency
+	   * we cannot estimate the proper divider as the frequency of
+	   * clk_csr_i is not known. So we do not change the default
+	   * divider. */
+#endif
+}
+
 #if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
 static void print_pkt(unsigned char *buf, int len)
 {
@@ -307,7 +339,13 @@
 	priv->speed = 0;
 	priv->oldduplex = -1;
 
-	snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id);
+	if (priv->plat->phy_bus_name)
+		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+				priv->plat->phy_bus_name, priv->plat->bus_id);
+	else
+		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+				priv->plat->bus_id);
+
 	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 		 priv->plat->phy_addr);
 	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id);
@@ -884,6 +922,26 @@
 						   priv->dev->dev_addr);
 }
 
+static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+{
+	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
+	int mixed_burst = 0;
+
+	/* Some DMA parameters can be passed from the platform;
+	 * in case they are not passed we keep the defaults
+	 * (good for all the chips) and init the DMA. */
+	if (priv->plat->dma_cfg) {
+		pbl = priv->plat->dma_cfg->pbl;
+		fixed_burst = priv->plat->dma_cfg->fixed_burst;
+		mixed_burst = priv->plat->dma_cfg->mixed_burst;
+		burst_len = priv->plat->dma_cfg->burst_len;
+	}
+
+	return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
+				   burst_len, priv->dma_tx_phy,
+				   priv->dma_rx_phy);
+}
+
 /**
  *  stmmac_open - open entry point of the driver
  *  @dev : pointer to the device structure.
@@ -898,16 +956,6 @@
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;
 
-	stmmac_check_ether_addr(priv);
-
-	/* MDIO bus Registration */
-	ret = stmmac_mdio_register(dev);
-	if (ret < 0) {
-		pr_debug("%s: MDIO bus (id: %d) registration failed",
-			 __func__, priv->plat->bus_id);
-		return ret;
-	}
-
 #ifdef CONFIG_STMMAC_TIMER
 	priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
 	if (unlikely(priv->tm == NULL))
@@ -925,6 +973,10 @@
 	} else
 		priv->tm->enable = 1;
 #endif
+	stmmac_clk_enable(priv);
+
+	stmmac_check_ether_addr(priv);
+
 	ret = stmmac_init_phy(dev);
 	if (unlikely(ret)) {
 		pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
@@ -938,8 +990,7 @@
 	init_dma_desc_rings(dev);
 
 	/* DMA initialization and SW reset */
-	ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
-				  priv->dma_tx_phy, priv->dma_rx_phy);
+	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
 		pr_err("%s: DMA initialization failed\n", __func__);
 		goto open_error;
@@ -1026,6 +1077,8 @@
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
 
+	stmmac_clk_disable(priv);
+
 	return ret;
 }
 
@@ -1077,7 +1130,7 @@
 #ifdef CONFIG_STMMAC_DEBUG_FS
 	stmmac_exit_fs();
 #endif
-	stmmac_mdio_unregister(dev);
+	stmmac_clk_disable(priv);
 
 	return 0;
 }
@@ -1276,7 +1329,8 @@
 			struct sk_buff *skb;
 			int frame_len;
 
-			frame_len = priv->hw->desc->get_rx_frame_len(p);
+			frame_len = priv->hw->desc->get_rx_frame_len(p,
+					priv->plat->rx_coe);
 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
 			 * Type frames (LLC/LLC-SNAP) */
 			if (unlikely(status != llc_snap))
@@ -1312,7 +1366,7 @@
 #endif
 			skb->protocol = eth_type_trans(skb, priv->dev);
 
-			if (unlikely(!priv->rx_coe)) {
+			if (unlikely(!priv->plat->rx_coe)) {
 				/* No RX COE for old mac10/100 devices */
 				skb_checksum_none_assert(skb);
 				netif_receive_skb(skb);
@@ -1413,7 +1467,7 @@
 	struct stmmac_priv *priv = netdev_priv(dev);
 
 	spin_lock(&priv->lock);
-	priv->hw->mac->set_filter(dev);
+	priv->hw->mac->set_filter(dev, priv->synopsys_id);
 	spin_unlock(&priv->lock);
 }
 
@@ -1459,8 +1513,10 @@
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	if (!priv->rx_coe)
+	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
 		features &= ~NETIF_F_RXCSUM;
+	else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
+		features &= ~NETIF_F_IPV6_CSUM;
 	if (!priv->plat->tx_coe)
 		features &= ~NETIF_F_ALL_CSUM;
 
@@ -1584,7 +1640,7 @@
 	.open = stmmac_sysfs_ring_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = seq_release,
+	.release = single_release,
 };
 
 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
@@ -1656,7 +1712,7 @@
 	.open = stmmac_sysfs_dma_cap_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = seq_release,
+	.release = single_release,
 };
 
 static int stmmac_init_fs(struct net_device *dev)
@@ -1752,7 +1808,7 @@
 	priv->hw->ring = &ring_mode_ops;
 
 	/* Get and dump the chip ID */
-	stmmac_get_synopsys_id(priv);
+	priv->synopsys_id = stmmac_get_synopsys_id(priv);
 
 	/* Get the HW capability (new GMAC newer than 3.50a) */
 	priv->hw_cap_support = stmmac_get_hw_features(priv);
@@ -1765,17 +1821,32 @@
 		 * register (if supported).
 		 */
 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
-		priv->plat->tx_coe = priv->dma_cap.tx_coe;
 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+
+		priv->plat->tx_coe = priv->dma_cap.tx_coe;
+
+		if (priv->dma_cap.rx_coe_type2)
+			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
+		else if (priv->dma_cap.rx_coe_type1)
+			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+
 	} else
 		pr_info(" No HW DMA feature register supported");
 
 	/* Select the enhanced/normal descriptor structures */
 	stmmac_selec_desc_mode(priv);
 
-	priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
-	if (priv->rx_coe)
-		pr_info(" RX Checksum Offload Engine supported\n");
+	/* Enable the IPC (Checksum Offload) and check if the feature has been
+	 * enabled during the core configuration. */
+	ret = priv->hw->mac->rx_ipc(priv->ioaddr);
+	if (!ret) {
+		pr_warning(" RX IPC Checksum Offload not configured.\n");
+		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+	}
+
+	if (priv->plat->rx_coe)
+		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
+			priv->plat->rx_coe);
 	if (priv->plat->tx_coe)
 		pr_info(" TX Checksum insertion supported\n");
 
@@ -1856,6 +1927,28 @@
 		goto error;
 	}
 
+	if (stmmac_clk_get(priv))
+		pr_warning("%s: warning: cannot get CSR clock\n", __func__);
+
+	/* If a specific clk_csr value is passed from the platform,
+	 * this means the CSR Clock Range selection cannot be
+	 * changed at run-time and is fixed. Otherwise the driver
+	 * will try to set the MDC clock dynamically according to
+	 * the actual CSR clock input.
+	 */
+	if (!priv->plat->clk_csr)
+		stmmac_clk_csr_set(priv);
+	else
+		priv->clk_csr = priv->plat->clk_csr;
+
+	/* MDIO bus Registration */
+	ret = stmmac_mdio_register(ndev);
+	if (ret < 0) {
+		pr_debug("%s: MDIO bus (id: %d) registration failed",
+			 __func__, priv->plat->bus_id);
+		goto error;
+	}
+
 	return priv;
 
 error:
@@ -1883,6 +1976,7 @@
 	priv->hw->dma->stop_tx(priv->ioaddr);
 
 	stmmac_set_mac(priv->ioaddr, false);
+	stmmac_mdio_unregister(ndev);
 	netif_carrier_off(ndev);
 	unregister_netdev(ndev);
 	free_netdev(ndev);
@@ -1895,6 +1989,7 @@
 {
 	struct stmmac_priv *priv = netdev_priv(ndev);
 	int dis_ic = 0;
+	unsigned long flags;
 
 	if (!ndev || !netif_running(ndev))
 		return 0;
@@ -1902,7 +1997,7 @@
 	if (priv->phydev)
 		phy_stop(priv->phydev);
 
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	netif_device_detach(ndev);
 	netif_stop_queue(ndev);
@@ -1925,21 +2020,24 @@
 	/* Enable Power down mode by programming the PMT regs */
 	if (device_may_wakeup(priv->device))
 		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
-	else
+	else {
 		stmmac_set_mac(priv->ioaddr, false);
-
-	spin_unlock(&priv->lock);
+		/* Disable the clock when PMT wake-up is not used */
+		stmmac_clk_disable(priv);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
 	return 0;
 }
 
 int stmmac_resume(struct net_device *ndev)
 {
 	struct stmmac_priv *priv = netdev_priv(ndev);
+	unsigned long flags;
 
 	if (!netif_running(ndev))
 		return 0;
 
-	spin_lock(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Power Down bit, into the PM register, is cleared
 	 * automatically as soon as a magic packet or a Wake-up frame
@@ -1948,6 +2046,9 @@
 	 * from other devices (e.g. serial console). */
 	if (device_may_wakeup(priv->device))
 		priv->hw->mac->pmt(priv->ioaddr, 0);
+	else
+		/* enable the clk previously disabled */
+		stmmac_clk_enable(priv);
 
 	netif_device_attach(ndev);
 
@@ -1964,7 +2065,7 @@
 
 	netif_start_queue(ndev);
 
-	spin_unlock(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (priv->phydev)
 		phy_start(priv->phydev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 7319532..ade1082 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -34,6 +34,22 @@
 #define MII_BUSY 0x00000001
 #define MII_WRITE 0x00000002
 
+static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
+{
+	unsigned long curr;
+	unsigned long finish = jiffies + 3 * HZ;
+
+	do {
+		curr = jiffies;
+		if (readl(ioaddr + mii_addr) & MII_BUSY)
+			cpu_relax();
+		else
+			return 0;
+	} while (!time_after_eq(curr, finish));
+
+	return -EBUSY;
+}
+
 /**
  * stmmac_mdio_read
  * @bus: points to the mii_bus structure
@@ -54,11 +70,15 @@
 	int data;
 	u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
 			((phyreg << 6) & (0x000007C0)));
-	regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
+	regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
 
-	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+	if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+		return -EBUSY;
+
 	writel(regValue, priv->ioaddr + mii_address);
-	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+
+	if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+		return -EBUSY;
 
 	/* Read the data from the MII data register */
 	data = (int)readl(priv->ioaddr + mii_data);
@@ -86,20 +106,18 @@
 	    (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
 	    | MII_WRITE;
 
-	value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
-
+	value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
 
 	/* Wait until any existing MII operation is complete */
-	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
+	if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+		return -EBUSY;
 
 	/* Set the MII address register to write */
 	writel(phydata, priv->ioaddr + mii_data);
 	writel(value, priv->ioaddr + mii_address);
 
 	/* Wait until any existing MII operation is complete */
-	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
-
-	return 0;
+	return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
 }
 
 /**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index da66ed7..58fab53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -28,6 +28,7 @@
 
 struct plat_stmmacenet_data plat_dat;
 struct stmmac_mdio_bus_data mdio_data;
+struct stmmac_dma_cfg dma_cfg;
 
 static void stmmac_default_data(void)
 {
@@ -35,7 +36,6 @@
 	plat_dat.bus_id = 1;
 	plat_dat.phy_addr = 0;
 	plat_dat.interface = PHY_INTERFACE_MODE_GMII;
-	plat_dat.pbl = 32;
 	plat_dat.clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
 	plat_dat.has_gmac = 1;
 	plat_dat.force_sf_dma_mode = 1;
@@ -44,6 +44,10 @@
 	mdio_data.phy_reset = NULL;
 	mdio_data.phy_mask = 0;
 	plat_dat.mdio_bus_data = &mdio_data;
+
+	dma_cfg.pbl = 32;
+	dma_cfg.burst_len = DMA_AXI_BLEN_256;
+	plat_dat.dma_cfg = &dma_cfg;
 }
 
 /**
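
One note on the DMA defaults, tying this glue to stmmac_init_dma_engine() in the stmmac_main.c hunk above: a platform that supplies no dma_cfg gets pbl = DEFAULT_DMA_PBL (8), no fixed or mixed burst, and burst_len = 0, whereas this PCI glue explicitly asks for pbl = 32 and DMA_AXI_BLEN_256. A board file wanting its own tuning would be expected to hook up something like the following (hypothetical example; only the field names are taken from the hunks above):

	/* Hypothetical platform data; my_dma_cfg and my_plat_dat are
	 * illustration names, not part of this patch. */
	static struct stmmac_dma_cfg my_dma_cfg = {
		.pbl		= 32,
		.fixed_burst	= 1,
		.burst_len	= DMA_AXI_BLEN_256,
	};

	static struct plat_stmmacenet_data my_plat_dat = {
		.dma_cfg	= &my_dma_cfg,
	};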
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 116529a..3dd8f08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -50,7 +50,6 @@
 	 * once needed on other platforms.
 	 */
 	if (of_device_is_compatible(np, "st,spear600-gmac")) {
-		plat->pbl = 8;
 		plat->has_gmac = 1;
 		plat->pmt = 1;
 	}
@@ -189,9 +188,6 @@
 	if (priv->plat->exit)
 		priv->plat->exit(pdev);
 
-	if (priv->plat->exit)
-		priv->plat->exit(pdev);
-
 	platform_set_drvdata(pdev, NULL);
 
 	iounmap((void *)priv->ioaddr);
@@ -218,14 +214,26 @@
 
 int stmmac_pltfr_freeze(struct device *dev)
 {
+	int ret;
+	struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
 	struct net_device *ndev = dev_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
 
-	return stmmac_freeze(ndev);
+	ret = stmmac_freeze(ndev);
+	if (plat_dat->exit)
+		plat_dat->exit(pdev);
+
+	return ret;
 }
 
 int stmmac_pltfr_restore(struct device *dev)
 {
+	struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
 	struct net_device *ndev = dev_get_drvdata(dev);
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (plat_dat->init)
+		plat_dat->init(pdev);
 
 	return stmmac_restore(ndev);
 }
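[Editor's note] The reworked stmmac_pltfr_freeze()/stmmac_pltfr_restore() above wrap the generic freeze/restore work with optional board-specific exit()/init() hooks taken from platform data. A minimal sketch of that wrapping pattern, with every name (board_hooks, pltfr_freeze, ...) invented for illustration rather than taken from the driver:

/* Standalone sketch of PM callbacks wrapping optional board hooks. */
#include <stdio.h>

struct board_hooks {
	void (*init)(void);		/* optional board setup */
	void (*exit)(void);		/* optional board teardown */
};

static int generic_freeze(void)  { printf("generic freeze\n");  return 0; }
static int generic_restore(void) { printf("generic restore\n"); return 0; }

static int pltfr_freeze(const struct board_hooks *hooks)
{
	int ret = generic_freeze();

	if (hooks && hooks->exit)
		hooks->exit();		/* power down board glue last */
	return ret;
}

static int pltfr_restore(const struct board_hooks *hooks)
{
	if (hooks && hooks->init)
		hooks->init();		/* bring board glue up first */
	return generic_restore();
}

static void board_init(void) { printf("board init\n"); }
static void board_exit(void) { printf("board exit\n"); }

int main(void)
{
	struct board_hooks hooks = { .init = board_init, .exit = board_exit };

	pltfr_freeze(&hooks);
	pltfr_restore(&hooks);
	return 0;
}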
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index c99b3b0..703c8cc 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9838,7 +9838,7 @@
 			goto err_out_release_parent;
 		}
 	}
-	if (err || dma_mask == DMA_BIT_MASK(32)) {
+	if (err) {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 558409f..3cf4ab7 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -401,7 +401,7 @@
 		return 1;
 	}
 
-	udelay(5000);
+	mdelay(5);
 
 	/* Execute RX reset command. */
 	writel(gp->swrst_base | GREG_SWRST_RXRST,
@@ -2339,7 +2339,7 @@
 	netif_device_detach(dev);
 
 	/* Switch off chip, remember WOL setting */
-	gp->asleep_wol = gp->wake_on_lan;
+	gp->asleep_wol = !!gp->wake_on_lan;
 	gem_do_stop(dev, gp->asleep_wol);
 
 	/* Unlock the network stack */
@@ -2898,7 +2898,6 @@
 	}
 
 	gp->pdev = pdev;
-	dev->base_addr = (long) pdev;
 	gp->dev = dev;
 
 	gp->msg_enable = DEFAULT_MSG;
@@ -2972,7 +2971,6 @@
 	netif_napi_add(dev, &gp->napi, gem_poll, 64);
 	dev->ethtool_ops = &gem_ethtool_ops;
 	dev->watchdog_timeo = 5 * HZ;
-	dev->irq = pdev->irq;
 	dev->dma = 0;
 
 	/* Set that now, in case PM kicks in now */
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index b95e7e6..dfc00c4 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2182,11 +2182,12 @@
 	 * into a single source which we register handling at probe time.
 	 */
 	if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
-		if (request_irq(dev->irq, happy_meal_interrupt,
-				IRQF_SHARED, dev->name, (void *)dev)) {
+		res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
+				  dev->name, dev);
+		if (res) {
 			HMD(("EAGAIN\n"));
 			printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
-			       dev->irq);
+			       hp->irq);
 
 			return -EAGAIN;
 		}
@@ -2199,7 +2200,7 @@
 	spin_unlock_irq(&hp->happy_lock);
 
 	if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
-		free_irq(dev->irq, dev);
+		free_irq(hp->irq, dev);
 	return res;
 }
 
@@ -2221,7 +2222,7 @@
 	 * time and never unregister.
 	 */
 	if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
-		free_irq(dev->irq, dev);
+		free_irq(hp->irq, dev);
 
 	return 0;
 }
@@ -2777,7 +2778,7 @@
 	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
 	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
 
-	dev->irq = op->archdata.irqs[0];
+	hp->irq = op->archdata.irqs[0];
 
 #if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
 	/* Hook up SBUS register/descriptor accessors. */
@@ -2981,8 +2982,6 @@
 	if (hme_version_printed++ == 0)
 		printk(KERN_INFO "%s", version);
 
-	dev->base_addr = (long) pdev;
-
 	hp = netdev_priv(dev);
 
 	hp->happy_dev = pdev;
@@ -3087,12 +3086,11 @@
 
 	init_timer(&hp->happy_timer);
 
+	hp->irq = pdev->irq;
 	hp->dev = dev;
 	dev->netdev_ops = &hme_netdev_ops;
 	dev->watchdog_timeo = 5*HZ;
 	dev->ethtool_ops = &hme_ethtool_ops;
-	dev->irq = pdev->irq;
-	dev->dma = 0;
 
 	/* Happy Meal can do it all... */
 	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 64f2783..f430765 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -432,6 +432,7 @@
 
 	dma_addr_t                hblock_dvma;    /* DVMA visible address happy block  */
 	unsigned int              happy_flags;    /* Driver state flags                */
+	int                       irq;
 	enum happy_transceiver    tcvr_type;      /* Kind of transceiver in use        */
 	unsigned int              happy_bursts;   /* Get your mind out of the gutter   */
 	unsigned int              paddr;          /* PHY address for transceiver       */
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 38e3ae9..a108db3 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -618,7 +618,7 @@
 	struct vnet_port *port;
 
 	hlist_for_each_entry(port, n, hp, hash) {
-		if (!compare_ether_addr(port->raddr, skb->data))
+		if (ether_addr_equal(port->raddr, skb->data))
 			return port;
 	}
 	port = NULL;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ad973ff..8846516 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1317,7 +1317,7 @@
 
 static void print_rxfd(struct rxf_desc *rxfd)
 {
-	DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n"
+	DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
 	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
 	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
 }
@@ -1988,10 +1988,6 @@
 		/* these fields are used for info purposes only
 		 * so we can have them same for all ports of the board */
 		ndev->if_port = port;
-		ndev->base_addr = pciaddr;
-		ndev->mem_start = pciaddr;
-		ndev->mem_end = pciaddr + regionSize;
-		ndev->irq = pdev->irq;
 		ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
 		    | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
 		    NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 3455876..d614c37 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -92,7 +92,7 @@
 	CPDMA_STATE_TEARDOWN,
 };
 
-const char *cpdma_state_str[] = { "idle", "active", "teardown" };
+static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
 
 struct cpdma_ctlr {
 	enum cpdma_state	state;
@@ -276,6 +276,7 @@
 		ctlr->num_chan = CPDMA_MAX_CHANNELS;
 	return ctlr;
 }
+EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
 
 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
 {
@@ -321,6 +322,7 @@
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
 
 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 {
@@ -351,6 +353,7 @@
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
 
 int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
 {
@@ -421,6 +424,7 @@
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);
 
 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 {
@@ -444,6 +448,7 @@
 	kfree(ctlr);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
 
 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
 {
@@ -528,6 +533,7 @@
 err_chan_alloc:
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_create);
 
 int cpdma_chan_destroy(struct cpdma_chan *chan)
 {
@@ -545,6 +551,7 @@
 	kfree(chan);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
 
 int cpdma_chan_get_stats(struct cpdma_chan *chan,
 			 struct cpdma_chan_stats *stats)
@@ -693,6 +700,7 @@
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_submit);
 
 static void __cpdma_chan_free(struct cpdma_chan *chan,
 			      struct cpdma_desc __iomem *desc,
@@ -776,6 +784,7 @@
 	}
 	return used;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_process);
 
 int cpdma_chan_start(struct cpdma_chan *chan)
 {
@@ -803,6 +812,7 @@
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_start);
 
 int cpdma_chan_stop(struct cpdma_chan *chan)
 {
@@ -863,6 +873,7 @@
 	spin_unlock_irqrestore(&chan->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_stop);
 
 int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
 {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 174a334..4da93a5 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -627,6 +627,7 @@
 	.get_link = ethtool_op_get_link,
 	.get_coalesce = emac_get_coalesce,
 	.set_coalesce =  emac_set_coalesce,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 /**
@@ -1511,7 +1512,7 @@
 
 static int match_first_device(struct device *dev, void *data)
 {
-	return 1;
+	return !strncmp(dev_name(dev), "davinci_mdio", 12);
 }
 
 /**
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 817ad3b..3e6abf0 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -228,7 +228,7 @@
 	unsigned long addr;
 
 	addr = tag->buffer[9].address;
-	addr |= (tag->buffer[8].address << 16) << 16;
+	addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
 	return (struct sk_buff *) addr;
 }
 
@@ -2545,7 +2545,7 @@
 
 	phy = priv->phy[priv->phy_num];
 
-	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name);
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
 	tlan_mii_sync(dev->base_addr);
 	value = MII_GC_LOOPBK | MII_GC_RESET;
 	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 3d501ec..96070e9 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -843,7 +843,7 @@
 		if (!is_multicast_ether_addr(buf)) {
 			/* Filter packets not for our address. */
 			const u8 *mine = dev->dev_addr;
-			filter = compare_ether_addr(mine, buf);
+			filter = !ether_addr_equal(mine, buf);
 		}
 	}
 
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 5c14f82..961c832 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1590,8 +1590,8 @@
 		found = 0;
 		oldest = NULL;
 		list_for_each_entry(target, &wl->network_list, list) {
-			if (!compare_ether_addr(&target->hwinfo->bssid[2],
-						&scan_info->bssid[2])) {
+			if (ether_addr_equal(&target->hwinfo->bssid[2],
+					     &scan_info->bssid[2])) {
 				found = 1;
 				pr_debug("%s: same BSS found in scanned list\n",
 					 __func__);
@@ -1691,8 +1691,8 @@
 
 		/* If bss specified, check it only */
 		if (test_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat)) {
-			if (!compare_ether_addr(&scan_info->hwinfo->bssid[2],
-						wl->bssid)) {
+			if (ether_addr_equal(&scan_info->hwinfo->bssid[2],
+					     wl->bssid)) {
 				best_bss = scan_info;
 				pr_debug("%s: bssid matched\n", __func__);
 				break;
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index fcfa01f..0459c09 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -689,9 +689,12 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void rhine_poll(struct net_device *dev)
 {
-	disable_irq(dev->irq);
-	rhine_interrupt(dev->irq, (void *)dev);
-	enable_irq(dev->irq);
+	struct rhine_private *rp = netdev_priv(dev);
+	const int irq = rp->pdev->irq;
+
+	disable_irq(irq);
+	rhine_interrupt(irq, dev);
+	enable_irq(irq);
 }
 #endif
 
@@ -972,7 +975,6 @@
 	}
 #endif /* USE_MMIO */
 
-	dev->base_addr = (unsigned long)ioaddr;
 	rp->base = ioaddr;
 
 	/* Get chip registers into a sane state */
@@ -995,8 +997,6 @@
 	if (!phy_id)
 		phy_id = ioread8(ioaddr + 0x6C);
 
-	dev->irq = pdev->irq;
-
 	spin_lock_init(&rp->lock);
 	mutex_init(&rp->task_lock);
 	INIT_WORK(&rp->reset_task, rhine_reset_task);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 8a5d7c1..ea3e0a2 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2488,8 +2488,8 @@
 
 	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
 		velocity_get_ip(vptr);
-	if (dev->irq != 0)
-		free_irq(dev->irq, dev);
+
+	free_irq(vptr->pdev->irq, dev);
 
 	velocity_free_rings(vptr);
 
@@ -2755,8 +2755,6 @@
 	if (ret < 0)
 		goto err_free_dev;
 
-	dev->irq = pdev->irq;
-
 	ret = velocity_get_pci_info(vptr, pdev);
 	if (ret < 0) {
 		/* error message already printed */
@@ -2779,8 +2777,6 @@
 
 	mac_wol_reset(regs);
 
-	dev->base_addr = vptr->ioaddr;
-
 	for (i = 0; i < 6; i++)
 		dev->dev_addr[i] = readb(&regs->PAR[i]);
 
@@ -2806,7 +2802,6 @@
 
 	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
 
-	dev->irq = pdev->irq;
 	dev->netdev_ops = &velocity_netdev_ops;
 	dev->ethtool_ops = &velocity_ethtool_ops;
 	netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
new file mode 100644
index 0000000..cb18043
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -0,0 +1,73 @@
+#
+# WIZnet devices configuration
+#
+
+config NET_VENDOR_WIZNET
+	bool "WIZnet devices"
+	default y
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about WIZnet devices. If you say Y, you will be asked
+	  for your specific card in the following questions.
+
+if NET_VENDOR_WIZNET
+
+config WIZNET_W5100
+	tristate "WIZnet W5100 Ethernet support"
+	depends on HAS_IOMEM
+	---help---
+	  Support for WIZnet W5100 chips.
+
+	  W5100 is a single chip with an integrated 10/100 Ethernet MAC,
+	  PHY and hardware TCP/IP stack, but this driver uses only the
+	  MAC and PHY functions; the on-chip TCP/IP stack is left unused.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called w5100.
+
+config WIZNET_W5300
+	tristate "WIZnet W5300 Ethernet support"
+	depends on HAS_IOMEM
+	---help---
+	  Support for WIZnet W5300 chips.
+
+	  W5300 is a single chip with an integrated 10/100 Ethernet MAC,
+	  PHY and hardware TCP/IP stack, but this driver uses only the
+	  MAC and PHY functions; the on-chip TCP/IP stack is left unused.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called w5300.
+
+choice
+	prompt "WIZnet interface mode"
+	depends on WIZNET_W5100 || WIZNET_W5300
+	default WIZNET_BUS_ANY
+
+config WIZNET_BUS_DIRECT
+	bool "Direct address bus mode"
+	---help---
+	  In direct address mode the host system can access all registers
+	  directly, once they are mapped into Memory-Mapped I/O space.
+
+config WIZNET_BUS_INDIRECT
+	bool "Indirect address bus mode"
+	---help---
+	  In indirect address mode the host system accesses registers
+	  indirectly, through the Indirect Mode Address Register and the
+	  Indirect Mode Data Register, which are themselves mapped into
+	  Memory-Mapped I/O space.
+
+config WIZNET_BUS_ANY
+	bool "Select interface mode at runtime"
+	---help---
+	  If the interface mode is not known at compile time, it can be
+	  selected at runtime from the board/platform resource configuration.
+
+	  Performance may be lower than with an explicitly selected bus mode.
+endchoice
+
+endif # NET_VENDOR_WIZNET
diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile
new file mode 100644
index 0000000..c614535
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_WIZNET_W5100) += w5100.o
+obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
new file mode 100644
index 0000000..a75e9ef
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -0,0 +1,808 @@
+/*
+ * Ethernet driver for the WIZnet W5100 chip.
+ *
+ * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kconfig.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/wiznet.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+
+#define DRV_NAME	"w5100"
+#define DRV_VERSION	"2012-04-04"
+
+MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
+MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
+MODULE_ALIAS("platform:"DRV_NAME);
+MODULE_LICENSE("GPL");
+
+/*
+ * Registers
+ */
+#define W5100_COMMON_REGS	0x0000
+#define W5100_MR		0x0000 /* Mode Register */
+#define   MR_RST		  0x80 /* S/W reset */
+#define   MR_PB			  0x10 /* Ping block */
+#define   MR_AI			  0x02 /* Address Auto-Increment */
+#define   MR_IND		  0x01 /* Indirect mode */
+#define W5100_SHAR		0x0009 /* Source MAC address */
+#define W5100_IR		0x0015 /* Interrupt Register */
+#define W5100_IMR		0x0016 /* Interrupt Mask Register */
+#define   IR_S0			  0x01 /* S0 interrupt */
+#define W5100_RTR		0x0017 /* Retry Time-value Register */
+#define   RTR_DEFAULT		  2000 /* =0x07d0 (2000) */
+#define W5100_RMSR		0x001a /* Receive Memory Size */
+#define W5100_TMSR		0x001b /* Transmit Memory Size */
+#define W5100_COMMON_REGS_LEN	0x0040
+
+#define W5100_S0_REGS		0x0400
+#define W5100_S0_MR		0x0400 /* S0 Mode Register */
+#define   S0_MR_MACRAW		  0x04 /* MAC RAW mode (promiscuous) */
+#define   S0_MR_MACRAW_MF	  0x44 /* MAC RAW mode (filtered) */
+#define W5100_S0_CR		0x0401 /* S0 Command Register */
+#define   S0_CR_OPEN		  0x01 /* OPEN command */
+#define   S0_CR_CLOSE		  0x10 /* CLOSE command */
+#define   S0_CR_SEND		  0x20 /* SEND command */
+#define   S0_CR_RECV		  0x40 /* RECV command */
+#define W5100_S0_IR		0x0402 /* S0 Interrupt Register */
+#define   S0_IR_SENDOK		  0x10 /* complete sending */
+#define   S0_IR_RECV		  0x04 /* receiving data */
+#define W5100_S0_SR		0x0403 /* S0 Status Register */
+#define   S0_SR_MACRAW		  0x42 /* mac raw mode */
+#define W5100_S0_TX_FSR		0x0420 /* S0 Transmit free memory size */
+#define W5100_S0_TX_RD		0x0422 /* S0 Transmit memory read pointer */
+#define W5100_S0_TX_WR		0x0424 /* S0 Transmit memory write pointer */
+#define W5100_S0_RX_RSR		0x0426 /* S0 Receive free memory size */
+#define W5100_S0_RX_RD		0x0428 /* S0 Receive memory read pointer */
+#define W5100_S0_REGS_LEN	0x0040
+
+#define W5100_TX_MEM_START	0x4000
+#define W5100_TX_MEM_END	0x5fff
+#define W5100_TX_MEM_MASK	0x1fff
+#define W5100_RX_MEM_START	0x6000
+#define W5100_RX_MEM_END	0x7fff
+#define W5100_RX_MEM_MASK	0x1fff
+
+/*
+ * Device driver private data structure
+ */
+struct w5100_priv {
+	void __iomem *base;
+	spinlock_t reg_lock;
+	bool indirect;
+	u8   (*read)(struct w5100_priv *priv, u16 addr);
+	void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
+	u16  (*read16)(struct w5100_priv *priv, u16 addr);
+	void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
+	void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+	void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+	int irq;
+	int link_irq;
+	int link_gpio;
+
+	struct napi_struct napi;
+	struct net_device *ndev;
+	bool promisc;
+	u32 msg_enable;
+};
+
+/************************************************************************
+ *
+ *  Lowlevel I/O functions
+ *
+ ***********************************************************************/
+
+/*
+ * In direct address mode the host system can access W5100 registers
+ * directly, once they are mapped into Memory-Mapped I/O space.
+ *
+ * 0x8000 bytes of memory space are required.
+ */
+static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
+{
+	return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+static inline void w5100_write_direct(struct w5100_priv *priv,
+				      u16 addr, u8 data)
+{
+	iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
+{
+	u16 data;
+	data  = w5100_read_direct(priv, addr) << 8;
+	data |= w5100_read_direct(priv, addr + 1);
+	return data;
+}
+
+static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
+{
+	w5100_write_direct(priv, addr, data >> 8);
+	w5100_write_direct(priv, addr + 1, data);
+}
+
+static void w5100_readbuf_direct(struct w5100_priv *priv,
+				 u16 offset, u8 *buf, int len)
+{
+	u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+	int i;
+
+	for (i = 0; i < len; i++, addr++) {
+		if (unlikely(addr > W5100_RX_MEM_END))
+			addr = W5100_RX_MEM_START;
+		*buf++ = w5100_read_direct(priv, addr);
+	}
+}
+
+static void w5100_writebuf_direct(struct w5100_priv *priv,
+				  u16 offset, u8 *buf, int len)
+{
+	u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+	int i;
+
+	for (i = 0; i < len; i++, addr++) {
+		if (unlikely(addr > W5100_TX_MEM_END))
+			addr = W5100_TX_MEM_START;
+		w5100_write_direct(priv, addr, *buf++);
+	}
+}
+
+/*
+ * In indirect address mode the host system accesses registers indirectly,
+ * through the Indirect Mode Address Register (IDM_AR) and the Indirect Mode
+ * Data Register (IDM_DR), which are themselves mapped into Memory-Mapped
+ * I/O space.  The Mode Register (MR) remains directly accessible.
+ *
+ * Only 0x04 bytes of memory space are required.
+ */
+#define W5100_IDM_AR		0x01   /* Indirect Mode Address Register */
+#define W5100_IDM_DR		0x03   /* Indirect Mode Data Register */
+
+static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
+{
+	unsigned long flags;
+	u8 data;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+	data = w5100_read_direct(priv, W5100_IDM_DR);
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+
+	return data;
+}
+
+static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+	w5100_write_direct(priv, W5100_IDM_DR, data);
+	mmiowb();
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
+{
+	unsigned long flags;
+	u16 data;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+	data  = w5100_read_direct(priv, W5100_IDM_DR) << 8;
+	data |= w5100_read_direct(priv, W5100_IDM_DR);
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+
+	return data;
+}
+
+static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+	w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
+	w5100_write_direct(priv, W5100_IDM_DR, data);
+	mmiowb();
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+static void w5100_readbuf_indirect(struct w5100_priv *priv,
+				   u16 offset, u8 *buf, int len)
+{
+	u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+
+	for (i = 0; i < len; i++, addr++) {
+		if (unlikely(addr > W5100_RX_MEM_END)) {
+			addr = W5100_RX_MEM_START;
+			w5100_write16_direct(priv, W5100_IDM_AR, addr);
+			mmiowb();
+		}
+		*buf++ = w5100_read_direct(priv, W5100_IDM_DR);
+	}
+	mmiowb();
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+static void w5100_writebuf_indirect(struct w5100_priv *priv,
+				    u16 offset, u8 *buf, int len)
+{
+	u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5100_write16_direct(priv, W5100_IDM_AR, addr);
+	mmiowb();
+
+	for (i = 0; i < len; i++, addr++) {
+		if (unlikely(addr > W5100_TX_MEM_END)) {
+			addr = W5100_TX_MEM_START;
+			w5100_write16_direct(priv, W5100_IDM_AR, addr);
+			mmiowb();
+		}
+		w5100_write_direct(priv, W5100_IDM_DR, *buf++);
+	}
+	mmiowb();
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+#if defined(CONFIG_WIZNET_BUS_DIRECT)
+#define w5100_read	w5100_read_direct
+#define w5100_write	w5100_write_direct
+#define w5100_read16	w5100_read16_direct
+#define w5100_write16	w5100_write16_direct
+#define w5100_readbuf	w5100_readbuf_direct
+#define w5100_writebuf	w5100_writebuf_direct
+
+#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
+#define w5100_read	w5100_read_indirect
+#define w5100_write	w5100_write_indirect
+#define w5100_read16	w5100_read16_indirect
+#define w5100_write16	w5100_write16_indirect
+#define w5100_readbuf	w5100_readbuf_indirect
+#define w5100_writebuf	w5100_writebuf_indirect
+
+#else /* CONFIG_WIZNET_BUS_ANY */
+#define w5100_read	priv->read
+#define w5100_write	priv->write
+#define w5100_read16	priv->read16
+#define w5100_write16	priv->write16
+#define w5100_readbuf	priv->readbuf
+#define w5100_writebuf	priv->writebuf
+#endif
+
+static int w5100_command(struct w5100_priv *priv, u16 cmd)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+	w5100_write(priv, W5100_S0_CR, cmd);
+	mmiowb();
+
+	while (w5100_read(priv, W5100_S0_CR) != 0) {
+		if (time_after(jiffies, timeout))
+			return -EIO;
+		cpu_relax();
+	}
+
+	return 0;
+}
+
+static void w5100_write_macaddr(struct w5100_priv *priv)
+{
+	struct net_device *ndev = priv->ndev;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
+	mmiowb();
+}
+
+static void w5100_hw_reset(struct w5100_priv *priv)
+{
+	w5100_write_direct(priv, W5100_MR, MR_RST);
+	mmiowb();
+	mdelay(5);
+	w5100_write_direct(priv, W5100_MR, priv->indirect ?
+				  MR_PB | MR_AI | MR_IND :
+				  MR_PB);
+	mmiowb();
+	w5100_write(priv, W5100_IMR, 0);
+	w5100_write_macaddr(priv);
+
+	/* Configure 16K of internal memory
+	 * as 8K RX buffer and 8K TX buffer
+	 */
+	w5100_write(priv, W5100_RMSR, 0x03);
+	w5100_write(priv, W5100_TMSR, 0x03);
+	mmiowb();
+}
+
+static void w5100_hw_start(struct w5100_priv *priv)
+{
+	w5100_write(priv, W5100_S0_MR, priv->promisc ?
+			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
+	mmiowb();
+	w5100_command(priv, S0_CR_OPEN);
+	w5100_write(priv, W5100_IMR, IR_S0);
+	mmiowb();
+}
+
+static void w5100_hw_close(struct w5100_priv *priv)
+{
+	w5100_write(priv, W5100_IMR, 0);
+	mmiowb();
+	w5100_command(priv, S0_CR_CLOSE);
+}
+
+/***********************************************************************
+ *
+ *   Device driver functions / callbacks
+ *
+ ***********************************************************************/
+
+static void w5100_get_drvinfo(struct net_device *ndev,
+			      struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+		sizeof(info->bus_info));
+}
+
+static u32 w5100_get_link(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	if (gpio_is_valid(priv->link_gpio))
+		return !!gpio_get_value(priv->link_gpio);
+
+	return 1;
+}
+
+static u32 w5100_get_msglevel(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	return priv->msg_enable;
+}
+
+static void w5100_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	priv->msg_enable = value;
+}
+
+static int w5100_get_regs_len(struct net_device *ndev)
+{
+	return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
+}
+
+static void w5100_get_regs(struct net_device *ndev,
+			   struct ethtool_regs *regs, void *_buf)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+	u8 *buf = _buf;
+	u16 i;
+
+	regs->version = 1;
+	for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
+		*buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
+	for (i = 0; i < W5100_S0_REGS_LEN; i++)
+		*buf++ = w5100_read(priv, W5100_S0_REGS + i);
+}
+
+static void w5100_tx_timeout(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+	w5100_hw_reset(priv);
+	w5100_hw_start(priv);
+	ndev->stats.tx_errors++;
+	ndev->trans_start = jiffies;
+	netif_wake_queue(ndev);
+}
+
+static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+	u16 offset;
+
+	netif_stop_queue(ndev);
+
+	offset = w5100_read16(priv, W5100_S0_TX_WR);
+	w5100_writebuf(priv, offset, skb->data, skb->len);
+	w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
+	mmiowb();
+	ndev->stats.tx_bytes += skb->len;
+	ndev->stats.tx_packets++;
+	dev_kfree_skb(skb);
+
+	w5100_command(priv, S0_CR_SEND);
+
+	return NETDEV_TX_OK;
+}
+
+static int w5100_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
+	struct net_device *ndev = priv->ndev;
+	struct sk_buff *skb;
+	int rx_count;
+	u16 rx_len;
+	u16 offset;
+	u8 header[2];
+
+	for (rx_count = 0; rx_count < budget; rx_count++) {
+		u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
+		if (rx_buf_len == 0)
+			break;
+
+		offset = w5100_read16(priv, W5100_S0_RX_RD);
+		w5100_readbuf(priv, offset, header, 2);
+		rx_len = get_unaligned_be16(header) - 2;
+
+		skb = netdev_alloc_skb_ip_align(ndev, rx_len);
+		if (unlikely(!skb)) {
+			w5100_write16(priv, W5100_S0_RX_RD,
+					    offset + rx_buf_len);
+			w5100_command(priv, S0_CR_RECV);
+			ndev->stats.rx_dropped++;
+			return -ENOMEM;
+		}
+
+		skb_put(skb, rx_len);
+		w5100_readbuf(priv, offset + 2, skb->data, rx_len);
+		w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
+		mmiowb();
+		w5100_command(priv, S0_CR_RECV);
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		netif_receive_skb(skb);
+		ndev->stats.rx_packets++;
+		ndev->stats.rx_bytes += rx_len;
+	}
+
+	if (rx_count < budget) {
+		w5100_write(priv, W5100_IMR, IR_S0);
+		mmiowb();
+		napi_complete(napi);
+	}
+
+	return rx_count;
+}
+
+static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
+{
+	struct net_device *ndev = ndev_instance;
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	int ir = w5100_read(priv, W5100_S0_IR);
+	if (!ir)
+		return IRQ_NONE;
+	w5100_write(priv, W5100_S0_IR, ir);
+	mmiowb();
+
+	if (ir & S0_IR_SENDOK) {
+		netif_dbg(priv, tx_done, ndev, "tx done\n");
+		netif_wake_queue(ndev);
+	}
+
+	if (ir & S0_IR_RECV) {
+		if (napi_schedule_prep(&priv->napi)) {
+			w5100_write(priv, W5100_IMR, 0);
+			mmiowb();
+			__napi_schedule(&priv->napi);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
+{
+	struct net_device *ndev = ndev_instance;
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		if (gpio_get_value(priv->link_gpio) != 0) {
+			netif_info(priv, link, ndev, "link is up\n");
+			netif_carrier_on(ndev);
+		} else {
+			netif_info(priv, link, ndev, "link is down\n");
+			netif_carrier_off(ndev);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void w5100_set_rx_mode(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+	bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
+
+	if (priv->promisc != set_promisc) {
+		priv->promisc = set_promisc;
+		w5100_hw_start(priv);
+	}
+}
+
+static int w5100_set_macaddr(struct net_device *ndev, void *addr)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+	struct sockaddr *sock_addr = addr;
+
+	if (!is_valid_ether_addr(sock_addr->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+	ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
+	w5100_write_macaddr(priv);
+	return 0;
+}
+
+static int w5100_open(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	netif_info(priv, ifup, ndev, "enabling\n");
+	if (!is_valid_ether_addr(ndev->dev_addr))
+		return -EINVAL;
+	w5100_hw_start(priv);
+	napi_enable(&priv->napi);
+	netif_start_queue(ndev);
+	if (!gpio_is_valid(priv->link_gpio) ||
+	    gpio_get_value(priv->link_gpio) != 0)
+		netif_carrier_on(ndev);
+	return 0;
+}
+
+static int w5100_stop(struct net_device *ndev)
+{
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	netif_info(priv, ifdown, ndev, "shutting down\n");
+	w5100_hw_close(priv);
+	netif_carrier_off(ndev);
+	netif_stop_queue(ndev);
+	napi_disable(&priv->napi);
+	return 0;
+}
+
+static const struct ethtool_ops w5100_ethtool_ops = {
+	.get_drvinfo		= w5100_get_drvinfo,
+	.get_msglevel		= w5100_get_msglevel,
+	.set_msglevel		= w5100_set_msglevel,
+	.get_link		= w5100_get_link,
+	.get_regs_len		= w5100_get_regs_len,
+	.get_regs		= w5100_get_regs,
+};
+
+static const struct net_device_ops w5100_netdev_ops = {
+	.ndo_open		= w5100_open,
+	.ndo_stop		= w5100_stop,
+	.ndo_start_xmit		= w5100_start_tx,
+	.ndo_tx_timeout		= w5100_tx_timeout,
+	.ndo_set_rx_mode	= w5100_set_rx_mode,
+	.ndo_set_mac_address	= w5100_set_macaddr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+};
+
+static int __devinit w5100_hw_probe(struct platform_device *pdev)
+{
+	struct wiznet_platform_data *data = pdev->dev.platform_data;
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5100_priv *priv = netdev_priv(ndev);
+	const char *name = netdev_name(ndev);
+	struct resource *mem;
+	int mem_size;
+	int irq;
+	int ret;
+
+	if (data && is_valid_ether_addr(data->mac_addr)) {
+		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
+	} else {
+		random_ether_addr(ndev->dev_addr);
+		ndev->addr_assign_type |= NET_ADDR_RANDOM;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem)
+		return -ENXIO;
+	mem_size = resource_size(mem);
+	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
+		return -EBUSY;
+	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
+	if (!priv->base)
+		return -EBUSY;
+
+	spin_lock_init(&priv->reg_lock);
+	priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
+	if (priv->indirect) {
+		priv->read     = w5100_read_indirect;
+		priv->write    = w5100_write_indirect;
+		priv->read16   = w5100_read16_indirect;
+		priv->write16  = w5100_write16_indirect;
+		priv->readbuf  = w5100_readbuf_indirect;
+		priv->writebuf = w5100_writebuf_indirect;
+	} else {
+		priv->read     = w5100_read_direct;
+		priv->write    = w5100_write_direct;
+		priv->read16   = w5100_read16_direct;
+		priv->write16  = w5100_write16_direct;
+		priv->readbuf  = w5100_readbuf_direct;
+		priv->writebuf = w5100_writebuf_direct;
+	}
+
+	w5100_hw_reset(priv);
+	if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+	ret = request_irq(irq, w5100_interrupt,
+			  IRQ_TYPE_LEVEL_LOW, name, ndev);
+	if (ret < 0)
+		return ret;
+	priv->irq = irq;
+
+	priv->link_gpio = data ? data->link_gpio : -EINVAL;
+	if (gpio_is_valid(priv->link_gpio)) {
+		char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
+		if (!link_name)
+			return -ENOMEM;
+		snprintf(link_name, 16, "%s-link", name);
+		priv->link_irq = gpio_to_irq(priv->link_gpio);
+		if (request_any_context_irq(priv->link_irq, w5100_detect_link,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				link_name, priv->ndev) < 0)
+			priv->link_gpio = -EINVAL;
+	}
+
+	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
+	return 0;
+}
+
+static int __devinit w5100_probe(struct platform_device *pdev)
+{
+	struct w5100_priv *priv;
+	struct net_device *ndev;
+	int err;
+
+	ndev = alloc_etherdev(sizeof(*priv));
+	if (!ndev)
+		return -ENOMEM;
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	platform_set_drvdata(pdev, ndev);
+	priv = netdev_priv(ndev);
+	priv->ndev = ndev;
+
+	ether_setup(ndev);
+	ndev->netdev_ops = &w5100_netdev_ops;
+	ndev->ethtool_ops = &w5100_ethtool_ops;
+	ndev->watchdog_timeo = HZ;
+	netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
+
+	/* This chip doesn't support VLAN packets with normal MTU,
+	 * so disable VLAN for this device.
+	 */
+	ndev->features |= NETIF_F_VLAN_CHALLENGED;
+
+	err = register_netdev(ndev);
+	if (err < 0)
+		goto err_register;
+
+	err = w5100_hw_probe(pdev);
+	if (err < 0)
+		goto err_hw_probe;
+
+	return 0;
+
+err_hw_probe:
+	unregister_netdev(ndev);
+err_register:
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static int __devexit w5100_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	w5100_hw_reset(priv);
+	free_irq(priv->irq, ndev);
+	if (gpio_is_valid(priv->link_gpio))
+		free_irq(priv->link_irq, ndev);
+
+	unregister_netdev(ndev);
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int w5100_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		netif_carrier_off(ndev);
+		netif_device_detach(ndev);
+
+		w5100_hw_close(priv);
+	}
+	return 0;
+}
+
+static int w5100_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5100_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		w5100_hw_reset(priv);
+		w5100_hw_start(priv);
+
+		netif_device_attach(ndev);
+		if (!gpio_is_valid(priv->link_gpio) ||
+		    gpio_get_value(priv->link_gpio) != 0)
+			netif_carrier_on(ndev);
+	}
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+
+static struct platform_driver w5100_driver = {
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= &w5100_pm_ops,
+	},
+	.probe		= w5100_probe,
+	.remove		= __devexit_p(w5100_remove),
+};
+
+module_platform_driver(w5100_driver);
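[Editor's note] When the w5100 driver above is built with WIZNET_BUS_ANY, the probe code selects direct or indirect register accessors at runtime by filling function pointers in the private structure. A standalone sketch of that dispatch pattern follows; the fake_chip model, the single "address latch" emulation of indirect mode, and every name here are assumptions made for illustration, not the driver's API:

/* Standalone sketch of runtime accessor selection; all names illustrative. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct fake_chip {
	uint8_t regs[16];
	bool indirect;
	uint8_t (*read)(struct fake_chip *c, unsigned addr);
	void (*write)(struct fake_chip *c, unsigned addr, uint8_t v);
};

static uint8_t read_direct(struct fake_chip *c, unsigned addr)
{
	return c->regs[addr];
}

static void write_direct(struct fake_chip *c, unsigned addr, uint8_t v)
{
	c->regs[addr] = v;
}

/* "Indirect" mode funnels every access through a latched address window
 * and a data window (cf. IDM_AR / IDM_DR in the driver).
 */
static unsigned idm_ar;			/* last address latched */

static uint8_t read_indirect(struct fake_chip *c, unsigned addr)
{
	idm_ar = addr;			/* step 1: latch the address */
	return c->regs[idm_ar];		/* step 2: read the data window */
}

static void write_indirect(struct fake_chip *c, unsigned addr, uint8_t v)
{
	idm_ar = addr;
	c->regs[idm_ar] = v;
}

static void probe(struct fake_chip *c, bool small_window)
{
	c->indirect = small_window;
	c->read  = small_window ? read_indirect : read_direct;
	c->write = small_window ? write_indirect : write_direct;
}

int main(void)
{
	struct fake_chip chip = { .regs = { 0 } };

	probe(&chip, true);	/* the driver decides from the resource size */
	chip.write(&chip, 5, 0xab);
	printf("reg5 = 0x%02x\n", chip.read(&chip, 5));
	return 0;
}

In the driver the same decision is driven by the size of the memory resource: a window smaller than the direct-mode footprint forces indirect accessors.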
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
new file mode 100644
index 0000000..3306a20
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -0,0 +1,720 @@
+/*
+ * Ethernet driver for the WIZnet W5300 chip.
+ *
+ * Copyright (C) 2008-2009 WIZnet Co.,Ltd.
+ * Copyright (C) 2011 Taehun Kim <kth3321 <at> gmail.com>
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kconfig.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/wiznet.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+
+#define DRV_NAME	"w5300"
+#define DRV_VERSION	"2012-04-04"
+
+MODULE_DESCRIPTION("WIZnet W5300 Ethernet driver v"DRV_VERSION);
+MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
+MODULE_ALIAS("platform:"DRV_NAME);
+MODULE_LICENSE("GPL");
+
+/*
+ * Registers
+ */
+#define W5300_MR		0x0000	/* Mode Register */
+#define   MR_DBW		  (1 << 15) /* Data bus width */
+#define   MR_MPF		  (1 << 14) /* Mac layer pause frame */
+#define   MR_WDF(n)		  (((n)&7)<<11) /* Write data fetch time */
+#define   MR_RDH		  (1 << 10) /* Read data hold time */
+#define   MR_FS			  (1 << 8)  /* FIFO swap */
+#define   MR_RST		  (1 << 7)  /* S/W reset */
+#define   MR_PB			  (1 << 4)  /* Ping block */
+#define   MR_DBS		  (1 << 2)  /* Data bus swap */
+#define   MR_IND		  (1 << 0)  /* Indirect mode */
+#define W5300_IR		0x0002	/* Interrupt Register */
+#define W5300_IMR		0x0004	/* Interrupt Mask Register */
+#define   IR_S0			  0x0001  /* S0 interrupt */
+#define W5300_SHARL		0x0008	/* Source MAC address (0123) */
+#define W5300_SHARH		0x000c	/* Source MAC address (45) */
+#define W5300_TMSRL		0x0020	/* Transmit Memory Size (0123) */
+#define W5300_TMSRH		0x0024	/* Transmit Memory Size (4567) */
+#define W5300_RMSRL		0x0028	/* Receive Memory Size (0123) */
+#define W5300_RMSRH		0x002c	/* Receive Memory Size (4567) */
+#define W5300_MTYPE		0x0030	/* Memory Type */
+#define W5300_IDR		0x00fe	/* Chip ID register */
+#define   IDR_W5300		  0x5300  /* =0x5300 for WIZnet W5300 */
+#define W5300_S0_MR		0x0200	/* S0 Mode Register */
+#define   S0_MR_CLOSED		  0x0000  /* Close mode */
+#define   S0_MR_MACRAW		  0x0004  /* MAC RAW mode (promiscuous) */
+#define   S0_MR_MACRAW_MF	  0x0044  /* MAC RAW mode (filtered) */
+#define W5300_S0_CR		0x0202	/* S0 Command Register */
+#define   S0_CR_OPEN		  0x0001  /* OPEN command */
+#define   S0_CR_CLOSE		  0x0010  /* CLOSE command */
+#define   S0_CR_SEND		  0x0020  /* SEND command */
+#define   S0_CR_RECV		  0x0040  /* RECV command */
+#define W5300_S0_IMR		0x0204	/* S0 Interrupt Mask Register */
+#define W5300_S0_IR		0x0206	/* S0 Interrupt Register */
+#define   S0_IR_RECV		  0x0004  /* Receive interrupt */
+#define   S0_IR_SENDOK		  0x0010  /* Send OK interrupt */
+#define W5300_S0_SSR		0x0208	/* S0 Socket Status Register */
+#define W5300_S0_TX_WRSR	0x0220	/* S0 TX Write Size Register */
+#define W5300_S0_TX_FSR		0x0224	/* S0 TX Free Size Register */
+#define W5300_S0_RX_RSR		0x0228	/* S0 Received data Size */
+#define W5300_S0_TX_FIFO	0x022e	/* S0 Transmit FIFO */
+#define W5300_S0_RX_FIFO	0x0230	/* S0 Receive FIFO */
+#define W5300_REGS_LEN		0x0400
+
+/*
+ * Device driver private data structure
+ */
+struct w5300_priv {
+	void __iomem *base;
+	spinlock_t reg_lock;
+	bool indirect;
+	u16  (*read) (struct w5300_priv *priv, u16 addr);
+	void (*write)(struct w5300_priv *priv, u16 addr, u16 data);
+	int irq;
+	int link_irq;
+	int link_gpio;
+
+	struct napi_struct napi;
+	struct net_device *ndev;
+	bool promisc;
+	u32 msg_enable;
+};
+
+/************************************************************************
+ *
+ *  Lowlevel I/O functions
+ *
+ ***********************************************************************/
+
+/*
+ * In direct address mode the host system can access W5300 registers
+ * directly, once they are mapped into Memory-Mapped I/O space.
+ *
+ * 0x400 bytes of memory space are required.
+ */
+static inline u16 w5300_read_direct(struct w5300_priv *priv, u16 addr)
+{
+	return ioread16(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+static inline void w5300_write_direct(struct w5300_priv *priv,
+				      u16 addr, u16 data)
+{
+	iowrite16(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+}
+
+/*
+ * In indirect address mode the host system accesses registers indirectly,
+ * through the Indirect Mode Address Register (IDM_AR) and the Indirect Mode
+ * Data Register (IDM_DR), which are themselves mapped into Memory-Mapped
+ * I/O space.  The Mode Register (MR) remains directly accessible.
+ *
+ * Only 0x06 bytes of memory space are required.
+ */
+#define W5300_IDM_AR		0x0002	 /* Indirect Mode Address */
+#define W5300_IDM_DR		0x0004	 /* Indirect Mode Data */
+
+static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr)
+{
+	unsigned long flags;
+	u16 data;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5300_write_direct(priv, W5300_IDM_AR, addr);
+	mmiowb();
+	data = w5300_read_direct(priv, W5300_IDM_DR);
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+
+	return data;
+}
+
+static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->reg_lock, flags);
+	w5300_write_direct(priv, W5300_IDM_AR, addr);
+	mmiowb();
+	w5300_write_direct(priv, W5300_IDM_DR, data);
+	mmiowb();
+	spin_unlock_irqrestore(&priv->reg_lock, flags);
+}
+
+#if defined(CONFIG_WIZNET_BUS_DIRECT)
+#define w5300_read	w5300_read_direct
+#define w5300_write	w5300_write_direct
+
+#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
+#define w5300_read	w5300_read_indirect
+#define w5300_write	w5300_write_indirect
+
+#else /* CONFIG_WIZNET_BUS_ANY */
+#define w5300_read	priv->read
+#define w5300_write	priv->write
+#endif
+
+static u32 w5300_read32(struct w5300_priv *priv, u16 addr)
+{
+	u32 data;
+	data  = w5300_read(priv, addr) << 16;
+	data |= w5300_read(priv, addr + 2);
+	return data;
+}
+
+static void w5300_write32(struct w5300_priv *priv, u16 addr, u32 data)
+{
+	w5300_write(priv, addr, data >> 16);
+	w5300_write(priv, addr + 2, data);
+}
+
+static int w5300_command(struct w5300_priv *priv, u16 cmd)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+	w5300_write(priv, W5300_S0_CR, cmd);
+	mmiowb();
+
+	while (w5300_read(priv, W5300_S0_CR) != 0) {
+		if (time_after(jiffies, timeout))
+			return -EIO;
+		cpu_relax();
+	}
+
+	return 0;
+}
+
+static void w5300_read_frame(struct w5300_priv *priv, u8 *buf, int len)
+{
+	u16 fifo;
+	int i;
+
+	for (i = 0; i < len; i += 2) {
+		fifo = w5300_read(priv, W5300_S0_RX_FIFO);
+		*buf++ = fifo >> 8;
+		*buf++ = fifo;
+	}
+	fifo = w5300_read(priv, W5300_S0_RX_FIFO);
+	fifo = w5300_read(priv, W5300_S0_RX_FIFO);
+}
+
+static void w5300_write_frame(struct w5300_priv *priv, u8 *buf, int len)
+{
+	u16 fifo;
+	int i;
+
+	for (i = 0; i < len; i += 2) {
+		fifo  = *buf++ << 8;
+		fifo |= *buf++;
+		w5300_write(priv, W5300_S0_TX_FIFO, fifo);
+	}
+	w5300_write32(priv, W5300_S0_TX_WRSR, len);
+}
+
+static void w5300_write_macaddr(struct w5300_priv *priv)
+{
+	struct net_device *ndev = priv->ndev;
+	w5300_write32(priv, W5300_SHARL,
+		      ndev->dev_addr[0] << 24 |
+		      ndev->dev_addr[1] << 16 |
+		      ndev->dev_addr[2] << 8 |
+		      ndev->dev_addr[3]);
+	w5300_write(priv, W5300_SHARH,
+		      ndev->dev_addr[4] << 8 |
+		      ndev->dev_addr[5]);
+	mmiowb();
+}
+
+static void w5300_hw_reset(struct w5300_priv *priv)
+{
+	w5300_write_direct(priv, W5300_MR, MR_RST);
+	mmiowb();
+	mdelay(5);
+	w5300_write_direct(priv, W5300_MR, priv->indirect ?
+				 MR_WDF(7) | MR_PB | MR_IND :
+				 MR_WDF(7) | MR_PB);
+	mmiowb();
+	w5300_write(priv, W5300_IMR, 0);
+	w5300_write_macaddr(priv);
+
+	/* Configure 128K of internal memory
+	 * as 64K RX fifo and 64K TX fifo
+	 */
+	w5300_write32(priv, W5300_RMSRL, 64 << 24);
+	w5300_write32(priv, W5300_RMSRH, 0);
+	w5300_write32(priv, W5300_TMSRL, 64 << 24);
+	w5300_write32(priv, W5300_TMSRH, 0);
+	w5300_write(priv, W5300_MTYPE, 0x00ff);
+	mmiowb();
+}
+
+static void w5300_hw_start(struct w5300_priv *priv)
+{
+	w5300_write(priv, W5300_S0_MR, priv->promisc ?
+			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
+	mmiowb();
+	w5300_command(priv, S0_CR_OPEN);
+	w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
+	w5300_write(priv, W5300_IMR, IR_S0);
+	mmiowb();
+}
+
+static void w5300_hw_close(struct w5300_priv *priv)
+{
+	w5300_write(priv, W5300_IMR, 0);
+	mmiowb();
+	w5300_command(priv, S0_CR_CLOSE);
+}
+
+/***********************************************************************
+ *
+ *   Device driver functions / callbacks
+ *
+ ***********************************************************************/
+
+static void w5300_get_drvinfo(struct net_device *ndev,
+			      struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+		sizeof(info->bus_info));
+}
+
+static u32 w5300_get_link(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	if (gpio_is_valid(priv->link_gpio))
+		return !!gpio_get_value(priv->link_gpio);
+
+	return 1;
+}
+
+static u32 w5300_get_msglevel(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	return priv->msg_enable;
+}
+
+static void w5300_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	priv->msg_enable = value;
+}
+
+static int w5300_get_regs_len(struct net_device *ndev)
+{
+	return W5300_REGS_LEN;
+}
+
+static void w5300_get_regs(struct net_device *ndev,
+			   struct ethtool_regs *regs, void *_buf)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+	u8 *buf = _buf;
+	u16 addr;
+	u16 data;
+
+	regs->version = 1;
+	for (addr = 0; addr < W5300_REGS_LEN; addr += 2) {
+		switch (addr & 0x23f) {
+		case W5300_S0_TX_FIFO: /* cannot read TX_FIFO */
+		case W5300_S0_RX_FIFO: /* cannot read RX_FIFO */
+			data = 0xffff;
+			break;
+		default:
+			data = w5300_read(priv, addr);
+			break;
+		}
+		*buf++ = data >> 8;
+		*buf++ = data;
+	}
+}
+
+static void w5300_tx_timeout(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+	w5300_hw_reset(priv);
+	w5300_hw_start(priv);
+	ndev->stats.tx_errors++;
+	ndev->trans_start = jiffies;
+	netif_wake_queue(ndev);
+}
+
+static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+
+	w5300_write_frame(priv, skb->data, skb->len);
+	mmiowb();
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += skb->len;
+	dev_kfree_skb(skb);
+	netif_dbg(priv, tx_queued, ndev, "tx queued\n");
+
+	w5300_command(priv, S0_CR_SEND);
+
+	return NETDEV_TX_OK;
+}
+
+static int w5300_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct w5300_priv *priv = container_of(napi, struct w5300_priv, napi);
+	struct net_device *ndev = priv->ndev;
+	struct sk_buff *skb;
+	int rx_count;
+	u16 rx_len;
+
+	for (rx_count = 0; rx_count < budget; rx_count++) {
+		u32 rx_fifo_len = w5300_read32(priv, W5300_S0_RX_RSR);
+		if (rx_fifo_len == 0)
+			break;
+
+		rx_len = w5300_read(priv, W5300_S0_RX_FIFO);
+
+		skb = netdev_alloc_skb_ip_align(ndev, roundup(rx_len, 2));
+		if (unlikely(!skb)) {
+			u32 i;
+			for (i = 0; i < rx_fifo_len; i += 2)
+				w5300_read(priv, W5300_S0_RX_FIFO);
+			ndev->stats.rx_dropped++;
+			return -ENOMEM;
+		}
+
+		skb_put(skb, rx_len);
+		w5300_read_frame(priv, skb->data, rx_len);
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		netif_receive_skb(skb);
+		ndev->stats.rx_packets++;
+		ndev->stats.rx_bytes += rx_len;
+	}
+
+	if (rx_count < budget) {
+		w5300_write(priv, W5300_IMR, IR_S0);
+		mmiowb();
+		napi_complete(napi);
+	}
+
+	return rx_count;
+}
+
+static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
+{
+	struct net_device *ndev = ndev_instance;
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	int ir = w5300_read(priv, W5300_S0_IR);
+	if (!ir)
+		return IRQ_NONE;
+	w5300_write(priv, W5300_S0_IR, ir);
+	mmiowb();
+
+	if (ir & S0_IR_SENDOK) {
+		netif_dbg(priv, tx_done, ndev, "tx done\n");
+		netif_wake_queue(ndev);
+	}
+
+	if (ir & S0_IR_RECV) {
+		if (napi_schedule_prep(&priv->napi)) {
+			w5300_write(priv, W5300_IMR, 0);
+			mmiowb();
+			__napi_schedule(&priv->napi);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t w5300_detect_link(int irq, void *ndev_instance)
+{
+	struct net_device *ndev = ndev_instance;
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		if (gpio_get_value(priv->link_gpio) != 0) {
+			netif_info(priv, link, ndev, "link is up\n");
+			netif_carrier_on(ndev);
+		} else {
+			netif_info(priv, link, ndev, "link is down\n");
+			netif_carrier_off(ndev);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void w5300_set_rx_mode(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+	bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
+
+	if (priv->promisc != set_promisc) {
+		priv->promisc = set_promisc;
+		w5300_hw_start(priv);
+	}
+}
+
+static int w5300_set_macaddr(struct net_device *ndev, void *addr)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+	struct sockaddr *sock_addr = addr;
+
+	if (!is_valid_ether_addr(sock_addr->sa_data))
+		return -EADDRNOTAVAIL;
+	memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+	ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
+	w5300_write_macaddr(priv);
+	return 0;
+}
+
+static int w5300_open(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	netif_info(priv, ifup, ndev, "enabling\n");
+	if (!is_valid_ether_addr(ndev->dev_addr))
+		return -EINVAL;
+	w5300_hw_start(priv);
+	napi_enable(&priv->napi);
+	netif_start_queue(ndev);
+	if (!gpio_is_valid(priv->link_gpio) ||
+	    gpio_get_value(priv->link_gpio) != 0)
+		netif_carrier_on(ndev);
+	return 0;
+}
+
+static int w5300_stop(struct net_device *ndev)
+{
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	netif_info(priv, ifdown, ndev, "shutting down\n");
+	w5300_hw_close(priv);
+	netif_carrier_off(ndev);
+	netif_stop_queue(ndev);
+	napi_disable(&priv->napi);
+	return 0;
+}
+
+static const struct ethtool_ops w5300_ethtool_ops = {
+	.get_drvinfo		= w5300_get_drvinfo,
+	.get_msglevel		= w5300_get_msglevel,
+	.set_msglevel		= w5300_set_msglevel,
+	.get_link		= w5300_get_link,
+	.get_regs_len		= w5300_get_regs_len,
+	.get_regs		= w5300_get_regs,
+};
+
+static const struct net_device_ops w5300_netdev_ops = {
+	.ndo_open		= w5300_open,
+	.ndo_stop		= w5300_stop,
+	.ndo_start_xmit		= w5300_start_tx,
+	.ndo_tx_timeout		= w5300_tx_timeout,
+	.ndo_set_rx_mode	= w5300_set_rx_mode,
+	.ndo_set_mac_address	= w5300_set_macaddr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+};
+
+static int __devinit w5300_hw_probe(struct platform_device *pdev)
+{
+	struct wiznet_platform_data *data = pdev->dev.platform_data;
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5300_priv *priv = netdev_priv(ndev);
+	const char *name = netdev_name(ndev);
+	struct resource *mem;
+	int mem_size;
+	int irq;
+	int ret;
+
+	if (data && is_valid_ether_addr(data->mac_addr)) {
+		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
+	} else {
+		random_ether_addr(ndev->dev_addr);
+		ndev->addr_assign_type |= NET_ADDR_RANDOM;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem)
+		return -ENXIO;
+	mem_size = resource_size(mem);
+	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
+		return -EBUSY;
+	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
+	if (!priv->base)
+		return -EBUSY;
+
+	spin_lock_init(&priv->reg_lock);
+	priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
+	if (priv->indirect) {
+		priv->read  = w5300_read_indirect;
+		priv->write = w5300_write_indirect;
+	} else {
+		priv->read  = w5300_read_direct;
+		priv->write = w5300_write_direct;
+	}
+
+	w5300_hw_reset(priv);
+	if (w5300_read(priv, W5300_IDR) != IDR_W5300)
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+	ret = request_irq(irq, w5300_interrupt,
+			  IRQ_TYPE_LEVEL_LOW, name, ndev);
+	if (ret < 0)
+		return ret;
+	priv->irq = irq;
+
+	priv->link_gpio = data ? data->link_gpio : -EINVAL;
+	if (gpio_is_valid(priv->link_gpio)) {
+		char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
+		if (!link_name)
+			return -ENOMEM;
+		snprintf(link_name, 16, "%s-link", name);
+		priv->link_irq = gpio_to_irq(priv->link_gpio);
+		if (request_any_context_irq(priv->link_irq, w5300_detect_link,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				link_name, priv->ndev) < 0)
+			priv->link_gpio = -EINVAL;
+	}
+
+	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
+	return 0;
+}
+
+static int __devinit w5300_probe(struct platform_device *pdev)
+{
+	struct w5300_priv *priv;
+	struct net_device *ndev;
+	int err;
+
+	ndev = alloc_etherdev(sizeof(*priv));
+	if (!ndev)
+		return -ENOMEM;
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	platform_set_drvdata(pdev, ndev);
+	priv = netdev_priv(ndev);
+	priv->ndev = ndev;
+
+	ether_setup(ndev);
+	ndev->netdev_ops = &w5300_netdev_ops;
+	ndev->ethtool_ops = &w5300_ethtool_ops;
+	ndev->watchdog_timeo = HZ;
+	netif_napi_add(ndev, &priv->napi, w5300_napi_poll, 16);
+
+	/* This chip doesn't support VLAN packets with normal MTU,
+	 * so disable VLAN for this device.
+	 */
+	ndev->features |= NETIF_F_VLAN_CHALLENGED;
+
+	err = register_netdev(ndev);
+	if (err < 0)
+		goto err_register;
+
+	err = w5300_hw_probe(pdev);
+	if (err < 0)
+		goto err_hw_probe;
+
+	return 0;
+
+err_hw_probe:
+	unregister_netdev(ndev);
+err_register:
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static int __devexit w5300_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	w5300_hw_reset(priv);
+	free_irq(priv->irq, ndev);
+	if (gpio_is_valid(priv->link_gpio))
+		free_irq(priv->link_irq, ndev);
+
+	unregister_netdev(ndev);
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int w5300_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		netif_carrier_off(ndev);
+		netif_device_detach(ndev);
+
+		w5300_hw_close(priv);
+	}
+	return 0;
+}
+
+static int w5300_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct w5300_priv *priv = netdev_priv(ndev);
+
+	if (!netif_running(ndev)) {
+		w5300_hw_reset(priv);
+		w5300_hw_start(priv);
+
+		netif_device_attach(ndev);
+		if (!gpio_is_valid(priv->link_gpio) ||
+		    gpio_get_value(priv->link_gpio) != 0)
+			netif_carrier_on(ndev);
+	}
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);
+
+static struct platform_driver w5300_driver = {
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.pm	= &w5300_pm_ops,
+	},
+	.probe		= w5300_probe,
+	.remove		= __devexit_p(w5300_remove),
+};
+
+module_platform_driver(w5300_driver);
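[Illustration] The w5300 probe above selects its register accessors at runtime: when the MMIO window is smaller than W5300_BUS_DIRECT_SIZE it installs the indirect read/write helpers, otherwise the direct ones, and every later register access goes through the function pointers stored in the private struct. A minimal stand-alone C sketch of that dispatch pattern follows; all demo_* names and DEMO_BUS_DIRECT_SIZE are hypothetical placeholders for illustration, not the driver's actual I/O helpers.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_priv {
	/* Accessors chosen once at probe time, used for every register access. */
	uint16_t (*read)(struct demo_priv *priv, uint16_t addr);
	void (*write)(struct demo_priv *priv, uint16_t addr, uint16_t data);
};

/* Direct mode: every register is individually addressable in the window. */
static uint16_t demo_read_direct(struct demo_priv *priv, uint16_t addr)
{
	(void)priv;
	printf("direct read    0x%04x\n", addr);
	return 0;
}

static void demo_write_direct(struct demo_priv *priv, uint16_t addr, uint16_t data)
{
	(void)priv;
	printf("direct write   0x%04x = 0x%04x\n", addr, data);
}

/* Indirect mode: accesses would go through address/data window registers. */
static uint16_t demo_read_indirect(struct demo_priv *priv, uint16_t addr)
{
	(void)priv;
	printf("indirect read  0x%04x\n", addr);
	return 0;
}

static void demo_write_indirect(struct demo_priv *priv, uint16_t addr, uint16_t data)
{
	(void)priv;
	printf("indirect write 0x%04x = 0x%04x\n", addr, data);
}

#define DEMO_BUS_DIRECT_SIZE 0x8000	/* hypothetical threshold */

static void demo_select_bus_mode(struct demo_priv *priv, size_t mem_size)
{
	if (mem_size < DEMO_BUS_DIRECT_SIZE) {
		priv->read  = demo_read_indirect;
		priv->write = demo_write_indirect;
	} else {
		priv->read  = demo_read_direct;
		priv->write = demo_write_direct;
	}
}

int main(void)
{
	struct demo_priv priv = { 0 };

	demo_select_bus_mode(&priv, 0x4000);	/* small window -> indirect */
	priv.write(&priv, 0x0010, 0x1234);
	priv.read(&priv, 0x0010);
	return 0;
}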
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index d21591a..1eaf712 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1000,6 +1000,7 @@
 	.set_settings = temac_set_settings,
 	.nway_reset = temac_nway_reset,
 	.get_link = ethtool_op_get_link,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 static int __devinit temac_of_probe(struct platform_device *op)
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index cf67352..3f43101 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -5,8 +5,8 @@
 config NET_VENDOR_XSCALE
 	bool "Intel XScale IXP devices"
 	default y
-	depends on NET_VENDOR_INTEL && ((ARM && ARCH_IXP4XX && \
-		   IXP4XX_NPE && IXP4XX_QMGR) || ARCH_ENP2611)
+	depends on NET_VENDOR_INTEL && (ARM && ARCH_IXP4XX && \
+		   IXP4XX_NPE && IXP4XX_QMGR)
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
@@ -27,6 +27,4 @@
 	  Say Y here if you want to use built-in Ethernet ports
 	  on IXP4xx processor.
 
-source "drivers/net/ethernet/xscale/ixp2000/Kconfig"
-
 endif # NET_VENDOR_XSCALE
diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile
index b195b9d..abc3b03 100644
--- a/drivers/net/ethernet/xscale/Makefile
+++ b/drivers/net/ethernet/xscale/Makefile
@@ -2,5 +2,4 @@
 # Makefile for the Intel XScale IXP device drivers.
 #
 
-obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
 obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/Kconfig b/drivers/net/ethernet/xscale/ixp2000/Kconfig
deleted file mode 100644
index 58dbc5b..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config ENP2611_MSF_NET
-	tristate "Radisys ENP2611 MSF network interface support"
-	depends on ARCH_ENP2611
-	---help---
-	  This is a driver for the MSF network interface unit in
-	  the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/ethernet/xscale/ixp2000/Makefile b/drivers/net/ethernet/xscale/ixp2000/Makefile
deleted file mode 100644
index fd38351..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_ENP2611_MSF_NET) += enp2611_mod.o
-
-enp2611_mod-objs := caleb.o enp2611.o ixp2400-msf.o ixpdev.o pm3386.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.c b/drivers/net/ethernet/xscale/ixp2000/caleb.c
deleted file mode 100644
index 7dea5b9..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/caleb.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include "caleb.h"
-
-#define CALEB_IDLO		0x00
-#define CALEB_IDHI		0x01
-#define CALEB_RID		0x02
-#define CALEB_RESET		0x03
-#define CALEB_INTREN0		0x04
-#define CALEB_INTREN1		0x05
-#define CALEB_INTRSTAT0		0x06
-#define CALEB_INTRSTAT1		0x07
-#define CALEB_PORTEN		0x08
-#define CALEB_BURST		0x09
-#define CALEB_PORTPAUS		0x0A
-#define CALEB_PORTPAUSD		0x0B
-#define CALEB_PHY0RX		0x10
-#define CALEB_PHY1RX		0x11
-#define CALEB_PHY0TX		0x12
-#define CALEB_PHY1TX		0x13
-#define CALEB_IXPRX_HI_CNTR	0x15
-#define CALEB_PHY0RX_HI_CNTR	0x16
-#define CALEB_PHY1RX_HI_CNTR	0x17
-#define CALEB_IXPRX_CNTR	0x18
-#define CALEB_PHY0RX_CNTR	0x19
-#define CALEB_PHY1RX_CNTR	0x1A
-#define CALEB_IXPTX_CNTR	0x1B
-#define CALEB_PHY0TX_CNTR	0x1C
-#define CALEB_PHY1TX_CNTR	0x1D
-#define CALEB_DEBUG0		0x1E
-#define CALEB_DEBUG1		0x1F
-
-
-static u8 caleb_reg_read(int reg)
-{
-	u8 value;
-
-	value = *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg));
-
-//	printk(KERN_INFO "caleb_reg_read(%d) = %.2x\n", reg, value);
-
-	return value;
-}
-
-static void caleb_reg_write(int reg, u8 value)
-{
-	u8 dummy;
-
-//	printk(KERN_INFO "caleb_reg_write(%d, %.2x)\n", reg, value);
-
-	*((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)) = value;
-
-	dummy = *((volatile u8 *)ENP2611_CALEB_VIRT_BASE);
-	__asm__ __volatile__("mov %0, %0" : "+r" (dummy));
-}
-
-
-void caleb_reset(void)
-{
-	/*
-	 * Perform a chip reset.
-	 */
-	caleb_reg_write(CALEB_RESET, 0x02);
-	udelay(1);
-
-	/*
-	 * Enable all interrupt sources.  This is needed to get
-	 * meaningful results out of the status bits (register 6
-	 * and 7.)
-	 */
-	caleb_reg_write(CALEB_INTREN0, 0xff);
-	caleb_reg_write(CALEB_INTREN1, 0x07);
-
-	/*
-	 * Set RX and TX FIFO thresholds to 1.5kb.
-	 */
-	caleb_reg_write(CALEB_PHY0RX, 0x11);
-	caleb_reg_write(CALEB_PHY1RX, 0x11);
-	caleb_reg_write(CALEB_PHY0TX, 0x11);
-	caleb_reg_write(CALEB_PHY1TX, 0x11);
-
-	/*
-	 * Program SPI-3 burst size.
-	 */
-	caleb_reg_write(CALEB_BURST, 0);	// 64-byte RBUF mpackets
-//	caleb_reg_write(CALEB_BURST, 1);	// 128-byte RBUF mpackets
-//	caleb_reg_write(CALEB_BURST, 2);	// 256-byte RBUF mpackets
-}
-
-void caleb_enable_rx(int port)
-{
-	u8 temp;
-
-	temp = caleb_reg_read(CALEB_PORTEN);
-	temp |= 1 << port;
-	caleb_reg_write(CALEB_PORTEN, temp);
-}
-
-void caleb_disable_rx(int port)
-{
-	u8 temp;
-
-	temp = caleb_reg_read(CALEB_PORTEN);
-	temp &= ~(1 << port);
-	caleb_reg_write(CALEB_PORTEN, temp);
-}
-
-void caleb_enable_tx(int port)
-{
-	u8 temp;
-
-	temp = caleb_reg_read(CALEB_PORTEN);
-	temp |= 1 << (port + 4);
-	caleb_reg_write(CALEB_PORTEN, temp);
-}
-
-void caleb_disable_tx(int port)
-{
-	u8 temp;
-
-	temp = caleb_reg_read(CALEB_PORTEN);
-	temp &= ~(1 << (port + 4));
-	caleb_reg_write(CALEB_PORTEN, temp);
-}
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.h b/drivers/net/ethernet/xscale/ixp2000/caleb.h
deleted file mode 100644
index e93a1ef..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/caleb.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __CALEB_H
-#define __CALEB_H
-
-void caleb_reset(void);
-void caleb_enable_rx(int port);
-void caleb_disable_rx(int port);
-void caleb_enable_tx(int port);
-void caleb_disable_tx(int port);
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/enp2611.c b/drivers/net/ethernet/xscale/ixp2000/enp2611.c
deleted file mode 100644
index 34a6cfd..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/enp2611.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * IXP2400 MSF network device driver for the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include <asm/hardware/uengine.h>
-#include <asm/mach-types.h>
-#include <asm/io.h>
-#include "ixpdev.h"
-#include "caleb.h"
-#include "ixp2400-msf.h"
-#include "pm3386.h"
-
-/***********************************************************************
- * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
- * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
- * to the IXP2400.
- *
- *                +-------------+
- * SFP GBIC #0 ---+             |       +---------+
- *                |  PM3386 #0  +-------+         |
- * SFP GBIC #1 ---+             |       | "Caleb" |         +---------+
- *                +-------------+       |         |         |         |
- *                                      | SPI-3   +---------+ IXP2400 |
- *                +-------------+       | bridge  |         |         |
- * SFP GBIC #2 ---+             |       | FPGA    |         +---------+
- *                |  PM3386 #1  +-------+         |
- *                |             |       +---------+
- *                +-------------+
- *              ^                   ^                  ^
- *              | 1.25Gbaud         | 104MHz           | 104MHz
- *              | SERDES ea.        | SPI-3 ea.        | SPI-3
- *
- ***********************************************************************/
-static struct ixp2400_msf_parameters enp2611_msf_parameters =
-{
-	.rx_mode =		IXP2400_RX_MODE_UTOPIA_POS |
-				IXP2400_RX_MODE_1x32 |
-				IXP2400_RX_MODE_MPHY |
-				IXP2400_RX_MODE_MPHY_32 |
-				IXP2400_RX_MODE_MPHY_POLLED_STATUS |
-				IXP2400_RX_MODE_MPHY_LEVEL3 |
-				IXP2400_RX_MODE_RBUF_SIZE_64,
-
-	.rxclk01_multiplier =	IXP2400_PLL_MULTIPLIER_16,
-
-	.rx_poll_ports =	3,
-
-	.rx_channel_mode = {
-		IXP2400_PORT_RX_MODE_MASTER |
-		IXP2400_PORT_RX_MODE_POS_PHY |
-		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-		IXP2400_PORT_RX_MODE_ODD_PARITY |
-		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_RX_MODE_MASTER |
-		IXP2400_PORT_RX_MODE_POS_PHY |
-		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-		IXP2400_PORT_RX_MODE_ODD_PARITY |
-		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_RX_MODE_MASTER |
-		IXP2400_PORT_RX_MODE_POS_PHY |
-		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-		IXP2400_PORT_RX_MODE_ODD_PARITY |
-		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_RX_MODE_MASTER |
-		IXP2400_PORT_RX_MODE_POS_PHY |
-		IXP2400_PORT_RX_MODE_POS_PHY_L3 |
-		IXP2400_PORT_RX_MODE_ODD_PARITY |
-		IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
-	},
-
-	.tx_mode =		IXP2400_TX_MODE_UTOPIA_POS |
-				IXP2400_TX_MODE_1x32 |
-				IXP2400_TX_MODE_MPHY |
-				IXP2400_TX_MODE_MPHY_32 |
-				IXP2400_TX_MODE_MPHY_POLLED_STATUS |
-				IXP2400_TX_MODE_MPHY_LEVEL3 |
-				IXP2400_TX_MODE_TBUF_SIZE_64,
-
-	.txclk01_multiplier =	IXP2400_PLL_MULTIPLIER_16,
-
-	.tx_poll_ports =	3,
-
-	.tx_channel_mode = {
-		IXP2400_PORT_TX_MODE_MASTER |
-		IXP2400_PORT_TX_MODE_POS_PHY |
-		IXP2400_PORT_TX_MODE_ODD_PARITY |
-		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_TX_MODE_MASTER |
-		IXP2400_PORT_TX_MODE_POS_PHY |
-		IXP2400_PORT_TX_MODE_ODD_PARITY |
-		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_TX_MODE_MASTER |
-		IXP2400_PORT_TX_MODE_POS_PHY |
-		IXP2400_PORT_TX_MODE_ODD_PARITY |
-		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
-
-		IXP2400_PORT_TX_MODE_MASTER |
-		IXP2400_PORT_TX_MODE_POS_PHY |
-		IXP2400_PORT_TX_MODE_ODD_PARITY |
-		IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
-	}
-};
-
-static struct net_device *nds[3];
-static struct timer_list link_check_timer;
-
-/* @@@ Poll the SFP moddef0 line too.  */
-/* @@@ Try to use the pm3386 DOOL interrupt as well.  */
-static void enp2611_check_link_status(unsigned long __dummy)
-{
-	int i;
-
-	for (i = 0; i < 3; i++) {
-		struct net_device *dev;
-		int status;
-
-		dev = nds[i];
-		if (dev == NULL)
-			continue;
-
-		status = pm3386_is_link_up(i);
-		if (status && !netif_carrier_ok(dev)) {
-			/* @@@ Should report autonegotiation status.  */
-			printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);
-
-			pm3386_enable_tx(i);
-			caleb_enable_tx(i);
-			netif_carrier_on(dev);
-		} else if (!status && netif_carrier_ok(dev)) {
-			printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);
-
-			netif_carrier_off(dev);
-			caleb_disable_tx(i);
-			pm3386_disable_tx(i);
-		}
-	}
-
-	link_check_timer.expires = jiffies + HZ / 10;
-	add_timer(&link_check_timer);
-}
-
-static void enp2611_set_port_admin_status(int port, int up)
-{
-	if (up) {
-		caleb_enable_rx(port);
-
-		pm3386_set_carrier(port, 1);
-		pm3386_enable_rx(port);
-	} else {
-		caleb_disable_tx(port);
-		pm3386_disable_tx(port);
-		/* @@@ Flush out pending packets.  */
-		pm3386_set_carrier(port, 0);
-
-		pm3386_disable_rx(port);
-		caleb_disable_rx(port);
-	}
-}
-
-static int __init enp2611_init_module(void)
-{ 
-	int ports;
-	int i;
-
-	if (!machine_is_enp2611())
-		return -ENODEV;
-
-	caleb_reset();
-	pm3386_reset();
-
-	ports = pm3386_port_count();
-	for (i = 0; i < ports; i++) {
-		nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
-		if (nds[i] == NULL) {
-			while (--i >= 0)
-				free_netdev(nds[i]);
-			return -ENOMEM;
-		}
-
-		pm3386_init_port(i);
-		pm3386_get_mac(i, nds[i]->dev_addr);
-	}
-
-	ixp2400_msf_init(&enp2611_msf_parameters);
-
-	if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
-		for (i = 0; i < ports; i++)
-			if (nds[i])
-				free_netdev(nds[i]);
-		return -EINVAL;
-	}
-
-	init_timer(&link_check_timer);
-	link_check_timer.function = enp2611_check_link_status;
-	link_check_timer.expires = jiffies;
-	add_timer(&link_check_timer);
-
-	return 0;
-}
-
-static void __exit enp2611_cleanup_module(void)
-{
-	int i;
-
-	del_timer_sync(&link_check_timer);
-
-	ixpdev_deinit();
-	for (i = 0; i < 3; i++)
-		free_netdev(nds[i]);
-}
-
-module_init(enp2611_init_module);
-module_exit(enp2611_cleanup_module);
-MODULE_LICENSE("GPL");
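[Illustration] The link watchdog removed here acts only on transitions: when pm3386_is_link_up() reports link while carrier is off it enables TX and raises carrier, and it does the reverse on the opposite edge, re-arming itself every HZ/10. A tiny userspace sketch of that edge-detecting poll loop is below; demo_phy_link_up() is a hypothetical stand-in for the PHY query and no kernel APIs are used.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for pm3386_is_link_up(): current PHY link state. */
static bool demo_phy_link_up(int poll)
{
	/* Pretend the link comes up on the third poll and drops on the sixth. */
	return poll >= 3 && poll < 6;
}

int main(void)
{
	bool carrier_ok = false;	/* mirrors netif_carrier_ok() */
	int poll;

	for (poll = 0; poll < 8; poll++) {
		bool status = demo_phy_link_up(poll);

		if (status && !carrier_ok) {
			printf("poll %d: NIC Link is Up\n", poll);
			carrier_ok = true;	/* enable TX, raise carrier */
		} else if (!status && carrier_ok) {
			printf("poll %d: NIC Link is Down\n", poll);
			carrier_ok = false;	/* drop carrier, disable TX */
		}
		/* The driver re-arms its timer here at jiffies + HZ / 10. */
	}
	return 0;
}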
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
deleted file mode 100644
index f5ffd7e..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * Generic library functions for the MSF (Media and Switch Fabric) unit
- * found on the Intel IXP2400 network processor.
- *
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of the
- * License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <mach/hardware.h>
-#include <mach/ixp2000-regs.h>
-#include <asm/delay.h>
-#include <asm/io.h>
-#include "ixp2400-msf.h"
-
-/*
- * This is the Intel recommended PLL init procedure as described on
- * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual.
- */
-static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp)
-{
-	int rx_dual_clock;
-	int tx_dual_clock;
-	u32 value;
-
-	/*
-	 * If the RX mode is not 1x32, we have to enable both RX PLLs
-	 * (#0 and #1.)  The same thing for the TX direction.
-	 */
-	rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK);
-	tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK);
-
-	/*
-	 * Read initial value.
-	 */
-	value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL);
-
-	/*
-	 * Put PLLs in powerdown and bypass mode.
-	 */
-	value |= 0x0000f0f0;
-	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-	/*
-	 * Set single or dual clock mode bits.
-	 */
-	value &= ~0x03000000;
-	value |= (rx_dual_clock << 24) | (tx_dual_clock << 25);
-
-	/*
-	 * Set multipliers.
-	 */
-	value &= ~0x00ff0000;
-	value |= mp->rxclk01_multiplier << 16;
-	value |= mp->rxclk23_multiplier << 18;
-	value |= mp->txclk01_multiplier << 20;
-	value |= mp->txclk23_multiplier << 22;
-
-	/*
-	 * And write value.
-	 */
-	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-	/*
-	 * Disable PLL bypass mode.
-	 */
-	value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15);
-	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-	/*
-	 * Turn on PLLs.
-	 */
-	value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7);
-	ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
-
-	/*
-	 * Wait for PLLs to lock.  There are lock status bits, but IXP2400
-	 * erratum #65 says that these lock bits should not be relied upon
-	 * as they might not accurately reflect the true state of the PLLs.
-	 */
-	udelay(100);
-}
-
-/*
- * Needed according to p480 of Programmer's Reference Manual.
- */
-static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp)
-{
-	int size_bits;
-	int i;
-
-	/*
-	 * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer
-	 * corruption) in the Intel-recommended way: do not add the RBUF
-	 * elements susceptible to corruption to the freelist.
-	 */
-	size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK;
-	if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) {
-		for (i = 1; i < 128; i++) {
-			if (i == 9 || i == 18 || i == 27)
-				continue;
-			ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
-		}
-	} else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) {
-		for (i = 1; i < 64; i++) {
-			if (i == 4 || i == 9 || i == 13)
-				continue;
-			ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
-		}
-	} else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) {
-		for (i = 1; i < 32; i++) {
-			if (i == 2 || i == 4 || i == 6)
-				continue;
-			ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
-		}
-	}
-}
-
-static u32 ixp2400_msf_valid_channels(u32 reg)
-{
-	u32 channels;
-
-	channels = 0;
-	switch (reg & IXP2400_RX_MODE_WIDTH_MASK) {
-	case IXP2400_RX_MODE_1x32:
-		channels = 0x1;
-		if (reg & IXP2400_RX_MODE_MPHY &&
-		    !(reg & IXP2400_RX_MODE_MPHY_32))
-			channels = 0xf;
-		break;
-
-	case IXP2400_RX_MODE_2x16:
-		channels = 0x5;
-		break;
-
-	case IXP2400_RX_MODE_4x8:
-		channels = 0xf;
-		break;
-
-	case IXP2400_RX_MODE_1x16_2x8:
-		channels = 0xd;
-		break;
-	}
-
-	return channels;
-}
-
-static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp)
-{
-	u32 value;
-
-	value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff;
-	value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28;
-	ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value);
-}
-
-static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp)
-{
-	u32 value;
-
-	value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff;
-	value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28;
-	ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value);
-}
-
-
-void ixp2400_msf_init(struct ixp2400_msf_parameters *mp)
-{
-	u32 value;
-	int i;
-
-	/*
-	 * Init the RX/TX PLLs based on the passed parameter block.
-	 */
-	ixp2400_pll_init(mp);
-
-	/*
-	 * Reset MSF.  Bit 7 in IXP_RESET_0 resets the MSF.
-	 */
-	value = ixp2000_reg_read(IXP2000_RESET0);
-	ixp2000_reg_write(IXP2000_RESET0, value | 0x80);
-	ixp2000_reg_write(IXP2000_RESET0, value & ~0x80);
-
-	/*
-	 * Initialise the RX section.
-	 */
-	ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1);
-	ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode);
-	for (i = 0; i < 4; i++) {
-		ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i,
-						mp->rx_channel_mode[i]);
-	}
-	ixp2400_msf_free_rbuf_entries(mp);
-	ixp2400_msf_enable_rx(mp);
-
-	/*
-	 * Initialise the TX section.
-	 */
-	ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1);
-	ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode);
-	for (i = 0; i < 4; i++) {
-		ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i,
-						mp->tx_channel_mode[i]);
-	}
-	ixp2400_msf_enable_tx(mp);
-}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
deleted file mode 100644
index 3ac1af2..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Generic library functions for the MSF (Media and Switch Fabric) unit
- * found on the Intel IXP2400 network processor.
- *
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of the
- * License, or (at your option) any later version.
- */
-
-#ifndef __IXP2400_MSF_H
-#define __IXP2400_MSF_H
-
-struct ixp2400_msf_parameters
-{
-	u32				rx_mode;
-	unsigned			rxclk01_multiplier:2;
-	unsigned			rxclk23_multiplier:2;
-	unsigned			rx_poll_ports:6;
-	u32				rx_channel_mode[4];
-
-	u32				tx_mode;
-	unsigned			txclk01_multiplier:2;
-	unsigned			txclk23_multiplier:2;
-	unsigned			tx_poll_ports:6;
-	u32				tx_channel_mode[4];
-};
-
-void ixp2400_msf_init(struct ixp2400_msf_parameters *mp);
-
-#define IXP2400_PLL_MULTIPLIER_48		0x00
-#define IXP2400_PLL_MULTIPLIER_24		0x01
-#define IXP2400_PLL_MULTIPLIER_16		0x02
-#define IXP2400_PLL_MULTIPLIER_12		0x03
-
-#define IXP2400_RX_MODE_CSIX			0x00400000
-#define IXP2400_RX_MODE_UTOPIA_POS		0x00000000
-#define IXP2400_RX_MODE_WIDTH_MASK		0x00300000
-#define IXP2400_RX_MODE_1x16_2x8		0x00300000
-#define IXP2400_RX_MODE_4x8			0x00200000
-#define IXP2400_RX_MODE_2x16			0x00100000
-#define IXP2400_RX_MODE_1x32			0x00000000
-#define IXP2400_RX_MODE_MPHY			0x00080000
-#define IXP2400_RX_MODE_SPHY			0x00000000
-#define IXP2400_RX_MODE_MPHY_32			0x00040000
-#define IXP2400_RX_MODE_MPHY_4			0x00000000
-#define IXP2400_RX_MODE_MPHY_POLLED_STATUS	0x00020000
-#define IXP2400_RX_MODE_MPHY_DIRECT_STATUS	0x00000000
-#define IXP2400_RX_MODE_CBUS_FULL_DUPLEX	0x00010000
-#define IXP2400_RX_MODE_CBUS_SIMPLEX		0x00000000
-#define IXP2400_RX_MODE_MPHY_LEVEL2		0x00004000
-#define IXP2400_RX_MODE_MPHY_LEVEL3		0x00000000
-#define IXP2400_RX_MODE_CBUS_8BIT		0x00002000
-#define IXP2400_RX_MODE_CBUS_4BIT		0x00000000
-#define IXP2400_RX_MODE_CSIX_SINGLE_FREELIST	0x00000200
-#define IXP2400_RX_MODE_CSIX_SPLIT_FREELISTS	0x00000000
-#define IXP2400_RX_MODE_RBUF_SIZE_MASK		0x0000000c
-#define IXP2400_RX_MODE_RBUF_SIZE_256		0x00000008
-#define IXP2400_RX_MODE_RBUF_SIZE_128		0x00000004
-#define IXP2400_RX_MODE_RBUF_SIZE_64		0x00000000
-
-#define IXP2400_PORT_RX_MODE_SLAVE		0x00000040
-#define IXP2400_PORT_RX_MODE_MASTER		0x00000000
-#define IXP2400_PORT_RX_MODE_POS_PHY_L3		0x00000020
-#define IXP2400_PORT_RX_MODE_POS_PHY_L2		0x00000000
-#define IXP2400_PORT_RX_MODE_POS_PHY		0x00000010
-#define IXP2400_PORT_RX_MODE_UTOPIA		0x00000000
-#define IXP2400_PORT_RX_MODE_EVEN_PARITY	0x0000000c
-#define IXP2400_PORT_RX_MODE_ODD_PARITY		0x00000008
-#define IXP2400_PORT_RX_MODE_NO_PARITY		0x00000000
-#define IXP2400_PORT_RX_MODE_UTOPIA_BIG_CELLS	0x00000002
-#define IXP2400_PORT_RX_MODE_UTOPIA_NORMAL_CELLS	0x00000000
-#define IXP2400_PORT_RX_MODE_2_CYCLE_DECODE	0x00000001
-#define IXP2400_PORT_RX_MODE_1_CYCLE_DECODE	0x00000000
-
-#define IXP2400_TX_MODE_CSIX			0x00400000
-#define IXP2400_TX_MODE_UTOPIA_POS		0x00000000
-#define IXP2400_TX_MODE_WIDTH_MASK		0x00300000
-#define IXP2400_TX_MODE_1x16_2x8		0x00300000
-#define IXP2400_TX_MODE_4x8			0x00200000
-#define IXP2400_TX_MODE_2x16			0x00100000
-#define IXP2400_TX_MODE_1x32			0x00000000
-#define IXP2400_TX_MODE_MPHY			0x00080000
-#define IXP2400_TX_MODE_SPHY			0x00000000
-#define IXP2400_TX_MODE_MPHY_32			0x00040000
-#define IXP2400_TX_MODE_MPHY_4			0x00000000
-#define IXP2400_TX_MODE_MPHY_POLLED_STATUS	0x00020000
-#define IXP2400_TX_MODE_MPHY_DIRECT_STATUS	0x00000000
-#define IXP2400_TX_MODE_CBUS_FULL_DUPLEX	0x00010000
-#define IXP2400_TX_MODE_CBUS_SIMPLEX		0x00000000
-#define IXP2400_TX_MODE_MPHY_LEVEL2		0x00004000
-#define IXP2400_TX_MODE_MPHY_LEVEL3		0x00000000
-#define IXP2400_TX_MODE_CBUS_8BIT		0x00002000
-#define IXP2400_TX_MODE_CBUS_4BIT		0x00000000
-#define IXP2400_TX_MODE_TBUF_SIZE_MASK		0x0000000c
-#define IXP2400_TX_MODE_TBUF_SIZE_256		0x00000008
-#define IXP2400_TX_MODE_TBUF_SIZE_128		0x00000004
-#define IXP2400_TX_MODE_TBUF_SIZE_64		0x00000000
-
-#define IXP2400_PORT_TX_MODE_SLAVE		0x00000040
-#define IXP2400_PORT_TX_MODE_MASTER		0x00000000
-#define IXP2400_PORT_TX_MODE_POS_PHY		0x00000010
-#define IXP2400_PORT_TX_MODE_UTOPIA		0x00000000
-#define IXP2400_PORT_TX_MODE_EVEN_PARITY	0x0000000c
-#define IXP2400_PORT_TX_MODE_ODD_PARITY		0x00000008
-#define IXP2400_PORT_TX_MODE_NO_PARITY		0x00000000
-#define IXP2400_PORT_TX_MODE_UTOPIA_BIG_CELLS	0x00000002
-#define IXP2400_PORT_TX_MODE_2_CYCLE_DECODE	0x00000001
-#define IXP2400_PORT_TX_MODE_1_CYCLE_DECODE	0x00000000
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
deleted file mode 100644
index 42a73e35..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * RX ucode for the Intel IXP2400 in POS-PHY mode.
- * Copyright (C) 2004, 2005 Lennert Buytenhek
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Assumptions made in this code:
- * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
- *   only one full element list is used.  This includes, for example,
- *   1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4.  (This
- *   is not an exhaustive list.)
- * - The RBUF uses 64-byte mpackets.
- * - RX descriptors reside in SRAM, and have the following format:
- *	struct rx_desc
- *	{
- *	// to uengine
- *		u32	buf_phys_addr;
- *		u32	buf_length;
- *
- *	// from uengine
- *		u32	channel;
- *		u32	pkt_length;
- *	};
- * - Packet data resides in DRAM.
- * - Packet buffer addresses are 8-byte aligned.
- * - Scratch ring 0 is rx_pending.
- * - Scratch ring 1 is rx_done, and has status condition 'full'.
- * - The host triggers rx_done flush and rx_pending refill on seeing INTA.
- * - This code is run on all eight threads of the microengine it runs on.
- *
- * Local memory is used for per-channel RX state.
- */
-
-#define RX_THREAD_FREELIST_0		0x0030
-#define RBUF_ELEMENT_DONE		0x0044
-
-#define CHANNEL_FLAGS			*l$index0[0]
-#define CHANNEL_FLAG_RECEIVING		1
-#define PACKET_LENGTH			*l$index0[1]
-#define PACKET_CHECKSUM			*l$index0[2]
-#define BUFFER_HANDLE			*l$index0[3]
-#define BUFFER_START			*l$index0[4]
-#define BUFFER_LENGTH			*l$index0[5]
-
-#define CHANNEL_STATE_SIZE		24	// in bytes
-#define CHANNEL_STATE_SHIFT		5	// ceil(log2(state size))
-
-
-	.sig volatile sig1
-	.sig volatile sig2
-	.sig volatile sig3
-
-	.sig mpacket_arrived
-	.reg add_to_rx_freelist
-	.reg read $rsw0, $rsw1
-	.xfer_order $rsw0 $rsw1
-
-	.reg zero
-
-	/*
-	 * Initialise add_to_rx_freelist.
-	 */
-	.begin
-		.reg temp
-		.reg temp2
-
-		immed[add_to_rx_freelist, RX_THREAD_FREELIST_0]
-		immed_w1[add_to_rx_freelist, (&$rsw0 | (&mpacket_arrived << 12))]
-
-		local_csr_rd[ACTIVE_CTX_STS]
-		immed[temp, 0]
-		alu[temp2, temp, and, 0x1f]
-		alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<20]
-		alu[temp2, temp, and, 0x80]
-		alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<18]
-	.end
-
-	immed[zero, 0]
-
-	/*
-	 * Skip context 0 initialisation?
-	 */
-	.begin
-		br!=ctx[0, mpacket_receive_loop#]
-	.end
-
-	/*
-	 * Initialise local memory.
-	 */
-	.begin
-		.reg addr
-		.reg temp
-
-		immed[temp, 0]
-	init_local_mem_loop#:
-		alu_shf[addr, --, b, temp, <<CHANNEL_STATE_SHIFT]
-		local_csr_wr[ACTIVE_LM_ADDR_0, addr]
-		nop
-		nop
-		nop
-
-		immed[CHANNEL_FLAGS, 0]
-
-		alu[temp, temp, +, 1]
-		alu[--, temp, and, 0x20]
-		beq[init_local_mem_loop#]
-	.end
-
-	/*
-	 * Initialise signal pipeline.
-	 */
-	.begin
-		local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
-		.set_sig sig1
-
-		local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
-		.set_sig sig2
-
-		local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
-		.set_sig sig3
-	.end
-
-mpacket_receive_loop#:
-	/*
-	 * Synchronise and wait for mpacket.
-	 */
-	.begin
-		ctx_arb[sig1]
-		local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
-
-		msf[fast_wr, --, add_to_rx_freelist, 0]
-		.set_sig mpacket_arrived
-		ctx_arb[mpacket_arrived]
-		.set $rsw0 $rsw1
-	.end
-
-	/*
-	 * We halt if we see {inbparerr,parerr,null,soperror}.
-	 */
-	.begin
-		alu_shf[--, 0x1b, and, $rsw0, >>8]
-		bne[abort_rswerr#]
-	.end
-
-	/*
-	 * Point local memory pointer to this channel's state area.
-	 */
-	.begin
-		.reg chanaddr
-
-		alu[chanaddr, $rsw0, and, 0x1f]
-		alu_shf[chanaddr, --, b, chanaddr, <<CHANNEL_STATE_SHIFT]
-		local_csr_wr[ACTIVE_LM_ADDR_0, chanaddr]
-		nop
-		nop
-		nop
-	.end
-
-	/*
-	 * Check whether we received a SOP mpacket while we were already
-	 * working on a packet, or a non-SOP mpacket while there was no
-	 * packet pending.  (SOP == RECEIVING -> abort)  If everything's
-	 * okay, update the RECEIVING flag to reflect our new state.
-	 */
-	.begin
-		.reg temp
-		.reg eop
-
-		#if CHANNEL_FLAG_RECEIVING != 1
-		#error CHANNEL_FLAG_RECEIVING is not 1
-		#endif
-
-		alu_shf[temp, 1, and, $rsw0, >>15]
-		alu[temp, temp, xor, CHANNEL_FLAGS]
-		alu[--, temp, and, CHANNEL_FLAG_RECEIVING]
-		beq[abort_proterr#]
-
-		alu_shf[eop, 1, and, $rsw0, >>14]
-		alu[CHANNEL_FLAGS, temp, xor, eop]
-	.end
-
-	/*
-	 * Copy the mpacket into the right spot, and in case of EOP,
-	 * write back the descriptor and pass the packet on.
-	 */
-	.begin
-		.reg buffer_offset
-		.reg _packet_length
-		.reg _packet_checksum
-		.reg _buffer_handle
-		.reg _buffer_start
-		.reg _buffer_length
-
-		/*
-		 * Determine buffer_offset, _packet_length and
-		 * _packet_checksum.
-		 */
-		.begin
-			.reg temp
-
-			alu[--, 1, and, $rsw0, >>15]
-			beq[not_sop#]
-
-			immed[PACKET_LENGTH, 0]
-			immed[PACKET_CHECKSUM, 0]
-
-		not_sop#:
-			alu[buffer_offset, --, b, PACKET_LENGTH]
-			alu_shf[temp, 0xff, and, $rsw0, >>16]
-			alu[_packet_length, buffer_offset, +, temp]
-			alu[PACKET_LENGTH, --, b, _packet_length]
-
-			immed[temp, 0xffff]
-			alu[temp, $rsw1, and, temp]
-			alu[_packet_checksum, PACKET_CHECKSUM, +, temp]
-			alu[PACKET_CHECKSUM, --, b, _packet_checksum]
-		.end
-
-		/*
-		 * Allocate buffer in case of SOP.
-		 */
-		.begin
-			.reg temp
-
-			alu[temp, 1, and, $rsw0, >>15]
-			beq[skip_buffer_alloc#]
-
-			.begin
-				.sig zzz
-				.reg read $stemp $stemp2
-				.xfer_order $stemp $stemp2
-
-			rx_nobufs#:
-				scratch[get, $stemp, zero, 0, 1], ctx_swap[zzz]
-				alu[_buffer_handle, --, b, $stemp]
-				beq[rx_nobufs#]
-
-				sram[read, $stemp, _buffer_handle, 0, 2],
-								ctx_swap[zzz]
-				alu[_buffer_start, --, b, $stemp]
-				alu[_buffer_length, --, b, $stemp2]
-			.end
-
-		skip_buffer_alloc#:
-		.end
-
-		/*
-		 * Resynchronise.
-		 */
-		.begin
-			ctx_arb[sig2]
-			local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
-		.end
-
-		/*
-		 * Synchronise buffer state.
-		 */
-		.begin
-			.reg temp
-
-			alu[temp, 1, and, $rsw0, >>15]
-			beq[copy_from_local_mem#]
-
-			alu[BUFFER_HANDLE, --, b, _buffer_handle]
-			alu[BUFFER_START, --, b, _buffer_start]
-			alu[BUFFER_LENGTH, --, b, _buffer_length]
-			br[sync_state_done#]
-
-		copy_from_local_mem#:
-			alu[_buffer_handle, --, b, BUFFER_HANDLE]
-			alu[_buffer_start, --, b, BUFFER_START]
-			alu[_buffer_length, --, b, BUFFER_LENGTH]
-
-		sync_state_done#:
-		.end
-
-#if 0
-		/*
-		 * Debug buffer state management.
-		 */
-		.begin
-			.reg temp
-
-			alu[temp, 1, and, $rsw0, >>14]
-			beq[no_poison#]
-			immed[BUFFER_HANDLE, 0xdead]
-			immed[BUFFER_START, 0xdead]
-			immed[BUFFER_LENGTH, 0xdead]
-		no_poison#:
-
-			immed[temp, 0xdead]
-			alu[--, _buffer_handle, -, temp]
-			beq[state_corrupted#]
-			alu[--, _buffer_start, -, temp]
-			beq[state_corrupted#]
-			alu[--, _buffer_length, -, temp]
-			beq[state_corrupted#]
-		.end
-#endif
-
-		/*
-		 * Check buffer length.
-		 */
-		.begin
-			alu[--, _buffer_length, -, _packet_length]
-			blo[buffer_overflow#]
-		.end
-
-		/*
-		 * Copy the mpacket and give back the RBUF element.
-		 */
-		.begin
-			.reg element
-			.reg xfer_size
-			.reg temp
-			.sig copy_sig
-
-			alu_shf[element, 0x7f, and, $rsw0, >>24]
-			alu_shf[xfer_size, 0xff, and, $rsw0, >>16]
-
-			alu[xfer_size, xfer_size, -, 1]
-			alu_shf[xfer_size, 0x10, or, xfer_size, >>3]
-			alu_shf[temp, 0x10, or, xfer_size, <<21]
-			alu_shf[temp, temp, or, element, <<11]
-			alu_shf[--, temp, or, 1, <<18]
-
-			dram[rbuf_rd, --, _buffer_start, buffer_offset, max_8],
-						indirect_ref, sig_done[copy_sig]
-			ctx_arb[copy_sig]
-
-			alu[temp, RBUF_ELEMENT_DONE, or, element, <<16]
-			msf[fast_wr, --, temp, 0]
-		.end
-
-		/*
-		 * If EOP, write back the packet descriptor.
-		 */
-		.begin
-			.reg write $stemp $stemp2
-			.xfer_order $stemp $stemp2
-			.sig zzz
-
-			alu_shf[--, 1, and, $rsw0, >>14]
-			beq[no_writeback#]
-
-			alu[$stemp, $rsw0, and, 0x1f]
-			alu[$stemp2, --, b, _packet_length]
-			sram[write, $stemp, _buffer_handle, 8, 2], ctx_swap[zzz]
-
-		no_writeback#:
-		.end
-
-		/*
-		 * Resynchronise.
-		 */
-		.begin
-			ctx_arb[sig3]
-			local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
-		.end
-
-		/*
-		 * If EOP, put the buffer back onto the scratch ring.
-		 */
-		.begin
-			.reg write $stemp
-			.sig zzz
-
-			br_inp_state[SCR_Ring1_Status, rx_done_ring_overflow#]
-
-			alu_shf[--, 1, and, $rsw0, >>14]
-			beq[mpacket_receive_loop#]
-
-			alu[--, 1, and, $rsw0, >>10]
-			bne[rxerr#]
-
-			alu[$stemp, --, b, _buffer_handle]
-			scratch[put, $stemp, zero, 4, 1], ctx_swap[zzz]
-			cap[fast_wr, 0, XSCALE_INT_A]
-			br[mpacket_receive_loop#]
-
-		rxerr#:
-			alu[$stemp, --, b, _buffer_handle]
-			scratch[put, $stemp, zero, 0, 1], ctx_swap[zzz]
-			br[mpacket_receive_loop#]
-		.end
-	.end
-
-
-abort_rswerr#:
-	halt
-
-abort_proterr#:
-	halt
-
-state_corrupted#:
-	halt
-
-buffer_overflow#:
-	halt
-
-rx_done_ring_overflow#:
-	halt
-
-
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
deleted file mode 100644
index e8aee2f..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
+++ /dev/null
@@ -1,130 +0,0 @@
-static struct ixp2000_uengine_code ixp2400_rx =
-{
-	.cpu_model_bitmask	= 0x000003fe,
-	.cpu_min_revision	= 0,
-	.cpu_max_revision	= 255,
-
-	.uengine_parameters	= IXP2000_UENGINE_8_CONTEXTS |
-				  IXP2000_UENGINE_PRN_UPDATE_EVERY |
-				  IXP2000_UENGINE_NN_FROM_PREVIOUS |
-				  IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
-				  IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
-				  IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
-
-	.initial_reg_values	= (struct ixp2000_reg_value []) {
-		{ -1, -1 }
-	},
-
-	.num_insns		= 109,
-	.insns			= (u8 []) {
-		0xf0, 0x00, 0x0c, 0xc0, 0x05,
-		0xf4, 0x44, 0x0c, 0x00, 0x05,
-		0xfc, 0x04, 0x4c, 0x00, 0x00,
-		0xf0, 0x00, 0x00, 0x3b, 0x00,
-		0xb4, 0x40, 0xf0, 0x3b, 0x1f,
-		0x8a, 0xc0, 0x50, 0x3e, 0x05,
-		0xb4, 0x40, 0xf0, 0x3b, 0x80,
-		0x9a, 0xe0, 0x00, 0x3e, 0x05,
-		0xf0, 0x00, 0x00, 0x07, 0x00,
-		0xd8, 0x05, 0xc0, 0x00, 0x11,
-		0xf0, 0x00, 0x00, 0x0f, 0x00,
-		0x91, 0xb0, 0x20, 0x0e, 0x00,
-		0xfc, 0x06, 0x60, 0x0b, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0xf0, 0x00, 0x0c, 0x02, 0x00,
-		0xb0, 0xc0, 0x30, 0x0f, 0x01,
-		0xa4, 0x70, 0x00, 0x0f, 0x20,
-		0xd8, 0x02, 0xc0, 0x01, 0x00,
-		0xfc, 0x10, 0xac, 0x23, 0x08,
-		0xfc, 0x10, 0xac, 0x43, 0x10,
-		0xfc, 0x10, 0xac, 0x63, 0x18,
-		0xe0, 0x00, 0x00, 0x00, 0x02,
-		0xfc, 0x10, 0xae, 0x23, 0x88,
-		0x3d, 0x00, 0x04, 0x03, 0x20,
-		0xe0, 0x00, 0x00, 0x00, 0x10,
-		0x84, 0x82, 0x02, 0x01, 0x3b,
-		0xd8, 0x1a, 0x00, 0x01, 0x01,
-		0xb4, 0x00, 0x8c, 0x7d, 0x80,
-		0x91, 0xb0, 0x80, 0x22, 0x00,
-		0xfc, 0x06, 0x60, 0x23, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0xf0, 0x00, 0x0c, 0x03, 0x00,
-		0x94, 0xf0, 0x92, 0x01, 0x21,
-		0xac, 0x40, 0x60, 0x26, 0x00,
-		0xa4, 0x30, 0x0c, 0x04, 0x06,
-		0xd8, 0x1a, 0x40, 0x01, 0x00,
-		0x94, 0xe0, 0xa2, 0x01, 0x21,
-		0xac, 0x20, 0x00, 0x28, 0x06,
-		0x84, 0xf2, 0x02, 0x01, 0x21,
-		0xd8, 0x0b, 0x40, 0x01, 0x00,
-		0xf0, 0x00, 0x0c, 0x02, 0x01,
-		0xf0, 0x00, 0x0c, 0x02, 0x02,
-		0xa0, 0x00, 0x08, 0x04, 0x00,
-		0x95, 0x00, 0xc6, 0x01, 0xff,
-		0xa0, 0x80, 0x10, 0x30, 0x00,
-		0xa0, 0x60, 0x1c, 0x00, 0x01,
-		0xf0, 0x0f, 0xf0, 0x33, 0xff,
-		0xb4, 0x00, 0xc0, 0x31, 0x81,
-		0xb0, 0x80, 0xb0, 0x32, 0x02,
-		0xa0, 0x20, 0x20, 0x2c, 0x00,
-		0x94, 0xf0, 0xd2, 0x01, 0x21,
-		0xd8, 0x0f, 0x40, 0x01, 0x00,
-		0x19, 0x40, 0x10, 0x04, 0x20,
-		0xa0, 0x00, 0x26, 0x04, 0x00,
-		0xd8, 0x0d, 0xc0, 0x01, 0x00,
-		0x00, 0x42, 0x10, 0x80, 0x02,
-		0xb0, 0x00, 0x46, 0x04, 0x00,
-		0xb0, 0x00, 0x56, 0x08, 0x00,
-		0xe0, 0x00, 0x00, 0x00, 0x04,
-		0xfc, 0x10, 0xae, 0x43, 0x90,
-		0x84, 0xf0, 0x32, 0x01, 0x21,
-		0xd8, 0x11, 0x40, 0x01, 0x00,
-		0xa0, 0x60, 0x3c, 0x00, 0x02,
-		0xa0, 0x20, 0x40, 0x10, 0x00,
-		0xa0, 0x20, 0x50, 0x14, 0x00,
-		0xd8, 0x12, 0x00, 0x00, 0x18,
-		0xa0, 0x00, 0x28, 0x0c, 0x00,
-		0xb0, 0x00, 0x48, 0x10, 0x00,
-		0xb0, 0x00, 0x58, 0x14, 0x00,
-		0xaa, 0xf0, 0x00, 0x14, 0x01,
-		0xd8, 0x1a, 0xc0, 0x01, 0x05,
-		0x85, 0x80, 0x42, 0x01, 0xff,
-		0x95, 0x00, 0x66, 0x01, 0xff,
-		0xba, 0xc0, 0x60, 0x1b, 0x01,
-		0x9a, 0x30, 0x60, 0x19, 0x30,
-		0x9a, 0xb0, 0x70, 0x1a, 0x30,
-		0x9b, 0x50, 0x78, 0x1e, 0x04,
-		0x8a, 0xe2, 0x08, 0x1e, 0x21,
-		0x6a, 0x4e, 0x00, 0x13, 0x00,
-		0xe0, 0x00, 0x00, 0x00, 0x30,
-		0x9b, 0x00, 0x7a, 0x92, 0x04,
-		0x3d, 0x00, 0x04, 0x1f, 0x20,
-		0x84, 0xe2, 0x02, 0x01, 0x21,
-		0xd8, 0x16, 0x80, 0x01, 0x00,
-		0xa4, 0x18, 0x0c, 0x7d, 0x80,
-		0xa0, 0x58, 0x1c, 0x00, 0x01,
-		0x01, 0x42, 0x00, 0xa0, 0x02,
-		0xe0, 0x00, 0x00, 0x00, 0x08,
-		0xfc, 0x10, 0xae, 0x63, 0x98,
-		0xd8, 0x1b, 0x00, 0xc2, 0x14,
-		0x84, 0xe2, 0x02, 0x01, 0x21,
-		0xd8, 0x05, 0xc0, 0x01, 0x00,
-		0x84, 0xa2, 0x02, 0x01, 0x21,
-		0xd8, 0x19, 0x40, 0x01, 0x01,
-		0xa0, 0x58, 0x0c, 0x00, 0x02,
-		0x1a, 0x40, 0x00, 0x04, 0x24,
-		0x33, 0x00, 0x01, 0x2f, 0x20,
-		0xd8, 0x05, 0xc0, 0x00, 0x18,
-		0xa0, 0x58, 0x0c, 0x00, 0x02,
-		0x1a, 0x40, 0x00, 0x04, 0x20,
-		0xd8, 0x05, 0xc0, 0x00, 0x18,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-	}
-};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
deleted file mode 100644
index d090d18..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * TX ucode for the Intel IXP2400 in POS-PHY mode.
- * Copyright (C) 2004, 2005 Lennert Buytenhek
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Assumptions made in this code:
- * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
- *   only one TBUF partition is used.  This includes, for example,
- *   1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
- *   is not an exhaustive list.)
- * - The TBUF uses 64-byte mpackets.
- * - TX descriptors reside in SRAM, and have the following format:
- *	struct tx_desc
- *	{
- *	// to uengine
- *		u32	buf_phys_addr;
- *		u32	pkt_length;
- *		u32	channel;
- *	};
- * - Packet data resides in DRAM.
- * - Packet buffer addresses are 8-byte aligned.
- * - Scratch ring 2 is tx_pending.
- * - Scratch ring 3 is tx_done, and has status condition 'full'.
- * - This code is run on all eight threads of the microengine it runs on.
- */
-
-#define TX_SEQUENCE_0		0x0060
-#define TBUF_CTRL		0x1800
-
-#define PARTITION_SIZE		128
-#define PARTITION_THRESH	96
-
-
-	.sig volatile sig1
-	.sig volatile sig2
-	.sig volatile sig3
-
-	.reg @old_tx_seq_0
-	.reg @mpkts_in_flight
-	.reg @next_tbuf_mpacket
-
-	.reg @buffer_handle
-	.reg @buffer_start
-	.reg @packet_length
-	.reg @channel
-	.reg @packet_offset
-
-	.reg zero
-
-	immed[zero, 0]
-
-	/*
-	 * Skip context 0 initialisation?
-	 */
-	.begin
-		br!=ctx[0, mpacket_tx_loop#]
-	.end
-
-	/*
-	 * Wait until all pending TBUF elements have been transmitted.
-	 */
-	.begin
-		.reg read $tx
-		.sig zzz
-
-	loop_empty#:
-		msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
-		alu_shf[--, --, b, $tx, >>31]
-		beq[loop_empty#]
-
-		alu[@old_tx_seq_0, --, b, $tx]
-	.end
-
-	immed[@mpkts_in_flight, 0]
-	alu[@next_tbuf_mpacket, @old_tx_seq_0, and, (PARTITION_SIZE - 1)]
-
-	immed[@buffer_handle, 0]
-
-	/*
-	 * Initialise signal pipeline.
-	 */
-	.begin
-		local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
-		.set_sig sig1
-
-		local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
-		.set_sig sig2
-
-		local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
-		.set_sig sig3
-	.end
-
-mpacket_tx_loop#:
-	.begin
-		.reg tbuf_element_index
-		.reg buffer_handle
-		.reg sop_eop
-		.reg packet_data
-		.reg channel
-		.reg mpacket_size
-
-		/*
-		 * If there is no packet currently being transmitted,
-		 * dequeue the next TX descriptor, and fetch the buffer
-		 * address, packet length and destination channel number.
-		 */
-		.begin
-			.reg read $stemp $stemp2 $stemp3
-			.xfer_order $stemp $stemp2 $stemp3
-			.sig zzz
-
-			ctx_arb[sig1]
-
-			alu[--, --, b, @buffer_handle]
-			bne[already_got_packet#]
-
-		tx_nobufs#:
-			scratch[get, $stemp, zero, 8, 1], ctx_swap[zzz]
-			alu[@buffer_handle, --, b, $stemp]
-			beq[tx_nobufs#]
-
-			sram[read, $stemp, $stemp, 0, 3], ctx_swap[zzz]
-			alu[@buffer_start, --, b, $stemp]
-			alu[@packet_length, --, b, $stemp2]
-			beq[zero_byte_packet#]
-			alu[@channel, --, b, $stemp3]
-			immed[@packet_offset, 0]
-
-		already_got_packet#:
-			local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
-		.end
-
-		/*
-		 * Determine tbuf element index, SOP/EOP flags, mpacket
-		 * offset and mpacket size and cache buffer_handle and
-		 * channel number.
-		 */
-		.begin
-			alu[tbuf_element_index, --, b, @next_tbuf_mpacket]
-			alu[@next_tbuf_mpacket, @next_tbuf_mpacket, +, 1]
-			alu[@next_tbuf_mpacket, @next_tbuf_mpacket, and,
-							(PARTITION_SIZE - 1)]
-
-			alu[buffer_handle, --, b, @buffer_handle]
-			immed[@buffer_handle, 0]
-
-			immed[sop_eop, 1]
-
-			alu[packet_data, --, b, @packet_offset]
-			bne[no_sop#]
-			alu[sop_eop, sop_eop, or, 2]
-		no_sop#:
-			alu[packet_data, packet_data, +, @buffer_start]
-
-			alu[channel, --, b, @channel]
-
-			alu[mpacket_size, @packet_length, -, @packet_offset]
-			alu[--, 64, -, mpacket_size]
-			bhs[eop#]
-			alu[@buffer_handle, --, b, buffer_handle]
-			immed[mpacket_size, 64]
-			alu[sop_eop, sop_eop, and, 2]
-		eop#:
-
-			alu[@packet_offset, @packet_offset, +, mpacket_size]
-		.end
-
-		/*
-		 * Wait until there's enough space in the TBUF.
-		 */
-		.begin
-			.reg read $tx
-			.reg temp
-			.sig zzz
-
-			ctx_arb[sig2]
-
-			br[test_space#]
-
-		loop_space#:
-			msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
-
-			alu[temp, $tx, -, @old_tx_seq_0]
-			alu[temp, temp, and, 0xff]
-			alu[@mpkts_in_flight, @mpkts_in_flight, -, temp]
-
-			alu[@old_tx_seq_0, --, b, $tx]
-
-		test_space#:
-			alu[--, PARTITION_THRESH, -, @mpkts_in_flight]
-			blo[loop_space#]
-
-			alu[@mpkts_in_flight, @mpkts_in_flight, +, 1]
-
-			local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
-		.end
-
-		/*
-		 * Copy the packet data to the TBUF.
-		 */
-		.begin
-			.reg temp
-			.sig copy_sig
-
-			alu[temp, mpacket_size, -, 1]
-			alu_shf[temp, 0x10, or, temp, >>3]
-			alu_shf[temp, 0x10, or, temp, <<21]
-			alu_shf[temp, temp, or, tbuf_element_index, <<11]
-			alu_shf[--, temp, or, 1, <<18]
-
-			dram[tbuf_wr, --, packet_data, 0, max_8],
-					indirect_ref, sig_done[copy_sig]
-			ctx_arb[copy_sig]
-		.end
-
-		/*
-		 * Mark TBUF element as ready-to-be-transmitted.
-		 */
-		.begin
-			.reg write $tsw $tsw2
-			.xfer_order $tsw $tsw2
-			.reg temp
-			.sig zzz
-
-			alu_shf[temp, channel, or, mpacket_size, <<24]
-			alu_shf[$tsw, temp, or, sop_eop, <<8]
-			immed[$tsw2, 0]
-
-			immed[temp, TBUF_CTRL]
-			alu_shf[temp, temp, or, tbuf_element_index, <<3]
-			msf[write, $tsw, temp, 0, 2], ctx_swap[zzz]
-		.end
-
-		/*
-		 * Resynchronise.
-		 */
-		.begin
-			ctx_arb[sig3]
-			local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
-		.end
-
-		/*
-		 * If this was an EOP mpacket, recycle the TX buffer
-	 	 * and signal the host.
-		 */
-		.begin
-			.reg write $stemp
-			.sig zzz
-
-			alu[--, sop_eop, and, 1]
-			beq[mpacket_tx_loop#]
-
-		tx_done_ring_full#:
-			br_inp_state[SCR_Ring3_Status, tx_done_ring_full#]
-
-			alu[$stemp, --, b, buffer_handle]
-			scratch[put, $stemp, zero, 12, 1], ctx_swap[zzz]
-			cap[fast_wr, 0, XSCALE_INT_A]
-			br[mpacket_tx_loop#]
-		.end
-	.end
-
-
-zero_byte_packet#:
-	halt
-
-
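[Illustration] The TX microcode removed above slices each packet into 64-byte TBUF mpackets, marking SOP on the chunk that starts at offset 0 and EOP on the chunk covering the final bytes (anything left over that fits in one mpacket). A rough host-side C sketch of that segmentation bookkeeping follows; demo_segment_packet() is hypothetical and only mirrors the SOP/EOP logic, not the MSF register writes.

#include <stdio.h>

#define MPACKET_SIZE 64		/* the TBUF uses 64-byte mpackets */

/* Chop one packet into mpackets, flagging SOP/EOP as the ucode does. */
static void demo_segment_packet(unsigned int packet_length)
{
	unsigned int offset = 0;

	while (offset < packet_length) {
		unsigned int chunk = packet_length - offset;
		int sop = (offset == 0);
		int eop = (chunk <= MPACKET_SIZE);

		if (!eop)
			chunk = MPACKET_SIZE;

		printf("mpacket: offset %3u len %2u%s%s\n",
		       offset, chunk, sop ? " SOP" : "", eop ? " EOP" : "");
		offset += chunk;
	}
}

int main(void)
{
	demo_segment_packet(150);	/* 64 + 64 + 22 bytes */
	return 0;
}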
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
deleted file mode 100644
index a433e24..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
+++ /dev/null
@@ -1,98 +0,0 @@
-static struct ixp2000_uengine_code ixp2400_tx =
-{
-	.cpu_model_bitmask	= 0x000003fe,
-	.cpu_min_revision	= 0,
-	.cpu_max_revision	= 255,
-
-	.uengine_parameters	= IXP2000_UENGINE_8_CONTEXTS |
-				  IXP2000_UENGINE_PRN_UPDATE_EVERY |
-				  IXP2000_UENGINE_NN_FROM_PREVIOUS |
-				  IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
-				  IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
-				  IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
-
-	.initial_reg_values	= (struct ixp2000_reg_value []) {
-		{ -1, -1 }
-	},
-
-	.num_insns		= 77,
-	.insns			= (u8 []) {
-		0xf0, 0x00, 0x00, 0x07, 0x00,
-		0xd8, 0x03, 0x00, 0x00, 0x11,
-		0x3c, 0x40, 0x00, 0x04, 0xe0,
-		0x81, 0xf2, 0x02, 0x01, 0x00,
-		0xd8, 0x00, 0x80, 0x01, 0x00,
-		0xb0, 0x08, 0x06, 0x00, 0x00,
-		0xf0, 0x00, 0x0c, 0x00, 0x80,
-		0xb4, 0x49, 0x02, 0x03, 0x7f,
-		0xf0, 0x00, 0x02, 0x83, 0x00,
-		0xfc, 0x10, 0xac, 0x23, 0x08,
-		0xfc, 0x10, 0xac, 0x43, 0x10,
-		0xfc, 0x10, 0xac, 0x63, 0x18,
-		0xe0, 0x00, 0x00, 0x00, 0x02,
-		0xa0, 0x30, 0x02, 0x80, 0x00,
-		0xd8, 0x06, 0x00, 0x01, 0x01,
-		0x19, 0x40, 0x00, 0x04, 0x28,
-		0xb0, 0x0a, 0x06, 0x00, 0x00,
-		0xd8, 0x03, 0xc0, 0x01, 0x00,
-		0x00, 0x44, 0x00, 0x80, 0x80,
-		0xa0, 0x09, 0x06, 0x00, 0x00,
-		0xb0, 0x0b, 0x06, 0x04, 0x00,
-		0xd8, 0x13, 0x00, 0x01, 0x00,
-		0xb0, 0x0c, 0x06, 0x08, 0x00,
-		0xf0, 0x00, 0x0c, 0x00, 0xa0,
-		0xfc, 0x10, 0xae, 0x23, 0x88,
-		0xa0, 0x00, 0x12, 0x40, 0x00,
-		0xb0, 0xc9, 0x02, 0x43, 0x01,
-		0xb4, 0x49, 0x02, 0x43, 0x7f,
-		0xb0, 0x00, 0x22, 0x80, 0x00,
-		0xf0, 0x00, 0x02, 0x83, 0x00,
-		0xf0, 0x00, 0x0c, 0x04, 0x02,
-		0xb0, 0x40, 0x6c, 0x00, 0xa0,
-		0xd8, 0x08, 0x80, 0x01, 0x01,
-		0xaa, 0x00, 0x2c, 0x08, 0x02,
-		0xa0, 0xc0, 0x30, 0x18, 0x90,
-		0xa0, 0x00, 0x43, 0x00, 0x00,
-		0xba, 0xc0, 0x32, 0xc0, 0xa0,
-		0xaa, 0xb0, 0x00, 0x0f, 0x40,
-		0xd8, 0x0a, 0x80, 0x01, 0x04,
-		0xb0, 0x0a, 0x00, 0x08, 0x00,
-		0xf0, 0x00, 0x00, 0x0f, 0x40,
-		0xa4, 0x00, 0x2c, 0x08, 0x02,
-		0xa0, 0x8a, 0x00, 0x0c, 0xa0,
-		0xe0, 0x00, 0x00, 0x00, 0x04,
-		0xd8, 0x0c, 0x80, 0x00, 0x18,
-		0x3c, 0x40, 0x00, 0x04, 0xe0,
-		0xba, 0x80, 0x42, 0x01, 0x80,
-		0xb4, 0x40, 0x40, 0x13, 0xff,
-		0xaa, 0x88, 0x00, 0x10, 0x80,
-		0xb0, 0x08, 0x06, 0x00, 0x00,
-		0xaa, 0xf0, 0x0d, 0x80, 0x80,
-		0xd8, 0x0b, 0x40, 0x01, 0x05,
-		0xa0, 0x88, 0x0c, 0x04, 0x80,
-		0xfc, 0x10, 0xae, 0x43, 0x90,
-		0xba, 0xc0, 0x50, 0x0f, 0x01,
-		0x9a, 0x30, 0x50, 0x15, 0x30,
-		0x9a, 0xb0, 0x50, 0x16, 0x30,
-		0x9b, 0x50, 0x58, 0x16, 0x01,
-		0x8a, 0xe2, 0x08, 0x16, 0x21,
-		0x6b, 0x4e, 0x00, 0x83, 0x03,
-		0xe0, 0x00, 0x00, 0x00, 0x30,
-		0x9a, 0x80, 0x70, 0x0e, 0x04,
-		0x8b, 0x88, 0x08, 0x1e, 0x02,
-		0xf0, 0x00, 0x0c, 0x01, 0x81,
-		0xf0, 0x01, 0x80, 0x1f, 0x00,
-		0x9b, 0xd0, 0x78, 0x1e, 0x01,
-		0x3d, 0x42, 0x00, 0x1c, 0x20,
-		0xe0, 0x00, 0x00, 0x00, 0x08,
-		0xfc, 0x10, 0xae, 0x63, 0x98,
-		0xa4, 0x30, 0x0c, 0x04, 0x02,
-		0xd8, 0x03, 0x00, 0x01, 0x00,
-		0xd8, 0x11, 0xc1, 0x42, 0x14,
-		0xa0, 0x18, 0x00, 0x08, 0x00,
-		0x1a, 0x40, 0x00, 0x04, 0x2c,
-		0x33, 0x00, 0x01, 0x2f, 0x20,
-		0xd8, 0x03, 0x00, 0x00, 0x18,
-		0xe0, 0x00, 0x02, 0x00, 0x00,
-	}
-};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
deleted file mode 100644
index 4500837..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * IXP2000 MSF network device driver
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/moduleparam.h>
-#include <linux/gfp.h>
-#include <asm/hardware/uengine.h>
-#include <asm/io.h>
-#include "ixp2400_rx.ucode"
-#include "ixp2400_tx.ucode"
-#include "ixpdev_priv.h"
-#include "ixpdev.h"
-#include "pm3386.h"
-
-#define DRV_MODULE_VERSION	"0.2"
-
-static int nds_count;
-static struct net_device **nds;
-static int nds_open;
-static void (*set_port_admin_status)(int port, int up);
-
-static struct ixpdev_rx_desc * const rx_desc =
-	(struct ixpdev_rx_desc *)(IXP2000_SRAM0_VIRT_BASE + RX_BUF_DESC_BASE);
-static struct ixpdev_tx_desc * const tx_desc =
-	(struct ixpdev_tx_desc *)(IXP2000_SRAM0_VIRT_BASE + TX_BUF_DESC_BASE);
-static int tx_pointer;
-
-
-static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct ixpdev_priv *ip = netdev_priv(dev);
-	struct ixpdev_tx_desc *desc;
-	int entry;
-	unsigned long flags;
-
-	if (unlikely(skb->len > PAGE_SIZE)) {
-		/* @@@ Count drops.  */
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
-
-	entry = tx_pointer;
-	tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT;
-
-	desc = tx_desc + entry;
-	desc->pkt_length = skb->len;
-	desc->channel = ip->channel;
-
-	skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr));
-	dev_kfree_skb(skb);
-
-	ixp2000_reg_write(RING_TX_PENDING,
-		TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
-
-	local_irq_save(flags);
-	ip->tx_queue_entries++;
-	if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
-		netif_stop_queue(dev);
-	local_irq_restore(flags);
-
-	return NETDEV_TX_OK;
-}
-
-
-static int ixpdev_rx(struct net_device *dev, int processed, int budget)
-{
-	while (processed < budget) {
-		struct ixpdev_rx_desc *desc;
-		struct sk_buff *skb;
-		void *buf;
-		u32 _desc;
-
-		_desc = ixp2000_reg_read(RING_RX_DONE);
-		if (_desc == 0)
-			return 0;
-
-		desc = rx_desc +
-			((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc));
-		buf = phys_to_virt(desc->buf_addr);
-
-		if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) {
-			printk(KERN_ERR "ixp2000: rx err, length %d\n",
-					desc->pkt_length);
-			goto err;
-		}
-
-		if (desc->channel < 0 || desc->channel >= nds_count) {
-			printk(KERN_ERR "ixp2000: rx err, channel %d\n",
-					desc->channel);
-			goto err;
-		}
-
-		/* @@@ Make FCS stripping configurable.  */
-		desc->pkt_length -= 4;
-
-		if (unlikely(!netif_running(nds[desc->channel])))
-			goto err;
-
-		skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
-		if (likely(skb != NULL)) {
-			skb_copy_to_linear_data(skb, buf, desc->pkt_length);
-			skb_put(skb, desc->pkt_length);
-			skb->protocol = eth_type_trans(skb, nds[desc->channel]);
-
-			netif_receive_skb(skb);
-		}
-
-err:
-		ixp2000_reg_write(RING_RX_PENDING, _desc);
-		processed++;
-	}
-
-	return processed;
-}
-
-/* dev always points to nds[0].  */
-static int ixpdev_poll(struct napi_struct *napi, int budget)
-{
-	struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi);
-	struct net_device *dev = ip->dev;
-	int rx;
-
-	rx = 0;
-	do {
-		ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);
-
-		rx = ixpdev_rx(dev, rx, budget);
-		if (rx >= budget)
-			break;
-	} while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
-
-	napi_complete(napi);
-	ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
-
-	return rx;
-}
-
-static void ixpdev_tx_complete(void)
-{
-	int channel;
-	u32 wake;
-
-	wake = 0;
-	while (1) {
-		struct ixpdev_priv *ip;
-		u32 desc;
-		int entry;
-
-		desc = ixp2000_reg_read(RING_TX_DONE);
-		if (desc == 0)
-			break;
-
-		/* @@@ Check whether entries come back in order.  */
-		entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc);
-		channel = tx_desc[entry].channel;
-
-		if (channel < 0 || channel >= nds_count) {
-			printk(KERN_ERR "ixp2000: txcomp channel index "
-					"out of bounds (%d, %.8i, %d)\n",
-					channel, (unsigned int)desc, entry);
-			continue;
-		}
-
-		ip = netdev_priv(nds[channel]);
-		if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
-			wake |= 1 << channel;
-		ip->tx_queue_entries--;
-	}
-
-	for (channel = 0; wake != 0; channel++) {
-		if (wake & (1 << channel)) {
-			netif_wake_queue(nds[channel]);
-			wake &= ~(1 << channel);
-		}
-	}
-}
-
-static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
-{
-	u32 status;
-
-	status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
-	if (status == 0)
-		return IRQ_NONE;
-
-	/*
-	 * Any of the eight receive units signaled RX?
-	 */
-	if (status & 0x00ff) {
-		struct net_device *dev = nds[0];
-		struct ixpdev_priv *ip = netdev_priv(dev);
-
-		ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
-		if (likely(napi_schedule_prep(&ip->napi))) {
-			__napi_schedule(&ip->napi);
-		} else {
-			printk(KERN_CRIT "ixp2000: irq while polling!!\n");
-		}
-	}
-
-	/*
-	 * Any of the eight transmit units signaled TXdone?
-	 */
-	if (status & 0xff00) {
-		ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
-		ixpdev_tx_complete();
-	}
-
-	return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ixpdev_poll_controller(struct net_device *dev)
-{
-	disable_irq(IRQ_IXP2000_THDA0);
-	ixpdev_interrupt(IRQ_IXP2000_THDA0, dev);
-	enable_irq(IRQ_IXP2000_THDA0);
-}
-#endif
-
-static int ixpdev_open(struct net_device *dev)
-{
-	struct ixpdev_priv *ip = netdev_priv(dev);
-	int err;
-
-	napi_enable(&ip->napi);
-	if (!nds_open++) {
-		err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt,
-					IRQF_SHARED, "ixp2000_eth", nds);
-		if (err) {
-			nds_open--;
-			napi_disable(&ip->napi);
-			return err;
-		}
-
-		ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff);
-	}
-
-	set_port_admin_status(ip->channel, 1);
-	netif_start_queue(dev);
-
-	return 0;
-}
-
-static int ixpdev_close(struct net_device *dev)
-{
-	struct ixpdev_priv *ip = netdev_priv(dev);
-
-	netif_stop_queue(dev);
-	napi_disable(&ip->napi);
-	set_port_admin_status(ip->channel, 0);
-
-	if (!--nds_open) {
-		ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0xffff);
-		free_irq(IRQ_IXP2000_THDA0, nds);
-	}
-
-	return 0;
-}
-
-static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
-{
-	struct ixpdev_priv *ip = netdev_priv(dev);
-
-	pm3386_get_stats(ip->channel, &(dev->stats));
-
-	return &(dev->stats);
-}
-
-static const struct net_device_ops ixpdev_netdev_ops = {
-	.ndo_open		= ixpdev_open,
-	.ndo_stop		= ixpdev_close,
-	.ndo_start_xmit		= ixpdev_xmit,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
-	.ndo_get_stats		= ixpdev_get_stats,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ixpdev_poll_controller,
-#endif
-};
-
-struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
-{
-	struct net_device *dev;
-	struct ixpdev_priv *ip;
-
-	dev = alloc_etherdev(sizeof_priv);
-	if (dev == NULL)
-		return NULL;
-
-	dev->netdev_ops = &ixpdev_netdev_ops;
-
-	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
-
-	ip = netdev_priv(dev);
-	ip->dev = dev;
-	netif_napi_add(dev, &ip->napi, ixpdev_poll, 64);
-	ip->channel = channel;
-	ip->tx_queue_entries = 0;
-
-	return dev;
-}
-
-int ixpdev_init(int __nds_count, struct net_device **__nds,
-		void (*__set_port_admin_status)(int port, int up))
-{
-	int i;
-	int err;
-
-	BUILD_BUG_ON(RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192);
-
-	printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION);
-
-	nds_count = __nds_count;
-	nds = __nds;
-	set_port_admin_status = __set_port_admin_status;
-
-	for (i = 0; i < RX_BUF_COUNT; i++) {
-		void *buf;
-
-		buf = (void *)get_zeroed_page(GFP_KERNEL);
-		if (buf == NULL) {
-			err = -ENOMEM;
-			while (--i >= 0)
-				free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
-			goto err_out;
-		}
-		rx_desc[i].buf_addr = virt_to_phys(buf);
-		rx_desc[i].buf_length = PAGE_SIZE;
-	}
-
-	/* @@@ Maybe we shouldn't be preallocating TX buffers.  */
-	for (i = 0; i < TX_BUF_COUNT; i++) {
-		void *buf;
-
-		buf = (void *)get_zeroed_page(GFP_KERNEL);
-		if (buf == NULL) {
-			err = -ENOMEM;
-			while (--i >= 0)
-				free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
-			goto err_free_rx;
-		}
-		tx_desc[i].buf_addr = virt_to_phys(buf);
-	}
-
-	/* 256 entries, ring status set means 'empty', base address 0x0000.  */
-	ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
-	ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
-	ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);
-
-	/* 256 entries, ring status set means 'full', base address 0x0400.  */
-	ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
-	ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
-	ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);
-
-	for (i = 0; i < RX_BUF_COUNT; i++) {
-		ixp2000_reg_write(RING_RX_PENDING,
-			RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
-	}
-
-	ixp2000_uengine_load(0, &ixp2400_rx);
-	ixp2000_uengine_start_contexts(0, 0xff);
-
-	/* 256 entries, ring status set means 'empty', base address 0x0800.  */
-	ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
-	ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
-	ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);
-
-	/* 256 entries, ring status set means 'full', base address 0x0c00.  */
-	ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
-	ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
-	ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);
-
-	ixp2000_uengine_load(1, &ixp2400_tx);
-	ixp2000_uengine_start_contexts(1, 0xff);
-
-	for (i = 0; i < nds_count; i++) {
-		err = register_netdev(nds[i]);
-		if (err) {
-			while (--i >= 0)
-				unregister_netdev(nds[i]);
-			goto err_free_tx;
-		}
-	}
-
-	for (i = 0; i < nds_count; i++) {
-		printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), %pM.\n",
-				 nds[i]->name, i, nds[i]->dev_addr);
-	}
-
-	return 0;
-
-err_free_tx:
-	for (i = 0; i < TX_BUF_COUNT; i++)
-		free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
-
-err_free_rx:
-	for (i = 0; i < RX_BUF_COUNT; i++)
-		free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
-
-err_out:
-	return err;
-} 
-
-void ixpdev_deinit(void)
-{
-	int i;
-
-	/* @@@ Flush out pending packets.  */
-
-	for (i = 0; i < nds_count; i++)
-		unregister_netdev(nds[i]);
-
-	ixp2000_uengine_stop_contexts(1, 0xff);
-	ixp2000_uengine_stop_contexts(0, 0xff);
-	ixp2000_uengine_reset(0x3);
-
-	for (i = 0; i < TX_BUF_COUNT; i++)
-		free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
-
-	for (i = 0; i < RX_BUF_COUNT; i++)
-		free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
-}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
deleted file mode 100644
index 391ece6..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * IXP2000 MSF network device driver
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __IXPDEV_H
-#define __IXPDEV_H
-
-struct ixpdev_priv
-{
-	struct net_device *dev;
-	struct napi_struct napi;
-	int	channel;
-	int	tx_queue_entries;
-};
-
-struct net_device *ixpdev_alloc(int channel, int sizeof_priv);
-int ixpdev_init(int num_ports, struct net_device **nds,
-		void (*set_port_admin_status)(int port, int up));
-void ixpdev_deinit(void);
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
deleted file mode 100644
index 86aa08e..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * IXP2000 MSF network device driver
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __IXPDEV_PRIV_H
-#define __IXPDEV_PRIV_H
-
-#define RX_BUF_DESC_BASE	0x00001000
-#define RX_BUF_COUNT		((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc)))
-#define TX_BUF_DESC_BASE	0x00002000
-#define TX_BUF_COUNT		((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc)))
-#define TX_BUF_COUNT_PER_CHAN	(TX_BUF_COUNT / 4)
-
-#define RING_RX_PENDING		((u32 *)IXP2000_SCRATCH_RING_VIRT_BASE)
-#define RING_RX_DONE		((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 4))
-#define RING_TX_PENDING		((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 8))
-#define RING_TX_DONE		((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 12))
-
-#define SCRATCH_REG(x)		((u32 *)(IXP2000_GLOBAL_REG_VIRT_BASE | 0x0800 | (x)))
-#define RING_RX_PENDING_BASE	SCRATCH_REG(0x00)
-#define RING_RX_PENDING_HEAD	SCRATCH_REG(0x04)
-#define RING_RX_PENDING_TAIL	SCRATCH_REG(0x08)
-#define RING_RX_DONE_BASE	SCRATCH_REG(0x10)
-#define RING_RX_DONE_HEAD	SCRATCH_REG(0x14)
-#define RING_RX_DONE_TAIL	SCRATCH_REG(0x18)
-#define RING_TX_PENDING_BASE	SCRATCH_REG(0x20)
-#define RING_TX_PENDING_HEAD	SCRATCH_REG(0x24)
-#define RING_TX_PENDING_TAIL	SCRATCH_REG(0x28)
-#define RING_TX_DONE_BASE	SCRATCH_REG(0x30)
-#define RING_TX_DONE_HEAD	SCRATCH_REG(0x34)
-#define RING_TX_DONE_TAIL	SCRATCH_REG(0x38)
-
-struct ixpdev_rx_desc
-{
-	u32	buf_addr;
-	u32	buf_length;
-	u32	channel;
-	u32	pkt_length;
-};
-
-struct ixpdev_tx_desc
-{
-	u32	buf_addr;
-	u32	pkt_length;
-	u32	channel;
-	u32	unused;
-};
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.c b/drivers/net/ethernet/xscale/ixp2000/pm3386.c
deleted file mode 100644
index e08d3f9..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/pm3386.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Helper functions for the PM3386s on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <asm/io.h>
-#include "pm3386.h"
-
-/*
- * Read from register 'reg' of PM3386 device 'pm'.
- */
-static u16 pm3386_reg_read(int pm, int reg)
-{
-	void *_reg;
-	u16 value;
-
-	_reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
-	if (pm == 1)
-		_reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
-
-	value = *((volatile u16 *)(_reg + (reg << 1)));
-
-//	printk(KERN_INFO "pm3386_reg_read(%d, %.3x) = %.8x\n", pm, reg, value);
-
-	return value;
-}
-
-/*
- * Write to register 'reg' of PM3386 device 'pm', and perform
- * a readback from the identification register.
- */
-static void pm3386_reg_write(int pm, int reg, u16 value)
-{
-	void *_reg;
-	u16 dummy;
-
-//	printk(KERN_INFO "pm3386_reg_write(%d, %.3x, %.8x)\n", pm, reg, value);
-
-	_reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
-	if (pm == 1)
-		_reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
-
-	*((volatile u16 *)(_reg + (reg << 1))) = value;
-
-	dummy = *((volatile u16 *)_reg);
-	__asm__ __volatile__("mov %0, %0" : "+r" (dummy));
-}
-
-/*
- * Read from port 'port' register 'reg', where the registers
- * for the different ports are 'spacing' registers apart.
- */
-static u16 pm3386_port_reg_read(int port, int _reg, int spacing)
-{
-	int reg;
-
-	reg = _reg;
-	if (port & 1)
-		reg += spacing;
-
-	return pm3386_reg_read(port >> 1, reg);
-}
-
-/*
- * Write to port 'port' register 'reg', where the registers
- * for the different ports are 'spacing' registers apart.
- */
-static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
-{
-	int reg;
-
-	reg = _reg;
-	if (port & 1)
-		reg += spacing;
-
-	pm3386_reg_write(port >> 1, reg, value);
-}
-
-int pm3386_secondary_present(void)
-{
-	return pm3386_reg_read(1, 0) == 0x3386;
-}
-
-void pm3386_reset(void)
-{
-	u8 mac[3][6];
-	int secondary;
-
-	secondary = pm3386_secondary_present();
-
-	/* Save programmed MAC addresses.  */
-	pm3386_get_mac(0, mac[0]);
-	pm3386_get_mac(1, mac[1]);
-	if (secondary)
-		pm3386_get_mac(2, mac[2]);
-
-	/* Assert analog and digital reset.  */
-	pm3386_reg_write(0, 0x002, 0x0060);
-	if (secondary)
-		pm3386_reg_write(1, 0x002, 0x0060);
-	mdelay(1);
-
-	/* Deassert analog reset.  */
-	pm3386_reg_write(0, 0x002, 0x0062);
-	if (secondary)
-		pm3386_reg_write(1, 0x002, 0x0062);
-	mdelay(10);
-
-	/* Deassert digital reset.  */
-	pm3386_reg_write(0, 0x002, 0x0063);
-	if (secondary)
-		pm3386_reg_write(1, 0x002, 0x0063);
-	mdelay(10);
-
-	/* Restore programmed MAC addresses.  */
-	pm3386_set_mac(0, mac[0]);
-	pm3386_set_mac(1, mac[1]);
-	if (secondary)
-		pm3386_set_mac(2, mac[2]);
-
-	/* Disable carrier on all ports.  */
-	pm3386_set_carrier(0, 0);
-	pm3386_set_carrier(1, 0);
-	if (secondary)
-		pm3386_set_carrier(2, 0);
-}
-
-static u16 swaph(u16 x)
-{
-	return ((x << 8) | (x >> 8)) & 0xffff;
-}
-
-int pm3386_port_count(void)
-{
-	return 2 + pm3386_secondary_present();
-}
-
-void pm3386_init_port(int port)
-{
-	int pm = port >> 1;
-
-	/*
-	 * Work around ENP2611 bootloader programming MAC address
-	 * in reverse.
-	 */
-	if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 &&
-	    (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) {
-		u16 temp[3];
-
-		temp[0] = pm3386_port_reg_read(port, 0x308, 0x100);
-		temp[1] = pm3386_port_reg_read(port, 0x309, 0x100);
-		temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100);
-		pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2]));
-		pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1]));
-		pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0]));
-	}
-
-	/*
-	 * Initialise narrowbanding mode.  See application note 2010486
-	 * for more information.  (@@@ We also need to issue a reset
-	 * when ROOL or DOOL are detected.)
-	 */
-	pm3386_port_reg_write(port, 0x708, 0x10, 0xd055);
-	udelay(500);
-	pm3386_port_reg_write(port, 0x708, 0x10, 0x5055);
-
-	/*
-	 * SPI-3 ingress block.  Set 64 bytes SPI-3 burst size
-	 * towards SPI-3 bridge.
-	 */
-	pm3386_port_reg_write(port, 0x122, 0x20, 0x0002);
-
-	/*
-	 * Enable ingress protocol checking, and soft reset the
-	 * SPI-3 ingress block.
-	 */
-	pm3386_reg_write(pm, 0x103, 0x0003);
-	while (!(pm3386_reg_read(pm, 0x103) & 0x80))
-		;
-
-	/*
-	 * SPI-3 egress block.  Gather 12288 bytes of the current
-	 * packet in the TX fifo before initiating transmit on the
-	 * SERDES interface.  (Prevents TX underflows.)
-	 */
-	pm3386_port_reg_write(port, 0x221, 0x20, 0x0007);
-
-	/*
-	 * Enforce odd parity from the SPI-3 bridge, and soft reset
-	 * the SPI-3 egress block.
-	 */
-	pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1)));
-	while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c)
-		;
-
-	/*
-	 * EGMAC block.  Set this channels to reject long preambles,
-	 * not send or transmit PAUSE frames, enable preamble checking,
-	 * disable frame length checking, enable FCS appending, enable
-	 * TX frame padding.
-	 */
-	pm3386_port_reg_write(port, 0x302, 0x100, 0x0113);
-
-	/*
-	 * Soft reset the EGMAC block.
-	 */
-	pm3386_port_reg_write(port, 0x301, 0x100, 0x8000);
-	pm3386_port_reg_write(port, 0x301, 0x100, 0x0000);
-
-	/*
-	 * Auto-sense autonegotiation status.
-	 */
-	pm3386_port_reg_write(port, 0x306, 0x100, 0x0100);
-
-	/*
-	 * Allow reception of jumbo frames.
-	 */
-	pm3386_port_reg_write(port, 0x310, 0x100, 9018);
-
-	/*
-	 * Allow transmission of jumbo frames.
-	 */
-	pm3386_port_reg_write(port, 0x336, 0x100, 9018);
-
-	/* @@@ Should set 0x337/0x437 (RX forwarding threshold.)  */
-
-	/*
-	 * Set autonegotiation parameters to 'no PAUSE, full duplex.'
-	 */
-	pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020);
-
-	/*
-	 * Enable and restart autonegotiation.
-	 */
-	pm3386_port_reg_write(port, 0x318, 0x100, 0x0003);
-	pm3386_port_reg_write(port, 0x318, 0x100, 0x0002);
-}
-
-void pm3386_get_mac(int port, u8 *mac)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x308, 0x100);
-	mac[0] = temp & 0xff;
-	mac[1] = (temp >> 8) & 0xff;
-
-	temp = pm3386_port_reg_read(port, 0x309, 0x100);
-	mac[2] = temp & 0xff;
-	mac[3] = (temp >> 8) & 0xff;
-
-	temp = pm3386_port_reg_read(port, 0x30a, 0x100);
-	mac[4] = temp & 0xff;
-	mac[5] = (temp >> 8) & 0xff;
-}
-
-void pm3386_set_mac(int port, u8 *mac)
-{
-	pm3386_port_reg_write(port, 0x308, 0x100, (mac[1] << 8) | mac[0]);
-	pm3386_port_reg_write(port, 0x309, 0x100, (mac[3] << 8) | mac[2]);
-	pm3386_port_reg_write(port, 0x30a, 0x100, (mac[5] << 8) | mac[4]);
-}
-
-static u32 pm3386_get_stat(int port, u16 base)
-{
-	u32 value;
-
-	value = pm3386_port_reg_read(port, base, 0x100);
-	value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16;
-
-	return value;
-}
-
-void pm3386_get_stats(int port, struct net_device_stats *stats)
-{
-	/*
-	 * Snapshot statistics counters.
-	 */
-	pm3386_port_reg_write(port, 0x500, 0x100, 0x0001);
-	while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001)
-		;
-
-	memset(stats, 0, sizeof(*stats));
-
-	stats->rx_packets = pm3386_get_stat(port, 0x510);
-	stats->tx_packets = pm3386_get_stat(port, 0x590);
-	stats->rx_bytes = pm3386_get_stat(port, 0x514);
-	stats->tx_bytes = pm3386_get_stat(port, 0x594);
-	/* @@@ Add other stats.  */
-}
-
-void pm3386_set_carrier(int port, int state)
-{
-	pm3386_port_reg_write(port, 0x703, 0x10, state ? 0x1001 : 0x0000);
-}
-
-int pm3386_is_link_up(int port)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x31a, 0x100);
-	temp = pm3386_port_reg_read(port, 0x31a, 0x100);
-
-	return !!(temp & 0x0002);
-}
-
-void pm3386_enable_rx(int port)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x303, 0x100);
-	temp |= 0x1000;
-	pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-void pm3386_disable_rx(int port)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x303, 0x100);
-	temp &= 0xefff;
-	pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-void pm3386_enable_tx(int port)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x303, 0x100);
-	temp |= 0x4000;
-	pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-void pm3386_disable_tx(int port)
-{
-	u16 temp;
-
-	temp = pm3386_port_reg_read(port, 0x303, 0x100);
-	temp &= 0xbfff;
-	pm3386_port_reg_write(port, 0x303, 0x100, temp);
-}
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.h b/drivers/net/ethernet/xscale/ixp2000/pm3386.h
deleted file mode 100644
index cc4183d..0000000
--- a/drivers/net/ethernet/xscale/ixp2000/pm3386.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Helper functions for the PM3386s on the Radisys ENP2611
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __PM3386_H
-#define __PM3386_H
-
-void pm3386_reset(void);
-int pm3386_port_count(void);
-void pm3386_init_port(int port);
-void pm3386_get_mac(int port, u8 *mac);
-void pm3386_set_mac(int port, u8 *mac);
-void pm3386_get_stats(int port, struct net_device_stats *stats);
-void pm3386_set_carrier(int port, int state);
-int pm3386_is_link_up(int port);
-void pm3386_enable_rx(int port);
-void pm3386_disable_rx(int port);
-void pm3386_enable_tx(int port);
-void pm3386_disable_tx(int port);
-
-
-#endif
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 41a8b5a..482648f 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1002,12 +1002,41 @@
 	return phy_start_aneg(port->phydev);
 }
 
+int ixp46x_phc_index = -1;
+
+static int ixp4xx_get_ts_info(struct net_device *dev,
+			      struct ethtool_ts_info *info)
+{
+	if (!cpu_is_ixp46x()) {
+		info->so_timestamping =
+			SOF_TIMESTAMPING_TX_SOFTWARE |
+			SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE;
+		info->phc_index = -1;
+		return 0;
+	}
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = ixp46x_phc_index;
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
+	return 0;
+}
+
 static const struct ethtool_ops ixp4xx_ethtool_ops = {
 	.get_drvinfo = ixp4xx_get_drvinfo,
 	.get_settings = ixp4xx_get_settings,
 	.set_settings = ixp4xx_set_settings,
 	.nway_reset = ixp4xx_nway_reset,
 	.get_link = ethtool_op_get_link,
+	.get_ts_info = ixp4xx_get_ts_info,
 };
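
The new ixp4xx_get_ts_info() hook above advertises the driver's timestamping
capabilities through the ETHTOOL_GET_TS_INFO command. As a rough illustration
only (not part of the patch), userspace can read what a driver reports via the
SIOCETHTOOL ioctl; the interface name "eth0" below is just an example, and the
program assumes kernel headers that already provide struct ethtool_ts_info.

/*
 * Illustrative userspace sketch: query a NIC's timestamping capabilities
 * as reported by the driver's .get_ts_info callback.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info;
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */

	memset(&info, 0, sizeof(info));
	info.cmd = ETHTOOL_GET_TS_INFO;
	ifr.ifr_data = (void *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}

	/* phc_index is -1 when no PTP hardware clock is associated. */
	printf("so_timestamping 0x%x, phc_index %d, tx_types 0x%x, rx_filters 0x%x\n",
	       info.so_timestamping, info.phc_index,
	       info.tx_types, info.rx_filters);

	close(fd);
	return 0;
}
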
 
 
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 168c8f4..d471963 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -113,10 +113,9 @@
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	if (pci_request_regions(pdev, "rrunner")) {
-		ret = -EIO;
+	ret = pci_request_regions(pdev, "rrunner");
+	if (ret < 0)
 		goto out;
-	}
 
 	pci_set_drvdata(pdev, dev);
 
@@ -124,11 +123,8 @@
 
 	spin_lock_init(&rrpriv->lock);
 
-	dev->irq = pdev->irq;
 	dev->netdev_ops = &rr_netdev_ops;
 
-	dev->base_addr = pci_resource_start(pdev, 0);
-
 	/* display version info if adapter is found */
 	if (!version_disp) {
 		/* set display flag to TRUE so that */
@@ -146,16 +142,15 @@
 	pci_set_master(pdev);
 
 	printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
-	       "at 0x%08lx, irq %i, PCI latency %i\n", dev->name,
-	       dev->base_addr, dev->irq, pci_latency);
+	       "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
+	       (unsigned long long)pci_resource_start(pdev, 0),
+	       pdev->irq, pci_latency);
 
 	/*
-	 * Remap the regs into kernel space.
+	 * Remap the MMIO regs into kernel space.
 	 */
-
-	rrpriv->regs = ioremap(dev->base_addr, 0x1000);
-
-	if (!rrpriv->regs){
+	rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
+	if (!rrpriv->regs) {
 		printk(KERN_ERR "%s:  Unable to map I/O register, "
 			"RoadRunner will be disabled.\n", dev->name);
 		ret = -EIO;
@@ -202,8 +197,6 @@
 
 	rr_init(dev);
 
-	dev->base_addr = 0;
-
 	ret = register_netdev(dev);
 	if (ret)
 		goto out;
@@ -217,7 +210,7 @@
 		pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
 				    rrpriv->tx_ring_dma);
 	if (rrpriv->regs)
-		iounmap(rrpriv->regs);
+		pci_iounmap(pdev, rrpriv->regs);
 	if (pdev) {
 		pci_release_regions(pdev);
 		pci_set_drvdata(pdev, NULL);
@@ -231,29 +224,26 @@
 static void __devexit rr_remove_one (struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
+	struct rr_private *rr = netdev_priv(dev);
 
-	if (dev) {
-		struct rr_private *rr = netdev_priv(dev);
-
-		if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){
-			printk(KERN_ERR "%s: trying to unload running NIC\n",
-			       dev->name);
-			writel(HALT_NIC, &rr->regs->HostCtrl);
-		}
-
-		pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
-				    rr->evt_ring_dma);
-		pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
-				    rr->rx_ring_dma);
-		pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
-				    rr->tx_ring_dma);
-		unregister_netdev(dev);
-		iounmap(rr->regs);
-		free_netdev(dev);
-		pci_release_regions(pdev);
-		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
+	if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
+		printk(KERN_ERR "%s: trying to unload running NIC\n",
+		       dev->name);
+		writel(HALT_NIC, &rr->regs->HostCtrl);
 	}
+
+	unregister_netdev(dev);
+	pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
+			    rr->evt_ring_dma);
+	pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
+			    rr->rx_ring_dma);
+	pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
+			    rr->tx_ring_dma);
+	pci_iounmap(pdev, rr->regs);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(dev);
 }
 
 
@@ -1229,9 +1219,9 @@
 	readl(&regs->HostCtrl);
 	spin_unlock_irqrestore(&rrpriv->lock, flags);
 
-	if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
+	if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
 		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
-		       dev->name, dev->irq);
+		       dev->name, pdev->irq);
 		ecode = -EAGAIN;
 		goto error;
 	}
@@ -1338,16 +1328,15 @@
 
 static int rr_close(struct net_device *dev)
 {
-	struct rr_private *rrpriv;
-	struct rr_regs __iomem *regs;
+	struct rr_private *rrpriv = netdev_priv(dev);
+	struct rr_regs __iomem *regs = rrpriv->regs;
+	struct pci_dev *pdev = rrpriv->pci_dev;
 	unsigned long flags;
 	u32 tmp;
 	short i;
 
 	netif_stop_queue(dev);
 
-	rrpriv = netdev_priv(dev);
-	regs = rrpriv->regs;
 
 	/*
 	 * Lock to make sure we are not cleaning up while another CPU
@@ -1386,15 +1375,15 @@
 	rr_raz_tx(rrpriv, dev);
 	rr_raz_rx(rrpriv, dev);
 
-	pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl),
+	pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
 			    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
 	rrpriv->rx_ctrl = NULL;
 
-	pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info),
-			    rrpriv->info, rrpriv->info_dma);
+	pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
+			    rrpriv->info_dma);
 	rrpriv->info = NULL;
 
-	free_irq(dev->irq, dev);
+	free_irq(pdev->irq, dev);
 	spin_unlock_irqrestore(&rrpriv->lock, flags);
 
 	return 0;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index c358245..4ffcd57 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -27,6 +27,7 @@
 
 #include <linux/list.h>
 #include <linux/hyperv.h>
+#include <linux/rndis.h>
 
 /* Fwd declaration */
 struct hv_netvsc_packet;
@@ -506,295 +507,6 @@
 	void *extension;
 };
 
-
-/*  Status codes */
-
-
-#ifndef STATUS_SUCCESS
-#define STATUS_SUCCESS				(0x00000000L)
-#endif
-
-#ifndef STATUS_UNSUCCESSFUL
-#define STATUS_UNSUCCESSFUL			(0xC0000001L)
-#endif
-
-#ifndef STATUS_PENDING
-#define STATUS_PENDING				(0x00000103L)
-#endif
-
-#ifndef STATUS_INSUFFICIENT_RESOURCES
-#define STATUS_INSUFFICIENT_RESOURCES		(0xC000009AL)
-#endif
-
-#ifndef STATUS_BUFFER_OVERFLOW
-#define STATUS_BUFFER_OVERFLOW			(0x80000005L)
-#endif
-
-#ifndef STATUS_NOT_SUPPORTED
-#define STATUS_NOT_SUPPORTED			(0xC00000BBL)
-#endif
-
-#define RNDIS_STATUS_SUCCESS			(STATUS_SUCCESS)
-#define RNDIS_STATUS_PENDING			(STATUS_PENDING)
-#define RNDIS_STATUS_NOT_RECOGNIZED		(0x00010001L)
-#define RNDIS_STATUS_NOT_COPIED			(0x00010002L)
-#define RNDIS_STATUS_NOT_ACCEPTED		(0x00010003L)
-#define RNDIS_STATUS_CALL_ACTIVE		(0x00010007L)
-
-#define RNDIS_STATUS_ONLINE			(0x40010003L)
-#define RNDIS_STATUS_RESET_START		(0x40010004L)
-#define RNDIS_STATUS_RESET_END			(0x40010005L)
-#define RNDIS_STATUS_RING_STATUS		(0x40010006L)
-#define RNDIS_STATUS_CLOSED			(0x40010007L)
-#define RNDIS_STATUS_WAN_LINE_UP		(0x40010008L)
-#define RNDIS_STATUS_WAN_LINE_DOWN		(0x40010009L)
-#define RNDIS_STATUS_WAN_FRAGMENT		(0x4001000AL)
-#define RNDIS_STATUS_MEDIA_CONNECT		(0x4001000BL)
-#define RNDIS_STATUS_MEDIA_DISCONNECT		(0x4001000CL)
-#define RNDIS_STATUS_HARDWARE_LINE_UP		(0x4001000DL)
-#define RNDIS_STATUS_HARDWARE_LINE_DOWN		(0x4001000EL)
-#define RNDIS_STATUS_INTERFACE_UP		(0x4001000FL)
-#define RNDIS_STATUS_INTERFACE_DOWN		(0x40010010L)
-#define RNDIS_STATUS_MEDIA_BUSY			(0x40010011L)
-#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION	(0x40010012L)
-#define RNDIS_STATUS_WW_INDICATION		RDIA_SPECIFIC_INDICATION
-#define RNDIS_STATUS_LINK_SPEED_CHANGE		(0x40010013L)
-
-#define RNDIS_STATUS_NOT_RESETTABLE		(0x80010001L)
-#define RNDIS_STATUS_SOFT_ERRORS		(0x80010003L)
-#define RNDIS_STATUS_HARD_ERRORS		(0x80010004L)
-#define RNDIS_STATUS_BUFFER_OVERFLOW		(STATUS_BUFFER_OVERFLOW)
-
-#define RNDIS_STATUS_FAILURE			(STATUS_UNSUCCESSFUL)
-#define RNDIS_STATUS_RESOURCES			(STATUS_INSUFFICIENT_RESOURCES)
-#define RNDIS_STATUS_CLOSING			(0xC0010002L)
-#define RNDIS_STATUS_BAD_VERSION		(0xC0010004L)
-#define RNDIS_STATUS_BAD_CHARACTERISTICS	(0xC0010005L)
-#define RNDIS_STATUS_ADAPTER_NOT_FOUND		(0xC0010006L)
-#define RNDIS_STATUS_OPEN_FAILED		(0xC0010007L)
-#define RNDIS_STATUS_DEVICE_FAILED		(0xC0010008L)
-#define RNDIS_STATUS_MULTICAST_FULL		(0xC0010009L)
-#define RNDIS_STATUS_MULTICAST_EXISTS		(0xC001000AL)
-#define RNDIS_STATUS_MULTICAST_NOT_FOUND	(0xC001000BL)
-#define RNDIS_STATUS_REQUEST_ABORTED		(0xC001000CL)
-#define RNDIS_STATUS_RESET_IN_PROGRESS		(0xC001000DL)
-#define RNDIS_STATUS_CLOSING_INDICATING		(0xC001000EL)
-#define RNDIS_STATUS_NOT_SUPPORTED		(STATUS_NOT_SUPPORTED)
-#define RNDIS_STATUS_INVALID_PACKET		(0xC001000FL)
-#define RNDIS_STATUS_OPEN_LIST_FULL		(0xC0010010L)
-#define RNDIS_STATUS_ADAPTER_NOT_READY		(0xC0010011L)
-#define RNDIS_STATUS_ADAPTER_NOT_OPEN		(0xC0010012L)
-#define RNDIS_STATUS_NOT_INDICATING		(0xC0010013L)
-#define RNDIS_STATUS_INVALID_LENGTH		(0xC0010014L)
-#define RNDIS_STATUS_INVALID_DATA		(0xC0010015L)
-#define RNDIS_STATUS_BUFFER_TOO_SHORT		(0xC0010016L)
-#define RNDIS_STATUS_INVALID_OID		(0xC0010017L)
-#define RNDIS_STATUS_ADAPTER_REMOVED		(0xC0010018L)
-#define RNDIS_STATUS_UNSUPPORTED_MEDIA		(0xC0010019L)
-#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE	(0xC001001AL)
-#define RNDIS_STATUS_FILE_NOT_FOUND		(0xC001001BL)
-#define RNDIS_STATUS_ERROR_READING_FILE		(0xC001001CL)
-#define RNDIS_STATUS_ALREADY_MAPPED		(0xC001001DL)
-#define RNDIS_STATUS_RESOURCE_CONFLICT		(0xC001001EL)
-#define RNDIS_STATUS_NO_CABLE			(0xC001001FL)
-
-#define RNDIS_STATUS_INVALID_SAP		(0xC0010020L)
-#define RNDIS_STATUS_SAP_IN_USE			(0xC0010021L)
-#define RNDIS_STATUS_INVALID_ADDRESS		(0xC0010022L)
-#define RNDIS_STATUS_VC_NOT_ACTIVATED		(0xC0010023L)
-#define RNDIS_STATUS_DEST_OUT_OF_ORDER		(0xC0010024L)
-#define RNDIS_STATUS_VC_NOT_AVAILABLE		(0xC0010025L)
-#define RNDIS_STATUS_CELLRATE_NOT_AVAILABLE	(0xC0010026L)
-#define RNDIS_STATUS_INCOMPATABLE_QOS		(0xC0010027L)
-#define RNDIS_STATUS_AAL_PARAMS_UNSUPPORTED	(0xC0010028L)
-#define RNDIS_STATUS_NO_ROUTE_TO_DESTINATION	(0xC0010029L)
-
-#define RNDIS_STATUS_TOKEN_RING_OPEN_ERROR	(0xC0011000L)
-
-/* Object Identifiers used by NdisRequest Query/Set Information */
-/* General Objects */
-#define RNDIS_OID_GEN_SUPPORTED_LIST		0x00010101
-#define RNDIS_OID_GEN_HARDWARE_STATUS		0x00010102
-#define RNDIS_OID_GEN_MEDIA_SUPPORTED		0x00010103
-#define RNDIS_OID_GEN_MEDIA_IN_USE		0x00010104
-#define RNDIS_OID_GEN_MAXIMUM_LOOKAHEAD		0x00010105
-#define RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE	0x00010106
-#define RNDIS_OID_GEN_LINK_SPEED		0x00010107
-#define RNDIS_OID_GEN_TRANSMIT_BUFFER_SPACE	0x00010108
-#define RNDIS_OID_GEN_RECEIVE_BUFFER_SPACE	0x00010109
-#define RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE	0x0001010A
-#define RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE	0x0001010B
-#define RNDIS_OID_GEN_VENDOR_ID			0x0001010C
-#define RNDIS_OID_GEN_VENDOR_DESCRIPTION	0x0001010D
-#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER	0x0001010E
-#define RNDIS_OID_GEN_CURRENT_LOOKAHEAD		0x0001010F
-#define RNDIS_OID_GEN_DRIVER_VERSION		0x00010110
-#define RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE	0x00010111
-#define RNDIS_OID_GEN_PROTOCOL_OPTIONS		0x00010112
-#define RNDIS_OID_GEN_MAC_OPTIONS		0x00010113
-#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS	0x00010114
-#define RNDIS_OID_GEN_MAXIMUM_SEND_PACKETS	0x00010115
-#define RNDIS_OID_GEN_VENDOR_DRIVER_VERSION	0x00010116
-#define RNDIS_OID_GEN_NETWORK_LAYER_ADDRESSES	0x00010118
-#define RNDIS_OID_GEN_TRANSPORT_HEADER_OFFSET	0x00010119
-#define RNDIS_OID_GEN_MACHINE_NAME		0x0001021A
-#define RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER	0x0001021B
-
-#define RNDIS_OID_GEN_XMIT_OK			0x00020101
-#define RNDIS_OID_GEN_RCV_OK			0x00020102
-#define RNDIS_OID_GEN_XMIT_ERROR		0x00020103
-#define RNDIS_OID_GEN_RCV_ERROR			0x00020104
-#define RNDIS_OID_GEN_RCV_NO_BUFFER		0x00020105
-
-#define RNDIS_OID_GEN_DIRECTED_BYTES_XMIT	0x00020201
-#define RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT	0x00020202
-#define RNDIS_OID_GEN_MULTICAST_BYTES_XMIT	0x00020203
-#define RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT	0x00020204
-#define RNDIS_OID_GEN_BROADCAST_BYTES_XMIT	0x00020205
-#define RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT	0x00020206
-#define RNDIS_OID_GEN_DIRECTED_BYTES_RCV	0x00020207
-#define RNDIS_OID_GEN_DIRECTED_FRAMES_RCV	0x00020208
-#define RNDIS_OID_GEN_MULTICAST_BYTES_RCV	0x00020209
-#define RNDIS_OID_GEN_MULTICAST_FRAMES_RCV	0x0002020A
-#define RNDIS_OID_GEN_BROADCAST_BYTES_RCV	0x0002020B
-#define RNDIS_OID_GEN_BROADCAST_FRAMES_RCV	0x0002020C
-
-#define RNDIS_OID_GEN_RCV_CRC_ERROR		0x0002020D
-#define RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH	0x0002020E
-
-#define RNDIS_OID_GEN_GET_TIME_CAPS		0x0002020F
-#define RNDIS_OID_GEN_GET_NETCARD_TIME		0x00020210
-
-/* These are connection-oriented general OIDs. */
-/* These replace the above OIDs for connection-oriented media. */
-#define RNDIS_OID_GEN_CO_SUPPORTED_LIST		0x00010101
-#define RNDIS_OID_GEN_CO_HARDWARE_STATUS	0x00010102
-#define RNDIS_OID_GEN_CO_MEDIA_SUPPORTED	0x00010103
-#define RNDIS_OID_GEN_CO_MEDIA_IN_USE		0x00010104
-#define RNDIS_OID_GEN_CO_LINK_SPEED		0x00010105
-#define RNDIS_OID_GEN_CO_VENDOR_ID		0x00010106
-#define RNDIS_OID_GEN_CO_VENDOR_DESCRIPTION	0x00010107
-#define RNDIS_OID_GEN_CO_DRIVER_VERSION		0x00010108
-#define RNDIS_OID_GEN_CO_PROTOCOL_OPTIONS	0x00010109
-#define RNDIS_OID_GEN_CO_MAC_OPTIONS		0x0001010A
-#define RNDIS_OID_GEN_CO_MEDIA_CONNECT_STATUS	0x0001010B
-#define RNDIS_OID_GEN_CO_VENDOR_DRIVER_VERSION	0x0001010C
-#define RNDIS_OID_GEN_CO_MINIMUM_LINK_SPEED	0x0001010D
-
-#define RNDIS_OID_GEN_CO_GET_TIME_CAPS		0x00010201
-#define RNDIS_OID_GEN_CO_GET_NETCARD_TIME	0x00010202
-
-/* These are connection-oriented statistics OIDs. */
-#define RNDIS_OID_GEN_CO_XMIT_PDUS_OK		0x00020101
-#define RNDIS_OID_GEN_CO_RCV_PDUS_OK		0x00020102
-#define RNDIS_OID_GEN_CO_XMIT_PDUS_ERROR	0x00020103
-#define RNDIS_OID_GEN_CO_RCV_PDUS_ERROR		0x00020104
-#define RNDIS_OID_GEN_CO_RCV_PDUS_NO_BUFFER	0x00020105
-
-
-#define RNDIS_OID_GEN_CO_RCV_CRC_ERROR		0x00020201
-#define RNDIS_OID_GEN_CO_TRANSMIT_QUEUE_LENGTH	0x00020202
-#define RNDIS_OID_GEN_CO_BYTES_XMIT		0x00020203
-#define RNDIS_OID_GEN_CO_BYTES_RCV		0x00020204
-#define RNDIS_OID_GEN_CO_BYTES_XMIT_OUTSTANDING	0x00020205
-#define RNDIS_OID_GEN_CO_NETCARD_LOAD		0x00020206
-
-/* These are objects for Connection-oriented media call-managers. */
-#define RNDIS_OID_CO_ADD_PVC			0xFF000001
-#define RNDIS_OID_CO_DELETE_PVC			0xFF000002
-#define RNDIS_OID_CO_GET_CALL_INFORMATION	0xFF000003
-#define RNDIS_OID_CO_ADD_ADDRESS		0xFF000004
-#define RNDIS_OID_CO_DELETE_ADDRESS		0xFF000005
-#define RNDIS_OID_CO_GET_ADDRESSES		0xFF000006
-#define RNDIS_OID_CO_ADDRESS_CHANGE		0xFF000007
-#define RNDIS_OID_CO_SIGNALING_ENABLED		0xFF000008
-#define RNDIS_OID_CO_SIGNALING_DISABLED		0xFF000009
-
-/* 802.3 Objects (Ethernet) */
-#define RNDIS_OID_802_3_PERMANENT_ADDRESS	0x01010101
-#define RNDIS_OID_802_3_CURRENT_ADDRESS		0x01010102
-#define RNDIS_OID_802_3_MULTICAST_LIST		0x01010103
-#define RNDIS_OID_802_3_MAXIMUM_LIST_SIZE	0x01010104
-#define RNDIS_OID_802_3_MAC_OPTIONS		0x01010105
-
-#define NDIS_802_3_MAC_OPTION_PRIORITY		0x00000001
-
-#define RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT	0x01020101
-#define RNDIS_OID_802_3_XMIT_ONE_COLLISION	0x01020102
-#define RNDIS_OID_802_3_XMIT_MORE_COLLISIONS	0x01020103
-
-#define RNDIS_OID_802_3_XMIT_DEFERRED		0x01020201
-#define RNDIS_OID_802_3_XMIT_MAX_COLLISIONS	0x01020202
-#define RNDIS_OID_802_3_RCV_OVERRUN		0x01020203
-#define RNDIS_OID_802_3_XMIT_UNDERRUN		0x01020204
-#define RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE	0x01020205
-#define RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST	0x01020206
-#define RNDIS_OID_802_3_XMIT_LATE_COLLISIONS	0x01020207
-
-/* Remote NDIS message types */
-#define REMOTE_NDIS_PACKET_MSG			0x00000001
-#define REMOTE_NDIS_INITIALIZE_MSG		0x00000002
-#define REMOTE_NDIS_HALT_MSG			0x00000003
-#define REMOTE_NDIS_QUERY_MSG			0x00000004
-#define REMOTE_NDIS_SET_MSG			0x00000005
-#define REMOTE_NDIS_RESET_MSG			0x00000006
-#define REMOTE_NDIS_INDICATE_STATUS_MSG		0x00000007
-#define REMOTE_NDIS_KEEPALIVE_MSG		0x00000008
-
-#define REMOTE_CONDIS_MP_CREATE_VC_MSG		0x00008001
-#define REMOTE_CONDIS_MP_DELETE_VC_MSG		0x00008002
-#define REMOTE_CONDIS_MP_ACTIVATE_VC_MSG	0x00008005
-#define REMOTE_CONDIS_MP_DEACTIVATE_VC_MSG	0x00008006
-#define REMOTE_CONDIS_INDICATE_STATUS_MSG	0x00008007
-
-/* Remote NDIS message completion types */
-#define REMOTE_NDIS_INITIALIZE_CMPLT		0x80000002
-#define REMOTE_NDIS_QUERY_CMPLT			0x80000004
-#define REMOTE_NDIS_SET_CMPLT			0x80000005
-#define REMOTE_NDIS_RESET_CMPLT			0x80000006
-#define REMOTE_NDIS_KEEPALIVE_CMPLT		0x80000008
-
-#define REMOTE_CONDIS_MP_CREATE_VC_CMPLT	0x80008001
-#define REMOTE_CONDIS_MP_DELETE_VC_CMPLT	0x80008002
-#define REMOTE_CONDIS_MP_ACTIVATE_VC_CMPLT	0x80008005
-#define REMOTE_CONDIS_MP_DEACTIVATE_VC_CMPLT	0x80008006
-
-/*
- * Reserved message type for private communication between lower-layer host
- * driver and remote device, if necessary.
- */
-#define REMOTE_NDIS_BUS_MSG			0xff000001
-
-/*  Defines for DeviceFlags in struct rndis_initialize_complete */
-#define RNDIS_DF_CONNECTIONLESS			0x00000001
-#define RNDIS_DF_CONNECTION_ORIENTED		0x00000002
-#define RNDIS_DF_RAW_DATA			0x00000004
-
-/*  Remote NDIS medium types. */
-#define RNDIS_MEDIUM_802_3			0x00000000
-#define RNDIS_MEDIUM_802_5			0x00000001
-#define RNDIS_MEDIUM_FDDI				0x00000002
-#define RNDIS_MEDIUM_WAN				0x00000003
-#define RNDIS_MEDIUM_LOCAL_TALK			0x00000004
-#define RNDIS_MEDIUM_ARCNET_RAW			0x00000006
-#define RNDIS_MEDIUM_ARCNET_878_2			0x00000007
-#define RNDIS_MEDIUM_ATM				0x00000008
-#define RNDIS_MEDIUM_WIRELESS_WAN			0x00000009
-#define RNDIS_MEDIUM_IRDA				0x0000000a
-#define RNDIS_MEDIUM_CO_WAN			0x0000000b
-/* Not a real medium, defined as an upper-bound */
-#define RNDIS_MEDIUM_MAX				0x0000000d
-
-
-/* Remote NDIS medium connection states. */
-#define RNDIS_MEDIA_STATE_CONNECTED		0x00000000
-#define RNDIS_MEDIA_STATE_DISCONNECTED		0x00000001
-
-/*  Remote NDIS version numbers */
-#define RNDIS_MAJOR_VERSION			0x00000001
-#define RNDIS_MINOR_VERSION			0x00000000
-
-
 /* NdisInitialize message */
 struct rndis_initialize_request {
 	u32 req_id;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d025c83..8b91947 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -428,6 +428,24 @@
 	return 0;
 }
 
+
+#define RING_AVAIL_PERCENT_HIWATER 20
+#define RING_AVAIL_PERCENT_LOWATER 10
+
+/*
+ * Get the percentage of available bytes to write in the ring.
+ * The return value is in range from 0 to 100.
+ */
+static inline u32 hv_ringbuf_avail_percent(
+		struct hv_ring_buffer_info *ring_info)
+{
+	u32 avail_read, avail_write;
+
+	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
+
+	return avail_write * 100 / ring_info->ring_datasize;
+}
+
 static void netvsc_send_completion(struct hv_device *device,
 				   struct vmpacket_descriptor *packet)
 {
@@ -455,6 +473,8 @@
 		complete(&net_device->channel_init_wait);
 	} else if (nvsp_packet->hdr.msg_type ==
 		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
+		int num_outstanding_sends;
+
 		/* Get the send context */
 		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
 			packet->trans_id;
@@ -463,10 +483,14 @@
 		nvsc_packet->completion.send.send_completion(
 			nvsc_packet->completion.send.send_completion_ctx);
 
-		atomic_dec(&net_device->num_outstanding_sends);
+		num_outstanding_sends =
+			atomic_dec_return(&net_device->num_outstanding_sends);
 
-		if (netif_queue_stopped(ndev) && !net_device->start_remove)
-			netif_wake_queue(ndev);
+		if (netif_queue_stopped(ndev) && !net_device->start_remove &&
+			(hv_ringbuf_avail_percent(&device->channel->outbound)
+			> RING_AVAIL_PERCENT_HIWATER ||
+			num_outstanding_sends < 1))
+				netif_wake_queue(ndev);
 	} else {
 		netdev_err(ndev, "Unknown send completion packet type- "
 			   "%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -519,10 +543,19 @@
 
 	if (ret == 0) {
 		atomic_inc(&net_device->num_outstanding_sends);
+		if (hv_ringbuf_avail_percent(&device->channel->outbound) <
+			RING_AVAIL_PERCENT_LOWATER) {
+			netif_stop_queue(ndev);
+			if (atomic_read(&net_device->
+				num_outstanding_sends) < 1)
+				netif_wake_queue(ndev);
+		}
 	} else if (ret == -EAGAIN) {
 		netif_stop_queue(ndev);
-		if (atomic_read(&net_device->num_outstanding_sends) < 1)
+		if (atomic_read(&net_device->num_outstanding_sends) < 1) {
 			netif_wake_queue(ndev);
+			ret = -ENOSPC;
+		}
 	} else {
 		netdev_err(ndev, "Unable to send packet %p ret %d\n",
 			   packet, ret);
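
The netvsc changes above throttle the transmit queue on the fill level of the
outbound VMBus ring: the queue is stopped when less than
RING_AVAIL_PERCENT_LOWATER of the ring is free and is woken again only once
RING_AVAIL_PERCENT_HIWATER is available (or nothing is outstanding). The
following standalone sketch models that watermark arithmetic with a mock ring
structure; it is illustrative only and does not use the Hyper-V ring-buffer API.

/*
 * Illustrative sketch of the high/low watermark logic added to netvsc.
 * struct mock_ring and avail_percent() are simplified stand-ins; only the
 * percentage arithmetic and the stop/wake decisions mirror the patch.
 */
#include <stdio.h>

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

struct mock_ring {
	unsigned int datasize;		/* total usable bytes in the ring */
	unsigned int avail_write;	/* bytes currently free for writing */
};

static unsigned int avail_percent(const struct mock_ring *ring)
{
	return ring->avail_write * 100 / ring->datasize;
}

int main(void)
{
	struct mock_ring ring = { .datasize = 0x8000, .avail_write = 0x8000 };
	int outstanding_sends = 0;
	int queue_stopped = 0;

	/* Send path: stop the queue when free space drops below LOWATER. */
	ring.avail_write = 0x600;	/* pretend the host is slow to drain */
	outstanding_sends++;
	if (avail_percent(&ring) < RING_AVAIL_PERCENT_LOWATER)
		queue_stopped = 1;

	/* Completion path: wake only once enough space is back (HIWATER),
	 * or when nothing at all is outstanding. */
	ring.avail_write = 0x2000;
	outstanding_sends--;
	if (queue_stopped &&
	    (avail_percent(&ring) > RING_AVAIL_PERCENT_HIWATER ||
	     outstanding_sends < 1))
		queue_stopped = 0;

	printf("avail %u%%, queue %s\n", avail_percent(&ring),
	       queue_stopped ? "stopped" : "running");
	return 0;
}
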
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 2d59138..8f8ed33 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -211,9 +211,13 @@
 		net->stats.tx_packets++;
 	} else {
 		kfree(packet);
+		if (ret != -EAGAIN) {
+			dev_kfree_skb_any(skb);
+			net->stats.tx_dropped++;
+		}
 	}
 
-	return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK;
+	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
 }
 
 /*
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index d6be64b..981ebb1 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -129,8 +129,8 @@
 	netdev = net_device->ndev;
 
 	switch (rndis_msg->ndis_msg_type) {
-	case REMOTE_NDIS_PACKET_MSG:
-		netdev_dbg(netdev, "REMOTE_NDIS_PACKET_MSG (len %u, "
+	case RNDIS_MSG_PACKET:
+		netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
 			   "data offset %u data len %u, # oob %u, "
 			   "oob offset %u, oob len %u, pkt offset %u, "
 			   "pkt len %u\n",
@@ -144,8 +144,8 @@
 			   rndis_msg->msg.pkt.per_pkt_info_len);
 		break;
 
-	case REMOTE_NDIS_INITIALIZE_CMPLT:
-		netdev_dbg(netdev, "REMOTE_NDIS_INITIALIZE_CMPLT "
+	case RNDIS_MSG_INIT_C:
+		netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
 			"(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
 			"device flags %d, max xfer size 0x%x, max pkts %u, "
 			"pkt aligned %u)\n",
@@ -162,8 +162,8 @@
 			   pkt_alignment_factor);
 		break;
 
-	case REMOTE_NDIS_QUERY_CMPLT:
-		netdev_dbg(netdev, "REMOTE_NDIS_QUERY_CMPLT "
+	case RNDIS_MSG_QUERY_C:
+		netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
 			"(len %u, id 0x%x, status 0x%x, buf len %u, "
 			"buf offset %u)\n",
 			rndis_msg->msg_len,
@@ -175,16 +175,16 @@
 			   info_buf_offset);
 		break;
 
-	case REMOTE_NDIS_SET_CMPLT:
+	case RNDIS_MSG_SET_C:
 		netdev_dbg(netdev,
-			"REMOTE_NDIS_SET_CMPLT (len %u, id 0x%x, status 0x%x)\n",
+			"RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
 			rndis_msg->msg_len,
 			rndis_msg->msg.set_complete.req_id,
 			rndis_msg->msg.set_complete.status);
 		break;
 
-	case REMOTE_NDIS_INDICATE_STATUS_MSG:
-		netdev_dbg(netdev, "REMOTE_NDIS_INDICATE_STATUS_MSG "
+	case RNDIS_MSG_INDICATE:
+		netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
 			"(len %u, status 0x%x, buf len %u, buf offset %u)\n",
 			rndis_msg->msg_len,
 			rndis_msg->msg.indicate_status.status,
@@ -264,14 +264,14 @@
 				sizeof(struct rndis_filter_packet));
 
 			if (resp->ndis_msg_type ==
-			    REMOTE_NDIS_RESET_CMPLT) {
+			    RNDIS_MSG_RESET_C) {
 				/* does not have a request id field */
 				request->response_msg.msg.reset_complete.
-					status = STATUS_BUFFER_OVERFLOW;
+					status = RNDIS_STATUS_BUFFER_OVERFLOW;
 			} else {
 				request->response_msg.msg.
 				init_complete.status =
-					STATUS_BUFFER_OVERFLOW;
+					RNDIS_STATUS_BUFFER_OVERFLOW;
 			}
 		}
 
@@ -415,19 +415,19 @@
 	dump_rndis_message(dev, rndis_msg);
 
 	switch (rndis_msg->ndis_msg_type) {
-	case REMOTE_NDIS_PACKET_MSG:
+	case RNDIS_MSG_PACKET:
 		/* data msg */
 		rndis_filter_receive_data(rndis_dev, rndis_msg, pkt);
 		break;
 
-	case REMOTE_NDIS_INITIALIZE_CMPLT:
-	case REMOTE_NDIS_QUERY_CMPLT:
-	case REMOTE_NDIS_SET_CMPLT:
+	case RNDIS_MSG_INIT_C:
+	case RNDIS_MSG_QUERY_C:
+	case RNDIS_MSG_SET_C:
 		/* completion msgs */
 		rndis_filter_receive_response(rndis_dev, rndis_msg);
 		break;
 
-	case REMOTE_NDIS_INDICATE_STATUS_MSG:
+	case RNDIS_MSG_INDICATE:
 		/* notification msgs */
 		rndis_filter_receive_indicate_status(rndis_dev, rndis_msg);
 		break;
@@ -456,7 +456,7 @@
 		return -EINVAL;
 
 	*result_size = 0;
-	request = get_rndis_request(dev, REMOTE_NDIS_QUERY_MSG,
+	request = get_rndis_request(dev, RNDIS_MSG_QUERY,
 			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
 	if (!request) {
 		ret = -ENOMEM;
@@ -536,7 +536,7 @@
 
 	ndev = dev->net_dev->ndev;
 
-	request = get_rndis_request(dev, REMOTE_NDIS_SET_MSG,
+	request = get_rndis_request(dev, RNDIS_MSG_SET,
 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
 			sizeof(u32));
 	if (!request) {
@@ -588,7 +588,7 @@
 	u32 status;
 	int ret, t;
 
-	request = get_rndis_request(dev, REMOTE_NDIS_INITIALIZE_MSG,
+	request = get_rndis_request(dev, RNDIS_MSG_INIT,
 			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
 	if (!request) {
 		ret = -ENOMEM;
@@ -641,7 +641,7 @@
 	struct rndis_halt_request *halt;
 
 	/* Attempt to do a rndis device halt */
-	request = get_rndis_request(dev, REMOTE_NDIS_HALT_MSG,
+	request = get_rndis_request(dev, RNDIS_MSG_HALT,
 				RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
 	if (!request)
 		goto cleanup;
@@ -805,7 +805,7 @@
 	if (isvlan)
 		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
 
-	rndis_msg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;
+	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
 	rndis_msg->msg_len = pkt->total_data_buflen +
 				      rndis_msg_size;
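
For reference, the driver-private message-type macros removed from
hyperv_net.h earlier in this patch correspond to the shared names from
<linux/rndis.h> now used above; the numeric values are the ones being deleted,
and the remaining REMOTE_NDIS_* names follow the same naming pattern:

	REMOTE_NDIS_PACKET_MSG          (0x00000001)  ->  RNDIS_MSG_PACKET
	REMOTE_NDIS_INITIALIZE_MSG      (0x00000002)  ->  RNDIS_MSG_INIT
	REMOTE_NDIS_HALT_MSG            (0x00000003)  ->  RNDIS_MSG_HALT
	REMOTE_NDIS_QUERY_MSG           (0x00000004)  ->  RNDIS_MSG_QUERY
	REMOTE_NDIS_SET_MSG             (0x00000005)  ->  RNDIS_MSG_SET
	REMOTE_NDIS_INDICATE_STATUS_MSG (0x00000007)  ->  RNDIS_MSG_INDICATE
	REMOTE_NDIS_INITIALIZE_CMPLT    (0x80000002)  ->  RNDIS_MSG_INIT_C
	REMOTE_NDIS_QUERY_CMPLT         (0x80000004)  ->  RNDIS_MSG_QUERY_C
	REMOTE_NDIS_SET_CMPLT           (0x80000005)  ->  RNDIS_MSG_SET_C
	REMOTE_NDIS_RESET_CMPLT         (0x80000006)  ->  RNDIS_MSG_RESET_C
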
 
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 4680478..3575844 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -321,8 +321,8 @@
 	  Say M to build a module; it will be called au1k_ir.ko
 
 config SMC_IRCC_FIR
-	tristate "SMSC IrCC (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && IRDA && ISA_DMA_API
+	tristate "SMSC IrCC"
+	depends on IRDA && ISA_DMA_API
 	help
 	  Say Y here if you want to build support for the SMC Infrared
 	  Communications Controller.  It is used in a wide variety of
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 4351296..510b9c8 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1710,7 +1710,7 @@
 
 /* Flush all packets */
   while ((i--) && (self->txpending))
-    udelay (10000);
+    msleep(10);
 
   spin_lock_irqsave(&self->spinlock, flags);
 
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 725d6b3..eb315b8 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -737,7 +737,7 @@
 	netif_stop_queue(ndev);
 	pm_runtime_put_sync(&self->pdev->dev);
 
-	dev_info(&ndev->dev, "stoped\n");
+	dev_info(&ndev->dev, "stopped\n");
 
 	return 0;
 }
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index e6661b5..256eddf 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -685,7 +685,7 @@
 
 	netif_stop_queue(ndev);
 
-	dev_info(&ndev->dev, "stoped\n");
+	dev_info(&ndev->dev, "stopped\n");
 
 	return 0;
 }
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 6c95d40..a926813 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -1,7 +1,6 @@
 /*********************************************************************
  *
  * Description:   Driver for the SMC Infrared Communications Controller
- * Status:        Experimental.
  * Author:        Daniele Peri (peri@csai.unipa.it)
  * Created at:
  * Modified at:
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f975afd..66a9bfe 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -57,7 +57,7 @@
 	struct hlist_node *n;
 
 	hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
-		if (!compare_ether_addr_64bits(vlan->dev->dev_addr, addr))
+		if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
 			return vlan;
 	}
 	return NULL;
@@ -96,7 +96,7 @@
 	 * currently in use by the underlying device or
 	 * another macvlan.
 	 */
-	if (!compare_ether_addr_64bits(port->dev->dev_addr, addr))
+	if (ether_addr_equal_64bits(port->dev->dev_addr, addr))
 		return 1;
 
 	if (macvlan_hash_lookup(port, addr))
@@ -118,8 +118,7 @@
 		return vlan->forward(dev, skb);
 
 	skb->dev = dev;
-	if (!compare_ether_addr_64bits(eth->h_dest,
-				       dev->broadcast))
+	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
 		skb->pkt_type = PACKET_BROADCAST;
 	else
 		skb->pkt_type = PACKET_MULTICAST;
@@ -259,7 +258,7 @@
 
 xmit_world:
 	skb->ip_summed = ip_summed;
-	skb_set_dev(skb, vlan->lowerdev);
+	skb->dev = vlan->lowerdev;
 	return dev_queue_xmit(skb);
 }
 
@@ -312,7 +311,8 @@
 	int err;
 
 	if (vlan->port->passthru) {
-		dev_set_promiscuity(lowerdev, 1);
+		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
+			dev_set_promiscuity(lowerdev, 1);
 		goto hash_add;
 	}
 
@@ -344,12 +344,15 @@
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
 
+	dev_uc_unsync(lowerdev, dev);
+	dev_mc_unsync(lowerdev, dev);
+
 	if (vlan->port->passthru) {
-		dev_set_promiscuity(lowerdev, -1);
+		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
+			dev_set_promiscuity(lowerdev, -1);
 		goto hash_del;
 	}
 
-	dev_mc_unsync(lowerdev, dev);
 	if (dev->flags & IFF_ALLMULTI)
 		dev_set_allmulti(lowerdev, -1);
 
@@ -399,10 +402,11 @@
 		dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
 }
 
-static void macvlan_set_multicast_list(struct net_device *dev)
+static void macvlan_set_mac_lists(struct net_device *dev)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 
+	dev_uc_sync(vlan->lowerdev, dev);
 	dev_mc_sync(vlan->lowerdev, dev);
 }
 
@@ -542,6 +546,43 @@
 	return 0;
 }
 
+static int macvlan_fdb_add(struct ndmsg *ndm,
+			   struct net_device *dev,
+			   unsigned char *addr,
+			   u16 flags)
+{
+	struct macvlan_dev *vlan = netdev_priv(dev);
+	int err = -EINVAL;
+
+	if (!vlan->port->passthru)
+		return -EOPNOTSUPP;
+
+	if (is_unicast_ether_addr(addr))
+		err = dev_uc_add_excl(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_add_excl(dev, addr);
+
+	return err;
+}
+
+static int macvlan_fdb_del(struct ndmsg *ndm,
+			   struct net_device *dev,
+			   unsigned char *addr)
+{
+	struct macvlan_dev *vlan = netdev_priv(dev);
+	int err = -EINVAL;
+
+	if (!vlan->port->passthru)
+		return -EOPNOTSUPP;
+
+	if (is_unicast_ether_addr(addr))
+		err = dev_uc_del(dev, addr);
+	else if (is_multicast_ether_addr(addr))
+		err = dev_mc_del(dev, addr);
+
+	return err;
+}
+
 static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *drvinfo)
 {
@@ -572,11 +613,14 @@
 	.ndo_change_mtu		= macvlan_change_mtu,
 	.ndo_change_rx_flags	= macvlan_change_rx_flags,
 	.ndo_set_mac_address	= macvlan_set_mac_address,
-	.ndo_set_rx_mode	= macvlan_set_multicast_list,
+	.ndo_set_rx_mode	= macvlan_set_mac_lists,
 	.ndo_get_stats64	= macvlan_dev_get_stats64,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_vlan_rx_add_vid	= macvlan_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= macvlan_vlan_rx_kill_vid,
+	.ndo_fdb_add		= macvlan_fdb_add,
+	.ndo_fdb_del		= macvlan_fdb_del,
+	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -711,6 +755,9 @@
 	if (data && data[IFLA_MACVLAN_MODE])
 		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
 
+	if (data && data[IFLA_MACVLAN_FLAGS])
+		vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
+
 	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
 		if (port->count)
 			return -EINVAL;
@@ -760,6 +807,16 @@
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	if (data && data[IFLA_MACVLAN_MODE])
 		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+	if (data && data[IFLA_MACVLAN_FLAGS]) {
+		__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
+		bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
+
+		if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
+			dev_set_promiscuity(vlan->lowerdev, -1);
+		else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
+			dev_set_promiscuity(vlan->lowerdev, 1);
+		vlan->flags = flags;
+	}
 	return 0;
 }
 
@@ -773,7 +830,10 @@
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 
-	NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode);
+	if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
+		goto nla_put_failure;
+	if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -781,7 +841,8 @@
 }
 
 static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
-	[IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
+	[IFLA_MACVLAN_MODE]  = { .type = NLA_U32 },
+	[IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
 };
 
 int macvlan_link_register(struct rtnl_link_ops *ops)
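
The macvlan_changelink() hunk above adjusts the lower device's promiscuity
count only on actual transitions of MACVLAN_FLAG_NOPROMISC, detected with an
XOR against the stored flags. The sketch below demonstrates that edge-detection
logic in isolation; the flag value and the plain counter standing in for
dev_set_promiscuity() are assumptions made only for illustration.

/*
 * Sketch of the MACVLAN_FLAG_NOPROMISC handling: promiscuity is dropped or
 * retaken only when the flag actually changes state, never on a no-op update.
 */
#include <stdio.h>

#define MACVLAN_FLAG_NOPROMISC 1	/* assumed value, for illustration only */

static int lower_promisc;		/* stand-in for dev_set_promiscuity() */

static void changelink_flags(unsigned short *cur, unsigned short new_flags)
{
	int changed = (new_flags ^ *cur) & MACVLAN_FLAG_NOPROMISC;

	if (changed && (new_flags & MACVLAN_FLAG_NOPROMISC))
		lower_promisc--;	/* turning NOPROMISC on drops the ref */
	else if (changed && !(new_flags & MACVLAN_FLAG_NOPROMISC))
		lower_promisc++;	/* turning it off takes the ref back */
	*cur = new_flags;
}

int main(void)
{
	unsigned short flags = 0;

	lower_promisc = 1;	/* passthru open took one reference */
	changelink_flags(&flags, MACVLAN_FLAG_NOPROMISC);	/* 1 -> 0 */
	changelink_flags(&flags, MACVLAN_FLAG_NOPROMISC);	/* no change */
	changelink_flags(&flags, 0);				/* 0 -> 1 */

	printf("promiscuity refcount: %d\n", lower_promisc);
	return 0;
}
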
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0427c65..2ee56de 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1,5 +1,6 @@
 #include <linux/etherdevice.h>
 #include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
 #include <linux/interrupt.h>
 #include <linux/nsproxy.h>
 #include <linux/compat.h>
@@ -505,10 +506,11 @@
 		if (copy > size) {
 			++from;
 			--count;
-		}
+			offset = 0;
+		} else
+			offset += size;
 		copy -= size;
 		offset1 += size;
-		offset = 0;
 	}
 
 	if (len == offset1)
@@ -518,24 +520,29 @@
 		struct page *page[MAX_SKB_FRAGS];
 		int num_pages;
 		unsigned long base;
+		unsigned long truesize;
 
-		len = from->iov_len - offset1;
+		len = from->iov_len - offset;
 		if (!len) {
-			offset1 = 0;
+			offset = 0;
 			++from;
 			continue;
 		}
-		base = (unsigned long)from->iov_base + offset1;
+		base = (unsigned long)from->iov_base + offset;
 		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+		if (i + size > MAX_SKB_FRAGS)
+			return -EMSGSIZE;
 		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
-		if ((num_pages != size) ||
-		    (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
-			/* put_page is in skb free */
+		if (num_pages != size) {
+			for (i = 0; i < num_pages; i++)
+				put_page(page[i]);
 			return -EFAULT;
+		}
+		truesize = size * PAGE_SIZE;
 		skb->data_len += len;
 		skb->len += len;
-		skb->truesize += len;
-		atomic_add(len, &skb->sk->sk_wmem_alloc);
+		skb->truesize += truesize;
+		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
 		while (len) {
 			int off = base & ~PAGE_MASK;
 			int size = min_t(int, len, PAGE_SIZE - off);
@@ -546,7 +553,7 @@
 			len -= size;
 			i++;
 		}
-		offset1 = 0;
+		offset = 0;
 		++from;
 	}
 	return 0;
@@ -646,7 +653,7 @@
 	int err;
 	struct virtio_net_hdr vnet_hdr = { 0 };
 	int vnet_hdr_len = 0;
-	int copylen;
+	int copylen = 0;
 	bool zerocopy = false;
 
 	if (q->flags & IFF_VNET_HDR) {
@@ -675,15 +682,31 @@
 	if (unlikely(len < ETH_HLEN))
 		goto err;
 
+	err = -EMSGSIZE;
+	if (unlikely(count > UIO_MAXIOV))
+		goto err;
+
 	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
 		zerocopy = true;
 
 	if (zerocopy) {
+		/* Userspace may produce vectors with count greater than
+		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
+		 * to let the rest of data to be fit in the frags.
+		 */
+		if (count > MAX_SKB_FRAGS) {
+			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
+			if (copylen < vnet_hdr_len)
+				copylen = 0;
+			else
+				copylen -= vnet_hdr_len;
+		}
 		/* There are 256 bytes to be copied in skb, so there is enough
 		 * room for skb expand head in case it is used.
 		 * The rest buffer is mapped from userspace.
 		 */
-		copylen = vnet_hdr.hdr_len;
+		if (copylen < vnet_hdr.hdr_len)
+			copylen = vnet_hdr.hdr_len;
 		if (!copylen)
 			copylen = GOODCOPY_LEN;
 	} else
@@ -694,10 +717,9 @@
 	if (!skb)
 		goto err;
 
-	if (zerocopy) {
+	if (zerocopy)
 		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
-		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
-	} else
+	else
 		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
 						   len);
 	if (err)
@@ -716,8 +738,10 @@
 	rcu_read_lock_bh();
 	vlan = rcu_dereference_bh(q->vlan);
 	/* copy skb_ubuf_info for callback when skb has no error */
-	if (zerocopy)
+	if (zerocopy) {
 		skb_shinfo(skb)->destructor_arg = m->msg_control;
+		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	}
 	if (vlan)
 		macvlan_start_xmit(skb, vlan->dev);
 	else
@@ -759,6 +783,8 @@
 	struct macvlan_dev *vlan;
 	int ret;
 	int vnet_hdr_len = 0;
+	int vlan_offset = 0;
+	int copied;
 
 	if (q->flags & IFF_VNET_HDR) {
 		struct virtio_net_hdr vnet_hdr;
@@ -773,18 +799,48 @@
 		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
 			return -EFAULT;
 	}
+	copied = vnet_hdr_len;
 
-	len = min_t(int, skb->len, len);
+	if (!vlan_tx_tag_present(skb))
+		len = min_t(int, skb->len, len);
+	else {
+		int copy;
+		struct {
+			__be16 h_vlan_proto;
+			__be16 h_vlan_TCI;
+		} veth;
+		veth.h_vlan_proto = htons(ETH_P_8021Q);
+		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
 
-	ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
+		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+		len = min_t(int, skb->len + VLAN_HLEN, len);
 
+		copy = min_t(int, vlan_offset, len);
+		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
+		len -= copy;
+		copied += copy;
+		if (ret || !len)
+			goto done;
+
+		copy = min_t(int, sizeof(veth), len);
+		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
+		len -= copy;
+		copied += copy;
+		if (ret || !len)
+			goto done;
+	}
+
+	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+	copied += len;
+
+done:
 	rcu_read_lock_bh();
 	vlan = rcu_dereference_bh(q->vlan);
 	if (vlan)
-		macvlan_count_rx(vlan, len, ret == 0, 0);
+		macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
 	rcu_read_unlock_bh();
 
-	return ret ? ret : (len + vnet_hdr_len);
+	return ret ? ret : copied;
 }
 
 static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
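
The zerocopy path reworked above has to linearize the leading part of an iovec
whenever it carries more segments than MAX_SKB_FRAGS, so that the remainder
fits in the skb frags. The standalone sketch below reproduces only that
copy-length arithmetic (the GOODCOPY_LEN fallback is omitted); iov_length() is
reimplemented locally, and the MAX_SKB_FRAGS value and sample sizes are
assumptions for illustration.

/*
 * Sketch of the copylen calculation added to macvtap_get_user(): how many
 * leading bytes must be copied instead of mapped when the iovec is long.
 */
#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

#define MAX_SKB_FRAGS 17	/* typical value for 4 KiB pages */

static size_t iov_length(const struct iovec *iv, unsigned long count)
{
	size_t len = 0;
	unsigned long i;

	for (i = 0; i < count; i++)
		len += iv[i].iov_len;
	return len;
}

static size_t zerocopy_copylen(const struct iovec *iv, unsigned long count,
			       size_t vnet_hdr_len, size_t hdr_len)
{
	size_t copylen = 0;

	if (count > MAX_SKB_FRAGS) {
		/* linearize everything that will not fit in the frags */
		copylen = iov_length(iv, count - MAX_SKB_FRAGS);
		copylen = copylen > vnet_hdr_len ? copylen - vnet_hdr_len : 0;
	}
	if (copylen < hdr_len)		/* always copy at least the header */
		copylen = hdr_len;
	return copylen;
}

int main(void)
{
	struct iovec iv[20];
	int i;

	for (i = 0; i < 20; i++) {
		iv[i].iov_base = NULL;	/* payload addresses don't matter here */
		iv[i].iov_len = 256;
	}

	printf("bytes to linearize: %zu\n",
	       zerocopy_copylen(iv, 20, 10, 64));
	return 0;
}
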
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 0e01f4e..944cdfb 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -135,6 +135,25 @@
 
 	  If in doubt, say Y.
 
+config MDIO_BUS_MUX
+	tristate
+	depends on OF_MDIO
+	help
+	  This module provides a driver framework for MDIO bus
+	  multiplexers which connect one of several child MDIO busses
+	  to a parent bus.  Switching between child busses is done by
+	  device specific drivers.
+
+config MDIO_BUS_MUX_GPIO
+	tristate "Support for GPIO controlled MDIO bus multiplexers"
+	depends on OF_GPIO && OF_MDIO
+	select MDIO_BUS_MUX
+	help
+	  This module provides a driver for MDIO bus multiplexers that
+	  are controlled via GPIO lines.  The multiplexer connects one of
+	  several child MDIO busses to a parent bus.  Child bus
+	  selection is under the control of GPIO lines.
+
 endif # PHYLIB
 
 config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b7438b1..f51af68 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -25,3 +25,5 @@
 obj-$(CONFIG_MDIO_OCTEON)	+= mdio-octeon.o
 obj-$(CONFIG_MICREL_KS8995MA)	+= spi_ks8995.o
 obj-$(CONFIG_AMD_PHY)		+= amd.o
+obj-$(CONFIG_MDIO_BUS_MUX)	+= mdio-mux.o
+obj-$(CONFIG_MDIO_BUS_MUX_GPIO)	+= mdio-mux-gpio.o
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e16f98c..cd802eb 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -39,10 +39,7 @@
 		MII_BCM63XX_IR_SPEED |
 		MII_BCM63XX_IR_LINK) |
 		MII_BCM63XX_IR_EN;
-	err = phy_write(phydev, MII_BCM63XX_IR, reg);
-	if (err < 0)
-		return err;
-	return 0;
+	return phy_write(phydev, MII_BCM63XX_IR, reg);
 }
 
 static int bcm63xx_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 2f774ac..5f59cc0 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -134,12 +134,7 @@
 		return err;
 
 	/* Reconnect the PHY, and enable Autonegotiation */
-	err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
-
-	if (err < 0)
-		return err;
-
-	return 0;
+	return phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
 }
 
 static int dm9161_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index dd7ae19..940b290 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1215,6 +1215,36 @@
 	}
 }
 
+static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
+{
+	struct dp83640_private *dp83640 = dev->priv;
+
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE |
+		SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
+	info->tx_types =
+		(1 << HWTSTAMP_TX_OFF) |
+		(1 << HWTSTAMP_TX_ON) |
+		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
+	info->rx_filters =
+		(1 << HWTSTAMP_FILTER_NONE) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+	return 0;
+}
+
 static struct phy_driver dp83640_driver = {
 	.phy_id		= DP83640_PHY_ID,
 	.phy_id_mask	= 0xfffffff0,
@@ -1225,6 +1255,7 @@
 	.remove		= dp83640_remove,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
+	.ts_info	= dp83640_ts_info,
 	.hwtstamp	= dp83640_hwtstamp,
 	.rxtstamp	= dp83640_rxtstamp,
 	.txtstamp	= dp83640_txtstamp,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e8b9c53..418928d 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -455,11 +455,7 @@
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-	if (err < 0)
-		return err;
-
-	return 0;
+	return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
 static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -515,11 +511,7 @@
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-	if (err < 0)
-		return err;
-
-	return 0;
+	return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
 static int m88e1149_config_init(struct phy_device *phydev)
@@ -545,11 +537,7 @@
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-	if (err < 0)
-		return err;
-
-	return 0;
+	return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
 static int m88e1145_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
new file mode 100644
index 0000000..e0cc4ef
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -0,0 +1,142 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium, Inc.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/phy.h>
+#include <linux/mdio-mux.h>
+#include <linux/of_gpio.h>
+
+#define DRV_VERSION "1.0"
+#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
+
+#define MDIO_MUX_GPIO_MAX_BITS 8
+
+struct mdio_mux_gpio_state {
+	int gpio[MDIO_MUX_GPIO_MAX_BITS];
+	unsigned int num_gpios;
+	void *mux_handle;
+};
+
+static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
+				   void *data)
+{
+	int change;
+	unsigned int n;
+	struct mdio_mux_gpio_state *s = data;
+
+	if (current_child == desired_child)
+		return 0;
+
+	change = current_child == -1 ? -1 : current_child ^ desired_child;
+
+	for (n = 0; n < s->num_gpios; n++) {
+		if (change & 1)
+			gpio_set_value_cansleep(s->gpio[n],
+						(desired_child & 1) != 0);
+		change >>= 1;
+		desired_child >>= 1;
+	}
+
+	return 0;
+}
+
+static int __devinit mdio_mux_gpio_probe(struct platform_device *pdev)
+{
+	enum of_gpio_flags f;
+	struct mdio_mux_gpio_state *s;
+	unsigned int num_gpios;
+	unsigned int n;
+	int r;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	num_gpios = of_gpio_count(pdev->dev.of_node);
+	if (num_gpios == 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
+		return -ENODEV;
+
+	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	s->num_gpios = num_gpios;
+
+	for (n = 0; n < num_gpios; ) {
+		int gpio = of_get_gpio_flags(pdev->dev.of_node, n, &f);
+		if (gpio < 0) {
+			r = (gpio == -ENODEV) ? -EPROBE_DEFER : gpio;
+			goto err;
+		}
+		s->gpio[n] = gpio;
+
+		n++;
+
+		r = gpio_request(gpio, "mdio_mux_gpio");
+		if (r)
+			goto err;
+
+		r = gpio_direction_output(gpio, 0);
+		if (r)
+			goto err;
+	}
+
+	r = mdio_mux_init(&pdev->dev,
+			  mdio_mux_gpio_switch_fn, &s->mux_handle, s);
+
+	if (r == 0) {
+		pdev->dev.platform_data = s;
+		return 0;
+	}
+err:
+	while (n) {
+		n--;
+		gpio_free(s->gpio[n]);
+	}
+	devm_kfree(&pdev->dev, s);
+	return r;
+}
+
+static int __devexit mdio_mux_gpio_remove(struct platform_device *pdev)
+{
+	struct mdio_mux_gpio_state *s = pdev->dev.platform_data;
+	mdio_mux_uninit(s->mux_handle);
+	return 0;
+}
+
+static struct of_device_id mdio_mux_gpio_match[] = {
+	{
+		.compatible = "mdio-mux-gpio",
+	},
+	{
+		/* Legacy compatible property. */
+		.compatible = "cavium,mdio-mux-sn74cbtlv3253",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, mdio_mux_gpio_match);
+
+static struct platform_driver mdio_mux_gpio_driver = {
+	.driver = {
+		.name		= "mdio-mux-gpio",
+		.owner		= THIS_MODULE,
+		.of_match_table = mdio_mux_gpio_match,
+	},
+	.probe		= mdio_mux_gpio_probe,
+	.remove		= __devexit_p(mdio_mux_gpio_remove),
+};
+
+module_platform_driver(mdio_mux_gpio_driver);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
new file mode 100644
index 0000000..39ea067
--- /dev/null
+++ b/drivers/net/phy/mdio-mux.c
@@ -0,0 +1,192 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium, Inc.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mdio-mux.h>
+#include <linux/of_mdio.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define DRV_VERSION "1.0"
+#define DRV_DESCRIPTION "MDIO bus multiplexer driver"
+
+struct mdio_mux_child_bus;
+
+struct mdio_mux_parent_bus {
+	struct mii_bus *mii_bus;
+	int current_child;
+	int parent_id;
+	void *switch_data;
+	int (*switch_fn)(int current_child, int desired_child, void *data);
+
+	/* List of our children linked through their next fields. */
+	struct mdio_mux_child_bus *children;
+};
+
+struct mdio_mux_child_bus {
+	struct mii_bus *mii_bus;
+	struct mdio_mux_parent_bus *parent;
+	struct mdio_mux_child_bus *next;
+	int bus_number;
+	int phy_irq[PHY_MAX_ADDR];
+};
+
+/*
+ * The parent bus' lock is used to order access to the switch_fn.
+ */
+static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+	struct mdio_mux_child_bus *cb = bus->priv;
+	struct mdio_mux_parent_bus *pb = cb->parent;
+	int r;
+
+	mutex_lock(&pb->mii_bus->mdio_lock);
+	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
+	if (r)
+		goto out;
+
+	pb->current_child = cb->bus_number;
+
+	r = pb->mii_bus->read(pb->mii_bus, phy_id, regnum);
+out:
+	mutex_unlock(&pb->mii_bus->mdio_lock);
+
+	return r;
+}
+
+/*
+ * The parent bus' lock is used to order access to the switch_fn.
+ */
+static int mdio_mux_write(struct mii_bus *bus, int phy_id,
+			  int regnum, u16 val)
+{
+	struct mdio_mux_child_bus *cb = bus->priv;
+	struct mdio_mux_parent_bus *pb = cb->parent;
+
+	int r;
+
+	mutex_lock(&pb->mii_bus->mdio_lock);
+	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
+	if (r)
+		goto out;
+
+	pb->current_child = cb->bus_number;
+
+	r = pb->mii_bus->write(pb->mii_bus, phy_id, regnum, val);
+out:
+	mutex_unlock(&pb->mii_bus->mdio_lock);
+
+	return r;
+}
+
+static int parent_count;
+
+int mdio_mux_init(struct device *dev,
+		  int (*switch_fn)(int cur, int desired, void *data),
+		  void **mux_handle,
+		  void *data)
+{
+	struct device_node *parent_bus_node;
+	struct device_node *child_bus_node;
+	int r, ret_val;
+	struct mii_bus *parent_bus;
+	struct mdio_mux_parent_bus *pb;
+	struct mdio_mux_child_bus *cb;
+
+	if (!dev->of_node)
+		return -ENODEV;
+
+	parent_bus_node = of_parse_phandle(dev->of_node, "mdio-parent-bus", 0);
+
+	if (!parent_bus_node)
+		return -ENODEV;
+
+	parent_bus = of_mdio_find_bus(parent_bus_node);
+	if (parent_bus == NULL) {
+		ret_val = -EPROBE_DEFER;
+		goto err_parent_bus;
+	}
+
+	pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
+	if (pb == NULL) {
+		ret_val = -ENOMEM;
+		goto err_parent_bus;
+	}
+
+	pb->switch_data = data;
+	pb->switch_fn = switch_fn;
+	pb->current_child = -1;
+	pb->parent_id = parent_count++;
+	pb->mii_bus = parent_bus;
+
+	ret_val = -ENODEV;
+	for_each_child_of_node(dev->of_node, child_bus_node) {
+		u32 v;
+
+		r = of_property_read_u32(child_bus_node, "reg", &v);
+		if (r)
+			continue;
+
+		cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
+		if (cb == NULL) {
+			dev_err(dev,
+				"Error: Failed to allocate memory for child\n");
+			ret_val = -ENOMEM;
+			break;
+		}
+		cb->bus_number = v;
+		cb->parent = pb;
+		cb->mii_bus = mdiobus_alloc();
+		cb->mii_bus->priv = cb;
+
+		cb->mii_bus->irq = cb->phy_irq;
+		cb->mii_bus->name = "mdio_mux";
+		snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
+			 pb->parent_id, v);
+		cb->mii_bus->parent = dev;
+		cb->mii_bus->read = mdio_mux_read;
+		cb->mii_bus->write = mdio_mux_write;
+		r = of_mdiobus_register(cb->mii_bus, child_bus_node);
+		if (r) {
+			mdiobus_free(cb->mii_bus);
+			devm_kfree(dev, cb);
+		} else {
+			of_node_get(child_bus_node);
+			cb->next = pb->children;
+			pb->children = cb;
+		}
+	}
+	if (pb->children) {
+		*mux_handle = pb;
+		dev_info(dev, "Version " DRV_VERSION "\n");
+		return 0;
+	}
+err_parent_bus:
+	of_node_put(parent_bus_node);
+	return ret_val;
+}
+EXPORT_SYMBOL_GPL(mdio_mux_init);
+
+void mdio_mux_uninit(void *mux_handle)
+{
+	struct mdio_mux_parent_bus *pb = mux_handle;
+	struct mdio_mux_child_bus *cb = pb->children;
+
+	while (cb) {
+		mdiobus_unregister(cb->mii_bus);
+		mdiobus_free(cb->mii_bus);
+		cb = cb->next;
+	}
+}
+EXPORT_SYMBOL_GPL(mdio_mux_uninit);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8985cc6..683ef1c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -88,6 +88,38 @@
 	.dev_release	= mdiobus_release,
 };
 
+#if IS_ENABLED(CONFIG_OF_MDIO)
+/* Helper function for of_mdio_find_bus */
+static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
+{
+	return dev->of_node == mdio_bus_np;
+}
+/**
+ * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
+ * @mdio_bus_np: Pointer to the device node of the mii_bus.
+ *
+ * Returns a pointer to the mii_bus, or NULL if none found.
+ *
+ * Because the association of a device_node and mii_bus is made via
+ * of_mdiobus_register(), the mii_bus cannot be found before it is
+ * registered with of_mdiobus_register().
+ *
+ */
+struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
+{
+	struct device *d;
+
+	if (!mdio_bus_np)
+		return NULL;
+
+	d = class_find_device(&mdio_bus_class, NULL,  mdio_bus_np,
+			      of_mdio_bus_match);
+
+	return d ? to_mii_bus(d) : NULL;
+}
+EXPORT_SYMBOL(of_mdio_find_bus);
+#endif
+
 /**
  * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
  * @bus: target mii_bus
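
Note that of_mdio_find_bus() can only succeed once the parent bus has been through of_mdiobus_register(), so callers treat a NULL return as "not registered yet" rather than a hard failure; the mdio-mux core above does exactly that. The pattern, as an illustrative helper (not code from this series):

#include <linux/errno.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int find_parent_bus_or_defer(struct device_node *np, struct mii_bus **out)
{
	*out = of_mdio_find_bus(np);
	return *out ? 0 : -EPROBE_DEFER;	/* parent bus may appear later */
}
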
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e8c42d6..de86a55 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -207,7 +207,7 @@
  * Description: Reads the ID registers of the PHY at @addr on the
  *   @bus, stores it in @phy_id and returns zero on success.
  */
-int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
+static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
 {
 	int phy_reg;
 
@@ -230,7 +230,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL(get_phy_id);
 
 /**
  * get_phy_device - reads the specified PHY device and returns its @phy_device struct
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 116a2dd..4eb98bc 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -348,7 +348,6 @@
 static struct spi_driver ks8995_driver = {
 	.driver = {
 		.name	    = "spi-ks8995",
-		.bus	     = &spi_bus_type,
 		.owner	   = THIS_MODULE,
 	},
 	.probe	  = ks8995_probe,
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index af95a98..a031f6b 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -613,7 +613,7 @@
 	*buf++ = PPP_FLAG;
 	ap->olim = buf;
 
-	kfree_skb(ap->tpkt);
+	consume_skb(ap->tpkt);
 	ap->tpkt = NULL;
 	return 1;
 }
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 21d7151..5c05572 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1092,13 +1092,13 @@
 				   new_skb->data, skb->len + 2,
 				   compressor_skb_size);
 	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = new_skb;
 		skb_put(skb, len);
 		skb_pull(skb, 2);	/* pull off A/C bytes */
 	} else if (len == 0) {
 		/* didn't compress, or CCP not up yet */
-		kfree_skb(new_skb);
+		consume_skb(new_skb);
 		new_skb = skb;
 	} else {
 		/*
@@ -1112,7 +1112,7 @@
 		if (net_ratelimit())
 			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
 		kfree_skb(skb);
-		kfree_skb(new_skb);
+		consume_skb(new_skb);
 		new_skb = NULL;
 	}
 	return new_skb;
@@ -1178,7 +1178,7 @@
 				    !(ppp->flags & SC_NO_TCP_CCID));
 		if (cp == skb->data + 2) {
 			/* didn't compress */
-			kfree_skb(new_skb);
+			consume_skb(new_skb);
 		} else {
 			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
 				proto = PPP_VJC_COMP;
@@ -1187,7 +1187,7 @@
 				proto = PPP_VJC_UNCOMP;
 				cp[0] = skb->data[2];
 			}
-			kfree_skb(skb);
+			consume_skb(skb);
 			skb = new_skb;
 			cp = skb_put(skb, len + 2);
 			cp[0] = 0;
@@ -1703,7 +1703,7 @@
 			}
 			skb_reserve(ns, 2);
 			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
-			kfree_skb(skb);
+			consume_skb(skb);
 			skb = ns;
 		}
 		else
@@ -1851,7 +1851,7 @@
 			goto err;
 		}
 
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = ns;
 		skb_put(skb, len);
 		skb_pull(skb, 2);	/* pull off the A/C bytes */
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 55e466c..1a12033 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -588,7 +588,7 @@
 			skb_reserve(npkt,2);
 			skb_copy_from_linear_data(skb,
 				      skb_put(npkt, skb->len), skb->len);
-			kfree_skb(skb);
+			consume_skb(skb);
 			skb = npkt;
 		}
 		skb_push(skb,2);
@@ -656,7 +656,7 @@
 			if (sent < ap->tpkt->len) {
 				tty_stuffed = 1;
 			} else {
-				kfree_skb(ap->tpkt);
+				consume_skb(ap->tpkt);
 				ap->tpkt = NULL;
 				clear_bit(XMIT_FULL, &ap->xmit_flags);
 				done = 1;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2fa1a9b..cbf7047 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -201,7 +201,7 @@
 	return 0;
 }
 
-static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid,
+static void __delete_item(struct pppoe_net *pn, __be16 sid,
 					char *addr, int ifindex)
 {
 	int hash = hash_item(sid, addr);
@@ -220,8 +220,6 @@
 		src = &ret->next;
 		ret = ret->next;
 	}
-
-	return ret;
 }
 
 /**********************************************************************
@@ -264,16 +262,12 @@
 	return pppox_sock;
 }
 
-static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid,
+static inline void delete_item(struct pppoe_net *pn, __be16 sid,
 					char *addr, int ifindex)
 {
-	struct pppox_sock *ret;
-
 	write_lock_bh(&pn->hash_lock);
-	ret = __delete_item(pn, sid, addr, ifindex);
+	__delete_item(pn, sid, addr, ifindex);
 	write_unlock_bh(&pn->hash_lock);
-
-	return ret;
 }
 
 /***************************************************************************
@@ -990,8 +984,10 @@
 	if (skb) {
 		total_len = min_t(size_t, total_len, skb->len);
 		error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
-		if (error == 0)
-			error = total_len;
+		if (error == 0) {
+			consume_skb(skb);
+			return total_len;
+		}
 	}
 
 	kfree_skb(skb);
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 885dbdd..1c98321 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -116,8 +116,8 @@
 	int i;
 
 	rcu_read_lock();
-	for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID;
-	     i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) {
+	i = 1;
+	for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
 		sock = rcu_dereference(callid_sock[i]);
 		if (!sock)
 			continue;
@@ -209,7 +209,7 @@
 		}
 		if (skb->sk)
 			skb_set_owner_w(new_skb, skb->sk);
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = new_skb;
 	}
 
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 248a144..89024d5 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -40,4 +40,15 @@
 	  To compile this team mode as a module, choose M here: the module
 	  will be called team_mode_activebackup.
 
+config NET_TEAM_MODE_LOADBALANCE
+	tristate "Load-balance mode support"
+	depends on NET_TEAM
+	---help---
+	  This mode provides load balancing functionality. Tx port selection
+	  is done using a BPF function set up from userspace (bpf_hash_func
+	  option).
+
+	  To compile this team mode as a module, choose M here: the module
+	  will be called team_mode_loadbalance.
+
 endif # NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index 85f2028..fb9f4c1 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_NET_TEAM) += team.o
 obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
 obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
+obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8f81805..c61ae35 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -65,7 +65,7 @@
 	return dev_set_mac_address(port_dev, &addr);
 }
 
-int team_port_set_orig_mac(struct team_port *port)
+static int team_port_set_orig_mac(struct team_port *port)
 {
 	return __set_port_mac(port->dev, port->orig.dev_addr);
 }
@@ -76,12 +76,26 @@
 }
 EXPORT_SYMBOL(team_port_set_team_mac);
 
+static void team_refresh_port_linkup(struct team_port *port)
+{
+	port->linkup = port->user.linkup_enabled ? port->user.linkup :
+						   port->state.linkup;
+}
 
 /*******************
  * Options handling
  *******************/
 
-struct team_option *__team_find_option(struct team *team, const char *opt_name)
+struct team_option_inst { /* One for each option instance */
+	struct list_head list;
+	struct team_option *option;
+	struct team_port *port; /* != NULL if per-port */
+	bool changed;
+	bool removed;
+};
+
+static struct team_option *__team_find_option(struct team *team,
+					      const char *opt_name)
 {
 	struct team_option *option;
 
@@ -92,9 +106,121 @@
 	return NULL;
 }
 
-int __team_options_register(struct team *team,
-			    const struct team_option *option,
-			    size_t option_count)
+static int __team_option_inst_add(struct team *team, struct team_option *option,
+				  struct team_port *port)
+{
+	struct team_option_inst *opt_inst;
+
+	opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
+	if (!opt_inst)
+		return -ENOMEM;
+	opt_inst->option = option;
+	opt_inst->port = port;
+	opt_inst->changed = true;
+	opt_inst->removed = false;
+	list_add_tail(&opt_inst->list, &team->option_inst_list);
+	return 0;
+}
+
+static void __team_option_inst_del(struct team_option_inst *opt_inst)
+{
+	list_del(&opt_inst->list);
+	kfree(opt_inst);
+}
+
+static void __team_option_inst_del_option(struct team *team,
+					  struct team_option *option)
+{
+	struct team_option_inst *opt_inst, *tmp;
+
+	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
+		if (opt_inst->option == option)
+			__team_option_inst_del(opt_inst);
+	}
+}
+
+static int __team_option_inst_add_option(struct team *team,
+					 struct team_option *option)
+{
+	struct team_port *port;
+	int err;
+
+	if (!option->per_port)
+		return __team_option_inst_add(team, option, 0);
+
+	list_for_each_entry(port, &team->port_list, list) {
+		err = __team_option_inst_add(team, option, port);
+		if (err)
+			goto inst_del_option;
+	}
+	return 0;
+
+inst_del_option:
+	__team_option_inst_del_option(team, option);
+	return err;
+}
+
+static void __team_option_inst_mark_removed_option(struct team *team,
+						   struct team_option *option)
+{
+	struct team_option_inst *opt_inst;
+
+	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+		if (opt_inst->option == option) {
+			opt_inst->changed = true;
+			opt_inst->removed = true;
+		}
+	}
+}
+
+static void __team_option_inst_del_port(struct team *team,
+					struct team_port *port)
+{
+	struct team_option_inst *opt_inst, *tmp;
+
+	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
+		if (opt_inst->option->per_port &&
+		    opt_inst->port == port)
+			__team_option_inst_del(opt_inst);
+	}
+}
+
+static int __team_option_inst_add_port(struct team *team,
+				       struct team_port *port)
+{
+	struct team_option *option;
+	int err;
+
+	list_for_each_entry(option, &team->option_list, list) {
+		if (!option->per_port)
+			continue;
+		err = __team_option_inst_add(team, option, port);
+		if (err)
+			goto inst_del_port;
+	}
+	return 0;
+
+inst_del_port:
+	__team_option_inst_del_port(team, port);
+	return err;
+}
+
+static void __team_option_inst_mark_removed_port(struct team *team,
+						 struct team_port *port)
+{
+	struct team_option_inst *opt_inst;
+
+	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+		if (opt_inst->port == port) {
+			opt_inst->changed = true;
+			opt_inst->removed = true;
+		}
+	}
+}
+
+static int __team_options_register(struct team *team,
+				   const struct team_option *option,
+				   size_t option_count)
 {
 	int i;
 	struct team_option **dst_opts;
@@ -107,26 +233,32 @@
 	for (i = 0; i < option_count; i++, option++) {
 		if (__team_find_option(team, option->name)) {
 			err = -EEXIST;
-			goto rollback;
+			goto alloc_rollback;
 		}
 		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
 		if (!dst_opts[i]) {
 			err = -ENOMEM;
-			goto rollback;
+			goto alloc_rollback;
 		}
 	}
 
 	for (i = 0; i < option_count; i++) {
-		dst_opts[i]->changed = true;
-		dst_opts[i]->removed = false;
+		err = __team_option_inst_add_option(team, dst_opts[i]);
+		if (err)
+			goto inst_rollback;
 		list_add_tail(&dst_opts[i]->list, &team->option_list);
 	}
 
 	kfree(dst_opts);
 	return 0;
 
-rollback:
-	for (i = 0; i < option_count; i++)
+inst_rollback:
+	for (i--; i >= 0; i--)
+		__team_option_inst_del_option(team, dst_opts[i]);
+
+	i = option_count - 1;
+alloc_rollback:
+	for (i--; i >= 0; i--)
 		kfree(dst_opts[i]);
 
 	kfree(dst_opts);
@@ -143,10 +275,8 @@
 		struct team_option *del_opt;
 
 		del_opt = __team_find_option(team, option->name);
-		if (del_opt) {
-			del_opt->changed = true;
-			del_opt->removed = true;
-		}
+		if (del_opt)
+			__team_option_inst_mark_removed_option(team, del_opt);
 	}
 }
 
@@ -161,6 +291,7 @@
 
 		del_opt = __team_find_option(team, option->name);
 		if (del_opt) {
+			__team_option_inst_del_option(team, del_opt);
 			list_del(&del_opt->list);
 			kfree(del_opt);
 		}
@@ -193,22 +324,42 @@
 }
 EXPORT_SYMBOL(team_options_unregister);
 
-static int team_option_get(struct team *team, struct team_option *option,
-			   void *arg)
-{
-	return option->getter(team, arg);
-}
-
-static int team_option_set(struct team *team, struct team_option *option,
-			   void *arg)
+static int team_option_port_add(struct team *team, struct team_port *port)
 {
 	int err;
 
-	err = option->setter(team, arg);
+	err = __team_option_inst_add_port(team, port);
+	if (err)
+		return err;
+	__team_options_change_check(team);
+	return 0;
+}
+
+static void team_option_port_del(struct team *team, struct team_port *port)
+{
+	__team_option_inst_mark_removed_port(team, port);
+	__team_options_change_check(team);
+	__team_option_inst_del_port(team, port);
+}
+
+static int team_option_get(struct team *team,
+			   struct team_option_inst *opt_inst,
+			   struct team_gsetter_ctx *ctx)
+{
+	return opt_inst->option->getter(team, ctx);
+}
+
+static int team_option_set(struct team *team,
+			   struct team_option_inst *opt_inst,
+			   struct team_gsetter_ctx *ctx)
+{
+	int err;
+
+	err = opt_inst->option->setter(team, ctx);
 	if (err)
 		return err;
 
-	option->changed = true;
+	opt_inst->changed = true;
 	__team_options_change_check(team);
 	return err;
 }
@@ -408,6 +559,8 @@
  * Rx path frame handler
  ************************/
 
+static bool team_port_enabled(struct team_port *port);
+
 /* note: already called with rcu_read_lock */
 static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
 {
@@ -424,8 +577,12 @@
 
 	port = team_port_get_rcu(skb->dev);
 	team = port->team;
-
-	res = team->ops.receive(team, port, skb);
+	if (!team_port_enabled(port)) {
+		/* allow exact match delivery for disabled ports */
+		res = RX_HANDLER_EXACT;
+	} else {
+		res = team->ops.receive(team, port, skb);
+	}
 	if (res == RX_HANDLER_ANOTHER) {
 		struct team_pcpu_stats *pcpu_stats;
 
@@ -461,17 +618,25 @@
 	return false;
 }
 
-/*
- * Add/delete port to the team port list. Write guarded by rtnl_lock.
- * Takes care of correct port->index setup (might be racy).
- */
-static void team_port_list_add_port(struct team *team,
-				    struct team_port *port)
+static bool team_port_enabled(struct team_port *port)
 {
-	port->index = team->port_count++;
+	return port->index != -1;
+}
+
+/*
+ * Enable/disable port by adding to enabled port hashlist and setting
+ * port->index (might be racy, so a reader could see an incorrect index while
+ * processing an in-flight packet, but that is not a problem). Write guarded
+ * by team->lock.
+ */
+static void team_port_enable(struct team *team,
+			     struct team_port *port)
+{
+	if (team_port_enabled(port))
+		return;
+	port->index = team->en_port_count++;
 	hlist_add_head_rcu(&port->hlist,
 			   team_port_index_hash(team, port->index));
-	list_add_tail_rcu(&port->list, &team->port_list);
 }
 
 static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -479,7 +644,7 @@
 	int i;
 	struct team_port *port;
 
-	for (i = rm_index + 1; i < team->port_count; i++) {
+	for (i = rm_index + 1; i < team->en_port_count; i++) {
 		port = team_get_port_by_index(team, i);
 		hlist_del_rcu(&port->hlist);
 		port->index--;
@@ -488,15 +653,17 @@
 	}
 }
 
-static void team_port_list_del_port(struct team *team,
-				   struct team_port *port)
+static void team_port_disable(struct team *team,
+			      struct team_port *port)
 {
 	int rm_index = port->index;
 
+	if (!team_port_enabled(port))
+		return;
 	hlist_del_rcu(&port->hlist);
-	list_del_rcu(&port->list);
 	__reconstruct_port_hlist(team, rm_index);
-	team->port_count--;
+	team->en_port_count--;
+	port->index = -1;
 }
 
 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -642,7 +809,16 @@
 		goto err_handler_register;
 	}
 
-	team_port_list_add_port(team, port);
+	err = team_option_port_add(team, port);
+	if (err) {
+		netdev_err(dev, "Device %s failed to add per-port options\n",
+			   portname);
+		goto err_option_port_add;
+	}
+
+	port->index = -1;
+	team_port_enable(team, port);
+	list_add_tail_rcu(&port->list, &team->port_list);
 	team_adjust_ops(team);
 	__team_compute_features(team);
 	__team_port_change_check(port, !!netif_carrier_ok(port_dev));
@@ -651,6 +827,9 @@
 
 	return 0;
 
+err_option_port_add:
+	netdev_rx_handler_unregister(port_dev);
+
 err_handler_register:
 	netdev_set_master(port_dev, NULL);
 
@@ -688,8 +867,10 @@
 
 	port->removed = true;
 	__team_port_change_check(port, false);
-	team_port_list_del_port(team, port);
+	team_port_disable(team, port);
+	list_del_rcu(&port->list);
 	team_adjust_ops(team);
+	team_option_port_del(team, port);
 	netdev_rx_handler_unregister(port_dev);
 	netdev_set_master(port_dev, NULL);
 	vlan_vids_del_by_dev(port_dev, dev);
@@ -712,19 +893,66 @@
 
 static const char team_no_mode_kind[] = "*NOMODE*";
 
-static int team_mode_option_get(struct team *team, void *arg)
+static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
 {
-	const char **str = arg;
-
-	*str = team->mode ? team->mode->kind : team_no_mode_kind;
+	ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind;
 	return 0;
 }
 
-static int team_mode_option_set(struct team *team, void *arg)
+static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
 {
-	const char **str = arg;
+	return team_change_mode(team, ctx->data.str_val);
+}
 
-	return team_change_mode(team, *str);
+static int team_port_en_option_get(struct team *team,
+				   struct team_gsetter_ctx *ctx)
+{
+	ctx->data.bool_val = team_port_enabled(ctx->port);
+	return 0;
+}
+
+static int team_port_en_option_set(struct team *team,
+				   struct team_gsetter_ctx *ctx)
+{
+	if (ctx->data.bool_val)
+		team_port_enable(team, ctx->port);
+	else
+		team_port_disable(team, ctx->port);
+	return 0;
+}
+
+static int team_user_linkup_option_get(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	ctx->data.bool_val = ctx->port->user.linkup;
+	return 0;
+}
+
+static int team_user_linkup_option_set(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	ctx->port->user.linkup = ctx->data.bool_val;
+	team_refresh_port_linkup(ctx->port);
+	return 0;
+}
+
+static int team_user_linkup_en_option_get(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	struct team_port *port = ctx->port;
+
+	ctx->data.bool_val = port->user.linkup_enabled;
+	return 0;
+}
+
+static int team_user_linkup_en_option_set(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	struct team_port *port = ctx->port;
+
+	port->user.linkup_enabled = ctx->data.bool_val;
+	team_refresh_port_linkup(ctx->port);
+	return 0;
 }
 
 static const struct team_option team_options[] = {
@@ -734,6 +962,27 @@
 		.getter = team_mode_option_get,
 		.setter = team_mode_option_set,
 	},
+	{
+		.name = "enabled",
+		.type = TEAM_OPTION_TYPE_BOOL,
+		.per_port = true,
+		.getter = team_port_en_option_get,
+		.setter = team_port_en_option_set,
+	},
+	{
+		.name = "user_linkup",
+		.type = TEAM_OPTION_TYPE_BOOL,
+		.per_port = true,
+		.getter = team_user_linkup_option_get,
+		.setter = team_user_linkup_option_set,
+	},
+	{
+		.name = "user_linkup_enabled",
+		.type = TEAM_OPTION_TYPE_BOOL,
+		.per_port = true,
+		.getter = team_user_linkup_en_option_get,
+		.setter = team_user_linkup_en_option_set,
+	},
 };
 
 static int team_init(struct net_device *dev)
@@ -750,12 +999,13 @@
 		return -ENOMEM;
 
 	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
-		INIT_HLIST_HEAD(&team->port_hlist[i]);
+		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
 	INIT_LIST_HEAD(&team->port_list);
 
 	team_adjust_ops(team);
 
 	INIT_LIST_HEAD(&team->option_list);
+	INIT_LIST_HEAD(&team->option_inst_list);
 	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
 	if (err)
 		goto err_options_register;
@@ -1145,10 +1395,7 @@
 	},
 	[TEAM_ATTR_OPTION_CHANGED]		= { .type = NLA_FLAG },
 	[TEAM_ATTR_OPTION_TYPE]			= { .type = NLA_U8 },
-	[TEAM_ATTR_OPTION_DATA] = {
-		.type = NLA_BINARY,
-		.len = TEAM_STRING_MAX_LEN,
-	},
+	[TEAM_ATTR_OPTION_DATA]			= { .type = NLA_BINARY },
 };
 
 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
@@ -1241,46 +1488,86 @@
 {
 	struct nlattr *option_list;
 	void *hdr;
-	struct team_option *option;
+	struct team_option_inst *opt_inst;
+	int err;
 
 	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
 			  TEAM_CMD_OPTIONS_GET);
 	if (IS_ERR(hdr))
 		return PTR_ERR(hdr);
 
-	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
+		goto nla_put_failure;
 	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
 	if (!option_list)
 		return -EMSGSIZE;
 
-	list_for_each_entry(option, &team->option_list, list) {
+	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
 		struct nlattr *option_item;
-		long arg;
+		struct team_option *option = opt_inst->option;
+		struct team_gsetter_ctx ctx;
 
 		/* Include only changed options if fill all mode is not on */
-		if (!fillall && !option->changed)
+		if (!fillall && !opt_inst->changed)
 			continue;
 		option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
 		if (!option_item)
 			goto nla_put_failure;
-		NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
-		if (option->changed) {
-			NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
-			option->changed = false;
+		if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
+			goto nla_put_failure;
+		if (opt_inst->changed) {
+			if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
+				goto nla_put_failure;
+			opt_inst->changed = false;
 		}
-		if (option->removed)
-			NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED);
+		if (opt_inst->removed &&
+		    nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
+			goto nla_put_failure;
+		if (opt_inst->port &&
+		    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
+				opt_inst->port->dev->ifindex))
+			goto nla_put_failure;
+		ctx.port = opt_inst->port;
 		switch (option->type) {
 		case TEAM_OPTION_TYPE_U32:
-			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
-			team_option_get(team, option, &arg);
-			NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
+			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
+				goto nla_put_failure;
+			err = team_option_get(team, opt_inst, &ctx);
+			if (err)
+				goto errout;
+			if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
+					ctx.data.u32_val))
+				goto nla_put_failure;
 			break;
 		case TEAM_OPTION_TYPE_STRING:
-			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
-			team_option_get(team, option, &arg);
-			NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
-				       (char *) arg);
+			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
+				goto nla_put_failure;
+			err = team_option_get(team, opt_inst, &ctx);
+			if (err)
+				goto errout;
+			if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
+					   ctx.data.str_val))
+				goto nla_put_failure;
+			break;
+		case TEAM_OPTION_TYPE_BINARY:
+			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
+				goto nla_put_failure;
+			err = team_option_get(team, opt_inst, &ctx);
+			if (err)
+				goto errout;
+			if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
+				    ctx.data.bin_val.len, ctx.data.bin_val.ptr))
+				goto nla_put_failure;
+			break;
+		case TEAM_OPTION_TYPE_BOOL:
+			if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
+				goto nla_put_failure;
+			err = team_option_get(team, opt_inst, &ctx);
+			if (err)
+				goto errout;
+			if (ctx.data.bool_val &&
+			    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
+				goto nla_put_failure;
 			break;
 		default:
 			BUG();
@@ -1292,8 +1579,10 @@
 	return genlmsg_end(skb, hdr);
 
 nla_put_failure:
+	err = -EMSGSIZE;
+errout:
 	genlmsg_cancel(skb, hdr);
-	return -EMSGSIZE;
+	return err;
 }
 
 static int team_nl_fill_options_get_all(struct sk_buff *skb,
@@ -1339,9 +1628,12 @@
 	}
 
 	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
-		struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
+		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
+		struct nlattr *attr_port_ifindex;
+		struct nlattr *attr_data;
 		enum team_option_type opt_type;
-		struct team_option *option;
+		int opt_port_ifindex = 0; /* != 0 for per-port options */
+		struct team_option_inst *opt_inst;
 		char *opt_name;
 		bool opt_found = false;
 
@@ -1349,48 +1641,78 @@
 			err = -EINVAL;
 			goto team_put;
 		}
-		err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
+		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
 				       nl_option, team_nl_option_policy);
 		if (err)
 			goto team_put;
-		if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
-		    !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
-		    !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
+		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
+		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
 			err = -EINVAL;
 			goto team_put;
 		}
-		switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
+		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
 		case NLA_U32:
 			opt_type = TEAM_OPTION_TYPE_U32;
 			break;
 		case NLA_STRING:
 			opt_type = TEAM_OPTION_TYPE_STRING;
 			break;
+		case NLA_BINARY:
+			opt_type = TEAM_OPTION_TYPE_BINARY;
+			break;
+		case NLA_FLAG:
+			opt_type = TEAM_OPTION_TYPE_BOOL;
+			break;
 		default:
 			goto team_put;
 		}
 
-		opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
-		list_for_each_entry(option, &team->option_list, list) {
-			long arg;
-			struct nlattr *opt_data_attr;
+		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
+		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
+			err = -EINVAL;
+			goto team_put;
+		}
 
+		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
+		attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
+		if (attr_port_ifindex)
+			opt_port_ifindex = nla_get_u32(attr_port_ifindex);
+
+		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+			struct team_option *option = opt_inst->option;
+			struct team_gsetter_ctx ctx;
+			int tmp_ifindex;
+
+			tmp_ifindex = opt_inst->port ?
+				      opt_inst->port->dev->ifindex : 0;
 			if (option->type != opt_type ||
-			    strcmp(option->name, opt_name))
+			    strcmp(option->name, opt_name) ||
+			    tmp_ifindex != opt_port_ifindex)
 				continue;
 			opt_found = true;
-			opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
+			ctx.port = opt_inst->port;
 			switch (opt_type) {
 			case TEAM_OPTION_TYPE_U32:
-				arg = nla_get_u32(opt_data_attr);
+				ctx.data.u32_val = nla_get_u32(attr_data);
 				break;
 			case TEAM_OPTION_TYPE_STRING:
-				arg = (long) nla_data(opt_data_attr);
+				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
+					err = -EINVAL;
+					goto team_put;
+				}
+				ctx.data.str_val = nla_data(attr_data);
+				break;
+			case TEAM_OPTION_TYPE_BINARY:
+				ctx.data.bin_val.len = nla_len(attr_data);
+				ctx.data.bin_val.ptr = nla_data(attr_data);
+				break;
+			case TEAM_OPTION_TYPE_BOOL:
+				ctx.data.bool_val = attr_data ? true : false;
 				break;
 			default:
 				BUG();
 			}
-			err = team_option_set(team, option, &arg);
+			err = team_option_set(team, opt_inst, &ctx);
 			if (err)
 				goto team_put;
 		}
@@ -1420,7 +1742,8 @@
 	if (IS_ERR(hdr))
 		return PTR_ERR(hdr);
 
-	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
+		goto nla_put_failure;
 	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
 	if (!port_list)
 		return -EMSGSIZE;
@@ -1434,17 +1757,20 @@
 		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
 		if (!port_item)
 			goto nla_put_failure;
-		NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
+		if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
+			goto nla_put_failure;
 		if (port->changed) {
-			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+			if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
+				goto nla_put_failure;
 			port->changed = false;
 		}
-		if (port->removed)
-			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED);
-		if (port->linkup)
-			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
-		NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
-		NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
+		if ((port->removed &&
+		     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
+		    (port->state.linkup &&
+		     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
+		    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
+		    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
+			goto nla_put_failure;
 		nla_nest_end(skb, port_item);
 	}
 
@@ -1603,23 +1929,24 @@
 {
 	int err;
 
-	if (!port->removed && port->linkup == linkup)
+	if (!port->removed && port->state.linkup == linkup)
 		return;
 
 	port->changed = true;
-	port->linkup = linkup;
+	port->state.linkup = linkup;
+	team_refresh_port_linkup(port);
 	if (linkup) {
 		struct ethtool_cmd ecmd;
 
 		err = __ethtool_get_settings(port->dev, &ecmd);
 		if (!err) {
-			port->speed = ethtool_cmd_speed(&ecmd);
-			port->duplex = ecmd.duplex;
+			port->state.speed = ethtool_cmd_speed(&ecmd);
+			port->state.duplex = ecmd.duplex;
 			goto send_event;
 		}
 	}
-	port->speed = 0;
-	port->duplex = 0;
+	port->state.speed = 0;
+	port->state.duplex = 0;
 
 send_event:
 	err = team_nl_send_event_port_list_get(port->team);
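
The bulk of the team.c changes above replace the flat option list with per-(option, port) instances: options gain a per_port flag, getters and setters now exchange values through struct team_gsetter_ctx (which carries ctx->port for per-port options plus a typed data union), bool and binary option types are added, and the netlink code reports the owning port via TEAM_ATTR_OPTION_PORT_IFINDEX. A hedged sketch of how a team mode could register its own per-port bool option on this API (the "sticky" name and my_port_priv() are made up for illustration):

static int my_sticky_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct my_port_priv *pp = my_port_priv(ctx->port);	/* hypothetical */

	ctx->data.bool_val = pp->sticky;
	return 0;
}

static int my_sticky_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct my_port_priv *pp = my_port_priv(ctx->port);

	pp->sticky = ctx->data.bool_val;
	return 0;
}

static const struct team_option my_options[] = {
	{
		.name = "sticky",		/* hypothetical option name */
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,		/* one instance per attached port */
		.getter = my_sticky_get,
		.setter = my_sticky_set,
	},
};

/* Registered from the mode's init handler with
 * team_options_register(team, my_options, ARRAY_SIZE(my_options)); the core
 * then creates one team_option_inst per port and tears them down again in
 * team_option_port_del(). */
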
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index f4d960e..fd6bd03 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -59,23 +59,21 @@
 		RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
 }
 
-static int ab_active_port_get(struct team *team, void *arg)
+static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
 {
-	u32 *ifindex = arg;
-
-	*ifindex = 0;
 	if (ab_priv(team)->active_port)
-		*ifindex = ab_priv(team)->active_port->dev->ifindex;
+		ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex;
+	else
+		ctx->data.u32_val = 0;
 	return 0;
 }
 
-static int ab_active_port_set(struct team *team, void *arg)
+static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx)
 {
-	u32 *ifindex = arg;
 	struct team_port *port;
 
-	list_for_each_entry_rcu(port, &team->port_list, list) {
-		if (port->dev->ifindex == *ifindex) {
+	list_for_each_entry(port, &team->port_list, list) {
+		if (port->dev->ifindex == ctx->data.u32_val) {
 			rcu_assign_pointer(ab_priv(team)->active_port, port);
 			return 0;
 		}
@@ -92,12 +90,12 @@
 	},
 };
 
-int ab_init(struct team *team)
+static int ab_init(struct team *team)
 {
 	return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
 }
 
-void ab_exit(struct team *team)
+static void ab_exit(struct team *team)
 {
 	team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
 }
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
new file mode 100644
index 0000000..86e8183
--- /dev/null
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -0,0 +1,174 @@
+/*
+ * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
+ * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/if_team.h>
+
+struct lb_priv {
+	struct sk_filter __rcu *fp;
+	struct sock_fprog *orig_fprog;
+};
+
+static struct lb_priv *lb_priv(struct team *team)
+{
+	return (struct lb_priv *) &team->mode_priv;
+}
+
+static bool lb_transmit(struct team *team, struct sk_buff *skb)
+{
+	struct sk_filter *fp;
+	struct team_port *port;
+	unsigned int hash;
+	int port_index;
+
+	fp = rcu_dereference(lb_priv(team)->fp);
+	if (unlikely(!fp))
+		goto drop;
+	hash = SK_RUN_FILTER(fp, skb);
+	port_index = hash % team->en_port_count;
+	port = team_get_port_by_index_rcu(team, port_index);
+	if (unlikely(!port))
+		goto drop;
+	skb->dev = port->dev;
+	if (dev_queue_xmit(skb))
+		return false;
+	return true;
+
+drop:
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+	if (!lb_priv(team)->orig_fprog) {
+		ctx->data.bin_val.len = 0;
+		ctx->data.bin_val.ptr = NULL;
+		return 0;
+	}
+	ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len *
+				sizeof(struct sock_filter);
+	ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter;
+	return 0;
+}
+
+static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
+			  const void *data)
+{
+	struct sock_fprog *fprog;
+	struct sock_filter *filter = (struct sock_filter *) data;
+
+	if (data_len % sizeof(struct sock_filter))
+		return -EINVAL;
+	fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
+	if (!fprog)
+		return -ENOMEM;
+	fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
+	if (!fprog->filter) {
+		kfree(fprog);
+		return -ENOMEM;
+	}
+	fprog->len = data_len / sizeof(struct sock_filter);
+	*pfprog = fprog;
+	return 0;
+}
+
+static void __fprog_destroy(struct sock_fprog *fprog)
+{
+	kfree(fprog->filter);
+	kfree(fprog);
+}
+
+static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
+{
+	struct sk_filter *fp = NULL;
+	struct sock_fprog *fprog = NULL;
+	int err;
+
+	if (ctx->data.bin_val.len) {
+		err = __fprog_create(&fprog, ctx->data.bin_val.len,
+				     ctx->data.bin_val.ptr);
+		if (err)
+			return err;
+		err = sk_unattached_filter_create(&fp, fprog);
+		if (err) {
+			__fprog_destroy(fprog);
+			return err;
+		}
+	}
+
+	if (lb_priv(team)->orig_fprog) {
+		/* Clear old filter data */
+		__fprog_destroy(lb_priv(team)->orig_fprog);
+		sk_unattached_filter_destroy(lb_priv(team)->fp);
+	}
+
+	rcu_assign_pointer(lb_priv(team)->fp, fp);
+	lb_priv(team)->orig_fprog = fprog;
+	return 0;
+}
+
+static const struct team_option lb_options[] = {
+	{
+		.name = "bpf_hash_func",
+		.type = TEAM_OPTION_TYPE_BINARY,
+		.getter = lb_bpf_func_get,
+		.setter = lb_bpf_func_set,
+	},
+};
+
+static int lb_init(struct team *team)
+{
+	return team_options_register(team, lb_options,
+				     ARRAY_SIZE(lb_options));
+}
+
+static void lb_exit(struct team *team)
+{
+	team_options_unregister(team, lb_options,
+				ARRAY_SIZE(lb_options));
+}
+
+static const struct team_mode_ops lb_mode_ops = {
+	.init			= lb_init,
+	.exit			= lb_exit,
+	.transmit		= lb_transmit,
+};
+
+static struct team_mode lb_mode = {
+	.kind		= "loadbalance",
+	.owner		= THIS_MODULE,
+	.priv_size	= sizeof(struct lb_priv),
+	.ops		= &lb_mode_ops,
+};
+
+static int __init lb_init_module(void)
+{
+	return team_mode_register(&lb_mode);
+}
+
+static void __exit lb_cleanup_module(void)
+{
+	team_mode_unregister(&lb_mode);
+}
+
+module_init(lb_init_module);
+module_exit(lb_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Load-balancing mode for team");
+MODULE_ALIAS("team-mode-loadbalance");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index a0e8f80..6abfbdc 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -50,7 +50,7 @@
 	struct team_port *port;
 	int port_index;
 
-	port_index = rr_priv(team)->sent_packets++ % team->port_count;
+	port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
 	port = team_get_port_by_index_rcu(team, port_index);
 	port = __get_first_port_up(team, port);
 	if (unlikely(!port))
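
With the switch from team->port_count to team->en_port_count, both transmit modes spread traffic only over enabled ports (those holding an index in the port hashlist); a port that is attached but disabled through the new per-port "enabled" option still gets exact-match delivery via RX_HANDLER_EXACT but is never chosen for Tx. The selection step the two modes share, as an illustrative helper:

static struct team_port *pick_tx_port(struct team *team, unsigned int selector)
{
	/* Only enabled ports are indexed, so the modulo can never land on a
	 * disabled port; with nothing enabled the caller drops the skb. */
	if (!team->en_port_count)
		return NULL;
	return team_get_port_by_index_rcu(team, selector % team->en_port_count);
}
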
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
deleted file mode 100644
index b15ac81..0000000
--- a/drivers/net/tokenring/3c359.c
+++ /dev/null
@@ -1,1843 +0,0 @@
-/*
- *   3c359.c (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved
- *
- *  Linux driver for 3Com 3c359 Tokenlink Velocity XL PCI NIC
- *
- *  Base Driver Olympic:
- *	Written 1999 Peter De Schrijver & Mike Phillips
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License, incorporated herein by reference.
- * 
- *  7/17/00 - Clean up, version number 0.9.0. Ready to release to the world.
- *
- *  2/16/01 - Port up to kernel 2.4.2 ready for submission into the kernel.
- *  3/05/01 - Last clean up stuff before submission.
- *  2/15/01 - Finally, update to new pci api. 
- *
- *  To Do:
- */
-
-/* 
- *	Technical Card Details
- *
- *  All access to data is done with 16/8 bit transfers.  The transfer
- *  method really sucks. You can only read or write one location at a time.
- *
- *  Also, the microcode for the card must be uploaded if the card does not have
- *  the flashrom on board.  This is a 28K bloat in the driver when compiled
- *  as a module.
- *
- *  Rx is very simple, status into a ring of descriptors, dma data transfer,
- *  interrupts to tell us when a packet is received.
- *
- *  Tx is a little more interesting. Similar scenario, descriptor and dma data
- *  transfers, but we don't have to interrupt the card to tell it another packet
- *  is ready for transmission, we are just doing simple memory writes, not io or mmio
- *  writes.  The card can be set up to simply poll on the next
- *  descriptor pointer and when this value is non-zero will automatically download
- *  the next packet.  The card then interrupts us when the packet is done.
- *
- */
-
-#define XL_DEBUG 0
-
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/in.h>
-#include <linux/ioport.h>
-#include <linux/string.h>
-#include <linux/proc_fs.h>
-#include <linux/ptrace.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-#include <linux/stddef.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-
-#include <net/checksum.h>
-
-#include <asm/io.h>
-
-#include "3c359.h"
-
-static char version[] __devinitdata  = 
-"3c359.c v1.2.0 2/17/01 - Mike Phillips (mikep@linuxtr.net)" ; 
-
-#define FW_NAME		"3com/3C359.bin"
-MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; 
-MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ;
-MODULE_FIRMWARE(FW_NAME);
-
-/* Module parameters */
-
-/* Ring Speed 0,4,16 
- * 0 = Autosense   
- * 4,16 = Selected speed only, no autosense
- * This allows the card to be the first on the ring
- * and become the active monitor.
- *
- * WARNING: Some hubs will allow you to insert
- * at the wrong speed.
- * 
- * The adapter will _not_ fail to open if there are no
- * active monitors on the ring, it will simply open up in 
- * its last known ringspeed if no ringspeed is specified.
- */
-
-static int ringspeed[XL_MAX_ADAPTERS] = {0,} ;
-
-module_param_array(ringspeed, int, NULL, 0);
-MODULE_PARM_DESC(ringspeed,"3c359: Ringspeed selection - 4,16 or 0") ;
-
-/* Packet buffer size */
-
-static int pkt_buf_sz[XL_MAX_ADAPTERS] = {0,} ;
- 
-module_param_array(pkt_buf_sz, int, NULL, 0) ;
-MODULE_PARM_DESC(pkt_buf_sz,"3c359: Initial buffer size") ;
-/* Message Level */
-
-static int message_level[XL_MAX_ADAPTERS] = {0,} ;
-
-module_param_array(message_level, int, NULL, 0) ;
-MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
-/* 
- *	This is a real nasty way of doing this, but otherwise you
- *	will be stuck with 1555 lines of hex #'s in the code.
- */
-
-static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
-{
-	{PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
-	{ }			/* terminate list */
-};
-MODULE_DEVICE_TABLE(pci,xl_pci_tbl) ; 
-
-static int xl_init(struct net_device *dev);
-static int xl_open(struct net_device *dev);
-static int xl_open_hw(struct net_device *dev) ;  
-static int xl_hw_reset(struct net_device *dev); 
-static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev);
-static void xl_dn_comp(struct net_device *dev); 
-static int xl_close(struct net_device *dev);
-static void xl_set_rx_mode(struct net_device *dev);
-static irqreturn_t xl_interrupt(int irq, void *dev_id);
-static int xl_set_mac_address(struct net_device *dev, void *addr) ; 
-static void xl_arb_cmd(struct net_device *dev);
-static void xl_asb_cmd(struct net_device *dev) ; 
-static void xl_srb_cmd(struct net_device *dev, int srb_cmd) ; 
-static void xl_wait_misr_flags(struct net_device *dev) ; 
-static int xl_change_mtu(struct net_device *dev, int mtu);
-static void xl_srb_bh(struct net_device *dev) ; 
-static void xl_asb_bh(struct net_device *dev) ; 
-static void xl_reset(struct net_device *dev) ;  
-static void xl_freemem(struct net_device *dev) ;  
-
-
-/* EEProm Access Functions */
-static u16  xl_ee_read(struct net_device *dev, int ee_addr) ; 
-static void  xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) ; 
-
-/* Debugging functions */
-#if XL_DEBUG
-static void print_tx_state(struct net_device *dev) ; 
-static void print_rx_state(struct net_device *dev) ; 
-
-static void print_tx_state(struct net_device *dev)
-{
-
-	struct xl_private *xl_priv = netdev_priv(dev);
-	struct xl_tx_desc *txd ; 
-	u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 
-	int i ; 
-
-	printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
-		xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ; 
-	printk("Ring    , Address ,   FSH  , DnNextPtr, Buffer, Buffer_Len\n");
-	for (i = 0; i < 16; i++) {
-		txd = &(xl_priv->xl_tx_ring[i]) ; 
-		printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd),
-			txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ; 
-	}
-
-	printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) );
-	
-	printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) );
-	printk("Queue status = %0x\n",netif_running(dev) ) ;
-}
-
-static void print_rx_state(struct net_device *dev)
-{
-
-	struct xl_private *xl_priv = netdev_priv(dev);
-	struct xl_rx_desc *rxd ; 
-	u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 
-	int i ; 
-
-	printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
-	printk("Ring    , Address ,   FrameState  , UPNextPtr, FragAddr, Frag_Len\n");
-	for (i = 0; i < 16; i++) { 
-		/* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
-		rxd = &(xl_priv->xl_rx_ring[i]) ; 
-		printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd),
-			rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ; 
-	}
-
-	printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR));
-	
-	printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL));
-	printk("Queue status = %0x\n",netif_running(dev));
-} 
-#endif
-
-/*
- *	Read values from the on-board EEProm.  This looks very strange
- *	but you have to wait for the EEProm to get/set the value before 
- *	passing/getting the next value from the nic. As with all requests
- *	on this nic it has to be done in two stages, a) tell the nic which
- *	memory address you want to access and b) pass/get the value from the nic.
- *	With the EEProm, you have to wait before and between access a) and b).
- *	As this is only read at initialization time and the wait period is very 
- *	small we shouldn't have to worry about scheduling issues.
- */
-
-static u16 xl_ee_read(struct net_device *dev, int ee_addr)
-{ 
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 
-
-	/* Wait for EEProm to not be busy */
-	writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
-
-	/* Tell EEProm what we want to do and where */
-	writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ; 
-
-	/* Wait for EEProm to not be busy */
-	writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ; 
-	
-	/* Tell EEProm what we want to do and where */
-	writel(IO_WORD_WRITE | EECONTROL , xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writew(EEREAD + ee_addr, xl_mmio + MMIO_MACDATA) ; 
-
-	/* Finally read the value from the EEProm */
-	writel(IO_WORD_READ | EEDATA , xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	return readw(xl_mmio + MMIO_MACDATA) ; 
-}
-
-/* 
- *	Write values to the onboard eeprom. As with the eeprom read you need to
- *	set which location to write, wait, write the value, wait, with the
- *	added twist of having to enable eeprom writes as well.
- */
-
-static void  xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) 
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 
-
-	/* Wait for EEProm to not be busy */
-	writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
-	
-	/* Enable write/erase */
-	writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writew(EE_ENABLE_WRITE, xl_mmio + MMIO_MACDATA) ; 
-
-	/* Wait for EEProm to not be busy */
-	writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
-
-	/* Put the value we want to write into EEDATA */ 
-	writel(IO_WORD_WRITE | EEDATA, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writew(ee_value, xl_mmio + MMIO_MACDATA) ;
-
-	/* Tell EEProm to write eevalue into ee_addr */
-	writel(IO_WORD_WRITE | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writew(EEWRITE + ee_addr, xl_mmio + MMIO_MACDATA) ; 
-
-	/* Wait for EEProm to not be busy, to ensure write gets done */
-	writel(IO_WORD_READ | EECONTROL, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	while ( readw(xl_mmio + MMIO_MACDATA) & EEBUSY ) ;
-	
-	return ; 
-}
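
As a rough illustration of what the two EEPROM helpers above are used for: xl_open() reads words 0x10-0x12 and copies the six resulting bytes into dev->dev_addr. The sketch below is standalone userspace code with made-up word values and a stand-in for xl_ee_read(); it only shows the byte ordering implied by the cpu_to_le16()/memcpy() sequence in xl_open(), not driver behaviour.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for xl_ee_read(dev, ee_addr): returns a hypothetical EEPROM word. */
static uint16_t fake_ee_read(int ee_addr)
{
	static const uint16_t words[3] = { 0x0040, 0x9D12, 0x3456 }; /* made up */
	return words[ee_addr - 0x10];
}

int main(void)
{
	uint8_t dev_addr[6];
	int i;

	/* Mirror xl_open: words 0x10..0x12, stored low byte first, then copied. */
	for (i = 0; i < 3; i++) {
		uint16_t w = fake_ee_read(0x10 + i);
		dev_addr[2 * i]     = w & 0xFF;   /* cpu_to_le16: low byte first */
		dev_addr[2 * i + 1] = w >> 8;
	}

	printf("station address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev_addr[0], dev_addr[1], dev_addr[2],
	       dev_addr[3], dev_addr[4], dev_addr[5]);
	return 0;
}
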
-
-static const struct net_device_ops xl_netdev_ops = {
-	.ndo_open		= xl_open,
-	.ndo_stop		= xl_close,
-	.ndo_start_xmit		= xl_xmit,
-	.ndo_change_mtu		= xl_change_mtu,
-	.ndo_set_rx_mode	= xl_set_rx_mode,
-	.ndo_set_mac_address	= xl_set_mac_address,
-};
- 
-static int __devinit xl_probe(struct pci_dev *pdev,
-			      const struct pci_device_id *ent) 
-{
-	struct net_device *dev ; 
-	struct xl_private *xl_priv ; 
-	static int card_no = -1 ;
-	int i ; 
-
-	card_no++ ; 
-
-	if (pci_enable_device(pdev)) { 
-		return -ENODEV ; 
-	} 
-
-	pci_set_master(pdev);
-
-	if ((i = pci_request_regions(pdev,"3c359"))) { 
-		return i ; 
-	}
-
-	/* 
-	 * Allowing alloc_trdev to allocate the private data will align
-	 * xl_private on a 32 byte boundary, which we need for the rx/tx
-	 * descriptors
-	 */
-
-	dev = alloc_trdev(sizeof(struct xl_private)) ; 
-	if (!dev) { 
-		pci_release_regions(pdev) ; 
-		return -ENOMEM ; 
-	} 
-	xl_priv = netdev_priv(dev);
-
-#if XL_DEBUG  
-	printk("pci_device: %p, dev:%p, dev->priv: %p, ba[0]: %10x, ba[1]:%10x\n", 
-		pdev, dev, netdev_priv(dev), (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start);
-#endif 
-
-	dev->irq=pdev->irq;
-	dev->base_addr=pci_resource_start(pdev,0) ; 
-	xl_priv->xl_card_name = pci_name(pdev);
-	xl_priv->xl_mmio=ioremap(pci_resource_start(pdev,1), XL_IO_SPACE);
-	xl_priv->pdev = pdev ; 
-		
-	if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
-		xl_priv->pkt_buf_sz = PKT_BUF_SZ ; 
-	else
-		xl_priv->pkt_buf_sz = pkt_buf_sz[card_no] ; 
-
-	dev->mtu = xl_priv->pkt_buf_sz - TR_HLEN ; 
-	xl_priv->xl_ring_speed = ringspeed[card_no] ; 
-	xl_priv->xl_message_level = message_level[card_no] ; 
-	xl_priv->xl_functional_addr[0] = xl_priv->xl_functional_addr[1] = xl_priv->xl_functional_addr[2] = xl_priv->xl_functional_addr[3] = 0 ; 
-	xl_priv->xl_copy_all_options = 0 ; 
-		
-	if((i = xl_init(dev))) {
-		iounmap(xl_priv->xl_mmio) ; 
-		free_netdev(dev) ; 
-		pci_release_regions(pdev) ; 
-		return i ; 
-	}				
-
-	dev->netdev_ops = &xl_netdev_ops;
-	SET_NETDEV_DEV(dev, &pdev->dev);
-
-	pci_set_drvdata(pdev,dev) ; 
-	if ((i = register_netdev(dev))) { 
-		printk(KERN_ERR "3C359, register netdev failed\n") ;  
-		pci_set_drvdata(pdev,NULL) ; 
-		iounmap(xl_priv->xl_mmio) ; 
-		free_netdev(dev) ; 
-		pci_release_regions(pdev) ; 
-		return i ; 
-	}
-   
-	printk(KERN_INFO "3C359: %s registered as: %s\n",xl_priv->xl_card_name,dev->name) ; 
-
-	return 0; 
-}
-
-static int xl_init_firmware(struct xl_private *xl_priv)
-{
-	int err;
-
-	err = request_firmware(&xl_priv->fw, FW_NAME, &xl_priv->pdev->dev);
-	if (err) {
-		printk(KERN_ERR "Failed to load firmware \"%s\"\n", FW_NAME);
-		return err;
-	}
-
-	if (xl_priv->fw->size < 16) {
-		printk(KERN_ERR "Bogus length %zu in \"%s\"\n",
-		       xl_priv->fw->size, FW_NAME);
-		release_firmware(xl_priv->fw);
-		err = -EINVAL;
-	}
-
-	return err;
-}
-
-static int __devinit xl_init(struct net_device *dev) 
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	int err;
-
-	printk(KERN_INFO "%s\n", version);
-	printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
-		xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
-
-	spin_lock_init(&xl_priv->xl_lock) ; 
-
-	err = xl_init_firmware(xl_priv);
-	if (err == 0)
-		err = xl_hw_reset(dev);
-
-	return err;
-}
-
-
-/* 
- *	Hardware reset.  This needs to be a separate entity as we need to reset the card
- *	when we change the EEProm settings.
- */
-
-static int xl_hw_reset(struct net_device *dev) 
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 
-	unsigned long t ; 
-	u16 i ; 
-    	u16 result_16 ; 
-	u8 result_8 ;
-	u16 start ; 
-	int j ;
-
-	if (xl_priv->fw == NULL)
-		return -EINVAL;
-
-	/*
-	 *  Reset the card.  If the card has got the microcode on board, we have 
-         *  missed the initialization interrupt, so we must always do this.
-	 */
-
-	writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; 
-
-	/* 
-	 * Must wait for cmdInProgress bit (12) to clear before continuing with
-	 * card configuration.
-	 */
-
-	t=jiffies;
-	while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 
-		schedule();		
-		if (time_after(jiffies, t + 40 * HZ)) {
-			printk(KERN_ERR "%s: 3COM 3C359 Velocity XL  card not responding to global reset.\n", dev->name);
-			return -ENODEV;
-		}
-	}
-
-	/*
-	 *  Enable pmbar by setting bit in CPAttention
-	 */
-
-	writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-	result_8 = readb(xl_mmio + MMIO_MACDATA) ; 
-	result_8 = result_8 | CPA_PMBARVIS ; 
-	writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(result_8, xl_mmio + MMIO_MACDATA) ; 
-	
-	/*
-	 * Read cpHold bit in pmbar, if cleared we have got Flashrom on board.
- 	 * If not, we need to upload the microcode to the card
-	 */
-
-	writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);  
-
-#if XL_DEBUG
-	printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA));
-#endif
-
-	if ( readw( (xl_mmio + MMIO_MACDATA))  & PMB_CPHOLD ) { 
-
-		/* Set PmBar, privateMemoryBase bits (8:2) to 0 */
-
-		writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);  
-		result_16 = readw(xl_mmio + MMIO_MACDATA) ; 
-		result_16 = result_16 & ~((0x7F) << 2) ; 
-		writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writew(result_16,xl_mmio + MMIO_MACDATA) ; 
-	
-		/* Set CPAttention, memWrEn bit */
-
-		writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		result_8 = readb(xl_mmio + MMIO_MACDATA) ; 
-		result_8 = result_8 | CPA_MEMWREN  ; 
-		writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(result_8, xl_mmio + MMIO_MACDATA) ; 
-
-		/* 
-		 * Now to write the microcode into the shared ram 
-	 	 * The microcode must finish at position 0xFFFF,
-	 	 * so we must subtract to get the start position for the code
-	 	 *
-		 * Looks strange but ensures compiler only uses
-		 * 16 bit unsigned int
-		 */
-		start = (0xFFFF - (xl_priv->fw->size) + 1) ;
-
-		printk(KERN_INFO "3C359: Uploading Microcode: "); 
-
-		for (i = start, j = 0; j < xl_priv->fw->size; i++, j++) {
-			writel(MEM_BYTE_WRITE | 0XD0000 | i,
-			       xl_mmio + MMIO_MAC_ACCESS_CMD);
-			writeb(xl_priv->fw->data[j], xl_mmio + MMIO_MACDATA);
-			if (j % 1024 == 0)
-				printk(".");
-		}
-		printk("\n") ; 
-
-		for (i = 0; i < 16; i++) {
-			writel((MEM_BYTE_WRITE | 0xDFFF0) + i,
-			       xl_mmio + MMIO_MAC_ACCESS_CMD);
-			writeb(xl_priv->fw->data[xl_priv->fw->size - 16 + i],
-			       xl_mmio + MMIO_MACDATA);
-		}
-
-		/*
-		 * Have to write the start address of the upload to FFF4, but
-                 * the address must be >> 4. You do not want to know how long
-                 * it took me to discover this.
-		 */
-
-		writel(MEM_WORD_WRITE | 0xDFFF4, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writew(start >> 4, xl_mmio + MMIO_MACDATA);
-
-		/* Clear the CPAttention, memWrEn Bit */
-	
-		writel( (IO_BYTE_READ | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		result_8 = readb(xl_mmio + MMIO_MACDATA) ; 
-		result_8 = result_8 & ~CPA_MEMWREN ; 
-		writel( (IO_BYTE_WRITE | CPATTENTION), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(result_8, xl_mmio + MMIO_MACDATA) ; 
-
-		/* Clear the cpHold bit in pmbar */
-
-		writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);  
-		result_16 = readw(xl_mmio + MMIO_MACDATA) ; 
-		result_16 = result_16 & ~PMB_CPHOLD ; 
-		writel( (IO_WORD_WRITE | PMBAR), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writew(result_16,xl_mmio + MMIO_MACDATA) ; 
-
-
-	} /* If microcode upload required */
-
-	/* 
-	 * The card should now go through a self test procedure and get itself ready
-         * to be opened, we must wait for an srb response with the initialization
-         * information. 
-	 */
-
-#if XL_DEBUG
-	printk(KERN_INFO "%s: Microcode uploaded, must wait for the self test to complete\n", dev->name);
-#endif
-
-	writew(SETINDENABLE | 0xFFF, xl_mmio + MMIO_COMMAND) ; 
-
-	t=jiffies;
-	while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) { 
-		schedule();		
-		if (time_after(jiffies, t + 15 * HZ)) {
-			printk(KERN_ERR "3COM 3C359 Velocity XL  card not responding.\n");
-			return -ENODEV; 
-		}
-	}
-
-	/*
-	 * Write the RxBufArea with D000, RxEarlyThresh, TxStartThresh and
- 	 * DnPriReqThresh; read the tech docs if you want to know what
-	 * values they need to be.
-	 */
-
-	writel(MMIO_WORD_WRITE | RXBUFAREA, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writew(0xD000, xl_mmio + MMIO_MACDATA) ; 
-	
-	writel(MMIO_WORD_WRITE | RXEARLYTHRESH, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writew(0X0020, xl_mmio + MMIO_MACDATA) ; 
-	
-	writew( SETTXSTARTTHRESH | 0x40 , xl_mmio + MMIO_COMMAND) ; 
-
-	writeb(0x04, xl_mmio + MMIO_DNBURSTTHRESH) ; 
-	writeb(0x04, xl_mmio + DNPRIREQTHRESH) ;
-
-	/*
-	 * Read WRBR to get the location of the srb block; you have to use byte reads, not word reads.
-	 * Tech docs have this wrong !!!!
-	 */
-
-	writel(MMIO_BYTE_READ | WRBR, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	xl_priv->srb = readb(xl_mmio + MMIO_MACDATA) << 8 ; 
-	writel( (MMIO_BYTE_READ | WRBR) + 1, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	xl_priv->srb = xl_priv->srb | readb(xl_mmio + MMIO_MACDATA) ;
-
-#if XL_DEBUG
-	writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	if ( readw(xl_mmio + MMIO_MACDATA) & 2) { 
-		printk(KERN_INFO "Default ring speed 4 mbps\n");
-	} else {
-		printk(KERN_INFO "Default ring speed 16 mbps\n");
-	} 
-	printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
-#endif
-
-	return 0;
-}
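
A worked example of the microcode start-address arithmetic used in xl_hw_reset() above (start = 0xFFFF - fw->size + 1, with start >> 4 written at 0xDFFF4). The firmware size below is hypothetical; this is a standalone sketch, not driver code.

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t fw_size = 0x6000;               /* hypothetical firmware size */
	uint16_t start = 0xFFFF - fw_size + 1;   /* image must end at 0xFFFF   */

	/* last byte written lands at start + fw_size - 1 == 0xFFFF */
	assert((uint32_t)start + fw_size - 1 == 0xFFFF);

	printf("upload runs 0x%04x..0xFFFF, start >> 4 = 0x%04x\n",
	       start, start >> 4);
	return 0;
}
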
-
-static int xl_open(struct net_device *dev) 
-{
-	struct xl_private *xl_priv=netdev_priv(dev);
-	u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 
-	u8 i ; 
-	__le16 hwaddr[3] ; /* Should be u8[6] but we get word return values */
-	int open_err ;
-
-	u16 switchsettings, switchsettings_eeprom  ;
- 
-	if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev))
-		return -EAGAIN;
-
-	/* 
-	 * Read the information from the EEPROM that we need.
-	 */
-	
-	hwaddr[0] = cpu_to_le16(xl_ee_read(dev,0x10));
-	hwaddr[1] = cpu_to_le16(xl_ee_read(dev,0x11));
-	hwaddr[2] = cpu_to_le16(xl_ee_read(dev,0x12));
-
-	/* Ring speed */
-
-	switchsettings_eeprom = xl_ee_read(dev,0x08) ;
-	switchsettings = switchsettings_eeprom ;  
-
-	if (xl_priv->xl_ring_speed != 0) { 
-		if (xl_priv->xl_ring_speed == 4)  
-			switchsettings = switchsettings | 0x02 ; 
-		else 
-			switchsettings = switchsettings & ~0x02 ; 
-	}
-
-	/* Only write EEProm if there has been a change */
-	if (switchsettings != switchsettings_eeprom) { 
-		xl_ee_write(dev,0x08,switchsettings) ; 
-		/* Hardware reset after changing EEProm */
-		xl_hw_reset(dev) ; 
-	}
-
-	memcpy(dev->dev_addr,hwaddr,dev->addr_len) ; 
-	
-	open_err = xl_open_hw(dev) ; 
-
-	/* 
-	 * This really needs to be cleaned up with better error reporting.
-	 */
-
-	if (open_err != 0) { /* Something went wrong with the open command */
-		if (open_err & 0x07) { /* Wrong speed, retry at different speed */
-			printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name);
-			switchsettings = switchsettings ^ 2 ; 
-			xl_ee_write(dev,0x08,switchsettings) ; 
-			xl_hw_reset(dev) ; 
-			open_err = xl_open_hw(dev) ; 
-			if (open_err != 0) { 
-				printk(KERN_WARNING "%s: Open error returned a second time, we're bombing out now\n", dev->name); 
-				free_irq(dev->irq,dev) ; 						
-				return -ENODEV ;
-			}  
-		} else { 
-			printk(KERN_WARNING "%s: Open Error = %04x\n", dev->name, open_err) ; 
-			free_irq(dev->irq,dev) ; 
-			return -ENODEV ; 
-		}
-	}
-
-	/*
-	 * Now to set up the Rx and Tx buffer structures
-	 */
-	/* These MUST be on 8 byte boundaries */
-	xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL);
-	if (xl_priv->xl_tx_ring == NULL) {
-		free_irq(dev->irq,dev);
-		return -ENOMEM;
-	}
-	xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL);
-	if (xl_priv->xl_rx_ring == NULL) {
-		free_irq(dev->irq,dev);
-		kfree(xl_priv->xl_tx_ring);
-		return -ENOMEM;
-	}
-
-	 /* Setup Rx Ring */
-	 for (i=0 ; i < XL_RX_RING_SIZE ; i++) { 
-		struct sk_buff *skb ; 
-
-		skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ; 
-		if (skb==NULL) 
-			break ; 
-
-		skb->dev = dev ; 
-		xl_priv->xl_rx_ring[i].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
-		xl_priv->xl_rx_ring[i].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
-		xl_priv->rx_ring_skb[i] = skb ; 	
-	}
-
-	if (i==0) { 
-		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
-		free_irq(dev->irq,dev) ; 
-		kfree(xl_priv->xl_tx_ring);
-		kfree(xl_priv->xl_rx_ring);
-		return -EIO ; 
-	} 
-
-	xl_priv->rx_ring_no = i ; 
-	xl_priv->rx_ring_tail = 0 ; 
-	xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_rx_ring, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_TODEVICE) ; 
-	for (i=0;i<(xl_priv->rx_ring_no-1);i++) { 
-		xl_priv->xl_rx_ring[i].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1)));
-	} 
-	xl_priv->xl_rx_ring[i].upnextptr = 0 ; 
-
-	writel(xl_priv->rx_ring_dma_addr, xl_mmio + MMIO_UPLISTPTR) ; 
-	
-	/* Setup Tx Ring */
-	
-	xl_priv->tx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_tx_ring, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE,PCI_DMA_TODEVICE) ; 
-	
-	xl_priv->tx_ring_head = 1 ; 
-	xl_priv->tx_ring_tail = 255 ; /* Special marker for first packet */
-	xl_priv->free_ring_entries = XL_TX_RING_SIZE ; 
-
-	/*
- 	 * Setup the first dummy DPD entry for polling to start working.
-	 */
-
-	xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY;
-	xl_priv->xl_tx_ring[0].buffer = 0 ; 
-	xl_priv->xl_tx_ring[0].buffer_length = 0 ; 
-	xl_priv->xl_tx_ring[0].dnnextptr = 0 ; 
-
-	writel(xl_priv->tx_ring_dma_addr, xl_mmio + MMIO_DNLISTPTR) ; 
-	writel(DNUNSTALL, xl_mmio + MMIO_COMMAND) ; 
-	writel(UPUNSTALL, xl_mmio + MMIO_COMMAND) ; 
-	writel(DNENABLE, xl_mmio + MMIO_COMMAND) ; 
-	writeb(0x40, xl_mmio + MMIO_DNPOLL) ;	
-
-	/*
-	 * Enable interrupts on the card
-	 */
-
-	writel(SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; 
-	writel(SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; 
-
-	netif_start_queue(dev) ; 	
-	return 0;
-	
-}	
-
-static int xl_open_hw(struct net_device *dev) 
-{ 
-	struct xl_private *xl_priv=netdev_priv(dev);
-	u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 
-	u16 vsoff ;
-	char ver_str[33];  
-	int open_err ; 
-	int i ; 
-	unsigned long t ; 
-
-	/*
-	 * Okay, let's build up the Open.NIC srb command
-	 *
-	 */
-		
-	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(OPEN_NIC, xl_mmio + MMIO_MACDATA) ; 
-	
-	/*
-	 * Use this as a test byte, if it comes back with the same value, the command didn't work
-	 */
-
-	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb)+ 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(0xff,xl_mmio + MMIO_MACDATA) ; 
-
-	/* Open options */
-	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(0x00, xl_mmio + MMIO_MACDATA) ; 
-	writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + 9, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(0x00, xl_mmio + MMIO_MACDATA) ; 
-
-	/* 
-	 * Node address. Be careful here: the docs say you can just put zeros here and it will use
-	 * the hardware address. It doesn't; you must include the node address in the open command.
-	 */
-
-	if (xl_priv->xl_laa[0]) {  /* If using a LAA address */
-		for (i=10;i<16;i++) { 
-			writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-			writeb(xl_priv->xl_laa[i-10],xl_mmio + MMIO_MACDATA) ;
-		}
-		memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ; 
-	} else { /* Regular hardware address */ 
-		for (i=10;i<16;i++) { 
-			writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-			writeb(dev->dev_addr[i-10], xl_mmio + MMIO_MACDATA) ; 
-		}
-	}
-
-	/* Default everything else to 0 */
-	for (i = 16; i < 34; i++) {
-		writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(0x00,xl_mmio + MMIO_MACDATA) ; 
-	}
-	
-	/*
-	 *  Set the csrb bit in the MISR register
-	 */
-
-	xl_wait_misr_flags(dev) ; 
-	writel(MEM_BYTE_WRITE | MF_CSRB, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(0xFF, xl_mmio + MMIO_MACDATA) ; 
-	writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(MISR_CSRB , xl_mmio + MMIO_MACDATA) ; 
-
-	/*
-	 * Now wait for the command to run
-	 */
-
-	t=jiffies;
-	while (! (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { 
-		schedule();		
-		if (time_after(jiffies, t + 40 * HZ)) {
-			printk(KERN_ERR "3COM 3C359 Velocity XL  card not responding.\n");
-			break ; 
-		}
-	}
-
-	/*
-	 * Let's interpret the open response
-	 */
-
-	writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb)+2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	if (readb(xl_mmio + MMIO_MACDATA)!=0) {
-		open_err = readb(xl_mmio + MMIO_MACDATA) << 8 ; 
-		writel( (MEM_BYTE_READ | 0xD0000 | xl_priv->srb) + 7, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		open_err |= readb(xl_mmio + MMIO_MACDATA) ; 
-		return open_err ; 
-	} else { 
-		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		xl_priv->asb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
-		printk(KERN_INFO "%s: Adapter Opened Details: ",dev->name) ; 
-		printk("ASB: %04x",xl_priv->asb ) ; 
-		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 10, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		printk(", SRB: %04x",swab16(readw(xl_mmio + MMIO_MACDATA)) ) ;
- 
-		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
-		printk(", ARB: %04x\n",xl_priv->arb );
-		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
-
-		/* 
-		 * Interesting, sending the individual characters directly to printk was causing klogd to
-		 * use 100% of processor time, so we build up the string and print that instead.
-	   	 */
-
-		for (i=0;i<0x20;i++) { 
-			writel( (MEM_BYTE_READ | 0xD0000 | vsoff) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-			ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ; 
-		}
-		ver_str[i] = '\0' ; 
-		printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str);
-	} 	
-	
-	/*
-	 * Issue the AckInterrupt
-	 */
-	writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 
-
-	return 0 ; 
-}
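
A small sketch of how the open error word assembled above is interpreted by xl_open(): the byte at srb+2 becomes the high byte, the byte at srb+7 the low byte, and a non-zero value in the low three bits is treated as a ring-speed mismatch worth one retry. The byte values below are hypothetical.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t srb_status = 0x01;  /* hypothetical byte read from srb + 2 */
	uint8_t srb_error  = 0x05;  /* hypothetical byte read from srb + 7 */

	int open_err = (srb_status << 8) | srb_error;

	if (open_err == 0)
		printf("open succeeded\n");
	else if (open_err & 0x07)   /* same test xl_open uses for a speed mismatch */
		printf("open error %04x: wrong ring speed, retry once\n", open_err);
	else
		printf("open error %04x: give up\n", open_err);
	return 0;
}
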
-
-/*
- *	There are two ways of implementing rx on the 359 NIC, either
- * 	interrupt driven or polling.  We are going to use interrupts,
- *	it is the easier way of doing things.
- *	
- *	The Rx works with a ring of Rx descriptors.  At initialise time the ring
- *	entries point to the next entry except for the last entry in the ring 
- *	which points to 0.  The card is programmed with the location of the first
- *	available descriptor and keeps reading the next_ptr until next_ptr is set
- *	to 0.  Hopefully with a ring size of 16 the card will never get to read a next_ptr
- *	of 0.  As the Rx interrupt is received we copy the frame up to the protocol layers
- *	and then point the end of the ring to our current position and point our current
- *	position to 0, therefore making the current position the last position on the ring.
- *	The last position on the ring therefore continually loops around the rx ring.
- *	
- *	rx_ring_tail is the position on the ring to process next. (Think of a snake, the head 
- *	expands as the card adds new packets and we go around eating the tail processing the
- *	packets.)
- *
- *	Undoubtedly it could be streamlined and improved upon, but at the moment it works 
- *	and the fast path through the routine is fine. 
- *	
- *	adv_rx_ring could be inlined to increase performance, but it's called a *lot* of times
- *	in xl_rx, so it would increase the size of the function significantly. 
- */
-
-static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */ 
-{
-	struct xl_private *xl_priv=netdev_priv(dev);
-	int n = xl_priv->rx_ring_tail;
-	int prev_ring_loc;
-
-	prev_ring_loc = (n + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1);
-	xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * n));
-	xl_priv->xl_rx_ring[n].framestatus = 0;
-	xl_priv->xl_rx_ring[n].upnextptr = 0;
-	xl_priv->rx_ring_tail++;
-	xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1);
-}
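
The index arithmetic in adv_rx_ring() relies on XL_RX_RING_SIZE being a power of two, so that masking with (size - 1) makes both the previous-entry calculation and the tail increment wrap cleanly. A standalone illustration:

#include <stdio.h>

#define RING_SIZE 16   /* must be a power of 2, as noted in 3c359.h */

int main(void)
{
	int n;

	for (n = 0; n < RING_SIZE; n++) {
		int prev = (n + RING_SIZE - 1) & (RING_SIZE - 1);
		int next = (n + 1) & (RING_SIZE - 1);
		printf("tail %2d: prev %2d, next %2d\n", n, prev, next);
	}
	/* tail 0 has prev 15 and tail 15 has next 0: the ring wraps without branches */
	return 0;
}
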
-
-static void xl_rx(struct net_device *dev)
-{
-	struct xl_private *xl_priv=netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-	struct sk_buff *skb, *skb2 ; 
-	int frame_length = 0, copy_len = 0  ; 	
-	int temp_ring_loc ;  
-
-	/*
-	 * Receive the next frame, loop around the ring until all frames
-  	 * have been received.
-	 */ 	 
-	
-	while (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & (RXUPDCOMPLETE | RXUPDFULL) ) { /* Descriptor to process */
-
-		if (xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & RXUPDFULL ) { /* UpdFull, Multiple Descriptors used for the frame */
-
-			/* 
-			 * This is a pain, you need to go through all the descriptors until the last one 
-			 * for this frame to find the framelength
-			 */
-
-			temp_ring_loc = xl_priv->rx_ring_tail ; 
-
-			while (xl_priv->xl_rx_ring[temp_ring_loc].framestatus & RXUPDFULL ) {
-				temp_ring_loc++ ; 
-				temp_ring_loc &= (XL_RX_RING_SIZE-1) ; 
-			}
-
-			frame_length = le32_to_cpu(xl_priv->xl_rx_ring[temp_ring_loc].framestatus) & 0x7FFF;
-
-			skb = dev_alloc_skb(frame_length) ;
- 
-			if (skb==NULL) { /* No memory for frame, still need to roll forward the rx ring */
-				printk(KERN_WARNING "%s: dev_alloc_skb failed - multi buffer !\n", dev->name) ; 
-				while (xl_priv->rx_ring_tail != temp_ring_loc)  
-					adv_rx_ring(dev) ; 
-				
-				adv_rx_ring(dev) ; /* One more time just for luck :) */ 
-				dev->stats.rx_dropped++ ; 
-
-				writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; 
-				return ; 				
-			}
-	
-			while (xl_priv->rx_ring_tail != temp_ring_loc) { 
-				copy_len = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen) & 0x7FFF;
-				frame_length -= copy_len ;  
-				pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
-				skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
-							  skb_put(skb, copy_len),
-							  copy_len);
-				pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
-				adv_rx_ring(dev) ; 
-			} 
-
-			/* Now we have found the last fragment */
-			pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
-			skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
-				      skb_put(skb,copy_len), frame_length);
-/*			memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
-			pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
-			adv_rx_ring(dev) ; 
-			skb->protocol = tr_type_trans(skb,dev) ; 
-			netif_rx(skb) ; 
-
-		} else { /* Single Descriptor Used, simply swap buffers over, fast path  */
-
-			frame_length = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus) & 0x7FFF;
-			
-			skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ; 
-
-			if (skb==NULL) { /* Still need to fix the rx ring */
-				printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name);
-				adv_rx_ring(dev) ; 
-				dev->stats.rx_dropped++ ; 
-				writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; 
-				return ; 
-			}
-
-			skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ; 
-			pci_unmap_single(xl_priv->pdev, le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
-			skb_put(skb2, frame_length) ; 
-			skb2->protocol = tr_type_trans(skb2,dev) ; 
-
-			xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] = skb ; 	
-			xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
-			xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
-			adv_rx_ring(dev) ; 
-			dev->stats.rx_packets++ ; 
-			dev->stats.rx_bytes += frame_length ; 	
-
-			netif_rx(skb2) ; 		
-		 } /* if multiple buffers */
-	} /* while packet to do */
-
-	/* Clear the updComplete interrupt */
-	writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; 
-	return ; 	
-}
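
For the multi-descriptor path above: the total frame length comes from the descriptor that finally clears RXUPDFULL, each earlier fragment contributes its own upfraglen, and the last fragment supplies whatever remains. A userspace sketch of that accounting, using made-up fragment sizes:

#include <stdio.h>

int main(void)
{
	/* hypothetical fragment payload sizes for one received frame */
	int frag_len[] = { 4096, 4096, 1200 };
	int nfrags = 3;

	int frame_length = 9392;   /* as read from the last descriptor's framestatus */
	int copied = 0, i;

	/* copy every fragment except the last by its own length ... */
	for (i = 0; i < nfrags - 1; i++) {
		int copy_len = frag_len[i];
		frame_length -= copy_len;
		copied += copy_len;
	}
	/* ... then the last fragment supplies whatever is left */
	copied += frame_length;

	printf("copied %d bytes in %d fragments\n", copied, nfrags);
	return 0;
}
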
-
-/*
- * This is ruthless; it doesn't care what state the card is in, it will
- * completely reset the adapter.
- */
-
-static void xl_reset(struct net_device *dev) 
-{
-	struct xl_private *xl_priv=netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-	unsigned long t; 
-
-	writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; 
-
-	/* 
-	 * Must wait for cmdInProgress bit (12) to clear before continuing with
-	 * card configuration.
-	 */
-
-	t=jiffies;
-	while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 
-		if (time_after(jiffies, t + 40 * HZ)) {
-			printk(KERN_ERR "3COM 3C359 Velocity XL  card not responding.\n");
-			break ; 
-		}
-	}
-	
-}
-
-static void xl_freemem(struct net_device *dev) 
-{
-	struct xl_private *xl_priv=netdev_priv(dev);
-	int i ; 
-
-	for (i=0;i<XL_RX_RING_SIZE;i++) {
-		dev_kfree_skb_irq(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]) ; 
-		pci_unmap_single(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
-		xl_priv->rx_ring_tail++ ; 
-		xl_priv->rx_ring_tail &= XL_RX_RING_SIZE-1; 
-	} 
-
-	/* unmap ring */
-	pci_unmap_single(xl_priv->pdev,xl_priv->rx_ring_dma_addr, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_FROMDEVICE) ; 
-	
-	pci_unmap_single(xl_priv->pdev,xl_priv->tx_ring_dma_addr, sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE, PCI_DMA_TODEVICE) ; 
-
-	kfree(xl_priv->xl_rx_ring) ; 
-	kfree(xl_priv->xl_tx_ring) ; 
-
-	return  ; 
-}
-
-static irqreturn_t xl_interrupt(int irq, void *dev_id) 
-{
-	struct net_device *dev = (struct net_device *)dev_id;
-	struct xl_private *xl_priv =netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-	u16 intstatus, macstatus  ;
-
-	intstatus = readw(xl_mmio + MMIO_INTSTATUS) ;  
-
-	if (!(intstatus & 1)) /* We didn't generate the interrupt */
-		return IRQ_NONE;
-
-	spin_lock(&xl_priv->xl_lock) ; 
-
-	/*
-	 * Process the interrupt
-	 */
-	/*
-	 * Something fishy going on here, we shouldn't get 0001 ints, not fatal though.
-	 */
-	if (intstatus == 0x0001) {  
-		writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
-		printk(KERN_INFO "%s: 00001 int received\n",dev->name);
-	} else {  
-		if (intstatus &	(HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) { 
-			
-			/* 
-			 * Host Error.
-			 * It may be possible to recover from this, but usually it means something
-			 * is seriously fubar, so we just close the adapter.
-			 */
-
-			if (intstatus & HOSTERRINT) {
-				printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus);
-				writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
-				printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
-				netif_stop_queue(dev) ;
-				xl_freemem(dev) ; 
-				free_irq(dev->irq,dev); 	
-				xl_reset(dev) ; 
-				writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 
-				spin_unlock(&xl_priv->xl_lock) ; 
-				return IRQ_HANDLED;
-			} /* Host Error */
-
-			if (intstatus & SRBRINT ) {  /* Srbc interrupt */
-				writel(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
-				if (xl_priv->srb_queued)
-					xl_srb_bh(dev) ; 
-			} /* SRBR Interrupt */
-
-			if (intstatus & TXUNDERRUN) { /* Issue DnReset command */
-				writel(DNRESET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-				while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { /* Wait for command to run */
-					/* !!! FIX-ME !!!! 
-					Must put a timeout check here ! */
-					/* Empty Loop */
-				} 
-				printk(KERN_WARNING "%s: TX Underrun received\n",dev->name);
-				writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 
-			} /* TxUnderRun */
-	
-			if (intstatus & ARBCINT ) { /* Arbc interrupt */
-				xl_arb_cmd(dev) ; 
-			} /* Arbc */
-
-			if (intstatus & ASBFINT) { 
-				if (xl_priv->asb_queued == 1) {
-					xl_asb_cmd(dev) ; 
-				} else if (xl_priv->asb_queued == 2) {
-					xl_asb_bh(dev) ; 
-				} else { 
-					writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ; 
-				}  
-			} /* Asbf */
-
-			if (intstatus & UPCOMPINT ) /* UpComplete */
-				xl_rx(dev) ; 
-
-			if (intstatus & DNCOMPINT )  /* DnComplete */
-				xl_dn_comp(dev) ; 
-
-			if (intstatus & HARDERRINT ) { /* Hardware error */
-				writel(MMIO_WORD_READ | MACSTATUS, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-				macstatus = readw(xl_mmio + MMIO_MACDATA) ; 
-				printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
-				if (macstatus & (1<<14)) 
-					printk(KERN_WARNING "tchk error: Unrecoverable error\n");
-				if (macstatus & (1<<3))
-					printk(KERN_WARNING "eint error: Internal watchdog timer expired\n");
-				if (macstatus & (1<<2))
-					printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n");
-				printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ; 
-				printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
-				netif_stop_queue(dev) ;
-				xl_freemem(dev) ; 
-				free_irq(dev->irq,dev); 
-				unregister_netdev(dev) ; 
-				free_netdev(dev) ;  
-				xl_reset(dev) ; 
-				writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 
-				spin_unlock(&xl_priv->xl_lock) ; 
-				return IRQ_HANDLED;
-			}
-		} else { 
-			printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus);
-			writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 	
-		}
-	} 
-
-	/* Turn interrupts back on */
-
-	writel( SETINDENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; 
-	writel( SETINTENABLE | INT_MASK, xl_mmio + MMIO_COMMAND) ; 
-
-	spin_unlock(&xl_priv->xl_lock) ;
-	return IRQ_HANDLED;
-}	
-
-/*
- *	Tx - Polling configuration
- */
-	
-static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct xl_private *xl_priv=netdev_priv(dev);
-	struct xl_tx_desc *txd ; 
-	int tx_head, tx_tail, tx_prev ; 
-	unsigned long flags ; 	
-
-	spin_lock_irqsave(&xl_priv->xl_lock,flags) ; 
-
-	netif_stop_queue(dev) ; 
-
-	if (xl_priv->free_ring_entries > 1 ) { 	
-		/*
-		 * Set up the descriptor for the packet 
-		 */
-		tx_head = xl_priv->tx_ring_head ; 
-		tx_tail = xl_priv->tx_ring_tail ; 
-
-		txd = &(xl_priv->xl_tx_ring[tx_head]) ; 
-		txd->dnnextptr = 0 ; 
-		txd->framestartheader = cpu_to_le32(skb->len) | TXDNINDICATE;
-		txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
-		txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST;
-		xl_priv->tx_ring_skb[tx_head] = skb ; 
-		dev->stats.tx_packets++ ; 
-		dev->stats.tx_bytes += skb->len ;
-
-		/* 
-		 * Set the nextptr of the previous descriptor equal to this descriptor, add XL_TX_RING_SIZE -1 
-		 * to ensure no negative numbers in unsigned locations.
-		 */ 
-	
-		tx_prev = (xl_priv->tx_ring_head + XL_TX_RING_SIZE - 1) & (XL_TX_RING_SIZE - 1) ; 
-
-		xl_priv->tx_ring_head++ ; 
-		xl_priv->tx_ring_head &= (XL_TX_RING_SIZE - 1) ;
-		xl_priv->free_ring_entries-- ; 
-
-		xl_priv->xl_tx_ring[tx_prev].dnnextptr = cpu_to_le32(xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head));
-
-		/* Sneaky, by doing a read on DnListPtr we can force the card to poll on the DnNextPtr */
-		/* readl(xl_mmio + MMIO_DNLISTPTR) ; */
-
-		netif_wake_queue(dev) ; 
-
-		spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ; 
- 
-		return NETDEV_TX_OK;
-	} else {
-		spin_unlock_irqrestore(&xl_priv->xl_lock,flags) ; 
-		return NETDEV_TX_BUSY;
-	}
-
-}
-	
-/* 
- * The NIC has told us that a packet has been downloaded onto the card. We must
- * find out which packet it was, clear the skb and information for that packet,
- * then advance around the ring for all transmitted packets.
- */
-
-static void xl_dn_comp(struct net_device *dev) 
-{
-	struct xl_private *xl_priv=netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-	struct xl_tx_desc *txd ; 
-
-
-	if (xl_priv->tx_ring_tail == 255) {/* First time */
-		xl_priv->xl_tx_ring[0].framestartheader = 0 ; 
-		xl_priv->xl_tx_ring[0].dnnextptr = 0 ;  
-		xl_priv->tx_ring_tail = 1 ; 
-	}
-
-	while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE ) { 
-		txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]) ;
-		pci_unmap_single(xl_priv->pdev, le32_to_cpu(txd->buffer), xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE);
-		txd->framestartheader = 0 ; 
-		txd->buffer = cpu_to_le32(0xdeadbeef);
-		txd->buffer_length  = 0 ;  
-		dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]) ;
-		xl_priv->tx_ring_tail++ ; 
-		xl_priv->tx_ring_tail &= (XL_TX_RING_SIZE - 1) ; 
-		xl_priv->free_ring_entries++ ; 
-	}
-
-	netif_wake_queue(dev) ; 
-
-	writel(ACK_INTERRUPT | DNCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; 
-}
-
-/*
- * Close the adapter properly.
- * This srb reply cannot be handled from interrupt context as we have
- * to free the interrupt from the driver. 
- */
-
-static int xl_close(struct net_device *dev) 
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-	unsigned long t ; 
-
-	netif_stop_queue(dev) ; 
-
-	/*
-	 * Close the adapter, need to stall the rx and tx queues.
-	 */
-
-    	writew(DNSTALL, xl_mmio + MMIO_COMMAND) ; 
-	t=jiffies;
-	while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 
-		schedule();		
-		if (time_after(jiffies, t + 10 * HZ)) {
-			printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name);
-			break ; 
-		}
-	}
-    	writew(DNDISABLE, xl_mmio + MMIO_COMMAND) ; 
-	t=jiffies;
-	while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 
-		schedule();		
-		if (time_after(jiffies, t + 10 * HZ)) {
-			printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name);
-			break ;
-		}
-	}
-    	writew(UPSTALL, xl_mmio + MMIO_COMMAND) ; 
-	t=jiffies;
-	while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 
-		schedule();		
-		if (time_after(jiffies, t + 10 * HZ)) {
-			printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name);
-			break ; 
-		}
-	}
-
-	/* Turn off interrupts, we will still get the indication though
- 	 * so we can trap it
-	 */
-
-	writel(SETINTENABLE, xl_mmio + MMIO_COMMAND) ; 
-
-	xl_srb_cmd(dev,CLOSE_NIC) ; 
-
-	t=jiffies;
-	while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { 
-		schedule();		
-		if (time_after(jiffies, t + 10 * HZ)) {
-			printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name);
-			break ; 
-		}
-	}
-	/* Read the srb response from the adapter */
-
-	writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
-	if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) { 
-		printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name);
-	} else { 
-		writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		if (readb(xl_mmio + MMIO_MACDATA)==0) { 
-			printk(KERN_INFO "%s: Adapter has been closed\n",dev->name);
-			writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 
-
-			xl_freemem(dev) ; 
-			free_irq(dev->irq,dev) ; 
-		} else { 
-			printk(KERN_INFO "%s: Close nic command returned error code %02x\n",dev->name, readb(xl_mmio + MMIO_MACDATA)) ;
-		} 
-	}
-
-	/* Reset the upload and download logic */
- 
-    	writew(UPRESET, xl_mmio + MMIO_COMMAND) ; 
-	t=jiffies;
-	while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 
-		schedule();		
-		if (time_after(jiffies, t + 10 * HZ)) {
-			printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name);
-			break ; 
-		}
-	}
-    	writew(DNRESET, xl_mmio + MMIO_COMMAND) ; 
-	t=jiffies;
-	while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 
-		schedule();		
-		if (time_after(jiffies, t + 10 * HZ)) {
-			printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name);
-			break ; 
-		}
-	}
-	xl_hw_reset(dev) ; 
-	return 0 ;
-}
-
-static void xl_set_rx_mode(struct net_device *dev) 
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	struct netdev_hw_addr *ha;
-	unsigned char dev_mc_address[4] ; 
-	u16 options ; 
-
-	if (dev->flags & IFF_PROMISC)
-		options = 0x0004 ; 
-	else
-		options = 0x0000 ; 
-
-	if (options ^ xl_priv->xl_copy_all_options) { /* Changed, must send command */
-		xl_priv->xl_copy_all_options = options ; 
-		xl_srb_cmd(dev, SET_RECEIVE_MODE) ;
-		return ;  
-	}
-
-	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
-
-	netdev_for_each_mc_addr(ha, dev) {
-		dev_mc_address[0] |= ha->addr[2];
-		dev_mc_address[1] |= ha->addr[3];
-		dev_mc_address[2] |= ha->addr[4];
-		dev_mc_address[3] |= ha->addr[5];
-        }
-
-	if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
-		memcpy(xl_priv->xl_functional_addr, dev_mc_address,4) ; 
-		xl_srb_cmd(dev, SET_FUNC_ADDRESS) ; 
-	}
-	return ; 
-}
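
xl_set_rx_mode() folds every multicast entry into a single 4-byte token-ring functional address by OR-ing bytes 2..5 of each address. A standalone sketch with two made-up multicast addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical token-ring multicast addresses */
	uint8_t mc1[6] = { 0xC0, 0x00, 0x00, 0x00, 0x00, 0x10 };
	uint8_t mc2[6] = { 0xC0, 0x00, 0x00, 0x04, 0x00, 0x00 };
	uint8_t *mc[] = { mc1, mc2 };

	uint8_t func_addr[4] = { 0, 0, 0, 0 };
	int i, j;

	/* mirror the netdev_for_each_mc_addr() loop: OR in bytes 2..5 */
	for (i = 0; i < 2; i++)
		for (j = 0; j < 4; j++)
			func_addr[j] |= mc[i][j + 2];

	printf("functional address %02x:%02x:%02x:%02x\n",
	       func_addr[0], func_addr[1], func_addr[2], func_addr[3]);
	return 0;
}
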
-
-
-/*
- *	We issued an srb command and now we must read
- *	the response from the completed command.
- */
-
-static void xl_srb_bh(struct net_device *dev) 
-{ 
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-	u8 srb_cmd, ret_code ; 
-	int i ; 
-
-	writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-	srb_cmd = readb(xl_mmio + MMIO_MACDATA) ; 
-	writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-	ret_code = readb(xl_mmio + MMIO_MACDATA) ; 
-
-	/* Ret_code is standard across all commands */
-
-	switch (ret_code) { 
-	case 1:
-		printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ; 
-		break ; 
-	case 4:
-		printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd);
-		break ;
-	
-	case 6:
-		printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd);
-		break ;
-
-	case 0: /* Successful command execution */ 
-		switch (srb_cmd) { 
-		case READ_LOG: /* Returns 14 bytes of data from the NIC */
-			if(xl_priv->xl_message_level)
-				printk(KERN_INFO "%s: READ.LOG 14 bytes of data ",dev->name) ; 
-			/* 
-			 * We still have to read the log even if message_level = 0 and we don't want
-			 * to see it
-			 */
-			for (i=0;i<14;i++) { 
-				writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-				if(xl_priv->xl_message_level) 
-					printk("%02x:",readb(xl_mmio + MMIO_MACDATA)) ; 	
-			} 
-			printk("\n") ; 
-			break ; 
-		case SET_FUNC_ADDRESS:
-			if(xl_priv->xl_message_level) 
-				printk(KERN_INFO "%s: Functional Address Set\n",dev->name);
-			break ; 
-		case CLOSE_NIC:
-			if(xl_priv->xl_message_level)
-				printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name);
-			break ; 
-		case SET_MULTICAST_MODE:
-			if(xl_priv->xl_message_level)
-				printk(KERN_INFO "%s: Multicast options successfully changed\n",dev->name) ; 
-			break ;
-		case SET_RECEIVE_MODE:
-			if(xl_priv->xl_message_level) {  
-				if (xl_priv->xl_copy_all_options == 0x0004) 
-					printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name);
-				else
-					printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name);
-			}
-			break ; 
- 
-		} /* switch */
-		break ; 
-	} /* switch */
-	return ; 	
-} 
-
-static int xl_set_mac_address (struct net_device *dev, void *addr) 
-{
-	struct sockaddr *saddr = addr ; 
-	struct xl_private *xl_priv = netdev_priv(dev);
-
-	if (netif_running(dev)) { 
-		printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ; 
-		return -EIO ; 
-	}
-
-	memcpy(xl_priv->xl_laa, saddr->sa_data,dev->addr_len) ; 
-	
-	if (xl_priv->xl_message_level) { 
- 		printk(KERN_INFO "%s: MAC/LAA Set to  = %x.%x.%x.%x.%x.%x\n",dev->name, xl_priv->xl_laa[0],
-		xl_priv->xl_laa[1], xl_priv->xl_laa[2],
-		xl_priv->xl_laa[3], xl_priv->xl_laa[4],
-		xl_priv->xl_laa[5]);
-	} 
-
-	return 0 ; 
-}
-
-static void xl_arb_cmd(struct net_device *dev)
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-	u8 arb_cmd ; 
-	u16 lan_status, lan_status_diff ; 
-
-	writel( ( MEM_BYTE_READ | 0xD0000 | xl_priv->arb), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	arb_cmd = readb(xl_mmio + MMIO_MACDATA) ; 
-	
-	if (arb_cmd == RING_STATUS_CHANGE) { /* Ring.Status.Change */
-		writel( ( (MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		 
-		printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, swab16(readw(xl_mmio + MMIO_MACDATA) )) ;
-
-		lan_status = swab16(readw(xl_mmio + MMIO_MACDATA));
-	
-		/* Acknowledge interrupt, this tells nic we are done with the arb */
-		writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 
-			
-		lan_status_diff = xl_priv->xl_lan_status ^ lan_status ; 
-
-		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) { 
-			if (lan_status_diff & LSC_LWF) 
-				printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
-			if (lan_status_diff & LSC_ARW) 
-				printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
-			if (lan_status_diff & LSC_FPE)
-				printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
-			if (lan_status_diff & LSC_RR) 
-				printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
-		
-			/* Adapter has been closed by the hardware */
-
-			netif_stop_queue(dev);
-			xl_freemem(dev) ; 
-			free_irq(dev->irq,dev);
-			
-			printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
-		} /* If serious error */
-		
-		if (xl_priv->xl_message_level) { 
-			if (lan_status_diff & LSC_SIG_LOSS) 
-					printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
-			if (lan_status_diff & LSC_HARD_ERR)
-					printk(KERN_INFO "%s: Beaconing\n",dev->name);
-			if (lan_status_diff & LSC_SOFT_ERR)
-					printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
-			if (lan_status_diff & LSC_TRAN_BCN) 
-					printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
-			if (lan_status_diff & LSC_SS) 
-					printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
-			if (lan_status_diff & LSC_RING_REC)
-					printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
-			if (lan_status_diff & LSC_FDX_MODE)
-					printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
-		} 	
-		
-		if (lan_status_diff & LSC_CO) { 
-				if (xl_priv->xl_message_level) 
-					printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
-				/* Issue READ.LOG command */
-				xl_srb_cmd(dev, READ_LOG) ; 	
-		}
-
-		/* There is no command in the tech docs to issue the read_sr_counters */
-		if (lan_status_diff & LSC_SR_CO) { 
-			if (xl_priv->xl_message_level)
-				printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
-		}
-
-		xl_priv->xl_lan_status = lan_status ; 
-	
-	}  /* Lan.change.status */
-	else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
-#if XL_DEBUG
-		printk(KERN_INFO "Received.Data\n");
-#endif 		
-		writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
-		
-		/* Now we are going to be really basic here and not do anything
-		 * with the data at all. The tech docs do not give me enough
-		 * information to calculate the buffers properly so we're
-		 * just going to tell the nic that we've dealt with the frame
-		 * anyway.
-		 */
-
-		/* Acknowledge interrupt, this tells nic we are done with the arb */
-		writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 
-
-		/* Is the ASB free ? */ 	
-			
-		xl_priv->asb_queued = 0 ; 			
-		writel( ((MEM_BYTE_READ | 0xD0000 | xl_priv->asb) + 2), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		if (readb(xl_mmio + MMIO_MACDATA) != 0xff) { 
-			xl_priv->asb_queued = 1 ;
-
-			xl_wait_misr_flags(dev) ;  
-
-			writel(MEM_BYTE_WRITE | MF_ASBFR, xl_mmio + MMIO_MAC_ACCESS_CMD); 
-			writeb(0xff, xl_mmio + MMIO_MACDATA) ;
-			writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-			writeb(MISR_ASBFR, xl_mmio + MMIO_MACDATA) ; 
-			return ; 	
-			/* Drop out and wait for the bottom half to be run */
-		}
-	
-		xl_asb_cmd(dev) ; 
-		
-	} else {
-		printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
-	}
-
-	/* Acknowledge the arb interrupt */
-
-	writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; 
-
-	return ; 
-}
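
The ring-status handling above XORs the new status against the previously stored one so that only bits which actually changed are acted on. A tiny illustration with hypothetical status words (the LSC_* values are the ones defined in 3c359.h below):

#include <stdio.h>
#include <stdint.h>

#define LSC_SIG_LOSS 0x8000
#define LSC_SOFT_ERR 0x2000

int main(void)
{
	uint16_t old_status = 0x0000;                        /* hypothetical */
	uint16_t new_status = LSC_SIG_LOSS | LSC_SOFT_ERR;   /* hypothetical */

	uint16_t diff = old_status ^ new_status;   /* bits that changed either way */

	if (diff & LSC_SIG_LOSS)
		printf("signal loss state changed\n");
	if (diff & LSC_SOFT_ERR)
		printf("soft error state changed\n");
	return 0;
}
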
-
-
-/*
- *	There is only one asb command, but we can get called from different
- *	places.
- */
-
-static void xl_asb_cmd(struct net_device *dev)
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-
-	if (xl_priv->asb_queued == 1) 
-		writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ; 
-		
-	writel(MEM_BYTE_WRITE | 0xd0000 | xl_priv->asb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(0x81, xl_mmio + MMIO_MACDATA) ; 
-
-	writel(MEM_WORD_WRITE | 0xd0000 | xl_priv->asb | 6, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writew(swab16(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ;
-
-	xl_wait_misr_flags(dev) ; 	
-
-	writel(MEM_BYTE_WRITE | MF_RASB, xl_mmio + MMIO_MAC_ACCESS_CMD); 
-	writeb(0xff, xl_mmio + MMIO_MACDATA) ;
-
-	writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(MISR_RASB, xl_mmio + MMIO_MACDATA) ; 
-
-	xl_priv->asb_queued = 2 ; 
-
-	return ; 
-}
-
-/*
- * 	This will only get called if there was an error
- *	from the asb cmd.
- */
-static void xl_asb_bh(struct net_device *dev) 
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-	u8 ret_code ; 
-
-	writel(MMIO_BYTE_READ | 0xd0000 | xl_priv->asb | 2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	ret_code = readb(xl_mmio + MMIO_MACDATA) ; 
-	switch (ret_code) { 
-		case 0x01:
-			printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name);
-			break ;
-		case 0x26:
-			printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name);
-			break ; 
-		case 0x40:
-			printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name);
-			break ;  
-	}
-	xl_priv->asb_queued = 0 ; 
-	writel(ACK_INTERRUPT | LATCH_ACK | ASBFACK, xl_mmio + MMIO_COMMAND) ;
-	return ;  
-}
-
-/* 	
- *	Issue srb commands to the nic 
- */
-
-static void xl_srb_cmd(struct net_device *dev, int srb_cmd) 
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-
-	switch (srb_cmd) { 
-	case READ_LOG:
-		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(READ_LOG, xl_mmio + MMIO_MACDATA) ; 
-		break; 
-
-	case CLOSE_NIC:
-		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(CLOSE_NIC, xl_mmio + MMIO_MACDATA) ; 
-		break ;
-
-	case SET_RECEIVE_MODE:
-		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(SET_RECEIVE_MODE, xl_mmio + MMIO_MACDATA) ; 
-		writel(MEM_WORD_WRITE | 0xD0000 | xl_priv->srb | 4, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writew(xl_priv->xl_copy_all_options, xl_mmio + MMIO_MACDATA) ; 
-		break ;
-
-	case SET_FUNC_ADDRESS:
-		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(SET_FUNC_ADDRESS, xl_mmio + MMIO_MACDATA) ; 
-		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 6 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(xl_priv->xl_functional_addr[0], xl_mmio + MMIO_MACDATA) ; 
-		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 7 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(xl_priv->xl_functional_addr[1], xl_mmio + MMIO_MACDATA) ; 
-		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 8 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(xl_priv->xl_functional_addr[2], xl_mmio + MMIO_MACDATA) ; 
-		writel(MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb | 9 , xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-		writeb(xl_priv->xl_functional_addr[3], xl_mmio + MMIO_MACDATA) ;
-		break ;  
-	} /* switch */
-
-
-	xl_wait_misr_flags(dev)  ; 
-
-	/* Write 0xff to the CSRB flag */
-	writel(MEM_BYTE_WRITE | MF_CSRB , xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(0xFF, xl_mmio + MMIO_MACDATA) ; 
-	/* Set csrb bit in MISR register to process command */
-	writel(MMIO_BYTE_WRITE | MISR_SET, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(MISR_CSRB, xl_mmio + MMIO_MACDATA) ; 
-	xl_priv->srb_queued = 1 ; 
-
-	return ; 
-}
-
-/*
- * This is nasty: to use the MISR command you have to wait for 6 memory locations
- * to be zero. This is the way the driver does it on other OSes, so we should be ok with
- * the empty loop.
- */
-
-static void xl_wait_misr_flags(struct net_device *dev) 
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u8 __iomem * xl_mmio = xl_priv->xl_mmio ; 
-	
-	int i  ; 
-	
-	writel(MMIO_BYTE_READ | MISR_RW, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	if (readb(xl_mmio + MMIO_MACDATA) != 0) {  /* Misr not clear */
-		for (i=0; i<6; i++) { 
-			writel(MEM_BYTE_READ | 0xDFFE0 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-			while (readb(xl_mmio + MMIO_MACDATA) != 0) {
-				;	/* Empty Loop */
-			}
-		} 
-	}
-
-	writel(MMIO_BYTE_WRITE | MISR_AND, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 
-	writeb(0x80, xl_mmio + MMIO_MACDATA) ; 
-
-	return ; 
-} 
-
-/*
- *	Change the mtu size; this should work the same as the olympic driver
- */
-
-static int xl_change_mtu(struct net_device *dev, int mtu) 
-{
-	struct xl_private *xl_priv = netdev_priv(dev);
-	u16 max_mtu ; 
-
-	if (xl_priv->xl_ring_speed == 4)
-		max_mtu = 4500 ; 
-	else
-		max_mtu = 18000 ; 
-	
-	if (mtu > max_mtu)
-		return -EINVAL ; 
-	if (mtu < 100) 
-		return -EINVAL ; 
-
-	dev->mtu = mtu ; 
-	xl_priv->pkt_buf_sz = mtu + TR_HLEN ; 
-
-	return 0 ; 
-}
-
-static void __devexit xl_remove_one (struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct xl_private *xl_priv=netdev_priv(dev);
-	
-	release_firmware(xl_priv->fw);
-	unregister_netdev(dev);
-	iounmap(xl_priv->xl_mmio) ; 
-	pci_release_regions(pdev) ; 
-	pci_set_drvdata(pdev,NULL) ; 
-	free_netdev(dev);
-	return ; 
-}
-
-static struct pci_driver xl_3c359_driver = {
-	.name		= "3c359",
-	.id_table	= xl_pci_tbl,
-	.probe		= xl_probe,
-	.remove		= __devexit_p(xl_remove_one),
-};
-
-static int __init xl_pci_init (void)
-{
-	return pci_register_driver(&xl_3c359_driver);
-}
-
-
-static void __exit xl_pci_cleanup (void)
-{
-	pci_unregister_driver (&xl_3c359_driver);
-}
-
-module_init(xl_pci_init);
-module_exit(xl_pci_cleanup);
-
-MODULE_LICENSE("GPL") ; 
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h
deleted file mode 100644
index bcb1a6b..0000000
--- a/drivers/net/tokenring/3c359.h
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- *  3c359.h (c) 2000 Mike Phillips (mikep@linuxtr.net) All Rights Reserved
- *
- *  Linux driver for 3Com 3C359 Token Link PCI XL cards.
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License Version 2 or (at your option) 
- *  any later version, incorporated herein by reference.
- */
-
-/* Memory Access Commands */
-#define IO_BYTE_READ 0x28 << 24
-#define IO_BYTE_WRITE 0x18 << 24 
-#define IO_WORD_READ 0x20 << 24
-#define IO_WORD_WRITE 0x10 << 24
-#define MMIO_BYTE_READ 0x88 << 24
-#define MMIO_BYTE_WRITE 0x48 << 24
-#define MMIO_WORD_READ 0x80 << 24
-#define MMIO_WORD_WRITE 0x40 << 24
-#define MEM_BYTE_READ 0x8C << 24
-#define MEM_BYTE_WRITE 0x4C << 24
-#define MEM_WORD_READ 0x84 << 24
-#define MEM_WORD_WRITE 0x44 << 24
-
-#define PMBAR 0x1C80
-#define PMB_CPHOLD (1<<10)
-
-#define CPATTENTION 0x180D
-#define CPA_PMBARVIS (1<<7)
-#define CPA_MEMWREN (1<<6)
-
-#define SWITCHSETTINGS 0x1C88
-#define EECONTROL 0x1C8A
-#define EEDATA 0x1C8C
-#define EEREAD 0x0080 
-#define EEWRITE 0x0040
-#define EEERASE 0x0060
-#define EE_ENABLE_WRITE 0x0030
-#define EEBUSY (1<<15)
-
-#define WRBR 0xCDE02
-#define WWOR 0xCDE04
-#define WWCR 0xCDE06
-#define MACSTATUS 0xCDE08 
-#define MISR_RW 0xCDE0B
-#define MISR_AND 0xCDE2B
-#define MISR_SET 0xCDE4B
-#define RXBUFAREA 0xCDE10
-#define RXEARLYTHRESH 0xCDE12
-#define TXSTARTTHRESH 0x58
-#define DNPRIREQTHRESH 0x2C
-
-#define MISR_CSRB (1<<5)
-#define MISR_RASB (1<<4)
-#define MISR_SRBFR (1<<3)
-#define MISR_ASBFR (1<<2)
-#define MISR_ARBF (1<<1) 
-
-/* MISR Flags memory locations */
-#define MF_SSBF 0xDFFE0 
-#define MF_ARBF 0xDFFE1
-#define MF_ASBFR 0xDFFE2
-#define MF_SRBFR 0xDFFE3
-#define MF_RASB 0xDFFE4
-#define MF_CSRB 0xDFFE5
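
These six locations are what xl_wait_misr_flags() polls: it reads 0xDFFE0 | i for i = 0..5, i.e. MF_SSBF through MF_CSRB in this order, waiting for each to reach zero. A quick standalone check of that mapping:

#include <assert.h>

#define MF_SSBF  0xDFFE0
#define MF_ARBF  0xDFFE1
#define MF_ASBFR 0xDFFE2
#define MF_SRBFR 0xDFFE3
#define MF_RASB  0xDFFE4
#define MF_CSRB  0xDFFE5

int main(void)
{
	const int mf[6] = { MF_SSBF, MF_ARBF, MF_ASBFR, MF_SRBFR, MF_RASB, MF_CSRB };
	int i;

	/* the driver's loop address 0xDFFE0 | i hits each flag location in turn */
	for (i = 0; i < 6; i++)
		assert((0xDFFE0 | i) == mf[i]);
	return 0;
}
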
-
-#define MMIO_MACDATA 0x10 
-#define MMIO_MAC_ACCESS_CMD 0x14
-#define MMIO_TIMER 0x1A
-#define MMIO_DMA_CTRL 0x20
-#define MMIO_DNLISTPTR 0x24
-#define MMIO_HASHFILTER 0x28
-#define MMIO_CONFIG 0x29
-#define MMIO_DNPRIREQTHRESH 0x2C
-#define MMIO_DNPOLL 0x2D
-#define MMIO_UPPKTSTATUS 0x30
-#define MMIO_FREETIMER 0x34
-#define MMIO_COUNTDOWN 0x36
-#define MMIO_UPLISTPTR 0x38
-#define MMIO_UPPOLL 0x3C
-#define MMIO_UPBURSTTHRESH 0x40
-#define MMIO_DNBURSTTHRESH 0x41
-#define MMIO_INTSTATUS_AUTO 0x56
-#define MMIO_TXSTARTTHRESH 0x58
-#define MMIO_INTERRUPTENABLE 0x5A
-#define MMIO_INDICATIONENABLE 0x5C
-#define MMIO_COMMAND 0x5E  /* These two are meant to be the same */
-#define MMIO_INTSTATUS 0x5E /* Makes the code more readable this way */
-#define INTSTAT_CMD_IN_PROGRESS (1<<12) 
-#define INTSTAT_SRB (1<<14)
-#define INTSTAT_INTLATCH (1<<0)
-
-/* Indication / Interrupt Mask 
- * Annoyingly the bits to be set in the indication and interrupt enable
- * do not match with the actual bits received in the interrupt, although
- * they are in the same order. 
- * The mapping for the indication / interrupt are:
- * Bit	Indication / Interrupt
- *   0	HostError
- *   1	txcomplete
- *   2	updneeded
- *   3	rxcomplete
- *   4	intrequested
- *   5	macerror
- *   6  dncomplete
- *   7	upcomplete
- *   8	txunderrun
- *   9	asbf
- *  10	srbr
- *  11	arbc
- *
- *  The only ones we don't want to receive are txcomplete and rxcomplete;
- *  we use dncomplete and upcomplete instead.
- */
-
-#define INT_MASK 0xFF5
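
The 0xFF5 value follows from the table above: take all twelve indication bits and clear bit 1 (txcomplete) and bit 3 (rxcomplete). A one-line check:

#include <assert.h>

int main(void)
{
	/* bits 0..11 set, minus txcomplete (bit 1) and rxcomplete (bit 3) */
	assert((0xFFF & ~((1 << 1) | (1 << 3))) == 0xFF5);
	return 0;
}
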
-
-/* Note the subtle difference here, IND and INT */
-
-#define SETINDENABLE (8<<12)
-#define SETINTENABLE (7<<12)
-#define SRBBIT (1<<10)
-#define ASBBIT (1<<9)
-#define ARBBIT (1<<11)
-
-#define SRB 0xDFE90
-#define ASB 0xDFED0
-#define ARB 0xD0000
-#define SCRATCH 0xDFEF0
-
-#define INT_REQUEST 0x6000 /* (6 << 12) */
-#define ACK_INTERRUPT 0x6800 /* (13 <<11) */
-#define GLOBAL_RESET 0x00 
-#define DNDISABLE 0x5000 
-#define DNENABLE 0x4800 
-#define DNSTALL 0x3002
-#define DNRESET 0x5800
-#define DNUNSTALL 0x3003
-#define UPRESET 0x2800
-#define UPSTALL 0x3000
-#define UPUNSTALL 0x3001
-#define SETCONFIG 0x4000
-#define SETTXSTARTTHRESH 0x9800 
-
-/* Received Interrupts */
-#define ASBFINT (1<<13)
-#define SRBRINT (1<<14)
-#define ARBCINT (1<<15)
-#define TXUNDERRUN (1<<11)
-
-#define UPCOMPINT (1<<10)
-#define DNCOMPINT (1<<9)
-#define HARDERRINT (1<<7)
-#define RXCOMPLETE (1<<4)
-#define TXCOMPINT (1<<2)
-#define HOSTERRINT (1<<1)
-
-/* Receive descriptor bits */
-#define RXOVERRUN cpu_to_le32(1<<19)
-#define RXFC cpu_to_le32(1<<21)
-#define RXAR cpu_to_le32(1<<22)
-#define RXUPDCOMPLETE cpu_to_le32(1<<23)
-#define RXUPDFULL cpu_to_le32(1<<24)
-#define RXUPLASTFRAG cpu_to_le32(1<<31)
-
-/* Transmit descriptor bits */
-#define TXDNCOMPLETE cpu_to_le32(1<<16)
-#define TXTXINDICATE cpu_to_le32(1<<27)
-#define TXDPDEMPTY cpu_to_le32(1<<29)
-#define TXDNINDICATE cpu_to_le32(1<<31)
-#define TXDNFRAGLAST cpu_to_le32(1<<31)
-
-/* Interrupts to Acknowledge */
-#define LATCH_ACK 1 
-#define TXCOMPACK (1<<1)
-#define INTREQACK (1<<2)
-#define DNCOMPACK (1<<3)
-#define UPCOMPACK (1<<4)
-#define ASBFACK (1<<5)
-#define SRBRACK (1<<6)
-#define ARBCACK (1<<7)
-
-#define XL_IO_SPACE 128
-#define SRB_COMMAND_SIZE 50
-
-/* Adapter Commands */
-#define REQUEST_INT 0x00
-#define MODIFY_OPEN_PARMS 0x01
-#define RESTORE_OPEN_PARMS 0x02
-#define OPEN_NIC 0x03
-#define CLOSE_NIC 0x04
-#define SET_SLEEP_MODE 0x05
-#define SET_GROUP_ADDRESS 0x06
-#define SET_FUNC_ADDRESS 0x07
-#define READ_LOG 0x08
-#define SET_MULTICAST_MODE 0x0C
-#define CHANGE_WAKEUP_PATTERN 0x0D
-#define GET_STATISTICS 0x13
-#define SET_RECEIVE_MODE 0x1F
-
-/* ARB Commands */
-#define RECEIVE_DATA 0x81
-#define RING_STATUS_CHANGE 0x84
-
-/* ASB Commands */
-#define ASB_RECEIVE_DATE 0x81 
-
-/* Defines for LAN STATUS CHANGE reports */
-#define LSC_SIG_LOSS 0x8000
-#define LSC_HARD_ERR 0x4000
-#define LSC_SOFT_ERR 0x2000
-#define LSC_TRAN_BCN 0x1000
-#define LSC_LWF      0x0800
-#define LSC_ARW      0x0400
-#define LSC_FPE      0x0200
-#define LSC_RR       0x0100
-#define LSC_CO       0x0080
-#define LSC_SS       0x0040
-#define LSC_RING_REC 0x0020
-#define LSC_SR_CO    0x0010
-#define LSC_FDX_MODE 0x0004
-
-#define XL_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't handle 0xnn */
-
-/* 3c359 defaults for buffers */
- 
-#define XL_RX_RING_SIZE 16 /* must be a power of 2 */
-#define XL_TX_RING_SIZE 16 /* must be a power of 2 */
-
-#define PKT_BUF_SZ 4096 /* Default packet size */
-
-/* 3c359 data structures */
-
-struct xl_tx_desc {
-	__le32 dnnextptr;
-	__le32 framestartheader;
-	__le32 buffer;
-	__le32 buffer_length;
-};
-
-struct xl_rx_desc {
-	__le32 upnextptr;
-	__le32 framestatus;
-	__le32 upfragaddr;
-	__le32 upfraglen;
-};
-
-struct xl_private {
-	
-
-	/* These two structures must be aligned on 8 byte boundaries */
-
-	/* struct xl_rx_desc xl_rx_ring[XL_RX_RING_SIZE]; */
-	/* struct xl_tx_desc xl_tx_ring[XL_TX_RING_SIZE]; */
-	struct xl_rx_desc *xl_rx_ring ; 
-	struct xl_tx_desc *xl_tx_ring ; 
-	struct sk_buff *tx_ring_skb[XL_TX_RING_SIZE], *rx_ring_skb[XL_RX_RING_SIZE];	
-	int tx_ring_head, tx_ring_tail ;  
-	int rx_ring_tail, rx_ring_no ; 
-	int free_ring_entries ; 
-
-	u16 srb;
-	u16 arb;
-	u16 asb;
-
-	u8 __iomem *xl_mmio;
-	const char *xl_card_name;
-	struct pci_dev *pdev ; 
-	
-	spinlock_t xl_lock ; 
-
-	volatile int srb_queued;    
-	struct wait_queue *srb_wait;
-	volatile int asb_queued;   
-
-	u16 mac_buffer ; 	
-	u16 xl_lan_status ;
-	u8 xl_ring_speed ;
-	u16 pkt_buf_sz ; 
-	u8 xl_message_level; 
-	u16 xl_copy_all_options ;  
-	unsigned char xl_functional_addr[4] ; 
-	u16 xl_addr_table_addr, xl_parms_addr ; 
-	u8 xl_laa[6] ; 
-	u32 rx_ring_dma_addr ; 
-	u32 tx_ring_dma_addr ; 
-
-	/* firmware section */
-	const struct firmware *fw;
-};
-
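The header above only records the 3c359 register map and command encodings. As a rough standalone sketch of how codes such as MEM_WORD_READ appear intended to be used - OR-ed with a target offset into a single 32-bit word written to MMIO_MAC_ACCESS_CMD, with the data then moved through MMIO_MACDATA - something like the following; the helper names and the exact command composition are illustrative assumptions, not lifted from the removed 3c359.c source:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Illustrative only: assumes the access code sits in bits 24-31 (as the
     * shifts above suggest) and the low bits carry the target offset; the
     * xl_read_mac_word()/xl_write_mac_byte() helpers are hypothetical. */
    static u16 xl_read_mac_word(u8 __iomem *mmio, u32 offset)
    {
            writel(MEM_WORD_READ | offset, mmio + MMIO_MAC_ACCESS_CMD);
            return readw(mmio + MMIO_MACDATA);
    }

    static void xl_write_mac_byte(u8 __iomem *mmio, u32 offset, u8 val)
    {
            writel(MEM_BYTE_WRITE | offset, mmio + MMIO_MAC_ACCESS_CMD);
            writeb(val, mmio + MMIO_MACDATA);
    }
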
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
deleted file mode 100644
index 45550d4..0000000
--- a/drivers/net/tokenring/Kconfig
+++ /dev/null
@@ -1,199 +0,0 @@
-#
-# Token Ring driver configuration
-#
-
-# So far, we only have PCI, ISA, and MCA token ring devices
-menuconfig TR
-	bool "Token Ring driver support"
-	depends on NETDEVICES && !UML
-	depends on (PCI || ISA || MCA || CCW || PCMCIA)
-	help
-	  Token Ring is IBM's way of communication on a local network; the
-	  rest of the world uses Ethernet. To participate on a Token Ring
-	  network, you need a special Token ring network card. If you are
-	  connected to such a Token Ring network and want to use your Token
-	  Ring card under Linux, say Y here and to the driver for your
-	  particular card below and read the Token-Ring mini-HOWTO, available
-	  from <http://www.tldp.org/docs.html#howto>. Most people can
-	  say N here.
-
-if TR
-
-config WANT_LLC
-	def_bool y
-	select LLC
-
-config PCMCIA_IBMTR
-	tristate "IBM PCMCIA tokenring adapter support"
-	depends on IBMTR!=y && PCMCIA
-	---help---
-	  Say Y here if you intend to attach this type of Token Ring PCMCIA
-	  card to your computer. You then also need to say Y to "Token Ring
-	  driver support".
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called ibmtr_cs.
-
-config IBMTR
-	tristate "IBM Tropic chipset based adapter support"
-	depends on ISA || MCA
-	---help---
-	  This is support for all IBM Token Ring cards that don't use DMA. If
-	  you have such a beast, say Y and read the Token-Ring mini-HOWTO,
-	  available from <http://www.tldp.org/docs.html#howto>.
-
-	  Warning: this driver will almost definitely fail if more than one
-	  active Token Ring card is present.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called ibmtr.
-
-config IBMOL
-	tristate "IBM Olympic chipset PCI adapter support"
-	depends on PCI
-	---help---
-	  This is support for all non-Lanstreamer IBM PCI Token Ring Cards.
-	  Specifically this is all IBM PCI, PCI Wake On Lan, PCI II, PCI II
-	  Wake On Lan, and PCI 100/16/4 adapters.
-
-	  If you have such an adapter, say Y and read the Token-Ring
-	  mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called olympic.
-
-	  Also read <file:Documentation/networking/olympic.txt> or check the
-	  Linux Token Ring Project site for the latest information at
-	  <http://www.linuxtr.net/>.
-
-config IBMLS
-	tristate "IBM Lanstreamer chipset PCI adapter support"
-	depends on PCI && !64BIT
-	help
-	  This is support for IBM Lanstreamer PCI Token Ring Cards.
-
-	  If you have such an adapter, say Y and read the Token-Ring
-	  mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called lanstreamer.
-
-config 3C359
-	tristate "3Com 3C359 Token Link Velocity XL adapter support"
-	depends on PCI
-	---help---
-	  This is support for the 3Com PCI Velocity XL cards, specifically
-	  the 3Com 3C359, please note this is not for the 3C339 cards, you
-	  should use the tms380 driver instead.
-
-	  If you have such an adapter, say Y and read the Token-Ring
-	  mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called 3c359.
-
-	  Also read the file <file:Documentation/networking/3c359.txt> or check the 
-	  Linux Token Ring Project site for the latest information at
-	  <http://www.linuxtr.net>
-
-config TMS380TR
-	tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
-	depends on PCI || ISA && ISA_DMA_API || MCA
-	select FW_LOADER
-	---help---
-	  This driver provides generic support for token ring adapters
-	  based on the Texas Instruments TMS380 series chipsets.  This
-	  includes the SysKonnect TR4/16(+) ISA (SK-4190), SysKonnect
-	  TR4/16(+) PCI (SK-4590), SysKonnect TR4/16 PCI (SK-4591),
-	  Compaq 4/16 PCI, Thomas-Conrad TC4048 4/16 PCI, and several
-	  Madge adapters.  If you say Y here, you will be asked to select
-	  which cards to support below.  If you're using modules, each
-	  class of card will be supported by a separate module.
-
-	  If you have such an adapter and would like to use it, say Y and
-	  read the Token-Ring mini-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
-
-	  Also read the file <file:Documentation/networking/tms380tr.txt> or
-	  check <http://www.auk.cx/tms380tr/>.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called tms380tr.
-
-config TMSPCI
-	tristate "Generic TMS380 PCI support"
-	depends on TMS380TR && PCI
-	---help---
-	  This tms380 module supports generic TMS380-based PCI cards.
-
-	  These cards are known to work:
-	  - Compaq 4/16 TR PCI
-	  - SysKonnect TR4/16 PCI (SK-4590/SK-4591)
-	  - Thomas-Conrad TC4048 PCI 4/16
-	  - 3Com Token Link Velocity
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called tmspci.
-
-config SKISA
-	tristate "SysKonnect TR4/16 ISA support"
-	depends on TMS380TR && ISA
-	help
-	  This tms380 module supports SysKonnect TR4/16 ISA cards.
-
-	  These cards are known to work:
-	  - SysKonnect TR4/16 ISA (SK-4190)
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called skisa.
-
-config PROTEON
-	tristate "Proteon ISA support"
-	depends on TMS380TR && ISA
-	help
-	  This tms380 module supports Proteon ISA cards.
-
-	  These cards are known to work:
-	  - Proteon 1392
-	  - Proteon 1392 plus
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called proteon.
-
-config ABYSS
-	tristate "Madge Smart 16/4 PCI Mk2 support"
-	depends on TMS380TR && PCI
-	help
-	  This tms380 module supports the Madge Smart 16/4 PCI Mk2
-	  cards (51-02).
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called abyss.
-
-config MADGEMC
-	tristate "Madge Smart 16/4 Ringnode MicroChannel"
-	depends on TMS380TR && MCA
-	help
-	  This tms380 module supports the Madge Smart 16/4 MC16 and MC32
-	  MicroChannel adapters.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called madgemc.
-
-config SMCTR
-	tristate "SMC ISA/MCA adapter support"
-	depends on (ISA || MCA_LEGACY) && (BROKEN || !64BIT)
-	---help---
-	  This is support for the ISA and MCA SMC Token Ring cards,
-	  specifically SMC TokenCard Elite (8115T) and SMC TokenCard Elite/A
-	  (8115T/A) adapters.
-
-	  If you have such an adapter and would like to use it, say Y or M and
-	  read the Token-Ring mini-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto> and the file
-	  <file:Documentation/networking/smctr.txt>.
-
-	  To compile this driver as a module, choose M here: the module will be
-	  called smctr.
-
-endif # TR
diff --git a/drivers/net/tokenring/Makefile b/drivers/net/tokenring/Makefile
deleted file mode 100644
index f1be8d9..0000000
--- a/drivers/net/tokenring/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# Makefile for drivers/net/tokenring
-#
-
-obj-$(CONFIG_PCMCIA_IBMTR)	+= ibmtr_cs.o
-obj-$(CONFIG_IBMTR)	+= ibmtr.o
-obj-$(CONFIG_IBMOL)	+= olympic.o
-obj-$(CONFIG_IBMLS)	+= lanstreamer.o
-obj-$(CONFIG_TMS380TR)	+= tms380tr.o
-obj-$(CONFIG_ABYSS)	+= abyss.o
-obj-$(CONFIG_MADGEMC)	+= madgemc.o
-obj-$(CONFIG_PROTEON)	+= proteon.o
-obj-$(CONFIG_TMSPCI)	+= tmspci.o
-obj-$(CONFIG_SKISA)	+= skisa.o
-obj-$(CONFIG_SMCTR)	+= smctr.o
-obj-$(CONFIG_3C359)	+= 3c359.o
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
deleted file mode 100644
index b715e6b..0000000
--- a/drivers/net/tokenring/abyss.c
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- *  abyss.c: Network driver for the Madge Smart 16/4 PCI Mk2 token ring card.
- *
- *  Written 1999-2000 by Adam Fritzler
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License, incorporated herein by reference.
- *
- *  This driver module supports the following cards:
- *      - Madge Smart 16/4 PCI Mk2
- *
- *  Maintainer(s):
- *    AF	Adam Fritzler
- *
- *  Modification History:
- *	30-Dec-99	AF	Split off from the tms380tr driver.
- *	22-Jan-00	AF	Updated to use indirect read/writes 
- *	23-Nov-00	JG	New PCI API, cleanups
- *
- *
- *  TODO:
- *	1. See if we can use MMIO instead of inb/outb/inw/outw
- *	2. Add support for Mk1 (has AT24 attached to the PCI
- *		config registers)
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#include "tms380tr.h"
-#include "abyss.h"            /* Madge-specific constants */
-
-static char version[] __devinitdata =
-"abyss.c: v1.02 23/11/2000 by Adam Fritzler\n";
-
-#define ABYSS_IO_EXTENT 64
-
-static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
-	{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
-	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
-	{ }			/* Terminating entry */
-};
-MODULE_DEVICE_TABLE(pci, abyss_pci_tbl);
-
-MODULE_LICENSE("GPL");
-
-static int abyss_open(struct net_device *dev);
-static int abyss_close(struct net_device *dev);
-static void abyss_enable(struct net_device *dev);
-static int abyss_chipset_init(struct net_device *dev);
-static void abyss_read_eeprom(struct net_device *dev);
-static unsigned short abyss_setnselout_pins(struct net_device *dev);
-
-static void at24_writedatabyte(unsigned long regaddr, unsigned char byte);
-static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr);
-static int at24_sendcmd(unsigned long regaddr, unsigned char cmd);
-static unsigned char at24_readdatabit(unsigned long regaddr);
-static unsigned char at24_readdatabyte(unsigned long regaddr);
-static int at24_waitforack(unsigned long regaddr);
-static int at24_waitfornack(unsigned long regaddr);
-static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data);
-static void at24_start(unsigned long regaddr);
-static unsigned char at24_readb(unsigned long regaddr, unsigned char addr);
-
-static unsigned short abyss_sifreadb(struct net_device *dev, unsigned short reg)
-{
-	return inb(dev->base_addr + reg);
-}
-
-static unsigned short abyss_sifreadw(struct net_device *dev, unsigned short reg)
-{
-	return inw(dev->base_addr + reg);
-}
-
-static void abyss_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
-{
-	outb(val, dev->base_addr + reg);
-}
-
-static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
-{
-	outw(val, dev->base_addr + reg);
-}
-
-static struct net_device_ops abyss_netdev_ops;
-
-static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
-{	
-	static int versionprinted;
-	struct net_device *dev;
-	struct net_local *tp;
-	int ret, pci_irq_line;
-	unsigned long pci_ioaddr;
-	
-	if (versionprinted++ == 0)
-		printk("%s", version);
-
-	if (pci_enable_device(pdev))
-		return -EIO;
-
-	/* Remove I/O space marker in bit 0. */
-	pci_irq_line = pdev->irq;
-	pci_ioaddr = pci_resource_start (pdev, 0);
-		
-	/* At this point we have found a valid card. */
-		
-	dev = alloc_trdev(sizeof(struct net_local));
-	if (!dev)
-		return -ENOMEM;
-
-	if (!request_region(pci_ioaddr, ABYSS_IO_EXTENT, dev->name)) {
-		ret = -EBUSY;
-		goto err_out_trdev;
-	}
-		
-	ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
-			  dev->name, dev);
-	if (ret)
-		goto err_out_region;
-		
-	dev->base_addr	= pci_ioaddr;
-	dev->irq	= pci_irq_line;
-		
-	printk("%s: Madge Smart 16/4 PCI Mk2 (Abyss)\n", dev->name);
-	printk("%s:    IO: %#4lx  IRQ: %d\n",
-	       dev->name, pci_ioaddr, dev->irq);
-	/*
-	 * The TMS SIF registers lay 0x10 above the card base address.
-	 */
-	dev->base_addr += 0x10;
-		
-	ret = tmsdev_init(dev, &pdev->dev);
-	if (ret) {
-		printk("%s: unable to get memory for dev->priv.\n", 
-		       dev->name);
-		goto err_out_irq;
-	}
-
-	abyss_read_eeprom(dev);
-
-	printk("%s:    Ring Station Address: %pM\n", dev->name, dev->dev_addr);
-
-	tp = netdev_priv(dev);
-	tp->setnselout = abyss_setnselout_pins;
-	tp->sifreadb = abyss_sifreadb;
-	tp->sifreadw = abyss_sifreadw;
-	tp->sifwriteb = abyss_sifwriteb;
-	tp->sifwritew = abyss_sifwritew;
-
-	memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1);
-		
-	dev->netdev_ops = &abyss_netdev_ops;
-
-	pci_set_drvdata(pdev, dev);
-	SET_NETDEV_DEV(dev, &pdev->dev);
-
-	ret = register_netdev(dev);
-	if (ret)
-		goto err_out_tmsdev;
-	return 0;
-
-err_out_tmsdev:
-	pci_set_drvdata(pdev, NULL);
-	tmsdev_term(dev);
-err_out_irq:
-	free_irq(pdev->irq, dev);
-err_out_region:
-	release_region(pci_ioaddr, ABYSS_IO_EXTENT);
-err_out_trdev:
-	free_netdev(dev);
-	return ret;
-}
-
-static unsigned short abyss_setnselout_pins(struct net_device *dev)
-{
-	unsigned short val = 0;
-	struct net_local *tp = netdev_priv(dev);
-	
-	if(tp->DataRate == SPEED_4)
-		val |= 0x01;  /* Set 4Mbps */
-	else
-		val |= 0x00;  /* Set 16Mbps */
-	
-	return val;
-}
-
-/*
- * The following Madge boards should use this code:
- *   - Smart 16/4 PCI Mk2 (Abyss)
- *   - Smart 16/4 PCI Mk1 (PCI T)
- *   - Smart 16/4 Client Plus PnP (Big Apple)
- *   - Smart 16/4 Cardbus Mk2
- *
- * These access an Atmel AT24 SEEPROM using their glue chip registers. 
- *
- */
-static void at24_writedatabyte(unsigned long regaddr, unsigned char byte)
-{
-	int i;
-	
-	for (i = 0; i < 8; i++) {
-		at24_setlines(regaddr, 0, (byte >> (7-i))&0x01);
-		at24_setlines(regaddr, 1, (byte >> (7-i))&0x01);
-		at24_setlines(regaddr, 0, (byte >> (7-i))&0x01);
-	}
-}
-
-static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr)
-{
-	if (at24_sendcmd(regaddr, cmd)) {
-		at24_writedatabyte(regaddr, addr);
-		return at24_waitforack(regaddr);
-	}
-	return 0;
-}
-
-static int at24_sendcmd(unsigned long regaddr, unsigned char cmd)
-{
-	int i;
-	
-	for (i = 0; i < 10; i++) {
-		at24_start(regaddr);
-		at24_writedatabyte(regaddr, cmd);
-		if (at24_waitforack(regaddr))
-			return 1;
-	}
-	return 0;
-}
-
-static unsigned char at24_readdatabit(unsigned long regaddr)
-{
-	unsigned char val;
-
-	at24_setlines(regaddr, 0, 1);
-	at24_setlines(regaddr, 1, 1);
-	val = (inb(regaddr) & AT24_DATA)?1:0;
-	at24_setlines(regaddr, 1, 1);
-	at24_setlines(regaddr, 0, 1);
-	return val;
-}
-
-static unsigned char at24_readdatabyte(unsigned long regaddr)
-{
-	unsigned char data = 0;
-	int i;
-	
-	for (i = 0; i < 8; i++) {
-		data <<= 1;
-		data |= at24_readdatabit(regaddr);
-	}
-
-	return data;
-}
-
-static int at24_waitforack(unsigned long regaddr)
-{
-	int i;
-	
-	for (i = 0; i < 10; i++) {
-		if ((at24_readdatabit(regaddr) & 0x01) == 0x00)
-			return 1;
-	}
-	return 0;
-}
-
-static int at24_waitfornack(unsigned long regaddr)
-{
-	int i;
-	for (i = 0; i < 10; i++) {
-		if ((at24_readdatabit(regaddr) & 0x01) == 0x01)
-			return 1;
-	}
-	return 0;
-}
-
-static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data)
-{
-	unsigned char val = AT24_ENABLE;
-	if (clock)
-		val |= AT24_CLOCK;
-	if (data)
-		val |= AT24_DATA;
-
-	outb(val, regaddr); 
-	tms380tr_wait(20); /* Very necessary. */
-}
-
-static void at24_start(unsigned long regaddr)
-{
-	at24_setlines(regaddr, 0, 1);
-	at24_setlines(regaddr, 1, 1);
-	at24_setlines(regaddr, 1, 0);
-	at24_setlines(regaddr, 0, 1);
-}
-
-static unsigned char at24_readb(unsigned long regaddr, unsigned char addr)
-{
-	unsigned char data = 0xff;
-	
-	if (at24_sendfullcmd(regaddr, AT24_WRITE, addr)) {
-		if (at24_sendcmd(regaddr, AT24_READ)) {
-			data = at24_readdatabyte(regaddr);
-			if (!at24_waitfornack(regaddr))
-				data = 0xff;
-		}
-	}
-	return data;
-}
-
-
-/*
- * Enable basic functions of the Madge chipset needed
- * for initialization.
- */
-static void abyss_enable(struct net_device *dev)
-{
-	unsigned char reset_reg;
-	unsigned long ioaddr;
-	
-	ioaddr = dev->base_addr;
-	reset_reg = inb(ioaddr + PCIBM2_RESET_REG);
-	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
-	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
-	tms380tr_wait(100);
-}
-
-/*
- * Enable the functions of the Madge chipset needed for
- * full working order. 
- */
-static int abyss_chipset_init(struct net_device *dev)
-{
-	unsigned char reset_reg;
-	unsigned long ioaddr;
-	
-	ioaddr = dev->base_addr;
-	
-	reset_reg = inb(ioaddr + PCIBM2_RESET_REG);
-	
-	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
-	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
-	
-	reset_reg &= ~(PCIBM2_RESET_REG_CHIP_NRES |
-		       PCIBM2_RESET_REG_FIFO_NRES | 
-		       PCIBM2_RESET_REG_SIF_NRES);
-	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
-	
-	tms380tr_wait(100);
-	
-	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
-	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
-	
-	reset_reg |= PCIBM2_RESET_REG_SIF_NRES;
-	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
-
-	reset_reg |= PCIBM2_RESET_REG_FIFO_NRES;
-	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
-
-	outb(PCIBM2_INT_CONTROL_REG_SINTEN | 
-	     PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE, 
-	     ioaddr + PCIBM2_INT_CONTROL_REG);
-  
-	outb(30, ioaddr + PCIBM2_FIFO_THRESHOLD);
-	
-	return 0;
-}
-
-static inline void abyss_chipset_close(struct net_device *dev)
-{
-	unsigned long ioaddr;
-	
-	ioaddr = dev->base_addr;
-	outb(0, ioaddr + PCIBM2_RESET_REG);
-}
-
-/*
- * Read configuration data from the AT24 SEEPROM on Madge cards.
- *
- */
-static void abyss_read_eeprom(struct net_device *dev)
-{
-	struct net_local *tp;
-	unsigned long ioaddr;
-	unsigned short val;
-	int i;
-	
-	tp = netdev_priv(dev);
-	ioaddr = dev->base_addr;
-	
-	/* Must enable glue chip first */
-	abyss_enable(dev);
-	
-	val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG, 
-			 PCIBM2_SEEPROM_RING_SPEED);
-	tp->DataRate = val?SPEED_4:SPEED_16; /* set open speed */
-	printk("%s:    SEEPROM: ring speed: %dMb/sec\n", dev->name, tp->DataRate);
-	
-	val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
-			 PCIBM2_SEEPROM_RAM_SIZE) * 128;
-	printk("%s:    SEEPROM: adapter RAM: %dkb\n", dev->name, val);
-	
-	dev->addr_len = 6;
-	for (i = 0; i < 6; i++) 
-		dev->dev_addr[i] = at24_readb(ioaddr + PCIBM2_SEEPROM_REG, 
-					      PCIBM2_SEEPROM_BIA+i);
-}
-
-static int abyss_open(struct net_device *dev)
-{  
-	abyss_chipset_init(dev);
-	tms380tr_open(dev);
-	return 0;
-}
-
-static int abyss_close(struct net_device *dev)
-{
-	tms380tr_close(dev);
-	abyss_chipset_close(dev);
-	return 0;
-}
-
-static void __devexit abyss_detach (struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	
-	BUG_ON(!dev);
-	unregister_netdev(dev);
-	release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT);
-	free_irq(dev->irq, dev);
-	tmsdev_term(dev);
-	free_netdev(dev);
-	pci_set_drvdata(pdev, NULL);
-}
-
-static struct pci_driver abyss_driver = {
-	.name		= "abyss",
-	.id_table	= abyss_pci_tbl,
-	.probe		= abyss_attach,
-	.remove		= __devexit_p(abyss_detach),
-};
-
-static int __init abyss_init (void)
-{
-	abyss_netdev_ops = tms380tr_netdev_ops;
-
-	abyss_netdev_ops.ndo_open = abyss_open;
-	abyss_netdev_ops.ndo_stop = abyss_close;
-
-	return pci_register_driver(&abyss_driver);
-}
-
-static void __exit abyss_rmmod (void)
-{
-	pci_unregister_driver (&abyss_driver);
-}
-
-module_init(abyss_init);
-module_exit(abyss_rmmod);
-
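The at24_*() helpers above bit-bang an I2C-style transaction over the AT24_CLOCK and AT24_DATA lines of the Madge glue chip, and abyss_read_eeprom() shows the intended call pattern: select a SEEPROM location with at24_sendfullcmd(..., AT24_WRITE, addr), switch to read mode with at24_sendcmd(..., AT24_READ), then clock one byte out. A minimal sketch of an additional caller, assuming the abyss.c context above and that abyss_enable() has already powered up the glue chip (the dump_seeprom() name and the printk lines are illustrative, not part of the driver):

    /* Sketch only: reuses the at24_readb() helper and the register offsets
     * defined in abyss.h; must run after abyss_enable(). */
    static void dump_seeprom(struct net_device *dev)
    {
            unsigned long reg = dev->base_addr + PCIBM2_SEEPROM_REG;
            int i;

            /* the burned-in address occupies six bytes at PCIBM2_SEEPROM_BIA */
            for (i = 0; i < 6; i++)
                    printk(KERN_DEBUG "abyss: BIA[%d] = %02x\n", i,
                           at24_readb(reg, PCIBM2_SEEPROM_BIA + i));

            /* a non-zero ring-speed byte selects 4 Mbps, zero selects 16 Mbps */
            printk(KERN_DEBUG "abyss: ring speed code = %02x\n",
                   at24_readb(reg, PCIBM2_SEEPROM_RING_SPEED));
    }
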
diff --git a/drivers/net/tokenring/abyss.h b/drivers/net/tokenring/abyss.h
deleted file mode 100644
index b0a473b..0000000
--- a/drivers/net/tokenring/abyss.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* 
- * abyss.h: Header for the abyss tms380tr module
- *
- * Authors:
- * - Adam Fritzler
- */
-
-#ifndef __LINUX_MADGETR_H
-#define __LINUX_MADGETR_H
-
-#ifdef __KERNEL__
-
-/*
- * For Madge Smart 16/4 PCI Mk2.  Since we increment the base address
- * to get everything correct for the TMS SIF, we do these as negatives
- * as they fall below the SIF in addressing.
- */
-#define PCIBM2_INT_STATUS_REG          ((short)-15)/* 0x01 */
-#define PCIBM2_INT_CONTROL_REG         ((short)-14)/* 0x02 */
-#define PCIBM2_RESET_REG               ((short)-12)/* 0x04 */
-#define PCIBM2_SEEPROM_REG             ((short)-9) /* 0x07 */
-
-#define PCIBM2_INT_CONTROL_REG_SINTEN           0x02
-#define PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE   0x80
-#define PCIBM2_INT_STATUS_REG_PCI_ERR           0x80
-
-#define PCIBM2_RESET_REG_CHIP_NRES              0x01
-#define PCIBM2_RESET_REG_FIFO_NRES              0x02
-#define PCIBM2_RESET_REG_SIF_NRES               0x04
-
-#define PCIBM2_FIFO_THRESHOLD   0x21
-#define PCIBM2_BURST_LENGTH     0x22
-
-/*
- * Bits in PCIBM2_SEEPROM_REG.
- */
-#define AT24_ENABLE             0x04
-#define AT24_DATA               0x02
-#define AT24_CLOCK              0x01
-
-/*
- * AT24 Commands.
- */
-#define AT24_WRITE              0xA0
-#define AT24_READ               0xA1
-
-/*
- * Addresses in AT24 SEEPROM.
- */
-#define PCIBM2_SEEPROM_BIA          0x12
-#define PCIBM2_SEEPROM_RING_SPEED   0x18
-#define PCIBM2_SEEPROM_RAM_SIZE     0x1A
-#define PCIBM2_SEEPROM_HWF1         0x1C
-#define PCIBM2_SEEPROM_HWF2         0x1E
-
-
-#endif /* __KERNEL__ */
-#endif /* __LINUX_MADGETR_H */
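Since abyss_attach() adds 0x10 to the PCI I/O base so that the TMS SIF registers start at offset zero, the negative offsets above land back on the raw I/O offsets noted in the trailing comments (0x01, 0x02, 0x04, 0x07). A compile-time restatement of that arithmetic, purely as an illustration and not part of the removed header:

    /* Illustrative: glue register offset = SIF base offset (0x10) plus the
     * signed offsets defined above. */
    _Static_assert(0x10 + PCIBM2_INT_STATUS_REG  == 0x01, "INT_STATUS at 0x01");
    _Static_assert(0x10 + PCIBM2_INT_CONTROL_REG == 0x02, "INT_CONTROL at 0x02");
    _Static_assert(0x10 + PCIBM2_RESET_REG       == 0x04, "RESET at 0x04");
    _Static_assert(0x10 + PCIBM2_SEEPROM_REG     == 0x07, "SEEPROM at 0x07");
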
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
deleted file mode 100644
index b5c8c18..0000000
--- a/drivers/net/tokenring/ibmtr.c
+++ /dev/null
@@ -1,1964 +0,0 @@
-/* ibmtr.c:  A shared-memory IBM Token Ring 16/4 driver for linux
- *
- *	Written 1993 by Mark Swanson and Peter De Schrijver.
- *	This software may be used and distributed according to the terms
- *	of the GNU General Public License, incorporated herein by reference.
- *
- *	This device driver should work with Any IBM Token Ring Card that does
- *	not use DMA.
- *
- *	I used Donald Becker's (becker@scyld.com) device driver work
- *	as a base for most of my initial work.
- *
- *	Changes by Peter De Schrijver
- *		(Peter.Deschrijver@linux.cc.kuleuven.ac.be) :
- *
- *	+ changed name to ibmtr.c in anticipation of other tr boards.
- *	+ changed reset code and adapter open code.
- *	+ added SAP open code.
- *	+ a first attempt to write interrupt, transmit and receive routines.
- *
- *	Changes by David W. Morris (dwm@shell.portal.com) :
- *	941003 dwm: - Restructure tok_probe for multiple adapters, devices.
- *	+ Add comments, misc reorg for clarity.
- *	+ Flatten interrupt handler levels.
- *
- *	Changes by Farzad Farid (farzy@zen.via.ecp.fr)
- *	and Pascal Andre (andre@chimay.via.ecp.fr) (March 9 1995) :
- *	+ multi ring support clean up.
- *	+ RFC1042 compliance enhanced.
- *
- *	Changes by Pascal Andre (andre@chimay.via.ecp.fr) (September 7 1995) :
- *	+ bug correction in tr_tx
- *	+ removed redundant information display
- *	+ some code reworking
- *
- *	Changes by Michel Lespinasse (walken@via.ecp.fr),
- *	Yann Doussot (doussot@via.ecp.fr) and Pascal Andre (andre@via.ecp.fr)
- *	(February 18, 1996) :
- *	+ modified shared memory and mmio access to port the driver to
- *	  alpha platform (structure access -> readb/writeb)
- *
- *	Changes by Steve Kipisz (bungy@ibm.net or kipisz@vnet.ibm.com)
- *	(January 18 1996):
- *	+ swapped WWOR and WWCR in ibmtr.h
- *	+ moved some init code from tok_probe into trdev_init.  The
- *	  PCMCIA code can call trdev_init to complete initializing
- *	  the driver.
- *	+ added -DPCMCIA to support PCMCIA
- *	+ detecting PCMCIA Card Removal in interrupt handler.  If
- *	  ISRP is FF, then a PCMCIA card has been removed
- *        10/2000 Burt needed a new method to avoid crashing the OS
- *
- *	Changes by Paul Norton (pnorton@cts.com) :
- *	+ restructured the READ.LOG logic to prevent the transmit SRB
- *	  from being rudely overwritten before the transmit cycle is
- *	  complete. (August 15 1996)
- *	+ completed multiple adapter support. (November 20 1996)
- *	+ implemented csum_partial_copy in tr_rx and increased receive 
- *        buffer size and count. Minor fixes. (March 15, 1997)
- *
- *	Changes by Christopher Turcksin <wabbit@rtfc.demon.co.uk>
- *	+ Now compiles ok as a module again.
- *
- *	Changes by Paul Norton (pnorton@ieee.org) :
- *      + moved the header manipulation code in tr_tx and tr_rx to
- *        net/802/tr.c. (July 12 1997)
- *      + add retry and timeout on open if cable disconnected. (May 5 1998)
- *      + lifted 2000 byte mtu limit. now depends on shared-RAM size.
- *        (May 25 1998)
- *      + can't allocate 2k recv buff at 8k shared-RAM. (20 October 1998)
- *
- *      Changes by Joel Sloan (jjs@c-me.com) :
- *      + disable verbose debug messages by default - to enable verbose
- *	  debugging, edit the IBMTR_DEBUG_MESSAGES define below 
- *	
- *	Changes by Mike Phillips <phillim@amtrak.com> :
- *	+ Added extra #ifdef's to work with new PCMCIA Token Ring Code.
- *	  The PCMCIA code now just sets up the card so it can be recognized
- *        by ibmtr_probe. Also checks allocated memory vs. on-board memory
- *	  for correct figure to use.
- *
- *	Changes by Tim Hockin (thockin@isunix.it.ilstu.edu) :
- *	+ added spinlocks for SMP sanity (10 March 1999)
- *
- *      Changes by Jochen Friedrich to enable RFC1469 Option 2 multicasting
- *      i.e. using functional address C0 00 00 04 00 00 to transmit and 
- *      receive multicast packets.
- *
- *      Changes by Mike Sullivan (based on original sram patch by Dave Grothe)
- *      to support windowing into on-adapter shared ram.
- *      i.e. Use LANAID to setup a PnP configuration with 16K RAM. Paging
- *      will shift this 16K window over the entire available shared RAM.
- *
- *      Changes by Peter De Schrijver (p2@mind.be) :
- *      + fixed a problem with PCMCIA card removal
- *
- *      Change by Mike Sullivan et al.:
- *      + added turbo card support. No need to use lanaid to configure
- *      the adapter into isa compatibility mode.
- *
- *      Changes by Burt Silverman to allow the computer to behave nicely when
- *	a cable is pulled or not in place, or a PCMCIA card is removed hot.
- */
-
-/* change the define of IBMTR_DEBUG_MESSAGES to a nonzero value 
-in the event that chatty debug messages are desired - jjs 12/30/98 */
-
-#define IBMTR_DEBUG_MESSAGES 0
-
-#include <linux/module.h>
-#include <linux/sched.h>
-
-#ifdef PCMCIA		/* required for ibmtr_cs.c to build */
-#undef MODULE		/* yes, really */
-#undef ENABLE_PAGING
-#else
-#define ENABLE_PAGING 1		
-#endif
-
-/* changes the output format of driver initialization */
-#define TR_VERBOSE	0
-
-/* some 95 OSes send many non-UI frames; this allows removing the warning */
-#define TR_FILTERNONUI	1
-
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/ip.h>
-#include <linux/trdevice.h>
-#include <linux/ibmtr.h>
-
-#include <net/checksum.h>
-
-#include <asm/io.h>
-
-#define DPRINTK(format, args...) printk("%s: " format, dev->name , ## args)
-#define DPRINTD(format, args...) DummyCall("%s: " format, dev->name , ## args)
-
-/* version and credits */
-#ifndef PCMCIA
-static char version[] __devinitdata =
-    "\nibmtr.c: v1.3.57   8/ 7/94 Peter De Schrijver and Mark Swanson\n"
-    "         v2.1.125 10/20/98 Paul Norton    <pnorton@ieee.org>\n"
-    "         v2.2.0   12/30/98 Joel Sloan     <jjs@c-me.com>\n"
-    "         v2.2.1   02/08/00 Mike Sullivan  <sullivam@us.ibm.com>\n" 
-    "         v2.2.2   07/27/00 Burt Silverman <burts@us.ibm.com>\n" 
-    "         v2.4.0   03/01/01 Mike Sullivan <sullivan@us.ibm.com>\n";
-#endif
-
-/* this allows displaying full adapter information */
-
-static char *channel_def[] __devinitdata = { "ISA", "MCA", "ISA P&P" };
-
-static char pcchannelid[] __devinitdata = {
-	0x05, 0x00, 0x04, 0x09,
-	0x04, 0x03, 0x04, 0x0f,
-	0x03, 0x06, 0x03, 0x01,
-	0x03, 0x01, 0x03, 0x00,
-	0x03, 0x09, 0x03, 0x09,
-	0x03, 0x00, 0x02, 0x00
-};
-
-static char mcchannelid[] __devinitdata =  {
-	0x04, 0x0d, 0x04, 0x01,
-	0x05, 0x02, 0x05, 0x03,
-	0x03, 0x06, 0x03, 0x03,
-	0x05, 0x08, 0x03, 0x04,
-	0x03, 0x05, 0x03, 0x01,
-	0x03, 0x08, 0x02, 0x00
-};
-
-static char __devinit *adapter_def(char type)
-{
-	switch (type) {
-	case 0xF: return "PC Adapter | PC Adapter II | Adapter/A";
-	case 0xE: return "16/4 Adapter | 16/4 Adapter/A (long)";
-	case 0xD: return "16/4 Adapter/A (short) | 16/4 ISA-16 Adapter";
-	case 0xC: return "Auto 16/4 Adapter";
-	default: return "adapter (unknown type)";
-	}
-};
-
-#define TRC_INIT 0x01		/*  Trace initialization & PROBEs */
-#define TRC_INITV 0x02		/*  verbose init trace points     */
-static unsigned char ibmtr_debug_trace = 0;
-
-static int	ibmtr_probe1(struct net_device *dev, int ioaddr);
-static unsigned char get_sram_size(struct tok_info *adapt_info);
-static int 	trdev_init(struct net_device *dev);
-static int 	tok_open(struct net_device *dev);
-static int 	tok_init_card(struct net_device *dev);
-static void	tok_open_adapter(unsigned long dev_addr);
-static void 	open_sap(unsigned char type, struct net_device *dev);
-static void 	tok_set_multicast_list(struct net_device *dev);
-static netdev_tx_t tok_send_packet(struct sk_buff *skb,
-					 struct net_device *dev);
-static int 	tok_close(struct net_device *dev);
-static irqreturn_t tok_interrupt(int irq, void *dev_id);
-static void 	initial_tok_int(struct net_device *dev);
-static void 	tr_tx(struct net_device *dev);
-static void 	tr_rx(struct net_device *dev);
-static void	ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev);
-static void	tok_rerun(unsigned long dev_addr);
-static void	ibmtr_readlog(struct net_device *dev);
-static int	ibmtr_change_mtu(struct net_device *dev, int mtu);
-static void	find_turbo_adapters(int *iolist);
-
-static int ibmtr_portlist[IBMTR_MAX_ADAPTERS+1] __devinitdata = {
-	0xa20, 0xa24, 0, 0, 0
-};
-static int __devinitdata turbo_io[IBMTR_MAX_ADAPTERS] = {0};
-static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0};
-static int __devinitdata turbo_searched = 0;
-
-#ifndef PCMCIA
-static __u32 ibmtr_mem_base __devinitdata = 0xd0000;
-#endif
-
-static void __devinit PrtChanID(char *pcid, short stride)
-{
-	short i, j;
-	for (i = 0, j = 0; i < 24; i++, j += stride)
-		printk("%1x", ((int) pcid[j]) & 0x0f);
-	printk("\n");
-}
-
-static void __devinit HWPrtChanID(void __iomem *pcid, short stride)
-{
-	short i, j;
-	for (i = 0, j = 0; i < 24; i++, j += stride)
-		printk("%1x", ((int) readb(pcid + j)) & 0x0f);
-	printk("\n");
-}
-
-/* We have to ioremap every checked address, because isa_readb is 
- * going away. 
- */
-
-static void __devinit find_turbo_adapters(int *iolist)
-{
-	int ram_addr;
-	int index=0;
-	void __iomem *chanid;
-	int found_turbo=0;
-	unsigned char *tchanid, ctemp;
-	int i, j;
-	unsigned long jif;
-	void __iomem *ram_mapped ;   
-
-	if (turbo_searched == 1) return;
-	turbo_searched=1;
-	for (ram_addr=0xC0000; ram_addr < 0xE0000; ram_addr+=0x2000) {
-
-		__u32 intf_tbl=0;
-
-		found_turbo=1;
-		ram_mapped = ioremap((u32)ram_addr,0x1fff) ; 
-		if (ram_mapped==NULL) 
- 			continue ; 
-		chanid=(CHANNEL_ID + ram_mapped);
-		tchanid=pcchannelid;
-		ctemp=readb(chanid) & 0x0f;
-		if (ctemp != *tchanid) continue;
-		for (i=2,j=1; i<=46; i=i+2,j++) {
-			if ((readb(chanid+i) & 0x0f) != tchanid[j]){
-				found_turbo=0;
-				break;
-			}
-		}
-		if (!found_turbo) continue;
-
-		writeb(0x90, ram_mapped+0x1E01);
-		for(i=2; i<0x0f; i++) {
-			writeb(0x00, ram_mapped+0x1E01+i);
-		}
-		writeb(0x00, ram_mapped+0x1E01);
-		for(jif=jiffies+TR_BUSY_INTERVAL; time_before_eq(jiffies,jif););
-		intf_tbl=ntohs(readw(ram_mapped+ACA_OFFSET+ACA_RW+WRBR_EVEN));
-		if (intf_tbl) {
-#if IBMTR_DEBUG_MESSAGES
-			printk("ibmtr::find_turbo_adapters, Turbo found at "
-				"ram_addr %x\n",ram_addr);
-			printk("ibmtr::find_turbo_adapters, interface_table ");
-			for(i=0; i<6; i++) {
-				printk("%x:",readb(ram_addr+intf_tbl+i));
-			}
-			printk("\n");
-#endif
-			turbo_io[index]=ntohs(readw(ram_mapped+intf_tbl+4));
-			turbo_irq[index]=readb(ram_mapped+intf_tbl+3);
-			outb(0, turbo_io[index] + ADAPTRESET);
-			for(jif=jiffies+TR_RST_TIME;time_before_eq(jiffies,jif););
-			outb(0, turbo_io[index] + ADAPTRESETREL);
-			index++;
-			continue;
-		}
-#if IBMTR_DEBUG_MESSAGES 
-		printk("ibmtr::find_turbo_adapters, ibmtr card found at"
-			" %x but not a Turbo model\n",ram_addr);
-#endif
-	iounmap(ram_mapped) ; 	
-	} /* for */
-	for(i=0; i<IBMTR_MAX_ADAPTERS; i++) {
-		if(!turbo_io[i]) break;
-		for (j=0; j<IBMTR_MAX_ADAPTERS; j++) {
-			if ( iolist[j] && iolist[j] != turbo_io[i]) continue;
-			iolist[j]=turbo_io[i];
-			break;
-		}
-	}
-}
-
-static void ibmtr_cleanup_card(struct net_device *dev)
-{
-	if (dev->base_addr) {
-		outb(0,dev->base_addr+ADAPTRESET);
-		
-		schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
-
-		outb(0,dev->base_addr+ADAPTRESETREL);
-	}
-
-#ifndef PCMCIA
-	free_irq(dev->irq, dev);
-	release_region(dev->base_addr, IBMTR_IO_EXTENT);
-
-	{ 
-		struct tok_info *ti = netdev_priv(dev);
-		iounmap(ti->mmio);
-		iounmap(ti->sram_virt);
-	}
-#endif		
-}
-
-/****************************************************************************
- *	ibmtr_probe():  Routine specified in the network device structure
- *	to probe for an IBM Token Ring Adapter.  Routine outline:
- *	I.    Interrogate hardware to determine if an adapter exists
- *	      and what the speeds and feeds are
- *	II.   Setup data structures to control execution based upon
- *	      adapter characteristics.
- *
- *	We expect ibmtr_probe to be called once for each device entry
- *	which references it.
- ****************************************************************************/
-
-static int __devinit ibmtr_probe(struct net_device *dev)
-{
-	int i;
-	int base_addr = dev->base_addr;
-
-	if (base_addr && base_addr <= 0x1ff) /* Don't probe at all. */
-		return -ENXIO;
-	if (base_addr > 0x1ff) { /* Check a single specified location.  */
-		if (!ibmtr_probe1(dev, base_addr)) return 0;
-		return -ENODEV;
-	}
-	find_turbo_adapters(ibmtr_portlist);
-	for (i = 0; ibmtr_portlist[i]; i++) {
-		int ioaddr = ibmtr_portlist[i];
-
-		if (!ibmtr_probe1(dev, ioaddr)) return 0;
-	}
-	return -ENODEV;
-}
-
-int __devinit ibmtr_probe_card(struct net_device *dev)
-{
-	int err = ibmtr_probe(dev);
-	if (!err) {
-		err = register_netdev(dev);
-		if (err)
-			ibmtr_cleanup_card(dev);
-	}
-	return err;
-}
-
-/*****************************************************************************/
-
-static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
-{
-
-	unsigned char segment, intr=0, irq=0, i, j, cardpresent=NOTOK, temp=0;
-	void __iomem * t_mmio = NULL;
-	struct tok_info *ti = netdev_priv(dev);
-	void __iomem *cd_chanid;
-	unsigned char *tchanid, ctemp;
-#ifndef PCMCIA
-	unsigned char t_irq=0;
-        unsigned long timeout;
-	static int version_printed;
-#endif
-
-	/*    Query the adapter PIO base port which will return
-	 *    indication of where MMIO was placed. We also have a
-	 *    coded interrupt number.
-	 */
-	segment = inb(PIOaddr);
-	if (segment < 0x40 || segment > 0xe0) {
-		/* Out of range values so we'll assume non-existent IO device
-		 * but this is not necessarily a problem, esp if a turbo
-		 * adapter is being used.  */
-#if IBMTR_DEBUG_MESSAGES
-		DPRINTK("ibmtr_probe1(): unhappy that inb(0x%X) == 0x%X, "
-			"Hardware Problem?\n",PIOaddr,segment);
-#endif
-		return -ENODEV;
-	}
-	/*
-	 *    Compute the linear base address of the MMIO area
-	 *    as LINUX doesn't care about segments
-	 */
-	t_mmio = ioremap(((__u32) (segment & 0xfc) << 11) + 0x80000,2048);
-	if (!t_mmio) { 
-		DPRINTK("Cannot remap mmiobase memory area") ; 
-		return -ENODEV ; 
-	} 
-	intr = segment & 0x03;	/* low bits is coded interrupt # */
-	if (ibmtr_debug_trace & TRC_INIT)
-		DPRINTK("PIOaddr: %4hx seg/intr: %2x mmio base: %p intr: %d\n"
-				, PIOaddr, (int) segment, t_mmio, (int) intr);
-
-	/*
-	 *    Now we will compare expected 'channelid' strings with
-	 *    what is there to learn whether this is an ISA/MCA TR card or not
-	 */
-#ifdef PCMCIA
-	iounmap(t_mmio);
-	t_mmio = ti->mmio;	/*BMS to get virtual address */
-	irq = ti->irq;		/*BMS to display the irq!   */
-#endif
-	cd_chanid = (CHANNEL_ID + t_mmio);	/* for efficiency */
-	tchanid = pcchannelid;
-	cardpresent = TR_ISA;	/* try ISA */
-
-	/*    Suboptimize knowing first byte different */
-	ctemp = readb(cd_chanid) & 0x0f;
-	if (ctemp != *tchanid) {	/* NOT ISA card, try MCA */
-		tchanid = mcchannelid;
-		cardpresent = TR_MCA;
-		if (ctemp != *tchanid)	/* Neither ISA nor MCA */
-			cardpresent = NOTOK;
-	}
-	if (cardpresent != NOTOK) {
-		/*       Know presumed type, try rest of ID */
-		for (i = 2, j = 1; i <= 46; i = i + 2, j++) {
-			if( (readb(cd_chanid+i)&0x0f) == tchanid[j]) continue;
-			/* match failed, not TR card */
-			cardpresent = NOTOK;
-			break;
-		}
-	}
-	/* 
-	 *    If we have an ISA board check for the ISA P&P version,
-	 *    as it has different IRQ settings 
-	 */
-	if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio) == 0x0e))
-		cardpresent = TR_ISAPNP;
-	if (cardpresent == NOTOK) {	/* "channel_id" did not match, report */
-		if (!(ibmtr_debug_trace & TRC_INIT)) {
-#ifndef PCMCIA
-			iounmap(t_mmio);
-#endif
-			return -ENODEV;
-		}
-		DPRINTK( "Channel ID string not found for PIOaddr: %4hx\n",
-								PIOaddr);
-		DPRINTK("Expected for ISA: ");
-		PrtChanID(pcchannelid, 1);
-		DPRINTK("           found: ");
-/* BMS Note that this can be misleading, when hardware is flaky, because you
-   are reading it a second time here. So with my flaky hardware, I'll see my-
-   self in this block, with the HW ID matching the ISA ID exactly! */
-		HWPrtChanID(cd_chanid, 2);
-		DPRINTK("Expected for MCA: ");
-		PrtChanID(mcchannelid, 1);
-	}
-	/* Now, setup some of the pl0 buffers for this driver.. */
-	/* If called from PCMCIA, it is already set up, so no need to 
-	   waste the memory, just use the existing structure */
-#ifndef PCMCIA
-	ti->mmio = t_mmio;
-        for (i = 0; i < IBMTR_MAX_ADAPTERS; i++) {
-                if (turbo_io[i] != PIOaddr)
-			continue;
-#if IBMTR_DEBUG_MESSAGES 
-		printk("ibmtr::tr_probe1, setting PIOaddr %x to Turbo\n",
-		       PIOaddr);
-#endif
-		ti->turbo = 1;
-		t_irq = turbo_irq[i];
-        }
-#endif /* !PCMCIA */
-	ti->readlog_pending = 0;
-	init_waitqueue_head(&ti->wait_for_reset);
-
-	/* if PCMCIA, the card can be recognized as either TR_ISA or TR_ISAPNP
-	 * depending which card is inserted.	*/
-	
-#ifndef PCMCIA
-	switch (cardpresent) {
-	case TR_ISA:
-		if (intr == 0) irq = 9;	/* irq2 really is irq9 */
-		if (intr == 1) irq = 3;
-		if (intr == 2) irq = 6;
-		if (intr == 3) irq = 7;
-		ti->adapter_int_enable = PIOaddr + ADAPTINTREL;
-		break;
-	case TR_MCA:
-		if (intr == 0) irq = 9;
-		if (intr == 1) irq = 3;
-		if (intr == 2) irq = 10;
-		if (intr == 3) irq = 11;
-		ti->global_int_enable = 0;
-		ti->adapter_int_enable = 0;
-		ti->sram_phys=(__u32)(inb(PIOaddr+ADAPTRESETREL) & 0xfe) << 12;
-		break;
-	case TR_ISAPNP:
-		if (!t_irq) {
-			if (intr == 0) irq = 9;
-			if (intr == 1) irq = 3;
-			if (intr == 2) irq = 10;
-			if (intr == 3) irq = 11;
-		} else
-			irq=t_irq;
-		timeout = jiffies + TR_SPIN_INTERVAL;
-		while (!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)){
-			if (!time_after(jiffies, timeout)) continue;
-			DPRINTK( "Hardware timeout during initialization.\n");
-			iounmap(t_mmio);
-			return -ENODEV;
-		}
-		ti->sram_phys =
-		     ((__u32)readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_EVEN)<<12);
-		ti->adapter_int_enable = PIOaddr + ADAPTINTREL;
-		break;
-	} /*end switch (cardpresent) */
-#endif	/*not PCMCIA */
-
-	if (ibmtr_debug_trace & TRC_INIT) {	/* just report int */
-		DPRINTK("irq=%d", irq);
-		printk(", sram_phys=0x%x", ti->sram_phys);
-		if(ibmtr_debug_trace&TRC_INITV){ /* full chat in verbose only */
-			DPRINTK(", ti->mmio=%p", ti->mmio);
-			printk(", segment=%02X", segment);
-		}
-		printk(".\n");
-	}
-
-	/* Get hw address of token ring card */
-	j = 0;
-	for (i = 0; i < 0x18; i = i + 2) {
-		/* technical reference states to do this */
-		temp = readb(ti->mmio + AIP + i) & 0x0f;
-		ti->hw_address[j] = temp;
-		if (j & 1)
-			dev->dev_addr[(j / 2)] =
-				ti->hw_address[j]+ (ti->hw_address[j - 1] << 4);
-		++j;
-	}
-	/* get Adapter type:  'F' = Adapter/A, 'E' = 16/4 Adapter II,... */
-	ti->adapter_type = readb(ti->mmio + AIPADAPTYPE);
-
-	/* get Data Rate:  F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */
-	ti->data_rate = readb(ti->mmio + AIPDATARATE);
-
-	/* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */
-	ti->token_release = readb(ti->mmio + AIPEARLYTOKEN);
-
-	/* How much shared RAM is on adapter ? */
-	if (ti->turbo) {
-		ti->avail_shared_ram=127;
-	} else {
-		ti->avail_shared_ram = get_sram_size(ti);/*in 512 byte units */
-	}
-	/* We need to set or do a bunch of work here based on previous results*/
-	/* Support paging?  What sizes?:  F=no, E=16k, D=32k, C=16 & 32k */
-	ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE);
-
-	/* Available DHB  4Mb size:   F=2048, E=4096, D=4464 */
-	switch (readb(ti->mmio + AIP4MBDHB)) {
-	case 0xe: ti->dhb_size4mb = 4096; break;
-	case 0xd: ti->dhb_size4mb = 4464; break;
-	default:  ti->dhb_size4mb = 2048; break;
-	}
-
-	/* Available DHB 16Mb size:  F=2048, E=4096, D=8192, C=16384, B=17960 */
-	switch (readb(ti->mmio + AIP16MBDHB)) {
-	case 0xe: ti->dhb_size16mb = 4096; break;
-	case 0xd: ti->dhb_size16mb = 8192; break;
-	case 0xc: ti->dhb_size16mb = 16384; break;
-	case 0xb: ti->dhb_size16mb = 17960; break;
-	default:  ti->dhb_size16mb = 2048; break;
-	}
-
-	/*    We must figure out how much shared memory space this adapter
-	 *    will occupy so that if there are two adapters we can fit both
-	 *    in.  Given a choice, we will limit this adapter to 32K.  The
-	 *    maximum space we will use for two adapters is 64K so if the
-	 *    adapter we are working on demands 64K (it also doesn't support
-	 *    paging), then only one adapter can be supported.  
-	 */
-
-	/*
-	 *    determine how much of total RAM is mapped into PC space 
-	 */
-	ti->mapped_ram_size= /*sixteen to onehundredtwentyeight 512byte blocks*/
-	    1<< ((readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03) + 4);
-	ti->page_mask = 0;
-	if (ti->turbo)  ti->page_mask=0xf0;
-	else if (ti->shared_ram_paging == 0xf);  /* No paging in adapter */
-	else {
-#ifdef ENABLE_PAGING
-		unsigned char pg_size = 0;
-		/* BMS:   page size: PCMCIA, use configuration register;
-		   ISAPNP, use LANAIDC config tool from www.ibm.com  */
-		switch (ti->shared_ram_paging) {
-		case 0xf:
-			break;
-		case 0xe:
-			ti->page_mask = (ti->mapped_ram_size == 32) ? 0xc0 : 0;
-			pg_size = 32;	/* 16KB page size */
-			break;
-		case 0xd:
-			ti->page_mask = (ti->mapped_ram_size == 64) ? 0x80 : 0;
-			pg_size = 64;	/* 32KB page size */
-			break;
-		case 0xc:
-			switch (ti->mapped_ram_size) {
-			case 32:
-				ti->page_mask = 0xc0;
-				pg_size = 32;
-				break;
-			case 64:
-				ti->page_mask = 0x80;
-				pg_size = 64;
-				break;
-			}
-			break;
-		default:
-			DPRINTK("Unknown shared ram paging info %01X\n",
-							ti->shared_ram_paging);
-			iounmap(t_mmio); 
-			return -ENODEV;
-			break;
-		} /*end switch shared_ram_paging */
-
-		if (ibmtr_debug_trace & TRC_INIT)
-			DPRINTK("Shared RAM paging code: %02X, "
-				"mapped RAM size: %dK, shared RAM size: %dK, "
-				"page mask: %02X\n:",
-				ti->shared_ram_paging, ti->mapped_ram_size / 2,
-				ti->avail_shared_ram / 2, ti->page_mask);
-#endif	/*ENABLE_PAGING */
-	}
-
-#ifndef PCMCIA
-	/* finish figuring the shared RAM address */
-	if (cardpresent == TR_ISA) {
-		static const __u32 ram_bndry_mask[] = {
-			0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000
-		};
-		__u32 new_base, rrr_32, chk_base, rbm;
-
-		rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03;
-		rbm = ram_bndry_mask[rrr_32];
-		new_base = (ibmtr_mem_base + (~rbm)) & rbm;/* up to boundary */
-		chk_base = new_base + (ti->mapped_ram_size << 9);
-		if (chk_base > (ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE)) {
-			DPRINTK("Shared RAM for this adapter (%05x) exceeds "
-			"driver limit (%05x), adapter not started.\n",
-			chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE);
-			iounmap(t_mmio);
-			return -ENODEV;
-		} else { /* seems cool, record what we have figured out */
-			ti->sram_base = new_base >> 12;
-			ibmtr_mem_base = chk_base;
-		}
-	}
-	else  ti->sram_base = ti->sram_phys >> 12;
-
-	/* The PCMCIA has already got the interrupt line and the io port, 
-	   so no chance of anybody else getting it - MLP */
-	if (request_irq(dev->irq = irq, tok_interrupt, 0, "ibmtr", dev) != 0) {
-		DPRINTK("Could not grab irq %d.  Halting Token Ring driver.\n",
-					irq);
-		iounmap(t_mmio);
-		return -ENODEV;
-	}
-	/*?? Now, allocate some of the PIO PORTs for this driver.. */
-	/* record PIOaddr range as busy */
-	if (!request_region(PIOaddr, IBMTR_IO_EXTENT, "ibmtr")) {
-		DPRINTK("Could not grab PIO range. Halting driver.\n");
-		free_irq(dev->irq, dev);
-		iounmap(t_mmio);
-		return -EBUSY;
-	}
-
-	if (!version_printed++) {
-		printk(version);
-	}
-#endif /* !PCMCIA */
-	DPRINTK("%s %s found\n",
-		channel_def[cardpresent - 1], adapter_def(ti->adapter_type));
-	DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n",
-			irq, PIOaddr, ti->mapped_ram_size / 2);
-	DPRINTK("Hardware address : %pM\n", dev->dev_addr);
-	if (ti->page_mask)
-		DPRINTK("Shared RAM paging enabled. "
-			"Page size: %uK Shared Ram size %dK\n",
-			((ti->page_mask^0xff)+1) >>2, ti->avail_shared_ram / 2);
-	else
-		DPRINTK("Shared RAM paging disabled. ti->page_mask %x\n",
-								ti->page_mask);
-
-	/* Calculate the maximum DHB we can use */
-	/* two cases where avail_shared_ram doesn't equal mapped_ram_size:
-	    1. avail_shared_ram is 127 but mapped_ram_size is 128 (typical)
-	    2. user has configured adapter for less than avail_shared_ram
-	       but is not using paging (she should use paging, I believe)
-	*/
-	if (!ti->page_mask) {
-		ti->avail_shared_ram=
-				min(ti->mapped_ram_size,ti->avail_shared_ram);
-	}
-
-	switch (ti->avail_shared_ram) {
-	case 16:		/* 8KB shared RAM */
-		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)2048);
-		ti->rbuf_len4 = 1032;
-		ti->rbuf_cnt4=2;
-		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)2048);
-		ti->rbuf_len16 = 1032;
-		ti->rbuf_cnt16=2;
-		break;
-	case 32:		/* 16KB shared RAM */
-		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
-		ti->rbuf_len4 = 1032;
-		ti->rbuf_cnt4=4;
-		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)4096);
-		ti->rbuf_len16 = 1032;	/*1024 usable */
-		ti->rbuf_cnt16=4;
-		break;
-	case 64:		/* 32KB shared RAM */
-		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
-		ti->rbuf_len4 = 1032;
-		ti->rbuf_cnt4=6;
-		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)10240);
-		ti->rbuf_len16 = 1032;
-		ti->rbuf_cnt16=6;
-		break;
-	case 127:		/* 63.5KB shared RAM */
-		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
-		ti->rbuf_len4 = 1032;
-		ti->rbuf_cnt4=6;
-		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)16384);
-		ti->rbuf_len16 = 1032;
-		ti->rbuf_cnt16=16;
-		break;
-	case 128:		/* 64KB   shared RAM */
-		ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
-		ti->rbuf_len4 = 1032;
-		ti->rbuf_cnt4=6;
-		ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)17960);
-		ti->rbuf_len16 = 1032;
-		ti->rbuf_cnt16=16;
-		break;
-	default:
-		ti->dhb_size4mb = 2048;
-		ti->rbuf_len4 = 1032;
-		ti->rbuf_cnt4=2;
-		ti->dhb_size16mb = 2048;
-		ti->rbuf_len16 = 1032;
-		ti->rbuf_cnt16=2;
-		break;
-	}
-	/* this formula is not smart enough for the paging case
-	ti->rbuf_cnt<x> = (ti->avail_shared_ram * BLOCKSZ - ADAPT_PRIVATE -
-			ARBLENGTH - SSBLENGTH - DLC_MAX_SAP * SAPLENGTH -
-			DLC_MAX_STA * STALENGTH - ti->dhb_size<x>mb * NUM_DHB -
-			SRBLENGTH - ASBLENGTH) / ti->rbuf_len<x>;
-	*/
-	ti->maxmtu16 = (ti->rbuf_len16 - 8) * ti->rbuf_cnt16  - TR_HLEN;
-	ti->maxmtu4 = (ti->rbuf_len4 - 8) * ti->rbuf_cnt4 - TR_HLEN;
-	/*BMS assuming 18 bytes of Routing Information (usually works) */
-	DPRINTK("Maximum Receive Internet Protocol MTU 16Mbps: %d, 4Mbps: %d\n",
-						     ti->maxmtu16, ti->maxmtu4);
-
-	dev->base_addr = PIOaddr;	/* set the value for device */
-	dev->mem_start = ti->sram_base << 12;
-	dev->mem_end = dev->mem_start + (ti->mapped_ram_size << 9) - 1;
-	trdev_init(dev);
-	return 0;   /* Return 0 to indicate we have found a Token Ring card. */
-}				/*ibmtr_probe1() */
-
-/*****************************************************************************/
-
-/* query the adapter for the size of shared RAM  */
-/* the function returns the RAM size in units of 512 bytes */
-
-static unsigned char __devinit get_sram_size(struct tok_info *adapt_info)
-{
-	unsigned char avail_sram_code;
-	static unsigned char size_code[] = { 0, 16, 32, 64, 127, 128 };
-	/* Adapter gives
-	   'F' -- use RRR bits 3,2
-	   'E' -- 8kb   'D' -- 16kb
-	   'C' -- 32kb  'A' -- 64KB
-	   'B' - 64KB less 512 bytes at top
-	   (WARNING ... must zero top bytes in INIT */
-
-	avail_sram_code = 0xf - readb(adapt_info->mmio + AIPAVAILSHRAM);
-	if (avail_sram_code) return size_code[avail_sram_code];
-	else		/* for code 'F', must compute size from RRR(3,2) bits */
-		return 1 <<
-		 ((readb(adapt_info->mmio+ACA_OFFSET+ACA_RW+RRR_ODD)>>2&3)+4);
-}
-
-/*****************************************************************************/
-
-static const struct net_device_ops trdev_netdev_ops = {
-	.ndo_open		= tok_open,
-	.ndo_stop		= tok_close,
-	.ndo_start_xmit		= tok_send_packet,
-	.ndo_set_rx_mode	= tok_set_multicast_list,
-	.ndo_change_mtu		= ibmtr_change_mtu,
-};
-
-static int __devinit trdev_init(struct net_device *dev)
-{
-	struct tok_info *ti = netdev_priv(dev);
-
-	SET_PAGE(ti->srb_page);
-        ti->open_failure = NO    ;
-	dev->netdev_ops = &trdev_netdev_ops;
-
-	return 0;
-}
-
-/*****************************************************************************/
-
-static int tok_init_card(struct net_device *dev)
-{
-	struct tok_info *ti;
-	short PIOaddr;
-	unsigned long i;
-
-	PIOaddr = dev->base_addr;
-	ti = netdev_priv(dev);
-	/* Special processing for first interrupt after reset */
-	ti->do_tok_int = FIRST_INT;
-	/* Reset adapter */
-	writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
-	outb(0, PIOaddr + ADAPTRESET);
-
-	schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
-
-	outb(0, PIOaddr + ADAPTRESETREL);
-#ifdef ENABLE_PAGING
-	if (ti->page_mask)
-		writeb(SRPR_ENABLE_PAGING,ti->mmio+ACA_OFFSET+ACA_RW+SRPR_EVEN);
-#endif
-	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
-	i = sleep_on_timeout(&ti->wait_for_reset, 4 * HZ);
-	return i? 0 : -EAGAIN;
-}
-
-/*****************************************************************************/
-static int tok_open(struct net_device *dev)
-{
-	struct tok_info *ti = netdev_priv(dev);
-	int i;
-
-	/*the case we were left in a failure state during a previous open */
-	if (ti->open_failure == YES) {
-		DPRINTK("Last time you were disconnected, how about now?\n");
-		printk("You can't insert with an ICS connector half-cocked.\n");
-	}
-
-	ti->open_status  = CLOSED; /* CLOSED or OPEN      */
-	ti->sap_status   = CLOSED; /* CLOSED or OPEN      */
-	ti->open_failure =     NO; /* NO     or YES       */
-	ti->open_mode    = MANUAL; /* MANUAL or AUTOMATIC */
-
-	ti->sram_phys &= ~1; /* to reverse what we do in tok_close */
-	/* init the spinlock */
-	spin_lock_init(&ti->lock);
-	init_timer(&ti->tr_timer);
-	
-	i = tok_init_card(dev);
-	if (i) return i;
-
-	while (1){
-		tok_open_adapter((unsigned long) dev);
-		i= interruptible_sleep_on_timeout(&ti->wait_for_reset, 25 * HZ);
-		/* sig catch: estimate opening adapter takes more than .5 sec*/
-		if (i>(245*HZ)/10) break; /* fancier than if (i==25*HZ) */
-		if (i==0) break;
-		if (ti->open_status == OPEN && ti->sap_status==OPEN) {
-			netif_start_queue(dev);
-			DPRINTK("Adapter is up and running\n");
-			return 0;
-		}
-		i=schedule_timeout_interruptible(TR_RETRY_INTERVAL);
-							/* wait 30 seconds */
-		if(i!=0) break; /*prob. a signal, like the i>24*HZ case above */
-	}
-	outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/
-	DPRINTK("TERMINATED via signal\n");	/*BMS useful */
-	return -EAGAIN;
-}
-
-/*****************************************************************************/
-
-#define COMMAND_OFST             0
-#define OPEN_OPTIONS_OFST        8
-#define NUM_RCV_BUF_OFST        24
-#define RCV_BUF_LEN_OFST        26
-#define DHB_LENGTH_OFST         28
-#define NUM_DHB_OFST            30
-#define DLC_MAX_SAP_OFST        32
-#define DLC_MAX_STA_OFST        33
-
-static void tok_open_adapter(unsigned long dev_addr)
-{
-	struct net_device *dev = (struct net_device *) dev_addr;
-	struct tok_info *ti;
-	int i;
-
-	ti = netdev_priv(dev);
-	SET_PAGE(ti->init_srb_page); 
-	writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
-	for (i = 0; i < sizeof(struct dir_open_adapter); i++)
-		writeb(0, ti->init_srb + i);
-	writeb(DIR_OPEN_ADAPTER, ti->init_srb + COMMAND_OFST);
-	writew(htons(OPEN_PASS_BCON_MAC), ti->init_srb + OPEN_OPTIONS_OFST);
-	if (ti->ring_speed == 16) {
-		writew(htons(ti->dhb_size16mb), ti->init_srb + DHB_LENGTH_OFST);
-		writew(htons(ti->rbuf_cnt16), ti->init_srb + NUM_RCV_BUF_OFST);
-		writew(htons(ti->rbuf_len16), ti->init_srb + RCV_BUF_LEN_OFST);
-	} else {
-		writew(htons(ti->dhb_size4mb), ti->init_srb + DHB_LENGTH_OFST);
-		writew(htons(ti->rbuf_cnt4), ti->init_srb + NUM_RCV_BUF_OFST);
-		writew(htons(ti->rbuf_len4), ti->init_srb + RCV_BUF_LEN_OFST);
-	}
-	writeb(NUM_DHB,		/* always 2 */ ti->init_srb + NUM_DHB_OFST);
-	writeb(DLC_MAX_SAP, ti->init_srb + DLC_MAX_SAP_OFST);
-	writeb(DLC_MAX_STA, ti->init_srb + DLC_MAX_STA_OFST);
-	ti->srb = ti->init_srb;	/* We use this one in the interrupt handler */
-	ti->srb_page = ti->init_srb_page;
-	DPRINTK("Opening adapter: Xmit bfrs: %d X %d, Rcv bfrs: %d X %d\n",
-		readb(ti->init_srb + NUM_DHB_OFST),
-		ntohs(readw(ti->init_srb + DHB_LENGTH_OFST)),
-		ntohs(readw(ti->init_srb + NUM_RCV_BUF_OFST)),
-		ntohs(readw(ti->init_srb + RCV_BUF_LEN_OFST)));
-	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
-	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-}
-
-/*****************************************************************************/
-
-static void open_sap(unsigned char type, struct net_device *dev)
-{
-	int i;
-	struct tok_info *ti = netdev_priv(dev);
-
-	SET_PAGE(ti->srb_page);
-	for (i = 0; i < sizeof(struct dlc_open_sap); i++)
-		writeb(0, ti->srb + i);
-
-#define MAX_I_FIELD_OFST        14
-#define SAP_VALUE_OFST          16
-#define SAP_OPTIONS_OFST        17
-#define STATION_COUNT_OFST      18
-
-	writeb(DLC_OPEN_SAP, ti->srb + COMMAND_OFST);
-	writew(htons(MAX_I_FIELD), ti->srb + MAX_I_FIELD_OFST);
-	writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, ti->srb+ SAP_OPTIONS_OFST);
-	writeb(SAP_OPEN_STATION_CNT, ti->srb + STATION_COUNT_OFST);
-	writeb(type, ti->srb + SAP_VALUE_OFST);
-	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-}
-
-
-/*****************************************************************************/
-
-static void tok_set_multicast_list(struct net_device *dev)
-{
-	struct tok_info *ti = netdev_priv(dev);
-	struct netdev_hw_addr *ha;
-	unsigned char address[4];
-
-	int i;
-
-	/*BMS the next line is CRUCIAL or you may be sad when you */
-	/*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
-	if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
-	address[0] = address[1] = address[2] = address[3] = 0;
-	netdev_for_each_mc_addr(ha, dev) {
-		address[0] |= ha->addr[2];
-		address[1] |= ha->addr[3];
-		address[2] |= ha->addr[4];
-		address[3] |= ha->addr[5];
-	}
-	SET_PAGE(ti->srb_page);
-	for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
-		writeb(0, ti->srb + i);
-
-#define FUNCT_ADDRESS_OFST 6
-
-	writeb(DIR_SET_FUNC_ADDR, ti->srb + COMMAND_OFST);
-	for (i = 0; i < 4; i++) 
-		writeb(address[i], ti->srb + FUNCT_ADDRESS_OFST + i);
-	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-#if TR_VERBOSE
-	DPRINTK("Setting functional address: ");
-	for (i=0;i<4;i++)  printk("%02X ", address[i]);
-	printk("\n");
-#endif
-}
-
-/*****************************************************************************/
-
-#define STATION_ID_OFST 4
-
-static netdev_tx_t tok_send_packet(struct sk_buff *skb,
-					 struct net_device *dev)
-{
-	struct tok_info *ti;
-	unsigned long flags;
-	ti = netdev_priv(dev);
-
-        netif_stop_queue(dev);
-
-	/* lock against other CPUs */
-	spin_lock_irqsave(&(ti->lock), flags);
-
-	/* Save skb; we'll need it when the adapter asks for the data */
-	ti->current_skb = skb;
-	SET_PAGE(ti->srb_page);
-	writeb(XMIT_UI_FRAME, ti->srb + COMMAND_OFST);
-	writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST);
-	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-	spin_unlock_irqrestore(&(ti->lock), flags);
-	return NETDEV_TX_OK;
-}
-
-/*****************************************************************************/
-
-static int tok_close(struct net_device *dev)
-{
-	struct tok_info *ti = netdev_priv(dev);
-
-	/* Important for PCMCIA hot unplug, otherwise, we'll pull the card, */
-	/* unloading the module from memory, and then if a timer pops, ouch */
-	del_timer_sync(&ti->tr_timer);
-	outb(0, dev->base_addr + ADAPTRESET);
-	ti->sram_phys |= 1;
-	ti->open_status = CLOSED;
-
-	netif_stop_queue(dev);
-	DPRINTK("Adapter is closed.\n");
-	return 0;
-}
-
-/*****************************************************************************/
-
-#define RETCODE_OFST		2
-#define OPEN_ERROR_CODE_OFST	6
-#define ASB_ADDRESS_OFST        8
-#define SRB_ADDRESS_OFST        10
-#define ARB_ADDRESS_OFST        12
-#define SSB_ADDRESS_OFST        14
-
-static char *printphase[]= {"Lobe media test","Physical insertion",
-	      "Address verification","Roll call poll","Request Parameters"};
-static char *printerror[]={"Function failure","Signal loss","Reserved",
-		"Frequency error","Timeout","Ring failure","Ring beaconing",
-		"Duplicate node address",
-		"Parameter request-retry count exceeded","Remove received",
-		"IMPL force received","Duplicate modifier",
-		"No monitor detected","Monitor contention failed for RPL"};
-
-static void __iomem *map_address(struct tok_info *ti, unsigned index, __u8 *page)
-{
-	if (ti->page_mask) {
-		*page = (index >> 8) & ti->page_mask;
-		index &= ~(ti->page_mask << 8);
-	}
-	return ti->sram_virt + index;
-}
-
-static void dir_open_adapter (struct net_device *dev)
-{
-        struct tok_info *ti = netdev_priv(dev);
-        unsigned char ret_code;
-        __u16 err;
-
-        ti->srb = map_address(ti,
-		ntohs(readw(ti->init_srb + SRB_ADDRESS_OFST)),
-		&ti->srb_page);
-        ti->ssb = map_address(ti,
-		ntohs(readw(ti->init_srb + SSB_ADDRESS_OFST)),
-		&ti->ssb_page);
-        ti->arb = map_address(ti,
-		ntohs(readw(ti->init_srb + ARB_ADDRESS_OFST)),
-		&ti->arb_page);
-        ti->asb = map_address(ti,
-		ntohs(readw(ti->init_srb + ASB_ADDRESS_OFST)),
-		&ti->asb_page);
-        ti->current_skb = NULL;
-        ret_code = readb(ti->init_srb + RETCODE_OFST);
-        err = ntohs(readw(ti->init_srb + OPEN_ERROR_CODE_OFST));
-        if (!ret_code) {
-		ti->open_status = OPEN; /* TR adapter is now available */
-                if (ti->open_mode == AUTOMATIC) {
-			DPRINTK("Adapter reopened.\n");
-                }
-                writeb(~SRB_RESP_INT, ti->mmio+ACA_OFFSET+ACA_RESET+ISRP_ODD);
-                open_sap(EXTENDED_SAP, dev);
-		return;
-	}
-	ti->open_failure = YES;
-	if (ret_code == 7){
-               if (err == 0x24) {
-			if (!ti->auto_speedsave) {
-				DPRINTK("Open failed: Adapter speed must match "
-                                 "ring speed if Automatic Ring Speed Save is "
-				 "disabled.\n");
-				ti->open_action = FAIL;
-			}else
-				DPRINTK("Retrying open to adjust to "
-					"ring speed, ");
-                } else if (err == 0x2d) {
-			DPRINTK("Physical Insertion: No Monitor Detected, ");
-			printk("retrying after %ds delay...\n",
-					TR_RETRY_INTERVAL/HZ);
-                } else if (err == 0x11) {
-			DPRINTK("Lobe Media Function Failure (0x11), ");
-			printk(" retrying after %ds delay...\n",
-					TR_RETRY_INTERVAL/HZ);
-                } else {
-			char **prphase = printphase;
-			char **prerror = printerror;
-			int pnr = err / 16 - 1;
-			int enr = err % 16 - 1;
-			DPRINTK("TR Adapter misc open failure, error code = ");
-			if (pnr < 0 || pnr >= ARRAY_SIZE(printphase) ||
-					enr < 0 ||
-					enr >= ARRAY_SIZE(printerror))
-				printk("0x%x, invalid Phase/Error.", err);
-			else
-				printk("0x%x, Phase: %s, Error: %s\n", err,
-						prphase[pnr], prerror[enr]);
-			printk(" retrying after %ds delay...\n",
-					TR_RETRY_INTERVAL/HZ);
-                }
-        } else DPRINTK("open failed: ret_code = %02X..., ", ret_code);
-	if (ti->open_action != FAIL) {
-		if (ti->open_mode==AUTOMATIC){
-			ti->open_action = REOPEN;
-			ibmtr_reset_timer(&(ti->tr_timer), dev);
-			return;
-		}
-		wake_up(&ti->wait_for_reset);
-		return;
-	}
-	DPRINTK("FAILURE, CAPUT\n");
-}
-
-/******************************************************************************/
-
-static irqreturn_t tok_interrupt(int irq, void *dev_id)
-{
-	unsigned char status;
-	/*  unsigned char status_even ; */
-	struct tok_info *ti;
-	struct net_device *dev;
-#ifdef ENABLE_PAGING
-	unsigned char save_srpr;
-#endif
-
-	dev = dev_id;
-#if TR_VERBOSE
-	DPRINTK("Int from tok_driver, dev : %p irq%d\n", dev,irq);
-#endif
-	ti = netdev_priv(dev);
-	if (ti->sram_phys & 1)
-		return IRQ_NONE;         /* PCMCIA card extraction flag */
-	spin_lock(&(ti->lock));
-#ifdef ENABLE_PAGING
-	save_srpr = readb(ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
-#endif
-
-	/* Disable interrupts till processing is finished */
-	writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
-
-	/* Reset interrupt for ISA boards */
-	if (ti->adapter_int_enable)
-		outb(0, ti->adapter_int_enable);
-	else /* used for PCMCIA cards */
-		outb(0, ti->global_int_enable);
-        if (ti->do_tok_int == FIRST_INT){
-                initial_tok_int(dev);
-#ifdef ENABLE_PAGING
-                writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
-#endif
-                spin_unlock(&(ti->lock));
-                return IRQ_HANDLED;
-        }
-	/*  Begin interrupt handler HERE inline to avoid the extra
-	    levels of logic and call depth for the original solution. */
-	status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD);
-	/*BMSstatus_even = readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) */
-	/*BMSdebugprintk("tok_interrupt: ISRP_ODD = 0x%x ISRP_EVEN = 0x%x\n", */
-	/*BMS                                       status,status_even);      */
-
-	if (status & ADAP_CHK_INT) {
-		int i;
-		void __iomem *check_reason;
-		__u8 check_reason_page = 0;
-		check_reason = map_address(ti,
-			ntohs(readw(ti->mmio+ ACA_OFFSET+ACA_RW + WWCR_EVEN)),
-			&check_reason_page);
-		SET_PAGE(check_reason_page);
-
-		DPRINTK("Adapter check interrupt\n");
-		DPRINTK("8 reason bytes follow: ");
-		for (i = 0; i < 8; i++, check_reason++)
-			printk("%02X ", (int) readb(check_reason));
-		printk("\n");
-		writeb(~ADAP_CHK_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
-		status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRA_EVEN);
-		DPRINTK("ISRA_EVEN == 0x%02x\n", status);
-		ti->open_status = CLOSED;
-		ti->sap_status  = CLOSED;
-		ti->open_mode   = AUTOMATIC;
-		netif_carrier_off(dev);
-		netif_stop_queue(dev);
-		ti->open_action = RESTART;
-		outb(0, dev->base_addr + ADAPTRESET);
-		ibmtr_reset_timer(&(ti->tr_timer), dev);/*BMS try to reopen*/
-		spin_unlock(&(ti->lock));
-		return IRQ_HANDLED;
-	}
-	if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)
-		& (TCR_INT | ERR_INT | ACCESS_INT)) {
-		DPRINTK("adapter error: ISRP_EVEN : %02x\n",
-			(int)readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRP_EVEN));
-		writeb(~(TCR_INT | ERR_INT | ACCESS_INT),
-			ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
-		status= readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRA_EVEN);/*BMS*/
-		DPRINTK("ISRA_EVEN == 0x%02x\n", status);/*BMS*/
-                writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
-#ifdef ENABLE_PAGING
-                writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
-#endif
-                spin_unlock(&(ti->lock));
-                return IRQ_HANDLED;
-        }
-	if (status & SRB_RESP_INT) {	/* SRB response */
-		SET_PAGE(ti->srb_page);
-#if TR_VERBOSE
-		DPRINTK("SRB resp: cmd=%02X rsp=%02X\n",
-				readb(ti->srb), readb(ti->srb + RETCODE_OFST));
-#endif
-		switch (readb(ti->srb)) {	/* SRB command check */
-		case XMIT_DIR_FRAME:{
-			unsigned char xmit_ret_code;
-			xmit_ret_code = readb(ti->srb + RETCODE_OFST);
-			if (xmit_ret_code == 0xff) break;
-			DPRINTK("error on xmit_dir_frame request: %02X\n",
-								xmit_ret_code);
-			if (ti->current_skb) {
-				dev_kfree_skb_irq(ti->current_skb);
-				ti->current_skb = NULL;
-			}
-			/*dev->tbusy = 0;*/
-			netif_wake_queue(dev);
-			if (ti->readlog_pending)
-				ibmtr_readlog(dev);
-			break;
-		}
-		case XMIT_UI_FRAME:{
-			unsigned char xmit_ret_code;
-
-			xmit_ret_code = readb(ti->srb + RETCODE_OFST);
-			if (xmit_ret_code == 0xff) break;
-			DPRINTK("error on xmit_ui_frame request: %02X\n",
-								xmit_ret_code);
-			if (ti->current_skb) {
-				dev_kfree_skb_irq(ti->current_skb);
-				ti->current_skb = NULL;
-			}
-			netif_wake_queue(dev);
-			if (ti->readlog_pending)
-				ibmtr_readlog(dev);
-			break;
-		}
-		case DIR_OPEN_ADAPTER:
-			dir_open_adapter(dev);
-			break;
-		case DLC_OPEN_SAP:
-			if (readb(ti->srb + RETCODE_OFST)) {
-				DPRINTK("open_sap failed: ret_code = %02X, "
-					"retrying\n",
-					(int) readb(ti->srb + RETCODE_OFST));
-				ti->open_action = REOPEN;
-				ibmtr_reset_timer(&(ti->tr_timer), dev);
-				break;
-			}
-			ti->exsap_station_id = readw(ti->srb + STATION_ID_OFST);
-			ti->sap_status = OPEN;/* TR adapter is now available */
-			if (ti->open_mode==MANUAL){
-				wake_up(&ti->wait_for_reset);
-				break;
-			}
-			netif_wake_queue(dev);
-			netif_carrier_on(dev);
-			break;
-		case DIR_INTERRUPT:
-		case DIR_MOD_OPEN_PARAMS:
-		case DIR_SET_GRP_ADDR:
-		case DIR_SET_FUNC_ADDR:
-		case DLC_CLOSE_SAP:
-			if (readb(ti->srb + RETCODE_OFST))
-				DPRINTK("error on %02X: %02X\n",
-					(int) readb(ti->srb + COMMAND_OFST),
-					(int) readb(ti->srb + RETCODE_OFST));
-			break;
-		case DIR_READ_LOG:
-			if (readb(ti->srb + RETCODE_OFST)){
-				DPRINTK("error on dir_read_log: %02X\n",
-					(int) readb(ti->srb + RETCODE_OFST));
-				netif_wake_queue(dev);
-				break;
-			}
-#if IBMTR_DEBUG_MESSAGES
-
-#define LINE_ERRORS_OFST                 0
-#define INTERNAL_ERRORS_OFST             1
-#define BURST_ERRORS_OFST                2
-#define AC_ERRORS_OFST                   3
-#define ABORT_DELIMITERS_OFST            4
-#define LOST_FRAMES_OFST                 6
-#define RECV_CONGEST_COUNT_OFST          7
-#define FRAME_COPIED_ERRORS_OFST         8
-#define FREQUENCY_ERRORS_OFST            9
-#define TOKEN_ERRORS_OFST               10
-
-			DPRINTK("Line errors %02X, Internal errors %02X, "
-			"Burst errors %02X\n" "A/C errors %02X, "
-			"Abort delimiters %02X, Lost frames %02X\n"
-			"Receive congestion count %02X, "
-			"Frame copied errors %02X\nFrequency errors %02X, "
-			"Token errors %02X\n",
-			(int) readb(ti->srb + LINE_ERRORS_OFST),
-			(int) readb(ti->srb + INTERNAL_ERRORS_OFST),
-			(int) readb(ti->srb + BURST_ERRORS_OFST),
-			(int) readb(ti->srb + AC_ERRORS_OFST),
-			(int) readb(ti->srb + ABORT_DELIMITERS_OFST),
-			(int) readb(ti->srb + LOST_FRAMES_OFST),
-			(int) readb(ti->srb + RECV_CONGEST_COUNT_OFST),
-			(int) readb(ti->srb + FRAME_COPIED_ERRORS_OFST),
-			(int) readb(ti->srb + FREQUENCY_ERRORS_OFST),
-			(int) readb(ti->srb + TOKEN_ERRORS_OFST));
-#endif
-			netif_wake_queue(dev);
-			break;
-		default:
-			DPRINTK("Unknown command %02X encountered\n",
-						(int) readb(ti->srb));
-        	}	/* end switch SRB command check */
-		writeb(~SRB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
-	}	/* if SRB response */
-	if (status & ASB_FREE_INT) {	/* ASB response */
-		SET_PAGE(ti->asb_page);
-#if TR_VERBOSE
-		DPRINTK("ASB resp: cmd=%02X\n", readb(ti->asb));
-#endif
-
-		switch (readb(ti->asb)) {	/* ASB command check */
-		case REC_DATA:
-		case XMIT_UI_FRAME:
-		case XMIT_DIR_FRAME:
-			break;
-		default:
-			DPRINTK("unknown command in asb %02X\n",
-						(int) readb(ti->asb));
-		}	/* switch ASB command check */
-		if (readb(ti->asb + 2) != 0xff)	/* checks ret_code */
-			DPRINTK("ASB error %02X in cmd %02X\n",
-				(int) readb(ti->asb + 2), (int) readb(ti->asb));
-		writeb(~ASB_FREE_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
-	}	/* if ASB response */
-
-#define STATUS_OFST             6
-#define NETW_STATUS_OFST        6
-
-	if (status & ARB_CMD_INT) {	/* ARB response */
-		SET_PAGE(ti->arb_page);
-#if TR_VERBOSE
-		DPRINTK("ARB resp: cmd=%02X\n", readb(ti->arb));
-#endif
-
-		switch (readb(ti->arb)) {	/* ARB command check */
-		case DLC_STATUS:
-			DPRINTK("DLC_STATUS new status: %02X on station %02X\n",
-				ntohs(readw(ti->arb + STATUS_OFST)),
-				ntohs(readw(ti->arb+ STATION_ID_OFST)));
-			break;
-		case REC_DATA:
-			tr_rx(dev);
-			break;
-		case RING_STAT_CHANGE:{
-			unsigned short ring_status;
-			ring_status= ntohs(readw(ti->arb + NETW_STATUS_OFST));
-			if (ibmtr_debug_trace & TRC_INIT)
-				DPRINTK("Ring Status Change...(0x%x)\n",
-								ring_status);
-			if(ring_status& (REMOVE_RECV|AUTO_REMOVAL|LOBE_FAULT)){
-				netif_stop_queue(dev);
-				netif_carrier_off(dev);
-				DPRINTK("Remove received, or Auto-removal error"
-					", or Lobe fault\n");
-				DPRINTK("We'll try to reopen the closed adapter"
-					" after a %d second delay.\n",
-						TR_RETRY_INTERVAL/HZ);
-				/*I was confused: I saw the TR reopening but */
-				/*forgot:with an RJ45 in an RJ45/ICS adapter */
-				/*but adapter not in the ring, the TR will   */
-				/* open, and then soon close and come here.  */
-				ti->open_mode = AUTOMATIC;
-				ti->open_status = CLOSED; /*12/2000 BMS*/
-				ti->open_action = REOPEN;
-				ibmtr_reset_timer(&(ti->tr_timer), dev);
-			} else if (ring_status & LOG_OVERFLOW) {
-				if(netif_queue_stopped(dev))
-					ti->readlog_pending = 1;
-				else
-					ibmtr_readlog(dev);
-			}
-			break;
-          	}
-		case XMIT_DATA_REQ:
-			tr_tx(dev);
-			break;
-		default:
-			DPRINTK("Unknown command %02X in arb\n",
-						(int) readb(ti->arb));
-			break;
-		}	/* switch ARB command check */
-		writeb(~ARB_CMD_INT, ti->mmio+ ACA_OFFSET+ACA_RESET + ISRP_ODD);
-		writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-	}	/* if ARB response */
-	if (status & SSB_RESP_INT) {	/* SSB response */
-		unsigned char retcode;
-		SET_PAGE(ti->ssb_page);
-#if TR_VERBOSE
-		DPRINTK("SSB resp: cmd=%02X rsp=%02X\n",
-				readb(ti->ssb), readb(ti->ssb + 2));
-#endif
-
-		switch (readb(ti->ssb)) {	/* SSB command check */
-		case XMIT_DIR_FRAME:
-		case XMIT_UI_FRAME:
-			retcode = readb(ti->ssb + 2);
-			if (retcode && (retcode != 0x22))/* checks ret_code */
-				DPRINTK("xmit ret_code: %02X xmit error code: "
-					"%02X\n",
-					(int)retcode, (int)readb(ti->ssb + 6));
-			else
-				dev->stats.tx_packets++;
-			break;
-		case XMIT_XID_CMD:
-			DPRINTK("xmit xid ret_code: %02X\n",
-						(int) readb(ti->ssb + 2));
-		default:
-			DPRINTK("Unknown command %02X in ssb\n",
-						(int) readb(ti->ssb));
-		}	/* SSB command check */
-		writeb(~SSB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
-		writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-	}	/* if SSB response */
-#ifdef ENABLE_PAGING
-	writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
-#endif
-	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
-	spin_unlock(&(ti->lock));
-	return IRQ_HANDLED;
-}				/*tok_interrupt */
-
-/*****************************************************************************/
-
-#define INIT_STATUS_OFST        1
-#define INIT_STATUS_2_OFST      2
-#define ENCODED_ADDRESS_OFST    8
-
-static void initial_tok_int(struct net_device *dev)
-{
-
-	__u32 encoded_addr, hw_encoded_addr;
-	struct tok_info *ti;
-        unsigned char init_status; /*BMS 12/2000*/
-
-	ti = netdev_priv(dev);
-
-	ti->do_tok_int = NOT_FIRST;
-
-	/* we assign the shared-ram address for ISA devices */
-	writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN);
-#ifndef PCMCIA
-        ti->sram_virt = ioremap(((__u32)ti->sram_base << 12), ti->avail_shared_ram);
-#endif
-	ti->init_srb = map_address(ti,
-		ntohs(readw(ti->mmio + ACA_OFFSET + WRBR_EVEN)),
-		&ti->init_srb_page);
-	if (ti->page_mask && ti->avail_shared_ram == 127) {
-		void __iomem *last_512;
-		__u8 last_512_page=0;
-		int i;
-		last_512 = map_address(ti, 0xfe00, &last_512_page);
-		/* initialize high section of ram (if necessary) */
-		SET_PAGE(last_512_page);
-		for (i = 0; i < 512; i++)
-			writeb(0, last_512 + i);
-	}
-	SET_PAGE(ti->init_srb_page);
-
-#if TR_VERBOSE
-	{
-	int i;
-
-	DPRINTK("ti->init_srb_page=0x%x\n", ti->init_srb_page);
-	DPRINTK("init_srb(%p):", ti->init_srb );
-	for (i = 0; i < 20; i++)
-		printk("%02X ", (int) readb(ti->init_srb + i));
-	printk("\n");
-	}
-#endif
-
-	hw_encoded_addr = readw(ti->init_srb + ENCODED_ADDRESS_OFST);
-	encoded_addr = ntohs(hw_encoded_addr);
-        init_status= /*BMS 12/2000 check for shallow mode possibility (Turbo)*/
-	readb(ti->init_srb+offsetof(struct srb_init_response,init_status));
-	/*printk("Initial interrupt: init_status= 0x%02x\n",init_status);*/
-	ti->ring_speed = init_status & 0x01 ? 16 : 4;
-	DPRINTK("Initial interrupt : %d Mbps, shared RAM base %08x.\n",
-				ti->ring_speed, (unsigned int)dev->mem_start);
-	ti->auto_speedsave = (readb(ti->init_srb+INIT_STATUS_2_OFST) & 4) != 0;
-
-        if (ti->open_mode == MANUAL)	wake_up(&ti->wait_for_reset);
-	else				tok_open_adapter((unsigned long)dev);
-        
-} /*initial_tok_int() */
-
-/*****************************************************************************/
-
-#define CMD_CORRELATE_OFST      1
-#define DHB_ADDRESS_OFST        6
-
-#define FRAME_LENGTH_OFST       6
-#define HEADER_LENGTH_OFST      8
-#define RSAP_VALUE_OFST         9
-
-static void tr_tx(struct net_device *dev)
-{
-	struct tok_info *ti = netdev_priv(dev);
-	struct trh_hdr *trhdr = (struct trh_hdr *) ti->current_skb->data;
-	unsigned int hdr_len;
-	__u32 dhb=0,dhb_base;
-	void __iomem *dhbuf = NULL;
-	unsigned char xmit_command;
-	int i,dhb_len=0x4000,src_len,src_offset;
-	struct trllc *llc;
-	struct srb_xmit xsrb;
-	__u8 dhb_page = 0;
-	__u8 llc_ssap;
-
-	SET_PAGE(ti->asb_page);
-
-	if (readb(ti->asb+RETCODE_OFST) != 0xFF) DPRINTK("ASB not free !!!\n");
-
-	/* The adapter, in providing the transmit interrupt, is telling us it is ready for
-	   data and providing a shared memory address for us to stuff with data.
-	   Here we compute the effective address where we will place data.
-	*/
-	SET_PAGE(ti->arb_page);
-	dhb=dhb_base=ntohs(readw(ti->arb + DHB_ADDRESS_OFST));
-	if (ti->page_mask) {
-		dhb_page = (dhb_base >> 8) & ti->page_mask;
-		dhb=dhb_base & ~(ti->page_mask << 8);
-	}
-	dhbuf = ti->sram_virt + dhb;
-
-	/* Figure out the size of the 802.5 header */
-	if (!(trhdr->saddr[0] & 0x80))	/* RIF present? */
-		hdr_len = sizeof(struct trh_hdr) - TR_MAXRIFLEN;
-	else
-		hdr_len = ((ntohs(trhdr->rcf) & TR_RCF_LEN_MASK) >> 8)
-		    + sizeof(struct trh_hdr) - TR_MAXRIFLEN;
-
-	llc = (struct trllc *) (ti->current_skb->data + hdr_len);
-
-	llc_ssap = llc->ssap;
-	SET_PAGE(ti->srb_page);
-	memcpy_fromio(&xsrb, ti->srb, sizeof(xsrb));
-	SET_PAGE(ti->asb_page);
-	xmit_command = xsrb.command;
-
-	writeb(xmit_command, ti->asb + COMMAND_OFST);
-	writew(xsrb.station_id, ti->asb + STATION_ID_OFST);
-	writeb(llc_ssap, ti->asb + RSAP_VALUE_OFST);
-	writeb(xsrb.cmd_corr, ti->asb + CMD_CORRELATE_OFST);
-	writeb(0, ti->asb + RETCODE_OFST);
-	if ((xmit_command == XMIT_XID_CMD) || (xmit_command == XMIT_TEST_CMD)) {
-		writew(htons(0x11), ti->asb + FRAME_LENGTH_OFST);
-		writeb(0x0e, ti->asb + HEADER_LENGTH_OFST);
-		SET_PAGE(dhb_page);
-		writeb(AC, dhbuf);
-		writeb(LLC_FRAME, dhbuf + 1);
-		for (i = 0; i < TR_ALEN; i++)
-			writeb((int) 0x0FF, dhbuf + i + 2);
-		for (i = 0; i < TR_ALEN; i++)
-			writeb(0, dhbuf + i + TR_ALEN + 2);
-		writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-		return;
-	}
-	/*
-	 *    the token ring packet is copied from sk_buff to the adapter
-	 *    buffer identified in the command data received with the interrupt.
-	 */
-	writeb(hdr_len, ti->asb + HEADER_LENGTH_OFST);
-	writew(htons(ti->current_skb->len), ti->asb + FRAME_LENGTH_OFST);
-	src_len=ti->current_skb->len;
-	src_offset=0;
-	dhb=dhb_base;
-	while(1) {
-		if (ti->page_mask) {
-			dhb_page=(dhb >> 8) & ti->page_mask;
-			dhb=dhb & ~(ti->page_mask << 8);
-			dhb_len=0x4000-dhb; /* remaining size of this page */
-		}
-		dhbuf = ti->sram_virt + dhb;
-		SET_PAGE(dhb_page);
-		if (src_len > dhb_len) {
-			memcpy_toio(dhbuf,&ti->current_skb->data[src_offset],
-					dhb_len);
-			src_len -= dhb_len;
-			src_offset += dhb_len;
-			dhb_base+=dhb_len;
-			dhb=dhb_base;
-			continue;
-		}
-		memcpy_toio(dhbuf, &ti->current_skb->data[src_offset], src_len);
-		break;
-	}
-	writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-	dev->stats.tx_bytes += ti->current_skb->len;
-	dev_kfree_skb_irq(ti->current_skb);
-	ti->current_skb = NULL;
-	netif_wake_queue(dev);
-	if (ti->readlog_pending)
-		ibmtr_readlog(dev);
-}				/*tr_tx */
-
-/*****************************************************************************/
-
-
-#define RECEIVE_BUFFER_OFST     6
-#define LAN_HDR_LENGTH_OFST     8
-#define DLC_HDR_LENGTH_OFST     9
-
-#define DSAP_OFST               0
-#define SSAP_OFST               1
-#define LLC_OFST                2
-#define PROTID_OFST             3
-#define ETHERTYPE_OFST          6
-
-static void tr_rx(struct net_device *dev)
-{
-	struct tok_info *ti = netdev_priv(dev);
-	__u32 rbuffer;
-	void __iomem *rbuf, *rbufdata, *llc;
-	__u8 rbuffer_page = 0;
-	unsigned char *data;
-	unsigned int rbuffer_len, lan_hdr_len, hdr_len, ip_len, length;
-	unsigned char dlc_hdr_len;
-	struct sk_buff *skb;
-	unsigned int skb_size = 0;
-	int IPv4_p = 0;
-	unsigned int chksum = 0;
-	struct iphdr *iph;
-	struct arb_rec_req rarb;
-
-	SET_PAGE(ti->arb_page);
-	memcpy_fromio(&rarb, ti->arb, sizeof(rarb));
-	rbuffer = ntohs(rarb.rec_buf_addr) ;
-	rbuf = map_address(ti, rbuffer, &rbuffer_page);
-
-	SET_PAGE(ti->asb_page);
-
-	if (readb(ti->asb + RETCODE_OFST) !=0xFF) DPRINTK("ASB not free !!!\n");
-
-	writeb(REC_DATA, ti->asb + COMMAND_OFST);
-	writew(rarb.station_id, ti->asb + STATION_ID_OFST);
-	writew(rarb.rec_buf_addr, ti->asb + RECEIVE_BUFFER_OFST);
-
-	lan_hdr_len = rarb.lan_hdr_len;
-	if (lan_hdr_len > sizeof(struct trh_hdr)) {
-		DPRINTK("Linux cannot handle a RIF greater than 18 bytes\n");
-		return;
-	}			/*BMS I added this above just to be very safe */
-	dlc_hdr_len = readb(ti->arb + DLC_HDR_LENGTH_OFST);
-	hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr);
-
-	SET_PAGE(rbuffer_page);
-	llc = rbuf + offsetof(struct rec_buf, data) + lan_hdr_len;
-
-#if TR_VERBOSE
-	DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n",
-	(__u32) offsetof(struct rec_buf, data), (unsigned int) lan_hdr_len);
-	DPRINTK("llc: %08X rec_buf_addr: %04X dev->mem_start: %lX\n",
-		llc, ntohs(rarb.rec_buf_addr), dev->mem_start);
-	DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, "
-		"ethertype: %04X\n",
-		(int) readb(llc + DSAP_OFST), (int) readb(llc + SSAP_OFST),
-		(int) readb(llc + LLC_OFST), (int) readb(llc + PROTID_OFST),
-		(int) readb(llc+PROTID_OFST+1),(int)readb(llc+PROTID_OFST + 2),
-		(int) ntohs(readw(llc + ETHERTYPE_OFST)));
-#endif
-	if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) {
-		SET_PAGE(ti->asb_page);
-		writeb(DATA_LOST, ti->asb + RETCODE_OFST);
-		dev->stats.rx_dropped++;
-		writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-		return;
-	}
-	length = ntohs(rarb.frame_len);
-	if (readb(llc + DSAP_OFST) == EXTENDED_SAP &&
-	   readb(llc + SSAP_OFST) == EXTENDED_SAP &&
-		length >= hdr_len)	IPv4_p = 1;
-#if TR_VERBOSE
-#define SADDR_OFST	8
-#define DADDR_OFST	2
-
-	if (!IPv4_p) {
-
-		void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data);
-		u8 saddr[6];
-		u8 daddr[6];
-		int i;
-		for (i = 0 ; i < 6 ; i++)
-			saddr[i] = readb(trhhdr + SADDR_OFST + i);
-		for (i = 0 ; i < 6 ; i++)
-			daddr[i] = readb(trhhdr + DADDR_OFST + i);
-		DPRINTK("Probably non-IP frame received.\n");
-		DPRINTK("ssap: %02X dsap: %02X "
-			"saddr: %pM daddr: %pM\n",
-			readb(llc + SSAP_OFST), readb(llc + DSAP_OFST),
-			saddr, daddr);
-	}
-#endif
-
-	/*BMS handle the case she comes in with few hops but leaves with many */
-        skb_size=length-lan_hdr_len+sizeof(struct trh_hdr)+sizeof(struct trllc);
-
-	if (!(skb = dev_alloc_skb(skb_size))) {
-		DPRINTK("out of memory. frame dropped.\n");
-		dev->stats.rx_dropped++;
-		SET_PAGE(ti->asb_page);
-		writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
-		writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-		return;
-	}
-	/*BMS again, if she comes in with few but leaves with many */
-	skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len);
-	skb_put(skb, length);
-	data = skb->data;
-	rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len)));
-	rbufdata = rbuf + offsetof(struct rec_buf, data);
-
-	if (IPv4_p) {
-		/* Copy the headers without checksumming */
-		memcpy_fromio(data, rbufdata, hdr_len);
-
-		/* Watch for padded packets and bogons */
-		iph= (struct iphdr *)(data+ lan_hdr_len + sizeof(struct trllc));
-		ip_len = ntohs(iph->tot_len) - sizeof(struct iphdr);
-		length -= hdr_len;
-		if ((ip_len <= length) && (ip_len > 7))
-			length = ip_len;
-		data += hdr_len;
-		rbuffer_len -= hdr_len;
-		rbufdata += hdr_len;
-	}
-	/* Copy the payload... */
-#define BUFFER_POINTER_OFST	2
-#define BUFFER_LENGTH_OFST      6
-	for (;;) {
-		if (ibmtr_debug_trace&TRC_INITV && length < rbuffer_len)
-			DPRINTK("CURIOUS, length=%d < rbuffer_len=%d\n",
-						length,rbuffer_len);
-		if (IPv4_p)
-			chksum=csum_partial_copy_nocheck((void*)rbufdata,
-			    data,length<rbuffer_len?length:rbuffer_len,chksum);
-		else
-			memcpy_fromio(data, rbufdata, rbuffer_len);
-		rbuffer = ntohs(readw(rbuf+BUFFER_POINTER_OFST)) ;
-		if (!rbuffer)
-			break;
-		rbuffer -= 2;
-		length -= rbuffer_len;
-		data += rbuffer_len;
-		rbuf = map_address(ti, rbuffer, &rbuffer_page);
-		SET_PAGE(rbuffer_page);
-		rbuffer_len = ntohs(readw(rbuf + BUFFER_LENGTH_OFST));
-		rbufdata = rbuf + offsetof(struct rec_buf, data);
-	}
-
-	SET_PAGE(ti->asb_page);
-	writeb(0, ti->asb + offsetof(struct asb_rec, ret_code));
-
-	writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-
-	dev->stats.rx_bytes += skb->len;
-	dev->stats.rx_packets++;
-
-	skb->protocol = tr_type_trans(skb, dev);
-	if (IPv4_p) {
-		skb->csum = chksum;
-		skb->ip_summed = CHECKSUM_COMPLETE;
-	}
-	netif_rx(skb);
-}				/*tr_rx */
-
-/*****************************************************************************/
-
-static void ibmtr_reset_timer(struct timer_list *tmr, struct net_device *dev)
-{
-	tmr->expires = jiffies + TR_RETRY_INTERVAL;
-	tmr->data = (unsigned long) dev;
-	tmr->function = tok_rerun;
-	init_timer(tmr);
-	add_timer(tmr);
-}
-
-/*****************************************************************************/
-
-static void tok_rerun(unsigned long dev_addr)
-{
-	struct net_device *dev = (struct net_device *)dev_addr;
-	struct tok_info *ti = netdev_priv(dev);
-
-	if ( ti->open_action == RESTART){
-		ti->do_tok_int = FIRST_INT;
-		outb(0, dev->base_addr + ADAPTRESETREL);
-#ifdef ENABLE_PAGING
-		if (ti->page_mask)
-			writeb(SRPR_ENABLE_PAGING,
-				ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
-#endif
-
-		writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
-	} else
-		tok_open_adapter(dev_addr);
-}
-
-/*****************************************************************************/
-
-static void ibmtr_readlog(struct net_device *dev)
-{
-	struct tok_info *ti;
-
-	ti = netdev_priv(dev);
-
-	ti->readlog_pending = 0;
-	SET_PAGE(ti->srb_page);
-	writeb(DIR_READ_LOG, ti->srb);
-	writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
-	writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
-
-	netif_stop_queue(dev);
-
-}
-
-/*****************************************************************************/
-
-static int ibmtr_change_mtu(struct net_device *dev, int mtu)
-{
-	struct tok_info *ti = netdev_priv(dev);
-
-	if (ti->ring_speed == 16 && mtu > ti->maxmtu16)
-		return -EINVAL;
-	if (ti->ring_speed == 4 && mtu > ti->maxmtu4)
-		return -EINVAL;
-	dev->mtu = mtu;
-	return 0;
-}
-
-/*****************************************************************************/
-#ifdef MODULE
-
-/* 3COM 3C619C supports 8 interrupts, 32 I/O ports */
-static struct net_device *dev_ibmtr[IBMTR_MAX_ADAPTERS];
-static int io[IBMTR_MAX_ADAPTERS] = { 0xa20, 0xa24 };
-static int irq[IBMTR_MAX_ADAPTERS];
-static int mem[IBMTR_MAX_ADAPTERS];
-
-MODULE_LICENSE("GPL");
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-
-static int __init ibmtr_init(void)
-{
-	int i;
-	int count=0;
-
-	find_turbo_adapters(io);
-
-	for (i = 0; i < IBMTR_MAX_ADAPTERS && io[i]; i++) {
-		struct net_device *dev;
-		irq[i] = 0;
-		mem[i] = 0;
-		dev = alloc_trdev(sizeof(struct tok_info));
-		if (dev == NULL) { 
-			if (i == 0)
-				return -ENOMEM;
-			break;
-		}
-		dev->base_addr = io[i];
-		dev->irq = irq[i];
-		dev->mem_start = mem[i];
-
-		if (ibmtr_probe_card(dev)) {
-			free_netdev(dev);
-			continue;
-		}
-		dev_ibmtr[i] = dev;
-		count++;
-	}
-	if (count) return 0;
-	printk("ibmtr: register_netdev() returned non-zero.\n");
-	return -EIO;
-}
-module_init(ibmtr_init);
-
-static void __exit ibmtr_cleanup(void)
-{
-	int i;
-
-	for (i = 0; i < IBMTR_MAX_ADAPTERS; i++){
-		if (!dev_ibmtr[i])
-			continue;
-		unregister_netdev(dev_ibmtr[i]);
-		ibmtr_cleanup_card(dev_ibmtr[i]);
-		free_netdev(dev_ibmtr[i]);
-	}
-}
-module_exit(ibmtr_cleanup);
-#endif
diff --git a/drivers/net/tokenring/ibmtr_cs.c b/drivers/net/tokenring/ibmtr_cs.c
deleted file mode 100644
index 356e28e..0000000
--- a/drivers/net/tokenring/ibmtr_cs.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*======================================================================
-
-    A PCMCIA token-ring driver for IBM-based cards
-
-    This driver supports the IBM PCMCIA Token-Ring Card.
-    Written by Steve Kipisz, kipisz@vnet.ibm.com or
-                             bungy@ibm.net
-
-    Written 1995,1996.
-
-    This code is based on pcnet_cs.c from David Hinds.
-    
-    V2.2.0 February 1999 - Mike Phillips phillim@amtrak.com
-
-    Linux V2.2.x presented significant changes to the underlying
-    ibmtr.c code.  Mainly the code became a lot more organized and
-    modular.
-
-    This caused the old PCMCIA Token Ring driver to give up and go 
-    home early. Instead of just patching the old code to make it 
-    work, the PCMCIA code has been streamlined, updated and possibly
-    improved.
-
-    This code now only contains code required for the Card Services.
-    All we do here is set the card up enough so that the real ibmtr.c
-    driver can find it and work with it properly.
-
-    i.e. We set up the io port, irq, mmio memory and shared ram
-    memory.  This enables ibmtr_probe in ibmtr.c to find the card and
-    configure it as though it was a normal ISA and/or PnP card.
-
-    CHANGES
-
-    v2.2.5 April 1999 Mike Phillips (phillim@amtrak.com)
-    Obscure bug fix, required changes to ibmtr.c not ibmtr_cs.c
-    
-    v2.2.7 May 1999 Mike Phillips (phillim@amtrak.com)
-    Updated to version 2.2.7 to match the first version of the kernel
-    that the modifications to ibmtr.c were incorporated into.
-    
-    v2.2.17 July 2000 Burt Silverman (burts@us.ibm.com)
-    Address translation feature of PCMCIA controller is usable so
-    memory windows can be placed in High memory (meaning above
-    0xFFFFF.)
-
-======================================================================*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-#include <linux/ibmtr.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ds.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-#define PCMCIA
-#include "ibmtr.c"
-
-
-/*====================================================================*/
-
-/* Parameters that can be set with 'insmod' */
-
-/* MMIO base address */
-static u_long mmiobase = 0xce000;
-
-/* SRAM base address */
-static u_long srambase = 0xd0000;
-
-/* SRAM size 8,16,32,64 */
-static u_long sramsize = 64;
-
-/* Ringspeed 4,16 */
-static int ringspeed = 16;
-
-module_param(mmiobase, ulong, 0);
-module_param(srambase, ulong, 0);
-module_param(sramsize, ulong, 0);
-module_param(ringspeed, int, 0);
-MODULE_LICENSE("GPL");
-
-/*====================================================================*/
-
-static int ibmtr_config(struct pcmcia_device *link);
-static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase);
-static void ibmtr_release(struct pcmcia_device *link);
-static void ibmtr_detach(struct pcmcia_device *p_dev);
-
-/*====================================================================*/
-
-typedef struct ibmtr_dev_t {
-	struct pcmcia_device	*p_dev;
-	struct net_device	*dev;
-	struct tok_info		*ti;
-} ibmtr_dev_t;
-
-static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) {
-	ibmtr_dev_t *info = dev_id;
-	struct net_device *dev = info->dev;
-	return tok_interrupt(irq, dev);
-};
-
-static int __devinit ibmtr_attach(struct pcmcia_device *link)
-{
-    ibmtr_dev_t *info;
-    struct net_device *dev;
-
-    dev_dbg(&link->dev, "ibmtr_attach()\n");
-
-    /* Create new token-ring device */
-    info = kzalloc(sizeof(*info), GFP_KERNEL);
-    if (!info) return -ENOMEM;
-    dev = alloc_trdev(sizeof(struct tok_info));
-    if (!dev) {
-	kfree(info);
-	return -ENOMEM;
-    }
-
-    info->p_dev = link;
-    link->priv = info;
-    info->ti = netdev_priv(dev);
-
-    link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
-    link->resource[0]->end = 4;
-    link->config_flags |= CONF_ENABLE_IRQ;
-    link->config_regs = PRESENT_OPTION;
-
-    info->dev = dev;
-
-    return ibmtr_config(link);
-} /* ibmtr_attach */
-
-static void ibmtr_detach(struct pcmcia_device *link)
-{
-    struct ibmtr_dev_t *info = link->priv;
-    struct net_device *dev = info->dev;
-     struct tok_info *ti = netdev_priv(dev);
-
-    dev_dbg(&link->dev, "ibmtr_detach\n");
-    
-    /* 
-     * When the card removal interrupt hits tok_interrupt(), 
-     * bail out early, so we don't crash the machine 
-     */
-    ti->sram_phys |= 1;
-
-    unregister_netdev(dev);
-    
-    del_timer_sync(&(ti->tr_timer));
-
-    ibmtr_release(link);
-
-    free_netdev(dev);
-    kfree(info);
-} /* ibmtr_detach */
-
-static int __devinit ibmtr_config(struct pcmcia_device *link)
-{
-    ibmtr_dev_t *info = link->priv;
-    struct net_device *dev = info->dev;
-    struct tok_info *ti = netdev_priv(dev);
-    int i, ret;
-
-    dev_dbg(&link->dev, "ibmtr_config\n");
-
-    link->io_lines = 16;
-    link->config_index = 0x61;
-
-    /* Determine if this is PRIMARY or ALTERNATE. */
-
-    /* Try PRIMARY card at 0xA20-0xA23 */
-    link->resource[0]->start = 0xA20;
-    i = pcmcia_request_io(link);
-    if (i != 0) {
-	/* Couldn't get 0xA20-0xA23.  Try ALTERNATE at 0xA24-0xA27. */
-	link->resource[0]->start = 0xA24;
-	ret = pcmcia_request_io(link);
-	if (ret)
-		goto failed;
-    }
-    dev->base_addr = link->resource[0]->start;
-
-    ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt);
-    if (ret)
-	    goto failed;
-    dev->irq = link->irq;
-    ti->irq = link->irq;
-    ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
-
-    /* Allocate the MMIO memory window */
-    link->resource[2]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
-    link->resource[2]->flags |= WIN_USE_WAIT;
-    link->resource[2]->start = 0;
-    link->resource[2]->end = 0x2000;
-    ret = pcmcia_request_window(link, link->resource[2], 250);
-    if (ret)
-	    goto failed;
-
-    ret = pcmcia_map_mem_page(link, link->resource[2], mmiobase);
-    if (ret)
-	    goto failed;
-    ti->mmio = ioremap(link->resource[2]->start,
-		    resource_size(link->resource[2]));
-
-    /* Allocate the SRAM memory window */
-    link->resource[3]->flags = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
-    link->resource[3]->flags |= WIN_USE_WAIT;
-    link->resource[3]->start = 0;
-    link->resource[3]->end = sramsize * 1024;
-    ret = pcmcia_request_window(link, link->resource[3], 250);
-    if (ret)
-	    goto failed;
-
-    ret = pcmcia_map_mem_page(link, link->resource[3], srambase);
-    if (ret)
-	    goto failed;
-
-    ti->sram_base = srambase >> 12;
-    ti->sram_virt = ioremap(link->resource[3]->start,
-		    resource_size(link->resource[3]));
-    ti->sram_phys = link->resource[3]->start;
-
-    ret = pcmcia_enable_device(link);
-    if (ret)
-	    goto failed;
-
-    /*  Set up the Token-Ring Controller Configuration Register and
-        turn on the card.  Check the "Local Area Network Credit Card
-        Adapters Technical Reference"  SC30-3585 for this info.  */
-    ibmtr_hw_setup(dev, mmiobase);
-
-    SET_NETDEV_DEV(dev, &link->dev);
-
-    i = ibmtr_probe_card(dev);
-    if (i != 0) {
-	pr_notice("register_netdev() failed\n");
-	goto failed;
-    }
-
-    netdev_info(dev, "port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
-		dev->base_addr, dev->irq,
-		(u_long)ti->mmio, (u_long)(ti->sram_base << 12),
-		dev->dev_addr);
-    return 0;
-
-failed:
-    ibmtr_release(link);
-    return -ENODEV;
-} /* ibmtr_config */
-
-static void ibmtr_release(struct pcmcia_device *link)
-{
-	ibmtr_dev_t *info = link->priv;
-	struct net_device *dev = info->dev;
-
-	dev_dbg(&link->dev, "ibmtr_release\n");
-
-	if (link->resource[2]->end) {
-		struct tok_info *ti = netdev_priv(dev);
-		iounmap(ti->mmio);
-	}
-	pcmcia_disable_device(link);
-}
-
-static int ibmtr_suspend(struct pcmcia_device *link)
-{
-	ibmtr_dev_t *info = link->priv;
-	struct net_device *dev = info->dev;
-
-	if (link->open)
-		netif_device_detach(dev);
-
-	return 0;
-}
-
-static int __devinit ibmtr_resume(struct pcmcia_device *link)
-{
-	ibmtr_dev_t *info = link->priv;
-	struct net_device *dev = info->dev;
-
-	if (link->open) {
-		ibmtr_probe(dev);	/* really? */
-		netif_device_attach(dev);
-	}
-
-	return 0;
-}
-
-
-/*====================================================================*/
-
-static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase)
-{
-    int i;
-
-    /* Bizarre IBM behavior: there are 16 bits of information we
-       need to set, but the card only allows us to send 4 bits at a
-       time.  For each byte sent to base_addr, bits 7-4 tell the
-       card which part of the 16 bits we are setting; bits 3-0 contain
-       the actual information */
-
-    /* First nibble provides 4 bits of mmio */
-    i = (mmiobase >> 16) & 0x0F;
-    outb(i, dev->base_addr);
-
-    /* Second nibble provides 3 bits of mmio */
-    i = 0x10 | ((mmiobase >> 12) & 0x0E);
-    outb(i, dev->base_addr);
-
-    /* Third nibble, hard-coded values */
-    i = 0x26;
-    outb(i, dev->base_addr);
-
-    /* Fourth nibble sets shared ram page size */
-
-    /* 8 = 00, 16 = 01, 32 = 10, 64 = 11 */          
-    i = (sramsize >> 4) & 0x07;
-    i = ((i == 4) ? 3 : i) << 2;
-    i |= 0x30;
-
-    if (ringspeed == 16)
-	i |= 2;
-    if (dev->base_addr == 0xA24)
-	i |= 1;
-    outb(i, dev->base_addr);
-
-    /* 0x40 will release the card for use */
-    outb(0x40, dev->base_addr);
-}
-
-static const struct pcmcia_device_id ibmtr_ids[] = {
-	PCMCIA_DEVICE_PROD_ID12("3Com", "TokenLink Velocity PC Card", 0x41240e5b, 0x82c3734e),
-	PCMCIA_DEVICE_PROD_ID12("IBM", "TOKEN RING", 0xb569a6e5, 0xbf8eed47),
-	PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, ibmtr_ids);
-
-static struct pcmcia_driver ibmtr_cs_driver = {
-	.owner		= THIS_MODULE,
-	.name		= "ibmtr_cs",
-	.probe		= ibmtr_attach,
-	.remove		= ibmtr_detach,
-	.id_table       = ibmtr_ids,
-	.suspend	= ibmtr_suspend,
-	.resume		= ibmtr_resume,
-};
-
-static int __init init_ibmtr_cs(void)
-{
-	return pcmcia_register_driver(&ibmtr_cs_driver);
-}
-
-static void __exit exit_ibmtr_cs(void)
-{
-	pcmcia_unregister_driver(&ibmtr_cs_driver);
-}
-
-module_init(init_ibmtr_cs);
-module_exit(exit_ibmtr_cs);
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
deleted file mode 100644
index 3e4b4f0..0000000
--- a/drivers/net/tokenring/lanstreamer.c
+++ /dev/null
@@ -1,1917 +0,0 @@
-/*
- *   lanstreamer.c -- driver for the IBM Auto LANStreamer PCI Adapter
- *
- *  Written By: Mike Sullivan, IBM Corporation
- *
- *  Copyright (C) 1999 IBM Corporation
- *
- *  Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC
- *  chipset. 
- *
- *  This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic
- *  chipsets) written  by:
- *      1999 Peter De Schrijver All Rights Reserved
- *	1999 Mike Phillips (phillim@amtrak.com)
- *
- *  Base Driver Skeleton:
- *      Written 1993-94 by Donald Becker.
- *
- *      Copyright 1993 United States Government as represented by the
- *      Director, National Security Agency.
- *
- * This program is free software; you can redistribute it and/or modify      
- * it under the terms of the GNU General Public License as published by      
- * the Free Software Foundation; either version 2 of the License, or         
- * (at your option) any later version.                                       
- *                                                                           
- * This program is distributed in the hope that it will be useful,           
- * but WITHOUT ANY WARRANTY; without even the implied warranty of            
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the             
- * GNU General Public License for more details.                              
- *                                                                           
- * NO WARRANTY                                                               
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR        
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT      
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,      
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is    
- * solely responsible for determining the appropriateness of using and       
- * distributing the Program and assumes all risks associated with its        
- * exercise of rights under this Agreement, including but not limited to     
- * the risks and costs of program errors, damage to or loss of data,         
- * programs or equipment, and unavailability or interruption of operations.  
- *                                                                           
- * DISCLAIMER OF LIABILITY                                                   
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY   
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL        
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND   
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR     
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE    
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED  
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES             
- *                                                                           
- * You should have received a copy of the GNU General Public License         
- * along with this program; if not, write to the Free Software               
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA 
- *                                                                           
- * 
- *  12/10/99 - Alpha Release 0.1.0
- *            First release to the public
- *  03/03/00 - Merged to kernel, indented -kr -i8 -bri0, fixed some missing
- *		malloc free checks, reviewed code. <alan@redhat.com>
- *  03/13/00 - Added spinlocks for smp
- *  03/08/01 - Added support for module_init() and module_exit()
- *  08/15/01 - Added ioctl() functionality for debugging, changed netif_*_queue
- *             calls and other incorrectness - Kent Yoder <yoder1@us.ibm.com>
- *  11/05/01 - Restructured the interrupt function, added delays, reduced
- *             the number of TX descriptors to 1, which together can prevent
- *             the card from locking up the box - <yoder1@us.ibm.com>
- *  09/27/02 - New PCI interface + bug fix. - <yoder1@us.ibm.com>
- *  11/13/02 - Removed free_irq calls which could cause a hang, added
- *	       netif_carrier_{on|off} - <yoder1@us.ibm.com>
- *  
- *  To Do:
- *
- *
- *  If Problems do Occur
- *  Most problems can be rectified by either closing and opening the interface
- *  (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
- *  if compiled into the kernel).
- */
-
-/* Change STREAMER_DEBUG to 1 to get verbose, and I mean really verbose, messages */
-
-#define STREAMER_DEBUG 0
-#define STREAMER_DEBUG_PACKETS 0
-
-/* Change STREAMER_NETWORK_MONITOR to receive mac frames through the arb channel.
- * Will also create a /proc/net/streamer_tr entry if proc_fs is compiled into the
- * kernel.
- * Intended to be used to create a ring-error reporting network module 
- * i.e. it will give you the source address of beaconers on the ring 
- */
-
-#define STREAMER_NETWORK_MONITOR 0
-
-/* #define CONFIG_PROC_FS */
-
-/*
- *  Allow or disallow ioctl's for debugging
- */
-
-#define STREAMER_IOCTL 0
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/in.h>
-#include <linux/ioport.h>
-#include <linux/string.h>
-#include <linux/proc_fs.h>
-#include <linux/ptrace.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-#include <linux/stddef.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-#include <linux/slab.h>
-
-#include <net/net_namespace.h>
-#include <net/checksum.h>
-
-#include <asm/io.h>
-
-#include "lanstreamer.h"
-
-#if (BITS_PER_LONG == 64)
-#error broken on 64-bit: stores pointer to rx_ring->buffer in 32-bit int
-#endif
-
-
-/* I've got to put some intelligence into the version number so that Peter and I know
- * which version of the code somebody has got. 
- * Version Number = a.b.c.d  where a.b.c is the level of code and d is the latest author.
- * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
- * 
- * Official releases will only have an a.b.c version number format.
- */
-
-static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
-                        "              v0.5.3 11/13/02 - Kent Yoder";
-
-static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
-	{}	/* terminating entry */
-};
-MODULE_DEVICE_TABLE(pci,streamer_pci_tbl);
-
-
-static char *open_maj_error[] = {
-	"No error", "Lobe Media Test", "Physical Insertion",
-	"Address Verification", "Neighbor Notification (Ring Poll)",
-	"Request Parameters", "FDX Registration Request",
-	"FDX Lobe Media Test", "FDX Duplicate Address Check",
-	"Unknown stage"
-};
-
-static char *open_min_error[] = {
-	"No error", "Function Failure", "Signal Lost", "Wire Fault",
-	"Ring Speed Mismatch", "Timeout", "Ring Failure", "Ring Beaconing",
-	"Duplicate Node Address", "Request Parameters", "Remove Received",
-	"Reserved", "Reserved", "No Monitor Detected for RPL",
-	"Monitor Contention failure for RPL", "FDX Protocol Error"
-};
-
-/* Module parameters */
-
-/* Ring Speed 0,4,16
- * 0 = Autosense         
- * 4,16 = Selected speed only, no autosense
- * This allows the card to be the first on the ring
- * and become the active monitor.
- *
- * WARNING: Some hubs will allow you to insert
- * at the wrong speed
- */
-
-static int ringspeed[STREAMER_MAX_ADAPTERS] = { 0, };
-
-module_param_array(ringspeed, int, NULL, 0);
-
-/* Packet buffer size */
-
-static int pkt_buf_sz[STREAMER_MAX_ADAPTERS] = { 0, };
-
-module_param_array(pkt_buf_sz, int, NULL, 0);
-
-/* Message Level */
-
-static int message_level[STREAMER_MAX_ADAPTERS] = { 1, };
-
-module_param_array(message_level, int, NULL, 0);
-
-#if STREAMER_IOCTL
-static int streamer_ioctl(struct net_device *, struct ifreq *, int);
-#endif
-
-static int streamer_reset(struct net_device *dev);
-static int streamer_open(struct net_device *dev);
-static netdev_tx_t streamer_xmit(struct sk_buff *skb,
-				       struct net_device *dev);
-static int streamer_close(struct net_device *dev);
-static void streamer_set_rx_mode(struct net_device *dev);
-static irqreturn_t streamer_interrupt(int irq, void *dev_id);
-static int streamer_set_mac_address(struct net_device *dev, void *addr);
-static void streamer_arb_cmd(struct net_device *dev);
-static int streamer_change_mtu(struct net_device *dev, int mtu);
-static void streamer_srb_bh(struct net_device *dev);
-static void streamer_asb_bh(struct net_device *dev);
-#if STREAMER_NETWORK_MONITOR
-#ifdef CONFIG_PROC_FS
-static int streamer_proc_info(char *buffer, char **start, off_t offset,
-			      int length, int *eof, void *data);
-static int sprintf_info(char *buffer, struct net_device *dev);
-struct streamer_private *dev_streamer=NULL;
-#endif
-#endif
-
-static const struct net_device_ops streamer_netdev_ops = {
-	.ndo_open		= streamer_open,
-	.ndo_stop		= streamer_close,
-	.ndo_start_xmit		= streamer_xmit,
-	.ndo_change_mtu		= streamer_change_mtu,
-#if STREAMER_IOCTL
-	.ndo_do_ioctl		= streamer_ioctl,
-#endif
-	.ndo_set_rx_mode	= streamer_set_rx_mode,
-	.ndo_set_mac_address	= streamer_set_mac_address,
-};
-
-static int __devinit streamer_init_one(struct pci_dev *pdev,
-				       const struct pci_device_id *ent)
-{
-	struct net_device *dev;
-	struct streamer_private *streamer_priv;
-	unsigned long pio_start, pio_end, pio_flags, pio_len;
-	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
-	int rc = 0;
-	static int card_no=-1;
-	u16 pcr;
-
-#if STREAMER_DEBUG
-	printk("lanstreamer::streamer_init_one, entry pdev %p\n",pdev);
-#endif
-
-	card_no++;
-	dev = alloc_trdev(sizeof(*streamer_priv));
-	if (dev==NULL) {
-		printk(KERN_ERR "lanstreamer: out of memory.\n");
-		return -ENOMEM;
-	}
-
-	streamer_priv = netdev_priv(dev);
-
-#if STREAMER_NETWORK_MONITOR
-#ifdef CONFIG_PROC_FS
-	if (!dev_streamer)
-		create_proc_read_entry("streamer_tr", 0, init_net.proc_net,
-					streamer_proc_info, NULL); 
-	streamer_priv->next = dev_streamer;
-	dev_streamer = streamer_priv;
-#endif
-#endif
-
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (rc) {
-		printk(KERN_ERR "%s: No suitable PCI mapping available.\n",
-				dev->name);
-		rc = -ENODEV;
-		goto err_out;
-	}
-
-	rc = pci_enable_device(pdev);
-	if (rc) {
-		printk(KERN_ERR "lanstreamer: unable to enable pci device\n");
-		rc=-EIO;
-		goto err_out;
-	}
-
-	pci_set_master(pdev);
-
-	rc = pci_set_mwi(pdev);
-	if (rc) {
-		printk(KERN_ERR "lanstreamer: unable to enable MWI on pci device\n");
-		goto err_out_disable;
-	}
-
-	pio_start = pci_resource_start(pdev, 0);
-	pio_end = pci_resource_end(pdev, 0);
-	pio_flags = pci_resource_flags(pdev, 0);
-	pio_len = pci_resource_len(pdev, 0);
-
-	mmio_start = pci_resource_start(pdev, 1);
-	mmio_end = pci_resource_end(pdev, 1);
-	mmio_flags = pci_resource_flags(pdev, 1);
-	mmio_len = pci_resource_len(pdev, 1);
-
-#if STREAMER_DEBUG
-	printk("lanstreamer: pio_start %x pio_end %x pio_len %x pio_flags %x\n",
-		pio_start, pio_end, pio_len, pio_flags);
-	printk("lanstreamer: mmio_start %x mmio_end %x mmio_len %x mmio_flags %x\n",
-		mmio_start, mmio_end, mmio_flags, mmio_len);
-#endif
-
-	if (!request_region(pio_start, pio_len, "lanstreamer")) {
-		printk(KERN_ERR "lanstreamer: unable to get pci io addr %lx\n",
-			pio_start);
-		rc= -EBUSY;
-		goto err_out_mwi;
-	}
-
-	if (!request_mem_region(mmio_start, mmio_len, "lanstreamer")) {
-		printk(KERN_ERR "lanstreamer: unable to get pci mmio addr %lx\n",
-			mmio_start);
-		rc= -EBUSY;
-		goto err_out_free_pio;
-	}
-
-	streamer_priv->streamer_mmio=ioremap(mmio_start, mmio_len);
-	if (streamer_priv->streamer_mmio == NULL) {
-		printk(KERN_ERR "lanstreamer: unable to remap MMIO %lx\n",
-			mmio_start);
-		rc= -EIO;
-		goto err_out_free_mmio;
-	}
-
-	init_waitqueue_head(&streamer_priv->srb_wait);
-	init_waitqueue_head(&streamer_priv->trb_wait);
-
-	dev->netdev_ops = &streamer_netdev_ops;
-	dev->irq = pdev->irq;
-	dev->base_addr=pio_start;
-	SET_NETDEV_DEV(dev, &pdev->dev);
-
-	streamer_priv->streamer_card_name = (char *)pdev->resource[0].name;
-	streamer_priv->pci_dev = pdev;
-
-	if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000))
-		streamer_priv->pkt_buf_sz = PKT_BUF_SZ;
-	else
-		streamer_priv->pkt_buf_sz = pkt_buf_sz[card_no];
-
-	streamer_priv->streamer_ring_speed = ringspeed[card_no];
-	streamer_priv->streamer_message_level = message_level[card_no];
-
-	pci_set_drvdata(pdev, dev);
-
-	spin_lock_init(&streamer_priv->streamer_lock);
-
-	pci_read_config_word (pdev, PCI_COMMAND, &pcr);
-	pcr |= PCI_COMMAND_SERR;
-	pci_write_config_word (pdev, PCI_COMMAND, pcr);
-
-	printk("%s\n", version);
-	printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
-		streamer_priv->streamer_card_name,
-		(unsigned int) dev->base_addr,
-		streamer_priv->streamer_mmio, 
-		dev->irq);
-
-	if (streamer_reset(dev))
-		goto err_out_unmap;
-
-	rc = register_netdev(dev);
-	if (rc)
-		goto err_out_unmap;
-	return 0;
-
-err_out_unmap:
-	iounmap(streamer_priv->streamer_mmio);
-err_out_free_mmio:
-	release_mem_region(mmio_start, mmio_len);
-err_out_free_pio:
-	release_region(pio_start, pio_len);
-err_out_mwi:
-	pci_clear_mwi(pdev);
-err_out_disable:
-	pci_disable_device(pdev);
-err_out:
-	free_netdev(dev);
-#if STREAMER_DEBUG
-	printk("lanstreamer: Exit error %x\n",rc);
-#endif
-	return rc;
-}
-
-static void __devexit streamer_remove_one(struct pci_dev *pdev)
-{
-	struct net_device *dev=pci_get_drvdata(pdev);
-	struct streamer_private *streamer_priv;
-
-#if STREAMER_DEBUG
-	printk("lanstreamer::streamer_remove_one entry pdev %p\n",pdev);
-#endif
-
-	if (dev == NULL) {
-		printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev is NULL\n");
-		return;
-	}
-
-	streamer_priv=netdev_priv(dev);
-	if (streamer_priv == NULL) {
-		printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev->priv is NULL\n");
-		return;
-	}
-
-#if STREAMER_NETWORK_MONITOR
-#ifdef CONFIG_PROC_FS
-	{
-		struct streamer_private **p, **next;
-
-		for (p = &dev_streamer; *p; p = next) {
-			next = &(*p)->next;
-			if (*p == streamer_priv) {
-				*p = *next;
-				break;
-			}
-		}
-		if (!dev_streamer)
-			remove_proc_entry("streamer_tr", init_net.proc_net);
-	}
-#endif
-#endif
-
-	unregister_netdev(dev);
-	iounmap(streamer_priv->streamer_mmio);
-	release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev,1));
-	release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev,0));
-	pci_clear_mwi(pdev);
-	pci_disable_device(pdev);
-	free_netdev(dev);
-	pci_set_drvdata(pdev, NULL);
-}
-
-
-static int streamer_reset(struct net_device *dev)
-{
-	struct streamer_private *streamer_priv;
-	__u8 __iomem *streamer_mmio;
-	unsigned long t;
-	unsigned int uaa_addr;
-	struct sk_buff *skb = NULL;
-	__u16 misr;
-
-	streamer_priv = netdev_priv(dev);
-	streamer_mmio = streamer_priv->streamer_mmio;
-
-	writew(readw(streamer_mmio + BCTL) | BCTL_SOFTRESET, streamer_mmio + BCTL);
-	t = jiffies;
-	/* Hold soft reset bit for a while */
-	ssleep(1);
-	
-	writew(readw(streamer_mmio + BCTL) & ~BCTL_SOFTRESET,
-	       streamer_mmio + BCTL);
-
-#if STREAMER_DEBUG
-	printk("BCTL: %x\n", readw(streamer_mmio + BCTL));
-	printk("GPR: %x\n", readw(streamer_mmio + GPR));
-	printk("SISRMASK: %x\n", readw(streamer_mmio + SISR_MASK));
-#endif
-	writew(readw(streamer_mmio + BCTL) | (BCTL_RX_FIFO_8 | BCTL_TX_FIFO_8), streamer_mmio + BCTL );
-
-	if (streamer_priv->streamer_ring_speed == 0) {	/* Autosense */
-		writew(readw(streamer_mmio + GPR) | GPR_AUTOSENSE,
-		       streamer_mmio + GPR);
-		if (streamer_priv->streamer_message_level)
-			printk(KERN_INFO "%s: Ringspeed autosense mode on\n",
-			       dev->name);
-	} else if (streamer_priv->streamer_ring_speed == 16) {
-		if (streamer_priv->streamer_message_level)
-			printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n",
-			       dev->name);
-		writew(GPR_16MBPS, streamer_mmio + GPR);
-	} else if (streamer_priv->streamer_ring_speed == 4) {
-		if (streamer_priv->streamer_message_level)
-			printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n",
-			       dev->name);
-		writew(0, streamer_mmio + GPR);
-	}
-
-	skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
-	if (!skb) {
-		printk(KERN_INFO "%s: skb allocation for diagnostics failed...proceeding\n",
-		       dev->name);
-	} else {
-	        struct streamer_rx_desc *rx_ring;
-                u8 *data;
-
-		rx_ring=(struct streamer_rx_desc *)skb->data;
-		data=((u8 *)skb->data)+sizeof(struct streamer_rx_desc);
-		rx_ring->forward=0;
-		rx_ring->status=0;
-		rx_ring->buffer=cpu_to_le32(pci_map_single(streamer_priv->pci_dev, data, 
-							512, PCI_DMA_FROMDEVICE));
-		rx_ring->framelen_buflen=512; 
-		writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, rx_ring, 512, PCI_DMA_FROMDEVICE)),
-			streamer_mmio+RXBDA);
-	}
-
-#if STREAMER_DEBUG
-	printk("GPR = %x\n", readw(streamer_mmio + GPR));
-#endif
-	/* start solo init */
-	writew(SISR_MI, streamer_mmio + SISR_MASK_SUM);
-
-	while (!((readw(streamer_mmio + SISR)) & SISR_SRB_REPLY)) {
-		msleep_interruptible(100);
-		if (time_after(jiffies, t + 40 * HZ)) {
-			printk(KERN_ERR
-			       "IBM PCI tokenring card not responding\n");
-			release_region(dev->base_addr, STREAMER_IO_SPACE);
-			if (skb)
-				dev_kfree_skb(skb);
-			return -1;
-		}
-	}
-	writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
-	misr = readw(streamer_mmio + MISR_RUM);
-	writew(~misr, streamer_mmio + MISR_RUM);
-
-	if (skb)
-		dev_kfree_skb(skb);	/* release skb used for diagnostics */
-
-#if STREAMER_DEBUG
-	printk("LAPWWO: %x, LAPA: %x LAPE:  %x\n",
-	       readw(streamer_mmio + LAPWWO), readw(streamer_mmio + LAPA),
-	       readw(streamer_mmio + LAPE));
-#endif
-
-#if STREAMER_DEBUG
-	{
-		int i;
-		writew(readw(streamer_mmio + LAPWWO),
-		       streamer_mmio + LAPA);
-		printk("initialization response srb dump: ");
-		for (i = 0; i < 10; i++)
-			printk("%x:",
-			       ntohs(readw(streamer_mmio + LAPDINC)));
-		printk("\n");
-	}
-#endif
-
-	writew(readw(streamer_mmio + LAPWWO) + 6, streamer_mmio + LAPA);
-	if (readw(streamer_mmio + LAPD)) {
-		printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",
-		       ntohs(readw(streamer_mmio + LAPD)));
-		release_region(dev->base_addr, STREAMER_IO_SPACE);
-		return -1;
-	}
-
-	writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
-	uaa_addr = ntohs(readw(streamer_mmio + LAPDINC));
-	readw(streamer_mmio + LAPDINC);	/* skip over Level.Addr field */
-	streamer_priv->streamer_addr_table_addr = ntohs(readw(streamer_mmio + LAPDINC));
-	streamer_priv->streamer_parms_addr = ntohs(readw(streamer_mmio + LAPDINC));
-
-#if STREAMER_DEBUG
-	printk("UAA resides at %x\n", uaa_addr);
-#endif
-
-	/* setup uaa area for access with LAPD */
-	{
-		int i;
-		__u16 addr;
-		writew(uaa_addr, streamer_mmio + LAPA);
-		for (i = 0; i < 6; i += 2) {
-		        addr=ntohs(readw(streamer_mmio+LAPDINC));
-			dev->dev_addr[i]= (addr >> 8) & 0xff;
-			dev->dev_addr[i+1]= addr & 0xff;
-		}
-#if STREAMER_DEBUG
-		printk("Adapter address: %pM\n", dev->dev_addr);
-#endif
-	}
-	return 0;
-}
-
-static int streamer_open(struct net_device *dev)
-{
-	struct streamer_private *streamer_priv = netdev_priv(dev);
-	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
-	unsigned long flags;
-	char open_error[255];
-	int i, open_finished = 1;
-	__u16 srb_word;
-	__u16 srb_open;
-	int rc;
-
-	if (readw(streamer_mmio+BMCTL_SUM) & BMCTL_RX_ENABLED) {
-	        rc=streamer_reset(dev);
-	}
-
-	if (request_irq(dev->irq, streamer_interrupt, IRQF_SHARED, "lanstreamer", dev)) {
-		return -EAGAIN;
-	}
-#if STREAMER_DEBUG
-	printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
-	printk("pending ints: %x\n", readw(streamer_mmio + SISR));
-#endif
-
-	writew(SISR_MI | SISR_SRB_REPLY, streamer_mmio + SISR_MASK);	/* more ints later, doesn't stop arb cmd interrupt */
-	writew(LISR_LIE, streamer_mmio + LISR);	/* more ints later */
-
-	/* adapter is closed, so SRB is pointed to by LAPWWO */
-	writew(readw(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
-
-#if STREAMER_DEBUG
-	printk("LAPWWO: %x, LAPA: %x\n", readw(streamer_mmio + LAPWWO),
-	       readw(streamer_mmio + LAPA));
-	printk("LAPE: %x\n", readw(streamer_mmio + LAPE));
-	printk("SISR Mask = %04x\n", readw(streamer_mmio + SISR_MASK));
-#endif
-	do {
-		for (i = 0; i < SRB_COMMAND_SIZE; i += 2) {
-			writew(0, streamer_mmio + LAPDINC);
-		}
-
-		writew(readw(streamer_mmio+LAPWWO),streamer_mmio+LAPA);
-		writew(htons(SRB_OPEN_ADAPTER<<8),streamer_mmio+LAPDINC) ; 	/* open */
-		writew(htons(STREAMER_CLEAR_RET_CODE<<8),streamer_mmio+LAPDINC);
-		writew(STREAMER_CLEAR_RET_CODE, streamer_mmio + LAPDINC);
-
-		writew(readw(streamer_mmio + LAPWWO) + 8, streamer_mmio + LAPA);
-#if STREAMER_NETWORK_MONITOR
-		/* If Network Monitor, instruct card to copy MAC frames through the ARB */
-		writew(htons(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), streamer_mmio + LAPDINC);	/* offset 8 word contains open options */
-#else
-		writew(htons(OPEN_ADAPTER_ENABLE_FDX), streamer_mmio + LAPDINC);	/* Offset 8 word contains Open.Options */
-#endif
-
-		if (streamer_priv->streamer_laa[0]) {
-			writew(readw(streamer_mmio + LAPWWO) + 12, streamer_mmio + LAPA);
-			writew(htons((streamer_priv->streamer_laa[0] << 8) | 
-				     streamer_priv->streamer_laa[1]),streamer_mmio+LAPDINC);
-			writew(htons((streamer_priv->streamer_laa[2] << 8) | 
-				     streamer_priv->streamer_laa[3]),streamer_mmio+LAPDINC);
-			writew(htons((streamer_priv->streamer_laa[4] << 8) | 
-				     streamer_priv->streamer_laa[5]),streamer_mmio+LAPDINC);
-			memcpy(dev->dev_addr, streamer_priv->streamer_laa, dev->addr_len);
-		}
-
-		/* save off srb open offset */
-		srb_open = readw(streamer_mmio + LAPWWO);
-#if STREAMER_DEBUG
-		writew(readw(streamer_mmio + LAPWWO),
-		       streamer_mmio + LAPA);
-		printk("srb open request:\n");
-		for (i = 0; i < 16; i++) {
-			printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
-		}
-		printk("\n");
-#endif
-		spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
-		streamer_priv->srb_queued = 1;
-
-		/* signal solo that SRB command has been issued */
-		writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
-		spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);
-
-		while (streamer_priv->srb_queued) {
-			interruptible_sleep_on_timeout(&streamer_priv->srb_wait, 5 * HZ);
-			if (signal_pending(current)) {
-				printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
-				printk(KERN_WARNING "SISR=%x MISR=%x, LISR=%x\n",
-				       readw(streamer_mmio + SISR),
-				       readw(streamer_mmio + MISR_RUM),
-				       readw(streamer_mmio + LISR));
-				streamer_priv->srb_queued = 0;
-				break;
-			}
-		}
-
-#if STREAMER_DEBUG
-		printk("SISR_MASK: %x\n", readw(streamer_mmio + SISR_MASK));
-		printk("srb open response:\n");
-		writew(srb_open, streamer_mmio + LAPA);
-		for (i = 0; i < 10; i++) {
-			printk("%x:",
-			       ntohs(readw(streamer_mmio + LAPDINC)));
-		}
-#endif
-
-		/* If we get the same return response as we set, the interrupt wasn't raised and the open
-		 * timed out.
-		 */
-		writew(srb_open + 2, streamer_mmio + LAPA);
-		srb_word = ntohs(readw(streamer_mmio + LAPD)) >> 8;
-		if (srb_word == STREAMER_CLEAR_RET_CODE) {
-			printk(KERN_WARNING "%s: Adapter Open time out or error.\n",
-			       dev->name);
-			return -EIO;
-		}
-
-		if (srb_word != 0) {
-			if (srb_word == 0x07) {
-				if (!streamer_priv->streamer_ring_speed && open_finished) {	/* Autosense , first time around */
-					printk(KERN_WARNING "%s: Retrying at different ring speed\n",
-					       dev->name);
-					open_finished = 0;
-				} else {
-					__u16 error_code;
-
-					writew(srb_open + 6, streamer_mmio + LAPA);
-					error_code = ntohs(readw(streamer_mmio + LAPD));
-					strcpy(open_error, open_maj_error[(error_code & 0xf0) >> 4]);
-					strcat(open_error, " - ");
-					strcat(open_error, open_min_error[(error_code & 0x0f)]);
-
-					if (!streamer_priv->streamer_ring_speed &&
-					    ((error_code & 0x0f) == 0x0d))
-					{
-						printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
-						printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
-						free_irq(dev->irq, dev);
-						return -EIO;
-					}
-
-					printk(KERN_WARNING "%s: %s\n",
-					       dev->name, open_error);
-					free_irq(dev->irq, dev);
-					return -EIO;
-
-				}	/* if autosense && open_finished */
-			} else {
-				printk(KERN_WARNING "%s: Bad OPEN response: %x\n",
-				       dev->name, srb_word);
-				free_irq(dev->irq, dev);
-				return -EIO;
-			}
-		} else
-			open_finished = 1;
-	} while (!(open_finished));	/* Will only loop if ring speed mismatch re-open attempted && autosense is on */
-
-	writew(srb_open + 18, streamer_mmio + LAPA);
-	srb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8;
-	if (srb_word & (1 << 3))
-		if (streamer_priv->streamer_message_level)
-			printk(KERN_INFO "%s: Opened in FDX Mode\n", dev->name);
-
-	if (srb_word & 1)
-		streamer_priv->streamer_ring_speed = 16;
-	else
-		streamer_priv->streamer_ring_speed = 4;
-
-	if (streamer_priv->streamer_message_level)
-		printk(KERN_INFO "%s: Opened in %d Mbps mode\n", 
-			dev->name,
-			streamer_priv->streamer_ring_speed);
-
-	writew(srb_open + 8, streamer_mmio + LAPA);
-	streamer_priv->asb = ntohs(readw(streamer_mmio + LAPDINC));
-	streamer_priv->srb = ntohs(readw(streamer_mmio + LAPDINC));
-	streamer_priv->arb = ntohs(readw(streamer_mmio + LAPDINC));
-	readw(streamer_mmio + LAPDINC);	/* offset 14 word is rsvd */
-	streamer_priv->trb = ntohs(readw(streamer_mmio + LAPDINC));
-
-	streamer_priv->streamer_receive_options = 0x00;
-	streamer_priv->streamer_copy_all_options = 0;
-
-	/* setup rx ring */
-	/* enable rx channel */
-	writew(~BMCTL_RX_DIS, streamer_mmio + BMCTL_RUM);
-
-	/* setup rx descriptors */
-	streamer_priv->streamer_rx_ring=
-	    kmalloc( sizeof(struct streamer_rx_desc)*
-		     STREAMER_RX_RING_SIZE,GFP_KERNEL);
-	if (!streamer_priv->streamer_rx_ring) {
-	    printk(KERN_WARNING "%s ALLOC of streamer rx ring FAILED!!\n",dev->name);
-	    return -EIO;
-	}
-
-	for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
-		struct sk_buff *skb;
-
-		skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
-		if (skb == NULL)
-			break;
-
-		skb->dev = dev;
-
-		streamer_priv->streamer_rx_ring[i].forward = 
-			cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[i + 1],
-					sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
-		streamer_priv->streamer_rx_ring[i].status = 0;
-		streamer_priv->streamer_rx_ring[i].buffer = 
-			cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data,
-					      streamer_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
-		streamer_priv->streamer_rx_ring[i].framelen_buflen = streamer_priv->pkt_buf_sz;
-		streamer_priv->rx_ring_skb[i] = skb;
-	}
-	streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1].forward =
-				cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
-						sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE));
-
-	if (i == 0) {
-		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n", dev->name);
-		free_irq(dev->irq, dev);
-		return -EIO;
-	}
-
-	streamer_priv->rx_ring_last_received = STREAMER_RX_RING_SIZE - 1;	/* last processed rx status */
-
-	writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[0],
-				sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)), 
-		streamer_mmio + RXBDA);
-	writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_rx_ring[STREAMER_RX_RING_SIZE - 1],
-				sizeof(struct streamer_rx_desc), PCI_DMA_TODEVICE)), 
-		streamer_mmio + RXLBDA);
-
-	/* set bus master interrupt event mask */
-	writew(MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
-
-
-	/* setup tx ring */
-	streamer_priv->streamer_tx_ring=kmalloc(sizeof(struct streamer_tx_desc)*
-						STREAMER_TX_RING_SIZE,GFP_KERNEL);
-	if (!streamer_priv->streamer_tx_ring) {
-	    printk(KERN_WARNING "%s ALLOC of streamer_tx_ring FAILED\n",dev->name);
-	    return -EIO;
-	}
-
-	writew(~BMCTL_TX2_DIS, streamer_mmio + BMCTL_RUM);	/* Enables TX channel 2 */
-	for (i = 0; i < STREAMER_TX_RING_SIZE; i++) {
-		streamer_priv->streamer_tx_ring[i].forward = cpu_to_le32(pci_map_single(streamer_priv->pci_dev, 
-										&streamer_priv->streamer_tx_ring[i + 1],
-										sizeof(struct streamer_tx_desc),
-										PCI_DMA_TODEVICE));
-		streamer_priv->streamer_tx_ring[i].status = 0;
-		streamer_priv->streamer_tx_ring[i].bufcnt_framelen = 0;
-		streamer_priv->streamer_tx_ring[i].buffer = 0;
-		streamer_priv->streamer_tx_ring[i].buflen = 0;
-		streamer_priv->streamer_tx_ring[i].rsvd1 = 0;
-		streamer_priv->streamer_tx_ring[i].rsvd2 = 0;
-		streamer_priv->streamer_tx_ring[i].rsvd3 = 0;
-	}
-	streamer_priv->streamer_tx_ring[STREAMER_TX_RING_SIZE - 1].forward =
-					cpu_to_le32(pci_map_single(streamer_priv->pci_dev, &streamer_priv->streamer_tx_ring[0],
-							sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE));
-
-	streamer_priv->free_tx_ring_entries = STREAMER_TX_RING_SIZE;
-	streamer_priv->tx_ring_free = 0;	/* next entry in tx ring to use */
-	streamer_priv->tx_ring_last_status = STREAMER_TX_RING_SIZE - 1;
-
-	/* set Busmaster interrupt event mask (handle receives on interrupt only) */
-	writew(MISR_TX2_EOF | MISR_RX_NOBUF | MISR_RX_EOF, streamer_mmio + MISR_MASK);
-	/* set system event interrupt mask */
-	writew(SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE, streamer_mmio + SISR_MASK_SUM);
-
-#if STREAMER_DEBUG
-	printk("BMCTL: %x\n", readw(streamer_mmio + BMCTL_SUM));
-	printk("SISR MASK: %x\n", readw(streamer_mmio + SISR_MASK));
-#endif
-
-#if STREAMER_NETWORK_MONITOR
-
-	writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
-	printk("%s: Node Address: %04x:%04x:%04x\n", dev->name,
-		ntohs(readw(streamer_mmio + LAPDINC)),
-		ntohs(readw(streamer_mmio + LAPDINC)),
-		ntohs(readw(streamer_mmio + LAPDINC)));
-	readw(streamer_mmio + LAPDINC);
-	readw(streamer_mmio + LAPDINC);
-	printk("%s: Functional Address: %04x:%04x\n", dev->name,
-		ntohs(readw(streamer_mmio + LAPDINC)),
-		ntohs(readw(streamer_mmio + LAPDINC)));
-
-	writew(streamer_priv->streamer_parms_addr + 4,
-		streamer_mmio + LAPA);
-	printk("%s: NAUN Address: %04x:%04x:%04x\n", dev->name,
-		ntohs(readw(streamer_mmio + LAPDINC)),
-		ntohs(readw(streamer_mmio + LAPDINC)),
-		ntohs(readw(streamer_mmio + LAPDINC)));
-#endif
-
-	netif_start_queue(dev);
-	netif_carrier_on(dev);
-	return 0;
-}
-
-/*
- *	When we enter the rx routine we do not know how many frames have been 
- *	queued on the rx channel.  Therefore we start at the next rx status
- *	position and travel around the receive ring until we have completed
- *	all the frames.
- *
- *	This means that we may process the frame before we receive the end
- *	of frame interrupt. This is why we always test the status instead
- *	of blindly processing the next frame.
- *	
- */
-static void streamer_rx(struct net_device *dev)
-{
-	struct streamer_private *streamer_priv =
-	    netdev_priv(dev);
-	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
-	struct streamer_rx_desc *rx_desc;
-	int rx_ring_last_received, length, frame_length, buffer_cnt = 0;
-	struct sk_buff *skb, *skb2;
-
-	/* setup the next rx descriptor to be received */
-	rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
-	rx_ring_last_received = streamer_priv->rx_ring_last_received;
-
-	while (rx_desc->status & 0x01000000) {	/* While processed descriptors are available */
-		if (rx_ring_last_received != streamer_priv->rx_ring_last_received) 
-		{
-			printk(KERN_WARNING "RX Error 1 rx_ring_last_received not the same %x %x\n",
-				rx_ring_last_received, streamer_priv->rx_ring_last_received);
-		}
-		streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
-		rx_ring_last_received = streamer_priv->rx_ring_last_received;
-
-		length = rx_desc->framelen_buflen & 0xffff;	/* buffer length */
-		frame_length = (rx_desc->framelen_buflen >> 16) & 0xffff;
-
-		if (rx_desc->status & 0x7E830000) {	/* errors */
-			if (streamer_priv->streamer_message_level) {
-				printk(KERN_WARNING "%s: Rx Error %x\n",
-				       dev->name, rx_desc->status);
-			}
-		} else {	/* received without errors */
-			if (rx_desc->status & 0x80000000) {	/* frame complete */
-				buffer_cnt = 1;
-				skb = dev_alloc_skb(streamer_priv->pkt_buf_sz);
-			} else {
-				skb = dev_alloc_skb(frame_length);
-			}
-
-			if (skb == NULL) 
-			{
-				printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",	dev->name);
-				dev->stats.rx_dropped++;
-			} else {	/* we allocated an skb OK */
-				if (buffer_cnt == 1) {
-					/* release the DMA mapping */
-					pci_unmap_single(streamer_priv->pci_dev, 
-						le32_to_cpu(streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer),
-						streamer_priv->pkt_buf_sz, 
-						PCI_DMA_FROMDEVICE);
-					skb2 = streamer_priv->rx_ring_skb[rx_ring_last_received];
-#if STREAMER_DEBUG_PACKETS
-					{
-						int i;
-						printk("streamer_rx packet print: skb->data2 %p  skb->head %p\n", skb2->data, skb2->head);
-						for (i = 0; i < frame_length; i++) 
-						{
-							printk("%x:", skb2->data[i]);
-							if (((i + 1) % 16) == 0)
-								printk("\n");
-						}
-						printk("\n");
-					}
-#endif
-					skb_put(skb2, length);
-					skb2->protocol = tr_type_trans(skb2, dev);
-					/* recycle this descriptor */
-					streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
-					streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
-					streamer_priv->streamer_rx_ring[rx_ring_last_received].buffer = 
-						cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, streamer_priv->pkt_buf_sz,
-								PCI_DMA_FROMDEVICE));
-					streamer_priv->rx_ring_skb[rx_ring_last_received] = skb;
-					/* place recycled descriptor back on the adapter */
-					writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, 
-									&streamer_priv->streamer_rx_ring[rx_ring_last_received],
-									sizeof(struct streamer_rx_desc), PCI_DMA_FROMDEVICE)),
-						streamer_mmio + RXLBDA);
-					/* pass the received skb up to the protocol */
-					netif_rx(skb2);
-				} else {
-					do {	/* Walk the buffers */
-						pci_unmap_single(streamer_priv->pci_dev, le32_to_cpu(rx_desc->buffer), length, PCI_DMA_FROMDEVICE);
-						memcpy(skb_put(skb, length), (void *)rx_desc->buffer, length);	/* copy this fragment */
-						streamer_priv->streamer_rx_ring[rx_ring_last_received].status = 0;
-						streamer_priv->streamer_rx_ring[rx_ring_last_received].framelen_buflen = streamer_priv->pkt_buf_sz;
-						
-						/* give descriptor back to the adapter */
-						writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, 
-									&streamer_priv->streamer_rx_ring[rx_ring_last_received],
-									length, PCI_DMA_FROMDEVICE)), 
-							streamer_mmio + RXLBDA);
-
-						if (rx_desc->status & 0x80000000)
-							break;	/* this descriptor completes the frame */
-
-						/* else get the next pending descriptor */
-						if (rx_ring_last_received!= streamer_priv->rx_ring_last_received)
-						{
-							printk("RX Error rx_ring_last_received not the same %x %x\n",
-								rx_ring_last_received,
-								streamer_priv->rx_ring_last_received);
-						}
-						rx_desc = &streamer_priv->streamer_rx_ring[(streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE-1)];
-
-						length = rx_desc->framelen_buflen & 0xffff;	/* buffer length */
-						streamer_priv->rx_ring_last_received =	(streamer_priv->rx_ring_last_received+1) & (STREAMER_RX_RING_SIZE - 1);
-						rx_ring_last_received = streamer_priv->rx_ring_last_received;
-					} while (1);
-
-					skb->protocol = tr_type_trans(skb, dev);
-					/* send up to the protocol */
-					netif_rx(skb);
-				}
-				dev->stats.rx_packets++;
-				dev->stats.rx_bytes += length;
-			}	/* if skb == null */
-		}		/* end received without errors */
-
-		/* try the next one */
-		rx_desc = &streamer_priv->streamer_rx_ring[(rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1)];
-	}			/* end for all completed rx descriptors */
-}
-
-static irqreturn_t streamer_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev = (struct net_device *) dev_id;
-	struct streamer_private *streamer_priv =
-	    netdev_priv(dev);
-	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
-	__u16 sisr;
-	__u16 misr;
-	u8 max_intr = MAX_INTR;
-
-	spin_lock(&streamer_priv->streamer_lock);
-	sisr = readw(streamer_mmio + SISR);
-
-	while((sisr & (SISR_MI | SISR_SRB_REPLY | SISR_ADAPTER_CHECK | SISR_ASB_FREE | 
-		       SISR_ARB_CMD | SISR_TRB_REPLY | SISR_PAR_ERR | SISR_SERR_ERR)) &&
-	      (max_intr > 0)) {
-
-		if(sisr & SISR_PAR_ERR) {
-			writew(~SISR_PAR_ERR, streamer_mmio + SISR_RUM);
-			(void)readw(streamer_mmio + SISR_RUM);
-		}
-
-		else if(sisr & SISR_SERR_ERR) {
-			writew(~SISR_SERR_ERR, streamer_mmio + SISR_RUM);
-			(void)readw(streamer_mmio + SISR_RUM);
-		}
-
-		else if(sisr & SISR_MI) {
-			misr = readw(streamer_mmio + MISR_RUM);
-
-			if (misr & MISR_TX2_EOF) {
-				while (streamer_priv->streamer_tx_ring[(streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1)].status) {
-					streamer_priv->tx_ring_last_status = (streamer_priv->tx_ring_last_status + 1) & (STREAMER_TX_RING_SIZE - 1);
-					streamer_priv->free_tx_ring_entries++;
-					dev->stats.tx_bytes += streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]->len;
-					dev->stats.tx_packets++;
-					dev_kfree_skb_irq(streamer_priv->tx_ring_skb[streamer_priv->tx_ring_last_status]);
-					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buffer = 0xdeadbeef;
-					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].status = 0;
-					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].bufcnt_framelen = 0;
-					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].buflen = 0;
-					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd1 = 0;
-					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd2 = 0;
-					streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_last_status].rsvd3 = 0;
-				}
-				netif_wake_queue(dev);
-			}
-
-			if (misr & MISR_RX_EOF) {
-				streamer_rx(dev);
-			}
-			/* MISR_RX_EOF */
-
-			if (misr & MISR_RX_NOBUF) {
-				/* According to the documentation, we don't have to do anything,  
-                                 * but trapping it keeps it out of /var/log/messages.  
-                                 */
-			}		/* SISR_RX_NOBUF */
-
-			writew(~misr, streamer_mmio + MISR_RUM);
-			(void)readw(streamer_mmio + MISR_RUM);
-		}
-
-		else if (sisr & SISR_SRB_REPLY) {
-			if (streamer_priv->srb_queued == 1) {
-				wake_up_interruptible(&streamer_priv->srb_wait);
-			} else if (streamer_priv->srb_queued == 2) {
-				streamer_srb_bh(dev);
-			}
-			streamer_priv->srb_queued = 0;
-
-			writew(~SISR_SRB_REPLY, streamer_mmio + SISR_RUM);
-			(void)readw(streamer_mmio + SISR_RUM);
-		}
-
-		else if (sisr & SISR_ADAPTER_CHECK) {
-			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
-			writel(readl(streamer_mmio + LAPWWO), streamer_mmio + LAPA);
-			printk(KERN_WARNING "%s: Words %x:%x:%x:%x:\n",
-			       dev->name, readw(streamer_mmio + LAPDINC),
-			       ntohs(readw(streamer_mmio + LAPDINC)),
-			       ntohs(readw(streamer_mmio + LAPDINC)),
-			       ntohs(readw(streamer_mmio + LAPDINC)));
-			netif_stop_queue(dev);
-			netif_carrier_off(dev);
-			printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name);
-		}
-
-		/* SISR_ADAPTER_CHECK */
-		else if (sisr & SISR_ASB_FREE) {
-			/* Wake up anything that is waiting for the asb response */
-			if (streamer_priv->asb_queued) {
-				streamer_asb_bh(dev);
-			}
-			writew(~SISR_ASB_FREE, streamer_mmio + SISR_RUM);
-			(void)readw(streamer_mmio + SISR_RUM);
-		}
-		/* SISR_ASB_FREE */
-		else if (sisr & SISR_ARB_CMD) {
-			streamer_arb_cmd(dev);
-			writew(~SISR_ARB_CMD, streamer_mmio + SISR_RUM);
-			(void)readw(streamer_mmio + SISR_RUM);
-		}
-		/* SISR_ARB_CMD */
-		else if (sisr & SISR_TRB_REPLY) {
-			/* Wake up anything that is waiting for the trb response */
-			if (streamer_priv->trb_queued) {
-				wake_up_interruptible(&streamer_priv->
-						      trb_wait);
-			}
-			streamer_priv->trb_queued = 0;
-			writew(~SISR_TRB_REPLY, streamer_mmio + SISR_RUM);
-			(void)readw(streamer_mmio + SISR_RUM);
-		}
-		/* SISR_TRB_REPLY */
-
-		sisr = readw(streamer_mmio + SISR);
-		max_intr--;
-	} /* while() */		
-
-	spin_unlock(&streamer_priv->streamer_lock) ; 
-	return IRQ_HANDLED;
-}
-
-static netdev_tx_t streamer_xmit(struct sk_buff *skb,
-				       struct net_device *dev)
-{
-	struct streamer_private *streamer_priv =
-	    netdev_priv(dev);
-	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
-	unsigned long flags ;
-
-	spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
-
-	if (streamer_priv->free_tx_ring_entries) {
-		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].status = 0;
-		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].bufcnt_framelen = 0x00020000 | skb->len;
-		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buffer = 
-			cpu_to_le32(pci_map_single(streamer_priv->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE));
-		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd1 = skb->len;
-		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd2 = 0;
-		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].rsvd3 = 0;
-		streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free].buflen = skb->len;
-
-		streamer_priv->tx_ring_skb[streamer_priv->tx_ring_free] = skb;
-		streamer_priv->free_tx_ring_entries--;
-#if STREAMER_DEBUG_PACKETS
-		{
-			int i;
-			printk("streamer_xmit packet print:\n");
-			for (i = 0; i < skb->len; i++) {
-				printk("%x:", skb->data[i]);
-				if (((i + 1) % 16) == 0)
-					printk("\n");
-			}
-			printk("\n");
-		}
-#endif
-
-		writel(cpu_to_le32(pci_map_single(streamer_priv->pci_dev, 
-					&streamer_priv->streamer_tx_ring[streamer_priv->tx_ring_free],
-					sizeof(struct streamer_tx_desc), PCI_DMA_TODEVICE)),
-			streamer_mmio + TX2LFDA);
-		(void)readl(streamer_mmio + TX2LFDA);
-
-		streamer_priv->tx_ring_free = (streamer_priv->tx_ring_free + 1) & (STREAMER_TX_RING_SIZE - 1);
-		spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
-		return NETDEV_TX_OK;
-	} else {
-	        netif_stop_queue(dev);
-	        spin_unlock_irqrestore(&streamer_priv->streamer_lock,flags);
-		return NETDEV_TX_BUSY;
-	}
-}
-
-
-static int streamer_close(struct net_device *dev)
-{
-	struct streamer_private *streamer_priv =
-	    netdev_priv(dev);
-	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
-	unsigned long flags;
-	int i;
-
-	netif_stop_queue(dev);
-	netif_carrier_off(dev);
-	writew(streamer_priv->srb, streamer_mmio + LAPA);
-	writew(htons(SRB_CLOSE_ADAPTER << 8),streamer_mmio+LAPDINC);
-	writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
-
-	spin_lock_irqsave(&streamer_priv->streamer_lock, flags);
-
-	streamer_priv->srb_queued = 1;
-	writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
-
-	spin_unlock_irqrestore(&streamer_priv->streamer_lock, flags);
-
-	while (streamer_priv->srb_queued) 
-	{
-		interruptible_sleep_on_timeout(&streamer_priv->srb_wait,
-					       60 * HZ);
-		if (signal_pending(current)) 
-		{
-			printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
-			printk(KERN_WARNING "SISR=%x MISR=%x LISR=%x\n",
-			       readw(streamer_mmio + SISR),
-			       readw(streamer_mmio + MISR_RUM),
-			       readw(streamer_mmio + LISR));
-			streamer_priv->srb_queued = 0;
-			break;
-		}
-	}
-
-	streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
-
-	for (i = 0; i < STREAMER_RX_RING_SIZE; i++) {
-	        if (streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]) {
-		        dev_kfree_skb(streamer_priv->rx_ring_skb[streamer_priv->rx_ring_last_received]);
-		} 
-		streamer_priv->rx_ring_last_received = (streamer_priv->rx_ring_last_received + 1) & (STREAMER_RX_RING_SIZE - 1);
-	}
-
-	/* reset tx/rx fifo's and busmaster logic */
-
-	/* TBD. Add graceful way to reset the LLC channel without doing a soft reset. 
-	   writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
-	   udelay(1);
-	   writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL);
-	 */
-
-#if STREAMER_DEBUG
-	writew(streamer_priv->srb, streamer_mmio + LAPA);
-	printk("srb): ");
-	for (i = 0; i < 2; i++) {
-		printk("%x ", ntohs(readw(streamer_mmio + LAPDINC)));
-	}
-	printk("\n");
-#endif
-	free_irq(dev->irq, dev);
-	return 0;
-}
-
-static void streamer_set_rx_mode(struct net_device *dev)
-{
-	struct streamer_private *streamer_priv =
-	    netdev_priv(dev);
-	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
-	__u8 options = 0;
-	struct netdev_hw_addr *ha;
-	unsigned char dev_mc_address[5];
-
-	writel(streamer_priv->srb, streamer_mmio + LAPA);
-	options = streamer_priv->streamer_copy_all_options;
-
-	if (dev->flags & IFF_PROMISC)
-		options |= (3 << 5);	/* All LLC and MAC frames, all through the main rx channel */
-	else
-		options &= ~(3 << 5);
-
-	/* Only issue the srb if there is a change in options */
-
-	if ((options ^ streamer_priv->streamer_copy_all_options)) 
-	{
-		/* Now to issue the srb command to alter the copy.all.options */
-		writew(htons(SRB_MODIFY_RECEIVE_OPTIONS << 8), streamer_mmio+LAPDINC);
-		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
-		writew(htons((streamer_priv->streamer_receive_options << 8) | options),streamer_mmio+LAPDINC);
-		writew(htons(0x4a41),streamer_mmio+LAPDINC);
-		writew(htons(0x4d45),streamer_mmio+LAPDINC);
-		writew(htons(0x5320),streamer_mmio+LAPDINC);
-		writew(0x2020, streamer_mmio + LAPDINC);
-
-		streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */
-
-		writel(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
-
-		streamer_priv->streamer_copy_all_options = options;
-		return;
-	}
-
-	/* Set the functional addresses we need for multicast */
-	writel(streamer_priv->srb,streamer_mmio+LAPA);
-	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 
-  
-	netdev_for_each_mc_addr(ha, dev) {
-		dev_mc_address[0] |= ha->addr[2];
-		dev_mc_address[1] |= ha->addr[3];
-		dev_mc_address[2] |= ha->addr[4];
-		dev_mc_address[3] |= ha->addr[5];
-	}
-  
-	writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
-	writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
-	writew(0,streamer_mmio+LAPDINC);
-	writew(htons( (dev_mc_address[0] << 8) | dev_mc_address[1]),streamer_mmio+LAPDINC);
-	writew(htons( (dev_mc_address[2] << 8) | dev_mc_address[3]),streamer_mmio+LAPDINC);
-	streamer_priv->srb_queued = 2 ; 
-	writel(LISR_SRB_CMD,streamer_mmio+LISR_SUM);
-}
-
-static void streamer_srb_bh(struct net_device *dev)
-{
-	struct streamer_private *streamer_priv = netdev_priv(dev);
-	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
-	__u16 srb_word;
-
-	writew(streamer_priv->srb, streamer_mmio + LAPA);
-	srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
-
-	switch (srb_word) {
-
-		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous) 
-		 * At some point we should do something if we get an error, such as
-		 * resetting the IFF_PROMISC flag in dev
-		 */
-
-	case SRB_MODIFY_RECEIVE_OPTIONS:
-	        srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
-
-		switch (srb_word) {
-		case 0x01:
-			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
-			break;
-		case 0x04:
-			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
-			break;
-		default:
-			if (streamer_priv->streamer_message_level)
-				printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",
-				       dev->name,
-				       streamer_priv->streamer_copy_all_options,
-				       streamer_priv->streamer_receive_options);
-			break;
-		}		/* switch srb[2] */
-		break;
-
-
-		/* SRB_SET_GROUP_ADDRESS - Multicast group setting 
-		 */
-	case SRB_SET_GROUP_ADDRESS:
-	        srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
-		switch (srb_word) {
-		case 0x00:
-		        break;
-		case 0x01:
-			printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
-			break;
-		case 0x04:
-			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
-			break;
-		case 0x3c:
-			printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n", dev->name);
-			break;
-		case 0x3e:	/* If we ever implement individual multicast addresses, will need to deal with this */
-			printk(KERN_WARNING "%s: Group address registers full\n", dev->name);
-			break;
-		case 0x55:
-			printk(KERN_INFO "%s: Group Address already set.\n", dev->name);
-			break;
-		default:
-			break;
-		}		/* switch srb[2] */
-		break;
-
-
-		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
-		 */
-	case SRB_RESET_GROUP_ADDRESS:
-	        srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
-		switch (srb_word) {
-		case 0x00:
-		        break;
-		case 0x01:
-			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
-			break;
-		case 0x04:
-			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
-			break;
-		case 0x39:	/* Must deal with this if individual multicast addresses used */
-			printk(KERN_INFO "%s: Group address not found\n", dev->name);
-			break;
-		default:
-			break;
-		}		/* switch srb[2] */
-		break;
-
-
-		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode 
-		 */
-
-	case SRB_SET_FUNC_ADDRESS:
-	        srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
-		switch (srb_word) {
-		case 0x00:
-			if (streamer_priv->streamer_message_level)
-				printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
-			break;
-		case 0x01:
-			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
-			break;
-		case 0x04:
-			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
-			break;
-		default:
-			break;
-		}		/* switch srb[2] */
-		break;
-
-		/* SRB_READ_LOG - Read and reset the adapter error counters
-		 */
-
-	case SRB_READ_LOG:
-	        srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
-		switch (srb_word) {
-		case 0x00:
-			{
-				int i;
-				if (streamer_priv->streamer_message_level)
-					printk(KERN_INFO "%s: Read Log command complete\n", dev->name);
-				printk("Read Log statistics: ");
-				writew(streamer_priv->srb + 6,
-				       streamer_mmio + LAPA);
-				for (i = 0; i < 5; i++) {
-					printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
-				}
-				printk("\n");
-			}
-			break;
-		case 0x01:
-			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
-			break;
-		case 0x04:
-			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
-			break;
-
-		}		/* switch srb[2] */
-		break;
-
-		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
-
-	case SRB_READ_SR_COUNTERS:
-	        srb_word=ntohs(readw(streamer_mmio+LAPDINC)) >> 8;
-		switch (srb_word) {
-		case 0x00:
-			if (streamer_priv->streamer_message_level)
-				printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
-			break;
-		case 0x01:
-			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
-			break;
-		case 0x04:
-			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
-			break;
-		default:
-			break;
-		}		/* switch srb[2] */
-		break;
-
-	default:
-		printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n", dev->name);
-		break;
-	}			/* switch srb[0] */
-}
-
-static int streamer_set_mac_address(struct net_device *dev, void *addr)
-{
-	struct sockaddr *saddr = addr;
-	struct streamer_private *streamer_priv = netdev_priv(dev);
-
-	if (netif_running(dev)) 
-	{
-		printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name);
-		return -EIO;
-	}
-
-	memcpy(streamer_priv->streamer_laa, saddr->sa_data, dev->addr_len);
-
-	if (streamer_priv->streamer_message_level) {
-		printk(KERN_INFO "%s: MAC/LAA Set to  = %x.%x.%x.%x.%x.%x\n",
-		       dev->name, streamer_priv->streamer_laa[0],
-		       streamer_priv->streamer_laa[1],
-		       streamer_priv->streamer_laa[2],
-		       streamer_priv->streamer_laa[3],
-		       streamer_priv->streamer_laa[4],
-		       streamer_priv->streamer_laa[5]);
-	}
-	return 0;
-}
-
-static void streamer_arb_cmd(struct net_device *dev)
-{
-	struct streamer_private *streamer_priv =
-	    netdev_priv(dev);
-	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
-	__u8 header_len;
-	__u16 frame_len, buffer_len;
-	struct sk_buff *mac_frame;
-	__u8 frame_data[256];
-	__u16 buff_off;
-	__u16 lan_status = 0, lan_status_diff;	/* Initialize to stop compiler warning */
-	__u8 fdx_prot_error;
-	__u16 next_ptr;
-	__u16 arb_word;
-
-#if STREAMER_NETWORK_MONITOR
-	struct trh_hdr *mac_hdr;
-#endif
-
-	writew(streamer_priv->arb, streamer_mmio + LAPA);
-	arb_word=ntohs(readw(streamer_mmio+LAPD)) >> 8;
-	
-	if (arb_word == ARB_RECEIVE_DATA) {	/* Receive.data, MAC frames */
-		writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
-		streamer_priv->mac_rx_buffer = buff_off = ntohs(readw(streamer_mmio + LAPDINC));
-		header_len=ntohs(readw(streamer_mmio+LAPDINC)) >> 8; /* 802.5 Token-Ring Header Length */
-		frame_len = ntohs(readw(streamer_mmio + LAPDINC));
-
-#if STREAMER_DEBUG
-		{
-			int i;
-			__u16 next;
-			__u8 status;
-			__u16 len;
-
-			writew(ntohs(buff_off), streamer_mmio + LAPA);	/*setup window to frame data */
-			next = htons(readw(streamer_mmio + LAPDINC));
-			status =
-			    ntohs(readw(streamer_mmio + LAPDINC)) & 0xff;
-			len = ntohs(readw(streamer_mmio + LAPDINC));
-
-			/* print out 1st 14 bytes of frame data */
-			for (i = 0; i < 7; i++) {
-				printk("Loc %d = %04x\n", i,
-				       ntohs(readw
-					     (streamer_mmio + LAPDINC)));
-			}
-
-			printk("next %04x, fs %02x, len %04x\n", next,
-			       status, len);
-		}
-#endif
-		if (!(mac_frame = dev_alloc_skb(frame_len))) {
-			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n",
-			       dev->name);
-			goto drop_frame;
-		}
-		/* Walk the buffer chain, creating the frame */
-
-		do {
-			int i;
-			__u16 rx_word;
-
-			writew(htons(buff_off), streamer_mmio + LAPA);	/* setup window to frame data */
-			next_ptr = ntohs(readw(streamer_mmio + LAPDINC));
-			readw(streamer_mmio + LAPDINC);	/* read thru status word */
-			buffer_len = ntohs(readw(streamer_mmio + LAPDINC));
-
-			if (buffer_len > 256)
-				break;
-
-			i = 0;
-			while (i < buffer_len) {
-				rx_word=ntohs(readw(streamer_mmio+LAPDINC));
-				frame_data[i]=rx_word >> 8;
-				frame_data[i+1]=rx_word & 0xff;
-				i += 2;
-			}
-
-			memcpy(skb_put(mac_frame, buffer_len),
-				      frame_data, buffer_len);
-		} while (next_ptr && (buff_off = next_ptr));
-
-		mac_frame->protocol = tr_type_trans(mac_frame, dev);
-#if STREAMER_NETWORK_MONITOR
-		printk(KERN_WARNING "%s: Received MAC Frame, details:\n",
-		       dev->name);
-		mac_hdr = tr_hdr(mac_frame);
-		printk(KERN_WARNING
-		       "%s: MAC Frame Dest. Addr: %pM\n",
-		       dev->name, mac_hdr->daddr);
-		printk(KERN_WARNING
-		       "%s: MAC Frame Srce. Addr: %pM\n",
-		       dev->name, mac_hdr->saddr);
-#endif
-		netif_rx(mac_frame);
-
-		/* Now tell the card we have dealt with the received frame */
-drop_frame:
-		/* Set LISR Bit 1 */
-		writel(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);
-
-		/* Is the ASB free ? */
-
-		if (!(readl(streamer_priv->streamer_mmio + SISR) & SISR_ASB_FREE)) 
-		{
-			streamer_priv->asb_queued = 1;
-			writel(LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
-			return;
-			/* Drop out and wait for the bottom half to be run */
-		}
-
-
-		writew(streamer_priv->asb, streamer_mmio + LAPA);
-		writew(htons(ASB_RECEIVE_DATA << 8), streamer_mmio+LAPDINC);
-		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
-		writew(0, streamer_mmio + LAPDINC);
-		writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);
-
-		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
-
-		streamer_priv->asb_queued = 2;
-		return;
-
-	} else if (arb_word == ARB_LAN_CHANGE_STATUS) {	/* Lan.change.status */
-		writew(streamer_priv->arb + 6, streamer_mmio + LAPA);
-		lan_status = ntohs(readw(streamer_mmio + LAPDINC));
-		fdx_prot_error = ntohs(readw(streamer_mmio+LAPD)) >> 8;
-		
-		/* Issue ARB Free */
-		writew(LISR_ARB_FREE, streamer_priv->streamer_mmio + LISR_SUM);
-
-		lan_status_diff = (streamer_priv->streamer_lan_status ^ lan_status) & 
-		    lan_status; 
-
-		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR)) 
-		{
-			if (lan_status_diff & LSC_LWF)
-				printk(KERN_WARNING "%s: Short circuit detected on the lobe\n", dev->name);
-			if (lan_status_diff & LSC_ARW)
-				printk(KERN_WARNING "%s: Auto removal error\n", dev->name);
-			if (lan_status_diff & LSC_FPE)
-				printk(KERN_WARNING "%s: FDX Protocol Error\n", dev->name);
-			if (lan_status_diff & LSC_RR)
-				printk(KERN_WARNING "%s: Force remove MAC frame received\n", dev->name);
-
-			/* Adapter has been closed by the hardware */
-
-			/* reset tx/rx fifo's and busmaster logic */
-
-			/* @TBD. no llc reset on autostreamer writel(readl(streamer_mmio+BCTL)|(3<<13),streamer_mmio+BCTL);
-			   udelay(1);
-			   writel(readl(streamer_mmio+BCTL)&~(3<<13),streamer_mmio+BCTL); */
-
-			netif_stop_queue(dev);
-			netif_carrier_off(dev);
-			printk(KERN_WARNING "%s: Adapter must be manually reset.\n", dev->name);
-		}
-		/* If serious error */
-		if (streamer_priv->streamer_message_level) {
-			if (lan_status_diff & LSC_SIG_LOSS)
-				printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
-			if (lan_status_diff & LSC_HARD_ERR) 
-				printk(KERN_INFO "%s: Beaconing\n", dev->name);
-			if (lan_status_diff & LSC_SOFT_ERR)
-				printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
-			if (lan_status_diff & LSC_TRAN_BCN)
-				printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
-			if (lan_status_diff & LSC_SS)
-				printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
-			if (lan_status_diff & LSC_RING_REC)
-				printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
-			if (lan_status_diff & LSC_FDX_MODE)
-				printk(KERN_INFO "%s: Operating in FDX mode\n", dev->name);
-		}
-
-		if (lan_status_diff & LSC_CO) {
-			if (streamer_priv->streamer_message_level)
-				printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
-
-			/* Issue READ.LOG command */
-
-			writew(streamer_priv->srb, streamer_mmio + LAPA);
-			writew(htons(SRB_READ_LOG << 8),streamer_mmio+LAPDINC);
-			writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
-			writew(0, streamer_mmio + LAPDINC);
-			streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */
-
-			writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
-		}
-
-		if (lan_status_diff & LSC_SR_CO) {
-			if (streamer_priv->streamer_message_level)
-				printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
-
-			/* Issue a READ.SR.COUNTERS */
-			writew(streamer_priv->srb, streamer_mmio + LAPA);
-			writew(htons(SRB_READ_SR_COUNTERS << 8),
-			       streamer_mmio+LAPDINC);
-			writew(htons(STREAMER_CLEAR_RET_CODE << 8),
-			       streamer_mmio+LAPDINC);
-			streamer_priv->srb_queued = 2;	/* Can't sleep, use srb_bh */
-			writew(LISR_SRB_CMD, streamer_mmio + LISR_SUM);
-
-		}
-		streamer_priv->streamer_lan_status = lan_status;
-	} /* Lan.change.status */
-	else
-		printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
-}
-
-static void streamer_asb_bh(struct net_device *dev)
-{
-	struct streamer_private *streamer_priv =
-	    netdev_priv(dev);
-	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
-
-	if (streamer_priv->asb_queued == 1) 
-	{
-		/* Dropped through the first time */
-
-		writew(streamer_priv->asb, streamer_mmio + LAPA);
-		writew(htons(ASB_RECEIVE_DATA << 8),streamer_mmio+LAPDINC);
-		writew(htons(STREAMER_CLEAR_RET_CODE << 8), streamer_mmio+LAPDINC);
-		writew(0, streamer_mmio + LAPDINC);
-		writew(htons(streamer_priv->mac_rx_buffer), streamer_mmio + LAPD);
-
-		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, streamer_priv->streamer_mmio + LISR_SUM);
-		streamer_priv->asb_queued = 2;
-
-		return;
-	}
-
-	if (streamer_priv->asb_queued == 2) {
-		__u8 rc;
-		writew(streamer_priv->asb + 2, streamer_mmio + LAPA);
-		rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
-		switch (rc) {
-		case 0x01:
-			printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
-			break;
-		case 0x26:
-			printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
-			break;
-		case 0xFF:
-			/* Valid response, everything should be ok again */
-			break;
-		default:
-			printk(KERN_WARNING "%s: Invalid return code in asb\n", dev->name);
-			break;
-		}
-	}
-	streamer_priv->asb_queued = 0;
-}
-
-static int streamer_change_mtu(struct net_device *dev, int mtu)
-{
-	struct streamer_private *streamer_priv =
-	    netdev_priv(dev);
-	__u16 max_mtu;
-
-	if (streamer_priv->streamer_ring_speed == 4)
-		max_mtu = 4500;
-	else
-		max_mtu = 18000;
-
-	if (mtu > max_mtu)
-		return -EINVAL;
-	if (mtu < 100)
-		return -EINVAL;
-
-	dev->mtu = mtu;
-	streamer_priv->pkt_buf_sz = mtu + TR_HLEN;
-
-	return 0;
-}
-
-#if STREAMER_NETWORK_MONITOR
-#ifdef CONFIG_PROC_FS
-static int streamer_proc_info(char *buffer, char **start, off_t offset,
-			      int length, int *eof, void *data)
-{
-	struct streamer_private *sdev = NULL;
-	struct pci_dev *pci_device = NULL;
-	int len = 0;
-	off_t begin = 0;
-	off_t pos = 0;
-	int size;
-
-	struct net_device *dev;
-
-	size = sprintf(buffer, "IBM LanStreamer/MPC Chipset Token Ring Adapters\n");
-
-	pos += size;
-	len += size;
-
-	for (sdev = dev_streamer; sdev; sdev = sdev->next) {
-		pci_device = sdev->pci_dev;
-		dev = pci_get_drvdata(pci_device);
-
-		size = sprintf_info(buffer + len, dev);
-		len += size;
-		pos = begin + len;
-
-		if (pos < offset) {
-			len = 0;
-			begin = pos;
-		}
-		if (pos > offset + length)
-			break;
-	}		/* for */
-
-	*start = buffer + (offset - begin);	/* Start of wanted data */
-	len -= (offset - begin);	/* Start slop */
-	if (len > length)
-		len = length;	/* Ending slop */
-	return len;
-}
-
-static int sprintf_info(char *buffer, struct net_device *dev)
-{
-	struct streamer_private *streamer_priv =
-	    netdev_priv(dev);
-	__u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
-	struct streamer_adapter_addr_table sat;
-	struct streamer_parameters_table spt;
-	int size = 0;
-	int i;
-
-	writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
-	for (i = 0; i < 14; i += 2) {
-		__u16 io_word;
-		__u8 *datap = (__u8 *) & sat;
-		io_word=ntohs(readw(streamer_mmio+LAPDINC));
-		datap[i]=io_word >> 8;
-		datap[i+1]=io_word & 0xff;
-	}
-	writew(streamer_priv->streamer_parms_addr, streamer_mmio + LAPA);
-	for (i = 0; i < 68; i += 2) {
-		__u16 io_word;
-		__u8 *datap = (__u8 *) & spt;
-		io_word=ntohs(readw(streamer_mmio+LAPDINC));
-		datap[i]=io_word >> 8;
-		datap[i+1]=io_word & 0xff;
-	}
-
-	size = sprintf(buffer, "\n%6s: Adapter Address   : Node Address      : Functional Addr\n", dev->name);
-
-	size += sprintf(buffer + size,
-			"%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
-			dev->name, dev->dev_addr, sat.node_addr,
-			sat.func_addr[0], sat.func_addr[1],
-			sat.func_addr[2], sat.func_addr[3]);
-
-	size += sprintf(buffer + size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
-
-	size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address   : Poll Address      : AccPri : Auth Src : Att Code :\n", dev->name);
-
-	size += sprintf(buffer + size,
-		    "%6s: %02x:%02x:%02x:%02x   : %pM : %pM : %04x   : %04x     :  %04x    :\n",
-		    dev->name, spt.phys_addr[0], spt.phys_addr[1],
-		    spt.phys_addr[2], spt.phys_addr[3],
-		    spt.up_node_addr, spt.poll_addr,
-		    ntohs(spt.acc_priority), ntohs(spt.auth_source_class),
-		    ntohs(spt.att_code));
-
-	size += sprintf(buffer + size, "%6s: Source Address    : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name);
-
-	size += sprintf(buffer + size,
-		    "%6s: %pM : %04x  : %04x   : %04x   : %04x   : %04x    :     %04x     : \n",
-		    dev->name, spt.source_addr,
-		    ntohs(spt.beacon_type), ntohs(spt.major_vector),
-		    ntohs(spt.lan_status), ntohs(spt.local_ring),
-		    ntohs(spt.mon_error), ntohs(spt.frame_correl));
-
-	size += sprintf(buffer + size, "%6s: Beacon Details :  Tx  :  Rx  : NAUN Node Address : NAUN Node Phys : \n",
-		    dev->name);
-
-	size += sprintf(buffer + size,
-		    "%6s:                :  %02x  :  %02x  : %pM : %02x:%02x:%02x:%02x    : \n",
-		    dev->name, ntohs(spt.beacon_transmit),
-		    ntohs(spt.beacon_receive),
-		    spt.beacon_naun,
-		    spt.beacon_phys[0], spt.beacon_phys[1],
-		    spt.beacon_phys[2], spt.beacon_phys[3]);
-	return size;
-}
-#endif
-#endif
-
-static struct pci_driver streamer_pci_driver = {
-  .name     = "lanstreamer",
-  .id_table = streamer_pci_tbl,
-  .probe    = streamer_init_one,
-  .remove   = __devexit_p(streamer_remove_one),
-};
-
-static int __init streamer_init_module(void) {
-  return pci_register_driver(&streamer_pci_driver);
-}
-
-static void __exit streamer_cleanup_module(void) {
-  pci_unregister_driver(&streamer_pci_driver);
-}
-
-module_init(streamer_init_module);
-module_exit(streamer_cleanup_module);
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/lanstreamer.h b/drivers/net/tokenring/lanstreamer.h
deleted file mode 100644
index 3c58d6a..0000000
--- a/drivers/net/tokenring/lanstreamer.h
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- *   lanstreamer.h -- driver for the IBM Auto LANStreamer PCI Adapter
- *
- *  Written By: Mike Sullivan, IBM Corporation
- *
- *  Copyright (C) 1999 IBM Corporation
- *
- *  Linux driver for IBM PCI tokenring cards based on the LanStreamer MPC
- *  chipset. 
- *
- *  This driver is based on the olympic driver for IBM PCI TokenRing cards (Pit/Pit-Phy/Olympic
- *  chipsets) written  by:
- *      1999 Peter De Schrijver All Rights Reserved
- *	1999 Mike Phillips (phillim@amtrak.com)
- *
- *  Base Driver Skeleton:
- *      Written 1993-94 by Donald Becker.
- *
- *      Copyright 1993 United States Government as represented by the
- *      Director, National Security Agency.
- *
- * This program is free software; you can redistribute it and/or modify      
- * it under the terms of the GNU General Public License as published by      
- * the Free Software Foundation; either version 2 of the License, or         
- * (at your option) any later version.                                       
- *                                                                           
- * This program is distributed in the hope that it will be useful,           
- * but WITHOUT ANY WARRANTY; without even the implied warranty of            
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the             
- * GNU General Public License for more details.                              
- *                                                                           
- * NO WARRANTY                                                               
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR        
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT      
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,      
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is    
- * solely responsible for determining the appropriateness of using and       
- * distributing the Program and assumes all risks associated with its        
- * exercise of rights under this Agreement, including but not limited to     
- * the risks and costs of program errors, damage to or loss of data,         
- * programs or equipment, and unavailability or interruption of operations.  
- *                                                                           
- * DISCLAIMER OF LIABILITY                                                   
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY   
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL        
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND   
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR     
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE    
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED  
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES             
- *                                                                           
- * You should have received a copy of the GNU General Public License         
- * along with this program; if not, write to the Free Software               
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA 
- *                                                                           
- * 
- *  12/10/99 - Alpha Release 0.1.0
- *            First release to the public
- *  08/15/01 - Added ioctl() definitions and others - Kent Yoder <yoder1@us.ibm.com>
- *
- */
-
-/* MAX_INTR - the maximum number of times we can loop
- * inside the interrupt function before returning
- * control to the OS (maximum value is 256)
- */
-#define MAX_INTR 5
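As a rough illustration of how a MAX_INTR-style bound is typically applied (this sketch is not taken from the lanstreamer code; example_irq_pending() and example_service_event() are hypothetical helpers), the handler services at most MAX_INTR events per invocation so a misbehaving adapter cannot keep the CPU inside the ISR indefinitely:

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		struct net_device *dev = dev_id;
		int loops = 0;

		/* Service at most MAX_INTR pending events, then return to the OS. */
		while (loops < MAX_INTR && example_irq_pending(dev)) {
			example_service_event(dev);
			loops++;
		}

		return loops ? IRQ_HANDLED : IRQ_NONE;
	}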
-
-#define CLS 0x0C
-#define MLR 0x86
-#define LTR 0x0D
-
-#define BCTL 0x60
-#define BCTL_SOFTRESET (1<<15)
-#define BCTL_RX_FIFO_8 (1<<1)
-#define BCTL_TX_FIFO_8 (1<<3)
-
-#define GPR 0x4a
-#define GPR_AUTOSENSE (1<<2)
-#define GPR_16MBPS (1<<3)
-
-#define LISR 0x10
-#define LISR_SUM 0x12
-#define LISR_RUM 0x14
-
-#define LISR_LIE (1<<15)
-#define LISR_SLIM (1<<13)
-#define LISR_SLI (1<<12)
-#define LISR_BPEI (1<<9)
-#define LISR_BPE (1<<8)
-#define LISR_SRB_CMD (1<<5)
-#define LISR_ASB_REPLY (1<<4)
-#define LISR_ASB_FREE_REQ (1<<2)
-#define LISR_ARB_FREE (1<<1)
-#define LISR_TRB_FRAME (1<<0)
-
-#define SISR 0x16
-#define SISR_SUM 0x18
-#define SISR_RUM 0x1A
-#define SISR_MASK 0x54
-#define SISR_MASK_SUM 0x56
-#define SISR_MASK_RUM 0x58
-
-#define SISR_MI (1<<15)
-#define SISR_SERR_ERR (1<<14)
-#define SISR_TIMER (1<<11)
-#define SISR_LAP_PAR_ERR (1<<10)
-#define SISR_LAP_ACC_ERR (1<<9)
-#define SISR_PAR_ERR (1<<8)
-#define SISR_ADAPTER_CHECK (1<<6)
-#define SISR_SRB_REPLY (1<<5)
-#define SISR_ASB_FREE (1<<4)
-#define SISR_ARB_CMD (1<<3)
-#define SISR_TRB_REPLY (1<<2)
-
-#define MISR_RUM 0x5A
-#define MISR_MASK 0x5C
-#define MISR_MASK_RUM 0x5E
-
-#define MISR_TX2_IDLE (1<<15)
-#define MISR_TX2_NO_STATUS (1<<14)
-#define MISR_TX2_HALT (1<<13)
-#define MISR_TX2_EOF (1<<12)
-#define MISR_TX1_IDLE (1<<11)
-#define MISR_TX1_NO_STATUS (1<<10)
-#define MISR_TX1_HALT (1<<9)
-#define MISR_TX1_EOF (1<<8)
-#define MISR_RX_NOBUF (1<<5)
-#define MISR_RX_EOB (1<<4)
-#define MISR_RX_NO_STATUS (1<<2)
-#define MISR_RX_HALT (1<<1)
-#define MISR_RX_EOF (1<<0)
-
-#define LAPA 0x62
-#define LAPE 0x64
-#define LAPD 0x66
-#define LAPDINC 0x68
-#define LAPWWO 0x6A
-#define LAPWWC 0x6C
-#define LAPCTL 0x6E
-
-#define TIMER 0x4E4
-
-#define BMCTL_SUM 0x50
-#define BMCTL_RUM 0x52
-#define BMCTL_TX1_DIS (1<<14)
-#define BMCTL_TX2_DIS (1<<10)
-#define BMCTL_RX_DIS (1<<6)
-#define BMCTL_RX_ENABLED  (1<<5)
-
-#define RXLBDA  0x90
-#define RXBDA   0x94
-#define RXSTAT  0x98
-#define RXDBA   0x9C
-
-#define TX1LFDA 0xA0
-#define TX1FDA  0xA4
-#define TX1STAT 0xA8
-#define TX1DBA  0xAC
-#define TX2LFDA 0xB0
-#define TX2FDA  0xB4
-#define TX2STAT 0xB8
-#define TX2DBA  0xBC
-
-#define STREAMER_IO_SPACE 256
-
-#define SRB_COMMAND_SIZE 50
-
-#define STREAMER_MAX_ADAPTERS 8	/* 0x08 __MODULE_STRING can't handle 0xnn */
-
-/* Defines for LAN STATUS CHANGE reports */
-#define LSC_SIG_LOSS 0x8000
-#define LSC_HARD_ERR 0x4000
-#define LSC_SOFT_ERR 0x2000
-#define LSC_TRAN_BCN 0x1000
-#define LSC_LWF      0x0800
-#define LSC_ARW      0x0400
-#define LSC_FPE      0x0200
-#define LSC_RR       0x0100
-#define LSC_CO       0x0080
-#define LSC_SS       0x0040
-#define LSC_RING_REC 0x0020
-#define LSC_SR_CO    0x0010
-#define LSC_FDX_MODE 0x0004
-
-/* Defines for OPEN ADAPTER command */
-
-#define OPEN_ADAPTER_EXT_WRAP (1<<15)
-#define OPEN_ADAPTER_DIS_HARDEE (1<<14)
-#define OPEN_ADAPTER_DIS_SOFTERR (1<<13)
-#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12)
-#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11)
-#define OPEN_ADAPTER_ENABLE_EC (1<<10)
-#define OPEN_ADAPTER_CONTENDER (1<<8)
-#define OPEN_ADAPTER_PASS_BEACON (1<<7)
-#define OPEN_ADAPTER_ENABLE_FDX (1<<6)
-#define OPEN_ADAPTER_ENABLE_RPL (1<<5)
-#define OPEN_ADAPTER_INHIBIT_ETR (1<<4)
-#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3)
-
-
-/* Defines for SRB Commands */
-#define SRB_CLOSE_ADAPTER 0x04
-#define SRB_CONFIGURE_BRIDGE 0x0c
-#define SRB_CONFIGURE_HP_CHANNEL 0x13
-#define SRB_MODIFY_BRIDGE_PARMS 0x15
-#define SRB_MODIFY_OPEN_OPTIONS 0x01
-#define SRB_MODIFY_RECEIVE_OPTIONS 0x17
-#define SRB_NO_OPERATION 0x00
-#define SRB_OPEN_ADAPTER 0x03
-#define SRB_READ_LOG 0x08
-#define SRB_READ_SR_COUNTERS 0x16
-#define SRB_RESET_GROUP_ADDRESS 0x02
-#define SRB_RESET_TARGET_SEGMETN 0x14
-#define SRB_SAVE_CONFIGURATION 0x1b
-#define SRB_SET_BRIDGE_PARMS 0x09
-#define SRB_SET_FUNC_ADDRESS 0x07
-#define SRB_SET_GROUP_ADDRESS 0x06
-#define SRB_SET_TARGET_SEGMENT 0x05
-
-/* Clear return code */
-#define STREAMER_CLEAR_RET_CODE 0xfe
-
-/* ARB Commands */
-#define ARB_RECEIVE_DATA 0x81
-#define ARB_LAN_CHANGE_STATUS 0x84
-
-/* ASB Response commands */
-#define ASB_RECEIVE_DATA 0x81
-
-
-/* Streamer defaults for buffers */
-
-#define STREAMER_RX_RING_SIZE 16	/* should be a power of 2 */
-/* Setting the number of TX descriptors to 1 is a workaround for an
- * undocumented hardware problem with the lanstreamer board. Setting
- * this to something higher may slightly increase the throughput you
- * can get from the card, but at the risk of locking up the box. - 
- * <yoder1@us.ibm.com>
- */
-#define STREAMER_TX_RING_SIZE 1	/* should be a power of 2 */
-
-#define PKT_BUF_SZ 4096		/* Default packet size */
-
-/* Streamer data structures */
-
-struct streamer_tx_desc {
-	__u32 forward;
-	__u32 status;
-	__u32 bufcnt_framelen;
-	__u32 buffer;
-	__u32 buflen;
-	__u32 rsvd1;
-	__u32 rsvd2;
-	__u32 rsvd3;
-};
-
-struct streamer_rx_desc {
-	__u32 forward;
-	__u32 status;
-	__u32 buffer;
-	__u32 framelen_buflen;
-};
-
-struct mac_receive_buffer {
-	__u16 next;
-	__u8 padding;
-	__u8 frame_status;
-	__u16 buffer_length;
-	__u8 frame_data;
-};
-
-struct streamer_private {
-
-	__u16 srb;
-	__u16 trb;
-	__u16 arb;
-	__u16 asb;
-
-        struct streamer_private *next;
-        struct pci_dev *pci_dev;
-	__u8 __iomem *streamer_mmio;
-        char *streamer_card_name;
- 
-        spinlock_t streamer_lock;
-
-	volatile int srb_queued;	/* True if an SRB is still posted */
-	wait_queue_head_t srb_wait;
-
-	volatile int asb_queued;	/* True if an ASB is posted */
-
-	volatile int trb_queued;	/* True if a TRB is posted */
-	wait_queue_head_t trb_wait;
-
-	struct streamer_rx_desc *streamer_rx_ring;
-	struct streamer_tx_desc *streamer_tx_ring;
-	struct sk_buff *tx_ring_skb[STREAMER_TX_RING_SIZE],
-	    *rx_ring_skb[STREAMER_RX_RING_SIZE];
-	int tx_ring_free, tx_ring_last_status, rx_ring_last_received,
-	    free_tx_ring_entries;
-
-	__u16 streamer_lan_status;
-	__u8 streamer_ring_speed;
-	__u16 pkt_buf_sz;
-	__u8 streamer_receive_options, streamer_copy_all_options,
-	    streamer_message_level;
-	__u16 streamer_addr_table_addr, streamer_parms_addr;
-	__u16 mac_rx_buffer;
-	__u8 streamer_laa[6];
-};
-
-struct streamer_adapter_addr_table {
-
-	__u8 node_addr[6];
-	__u8 reserved[4];
-	__u8 func_addr[4];
-};
-
-struct streamer_parameters_table {
-
-	__u8 phys_addr[4];
-	__u8 up_node_addr[6];
-	__u8 up_phys_addr[4];
-	__u8 poll_addr[6];
-	__u16 reserved;
-	__u16 acc_priority;
-	__u16 auth_source_class;
-	__u16 att_code;
-	__u8 source_addr[6];
-	__u16 beacon_type;
-	__u16 major_vector;
-	__u16 lan_status;
-	__u16 soft_error_time;
-	__u16 reserved1;
-	__u16 local_ring;
-	__u16 mon_error;
-	__u16 beacon_transmit;
-	__u16 beacon_receive;
-	__u16 frame_correl;
-	__u8 beacon_naun[6];
-	__u32 reserved2;
-	__u8 beacon_phys[4];
-};
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
deleted file mode 100644
index 28adcdf..0000000
--- a/drivers/net/tokenring/madgemc.c
+++ /dev/null
@@ -1,761 +0,0 @@
-/*
- *  madgemc.c: Driver for the Madge Smart 16/4 MC16 MCA token ring card.
- *
- *  Written 2000 by Adam Fritzler
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License, incorporated herein by reference.
- *
- *  This driver module supports the following cards:
- *      - Madge Smart 16/4 Ringnode MC16
- *	- Madge Smart 16/4 Ringnode MC32 (??)
- *
- *  Maintainer(s):
- *    AF	Adam Fritzler
- *
- *  Modification History:
- *	16-Jan-00	AF	Created
- *
- */
-static const char version[] = "madgemc.c: v0.91 23/01/2000 by Adam Fritzler\n";
-
-#include <linux/module.h>
-#include <linux/mca.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#include "tms380tr.h"
-#include "madgemc.h"            /* Madge-specific constants */
-
-#define MADGEMC_IO_EXTENT 32
-#define MADGEMC_SIF_OFFSET 0x08
-
-struct card_info {
-	/*
-	 * These are read from the BIA ROM.
-	 */
-	unsigned int manid;
-	unsigned int cardtype;
-	unsigned int cardrev;
-	unsigned int ramsize;
-	
-	/*
-	 * These are read from the MCA POS registers.  
-	 */
-	unsigned int burstmode:2;
-	unsigned int fairness:1; /* 0 = Fair, 1 = Unfair */
-	unsigned int arblevel:4;
-	unsigned int ringspeed:2; /* 0 = 4mb, 1 = 16, 2 = Auto/none */
-	unsigned int cabletype:1; /* 0 = RJ45, 1 = DB9 */
-};
-
-static int madgemc_open(struct net_device *dev);
-static int madgemc_close(struct net_device *dev);
-static int madgemc_chipset_init(struct net_device *dev);
-static void madgemc_read_rom(struct net_device *dev, struct card_info *card);
-static unsigned short madgemc_setnselout_pins(struct net_device *dev);
-static void madgemc_setcabletype(struct net_device *dev, int type);
-
-static int madgemc_mcaproc(char *buf, int slot, void *d);
-
-static void madgemc_setregpage(struct net_device *dev, int page);
-static void madgemc_setsifsel(struct net_device *dev, int val);
-static void madgemc_setint(struct net_device *dev, int val);
-
-static irqreturn_t madgemc_interrupt(int irq, void *dev_id);
-
-/*
- * These work around paging, however they don't guarantee you're on the
- * right page.
- */
-#define SIFREADB(reg) (inb(dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
-#define SIFWRITEB(val, reg) (outb(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
-#define SIFREADW(reg) (inw(dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
-#define SIFWRITEW(val, reg) (outw(val, dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
-
-/*
- * Read a byte-length value from the register.
- */
-static unsigned short madgemc_sifreadb(struct net_device *dev, unsigned short reg)
-{
-	unsigned short ret;
-	if (reg<0x8)	
-		ret = SIFREADB(reg);
-	else {
-		madgemc_setregpage(dev, 1);	
-		ret = SIFREADB(reg);
-		madgemc_setregpage(dev, 0);
-	}
-	return ret;
-}
-
-/*
- * Write a byte-length value to a register.
- */
-static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
-{
-	if (reg<0x8)
-		SIFWRITEB(val, reg);
-	else {
-		madgemc_setregpage(dev, 1);
-		SIFWRITEB(val, reg);
-		madgemc_setregpage(dev, 0);
-	}
-}
-
-/*
- * Read a word-length value from a register
- */
-static unsigned short madgemc_sifreadw(struct net_device *dev, unsigned short reg)
-{
-	unsigned short ret;
-	if (reg<0x8)	
-		ret = SIFREADW(reg);
-	else {
-		madgemc_setregpage(dev, 1);	
-		ret = SIFREADW(reg);
-		madgemc_setregpage(dev, 0);
-	}
-	return ret;
-}
-
-/*
- * Write a word-length value to a register.
- */
-static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
-{
-	if (reg<0x8)
-		SIFWRITEW(val, reg);
-	else {
-		madgemc_setregpage(dev, 1);
-		SIFWRITEW(val, reg);
-		madgemc_setregpage(dev, 0);
-	}
-}
-
-static struct net_device_ops madgemc_netdev_ops __read_mostly;
-
-static int __devinit madgemc_probe(struct device *device)
-{	
-	static int versionprinted;
-	struct net_device *dev;
-	struct net_local *tp;
-	struct card_info *card;
-	struct mca_device *mdev = to_mca_device(device);
-	int ret = 0;
-
-	if (versionprinted++ == 0)
-		printk("%s", version);
-
-	if(mca_device_claimed(mdev))
-		return -EBUSY;
-	mca_device_set_claim(mdev, 1);
-
-	dev = alloc_trdev(sizeof(struct net_local));
-	if (!dev) {
-		printk("madgemc: unable to allocate dev space\n");
-		mca_device_set_claim(mdev, 0);
-		ret = -ENOMEM;
-		goto getout;
-	}
-
-	dev->netdev_ops = &madgemc_netdev_ops;
-
-	card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
-	if (card==NULL) {
-		ret = -ENOMEM;
-		goto getout1;
-	}
-
-	/*
-	 * Parse configuration information.  This all comes
-	 * directly from the publicly available @002d.ADF.
-	 * Get it from Madge or your local ADF library.
-	 */
-
-	/*
-	 * Base address 
-	 */
-	dev->base_addr = 0x0a20 + 
-		((mdev->pos[2] & MC16_POS2_ADDR2)?0x0400:0) +
-		((mdev->pos[0] & MC16_POS0_ADDR1)?0x1000:0) +
-		((mdev->pos[3] & MC16_POS3_ADDR3)?0x2000:0);
-
-	/*
-	 * Interrupt line
-	 */
-	switch(mdev->pos[0] >> 6) { /* upper two bits */
-		case 0x1: dev->irq = 3; break;
-		case 0x2: dev->irq = 9; break; /* IRQ 2 = IRQ 9 */
-		case 0x3: dev->irq = 10; break;
-		default: dev->irq = 0; break;
-	}
-
-	if (dev->irq == 0) {
-		printk("%s: invalid IRQ\n", dev->name);
-		ret = -EBUSY;
-		goto getout2;
-	}
-
-	if (!request_region(dev->base_addr, MADGEMC_IO_EXTENT, 
-			   "madgemc")) {
-		printk(KERN_INFO "madgemc: unable to setup Smart MC in slot %d because of I/O base conflict at 0x%04lx\n", mdev->slot, dev->base_addr);
-		dev->base_addr += MADGEMC_SIF_OFFSET;
-		ret = -EBUSY;
-		goto getout2;
-	}
-	dev->base_addr += MADGEMC_SIF_OFFSET;
-	
-	/*
-	 * Arbitration Level
-	 */
-	card->arblevel = ((mdev->pos[0] >> 1) & 0x7) + 8;
-
-	/*
-	 * Burst mode and Fairness
-	 */
-	card->burstmode = ((mdev->pos[2] >> 6) & 0x3);
-	card->fairness = ((mdev->pos[2] >> 4) & 0x1);
-
-	/*
-	 * Ring Speed
-	 */
-	if ((mdev->pos[1] >> 2)&0x1)
-		card->ringspeed = 2; /* not selected */
-	else if ((mdev->pos[2] >> 5) & 0x1)
-		card->ringspeed = 1; /* 16Mb */
-	else
-		card->ringspeed = 0; /* 4Mb */
-
-	/* 
-	 * Cable type
-	 */
-	if ((mdev->pos[1] >> 6)&0x1)
-		card->cabletype = 1; /* STP/DB9 */
-	else
-		card->cabletype = 0; /* UTP/RJ-45 */
-
-
-	/* 
-	 * ROM Info. This requires us to actually twiddle
-	 * bits on the card, so we must ensure above that 
-	 * the base address is free of conflict (request_region above).
-	 */
-	madgemc_read_rom(dev, card);
-		
-	if (card->manid != 0x4d) { /* something went wrong */
-		printk(KERN_INFO "%s: Madge MC ROM read failed (unknown manufacturer ID %02x)\n", dev->name, card->manid);
-		ret = -EIO;
-		goto getout3;
-	}
-		
-	if ((card->cardtype != 0x08) && (card->cardtype != 0x0d)) {
-		printk(KERN_INFO "%s: Madge MC ROM read failed (unknown card ID %02x)\n", dev->name, card->cardtype);
-		ret = -EIO;
-		goto getout3;
-	}
-	       
-	/* All cards except Rev 0 and 1 MC16's have 256kb of RAM */
-	if ((card->cardtype == 0x08) && (card->cardrev <= 0x01))
-		card->ramsize = 128;
-	else
-		card->ramsize = 256;
-
-	printk("%s: %s Rev %d at 0x%04lx IRQ %d\n", 
-	       dev->name, 
-	       (card->cardtype == 0x08)?MADGEMC16_CARDNAME:
-	       MADGEMC32_CARDNAME, card->cardrev, 
-	       dev->base_addr, dev->irq);
-
-	if (card->cardtype == 0x0d)
-		printk("%s:     Warning: MC32 support is experimental and highly untested\n", dev->name);
-	
-	if (card->ringspeed==2) { /* Unknown */
-		printk("%s:     Warning: Ring speed not set in POS -- Please run the reference disk and set it!\n", dev->name);
-		card->ringspeed = 1; /* default to 16mb */
-	}
-		
-	printk("%s:     RAM Size: %dKB\n", dev->name, card->ramsize);
-
-	printk("%s:     Ring Speed: %dMb/sec on %s\n", dev->name, 
-	       (card->ringspeed)?16:4, 
-	       card->cabletype?"STP/DB9":"UTP/RJ-45");
-	printk("%s:     Arbitration Level: %d\n", dev->name, 
-	       card->arblevel);
-
-	printk("%s:     Burst Mode: ", dev->name);
-	switch(card->burstmode) {
-		case 0: printk("Cycle steal"); break;
-		case 1: printk("Limited burst"); break;
-		case 2: printk("Delayed release"); break;
-		case 3: printk("Immediate release"); break;
-	}
-	printk(" (%s)\n", (card->fairness)?"Unfair":"Fair");
-
-
-	/* 
-	 * Enable SIF before we assign the interrupt handler,
-	 * just in case we get spurious interrupts that need
-	 * handling.
-	 */ 
-	outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
-	madgemc_setsifsel(dev, 1);
-	if (request_irq(dev->irq, madgemc_interrupt, IRQF_SHARED,
-		       "madgemc", dev)) {
-		ret = -EBUSY;
-		goto getout3;
-	}
-
-	madgemc_chipset_init(dev); /* enables interrupts! */
-	madgemc_setcabletype(dev, card->cabletype);
-
-	/* Setup MCA structures */
-	mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
-	mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev);
-
-	printk("%s:     Ring Station Address: %pM\n",
-	       dev->name, dev->dev_addr);
-
-	if (tmsdev_init(dev, device)) {
-		printk("%s: unable to get memory for dev->priv.\n", 
-		       dev->name);
-		ret = -ENOMEM;
-		goto getout4;
-	}
-	tp = netdev_priv(dev);
-
-	/* 
-	 * The MC16 is physically a 32bit card.  However, Madge
-	 * insists on calling it 16bit, so I'll assume here that
-	 * they know what they're talking about.  Cut off DMA
-	 * at 16mb.
-	 */
-	tp->setnselout = madgemc_setnselout_pins;
-	tp->sifwriteb = madgemc_sifwriteb;
-	tp->sifreadb = madgemc_sifreadb;
-	tp->sifwritew = madgemc_sifwritew;
-	tp->sifreadw = madgemc_sifreadw;
-	tp->DataRate = (card->ringspeed)?SPEED_16:SPEED_4;
-
-	memcpy(tp->ProductID, "Madge MCA 16/4    ", PROD_ID_SIZE + 1);
-
-	tp->tmspriv = card;
-	dev_set_drvdata(device, dev);
-
-	if (register_netdev(dev) == 0)
-		return 0;
-
-	dev_set_drvdata(device, NULL);
-	ret = -ENOMEM;
-getout4:
-	free_irq(dev->irq, dev);
-getout3:
-	release_region(dev->base_addr-MADGEMC_SIF_OFFSET, 
-		       MADGEMC_IO_EXTENT); 
-getout2:
-	kfree(card);
-getout1:
-	free_netdev(dev);
-getout:
-	mca_device_set_claim(mdev, 0);
-	return ret;
-}
-
-/*
- * Handle interrupts generated by the card
- *
- * The MicroChannel Madge cards need slightly more handling
- * after an interrupt than other TMS380 cards do.
- *
- * First we must make sure it was this card that generated the
- * interrupt (since interrupt sharing is allowed).  Then,
- * because we're using level-triggered interrupts (as is
- * standard on MCA), we must toggle the interrupt line
- * on the card in order to claim and acknowledge the interrupt.
- * Once that is done, the interrupt should be handled in
- * the normal tms380tr_interrupt() routine.
- *
- * There are two ways we can check to see if the interrupt is ours,
- * both with their own disadvantages...
- *
- * 1)  	Read in the SIFSTS register from the TMS controller.  This
- *	is guaranteed to be accurate, however, there's a fairly
- *	large performance penalty for doing so: the Madge chips
- *	must request the register from the Eagle, the Eagle must
- *	read them from its internal bus, and then take the route
- *	back out again, for a 16bit read.  
- *
- * 2)	Use the MC_CONTROL_REG0_SINTR bit from the Madge ASICs.
- *	The major disadvantage here is that the accuracy of the
- *	bit is in question.  However, it cuts out the extra read
- *	cycles it takes to read the Eagle's SIF, as it's only an
- *	8bit read, and theoretically the Madge bit is directly
- *	connected to the interrupt latch coming out of the Eagle
- *	hardware (that statement is not verified).  
- *
- * I can't determine which of these methods has the best win.  For now,
- * we make a compromise.  Use the Madge way for the first interrupt,
- * which should be the fast-path, and then once we hit the first 
- * interrupt, keep on trying using the SIF method until we've
- * exhausted all contiguous interrupts.
- *
- */
-static irqreturn_t madgemc_interrupt(int irq, void *dev_id)
-{
-	int pending,reg1;
-	struct net_device *dev;
-
-	if (!dev_id) {
-		printk("madgemc_interrupt: was not passed a dev_id!\n");
-		return IRQ_NONE;
-	}
-
-	dev = dev_id;
-
-	/* Make sure it's really us. -- the Madge way */
-	pending = inb(dev->base_addr + MC_CONTROL_REG0);
-	if (!(pending & MC_CONTROL_REG0_SINTR))
-		return IRQ_NONE; /* not our interrupt */
-
-	/*
-	 * Since we're level-triggered, we may miss the rising edge
-	 * of the next interrupt while we're off handling this one,
-	 * so keep checking until the SIF verifies that it has nothing
-	 * left for us to do.
-	 */
-	pending = STS_SYSTEM_IRQ;
-	do {
-		if (pending & STS_SYSTEM_IRQ) {
-
-			/* Toggle the interrupt to reset the latch on card */
-			reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
-			outb(reg1 ^ MC_CONTROL_REG1_SINTEN, 
-			     dev->base_addr + MC_CONTROL_REG1);
-			outb(reg1, dev->base_addr + MC_CONTROL_REG1);
-
-			/* Continue handling as normal */
-			tms380tr_interrupt(irq, dev_id);
-
-			pending = SIFREADW(SIFSTS); /* restart - the SIF way */
-
-		} else
-			return IRQ_HANDLED; 
-	} while (1);
-
-	return IRQ_HANDLED; /* not reachable */
-}
-
-/*
- * Set the card to the preferred ring speed.
- *
- * Unlike newer cards, the MC16/32 have their speed selection
- * circuit connected to the Madge ASICs and not to the TMS380
- * NSELOUT pins. Set the ASIC bits correctly here, and return 
- * zero to leave the TMS NSELOUT bits unaffected.
- *
- */
-static unsigned short madgemc_setnselout_pins(struct net_device *dev)
-{
-	unsigned char reg1;
-	struct net_local *tp = netdev_priv(dev);
-	
-	reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
-
-	if(tp->DataRate == SPEED_16)
-		reg1 |= MC_CONTROL_REG1_SPEED_SEL; /* add for 16mb */
-	else if (reg1 & MC_CONTROL_REG1_SPEED_SEL)
-		reg1 ^= MC_CONTROL_REG1_SPEED_SEL; /* remove for 4mb */
-	outb(reg1, dev->base_addr + MC_CONTROL_REG1);
-
-	return 0; /* no change */
-}
-
-/*
- * Set the register page.  This equates to the SRSX line
- * on the TMS380Cx6.
- *
- * Register selection is normally done via three contiguous
- * bits.  However, some boards (such as the MC16/32) use only
- * two bits, plus a separate bit in the glue chip.  This
- * sets the SRSX bit (the top bit).  See page 4-17 in the
- * Yellow Book for which registers are affected.
- *
- */
-static void madgemc_setregpage(struct net_device *dev, int page)
-{	
-	static int reg1;
-
-	reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
-	if ((page == 0) && (reg1 & MC_CONTROL_REG1_SRSX)) {
-		outb(reg1 ^ MC_CONTROL_REG1_SRSX, 
-		     dev->base_addr + MC_CONTROL_REG1);
-	}
-	else if (page == 1) {
-		outb(reg1 | MC_CONTROL_REG1_SRSX, 
-		     dev->base_addr + MC_CONTROL_REG1);
-	}
-	reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
-}
-
-/*
- * The SIF registers are not mapped into register space by default
- * Set this to 1 to map them, 0 to map the BIA ROM.
- *
- */
-static void madgemc_setsifsel(struct net_device *dev, int val)
-{
-	unsigned int reg0;
-
-	reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
-	if ((val == 0) && (reg0 & MC_CONTROL_REG0_SIFSEL)) {
-		outb(reg0 ^ MC_CONTROL_REG0_SIFSEL, 
-		     dev->base_addr + MC_CONTROL_REG0);
-	} else if (val == 1) {
-		outb(reg0 | MC_CONTROL_REG0_SIFSEL, 
-		     dev->base_addr + MC_CONTROL_REG0);
-	}	
-	reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
-}
-
-/*
- * Enable SIF interrupts
- *
- * This does not enable interrupts in the SIF, but rather
- * enables SIF interrupts to be passed onto the host.
- *
- */
-static void madgemc_setint(struct net_device *dev, int val)
-{
-	unsigned int reg1;
-
-	reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
-	if ((val == 0) && (reg1 & MC_CONTROL_REG1_SINTEN)) {
-		outb(reg1 ^ MC_CONTROL_REG1_SINTEN, 
-		     dev->base_addr + MC_CONTROL_REG1);
-	} else if (val == 1) {
-		outb(reg1 | MC_CONTROL_REG1_SINTEN, 
-		     dev->base_addr + MC_CONTROL_REG1);
-	}
-}
-
-/*
- * Cable type is set via control register 7. Bit zero high
- * for UTP, low for STP.
- */
-static void madgemc_setcabletype(struct net_device *dev, int type)
-{
-	outb((type==0)?MC_CONTROL_REG7_CABLEUTP:MC_CONTROL_REG7_CABLESTP,
-	     dev->base_addr + MC_CONTROL_REG7);
-}
-
-/*
- * Enable the functions of the Madge chipset needed for
- * full working order. 
- */
-static int madgemc_chipset_init(struct net_device *dev)
-{
-	outb(0, dev->base_addr + MC_CONTROL_REG1); /* pull SRESET low */
-	tms380tr_wait(100); /* wait for card to reset */
-
-	/* bring back into normal operating mode */
-	outb(MC_CONTROL_REG1_NSRESET, dev->base_addr + MC_CONTROL_REG1);
-
-	/* map SIF registers */
-	madgemc_setsifsel(dev, 1);
-
-	/* enable SIF interrupts */
-	madgemc_setint(dev, 1); 
-
-	return 0;
-}
-
-/*
- * Disable the board, and put back into power-up state.
- */
-static void madgemc_chipset_close(struct net_device *dev)
-{
-	/* disable interrupts */
-	madgemc_setint(dev, 0);
-	/* unmap SIF registers */
-	madgemc_setsifsel(dev, 0);
-}
-
-/*
- * Read the card type (MC16 or MC32) from the card.
- *
- * The configuration registers are stored in two separate
- * pages.  Pages are flipped by clearing bit 3 of CONTROL_REG0 (PAGE)
- * for page zero, or setting bit 3 for page one.
- *
- * Page zero contains the following data:
- *	Byte 0: Manufacturer ID (0x4D -- ASCII "M")
- *	Byte 1: Card type:
- *			0x08 for MC16
- *			0x0D for MC32
- *	Byte 2: Card revision
- *	Byte 3: Mirror of POS config register 0
- *	Byte 4: Mirror of POS 1
- *	Byte 5: Mirror of POS 2
- *
- * Page one contains the following data:
- *	Byte 0: Unused
- *	Byte 1-6: BIA, MSB to LSB.
- *
- * Note that to read the BIA, we must unmap the SIF registers
- * by clearing bit 2 of CONTROL_REG0 (SIFSEL), as the data
- * will reside in the same logical location.  For this reason,
- * _never_ read the BIA while the Eagle processor is running!
- * The SIF will be completely inaccessible until the BIA operation
- * is complete.
- *
- */
-static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
-{
-	unsigned long ioaddr;
-	unsigned char reg0, reg1, tmpreg0, i;
-
-	ioaddr = dev->base_addr;
-
-	reg0 = inb(ioaddr + MC_CONTROL_REG0);
-	reg1 = inb(ioaddr + MC_CONTROL_REG1);
-
-	/* Switch to page zero and unmap SIF */
-	tmpreg0 = reg0 & ~(MC_CONTROL_REG0_PAGE + MC_CONTROL_REG0_SIFSEL);
-	outb(tmpreg0, ioaddr + MC_CONTROL_REG0);
-	
-	card->manid = inb(ioaddr + MC_ROM_MANUFACTURERID);
-	card->cardtype = inb(ioaddr + MC_ROM_ADAPTERID);
-	card->cardrev = inb(ioaddr + MC_ROM_REVISION);
-
-	/* Switch to rom page one */
-	outb(tmpreg0 | MC_CONTROL_REG0_PAGE, ioaddr + MC_CONTROL_REG0);
-
-	/* Read BIA */
-	dev->addr_len = 6;
-	for (i = 0; i < 6; i++)
-		dev->dev_addr[i] = inb(ioaddr + MC_ROM_BIA_START + i);
-	
-	/* Restore original register values */
-	outb(reg0, ioaddr + MC_CONTROL_REG0);
-	outb(reg1, ioaddr + MC_CONTROL_REG1);
-}
-
-static int madgemc_open(struct net_device *dev)
-{  
-	/*
-	 * Go ahead and reinitialize the chipset again, just to 
-	 * make sure we didn't get left in a bad state.
-	 */
-	madgemc_chipset_init(dev);
-	tms380tr_open(dev);
-	return 0;
-}
-
-static int madgemc_close(struct net_device *dev)
-{
-	tms380tr_close(dev);
-	madgemc_chipset_close(dev);
-	return 0;
-}
-
-/*
- * Give some details available from /proc/mca/slotX
- */
-static int madgemc_mcaproc(char *buf, int slot, void *d) 
-{	
-	struct net_device *dev = (struct net_device *)d;
-	struct net_local *tp = netdev_priv(dev);
-	struct card_info *curcard = tp->tmspriv;
-	int len = 0;
-	
-	len += sprintf(buf+len, "-------\n");
-	if (curcard) {
-		len += sprintf(buf+len, "Card Revision: %d\n", curcard->cardrev);
-		len += sprintf(buf+len, "RAM Size: %dkb\n", curcard->ramsize);
-		len += sprintf(buf+len, "Cable type: %s\n", (curcard->cabletype)?"STP/DB9":"UTP/RJ-45");
-		len += sprintf(buf+len, "Configured ring speed: %dMb/sec\n", (curcard->ringspeed)?16:4);
-		len += sprintf(buf+len, "Running ring speed: %dMb/sec\n", (tp->DataRate==SPEED_16)?16:4);
-		len += sprintf(buf+len, "Device: %s\n", dev->name);
-		len += sprintf(buf+len, "IO Port: 0x%04lx\n", dev->base_addr);
-		len += sprintf(buf+len, "IRQ: %d\n", dev->irq);
-		len += sprintf(buf+len, "Arbitration Level: %d\n", curcard->arblevel);
-		len += sprintf(buf+len, "Burst Mode: ");
-		switch(curcard->burstmode) {
-		case 0: len += sprintf(buf+len, "Cycle steal"); break;
-		case 1: len += sprintf(buf+len, "Limited burst"); break;
-		case 2: len += sprintf(buf+len, "Delayed release"); break;
-		case 3: len += sprintf(buf+len, "Immediate release"); break;
-		}
-		len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair");
-		
-		len += sprintf(buf+len, "Ring Station Address: %pM\n",
-			       dev->dev_addr);
-	} else 
-		len += sprintf(buf+len, "Card not configured\n");
-
-	return len;
-}
-
-static int __devexit madgemc_remove(struct device *device)
-{
-	struct net_device *dev = dev_get_drvdata(device);
-	struct net_local *tp;
-        struct card_info *card;
-
-	BUG_ON(!dev);
-
-	tp = netdev_priv(dev);
-	card = tp->tmspriv;
-	kfree(card);
-	tp->tmspriv = NULL;
-
-	unregister_netdev(dev);
-	release_region(dev->base_addr-MADGEMC_SIF_OFFSET, MADGEMC_IO_EXTENT);
-	free_irq(dev->irq, dev);
-	tmsdev_term(dev);
-	free_netdev(dev);
-	dev_set_drvdata(device, NULL);
-
-	return 0;
-}
-
-static short madgemc_adapter_ids[] __initdata = {
-	0x002d,
-	0x0000
-};
-
-static struct mca_driver madgemc_driver = {
-	.id_table = madgemc_adapter_ids,
-	.driver = {
-		.name = "madgemc",
-		.bus = &mca_bus_type,
-		.probe = madgemc_probe,
-		.remove = __devexit_p(madgemc_remove),
-	},
-};
-
-static int __init madgemc_init (void)
-{
-	madgemc_netdev_ops = tms380tr_netdev_ops;
-	madgemc_netdev_ops.ndo_open = madgemc_open;
-	madgemc_netdev_ops.ndo_stop = madgemc_close;
-
-	return mca_register_driver (&madgemc_driver);
-}
-
-static void __exit madgemc_exit (void)
-{
-	mca_unregister_driver (&madgemc_driver);
-}
-
-module_init(madgemc_init);
-module_exit(madgemc_exit);
-
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/tokenring/madgemc.h b/drivers/net/tokenring/madgemc.h
deleted file mode 100644
index fe88e27..0000000
--- a/drivers/net/tokenring/madgemc.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* 
- * madgemc.h: Header for the madgemc tms380tr module
- *
- * Authors:
- * - Adam Fritzler
- */
-
-#ifndef __LINUX_MADGEMC_H
-#define __LINUX_MADGEMC_H
-
-#ifdef __KERNEL__
-
-#define MADGEMC16_CARDNAME "Madge Smart 16/4 MC16 Ringnode"
-#define MADGEMC32_CARDNAME "Madge Smart 16/4 MC32 Ringnode"
-
-/* 
- * Bit definitions for the POS config registers
- */
-#define MC16_POS0_ADDR1 0x20
-#define MC16_POS2_ADDR2 0x04
-#define MC16_POS3_ADDR3 0x20
-
-#define MC_CONTROL_REG0		((long)-8) /* 0x00 */
-#define MC_CONTROL_REG1		((long)-7) /* 0x01 */
-#define MC_ADAPTER_POS_REG0	((long)-6) /* 0x02 */
-#define MC_ADAPTER_POS_REG1	((long)-5) /* 0x03 */
-#define MC_ADAPTER_POS_REG2	((long)-4) /* 0x04 */
-#define MC_ADAPTER_REG5_UNUSED	((long)-3) /* 0x05 */
-#define MC_ADAPTER_REG6_UNUSED	((long)-2) /* 0x06 */
-#define MC_CONTROL_REG7		((long)-1) /* 0x07 */
-
-#define MC_CONTROL_REG0_UNKNOWN1	0x01
-#define MC_CONTROL_REG0_UNKNOWN2	0x02
-#define MC_CONTROL_REG0_SIFSEL		0x04
-#define MC_CONTROL_REG0_PAGE		0x08
-#define MC_CONTROL_REG0_TESTINTERRUPT	0x10
-#define MC_CONTROL_REG0_UNKNOWN20	0x20
-#define MC_CONTROL_REG0_SINTR		0x40
-#define MC_CONTROL_REG0_UNKNOWN80	0x80
-
-#define MC_CONTROL_REG1_SINTEN		0x01
-#define MC_CONTROL_REG1_BITOFDEATH	0x02
-#define MC_CONTROL_REG1_NSRESET		0x04
-#define MC_CONTROL_REG1_UNKNOWN8	0x08
-#define MC_CONTROL_REG1_UNKNOWN10	0x10
-#define MC_CONTROL_REG1_UNKNOWN20	0x20
-#define MC_CONTROL_REG1_SRSX		0x40
-#define MC_CONTROL_REG1_SPEED_SEL	0x80
-
-#define MC_CONTROL_REG7_CABLESTP	0x00
-#define MC_CONTROL_REG7_CABLEUTP	0x01
-
-/*
- * ROM Page Zero
- */
-#define MC_ROM_MANUFACTURERID		0x00
-#define MC_ROM_ADAPTERID		0x01
-#define MC_ROM_REVISION			0x02
-#define MC_ROM_CONFIG0			0x03
-#define MC_ROM_CONFIG1			0x04
-#define MC_ROM_CONFIG2			0x05
-
-/*
- * ROM Page One
- */
-#define MC_ROM_UNUSED_BYTE		0x00
-#define MC_ROM_BIA_START		0x01
-
-#endif /* __KERNEL__ */
-#endif /* __LINUX_MADGEMC_H */
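The negative register offsets above (MC_CONTROL_REG0 through MC_CONTROL_REG7) only make sense together with madgemc_probe(), which advances dev->base_addr by MADGEMC_SIF_OFFSET (0x08) so that the TMS380 SIF window starts at offset zero. A minimal sketch of the resulting address arithmetic (illustration only; madgemc_read_control_reg0() is a hypothetical helper, not part of the driver):

	static u8 madgemc_read_control_reg0(struct net_device *dev)
	{
		/*
		 * dev->base_addr == card I/O base + MADGEMC_SIF_OFFSET, so
		 * base + 0x08 + (-8) resolves to the card's register 0x00,
		 * and base + 0x08 + (-4) (MC_ADAPTER_POS_REG2) to 0x04.
		 */
		return inb(dev->base_addr + MC_CONTROL_REG0);
	}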
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
deleted file mode 100644
index 0e23474..0000000
--- a/drivers/net/tokenring/olympic.c
+++ /dev/null
@@ -1,1749 +0,0 @@
-/*
- *   olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
- *		   1999/2000 Mike Phillips (mikep@linuxtr.net)
- *
- *  Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
- *  chipset. 
- *
- *  Base Driver Skeleton:
- *      Written 1993-94 by Donald Becker.
- *
- *      Copyright 1993 United States Government as represented by the
- *      Director, National Security Agency.
- *
- *  Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their 
- *  assistance and perseverance with the testing of this driver.
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License, incorporated herein by reference.
- * 
- *  4/27/99 - Alpha Release 0.1.0
- *            First release to the public
- *
- *  6/8/99  - Official Release 0.2.0   
- *            Merged into the kernel code 
- *  8/18/99 - Updated driver for 2.3.13 kernel to use new pci
- *	      resource. Driver also reports the card name returned by
- *            the pci resource.
- *  1/11/00 - Added spinlocks for smp
- *  2/23/00 - Updated to dev_kfree_irq 
- *  3/10/00 - Fixed FDX enable which triggered other bugs also 
- *            squashed.
- *  5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
- *            The odd thing about the changes is that the fix for
- *            endian issues with the big-endian data in the arb, asb...
- *            was to always swab() the bytes, no matter what CPU.
- *            That's because the read[wl]() functions always swap the
- *            bytes on the way in on PPC.
- *            Fixing the hardware descriptors was another matter,
- *            because they weren't going through read[wl](), there all
- *            the results had to be in memory in le32 values. kdaaker
- *
- * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
- *
- * 03/09/01 - Add new pci api, dev_base_lock, general clean up. 
- *
- * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
- *	      Change proc_fs behaviour, now one entry per adapter.
- *
- * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
- *	      adapter when live does not take the system down with it.
- * 
- * 06/02/01 - Clean up, copy skb for small packets
- * 
- * 06/22/01 - Add EISR error handling routines 
- *
- * 07/19/01 - Improve bad LAA reporting, strip out freemem
- *	      into a separate function, its called from 3 
- *	      different places now. 
- * 02/09/02 - Replaced sleep_on. 
- * 03/01/02 - Replace access to several registers from 32 bit to 
- * 	      16 bit. Fixes alignment errors on PPC 64 bit machines.
- * 	      Thanks to Al Trautman for this one.
- * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
- * 	      silently ignored until the error checking code 
- * 	      went into version 1.0.0 
- * 06/04/02 - Add correct start up sequence for the cardbus adapters.
- * 	      Required for strict compliance with pci power mgmt specs.
- *  To Do:
- *
- *	     Wake on lan	
- * 
- *  If Problems do Occur
- *  Most problems can be rectified by either closing and opening the interface
- *  (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
- *  if compiled into the kernel).
- */
-
-/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
-
-#define OLYMPIC_DEBUG 0
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/in.h>
-#include <linux/ioport.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/proc_fs.h>
-#include <linux/ptrace.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-#include <linux/stddef.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <net/checksum.h>
-#include <net/net_namespace.h>
-
-#include <asm/io.h>
-
-#include "olympic.h"
-
-/* I've got to put some intelligence into the version number so that Peter and I know
- * which version of the code somebody has got. 
- * Version Number = a.b.c.d  where a.b.c is the level of code and d is the latest author.
- * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
- * 
- * Official releases will only have an a.b.c version number format. 
- */
-
-static char version[] =
-"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ; 
-
-static char *open_maj_error[]  = {"No error", "Lobe Media Test", "Physical Insertion",
-				   "Address Verification", "Neighbor Notification (Ring Poll)",
-				   "Request Parameters","FDX Registration Request",
-				   "FDX Duplicate Address Check", "Station registration Query Wait",
-				   "Unknown stage"};
-
-static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
-				   "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
-				   "Duplicate Node Address","Request Parameters","Remove Received",
-				   "Reserved", "Reserved", "No Monitor Detected for RPL", 
-				   "Monitor Contention failure for RPL", "FDX Protocol Error"};
-
-/* Module parameters */
-
-MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; 
-MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ; 
-
-/* Ring Speed 0,4,16,100 
- * 0 = Autosense         
- * 4,16 = Selected speed only, no autosense
- * This allows the card to be the first on the ring
- * and become the active monitor.
- * 100 = Nothing at present, 100mbps is autodetected
- * if FDX is turned on. May be implemented in the future to 
- * fail if 100mbps is not detected.
- *
- * WARNING: Some hubs will allow you to insert
- * at the wrong speed
- */
-
-static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
-module_param_array(ringspeed, int, NULL, 0);
-
-/* Packet buffer size */
-
-static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
-module_param_array(pkt_buf_sz, int, NULL, 0) ;
-
-/* Message Level */
-
-static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ; 
-module_param_array(message_level, int, NULL, 0) ;
-
-/* Change network_monitor to receive mac frames through the arb channel.
- * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
- * device, i.e. tr0, tr1 etc. 
- * Intended to be used to create a ring-error reporting network module 
- * i.e. it will give you the source address of beaconers on the ring 
- */
-static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
-module_param_array(network_monitor, int, NULL, 0);
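Because these per-adapter options are declared with module_param_array(), they are supplied at load time as comma-separated lists, one entry per adapter in probe order. For example (assuming the driver is built as the olympic module), forcing the first adapter to 16 Mbps while leaving a second in autosense could look like:

	modprobe olympic ringspeed=16,0 message_level=1,1 network_monitor=1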
-
-static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
-	{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
-	{ } 	/* Terminating Entry */
-};
-MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ; 
-
-
-static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 
-static int olympic_init(struct net_device *dev);
-static int olympic_open(struct net_device *dev);
-static netdev_tx_t olympic_xmit(struct sk_buff *skb,
-				      struct net_device *dev);
-static int olympic_close(struct net_device *dev);
-static void olympic_set_rx_mode(struct net_device *dev);
-static void olympic_freemem(struct net_device *dev) ;  
-static irqreturn_t olympic_interrupt(int irq, void *dev_id);
-static int olympic_set_mac_address(struct net_device *dev, void *addr) ; 
-static void olympic_arb_cmd(struct net_device *dev);
-static int olympic_change_mtu(struct net_device *dev, int mtu);
-static void olympic_srb_bh(struct net_device *dev) ; 
-static void olympic_asb_bh(struct net_device *dev) ; 
-static const struct file_operations olympic_proc_ops;
-
-static const struct net_device_ops olympic_netdev_ops = {
-	.ndo_open		= olympic_open,
-	.ndo_stop		= olympic_close,
-	.ndo_start_xmit		= olympic_xmit,
-	.ndo_change_mtu		= olympic_change_mtu,
-	.ndo_set_rx_mode	= olympic_set_rx_mode,
-	.ndo_set_mac_address	= olympic_set_mac_address,
-};
-
-static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	struct net_device *dev ; 
-	struct olympic_private *olympic_priv;
-	static int card_no = -1 ;
-	int i ; 
-
-	card_no++ ; 
-
-	if ((i = pci_enable_device(pdev))) {
-		return i ; 
-	}
-
-	pci_set_master(pdev);
-
-	if ((i = pci_request_regions(pdev,"olympic"))) { 
-		goto op_disable_dev;
-	}
- 
-	dev = alloc_trdev(sizeof(struct olympic_private)) ; 
-	if (!dev) {
-		i = -ENOMEM; 
-		goto op_release_dev;
-	}
-
-	olympic_priv = netdev_priv(dev) ;
-	
-	spin_lock_init(&olympic_priv->olympic_lock) ; 
-
-	init_waitqueue_head(&olympic_priv->srb_wait);
-	init_waitqueue_head(&olympic_priv->trb_wait);
-#if OLYMPIC_DEBUG  
-	printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev));
-#endif
-	dev->irq=pdev->irq;
-	dev->base_addr=pci_resource_start(pdev, 0);
-	olympic_priv->olympic_card_name = pci_name(pdev);
-	olympic_priv->pdev = pdev; 
-	olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
-	olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
-	if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
-		goto op_free_iomap;
-	}
-				
-	if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
-		olympic_priv->pkt_buf_sz = PKT_BUF_SZ ; 
-	else
-		olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ; 
-
-	dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ; 
-	olympic_priv->olympic_ring_speed = ringspeed[card_no] ; 
-	olympic_priv->olympic_message_level = message_level[card_no] ; 
-	olympic_priv->olympic_network_monitor = network_monitor[card_no];
-	
-	if ((i = olympic_init(dev))) {
-		goto op_free_iomap;
-	}				
-
-	dev->netdev_ops = &olympic_netdev_ops;
-	SET_NETDEV_DEV(dev, &pdev->dev);
-
-	pci_set_drvdata(pdev,dev) ; 
-	register_netdev(dev) ; 
-	printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
-	if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */ 
-		char proc_name[20] ; 
-		strcpy(proc_name,"olympic_") ;
-		strcat(proc_name,dev->name) ; 
-		proc_create_data(proc_name, 0, init_net.proc_net, &olympic_proc_ops, dev);
-		printk("Olympic: Network Monitor information: /proc/%s\n",proc_name); 
-	}
-	return  0 ;
-
-op_free_iomap:
-	if (olympic_priv->olympic_mmio)
-		iounmap(olympic_priv->olympic_mmio); 
-	if (olympic_priv->olympic_lap)
-		iounmap(olympic_priv->olympic_lap);
-
-	free_netdev(dev);
-op_release_dev:
-	pci_release_regions(pdev); 
-
-op_disable_dev:
-	pci_disable_device(pdev);
-	return i;
-}
-
-static int olympic_init(struct net_device *dev)
-{
-    	struct olympic_private *olympic_priv;
-	u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
-	unsigned long t; 
-	unsigned int uaa_addr;
-
-	olympic_priv=netdev_priv(dev);
-	olympic_mmio=olympic_priv->olympic_mmio;
-
-	printk("%s\n", version);
-	printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
-
-	writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
-	t=jiffies;
-	while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
-		schedule();		
-		if(time_after(jiffies, t + 40*HZ)) {
-			printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
-			return -ENODEV;
-		}
-	}
-
-
-	/* Needed for cardbus */
-	if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
-		writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
-	}
-	
-#if OLYMPIC_DEBUG
-	printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
-	printk("GPR: %x\n",readw(olympic_mmio+GPR));
-	printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
-#endif
-	/* Aaaahhh, You have got to be real careful setting GPR, the card
-	   holds the previous values from flash memory, including autosense 
-           and ring speed */
-
-	writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
-	
-	if (olympic_priv->olympic_ring_speed  == 0) { /* Autosense */
-		writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
-		if (olympic_priv->olympic_message_level) 
-			printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
-	} else if (olympic_priv->olympic_ring_speed == 16) {
-		if (olympic_priv->olympic_message_level) 
-			printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
-		writew(GPR_16MBPS, olympic_mmio+GPR);
-	} else if (olympic_priv->olympic_ring_speed == 4) {
-		if (olympic_priv->olympic_message_level) 
-			printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ; 
-		writew(0, olympic_mmio+GPR);
-	} 
-	
-	writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
-
-#if OLYMPIC_DEBUG
-	printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ; 
-#endif
-	/* Solo has been paused to meet the Cardbus power
-	 * specs if the adapter is cardbus. Check to 
-	 * see it's been paused and then restart solo. The
-	 * adapter should set the pause bit within 1 second.
-	 */
-
-	if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) { 
-		t=jiffies;
-		while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
-			schedule() ; 
-			if(time_after(jiffies, t + 2*HZ)) {
-				printk(KERN_ERR "IBM Cardbus tokenring adapter not responding.\n") ; 
-				return -ENODEV;
-			}
-		}
-		writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ; 
-	}
-	
-	/* start solo init */
-	writel((1<<15),olympic_mmio+SISR_MASK_SUM);
-
-	t=jiffies;
-	while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
-		schedule();		
-		if(time_after(jiffies, t + 15*HZ)) {
-			printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
-			return -ENODEV;
-		}
-	}
-	
-	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
-
-#if OLYMPIC_DEBUG
-	printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
-#endif
-
-	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
-
-#if OLYMPIC_DEBUG		
-{
-	int i;
-	printk("init_srb(%p): ",init_srb);
-	for(i=0;i<20;i++)
-		printk("%x ",readb(init_srb+i));
-	printk("\n");
-}
-#endif	
-	if(readw(init_srb+6)) {
-		printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
-		return -ENODEV;
-	}
-
-	if (olympic_priv->olympic_message_level) {
-		if ( readb(init_srb +2) & 0x40) { 
-			printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
-		} else { 
-			printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
-		}
-	}
-  
-	uaa_addr=swab16(readw(init_srb+8));
-
-#if OLYMPIC_DEBUG
-	printk("UAA resides at %x\n",uaa_addr);
-#endif
-
-	writel(uaa_addr,olympic_mmio+LAPA);
-	adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
-
-	memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
-
-#if OLYMPIC_DEBUG
-	printk("adapter address: %pM\n", dev->dev_addr);
-#endif
-
-	olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12)); 
-	olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14)); 
-
-	return 0;
-
-}
-
-static int olympic_open(struct net_device *dev)
-{
-	struct olympic_private *olympic_priv=netdev_priv(dev);
-	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
-	unsigned long flags, t;
-	int i, open_finished = 1 ;
-	u8 resp, err;
-
-	DECLARE_WAITQUEUE(wait,current) ; 
-
-	olympic_init(dev);
-
-	if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic",
-			dev))
-		return -EAGAIN;
-
-#if OLYMPIC_DEBUG
-	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
-	printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
-#endif
-
-	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
-
-	writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
-
-	writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
-
-	/* adapter is closed, so SRB is pointed to by LAPWWO */
-
-	writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
-	init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
-	
-#if OLYMPIC_DEBUG
-	printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
-	printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
-	printk("Before the open command\n");
-#endif	
-	do {
-		memset_io(init_srb,0,SRB_COMMAND_SIZE);
-
-		writeb(SRB_OPEN_ADAPTER,init_srb) ; 	/* open */
-		writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
-
-		/* If Network Monitor, instruct card to copy MAC frames through the ARB */
-		if (olympic_priv->olympic_network_monitor) 
-			writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
-		else
-			writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
-	
-		/* Test OR of first 3 bytes as it's totally possible for 
-		 * someone to set the first 2 bytes to be zero, although this 
-		 * is an error, the first byte must have bit 6 set to 1  */
-
-		if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
-			writeb(olympic_priv->olympic_laa[0],init_srb+12);
-			writeb(olympic_priv->olympic_laa[1],init_srb+13);
-			writeb(olympic_priv->olympic_laa[2],init_srb+14);
-			writeb(olympic_priv->olympic_laa[3],init_srb+15);
-			writeb(olympic_priv->olympic_laa[4],init_srb+16);
-			writeb(olympic_priv->olympic_laa[5],init_srb+17);
-			memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;  
-		} 	
-		writeb(1,init_srb+30);
-
-		spin_lock_irqsave(&olympic_priv->olympic_lock,flags);	
-		olympic_priv->srb_queued=1;
-
-		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
-		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
-
-		t = jiffies ; 
-	
-		add_wait_queue(&olympic_priv->srb_wait,&wait) ;
-		set_current_state(TASK_INTERRUPTIBLE) ; 
- 
- 		while(olympic_priv->srb_queued) {        
-			schedule() ; 
-        		if(signal_pending(current))	{            
-				printk(KERN_WARNING "%s: Signal received in open.\n",
-                			dev->name);
-            			printk(KERN_WARNING "SISR=%x LISR=%x\n",
-                			readl(olympic_mmio+SISR),
-                			readl(olympic_mmio+LISR));
-            			olympic_priv->srb_queued=0;
-            			break;
-        		}
-			if (time_after(jiffies, t + 10*HZ)) {
-				printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
-				olympic_priv->srb_queued=0;
-				break ; 
-			} 
-			set_current_state(TASK_INTERRUPTIBLE) ; 
-    		}
-		remove_wait_queue(&olympic_priv->srb_wait,&wait) ; 
-		set_current_state(TASK_RUNNING) ; 
-		olympic_priv->srb_queued = 0 ; 
-#if OLYMPIC_DEBUG
-		printk("init_srb(%p): ",init_srb);
-		for(i=0;i<20;i++)
-			printk("%02x ",readb(init_srb+i));
-		printk("\n");
-#endif
-		
-		/* If we get the same return response as we set, the interrupt wasn't raised and the open
-                 * timed out.
-		 */
-
-		switch (resp = readb(init_srb+2)) {
-		case OLYMPIC_CLEAR_RET_CODE:
-			printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ; 
-			goto out;
-		case 0:
-			open_finished = 1;
-			break;
-		case 0x07:
-			if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
-				printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
-				open_finished = 0 ;  
-				continue;
-			}
-
-			err = readb(init_srb+7);
-
-			if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) { 
-				printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
-				printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
-			} else {
-				printk(KERN_WARNING "%s: %s - %s\n", dev->name,
-					open_maj_error[(err & 0xf0) >> 4],
-					open_min_error[(err & 0x0f)]);
-			}
-			goto out;
-
-		case 0x32:
-			printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
-			       dev->name, olympic_priv->olympic_laa);
-			goto out;
-
-		default:
-			printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
-			goto out;
-
-		}
-	} while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */	
-
-	if (readb(init_srb+18) & (1<<3)) 
-		if (olympic_priv->olympic_message_level) 
-			printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
-
-	if (readb(init_srb+18) & (1<<1))
-		olympic_priv->olympic_ring_speed = 100 ; 
-	else if (readb(init_srb+18) & 1)
-		olympic_priv->olympic_ring_speed = 16 ; 
-	else
-		olympic_priv->olympic_ring_speed = 4 ; 
-
-	if (olympic_priv->olympic_message_level) 
-		printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
-
-	olympic_priv->asb = swab16(readw(init_srb+8));
-	olympic_priv->srb = swab16(readw(init_srb+10));
-	olympic_priv->arb = swab16(readw(init_srb+12));
-	olympic_priv->trb = swab16(readw(init_srb+16));
-
-	olympic_priv->olympic_receive_options = 0x01 ; 
-	olympic_priv->olympic_copy_all_options = 0 ; 
-	
-	/* setup rx ring */
-	
-	writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */ 
-
-	writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this enables the RX channel */
-
-	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
-
-		struct sk_buff *skb;
-		
-		skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
-		if(skb == NULL)
-			break;
-
-		skb->dev = dev;
-
-		olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev, 
-							  skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ; 
-		olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz); 
-		olympic_priv->rx_ring_skb[i]=skb;
-	}
-
-	if (i==0) {
-		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
-		goto out;
-	}
-
-	olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring, 
-					 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
-	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
-	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
-	writew(i, olympic_mmio+RXDESCQCNT);
-		
-	olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring, 
-						sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
-	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
-	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
-	
- 	olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1;	/* last processed rx status */
-	olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;  
-
-	writew(i, olympic_mmio+RXSTATQCNT);
-
-#if OLYMPIC_DEBUG 
-	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
-	printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
-	printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
-	printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
-	printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7])  );
-
-	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
-	printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
-		olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ; 
-#endif
-
-	writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
-
-#if OLYMPIC_DEBUG 
-	printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
-	printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
-	printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
-#endif 
-
-	writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
-
-	/* setup tx ring */
-
-	writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
-	for(i=0;i<OLYMPIC_TX_RING_SIZE;i++) 
-		olympic_priv->olympic_tx_ring[i].buffer=cpu_to_le32(0xdeadbeef);
-
-	olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
-	olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
-					 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ; 
-	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
-	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
-	writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
-	
-	olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
-						sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
-	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
-	writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
-	writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
-		
-	olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
-	olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
-
-	writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
-	writel(0,olympic_mmio+EISR) ; 
-	writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
-	writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
-
-#if OLYMPIC_DEBUG 
-	printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
-	printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
-#endif
-
-	if (olympic_priv->olympic_network_monitor) { 
-		u8 __iomem *oat;
-		u8 __iomem *opt;
-		u8 addr[6];
-		oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
-		opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);
-
-		for (i = 0; i < 6; i++)
-			addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i);
-		printk("%s: Node Address: %pM\n", dev->name, addr);
-		printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name, 
-			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), 
-			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
-			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
-			readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
-
-		for (i = 0; i < 6; i++)
-			addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
-		printk("%s: NAUN Address: %pM\n", dev->name, addr);
-	}
-	
-	netif_start_queue(dev);
-	return 0;
-
-out:
-	free_irq(dev->irq, dev);
-	return -EIO;
-}	
-
-/*
- *	When we enter the rx routine we do not know how many frames have been 
- *	queued on the rx channel.  Therefore we start at the next rx status
- *	position and travel around the receive ring until we have completed
- *	all the frames.
- *
- *	This means that we may process the frame before we receive the end
- *	of frame interrupt. This is why we always test the status instead
- *	of blindly processing the next frame.
- *
- *	We also remove the last 4 bytes from the packet; these are just
- *	token ring trailer info and they upset protocols that don't check
- *	their own length, such as SNA.
- *	
- */
-static void olympic_rx(struct net_device *dev)
-{
-	struct olympic_private *olympic_priv=netdev_priv(dev);
-	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
-	struct olympic_rx_status *rx_status;
-	struct olympic_rx_desc *rx_desc ; 
-	int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
-	struct sk_buff *skb, *skb2;
-	int i;
-
-	rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ; 
- 
-	while (rx_status->status_buffercnt) { 
-                u32 l_status_buffercnt;
-
-		olympic_priv->rx_status_last_received++ ;
-		olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
-#if OLYMPIC_DEBUG
-		printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
-#endif
-		length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
-		buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff; 
-		i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */ 
-		frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16; 
-
-#if OLYMPIC_DEBUG 
-		printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
-#endif
-                l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
-		if(l_status_buffercnt & 0xC0000000) {
-			if (l_status_buffercnt & 0x3B000000) {
-				if (olympic_priv->olympic_message_level) {
-					if (l_status_buffercnt & (1<<29))  /* Rx Frame Truncated */
-						printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
-					if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
-						printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
-					if (l_status_buffercnt & (1<<27)) /* No receive buffers */
-						printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
-					if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
-						printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
-					if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
-						printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
-				} 
-				olympic_priv->rx_ring_last_received += i ; 
-				olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; 
-				dev->stats.rx_errors++;
-			} else {	
-			
-				if (buffer_cnt == 1) {
-					skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ; 
-				} else {
-					skb = dev_alloc_skb(length) ; 
-				}
-
-				if (skb == NULL) {
-					printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
-					dev->stats.rx_dropped++;
-					/* Update counters even though we don't transfer the frame */
-					olympic_priv->rx_ring_last_received += i ; 
-					olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;  
-				} else  {
-					/* Optimise based upon the number of buffers used.
-					   If only one buffer is used we can simply swap the buffers around.
-					   If more than one then we must use the new buffer and copy the
-					   information first. Ideally all frames would be in a single buffer;
-					   this can be tuned by altering the buffer size. If the length of the
-					   packet is less than 1500 bytes we copy it over anyway, to stop packets
-					   getting dropped from sockets with buffers smaller than our pkt_buf_sz. */
-				
- 					if (buffer_cnt==1) {
-						olympic_priv->rx_ring_last_received++ ; 
-						olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
-						rx_ring_last_received = olympic_priv->rx_ring_last_received ;
-						if (length > 1500) { 
-							skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ; 
-							/* unmap buffer */
-							pci_unmap_single(olympic_priv->pdev,
-								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer), 
-								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 
-							skb_put(skb2,length-4);
-							skb2->protocol = tr_type_trans(skb2,dev);
-							olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer = 
-								cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, 
-								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
-							olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length = 
-								cpu_to_le32(olympic_priv->pkt_buf_sz); 
-							olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ; 
-							netif_rx(skb2) ; 
-						} else { 
-							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
-								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
-								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 
-							skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
-								      skb_put(skb,length - 4),
-								      length - 4);
-							pci_dma_sync_single_for_device(olympic_priv->pdev,
-								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
-								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
-							skb->protocol = tr_type_trans(skb,dev) ; 
-							netif_rx(skb) ; 
-						} 
-					} else {
-						do { /* Walk the buffers */ 
-							olympic_priv->rx_ring_last_received++ ; 
-							olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
-							rx_ring_last_received = olympic_priv->rx_ring_last_received ; 
-							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
-								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
-								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ; 
-							rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
-							cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length)); 
-							skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
-								      skb_put(skb, cpy_length),
-								      cpy_length);
-							pci_dma_sync_single_for_device(olympic_priv->pdev,
-								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
-								olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
-						} while (--i) ; 
-						skb_trim(skb,skb->len-4) ; 
-						skb->protocol = tr_type_trans(skb,dev);
-						netif_rx(skb) ; 
-					} 
-					dev->stats.rx_packets++ ;
-					dev->stats.rx_bytes += length ;
-				} /* if skb == null */
-			} /* If status & 0x3b */
-
-		} else { /*if buffercnt & 0xC */
-			olympic_priv->rx_ring_last_received += i ; 
-			olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ; 
-		} 
-
-		rx_status->fragmentcnt_framelen = 0 ; 
-		rx_status->status_buffercnt = 0 ; 
-		rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
-
-		writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) |  buffer_cnt , olympic_mmio+RXENQ); 
-	} /* while */
-
-}
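
The comment above olympic_rx() leans on two conventions: the receive ring size is a power of two, so the "last received" index wraps with a bit-mask rather than a modulo, and the final 4 bytes of every frame are stripped before delivery. The stand-alone sketch below is illustrative only (it is not driver code; RING_SIZE, ring_status[] and frame_len are hypothetical stand-ins) and shows just those two ideas.

/* Illustrative sketch, not part of the driver. */
#include <stdio.h>

#define RING_SIZE 16			/* must be a power of 2 */

int main(void)
{
	unsigned int ring_status[RING_SIZE] = { 0 };
	unsigned int last = RING_SIZE - 1;	/* last processed slot */
	unsigned int frame_len = 68;		/* length reported by the adapter */

	/* Pretend the adapter completed three frames ahead of us. */
	ring_status[0] = ring_status[1] = ring_status[2] = 1;

	/* Walk forward until a slot with no status is found, wrapping
	 * with a mask instead of a modulo because RING_SIZE is 2^n.
	 */
	while (ring_status[(last + 1) & (RING_SIZE - 1)]) {
		last = (last + 1) & (RING_SIZE - 1);
		/* Hand the payload up without the 4-byte trailer. */
		printf("slot %u: deliver %u bytes\n", last, frame_len - 4);
		ring_status[last] = 0;	/* give the slot back */
	}
	return 0;
}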
-
-static void olympic_freemem(struct net_device *dev) 
-{ 
-	struct olympic_private *olympic_priv=netdev_priv(dev);
-	int i;
-			
-	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
-		if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
-			dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
-			olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
-		}
-		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) {
-			pci_unmap_single(olympic_priv->pdev, 
-			le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
-			olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
-		}
-		olympic_priv->rx_status_last_received++;
-		olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
-	}
-	/* unmap rings */
-	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr, 
-		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
-	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
-		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
-
-	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr, 
-		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
-	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr, 
-		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
-
-	return ; 
-}
- 
-static irqreturn_t olympic_interrupt(int irq, void *dev_id) 
-{
-	struct net_device *dev= (struct net_device *)dev_id;
-	struct olympic_private *olympic_priv=netdev_priv(dev);
-	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
-	u32 sisr;
-	u8 __iomem *adapter_check_area ; 
-	
-	/* 
-	 *  Read sisr but don't reset it yet. 
-	 *  The indication bit may have been set but the interrupt latch
-	 *  bit may not be set, so we'd lose the interrupt later. 
-	 */ 
-	sisr=readl(olympic_mmio+SISR) ; 
-	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */ 
-		return IRQ_NONE;
-	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */ 
-
-	spin_lock(&olympic_priv->olympic_lock);
-
-	/* Hotswap gives us this on removal */
-	if (sisr == 0xffffffff) { 
-		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ; 
-		spin_unlock(&olympic_priv->olympic_lock) ; 
-		return IRQ_NONE;
-	} 
-		
-	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |  
-			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {  
-	
-		/* If we ever get this the adapter is seriously dead. Only a reset is going to 
-		 * bring it back to life. We're talking pci bus errors and such like :( */ 
-		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
-			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ; 
-			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ; 
-			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ; 
-			printk(KERN_ERR "or the linux-tr mailing list.\n") ; 
-			wake_up_interruptible(&olympic_priv->srb_wait);
-			spin_unlock(&olympic_priv->olympic_lock) ; 
-			return IRQ_HANDLED;
-		} /* SISR_ERR */
-
-		if(sisr & SISR_SRB_REPLY) {
-			if(olympic_priv->srb_queued==1) {
-				wake_up_interruptible(&olympic_priv->srb_wait);
-			} else if (olympic_priv->srb_queued==2) { 
-				olympic_srb_bh(dev) ; 
-			}
-			olympic_priv->srb_queued=0;
-		} /* SISR_SRB_REPLY */
-
-		/* We shouldn't ever miss the Tx interrupt, but you never know; hence the loop
-		   to ensure we get all tx completions. */
-		if (sisr & SISR_TX1_EOF) {
-			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) { 
-				olympic_priv->tx_ring_last_status++;
-				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
-				olympic_priv->free_tx_ring_entries++;
-				dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
-				dev->stats.tx_packets++ ;
-				pci_unmap_single(olympic_priv->pdev, 
-					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer), 
-					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
-				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
-				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=cpu_to_le32(0xdeadbeef);
-				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
-			}
-			netif_wake_queue(dev);
-		} /* SISR_TX1_EOF */
-	
-		if (sisr & SISR_RX_STATUS) {
-			olympic_rx(dev);
-		} /* SISR_RX_STATUS */
-	
-		if (sisr & SISR_ADAPTER_CHECK) {
-			netif_stop_queue(dev);
-			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
-			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
-			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
-			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ; 
-			spin_unlock(&olympic_priv->olympic_lock) ; 
-			return IRQ_HANDLED; 
-		} /* SISR_ADAPTER_CHECK */
-	
-		if (sisr & SISR_ASB_FREE) {
-			/* Wake up anything that is waiting for the asb response */  
-			if (olympic_priv->asb_queued) {
-				olympic_asb_bh(dev) ; 
-			}
-		} /* SISR_ASB_FREE */
-	
-		if (sisr & SISR_ARB_CMD) {
-			olympic_arb_cmd(dev) ; 
-		} /* SISR_ARB_CMD */
-	
-		if (sisr & SISR_TRB_REPLY) {
-			/* Wake up anything that is waiting for the trb response */
-			if (olympic_priv->trb_queued) {
-				wake_up_interruptible(&olympic_priv->trb_wait);
-			}
-			olympic_priv->trb_queued = 0 ; 
-		} /* SISR_TRB_REPLY */	
-	
-		if (sisr & SISR_RX_NOBUF) {
-			/* According to the documentation, we don't have to do anything,
-			   but trapping it keeps it out of /var/log/messages. */
-		} /* SISR_RX_NOBUF */
-	} else { 
-		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
-		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
-	} /* One of the interrupts we want */
-	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
-	
-	spin_unlock(&olympic_priv->olympic_lock) ; 
-	return IRQ_HANDLED;
-}	
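
olympic_interrupt() above deliberately reads SISR twice: a plain read first, to check the master-interrupt bit without acknowledging anything on a possibly shared line, and only then the read-and-reset register SISR_RR. The short sketch below is illustrative only; the register is simulated by a static variable and nothing touches hardware.

/* Illustrative sketch of the peek-then-clear ordering, not driver code. */
#include <stdio.h>

#define SISR_MI (1u << 15)		/* "master interrupt" bit */

static unsigned int fake_sisr = SISR_MI | 0x20;	/* pretend adapter state */

static unsigned int read_sisr(void)
{
	return fake_sisr;		/* plain read, nothing is cleared */
}

static unsigned int read_sisr_rr(void)	/* read and reset */
{
	unsigned int v = fake_sisr;
	fake_sisr = 0;
	return v;
}

int main(void)
{
	unsigned int sisr = read_sisr();	/* peek, do not clear yet */

	if (!(sisr & SISR_MI)) {
		puts("not our interrupt");	/* would return IRQ_NONE */
		return 0;
	}
	sisr = read_sisr_rr();			/* now read and clear */
	printf("handling sources 0x%x\n", sisr & ~SISR_MI);
	return 0;
}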
-
-static netdev_tx_t olympic_xmit(struct sk_buff *skb,
-				      struct net_device *dev)
-{
-	struct olympic_private *olympic_priv=netdev_priv(dev);
-	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
-	unsigned long flags ; 
-
-	spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
-
-	netif_stop_queue(dev);
-	
-	if(olympic_priv->free_tx_ring_entries) {
-		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer = 
-			cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
-		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
-		olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
-		olympic_priv->free_tx_ring_entries--;
-
-        	olympic_priv->tx_ring_free++;
-        	olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
-		writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
-		netif_wake_queue(dev);
-		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
-		return NETDEV_TX_OK;
-	} else {
-		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
-		return NETDEV_TX_BUSY;
-	} 
-
-}
-	
-
-static int olympic_close(struct net_device *dev) 
-{
-	struct olympic_private *olympic_priv=netdev_priv(dev);
-	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
-	unsigned long t,flags;
-
-	DECLARE_WAITQUEUE(wait,current) ; 
-
-	netif_stop_queue(dev);
-	
-	writel(olympic_priv->srb,olympic_mmio+LAPA);
-	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
-	
-    	writeb(SRB_CLOSE_ADAPTER,srb+0);
-	writeb(0,srb+1);
-	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
-
-	add_wait_queue(&olympic_priv->srb_wait,&wait) ;
-	set_current_state(TASK_INTERRUPTIBLE) ; 
-
-	spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
-	olympic_priv->srb_queued=1;
-
-	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
-	spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
-
-	while(olympic_priv->srb_queued) {
-
-		t = schedule_timeout_interruptible(60*HZ);
-
-        	if(signal_pending(current))	{            
-			printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
-            		printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
-            		olympic_priv->srb_queued=0;
-            		break;
-        	}
-
-		if (t == 0) { 
-			printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
-		} 
-		olympic_priv->srb_queued=0;
-    	}
-	remove_wait_queue(&olympic_priv->srb_wait,&wait) ; 
-
-	olympic_priv->rx_status_last_received++;
-	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
-
-	olympic_freemem(dev) ; 	
-
-	/* reset tx/rx fifo's and busmaster logic */
-
-	writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
-	udelay(1);
-	writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
-
-#if OLYMPIC_DEBUG
-	{
-	int i ; 
-	printk("srb(%p): ",srb);
-	for(i=0;i<4;i++)
-		printk("%x ",readb(srb+i));
-	printk("\n");
-	}
-#endif
-	free_irq(dev->irq,dev);
-
-	return 0;
-	
-}
-
-static void olympic_set_rx_mode(struct net_device *dev) 
-{
-	struct olympic_private *olympic_priv = netdev_priv(dev);
-   	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; 
-	u8 options = 0; 
-	u8 __iomem *srb;
-	struct netdev_hw_addr *ha;
-	unsigned char dev_mc_address[4] ; 
-
-	writel(olympic_priv->srb,olympic_mmio+LAPA);
-	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
-	options = olympic_priv->olympic_copy_all_options; 
-
-	if (dev->flags&IFF_PROMISC)  
-		options |= 0x61 ;
-	else
-		options &= ~0x61 ; 
-
-	/* Only issue the srb if there is a change in options */
-
-	if ((options ^ olympic_priv->olympic_copy_all_options)) { 
-	
-		/* Now to issue the srb command to alter the copy.all.options */
-	
-		writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
-		writeb(0,srb+1);
-		writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
-		writeb(0,srb+3);
-		writeb(olympic_priv->olympic_receive_options,srb+4);
-		writeb(options,srb+5);
-
-		olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
-
-		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
-
-		olympic_priv->olympic_copy_all_options = options ;
-		
-		return ;  
-	} 
-
-	/* Set the functional addresses we need for multicast */
-
-	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 
-
-	netdev_for_each_mc_addr(ha, dev) {
-		dev_mc_address[0] |= ha->addr[2];
-		dev_mc_address[1] |= ha->addr[3];
-		dev_mc_address[2] |= ha->addr[4];
-		dev_mc_address[3] |= ha->addr[5];
-	}
-
-	writeb(SRB_SET_FUNC_ADDRESS,srb+0);
-	writeb(0,srb+1);
-	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
-	writeb(0,srb+3);
-	writeb(0,srb+4);
-	writeb(0,srb+5);
-	writeb(dev_mc_address[0],srb+6);
-	writeb(dev_mc_address[1],srb+7);
-	writeb(dev_mc_address[2],srb+8);
-	writeb(dev_mc_address[3],srb+9);
-
-	olympic_priv->srb_queued = 2 ;
-	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
-
-}
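
The multicast loop above folds the group list into the 4-byte token ring functional address: bytes 2..5 of every multicast MAC address are OR-ed together and handed to the adapter via SRB_SET_FUNC_ADDRESS. The stand-alone illustration below is not driver code and the addresses are invented for the example.

/* Illustrative sketch of the functional-address fold, not driver code. */
#include <stdio.h>

int main(void)
{
	const unsigned char mc[][6] = {
		{ 0x03, 0x00, 0x00, 0x00, 0x00, 0x10 },
		{ 0x03, 0x00, 0x00, 0x00, 0x02, 0x00 },
	};
	unsigned char func_addr[4] = { 0, 0, 0, 0 };
	unsigned int i, j;

	for (i = 0; i < sizeof(mc) / sizeof(mc[0]); i++)
		for (j = 0; j < 4; j++)
			func_addr[j] |= mc[i][j + 2];	/* OR bytes 2..5 */

	printf("functional address mask: %02x:%02x:%02x:%02x\n",
	       func_addr[0], func_addr[1], func_addr[2], func_addr[3]);
	return 0;
}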
-
-static void olympic_srb_bh(struct net_device *dev) 
-{ 
-	struct olympic_private *olympic_priv = netdev_priv(dev);
-   	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; 
-	u8 __iomem *srb;
-
-	writel(olympic_priv->srb,olympic_mmio+LAPA);
-	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
-
-	switch (readb(srb)) { 
-
-		/* SRB_MODIFY_RECEIVE_OPTIONS, i.e. the set_multicast_list options (promiscuous).
-		 * At some point we should do something if we get an error, such as
-		 * resetting the IFF_PROMISC flag in dev.
-		 */
-
-		case SRB_MODIFY_RECEIVE_OPTIONS:
-			switch (readb(srb+2)) { 
-				case 0x01:
-					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ; 
-					break ; 
-				case 0x04:
-					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
-					break ; 
-				default:
-					if (olympic_priv->olympic_message_level) 
-						printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ; 
-					break ; 	
-			} /* switch srb[2] */ 
-			break ;
-		
-		/* SRB_SET_GROUP_ADDRESS - Multicast group setting 
-                 */
-
-		case SRB_SET_GROUP_ADDRESS:
-			switch (readb(srb+2)) { 
-				case 0x00:
-					break ; 
-				case 0x01:
-					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
-					break ;
-				case 0x04:
-					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name); 
-					break ;
-				case 0x3c:
-					printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ; 
-					break ;
-				case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
-					printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ; 
-					break ;  
-				case 0x55:
-					printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ; 
-					break ;
-				default:
-					break ; 
-			} /* switch srb[2] */ 
-			break ; 
-
-		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
- 		 */
-
-		case SRB_RESET_GROUP_ADDRESS:
-			switch (readb(srb+2)) { 
-				case 0x00:
-					break ; 
-				case 0x01:
-					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
-					break ; 
-				case 0x04:
-					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 
-					break ; 
-				case 0x39: /* Must deal with this if individual multicast addresses used */
-					printk(KERN_INFO "%s: Group address not found\n",dev->name);
-					break ;
-				default:
-					break ; 
-			} /* switch srb[2] */
-			break ; 
-
-		
-		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode 
-		 */
-
-		case SRB_SET_FUNC_ADDRESS:
-			switch (readb(srb+2)) { 
-				case 0x00:
-					if (olympic_priv->olympic_message_level)
-						printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
-					break ;
-				case 0x01:
-					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
-					break ; 
-				case 0x04:
-					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 
-					break ; 
-				default:
-					break ; 
-			} /* switch srb[2] */
-			break ; 
-	
-		/* SRB_READ_LOG - Read and reset the adapter error counters
- 		 */
-
-		case SRB_READ_LOG:
-			switch (readb(srb+2)) { 
-				case 0x00: 
-					if (olympic_priv->olympic_message_level) 
-						printk(KERN_INFO "%s: Read Log issued\n",dev->name) ; 
-					break ; 
-				case 0x01:
-					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
-					break ; 
-				case 0x04:
-					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 
-					break ; 
-			
-			} /* switch srb[2] */
-			break ; 
-		
-		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
-
-		case SRB_READ_SR_COUNTERS:
-			switch (readb(srb+2)) { 
-				case 0x00: 
-					if (olympic_priv->olympic_message_level) 
-						printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ; 
-					break ; 
-				case 0x01:
-					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
-					break ; 
-				case 0x04:
-					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 
-					break ; 
-				default:
-					break ; 
-			} /* switch srb[2] */
-			break ;
- 
-		default:
-			printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
-			break ; 
-	} /* switch srb[0] */
-
-} 
-
-static int olympic_set_mac_address (struct net_device *dev, void *addr) 
-{
-	struct sockaddr *saddr = addr ; 
-	struct olympic_private *olympic_priv = netdev_priv(dev);
-
-	if (netif_running(dev)) { 
-		printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ; 
-		return -EIO ; 
-	}
-
-	memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ; 
-	
-	if (olympic_priv->olympic_message_level) { 
- 		printk(KERN_INFO "%s: MAC/LAA Set to  = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
-		olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
-		olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
-		olympic_priv->olympic_laa[5]);
-	} 
-
-	return 0 ; 
-}
-
-static void olympic_arb_cmd(struct net_device *dev)
-{
-	struct olympic_private *olympic_priv = netdev_priv(dev);
-	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
-	u8 __iomem *arb_block, *asb_block, *srb  ; 
-	u8 header_len ; 
-	u16 frame_len, buffer_len ;
-	struct sk_buff *mac_frame ;  
-	u8 __iomem *buf_ptr ;
-	u8 __iomem *frame_data ;  
-	u16 buff_off ; 
-	u16 lan_status = 0, lan_status_diff  ; /* Initialize to stop compiler warning */
-	u8 fdx_prot_error ; 
-	u16 next_ptr;
-
-	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ; 
-	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ; 
-	srb = (olympic_priv->olympic_lap + olympic_priv->srb) ; 
-	
-	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
-
-		header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */	
-		frame_len = swab16(readw(arb_block + 10)) ; 
-
-		buff_off = swab16(readw(arb_block + 6)) ;
-		
-		buf_ptr = olympic_priv->olympic_lap + buff_off ; 
-
-#if OLYMPIC_DEBUG
-{
-		int i;
-		frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ; 
-
-		for (i=0 ;  i < 14 ; i++) { 
-			printk("Loc %d = %02x\n",i,readb(frame_data + i)); 
-		}
-
-		printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
-}
-#endif 
-		mac_frame = dev_alloc_skb(frame_len) ; 
-		if (!mac_frame) {
-			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
-			goto drop_frame;
-		}
-
-		/* Walk the buffer chain, creating the frame */
-
-		do {
-			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ; 
-			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length))); 
-			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
-			next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next)); 
-		} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr)));
-
-		mac_frame->protocol = tr_type_trans(mac_frame, dev);
-
-		if (olympic_priv->olympic_network_monitor) { 
-			struct trh_hdr *mac_hdr;
-			printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
-			mac_hdr = tr_hdr(mac_frame);
-			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
-			       dev->name, mac_hdr->daddr);
-			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n",
-			       dev->name, mac_hdr->saddr);
-		}
-		netif_rx(mac_frame);
-
-drop_frame:
-		/* Now tell the card we have dealt with the received frame */
-
-		/* Set LISR Bit 1 */
-		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);
-
-		/* Is the ASB free ? */ 	
-		
-		if (readb(asb_block + 2) != 0xff) { 
-			olympic_priv->asb_queued = 1 ; 
-			writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM); 
-			return ; 	
-			/* Drop out and wait for the bottom half to be run */
-		}
-		
-		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
-		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
-		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
-		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */		
-
-		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
-		
-		olympic_priv->asb_queued = 2 ; 
-	
-		return ; 	
-		
-	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
-		lan_status = swab16(readw(arb_block+6));
-		fdx_prot_error = readb(arb_block+8) ; 
-		
-		/* Issue ARB Free */
-		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);
-
-		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ; 
-
-		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) { 
-			if (lan_status_diff & LSC_LWF) 
-					printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
-			if (lan_status_diff & LSC_ARW) 
-					printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
-			if (lan_status_diff & LSC_FPE)
-					printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
-			if (lan_status_diff & LSC_RR) 
-					printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
-		
-			/* Adapter has been closed by the hardware */
-		
-			/* reset tx/rx fifo's and busmaster logic */
-
-			writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
-			udelay(1);
-			writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
-			netif_stop_queue(dev);
-			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ; 
-			printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
-		} /* If serious error */
-		
-		if (olympic_priv->olympic_message_level) { 
-			if (lan_status_diff & LSC_SIG_LOSS) 
-					printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
-			if (lan_status_diff & LSC_HARD_ERR)
-					printk(KERN_INFO "%s: Beaconing\n",dev->name);
-			if (lan_status_diff & LSC_SOFT_ERR)
-					printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
-			if (lan_status_diff & LSC_TRAN_BCN) 
-					printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
-			if (lan_status_diff & LSC_SS) 
-					printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
-			if (lan_status_diff & LSC_RING_REC)
-					printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
-			if (lan_status_diff & LSC_FDX_MODE)
-					printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
-		} 	
-		
-		if (lan_status_diff & LSC_CO) { 
-					
-				if (olympic_priv->olympic_message_level) 
-					printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
-					
-				/* Issue READ.LOG command */
-
-				writeb(SRB_READ_LOG, srb);
-				writeb(0,srb+1);
-				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
-				writeb(0,srb+3);
-				writeb(0,srb+4);
-				writeb(0,srb+5);
-					
-				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
-
-				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
-					
-		}
-
-		if (lan_status_diff & LSC_SR_CO) { 
-
-				if (olympic_priv->olympic_message_level)
-					printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
-
-				/* Issue a READ.SR.COUNTERS */
-				
-				writeb(SRB_READ_SR_COUNTERS,srb);
-				writeb(0,srb+1);
-				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
-				writeb(0,srb+3);
-				
-				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
-
-				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
-
-		}
-
-		olympic_priv->olympic_lan_status = lan_status ; 
-	
-	}  /* Lan.change.status */
-	else
-		printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
-}
-
-static void olympic_asb_bh(struct net_device *dev) 
-{
-	struct olympic_private *olympic_priv = netdev_priv(dev);
-	u8 __iomem *arb_block, *asb_block ; 
-
-	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ; 
-	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ; 
-
-	if (olympic_priv->asb_queued == 1) {   /* Dropped through the first time */
-
-		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
-		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
-		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
-		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */		
-
-		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
-		olympic_priv->asb_queued = 2 ; 
-
-		return ; 
-	}
-
-	if (olympic_priv->asb_queued == 2) { 
-		switch (readb(asb_block+2)) {
-			case 0x01:
-				printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
-				break ;
-			case 0x26:
-				printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
-				break ;
-			case 0xFF:
-				/* Valid response, everything should be ok again */
-				break ;
-			default:
-				printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
-				break ;
-		}
-	}
-	olympic_priv->asb_queued = 0 ; 
-}
- 
-static int olympic_change_mtu(struct net_device *dev, int mtu) 
-{
-	struct olympic_private *olympic_priv = netdev_priv(dev);
-	u16 max_mtu ; 
-
-	if (olympic_priv->olympic_ring_speed == 4)
-		max_mtu = 4500 ; 
-	else
-		max_mtu = 18000 ; 
-	
-	if (mtu > max_mtu)
-		return -EINVAL ; 
-	if (mtu < 100) 
-		return -EINVAL ; 
-
-	dev->mtu = mtu ; 
-	olympic_priv->pkt_buf_sz = mtu + TR_HLEN ; 
-
-	return 0 ; 
-}
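
olympic_change_mtu() enforces the classic token ring frame limits: an MTU of at most 4500 bytes on a 4 Mbps ring and 18000 bytes on a 16 Mbps ring, with 100 bytes as the floor. Below is a minimal stand-alone sketch of that check; check_mtu() is a hypothetical helper, not part of the driver.

/* Illustrative sketch of the ring-speed-dependent MTU check. */
#include <stdio.h>

static int check_mtu(int ring_speed_mbps, int mtu)
{
	int max_mtu = (ring_speed_mbps == 4) ? 4500 : 18000;

	if (mtu < 100 || mtu > max_mtu)
		return -1;		/* would be -EINVAL in the driver */
	return 0;
}

int main(void)
{
	printf("4 Mbps, mtu 9000 -> %d\n", check_mtu(4, 9000));	/* rejected */
	printf("16 Mbps, mtu 9000 -> %d\n", check_mtu(16, 9000));	/* accepted */
	return 0;
}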
-
-static int olympic_proc_show(struct seq_file *m, void *v)
-{
-	struct net_device *dev = m->private;
-	struct olympic_private *olympic_priv=netdev_priv(dev);
-	u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ; 
-	u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ; 
-	u8 addr[6];
-	u8 addr2[6];
-	int i;
-
-	seq_printf(m,
-		"IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
-	seq_printf(m, "\n%6s: Adapter Address   : Node Address      : Functional Addr\n",
- 	   dev->name); 
-
-	for (i = 0 ; i < 6 ; i++)
-		addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);
-
-	seq_printf(m, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
-	   dev->name,
-	   dev->dev_addr, addr,
-	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), 
-	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
-	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
-	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
-	 
-	seq_printf(m, "\n%6s: Token Ring Parameters Table:\n", dev->name);
-
-	seq_printf(m, "%6s: Physical Addr : Up Node Address   : Poll Address      : AccPri : Auth Src : Att Code :\n",
-	  dev->name) ; 
-
-	for (i = 0 ; i < 6 ; i++)
-		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i);
-	for (i = 0 ; i < 6 ; i++)
-		addr2[i] =  readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);
-
-	seq_printf(m, "%6s: %02x:%02x:%02x:%02x   : %pM : %pM : %04x   : %04x     :  %04x    :\n",
-	  dev->name,
-	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
-	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
-	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
-	  readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
-	  addr, addr2,
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
-
-	seq_printf(m, "%6s: Source Address    : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
-	  dev->name) ; 
-	
-	for (i = 0 ; i < 6 ; i++)
-		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
-	seq_printf(m, "%6s: %pM : %04x  : %04x   : %04x   : %04x   : %04x    :     %04x     : \n",
-	  dev->name, addr,
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
-
-	seq_printf(m, "%6s: Beacon Details :  Tx  :  Rx  : NAUN Node Address : NAUN Node Phys : \n",
-	  dev->name) ; 
-
-	for (i = 0 ; i < 6 ; i++)
-		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
-	seq_printf(m, "%6s:                :  %02x  :  %02x  : %pM : %02x:%02x:%02x:%02x    : \n",
-	  dev->name,
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
-	  swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
-	  addr,
-	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
-	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
-	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
-	  readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
-
-	return 0;
-}
-
-static int olympic_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, olympic_proc_show, PDE(inode)->data);
-}
-
-static const struct file_operations olympic_proc_ops = {
-	.open		= olympic_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static void __devexit olympic_remove_one(struct pci_dev *pdev) 
-{
-	struct net_device *dev = pci_get_drvdata(pdev) ; 
-	struct olympic_private *olympic_priv=netdev_priv(dev);
-
-	if (olympic_priv->olympic_network_monitor) { 
-		char proc_name[20] ; 
-		strcpy(proc_name,"olympic_") ;
-		strcat(proc_name,dev->name) ;
-		remove_proc_entry(proc_name,init_net.proc_net);
-	}
-	unregister_netdev(dev) ; 
-	iounmap(olympic_priv->olympic_mmio) ; 
-	iounmap(olympic_priv->olympic_lap) ; 
-	pci_release_regions(pdev) ;
-	pci_set_drvdata(pdev,NULL) ;  	
-	free_netdev(dev) ; 
-}
-
-static struct pci_driver olympic_driver = { 
-	.name		= "olympic",
-	.id_table	= olympic_pci_tbl,
-	.probe		= olympic_probe,
-	.remove		= __devexit_p(olympic_remove_one),
-};
-
-static int __init olympic_pci_init(void) 
-{
-	return pci_register_driver(&olympic_driver) ;
-}
-
-static void __exit olympic_pci_cleanup(void)
-{
-	pci_unregister_driver(&olympic_driver) ; 
-}	
-
-
-module_init(olympic_pci_init) ; 
-module_exit(olympic_pci_cleanup) ; 
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h
deleted file mode 100644
index 30631ba..0000000
--- a/drivers/net/tokenring/olympic.h
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- *  olympic.h (c) 1999 Peter De Schrijver All Rights Reserved
- *                1999,2000 Mike Phillips (mikep@linuxtr.net)
- *
- *  Linux driver for IBM PCI tokenring cards based on the olympic and the PIT/PHY chipset.
- *
- *  Base Driver Skeleton:
- *      Written 1993-94 by Donald Becker.
- *
- *      Copyright 1993 United States Government as represented by the
- *      Director, National Security Agency.
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License, incorporated herein by reference.
- */
-
-#define CID 0x4e
-
-#define BCTL 0x70
-#define BCTL_SOFTRESET (1<<15)
-#define BCTL_MIMREB (1<<6)
-#define BCTL_MODE_INDICATOR (1<<5)
-
-#define GPR 0x4a
-#define GPR_OPTI_BF (1<<6)
-#define GPR_NEPTUNE_BF (1<<4) 
-#define GPR_AUTOSENSE (1<<2)
-#define GPR_16MBPS (1<<3) 
-
-#define PAG 0x85
-#define LBC 0x8e
-
-#define LISR 0x10
-#define LISR_SUM 0x14
-#define LISR_RWM 0x18
-
-#define LISR_LIE (1<<15)
-#define LISR_SLIM (1<<13)
-#define LISR_SLI (1<<12)
-#define LISR_PCMSRMASK (1<<11)
-#define LISR_PCMSRINT (1<<10)
-#define LISR_WOLMASK (1<<9)
-#define LISR_WOL (1<<8)
-#define LISR_SRB_CMD (1<<5)
-#define LISR_ASB_REPLY (1<<4)
-#define LISR_ASB_FREE_REQ (1<<2)
-#define LISR_ARB_FREE (1<<1)
-#define LISR_TRB_FRAME (1<<0)
-
-#define SISR 0x20
-#define SISR_SUM 0x24
-#define SISR_RWM 0x28
-#define SISR_RR 0x2C
-#define SISR_RESMASK 0x30
-#define SISR_MASK 0x54
-#define SISR_MASK_SUM 0x58
-#define SISR_MASK_RWM 0x5C
-
-#define SISR_TX2_IDLE (1<<31)
-#define SISR_TX2_HALT (1<<29)
-#define SISR_TX2_EOF (1<<28)
-#define SISR_TX1_IDLE (1<<27)
-#define SISR_TX1_HALT (1<<25)
-#define SISR_TX1_EOF (1<<24)
-#define SISR_TIMEOUT (1<<23)
-#define SISR_RX_NOBUF (1<<22)
-#define SISR_RX_STATUS (1<<21)
-#define SISR_RX_HALT (1<<18)
-#define SISR_RX_EOF_EARLY (1<<16)
-#define SISR_MI (1<<15)
-#define SISR_PI (1<<13)
-#define SISR_ERR (1<<9)
-#define SISR_ADAPTER_CHECK (1<<6)
-#define SISR_SRB_REPLY (1<<5)
-#define SISR_ASB_FREE (1<<4)
-#define SISR_ARB_CMD (1<<3)
-#define SISR_TRB_REPLY (1<<2)
-
-#define EISR 0x34
-#define EISR_RWM 0x38
-#define EISR_MASK 0x3c
-#define EISR_MASK_OPTIONS 0x001FFF7F
-
-#define LAPA 0x60
-#define LAPWWO 0x64
-#define LAPWWC 0x68
-#define LAPCTL 0x6C
-#define LAIPD 0x78
-#define LAIPDDINC 0x7C
-
-#define TIMER 0x50
-
-#define CLKCTL 0x74
-#define CLKCTL_PAUSE (1<<15) 
-
-#define PM_CON 0x4
-
-#define BMCTL_SUM 0x40
-#define BMCTL_RWM 0x44
-#define BMCTL_TX2_DIS (1<<30) 
-#define BMCTL_TX1_DIS (1<<26) 
-#define BMCTL_RX_DIS (1<<22) 
-
-#define BMASR 0xcc
-
-#define RXDESCQ 0x90
-#define RXDESCQCNT 0x94
-#define RXCDA 0x98
-#define RXENQ 0x9C
-#define RXSTATQ 0xA0
-#define RXSTATQCNT 0xA4
-#define RXCSA 0xA8
-#define RXCLEN 0xAC
-#define RXHLEN 0xAE
-
-#define TXDESCQ_1 0xb0
-#define TXDESCQ_2 0xd0
-#define TXDESCQCNT_1 0xb4
-#define TXDESCQCNT_2 0xd4
-#define TXCDA_1 0xb8
-#define TXCDA_2 0xd8
-#define TXENQ_1 0xbc
-#define TXENQ_2 0xdc
-#define TXSTATQ_1 0xc0
-#define TXSTATQ_2 0xe0
-#define TXSTATQCNT_1 0xc4
-#define TXSTATQCNT_2 0xe4
-#define TXCSA_1 0xc8
-#define TXCSA_2 0xe8
-/* Cardbus */
-#define FERMASK 0xf4
-#define FERMASK_INT_BIT (1<<15)
-
-#define OLYMPIC_IO_SPACE 256
-
-#define SRB_COMMAND_SIZE 50
-
-#define OLYMPIC_MAX_ADAPTERS 8 /* 0x08 __MODULE_STRING can't handle 0xnn */
-
-/* Defines for LAN STATUS CHANGE reports */
-#define LSC_SIG_LOSS 0x8000
-#define LSC_HARD_ERR 0x4000
-#define LSC_SOFT_ERR 0x2000
-#define LSC_TRAN_BCN 0x1000
-#define LSC_LWF      0x0800
-#define LSC_ARW      0x0400
-#define LSC_FPE      0x0200
-#define LSC_RR       0x0100
-#define LSC_CO       0x0080
-#define LSC_SS       0x0040
-#define LSC_RING_REC 0x0020
-#define LSC_SR_CO    0x0010
-#define LSC_FDX_MODE 0x0004
-
-/* Defines for OPEN ADAPTER command */
-
-#define OPEN_ADAPTER_EXT_WRAP (1<<15)
-#define OPEN_ADAPTER_DIS_HARDEE (1<<14)
-#define OPEN_ADAPTER_DIS_SOFTERR (1<<13)
-#define OPEN_ADAPTER_PASS_ADC_MAC (1<<12)
-#define OPEN_ADAPTER_PASS_ATT_MAC (1<<11)
-#define OPEN_ADAPTER_ENABLE_EC (1<<10)
-#define OPEN_ADAPTER_CONTENDER (1<<8)
-#define OPEN_ADAPTER_PASS_BEACON (1<<7)
-#define OPEN_ADAPTER_ENABLE_FDX (1<<6)
-#define OPEN_ADAPTER_ENABLE_RPL (1<<5)
-#define OPEN_ADAPTER_INHIBIT_ETR (1<<4)
-#define OPEN_ADAPTER_INTERNAL_WRAP (1<<3)
-#define OPEN_ADAPTER_USE_OPTS2 (1<<0)
-
-#define OPEN_ADAPTER_2_ENABLE_ONNOW (1<<15)
-
-/* Defines for SRB Commands */
-
-#define SRB_ACCESS_REGISTER 0x1f
-#define SRB_CLOSE_ADAPTER 0x04
-#define SRB_CONFIGURE_BRIDGE 0x0c
-#define SRB_CONFIGURE_WAKEUP_EVENT 0x1a
-#define SRB_MODIFY_BRIDGE_PARMS 0x15
-#define SRB_MODIFY_OPEN_OPTIONS 0x01
-#define SRB_MODIFY_RECEIVE_OPTIONS 0x17
-#define SRB_NO_OPERATION 0x00
-#define SRB_OPEN_ADAPTER 0x03
-#define SRB_READ_LOG 0x08
-#define SRB_READ_SR_COUNTERS 0x16
-#define SRB_RESET_GROUP_ADDRESS 0x02
-#define SRB_SAVE_CONFIGURATION 0x1b
-#define SRB_SET_BRIDGE_PARMS 0x09
-#define SRB_SET_BRIDGE_TARGETS 0x10
-#define SRB_SET_FUNC_ADDRESS 0x07
-#define SRB_SET_GROUP_ADDRESS 0x06
-#define SRB_SET_GROUP_ADDR_OPTIONS 0x11
-#define SRB_UPDATE_WAKEUP_PATTERN 0x19
-
-/* Clear return code */
-
-#define OLYMPIC_CLEAR_RET_CODE 0xfe 
-
-/* ARB Commands */
-#define ARB_RECEIVE_DATA 0x81
-#define ARB_LAN_CHANGE_STATUS 0x84
-/* ASB Response commands */
-
-#define ASB_RECEIVE_DATA 0x81
-
-
-/* Olympic defaults for buffers */
- 
-#define OLYMPIC_RX_RING_SIZE 16 /* should be a power of 2 */
-#define OLYMPIC_TX_RING_SIZE 8 /* should be a power of 2 */
-
-#define PKT_BUF_SZ 4096 /* Default packet size */
-
-/* Olympic data structures */
-
-/* xxxx These structures are all little endian in hardware. */
-
-struct olympic_tx_desc {
-	__le32 buffer;
-	__le32 status_length;
-};
-
-struct olympic_tx_status {
-	__le32 status;
-};
-
-struct olympic_rx_desc {
-	__le32 buffer;
-	__le32 res_length; 
-};
-
-struct olympic_rx_status {
-	__le32 fragmentcnt_framelen;
-	__le32 status_buffercnt;
-};
-/* xxxx END These structures are all little endian in hardware. */
-/* xxxx There may be more, but I'm pretty sure about these */
-
-struct mac_receive_buffer {
-	__le16 next ; 
-	u8 padding ; 
-	u8 frame_status ;
-	__le16 buffer_length ; 
-	u8 frame_data ; 
-};
-
-struct olympic_private {
-	
-	u16 srb;      /* be16 */
-	u16 trb;      /* be16 */
-	u16 arb;      /* be16 */
-	u16 asb;      /* be16 */
-
-	u8 __iomem *olympic_mmio;
-	u8 __iomem *olympic_lap;
-	struct pci_dev *pdev ; 
-	const char *olympic_card_name;
-
-	spinlock_t olympic_lock ; 
-
-	volatile int srb_queued;    /* True if an SRB is still posted */	
-	wait_queue_head_t srb_wait;
-
-	volatile int asb_queued;    /* True if an ASB is posted */
-
-	volatile int trb_queued;   /* True if a TRB is posted */
-	wait_queue_head_t trb_wait ; 
-
-	/* These must be on a 4 byte boundary. */
-	struct olympic_rx_desc olympic_rx_ring[OLYMPIC_RX_RING_SIZE];
-	struct olympic_tx_desc olympic_tx_ring[OLYMPIC_TX_RING_SIZE];
-	struct olympic_rx_status olympic_rx_status_ring[OLYMPIC_RX_RING_SIZE];	
-	struct olympic_tx_status olympic_tx_status_ring[OLYMPIC_TX_RING_SIZE];	
-
-	struct sk_buff *tx_ring_skb[OLYMPIC_TX_RING_SIZE], *rx_ring_skb[OLYMPIC_RX_RING_SIZE];	
-	int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries;
-
-	u16 olympic_lan_status ;
-	u8 olympic_ring_speed ;
-	u16 pkt_buf_sz ; 
-	u8 olympic_receive_options, olympic_copy_all_options,olympic_message_level, olympic_network_monitor;  
-	u16 olympic_addr_table_addr, olympic_parms_addr ; 
-	u8 olympic_laa[6] ; 
-	u32 rx_ring_dma_addr;
-	u32 rx_status_ring_dma_addr;
-	u32 tx_ring_dma_addr;
-	u32 tx_status_ring_dma_addr;
-};
-
-struct olympic_adapter_addr_table {
-
-	u8 node_addr[6] ; 
-	u8 reserved[4] ; 
-	u8 func_addr[4] ; 
-} ; 
-
-struct olympic_parameters_table { 
-	
-	u8  phys_addr[4] ; 
-	u8  up_node_addr[6] ; 
-	u8  up_phys_addr[4] ; 
-	u8  poll_addr[6] ; 
-	u16 reserved ; 
-	u16 acc_priority ; 
-	u16 auth_source_class ; 
-	u16 att_code ; 
-	u8  source_addr[6] ; 
-	u16 beacon_type ; 
-	u16 major_vector ; 
-	u16 lan_status ; 
-	u16 soft_error_time ; 
- 	u16 reserved1 ; 
-	u16 local_ring ; 
-	u16 mon_error ; 
-	u16 beacon_transmit ; 
-	u16 beacon_receive ; 
-	u16 frame_correl ; 
-	u8  beacon_naun[6] ; 
-	u32 reserved2 ; 
-	u8  beacon_phys[4] ; 	
-}; 
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
deleted file mode 100644
index 62d90e4..0000000
--- a/drivers/net/tokenring/proteon.c
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- *  proteon.c: A network driver for Proteon ISA token ring cards.
- *
- *  Based on tmspci written 1999 by Adam Fritzler
- *  
- *  Written 2003 by Jochen Friedrich
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License, incorporated herein by reference.
- *
- *  This driver module supports the following cards:
- *	- Proteon 1392, 1392+
- *
- *  Maintainer(s):
- *    AF        Adam Fritzler
- *    JF	Jochen Friedrich	jochen@scram.de
- *
- *  Modification History:
- *	02-Jan-03	JF	Created
- *
- */
-static const char version[] = "proteon.c: v1.00 02/01/2003 by Jochen Friedrich\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-#include <linux/platform_device.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/pci.h>
-#include <asm/dma.h>
-
-#include "tms380tr.h"
-
-#define PROTEON_IO_EXTENT 32
-
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int portlist[] __initdata = {
-	0x0A20, 0x0E20, 0x1A20, 0x1E20, 0x2A20, 0x2E20, 0x3A20, 0x3E20,// Prot.
-	0x4A20, 0x4E20, 0x5A20, 0x5E20, 0x6A20, 0x6E20, 0x7A20, 0x7E20,// Prot.
-	0x8A20, 0x8E20, 0x9A20, 0x9E20, 0xAA20, 0xAE20, 0xBA20, 0xBE20,// Prot.
-	0xCA20, 0xCE20, 0xDA20, 0xDE20, 0xEA20, 0xEE20, 0xFA20, 0xFE20,// Prot.
-	0
-};
-
-/* A zero-terminated list of IRQs to be probed. */
-static unsigned short irqlist[] = {
-	7, 6, 5, 4, 3, 12, 11, 10, 9,
-	0
-};
-
-/* A zero-terminated list of DMAs to be probed. */
-static int dmalist[] __initdata = {
-	5, 6, 7,
-	0
-};
-
-static char cardname[] = "Proteon 1392\0";
-static u64 dma_mask = ISA_MAX_ADDRESS;
-static int proteon_open(struct net_device *dev);
-static void proteon_read_eeprom(struct net_device *dev);
-static unsigned short proteon_setnselout_pins(struct net_device *dev);
-
-static unsigned short proteon_sifreadb(struct net_device *dev, unsigned short reg)
-{
-	return inb(dev->base_addr + reg);
-}
-
-static unsigned short proteon_sifreadw(struct net_device *dev, unsigned short reg)
-{
-	return inw(dev->base_addr + reg);
-}
-
-static void proteon_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
-{
-	outb(val, dev->base_addr + reg);
-}
-
-static void proteon_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
-{
-	outw(val, dev->base_addr + reg);
-}
-
-static int __init proteon_probe1(struct net_device *dev, int ioaddr)
-{
-	unsigned char chk1, chk2;
-	int i;
-
-	if (!request_region(ioaddr, PROTEON_IO_EXTENT, cardname))
-		return -ENODEV;
-		
-
-	chk1 = inb(ioaddr + 0x1f);      /* Get Proteon ID reg 1 */
-	if (chk1 != 0x1f) 
-		goto nodev;
-
-	chk1 = inb(ioaddr + 0x1e) & 0x07;       /* Get Proteon ID reg 0 */
-	for (i=0; i<16; i++) {
-		chk2 = inb(ioaddr + 0x1e) & 0x07;
-		if (((chk1 + 1) & 0x07) != chk2)
-			goto nodev;
-		chk1 = chk2;
-	}
-
-	dev->base_addr = ioaddr;
-	return 0;
-nodev:
-	release_region(ioaddr, PROTEON_IO_EXTENT); 
-	return -ENODEV;
-}
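
proteon_probe1() above decides whether a card is present by watching the low three bits of the ID register at ioaddr+0x1e, which should advance by one (modulo 8) on every read; sixteen consecutive reads must follow that pattern. The sketch below reproduces the same check with the register simulated in software; it is illustrative only.

/* Illustrative sketch of the incrementing-ID probe check, not driver code. */
#include <stdio.h>

static unsigned char counter;		/* simulated ID register state */

static unsigned char read_id_reg(void)
{
	return counter++ & 0x07;	/* only the low 3 bits matter */
}

int main(void)
{
	unsigned char prev = read_id_reg();
	int i, present = 1;

	for (i = 0; i < 16; i++) {
		unsigned char cur = read_id_reg();

		if (((prev + 1) & 0x07) != cur) {
			present = 0;	/* pattern broken: no card here */
			break;
		}
		prev = cur;
	}
	printf("card %s\n", present ? "detected" : "not found");
	return 0;
}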
-
-static struct net_device_ops proteon_netdev_ops __read_mostly;
-
-static int __init setup_card(struct net_device *dev, struct device *pdev)
-{
-	struct net_local *tp;
-        static int versionprinted;
-	const unsigned *port;
-	int j,err = 0;
-
-	if (!dev)
-		return -ENOMEM;
-
-	if (dev->base_addr)	/* probe specific location */
-		err = proteon_probe1(dev, dev->base_addr);
-	else {
-		for (port = portlist; *port; port++) {
-			err = proteon_probe1(dev, *port);
-			if (!err)
-				break;
-		}
-	}
-	if (err)
-		goto out5;
-
-	/* At this point we have found a valid card. */
-
-	if (versionprinted++ == 0)
-		printk(KERN_DEBUG "%s", version);
-
-	err = -EIO;
-	pdev->dma_mask = &dma_mask;
-	if (tmsdev_init(dev, pdev))
-		goto out4;
-
-	dev->base_addr &= ~3; 
-		
-	proteon_read_eeprom(dev);
-
-	printk(KERN_DEBUG "proteon.c:    Ring Station Address: %pM\n",
-	       dev->dev_addr);
-		
-	tp = netdev_priv(dev);
-	tp->setnselout = proteon_setnselout_pins;
-		
-	tp->sifreadb = proteon_sifreadb;
-	tp->sifreadw = proteon_sifreadw;
-	tp->sifwriteb = proteon_sifwriteb;
-	tp->sifwritew = proteon_sifwritew;
-	
-	memcpy(tp->ProductID, cardname, PROD_ID_SIZE + 1);
-
-	tp->tmspriv = NULL;
-
-	dev->netdev_ops = &proteon_netdev_ops;
-
-	if (dev->irq == 0)
-	{
-		for(j = 0; irqlist[j] != 0; j++)
-		{
-			dev->irq = irqlist[j];
-			if (!request_irq(dev->irq, tms380tr_interrupt, 0, 
-				cardname, dev))
-				break;
-                }
-		
-                if(irqlist[j] == 0)
-                {
-                        printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n");
-			goto out3;
-		}
-	}
-	else
-	{
-		for(j = 0; irqlist[j] != 0; j++)
-			if (irqlist[j] == dev->irq)
-				break;
-		if (irqlist[j] == 0)
-		{
-			printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n",
-				dev->irq);
-			goto out3;
-		}
-		if (request_irq(dev->irq, tms380tr_interrupt, 0, 
-			cardname, dev))
-		{
-                        printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n",
-				dev->irq);
-			goto out3;
-		}
-	}
-
-	if (dev->dma == 0)
-	{
-		for(j = 0; dmalist[j] != 0; j++)
-		{
-			dev->dma = dmalist[j];
-                        if (!request_dma(dev->dma, cardname))
-				break;
-		}
-
-		if(dmalist[j] == 0)
-		{
-			printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n");
-			goto out2;
-		}
-	}
-	else
-	{
-		for(j = 0; dmalist[j] != 0; j++)
-			if (dmalist[j] == dev->dma)
-				break;
-		if (dmalist[j] == 0)
-		{
-                        printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n",
-				dev->dma);
-			goto out2;
-		}
-		if (request_dma(dev->dma, cardname))
-		{
-                        printk(KERN_INFO "proteon.c: Selected DMA %d not available\n",
-				dev->dma);
-			goto out2;
-		}
-	}
-
-	err = register_netdev(dev);
-	if (err)
-		goto out;
-
-	printk(KERN_DEBUG "%s:    IO: %#4lx  IRQ: %d  DMA: %d\n",
-	       dev->name, dev->base_addr, dev->irq, dev->dma);
-
-	return 0;
-out:
-	free_dma(dev->dma);
-out2:
-	free_irq(dev->irq, dev);
-out3:
-	tmsdev_term(dev);
-out4:
-	release_region(dev->base_addr, PROTEON_IO_EXTENT);
-out5:
-	return err;
-}
-
-/*
- * Reads MAC address from adapter RAM, which should've read it from
- * the onboard ROM.  
- *
- * Calling this on a board that does not support it can be a very
- * dangerous thing.  The Madge board, for instance, will lock your
- * machine hard when this is called.  Luckily, its supported in a
- * separate driver.  --ASF
- */
-static void proteon_read_eeprom(struct net_device *dev)
-{
-	int i;
-	
-	/* Address: 0000:0000 */
-	proteon_sifwritew(dev, 0, SIFADX);
-	proteon_sifwritew(dev, 0, SIFADR);	
-	
-	/* Read six byte MAC address data */
-	dev->addr_len = 6;
-	for(i = 0; i < 6; i++)
-		dev->dev_addr[i] = proteon_sifreadw(dev, SIFINC) >> 8;
-}
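
proteon_read_eeprom() assembles the 6-byte station address from word-wide reads of the auto-incrementing SIFINC data register, keeping the high byte of each 16-bit word. The sketch below mimics that extraction with an array standing in for the register; the data values are made up and this is not driver code.

/* Illustrative sketch of extracting a MAC address from word-wide reads. */
#include <stdio.h>

static const unsigned short sif_words[6] = {
	0x0000, 0x5A00, 0x1200, 0x3400, 0x5600, 0x7800	/* made-up data */
};

int main(void)
{
	unsigned char mac[6];
	int i;

	for (i = 0; i < 6; i++)
		mac[i] = sif_words[i] >> 8;	/* high byte of each word */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}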
-
-static unsigned short proteon_setnselout_pins(struct net_device *dev)
-{
-	return 0;
-}
-
-static int proteon_open(struct net_device *dev)
-{  
-	struct net_local *tp = netdev_priv(dev);
-	unsigned short val = 0;
-	int i;
-
-	/* Proteon reset sequence */
-	outb(0, dev->base_addr + 0x11);
-	mdelay(20);
-	outb(0x04, dev->base_addr + 0x11);
-	mdelay(20);
-	outb(0, dev->base_addr + 0x11);
-	mdelay(100);
-
-	/* set control/status reg */
-	val = inb(dev->base_addr + 0x11);
-	val |= 0x78;
-	val &= 0xf9;
-	if(tp->DataRate == SPEED_4)
-		val |= 0x20;
-	else
-		val &= ~0x20;
-
-	outb(val, dev->base_addr + 0x11);
-	outb(0xff, dev->base_addr + 0x12);
-	for(i = 0; irqlist[i] != 0; i++)
-	{
-		if(irqlist[i] == dev->irq)
-			break;
-	}
-	val = i;
-	i = (7 - dev->dma) << 4;
-	val |= i;
-	outb(val, dev->base_addr + 0x13);
-
-	return tms380tr_open(dev);
-}
-
-#define ISATR_MAX_ADAPTERS 3
-
-static int io[ISATR_MAX_ADAPTERS];
-static int irq[ISATR_MAX_ADAPTERS];
-static int dma[ISATR_MAX_ADAPTERS];
-
-MODULE_LICENSE("GPL");
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(dma, int, NULL, 0);
-
-static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS];
-
-static struct platform_driver proteon_driver = {
-	.driver		= {
-		.name	= "proteon",
-	},
-};
-
-static int __init proteon_init(void)
-{
-	struct net_device *dev;
-	struct platform_device *pdev;
-	int i, num = 0, err = 0;
-
-	proteon_netdev_ops = tms380tr_netdev_ops;
-	proteon_netdev_ops.ndo_open = proteon_open;
-	proteon_netdev_ops.ndo_stop = tms380tr_close;
-
-	err = platform_driver_register(&proteon_driver);
-	if (err)
-		return err;
-
-	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
-		dev = alloc_trdev(sizeof(struct net_local));
-		if (!dev)
-			continue;
-
-		dev->base_addr = io[i];
-		dev->irq = irq[i];
-		dev->dma = dma[i];
-		pdev = platform_device_register_simple("proteon",
-			i, NULL, 0);
-		if (IS_ERR(pdev)) {
-			free_netdev(dev);
-			continue;
-		}
-		err = setup_card(dev, &pdev->dev);
-		if (!err) {
-			proteon_dev[i] = pdev;
-			platform_set_drvdata(pdev, dev);
-			++num;
-		} else {
-			platform_device_unregister(pdev);
-			free_netdev(dev);
-		}
-	}
-
-	printk(KERN_NOTICE "proteon.c: %d cards found.\n", num);
-	/* Probe for cards. */
-	if (num == 0) {
-		printk(KERN_NOTICE "proteon.c: No cards found.\n");
-		platform_driver_unregister(&proteon_driver);
-		return -ENODEV;
-	}
-	return 0;
-}
-
-static void __exit proteon_cleanup(void)
-{
-	struct net_device *dev;
-	int i;
-
-	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
-		struct platform_device *pdev = proteon_dev[i];
-		
-		if (!pdev)
-			continue;
-		dev = platform_get_drvdata(pdev);
-		unregister_netdev(dev);
-		release_region(dev->base_addr, PROTEON_IO_EXTENT);
-		free_irq(dev->irq, dev);
-		free_dma(dev->dma);
-		tmsdev_term(dev);
-		free_netdev(dev);
-		platform_set_drvdata(pdev, NULL);
-		platform_device_unregister(pdev);
-	}
-	platform_driver_unregister(&proteon_driver);
-}
-
-module_init(proteon_init);
-module_exit(proteon_cleanup);
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
deleted file mode 100644
index ee11e93..0000000
--- a/drivers/net/tokenring/skisa.c
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- *  skisa.c: A network driver for SK-NET TMS380-based ISA token ring cards.
- *
- *  Based on tmspci written 1999 by Adam Fritzler
- *  
- *  Written 2000 by Jochen Friedrich
- *  Dedicated to my girlfriend Steffi Bopp
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License, incorporated herein by reference.
- *
- *  This driver module supports the following cards:
- *	- SysKonnect TR4/16(+) ISA	(SK-4190)
- *
- *  Maintainer(s):
- *    AF        Adam Fritzler
- *    JF	Jochen Friedrich	jochen@scram.de
- *
- *  Modification History:
- *	14-Jan-01	JF	Created
- *	28-Oct-02	JF	Fixed probe of card for static compilation.
- *				Fixed module init to not make hotplug go wild.
- *	09-Nov-02	JF	Fixed early bail out on out of memory
- *				situations if multiple cards are found.
- *				Cleaned up some unnecessary console SPAM.
- *	09-Dec-02	JF	Fixed module reference counting.
- *	02-Jan-03	JF	Renamed to skisa.c
- *
- */
-static const char version[] = "skisa.c: v1.03 09/12/2002 by Jochen Friedrich\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-#include <linux/platform_device.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/pci.h>
-#include <asm/dma.h>
-
-#include "tms380tr.h"
-
-#define SK_ISA_IO_EXTENT 32
-
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int portlist[] __initdata = {
-	0x0A20, 0x1A20, 0x0B20, 0x1B20, 0x0980, 0x1980, 0x0900, 0x1900,// SK
-	0
-};
-
-/* A zero-terminated list of IRQs to be probed. 
- * Used again after initial probe for sktr_chipset_init, called from sktr_open.
- */
-static const unsigned short irqlist[] = {
-	3, 5, 9, 10, 11, 12, 15,
-	0
-};
-
-/* A zero-terminated list of DMAs to be probed. */
-static int dmalist[] __initdata = {
-	5, 6, 7,
-	0
-};
-
-static char isa_cardname[] = "SK NET TR 4/16 ISA\0";
-static u64 dma_mask = ISA_MAX_ADDRESS;
-static int sk_isa_open(struct net_device *dev);
-static void sk_isa_read_eeprom(struct net_device *dev);
-static unsigned short sk_isa_setnselout_pins(struct net_device *dev);
-
-static unsigned short sk_isa_sifreadb(struct net_device *dev, unsigned short reg)
-{
-	return inb(dev->base_addr + reg);
-}
-
-static unsigned short sk_isa_sifreadw(struct net_device *dev, unsigned short reg)
-{
-	return inw(dev->base_addr + reg);
-}
-
-static void sk_isa_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
-{
-	outb(val, dev->base_addr + reg);
-}
-
-static void sk_isa_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
-{
-	outw(val, dev->base_addr + reg);
-}
-
-
-static int __init sk_isa_probe1(struct net_device *dev, int ioaddr)
-{
-	unsigned char old, chk1, chk2;
-
-	if (!request_region(ioaddr, SK_ISA_IO_EXTENT, isa_cardname))
-		return -ENODEV;
-
-	old = inb(ioaddr + SIFADR);	/* Get the old SIFADR value */
-
-	chk1 = 0;	/* Begin with check value 0 */
-	do {
-		/* Write new SIFADR value */
-		outb(chk1, ioaddr + SIFADR);
-
-		/* Read, invert and write */
-		chk2 = inb(ioaddr + SIFADD);
-		chk2 ^= 0x0FE;
-		outb(chk2, ioaddr + SIFADR);
-
-		/* Read, invert and compare */
-		chk2 = inb(ioaddr + SIFADD);
-		chk2 ^= 0x0FE;
-
-		if(chk1 != chk2) {
-			release_region(ioaddr, SK_ISA_IO_EXTENT);
-			return -ENODEV;
-		}
-
-		chk1 -= 2;
-	} while(chk1 != 0);	/* Repeat 128 times (all even byte values) */
-
-    	/* Restore the SIFADR value */
-	outb(old, ioaddr + SIFADR);
-
-	dev->base_addr = ioaddr;
-	return 0;
-}
-
-static struct net_device_ops sk_isa_netdev_ops __read_mostly;
-
-static int __init setup_card(struct net_device *dev, struct device *pdev)
-{
-	struct net_local *tp;
-        static int versionprinted;
-	const unsigned *port;
-	int j, err = 0;
-
-	if (!dev)
-		return -ENOMEM;
-
-	if (dev->base_addr)	/* probe specific location */
-		err = sk_isa_probe1(dev, dev->base_addr);
-	else {
-		for (port = portlist; *port; port++) {
-			err = sk_isa_probe1(dev, *port);
-			if (!err)
-				break;
-		}
-	}
-	if (err)
-		goto out5;
-
-	/* At this point we have found a valid card. */
-
-	if (versionprinted++ == 0)
-		printk(KERN_DEBUG "%s", version);
-
-	err = -EIO;
-	pdev->dma_mask = &dma_mask;
-	if (tmsdev_init(dev, pdev))
-		goto out4;
-
-	dev->base_addr &= ~3; 
-		
-	sk_isa_read_eeprom(dev);
-
-	printk(KERN_DEBUG "skisa.c:    Ring Station Address: %pM\n",
-	       dev->dev_addr);
-		
-	tp = netdev_priv(dev);
-	tp->setnselout = sk_isa_setnselout_pins;
-		
-	tp->sifreadb = sk_isa_sifreadb;
-	tp->sifreadw = sk_isa_sifreadw;
-	tp->sifwriteb = sk_isa_sifwriteb;
-	tp->sifwritew = sk_isa_sifwritew;
-	
-	memcpy(tp->ProductID, isa_cardname, PROD_ID_SIZE + 1);
-
-	tp->tmspriv = NULL;
-
-	dev->netdev_ops = &sk_isa_netdev_ops;
-
-	if (dev->irq == 0)
-	{
-		for(j = 0; irqlist[j] != 0; j++)
-		{
-			dev->irq = irqlist[j];
-			if (!request_irq(dev->irq, tms380tr_interrupt, 0, 
-				isa_cardname, dev))
-				break;
-                }
-		
-                if(irqlist[j] == 0)
-                {
-                        printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n");
-			goto out3;
-		}
-	}
-	else
-	{
-		for(j = 0; irqlist[j] != 0; j++)
-			if (irqlist[j] == dev->irq)
-				break;
-		if (irqlist[j] == 0)
-		{
-			printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n",
-				dev->irq);
-			goto out3;
-		}
-		if (request_irq(dev->irq, tms380tr_interrupt, 0, 
-			isa_cardname, dev))
-		{
-                        printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n",
-				dev->irq);
-			goto out3;
-		}
-	}
-
-	if (dev->dma == 0)
-	{
-		for(j = 0; dmalist[j] != 0; j++)
-		{
-			dev->dma = dmalist[j];
-                        if (!request_dma(dev->dma, isa_cardname))
-				break;
-		}
-
-		if(dmalist[j] == 0)
-		{
-			printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n");
-			goto out2;
-		}
-	}
-	else
-	{
-		for(j = 0; dmalist[j] != 0; j++)
-			if (dmalist[j] == dev->dma)
-				break;
-		if (dmalist[j] == 0)
-		{
-                        printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n",
-				dev->dma);
-			goto out2;
-		}
-		if (request_dma(dev->dma, isa_cardname))
-		{
-                        printk(KERN_INFO "skisa.c: Selected DMA %d not available\n",
-				dev->dma);
-			goto out2;
-		}
-	}
-
-	err = register_netdev(dev);
-	if (err)
-		goto out;
-
-	printk(KERN_DEBUG "%s:    IO: %#4lx  IRQ: %d  DMA: %d\n",
-	       dev->name, dev->base_addr, dev->irq, dev->dma);
-
-	return 0;
-out:
-	free_dma(dev->dma);
-out2:
-	free_irq(dev->irq, dev);
-out3:
-	tmsdev_term(dev);
-out4:
-	release_region(dev->base_addr, SK_ISA_IO_EXTENT);
-out5:
-	return err;
-}
-
-/*
- * Reads MAC address from adapter RAM, which should've read it from
- * the onboard ROM.  
- *
- * Calling this on a board that does not support it can be a very
- * dangerous thing.  The Madge board, for instance, will lock your
- * machine hard when this is called.  Luckily, it's supported in a
- * separate driver.  --ASF
- */
-static void sk_isa_read_eeprom(struct net_device *dev)
-{
-	int i;
-	
-	/* Address: 0000:0000 */
-	sk_isa_sifwritew(dev, 0, SIFADX);
-	sk_isa_sifwritew(dev, 0, SIFADR);	
-	
-	/* Read six byte MAC address data */
-	dev->addr_len = 6;
-	for(i = 0; i < 6; i++)
-		dev->dev_addr[i] = sk_isa_sifreadw(dev, SIFINC) >> 8;
-}
-
-static unsigned short sk_isa_setnselout_pins(struct net_device *dev)
-{
-	return 0;
-}
-
-static int sk_isa_open(struct net_device *dev)
-{  
-	struct net_local *tp = netdev_priv(dev);
-	unsigned short val = 0;
-	unsigned short oldval;
-	int i;
-
-	val = 0;
-	for(i = 0; irqlist[i] != 0; i++)
-	{
-		if(irqlist[i] == dev->irq)
-			break;
-	}
-
-	val |= CYCLE_TIME << 2;
-	val |= i << 4;
-	i = dev->dma - 5;
-	val |= i;
-	if(tp->DataRate == SPEED_4)
-		val |= LINE_SPEED_BIT;
-	else
-		val &= ~LINE_SPEED_BIT;
-	oldval = sk_isa_sifreadb(dev, POSREG);
-	/* Leave cycle bits alone */
-	oldval |= 0xf3;
-	val &= oldval;
-	sk_isa_sifwriteb(dev, val, POSREG);
-
-	return tms380tr_open(dev);
-}
-
-#define ISATR_MAX_ADAPTERS 3
-
-static int io[ISATR_MAX_ADAPTERS];
-static int irq[ISATR_MAX_ADAPTERS];
-static int dma[ISATR_MAX_ADAPTERS];
-
-MODULE_LICENSE("GPL");
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(dma, int, NULL, 0);
-
-static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
-
-static struct platform_driver sk_isa_driver = {
-	.driver		= {
-		.name	= "skisa",
-	},
-};
-
-static int __init sk_isa_init(void)
-{
-	struct net_device *dev;
-	struct platform_device *pdev;
-	int i, num = 0, err = 0;
-
-	sk_isa_netdev_ops = tms380tr_netdev_ops;
-	sk_isa_netdev_ops.ndo_open = sk_isa_open;
-	sk_isa_netdev_ops.ndo_stop = tms380tr_close;
-
-	err = platform_driver_register(&sk_isa_driver);
-	if (err)
-		return err;
-
-	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
-		dev = alloc_trdev(sizeof(struct net_local));
-		if (!dev)
-			continue;
-
-		dev->base_addr = io[i];
-		dev->irq = irq[i];
-		dev->dma = dma[i];
-		pdev = platform_device_register_simple("skisa",
-			i, NULL, 0);
-		if (IS_ERR(pdev)) {
-			free_netdev(dev);
-			continue;
-		}
-		err = setup_card(dev, &pdev->dev);
-		if (!err) {
-			sk_isa_dev[i] = pdev;
-			platform_set_drvdata(sk_isa_dev[i], dev);
-			++num;
-		} else {
-			platform_device_unregister(pdev);
-			free_netdev(dev);
-		}
-	}
-
-	printk(KERN_NOTICE "skisa.c: %d cards found.\n", num);
-	/* Probe for cards. */
-	if (num == 0) {
-		printk(KERN_NOTICE "skisa.c: No cards found.\n");
-		platform_driver_unregister(&sk_isa_driver);
-		return -ENODEV;
-	}
-	return 0;
-}
-
-static void __exit sk_isa_cleanup(void)
-{
-	struct net_device *dev;
-	int i;
-
-	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
-		struct platform_device *pdev = sk_isa_dev[i];
-
-		if (!pdev)
-			continue;
-		dev = platform_get_drvdata(pdev);
-		unregister_netdev(dev);
-		release_region(dev->base_addr, SK_ISA_IO_EXTENT);
-		free_irq(dev->irq, dev);
-		free_dma(dev->dma);
-		tmsdev_term(dev);
-		free_netdev(dev);
-		platform_set_drvdata(pdev, NULL);
-		platform_device_unregister(pdev);
-	}
-	platform_driver_unregister(&sk_isa_driver);
-}
-
-module_init(sk_isa_init);
-module_exit(sk_isa_cleanup);
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
deleted file mode 100644
index cb35fb7..0000000
--- a/drivers/net/tokenring/smctr.c
+++ /dev/null
@@ -1,5717 +0,0 @@
-/*
- *  smctr.c: A network driver for the SMC Token Ring Adapters.
- *
- *  Written by Jay Schulist <jschlst@samba.org>
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License, incorporated herein by reference.
- *
- *  This device driver works with the following SMC adapters:
- *      - SMC TokenCard Elite   (8115T, chips 825/584)
- *      - SMC TokenCard Elite/A MCA (8115T/A, chips 825/594)
- *
- *  Source(s):
- *  	- SMC TokenCard SDK.
- *
- *  Maintainer(s):
- *    JS        Jay Schulist <jschlst@samba.org>
- *
- * Changes:
- *    07102000          JS      Fixed a timing problem in smctr_wait_cmd();
- *                              Also added a bit more descriptive error msgs.
- *    07122000          JS      Fixed problem with detecting a card with
- *				module io/irq/mem specified.
- *
- *  To do:
- *    1. Multicast support.
- *
- *  Initial 2.5 cleanup Alan Cox <alan@lxorguk.ukuu.org.uk>  2002/10/28
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/time.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/mca-legacy.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/trdevice.h>
-#include <linux/bitops.h>
-#include <linux/firmware.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
-
-#if BITS_PER_LONG == 64
-#error FIXME: driver does not support 64-bit platforms
-#endif
-
-#include "smctr.h"               /* Our Stuff */
-
-static const char version[] __initdata =
-	KERN_INFO "smctr.c: v1.4 7/12/00 by jschlst@samba.org\n";
-static const char cardname[] = "smctr";
-
-
-#define SMCTR_IO_EXTENT   20
-
-#ifdef CONFIG_MCA_LEGACY
-static unsigned int smctr_posid = 0x6ec6;
-#endif
-
-static int ringspeed;
-
-/* SMC Name of the Adapter. */
-static char smctr_name[] = "SMC TokenCard";
-static char *smctr_model = "Unknown";
-
-/* Use 0 for production, 1 for verification, 2 for debug, and
- * 3 for very verbose debug.
- */
-#ifndef SMCTR_DEBUG
-#define SMCTR_DEBUG 1
-#endif
-static unsigned int smctr_debug = SMCTR_DEBUG;
-
-/* smctr.c prototypes and functions are arranged alphabetically
- * for clarity, maintainability and pure old-fashioned fun.
- */
-/* A */
-static int smctr_alloc_shared_memory(struct net_device *dev);
-
-/* B */
-static int smctr_bypass_state(struct net_device *dev);
-
-/* C */
-static int smctr_checksum_firmware(struct net_device *dev);
-static int __init smctr_chk_isa(struct net_device *dev);
-static int smctr_chg_rx_mask(struct net_device *dev);
-static int smctr_clear_int(struct net_device *dev);
-static int smctr_clear_trc_reset(int ioaddr);
-static int smctr_close(struct net_device *dev);
-
-/* D */
-static int smctr_decode_firmware(struct net_device *dev,
-				 const struct firmware *fw);
-static int smctr_disable_16bit(struct net_device *dev);
-static int smctr_disable_adapter_ctrl_store(struct net_device *dev);
-static int smctr_disable_bic_int(struct net_device *dev);
-
-/* E */
-static int smctr_enable_16bit(struct net_device *dev);
-static int smctr_enable_adapter_ctrl_store(struct net_device *dev);
-static int smctr_enable_adapter_ram(struct net_device *dev);
-static int smctr_enable_bic_int(struct net_device *dev);
-
-/* G */
-static int __init smctr_get_boardid(struct net_device *dev, int mca);
-static int smctr_get_group_address(struct net_device *dev);
-static int smctr_get_functional_address(struct net_device *dev);
-static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev);
-static int smctr_get_physical_drop_number(struct net_device *dev);
-static __u8 *smctr_get_rx_pointer(struct net_device *dev, short queue);
-static int smctr_get_station_id(struct net_device *dev);
-static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
-        __u16 bytes_count);
-static int smctr_get_upstream_neighbor_addr(struct net_device *dev);
-
-/* H */
-static int smctr_hardware_send_packet(struct net_device *dev,
-        struct net_local *tp);
-/* I */
-static int smctr_init_acbs(struct net_device *dev);
-static int smctr_init_adapter(struct net_device *dev);
-static int smctr_init_card_real(struct net_device *dev);
-static int smctr_init_rx_bdbs(struct net_device *dev);
-static int smctr_init_rx_fcbs(struct net_device *dev);
-static int smctr_init_shared_memory(struct net_device *dev);
-static int smctr_init_tx_bdbs(struct net_device *dev);
-static int smctr_init_tx_fcbs(struct net_device *dev);
-static int smctr_internal_self_test(struct net_device *dev);
-static irqreturn_t smctr_interrupt(int irq, void *dev_id);
-static int smctr_issue_enable_int_cmd(struct net_device *dev,
-        __u16 interrupt_enable_mask);
-static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code,
-        __u16 ibits);
-static int smctr_issue_init_timers_cmd(struct net_device *dev);
-static int smctr_issue_init_txrx_cmd(struct net_device *dev);
-static int smctr_issue_insert_cmd(struct net_device *dev);
-static int smctr_issue_read_ring_status_cmd(struct net_device *dev);
-static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt);
-static int smctr_issue_remove_cmd(struct net_device *dev);
-static int smctr_issue_resume_acb_cmd(struct net_device *dev);
-static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue);
-static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue);
-static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue);
-static int smctr_issue_test_internal_rom_cmd(struct net_device *dev);
-static int smctr_issue_test_hic_cmd(struct net_device *dev);
-static int smctr_issue_test_mac_reg_cmd(struct net_device *dev);
-static int smctr_issue_trc_loopback_cmd(struct net_device *dev);
-static int smctr_issue_tri_loopback_cmd(struct net_device *dev);
-static int smctr_issue_write_byte_cmd(struct net_device *dev,
-        short aword_cnt, void *byte);
-static int smctr_issue_write_word_cmd(struct net_device *dev,
-        short aword_cnt, void *word);
-
-/* J */
-static int smctr_join_complete_state(struct net_device *dev);
-
-/* L */
-static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev);
-static int smctr_load_firmware(struct net_device *dev);
-static int smctr_load_node_addr(struct net_device *dev);
-static int smctr_lobe_media_test(struct net_device *dev);
-static int smctr_lobe_media_test_cmd(struct net_device *dev);
-static int smctr_lobe_media_test_state(struct net_device *dev);
-
-/* M */
-static int smctr_make_8025_hdr(struct net_device *dev,
-        MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc);
-static int smctr_make_access_pri(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv);
-static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv);
-static int smctr_make_auth_funct_class(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv);
-static int smctr_make_corr(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv, __u16 correlator);
-static int smctr_make_funct_addr(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv);
-static int smctr_make_group_addr(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv);
-static int smctr_make_phy_drop_num(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv);
-static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
-static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv);
-static int smctr_make_ring_station_status(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv);
-static int smctr_make_ring_station_version(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv);
-static int smctr_make_tx_status_code(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv, __u16 tx_fstatus);
-static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv);
-static int smctr_make_wrap_data(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv);
-
-/* O */
-static int smctr_open(struct net_device *dev);
-static int smctr_open_tr(struct net_device *dev);
-
-/* P */
-struct net_device *smctr_probe(int unit);
-static int __init smctr_probe1(struct net_device *dev, int ioaddr);
-static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
-        struct net_device *dev, __u16 rx_status);
-
-/* R */
-static int smctr_ram_memory_test(struct net_device *dev);
-static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 *correlator);
-static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 *correlator);
-static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf);
-static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
-        MAC_HEADER *rmf, __u16 *correlator);
-static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 *correlator);
-static int smctr_reset_adapter(struct net_device *dev);
-static int smctr_restart_tx_chain(struct net_device *dev, short queue);
-static int smctr_ring_status_chg(struct net_device *dev);
-static int smctr_rx_frame(struct net_device *dev);
-
-/* S */
-static int smctr_send_dat(struct net_device *dev);
-static netdev_tx_t smctr_send_packet(struct sk_buff *skb,
-					   struct net_device *dev);
-static int smctr_send_lobe_media_test(struct net_device *dev);
-static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 correlator);
-static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 correlator);
-static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 correlator);
-static int smctr_send_rpt_tx_forward(struct net_device *dev,
-        MAC_HEADER *rmf, __u16 tx_fstatus);
-static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 rcode, __u16 correlator);
-static int smctr_send_rq_init(struct net_device *dev);
-static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 *tx_fstatus);
-static int smctr_set_auth_access_pri(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv);
-static int smctr_set_auth_funct_class(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv);
-static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
-	__u16 *correlator);
-static int smctr_set_error_timer_value(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv);
-static int smctr_set_frame_forward(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv, __u8 dc_sc);
-static int smctr_set_local_ring_num(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv);
-static unsigned short smctr_set_ctrl_attention(struct net_device *dev);
-static void smctr_set_multicast_list(struct net_device *dev);
-static int smctr_set_page(struct net_device *dev, __u8 *buf);
-static int smctr_set_phy_drop(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv);
-static int smctr_set_ring_speed(struct net_device *dev);
-static int smctr_set_rx_look_ahead(struct net_device *dev);
-static int smctr_set_trc_reset(int ioaddr);
-static int smctr_setup_single_cmd(struct net_device *dev,
-        __u16 command, __u16 subcommand);
-static int smctr_setup_single_cmd_w_data(struct net_device *dev,
-        __u16 command, __u16 subcommand);
-static char *smctr_malloc(struct net_device *dev, __u16 size);
-static int smctr_status_chg(struct net_device *dev);
-
-/* T */
-static void smctr_timeout(struct net_device *dev);
-static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
-        __u16 queue);
-static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue);
-static unsigned short smctr_tx_move_frame(struct net_device *dev,
-        struct sk_buff *skb, __u8 *pbuff, unsigned int bytes);
-
-/* U */
-static int smctr_update_err_stats(struct net_device *dev);
-static int smctr_update_rx_chain(struct net_device *dev, __u16 queue);
-static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
-        __u16 queue);
-
-/* W */
-static int smctr_wait_cmd(struct net_device *dev);
-static int smctr_wait_while_cbusy(struct net_device *dev);
-
-#define TO_256_BYTE_BOUNDRY(X)  (((X + 0xff) & 0xff00) - X)
-#define TO_PARAGRAPH_BOUNDRY(X) (((X + 0x0f) & 0xfff0) - X)
-#define PARAGRAPH_BOUNDRY(X)    smctr_malloc(dev, TO_PARAGRAPH_BOUNDRY(X))
-
-/* Allocate Adapter Shared Memory.
- * IMPORTANT NOTE: Any changes to this function MUST be mirrored in the
- * function "get_num_rx_bdbs" below!!!
- *
- * Order of memory allocation:
- *
- *       0. Initial System Configuration Block Pointer
- *       1. System Configuration Block
- *       2. System Control Block
- *       3. Action Command Block
- *       4. Interrupt Status Block
- *
- *       5. MAC TX FCB'S
- *       6. NON-MAC TX FCB'S
- *       7. MAC TX BDB'S
- *       8. NON-MAC TX BDB'S
- *       9. MAC RX FCB'S
- *      10. NON-MAC RX FCB'S
- *      11. MAC RX BDB'S
- *      12. NON-MAC RX BDB'S
- *      13. MAC TX Data Buffer( 1, 256 byte buffer)
- *      14. MAC RX Data Buffer( 1, 256 byte buffer)
- *
- *      15. NON-MAC TX Data Buffer
- *      16. NON-MAC RX Data Buffer
- */
-static int smctr_alloc_shared_memory(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_alloc_shared_memory\n", dev->name);
-
-        /* Allocate initial System Control Block pointer.
-         * This pointer is located in the last page, last offset - 4.
-         */
-        tp->iscpb_ptr = (ISCPBlock *)(tp->ram_access + ((__u32)64 * 0x400)
-                - (long)ISCP_BLOCK_SIZE);
-
-        /* Allocate System Control Blocks. */
-        tp->scgb_ptr = (SCGBlock *)smctr_malloc(dev, sizeof(SCGBlock));
-        PARAGRAPH_BOUNDRY(tp->sh_mem_used);
-
-        tp->sclb_ptr = (SCLBlock *)smctr_malloc(dev, sizeof(SCLBlock));
-        PARAGRAPH_BOUNDRY(tp->sh_mem_used);
-
-        tp->acb_head = (ACBlock *)smctr_malloc(dev,
-                sizeof(ACBlock)*tp->num_acbs);
-        PARAGRAPH_BOUNDRY(tp->sh_mem_used);
-
-        tp->isb_ptr = (ISBlock *)smctr_malloc(dev, sizeof(ISBlock));
-        PARAGRAPH_BOUNDRY(tp->sh_mem_used);
-
-        tp->misc_command_data = (__u16 *)smctr_malloc(dev, MISC_DATA_SIZE);
-        PARAGRAPH_BOUNDRY(tp->sh_mem_used);
-
-        /* Allocate transmit FCBs. */
-        tp->tx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
-                sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE]);
-
-        tp->tx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
-                sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE]);
-
-        tp->tx_fcb_head[BUG_QUEUE] = (FCBlock *)smctr_malloc(dev,
-                sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE]);
-
-        /* Allocate transmit BDBs. */
-        tp->tx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
-                sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE]);
-
-        tp->tx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
-                sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE]);
-
-        tp->tx_bdb_head[BUG_QUEUE] = (BDBlock *)smctr_malloc(dev,
-                sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE]);
-
-        /* Allocate receive FCBs. */
-        tp->rx_fcb_head[MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
-                sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE]);
-
-        tp->rx_fcb_head[NON_MAC_QUEUE] = (FCBlock *)smctr_malloc(dev,
-                sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE]);
-
-        /* Allocate receive BDBs. */
-        tp->rx_bdb_head[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
-                sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE]);
-
-        tp->rx_bdb_end[MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);
-
-        tp->rx_bdb_head[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev,
-                sizeof(BDBlock) * tp->num_rx_bdbs[NON_MAC_QUEUE]);
-
-        tp->rx_bdb_end[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);
-
-        /* Allocate MAC transmit buffers.
-         * MAC Tx Buffers don't have to be on an ODD Boundary.
-         */
-        tp->tx_buff_head[MAC_QUEUE]
-                = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[MAC_QUEUE]);
-        tp->tx_buff_curr[MAC_QUEUE] = tp->tx_buff_head[MAC_QUEUE];
-        tp->tx_buff_end [MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
-
-        /* Allocate BUG transmit buffers. */
-        tp->tx_buff_head[BUG_QUEUE]
-                = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[BUG_QUEUE]);
-        tp->tx_buff_curr[BUG_QUEUE] = tp->tx_buff_head[BUG_QUEUE];
-        tp->tx_buff_end[BUG_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
-
-        /* Allocate MAC receive data buffers.
-         * MAC Rx buffer doesn't have to be on a 256 byte boundary.
-         */
-        tp->rx_buff_head[MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
-                RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE]);
-        tp->rx_buff_end[MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
-
-        /* Allocate Non-MAC transmit buffers.
-         * ?? For maximum Netware performance, put Tx Buffers on
-         * ODD Boundary and then restore malloc to Even Boundaries.
-         */
-        smctr_malloc(dev, 1L);
-        tp->tx_buff_head[NON_MAC_QUEUE]
-                = (__u16 *)smctr_malloc(dev, tp->tx_buff_size[NON_MAC_QUEUE]);
-        tp->tx_buff_curr[NON_MAC_QUEUE] = tp->tx_buff_head[NON_MAC_QUEUE];
-        tp->tx_buff_end [NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
-        smctr_malloc(dev, 1L);
-
-        /* Allocate Non-MAC receive data buffers.
-         * To guarantee a minimum of 256 bytes of contiguous memory to
-         * UM_Receive_Packet's lookahead pointer, before a page
-         * change or ring end is encountered, place each rx buffer on
-         * a 256 byte boundary.
-         */
-        smctr_malloc(dev, TO_256_BYTE_BOUNDRY(tp->sh_mem_used));
-        tp->rx_buff_head[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev,
-                RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]);
-        tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
-
-        return 0;
-}
-
-/* Enter Bypass state. */
-static int smctr_bypass_state(struct net_device *dev)
-{
-        int err;
-
-	if(smctr_debug > 10)
-        	printk(KERN_DEBUG "%s: smctr_bypass_state\n", dev->name);
-
-        err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE);
-
-        return err;
-}
-
-static int smctr_checksum_firmware(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        __u16 i, checksum = 0;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_checksum_firmware\n", dev->name);
-
-        smctr_enable_adapter_ctrl_store(dev);
-
-        for(i = 0; i < CS_RAM_SIZE; i += 2)
-                checksum += *((__u16 *)(tp->ram_access + i));
-
-        tp->microcode_version = *(__u16 *)(tp->ram_access
-                + CS_RAM_VERSION_OFFSET);
-        tp->microcode_version >>= 8;
-
-        smctr_disable_adapter_ctrl_store(dev);
-
-        if(checksum)
-                return checksum;
-
-        return 0;
-}
-
-static int __init smctr_chk_mca(struct net_device *dev)
-{
-#ifdef CONFIG_MCA_LEGACY
-	struct net_local *tp = netdev_priv(dev);
-	int current_slot;
-	__u8 r1, r2, r3, r4, r5;
-
-	current_slot = mca_find_unused_adapter(smctr_posid, 0);
-	if(current_slot == MCA_NOTFOUND)
-		return -ENODEV;
-
-	mca_set_adapter_name(current_slot, smctr_name);
-	mca_mark_as_used(current_slot);
-	tp->slot_num = current_slot;
-
-	r1 = mca_read_stored_pos(tp->slot_num, 2);
-	r2 = mca_read_stored_pos(tp->slot_num, 3);
-
-	if(tp->slot_num)
-		outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num - 1) | CNFG_SLOT_ENABLE_BIT));
-	else
-		outb(CNFG_POS_CONTROL_REG, (__u8)((tp->slot_num) | CNFG_SLOT_ENABLE_BIT));
-
-	r1 = inb(CNFG_POS_REG1);
-	r2 = inb(CNFG_POS_REG0);
-
-	tp->bic_type = BIC_594_CHIP;
-
-	/* IO */
-	r2 = mca_read_stored_pos(tp->slot_num, 2);
-	r2 &= 0xF0;
-	dev->base_addr = ((__u16)r2 << 8) + (__u16)0x800;
-	request_region(dev->base_addr, SMCTR_IO_EXTENT, smctr_name);
-
-	/* IRQ */
-	r5 = mca_read_stored_pos(tp->slot_num, 5);
-	r5 &= 0xC;
-        switch(r5)
-	{
-            	case 0:
-			dev->irq = 3;
-               		break;
-
-            	case 0x4:
-			dev->irq = 4;
-               		break;
-
-            	case 0x8:
-			dev->irq = 10;
-               		break;
-
-            	default:
-			dev->irq = 15;
-               		break;
-	}
-	if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev)) {
-		release_region(dev->base_addr, SMCTR_IO_EXTENT);
-		return -ENODEV;
-	}
-
-	/* Get RAM base */
-	r3 = mca_read_stored_pos(tp->slot_num, 3);
-	tp->ram_base = ((__u32)(r3 & 0x7) << 13) + 0x0C0000;
-	if (r3 & 0x8)
-		tp->ram_base += 0x010000;
-	if (r3 & 0x80)
-		tp->ram_base += 0xF00000;
-
-	/* Get Ram Size */
-	r3 &= 0x30;
-	r3 >>= 4;
-
-	tp->ram_usable = (__u16)CNFG_SIZE_8KB << r3;
-	tp->ram_size = (__u16)CNFG_SIZE_64KB;
-	tp->board_id |= TOKEN_MEDIA;
-
-	r4 = mca_read_stored_pos(tp->slot_num, 4);
-	tp->rom_base = ((__u32)(r4 & 0x7) << 13) + 0x0C0000;
-	if (r4 & 0x8)
-		tp->rom_base += 0x010000;
-
-	/* Get ROM size. */
-	r4 >>= 4;
-	switch (r4) {
-		case 0:
-			tp->rom_size = CNFG_SIZE_8KB;
-			break;
-		case 1:
-			tp->rom_size = CNFG_SIZE_16KB;
-			break;
-		case 2:
-			tp->rom_size = CNFG_SIZE_32KB;
-			break;
-		default:
-			tp->rom_size = ROM_DISABLE;
-	}
-
-	/* Get Media Type. */
-	r5 = mca_read_stored_pos(tp->slot_num, 5);
-	r5 &= CNFG_MEDIA_TYPE_MASK;
-	switch(r5)
-	{
-		case (0):
-			tp->media_type = MEDIA_STP_4;
-			break;
-
-		case (1):
-			tp->media_type = MEDIA_STP_16;
-			break;
-
-		case (3):
-			tp->media_type = MEDIA_UTP_16;
-			break;
-
-		default:
-			tp->media_type = MEDIA_UTP_4;
-			break;
-	}
-	tp->media_menu = 14;
-
-	r2 = mca_read_stored_pos(tp->slot_num, 2);
-	if(!(r2 & 0x02))
-		tp->mode_bits |= EARLY_TOKEN_REL;
-
-	/* Disable slot */
-	outb(CNFG_POS_CONTROL_REG, 0);
-
-	tp->board_id = smctr_get_boardid(dev, 1);
-	switch(tp->board_id & 0xffff)
-        {
-                case WD8115TA:
-                        smctr_model = "8115T/A";
-                        break;
-
-                case WD8115T:
-			if(tp->extra_info & CHIP_REV_MASK)
-                                smctr_model = "8115T rev XE";
-                        else
-                                smctr_model = "8115T rev XD";
-                        break;
-
-                default:
-                        smctr_model = "Unknown";
-                        break;
-        }
-
-	return 0;
-#else
-	return -1;
-#endif /* CONFIG_MCA_LEGACY */
-}
-
-static int smctr_chg_rx_mask(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err = 0;
-
-        if(smctr_debug > 10)
-		printk(KERN_DEBUG "%s: smctr_chg_rx_mask\n", dev->name);
-
-        smctr_enable_16bit(dev);
-        smctr_set_page(dev, (__u8 *)tp->ram_access);
-
-        if(tp->mode_bits & LOOPING_MODE_MASK)
-                tp->config_word0 |= RX_OWN_BIT;
-        else
-                tp->config_word0 &= ~RX_OWN_BIT;
-
-        if(tp->receive_mask & PROMISCUOUS_MODE)
-                tp->config_word0 |= PROMISCUOUS_BIT;
-        else
-                tp->config_word0 &= ~PROMISCUOUS_BIT;
-
-        if(tp->receive_mask & ACCEPT_ERR_PACKETS)
-                tp->config_word0 |= SAVBAD_BIT;
-        else
-                tp->config_word0 &= ~SAVBAD_BIT;
-
-        if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
-                tp->config_word0 |= RXATMAC;
-        else
-                tp->config_word0 &= ~RXATMAC;
-
-        if(tp->receive_mask & ACCEPT_MULTI_PROM)
-                tp->config_word1 |= MULTICAST_ADDRESS_BIT;
-        else
-                tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;
-
-        if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
-                tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
-        else
-        {
-                if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
-                        tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
-                else
-                        tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
-        }
-
-        if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0,
-                &tp->config_word0)))
-        {
-                return err;
-        }
-
-        if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1,
-                &tp->config_word1)))
-        {
-                return err;
-        }
-
-        smctr_disable_16bit(dev);
-
-        return 0;
-}
-
-static int smctr_clear_int(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR);
-
-        return 0;
-}
-
-static int smctr_clear_trc_reset(int ioaddr)
-{
-        __u8 r;
-
-        r = inb(ioaddr + MSR);
-        outb(~MSR_RST & r, ioaddr + MSR);
-
-        return 0;
-}
-
-/*
- * The inverse routine to smctr_open().
- */
-static int smctr_close(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        struct sk_buff *skb;
-        int err;
-
-	netif_stop_queue(dev);
-	
-	tp->cleanup = 1;
-
-        /* Check to see if adapter is already in a closed state. */
-        if(tp->status != OPEN)
-                return 0;
-
-        smctr_enable_16bit(dev);
-        smctr_set_page(dev, (__u8 *)tp->ram_access);
-
-        if((err = smctr_issue_remove_cmd(dev)))
-        {
-                smctr_disable_16bit(dev);
-                return err;
-        }
-
-        for(;;)
-        {
-                skb = skb_dequeue(&tp->SendSkbQueue);
-                if(skb == NULL)
-                        break;
-                tp->QueueSkb++;
-                dev_kfree_skb(skb);
-        }
-
-
-        return 0;
-}
-
-static int smctr_decode_firmware(struct net_device *dev,
-				 const struct firmware *fw)
-{
-        struct net_local *tp = netdev_priv(dev);
-        short bit = 0x80, shift = 12;
-        DECODE_TREE_NODE *tree;
-        short branch, tsize;
-        __u16 buff = 0;
-        long weight;
-        __u8 *ucode;
-        __u16 *mem;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_decode_firmware\n", dev->name);
-
-        weight  = *(long *)(fw->data + WEIGHT_OFFSET);
-        tsize   = *(__u8 *)(fw->data + TREE_SIZE_OFFSET);
-        tree    = (DECODE_TREE_NODE *)(fw->data + TREE_OFFSET);
-        ucode   = (__u8 *)(fw->data + TREE_OFFSET
-                        + (tsize * sizeof(DECODE_TREE_NODE)));
-        mem     = (__u16 *)(tp->ram_access);
-
-        while(weight)
-        {
-                branch = ROOT;
-                while((tree + branch)->tag != LEAF && weight)
-                {
-                        branch = *ucode & bit ? (tree + branch)->llink
-                                : (tree + branch)->rlink;
-
-                        bit >>= 1;
-                        weight--;
-
-                        if(bit == 0)
-                        {
-                                bit = 0x80;
-                                ucode++;
-                        }
-                }
-
-                buff |= (tree + branch)->info << shift;
-                shift -= 4;
-
-                if(shift < 0)
-                {
-                        *(mem++) = SWAP_BYTES(buff);
-                        buff    = 0;
-                        shift   = 12;
-                }
-        }
-
-        /* The following assumes the Control Store Memory has
-         * been initialized to zero. If the last partial word
-         * is zero, it will not be written.
-         */
-        if(buff)
-                *(mem++) = SWAP_BYTES(buff);
-
-        return 0;
-}
-
-static int smctr_disable_16bit(struct net_device *dev)
-{
-        return 0;
-}
-
-/*
- * On Exit, Adapter is:
- * 1. TRC is in a reset state and un-initialized.
- * 2. Adapter memory is enabled.
- * 3. Control Store memory is out of context (-WCSS is 1).
- */
-static int smctr_disable_adapter_ctrl_store(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_disable_adapter_ctrl_store\n", dev->name);
-
-        tp->trc_mask |= CSR_WCSS;
-        outb(tp->trc_mask, ioaddr + CSR);
-
-        return 0;
-}
-
-static int smctr_disable_bic_int(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-
-        tp->trc_mask = CSR_MSK_ALL | CSR_MSKCBUSY
-	        | CSR_MSKTINT | CSR_WCSS;
-        outb(tp->trc_mask, ioaddr + CSR);
-
-        return 0;
-}
-
-static int smctr_enable_16bit(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        __u8    r;
-
-        if(tp->adapter_bus == BUS_ISA16_TYPE)
-        {
-                r = inb(dev->base_addr + LAAR);
-                outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR);
-        }
-
-        return 0;
-}
-
-/*
- * To enable the adapter control store memory:
- * 1. Adapter must be in a RESET state.
- * 2. Adapter memory must be enabled.
- * 3. Control Store Memory is in context (-WCSS is 0).
- */
-static int smctr_enable_adapter_ctrl_store(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_enable_adapter_ctrl_store\n", dev->name);
-
-        smctr_set_trc_reset(ioaddr);
-        smctr_enable_adapter_ram(dev);
-
-        tp->trc_mask &= ~CSR_WCSS;
-        outb(tp->trc_mask, ioaddr + CSR);
-
-        return 0;
-}
-
-static int smctr_enable_adapter_ram(struct net_device *dev)
-{
-        int ioaddr = dev->base_addr;
-        __u8 r;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_enable_adapter_ram\n", dev->name);
-
-        r = inb(ioaddr + MSR);
-        outb(MSR_MEMB | r, ioaddr + MSR);
-
-        return 0;
-}
-
-static int smctr_enable_bic_int(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-        __u8 r;
-
-        switch(tp->bic_type)
-        {
-                case (BIC_584_CHIP):
-                        tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
-                        outb(tp->trc_mask, ioaddr + CSR);
-                        r = inb(ioaddr + IRR);
-                        outb(r | IRR_IEN, ioaddr + IRR);
-                        break;
-
-                case (BIC_594_CHIP):
-                        tp->trc_mask = CSR_MSKCBUSY | CSR_WCSS;
-                        outb(tp->trc_mask, ioaddr + CSR);
-                        r = inb(ioaddr + IMCCR);
-                        outb(r | IMCCR_EIL, ioaddr + IMCCR);
-                        break;
-        }
-
-        return 0;
-}
-
-static int __init smctr_chk_isa(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-        __u8 r1, r2, b, chksum = 0;
-        __u16 r;
-	int i;
-	int err = -ENODEV;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_chk_isa %#4x\n", dev->name, ioaddr);
-
-	if((ioaddr & 0x1F) != 0)
-                goto out;
-
-        /* Grab the region so that no one else tries to probe our ioports. */
-	if (!request_region(ioaddr, SMCTR_IO_EXTENT, smctr_name)) {
-		err = -EBUSY;
-		goto out;
-	}
-
-        /* Checksum SMC node address */
-        for(i = 0; i < 8; i++)
-        {
-                b = inb(ioaddr + LAR0 + i);
-                chksum += b;
-        }
-
-        if (chksum != NODE_ADDR_CKSUM)
-                goto out2;
-
-        b = inb(ioaddr + BDID);
-	if(b != BRD_ID_8115T)
-        {
-                printk(KERN_ERR "%s: The adapter found is not supported\n", dev->name);
-                goto out2;
-        }
-
-        /* Check for 8115T Board ID */
-        r2 = 0;
-        for(r = 0; r < 8; r++)
-        {
-            r1 = inb(ioaddr + 0x8 + r);
-            r2 += r1;
-        }
-
-        /* value of RegF adds up the sum to 0xFF */
-        if((r2 != 0xFF) && (r2 != 0xEE))
-                goto out2;
-
-        /* Get adapter ID */
-        tp->board_id = smctr_get_boardid(dev, 0);
-        switch(tp->board_id & 0xffff)
-        {
-                case WD8115TA:
-                        smctr_model = "8115T/A";
-                        break;
-
-                case WD8115T:
-			if(tp->extra_info & CHIP_REV_MASK)
-                                smctr_model = "8115T rev XE";
-                        else
-                                smctr_model = "8115T rev XD";
-                        break;
-
-                default:
-                        smctr_model = "Unknown";
-                        break;
-        }
-
-        /* Store BIC type. */
-        tp->bic_type = BIC_584_CHIP;
-        tp->nic_type = NIC_825_CHIP;
-
-        /* Copy Ram Size */
-        tp->ram_usable  = CNFG_SIZE_16KB;
-        tp->ram_size    = CNFG_SIZE_64KB;
-
-        /* Get 58x Ram Base */
-        r1 = inb(ioaddr);
-        r1 &= 0x3F;
-
-        r2 = inb(ioaddr + CNFG_LAAR_584);
-        r2 &= CNFG_LAAR_MASK;
-        r2 <<= 3;
-        r2 |= ((r1 & 0x38) >> 3);
-
-        tp->ram_base = ((__u32)r2 << 16) + (((__u32)(r1 & 0x7)) << 13);
-
-        /* Get 584 Irq */
-        r1 = 0;
-        r1 = inb(ioaddr + CNFG_ICR_583);
-        r1 &= CNFG_ICR_IR2_584;
-
-        r2 = inb(ioaddr + CNFG_IRR_583);
-        r2 &= CNFG_IRR_IRQS;     /* 0x60 */
-        r2 >>= 5;
-
-        switch(r2)
-        {
-                case 0:
-                        if(r1 == 0)
-                                dev->irq = 2;
-                        else
-                                dev->irq = 10;
-                        break;
-
-                case 1:
-                        if(r1 == 0)
-                                dev->irq = 3;
-                        else
-                                dev->irq = 11;
-                        break;
-
-                case 2:
-                        if(r1 == 0)
-                        {
-                                if(tp->extra_info & ALTERNATE_IRQ_BIT)
-                                        dev->irq = 5;
-                                else
-                                        dev->irq = 4;
-                        }
-                        else
-                                dev->irq = 15;
-                        break;
-
-                case 3:
-                        if(r1 == 0)
-                                dev->irq = 7;
-                        else
-                                dev->irq = 4;
-                        break;
-
-                default:
-                        printk(KERN_ERR "%s: No IRQ found aborting\n", dev->name);
-                        goto out2;
-         }
-
-        if (request_irq(dev->irq, smctr_interrupt, IRQF_SHARED, smctr_name, dev))
-                goto out2;
-
-        /* Get 58x Rom Base */
-        r1 = inb(ioaddr + CNFG_BIO_583);
-        r1 &= 0x3E;
-        r1 |= 0x40;
-
-        tp->rom_base = (__u32)r1 << 13;
-
-        /* Get 58x Rom Size */
-        r1 = inb(ioaddr + CNFG_BIO_583);
-        r1 &= 0xC0;
-        if(r1 == 0)
-                tp->rom_size = ROM_DISABLE;
-        else
-        {
-                r1 >>= 6;
-                tp->rom_size = (__u16)CNFG_SIZE_8KB << r1;
-        }
-
-        /* Get 58x Boot Status */
-        r1 = inb(ioaddr + CNFG_GP2);
-
-        tp->mode_bits &= (~BOOT_STATUS_MASK);
-
-        if(r1 & CNFG_GP2_BOOT_NIBBLE)
-                tp->mode_bits |= BOOT_TYPE_1;
-
-        /* Get 58x Zero Wait State */
-        tp->mode_bits &= (~ZERO_WAIT_STATE_MASK);
-
-        r1 = inb(ioaddr + CNFG_IRR_583);
-
-        if(r1 & CNFG_IRR_ZWS)
-                 tp->mode_bits |= ZERO_WAIT_STATE_8_BIT;
-
-        if(tp->board_id & BOARD_16BIT)
-        {
-                r1 = inb(ioaddr + CNFG_LAAR_584);
-
-                if(r1 & CNFG_LAAR_ZWS)
-                        tp->mode_bits |= ZERO_WAIT_STATE_16_BIT;
-        }
-
-        /* Get 584 Media Menu */
-        tp->media_menu = 14;
-        r1 = inb(ioaddr + CNFG_IRR_583);
-
-        tp->mode_bits &= 0xf8ff;       /* (~CNFG_INTERFACE_TYPE_MASK) */
-        if((tp->board_id & TOKEN_MEDIA) == TOKEN_MEDIA)
-        {
-                /* Get Advanced Features */
-                if(((r1 & 0x6) >> 1) == 0x3)
-                        tp->media_type |= MEDIA_UTP_16;
-                else
-                {
-                        if(((r1 & 0x6) >> 1) == 0x2)
-                                tp->media_type |= MEDIA_STP_16;
-                        else
-                        {
-                                if(((r1 & 0x6) >> 1) == 0x1)
-                                        tp->media_type |= MEDIA_UTP_4;
-
-                                else
-                                        tp->media_type |= MEDIA_STP_4;
-                        }
-                }
-
-                r1 = inb(ioaddr + CNFG_GP2);
-                if(!(r1 & 0x2) )           /* GP2_ETRD */
-                        tp->mode_bits |= EARLY_TOKEN_REL;
-
-                /* see if the chip is corrupted
-                if(smctr_read_584_chksum(ioaddr))
-                {
-                        printk(KERN_ERR "%s: EEPROM Checksum Failure\n", dev->name);
-			free_irq(dev->irq, dev);
-                        goto out2;
-                }
-		*/
-        }
-
-        return 0;
-
-out2:
-	release_region(ioaddr, SMCTR_IO_EXTENT);
-out:
-	return err;
-}
-
-static int __init smctr_get_boardid(struct net_device *dev, int mca)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-        __u8 r, r1, IdByte;
-        __u16 BoardIdMask;
-
-        tp->board_id = BoardIdMask = 0;
-
-	if(mca)
-	{
-		BoardIdMask |= (MICROCHANNEL+INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
-		tp->extra_info |= (INTERFACE_594_CHIP+RAM_SIZE_64K+NIC_825_BIT+ALTERNATE_IRQ_BIT+SLOT_16BIT);
-	}
-	else
-	{
-        	BoardIdMask|=(INTERFACE_CHIP+TOKEN_MEDIA+PAGED_RAM+BOARD_16BIT);
-        	tp->extra_info |= (INTERFACE_584_CHIP + RAM_SIZE_64K
-        	        + NIC_825_BIT + ALTERNATE_IRQ_BIT);
-	}
-
-	if(!mca)
-	{
-        	r = inb(ioaddr + BID_REG_1);
-        	r &= 0x0c;
-       		outb(r, ioaddr + BID_REG_1);
-        	r = inb(ioaddr + BID_REG_1);
-
-        	if(r & BID_SIXTEEN_BIT_BIT)
-        	{
-        	        tp->extra_info |= SLOT_16BIT;
-        	        tp->adapter_bus = BUS_ISA16_TYPE;
-        	}
-        	else
-        	        tp->adapter_bus = BUS_ISA8_TYPE;
-	}
-	else
-		tp->adapter_bus = BUS_MCA_TYPE;
-
-        /* Get Board Id Byte */
-        IdByte = inb(ioaddr + BID_BOARD_ID_BYTE);
-
-        /* if Major version > 1.0 then
-         *      return;
-         */
-        if(IdByte & 0xF8)
-                return -1;
-
-        r1 = inb(ioaddr + BID_REG_1);
-        r1 &= BID_ICR_MASK;
-        r1 |= BID_OTHER_BIT;
-
-        outb(r1, ioaddr + BID_REG_1);
-        r1 = inb(ioaddr + BID_REG_3);
-
-        r1 &= BID_EAR_MASK;
-        r1 |= BID_ENGR_PAGE;
-
-        outb(r1, ioaddr + BID_REG_3);
-        r1 = inb(ioaddr + BID_REG_1);
-        r1 &= BID_ICR_MASK;
-        r1 |= (BID_RLA | BID_OTHER_BIT);
-
-        outb(r1, ioaddr + BID_REG_1);
-
-        r1 = inb(ioaddr + BID_REG_1);
-        while(r1 & BID_RECALL_DONE_MASK)
-                r1 = inb(ioaddr + BID_REG_1);
-
-        r = inb(ioaddr + BID_LAR_0 + BID_REG_6);
-
-        /* clear chip rev bits */
-        tp->extra_info &= ~CHIP_REV_MASK;
-        tp->extra_info |= ((r & BID_EEPROM_CHIP_REV_MASK) << 6);
-
-        r1 = inb(ioaddr + BID_REG_1);
-        r1 &= BID_ICR_MASK;
-        r1 |= BID_OTHER_BIT;
-
-        outb(r1, ioaddr + BID_REG_1);
-        r1 = inb(ioaddr + BID_REG_3);
-
-        r1 &= BID_EAR_MASK;
-        r1 |= BID_EA6;
-
-        outb(r1, ioaddr + BID_REG_3);
-        r1 = inb(ioaddr + BID_REG_1);
-
-        r1 &= BID_ICR_MASK;
-        r1 |= BID_RLA;
-
-        outb(r1, ioaddr + BID_REG_1);
-        r1 = inb(ioaddr + BID_REG_1);
-
-        while(r1 & BID_RECALL_DONE_MASK)
-                r1 = inb(ioaddr + BID_REG_1);
-
-        return BoardIdMask;
-}
-
-static int smctr_get_group_address(struct net_device *dev)
-{
-        smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);
-
-        return smctr_wait_cmd(dev);
-}
-
-static int smctr_get_functional_address(struct net_device *dev)
-{
-        smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);
-
-        return smctr_wait_cmd(dev);
-}
-
-/* Calculate number of Non-MAC receive BDB's and data buffers.
- * This function must simulate allocating shared memory exactly
- * as the allocate_shared_memory function above.
- */
-static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int mem_used = 0;
-
-        /* Allocate System Control Blocks. */
-        mem_used += sizeof(SCGBlock);
-
-        mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
-        mem_used += sizeof(SCLBlock);
-
-        mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
-        mem_used += sizeof(ACBlock) * tp->num_acbs;
-
-        mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
-        mem_used += sizeof(ISBlock);
-
-        mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
-        mem_used += MISC_DATA_SIZE;
-
-        /* Allocate transmit FCB's. */
-        mem_used += TO_PARAGRAPH_BOUNDRY(mem_used);
-
-        mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[MAC_QUEUE];
-        mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[NON_MAC_QUEUE];
-        mem_used += sizeof(FCBlock) * tp->num_tx_fcbs[BUG_QUEUE];
-
-        /* Allocate transmit BDBs. */
-        mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[MAC_QUEUE];
-        mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[NON_MAC_QUEUE];
-        mem_used += sizeof(BDBlock) * tp->num_tx_bdbs[BUG_QUEUE];
-
-        /* Allocate receive FCBs. */
-        mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[MAC_QUEUE];
-        mem_used += sizeof(FCBlock) * tp->num_rx_fcbs[NON_MAC_QUEUE];
-
-        /* Allocate receive BDBs. */
-        mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE];
-
-        /* Allocate MAC transmit buffers.
-         * MAC transmit buffers don't have to be on an ODD Boundary.
-         */
-        mem_used += tp->tx_buff_size[MAC_QUEUE];
-
-        /* Allocate BUG transmit buffers. */
-        mem_used += tp->tx_buff_size[BUG_QUEUE];
-
-        /* Allocate MAC receive data buffers.
-         * MAC receive buffers don't have to be on a 256 byte boundary.
-         */
-        mem_used += RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[MAC_QUEUE];
-
-        /* Allocate Non-MAC transmit buffers.
-         * For maximum Netware performance, put Tx Buffers on
-         * ODD Boundary, and then restore malloc to Even Boundaries.
-         */
-        mem_used += 1L;
-        mem_used += tp->tx_buff_size[NON_MAC_QUEUE];
-        mem_used += 1L;
-
-        /* CALCULATE NUMBER OF NON-MAC RX BDB'S
-         * AND NON-MAC RX DATA BUFFERS
-         *
-         * Make sure the mem_used offset at this point is the
-         * same as in allocate_shared_memory or the following
-         * boundary adjustment will be incorrect (i.e. not allocating
-         * the non-mac receive buffers above cannot change the 256
-         * byte offset).
-         *
-         * Since this cannot be guaranteed, adding the full 256 bytes
-         * to the amount of shared memory used at this point will guarantee
-         * that the rx data buffers do not overflow shared memory.
-         */
-        mem_used += 0x100;
-
-        return (0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock));
-}
-
-static int smctr_get_physical_drop_number(struct net_device *dev)
-{
-        smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);
-
-        return smctr_wait_cmd(dev);
-}
-
-static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
-{
-        struct net_local *tp = netdev_priv(dev);
-        BDBlock *bdb;
-
-        bdb = (BDBlock *)((__u32)tp->ram_access
-                + (__u32)(tp->rx_fcb_curr[queue]->trc_bdb_ptr));
-
-        tp->rx_fcb_curr[queue]->bdb_ptr = bdb;
-
-        return (__u8 *)bdb->data_block_ptr;
-}
-
-static int smctr_get_station_id(struct net_device *dev)
-{
-        smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);
-
-        return smctr_wait_cmd(dev);
-}
-
-/*
- * Get the current statistics. This may be called with the card open
- * or closed.
- */
-static struct net_device_stats *smctr_get_stats(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        return (struct net_device_stats *)&tp->MacStat;
-}
-
-static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
-        __u16 bytes_count)
-{
-        struct net_local *tp = netdev_priv(dev);
-        FCBlock *pFCB;
-        BDBlock *pbdb;
-        unsigned short alloc_size;
-        unsigned short *temp;
-
-        if(smctr_debug > 20)
-                printk(KERN_DEBUG "smctr_get_tx_fcb\n");
-
-        /* check if there are enough FCB blocks */
-        if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue])
-                return (FCBlock *)(-1L);
-
-        /* round off the input pkt size to the nearest even number */
-        alloc_size = (bytes_count + 1) & 0xfffe;
-
-        /* check if enough mem */
-        if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue])
-                return (FCBlock *)(-1L);
-
-        /* check if past the end;
-         * if there is exactly enough mem to the end of the ring, alloc
-         * from the front. This avoids updating curr when curr == end.
-         */
-        if(((unsigned long)(tp->tx_buff_curr[queue]) + alloc_size)
-                >= (unsigned long)(tp->tx_buff_end[queue]))
-        {
-                /* check if enough memory from ring head */
-                alloc_size = alloc_size +
-                        (__u16)((__u32)tp->tx_buff_end[queue]
-                        - (__u32)tp->tx_buff_curr[queue]);
-
-                if((tp->tx_buff_used[queue] + alloc_size)
-                        > tp->tx_buff_size[queue])
-                {
-                        return (FCBlock *)(-1L);
-                }
-
-                /* ring wrap */
-                tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];
-        }
-
-        tp->tx_buff_used[queue] += alloc_size;
-        tp->num_tx_fcbs_used[queue]++;
-        tp->tx_fcb_curr[queue]->frame_length = bytes_count;
-        tp->tx_fcb_curr[queue]->memory_alloc = alloc_size;
-        temp = tp->tx_buff_curr[queue];
-        tp->tx_buff_curr[queue]
-                = (__u16 *)((__u32)temp + (__u32)((bytes_count + 1) & 0xfffe));
-
-        pbdb = tp->tx_fcb_curr[queue]->bdb_ptr;
-        pbdb->buffer_length = bytes_count;
-        pbdb->data_block_ptr = temp;
-        pbdb->trc_data_block_ptr = TRC_POINTER(temp);
-
-        pFCB = tp->tx_fcb_curr[queue];
-        tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr;
-
-        return pFCB;
-}
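
smctr_get_tx_fcb() above is a bump allocator over a per-queue transmit buffer ring: the request is rounded up to an even size, refused when the queue is out of FCBs or buffer space, and when it would run past (or exactly reach) the end of the ring, the unused tail is charged to the allocation and the cursor wraps to the head. A sketch of that wrap policy, using illustrative types rather than the driver's own:

#include <stddef.h>
#include <stdint.h>

struct tx_ring {
	uint8_t *head, *end, *curr;   /* fixed buffer [head, end)  */
	size_t   used, size;          /* bytes currently charged   */
};

/* Return a pointer to 'len' bytes, or NULL if the ring is full. */
static uint8_t *ring_alloc(struct tx_ring *r, size_t len)
{
	size_t alloc = (len + 1) & ~(size_t)1;    /* round up to even */
	uint8_t *p;

	if (r->used + alloc > r->size)
		return NULL;

	/* Would run past (or exactly reach) the end of the ring:
	 * charge the unused tail to this allocation and wrap to
	 * the head, as the driver does. */
	if (r->curr + alloc >= r->end) {
		alloc += (size_t)(r->end - r->curr);
		if (r->used + alloc > r->size)
			return NULL;
		r->curr = r->head;
	}

	p = r->curr;
	r->curr += (len + 1) & ~(size_t)1;
	r->used += alloc;
	return p;
}

int main(void)
{
	static uint8_t buf[256];
	struct tx_ring r = { buf, buf + sizeof(buf), buf, 0, sizeof(buf) };

	return ring_alloc(&r, 60) ? 0 : 1;
}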
-
-static int smctr_get_upstream_neighbor_addr(struct net_device *dev)
-{
-        smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS);
-
-        return smctr_wait_cmd(dev);
-}
-
-static int smctr_hardware_send_packet(struct net_device *dev,
-        struct net_local *tp)
-{
-        struct tr_statistics *tstat = &tp->MacStat;
-        struct sk_buff *skb;
-        FCBlock *fcb;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name);
-
-        if(tp->status != OPEN)
-                return -1;
-
-        if(tp->monitor_state_ready != 1)
-                return -1;
-
-        for(;;)
-        {
-                /* Send first buffer from queue */
-                skb = skb_dequeue(&tp->SendSkbQueue);
-                if(skb == NULL)
-                        return -1;
-
-                tp->QueueSkb++;
-
-                if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size)
-			return -1;
-
-                smctr_enable_16bit(dev);
-                smctr_set_page(dev, (__u8 *)tp->ram_access);
-
-                if((fcb = smctr_get_tx_fcb(dev, NON_MAC_QUEUE, skb->len))
-                        == (FCBlock *)(-1L))
-                {
-                        smctr_disable_16bit(dev);
-                        return -1;
-                }
-
-                smctr_tx_move_frame(dev, skb,
-                        (__u8 *)fcb->bdb_ptr->data_block_ptr, skb->len);
-
-                smctr_set_page(dev, (__u8 *)fcb);
-
-                smctr_trc_send_packet(dev, fcb, NON_MAC_QUEUE);
-                dev_kfree_skb(skb);
-
-                tstat->tx_packets++;
-
-                smctr_disable_16bit(dev);
-        }
-
-        return 0;
-}
-
-static int smctr_init_acbs(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i;
-        ACBlock *acb;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_init_acbs\n", dev->name);
-
-        acb                     = tp->acb_head;
-        acb->cmd_done_status    = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
-        acb->cmd_info           = ACB_CHAIN_END;
-        acb->cmd                = 0;
-        acb->subcmd             = 0;
-        acb->data_offset_lo     = 0;
-        acb->data_offset_hi     = 0;
-        acb->next_ptr
-                = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
-        acb->trc_next_ptr       = TRC_POINTER(acb->next_ptr);
-
-        for(i = 1; i < tp->num_acbs; i++)
-        {
-                acb             = acb->next_ptr;
-                acb->cmd_done_status
-                        = (ACB_COMMAND_DONE | ACB_COMMAND_SUCCESSFUL);
-                acb->cmd_info = ACB_CHAIN_END;
-                acb->cmd        = 0;
-                acb->subcmd     = 0;
-                acb->data_offset_lo = 0;
-                acb->data_offset_hi = 0;
-                acb->next_ptr
-                        = (ACBlock *)(((char *)acb) + sizeof(ACBlock));
-                acb->trc_next_ptr = TRC_POINTER(acb->next_ptr);
-        }
-
-        acb->next_ptr           = tp->acb_head;
-        acb->trc_next_ptr       = TRC_POINTER(tp->acb_head);
-        tp->acb_next            = tp->acb_head->next_ptr;
-        tp->acb_curr            = tp->acb_head->next_ptr;
-        tp->num_acbs_used       = 0;
-
-        return 0;
-}
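
smctr_init_acbs() and the FCB/BDB initializers that follow all build the same shape: fixed-size blocks laid out back to back in shared memory, each linked to the block sizeof() bytes after it, the last linked back to the head, and every host pointer mirrored by a card-relative trc_ pointer. A compact sketch of that ring construction, with a hypothetical offset translation standing in for TRC_POINTER():

#include <stdint.h>

/* Illustrative block; the real ACB/FCB/BDB layouts carry more fields. */
struct blk {
	struct blk *next;      /* host-visible link        */
	uint32_t    trc_next;  /* same link, card-relative */
};

/* Hypothetical stand-in for TRC_POINTER(): offset into shared memory. */
static uint32_t trc_ptr(void *base, void *p)
{
	return (uint32_t)((uint8_t *)p - (uint8_t *)base);
}

/* Link 'count' contiguous blocks starting at 'head' into a ring. */
static void init_ring(struct blk *head, unsigned int count, void *base)
{
	struct blk *b = head;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct blk *next = (i == count - 1)
			? head
			: (struct blk *)((uint8_t *)b + sizeof(*b));

		b->next     = next;
		b->trc_next = trc_ptr(base, next);
		b = next;
	}
}

int main(void)
{
	static struct blk blocks[4];

	init_ring(blocks, 4, blocks);
	return blocks[3].next == &blocks[0] ? 0 : 1;
}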
-
-static int smctr_init_adapter(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_init_adapter\n", dev->name);
-
-        tp->status              = CLOSED;
-        tp->page_offset_mask    = (tp->ram_usable * 1024) - 1;
-        skb_queue_head_init(&tp->SendSkbQueue);
-        tp->QueueSkb = MAX_TX_QUEUE;
-
-        if(!(tp->group_address_0 & 0x0080))
-                tp->group_address_0 |= 0x00C0;
-
-        if(!(tp->functional_address_0 & 0x00C0))
-                tp->functional_address_0 |= 0x00C0;
-
-        tp->functional_address[0] &= 0xFF7F;
-
-        if(tp->authorized_function_classes == 0)
-                tp->authorized_function_classes = 0x7FFF;
-
-        if(tp->authorized_access_priority == 0)
-                tp->authorized_access_priority = 0x06;
-
-        smctr_disable_bic_int(dev);
-        smctr_set_trc_reset(dev->base_addr);
-
-        smctr_enable_16bit(dev);
-        smctr_set_page(dev, (__u8 *)tp->ram_access);
-
-        if(smctr_checksum_firmware(dev))
-	{
-                printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name);
-		return -ENOENT;
-        }
-
-        if((err = smctr_ram_memory_test(dev)))
-	{
-                printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name);
-                return -EIO;
-        }
-
-	smctr_set_rx_look_ahead(dev);
-        smctr_load_node_addr(dev);
-
-        /* Initialize adapter for Internal Self Test. */
-        smctr_reset_adapter(dev);
-        if((err = smctr_init_card_real(dev)))
-	{
-                printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
-                        dev->name, err);
-                return -EINVAL;
-        }
-
-        /* This routine clobbers the TRC's internal registers. */
-        if((err = smctr_internal_self_test(dev)))
-	{
-                printk(KERN_ERR "%s: Card failed internal self test (%d)\n",
-                        dev->name, err);
-                return -EINVAL;
-        }
-
-        /* Re-Initialize adapter's internal registers */
-        smctr_reset_adapter(dev);
-        if((err = smctr_init_card_real(dev)))
-	{
-                printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
-                        dev->name, err);
-                return -EINVAL;
-        }
-
-        smctr_enable_bic_int(dev);
-
-        if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
-                return err;
-
-        smctr_disable_16bit(dev);
-
-        return 0;
-}
-
-static int smctr_init_card_real(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err = 0;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_init_card_real\n", dev->name);
-
-        tp->sh_mem_used = 0;
-        tp->num_acbs    = NUM_OF_ACBS;
-
-        /* Range Check Max Packet Size */
-        if(tp->max_packet_size < 256)
-                tp->max_packet_size = 256;
-        else
-        {
-                if(tp->max_packet_size > NON_MAC_TX_BUFFER_MEMORY)
-                        tp->max_packet_size = NON_MAC_TX_BUFFER_MEMORY;
-        }
-
-        tp->num_of_tx_buffs = (NON_MAC_TX_BUFFER_MEMORY
-                / tp->max_packet_size) - 1;
-
-        if(tp->num_of_tx_buffs > NUM_NON_MAC_TX_FCBS)
-                tp->num_of_tx_buffs = NUM_NON_MAC_TX_FCBS;
-        else
-        {
-                if(tp->num_of_tx_buffs == 0)
-                        tp->num_of_tx_buffs = 1;
-        }
-
-        /* Tx queue constants */
-        tp->num_tx_fcbs        [BUG_QUEUE]     = NUM_BUG_TX_FCBS;
-        tp->num_tx_bdbs        [BUG_QUEUE]     = NUM_BUG_TX_BDBS;
-        tp->tx_buff_size       [BUG_QUEUE]     = BUG_TX_BUFFER_MEMORY;
-        tp->tx_buff_used       [BUG_QUEUE]     = 0;
-        tp->tx_queue_status    [BUG_QUEUE]     = NOT_TRANSMITING;
-
-        tp->num_tx_fcbs        [MAC_QUEUE]     = NUM_MAC_TX_FCBS;
-        tp->num_tx_bdbs        [MAC_QUEUE]     = NUM_MAC_TX_BDBS;
-        tp->tx_buff_size       [MAC_QUEUE]     = MAC_TX_BUFFER_MEMORY;
-        tp->tx_buff_used       [MAC_QUEUE]     = 0;
-        tp->tx_queue_status    [MAC_QUEUE]     = NOT_TRANSMITING;
-
-        tp->num_tx_fcbs        [NON_MAC_QUEUE] = NUM_NON_MAC_TX_FCBS;
-        tp->num_tx_bdbs        [NON_MAC_QUEUE] = NUM_NON_MAC_TX_BDBS;
-        tp->tx_buff_size       [NON_MAC_QUEUE] = NON_MAC_TX_BUFFER_MEMORY;
-        tp->tx_buff_used       [NON_MAC_QUEUE] = 0;
-        tp->tx_queue_status    [NON_MAC_QUEUE] = NOT_TRANSMITING;
-
-        /* Receive Queue Constants */
-        tp->num_rx_fcbs[MAC_QUEUE] = NUM_MAC_RX_FCBS;
-        tp->num_rx_bdbs[MAC_QUEUE] = NUM_MAC_RX_BDBS;
-
-        if(tp->extra_info & CHIP_REV_MASK)
-                tp->num_rx_fcbs[NON_MAC_QUEUE] = 78;    /* 825 Rev. XE */
-        else
-                tp->num_rx_fcbs[NON_MAC_QUEUE] = 7;     /* 825 Rev. XD */
-
-        tp->num_rx_bdbs[NON_MAC_QUEUE] = smctr_get_num_rx_bdbs(dev);
-
-        smctr_alloc_shared_memory(dev);
-        smctr_init_shared_memory(dev);
-
-        if((err = smctr_issue_init_timers_cmd(dev)))
-                return err;
-
-        if((err = smctr_issue_init_txrx_cmd(dev)))
-	{
-                printk(KERN_ERR "%s: Hardware failure\n", dev->name);
-                return err;
-        }
-
-        return 0;
-}
-
-static int smctr_init_rx_bdbs(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i, j;
-        BDBlock *bdb;
-        __u16 *buf;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_init_rx_bdbs\n", dev->name);
-
-        for(i = 0; i < NUM_RX_QS_USED; i++)
-        {
-                bdb = tp->rx_bdb_head[i];
-                buf = tp->rx_buff_head[i];
-                bdb->info = (BDB_CHAIN_END | BDB_NO_WARNING);
-                bdb->buffer_length = RX_DATA_BUFFER_SIZE;
-                bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
-                bdb->data_block_ptr = buf;
-                bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
-
-                if(i == NON_MAC_QUEUE)
-                        bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
-                else
-                        bdb->trc_data_block_ptr = TRC_POINTER(buf);
-
-                for(j = 1; j < tp->num_rx_bdbs[i]; j++)
-                {
-                        bdb->next_ptr->back_ptr = bdb;
-                        bdb = bdb->next_ptr;
-                        buf = (__u16 *)((char *)buf + RX_DATA_BUFFER_SIZE);
-                        bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
-                        bdb->buffer_length = RX_DATA_BUFFER_SIZE;
-                        bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
-                        bdb->data_block_ptr = buf;
-                        bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
-
-                        if(i == NON_MAC_QUEUE)
-                                bdb->trc_data_block_ptr = RX_BUFF_TRC_POINTER(buf);
-                        else
-                                bdb->trc_data_block_ptr = TRC_POINTER(buf);
-                }
-
-                bdb->next_ptr           = tp->rx_bdb_head[i];
-                bdb->trc_next_ptr       = TRC_POINTER(tp->rx_bdb_head[i]);
-
-                tp->rx_bdb_head[i]->back_ptr    = bdb;
-                tp->rx_bdb_curr[i]              = tp->rx_bdb_head[i]->next_ptr;
-        }
-
-        return 0;
-}
-
-static int smctr_init_rx_fcbs(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i, j;
-        FCBlock *fcb;
-
-        for(i = 0; i < NUM_RX_QS_USED; i++)
-        {
-                fcb               = tp->rx_fcb_head[i];
-                fcb->frame_status = 0;
-                fcb->frame_length = 0;
-                fcb->info         = FCB_CHAIN_END;
-                fcb->next_ptr     = (FCBlock *)(((char*)fcb) + sizeof(FCBlock));
-                if(i == NON_MAC_QUEUE)
-                        fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
-                else
-                        fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
-
-                for(j = 1; j < tp->num_rx_fcbs[i]; j++)
-                {
-                        fcb->next_ptr->back_ptr = fcb;
-                        fcb                     = fcb->next_ptr;
-                        fcb->frame_status       = 0;
-                        fcb->frame_length       = 0;
-                        fcb->info               = FCB_WARNING;
-                        fcb->next_ptr
-                                = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
-
-                        if(i == NON_MAC_QUEUE)
-                                fcb->trc_next_ptr
-                                        = RX_FCB_TRC_POINTER(fcb->next_ptr);
-                        else
-                                fcb->trc_next_ptr
-                                        = TRC_POINTER(fcb->next_ptr);
-                }
-
-                fcb->next_ptr = tp->rx_fcb_head[i];
-
-                if(i == NON_MAC_QUEUE)
-                        fcb->trc_next_ptr = RX_FCB_TRC_POINTER(fcb->next_ptr);
-                else
-                        fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
-
-                tp->rx_fcb_head[i]->back_ptr    = fcb;
-                tp->rx_fcb_curr[i]              = tp->rx_fcb_head[i]->next_ptr;
-        }
-
-        return 0;
-}
-
-static int smctr_init_shared_memory(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i;
-        __u32 *iscpb;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_init_shared_memory\n", dev->name);
-
-        smctr_set_page(dev, (__u8 *)(unsigned int)tp->iscpb_ptr);
-
-        /* Initialize Initial System Configuration Point. (ISCP) */
-        iscpb = (__u32 *)PAGE_POINTER(&tp->iscpb_ptr->trc_scgb_ptr);
-        *iscpb = (__u32)(SWAP_WORDS(TRC_POINTER(tp->scgb_ptr)));
-
-        smctr_set_page(dev, (__u8 *)tp->ram_access);
-
-        /* Initialize System Configuration Pointers. (SCP) */
-        tp->scgb_ptr->config = (SCGB_ADDRESS_POINTER_FORMAT
-                | SCGB_MULTI_WORD_CONTROL | SCGB_DATA_FORMAT
-                | SCGB_BURST_LENGTH);
-
-        tp->scgb_ptr->trc_sclb_ptr      = TRC_POINTER(tp->sclb_ptr);
-        tp->scgb_ptr->trc_acb_ptr       = TRC_POINTER(tp->acb_head);
-        tp->scgb_ptr->trc_isb_ptr       = TRC_POINTER(tp->isb_ptr);
-        tp->scgb_ptr->isbsiz            = (sizeof(ISBlock)) - 2;
-
-        /* Initialize System Control Block. (SCB) */
-        tp->sclb_ptr->valid_command    = SCLB_VALID | SCLB_CMD_NOP;
-        tp->sclb_ptr->iack_code        = 0;
-        tp->sclb_ptr->resume_control   = 0;
-        tp->sclb_ptr->int_mask_control = 0;
-        tp->sclb_ptr->int_mask_state   = 0;
-
-        /* Initialize Interrupt Status Block. (ISB) */
-        for(i = 0; i < NUM_OF_INTERRUPTS; i++)
-        {
-                tp->isb_ptr->IStatus[i].IType = 0xf0;
-                tp->isb_ptr->IStatus[i].ISubtype = 0;
-        }
-
-        tp->current_isb_index = 0;
-
-        /* Initialize Action Command Block. (ACB) */
-        smctr_init_acbs(dev);
-
-        /* Initialize transmit FCB's and BDB's. */
-        smctr_link_tx_fcbs_to_bdbs(dev);
-        smctr_init_tx_bdbs(dev);
-        smctr_init_tx_fcbs(dev);
-
-        /* Initialize receive FCB's and BDB's. */
-        smctr_init_rx_bdbs(dev);
-        smctr_init_rx_fcbs(dev);
-
-        return 0;
-}
-
-static int smctr_init_tx_bdbs(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i, j;
-        BDBlock *bdb;
-
-        for(i = 0; i < NUM_TX_QS_USED; i++)
-        {
-                bdb = tp->tx_bdb_head[i];
-                bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
-                bdb->next_ptr = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
-                bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
-
-                for(j = 1; j < tp->num_tx_bdbs[i]; j++)
-                {
-                        bdb->next_ptr->back_ptr = bdb;
-                        bdb = bdb->next_ptr;
-                        bdb->info = (BDB_NOT_CHAIN_END | BDB_NO_WARNING);
-                        bdb->next_ptr
-                                = (BDBlock *)(((char *)bdb) + sizeof(BDBlock));
-                        bdb->trc_next_ptr = TRC_POINTER(bdb->next_ptr);
-                }
-
-                bdb->next_ptr = tp->tx_bdb_head[i];
-                bdb->trc_next_ptr = TRC_POINTER(tp->tx_bdb_head[i]);
-                tp->tx_bdb_head[i]->back_ptr = bdb;
-        }
-
-        return 0;
-}
-
-static int smctr_init_tx_fcbs(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i, j;
-        FCBlock *fcb;
-
-        for(i = 0; i < NUM_TX_QS_USED; i++)
-        {
-                fcb               = tp->tx_fcb_head[i];
-                fcb->frame_status = 0;
-                fcb->frame_length = 0;
-                fcb->info         = FCB_CHAIN_END;
-                fcb->next_ptr = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
-                fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
-
-                for(j = 1; j < tp->num_tx_fcbs[i]; j++)
-                {
-                        fcb->next_ptr->back_ptr = fcb;
-                        fcb                     = fcb->next_ptr;
-                        fcb->frame_status       = 0;
-                        fcb->frame_length       = 0;
-                        fcb->info               = FCB_CHAIN_END;
-                        fcb->next_ptr
-                                = (FCBlock *)(((char *)fcb) + sizeof(FCBlock));
-                        fcb->trc_next_ptr = TRC_POINTER(fcb->next_ptr);
-                }
-
-                fcb->next_ptr           = tp->tx_fcb_head[i];
-                fcb->trc_next_ptr       = TRC_POINTER(tp->tx_fcb_head[i]);
-
-                tp->tx_fcb_head[i]->back_ptr    = fcb;
-                tp->tx_fcb_end[i]               = tp->tx_fcb_head[i]->next_ptr;
-                tp->tx_fcb_curr[i]              = tp->tx_fcb_head[i]->next_ptr;
-                tp->num_tx_fcbs_used[i]         = 0;
-        }
-
-        return 0;
-}
-
-static int smctr_internal_self_test(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err;
-
-        if((err = smctr_issue_test_internal_rom_cmd(dev)))
-                return err;
-
-        if((err = smctr_wait_cmd(dev)))
-                return err;
-
-        if(tp->acb_head->cmd_done_status & 0xff)
-                return -1;
-
-        if((err = smctr_issue_test_hic_cmd(dev)))
-                return err;
-
-        if((err = smctr_wait_cmd(dev)))
-                return err;
-
-        if(tp->acb_head->cmd_done_status & 0xff)
-                return -1;
-
-        if((err = smctr_issue_test_mac_reg_cmd(dev)))
-                return err;
-
-        if((err = smctr_wait_cmd(dev)))
-                return err;
-
-        if(tp->acb_head->cmd_done_status & 0xff)
-                return -1;
-
-        return 0;
-}
-
-/*
- * The typical workload of the driver: Handle the network interface interrupts.
- */
-static irqreturn_t smctr_interrupt(int irq, void *dev_id)
-{
-        struct net_device *dev = dev_id;
-        struct net_local *tp;
-        int ioaddr;
-        __u16 interrupt_unmask_bits = 0, interrupt_ack_code = 0xff00;
-        __u16 err1, err = NOT_MY_INTERRUPT;
-        __u8 isb_type, isb_subtype;
-        __u16 isb_index;
-
-        ioaddr = dev->base_addr;
-        tp = netdev_priv(dev);
-
-        if(tp->status == NOT_INITIALIZED)
-                return IRQ_NONE;
-
-        spin_lock(&tp->lock);
-        
-        smctr_disable_bic_int(dev);
-        smctr_enable_16bit(dev);
-
-        smctr_clear_int(dev);
-
-        /* First read the LSB */
-        while((tp->isb_ptr->IStatus[tp->current_isb_index].IType & 0xf0) == 0)
-        {
-                isb_index       = tp->current_isb_index;
-                isb_type        = tp->isb_ptr->IStatus[isb_index].IType;
-                isb_subtype     = tp->isb_ptr->IStatus[isb_index].ISubtype;
-
-                (tp->current_isb_index)++;
-                if(tp->current_isb_index == NUM_OF_INTERRUPTS)
-                        tp->current_isb_index = 0;
-
-                if(isb_type >= 0x10)
-                {
-                        smctr_disable_16bit(dev);
-		        spin_unlock(&tp->lock);
-                        return IRQ_HANDLED;
-                }
-
-                err = HARDWARE_FAILED;
-                interrupt_ack_code = isb_index;
-                tp->isb_ptr->IStatus[isb_index].IType |= 0xf0;
-
-                interrupt_unmask_bits |= (1 << (__u16)isb_type);
-
-                switch(isb_type)
-                {
-                        case ISB_IMC_MAC_TYPE_3:
-                                smctr_disable_16bit(dev);
-
-                                switch(isb_subtype)
-                                {
-                                        case 0:
-                                                tp->monitor_state = MS_MONITOR_FSM_INACTIVE;
-                                                break;
-
-                                        case 1:
-                                                tp->monitor_state = MS_REPEAT_BEACON_STATE;
-                                                break;
-
-                                        case 2:
-                                                tp->monitor_state = MS_REPEAT_CLAIM_TOKEN_STATE;
-                                                break;
-
-                                        case 3:
-                                                tp->monitor_state = MS_TRANSMIT_CLAIM_TOKEN_STATE;
-                                                break;
-
-                                        case 4:
-                                                tp->monitor_state = MS_STANDBY_MONITOR_STATE;
-                                                break;
-
-                                        case 5:
-                                                tp->monitor_state = MS_TRANSMIT_BEACON_STATE;
-                                                break;
-
-                                        case 6:
-                                                tp->monitor_state = MS_ACTIVE_MONITOR_STATE;
-                                                break;
-
-                                        case 7:
-                                                tp->monitor_state = MS_TRANSMIT_RING_PURGE_STATE;
-                                                break;
-
-                                        case 8:   /* diagnostic state */
-                                                break;
-
-                                        case 9:
-                                                tp->monitor_state = MS_BEACON_TEST_STATE;
-                                                if(smctr_lobe_media_test(dev))
-                                                {
-                                                        tp->ring_status_flags = RING_STATUS_CHANGED;
-                                                        tp->ring_status = AUTO_REMOVAL_ERROR;
-                                                        smctr_ring_status_chg(dev);
-                                                        smctr_bypass_state(dev);
-                                                }
-                                                else
-                                                        smctr_issue_insert_cmd(dev);
-                                                break;
-
-                                        /* case 0x0a-0xff, illegal states */
-                                        default:
-                                                break;
-                                }
-
-                                tp->ring_status_flags = MONITOR_STATE_CHANGED;
-                                err = smctr_ring_status_chg(dev);
-
-                                smctr_enable_16bit(dev);
-                                break;
-
-                        /* Type 0x02 - MAC Error Counters Interrupt
-                         * One or more of the MAC Error Counters is half full
-                         *      MAC Error Counters
-                         *      Lost_FR_Error_Counter
-                         *      RCV_Congestion_Counter
-                         *      FR_copied_Error_Counter
-                         *      FREQ_Error_Counter
-                         *      Token_Error_Counter
-                         *      Line_Error_Counter
-                         *      Internal_Error_Count
-                         */
-                        case ISB_IMC_MAC_ERROR_COUNTERS:
-                                /* Read 802.5 Error Counters */
-                                err = smctr_issue_read_ring_status_cmd(dev);
-                                break;
-
-                        /* Type 0x04 - MAC Type 2 Interrupt
-                         * HOST needs to enqueue MAC Frame for transmission
-                         * SubType Bit 15 - RQ_INIT_PDU (Request Initialization)
-                         * Changed from RQ_INIT_PDU to
-                         * TRC_Status_Changed_Indicate
-                         */
-                        case ISB_IMC_MAC_TYPE_2:
-                                err = smctr_issue_read_ring_status_cmd(dev);
-                                break;
-
-
-                        /* Type 0x05 - TX Frame Interrupt (FI). */
-                        case ISB_IMC_TX_FRAME:
-                                /* BUG QUEUE for TRC stuck receive BUG */
-                                if(isb_subtype & TX_PENDING_PRIORITY_2)
-                                {
-                                        if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS)
-                                                break;
-                                }
-
-                                /* NON-MAC frames only */
-                                if(isb_subtype & TX_PENDING_PRIORITY_1)
-                                {
-                                        if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS)
-                                                break;
-                                }
-
-                                /* MAC frames only */
-                                if(isb_subtype & TX_PENDING_PRIORITY_0)
-                                        err = smctr_tx_complete(dev, MAC_QUEUE);
-                                break;
-
-                        /* Type 0x06 - TX END OF QUEUE (FE) */
-                        case ISB_IMC_END_OF_TX_QUEUE:
-                                /* BUG queue */
-                                if(isb_subtype & TX_PENDING_PRIORITY_2)
-                                {
-                                        /* ok to clear the Receive FIFO overrun
-                                         * imask; send_BUG now completes.
-                                         */
-                                        interrupt_unmask_bits |= 0x800;
-
-                                        tp->tx_queue_status[BUG_QUEUE] = NOT_TRANSMITING;
-                                        if((err = smctr_tx_complete(dev, BUG_QUEUE)) != SUCCESS)
-                                                break;
-                                        if((err = smctr_restart_tx_chain(dev, BUG_QUEUE)) != SUCCESS)
-                                                break;
-                                }
-
-                                /* NON-MAC queue only */
-                                if(isb_subtype & TX_PENDING_PRIORITY_1)
-                                {
-                                        tp->tx_queue_status[NON_MAC_QUEUE] = NOT_TRANSMITING;
-                                        if((err = smctr_tx_complete(dev, NON_MAC_QUEUE)) != SUCCESS)
-                                                break;
-                                        if((err = smctr_restart_tx_chain(dev, NON_MAC_QUEUE)) != SUCCESS)
-                                                break;
-                                }
-
-                                /* MAC queue only */
-                                if(isb_subtype & TX_PENDING_PRIORITY_0)
-                                {
-                                        tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
-                                        if((err = smctr_tx_complete(dev, MAC_QUEUE)) != SUCCESS)
-                                                break;
-
-                                        err = smctr_restart_tx_chain(dev, MAC_QUEUE);
-                                }
-                                break;
-
-                        /* Type 0x07 - NON-MAC RX Resource Interrupt
-                         *   Subtype bit 12 - (BW) BDB warning
-                         *   Subtype bit 13 - (FW) FCB warning
-                         *   Subtype bit 14 - (BE) BDB End of chain
-                         *   Subtype bit 15 - (FE) FCB End of chain
-                         */
-                        case ISB_IMC_NON_MAC_RX_RESOURCE:
-                                tp->rx_fifo_overrun_count = 0;
-                                tp->receive_queue_number = NON_MAC_QUEUE;
-                                err1 = smctr_rx_frame(dev);
-
-                                if(isb_subtype & NON_MAC_RX_RESOURCE_FE)
-                                {
-                                        if((err = smctr_issue_resume_rx_fcb_cmd(
-                                                dev, NON_MAC_QUEUE)) != SUCCESS)
-                                                break;
-
-                                        if(tp->ptr_rx_fcb_overruns)
-                                                (*tp->ptr_rx_fcb_overruns)++;
-                                }
-
-                                if(isb_subtype & NON_MAC_RX_RESOURCE_BE)
-                                {
-                                        if((err = smctr_issue_resume_rx_bdb_cmd(
-                                                dev, NON_MAC_QUEUE)) != SUCCESS)
-                                                break;
-
-                                        if(tp->ptr_rx_bdb_overruns)
-                                                (*tp->ptr_rx_bdb_overruns)++;
-                                }
-                                err = err1;
-                                break;
-
-                        /* Type 0x08 - MAC RX Resource Interrupt
-                         *   Subtype bit 12 - (BW) BDB warning
-                         *   Subtype bit 13 - (FW) FCB warning
-                         *   Subtype bit 14 - (BE) BDB End of chain
-                         *   Subtype bit 15 - (FE) FCB End of chain
-                         */
-                        case ISB_IMC_MAC_RX_RESOURCE:
-                                tp->receive_queue_number = MAC_QUEUE;
-                                err1 = smctr_rx_frame(dev);
-
-                                if(isb_subtype & MAC_RX_RESOURCE_FE)
-                                {
-                                        if((err = smctr_issue_resume_rx_fcb_cmd(
-                                                dev, MAC_QUEUE)) != SUCCESS)
-                                                break;
-
-                                        if(tp->ptr_rx_fcb_overruns)
-                                                (*tp->ptr_rx_fcb_overruns)++;
-                                }
-
-                                if(isb_subtype & MAC_RX_RESOURCE_BE)
-                                {
-                                        if((err = smctr_issue_resume_rx_bdb_cmd(
-                                                dev, MAC_QUEUE)) != SUCCESS)
-                                                break;
-
-                                        if(tp->ptr_rx_bdb_overruns)
-                                                (*tp->ptr_rx_bdb_overruns)++;
-                                }
-                                err = err1;
-                                break;
-
-                        /* Type 0x09 - NON_MAC RX Frame Interrupt */
-                        case ISB_IMC_NON_MAC_RX_FRAME:
-                                tp->rx_fifo_overrun_count = 0;
-                                tp->receive_queue_number = NON_MAC_QUEUE;
-                                err = smctr_rx_frame(dev);
-                                break;
-
-                        /* Type 0x0A - MAC RX Frame Interrupt */
-                        case ISB_IMC_MAC_RX_FRAME:
-                                tp->receive_queue_number = MAC_QUEUE;
-                                err = smctr_rx_frame(dev);
-                                break;
-
-                        /* Type 0x0B - TRC status
-                         * TRC has encountered an error condition
-                         * subtype bit 14 - transmit FIFO underrun
-                         * subtype bit 15 - receive FIFO overrun
-                         */
-                        case ISB_IMC_TRC_FIFO_STATUS:
-                                if(isb_subtype & TRC_FIFO_STATUS_TX_UNDERRUN)
-                                {
-                                        if(tp->ptr_tx_fifo_underruns)
-                                                (*tp->ptr_tx_fifo_underruns)++;
-                                }
-
-                                if(isb_subtype & TRC_FIFO_STATUS_RX_OVERRUN)
-                                {
-                                        /* Update the stuck-receive overrun
-                                         * counter; once it reaches 3, clear
-                                         * it by sending back-to-back frames.
-                                         * We pick DAT (duplicate address MAC
-                                         * frame).
-                                         */
-                                        tp->rx_fifo_overrun_count++;
-
-                                        if(tp->rx_fifo_overrun_count >= 3)
-                                        {
-                                                tp->rx_fifo_overrun_count = 0;
-
-                                                /* delay clearing fifo overrun
-                                                 * imask till send_BUG tx
-                                                 * complete posted
-                                                 */
-                                                interrupt_unmask_bits &= (~0x800);
-                                                printk(KERN_CRIT "Jay please send bug\n");
-                                                /* smctr_send_bug(dev); */
-                                        }
-
-                                        if(tp->ptr_rx_fifo_overruns)
-                                                (*tp->ptr_rx_fifo_overruns)++;
-                                }
-
-                                err = SUCCESS;
-                                break;
-
-                        /* Type 0x0C - Action Command Status Interrupt
-                         * Subtype bit 14 - CB end of command chain (CE)
-                         * Subtype bit 15 - CB command interrupt (CI)
-                         */
-                        case ISB_IMC_COMMAND_STATUS:
-                                err = SUCCESS;
-                                if(tp->acb_head->cmd == ACB_CMD_HIC_NOP)
-                                {
-                                        printk(KERN_ERR "i1\n");
-                                        smctr_disable_16bit(dev);
-
-                                        /* XXXXXXXXXXXXXXXXX */
-                                /*      err = UM_Interrupt(dev); */
-
-                                        smctr_enable_16bit(dev);
-                                }
-                                else
-                                {
-                                        if((tp->acb_head->cmd
-					    == ACB_CMD_READ_TRC_STATUS) &&
-					   (tp->acb_head->subcmd
-					    == RW_TRC_STATUS_BLOCK))
-                                        {
-                                                if(tp->ptr_bcn_type)
-                                                {
-                                                        *(tp->ptr_bcn_type)
-                                                                = (__u32)((SBlock *)tp->misc_command_data)->BCN_Type;
-                                                }
-
-                                                if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & ERROR_COUNTERS_CHANGED)
-                                                {
-                                                        smctr_update_err_stats(dev);
-                                                }
-
-                                                if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & TI_NDIS_RING_STATUS_CHANGED)
-                                                {
-                                                        tp->ring_status
-                                                                = ((SBlock*)tp->misc_command_data)->TI_NDIS_Ring_Status;
-                                                        smctr_disable_16bit(dev);
-                                                        err = smctr_ring_status_chg(dev);
-                                                        smctr_enable_16bit(dev);
-                                                        if((tp->ring_status & REMOVE_RECEIVED) &&
-							   (tp->config_word0 & NO_AUTOREMOVE))
-                                                        {
-                                                                smctr_issue_remove_cmd(dev);
-                                                        }
-
-                                                        if(err != SUCCESS)
-                                                        {
-                                                                tp->acb_pending = 0;
-                                                                break;
-                                                        }
-                                                }
-
-                                                if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & UNA_CHANGED)
-                                                {
-                                                        if(tp->ptr_una)
-                                                        {
-                                                                tp->ptr_una[0] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[0]);
-                                                                tp->ptr_una[1] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[1]);
-                                                                tp->ptr_una[2] = SWAP_BYTES(((SBlock *)tp->misc_command_data)->UNA[2]);
-                                                        }
-
-                                                }
-
-                                                if(((SBlock *)tp->misc_command_data)->Status_CHG_Indicate & READY_TO_SEND_RQ_INIT)
-                                                {
-                                                        err = smctr_send_rq_init(dev);
-                                                }
-                                        }
-                                }
-
-                                tp->acb_pending = 0;
-                                break;
-
-                        /* Type 0x0D - MAC Type 1 interrupt
-                         * Subtype -- 00 FR_BCN received at S12
-                         *            01 FR_BCN received at S21
-                         *            02 FR_DAT(DA=MA, A<>0) received at S21
-                         *            03 TSM_EXP at S21
-                         *            04 FR_REMOVE received at S42
-                         *            05 TBR_EXP, BR_FLAG_SET at S42
-                         *            06 TBT_EXP at S53
-                         */
-                        case ISB_IMC_MAC_TYPE_1:
-                                if(isb_subtype > 8)
-                                {
-                                        err = HARDWARE_FAILED;
-                                        break;
-                                }
-
-                                err = SUCCESS;
-                                switch(isb_subtype)
-                                {
-                                        case 0:
-                                                tp->join_state = JS_BYPASS_STATE;
-                                                if(tp->status != CLOSED)
-                                                {
-                                                        tp->status = CLOSED;
-                                                        err = smctr_status_chg(dev);
-                                                }
-                                                break;
-
-                                        case 1:
-                                                tp->join_state = JS_LOBE_TEST_STATE;
-                                                break;
-
-                                        case 2:
-                                                tp->join_state = JS_DETECT_MONITOR_PRESENT_STATE;
-                                                break;
-
-                                        case 3:
-                                                tp->join_state = JS_AWAIT_NEW_MONITOR_STATE;
-                                                break;
-
-                                        case 4:
-                                                tp->join_state = JS_DUPLICATE_ADDRESS_TEST_STATE;
-                                                break;
-
-                                        case 5:
-                                                tp->join_state = JS_NEIGHBOR_NOTIFICATION_STATE;
-                                                break;
-
-                                        case 6:
-                                                tp->join_state = JS_REQUEST_INITIALIZATION_STATE;
-                                                break;
-
-                                        case 7:
-                                                tp->join_state = JS_JOIN_COMPLETE_STATE;
-                                                tp->status = OPEN;
-                                                err = smctr_status_chg(dev);
-                                                break;
-
-                                        case 8:
-                                                tp->join_state = JS_BYPASS_WAIT_STATE;
-                                                break;
-                                }
-                                break;
-
-                        /* Type 0x0E - TRC Initialization Sequence Interrupt
-                         * Subtype -- 00-FF Initialization sequence complete
-                         */
-                        case ISB_IMC_TRC_INTRNL_TST_STATUS:
-                                tp->status = INITIALIZED;
-                                smctr_disable_16bit(dev);
-                                err = smctr_status_chg(dev);
-                                smctr_enable_16bit(dev);
-                                break;
-
-                        /* other interrupt types, illegal */
-                        default:
-                                break;
-                }
-
-                if(err != SUCCESS)
-                        break;
-        }
-
-        /* Check the ack code instead of the unmask bits here because,
-         * while fixing the stuck receive, DAT frames are sent and the
-         * FIFO overrun interrupt is masked off temporarily
-         * (interrupt_unmask_bits = 0), but we still want to issue an
-         * ack to the ISB.
-         */
-        if(!(interrupt_ack_code & 0xff00))
-                smctr_issue_int_ack(dev, interrupt_ack_code, interrupt_unmask_bits);
-
-        smctr_disable_16bit(dev);
-        smctr_enable_bic_int(dev);
-        spin_unlock(&tp->lock);
-
-        return IRQ_HANDLED;
-}
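
The handler above drains a circular Interrupt Status Block: starting at current_isb_index it consumes entries whose IType high nibble is clear, marks each one consumed by OR-ing 0xf0 back in, advances the index modulo NUM_OF_INTERRUPTS, and stops at the first entry the adapter has not filled yet. A minimal sketch of that consumption loop, with a callback standing in for the big switch and a placeholder ring size:

#include <stdint.h>
#include <stdio.h>

#define NUM_OF_INTERRUPTS 0x20   /* placeholder ring size */

struct isb_entry {
	uint8_t itype;     /* high nibble clear => event pending */
	uint8_t isubtype;
};

struct isb_ring {
	struct isb_entry ent[NUM_OF_INTERRUPTS];
	unsigned int     idx;
};

/* Drain every pending entry; return the number of events handled. */
static unsigned int isb_drain(struct isb_ring *isb,
			      void (*handle)(uint8_t type, uint8_t sub))
{
	unsigned int handled = 0;

	while ((isb->ent[isb->idx].itype & 0xf0) == 0) {
		unsigned int i = isb->idx;
		uint8_t type   = isb->ent[i].itype;
		uint8_t sub    = isb->ent[i].isubtype;

		isb->idx = (isb->idx + 1) % NUM_OF_INTERRUPTS;
		isb->ent[i].itype |= 0xf0;          /* mark consumed */
		handle(type, sub);
		handled++;
	}
	return handled;
}

static void print_event(uint8_t type, uint8_t sub)
{
	printf("type 0x%02x subtype %u\n", type, sub);
}

int main(void)
{
	struct isb_ring isb = { { { 0x0a, 1 }, { 0x0d, 7 }, { 0xf0, 0 } }, 0 };

	printf("%u events\n", isb_drain(&isb, print_event));
	return 0;
}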
-
-static int smctr_issue_enable_int_cmd(struct net_device *dev,
-        __u16 interrupt_enable_mask)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err;
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        tp->sclb_ptr->int_mask_control  = interrupt_enable_mask;
-        tp->sclb_ptr->valid_command     = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
-
-        smctr_set_ctrl_attention(dev);
-
-        return 0;
-}
-
-static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(smctr_wait_while_cbusy(dev))
-                return -1;
-
-        tp->sclb_ptr->int_mask_control = ibits;
-        tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */
-        tp->sclb_ptr->resume_control = 0;
-        tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_IACK_CODE_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
-
-        smctr_set_ctrl_attention(dev);
-
-        return 0;
-}
-
-static int smctr_issue_init_timers_cmd(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i;
-        int err;
-        __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data;
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        if((err = smctr_wait_cmd(dev)))
-                return err;
-
-        tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
-        tp->config_word1 = 0;
-
-        if((tp->media_type == MEDIA_STP_16) ||
-	   (tp->media_type == MEDIA_UTP_16) ||
-	   (tp->media_type == MEDIA_STP_16_UTP_16))
-        {
-                tp->config_word0 |= FREQ_16MB_BIT;
-        }
-
-        if(tp->mode_bits & EARLY_TOKEN_REL)
-                tp->config_word0 |= ETREN;
-
-        if(tp->mode_bits & LOOPING_MODE_MASK)
-                tp->config_word0 |= RX_OWN_BIT;
-        else
-                tp->config_word0 &= ~RX_OWN_BIT;
-
-        if(tp->receive_mask & PROMISCUOUS_MODE)
-                tp->config_word0 |= PROMISCUOUS_BIT;
-        else
-                tp->config_word0 &= ~PROMISCUOUS_BIT;
-
-        if(tp->receive_mask & ACCEPT_ERR_PACKETS)
-                tp->config_word0 |= SAVBAD_BIT;
-        else
-                tp->config_word0 &= ~SAVBAD_BIT;
-
-        if(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES)
-                tp->config_word0 |= RXATMAC;
-        else
-                tp->config_word0 &= ~RXATMAC;
-
-        if(tp->receive_mask & ACCEPT_MULTI_PROM)
-                tp->config_word1 |= MULTICAST_ADDRESS_BIT;
-        else
-                tp->config_word1 &= ~MULTICAST_ADDRESS_BIT;
-
-        if(tp->receive_mask & ACCEPT_SOURCE_ROUTING_SPANNING)
-                tp->config_word1 |= SOURCE_ROUTING_SPANNING_BITS;
-        else
-        {
-                if(tp->receive_mask & ACCEPT_SOURCE_ROUTING)
-                        tp->config_word1 |= SOURCE_ROUTING_EXPLORER_BIT;
-                else
-                        tp->config_word1 &= ~SOURCE_ROUTING_SPANNING_BITS;
-        }
-
-        if((tp->media_type == MEDIA_STP_16) ||
-	   (tp->media_type == MEDIA_UTP_16) ||
-	   (tp->media_type == MEDIA_STP_16_UTP_16))
-        {
-                tp->config_word1 |= INTERFRAME_SPACING_16;
-        }
-        else
-                tp->config_word1 |= INTERFRAME_SPACING_4;
-
-        *pTimer_Struc++ = tp->config_word0;
-        *pTimer_Struc++ = tp->config_word1;
-
-        if((tp->media_type == MEDIA_STP_4) ||
-	   (tp->media_type == MEDIA_UTP_4) ||
-	   (tp->media_type == MEDIA_STP_4_UTP_4))
-        {
-                *pTimer_Struc++ = 0x00FA;       /* prescale */
-                *pTimer_Struc++ = 0x2710;       /* TPT_limit */
-                *pTimer_Struc++ = 0x2710;       /* TQP_limit */
-                *pTimer_Struc++ = 0x0A28;       /* TNT_limit */
-                *pTimer_Struc++ = 0x3E80;       /* TBT_limit */
-                *pTimer_Struc++ = 0x3A98;       /* TSM_limit */
-                *pTimer_Struc++ = 0x1B58;       /* TAM_limit */
-                *pTimer_Struc++ = 0x00C8;       /* TBR_limit */
-                *pTimer_Struc++ = 0x07D0;       /* TER_limit */
-                *pTimer_Struc++ = 0x000A;       /* TGT_limit */
-                *pTimer_Struc++ = 0x1162;       /* THT_limit */
-                *pTimer_Struc++ = 0x07D0;       /* TRR_limit */
-                *pTimer_Struc++ = 0x1388;       /* TVX_limit */
-                *pTimer_Struc++ = 0x0000;       /* reserved */
-        }
-        else
-        {
-                *pTimer_Struc++ = 0x03E8;       /* prescale */
-                *pTimer_Struc++ = 0x9C40;       /* TPT_limit */
-                *pTimer_Struc++ = 0x9C40;       /* TQP_limit */
-                *pTimer_Struc++ = 0x0A28;       /* TNT_limit */
-                *pTimer_Struc++ = 0x3E80;       /* TBT_limit */
-                *pTimer_Struc++ = 0x3A98;       /* TSM_limit */
-                *pTimer_Struc++ = 0x1B58;       /* TAM_limit */
-                *pTimer_Struc++ = 0x00C8;       /* TBR_limit */
-                *pTimer_Struc++ = 0x07D0;       /* TER_limit */
-                *pTimer_Struc++ = 0x000A;       /* TGT_limit */
-                *pTimer_Struc++ = 0x4588;       /* THT_limit */
-                *pTimer_Struc++ = 0x1F40;       /* TRR_limit */
-                *pTimer_Struc++ = 0x4E20;       /* TVX_limit */
-                *pTimer_Struc++ = 0x0000;       /* reserved */
-        }
-
-        /* Set node address. */
-        *pTimer_Struc++ = dev->dev_addr[0] << 8
-                | (dev->dev_addr[1] & 0xFF);
-        *pTimer_Struc++ = dev->dev_addr[2] << 8
-                | (dev->dev_addr[3] & 0xFF);
-        *pTimer_Struc++ = dev->dev_addr[4] << 8
-                | (dev->dev_addr[5] & 0xFF);
-
-        /* Set group address. */
-        *pTimer_Struc++ = tp->group_address_0 << 8
-                | tp->group_address_0 >> 8;
-        *pTimer_Struc++ = tp->group_address[0] << 8
-                | tp->group_address[0] >> 8;
-        *pTimer_Struc++ = tp->group_address[1] << 8
-                | tp->group_address[1] >> 8;
-
-        /* Set functional address. */
-        *pTimer_Struc++ = tp->functional_address_0 << 8
-                | tp->functional_address_0 >> 8;
-        *pTimer_Struc++ = tp->functional_address[0] << 8
-                | tp->functional_address[0] >> 8;
-        *pTimer_Struc++ = tp->functional_address[1] << 8
-                | tp->functional_address[1] >> 8;
-
-        /* Set Bit-Wise group address. */
-        *pTimer_Struc++ = tp->bitwise_group_address[0] << 8
-                | tp->bitwise_group_address[0] >> 8;
-        *pTimer_Struc++ = tp->bitwise_group_address[1] << 8
-                | tp->bitwise_group_address[1] >> 8;
-
-        /* Set ring number address. */
-        *pTimer_Struc++ = tp->source_ring_number;
-        *pTimer_Struc++ = tp->target_ring_number;
-
-        /* Physical drop number. */
-        *pTimer_Struc++ = (unsigned short)0;
-        *pTimer_Struc++ = (unsigned short)0;
-
-        /* Product instance ID. */
-        for(i = 0; i < 9; i++)
-                *pTimer_Struc++ = (unsigned short)0;
-
-        err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0);
-
-        return err;
-}
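
The tail of smctr_issue_init_timers_cmd() packs the six-byte node address into three big-endian 16-bit words and byte-swaps the 16-bit group and functional address words before the block is handed to the adapter. A small stand-alone illustration of that packing (names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Pack a 6-byte MAC address into three big-endian 16-bit words. */
static void pack_node_addr(const uint8_t addr[6], uint16_t out[3])
{
	int i;

	for (i = 0; i < 3; i++)
		out[i] = (uint16_t)(addr[2 * i] << 8 | addr[2 * i + 1]);
}

/* Swap the two bytes of a 16-bit word, as done for the group and
 * functional address words. */
static uint16_t swap16(uint16_t w)
{
	return (uint16_t)(w << 8 | w >> 8);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x00, 0xc0, 0x12, 0x34, 0x56 };
	uint16_t words[3];

	pack_node_addr(mac, words);
	printf("%04x %04x %04x  swapped: %04x\n",
	       words[0], words[1], words[2], swap16(0x00C0));
	return 0;
}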
-
-static int smctr_issue_init_txrx_cmd(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i;
-        int err;
-        void **txrx_ptrs = (void *)tp->misc_command_data;
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        if((err = smctr_wait_cmd(dev)))
-	{
-                printk(KERN_ERR "%s: Hardware failure\n", dev->name);
-                return err;
-        }
-
-        /* Initialize Transmit Queue Pointers that are used, to point to
-         * a single FCB.
-         */
-        for(i = 0; i < NUM_TX_QS_USED; i++)
-                *txrx_ptrs++ = (void *)TRC_POINTER(tp->tx_fcb_head[i]);
-
-        /* Initialize Transmit Queue Pointers that are NOT used to ZERO. */
-        for(; i < MAX_TX_QS; i++)
-                *txrx_ptrs++ = (void *)0;
-
-        /* Initialize Receive Queue Pointers (MAC and Non-MAC) that are
-         * used, to point to a single FCB and a BDB chain of buffers.
-         */
-        for(i = 0; i < NUM_RX_QS_USED; i++)
-        {
-                *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_fcb_head[i]);
-                *txrx_ptrs++ = (void *)TRC_POINTER(tp->rx_bdb_head[i]);
-        }
-
-        /* Initialize Receive Queue Pointers that are NOT used to ZERO. */
-        for(; i < MAX_RX_QS; i++)
-        {
-                *txrx_ptrs++ = (void *)0;
-                *txrx_ptrs++ = (void *)0;
-        }
-
-        err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0);
-
-        return err;
-}
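
smctr_issue_init_txrx_cmd() lays out a fixed pointer table in misc_command_data: one card-relative FCB pointer per transmit queue in use, zeros for the unused transmit slots, then an FCB/BDB pointer pair per receive queue in use, again padded with zeros. A sketch of that table layout under assumed queue counts (the driver's own values may differ):

#include <stdint.h>
#include <string.h>

/* Placeholder queue counts. */
#define MAX_TX_QS      8
#define NUM_TX_QS_USED 3
#define MAX_RX_QS      2
#define NUM_RX_QS_USED 2

/* Used tx queues first, zero padding, then one (fcb, bdb) pair per
 * used rx queue, zero padding again. */
static void build_txrx_table(uint32_t table[MAX_TX_QS + 2 * MAX_RX_QS],
			     const uint32_t tx_fcb[NUM_TX_QS_USED],
			     const uint32_t rx_fcb[NUM_RX_QS_USED],
			     const uint32_t rx_bdb[NUM_RX_QS_USED])
{
	unsigned int i, n;

	memset(table, 0, (MAX_TX_QS + 2 * MAX_RX_QS) * sizeof(*table));

	for (i = 0; i < NUM_TX_QS_USED; i++)
		table[i] = tx_fcb[i];

	n = MAX_TX_QS;                     /* unused tx slots stay zero */
	for (i = 0; i < NUM_RX_QS_USED; i++) {
		table[n++] = rx_fcb[i];
		table[n++] = rx_bdb[i];
	}
	/* remaining rx slots stay zero */
}

int main(void)
{
	uint32_t table[MAX_TX_QS + 2 * MAX_RX_QS];
	const uint32_t tx[NUM_TX_QS_USED]  = { 0x100, 0x200, 0x300 };
	const uint32_t rxf[NUM_RX_QS_USED] = { 0x400, 0x500 };
	const uint32_t rxb[NUM_RX_QS_USED] = { 0x600, 0x700 };

	build_txrx_table(table, tx, rxf, rxb);
	return table[MAX_TX_QS] == 0x400 ? 0 : 1;
}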
-
-static int smctr_issue_insert_cmd(struct net_device *dev)
-{
-        int err;
-
-        err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP);
-
-        return err;
-}
-
-static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
-{
-        int err;
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        if((err = smctr_wait_cmd(dev)))
-                return err;
-
-        err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS,
-                RW_TRC_STATUS_BLOCK);
-
-        return err;
-}
-
-static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
-{
-        int err;
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        if((err = smctr_wait_cmd(dev)))
-                return err;
-
-        err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE,
-                aword_cnt);
-
-        return err;
-}
-
-static int smctr_issue_remove_cmd(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err;
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        tp->sclb_ptr->resume_control    = 0;
-        tp->sclb_ptr->valid_command     = SCLB_VALID | SCLB_CMD_REMOVE;
-
-        smctr_set_ctrl_attention(dev);
-
-        return 0;
-}
-
-static int smctr_issue_resume_acb_cmd(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err;
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        tp->sclb_ptr->resume_control = SCLB_RC_ACB;
-        tp->sclb_ptr->valid_command  = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
-
-        tp->acb_pending = 1;
-
-        smctr_set_ctrl_attention(dev);
-
-        return 0;
-}
-
-static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err;
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        if(queue == MAC_QUEUE)
-                tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB;
-        else
-                tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_BDB;
-
-        tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
-
-        smctr_set_ctrl_attention(dev);
-
-        return 0;
-}
-
-static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name);
-
-        if(smctr_wait_while_cbusy(dev))
-                return -1;
-
-        if(queue == MAC_QUEUE)
-                tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB;
-        else
-                tp->sclb_ptr->resume_control = SCLB_RC_RX_NON_MAC_FCB;
-
-        tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
-
-        smctr_set_ctrl_attention(dev);
-
-        return 0;
-}
-
-static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name);
-
-        if(smctr_wait_while_cbusy(dev))
-                return -1;
-
-        tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue);
-        tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID;
-
-        smctr_set_ctrl_attention(dev);
-
-        return 0;
-}
-
-static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
-{
-        int err;
-
-        err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
-                TRC_INTERNAL_ROM_TEST);
-
-        return err;
-}
-
-static int smctr_issue_test_hic_cmd(struct net_device *dev)
-{
-        int err;
-
-        err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST,
-                TRC_HOST_INTERFACE_REG_TEST);
-
-        return err;
-}
-
-static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
-{
-        int err;
-
-        err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
-                TRC_MAC_REGISTERS_TEST);
-
-        return err;
-}
-
-static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
-{
-        int err;
-
-        err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
-                TRC_INTERNAL_LOOPBACK);
-
-        return err;
-}
-
-static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
-{
-        int err;
-
-        err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
-                TRC_TRI_LOOPBACK);
-
-        return err;
-}
-
-static int smctr_issue_write_byte_cmd(struct net_device *dev,
-        short aword_cnt, void *byte)
-{
-	struct net_local *tp = netdev_priv(dev);
-        unsigned int iword, ibyte;
-	int err;
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        if((err = smctr_wait_cmd(dev)))
-                return err;
-
-        for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff);
-        	iword++, ibyte += 2)
-        {
-                tp->misc_command_data[iword] = (*((__u8 *)byte + ibyte) << 8)
-			| (*((__u8 *)byte + ibyte + 1));
-        }
-
-        return smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
-		aword_cnt);
-}
-
-static int smctr_issue_write_word_cmd(struct net_device *dev,
-        short aword_cnt, void *word)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i, err;
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        if((err = smctr_wait_cmd(dev)))
-                return err;
-
-        for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++)
-                tp->misc_command_data[i] = *((__u16 *)word + i);
-
-        err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
-                aword_cnt);
-
-        return err;
-}
-
-static int smctr_join_complete_state(struct net_device *dev)
-{
-        int err;
-
-        err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
-                JS_JOIN_COMPLETE_STATE);
-
-        return err;
-}
-
-static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i, j;
-        FCBlock *fcb;
-        BDBlock *bdb;
-
-        for(i = 0; i < NUM_TX_QS_USED; i++)
-        {
-                fcb = tp->tx_fcb_head[i];
-                bdb = tp->tx_bdb_head[i];
-
-                for(j = 0; j < tp->num_tx_fcbs[i]; j++)
-                {
-                        fcb->bdb_ptr            = bdb;
-                        fcb->trc_bdb_ptr        = TRC_POINTER(bdb);
-                        fcb = (FCBlock *)((char *)fcb + sizeof(FCBlock));
-                        bdb = (BDBlock *)((char *)bdb + sizeof(BDBlock));
-                }
-        }
-
-        return 0;
-}
-
-static int smctr_load_firmware(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-	const struct firmware *fw;
-        __u16 i, checksum = 0;
-        int err = 0;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_load_firmware\n", dev->name);
-
-	if (request_firmware(&fw, "tr_smctr.bin", &dev->dev)) {
-		printk(KERN_ERR "%s: firmware not found\n", dev->name);
-		return UCODE_NOT_PRESENT;
-	}
-
-        tp->num_of_tx_buffs     = 4;
-        tp->mode_bits          |= UMAC;
-        tp->receive_mask        = 0;
-        tp->max_packet_size     = 4177;
-
-        /* Can only upload the firmware once per adapter reset. */
-        if (tp->microcode_version != 0) {
-		err = (UCODE_PRESENT);
-		goto out;
-	}
-
-        /* Verify the firmware data is present and is a recent enough version. */
-        if (!fw->data ||
-	    (*(fw->data + UCODE_VERSION_OFFSET) < UCODE_VERSION))
-        {
-                err = (UCODE_NOT_PRESENT);
-		goto out;
-        }
-
-        /* UCODE_SIZE is not included in Checksum. */
-        for(i = 0; i < *((__u16 *)(fw->data + UCODE_SIZE_OFFSET)); i += 2)
-                checksum += *((__u16 *)(fw->data + 2 + i));
-        if (checksum) {
-		err = (UCODE_NOT_PRESENT);
-		goto out;
-	}
-
-        /* At this point we have a valid firmware image, let's kick it on up. */
-        smctr_enable_adapter_ram(dev);
-        smctr_enable_16bit(dev);
-        smctr_set_page(dev, (__u8 *)tp->ram_access);
-
-        if((smctr_checksum_firmware(dev)) ||
-	   (*(fw->data + UCODE_VERSION_OFFSET) > tp->microcode_version))
-        {
-                smctr_enable_adapter_ctrl_store(dev);
-
-                /* Zero out ram space for firmware. */
-                for(i = 0; i < CS_RAM_SIZE; i += 2)
-                        *((__u16 *)(tp->ram_access + i)) = 0;
-
-                smctr_decode_firmware(dev, fw);
-
-                tp->microcode_version = *(fw->data + UCODE_VERSION_OFFSET);
-                *((__u16 *)(tp->ram_access + CS_RAM_VERSION_OFFSET))
-                        = (tp->microcode_version << 8);
-                *((__u16 *)(tp->ram_access + CS_RAM_CHECKSUM_OFFSET))
-                        = ~(tp->microcode_version << 8) + 1;
-
-                smctr_disable_adapter_ctrl_store(dev);
-
-                if(smctr_checksum_firmware(dev))
-                        err = HARDWARE_FAILED;
-        }
-        else
-                err = UCODE_PRESENT;
-
-        smctr_disable_16bit(dev);
- out:
-	release_firmware(fw);
-        return err;
-}
-
-static int smctr_load_node_addr(struct net_device *dev)
-{
-        int ioaddr = dev->base_addr;
-        unsigned int i;
-        __u8 r;
-
-        for(i = 0; i < 6; i++)
-        {
-                r = inb(ioaddr + LAR0 + i);
-                dev->dev_addr[i] = (char)r;
-        }
-        dev->addr_len = 6;
-
-        return 0;
-}
-
-/* Lobe Media Test.
- * During the transmission of the initial 1500 lobe media MAC frames,
- * the phase lock loop in the 805 chip may lock, and then un-lock, causing
- * the 825 to go into a PURGE state. When performing a PURGE, the MCT
- * microcode will not transmit any frames given to it by the host, and
- * will consequently cause a timeout.
- *
- * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit
- * queues other than the one used for the lobe_media_test should be
- * disabled.
- *
- * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask
- * has any multi-cast or promiscuous bits set, the receive_mask needs to
- * be changed to clear the multi-cast or promiscuous mode bits, the lobe_test
- * run, and then the receive mask set back to its original value if the test
- * is successful.
- */
-static int smctr_lobe_media_test(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i, perror = 0;
-        unsigned short saved_rcv_mask;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_lobe_media_test\n", dev->name);
-
-        /* Clear receive mask for lobe test. */
-        saved_rcv_mask          = tp->receive_mask;
-        tp->receive_mask        = 0;
-
-        smctr_chg_rx_mask(dev);
-
-        /* Setup the lobe media test. */
-        smctr_lobe_media_test_cmd(dev);
-        if(smctr_wait_cmd(dev))
-		goto err;
-
-        /* Tx lobe media test frames. */
-        for(i = 0; i < 1500; ++i)
-        {
-                if(smctr_send_lobe_media_test(dev))
-                {
-                        if(perror)
-				goto err;
-                        else
-                        {
-                                perror = 1;
-                                if(smctr_lobe_media_test_cmd(dev))
-					goto err;
-                        }
-                }
-        }
-
-        if(smctr_send_dat(dev))
-        {
-                if(smctr_send_dat(dev))
-			goto err;
-        }
-
-        /* Check if any frames received during test. */
-        if((tp->rx_fcb_curr[MAC_QUEUE]->frame_status) ||
-	   (tp->rx_fcb_curr[NON_MAC_QUEUE]->frame_status))
-		goto err;
-
-        /* Set receive mask to "Promisc" mode. */
-        tp->receive_mask = saved_rcv_mask;
-
-        smctr_chg_rx_mask(dev);
-
-	 return 0;
-err:
-	smctr_reset_adapter(dev);
-	tp->status = CLOSED;
-	return LOBE_MEDIA_TEST_FAILED;
-}
-
-static int smctr_lobe_media_test_cmd(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_lobe_media_test_cmd\n", dev->name);
-
-        /* Change to lobe media test state. */
-        if(tp->monitor_state != MS_BEACON_TEST_STATE)
-        {
-                smctr_lobe_media_test_state(dev);
-                if(smctr_wait_cmd(dev))
-                {
-                        printk(KERN_ERR "Lobe Failed test state\n");
-                        return LOBE_MEDIA_TEST_FAILED;
-                }
-        }
-
-        err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
-                TRC_LOBE_MEDIA_TEST);
-
-        return err;
-}
-
-static int smctr_lobe_media_test_state(struct net_device *dev)
-{
-        int err;
-
-        err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
-                JS_LOBE_TEST_STATE);
-
-        return err;
-}
-
-static int smctr_make_8025_hdr(struct net_device *dev,
-        MAC_HEADER *rmf, MAC_HEADER *tmf, __u16 ac_fc)
-{
-        tmf->ac = MSB(ac_fc);                 /* msb is access control */
-        tmf->fc = LSB(ac_fc);                 /* lsb is frame control */
-
-        tmf->sa[0] = dev->dev_addr[0];
-        tmf->sa[1] = dev->dev_addr[1];
-        tmf->sa[2] = dev->dev_addr[2];
-        tmf->sa[3] = dev->dev_addr[3];
-        tmf->sa[4] = dev->dev_addr[4];
-        tmf->sa[5] = dev->dev_addr[5];
-
-        switch(tmf->vc)
-        {
-		/* Send RQ_INIT to RPS */
-                case RQ_INIT:
-                        tmf->da[0] = 0xc0;
-                        tmf->da[1] = 0x00;
-                        tmf->da[2] = 0x00;
-                        tmf->da[3] = 0x00;
-                        tmf->da[4] = 0x00;
-                        tmf->da[5] = 0x02;
-                        break;
-
-		/* Send RPT_TX_FORWARD to CRS */
-                case RPT_TX_FORWARD:
-                        tmf->da[0] = 0xc0;
-                        tmf->da[1] = 0x00;
-                        tmf->da[2] = 0x00;
-                        tmf->da[3] = 0x00;
-                        tmf->da[4] = 0x00;
-                        tmf->da[5] = 0x10;
-                        break;
-
-		/* Everything else goes to sender */
-                default:
-                        tmf->da[0] = rmf->sa[0];
-                        tmf->da[1] = rmf->sa[1];
-                        tmf->da[2] = rmf->sa[2];
-                        tmf->da[3] = rmf->sa[3];
-                        tmf->da[4] = rmf->sa[4];
-                        tmf->da[5] = rmf->sa[5];
-                        break;
-        }
-
-        return 0;
-}
-
-static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        tsv->svi = AUTHORIZED_ACCESS_PRIORITY;
-        tsv->svl = S_AUTHORIZED_ACCESS_PRIORITY;
-
-        tsv->svv[0] = MSB(tp->authorized_access_priority);
-        tsv->svv[1] = LSB(tp->authorized_access_priority);
-
-	return 0;
-}
-
-static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
-{
-        tsv->svi = ADDRESS_MODIFER;
-        tsv->svl = S_ADDRESS_MODIFER;
-
-        tsv->svv[0] = 0;
-        tsv->svv[1] = 0;
-
-        return 0;
-}
-
-static int smctr_make_auth_funct_class(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        tsv->svi = AUTHORIZED_FUNCTION_CLASS;
-        tsv->svl = S_AUTHORIZED_FUNCTION_CLASS;
-
-        tsv->svv[0] = MSB(tp->authorized_function_classes);
-        tsv->svv[1] = LSB(tp->authorized_function_classes);
-
-        return 0;
-}
-
-static int smctr_make_corr(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv, __u16 correlator)
-{
-        tsv->svi = CORRELATOR;
-        tsv->svl = S_CORRELATOR;
-
-        tsv->svv[0] = MSB(correlator);
-        tsv->svv[1] = LSB(correlator);
-
-        return 0;
-}
-
-static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        smctr_get_functional_address(dev);
-
-        tsv->svi = FUNCTIONAL_ADDRESS;
-        tsv->svl = S_FUNCTIONAL_ADDRESS;
-
-        tsv->svv[0] = MSB(tp->misc_command_data[0]);
-        tsv->svv[1] = LSB(tp->misc_command_data[0]);
-
-        tsv->svv[2] = MSB(tp->misc_command_data[1]);
-        tsv->svv[3] = LSB(tp->misc_command_data[1]);
-
-        return 0;
-}
-
-static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        smctr_get_group_address(dev);
-
-        tsv->svi = GROUP_ADDRESS;
-        tsv->svl = S_GROUP_ADDRESS;
-
-        tsv->svv[0] = MSB(tp->misc_command_data[0]);
-        tsv->svv[1] = LSB(tp->misc_command_data[0]);
-
-        tsv->svv[2] = MSB(tp->misc_command_data[1]);
-        tsv->svv[3] = LSB(tp->misc_command_data[1]);
-
-        /* Set Group Address Sub-vector to all zeros if only the
-         * Group Address/Functional Address Indicator is set.
-         */
-        if(tsv->svv[0] == 0x80 && tsv->svv[1] == 0x00 &&
-	   tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
-                tsv->svv[0] = 0x00;
-
-        return 0;
-}
-
-static int smctr_make_phy_drop_num(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        smctr_get_physical_drop_number(dev);
-
-        tsv->svi = PHYSICAL_DROP;
-        tsv->svl = S_PHYSICAL_DROP;
-
-        tsv->svv[0] = MSB(tp->misc_command_data[0]);
-        tsv->svv[1] = LSB(tp->misc_command_data[0]);
-
-        tsv->svv[2] = MSB(tp->misc_command_data[1]);
-        tsv->svv[3] = LSB(tp->misc_command_data[1]);
-
-        return 0;
-}
-
-static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
-{
-        int i;
-
-        tsv->svi = PRODUCT_INSTANCE_ID;
-        tsv->svl = S_PRODUCT_INSTANCE_ID;
-
-        for(i = 0; i < 18; i++)
-                tsv->svv[i] = 0xF0;
-
-        return 0;
-}
-
-static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        smctr_get_station_id(dev);
-
-        tsv->svi = STATION_IDENTIFER;
-        tsv->svl = S_STATION_IDENTIFER;
-
-        tsv->svv[0] = MSB(tp->misc_command_data[0]);
-        tsv->svv[1] = LSB(tp->misc_command_data[0]);
-
-        tsv->svv[2] = MSB(tp->misc_command_data[1]);
-        tsv->svv[3] = LSB(tp->misc_command_data[1]);
-
-        tsv->svv[4] = MSB(tp->misc_command_data[2]);
-        tsv->svv[5] = LSB(tp->misc_command_data[2]);
-
-        return 0;
-}
-
-static int smctr_make_ring_station_status(struct net_device *dev,
-        MAC_SUB_VECTOR * tsv)
-{
-        tsv->svi = RING_STATION_STATUS;
-        tsv->svl = S_RING_STATION_STATUS;
-
-        tsv->svv[0] = 0;
-        tsv->svv[1] = 0;
-        tsv->svv[2] = 0;
-        tsv->svv[3] = 0;
-        tsv->svv[4] = 0;
-        tsv->svv[5] = 0;
-
-        return 0;
-}
-
-static int smctr_make_ring_station_version(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        tsv->svi = RING_STATION_VERSION_NUMBER;
-        tsv->svl = S_RING_STATION_VERSION_NUMBER;
-
-        tsv->svv[0] = 0xe2;            /* EBCDIC - S */
-        tsv->svv[1] = 0xd4;            /* EBCDIC - M */
-        tsv->svv[2] = 0xc3;            /* EBCDIC - C */
-        tsv->svv[3] = 0x40;            /* EBCDIC -   */
-        tsv->svv[4] = 0xe5;            /* EBCDIC - V */
-        tsv->svv[5] = 0xF0 + (tp->microcode_version >> 4);
-        tsv->svv[6] = 0xF0 + (tp->microcode_version & 0x0f);
-        tsv->svv[7] = 0x40;            /* EBCDIC -   */
-        tsv->svv[8] = 0xe7;            /* EBCDIC - X */
-
-        if(tp->extra_info & CHIP_REV_MASK)
-                tsv->svv[9] = 0xc5;    /* EBCDIC - E */
-        else
-                tsv->svv[9] = 0xc4;    /* EBCDIC - D */
-
-        return 0;
-}
-
-static int smctr_make_tx_status_code(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv, __u16 tx_fstatus)
-{
-        tsv->svi = TRANSMIT_STATUS_CODE;
-        tsv->svl = S_TRANSMIT_STATUS_CODE;
-
-	tsv->svv[0] = (((tx_fstatus & 0x0100) >> 6) | IBM_PASS_SOURCE_ADDR);
-
-        /* Stripped frame status of Transmitted Frame */
-        tsv->svv[1] = tx_fstatus & 0xff;
-
-        return 0;
-}
-
-static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
-        MAC_SUB_VECTOR *tsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        smctr_get_upstream_neighbor_addr(dev);
-
-        tsv->svi = UPSTREAM_NEIGHBOR_ADDRESS;
-        tsv->svl = S_UPSTREAM_NEIGHBOR_ADDRESS;
-
-        tsv->svv[0] = MSB(tp->misc_command_data[0]);
-        tsv->svv[1] = LSB(tp->misc_command_data[0]);
-
-        tsv->svv[2] = MSB(tp->misc_command_data[1]);
-        tsv->svv[3] = LSB(tp->misc_command_data[1]);
-
-        tsv->svv[4] = MSB(tp->misc_command_data[2]);
-        tsv->svv[5] = LSB(tp->misc_command_data[2]);
-
-        return 0;
-}
-
-static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
-{
-        tsv->svi = WRAP_DATA;
-        tsv->svl = S_WRAP_DATA;
-
-        return 0;
-}
-
-/*
- * Open/initialize the board. This is called sometime after
- * booting when the 'ifconfig' program is run.
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- */
-static int smctr_open(struct net_device *dev)
-{
-        int err;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_open\n", dev->name);
-
-        err = smctr_init_adapter(dev);
-        if(err < 0)
-                return err;
-
-        return err;
-}
-
-/* Interrupt driven open of Token card. */
-static int smctr_open_tr(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned long flags;
-        int err;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_open_tr\n", dev->name);
-
-        /* Now we can actually open the adapter. */
-        if(tp->status == OPEN)
-                return 0;
-        if(tp->status != INITIALIZED)
-                return -1;
-
-	/* FIXME: it would work a lot better if we masked the irq sources
-	   on the card here, then we could skip the locking and poll nicely */
-	spin_lock_irqsave(&tp->lock, flags);
-	
-        smctr_set_page(dev, (__u8 *)tp->ram_access);
-
-        if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)MAC_QUEUE)))
-                goto out;
-
-        if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)MAC_QUEUE)))
-                goto out;
-
-        if((err = smctr_issue_resume_rx_fcb_cmd(dev, (short)NON_MAC_QUEUE)))
-                goto out;
-
-        if((err = smctr_issue_resume_rx_bdb_cmd(dev, (short)NON_MAC_QUEUE)))
-                goto out;
-
-        tp->status = CLOSED;
-
-        /* Insert into the Ring or Enter Loopback Mode. */
-        if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_1)
-        {
-                tp->status = CLOSED;
-
-                if(!(err = smctr_issue_trc_loopback_cmd(dev)))
-                {
-                        if(!(err = smctr_wait_cmd(dev)))
-                                tp->status = OPEN;
-                }
-
-                smctr_status_chg(dev);
-        }
-        else
-        {
-                if((tp->mode_bits & LOOPING_MODE_MASK) == LOOPBACK_MODE_2)
-                {
-                        tp->status = CLOSED;
-                        if(!(err = smctr_issue_tri_loopback_cmd(dev)))
-                        {
-                                if(!(err = smctr_wait_cmd(dev)))
-                                        tp->status = OPEN;
-                        }
-
-                        smctr_status_chg(dev);
-                }
-                else
-                {
-                        if((tp->mode_bits & LOOPING_MODE_MASK)
-                                == LOOPBACK_MODE_3)
-                        {
-                                tp->status = CLOSED;
-                                if(!(err = smctr_lobe_media_test_cmd(dev)))
-                                {
-                                        if(!(err = smctr_wait_cmd(dev)))
-                                                tp->status = OPEN;
-                                }
-                                smctr_status_chg(dev);
-                        }
-                        else
-                        {
-                                if(!(err = smctr_lobe_media_test(dev)))
-                                        err = smctr_issue_insert_cmd(dev);
-				else
-                                {
-                                        if(err == LOBE_MEDIA_TEST_FAILED)
-                                                printk(KERN_WARNING "%s: Lobe Media Test Failure - Check cable?\n", dev->name);
-                                }
-                        }
-                }
-        }
-
-out:
-        spin_unlock_irqrestore(&tp->lock, flags);
-
-        return err;
-}
-
-/* Check for a network adapter of this type, 
- * and return device structure if one exists.
- */
-struct net_device __init *smctr_probe(int unit)
-{
-	struct net_device *dev = alloc_trdev(sizeof(struct net_local));
-	static const unsigned ports[] = {
-		0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, 0x300,
-		0x320, 0x340, 0x360, 0x380, 0
-	};
-	const unsigned *port;
-        int err = 0;
-
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	if (unit >= 0) {
-		sprintf(dev->name, "tr%d", unit);
-		netdev_boot_setup_check(dev);
-	}
-
-        if (dev->base_addr > 0x1ff)    /* Check a single specified location. */
-		err = smctr_probe1(dev, dev->base_addr);
-        else if(dev->base_addr != 0)  /* Don't probe at all. */
-                err = -ENXIO;
-	else {
-		for (port = ports; *port; port++) {
-			err = smctr_probe1(dev, *port);
-			if (!err)
-				break;
-		}
-	}
-	if (err)
-		goto out;
-	err = register_netdev(dev);
-	if (err)
-		goto out1;
-	return dev;
-out1:
-#ifdef CONFIG_MCA_LEGACY
-	{ struct net_local *tp = netdev_priv(dev);
-	  if (tp->slot_num)
-		mca_mark_as_unused(tp->slot_num);
-	}
-#endif
-	release_region(dev->base_addr, SMCTR_IO_EXTENT);
-	free_irq(dev->irq, dev);
-out:
-	free_netdev(dev);
-	return ERR_PTR(err);
-}
-
-static const struct net_device_ops smctr_netdev_ops = {
-	.ndo_open          = smctr_open,
-	.ndo_stop          = smctr_close,
-	.ndo_start_xmit    = smctr_send_packet,
-	.ndo_tx_timeout	   = smctr_timeout,
-	.ndo_get_stats     = smctr_get_stats,
-	.ndo_set_rx_mode   = smctr_set_multicast_list,
-};
-
-static int __init smctr_probe1(struct net_device *dev, int ioaddr)
-{
-        static unsigned version_printed;
-        struct net_local *tp = netdev_priv(dev);
-        int err;
-        __u32 *ram;
-
-        if(smctr_debug && version_printed++ == 0)
-                printk(version);
-
-        spin_lock_init(&tp->lock);
-        dev->base_addr = ioaddr;
-
-	/* Actually detect an adapter now. */
-        err = smctr_chk_isa(dev);
-        if(err < 0)
-        {
-		if ((err = smctr_chk_mca(dev)) < 0) {
-			err = -ENODEV;
-			goto out;
-		}
-        }
-
-        tp = netdev_priv(dev);
-        dev->mem_start = tp->ram_base;
-        dev->mem_end = dev->mem_start + 0x10000;
-        ram = (__u32 *)phys_to_virt(dev->mem_start);
-        tp->ram_access = *(__u32 *)&ram;
-	tp->status = NOT_INITIALIZED;
-
-        err = smctr_load_firmware(dev);
-        if(err != UCODE_PRESENT && err != SUCCESS)
-        {
-                printk(KERN_ERR "%s: Firmware load failed (%d)\n", dev->name, err);
-		err = -EIO;
-		goto out;
-        }
-
-	/* Allow user to specify ring speed on module insert. */
-	if(ringspeed == 4)
-		tp->media_type = MEDIA_UTP_4;
-	else
-		tp->media_type = MEDIA_UTP_16;
-
-        printk(KERN_INFO "%s: %s %s at Io %#4x, Irq %d, Rom %#4x, Ram %#4x.\n",
-                dev->name, smctr_name, smctr_model,
-                (unsigned int)dev->base_addr,
-                dev->irq, tp->rom_base, tp->ram_base);
-
-	dev->netdev_ops = &smctr_netdev_ops;
-        dev->watchdog_timeo	= HZ;
-        return 0;
-
-out:
-	return err;
-}
-
-static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
-        struct net_device *dev, __u16 rx_status)
-{
-        struct net_local *tp = netdev_priv(dev);
-        struct sk_buff *skb;
-        __u16 rcode, correlator;
-        int err = 0;
-        __u8 xframe = 1;
-
-        rmf->vl = SWAP_BYTES(rmf->vl);
-        if(rx_status & FCB_RX_STATUS_DA_MATCHED)
-        {
-                switch(rmf->vc)
-                {
-                        /* Received MAC Frames Processed by RS. */
-                        case INIT:
-                                if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED)
-                                {
-                                        return rcode;
-                                }
-
-                                if((err = smctr_send_rsp(dev, rmf, rcode,
-                                        correlator)))
-                                {
-                                        return err;
-                                }
-                                break;
-
-                        case CHG_PARM:
-                                if((rcode = smctr_rcv_chg_param(dev, rmf,
-                                        &correlator)) ==HARDWARE_FAILED)
-                                {
-                                        return rcode;
-                                }
-
-                                if((err = smctr_send_rsp(dev, rmf, rcode,
-                                        correlator)))
-                                {
-                                        return err;
-                                }
-                                break;
-
-                        case RQ_ADDR:
-                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
-                                        rmf, &correlator)) != POSITIVE_ACK)
-                                {
-                                        if(rcode == HARDWARE_FAILED)
-                                                return rcode;
-                                        else
-                                                return smctr_send_rsp(dev, rmf,
-                                                        rcode, correlator);
-                                }
-
-                                if((err = smctr_send_rpt_addr(dev, rmf,
-                                        correlator)))
-                                {
-                                        return err;
-                                }
-                                break;
-
-                        case RQ_ATTCH:
-                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
-                                        rmf, &correlator)) != POSITIVE_ACK)
-                                {
-                                        if(rcode == HARDWARE_FAILED)
-                                                return rcode;
-                                        else
-                                                return smctr_send_rsp(dev, rmf,
-                                                        rcode,
-                                                        correlator);
-                                }
-
-                                if((err = smctr_send_rpt_attch(dev, rmf,
-                                        correlator)))
-                                {
-                                        return err;
-                                }
-                                break;
-
-                        case RQ_STATE:
-                                if((rcode = smctr_rcv_rq_addr_state_attch(dev,
-                                        rmf, &correlator)) != POSITIVE_ACK)
-                                {
-                                        if(rcode == HARDWARE_FAILED)
-                                                return rcode;
-                                        else
-                                                return smctr_send_rsp(dev, rmf,
-                                                        rcode,
-                                                        correlator);
-                                }
-
-                                if((err = smctr_send_rpt_state(dev, rmf,
-                                        correlator)))
-                                {
-                                        return err;
-                                }
-                                break;
-
-                        case TX_FORWARD: {
-        			__u16 uninitialized_var(tx_fstatus);
-
-                                if((rcode = smctr_rcv_tx_forward(dev, rmf))
-                                        != POSITIVE_ACK)
-                                {
-                                        if(rcode == HARDWARE_FAILED)
-                                                return rcode;
-                                        else
-                                                return smctr_send_rsp(dev, rmf,
-                                                        rcode,
-                                                        correlator);
-                                }
-
-                                if((err = smctr_send_tx_forward(dev, rmf,
-                                        &tx_fstatus)) == HARDWARE_FAILED)
-                                {
-                                        return err;
-                                }
-
-                                if(err == A_FRAME_WAS_FORWARDED)
-                                {
-                                        if((err = smctr_send_rpt_tx_forward(dev,
-						rmf, tx_fstatus))
-                                                == HARDWARE_FAILED)
-                                        {
-                                                return err;
-                                        }
-                                }
-                                break;
-			}
-
-                        /* Received MAC Frames Processed by CRS/REM/RPS. */
-                        case RSP:
-                        case RQ_INIT:
-                        case RPT_NEW_MON:
-                        case RPT_SUA_CHG:
-                        case RPT_ACTIVE_ERR:
-                        case RPT_NN_INCMP:
-                        case RPT_ERROR:
-                        case RPT_ATTCH:
-                        case RPT_STATE:
-                        case RPT_ADDR:
-                                break;
-
-                        /* Rcvd Att. MAC Frame (if RXATMAC set) or UNKNOWN */
-                        default:
-                                xframe = 0;
-                                if(!(tp->receive_mask & ACCEPT_ATT_MAC_FRAMES))
-                                {
-                                        rcode = smctr_rcv_unknown(dev, rmf,
-                                                &correlator);
-                                        if((err = smctr_send_rsp(dev, rmf,rcode,
-                                                correlator)))
-                                        {
-                                                return err;
-                                        }
-                                }
-
-                                break;
-                }
-        }
-        else
-        {
-                /* 1. DA doesn't match (Promiscuous Mode).
-                 * 2. Parse for Extended MAC Frame Type.
-                 */
-                switch(rmf->vc)
-                {
-                        case RSP:
-                        case INIT:
-                        case RQ_INIT:
-                        case RQ_ADDR:
-                        case RQ_ATTCH:
-                        case RQ_STATE:
-                        case CHG_PARM:
-                        case RPT_ADDR:
-                        case RPT_ERROR:
-                        case RPT_ATTCH:
-                        case RPT_STATE:
-                        case RPT_NEW_MON:
-                        case RPT_SUA_CHG:
-                        case RPT_NN_INCMP:
-                        case RPT_ACTIVE_ERR:
-                                break;
-
-                        default:
-                                xframe = 0;
-                                break;
-                }
-        }
-
-        /* NOTE: UNKNOWN MAC frames will NOT be passed up unless
-         * ACCEPT_ATT_MAC_FRAMES is set.
-         */
-        if(((tp->receive_mask & ACCEPT_ATT_MAC_FRAMES) &&
-	    (xframe == (__u8)0)) ||
-	   ((tp->receive_mask & ACCEPT_EXT_MAC_FRAMES) &&
-	    (xframe == (__u8)1)))
-        {
-                rmf->vl = SWAP_BYTES(rmf->vl);
-
-                if (!(skb = dev_alloc_skb(size)))
-			return -ENOMEM;
-                skb->len = size;
-
-                /* Slide data into a sleek skb. */
-                skb_put(skb, skb->len);
-                skb_copy_to_linear_data(skb, rmf, skb->len);
-
-                /* Update Counters */
-                tp->MacStat.rx_packets++;
-                tp->MacStat.rx_bytes += skb->len;
-
-                /* Kick the packet on up. */
-                skb->protocol = tr_type_trans(skb, dev);
-                netif_rx(skb);
-                err = 0;
-        }
-
-        return err;
-}
-
-/* Adapter RAM test. Incremental word ODD boundary data test. */
-static int smctr_ram_memory_test(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        __u16 page, pages_of_ram, start_pattern = 0, word_pattern = 0,
-                word_read = 0, err_word = 0, err_pattern = 0;
-        unsigned int err_offset;
-        __u32 j, pword;
-        __u8 err = 0;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_ram_memory_test\n", dev->name);
-
-        start_pattern   = 0x0001;
-        pages_of_ram    = tp->ram_size / tp->ram_usable;
-        pword           = tp->ram_access;
-
-        /* Incremental word ODD boundary test. */
-        for(page = 0; (page < pages_of_ram) && (~err);
-                page++, start_pattern += 0x8000)
-        {
-                smctr_set_page(dev, (__u8 *)(tp->ram_access
-                        + (page * tp->ram_usable * 1024) + 1));
-                word_pattern = start_pattern;
-
-                for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1; j += 2)
-                        *(__u16 *)(pword + j) = word_pattern++;
-
-                word_pattern = start_pattern;
-
-                for(j = 1; j < (__u32)(tp->ram_usable * 1024) - 1 && (~err);
-		    j += 2, word_pattern++)
-                {
-                        word_read = *(__u16 *)(pword + j);
-                        if(word_read != word_pattern)
-                        {
-                                err             = (__u8)1;
-                                err_offset      = j;
-                                err_word        = word_read;
-                                err_pattern     = word_pattern;
-                                return RAM_TEST_FAILED;
-                        }
-                }
-        }
-
-        /* Zero out memory. */
-        for(page = 0; page < pages_of_ram && (~err); page++)
-        {
-                smctr_set_page(dev, (__u8 *)(tp->ram_access
-                        + (page * tp->ram_usable * 1024)));
-                word_pattern = 0;
-
-                for(j = 0; j < (__u32)tp->ram_usable * 1024; j +=2)
-                        *(__u16 *)(pword + j) = word_pattern;
-
-                for(j =0; j < (__u32)tp->ram_usable * 1024 && (~err); j += 2)
-                {
-                        word_read = *(__u16 *)(pword + j);
-                        if(word_read != word_pattern)
-                        {
-                                err             = (__u8)1;
-                                err_offset      = j;
-                                err_word        = word_read;
-                                err_pattern     = word_pattern;
-                                return RAM_TEST_FAILED;
-                        }
-                }
-        }
-
-        smctr_set_page(dev, (__u8 *)tp->ram_access);
-
-        return 0;
-}
-
-static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 *correlator)
-{
-        MAC_SUB_VECTOR *rsv;
-        signed short vlen;
-        __u16 rcode = POSITIVE_ACK;
-        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
-
-        /* This Frame can only come from a CRS */
-        if((rmf->dc_sc & SC_MASK) != SC_CRS)
-                return E_INAPPROPRIATE_SOURCE_CLASS;
-
-        /* Remove MVID Length from total length. */
-        vlen = (signed short)rmf->vl - 4;
-
-        /* Point to First SVID */
-        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
-
-        /* Search for Appropriate SVIDs. */
-        while((vlen > 0) && (rcode == POSITIVE_ACK))
-        {
-                switch(rsv->svi)
-                {
-                        case CORRELATOR:
-                                svectors |= F_CORRELATOR;
-                                rcode = smctr_set_corr(dev, rsv, correlator);
-                                break;
-
-                        case LOCAL_RING_NUMBER:
-                                svectors |= F_LOCAL_RING_NUMBER;
-                                rcode = smctr_set_local_ring_num(dev, rsv);
-                                break;
-
-                        case ASSIGN_PHYSICAL_DROP:
-                                svectors |= F_ASSIGN_PHYSICAL_DROP;
-                                rcode = smctr_set_phy_drop(dev, rsv);
-                                break;
-
-                        case ERROR_TIMER_VALUE:
-                                svectors |= F_ERROR_TIMER_VALUE;
-                                rcode = smctr_set_error_timer_value(dev, rsv);
-                                break;
-
-                        case AUTHORIZED_FUNCTION_CLASS:
-                                svectors |= F_AUTHORIZED_FUNCTION_CLASS;
-                                rcode = smctr_set_auth_funct_class(dev, rsv);
-                                break;
-
-                        case AUTHORIZED_ACCESS_PRIORITY:
-                                svectors |= F_AUTHORIZED_ACCESS_PRIORITY;
-                                rcode = smctr_set_auth_access_pri(dev, rsv);
-                                break;
-
-                        default:
-                                rcode = E_SUB_VECTOR_UNKNOWN;
-                                break;
-                }
-
-                /* Let Sender Know if SUM of SV lengths is
-                 * larger than length in MVID length field
-                 */
-                if((vlen -= rsv->svl) < 0)
-                        rcode = E_VECTOR_LENGTH_ERROR;
-
-                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
-        }
-
-        if(rcode == POSITIVE_ACK)
-        {
-                /* Let Sender Know if MVID length field
-                 * is larger than SUM of SV lengths
-                 */
-                if(vlen != 0)
-                        rcode = E_VECTOR_LENGTH_ERROR;
-                else
-		{
-                	/* Let Sender Know if Expected SVID Missing */
-                	if((svectors & R_CHG_PARM) ^ R_CHG_PARM)
-                        	rcode = E_MISSING_SUB_VECTOR;
-		}
-        }
-
-        return rcode;
-}
-
-static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 *correlator)
-{
-        MAC_SUB_VECTOR *rsv;
-        signed short vlen;
-        __u16 rcode = POSITIVE_ACK;
-        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
-
-        /* This Frame can only come from a RPS */
-        if((rmf->dc_sc & SC_MASK) != SC_RPS)
-                return E_INAPPROPRIATE_SOURCE_CLASS;
-
-        /* Remove MVID Length from total length. */
-        vlen = (signed short)rmf->vl - 4;
-
-        /* Point to First SVID */
-        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
-
-        /* Search for Appropriate SVIDs */
-        while((vlen > 0) && (rcode == POSITIVE_ACK))
-        {
-                switch(rsv->svi)
-                {
-                        case CORRELATOR:
-                                svectors |= F_CORRELATOR;
-                                rcode = smctr_set_corr(dev, rsv, correlator);
-                                break;
-
-                        case LOCAL_RING_NUMBER:
-                                svectors |= F_LOCAL_RING_NUMBER;
-                                rcode = smctr_set_local_ring_num(dev, rsv);
-                                break;
-
-                        case ASSIGN_PHYSICAL_DROP:
-                                svectors |= F_ASSIGN_PHYSICAL_DROP;
-                                rcode = smctr_set_phy_drop(dev, rsv);
-                                break;
-
-                        case ERROR_TIMER_VALUE:
-                                svectors |= F_ERROR_TIMER_VALUE;
-                                rcode = smctr_set_error_timer_value(dev, rsv);
-                                break;
-
-                        default:
-                                rcode = E_SUB_VECTOR_UNKNOWN;
-                                break;
-                }
-
-                /* Let Sender Know if SUM of SV lengths is
-                 * larger than length in MVID length field
-		 */
-                if((vlen -= rsv->svl) < 0)
-                        rcode = E_VECTOR_LENGTH_ERROR;
-
-                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
-        }
-
-        if(rcode == POSITIVE_ACK)
-        {
-                /* Let Sender Know if MVID length field
-                 * is larger than SUM of SV lengths
-                 */
-                if(vlen != 0)
-                        rcode = E_VECTOR_LENGTH_ERROR;
-                else
-		{
-                	/* Let Sender Know if Expected SV Missing */
-                	if((svectors & R_INIT) ^ R_INIT)
-                        	rcode = E_MISSING_SUB_VECTOR;
-		}
-        }
-
-        return rcode;
-}
-
-static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
-{
-        MAC_SUB_VECTOR *rsv;
-        signed short vlen;
-        __u16 rcode = POSITIVE_ACK;
-        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
-
-        /* This Frame can only come from a CRS */
-        if((rmf->dc_sc & SC_MASK) != SC_CRS)
-                return E_INAPPROPRIATE_SOURCE_CLASS;
-
-        /* Remove MVID Length from total length */
-        vlen = (signed short)rmf->vl - 4;
-
-        /* Point to First SVID */
-        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
-
-        /* Search for Appropriate SVIDs */
-        while((vlen > 0) && (rcode == POSITIVE_ACK))
-        {
-                switch(rsv->svi)
-                {
-                        case FRAME_FORWARD:
-                                svectors |= F_FRAME_FORWARD;
-                                rcode = smctr_set_frame_forward(dev, rsv, 
-					rmf->dc_sc);
-                                break;
-
-                        default:
-                                rcode = E_SUB_VECTOR_UNKNOWN;
-                                break;
-                }
-
-                /* Let Sender Know if SUM of SV lengths is
-                 * larger than length in MVID length field
-		 */
-                if((vlen -= rsv->svl) < 0)
-                        rcode = E_VECTOR_LENGTH_ERROR;
-
-                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
-        }
-
-        if(rcode == POSITIVE_ACK)
-        {
-                /* Let Sender Know if MVID length field
-                 * is larger than SUM of SV lengths
-                 */
-                if(vlen != 0)
-                        rcode = E_VECTOR_LENGTH_ERROR;
-                else
-		{
-                	/* Let Sender Know if Expected SV Missing */
-                	if((svectors & R_TX_FORWARD) ^ R_TX_FORWARD)
-                        	rcode = E_MISSING_SUB_VECTOR;
-		}
-        }
-
-        return rcode;
-}
-
-static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
-        MAC_HEADER *rmf, __u16 *correlator)
-{
-        MAC_SUB_VECTOR *rsv;
-        signed short vlen;
-        __u16 rcode = POSITIVE_ACK;
-        unsigned int svectors = F_NO_SUB_VECTORS_FOUND;
-
-        /* Remove MVID Length from total length */
-        vlen = (signed short)rmf->vl - 4;
-
-        /* Point to First SVID */
-        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
-
-        /* Search for Appropriate SVIDs */
-        while((vlen > 0) && (rcode == POSITIVE_ACK))
-        {
-                switch(rsv->svi)
-                {
-                        case CORRELATOR:
-                                svectors |= F_CORRELATOR;
-                                rcode = smctr_set_corr(dev, rsv, correlator);
-                                break;
-
-                        default:
-                                rcode = E_SUB_VECTOR_UNKNOWN;
-                                break;
-                }
-
-                /* Let Sender Know if SUM of SV lengths is
-                 * larger than length in MVID length field
-                 */
-                if((vlen -= rsv->svl) < 0)
-                        rcode = E_VECTOR_LENGTH_ERROR;
-
-                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
-        }
-
-        if(rcode == POSITIVE_ACK)
-        {
-                /* Let Sender Know if MVID length field
-                 * is larger than SUM of SV lengths
-                 */
-                if(vlen != 0)
-                        rcode = E_VECTOR_LENGTH_ERROR;
-                else
-		{
-                	/* Let Sender Know if Expected SVID Missing */
-                	if((svectors & R_RQ_ATTCH_STATE_ADDR) 
-				^ R_RQ_ATTCH_STATE_ADDR)
-                        	rcode = E_MISSING_SUB_VECTOR;
-			}
-        }
-
-        return rcode;
-}
-
-static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 *correlator)
-{
-        MAC_SUB_VECTOR *rsv;
-        signed short vlen;
-
-        *correlator = 0;
-
-        /* Remove MVID Length from total length */
-        vlen = (signed short)rmf->vl - 4;
-
-        /* Point to First SVID */
-        rsv = (MAC_SUB_VECTOR *)((__u32)rmf + sizeof(MAC_HEADER));
-
-        /* Search for CORRELATOR for RSP to UNKNOWN */
-        while((vlen > 0) && (*correlator == 0))
-        {
-                switch(rsv->svi)
-                {
-                        case CORRELATOR:
-                                smctr_set_corr(dev, rsv, correlator);
-                                break;
-
-                        default:
-                                break;
-                }
-
-                vlen -= rsv->svl;
-                rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
-        }
-
-        return E_UNRECOGNIZED_VECTOR_ID;
-}
-
-/*
- * Reset the 825 NIC and exit with:
- * 1. The NIC reset cleared (non-reset state), halted and un-initialized.
- * 2. TINT masked.
- * 3. CBUSY masked.
- * 4. TINT clear.
- * 5. CBUSY clear.
- */
-static int smctr_reset_adapter(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-
-        /* Resetting the NIC will put it in a halted and un-initialized state. */
-        smctr_set_trc_reset(ioaddr);
-        mdelay(200); /* ~2 ms */
-
-        smctr_clear_trc_reset(ioaddr);
-        mdelay(200); /* ~2 ms */
-
-        /* Remove any latched interrupts that occurred prior to resetting the
-         * adapter or possibly caused by line glitches due to the reset.
-         */
-        outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR);
-
-        return 0;
-}
-
-static int smctr_restart_tx_chain(struct net_device *dev, short queue)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err = 0;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_restart_tx_chain\n", dev->name);
-
-        if(tp->num_tx_fcbs_used[queue] != 0 &&
-	   tp->tx_queue_status[queue] == NOT_TRANSMITING)
-        {
-                tp->tx_queue_status[queue] = TRANSMITING;
-                err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
-        }
-
-        return err;
-}
-
-static int smctr_ring_status_chg(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_ring_status_chg\n", dev->name);
-
-        /* Check for ring_status_flag: whenever the MONITOR_STATE_BIT
-         * is set, check the value of monitor_state; only then do we
-         * enable and start the transmit/receive timeout (if and only
-         * if it is MS_ACTIVE_MONITOR_STATE or MS_STANDBY_MONITOR_STATE).
-         */
-        if(tp->ring_status_flags == MONITOR_STATE_CHANGED)
-        {
-                if((tp->monitor_state == MS_ACTIVE_MONITOR_STATE) ||
-		   (tp->monitor_state == MS_STANDBY_MONITOR_STATE))
-                {
-                        tp->monitor_state_ready = 1;
-                }
-                else
-                {
-                        /* if adapter is NOT in either active monitor
-                         * or standby monitor state => Disable
-                         * transmit/receive timeout.
-                         */
-                        tp->monitor_state_ready = 0;
-
-			/* Ring speed problem, switching to auto mode. */
-			if(tp->monitor_state == MS_MONITOR_FSM_INACTIVE &&
-			   !tp->cleanup)
-			{
-				printk(KERN_INFO "%s: Incorrect ring speed switching.\n",
-					dev->name);
-				smctr_set_ring_speed(dev);
-			}
-                }
-        }
-
-        if(!(tp->ring_status_flags & RING_STATUS_CHANGED))
-                return 0;
-
-        switch(tp->ring_status)
-        {
-                case RING_RECOVERY:
-                        printk(KERN_INFO "%s: Ring Recovery\n", dev->name);
-                        break;
-
-                case SINGLE_STATION:
-                        printk(KERN_INFO "%s: Single Station\n", dev->name);
-                        break;
-
-                case COUNTER_OVERFLOW:
-                        printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
-                        break;
-
-                case REMOVE_RECEIVED:
-                        printk(KERN_INFO "%s: Remove Received\n", dev->name);
-                        break;
-
-                case AUTO_REMOVAL_ERROR:
-                        printk(KERN_INFO "%s: Auto Remove Error\n", dev->name);
-                        break;
-
-                case LOBE_WIRE_FAULT:
-                        printk(KERN_INFO "%s: Lobe Wire Fault\n", dev->name);
-                        break;
-
-                case TRANSMIT_BEACON:
-                        printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);
-                        break;
-
-                case SOFT_ERROR:
-                        printk(KERN_INFO "%s: Soft Error\n", dev->name);
-                        break;
-
-                case HARD_ERROR:
-                        printk(KERN_INFO "%s: Hard Error\n", dev->name);
-                        break;
-
-                case SIGNAL_LOSS:
-                        printk(KERN_INFO "%s: Signal Loss\n", dev->name);
-                        break;
-
-                default:
-			printk(KERN_INFO "%s: Unknown ring status change\n",
-				dev->name);
-                        break;
-        }
-
-        return 0;
-}
-
-static int smctr_rx_frame(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        __u16 queue, status, rx_size, err = 0;
-        __u8 *pbuff;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_rx_frame\n", dev->name);
-
-        queue = tp->receive_queue_number;
-
-        while((status = tp->rx_fcb_curr[queue]->frame_status) != SUCCESS)
-        {
-                err = HARDWARE_FAILED;
-
-                if(((status & 0x007f) == 0) ||
-		   ((tp->receive_mask & ACCEPT_ERR_PACKETS) != 0))
-                {
-                        /* frame length less the CRC (4 bytes) + FS (1 byte) */
-                        rx_size = tp->rx_fcb_curr[queue]->frame_length - 5;
-
-                        pbuff = smctr_get_rx_pointer(dev, queue);
-
-                        smctr_set_page(dev, pbuff);
-                        smctr_disable_16bit(dev);
-
-                        /* pbuff points to addr within one page */
-                        pbuff = (__u8 *)PAGE_POINTER(pbuff);
-
-                        if(queue == NON_MAC_QUEUE)
-                        {
-                                struct sk_buff *skb;
-
-                                skb = dev_alloc_skb(rx_size);
-				if (skb) {
-                                	skb_put(skb, rx_size);
-
-					skb_copy_to_linear_data(skb, pbuff, rx_size);
-
-                                	/* Update Counters */
-                                	tp->MacStat.rx_packets++;
-                                	tp->MacStat.rx_bytes += skb->len;
-
-                                	/* Kick the packet on up. */
-                                	skb->protocol = tr_type_trans(skb, dev);
-                                	netif_rx(skb);
-				} else {
-				}
-                        }
-                        else
-                                smctr_process_rx_packet((MAC_HEADER *)pbuff,
-                                        rx_size, dev, status);
-                }
-
-                smctr_enable_16bit(dev);
-                smctr_set_page(dev, (__u8 *)tp->ram_access);
-                smctr_update_rx_chain(dev, queue);
-
-                if(err != SUCCESS)
-                        break;
-        }
-
-        return err;
-}
-
-static int smctr_send_dat(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int i, err;
-        MAC_HEADER *tmf;
-        FCBlock *fcb;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_send_dat\n", dev->name);
-
-        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE,
-                sizeof(MAC_HEADER))) == (FCBlock *)(-1L))
-        {
-                return OUT_OF_RESOURCES;
-        }
-
-        /* Initialize DAT Data Fields. */
-        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
-        tmf->ac = MSB(AC_FC_DAT);
-        tmf->fc = LSB(AC_FC_DAT);
-
-        for(i = 0; i < 6; i++)
-        {
-                tmf->sa[i] = dev->dev_addr[i];
-                tmf->da[i] = dev->dev_addr[i];
-
-        }
-
-        tmf->vc        = DAT;
-        tmf->dc_sc     = DC_RS | SC_RS;
-        tmf->vl        = 4;
-        tmf->vl        = SWAP_BYTES(tmf->vl);
-
-        /* Start Transmit. */
-        if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
-                return err;
-
-        /* Wait for Transmit to Complete */
-        for(i = 0; i < 10000; i++)
-        {
-                if(fcb->frame_status & FCB_COMMAND_DONE)
-                        break;
-                mdelay(1);
-        }
-
-        /* Check if GOOD frame Tx'ed. */
-        if(!(fcb->frame_status &  FCB_COMMAND_DONE) ||
-	   fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
-        {
-                return INITIALIZE_FAILED;
-        }
-
-        /* De-allocate Tx FCB and Frame Buffer.
-         * The FCB must be de-allocated manually if executing with
-         * interrupts disabled, otherwise the ISR (LM_Service_Events)
-         * will de-allocate it when the interrupt occurs.
-         */
-        tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
-        smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
-
-        return 0;
-}
-
-static void smctr_timeout(struct net_device *dev)
-{
-	/*
-         * If we get here, some higher level has decided we are broken.
-         * There should really be a "kick me" function call instead.
-         *
-         * Resetting the token ring adapter takes a long time so just
-         * fake transmission time and go on trying. Our own timeout
-         * routine is in sktr_timer_chk()
-         */
-        dev->trans_start = jiffies; /* prevent tx timeout */
-        netif_wake_queue(dev);
-}
-
-/*
- * Gets skb from system, queues it and checks if it can be sent
- */
-static netdev_tx_t smctr_send_packet(struct sk_buff *skb,
-					   struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_send_packet\n", dev->name);
-
-        /*
-         * Block a transmit overlap
-         */
-         
-        netif_stop_queue(dev);
-
-        if(tp->QueueSkb == 0)
-                return NETDEV_TX_BUSY;     /* Return with tbusy set: queue full */
-
-        tp->QueueSkb--;
-        skb_queue_tail(&tp->SendSkbQueue, skb);
-        smctr_hardware_send_packet(dev, tp);
-        if(tp->QueueSkb > 0)
-		netif_wake_queue(dev);
-		
-        return NETDEV_TX_OK;
-}
-
-static int smctr_send_lobe_media_test(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-	MAC_SUB_VECTOR *tsv;
-	MAC_HEADER *tmf;
-        FCBlock *fcb;
-	__u32 i;
-	int err;
-
-        if(smctr_debug > 15)
-                printk(KERN_DEBUG "%s: smctr_send_lobe_media_test\n", dev->name);
-
-        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr)
-                + S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L))
-        {
-                return OUT_OF_RESOURCES;
-        }
-
-        /* Initialize DAT Data Fields. */
-        tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
-        tmf->ac = MSB(AC_FC_LOBE_MEDIA_TEST);
-        tmf->fc = LSB(AC_FC_LOBE_MEDIA_TEST);
-
-        for(i = 0; i < 6; i++)
-        {
-                tmf->da[i] = 0;
-                tmf->sa[i] = dev->dev_addr[i];
-        }
-
-        tmf->vc        = LOBE_MEDIA_TEST;
-        tmf->dc_sc     = DC_RS | SC_RS;
-        tmf->vl        = 4;
-
-        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
-        smctr_make_wrap_data(dev, tsv);
-        tmf->vl += tsv->svl;
-
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_wrap_data(dev, tsv);
-        tmf->vl += tsv->svl;
-
-        /* Start Transmit. */
-        tmf->vl = SWAP_BYTES(tmf->vl);
-        if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
-                return err;
-
-        /* Wait for Transmit to Complete. (10 ms). */
-        for(i=0; i < 10000; i++)
-        {
-                if(fcb->frame_status & FCB_COMMAND_DONE)
-                        break;
-                mdelay(1);
-        }
-
-        /* Check if GOOD frame Tx'ed */
-        if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
-	   fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
-        {
-                return LOBE_MEDIA_TEST_FAILED;
-        }
-
-        /* De-allocate Tx FCB and Frame Buffer.
-         * The FCB must be de-allocated manually if executing with
-         * interrupts disabled, otherwise the ISR (LM_Service_Events)
-         * will de-allocate it when the interrupt occurs.
-         */
-        tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
-        smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
-
-        return 0;
-}
-
-static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 correlator)
-{
-        MAC_HEADER *tmf;
-        MAC_SUB_VECTOR *tsv;
-        FCBlock *fcb;
-
-        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
-		+ S_CORRELATOR + S_PHYSICAL_DROP + S_UPSTREAM_NEIGHBOR_ADDRESS
-		+ S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS))
-		== (FCBlock *)(-1L))
-        {
-                return 0;
-        }
-
-        tmf 		= (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
-        tmf->vc    	= RPT_ADDR;
-        tmf->dc_sc 	= (rmf->dc_sc & SC_MASK) << 4;
-        tmf->vl    	= 4;
-
-        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ADDR);
-
-        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
-        smctr_make_corr(dev, tsv, correlator);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_phy_drop_num(dev, tsv);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_upstream_neighbor_addr(dev, tsv);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_addr_mod(dev, tsv);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_group_addr(dev, tsv);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_funct_addr(dev, tsv);
-
-        tmf->vl += tsv->svl;
-
-        /* Subtract out MVID and MVL, which are
-         * included in both vl and MAC_HEADER.
-         */
-/*      fcb->frame_length           = tmf->vl + sizeof(MAC_HEADER) - 4;
-        fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
-*/
-        tmf->vl = SWAP_BYTES(tmf->vl);
-
-        return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
-}
-
-static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 correlator)
-{
-        MAC_HEADER *tmf;
-        MAC_SUB_VECTOR *tsv;
-        FCBlock *fcb;
-
-        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
-		+ S_CORRELATOR + S_PRODUCT_INSTANCE_ID + S_FUNCTIONAL_ADDRESS
-		+ S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY))
-		== (FCBlock *)(-1L))
-        {
-                return 0;
-        }
-
-        tmf 	   = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
-        tmf->vc    = RPT_ATTCH;
-        tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
-        tmf->vl    = 4;
-
-        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_ATTCH);
-
-        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
-        smctr_make_corr(dev, tsv, correlator);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_product_id(dev, tsv);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_funct_addr(dev, tsv);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_auth_funct_class(dev, tsv);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_access_pri(dev, tsv);
-
-        tmf->vl += tsv->svl;
-
-        /* Subtract out MVID and MVL, which are
-         * included in both vl and MAC_HEADER.
-         */
-/*      fcb->frame_length           = tmf->vl + sizeof(MAC_HEADER) - 4;
-        fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
-*/
-        tmf->vl = SWAP_BYTES(tmf->vl);
-
-        return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
-}
-
-static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 correlator)
-{
-        MAC_HEADER *tmf;
-        MAC_SUB_VECTOR *tsv;
-        FCBlock *fcb;
-
-        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
-		+ S_CORRELATOR + S_RING_STATION_VERSION_NUMBER
-		+ S_RING_STATION_STATUS + S_STATION_IDENTIFER))
-		== (FCBlock *)(-1L))
-        {
-                return 0;
-        }
-
-        tmf 	   = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
-        tmf->vc    = RPT_STATE;
-        tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
-        tmf->vl    = 4;
-
-        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_STATE);
-
-        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
-        smctr_make_corr(dev, tsv, correlator);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_ring_station_version(dev, tsv);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_ring_station_status(dev, tsv);
-
-        tmf->vl += tsv->svl;
-        tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-        smctr_make_station_id(dev, tsv);
-
-        tmf->vl += tsv->svl;
-
-        /* Subtract out MVID and MVL, which are
-         * included in both vl and MAC_HEADER.
-         */
-/*      fcb->frame_length           = tmf->vl + sizeof(MAC_HEADER) - 4;
-        fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
-*/
-        tmf->vl = SWAP_BYTES(tmf->vl);
-
-        return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
-}
-
-static int smctr_send_rpt_tx_forward(struct net_device *dev,
-        MAC_HEADER *rmf, __u16 tx_fstatus)
-{
-        MAC_HEADER *tmf;
-        MAC_SUB_VECTOR *tsv;
-        FCBlock *fcb;
-
-        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
-		+ S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L))
-        {
-                return 0;
-        }
-
-        tmf 	   = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
-        tmf->vc    = RPT_TX_FORWARD;
-        tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
-        tmf->vl    = 4;
-
-        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RPT_TX_FORWARD);
-
-        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
-        smctr_make_tx_status_code(dev, tsv, tx_fstatus);
-
-        tmf->vl += tsv->svl;
-
-        /* Subtract out MVID and MVL, which are
-         * included in both vl and MAC_HEADER.
-         */
-/*      fcb->frame_length           = tmf->vl + sizeof(MAC_HEADER) - 4;
-        fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
-*/
-        tmf->vl = SWAP_BYTES(tmf->vl);
-
-        return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
-}
-
-static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 rcode, __u16 correlator)
-{
-        MAC_HEADER *tmf;
-        MAC_SUB_VECTOR *tsv;
-        FCBlock *fcb;
-
-        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
-		+ S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L))
-        {
-                return 0;
-        }
-
-        tmf 	   = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
-        tmf->vc    = RSP;
-        tmf->dc_sc = (rmf->dc_sc & SC_MASK) << 4;
-        tmf->vl    = 4;
-
-        smctr_make_8025_hdr(dev, rmf, tmf, AC_FC_RSP);
-
-        tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
-        smctr_make_corr(dev, tsv, correlator);
-
-        return 0;
-}
-
-static int smctr_send_rq_init(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        MAC_HEADER *tmf;
-        MAC_SUB_VECTOR *tsv;
-        FCBlock *fcb;
-	unsigned int i, count = 0;
-	__u16 fstatus;
-	int err;
-
-        do {
-        	if(((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
-			+ S_PRODUCT_INSTANCE_ID + S_UPSTREAM_NEIGHBOR_ADDRESS
-			+ S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER))
-			== (FCBlock *)(-1L)))
-                {
-                        return 0;
-                }
-
-                tmf 	   = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
-                tmf->vc    = RQ_INIT;
-                tmf->dc_sc = DC_RPS | SC_RS;
-                tmf->vl    = 4;
-
-                smctr_make_8025_hdr(dev, NULL, tmf, AC_FC_RQ_INIT);
-
-                tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
-                smctr_make_product_id(dev, tsv);
-
-                tmf->vl += tsv->svl;
-                tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-                smctr_make_upstream_neighbor_addr(dev, tsv);
-
-                tmf->vl += tsv->svl;
-                tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-                smctr_make_ring_station_version(dev, tsv);
-
-                tmf->vl += tsv->svl;
-                tsv = (MAC_SUB_VECTOR *)((__u32)tsv + tsv->svl);
-                smctr_make_addr_mod(dev, tsv);
-
-                tmf->vl += tsv->svl;
-
-                /* Subtract out MVID and MVL, which are
-                 * included in both vl and MAC_HEADER.
-                 */
-/*              fcb->frame_length           = tmf->vl + sizeof(MAC_HEADER) - 4;
-                fcb->bdb_ptr->buffer_length = tmf->vl + sizeof(MAC_HEADER) - 4;
-*/
-                tmf->vl = SWAP_BYTES(tmf->vl);
-
-                if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
-                        return err;
-
-                /* Wait for Transmit to Complete */
-      		for(i = 0; i < 10000; i++) 
-		{
-          		if(fcb->frame_status & FCB_COMMAND_DONE)
-              			break;
-          		mdelay(1);
-      		}
-
-                /* Check if GOOD frame Tx'ed */
-                fstatus = fcb->frame_status;
-
-                if(!(fstatus & FCB_COMMAND_DONE))
-                        return HARDWARE_FAILED;
-
-                if(!(fstatus & FCB_TX_STATUS_E))
-                        count++;
-
-                /* De-allocate Tx FCB and Frame Buffer.
-                 * The FCB must be de-allocated manually if executing with
-                 * interrupts disabled, otherwise the ISR (LM_Service_Events)
-                 * will de-allocate it when the interrupt occurs.
-                 */
-                tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
-                smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
-        } while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS));
-
-	return smctr_join_complete_state(dev);
-}
-
-static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
-        __u16 *tx_fstatus)
-{
-        struct net_local *tp = netdev_priv(dev);
-        FCBlock *fcb;
-        unsigned int i;
-	int err;
-
-        /* Check if this is the END POINT of the Transmit Forward Chain. */
-        if(rmf->vl <= 18)
-                return 0;
-
-        /* Allocate Transmit FCB only by requesting 0 bytes
-         * of data buffer.
-         */
-        if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L))
-                return 0;
-
-        /* Set pointer to Transmit Frame Buffer to the data
-         * portion of the received TX Forward frame, making
-         * sure to skip over the Vector Code (vc) and Vector
-         * length (vl).
-         */
-        fcb->bdb_ptr->trc_data_block_ptr = TRC_POINTER((__u32)rmf 
-		+ sizeof(MAC_HEADER) + 2);
-        fcb->bdb_ptr->data_block_ptr     = (__u16 *)((__u32)rmf 
-		+ sizeof(MAC_HEADER) + 2);
-
-        fcb->frame_length                = rmf->vl - 4 - 2;
-        fcb->bdb_ptr->buffer_length      = rmf->vl - 4 - 2;
-
-        if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
-                return err;
-
-        /* Wait for Transmit to Complete */
-   	for(i = 0; i < 10000; i++) 
-	{
-       		if(fcb->frame_status & FCB_COMMAND_DONE)
-           		break;
-        	mdelay(1);
-   	}
-
-        /* Check if GOOD frame Tx'ed */
-        if(!(fcb->frame_status & FCB_COMMAND_DONE))
-        {
-                if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE)))
-                        return err;
-
-      		for(i = 0; i < 10000; i++) 
-		{
-          		if(fcb->frame_status & FCB_COMMAND_DONE)
-              			break;
-        		mdelay(1);
-      		}
-
-                if(!(fcb->frame_status & FCB_COMMAND_DONE))
-                        return HARDWARE_FAILED;
-        }
-
-        *tx_fstatus = fcb->frame_status;
-
-        return A_FRAME_WAS_FORWARDED;
-}
-
-static int smctr_set_auth_access_pri(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY)
-                return E_SUB_VECTOR_LENGTH_ERROR;
-
-        tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]);
-
-        return POSITIVE_ACK;
-}
-
-static int smctr_set_auth_funct_class(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS)
-                return E_SUB_VECTOR_LENGTH_ERROR;
-
-        tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]);
-
-        return POSITIVE_ACK;
-}
-
-static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
-        __u16 *correlator)
-{
-        if(rsv->svl != S_CORRELATOR)
-                return E_SUB_VECTOR_LENGTH_ERROR;
-
-        *correlator = (rsv->svv[0] << 8 | rsv->svv[1]);
-
-        return POSITIVE_ACK;
-}
-
-static int smctr_set_error_timer_value(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv)
-{
-	__u16 err_tval;
-	int err;
-
-        if(rsv->svl != S_ERROR_TIMER_VALUE)
-                return E_SUB_VECTOR_LENGTH_ERROR;
-
-        err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10;
-
-        smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval);
-
-        if((err = smctr_wait_cmd(dev)))
-                return err;
-
-        return POSITIVE_ACK;
-}
-
-static int smctr_set_frame_forward(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv, __u8 dc_sc)
-{
-        if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD))
-                return E_SUB_VECTOR_LENGTH_ERROR;
-
-        if((dc_sc & DC_MASK) != DC_CRS)
-        {
-                if(rsv->svl >= 2 && rsv->svl < 20)
-			return E_TRANSMIT_FORWARD_INVALID;
-
-                if((rsv->svv[0] != 0) || (rsv->svv[1] != 0))
-                        return E_TRANSMIT_FORWARD_INVALID;
-        }
-
-        return POSITIVE_ACK;
-}
-
-static int smctr_set_local_ring_num(struct net_device *dev,
-        MAC_SUB_VECTOR *rsv)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(rsv->svl != S_LOCAL_RING_NUMBER)
-                return E_SUB_VECTOR_LENGTH_ERROR;
-
-        if(tp->ptr_local_ring_num)
-                *(__u16 *)(tp->ptr_local_ring_num) 
-			= (rsv->svv[0] << 8 | rsv->svv[1]);
-
-        return POSITIVE_ACK;
-}
-
-static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int ioaddr = dev->base_addr;
-
-        if(tp->bic_type == BIC_585_CHIP)
-                outb((tp->trc_mask | HWR_CA), ioaddr + HWR);
-        else
-        {
-                outb((tp->trc_mask | CSR_CA), ioaddr + CSR);
-                outb(tp->trc_mask, ioaddr + CSR);
-        }
-
-        return 0;
-}
-
-static void smctr_set_multicast_list(struct net_device *dev)
-{
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name);
-}
-
-static int smctr_set_page(struct net_device *dev, __u8 *buf)
-{
-        struct net_local *tp = netdev_priv(dev);
-        __u8 amask;
-        __u32 tptr;
-
-        tptr = (__u32)buf - (__u32)tp->ram_access;
-        amask = (__u8)((tptr & PR_PAGE_MASK) >> 8);
-        outb(amask, dev->base_addr + PR);
-
-        return 0;
-}
-
-static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
-{
-	int err;
-
-        if(rsv->svl != S_PHYSICAL_DROP)
-                return E_SUB_VECTOR_LENGTH_ERROR;
-
-        smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]);
-        if((err = smctr_wait_cmd(dev)))
-                return err;
-
-        return POSITIVE_ACK;
-}
-
-/* Reset the ring speed to the opposite of what it was. This auto-pilot
- * mode requires a complete reset and re-init of the adapter.
- */
-static int smctr_set_ring_speed(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-	int err;
-
-        if(tp->media_type == MEDIA_UTP_16)
-                tp->media_type = MEDIA_UTP_4;
-        else
-                tp->media_type = MEDIA_UTP_16;
-
-        smctr_enable_16bit(dev);
-
-        /* Re-Initialize adapter's internal registers */
-        smctr_reset_adapter(dev);
-
-        if((err = smctr_init_card_real(dev)))
-                return err;
-
-        smctr_enable_bic_int(dev);
-
-        if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
-                return err;
-
-        smctr_disable_16bit(dev);
-
-	return 0;
-}
-
-static int smctr_set_rx_look_ahead(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        __u16 sword, rword;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_set_rx_look_ahead_flag\n", dev->name);
-
-        tp->adapter_flags &= ~(FORCED_16BIT_MODE);
-        tp->adapter_flags |= RX_VALID_LOOKAHEAD;
-
-        if(tp->adapter_bus == BUS_ISA16_TYPE)
-        {
-                sword = *((__u16 *)(tp->ram_access));
-                *((__u16 *)(tp->ram_access)) = 0x1234;
-
-                smctr_disable_16bit(dev);
-                rword = *((__u16 *)(tp->ram_access));
-                smctr_enable_16bit(dev);
-
-                if(rword != 0x1234)
-                        tp->adapter_flags |= FORCED_16BIT_MODE;
-
-                *((__u16 *)(tp->ram_access)) = sword;
-        }
-
-        return 0;
-}
-
-static int smctr_set_trc_reset(int ioaddr)
-{
-        __u8 r;
-
-        r = inb(ioaddr + MSR);
-        outb(MSR_RST | r, ioaddr + MSR);
-
-        return 0;
-}
-
-/*
- * This function can be called whether or not the adapter is busy.
- */
-static int smctr_setup_single_cmd(struct net_device *dev,
-        __u16 command, __u16 subcommand)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int err;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name);
-
-        if((err = smctr_wait_while_cbusy(dev)))
-                return err;
-
-        if((err = (unsigned int)smctr_wait_cmd(dev)))
-                return err;
-
-        tp->acb_head->cmd_done_status   = 0;
-        tp->acb_head->cmd               = command;
-        tp->acb_head->subcmd            = subcommand;
-
-        err = smctr_issue_resume_acb_cmd(dev);
-
-        return err;
-}
-
-/*
- * This function cannot be called while the adapter is busy.
- */
-static int smctr_setup_single_cmd_w_data(struct net_device *dev,
-        __u16 command, __u16 subcommand)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        tp->acb_head->cmd_done_status   = ACB_COMMAND_NOT_DONE;
-        tp->acb_head->cmd               = command;
-        tp->acb_head->subcmd            = subcommand;
-        tp->acb_head->data_offset_lo
-                = (__u16)TRC_POINTER(tp->misc_command_data);
-
-        return smctr_issue_resume_acb_cmd(dev);
-}
-
-static char *smctr_malloc(struct net_device *dev, __u16 size)
-{
-        struct net_local *tp = netdev_priv(dev);
-        char *m;
-
-        m = (char *)(tp->ram_access + tp->sh_mem_used);
-        tp->sh_mem_used += (__u32)size;
-
-        return m;
-}
-
-static int smctr_status_chg(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_status_chg\n", dev->name);
-
-        switch(tp->status)
-        {
-                case OPEN:
-                        break;
-
-                case CLOSED:
-                        break;
-
-                /* Interrupt driven open() completion. XXX */
-                case INITIALIZED:
-                        tp->group_address_0 = 0;
-                        tp->group_address[0] = 0;
-                        tp->group_address[1] = 0;
-                        tp->functional_address_0 = 0;
-                        tp->functional_address[0] = 0;
-                        tp->functional_address[1] = 0;
-                        smctr_open_tr(dev);
-                        break;
-
-                default:
-                        printk(KERN_INFO "%s: status change unknown %x\n",
-                                dev->name, tp->status);
-                        break;
-        }
-
-        return 0;
-}
-
-static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
-        __u16 queue)
-{
-        struct net_local *tp = netdev_priv(dev);
-        int err = 0;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_trc_send_packet\n", dev->name);
-
-        fcb->info = FCB_CHAIN_END | FCB_ENABLE_TFS;
-        if(tp->num_tx_fcbs[queue] != 1)
-                fcb->back_ptr->info = FCB_INTERRUPT_ENABLE | FCB_ENABLE_TFS;
-
-        if(tp->tx_queue_status[queue] == NOT_TRANSMITING)
-        {
-                tp->tx_queue_status[queue] = TRANSMITING;
-                err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
-        }
-
-        return err;
-}
-
-static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
-{
-        struct net_local *tp = netdev_priv(dev);
-        __u16 status, err = 0;
-        int cstatus;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_tx_complete\n", dev->name);
-
-        while((status = tp->tx_fcb_end[queue]->frame_status) != SUCCESS)
-        {
-                if(status & 0x7e00 )
-                {
-                        err = HARDWARE_FAILED;
-                        break;
-                }
-
-                if((err = smctr_update_tx_chain(dev, tp->tx_fcb_end[queue],
-                        queue)) != SUCCESS)
-                        break;
-
-                smctr_disable_16bit(dev);
-
-                if(tp->mode_bits & UMAC)
-                {
-                        if(!(status & (FCB_TX_STATUS_AR1 | FCB_TX_STATUS_AR2)))
-                                cstatus = NO_SUCH_DESTINATION;
-                        else
-                        {
-                                if(!(status & (FCB_TX_STATUS_CR1 | FCB_TX_STATUS_CR2)))
-                                        cstatus = DEST_OUT_OF_RESOURCES;
-                                else
-                                {
-                                        if(status & FCB_TX_STATUS_E)
-                                                cstatus = MAX_COLLISIONS;
-                                        else
-                                                cstatus = SUCCESS;
-                                }
-                        }
-                }
-                else
-                        cstatus = SUCCESS;
-
-                if(queue == BUG_QUEUE)
-                        err = SUCCESS;
-
-                smctr_enable_16bit(dev);
-                if(err != SUCCESS)
-                        break;
-        }
-
-        return err;
-}
-
-static unsigned short smctr_tx_move_frame(struct net_device *dev,
-        struct sk_buff *skb, __u8 *pbuff, unsigned int bytes)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int ram_usable;
-        __u32 flen, len, offset = 0;
-        __u8 *frag, *page;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_tx_move_frame\n", dev->name);
-
-        ram_usable = ((unsigned int)tp->ram_usable) << 10;
-        frag       = skb->data;
-        flen       = skb->len;
-
-        while(flen > 0 && bytes > 0)
-        {
-                smctr_set_page(dev, pbuff);
-
-                offset = SMC_PAGE_OFFSET(pbuff);
-
-                if(offset + flen > ram_usable)
-                        len = ram_usable - offset;
-                else
-                        len = flen;
-
-                if(len > bytes)
-                        len = bytes;
-
-                page = (char *) (offset + tp->ram_access);
-                memcpy(page, frag, len);
-
-                flen -=len;
-                bytes -= len;
-                frag += len;
-                pbuff += len;
-        }
-
-        return 0;
-}
-
-/* Update the error statistic counters for this adapter. */
-static int smctr_update_err_stats(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        struct tr_statistics *tstat = &tp->MacStat;
-
-        if(tstat->internal_errors)
-                tstat->internal_errors
-                        += *(tp->misc_command_data + 0) & 0x00ff;
-
-        if(tstat->line_errors)
-                tstat->line_errors += *(tp->misc_command_data + 0) >> 8;
-
-        if(tstat->A_C_errors)
-                tstat->A_C_errors += *(tp->misc_command_data + 1) & 0x00ff;
-
-        if(tstat->burst_errors)
-                tstat->burst_errors += *(tp->misc_command_data + 1) >> 8;
-
-        if(tstat->abort_delimiters)
-                tstat->abort_delimiters += *(tp->misc_command_data + 2) >> 8;
-
-        if(tstat->recv_congest_count)
-                tstat->recv_congest_count
-                        += *(tp->misc_command_data + 3) & 0x00ff;
-
-        if(tstat->lost_frames)
-                tstat->lost_frames
-                        += *(tp->misc_command_data + 3) >> 8;
-
-        if(tstat->frequency_errors)
-                tstat->frequency_errors += *(tp->misc_command_data + 4) & 0x00ff;
-
-        if(tstat->frame_copied_errors)
-                 tstat->frame_copied_errors
-                        += *(tp->misc_command_data + 4) >> 8;
-
-        if(tstat->token_errors)
-                tstat->token_errors += *(tp->misc_command_data + 5) >> 8;
-
-        return 0;
-}
-
-static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
-{
-        struct net_local *tp = netdev_priv(dev);
-        FCBlock *fcb;
-        BDBlock *bdb;
-        __u16 size, len;
-
-        fcb = tp->rx_fcb_curr[queue];
-        len = fcb->frame_length;
-
-        fcb->frame_status = 0;
-        fcb->info = FCB_CHAIN_END;
-        fcb->back_ptr->info = FCB_WARNING;
-
-        tp->rx_fcb_curr[queue] = tp->rx_fcb_curr[queue]->next_ptr;
-
-        /* update RX BDBs */
-        size = (len >> RX_BDB_SIZE_SHIFT);
-        if(len & RX_DATA_BUFFER_SIZE_MASK)
-                size += sizeof(BDBlock);
-        size &= (~RX_BDB_SIZE_MASK);
-
-        /* check if wrap around */
-        bdb = (BDBlock *)((__u32)(tp->rx_bdb_curr[queue]) + (__u32)(size));
-        if((__u32)bdb >= (__u32)tp->rx_bdb_end[queue])
-        {
-                bdb = (BDBlock *)((__u32)(tp->rx_bdb_head[queue])
-                        + (__u32)(bdb) - (__u32)(tp->rx_bdb_end[queue]));
-        }
-
-        bdb->back_ptr->info = BDB_CHAIN_END;
-        tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END;
-        tp->rx_bdb_curr[queue] = bdb;
-
-        return 0;
-}
-
-static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
-        __u16 queue)
-{
-        struct net_local *tp = netdev_priv(dev);
-
-        if(smctr_debug > 20)
-                printk(KERN_DEBUG "smctr_update_tx_chain\n");
-
-        if(tp->num_tx_fcbs_used[queue] <= 0)
-                return HARDWARE_FAILED;
-        else
-        {
-                if(tp->tx_buff_used[queue] < fcb->memory_alloc)
-                {
-                        tp->tx_buff_used[queue] = 0;
-                        return HARDWARE_FAILED;
-                }
-
-                tp->tx_buff_used[queue] -= fcb->memory_alloc;
-
-                /* If all transmit buffers are cleared, tx_buff_curr[]
-                 * must be reset to tx_buff_head[]; otherwise the tx
-                 * buffer space becomes fragmented and cannot accommodate
-                 * any buffer larger than (curr - head) or (end - curr),
-                 * since we do not allow wrap-around allocation.
-                 */
-                if(tp->tx_buff_used[queue] == 0)
-                        tp->tx_buff_curr[queue] = tp->tx_buff_head[queue];
-
-                tp->num_tx_fcbs_used[queue]--;
-                fcb->frame_status = 0;
-                tp->tx_fcb_end[queue] = fcb->next_ptr;
-		netif_wake_queue(dev);
-                return 0;
-        }
-}
-
-static int smctr_wait_cmd(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int loop_count = 0x20000;
-
-        if(smctr_debug > 10)
-                printk(KERN_DEBUG "%s: smctr_wait_cmd\n", dev->name);
-
-        while(loop_count)
-        {
-                if(tp->acb_head->cmd_done_status & ACB_COMMAND_DONE)
-                        break;
-		udelay(1);
-                loop_count--;
-        }
-
-        if(loop_count == 0)
-                return HARDWARE_FAILED;
-
-        if(tp->acb_head->cmd_done_status & 0xff)
-                return HARDWARE_FAILED;
-
-        return 0;
-}
-
-static int smctr_wait_while_cbusy(struct net_device *dev)
-{
-        struct net_local *tp = netdev_priv(dev);
-        unsigned int timeout = 0x20000;
-        int ioaddr = dev->base_addr;
-        __u8 r;
-
-        if(tp->bic_type == BIC_585_CHIP)
-        {
-                while(timeout)
-                {
-                        r = inb(ioaddr + HWR);
-                        if((r & HWR_CBUSY) == 0)
-                                break;
-                        timeout--;
-                }
-        }
-        else
-        {
-                while(timeout)
-                {
-                        r = inb(ioaddr + CSR);
-                        if((r & CSR_CBUSY) == 0)
-                                break;
-                        timeout--;
-                }
-        }
-
-        if(timeout)
-                return 0;
-        else
-                return HARDWARE_FAILED;
-}
-
-#ifdef MODULE
-
-static struct net_device* dev_smctr[SMCTR_MAX_ADAPTERS];
-static int io[SMCTR_MAX_ADAPTERS];
-static int irq[SMCTR_MAX_ADAPTERS];
-
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("tr_smctr.bin");
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param(ringspeed, int, 0);
-
-static struct net_device * __init setup_card(int n)
-{
-	struct net_device *dev = alloc_trdev(sizeof(struct net_local));
-	int err;
-	
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	dev->irq = irq[n];
-	err = smctr_probe1(dev, io[n]);
-	if (err) 
-		goto out;
-		
-	err = register_netdev(dev);
-	if (err)
-		goto out1;
-	return dev;
- out1:
-#ifdef CONFIG_MCA_LEGACY
-	{ struct net_local *tp = netdev_priv(dev);
-	  if (tp->slot_num)
-		mca_mark_as_unused(tp->slot_num);
-	}
-#endif
-	release_region(dev->base_addr, SMCTR_IO_EXTENT);
-	free_irq(dev->irq, dev);
-out:
-	free_netdev(dev);
-	return ERR_PTR(err);
-}
-
-int __init init_module(void)
-{
-        int i, found = 0;
-	struct net_device *dev;
-
-        for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) {
-		dev = io[0]? setup_card(i) : smctr_probe(-1);
-		if (!IS_ERR(dev)) {
-			++found;
-			dev_smctr[i] = dev;
-		}
-        }
-
-        return found ? 0 : -ENODEV;
-}
-
-void __exit cleanup_module(void)
-{
-        int i;
-
-        for(i = 0; i < SMCTR_MAX_ADAPTERS; i++) {
-		struct net_device *dev = dev_smctr[i];
-
-		if (dev) {
-
-			unregister_netdev(dev);
-#ifdef CONFIG_MCA_LEGACY
-			{ struct net_local *tp = netdev_priv(dev);
-			if (tp->slot_num)
-				mca_mark_as_unused(tp->slot_num);
-			}
-#endif
-			release_region(dev->base_addr, SMCTR_IO_EXTENT);
-			if (dev->irq)
-				free_irq(dev->irq, dev);
-
-			free_netdev(dev);
-		}
-        }
-}
-#endif /* MODULE */
diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h
deleted file mode 100644
index 6e5700a..0000000
--- a/drivers/net/tokenring/smctr.h
+++ /dev/null
@@ -1,1585 +0,0 @@
-/* smctr.h: SMC Token Ring driver header for Linux
- *
- * Authors:
- *  - Jay Schulist <jschlst@samba.org>
- */
-
-#ifndef __LINUX_SMCTR_H
-#define __LINUX_SMCTR_H
-
-#ifdef __KERNEL__
-
-#define MAX_TX_QUEUE 10
-
-#define SMC_HEADER_SIZE 14
-
-#define SMC_PAGE_OFFSET(X)          (((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask)
-
-#define INIT            0x0D
-#define RQ_ATTCH        0x10
-#define RQ_STATE        0x0F
-#define RQ_ADDR         0x0E
-#define CHG_PARM        0x0C
-#define RSP             0x00
-#define TX_FORWARD      0x09
-
-#define AC_FC_DAT	((3<<13) | 1)
-#define      DAT             0x07
-
-#define RPT_NEW_MON     0x25
-#define RPT_SUA_CHG     0x26
-#define RPT_ACTIVE_ERR  0x28
-#define RPT_NN_INCMP    0x27
-#define RPT_ERROR       0x29
-
-#define RQ_INIT         0x20
-#define RPT_ATTCH       0x24
-#define RPT_STATE       0x23
-#define RPT_ADDR        0x22
-
-#define POSITIVE_ACK                    0x0001
-#define A_FRAME_WAS_FORWARDED           0x8888
-
-#define      GROUP_ADDRESS                   0x2B
-#define      PHYSICAL_DROP                   0x0B
-#define      AUTHORIZED_ACCESS_PRIORITY      0x07
-#define      AUTHORIZED_FUNCTION_CLASS       0x06
-#define      FUNCTIONAL_ADDRESS              0x2C
-#define      RING_STATION_STATUS             0x29
-#define      TRANSMIT_STATUS_CODE            0x2A
-#define      IBM_PASS_SOURCE_ADDR    0x01
-#define      AC_FC_RPT_TX_FORWARD            ((0<<13) | 0)
-#define      AC_FC_RPT_STATE                 ((0<<13) | 0)
-#define      AC_FC_RPT_ADDR                  ((0<<13) | 0)
-#define      CORRELATOR                      0x09
-
-#define POSITIVE_ACK                    0x0001          /*             */
-#define E_MAC_DATA_INCOMPLETE           0x8001          /* not used    */
-#define E_VECTOR_LENGTH_ERROR           0x8002          /*             */
-#define E_UNRECOGNIZED_VECTOR_ID        0x8003          /*             */
-#define E_INAPPROPRIATE_SOURCE_CLASS    0x8004          /*             */
-#define E_SUB_VECTOR_LENGTH_ERROR       0x8005          /*             */
-#define E_TRANSMIT_FORWARD_INVALID      0x8006          /* def. by IBM */
-#define E_MISSING_SUB_VECTOR            0x8007          /*             */
-#define E_SUB_VECTOR_UNKNOWN            0x8008          /*             */
-#define E_MAC_HEADER_TOO_LONG           0x8009          /*             */
-#define E_FUNCTION_DISABLED             0x800A          /* not used    */
-
-#define A_FRAME_WAS_FORWARDED           0x8888          /* used by send_TX_FORWARD */
-
-#define UPSTREAM_NEIGHBOR_ADDRESS       0x02
-#define LOCAL_RING_NUMBER               0x03
-#define ASSIGN_PHYSICAL_DROP            0x04
-#define ERROR_TIMER_VALUE               0x05
-#define AUTHORIZED_FUNCTION_CLASS       0x06
-#define AUTHORIZED_ACCESS_PRIORITY      0x07
-#define CORRELATOR                      0x09
-#define PHYSICAL_DROP                   0x0B
-#define RESPONSE_CODE                   0x20
-#define ADDRESS_MODIFER                 0x21
-#define PRODUCT_INSTANCE_ID             0x22
-#define RING_STATION_VERSION_NUMBER     0x23
-#define WRAP_DATA                       0x26
-#define FRAME_FORWARD                   0x27
-#define STATION_IDENTIFER               0x28
-#define RING_STATION_STATUS             0x29
-#define TRANSMIT_STATUS_CODE            0x2A
-#define GROUP_ADDRESS                   0x2B
-#define FUNCTIONAL_ADDRESS              0x2C
-
-#define F_NO_SUB_VECTORS_FOUND                  0x0000
-#define F_UPSTREAM_NEIGHBOR_ADDRESS             0x0001
-#define F_LOCAL_RING_NUMBER                     0x0002
-#define F_ASSIGN_PHYSICAL_DROP                  0x0004
-#define F_ERROR_TIMER_VALUE                     0x0008
-#define F_AUTHORIZED_FUNCTION_CLASS             0x0010
-#define F_AUTHORIZED_ACCESS_PRIORITY            0x0020
-#define F_CORRELATOR                            0x0040
-#define F_PHYSICAL_DROP                         0x0080
-#define F_RESPONSE_CODE                         0x0100
-#define F_PRODUCT_INSTANCE_ID                   0x0200
-#define F_RING_STATION_VERSION_NUMBER           0x0400
-#define F_STATION_IDENTIFER                     0x0800
-#define F_RING_STATION_STATUS                   0x1000
-#define F_GROUP_ADDRESS                         0x2000
-#define F_FUNCTIONAL_ADDRESS                    0x4000
-#define F_FRAME_FORWARD                         0x8000
-
-#define R_INIT                                  0x00
-#define R_RQ_ATTCH_STATE_ADDR                   0x00
-#define R_CHG_PARM                              0x00
-#define R_TX_FORWARD                            F_FRAME_FORWARD
-
-
-#define      UPSTREAM_NEIGHBOR_ADDRESS       0x02
-#define      ADDRESS_MODIFER                 0x21
-#define      RING_STATION_VERSION_NUMBER     0x23
-#define      PRODUCT_INSTANCE_ID             0x22
-
-#define      RPT_TX_FORWARD  0x2A
-
-#define AC_FC_INIT                      (3<<13) | 0 /*                     */
-#define AC_FC_RQ_INIT                   ((3<<13) | 0) /*                     */
-#define AC_FC_RQ_ATTCH                  (3<<13) | 0 /* DC = SC of rx frame */
-#define AC_FC_RQ_STATE                  (3<<13) | 0 /* DC = SC of rx frame */
-#define AC_FC_RQ_ADDR                   (3<<13) | 0 /* DC = SC of rx frame */
-#define AC_FC_CHG_PARM                  (3<<13) | 0 /*                     */
-#define AC_FC_RSP                       (0<<13) | 0 /* DC = SC of rx frame */
-#define AC_FC_RPT_ATTCH                 (0<<13) | 0
-
-#define S_UPSTREAM_NEIGHBOR_ADDRESS               6 + 2
-#define S_LOCAL_RING_NUMBER                       2 + 2
-#define S_ASSIGN_PHYSICAL_DROP                    4 + 2
-#define S_ERROR_TIMER_VALUE                       2 + 2
-#define S_AUTHORIZED_FUNCTION_CLASS               2 + 2
-#define S_AUTHORIZED_ACCESS_PRIORITY              2 + 2
-#define S_CORRELATOR                              2 + 2
-#define S_PHYSICAL_DROP                           4 + 2
-#define S_RESPONSE_CODE                           4 + 2
-#define S_ADDRESS_MODIFER                         2 + 2
-#define S_PRODUCT_INSTANCE_ID                    18 + 2
-#define S_RING_STATION_VERSION_NUMBER            10 + 2
-#define S_STATION_IDENTIFER                       6 + 2
-#define S_RING_STATION_STATUS                     6 + 2
-#define S_GROUP_ADDRESS                           4 + 2
-#define S_FUNCTIONAL_ADDRESS                      4 + 2
-#define S_FRAME_FORWARD                         252 + 2
-#define S_TRANSMIT_STATUS_CODE                    2 + 2
-
-#define ISB_IMC_RES0                    0x0000  /* */
-#define ISB_IMC_MAC_TYPE_3              0x0001  /* MAC_ARC_INDICATE */
-#define ISB_IMC_MAC_ERROR_COUNTERS      0x0002  /* */
-#define ISB_IMC_RES1                    0x0003  /* */
-#define ISB_IMC_MAC_TYPE_2              0x0004  /* QUE_MAC_INDICATE */
-#define ISB_IMC_TX_FRAME                0x0005  /* */
-#define ISB_IMC_END_OF_TX_QUEUE         0x0006  /* */
-#define ISB_IMC_NON_MAC_RX_RESOURCE     0x0007  /* */
-#define ISB_IMC_MAC_RX_RESOURCE         0x0008  /* */
-#define ISB_IMC_NON_MAC_RX_FRAME        0x0009  /* */
-#define ISB_IMC_MAC_RX_FRAME            0x000A  /* */
-#define ISB_IMC_TRC_FIFO_STATUS         0x000B  /* */
-#define ISB_IMC_COMMAND_STATUS          0x000C  /* */
-#define ISB_IMC_MAC_TYPE_1              0x000D  /* Self Removed */
-#define ISB_IMC_TRC_INTRNL_TST_STATUS   0x000E  /* */
-#define ISB_IMC_RES2                    0x000F  /* */
-
-#define NON_MAC_RX_RESOURCE_BW          0x10    /* shifted right 8 bits */
-#define NON_MAC_RX_RESOURCE_FW          0x20    /* shifted right 8 bits */
-#define NON_MAC_RX_RESOURCE_BE          0x40    /* shifted right 8 bits */
-#define NON_MAC_RX_RESOURCE_FE          0x80    /* shifted right 8 bits */
-#define RAW_NON_MAC_RX_RESOURCE_BW      0x1000  /* */
-#define RAW_NON_MAC_RX_RESOURCE_FW      0x2000  /* */
-#define RAW_NON_MAC_RX_RESOURCE_BE      0x4000  /* */
-#define RAW_NON_MAC_RX_RESOURCE_FE      0x8000  /* */
-
-#define MAC_RX_RESOURCE_BW              0x10    /* shifted right 8 bits */
-#define MAC_RX_RESOURCE_FW              0x20    /* shifted right 8 bits */
-#define MAC_RX_RESOURCE_BE              0x40    /* shifted right 8 bits */
-#define MAC_RX_RESOURCE_FE              0x80    /* shifted right 8 bits */
-#define RAW_MAC_RX_RESOURCE_BW          0x1000  /* */
-#define RAW_MAC_RX_RESOURCE_FW          0x2000  /* */
-#define RAW_MAC_RX_RESOURCE_BE          0x4000  /* */
-#define RAW_MAC_RX_RESOURCE_FE          0x8000  /* */
-
-#define TRC_FIFO_STATUS_TX_UNDERRUN     0x40    /* shifted right 8 bits */
-#define TRC_FIFO_STATUS_RX_OVERRUN      0x80    /* shifted right 8 bits */
-#define RAW_TRC_FIFO_STATUS_TX_UNDERRUN 0x4000  /* */
-#define RAW_TRC_FIFO_STATUS_RX_OVERRUN  0x8000  /* */
-
-#define       CSR_CLRTINT             0x08
-
-#define MSB(X)                  ((__u8)((__u16) X >> 8))
-#define LSB(X)                  ((__u8)((__u16) X &  0xff))
-
-#define AC_FC_LOBE_MEDIA_TEST           ((3<<13) | 0)
-#define S_WRAP_DATA                             248 + 2 /* 500 + 2 */
-#define      WRAP_DATA                       0x26
-#define LOBE_MEDIA_TEST 0x08
-
-/* Destination Class (dc) */
-
-#define DC_MASK         0xF0
-#define DC_RS           0x00
-#define DC_CRS          0x40
-#define DC_RPS          0x50
-#define DC_REM          0x60
-
-/* Source Classes (sc) */
-
-#define SC_MASK         0x0F
-#define SC_RS           0x00
-#define SC_CRS          0x04
-#define SC_RPS          0x05
-#define SC_REM          0x06
-
-#define PR		0x11
-#define PR_PAGE_MASK	0x0C000
-
-#define MICROCHANNEL	0x0008
-#define INTERFACE_CHIP	0x0010
-#define BOARD_16BIT	0x0040
-#define PAGED_RAM	0x0080
-#define WD8115TA	(TOKEN_MEDIA | MICROCHANNEL | INTERFACE_CHIP | PAGED_RAM)
-#define WD8115T		(TOKEN_MEDIA | INTERFACE_CHIP | BOARD_16BIT | PAGED_RAM)
-
-#define BRD_ID_8316	0x50
-
-#define r587_SER	0x001
-#define SER_DIN		0x80
-#define SER_DOUT	0x40
-#define SER_CLK		0x20
-#define SER_ECS		0x10
-#define SER_E806	0x08
-#define SER_PNP		0x04
-#define SER_BIO		0x02
-#define SER_16B		0x01
-
-#define r587_IDR	0x004
-#define IDR_IRQ_MASK	0x0F0
-#define IDR_DCS_MASK	0x007
-#define IDR_RWS		0x008
-
-
-#define r587_BIO	0x003
-#define BIO_ENB		0x080
-#define BIO_MASK	0x03F
-
-#define r587_PCR	0x005
-#define PCR_RAMS	0x040
-
-
-
-#define NUM_ADDR_BITS	8
-
-#define ISA_MAX_ADDRESS		0x00ffffff
-
-#define SMCTR_MAX_ADAPTERS	7
-
-#define MC_TABLE_ENTRIES      16
-
-#define MAXFRAGMENTS          32
-
-#define CHIP_REV_MASK         0x3000
-
-#define MAX_TX_QS             8
-#define NUM_TX_QS_USED        3
-
-#define MAX_RX_QS             2
-#define NUM_RX_QS_USED        2
-
-#define INTEL_DATA_FORMAT	0x4000
-#define INTEL_ADDRESS_POINTER_FORMAT	0x8000
-#define PAGE_POINTER(X)		((((unsigned long)(X) - tp->ram_access) & tp->page_offset_mask) + tp->ram_access)
-#define SWAP_WORDS(X)		(((X & 0xFFFF) << 16) | (X >> 16))
-
-#define INTERFACE_CHIP          0x0010          /* Soft Config Adapter */
-#define ADVANCED_FEATURES       0x0020          /* Adv. netw. interface features */
-#define BOARD_16BIT             0x0040          /* 16 bit capability */
-#define PAGED_RAM               0x0080          /* Adapter has paged RAM */
-
-#define PAGED_ROM               0x0100          /* Adapter has paged ROM */
-
-#define RAM_SIZE_UNKNOWN        0x0000          /* Unknown RAM size */
-#define RAM_SIZE_0K             0x0001          /* 0K  RAM */
-#define RAM_SIZE_8K             0x0002          /* 8k  RAM */
-#define RAM_SIZE_16K            0x0003          /* 16k RAM */
-#define RAM_SIZE_32K            0x0004          /* 32k RAM */
-#define RAM_SIZE_64K            0x0005          /* 64k RAM */
-#define RAM_SIZE_RESERVED_6     0x0006          /* Reserved RAM size */
-#define RAM_SIZE_RESERVED_7     0x0007          /* Reserved RAM size */
-#define RAM_SIZE_MASK           0x0007          /* Isolates RAM Size */
-
-#define TOKEN_MEDIA           0x0005
-
-#define BID_REG_0       0x00
-#define BID_REG_1       0x01
-#define BID_REG_2       0x02
-#define BID_REG_3       0x03
-#define BID_REG_4       0x04
-#define BID_REG_5       0x05
-#define BID_REG_6       0x06
-#define BID_REG_7       0x07
-#define BID_LAR_0       0x08
-#define BID_LAR_1       0x09
-#define BID_LAR_2       0x0A
-#define BID_LAR_3       0x0B
-#define BID_LAR_4       0x0C
-#define BID_LAR_5       0x0D
-
-#define BID_BOARD_ID_BYTE       0x0E
-#define BID_CHCKSM_BYTE         0x0F
-#define BID_LAR_OFFSET          0x08  
-
-#define BID_MSZ_583_BIT         0x08
-#define BID_SIXTEEN_BIT_BIT     0x01
-
-#define BID_BOARD_REV_MASK      0x1E
-
-#define BID_MEDIA_TYPE_BIT      0x01
-#define BID_SOFT_CONFIG_BIT     0x20
-#define BID_RAM_SIZE_BIT        0x40
-#define BID_BUS_TYPE_BIT        0x80
-
-#define BID_CR          0x10
-
-#define BID_TXP         0x04            /* Transmit Packet Command */
-
-#define BID_TCR_DIFF    0x0D    /* Transmit Configuration Register */
-
-#define BID_TCR_VAL     0x18            /* Value to Test 8390 or 690 */
-#define BID_PS0         0x00            /* Register Page Select 0 */
-#define BID_PS1         0x40            /* Register Page Select 1 */
-#define BID_PS2         0x80            /* Register Page Select 2 */
-#define BID_PS_MASK     0x3F            /* For Masking Off Page Select Bits */
-
-#define BID_EEPROM_0                    0x08
-#define BID_EEPROM_1                    0x09
-#define BID_EEPROM_2                    0x0A
-#define BID_EEPROM_3                    0x0B
-#define BID_EEPROM_4                    0x0C
-#define BID_EEPROM_5                    0x0D
-#define BID_EEPROM_6                    0x0E
-#define BID_EEPROM_7                    0x0F
-
-#define BID_OTHER_BIT                   0x02
-#define BID_ICR_MASK                    0x0C
-#define BID_EAR_MASK                    0x0F
-#define BID_ENGR_PAGE                   0x0A0
-#define BID_RLA                         0x10
-#define BID_EA6                         0x80
-#define BID_RECALL_DONE_MASK            0x10
-#define BID_BID_EEPROM_OVERRIDE         0xFFB0
-#define BID_EXTRA_EEPROM_OVERRIDE       0xFFD0
-#define BID_EEPROM_MEDIA_MASK           0x07
-#define BID_STARLAN_TYPE                0x00
-#define BID_ETHERNET_TYPE               0x01
-#define BID_TP_TYPE                     0x02
-#define BID_EW_TYPE                     0x03
-#define BID_TOKEN_RING_TYPE             0x04
-#define BID_UTP2_TYPE                   0x05
-#define BID_EEPROM_IRQ_MASK             0x18
-#define BID_PRIMARY_IRQ                 0x00
-#define BID_ALTERNATE_IRQ_1             0x08
-#define BID_ALTERNATE_IRQ_2             0x10
-#define BID_ALTERNATE_IRQ_3             0x18
-#define BID_EEPROM_RAM_SIZE_MASK        0xE0
-#define BID_EEPROM_RAM_SIZE_RES1        0x00
-#define BID_EEPROM_RAM_SIZE_RES2        0x20
-#define BID_EEPROM_RAM_SIZE_8K          0x40
-#define BID_EEPROM_RAM_SIZE_16K         0x60
-#define BID_EEPROM_RAM_SIZE_32K         0x80
-#define BID_EEPROM_RAM_SIZE_64K         0xA0
-#define BID_EEPROM_RAM_SIZE_RES3        0xC0
-#define BID_EEPROM_RAM_SIZE_RES4        0xE0
-#define BID_EEPROM_BUS_TYPE_MASK        0x07
-#define BID_EEPROM_BUS_TYPE_AT          0x00
-#define BID_EEPROM_BUS_TYPE_MCA         0x01
-#define BID_EEPROM_BUS_TYPE_EISA        0x02
-#define BID_EEPROM_BUS_TYPE_NEC         0x03
-#define BID_EEPROM_BUS_SIZE_MASK        0x18
-#define BID_EEPROM_BUS_SIZE_8BIT        0x00
-#define BID_EEPROM_BUS_SIZE_16BIT       0x08
-#define BID_EEPROM_BUS_SIZE_32BIT       0x10
-#define BID_EEPROM_BUS_SIZE_64BIT       0x18
-#define BID_EEPROM_BUS_MASTER           0x20
-#define BID_EEPROM_RAM_PAGING           0x40
-#define BID_EEPROM_ROM_PAGING           0x80
-#define BID_EEPROM_PAGING_MASK          0xC0
-#define BID_EEPROM_LOW_COST             0x08
-#define BID_EEPROM_IO_MAPPED            0x10
-#define BID_EEPROM_HMI                  0x01
-#define BID_EEPROM_AUTO_MEDIA_DETECT    0x01
-#define BID_EEPROM_CHIP_REV_MASK        0x0C
-
-#define BID_EEPROM_LAN_ADDR             0x30
-
-#define BID_EEPROM_MEDIA_OPTION         0x54
-#define BID_EEPROM_MEDIA_UTP            0x01
-#define BID_EEPROM_4MB_RING             0x08
-#define BID_EEPROM_16MB_RING            0x10
-#define BID_EEPROM_MEDIA_STP            0x40
-
-#define BID_EEPROM_MISC_DATA            0x56
-#define BID_EEPROM_EARLY_TOKEN_RELEASE  0x02
-
-#define CNFG_ID_8003E           0x6fc0
-#define CNFG_ID_8003S           0x6fc1
-#define CNFG_ID_8003W           0x6fc2
-#define CNFG_ID_8115TRA         0x6ec6
-#define CNFG_ID_8013E           0x61C8
-#define CNFG_ID_8013W           0x61C9
-#define CNFG_ID_BISTRO03E       0xEFE5
-#define CNFG_ID_BISTRO13E       0xEFD5
-#define CNFG_ID_BISTRO13W       0xEFD4
-#define CNFG_MSR_583    0x0
-#define CNFG_ICR_583    0x1
-#define CNFG_IAR_583    0x2
-#define CNFG_BIO_583    0x3
-#define CNFG_EAR_583    0x3
-#define CNFG_IRR_583    0x4
-#define CNFG_LAAR_584   0x5
-#define CNFG_GP2                0x7
-#define CNFG_LAAR_MASK          0x1F
-#define CNFG_LAAR_ZWS           0x20
-#define CNFG_LAAR_L16E          0x40
-#define CNFG_ICR_IR2_584        0x04
-#define CNFG_ICR_MASK       0x08
-#define CNFG_ICR_MSZ        0x08
-#define CNFG_ICR_RLA        0x10
-#define CNFG_ICR_STO        0x80
-#define CNFG_IRR_IRQS           0x60
-#define CNFG_IRR_IEN            0x80
-#define CNFG_IRR_ZWS            0x01
-#define CNFG_GP2_BOOT_NIBBLE    0x0F
-#define CNFG_IRR_OUT2       0x04
-#define CNFG_IRR_OUT1       0x02
-
-#define CNFG_SIZE_8KB           8
-#define CNFG_SIZE_16KB          16
-#define CNFG_SIZE_32KB          32
-#define CNFG_SIZE_64KB          64
-#define CNFG_SIZE_128KB     128
-#define CNFG_SIZE_256KB     256
-#define ROM_DISABLE             0x0
-
-#define CNFG_SLOT_ENABLE_BIT    0x08
-
-#define CNFG_POS_CONTROL_REG    0x096
-#define CNFG_POS_REG0           0x100
-#define CNFG_POS_REG1           0x101
-#define CNFG_POS_REG2           0x102
-#define CNFG_POS_REG3           0x103
-#define CNFG_POS_REG4           0x104
-#define CNFG_POS_REG5           0x105
-
-#define CNFG_ADAPTER_TYPE_MASK  0x0e
-
-#define SLOT_16BIT              0x0008
-#define INTERFACE_5X3_CHIP      0x0000          /* 0000 = 583 or 593 chips */
-#define NIC_690_BIT                     0x0010          /* NIC is 690 */
-#define ALTERNATE_IRQ_BIT       0x0020          /* Alternate IRQ is used */
-#define INTERFACE_584_CHIP      0x0040          /* 0001 = 584 chip */
-#define INTERFACE_594_CHIP      0x0080          /* 0010 = 594 chip */
-#define INTERFACE_585_CHIP      0x0100          /* 0100 = 585/790 chip */
-#define INTERFACE_CHIP_MASK     0x03C0          /* Isolates Intfc Chip Type */
-
-#define BOARD_16BIT             0x0040
-#define NODE_ADDR_CKSUM 	0xEE
-#define BRD_ID_8115T    	0x04
-
-#define NIC_825_BIT             0x0400          /* TRC 83C825 NIC */
-#define NIC_790_BIT             0x0800          /* NIC is 83C790 Ethernet */
-
-#define CHIP_REV_MASK           0x3000
-
-#define HWR_CBUSY			0x02
-#define HWR_CA				0x01
-
-#define MAC_QUEUE                       0
-#define NON_MAC_QUEUE                   1
-#define BUG_QUEUE                       2       /* NO RECEIVE QUEUE, ONLY TX */
-
-#define NUM_MAC_TX_FCBS                 8
-#define NUM_MAC_TX_BDBS                 NUM_MAC_TX_FCBS
-#define NUM_MAC_RX_FCBS                 7
-#define NUM_MAC_RX_BDBS                 8
-
-#define NUM_NON_MAC_TX_FCBS             6
-#define NUM_NON_MAC_TX_BDBS             NUM_NON_MAC_TX_FCBS
-
-#define NUM_NON_MAC_RX_BDBS             0       /* CALCULATED DYNAMICALLY */
-
-#define NUM_BUG_TX_FCBS                 8
-#define NUM_BUG_TX_BDBS                 NUM_BUG_TX_FCBS
-
-#define MAC_TX_BUFFER_MEMORY            1024
-#define NON_MAC_TX_BUFFER_MEMORY        (20 * 1024)
-#define BUG_TX_BUFFER_MEMORY            (NUM_BUG_TX_FCBS * 32)
-
-#define RX_BUFFER_MEMORY                0       /* CALCULATED DYNAMICALLY */
-#define RX_DATA_BUFFER_SIZE             256
-#define RX_BDB_SIZE_SHIFT               3       /* log2(RX_DATA_BUFFER_SIZE)-log2(sizeof(BDBlock)) */
-#define RX_BDB_SIZE_MASK                (sizeof(BDBlock) - 1)
-#define RX_DATA_BUFFER_SIZE_MASK        (RX_DATA_BUFFER_SIZE-1)
-
-#define NUM_OF_INTERRUPTS               0x20
-
-#define NOT_TRANSMITING                 0
-#define TRANSMITING			1
-
-#define TRC_INTERRUPT_ENABLE_MASK       0x7FF6
-
-#define UCODE_VERSION                   0x58
-
-#define UCODE_SIZE_OFFSET               0x0000  /* WORD */
-#define UCODE_CHECKSUM_OFFSET           0x0002  /* WORD */
-#define UCODE_VERSION_OFFSET            0x0004  /* BYTE */
-
-#define CS_RAM_SIZE                     0X2000
-#define CS_RAM_CHECKSUM_OFFSET          0x1FFE  /* WORD 1FFE(MSB)-1FFF(LSB)*/
-#define CS_RAM_VERSION_OFFSET           0x1FFC  /* WORD 1FFC(MSB)-1FFD(LSB)*/
-
-#define MISC_DATA_SIZE                  128
-#define NUM_OF_ACBS                     1
-
-#define ACB_COMMAND_NOT_DONE            0x0000  /* Init, command not done */
-#define ACB_COMMAND_DONE                0x8000  /* TRC says command done */
-#define ACB_COMMAND_STATUS_MASK         0x00FF  /* low byte is status */
-#define ACB_COMMAND_SUCCESSFUL          0x0000  /* means cmd was successful */
-#define ACB_NOT_CHAIN_END               0x0000  /* tell TRC more CBs in chain */
-#define ACB_CHAIN_END                   0x8000  /* tell TRC last CB in chain */
-#define ACB_COMMAND_NO_INTERRUPT        0x0000  /* tell TRC no INT after CB */
-#define ACB_COMMAND_INTERRUPT           0x2000  /* tell TRC to INT after CB */
-#define ACB_SUB_CMD_NOP                 0x0000
-#define ACB_CMD_HIC_NOP                 0x0080
-#define ACB_CMD_MCT_NOP                 0x0000
-#define ACB_CMD_MCT_TEST                0x0001
-#define ACB_CMD_HIC_TEST                0x0081
-#define ACB_CMD_INSERT                  0x0002
-#define ACB_CMD_REMOVE                  0x0003
-#define ACB_CMD_MCT_WRITE_VALUE         0x0004
-#define ACB_CMD_HIC_WRITE_VALUE         0x0084
-#define ACB_CMD_MCT_READ_VALUE          0x0005
-#define ACB_CMD_HIC_READ_VALUE          0x0085
-#define ACB_CMD_INIT_TX_RX              0x0086
-#define ACB_CMD_INIT_TRC_TIMERS         0x0006
-#define ACB_CMD_READ_TRC_STATUS         0x0007
-#define ACB_CMD_CHANGE_JOIN_STATE       0x0008
-#define ACB_CMD_RESERVED_9              0x0009
-#define ACB_CMD_RESERVED_A              0x000A
-#define ACB_CMD_RESERVED_B              0x000B
-#define ACB_CMD_RESERVED_C              0x000C
-#define ACB_CMD_RESERVED_D              0x000D
-#define ACB_CMD_RESERVED_E              0x000E
-#define ACB_CMD_RESERVED_F              0x000F
-
-#define TRC_MAC_REGISTERS_TEST          0x0000
-#define TRC_INTERNAL_LOOPBACK           0x0001
-#define TRC_TRI_LOOPBACK                0x0002
-#define TRC_INTERNAL_ROM_TEST           0x0003
-#define TRC_LOBE_MEDIA_TEST             0x0004
-#define TRC_ANALOG_TEST                 0x0005
-#define TRC_HOST_INTERFACE_REG_TEST     0x0003
-
-#define TEST_DMA_1                      0x0000
-#define TEST_DMA_2                      0x0001
-#define TEST_MCT_ROM                    0x0002
-#define HIC_INTERNAL_DIAG               0x0003
-
-#define ABORT_TRANSMIT_PRIORITY_0       0x0001
-#define ABORT_TRANSMIT_PRIORITY_1       0x0002
-#define ABORT_TRANSMIT_PRIORITY_2       0x0004
-#define ABORT_TRANSMIT_PRIORITY_3       0x0008
-#define ABORT_TRANSMIT_PRIORITY_4       0x0010
-#define ABORT_TRANSMIT_PRIORITY_5       0x0020
-#define ABORT_TRANSMIT_PRIORITY_6       0x0040
-#define ABORT_TRANSMIT_PRIORITY_7       0x0080
-
-#define TX_PENDING_PRIORITY_0           0x0001
-#define TX_PENDING_PRIORITY_1           0x0002
-#define TX_PENDING_PRIORITY_2           0x0004
-#define TX_PENDING_PRIORITY_3           0x0008
-#define TX_PENDING_PRIORITY_4           0x0010
-#define TX_PENDING_PRIORITY_5           0x0020
-#define TX_PENDING_PRIORITY_6           0x0040
-#define TX_PENDING_PRIORITY_7           0x0080
-
-#define FCB_FRAME_LENGTH                0x100
-#define FCB_COMMAND_DONE                0x8000  /* FCB Word 0 */
-#define FCB_NOT_CHAIN_END               0x0000  /* FCB Word 1 */
-#define FCB_CHAIN_END                   0x8000
-#define FCB_NO_WARNING                  0x0000
-#define FCB_WARNING                     0x4000
-#define FCB_INTERRUPT_DISABLE           0x0000
-#define FCB_INTERRUPT_ENABLE            0x2000
-
-#define FCB_ENABLE_IMA                  0x0008
-#define FCB_ENABLE_TES                  0x0004  /* Guarantee Tx before Int */
-#define FCB_ENABLE_TFS                  0x0002  /* Post Tx Frame Status */
-#define FCB_ENABLE_NTC                  0x0001  /* No Tx CRC */
-
-#define FCB_TX_STATUS_CR2               0x0004
-#define FCB_TX_STATUS_AR2               0x0008
-#define FCB_TX_STATUS_CR1               0x0040
-#define FCB_TX_STATUS_AR1               0x0080
-#define FCB_TX_AC_BITS                  (FCB_TX_STATUS_AR1+FCB_TX_STATUS_AR2+FCB_TX_STATUS_CR1+FCB_TX_STATUS_CR2)
-#define FCB_TX_STATUS_E                 0x0100
-
-#define FCB_RX_STATUS_ANY_ERROR         0x0001
-#define FCB_RX_STATUS_FCS_ERROR         0x0002
-
-#define FCB_RX_STATUS_IA_MATCHED        0x0400
-#define FCB_RX_STATUS_IGA_BSGA_MATCHED  0x0500
-#define FCB_RX_STATUS_FA_MATCHED        0x0600
-#define FCB_RX_STATUS_BA_MATCHED        0x0700
-#define FCB_RX_STATUS_DA_MATCHED        0x0400
-#define FCB_RX_STATUS_SOURCE_ROUTING    0x0800
-
-#define BDB_BUFFER_SIZE                 0x100
-#define BDB_NOT_CHAIN_END               0x0000
-#define BDB_CHAIN_END                   0x8000
-#define BDB_NO_WARNING                  0x0000
-#define BDB_WARNING                     0x4000
-
-#define ERROR_COUNTERS_CHANGED          0x0001
-#define TI_NDIS_RING_STATUS_CHANGED     0x0002
-#define UNA_CHANGED                     0x0004
-#define READY_TO_SEND_RQ_INIT           0x0008
-
-#define SCGB_ADDRESS_POINTER_FORMAT     INTEL_ADDRESS_POINTER_FORMAT
-#define SCGB_DATA_FORMAT                INTEL_DATA_FORMAT
-#define SCGB_MULTI_WORD_CONTROL         0
-#define SCGB_BURST_LENGTH               0x000E  /* DMA Burst Length */
-
-#define SCGB_CONFIG                     (INTEL_ADDRESS_POINTER_FORMAT+INTEL_DATA_FORMAT+SCGB_BURST_LENGTH)
-
-#define ISCP_BLOCK_SIZE                 0x0A
-#define RAM_SIZE                        0x10000
-#define INIT_SYS_CONFIG_PTR_OFFSET      (RAM_SIZE-ISCP_BLOCK_SIZE)
-#define SCGP_BLOCK_OFFSET               0
-
-#define SCLB_NOT_VALID                  0x0000  /* Initially, SCLB not valid */
-#define SCLB_VALID                      0x8000  /* Host tells TRC SCLB valid */
-#define SCLB_PROCESSED                  0x0000  /* TRC says SCLB processed */
-#define SCLB_RESUME_CONTROL_NOT_VALID   0x0000  /* Initially, RC not valid */
-#define SCLB_RESUME_CONTROL_VALID       0x4000  /* Host tells TRC RC valid */
-#define SCLB_IACK_CODE_NOT_VALID        0x0000  /* Initially, IACK not valid */
-#define SCLB_IACK_CODE_VALID            0x2000  /* Host tells TRC IACK valid */
-#define SCLB_CMD_NOP                    0x0000
-#define SCLB_CMD_REMOVE                 0x0001
-#define SCLB_CMD_SUSPEND_ACB_CHAIN      0x0002
-#define SCLB_CMD_SET_INTERRUPT_MASK     0x0003
-#define SCLB_CMD_CLEAR_INTERRUPT_MASK   0x0004
-#define SCLB_CMD_RESERVED_5             0x0005
-#define SCLB_CMD_RESERVED_6             0x0006
-#define SCLB_CMD_RESERVED_7             0x0007
-#define SCLB_CMD_RESERVED_8             0x0008
-#define SCLB_CMD_RESERVED_9             0x0009
-#define SCLB_CMD_RESERVED_A             0x000A
-#define SCLB_CMD_RESERVED_B             0x000B
-#define SCLB_CMD_RESERVED_C             0x000C
-#define SCLB_CMD_RESERVED_D             0x000D
-#define SCLB_CMD_RESERVED_E             0x000E
-#define SCLB_CMD_RESERVED_F             0x000F
-
-#define SCLB_RC_ACB                     0x0001  /* Action Command Block Chain */
-#define SCLB_RC_RES0                    0x0002  /* Always Zero */
-#define SCLB_RC_RES1                    0x0004  /* Always Zero */
-#define SCLB_RC_RES2                    0x0008  /* Always Zero */
-#define SCLB_RC_RX_MAC_FCB              0x0010  /* RX_MAC_FCB Chain */
-#define SCLB_RC_RX_MAC_BDB              0x0020  /* RX_MAC_BDB Chain */
-#define SCLB_RC_RX_NON_MAC_FCB          0x0040  /* RX_NON_MAC_FCB Chain */
-#define SCLB_RC_RX_NON_MAC_BDB          0x0080  /* RX_NON_MAC_BDB Chain */
-#define SCLB_RC_TFCB0                   0x0100  /* TX Priority 0 FCB Chain */
-#define SCLB_RC_TFCB1                   0x0200  /* TX Priority 1 FCB Chain */
-#define SCLB_RC_TFCB2                   0x0400  /* TX Priority 2 FCB Chain */
-#define SCLB_RC_TFCB3                   0x0800  /* TX Priority 3 FCB Chain */
-#define SCLB_RC_TFCB4                   0x1000  /* TX Priority 4 FCB Chain */
-#define SCLB_RC_TFCB5                   0x2000  /* TX Priority 5 FCB Chain */
-#define SCLB_RC_TFCB6                   0x4000  /* TX Priority 6 FCB Chain */
-#define SCLB_RC_TFCB7                   0x8000  /* TX Priority 7 FCB Chain */
-
-#define SCLB_IMC_RES0                   0x0001  /* */
-#define SCLB_IMC_MAC_TYPE_3             0x0002  /* MAC_ARC_INDICATE */
-#define SCLB_IMC_MAC_ERROR_COUNTERS     0x0004  /* */
-#define SCLB_IMC_RES1                   0x0008  /* */
-#define SCLB_IMC_MAC_TYPE_2             0x0010  /* QUE_MAC_INDICATE */
-#define SCLB_IMC_TX_FRAME               0x0020  /* */
-#define SCLB_IMC_END_OF_TX_QUEUE        0x0040  /* */
-#define SCLB_IMC_NON_MAC_RX_RESOURCE    0x0080  /* */
-#define SCLB_IMC_MAC_RX_RESOURCE        0x0100  /* */
-#define SCLB_IMC_NON_MAC_RX_FRAME       0x0200  /* */
-#define SCLB_IMC_MAC_RX_FRAME           0x0400  /* */
-#define SCLB_IMC_TRC_FIFO_STATUS        0x0800  /* */
-#define SCLB_IMC_COMMAND_STATUS         0x1000  /* */
-#define SCLB_IMC_MAC_TYPE_1             0x2000  /* Self Removed */
-#define SCLB_IMC_TRC_INTRNL_TST_STATUS  0x4000  /* */
-#define SCLB_IMC_RES2                   0x8000  /* */
-
-#define DMA_TRIGGER                     0x0004
-#define FREQ_16MB_BIT                   0x0010
-#define THDREN                          0x0020
-#define CFG0_RSV1                       0x0040
-#define CFG0_RSV2                       0x0080
-#define ETREN                           0x0100
-#define RX_OWN_BIT                      0x0200
-#define RXATMAC                         0x0400
-#define PROMISCUOUS_BIT                 0x0800
-#define USETPT                          0x1000
-#define SAVBAD_BIT                      0x2000
-#define ONEQUE                          0x4000
-#define NO_AUTOREMOVE                   0x8000
-
-#define RX_FCB_AREA_8316        0x00000000
-#define RX_BUFF_AREA_8316       0x00000000
-
-#define TRC_POINTER(X)          ((unsigned long)(X) - tp->ram_access)
-#define RX_FCB_TRC_POINTER(X)   ((unsigned long)(X) - tp->ram_access + RX_FCB_AREA_8316)
-#define RX_BUFF_TRC_POINTER(X) ((unsigned long)(X) - tp->ram_access + RX_BUFF_AREA_8316)
-
-// Offset 0: MSR - Memory Select Register
-//
-#define r587_MSR        0x000   // Register Offset
-//#define       MSR_RST         0x080   // LAN Controller Reset
-#define MSR_MENB        0x040   // Shared Memory Enable
-#define MSR_RA18        0x020   // Ram Address bit 18   (583, 584, 587)
-#define MSR_RA17        0x010   // Ram Address bit 17   (583, 584, 585/790)
-#define MSR_RA16        0x008   // Ram Address bit 16   (583, 584, 585/790)
-#define MSR_RA15        0x004   // Ram Address bit 15   (583, 584, 585/790)
-#define MSR_RA14        0x002   // Ram Address bit 14   (583, 584, 585/790)
-#define MSR_RA13        0x001   // Ram Address bit 13   (583, 584, 585/790)
-
-#define MSR_MASK        0x03F   // Mask for Address bits RA18-RA13 (583, 584, 587)
-
-#define MSR                     0x00
-#define IRR                     0x04
-#define HWR                     0x04
-#define LAAR                    0x05
-#define IMCCR                   0x05
-#define LAR0                    0x08
-#define BDID                    0x0E    // Adapter ID byte register offset
-#define CSR                     0x10
-#define PR                      0x11
-
-#define MSR_RST                 0x80
-#define MSR_MEMB                0x40
-#define MSR_0WS                 0x20
-
-#define FORCED_16BIT_MODE       0x0002
-
-#define INTERFRAME_SPACING_16           0x0003  /* 6 bytes */
-#define INTERFRAME_SPACING_4            0x0001  /* 2 bytes */
-#define MULTICAST_ADDRESS_BIT           0x0010
-#define NON_SRC_ROUTING_BIT             0x0020
-
-#define LOOPING_MODE_MASK       0x0007
-
-/*
- * Decode firmware defines.
- */
-#define SWAP_BYTES(X)		((X & 0xff) << 8) | (X >> 8)
-#define WEIGHT_OFFSET		5
-#define TREE_SIZE_OFFSET	9
-#define TREE_OFFSET		11
-
-/* The Huffman Encoding Tree is constructed of these nodes. */
-typedef struct {
-	__u8	llink;	/* Short version of above node. */
-	__u8	tag;
-	__u8	info;	/* This node is used on decodes. */
-	__u8	rlink;
-} DECODE_TREE_NODE;
-
-#define ROOT	0	/* Branch value. */
-#define LEAF	0	/* Tag field value. */
-#define BRANCH	1	/* Tag field value. */
-
-/*
- * Multicast Table Structure
- */
-typedef struct {
-        __u8    address[6];
-        __u8    instance_count;
-} McTable;
-
-/*
- * Fragment Descriptor Definition
- */
-typedef struct {
-        __u8  *fragment_ptr;
-        __u32   fragment_length;
-} FragmentStructure;
-
-/*
- * Data Buffer Structure Definition
- */
-typedef struct {
-        __u32 fragment_count;
-        FragmentStructure       fragment_list[MAXFRAGMENTS];
-} DataBufferStructure;
-
-#pragma pack(1)
-typedef struct {
-                __u8    IType;
-                __u8    ISubtype;
-} Interrupt_Status_Word;
-
-#pragma pack(1)
-typedef struct BDBlockType {
-                __u16                   info;                   /* 02 */
-                __u32                   trc_next_ptr;           /* 06 */
-                __u32                   trc_data_block_ptr;     /* 10 */
-                __u16                   buffer_length;          /* 12 */
-
-                __u16                   *data_block_ptr;        /* 16 */
-                struct  BDBlockType     *next_ptr;              /* 20 */
-                struct  BDBlockType     *back_ptr;              /* 24 */
-                __u8                    filler[8];              /* 32 */
-} BDBlock;
-
-#pragma pack(1)
-typedef struct FCBlockType {
-                __u16                   frame_status;           /* 02 */
-                __u16                   info;                   /* 04 */
-                __u32                   trc_next_ptr;           /* 08 */
-                __u32                   trc_bdb_ptr;            /* 12 */
-                __u16                   frame_length;           /* 14 */
-
-                BDBlock                 *bdb_ptr;               /* 18 */
-                struct  FCBlockType     *next_ptr;              /* 22 */
-                struct  FCBlockType     *back_ptr;              /* 26 */
-                __u16                   memory_alloc;           /* 28 */
-                __u8                    filler[4];              /* 32 */
-
-} FCBlock;
-
-#pragma pack(1)
-typedef struct SBlockType{
-                __u8                           Internal_Error_Count;
-                __u8                           Line_Error_Count;
-                __u8                           AC_Error_Count;
-                __u8                           Burst_Error_Count;
-                __u8                            RESERVED_COUNTER_0;
-                __u8                            AD_TRANS_Count;
-                __u8                            RCV_Congestion_Count;
-                __u8                            Lost_FR_Error_Count;
-                __u8                            FREQ_Error_Count;
-                __u8                            FR_Copied_Error_Count;
-                __u8                            RESERVED_COUNTER_1;
-                __u8                            Token_Error_Count;
-
-                __u16                           TI_NDIS_Ring_Status;
-                __u16                           BCN_Type;
-                __u16                           Error_Code;
-                __u16                           SA_of_Last_AMP_SMP[3];
-                __u16                           UNA[3];
-                __u16                           Ucode_Version_Number;
-                __u16                           Status_CHG_Indicate;
-                __u16                           RESERVED_STATUS_0;
-} SBlock;
-
-#pragma pack(1)
-typedef struct ACBlockType {
-                __u16                   cmd_done_status;    /* 02 */
-                __u16                   cmd_info;           /* 04 */
-                __u32                   trc_next_ptr;           /* 08 */
-                __u16                   cmd;                /* 10 */
-                __u16                   subcmd;             /* 12 */
-                __u16                   data_offset_lo;         /* 14 */
-                __u16                   data_offset_hi;         /* 16 */
-
-                struct  ACBlockType     *next_ptr;              /* 20 */
-
-                __u8                    filler[12];             /* 32 */
-} ACBlock;
-
-#define NUM_OF_INTERRUPTS               0x20
-
-#pragma pack(1)
-typedef struct {
-                Interrupt_Status_Word   IStatus[NUM_OF_INTERRUPTS];
-} ISBlock;
-
-#pragma pack(1)
-typedef struct {
-                __u16                   valid_command;          /* 02 */
-                __u16                   iack_code;              /* 04 */
-                __u16                   resume_control;         /* 06 */
-                __u16                   int_mask_control;       /* 08 */
-                __u16                   int_mask_state;         /* 10 */
-
-                __u8                    filler[6];              /* 16 */
-} SCLBlock;
-
-#pragma pack(1)
-typedef struct
-{
-                __u16                   config;                 /* 02 */
-                __u32                   trc_sclb_ptr;           /* 06 */
-                __u32                   trc_acb_ptr;            /* 10 */
-                __u32                   trc_isb_ptr;            /* 14 */
-                __u16                   isbsiz;                 /* 16 */
-
-                SCLBlock                *sclb_ptr;              /* 20 */
-                ACBlock                 *acb_ptr;               /* 24 */
-                ISBlock                 *isb_ptr;               /* 28 */
-
-                __u16                   Non_Mac_Rx_Bdbs;        /* 30 DEBUG */
-                __u8                    filler[2];              /* 32 */
-
-} SCGBlock;
-
-#pragma pack(1)
-typedef struct
-{
-	__u32		trc_scgb_ptr;
-	SCGBlock	*scgb_ptr;
-} ISCPBlock;
-#pragma pack()
-
-typedef struct net_local {
-	ISCPBlock       *iscpb_ptr;
-        SCGBlock        *scgb_ptr;
-        SCLBlock        *sclb_ptr;
-        ISBlock         *isb_ptr;
-
-	ACBlock         *acb_head;
-        ACBlock         *acb_curr;
-        ACBlock         *acb_next;
-
-	__u8		adapter_name[12];
-
-	__u16		num_rx_bdbs	[NUM_RX_QS_USED];
-	__u16		num_rx_fcbs	[NUM_RX_QS_USED];
-
-	__u16		num_tx_bdbs	[NUM_TX_QS_USED];
-	__u16		num_tx_fcbs	[NUM_TX_QS_USED];
-
-	__u16		num_of_tx_buffs;
-
-	__u16		tx_buff_size	[NUM_TX_QS_USED];
-	__u16		tx_buff_used	[NUM_TX_QS_USED];
-	__u16		tx_queue_status	[NUM_TX_QS_USED];
-
-	FCBlock		*tx_fcb_head[NUM_TX_QS_USED];
-	FCBlock		*tx_fcb_curr[NUM_TX_QS_USED];
-	FCBlock		*tx_fcb_end[NUM_TX_QS_USED];
-	BDBlock		*tx_bdb_head[NUM_TX_QS_USED];
-	__u16		*tx_buff_head[NUM_TX_QS_USED];
-	__u16		*tx_buff_end[NUM_TX_QS_USED];
-	__u16		*tx_buff_curr[NUM_TX_QS_USED];
-	__u16		num_tx_fcbs_used[NUM_TX_QS_USED];
-
-	FCBlock		*rx_fcb_head[NUM_RX_QS_USED];
-	FCBlock		*rx_fcb_curr[NUM_RX_QS_USED];
-	BDBlock		*rx_bdb_head[NUM_RX_QS_USED];
-	BDBlock		*rx_bdb_curr[NUM_RX_QS_USED];
-	BDBlock		*rx_bdb_end[NUM_RX_QS_USED];
-	__u16		*rx_buff_head[NUM_RX_QS_USED];
-	__u16		*rx_buff_end[NUM_RX_QS_USED];
-
-	__u32		*ptr_local_ring_num;
-
-	__u32		sh_mem_used;
-
-	__u16		page_offset_mask;
-
-	__u16		authorized_function_classes;
-	__u16		authorized_access_priority;
-
-        __u16            num_acbs;
-        __u16            num_acbs_used;
-        __u16            acb_pending;
-
-	__u16		current_isb_index;
-
-	__u8            monitor_state;
-	__u8		monitor_state_ready;
-	__u16		ring_status;
-	__u8		ring_status_flags;
-	__u8		state;
-
-	__u8		join_state;
-
-	__u8		slot_num;
-	__u16		pos_id;
-
-	__u32		*ptr_una;
-	__u32		*ptr_bcn_type;
-	__u32		*ptr_tx_fifo_underruns;
-	__u32		*ptr_rx_fifo_underruns;
-	__u32		*ptr_rx_fifo_overruns;
-	__u32		*ptr_tx_fifo_overruns;
-	__u32		*ptr_tx_fcb_overruns;
-	__u32		*ptr_rx_fcb_overruns;
-	__u32		*ptr_tx_bdb_overruns;
-	__u32		*ptr_rx_bdb_overruns;
-
-	__u16		receive_queue_number;
-
-	__u8		rx_fifo_overrun_count;
-	__u8		tx_fifo_overrun_count;
-
-	__u16            adapter_flags;
-	__u16		adapter_flags1;
-	__u16            *misc_command_data;
-	__u16            max_packet_size;
-
-	__u16            config_word0;
-        __u16            config_word1;
-
-	__u8            trc_mask;
-
-	__u16            source_ring_number;
-        __u16            target_ring_number;
-
-	__u16		microcode_version;
-
-	__u16            bic_type;
-        __u16            nic_type;
-        __u16            board_id;
-
-	__u16            rom_size;
-	__u32		rom_base;
-        __u16            ram_size;
-        __u16            ram_usable;
-	__u32		ram_base;
-	__u32		ram_access;
-
-	__u16            extra_info;
-        __u16            mode_bits;
-	__u16		media_menu;
-	__u16		media_type;
-	__u16		adapter_bus;
-
-	__u16		status;
-	__u16            receive_mask;
-
-	__u16            group_address_0;
-        __u16            group_address[2];
-        __u16            functional_address_0;
-        __u16            functional_address[2];
-        __u16            bitwise_group_address[2];
-
-	__u8		cleanup;
-
-	struct sk_buff_head SendSkbQueue;
-        __u16 QueueSkb;
-
-	struct tr_statistics MacStat;   /* MAC statistics structure */
-	
-	spinlock_t	lock;
-} NET_LOCAL;
-
-/************************************
- * SNMP-ON-BOARD Agent Link Structure
- ************************************/
-
-typedef struct {
-        __u8           LnkSigStr[12]; /* signature string "SmcLinkTable" */
-        __u8           LnkDrvTyp;     /* 1=Redbox ODI, 2=ODI DOS, 3=ODI OS/2, 4=NDIS DOS */
-        __u8           LnkFlg;        /* 0 if no agent linked, 1 if agent linked */
-        void           *LnkNfo;       /* routine which returns pointer to NIC info */
-        void           *LnkAgtRcv;    /* pointer to agent receive trap entry */
-        void           *LnkAgtXmt;    /* pointer to agent transmit trap entry */
-        void           *LnkGet;       /* pointer to NIC receive data copy routine */
-        void           *LnkSnd;       /* pointer to NIC send routine */
-        void           *LnkRst;       /* pointer to NIC driver reset routine */
-        void           *LnkMib;                  /* pointer to MIB data base */
-        void           *LnkMibAct;            /* pointer to MIB action routine list */
-        __u16           LnkCntOffset;  /* offset to error counters */
-        __u16           LnkCntNum;     /* number of error counters */
-        __u16           LnkCntSize;    /* size of error counters i.e. 32 = 32 bits */
-        void           *LnkISR;       /* pointer to interrupt vector */
-        __u8           LnkFrmTyp;     /* 1=Ethernet, 2=Token Ring */
-        __u8           LnkDrvVer1 ;   /* driver major version */
-        __u8           LnkDrvVer2 ;   /* driver minor version */
-} AgentLink;
-
-/*
- * Definitions for pcm_card_flags(bit_mapped)
- */
-#define REG_COMPLETE   0x0001
-#define INSERTED       0x0002
-#define PCC_INSERTED   0x0004         /* 1=currently inserted, 0=cur removed */
-
-/*
- * Adapter RAM test patterns
- */
-#define RAM_PATTERN_1  0x55AA
-#define RAM_PATTERN_2  0x9249
-#define RAM_PATTERN_3  0xDB6D
-
-/*
- * definitions for RAM test
- */
-#define ROM_SIGNATURE  0xAA55
-#define MIN_ROM_SIZE   0x2000
-
-/*
- * Return Codes
- */
-#define SUCCESS                 0x0000
-#define ADAPTER_AND_CONFIG      0x0001
-#define ADAPTER_NO_CONFIG       0x0002
-#define NOT_MY_INTERRUPT        0x0003
-#define FRAME_REJECTED          0x0004
-#define EVENTS_DISABLED         0x0005
-#define OUT_OF_RESOURCES        0x0006
-#define INVALID_PARAMETER       0x0007
-#define INVALID_FUNCTION        0x0008
-#define INITIALIZE_FAILED       0x0009
-#define CLOSE_FAILED            0x000A
-#define MAX_COLLISIONS          0x000B
-#define NO_SUCH_DESTINATION     0x000C
-#define BUFFER_TOO_SMALL_ERROR  0x000D
-#define ADAPTER_CLOSED          0x000E
-#define UCODE_NOT_PRESENT       0x000F
-#define FIFO_UNDERRUN           0x0010
-#define DEST_OUT_OF_RESOURCES   0x0011
-#define ADAPTER_NOT_INITIALIZED 0x0012
-#define PENDING                 0x0013
-#define UCODE_PRESENT           0x0014
-#define NOT_INIT_BY_BRIDGE      0x0015
-
-#define OPEN_FAILED             0x0080
-#define HARDWARE_FAILED         0x0081
-#define SELF_TEST_FAILED        0x0082
-#define RAM_TEST_FAILED         0x0083
-#define RAM_CONFLICT            0x0084
-#define ROM_CONFLICT            0x0085
-#define UNKNOWN_ADAPTER         0x0086
-#define CONFIG_ERROR            0x0087
-#define CONFIG_WARNING          0x0088
-#define NO_FIXED_CNFG           0x0089
-#define EEROM_CKSUM_ERROR       0x008A
-#define ROM_SIGNATURE_ERROR     0x008B
-#define ROM_CHECKSUM_ERROR      0x008C
-#define ROM_SIZE_ERROR          0x008D
-#define UNSUPPORTED_NIC_CHIP    0x008E
-#define NIC_REG_ERROR           0x008F
-#define BIC_REG_ERROR           0x0090
-#define MICROCODE_TEST_ERROR    0x0091
-#define LOBE_MEDIA_TEST_FAILED  0x0092
-
-#define ADAPTER_FOUND_LAN_CORRUPT 0x009B
-
-#define ADAPTER_NOT_FOUND       0xFFFF
-
-#define ILLEGAL_FUNCTION        INVALID_FUNCTION
-
-/* Errors */
-#define IO_BASE_INVALID         0x0001
-#define IO_BASE_RANGE           0x0002
-#define IRQ_INVALID             0x0004
-#define IRQ_RANGE               0x0008
-#define RAM_BASE_INVALID        0x0010
-#define RAM_BASE_RANGE          0x0020
-#define RAM_SIZE_RANGE          0x0040
-#define MEDIA_INVALID           0x0800
-
-/* Warnings */
-#define IRQ_MISMATCH            0x0080
-#define RAM_BASE_MISMATCH       0x0100
-#define RAM_SIZE_MISMATCH       0x0200
-#define BUS_MODE_MISMATCH       0x0400
-
-#define RX_CRC_ERROR                            0x01
-#define RX_ALIGNMENT_ERROR              0x02
-#define RX_HW_FAILED                            0x80
-
-/*
- * Definitions for the field RING_STATUS_FLAGS
- */
-#define RING_STATUS_CHANGED                     0X01
-#define MONITOR_STATE_CHANGED                   0X02
-#define JOIN_STATE_CHANGED                      0X04
-
-/*
- * Definitions for the field JOIN_STATE
- */
-#define JS_BYPASS_STATE                         0x00
-#define JS_LOBE_TEST_STATE                      0x01
-#define JS_DETECT_MONITOR_PRESENT_STATE         0x02
-#define JS_AWAIT_NEW_MONITOR_STATE              0x03
-#define JS_DUPLICATE_ADDRESS_TEST_STATE         0x04
-#define JS_NEIGHBOR_NOTIFICATION_STATE          0x05
-#define JS_REQUEST_INITIALIZATION_STATE         0x06
-#define JS_JOIN_COMPLETE_STATE                  0x07
-#define JS_BYPASS_WAIT_STATE                    0x08
-
-/*
- * Definitions for the field MONITOR_STATE
- */
-#define MS_MONITOR_FSM_INACTIVE                 0x00
-#define MS_REPEAT_BEACON_STATE                  0x01
-#define MS_REPEAT_CLAIM_TOKEN_STATE             0x02
-#define MS_TRANSMIT_CLAIM_TOKEN_STATE           0x03
-#define MS_STANDBY_MONITOR_STATE                0x04
-#define MS_TRANSMIT_BEACON_STATE                0x05
-#define MS_ACTIVE_MONITOR_STATE                 0x06
-#define MS_TRANSMIT_RING_PURGE_STATE            0x07
-#define MS_BEACON_TEST_STATE                    0x09
-
-/*
- * Definitions for the bit-field RING_STATUS
- */
-#define SIGNAL_LOSS                             0x8000
-#define HARD_ERROR                              0x4000
-#define SOFT_ERROR                              0x2000
-#define TRANSMIT_BEACON                         0x1000
-#define LOBE_WIRE_FAULT                         0x0800
-#define AUTO_REMOVAL_ERROR                      0x0400
-#define REMOVE_RECEIVED                         0x0100
-#define COUNTER_OVERFLOW                        0x0080
-#define SINGLE_STATION                          0x0040
-#define RING_RECOVERY                           0x0020
-
-/*
- * Definitions for the field BUS_TYPE
- */
-#define AT_BUS                  0x00
-#define MCA_BUS                 0x01
-#define EISA_BUS                0x02
-#define PCI_BUS                 0x03
-#define PCMCIA_BUS              0x04
-
-/*
- * Definitions for adapter_flags
- */
-#define RX_VALID_LOOKAHEAD      0x0001
-#define FORCED_16BIT_MODE       0x0002
-#define ADAPTER_DISABLED        0x0004
-#define TRANSMIT_CHAIN_INT      0x0008
-#define EARLY_RX_FRAME          0x0010
-#define EARLY_TX                0x0020
-#define EARLY_RX_COPY           0x0040
-#define USES_PHYSICAL_ADDR      0x0080		/* Rsvd for DEC PCI and 9232 */
-#define NEEDS_PHYSICAL_ADDR  	0x0100       	/* Reserved*/
-#define RX_STATUS_PENDING       0x0200
-#define ERX_DISABLED         	0x0400       	/* EARLY_RX_ENABLE rcv_mask */
-#define ENABLE_TX_PENDING       0x0800
-#define ENABLE_RX_PENDING       0x1000
-#define PERM_CLOSE              0x2000  
-#define IO_MAPPED               0x4000  	/* IOmapped bus interface 795 */
-#define ETX_DISABLED            0x8000
-
-
-/*
- * Definitions for adapter_flags1
- */
-#define TX_PHY_RX_VIRT          0x0001 
-#define NEEDS_HOST_RAM          0x0002
-#define NEEDS_MEDIA_TYPE        0x0004
-#define EARLY_RX_DONE           0x0008
-#define PNP_BOOT_BIT            0x0010  /* activates PnP & config on power-up */
-                                        /* clear => regular PnP operation */
-#define PNP_ENABLE              0x0020  /* regular PnP operation; clear => */
-                                        /* no PnP, overrides PNP_BOOT_BIT */
-#define SATURN_ENABLE           0x0040
-
-#define ADAPTER_REMOVABLE       0x0080 	/* adapter is hot swappable */
-#define TX_PHY                  0x0100  /* Uses physical address for tx bufs */
-#define RX_PHY                  0x0200  /* Uses physical address for rx bufs */
-#define TX_VIRT                 0x0400  /* Uses virtual addr for tx bufs */
-#define RX_VIRT                 0x0800 
-#define NEEDS_SERVICE           0x1000 
-
-/*
- * Adapter Status Codes
- */
-#define OPEN                    0x0001
-#define INITIALIZED             0x0002
-#define CLOSED                  0x0003
-#define FAILED                  0x0005
-#define NOT_INITIALIZED         0x0006
-#define IO_CONFLICT             0x0007
-#define CARD_REMOVED            0x0008
-#define CARD_INSERTED           0x0009
-
-/*
- * Mode Bit Definitions
- */
-#define INTERRUPT_STATUS_BIT    0x8000  /* PC Interrupt Line: 0 = Not Enabled */
-#define BOOT_STATUS_MASK        0x6000  /* Mask to isolate BOOT_STATUS */
-#define BOOT_INHIBIT            0x0000  /* BOOT_STATUS is 'inhibited' */
-#define BOOT_TYPE_1             0x2000  /* Unused BOOT_STATUS value */
-#define BOOT_TYPE_2             0x4000  /* Unused BOOT_STATUS value */
-#define BOOT_TYPE_3             0x6000  /* Unused BOOT_STATUS value */
-#define ZERO_WAIT_STATE_MASK    0x1800  /* Mask to isolate Wait State flags */
-#define ZERO_WAIT_STATE_8_BIT   0x1000  /* 0 = Disabled (Inserts Wait States) */
-#define ZERO_WAIT_STATE_16_BIT  0x0800  /* 0 = Disabled (Inserts Wait States) */
-#define LOOPING_MODE_MASK       0x0007
-#define LOOPBACK_MODE_0         0x0000
-#define LOOPBACK_MODE_1         0x0001
-#define LOOPBACK_MODE_2         0x0002
-#define LOOPBACK_MODE_3         0x0003
-#define LOOPBACK_MODE_4         0x0004
-#define LOOPBACK_MODE_5         0x0005
-#define LOOPBACK_MODE_6         0x0006
-#define LOOPBACK_MODE_7         0x0007
-#define AUTO_MEDIA_DETECT       0x0008
-#define MANUAL_CRC              0x0010
-#define EARLY_TOKEN_REL         0x0020  /* Early Token Release for Token Ring */
-#define UMAC               0x0040 
-#define UTP2_PORT               0x0080  /* For 8216T2, 0=port A, 1=Port B. */
-#define BNC_10BT_INTERFACE      0x0600  /* BNC and UTP current media set */
-#define UTP_INTERFACE           0x0500  /* Ethernet UTP Only. */
-#define BNC_INTERFACE           0x0400
-#define AUI_INTERFACE           0x0300
-#define AUI_10BT_INTERFACE      0x0200
-#define STARLAN_10_INTERFACE    0x0100
-#define INTERFACE_TYPE_MASK     0x0700
-
-/*
- * Media Type Bit Definitions
- *
- * legend:      TP = Twisted Pair
- *              STP = Shielded twisted pair
- *              UTP = Unshielded twisted pair
- */
-
-#define CNFG_MEDIA_TYPE_MASK    0x001e  /* POS Register 3 Mask         */
-
-#define MEDIA_S10               0x0000  /* Ethernet adapter, TP.        */
-#define MEDIA_AUI_UTP           0x0001  /* Ethernet adapter, AUI/UTP media */
-#define MEDIA_BNC               0x0002  /* Ethernet adapter, BNC media. */
-#define MEDIA_AUI               0x0003  /* Ethernet Adapter, AUI media. */
-#define MEDIA_STP_16            0x0004  /* TokenRing adap, 16Mbit STP.  */
-#define MEDIA_STP_4             0x0005  /* TokenRing adap, 4Mbit STP.   */
-#define MEDIA_UTP_16            0x0006  /* TokenRing adap, 16Mbit UTP.  */
-#define MEDIA_UTP_4             0x0007  /* TokenRing adap, 4Mbit UTP.   */
-#define MEDIA_UTP               0x0008  /* Ethernet adapter, UTP media (no AUI) */
-#define MEDIA_BNC_UTP           0x0010  /* Ethernet adapter, BNC/UTP media */
-#define MEDIA_UTPFD             0x0011  /* Ethernet adapter, TP full duplex */
-#define MEDIA_UTPNL             0x0012  /* Ethernet adapter, TP with link integrity test disabled */
-#define MEDIA_AUI_BNC           0x0013  /* Ethernet adapter, AUI/BNC media */
-#define MEDIA_AUI_BNC_UTP       0x0014  /* Ethernet adapter, AUI_BNC/UTP */
-#define MEDIA_UTPA              0x0015  /* Ethernet UTP-10Mbps Ports A */
-#define MEDIA_UTPB              0x0016  /* Ethernet UTP-10Mbps Ports B */
-#define MEDIA_STP_16_UTP_16     0x0017  /* Token Ring STP-16Mbps/UTP-16Mbps */
-#define MEDIA_STP_4_UTP_4       0x0018  /* Token Ring STP-4Mbps/UTP-4Mbps */
-
-#define MEDIA_STP100_UTP100     0x0020  /* Ethernet STP-100Mbps/UTP-100Mbps */
-#define MEDIA_UTP100FD          0x0021  /* Ethernet UTP-100Mbps, full duplex */
-#define MEDIA_UTP100            0x0022  /* Ethernet UTP-100Mbps */
-
-
-#define MEDIA_UNKNOWN           0xFFFF  /* Unknown adapter/media type   */
-
-/*
- * Definitions for the field:
- * media_type2
- */
-#define MEDIA_TYPE_MII              0x0001
-#define MEDIA_TYPE_UTP              0x0002
-#define MEDIA_TYPE_BNC              0x0004
-#define MEDIA_TYPE_AUI              0x0008
-#define MEDIA_TYPE_S10              0x0010
-#define MEDIA_TYPE_AUTO_SENSE       0x1000
-#define MEDIA_TYPE_AUTO_DETECT      0x4000
-#define MEDIA_TYPE_AUTO_NEGOTIATE   0x8000
-
-/*
- * Definitions for the field:
- * line_speed
- */
-#define LINE_SPEED_UNKNOWN          0x0000
-#define LINE_SPEED_4                0x0001
-#define LINE_SPEED_10               0x0002
-#define LINE_SPEED_16               0x0004
-#define LINE_SPEED_100              0x0008
-#define LINE_SPEED_T4               0x0008  /* 100BaseT4 aliased for 9332BVT */
-#define LINE_SPEED_FULL_DUPLEX      0x8000
-
-/*
- * Definitions for the field:
- * bic_type (Bus interface chip type)
- */
-#define BIC_NO_CHIP             0x0000  /* Bus interface chip not implemented */
-#define BIC_583_CHIP            0x0001  /* 83C583 bus interface chip */
-#define BIC_584_CHIP            0x0002  /* 83C584 bus interface chip */
-#define BIC_585_CHIP            0x0003  /* 83C585 bus interface chip */
-#define BIC_593_CHIP            0x0004  /* 83C593 bus interface chip */
-#define BIC_594_CHIP            0x0005  /* 83C594 bus interface chip */
-#define BIC_564_CHIP            0x0006  /* PCMCIA Bus interface chip */
-#define BIC_790_CHIP            0x0007  /* 83C790 bus i-face/Ethernet NIC chip */
-#define BIC_571_CHIP            0x0008  /* 83C571 EISA bus master i-face */
-#define BIC_587_CHIP            0x0009  /* Token Ring AT bus master i-face */
-#define BIC_574_CHIP            0x0010  /* FEAST bus interface chip */
-#define BIC_8432_CHIP           0x0011  /* 8432 bus i-face/Ethernet NIC(DEC PCI) */
-#define BIC_9332_CHIP           0x0012  /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */
-#define BIC_8432E_CHIP          0x0013  /* 8432 Enhanced bus iface/Ethernet NIC(DEC) */
-#define BIC_EPIC100_CHIP        0x0014  /* EPIC/100 10/100 Mbps Ethernet BIC/NIC */
-#define BIC_C94_CHIP            0x0015  /* 91C94 bus i-face in PCMCIA mode */
-#define BIC_X8020_CHIP          0x0016  /* Xilinx PCMCIA multi-func i-face */
-
-/*
- * Definitions for the field:
- * nic_type (Bus interface chip type)
- */
-#define NIC_UNK_CHIP            0x0000  /* Unknown NIC chip      */
-#define NIC_8390_CHIP           0x0001  /* DP8390 Ethernet NIC   */
-#define NIC_690_CHIP            0x0002  /* 83C690 Ethernet NIC   */
-#define NIC_825_CHIP            0x0003  /* 83C825 Token Ring NIC */
-/*      #define NIC_???_CHIP    0x0004  */ /* Not used           */
-/*      #define NIC_???_CHIP    0x0005  */ /* Not used           */
-/*      #define NIC_???_CHIP    0x0006  */ /* Not used           */
-#define NIC_790_CHIP            0x0007  /* 83C790 bus i-face/Ethernet NIC chip */
-#define NIC_C100_CHIP           0x0010  /* FEAST 100Mbps Ethernet NIC */
-#define NIC_8432_CHIP           0x0011  /* 8432 bus i-face/Ethernet NIC(DEC PCI) */
-#define NIC_9332_CHIP           0x0012  /* 9332 bus i-face/100Mbps Ether NIC(DEC PCI) */
-#define NIC_8432E_CHIP          0x0013  /* 8432 enhanced bus iface/Ethernet NIC(DEC) */
-#define NIC_EPIC100_CHIP        0x0014   /* EPIC/100 10/100 Mbps Ethernet BIC/NIC */
-#define NIC_C94_CHIP            0x0015  /* 91C94 PC Card with multi func */
-
-/*
- * Definitions for the field:
- * adapter_type The adapter_type field describes the adapter/bus
- *              configuration.
- */
-#define BUS_ISA16_TYPE          0x0001  /* 16 bit adap in 16 bit (E)ISA slot  */
-#define BUS_ISA8_TYPE           0x0002  /* 8/16b adap in 8 bit XT/(E)ISA slot */
-#define BUS_MCA_TYPE            0x0003  /* Micro Channel adapter              */
-
-/*
- * Receive Mask definitions
- */
-#define ACCEPT_MULTICAST                0x0001
-#define ACCEPT_BROADCAST                0x0002
-#define PROMISCUOUS_MODE                0x0004
-#define ACCEPT_SOURCE_ROUTING           0x0008
-#define ACCEPT_ERR_PACKETS              0x0010
-#define ACCEPT_ATT_MAC_FRAMES           0x0020
-#define ACCEPT_MULTI_PROM               0x0040
-#define TRANSMIT_ONLY                   0x0080
-#define ACCEPT_EXT_MAC_FRAMES           0x0100
-#define EARLY_RX_ENABLE                 0x0200
-#define PKT_SIZE_NOT_NEEDED             0x0400
-#define ACCEPT_SOURCE_ROUTING_SPANNING  0x0808
-
-#define ACCEPT_ALL_MAC_FRAMES           0x0120
-
-/*
- * config_mode defs
- */
-#define STORE_EEROM             0x0001  /* Store config in EEROM. */
-#define STORE_REGS              0x0002  /* Store config in register set. */
-
-/*
- * equates for lmac_flags in adapter structure (Ethernet)
- */
-#define         MEM_DISABLE     0x0001
-#define         RX_STATUS_POLL  0x0002
-#define         USE_RE_BIT      0x0004
-/*#define       RESERVED        0x0008 */
-/*#define       RESERVED        0x0010 */
-/*#define       RESERVED        0x0020 */
-/*#define       RESERVED        0x0040 */
-/*#define       RESERVED        0x0080 */
-/*#define       RESERVED        0x0100 */
-/*#define       RESERVED        0x0200 */
-/*#define       RESERVED        0x0400 */
-/*#define       RESERVED        0x0800 */
-/*#define       RESERVED        0x1000 */
-/*#define       RESERVED        0x2000 */
-/*#define       RESERVED        0x4000 */
-/*#define       RESERVED        0x8000 */
-
-/* media_opts & media_set Fields bit defs for Ethernet ... */
-#define         MED_OPT_BNC     0x01
-#define         MED_OPT_UTP     0x02
-#define         MED_OPT_AUI     0x04
-#define         MED_OPT_10MB    0x08
-#define         MED_OPT_100MB   0x10
-#define         MED_OPT_S10     0x20
-
-/* media_opts & media_set Fields bit defs for Token Ring ... */
-#define         MED_OPT_4MB     0x08
-#define         MED_OPT_16MB    0x10
-#define         MED_OPT_STP     0x40
-
-#define MAX_8023_SIZE           1500    /* Max 802.3 size of frame. */
-#define DEFAULT_ERX_VALUE       4       /* Number of 16-byte blocks for 790B early Rx. */
-#define DEFAULT_ETX_VALUE       32      /* Number of bytes for 790B early Tx. */
-#define DEFAULT_TX_RETRIES      3       /* Number of transmit retries */
-#define LPBK_FRAME_SIZE         1024    /* Default loopback frame for Rx calibration test. */
-#define MAX_LOOKAHEAD_SIZE      252     /* Max lookahead size for ethernet. */
-
-#define RW_MAC_STATE                    0x1101
-#define RW_SA_OF_LAST_AMP_OR_SMP        0x2803
-#define RW_PHYSICAL_DROP_NUMBER         0x3B02
-#define RW_UPSTREAM_NEIGHBOR_ADDRESS    0x3E03
-#define RW_PRODUCT_INSTANCE_ID          0x4B09
-
-#define RW_TRC_STATUS_BLOCK             0x5412
-
-#define RW_MAC_ERROR_COUNTERS_NO_CLEAR  0x8006
-#define RW_MAC_ERROR_COUNTER_CLEAR      0x7A06
-#define RW_CONFIG_REGISTER_0            0xA001
-#define RW_CONFIG_REGISTER_1            0xA101
-#define RW_PRESCALE_TIMER_THRESHOLD     0xA201
-#define RW_TPT_THRESHOLD                0xA301
-#define RW_TQP_THRESHOLD                0xA401
-#define RW_TNT_THRESHOLD                0xA501
-#define RW_TBT_THRESHOLD                0xA601
-#define RW_TSM_THRESHOLD                0xA701
-#define RW_TAM_THRESHOLD                0xA801
-#define RW_TBR_THRESHOLD                0xA901
-#define RW_TER_THRESHOLD                0xAA01
-#define RW_TGT_THRESHOLD                0xAB01
-#define RW_THT_THRESHOLD                0xAC01
-#define RW_TRR_THRESHOLD                0xAD01
-#define RW_TVX_THRESHOLD                0xAE01
-#define RW_INDIVIDUAL_MAC_ADDRESS       0xB003
-
-#define RW_INDIVIDUAL_GROUP_ADDRESS     0xB303  /* all of group addr */
-#define RW_INDIVIDUAL_GROUP_ADDR_WORD_0 0xB301  /* 1st word of group addr */
-#define RW_INDIVIDUAL_GROUP_ADDR        0xB402  /* 2nd-3rd word of group addr */
-#define RW_FUNCTIONAL_ADDRESS           0xB603  /* all of functional addr */
-#define RW_FUNCTIONAL_ADDR_WORD_0       0xB601  /* 1st word of func  addr */
-#define RW_FUNCTIONAL_ADDR              0xB702  /* 2nd-3rd word func addr */
-
-#define RW_BIT_SIGNIFICANT_GROUP_ADDR   0xB902
-#define RW_SOURCE_RING_BRIDGE_NUMBER    0xBB01
-#define RW_TARGET_RING_NUMBER           0xBC01
-
-#define RW_HIC_INTERRUPT_MASK           0xC601
-
-#define SOURCE_ROUTING_SPANNING_BITS    0x00C0  /* Spanning Tree Frames */
-#define SOURCE_ROUTING_EXPLORER_BIT     0x0040  /* Explorer and Single Route */
-
-        /* write */
-
-#define CSR_MSK_ALL             0x80    // Bic 587 Only
-#define CSR_MSKTINT             0x20
-#define CSR_MSKCBUSY            0x10
-#define CSR_CLRTINT             0x08
-#define CSR_CLRCBUSY            0x04
-#define CSR_WCSS                0x02
-#define CSR_CA                  0x01
-
-        /* read */
-
-#define CSR_TINT                0x20
-#define CSR_CINT                0x10
-#define CSR_TSTAT               0x08
-#define CSR_CSTAT               0x04
-#define CSR_FAULT               0x02
-#define CSR_CBUSY               0x01
-
-#define LAAR_MEM16ENB           0x80
-#define Zws16                   0x20
-
-#define IRR_IEN                 0x80
-#define Zws8                    0x01
-
-#define IMCCR_EIL               0x04
-
-typedef struct {
-        __u8            ac;                             /* Access Control */
-        __u8            fc;                             /* Frame Control */
-        __u8            da[6];                          /* Dest Addr */
-        __u8            sa[6];                          /* Source Addr */
-
-        __u16            vl;                             /* Vector Length */
-        __u8            dc_sc;                          /* Dest/Source Class */
-        __u8            vc;                             /* Vector Code */
-        } MAC_HEADER;
-
-#define MAX_SUB_VECTOR_INFO     (RX_DATA_BUFFER_SIZE - sizeof(MAC_HEADER) - 2)
-
-typedef struct
-        {
-        __u8            svl;                            /* Sub-vector Length */
-        __u8            svi;                            /* Sub-vector Code */
-        __u8            svv[MAX_SUB_VECTOR_INFO];       /* Sub-vector Info */
-        } MAC_SUB_VECTOR;
-
-#endif	/* __KERNEL__ */
-#endif	/* __LINUX_SMCTR_H */
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
deleted file mode 100644
index be4813e..0000000
--- a/drivers/net/tokenring/tms380tr.c
+++ /dev/null
@@ -1,2306 +0,0 @@
-/*
- *  tms380tr.c: A network driver library for Texas Instruments TMS380-based
- *              Token Ring Adapters.
- *
- *  Originally sktr.c: Written 1997 by Christoph Goos
- *
- *  A fine result of the Linux Systems Network Architecture Project.
- *  http://www.vanheusden.com/sna/ 
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License, incorporated herein by reference.
- *
- *  The following modules are currently available for card support:
- *	- tmspci (Generic PCI card support)
- *	- abyss (Madge PCI support)
- *      - tmsisa (SysKonnect TR4/16 ISA)
- *
- *  Sources:
- *  	- The hardware-related parts of this driver are taken from
- *  	  the SysKonnect Token Ring driver for Windows NT.
- *  	- I used the IBM Token Ring driver 'ibmtr.c' as a base for this
- *  	  driver, as well as the 'skeleton.c' driver by Donald Becker.
- *  	- Also various other drivers in the linux source tree were taken
- *  	  as samples for some tasks.
- *      - TI TMS380 Second-Generation Token Ring User's Guide
- *  	- TI datasheets for respective chips
- *  	- David Hein at Texas Instruments 
- *  	- Various Madge employees
- *
- *  Maintainer(s):
- *    JS	Jay Schulist		jschlst@samba.org
- *    CG	Christoph Goos		cgoos@syskonnect.de
- *    AF	Adam Fritzler
- *    MLP       Mike Phillips           phillim@amtrak.com
- *    JF	Jochen Friedrich	jochen@scram.de
- *     
- *  Modification History:
- *	29-Aug-97	CG	Created
- *	04-Apr-98	CG	Fixed problems caused by tok_timer_check
- *	10-Apr-98	CG	Fixed lockups at cable disconnection
- *	27-May-98	JS	Formatted to Linux Kernel Format
- *	31-May-98	JS	Hacked in PCI support
- *	16-Jun-98	JS	Modulized for multiple cards with one driver
- *	   Sep-99	AF	Renamed to tms380tr (supports more than SK's)
- *      23-Sep-99	AF      Added Compaq and Thomas-Conrad PCI support
- *				Fixed a bug causing double copies on PCI
- *				Fixed for new multicast stuff (2.2/2.3)
- *	25-Sep-99	AF	Upped TPL_NUM from 3 to 9
- *				Removed extraneous 'No free TPL'
- *	22-Dec-99	AF	Added Madge PCI Mk2 support and generalized
- *				parts of the initialization procedure.
- *	30-Dec-99	AF	Turned tms380tr into a library ala 8390.
- *				Madge support is provided in the abyss module
- *				Generic PCI support is in the tmspci module.
- *	30-Nov-00	JF	Updated PCI code to support IO MMU via
- *				pci_map_static(). Alpha uses this MMU for ISA
- *				as well.
- *      14-Jan-01	JF	Fix DMA on ifdown/ifup sequences. Some 
- *      			cleanup.
- *	13-Jan-02	JF	Add spinlock to fix race condition.
- *	09-Nov-02	JF	Fixed printks to not SPAM the console during
- *				normal operation.
- *	30-Dec-02	JF	Removed incorrect __init from 
- *				tms380tr_init_card.
- *	22-Jul-05	JF	Converted to dma-mapping.
- *      			
- *  To do:
- *    1. Multi/Broadcast packet handling (this may have fixed itself)
- *    2. Write a sktrisa module that includes the old ISA support (done)
- *    3. Allow modules to load their own microcode
- *    4. Speed up the BUD process -- freezing the kernel for 3+sec is
- *         quite unacceptable.
- *    5. Still a few remaining stalls when the cable is unplugged.
- */
-
-#ifdef MODULE
-static const char version[] = "tms380tr.c: v1.10 30/12/2002 by Christoph Goos, Adam Fritzler\n";
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/time.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/trdevice.h>
-#include <linux/firmware.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-
-#include "tms380tr.h"		/* Our Stuff */
-
-/* Use 0 for production, 1 for verification, 2 for debug, and
- * 3 for very verbose debug.
- */
-#ifndef TMS380TR_DEBUG
-#define TMS380TR_DEBUG 0
-#endif
-static unsigned int tms380tr_debug = TMS380TR_DEBUG;
-
-/* Index to functions, as function prototypes.
- * Alphabetical by function name.
- */
-
-/* "A" */
-/* "B" */
-static int      tms380tr_bringup_diags(struct net_device *dev);
-/* "C" */
-static void	tms380tr_cancel_tx_queue(struct net_local* tp);
-static int 	tms380tr_chipset_init(struct net_device *dev);
-static void 	tms380tr_chk_irq(struct net_device *dev);
-static void 	tms380tr_chk_outstanding_cmds(struct net_device *dev);
-static void 	tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr);
-static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType);
-int	 	tms380tr_close(struct net_device *dev);
-static void 	tms380tr_cmd_status_irq(struct net_device *dev);
-/* "D" */
-static void 	tms380tr_disable_interrupts(struct net_device *dev);
-#if TMS380TR_DEBUG > 0
-static void 	tms380tr_dump(unsigned char *Data, int length);
-#endif
-/* "E" */
-static void 	tms380tr_enable_interrupts(struct net_device *dev);
-static void 	tms380tr_exec_cmd(struct net_device *dev, unsigned short Command);
-static void 	tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue);
-/* "F" */
-/* "G" */
-static struct net_device_stats *tms380tr_get_stats(struct net_device *dev);
-/* "H" */
-static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb,
-						       struct net_device *dev);
-/* "I" */
-static int 	tms380tr_init_adapter(struct net_device *dev);
-static void 	tms380tr_init_ipb(struct net_local *tp);
-static void 	tms380tr_init_net_local(struct net_device *dev);
-static void 	tms380tr_init_opb(struct net_device *dev);
-/* "M" */
-/* "O" */
-int		tms380tr_open(struct net_device *dev);
-static void	tms380tr_open_adapter(struct net_device *dev);
-/* "P" */
-/* "R" */
-static void 	tms380tr_rcv_status_irq(struct net_device *dev);
-static int 	tms380tr_read_ptr(struct net_device *dev);
-static void 	tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
-			unsigned short Address, int Length);
-static int 	tms380tr_reset_adapter(struct net_device *dev);
-static void 	tms380tr_reset_interrupt(struct net_device *dev);
-static void 	tms380tr_ring_status_irq(struct net_device *dev);
-/* "S" */
-static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb,
-					      struct net_device *dev);
-static void 	tms380tr_set_multicast_list(struct net_device *dev);
-static int	tms380tr_set_mac_address(struct net_device *dev, void *addr);
-/* "T" */
-static void 	tms380tr_timer_chk(unsigned long data);
-static void 	tms380tr_timer_end_wait(unsigned long data);
-static void 	tms380tr_tx_status_irq(struct net_device *dev);
-/* "U" */
-static void 	tms380tr_update_rcv_stats(struct net_local *tp,
-			unsigned char DataPtr[], unsigned int Length);
-/* "W" */
-void	 	tms380tr_wait(unsigned long time);
-static void 	tms380tr_write_rpl_status(RPL *rpl, unsigned int Status);
-static void 	tms380tr_write_tpl_status(TPL *tpl, unsigned int Status);
-
-#define SIFREADB(reg) \
-	(((struct net_local *)netdev_priv(dev))->sifreadb(dev, reg))
-#define SIFWRITEB(val, reg) \
-	(((struct net_local *)netdev_priv(dev))->sifwriteb(dev, val, reg))
-#define SIFREADW(reg) \
-	(((struct net_local *)netdev_priv(dev))->sifreadw(dev, reg))
-#define SIFWRITEW(val, reg) \
-	(((struct net_local *)netdev_priv(dev))->sifwritew(dev, val, reg))
-
-
-
-#if 0 /* TMS380TR_DEBUG > 0 */
-static int madgemc_sifprobe(struct net_device *dev)
-{
-        unsigned char old, chk1, chk2;
-	
-	old = SIFREADB(SIFADR);  /* Get the old SIFADR value */
-
-        chk1 = 0;       /* Begin with check value 0 */
-        do {
-		madgemc_setregpage(dev, 0);
-                /* Write new SIFADR value */
-		SIFWRITEB(chk1, SIFADR);
-		chk2 = SIFREADB(SIFADR);
-		if (chk2 != chk1)
-			return -1;
-		
-		madgemc_setregpage(dev, 1);
-                /* Read, invert and write */
-		chk2 = SIFREADB(SIFADD);
-		if (chk2 != chk1)
-			return -1;
-
-		madgemc_setregpage(dev, 0);
-                chk2 ^= 0x0FE;
-		SIFWRITEB(chk2, SIFADR);
-
-                /* Read, invert and compare */
-		madgemc_setregpage(dev, 1);
-		chk2 = SIFREADB(SIFADD);
-		madgemc_setregpage(dev, 0);
-                chk2 ^= 0x0FE;
-
-                if(chk1 != chk2)
-                        return -1;    /* No adapter */
-                chk1 -= 2;
-        } while(chk1 != 0);     /* Repeat 128 times (all byte values) */
-
-	madgemc_setregpage(dev, 0); /* sanity */
-        /* Restore the SIFADR value */
-	SIFWRITEB(old, SIFADR);
-
-        return 0;
-}
-#endif
-
-/*
- * Open/initialize the board. This is called sometime after
- * booting when the 'ifconfig' program is run.
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- */
-int tms380tr_open(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	int err;
-	
-	/* init the spinlock */
-	spin_lock_init(&tp->lock);
-	init_timer(&tp->timer);
-
-	/* Reset the hardware here. Don't forget to set the station address. */
-
-#ifdef CONFIG_ISA
-	if(dev->dma > 0) 
-	{
-		unsigned long flags=claim_dma_lock();
-		disable_dma(dev->dma);
-		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
-		enable_dma(dev->dma);
-		release_dma_lock(flags);
-	}
-#endif
-	
-	err = tms380tr_chipset_init(dev);
-  	if(err)
-	{
-		printk(KERN_INFO "%s: Chipset initialization error\n", 
-			dev->name);
-		return -1;
-	}
-
-	tp->timer.expires	= jiffies + 30*HZ;
-	tp->timer.function	= tms380tr_timer_end_wait;
-	tp->timer.data		= (unsigned long)dev;
-	add_timer(&tp->timer);
-
-	printk(KERN_DEBUG "%s: Adapter RAM size: %dK\n", 
-	       dev->name, tms380tr_read_ptr(dev));
-
-	tms380tr_enable_interrupts(dev);
-	tms380tr_open_adapter(dev);
-
-	netif_start_queue(dev);
-	
-	/* Wait for interrupt from hardware. If interrupt does not come,
-	 * there will be a timeout from the timer.
-	 */
-	tp->Sleeping = 1;
-	interruptible_sleep_on(&tp->wait_for_tok_int);
-	del_timer(&tp->timer);
-
-	/* If AdapterVirtOpenFlag is 1, the adapter is now open for use */
-	if(tp->AdapterVirtOpenFlag == 0)
-	{
-		tms380tr_disable_interrupts(dev);
-		return -1;
-	}
-
-	tp->StartTime = jiffies;
-
-	/* Start function control timer */
-	tp->timer.expires	= jiffies + 2*HZ;
-	tp->timer.function	= tms380tr_timer_chk;
-	tp->timer.data		= (unsigned long)dev;
-	add_timer(&tp->timer);
-
-	return 0;
-}
-
-/*
- * Timeout function while waiting for event
- */
-static void tms380tr_timer_end_wait(unsigned long data)
-{
-	struct net_device *dev = (struct net_device*)data;
-	struct net_local *tp = netdev_priv(dev);
-
-	if(tp->Sleeping)
-	{
-		tp->Sleeping = 0;
-		wake_up_interruptible(&tp->wait_for_tok_int);
-	}
-}
-
-/*
- * Initialize the chipset
- */
-static int tms380tr_chipset_init(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	int err;
-
-	tms380tr_init_ipb(tp);
-	tms380tr_init_opb(dev);
-	tms380tr_init_net_local(dev);
-
-	if(tms380tr_debug > 3)
-		printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name);
-	err = tms380tr_reset_adapter(dev);
-	if(err < 0)
-		return -1;
-
-	if(tms380tr_debug > 3)
-		printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name);
-	err = tms380tr_bringup_diags(dev);
-	if(err < 0)
-		return -1;
-
-	if(tms380tr_debug > 3)
-		printk(KERN_DEBUG "%s: Init adapter...\n", dev->name);
-	err = tms380tr_init_adapter(dev);
-	if(err < 0)
-		return -1;
-
-	if(tms380tr_debug > 3)
-		printk(KERN_DEBUG "%s: Done!\n", dev->name);
-	return 0;
-}
-
-/*
- * Initializes the net_local structure.
- */
-static void tms380tr_init_net_local(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	int i;
-	dma_addr_t dmabuf;
-
-	tp->scb.CMD	= 0;
-	tp->scb.Parm[0] = 0;
-	tp->scb.Parm[1] = 0;
-
-	tp->ssb.STS	= 0;
-	tp->ssb.Parm[0] = 0;
-	tp->ssb.Parm[1] = 0;
-	tp->ssb.Parm[2] = 0;
-
-	tp->CMDqueue	= 0;
-
-	tp->AdapterOpenFlag	= 0;
-	tp->AdapterVirtOpenFlag = 0;
-	tp->ScbInUse		= 0;
-	tp->OpenCommandIssued	= 0;
-	tp->ReOpenInProgress	= 0;
-	tp->HaltInProgress	= 0;
-	tp->TransmitHaltScheduled = 0;
-	tp->LobeWireFaultLogged	= 0;
-	tp->LastOpenStatus	= 0;
-	tp->MaxPacketSize	= DEFAULT_PACKET_SIZE;
-
-	/* Create circular chain of transmit lists */
-	for (i = 0; i < TPL_NUM; i++)
-	{
-		tp->Tpl[i].NextTPLAddr = htonl(((char *)(&tp->Tpl[(i+1) % TPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */
-		tp->Tpl[i].Status	= 0;
-		tp->Tpl[i].FrameSize	= 0;
-		tp->Tpl[i].FragList[0].DataCount	= 0;
-		tp->Tpl[i].FragList[0].DataAddr		= 0;
-		tp->Tpl[i].NextTPLPtr	= &tp->Tpl[(i+1) % TPL_NUM];
-		tp->Tpl[i].MData	= NULL;
-		tp->Tpl[i].TPLIndex	= i;
-		tp->Tpl[i].DMABuff	= 0;
-		tp->Tpl[i].BusyFlag	= 0;
-	}
-
-	tp->TplFree = tp->TplBusy = &tp->Tpl[0];
-
-	/* Create circular chain of receive lists */
-	for (i = 0; i < RPL_NUM; i++)
-	{
-		tp->Rpl[i].NextRPLAddr = htonl(((char *)(&tp->Rpl[(i+1) % RPL_NUM]) - (char *)tp) + tp->dmabuffer); /* DMA buffer may be MMU driven */
-		tp->Rpl[i].Status = (RX_VALID | RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
-		tp->Rpl[i].FrameSize = 0;
-		tp->Rpl[i].FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize);
-
-		/* Alloc skb and point adapter to data area */
-		tp->Rpl[i].Skb = dev_alloc_skb(tp->MaxPacketSize);
-		tp->Rpl[i].DMABuff = 0;
-
-		/* skb == NULL ? then use local buffer */
-		if(tp->Rpl[i].Skb == NULL)
-		{
-			tp->Rpl[i].SkbStat = SKB_UNAVAILABLE;
-			tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer);
-			tp->Rpl[i].MData = tp->LocalRxBuffers[i];
-		}
-		else	/* SKB != NULL */
-		{
-			tp->Rpl[i].Skb->dev = dev;
-			skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize);
-
-			/* data unreachable for DMA ? then use local buffer */
-			dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
-			if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
-			{
-				tp->Rpl[i].SkbStat = SKB_DATA_COPY;
-				tp->Rpl[i].FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[i] - (char *)tp) + tp->dmabuffer);
-				tp->Rpl[i].MData = tp->LocalRxBuffers[i];
-			}
-			else	/* DMA directly in skb->data */
-			{
-				tp->Rpl[i].SkbStat = SKB_DMA_DIRECT;
-				tp->Rpl[i].FragList[0].DataAddr = htonl(dmabuf);
-				tp->Rpl[i].MData = tp->Rpl[i].Skb->data;
-				tp->Rpl[i].DMABuff = dmabuf;
-			}
-		}
-
-		tp->Rpl[i].NextRPLPtr = &tp->Rpl[(i+1) % RPL_NUM];
-		tp->Rpl[i].RPLIndex = i;
-	}
-
-	tp->RplHead = &tp->Rpl[0];
-	tp->RplTail = &tp->Rpl[RPL_NUM-1];
-	tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
-}
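
The two loops above build circular descriptor chains in which each element carries both a host pointer (NextTPLPtr/NextRPLPtr) and a bus address computed as the element's offset inside the shared net_local block plus tp->dmabuffer. A self-contained sketch of that construction, with purely illustrative names (demo_desc, DMA_BASE), is:

#include <stdint.h>

#define RING_SIZE 8
#define DMA_BASE  0x00100000u		/* assumed bus address of the block */

struct demo_desc {
	uint32_t next_bus_addr;		/* address the adapter follows */
	struct demo_desc *next;		/* pointer the host follows */
};

struct demo_block {
	struct demo_desc ring[RING_SIZE];
};

static void demo_build_ring(struct demo_block *blk)
{
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		struct demo_desc *next = &blk->ring[(i + 1) % RING_SIZE];

		blk->ring[i].next = next;
		/* bus address = offset within the block + the block's DMA base */
		blk->ring[i].next_bus_addr =
			(uint32_t)((char *)next - (char *)blk) + DMA_BASE;
	}
}

The modulo wrap makes the last element point back at the first, which is what lets TplFree/TplBusy and RplHead/RplTail chase each other around the ring.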
-
-/*
- * Initializes the initialisation parameter block.
- */
-static void tms380tr_init_ipb(struct net_local *tp)
-{
-	tp->ipb.Init_Options	= BURST_MODE;
-	tp->ipb.CMD_Status_IV	= 0;
-	tp->ipb.TX_IV		= 0;
-	tp->ipb.RX_IV		= 0;
-	tp->ipb.Ring_Status_IV	= 0;
-	tp->ipb.SCB_Clear_IV	= 0;
-	tp->ipb.Adapter_CHK_IV	= 0;
-	tp->ipb.RX_Burst_Size	= BURST_SIZE;
-	tp->ipb.TX_Burst_Size	= BURST_SIZE;
-	tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES;
-	tp->ipb.SCB_Addr	= 0;
-	tp->ipb.SSB_Addr	= 0;
-}
-
-/*
- * Initializes the open parameter block.
- */
-static void tms380tr_init_opb(struct net_device *dev)
-{
-	struct net_local *tp;
-	unsigned long Addr;
-	unsigned short RplSize    = RPL_SIZE;
-	unsigned short TplSize    = TPL_SIZE;
-	unsigned short BufferSize = BUFFER_SIZE;
-	int i;
-
-	tp = netdev_priv(dev);
-
-	tp->ocpl.OPENOptions 	 = 0;
-	tp->ocpl.OPENOptions 	|= ENABLE_FULL_DUPLEX_SELECTION;
-	tp->ocpl.FullDuplex 	 = 0;
-	tp->ocpl.FullDuplex 	|= OPEN_FULL_DUPLEX_OFF;
-
-	/*
-	 * Set node address
-	 *
-	 * We go ahead and put it in the OPB even though on
-	 * most of the generic adapters this isn't required.
-	 * It's simpler this way.  -- ASF
-	 */
-	for (i = 0; i < 6; i++)
-		tp->ocpl.NodeAddr[i] = ((unsigned char *)dev->dev_addr)[i];
-
-	tp->ocpl.GroupAddr	 = 0;
-	tp->ocpl.FunctAddr	 = 0;
-	tp->ocpl.RxListSize	 = cpu_to_be16((unsigned short)RplSize);
-	tp->ocpl.TxListSize	 = cpu_to_be16((unsigned short)TplSize);
-	tp->ocpl.BufSize	 = cpu_to_be16((unsigned short)BufferSize);
-	tp->ocpl.Reserved	 = 0;
-	tp->ocpl.TXBufMin	 = TX_BUF_MIN;
-	tp->ocpl.TXBufMax	 = TX_BUF_MAX;
-
-	Addr = htonl(((char *)tp->ProductID - (char *)tp) + tp->dmabuffer);
-
-	tp->ocpl.ProdIDAddr[0]	 = LOWORD(Addr);
-	tp->ocpl.ProdIDAddr[1]	 = HIWORD(Addr);
-}
-
-/*
- * Send OPEN command to adapter
- */
-static void tms380tr_open_adapter(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-
-	if(tp->OpenCommandIssued)
-		return;
-
-	tp->OpenCommandIssued = 1;
-	tms380tr_exec_cmd(dev, OC_OPEN);
-}
-
-/*
- * Clear the adapter's interrupt flag. Clear system interrupt enable
- * (SINTEN): disable adapter to system interrupts.
- */
-static void tms380tr_disable_interrupts(struct net_device *dev)
-{
-	SIFWRITEB(0, SIFACL);
-}
-
-/*
- * Set the adapter's interrupt flag. Set system interrupt enable
- * (SINTEN): enable adapter to system interrupts.
- */
-static void tms380tr_enable_interrupts(struct net_device *dev)
-{
-	SIFWRITEB(ACL_SINTEN, SIFACL);
-}
-
-/*
- * Put command in command queue, try to execute it.
- */
-static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command)
-{
-	struct net_local *tp = netdev_priv(dev);
-
-	tp->CMDqueue |= Command;
-	tms380tr_chk_outstanding_cmds(dev);
-}
-
-static void tms380tr_timeout(struct net_device *dev)
-{
-	/*
-	 * If we get here, some higher level has decided we are broken.
-	 * There should really be a "kick me" function call instead.
-	 *
-	 * Resetting the token ring adapter takes a long time so just
-	 * fake transmission time and go on trying. Our own timeout
-	 * routine is in tms380tr_timer_chk()
-	 */
-	dev->trans_start = jiffies; /* prevent tx timeout */
-	netif_wake_queue(dev);
-}
-
-/*
- * Gets skb from system, queues it and checks if it can be sent
- */
-static netdev_tx_t tms380tr_send_packet(struct sk_buff *skb,
-					      struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	netdev_tx_t rc;
-
-	rc = tms380tr_hardware_send_packet(skb, dev);
-	if(tp->TplFree->NextTPLPtr->BusyFlag)
-		netif_stop_queue(dev);
-	return rc;
-}
-
-/*
- * Move frames into adapter tx queue
- */
-static netdev_tx_t tms380tr_hardware_send_packet(struct sk_buff *skb,
-						       struct net_device *dev)
-{
-	TPL *tpl;
-	short length;
-	unsigned char *buf;
-	unsigned long flags;
-	int i;
-	dma_addr_t dmabuf, newbuf;
-	struct net_local *tp = netdev_priv(dev);
-   
-	/* Try to get a free TPL from the chain.
-	 *
-	 * NOTE: We *must* always leave one unused TPL in the chain,
-	 * because otherwise the adapter might send frames twice.
-	 */
-	spin_lock_irqsave(&tp->lock, flags);
-	if(tp->TplFree->NextTPLPtr->BusyFlag)  { /* No free TPL */
-		if (tms380tr_debug > 0)
-			printk(KERN_DEBUG "%s: No free TPL\n", dev->name);
-		spin_unlock_irqrestore(&tp->lock, flags);
-		return NETDEV_TX_BUSY;
-	}
-
-	dmabuf = 0;
-
-	/* Is buffer reachable for Busmaster-DMA? */
-
-	length	= skb->len;
-	dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE);
-	if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) {
-		/* Copy frame to local buffer */
-		dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE);
-		dmabuf  = 0;
-		i 	= tp->TplFree->TPLIndex;
-		buf 	= tp->LocalTxBuffers[i];
-		skb_copy_from_linear_data(skb, buf, length);
-		newbuf 	= ((char *)buf - (char *)tp) + tp->dmabuffer;
-	}
-	else {
-		/* Send direct from skb->data */
-		newbuf	= dmabuf;
-		buf	= skb->data;
-	}
-	/* Source address in packet? */
-	tms380tr_chk_src_addr(buf, dev->dev_addr);
-	tp->LastSendTime	= jiffies;
-	tpl 			= tp->TplFree;	/* Get the "free" TPL */
-	tpl->BusyFlag 		= 1;		/* Mark TPL as busy */
-	tp->TplFree 		= tpl->NextTPLPtr;
-    
-	/* Save the skb for delayed return of skb to system */
-	tpl->Skb = skb;
-	tpl->DMABuff = dmabuf;
-	tpl->FragList[0].DataCount = cpu_to_be16((unsigned short)length);
-	tpl->FragList[0].DataAddr  = htonl(newbuf);
-
-	/* Write the data length in the transmit list. */
-	tpl->FrameSize 	= cpu_to_be16((unsigned short)length);
-	tpl->MData 	= buf;
-
-	/* Transmit the frame and set the status values. */
-	tms380tr_write_tpl_status(tpl, TX_VALID | TX_START_FRAME
-				| TX_END_FRAME | TX_PASS_SRC_ADDR
-				| TX_FRAME_IRQ);
-
-	/* Let adapter send the frame. */
-	tms380tr_exec_sifcmd(dev, CMD_TX_VALID);
-	spin_unlock_irqrestore(&tp->lock, flags);
-
-	return NETDEV_TX_OK;
-}
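
The reachability test above (map the buffer, compare the end of the mapping against tp->dmalimit, otherwise bounce through LocalTxBuffers[]) can be factored into a small helper. The sketch below uses a hypothetical name and the same tp->pdev/tp->dmalimit fields; a current driver would additionally check dma_mapping_error() on the returned handle.

static bool tms380tr_try_map_tx(struct net_local *tp, void *data,
				size_t len, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(tp->pdev, data, len, DMA_TO_DEVICE);

	if (tp->dmalimit && addr + len > tp->dmalimit) {
		/* Not reachable by the adapter: undo the mapping and let
		 * the caller copy into the pre-mapped local bounce buffer.
		 */
		dma_unmap_single(tp->pdev, addr, len, DMA_TO_DEVICE);
		return false;
	}
	*out = addr;
	return true;
}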
-
-/*
- * Write the given value to the 'Status' field of the specified TPL.
- * NOTE: This function should be used whenever the status of any TPL must be
- * modified by the driver, because the compiler may otherwise change the
- * order of instructions such that writing the TPL status may be executed at
- * an undesirable time. When this function is used, the status is always
- * written when the function is called.
- */
-static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status)
-{
-	tpl->Status = Status;
-}
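
The helper exists purely to pin down when the status store happens. In later kernels the same intent is usually spelled with WRITE_ONCE(), which stops the compiler from tearing, fusing or moving the store; a sketch of the same function with that alternative body:

static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status)
{
	/* Emit exactly one store to tpl->Status at this point. */
	WRITE_ONCE(tpl->Status, Status);
}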
-
-static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr)
-{
-	unsigned char SRBit;
-
-	if((((unsigned long)frame[8]) & ~0x80) != 0)	/* First SA byte set (ignoring the RII bit)? */
-		return;
-	if((unsigned short)frame[12] != 0)		/* Fifth SA byte set? */
-		return;
-
-	SRBit = frame[8] & 0x80;
-	memcpy(&frame[8], hw_addr, 6);
-	frame[8] |= SRBit;
-}
-
-/*
- * The timer routine: Check if adapter still open and working, reopen if not. 
- */
-static void tms380tr_timer_chk(unsigned long data)
-{
-	struct net_device *dev = (struct net_device*)data;
-	struct net_local *tp = netdev_priv(dev);
-
-	if(tp->HaltInProgress)
-		return;
-
-	tms380tr_chk_outstanding_cmds(dev);
-	if(time_before(tp->LastSendTime + SEND_TIMEOUT, jiffies) &&
-	   (tp->TplFree != tp->TplBusy))
-	{
-		/* Anything to send, but stalled too long */
-		tp->LastSendTime = jiffies;
-		tms380tr_exec_cmd(dev, OC_CLOSE);	/* Does reopen automatically */
-	}
-
-	tp->timer.expires = jiffies + 2*HZ;
-	add_timer(&tp->timer);
-
-	if(tp->AdapterOpenFlag || tp->ReOpenInProgress)
-		return;
-	tp->ReOpenInProgress = 1;
-	tms380tr_open_adapter(dev);
-}
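
The periodic check re-arms the timer by poking expires and calling add_timer() again. With the later timer_setup()/from_timer() API (not available to code of this vintage) the callback receives the timer_list itself and re-arms with mod_timer(); the sketch below assumes a back-pointer from net_local to the net_device, which this driver does not actually have:

static void tms380tr_timer_chk_sketch(struct timer_list *t)
{
	struct net_local *tp = from_timer(tp, t, timer);

	/* ... the same command/open checks as tms380tr_timer_chk(),
	 * reaching the net_device via an assumed tp->dev back-pointer ...
	 */

	mod_timer(&tp->timer, jiffies + 2 * HZ);	/* re-arm atomically */
}

It would be armed once with timer_setup(&tp->timer, tms380tr_timer_chk_sketch, 0) followed by mod_timer().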
-
-/*
- * The typical workload of the driver: Handle the network interface interrupts.
- */
-irqreturn_t tms380tr_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev = dev_id;
-	struct net_local *tp;
-	unsigned short irq_type;
-	int handled = 0;
-
-	tp = netdev_priv(dev);
-
-	irq_type = SIFREADW(SIFSTS);
-
-	while(irq_type & STS_SYSTEM_IRQ) {
-		handled = 1;
-		irq_type &= STS_IRQ_MASK;
-
-		if(!tms380tr_chk_ssb(tp, irq_type)) {
-			printk(KERN_DEBUG "%s: DATA LATE occurred\n", dev->name);
-			break;
-		}
-
-		switch(irq_type) {
-		case STS_IRQ_RECEIVE_STATUS:
-			tms380tr_reset_interrupt(dev);
-			tms380tr_rcv_status_irq(dev);
-			break;
-
-		case STS_IRQ_TRANSMIT_STATUS:
-			/* Check if TRANSMIT.HALT command is complete */
-			if(tp->ssb.Parm[0] & COMMAND_COMPLETE) {
-				tp->TransmitCommandActive = 0;
-				tp->TransmitHaltScheduled = 0;
-
-				/* Issue a new transmit command. */
-				tms380tr_exec_cmd(dev, OC_TRANSMIT);
-			}
-
-			tms380tr_reset_interrupt(dev);
-			tms380tr_tx_status_irq(dev);
-			break;
-
-		case STS_IRQ_COMMAND_STATUS:
-			/* The SSB contains status of last command
-			 * other than receive/transmit.
-			 */
-			tms380tr_cmd_status_irq(dev);
-			break;
-			
-		case STS_IRQ_SCB_CLEAR:
-			/* The SCB is free for another command. */
-			tp->ScbInUse = 0;
-			tms380tr_chk_outstanding_cmds(dev);
-			break;
-			
-		case STS_IRQ_RING_STATUS:
-			tms380tr_ring_status_irq(dev);
-			break;
-
-		case STS_IRQ_ADAPTER_CHECK:
-			tms380tr_chk_irq(dev);
-			break;
-
-		case STS_IRQ_LLC_STATUS:
-			printk(KERN_DEBUG "tms380tr: unexpected LLC status IRQ\n");
-			break;
-			
-		case STS_IRQ_TIMER:
-			printk(KERN_DEBUG "tms380tr: unexpected Timer IRQ\n");
-			break;
-			
-		case STS_IRQ_RECEIVE_PENDING:
-			printk(KERN_DEBUG "tms380tr: unexpected Receive Pending IRQ\n");
-			break;
-			
-		default:
-			printk(KERN_DEBUG "Unknown Token Ring IRQ (0x%04x)\n", irq_type);
-			break;
-		}
-
-		/* Reset system interrupt if not already done. */
-		if(irq_type != STS_IRQ_TRANSMIT_STATUS &&
-		   irq_type != STS_IRQ_RECEIVE_STATUS) {
-			tms380tr_reset_interrupt(dev);
-		}
-
-		irq_type = SIFREADW(SIFSTS);
-	}
-
-	return IRQ_RETVAL(handled);
-}
-
-/*
- *  Reset the INTERRUPT SYSTEM bit and issue SSB CLEAR command.
- */
-static void tms380tr_reset_interrupt(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	SSB *ssb = &tp->ssb;
-
-	/*
-	 * [Workaround for "Data Late"]
-	 * Set all fields of the SSB to well-defined values so we can
-	 * check if the adapter has written the SSB.
-	 */
-
-	ssb->STS	= (unsigned short) -1;
-	ssb->Parm[0] 	= (unsigned short) -1;
-	ssb->Parm[1] 	= (unsigned short) -1;
-	ssb->Parm[2] 	= (unsigned short) -1;
-
-	/* Free SSB by issuing SSB_CLEAR command after reading IRQ code
-	 * and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts.
-	 */
-	tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ);
-}
-
-/*
- * Check if the SSB has actually been written by the adapter.
- */
-static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqType)
-{
-	SSB *ssb = &tp->ssb;	/* The address of the SSB. */
-
-	/* C 0 1 2 INTERRUPT CODE
-	 * - - - - --------------
-	 * 1 1 1 1 TRANSMIT STATUS
-	 * 1 1 1 1 RECEIVE STATUS
-	 * 1 ? ? 0 COMMAND STATUS
-	 * 0 0 0 0 SCB CLEAR
-	 * 1 1 0 0 RING STATUS
-	 * 0 0 0 0 ADAPTER CHECK
-	 *
-	 * 0 = SSB field not affected by interrupt
-	 * 1 = SSB field is affected by interrupt
-	 *
-	 * C = SSB ADDRESS +0: COMMAND
-	 * 0 = SSB ADDRESS +2: STATUS 0
-	 * 1 = SSB ADDRESS +4: STATUS 1
-	 * 2 = SSB ADDRESS +6: STATUS 2
-	 */
-
-	/* Check if this interrupt does use the SSB. */
-
-	if(IrqType != STS_IRQ_TRANSMIT_STATUS &&
-	   IrqType != STS_IRQ_RECEIVE_STATUS &&
-	   IrqType != STS_IRQ_COMMAND_STATUS &&
-	   IrqType != STS_IRQ_RING_STATUS)
-	{
-		return 1;	/* SSB not involved. */
-	}
-
-	/* Note: All fields of the SSB have been set to all ones (-1) after it
-	 * has last been used by the software (see DriverIsr()).
-	 *
-	 * Check if the affected SSB fields are still unchanged.
-	 */
-
-	if(ssb->STS == (unsigned short) -1)
-		return 0;	/* Command field not yet available. */
-	if(IrqType == STS_IRQ_COMMAND_STATUS)
-		return 1;	/* Status fields not always affected. */
-	if(ssb->Parm[0] == (unsigned short) -1)
-		return 0;	/* Status 1 field not yet available. */
-	if(IrqType == STS_IRQ_RING_STATUS)
-		return 1;	/* Status 2 & 3 fields not affected. */
-
-	/* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */
-	if(ssb->Parm[1] == (unsigned short) -1)
-		return 0;	/* Status 2 field not yet available. */
-	if(ssb->Parm[2] == (unsigned short) -1)
-		return 0;	/* Status 3 field not yet available. */
-
-	return 1;	/* All SSB fields have been written by the adapter. */
-}
-
-/*
- * Evaluates the command results status in the SSB status field.
- */
-static void tms380tr_cmd_status_irq(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	unsigned short ssb_cmd, ssb_parm_0;
-	unsigned short ssb_parm_1;
-	char *open_err = "Open error -";
-	char *code_err = "Open code -";
-
-	/* Copy the ssb values to local variables */
-	ssb_cmd    = tp->ssb.STS;
-	ssb_parm_0 = tp->ssb.Parm[0];
-	ssb_parm_1 = tp->ssb.Parm[1];
-
-	if(ssb_cmd == OPEN)
-	{
-		tp->Sleeping = 0;
-		if(!tp->ReOpenInProgress)
-	    		wake_up_interruptible(&tp->wait_for_tok_int);
-
-		tp->OpenCommandIssued = 0;
-		tp->ScbInUse = 0;
-
-		if((ssb_parm_0 & 0x00FF) == GOOD_COMPLETION)
-		{
-			/* Success, the adapter is open. */
-			tp->LobeWireFaultLogged	= 0;
-			tp->AdapterOpenFlag 	= 1;
-			tp->AdapterVirtOpenFlag = 1;
-			tp->TransmitCommandActive = 0;
-			tms380tr_exec_cmd(dev, OC_TRANSMIT);
-			tms380tr_exec_cmd(dev, OC_RECEIVE);
-
-			if(tp->ReOpenInProgress)
-				tp->ReOpenInProgress = 0;
-
-			return;
-		}
-		else 	/* The adapter did not open. */
-		{
-	    		if(ssb_parm_0 & NODE_ADDR_ERROR)
-				printk(KERN_INFO "%s: Node address error\n",
-					dev->name);
-	    		if(ssb_parm_0 & LIST_SIZE_ERROR)
-				printk(KERN_INFO "%s: List size error\n",
-					dev->name);
-	    		if(ssb_parm_0 & BUF_SIZE_ERROR)
-				printk(KERN_INFO "%s: Buffer size error\n",
-					dev->name);
-	    		if(ssb_parm_0 & TX_BUF_COUNT_ERROR)
-				printk(KERN_INFO "%s: Tx buffer count error\n",
-					dev->name);
-	    		if(ssb_parm_0 & INVALID_OPEN_OPTION)
-				printk(KERN_INFO "%s: Invalid open option\n",
-					dev->name);
-	    		if(ssb_parm_0 & OPEN_ERROR)
-			{
-				/* Show the open phase. */
-				switch(ssb_parm_0 & OPEN_PHASES_MASK)
-				{
-					case LOBE_MEDIA_TEST:
-						if(!tp->LobeWireFaultLogged)
-						{
-							tp->LobeWireFaultLogged = 1;
-							printk(KERN_INFO "%s: %s Lobe wire fault (check cable!).\n", dev->name, open_err);
-						}
-						tp->ReOpenInProgress	= 1;
-						tp->AdapterOpenFlag 	= 0;
-						tp->AdapterVirtOpenFlag = 1;
-						tms380tr_open_adapter(dev);
-						return;
-
-					case PHYSICAL_INSERTION:
-						printk(KERN_INFO "%s: %s Physical insertion.\n", dev->name, open_err);
-						break;
-
-					case ADDRESS_VERIFICATION:
-						printk(KERN_INFO "%s: %s Address verification.\n", dev->name, open_err);
-						break;
-
-					case PARTICIPATION_IN_RING_POLL:
-						printk(KERN_INFO "%s: %s Participation in ring poll.\n", dev->name, open_err);
-						break;
-
-					case REQUEST_INITIALISATION:
-						printk(KERN_INFO "%s: %s Request initialisation.\n", dev->name, open_err);
-						break;
-
-					case FULLDUPLEX_CHECK:
-						printk(KERN_INFO "%s: %s Full duplex check.\n", dev->name, open_err);
-						break;
-
-					default:
-						printk(KERN_INFO "%s: %s Unknown open phase\n", dev->name, open_err);
-						break;
-				}
-
-				/* Show the open errors. */
-				switch(ssb_parm_0 & OPEN_ERROR_CODES_MASK)
-				{
-					case OPEN_FUNCTION_FAILURE:
-						printk(KERN_INFO "%s: %s OPEN_FUNCTION_FAILURE\n", dev->name, code_err);
-						tp->LastOpenStatus =
-							OPEN_FUNCTION_FAILURE;
-						break;
-
-					case OPEN_SIGNAL_LOSS:
-						printk(KERN_INFO "%s: %s OPEN_SIGNAL_LOSS\n", dev->name, code_err);
-						tp->LastOpenStatus =
-							OPEN_SIGNAL_LOSS;
-						break;
-
-					case OPEN_TIMEOUT:
-						printk(KERN_INFO "%s: %s OPEN_TIMEOUT\n", dev->name, code_err);
-						tp->LastOpenStatus =
-							OPEN_TIMEOUT;
-						break;
-
-					case OPEN_RING_FAILURE:
-						printk(KERN_INFO "%s: %s OPEN_RING_FAILURE\n", dev->name, code_err);
-						tp->LastOpenStatus =
-							OPEN_RING_FAILURE;
-						break;
-
-					case OPEN_RING_BEACONING:
-						printk(KERN_INFO "%s: %s OPEN_RING_BEACONING\n", dev->name, code_err);
-						tp->LastOpenStatus =
-							OPEN_RING_BEACONING;
-						break;
-
-					case OPEN_DUPLICATE_NODEADDR:
-						printk(KERN_INFO "%s: %s OPEN_DUPLICATE_NODEADDR\n", dev->name, code_err);
-						tp->LastOpenStatus =
-							OPEN_DUPLICATE_NODEADDR;
-						break;
-
-					case OPEN_REQUEST_INIT:
-						printk(KERN_INFO "%s: %s OPEN_REQUEST_INIT\n", dev->name, code_err);
-						tp->LastOpenStatus =
-							OPEN_REQUEST_INIT;
-						break;
-
-					case OPEN_REMOVE_RECEIVED:
-						printk(KERN_INFO "%s: %s OPEN_REMOVE_RECEIVED\n", dev->name, code_err);
-						tp->LastOpenStatus =
-							OPEN_REMOVE_RECEIVED;
-						break;
-
-					case OPEN_FULLDUPLEX_SET:
-						printk(KERN_INFO "%s: %s OPEN_FULLDUPLEX_SET\n", dev->name, code_err);
-						tp->LastOpenStatus =
-							OPEN_FULLDUPLEX_SET;
-						break;
-
-					default:
-						printk(KERN_INFO "%s: %s Unknown open error code\n", dev->name, code_err);
-						tp->LastOpenStatus =
-							OPEN_FUNCTION_FAILURE;
-						break;
-				}
-			}
-
-			tp->AdapterOpenFlag 	= 0;
-			tp->AdapterVirtOpenFlag = 0;
-
-			return;
-		}
-	}
-	else
-	{
-		if(ssb_cmd != READ_ERROR_LOG)
-			return;
-
-		/* Add values from the error log table to the MAC
-		 * statistics counters and update the errorlogtable
-		 * memory.
-		 */
-		tp->MacStat.line_errors += tp->errorlogtable.Line_Error;
-		tp->MacStat.burst_errors += tp->errorlogtable.Burst_Error;
-		tp->MacStat.A_C_errors += tp->errorlogtable.ARI_FCI_Error;
-		tp->MacStat.lost_frames += tp->errorlogtable.Lost_Frame_Error;
-		tp->MacStat.recv_congest_count += tp->errorlogtable.Rx_Congest_Error;
-		tp->MacStat.rx_errors += tp->errorlogtable.Rx_Congest_Error;
-		tp->MacStat.frame_copied_errors += tp->errorlogtable.Frame_Copied_Error;
-		tp->MacStat.token_errors += tp->errorlogtable.Token_Error;
-		tp->MacStat.dummy1 += tp->errorlogtable.DMA_Bus_Error;
-		tp->MacStat.dummy1 += tp->errorlogtable.DMA_Parity_Error;
-		tp->MacStat.abort_delimiters += tp->errorlogtable.AbortDelimeters;
-		tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error;
-		tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error;
-	}
-}
-
-/*
- * The inverse routine to tms380tr_open().
- */
-int tms380tr_close(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	netif_stop_queue(dev);
-	
-	del_timer(&tp->timer);
-
-	/* Flush the Tx and disable Rx here. */
-
-	tp->HaltInProgress 	= 1;
-	tms380tr_exec_cmd(dev, OC_CLOSE);
-	tp->timer.expires	= jiffies + 1*HZ;
-	tp->timer.function 	= tms380tr_timer_end_wait;
-	tp->timer.data 		= (unsigned long)dev;
-	add_timer(&tp->timer);
-
-	tms380tr_enable_interrupts(dev);
-
-	tp->Sleeping = 1;
-	interruptible_sleep_on(&tp->wait_for_tok_int);
-	tp->TransmitCommandActive = 0;
-    
-	del_timer(&tp->timer);
-	tms380tr_disable_interrupts(dev);
-   
-#ifdef CONFIG_ISA
-	if(dev->dma > 0) 
-	{
-		unsigned long flags=claim_dma_lock();
-		disable_dma(dev->dma);
-		release_dma_lock(flags);
-	}
-#endif
-	
-	SIFWRITEW(0xFF00, SIFCMD);
-#if 0
-	if(dev->dma > 0) /* what the? */
-		SIFWRITEB(0xff, POSREG);
-#endif
-	tms380tr_cancel_tx_queue(tp);
-
-	return 0;
-}
-
-/*
- * Get the current statistics. This may be called with the card open
- * or closed.
- */
-static struct net_device_stats *tms380tr_get_stats(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-
-	return (struct net_device_stats *)&tp->MacStat;
-}
-
-/*
- * Set or clear the multicast filter for this adapter.
- */
-static void tms380tr_set_multicast_list(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	unsigned int OpenOptions;
-	
-	OpenOptions = tp->ocpl.OPENOptions &
-		~(PASS_ADAPTER_MAC_FRAMES
-		  | PASS_ATTENTION_FRAMES
-		  | PASS_BEACON_MAC_FRAMES
-		  | COPY_ALL_MAC_FRAMES
-		  | COPY_ALL_NON_MAC_FRAMES);
-	
-	tp->ocpl.FunctAddr = 0;
-	
-	if(dev->flags & IFF_PROMISC)
-		/* Enable promiscuous mode */
-		OpenOptions |= COPY_ALL_NON_MAC_FRAMES |
-			COPY_ALL_MAC_FRAMES;
-	else
-	{
-		if(dev->flags & IFF_ALLMULTI)
-		{
-			/* Disable promiscuous mode, use normal mode. */
-			tp->ocpl.FunctAddr = 0xFFFFFFFF;
-		}
-		else
-		{
-			struct netdev_hw_addr *ha;
-
-			netdev_for_each_mc_addr(ha, dev) {
-				((char *)(&tp->ocpl.FunctAddr))[0] |=
-					ha->addr[2];
-				((char *)(&tp->ocpl.FunctAddr))[1] |=
-					ha->addr[3];
-				((char *)(&tp->ocpl.FunctAddr))[2] |=
-					ha->addr[4];
-				((char *)(&tp->ocpl.FunctAddr))[3] |=
-					ha->addr[5];
-			}
-		}
-		tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
-	}
-	
-	tp->ocpl.OPENOptions = OpenOptions;
-	tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS);
-}
-
-/*
- * Wait for some time (microseconds)
- */
-void tms380tr_wait(unsigned long time)
-{
-#if 0
-	long tmp;
-	
-	tmp = jiffies + time/(1000000/HZ);
-	do {
-		tmp = schedule_timeout_interruptible(tmp);
-	} while(time_after(tmp, jiffies));
-#else
-	mdelay(time / 1000);
-#endif
-}
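
tms380tr_wait() takes microseconds and burns them with mdelay(), which is only required in atomic context. Where the caller is known to be able to sleep (for example the open path), a hedged alternative is sketched below; mdelay() remains necessary when the wait happens on the interrupt path, since tms380tr_chk_irq() re-initializes the chipset from the ISR.

static void tms380tr_wait_sleeping(unsigned long usecs)
{
	if (usecs < 20 * 1000)
		usleep_range(usecs, usecs + usecs / 10);
	else
		msleep(DIV_ROUND_UP(usecs, 1000));	/* long waits: coarse sleep */
}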
-
-/*
- * Write a command value to the SIFCMD register
- */
-static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue)
-{
-	unsigned short cmd;
-	unsigned short SifStsValue;
-	unsigned long loop_counter;
-
-	WriteValue = ((WriteValue ^ CMD_SYSTEM_IRQ) | CMD_INTERRUPT_ADAPTER);
-	cmd = (unsigned short)WriteValue;
-	loop_counter = 800000 / 2;	/* bound for the SIFSTS busy-wait below */
-	do {
-		SifStsValue = SIFREADW(SIFSTS);
-	} while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--);
-	SIFWRITEW(cmd, SIFCMD);
-}
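
As a worked example with the SIFCMD bits defined in tms380tr.h: issuing CMD_EXECUTE | CMD_SCB_REQUEST passes WriteValue = 0x1800, the XOR with CMD_SYSTEM_IRQ (0x0080) gives 0x1880, and the OR with CMD_INTERRUPT_ADAPTER (0x8000) gives 0x9880, which is the value the busy-wait above finally writes to SIFCMD once the previous adapter interrupt request has been consumed.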
-
-/*
- * Processes adapter hardware reset, halts adapter and downloads firmware,
- * clears the halt bit.
- */
-static int tms380tr_reset_adapter(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	unsigned short *fw_ptr;
-	unsigned short count, c, count2;
-	const struct firmware *fw_entry = NULL;
-
-	if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
-		printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
-			dev->name, "tms380tr.bin");
-		return -1;
-	}
-
-	fw_ptr = (unsigned short *)fw_entry->data;
-	count2 = fw_entry->size / 2;
-
-	/* Hardware adapter reset */
-	SIFWRITEW(ACL_ARESET, SIFACL);
-	tms380tr_wait(40);
-	
-	c = SIFREADW(SIFACL);
-	tms380tr_wait(20);
-
-	if(dev->dma == 0)	/* For PCI adapters */
-	{
-		c &= ~(ACL_NSELOUT0 | ACL_NSELOUT1);	/* Clear bits */
-		if(tp->setnselout)
-		  c |= (*tp->setnselout)(dev);
-	}
-
-	/* In case a command is pending - forget it */
-	tp->ScbInUse = 0;
-
-	c &= ~ACL_ARESET;		/* Clear adapter reset bit */
-	c |=  ACL_CPHALT;		/* Halt adapter CPU, allow download */
-	c |= ACL_BOOT;
-	c |= ACL_SINTEN;
-	c &= ~ACL_PSDMAEN;		/* Clear pseudo dma bit */
-	SIFWRITEW(c, SIFACL);
-	tms380tr_wait(40);
-
-	count = 0;
-	/* Download firmware via DIO interface: */
-	do {
-		if (count2 < 3) continue;
-
-		/* Download first address part */
-		SIFWRITEW(*fw_ptr, SIFADX);
-		fw_ptr++;
-		count2--;
-		/* Download second address part */
-		SIFWRITEW(*fw_ptr, SIFADD);
-		fw_ptr++;
-		count2--;
-
-		if((count = *fw_ptr) != 0)	/* Load loop counter */
-		{
-			fw_ptr++;	/* Download block data */
-			count2--;
-			if (count > count2) continue;
-
-			for(; count > 0; count--)
-			{
-				SIFWRITEW(*fw_ptr, SIFINC);
-				fw_ptr++;
-				count2--;
-			}
-		}
-		else	/* Stop, if last block downloaded */
-		{
-			c = SIFREADW(SIFACL);
-			c &= (~ACL_CPHALT | ACL_SINTEN);
-
-			/* Clear CPHALT and start BUD */
-			SIFWRITEW(c, SIFACL);
-			release_firmware(fw_entry);
-			return 1;
-		}
-	} while(count == 0);
-
-	release_firmware(fw_entry);
-	printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name);
-	return -1;
-}
-
-MODULE_FIRMWARE("tms380tr.bin");
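
The firmware load in tms380tr_reset_adapter() follows the standard request_firmware()/release_firmware() pattern. Stripped of the download loop, the skeleton (hypothetical helper name) is:

static int tms380tr_fetch_fw(struct net_device *dev, struct device *pdev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "tms380tr.bin", pdev);
	if (err) {
		printk(KERN_ALERT "%s: firmware tms380tr.bin is missing\n",
			dev->name);
		return err;
	}

	/* ... stream fw->data (fw->size bytes) through SIFADX/SIFADD/SIFINC ... */

	release_firmware(fw);
	return 0;
}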
-
-/*
- * Starts the bring-up diagnostics (BUD) of the token ring adapter and
- * evaluates the diagnostic results.
- */
-static int tms380tr_bringup_diags(struct net_device *dev)
-{
-	int loop_cnt, retry_cnt;
-	unsigned short Status;
-
-	tms380tr_wait(HALF_SECOND);
-	tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
-	tms380tr_wait(HALF_SECOND);
-
-	retry_cnt = BUD_MAX_RETRIES;	/* maximum number of retries */
-
-	do {
-		retry_cnt--;
-		if(tms380tr_debug > 3)
-			printk(KERN_DEBUG "BUD-Status: ");
-		loop_cnt = BUD_MAX_LOOPCNT;	/* maximum: three seconds*/
-		do {			/* Inspect BUD results */
-			loop_cnt--;
-			tms380tr_wait(HALF_SECOND);
-			Status = SIFREADW(SIFSTS);
-			Status &= STS_MASK;
-
-			if(tms380tr_debug > 3)
-				printk(KERN_DEBUG " %04X\n", Status);
-			/* BUD successfully completed */
-			if(Status == STS_INITIALIZE)
-				return 1;
-		/* Unrecoverable hardware error, BUD not completed? */
-		} while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST))
-			!= (STS_ERROR | STS_TEST)));
-
-		/* Error preventing completion of BUD */
-		if(retry_cnt > 0)
-		{
-			printk(KERN_INFO "%s: Adapter Software Reset.\n", 
-				dev->name);
-			tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
-			tms380tr_wait(HALF_SECOND);
-		}
-	} while(retry_cnt > 0);
-
-	Status = SIFREADW(SIFSTS);
-	
-	printk(KERN_INFO "%s: Hardware error\n", dev->name);
-	/* Hardware error occurred! */
-	Status &= 0x001f;
-	if (Status & 0x0010)
-		printk(KERN_INFO "%s: BUD Error: Timeout\n", dev->name);
-	else if ((Status & 0x000f) > 6)
-		printk(KERN_INFO "%s: BUD Error: Illegal Failure\n", dev->name);
-	else
-		printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f);
-
-	return -1;
-}
-
-/*
- * Copies the initialisation data to adapter memory, beginning at address
- * 1:0A00, then starts the DMA test and evaluates the result bits.
- */
-static int tms380tr_init_adapter(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-
-	const unsigned char SCB_Test[6] = {0x00, 0x00, 0xC1, 0xE2, 0xD4, 0x8B};
-	const unsigned char SSB_Test[8] = {0xFF, 0xFF, 0xD1, 0xD7,
-						0xC5, 0xD9, 0xC3, 0xD4};
-	void *ptr = (void *)&tp->ipb;
-	unsigned short *ipb_ptr = (unsigned short *)ptr;
-	unsigned char *cb_ptr = (unsigned char *) &tp->scb;
-	unsigned char *sb_ptr = (unsigned char *) &tp->ssb;
-	unsigned short Status;
-	int i, loop_cnt, retry_cnt;
-
-	/* Normalize: byte order low/high, word order high/low! (only IPB!) */
-	tp->ipb.SCB_Addr = SWAPW(((char *)&tp->scb - (char *)tp) + tp->dmabuffer);
-	tp->ipb.SSB_Addr = SWAPW(((char *)&tp->ssb - (char *)tp) + tp->dmabuffer);
-
-	if(tms380tr_debug > 3)
-	{
-		printk(KERN_DEBUG "%s: buffer (real): %lx\n", dev->name, (long) &tp->scb);
-		printk(KERN_DEBUG "%s: buffer (virt): %lx\n", dev->name, (long) ((char *)&tp->scb - (char *)tp) + (long) tp->dmabuffer);
-		printk(KERN_DEBUG "%s: buffer (DMA) : %lx\n", dev->name, (long) tp->dmabuffer);
-		printk(KERN_DEBUG "%s: buffer (tp)  : %lx\n", dev->name, (long) tp);
-	}
-	/* Maximum: three initialization retries */
-	retry_cnt = INIT_MAX_RETRIES;
-
-	do {
-		retry_cnt--;
-
-		/* Transfer initialization block */
-		SIFWRITEW(0x0001, SIFADX);
-
-		/* To address 0001:0A00 of adapter RAM */
-		SIFWRITEW(0x0A00, SIFADD);
-
-		/* Write 11 words to adapter RAM */
-		for(i = 0; i < 11; i++)
-			SIFWRITEW(ipb_ptr[i], SIFINC);
-
-		/* Execute SCB adapter command */
-		tms380tr_exec_sifcmd(dev, CMD_EXECUTE);
-
-		loop_cnt = INIT_MAX_LOOPCNT;	/* Maximum: 11 seconds */
-
-		/* While remaining retries, no error and not completed */
-		do {
-			Status = 0;
-			loop_cnt--;
-			tms380tr_wait(HALF_SECOND);
-
-			/* Mask interesting status bits */
-			Status = SIFREADW(SIFSTS);
-			Status &= STS_MASK;
-		} while(((Status &(STS_INITIALIZE | STS_ERROR | STS_TEST)) != 0) &&
-			((Status & STS_ERROR) == 0) && (loop_cnt != 0));
-
-		if((Status & (STS_INITIALIZE | STS_ERROR | STS_TEST)) == 0)
-		{
-			/* Initialization completed without error */
-			i = 0;
-			do {	/* Test if contents of SCB is valid */
-				if(SCB_Test[i] != *(cb_ptr + i))
-				{
-					printk(KERN_INFO "%s: DMA failed\n", dev->name);
-					/* DMA data error: wrong data in SCB */
-					return -1;
-				}
-				i++;
-			} while(i < 6);
-
-			i = 0;
-			do {	/* Test if contents of SSB is valid */
-				if(SSB_Test[i] != *(sb_ptr + i))
-					/* DMA data error: wrong data in SSB */
-					return -1;
-				i++;
-			} while (i < 8);
-
-			return 1;	/* Adapter successfully initialized */
-		}
-		else
-		{
-			if((Status & STS_ERROR) != 0)
-			{
-				/* Initialization error occurred */
-				Status = SIFREADW(SIFSTS);
-				Status &= STS_ERROR_MASK;
-				/* ShowInitialisationErrorCode(Status); */
-				printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status);
-				return -1; /* Unrecoverable error */
-			}
-			else
-			{
-				if(retry_cnt > 0)
-				{
-					/* Reset adapter and try init again */
-					tms380tr_exec_sifcmd(dev, EXEC_SOFT_RESET);
-					tms380tr_wait(HALF_SECOND);
-				}
-			}
-		}
-	} while(retry_cnt > 0);
-
-	printk(KERN_INFO "%s: Retry exceeded\n", dev->name);
-	return -1;
-}
-
-/*
- * Checks for outstanding commands in the command queue and tries to execute
- * one immediately. The corresponding command flag in the command queue is cleared.
- */
-static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	unsigned long Addr = 0;
-
-	if(tp->CMDqueue == 0)
-		return;		/* No command execution */
-
-	/* If SCB in use: no command */
-	if(tp->ScbInUse == 1)
-		return;
-
-	/* Check if adapter is opened, avoiding COMMAND_REJECT
-	 * interrupt by the adapter!
-	 */
-	if (tp->AdapterOpenFlag == 0) {
-		if (tp->CMDqueue & OC_OPEN) {
-			/* Execute OPEN command	*/
-			tp->CMDqueue ^= OC_OPEN;
-
-			Addr = htonl(((char *)&tp->ocpl - (char *)tp) + tp->dmabuffer);
-			tp->scb.Parm[0] = LOWORD(Addr);
-			tp->scb.Parm[1] = HIWORD(Addr);
-			tp->scb.CMD = OPEN;
-		} else
-			/* No OPEN command queued, but adapter closed. Note:
-			 * We'll try to re-open the adapter in DriverPoll()
-			 */
-			return;		/* No adapter command issued */
-	} else {
-		/* Adapter is open; evaluate command queue: try to execute
-		 * outstanding commands (depending on priority!) CLOSE
-		 * command queued
-		 */
-		if (tp->CMDqueue & OC_CLOSE) {
-			tp->CMDqueue ^= OC_CLOSE;
-			tp->AdapterOpenFlag = 0;
-			tp->scb.Parm[0] = 0; /* Parm[0], Parm[1] are ignored */
-			tp->scb.Parm[1] = 0; /* but should be set to zero! */
-			tp->scb.CMD = CLOSE;
-			if(!tp->HaltInProgress)
-				tp->CMDqueue |= OC_OPEN; /* re-open adapter */
-			else
-				tp->CMDqueue = 0;	/* no more commands */
-		} else if (tp->CMDqueue & OC_RECEIVE) {
-			tp->CMDqueue ^= OC_RECEIVE;
-			Addr = htonl(((char *)tp->RplHead - (char *)tp) + tp->dmabuffer);
-			tp->scb.Parm[0] = LOWORD(Addr);
-			tp->scb.Parm[1] = HIWORD(Addr);
-			tp->scb.CMD = RECEIVE;
-		} else if (tp->CMDqueue & OC_TRANSMIT_HALT) {
-			/* NOTE: TRANSMIT.HALT must be checked
-			 * before TRANSMIT.
-			 */
-			tp->CMDqueue ^= OC_TRANSMIT_HALT;
-			tp->scb.CMD = TRANSMIT_HALT;
-
-			/* Parm[0] and Parm[1] are ignored
-			 * but should be set to zero!
-			 */
-			tp->scb.Parm[0] = 0;
-			tp->scb.Parm[1] = 0;
-		} else if (tp->CMDqueue & OC_TRANSMIT) {
-			/* NOTE: TRANSMIT must be
-			 * checked after TRANSMIT.HALT
-			 */
-			if (tp->TransmitCommandActive) {
-				if (!tp->TransmitHaltScheduled) {
-					tp->TransmitHaltScheduled = 1;
-					tms380tr_exec_cmd(dev, OC_TRANSMIT_HALT);
-				}
-				tp->TransmitCommandActive = 0;
-				return;
-			}
-
-			tp->CMDqueue ^= OC_TRANSMIT;
-			tms380tr_cancel_tx_queue(tp);
-			Addr = htonl(((char *)tp->TplBusy - (char *)tp) + tp->dmabuffer);
-			tp->scb.Parm[0] = LOWORD(Addr);
-			tp->scb.Parm[1] = HIWORD(Addr);
-			tp->scb.CMD = TRANSMIT;
-			tp->TransmitCommandActive = 1;
-		} else if (tp->CMDqueue & OC_MODIFY_OPEN_PARMS) {
-			tp->CMDqueue ^= OC_MODIFY_OPEN_PARMS;
-			tp->scb.Parm[0] = tp->ocpl.OPENOptions; /* new OPEN options*/
-			tp->scb.Parm[0] |= ENABLE_FULL_DUPLEX_SELECTION;
-			tp->scb.Parm[1] = 0; /* is ignored but should be zero */
-			tp->scb.CMD = MODIFY_OPEN_PARMS;
-		} else if (tp->CMDqueue & OC_SET_FUNCT_ADDR) {
-			tp->CMDqueue ^= OC_SET_FUNCT_ADDR;
-			tp->scb.Parm[0] = LOWORD(tp->ocpl.FunctAddr);
-			tp->scb.Parm[1] = HIWORD(tp->ocpl.FunctAddr);
-			tp->scb.CMD = SET_FUNCT_ADDR;
-		} else if (tp->CMDqueue & OC_SET_GROUP_ADDR) {
-			tp->CMDqueue ^= OC_SET_GROUP_ADDR;
-			tp->scb.Parm[0] = LOWORD(tp->ocpl.GroupAddr);
-			tp->scb.Parm[1] = HIWORD(tp->ocpl.GroupAddr);
-			tp->scb.CMD = SET_GROUP_ADDR;
-		} else if (tp->CMDqueue & OC_READ_ERROR_LOG) {
-			tp->CMDqueue ^= OC_READ_ERROR_LOG;
-			Addr = htonl(((char *)&tp->errorlogtable - (char *)tp) + tp->dmabuffer);
-			tp->scb.Parm[0] = LOWORD(Addr);
-			tp->scb.Parm[1] = HIWORD(Addr);
-			tp->scb.CMD = READ_ERROR_LOG;
-		} else {
-			printk(KERN_WARNING "CheckForOutstandingCommand: unknown Command\n");
-			tp->CMDqueue = 0;
-			return;
-		}
-	}
-
-	tp->ScbInUse = 1;	/* Set semaphore: SCB in use. */
-
-	/* Execute SCB and generate IRQ when done. */
-	tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST);
-}
-
-/*
- * IRQ conditions: signal loss on the ring, transmit or receive of beacon
- * frames (disabled if bit 1 of OPEN option is set); report error MAC
- * frame transmit (disabled if bit 2 of OPEN option is set); open or short
- * circuit fault on the lobe is detected; remove MAC frame received;
- * error counter overflow (255); opened adapter is the only station in ring.
- * After some of the IRQs the adapter is closed!
- */
-static void tms380tr_ring_status_irq(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-
-	tp->CurrentRingStatus = be16_to_cpu((unsigned short)tp->ssb.Parm[0]);
-
-	/* First: fill up statistics */
-	if(tp->ssb.Parm[0] & SIGNAL_LOSS)
-	{
-		printk(KERN_INFO "%s: Signal Loss\n", dev->name);
-		tp->MacStat.line_errors++;
-	}
-
-	/* Adapter is closed, but initialized */
-	if(tp->ssb.Parm[0] & LOBE_WIRE_FAULT)
-	{
-		printk(KERN_INFO "%s: Lobe Wire Fault, Reopen Adapter\n", 
-			dev->name);
-		tp->MacStat.line_errors++;
-	}
-
-	if(tp->ssb.Parm[0] & RING_RECOVERY)
-		printk(KERN_INFO "%s: Ring Recovery\n", dev->name);
-
-	/* Counter overflow: read error log */
-	if(tp->ssb.Parm[0] & COUNTER_OVERFLOW)
-	{
-		printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
-		tms380tr_exec_cmd(dev, OC_READ_ERROR_LOG);
-	}
-
-	/* Adapter is closed, but initialized */
-	if(tp->ssb.Parm[0] & REMOVE_RECEIVED)
-		printk(KERN_INFO "%s: Remove Received, Reopen Adapter\n", 
-			dev->name);
-
-	/* Adapter is closed, but initialized */
-	if(tp->ssb.Parm[0] & AUTO_REMOVAL_ERROR)
-		printk(KERN_INFO "%s: Auto Removal Error, Reopen Adapter\n", 
-			dev->name);
-
-	if(tp->ssb.Parm[0] & HARD_ERROR)
-		printk(KERN_INFO "%s: Hard Error\n", dev->name);
-
-	if(tp->ssb.Parm[0] & SOFT_ERROR)
-		printk(KERN_INFO "%s: Soft Error\n", dev->name);
-
-	if(tp->ssb.Parm[0] & TRANSMIT_BEACON)
-		printk(KERN_INFO "%s: Transmit Beacon\n", dev->name);
-
-	if(tp->ssb.Parm[0] & SINGLE_STATION)
-		printk(KERN_INFO "%s: Single Station\n", dev->name);
-
-	/* Check if adapter has been closed */
-	if(tp->ssb.Parm[0] & ADAPTER_CLOSED)
-	{
-		printk(KERN_INFO "%s: Adapter closed (Reopening), "
-			"CurrentRingStat %x\n",
-			dev->name, tp->CurrentRingStatus);
-		tp->AdapterOpenFlag = 0;
-		tms380tr_open_adapter(dev);
-	}
-}
-
-/*
- * Issued if adapter has encountered an unrecoverable hardware
- * or software error.
- */
-static void tms380tr_chk_irq(struct net_device *dev)
-{
-	int i;
-	unsigned short AdapterCheckBlock[4];
-	struct net_local *tp = netdev_priv(dev);
-
-	tp->AdapterOpenFlag = 0;	/* Adapter closed now */
-
-	/* Page number of adapter memory */
-	SIFWRITEW(0x0001, SIFADX);
-	/* Address offset */
-	SIFWRITEW(CHECKADDR, SIFADR);
-
-	/* Reading 8 byte adapter check block. */
-	for(i = 0; i < 4; i++)
-		AdapterCheckBlock[i] = SIFREADW(SIFINC);
-
-	if(tms380tr_debug > 3)
-	{
-		printk(KERN_DEBUG "%s: AdapterCheckBlock: ", dev->name);
-		for (i = 0; i < 4; i++)
-			printk("%04X", AdapterCheckBlock[i]);
-		printk("\n");
-	}
-
-	switch(AdapterCheckBlock[0])
-	{
-		case DIO_PARITY:
-			printk(KERN_INFO "%s: DIO parity error\n", dev->name);
-			break;
-
-		case DMA_READ_ABORT:
-			printk(KERN_INFO "%s: DMA read operation aborted:\n",
-				dev->name);
-			switch (AdapterCheckBlock[1])
-			{
-				case 0:
-					printk(KERN_INFO "Timeout\n");
-					printk(KERN_INFO "Address: %04X %04X\n",
-						AdapterCheckBlock[2],
-						AdapterCheckBlock[3]);
-					break;
-
-				case 1:
-					printk(KERN_INFO "Parity error\n");
-					printk(KERN_INFO "Address: %04X %04X\n",
-						AdapterCheckBlock[2], 
-						AdapterCheckBlock[3]);
-					break;
-
-				case 2: 
-					printk(KERN_INFO "Bus error\n");
-					printk(KERN_INFO "Address: %04X %04X\n",
-						AdapterCheckBlock[2], 
-						AdapterCheckBlock[3]);
-					break;
-
-				default:
-					printk(KERN_INFO "Unknown error.\n");
-					break;
-			}
-			break;
-
-		case DMA_WRITE_ABORT:
-			printk(KERN_INFO "%s: DMA write operation aborted:\n",
-				dev->name);
-			switch (AdapterCheckBlock[1])
-			{
-				case 0: 
-					printk(KERN_INFO "Timeout\n");
-					printk(KERN_INFO "Address: %04X %04X\n",
-						AdapterCheckBlock[2], 
-						AdapterCheckBlock[3]);
-					break;
-
-				case 1: 
-					printk(KERN_INFO "Parity error\n");
-					printk(KERN_INFO "Address: %04X %04X\n",
-						AdapterCheckBlock[2], 
-						AdapterCheckBlock[3]);
-					break;
-
-				case 2: 
-					printk(KERN_INFO "Bus error\n");
-					printk(KERN_INFO "Address: %04X %04X\n",
-						AdapterCheckBlock[2], 
-						AdapterCheckBlock[3]);
-					break;
-
-				default:
-					printk(KERN_INFO "Unknown error.\n");
-					break;
-			}
-			break;
-
-		case ILLEGAL_OP_CODE:
-			printk(KERN_INFO "%s: Illegal operation code in firmware\n",
-				dev->name);
-			/* Parm[0-3]: adapter internal register R13-R15 */
-			break;
-
-		case PARITY_ERRORS:
-			printk(KERN_INFO "%s: Adapter internal bus parity error\n",
-				dev->name);
-			/* Parm[0-3]: adapter internal register R13-R15 */
-			break;
-
-		case RAM_DATA_ERROR:
-			printk(KERN_INFO "%s: RAM data error\n", dev->name);
-			/* Parm[0-1]: MSW/LSW address of RAM location. */
-			break;
-
-		case RAM_PARITY_ERROR:
-			printk(KERN_INFO "%s: RAM parity error\n", dev->name);
-			/* Parm[0-1]: MSW/LSW address of RAM location. */
-			break;
-
-		case RING_UNDERRUN:
-			printk(KERN_INFO "%s: Internal DMA underrun detected\n",
-				dev->name);
-			break;
-
-		case INVALID_IRQ:
-			printk(KERN_INFO "%s: Unrecognized interrupt detected\n",
-				dev->name);
-			/* Parm[0-3]: adapter internal register R13-R15 */
-			break;
-
-		case INVALID_ERROR_IRQ:
-			printk(KERN_INFO "%s: Unrecognized error interrupt detected\n",
-				dev->name);
-			/* Parm[0-3]: adapter internal register R13-R15 */
-			break;
-
-		case INVALID_XOP:
-			printk(KERN_INFO "%s: Unrecognized XOP request detected\n",
-				dev->name);
-			/* Parm[0-3]: adapter internal register R13-R15 */
-			break;
-
-		default:
-			printk(KERN_INFO "%s: Unknown status\n", dev->name);
-			break;
-	}
-
-	if(tms380tr_chipset_init(dev) == 1)
-	{
-		/* Restart of firmware successful */
-		tp->AdapterOpenFlag = 1;
-	}
-}
-
-/*
- * The adapter's internal pointers to RAM data are copied from the adapter
- * into the host system.
- */
-static int tms380tr_read_ptr(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	unsigned short adapterram;
-
-	tms380tr_read_ram(dev, (unsigned char *)&tp->intptrs.BurnedInAddrPtr,
-			ADAPTER_INT_PTRS, 16);
-	tms380tr_read_ram(dev, (unsigned char *)&adapterram,
-			cpu_to_be16((unsigned short)tp->intptrs.AdapterRAMPtr), 2);
-	return be16_to_cpu(adapterram); 
-}
-
-/*
- * Reads a number of bytes from adapter to system memory.
- */
-static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
-				unsigned short Address, int Length)
-{
-	int i;
-	unsigned short old_sifadx, old_sifadr, InWord;
-
-	/* Save the current values */
-	old_sifadx = SIFREADW(SIFADX);
-	old_sifadr = SIFREADW(SIFADR);
-
-	/* Page number of adapter memory */
-	SIFWRITEW(0x0001, SIFADX);
-	/* Address offset in adapter RAM */
-	SIFWRITEW(Address, SIFADR);
-
-	/* Copy len byte from adapter memory to system data area. */
-	i = 0;
-	for(;;)
-	{
-		InWord = SIFREADW(SIFINC);
-
-		*(Data + i) = HIBYTE(InWord);	/* Write first byte */
-		if(++i == Length)		/* All is done break */
-			break;
-
-		*(Data + i) = LOBYTE(InWord);	/* Write second byte */
-		if (++i == Length)		/* All is done break */
-			break;
-	}
-
-	/* Restore original values */
-	SIFWRITEW(old_sifadx, SIFADX);
-	SIFWRITEW(old_sifadr, SIFADR);
-}
-
-/*
- * Cancel all queued packets in the transmission queue.
- */
-static void tms380tr_cancel_tx_queue(struct net_local* tp)
-{
-	TPL *tpl;
-
-	/*
-	 * NOTE: There must not be an active TRANSMIT command pending, when
-	 * this function is called.
-	 */
-	if(tp->TransmitCommandActive)
-		return;
-
-	for(;;)
-	{
-		tpl = tp->TplBusy;
-		if(!tpl->BusyFlag)
-			break;
-		/* "Remove" TPL from busy list. */
-		tp->TplBusy = tpl->NextTPLPtr;
-		tms380tr_write_tpl_status(tpl, 0);	/* Clear VALID bit */
-		tpl->BusyFlag = 0;		/* "free" TPL */
-
-		printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl);
-		if (tpl->DMABuff)
-			dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
-		dev_kfree_skb_any(tpl->Skb);
-	}
-}
-
-/*
- * This function is called whenever a transmit interrupt is generated by the
- * adapter. For a command complete interrupt, it is checked if we have to
- * issue a new transmit command or not.
- */
-static void tms380tr_tx_status_irq(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	unsigned char HighByte, HighAc, LowAc;
-	TPL *tpl;
-
-	/* NOTE: At this point the SSB from TRANSMIT STATUS is no longer
-	 * available, because the CLEAR SSB command has already been issued.
-	 *
-	 * Process all complete transmissions.
-	 */
-
-	for(;;)
-	{
-		tpl = tp->TplBusy;
-		if(!tpl->BusyFlag || (tpl->Status
-			& (TX_VALID | TX_FRAME_COMPLETE))
-			!= TX_FRAME_COMPLETE)
-		{
-			break;
-		}
-
-		/* "Remove" TPL from busy list. */
-		tp->TplBusy = tpl->NextTPLPtr;
-
-		/* Check the transmit status field only for directed frames*/
-		if(DIRECTED_FRAME(tpl) && (tpl->Status & TX_ERROR) == 0)
-		{
-			HighByte = GET_TRANSMIT_STATUS_HIGH_BYTE(tpl->Status);
-			HighAc   = GET_FRAME_STATUS_HIGH_AC(HighByte);
-			LowAc    = GET_FRAME_STATUS_LOW_AC(HighByte);
-
-			if((HighAc != LowAc) || (HighAc == AC_NOT_RECOGNIZED))
-			{
-				printk(KERN_DEBUG "%s: (DA=%08lX not recognized)\n",
-					dev->name,
-					*(unsigned long *)&tpl->MData[2+2]);
-			}
-			else
-			{
-				if(tms380tr_debug > 3)
-					printk(KERN_DEBUG "%s: Directed frame tx'd\n", 
-						dev->name);
-			}
-		}
-		else
-		{
-			if(!DIRECTED_FRAME(tpl))
-			{
-				if(tms380tr_debug > 3)
-					printk(KERN_DEBUG "%s: Broadcast frame tx'd\n",
-						dev->name);
-			}
-		}
-
-		tp->MacStat.tx_packets++;
-		if (tpl->DMABuff)
-			dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
-		dev_kfree_skb_irq(tpl->Skb);
-		tpl->BusyFlag = 0;	/* "free" TPL */
-	}
-
-	if(!tp->TplFree->NextTPLPtr->BusyFlag)
-		netif_wake_queue(dev);
-}
-
-/*
- * Called if a frame receive interrupt is generated by the adapter.
 * Checks if the frame is valid and indicates it to the system.
- */
-static void tms380tr_rcv_status_irq(struct net_device *dev)
-{
-	struct net_local *tp = netdev_priv(dev);
-	unsigned char *ReceiveDataPtr;
-	struct sk_buff *skb;
-	unsigned int Length, Length2;
-	RPL *rpl;
-	RPL *SaveHead;
-	dma_addr_t dmabuf;
-
-	/* NOTE: At this point the SSB from RECEIVE STATUS is no longer
-	 * available, because the CLEAR SSB command has already been issued.
-	 *
-	 * Process all complete receives.
-	 */
-
-	for(;;)
-	{
-		rpl = tp->RplHead;
-		if(rpl->Status & RX_VALID)
-			break;		/* RPL still in use by adapter */
-
-		/* Forward RPLHead pointer to next list. */
-		SaveHead = tp->RplHead;
-		tp->RplHead = rpl->NextRPLPtr;
-
-		/* Get the frame size (Byte swap for Intel).
-		 * Do this early (see workaround comment below)
-		 */
-		Length = be16_to_cpu(rpl->FrameSize);
-
-		/* Check if the Frame_Start, Frame_End and
-		 * Frame_Complete bits are set.
-		 */
-		if((rpl->Status & VALID_SINGLE_BUFFER_FRAME)
-			== VALID_SINGLE_BUFFER_FRAME)
-		{
-			ReceiveDataPtr = rpl->MData;
-
-			/* Workaround for delayed write of FrameSize on ISA
-			 * (FrameSize is false but valid-bit is reset)
-			 * Frame size is set to zero when the RPL is freed.
-			 * Length2 is there because there have also been
-			 * cases where the FrameSize was partially written
-			 */
-			Length2 = be16_to_cpu(rpl->FrameSize);
-
-			if(Length == 0 || Length != Length2)
-			{
-				tp->RplHead = SaveHead;
-				break;	/* Return to tms380tr_interrupt */
-			}
-			tms380tr_update_rcv_stats(tp,ReceiveDataPtr,Length);
-			  
-			if(tms380tr_debug > 3)
-				printk(KERN_DEBUG "%s: Packet Length %04X (%d)\n",
-					dev->name, Length, Length);
-			  
-			/* Indicate the received frame to the system; the
-			 * adapter does the Source-Routing padding for
-			 * us. See: OpenOptions in tms380tr_init_opb()
-			 */
-			skb = rpl->Skb;
-			if(rpl->SkbStat == SKB_UNAVAILABLE)
-			{
-				/* Try again to allocate skb */
-				skb = dev_alloc_skb(tp->MaxPacketSize);
-				if(skb == NULL)
-				{
-					/* Update Stats ?? */
-				}
-				else
-				{
-					skb_put(skb, tp->MaxPacketSize);
-					rpl->SkbStat 	= SKB_DATA_COPY;
-					ReceiveDataPtr 	= rpl->MData;
-				}
-			}
-
-			if(skb && (rpl->SkbStat == SKB_DATA_COPY ||
-				   rpl->SkbStat == SKB_DMA_DIRECT))
-			{
-				if(rpl->SkbStat == SKB_DATA_COPY)
-					skb_copy_to_linear_data(skb, ReceiveDataPtr,
-						       Length);
-
-				/* Deliver frame to system */
-				rpl->Skb = NULL;
-				skb_trim(skb,Length);
-				skb->protocol = tr_type_trans(skb,dev);
-				netif_rx(skb);
-			}
-		}
-		else	/* Invalid frame */
-		{
-			if(rpl->Skb != NULL)
-				dev_kfree_skb_irq(rpl->Skb);
-
-			/* Skip list. */
-			if(rpl->Status & RX_START_FRAME)
-				/* Frame start bit is set -> overflow. */
-				tp->MacStat.rx_errors++;
-		}
-		if (rpl->DMABuff)
-			dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_FROM_DEVICE);
-		rpl->DMABuff = 0;
-
-		/* Allocate new skb for rpl */
-		rpl->Skb = dev_alloc_skb(tp->MaxPacketSize);
-		/* skb == NULL ? then use local buffer */
-		if(rpl->Skb == NULL)
-		{
-			rpl->SkbStat = SKB_UNAVAILABLE;
-			rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
-			rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
-		}
-		else	/* skb != NULL */
-		{
-			rpl->Skb->dev = dev;
-			skb_put(rpl->Skb, tp->MaxPacketSize);
-
-			/* Data unreachable for DMA ? then use local buffer */
-			dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
-			if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
-			{
-				rpl->SkbStat = SKB_DATA_COPY;
-				rpl->FragList[0].DataAddr = htonl(((char *)tp->LocalRxBuffers[rpl->RPLIndex] - (char *)tp) + tp->dmabuffer);
-				rpl->MData = tp->LocalRxBuffers[rpl->RPLIndex];
-			}
-			else
-			{
-				/* DMA directly in skb->data */
-				rpl->SkbStat = SKB_DMA_DIRECT;
-				rpl->FragList[0].DataAddr = htonl(dmabuf);
-				rpl->MData = rpl->Skb->data;
-				rpl->DMABuff = dmabuf;
-			}
-		}
-
-		rpl->FragList[0].DataCount = cpu_to_be16((unsigned short)tp->MaxPacketSize);
-		rpl->FrameSize = 0;
-
-		/* Pass the last RPL back to the adapter */
-		tp->RplTail->FrameSize = 0;
-
-		/* Reset the CSTAT field in the list. */
-		tms380tr_write_rpl_status(tp->RplTail, RX_VALID | RX_FRAME_IRQ);
-
-		/* Current RPL becomes last one in list. */
-		tp->RplTail = tp->RplTail->NextRPLPtr;
-
-		/* Inform adapter about RPL valid. */
-		tms380tr_exec_sifcmd(dev, CMD_RX_VALID);
-	}
-}
-
-/*
- * This function should be used whenever the status of any RPL must be
- * modified by the driver, because the compiler may otherwise change the
- * order of instructions such that writing the RPL status may be executed
- * at an undesirable time. When this function is used, the status is
- * always written when the function is called.
- */
-static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
-{
-	rpl->Status = Status;
-}
-
-/*
- * The function updates the statistics counters in tp->MacStat.
- * It differentiates between directed and broadcast/multicast (== functional)
- * frames.
- */
-static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPtr[],
-					unsigned int Length)
-{
-	tp->MacStat.rx_packets++;
-	tp->MacStat.rx_bytes += Length;
-	
-	/* Test functional bit */
-	if(DataPtr[2] & GROUP_BIT)
-		tp->MacStat.multicast++;
-}
-
-static int tms380tr_set_mac_address(struct net_device *dev, void *addr)
-{
-	struct net_local *tp = netdev_priv(dev);
-	struct sockaddr *saddr = addr;
-	
-	if (tp->AdapterOpenFlag || tp->AdapterVirtOpenFlag) {
-		printk(KERN_WARNING "%s: Cannot set MAC/LAA address while card is open\n", dev->name);
-		return -EIO;
-	}
-	memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
-	return 0;
-}
-
-#if TMS380TR_DEBUG > 0
-/*
- * Dump Packet (data)
- */
-static void tms380tr_dump(unsigned char *Data, int length)
-{
-	int i, j;
-
-	for (i = 0, j = 0; i < length / 8; i++, j += 8)
-	{
-		printk(KERN_DEBUG "%02x %02x %02x %02x %02x %02x %02x %02x\n",
-		       Data[j+0],Data[j+1],Data[j+2],Data[j+3],
-		       Data[j+4],Data[j+5],Data[j+6],Data[j+7]);
-	}
-}
-#endif
-
-void tmsdev_term(struct net_device *dev)
-{
-	struct net_local *tp;
-
-	tp = netdev_priv(dev);
-	dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
-		DMA_BIDIRECTIONAL);
-}
-
-const struct net_device_ops tms380tr_netdev_ops = {
-	.ndo_open		= tms380tr_open,
-	.ndo_stop		= tms380tr_close,
-	.ndo_start_xmit		= tms380tr_send_packet,
-	.ndo_tx_timeout		= tms380tr_timeout,
-	.ndo_get_stats		= tms380tr_get_stats,
-	.ndo_set_rx_mode	= tms380tr_set_multicast_list,
-	.ndo_set_mac_address	= tms380tr_set_mac_address,
-};
-EXPORT_SYMBOL(tms380tr_netdev_ops);
-
-int tmsdev_init(struct net_device *dev, struct device *pdev)
-{
-	struct net_local *tms_local;
-
-	memset(netdev_priv(dev), 0, sizeof(struct net_local));
-	tms_local = netdev_priv(dev);
-	init_waitqueue_head(&tms_local->wait_for_tok_int);
-	if (pdev->dma_mask)
-		tms_local->dmalimit = *pdev->dma_mask;
-	else
-		return -ENOMEM;
-	tms_local->pdev = pdev;
-	tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local,
-	    sizeof(struct net_local), DMA_BIDIRECTIONAL);
-	if (tms_local->dmabuffer + sizeof(struct net_local) > 
-			tms_local->dmalimit)
-	{
-		printk(KERN_INFO "%s: Memory not accessible for DMA\n",
-			dev->name);
-		tmsdev_term(dev);
-		return -ENOMEM;
-	}
-	
-	dev->netdev_ops		= &tms380tr_netdev_ops;
-	dev->watchdog_timeo	= HZ;
-
-	return 0;
-}
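
One more hedged note on the mapping above: the dmalimit comparison only catches addresses beyond the device's reachable range; a mapping obtained from dma_map_single() should also be validated with dma_mapping_error() before use. A fragment in terms of the same locals:

	tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local,
	    sizeof(struct net_local), DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pdev, tms_local->dmabuffer))
		return -ENOMEM;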
-
-EXPORT_SYMBOL(tms380tr_open);
-EXPORT_SYMBOL(tms380tr_close);
-EXPORT_SYMBOL(tms380tr_interrupt);
-EXPORT_SYMBOL(tmsdev_init);
-EXPORT_SYMBOL(tmsdev_term);
-EXPORT_SYMBOL(tms380tr_wait);
-
-#ifdef MODULE
-
-static struct module *TMS380_module = NULL;
-
-int init_module(void)
-{
-	printk(KERN_DEBUG "%s", version);
-	
-	TMS380_module = &__this_module;
-	return 0;
-}
-
-void cleanup_module(void)
-{
-	TMS380_module = NULL;
-}
-#endif
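
For reference, the explicit init_module()/cleanup_module() pair is normally written today with the module_init()/module_exit() macros, which also work when the code is built in; a sketch reusing the driver's version banner string (defined earlier in the file):

static int __init tms380tr_mod_init(void)
{
	printk(KERN_DEBUG "%s", version);
	return 0;
}

static void __exit tms380tr_mod_exit(void)
{
}

module_init(tms380tr_mod_init);
module_exit(tms380tr_mod_exit);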
-
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
deleted file mode 100644
index e5a617c..0000000
--- a/drivers/net/tokenring/tms380tr.h
+++ /dev/null
@@ -1,1141 +0,0 @@
-/* 
- * tms380tr.h: TI TMS380 Token Ring driver for Linux
- *
- * Authors:
- * - Christoph Goos <cgoos@syskonnect.de>
- * - Adam Fritzler
- */
-
-#ifndef __LINUX_TMS380TR_H
-#define __LINUX_TMS380TR_H
-
-#ifdef __KERNEL__
-
-#include <linux/interrupt.h>
-
-/* module prototypes */
-extern const struct net_device_ops tms380tr_netdev_ops;
-int tms380tr_open(struct net_device *dev);
-int tms380tr_close(struct net_device *dev);
-irqreturn_t tms380tr_interrupt(int irq, void *dev_id);
-int tmsdev_init(struct net_device *dev, struct device *pdev);
-void tmsdev_term(struct net_device *dev);
-void tms380tr_wait(unsigned long time);
-
-#define TMS380TR_MAX_ADAPTERS 7
-
-#define SEND_TIMEOUT 10*HZ
-
-#define TR_RCF_LONGEST_FRAME_MASK 0x0070
-#define TR_RCF_FRAME4K 0x0030
-
-/*------------------------------------------------------------------*/
-/*  Bit order for adapter communication with DMA		    */
-/*  --------------------------------------------------------------  */
-/*  Bit  8 | 9| 10| 11|| 12| 13| 14| 15|| 0| 1| 2| 3|| 4| 5| 6| 7|  */
-/*  --------------------------------------------------------------  */
-/*  The bytes in a word must be byte swapped. Also, if a double	    */
-/*  word is used for storage, then the words, as well as the bytes, */
-/*  must be swapped. 						    */
-/*  Bit order for adapter communication with DIO 		    */
-/*  --------------------------------------------------------------  */
-/*  Bit  0 | 1| 2| 3|| 4| 5| 6| 7|| 8| 9| 10| 11|| 12| 13| 14| 15|  */
-/*  --------------------------------------------------------------  */
-/*------------------------------------------------------------------*/
-
-/* Swap words of a long.                        */
-#define SWAPW(x) (((x) << 16) | ((x) >> 16))
-
-/* Get the low byte of a word.                      */
-#define LOBYTE(w)       ((unsigned char)(w))
-
-/* Get the high byte of a word.                     */
-#define HIBYTE(w)       ((unsigned char)((unsigned short)(w) >> 8))
-
-/* Get the low word of a long.                      */
-#define LOWORD(l)       ((unsigned short)(l))
-
-/* Get the high word of a long.                     */
-#define HIWORD(l)       ((unsigned short)((unsigned long)(l) >> 16))
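
A quick worked example of these helpers on 32-bit quantities: with l = 0x12345678 and w = 0xABCD, SWAPW(l) is 0x56781234 (the word-swapped layout the adapter expects over DMA), LOWORD(l) is 0x5678, HIWORD(l) is 0x1234, LOBYTE(w) is 0xCD and HIBYTE(w) is 0xAB.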
-
-
-
-/* Token ring adapter I/O addresses for normal mode. */
-
-/*
- * The SIF registers.  Common to all adapters.
- */
-/* Basic SIF (SRSX = 0) */
-#define SIFDAT      		0x00	/* SIF/DMA data. */
-#define SIFINC      		0x02  	/* IO Word data with auto increment. */
-#define SIFINH      		0x03  	/* IO Byte data with auto increment. */
-#define SIFADR      		0x04  	/* SIF/DMA Address. */
-#define SIFCMD      		0x06  	/* SIF Command. */
-#define SIFSTS      		0x06  	/* SIF Status. */
-
-/* "Extended" SIF (SRSX = 1) */
-#define SIFACL      		0x08  	/* SIF Adapter Control Register. */
-#define SIFADD      		0x0a 	/* SIF/DMA Address. -- 0x0a */
-#define SIFADX      		0x0c     /* 0x0c */
-#define DMALEN      		0x0e 	/* SIF DMA length. -- 0x0e */
-
-/*
- * POS Registers.  Only for ISA Adapters.
- */
-#define POSREG      		0x10 	/* Adapter Program Option Select (POS)
-			 		 * Register: base IO address + 16 byte.
-			 		 */
-#define POSREG_2    		24L 	/* only for TR4/16+ adapter
-			 		 * base IO address + 24 byte. -- 0x18
-			 		 */
-
-/* SIFCMD command codes (high-low) */
-#define CMD_INTERRUPT_ADAPTER   0x8000  /* Cause internal adapter interrupt */
-#define CMD_ADAPTER_RESET   	0x4000  /* Hardware reset of adapter */
-#define CMD_SSB_CLEAR		0x2000  /* Acknowledge adapter-to-system
-					 * system interrupts.
-					 */
-#define CMD_EXECUTE		0x1000	/* Execute SCB command */
-#define CMD_SCB_REQUEST		0x0800  /* Request adapter to interrupt
-					 * system when SCB is available for
-					 * another command.
-					 */
-#define CMD_RX_CONTINUE		0x0400  /* Continue receive after odd pointer
-					 * stop. (odd pointer receive method)
-					 */
-#define CMD_RX_VALID		0x0200  /* Now actual RPL is valid. */
-#define CMD_TX_VALID		0x0100  /* Now actual TPL is valid. (valid
-					 * bit receive/transmit method)
-					 */
-#define CMD_SYSTEM_IRQ		0x0080  /* Adapter-to-attached-system
-					 * interrupt is reset.
-					 */
-#define CMD_CLEAR_SYSTEM_IRQ	0x0080	/* Clear SYSTEM_INTERRUPT bit.
-					 * (write: 1=ignore, 0=reset)
-					 */
-#define EXEC_SOFT_RESET		0xFF00  /* adapter soft reset. (restart
-					 * adapter after hardware reset)
-					 */
-
-
-/* ACL commands (high-low) */
-#define ACL_SWHLDA		0x0800  /* Software hold acknowledge. */
-#define ACL_SWDDIR		0x0400  /* Data transfer direction. */
-#define ACL_SWHRQ		0x0200  /* Pseudo DMA operation. */
-#define ACL_PSDMAEN		0x0100  /* Enable pseudo system DMA. */
-#define ACL_ARESET		0x0080  /* Adapter hardware reset command.
-					 * (held in reset condition as
-					 * long as bit is set)
-					 */
-#define ACL_CPHALT		0x0040  /* Communication processor halt.
-					 * (can only be set while ACL_ARESET
-					 * bit is set; prevents adapter
-					 * processor from executing code while
-					 * downloading firmware)
-					 */
-#define ACL_BOOT		0x0020
-#define ACL_SINTEN		0x0008  /* System interrupt enable/disable
-					 * (1/0): can be written if ACL_ARESET
-					 * is zero.
-					 */
-#define ACL_PEN                 0x0004
-
-#define ACL_NSELOUT0            0x0002 
-#define ACL_NSELOUT1            0x0001	/* NSELOUTx have a card-specific
-					 * meaning for setting ring speed.
-					 */
-
-#define PS_DMA_MASK		(ACL_SWHRQ | ACL_PSDMAEN)
-
-
-/* SIFSTS register return codes (high-low) */
-#define STS_SYSTEM_IRQ		0x0080	/* Adapter-to-attached-system
-					 * interrupt is valid.
-					 */
-#define STS_INITIALIZE		0x0040  /* INITIALIZE status. (ready to
-					 * initialize)
-					 */
-#define STS_TEST		0x0020  /* TEST status. (BUD not completed) */
-#define STS_ERROR		0x0010  /* ERROR status. (unrecoverable
-					 * HW error occurred)
-					 */
-#define STS_MASK		0x00F0  /* Mask interesting status bits. */
-#define STS_ERROR_MASK		0x000F  /* Get Error Code by masking the
-					 * interrupt code bits.
-					 */
-#define ADAPTER_INT_PTRS	0x0A00  /* Address offset of the adapter's
-					 * internal pointers, 01:0a00 (high-low);
-					 * they have to be read after init and
-					 * before open.
-					 */
-
-
-/* Interrupt Codes (only MAC IRQs) */
-#define STS_IRQ_ADAPTER_CHECK	0x0000	/* unrecoverable hardware or
-					 * software error.
-					 */ 
-#define STS_IRQ_RING_STATUS	0x0004  /* SSB is updated with ring status. */
-#define STS_IRQ_LLC_STATUS	0x0005	/* Not used in MAC-only microcode */
-#define STS_IRQ_SCB_CLEAR	0x0006	/* SCB clear, following an
-					 * SCB_REQUEST IRQ.
-					 */
-#define STS_IRQ_TIMER		0x0007	/* Not normally used in MAC ucode */
-#define STS_IRQ_COMMAND_STATUS	0x0008	/* SSB is updated with command 
-					 * status.
-					 */ 
-#define STS_IRQ_RECEIVE_STATUS	0x000A	/* SSB is updated with receive
-					 * status.
-					 */
-#define STS_IRQ_TRANSMIT_STATUS	0x000C	/* SSB is updated with transmit
-                                         * status
-					 */
-#define STS_IRQ_RECEIVE_PENDING	0x000E	/* Not used in MAC-only microcode */
-#define STS_IRQ_MASK		0x000F	/* = STS_ERROR_MASK. */
-
-
-/* TRANSMIT_STATUS completion code: (SSB.Parm[0]) */
-#define COMMAND_COMPLETE	0x0080	/* TRANSMIT command completed
-					 * (avoid this!); issue another TRANSMIT
-					 * to send additional frames.
-					 */
-#define FRAME_COMPLETE		0x0040	/* Frame has been transmitted;
-					 * INTERRUPT_FRAME bit was set in the
-					 * CSTAT request; indication of possibly
-					 * more than one frame transmissions!
-					 * SSB.Parm[0-1]: 32 bit pointer to
-					 * TPL of last frame.
-					 */
-#define LIST_ERROR		0x0020	/* Error in one of the TPLs that
-					 * compose the frame; TRANSMIT
-					 * terminated; Parm[1-2]: 32bit pointer
-					 * to TPL which starts the error
-					 * frame; error details in bits 8-13.
-					 * (14?)
-					 */
-#define FRAME_SIZE_ERROR	0x8000	/* FRAME_SIZE does not equal the sum of
-					 * the valid DATA_COUNT fields;
-					 * FRAME_SIZE less than header plus
-					 * information field. (15 bytes +
-					 * routing field) Or if FRAME_SIZE
-					 * was specified as zero in one list.
-					 */
-#define TX_THRESHOLD		0x4000	/* FRAME_SIZE greater than (BUFFER_SIZE
-					 * - 9) * TX_BUF_MAX.
-					 */
-#define ODD_ADDRESS		0x2000	/* Odd forward pointer value is
-					 * read on a list without END_FRAME
-					 * indication.
-					 */
-#define FRAME_ERROR		0x1000	/* START_FRAME bit (not) anticipated,
-					 * but (not) set.
-					 */
-#define ACCESS_PRIORITY_ERROR	0x0800	/* Access priority requested has not
-					 * been allowed.
-					 */
-#define UNENABLED_MAC_FRAME	0x0400	/* MAC frame has source class of zero
-					 * or MAC frame PCF ATTN field is
-					 * greater than one.
-					 */
-#define ILLEGAL_FRAME_FORMAT	0x0200	/* Bit 0 or FC field was set to one. */
-
-
-/*
- * Since we need to support some functions even if the adapter is in a
- * CLOSED state, we have a (pseudo-) command queue which holds commands
- * that are outstanding to be executed.
- *
- * Each time a command completes, an interrupt occurs and the next
- * command is executed. The command queue is actually a simple word with
- * a bit for each outstanding command. Therefore the commands will not be
- * executed in the order they have been queued.
- *
- * The following defines the command code bits and the command queue
- * (a usage sketch follows the OC_* defines below):
- */
-#define OC_OPEN			0x0001	/* OPEN command */
-#define OC_TRANSMIT		0x0002	/* TRANSMIT command */
-#define OC_TRANSMIT_HALT	0x0004	/* TRANSMIT_HALT command */
-#define OC_RECEIVE		0x0008	/* RECEIVE command */
-#define OC_CLOSE		0x0010	/* CLOSE command */
-#define OC_SET_GROUP_ADDR	0x0020	/* SET_GROUP_ADDR command */
-#define OC_SET_FUNCT_ADDR	0x0040	/* SET_FUNCT_ADDR command */
-#define OC_READ_ERROR_LOG	0x0080	/* READ_ERROR_LOG command */
-#define OC_READ_ADAPTER		0x0100	/* READ_ADAPTER command */
-#define OC_MODIFY_OPEN_PARMS	0x0400	/* MODIFY_OPEN_PARMS command */
-#define OC_RESTORE_OPEN_PARMS	0x0800	/* RESTORE_OPEN_PARMS command */
-#define OC_SET_FIRST_16_GROUP	0x1000	/* SET_FIRST_16_GROUP command */
-#define OC_SET_BRIDGE_PARMS	0x2000	/* SET_BRIDGE_PARMS command */
-#define OC_CONFIG_BRIDGE_PARMS	0x4000	/* CONFIG_BRIDGE_PARMS command */
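
A minimal user-space sketch of the bit-per-word queue described above
(helper names are hypothetical, and lowest-bit-first is just one possible
dispatch policy, not necessarily the driver's): queueing only sets a bit,
and completion picks the next set bit, so commands do not run in the
order they were queued.

/* Illustrative sketch of a bit-per-command queue; names are hypothetical. */
#include <stdio.h>

#define OC_OPEN      0x0001
#define OC_TRANSMIT  0x0002
#define OC_RECEIVE   0x0008
#define OC_CLOSE     0x0010

static unsigned short cmd_queue;

static void queue_cmd(unsigned short cmd)
{
	cmd_queue |= cmd;			/* mark command as outstanding */
}

static unsigned short next_cmd(void)
{
	unsigned short cmd = cmd_queue & -cmd_queue;	/* lowest set bit */

	cmd_queue &= ~cmd;			/* this command executes now */
	return cmd;
}

int main(void)
{
	unsigned short first, second;

	queue_cmd(OC_CLOSE);
	queue_cmd(OC_OPEN);
	first = next_cmd();
	second = next_cmd();
	/* OC_OPEN (0x0001) runs first although OC_CLOSE was queued first. */
	printf("first: 0x%04x, second: 0x%04x\n", first, second);
	return 0;
}
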
-
-#define OPEN			0x0300	/* C: open command. S: completion. */
-#define TRANSMIT		0x0400	/* C: transmit command. S: completion
-					 * status. (reject: COMMAND_REJECT if
-					 * adapter not opened, TRANSMIT already
-					 * issued or address passed in the SCB
-					 * not word aligned)
-					 */
-#define TRANSMIT_HALT		0x0500	/* C: interrupt TX TPL chain; if no
-					 * TRANSMIT command issued, the command
-					 * is ignored (completion with TRANSMIT
-					 * status (0x0400)!)
-					 */
-#define RECEIVE			0x0600	/* C: receive command. S: completion
-					 * status. (reject: COMMAND_REJECT if
-					 * adapter not opened, RECEIVE already
-					 * issued or address passed in the SCB 
-					 * not word aligned)
-					 */
-#define CLOSE			0x0700	/* C: close adapter. S: completion.
-					 * (COMMAND_REJECT if adapter not open)
-					 */
-#define SET_GROUP_ADDR		0x0800	/* C: alter adapter group address after
-					 * OPEN. S: completion. (COMMAND_REJECT
-					 * if adapter not open)
-					 */
-#define SET_FUNCT_ADDR		0x0900	/* C: alter adapter functional address
-					 * after OPEN. S: completion.
-					 * (COMMAND_REJECT if adapter not open)
-					 */
-#define READ_ERROR_LOG		0x0A00	/* C: read adapter error counters.
-					 * S: completion. (command ignored
-					 * if adapter not open!)
-					 */
-#define READ_ADAPTER		0x0B00	/* C: read data from adapter memory.
-					 * (important: after init and before
-					 * open!) S: completion. (ADAPTER_CHECK
-					 * interrupt if undefined storage area
-					 * read)
-					 */
-#define MODIFY_OPEN_PARMS	0x0D00	/* C: modify some adapter operational
-					 * parameters. (bit corresponding to
-					 * WRAP_INTERFACE is ignored)
-					 * S: completion. (reject: 
-					 * COMMAND_REJECT)
-					 */
-#define RESTORE_OPEN_PARMS	0x0E00	/* C: modify some adapter operational
-					 * parameters. (bit corresponding
-					 * to WRAP_INTERFACE is ignored)
-					 * S: completion. (reject:
-					 * COMMAND_REJECT)
-					 */
-#define SET_FIRST_16_GROUP	0x0F00	/* C: alter the first two bytes in
-					 * adapter group address.
-					 * S: completion. (reject:
-					 * COMMAND_REJECT)
-					 */
-#define SET_BRIDGE_PARMS	0x1000	/* C: values and conditions for the
-					 * adapter hardware to use when frames
-					 * are copied for forwarding.
-					 * S: completion. (reject:
-					 * COMMAND_REJECT)
-					 */
-#define CONFIG_BRIDGE_PARMS	0x1100	/* C: ..
-					 * S: completion. (reject:
-					 * COMMAND_REJECT)
-					 */
-
-#define SPEED_4			4
-#define SPEED_16		16	/* Default transmission speed  */
-
-
-/* Initialization Parameter Block (IPB); word alignment necessary! */
-#define BURST_SIZE	0x0018	/* Default burst size */
-#define BURST_MODE	0x9F00	/* Burst mode enable */
-#define DMA_RETRIES	0x0505	/* Magic DMA retry number... */
-
-#define CYCLE_TIME	3	/* Default AT-bus cycle time: 500 ns
-				 * (later adapter version: fixed cycle time!)
-				 */
-#define LINE_SPEED_BIT	0x80
-
-/* Macro definition for the wait function. */
-#define ONE_SECOND_TICKS	1000000
-#define HALF_SECOND		(ONE_SECOND_TICKS / 2)
-#define ONE_SECOND		(ONE_SECOND_TICKS)
-#define TWO_SECONDS		(ONE_SECOND_TICKS * 2)
-#define THREE_SECONDS		(ONE_SECOND_TICKS * 3)
-#define FOUR_SECONDS		(ONE_SECOND_TICKS * 4)
-#define FIVE_SECONDS		(ONE_SECOND_TICKS * 5)
-
-#define BUFFER_SIZE 		2048	/* Buffers on Adapter */
-
-#pragma pack(1)
-typedef struct {
-	unsigned short Init_Options;	/* Initialize with burst mode;
-					 * LLC disabled. (MAC only)
-					 */
-
-	/* Interrupt vectors the adapter places on attached system bus. */
-	u_int8_t  CMD_Status_IV;    /* Interrupt vector: command status. */
-	u_int8_t  TX_IV;	    /* Interrupt vector: transmit. */
-	u_int8_t  RX_IV;	    /* Interrupt vector: receive. */
-	u_int8_t  Ring_Status_IV;   /* Interrupt vector: ring status. */
-	u_int8_t  SCB_Clear_IV;	    /* Interrupt vector: SCB clear. */
-	u_int8_t  Adapter_CHK_IV;   /* Interrupt vector: adapter check. */
-
-	u_int16_t RX_Burst_Size;    /* Max. number of transfer cycles. */
-	u_int16_t TX_Burst_Size;    /* During DMA burst; even value! */
-	u_int16_t DMA_Abort_Thrhld; /* Number of DMA retries. */
-
-	u_int32_t SCB_Addr;   /* SCB address: even, word aligned, high-low */
-	u_int32_t SSB_Addr;   /* SSB address: even, word aligned, high-low */
-} IPB, *IPB_Ptr;
-#pragma pack()
-
-/*
- * OPEN Command Parameter List (OCPL) (can be reused, if the adapter has to
- * be reopened)
- */
-#define BUFFER_SIZE	2048		/* Buffers on Adapter. */
-#define TPL_SIZE	8+6*TX_FRAG_NUM /* Depending on fragments per TPL. */
-#define RPL_SIZE	14		/* (with TI firmware v2.26 handling
-					 * up to nine fragments possible)
-					 */
-#define TX_BUF_MIN	20		/* ??? (Stephan: calculation with */
-#define TX_BUF_MAX	40		/* BUFFER_SIZE and MAX_FRAME_SIZE) ??? 
-					 */
-#define DISABLE_EARLY_TOKEN_RELEASE 	0x1000
-
-/* OPEN Options (high-low) */
-#define WRAP_INTERFACE		0x0080	/* Inserting omitted for test
-					 * purposes; transmit data appears
-					 * as receive data. (useful for
-					 * testing; change: CLOSE necessary)
-					 */
-#define DISABLE_HARD_ERROR	0x0040	/* On HARD_ERROR & TRANSMIT_BEACON
-					 * no RING.STATUS interrupt.
-					 */
-#define DISABLE_SOFT_ERROR	0x0020	/* On SOFT_ERROR, no RING.STATUS
-					 * interrupt.
-					 */
-#define PASS_ADAPTER_MAC_FRAMES	0x0010	/* Passing unsupported MAC frames
-					 * to system.
-					 */
-#define PASS_ATTENTION_FRAMES	0x0008	/* All changed attention MAC frames are
-					 * passed to the system.
-					 */
-#define PAD_ROUTING_FIELD	0x0004	/* Routing field is padded to 18
-					 * bytes.
-					 */
-#define FRAME_HOLD		0x0002	/* Adapter waits for entire frame before
-					 * initiating DMA transfer; otherwise:
-					 * DMA transfer initiation if internal
-					 * buffer filled.
-					 */
-#define CONTENDER		0x0001	/* Adapter participates in the monitor
-					 * contention process.
-					 */
-#define PASS_BEACON_MAC_FRAMES	0x8000	/* Adapter passes beacon MAC frames
-					 * to the system.
-					 */
-#define EARLY_TOKEN_RELEASE 	0x1000	/* Only valid in 16 Mbps operation;
-					 * 0 = ETR. (no effect in 4 Mbps
-					 * operation)
-					 */
-#define COPY_ALL_MAC_FRAMES	0x0400	/* All MAC frames are copied to
-					 * the system. (after OPEN: duplicate
-					 * address test (DAT) MAC frame is 
-					 * first received frame copied to the
-					 * system)
-					 */
-#define COPY_ALL_NON_MAC_FRAMES	0x0200	/* All non MAC frames are copied to
-					 * the system.
-					 */
-#define PASS_FIRST_BUF_ONLY	0x0100	/* Passes only first internal buffer
-					 * of each received frame; FrameSize
-					 * of RPLs must contain internal
-					 * BUFFER_SIZE bits for promiscuous mode.
-					 */
-#define ENABLE_FULL_DUPLEX_SELECTION	0x2000 
- 					/* Enable the use of full-duplex
-					 * settings with bits in byte 22 in
-					 * ocpl. (new feature in firmware
-					 * version 3.09)
-					 */
-
-/* Full-duplex settings */
-#define OPEN_FULL_DUPLEX_OFF	0x0000
-#define OPEN_FULL_DUPLEX_ON	0x00c0
-#define OPEN_FULL_DUPLEX_AUTO	0x0080
-
-#define PROD_ID_SIZE	18	/* Length of product ID. */
-
-#define TX_FRAG_NUM	3	 /* Number of fragments used in one TPL. */
-#define TX_MORE_FRAGMENTS 0x8000 /* Bit set in DataCount to indicate more
-				  * fragments following.
-				  */
-
-/* XXX is there some better way to do this? */
-#define ISA_MAX_ADDRESS 	0x00ffffff
-#define PCI_MAX_ADDRESS		0xffffffff
-
-#pragma pack(1)
-typedef struct {
-	u_int16_t OPENOptions;
-	u_int8_t  NodeAddr[6];	/* Adapter node address; use ROM 
-				 * address
-				 */
-	u_int32_t GroupAddr;	/* Multicast: high order
-				 * bytes = 0xC000
-				 */
-	u_int32_t FunctAddr;	/* High order bytes = 0xC000 */
-	__be16 RxListSize;	/* RPL size: 0 (=26), 14, 20 or
-				 * 26 bytes read by the adapter.
-				 * (Depending on the number of 
-				 * fragments/list)
-				 */
-	__be16 TxListSize;	/* TPL size */
-	__be16 BufSize;		/* Is automatically rounded up to the
-				 * nearest nK boundary.
-				 */
-	u_int16_t FullDuplex;
-	u_int16_t Reserved;
-	u_int8_t  TXBufMin;	/* Number of adapter buffers reserved
-				 * for transmission; a minimum of 2
-				 * buffers must be allocated.
-				 */
-	u_int8_t  TXBufMax;	/* Maximum number of adapter buffers
-				 * for transmit; a minimum of 2 buffers
-				 * must be available for receive.
-				 * Default: 6
-				 */
-	u_int16_t ProdIDAddr[2];/* Pointer to product ID. */
-} OPB, *OPB_Ptr;
-#pragma pack()
-
-/*
- * SCB: adapter commands are enabled by the host system and started by
- * writing CMD_INTERRUPT_ADAPTER | CMD_EXECUTE (| CMD_SCB_REQUEST) to the
- * SIFCMD IO register. (special case: | CMD_SYSTEM_IRQ for initialization)
- */
-#pragma pack(1)
-typedef struct {
-	u_int16_t CMD;		/* Command code */
-	u_int16_t Parm[2];	/* Pointer to Command Parameter Block */
-} SCB;	/* System Command Block (32 bit physical address; big endian)*/
-#pragma pack()
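
A rough sketch of the kick sequence the SCB comment describes, assuming a
kernel context with this header and <linux/netdevice.h> available and an
SCB whose address has already been handed to the adapter through the IPB;
the function name is hypothetical and byte ordering and locking are
omitted:

/* Illustrative sketch, not from the driver. */
static void example_kick_scb(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);

	/* Ask the adapter to fetch and execute the SCB, and to raise an
	 * SCB_CLEAR interrupt once the SCB may be reused.
	 */
	tp->sifwritew(dev, CMD_INTERRUPT_ADAPTER | CMD_EXECUTE | CMD_SCB_REQUEST,
		      SIFCMD);
}
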
-
-/*
- * SSB: the adapter command return status; it can be evaluated after the
- * COMMAND_STATUS adapter-to-system interrupt. After reading the SSB, its
- * availability has to be signalled to the adapter by writing
- * CMD_INTERRUPT_ADAPTER | CMD_SSB_CLEAR to the SIFCMD IO register.
- */
-#pragma pack(1)
-typedef struct {
-	u_int16_t STS;		/* Status code */
-	u_int16_t Parm[3];	/* Parameter or pointer to Status Parameter
-				 * Block.
-				 */
-} SSB;	/* System Status Block (big endian - physical address)  */
-#pragma pack()
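
And the matching acknowledge for the SSB, again as a hedged sketch with
the same assumptions as the SCB sketch above; a real handler would first
dispatch on the status code it read:

/* Illustrative sketch, not from the driver. */
static void example_ack_ssb(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	unsigned short sts = tp->ssb.STS;	/* read status before acking */

	(void)sts;	/* a real handler would act on this value */

	/* Tell the adapter the SSB has been read and may be reused. */
	tp->sifwritew(dev, CMD_INTERRUPT_ADAPTER | CMD_SSB_CLEAR, SIFCMD);
}
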
-
-typedef struct {
-	unsigned short BurnedInAddrPtr;	/* Pointer to adapter burned in
-					 * address. (BIA)
-					 */
-	unsigned short SoftwareLevelPtr;/* Pointer to software level data. */
-	unsigned short AdapterAddrPtr;	/* Pointer to adapter addresses. */
-	unsigned short AdapterParmsPtr;	/* Pointer to adapter parameters. */
-	unsigned short MACBufferPtr;	/* Pointer to MAC buffer. (internal) */
-	unsigned short LLCCountersPtr;	/* Pointer to LLC counters.  */
-	unsigned short SpeedFlagPtr;	/* Pointer to data rate flag.
-					 * (4/16 Mbps)
-					 */
-	unsigned short AdapterRAMPtr;	/* Pointer to adapter RAM found. (KB) */
-} INTPTRS;	/* Adapter internal pointers */
-
-#pragma pack(1)
-typedef struct {
-	u_int8_t  Line_Error;		/* Line error: code violation in
-					 * frame or in a token, or FCS error.
-					 */
-	u_int8_t  Internal_Error;	/* IBM specific. (Reserved_1) */
-	u_int8_t  Burst_Error;
-	u_int8_t  ARI_FCI_Error;	/* ARI/FCI bit zero in AMP or
-					 * SMP MAC frame.
-					 */
-	u_int8_t  AbortDelimeters;	/* IBM specific. (Reserved_2) */
-	u_int8_t  Reserved_3;
-	u_int8_t  Lost_Frame_Error;	/* Receive of end of transmitted
-					 * frame failed.
-					 */
-	u_int8_t  Rx_Congest_Error;	/* Adapter in repeat mode has not
-					 * enough buffer space to copy incoming
-					 * frame.
-					 */
-	u_int8_t  Frame_Copied_Error;	/* ARI bit not zero in frame
-					 * addressed to adapter.
-					 */
-	u_int8_t  Frequency_Error;	/* IBM specific. (Reserved_4) */
-	u_int8_t  Token_Error;		/* (active only in monitor station) */
-	u_int8_t  Reserved_5;
-	u_int8_t  DMA_Bus_Error;	/* DMA bus errors not exceeding the
-					 * abort thresholds.
-					 */
-	u_int8_t  DMA_Parity_Error;	/* DMA parity errors not exceeding
-					 * the abort thresholds.
-					 */
-} ERRORTAB;	/* Adapter error counters */
-#pragma pack()
-
-
-/*--------------------- Send and Receive definitions -------------------*/
-#pragma pack(1)
-typedef struct {
-	__be16 DataCount;	/* Value 0, even and odd values are
-				 * permitted; the value is passed
-				 * unaltered. Most significant bit set:
-				 * more fragments follow; in the last
-				 * fragment the most significant bit is
-				 * not evaluated. (???)
-				 */
-	__be32 DataAddr;	/* Pointer to frame data fragment;
-				 * even or odd.
-				 */
-} Fragment;
-#pragma pack()
-
-#define MAX_FRAG_NUMBERS    9	/* Maximal number of fragments possible to use
-				 * in one RPL/TPL. (depending on TI firmware 
-				 * version)
-				 */
-
-/*
- * AC (1), FC (1), Dst (6), Src (6), RIF (18), Data (4472) = 4504
- * The packet size can be one of the following: 548, 1502, 2084, 4504, 8176,
- * 11439, 17832. Refer to TMS380 Second Generation Token Ring User's Guide
- * Page 2-27.
- */
-#define HEADER_SIZE		(1 + 1 + 6 + 6)
-#define SRC_SIZE		18
-#define MIN_DATA_SIZE		516
-#define DEFAULT_DATA_SIZE	4472
-#define MAX_DATA_SIZE		17800
-
-#define DEFAULT_PACKET_SIZE (HEADER_SIZE + SRC_SIZE + DEFAULT_DATA_SIZE)
-#define MIN_PACKET_SIZE     (HEADER_SIZE + SRC_SIZE + MIN_DATA_SIZE)
-#define MAX_PACKET_SIZE     (HEADER_SIZE + SRC_SIZE + MAX_DATA_SIZE)
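
As a quick cross-check of the constants above: HEADER_SIZE is
1 + 1 + 6 + 6 = 14, so MIN_PACKET_SIZE = 14 + 18 + 516 = 548,
DEFAULT_PACKET_SIZE = 14 + 18 + 4472 = 4504 and
MAX_PACKET_SIZE = 14 + 18 + 17800 = 17832, which match the 548, 4504 and
17832 entries in the TMS380 packet-size list quoted in the comment.
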
-
-/*
- * Macros to deal with the frame status field.
- */
-#define AC_NOT_RECOGNIZED	0x00
-#define GROUP_BIT		0x80
-#define GET_TRANSMIT_STATUS_HIGH_BYTE(Ts) ((unsigned char)((Ts) >> 8))
-#define GET_FRAME_STATUS_HIGH_AC(Fs)	  ((unsigned char)(((Fs) & 0xC0) >> 6))
-#define GET_FRAME_STATUS_LOW_AC(Fs)       ((unsigned char)(((Fs) & 0x0C) >> 2))
-#define DIRECTED_FRAME(Context)           (!((Context)->MData[2] & GROUP_BIT))
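
A small standalone sketch (macros repeated so it compiles on its own; the
example values are arbitrary) of how the helpers above pull the two
access-control copies out of a frame status byte and how the group bit
distinguishes directed from group-addressed frames:

/* Illustrative sketch, not part of the driver. */
#include <stdio.h>

#define GROUP_BIT                      0x80
#define GET_FRAME_STATUS_HIGH_AC(Fs)   ((unsigned char)(((Fs) & 0xC0) >> 6))
#define GET_FRAME_STATUS_LOW_AC(Fs)    ((unsigned char)(((Fs) & 0x0C) >> 2))

int main(void)
{
	unsigned char fs = 0xCC;	/* example frame status byte      */
	unsigned char dst0 = 0xC0;	/* first destination address byte */

	printf("AC copy 1: %d, AC copy 2: %d\n",
	       GET_FRAME_STATUS_HIGH_AC(fs), GET_FRAME_STATUS_LOW_AC(fs));
	printf("directed frame: %s\n",
	       (dst0 & GROUP_BIT) ? "no (group address)" : "yes");
	return 0;
}
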
-
-
-/*--------------------- Send Functions ---------------------------------*/
-/* define TX_CSTAT _REQUEST (R) and _COMPLETE (C) values (high-low) */
-
-#define TX_VALID		0x0080	/* R: set via TRANSMIT.VALID interrupt.
-					 * C: always reset to zero!
-					 */
-#define TX_FRAME_COMPLETE	0x0040	/* R: must be reset to zero.
-					 * C: set to one.
-					 */
-#define TX_START_FRAME		0x0020  /* R: start of a frame: 1 
-					 * C: unchanged.
-					 */
-#define TX_END_FRAME		0x0010  /* R: end of a frame: 1
-					 * C: unchanged.
-					 */
-#define TX_FRAME_IRQ		0x0008  /* R: request interrupt generation
-					 * after transmission.
-					 * C: unchanged.
-					 */
-#define TX_ERROR		0x0004  /* R: reserved.
-					 * C: set to one if Error occurred.
-					 */
-#define TX_INTERFRAME_WAIT	0x0004
-#define TX_PASS_CRC		0x0002  /* R: set if CRC value is already
-					 * calculated. (valid only in
-					 * FRAME_START TPL)
-					 * C: unchanged.
-					 */
-#define TX_PASS_SRC_ADDR	0x0001  /* R: adapter uses explicit frame
-					 * source address and does not overwrite
-					 * with the adapter node address.
-					 * (valid only in FRAME_START TPL)
-					 *
-					 * C: unchanged.
-					 */
-#define TX_STRIP_FS		0xFF00  /* R: reserved.
-					 * C: if no Transmission Error,
-					 * field contains copy of FS byte after
-					 * stripping of frame.
-					 */
-
-/*
- * Structure of Transmit Parameter Lists (TPLs): only one frame per TPL,
- * but possibly multiple TPLs for one frame. The length of the TPLs has to
- * be initialized in the OPL. (OPEN parameter list)
- */
-#define TPL_NUM		3	/* Number of Transmit Parameter Lists.
-				 * !! MUST BE >= 3 !!
-				 */
-
-#pragma pack(1)
-typedef struct s_TPL TPL;
-
-struct s_TPL {	/* Transmit Parameter List (align on even word boundaries) */
-	__be32 NextTPLAddr;		/* Pointer to next TPL in chain; if
-					 * pointer is odd: this is the last
-					 * TPL. Pointing to itself can cause
-					 * problems!
-					 */
-	volatile u_int16_t Status;	/* Initialized by the adapter:
-					 * CSTAT_REQUEST. Important: update the
-					 * least significant bit first! Set by
-					 * the adapter: CSTAT_COMPLETE status.
-					 */
-	__be16 FrameSize;		/* Number of bytes to be transmitted
-					 * as a frame, including AC/FC,
-					 * Destination, Source and Routing
-					 * field, but not CRC, FS or End
-					 * Delimiter. (valid only if the
-					 * START_FRAME bit in CSTAT is nonzero)
-					 * Must not be zero in any list;
-					 * maximum value: (BUFFER_SIZE - 8) *
-					 * TX_BUF_MAX. The sum of the DataCount
-					 * values in the FragmentList must
-					 * equal the FrameSize value of the
-					 * START_FRAME TPL.
-					 */
-
-	/* TPL/RPL size in OPEN parameter list depending on maximal
-	 * numbers of fragments used in one parameter list.
-	 */
-	Fragment FragList[TX_FRAG_NUM];	/* Frame data fragment list. Maximum:
-					 * nine frame fragments in one TPL;
-					 * current firmware version: 9
-					 * fragments possible.
-					 */
-#pragma pack()
-
-	/* Special proprietary data and precalculations */
-
-	TPL *NextTPLPtr;		/* Pointer to next TPL in chain. */
-	unsigned char *MData;
-	struct sk_buff *Skb;
-	unsigned char TPLIndex;
-	volatile unsigned char BusyFlag;/* Flag: TPL busy? */
-	dma_addr_t DMABuff;		/* DMA IO bus address from dma_map */
-};
-
-/* ---------------------Receive Functions-------------------------------*
- * define RECEIVE_CSTAT_REQUEST (R) and RECEIVE_CSTAT_COMPLETE (C) values.
- * (high-low)
- */
-#define RX_VALID		0x0080	/* R: set; tell adapter with
-					 * RECEIVE.VALID interrupt.
-					 * C: reset to zero.
-					 */
-#define RX_FRAME_COMPLETE	0x0040  /* R: must be reset to zero,
-					 * C: set to one.
-					 */
-#define RX_START_FRAME		0x0020  /* R: must be reset to zero.
-					 * C: set to one on the list.
-					 */
-#define RX_END_FRAME		0x0010  /* R: must be reset to zero.
-					 * C: set to one on the list
-					 * that ends the frame.
-					 */
-#define RX_FRAME_IRQ		0x0008  /* R: request interrupt generation
-					 * after receive.
-					 * C: unchanged.
-					 */
-#define RX_INTERFRAME_WAIT	0x0004  /* R: after receiving a frame:
-					 * interrupt and wait for a
-					 * RECEIVE.CONTINUE.
-					 * C: unchanged.
-					 */
-#define RX_PASS_CRC		0x0002  /* R: if set, the adapter includes
-					 * the CRC in data passed. (last four 
-					 * bytes; valid only if FRAME_START is
-					 * set)
-					 * C: set, if CRC is included in
-					 * received data.
-					 */
-#define RX_PASS_SRC_ADDR	0x0001  /* R: adapter uses explicit frame
-					 * source address and does not
-					 * overwrite with the adapter node
-					 * address. (valid only if FRAME_START
-					 * is set)
-					 * C: unchanged.
-					 */
-#define RX_RECEIVE_FS		0xFC00  /* R: reserved; must be reset to zero.
-					 * C: on lists with START_FRAME, field
-					 * contains frame status field from
-					 * received frame; otherwise cleared.
-					 */
-#define RX_ADDR_MATCH		0x0300  /* R: reserved; must be reset to zero.
-					 * C: address match code mask.
-					 */ 
-#define RX_STATUS_MASK		0x00FF  /* Mask for receive status bits. */
-
-#define RX_INTERN_ADDR_MATCH    0x0100  /* C: internally address match. */
-#define RX_EXTERN_ADDR_MATCH    0x0200  /* C: externally matched via
-					 * XMATCH/XFAIL interface.
-					 */
-#define RX_INTEXT_ADDR_MATCH    0x0300  /* C: internally and externally
-					 * matched.
-					 */
-#define RX_READY (RX_VALID | RX_FRAME_IRQ) /* Ready for receive. */
-
-/* Constants for Command Status Interrupt.
- * COMMAND_REJECT status field bit functions (SSB.Parm[0])
- */
-#define ILLEGAL_COMMAND		0x0080	/* Set if an unknown command
-					 * is issued to the adapter
-					 */
-#define ADDRESS_ERROR		0x0040  /* Set if any address field in
-					 * the SCB is odd. (not word aligned)
-					 */
-#define ADAPTER_OPEN		0x0020  /* Command issued that is illegal
-					 * while the adapter is open.
-					 */
-#define ADAPTER_CLOSE		0x0010  /* Command issued that is illegal
-					 * while the adapter is closed.
-					 */
-#define SAME_COMMAND		0x0008  /* Command issued with same command
-					 * already executing.
-					 */
-
-/* OPEN_COMPLETION values (SSB.Parm[0], MSB) */
-#define NODE_ADDR_ERROR		0x0040  /* Wrong address or BIA read
-					 * zero address.
-					 */
-#define LIST_SIZE_ERROR		0x0020  /* If List_Size value not in 0,
-					 * 14, 20, 26.
-					 */
-#define BUF_SIZE_ERROR		0x0010  /* Not enough available memory for
-					 * two buffers.
-					 */
-#define TX_BUF_COUNT_ERROR	0x0004  /* Remaining receive buffers less than
-					 * two.
-					 */
-#define OPEN_ERROR		0x0002	/* Error during ring insertion; more
-					 * information in bits 8-15.
-					 */
-
-/* Standard return codes */
-#define GOOD_COMPLETION		0x0080  /* = OPEN_SUCCESSFUL */
-#define INVALID_OPEN_OPTION	0x0001  /* OPEN options are not supported by
-					 * the adapter.
-					 */
-
-/* OPEN phases; details of OPEN_ERROR (SSB.Parm[0], LSB)            */
-#define OPEN_PHASES_MASK            0xF000  /* Check only the bits 8-11. */
-#define LOBE_MEDIA_TEST             0x1000
-#define PHYSICAL_INSERTION          0x2000
-#define ADDRESS_VERIFICATION        0x3000
-#define PARTICIPATION_IN_RING_POLL  0x4000
-#define REQUEST_INITIALISATION      0x5000
-#define FULLDUPLEX_CHECK            0x6000
-
-/* OPEN error codes; details of OPEN_ERROR (SSB.Parm[0], LSB) */
-#define OPEN_ERROR_CODES_MASK	0x0F00  /* Check only the bits 12-15. */
-#define OPEN_FUNCTION_FAILURE   0x0100  /* Unable to transmit to itself or
-					 * frames received before insertion.
-					 */
-#define OPEN_SIGNAL_LOSS	0x0200	/* Signal loss condition detected at
-					 * receiver.
-					 */
-#define OPEN_TIMEOUT		0x0500	/* Insertion timer expired before
-					 * logical insertion.
-					 */
-#define OPEN_RING_FAILURE	0x0600	/* Unable to receive own ring purge
-					 * MAC frames.
-					 */
-#define OPEN_RING_BEACONING	0x0700	/* Beacon MAC frame received after
-					 * ring insertion.
-					 */
-#define OPEN_DUPLICATE_NODEADDR	0x0800  /* Other station in ring found
-					 * with the same address.
-					 */
-#define OPEN_REQUEST_INIT	0x0900	/* RPS present but does not respond. */
-#define OPEN_REMOVE_RECEIVED    0x0A00  /* Adapter received a remove adapter
-					 * MAC frame.
-					 */
-#define OPEN_FULLDUPLEX_SET	0x0D00	/* Got this with full duplex on when
-					 * trying to connect to a normal ring.
-					 */
-
-/* SET_BRIDGE_PARMS return codes: */
-#define BRIDGE_INVALID_MAX_LEN  0x4000  /* MAX_ROUTING_FIELD_LENGTH odd,
-					 * less than 6 or > 30.
-					 */
-#define BRIDGE_INVALID_SRC_RING 0x2000  /* SOURCE_RING number zero, too large
-					 * or = TARGET_RING.
-					 */
-#define BRIDGE_INVALID_TRG_RING 0x1000  /* TARGET_RING number zero, too large
-					 * or = SOURCE_RING.
-					 */
-#define BRIDGE_INVALID_BRDGE_NO 0x0800  /* BRIDGE_NUMBER too large. */
-#define BRIDGE_INVALID_OPTIONS  0x0400  /* Invalid bridge options. */
-#define BRIDGE_DIAGS_FAILED     0x0200  /* Diagnostics of TMS380SRA failed. */
-#define BRIDGE_NO_SRA           0x0100  /* The TMS380SRA does not exist in HW
-					 * configuration.
-					 */
-
-/*
- * Bring Up Diagnostics error codes.
- */
-#define BUD_INITIAL_ERROR       0x0
-#define BUD_CHECKSUM_ERROR      0x1
-#define BUD_ADAPTER_RAM_ERROR   0x2
-#define BUD_INSTRUCTION_ERROR   0x3
-#define BUD_CONTEXT_ERROR       0x4
-#define BUD_PROTOCOL_ERROR      0x5
-#define BUD_INTERFACE_ERROR	0x6
-
-/* BUD constants */
-#define BUD_MAX_RETRIES         3
-#define BUD_MAX_LOOPCNT         6
-#define BUD_TIMEOUT             3000
-
-/* Initialization constants */
-#define INIT_MAX_RETRIES        3	/* Maximum three retries. */
-#define INIT_MAX_LOOPCNT        22      /* Maximum loop counts. */
-
-/* RING STATUS field values (high/low) */
-#define SIGNAL_LOSS             0x0080  /* Loss of signal on the ring
-					 * detected.
-					 */
-#define HARD_ERROR              0x0040  /* Transmitting or receiving beacon
-					 * frames.
-					 */
-#define SOFT_ERROR              0x0020  /* Report error MAC frame
-					 * transmitted.
-					 */
-#define TRANSMIT_BEACON         0x0010  /* Transmitting beacon frames on the
-					 * ring.
-					 */
-#define LOBE_WIRE_FAULT         0x0008  /* Open or short circuit in the
-					 * cable to concentrator; adapter
-					 * closed.
-					 */
-#define AUTO_REMOVAL_ERROR      0x0004  /* Lobe wrap test failed, deinserted;
-					 * adapter closed.
-					 */
-#define REMOVE_RECEIVED         0x0001  /* Received a remove ring station
-					 * MAC frame request; adapter closed.
-					 */
-#define COUNTER_OVERFLOW        0x8000  /* Overflow of one of the adapter's
-					 * error counters; READ.ERROR.LOG.
-					 */
-#define SINGLE_STATION          0x4000  /* Adapter is the only station on the
-					 * ring.
-					 */
-#define RING_RECOVERY           0x2000  /* Claim token MAC frames on the ring;
-					 * reset after ring purge frame.
-					 */
-
-#define ADAPTER_CLOSED (LOBE_WIRE_FAULT | AUTO_REMOVAL_ERROR |\
-                        REMOVE_RECEIVED)
-
-/* Adapter_check_block.Status field bit assignments: */
-#define DIO_PARITY              0x8000  /* Adapter detects bad parity
-					 * through direct I/O access.
-					 */
-#define DMA_READ_ABORT          0x4000  /* Aborting DMA read operation
-					 * from system. Parm[0]: 0=timeout,
-					 * 1=parity error, 2=bus error;
-					 * Parm[1]: 32 bit pointer to host
-					 * system address at failure.
-					 */
-#define DMA_WRITE_ABORT         0x2000  /* Aborting DMA write operation
-					 * to system. (parameters analogous to
-					 * DMA_READ_ABORT)
-					 */
-#define ILLEGAL_OP_CODE         0x1000  /* Illegal operation code in the
-					 * adapter's firmware. Parm[0-2]:
-					 * communications processor registers
-					 * R13-R15.
-					 */
-#define PARITY_ERRORS           0x0800  /* Adapter detects internal bus
-					 * parity error.
-					 */
-#define RAM_DATA_ERROR          0x0080  /* Valid only during RAM testing;
-					 * RAM data error Parm[0-1]: 32 bit
-					 * pointer to RAM location.
-					 */
-#define RAM_PARITY_ERROR        0x0040  /* Valid only during RAM testing;
-					 * RAM parity error Parm[0-1]: 32 bit
-					 * pointer to RAM location.
-					 */
-#define RING_UNDERRUN           0x0020  /* Internal DMA underrun when
-					 * transmitting onto ring.
-					 */
-#define INVALID_IRQ             0x0008  /* Unrecognized interrupt generated
-					 * internal to adapter Parm[0-2]:
-					 * adapter register R13-R15.
-					 */
-#define INVALID_ERROR_IRQ       0x0004  /* Unrecognized error interrupt
-					 * generated Parm[0-2]: adapter register
-					 * R13-R15.
-					 */
-#define INVALID_XOP             0x0002  /* Unrecognized XOP request in
-					 * communication processor Parm[0-2]:
-					 * adapter register R13-R15.
-					 */
-#define CHECKADDR               0x05E0  /* Adapter check status information
-					 * address offset.
-					 */
-#define ROM_PAGE_0              0x0000  /* Adapter ROM page 0. */
-
-/*
- * RECEIVE.STATUS interrupt result SSB values: (high-low)
- * (RECEIVE_COMPLETE field bit definitions in SSB.Parm[0])
- */
-#define RX_COMPLETE             0x0080  /* SSB.Parm[0]; SSB.Parm[1]: 32
-					 * bit pointer to last RPL.
-					 */
-#define RX_SUSPENDED            0x0040  /* SSB.Parm[0]; SSB.Parm[1]: 32
-					 * bit pointer to RPL with odd
-					 * forward pointer.
-					 */
-
-/* Valid receive CSTAT: */
-#define RX_FRAME_CONTROL_BITS (RX_VALID | RX_START_FRAME | RX_END_FRAME | \
-			       RX_FRAME_COMPLETE)
-#define VALID_SINGLE_BUFFER_FRAME (RX_START_FRAME | RX_END_FRAME | \
-				   RX_FRAME_COMPLETE)
-
-typedef enum SKB_STAT SKB_STAT;
-enum SKB_STAT {
-	SKB_UNAVAILABLE,
-	SKB_DMA_DIRECT,
-	SKB_DATA_COPY
-};
-
-/* Receive Parameter List (RPL). The length of the RPLs has to be initialized
- * in the OPL. (OPEN parameter list)
- */
-#define RPL_NUM		3
-
-#define RX_FRAG_NUM     1	/* Maximal number of used fragments in one RPL.
-				 * (up to firmware v2.24: 3, now: up to 9)
-				 */
-
-#pragma pack(1)
-typedef struct s_RPL RPL;
-struct s_RPL {	/* Receive Parameter List */
-	__be32 NextRPLAddr;		/* Pointer to next RPL in chain
-					 * (normalized = physical 32 bit
-					 * address) if pointer is odd: this
-					 * is last RPL. Pointing to itself can
-					 * cause problems!
-					 */
-	volatile u_int16_t Status;	/* Set at creation of the Receive
-					 * Parameter List; RECEIVE_CSTAT_COMPLETE
-					 * is set by the adapter in lists that
-					 * start or end a frame.
-					 */
-	volatile __be16 FrameSize;	/* Number of bytes received as a
-					 * frame, including AC/FC, Destination,
-					 * Source and Routing field, but not
-					 * CRC, FS (Frame Status) or End
-					 * Delimiter. (valid only if the
-					 * START_FRAME bit in CSTAT is nonzero)
-					 * Must not be zero in any list;
-					 * maximum value: (BUFFER_SIZE - 8) *
-					 * TX_BUF_MAX. The sum of the DataCount
-					 * values in the FragmentList must
-					 * equal the FrameSize value of the
-					 * START_FRAME list.
-					 */
-
-	/* TPL/RPL size in OPEN parameter list depending on maximal numbers
-	 * of fragments used in one parameter list.
-	 */
-	Fragment FragList[RX_FRAG_NUM];	/* Frame data fragment list. Maximum:
-					 * nine frame fragments in one RPL;
-					 * current firmware version: 9
-					 * fragments possible.
-					 */
-#pragma pack()
-
-	/* Special proprietary data and precalculations. */
-	RPL *NextRPLPtr;	/* Logical pointer to next RPL in chain. */
-	unsigned char *MData;
-	struct sk_buff *Skb;
-	SKB_STAT SkbStat;
-	int RPLIndex;
-	dma_addr_t DMABuff;		/* DMA IO bus address from dma_map */
-};
-
-/* Information that needs to be kept for each board. */
-typedef struct net_local {
-#pragma pack(1)
-	IPB ipb;	/* Initialization Parameter Block. */
-	SCB scb;	/* System Command Block: system to adapter 
-			 * communication.
-			 */
-	SSB ssb;	/* System Status Block: adapter to system 
-			 * communication.
-			 */
-	OPB ocpl;	/* Open Options Parameter Block. */
-
-	ERRORTAB errorlogtable;	/* Adapter statistic error counters.
-				 * (read from adapter memory)
-				 */
-	unsigned char ProductID[PROD_ID_SIZE + 1]; /* Product ID */
-#pragma pack()
-
-	TPL Tpl[TPL_NUM];
-	TPL *TplFree;
-	TPL *TplBusy;
-	unsigned char LocalTxBuffers[TPL_NUM][DEFAULT_PACKET_SIZE];
-
-	RPL Rpl[RPL_NUM];
-	RPL *RplHead;
-	RPL *RplTail;
-	unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE];
-
-	struct device *pdev;
-	int DataRate;
-	unsigned char ScbInUse;
-	unsigned short CMDqueue;
-
-	unsigned long AdapterOpenFlag:1;
-	unsigned long AdapterVirtOpenFlag:1;
-	unsigned long OpenCommandIssued:1;
-	unsigned long TransmitCommandActive:1;
-	unsigned long TransmitHaltScheduled:1;
-	unsigned long HaltInProgress:1;
-	unsigned long LobeWireFaultLogged:1;
-	unsigned long ReOpenInProgress:1;
-	unsigned long Sleeping:1;
-
-	unsigned long LastOpenStatus;
-	unsigned short CurrentRingStatus;
-	unsigned long MaxPacketSize;
-	
-	unsigned long StartTime;
-	unsigned long LastSendTime;
-
-	struct tr_statistics MacStat;	/* MAC statistics structure */
-
-	unsigned long dmalimit; /* the max DMA address (ie, ISA) */
-	dma_addr_t    dmabuffer; /* the DMA bus address corresponding to
-				    priv. Might be different from virt_to_bus()
-				    for architectures with IO MMU (Alpha) */
-
-	struct timer_list timer;
-
-	wait_queue_head_t  wait_for_tok_int;
-
-	INTPTRS intptrs;	/* Internal adapter pointer. Must be read
-				 * before OPEN command.
-				 */
-	unsigned short (*setnselout)(struct net_device *);
-	unsigned short (*sifreadb)(struct net_device *, unsigned short);
-	void (*sifwriteb)(struct net_device *, unsigned short, unsigned short);
-	unsigned short (*sifreadw)(struct net_device *, unsigned short);
-	void (*sifwritew)(struct net_device *, unsigned short, unsigned short);
-
-	spinlock_t lock;                /* SMP protection */
-	void *tmspriv;
-} NET_LOCAL;
-
-#endif	/* __KERNEL__ */
-#endif	/* __LINUX_TMS380TR_H */
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
deleted file mode 100644
index fb9918d..0000000
--- a/drivers/net/tokenring/tmspci.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- *  tmspci.c: A generic network driver for TMS380-based PCI token ring cards.
- *
- *  Written 1999 by Adam Fritzler
- *
- *  This software may be used and distributed according to the terms
- *  of the GNU General Public License, incorporated herein by reference.
- *
- *  This driver module supports the following cards:
- *	- SysKonnect TR4/16(+) PCI	(SK-4590)
- *	- SysKonnect TR4/16 PCI		(SK-4591)
- *      - Compaq TR 4/16 PCI
- *      - Thomas-Conrad TC4048 4/16 PCI 
- *      - 3Com 3C339 Token Link Velocity
- *
- *  Maintainer(s):
- *    AF	Adam Fritzler
- *
- *  Modification History:
- *	30-Dec-99	AF	Split off from the tms380tr driver.
- *	22-Jan-00	AF	Updated to use indirect read/writes
- *	23-Nov-00	JG	New PCI API, cleanups
- *
- *  TODO:
- *	1. See if we can use MMIO instead of port accesses
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#include "tms380tr.h"
-
-static char version[] __devinitdata =
-"tmspci.c: v1.02 23/11/2000 by Adam Fritzler\n";
-
-#define TMS_PCI_IO_EXTENT 32
-
-struct card_info {
-	unsigned char nselout[2]; /* NSELOUT vals for 4mb([0]) and 16mb([1]) */
-	char *name;
-};
-
-static struct card_info card_info_table[] = {
-	{ {0x03, 0x01}, "Compaq 4/16 TR PCI"},
-	{ {0x03, 0x01}, "SK NET TR 4/16 PCI"},
-	{ {0x03, 0x01}, "Thomas-Conrad TC4048 PCI 4/16"},
-	{ {0x03, 0x01}, "3Com Token Link Velocity"},
-};
-
-static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
-	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
-	{ PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
-	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
-	{ }			/* Terminating entry */
-};
-MODULE_DEVICE_TABLE(pci, tmspci_pci_tbl);
-
-MODULE_LICENSE("GPL");
-
-static void tms_pci_read_eeprom(struct net_device *dev);
-static unsigned short tms_pci_setnselout_pins(struct net_device *dev);
-
-static unsigned short tms_pci_sifreadb(struct net_device *dev, unsigned short reg)
-{
-	return inb(dev->base_addr + reg);
-}
-
-static unsigned short tms_pci_sifreadw(struct net_device *dev, unsigned short reg)
-{
-	return inw(dev->base_addr + reg);
-}
-
-static void tms_pci_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
-{
-	outb(val, dev->base_addr + reg);
-}
-
-static void tms_pci_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
-{
-	outw(val, dev->base_addr + reg);
-}
-
-static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
-{	
-	static int versionprinted;
-	struct net_device *dev;
-	struct net_local *tp;
-	int ret;
-	unsigned int pci_irq_line;
-	unsigned long pci_ioaddr;
-	struct card_info *cardinfo = &card_info_table[ent->driver_data];
-
-	if (versionprinted++ == 0)
-		printk("%s", version);
-
-	if (pci_enable_device(pdev))
-		return -EIO;
-
-	/* Remove I/O space marker in bit 0. */
-	pci_irq_line = pdev->irq;
-	pci_ioaddr = pci_resource_start (pdev, 0);
-
-	/* At this point we have found a valid card. */
-	dev = alloc_trdev(sizeof(struct net_local));
-	if (!dev)
-		return -ENOMEM;
-
-	if (!request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, dev->name)) {
-		ret = -EBUSY;
-		goto err_out_trdev;
-	}
-
-	dev->base_addr	= pci_ioaddr;
-	dev->irq 	= pci_irq_line;
-	dev->dma	= 0;
-
-	dev_info(&pdev->dev, "%s\n", cardinfo->name);
-	dev_info(&pdev->dev, "    IO: %#4lx  IRQ: %d\n", dev->base_addr, dev->irq);
-		
-	tms_pci_read_eeprom(dev);
-
-	dev_info(&pdev->dev, "    Ring Station Address: %pM\n", dev->dev_addr);
-		
-	ret = tmsdev_init(dev, &pdev->dev);
-	if (ret) {
-		dev_info(&pdev->dev, "unable to get memory for dev->priv.\n");
-		goto err_out_region;
-	}
-
-	tp = netdev_priv(dev);
-	tp->setnselout = tms_pci_setnselout_pins;
-		
-	tp->sifreadb = tms_pci_sifreadb;
-	tp->sifreadw = tms_pci_sifreadw;
-	tp->sifwriteb = tms_pci_sifwriteb;
-	tp->sifwritew = tms_pci_sifwritew;
-		
-	memcpy(tp->ProductID, cardinfo->name, PROD_ID_SIZE + 1);
-
-	tp->tmspriv = cardinfo;
-
-	dev->netdev_ops = &tms380tr_netdev_ops;
-
-	ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
-			  dev->name, dev);
-	if (ret)
-		goto err_out_tmsdev;
-
-	pci_set_drvdata(pdev, dev);
-	SET_NETDEV_DEV(dev, &pdev->dev);
-
-	ret = register_netdev(dev);
-	if (ret)
-		goto err_out_irq;
-	
-	return 0;
-
-err_out_irq:
-	free_irq(pdev->irq, dev);
-err_out_tmsdev:
-	pci_set_drvdata(pdev, NULL);
-	tmsdev_term(dev);
-err_out_region:
-	release_region(pci_ioaddr, TMS_PCI_IO_EXTENT);
-err_out_trdev:
-	free_netdev(dev);
-	return ret;
-}
-
-/*
- * Reads MAC address from adapter RAM, which should've read it from
- * the onboard ROM.  
- *
- * Calling this on a board that does not support it can be a very
- * dangerous thing.  The Madge board, for instance, will lock your
- * machine hard when this is called.  Luckily, it's supported in a
- * separate driver.  --ASF
- */
-static void tms_pci_read_eeprom(struct net_device *dev)
-{
-	int i;
-	
-	/* Address: 0000:0000 */
-	tms_pci_sifwritew(dev, 0, SIFADX);
-	tms_pci_sifwritew(dev, 0, SIFADR);	
-	
-	/* Read six byte MAC address data */
-	dev->addr_len = 6;
-	for(i = 0; i < 6; i++)
-		dev->dev_addr[i] = tms_pci_sifreadw(dev, SIFINC) >> 8;
-}
-
-static unsigned short tms_pci_setnselout_pins(struct net_device *dev)
-{
-	unsigned short val = 0;
-	struct net_local *tp = netdev_priv(dev);
-	struct card_info *cardinfo = tp->tmspriv;
-  
-	if(tp->DataRate == SPEED_4)
-		val |= cardinfo->nselout[0];	/* Set 4Mbps */
-	else
-		val |= cardinfo->nselout[1];	/* Set 16Mbps */
-	return val;
-}
-
-static void __devexit tms_pci_detach (struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-
-	BUG_ON(!dev);
-	unregister_netdev(dev);
-	release_region(dev->base_addr, TMS_PCI_IO_EXTENT);
-	free_irq(dev->irq, dev);
-	tmsdev_term(dev);
-	free_netdev(dev);
-	pci_set_drvdata(pdev, NULL);
-}
-
-static struct pci_driver tms_pci_driver = {
-	.name		= "tmspci",
-	.id_table	= tmspci_pci_tbl,
-	.probe		= tms_pci_attach,
-	.remove		= __devexit_p(tms_pci_detach),
-};
-
-static int __init tms_pci_init (void)
-{
-	return pci_register_driver(&tms_pci_driver);
-}
-
-static void __exit tms_pci_rmmod (void)
-{
-	pci_unregister_driver (&tms_pci_driver);
-}
-
-module_init(tms_pci_init);
-module_exit(tms_pci_rmmod);
-
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bb8c72c..987aeef 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -313,7 +313,7 @@
 
 	/* Exact match */
 	for (i = 0; i < filter->count; i++)
-		if (!compare_ether_addr(eh->h_dest, filter->addr[i]))
+		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
 			return 1;
 
 	/* Inexact match (multicast only) */
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 5ee032c..42b5151 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -355,7 +355,7 @@
 	u32 packet_len;
 	u32 padbytes = 0xffff0000;
 
-	padlen = ((skb->len + 4) % 512) ? 0 : 4;
+	padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
 
 	if ((!skb_cloned(skb)) &&
 	    ((headroom + tailroom) >= (4 + padlen))) {
@@ -377,7 +377,7 @@
 	cpu_to_le32s(&packet_len);
 	skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
 
-	if ((skb->len % 512) == 0) {
+	if (padlen) {
 		cpu_to_le32s(&padbytes);
 		memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
 		skb_put(skb, sizeof(padbytes));
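
The hunk above replaces the hard-coded 512 with the endpoint's real
wMaxPacketSize: as I read it, four marker bytes are appended whenever the
frame plus its 4-byte length header would end exactly on a USB packet
boundary, so the bulk transfer always terminates with a short packet.  A
standalone sketch of just that length rule (function name hypothetical;
maxpacket is assumed to be a power of two, e.g. 512 for high speed):

/* Illustrative sketch of the padding rule, not the driver code itself. */
#include <stdio.h>

static unsigned int asix_tx_padlen(unsigned int skb_len, unsigned int maxpacket)
{
	/* A 4-byte header is prepended; pad with 4 more bytes only if the
	 * total would be an exact multiple of maxpacket.
	 */
	return ((skb_len + 4) & (maxpacket - 1)) ? 0 : 4;
}

int main(void)
{
	printf("%u\n", asix_tx_padlen(508, 512));	/* 4: 512 is a multiple  */
	printf("%u\n", asix_tx_padlen(509, 512));	/* 0: 513 is not         */
	printf("%u\n", asix_tx_padlen(1020, 512));	/* 4: 1024 is a multiple */
	return 0;
}
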
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 90a3002..fffee6a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -83,6 +83,7 @@
 	struct cdc_state		*info = (void *) &dev->data;
 	int				status;
 	int				rndis;
+	bool				android_rndis_quirk = false;
 	struct usb_driver		*driver = driver_of(intf);
 	struct usb_cdc_mdlm_desc	*desc = NULL;
 	struct usb_cdc_mdlm_detail_desc *detail = NULL;
@@ -195,6 +196,11 @@
 					info->control,
 					info->u->bSlaveInterface0,
 					info->data);
+				/* fall back to hard-wiring for RNDIS */
+				if (rndis) {
+					android_rndis_quirk = true;
+					goto next_desc;
+				}
 				goto bad_desc;
 			}
 			if (info->control != intf) {
@@ -271,11 +277,15 @@
 	/* Microsoft ActiveSync based and some regular RNDIS devices lack the
 	 * CDC descriptors, so we'll hard-wire the interfaces and not check
 	 * for descriptors.
+	 *
+	 * Some Android RNDIS devices have a CDC Union descriptor pointing
+	 * to non-existing interfaces.  Ignore that and attempt the same
+	 * hard-wired 0 and 1 interfaces.
 	 */
-	if (rndis && !info->u) {
+	if (rndis && (!info->u || android_rndis_quirk)) {
 		info->control = usb_ifnum_to_if(dev->udev, 0);
 		info->data = usb_ifnum_to_if(dev->udev, 1);
-		if (!info->control || !info->data) {
+		if (!info->control || !info->data || info->control != intf) {
 			dev_dbg(&intf->dev,
 				"rndis: master #0/%p slave #1/%p\n",
 				info->control,
@@ -475,6 +485,8 @@
 /*-------------------------------------------------------------------------*/
 
 #define HUAWEI_VENDOR_ID	0x12D1
+#define NOVATEL_VENDOR_ID	0x1410
+#define ZTE_VENDOR_ID		0x19D2
 
 static const struct usb_device_id	products [] = {
 /*
@@ -592,6 +604,76 @@
  * because of bugs/quirks in a given product (like Zaurus, above).
  */
 {
+	/* Novatel USB551L */
+	/* This match must come *before* the generic CDC-ETHER match so that
+	 * we get FLAG_WWAN set on the device, since its descriptors are
+	 * generic CDC-ETHER.
+	 */
+	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
+		 | USB_DEVICE_ID_MATCH_PRODUCT
+		 | USB_DEVICE_ID_MATCH_INT_INFO,
+	.idVendor               = NOVATEL_VENDOR_ID,
+	.idProduct		= 0xB001,
+	.bInterfaceClass	= USB_CLASS_COMM,
+	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	.driver_info = (unsigned long)&wwan_info,
+}, {
+	/* ZTE (Vodafone) K3805-Z */
+	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
+		 | USB_DEVICE_ID_MATCH_PRODUCT
+		 | USB_DEVICE_ID_MATCH_INT_INFO,
+	.idVendor               = ZTE_VENDOR_ID,
+	.idProduct		= 0x1003,
+	.bInterfaceClass	= USB_CLASS_COMM,
+	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	.driver_info = (unsigned long)&wwan_info,
+}, {
+	/* ZTE (Vodafone) K3806-Z */
+	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
+		 | USB_DEVICE_ID_MATCH_PRODUCT
+		 | USB_DEVICE_ID_MATCH_INT_INFO,
+	.idVendor               = ZTE_VENDOR_ID,
+	.idProduct		= 0x1015,
+	.bInterfaceClass	= USB_CLASS_COMM,
+	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	.driver_info = (unsigned long)&wwan_info,
+}, {
+	/* ZTE (Vodafone) K4510-Z */
+	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
+		 | USB_DEVICE_ID_MATCH_PRODUCT
+		 | USB_DEVICE_ID_MATCH_INT_INFO,
+	.idVendor               = ZTE_VENDOR_ID,
+	.idProduct		= 0x1173,
+	.bInterfaceClass	= USB_CLASS_COMM,
+	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	.driver_info = (unsigned long)&wwan_info,
+}, {
+	/* ZTE (Vodafone) K3770-Z */
+	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
+		 | USB_DEVICE_ID_MATCH_PRODUCT
+		 | USB_DEVICE_ID_MATCH_INT_INFO,
+	.idVendor               = ZTE_VENDOR_ID,
+	.idProduct		= 0x1177,
+	.bInterfaceClass	= USB_CLASS_COMM,
+	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	.driver_info = (unsigned long)&wwan_info,
+}, {
+	/* ZTE (Vodafone) K3772-Z */
+	.match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
+		 | USB_DEVICE_ID_MATCH_PRODUCT
+		 | USB_DEVICE_ID_MATCH_INT_INFO,
+	.idVendor               = ZTE_VENDOR_ID,
+	.idProduct		= 0x1181,
+	.bInterfaceClass	= USB_CLASS_COMM,
+	.bInterfaceSubClass	= USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol	= USB_CDC_PROTO_NONE,
+	.driver_info = (unsigned long)&wwan_info,
+}, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d316503b..63cfd0b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -356,10 +356,19 @@
 };
 
 /* ZTE suck at making USB descriptors */
-static const struct driver_info	qmi_wwan_force_int4 = {
-	.description	= "Qualcomm Gobi wwan/QMI device",
+static const struct driver_info	qmi_wwan_force_int1 = {
+	.description	= "Qualcomm WWAN/QMI device",
 	.flags		= FLAG_WWAN,
-	.bind		= qmi_wwan_bind_gobi,
+	.bind		= qmi_wwan_bind_shared,
+	.unbind		= qmi_wwan_unbind_shared,
+	.manage_power	= qmi_wwan_manage_power,
+	.data		= BIT(1), /* interface whitelist bitmap */
+};
+
+static const struct driver_info	qmi_wwan_force_int4 = {
+	.description	= "Qualcomm WWAN/QMI device",
+	.flags		= FLAG_WWAN,
+	.bind		= qmi_wwan_bind_shared,
 	.unbind		= qmi_wwan_unbind_shared,
 	.manage_power	= qmi_wwan_manage_power,
 	.data		= BIT(4), /* interface whitelist bitmap */
@@ -401,6 +410,14 @@
 		.bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */
 		.driver_info        = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
+		.match_flags        = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
+		.idVendor           = HUAWEI_VENDOR_ID,
+		.bInterfaceClass    = USB_CLASS_VENDOR_SPEC,
+		.bInterfaceSubClass = 1,
+		.bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */
+		.driver_info        = (unsigned long)&qmi_wwan_info,
+	},
 	{	/* Huawei E392, E398 and possibly others in "Windows mode"
 		 * using a combined control and data interface without any CDC
 		 * functional descriptors
@@ -430,6 +447,15 @@
 		.bInterfaceProtocol = 0xff,
 		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
 	},
+	{	/* ZTE (Vodafone) K3520-Z */
+		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+		.idVendor           = 0x19d2,
+		.idProduct          = 0x0055,
+		.bInterfaceClass    = 0xff,
+		.bInterfaceSubClass = 0xff,
+		.bInterfaceProtocol = 0xff,
+		.driver_info        = (unsigned long)&qmi_wwan_force_int1,
+	},
 	{	/* ZTE (Vodafone) K3565-Z */
 		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
 		.idVendor           = 0x19d2,
@@ -457,6 +483,15 @@
 		.bInterfaceProtocol = 0xff,
 		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
 	},
+	{	/* ZTE (Vodafone) K3765-Z */
+		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+		.idVendor           = 0x19d2,
+		.idProduct          = 0x2002,
+		.bInterfaceClass    = 0xff,
+		.bInterfaceSubClass = 0xff,
+		.bInterfaceProtocol = 0xff,
+		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
+	},
 	{	/* ZTE (Vodafone) K4505-Z */
 		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
 		.idVendor           = 0x19d2,
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index c8f1b5b..0d746b3 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -77,7 +77,9 @@
 	if (dev->driver_info->indication) {
 		dev->driver_info->indication(dev, msg, buflen);
 	} else {
-		switch (msg->status) {
+		u32 status = le32_to_cpu(msg->status);
+
+		switch (status) {
 		case RNDIS_STATUS_MEDIA_CONNECT:
 			dev_info(udev, "rndis media connect\n");
 			break;
@@ -85,8 +87,7 @@
 			dev_info(udev, "rndis media disconnect\n");
 			break;
 		default:
-			dev_info(udev, "rndis indication: 0x%08x\n",
-					le32_to_cpu(msg->status));
+			dev_info(udev, "rndis indication: 0x%08x\n", status);
 		}
 	}
 }
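
This and the following rndis_host.c hunks all follow the same pattern:
the RNDIS message fields are little-endian on the wire (__le32), so they
are now converted with le32_to_cpu()/cpu_to_le32() and the CPU-order
copies are used in comparisons and switch statements.  A minimal sketch
of that pattern (struct and function names are hypothetical, not from the
driver):

/* Illustrative sketch of the endianness-handling pattern. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_rndis_hdr {
	__le32 msg_type;
	__le32 msg_len;
	__le32 status;
};

static int example_status_matches(const struct example_rndis_hdr *hdr,
				  u32 wanted_status)
{
	u32 status = le32_to_cpu(hdr->status);	/* convert once */

	return status == wanted_status;		/* compare in CPU order */
}
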
@@ -109,16 +110,17 @@
 	int			retval;
 	int			partial;
 	unsigned		count;
-	__le32			rsp;
-	u32			xid = 0, msg_len, request_id;
+	u32			xid = 0, msg_len, request_id, msg_type, rsp,
+				status;
 
 	/* REVISIT when this gets called from contexts other than probe() or
 	 * disconnect(): either serialize, or dispatch responses on xid
 	 */
 
+	msg_type = le32_to_cpu(buf->msg_type);
+
 	/* Issue the request; xid is unique, don't bother byteswapping it */
-	if (likely(buf->msg_type != RNDIS_MSG_HALT &&
-		   buf->msg_type != RNDIS_MSG_RESET)) {
+	if (likely(msg_type != RNDIS_MSG_HALT && msg_type != RNDIS_MSG_RESET)) {
 		xid = dev->xid++;
 		if (!xid)
 			xid = dev->xid++;
@@ -149,7 +151,7 @@
 	}
 
 	/* Poll the control channel; the request probably completed immediately */
-	rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
+	rsp = le32_to_cpu(buf->msg_type) | RNDIS_MSG_COMPLETION;
 	for (count = 0; count < 10; count++) {
 		memset(buf, 0, CONTROL_BUFFER_SIZE);
 		retval = usb_control_msg(dev->udev,
@@ -160,35 +162,36 @@
 			buf, buflen,
 			RNDIS_CONTROL_TIMEOUT_MS);
 		if (likely(retval >= 8)) {
+			msg_type = le32_to_cpu(buf->msg_type);
 			msg_len = le32_to_cpu(buf->msg_len);
+			status = le32_to_cpu(buf->status);
 			request_id = (__force u32) buf->request_id;
-			if (likely(buf->msg_type == rsp)) {
+			if (likely(msg_type == rsp)) {
 				if (likely(request_id == xid)) {
 					if (unlikely(rsp == RNDIS_MSG_RESET_C))
 						return 0;
-					if (likely(RNDIS_STATUS_SUCCESS
-							== buf->status))
+					if (likely(RNDIS_STATUS_SUCCESS ==
+							status))
 						return 0;
 					dev_dbg(&info->control->dev,
 						"rndis reply status %08x\n",
-						le32_to_cpu(buf->status));
+						status);
 					return -EL3RST;
 				}
 				dev_dbg(&info->control->dev,
 					"rndis reply id %d expected %d\n",
 					request_id, xid);
 				/* then likely retry */
-			} else switch (buf->msg_type) {
-			case RNDIS_MSG_INDICATE:	/* fault/event */
+			} else switch (msg_type) {
+			case RNDIS_MSG_INDICATE: /* fault/event */
 				rndis_msg_indicate(dev, (void *)buf, buflen);
-
 				break;
-			case RNDIS_MSG_KEEPALIVE: {	/* ping */
+			case RNDIS_MSG_KEEPALIVE: { /* ping */
 				struct rndis_keepalive_c *msg = (void *)buf;
 
-				msg->msg_type = RNDIS_MSG_KEEPALIVE_C;
+				msg->msg_type = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C);
 				msg->msg_len = cpu_to_le32(sizeof *msg);
-				msg->status = RNDIS_STATUS_SUCCESS;
+				msg->status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
 				retval = usb_control_msg(dev->udev,
 					usb_sndctrlpipe(dev->udev, 0),
 					USB_CDC_SEND_ENCAPSULATED_COMMAND,
@@ -236,7 +239,7 @@
  * ActiveSync 4.1 Windows driver.
  */
 static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
-		void *buf, __le32 oid, u32 in_len,
+		void *buf, u32 oid, u32 in_len,
 		void **reply, int *reply_len)
 {
 	int retval;
@@ -251,9 +254,9 @@
 	u.buf = buf;
 
 	memset(u.get, 0, sizeof *u.get + in_len);
-	u.get->msg_type = RNDIS_MSG_QUERY;
+	u.get->msg_type = cpu_to_le32(RNDIS_MSG_QUERY);
 	u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len);
-	u.get->oid = oid;
+	u.get->oid = cpu_to_le32(oid);
 	u.get->len = cpu_to_le32(in_len);
 	u.get->offset = cpu_to_le32(20);
 
@@ -324,7 +327,7 @@
 	if (retval < 0)
 		goto fail;
 
-	u.init->msg_type = RNDIS_MSG_INIT;
+	u.init->msg_type = cpu_to_le32(RNDIS_MSG_INIT);
 	u.init->msg_len = cpu_to_le32(sizeof *u.init);
 	u.init->major_version = cpu_to_le32(1);
 	u.init->minor_version = cpu_to_le32(0);
@@ -395,22 +398,23 @@
 	/* Check physical medium */
 	phym = NULL;
 	reply_len = sizeof *phym;
-	retval = rndis_query(dev, intf, u.buf, OID_GEN_PHYSICAL_MEDIUM,
-			0, (void **) &phym, &reply_len);
+	retval = rndis_query(dev, intf, u.buf,
+			     RNDIS_OID_GEN_PHYSICAL_MEDIUM,
+			     0, (void **) &phym, &reply_len);
 	if (retval != 0 || !phym) {
 		/* OID is optional so don't fail here. */
-		phym_unspec = RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED;
+		phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED);
 		phym = &phym_unspec;
 	}
 	if ((flags & FLAG_RNDIS_PHYM_WIRELESS) &&
-			*phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
+	    le32_to_cpup(phym) != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
 		netif_dbg(dev, probe, dev->net,
 			  "driver requires wireless physical medium, but device is not\n");
 		retval = -ENODEV;
 		goto halt_fail_and_release;
 	}
 	if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) &&
-			*phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
+	    le32_to_cpup(phym) == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
 		netif_dbg(dev, probe, dev->net,
 			  "driver requires non-wireless physical medium, but device is wireless.\n");
 		retval = -ENODEV;
@@ -419,8 +423,9 @@
 
 	/* Get designated host ethernet address */
 	reply_len = ETH_ALEN;
-	retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS,
-			48, (void **) &bp, &reply_len);
+	retval = rndis_query(dev, intf, u.buf,
+			     RNDIS_OID_802_3_PERMANENT_ADDRESS,
+			     48, (void **) &bp, &reply_len);
 	if (unlikely(retval< 0)) {
 		dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval);
 		goto halt_fail_and_release;
@@ -430,12 +435,12 @@
 
 	/* set a nonzero filter to enable data transfers */
 	memset(u.set, 0, sizeof *u.set);
-	u.set->msg_type = RNDIS_MSG_SET;
+	u.set->msg_type = cpu_to_le32(RNDIS_MSG_SET);
 	u.set->msg_len = cpu_to_le32(4 + sizeof *u.set);
-	u.set->oid = OID_GEN_CURRENT_PACKET_FILTER;
+	u.set->oid = cpu_to_le32(RNDIS_OID_GEN_CURRENT_PACKET_FILTER);
 	u.set->len = cpu_to_le32(4);
 	u.set->offset = cpu_to_le32((sizeof *u.set) - 8);
-	*(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER;
+	*(__le32 *)(u.buf + sizeof *u.set) = cpu_to_le32(RNDIS_DEFAULT_FILTER);
 
 	retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
 	if (unlikely(retval < 0)) {
@@ -450,7 +455,7 @@
 
 halt_fail_and_release:
 	memset(u.halt, 0, sizeof *u.halt);
-	u.halt->msg_type = RNDIS_MSG_HALT;
+	u.halt->msg_type = cpu_to_le32(RNDIS_MSG_HALT);
 	u.halt->msg_len = cpu_to_le32(sizeof *u.halt);
 	(void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE);
 fail_and_release:
@@ -475,7 +480,7 @@
 	/* try to clear any rndis state/activity (no i/o from stack!) */
 	halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
 	if (halt) {
-		halt->msg_type = RNDIS_MSG_HALT;
+		halt->msg_type = cpu_to_le32(RNDIS_MSG_HALT);
 		halt->msg_len = cpu_to_le32(sizeof *halt);
 		(void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE);
 		kfree(halt);
@@ -494,16 +499,16 @@
 	while (likely(skb->len)) {
 		struct rndis_data_hdr	*hdr = (void *)skb->data;
 		struct sk_buff		*skb2;
-		u32			msg_len, data_offset, data_len;
+		u32			msg_type, msg_len, data_offset, data_len;
 
+		msg_type = le32_to_cpu(hdr->msg_type);
 		msg_len = le32_to_cpu(hdr->msg_len);
 		data_offset = le32_to_cpu(hdr->data_offset);
 		data_len = le32_to_cpu(hdr->data_len);
 
 		/* don't choke if we see oob, per-packet data, etc */
-		if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET ||
-			     skb->len < msg_len ||
-			     (data_offset + data_len + 8) > msg_len)) {
+		if (unlikely(msg_type != RNDIS_MSG_PACKET || skb->len < msg_len
+				|| (data_offset + data_len + 8) > msg_len)) {
 			dev->net->stats.rx_frame_errors++;
 			netdev_dbg(dev->net, "bad rndis message %d/%d/%d/%d, len %d\n",
 				   le32_to_cpu(hdr->msg_type),
@@ -569,7 +574,7 @@
 fill:
 	hdr = (void *) __skb_push(skb, sizeof *hdr);
 	memset(hdr, 0, sizeof *hdr);
-	hdr->msg_type = RNDIS_MSG_PACKET;
+	hdr->msg_type = cpu_to_le32(RNDIS_MSG_PACKET);
 	hdr->msg_len = cpu_to_le32(skb->len);
 	hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8);
 	hdr->data_len = cpu_to_le32(len);
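
The rndis_host changes above make every on-wire RNDIS field an explicit __le32 and convert it at the point of use with le32_to_cpu()/cpu_to_le32(), so the driver behaves identically on big-endian hosts. A minimal sketch of the idiom, with hypothetical struct and helper names:

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_hdr {
	__le32 msg_type;	/* little-endian on the wire */
	__le32 msg_len;
};

static u32 wire_msg_type(const struct wire_hdr *hdr)
{
	/* no-op on little-endian hosts, byte swap on big-endian ones */
	return le32_to_cpu(hdr->msg_type);
}

static void wire_set_len(struct wire_hdr *hdr, u32 len)
{
	hdr->msg_len = cpu_to_le32(len);
}
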
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index a234948..fb1a087 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -98,7 +98,7 @@
 
 	if (unlikely(ret < 0))
 		netdev_warn(dev->net,
-			"Failed to read register index 0x%08x", index);
+			"Failed to read reg index 0x%08x: %d", index, ret);
 
 	le32_to_cpus(buf);
 	*data = *buf;
@@ -128,7 +128,7 @@
 
 	if (unlikely(ret < 0))
 		netdev_warn(dev->net,
-			"Failed to write register index 0x%08x", index);
+			"Failed to write reg index 0x%08x: %d", index, ret);
 
 	kfree(buf);
 
@@ -171,7 +171,7 @@
 	idx &= dev->mii.reg_num_mask;
 	addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
 		| ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
-		| MII_ACCESS_READ;
+		| MII_ACCESS_READ | MII_ACCESS_BUSY;
 	ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
 	check_warn_goto_done(ret, "Error writing MII_ACCESS");
 
@@ -210,7 +210,7 @@
 	idx &= dev->mii.reg_num_mask;
 	addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
 		| ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
-		| MII_ACCESS_WRITE;
+		| MII_ACCESS_WRITE | MII_ACCESS_BUSY;
 	ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
 	check_warn_goto_done(ret, "Error writing MII_ACCESS");
 
@@ -508,9 +508,9 @@
 	u16 lcladv, rmtadv;
 	int ret;
 
-	/* clear interrupt status */
-	ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
-	check_warn_return(ret, "Error reading PHY_INT_SRC");
+	/* write to clear phy interrupt status */
+	smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC,
+		PHY_INT_SRC_CLEAR_ALL);
 
 	ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
 	check_warn_return(ret, "Error writing INT_STS");
@@ -643,7 +643,7 @@
 
 static int smsc75xx_phy_initialize(struct usbnet *dev)
 {
-	int bmcr, timeout = 0;
+	int bmcr, ret, timeout = 0;
 
 	/* Initialize MII structure */
 	dev->mii.dev = dev->net;
@@ -651,6 +651,7 @@
 	dev->mii.mdio_write = smsc75xx_mdio_write;
 	dev->mii.phy_id_mask = 0x1f;
 	dev->mii.reg_num_mask = 0x1f;
+	dev->mii.supports_gmii = 1;
 	dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID;
 
 	/* reset phy and wait for reset to complete */
@@ -661,7 +662,7 @@
 		bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
 		check_warn_return(bmcr, "Error reading MII_BMCR");
 		timeout++;
-	} while ((bmcr & MII_BMCR) && (timeout < 100));
+	} while ((bmcr & BMCR_RESET) && (timeout < 100));
 
 	if (timeout >= 100) {
 		netdev_warn(dev->net, "timeout on PHY Reset");
@@ -671,10 +672,13 @@
 	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
 		ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
 		ADVERTISE_PAUSE_ASYM);
+	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
+		ADVERTISE_1000FULL);
 
-	/* read to clear */
-	smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
-	check_warn_return(bmcr, "Error reading PHY_INT_SRC");
+	/* read and write to clear phy interrupt status */
+	ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
+	check_warn_return(ret, "Error reading PHY_INT_SRC");
+	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff);
 
 	smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
 		PHY_INT_MASK_DEFAULT);
@@ -899,15 +903,20 @@
 
 	netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf);
 
-	/* Configure GPIO pins as LED outputs */
-	ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf);
-	check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret);
+	ret = smsc75xx_read_reg(dev, E2P_CMD, &buf);
+	check_warn_return(ret, "Failed to read E2P_CMD: %d", ret);
 
-	buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL);
-	buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL;
+	/* only set default GPIO/LED settings if no EEPROM is detected */
+	if (!(buf & E2P_CMD_LOADED)) {
+		ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf);
+		check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret);
 
-	ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf);
-	check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret);
+		buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL);
+		buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL;
+
+		ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf);
+		check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret);
+	}
 
 	ret = smsc75xx_write_reg(dev, FLOW, 0);
 	check_warn_return(ret, "Failed to write FLOW: %d", ret);
@@ -946,6 +955,14 @@
 	ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf);
 	check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret);
 
+	/* allow mac to detect speed and duplex from phy */
+	ret = smsc75xx_read_reg(dev, MAC_CR, &buf);
+	check_warn_return(ret, "Failed to read MAC_CR: %d", ret);
+
+	buf |= (MAC_CR_ADD | MAC_CR_ASD);
+	ret = smsc75xx_write_reg(dev, MAC_CR, buf);
+	check_warn_return(ret, "Failed to write MAC_CR: %d", ret);
+
 	ret = smsc75xx_read_reg(dev, MAC_TX, &buf);
 	check_warn_return(ret, "Failed to read MAC_TX: %d", ret);
 
@@ -1212,7 +1229,7 @@
 	.rx_fixup	= smsc75xx_rx_fixup,
 	.tx_fixup	= smsc75xx_tx_fixup,
 	.status		= smsc75xx_status,
-	.flags		= FLAG_ETHER | FLAG_SEND_ZLP,
+	.flags		= FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
 };
 
 static const struct usb_device_id products[] = {
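
One of the smsc75xx fixes above replaces the reset-poll test "bmcr & MII_BMCR" (MII_BMCR is the register address, 0x00, so the mask was always zero and the loop fell through immediately) with the self-clearing BMCR_RESET bit. A sketch of the corrected wait loop, using a hypothetical mdio_read callback:

#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/netdevice.h>

static int phy_wait_reset(struct net_device *net, int phy_id,
			  int (*mdio_read)(struct net_device *, int, int))
{
	int bmcr, timeout = 0;

	do {
		msleep(10);
		/* BMCR_RESET clears itself once the PHY reset completes */
		bmcr = mdio_read(net, phy_id, MII_BMCR);
		if (bmcr < 0)
			return bmcr;
	} while ((bmcr & BMCR_RESET) && (++timeout < 100));

	return (bmcr & BMCR_RESET) ? -ETIMEDOUT : 0;
}
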
diff --git a/drivers/net/usb/smsc75xx.h b/drivers/net/usb/smsc75xx.h
index 16e98c7..67eba39 100644
--- a/drivers/net/usb/smsc75xx.h
+++ b/drivers/net/usb/smsc75xx.h
@@ -388,6 +388,7 @@
 #define PHY_INT_SRC_ANEG_COMP		((u16)0x0040)
 #define PHY_INT_SRC_REMOTE_FAULT	((u16)0x0020)
 #define PHY_INT_SRC_LINK_DOWN		((u16)0x0010)
+#define PHY_INT_SRC_CLEAR_ALL		((u16)0xffff)
 
 #define PHY_INT_MASK			(30)
 #define PHY_INT_MASK_ENERGY_ON		((u16)0x0080)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 5f19f84..94ae669 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1017,6 +1017,7 @@
 	dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
 	dev->net->flags |= IFF_MULTICAST;
 	dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
+	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
 	return 0;
 }
 
@@ -1191,7 +1192,7 @@
 	.rx_fixup	= smsc95xx_rx_fixup,
 	.tx_fixup	= smsc95xx_tx_fixup,
 	.status		= smsc95xx_status,
-	.flags		= FLAG_ETHER | FLAG_SEND_ZLP,
+	.flags		= FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
 };
 
 static const struct usb_device_id products[] = {
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index b7b3f5b..9f58330 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -210,6 +210,7 @@
 		} else {
 			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
 				buf, maxp, intr_complete, dev, period);
+			dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
 			dev_dbg(&intf->dev,
 				"status ep%din, %d bytes period %d\n",
 				usb_pipeendpoint(pipe), maxp, period);
@@ -281,17 +282,32 @@
 }
 EXPORT_SYMBOL_GPL(usbnet_change_mtu);
 
+/* The caller must hold list->lock */
+static void __usbnet_queue_skb(struct sk_buff_head *list,
+			struct sk_buff *newsk, enum skb_state state)
+{
+	struct skb_data *entry = (struct skb_data *) newsk->cb;
+
+	__skb_queue_tail(list, newsk);
+	entry->state = state;
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
  * completion callbacks.  2.5 should have fixed those bugs...
  */
 
-static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
+static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
+		struct sk_buff_head *list, enum skb_state state)
 {
 	unsigned long		flags;
+	enum skb_state 		old_state;
+	struct skb_data *entry = (struct skb_data *) skb->cb;
 
 	spin_lock_irqsave(&list->lock, flags);
+	old_state = entry->state;
+	entry->state = state;
 	__skb_unlink(skb, list);
 	spin_unlock(&list->lock);
 	spin_lock(&dev->done.lock);
@@ -299,6 +315,7 @@
 	if (dev->done.qlen == 1)
 		tasklet_schedule(&dev->bh);
 	spin_unlock_irqrestore(&dev->done.lock, flags);
+	return old_state;
 }
 
 /* some work can't be done in tasklets, so we use keventd
@@ -339,7 +356,6 @@
 	entry = (struct skb_data *) skb->cb;
 	entry->urb = urb;
 	entry->dev = dev;
-	entry->state = rx_start;
 	entry->length = 0;
 
 	usb_fill_bulk_urb (urb, dev->udev, dev->in,
@@ -371,7 +387,7 @@
 			tasklet_schedule (&dev->bh);
 			break;
 		case 0:
-			__skb_queue_tail (&dev->rxq, skb);
+			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
 		}
 	} else {
 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
@@ -422,16 +438,17 @@
 	struct skb_data		*entry = (struct skb_data *) skb->cb;
 	struct usbnet		*dev = entry->dev;
 	int			urb_status = urb->status;
+	enum skb_state		state;
 
 	skb_put (skb, urb->actual_length);
-	entry->state = rx_done;
+	state = rx_done;
 	entry->urb = NULL;
 
 	switch (urb_status) {
 	/* success */
 	case 0:
 		if (skb->len < dev->net->hard_header_len) {
-			entry->state = rx_cleanup;
+			state = rx_cleanup;
 			dev->net->stats.rx_errors++;
 			dev->net->stats.rx_length_errors++;
 			netif_dbg(dev, rx_err, dev->net,
@@ -470,7 +487,7 @@
 				  "rx throttle %d\n", urb_status);
 		}
 block:
-		entry->state = rx_cleanup;
+		state = rx_cleanup;
 		entry->urb = urb;
 		urb = NULL;
 		break;
@@ -481,17 +498,18 @@
 		// FALLTHROUGH
 
 	default:
-		entry->state = rx_cleanup;
+		state = rx_cleanup;
 		dev->net->stats.rx_errors++;
 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
 		break;
 	}
 
-	defer_bh(dev, skb, &dev->rxq);
+	state = defer_bh(dev, skb, &dev->rxq, state);
 
 	if (urb) {
 		if (netif_running (dev->net) &&
-		    !test_bit (EVENT_RX_HALT, &dev->flags)) {
+		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
+		    state != unlink_start) {
 			rx_submit (dev, urb, GFP_ATOMIC);
 			usb_mark_last_busy(dev->udev);
 			return;
@@ -578,16 +596,23 @@
 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
 {
 	unsigned long		flags;
-	struct sk_buff		*skb, *skbnext;
+	struct sk_buff		*skb;
 	int			count = 0;
 
 	spin_lock_irqsave (&q->lock, flags);
-	skb_queue_walk_safe(q, skb, skbnext) {
+	while (!skb_queue_empty(q)) {
 		struct skb_data		*entry;
 		struct urb		*urb;
 		int			retval;
 
-		entry = (struct skb_data *) skb->cb;
+		skb_queue_walk(q, skb) {
+			entry = (struct skb_data *) skb->cb;
+			if (entry->state != unlink_start)
+				goto found;
+		}
+		break;
+found:
+		entry->state = unlink_start;
 		urb = entry->urb;
 
 		/*
@@ -884,6 +909,7 @@
 	.get_drvinfo		= usbnet_get_drvinfo,
 	.get_msglevel		= usbnet_get_msglevel,
 	.set_msglevel		= usbnet_set_msglevel,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 /*-------------------------------------------------------------------------*/
@@ -1038,8 +1064,7 @@
 	}
 
 	usb_autopm_put_interface_async(dev->intf);
-	entry->state = tx_done;
-	defer_bh(dev, skb, &dev->txq);
+	(void) defer_bh(dev, skb, &dev->txq, tx_done);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1095,7 +1120,6 @@
 	entry = (struct skb_data *) skb->cb;
 	entry->urb = urb;
 	entry->dev = dev;
-	entry->state = tx_start;
 	entry->length = length;
 
 	usb_fill_bulk_urb (urb, dev->udev, dev->out,
@@ -1154,7 +1178,7 @@
 		break;
 	case 0:
 		net->trans_start = jiffies;
-		__skb_queue_tail (&dev->txq, skb);
+		__usbnet_queue_skb(&dev->txq, skb, tx_start);
 		if (dev->txq.qlen >= TX_QLEN (dev))
 			netif_stop_queue (net);
 	}
@@ -1443,7 +1467,7 @@
 
 	status = register_netdev (net);
 	if (status)
-		goto out3;
+		goto out4;
 	netif_info(dev, probe, dev->net,
 		   "register '%s' at usb-%s-%s, %s, %pM\n",
 		   udev->dev.driver->name,
@@ -1461,6 +1485,8 @@
 
 	return 0;
 
+out4:
+	usb_free_urb(dev->interrupt);
 out3:
 	if (info->unbind)
 		info->unbind (dev, udev);
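
The usbnet changes above close a race by only ever setting the skb_data state while the owning queue's lock is held (__usbnet_queue_skb()), and by having defer_bh() return the previous state so rx_complete() can tell whether unlink_urbs() began tearing the URB down in the meantime. A minimal sketch of the "update state under the list lock" idiom, with hypothetical names:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

enum pkt_state { pkt_start, pkt_done, pkt_unlinking };

struct pkt_cb {				/* lives in skb->cb */
	enum pkt_state	state;
};

static enum pkt_state pkt_unlink(struct sk_buff_head *list,
				 struct sk_buff *skb,
				 enum pkt_state new_state)
{
	struct pkt_cb *cb = (struct pkt_cb *)skb->cb;
	enum pkt_state old;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	old = cb->state;		/* state transitions are now atomic */
	cb->state = new_state;		/* w.r.t. anyone walking this queue */
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);

	return old;
}
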
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index af8acc8..9ce6995 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -66,12 +66,21 @@
 	/* Host will merge rx buffers for big packets (shake it! shake it!) */
 	bool mergeable_rx_bufs;
 
+	/* enable config space updates */
+	bool config_enable;
+
 	/* Active statistics */
 	struct virtnet_stats __percpu *stats;
 
 	/* Work struct for refilling if we run low on memory. */
 	struct delayed_work refill;
 
+	/* Work struct for config space updates */
+	struct work_struct config_work;
+
+	/* Lock for config space updates */
+	struct mutex config_lock;
+
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 
@@ -492,7 +501,9 @@
 	 * We synchronize against interrupts via NAPI_STATE_SCHED */
 	if (napi_schedule_prep(&vi->napi)) {
 		virtqueue_disable_cb(vi->rvq);
+		local_bh_disable();
 		__napi_schedule(&vi->napi);
+		local_bh_enable();
 	}
 }
 
@@ -780,6 +791,16 @@
 	return status == VIRTIO_NET_OK;
 }
 
+static void virtnet_ack_link_announce(struct virtnet_info *vi)
+{
+	rtnl_lock();
+	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
+				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
+				  0, 0))
+		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
+	rtnl_unlock();
+}
+
 static int virtnet_close(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -951,20 +972,31 @@
 #endif
 };
 
-static void virtnet_update_status(struct virtnet_info *vi)
+static void virtnet_config_changed_work(struct work_struct *work)
 {
+	struct virtnet_info *vi =
+		container_of(work, struct virtnet_info, config_work);
 	u16 v;
 
+	mutex_lock(&vi->config_lock);
+	if (!vi->config_enable)
+		goto done;
+
 	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
 			      offsetof(struct virtio_net_config, status),
 			      &v) < 0)
-		return;
+		goto done;
+
+	if (v & VIRTIO_NET_S_ANNOUNCE) {
+		netif_notify_peers(vi->dev);
+		virtnet_ack_link_announce(vi);
+	}
 
 	/* Ignore unknown (future) status bits */
 	v &= VIRTIO_NET_S_LINK_UP;
 
 	if (vi->status == v)
-		return;
+		goto done;
 
 	vi->status = v;
 
@@ -975,13 +1007,15 @@
 		netif_carrier_off(vi->dev);
 		netif_stop_queue(vi->dev);
 	}
+done:
+	mutex_unlock(&vi->config_lock);
 }
 
 static void virtnet_config_changed(struct virtio_device *vdev)
 {
 	struct virtnet_info *vi = vdev->priv;
 
-	virtnet_update_status(vi);
+	queue_work(system_nrt_wq, &vi->config_work);
 }
 
 static int init_vqs(struct virtnet_info *vi)
@@ -1075,6 +1109,9 @@
 		goto free;
 
 	INIT_DELAYED_WORK(&vi->refill, refill_work);
+	mutex_init(&vi->config_lock);
+	vi->config_enable = true;
+	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
 	sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
 	sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
 
@@ -1110,7 +1147,7 @@
 	   otherwise get link status from config. */
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
 		netif_carrier_off(dev);
-		virtnet_update_status(vi);
+		queue_work(system_nrt_wq, &vi->config_work);
 	} else {
 		vi->status = VIRTIO_NET_S_LINK_UP;
 		netif_carrier_on(dev);
@@ -1169,10 +1206,17 @@
 {
 	struct virtnet_info *vi = vdev->priv;
 
+	/* Prevent config work handler from accessing the device. */
+	mutex_lock(&vi->config_lock);
+	vi->config_enable = false;
+	mutex_unlock(&vi->config_lock);
+
 	unregister_netdev(vi->dev);
 
 	remove_vq_common(vi);
 
+	flush_work(&vi->config_work);
+
 	free_percpu(vi->stats);
 	free_netdev(vi->dev);
 }
@@ -1182,6 +1226,11 @@
 {
 	struct virtnet_info *vi = vdev->priv;
 
+	/* Prevent config work handler from accessing the device */
+	mutex_lock(&vi->config_lock);
+	vi->config_enable = false;
+	mutex_unlock(&vi->config_lock);
+
 	virtqueue_disable_cb(vi->rvq);
 	virtqueue_disable_cb(vi->svq);
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
@@ -1195,6 +1244,8 @@
 
 	remove_vq_common(vi);
 
+	flush_work(&vi->config_work);
+
 	return 0;
 }
 
@@ -1215,6 +1266,10 @@
 	if (!try_fill_recv(vi, GFP_KERNEL))
 		queue_delayed_work(system_nrt_wq, &vi->refill, 0);
 
+	mutex_lock(&vi->config_lock);
+	vi->config_enable = true;
+	mutex_unlock(&vi->config_lock);
+
 	return 0;
 }
 #endif
@@ -1232,6 +1287,7 @@
 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
+	VIRTIO_NET_F_GUEST_ANNOUNCE,
 };
 
 static struct virtio_driver virtio_net_driver = {
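
The virtio_net changes above move config-space handling into a work item and gate it on a mutex-protected config_enable flag, so remove/freeze can first shut the handler out and then flush_work() any instance that is already running. The shutdown pattern in isolation (names here are illustrative, not the driver's):

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct cfg_ctx {
	struct mutex		lock;
	bool			enabled;
	struct work_struct	work;
};

static void cfg_work_fn(struct work_struct *work)
{
	struct cfg_ctx *ctx = container_of(work, struct cfg_ctx, work);

	mutex_lock(&ctx->lock);
	if (!ctx->enabled)
		goto out;		/* device is going away, do nothing */
	/* ... safe to touch the device's config space here ... */
out:
	mutex_unlock(&ctx->lock);
}

static void cfg_shutdown(struct cfg_ctx *ctx)
{
	/* 1. stop any future work invocation from touching the device */
	mutex_lock(&ctx->lock);
	ctx->enabled = false;
	mutex_unlock(&ctx->lock);

	/* 2. wait out an invocation that is already past the check */
	flush_work(&ctx->work);
}
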
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index c676de7..9eb6479 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2055,15 +2055,4 @@
 	.remove		= __devexit_p(dscc4_remove_one),
 };
 
-static int __init dscc4_init_module(void)
-{
-	return pci_register_driver(&dscc4_driver);
-}
-
-static void __exit dscc4_cleanup_module(void)
-{
-	pci_unregister_driver(&dscc4_driver);
-}
-
-module_init(dscc4_init_module);
-module_exit(dscc4_cleanup_module);
+module_pci_driver(dscc4_driver);
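
Several drivers in this merge (dscc4, lmc, adm8211, ath5k PCI) drop their hand-rolled module init/exit functions in favour of module_pci_driver(), which expands to the same pci_register_driver()/pci_unregister_driver() boilerplate. A sketch of the resulting shape, with hypothetical IDs and callbacks:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* made-up vendor/device */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* generates the module_init()/module_exit() wrappers that simply call
 * pci_register_driver()/pci_unregister_driver() */
module_pci_driver(example_driver);

MODULE_LICENSE("GPL");
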
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 76a8a4a..f5d533a 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1120,7 +1120,7 @@
 {
     lmc_softc_t *sc = dev_to_sc(dev);
 
-    lmc_trace(dev, "lmc_runnig_reset in");
+    lmc_trace(dev, "lmc_running_reset in");
 
     /* stop interrupts */
     /* Clear the interrupt mask */
@@ -1736,18 +1736,7 @@
 	.remove		= __devexit_p(lmc_remove_one),
 };
 
-static int __init init_lmc(void)
-{
-    return pci_register_driver(&lmc_driver);
-}
-
-static void __exit exit_lmc(void)
-{
-    pci_unregister_driver(&lmc_driver);
-}
-
-module_init(init_lmc);
-module_exit(exit_lmc);
+module_pci_driver(lmc_driver);
 
 unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
 {
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index 3f70338..672de18 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -32,8 +32,9 @@
 	  If unsure, it is safe to select M (module).
 
 config WIMAX_IWMC3200_SDIO
-	bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO"
+	bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO (EXPERIMENTAL)"
 	depends on WIMAX_I2400M_SDIO
+	depends on EXPERIMENTAL
 	select IWMC3200TOP
 	help
 	  Select if you have a device based on the Intel Multicom WiMAX
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index e325768..b78ee67 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -277,7 +277,7 @@
 		d_printf(1, dev, "RX: size changed to %d, received %d, "
 			 "copied %d, capacity %ld\n",
 			 rx_size, read_size, rx_skb->len,
-			 (long) (skb_end_pointer(new_skb) - new_skb->head));
+			 (long) skb_end_offset(new_skb));
 		goto retry;
 	}
 		/* In most cases, it happens due to the hardware scheduling a
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 29b1e03..713d033 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -695,7 +695,7 @@
 	d_fnstart(3, dev, "(iface %p)\n", iface);
 	rmb();		/* see i2400m->updown's documentation  */
 	if (i2400m->updown == 0) {
-		d_printf(1, dev, "fw was down, no resume neeed\n");
+		d_printf(1, dev, "fw was down, no resume needed\n");
 		goto out;
 	}
 	d_printf(1, dev, "fw was up, resuming\n");
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index abd3b71..5f58fa5 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -282,8 +282,7 @@
 source "drivers/net/wireless/p54/Kconfig"
 source "drivers/net/wireless/rt2x00/Kconfig"
 source "drivers/net/wireless/rtlwifi/Kconfig"
-source "drivers/net/wireless/wl1251/Kconfig"
-source "drivers/net/wireless/wl12xx/Kconfig"
+source "drivers/net/wireless/ti/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
 source "drivers/net/wireless/mwifiex/Kconfig"
 
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 98db7619..0ce218b 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -51,9 +51,7 @@
 
 obj-$(CONFIG_MAC80211_HWSIM)	+= mac80211_hwsim.o
 
-obj-$(CONFIG_WL1251)	+= wl1251/
-obj-$(CONFIG_WL12XX)	+= wl12xx/
-obj-$(CONFIG_WL12XX_PLATFORM_DATA)	+= wl12xx/
+obj-$(CONFIG_WL_TI)	+= ti/
 
 obj-$(CONFIG_IWM)	+= iwmc3200wifi/
 
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f5ce562..0ac09a2 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1991,19 +1991,4 @@
 #endif /* CONFIG_PM */
 };
 
-
-
-static int __init adm8211_init(void)
-{
-	return pci_register_driver(&adm8211_driver);
-}
-
-
-static void __exit adm8211_exit(void)
-{
-	pci_unregister_driver(&adm8211_driver);
-}
-
-
-module_init(adm8211_init);
-module_exit(adm8211_exit);
+module_pci_driver(adm8211_driver);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 4045e5a..3df0146 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1122,12 +1122,12 @@
 static void at76_dump_mib_local(struct at76_priv *priv)
 {
 	int ret;
-	struct mib_local *m = kmalloc(sizeof(struct mib_phy), GFP_KERNEL);
+	struct mib_local *m = kmalloc(sizeof(*m), GFP_KERNEL);
 
 	if (!m)
 		return;
 
-	ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local));
+	ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(*m));
 	if (ret < 0) {
 		wiphy_err(priv->hw->wiphy,
 			  "at76_get_mib (LOCAL) failed: %d\n", ret);
@@ -1751,7 +1751,7 @@
 	 * following workaround is necessary. If the TX frame is an
 	 * authentication frame extract the bssid and send the CMD_JOIN. */
 	if (mgmt->frame_control & cpu_to_le16(IEEE80211_STYPE_AUTH)) {
-		if (compare_ether_addr(priv->bssid, mgmt->bssid)) {
+		if (!ether_addr_equal(priv->bssid, mgmt->bssid)) {
 			memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
 			ieee80211_queue_work(hw, &priv->work_join_bssid);
 			dev_kfree_skb_any(skb);
@@ -2512,10 +2512,8 @@
 
 	printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " unloading\n");
 	usb_deregister(&at76_driver);
-	for (i = 0; i < ARRAY_SIZE(firmwares); i++) {
-		if (firmwares[i].fw)
-			release_firmware(firmwares[i].fw);
-	}
+	for (i = 0; i < ARRAY_SIZE(firmwares); i++)
+		release_firmware(firmwares[i].fw);
 	led_trigger_unregister_simple(ledtrig_tx);
 }
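
Two small cleanups in the at76c50x hunks above lean on API guarantees: release_firmware() accepts a NULL pointer and does nothing, so the per-entry check could go, and ether_addr_equal() (also adopted by the ath5k patches further down) returns true when two MAC addresses match, replacing compare_ether_addr()/memcmp() == 0 tests. A tiny sketch of the latter, with hypothetical names:

#include <linux/types.h>
#include <linux/etherdevice.h>

/* true when the received frame's BSSID matches the one we are on */
static bool bssid_matches(const u8 *rx_bssid, const u8 *cur_bssid)
{
	return ether_addr_equal(rx_bssid, cur_bssid);
}
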
 
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 8c50d9d..aec33cc 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -220,6 +220,7 @@
 	}
 
 	ath5k_deinit_ah(ah);
+	iounmap(ah->iobase);
 	platform_set_drvdata(pdev, NULL);
 	ieee80211_free_hw(hw);
 
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 35e9370..5c00875 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -14,6 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
@@ -728,33 +730,25 @@
 ath5k_ani_print_counters(struct ath5k_hw *ah)
 {
 	/* clears too */
-	printk(KERN_NOTICE "ACK fail\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
-	printk(KERN_NOTICE "RTS fail\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
-	printk(KERN_NOTICE "RTS success\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_RTS_OK));
-	printk(KERN_NOTICE "FCS error\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
+	pr_notice("ACK fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
+	pr_notice("RTS fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
+	pr_notice("RTS success\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_OK));
+	pr_notice("FCS error\t%d\n", ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
 
 	/* no clear */
-	printk(KERN_NOTICE "tx\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
-	printk(KERN_NOTICE "rx\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
-	printk(KERN_NOTICE "busy\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
-	printk(KERN_NOTICE "cycles\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
+	pr_notice("tx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
+	pr_notice("rx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
+	pr_notice("busy\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
+	pr_notice("cycles\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
 
-	printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
-	printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
-	printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
-	printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
-		ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
+	pr_notice("AR5K_PHYERR_CNT1\t%d\n",
+		  ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
+	pr_notice("AR5K_PHYERR_CNT2\t%d\n",
+		  ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
+	pr_notice("AR5K_OFDM_FIL_CNT\t%d\n",
+		  ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
+	pr_notice("AR5K_CCK_FIL_CNT\t%d\n",
+		  ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
 }
 
 #endif
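
Most ath5k source files in this merge gain a pr_fmt() definition before their includes; printk.h folds that prefix into every pr_notice()/pr_info()/pr_warn() call in the file, which is what lets the long printk(KERN_NOTICE ...) lines above shrink without losing the "ath5k:" tag. A sketch of the mechanism:

/* must be defined before the first include that pulls in <linux/printk.h> */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>
#include <linux/module.h>

static void report(int val)
{
	/* prints e.g. "ath5k: counter 42" when built into ath5k.ko */
	pr_notice("counter %d\n", val);
}
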
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 8d434b8f..64a453a 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -76,26 +76,29 @@
   GENERIC DRIVER DEFINITIONS
 \****************************/
 
-#define ATH5K_PRINTF(fmt, ...) \
-	printk(KERN_WARNING "%s: " fmt, __func__, ##__VA_ARGS__)
+#define ATH5K_PRINTF(fmt, ...)						\
+	pr_warn("%s: " fmt, __func__, ##__VA_ARGS__)
 
-#define ATH5K_PRINTK(_sc, _level, _fmt, ...) \
-	printk(_level "ath5k %s: " _fmt, \
-		((_sc) && (_sc)->hw) ? wiphy_name((_sc)->hw->wiphy) : "", \
-		##__VA_ARGS__)
+void __printf(3, 4)
+_ath5k_printk(const struct ath5k_hw *ah, const char *level,
+	      const char *fmt, ...);
 
-#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) do { \
-	if (net_ratelimit()) \
-		ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); \
-	} while (0)
+#define ATH5K_PRINTK(_sc, _level, _fmt, ...)				\
+	_ath5k_printk(_sc, _level, _fmt, ##__VA_ARGS__)
 
-#define ATH5K_INFO(_sc, _fmt, ...) \
+#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...)			\
+do {									\
+	if (net_ratelimit())						\
+		ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); 	\
+} while (0)
+
+#define ATH5K_INFO(_sc, _fmt, ...)					\
 	ATH5K_PRINTK(_sc, KERN_INFO, _fmt, ##__VA_ARGS__)
 
-#define ATH5K_WARN(_sc, _fmt, ...) \
+#define ATH5K_WARN(_sc, _fmt, ...)					\
 	ATH5K_PRINTK_LIMIT(_sc, KERN_WARNING, _fmt, ##__VA_ARGS__)
 
-#define ATH5K_ERR(_sc, _fmt, ...) \
+#define ATH5K_ERR(_sc, _fmt, ...)					\
 	ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__)
 
 /*
@@ -1524,7 +1527,7 @@
 
 /* Protocol Control Unit Functions */
 /* Helpers */
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
 		int len, struct ieee80211_rate *rate, bool shortpre);
 unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
 unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index d7114c7..7106547 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -20,6 +20,8 @@
 * Attach/Detach Functions and helpers *
 \*************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include "ath5k.h"
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0e643b0..0ba81a6 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -40,6 +40,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -460,7 +462,7 @@
 	}
 
 	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
-		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
+		if (ether_addr_equal(iter_data->hw_macaddr, mac))
 			iter_data->need_set_hw_addr = false;
 
 	if (!iter_data->any_assoc) {
@@ -1168,7 +1170,7 @@
 
 	if (ieee80211_is_beacon(mgmt->frame_control) &&
 	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
-	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
+	    ether_addr_equal(mgmt->bssid, common->curbssid)) {
 		/*
 		 * Received an IBSS beacon with the same BSSID. Hardware *must*
 		 * have updated the local TSF. We have to work around various
@@ -1232,7 +1234,7 @@
 
 	/* only beacons from our BSSID */
 	if (!ieee80211_is_beacon(mgmt->frame_control) ||
-	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
+	    !ether_addr_equal(mgmt->bssid, common->curbssid))
 		return;
 
 	ewma_add(&ah->ah_beacon_rssi_avg, rssi);
@@ -3038,3 +3040,23 @@
 	ath5k_hw_set_rx_filter(ah, rfilt);
 	ah->filter_flags = rfilt;
 }
+
+void _ath5k_printk(const struct ath5k_hw *ah, const char *level,
+		   const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	if (ah && ah->hw)
+		printk("%s" pr_fmt("%s: %pV"),
+		       level, wiphy_name(ah->hw->wiphy), &vaf);
+	else
+		printk("%s" pr_fmt("%pV"), level, &vaf);
+
+	va_end(args);
+}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index e5e8f45..9d00dab 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -57,6 +57,9 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGES.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/export.h>
 #include <linux/moduleparam.h>
 
@@ -247,10 +250,10 @@
 
 	if (strncmp(buf, "disable", 7) == 0) {
 		AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
-		printk(KERN_INFO "debugfs disable beacons\n");
+		pr_info("debugfs disable beacons\n");
 	} else if (strncmp(buf, "enable", 6) == 0) {
 		AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
-		printk(KERN_INFO "debugfs enable beacons\n");
+		pr_info("debugfs enable beacons\n");
 	}
 	return count;
 }
@@ -450,19 +453,19 @@
 
 	if (strncmp(buf, "diversity", 9) == 0) {
 		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
-		printk(KERN_INFO "ath5k debug: enable diversity\n");
+		pr_info("debug: enable diversity\n");
 	} else if (strncmp(buf, "fixed-a", 7) == 0) {
 		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
-		printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
+		pr_info("debug: fixed antenna A\n");
 	} else if (strncmp(buf, "fixed-b", 7) == 0) {
 		ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
-		printk(KERN_INFO "ath5k debug: fixed antenna B\n");
+		pr_info("debug: fixed antenna B\n");
 	} else if (strncmp(buf, "clear", 5) == 0) {
 		for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
 			ah->stats.antenna_rx[i] = 0;
 			ah->stats.antenna_tx[i] = 0;
 		}
-		printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
+		pr_info("debug: cleared antenna stats\n");
 	}
 	return count;
 }
@@ -632,7 +635,7 @@
 		st->txerr_fifo = 0;
 		st->txerr_filt = 0;
 		st->tx_all_count = 0;
-		printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n");
+		pr_info("debug: cleared frameerrors stats\n");
 	}
 	return count;
 }
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index f8bfa3a..bd8d439 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -21,6 +21,8 @@
  Hardware Descriptor Functions
 \******************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
@@ -441,10 +443,8 @@
 				struct ath5k_desc *desc,
 				struct ath5k_tx_status *ts)
 {
-	struct ath5k_hw_2w_tx_ctl *tx_ctl;
 	struct ath5k_hw_tx_status *tx_status;
 
-	tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
 	tx_status = &desc->ud.ds_tx5210.tx_stat;
 
 	/* No frame has been send or error */
@@ -495,11 +495,9 @@
 				struct ath5k_desc *desc,
 				struct ath5k_tx_status *ts)
 {
-	struct ath5k_hw_4w_tx_ctl *tx_ctl;
 	struct ath5k_hw_tx_status *tx_status;
 	u32 txstat0, txstat1;
 
-	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
 	tx_status = &desc->ud.ds_tx5212.tx_stat;
 
 	txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 5cc9aa8..ce86f15 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -29,6 +29,8 @@
  * status registers (ISR).
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index cd708c1..4026c90 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -21,6 +21,8 @@
 * EEPROM access functions and helpers *
 \*************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
 
 #include "ath5k.h"
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index a1ea78e..ee1c2fa 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -19,6 +19,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
@@ -1574,8 +1576,7 @@
 
 		/* AR5K_MODE_11B */
 		if (mode > 2) {
-			ATH5K_ERR(ah,
-				"unsupported channel mode: %d\n", mode);
+			ATH5K_ERR(ah, "unsupported channel mode: %d\n", mode);
 			return -EINVAL;
 		}
 
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index c1151c7..b9f708a 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -39,6 +39,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include "ath5k.h"
 
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 5c53299..22b80af 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -41,6 +41,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <net/mac80211.h>
 #include <asm/unaligned.h>
 
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 849fa06..dff48fb 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -14,6 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/nl80211.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
@@ -45,6 +47,7 @@
 	{ PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
 	{ PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
 	{ PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
+	{ PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
@@ -337,28 +340,4 @@
 	.driver.pm	= ATH5K_PM_OPS,
 };
 
-/*
- * Module init/exit functions
- */
-static int __init
-init_ath5k_pci(void)
-{
-	int ret;
-
-	ret = pci_register_driver(&ath5k_pci_driver);
-	if (ret) {
-		printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-static void __exit
-exit_ath5k_pci(void)
-{
-	pci_unregister_driver(&ath5k_pci_driver);
-}
-
-module_init(init_ath5k_pci);
-module_exit(exit_ath5k_pci);
+module_pci_driver(ath5k_pci_driver);
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index cebfd6f..1f16b42 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -110,7 +110,7 @@
  * bwmodes.
  */
 int
-ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
 		int len, struct ieee80211_rate *rate, bool shortpre)
 {
 	int sifs, preamble, plcp_bits, sym_time;
@@ -120,7 +120,7 @@
 	/* Fallback */
 	if (!ah->ah_bwmode) {
 		__le16 raw_dur = ieee80211_generic_frame_duration(ah->hw,
-					NULL, len, rate);
+					NULL, band, len, rate);
 
 		/* subtract difference between long and short preamble */
 		dur = le16_to_cpu(raw_dur);
@@ -302,14 +302,15 @@
 		 * actual rate for this rate. See mac80211 tx.c
 		 * ieee80211_duration() for a brief description of
 		 * what rate we should choose to TX ACKs. */
-		tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
+		tx_time = ath5k_hw_get_frame_duration(ah, band, 10,
+					rate, false);
 
 		ath5k_hw_reg_write(ah, tx_time, reg);
 
 		if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
 			continue;
 
-		tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, true);
+		tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, true);
 		ath5k_hw_reg_write(ah, tx_time,
 			reg + (AR5K_SET_SHORT_PREAMBLE << 2));
 	}
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 3a28454..8b71a2d 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -22,6 +22,8 @@
 * PHY related functions *
 \***********************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 30b50f9..65fe929 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -20,6 +20,8 @@
 Queue Control Unit, DCF Control Unit Functions
 \********************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
@@ -563,6 +565,7 @@
 int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
 {
 	struct ieee80211_channel *channel = ah->ah_current_channel;
+	enum ieee80211_band band;
 	struct ieee80211_rate *rate;
 	u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
 	u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
@@ -598,11 +601,12 @@
 	 * Also we have different lowest rate for 802.11a
 	 */
 	if (channel->band == IEEE80211_BAND_5GHZ)
-		rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
+		band = IEEE80211_BAND_5GHZ;
 	else
-		rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
+		band = IEEE80211_BAND_2GHZ;
 
-	ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
+	rate = &ah->sbands[band].bitrates[0];
+	ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
 
 	/* ack_tx_time includes an SIFS already */
 	eifs = ack_tx_time + sifs + 2 * slot_time;
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 200f165..0c2dd47 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -23,6 +23,8 @@
   Reset function and helpers
 \****************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <asm/unaligned.h>
 
 #include <linux/pci.h>		/* To determine if a card is pci-e */
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 9364da7..04cf0ca 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/pci.h>
 
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 85746c3e..8cae888 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -25,7 +25,8 @@
 obj-$(CONFIG_ATH6KL) += ath6kl_core.o
 ath6kl_core-y += debug.o
 ath6kl_core-y += hif.o
-ath6kl_core-y += htc.o
+ath6kl_core-y += htc_mbox.o
+ath6kl_core-y += htc_pipe.o
 ath6kl_core-y += bmi.o
 ath6kl_core-y += cfg80211.o
 ath6kl_core-y += init.o
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 00d3895..28a65d3 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -15,6 +15,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/moduleparam.h>
 #include <linux/inetdevice.h>
 #include <linux/export.h>
@@ -49,6 +51,8 @@
 	.max_power      = 30,                       \
 }
 
+#define DEFAULT_BG_SCAN_PERIOD 60
+
 static struct ieee80211_rate ath6kl_rates[] = {
 	RATETAB_ENT(10, 0x1, 0),
 	RATETAB_ENT(20, 0x2, 0),
@@ -69,7 +73,8 @@
 #define ath6kl_g_rates     (ath6kl_rates + 0)
 #define ath6kl_g_rates_size    12
 
-#define ath6kl_g_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
+#define ath6kl_g_htcap IEEE80211_HT_CAP_SGI_20
+#define ath6kl_a_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
 			IEEE80211_HT_CAP_SGI_20		 | \
 			IEEE80211_HT_CAP_SGI_40)
 
@@ -126,7 +131,7 @@
 	.channels = ath6kl_5ghz_a_channels,
 	.n_bitrates = ath6kl_a_rates_size,
 	.bitrates = ath6kl_a_rates,
-	.ht_cap.cap = ath6kl_g_htcap,
+	.ht_cap.cap = ath6kl_a_htcap,
 	.ht_cap.ht_supported = true,
 };
 
@@ -607,6 +612,17 @@
 					vif->req_bssid, vif->ch_hint,
 					ar->connect_ctrl_flags, nw_subtype);
 
+	/* disable background scan if period is 0 */
+	if (sme->bg_scan_period == 0)
+		sme->bg_scan_period = 0xffff;
+
+	/* configure default value if not specified */
+	if (sme->bg_scan_period == -1)
+		sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD;
+
+	ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0,
+				  sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
+
 	up(&ar->sem);
 
 	if (status == -EINVAL) {
@@ -941,6 +957,8 @@
 	if (test_bit(CONNECTED, &vif->flags))
 		force_fg_scan = 1;
 
+	vif->scan_req = request;
+
 	if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
 		     ar->fw_capabilities)) {
 		/*
@@ -963,10 +981,10 @@
 						ATH6KL_FG_SCAN_INTERVAL,
 						n_channels, channels);
 	}
-	if (ret)
+	if (ret) {
 		ath6kl_err("wmi_startscan_cmd failed\n");
-	else
-		vif->scan_req = request;
+		vif->scan_req = NULL;
+	}
 
 	kfree(channels);
 
@@ -1436,9 +1454,38 @@
 					struct vif_params *params)
 {
 	struct ath6kl_vif *vif = netdev_priv(ndev);
+	int i;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
 
+	/*
+	 * Don't bring up p2p on an interface which is not initialized
+	 * for p2p operation where fw does not have capability to switch
+	 * dynamically between non-p2p and p2p type interface.
+	 */
+	if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+		      vif->ar->fw_capabilities) &&
+	    (type == NL80211_IFTYPE_P2P_CLIENT ||
+	     type == NL80211_IFTYPE_P2P_GO)) {
+		if (vif->ar->vif_max == 1) {
+			if (vif->fw_vif_idx != 0)
+				return -EINVAL;
+			else
+				goto set_iface_type;
+		}
+
+		for (i = vif->ar->max_norm_iface; i < vif->ar->vif_max; i++) {
+			if (i == vif->fw_vif_idx)
+				break;
+		}
+
+		if (i == vif->ar->vif_max) {
+			ath6kl_err("Invalid interface to bring up P2P\n");
+			return -EINVAL;
+		}
+	}
+
+set_iface_type:
 	switch (type) {
 	case NL80211_IFTYPE_STATION:
 		vif->next_mode = INFRA_NETWORK;
@@ -1924,12 +1971,61 @@
 	return 0;
 }
 
+static int is_hsleep_mode_procsed(struct ath6kl_vif *vif)
+{
+	return test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
+}
+
+static bool is_ctrl_ep_empty(struct ath6kl *ar)
+{
+	return !ar->tx_pending[ar->ctrl_ep];
+}
+
+static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif)
+{
+	int ret, left;
+
+	clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
+
+	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+						 ATH6KL_HOST_MODE_ASLEEP);
+	if (ret)
+		return ret;
+
+	left = wait_event_interruptible_timeout(ar->event_wq,
+						is_hsleep_mode_procsed(vif),
+						WMI_TIMEOUT);
+	if (left == 0) {
+		ath6kl_warn("timeout, didn't get host sleep cmd processed event\n");
+		ret = -ETIMEDOUT;
+	} else if (left < 0) {
+		ath6kl_warn("error while waiting for host sleep cmd processed event %d\n",
+			    left);
+		ret = left;
+	}
+
+	if (ar->tx_pending[ar->ctrl_ep]) {
+		left = wait_event_interruptible_timeout(ar->event_wq,
+							is_ctrl_ep_empty(ar),
+							WMI_TIMEOUT);
+		if (left == 0) {
+			ath6kl_warn("clear wmi ctrl data timeout\n");
+			ret = -ETIMEDOUT;
+		} else if (left < 0) {
+			ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
+			ret = left;
+		}
+	}
+
+	return ret;
+}
+
 static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
 {
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
 	struct ath6kl_vif *vif;
-	int ret, left;
+	int ret;
 	u32 filter = 0;
 	u16 i, bmiss_time;
 	u8 index = 0;
@@ -2030,39 +2126,11 @@
 	if (ret)
 		return ret;
 
-	clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
-
-	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
-						 ATH6KL_HOST_MODE_ASLEEP);
+	ret = ath6kl_cfg80211_host_sleep(ar, vif);
 	if (ret)
 		return ret;
 
-	left = wait_event_interruptible_timeout(ar->event_wq,
-			test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags),
-			WMI_TIMEOUT);
-	if (left == 0) {
-		ath6kl_warn("timeout, didn't get host sleep cmd "
-			    "processed event\n");
-		ret = -ETIMEDOUT;
-	} else if (left < 0) {
-		ath6kl_warn("error while waiting for host sleep cmd "
-			    "processed event %d\n", left);
-		ret = left;
-	}
-
-	if (ar->tx_pending[ar->ctrl_ep]) {
-		left = wait_event_interruptible_timeout(ar->event_wq,
-				ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
-		if (left == 0) {
-			ath6kl_warn("clear wmi ctrl data timeout\n");
-			ret = -ETIMEDOUT;
-		} else if (left < 0) {
-			ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
-			ret = left;
-		}
-	}
-
-	return ret;
+	return 0;
 }
 
 static int ath6kl_wow_resume(struct ath6kl *ar)
@@ -2109,10 +2177,82 @@
 	return 0;
 }
 
+static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
+{
+	struct ath6kl_vif *vif;
+	int ret;
+
+	vif = ath6kl_vif_first(ar);
+	if (!vif)
+		return -EIO;
+
+	if (!ath6kl_cfg80211_ready(vif))
+		return -EIO;
+
+	ath6kl_cfg80211_stop_all(ar);
+
+	/* Save the current power mode before enabling power save */
+	ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+	ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
+	if (ret)
+		return ret;
+
+	/* Disable WOW mode */
+	ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+					  ATH6KL_WOW_MODE_DISABLE,
+					  0, 0);
+	if (ret)
+		return ret;
+
+	/* Flush all non control pkts in TX path */
+	ath6kl_tx_data_cleanup(ar);
+
+	ret = ath6kl_cfg80211_host_sleep(ar, vif);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int ath6kl_cfg80211_deepsleep_resume(struct ath6kl *ar)
+{
+	struct ath6kl_vif *vif;
+	int ret;
+
+	vif = ath6kl_vif_first(ar);
+
+	if (!vif)
+		return -EIO;
+
+	if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
+		ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
+					       ar->wmi->saved_pwr_mode);
+		if (ret)
+			return ret;
+	}
+
+	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+						 ATH6KL_HOST_MODE_AWAKE);
+	if (ret)
+		return ret;
+
+	ar->state = ATH6KL_STATE_ON;
+
+	/* Reset scan parameter to default values */
+	ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+					0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 int ath6kl_cfg80211_suspend(struct ath6kl *ar,
 			    enum ath6kl_cfg_suspend_mode mode,
 			    struct cfg80211_wowlan *wow)
 {
+	struct ath6kl_vif *vif;
 	enum ath6kl_state prev_state;
 	int ret;
 
@@ -2137,15 +2277,12 @@
 
 	case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
 
-		ath6kl_cfg80211_stop_all(ar);
+		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep suspend\n");
 
-		/* save the current power mode before enabling power save */
-		ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
-
-		ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
+		ret = ath6kl_cfg80211_deepsleep_suspend(ar);
 		if (ret) {
-			ath6kl_warn("wmi powermode command failed during suspend: %d\n",
-				    ret);
+			ath6kl_err("deepsleep suspend failed: %d\n", ret);
+			return ret;
 		}
 
 		ar->state = ATH6KL_STATE_DEEPSLEEP;
@@ -2185,6 +2322,9 @@
 		break;
 	}
 
+	list_for_each_entry(vif, &ar->vif_list, list)
+		ath6kl_cfg80211_scan_complete_event(vif, true);
+
 	return 0;
 }
 EXPORT_SYMBOL(ath6kl_cfg80211_suspend);
@@ -2206,17 +2346,13 @@
 		break;
 
 	case ATH6KL_STATE_DEEPSLEEP:
-		if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
-			ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
-						       ar->wmi->saved_pwr_mode);
-			if (ret) {
-				ath6kl_warn("wmi powermode command failed during resume: %d\n",
-					    ret);
-			}
+		ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep resume\n");
+
+		ret = ath6kl_cfg80211_deepsleep_resume(ar);
+		if (ret) {
+			ath6kl_warn("deep sleep resume failed: %d\n", ret);
+			return ret;
 		}
-
-		ar->state = ATH6KL_STATE_ON;
-
 		break;
 
 	case ATH6KL_STATE_CUTPOWER:
@@ -2290,31 +2426,25 @@
 }
 #endif
 
-static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
-			      struct ieee80211_channel *chan,
-			      enum nl80211_channel_type channel_type)
+static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
+			    bool ht_enable)
 {
-	struct ath6kl_vif *vif;
+	struct ath6kl_htcap *htcap = &vif->htcap;
 
-	/*
-	 * 'dev' could be NULL if a channel change is required for the hardware
-	 * device itself, instead of a particular VIF.
-	 *
-	 * FIXME: To be handled properly when monitor mode is supported.
-	 */
-	if (!dev)
-		return -EBUSY;
+	if (htcap->ht_enable == ht_enable)
+		return 0;
 
-	vif = netdev_priv(dev);
+	if (ht_enable) {
+		/* Set default ht capabilities */
+		htcap->ht_enable = true;
+		htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ?
+				   ath6kl_g_htcap : ath6kl_a_htcap;
+		htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
+	} else /* Disable ht */
+		memset(htcap, 0, sizeof(*htcap));
 
-	if (!ath6kl_cfg80211_ready(vif))
-		return -EIO;
-
-	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
-		   __func__, chan->center_freq, chan->hw_value);
-	vif->next_chan = chan->center_freq;
-
-	return 0;
+	return ath6kl_wmi_set_htcap_cmd(vif->ar->wmi, vif->fw_vif_idx,
+					band, htcap);
 }
 
 static bool ath6kl_is_p2p_ie(const u8 *pos)
@@ -2391,6 +2521,81 @@
 	return 0;
 }
 
+static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
+			      struct ieee80211_channel *chan,
+			      enum nl80211_channel_type channel_type)
+{
+	struct ath6kl_vif *vif;
+
+	/*
+	 * 'dev' could be NULL if a channel change is required for the hardware
+	 * device itself, instead of a particular VIF.
+	 *
+	 * FIXME: To be handled properly when monitor mode is supported.
+	 */
+	if (!dev)
+		return -EBUSY;
+
+	vif = netdev_priv(dev);
+
+	if (!ath6kl_cfg80211_ready(vif))
+		return -EIO;
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
+		   __func__, chan->center_freq, chan->hw_value);
+	vif->next_chan = chan->center_freq;
+	vif->next_ch_type = channel_type;
+	vif->next_ch_band = chan->band;
+
+	return 0;
+}
+
+static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
+				u8 *rsn_capab)
+{
+	const u8 *rsn_ie;
+	size_t rsn_ie_len;
+	u16 cnt;
+
+	if (!beacon->tail)
+		return -EINVAL;
+
+	rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, beacon->tail, beacon->tail_len);
+	if (!rsn_ie)
+		return -EINVAL;
+
+	rsn_ie_len = *(rsn_ie + 1);
+	/* skip element id and length */
+	rsn_ie += 2;
+
+	/* skip version, group cipher */
+	if (rsn_ie_len < 6)
+		return -EINVAL;
+	rsn_ie +=  6;
+	rsn_ie_len -= 6;
+
+	/* skip pairwise cipher suite */
+	if (rsn_ie_len < 2)
+		return -EINVAL;
+	cnt = *((u16 *) rsn_ie);
+	rsn_ie += (2 + cnt * 4);
+	rsn_ie_len -= (2 + cnt * 4);
+
+	/* skip akm suite */
+	if (rsn_ie_len < 2)
+		return -EINVAL;
+	cnt = *((u16 *) rsn_ie);
+	rsn_ie += (2 + cnt * 4);
+	rsn_ie_len -= (2 + cnt * 4);
+
+	if (rsn_ie_len < 2)
+		return -EINVAL;
+
+	memcpy(rsn_capab, rsn_ie, 2);
+
+	return 0;
+}
+
 static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
 			   struct cfg80211_ap_settings *info)
 {
@@ -2403,6 +2608,7 @@
 	struct wmi_connect_cmd p;
 	int res;
 	int i, ret;
+	u16 rsn_capab = 0;
 
 	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__);
 
@@ -2532,6 +2738,34 @@
 		p.nw_subtype = SUBTYPE_NONE;
 	}
 
+	if (info->inactivity_timeout) {
+		res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx,
+						  info->inactivity_timeout);
+		if (res < 0)
+			return res;
+	}
+
+	if (ath6kl_set_htcap(vif, vif->next_ch_band,
+			     vif->next_ch_type != NL80211_CHAN_NO_HT))
+		return -EIO;
+
+	/*
+	 * Get the PTKSA replay counter in the RSN IE. Supplicant
+	 * will use the RSN IE in M3 message and firmware has to
+	 * advertise the same in beacon/probe response. Send
+	 * the complete RSN IE capability field to firmware
+	 */
+	if (!ath6kl_get_rsn_capab(&info->beacon, (u8 *) &rsn_capab) &&
+	    test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+		     ar->fw_capabilities)) {
+		res = ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx,
+					    WLAN_EID_RSN, WMI_RSN_IE_CAPB,
+					    (const u8 *) &rsn_capab,
+					    sizeof(rsn_capab));
+		if (res < 0)
+			return res;
+	}
+
 	res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
 	if (res < 0)
 		return res;
@@ -2566,6 +2800,13 @@
 	ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
 	clear_bit(CONNECTED, &vif->flags);
 
+	/* Restore ht setting in firmware */
+	if (ath6kl_set_htcap(vif, IEEE80211_BAND_2GHZ, true))
+		return -EIO;
+
+	if (ath6kl_set_htcap(vif, IEEE80211_BAND_5GHZ, true))
+		return -EIO;
+
 	return 0;
 }
 
@@ -2747,6 +2988,21 @@
 	return false;
 }
 
+/*
+ * Check whether the SSID in the probe response is longer than the P2P
+ * wildcard SSID "DIRECT-", i.e. whether the interface acts as a P2P GO.
+ */
+static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
+{
+	const struct ieee80211_mgmt *mgmt;
+	mgmt = (const struct ieee80211_mgmt *) buf;
+
+	/* variable[1] contains the SSID tag length */
+	if (buf + len >= &mgmt->u.probe_resp.variable[1] &&
+	    (mgmt->u.probe_resp.variable[1] > P2P_WILDCARD_SSID_LEN)) {
+		return true;
+	}
+
+	return false;
+}
+
 static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
 			  struct ieee80211_channel *chan, bool offchan,
 			  enum nl80211_channel_type channel_type,
@@ -2761,11 +3017,11 @@
 	bool more_data, queued;
 
 	mgmt = (const struct ieee80211_mgmt *) buf;
-	if (buf + len >= mgmt->u.probe_resp.variable &&
-	    vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
-	    ieee80211_is_probe_resp(mgmt->frame_control)) {
+	if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
+	    ieee80211_is_probe_resp(mgmt->frame_control) &&
+	    ath6kl_is_p2p_go_ssid(buf, len)) {
 		/*
-		 * Send Probe Response frame in AP mode using a separate WMI
+		 * Send Probe Response frame in GO mode using a separate WMI
 		 * command to allow the target to fill in the generic IEs.
 		 */
 		*cookie = 0; /* TX status not supported */
@@ -2833,6 +3089,8 @@
 	if (vif->sme_state != SME_DISCONNECTED)
 		return -EBUSY;
 
+	ath6kl_cfg80211_scan_complete_event(vif, true);
+
 	for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) {
 		ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
 					  i, DISABLE_SSID_FLAG,
@@ -3094,6 +3352,7 @@
 	vif->next_mode = nw_type;
 	vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
 	vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
+	vif->htcap.ht_enable = true;
 
 	memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
 	if (fw_vif_idx != 0)
@@ -3181,6 +3440,10 @@
 	if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
 		ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
 
+	if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
+		     ar->fw_capabilities))
+		ar->wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
+
 	ar->wiphy->probe_resp_offload =
 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index a60e78c..98a8861 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -22,7 +22,8 @@
 
 #define ATH6KL_MAX_IE			256
 
-extern int ath6kl_printk(const char *level, const char *fmt, ...);
+extern __printf(2, 3)
+int ath6kl_printk(const char *level, const char *fmt, ...);
 
 /*
  * Reflects the version of binary interface exposed by ATH6KL target
@@ -77,6 +78,7 @@
 
 struct htc_endpoint_credit_dist;
 struct ath6kl;
+struct ath6kl_htcap;
 enum htc_credit_dist_reason;
 struct ath6kl_htc_credit_info;
 
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 45e641f..fdb3b1d 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -20,9 +20,11 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 
 #include "debug.h"
 #include "hif-ops.h"
+#include "htc-ops.h"
 #include "cfg80211.h"
 
 unsigned int debug_mask;
@@ -39,12 +41,36 @@
 module_param(ath6kl_p2p, uint, 0644);
 module_param(testmode, uint, 0644);
 
-int ath6kl_core_init(struct ath6kl *ar)
+void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
+{
+	ath6kl_htc_tx_complete(ar, skb);
+}
+EXPORT_SYMBOL(ath6kl_core_tx_complete);
+
+void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe)
+{
+	ath6kl_htc_rx_complete(ar, skb, pipe);
+}
+EXPORT_SYMBOL(ath6kl_core_rx_complete);
+
+int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
 {
 	struct ath6kl_bmi_target_info targ_info;
 	struct net_device *ndev;
 	int ret = 0, i;
 
+	switch (htc_type) {
+	case ATH6KL_HTC_TYPE_MBOX:
+		ath6kl_htc_mbox_attach(ar);
+		break;
+	case ATH6KL_HTC_TYPE_PIPE:
+		ath6kl_htc_pipe_attach(ar);
+		break;
+	default:
+		WARN_ON(1);
+		return -ENOMEM;
+	}
+
 	ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
 	if (!ar->ath6kl_wq)
 		return -ENOMEM;
@@ -280,7 +306,7 @@
 
 	kfree(ar->fw_board);
 	kfree(ar->fw_otp);
-	kfree(ar->fw);
+	vfree(ar->fw);
 	kfree(ar->fw_patch);
 	kfree(ar->fw_testscript);
 
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index f1dd890..9d67964 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -91,6 +91,15 @@
 	 */
 	ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
 
+	/*
+	 * Firmware can clean up inactive stations
+	 * in AP mode.
+	 */
+	ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
+
+	/* Firmware can override the RSN capabilities field of the RSN IE */
+	ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+
 	/* this needs to be last */
 	ATH6KL_FW_CAPABILITY_MAX,
 };
@@ -205,6 +214,8 @@
 #define ATH6KL_CONF_ENABLE_TX_BURST		BIT(3)
 #define ATH6KL_CONF_UART_DEBUG			BIT(4)
 
+#define P2P_WILDCARD_SSID_LEN			7 /* DIRECT- */
+
 enum wlan_low_pwr_state {
 	WLAN_POWER_STATE_ON,
 	WLAN_POWER_STATE_CUT_PWR,
@@ -454,6 +465,11 @@
 	ATH6KL_HIF_TYPE_USB,
 };
 
+enum ath6kl_htc_type {
+	ATH6KL_HTC_TYPE_MBOX,
+	ATH6KL_HTC_TYPE_PIPE,
+};
+
 /* Max number of filters that hw supports */
 #define ATH6K_MAX_MC_FILTERS_PER_LIST 7
 struct ath6kl_mc_filter {
@@ -461,6 +477,12 @@
 	char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
 };
 
+struct ath6kl_htcap {
+	bool ht_enable;
+	u8 ampdu_factor;
+	unsigned short cap_info;
+};
+
 /*
  * Driver's maximum limit, note that some firmwares support only one vif
  * and the runtime (current) limit must be checked from ar->vif_max.
@@ -509,6 +531,7 @@
 	struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
 	struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
 	struct aggr_info *aggr_cntxt;
+	struct ath6kl_htcap htcap;
 
 	struct timer_list disconnect_timer;
 	struct timer_list sched_scan_timer;
@@ -521,6 +544,8 @@
 	u32 send_action_id;
 	bool probe_req_report;
 	u16 next_chan;
+	enum nl80211_channel_type next_ch_type;
+	enum ieee80211_band next_ch_band;
 	u16 assoc_bss_beacon_int;
 	u16 listen_intvl_t;
 	u16 bmiss_time_t;
@@ -568,6 +593,7 @@
 
 	struct ath6kl_bmi bmi;
 	const struct ath6kl_hif_ops *hif_ops;
+	const struct ath6kl_htc_ops *htc_ops;
 	struct wmi *wmi;
 	int tx_pending[ENDPOINT_MAX];
 	int total_tx_data_pend;
@@ -746,7 +772,8 @@
 void ath6kl_cookie_init(struct ath6kl *ar);
 void ath6kl_cookie_cleanup(struct ath6kl *ar);
 void ath6kl_rx(struct htc_target *target, struct htc_packet *packet);
-void ath6kl_tx_complete(void *context, struct list_head *packet_queue);
+void ath6kl_tx_complete(struct htc_target *context,
+			struct list_head *packet_queue);
 enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
 					       struct htc_packet *packet);
 void ath6kl_stop_txrx(struct ath6kl *ar);
@@ -821,8 +848,11 @@
 
 void ath6kl_check_wow_status(struct ath6kl *ar);
 
+void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb);
+void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
+
 struct ath6kl *ath6kl_core_create(struct device *dev);
-int ath6kl_core_init(struct ath6kl *ar);
+int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type);
 void ath6kl_core_cleanup(struct ath6kl *ar);
 void ath6kl_core_destroy(struct ath6kl *ar);
 
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index d01403a..1b76aff 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -616,6 +616,12 @@
 			 "Num disconnects", tgt_stats->cs_discon_cnt);
 	len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
 			 "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi);
+	len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+			 "ARP pkt received", tgt_stats->arp_received);
+	len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+			 "ARP pkt matched", tgt_stats->arp_matched);
+	len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+			 "ARP pkt replied", tgt_stats->arp_replied);
 
 	if (len > buf_len)
 		len = buf_len;
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 1803a0b..49639d8 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -43,6 +43,7 @@
 	ATH6KL_DBG_WMI_DUMP	= BIT(19),
 	ATH6KL_DBG_SUSPEND	= BIT(20),
 	ATH6KL_DBG_USB		= BIT(21),
+	ATH6KL_DBG_USB_BULK	= BIT(22),
 	ATH6KL_DBG_ANY	        = 0xffffffff  /* enable all logs */
 };
 
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
index fd84086..8c9e72d 100644
--- a/drivers/net/wireless/ath/ath6kl/hif-ops.h
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -150,4 +150,38 @@
 	ar->hif_ops->stop(ar);
 }
 
+static inline int ath6kl_hif_pipe_send(struct ath6kl *ar,
+				       u8 pipe, struct sk_buff *hdr_buf,
+				       struct sk_buff *buf)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe send\n");
+
+	return ar->hif_ops->pipe_send(ar, pipe, hdr_buf, buf);
+}
+
+static inline void ath6kl_hif_pipe_get_default(struct ath6kl *ar,
+					       u8 *ul_pipe, u8 *dl_pipe)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n");
+
+	ar->hif_ops->pipe_get_default(ar, ul_pipe, dl_pipe);
+}
+
+static inline int ath6kl_hif_pipe_map_service(struct ath6kl *ar,
+					      u16 service_id, u8 *ul_pipe,
+					      u8 *dl_pipe)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe map service\n");
+
+	return ar->hif_ops->pipe_map_service(ar, service_id, ul_pipe, dl_pipe);
+}
+
+static inline u16 ath6kl_hif_pipe_get_free_queue_number(struct ath6kl *ar,
+							u8 pipe)
+{
+	ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get free queue number\n");
+
+	return ar->hif_ops->pipe_get_free_queue_number(ar, pipe);
+}
+
 #endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 20ed6b7..61f6b21 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -256,6 +256,12 @@
 	int (*power_on)(struct ath6kl *ar);
 	int (*power_off)(struct ath6kl *ar);
 	void (*stop)(struct ath6kl *ar);
+	int (*pipe_send)(struct ath6kl *ar, u8 pipe, struct sk_buff *hdr_buf,
+			 struct sk_buff *buf);
+	void (*pipe_get_default)(struct ath6kl *ar, u8 *pipe_ul, u8 *pipe_dl);
+	int (*pipe_map_service)(struct ath6kl *ar, u16 service_id, u8 *pipe_ul,
+				u8 *pipe_dl);
+	u16 (*pipe_get_free_queue_number)(struct ath6kl *ar, u8 pipe);
 };
 
 int ath6kl_hif_setup(struct ath6kl_device *dev);
diff --git a/drivers/net/wireless/ath/ath6kl/htc-ops.h b/drivers/net/wireless/ath/ath6kl/htc-ops.h
new file mode 100644
index 0000000..2d4eed5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc-ops.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_OPS_H
+#define HTC_OPS_H
+
+#include "htc.h"
+#include "debug.h"
+
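+/*
+ * Thin wrappers that dispatch HTC calls through ar->htc_ops so that
+ * callers stay agnostic of whether the mbox or the pipe HTC
+ * implementation has been attached.
+ */
+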
+static inline void *ath6kl_htc_create(struct ath6kl *ar)
+{
+	return ar->htc_ops->create(ar);
+}
+
+static inline int ath6kl_htc_wait_target(struct htc_target *target)
+{
+	return target->dev->ar->htc_ops->wait_target(target);
+}
+
+static inline int ath6kl_htc_start(struct htc_target *target)
+{
+	return target->dev->ar->htc_ops->start(target);
+}
+
+static inline int ath6kl_htc_conn_service(struct htc_target *target,
+					  struct htc_service_connect_req *req,
+					  struct htc_service_connect_resp *resp)
+{
+	return target->dev->ar->htc_ops->conn_service(target, req, resp);
+}
+
+static inline int ath6kl_htc_tx(struct htc_target *target,
+				struct htc_packet *packet)
+{
+	return target->dev->ar->htc_ops->tx(target, packet);
+}
+
+static inline void ath6kl_htc_stop(struct htc_target *target)
+{
+	return target->dev->ar->htc_ops->stop(target);
+}
+
+static inline void ath6kl_htc_cleanup(struct htc_target *target)
+{
+	return target->dev->ar->htc_ops->cleanup(target);
+}
+
+static inline void ath6kl_htc_flush_txep(struct htc_target *target,
+					 enum htc_endpoint_id endpoint,
+					 u16 tag)
+{
+	return target->dev->ar->htc_ops->flush_txep(target, endpoint, tag);
+}
+
+static inline void ath6kl_htc_flush_rx_buf(struct htc_target *target)
+{
+	return target->dev->ar->htc_ops->flush_rx_buf(target);
+}
+
+static inline void ath6kl_htc_activity_changed(struct htc_target *target,
+					       enum htc_endpoint_id endpoint,
+					       bool active)
+{
+	return target->dev->ar->htc_ops->activity_changed(target, endpoint,
+							  active);
+}
+
+static inline int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
+					   enum htc_endpoint_id endpoint)
+{
+	return target->dev->ar->htc_ops->get_rxbuf_num(target, endpoint);
+}
+
+static inline int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
+						struct list_head *pktq)
+{
+	return target->dev->ar->htc_ops->add_rxbuf_multiple(target, pktq);
+}
+
+static inline int ath6kl_htc_credit_setup(struct htc_target *target,
+					  struct ath6kl_htc_credit_info *info)
+{
+	return target->dev->ar->htc_ops->credit_setup(target, info);
+}
+
+static inline void ath6kl_htc_tx_complete(struct ath6kl *ar,
+					  struct sk_buff *skb)
+{
+	ar->htc_ops->tx_complete(ar, skb);
+}
+
+
+static inline void ath6kl_htc_rx_complete(struct ath6kl *ar,
+					  struct sk_buff *skb, u8 pipe)
+{
+	ar->htc_ops->rx_complete(ar, skb, pipe);
+}
+
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 5027ccc..a2c8ff8 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -25,6 +25,7 @@
 /* send direction */
 #define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
 #define HTC_FLAGS_SEND_BUNDLE        (1 << 1)
+#define HTC_FLAGS_TX_FIXUP_NETBUF    (1 << 2)
 
 /* receive direction */
 #define HTC_FLG_RX_UNUSED        (1 << 0)
@@ -56,6 +57,10 @@
 #define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT	0x2
 #define HTC_CONN_FLGS_REDUCE_CRED_DRIB		0x4
 #define HTC_CONN_FLGS_THRESH_MASK		0x3
+/* disable credit flow control on a specific service */
+#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL          (1 << 3)
+#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT    8
+#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK     0xFF00
 
 /* connect response status codes */
 #define HTC_SERVICE_SUCCESS      0
@@ -75,6 +80,7 @@
 #define HTC_RECORD_LOOKAHEAD_BUNDLE 3
 
 #define HTC_SETUP_COMP_FLG_RX_BNDL_EN     (1 << 0)
+#define HTC_SETUP_COMP_FLG_DISABLE_TX_CREDIT_FLOW (1 << 1)
 
 #define MAKE_SERVICE_ID(group, index) \
 	(int)(((int)group << 8) | (int)(index))
@@ -109,6 +115,8 @@
 
 /* HTC operational parameters */
 #define HTC_TARGET_RESPONSE_TIMEOUT        2000	/* in ms */
+#define HTC_TARGET_RESPONSE_POLL_WAIT      10
+#define HTC_TARGET_RESPONSE_POLL_COUNT     200
 #define HTC_TARGET_DEBUG_INTR_MASK         0x01
 #define HTC_TARGET_CREDIT_INTR_MASK        0xF0
 
@@ -128,6 +136,7 @@
 
 #define HTC_RECV_WAIT_BUFFERS        (1 << 0)
 #define HTC_OP_STATE_STOPPING        (1 << 0)
+#define HTC_OP_STATE_SETUP_COMPLETE  (1 << 1)
 
 /*
  * The frame header length and message formats defined herein were selected
@@ -311,6 +320,14 @@
 
 	void (*completion) (struct htc_target *, struct htc_packet *);
 	struct htc_target *context;
+
+	/*
+	 * Optimization for network-oriented data: the HTC packet can
+	 * carry the network buffer (skb) corresponding to the HTC packet,
+	 * so lower layers may optimize the transfer knowing this is a
+	 * network buffer.
+	 */
+	struct sk_buff *skb;
 };
 
 enum htc_send_full_action {
@@ -319,12 +336,14 @@
 };
 
 struct htc_ep_callbacks {
+	void (*tx_complete) (struct htc_target *, struct htc_packet *);
 	void (*rx) (struct htc_target *, struct htc_packet *);
 	void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
 	enum htc_send_full_action (*tx_full) (struct htc_target *,
 					      struct htc_packet *);
 	struct htc_packet *(*rx_allocthresh) (struct htc_target *,
 					      enum htc_endpoint_id, int);
+	void (*tx_comp_multi) (struct htc_target *, struct list_head *);
 	int rx_alloc_thresh;
 	int rx_refill_thresh;
 };
@@ -502,6 +521,13 @@
 	u32 conn_flags;
 	struct htc_endpoint_stats ep_st;
 	u16 tx_drop_packet_threshold;
+
+	struct {
+		u8 pipeid_ul;
+		u8 pipeid_dl;
+		struct list_head tx_lookup_queue;
+		bool tx_credit_flow_enabled;
+	} pipe;
 };
 
 struct htc_control_buffer {
@@ -509,6 +535,42 @@
 	u8 *buf;
 };
 
+struct htc_pipe_txcredit_alloc {
+	u16 service_id;
+	u8 credit_alloc;
+};
+
+enum htc_send_queue_result {
+	HTC_SEND_QUEUE_OK = 0,	/* packet was queued */
+	HTC_SEND_QUEUE_DROP = 1,	/* this packet should be dropped */
+};
+
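+/*
+ * Per-implementation HTC operations, filled in by ath6kl_htc_mbox_attach()
+ * or ath6kl_htc_pipe_attach() and invoked through the ath6kl_htc_*()
+ * wrappers in htc-ops.h.
+ */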
+struct ath6kl_htc_ops {
+	void* (*create)(struct ath6kl *ar);
+	int (*wait_target)(struct htc_target *target);
+	int (*start)(struct htc_target *target);
+	int (*conn_service)(struct htc_target *target,
+			    struct htc_service_connect_req *req,
+			    struct htc_service_connect_resp *resp);
+	int  (*tx)(struct htc_target *target, struct htc_packet *packet);
+	void (*stop)(struct htc_target *target);
+	void (*cleanup)(struct htc_target *target);
+	void (*flush_txep)(struct htc_target *target,
+			   enum htc_endpoint_id endpoint, u16 tag);
+	void (*flush_rx_buf)(struct htc_target *target);
+	void (*activity_changed)(struct htc_target *target,
+				 enum htc_endpoint_id endpoint,
+				 bool active);
+	int (*get_rxbuf_num)(struct htc_target *target,
+			     enum htc_endpoint_id endpoint);
+	int (*add_rxbuf_multiple)(struct htc_target *target,
+				  struct list_head *pktq);
+	int (*credit_setup)(struct htc_target *target,
+			    struct ath6kl_htc_credit_info *cred_info);
+	int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb);
+	int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
+};
+
 struct ath6kl_device;
 
 /* our HTC target state */
@@ -557,36 +619,19 @@
 
 	/* counts the number of Tx without bundling continously per AC */
 	u32 ac_tx_count[WMM_NUM_AC];
+
+	struct {
+		struct htc_packet *htc_packet_pool;
+		u8 ctrl_response_buf[HTC_MAX_CTRL_MSG_LEN];
+		int ctrl_response_len;
+		bool ctrl_response_valid;
+		struct htc_pipe_txcredit_alloc txcredit_alloc[ENDPOINT_MAX];
+	} pipe;
 };
 
-void *ath6kl_htc_create(struct ath6kl *ar);
-void ath6kl_htc_set_credit_dist(struct htc_target *target,
-				struct ath6kl_htc_credit_info *cred_info,
-				u16 svc_pri_order[], int len);
-int ath6kl_htc_wait_target(struct htc_target *target);
-int ath6kl_htc_start(struct htc_target *target);
-int ath6kl_htc_conn_service(struct htc_target *target,
-			    struct htc_service_connect_req *req,
-			    struct htc_service_connect_resp *resp);
-int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet);
-void ath6kl_htc_stop(struct htc_target *target);
-void ath6kl_htc_cleanup(struct htc_target *target);
-void ath6kl_htc_flush_txep(struct htc_target *target,
-			   enum htc_endpoint_id endpoint, u16 tag);
-void ath6kl_htc_flush_rx_buf(struct htc_target *target);
-void ath6kl_htc_indicate_activity_change(struct htc_target *target,
-					 enum htc_endpoint_id endpoint,
-					 bool active);
-int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
-			     enum htc_endpoint_id endpoint);
-int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
-				  struct list_head *pktq);
 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
 				     u32 msg_look_ahead, int *n_pkts);
 
-int ath6kl_credit_setup(void *htc_handle,
-			struct ath6kl_htc_credit_info *cred_info);
-
 static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
 				    u8 *buf, unsigned int len,
 				    enum htc_endpoint_id eid, u16 tag)
@@ -626,4 +671,7 @@
 	return depth;
 }
 
+void ath6kl_htc_pipe_attach(struct ath6kl *ar);
+void ath6kl_htc_mbox_attach(struct ath6kl *ar);
+
 #endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
similarity index 95%
rename from drivers/net/wireless/ath/ath6kl/htc.c
rename to drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 4849d99..065e615 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -23,6 +23,14 @@
 
 #define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
 
+static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
+static void ath6kl_htc_mbox_stop(struct htc_target *target);
+static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
+					      struct list_head *pkt_queue);
+static void ath6kl_htc_set_credit_dist(struct htc_target *target,
+				       struct ath6kl_htc_credit_info *cred_info,
+				       u16 svc_pri_order[], int len);
+
 /* threshold to re-enable Tx bundling for an AC*/
 #define TX_RESUME_BUNDLE_THRESHOLD	1500
 
@@ -130,8 +138,8 @@
 }
 
 /* initialize and setup credit distribution */
-int ath6kl_credit_setup(void *htc_handle,
-			struct ath6kl_htc_credit_info *cred_info)
+static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
+			       struct ath6kl_htc_credit_info *cred_info)
 {
 	u16 servicepriority[5];
 
@@ -144,7 +152,7 @@
 	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
 
 	/* set priority list */
-	ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
+	ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);
 
 	return 0;
 }
@@ -432,7 +440,7 @@
 		   "htc tx complete ep %d pkts %d\n",
 		   endpoint->eid, get_queue_depth(txq));
 
-	ath6kl_tx_complete(endpoint->target->dev->ar, txq);
+	ath6kl_tx_complete(endpoint->target, txq);
 }
 
 static void htc_tx_comp_handler(struct htc_target *target,
@@ -1065,7 +1073,7 @@
 	return status;
 }
 
-void ath6kl_htc_set_credit_dist(struct htc_target *target,
+static void ath6kl_htc_set_credit_dist(struct htc_target *target,
 				struct ath6kl_htc_credit_info *credit_info,
 				u16 srvc_pri_order[], int list_len)
 {
@@ -1093,7 +1101,8 @@
 	}
 }
 
-int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
+static int ath6kl_htc_mbox_tx(struct htc_target *target,
+			      struct htc_packet *packet)
 {
 	struct htc_endpoint *endpoint;
 	struct list_head queue;
@@ -1121,7 +1130,7 @@
 }
 
 /* flush endpoint TX queue */
-void ath6kl_htc_flush_txep(struct htc_target *target,
+static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
 			   enum htc_endpoint_id eid, u16 tag)
 {
 	struct htc_packet *packet, *tmp_pkt;
@@ -1173,12 +1182,13 @@
 		if (endpoint->svc_id == 0)
 			/* not in use.. */
 			continue;
-		ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
+		ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
 	}
 }
 
-void ath6kl_htc_indicate_activity_change(struct htc_target *target,
-					 enum htc_endpoint_id eid, bool active)
+static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
+					     enum htc_endpoint_id eid,
+					     bool active)
 {
 	struct htc_endpoint *endpoint = &target->endpoint[eid];
 	bool dist = false;
@@ -1246,7 +1256,7 @@
 
 	INIT_LIST_HEAD(&queue);
 	list_add_tail(&packet->list, &queue);
-	return ath6kl_htc_add_rxbuf_multiple(target, &queue);
+	return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
 }
 
 static void htc_reclaim_rxbuf(struct htc_target *target,
@@ -1353,7 +1363,9 @@
 					sizeof(*htc_hdr));
 
 	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
-		ath6kl_warn("Rx buffer requested with invalid length\n");
+		ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
+			    htc_hdr->eid, htc_hdr->flags,
+			    le16_to_cpu(htc_hdr->payld_len));
 		return -EINVAL;
 	}
 
@@ -2288,7 +2300,7 @@
 	return NULL;
 }
 
-int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
+static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
 				  struct list_head *pkt_queue)
 {
 	struct htc_endpoint *endpoint;
@@ -2350,7 +2362,7 @@
 	return status;
 }
 
-void ath6kl_htc_flush_rx_buf(struct htc_target *target)
+static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
 {
 	struct htc_endpoint *endpoint;
 	struct htc_packet *packet, *tmp_pkt;
@@ -2392,7 +2404,7 @@
 	}
 }
 
-int ath6kl_htc_conn_service(struct htc_target *target,
+static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
 			    struct htc_service_connect_req *conn_req,
 			    struct htc_service_connect_resp *conn_resp)
 {
@@ -2564,7 +2576,7 @@
 	INIT_LIST_HEAD(&target->cred_dist_list);
 }
 
-int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
+static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
 			     enum htc_endpoint_id endpoint)
 {
 	int num;
@@ -2624,7 +2636,7 @@
 	}
 }
 
-int ath6kl_htc_wait_target(struct htc_target *target)
+static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
 {
 	struct htc_packet *packet = NULL;
 	struct htc_ready_ext_msg *rdy_msg;
@@ -2693,12 +2705,12 @@
 	connect.svc_id = HTC_CTRL_RSVD_SVC;
 
 	/* connect fake service */
-	status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
+	status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);
 
 	if (status)
 		/*
 		 * FIXME: this call doesn't make sense, the caller should
-		 * call ath6kl_htc_cleanup() when it wants remove htc
+		 * call ath6kl_htc_mbox_cleanup() when it wants remove htc
 		 */
 		ath6kl_hif_cleanup_scatter(target->dev->ar);
 
@@ -2715,7 +2727,7 @@
  * Start HTC, enable interrupts and let the target know
  * host has finished setup.
  */
-int ath6kl_htc_start(struct htc_target *target)
+static int ath6kl_htc_mbox_start(struct htc_target *target)
 {
 	struct htc_packet *packet;
 	int status;
@@ -2752,7 +2764,7 @@
 	status = ath6kl_hif_unmask_intrs(target->dev);
 
 	if (status)
-		ath6kl_htc_stop(target);
+		ath6kl_htc_mbox_stop(target);
 
 	return status;
 }
@@ -2796,7 +2808,7 @@
 }
 
 /* htc_stop: stop interrupt reception, and flush all queued buffers */
-void ath6kl_htc_stop(struct htc_target *target)
+static void ath6kl_htc_mbox_stop(struct htc_target *target)
 {
 	spin_lock_bh(&target->htc_lock);
 	target->htc_flags |= HTC_OP_STATE_STOPPING;
@@ -2811,12 +2823,12 @@
 
 	ath6kl_htc_flush_txep_all(target);
 
-	ath6kl_htc_flush_rx_buf(target);
+	ath6kl_htc_mbox_flush_rx_buf(target);
 
 	ath6kl_htc_reset(target);
 }
 
-void *ath6kl_htc_create(struct ath6kl *ar)
+static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
 {
 	struct htc_target *target = NULL;
 	int status = 0;
@@ -2857,13 +2869,13 @@
 	return target;
 
 err_htc_cleanup:
-	ath6kl_htc_cleanup(target);
+	ath6kl_htc_mbox_cleanup(target);
 
 	return NULL;
 }
 
 /* cleanup the HTC instance */
-void ath6kl_htc_cleanup(struct htc_target *target)
+static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
 {
 	struct htc_packet *packet, *tmp_packet;
 
@@ -2888,3 +2900,24 @@
 	kfree(target->dev);
 	kfree(target);
 }
+
+static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
+	.create = ath6kl_htc_mbox_create,
+	.wait_target = ath6kl_htc_mbox_wait_target,
+	.start = ath6kl_htc_mbox_start,
+	.conn_service = ath6kl_htc_mbox_conn_service,
+	.tx = ath6kl_htc_mbox_tx,
+	.stop = ath6kl_htc_mbox_stop,
+	.cleanup = ath6kl_htc_mbox_cleanup,
+	.flush_txep = ath6kl_htc_mbox_flush_txep,
+	.flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
+	.activity_changed = ath6kl_htc_mbox_activity_changed,
+	.get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
+	.add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
+	.credit_setup = ath6kl_htc_mbox_credit_setup,
+};
+
+void ath6kl_htc_mbox_attach(struct ath6kl *ar)
+{
+	ar->htc_ops = &ath6kl_htc_mbox_ops;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
new file mode 100644
index 0000000..b277b34
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -0,0 +1,1713 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "debug.h"
+#include "hif-ops.h"
+
+#define HTC_PACKET_CONTAINER_ALLOCATION 32
+#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
+
+static int ath6kl_htc_pipe_tx(struct htc_target *handle,
+			      struct htc_packet *packet);
+static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
+
+/* htc pipe tx path */
+static inline void restore_tx_packet(struct htc_packet *packet)
+{
+	if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
+		skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
+		packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
+	}
+}
+
+static void do_send_completion(struct htc_endpoint *ep,
+			       struct list_head *queue_to_indicate)
+{
+	struct htc_packet *packet;
+
+	if (list_empty(queue_to_indicate)) {
+		/* nothing to indicate */
+		return;
+	}
+
+	if (ep->ep_cb.tx_comp_multi != NULL) {
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
+			   __func__, ep->eid,
+			   get_queue_depth(queue_to_indicate));
+		/*
+		 * a multiple send complete handler is being used,
+		 * pass the queue to the handler
+		 */
+		ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
+		/*
+		 * all packets are now owned by the callback,
+		 * reset queue to be safe
+		 */
+		INIT_LIST_HEAD(queue_to_indicate);
+	} else {
+		/* using legacy EpTxComplete */
+		do {
+			packet = list_first_entry(queue_to_indicate,
+						  struct htc_packet, list);
+
+			list_del(&packet->list);
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "%s: calling ep %d send complete callback on packet 0x%p\n",
+				   __func__, ep->eid, packet);
+			ep->ep_cb.tx_complete(ep->target, packet);
+		} while (!list_empty(queue_to_indicate));
+	}
+}
+
+static void send_packet_completion(struct htc_target *target,
+				   struct htc_packet *packet)
+{
+	struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
+	struct list_head container;
+
+	restore_tx_packet(packet);
+	INIT_LIST_HEAD(&container);
+	list_add_tail(&packet->list, &container);
+
+	/* do completion */
+	do_send_completion(ep, &container);
+}
+
+static void get_htc_packet_credit_based(struct htc_target *target,
+					struct htc_endpoint *ep,
+					struct list_head *queue)
+{
+	int credits_required;
+	int remainder;
+	u8 send_flags;
+	struct htc_packet *packet;
+	unsigned int transfer_len;
+
+	/* NOTE : the TX lock is held when this function is called */
+
+	/* loop until we can grab as many packets out of the queue as we can */
+	while (true) {
+		send_flags = 0;
+		if (list_empty(&ep->txq))
+			break;
+
+		/* get packet at head, but don't remove it */
+		packet = list_first_entry(&ep->txq, struct htc_packet, list);
+		if (packet == NULL)
+			break;
+
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "%s: got head packet:0x%p , queue depth: %d\n",
+			   __func__, packet, get_queue_depth(&ep->txq));
+
+		transfer_len = packet->act_len + HTC_HDR_LENGTH;
+
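+		/*
+		 * Credits are counted in units of tgt_cred_sz. As an
+		 * illustration only: with a 1664-byte credit size a
+		 * 1500-byte frame plus HTC header fits in one credit,
+		 * while a 3000-byte frame would need two.
+		 */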
+		if (transfer_len <= target->tgt_cred_sz) {
+			credits_required = 1;
+		} else {
+			/* figure out how many credits this message requires */
+			credits_required = transfer_len / target->tgt_cred_sz;
+			remainder = transfer_len % target->tgt_cred_sz;
+
+			if (remainder)
+				credits_required++;
+		}
+
+		ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
+			   __func__, credits_required, ep->cred_dist.credits);
+
+		if (ep->eid == ENDPOINT_0) {
+			/*
+			 * endpoint 0 is special, it always has a credit and
+			 * does not require credit based flow control
+			 */
+			credits_required = 0;
+
+		} else {
+
+			if (ep->cred_dist.credits < credits_required)
+				break;
+
+			ep->cred_dist.credits -= credits_required;
+			ep->ep_st.cred_cosumd += credits_required;
+
+			/* check if we need credits back from the target */
+			if (ep->cred_dist.credits <
+					ep->cred_dist.cred_per_msg) {
+				/* tell the target we need credits ASAP! */
+				send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
+				ep->ep_st.cred_low_indicate += 1;
+				ath6kl_dbg(ATH6KL_DBG_HTC,
+					   "%s: host needs credits\n",
+					   __func__);
+			}
+		}
+
+		/* now we can fully dequeue */
+		packet = list_first_entry(&ep->txq, struct htc_packet, list);
+
+		list_del(&packet->list);
+		/* save the number of credits this packet consumed */
+		packet->info.tx.cred_used = credits_required;
+		/* save send flags */
+		packet->info.tx.flags = send_flags;
+		packet->info.tx.seqno = ep->seqno;
+		ep->seqno++;
+		/* queue this packet into the caller's queue */
+		list_add_tail(&packet->list, queue);
+	}
+
+}
+
+static void get_htc_packet(struct htc_target *target,
+			   struct htc_endpoint *ep,
+			   struct list_head *queue, int resources)
+{
+	struct htc_packet *packet;
+
+	/* NOTE : the TX lock is held when this function is called */
+
+	/* loop until we can grab as many packets out of the queue as we can */
+	while (resources) {
+		if (list_empty(&ep->txq))
+			break;
+
+		packet = list_first_entry(&ep->txq, struct htc_packet, list);
+		list_del(&packet->list);
+
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "%s: got packet:0x%p , new queue depth: %d\n",
+			   __func__, packet, get_queue_depth(&ep->txq));
+		packet->info.tx.seqno = ep->seqno;
+		packet->info.tx.flags = 0;
+		packet->info.tx.cred_used = 0;
+		ep->seqno++;
+
+		/* queue this packet into the caller's queue */
+		list_add_tail(&packet->list, queue);
+		resources--;
+	}
+}
+
+static int htc_issue_packets(struct htc_target *target,
+			     struct htc_endpoint *ep,
+			     struct list_head *pkt_queue)
+{
+	int status = 0;
+	u16 payload_len;
+	struct sk_buff *skb;
+	struct htc_frame_hdr *htc_hdr;
+	struct htc_packet *packet;
+
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "%s: queue: 0x%p, pkts %d\n", __func__,
+		   pkt_queue, get_queue_depth(pkt_queue));
+
+	while (!list_empty(pkt_queue)) {
+		packet = list_first_entry(pkt_queue, struct htc_packet, list);
+		list_del(&packet->list);
+
+		skb = packet->skb;
+		if (!skb) {
+			WARN_ON_ONCE(1);
+			status = -EINVAL;
+			break;
+		}
+
+		payload_len = packet->act_len;
+
+		/* setup HTC frame header */
+		htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
+							    sizeof(*htc_hdr));
+		if (!htc_hdr) {
+			WARN_ON_ONCE(1);
+			status = -EINVAL;
+			break;
+		}
+
+		packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;
+
+		/* Endianness? */
+		put_unaligned((u16) payload_len, &htc_hdr->payld_len);
+		htc_hdr->flags = packet->info.tx.flags;
+		htc_hdr->eid = (u8) packet->endpoint;
+		htc_hdr->ctrl[0] = 0;
+		htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;
+
+		spin_lock_bh(&target->tx_lock);
+
+		/* store in look up queue to match completions */
+		list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
+		ep->ep_st.tx_issued += 1;
+		spin_unlock_bh(&target->tx_lock);
+
+		status = ath6kl_hif_pipe_send(target->dev->ar,
+					      ep->pipe.pipeid_ul, NULL, skb);
+
+		if (status != 0) {
+			if (status != -ENOMEM) {
+				/* TODO: if more than 1 endpoint maps to the
+				 * same PipeID, it is possible to run out of
+				 * resources in the HIF layer.
+				 * Don't emit the error
+				 */
+				ath6kl_dbg(ATH6KL_DBG_HTC,
+					   "%s: failed status:%d\n",
+					   __func__, status);
+			}
+			spin_lock_bh(&target->tx_lock);
+			list_del(&packet->list);
+
+			/* reclaim credits */
+			ep->cred_dist.credits += packet->info.tx.cred_used;
+			spin_unlock_bh(&target->tx_lock);
+
+			/* put it back into the callers queue */
+			list_add(&packet->list, pkt_queue);
+			break;
+		}
+
+	}
+
+	if (status != 0) {
+		while (!list_empty(pkt_queue)) {
+			if (status != -ENOMEM) {
+				ath6kl_dbg(ATH6KL_DBG_HTC,
+					   "%s: failed pkt:0x%p status:%d\n",
+					   __func__, packet, status);
+			}
+
+			packet = list_first_entry(pkt_queue,
+						  struct htc_packet, list);
+			list_del(&packet->list);
+			packet->status = status;
+			send_packet_completion(target, packet);
+		}
+	}
+
+	return status;
+}
+
+static enum htc_send_queue_result htc_try_send(struct htc_target *target,
+					       struct htc_endpoint *ep,
+					       struct list_head *txq)
+{
+	struct list_head send_queue;	/* temp queue to hold packets */
+	struct htc_packet *packet, *tmp_pkt;
+	struct ath6kl *ar = target->dev->ar;
+	enum htc_send_full_action action;
+	int tx_resources, overflow, txqueue_depth, i, good_pkts;
+	u8 pipeid;
+
+	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
+		   __func__, txq,
+		   (txq == NULL) ? 0 : get_queue_depth(txq));
+
+	/* init the local send queue */
+	INIT_LIST_HEAD(&send_queue);
+
+	/*
+	 * txq == NULL means the caller didn't provide a queue;
+	 * it just wants us to check the queues and send.
+	 */
+	if (txq != NULL) {
+		if (list_empty(txq)) {
+			/* empty queue */
+			return HTC_SEND_QUEUE_DROP;
+		}
+
+		spin_lock_bh(&target->tx_lock);
+		txqueue_depth = get_queue_depth(&ep->txq);
+		spin_unlock_bh(&target->tx_lock);
+
+		if (txqueue_depth >= ep->max_txq_depth) {
+			/* we've already overflowed */
+			overflow = get_queue_depth(txq);
+		} else {
+			/* get how much we will overflow by */
+			overflow = txqueue_depth;
+			overflow += get_queue_depth(txq);
+			/* get how much we will overflow the TX queue by */
+			overflow -= ep->max_txq_depth;
+		}
+
+		/* if overflow is negative or zero, we are okay */
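+		/*
+		 * Illustration only: with max_txq_depth 10, a queue already
+		 * holding 8 packets and 4 incoming packets gives
+		 * overflow = 8 + 4 - 10 = 2.
+		 */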
+		if (overflow > 0) {
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
+				   __func__, ep->eid, overflow, txqueue_depth,
+				   ep->max_txq_depth);
+		}
+		if ((overflow <= 0) ||
+		    (ep->ep_cb.tx_full == NULL)) {
+			/*
+			 * all packets will fit or caller did not provide send
+			 * full indication handler -- just move all of them
+			 * to the local send_queue object
+			 */
+			list_splice_tail_init(txq, &send_queue);
+		} else {
+			good_pkts = get_queue_depth(txq) - overflow;
+			if (good_pkts < 0) {
+				WARN_ON_ONCE(1);
+				return HTC_SEND_QUEUE_DROP;
+			}
+
+			/* we have overflowed, and a callback is provided */
+			/* dequeue all non-overflow packets to the sendqueue */
+			for (i = 0; i < good_pkts; i++) {
+				/* pop off caller's queue */
+				packet = list_first_entry(txq,
+							  struct htc_packet,
+							  list);
+				list_del(&packet->list);
+				/* insert into local queue */
+				list_add_tail(&packet->list, &send_queue);
+			}
+
+			/*
+			 * The caller's queue now holds all the packets that
+			 * won't fit; walk through it and indicate each one to
+			 * the send-full handler.
+			 */
+			list_for_each_entry_safe(packet, tmp_pkt,
+						 txq, list) {
+
+				ath6kl_dbg(ATH6KL_DBG_HTC,
+					   "%s: Indicate overflowed TX pkts: %p\n",
+					   __func__, packet);
+				action = ep->ep_cb.tx_full(ep->target, packet);
+				if (action == HTC_SEND_FULL_DROP) {
+					/* callback wants the packet dropped */
+					ep->ep_st.tx_dropped += 1;
+
+					/* leave this one in the caller's queue
+					 * for cleanup */
+				} else {
+					/* callback wants to keep this packet,
+					 * remove from caller's queue */
+					list_del(&packet->list);
+					/* put it in the send queue */
+					list_add_tail(&packet->list,
+						      &send_queue);
+				}
+
+			}
+
+			if (list_empty(&send_queue)) {
+				/* no packets made it in, caller will cleanup */
+				return HTC_SEND_QUEUE_DROP;
+			}
+		}
+	}
+
+	if (!ep->pipe.tx_credit_flow_enabled) {
+		tx_resources =
+		    ath6kl_hif_pipe_get_free_queue_number(ar,
+							  ep->pipe.pipeid_ul);
+	} else {
+		tx_resources = 0;
+	}
+
+	spin_lock_bh(&target->tx_lock);
+	if (!list_empty(&send_queue)) {
+		/* transfer packets to tail */
+		list_splice_tail_init(&send_queue, &ep->txq);
+		if (!list_empty(&send_queue)) {
+			WARN_ON_ONCE(1);
+			spin_unlock_bh(&target->tx_lock);
+			return HTC_SEND_QUEUE_DROP;
+		}
+		INIT_LIST_HEAD(&send_queue);
+	}
+
+	/* increment tx processing count on entry */
+	ep->tx_proc_cnt++;
+
+	if (ep->tx_proc_cnt > 1) {
+		/*
+		 * Another thread or task is draining the TX queues on this
+		 * endpoint; that thread will reset the tx processing count
+		 * when the queue is drained.
+		 */
+		ep->tx_proc_cnt--;
+		spin_unlock_bh(&target->tx_lock);
+		return HTC_SEND_QUEUE_OK;
+	}
+
+	/***** beyond this point only 1 thread may enter ******/
+
+	/*
+	 * Now drain the endpoint TX queue for transmission as long as we have
+	 * enough transmit resources.
+	 */
+	while (true) {
+
+		if (get_queue_depth(&ep->txq) == 0)
+			break;
+
+		if (ep->pipe.tx_credit_flow_enabled) {
+			/*
+			 * Credit based mechanism provides flow control
+			 * based on target transmit resource availability,
+			 * we assume that the HIF layer will always have
+			 * bus resources greater than target transmit
+			 * resources.
+			 */
+			get_htc_packet_credit_based(target, ep, &send_queue);
+		} else {
+			/*
+			 * Get all packets for this endpoint that we can
+			 * for this pass.
+			 */
+			get_htc_packet(target, ep, &send_queue, tx_resources);
+		}
+
+		if (get_queue_depth(&send_queue) == 0) {
+			/*
+			 * Didn't get packets due to out of resources or TX
+			 * queue was drained.
+			 */
+			break;
+		}
+
+		spin_unlock_bh(&target->tx_lock);
+
+		/* send what we can */
+		htc_issue_packets(target, ep, &send_queue);
+
+		if (!ep->pipe.tx_credit_flow_enabled) {
+			pipeid = ep->pipe.pipeid_ul;
+			tx_resources =
+			    ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
+		}
+
+		spin_lock_bh(&target->tx_lock);
+
+	}
+	/* done with this endpoint, we can clear the count */
+	ep->tx_proc_cnt = 0;
+	spin_unlock_bh(&target->tx_lock);
+
+	return HTC_SEND_QUEUE_OK;
+}
+
+/* htc control packet manipulation */
+static void destroy_htc_txctrl_packet(struct htc_packet *packet)
+{
+	struct sk_buff *skb;
+	skb = packet->skb;
+	if (skb != NULL)
+		dev_kfree_skb(skb);
+
+	kfree(packet);
+}
+
+static struct htc_packet *build_htc_txctrl_packet(void)
+{
+	struct htc_packet *packet = NULL;
+	struct sk_buff *skb;
+
+	packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
+	if (packet == NULL)
+		return NULL;
+
+	skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
+
+	if (skb == NULL) {
+		kfree(packet);
+		return NULL;
+	}
+	packet->skb = skb;
+
+	return packet;
+}
+
+static void htc_free_txctrl_packet(struct htc_target *target,
+				   struct htc_packet *packet)
+{
+	destroy_htc_txctrl_packet(packet);
+}
+
+static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
+{
+	return build_htc_txctrl_packet();
+}
+
+static void htc_txctrl_complete(struct htc_target *target,
+				struct htc_packet *packet)
+{
+	htc_free_txctrl_packet(target, packet);
+}
+
+#define MAX_MESSAGE_SIZE 1536
+
+static int htc_setup_target_buffer_assignments(struct htc_target *target)
+{
+	int status, credits, credit_per_maxmsg, i;
+	struct htc_pipe_txcredit_alloc *entry;
+	unsigned int hif_usbaudioclass = 0;
+
+	credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
+	if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
+		credit_per_maxmsg++;
+
+	/* TODO, this should be configured by the caller! */
+
+	credits = target->tgt_creds;
+	entry = &target->pipe.txcredit_alloc[0];
+
+	status = -ENOMEM;
+
+	/* FIXME: hif_usbaudioclass is always zero */
+	if (hif_usbaudioclass) {
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "%s: For USB Audio Class- Total:%d\n",
+			   __func__, credits);
+		entry++;
+		entry++;
+		/* Setup VO Service To have Max Credits */
+		entry->service_id = WMI_DATA_VO_SVC;
+		entry->credit_alloc = (credits - 6);
+		if (entry->credit_alloc == 0)
+			entry->credit_alloc++;
+
+		credits -= (int) entry->credit_alloc;
+		if (credits <= 0)
+			return status;
+
+		entry++;
+		entry->service_id = WMI_CONTROL_SVC;
+		entry->credit_alloc = credit_per_maxmsg;
+		credits -= (int) entry->credit_alloc;
+		if (credits <= 0)
+			return status;
+
+		/* leftovers go to best effort */
+		entry++;
+		entry++;
+		entry->service_id = WMI_DATA_BE_SVC;
+		entry->credit_alloc = (u8) credits;
+		status = 0;
+	} else {
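+		/*
+		 * Default split, illustration only: with 60 target credits
+		 * and credit_per_maxmsg == 2 this assigns VI 15, VO 11,
+		 * CONTROL 2, BK 2 and the remaining 30 credits to BE.
+		 */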
+		entry++;
+		entry->service_id = WMI_DATA_VI_SVC;
+		entry->credit_alloc = credits / 4;
+		if (entry->credit_alloc == 0)
+			entry->credit_alloc++;
+
+		credits -= (int) entry->credit_alloc;
+		if (credits <= 0)
+			return status;
+
+		entry++;
+		entry->service_id = WMI_DATA_VO_SVC;
+		entry->credit_alloc = credits / 4;
+		if (entry->credit_alloc == 0)
+			entry->credit_alloc++;
+
+		credits -= (int) entry->credit_alloc;
+		if (credits <= 0)
+			return status;
+
+		entry++;
+		entry->service_id = WMI_CONTROL_SVC;
+		entry->credit_alloc = credit_per_maxmsg;
+		credits -= (int) entry->credit_alloc;
+		if (credits <= 0)
+			return status;
+
+		entry++;
+		entry->service_id = WMI_DATA_BK_SVC;
+		entry->credit_alloc = credit_per_maxmsg;
+		credits -= (int) entry->credit_alloc;
+		if (credits <= 0)
+			return status;
+
+		/* leftovers go to best effort */
+		entry++;
+		entry->service_id = WMI_DATA_BE_SVC;
+		entry->credit_alloc = (u8) credits;
+		status = 0;
+	}
+
+	if (status == 0) {
+		for (i = 0; i < ENDPOINT_MAX; i++) {
+			if (target->pipe.txcredit_alloc[i].service_id != 0) {
+				ath6kl_dbg(ATH6KL_DBG_HTC,
+					   "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
+					   i,
+					   target->pipe.txcredit_alloc[i].
+					   service_id,
+					   target->pipe.txcredit_alloc[i].
+					   credit_alloc);
+			}
+		}
+	}
+	return status;
+}
+
+/* process credit reports and call distribution function */
+static void htc_process_credit_report(struct htc_target *target,
+				      struct htc_credit_report *rpt,
+				      int num_entries,
+				      enum htc_endpoint_id from_ep)
+{
+	int total_credits = 0, i;
+	struct htc_endpoint *ep;
+
+	/* lock out TX while we update credits */
+	spin_lock_bh(&target->tx_lock);
+
+	for (i = 0; i < num_entries; i++, rpt++) {
+		if (rpt->eid >= ENDPOINT_MAX) {
+			WARN_ON_ONCE(1);
+			spin_unlock_bh(&target->tx_lock);
+			return;
+		}
+
+		ep = &target->endpoint[rpt->eid];
+		ep->cred_dist.credits += rpt->credits;
+
+		if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
+			spin_unlock_bh(&target->tx_lock);
+			htc_try_send(target, ep, NULL);
+			spin_lock_bh(&target->tx_lock);
+		}
+
+		total_credits += rpt->credits;
+	}
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "Report indicated %d credits to distribute\n",
+		   total_credits);
+
+	spin_unlock_bh(&target->tx_lock);
+}
+
+/* flush endpoint TX queue */
+static void htc_flush_tx_endpoint(struct htc_target *target,
+				  struct htc_endpoint *ep, u16 tag)
+{
+	struct htc_packet *packet;
+
+	spin_lock_bh(&target->tx_lock);
+	while (get_queue_depth(&ep->txq)) {
+		packet = list_first_entry(&ep->txq, struct htc_packet, list);
+		list_del(&packet->list);
+		packet->status = 0;
+		send_packet_completion(target, packet);
+	}
+	spin_unlock_bh(&target->tx_lock);
+}
+
+/*
+ * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC.
+ * Since the upper layers expect struct htc_packet containers, we take the
+ * completed skb and look up its corresponding HTC packet buffer in a lookup
+ * list. This is extra overhead that can be removed by re-aligning the HIF
+ * interfaces with HTC.
+ */
+static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
+					       struct htc_endpoint *ep,
+					       struct sk_buff *skb)
+{
+	struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
+
+	spin_lock_bh(&target->tx_lock);
+
+	/*
+	 * Iterate from the front of the tx lookup queue. This lookup should
+	 * be fast since the lower layers complete in order, so the completed
+	 * packet is generally at the head of the list.
+	 */
+	list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
+				 list) {
+		/* check for removal */
+		if (skb == packet->skb) {
+			/* found it */
+			list_del(&packet->list);
+			found_packet = packet;
+			break;
+		}
+	}
+
+	spin_unlock_bh(&target->tx_lock);
+
+	return found_packet;
+}
+
+static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
+{
+	struct htc_target *target = ar->htc_target;
+	struct htc_frame_hdr *htc_hdr;
+	struct htc_endpoint *ep;
+	struct htc_packet *packet;
+	u8 ep_id, *netdata;
+	u32 netlen;
+
+	netdata = skb->data;
+	netlen = skb->len;
+
+	htc_hdr = (struct htc_frame_hdr *) netdata;
+
+	ep_id = htc_hdr->eid;
+	ep = &target->endpoint[ep_id];
+
+	packet = htc_lookup_tx_packet(target, ep, skb);
+	if (packet == NULL) {
+		/* may have already been flushed and freed */
+		ath6kl_err("HTC TX lookup failed!\n");
+	} else {
+		/* will be giving this buffer back to upper layers */
+		packet->status = 0;
+		send_packet_completion(target, packet);
+	}
+	skb = NULL;
+
+	if (!ep->pipe.tx_credit_flow_enabled) {
+		/*
+		 * note: when using TX credit flow, the re-checking of queues
+		 * happens when credits flow back from the target. in the
+		 * non-TX credit case, we recheck after the packet completes
+		 */
+		htc_try_send(target, ep, NULL);
+	}
+
+	return 0;
+}
+
+static int htc_send_packets_multiple(struct htc_target *target,
+				     struct list_head *pkt_queue)
+{
+	struct htc_endpoint *ep;
+	struct htc_packet *packet, *tmp_pkt;
+
+	if (list_empty(pkt_queue))
+		return -EINVAL;
+
+	/* get first packet to find out which ep the packets will go into */
+	packet = list_first_entry(pkt_queue, struct htc_packet, list);
+	if (packet == NULL)
+		return -EINVAL;
+
+	if (packet->endpoint >= ENDPOINT_MAX) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+	ep = &target->endpoint[packet->endpoint];
+
+	htc_try_send(target, ep, pkt_queue);
+
+	/* do completion on any packets that couldn't get in */
+	if (!list_empty(pkt_queue)) {
+		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
+			packet->status = -ENOMEM;
+		}
+
+		do_send_completion(ep, pkt_queue);
+	}
+
+	return 0;
+}
+
+/* htc pipe rx path */
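+/*
+ * The RX path recycles a small pool of struct htc_packet containers. The
+ * pool is kept as a singly linked free list threaded through the otherwise
+ * unused list.next pointer and is protected by rx_lock.
+ */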
+static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
+{
+	struct htc_packet *packet;
+	spin_lock_bh(&target->rx_lock);
+
+	if (target->pipe.htc_packet_pool == NULL) {
+		spin_unlock_bh(&target->rx_lock);
+		return NULL;
+	}
+
+	packet = target->pipe.htc_packet_pool;
+	target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
+
+	spin_unlock_bh(&target->rx_lock);
+
+	packet->list.next = NULL;
+	return packet;
+}
+
+static void free_htc_packet_container(struct htc_target *target,
+				      struct htc_packet *packet)
+{
+	struct list_head *lh;
+
+	spin_lock_bh(&target->rx_lock);
+
+	if (target->pipe.htc_packet_pool == NULL) {
+		target->pipe.htc_packet_pool = packet;
+		packet->list.next = NULL;
+	} else {
+		lh = (struct list_head *) target->pipe.htc_packet_pool;
+		packet->list.next = lh;
+		target->pipe.htc_packet_pool = packet;
+	}
+
+	spin_unlock_bh(&target->rx_lock);
+}
+
+static int htc_process_trailer(struct htc_target *target, u8 *buffer,
+			       int len, enum htc_endpoint_id from_ep)
+{
+	struct htc_credit_report *report;
+	struct htc_record_hdr *record;
+	u8 *record_buf, *orig_buf;
+	int orig_len, status;
+
+	orig_buf = buffer;
+	orig_len = len;
+	status = 0;
+
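+	/*
+	 * The trailer is a sequence of records, each a struct htc_record_hdr
+	 * (rec_id, len) followed by len bytes of record data. Only credit
+	 * reports (arrays of struct htc_credit_report) are consumed here;
+	 * other record types are skipped.
+	 */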
+	while (len > 0) {
+		if (len < sizeof(struct htc_record_hdr)) {
+			status = -EINVAL;
+			break;
+		}
+
+		/* these are byte aligned structs */
+		record = (struct htc_record_hdr *) buffer;
+		len -= sizeof(struct htc_record_hdr);
+		buffer += sizeof(struct htc_record_hdr);
+
+		if (record->len > len) {
+			/* no room left in buffer for record */
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "invalid length: %d (id:%d) buffer has: %d bytes left\n",
+				   record->len, record->rec_id, len);
+			status = -EINVAL;
+			break;
+		}
+
+		/* start of record follows the header */
+		record_buf = buffer;
+
+		switch (record->rec_id) {
+		case HTC_RECORD_CREDITS:
+			if (record->len < sizeof(struct htc_credit_report)) {
+				WARN_ON_ONCE(1);
+				return -EINVAL;
+			}
+
+			report = (struct htc_credit_report *) record_buf;
+			htc_process_credit_report(target, report,
+						  record->len / sizeof(*report),
+						  from_ep);
+			break;
+		default:
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "unhandled record: id:%d length:%d\n",
+				   record->rec_id, record->len);
+			break;
+		}
+
+		if (status != 0)
+			break;
+
+		/* advance buffer past this record for next time around */
+		buffer += record->len;
+		len -= record->len;
+	}
+
+	return status;
+}
+
+static void do_recv_completion(struct htc_endpoint *ep,
+			       struct list_head *queue_to_indicate)
+{
+	struct htc_packet *packet;
+
+	if (list_empty(queue_to_indicate)) {
+		/* nothing to indicate */
+		return;
+	}
+
+	/* using legacy EpRecv */
+	while (!list_empty(queue_to_indicate)) {
+		packet = list_first_entry(queue_to_indicate,
+					  struct htc_packet, list);
+		list_del(&packet->list);
+		ep->ep_cb.rx(ep->target, packet);
+	}
+
+	return;
+}
+
+static void recv_packet_completion(struct htc_target *target,
+				   struct htc_endpoint *ep,
+				   struct htc_packet *packet)
+{
+	struct list_head container;
+	INIT_LIST_HEAD(&container);
+	list_add_tail(&packet->list, &container);
+
+	/* do completion */
+	do_recv_completion(ep, &container);
+}
+
+static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
+				       u8 pipeid)
+{
+	struct htc_target *target = ar->htc_target;
+	u8 *netdata, *trailer, hdr_info;
+	struct htc_frame_hdr *htc_hdr;
+	u32 netlen, trailerlen = 0;
+	struct htc_packet *packet;
+	struct htc_endpoint *ep;
+	u16 payload_len;
+	int status = 0;
+
+	netdata = skb->data;
+	netlen = skb->len;
+
+	htc_hdr = (struct htc_frame_hdr *) netdata;
+
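+	/*
+	 * The HTC frame header carries eid, flags, a little-endian
+	 * payld_len and two control bytes (ctrl[0] holds the trailer
+	 * length on RX, ctrl[1] the TX sequence number).
+	 */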
+	if (htc_hdr->eid >= ENDPOINT_MAX) {
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "HTC Rx: invalid EndpointID=%d\n",
+			   htc_hdr->eid);
+		status = -EINVAL;
+		goto free_skb;
+	}
+
+	ep = &target->endpoint[htc_hdr->eid];
+
+	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
+
+	if (netlen < (payload_len + HTC_HDR_LENGTH)) {
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "HTC Rx: insufficient length, got:%d expected =%u\n",
+			   netlen, payload_len + HTC_HDR_LENGTH);
+		status = -EINVAL;
+		goto free_skb;
+	}
+
+	/* get flags to check for trailer */
+	hdr_info = htc_hdr->flags;
+	if (hdr_info & HTC_FLG_RX_TRAILER) {
+		/* extract the trailer length */
+		hdr_info = htc_hdr->ctrl[0];
+		if ((hdr_info < sizeof(struct htc_record_hdr)) ||
+		    (hdr_info > payload_len)) {
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "invalid header: payloadlen should be %d, CB[0]: %d\n",
+				   payload_len, hdr_info);
+			status = -EINVAL;
+			goto free_skb;
+		}
+
+		trailerlen = hdr_info;
+		/* process trailer after hdr/apps payload */
+		trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
+			payload_len - hdr_info;
+		status = htc_process_trailer(target, trailer, hdr_info,
+					     htc_hdr->eid);
+		if (status != 0)
+			goto free_skb;
+	}
+
+	if (((int) payload_len - (int) trailerlen) <= 0) {
+		/* zero length packet with trailer, just drop these */
+		goto free_skb;
+	}
+
+	if (htc_hdr->eid == ENDPOINT_0) {
+		/* handle HTC control message */
+		if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
+			/*
+			 * fatal: the target should not send unsolicited
+			 * messages on endpoint 0
+			 */
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "HTC ignores Rx Ctrl after setup complete\n");
+			status = -EINVAL;
+			goto free_skb;
+		}
+
+		/* remove HTC header */
+		skb_pull(skb, HTC_HDR_LENGTH);
+
+		netdata = skb->data;
+		netlen = skb->len;
+
+		spin_lock_bh(&target->rx_lock);
+
+		target->pipe.ctrl_response_valid = true;
+		target->pipe.ctrl_response_len = min_t(int, netlen,
+						       HTC_MAX_CTRL_MSG_LEN);
+		memcpy(target->pipe.ctrl_response_buf, netdata,
+		       target->pipe.ctrl_response_len);
+
+		spin_unlock_bh(&target->rx_lock);
+
+		dev_kfree_skb(skb);
+		skb = NULL;
+		goto free_skb;
+	}
+
+	/*
+	 * TODO: the message-based HIF architecture allocates net bufs
+	 * for recv packets; since it bridges the HIF to upper layers
+	 * that expect HTC packets, we form the packets here
+	 */
+	packet = alloc_htc_packet_container(target);
+	if (packet == NULL) {
+		status = -ENOMEM;
+		goto free_skb;
+	}
+
+	packet->status = 0;
+	packet->endpoint = htc_hdr->eid;
+	packet->pkt_cntxt = skb;
+
+	/* TODO: for backwards compatibility */
+	packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
+	packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;
+
+	/*
+	 * TODO: this is a hack because the driver layer will set the
+	 * actual len of the skb again which will just double the len
+	 */
+	skb_trim(skb, 0);
+
+	recv_packet_completion(target, ep, packet);
+
+	/* recover the packet container */
+	free_htc_packet_container(target, packet);
+	skb = NULL;
+
+free_skb:
+	if (skb != NULL)
+		dev_kfree_skb(skb);
+
+	return status;
+}
+
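+/*
+ * Drain the endpoint's rx_bufq: each queued packet is marked -ECANCELED and
+ * handed back through the rx completion callback; rx_lock is dropped while
+ * the callback runs.
+ */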
+static void htc_flush_rx_queue(struct htc_target *target,
+			       struct htc_endpoint *ep)
+{
+	struct list_head container;
+	struct htc_packet *packet;
+
+	spin_lock_bh(&target->rx_lock);
+
+	while (1) {
+		if (list_empty(&ep->rx_bufq))
+			break;
+
+		packet = list_first_entry(&ep->rx_bufq,
+					  struct htc_packet, list);
+		list_del(&packet->list);
+
+		spin_unlock_bh(&target->rx_lock);
+		packet->status = -ECANCELED;
+		packet->act_len = 0;
+
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "Flushing RX packet:0x%p, length:%d, ep:%d\n",
+			   packet, packet->buf_len,
+			   packet->endpoint);
+
+		INIT_LIST_HEAD(&container);
+		list_add_tail(&packet->list, &container);
+
+		/* give the packet back */
+		do_recv_completion(ep, &container);
+		spin_lock_bh(&target->rx_lock);
+	}
+
+	spin_unlock_bh(&target->rx_lock);
+}
+
+/* polling routine to wait for a control packet to be received */
+static int htc_wait_recv_ctrl_message(struct htc_target *target)
+{
+	int count = HTC_TARGET_RESPONSE_POLL_COUNT;
+
+	while (count > 0) {
+		spin_lock_bh(&target->rx_lock);
+
+		if (target->pipe.ctrl_response_valid) {
+			target->pipe.ctrl_response_valid = false;
+			spin_unlock_bh(&target->rx_lock);
+			break;
+		}
+
+		spin_unlock_bh(&target->rx_lock);
+
+		count--;
+
+		msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
+	}
+
+	if (count <= 0) {
+		ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__);
+		return -ECOMM;
+	}
+
+	return 0;
+}
+
+static void htc_rxctrl_complete(struct htc_target *context,
+				struct htc_packet *packet)
+{
+	/* TODO, can't really receive HTC control messages yet.... */
+	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: invalid call function\n", __func__);
+}
+
+/* htc pipe initialization */
+static void reset_endpoint_states(struct htc_target *target)
+{
+	struct htc_endpoint *ep;
+	int i;
+
+	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+		ep = &target->endpoint[i];
+		ep->svc_id = 0;
+		ep->len_max = 0;
+		ep->max_txq_depth = 0;
+		ep->eid = i;
+		INIT_LIST_HEAD(&ep->txq);
+		INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
+		INIT_LIST_HEAD(&ep->rx_bufq);
+		ep->target = target;
+		ep->pipe.tx_credit_flow_enabled = true; /* FIXME */
+	}
+}
+
+/* start HTC, this is called after all services are connected */
+static int htc_config_target_hif_pipe(struct htc_target *target)
+{
+	return 0;
+}
+
+/* htc service functions */
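+/*
+ * Return the TX credit allocation recorded for service_id in the target's
+ * txcredit_alloc table; a return of 0 means no credits were assigned to
+ * that service.
+ */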
+static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
+{
+	u8 allocation = 0;
+	int i;
+
+	for (i = 0; i < ENDPOINT_MAX; i++) {
+		if (target->pipe.txcredit_alloc[i].service_id == service_id)
+			allocation =
+				target->pipe.txcredit_alloc[i].credit_alloc;
+	}
+
+	if (allocation == 0) {
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "HTC Service TX : 0x%2.2X : allocation is zero!\n",
+			   service_id);
+	}
+
+	return allocation;
+}
+
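+/*
+ * Connect a service: for regular services an HTC connect-service message is
+ * sent on endpoint 0 and the target's response supplies the assigned endpoint
+ * and maximum message size; the endpoint is then set up with credits and
+ * mapped to its HIF pipes.
+ */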
+static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
+		     struct htc_service_connect_req *conn_req,
+		     struct htc_service_connect_resp *conn_resp)
+{
+	struct ath6kl *ar = target->dev->ar;
+	struct htc_packet *packet = NULL;
+	struct htc_conn_service_resp *resp_msg;
+	struct htc_conn_service_msg *conn_msg;
+	enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
+	bool disable_credit_flowctrl = false;
+	unsigned int max_msg_size = 0;
+	struct htc_endpoint *ep;
+	int length, status = 0;
+	struct sk_buff *skb;
+	u8 tx_alloc;
+	u16 flags;
+
+	if (conn_req->svc_id == 0) {
+		WARN_ON_ONCE(1);
+		status = -EINVAL;
+		goto free_packet;
+	}
+
+	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
+		/* special case for pseudo control service */
+		assigned_epid = ENDPOINT_0;
+		max_msg_size = HTC_MAX_CTRL_MSG_LEN;
+		tx_alloc = 0;
+
+	} else {
+
+		tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
+		if (tx_alloc == 0) {
+			status = -ENOMEM;
+			goto free_packet;
+		}
+
+		/* allocate a packet to send to the target */
+		packet = htc_alloc_txctrl_packet(target);
+
+		if (packet == NULL) {
+			WARN_ON_ONCE(1);
+			status = -ENOMEM;
+			goto free_packet;
+		}
+
+		skb = packet->skb;
+		length = sizeof(struct htc_conn_service_msg);
+
+		/* assemble connect service message */
+		conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
+								   length);
+		if (conn_msg == NULL) {
+			WARN_ON_ONCE(1);
+			status = -EINVAL;
+			goto free_packet;
+		}
+
+		memset(conn_msg, 0,
+		       sizeof(struct htc_conn_service_msg));
+		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
+		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
+		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
+					~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);
+
+		/* tell target desired recv alloc for this ep */
+		flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
+		conn_msg->conn_flags |= cpu_to_le16(flags);
+
+		if (conn_req->conn_flags &
+		    HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
+			disable_credit_flowctrl = true;
+		}
+
+		set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
+				 length,
+				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
+		status = ath6kl_htc_pipe_tx(target, packet);
+
+		/* we don't own it anymore */
+		packet = NULL;
+		if (status != 0)
+			goto free_packet;
+
+		/* wait for response */
+		status = htc_wait_recv_ctrl_message(target);
+		if (status != 0)
+			goto free_packet;
+
+		/* we controlled the buffer creation so it has to be
+		 * properly aligned
+		 */
+		resp_msg = (struct htc_conn_service_resp *)
+		    target->pipe.ctrl_response_buf;
+
+		if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
+		    (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
+			/* this message is not valid */
+			WARN_ON_ONCE(1);
+			status = -EINVAL;
+			goto free_packet;
+		}
+
+		ath6kl_dbg(ATH6KL_DBG_TRC,
+			   "%s: service 0x%X conn resp: status: %d ep: %d\n",
+			   __func__, resp_msg->svc_id, resp_msg->status,
+			   resp_msg->eid);
+
+		conn_resp->resp_code = resp_msg->status;
+		/* check response status */
+		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
+			ath6kl_dbg(ATH6KL_DBG_HTC,
+				   "Target failed service 0x%X connect request (status:%d)\n",
+				   resp_msg->svc_id, resp_msg->status);
+			status = -EINVAL;
+			goto free_packet;
+		}
+
+		assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
+		max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
+	}
+
+	/* the rest are parameter checks so set the error status */
+	status = -EINVAL;
+
+	if (assigned_epid >= ENDPOINT_MAX) {
+		WARN_ON_ONCE(1);
+		goto free_packet;
+	}
+
+	if (max_msg_size == 0) {
+		WARN_ON_ONCE(1);
+		goto free_packet;
+	}
+
+	ep = &target->endpoint[assigned_epid];
+	ep->eid = assigned_epid;
+	if (ep->svc_id != 0) {
+		/* endpoint already in use! */
+		WARN_ON_ONCE(1);
+		goto free_packet;
+	}
+
+	/* return assigned endpoint to caller */
+	conn_resp->endpoint = assigned_epid;
+	conn_resp->len_max = max_msg_size;
+
+	/* setup the endpoint */
+	ep->svc_id = conn_req->svc_id; /* this marks ep in use */
+	ep->max_txq_depth = conn_req->max_txq_depth;
+	ep->len_max = max_msg_size;
+	ep->cred_dist.credits = tx_alloc;
+	ep->cred_dist.cred_sz = target->tgt_cred_sz;
+	ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
+	if (max_msg_size % target->tgt_cred_sz)
+		ep->cred_dist.cred_per_msg++;
+
+	/* copy all the callbacks */
+	ep->ep_cb = conn_req->ep_cb;
+
+	status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
+					     &ep->pipe.pipeid_ul,
+					     &ep->pipe.pipeid_dl);
+	if (status != 0)
+		goto free_packet;
+
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
+		   ep->svc_id, ep->pipe.pipeid_ul,
+		   ep->pipe.pipeid_dl, ep->eid);
+
+	if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
+		ep->pipe.tx_credit_flow_enabled = false;
+		ath6kl_dbg(ATH6KL_DBG_HTC,
+			   "SVC: 0x%4.4X ep:%d TX flow control off\n",
+			   ep->svc_id, assigned_epid);
+	}
+
+free_packet:
+	if (packet != NULL)
+		htc_free_txctrl_packet(target, packet);
+	return status;
+}
+
+/* htc export functions */
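+/*
+ * Allocate the HTC target instance, pre-populate the free list of packet
+ * containers and bind endpoint 0 to the HIF default pipes.
+ */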
+static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
+{
+	int status = 0;
+	struct htc_endpoint *ep = NULL;
+	struct htc_target *target = NULL;
+	struct htc_packet *packet;
+	int i;
+
+	target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
+	if (target == NULL) {
+		ath6kl_err("htc create unable to allocate memory\n");
+		status = -ENOMEM;
+		goto fail_htc_create;
+	}
+
+	spin_lock_init(&target->htc_lock);
+	spin_lock_init(&target->rx_lock);
+	spin_lock_init(&target->tx_lock);
+
+	reset_endpoint_states(target);
+
+	for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
+		packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
+
+		if (packet != NULL)
+			free_htc_packet_container(target, packet);
+	}
+
+	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
+	if (!target->dev) {
+		ath6kl_err("unable to allocate memory\n");
+		status = -ENOMEM;
+		goto fail_htc_create;
+	}
+	target->dev->ar = ar;
+	target->dev->htc_cnxt = target;
+
+	/* Get HIF default pipe for HTC message exchange */
+	ep = &target->endpoint[ENDPOINT_0];
+
+	ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
+				    &ep->pipe.pipeid_dl);
+
+	return target;
+
+fail_htc_create:
+	if (status != 0) {
+		if (target != NULL)
+			ath6kl_htc_pipe_cleanup(target);
+
+		target = NULL;
+	}
+	return target;
+}
+
+/* cleanup the HTC instance */
+static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
+{
+	struct htc_packet *packet;
+
+	while (true) {
+		packet = alloc_htc_packet_container(target);
+		if (packet == NULL)
+			break;
+		kfree(packet);
+	}
+
+	kfree(target->dev);
+
+	/* kfree our instance */
+	kfree(target);
+}
+
+static int ath6kl_htc_pipe_start(struct htc_target *target)
+{
+	struct sk_buff *skb;
+	struct htc_setup_comp_ext_msg *setup;
+	struct htc_packet *packet;
+
+	htc_config_target_hif_pipe(target);
+
+	/* allocate a buffer to send */
+	packet = htc_alloc_txctrl_packet(target);
+	if (packet == NULL) {
+		WARN_ON_ONCE(1);
+		return -ENOMEM;
+	}
+
+	skb = packet->skb;
+
+	/* assemble setup complete message */
+	setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
+							  sizeof(*setup));
+	memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
+	setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+	ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");
+
+	set_htc_pkt_info(packet, NULL, (u8 *) setup,
+			 sizeof(struct htc_setup_comp_ext_msg),
+			 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
+	target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
+
+	return ath6kl_htc_pipe_tx(target, packet);
+}
+
+static void ath6kl_htc_pipe_stop(struct htc_target *target)
+{
+	int i;
+	struct htc_endpoint *ep;
+
+	/* cleanup endpoints */
+	for (i = 0; i < ENDPOINT_MAX; i++) {
+		ep = &target->endpoint[i];
+		htc_flush_rx_queue(target, ep);
+		htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
+	}
+
+	reset_endpoint_states(target);
+	target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
+}
+
+static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
+					 enum htc_endpoint_id endpoint)
+{
+	int num;
+
+	spin_lock_bh(&target->rx_lock);
+	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
+	spin_unlock_bh(&target->rx_lock);
+
+	return num;
+}
+
+static int ath6kl_htc_pipe_tx(struct htc_target *target,
+			      struct htc_packet *packet)
+{
+	struct list_head queue;
+
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
+		   __func__, packet->endpoint, packet->buf,
+		   packet->act_len);
+
+	INIT_LIST_HEAD(&queue);
+	list_add_tail(&packet->list, &queue);
+
+	return htc_send_packets_multiple(target, &queue);
+}
+
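+/*
+ * Wait for the target's HTC ready message, record the advertised credit
+ * count and credit size, then connect the pseudo control service on
+ * endpoint 0.
+ */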
+static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
+{
+	struct htc_ready_ext_msg *ready_msg;
+	struct htc_service_connect_req connect;
+	struct htc_service_connect_resp resp;
+	int status = 0;
+
+	status = htc_wait_recv_ctrl_message(target);
+
+	if (status != 0)
+		return status;
+
+	if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
+		ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n",
+			   target->pipe.ctrl_response_len);
+		return -ECOMM;
+	}
+
+	ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
+
+	if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
+		ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n",
+			   ready_msg->ver2_0_info.msg_id);
+		return -ECOMM;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_HTC,
+		   "Target Ready! : transmit resources : %d size:%d\n",
+		   ready_msg->ver2_0_info.cred_cnt,
+		   ready_msg->ver2_0_info.cred_sz);
+
+	target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
+	target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);
+
+	if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
+		return -ECOMM;
+
+	htc_setup_target_buffer_assignments(target);
+
+	/* setup our pseudo HTC control endpoint connection */
+	memset(&connect, 0, sizeof(connect));
+	memset(&resp, 0, sizeof(resp));
+	connect.ep_cb.tx_complete = htc_txctrl_complete;
+	connect.ep_cb.rx = htc_rxctrl_complete;
+	connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
+	connect.svc_id = HTC_CTRL_RSVD_SVC;
+
+	/* connect fake service */
+	status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);
+
+	return status;
+}
+
+static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
+				       enum htc_endpoint_id endpoint, u16 tag)
+{
+	struct htc_endpoint *ep = &target->endpoint[endpoint];
+
+	if (ep->svc_id == 0) {
+		WARN_ON_ONCE(1);
+		/* not in use.. */
+		return;
+	}
+
+	htc_flush_tx_endpoint(target, ep, tag);
+}
+
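+/*
+ * Splice caller-provided receive packets onto the endpoint's rx_bufq; on
+ * failure the packets are marked canceled and returned through the rx
+ * callback.
+ */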
+static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
+					      struct list_head *pkt_queue)
+{
+	struct htc_packet *packet, *tmp_pkt, *first;
+	struct htc_endpoint *ep;
+	int status = 0;
+
+	if (list_empty(pkt_queue))
+		return -EINVAL;
+
+	first = list_first_entry(pkt_queue, struct htc_packet, list);
+	if (first == NULL) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	if (first->endpoint >= ENDPOINT_MAX) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
+		   __func__, first->endpoint, get_queue_depth(pkt_queue),
+		   first->buf_len);
+
+	ep = &target->endpoint[first->endpoint];
+
+	spin_lock_bh(&target->rx_lock);
+
+	/* store receive packets */
+	list_splice_tail_init(pkt_queue, &ep->rx_bufq);
+
+	spin_unlock_bh(&target->rx_lock);
+
+	if (status != 0) {
+		/* walk through queue and mark each one canceled */
+		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
+			packet->status = -ECANCELED;
+		}
+
+		do_recv_completion(ep, pkt_queue);
+	}
+
+	return status;
+}
+
+static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
+					     enum htc_endpoint_id ep,
+					     bool active)
+{
+	/* TODO */
+}
+
+static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
+{
+	/* TODO */
+}
+
+static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
+					struct ath6kl_htc_credit_info *info)
+{
+	return 0;
+}
+
+static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
+	.create = ath6kl_htc_pipe_create,
+	.wait_target = ath6kl_htc_pipe_wait_target,
+	.start = ath6kl_htc_pipe_start,
+	.conn_service = ath6kl_htc_pipe_conn_service,
+	.tx = ath6kl_htc_pipe_tx,
+	.stop = ath6kl_htc_pipe_stop,
+	.cleanup = ath6kl_htc_pipe_cleanup,
+	.flush_txep = ath6kl_htc_pipe_flush_txep,
+	.flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
+	.activity_changed = ath6kl_htc_pipe_activity_changed,
+	.get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
+	.add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
+	.credit_setup = ath6kl_htc_pipe_credit_setup,
+	.tx_complete = ath6kl_htc_pipe_tx_complete,
+	.rx_complete = ath6kl_htc_pipe_rx_complete,
+};
+
+void ath6kl_htc_pipe_attach(struct ath6kl *ar)
+{
+	ar->htc_ops = &ath6kl_htc_pipe_ops;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 03cae14..29ef50e 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -16,17 +16,21 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/moduleparam.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/of.h>
 #include <linux/mmc/sdio_func.h>
+#include <linux/vmalloc.h>
 
 #include "core.h"
 #include "cfg80211.h"
 #include "target.h"
 #include "debug.h"
 #include "hif-ops.h"
+#include "htc-ops.h"
 
 static const struct ath6kl_hw hw_list[] = {
 	{
@@ -256,6 +260,7 @@
 	memset(&connect, 0, sizeof(connect));
 
 	/* these fields are the same for all service endpoints */
+	connect.ep_cb.tx_comp_multi = ath6kl_tx_complete;
 	connect.ep_cb.rx = ath6kl_rx;
 	connect.ep_cb.rx_refill = ath6kl_rx_refill;
 	connect.ep_cb.tx_full = ath6kl_tx_queue_full;
@@ -485,22 +490,31 @@
 		fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
 
 	/*
-	 * By default, submodes :
+	 * Submodes when fw does not support dynamic interface
+	 * switching:
 	 *		vif[0] - AP/STA/IBSS
 	 *		vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
 	 *		vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
+	 * Otherwise, all the interfaces are initialized to p2p dev.
 	 */
 
-	for (i = 0; i < ar->max_norm_iface; i++)
-		fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
-			      (i * HI_OPTION_FW_SUBMODE_BITS);
+	if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+		     ar->fw_capabilities)) {
+		for (i = 0; i < ar->vif_max; i++)
+			fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
+				(i * HI_OPTION_FW_SUBMODE_BITS);
+	} else {
+		for (i = 0; i < ar->max_norm_iface; i++)
+			fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
+				(i * HI_OPTION_FW_SUBMODE_BITS);
 
-	for (i = ar->max_norm_iface; i < ar->vif_max; i++)
-		fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
-			      (i * HI_OPTION_FW_SUBMODE_BITS);
+		for (i = ar->max_norm_iface; i < ar->vif_max; i++)
+			fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
+				(i * HI_OPTION_FW_SUBMODE_BITS);
 
-	if (ar->p2p && ar->vif_max == 1)
-		fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
+		if (ar->p2p && ar->vif_max == 1)
+			fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
+	}
 
 	if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest,
 				  HTC_PROTOCOL_VERSION) != 0) {
@@ -539,18 +553,20 @@
 	 * but possible in theory.
 	 */
 
-	param = ar->hw.board_ext_data_addr;
-	ram_reserved_size = ar->hw.reserved_ram_size;
+	if (ar->target_type == TARGET_TYPE_AR6003) {
+		param = ar->hw.board_ext_data_addr;
+		ram_reserved_size = ar->hw.reserved_ram_size;
 
-	if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) {
-		ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
-		return -EIO;
-	}
+		if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) {
+			ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
+			return -EIO;
+		}
 
-	if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz,
-				  ram_reserved_size) != 0) {
-		ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
-		return -EIO;
+		if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz,
+					  ram_reserved_size) != 0) {
+			ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
+			return -EIO;
+		}
 	}
 
 	/* set the block size for the target */
@@ -924,13 +940,14 @@
 			if (ar->fw != NULL)
 				break;
 
-			ar->fw = kmemdup(data, ie_len, GFP_KERNEL);
+			ar->fw = vmalloc(ie_len);
 
 			if (ar->fw == NULL) {
 				ret = -ENOMEM;
 				goto out;
 			}
 
+			memcpy(ar->fw, data, ie_len);
 			ar->fw_len = ie_len;
 			break;
 		case ATH6KL_FW_IE_PATCH_IMAGE:
@@ -1507,7 +1524,7 @@
 	}
 
 	/* setup credit distribution */
-	ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info);
+	ath6kl_htc_credit_setup(ar->htc_target, &ar->credit_state_info);
 
 	/* start HTC */
 	ret = ath6kl_htc_start(ar->htc_target);
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 229e192..4d818f9 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -15,6 +15,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "core.h"
 #include "hif-ops.h"
 #include "cfg80211.h"
@@ -756,6 +758,10 @@
 	stats->wow_evt_discarded +=
 		le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
 
+	stats->arp_received = le32_to_cpu(tgt_stats->arp_stats.arp_received);
+	stats->arp_replied = le32_to_cpu(tgt_stats->arp_stats.arp_replied);
+	stats->arp_matched = le32_to_cpu(tgt_stats->arp_stats.arp_matched);
+
 	if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
 		clear_bit(STATS_UPDATE_PEND, &vif->flags);
 		wake_up(&ar->event_wq);
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 5352864..44ea7a7 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1362,7 +1362,7 @@
 		goto err_core_alloc;
 	}
 
-	ret = ath6kl_core_init(ar);
+	ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
 	if (ret) {
 		ath6kl_err("Failed to init ath6kl core\n");
 		goto err_core_alloc;
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index 6675c92..acc9aa8 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -55,8 +55,9 @@
 		ath6kl_warn("failed to allocate testmode rx skb!\n");
 		return;
 	}
-	NLA_PUT_U32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD);
-	NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf);
+	if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) ||
+	    nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf))
+		goto nla_put_failure;
 	cfg80211_testmode_event(skb, GFP_KERNEL);
 	return;
 
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index f85353f..82f2f5c 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -15,8 +15,11 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "core.h"
 #include "debug.h"
+#include "htc-ops.h"
 
 /*
  * tid - tid_mux0..tid_mux3
@@ -322,6 +325,7 @@
 	cookie->map_no = 0;
 	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
 			 eid, ATH6KL_CONTROL_PKT_TAG);
+	cookie->htc_pkt.skb = skb;
 
 	/*
 	 * This interface is asynchronous, if there is an error, cleanup
@@ -490,6 +494,7 @@
 	cookie->map_no = map_no;
 	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
 			 eid, htc_tag);
+	cookie->htc_pkt.skb = skb;
 
 	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
 			skb->data, skb->len);
@@ -570,7 +575,7 @@
 
 notify_htc:
 	/* notify HTC, this may cause credit distribution changes */
-	ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
+	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
 }
 
 enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
@@ -666,9 +671,10 @@
 	}
 }
 
-void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
+void ath6kl_tx_complete(struct htc_target *target,
+			struct list_head *packet_queue)
 {
-	struct ath6kl *ar = context;
+	struct ath6kl *ar = target->dev->ar;
 	struct sk_buff_head skb_queue;
 	struct htc_packet *packet;
 	struct sk_buff *skb;
@@ -887,6 +893,7 @@
 			skb->data = PTR_ALIGN(skb->data - 4, 4);
 		set_htc_rxpkt_info(packet, skb, skb->data,
 				   ATH6KL_BUFFER_SIZE, endpoint);
+		packet->skb = skb;
 		list_add_tail(&packet->list, &queue);
 	}
 
@@ -909,6 +916,8 @@
 			skb->data = PTR_ALIGN(skb->data - 4, 4);
 		set_htc_rxpkt_info(packet, skb, skb->data,
 				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
+		packet->skb = skb;
+
 		spin_lock_bh(&ar->lock);
 		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
 		spin_unlock_bh(&ar->lock);
@@ -1281,6 +1290,7 @@
 	struct wmi_data_hdr *dhdr;
 	int min_hdr_len;
 	u8 meta_type, dot11_hdr = 0;
+	u8 pad_before_data_start;
 	int status = packet->status;
 	enum htc_endpoint_id ept = packet->endpoint;
 	bool is_amsdu, prev_ps, ps_state = false;
@@ -1492,6 +1502,10 @@
 	seq_no = wmi_data_hdr_get_seqno(dhdr);
 	meta_type = wmi_data_hdr_get_meta(dhdr);
 	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
+	pad_before_data_start =
+		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
+			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
+
 	skb_pull(skb, sizeof(struct wmi_data_hdr));
 
 	switch (meta_type) {
@@ -1510,6 +1524,8 @@
 		break;
 	}
 
+	skb_pull(skb, pad_before_data_start);
+
 	if (dot11_hdr)
 		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
 	else if (!is_amsdu)
@@ -1579,7 +1595,8 @@
 			/* aggregation code will handle the skb */
 			return;
 		}
-	}
+	} else if (!is_broadcast_ether_addr(datap->h_dest))
+		vif->net_stats.multicast++;
 
 	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
 }
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 325b122..ec7f1f5 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -21,15 +21,77 @@
 #include "debug.h"
 #include "core.h"
 
+/* constants */
+#define TX_URB_COUNT            32
+#define RX_URB_COUNT            32
+#define ATH6KL_USB_RX_BUFFER_SIZE  1700
+
+/* tx/rx pipes for usb */
+enum ATH6KL_USB_PIPE_ID {
+	ATH6KL_USB_PIPE_TX_CTRL = 0,
+	ATH6KL_USB_PIPE_TX_DATA_LP,
+	ATH6KL_USB_PIPE_TX_DATA_MP,
+	ATH6KL_USB_PIPE_TX_DATA_HP,
+	ATH6KL_USB_PIPE_RX_CTRL,
+	ATH6KL_USB_PIPE_RX_DATA,
+	ATH6KL_USB_PIPE_RX_DATA2,
+	ATH6KL_USB_PIPE_RX_INT,
+	ATH6KL_USB_PIPE_MAX
+};
+
+#define ATH6KL_USB_PIPE_INVALID ATH6KL_USB_PIPE_MAX
+
+struct ath6kl_usb_pipe {
+	struct list_head urb_list_head;
+	struct usb_anchor urb_submitted;
+	u32 urb_alloc;
+	u32 urb_cnt;
+	u32 urb_cnt_thresh;
+	unsigned int usb_pipe_handle;
+	u32 flags;
+	u8 ep_address;
+	u8 logical_pipe_num;
+	struct ath6kl_usb *ar_usb;
+	u16 max_packet_size;
+	struct work_struct io_complete_work;
+	struct sk_buff_head io_comp_queue;
+	struct usb_endpoint_descriptor *ep_desc;
+};
+
+#define ATH6KL_USB_PIPE_FLAG_TX    (1 << 0)
+
 /* usb device object */
 struct ath6kl_usb {
+	/* protects pipe->urb_list_head and pipe->urb_cnt */
+	spinlock_t cs_lock;
+
 	struct usb_device *udev;
 	struct usb_interface *interface;
+	struct ath6kl_usb_pipe pipes[ATH6KL_USB_PIPE_MAX];
 	u8 *diag_cmd_buffer;
 	u8 *diag_resp_buffer;
 	struct ath6kl *ar;
 };
 
+/* usb urb object */
+struct ath6kl_urb_context {
+	struct list_head link;
+	struct ath6kl_usb_pipe *pipe;
+	struct sk_buff *skb;
+	struct ath6kl *ar;
+};
+
+/* USB endpoint definitions */
+#define ATH6KL_USB_EP_ADDR_APP_CTRL_IN          0x81
+#define ATH6KL_USB_EP_ADDR_APP_DATA_IN          0x82
+#define ATH6KL_USB_EP_ADDR_APP_DATA2_IN         0x83
+#define ATH6KL_USB_EP_ADDR_APP_INT_IN           0x84
+
+#define ATH6KL_USB_EP_ADDR_APP_CTRL_OUT         0x01
+#define ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT      0x02
+#define ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT      0x03
+#define ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT      0x04
+
 /* diagnostic command defnitions */
 #define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD        1
 #define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP       2
@@ -55,11 +117,493 @@
 	__le32 value;
 } __packed;
 
+/* function declarations */
+static void ath6kl_usb_recv_complete(struct urb *urb);
+
+#define ATH6KL_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02)
+#define ATH6KL_USB_IS_INT_EP(attr)  (((attr) & 3) == 0x03)
+#define ATH6KL_USB_IS_ISOC_EP(attr)  (((attr) & 3) == 0x01)
+#define ATH6KL_USB_IS_DIR_IN(addr)  ((addr) & 0x80)
+
+/* pipe/urb operations */
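+/*
+ * Each pipe keeps a free list of urb contexts protected by the device's
+ * cs_lock; alloc pops the head of the list and decrements urb_cnt, free
+ * pushes the context back and increments it.
+ */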
+static struct ath6kl_urb_context *
+ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe)
+{
+	struct ath6kl_urb_context *urb_context = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+	if (!list_empty(&pipe->urb_list_head)) {
+		urb_context =
+		    list_first_entry(&pipe->urb_list_head,
+				     struct ath6kl_urb_context, link);
+		list_del(&urb_context->link);
+		pipe->urb_cnt--;
+	}
+	spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
+
+	return urb_context;
+}
+
+static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
+					struct ath6kl_urb_context *urb_context)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+	pipe->urb_cnt++;
+
+	list_add(&urb_context->link, &pipe->urb_list_head);
+	spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
+}
+
+static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context)
+{
+	if (urb_context->skb != NULL) {
+		dev_kfree_skb(urb_context->skb);
+		urb_context->skb = NULL;
+	}
+
+	ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+}
+
+static inline struct ath6kl_usb *ath6kl_usb_priv(struct ath6kl *ar)
+{
+	return ar->hif_priv;
+}
+
+/* pipe resource allocation/cleanup */
+static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
+					   int urb_cnt)
+{
+	struct ath6kl_urb_context *urb_context;
+	int status = 0, i;
+
+	INIT_LIST_HEAD(&pipe->urb_list_head);
+	init_usb_anchor(&pipe->urb_submitted);
+
+	for (i = 0; i < urb_cnt; i++) {
+		urb_context = kzalloc(sizeof(struct ath6kl_urb_context),
+				      GFP_KERNEL);
+		if (urb_context == NULL)
+			/* FIXME: set status to -ENOMEM */
+			break;
+
+		urb_context->pipe = pipe;
+
+		/*
+		 * we only allocate the urb contexts here; the actual URB
+		 * is allocated from the kernel as needed to do a transaction
+		 */
+		pipe->urb_alloc++;
+		ath6kl_usb_free_urb_to_pipe(pipe, urb_context);
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_USB,
+		   "ath6kl usb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n",
+		   pipe->logical_pipe_num, pipe->usb_pipe_handle,
+		   pipe->urb_alloc);
+
+	return status;
+}
+
+static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
+{
+	struct ath6kl_urb_context *urb_context;
+
+	if (pipe->ar_usb == NULL) {
+		/* nothing allocated for this pipe */
+		return;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_USB,
+		   "ath6kl usb: free resources lpipe:%d"
+		   "hpipe:0x%X urbs:%d avail:%d\n",
+		   pipe->logical_pipe_num, pipe->usb_pipe_handle,
+		   pipe->urb_alloc, pipe->urb_cnt);
+
+	if (pipe->urb_alloc != pipe->urb_cnt) {
+		ath6kl_dbg(ATH6KL_DBG_USB,
+			   "ath6kl usb: urb leak! lpipe:%d"
+			   "hpipe:0x%X urbs:%d avail:%d\n",
+			   pipe->logical_pipe_num, pipe->usb_pipe_handle,
+			   pipe->urb_alloc, pipe->urb_cnt);
+	}
+
+	while (true) {
+		urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe);
+		if (urb_context == NULL)
+			break;
+		kfree(urb_context);
+	}
+
+}
+
+static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
+{
+	int i;
+
+	for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
+		ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
+
+}
+
+static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
+					  u8 ep_address, int *urb_count)
+{
+	u8 pipe_num = ATH6KL_USB_PIPE_INVALID;
+
+	switch (ep_address) {
+	case ATH6KL_USB_EP_ADDR_APP_CTRL_IN:
+		pipe_num = ATH6KL_USB_PIPE_RX_CTRL;
+		*urb_count = RX_URB_COUNT;
+		break;
+	case ATH6KL_USB_EP_ADDR_APP_DATA_IN:
+		pipe_num = ATH6KL_USB_PIPE_RX_DATA;
+		*urb_count = RX_URB_COUNT;
+		break;
+	case ATH6KL_USB_EP_ADDR_APP_INT_IN:
+		pipe_num = ATH6KL_USB_PIPE_RX_INT;
+		*urb_count = RX_URB_COUNT;
+		break;
+	case ATH6KL_USB_EP_ADDR_APP_DATA2_IN:
+		pipe_num = ATH6KL_USB_PIPE_RX_DATA2;
+		*urb_count = RX_URB_COUNT;
+		break;
+	case ATH6KL_USB_EP_ADDR_APP_CTRL_OUT:
+		pipe_num = ATH6KL_USB_PIPE_TX_CTRL;
+		*urb_count = TX_URB_COUNT;
+		break;
+	case ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT:
+		pipe_num = ATH6KL_USB_PIPE_TX_DATA_LP;
+		*urb_count = TX_URB_COUNT;
+		break;
+	case ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT:
+		pipe_num = ATH6KL_USB_PIPE_TX_DATA_MP;
+		*urb_count = TX_URB_COUNT;
+		break;
+	case ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT:
+		pipe_num = ATH6KL_USB_PIPE_TX_DATA_HP;
+		*urb_count = TX_URB_COUNT;
+		break;
+	default:
+		/* note: there may be endpoints not currently used */
+		break;
+	}
+
+	return pipe_num;
+}
+
+static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb)
+{
+	struct usb_interface *interface = ar_usb->interface;
+	struct usb_host_interface *iface_desc = interface->cur_altsetting;
+	struct usb_endpoint_descriptor *endpoint;
+	struct ath6kl_usb_pipe *pipe;
+	int i, urbcount, status = 0;
+	u8 pipe_num;
+
+	ath6kl_dbg(ATH6KL_DBG_USB, "setting up USB Pipes using interface\n");
+
+	/* walk descriptors and set up pipes */
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+		endpoint = &iface_desc->endpoint[i].desc;
+
+		if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) {
+			ath6kl_dbg(ATH6KL_DBG_USB,
+				   "%s Bulk Ep:0x%2.2X maxpktsz:%d\n",
+				   ATH6KL_USB_IS_DIR_IN
+				   (endpoint->bEndpointAddress) ?
+				   "RX" : "TX", endpoint->bEndpointAddress,
+				   le16_to_cpu(endpoint->wMaxPacketSize));
+		} else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) {
+			ath6kl_dbg(ATH6KL_DBG_USB,
+				   "%s Int Ep:0x%2.2X maxpktsz:%d interval:%d\n",
+				   ATH6KL_USB_IS_DIR_IN
+				   (endpoint->bEndpointAddress) ?
+				   "RX" : "TX", endpoint->bEndpointAddress,
+				   le16_to_cpu(endpoint->wMaxPacketSize),
+				   endpoint->bInterval);
+		} else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
+			/* TODO for ISO */
+			ath6kl_dbg(ATH6KL_DBG_USB,
+				   "%s ISOC Ep:0x%2.2X maxpktsz:%d interval:%d\n",
+				   ATH6KL_USB_IS_DIR_IN
+				   (endpoint->bEndpointAddress) ?
+				   "RX" : "TX", endpoint->bEndpointAddress,
+				   le16_to_cpu(endpoint->wMaxPacketSize),
+				   endpoint->bInterval);
+		}
+		urbcount = 0;
+
+		pipe_num =
+		    ath6kl_usb_get_logical_pipe_num(ar_usb,
+						    endpoint->bEndpointAddress,
+						    &urbcount);
+		if (pipe_num == ATH6KL_USB_PIPE_INVALID)
+			continue;
+
+		pipe = &ar_usb->pipes[pipe_num];
+		if (pipe->ar_usb != NULL) {
+			/* hmmm..pipe was already setup */
+			continue;
+		}
+
+		pipe->ar_usb = ar_usb;
+		pipe->logical_pipe_num = pipe_num;
+		pipe->ep_address = endpoint->bEndpointAddress;
+		pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize);
+
+		if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) {
+			if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
+				pipe->usb_pipe_handle =
+				    usb_rcvbulkpipe(ar_usb->udev,
+						    pipe->ep_address);
+			} else {
+				pipe->usb_pipe_handle =
+				    usb_sndbulkpipe(ar_usb->udev,
+						    pipe->ep_address);
+			}
+		} else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) {
+			if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
+				pipe->usb_pipe_handle =
+				    usb_rcvintpipe(ar_usb->udev,
+						   pipe->ep_address);
+			} else {
+				pipe->usb_pipe_handle =
+				    usb_sndintpipe(ar_usb->udev,
+						   pipe->ep_address);
+			}
+		} else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
+			/* TODO for ISO */
+			if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
+				pipe->usb_pipe_handle =
+				    usb_rcvisocpipe(ar_usb->udev,
+						    pipe->ep_address);
+			} else {
+				pipe->usb_pipe_handle =
+				    usb_sndisocpipe(ar_usb->udev,
+						    pipe->ep_address);
+			}
+		}
+
+		pipe->ep_desc = endpoint;
+
+		if (!ATH6KL_USB_IS_DIR_IN(pipe->ep_address))
+			pipe->flags |= ATH6KL_USB_PIPE_FLAG_TX;
+
+		status = ath6kl_usb_alloc_pipe_resources(pipe, urbcount);
+		if (status != 0)
+			break;
+	}
+
+	return status;
+}
+
+/* pipe operations */
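+/*
+ * Keep the receive pipe primed: for every free urb context allocate an skb
+ * and a bulk urb, anchor and submit it; completed transfers are handled by
+ * ath6kl_usb_recv_complete().
+ */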
+static void ath6kl_usb_post_recv_transfers(struct ath6kl_usb_pipe *recv_pipe,
+					   int buffer_length)
+{
+	struct ath6kl_urb_context *urb_context;
+	struct urb *urb;
+	int usb_status;
+
+	while (true) {
+		urb_context = ath6kl_usb_alloc_urb_from_pipe(recv_pipe);
+		if (urb_context == NULL)
+			break;
+
+		urb_context->skb = dev_alloc_skb(buffer_length);
+		if (urb_context->skb == NULL)
+			goto err_cleanup_urb;
+
+		urb = usb_alloc_urb(0, GFP_ATOMIC);
+		if (urb == NULL)
+			goto err_cleanup_urb;
+
+		usb_fill_bulk_urb(urb,
+				  recv_pipe->ar_usb->udev,
+				  recv_pipe->usb_pipe_handle,
+				  urb_context->skb->data,
+				  buffer_length,
+				  ath6kl_usb_recv_complete, urb_context);
+
+		ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+			   "ath6kl usb: bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes buf:0x%p\n",
+			   recv_pipe->logical_pipe_num,
+			   recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
+			   buffer_length, urb_context->skb);
+
+		usb_anchor_urb(urb, &recv_pipe->urb_submitted);
+		usb_status = usb_submit_urb(urb, GFP_ATOMIC);
+
+		if (usb_status) {
+			ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+				   "ath6kl usb : usb bulk recv failed %d\n",
+				   usb_status);
+			usb_unanchor_urb(urb);
+			usb_free_urb(urb);
+			goto err_cleanup_urb;
+		}
+		usb_free_urb(urb);
+	}
+	return;
+
+err_cleanup_urb:
+	ath6kl_usb_cleanup_recv_urb(urb_context);
+	return;
+}
+
+static void ath6kl_usb_flush_all(struct ath6kl_usb *ar_usb)
+{
+	int i;
+
+	for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) {
+		if (ar_usb->pipes[i].ar_usb != NULL)
+			usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted);
+	}
+
+	/*
+	 * Flushing any pending I/O may schedule work; this call will block
+	 * until all scheduled work runs to completion.
+	 */
+	flush_scheduled_work();
+}
+
+static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
+{
+	/*
+	 * note: control pipe is no longer used
+	 * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_cnt_thresh =
+	 *      ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_alloc/2;
+	 * ath6kl_usb_post_recv_transfers(&ar_usb->
+	 *		pipes[ATH6KL_USB_PIPE_RX_CTRL],
+	 *		ATH6KL_USB_RX_BUFFER_SIZE);
+	 */
+
+	ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh =
+	    ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_alloc / 2;
+	ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA],
+				       ATH6KL_USB_RX_BUFFER_SIZE);
+}
+
+/* hif usb rx/tx completion functions */
+static void ath6kl_usb_recv_complete(struct urb *urb)
+{
+	struct ath6kl_urb_context *urb_context = urb->context;
+	struct ath6kl_usb_pipe *pipe = urb_context->pipe;
+	struct sk_buff *skb = NULL;
+	int status = 0;
+
+	ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+		   "%s: recv pipe: %d, stat:%d, len:%d urb:0x%p\n", __func__,
+		   pipe->logical_pipe_num, urb->status, urb->actual_length,
+		   urb);
+
+	if (urb->status != 0) {
+		status = -EIO;
+		switch (urb->status) {
+		case -ECONNRESET:
+		case -ENOENT:
+		case -ESHUTDOWN:
+			/*
+			 * no need to spew these errors when device
+			 * removed or urb killed due to driver shutdown
+			 */
+			status = -ECANCELED;
+			break;
+		default:
+			ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+				   "%s recv pipe: %d (ep:0x%2.2X), failed:%d\n",
+				   __func__, pipe->logical_pipe_num,
+				   pipe->ep_address, urb->status);
+			break;
+		}
+		goto cleanup_recv_urb;
+	}
+
+	if (urb->actual_length == 0)
+		goto cleanup_recv_urb;
+
+	skb = urb_context->skb;
+
+	/* we are going to pass it up */
+	urb_context->skb = NULL;
+	skb_put(skb, urb->actual_length);
+
+	/* note: queue implements a lock */
+	skb_queue_tail(&pipe->io_comp_queue, skb);
+	schedule_work(&pipe->io_complete_work);
+
+cleanup_recv_urb:
+	ath6kl_usb_cleanup_recv_urb(urb_context);
+
+	if (status == 0 &&
+	    pipe->urb_cnt >= pipe->urb_cnt_thresh) {
+		/* our free urbs are piling up, post more transfers */
+		ath6kl_usb_post_recv_transfers(pipe, ATH6KL_USB_RX_BUFFER_SIZE);
+	}
+}
+
+static void ath6kl_usb_usb_transmit_complete(struct urb *urb)
+{
+	struct ath6kl_urb_context *urb_context = urb->context;
+	struct ath6kl_usb_pipe *pipe = urb_context->pipe;
+	struct sk_buff *skb;
+
+	ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+		   "%s: pipe: %d, stat:%d, len:%d\n",
+		   __func__, pipe->logical_pipe_num, urb->status,
+		   urb->actual_length);
+
+	if (urb->status != 0) {
+		ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+			   "%s:  pipe: %d, failed:%d\n",
+			   __func__, pipe->logical_pipe_num, urb->status);
+	}
+
+	skb = urb_context->skb;
+	urb_context->skb = NULL;
+	ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+
+	/* note: queue implements a lock */
+	skb_queue_tail(&pipe->io_comp_queue, skb);
+	schedule_work(&pipe->io_complete_work);
+}
+
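+/*
+ * Deferred completion work: drain the pipe's io_comp_queue in process
+ * context and hand each skb to the core TX or RX completion path based on
+ * the pipe's direction flag.
+ */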
+static void ath6kl_usb_io_comp_work(struct work_struct *work)
+{
+	struct ath6kl_usb_pipe *pipe = container_of(work,
+						    struct ath6kl_usb_pipe,
+						    io_complete_work);
+	struct ath6kl_usb *ar_usb;
+	struct sk_buff *skb;
+
+	ar_usb = pipe->ar_usb;
+
+	while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
+		if (pipe->flags & ATH6KL_USB_PIPE_FLAG_TX) {
+			ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+				   "ath6kl usb xmit callback buf:0x%p\n", skb);
+			ath6kl_core_tx_complete(ar_usb->ar, skb);
+		} else {
+			ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+				   "ath6kl usb recv callback buf:0x%p\n", skb);
+			ath6kl_core_rx_complete(ar_usb->ar, skb,
+						pipe->logical_pipe_num);
+		}
+	}
+}
+
 #define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write))
 #define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read))
 
 static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
 {
+	ath6kl_usb_flush_all(ar_usb);
+
+	ath6kl_usb_cleanup_pipe_resources(ar_usb);
+
 	usb_set_intfdata(ar_usb->interface, NULL);
 
 	kfree(ar_usb->diag_cmd_buffer);
@@ -70,19 +614,28 @@
 
 static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
 {
-	struct ath6kl_usb *ar_usb = NULL;
 	struct usb_device *dev = interface_to_usbdev(interface);
+	struct ath6kl_usb *ar_usb;
+	struct ath6kl_usb_pipe *pipe;
 	int status = 0;
+	int i;
 
 	ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL);
 	if (ar_usb == NULL)
 		goto fail_ath6kl_usb_create;
 
-	memset(ar_usb, 0, sizeof(struct ath6kl_usb));
 	usb_set_intfdata(interface, ar_usb);
+	spin_lock_init(&(ar_usb->cs_lock));
 	ar_usb->udev = dev;
 	ar_usb->interface = interface;
 
+	for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) {
+		pipe = &ar_usb->pipes[i];
+		INIT_WORK(&pipe->io_complete_work,
+			  ath6kl_usb_io_comp_work);
+		skb_queue_head_init(&pipe->io_comp_queue);
+	}
+
 	ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL);
 	if (ar_usb->diag_cmd_buffer == NULL) {
 		status = -ENOMEM;
@@ -96,6 +649,8 @@
 		goto fail_ath6kl_usb_create;
 	}
 
+	status = ath6kl_usb_setup_pipe_resources(ar_usb);
+
 fail_ath6kl_usb_create:
 	if (status != 0) {
 		ath6kl_usb_destroy(ar_usb);
@@ -114,11 +669,177 @@
 
 	ath6kl_stop_txrx(ar_usb->ar);
 
+	/* Delay to wait for the target to reboot */
+	mdelay(20);
 	ath6kl_core_cleanup(ar_usb->ar);
-
 	ath6kl_usb_destroy(ar_usb);
 }
 
+/* exported hif usb APIs for htc pipe */
+static void hif_start(struct ath6kl *ar)
+{
+	struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+	int i;
+
+	ath6kl_usb_start_recv_pipes(device);
+
+	/* set the TX resource avail threshold for each TX pipe */
+	for (i = ATH6KL_USB_PIPE_TX_CTRL;
+	     i <= ATH6KL_USB_PIPE_TX_DATA_HP; i++) {
+		device->pipes[i].urb_cnt_thresh =
+		    device->pipes[i].urb_alloc / 2;
+	}
+}
+
+static int ath6kl_usb_send(struct ath6kl *ar, u8 PipeID,
+			   struct sk_buff *hdr_skb, struct sk_buff *skb)
+{
+	struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+	struct ath6kl_usb_pipe *pipe = &device->pipes[PipeID];
+	struct ath6kl_urb_context *urb_context;
+	int usb_status, status = 0;
+	struct urb *urb;
+	u8 *data;
+	u32 len;
+
+	ath6kl_dbg(ATH6KL_DBG_USB_BULK, "+%s pipe : %d, buf:0x%p\n",
+		   __func__, PipeID, skb);
+
+	urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe);
+
+	if (urb_context == NULL) {
+		/*
+		 * TODO: it is possible to run out of urbs if
+		 * 2 endpoints map to the same pipe ID
+		 */
+		ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+			   "%s pipe:%d no urbs left. URB Cnt : %d\n",
+			   __func__, PipeID, pipe->urb_cnt);
+		status = -ENOMEM;
+		goto fail_hif_send;
+	}
+
+	urb_context->skb = skb;
+
+	data = skb->data;
+	len = skb->len;
+
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (urb == NULL) {
+		status = -ENOMEM;
+		ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
+					    urb_context);
+		goto fail_hif_send;
+	}
+
+	usb_fill_bulk_urb(urb,
+			  device->udev,
+			  pipe->usb_pipe_handle,
+			  data,
+			  len,
+			  ath6kl_usb_usb_transmit_complete, urb_context);
+
+	if ((len % pipe->max_packet_size) == 0) {
+		/* hit a max packet boundary on this pipe */
+		urb->transfer_flags |= URB_ZERO_PACKET;
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+		   "athusb bulk send submit:%d, 0x%X (ep:0x%2.2X), %d bytes\n",
+		   pipe->logical_pipe_num, pipe->usb_pipe_handle,
+		   pipe->ep_address, len);
+
+	usb_anchor_urb(urb, &pipe->urb_submitted);
+	usb_status = usb_submit_urb(urb, GFP_ATOMIC);
+
+	if (usb_status) {
+		ath6kl_dbg(ATH6KL_DBG_USB_BULK,
+			   "ath6kl usb : usb bulk transmit failed %d\n",
+			   usb_status);
+		usb_unanchor_urb(urb);
+		ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
+					    urb_context);
+		status = -EINVAL;
+	}
+	usb_free_urb(urb);
+
+fail_hif_send:
+	return status;
+}
+
+static void hif_stop(struct ath6kl *ar)
+{
+	struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+
+	ath6kl_usb_flush_all(device);
+}
+
+static void ath6kl_usb_get_default_pipe(struct ath6kl *ar,
+					u8 *ul_pipe, u8 *dl_pipe)
+{
+	*ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
+	*dl_pipe = ATH6KL_USB_PIPE_RX_CTRL;
+}
+
+static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id,
+				       u8 *ul_pipe, u8 *dl_pipe)
+{
+	int status = 0;
+
+	switch (svc_id) {
+	case HTC_CTRL_RSVD_SVC:
+	case WMI_CONTROL_SVC:
+		*ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
+		/* due to large control packets, shift to data pipe */
+		*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+		break;
+	case WMI_DATA_BE_SVC:
+	case WMI_DATA_BK_SVC:
+		*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
+		/*
+		 * Disable rxdata2 directly; it will be enabled
+		 * if FW enables rxdata2
+		 */
+		*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+		break;
+	case WMI_DATA_VI_SVC:
+		*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
+		/*
+		 * Disable rxdata2 directly; it will be enabled
+		 * if FW enables rxdata2
+		 */
+		*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+		break;
+	case WMI_DATA_VO_SVC:
+		*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_HP;
+		/*
+		 * Disable rxdata2 directly; it will be enabled
+		 * if FW enables rxdata2
+		 */
+		*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
+		break;
+	default:
+		status = -EPERM;
+		break;
+	}
+
+	return status;
+}
+
+static u16 ath6kl_usb_get_free_queue_number(struct ath6kl *ar, u8 pipe_id)
+{
+	struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+
+	return device->pipes[pipe_id].urb_cnt;
+}
+
+static void hif_detach_htc(struct ath6kl *ar)
+{
+	struct ath6kl_usb *device = ath6kl_usb_priv(ar);
+
+	ath6kl_usb_flush_all(device);
+}
+
 static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
 				   u8 req, u16 value, u16 index, void *data,
 				   u32 size)
@@ -301,14 +1022,21 @@
 
 static int ath6kl_usb_power_on(struct ath6kl *ar)
 {
+	hif_start(ar);
 	return 0;
 }
 
 static int ath6kl_usb_power_off(struct ath6kl *ar)
 {
+	hif_detach_htc(ar);
 	return 0;
 }
 
+static void ath6kl_usb_stop(struct ath6kl *ar)
+{
+	hif_stop(ar);
+}
+
 static const struct ath6kl_hif_ops ath6kl_usb_ops = {
 	.diag_read32 = ath6kl_usb_diag_read32,
 	.diag_write32 = ath6kl_usb_diag_write32,
@@ -316,6 +1044,11 @@
 	.bmi_write = ath6kl_usb_bmi_write,
 	.power_on = ath6kl_usb_power_on,
 	.power_off = ath6kl_usb_power_off,
+	.stop = ath6kl_usb_stop,
+	.pipe_send = ath6kl_usb_send,
+	.pipe_get_default = ath6kl_usb_get_default_pipe,
+	.pipe_map_service = ath6kl_usb_map_service_pipe,
+	.pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
 };
 
 /* ath6kl usb driver registered functions */
@@ -368,7 +1101,7 @@
 
 	ar_usb->ar = ar;
 
-	ret = ath6kl_core_init(ar);
+	ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_PIPE);
 	if (ret) {
 		ath6kl_err("Failed to init ath6kl core: %d\n", ret);
 		goto err_core_free;
@@ -392,6 +1125,46 @@
 	ath6kl_usb_device_detached(interface);
 }
 
+#ifdef CONFIG_PM
+
+static int ath6kl_usb_suspend(struct usb_interface *interface,
+			      pm_message_t message)
+{
+	struct ath6kl_usb *device;
+	device = usb_get_intfdata(interface);
+
+	ath6kl_usb_flush_all(device);
+	return 0;
+}
+
+static int ath6kl_usb_resume(struct usb_interface *interface)
+{
+	struct ath6kl_usb *device;
+	device = usb_get_intfdata(interface);
+
+	ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA],
+				       ATH6KL_USB_RX_BUFFER_SIZE);
+	ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA2],
+				       ATH6KL_USB_RX_BUFFER_SIZE);
+
+	return 0;
+}
+
+static int ath6kl_usb_reset_resume(struct usb_interface *intf)
+{
+	if (usb_get_intfdata(intf))
+		ath6kl_usb_remove(intf);
+	return 0;
+}
+
+#else
+
+#define ath6kl_usb_suspend NULL
+#define ath6kl_usb_resume NULL
+#define ath6kl_usb_reset_resume NULL
+
+#endif
+
 /* table of devices that work with this driver */
 static struct usb_device_id ath6kl_usb_ids[] = {
 	{USB_DEVICE(0x0cf3, 0x9374)},
@@ -403,8 +1176,12 @@
 static struct usb_driver ath6kl_usb_driver = {
 	.name = "ath6kl_usb",
 	.probe = ath6kl_usb_probe,
+	.suspend = ath6kl_usb_suspend,
+	.resume = ath6kl_usb_resume,
+	.reset_resume = ath6kl_usb_reset_resume,
 	.disconnect = ath6kl_usb_remove,
 	.id_table = ath6kl_usb_ids,
+	.supports_autosuspend = true,
 };
 
 static int ath6kl_usb_init(void)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 2b44233..7c8a997 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2882,6 +2882,43 @@
 	return ret;
 }
 
+int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
+			     enum ieee80211_band band,
+			     struct ath6kl_htcap *htcap)
+{
+	struct sk_buff *skb;
+	struct wmi_set_htcap_cmd *cmd;
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_set_htcap_cmd *) skb->data;
+
+	/*
+	 * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely
+	 * this will be changed in firmware. If at all there is any change in
+	 * band value, the host needs to be fixed.
+	 */
+	cmd->band = band;
+	cmd->ht_enable = !!htcap->ht_enable;
+	cmd->ht20_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_20);
+	cmd->ht40_supported =
+		!!(htcap->cap_info & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+	cmd->ht40_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_40);
+	cmd->intolerant_40mhz =
+		!!(htcap->cap_info & IEEE80211_HT_CAP_40MHZ_INTOLERANT);
+	cmd->max_ampdu_len_exp = htcap->ampdu_factor;
+
+	ath6kl_dbg(ATH6KL_DBG_WMI,
+		   "Set htcap: band:%d ht_enable:%d 40mhz:%d sgi_20mhz:%d sgi_40mhz:%d 40mhz_intolerant:%d ampdu_len_exp:%d\n",
+		   cmd->band, cmd->ht_enable, cmd->ht40_supported,
+		   cmd->ht20_sgi, cmd->ht40_sgi, cmd->intolerant_40mhz,
+		   cmd->max_ampdu_len_exp);
+	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_HT_CAP_CMDID,
+				   NO_SYNC_WMIFLAG);
+}
+
 int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
 {
 	struct sk_buff *skb;
@@ -3032,6 +3069,9 @@
 	cm->reason = cpu_to_le16(reason);
 	cm->cmd = cmd;
 
+	ath6kl_dbg(ATH6KL_DBG_WMI, "ap_set_mlme: cmd=%d reason=%d\n", cm->cmd,
+		   cm->reason);
+
 	return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
 				   NO_SYNC_WMIFLAG);
 }
@@ -3181,6 +3221,29 @@
 				   NO_SYNC_WMIFLAG);
 }
 
+int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
+			  const u8 *ie_info, u8 ie_len)
+{
+	struct sk_buff *skb;
+	struct wmi_set_ie_cmd *p;
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
+	if (!skb)
+		return -ENOMEM;
+
+	ath6kl_dbg(ATH6KL_DBG_WMI, "set_ie_cmd: ie_id=%u ie_ie_field=%u ie_len=%u\n",
+		   ie_id, ie_field, ie_len);
+	p = (struct wmi_set_ie_cmd *) skb->data;
+	p->ie_id = ie_id;
+	p->ie_field = ie_field;
+	p->ie_len = ie_len;
+	if (ie_info && ie_len > 0)
+		memcpy(p->ie_info, ie_info, ie_len);
+
+	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IE_CMDID,
+				   NO_SYNC_WMIFLAG);
+}
+
 int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
 {
 	struct sk_buff *skb;
@@ -3392,6 +3455,23 @@
 				     WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
 }
 
+int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout)
+{
+	struct sk_buff *skb;
+	struct wmi_set_inact_period_cmd *cmd;
+
+	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_set_inact_period_cmd *) skb->data;
+	cmd->inact_period = cpu_to_le32(inact_timeout);
+	cmd->num_null_func = 0;
+
+	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_CONN_INACT_CMDID,
+				   NO_SYNC_WMIFLAG);
+}
+
 static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
 {
 	struct wmix_cmd_hdr *cmd;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 4092e3e..d3d2ab5 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -182,6 +182,9 @@
 #define WMI_DATA_HDR_META_MASK      0x7
 #define WMI_DATA_HDR_META_SHIFT     13
 
+#define WMI_DATA_HDR_PAD_BEFORE_DATA_MASK               0xFF
+#define WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT              0x8
+
 /* Macros for operating on WMI_DATA_HDR (info3) field */
 #define WMI_DATA_HDR_IF_IDX_MASK    0xF
 
@@ -423,6 +426,7 @@
 	WMI_SET_FRAMERATES_CMDID,
 	WMI_SET_AP_PS_CMDID,
 	WMI_SET_QOS_SUPP_CMDID,
+	WMI_SET_IE_CMDID,
 
 	/* WMI_THIN_RESERVED_... mark the start and end
 	 * values for WMI_THIN_RESERVED command IDs. These
@@ -629,6 +633,11 @@
 	WMI_NUM_MGMT_FRAME
 };
 
+enum wmi_ie_field_type {
+	WMI_RSN_IE_CAPB	= 0x1,
+	WMI_IE_FULL	= 0xFF,  /* indicates full IE */
+};
+
 /* WMI_CONNECT_CMDID  */
 enum network_type {
 	INFRA_NETWORK = 0x01,
@@ -1268,6 +1277,16 @@
 	u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
 } __packed;
 
+struct wmi_set_htcap_cmd {
+	u8 band;
+	u8 ht_enable;
+	u8 ht40_supported;
+	u8 ht20_sgi;
+	u8 ht40_sgi;
+	u8 intolerant_40mhz;
+	u8 max_ampdu_len_exp;
+} __packed;
+
 /* Command Replies */
 
 /* WMI_GET_CHANNEL_LIST_CMDID reply */
@@ -1913,6 +1932,14 @@
 	u8 ie_info[0];
 } __packed;
 
+struct wmi_set_ie_cmd {
+	u8 ie_id;
+	u8 ie_field;	/* enum wmi_ie_field_type */
+	u8 ie_len;
+	u8 reserved;
+	u8 ie_info[0];
+} __packed;
+
 /* Notify the WSC registration status to the target */
 #define WSC_REG_ACTIVE     1
 #define WSC_REG_INACTIVE   0
@@ -2141,6 +2168,11 @@
 	u8 hidden_ssid;
 } __packed;
 
+struct wmi_set_inact_period_cmd {
+	__le32 inact_period;
+	u8 num_null_func;
+} __packed;
+
 /* AP mode events */
 struct wmi_ap_set_apsd_cmd {
 	u8 enable;
@@ -2465,6 +2497,9 @@
 int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
 int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
 				 u8 keep_alive_intvl);
+int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
+			     enum ieee80211_band band,
+			     struct ath6kl_htcap *htcap);
 int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
 
 s32 ath6kl_wmi_get_rate(s8 rate_index);
@@ -2515,6 +2550,9 @@
 int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
 			     const u8 *ie, u8 ie_len);
 
+int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
+			  const u8 *ie_info, u8 ie_len);
+
 /* P2P */
 int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
 
@@ -2538,6 +2576,8 @@
 int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
 			     const u8 *ie, u8 ie_len);
 
+int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout);
+
 void ath6kl_wmi_sscan_timer(unsigned long ptr);
 
 struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 27d95fe..3f0b8472 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -11,7 +11,10 @@
 ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
 ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
 ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
-ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
+		dfs.o \
+		dfs_pattern_detector.o \
+		dfs_pri_detector.o
 
 obj-$(CONFIG_ATH9K) += ath9k.o
 
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 7e0ea4e..b4c77f9 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -46,8 +46,8 @@
 	{  5,  4,  1  }, /* lvl 5 */
 	{  6,  5,  1  }, /* lvl 6 */
 	{  7,  6,  1  }, /* lvl 7 */
-	{  7,  7,  1  }, /* lvl 8 */
-	{  7,  8,  0  }  /* lvl 9 */
+	{  7,  6,  0  }, /* lvl 8 */
+	{  7,  7,  0  }  /* lvl 9 */
 };
 #define ATH9K_ANI_OFDM_NUM_LEVEL \
 	ARRAY_SIZE(ofdm_level_table)
@@ -91,8 +91,8 @@
 	{  4,  0  }, /* lvl 4 */
 	{  5,  0  }, /* lvl 5 */
 	{  6,  0  }, /* lvl 6 */
-	{  7,  0  }, /* lvl 7 (only for high rssi) */
-	{  8,  0  }  /* lvl 8 (only for high rssi) */
+	{  6,  0  }, /* lvl 7 (only for high rssi) */
+	{  7,  0  }  /* lvl 8 (only for high rssi) */
 };
 
 #define ATH9K_ANI_CCK_NUM_LEVEL \
@@ -274,7 +274,9 @@
 		aniState->rssiThrLow, aniState->rssiThrHigh);
 
 	if (aniState->update_ani)
-		aniState->ofdmNoiseImmunityLevel = immunityLevel;
+		aniState->ofdmNoiseImmunityLevel =
+			(immunityLevel > ATH9K_ANI_OFDM_DEF_LEVEL) ?
+			immunityLevel : ATH9K_ANI_OFDM_DEF_LEVEL;
 
 	entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
 	entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -290,16 +292,9 @@
 				     ATH9K_ANI_FIRSTEP_LEVEL,
 				     entry_ofdm->fir_step_level);
 
-	if ((ah->opmode != NL80211_IFTYPE_STATION &&
-	     ah->opmode != NL80211_IFTYPE_ADHOC) ||
-	    aniState->noiseFloor <= aniState->rssiThrHigh) {
-		if (aniState->ofdmWeakSigDetectOff)
-			/* force on ofdm weak sig detect */
-			ath9k_hw_ani_control(ah,
-				ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
-					     true);
-		else if (aniState->ofdmWeakSigDetectOff ==
-			 entry_ofdm->ofdm_weak_signal_on)
+	if ((aniState->noiseFloor >= aniState->rssiThrHigh) &&
+	    (!aniState->ofdmWeakSigDetectOff !=
+	     entry_ofdm->ofdm_weak_signal_on)) {
 			ath9k_hw_ani_control(ah,
 				ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
 				entry_ofdm->ofdm_weak_signal_on);
@@ -347,7 +342,9 @@
 		immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
 
 	if (aniState->update_ani)
-		aniState->cckNoiseImmunityLevel = immunityLevel;
+		aniState->cckNoiseImmunityLevel =
+			(immunityLevel > ATH9K_ANI_CCK_DEF_LEVEL) ?
+			immunityLevel : ATH9K_ANI_CCK_DEF_LEVEL;
 
 	entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
 	entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -717,26 +714,30 @@
 		ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
 		cckPhyErrRate, aniState->ofdmsTurn);
 
-	if (aniState->listenTime > 5 * ah->aniperiod) {
-		if (ofdmPhyErrRate <= ah->config.ofdm_trig_low &&
-		    cckPhyErrRate <= ah->config.cck_trig_low) {
+	if (aniState->listenTime > ah->aniperiod) {
+		if (cckPhyErrRate < ah->config.cck_trig_low &&
+		    ((ofdmPhyErrRate < ah->config.ofdm_trig_low &&
+		      aniState->ofdmNoiseImmunityLevel <
+		      ATH9K_ANI_OFDM_DEF_LEVEL) ||
+		     (ofdmPhyErrRate < ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI &&
+		      aniState->ofdmNoiseImmunityLevel >=
+		      ATH9K_ANI_OFDM_DEF_LEVEL))) {
 			ath9k_hw_ani_lower_immunity(ah);
 			aniState->ofdmsTurn = !aniState->ofdmsTurn;
-		}
-		ath9k_ani_restart(ah);
-	} else if (aniState->listenTime > ah->aniperiod) {
-		/* check to see if need to raise immunity */
-		if (ofdmPhyErrRate > ah->config.ofdm_trig_high &&
-		    (cckPhyErrRate <= ah->config.cck_trig_high ||
-		     aniState->ofdmsTurn)) {
+		} else if ((ofdmPhyErrRate > ah->config.ofdm_trig_high &&
+			    aniState->ofdmNoiseImmunityLevel >=
+			    ATH9K_ANI_OFDM_DEF_LEVEL) ||
+			   (ofdmPhyErrRate >
+			    ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI &&
+			    aniState->ofdmNoiseImmunityLevel <
+			    ATH9K_ANI_OFDM_DEF_LEVEL)) {
 			ath9k_hw_ani_ofdm_err_trigger(ah);
-			ath9k_ani_restart(ah);
 			aniState->ofdmsTurn = false;
 		} else if (cckPhyErrRate > ah->config.cck_trig_high) {
 			ath9k_hw_ani_cck_err_trigger(ah);
-			ath9k_ani_restart(ah);
 			aniState->ofdmsTurn = true;
 		}
+		ath9k_ani_restart(ah);
 	}
 }
 EXPORT_SYMBOL(ath9k_hw_ani_monitor);
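
The two ternary expressions added above reduce to a lower clamp on the stored
immunity level. An equivalent sketch using the kernel's max_t() helper:

	/* Sketch: keep the OFDM noise-immunity level at or above the INI
	 * default whenever ANI updates are enabled (same effect as the
	 * ternary in the hunk above).
	 */
	if (aniState->update_ani)
		aniState->ofdmNoiseImmunityLevel =
			max_t(int, immunityLevel, ATH9K_ANI_OFDM_DEF_LEVEL);
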
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 83029d6..72e2b87 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -25,11 +25,13 @@
 
 /* units are errors per second */
 #define ATH9K_ANI_OFDM_TRIG_HIGH_OLD      500
-#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW      1000
+#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW      3500
+#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
 
 /* units are errors per second */
 #define ATH9K_ANI_OFDM_TRIG_LOW_OLD       200
 #define ATH9K_ANI_OFDM_TRIG_LOW_NEW       400
+#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
 
 /* units are errors per second */
 #define ATH9K_ANI_CCK_TRIG_HIGH_OLD       200
@@ -53,7 +55,7 @@
 #define ATH9K_ANI_RSSI_THR_LOW            7
 
 #define ATH9K_ANI_PERIOD_OLD              100
-#define ATH9K_ANI_PERIOD_NEW              1000
+#define ATH9K_ANI_PERIOD_NEW              300
 
 /* in ms */
 #define ATH9K_ANI_POLLINTERVAL_OLD        100
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index d7d8e91..c7492c6 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -245,7 +245,6 @@
 	REG_WRITE(ah, AR_PHY(0x37), reg32);
 
 	ah->curchan = chan;
-	ah->curchan_rad_index = -1;
 
 	return 0;
 }
@@ -619,19 +618,10 @@
 	u32 synthDelay;
 
 	synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
-	if (IS_CHAN_B(chan))
-		synthDelay = (4 * synthDelay) / 22;
-	else
-		synthDelay /= 10;
-
-	if (IS_CHAN_HALF_RATE(chan))
-		synthDelay *= 2;
-	else if (IS_CHAN_QUARTER_RATE(chan))
-		synthDelay *= 4;
 
 	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
 
-	udelay(synthDelay + BASE_ACTIVATE_DELAY);
+	ath9k_hw_synth_delay(ah, chan, synthDelay);
 }
 
 static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
@@ -869,7 +859,7 @@
 	ar5008_hw_set_channel_regs(ah, chan);
 	ar5008_hw_init_chain_masks(ah);
 	ath9k_olc_init(ah);
-	ath9k_hw_apply_txpower(ah, chan);
+	ath9k_hw_apply_txpower(ah, chan, false);
 
 	/* Write analog registers */
 	if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
@@ -949,12 +939,8 @@
 static void ar5008_hw_rfbus_done(struct ath_hw *ah)
 {
 	u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
-	if (IS_CHAN_B(ah->curchan))
-		synthDelay = (4 * synthDelay) / 22;
-	else
-		synthDelay /= 10;
 
-	udelay(synthDelay + BASE_ACTIVATE_DELAY);
+	ath9k_hw_synth_delay(ah, ah->curchan, synthDelay);
 
 	REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
 }
@@ -1047,46 +1033,8 @@
 		break;
 	}
 	case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
-		static const int m1ThreshLow[] = { 127, 50 };
-		static const int m2ThreshLow[] = { 127, 40 };
-		static const int m1Thresh[] = { 127, 0x4d };
-		static const int m2Thresh[] = { 127, 0x40 };
-		static const int m2CountThr[] = { 31, 16 };
-		static const int m2CountThrLow[] = { 63, 48 };
 		u32 on = param ? 1 : 0;
 
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
-			      m1ThreshLow[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
-			      m2ThreshLow[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M1_THRESH,
-			      m1Thresh[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M2_THRESH,
-			      m2Thresh[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M2COUNT_THR,
-			      m2CountThr[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
-			      m2CountThrLow[on]);
-
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
-			      m1ThreshLow[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
-			      m2ThreshLow[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M1_THRESH,
-			      m1Thresh[on]);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M2_THRESH,
-			      m2Thresh[on]);
-
 		if (on)
 			REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
 				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
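
Both hunks above drop the open-coded delay math in favour of a shared
ath9k_hw_synth_delay() call. A sketch of what that helper presumably does,
reconstructed from the inline code removed here (the real implementation
lives in hw.c and may differ in detail):

	void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
				  int hw_delay)
	{
		if (IS_CHAN_B(chan))
			hw_delay = (4 * hw_delay) / 22;
		else
			hw_delay /= 10;

		if (IS_CHAN_HALF_RATE(chan))
			hw_delay *= 2;
		else if (IS_CHAN_QUARTER_RATE(chan))
			hw_delay *= 4;

		udelay(hw_delay + BASE_ACTIVATE_DELAY);
	}
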
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index aa2abaf..8d78253 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -136,6 +136,7 @@
 	}
 
 	if (sync_cause) {
+		ath9k_debug_sync_cause(common, sync_cause);
 		fatal_int =
 			(sync_cause &
 			 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 3cbbb03..846dd79 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -152,7 +152,6 @@
 	REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
 
 	ah->curchan = chan;
-	ah->curchan_rad_index = -1;
 
 	return 0;
 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 46c79a3..952cb2b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -777,11 +777,11 @@
 	{0x0000a074, 0x00000000},
 	{0x0000a078, 0x00000000},
 	{0x0000a07c, 0x00000000},
-	{0x0000a080, 0x22222229},
-	{0x0000a084, 0x1d1d1d1d},
-	{0x0000a088, 0x1d1d1d1d},
-	{0x0000a08c, 0x1d1d1d1d},
-	{0x0000a090, 0x171d1d1d},
+	{0x0000a080, 0x1a1a1a1a},
+	{0x0000a084, 0x1a1a1a1a},
+	{0x0000a088, 0x1a1a1a1a},
+	{0x0000a08c, 0x1a1a1a1a},
+	{0x0000a090, 0x171a1a1a},
 	{0x0000a094, 0x11111717},
 	{0x0000a098, 0x00030311},
 	{0x0000a09c, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 63089cc..a0387a0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1000,10 +1000,12 @@
 	if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal)
 		ar9003_mci_init_cal_req(ah, &is_reusable);
 
-	txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
-	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
-	udelay(5);
-	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+	if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) {
+		txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
+		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+		udelay(5);
+		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+	}
 
 skip_tx_iqcal:
 	if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 6bb4db0..ac53d90 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -30,11 +30,6 @@
 #define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
 #define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
 #define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
-#define REDUCE_SCALED_POWER_BY_TWO_CHAIN     6  /* 10*log10(2)*2 */
-#define REDUCE_SCALED_POWER_BY_THREE_CHAIN   9  /* 10*log10(3)*2 */
-#define PWRINCR_3_TO_1_CHAIN      9             /* 10*log(3)*2 */
-#define PWRINCR_3_TO_2_CHAIN      3             /* floor(10*log(3/2)*2) */
-#define PWRINCR_2_TO_1_CHAIN      6             /* 10*log(2)*2 */
 
 #define SUB_NUM_CTL_MODES_AT_5G_40 2    /* excluding HT40, EXT-OFDM */
 #define SUB_NUM_CTL_MODES_AT_2G_40 3    /* excluding HT40, EXT-OFDM, EXT-CCK */
@@ -2936,15 +2931,6 @@
 #undef N_LOOP
 }
 
-
-static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
-{
-	if (fbin == AR5416_BCHAN_UNUSED)
-		return fbin;
-
-	return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
-}
-
 static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
 {
 	return 0;
@@ -4070,7 +4056,7 @@
 	 * targetpower piers stored on eeprom
 	 */
 	for (i = 0; i < numPiers; i++) {
-		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+		freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
 		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
 	}
 
@@ -4106,7 +4092,7 @@
 	 * from targetpower piers stored on eeprom
 	 */
 	for (i = 0; i < numPiers; i++) {
-		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+		freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
 		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
 	}
 
@@ -4142,7 +4128,7 @@
 	 * targetpower piers stored on eeprom
 	 */
 	for (i = 0; i < numPiers; i++) {
-		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
+		freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
 		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
 	}
 
@@ -4167,7 +4153,7 @@
 	 * targetpower piers stored on eeprom
 	 */
 	for (i = 0; i < numPiers; i++) {
-		freqArray[i] = FBIN2FREQ(pFreqBin[i], 1);
+		freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], 1);
 		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
 	}
 
@@ -4295,18 +4281,10 @@
 #undef POW_SM
 }
 
-static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
-					      u8 *targetPowerValT2)
+static void ar9003_hw_get_legacy_target_powers(struct ath_hw *ah, u16 freq,
+					       u8 *targetPowerValT2,
+					       bool is2GHz)
 {
-	/* XXX: hard code for now, need to get from eeprom struct */
-	u8 ht40PowerIncForPdadc = 0;
-	bool is2GHz = false;
-	unsigned int i = 0;
-	struct ath_common *common = ath9k_hw_common(ah);
-
-	if (freq < 4000)
-		is2GHz = true;
-
 	targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
 	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
 					 is2GHz);
@@ -4319,6 +4297,11 @@
 	targetPowerValT2[ALL_TARGET_LEGACY_54] =
 	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
 					 is2GHz);
+}
+
+static void ar9003_hw_get_cck_target_powers(struct ath_hw *ah, u16 freq,
+					    u8 *targetPowerValT2)
+{
 	targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
 	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
 					     freq);
@@ -4328,6 +4311,11 @@
 	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
 	targetPowerValT2[ALL_TARGET_LEGACY_11S] =
 	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
+}
+
+static void ar9003_hw_get_ht20_target_powers(struct ath_hw *ah, u16 freq,
+					     u8 *targetPowerValT2, bool is2GHz)
+{
 	targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
 	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
 					      is2GHz);
@@ -4370,6 +4358,16 @@
 	targetPowerValT2[ALL_TARGET_HT20_23] =
 	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
 					      is2GHz);
+}
+
+static void ar9003_hw_get_ht40_target_powers(struct ath_hw *ah,
+						   u16 freq,
+						   u8 *targetPowerValT2,
+						   bool is2GHz)
+{
+	/* XXX: hard code for now, need to get from eeprom struct */
+	u8 ht40PowerIncForPdadc = 0;
+
 	targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
 	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
 					      is2GHz) + ht40PowerIncForPdadc;
@@ -4413,6 +4411,26 @@
 	targetPowerValT2[ALL_TARGET_HT40_23] =
 	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
 					      is2GHz) + ht40PowerIncForPdadc;
+}
+
+static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
+					      struct ath9k_channel *chan,
+					      u8 *targetPowerValT2)
+{
+	bool is2GHz = IS_CHAN_2GHZ(chan);
+	unsigned int i = 0;
+	struct ath_common *common = ath9k_hw_common(ah);
+	u16 freq = chan->channel;
+
+	if (is2GHz)
+		ar9003_hw_get_cck_target_powers(ah, freq, targetPowerValT2);
+
+	ar9003_hw_get_legacy_target_powers(ah, freq, targetPowerValT2, is2GHz);
+	ar9003_hw_get_ht20_target_powers(ah, freq, targetPowerValT2, is2GHz);
+
+	if (IS_CHAN_HT40(chan))
+		ar9003_hw_get_ht40_target_powers(ah, freq, targetPowerValT2,
+						 is2GHz);
 
 	for (i = 0; i < ar9300RateSize; i++) {
 		ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
@@ -4464,7 +4482,7 @@
 		is2GHz = 1;
 	}
 
-	*pfrequency = FBIN2FREQ(*pCalPier, is2GHz);
+	*pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2GHz);
 	*pcorrection = pCalPierStruct->refPower;
 	*ptemperature = pCalPierStruct->tempMeas;
 	*pvoltage = pCalPierStruct->voltMeas;
@@ -4789,34 +4807,9 @@
 	bool is2ghz = IS_CHAN_2GHZ(chan);
 
 	ath9k_hw_get_channel_centers(ah, chan, &centers);
-	scaledPower = powerLimit - antenna_reduction;
+	scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
+						antenna_reduction);
 
-	/*
-	 * Reduce scaled Power by number of chains active to get
-	 * to per chain tx power level
-	 */
-	switch (ar5416_get_ntxchains(ah->txchainmask)) {
-	case 1:
-		break;
-	case 2:
-		if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
-			scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
-		else
-			scaledPower = 0;
-		break;
-	case 3:
-		if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
-			scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
-		else
-			scaledPower = 0;
-		break;
-	}
-
-	scaledPower = max((u16)0, scaledPower);
-
-	/*
-	 * Get target powers from EEPROM - our baseline for TX Power
-	 */
 	if (is2ghz) {
 		/* Setup for CTL modes */
 		/* CTL_11B, CTL_11G, CTL_2GHT20 */
@@ -4988,7 +4981,12 @@
 	unsigned int i = 0, paprd_scale_factor = 0;
 	u8 pwr_idx, min_pwridx = 0;
 
-	ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2);
+	memset(targetPowerValT2, 0, sizeof(targetPowerValT2));
+
+	/*
+	 * Get target powers from EEPROM - our baseline for TX Power
+	 */
+	ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2);
 
 	if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) {
 		if (IS_CHAN_2GHZ(chan))
@@ -5060,8 +5058,6 @@
 			i, targetPowerValT2[i]);
 	}
 
-	ah->txpower_limit = regulatory->max_power_level;
-
 	/* Write target power array to registers */
 	ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
 	ar9003_hw_calibration_apply(ah, chan->channel);
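
The FBIN2FREQ() call sites above are switched to the shared
ath9k_hw_fbin2freq() helper; its mapping is the one visible in the static
copy removed earlier in this file. Two sample conversions for illustration:

	/* Frequency-bin mapping, as in the removed static helper:
	 *   2 GHz: freq = 2300 + fbin       e.g. fbin 112 -> 2412 MHz (channel 1)
	 *   5 GHz: freq = 4800 + 5 * fbin   e.g. fbin  76 -> 5180 MHz (channel 36)
	 */
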
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index bb223fe..2505ac4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -42,7 +42,6 @@
 #define AR9300_EEPMISC_WOW           0x02
 #define AR9300_CUSTOMER_DATA_SIZE    20
 
-#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
 #define AR9300_MAX_CHAINS            3
 #define AR9300_ANT_16S               25
 #define AR9300_FUTURE_MODAL_SZ       6
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 0f56e32..a0e3394 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -305,11 +305,6 @@
 				ar9462_common_rx_gain_table_2p0,
 				ARRAY_SIZE(ar9462_common_rx_gain_table_2p0), 2);
 
-		INIT_INI_ARRAY(&ah->ini_BTCOEX_MAX_TXPWR,
-				ar9462_2p0_BTCOEX_MAX_TXPWR_table,
-				ARRAY_SIZE(ar9462_2p0_BTCOEX_MAX_TXPWR_table),
-				2);
-
 		/* Awake -> Sleep Setting */
 		INIT_INI_ARRAY(&ah->iniPcieSerdes,
 				PCIE_PLL_ON_CREQ_DIS_L1_2P0,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index a66a13b..d9e0824 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -306,6 +306,8 @@
 		ar9003_mci_get_isr(ah, masked);
 
 	if (sync_cause) {
+		ath9k_debug_sync_cause(common, sync_cause);
+
 		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
 			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
 			REG_WRITE(ah, AR_RC, 0);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 59647a3..3d400e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -54,7 +54,7 @@
 
 	if (val) {
 		ah->paprd_table_write_done = true;
-		ath9k_hw_apply_txpower(ah, chan);
+		ath9k_hw_apply_txpower(ah, chan, false);
 	}
 
 	REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index bc992b2..11abb97 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -152,7 +152,6 @@
 	REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
 
 	ah->curchan = chan;
-	ah->curchan_rad_index = -1;
 
 	return 0;
 }
@@ -209,11 +208,12 @@
 			continue;
 		negative = 0;
 		if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
-			cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
-					IS_CHAN_2GHZ(chan)) - synth_freq;
+			cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
+							 IS_CHAN_2GHZ(chan));
 		else
-			cur_bb_spur = spur_freq[i] - synth_freq;
+			cur_bb_spur = spur_freq[i];
 
+		cur_bb_spur -= synth_freq;
 		if (cur_bb_spur < 0) {
 			negative = 1;
 			cur_bb_spur = -cur_bb_spur;
@@ -373,7 +373,7 @@
 			else
 				spur_subchannel_sd = 0;
 
-			spur_freq_sd = (freq_offset << 9) / 11;
+			spur_freq_sd = ((freq_offset + 10) << 9) / 11;
 
 		} else {
 			if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
@@ -382,7 +382,7 @@
 			else
 				spur_subchannel_sd = 1;
 
-			spur_freq_sd = (freq_offset << 9) / 11;
+			spur_freq_sd = ((freq_offset - 10) << 9) / 11;
 
 		}
 
@@ -443,7 +443,8 @@
 	ar9003_hw_spur_ofdm_clear(ah);
 
 	for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) {
-		freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq;
+		freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
+		freq_offset -= synth_freq;
 		if (abs(freq_offset) < range) {
 			ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
 			break;
@@ -525,22 +526,10 @@
 	 * Value is in 100ns increments.
 	 */
 	synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
-	if (IS_CHAN_B(chan))
-		synthDelay = (4 * synthDelay) / 22;
-	else
-		synthDelay /= 10;
 
 	/* Activate the PHY (includes baseband activate + synthesizer on) */
 	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
-
-	/*
-	 * There is an issue if the AP starts the calibration before
-	 * the base band timeout completes.  This could result in the
-	 * rx_clear false triggering.  As a workaround we add delay an
-	 * extra BASE_ACTIVATE_DELAY usecs to ensure this condition
-	 * does not happen.
-	 */
-	udelay(synthDelay + BASE_ACTIVATE_DELAY);
+	ath9k_hw_synth_delay(ah, chan, synthDelay);
 }
 
 static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
@@ -684,9 +673,6 @@
 
 	REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
 
-	if (AR_SREV_9462(ah))
-		ar9003_hw_prog_ini(ah, &ah->ini_BTCOEX_MAX_TXPWR, 1);
-
 	if (chan->channel == 2484)
 		ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
 
@@ -694,7 +680,7 @@
 	ar9003_hw_override_ini(ah);
 	ar9003_hw_set_channel_regs(ah, chan);
 	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
-	ath9k_hw_apply_txpower(ah, chan);
+	ath9k_hw_apply_txpower(ah, chan, false);
 
 	if (AR_SREV_9462(ah)) {
 		if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
@@ -725,6 +711,14 @@
 
 	if (IS_CHAN_A_FAST_CLOCK(ah, chan))
 		rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
+	if (IS_CHAN_QUARTER_RATE(chan))
+		rfMode |= AR_PHY_MODE_QUARTER;
+	if (IS_CHAN_HALF_RATE(chan))
+		rfMode |= AR_PHY_MODE_HALF;
+
+	if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
+		REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
+			      AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW, 3);
 
 	REG_WRITE(ah, AR_PHY_MODE, rfMode);
 }
@@ -795,12 +789,8 @@
 static void ar9003_hw_rfbus_done(struct ath_hw *ah)
 {
 	u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
-	if (IS_CHAN_B(ah->curchan))
-		synthDelay = (4 * synthDelay) / 22;
-	else
-		synthDelay /= 10;
 
-	udelay(synthDelay + BASE_ACTIVATE_DELAY);
+	ath9k_hw_synth_delay(ah, ah->curchan, synthDelay);
 
 	REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
 }
@@ -823,55 +813,6 @@
 		 * on == 0 means more noise imm
 		 */
 		u32 on = param ? 1 : 0;
-		/*
-		 * make register setting for default
-		 * (weak sig detect ON) come from INI file
-		 */
-		int m1ThreshLow = on ?
-			aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
-		int m2ThreshLow = on ?
-			aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
-		int m1Thresh = on ?
-			aniState->iniDef.m1Thresh : m1Thresh_off;
-		int m2Thresh = on ?
-			aniState->iniDef.m2Thresh : m2Thresh_off;
-		int m2CountThr = on ?
-			aniState->iniDef.m2CountThr : m2CountThr_off;
-		int m2CountThrLow = on ?
-			aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
-		int m1ThreshLowExt = on ?
-			aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
-		int m2ThreshLowExt = on ?
-			aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
-		int m1ThreshExt = on ?
-			aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
-		int m2ThreshExt = on ?
-			aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
-
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
-			      m1ThreshLow);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
-			      m2ThreshLow);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M1_THRESH, m1Thresh);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M2_THRESH, m2Thresh);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
-			      AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
-			      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
-			      m2CountThrLow);
-
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
-		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
-			      AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
 
 		if (on)
 			REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index d834d97..7268a48 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -468,6 +468,9 @@
 #define AR_PHY_ADDAC_PARA_CTL    (AR_SM_BASE + 0x150)
 #define AR_PHY_XPA_CFG           (AR_SM_BASE + 0x158)
 
+#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW  3
+#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW_S    0
+
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A           0x0001FC00
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S         10
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A                       0x3FF
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index b6ba1e8..1d6658e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -1115,9 +1115,9 @@
 	{0x000081f8, 0x00000000},
 	{0x000081fc, 0x00000000},
 	{0x00008240, 0x00100000},
-	{0x00008244, 0x0010f400},
+	{0x00008244, 0x0010f424},
 	{0x00008248, 0x00000800},
-	{0x0000824c, 0x0001e800},
+	{0x0000824c, 0x0001e848},
 	{0x00008250, 0x00000000},
 	{0x00008254, 0x00000000},
 	{0x00008258, 0x00000000},
@@ -1448,16 +1448,4 @@
 	{0x0000b1fc, 0x00000196},
 };
 
-static const u32 ar9462_2p0_BTCOEX_MAX_TXPWR_table[][2] = {
-	/* Addr      allmodes  */
-	{0x000018c0, 0x10101010},
-	{0x000018c4, 0x10101010},
-	{0x000018c8, 0x10101010},
-	{0x000018cc, 0x10101010},
-	{0x000018d0, 0x10101010},
-	{0x000018d4, 0x10101010},
-	{0x000018d8, 0x10101010},
-	{0x000018dc, 0x10101010},
-};
-
 #endif /* INITVALS_9462_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 8c84049..a277cf6 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -26,6 +26,7 @@
 #include "debug.h"
 #include "common.h"
 #include "mci.h"
+#include "dfs.h"
 
 /*
  * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
@@ -369,7 +370,7 @@
  * number of beacon intervals, the game's up.
  */
 #define BSTUCK_THRESH           	9
-#define	ATH_BCBUF               	4
+#define	ATH_BCBUF               	8
 #define ATH_DEFAULT_BINTVAL     	100 /* TU */
 #define ATH_DEFAULT_BMISS_LIMIT 	10
 #define IEEE80211_MS_TO_TU(x)           (((x) * 1000) / 1024)
@@ -430,6 +431,8 @@
 void ath_reset_work(struct work_struct *work);
 void ath_hw_check(struct work_struct *work);
 void ath_hw_pll_work(struct work_struct *work);
+void ath_rx_poll(unsigned long data);
+void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
 void ath_paprd_calibrate(struct work_struct *work);
 void ath_ani_calibrate(unsigned long data);
 void ath_start_ani(struct ath_common *common);
@@ -670,6 +673,7 @@
 	struct ath_beacon_config cur_beacon_conf;
 	struct delayed_work tx_complete_work;
 	struct delayed_work hw_pll_work;
+	struct timer_list rx_poll_timer;
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 	struct ath_btcoex btcoex;
@@ -680,6 +684,7 @@
 
 	struct ath_ant_comb ant_comb;
 	u8 ant_tx, ant_rx;
+	struct dfs_pattern_detector *dfs_detector;
 };
 
 void ath9k_tasklet(unsigned long data);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 6264182..11bc55e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -91,7 +91,7 @@
 	info.txpower = MAX_RATE_POWER;
 	info.keyix = ATH9K_TXKEYIX_INVALID;
 	info.keytype = ATH9K_KEY_TYPE_CLEAR;
-	info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_INTREQ;
+	info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_CLRDMASK;
 
 	info.buf_addr[0] = bf->bf_buf_addr;
 	info.buf_len[0] = roundup(skb->len, 4);
@@ -359,6 +359,11 @@
 	int slot;
 	u32 bfaddr, bc = 0;
 
+	if (work_pending(&sc->hw_reset_work)) {
+		ath_dbg(common, RESET,
+			"reset work is pending, skip beaconing now\n");
+		return;
+	}
 	/*
 	 * Check if the previous beacon has gone out.  If
 	 * not don't try to post another, skip this period
@@ -369,6 +374,9 @@
 	if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
 		sc->beacon.bmisscnt++;
 
+		if (!ath9k_hw_check_alive(ah))
+			ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+
 		if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
 			ath_dbg(common, BSTUCK,
 				"missed %u consecutive beacons\n",
@@ -378,6 +386,7 @@
 				ath9k_hw_bstuck_nfcal(ah);
 		} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
 			ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
+			sc->beacon.bmisscnt = 0;
 			sc->sc_flags |= SC_OP_TSF_RESET;
 			ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
 		}
@@ -650,6 +659,8 @@
 	u32 tsf, intval, nexttbtt;
 
 	ath9k_reset_beacon_status(sc);
+	if (!(sc->sc_flags & SC_OP_BEACONS))
+		ath9k_hw_settsf64(ah, sc->beacon.bc_tstamp);
 
 	intval = TU_TO_USEC(conf->beacon_interval);
 	tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
@@ -806,8 +817,10 @@
 {
 	struct ath_hw *ah = sc->sc_ah;
 
-	if (!ath_has_valid_bslot(sc))
+	if (!ath_has_valid_bslot(sc)) {
+		sc->sc_flags &= ~SC_OP_BEACONS;
 		return;
+	}
 
 	ath9k_ps_wakeup(sc);
 	if (status) {
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index ec32719..1ca6da80 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -108,9 +108,7 @@
 		return;
 	}
 
-	if (AR_SREV_9462(ah)) {
-		btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
-	} else if (AR_SREV_9300_20_OR_LATER(ah)) {
+	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
 		btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
 		btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -284,11 +282,12 @@
 		ath9k_hw_btcoex_enable_2wire(ah);
 		break;
 	case ATH_BTCOEX_CFG_3WIRE:
+		if (AR_SREV_9462(ah)) {
+			ath9k_hw_btcoex_enable_mci(ah);
+			return;
+		}
 		ath9k_hw_btcoex_enable_3wire(ah);
 		break;
-	case ATH_BTCOEX_CFG_MCI:
-		ath9k_hw_btcoex_enable_mci(ah);
-		return;
 	}
 
 	REG_RMW(ah, AR_GPIO_PDPU,
@@ -305,11 +304,12 @@
 	int i;
 
 	btcoex_hw->enabled = false;
-	if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) {
+	if (AR_SREV_9462(ah)) {
 		ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
 		for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
 			REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
 				  btcoex_hw->wlan_weight[i]);
+		return;
 	}
 	ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
 
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 8f93aef..3a1e1cf 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -51,7 +51,6 @@
 	ATH_BTCOEX_CFG_NONE,
 	ATH_BTCOEX_CFG_2WIRE,
 	ATH_BTCOEX_CFG_3WIRE,
-	ATH_BTCOEX_CFG_MCI,
 };
 
 struct ath9k_hw_mci {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index ff47b32..fde700c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -380,63 +380,75 @@
 				   size_t count, loff_t *ppos)
 {
 	struct ath_softc *sc = file->private_data;
-	char buf[512];
 	unsigned int len = 0;
+	int rv;
+	int mxlen = 4000;
+	char *buf = kmalloc(mxlen, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+#define PR_IS(a, s)						\
+	do {							\
+		len += snprintf(buf + len, mxlen - len,		\
+				"%21s: %10u\n", a,		\
+				sc->debug.stats.istats.s);	\
+	} while (0)
 
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
-		len += snprintf(buf + len, sizeof(buf) - len,
-			"%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
-		len += snprintf(buf + len, sizeof(buf) - len,
-			"%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
-		len += snprintf(buf + len, sizeof(buf) - len,
-			"%8s: %10u\n", "WATCHDOG",
-			sc->debug.stats.istats.bb_watchdog);
+		PR_IS("RXLP", rxlp);
+		PR_IS("RXHP", rxhp);
+		PR_IS("WATHDOG", bb_watchdog);
 	} else {
-		len += snprintf(buf + len, sizeof(buf) - len,
-			"%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
+		PR_IS("RX", rxok);
 	}
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "RXORN", sc->debug.stats.istats.rxorn);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "TX", sc->debug.stats.istats.txok);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "TXURN", sc->debug.stats.istats.txurn);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "MIB", sc->debug.stats.istats.mib);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "RXPHY", sc->debug.stats.istats.rxphyerr);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "RXKCM", sc->debug.stats.istats.rx_keycache_miss);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "SWBA", sc->debug.stats.istats.swba);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "BMISS", sc->debug.stats.istats.bmiss);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "BNR", sc->debug.stats.istats.bnr);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "CST", sc->debug.stats.istats.cst);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "GTT", sc->debug.stats.istats.gtt);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "TIM", sc->debug.stats.istats.tim);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "CABEND", sc->debug.stats.istats.cabend);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "DTIMSYNC", sc->debug.stats.istats.dtimsync);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "DTIM", sc->debug.stats.istats.dtim);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "TSFOOR", sc->debug.stats.istats.tsfoor);
-	len += snprintf(buf + len, sizeof(buf) - len,
-		"%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total);
+	PR_IS("RXEOL", rxeol);
+	PR_IS("RXORN", rxorn);
+	PR_IS("TX", txok);
+	PR_IS("TXURN", txurn);
+	PR_IS("MIB", mib);
+	PR_IS("RXPHY", rxphyerr);
+	PR_IS("RXKCM", rx_keycache_miss);
+	PR_IS("SWBA", swba);
+	PR_IS("BMISS", bmiss);
+	PR_IS("BNR", bnr);
+	PR_IS("CST", cst);
+	PR_IS("GTT", gtt);
+	PR_IS("TIM", tim);
+	PR_IS("CABEND", cabend);
+	PR_IS("DTIMSYNC", dtimsync);
+	PR_IS("DTIM", dtim);
+	PR_IS("TSFOOR", tsfoor);
+	PR_IS("TOTAL", total);
 
+	len += snprintf(buf + len, mxlen - len,
+			"SYNC_CAUSE stats:\n");
 
-	if (len > sizeof(buf))
-		len = sizeof(buf);
+	PR_IS("Sync-All", sync_cause_all);
+	PR_IS("RTC-IRQ", sync_rtc_irq);
+	PR_IS("MAC-IRQ", sync_mac_irq);
+	PR_IS("EEPROM-Illegal-Access", eeprom_illegal_access);
+	PR_IS("APB-Timeout", apb_timeout);
+	PR_IS("PCI-Mode-Conflict", pci_mode_conflict);
+	PR_IS("HOST1-Fatal", host1_fatal);
+	PR_IS("HOST1-Perr", host1_perr);
+	PR_IS("TRCV-FIFO-Perr", trcv_fifo_perr);
+	PR_IS("RADM-CPL-EP", radm_cpl_ep);
+	PR_IS("RADM-CPL-DLLP-Abort", radm_cpl_dllp_abort);
+	PR_IS("RADM-CPL-TLP-Abort", radm_cpl_tlp_abort);
+	PR_IS("RADM-CPL-ECRC-Err", radm_cpl_ecrc_err);
+	PR_IS("RADM-CPL-Timeout", radm_cpl_timeout);
+	PR_IS("Local-Bus-Timeout", local_timeout);
+	PR_IS("PM-Access", pm_access);
+	PR_IS("MAC-Awake", mac_awake);
+	PR_IS("MAC-Asleep", mac_asleep);
+	PR_IS("MAC-Sleep-Access", mac_sleep_access);
 
-	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	if (len > mxlen)
+		len = mxlen;
+
+	rv = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+	kfree(buf);
+	return rv;
 }
 
 static const struct file_operations fops_interrupt = {
@@ -524,6 +536,7 @@
 	PR("hw-put-tx-buf:   ", puttxbuf);
 	PR("hw-tx-start:     ", txstart);
 	PR("hw-tx-proc-desc: ", txprocdesc);
+	PR("TX-Failed:       ", txfailed);
 	len += snprintf(buf + len, size - len,
 			"%s%11p%11p%10p%10p\n", "txq-memory-address:",
 			sc->tx.txq_map[WME_AC_BE],
@@ -880,6 +893,13 @@
 	len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
 			sc->debug.stats.rxstats.phy_err_stats[p]);
 
+#define RXS_ERR(s, e)					    \
+	do {						    \
+		len += snprintf(buf + len, size - len,	    \
+				"%22s : %10u\n", s,	    \
+				sc->debug.stats.rxstats.e); \
+	} while (0)
+
 	struct ath_softc *sc = file->private_data;
 	char *buf;
 	unsigned int len = 0, size = 1600;
@@ -889,27 +909,18 @@
 	if (buf == NULL)
 		return -ENOMEM;
 
-	len += snprintf(buf + len, size - len,
-			"%22s : %10u\n", "CRC ERR",
-			sc->debug.stats.rxstats.crc_err);
-	len += snprintf(buf + len, size - len,
-			"%22s : %10u\n", "DECRYPT CRC ERR",
-			sc->debug.stats.rxstats.decrypt_crc_err);
-	len += snprintf(buf + len, size - len,
-			"%22s : %10u\n", "PHY ERR",
-			sc->debug.stats.rxstats.phy_err);
-	len += snprintf(buf + len, size - len,
-			"%22s : %10u\n", "MIC ERR",
-			sc->debug.stats.rxstats.mic_err);
-	len += snprintf(buf + len, size - len,
-			"%22s : %10u\n", "PRE-DELIM CRC ERR",
-			sc->debug.stats.rxstats.pre_delim_crc_err);
-	len += snprintf(buf + len, size - len,
-			"%22s : %10u\n", "POST-DELIM CRC ERR",
-			sc->debug.stats.rxstats.post_delim_crc_err);
-	len += snprintf(buf + len, size - len,
-			"%22s : %10u\n", "DECRYPT BUSY ERR",
-			sc->debug.stats.rxstats.decrypt_busy_err);
+	RXS_ERR("CRC ERR", crc_err);
+	RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
+	RXS_ERR("PHY ERR", phy_err);
+	RXS_ERR("MIC ERR", mic_err);
+	RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
+	RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
+	RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
+	RXS_ERR("RX-LENGTH-ERR", rx_len_err);
+	RXS_ERR("RX-OOM-ERR", rx_oom_err);
+	RXS_ERR("RX-RATE-ERR", rx_rate_err);
+	RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush);
+	RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
 
 	PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
 	PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
@@ -938,12 +949,10 @@
 	PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
 	PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
 
-	len += snprintf(buf + len, size - len,
-			"%22s : %10u\n", "RX-Pkts-All",
-			sc->debug.stats.rxstats.rx_pkts_all);
-	len += snprintf(buf + len, size - len,
-			"%22s : %10u\n", "RX-Bytes-All",
-			sc->debug.stats.rxstats.rx_bytes_all);
+	RXS_ERR("RX-Pkts-All", rx_pkts_all);
+	RXS_ERR("RX-Bytes-All", rx_bytes_all);
+	RXS_ERR("RX-Beacons", rx_beacons);
+	RXS_ERR("RX-Frags", rx_frags);
 
 	if (len > size)
 		len = size;
@@ -953,12 +962,12 @@
 
 	return retval;
 
+#undef RXS_ERR
 #undef PHY_ERR
 }
 
 void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
 {
-#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
 #define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
 #define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
 			[sc->debug.rsidx].c)
@@ -1004,7 +1013,6 @@
 
 #endif
 
-#undef RX_STAT_INC
 #undef RX_PHY_ERR_INC
 #undef RX_SAMP_DBG
 }
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 64fcfad..c34da09 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -60,6 +60,7 @@
  * @tsfoor: TSF out of range, indicates that the corrected TSF received
  * from a beacon differs from the PCU's internal TSF by more than a
  * (programmable) threshold
+ * @local_timeout: Internal bus timeout.
  */
 struct ath_interrupt_stats {
 	u32 total;
@@ -85,8 +86,30 @@
 	u32 dtim;
 	u32 bb_watchdog;
 	u32 tsfoor;
+
+	/* Sync-cause stats */
+	u32 sync_cause_all;
+	u32 sync_rtc_irq;
+	u32 sync_mac_irq;
+	u32 eeprom_illegal_access;
+	u32 apb_timeout;
+	u32 pci_mode_conflict;
+	u32 host1_fatal;
+	u32 host1_perr;
+	u32 trcv_fifo_perr;
+	u32 radm_cpl_ep;
+	u32 radm_cpl_dllp_abort;
+	u32 radm_cpl_tlp_abort;
+	u32 radm_cpl_ecrc_err;
+	u32 radm_cpl_timeout;
+	u32 local_timeout;
+	u32 pm_access;
+	u32 mac_awake;
+	u32 mac_asleep;
+	u32 mac_sleep_access;
 };
 
+
 /**
  * struct ath_tx_stats - Statistics about TX
  * @tx_pkts_all:  No. of total frames transmitted, including ones that
@@ -113,6 +136,7 @@
  * @puttxbuf: Number of times hardware was given txbuf to write.
  * @txstart:  Number of times hardware was told to start tx.
  * @txprocdesc:  Number of times tx descriptor was processed
+ * @txfailed:  Out-of-memory or other errors in xmit path.
  */
 struct ath_tx_stats {
 	u32 tx_pkts_all;
@@ -135,8 +159,11 @@
 	u32 puttxbuf;
 	u32 txstart;
 	u32 txprocdesc;
+	u32 txfailed;
 };
 
+#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
+
 /**
  * struct ath_rx_stats - RX Statistics
  * @rx_pkts_all:  No. of total frames received, including ones that
@@ -153,6 +180,13 @@
  * @post_delim_crc_err: Post-Frame delimiter CRC error detections
  * @decrypt_busy_err: Decryption interruptions counter
  * @phy_err_stats: Individual PHY error statistics
+ * @rx_len_err:  No. of frames discarded due to bad length.
+ * @rx_oom_err:  No. of frames dropped due to OOM issues.
+ * @rx_rate_err:  No. of frames dropped due to rate errors.
+ * @rx_too_many_frags_err:  Frames dropped due to too-many-frags received.
+ * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH.
+ * @rx_beacons:  No. of beacons received.
+ * @rx_frags:  No. of rx-fragments received.
  */
 struct ath_rx_stats {
 	u32 rx_pkts_all;
@@ -165,6 +199,13 @@
 	u32 post_delim_crc_err;
 	u32 decrypt_busy_err;
 	u32 phy_err_stats[ATH9K_PHYERR_MAX];
+	u32 rx_len_err;
+	u32 rx_oom_err;
+	u32 rx_rate_err;
+	u32 rx_too_many_frags_err;
+	u32 rx_drop_rxflush;
+	u32 rx_beacons;
+	u32 rx_frags;
 };
 
 enum ath_reset_type {
@@ -174,6 +215,7 @@
 	RESET_TYPE_TX_ERROR,
 	RESET_TYPE_TX_HANG,
 	RESET_TYPE_PLL_HANG,
+	RESET_TYPE_MAC_HANG,
 	__RESET_TYPE_MAX
 };
 
@@ -247,6 +289,8 @@
 
 #else
 
+#define RX_STAT_INC(c) /* NOP */
+
 static inline int ath9k_init_debug(struct ath_hw *ah)
 {
 	return 0;
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index f4f56af..ecc8179 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -21,17 +21,6 @@
 #include "dfs.h"
 #include "dfs_debug.h"
 
-/*
- * TODO: move into or synchronize this with generic header
- *	 as soon as IF is defined
- */
-struct dfs_radar_pulse {
-	u16 freq;
-	u64 ts;
-	u32 width;
-	u8 rssi;
-};
-
 /* internal struct to pass radar data */
 struct ath_radar_data {
 	u8 pulse_bw_info;
@@ -60,44 +49,44 @@
 #define EXT_CH_RADAR_FOUND 0x02
 static bool
 ath9k_postprocess_radar_event(struct ath_softc *sc,
-			      struct ath_radar_data *are,
-			      struct dfs_radar_pulse *drp)
+			      struct ath_radar_data *ard,
+			      struct pulse_event *pe)
 {
 	u8 rssi;
 	u16 dur;
 
 	ath_dbg(ath9k_hw_common(sc->sc_ah), DFS,
 		"pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
-		are->pulse_bw_info,
-		are->pulse_length_pri, are->rssi,
-		are->pulse_length_ext, are->ext_rssi);
+		ard->pulse_bw_info,
+		ard->pulse_length_pri, ard->rssi,
+		ard->pulse_length_ext, ard->ext_rssi);
 
 	/*
 	 * Only the last 2 bits of the BW info are relevant, they indicate
 	 * which channel the radar was detected in.
 	 */
-	are->pulse_bw_info &= 0x03;
+	ard->pulse_bw_info &= 0x03;
 
-	switch (are->pulse_bw_info) {
+	switch (ard->pulse_bw_info) {
 	case PRI_CH_RADAR_FOUND:
 		/* radar in ctrl channel */
-		dur = are->pulse_length_pri;
+		dur = ard->pulse_length_pri;
 		DFS_STAT_INC(sc, pri_phy_errors);
 		/*
 		 * cannot use ctrl channel RSSI
 		 * if extension channel is stronger
 		 */
-		rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi;
+		rssi = (ard->ext_rssi >= (ard->rssi + 3)) ? 0 : ard->rssi;
 		break;
 	case EXT_CH_RADAR_FOUND:
 		/* radar in extension channel */
-		dur = are->pulse_length_ext;
+		dur = ard->pulse_length_ext;
 		DFS_STAT_INC(sc, ext_phy_errors);
 		/*
 		 * cannot use extension channel RSSI
 		 * if control channel is stronger
 		 */
-		rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi;
+		rssi = (ard->rssi >= (ard->ext_rssi + 12)) ? 0 : ard->ext_rssi;
 		break;
 	case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
 		/*
@@ -107,14 +96,14 @@
 		 * Radiated testing, when pulse is on DC, different pri and
 		 * ext durations are reported, so take the larger of the two
 		 */
-		if (are->pulse_length_ext >= are->pulse_length_pri)
-			dur = are->pulse_length_ext;
+		if (ard->pulse_length_ext >= ard->pulse_length_pri)
+			dur = ard->pulse_length_ext;
 		else
-			dur = are->pulse_length_pri;
+			dur = ard->pulse_length_pri;
 		DFS_STAT_INC(sc, dc_phy_errors);
 
 		/* when both are present use stronger one */
-		rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi;
+		rssi = (ard->rssi < ard->ext_rssi) ? ard->ext_rssi : ard->rssi;
 		break;
 	default:
 		/*
@@ -137,8 +126,8 @@
 	 */
 
 	/* convert duration to usecs */
-	drp->width = dur_to_usecs(sc->sc_ah, dur);
-	drp->rssi = rssi;
+	pe->width = dur_to_usecs(sc->sc_ah, dur);
+	pe->rssi = rssi;
 
 	DFS_STAT_INC(sc, pulses_detected);
 	return true;
@@ -155,15 +144,17 @@
 	struct ath_radar_data ard;
 	u16 datalen;
 	char *vdata_end;
-	struct dfs_radar_pulse drp;
+	struct pulse_event pe;
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	if ((!(rs->rs_phyerr != ATH9K_PHYERR_RADAR)) &&
-	    (!(rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT))) {
+	DFS_STAT_INC(sc, pulses_total);
+	if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
+	    (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
 		ath_dbg(common, DFS,
 			"Error: rs_phyer=0x%x not a radar error\n",
 			rs->rs_phyerr);
+		DFS_STAT_INC(sc, pulses_no_dfs);
 		return;
 	}
 
@@ -189,27 +180,22 @@
 	ard.pulse_bw_info = vdata_end[-1];
 	ard.pulse_length_ext = vdata_end[-2];
 	ard.pulse_length_pri = vdata_end[-3];
-
-	ath_dbg(common, DFS,
-		"bw_info=%d, length_pri=%d, length_ext=%d, "
-		"rssi_pri=%d, rssi_ext=%d\n",
-		ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
-		ard.rssi, ard.ext_rssi);
-
-	drp.freq = ah->curchan->channel;
-	drp.ts = mactime;
-	if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
+	pe.freq = ah->curchan->channel;
+	pe.ts = mactime;
+	if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
+		struct dfs_pattern_detector *pd = sc->dfs_detector;
 		static u64 last_ts;
 		ath_dbg(common, DFS,
 			"ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
 			"width=%d, rssi=%d, delta_ts=%llu\n",
-			drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts);
-		last_ts = drp.ts;
-		/*
-		 * TODO: forward pulse to pattern detector
-		 *
-		 * ieee80211_add_radar_pulse(drp.freq, drp.ts,
-		 *                           drp.width, drp.rssi);
-		 */
+			pe.freq, pe.ts, pe.width, pe.rssi, pe.ts-last_ts);
+		last_ts = pe.ts;
+		DFS_STAT_INC(sc, pulses_processed);
+		if (pd != NULL && pd->add_pulse(pd, &pe)) {
+			DFS_STAT_INC(sc, radar_detected);
+			/*
+			 * TODO: forward radar event to DFS management layer
+			 */
+		}
 	}
 }
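
The pulse handed to pd->add_pulse() carries the same information the removed
struct dfs_radar_pulse held. A hedged sketch of the pulse_event layout this
code assumes; the authoritative definition is in dfs_pattern_detector.h,
which is outside this hunk, and its field widths may differ:

	struct pulse_event {
		u64 ts;     /* pulse time stamp in usecs (mactime above) */
		u16 freq;   /* channel frequency in MHz */
		u32 width;  /* pulse duration in usecs */
		u8 rssi;    /* pulse RSSI */
	};
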
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
index c241285..3c839f0 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.h
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -17,6 +17,7 @@
 
 #ifndef ATH9K_DFS_H
 #define ATH9K_DFS_H
+#include "dfs_pattern_detector.h"
 
 #if defined(CONFIG_ATH9K_DFS_CERTIFIED)
 /**
@@ -31,13 +32,14 @@
  *
  * The radar information provided as raw payload data is validated and
  * filtered for false pulses. Events passing all tests are forwarded to
- * the upper layer for pattern detection.
+ * the DFS detector for pattern detection.
  */
 void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
 			      struct ath_rx_status *rs, u64 mactime);
 #else
-static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
-					    struct ath_rx_status *rs, u64 mactime) { }
+static inline void
+ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+			 struct ath_rx_status *rs, u64 mactime) { }
 #endif
 
 #endif /* ATH9K_DFS_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 4364c10..55d2807 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -21,9 +21,15 @@
 #include "ath9k.h"
 #include "dfs_debug.h"
 
+
+struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
+
 #define ATH9K_DFS_STAT(s, p) \
 	len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
 			sc->debug.stats.dfs_stats.p);
+#define ATH9K_DFS_POOL_STAT(s, p) \
+	len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
+			global_dfs_pool_stats.p);
 
 static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
 			     size_t count, loff_t *ppos)
@@ -43,6 +49,9 @@
 			hw_ver->macVersion, hw_ver->macRev,
 			(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
 					"enabled" : "disabled");
+	len += snprintf(buf + len, size - len, "Pulse detector statistics:\n");
+	ATH9K_DFS_STAT("pulse events reported   ", pulses_total);
+	ATH9K_DFS_STAT("invalid pulse events    ", pulses_no_dfs);
 	ATH9K_DFS_STAT("DFS pulses detected     ", pulses_detected);
 	ATH9K_DFS_STAT("Datalen discards        ", datalen_discards);
 	ATH9K_DFS_STAT("RSSI discards           ", rssi_discards);
@@ -50,6 +59,18 @@
 	ATH9K_DFS_STAT("Primary channel pulses  ", pri_phy_errors);
 	ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
 	ATH9K_DFS_STAT("Dual channel pulses     ", dc_phy_errors);
+	len += snprintf(buf + len, size - len, "Radar detector statistics "
+			"(current DFS region: %d)\n", sc->dfs_detector->region);
+	ATH9K_DFS_STAT("Pulse events processed  ", pulses_processed);
+	ATH9K_DFS_STAT("Radars detected         ", radar_detected);
+	len += snprintf(buf + len, size - len, "Global Pool statistics:\n");
+	ATH9K_DFS_POOL_STAT("Pool references         ", pool_reference);
+	ATH9K_DFS_POOL_STAT("Pulses allocated        ", pulse_allocated);
+	ATH9K_DFS_POOL_STAT("Pulses alloc error      ", pulse_alloc_error);
+	ATH9K_DFS_POOL_STAT("Pulses in use           ", pulse_used);
+	ATH9K_DFS_POOL_STAT("Seqs. allocated         ", pseq_allocated);
+	ATH9K_DFS_POOL_STAT("Seqs. alloc error       ", pseq_alloc_error);
+	ATH9K_DFS_POOL_STAT("Seqs. in use            ", pseq_used);
 
 	if (len > size)
 		len = size;
@@ -60,8 +81,33 @@
 	return retval;
 }
 
+/* magic number to prevent accidental reset of DFS statistics */
+#define DFS_STATS_RESET_MAGIC	0x80000000
+static ssize_t write_file_dfs(struct file *file, const char __user *user_buf,
+			      size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	unsigned long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+	if (strict_strtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val == DFS_STATS_RESET_MAGIC)
+		memset(&sc->debug.stats.dfs_stats, 0,
+		       sizeof(sc->debug.stats.dfs_stats));
+	return count;
+}
+
 static const struct file_operations fops_dfs_stats = {
 	.read = read_file_dfs,
+	.write = write_file_dfs,
 	.open = simple_open,
 	.owner = THIS_MODULE,
 	.llseek = default_llseek,
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index 4911724..e36810a 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -22,17 +22,23 @@
 #include "hw.h"
 
 /**
- * struct ath_dfs_stats - DFS Statistics
- *
- * @pulses_detected:  No. of pulses detected so far
- * @datalen_discards: No. of pulses discarded due to invalid datalen
- * @rssi_discards:    No. of pulses discarded due to invalid RSSI
- * @bwinfo_discards:  No. of pulses discarded due to invalid BW info
- * @pri_phy_errors:   No. of pulses reported for primary channel
- * @ext_phy_errors:   No. of pulses reported for extension channel
- * @dc_phy_errors:    No. of pulses reported for primary + extension channel
+ * struct ath_dfs_stats - DFS Statistics per wiphy
+ * @pulses_total:     pulses reported by HW
+ * @pulses_no_dfs:    pulses wrongly reported as DFS
+ * @pulses_detected:  pulses detected so far
+ * @datalen_discards: pulses discarded due to invalid datalen
+ * @rssi_discards:    pulses discarded due to invalid RSSI
+ * @bwinfo_discards:  pulses discarded due to invalid BW info
+ * @pri_phy_errors:   pulses reported for primary channel
+ * @ext_phy_errors:   pulses reported for extension channel
+ * @dc_phy_errors:    pulses reported for primary + extension channel
+ * @pulses_processed: pulses forwarded to detector
+ * @radar_detected:   radars detected
  */
 struct ath_dfs_stats {
+	/* pulse stats */
+	u32 pulses_total;
+	u32 pulses_no_dfs;
 	u32 pulses_detected;
 	u32 datalen_discards;
 	u32 rssi_discards;
@@ -40,18 +46,39 @@
 	u32 pri_phy_errors;
 	u32 ext_phy_errors;
 	u32 dc_phy_errors;
+	/* pattern detection stats */
+	u32 pulses_processed;
+	u32 radar_detected;
 };
 
+/**
+ * struct ath_dfs_pool_stats - DFS Statistics for global pools
+ */
+struct ath_dfs_pool_stats {
+	u32 pool_reference;
+	u32 pulse_allocated;
+	u32 pulse_alloc_error;
+	u32 pulse_used;
+	u32 pseq_allocated;
+	u32 pseq_alloc_error;
+	u32 pseq_used;
+};
 #if defined(CONFIG_ATH9K_DFS_DEBUGFS)
 
 #define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
 void ath9k_dfs_init_debug(struct ath_softc *sc);
 
+#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
+#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
+extern struct ath_dfs_pool_stats global_dfs_pool_stats;
+
 #else
 
 #define DFS_STAT_INC(sc, c) do { } while (0)
 static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
 
+#define DFS_POOL_STAT_INC(c) do { } while (0)
+#define DFS_POOL_STAT_DEC(c) do { } while (0)
 #endif /* CONFIG_ATH9K_DFS_DEBUGFS */
 
 #endif /* ATH9K_DFS_DEBUG_H */
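
A hedged sketch of how the new pool counters are meant to be driven. The
element type and allocation site below are placeholders for illustration; the
real users live in dfs_pri_detector.c, which is not part of this hunk:

	/* Placeholder element type, for illustration only. */
	struct example_pulse_elem {
		struct list_head head;
		u64 ts;
	};

	static struct example_pulse_elem *example_pool_get(void)
	{
		struct example_pulse_elem *p = kmalloc(sizeof(*p), GFP_ATOMIC);

		if (p == NULL) {
			DFS_POOL_STAT_INC(pulse_alloc_error);
			return NULL;
		}
		DFS_POOL_STAT_INC(pulse_allocated);
		DFS_POOL_STAT_INC(pulse_used);
		return p;
	}
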
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
new file mode 100644
index 0000000..ea2a6cf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2012 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include "dfs_pattern_detector.h"
+#include "dfs_pri_detector.h"
+
+/*
+ * tolerated deviation of radar time stamp in usecs on both sides
+ * TODO: this might need to be HW-dependent
+ */
+#define PRI_TOLERANCE	16
+
+/**
+ * struct radar_types - contains array of patterns defined for one DFS domain
+ * @region: DFS regulatory domain
+ * @num_radar_types: number of radar types to follow
+ * @radar_types: radar types array
+ */
+struct radar_types {
+	enum nl80211_dfs_regions region;
+	u32 num_radar_types;
+	const struct radar_detector_specs *radar_types;
+};
+
+/* percentage on ppb threshold to trigger detection */
+#define MIN_PPB_THRESH	50
+#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
+#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
+
+#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB)	\
+{								\
+	ID, WMIN, WMAX, (PRF2PRI(PMAX) - PRI_TOLERANCE),	\
+	(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF,	\
+	PPB_THRESH(PPB), PRI_TOLERANCE,				\
+}
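+
+/*
+ * Worked example (illustrative only, not used by the code): for an 18-pulse
+ * burst at a 700 Hz PRF,
+ *	PRF2PRI(700)   = (1000000 + 350) / 700 = 1429 us
+ *	PPB_THRESH(18) = (18 * 50 + 50) / 100  = 9
+ * i.e. at least 9 pulses spaced about 1429 us apart (within PRI_TOLERANCE)
+ * are required before this pattern can trigger a detection.
+ */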
+
+/* radar types as defined by ETSI EN-301-893 v1.5.1 */
+static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
+	ETSI_PATTERN(0,  0,  1,  700,  700, 1, 18),
+	ETSI_PATTERN(1,  0,  5,  200, 1000, 1, 10),
+	ETSI_PATTERN(2,  0, 15,  200, 1600, 1, 15),
+	ETSI_PATTERN(3,  0, 15, 2300, 4000, 1, 25),
+	ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
+	ETSI_PATTERN(5,  0,  2,  300,  400, 3, 10),
+	ETSI_PATTERN(6,  0,  2,  400, 1200, 3, 15),
+};
+
+static const struct radar_types etsi_radar_types_v15 = {
+	.region			= NL80211_DFS_ETSI,
+	.num_radar_types	= ARRAY_SIZE(etsi_radar_ref_types_v15),
+	.radar_types		= etsi_radar_ref_types_v15,
+};
+
+/* for now, we support ETSI radar types, FCC and JP are TODO */
+static const struct radar_types *dfs_domains[] = {
+	&etsi_radar_types_v15,
+};
+
+/**
+ * get_dfs_domain_radar_types() - get radar types for a given DFS domain
+ * @param region DFS region
+ * @return radar_types ptr on success, NULL if DFS domain is not supported
+ */
+static const struct radar_types *
+get_dfs_domain_radar_types(enum nl80211_dfs_regions region)
+{
+	u32 i;
+	for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) {
+		if (dfs_domains[i]->region == region)
+			return dfs_domains[i];
+	}
+	return NULL;
+}
+
+/**
+ * struct channel_detector - detector elements for a DFS channel
+ * @head: list_head
+ * @freq: frequency for this channel detector in MHz
+ * @detectors: array of dynamically created detector elements for this freq
+ *
+ * Channel detectors are required to provide multi-channel DFS detection, e.g.
+ * to support off-channel scanning. A pattern detector keeps a list of the
+ * channels for which radar pulses have been reported in the past.
+ */
+struct channel_detector {
+	struct list_head head;
+	u16 freq;
+	struct pri_detector **detectors;
+};
+
+/* channel_detector_reset() - reset detector lines for a given channel */
+static void channel_detector_reset(struct dfs_pattern_detector *dpd,
+				   struct channel_detector *cd)
+{
+	u32 i;
+	if (cd == NULL)
+		return;
+	for (i = 0; i < dpd->num_radar_types; i++)
+		cd->detectors[i]->reset(cd->detectors[i], dpd->last_pulse_ts);
+}
+
+/* channel_detector_exit() - destructor */
+static void channel_detector_exit(struct dfs_pattern_detector *dpd,
+				  struct channel_detector *cd)
+{
+	u32 i;
+	if (cd == NULL)
+		return;
+	list_del(&cd->head);
+	for (i = 0; i < dpd->num_radar_types; i++) {
+		struct pri_detector *de = cd->detectors[i];
+		if (de != NULL)
+			de->exit(de);
+	}
+	kfree(cd->detectors);
+	kfree(cd);
+}
+
+static struct channel_detector *
+channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
+{
+	u32 sz, i;
+	struct channel_detector *cd;
+
+	cd = kmalloc(sizeof(*cd), GFP_KERNEL);
+	if (cd == NULL)
+		goto fail;
+
+	INIT_LIST_HEAD(&cd->head);
+	cd->freq = freq;
+	sz = sizeof(cd->detectors) * dpd->num_radar_types;
+	cd->detectors = kzalloc(sz, GFP_KERNEL);
+	if (cd->detectors == NULL)
+		goto fail;
+
+	for (i = 0; i < dpd->num_radar_types; i++) {
+		const struct radar_detector_specs *rs = &dpd->radar_spec[i];
+		struct pri_detector *de = pri_detector_init(rs);
+		if (de == NULL)
+			goto fail;
+		cd->detectors[i] = de;
+	}
+	list_add(&cd->head, &dpd->channel_detectors);
+	return cd;
+
+fail:
+	pr_err("failed to allocate channel_detector for freq=%d\n", freq);
+	channel_detector_exit(dpd, cd);
+	return NULL;
+}
+
+/**
+ * channel_detector_get() - get channel detector for given frequency
+ * @param dpd instance pointer
+ * @param freq frequency in MHz
+ * @return pointer to channel detector on success, NULL otherwise
+ *
+ * Return existing channel detector for the given frequency or return a
+ * newly created one.
+ */
+static struct channel_detector *
+channel_detector_get(struct dfs_pattern_detector *dpd, u16 freq)
+{
+	struct channel_detector *cd;
+	list_for_each_entry(cd, &dpd->channel_detectors, head) {
+		if (cd->freq == freq)
+			return cd;
+	}
+	return channel_detector_create(dpd, freq);
+}
+
+/*
+ * DFS Pattern Detector
+ */
+
+/* dpd_reset(): reset all channel detectors */
+static void dpd_reset(struct dfs_pattern_detector *dpd)
+{
+	struct channel_detector *cd;
+	if (!list_empty(&dpd->channel_detectors))
+		list_for_each_entry(cd, &dpd->channel_detectors, head)
+			channel_detector_reset(dpd, cd);
+
+}
+static void dpd_exit(struct dfs_pattern_detector *dpd)
+{
+	struct channel_detector *cd, *cd0;
+	if (!list_empty(&dpd->channel_detectors))
+		list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
+			channel_detector_exit(dpd, cd);
+	kfree(dpd);
+}
+
+static bool
+dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
+{
+	u32 i;
+	bool ts_wraparound;
+	struct channel_detector *cd;
+
+	if (dpd->region == NL80211_DFS_UNSET) {
+		/*
+		 * pulses received for an unsupported or uninitialized
+		 * domain are treated as detected radars
+		 */
+		return true;
+	}
+
+	cd = channel_detector_get(dpd, event->freq);
+	if (cd == NULL)
+		return false;
+
+	ts_wraparound = (event->ts < dpd->last_pulse_ts);
+	dpd->last_pulse_ts = event->ts;
+	if (ts_wraparound) {
+		/*
+		 * reset detector on time stamp wraparound
+		 * with monotonic time stamps, this should never happen
+		 */
+		pr_warn("DFS: time stamp wraparound detected, resetting\n");
+		dpd_reset(dpd);
+	}
+	/* do type individual pattern matching */
+	for (i = 0; i < dpd->num_radar_types; i++) {
+		if (cd->detectors[i]->add_pulse(cd->detectors[i], event) != 0) {
+			channel_detector_reset(dpd, cd);
+			return true;
+		}
+	}
+	return false;
+}
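+
+/*
+ * Illustrative caller sketch (hypothetical variable names, not part of this
+ * file): a driver that has decoded a radar PHY error into time stamp,
+ * frequency, width and RSSI would feed the detector roughly like this:
+ *
+ *	struct pulse_event pe = {
+ *		.ts	= mactime_us,
+ *		.freq	= center_freq_mhz,
+ *		.width	= width_us,
+ *		.rssi	= rssi,
+ *	};
+ *
+ *	if (dpd->add_pulse(dpd, &pe))
+ *		(a complete radar pattern was seen, leave the channel)
+ */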
+
+static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
+			   enum nl80211_dfs_regions region)
+{
+	const struct radar_types *rt;
+	struct channel_detector *cd, *cd0;
+
+	if (dpd->region == region)
+		return true;
+
+	dpd->region = NL80211_DFS_UNSET;
+
+	rt = get_dfs_domain_radar_types(region);
+	if (rt == NULL)
+		return false;
+
+	/* delete all channel detectors for previous DFS domain */
+	if (!list_empty(&dpd->channel_detectors))
+		list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
+			channel_detector_exit(dpd, cd);
+	dpd->radar_spec = rt->radar_types;
+	dpd->num_radar_types = rt->num_radar_types;
+
+	dpd->region = region;
+	return true;
+}
+
+static struct dfs_pattern_detector default_dpd = {
+	.exit		= dpd_exit,
+	.set_domain	= dpd_set_domain,
+	.add_pulse	= dpd_add_pulse,
+	.region		= NL80211_DFS_UNSET,
+};
+
+struct dfs_pattern_detector *
+dfs_pattern_detector_init(enum nl80211_dfs_regions region)
+{
+	struct dfs_pattern_detector *dpd;
+	dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
+	if (dpd == NULL) {
+		pr_err("allocation of dfs_pattern_detector failed\n");
+		return NULL;
+	}
+	*dpd = default_dpd;
+	INIT_LIST_HEAD(&dpd->channel_detectors);
+
+	if (dpd->set_domain(dpd, region))
+		return dpd;
+
+	pr_err("Could not set DFS domain to %d\n", region);
+	kfree(dpd);
+	return NULL;
+}
+EXPORT_SYMBOL(dfs_pattern_detector_init);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
new file mode 100644
index 0000000..fd0328a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2012 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef DFS_PATTERN_DETECTOR_H
+#define DFS_PATTERN_DETECTOR_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/nl80211.h>
+
+/**
+ * struct pulse_event - describing pulses reported by PHY
+ * @ts: pulse time stamp in us
+ * @freq: channel frequency in MHz
+ * @width: pulse duration in us
+ * @rssi: rssi of radar event
+ */
+struct pulse_event {
+	u64 ts;
+	u16 freq;
+	u8 width;
+	u8 rssi;
+};
+
+/**
+ * struct radar_detector_specs - detector specs for a radar pattern type
+ * @type_id: pattern type, as defined by regulatory
+ * @width_min: minimum radar pulse width in [us]
+ * @width_max: maximum radar pulse width in [us]
+ * @pri_min: minimum pulse repetition interval in [us] (including tolerance)
+ * @pri_max: maximum pulse repetition interval in [us] (including tolerance)
+ * @num_pri: maximum number of different pri for this type
+ * @ppb: pulses per bursts for this type
+ * @ppb_thresh: number of pulses required to trigger detection
+ * @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
+ */
+struct radar_detector_specs {
+	u8 type_id;
+	u8 width_min;
+	u8 width_max;
+	u16 pri_min;
+	u16 pri_max;
+	u8 num_pri;
+	u8 ppb;
+	u8 ppb_thresh;
+	u8 max_pri_tolerance;
+};
+
+/**
+ * struct dfs_pattern_detector - DFS pattern detector
+ * @exit(): destructor
+ * @set_domain(): set DFS domain, resets detector lines upon domain changes
+ * @add_pulse(): add radar pulse to detector, returns true on detection
+ * @region: active DFS region, NL80211_DFS_UNSET until set
+ * @num_radar_types: number of different radar types
+ * @last_pulse_ts: time stamp of last valid pulse in usecs
+ * @radar_spec: array of radar detection specs
+ * @channel_detectors: list connecting channel_detector elements
+ */
+struct dfs_pattern_detector {
+	void (*exit)(struct dfs_pattern_detector *dpd);
+	bool (*set_domain)(struct dfs_pattern_detector *dpd,
+			   enum nl80211_dfs_regions region);
+	bool (*add_pulse)(struct dfs_pattern_detector *dpd,
+			  struct pulse_event *pe);
+
+	enum nl80211_dfs_regions region;
+	u8 num_radar_types;
+	u64 last_pulse_ts;
+
+	const struct radar_detector_specs *radar_spec;
+	struct list_head channel_detectors;
+};
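+
+/*
+ * Usage sketch (illustrative, hypothetical caller): the detector is created
+ * once and re-targeted whenever the regulatory DFS region changes:
+ *
+ *	struct dfs_pattern_detector *dpd;
+ *
+ *	dpd = dfs_pattern_detector_init(NL80211_DFS_UNSET);
+ *	...
+ *	if (dpd && !dpd->set_domain(dpd, NL80211_DFS_ETSI))
+ *		(region not supported: every pulse is reported as radar)
+ */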
+
+/**
+ * dfs_pattern_detector_init() - constructor for pattern detector class
+ * @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
+ * @return instance pointer on success, NULL otherwise
+ */
+#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
+extern struct dfs_pattern_detector *
+dfs_pattern_detector_init(enum nl80211_dfs_regions region);
+#else
+static inline struct dfs_pattern_detector *
+dfs_pattern_detector_init(enum nl80211_dfs_regions region)
+{
+	return NULL;
+}
+#endif /* CONFIG_ATH9K_DFS_CERTIFIED */
+
+#endif /* DFS_PATTERN_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
new file mode 100644
index 0000000..91b8dce
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2012 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "ath9k.h"
+#include "dfs_pattern_detector.h"
+#include "dfs_pri_detector.h"
+#include "dfs_debug.h"
+
+/**
+ * struct pri_sequence - sequence of pulses matching one PRI
+ * @head: list_head
+ * @pri: pulse repetition interval (PRI) in usecs
+ * @dur: duration of sequence in usecs
+ * @count: number of pulses in this sequence
+ * @count_falses: number of non-matching pulses in this sequence
+ * @first_ts: time stamp of first pulse in usecs
+ * @last_ts: time stamp of last pulse in usecs
+ * @deadline_ts: deadline when this sequence becomes invalid (first_ts + dur)
+ */
+struct pri_sequence {
+	struct list_head head;
+	u32 pri;
+	u32 dur;
+	u32 count;
+	u32 count_falses;
+	u64 first_ts;
+	u64 last_ts;
+	u64 deadline_ts;
+};
+
+/**
+ * struct pulse_elem - elements in pulse queue
+ * @ts: time stamp in usecs
+ */
+struct pulse_elem {
+	struct list_head head;
+	u64 ts;
+};
+
+/**
+ * pde_get_multiple() - get number of multiples considering a given tolerance
+ * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
+ */
+static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
+{
+	u32 remainder;
+	u32 factor;
+	u32 delta;
+
+	if (fraction == 0)
+		return 0;
+
+	delta = (val < fraction) ? (fraction - val) : (val - fraction);
+
+	if (delta <= tolerance)
+		/* val and fraction are within tolerance */
+		return 1;
+
+	factor = val / fraction;
+	remainder = val % fraction;
+	if (remainder > tolerance) {
+		/* no exact match */
+		if ((fraction - remainder) <= tolerance)
+			/* remainder is within tolerance */
+			factor++;
+		else
+			factor = 0;
+	}
+	return factor;
+}
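+
+/*
+ * Worked example (illustrative): with fraction = 1429 (a 700 Hz PRI) and
+ * tolerance = 16,
+ *	pde_get_multiple(2860, 1429, 16) = 2	(2 * 1429 = 2858, off by 2)
+ *	pde_get_multiple(1500, 1429, 16) = 0	(off by 71, outside tolerance)
+ * so a pulse 2860 us after the sequence start still matches a multiple of
+ * the PRI, while one 1500 us later does not.
+ */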
+
+/**
+ * DOC: Singleton Pulse and Sequence Pools
+ *
+ * Instances of pri_sequence and pulse_elem are kept in singleton pools to
+ * reduce the number of dynamic allocations. They are shared between all
+ * instances and grow up to the peak number of simultaneously used objects.
+ *
+ * Memory is freed after all references to the pools are released.
+ */
+static u32 singleton_pool_references;
+static LIST_HEAD(pulse_pool);
+static LIST_HEAD(pseq_pool);
+static DEFINE_SPINLOCK(pool_lock);
+
+static void pool_register_ref(void)
+{
+	spin_lock_bh(&pool_lock);
+	singleton_pool_references++;
+	DFS_POOL_STAT_INC(pool_reference);
+	spin_unlock_bh(&pool_lock);
+}
+
+static void pool_deregister_ref(void)
+{
+	spin_lock_bh(&pool_lock);
+	singleton_pool_references--;
+	DFS_POOL_STAT_DEC(pool_reference);
+	if (singleton_pool_references == 0) {
+		/* free singleton pools with no references left */
+		struct pri_sequence *ps, *ps0;
+		struct pulse_elem *p, *p0;
+
+		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
+			list_del(&p->head);
+			DFS_POOL_STAT_DEC(pulse_allocated);
+			kfree(p);
+		}
+		list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
+			list_del(&ps->head);
+			DFS_POOL_STAT_DEC(pseq_allocated);
+			kfree(ps);
+		}
+	}
+	spin_unlock_bh(&pool_lock);
+}
+
+static void pool_put_pulse_elem(struct pulse_elem *pe)
+{
+	spin_lock_bh(&pool_lock);
+	list_add(&pe->head, &pulse_pool);
+	DFS_POOL_STAT_DEC(pulse_used);
+	spin_unlock_bh(&pool_lock);
+}
+
+static void pool_put_pseq_elem(struct pri_sequence *pse)
+{
+	spin_lock_bh(&pool_lock);
+	list_add(&pse->head, &pseq_pool);
+	DFS_POOL_STAT_DEC(pseq_used);
+	spin_unlock_bh(&pool_lock);
+}
+
+static struct pri_sequence *pool_get_pseq_elem(void)
+{
+	struct pri_sequence *pse = NULL;
+	spin_lock_bh(&pool_lock);
+	if (!list_empty(&pseq_pool)) {
+		pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
+		list_del(&pse->head);
+		DFS_POOL_STAT_INC(pseq_used);
+	}
+	spin_unlock_bh(&pool_lock);
+	return pse;
+}
+
+static struct pulse_elem *pool_get_pulse_elem(void)
+{
+	struct pulse_elem *pe = NULL;
+	spin_lock_bh(&pool_lock);
+	if (!list_empty(&pulse_pool)) {
+		pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
+		list_del(&pe->head);
+		DFS_POOL_STAT_INC(pulse_used);
+	}
+	spin_unlock_bh(&pool_lock);
+	return pe;
+}
+
+static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
+{
+	struct list_head *l = &pde->pulses;
+	if (list_empty(l))
+		return NULL;
+	return list_entry(l->prev, struct pulse_elem, head);
+}
+
+static bool pulse_queue_dequeue(struct pri_detector *pde)
+{
+	struct pulse_elem *p = pulse_queue_get_tail(pde);
+	if (p != NULL) {
+		list_del_init(&p->head);
+		pde->count--;
+		/* give it back to pool */
+		pool_put_pulse_elem(p);
+	}
+	return (pde->count > 0);
+}
+
+/* remove pulses older than window */
+static void pulse_queue_check_window(struct pri_detector *pde)
+{
+	u64 min_valid_ts;
+	struct pulse_elem *p;
+
+	/* there is no delta time with less than 2 pulses */
+	if (pde->count < 2)
+		return;
+
+	if (pde->last_ts <= pde->window_size)
+		return;
+
+	min_valid_ts = pde->last_ts - pde->window_size;
+	while ((p = pulse_queue_get_tail(pde)) != NULL) {
+		if (p->ts >= min_valid_ts)
+			return;
+		pulse_queue_dequeue(pde);
+	}
+}
+
+static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
+{
+	struct pulse_elem *p = pool_get_pulse_elem();
+	if (p == NULL) {
+		p = kmalloc(sizeof(*p), GFP_KERNEL);
+		if (p == NULL) {
+			DFS_POOL_STAT_INC(pulse_alloc_error);
+			return false;
+		}
+		DFS_POOL_STAT_INC(pulse_allocated);
+		DFS_POOL_STAT_INC(pulse_used);
+	}
+	INIT_LIST_HEAD(&p->head);
+	p->ts = ts;
+	list_add(&p->head, &pde->pulses);
+	pde->count++;
+	pde->last_ts = ts;
+	pulse_queue_check_window(pde);
+	if (pde->count >= pde->max_count)
+		pulse_queue_dequeue(pde);
+	return true;
+}
+
+static bool pseq_handler_create_sequences(struct pri_detector *pde,
+					  u64 ts, u32 min_count)
+{
+	struct pulse_elem *p;
+	list_for_each_entry(p, &pde->pulses, head) {
+		struct pri_sequence ps, *new_ps;
+		struct pulse_elem *p2;
+		u32 tmp_false_count;
+		u64 min_valid_ts;
+		u32 delta_ts = ts - p->ts;
+
+		if (delta_ts < pde->rs->pri_min)
+			/* ignore too small pri */
+			continue;
+
+		if (delta_ts > pde->rs->pri_max)
+			/* stop on too large pri (sorted list) */
+			break;
+
+		/* build a new sequence with new potential pri */
+		ps.count = 2;
+		ps.count_falses = 0;
+		ps.first_ts = p->ts;
+		ps.last_ts = ts;
+		ps.pri = ts - p->ts;
+		ps.dur = ps.pri * (pde->rs->ppb - 1)
+				+ 2 * pde->rs->max_pri_tolerance;
+
+		p2 = p;
+		tmp_false_count = 0;
+		min_valid_ts = ts - ps.dur;
+		/* check which past pulses are candidates for new sequence */
+		list_for_each_entry_continue(p2, &pde->pulses, head) {
+			u32 factor;
+			if (p2->ts < min_valid_ts)
+				/* stop on crossing window border */
+				break;
+			/* check if the pulse matches (a multiple of) the PRI */
+			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
+						  pde->rs->max_pri_tolerance);
+			if (factor > 0) {
+				ps.count++;
+				ps.first_ts = p2->ts;
+				/*
+				 * on match, add the intermediate falses
+				 * and reset counter
+				 */
+				ps.count_falses += tmp_false_count;
+				tmp_false_count = 0;
+			} else {
+				/* this is a potential false one */
+				tmp_false_count++;
+			}
+		}
+		if (ps.count < min_count)
+			/* did not reach minimum count, drop sequence */
+			continue;
+
+		/* this is a valid one, add it */
+		ps.deadline_ts = ps.first_ts + ps.dur;
+		new_ps = pool_get_pseq_elem();
+		if (new_ps == NULL) {
+			new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL);
+			if (new_ps == NULL) {
+				DFS_POOL_STAT_INC(pseq_alloc_error);
+				return false;
+			}
+			DFS_POOL_STAT_INC(pseq_allocated);
+			DFS_POOL_STAT_INC(pseq_used);
+		}
+		memcpy(new_ps, &ps, sizeof(ps));
+		INIT_LIST_HEAD(&new_ps->head);
+		list_add(&new_ps->head, &pde->sequences);
+	}
+	return true;
+}
+
+/* check new ts and add to all matching existing sequences */
+static u32
+pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
+{
+	u32 max_count = 0;
+	struct pri_sequence *ps, *ps2;
+	list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
+		u32 delta_ts;
+		u32 factor;
+
+		/* first ensure that sequence is within window */
+		if (ts > ps->deadline_ts) {
+			list_del_init(&ps->head);
+			pool_put_pseq_elem(ps);
+			continue;
+		}
+
+		delta_ts = ts - ps->last_ts;
+		factor = pde_get_multiple(delta_ts, ps->pri,
+					  pde->rs->max_pri_tolerance);
+		if (factor > 0) {
+			ps->last_ts = ts;
+			ps->count++;
+
+			if (max_count < ps->count)
+				max_count = ps->count;
+		} else {
+			ps->count_falses++;
+		}
+	}
+	return max_count;
+}
+
+static struct pri_sequence *
+pseq_handler_check_detection(struct pri_detector *pde)
+{
+	struct pri_sequence *ps;
+
+	if (list_empty(&pde->sequences))
+		return NULL;
+
+	list_for_each_entry(ps, &pde->sequences, head) {
+		/*
+		 * we assume to have enough matching confidence if we
+		 * 1) have enough pulses
+		 * 2) have more matching than false pulses
+		 */
+		if ((ps->count >= pde->rs->ppb_thresh) &&
+		    (ps->count * pde->rs->num_pri >= ps->count_falses))
+			return ps;
+	}
+	return NULL;
+}
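+
+/*
+ * Example for the detection check above (illustrative): with ppb_thresh = 9
+ * and num_pri = 1, a sequence with count = 10 matching and count_falses = 7
+ * non-matching pulses is accepted (10 >= 9 and 10 * 1 >= 7), while one with
+ * count = 10 and count_falses = 11 is rejected.
+ */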
+
+/* free pulse queue and sequences list and give objects back to pools */
+static void pri_detector_reset(struct pri_detector *pde, u64 ts)
+{
+	struct pri_sequence *ps, *ps0;
+	struct pulse_elem *p, *p0;
+	list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
+		list_del_init(&ps->head);
+		pool_put_pseq_elem(ps);
+	}
+	list_for_each_entry_safe(p, p0, &pde->pulses, head) {
+		list_del_init(&p->head);
+		pool_put_pulse_elem(p);
+	}
+	pde->count = 0;
+	pde->last_ts = ts;
+}
+
+static void pri_detector_exit(struct pri_detector *de)
+{
+	pri_detector_reset(de, 0);
+	pool_deregister_ref();
+	kfree(de);
+}
+
+static bool pri_detector_add_pulse(struct pri_detector *de,
+				   struct pulse_event *event)
+{
+	u32 max_updated_seq;
+	struct pri_sequence *ps;
+	u64 ts = event->ts;
+	const struct radar_detector_specs *rs = de->rs;
+
+	/* ignore pulses not within width range */
+	if ((rs->width_min > event->width) || (rs->width_max < event->width))
+		return false;
+
+	if ((ts - de->last_ts) < rs->max_pri_tolerance)
+		/* if delta to last pulse is too short, don't use this pulse */
+		return false;
+	de->last_ts = ts;
+
+	max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
+
+	if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
+		pr_err("failed to create pulse sequences\n");
+		pri_detector_reset(de, ts);
+		return false;
+	}
+
+	ps = pseq_handler_check_detection(de);
+
+	if (ps != NULL) {
+		pr_info("DFS: radar found: pri=%d, count=%d, count_false=%d\n",
+			 ps->pri, ps->count, ps->count_falses);
+		pri_detector_reset(de, ts);
+		return true;
+	}
+	pulse_queue_enqueue(de, ts);
+	return false;
+}
+
+struct pri_detector *
+pri_detector_init(const struct radar_detector_specs *rs)
+{
+	struct pri_detector *de;
+	de = kzalloc(sizeof(*de), GFP_KERNEL);
+	if (de == NULL)
+		return NULL;
+	de->exit = pri_detector_exit;
+	de->add_pulse = pri_detector_add_pulse;
+	de->reset = pri_detector_reset;
+
+	INIT_LIST_HEAD(&de->sequences);
+	INIT_LIST_HEAD(&de->pulses);
+	de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
+	de->max_count = rs->ppb * 2;
+	de->rs = rs;
+
+	pool_register_ref();
+	return de;
+}
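+
+/*
+ * Example (illustrative): for an ETSI-style pattern with pri_max = 1445,
+ * ppb = 18 and num_pri = 1, the pulse window spans 1445 * 18 * 1 = 26010 us
+ * (about 26 ms) and at most 2 * 18 = 36 pulses are kept in the queue.
+ */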
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
new file mode 100644
index 0000000..81cde9f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2012 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef DFS_PRI_DETECTOR_H
+#define DFS_PRI_DETECTOR_H
+
+#include <linux/list.h>
+
+/**
+ * struct pri_detector - PRI detector element for a dedicated radar type
+ * @exit(): destructor
+ * @add_pulse(): add pulse event, returns true if pattern was detected
+ * @reset(): clear states and reset to given time stamp
+ * @rs: detector specs for this detector element
+ * @last_ts: last pulse time stamp considered for this element in usecs
+ * @sequences: list_head holding potential pulse sequences
+ * @pulses: list connecting pulse_elem objects
+ * @count: number of pulses in queue
+ * @max_count: maximum number of pulses to be queued
+ * @window_size: window size back from newest pulse time stamp in usecs
+ */
+struct pri_detector {
+	void (*exit)     (struct pri_detector *de);
+	bool (*add_pulse)(struct pri_detector *de, struct pulse_event *e);
+	void (*reset)    (struct pri_detector *de, u64 ts);
+
+/* private: internal use only */
+	const struct radar_detector_specs *rs;
+	u64 last_ts;
+	struct list_head sequences;
+	struct list_head pulses;
+	u32 count;
+	u32 max_count;
+	u32 window_size;
+};
+
+struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs);
+
+#endif /* DFS_PRI_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index c435232..0512397 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -16,14 +16,6 @@
 
 #include "hw.h"
 
-static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
-{
-	if (fbin == AR5416_BCHAN_UNUSED)
-		return fbin;
-
-	return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
-}
-
 void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
 {
         REG_WRITE(ah, reg, val);
@@ -290,6 +282,34 @@
 	return twiceMaxEdgePower;
 }
 
+u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
+			      u8 antenna_reduction)
+{
+	u16 reduction = antenna_reduction;
+
+	/*
+	 * Reduce scaled Power by number of chains active
+	 * to get the per chain tx power level.
+	 */
+	switch (ar5416_get_ntxchains(ah->txchainmask)) {
+	case 1:
+		break;
+	case 2:
+		reduction += POWER_CORRECTION_FOR_TWO_CHAIN;
+		break;
+	case 3:
+		reduction += POWER_CORRECTION_FOR_THREE_CHAIN;
+		break;
+	}
+
+	if (power_limit > reduction)
+		power_limit -= reduction;
+	else
+		power_limit = 0;
+
+	return power_limit;
+}
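+
+/*
+ * Example (illustrative): the power values handled here are in half-dB
+ * steps, so the corrections of 6 and 10 correspond to roughly 3 dB and
+ * 4.8 dB. With power_limit = 60 (30 dBm), no antenna reduction and two
+ * active chains, ath9k_hw_get_scaled_power() returns 60 - 6 = 54, i.e.
+ * 27 dBm per chain.
+ */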
+
 void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
@@ -299,10 +319,10 @@
 	case 1:
 		break;
 	case 2:
-		regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
+		regulatory->max_power_level += POWER_CORRECTION_FOR_TWO_CHAIN;
 		break;
 	case 3:
-		regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
+		regulatory->max_power_level += POWER_CORRECTION_FOR_THREE_CHAIN;
 		break;
 	default:
 		ath_dbg(common, EEPROM, "Invalid chainmask configuration\n");
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 5ff7ab9..33acb92 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -79,8 +79,8 @@
 #define SUB_NUM_CTL_MODES_AT_5G_40 2
 #define SUB_NUM_CTL_MODES_AT_2G_40 3
 
-#define INCREASE_MAXPOW_BY_TWO_CHAIN     6  /* 10*log10(2)*2 */
-#define INCREASE_MAXPOW_BY_THREE_CHAIN   10 /* 10*log10(3)*2 */
+#define POWER_CORRECTION_FOR_TWO_CHAIN		6  /* 10*log10(2)*2 */
+#define POWER_CORRECTION_FOR_THREE_CHAIN	10 /* 10*log10(3)*2 */
 
 /*
  * For AR9285 and later chipsets, the following bits are not being programmed
@@ -686,6 +686,8 @@
 				u16 numRates, bool isHt40Target);
 u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
 				bool is2GHz, int num_band_edges);
+u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
+			      u8 antenna_reduction);
 void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah);
 int ath9k_hw_eeprom_init(struct ath_hw *ah);
 
@@ -697,6 +699,14 @@
 				u16 *pPdGainBoundaries, u8 *pPDADCValues,
 				u16 numXpdGains);
 
+static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
+{
+	if (fbin == AR5416_BCHAN_UNUSED)
+		return fbin;
+
+	return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
+}
+
 #define ar5416_get_ntxchains(_txchainmask)			\
 	(((_txchainmask >> 2) & 1) +                            \
 	 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index f272236..aa61476 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -564,9 +564,6 @@
 	(((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
 	 ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))
 
-#define REDUCE_SCALED_POWER_BY_TWO_CHAIN     6
-#define REDUCE_SCALED_POWER_BY_THREE_CHAIN   10
-
 	u16 twiceMaxEdgePower;
 	int i;
 	struct cal_ctl_data_ar9287 *rep;
@@ -591,29 +588,8 @@
 	tx_chainmask = ah->txchainmask;
 
 	ath9k_hw_get_channel_centers(ah, chan, &centers);
-	scaledPower = powerLimit - antenna_reduction;
-
-	/*
-	 * Reduce scaled Power by number of chains active
-	 * to get the per chain tx power level.
-	 */
-	switch (ar5416_get_ntxchains(tx_chainmask)) {
-	case 1:
-		break;
-	case 2:
-		if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
-			scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
-		else
-			scaledPower = 0;
-		break;
-	case 3:
-		if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
-			scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
-		else
-			scaledPower = 0;
-		break;
-	}
-	scaledPower = max((u16)0, scaledPower);
+	scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
+						antenna_reduction);
 
 	/*
 	 * Get TX power from EEPROM.
@@ -786,8 +762,6 @@
 
 #undef CMP_CTL
 #undef CMP_NO_CTL
-#undef REDUCE_SCALED_POWER_BY_TWO_CHAIN
-#undef REDUCE_SCALED_POWER_BY_THREE_CHAIN
 }
 
 static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
@@ -824,6 +798,8 @@
 			regulatory->max_power_level = ratesArray[i];
 	}
 
+	ath9k_hw_update_regulatory_maxpower(ah);
+
 	if (test)
 		return;
 
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 619b95d..b5fba8b 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -991,9 +991,6 @@
 						  u16 antenna_reduction,
 						  u16 powerLimit)
 {
-#define REDUCE_SCALED_POWER_BY_TWO_CHAIN     6  /* 10*log10(2)*2 */
-#define REDUCE_SCALED_POWER_BY_THREE_CHAIN   9 /* 10*log10(3)*2 */
-
 	struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
 	u16 twiceMaxEdgePower;
 	int i;
@@ -1027,24 +1024,8 @@
 
 	ath9k_hw_get_channel_centers(ah, chan, &centers);
 
-	scaledPower = powerLimit - antenna_reduction;
-
-	switch (ar5416_get_ntxchains(tx_chainmask)) {
-	case 1:
-		break;
-	case 2:
-		if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
-			scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
-		else
-			scaledPower = 0;
-		break;
-	case 3:
-		if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
-			scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
-		else
-			scaledPower = 0;
-		break;
-	}
+	scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
+						antenna_reduction);
 
 	if (IS_CHAN_2GHZ(chan)) {
 		numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
@@ -1263,20 +1244,7 @@
 			regulatory->max_power_level = ratesArray[i];
 	}
 
-	switch(ar5416_get_ntxchains(ah->txchainmask)) {
-	case 1:
-		break;
-	case 2:
-		regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
-		break;
-	case 3:
-		regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
-		break;
-	default:
-		ath_dbg(ath9k_hw_common(ah), EEPROM,
-			"Invalid chainmask configuration\n");
-		break;
-	}
+	ath9k_hw_update_regulatory_maxpower(ah);
 
 	if (test)
 		return;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index fbe23de..281a9af 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -41,6 +41,9 @@
 {
 	int ret;
 
+	if (AR_SREV_9100(sc->sc_ah))
+		return;
+
 	if (sc->sc_ah->led_pin < 0) {
 		if (AR_SREV_9287(sc->sc_ah))
 			sc->sc_ah->led_pin = ATH_LED_PIN_9287;
@@ -362,7 +365,7 @@
 		ath9k_hw_btcoex_disable(ah);
 		if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
 			ath9k_btcoex_timer_pause(sc);
-		if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI)
+		if (AR_SREV_9462(ah))
 			ath_mci_flush_profile(&sc->btcoex.mci);
 	}
 }
@@ -373,7 +376,7 @@
 	    ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
 		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
 
-	if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI)
+	if (AR_SREV_9462(sc->sc_ah))
 		ath_mci_cleanup(sc);
 }
 
@@ -399,17 +402,16 @@
 		txq = sc->tx.txq_map[WME_AC_BE];
 		ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
 		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
-		break;
-	case ATH_BTCOEX_CFG_MCI:
-		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
-		sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
-		INIT_LIST_HEAD(&sc->btcoex.mci.info);
+		if (AR_SREV_9462(ah)) {
+			sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+			INIT_LIST_HEAD(&sc->btcoex.mci.info);
 
-		r = ath_mci_setup(sc);
-		if (r)
-			return r;
+			r = ath_mci_setup(sc);
+			if (r)
+				return r;
 
-		ath9k_hw_btcoex_init_mci(ah);
+			ath9k_hw_btcoex_init_mci(ah);
+		}
 
 		break;
 	default:
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 424aabb..f67cd95 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -53,6 +53,8 @@
 	  .driver_info = AR9280_USB },  /* SMC Networks */
 	{ USB_DEVICE(0x0411, 0x017f),
 	  .driver_info = AR9280_USB },  /* Sony UWA-BR100 */
+	{ USB_DEVICE(0x04da, 0x3904),
+	  .driver_info = AR9280_USB },
 
 	{ USB_DEVICE(0x0cf3, 0x20ff),
 	  .driver_info = STORAGE_DEVICE },
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index de5ee15..25213d5 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -14,6 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "htc.h"
 
 MODULE_AUTHOR("Atheros Communications");
@@ -711,7 +713,8 @@
 
 	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
+			    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
 	hw->queues = 4;
 	hw->channel_change_time = 5000;
@@ -966,9 +969,7 @@
 static int __init ath9k_htc_init(void)
 {
 	if (ath9k_hif_usb_init() < 0) {
-		printk(KERN_ERR
-			"ath9k_htc: No USB devices found,"
-			" driver not installed.\n");
+		pr_err("No USB devices found, driver not installed\n");
 		return -ENODEV;
 	}
 
@@ -979,6 +980,6 @@
 static void __exit ath9k_htc_exit(void)
 {
 	ath9k_hif_usb_exit();
-	printk(KERN_INFO "ath9k_htc: Driver unloaded\n");
+	pr_info("Driver unloaded\n");
 }
 module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index c25226a..4a9570d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -14,6 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "htc.h"
 
 static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
@@ -461,7 +463,7 @@
 		      char *product, u32 drv_info)
 {
 	if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) {
-		printk(KERN_ERR "Failed to initialize the device\n");
+		pr_err("Failed to initialize the device\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 6c69e4e..f84477c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -24,6 +24,8 @@
 #include "rc.h"
 #include "ar9003_mac.h"
 #include "ar9003_mci.h"
+#include "debug.h"
+#include "ath9k.h"
 
 static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
 
@@ -83,6 +85,53 @@
 /* Helper Functions */
 /********************/
 
+#ifdef CONFIG_ATH9K_DEBUGFS
+
+void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
+{
+	struct ath_softc *sc = common->priv;
+	if (sync_cause)
+		sc->debug.stats.istats.sync_cause_all++;
+	if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
+		sc->debug.stats.istats.sync_rtc_irq++;
+	if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
+		sc->debug.stats.istats.sync_mac_irq++;
+	if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
+		sc->debug.stats.istats.eeprom_illegal_access++;
+	if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
+		sc->debug.stats.istats.apb_timeout++;
+	if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
+		sc->debug.stats.istats.pci_mode_conflict++;
+	if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
+		sc->debug.stats.istats.host1_fatal++;
+	if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
+		sc->debug.stats.istats.host1_perr++;
+	if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
+		sc->debug.stats.istats.trcv_fifo_perr++;
+	if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
+		sc->debug.stats.istats.radm_cpl_ep++;
+	if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
+		sc->debug.stats.istats.radm_cpl_dllp_abort++;
+	if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
+		sc->debug.stats.istats.radm_cpl_tlp_abort++;
+	if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
+		sc->debug.stats.istats.radm_cpl_ecrc_err++;
+	if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
+		sc->debug.stats.istats.radm_cpl_timeout++;
+	if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
+		sc->debug.stats.istats.local_timeout++;
+	if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
+		sc->debug.stats.istats.pm_access++;
+	if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
+		sc->debug.stats.istats.mac_awake++;
+	if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
+		sc->debug.stats.istats.mac_asleep++;
+	if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
+		sc->debug.stats.istats.mac_sleep_access++;
+}
+#endif
+
 static void ath9k_hw_set_clockrate(struct ath_hw *ah)
 {
 	struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
@@ -142,6 +191,22 @@
 }
 EXPORT_SYMBOL(ath9k_hw_wait);
 
+void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
+			  int hw_delay)
+{
+	if (IS_CHAN_B(chan))
+		hw_delay = (4 * hw_delay) / 22;
+	else
+		hw_delay /= 10;
+
+	if (IS_CHAN_HALF_RATE(chan))
+		hw_delay *= 2;
+	else if (IS_CHAN_QUARTER_RATE(chan))
+		hw_delay *= 4;
+
+	udelay(hw_delay + BASE_ACTIVATE_DELAY);
+}
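+
+/*
+ * Example (illustrative): for an OFDM channel at quarter rate and a
+ * hw_delay argument of 300, the delay becomes 300 / 10 = 30, scaled by 4
+ * to 120, so the function waits 120 us plus BASE_ACTIVATE_DELAY.
+ */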
+
 void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
 			  int column, unsigned int *writecnt)
 {
@@ -388,8 +453,8 @@
 {
 	int i;
 
-	ah->config.dma_beacon_response_time = 2;
-	ah->config.sw_beacon_response_time = 10;
+	ah->config.dma_beacon_response_time = 1;
+	ah->config.sw_beacon_response_time = 6;
 	ah->config.additional_swba_backoff = 0;
 	ah->config.ack_6mb = 0x0;
 	ah->config.cwm_ignore_extcca = 0;
@@ -445,7 +510,6 @@
 		AR_STA_ID1_MCAST_KSRCH;
 	if (AR_SREV_9100(ah))
 		ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
-	ah->enable_32kHz_clock = DONT_USE_32KHZ;
 	ah->slottime = ATH9K_SLOT_TIME_9;
 	ah->globaltxtimeout = (u32) -1;
 	ah->power_mode = ATH9K_PM_UNDEFINED;
@@ -972,7 +1036,7 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ieee80211_conf *conf = &common->hw->conf;
 	const struct ath9k_channel *chan = ah->curchan;
-	int acktimeout, ctstimeout;
+	int acktimeout, ctstimeout, ack_offset = 0;
 	int slottime;
 	int sifstime;
 	int rx_lat = 0, tx_lat = 0, eifs = 0;
@@ -993,6 +1057,11 @@
 		rx_lat = 37;
 	tx_lat = 54;
 
+	if (IS_CHAN_5GHZ(chan))
+		sifstime = 16;
+	else
+		sifstime = 10;
+
 	if (IS_CHAN_HALF_RATE(chan)) {
 		eifs = 175;
 		rx_lat *= 2;
@@ -1000,8 +1069,9 @@
 		if (IS_CHAN_A_FAST_CLOCK(ah, chan))
 		    tx_lat += 11;
 
+		sifstime *= 2;
+		ack_offset = 16;
 		slottime = 13;
-		sifstime = 32;
 	} else if (IS_CHAN_QUARTER_RATE(chan)) {
 		eifs = 340;
 		rx_lat = (rx_lat * 4) - 1;
@@ -1009,8 +1079,9 @@
 		if (IS_CHAN_A_FAST_CLOCK(ah, chan))
 		    tx_lat += 22;
 
+		sifstime *= 4;
+		ack_offset = 32;
 		slottime = 21;
-		sifstime = 64;
 	} else {
 		if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
 			eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO;
@@ -1024,14 +1095,10 @@
 		tx_lat = MS(reg, AR_USEC_TX_LAT);
 
 		slottime = ah->slottime;
-		if (IS_CHAN_5GHZ(chan))
-			sifstime = 16;
-		else
-			sifstime = 10;
 	}
 
 	/* As defined by IEEE 802.11-2007 17.3.8.6 */
-	acktimeout = slottime + sifstime + 3 * ah->coverage_class;
+	acktimeout = slottime + sifstime + 3 * ah->coverage_class + ack_offset;
 	ctstimeout = acktimeout;
 
 	/*
@@ -1041,7 +1108,8 @@
 	 * BA frames in some implementations, but it has been found to fix ACK
 	 * timeout issues in other cases as well.
 	 */
-	if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) {
+	if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ &&
+	    !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
 		acktimeout += 64 - sifstime - ah->slottime;
 		ctstimeout += 48 - sifstime - ah->slottime;
 	}
@@ -1454,7 +1522,7 @@
 		return false;
 	}
 	ath9k_hw_set_clockrate(ah);
-	ath9k_hw_apply_txpower(ah, chan);
+	ath9k_hw_apply_txpower(ah, chan, false);
 	ath9k_hw_rfbus_done(ah);
 
 	if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
@@ -1491,11 +1559,84 @@
 	}
 }
 
+static bool ath9k_hw_check_dcs(u32 dma_dbg, u32 num_dcu_states,
+			       int *hang_state, int *hang_pos)
+{
+	static u32 dcu_chain_state[] = {5, 6, 9}; /* DCU chain stuck states */
+	u32 chain_state, dcs_pos, i;
+
+	for (dcs_pos = 0; dcs_pos < num_dcu_states; dcs_pos++) {
+		chain_state = (dma_dbg >> (5 * dcs_pos)) & 0x1f;
+		for (i = 0; i < 3; i++) {
+			if (chain_state == dcu_chain_state[i]) {
+				*hang_state = chain_state;
+				*hang_pos = dcs_pos;
+				return true;
+			}
+		}
+	}
+	return false;
+}
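+
+/*
+ * Example (illustrative): each DCU state occupies 5 bits of the debug
+ * register value. For dma_dbg = 0x125 the first two states decode to
+ * (0x125 >> 0) & 0x1f = 5 and (0x125 >> 5) & 0x1f = 9, so the scan above
+ * reports a hang candidate with *hang_state = 5 at *hang_pos = 0.
+ */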
+
+#define DCU_COMPLETE_STATE        1
+#define DCU_COMPLETE_STATE_MASK 0x3
+#define NUM_STATUS_READS         50
+static bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
+{
+	u32 chain_state, comp_state, dcs_reg = AR_DMADBG_4;
+	u32 i, hang_pos, hang_state, num_state = 6;
+
+	comp_state = REG_READ(ah, AR_DMADBG_6);
+
+	if ((comp_state & DCU_COMPLETE_STATE_MASK) != DCU_COMPLETE_STATE) {
+		ath_dbg(ath9k_hw_common(ah), RESET,
+			"MAC Hang signature not found at DCU complete\n");
+		return false;
+	}
+
+	chain_state = REG_READ(ah, dcs_reg);
+	if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
+		goto hang_check_iter;
+
+	dcs_reg = AR_DMADBG_5;
+	num_state = 4;
+	chain_state = REG_READ(ah, dcs_reg);
+	if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
+		goto hang_check_iter;
+
+	ath_dbg(ath9k_hw_common(ah), RESET,
+		"MAC Hang signature 1 not found\n");
+	return false;
+
+hang_check_iter:
+	ath_dbg(ath9k_hw_common(ah), RESET,
+		"DCU registers: chain %08x complete %08x Hang: state %d pos %d\n",
+		chain_state, comp_state, hang_state, hang_pos);
+
+	for (i = 0; i < NUM_STATUS_READS; i++) {
+		chain_state = REG_READ(ah, dcs_reg);
+		chain_state = (chain_state >> (5 * hang_pos)) & 0x1f;
+		comp_state = REG_READ(ah, AR_DMADBG_6);
+
+		if (((comp_state & DCU_COMPLETE_STATE_MASK) !=
+					DCU_COMPLETE_STATE) ||
+		    (chain_state != hang_state))
+			return false;
+	}
+
+	ath_dbg(ath9k_hw_common(ah), RESET, "MAC Hang signature 1 found\n");
+
+	return true;
+}
+
 bool ath9k_hw_check_alive(struct ath_hw *ah)
 {
 	int count = 50;
 	u32 reg;
 
+	if (AR_SREV_9300(ah))
+		return !ath9k_hw_detect_mac_hang(ah);
+
 	if (AR_SREV_9285_12_OR_LATER(ah))
 		return true;
 
@@ -1546,6 +1687,10 @@
 	if (chan->channel == ah->curchan->channel)
 		goto fail;
 
+	if ((ah->curchan->channelFlags | chan->channelFlags) &
+	    (CHANNEL_HALF | CHANNEL_QUARTER))
+		goto fail;
+
 	if ((chan->channelFlags & CHANNEL_ALL) !=
 	    (ah->curchan->channelFlags & CHANNEL_ALL))
 		goto fail;
@@ -2652,7 +2797,8 @@
 	return ah->eep_ops->get_eeprom(ah, gain_param);
 }
 
-void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan)
+void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
+			    bool test)
 {
 	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
 	struct ieee80211_channel *channel;
@@ -2673,7 +2819,7 @@
 
 	ah->eep_ops->set_txpower(ah, chan,
 				 ath9k_regd_get_ctl(reg, chan),
-				 ant_reduction, new_pwr, false);
+				 ant_reduction, new_pwr, test);
 }
 
 void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
@@ -2686,7 +2832,7 @@
 	if (test)
 		channel->max_power = MAX_RATE_POWER / 2;
 
-	ath9k_hw_apply_txpower(ah, chan);
+	ath9k_hw_apply_txpower(ah, chan, test);
 
 	if (test)
 		channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index aa1680a..828b9bb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -708,7 +708,6 @@
 	struct ar5416Stats stats;
 	struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
 
-	int16_t curchan_rad_index;
 	enum ath9k_int imask;
 	u32 imrs2_reg;
 	u32 txok_interrupt_mask;
@@ -762,11 +761,6 @@
 
 	u32 sta_id1_defaults;
 	u32 misc_mode;
-	enum {
-		AUTO_32KHZ,
-		USE_32KHZ,
-		DONT_USE_32KHZ,
-	} enable_32kHz_clock;
 
 	/* Private to hardware code */
 	struct ath_hw_private_ops private_ops;
@@ -783,7 +777,6 @@
 	u32 *analogBank7Data;
 	u32 *bank6Temp;
 
-	u8 txpower_limit;
 	int coverage_class;
 	u32 slottime;
 	u32 globaltxtimeout;
@@ -848,7 +841,6 @@
 	struct ath_gen_timer_table hw_gen_timers;
 
 	struct ar9003_txs *ts_ring;
-	void *ts_start;
 	u32 ts_paddr_start;
 	u32 ts_paddr_end;
 	u16 ts_tail;
@@ -915,7 +907,6 @@
 }
 
 /* Initialization, Detach, Reset */
-const char *ath9k_hw_probe(u16 vendorid, u16 devid);
 void ath9k_hw_deinit(struct ath_hw *ah);
 int ath9k_hw_init(struct ath_hw *ah);
 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
@@ -932,6 +923,8 @@
 void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
 
 /* General Operation */
+void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
+			  int hw_delay);
 bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
 void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
 			  int column, unsigned int *writecnt);
@@ -965,6 +958,13 @@
 
 bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
 
+#ifdef CONFIG_ATH9K_DEBUGFS
+void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause);
+#else
+static inline void ath9k_debug_sync_cause(struct ath_common *common,
+					  u32 sync_cause) {}
+#endif
+
 /* Generic hw timer primitives */
 struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
 					  void (*trigger)(void *),
@@ -985,7 +985,8 @@
 /* PHY */
 void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
 				   u32 *coef_mantissa, u32 *coef_exponent);
-void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan);
+void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
+			    bool test);
 
 /*
  * Code Specific to AR5008, AR9001 or AR9002,
@@ -1011,7 +1012,6 @@
 int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
 int ar9003_paprd_init_table(struct ath_hw *ah);
 bool ar9003_paprd_is_done(struct ath_hw *ah);
-void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains);
 
 /* Hardware family op attach helpers */
 void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index cb00645..dee9e09 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -14,6 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/ath9k_platform.h>
@@ -519,6 +521,8 @@
 	atomic_set(&ah->intr_ref_cnt, -1);
 	sc->sc_ah = ah;
 
+	sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET);
+
 	if (!pdata) {
 		ah->ah_flags |= AH_USE_EEPROM;
 		sc->sc_ah->led_pin = -1;
@@ -642,6 +646,24 @@
 		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
 }
 
+static const struct ieee80211_iface_limit if_limits[] = {
+	{ .max = 2048,	.types = BIT(NL80211_IFTYPE_STATION) |
+				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+				 BIT(NL80211_IFTYPE_WDS) },
+	{ .max = 8,	.types =
+#ifdef CONFIG_MAC80211_MESH
+				 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+				 BIT(NL80211_IFTYPE_AP) |
+				 BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+	.limits = if_limits,
+	.n_limits = ARRAY_SIZE(if_limits),
+	.max_interfaces = 2048,
+	.num_different_channels = 1,
+};
 
 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 {
@@ -671,11 +693,15 @@
 		BIT(NL80211_IFTYPE_ADHOC) |
 		BIT(NL80211_IFTYPE_MESH_POINT);
 
+	hw->wiphy->iface_combinations = &if_comb;
+	hw->wiphy->n_iface_combinations = 1;
+
 	if (AR_SREV_5416(sc->sc_ah))
 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+	hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
 	hw->queues = 4;
 	hw->max_rates = 4;
@@ -779,6 +805,7 @@
 			goto error_world;
 	}
 
+	setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
 	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
 
 	ath_init_leds(sc);
@@ -821,6 +848,8 @@
 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
 
 	ath9k_hw_deinit(sc->sc_ah);
+	if (sc->dfs_detector != NULL)
+		sc->dfs_detector->exit(sc->dfs_detector);
 
 	kfree(sc->sc_ah);
 	sc->sc_ah = NULL;
@@ -866,17 +895,14 @@
 	/* Register rate control algorithm */
 	error = ath_rate_control_register();
 	if (error != 0) {
-		printk(KERN_ERR
-			"ath9k: Unable to register rate control "
-			"algorithm: %d\n",
-			error);
+		pr_err("Unable to register rate control algorithm: %d\n",
+		       error);
 		goto err_out;
 	}
 
 	error = ath_pci_init();
 	if (error < 0) {
-		printk(KERN_ERR
-			"ath9k: No PCI devices found, driver not installed.\n");
+		pr_err("No PCI devices found, driver not installed\n");
 		error = -ENODEV;
 		goto err_rate_unregister;
 	}
@@ -905,6 +931,6 @@
 	ath_ahb_exit();
 	ath_pci_exit();
 	ath_rate_control_unregister();
-	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
+	pr_info("%s: Driver unloaded\n", dev_info);
 }
 module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index f7bd253..04ef775 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -133,8 +133,16 @@
 
 void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
 {
+	int maxdelay = 1000;
 	int i, q;
 
+	if (ah->curchan) {
+		if (IS_CHAN_HALF_RATE(ah->curchan))
+			maxdelay *= 2;
+		else if (IS_CHAN_QUARTER_RATE(ah->curchan))
+			maxdelay *= 4;
+	}
+
 	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
 
 	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
@@ -142,7 +150,7 @@
 	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
 
 	for (q = 0; q < AR_NUM_QCU; q++) {
-		for (i = 0; i < 1000; i++) {
+		for (i = 0; i < maxdelay; i++) {
 			if (i)
 				udelay(5);
 
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 798ea57..dfa78e8 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -113,21 +113,25 @@
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	enum ath9k_power_mode mode;
 	unsigned long flags;
+	bool reset;
 
 	spin_lock_irqsave(&sc->sc_pm_lock, flags);
 	if (--sc->ps_usecount != 0)
 		goto unlock;
 
-	if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
+	if (sc->ps_idle) {
+		ath9k_hw_setrxabort(sc->sc_ah, 1);
+		ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
 		mode = ATH9K_PM_FULL_SLEEP;
-	else if (sc->ps_enabled &&
-		 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
-			      PS_WAIT_FOR_CAB |
-			      PS_WAIT_FOR_PSPOLL_DATA |
-			      PS_WAIT_FOR_TX_ACK)))
+	} else if (sc->ps_enabled &&
+		   !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+				     PS_WAIT_FOR_CAB |
+				     PS_WAIT_FOR_PSPOLL_DATA |
+				     PS_WAIT_FOR_TX_ACK))) {
 		mode = ATH9K_PM_NETWORK_SLEEP;
-	else
+	} else {
 		goto unlock;
+	}
 
 	spin_lock(&common->cc_lock);
 	ath_hw_cycle_counters_update(common);
@@ -241,6 +245,7 @@
 
 	sc->hw_busy_count = 0;
 	del_timer_sync(&common->ani.timer);
+	del_timer_sync(&sc->rx_poll_timer);
 
 	ath9k_debug_samp_bb_mac(sc);
 	ath9k_hw_disable_interrupts(ah);
@@ -282,6 +287,7 @@
 
 		ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
 		ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
+		ath_start_rx_poll(sc, 3);
 		if (!common->disable_ani)
 			ath_start_ani(common);
 	}
@@ -690,17 +696,6 @@
 		goto out;
 	}
 
-	/*
-	 * Only run the baseband hang check if beacons stop working in AP or
-	 * IBSS mode, because it has a high false positive rate. For station
-	 * mode it should not be necessary, since the upper layers will detect
-	 * this through a beacon miss automatically and the following channel
-	 * change will trigger a hardware reset anyway
-	 */
-	if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
-	    !ath9k_hw_check_alive(ah))
-		ieee80211_queue_work(sc->hw, &sc->hw_check_work);
-
 	if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
 		/*
 		 * TSF sync does not look correct; remain awake to sync with
@@ -912,10 +907,19 @@
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	unsigned long flags;
 	int busy;
+	u8 is_alive, nbeacon = 1;
 
 	ath9k_ps_wakeup(sc);
-	if (ath9k_hw_check_alive(sc->sc_ah))
+	is_alive = ath9k_hw_check_alive(sc->sc_ah);
+
+	if (is_alive && !AR_SREV_9300(sc->sc_ah))
 		goto out;
+	else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
+		ath_dbg(common, RESET,
+			"DCU stuck detected, scheduling chip reset\n");
+		RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
+		goto sched_reset;
+	}
 
 	spin_lock_irqsave(&common->cc_lock, flags);
 	busy = ath_update_survey_stats(sc);
@@ -926,12 +930,18 @@
 	if (busy >= 99) {
 		if (++sc->hw_busy_count >= 3) {
 			RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
-			ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+			goto sched_reset;
 		}
-
-	} else if (busy >= 0)
+	} else if (busy >= 0) {
 		sc->hw_busy_count = 0;
+		nbeacon = 3;
+	}
 
+	ath_start_rx_poll(sc, nbeacon);
+	goto out;
+
+sched_reset:
+	ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
 out:
 	ath9k_ps_restore(sc);
 }
@@ -1094,14 +1104,7 @@
 		}
 	}
 
-	/*
-	 * Cannot tx while the hardware is in full sleep, it first needs a full
-	 * chip reset to recover from that
-	 */
-	if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
-		goto exit;
-
-	if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
+	if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) {
 		/*
 		 * We are using PS-Poll and mac80211 can request TX while in
 		 * power save mode. Need to wake up hardware for the TX to be
@@ -1120,12 +1123,21 @@
 		}
 		/*
 		 * The actual restore operation will happen only after
-		 * the sc_flags bit is cleared. We are just dropping
+		 * the ps_flags bit is cleared. We are just dropping
 		 * the ps_usecount here.
 		 */
 		ath9k_ps_restore(sc);
 	}
 
+	/*
+	 * Cannot tx while the hardware is in full sleep, it first needs a full
+	 * chip reset to recover from that
+	 */
+	if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) {
+		ath_err(common, "TX while HW is in FULL_SLEEP mode\n");
+		goto exit;
+	}
+
 	memset(&txctl, 0, sizeof(struct ath_tx_control));
 	txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
 
@@ -1133,6 +1145,7 @@
 
 	if (ath_tx_start(hw, skb, &txctl) != 0) {
 		ath_dbg(common, XMIT, "TX failed\n");
+		TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
 		goto exit;
 	}
 
@@ -1151,6 +1164,7 @@
 	mutex_lock(&sc->mutex);
 
 	ath_cancel_work(sc);
+	del_timer_sync(&sc->rx_poll_timer);
 
 	if (sc->sc_flags & SC_OP_INVALID) {
 		ath_dbg(common, ANY, "Device not present\n");
@@ -1237,7 +1251,6 @@
 	ath9k_set_beaconing_status(sc, false);
 	ath_beacon_return(sc, avp);
 	ath9k_set_beaconing_status(sc, true);
-	sc->sc_flags &= ~SC_OP_BEACONS;
 }
 
 static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
@@ -1368,21 +1381,31 @@
 	ath9k_calculate_summary_state(hw, vif);
 
 	if (ath9k_uses_beacons(vif->type)) {
-		int error;
-		/* This may fail because upper levels do not have beacons
-		 * properly configured yet.  That's OK, we assume it
-		 * will be properly configured and then we will be notified
-		 * in the info_changed method and set up beacons properly
-		 * there.
-		 */
+		/* Reserve a beacon slot for the vif */
 		ath9k_set_beaconing_status(sc, false);
-		error = ath_beacon_alloc(sc, vif);
-		if (!error)
-			ath_beacon_config(sc, vif);
+		ath_beacon_alloc(sc, vif);
 		ath9k_set_beaconing_status(sc, true);
 	}
 }
 
+void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
+{
+	if (!AR_SREV_9300(sc->sc_ah))
+		return;
+
+	if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF))
+		return;
+
+	mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
+			(nbeacon * sc->cur_beacon_conf.beacon_interval));
+}
+
+void ath_rx_poll(unsigned long data)
+{
+	struct ath_softc *sc = (struct ath_softc *)data;
+
+	ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+}
 
 static int ath9k_add_interface(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif)
@@ -1511,6 +1534,7 @@
 static void ath9k_enable_ps(struct ath_softc *sc)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	sc->ps_enabled = true;
 	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
@@ -1520,11 +1544,13 @@
 		}
 		ath9k_hw_setrxabort(ah, 1);
 	}
+	ath_dbg(common, PS, "PowerSave enabled\n");
 }
 
 static void ath9k_disable_ps(struct ath_softc *sc)
 {
 	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 
 	sc->ps_enabled = false;
 	ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
@@ -1539,7 +1565,7 @@
 			ath9k_hw_set_interrupts(ah);
 		}
 	}
-
+	ath_dbg(common, PS, "PowerSave disabled\n");
 }
 
 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1911,6 +1937,8 @@
 		sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
 		sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
 
+		ath_start_rx_poll(sc, 3);
+
 		if (!common->disable_ani) {
 			sc->sc_flags |= SC_OP_ANI_RUN;
 			ath_start_ani(common);
@@ -1950,6 +1978,7 @@
 		/* Stop ANI */
 		sc->sc_flags &= ~SC_OP_ANI_RUN;
 		del_timer_sync(&common->ani.timer);
+		del_timer_sync(&sc->rx_poll_timer);
 		memset(&sc->caldata, 0, sizeof(sc->caldata));
 	}
 }
@@ -1964,7 +1993,6 @@
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_vif *avp = (void *)vif->drv_priv;
 	int slottime;
-	int error;
 
 	ath9k_ps_wakeup(sc);
 	mutex_lock(&sc->mutex);
@@ -1993,16 +2021,29 @@
 		} else {
 			sc->sc_flags &= ~SC_OP_ANI_RUN;
 			del_timer_sync(&common->ani.timer);
+			del_timer_sync(&sc->rx_poll_timer);
 		}
 	}
 
-	/* Enable transmission of beacons (AP, IBSS, MESH) */
-	if ((changed & BSS_CHANGED_BEACON) ||
-	    ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
+	/*
+	 * In case of AP mode, the HW TSF has to be reset
+	 * when the beacon interval changes.
+	 */
+	if ((changed & BSS_CHANGED_BEACON_INT) &&
+	    (vif->type == NL80211_IFTYPE_AP))
+		sc->sc_flags |= SC_OP_TSF_RESET;
+
+	/* Configure beaconing (AP, IBSS, MESH) */
+	if (ath9k_uses_beacons(vif->type) &&
+	    ((changed & BSS_CHANGED_BEACON) ||
+	     (changed & BSS_CHANGED_BEACON_ENABLED) ||
+	     (changed & BSS_CHANGED_BEACON_INT))) {
 		ath9k_set_beaconing_status(sc, false);
-		error = ath_beacon_alloc(sc, vif);
-		if (!error)
-			ath_beacon_config(sc, vif);
+		if (bss_conf->enable_beacon)
+			ath_beacon_alloc(sc, vif);
+		else
+			avp->is_bslot_active = false;
+		ath_beacon_config(sc, vif);
 		ath9k_set_beaconing_status(sc, true);
 	}
 
@@ -2025,30 +2066,6 @@
 		}
 	}
 
-	/* Disable transmission of beacons */
-	if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
-	    !bss_conf->enable_beacon) {
-		ath9k_set_beaconing_status(sc, false);
-		avp->is_bslot_active = false;
-		ath9k_set_beaconing_status(sc, true);
-	}
-
-	if (changed & BSS_CHANGED_BEACON_INT) {
-		/*
-		 * In case of AP mode, the HW TSF has to be reset
-		 * when the beacon interval changes.
-		 */
-		if (vif->type == NL80211_IFTYPE_AP) {
-			sc->sc_flags |= SC_OP_TSF_RESET;
-			ath9k_set_beaconing_status(sc, false);
-			error = ath_beacon_alloc(sc, vif);
-			if (!error)
-				ath_beacon_config(sc, vif);
-			ath9k_set_beaconing_status(sc, true);
-		} else
-			ath_beacon_config(sc, vif);
-	}
-
 	mutex_unlock(&sc->mutex);
 	ath9k_ps_restore(sc);
 }
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 77dc327..a856b51 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -14,6 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/nl80211.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
@@ -171,14 +173,13 @@
 
 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (ret) {
-		printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
+		pr_err("32-bit DMA not available\n");
 		goto err_dma;
 	}
 
 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (ret) {
-		printk(KERN_ERR "ath9k: 32-bit DMA consistent "
-			"DMA enable failed\n");
+		pr_err("32-bit DMA consistent DMA enable failed\n");
 		goto err_dma;
 	}
 
@@ -224,7 +225,7 @@
 
 	mem = pci_iomap(pdev, 0, 0);
 	if (!mem) {
-		printk(KERN_ERR "PCI memory map error\n") ;
+		pr_err("PCI memory map error\n");
 		ret = -EIO;
 		goto err_iomap;
 	}
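
The pci.c change relies on the kernel's pr_fmt() convention: if the macro is defined before the first include that pulls in printk.h, every pr_err()/pr_warn()/pr_info() call in that file is prefixed automatically, so the literal "ath9k: " tag no longer needs to be repeated in each format string (the ath/main.c and regd.c hunks below make the same conversion). A minimal illustration, not the driver's code:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the includes */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>

static int demo_dma_setup(void)
{
	/* Logs "<modname>: 32-bit DMA not available". */
	pr_err("32-bit DMA not available\n");
	return -EIO;
}
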
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 08bb455..92a6c0a 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1436,7 +1436,7 @@
 
 static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
 			    struct ieee80211_sta *sta, void *priv_sta,
-			    u32 changed, enum nl80211_channel_type oper_chan_type)
+			    u32 changed)
 {
 	struct ath_softc *sc = priv;
 	struct ath_rate_priv *ath_rc_priv = priv_sta;
@@ -1447,12 +1447,11 @@
 
 	/* FIXME: Handle AP mode later when we support CWM */
 
-	if (changed & IEEE80211_RC_HT_CHANGED) {
+	if (changed & IEEE80211_RC_BW_CHANGED) {
 		if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
 			return;
 
-		if (oper_chan_type == NL80211_CHAN_HT40MINUS ||
-		    oper_chan_type == NL80211_CHAN_HT40PLUS)
+		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
 			oper_cw40 = true;
 
 		if (oper_cw40)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1c4583c..e1fcc68 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -812,6 +812,7 @@
 	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
 		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
 	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
+		ieee80211_has_protected(fc) &&
 		!(rx_stats->rs_status &
 		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
 		 ATH9K_RXERR_KEYMISS));
@@ -824,15 +825,20 @@
 	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
 		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
 
-	if (!rx_stats->rs_datalen)
+	if (!rx_stats->rs_datalen) {
+		RX_STAT_INC(rx_len_err);
 		return false;
+	}
+
         /*
          * rs_status follows rs_datalen so if rs_datalen is too large
          * we can take a hint that hardware corrupted it, so ignore
          * those frames.
          */
-	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
+	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
+		RX_STAT_INC(rx_len_err);
 		return false;
+	}
 
 	/* Only use error bits from the last fragment */
 	if (rx_stats->rs_more)
@@ -902,6 +908,7 @@
 	struct ieee80211_supported_band *sband;
 	enum ieee80211_band band;
 	unsigned int i = 0;
+	struct ath_softc __maybe_unused *sc = common->priv;
 
 	band = hw->conf.channel->band;
 	sband = hw->wiphy->bands[band];
@@ -936,7 +943,7 @@
 	ath_dbg(common, ANY,
 		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
 		rx_stats->rs_rate);
-
+	RX_STAT_INC(rx_rate_err);
 	return -EINVAL;
 }
 
@@ -1823,10 +1830,14 @@
 
 		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
 		rxs = IEEE80211_SKB_RXCB(hdr_skb);
-		if (ieee80211_is_beacon(hdr->frame_control) &&
-		    !is_zero_ether_addr(common->curbssid) &&
-		    !compare_ether_addr(hdr->addr3, common->curbssid))
-			rs.is_mybeacon = true;
+		if (ieee80211_is_beacon(hdr->frame_control)) {
+			RX_STAT_INC(rx_beacons);
+			if (!is_zero_ether_addr(common->curbssid) &&
+			    ether_addr_equal(hdr->addr3, common->curbssid))
+				rs.is_mybeacon = true;
+			else
+				rs.is_mybeacon = false;
+		}
 		else
 			rs.is_mybeacon = false;
 
@@ -1836,8 +1847,10 @@
 		 * If we're asked to flush receive queue, directly
 		 * chain it back at the queue without processing it.
 		 */
-		if (sc->sc_flags & SC_OP_RXFLUSH)
+		if (sc->sc_flags & SC_OP_RXFLUSH) {
+			RX_STAT_INC(rx_drop_rxflush);
 			goto requeue_drop_frag;
+		}
 
 		memset(rxs, 0, sizeof(struct ieee80211_rx_status));
 
@@ -1855,6 +1868,10 @@
 		if (retval)
 			goto requeue_drop_frag;
 
+		if (rs.is_mybeacon) {
+			sc->hw_busy_count = 0;
+			ath_start_rx_poll(sc, 3);
+		}
 		/* Ensure we always have an skb to requeue once we are done
 		 * processing the current buffer's skb */
 		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1863,8 +1880,10 @@
 		 * tell hardware it can give us a new frame using the old
 		 * skb and put it at the tail of the sc->rx.rxbuf list for
 		 * processing. */
-		if (!requeue_skb)
+		if (!requeue_skb) {
+			RX_STAT_INC(rx_oom_err);
 			goto requeue_drop_frag;
+		}
 
 		/* Unmap the frame */
 		dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1895,6 +1914,7 @@
 		}
 
 		if (rs.rs_more) {
+			RX_STAT_INC(rx_frags);
 			/*
 			 * rs_more indicates chained descriptors which can be
 			 * used to link buffers together for a sort of
@@ -1904,6 +1924,7 @@
 				/* too many fragments - cannot handle frame */
 				dev_kfree_skb_any(sc->rx.frag);
 				dev_kfree_skb_any(skb);
+				RX_STAT_INC(rx_too_many_frags_err);
 				skb = NULL;
 			}
 			sc->rx.frag = skb;
@@ -1915,6 +1936,7 @@
 
 			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
 				dev_kfree_skb(skb);
+				RX_STAT_INC(rx_oom_err);
 				goto requeue_drop_frag;
 			}
 
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
index 885c427..65919c9 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.h
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -114,7 +114,7 @@
 
 #define carl9170_regwrite_result()					\
 	__err;								\
-} while (0);
+} while (0)
 
 
 #define carl9170_async_regwrite_get_buf()				\
@@ -126,7 +126,7 @@
 		__err = -ENOMEM;					\
 		goto __async_regwrite_out;				\
 	}								\
-} while (0);
+} while (0)
 
 #define carl9170_async_regwrite_begin(carl)				\
 do {									\
@@ -169,6 +169,6 @@
 
 #define carl9170_async_regwrite_result()				\
 	__err;								\
-} while (0);
+} while (0)
 
 #endif /* __CMD_H */
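
The carl9170 change drops the stray semicolon after "} while (0)" in the register-write helper macros (brcmsmac's d11.h gets the same fix further down). The semicolon belongs at the call site: if it is baked into the macro, the caller's own ';' becomes an extra empty statement, and an if/else wrapped around the macro invocation no longer compiles because the else detaches. A small stand-alone illustration with made-up helper functions:

void step_one(void);			/* hypothetical helpers, declarations only */
void step_two(void);

#define DO_TWO_THINGS()		\
do {				\
	step_one();		\
	step_two();		\
} while (0)			/* note: no ';' here */

void demo(int cond)
{
	if (cond)
		DO_TWO_THINGS();	/* the caller supplies the ';' ... */
	else
		step_one();		/* ... so this else still binds */
}
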
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index cffde8d..5c73c03 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -355,6 +355,8 @@
 
 	ar->hw->wiphy->interface_modes |= if_comb_types;
 
+	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
 #undef SUPPORTED
 	return carl9170_fw_tx_sequence(ar);
 }
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index dc99030..84b22ee 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -538,7 +538,7 @@
 		return;
 
 	/* and only beacons from the associated BSSID, please */
-	if (compare_ether_addr(hdr->addr3, ar->common.curbssid) ||
+	if (!ether_addr_equal(hdr->addr3, ar->common.curbssid) ||
 	    !ar->common.curaid)
 		return;
 
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index ea2c737..8e99540 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -14,6 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 
@@ -49,7 +51,7 @@
 		if (off != 0)
 			skb_reserve(skb, common->cachelsz - off);
 	} else {
-		printk(KERN_ERR "skbuff alloc of size %u failed\n", len);
+		pr_err("skbuff alloc of size %u failed\n", len);
 		return NULL;
 	}
 
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 10dea37..d816980 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -14,6 +14,8 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <net/cfg80211.h>
@@ -562,7 +564,7 @@
 	printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd);
 
 	if (!ath_regd_is_eeprom_valid(reg)) {
-		printk(KERN_ERR "ath: Invalid EEPROM contents\n");
+		pr_err("Invalid EEPROM contents\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 6c87a82..d07c030 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -3989,8 +3989,7 @@
 			atmel_copy_to_card(priv->dev, 0x8000, &fw[0x6000], len - 0x6000);
 		}
 
-		if (fw_entry)
-			release_firmware(fw_entry);
+		release_firmware(fw_entry);
 	}
 
 	err = atmel_wakeup_firmware(priv);
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 9ab1192..51e33b5 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -74,15 +74,4 @@
 	stop_atmel_card(pci_get_drvdata(pdev));
 }
 
-static int __init atmel_init_module(void)
-{
-	return pci_register_driver(&atmel_driver);
-}
-
-static void __exit atmel_cleanup_module(void)
-{
-	pci_unregister_driver(&atmel_driver);
-}
-
-module_init(atmel_init_module);
-module_exit(atmel_cleanup_module);
+module_pci_driver(atmel_driver);
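
module_pci_driver() expands to exactly the init/exit boilerplate removed above: a module_init() that calls pci_register_driver() and a module_exit() that calls pci_unregister_driver(). The hostap_pci and hostap_plx hunks below make the same conversion. A sketch with a made-up driver and PCI ID:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical vendor/device */
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_pci_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver demo_pci_driver = {
	.name		= "demo_pci",
	.id_table	= demo_pci_ids,
	.probe		= demo_probe,
	.remove		= demo_remove,
};

/* Replaces the hand-written module_init()/module_exit() pair. */
module_pci_driver(demo_pci_driver);

MODULE_LICENSE("GPL");
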
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c79e663..617afc8 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4010,6 +4010,20 @@
 	if (modparam_nohwcrypt)
 		return -ENOSPC; /* User disabled HW-crypto */
 
+	if ((vif->type == NL80211_IFTYPE_ADHOC ||
+	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
+	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+		/*
+		 * For now, disable hw crypto for the RSN IBSS group keys. This
+		 * could be optimized in the future, but until that gets
+		 * implemented, use of software crypto for group addressed
+		 * frames is a acceptable to allow RSN IBSS to be used.
+		 * frames is acceptable to allow RSN IBSS to be used.
+		return -EOPNOTSUPP;
+	}
+
 	mutex_lock(&wl->mutex);
 
 	dev = wl->current_dev;
@@ -4827,8 +4841,14 @@
  out_mutex_unlock:
 	mutex_unlock(&wl->mutex);
 
-	/* reload configuration */
-	b43_op_config(hw, ~0);
+	/*
+	 * Configuration may have been overwritten during initialization.
+	 * Reload the configuration, but only if initialization was
+	 * successful. Reloading the configuration after a failed init
+	 * may hang the system.
+	 */
+	if (!err)
+		b43_op_config(hw, ~0);
 
 	return err;
 }
@@ -5275,6 +5295,8 @@
 		BIT(NL80211_IFTYPE_WDS) |
 		BIT(NL80211_IFTYPE_ADHOC);
 
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
 	hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
 	wl->mac80211_initially_registered_queues = hw->queues;
 	hw->max_rates = 2;
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 80b0755..a54fb2d 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -193,7 +193,7 @@
 	.name		= "b43-sdio",
 	.id_table	= b43_sdio_ids,
 	.probe		= b43_sdio_probe,
-	.remove		= b43_sdio_remove,
+	.remove		= __devexit_p(b43_sdio_remove),
 };
 
 int b43_sdio_init(void)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 2c53678..b31ccc0 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -290,7 +290,8 @@
 		txhdr->dur_fb = wlhdr->duration_id;
 	} else {
 		txhdr->dur_fb = ieee80211_generic_frame_duration(
-			dev->wl->hw, info->control.vif, fragment_len, fbrate);
+			dev->wl->hw, info->control.vif, info->band,
+			fragment_len, fbrate);
 	}
 
 	plcp_fragment_len = fragment_len + FCS_LEN;
@@ -378,7 +379,7 @@
 	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 		phy_ctl |= B43_TXH_PHY_SHORTPRMBL;
 
-	switch (b43_ieee80211_antenna_sanitize(dev, info->antenna_sel_tx)) {
+	switch (b43_ieee80211_antenna_sanitize(dev, 0)) {
 	case 0: /* Default */
 		phy_ctl |= B43_TXH_PHY_ANT01AUTO;
 		break;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index df7e16d..1be214b 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1056,6 +1056,7 @@
 	b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
 	dur = ieee80211_generic_frame_duration(dev->wl->hw,
 					       dev->wl->vif,
+					       IEEE80211_BAND_2GHZ,
 					       size,
 					       rate);
 	/* Write PLCP in two parts and timing for packet transfer */
@@ -1121,6 +1122,7 @@
 					 IEEE80211_STYPE_PROBE_RESP);
 	dur = ieee80211_generic_frame_duration(dev->wl->hw,
 					       dev->wl->vif,
+					       IEEE80211_BAND_2GHZ,
 					       *dest_size,
 					       rate);
 	hdr->duration_id = dur;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 5188fab..a8012f2 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -228,6 +228,7 @@
 	} else {
 		txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw,
 							 info->control.vif,
+							 info->band,
 							 fragment_len,
 							 rate_fb);
 	}
@@ -277,19 +278,7 @@
 		phy_ctl |= B43legacy_TX4_PHY_ENC_OFDM;
 	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 		phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL;
-	switch (info->antenna_sel_tx) {
-	case 0:
-		phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
-		break;
-	case 1:
-		phy_ctl |= B43legacy_TX4_PHY_ANT0;
-		break;
-	case 2:
-		phy_ctl |= B43legacy_TX4_PHY_ANT1;
-		break;
-	default:
-		B43legacy_BUG_ON(1);
-	}
+	phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
 
 	/* MAC control */
 	rates = info->control.rates;
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index c510453..b480088 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -36,6 +36,15 @@
 	  IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
 	  use the driver for a SDIO wireless card.
 
+config BRCMFMAC_SDIO_OOB
+	bool "Out of band interrupt support for SDIO interface chipset"
+	depends on BRCMFMAC_SDIO
+	---help---
+	  This option enables out-of-band interrupt support for Broadcom
+	  SDIO WiFi chipsets using fullmac, in order to gain better
+	  performance and deep-sleep wakeup capability on certain
+	  platforms. Say N if you are unsure.
+
 config BRCMFMAC_USB
 	bool "USB bus interface support for FullMAC driver"
 	depends on USB
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e925290..4add7da 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -39,37 +39,113 @@
 
 #define SDIOH_API_ACCESS_RETRY_LIMIT	2
 
-static void brcmf_sdioh_irqhandler(struct sdio_func *func)
+#ifdef CONFIG_BRCMFMAC_SDIO_OOB
+static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id)
 {
-	struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+	struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(dev_id);
 
-	brcmf_dbg(TRACE, "***IRQHandler\n");
+	brcmf_dbg(INTR, "oob intr triggered\n");
 
-	sdio_release_host(func);
+	/*
+	 * out-of-band interrupt is level-triggered which won't
+	 * be cleared until dpc
+	 */
+	if (sdiodev->irq_en) {
+		disable_irq_nosync(irq);
+		sdiodev->irq_en = false;
+	}
 
 	brcmf_sdbrcm_isr(sdiodev->bus);
 
-	sdio_claim_host(func);
+	return IRQ_HANDLED;
+}
+
+int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
+{
+	int ret = 0;
+	u8 data;
+	unsigned long flags;
+
+	brcmf_dbg(TRACE, "Entering\n");
+
+	brcmf_dbg(ERROR, "requesting irq %d\n", sdiodev->irq);
+	ret = request_irq(sdiodev->irq, brcmf_sdio_irqhandler,
+			  sdiodev->irq_flags, "brcmf_oob_intr",
+			  &sdiodev->func[1]->card->dev);
+	if (ret != 0)
+		return ret;
+	spin_lock_init(&sdiodev->irq_en_lock);
+	spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
+	sdiodev->irq_en = true;
+	spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
+
+	ret = enable_irq_wake(sdiodev->irq);
+	if (ret != 0)
+		return ret;
+	sdiodev->irq_wake = true;
+
+	/* must configure SDIO_CCCR_IENx to enable irq */
+	data = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_0,
+				     SDIO_CCCR_IENx, &ret);
+	data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
+	brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_IENx,
+			       data, &ret);
+
+	/* redirect, configure and enable io for interrupt signal */
+	data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+	if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
+		data |= SDIO_SEPINT_ACT_HI;
+	brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_BRCM_SEPINT,
+			       data, &ret);
+
+	return 0;
+}
+
+int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
+{
+	brcmf_dbg(TRACE, "Entering\n");
+
+	brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_BRCM_SEPINT,
+			       0, NULL);
+	brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_IENx, 0, NULL);
+
+	if (sdiodev->irq_wake) {
+		disable_irq_wake(sdiodev->irq);
+		sdiodev->irq_wake = false;
+	}
+	free_irq(sdiodev->irq, &sdiodev->func[1]->card->dev);
+	sdiodev->irq_en = false;
+
+	return 0;
+}
+#else		/* CONFIG_BRCMFMAC_SDIO_OOB */
+static void brcmf_sdio_irqhandler(struct sdio_func *func)
+{
+	struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+
+	brcmf_dbg(INTR, "ib intr triggered\n");
+
+	brcmf_sdbrcm_isr(sdiodev->bus);
 }
 
 /* dummy handler for SDIO function 2 interrupt */
-static void brcmf_sdioh_dummy_irq_handler(struct sdio_func *func)
+static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
 {
 }
 
-int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev)
+int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
 {
 	brcmf_dbg(TRACE, "Entering\n");
 
 	sdio_claim_host(sdiodev->func[1]);
-	sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler);
-	sdio_claim_irq(sdiodev->func[2], brcmf_sdioh_dummy_irq_handler);
+	sdio_claim_irq(sdiodev->func[1], brcmf_sdio_irqhandler);
+	sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
 	sdio_release_host(sdiodev->func[1]);
 
 	return 0;
 }
 
-int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev)
+int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
 {
 	brcmf_dbg(TRACE, "Entering\n");
 
@@ -80,6 +156,7 @@
 
 	return 0;
 }
+#endif		/* CONFIG_BRCMFMAC_SDIO_OOB */
 
 u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr,
 			 int *err)
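
The new CONFIG_BRCMFMAC_SDIO_OOB path routes the WLAN interrupt over a dedicated out-of-band line instead of the in-band SDIO interrupt. Because that line is level-triggered, the handler masks it with disable_irq_nosync() and it is only re-enabled once the DPC has cleared the interrupt source (see the brcmf_sdbrcm_clrintr() hunk in dhd_sdio.c below). A stripped-down sketch of that mask/re-enable handshake; apart from the core IRQ and spinlock APIs, the demo_* names are illustrative:

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_oob {
	unsigned int irq;
	bool irq_en;
	spinlock_t irq_en_lock;
};

static irqreturn_t demo_oob_isr(int irq, void *dev_id)
{
	struct demo_oob *oob = dev_id;

	/* Level-triggered: keep the line masked until the DPC has run. */
	if (oob->irq_en) {
		disable_irq_nosync(irq);
		oob->irq_en = false;
	}
	/* ... wake the deferred processing here ... */
	return IRQ_HANDLED;
}

static void demo_oob_reenable(struct demo_oob *oob)	/* called from the DPC */
{
	unsigned long flags;

	spin_lock_irqsave(&oob->irq_en_lock, flags);
	if (!oob->irq_en) {
		enable_irq(oob->irq);
		oob->irq_en = true;
	}
	spin_unlock_irqrestore(&oob->irq_en_lock, flags);
}
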
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 4688904..dd07d33 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -27,6 +27,7 @@
 #include <linux/errno.h>
 #include <linux/sched.h>	/* request_irq() */
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <net/cfg80211.h>
 
 #include <defs.h>
@@ -55,6 +56,15 @@
 };
 MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
 
+#ifdef CONFIG_BRCMFMAC_SDIO_OOB
+static struct list_head oobirq_lh;
+struct brcmf_sdio_oobirq {
+	unsigned int irq;
+	unsigned long flags;
+	struct list_head list;
+};
+#endif		/* CONFIG_BRCMFMAC_SDIO_OOB */
+
 static bool
 brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
 {
@@ -107,10 +117,17 @@
 			}
 			sdio_release_host(sdfunc);
 		}
-	} else if (regaddr == SDIO_CCCR_ABORT) {
+	} else if ((regaddr == SDIO_CCCR_ABORT) ||
+		   (regaddr == SDIO_CCCR_IENx)) {
+		sdfunc = kmemdup(sdiodev->func[0], sizeof(struct sdio_func),
+				 GFP_KERNEL);
+		if (!sdfunc)
+			return -ENOMEM;
+		sdfunc->num = 0;
 		sdio_claim_host(sdfunc);
 		sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
 		sdio_release_host(sdfunc);
+		kfree(sdfunc);
 	} else if (regaddr < 0xF0) {
 		brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
 		err_ret = -EPERM;
@@ -461,12 +478,40 @@
 
 }
 
+#ifdef CONFIG_BRCMFMAC_SDIO_OOB
+static int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev)
+{
+	struct brcmf_sdio_oobirq *oobirq_entry;
+
+	if (list_empty(&oobirq_lh)) {
+		brcmf_dbg(ERROR, "no valid oob irq resource\n");
+		return -ENXIO;
+	}
+
+	oobirq_entry = list_first_entry(&oobirq_lh, struct brcmf_sdio_oobirq,
+					list);
+
+	sdiodev->irq = oobirq_entry->irq;
+	sdiodev->irq_flags = oobirq_entry->flags;
+	list_del(&oobirq_entry->list);
+	kfree(oobirq_entry);
+
+	return 0;
+}
+#else
+static inline int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev)
+{
+	return 0;
+}
+#endif		/* CONFIG_BRCMFMAC_SDIO_OOB */
+
 static int brcmf_ops_sdio_probe(struct sdio_func *func,
 			      const struct sdio_device_id *id)
 {
 	int ret = 0;
 	struct brcmf_sdio_dev *sdiodev;
 	struct brcmf_bus *bus_if;
+
 	brcmf_dbg(TRACE, "Enter\n");
 	brcmf_dbg(TRACE, "func->class=%x\n", func->class);
 	brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@@ -486,7 +531,7 @@
 			kfree(bus_if);
 			return -ENOMEM;
 		}
-		sdiodev->func[0] = func->card->sdio_func[0];
+		sdiodev->func[0] = func;
 		sdiodev->func[1] = func;
 		sdiodev->bus_if = bus_if;
 		bus_if->bus_priv.sdio = sdiodev;
@@ -505,6 +550,10 @@
 		sdiodev = dev_get_drvdata(&func->card->dev);
 		if ((!sdiodev) || (sdiodev->func[1]->card != func->card))
 			return -ENODEV;
+
+		ret = brcmf_sdio_getintrcfg(sdiodev);
+		if (ret)
+			return ret;
 		sdiodev->func[2] = func;
 
 		bus_if = sdiodev->bus_if;
@@ -597,6 +646,65 @@
 #endif	/* CONFIG_PM_SLEEP */
 };
 
+#ifdef CONFIG_BRCMFMAC_SDIO_OOB
+static int brcmf_sdio_pd_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct brcmf_sdio_oobirq *oobirq_entry;
+	int i, ret;
+
+	INIT_LIST_HEAD(&oobirq_lh);
+
+	for (i = 0; ; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+		if (!res)
+			break;
+
+		oobirq_entry = kzalloc(sizeof(struct brcmf_sdio_oobirq),
+				       GFP_KERNEL);
+		oobirq_entry->irq = res->start;
+		oobirq_entry->flags = res->flags & IRQF_TRIGGER_MASK;
+		list_add_tail(&oobirq_entry->list, &oobirq_lh);
+	}
+	if (i == 0)
+		return -ENXIO;
+
+	ret = sdio_register_driver(&brcmf_sdmmc_driver);
+
+	if (ret)
+		brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
+
+	return ret;
+}
+
+static struct platform_driver brcmf_sdio_pd = {
+	.probe		= brcmf_sdio_pd_probe,
+	.driver		= {
+		.name	= "brcmf_sdio_pd"
+	}
+};
+
+void brcmf_sdio_exit(void)
+{
+	brcmf_dbg(TRACE, "Enter\n");
+
+	sdio_unregister_driver(&brcmf_sdmmc_driver);
+
+	platform_driver_unregister(&brcmf_sdio_pd);
+}
+
+void brcmf_sdio_init(void)
+{
+	int ret;
+
+	brcmf_dbg(TRACE, "Enter\n");
+
+	ret = platform_driver_register(&brcmf_sdio_pd);
+
+	if (ret)
+		brcmf_dbg(ERROR, "platform_driver_register failed: %d\n", ret);
+}
+#else
 void brcmf_sdio_exit(void)
 {
 	brcmf_dbg(TRACE, "Enter\n");
@@ -615,3 +723,4 @@
 	if (ret)
 		brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
 }
+#endif		/* CONFIG_BRCMFMAC_SDIO_OOB */
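
The brcmf_sdio_pd platform driver added above expects board code to describe the out-of-band interrupt as an IORESOURCE_IRQ resource, with the trigger type encoded in the resource flags; the probe walks those resources with platform_get_resource(pdev, IORESOURCE_IRQ, i) and stores irq/flags for brcmf_sdio_intr_register(). A hypothetical board-file snippet showing how such a device could be declared (the IRQ number and the demo_* names are made up):

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static struct resource demo_wifi_res[] = {
	{
		.start	= 123,			/* hypothetical GPIO IRQ number */
		.end	= 123,
		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_HIGH,
	},
};

static struct platform_device demo_wifi_dev = {
	.name		= "brcmf_sdio_pd",	/* must match the driver name above */
	.id		= -1,
	.resource	= demo_wifi_res,
	.num_resources	= ARRAY_SIZE(demo_wifi_res),
};

/* Board init code would then call platform_device_register(&demo_wifi_dev). */
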
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 07686a7..9f63701 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -632,7 +632,6 @@
 extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
 			  char *buf, uint len);
 
-extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
 extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
 extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index b3e3b7f..a5c15ca 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -421,6 +421,7 @@
 	pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
 
 	skb_pull(pktbuf, BDC_HEADER_LEN);
+	skb_pull(pktbuf, h->data_offset << 2);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 4187435..236cb9f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -799,7 +799,6 @@
 {
 	char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];	/*  Room for
 				 "event_msgs" + '\0' + bitvec  */
-	uint up = 0;
 	char buf[128], *ptr;
 	u32 dongle_align = drvr->bus_if->align;
 	u32 glom = 0;
@@ -853,9 +852,6 @@
 	brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
 				  sizeof(iovbuf));
 
-	/* Force STA UP */
-	brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up));
-
 	/* Setup event_msgs */
 	brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
 		      iovbuf, sizeof(iovbuf));
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 2a1e5ae..8933f9b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -799,6 +799,7 @@
 	struct brcmf_bus *bus_if = drvr->bus_if;
 	u32 toe_ol;
 	s32 ret = 0;
+	uint up = 0;
 
 	brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
 
@@ -822,6 +823,10 @@
 			drvr->iflist[ifp->idx]->ndev->features &=
 				~NETIF_F_IP_CSUM;
 	}
+
+	/* make sure RF is ready for work */
+	brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up));
+
 	/* Allow transmit calls */
 	netif_start_queue(ndev);
 	drvr->bus_if->drvr_up = true;
@@ -843,6 +848,63 @@
 	.ndo_set_rx_mode = brcmf_netdev_set_multicast_list
 };
 
+static int brcmf_net_attach(struct brcmf_if *ifp)
+{
+	struct brcmf_pub *drvr = ifp->drvr;
+	struct net_device *ndev;
+	u8 temp_addr[ETH_ALEN];
+
+	brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
+
+	ndev = drvr->iflist[ifp->idx]->ndev;
+	ndev->netdev_ops = &brcmf_netdev_ops_pri;
+
+	/*
+	 * determine mac address to use
+	 */
+	if (is_valid_ether_addr(ifp->mac_addr))
+		memcpy(temp_addr, ifp->mac_addr, ETH_ALEN);
+	else
+		memcpy(temp_addr, drvr->mac, ETH_ALEN);
+
+	if (ifp->idx == 1) {
+		brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
+		/*  ACCESSPOINT INTERFACE CASE */
+		temp_addr[0] |= 0X02;	/* set bit 2 ,
+			 - Locally Administered address  */
+
+	}
+	ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
+	ndev->ethtool_ops = &brcmf_ethtool_ops;
+
+	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
+			      drvr->hdrlen;
+
+	memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
+
+	/* attach to cfg80211 for primary interface */
+	if (!ifp->idx) {
+		drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
+		if (drvr->config == NULL) {
+			brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
+			goto fail;
+		}
+	}
+
+	if (register_netdev(ndev) != 0) {
+		brcmf_dbg(ERROR, "couldn't register the net device\n");
+		goto fail;
+	}
+
+	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
+
+	return 0;
+
+fail:
+	ndev->netdev_ops = NULL;
+	return -EBADE;
+}
+
 int
 brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
 {
@@ -882,7 +944,7 @@
 	if (mac_addr != NULL)
 		memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
 
-	if (brcmf_net_attach(drvr, ifp->idx)) {
+	if (brcmf_net_attach(ifp)) {
 		brcmf_dbg(ERROR, "brcmf_net_attach failed");
 		free_netdev(ifp->ndev);
 		drvr->iflist[ifidx] = NULL;
@@ -1016,69 +1078,16 @@
 	if (ret < 0)
 		return ret;
 
+	/* add primary networking interface */
+	ret = brcmf_add_if(dev, 0, "wlan%d", drvr->mac);
+	if (ret < 0)
+		return ret;
+
 	/* signal bus ready */
 	bus_if->state = BRCMF_BUS_DATA;
 	return 0;
 }
 
-int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
-{
-	struct net_device *ndev;
-	u8 temp_addr[ETH_ALEN] = {
-		0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
-
-	brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
-
-	ndev = drvr->iflist[ifidx]->ndev;
-	ndev->netdev_ops = &brcmf_netdev_ops_pri;
-
-	/*
-	 * We have to use the primary MAC for virtual interfaces
-	 */
-	if (ifidx != 0) {
-		/* for virtual interfaces use the primary MAC  */
-		memcpy(temp_addr, drvr->mac, ETH_ALEN);
-
-	}
-
-	if (ifidx == 1) {
-		brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
-		/*  ACCESSPOINT INTERFACE CASE */
-		temp_addr[0] |= 0X02;	/* set bit 2 ,
-			 - Locally Administered address  */
-
-	}
-	ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
-	ndev->ethtool_ops = &brcmf_ethtool_ops;
-
-	drvr->rxsz = ndev->mtu + ndev->hard_header_len +
-			      drvr->hdrlen;
-
-	memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
-
-	/* attach to cfg80211 for primary interface */
-	if (!ifidx) {
-		drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
-		if (drvr->config == NULL) {
-			brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
-			goto fail;
-		}
-	}
-
-	if (register_netdev(ndev) != 0) {
-		brcmf_dbg(ERROR, "couldn't register the net device\n");
-		goto fail;
-	}
-
-	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
-
-	return 0;
-
-fail:
-	ndev->netdev_ops = NULL;
-	return -EBADE;
-}
-
 static void brcmf_bus_detach(struct brcmf_pub *drvr)
 {
 	brcmf_dbg(TRACE, "Enter\n");
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 2bf5dda..149ee67 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -574,6 +574,8 @@
 
 	struct task_struct *dpc_tsk;
 	struct completion dpc_wait;
+	struct list_head dpc_tsklst;
+	spinlock_t dpc_tl_lock;
 
 	struct semaphore sdsem;
 
@@ -2350,6 +2352,24 @@
 	up(&bus->sdsem);
 }
 
+#ifdef CONFIG_BRCMFMAC_SDIO_OOB
+static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
+	if (!bus->sdiodev->irq_en && !bus->ipend) {
+		enable_irq(bus->sdiodev->irq);
+		bus->sdiodev->irq_en = true;
+	}
+	spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
+}
+#else
+static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
+{
+}
+#endif		/* CONFIG_BRCMFMAC_SDIO_OOB */
+
 static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 {
 	u32 intstatus, newstatus = 0;
@@ -2507,6 +2527,8 @@
 	bus->intstatus = intstatus;
 
 clkwait:
+	brcmf_sdbrcm_clrintr(bus);
+
 	if (data_ok(bus) && bus->ctrl_frame_stat &&
 		(bus->clkstate == CLK_AVAIL)) {
 		int ret, i;
@@ -2594,29 +2616,59 @@
 	return resched;
 }
 
+static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
+{
+	struct list_head *new_hd;
+	unsigned long flags;
+
+	if (in_interrupt())
+		new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
+	else
+		new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (new_hd == NULL)
+		return;
+
+	spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+	list_add_tail(new_hd, &bus->dpc_tsklst);
+	spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+}
+
 static int brcmf_sdbrcm_dpc_thread(void *data)
 {
 	struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
+	struct list_head *cur_hd, *tmp_hd;
+	unsigned long flags;
 
 	allow_signal(SIGTERM);
 	/* Run until signal received */
 	while (1) {
 		if (kthread_should_stop())
 			break;
-		if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
-			/* Call bus dpc unless it indicated down
-			(then clean stop) */
-			if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) {
-				if (brcmf_sdbrcm_dpc(bus))
-					complete(&bus->dpc_wait);
-			} else {
+
+		if (list_empty(&bus->dpc_tsklst))
+			if (wait_for_completion_interruptible(&bus->dpc_wait))
+				break;
+
+		spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+		list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
+			spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
+			if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
 				/* after stopping the bus, exit thread */
 				brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
 				bus->dpc_tsk = NULL;
+				spin_lock_irqsave(&bus->dpc_tl_lock, flags);
 				break;
 			}
-		} else
-			break;
+
+			if (brcmf_sdbrcm_dpc(bus))
+				brcmf_sdbrcm_adddpctsk(bus);
+
+			spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+			list_del(cur_hd);
+			kfree(cur_hd);
+		}
+		spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
 	}
 	return 0;
 }
@@ -2669,8 +2721,10 @@
 	/* Schedule DPC if needed to send queued packet(s) */
 	if (!bus->dpc_sched) {
 		bus->dpc_sched = true;
-		if (bus->dpc_tsk)
+		if (bus->dpc_tsk) {
+			brcmf_sdbrcm_adddpctsk(bus);
 			complete(&bus->dpc_wait);
+		}
 	}
 
 	return ret;
@@ -3474,8 +3528,14 @@
 	brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
 			       SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
 
+	if (ret == 0) {
+		ret = brcmf_sdio_intr_register(bus->sdiodev);
+		if (ret != 0)
+			brcmf_dbg(ERROR, "intr register failed:%d\n", ret);
+	}
+
 	/* If we didn't come up, turn off backplane clock */
-	if (!ret)
+	if (bus_if->state != BRCMF_BUS_DATA)
 		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
 
 exit:
@@ -3514,8 +3574,10 @@
 		brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
 
 	bus->dpc_sched = true;
-	if (bus->dpc_tsk)
+	if (bus->dpc_tsk) {
+		brcmf_sdbrcm_adddpctsk(bus);
 		complete(&bus->dpc_wait);
+	}
 }
 
 static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
@@ -3559,8 +3621,10 @@
 				bus->ipend = true;
 
 				bus->dpc_sched = true;
-				if (bus->dpc_tsk)
+				if (bus->dpc_tsk) {
+					brcmf_sdbrcm_adddpctsk(bus);
 					complete(&bus->dpc_wait);
+				}
 			}
 		}
 
@@ -3829,7 +3893,7 @@
 
 	if (bus) {
 		/* De-register interrupt handler */
-		brcmf_sdcard_intr_dereg(bus->sdiodev);
+		brcmf_sdio_intr_unregister(bus->sdiodev);
 
 		if (bus->sdiodev->bus_if->drvr) {
 			brcmf_detach(bus->sdiodev->dev);
@@ -3897,6 +3961,8 @@
 	}
 	/* Initialize DPC thread */
 	init_completion(&bus->dpc_wait);
+	INIT_LIST_HEAD(&bus->dpc_tsklst);
+	spin_lock_init(&bus->dpc_tl_lock);
 	bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
 				   bus, "brcmf_dpc");
 	if (IS_ERR(bus->dpc_tsk)) {
@@ -3928,15 +3994,6 @@
 		goto fail;
 	}
 
-	/* Register interrupt callback, but mask it (not operational yet). */
-	brcmf_dbg(INTR, "disable SDIO interrupts (not interested yet)\n");
-	ret = brcmf_sdcard_intr_reg(bus->sdiodev);
-	if (ret != 0) {
-		brcmf_dbg(ERROR, "FAILED: sdcard_intr_reg returned %d\n", ret);
-		goto fail;
-	}
-	brcmf_dbg(INTR, "registered SDIO interrupt function ok\n");
-
 	brcmf_dbg(INFO, "completed!!\n");
 
 	/* if firmware path present try to download and bring up bus */
@@ -3948,12 +4005,6 @@
 		}
 	}
 
-	/* add interface and open for business */
-	if (brcmf_add_if(bus->sdiodev->dev, 0, "wlan%d", NULL)) {
-		brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
-		goto fail;
-	}
-
 	return bus;
 
 fail:
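
The dhd_sdio.c rework replaces the single completion handshake with a spinlock-protected task list, so multiple DPC requests can be queued while the thread is still draining earlier ones. The essential producer side, simplified; the real code also picks GFP_KERNEL outside interrupt context and re-checks the bus state in the consumer loop:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>

struct demo_bus {
	struct list_head dpc_tsklst;
	spinlock_t dpc_tl_lock;
	struct completion dpc_wait;
};

static void demo_add_dpc_task(struct demo_bus *bus)
{
	struct list_head *node = kzalloc(sizeof(*node), GFP_ATOMIC);
	unsigned long flags;

	if (!node)
		return;

	spin_lock_irqsave(&bus->dpc_tl_lock, flags);
	list_add_tail(node, &bus->dpc_tsklst);
	spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);

	complete(&bus->dpc_wait);		/* wake the DPC thread */
}
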
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 0281d20..7010eaf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -43,6 +43,13 @@
 /* as of sdiod rev 0, supports 3 functions */
 #define SBSDIO_NUM_FUNCTION		3
 
+/* function 0 vendor specific CCCR registers */
+#define SDIO_CCCR_BRCM_SEPINT		0xf2
+
+#define  SDIO_SEPINT_MASK		0x01
+#define  SDIO_SEPINT_OE			0x02
+#define  SDIO_SEPINT_ACT_HI		0x04
+
 /* function 1 miscellaneous registers */
 
 /* sprom command and status */
@@ -144,13 +151,18 @@
 	wait_queue_head_t request_buffer_wait;
 	struct device *dev;
 	struct brcmf_bus *bus_if;
+#ifdef CONFIG_BRCMFMAC_SDIO_OOB
+	unsigned int irq;		/* oob interrupt number */
+	unsigned long irq_flags;	/* board specific oob flags */
+	bool irq_en;			/* irq enable flags */
+	spinlock_t irq_en_lock;
+	bool irq_wake;			/* irq wake enable flags */
+#endif		/* CONFIG_BRCMFMAC_SDIO_OOB */
 };
 
-/* Register/deregister device interrupt handler. */
-extern int
-brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev);
-
-extern int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev);
+/* Register/deregister interrupt handler. */
+extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
+extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
 
 /* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
  *   fn:   function number
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 8236422..1d67ecf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1383,14 +1383,6 @@
 		goto fail;
 	}
 
-	/* add interface and open for business */
-	ret = brcmf_add_if(dev, 0, "wlan%d", NULL);
-	if (ret) {
-		brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
-		brcmf_detach(dev);
-		goto fail;
-	}
-
 	return 0;
 fail:
 	/* Release resources in reverse order */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 55e9f45..0efe88e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -628,6 +628,40 @@
 	return false;
 }
 
+/*
+ * Indicates whether the country provided is valid to pass
+ * to cfg80211 or not.
+ *
+ * returns true if valid; false if not.
+ */
+static bool brcms_c_country_valid(const char *ccode)
+{
+	/*
+	 * only allow ascii alpha uppercase for the first 2
+	 * chars.
+	 */
+	if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A &&
+	      (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A &&
+	      ccode[2] == '\0'))
+		return false;
+
+	/*
+	 * do not match ISO 3166-1 user assigned country codes
+	 * that may be in the driver table
+	 */
+	if (!strcmp("AA", ccode) ||        /* AA */
+	    !strcmp("ZZ", ccode) ||        /* ZZ */
+	    ccode[0] == 'X' ||             /* XA - XZ */
+	    (ccode[0] == 'Q' &&            /* QM - QZ */
+	     (ccode[1] >= 'M' && ccode[1] <= 'Z')))
+		return false;
+
+	if (!strcmp("NA", ccode))
+		return false;
+
+	return true;
+}
+
 /* Lookup a country info structure from a null terminated country
  * abbreviation and regrev directly with no translation.
  */
@@ -1089,7 +1123,7 @@
 
 	/* store the country code for passing up as a regulatory hint */
 	ccode = getvar(wlc->hw->sih, BRCMS_SROM_CCODE);
-	if (ccode)
+	if (ccode && brcms_c_country_valid(ccode))
 		strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
 
 	/*
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index 1948cb2..3f659e09 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -733,7 +733,7 @@
 	do { \
 		plcp[1] = len & 0xff; \
 		plcp[2] = ((len >> 8) & 0xff); \
-	} while (0);
+	} while (0)
 
 #define BRCMS_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU)
 #define BRCMS_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 569ab8a..aa15558 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1069,11 +1069,7 @@
 		wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
 			  "%d\n", __func__, err);
 
-	if (wl->pub->srom_ccode[0])
-		err = brcms_set_hint(wl, wl->pub->srom_ccode);
-	else
-		err = brcms_set_hint(wl, "US");
-	if (err)
+	if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode))
 		wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n",
 			  __func__, err);
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 7083db7..b4d9279 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -847,8 +847,7 @@
 	 */
 	if (!(txs->status & TX_STATUS_AMPDU)
 	    && (txs->status & TX_STATUS_INTERMEDIATE)) {
-		wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n",
-			  __func__);
+		BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
 		return false;
 	}
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index ce8562a..0fce56235 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -207,8 +207,7 @@
 };
 
 static const u16 iqcal_gainparams_numgains_lcnphy[1] = {
-	sizeof(tbl_iqcal_gainparams_lcnphy_2G) /
-	sizeof(*tbl_iqcal_gainparams_lcnphy_2G),
+	ARRAY_SIZE(tbl_iqcal_gainparams_lcnphy_2G),
 };
 
 static const struct lcnphy_sfo_cfg lcnphy_sfo_cfg[] = {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 3909574..812b6e3 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -16353,11 +16353,7 @@
 			wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
 					       rfseq_rx2tx_events_rev3_ipa,
 					       rfseq_rx2tx_dlys_rev3_ipa,
-					       sizeof
-					       (rfseq_rx2tx_events_rev3_ipa) /
-					       sizeof
-					       (rfseq_rx2tx_events_rev3_ipa
-						[0]));
+					       ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa));
 
 		mod_phy_reg(pi, 0x299, (0x3 << 14), (0x1 << 14));
 		mod_phy_reg(pi, 0x29d, (0x3 << 14), (0x1 << 14));
@@ -16858,18 +16854,13 @@
 		wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX,
 				       rfseq_tx2rx_events_rev3,
 				       rfseq_tx2rx_dlys_rev3,
-				       sizeof(rfseq_tx2rx_events_rev3) /
-				       sizeof(rfseq_tx2rx_events_rev3[0]));
+				       ARRAY_SIZE(rfseq_tx2rx_events_rev3));
 
 		if (PHY_IPA(pi))
 			wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
 					       rfseq_rx2tx_events_rev3_ipa,
 					       rfseq_rx2tx_dlys_rev3_ipa,
-					       sizeof
-					       (rfseq_rx2tx_events_rev3_ipa) /
-					       sizeof
-					       (rfseq_rx2tx_events_rev3_ipa
-						[0]));
+					       ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa));
 
 		if ((pi->sh->hw_phyrxchain != 0x3) &&
 		    (pi->sh->hw_phyrxchain != pi->sh->hw_phytxchain)) {
@@ -16885,8 +16876,7 @@
 				pi, NPHY_RFSEQ_RX2TX,
 				rfseq_rx2tx_events_rev3,
 				rfseq_rx2tx_dlys_rev3,
-				sizeof(rfseq_rx2tx_events_rev3)	/
-				sizeof(rfseq_rx2tx_events_rev3[0]));
+				ARRAY_SIZE(rfseq_rx2tx_events_rev3));
 		}
 
 		if (CHSPEC_IS2G(pi->radio_chanspec))
@@ -17209,13 +17199,11 @@
 
 		wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events,
 				       rfseq_rx2tx_dlys,
-				       sizeof(rfseq_rx2tx_events) /
-				       sizeof(rfseq_rx2tx_events[0]));
+				       ARRAY_SIZE(rfseq_rx2tx_events));
 
 		wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, rfseq_tx2rx_events,
 				       rfseq_tx2rx_dlys,
-				       sizeof(rfseq_tx2rx_events) /
-				       sizeof(rfseq_tx2rx_events[0]));
+				       ARRAY_SIZE(rfseq_tx2rx_events));
 
 		wlc_phy_workarounds_nphy_gainctrl(pi);
 
@@ -19357,8 +19345,7 @@
 			}
 
 			if (isAdjustNoiseVar) {
-				numTonesAdjust = sizeof(nphy_adj_tone_id_buf) /
-						sizeof(nphy_adj_tone_id_buf[0]);
+				numTonesAdjust = ARRAY_SIZE(nphy_adj_tone_id_buf);
 
 				wlc_phy_adjust_min_noisevar_nphy(
 					pi,
@@ -25204,32 +25191,26 @@
 
 				phy_a15 = pad_gain_codes_used_2057rev5;
 				phy_a13 =
-					sizeof(pad_gain_codes_used_2057rev5) /
-					sizeof(pad_gain_codes_used_2057rev5
-						[0]) - 1;
+					ARRAY_SIZE(pad_gain_codes_used_2057rev5) - 1;
 
 			} else if ((pi->pubpi.radiorev == 7)
 				   || (pi->pubpi.radiorev == 8)) {
 
 				phy_a15 = pad_gain_codes_used_2057rev7;
 				phy_a13 =
-					sizeof(pad_gain_codes_used_2057rev7) /
-					sizeof(pad_gain_codes_used_2057rev7
-						[0]) - 1;
+					ARRAY_SIZE(pad_gain_codes_used_2057rev7) - 1;
 
 			} else {
 
 				phy_a15 = pad_all_gain_codes_2057;
-				phy_a13 = sizeof(pad_all_gain_codes_2057) /
-					  sizeof(pad_all_gain_codes_2057[0]) -
+				phy_a13 = ARRAY_SIZE(pad_all_gain_codes_2057) -
 					  1;
 			}
 
 		} else {
 
 			phy_a15 = pga_all_gain_codes_2057;
-			phy_a13 = sizeof(pga_all_gain_codes_2057) /
-				  sizeof(pga_all_gain_codes_2057[0]) - 1;
+			phy_a13 = ARRAY_SIZE(pga_all_gain_codes_2057) - 1;
 		}
 
 		phy_a14 = 0;
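
The phy_lcn.c and phy_n.c hunks above are a pure cleanup: ARRAY_SIZE() from <linux/kernel.h> replaces the open-coded sizeof(x)/sizeof(x[0]) expressions. It only works on genuine arrays (not pointers), which is exactly the situation with these constant tables. A trivial stand-alone example with a made-up table:

#include <linux/kernel.h>
#include <linux/types.h>

static const u16 demo_rfseq_events[] = { 0x1, 0x2, 0x4, 0x8 };

static void demo(void)
{
	/* Evaluates to 4 at compile time. */
	unsigned int n = ARRAY_SIZE(demo_rfseq_events);

	(void)n;
}
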
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 5fb17d5..333193f 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -17,17 +17,7 @@
 #ifndef	_BRCM_HW_IDS_H_
 #define	_BRCM_HW_IDS_H_
 
-#define	BCM4325_D11DUAL_ID	0x431b
-#define	BCM4325_D11G_ID		0x431c
-#define	BCM4325_D11A_ID		0x431d
-
-#define BCM4329_D11N2G_ID	0x432f	/* 4329 802.11n 2.4G device */
-#define BCM4329_D11N5G_ID	0x4330	/* 4329 802.11n 5G device */
-#define BCM4329_D11NDUAL_ID	0x432e
-
-#define BCM4319_D11N_ID		0x4337	/* 4319 802.11n dualband device */
-#define BCM4319_D11N2G_ID	0x4338	/* 4319 802.11n 2.4G device */
-#define BCM4319_D11N5G_ID	0x4339	/* 4319 802.11n 5G device */
+#define BCM4313_D11N2G_ID	0x4727	/* 4313 802.11n 2.4G device */
 
 #define BCM43224_D11N_ID	0x4353	/* 43224 802.11n dualband device */
 #define BCM43224_D11N_ID_VEN1	0x0576	/* Vendor specific 43224 802.11n db */
@@ -37,23 +27,15 @@
 #define BCM43236_D11N_ID	0x4346	/* 43236 802.11n dualband device */
 #define BCM43236_D11N2G_ID	0x4347	/* 43236 802.11n 2.4GHz device */
 
-#define BCM4313_D11N2G_ID	0x4727	/* 4313 802.11n 2.4G device */
-
-/* Chip IDs */
-#define BCM4313_CHIP_ID		0x4313	/* 4313 chip id */
-#define	BCM4319_CHIP_ID		0x4319	/* 4319 chip id */
-
-#define	BCM43224_CHIP_ID	43224	/* 43224 chipcommon chipid */
-#define	BCM43225_CHIP_ID	43225	/* 43225 chipcommon chipid */
-#define	BCM43421_CHIP_ID	43421	/* 43421 chipcommon chipid */
-#define	BCM43235_CHIP_ID	43235	/* 43235 chipcommon chipid */
-#define	BCM43236_CHIP_ID	43236	/* 43236 chipcommon chipid */
-#define	BCM43238_CHIP_ID	43238	/* 43238 chipcommon chipid */
-#define	BCM4329_CHIP_ID		0x4329	/* 4329 chipcommon chipid */
-#define	BCM4325_CHIP_ID		0x4325	/* 4325 chipcommon chipid */
-#define	BCM4331_CHIP_ID		0x4331	/* 4331 chipcommon chipid */
-#define BCM4336_CHIP_ID		0x4336	/* 4336 chipcommon chipid */
-#define BCM4330_CHIP_ID		0x4330	/* 4330 chipcommon chipid */
-#define BCM6362_CHIP_ID		0x6362	/* 6362 chipcommon chipid */
+/* Chipcommon Core Chip IDs */
+#define BCM4313_CHIP_ID		0x4313
+#define BCM43224_CHIP_ID	43224
+#define BCM43225_CHIP_ID	43225
+#define BCM43235_CHIP_ID	43235
+#define BCM43236_CHIP_ID	43236
+#define BCM43238_CHIP_ID	43238
+#define BCM4329_CHIP_ID		0x4329
+#define BCM4330_CHIP_ID		0x4330
+#define BCM4331_CHIP_ID		0x4331
 
 #endif				/* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index bfa0d54..627bc12 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -244,8 +244,7 @@
 	unsigned long flags;
 	struct hostap_tx_callback_info *entry;
 
-	entry = kmalloc(sizeof(*entry),
-							   GFP_ATOMIC);
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (entry == NULL)
 		return 0;
 
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 972a9c3..05ca340 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -457,18 +457,4 @@
 #endif /* CONFIG_PM */
 };
 
-
-static int __init init_prism2_pci(void)
-{
-	return pci_register_driver(&prism2_pci_driver);
-}
-
-
-static void __exit exit_prism2_pci(void)
-{
-	pci_unregister_driver(&prism2_pci_driver);
-}
-
-
-module_init(init_prism2_pci);
-module_exit(exit_prism2_pci);
+module_pci_driver(prism2_pci_driver);
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 33e7903..c3d067e 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -616,18 +616,4 @@
 	.remove		= prism2_plx_remove,
 };
 
-
-static int __init init_prism2_plx(void)
-{
-	return pci_register_driver(&prism2_plx_driver);
-}
-
-
-static void __exit exit_prism2_plx(void)
-{
-	pci_unregister_driver(&prism2_plx_driver);
-}
-
-
-module_init(init_prism2_plx);
-module_exit(exit_prism2_plx);
+module_pci_driver(prism2_plx_driver);
diff --git a/drivers/net/wireless/ipw2x00/ipw.h b/drivers/net/wireless/ipw2x00/ipw.h
new file mode 100644
index 0000000..4007bf5
--- /dev/null
+++ b/drivers/net/wireless/ipw2x00/ipw.h
@@ -0,0 +1,23 @@
+/*
+ * Intel Pro/Wireless 2100, 2200BG, 2915ABG network connection driver
+ *
+ * Copyright 2012 Stanislav Yakovlev <stas.yakovlev@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __IPW_H__
+#define __IPW_H__
+
+#include <linux/ieee80211.h>
+
+static const u32 ipw_cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+};
+
+#endif
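
The new ipw.h header centralizes the cipher-suite table shared by the ipw2x00 drivers, and the ipw2100.c hunk below hands it to cfg80211 so user space can query which ciphers the driver supports. A sketch of that registration step; the demo_* names are placeholders, while the wiphy fields and WLAN_CIPHER_SUITE_* constants are the real API:

#include <linux/kernel.h>
#include <net/cfg80211.h>

static const u32 demo_cipher_suites[] = {
	WLAN_CIPHER_SUITE_WEP40,
	WLAN_CIPHER_SUITE_WEP104,
	WLAN_CIPHER_SUITE_TKIP,
	WLAN_CIPHER_SUITE_CCMP,
};

static void demo_setup_wiphy(struct wiphy *wiphy)
{
	wiphy->cipher_suites = demo_cipher_suites;
	wiphy->n_cipher_suites = ARRAY_SIZE(demo_cipher_suites);
}
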
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index f0551f8..9cfae0c 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -166,6 +166,7 @@
 #include <net/lib80211.h>
 
 #include "ipw2100.h"
+#include "ipw.h"
 
 #define IPW2100_VERSION "git-1.2.2"
 
@@ -343,38 +344,50 @@
 
 static inline void read_register(struct net_device *dev, u32 reg, u32 * val)
 {
-	*val = readl((void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	*val = ioread32(priv->ioaddr + reg);
 	IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val);
 }
 
 static inline void write_register(struct net_device *dev, u32 reg, u32 val)
 {
-	writel(val, (void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	iowrite32(val, priv->ioaddr + reg);
 	IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val);
 }
 
 static inline void read_register_word(struct net_device *dev, u32 reg,
 				      u16 * val)
 {
-	*val = readw((void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	*val = ioread16(priv->ioaddr + reg);
 	IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val);
 }
 
 static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val)
 {
-	*val = readb((void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	*val = ioread8(priv->ioaddr + reg);
 	IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
 }
 
 static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
 {
-	writew(val, (void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	iowrite16(val, priv->ioaddr + reg);
 	IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
 }
 
 static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
 {
-	writeb(val, (void __iomem *)(dev->base_addr + reg));
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	iowrite8(val, priv->ioaddr + reg);
 	IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val);
 }
 
@@ -506,13 +519,13 @@
 		read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf);
 }
 
-static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev)
+static bool ipw2100_hw_is_adapter_in_system(struct net_device *dev)
 {
-	return (dev->base_addr &&
-		(readl
-		 ((void __iomem *)(dev->base_addr +
-				   IPW_REG_DOA_DEBUG_AREA_START))
-		 == IPW_DATA_DOA_DEBUG_VALUE));
+	u32 dbg;
+
+	read_register(dev, IPW_REG_DOA_DEBUG_AREA_START, &dbg);
+
+	return dbg == IPW_DATA_DOA_DEBUG_VALUE;
 }
 
 static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord,
@@ -1946,11 +1959,12 @@
 		wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
 	}
 
+	wdev->wiphy->cipher_suites = ipw_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
+
 	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
-	if (wiphy_register(wdev->wiphy)) {
-		ipw2100_down(priv);
+	if (wiphy_register(wdev->wiphy))
 		return -EIO;
-	}
 	return 0;
 }
 
@@ -3773,7 +3787,7 @@
 	    IPW2100_ORD(COUNTRY_CODE,
 				"IEEE country code as recv'd from beacon"),
 	    IPW2100_ORD(COUNTRY_CHANNELS,
-				"channels suported by country"),
+				"channels supported by country"),
 	    IPW2100_ORD(RESET_CNT, "adapter resets (warm)"),
 	    IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"),
 	    IPW2100_ORD(ANTENNA_DIVERSITY,
@@ -4062,7 +4076,7 @@
 	ipw2100_firmware.version = 0;
 #endif
 
-	printk(KERN_INFO "%s: Reseting on mode change.\n", priv->net_dev->name);
+	printk(KERN_INFO "%s: Resetting on mode change.\n", priv->net_dev->name);
 	priv->reset_backoff = 0;
 	schedule_reset(priv);
 
@@ -6082,9 +6096,7 @@
 /* Look into using netdev destructor to shutdown libipw? */
 
 static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
-					       void __iomem * base_addr,
-					       unsigned long mem_start,
-					       unsigned long mem_len)
+					       void __iomem * ioaddr)
 {
 	struct ipw2100_priv *priv;
 	struct net_device *dev;
@@ -6096,6 +6108,7 @@
 	priv->ieee = netdev_priv(dev);
 	priv->pci_dev = pci_dev;
 	priv->net_dev = dev;
+	priv->ioaddr = ioaddr;
 
 	priv->ieee->hard_start_xmit = ipw2100_tx;
 	priv->ieee->set_security = shim__set_security;
@@ -6111,10 +6124,6 @@
 	dev->watchdog_timeo = 3 * HZ;
 	dev->irq = 0;
 
-	dev->base_addr = (unsigned long)base_addr;
-	dev->mem_start = mem_start;
-	dev->mem_end = dev->mem_start + mem_len - 1;
-
 	/* NOTE: We don't use the wireless_handlers hook
 	 * in dev as the system will start throwing WX requests
 	 * to us before we're actually initialized and it just
@@ -6215,8 +6224,7 @@
 static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
 				const struct pci_device_id *ent)
 {
-	unsigned long mem_start, mem_len, mem_flags;
-	void __iomem *base_addr = NULL;
+	void __iomem *ioaddr;
 	struct net_device *dev = NULL;
 	struct ipw2100_priv *priv = NULL;
 	int err = 0;
@@ -6225,18 +6233,14 @@
 
 	IPW_DEBUG_INFO("enter\n");
 
-	mem_start = pci_resource_start(pci_dev, 0);
-	mem_len = pci_resource_len(pci_dev, 0);
-	mem_flags = pci_resource_flags(pci_dev, 0);
-
-	if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) {
+	if (!(pci_resource_flags(pci_dev, 0) & IORESOURCE_MEM)) {
 		IPW_DEBUG_INFO("weird - resource type is not memory\n");
 		err = -ENODEV;
-		goto fail;
+		goto out;
 	}
 
-	base_addr = ioremap_nocache(mem_start, mem_len);
-	if (!base_addr) {
+	ioaddr = pci_iomap(pci_dev, 0, 0);
+	if (!ioaddr) {
 		printk(KERN_WARNING DRV_NAME
 		       "Error calling ioremap_nocache.\n");
 		err = -EIO;
@@ -6244,7 +6248,7 @@
 	}
 
 	/* allocate and initialize our net_device */
-	dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len);
+	dev = ipw2100_alloc_device(pci_dev, ioaddr);
 	if (!dev) {
 		printk(KERN_WARNING DRV_NAME
 		       "Error calling ipw2100_alloc_device.\n");
@@ -6325,6 +6329,11 @@
 	printk(KERN_INFO DRV_NAME
 	       ": Detected Intel PRO/Wireless 2100 Network Connection\n");
 
+	err = ipw2100_wdev_init(dev);
+	if (err)
+		goto fail;
+	registered = 1;
+
 	/* Bring up the interface.  Pre 0.46, after we registered the
 	 * network device we would call ipw2100_up.  This introduced a race
 	 * condition with newer hotplug configurations (network was coming
@@ -6341,11 +6350,7 @@
 		       "Error calling register_netdev.\n");
 		goto fail;
 	}
-	registered = 1;
-
-	err = ipw2100_wdev_init(dev);
-	if (err)
-		goto fail;
+	registered = 2;
 
 	mutex_lock(&priv->action_mutex);
 
@@ -6379,18 +6384,21 @@
 	priv->status |= STATUS_INITIALIZED;
 
 	mutex_unlock(&priv->action_mutex);
-
-	return 0;
+out:
+	return err;
 
       fail_unlock:
 	mutex_unlock(&priv->action_mutex);
-	wiphy_unregister(priv->ieee->wdev.wiphy);
-	kfree(priv->ieee->bg_band.channels);
       fail:
 	if (dev) {
-		if (registered)
+		if (registered >= 2)
 			unregister_netdev(dev);
 
+		if (registered) {
+			wiphy_unregister(priv->ieee->wdev.wiphy);
+			kfree(priv->ieee->bg_band.channels);
+		}
+
 		ipw2100_hw_stop_adapter(priv);
 
 		ipw2100_disable_interrupts(priv);
@@ -6409,63 +6417,56 @@
 		pci_set_drvdata(pci_dev, NULL);
 	}
 
-	if (base_addr)
-		iounmap(base_addr);
+	pci_iounmap(pci_dev, ioaddr);
 
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
-
-	return err;
+	goto out;
 }
 
 static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
 {
 	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
-	struct net_device *dev;
+	struct net_device *dev = priv->net_dev;
 
-	if (priv) {
-		mutex_lock(&priv->action_mutex);
+	mutex_lock(&priv->action_mutex);
 
-		priv->status &= ~STATUS_INITIALIZED;
+	priv->status &= ~STATUS_INITIALIZED;
 
-		dev = priv->net_dev;
-		sysfs_remove_group(&pci_dev->dev.kobj,
-				   &ipw2100_attribute_group);
+	sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
 
 #ifdef CONFIG_PM
-		if (ipw2100_firmware.version)
-			ipw2100_release_firmware(priv, &ipw2100_firmware);
+	if (ipw2100_firmware.version)
+		ipw2100_release_firmware(priv, &ipw2100_firmware);
 #endif
-		/* Take down the hardware */
-		ipw2100_down(priv);
+	/* Take down the hardware */
+	ipw2100_down(priv);
 
-		/* Release the mutex so that the network subsystem can
-		 * complete any needed calls into the driver... */
-		mutex_unlock(&priv->action_mutex);
+	/* Release the mutex so that the network subsystem can
+	 * complete any needed calls into the driver... */
+	mutex_unlock(&priv->action_mutex);
 
-		/* Unregister the device first - this results in close()
-		 * being called if the device is open.  If we free storage
-		 * first, then close() will crash. */
-		unregister_netdev(dev);
+	/* Unregister the device first - this results in close()
+	 * being called if the device is open.  If we free storage
+	 * first, then close() will crash.
+	 * FIXME: remove the comment above. */
+	unregister_netdev(dev);
 
-		ipw2100_kill_works(priv);
+	ipw2100_kill_works(priv);
 
-		ipw2100_queues_free(priv);
+	ipw2100_queues_free(priv);
 
-		/* Free potential debugging firmware snapshot */
-		ipw2100_snapshot_free(priv);
+	/* Free potential debugging firmware snapshot */
+	ipw2100_snapshot_free(priv);
 
-		if (dev->irq)
-			free_irq(dev->irq, priv);
+	free_irq(dev->irq, priv);
 
-		if (dev->base_addr)
-			iounmap((void __iomem *)dev->base_addr);
+	pci_iounmap(pci_dev, priv->ioaddr);
 
-		/* wiphy_unregister needs to be here, before free_libipw */
-		wiphy_unregister(priv->ieee->wdev.wiphy);
-		kfree(priv->ieee->bg_band.channels);
-		free_libipw(dev, 0);
-	}
+	/* wiphy_unregister needs to be here, before free_libipw */
+	wiphy_unregister(priv->ieee->wdev.wiphy);
+	kfree(priv->ieee->bg_band.channels);
+	free_libipw(dev, 0);
 
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
@@ -8508,8 +8509,7 @@
 				     struct ipw2100_fw *fw)
 {
 	fw->version = 0;
-	if (fw->fw_entry)
-		release_firmware(fw->fw_entry);
+	release_firmware(fw->fw_entry);
 	fw->fw_entry = NULL;
 }
 
@@ -8609,7 +8609,7 @@
 	struct net_device *dev = priv->net_dev;
 	const unsigned char *microcode_data = fw->uc.data;
 	unsigned int microcode_data_left = fw->uc.size;
-	void __iomem *reg = (void __iomem *)dev->base_addr;
+	void __iomem *reg = priv->ioaddr;
 
 	struct symbol_alive_response response;
 	int i, j;
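
The ipw2100.c hunks above replace readl()/writel() casts through the long-deprecated dev->base_addr with ioread32()/iowrite32() on a void __iomem * cookie that is stored in the driver's private struct and obtained from pci_iomap(). A minimal sketch of that mapping pattern, assuming hypothetical names (example_priv, example_probe, the register offset) rather than anything taken from the driver:

    #include <linux/pci.h>
    #include <linux/io.h>

    struct example_priv {
            void __iomem *ioaddr;   /* cookie returned by pci_iomap() */
    };

    static int example_probe(struct pci_dev *pdev, struct example_priv *priv)
    {
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    return err;

            err = pci_request_regions(pdev, "example");
            if (err)
                    goto err_disable;

            /* Map BAR 0; a length of 0 means "map the whole BAR". */
            priv->ioaddr = pci_iomap(pdev, 0, 0);
            if (!priv->ioaddr) {
                    err = -EIO;
                    goto err_release;
            }

            /* Accessors take the cookie directly, no cast via base_addr. */
            iowrite32(0x1, priv->ioaddr + 0x00 /* hypothetical register */);
            (void)ioread32(priv->ioaddr + 0x00);

            return 0;

    err_release:
            pci_release_regions(pdev);
    err_disable:
            pci_disable_device(pdev);
            return err;
    }

Teardown mirrors the patch: pci_iounmap(pdev, priv->ioaddr) instead of iounmap() on a stashed unsigned long.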
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 99cba96..9731252 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -135,15 +135,6 @@
 	IPW_HW_STATE_ENABLED = 0
 };
 
-struct ssid_context {
-	char ssid[IW_ESSID_MAX_SIZE + 1];
-	int ssid_len;
-	unsigned char bssid[ETH_ALEN];
-	int port_type;
-	int channel;
-
-};
-
 extern const char *port_type_str[];
 extern const char *band_str[];
 
@@ -488,6 +479,7 @@
 #define CAP_PRIVACY_ON          (1<<1)	/* Off = No privacy */
 
 struct ipw2100_priv {
+	void __iomem *ioaddr;
 
 	int stop_hang_check;	/* Set 1 when shutting down to kill hang_check */
 	int stop_rf_kill;	/* Set 1 when shutting down to kill rf_kill */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 2b02257..0036737 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <net/cfg80211-wext.h>
 #include "ipw2200.h"
+#include "ipw.h"
 
 
 #ifndef KBUILD_EXTMOD
@@ -2191,6 +2192,7 @@
 {
 	int rc = 0;
 	unsigned long flags;
+	unsigned long now, end;
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->status & STATUS_HCMD_ACTIVE) {
@@ -2232,10 +2234,20 @@
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
+	now = jiffies;
+	end = now + HOST_COMPLETE_TIMEOUT;
+again:
 	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
 					      !(priv->
 						status & STATUS_HCMD_ACTIVE),
-					      HOST_COMPLETE_TIMEOUT);
+					      end - now);
+	if (rc < 0) {
+		now = jiffies;
+		if (time_before(now, end))
+			goto again;
+		rc = 0;
+	}
+
 	if (rc == 0) {
 		spin_lock_irqsave(&priv->lock, flags);
 		if (priv->status & STATUS_HCMD_ACTIVE) {
@@ -3657,8 +3669,7 @@
 		priv->rxq = NULL;
 	}
 	ipw_tx_queue_free(priv);
-	if (raw)
-		release_firmware(raw);
+	release_firmware(raw);
 #ifdef CONFIG_PM
 	fw_loaded = 0;
 	raw = NULL;
@@ -7024,7 +7035,7 @@
 			    cpu_to_le16(burst_duration);
 	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
 		if (type == IEEE_B) {
-			IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
+			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
 				      type);
 			if (priv->qos_data.qos_enable == 0)
 				active_one = &def_parameters_CCK;
@@ -11432,20 +11443,6 @@
 	mutex_unlock(&priv->mutex);
 }
 
-/* Called by register_netdev() */
-static int ipw_net_init(struct net_device *dev)
-{
-	int rc = 0;
-	struct ipw_priv *priv = libipw_priv(dev);
-
-	mutex_lock(&priv->mutex);
-	if (ipw_up(priv))
-		rc = -EIO;
-	mutex_unlock(&priv->mutex);
-
-	return rc;
-}
-
 static int ipw_wdev_init(struct net_device *dev)
 {
 	int i, rc = 0;
@@ -11533,6 +11530,9 @@
 		wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
 	}
 
+	wdev->wiphy->cipher_suites = ipw_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
+
 	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
 
 	/* With that information in place, we can now register the wiphy... */
@@ -11711,7 +11711,6 @@
 #endif
 
 static const struct net_device_ops ipw_netdev_ops = {
-	.ndo_init		= ipw_net_init,
 	.ndo_open		= ipw_net_open,
 	.ndo_stop		= ipw_net_stop,
 	.ndo_set_rx_mode	= ipw_net_set_multicast_list,
@@ -11826,10 +11825,6 @@
 	net_dev->wireless_data = &priv->wireless_data;
 	net_dev->wireless_handlers = &ipw_wx_handler_def;
 	net_dev->ethtool_ops = &ipw_ethtool_ops;
-	net_dev->irq = pdev->irq;
-	net_dev->base_addr = (unsigned long)priv->hw_base;
-	net_dev->mem_start = pci_resource_start(pdev, 0);
-	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
 
 	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
 	if (err) {
@@ -11838,17 +11833,24 @@
 		goto out_release_irq;
 	}
 
-	mutex_unlock(&priv->mutex);
-	err = register_netdev(net_dev);
-	if (err) {
-		IPW_ERROR("failed to register network device\n");
+	if (ipw_up(priv)) {
+		mutex_unlock(&priv->mutex);
+		err = -EIO;
 		goto out_remove_sysfs;
 	}
 
+	mutex_unlock(&priv->mutex);
+
 	err = ipw_wdev_init(net_dev);
 	if (err) {
 		IPW_ERROR("failed to register wireless device\n");
-		goto out_unregister_netdev;
+		goto out_remove_sysfs;
+	}
+
+	err = register_netdev(net_dev);
+	if (err) {
+		IPW_ERROR("failed to register network device\n");
+		goto out_unregister_wiphy;
 	}
 
 #ifdef CONFIG_IPW2200_PROMISCUOUS
@@ -11857,10 +11859,8 @@
 		if (err) {
 			IPW_ERROR("Failed to register promiscuous network "
 				  "device (error %d).\n", err);
-			wiphy_unregister(priv->ieee->wdev.wiphy);
-			kfree(priv->ieee->a_band.channels);
-			kfree(priv->ieee->bg_band.channels);
-			goto out_unregister_netdev;
+			unregister_netdev(priv->net_dev);
+			goto out_unregister_wiphy;
 		}
 	}
 #endif
@@ -11872,8 +11872,10 @@
 
 	return 0;
 
-      out_unregister_netdev:
-	unregister_netdev(priv->net_dev);
+      out_unregister_wiphy:
+	wiphy_unregister(priv->ieee->wdev.wiphy);
+	kfree(priv->ieee->a_band.channels);
+	kfree(priv->ieee->bg_band.channels);
       out_remove_sysfs:
 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
       out_release_irq:
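
The host-command wait change in the ipw2200.c hunk above stops treating a signal-interrupted wait_event_interruptible_timeout() (return value < 0, i.e. -ERESTARTSYS) as a command timeout: it recomputes how much of the jiffies deadline is left and waits again, only falling through to the timeout path once the deadline has genuinely passed. A condensed sketch of the same pattern, using a hypothetical helper and status bit rather than the driver's STATUS_HCMD_ACTIVE machinery:

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    /* Hypothetical helper: wait for a status bit to clear, retrying the
     * interruptible sleep when a signal cuts it short, until the original
     * deadline expires. */
    static int example_wait_bit_clear(wait_queue_head_t *wq,
                                      unsigned long *status, int bit,
                                      unsigned long timeout)
    {
            unsigned long now = jiffies;
            unsigned long end = now + timeout;
            long rc;

    again:
            rc = wait_event_interruptible_timeout(*wq, !test_bit(bit, status),
                                                  end - now);
            if (rc < 0) {                   /* -ERESTARTSYS: signal, not timeout */
                    now = jiffies;
                    if (time_before(now, end))
                            goto again;     /* wait out the remaining jiffies */
                    rc = 0;                 /* deadline passed meanwhile */
            }

            return rc ? 0 : -ETIMEDOUT;     /* rc > 0: bit cleared in time */
    }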
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 8874588..0b22fb4 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -584,61 +584,6 @@
 
 /*******************************************************/
 
-enum {				/* libipw_basic_report.map */
-	LIBIPW_BASIC_MAP_BSS = (1 << 0),
-	LIBIPW_BASIC_MAP_OFDM = (1 << 1),
-	LIBIPW_BASIC_MAP_UNIDENTIFIED = (1 << 2),
-	LIBIPW_BASIC_MAP_RADAR = (1 << 3),
-	LIBIPW_BASIC_MAP_UNMEASURED = (1 << 4),
-	/* Bits 5-7 are reserved */
-
-};
-struct libipw_basic_report {
-	u8 channel;
-	__le64 start_time;
-	__le16 duration;
-	u8 map;
-} __packed;
-
-enum {				/* libipw_measurement_request.mode */
-	/* Bit 0 is reserved */
-	LIBIPW_MEASUREMENT_ENABLE = (1 << 1),
-	LIBIPW_MEASUREMENT_REQUEST = (1 << 2),
-	LIBIPW_MEASUREMENT_REPORT = (1 << 3),
-	/* Bits 4-7 are reserved */
-};
-
-enum {
-	LIBIPW_REPORT_BASIC = 0,	/* required */
-	LIBIPW_REPORT_CCA = 1,	/* optional */
-	LIBIPW_REPORT_RPI = 2,	/* optional */
-	/* 3-255 reserved */
-};
-
-struct libipw_measurement_params {
-	u8 channel;
-	__le64 start_time;
-	__le16 duration;
-} __packed;
-
-struct libipw_measurement_request {
-	struct libipw_info_element ie;
-	u8 token;
-	u8 mode;
-	u8 type;
-	struct libipw_measurement_params params[0];
-} __packed;
-
-struct libipw_measurement_report {
-	struct libipw_info_element ie;
-	u8 token;
-	u8 mode;
-	u8 type;
-	union {
-		struct libipw_basic_report basic[0];
-	} u;
-} __packed;
-
 struct libipw_tpc_report {
 	u8 transmit_power;
 	u8 link_margin;
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index c4955d2..02e0579 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -77,8 +77,8 @@
 
 		if (entry->skb != NULL && entry->seq == seq &&
 		    (entry->last_frag + 1 == frag || frag == -1) &&
-		    !compare_ether_addr(entry->src_addr, src) &&
-		    !compare_ether_addr(entry->dst_addr, dst))
+		    ether_addr_equal(entry->src_addr, src) &&
+		    ether_addr_equal(entry->dst_addr, dst))
 			return entry;
 	}
 
@@ -245,12 +245,12 @@
 	/* check that the frame is unicast frame to us */
 	if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
 	    IEEE80211_FCTL_TODS &&
-	    !compare_ether_addr(hdr->addr1, dev->dev_addr) &&
-	    !compare_ether_addr(hdr->addr3, dev->dev_addr)) {
+	    ether_addr_equal(hdr->addr1, dev->dev_addr) &&
+	    ether_addr_equal(hdr->addr3, dev->dev_addr)) {
 		/* ToDS frame with own addr BSSID and DA */
 	} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
 		   IEEE80211_FCTL_FROMDS &&
-		   !compare_ether_addr(hdr->addr1, dev->dev_addr)) {
+		   ether_addr_equal(hdr->addr1, dev->dev_addr)) {
 		/* FromDS frame with own addr as DA */
 	} else
 		return 0;
@@ -523,8 +523,8 @@
 
 	if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
 	    (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
-	    IEEE80211_FCTL_FROMDS && ieee->stadev
-	    && !compare_ether_addr(hdr->addr2, ieee->assoc_ap_addr)) {
+	    IEEE80211_FCTL_FROMDS && ieee->stadev &&
+	    ether_addr_equal(hdr->addr2, ieee->assoc_ap_addr)) {
 		/* Frame from BSSID of the AP for which we are a client */
 		skb->dev = dev = ieee->stadev;
 		stats = hostap_get_stats(dev);
@@ -1468,7 +1468,7 @@
 	 * as one network */
 	return ((src->ssid_len == dst->ssid_len) &&
 		(src->channel == dst->channel) &&
-		!compare_ether_addr(src->bssid, dst->bssid) &&
+		ether_addr_equal(src->bssid, dst->bssid) &&
 		!memcmp(src->ssid, dst->ssid, src->ssid_len));
 }
 
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index b25c01b..87e5398 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -453,10 +453,10 @@
 	switch (il->iw_mode) {
 	case NL80211_IFTYPE_ADHOC:	/* Header: Dest. | Source    | BSSID */
 		/* packets to our IBSS update information */
-		return !compare_ether_addr(header->addr3, il->bssid);
+		return ether_addr_equal(header->addr3, il->bssid);
 	case NL80211_IFTYPE_STATION:	/* Header: Dest. | AP{BSSID} | Source */
 		/* packets to our IBSS update information */
-		return !compare_ether_addr(header->addr2, il->bssid);
+		return ether_addr_equal(header->addr2, il->bssid);
 	default:
 		return 1;
 	}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index c46275a..509301a 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -2565,7 +2565,7 @@
 	spin_lock_irqsave(&il->sta_lock, flags);
 	for (i = start; i < il->hw_params.max_stations; i++)
 		if (il->stations[i].used &&
-		    (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
+		    ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
 			ret = i;
 			goto out;
 		}
@@ -2850,9 +2850,9 @@
 il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
 			    struct ieee80211_tx_info *info)
 {
-	struct ieee80211_tx_rate *r = &info->control.rates[0];
+	struct ieee80211_tx_rate *r = &info->status.rates[0];
 
-	info->antenna_sel_tx =
+	info->status.antenna =
 	    ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
 	if (rate_n_flags & RATE_MCS_HT_MSK)
 		r->flags |= IEEE80211_TX_RC_MCS;
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 11ab124..f3b8e91 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -873,7 +873,7 @@
 	    tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
 	    tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
 	    tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
-	    tbl_type.ant_type != info->antenna_sel_tx ||
+	    tbl_type.ant_type != info->status.antenna ||
 	    !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
 	    || !!(tx_rate & RATE_MCS_GF_MSK) !=
 	    !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index eaf24945..cbf2dc1 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1896,8 +1896,8 @@
 		sta_id = il->hw_params.bcast_id;
 	else
 		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
-			if (!compare_ether_addr
-			    (il->stations[i].sta.sta.addr, addr)) {
+			if (ether_addr_equal(il->stations[i].sta.sta.addr,
+					     addr)) {
 				sta_id = i;
 				break;
 			}
@@ -1926,7 +1926,7 @@
 
 	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
 	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
-	    !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
+	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
 		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
 			sta_id, addr);
 		return sta_id;
@@ -3744,10 +3744,10 @@
 
 	/* These items are only settable from the full RXON command */
 	CHK(!il_is_associated(il));
-	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
-	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
-	CHK(compare_ether_addr
-	    (staging->wlap_bssid_addr, active->wlap_bssid_addr));
+	CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
+	CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
+	CHK(!ether_addr_equal(staging->wlap_bssid_addr,
+			      active->wlap_bssid_addr));
 	CHK_NEQ(staging->dev_type, active->dev_type);
 	CHK_NEQ(staging->channel, active->channel);
 	CHK_NEQ(staging->air_propagation, active->air_propagation);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 2fe6273..db6c6e5 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -113,20 +113,21 @@
 	  generic netlink message via NL80211_TESTMODE channel.
 
 config IWLWIFI_P2P
-       bool "iwlwifi experimental P2P support"
-       depends on IWLWIFI
-       help
-         This option enables experimental P2P support for some devices
-         based on microcode support. Since P2P support is still under
-         development, this option may even enable it for some devices
-         now that turn out to not support it in the future due to
-         microcode restrictions.
+	def_bool y
+	bool "iwlwifi experimental P2P support"
+	depends on IWLWIFI
+	help
+	  This option enables experimental P2P support for some devices
+	  based on microcode support. Since P2P support is still under
+	  development, this option may even enable it for some devices
+	  now that turn out to not support it in the future due to
+	  microcode restrictions.
 
-         To determine if your microcode supports the experimental P2P
-         offered by this option, check if the driver advertises AP
-         support when it is loaded.
+	  To determine if your microcode supports the experimental P2P
+	  offered by this option, check if the driver advertises AP
+	  support when it is loaded.
 
-         Say Y only if you want to experiment with P2P.
+	  Say Y only if you want to experiment with P2P.
 
 config IWLWIFI_EXPERIMENTAL_MFP
 	bool "support MFP (802.11w) even if uCode doesn't advertise"
@@ -136,3 +137,11 @@
 	  even if the microcode doesn't advertise it.
 
 	  Say Y only if you want to experiment with MFP.
+
+config IWLWIFI_UCODE16
+	bool "support uCode 16.0"
+	depends on IWLWIFI
+	help
+	  This option enables support for uCode version 16.0.
+
+	  Say Y if you want to use 16.0 microcode.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 85d163e..406f297 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -5,9 +5,9 @@
 iwlwifi-objs		+= iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
 iwlwifi-objs		+= iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
 
-iwlwifi-objs		+= iwl-core.o iwl-eeprom.o iwl-power.o
+iwlwifi-objs		+= iwl-eeprom.o iwl-power.o
 iwlwifi-objs		+= iwl-scan.o iwl-led.o
-iwlwifi-objs		+= iwl-agn-rxon.o
+iwlwifi-objs		+= iwl-agn-rxon.o iwl-agn-devices.o
 iwlwifi-objs		+= iwl-5000.o
 iwlwifi-objs		+= iwl-6000.o
 iwlwifi-objs		+= iwl-1000.o
@@ -17,6 +17,8 @@
 iwlwifi-objs		+= iwl-notif-wait.o
 iwlwifi-objs		+= iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
 
+
+iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 5b0d888..2629a66 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -24,30 +24,16 @@
  *
  *****************************************************************************/
 
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
 #include <linux/stringify.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-agn-hw.h"
-#include "iwl-shared.h"
+#include "iwl-config.h"
 #include "iwl-cfg.h"
-#include "iwl-prph.h"
+#include "iwl-csr.h"
+#include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL1000_UCODE_API_MAX 6
-#define IWL100_UCODE_API_MAX 6
+#define IWL1000_UCODE_API_MAX 5
+#define IWL100_UCODE_API_MAX 5
 
 /* Oldest version we won't warn about */
 #define IWL1000_UCODE_API_OK 5
@@ -57,6 +43,10 @@
 #define IWL1000_UCODE_API_MIN 1
 #define IWL100_UCODE_API_MIN 5
 
+/* EEPROM version */
+#define EEPROM_1000_TX_POWER_VERSION	(4)
+#define EEPROM_1000_EEPROM_VERSION	(0x15C)
+
 #define IWL1000_FW_PRE "iwlwifi-1000-"
 #define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
 
@@ -64,100 +54,8 @@
 #define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
 
 
-/*
- * For 1000, use advance thermal throttling critical temperature threshold,
- * but legacy thermal management implementation for now.
- * This is for the reason of 1000 uCode using advance thermal throttling API
- * but not implement ct_kill_exit based on ct_kill exit temperature
- * so the thermal throttling will still based on legacy thermal throttling
- * management.
- * The code here need to be modified once 1000 uCode has the advanced thermal
- * throttling algorithm in place
- */
-static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
-{
-	/* want Celsius */
-	hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
-	hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
-}
-
-/* NIC configuration for 1000 series */
-static void iwl1000_nic_config(struct iwl_priv *priv)
-{
-	/* set CSR_HW_CONFIG_REG for uCode use */
-	iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
-		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
-		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
-	/* Setting digital SVR for 1000 card to 1.32V */
-	/* locking is acquired in iwl_set_bits_mask_prph() function */
-	iwl_set_bits_mask_prph(trans(priv), APMG_DIGITAL_SVR_REG,
-				APMG_SVR_DIGITAL_VOLTAGE_1_32,
-				~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
-}
-
-static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
-	.min_nrg_cck = 95,
-	.auto_corr_min_ofdm = 90,
-	.auto_corr_min_ofdm_mrc = 170,
-	.auto_corr_min_ofdm_x1 = 120,
-	.auto_corr_min_ofdm_mrc_x1 = 240,
-
-	.auto_corr_max_ofdm = 120,
-	.auto_corr_max_ofdm_mrc = 210,
-	.auto_corr_max_ofdm_x1 = 155,
-	.auto_corr_max_ofdm_mrc_x1 = 290,
-
-	.auto_corr_min_cck = 125,
-	.auto_corr_max_cck = 200,
-	.auto_corr_min_cck_mrc = 170,
-	.auto_corr_max_cck_mrc = 400,
-	.nrg_th_cck = 95,
-	.nrg_th_ofdm = 95,
-
-	.barker_corr_th_min = 190,
-	.barker_corr_th_min_mrc = 390,
-	.nrg_th_cca = 62,
-};
-
-static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
-{
-	hw_params(priv).ht40_channel =  BIT(IEEE80211_BAND_2GHZ);
-
-	hw_params(priv).tx_chains_num =
-		num_of_ant(hw_params(priv).valid_tx_ant);
-	if (cfg(priv)->rx_with_siso_diversity)
-		hw_params(priv).rx_chains_num = 1;
-	else
-		hw_params(priv).rx_chains_num =
-			num_of_ant(hw_params(priv).valid_rx_ant);
-
-	iwl1000_set_ct_threshold(priv);
-
-	/* Set initial sensitivity parameters */
-	hw_params(priv).sens = &iwl1000_sensitivity;
-}
-
-static struct iwl_lib_ops iwl1000_lib = {
-	.set_hw_params = iwl1000_hw_set_hw_params,
-	.nic_config = iwl1000_nic_config,
-	.eeprom_ops = {
-		.regulatory_bands = {
-			EEPROM_REG_BAND_1_CHANNELS,
-			EEPROM_REG_BAND_2_CHANNELS,
-			EEPROM_REG_BAND_3_CHANNELS,
-			EEPROM_REG_BAND_4_CHANNELS,
-			EEPROM_REG_BAND_5_CHANNELS,
-			EEPROM_REG_BAND_24_HT40_CHANNELS,
-			EEPROM_REGULATORY_BAND_NO_HT40,
-		},
-	},
-	.temperature = iwlagn_temperature,
-};
-
 static const struct iwl_base_params iwl1000_base_params = {
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
 	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
@@ -166,15 +64,13 @@
 	.support_ct_kill_exit = true,
 	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
 	.chain_noise_scale = 1000,
-	.wd_timeout = IWL_DEF_WD_TIMEOUT,
+	.wd_timeout = IWL_WATCHHDOG_DISABLED,
 	.max_event_log_size = 128,
-	.wd_disable = true,
 };
 
 static const struct iwl_ht_params iwl1000_ht_params = {
 	.ht_greenfield_support = true,
 	.use_rts_for_aggregation = true, /* use rts/cts protection */
-	.smps_mode = IEEE80211_SMPS_DYNAMIC,
 };
 
 #define IWL_DEVICE_1000						\
@@ -182,11 +78,11 @@
 	.ucode_api_max = IWL1000_UCODE_API_MAX,			\
 	.ucode_api_ok = IWL1000_UCODE_API_OK,			\
 	.ucode_api_min = IWL1000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_1000,		\
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
 	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
 	.eeprom_ver = EEPROM_1000_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,	\
-	.lib = &iwl1000_lib,					\
 	.base_params = &iwl1000_base_params,			\
 	.led_mode = IWL_LED_BLINK
 
@@ -206,11 +102,11 @@
 	.ucode_api_max = IWL100_UCODE_API_MAX,			\
 	.ucode_api_ok = IWL100_UCODE_API_OK,			\
 	.ucode_api_min = IWL100_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_100,			\
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
 	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
 	.eeprom_ver = EEPROM_1000_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,	\
-	.lib = &iwl1000_lib,					\
 	.base_params = &iwl1000_base_params,			\
 	.led_mode = IWL_LED_RF_STATE,				\
 	.rx_with_siso_diversity = true
@@ -226,5 +122,5 @@
 	IWL_DEVICE_100,
 };
 
-MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 5635b9e..7f79341 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -24,25 +24,12 @@
  *
  *****************************************************************************/
 
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
 #include <linux/stringify.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-agn-hw.h"
-#include "iwl-shared.h"
+#include "iwl-config.h"
 #include "iwl-cfg.h"
+#include "iwl-agn-hw.h"
+#include "iwl-commands.h" /* needed for BT for now */
 
 /* Highest firmware API version supported */
 #define IWL2030_UCODE_API_MAX 6
@@ -51,10 +38,10 @@
 #define IWL135_UCODE_API_MAX 6
 
 /* Oldest version we won't warn about */
-#define IWL2030_UCODE_API_OK 5
-#define IWL2000_UCODE_API_OK 5
-#define IWL105_UCODE_API_OK 5
-#define IWL135_UCODE_API_OK 5
+#define IWL2030_UCODE_API_OK 6
+#define IWL2000_UCODE_API_OK 6
+#define IWL105_UCODE_API_OK 6
+#define IWL135_UCODE_API_OK 6
 
 /* Lowest firmware API version supported */
 #define IWL2030_UCODE_API_MIN 5
@@ -62,6 +49,11 @@
 #define IWL105_UCODE_API_MIN 5
 #define IWL135_UCODE_API_MIN 5
 
+/* EEPROM version */
+#define EEPROM_2000_TX_POWER_VERSION	(6)
+#define EEPROM_2000_EEPROM_VERSION	(0x805)
+
+
 #define IWL2030_FW_PRE "iwlwifi-2030-"
 #define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
 
@@ -74,105 +66,9 @@
 #define IWL135_FW_PRE "iwlwifi-135-"
 #define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
 
-static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
-{
-	/* want Celsius */
-	hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
-	hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
-}
-
-/* NIC configuration for 2000 series */
-static void iwl2000_nic_config(struct iwl_priv *priv)
-{
-	iwl_rf_config(priv);
-
-	if (cfg(priv)->iq_invert)
-		iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
-			    CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
-}
-
-static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
-	.min_nrg_cck = 97,
-	.auto_corr_min_ofdm = 80,
-	.auto_corr_min_ofdm_mrc = 128,
-	.auto_corr_min_ofdm_x1 = 105,
-	.auto_corr_min_ofdm_mrc_x1 = 192,
-
-	.auto_corr_max_ofdm = 145,
-	.auto_corr_max_ofdm_mrc = 232,
-	.auto_corr_max_ofdm_x1 = 110,
-	.auto_corr_max_ofdm_mrc_x1 = 232,
-
-	.auto_corr_min_cck = 125,
-	.auto_corr_max_cck = 175,
-	.auto_corr_min_cck_mrc = 160,
-	.auto_corr_max_cck_mrc = 310,
-	.nrg_th_cck = 97,
-	.nrg_th_ofdm = 100,
-
-	.barker_corr_th_min = 190,
-	.barker_corr_th_min_mrc = 390,
-	.nrg_th_cca = 62,
-};
-
-static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
-{
-	hw_params(priv).ht40_channel =  BIT(IEEE80211_BAND_2GHZ);
-
-	hw_params(priv).tx_chains_num =
-		num_of_ant(hw_params(priv).valid_tx_ant);
-	if (cfg(priv)->rx_with_siso_diversity)
-		hw_params(priv).rx_chains_num = 1;
-	else
-		hw_params(priv).rx_chains_num =
-			num_of_ant(hw_params(priv).valid_rx_ant);
-
-	iwl2000_set_ct_threshold(priv);
-
-	/* Set initial sensitivity parameters */
-	hw_params(priv).sens = &iwl2000_sensitivity;
-}
-
-static struct iwl_lib_ops iwl2000_lib = {
-	.set_hw_params = iwl2000_hw_set_hw_params,
-	.nic_config = iwl2000_nic_config,
-	.eeprom_ops = {
-		.regulatory_bands = {
-			EEPROM_REG_BAND_1_CHANNELS,
-			EEPROM_REG_BAND_2_CHANNELS,
-			EEPROM_REG_BAND_3_CHANNELS,
-			EEPROM_REG_BAND_4_CHANNELS,
-			EEPROM_REG_BAND_5_CHANNELS,
-			EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
-			EEPROM_REGULATORY_BAND_NO_HT40,
-		},
-		.enhanced_txpower = true,
-	},
-	.temperature = iwlagn_temperature,
-};
-
-static struct iwl_lib_ops iwl2030_lib = {
-	.set_hw_params = iwl2000_hw_set_hw_params,
-	.nic_config = iwl2000_nic_config,
-	.eeprom_ops = {
-		.regulatory_bands = {
-			EEPROM_REG_BAND_1_CHANNELS,
-			EEPROM_REG_BAND_2_CHANNELS,
-			EEPROM_REG_BAND_3_CHANNELS,
-			EEPROM_REG_BAND_4_CHANNELS,
-			EEPROM_REG_BAND_5_CHANNELS,
-			EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
-			EEPROM_REGULATORY_BAND_NO_HT40,
-		},
-		.enhanced_txpower = true,
-	},
-	.temperature = iwlagn_temperature,
-};
-
 static const struct iwl_base_params iwl2000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = 0,
 	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
 	.shadow_ram_support = true,
@@ -191,7 +87,6 @@
 static const struct iwl_base_params iwl2030_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = 0,
 	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
 	.shadow_ram_support = true,
@@ -226,16 +121,15 @@
 	.ucode_api_max = IWL2000_UCODE_API_MAX,			\
 	.ucode_api_ok = IWL2000_UCODE_API_OK,			\
 	.ucode_api_min = IWL2000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_2000,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
 	.max_data_size = IWL60_RTC_DATA_SIZE,			\
 	.eeprom_ver = EEPROM_2000_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
-	.lib = &iwl2000_lib,					\
 	.base_params = &iwl2000_base_params,			\
 	.need_temp_offset_calib = true,				\
 	.temp_offset_v2 = true,					\
-	.led_mode = IWL_LED_RF_STATE,				\
-	.iq_invert = true					\
+	.led_mode = IWL_LED_RF_STATE
 
 const struct iwl_cfg iwl2000_2bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -254,18 +148,17 @@
 	.ucode_api_max = IWL2030_UCODE_API_MAX,			\
 	.ucode_api_ok = IWL2030_UCODE_API_OK,			\
 	.ucode_api_min = IWL2030_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_2030,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
 	.max_data_size = IWL60_RTC_DATA_SIZE,			\
 	.eeprom_ver = EEPROM_2000_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
-	.lib = &iwl2030_lib,					\
 	.base_params = &iwl2030_base_params,			\
 	.bt_params = &iwl2030_bt_params,			\
 	.need_temp_offset_calib = true,				\
 	.temp_offset_v2 = true,					\
 	.led_mode = IWL_LED_RF_STATE,				\
-	.adv_pm = true,						\
-	.iq_invert = true					\
+	.adv_pm = true
 
 const struct iwl_cfg iwl2030_2bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -278,18 +171,17 @@
 	.ucode_api_max = IWL105_UCODE_API_MAX,			\
 	.ucode_api_ok = IWL105_UCODE_API_OK,			\
 	.ucode_api_min = IWL105_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_105,			\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
 	.max_data_size = IWL60_RTC_DATA_SIZE,			\
 	.eeprom_ver = EEPROM_2000_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
-	.lib = &iwl2000_lib,					\
 	.base_params = &iwl2000_base_params,			\
 	.need_temp_offset_calib = true,				\
 	.temp_offset_v2 = true,					\
 	.led_mode = IWL_LED_RF_STATE,				\
 	.adv_pm = true,						\
-	.rx_with_siso_diversity = true,				\
-	.iq_invert = true					\
+	.rx_with_siso_diversity = true
 
 const struct iwl_cfg iwl105_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -308,19 +200,18 @@
 	.ucode_api_max = IWL135_UCODE_API_MAX,			\
 	.ucode_api_ok = IWL135_UCODE_API_OK,			\
 	.ucode_api_min = IWL135_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_135,			\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
 	.max_data_size = IWL60_RTC_DATA_SIZE,			\
 	.eeprom_ver = EEPROM_2000_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
-	.lib = &iwl2030_lib,					\
 	.base_params = &iwl2030_base_params,			\
 	.bt_params = &iwl2030_bt_params,			\
 	.need_temp_offset_calib = true,				\
 	.temp_offset_v2 = true,					\
 	.led_mode = IWL_LED_RF_STATE,				\
 	.adv_pm = true,						\
-	.rx_with_siso_diversity = true,				\
-	.iq_invert = true					\
+	.rx_with_siso_diversity = true
 
 const struct iwl_cfg iwl135_bgn_cfg = {
 	.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
@@ -328,7 +219,7 @@
 	.ht_params = &iwl2000_ht_params,
 };
 
-MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
+MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
+MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a805e97..8e26bc8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -24,299 +24,47 @@
  *
  *****************************************************************************/
 
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
 #include <linux/stringify.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-agn-hw.h"
-#include "iwl-trans.h"
-#include "iwl-shared.h"
+#include "iwl-config.h"
 #include "iwl-cfg.h"
-#include "iwl-prph.h"
+#include "iwl-agn-hw.h"
+#include "iwl-csr.h"
 
 /* Highest firmware API version supported */
 #define IWL5000_UCODE_API_MAX 5
 #define IWL5150_UCODE_API_MAX 2
 
+/* Oldest version we won't warn about */
+#define IWL5000_UCODE_API_OK 5
+#define IWL5150_UCODE_API_OK 2
+
 /* Lowest firmware API version supported */
 #define IWL5000_UCODE_API_MIN 1
 #define IWL5150_UCODE_API_MIN 1
 
+/* EEPROM versions */
+#define EEPROM_5000_TX_POWER_VERSION	(4)
+#define EEPROM_5000_EEPROM_VERSION	(0x11A)
+#define EEPROM_5050_TX_POWER_VERSION	(4)
+#define EEPROM_5050_EEPROM_VERSION	(0x21E)
+
 #define IWL5000_FW_PRE "iwlwifi-5000-"
 #define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL5150_FW_PRE "iwlwifi-5150-"
 #define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
 
-/* NIC configuration for 5000 series */
-static void iwl5000_nic_config(struct iwl_priv *priv)
-{
-	iwl_rf_config(priv);
-
-	/* W/A : NIC is stuck in a reset state after Early PCIe power off
-	 * (PCIe power is lost before PERST# is asserted),
-	 * causing ME FW to lose ownership and not being able to obtain it back.
-	 */
-	iwl_set_bits_mask_prph(trans(priv), APMG_PS_CTRL_REG,
-				APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
-				~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
-}
-
-static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
-	.min_nrg_cck = 100,
-	.auto_corr_min_ofdm = 90,
-	.auto_corr_min_ofdm_mrc = 170,
-	.auto_corr_min_ofdm_x1 = 105,
-	.auto_corr_min_ofdm_mrc_x1 = 220,
-
-	.auto_corr_max_ofdm = 120,
-	.auto_corr_max_ofdm_mrc = 210,
-	.auto_corr_max_ofdm_x1 = 120,
-	.auto_corr_max_ofdm_mrc_x1 = 240,
-
-	.auto_corr_min_cck = 125,
-	.auto_corr_max_cck = 200,
-	.auto_corr_min_cck_mrc = 200,
-	.auto_corr_max_cck_mrc = 400,
-	.nrg_th_cck = 100,
-	.nrg_th_ofdm = 100,
-
-	.barker_corr_th_min = 190,
-	.barker_corr_th_min_mrc = 390,
-	.nrg_th_cca = 62,
-};
-
-static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
-	.min_nrg_cck = 95,
-	.auto_corr_min_ofdm = 90,
-	.auto_corr_min_ofdm_mrc = 170,
-	.auto_corr_min_ofdm_x1 = 105,
-	.auto_corr_min_ofdm_mrc_x1 = 220,
-
-	.auto_corr_max_ofdm = 120,
-	.auto_corr_max_ofdm_mrc = 210,
-	/* max = min for performance bug in 5150 DSP */
-	.auto_corr_max_ofdm_x1 = 105,
-	.auto_corr_max_ofdm_mrc_x1 = 220,
-
-	.auto_corr_min_cck = 125,
-	.auto_corr_max_cck = 200,
-	.auto_corr_min_cck_mrc = 170,
-	.auto_corr_max_cck_mrc = 400,
-	.nrg_th_cck = 95,
-	.nrg_th_ofdm = 95,
-
-	.barker_corr_th_min = 190,
-	.barker_corr_th_min_mrc = 390,
-	.nrg_th_cca = 62,
-};
-
-#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF	(-5)
-
-static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd)
-{
-	u16 temperature, voltage;
-	__le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd,
-				EEPROM_KELVIN_TEMPERATURE);
-
-	temperature = le16_to_cpu(temp_calib[0]);
-	voltage = le16_to_cpu(temp_calib[1]);
-
-	/* offset = temp - volt / coeff */
-	return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
-}
-
-static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
-{
-	const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
-	s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
-			iwl_temp_calib_to_offset(priv->shrd);
-
-	hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
-}
-
-static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
-{
-	/* want Celsius */
-	hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
-}
-
-static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
-{
-	hw_params(priv).ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
-					BIT(IEEE80211_BAND_5GHZ);
-
-	hw_params(priv).tx_chains_num =
-		num_of_ant(hw_params(priv).valid_tx_ant);
-	hw_params(priv).rx_chains_num =
-		num_of_ant(hw_params(priv).valid_rx_ant);
-
-	iwl5000_set_ct_threshold(priv);
-
-	/* Set initial sensitivity parameters */
-	hw_params(priv).sens = &iwl5000_sensitivity;
-}
-
-static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
-{
-	hw_params(priv).ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
-					BIT(IEEE80211_BAND_5GHZ);
-
-	hw_params(priv).tx_chains_num =
-		num_of_ant(hw_params(priv).valid_tx_ant);
-	hw_params(priv).rx_chains_num =
-		num_of_ant(hw_params(priv).valid_rx_ant);
-
-	iwl5150_set_ct_threshold(priv);
-
-	/* Set initial sensitivity parameters */
-	hw_params(priv).sens = &iwl5150_sensitivity;
-}
-
-static void iwl5150_temperature(struct iwl_priv *priv)
-{
-	u32 vt = 0;
-	s32 offset =  iwl_temp_calib_to_offset(priv->shrd);
-
-	vt = le32_to_cpu(priv->statistics.common.temperature);
-	vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
-	/* now vt hold the temperature in Kelvin */
-	priv->temperature = KELVIN_TO_CELSIUS(vt);
-	iwl_tt_handler(priv);
-}
-
-static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
-				     struct ieee80211_channel_switch *ch_switch)
-{
-	/*
-	 * MULTI-FIXME
-	 * See iwlagn_mac_channel_switch.
-	 */
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct iwl5000_channel_switch_cmd cmd;
-	const struct iwl_channel_info *ch_info;
-	u32 switch_time_in_usec, ucode_switch_time;
-	u16 ch;
-	u32 tsf_low;
-	u8 switch_count;
-	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
-	struct ieee80211_vif *vif = ctx->vif;
-	struct iwl_host_cmd hcmd = {
-		.id = REPLY_CHANNEL_SWITCH,
-		.len = { sizeof(cmd), },
-		.flags = CMD_SYNC,
-		.data = { &cmd, },
-	};
-
-	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
-	ch = ch_switch->channel->hw_value;
-	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
-		      ctx->active.channel, ch);
-	cmd.channel = cpu_to_le16(ch);
-	cmd.rxon_flags = ctx->staging.flags;
-	cmd.rxon_filter_flags = ctx->staging.filter_flags;
-	switch_count = ch_switch->count;
-	tsf_low = ch_switch->timestamp & 0x0ffffffff;
-	/*
-	 * calculate the ucode channel switch time
-	 * adding TSF as one of the factor for when to switch
-	 */
-	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
-		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
-		    beacon_interval)) {
-			switch_count -= (priv->ucode_beacon_time -
-				tsf_low) / beacon_interval;
-		} else
-			switch_count = 0;
-	}
-	if (switch_count <= 1)
-		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
-	else {
-		switch_time_in_usec =
-			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
-		ucode_switch_time = iwl_usecs_to_beacons(priv,
-							 switch_time_in_usec,
-							 beacon_interval);
-		cmd.switch_time = iwl_add_beacon_time(priv,
-						      priv->ucode_beacon_time,
-						      ucode_switch_time,
-						      beacon_interval);
-	}
-	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
-		      cmd.switch_time);
-	ch_info = iwl_get_channel_info(priv, priv->band, ch);
-	if (ch_info)
-		cmd.expect_beacon = is_channel_radar(ch_info);
-	else {
-		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-			ctx->active.channel, ch);
-		return -EFAULT;
-	}
-
-	return iwl_dvm_send_cmd(priv, &hcmd);
-}
-
-static struct iwl_lib_ops iwl5000_lib = {
-	.set_hw_params = iwl5000_hw_set_hw_params,
-	.set_channel_switch = iwl5000_hw_channel_switch,
-	.nic_config = iwl5000_nic_config,
-	.eeprom_ops = {
-		.regulatory_bands = {
-			EEPROM_REG_BAND_1_CHANNELS,
-			EEPROM_REG_BAND_2_CHANNELS,
-			EEPROM_REG_BAND_3_CHANNELS,
-			EEPROM_REG_BAND_4_CHANNELS,
-			EEPROM_REG_BAND_5_CHANNELS,
-			EEPROM_REG_BAND_24_HT40_CHANNELS,
-			EEPROM_REG_BAND_52_HT40_CHANNELS
-		},
-	},
-	.temperature = iwlagn_temperature,
-};
-
-static struct iwl_lib_ops iwl5150_lib = {
-	.set_hw_params = iwl5150_hw_set_hw_params,
-	.set_channel_switch = iwl5000_hw_channel_switch,
-	.nic_config = iwl5000_nic_config,
-	.eeprom_ops = {
-		.regulatory_bands = {
-			EEPROM_REG_BAND_1_CHANNELS,
-			EEPROM_REG_BAND_2_CHANNELS,
-			EEPROM_REG_BAND_3_CHANNELS,
-			EEPROM_REG_BAND_4_CHANNELS,
-			EEPROM_REG_BAND_5_CHANNELS,
-			EEPROM_REG_BAND_24_HT40_CHANNELS,
-			EEPROM_REG_BAND_52_HT40_CHANNELS
-		},
-	},
-	.temperature = iwl5150_temperature,
-};
-
 static const struct iwl_base_params iwl5000_base_params = {
 	.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
 	.led_compensation = 51,
 	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
 	.chain_noise_scale = 1000,
-	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.wd_timeout = IWL_WATCHHDOG_DISABLED,
 	.max_event_log_size = 512,
 	.no_idle_support = true,
-	.wd_disable = true,
 };
 
 static const struct iwl_ht_params iwl5000_ht_params = {
@@ -326,12 +74,13 @@
 #define IWL_DEVICE_5000						\
 	.fw_name_pre = IWL5000_FW_PRE,				\
 	.ucode_api_max = IWL5000_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL5000_UCODE_API_OK,			\
 	.ucode_api_min = IWL5000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_5000,		\
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
 	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
 	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,	\
-	.lib = &iwl5000_lib,					\
 	.base_params = &iwl5000_base_params,			\
 	.led_mode = IWL_LED_BLINK
 
@@ -371,12 +120,13 @@
 	.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
 	.fw_name_pre = IWL5000_FW_PRE,
 	.ucode_api_max = IWL5000_UCODE_API_MAX,
+	.ucode_api_ok = IWL5000_UCODE_API_OK,
 	.ucode_api_min = IWL5000_UCODE_API_MIN,
+	.device_family = IWL_DEVICE_FAMILY_5000,
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,
 	.max_data_size = IWLAGN_RTC_DATA_SIZE,
 	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
-	.lib = &iwl5000_lib,
 	.base_params = &iwl5000_base_params,
 	.ht_params = &iwl5000_ht_params,
 	.led_mode = IWL_LED_BLINK,
@@ -386,12 +136,13 @@
 #define IWL_DEVICE_5150						\
 	.fw_name_pre = IWL5150_FW_PRE,				\
 	.ucode_api_max = IWL5150_UCODE_API_MAX,			\
+	.ucode_api_ok = IWL5150_UCODE_API_OK,			\
 	.ucode_api_min = IWL5150_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_5150,		\
 	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
 	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
 	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,	\
-	.lib = &iwl5150_lib,					\
 	.base_params = &iwl5000_base_params,			\
 	.no_xtal_calib = true,					\
 	.led_mode = IWL_LED_BLINK,				\
@@ -409,5 +160,5 @@
 	IWL_DEVICE_5150,
 };
 
-MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
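
The iwl-1000/2000/5000 rewrites above drop the per-device struct iwl_lib_ops function-pointer tables (sensitivity ranges, NIC config, channel-switch handlers) from each .cfg and instead tag every config with a .device_family value alongside the shared base/HT parameter blocks, so common code can branch on family where it still must. A hedged, simplified sketch of that data-driven layout, using made-up example_* names rather than the real iwlwifi types:

    /* Behaviour keyed by an enum instead of a per-device vtable. */
    enum example_device_family {
            EXAMPLE_DEVICE_FAMILY_1000,
            EXAMPLE_DEVICE_FAMILY_5000,
    };

    struct example_base_params {
            unsigned int num_of_queues;
            unsigned int eeprom_size;
    };

    struct example_cfg {
            const char *name;
            enum example_device_family device_family;
            const struct example_base_params *base_params;
            unsigned int ucode_api_max;
            unsigned int ucode_api_ok;
            unsigned int ucode_api_min;
    };

    static const struct example_base_params example_1000_base = {
            .num_of_queues = 20,
            .eeprom_size = 2048,
    };

    static const struct example_cfg example_1000_bgn_cfg = {
            .name = "Example Wireless-N 1000 BGN",
            .device_family = EXAMPLE_DEVICE_FAMILY_1000,
            .base_params = &example_1000_base,
            .ucode_api_max = 5,
            .ucode_api_ok = 5,
            .ucode_api_min = 1,
    };

The values above are placeholders; the point is only the shape of the refactor: const configuration data shared across chips, with device-specific behaviour selected by the family tag rather than by a .lib pointer per chip.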
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 64060cd..381b02c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -24,26 +24,12 @@
  *
  *****************************************************************************/
 
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
 #include <linux/stringify.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-agn-hw.h"
-#include "iwl-trans.h"
-#include "iwl-shared.h"
+#include "iwl-config.h"
 #include "iwl-cfg.h"
+#include "iwl-agn-hw.h"
+#include "iwl-commands.h" /* needed for BT for now */
 
 /* Highest firmware API version supported */
 #define IWL6000_UCODE_API_MAX 6
@@ -53,12 +39,28 @@
 /* Oldest version we won't warn about */
 #define IWL6000_UCODE_API_OK 4
 #define IWL6000G2_UCODE_API_OK 5
+#define IWL6050_UCODE_API_OK 5
+#define IWL6000G2B_UCODE_API_OK 6
 
 /* Lowest firmware API version supported */
 #define IWL6000_UCODE_API_MIN 4
 #define IWL6050_UCODE_API_MIN 4
 #define IWL6000G2_UCODE_API_MIN 4
 
+/* EEPROM versions */
+#define EEPROM_6000_TX_POWER_VERSION	(4)
+#define EEPROM_6000_EEPROM_VERSION	(0x423)
+#define EEPROM_6050_TX_POWER_VERSION	(4)
+#define EEPROM_6050_EEPROM_VERSION	(0x532)
+#define EEPROM_6150_TX_POWER_VERSION	(6)
+#define EEPROM_6150_EEPROM_VERSION	(0x553)
+#define EEPROM_6005_TX_POWER_VERSION	(6)
+#define EEPROM_6005_EEPROM_VERSION	(0x709)
+#define EEPROM_6030_TX_POWER_VERSION	(6)
+#define EEPROM_6030_EEPROM_VERSION	(0x709)
+#define EEPROM_6035_TX_POWER_VERSION	(6)
+#define EEPROM_6035_EEPROM_VERSION	(0x753)
+
 #define IWL6000_FW_PRE "iwlwifi-6000-"
 #define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
 
@@ -71,205 +73,9 @@
 #define IWL6030_FW_PRE "iwlwifi-6000g2b-"
 #define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
 
-static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
-{
-	/* want Celsius */
-	hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
-	hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
-}
-
-static void iwl6050_additional_nic_config(struct iwl_priv *priv)
-{
-	/* Indicate calibration version to uCode. */
-	if (iwl_eeprom_calib_version(priv->shrd) >= 6)
-		iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
-				CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
-}
-
-static void iwl6150_additional_nic_config(struct iwl_priv *priv)
-{
-	/* Indicate calibration version to uCode. */
-	if (iwl_eeprom_calib_version(priv->shrd) >= 6)
-		iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
-				CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
-	iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
-		    CSR_GP_DRIVER_REG_BIT_6050_1x2);
-}
-
-static void iwl6000i_additional_nic_config(struct iwl_priv *priv)
-{
-	/* 2x2 IPA phy type */
-	iwl_write32(trans(priv), CSR_GP_DRIVER_REG,
-		     CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
-}
-
-/* NIC configuration for 6000 series */
-static void iwl6000_nic_config(struct iwl_priv *priv)
-{
-	iwl_rf_config(priv);
-
-	/* do additional nic configuration if needed */
-	if (cfg(priv)->additional_nic_config)
-		cfg(priv)->additional_nic_config(priv);
-}
-
-static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
-	.min_nrg_cck = 110,
-	.auto_corr_min_ofdm = 80,
-	.auto_corr_min_ofdm_mrc = 128,
-	.auto_corr_min_ofdm_x1 = 105,
-	.auto_corr_min_ofdm_mrc_x1 = 192,
-
-	.auto_corr_max_ofdm = 145,
-	.auto_corr_max_ofdm_mrc = 232,
-	.auto_corr_max_ofdm_x1 = 110,
-	.auto_corr_max_ofdm_mrc_x1 = 232,
-
-	.auto_corr_min_cck = 125,
-	.auto_corr_max_cck = 175,
-	.auto_corr_min_cck_mrc = 160,
-	.auto_corr_max_cck_mrc = 310,
-	.nrg_th_cck = 110,
-	.nrg_th_ofdm = 110,
-
-	.barker_corr_th_min = 190,
-	.barker_corr_th_min_mrc = 336,
-	.nrg_th_cca = 62,
-};
-
-static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
-{
-	hw_params(priv).ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
-					BIT(IEEE80211_BAND_5GHZ);
-
-	hw_params(priv).tx_chains_num =
-		num_of_ant(hw_params(priv).valid_tx_ant);
-	if (cfg(priv)->rx_with_siso_diversity)
-		hw_params(priv).rx_chains_num = 1;
-	else
-		hw_params(priv).rx_chains_num =
-			num_of_ant(hw_params(priv).valid_rx_ant);
-
-	iwl6000_set_ct_threshold(priv);
-
-	/* Set initial sensitivity parameters */
-	hw_params(priv).sens = &iwl6000_sensitivity;
-
-}
-
-static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
-				     struct ieee80211_channel_switch *ch_switch)
-{
-	/*
-	 * MULTI-FIXME
-	 * See iwlagn_mac_channel_switch.
-	 */
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct iwl6000_channel_switch_cmd cmd;
-	const struct iwl_channel_info *ch_info;
-	u32 switch_time_in_usec, ucode_switch_time;
-	u16 ch;
-	u32 tsf_low;
-	u8 switch_count;
-	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
-	struct ieee80211_vif *vif = ctx->vif;
-	struct iwl_host_cmd hcmd = {
-		.id = REPLY_CHANNEL_SWITCH,
-		.len = { sizeof(cmd), },
-		.flags = CMD_SYNC,
-		.data = { &cmd, },
-	};
-
-	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
-	ch = ch_switch->channel->hw_value;
-	IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
-		      ctx->active.channel, ch);
-	cmd.channel = cpu_to_le16(ch);
-	cmd.rxon_flags = ctx->staging.flags;
-	cmd.rxon_filter_flags = ctx->staging.filter_flags;
-	switch_count = ch_switch->count;
-	tsf_low = ch_switch->timestamp & 0x0ffffffff;
-	/*
-	 * calculate the ucode channel switch time
-	 * adding TSF as one of the factor for when to switch
-	 */
-	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
-		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
-		    beacon_interval)) {
-			switch_count -= (priv->ucode_beacon_time -
-				tsf_low) / beacon_interval;
-		} else
-			switch_count = 0;
-	}
-	if (switch_count <= 1)
-		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
-	else {
-		switch_time_in_usec =
-			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
-		ucode_switch_time = iwl_usecs_to_beacons(priv,
-							 switch_time_in_usec,
-							 beacon_interval);
-		cmd.switch_time = iwl_add_beacon_time(priv,
-						      priv->ucode_beacon_time,
-						      ucode_switch_time,
-						      beacon_interval);
-	}
-	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
-		      cmd.switch_time);
-	ch_info = iwl_get_channel_info(priv, priv->band, ch);
-	if (ch_info)
-		cmd.expect_beacon = is_channel_radar(ch_info);
-	else {
-		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-			ctx->active.channel, ch);
-		return -EFAULT;
-	}
-
-	return iwl_dvm_send_cmd(priv, &hcmd);
-}
-
-static struct iwl_lib_ops iwl6000_lib = {
-	.set_hw_params = iwl6000_hw_set_hw_params,
-	.set_channel_switch = iwl6000_hw_channel_switch,
-	.nic_config = iwl6000_nic_config,
-	.eeprom_ops = {
-		.regulatory_bands = {
-			EEPROM_REG_BAND_1_CHANNELS,
-			EEPROM_REG_BAND_2_CHANNELS,
-			EEPROM_REG_BAND_3_CHANNELS,
-			EEPROM_REG_BAND_4_CHANNELS,
-			EEPROM_REG_BAND_5_CHANNELS,
-			EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
-			EEPROM_REG_BAND_52_HT40_CHANNELS
-		},
-		.enhanced_txpower = true,
-	},
-	.temperature = iwlagn_temperature,
-};
-
-static struct iwl_lib_ops iwl6030_lib = {
-	.set_hw_params = iwl6000_hw_set_hw_params,
-	.set_channel_switch = iwl6000_hw_channel_switch,
-	.nic_config = iwl6000_nic_config,
-	.eeprom_ops = {
-		.regulatory_bands = {
-			EEPROM_REG_BAND_1_CHANNELS,
-			EEPROM_REG_BAND_2_CHANNELS,
-			EEPROM_REG_BAND_3_CHANNELS,
-			EEPROM_REG_BAND_4_CHANNELS,
-			EEPROM_REG_BAND_5_CHANNELS,
-			EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
-			EEPROM_REG_BAND_52_HT40_CHANNELS
-		},
-		.enhanced_txpower = true,
-	},
-	.temperature = iwlagn_temperature,
-};
-
 static const struct iwl_base_params iwl6000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = 0,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
@@ -286,7 +92,6 @@
 static const struct iwl_base_params iwl6050_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = 0,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
 	.shadow_ram_support = true,
@@ -303,7 +108,6 @@
 static const struct iwl_base_params iwl6000_g2_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE,
 	.num_of_queues = IWLAGN_NUM_QUEUES,
-	.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
 	.pll_cfg_val = 0,
 	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
 	.shadow_ram_support = true,
@@ -336,11 +140,11 @@
 	.ucode_api_max = IWL6000G2_UCODE_API_MAX,		\
 	.ucode_api_ok = IWL6000G2_UCODE_API_OK,			\
 	.ucode_api_min = IWL6000G2_UCODE_API_MIN,		\
+	.device_family = IWL_DEVICE_FAMILY_6005,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
 	.max_data_size = IWL60_RTC_DATA_SIZE,			\
 	.eeprom_ver = EEPROM_6005_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION,	\
-	.lib = &iwl6000_lib,					\
 	.base_params = &iwl6000_g2_base_params,			\
 	.need_temp_offset_calib = true,				\
 	.led_mode = IWL_LED_RF_STATE
@@ -388,13 +192,13 @@
 #define IWL_DEVICE_6030						\
 	.fw_name_pre = IWL6030_FW_PRE,				\
 	.ucode_api_max = IWL6000G2_UCODE_API_MAX,		\
-	.ucode_api_ok = IWL6000G2_UCODE_API_OK,			\
+	.ucode_api_ok = IWL6000G2B_UCODE_API_OK,		\
 	.ucode_api_min = IWL6000G2_UCODE_API_MIN,		\
+	.device_family = IWL_DEVICE_FAMILY_6030,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
 	.max_data_size = IWL60_RTC_DATA_SIZE,			\
 	.eeprom_ver = EEPROM_6030_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
-	.lib = &iwl6030_lib,					\
 	.base_params = &iwl6000_g2_base_params,			\
 	.bt_params = &iwl6000_bt_params,			\
 	.need_temp_offset_calib = true,				\
@@ -461,14 +265,13 @@
 	.ucode_api_max = IWL6000_UCODE_API_MAX,			\
 	.ucode_api_ok = IWL6000_UCODE_API_OK,			\
 	.ucode_api_min = IWL6000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6000i,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
 	.max_data_size = IWL60_RTC_DATA_SIZE,			\
 	.valid_tx_ant = ANT_BC,		/* .cfg overwrite */	\
 	.valid_rx_ant = ANT_BC,		/* .cfg overwrite */	\
 	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,	\
-	.lib = &iwl6000_lib,					\
-	.additional_nic_config = iwl6000i_additional_nic_config,\
 	.base_params = &iwl6000_base_params,			\
 	.led_mode = IWL_LED_BLINK
 
@@ -492,12 +295,11 @@
 	.fw_name_pre = IWL6050_FW_PRE,				\
 	.ucode_api_max = IWL6050_UCODE_API_MAX,			\
 	.ucode_api_min = IWL6050_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6050,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
 	.max_data_size = IWL60_RTC_DATA_SIZE,			\
 	.valid_tx_ant = ANT_AB,		/* .cfg overwrite */	\
 	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */	\
-	.lib = &iwl6000_lib,					\
-	.additional_nic_config = iwl6050_additional_nic_config,	\
 	.eeprom_ver = EEPROM_6050_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,	\
 	.base_params = &iwl6050_base_params,			\
@@ -519,10 +321,9 @@
 	.fw_name_pre = IWL6050_FW_PRE,				\
 	.ucode_api_max = IWL6050_UCODE_API_MAX,			\
 	.ucode_api_min = IWL6050_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6150,		\
 	.max_inst_size = IWL60_RTC_INST_SIZE,			\
 	.max_data_size = IWL60_RTC_DATA_SIZE,			\
-	.lib = &iwl6000_lib,					\
-	.additional_nic_config = iwl6150_additional_nic_config,	\
 	.eeprom_ver = EEPROM_6150_EEPROM_VERSION,		\
 	.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,	\
 	.base_params = &iwl6050_base_params,			\
@@ -546,17 +347,17 @@
 	.ucode_api_max = IWL6000_UCODE_API_MAX,
 	.ucode_api_ok = IWL6000_UCODE_API_OK,
 	.ucode_api_min = IWL6000_UCODE_API_MIN,
+	.device_family = IWL_DEVICE_FAMILY_6000,
 	.max_inst_size = IWL60_RTC_INST_SIZE,
 	.max_data_size = IWL60_RTC_DATA_SIZE,
 	.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
 	.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
-	.lib = &iwl6000_lib,
 	.base_params = &iwl6000_base_params,
 	.ht_params = &iwl6000_ht_params,
 	.led_mode = IWL_LED_BLINK,
 };
 
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 84cbe7b..95f27f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -64,7 +64,6 @@
 #include <net/mac80211.h>
 
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-agn-calib.h"
 #include "iwl-trans.h"
 #include "iwl-agn.h"
@@ -190,7 +189,7 @@
 	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
 	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
 	struct iwl_sensitivity_data *data = NULL;
-	const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
+	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
 
 	data = &(priv->sensitivity_data);
 
@@ -373,7 +372,7 @@
 	u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
 	u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
 	struct iwl_sensitivity_data *data = NULL;
-	const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
+	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
 
 	data = &(priv->sensitivity_data);
 
@@ -521,7 +520,7 @@
 
 	iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
 
-	if (cfg(priv)->base_params->hd_v2) {
+	if (priv->cfg->base_params->hd_v2) {
 		cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
 			HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
 		cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
@@ -597,9 +596,9 @@
 	int ret = 0;
 	int i;
 	struct iwl_sensitivity_data *data = NULL;
-	const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
+	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
 
-	if (priv->disable_sens_cal)
+	if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
 		return;
 
 	IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n");
@@ -663,7 +662,7 @@
 	struct statistics_rx_phy *ofdm, *cck;
 	struct statistics_general_data statis;
 
-	if (priv->disable_sens_cal)
+	if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
 		return;
 
 	data = &(priv->sensitivity_data);
@@ -833,28 +832,28 @@
 	 * To be safe, simply mask out any chains that we know
 	 * are not on the device.
 	 */
-	active_chains &= hw_params(priv).valid_rx_ant;
+	active_chains &= priv->hw_params.valid_rx_ant;
 
 	num_tx_chains = 0;
 	for (i = 0; i < NUM_RX_CHAINS; i++) {
 		/* loops on all the bits of
 		 * priv->hw_setting.valid_tx_ant */
 		u8 ant_msk = (1 << i);
-		if (!(hw_params(priv).valid_tx_ant & ant_msk))
+		if (!(priv->hw_params.valid_tx_ant & ant_msk))
 			continue;
 
 		num_tx_chains++;
 		if (data->disconn_array[i] == 0)
 			/* there is a Tx antenna connected */
 			break;
-		if (num_tx_chains == hw_params(priv).tx_chains_num &&
+		if (num_tx_chains == priv->hw_params.tx_chains_num &&
 		    data->disconn_array[i]) {
 			/*
 			 * If all chains are disconnected
 			 * connect the first valid tx chain
 			 */
 			first_chain =
-				find_first_chain(hw_params(priv).valid_tx_ant);
+				find_first_chain(priv->hw_params.valid_tx_ant);
 			data->disconn_array[first_chain] = 0;
 			active_chains |= BIT(first_chain);
 			IWL_DEBUG_CALIB(priv,
@@ -864,13 +863,13 @@
 		}
 	}
 
-	if (active_chains != hw_params(priv).valid_rx_ant &&
+	if (active_chains != priv->hw_params.valid_rx_ant &&
 	    active_chains != priv->chain_noise_data.active_chains)
 		IWL_DEBUG_CALIB(priv,
 				"Detected that not all antennas are connected! "
 				"Connected: %#x, valid: %#x.\n",
 				active_chains,
-				hw_params(priv).valid_rx_ant);
+				priv->hw_params.valid_rx_ant);
 
 	/* Save for use within RXON, TX, SCAN commands, etc. */
 	data->active_chains = active_chains;
@@ -895,7 +894,7 @@
 			continue;
 		}
 
-		delta_g = (cfg(priv)->base_params->chain_noise_scale *
+		delta_g = (priv->cfg->base_params->chain_noise_scale *
 			((s32)average_noise[default_chain] -
 			(s32)average_noise[i])) / 1500;
 
@@ -970,7 +969,7 @@
 	 */
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 
-	if (priv->disable_chain_noise_cal)
+	if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
 		return;
 
 	data = &(priv->chain_noise_data);
@@ -1051,11 +1050,11 @@
 		return;
 
 	/* Analyze signal for disconnected antenna */
-	if (cfg(priv)->bt_params &&
-	    cfg(priv)->bt_params->advanced_bt_coexist) {
+	if (priv->cfg->bt_params &&
+	    priv->cfg->bt_params->advanced_bt_coexist) {
 		/* Disable disconnected antenna algorithm for advanced
 		   bt coex, assuming valid antennas are connected */
-		data->active_chains = hw_params(priv).valid_rx_ant;
+		data->active_chains = priv->hw_params.valid_rx_ant;
 		for (i = 0; i < NUM_RX_CHAINS; i++)
 			if (!(data->active_chains & (1<<i)))
 				data->disconn_array[i] = 1;
@@ -1085,7 +1084,7 @@
 			min_average_noise, min_average_noise_antenna_i);
 
 	iwlagn_gain_computation(priv, average_noise,
-				find_first_chain(hw_params(priv).valid_rx_ant));
+				find_first_chain(priv->hw_params.valid_rx_ant));
 
 	/* Some power changes may have been made during the calibration.
 	 * Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index 9ed6683..dbe1378 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -63,7 +63,6 @@
 #define __iwl_calib_h__
 
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-commands.h"
 
 void iwl_chain_noise_calibration(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c b/drivers/net/wireless/iwlwifi/iwl-agn-devices.c
new file mode 100644
index 0000000..48533b3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-devices.c
@@ -0,0 +1,755 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+/*
+ * DVM device-specific data & functions
+ */
+#include "iwl-agn.h"
+#include "iwl-dev.h"
+#include "iwl-commands.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+/*
+ * 1000 series
+ * ===========
+ */
+
+/*
+ * For the 1000 series, use the advanced thermal throttling critical
+ * temperature threshold, but keep the legacy thermal management
+ * implementation for now.  The 1000 uCode uses the advanced thermal
+ * throttling API but does not implement ct_kill_exit based on the
+ * ct_kill exit temperature, so thermal throttling is still based on
+ * the legacy thermal throttling management.
+ * This code needs to be modified once the 1000 uCode has the advanced
+ * thermal throttling algorithm in place.
+ */
+static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
+{
+	/* want Celsius */
+	priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
+	priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 1000 series */
+static void iwl1000_nic_config(struct iwl_priv *priv)
+{
+	/* set CSR_HW_CONFIG_REG for uCode use */
+	iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+	/* Setting digital SVR for 1000 card to 1.32V */
+	/* locking is acquired in iwl_set_bits_mask_prph() function */
+	iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG,
+				APMG_SVR_DIGITAL_VOLTAGE_1_32,
+				~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
+}
+
+/**
+ * iwl_beacon_time_mask_low - mask for the low (time in usec) part of the beacon time
+ * @priv: pointer to iwl_priv data structure
+ * @tsf_bits: number of low bits used for the time-in-usec part
+ */
+static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
+					   u16 tsf_bits)
+{
+	return (1 << tsf_bits) - 1;
+}
+
+/**
+ * iwl_beacon_time_mask_high - mask for the high (beacon count) part of the beacon time
+ * @priv: pointer to iwl_priv data structure
+ * @tsf_bits: number of low bits used for the time-in-usec part
+ */
+static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
+					    u16 tsf_bits)
+{
+	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
+
+/*
+ * Extended beacon time format:
+ * the time in usec is packed into a 32-bit value in extended:internal format,
+ * where the extended part is the beacon count and the internal part is the
+ * time in usec within one beacon interval.
+ */
+static u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec,
+				u32 beacon_interval)
+{
+	u32 quot;
+	u32 rem;
+	u32 interval = beacon_interval * TIME_UNIT;
+
+	if (!interval || !usec)
+		return 0;
+
+	quot = (usec / interval) &
+		(iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
+		IWLAGN_EXT_BEACON_TIME_POS);
+	rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
+				   IWLAGN_EXT_BEACON_TIME_POS);
+
+	return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
+}
+
+/* base is usually what we get from the uCode with each received frame,
+ * i.e. the HW timer counter counting down
+ */
+static __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
+			   u32 addon, u32 beacon_interval)
+{
+	u32 base_low = base & iwl_beacon_time_mask_low(priv,
+				IWLAGN_EXT_BEACON_TIME_POS);
+	u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
+				IWLAGN_EXT_BEACON_TIME_POS);
+	u32 interval = beacon_interval * TIME_UNIT;
+	u32 res = (base & iwl_beacon_time_mask_high(priv,
+				IWLAGN_EXT_BEACON_TIME_POS)) +
+				(addon & iwl_beacon_time_mask_high(priv,
+				IWLAGN_EXT_BEACON_TIME_POS));
+
+	if (base_low > addon_low)
+		res += base_low - addon_low;
+	else if (base_low < addon_low) {
+		res += interval + base_low - addon_low;
+		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
+	} else
+		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
+
+	return cpu_to_le32(res);
+}
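
The extended:internal packing above is easier to see with concrete numbers. The following is a minimal, standalone sketch of the same split and is not part of this patch; EXT_POS and TIME_UNIT are illustrative stand-ins for IWLAGN_EXT_BEACON_TIME_POS and TIME_UNIT, whose real values live in the driver headers.

	#include <stdio.h>
	#include <stdint.h>

	#define EXT_POS   22     /* assumed split: low bits carry the usec part */
	#define TIME_UNIT 1024   /* assumed usec per time unit (TU) */

	static uint32_t mask_low(void)  { return (1u << EXT_POS) - 1; }
	static uint32_t mask_high(void) { return ((1u << (32 - EXT_POS)) - 1) << EXT_POS; }

	/* pack a usec count into beacon-count:usec-remainder form */
	static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval)
	{
		uint32_t interval = beacon_interval * TIME_UNIT;

		if (!interval || !usec)
			return 0;

		return (((usec / interval) & (mask_high() >> EXT_POS)) << EXT_POS) +
		       ((usec % interval) & mask_low());
	}

	int main(void)
	{
		uint32_t bi = 100;	/* beacon interval in TU */
		uint32_t usec = 3 * bi * TIME_UNIT + (bi * TIME_UNIT) / 2;
		uint32_t packed = usecs_to_beacons(usec, bi);

		/* prints "beacons=3 usec-in-interval=51200" */
		printf("beacons=%u usec-in-interval=%u\n",
		       packed >> EXT_POS, packed & mask_low());
		return 0;
	}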
+
+static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
+	.min_nrg_cck = 95,
+	.auto_corr_min_ofdm = 90,
+	.auto_corr_min_ofdm_mrc = 170,
+	.auto_corr_min_ofdm_x1 = 120,
+	.auto_corr_min_ofdm_mrc_x1 = 240,
+
+	.auto_corr_max_ofdm = 120,
+	.auto_corr_max_ofdm_mrc = 210,
+	.auto_corr_max_ofdm_x1 = 155,
+	.auto_corr_max_ofdm_mrc_x1 = 290,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 200,
+	.auto_corr_min_cck_mrc = 170,
+	.auto_corr_max_cck_mrc = 400,
+	.nrg_th_cck = 95,
+	.nrg_th_ofdm = 95,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
+static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
+{
+	priv->hw_params.ht40_channel =  BIT(IEEE80211_BAND_2GHZ);
+
+	priv->hw_params.tx_chains_num =
+		num_of_ant(priv->hw_params.valid_tx_ant);
+	if (priv->cfg->rx_with_siso_diversity)
+		priv->hw_params.rx_chains_num = 1;
+	else
+		priv->hw_params.rx_chains_num =
+			num_of_ant(priv->hw_params.valid_rx_ant);
+
+	iwl1000_set_ct_threshold(priv);
+
+	/* Set initial sensitivity parameters */
+	priv->hw_params.sens = &iwl1000_sensitivity;
+}
+
+struct iwl_lib_ops iwl1000_lib = {
+	.set_hw_params = iwl1000_hw_set_hw_params,
+	.nic_config = iwl1000_nic_config,
+	.eeprom_ops = {
+		.regulatory_bands = {
+			EEPROM_REG_BAND_1_CHANNELS,
+			EEPROM_REG_BAND_2_CHANNELS,
+			EEPROM_REG_BAND_3_CHANNELS,
+			EEPROM_REG_BAND_4_CHANNELS,
+			EEPROM_REG_BAND_5_CHANNELS,
+			EEPROM_REG_BAND_24_HT40_CHANNELS,
+			EEPROM_REGULATORY_BAND_NO_HT40,
+		},
+	},
+	.temperature = iwlagn_temperature,
+};
+
+
+/*
+ * 2000 series
+ * ===========
+ */
+
+static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
+{
+	/* want Celsius */
+	priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
+	priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 2000 series */
+static void iwl2000_nic_config(struct iwl_priv *priv)
+{
+	iwl_rf_config(priv);
+
+	iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
+		    CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
+}
+
+static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
+	.min_nrg_cck = 97,
+	.auto_corr_min_ofdm = 80,
+	.auto_corr_min_ofdm_mrc = 128,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 192,
+
+	.auto_corr_max_ofdm = 145,
+	.auto_corr_max_ofdm_mrc = 232,
+	.auto_corr_max_ofdm_x1 = 110,
+	.auto_corr_max_ofdm_mrc_x1 = 232,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 175,
+	.auto_corr_min_cck_mrc = 160,
+	.auto_corr_max_cck_mrc = 310,
+	.nrg_th_cck = 97,
+	.nrg_th_ofdm = 100,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
+static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
+{
+	priv->hw_params.ht40_channel =  BIT(IEEE80211_BAND_2GHZ);
+
+	priv->hw_params.tx_chains_num =
+		num_of_ant(priv->hw_params.valid_tx_ant);
+	if (priv->cfg->rx_with_siso_diversity)
+		priv->hw_params.rx_chains_num = 1;
+	else
+		priv->hw_params.rx_chains_num =
+			num_of_ant(priv->hw_params.valid_rx_ant);
+
+	iwl2000_set_ct_threshold(priv);
+
+	/* Set initial sensitivity parameters */
+	priv->hw_params.sens = &iwl2000_sensitivity;
+}
+
+struct iwl_lib_ops iwl2000_lib = {
+	.set_hw_params = iwl2000_hw_set_hw_params,
+	.nic_config = iwl2000_nic_config,
+	.eeprom_ops = {
+		.regulatory_bands = {
+			EEPROM_REG_BAND_1_CHANNELS,
+			EEPROM_REG_BAND_2_CHANNELS,
+			EEPROM_REG_BAND_3_CHANNELS,
+			EEPROM_REG_BAND_4_CHANNELS,
+			EEPROM_REG_BAND_5_CHANNELS,
+			EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+			EEPROM_REGULATORY_BAND_NO_HT40,
+		},
+		.enhanced_txpower = true,
+	},
+	.temperature = iwlagn_temperature,
+};
+
+struct iwl_lib_ops iwl2030_lib = {
+	.set_hw_params = iwl2000_hw_set_hw_params,
+	.nic_config = iwl2000_nic_config,
+	.eeprom_ops = {
+		.regulatory_bands = {
+			EEPROM_REG_BAND_1_CHANNELS,
+			EEPROM_REG_BAND_2_CHANNELS,
+			EEPROM_REG_BAND_3_CHANNELS,
+			EEPROM_REG_BAND_4_CHANNELS,
+			EEPROM_REG_BAND_5_CHANNELS,
+			EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+			EEPROM_REGULATORY_BAND_NO_HT40,
+		},
+		.enhanced_txpower = true,
+	},
+	.temperature = iwlagn_temperature,
+};
+
+/*
+ * 5000 series
+ * ===========
+ */
+
+/* NIC configuration for 5000 series */
+static void iwl5000_nic_config(struct iwl_priv *priv)
+{
+	iwl_rf_config(priv);
+
+	/* W/A : NIC is stuck in a reset state after Early PCIe power off
+	 * (PCIe power is lost before PERST# is asserted),
+	 * causing the ME FW to lose ownership and not be able to get it back.
+	 */
+	iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
+				APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+				~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+}
+
+static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
+	.min_nrg_cck = 100,
+	.auto_corr_min_ofdm = 90,
+	.auto_corr_min_ofdm_mrc = 170,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 220,
+
+	.auto_corr_max_ofdm = 120,
+	.auto_corr_max_ofdm_mrc = 210,
+	.auto_corr_max_ofdm_x1 = 120,
+	.auto_corr_max_ofdm_mrc_x1 = 240,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 200,
+	.auto_corr_min_cck_mrc = 200,
+	.auto_corr_max_cck_mrc = 400,
+	.nrg_th_cck = 100,
+	.nrg_th_ofdm = 100,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
+static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
+	.min_nrg_cck = 95,
+	.auto_corr_min_ofdm = 90,
+	.auto_corr_min_ofdm_mrc = 170,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 220,
+
+	.auto_corr_max_ofdm = 120,
+	.auto_corr_max_ofdm_mrc = 210,
+	/* max = min to work around a performance bug in the 5150 DSP */
+	.auto_corr_max_ofdm_x1 = 105,
+	.auto_corr_max_ofdm_mrc_x1 = 220,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 200,
+	.auto_corr_min_cck_mrc = 170,
+	.auto_corr_max_cck_mrc = 400,
+	.nrg_th_cck = 95,
+	.nrg_th_ofdm = 95,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
+#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF	(-5)
+
+static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+{
+	u16 temperature, voltage;
+	__le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
+				EEPROM_KELVIN_TEMPERATURE);
+
+	temperature = le16_to_cpu(temp_calib[0]);
+	voltage = le16_to_cpu(temp_calib[1]);
+
+	/* offset = temp - volt / coeff */
+	return (s32)(temperature -
+			voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
+}
+
+static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
+{
+	const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
+	s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
+			iwl_temp_calib_to_offset(priv);
+
+	priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
+}
+
+static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
+{
+	/* want Celsius */
+	priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
+}
+
+static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
+{
+	priv->hw_params.ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
+					BIT(IEEE80211_BAND_5GHZ);
+
+	priv->hw_params.tx_chains_num =
+		num_of_ant(priv->hw_params.valid_tx_ant);
+	priv->hw_params.rx_chains_num =
+		num_of_ant(priv->hw_params.valid_rx_ant);
+
+	iwl5000_set_ct_threshold(priv);
+
+	/* Set initial sensitivity parameters */
+	priv->hw_params.sens = &iwl5000_sensitivity;
+}
+
+static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
+{
+	priv->hw_params.ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
+					BIT(IEEE80211_BAND_5GHZ);
+
+	priv->hw_params.tx_chains_num =
+		num_of_ant(priv->hw_params.valid_tx_ant);
+	priv->hw_params.rx_chains_num =
+		num_of_ant(priv->hw_params.valid_rx_ant);
+
+	iwl5150_set_ct_threshold(priv);
+
+	/* Set initial sensitivity parameters */
+	priv->hw_params.sens = &iwl5150_sensitivity;
+}
+
+static void iwl5150_temperature(struct iwl_priv *priv)
+{
+	u32 vt = 0;
+	s32 offset =  iwl_temp_calib_to_offset(priv);
+
+	vt = le32_to_cpu(priv->statistics.common.temperature);
+	vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
+	/* now vt holds the temperature in Kelvin */
+	priv->temperature = KELVIN_TO_CELSIUS(vt);
+	iwl_tt_handler(priv);
+}
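
To make the voltage-domain arithmetic in iwl_temp_calib_to_offset(), iwl5150_set_ct_threshold() and iwl5150_temperature() above concrete, here is a round-trip sketch with invented calibration values (not real EEPROM data); only the -5 coefficient is taken from the code above, and the CT-kill point in Kelvin is an assumed number.

	#include <stdio.h>

	#define COEFF (-5)	/* IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF */

	int main(void)
	{
		int temp_cal = 300, volt_cal = 25;	/* assumed EEPROM calibration words */
		int offset = temp_cal - volt_cal / COEFF;	/* 300 - (-5) = 305 */
		int kill_kelvin = 383;			/* assumed CT-kill point in Kelvin */

		/* what iwl5150_set_ct_threshold() would program (voltage domain) */
		int raw_threshold = (kill_kelvin - offset) * COEFF;

		/* what iwl5150_temperature() recovers from such a raw value */
		int kelvin_back = raw_threshold / COEFF + offset;

		/* prints "offset=305 raw=-390 kelvin=383" */
		printf("offset=%d raw=%d kelvin=%d\n", offset, raw_threshold, kelvin_back);
		return 0;
	}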
+
+static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
+				     struct ieee80211_channel_switch *ch_switch)
+{
+	/*
+	 * MULTI-FIXME
+	 * See iwlagn_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwl5000_channel_switch_cmd cmd;
+	const struct iwl_channel_info *ch_info;
+	u32 switch_time_in_usec, ucode_switch_time;
+	u16 ch;
+	u32 tsf_low;
+	u8 switch_count;
+	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+	struct ieee80211_vif *vif = ctx->vif;
+	struct iwl_host_cmd hcmd = {
+		.id = REPLY_CHANNEL_SWITCH,
+		.len = { sizeof(cmd), },
+		.flags = CMD_SYNC,
+		.data = { &cmd, },
+	};
+
+	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+	ch = ch_switch->channel->hw_value;
+	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
+		      ctx->active.channel, ch);
+	cmd.channel = cpu_to_le16(ch);
+	cmd.rxon_flags = ctx->staging.flags;
+	cmd.rxon_filter_flags = ctx->staging.filter_flags;
+	switch_count = ch_switch->count;
+	tsf_low = ch_switch->timestamp & 0x0ffffffff;
+	/*
+	 * calculate the uCode channel switch time,
+	 * adding the TSF as one of the factors for when to switch
+	 */
+	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+		    beacon_interval)) {
+			switch_count -= (priv->ucode_beacon_time -
+				tsf_low) / beacon_interval;
+		} else
+			switch_count = 0;
+	}
+	if (switch_count <= 1)
+		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	else {
+		switch_time_in_usec =
+			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+		ucode_switch_time = iwl_usecs_to_beacons(priv,
+							 switch_time_in_usec,
+							 beacon_interval);
+		cmd.switch_time = iwl_add_beacon_time(priv,
+						      priv->ucode_beacon_time,
+						      ucode_switch_time,
+						      beacon_interval);
+	}
+	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+		      cmd.switch_time);
+	ch_info = iwl_get_channel_info(priv, priv->band, ch);
+	if (ch_info)
+		cmd.expect_beacon = is_channel_radar(ch_info);
+	else {
+		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+			ctx->active.channel, ch);
+		return -EFAULT;
+	}
+
+	return iwl_dvm_send_cmd(priv, &hcmd);
+}
+
+struct iwl_lib_ops iwl5000_lib = {
+	.set_hw_params = iwl5000_hw_set_hw_params,
+	.set_channel_switch = iwl5000_hw_channel_switch,
+	.nic_config = iwl5000_nic_config,
+	.eeprom_ops = {
+		.regulatory_bands = {
+			EEPROM_REG_BAND_1_CHANNELS,
+			EEPROM_REG_BAND_2_CHANNELS,
+			EEPROM_REG_BAND_3_CHANNELS,
+			EEPROM_REG_BAND_4_CHANNELS,
+			EEPROM_REG_BAND_5_CHANNELS,
+			EEPROM_REG_BAND_24_HT40_CHANNELS,
+			EEPROM_REG_BAND_52_HT40_CHANNELS
+		},
+	},
+	.temperature = iwlagn_temperature,
+};
+
+struct iwl_lib_ops iwl5150_lib = {
+	.set_hw_params = iwl5150_hw_set_hw_params,
+	.set_channel_switch = iwl5000_hw_channel_switch,
+	.nic_config = iwl5000_nic_config,
+	.eeprom_ops = {
+		.regulatory_bands = {
+			EEPROM_REG_BAND_1_CHANNELS,
+			EEPROM_REG_BAND_2_CHANNELS,
+			EEPROM_REG_BAND_3_CHANNELS,
+			EEPROM_REG_BAND_4_CHANNELS,
+			EEPROM_REG_BAND_5_CHANNELS,
+			EEPROM_REG_BAND_24_HT40_CHANNELS,
+			EEPROM_REG_BAND_52_HT40_CHANNELS
+		},
+	},
+	.temperature = iwl5150_temperature,
+};
+
+
+
+/*
+ * 6000 series
+ * ===========
+ */
+
+static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
+{
+	/* want Celsius */
+	priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
+	priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 6000 series */
+static void iwl6000_nic_config(struct iwl_priv *priv)
+{
+	iwl_rf_config(priv);
+
+	switch (priv->cfg->device_family) {
+	case IWL_DEVICE_FAMILY_6005:
+	case IWL_DEVICE_FAMILY_6030:
+	case IWL_DEVICE_FAMILY_6000:
+		break;
+	case IWL_DEVICE_FAMILY_6000i:
+		/* 2x2 IPA phy type */
+		iwl_write32(priv->trans, CSR_GP_DRIVER_REG,
+			     CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
+		break;
+	case IWL_DEVICE_FAMILY_6050:
+		/* Indicate calibration version to uCode. */
+		if (iwl_eeprom_calib_version(priv) >= 6)
+			iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
+					CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
+		break;
+	case IWL_DEVICE_FAMILY_6150:
+		/* Indicate calibration version to uCode. */
+		if (iwl_eeprom_calib_version(priv) >= 6)
+			iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
+					CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
+		iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
+			    CSR_GP_DRIVER_REG_BIT_6050_1x2);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
+	.min_nrg_cck = 110,
+	.auto_corr_min_ofdm = 80,
+	.auto_corr_min_ofdm_mrc = 128,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 192,
+
+	.auto_corr_max_ofdm = 145,
+	.auto_corr_max_ofdm_mrc = 232,
+	.auto_corr_max_ofdm_x1 = 110,
+	.auto_corr_max_ofdm_mrc_x1 = 232,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 175,
+	.auto_corr_min_cck_mrc = 160,
+	.auto_corr_max_cck_mrc = 310,
+	.nrg_th_cck = 110,
+	.nrg_th_ofdm = 110,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 336,
+	.nrg_th_cca = 62,
+};
+
+static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
+{
+	priv->hw_params.ht40_channel =  BIT(IEEE80211_BAND_2GHZ) |
+					BIT(IEEE80211_BAND_5GHZ);
+
+	priv->hw_params.tx_chains_num =
+		num_of_ant(priv->hw_params.valid_tx_ant);
+	if (priv->cfg->rx_with_siso_diversity)
+		priv->hw_params.rx_chains_num = 1;
+	else
+		priv->hw_params.rx_chains_num =
+			num_of_ant(priv->hw_params.valid_rx_ant);
+
+	iwl6000_set_ct_threshold(priv);
+
+	/* Set initial sensitivity parameters */
+	priv->hw_params.sens = &iwl6000_sensitivity;
+
+}
+
+static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
+				     struct ieee80211_channel_switch *ch_switch)
+{
+	/*
+	 * MULTI-FIXME
+	 * See iwlagn_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwl6000_channel_switch_cmd cmd;
+	const struct iwl_channel_info *ch_info;
+	u32 switch_time_in_usec, ucode_switch_time;
+	u16 ch;
+	u32 tsf_low;
+	u8 switch_count;
+	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+	struct ieee80211_vif *vif = ctx->vif;
+	struct iwl_host_cmd hcmd = {
+		.id = REPLY_CHANNEL_SWITCH,
+		.len = { sizeof(cmd), },
+		.flags = CMD_SYNC,
+		.data = { &cmd, },
+	};
+
+	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+	ch = ch_switch->channel->hw_value;
+	IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+		      ctx->active.channel, ch);
+	cmd.channel = cpu_to_le16(ch);
+	cmd.rxon_flags = ctx->staging.flags;
+	cmd.rxon_filter_flags = ctx->staging.filter_flags;
+	switch_count = ch_switch->count;
+	tsf_low = ch_switch->timestamp & 0x0ffffffff;
+	/*
+	 * calculate the uCode channel switch time,
+	 * adding the TSF as one of the factors for when to switch
+	 */
+	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+		    beacon_interval)) {
+			switch_count -= (priv->ucode_beacon_time -
+				tsf_low) / beacon_interval;
+		} else
+			switch_count = 0;
+	}
+	if (switch_count <= 1)
+		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	else {
+		switch_time_in_usec =
+			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+		ucode_switch_time = iwl_usecs_to_beacons(priv,
+							 switch_time_in_usec,
+							 beacon_interval);
+		cmd.switch_time = iwl_add_beacon_time(priv,
+						      priv->ucode_beacon_time,
+						      ucode_switch_time,
+						      beacon_interval);
+	}
+	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+		      cmd.switch_time);
+	ch_info = iwl_get_channel_info(priv, priv->band, ch);
+	if (ch_info)
+		cmd.expect_beacon = is_channel_radar(ch_info);
+	else {
+		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+			ctx->active.channel, ch);
+		return -EFAULT;
+	}
+
+	return iwl_dvm_send_cmd(priv, &hcmd);
+}
+
+struct iwl_lib_ops iwl6000_lib = {
+	.set_hw_params = iwl6000_hw_set_hw_params,
+	.set_channel_switch = iwl6000_hw_channel_switch,
+	.nic_config = iwl6000_nic_config,
+	.eeprom_ops = {
+		.regulatory_bands = {
+			EEPROM_REG_BAND_1_CHANNELS,
+			EEPROM_REG_BAND_2_CHANNELS,
+			EEPROM_REG_BAND_3_CHANNELS,
+			EEPROM_REG_BAND_4_CHANNELS,
+			EEPROM_REG_BAND_5_CHANNELS,
+			EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+			EEPROM_REG_BAND_52_HT40_CHANNELS
+		},
+		.enhanced_txpower = true,
+	},
+	.temperature = iwlagn_temperature,
+};
+
+struct iwl_lib_ops iwl6030_lib = {
+	.set_hw_params = iwl6000_hw_set_hw_params,
+	.set_channel_switch = iwl6000_hw_channel_switch,
+	.nic_config = iwl6000_nic_config,
+	.eeprom_ops = {
+		.regulatory_bands = {
+			EEPROM_REG_BAND_1_CHANNELS,
+			EEPROM_REG_BAND_2_CHANNELS,
+			EEPROM_REG_BAND_3_CHANNELS,
+			EEPROM_REG_BAND_4_CHANNELS,
+			EEPROM_REG_BAND_5_CHANNELS,
+			EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+			EEPROM_REG_BAND_52_HT40_CHANNELS
+		},
+		.enhanced_txpower = true,
+	},
+	.temperature = iwlagn_temperature,
+};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index d0ec0ab..7960a52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -102,10 +102,18 @@
 
 /* EEPROM */
 #define IWLAGN_EEPROM_IMG_SIZE		2048
+/* OTP */
+/* lower blocks contain EEPROM image and calibration data */
+#define OTP_LOW_IMAGE_SIZE		(2 * 512 * sizeof(u16)) /* 2 KB */
+/* high blocks contain PAPD data */
+#define OTP_HIGH_IMAGE_SIZE_6x00        (6 * 512 * sizeof(u16)) /* 6 KB */
+#define OTP_HIGH_IMAGE_SIZE_1000        (0x200 * sizeof(u16)) /* 1024 bytes */
+#define OTP_MAX_LL_ITEMS_1000		(3)	/* OTP blocks for 1000 */
+#define OTP_MAX_LL_ITEMS_6x00		(4)	/* OTP blocks for 6x00 */
+#define OTP_MAX_LL_ITEMS_6x50		(7)	/* OTP blocks for 6x50 */
+#define OTP_MAX_LL_ITEMS_2x00		(4)	/* OTP blocks for 2x00 */
 
-#define IWLAGN_CMD_FIFO_NUM		7
+
 #define IWLAGN_NUM_QUEUES		20
-#define IWLAGN_NUM_AMPDU_QUEUES		9
-#define IWLAGN_FIRST_AMPDU_QUEUE	11
 
 #endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 56f41c9..01dc442 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -33,12 +33,11 @@
 #include <linux/sched.h>
 
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-agn-hw.h"
 #include "iwl-agn.h"
 #include "iwl-trans.h"
-#include "iwl-shared.h"
+#include "iwl-modparams.h"
 
 int iwlagn_hw_valid_rtc_data_addr(u32 addr)
 {
@@ -94,81 +93,6 @@
 	iwl_tt_handler(priv);
 }
 
-u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
-{
-	struct iwl_eeprom_calib_hdr *hdr;
-
-	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
-							EEPROM_CALIB_ALL);
-	return hdr->version;
-
-}
-
-/*
- * EEPROM
- */
-static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
-{
-	u16 offset = 0;
-
-	if ((address & INDIRECT_ADDRESS) == 0)
-		return address;
-
-	switch (address & INDIRECT_TYPE_MSK) {
-	case INDIRECT_HOST:
-		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
-		break;
-	case INDIRECT_GENERAL:
-		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
-		break;
-	case INDIRECT_REGULATORY:
-		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
-		break;
-	case INDIRECT_TXP_LIMIT:
-		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
-		break;
-	case INDIRECT_TXP_LIMIT_SIZE:
-		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
-		break;
-	case INDIRECT_CALIBRATION:
-		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
-		break;
-	case INDIRECT_PROCESS_ADJST:
-		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
-		break;
-	case INDIRECT_OTHERS:
-		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
-		break;
-	default:
-		IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
-		address & INDIRECT_TYPE_MSK);
-		break;
-	}
-
-	/* translate the offset from words to byte */
-	return (address & ADDRESS_MSK) + (offset << 1);
-}
-
-const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
-{
-	u32 address = eeprom_indirect_address(shrd, offset);
-	BUG_ON(address >= shrd->cfg->base_params->eeprom_size);
-	return &shrd->eeprom[address];
-}
-
-struct iwl_mod_params iwlagn_mod_params = {
-	.amsdu_size_8K = 1,
-	.restart_fw = 1,
-	.plcp_check = true,
-	.bt_coex_active = true,
-	.no_sleep_autoadjust = true,
-	.power_level = IWL_POWER_INDEX_1,
-	.bt_ch_announce = true,
-	.wanted_ucode_alternative = 1,
-	.auto_agg = true,
-	/* the rest are 0 by default */
-};
-
 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
 {
 	int idx = 0;
@@ -228,13 +152,13 @@
 				 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
 				 IWL_SCD_MGMT_MSK;
 	if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
-	    (priv->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
+	    (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
 		flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
 				IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
 				IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
 				IWL_PAN_SCD_MULTICAST_MSK;
 
-	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
+	if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
 		flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
 
 	IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -253,7 +177,7 @@
 		goto done;
 	}
 	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
-	iwl_trans_wait_tx_queue_empty(trans(priv));
+	iwl_trans_wait_tx_queue_empty(priv->trans);
 done:
 	ieee80211_wake_queues(priv->hw);
 	mutex_unlock(&priv->mutex);
@@ -262,76 +186,8 @@
 /*
  * BT coex
  */
-/*
- * Macros to access the lookup table.
- *
- * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
-* wifi_prio, wifi_txrx and wifi_sh_ant_req.
- *
- * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
- *
- * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
- * one after another in 32-bit registers, and "registers" 0 through 7 contain
- * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
- *
- * These macros encode that format.
- */
-#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
-		  wifi_txrx, wifi_sh_ant_req) \
-	(bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
-	(wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
-
-#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
-	lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
-#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
-				 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
-	(!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
-				   bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
-				   wifi_sh_ant_req))))
-#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
-				wifi_prio, wifi_txrx, wifi_sh_ant_req) \
-	LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
-			       bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
-			       wifi_sh_ant_req))
-#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
-				  wifi_req, wifi_prio, wifi_txrx, \
-				  wifi_sh_ant_req) \
-	LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
-			       bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
-			       wifi_sh_ant_req))
-
-#define LUT_WLAN_KILL_OP(lut, op, val) \
-	lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
-#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
-			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
-	(!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
-			     wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
-#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
-			  wifi_prio, wifi_txrx, wifi_sh_ant_req) \
-	LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
-			 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
-#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
-			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
-	LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
-			 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
-
-#define LUT_ANT_SWITCH_OP(lut, op, val) \
-	lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
-#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
-			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
-	(!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
-			      wifi_req, wifi_prio, wifi_txrx, \
-			      wifi_sh_ant_req))))
-#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
-			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
-	LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
-			  wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
-#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
-			     wifi_prio, wifi_txrx, wifi_sh_ant_req) \
-	LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
-			  wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
-
-static const __le32 iwlagn_def_3w_lookup[12] = {
+/* Normal TDM */
+static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
 	cpu_to_le32(0xaaaaaaaa),
 	cpu_to_le32(0xaaaaaaaa),
 	cpu_to_le32(0xaeaaaaaa),
@@ -346,7 +202,25 @@
 	cpu_to_le32(0xf0005000),
 };
 
-static const __le32 iwlagn_concurrent_lookup[12] = {
+
+/* Loose Coex */
+static const __le32 iwlagn_loose_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaeaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xcc00ff28),
+	cpu_to_le32(0x0000aaaa),
+	cpu_to_le32(0xcc00aaaa),
+	cpu_to_le32(0x0000aaaa),
+	cpu_to_le32(0x00000000),
+	cpu_to_le32(0x00000000),
+	cpu_to_le32(0xf0005000),
+	cpu_to_le32(0xf0005000),
+};
+
+/* Full concurrency */
+static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
 	cpu_to_le32(0xaaaaaaaa),
 	cpu_to_le32(0xaaaaaaaa),
 	cpu_to_le32(0xaaaaaaaa),
@@ -369,24 +243,30 @@
 		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
 		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
 	};
-	struct iwl6000_bt_cmd bt_cmd_6000;
-	struct iwl2000_bt_cmd bt_cmd_2000;
+	struct iwl_bt_cmd_v1 bt_cmd_v1;
+	struct iwl_bt_cmd_v2 bt_cmd_v2;
 	int ret;
 
 	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
 			sizeof(basic.bt3_lookup_table));
 
-	if (cfg(priv)->bt_params) {
-		if (cfg(priv)->bt_params->bt_session_2) {
-			bt_cmd_2000.prio_boost = cpu_to_le32(
-				cfg(priv)->bt_params->bt_prio_boost);
-			bt_cmd_2000.tx_prio_boost = 0;
-			bt_cmd_2000.rx_prio_boost = 0;
+	if (priv->cfg->bt_params) {
+		/*
+		 * Newer generations of devices (2000 series and newer)
+		 * use version 2 of the BT command; we need to make sure
+		 * the host command is sent with the correct data structure
+		 * to avoid a uCode assert.
+		 */
+		if (priv->cfg->bt_params->bt_session_2) {
+			bt_cmd_v2.prio_boost = cpu_to_le32(
+				priv->cfg->bt_params->bt_prio_boost);
+			bt_cmd_v2.tx_prio_boost = 0;
+			bt_cmd_v2.rx_prio_boost = 0;
 		} else {
-			bt_cmd_6000.prio_boost =
-				cfg(priv)->bt_params->bt_prio_boost;
-			bt_cmd_6000.tx_prio_boost = 0;
-			bt_cmd_6000.rx_prio_boost = 0;
+			bt_cmd_v1.prio_boost =
+				priv->cfg->bt_params->bt_prio_boost;
+			bt_cmd_v1.tx_prio_boost = 0;
+			bt_cmd_v1.rx_prio_boost = 0;
 		}
 	} else {
 		IWL_ERR(priv, "failed to construct BT Coex Config\n");
@@ -395,6 +275,7 @@
 
 	basic.kill_ack_mask = priv->kill_ack_mask;
 	basic.kill_cts_mask = priv->kill_cts_mask;
+	basic.reduce_txpower = priv->reduced_txpower;
 	basic.valid = priv->bt_valid;
 
 	/*
@@ -403,7 +284,7 @@
 	 * (might be in monitor mode), or the interface is in
 	 * IBSS mode (no proper uCode support for coex then).
 	 */
-	if (!iwlagn_mod_params.bt_coex_active ||
+	if (!iwlwifi_mod_params.bt_coex_active ||
 	    priv->iw_mode == NL80211_IFTYPE_ADHOC) {
 		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
 	} else {
@@ -432,16 +313,16 @@
 		       priv->bt_full_concurrent ?
 		       "full concurrency" : "3-wire");
 
-	if (cfg(priv)->bt_params->bt_session_2) {
-		memcpy(&bt_cmd_2000.basic, &basic,
+	if (priv->cfg->bt_params->bt_session_2) {
+		memcpy(&bt_cmd_v2.basic, &basic,
 			sizeof(basic));
 		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
-			CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
+			CMD_SYNC, sizeof(bt_cmd_v2), &bt_cmd_v2);
 	} else {
-		memcpy(&bt_cmd_6000.basic, &basic,
+		memcpy(&bt_cmd_v1.basic, &basic,
 			sizeof(basic));
 		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
-			CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
+			CMD_SYNC, sizeof(bt_cmd_v1), &bt_cmd_v1);
 	}
 	if (ret)
 		IWL_ERR(priv, "failed to send BT Coex Config\n");
@@ -615,7 +496,7 @@
 				struct iwl_bt_uart_msg *uart_msg)
 {
 	IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
-			"Update Req = 0x%X",
+			"Update Req = 0x%X\n",
 		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
 			BT_UART_MSG_FRAME1MSGTYPE_POS,
 		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
@@ -624,7 +505,7 @@
 			BT_UART_MSG_FRAME1UPDATEREQ_POS);
 
 	IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
-			"Chl_SeqN = 0x%X, In band = 0x%X",
+			"Chl_SeqN = 0x%X, In band = 0x%X\n",
 		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
 			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
 		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
@@ -635,7 +516,7 @@
 			BT_UART_MSG_FRAME2INBAND_POS);
 
 	IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
-			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
+			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n",
 		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
 			BT_UART_MSG_FRAME3SCOESCO_POS,
 		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
@@ -649,12 +530,12 @@
 		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
 			BT_UART_MSG_FRAME3OBEX_POS);
 
-	IWL_DEBUG_COEX(priv, "Idle duration = 0x%X",
+	IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n",
 		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
 			BT_UART_MSG_FRAME4IDLEDURATION_POS);
 
 	IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
-			"eSCO Retransmissions = 0x%X",
+			"eSCO Retransmissions = 0x%X\n",
 		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
 			BT_UART_MSG_FRAME5TXACTIVITY_POS,
 		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
@@ -662,14 +543,14 @@
 		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
 			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
 
-	IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
+	IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n",
 		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
 			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
 		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
 			BT_UART_MSG_FRAME6DISCOVERABLE_POS);
 
 	IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
-			"0x%X, Inquiry = 0x%X, Connectable = 0x%X",
+			"0x%X, Inquiry = 0x%X, Connectable = 0x%X\n",
 		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
 			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
 		(BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
@@ -680,29 +561,62 @@
 			BT_UART_MSG_FRAME7CONNECTABLE_POS);
 }
 
-static void iwlagn_set_kill_msk(struct iwl_priv *priv,
+static bool iwlagn_set_kill_msk(struct iwl_priv *priv,
 				struct iwl_bt_uart_msg *uart_msg)
 {
-	u8 kill_msk;
-	static const __le32 bt_kill_ack_msg[2] = {
+	bool need_update = false;
+	u8 kill_msk = IWL_BT_KILL_REDUCE;
+	static const __le32 bt_kill_ack_msg[3] = {
 		IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
-		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
-	static const __le32 bt_kill_cts_msg[2] = {
+		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
+		IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
+	static const __le32 bt_kill_cts_msg[3] = {
 		IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
-		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
+		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
+		IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
 
-	kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
-		? 1 : 0;
+	if (!priv->reduced_txpower)
+		kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
+			? IWL_BT_KILL_OVERRIDE : IWL_BT_KILL_DEFAULT;
 	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
 	    priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
 		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
 		priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
 		priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
 		priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
-
-		/* schedule to send runtime bt_config */
-		queue_work(priv->workqueue, &priv->bt_runtime_config);
+		need_update = true;
 	}
+	return need_update;
+}
+
+static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
+				struct iwl_bt_uart_msg *uart_msg)
+{
+	bool need_update = false;
+
+	if (!priv->reduced_txpower &&
+	    !iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
+	    (uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
+	    BT_UART_MSG_FRAME3OBEX_MSK)) &&
+	    !(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
+	    BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK))) {
+		/* enabling reduced tx power */
+		priv->reduced_txpower = true;
+		priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
+		need_update = true;
+	} else if (priv->reduced_txpower &&
+		   (iwl_is_associated(priv, IWL_RXON_CTX_PAN) ||
+		   (uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
+		   BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) ||
+		   !(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
+		   BT_UART_MSG_FRAME3OBEX_MSK)))) {
+		/* disable reduced tx power */
+		priv->reduced_txpower = false;
+		priv->bt_valid &= ~IWLAGN_BT_VALID_REDUCED_TX_PWR;
+		need_update = true;
+	}
+
+	return need_update;
 }
 
 int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
@@ -750,7 +664,12 @@
 		}
 	}
 
-	iwlagn_set_kill_msk(priv, uart_msg);
+	/* schedule sending the runtime bt_config */
+	/* check reduced tx power before changing the ack/cts kill mask */
+	if (iwlagn_fill_txpower_mode(priv, uart_msg) ||
+	    iwlagn_set_kill_msk(priv, uart_msg))
+		queue_work(priv->workqueue, &priv->bt_runtime_config);
+
 
 	/* FIXME: based on notification, adjust the prio_boost */
 
@@ -798,8 +717,8 @@
  */
 static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
 {
-	if (cfg(priv)->bt_params &&
-	    cfg(priv)->bt_params->advanced_bt_coexist &&
+	if (priv->cfg->bt_params &&
+	    priv->cfg->bt_params->advanced_bt_coexist &&
 	    (priv->bt_full_concurrent ||
 	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
 		/*
@@ -856,7 +775,7 @@
 void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 	bool is_single = is_single_rx_stream(priv);
-	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status);
+	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
 	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
 	u32 active_chains;
 	u16 rx_chain;
@@ -868,10 +787,10 @@
 	if (priv->chain_noise_data.active_chains)
 		active_chains = priv->chain_noise_data.active_chains;
 	else
-		active_chains = hw_params(priv).valid_rx_ant;
+		active_chains = priv->hw_params.valid_rx_ant;
 
-	if (cfg(priv)->bt_params &&
-	    cfg(priv)->bt_params->advanced_bt_coexist &&
+	if (priv->cfg->bt_params &&
+	    priv->cfg->bt_params->advanced_bt_coexist &&
 	    (priv->bt_full_concurrent ||
 	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
 		/*
@@ -1190,7 +1109,7 @@
 	memcpy(&rxon, &ctx->active, sizeof(rxon));
 
 	priv->ucode_loaded = false;
-	iwl_trans_stop_device(trans(priv));
+	iwl_trans_stop_device(priv->trans);
 
 	priv->wowlan = true;
 
@@ -1212,7 +1131,7 @@
 	if (ret)
 		goto out;
 
-	if (!iwlagn_mod_params.sw_crypto) {
+	if (!iwlwifi_mod_params.sw_crypto) {
 		/* mark all keys clear */
 		priv->ucode_key_table = 0;
 		ctx->key_mapping_keys = 0;
@@ -1298,6 +1217,12 @@
 		return -EIO;
 	}
 
+	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
+		IWL_ERR(priv, "Command %s failed: FW Error\n",
+			iwl_dvm_get_cmd_string(cmd->id));
+		return -EIO;
+	}
+
 	/*
 	 * Synchronous commands from this op-mode must hold
 	 * the mutex, this ensures we don't try to send two
@@ -1312,7 +1237,7 @@
 		return -EIO;
 	}
 
-	return iwl_trans_send_cmd(trans(priv), cmd);
+	return iwl_trans_send_cmd(priv->trans, cmd);
 }
 
 int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 7e590b3..51e1a69 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -36,9 +36,9 @@
 #include <linux/workqueue.h>
 
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-agn.h"
 #include "iwl-op-mode.h"
+#include "iwl-modparams.h"
 
 #define RS_NAME "iwl-agn-rs"
 
@@ -420,7 +420,7 @@
 
 	load = rs_tl_get_load(lq_data, tid);
 
-	if ((iwlagn_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
+	if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
 		IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
 				sta->addr, tid);
 		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -819,7 +819,7 @@
 
 		if (num_of_ant(tbl->ant_type) > 1)
 			tbl->ant_type =
-			    first_antenna(hw_params(priv).valid_tx_ant);
+			    first_antenna(priv->hw_params.valid_tx_ant);
 
 		tbl->is_ht40 = 0;
 		tbl->is_SGI = 0;
@@ -969,7 +969,7 @@
 	    (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
 	    (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
 	    (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
-	    (tbl_type.ant_type != info->antenna_sel_tx) ||
+	    (tbl_type.ant_type != info->status.antenna) ||
 	    (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
 	    (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
 	    (rs_index != mac_index)) {
@@ -1085,7 +1085,7 @@
 	    (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
 		rs_program_fix_rate(priv, lq_sta);
 #endif
-	if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist)
+	if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
 		rs_bt_update_lq(priv, ctx, lq_sta);
 }
 
@@ -1291,7 +1291,7 @@
 		return -1;
 
 	/* Need both Tx chains/antennas to support MIMO */
-	if (hw_params(priv).tx_chains_num < 2)
+	if (priv->hw_params.tx_chains_num < 2)
 		return -1;
 
 	IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
@@ -1347,7 +1347,7 @@
 		return -1;
 
 	/* Need both Tx chains/antennas to support MIMO */
-	if (hw_params(priv).tx_chains_num < 3)
+	if (priv->hw_params.tx_chains_num < 3)
 		return -1;
 
 	IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
@@ -1446,8 +1446,8 @@
 	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
 		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	u8 start_action;
-	u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
-	u8 tx_chains_num = hw_params(priv).tx_chains_num;
+	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
 	int ret = 0;
 	u8 update_search_tbl_counter = 0;
 
@@ -1464,7 +1464,7 @@
 	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
 		/* avoid antenna B and MIMO */
 		valid_tx_ant =
-			first_antenna(hw_params(priv).valid_tx_ant);
+			first_antenna(priv->hw_params.valid_tx_ant);
 		if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
 		    tbl->action != IWL_LEGACY_SWITCH_SISO)
 			tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1488,7 +1488,7 @@
 		else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
 			tbl->action = IWL_LEGACY_SWITCH_SISO;
 		valid_tx_ant =
-			first_antenna(hw_params(priv).valid_tx_ant);
+			first_antenna(priv->hw_params.valid_tx_ant);
 	}
 
 	start_action = tbl->action;
@@ -1622,8 +1622,8 @@
 	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
 		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	u8 start_action;
-	u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
-	u8 tx_chains_num = hw_params(priv).tx_chains_num;
+	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
 	u8 update_search_tbl_counter = 0;
 	int ret;
 
@@ -1640,7 +1640,7 @@
 	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
 		/* avoid antenna B and MIMO */
 		valid_tx_ant =
-			first_antenna(hw_params(priv).valid_tx_ant);
+			first_antenna(priv->hw_params.valid_tx_ant);
 		if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
 			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
 		break;
@@ -1658,7 +1658,7 @@
 	/* configure as 1x1 if bt full concurrency */
 	if (priv->bt_full_concurrent) {
 		valid_tx_ant =
-			first_antenna(hw_params(priv).valid_tx_ant);
+			first_antenna(priv->hw_params.valid_tx_ant);
 		if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
 			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
 	}
@@ -1794,8 +1794,8 @@
 	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
 		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	u8 start_action;
-	u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
-	u8 tx_chains_num = hw_params(priv).tx_chains_num;
+	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
 	u8 update_search_tbl_counter = 0;
 	int ret;
 
@@ -1964,8 +1964,8 @@
 	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
 		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	u8 start_action;
-	u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
-	u8 tx_chains_num = hw_params(priv).tx_chains_num;
+	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
 	int ret;
 	u8 update_search_tbl_counter = 0;
 
@@ -2166,7 +2166,7 @@
 		    (lq_sta->total_success > lq_sta->max_success_limit) ||
 		    ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
 		     && (flush_interval_passed))) {
-			IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
+			IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
 				     lq_sta->total_failed,
 				     lq_sta->total_success,
 				     flush_interval_passed);
@@ -2698,7 +2698,7 @@
 
 	i = lq_sta->last_txrate_idx;
 
-	valid_tx_ant = hw_params(priv).valid_tx_ant;
+	valid_tx_ant = priv->hw_params.valid_tx_ant;
 
 	if (!lq_sta->search_better_tbl)
 		active_tbl = lq_sta->active_tbl;
@@ -2826,6 +2826,7 @@
 	struct iwl_station_priv *sta_priv;
 	struct iwl_lq_sta *lq_sta;
 	struct ieee80211_supported_band *sband;
+	unsigned long supp; /* must be unsigned long for for_each_set_bit */
 
 	sta_priv = (struct iwl_station_priv *) sta->drv_priv;
 	lq_sta = &sta_priv->lq_sta;
@@ -2855,8 +2856,15 @@
 	lq_sta->max_rate_idx = -1;
 	lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
 	lq_sta->is_green = rs_use_green(sta);
-	lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
-	lq_sta->band = priv->band;
+	lq_sta->band = sband->band;
+	/*
+	 * active legacy rates as per supported rates bitmap
+	 */
+	supp = sta->supp_rates[sband->band];
+	lq_sta->active_legacy_rate = 0;
+	for_each_set_bit(i, &supp, BITS_PER_LONG)
+		lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
+
 	/*
 	 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
 	 * supp_rates[] does not; shift to convert format, force 9 MBits off.
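
The hunk above derives the station's legacy rate mask from its supported-rates
bitmap: each set bit in supp_rates[] indexes sband->bitrates[], and the bit for
that entry's hw_value is OR'd into active_legacy_rate. A minimal sketch of the
same conversion, with the structures reduced to the one field the loop uses and
all values illustrative:

#include <stdio.h>

struct bitrate { unsigned char hw_value; };

/* Map a mac80211-style supported-rates bitmap (bit i refers to bitrates[i])
 * to a driver rate mask keyed by hw_value. */
static unsigned int supp_to_legacy_mask(unsigned long supp,
					const struct bitrate *bitrates,
					int n_bitrates)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < n_bitrates; i++)
		if (supp & (1UL << i))
			mask |= 1U << bitrates[i].hw_value;

	return mask;
}

int main(void)
{
	/* A 5 GHz-style table: index 0 is 6M with hw_value 4, and so on. */
	const struct bitrate rates[] = { {4}, {5}, {6}, {7} };

	/* Supported bits 0 and 2 (6M and 12M) map to mask bits 4 and 6. */
	printf("0x%02x\n", supp_to_legacy_mask(0x5, rates, 4));
	return 0;
}

The remapping matters because on 5 GHz the bitrates table starts at 6 Mbps with
a non-zero hw_value, so bit positions in supp_rates[] and in the driver mask do
not line up.
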
@@ -2884,15 +2892,15 @@
 
 	/* These values will be overridden later */
 	lq_sta->lq.general_params.single_stream_ant_msk =
-		first_antenna(hw_params(priv).valid_tx_ant);
+		first_antenna(priv->hw_params.valid_tx_ant);
 	lq_sta->lq.general_params.dual_stream_ant_msk =
-		hw_params(priv).valid_tx_ant &
-		~first_antenna(hw_params(priv).valid_tx_ant);
+		priv->hw_params.valid_tx_ant &
+		~first_antenna(priv->hw_params.valid_tx_ant);
 	if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
 		lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
-	} else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
+	} else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
 		lq_sta->lq.general_params.dual_stream_ant_msk =
-			hw_params(priv).valid_tx_ant;
+			priv->hw_params.valid_tx_ant;
 	}
 
 	/* as default allow aggregation for all tids */
@@ -2938,7 +2946,7 @@
 	if (priv && priv->bt_full_concurrent) {
 		/* 1x1 only */
 		tbl_type.ant_type =
-			first_antenna(hw_params(priv).valid_tx_ant);
+			first_antenna(priv->hw_params.valid_tx_ant);
 	}
 
 	/* How many times should we repeat the initial rate? */
@@ -2970,7 +2978,7 @@
 		if (priv->bt_full_concurrent)
 			valid_tx_ant = ANT_A;
 		else
-			valid_tx_ant = hw_params(priv).valid_tx_ant;
+			valid_tx_ant = priv->hw_params.valid_tx_ant;
 	}
 
 	/* Fill rest of rate table */
@@ -3004,7 +3012,7 @@
 		if (priv && priv->bt_full_concurrent) {
 			/* 1x1 only */
 			tbl_type.ant_type =
-			    first_antenna(hw_params(priv).valid_tx_ant);
+			    first_antenna(priv->hw_params.valid_tx_ant);
 		}
 
 		/* Indicate to uCode which entries might be MIMO.
@@ -3055,11 +3063,11 @@
 	 * overwrite if needed, pass aggregation time limit
 	 * to uCode in uSec
 	 */
-	if (priv && cfg(priv)->bt_params &&
-	    cfg(priv)->bt_params->agg_time_limit &&
+	if (priv && priv->cfg->bt_params &&
+	    priv->cfg->bt_params->agg_time_limit &&
 	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
 		lq_cmd->agg_params.agg_time_limit =
-			cpu_to_le16(cfg(priv)->bt_params->agg_time_limit);
+			cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
 }
 
 static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -3091,7 +3099,7 @@
 	u8 ant_sel_tx;
 
 	priv = lq_sta->drv;
-	valid_tx_ant = hw_params(priv).valid_tx_ant;
+	valid_tx_ant = priv->hw_params.valid_tx_ant;
 	if (lq_sta->dbg_fixed_rate) {
 		ant_sel_tx =
 		  ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3162,9 +3170,9 @@
 	desc += sprintf(buff+desc, "fixed rate 0x%X\n",
 			lq_sta->dbg_fixed_rate);
 	desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
-	    (hw_params(priv).valid_tx_ant & ANT_A) ? "ANT_A," : "",
-	    (hw_params(priv).valid_tx_ant & ANT_B) ? "ANT_B," : "",
-	    (hw_params(priv).valid_tx_ant & ANT_C) ? "ANT_C" : "");
+	    (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+	    (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+	    (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
 	desc += sprintf(buff+desc, "lq type %s\n",
 	   (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
 	if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 203b1c1..82d02e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -30,6 +30,7 @@
 #include <net/mac80211.h>
 
 #include "iwl-commands.h"
+#include "iwl-config.h"
 
 struct iwl_rate_info {
 	u8 plcp;	/* uCode API:  IWL_RATE_6M_PLCP, etc. */
@@ -174,32 +175,6 @@
 	IWL_RATE_11M_IEEE = 22,
 };
 
-#define IWL_CCK_BASIC_RATES_MASK    \
-       (IWL_RATE_1M_MASK          | \
-	IWL_RATE_2M_MASK)
-
-#define IWL_CCK_RATES_MASK          \
-       (IWL_CCK_BASIC_RATES_MASK  | \
-	IWL_RATE_5M_MASK          | \
-	IWL_RATE_11M_MASK)
-
-#define IWL_OFDM_BASIC_RATES_MASK   \
-	(IWL_RATE_6M_MASK         | \
-	IWL_RATE_12M_MASK         | \
-	IWL_RATE_24M_MASK)
-
-#define IWL_OFDM_RATES_MASK         \
-       (IWL_OFDM_BASIC_RATES_MASK | \
-	IWL_RATE_9M_MASK          | \
-	IWL_RATE_18M_MASK         | \
-	IWL_RATE_36M_MASK         | \
-	IWL_RATE_48M_MASK         | \
-	IWL_RATE_54M_MASK)
-
-#define IWL_BASIC_RATES_MASK         \
-	(IWL_OFDM_BASIC_RATES_MASK | \
-	 IWL_CCK_BASIC_RATES_MASK)
-
 #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
 
 #define IWL_INVALID_VALUE    -1
@@ -306,15 +281,6 @@
 #define is_a_band(tbl) ((tbl) == LQ_A)
 #define is_g_and(tbl) ((tbl) == LQ_G)
 
-#define	ANT_NONE	0x0
-#define	ANT_A		BIT(0)
-#define	ANT_B		BIT(1)
-#define	ANT_AB		(ANT_A | ANT_B)
-#define ANT_C		BIT(2)
-#define	ANT_AC		(ANT_A | ANT_C)
-#define ANT_BC		(ANT_B | ANT_C)
-#define ANT_ABC		(ANT_AB | ANT_C)
-
 #define IWL_MAX_MCS_DISPLAY_SIZE	12
 
 struct iwl_rate_mcs_info {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index f4b84d1..403de96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -34,95 +34,91 @@
 #include <asm/unaligned.h>
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-agn-calib.h"
 #include "iwl-agn.h"
-#include "iwl-shared.h"
+#include "iwl-modparams.h"
 
-const char *get_cmd_string(u8 cmd)
-{
-	switch (cmd) {
-		IWL_CMD(REPLY_ALIVE);
-		IWL_CMD(REPLY_ERROR);
-		IWL_CMD(REPLY_ECHO);
-		IWL_CMD(REPLY_RXON);
-		IWL_CMD(REPLY_RXON_ASSOC);
-		IWL_CMD(REPLY_QOS_PARAM);
-		IWL_CMD(REPLY_RXON_TIMING);
-		IWL_CMD(REPLY_ADD_STA);
-		IWL_CMD(REPLY_REMOVE_STA);
-		IWL_CMD(REPLY_REMOVE_ALL_STA);
-		IWL_CMD(REPLY_TXFIFO_FLUSH);
-		IWL_CMD(REPLY_WEPKEY);
-		IWL_CMD(REPLY_TX);
-		IWL_CMD(REPLY_LEDS_CMD);
-		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
-		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
-		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
-		IWL_CMD(COEX_EVENT_CMD);
-		IWL_CMD(REPLY_QUIET_CMD);
-		IWL_CMD(REPLY_CHANNEL_SWITCH);
-		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
-		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
-		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
-		IWL_CMD(POWER_TABLE_CMD);
-		IWL_CMD(PM_SLEEP_NOTIFICATION);
-		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
-		IWL_CMD(REPLY_SCAN_CMD);
-		IWL_CMD(REPLY_SCAN_ABORT_CMD);
-		IWL_CMD(SCAN_START_NOTIFICATION);
-		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
-		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
-		IWL_CMD(BEACON_NOTIFICATION);
-		IWL_CMD(REPLY_TX_BEACON);
-		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
-		IWL_CMD(QUIET_NOTIFICATION);
-		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
-		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
-		IWL_CMD(REPLY_BT_CONFIG);
-		IWL_CMD(REPLY_STATISTICS_CMD);
-		IWL_CMD(STATISTICS_NOTIFICATION);
-		IWL_CMD(REPLY_CARD_STATE_CMD);
-		IWL_CMD(CARD_STATE_NOTIFICATION);
-		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
-		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
-		IWL_CMD(SENSITIVITY_CMD);
-		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
-		IWL_CMD(REPLY_RX_PHY_CMD);
-		IWL_CMD(REPLY_RX_MPDU_CMD);
-		IWL_CMD(REPLY_RX);
-		IWL_CMD(REPLY_COMPRESSED_BA);
-		IWL_CMD(CALIBRATION_CFG_CMD);
-		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
-		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
-		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
-		IWL_CMD(TEMPERATURE_NOTIFICATION);
-		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
-		IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
-		IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
-		IWL_CMD(REPLY_BT_COEX_PROT_ENV);
-		IWL_CMD(REPLY_WIPAN_PARAMS);
-		IWL_CMD(REPLY_WIPAN_RXON);
-		IWL_CMD(REPLY_WIPAN_RXON_TIMING);
-		IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
-		IWL_CMD(REPLY_WIPAN_QOS_PARAM);
-		IWL_CMD(REPLY_WIPAN_WEPKEY);
-		IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
-		IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
-		IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
-		IWL_CMD(REPLY_WOWLAN_PATTERNS);
-		IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
-		IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
-		IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
-		IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
-		IWL_CMD(REPLY_WOWLAN_GET_STATUS);
-		IWL_CMD(REPLY_D3_CONFIG);
-	default:
-		return "UNKNOWN";
+#define IWL_CMD_ENTRY(x) [x] = #x
 
-	}
-}
+const char *iwl_dvm_cmd_strings[REPLY_MAX] = {
+	IWL_CMD_ENTRY(REPLY_ALIVE),
+	IWL_CMD_ENTRY(REPLY_ERROR),
+	IWL_CMD_ENTRY(REPLY_ECHO),
+	IWL_CMD_ENTRY(REPLY_RXON),
+	IWL_CMD_ENTRY(REPLY_RXON_ASSOC),
+	IWL_CMD_ENTRY(REPLY_QOS_PARAM),
+	IWL_CMD_ENTRY(REPLY_RXON_TIMING),
+	IWL_CMD_ENTRY(REPLY_ADD_STA),
+	IWL_CMD_ENTRY(REPLY_REMOVE_STA),
+	IWL_CMD_ENTRY(REPLY_REMOVE_ALL_STA),
+	IWL_CMD_ENTRY(REPLY_TXFIFO_FLUSH),
+	IWL_CMD_ENTRY(REPLY_WEPKEY),
+	IWL_CMD_ENTRY(REPLY_TX),
+	IWL_CMD_ENTRY(REPLY_LEDS_CMD),
+	IWL_CMD_ENTRY(REPLY_TX_LINK_QUALITY_CMD),
+	IWL_CMD_ENTRY(COEX_PRIORITY_TABLE_CMD),
+	IWL_CMD_ENTRY(COEX_MEDIUM_NOTIFICATION),
+	IWL_CMD_ENTRY(COEX_EVENT_CMD),
+	IWL_CMD_ENTRY(REPLY_QUIET_CMD),
+	IWL_CMD_ENTRY(REPLY_CHANNEL_SWITCH),
+	IWL_CMD_ENTRY(CHANNEL_SWITCH_NOTIFICATION),
+	IWL_CMD_ENTRY(REPLY_SPECTRUM_MEASUREMENT_CMD),
+	IWL_CMD_ENTRY(SPECTRUM_MEASURE_NOTIFICATION),
+	IWL_CMD_ENTRY(POWER_TABLE_CMD),
+	IWL_CMD_ENTRY(PM_SLEEP_NOTIFICATION),
+	IWL_CMD_ENTRY(PM_DEBUG_STATISTIC_NOTIFIC),
+	IWL_CMD_ENTRY(REPLY_SCAN_CMD),
+	IWL_CMD_ENTRY(REPLY_SCAN_ABORT_CMD),
+	IWL_CMD_ENTRY(SCAN_START_NOTIFICATION),
+	IWL_CMD_ENTRY(SCAN_RESULTS_NOTIFICATION),
+	IWL_CMD_ENTRY(SCAN_COMPLETE_NOTIFICATION),
+	IWL_CMD_ENTRY(BEACON_NOTIFICATION),
+	IWL_CMD_ENTRY(REPLY_TX_BEACON),
+	IWL_CMD_ENTRY(WHO_IS_AWAKE_NOTIFICATION),
+	IWL_CMD_ENTRY(QUIET_NOTIFICATION),
+	IWL_CMD_ENTRY(REPLY_TX_PWR_TABLE_CMD),
+	IWL_CMD_ENTRY(MEASURE_ABORT_NOTIFICATION),
+	IWL_CMD_ENTRY(REPLY_BT_CONFIG),
+	IWL_CMD_ENTRY(REPLY_STATISTICS_CMD),
+	IWL_CMD_ENTRY(STATISTICS_NOTIFICATION),
+	IWL_CMD_ENTRY(REPLY_CARD_STATE_CMD),
+	IWL_CMD_ENTRY(CARD_STATE_NOTIFICATION),
+	IWL_CMD_ENTRY(MISSED_BEACONS_NOTIFICATION),
+	IWL_CMD_ENTRY(REPLY_CT_KILL_CONFIG_CMD),
+	IWL_CMD_ENTRY(SENSITIVITY_CMD),
+	IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD),
+	IWL_CMD_ENTRY(REPLY_RX_PHY_CMD),
+	IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD),
+	IWL_CMD_ENTRY(REPLY_RX),
+	IWL_CMD_ENTRY(REPLY_COMPRESSED_BA),
+	IWL_CMD_ENTRY(CALIBRATION_CFG_CMD),
+	IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION),
+	IWL_CMD_ENTRY(CALIBRATION_COMPLETE_NOTIFICATION),
+	IWL_CMD_ENTRY(REPLY_TX_POWER_DBM_CMD),
+	IWL_CMD_ENTRY(TEMPERATURE_NOTIFICATION),
+	IWL_CMD_ENTRY(TX_ANT_CONFIGURATION_CMD),
+	IWL_CMD_ENTRY(REPLY_BT_COEX_PROFILE_NOTIF),
+	IWL_CMD_ENTRY(REPLY_BT_COEX_PRIO_TABLE),
+	IWL_CMD_ENTRY(REPLY_BT_COEX_PROT_ENV),
+	IWL_CMD_ENTRY(REPLY_WIPAN_PARAMS),
+	IWL_CMD_ENTRY(REPLY_WIPAN_RXON),
+	IWL_CMD_ENTRY(REPLY_WIPAN_RXON_TIMING),
+	IWL_CMD_ENTRY(REPLY_WIPAN_RXON_ASSOC),
+	IWL_CMD_ENTRY(REPLY_WIPAN_QOS_PARAM),
+	IWL_CMD_ENTRY(REPLY_WIPAN_WEPKEY),
+	IWL_CMD_ENTRY(REPLY_WIPAN_P2P_CHANNEL_SWITCH),
+	IWL_CMD_ENTRY(REPLY_WIPAN_NOA_NOTIFICATION),
+	IWL_CMD_ENTRY(REPLY_WIPAN_DEACTIVATION_COMPLETE),
+	IWL_CMD_ENTRY(REPLY_WOWLAN_PATTERNS),
+	IWL_CMD_ENTRY(REPLY_WOWLAN_WAKEUP_FILTER),
+	IWL_CMD_ENTRY(REPLY_WOWLAN_TSC_RSC_PARAMS),
+	IWL_CMD_ENTRY(REPLY_WOWLAN_TKIP_PARAMS),
+	IWL_CMD_ENTRY(REPLY_WOWLAN_KEK_KCK_MATERIAL),
+	IWL_CMD_ENTRY(REPLY_WOWLAN_GET_STATUS),
+	IWL_CMD_ENTRY(REPLY_D3_CONFIG),
+};
+#undef IWL_CMD_ENTRY
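
The switch-based get_cmd_string() is replaced above by a table built with
designated initializers and macro stringification, indexed directly by command
ID. A standalone sketch of the same pattern, including the NULL fallback a
lookup helper needs; the constants and the helper name here are illustrative,
not the driver's actual iwl_dvm_get_cmd_string():

#include <stdio.h>

#define REPLY_ALIVE 0x01
#define REPLY_ERROR 0x02
#define REPLY_MAX   0x100

#define CMD_ENTRY(x) [x] = #x	/* index by value, store the name */
static const char *cmd_strings[REPLY_MAX] = {
	CMD_ENTRY(REPLY_ALIVE),
	CMD_ENTRY(REPLY_ERROR),
};
#undef CMD_ENTRY

/* IDs without an entry (or out of range) fall back to "UNKNOWN". */
static const char *cmd_string(unsigned int cmd)
{
	if (cmd >= REPLY_MAX || !cmd_strings[cmd])
		return "UNKNOWN";
	return cmd_strings[cmd];
}

int main(void)
{
	printf("%s %s\n", cmd_string(REPLY_ERROR), cmd_string(0x42));
	return 0;
}
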
 
 /******************************************************************************
  *
@@ -137,10 +133,9 @@
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_error_resp *err_resp = (void *)pkt->data;
 
-	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
+	IWL_ERR(priv, "Error Reply type 0x%08X cmd REPLY_ERROR (0x%02X) "
 		"seq 0x%04X ser 0x%08X\n",
 		le32_to_cpu(err_resp->error_type),
-		get_cmd_string(err_resp->cmd_id),
 		err_resp->cmd_id,
 		le16_to_cpu(err_resp->bad_cmd_seq_num),
 		le32_to_cpu(err_resp->error_info));
@@ -216,8 +211,7 @@
 	u32 __maybe_unused len =
 		le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
-			"notification for %s:\n", len,
-			get_cmd_string(pkt->hdr.cmd));
+			"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
 	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
 	return 0;
 }
@@ -246,69 +240,6 @@
 	return 0;
 }
 
-/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
-#define ACK_CNT_RATIO (50)
-#define BA_TIMEOUT_CNT (5)
-#define BA_TIMEOUT_MAX (16)
-
-/**
- * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
- *
- * When the ACK count ratio is low and aggregated BA timeout retries exceeding
- * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
- * operation state.
- */
-static bool iwlagn_good_ack_health(struct iwl_priv *priv,
-				struct statistics_tx *cur)
-{
-	int actual_delta, expected_delta, ba_timeout_delta;
-	struct statistics_tx *old;
-
-	if (priv->agg_tids_count)
-		return true;
-
-	lockdep_assert_held(&priv->statistics.lock);
-
-	old = &priv->statistics.tx;
-
-	actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
-		       le32_to_cpu(old->actual_ack_cnt);
-	expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
-			 le32_to_cpu(old->expected_ack_cnt);
-
-	/* Values should not be negative, but we do not trust the firmware */
-	if (actual_delta <= 0 || expected_delta <= 0)
-		return true;
-
-	ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
-			   le32_to_cpu(old->agg.ba_timeout);
-
-	if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
-	    ba_timeout_delta > BA_TIMEOUT_CNT) {
-		IWL_DEBUG_RADIO(priv,
-			"deltas: actual %d expected %d ba_timeout %d\n",
-			actual_delta, expected_delta, ba_timeout_delta);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-		/*
-		 * This is ifdef'ed on DEBUGFS because otherwise the
-		 * statistics aren't available. If DEBUGFS is set but
-		 * DEBUG is not, these will just compile out.
-		 */
-		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
-				priv->delta_stats.tx.rx_detected_cnt);
-		IWL_DEBUG_RADIO(priv,
-				"ack_or_ba_timeout_collision delta %d\n",
-				priv->delta_stats.tx.ack_or_ba_timeout_collision);
-#endif
-
-		if (ba_timeout_delta >= BA_TIMEOUT_MAX)
-			return false;
-	}
-
-	return true;
-}
-
 /**
  * iwl_good_plcp_health - checks for plcp error.
  *
@@ -347,6 +278,45 @@
 	return true;
 }
 
+int iwl_force_rf_reset(struct iwl_priv *priv, bool external)
+{
+	struct iwl_rf_reset *rf_reset;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return -EAGAIN;
+
+	if (!iwl_is_any_associated(priv)) {
+		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
+		return -ENOLINK;
+	}
+
+	rf_reset = &priv->rf_reset;
+	rf_reset->reset_request_count++;
+	if (!external && rf_reset->last_reset_jiffies &&
+	    time_after(rf_reset->last_reset_jiffies +
+		       IWL_DELAY_NEXT_FORCE_RF_RESET, jiffies)) {
+		IWL_DEBUG_INFO(priv, "RF reset rejected\n");
+		rf_reset->reset_reject_count++;
+		return -EAGAIN;
+	}
+	rf_reset->reset_success_count++;
+	rf_reset->last_reset_jiffies = jiffies;
+
+	/*
+	 * There is no easier or better way to force-reset the radio;
+	 * the only known method is switching channels, which forces the
+	 * radio to reset and retune.
+	 * Use an internal short scan (single channel) operation to
+	 * achieve this.
+	 * The driver should do this when too many consecutive beacons are
+	 * missed, or when any other uCode error condition is detected.
+	 */
+	IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+	iwl_internal_short_hw_scan(priv);
+	return 0;
+}
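
iwl_force_rf_reset() throttles non-external requests: one is rejected if it
arrives before IWL_DELAY_NEXT_FORCE_RF_RESET has elapsed since the previous
successful reset. A rough userspace sketch of the same throttle, with
wall-clock seconds standing in for jiffies; the function name and the
3-second window are assumptions made for the example:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MIN_RESET_INTERVAL_SEC 3	/* stand-in for IWL_DELAY_NEXT_FORCE_RF_RESET */

static time_t last_reset;

/* Returns true if the reset may proceed; external requests bypass the limit. */
static bool rf_reset_allowed(bool external)
{
	time_t now = time(NULL);

	if (!external && last_reset &&
	    now < last_reset + MIN_RESET_INTERVAL_SEC)
		return false;		/* too soon after the last reset */

	last_reset = now;
	return true;
}

int main(void)
{
	printf("first %d, immediate retry %d, forced %d\n",
	       rf_reset_allowed(false), rf_reset_allowed(false),
	       rf_reset_allowed(true));
	return 0;
}
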
+
+
 static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
 				struct statistics_rx_phy *cur_ofdm,
 				struct statistics_rx_ht_phy *cur_ofdm_ht,
@@ -368,15 +338,9 @@
 	if (msecs < 99)
 		return;
 
-	if (iwlagn_mod_params.ack_check && !iwlagn_good_ack_health(priv, tx)) {
-		IWL_ERR(priv, "low ack count detected, restart firmware\n");
-		if (!iwl_force_reset(priv, IWL_FW_RESET, false))
-			return;
-	}
-
-	if (iwlagn_mod_params.plcp_check &&
+	if (iwlwifi_mod_params.plcp_check &&
 	    !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
-		iwl_force_reset(priv, IWL_RF_RESET, false);
+		iwl_force_rf_reset(priv, false);
 }
 
 /* Calculate noise level, based on measurements during network silence just
@@ -589,8 +553,8 @@
 		iwlagn_rx_calc_noise(priv);
 		queue_work(priv->workqueue, &priv->run_time_calib_work);
 	}
-	if (cfg(priv)->lib->temperature && change)
-		cfg(priv)->lib->temperature(priv);
+	if (priv->lib->temperature && change)
+		priv->lib->temperature(priv);
 
 	spin_unlock(&priv->statistics.lock);
 
@@ -639,16 +603,16 @@
 	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
 		     CT_CARD_DISABLED)) {
 
-		iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
+		iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
 			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
 
-		iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C,
+		iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
 					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
 
 		if (!(flags & RXON_CARD_DISABLED)) {
-			iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
+			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
 				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-			iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C,
+			iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
 					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
 		}
 		if (flags & CT_CARD_DISABLED)
@@ -671,7 +635,7 @@
 		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
 			test_bit(STATUS_RF_KILL_HW, &priv->status));
 	else
-		wake_up(&trans(priv)->wait_command_queue);
+		wake_up(&priv->trans->wait_command_queue);
 	return 0;
 }
 
@@ -773,8 +737,7 @@
 	struct sk_buff *skb;
 	__le16 fc = hdr->frame_control;
 	struct iwl_rxon_context *ctx;
-	struct page *p;
-	int offset;
+	unsigned int hdrlen, fraglen;
 
 	/* We only process data packets if the interface is open */
 	if (unlikely(!priv->is_open)) {
@@ -784,21 +747,34 @@
 	}
 
 	/* In case of HW accelerated crypto and bad decryption, drop */
-	if (!iwlagn_mod_params.sw_crypto &&
+	if (!iwlwifi_mod_params.sw_crypto &&
 	    iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
 		return;
 
-	skb = dev_alloc_skb(128);
+	/* Don't use dev_alloc_skb(); we'll have enough headroom once
+	 * the ieee80211_hdr is pulled.
+	 */
+	skb = alloc_skb(128, GFP_ATOMIC);
 	if (!skb) {
-		IWL_ERR(priv, "dev_alloc_skb failed\n");
+		IWL_ERR(priv, "alloc_skb failed\n");
 		return;
 	}
+	/* If the frame is small enough to fit in skb->head, pull it in
+	 * completely. If not, only pull the ieee80211_hdr so that splice()
+	 * and TCP coalescing are more efficient.
+	 */
+	hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);
 
-	offset = (void *)hdr - rxb_addr(rxb);
-	p = rxb_steal_page(rxb);
-	skb_add_rx_frag(skb, 0, p, offset, len, len);
+	memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
+	fraglen = len - hdrlen;
 
-	iwl_update_stats(priv, false, fc, len);
+	if (fraglen) {
+		int offset = (void *)hdr + hdrlen -
+			     rxb_addr(rxb) + rxb_offset(rxb);
+
+		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+				fraglen, rxb->truesize);
+	}
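
The new RX path copies the whole frame into skb->head only when it fits;
otherwise it copies just the 802.11 header and attaches the payload as a page
fragment. A small standalone check of that hdrlen/fraglen split (plain C,
sizes illustrative):

#include <stdio.h>

/* Decide how much of a frame to copy linearly and how much to leave paged. */
static void split_frame(unsigned int len, unsigned int tailroom,
			unsigned int hdr_size,
			unsigned int *copy_len, unsigned int *frag_len)
{
	/* Whole frame if it fits in the linear area, header only otherwise. */
	*copy_len = (len <= tailroom) ? len : hdr_size;
	*frag_len = len - *copy_len;
}

int main(void)
{
	unsigned int copy, frag;

	split_frame(96, 128, 26, &copy, &frag);		/* small: all linear */
	printf("copy=%u frag=%u\n", copy, frag);

	split_frame(1500, 128, 26, &copy, &frag);	/* large: header only */
	printf("copy=%u frag=%u\n", copy, frag);
	return 0;
}
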
 
 	/*
 	* Wake any queues that were stopped due to a passive channel tx
@@ -809,8 +785,8 @@
 	*/
 	if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) {
 		for_each_context(priv, ctx) {
-			if (compare_ether_addr(hdr->addr3,
-					       ctx->active.bssid_addr))
+			if (!ether_addr_equal(hdr->addr3,
+					      ctx->active.bssid_addr))
 				continue;
 			iwlagn_lift_passive_no_rx(priv);
 		}
@@ -970,7 +946,7 @@
 	}
 
 	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
-		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
+		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
 				phy_res->cfg_phy_cnt);
 		return 0;
 	}
@@ -1005,7 +981,6 @@
 	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
 	rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
 
-	iwl_dbg_log_rx_data_frame(priv, len, header);
 	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
 		rx_status.signal, (unsigned long long)rx_status.mactime);
 
@@ -1134,16 +1109,13 @@
 	handlers[REPLY_COMPRESSED_BA]		=
 		iwlagn_rx_reply_compressed_ba;
 
-	/* init calibration handlers */
-	priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
-					iwlagn_rx_calib_result;
 	priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
 
 	/* set up notification wait support */
 	iwl_notification_wait_init(&priv->notif_wait);
 
 	/* Set up BT Rx handlers */
-	if (cfg(priv)->bt_params)
+	if (priv->cfg->bt_params)
 		iwlagn_bt_rx_handler_setup(priv);
 }
 
@@ -1185,9 +1157,9 @@
 			err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
 		} else {
 			/* No handling needed */
-			IWL_DEBUG_RX(priv,
-				"No handler needed for %s, 0x%02x\n",
-				get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+			IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
+				     iwl_dvm_get_cmd_string(pkt->hdr.cmd),
+				     pkt->hdr.cmd);
 		}
 	}
 	return err;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 2e1a317..74fbee6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -24,12 +24,79 @@
  *
  *****************************************************************************/
 
+#include <linux/etherdevice.h>
 #include "iwl-dev.h"
 #include "iwl-agn.h"
-#include "iwl-core.h"
 #include "iwl-agn-calib.h"
 #include "iwl-trans.h"
-#include "iwl-shared.h"
+#include "iwl-modparams.h"
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void iwl_connection_init_rx_config(struct iwl_priv *priv,
+				   struct iwl_rxon_context *ctx)
+{
+	const struct iwl_channel_info *ch_info;
+
+	memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+	if (!ctx->vif) {
+		ctx->staging.dev_type = ctx->unused_devtype;
+	} else
+	switch (ctx->vif->type) {
+	case NL80211_IFTYPE_AP:
+		ctx->staging.dev_type = ctx->ap_devtype;
+		break;
+
+	case NL80211_IFTYPE_STATION:
+		ctx->staging.dev_type = ctx->station_devtype;
+		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+		break;
+
+	case NL80211_IFTYPE_ADHOC:
+		ctx->staging.dev_type = ctx->ibss_devtype;
+		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
+						  RXON_FILTER_ACCEPT_GRP_MSK;
+		break;
+
+	default:
+		IWL_ERR(priv, "Unsupported interface type %d\n",
+			ctx->vif->type);
+		break;
+	}
+
+#if 0
+	/* TODO:  Figure out when short_preamble would be set and cache from
+	 * that */
+	if (!hw_to_local(priv->hw)->short_preamble)
+		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+	else
+		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+	ch_info = iwl_get_channel_info(priv, priv->band,
+				       le16_to_cpu(ctx->active.channel));
+
+	if (!ch_info)
+		ch_info = &priv->channel_info[0];
+
+	ctx->staging.channel = cpu_to_le16(ch_info->channel);
+	priv->band = ch_info->band;
+
+	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
+
+	/* clear both MIX and PURE40 mode flag */
+	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
+					RXON_FLG_CHANNEL_MODE_PURE_40);
+	if (ctx->vif)
+		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
+}
 
 static int iwlagn_disable_bss(struct iwl_priv *priv,
 			      struct iwl_rxon_context *ctx,
@@ -59,9 +126,12 @@
 	__le32 old_filter = send->filter_flags;
 	u8 old_dev_type = send->dev_type;
 	int ret;
+	static const u8 deactivate_cmd[] = {
+		REPLY_WIPAN_DEACTIVATION_COMPLETE
+	};
 
 	iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
-				   REPLY_WIPAN_DEACTIVATION_COMPLETE,
+				   deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
 				   NULL, NULL);
 
 	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -101,8 +171,7 @@
 	return ret;
 }
 
-static void iwlagn_update_qos(struct iwl_priv *priv,
-			      struct iwl_rxon_context *ctx)
+void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 	int ret;
 
@@ -129,8 +198,8 @@
 		IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
 }
 
-static int iwlagn_update_beacon(struct iwl_priv *priv,
-				struct ieee80211_vif *vif)
+int iwlagn_update_beacon(struct iwl_priv *priv,
+			 struct ieee80211_vif *vif)
 {
 	lockdep_assert_held(&priv->mutex);
 
@@ -186,6 +255,109 @@
 	return ret;
 }
 
+static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+	u16 new_val;
+	u16 beacon_factor;
+
+	/*
+	 * If mac80211 hasn't given us a beacon interval, program
+	 * the default into the device (not checking this here
+	 * would cause the adjustment below to return the maximum
+	 * value, which may break PAN.)
+	 */
+	if (!beacon_val)
+		return DEFAULT_BEACON_INTERVAL;
+
+	/*
+	 * If the beacon interval we obtained from the peer
+	 * is too large, we'll have to wake up more often
+	 * (and in IBSS case, we'll beacon too much)
+	 *
+	 * For example, if max_beacon_val is 4096, and the
+	 * requested beacon interval is 7000, we'll have to
+	 * use 3500 to be able to wake up on the beacons.
+	 *
+	 * This could badly influence beacon detection stats.
+	 */
+
+	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+	new_val = beacon_val / beacon_factor;
+
+	if (!new_val)
+		new_val = max_beacon_val;
+
+	return new_val;
+}
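
iwl_adjust_beacon_interval() divides an oversized interval by the smallest
integer factor that brings it under the device limit, exactly as the 7000/4096
example in the comment describes. A quick standalone check of that arithmetic;
the default value used here is only a placeholder:

#include <stdio.h>

#define DEFAULT_INTERVAL 200	/* illustrative default, in TU */

static unsigned short adjust_beacon_interval(unsigned short beacon_val,
					     unsigned short max_beacon_val)
{
	unsigned short beacon_factor, new_val;

	if (!beacon_val)
		return DEFAULT_INTERVAL;

	/* Smallest integer divisor that gets the interval under the limit. */
	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}

int main(void)
{
	printf("%u\n", adjust_beacon_interval(7000, 4096));	/* 3500 */
	printf("%u\n", adjust_beacon_interval(100, 4096));	/* unchanged */
	printf("%u\n", adjust_beacon_interval(0, 4096));	/* default */
	return 0;
}
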
+
+static int iwl_send_rxon_timing(struct iwl_priv *priv,
+				struct iwl_rxon_context *ctx)
+{
+	u64 tsf;
+	s32 interval_tm, rem;
+	struct ieee80211_conf *conf = NULL;
+	u16 beacon_int;
+	struct ieee80211_vif *vif = ctx->vif;
+
+	conf = &priv->hw->conf;
+
+	lockdep_assert_held(&priv->mutex);
+
+	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
+
+	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
+	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+	beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+	/*
+	 * TODO: For IBSS we need to get atim_window from mac80211,
+	 *	 for now just always use 0
+	 */
+	ctx->timing.atim_window = 0;
+
+	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
+	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
+	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
+	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
+	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
+		ctx->timing.beacon_interval =
+			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
+		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
+	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
+		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
+		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
+		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
+		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
+		    !ctx->vif->bss_conf.beacon_int)) {
+		ctx->timing.beacon_interval =
+			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
+		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
+	} else {
+		beacon_int = iwl_adjust_beacon_interval(beacon_int,
+			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
+		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+	}
+
+	ctx->beacon_int = beacon_int;
+
+	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
+	interval_tm = beacon_int * TIME_UNIT;
+	rem = do_div(tsf, interval_tm);
+	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
+
+	IWL_DEBUG_ASSOC(priv,
+			"beacon interval %d beacon timer %d beacon tim %d\n",
+			le16_to_cpu(ctx->timing.beacon_interval),
+			le32_to_cpu(ctx->timing.beacon_init_val),
+			le16_to_cpu(ctx->timing.atim_window));
+
+	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
+				CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
+}
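
The timing command programs beacon_init_val as the time left until the next
beacon boundary: the TSF is reduced modulo the interval (in microseconds) and
the remainder is subtracted from the interval. A standalone check of that
calculation, assuming the usual 1024 us time unit:

#include <stdio.h>

#define TIME_UNIT 1024ULL	/* one TU in microseconds */

/* Microseconds from 'tsf' until the next beacon boundary. */
static unsigned long long time_to_next_beacon(unsigned long long tsf,
					      unsigned int beacon_int_tu)
{
	unsigned long long interval = beacon_int_tu * TIME_UNIT;

	return interval - (tsf % interval);
}

int main(void)
{
	/* 100 TU = 102400 us; a TSF of 250000 us is 45200 us into the third
	 * interval, so 57200 us remain until the next beacon. */
	printf("%llu\n", time_to_next_beacon(250000ULL, 100));
	return 0;
}
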
+
 static int iwlagn_rxon_disconn(struct iwl_priv *priv,
 			       struct iwl_rxon_context *ctx)
 {
@@ -228,6 +400,64 @@
 	return 0;
 }
 
+static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
+{
+	int ret;
+	s8 prev_tx_power;
+	bool defer;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+	if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED)
+		return 0;
+
+	lockdep_assert_held(&priv->mutex);
+
+	if (priv->tx_power_user_lmt == tx_power && !force)
+		return 0;
+
+	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
+		IWL_WARN(priv,
+			 "Requested user TXPOWER %d below lower limit %d.\n",
+			 tx_power,
+			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
+		return -EINVAL;
+	}
+
+	if (tx_power > priv->tx_power_device_lmt) {
+		IWL_WARN(priv,
+			"Requested user TXPOWER %d above upper limit %d.\n",
+			 tx_power, priv->tx_power_device_lmt);
+		return -EINVAL;
+	}
+
+	if (!iwl_is_ready_rf(priv))
+		return -EIO;
+
+	/* scan complete and commit_rxon use the tx_power_next value,
+	 * so it always needs to be updated for the newest request */
+	priv->tx_power_next = tx_power;
+
+	/* do not set tx power when scanning or channel changing */
+	defer = test_bit(STATUS_SCANNING, &priv->status) ||
+		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+	if (defer && !force) {
+		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
+		return 0;
+	}
+
+	prev_tx_power = priv->tx_power_user_lmt;
+	priv->tx_power_user_lmt = tx_power;
+
+	ret = iwlagn_send_tx_power(priv);
+
+	/* if fail to set tx_power, restore the orig. tx power */
+	if (ret) {
+		priv->tx_power_user_lmt = prev_tx_power;
+		priv->tx_power_next = prev_tx_power;
+	}
+	return ret;
+}
+
 static int iwlagn_rxon_connect(struct iwl_priv *priv,
 			       struct iwl_rxon_context *ctx)
 {
@@ -295,9 +525,9 @@
 	}
 
 	if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
-	    cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode)
+	    priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
 		ieee80211_request_smps(ctx->vif,
-				       cfg(priv)->ht_params->smps_mode);
+				       priv->cfg->ht_params->smps_mode);
 
 	return 0;
 }
@@ -309,7 +539,7 @@
 	int slot0 = 300, slot1 = 0;
 	int ret;
 
-	if (priv->shrd->valid_contexts == BIT(IWL_RXON_CTX_BSS))
+	if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
 		return 0;
 
 	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
@@ -394,6 +624,414 @@
 	return ret;
 }
 
+static void _iwl_set_rxon_ht(struct iwl_priv *priv,
+			     struct iwl_ht_config *ht_conf,
+			     struct iwl_rxon_context *ctx)
+{
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
+
+	if (!ctx->ht.enabled) {
+		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
+			RXON_FLG_HT40_PROT_MSK |
+			RXON_FLG_HT_PROT_MSK);
+		return;
+	}
+
+	/* FIXME: if the definition of ht.protection changes, a "translation"
+	 * will be needed for rxon->flags
+	 */
+	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
+				   RXON_FLG_HT_OPERATING_MODE_POS);
+
+	/* Set up channel bandwidth:
+	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+	/* clear the HT channel mode before set the mode */
+	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
+		/* pure ht40 */
+		if (ctx->ht.protection ==
+		    IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+			/*
+			 * Note: control channel is opposite of extension
+			 * channel
+			 */
+			switch (ctx->ht.extension_chan_offset) {
+			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+				rxon->flags &=
+					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+				rxon->flags |=
+					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				break;
+			}
+		} else {
+			/*
+			 * Note: control channel is opposite of extension
+			 * channel
+			 */
+			switch (ctx->ht.extension_chan_offset) {
+			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+				rxon->flags &=
+					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+			default:
+				/*
+				 * channel location only valid if in Mixed
+				 * mode
+				 */
+				IWL_ERR(priv,
+					"invalid extension channel offset\n");
+				break;
+			}
+		}
+	} else {
+		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+	}
+
+	iwlagn_set_rxon_chain(priv, ctx);
+
+	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
+			"extension channel offset 0x%x\n",
+			le32_to_cpu(rxon->flags), ctx->ht.protection,
+			ctx->ht.extension_chan_offset);
+}
+
+void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
+{
+	struct iwl_rxon_context *ctx;
+
+	for_each_context(priv, ctx)
+		_iwl_set_rxon_ht(priv, ht_conf, ctx);
+}
+
+/**
+ * iwl_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+			 struct iwl_rxon_context *ctx)
+{
+	enum ieee80211_band band = ch->band;
+	u16 channel = ch->hw_value;
+
+	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
+	    (priv->band == band))
+		return;
+
+	ctx->staging.channel = cpu_to_le16(channel);
+	if (band == IEEE80211_BAND_5GHZ)
+		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+	else
+		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+	priv->band = band;
+
+	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
+
+}
+
+void iwl_set_flags_for_band(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    enum ieee80211_band band,
+			    struct ieee80211_vif *vif)
+{
+	if (band == IEEE80211_BAND_5GHZ) {
+		ctx->staging.flags &=
+		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
+		      | RXON_FLG_CCK_MSK);
+		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+	} else {
+		/* Copied from iwl_post_associate() */
+		if (vif && vif->bss_conf.use_short_slot)
+			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+		else
+			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+	}
+}
+
+static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
+				  struct iwl_rxon_context *ctx, int hw_decrypt)
+{
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
+
+	if (hw_decrypt)
+		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+	else
+		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+
+/* validate RXON structure is valid */
+static int iwl_check_rxon_cmd(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx)
+{
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
+	u32 errors = 0;
+
+	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
+			errors |= BIT(0);
+		}
+		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+			IWL_WARN(priv, "check 2.4G: wrong radar\n");
+			errors |= BIT(1);
+		}
+	} else {
+		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+			IWL_WARN(priv, "check 5.2G: not short slot!\n");
+			errors |= BIT(2);
+		}
+		if (rxon->flags & RXON_FLG_CCK_MSK) {
+			IWL_WARN(priv, "check 5.2G: CCK!\n");
+			errors |= BIT(3);
+		}
+	}
+	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+		IWL_WARN(priv, "mac/bssid mcast!\n");
+		errors |= BIT(4);
+	}
+
+	/* make sure basic rates 6Mbps and 1Mbps are supported */
+	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
+	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
+		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
+		errors |= BIT(5);
+	}
+
+	if (le16_to_cpu(rxon->assoc_id) > 2007) {
+		IWL_WARN(priv, "aid > 2007\n");
+		errors |= BIT(6);
+	}
+
+	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
+			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+		IWL_WARN(priv, "CCK and short slot\n");
+		errors |= BIT(7);
+	}
+
+	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
+			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+		IWL_WARN(priv, "CCK and auto detect");
+		errors |= BIT(8);
+	}
+
+	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
+			    RXON_FLG_TGG_PROTECT_MSK)) ==
+			    RXON_FLG_TGG_PROTECT_MSK) {
+		IWL_WARN(priv, "TGg but no auto-detect\n");
+		errors |= BIT(9);
+	}
+
+	if (rxon->channel == 0) {
+		IWL_WARN(priv, "zero channel is invalid\n");
+		errors |= BIT(10);
+	}
+
+	WARN(errors, "Invalid RXON (%#x), channel %d",
+	     errors, le16_to_cpu(rxon->channel));
+
+	return errors ? -EINVAL : 0;
+}
+
+/**
+ * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @priv: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int iwl_full_rxon_required(struct iwl_priv *priv,
+			   struct iwl_rxon_context *ctx)
+{
+	const struct iwl_rxon_cmd *staging = &ctx->staging;
+	const struct iwl_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond)							\
+	if ((cond)) {							\
+		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
+		return 1;						\
+	}
+
+#define CHK_NEQ(c1, c2)						\
+	if ((c1) != (c2)) {					\
+		IWL_DEBUG_INFO(priv, "need full RXON - "	\
+			       #c1 " != " #c2 " - %d != %d\n",	\
+			       (c1), (c2));			\
+		return 1;					\
+	}
+
+	/* These items are only settable from the full RXON command */
+	CHK(!iwl_is_associated_ctx(ctx));
+	CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
+	CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
+	CHK(!ether_addr_equal(staging->wlap_bssid_addr,
+			      active->wlap_bssid_addr));
+	CHK_NEQ(staging->dev_type, active->dev_type);
+	CHK_NEQ(staging->channel, active->channel);
+	CHK_NEQ(staging->air_propagation, active->air_propagation);
+	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+		active->ofdm_ht_single_stream_basic_rates);
+	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+		active->ofdm_ht_dual_stream_basic_rates);
+	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
+		active->ofdm_ht_triple_stream_basic_rates);
+	CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+	 * be updated with the RXON_ASSOC command -- however only some
+	 * flag transitions are allowed using RXON_ASSOC */
+
+	/* Check if we are not switching bands */
+	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+		active->flags & RXON_FLG_BAND_24G_MSK);
+
+	/* Check if we are switching association toggle */
+	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+		active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+	return 0;
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+			     enum iwl_rxon_context_id ctxid)
+{
+	struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
+
+	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
+	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
+			le16_to_cpu(rxon->channel));
+	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n",
+			le32_to_cpu(rxon->flags));
+	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
+			le32_to_cpu(rxon->filter_flags));
+	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
+	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
+			rxon->ofdm_basic_rates);
+	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
+			rxon->cck_basic_rates);
+	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
+	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
+			le16_to_cpu(rxon->assoc_id));
+}
+#endif
+
+static void iwl_calc_basic_rates(struct iwl_priv *priv,
+				 struct iwl_rxon_context *ctx)
+{
+	int lowest_present_ofdm = 100;
+	int lowest_present_cck = 100;
+	u8 cck = 0;
+	u8 ofdm = 0;
+
+	if (ctx->vif) {
+		struct ieee80211_supported_band *sband;
+		unsigned long basic = ctx->vif->bss_conf.basic_rates;
+		int i;
+
+		sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band];
+
+		for_each_set_bit(i, &basic, BITS_PER_LONG) {
+			int hw = sband->bitrates[i].hw_value;
+			if (hw >= IWL_FIRST_OFDM_RATE) {
+				ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+				if (lowest_present_ofdm > hw)
+					lowest_present_ofdm = hw;
+			} else {
+				BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+				cck |= BIT(hw);
+				if (lowest_present_cck > hw)
+					lowest_present_cck = hw;
+			}
+		}
+	}
+
+	/*
+	 * Now we've got the basic rates as bitmaps in the ofdm and cck
+	 * variables. This isn't sufficient though, as there might not
+	 * be all the right rates in the bitmap. E.g. if the only basic
+	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+	 *
+	 *    [...] a STA responding to a received frame shall transmit
+	 *    its Control Response frame [...] at the highest rate in the
+	 *    BSSBasicRateSet parameter that is less than or equal to the
+	 *    rate of the immediately previous frame in the frame exchange
+	 *    sequence ([...]) and that is of the same modulation class
+	 *    ([...]) as the received frame. If no rate contained in the
+	 *    BSSBasicRateSet parameter meets these conditions, then the
+	 *    control frame sent in response to a received frame shall be
+	 *    transmitted at the highest mandatory rate of the PHY that is
+	 *    less than or equal to the rate of the received frame, and
+	 *    that is of the same modulation class as the received frame.
+	 *
+	 * As a consequence, we need to add all mandatory rates that are
+	 * lower than all of the basic rates to these bitmaps.
+	 */
+
+	if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
+		ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE;
+	if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
+		ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE;
+	/* 6M already there or needed so always add */
+	ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE;
+
+	/*
+	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+	 * Note, however:
+	 *  - if no CCK rates are basic, it must be ERP since there must
+	 *    be some basic rates at all, so they're OFDM => ERP PHY
+	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
+	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+	 *  - if 5.5M is basic, 1M and 2M are mandatory
+	 *  - if 2M is basic, 1M is mandatory
+	 *  - if 1M is basic, that's the only valid ACK rate.
+	 * As a consequence, it's not as complicated as it sounds, just add
+	 * any lower rates to the ACK rate bitmap.
+	 */
+	if (IWL_RATE_11M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_5M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_2M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
+	/* 1M already there or needed so always add */
+	cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
+
+	IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n",
+		       cck, ofdm);
+
+	/* "basic_rates" is a misnomer here -- should be called ACK rates */
+	ctx->staging.cck_basic_rates = cck;
+	ctx->staging.ofdm_basic_rates = ofdm;
+}
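
The long comment in iwl_calc_basic_rates() boils down to one rule: after
collecting the basic-rate bitmaps, OR in every mandatory rate of the same
modulation class that is lower than the lowest basic rate. A worked CCK-only
example of that rule; the bit positions and helper are illustrative:

#include <stdio.h>

/* CCK rate bit positions, lowest rate at bit 0 (illustrative). */
enum { RATE_1M = 0, RATE_2M, RATE_5M, RATE_11M };

/* Add every mandatory CCK rate below the lowest basic CCK rate. */
static unsigned char fill_cck_ack_rates(unsigned char basic_cck,
					int lowest_present_cck)
{
	unsigned char cck = basic_cck;

	if (RATE_11M < lowest_present_cck)
		cck |= 1 << RATE_11M;
	if (RATE_5M < lowest_present_cck)
		cck |= 1 << RATE_5M;
	if (RATE_2M < lowest_present_cck)
		cck |= 1 << RATE_2M;
	/* 1M is always required. */
	cck |= 1 << RATE_1M;

	return cck;
}

int main(void)
{
	/* Basic rates {5.5M, 11M}: the lowest basic CCK rate is 5.5M, so the
	 * mandatory 1M and 2M are added and the result is 0x0f. */
	printf("0x%02x\n",
	       fill_cck_ack_rates((1 << RATE_5M) | (1 << RATE_11M), RATE_5M));
	return 0;
}
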
+
 /**
  * iwlagn_commit_rxon - commit staging_rxon to hardware
  *
@@ -433,11 +1071,14 @@
 	/* always get timestamp with Rx frame */
 	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
 
+	/* recalculate basic rates */
+	iwl_calc_basic_rates(priv, ctx);
+
 	/*
 	 * force CTS-to-self frames protection if RTS-CTS is not preferred
 	 * one aggregation protection method
 	 */
-	if (!hw_params(priv).use_rts_for_aggregation)
+	if (!priv->hw_params.use_rts_for_aggregation)
 		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
 
 	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
@@ -489,7 +1130,7 @@
 		return 0;
 	}
 
-	iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto);
+	iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto);
 
 	IWL_DEBUG_INFO(priv,
 		       "Going to commit RXON\n"
@@ -547,7 +1188,7 @@
 	const struct iwl_channel_info *ch_info;
 	int ret = 0;
 
-	IWL_DEBUG_MAC80211(priv, "enter: changed %#x", changed);
+	IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
 
 	mutex_lock(&priv->mutex);
 
@@ -621,13 +1262,6 @@
 		}
 
 		iwl_update_bcast_stations(priv);
-
-		/*
-		 * The list of supported rates and rate mask can be different
-		 * for each band; since the band may have changed, reset
-		 * the rate mask to what mac80211 lists.
-		 */
-		iwl_set_rate(priv);
 	}
 
 	if (changed & (IEEE80211_CONF_CHANGE_PS |
@@ -656,9 +1290,9 @@
 	return ret;
 }
 
-static void iwlagn_check_needed_chains(struct iwl_priv *priv,
-				       struct iwl_rxon_context *ctx,
-				       struct ieee80211_bss_conf *bss_conf)
+void iwlagn_check_needed_chains(struct iwl_priv *priv,
+				struct iwl_rxon_context *ctx,
+				struct ieee80211_bss_conf *bss_conf)
 {
 	struct ieee80211_vif *vif = ctx->vif;
 	struct iwl_rxon_context *tmp;
@@ -750,11 +1384,14 @@
 	ht_conf->single_chain_sufficient = !need_multiple;
 }
 
-static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
+void iwlagn_chain_noise_reset(struct iwl_priv *priv)
 {
 	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
 	int ret;
 
+	if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
+		return;
+
 	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
 	    iwl_is_any_associated(priv)) {
 		struct iwl_calib_chain_noise_reset_cmd cmd;
@@ -907,8 +1544,7 @@
 			iwl_power_update_mode(priv, false);
 
 		/* Enable RX differential gain and sensitivity calibrations */
-		if (!priv->disable_chain_noise_cal)
-			iwlagn_chain_noise_reset(priv);
+		iwlagn_chain_noise_reset(priv);
 		priv->start_calib = 1;
 	}
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index c417560..b31584e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -30,10 +30,11 @@
 #include <net/mac80211.h>
 
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-agn.h"
 #include "iwl-trans.h"
 
+const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
 static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
 {
 	lockdep_assert_held(&priv->sta_lock);
@@ -170,6 +171,50 @@
 	return cmd.handler_status;
 }
 
+static bool iwl_is_channel_extension(struct iwl_priv *priv,
+				     enum ieee80211_band band,
+				     u16 channel, u8 extension_chan_offset)
+{
+	const struct iwl_channel_info *ch_info;
+
+	ch_info = iwl_get_channel_info(priv, band, channel);
+	if (!is_channel_valid(ch_info))
+		return false;
+
+	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+		return !(ch_info->ht40_extension_channel &
+					IEEE80211_CHAN_NO_HT40PLUS);
+	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+		return !(ch_info->ht40_extension_channel &
+					IEEE80211_CHAN_NO_HT40MINUS);
+
+	return false;
+}
+
+bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    struct ieee80211_sta_ht_cap *ht_cap)
+{
+	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+		return false;
+
+	/*
+	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
+	 * because the bit will not be set in the pure 40 MHz case.
+	 */
+	if (ht_cap && !ht_cap->ht_supported)
+		return false;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (priv->disable_ht40)
+		return false;
+#endif
+
+	return iwl_is_channel_extension(priv, priv->band,
+			le16_to_cpu(ctx->staging.channel),
+			ctx->ht.extension_chan_offset);
+}
+
 static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
 				  struct ieee80211_sta *sta,
 				  struct iwl_rxon_context *ctx,
@@ -277,8 +322,8 @@
 		sta_id = ctx->bcast_sta_id;
 	else
 		for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) {
-			if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
-						addr)) {
+			if (ether_addr_equal(priv->stations[i].sta.sta.addr,
+					     addr)) {
 				sta_id = i;
 				break;
 			}
@@ -308,7 +353,7 @@
 
 	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
 	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
-	    !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
+	    ether_addr_equal(priv->stations[sta_id].sta.sta.addr, addr)) {
 		IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
 				"adding again.\n", sta_id, addr);
 		return sta_id;
@@ -581,6 +626,56 @@
 	spin_unlock_bh(&priv->sta_lock);
 }
 
+static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			    u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
+{
+	int i, r;
+	u32 rate_flags = 0;
+	__le32 rate_n_flags;
+
+	lockdep_assert_held(&priv->mutex);
+
+	memset(link_cmd, 0, sizeof(*link_cmd));
+
+	/* Set up the rate scaling to start at selected rate, fall back
+	 * all the way down to 1M in IEEE order, and then spin on 1M */
+	if (priv->band == IEEE80211_BAND_5GHZ)
+		r = IWL_RATE_6M_INDEX;
+	else if (ctx && ctx->vif && ctx->vif->p2p)
+		r = IWL_RATE_6M_INDEX;
+	else
+		r = IWL_RATE_1M_INDEX;
+
+	if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
+				RATE_MCS_ANT_POS;
+	rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+	link_cmd->general_params.single_stream_ant_msk =
+			first_antenna(priv->hw_params.valid_tx_ant);
+
+	link_cmd->general_params.dual_stream_ant_msk =
+		priv->hw_params.valid_tx_ant &
+		~first_antenna(priv->hw_params.valid_tx_ant);
+	if (!link_cmd->general_params.dual_stream_ant_msk) {
+		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+	} else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+		link_cmd->general_params.dual_stream_ant_msk =
+			priv->hw_params.valid_tx_ant;
+	}
+
+	link_cmd->agg_params.agg_dis_start_th =
+		LINK_QUAL_AGG_DISABLE_START_DEF;
+	link_cmd->agg_params.agg_time_limit =
+		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+	link_cmd->sta_id = sta_id;
+}
+
 /**
  * iwl_clear_ucode_stations - clear ucode station table bits
  *
@@ -841,56 +936,6 @@
 }
 
 
-void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-		     u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
-{
-	int i, r;
-	u32 rate_flags = 0;
-	__le32 rate_n_flags;
-
-	lockdep_assert_held(&priv->mutex);
-
-	memset(link_cmd, 0, sizeof(*link_cmd));
-
-	/* Set up the rate scaling to start at selected rate, fall back
-	 * all the way down to 1M in IEEE order, and then spin on 1M */
-	if (priv->band == IEEE80211_BAND_5GHZ)
-		r = IWL_RATE_6M_INDEX;
-	else if (ctx && ctx->vif && ctx->vif->p2p)
-		r = IWL_RATE_6M_INDEX;
-	else
-		r = IWL_RATE_1M_INDEX;
-
-	if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
-		rate_flags |= RATE_MCS_CCK_MSK;
-
-	rate_flags |= first_antenna(hw_params(priv).valid_tx_ant) <<
-				RATE_MCS_ANT_POS;
-	rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
-	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
-		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
-
-	link_cmd->general_params.single_stream_ant_msk =
-			first_antenna(hw_params(priv).valid_tx_ant);
-
-	link_cmd->general_params.dual_stream_ant_msk =
-		hw_params(priv).valid_tx_ant &
-		~first_antenna(hw_params(priv).valid_tx_ant);
-	if (!link_cmd->general_params.dual_stream_ant_msk) {
-		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
-	} else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
-		link_cmd->general_params.dual_stream_ant_msk =
-			hw_params(priv).valid_tx_ant;
-	}
-
-	link_cmd->agg_params.agg_dis_start_th =
-		LINK_QUAL_AGG_DISABLE_START_DEF;
-	link_cmd->agg_params.agg_time_limit =
-		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
-	link_cmd->sta_id = sta_id;
-}
-
 static struct iwl_link_quality_cmd *
 iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		 u8 sta_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index baaf5ba..a5cfe0a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -37,11 +37,11 @@
 #include "iwl-agn.h"
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-commands.h"
 #include "iwl-debug.h"
 #include "iwl-agn-tt.h"
+#include "iwl-modparams.h"
 
 /* default Thermal Throttling transaction table
  * Current state   |         Throttling Down               |  Throttling Up
@@ -179,19 +179,19 @@
 
 	if (tt->state == IWL_TI_CT_KILL) {
 		if (priv->thermal_throttle.ct_kill_toggle) {
-			iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
+			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
 				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
 			priv->thermal_throttle.ct_kill_toggle = false;
 		} else {
-			iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET,
+			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
 				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
 			priv->thermal_throttle.ct_kill_toggle = true;
 		}
-		iwl_read32(trans(priv), CSR_UCODE_DRV_GP1);
-		spin_lock_irqsave(&trans(priv)->reg_lock, flags);
-		if (likely(iwl_grab_nic_access(trans(priv))))
-			iwl_release_nic_access(trans(priv));
-		spin_unlock_irqrestore(&trans(priv)->reg_lock, flags);
+		iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
+		spin_lock_irqsave(&priv->trans->reg_lock, flags);
+		if (likely(iwl_grab_nic_access(priv->trans)))
+			iwl_release_nic_access(priv->trans);
+		spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
 
 		/* Reschedule the ct_kill timer to occur in
 		 * CT_KILL_EXIT_DURATION seconds to ensure we get a
@@ -632,7 +632,7 @@
 	INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
 	INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
 
-	if (cfg(priv)->base_params->adv_thermal_throttle) {
+	if (priv->cfg->base_params->adv_thermal_throttle) {
 		IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
 		tt->restriction = kcalloc(IWL_TI_STATE_MAX,
 					  sizeof(struct iwl_tt_restriction),
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 34adedc7..f2e9f29 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -34,12 +34,22 @@
 #include <linux/ieee80211.h>
 
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-agn-hw.h"
 #include "iwl-agn.h"
 #include "iwl-trans.h"
 
+static const u8 tid_to_ac[] = {
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VO,
+};
+
 static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
 				     struct ieee80211_tx_info *info,
 				     __le16 fc, __le32 *tx_flags)
@@ -74,8 +84,8 @@
 	else if (ieee80211_is_back_req(fc))
 		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
 	else if (info->band == IEEE80211_BAND_2GHZ &&
-		 cfg(priv)->bt_params &&
-		 cfg(priv)->bt_params->advanced_bt_coexist &&
+		 priv->cfg->bt_params &&
+		 priv->cfg->bt_params->advanced_bt_coexist &&
 		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
 		 ieee80211_is_reassoc_req(fc) ||
 		 skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -192,15 +202,15 @@
 		rate_flags |= RATE_MCS_CCK_MSK;
 
 	/* Set up antennas */
-	 if (cfg(priv)->bt_params &&
-	     cfg(priv)->bt_params->advanced_bt_coexist &&
+	 if (priv->cfg->bt_params &&
+	     priv->cfg->bt_params->advanced_bt_coexist &&
 	     priv->bt_full_concurrent) {
 		/* operated as 1x1 in full concurrency mode */
 		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
-				first_antenna(hw_params(priv).valid_tx_ant));
+				first_antenna(priv->hw_params.valid_tx_ant));
 	} else
 		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
-						hw_params(priv).valid_tx_ant);
+						priv->hw_params.valid_tx_ant);
 	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
 
 	/* Set the rate in the TX cmd */
@@ -293,6 +303,7 @@
 	u16 len, seq_number = 0;
 	u8 sta_id, tid = IWL_MAX_TID_COUNT;
 	bool is_agg = false;
+	int txq_id;
 
 	if (info->control.vif)
 		ctx = iwl_rxon_ctx_from_vif(info->control.vif);
@@ -384,12 +395,9 @@
 
 	/* TODO need this for burst mode later on */
 	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
-	iwl_dbg_log_tx_data_frame(priv, len, hdr);
 
 	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
 
-	iwl_update_stats(priv, true, fc, len);
-
 	memset(&info->status, 0, sizeof(info->status));
 
 	info->driver_data[0] = ctx;
@@ -435,7 +443,31 @@
 	/* Copy MAC header from skb into command buffer */
 	memcpy(tx_cmd->hdr, hdr, hdr_len);
 
-	if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid))
+	if (is_agg)
+		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
+	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+		/*
+		 * Send this frame after DTIM -- there's a special queue
+		 * reserved for this for contexts that support AP mode.
+		 */
+		txq_id = ctx->mcast_queue;
+
+		/*
+		 * The microcode will clear the more data
+		 * bit in the last frame it transmits.
+		 */
+		hdr->frame_control |=
+			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+		txq_id = IWL_AUX_QUEUE;
+	else
+		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+	WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
+	WARN_ON_ONCE(is_agg &&
+		     priv->queue_to_mac80211[txq_id] != info->hw_queue);
+
+	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
 		goto drop_unlock_sta;
 
 	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
@@ -464,11 +496,33 @@
 	return -1;
 }
 
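+/*
+ * Reserve a free HW aggregation queue and remember which mac80211 queue
+ * (mq) it backs; iwlagn_dealloc_agg_txq() below releases the reservation.
+ */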
+static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
+{
+	int q;
+
+	for (q = IWLAGN_FIRST_AMPDU_QUEUE;
+	     q < priv->cfg->base_params->num_of_queues; q++) {
+		if (!test_and_set_bit(q, priv->agg_q_alloc)) {
+			priv->queue_to_mac80211[q] = mq;
+			return q;
+		}
+	}
+
+	return -ENOSPC;
+}
+
+static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
+{
+	clear_bit(q, priv->agg_q_alloc);
+	priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
+}
+
 int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid)
 {
 	struct iwl_tid_data *tid_data;
-	int sta_id;
+	int sta_id, txq_id;
+	enum iwl_agg_state agg_state;
 
 	sta_id = iwl_sta_id(sta);
 
@@ -480,6 +534,7 @@
 	spin_lock_bh(&priv->sta_lock);
 
 	tid_data = &priv->tid_data[sta_id][tid];
+	txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
 
 	switch (priv->tid_data[sta_id][tid].agg.state) {
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -491,6 +546,13 @@
 		*/
 		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
 		goto turn_off;
+	case IWL_AGG_STARTING:
+		/*
+		 * This can happen when the session is stopped before
+		 * we receive the ADDBA response.
+		 */
+		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
+		goto turn_off;
 	case IWL_AGG_ON:
 		break;
 	default:
@@ -504,9 +566,13 @@
 	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
 
 	/* There are still packets for this RA / TID in the HW */
-	if (tid_data->agg.ssn != tid_data->next_reclaimed) {
+	if (!test_bit(txq_id, priv->agg_q_alloc)) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
+			sta_id, tid, txq_id);
+	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
 		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
-				    "next_recl = %d",
+				    "next_recl = %d\n",
 				    tid_data->agg.ssn,
 				    tid_data->next_reclaimed);
 		priv->tid_data[sta_id][tid].agg.state =
@@ -515,14 +581,22 @@
 		return 0;
 	}
 
-	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
 			    tid_data->agg.ssn);
 turn_off:
+	agg_state = priv->tid_data[sta_id][tid].agg.state;
 	priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
 
 	spin_unlock_bh(&priv->sta_lock);
 
-	iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+	if (test_bit(txq_id, priv->agg_q_alloc)) {
+		/* If the transport didn't know that we wanted to start
+		 * agreggation, don't tell it that we want to stop them
+		 * aggregation, don't tell it that we want to stop it
+		if (agg_state != IWL_AGG_STARTING)
+			iwl_trans_tx_agg_disable(priv->trans, txq_id);
+		iwlagn_dealloc_agg_txq(priv, txq_id);
+	}
 
 	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 
@@ -532,9 +606,9 @@
 int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 	struct iwl_tid_data *tid_data;
-	int sta_id;
-	int ret;
+	int sta_id, txq_id, ret;
 
 	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
 		     sta->addr, tid);
@@ -552,36 +626,37 @@
 		return -ENXIO;
 	}
 
+	txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
+	if (txq_id < 0) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"No free aggregation queue for %pM/%d\n",
+			sta->addr, tid);
+		return txq_id;
+	}
+
 	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
 	if (ret)
 		return ret;
 
 	spin_lock_bh(&priv->sta_lock);
-
 	tid_data = &priv->tid_data[sta_id][tid];
 	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+	tid_data->agg.txq_id = txq_id;
 
 	*ssn = tid_data->agg.ssn;
 
-	ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
-	if (ret) {
-		spin_unlock_bh(&priv->sta_lock);
-		return ret;
-	}
-
 	if (*ssn == tid_data->next_reclaimed) {
-		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
 				    tid_data->agg.ssn);
-		tid_data->agg.state = IWL_AGG_ON;
+		tid_data->agg.state = IWL_AGG_STARTING;
 		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 	} else {
 		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
-				    "next_reclaimed = %d",
+				    "next_reclaimed = %d\n",
 				    tid_data->agg.ssn,
 				    tid_data->next_reclaimed);
 		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
 	}
-
 	spin_unlock_bh(&priv->sta_lock);
 
 	return ret;
@@ -592,15 +667,21 @@
 {
 	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
 	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+	int q, fifo;
 	u16 ssn;
 
 	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
 
 	spin_lock_bh(&priv->sta_lock);
 	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
+	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
+	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
 	spin_unlock_bh(&priv->sta_lock);
 
-	iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
+	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
+
+	iwl_trans_tx_agg_setup(priv->trans, q, fifo,
+			       sta_priv->sta_id, tid,
 			       buf_size, ssn);
 
 	/*
@@ -623,7 +704,7 @@
 	sta_priv->max_agg_bufsize =
 		min(sta_priv->max_agg_bufsize, buf_size);
 
-	if (hw_params(priv).use_rts_for_aggregation) {
+	if (priv->hw_params.use_rts_for_aggregation) {
 		/*
 		 * switch to RTS/CTS if it is the prefer protection
 		 * method for HT traffic
@@ -666,7 +747,9 @@
 			IWL_DEBUG_TX_QUEUES(priv,
 				"Can continue DELBA flow ssn = next_recl ="
 				" %d", tid_data->next_reclaimed);
-			iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+			iwl_trans_tx_agg_disable(priv->trans,
+						 tid_data->agg.txq_id);
+			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
 			tid_data->agg.state = IWL_AGG_OFF;
 			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
 		}
@@ -677,7 +760,7 @@
 			IWL_DEBUG_TX_QUEUES(priv,
 				"Can continue ADDBA flow ssn = next_recl ="
 				" %d", tid_data->next_reclaimed);
-			tid_data->agg.state = IWL_AGG_ON;
+			tid_data->agg.state = IWL_AGG_STARTING;
 			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
 		}
 		break;
@@ -711,9 +794,9 @@
 static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
 				  struct ieee80211_tx_info *info)
 {
-	struct ieee80211_tx_rate *r = &info->control.rates[0];
+	struct ieee80211_tx_rate *r = &info->status.rates[0];
 
-	info->antenna_sel_tx =
+	info->status.antenna =
 		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
 	if (rate_n_flags & RATE_MCS_HT_MSK)
 		r->flags |= IEEE80211_TX_RC_MCS;
@@ -841,8 +924,8 @@
 	 * notification again.
 	 */
 	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
-	    cfg(priv)->bt_params &&
-	    cfg(priv)->bt_params->advanced_bt_coexist) {
+	    priv->cfg->bt_params &&
+	    priv->cfg->bt_params->advanced_bt_coexist) {
 		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
 	}
 
@@ -1005,6 +1088,29 @@
 	}
 }
 
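+/*
+ * Sanity-check the queue/aggregation mapping before asking the transport
+ * to reclaim frames up to ssn; a bad mapping is only logged and returns 1.
+ */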
+static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid,
+		       int txq_id, int ssn, struct sk_buff_head *skbs)
+{
+	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
+		     tid != IWL_TID_NON_QOS &&
+		     txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) {
+		/*
+		 * FIXME: this is a uCode bug which needs to be addressed;
+		 * log the information and return for now.
+		 * Since it can possibly happen very often, and in order
+		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
+		 */
+		IWL_DEBUG_TX_QUEUES(priv,
+			"Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
+			txq_id, sta_id, tid,
+			priv->tid_data[sta_id][tid].agg.txq_id);
+		return 1;
+	}
+
+	iwl_trans_reclaim(priv->trans, txq_id, ssn, skbs);
+	return 0;
+}
+
 int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 			       struct iwl_device_cmd *cmd)
 {
@@ -1059,13 +1165,12 @@
 		if (tid != IWL_TID_NON_QOS) {
 			priv->tid_data[sta_id][tid].next_reclaimed =
 				next_reclaimed;
-			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
+			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
 						  next_reclaimed);
 		}
 
 		/*we can free until ssn % q.n_bd not inclusive */
-		WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid,
-					  txq_id, ssn, &skbs));
+		WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs));
 		iwlagn_check_ratid_empty(priv, sta_id, tid);
 		freed = 0;
 
@@ -1159,7 +1264,7 @@
 	 * (in Tx queue's circular buffer) of first TFD/frame in window */
 	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
 
-	if (scd_flow >= cfg(priv)->base_params->num_of_queues) {
+	if (scd_flow >= priv->cfg->base_params->num_of_queues) {
 		IWL_ERR(priv,
 			"BUG_ON scd_flow is bigger than number of queues\n");
 		return 0;
@@ -1183,8 +1288,8 @@
 	/* Release all TFDs before the SSN, i.e. all TFDs in front of
 	 * block-ack window (we assume that they've been successfully
 	 * transmitted ... if not, it's too late anyway). */
-	if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
-			      ba_resp_scd_ssn, &reclaimed_skbs)) {
+	if (iwl_reclaim(priv, sta_id, tid, scd_flow,
+			ba_resp_scd_ssn, &reclaimed_skbs)) {
 		spin_unlock(&priv->sta_lock);
 		return 0;
 	}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index f1226dbf..8d76370 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -26,6 +26,9 @@
  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  *
  *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -43,13 +46,13 @@
 
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-agn-calib.h"
 #include "iwl-agn.h"
-#include "iwl-shared.h"
 #include "iwl-trans.h"
 #include "iwl-op-mode.h"
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
 
 /******************************************************************************
  *
@@ -177,7 +180,7 @@
 		rate = info->control.rates[0].idx;
 
 	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
-					      hw_params(priv).valid_tx_ant);
+					      priv->hw_params.valid_tx_ant);
 	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
 
 	/* In mac80211, rates for 5 GHz start at 0 */
@@ -286,6 +289,25 @@
 	mutex_unlock(&priv->mutex);
 }
 
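+/*
+ * Request a statistics notification from the uCode, optionally clearing
+ * the accumulated counters first.
+ */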
+int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
+{
+	struct iwl_statistics_cmd statistics_cmd = {
+		.configuration_flags =
+			clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
+	};
+
+	if (flags & CMD_ASYNC)
+		return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+					CMD_ASYNC,
+					sizeof(struct iwl_statistics_cmd),
+					&statistics_cmd);
+	else
+		return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+					CMD_SYNC,
+					sizeof(struct iwl_statistics_cmd),
+					&statistics_cmd);
+}
+
 /**
  * iwl_bg_statistics_periodic - Timer callback to queue statistics
  *
@@ -326,14 +348,14 @@
 		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
 
 	/* Make sure device is powered up for SRAM reads */
-	spin_lock_irqsave(&trans(priv)->reg_lock, reg_flags);
-	if (unlikely(!iwl_grab_nic_access(trans(priv)))) {
-		spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags);
+	spin_lock_irqsave(&priv->trans->reg_lock, reg_flags);
+	if (unlikely(!iwl_grab_nic_access(priv->trans))) {
+		spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
 		return;
 	}
 
 	/* Set starting address; reads will auto-increment */
-	iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, ptr);
+	iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);
 
 	/*
 	 * Refuse to read more than would have fit into the log from
@@ -349,20 +371,20 @@
 	 * place event id # at far right for easier visual parsing.
 	 */
 	for (i = 0; i < num_events; i++) {
-		ev = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
-		time = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
+		ev = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
+		time = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
 		if (mode == 0) {
 			trace_iwlwifi_dev_ucode_cont_event(
-					trans(priv)->dev, 0, time, ev);
+					priv->trans->dev, 0, time, ev);
 		} else {
-			data = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
+			data = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
 			trace_iwlwifi_dev_ucode_cont_event(
-					trans(priv)->dev, time, data, ev);
+					priv->trans->dev, time, data, ev);
 		}
 	}
 	/* Allow device to power down */
-	iwl_release_nic_access(trans(priv));
-	spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags);
+	iwl_release_nic_access(priv->trans);
+	spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
 }
 
 static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -379,10 +401,9 @@
 	u32 num_wraps;  /* # times uCode wrapped to top of log */
 	u32 next_entry; /* index of next entry to be written by uCode */
 
-	base = priv->shrd->device_pointers.log_event_table;
+	base = priv->device_pointers.log_event_table;
 	if (iwlagn_hw_valid_rtc_data_addr(base)) {
-		iwl_read_targ_mem_words(trans(priv), base, &read, sizeof(read));
-
+		iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read));
 		capacity = read.capacity;
 		mode = read.mode;
 		num_wraps = read.wrap_counter;
@@ -422,7 +443,7 @@
 		else
 			priv->event_log.wraps_once_count++;
 
-		trace_iwlwifi_dev_ucode_wrap_event(trans(priv)->dev,
+		trace_iwlwifi_dev_ucode_wrap_event(priv->trans->dev,
 				num_wraps - priv->event_log.num_wraps,
 				next_entry, priv->event_log.next_entry);
 
@@ -488,7 +509,76 @@
 	iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
 }
 
-static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
+/*
+ * queue/FIFO/AC mapping definitions
+ */
+
+#define IWL_TX_FIFO_BK		0	/* shared */
+#define IWL_TX_FIFO_BE		1
+#define IWL_TX_FIFO_VI		2	/* shared */
+#define IWL_TX_FIFO_VO		3
+#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN	4
+#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN	5
+/* re-uses the VO FIFO, uCode will properly flush/schedule */
+#define IWL_TX_FIFO_AUX		5
+#define IWL_TX_FIFO_UNUSED	-1
+
+#define IWLAGN_CMD_FIFO_NUM	7
+
+/*
+ * This queue number is required for proper operation because the
+ * uCode will stop/start the scheduler on it as needed.
+ */
+#define IWL_IPAN_MCAST_QUEUE	8
+
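+/* HW TX queue number -> TX FIFO (array index is the queue, value the FIFO) */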
+static const u8 iwlagn_default_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+	IWLAGN_CMD_FIFO_NUM,
+};
+
+static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+	IWL_TX_FIFO_BK_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_VI_IPAN,
+	IWL_TX_FIFO_VO_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWLAGN_CMD_FIFO_NUM,
+	IWL_TX_FIFO_AUX,
+};
+
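+/* mac80211 AC (VO, VI, BE, BK) -> TX FIFO and HW queue, per RXON context */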
+static const u8 iwlagn_bss_ac_to_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+};
+
+static const u8 iwlagn_bss_ac_to_queue[] = {
+	0, 1, 2, 3,
+};
+
+static const u8 iwlagn_pan_ac_to_fifo[] = {
+	IWL_TX_FIFO_VO_IPAN,
+	IWL_TX_FIFO_VI_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_BK_IPAN,
+};
+
+static const u8 iwlagn_pan_ac_to_queue[] = {
+	7, 6, 5, 4,
+};
+
+void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
 {
 	int i;
 
@@ -496,9 +586,9 @@
 	 * The default context is always valid,
 	 * the PAN context depends on uCode.
 	 */
-	priv->shrd->valid_contexts = BIT(IWL_RXON_CTX_BSS);
+	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
 	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
-		priv->shrd->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
+		priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
 
 	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
 		priv->contexts[i].ctxid = i;
@@ -520,6 +610,10 @@
 	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
 	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
 	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue,
+	       iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue));
+	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo,
+	       iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo));
 
 	priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
 	priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
@@ -542,26 +636,31 @@
 	priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
 	priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
 	priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
+	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue,
+	       iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue));
+	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo,
+	       iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo));
+	priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
 
 	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
 }
 
-static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
+void iwl_rf_kill_ct_config(struct iwl_priv *priv)
 {
 	struct iwl_ct_kill_config cmd;
 	struct iwl_ct_kill_throttling_config adv_cmd;
 	int ret = 0;
 
-	iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
+	iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
 		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
 
 	priv->thermal_throttle.ct_kill_toggle = false;
 
-	if (cfg(priv)->base_params->support_ct_kill_exit) {
+	if (priv->cfg->base_params->support_ct_kill_exit) {
 		adv_cmd.critical_temperature_enter =
-			cpu_to_le32(hw_params(priv).ct_kill_threshold);
+			cpu_to_le32(priv->hw_params.ct_kill_threshold);
 		adv_cmd.critical_temperature_exit =
-			cpu_to_le32(hw_params(priv).ct_kill_exit_threshold);
+			cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
 
 		ret = iwl_dvm_send_cmd_pdu(priv,
 				       REPLY_CT_KILL_CONFIG_CMD,
@@ -572,11 +671,11 @@
 			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
 				"succeeded, critical temperature enter is %d,"
 				"exit is %d\n",
-				hw_params(priv).ct_kill_threshold,
-				hw_params(priv).ct_kill_exit_threshold);
+				priv->hw_params.ct_kill_threshold,
+				priv->hw_params.ct_kill_exit_threshold);
 	} else {
 		cmd.critical_temperature_R =
-			cpu_to_le32(hw_params(priv).ct_kill_threshold);
+			cpu_to_le32(priv->hw_params.ct_kill_threshold);
 
 		ret = iwl_dvm_send_cmd_pdu(priv,
 				       REPLY_CT_KILL_CONFIG_CMD,
@@ -587,7 +686,7 @@
 			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
 				"succeeded, "
 				"critical temperature is %d\n",
-				hw_params(priv).ct_kill_threshold);
+				priv->hw_params.ct_kill_threshold);
 	}
 }
 
@@ -627,6 +726,29 @@
 	}
 }
 
+void iwl_send_bt_config(struct iwl_priv *priv)
+{
+	struct iwl_bt_cmd bt_cmd = {
+		.lead_time = BT_LEAD_TIME_DEF,
+		.max_kill = BT_MAX_KILL_DEF,
+		.kill_ack_mask = 0,
+		.kill_cts_mask = 0,
+	};
+
+	if (!iwlwifi_mod_params.bt_coex_active)
+		bt_cmd.flags = BT_COEX_DISABLE;
+	else
+		bt_cmd.flags = BT_COEX_ENABLE;
+
+	priv->bt_enable_flag = bt_cmd.flags;
+	IWL_DEBUG_INFO(priv, "BT coex %s\n",
+		(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+	if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+			     CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
+		IWL_ERR(priv, "failed to send BT Coex Config\n");
+}
+
 /**
  * iwl_alive_start - called after REPLY_ALIVE notification received
  *                   from protocol/runtime uCode (initialization uCode's
@@ -642,9 +764,6 @@
 	/* After the ALIVE response, we can send host commands to the uCode */
 	set_bit(STATUS_ALIVE, &priv->status);
 
-	/* Enable watchdog to monitor the driver tx queues */
-	iwl_setup_watchdog(priv);
-
 	if (iwl_is_rfkill(priv))
 		return -ERFKILL;
 
@@ -654,10 +773,10 @@
 	}
 
 	/* download priority table before any calibration request */
-	if (cfg(priv)->bt_params &&
-	    cfg(priv)->bt_params->advanced_bt_coexist) {
+	if (priv->cfg->bt_params &&
+	    priv->cfg->bt_params->advanced_bt_coexist) {
 		/* Configure Bluetooth device coexistence support */
-		if (cfg(priv)->bt_params->bt_sco_disable)
+		if (priv->cfg->bt_params->bt_sco_disable)
 			priv->bt_enable_pspoll = false;
 		else
 			priv->bt_enable_pspoll = true;
@@ -694,10 +813,8 @@
 
 	ieee80211_wake_queues(priv->hw);
 
-	priv->active_rate = IWL_RATES_MASK;
-
 	/* Configure Tx antenna selection based on H/W config */
-	iwlagn_send_tx_ant_config(priv, hw_params(priv).valid_tx_ant);
+	iwlagn_send_tx_ant_config(priv, priv->hw_params.valid_tx_ant);
 
 	if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
 		struct iwl_rxon_cmd *active_rxon =
@@ -788,10 +905,6 @@
 	exit_pending =
 		test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
 
-	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
-	 * to prevent rearm timer */
-	del_timer_sync(&priv->watchdog);
-
 	iwl_clear_ucode_stations(priv, NULL);
 	iwl_dealloc_bcast_stations(priv);
 	iwl_clear_driver_stations(priv);
@@ -800,9 +913,9 @@
 	priv->bt_status = 0;
 	priv->cur_rssi_ctx = NULL;
 	priv->bt_is_sco = 0;
-	if (cfg(priv)->bt_params)
+	if (priv->cfg->bt_params)
 		priv->bt_traffic_load =
-			 cfg(priv)->bt_params->bt_init_traffic_load;
+			 priv->cfg->bt_params->bt_init_traffic_load;
 	else
 		priv->bt_traffic_load = 0;
 	priv->bt_full_concurrent = false;
@@ -817,18 +930,17 @@
 		ieee80211_stop_queues(priv->hw);
 
 	priv->ucode_loaded = false;
-	iwl_trans_stop_device(trans(priv));
+	iwl_trans_stop_device(priv->trans);
 
 	/* Clear out all status bits but a few that are stable across reset */
 	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
 				STATUS_RF_KILL_HW |
 			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
 				STATUS_GEO_CONFIGURED |
+			test_bit(STATUS_FW_ERROR, &priv->status) <<
+				STATUS_FW_ERROR |
 			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
 				STATUS_EXIT_PENDING;
-	priv->shrd->status &=
-			test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
-				STATUS_FW_ERROR;
 
 	dev_kfree_skb(priv->beacon_skb);
 	priv->beacon_skb = NULL;
@@ -863,17 +975,15 @@
 
 void iwlagn_prepare_restart(struct iwl_priv *priv)
 {
-	struct iwl_rxon_context *ctx;
 	bool bt_full_concurrent;
 	u8 bt_ci_compliance;
 	u8 bt_load;
 	u8 bt_status;
 	bool bt_is_sco;
+	int i;
 
 	lockdep_assert_held(&priv->mutex);
 
-	for_each_context(priv, ctx)
-		ctx->vif = NULL;
 	priv->is_open = 0;
 
 	/*
@@ -898,6 +1008,15 @@
 	priv->bt_traffic_load = bt_load;
 	priv->bt_status = bt_status;
 	priv->bt_is_sco = bt_is_sco;
+
+	/* reset aggregation queues */
+	for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++)
+		priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
+	/* and stop counts */
+	for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
+		atomic_set(&priv->queue_stop_count[i], 0);
+
+	memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc));
 }
 
 static void iwl_bg_restart(struct work_struct *data)
@@ -907,7 +1026,7 @@
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
-	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
+	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
 		mutex_lock(&priv->mutex);
 		iwlagn_prepare_restart(priv);
 		mutex_unlock(&priv->mutex);
@@ -959,7 +1078,7 @@
  *
  *****************************************************************************/
 
-static void iwl_setup_deferred_work(struct iwl_priv *priv)
+void iwl_setup_deferred_work(struct iwl_priv *priv)
 {
 	priv->workqueue = create_singlethread_workqueue(DRV_NAME);
 
@@ -974,7 +1093,7 @@
 
 	iwl_setup_scan_deferred_work(priv);
 
-	if (cfg(priv)->bt_params)
+	if (priv->cfg->bt_params)
 		iwlagn_bt_setup_deferred_work(priv);
 
 	init_timer(&priv->statistics_periodic);
@@ -984,15 +1103,11 @@
 	init_timer(&priv->ucode_trace);
 	priv->ucode_trace.data = (unsigned long)priv;
 	priv->ucode_trace.function = iwl_bg_ucode_trace;
-
-	init_timer(&priv->watchdog);
-	priv->watchdog.data = (unsigned long)priv;
-	priv->watchdog.function = iwl_bg_watchdog;
 }
 
 void iwl_cancel_deferred_work(struct iwl_priv *priv)
 {
-	if (cfg(priv)->bt_params)
+	if (priv->cfg->bt_params)
 		iwlagn_bt_cancel_deferred_work(priv);
 
 	cancel_work_sync(&priv->run_time_calib_work);
@@ -1028,7 +1143,193 @@
 	}
 }
 
-static int iwl_init_drv(struct iwl_priv *priv)
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
+			      struct ieee80211_sta_ht_cap *ht_info,
+			      enum ieee80211_band band)
+{
+	u16 max_bit_rate = 0;
+	u8 rx_chains_num = priv->hw_params.rx_chains_num;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
+
+	ht_info->cap = 0;
+	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+	ht_info->ht_supported = true;
+
+	if (priv->cfg->ht_params &&
+	    priv->cfg->ht_params->ht_greenfield_support)
+		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
+	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+	max_bit_rate = MAX_BIT_RATE_20_MHZ;
+	if (priv->hw_params.ht40_channel & BIT(band)) {
+		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+		ht_info->mcs.rx_mask[4] = 0x01;
+		max_bit_rate = MAX_BIT_RATE_40_MHZ;
+	}
+
+	if (iwlwifi_mod_params.amsdu_size_8K)
+		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+	ht_info->mcs.rx_mask[0] = 0xFF;
+	if (rx_chains_num >= 2)
+		ht_info->mcs.rx_mask[1] = 0xFF;
+	if (rx_chains_num >= 3)
+		ht_info->mcs.rx_mask[2] = 0xFF;
+
+	/* Highest supported Rx data rate */
+	max_bit_rate *= rx_chains_num;
+	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+	/* Tx MCS capabilities */
+	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	if (tx_chains_num != rx_chains_num) {
+		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
+				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+	}
+}
+
+/**
+ * iwl_init_geos - Initialize mac80211's geo/channel info based on the EEPROM
+ */
+static int iwl_init_geos(struct iwl_priv *priv)
+{
+	struct iwl_channel_info *ch;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *channels;
+	struct ieee80211_channel *geo_ch;
+	struct ieee80211_rate *rates;
+	int i = 0;
+	s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
+
+	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
+		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+		return 0;
+	}
+
+	channels = kcalloc(priv->channel_count,
+			   sizeof(struct ieee80211_channel), GFP_KERNEL);
+	if (!channels)
+		return -ENOMEM;
+
+	rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
+			GFP_KERNEL);
+	if (!rates) {
+		kfree(channels);
+		return -ENOMEM;
+	}
+
+	/* 5.2GHz channels start after the 2.4GHz channels */
+	sband = &priv->bands[IEEE80211_BAND_5GHZ];
+	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
+	/* just OFDM */
+	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
+	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
+
+	if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
+		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
+					 IEEE80211_BAND_5GHZ);
+
+	sband = &priv->bands[IEEE80211_BAND_2GHZ];
+	sband->channels = channels;
+	/* OFDM & CCK */
+	sband->bitrates = rates;
+	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
+
+	if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
+		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
+					 IEEE80211_BAND_2GHZ);
+
+	priv->ieee_channels = channels;
+	priv->ieee_rates = rates;
+
+	for (i = 0;  i < priv->channel_count; i++) {
+		ch = &priv->channel_info[i];
+
+		/* FIXME: might be removed if scan is OK */
+		if (!is_channel_valid(ch))
+			continue;
+
+		sband =  &priv->bands[ch->band];
+
+		geo_ch = &sband->channels[sband->n_channels++];
+
+		geo_ch->center_freq =
+			ieee80211_channel_to_frequency(ch->channel, ch->band);
+		geo_ch->max_power = ch->max_power_avg;
+		geo_ch->max_antenna_gain = 0xff;
+		geo_ch->hw_value = ch->channel;
+
+		if (is_channel_valid(ch)) {
+			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+			if (ch->flags & EEPROM_CHANNEL_RADAR)
+				geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+			geo_ch->flags |= ch->ht40_extension_channel;
+
+			if (ch->max_power_avg > max_tx_power)
+				max_tx_power = ch->max_power_avg;
+		} else {
+			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+		}
+
+		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
+				ch->channel, geo_ch->center_freq,
+				is_channel_a_band(ch) ?  "5.2" : "2.4",
+				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
+				"restricted" : "valid",
+				 geo_ch->flags);
+	}
+
+	priv->tx_power_device_lmt = max_tx_power;
+	priv->tx_power_user_lmt = max_tx_power;
+	priv->tx_power_next = max_tx_power;
+
+	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
+	     priv->hw_params.sku & EEPROM_SKU_CAP_BAND_52GHZ) {
+		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
+			"Please send your %s to maintainer.\n",
+			priv->trans->hw_id_str);
+		priv->hw_params.sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
+	}
+
+	if (iwlwifi_mod_params.disable_5ghz)
+		priv->bands[IEEE80211_BAND_5GHZ].n_channels = 0;
+
+	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
+		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+	set_bit(STATUS_GEO_CONFIGURED, &priv->status);
+
+	return 0;
+}
+
+/*
+ * iwl_free_geos - undo allocations in iwl_init_geos
+ */
+static void iwl_free_geos(struct iwl_priv *priv)
+{
+	kfree(priv->ieee_channels);
+	kfree(priv->ieee_rates);
+	clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
+}
+
+int iwl_init_drv(struct iwl_priv *priv)
 {
 	int ret;
 
@@ -1043,7 +1344,7 @@
 	priv->band = IEEE80211_BAND_2GHZ;
 
 	priv->plcp_delta_threshold =
-		cfg(priv)->base_params->plcp_delta_threshold;
+		priv->cfg->base_params->plcp_delta_threshold;
 
 	priv->iw_mode = NL80211_IFTYPE_STATION;
 	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
@@ -1052,12 +1353,6 @@
 
 	priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
 
-	/* initialize force reset */
-	priv->force_reset[IWL_RF_RESET].reset_duration =
-		IWL_DELAY_NEXT_FORCE_RF_RESET;
-	priv->force_reset[IWL_FW_RESET].reset_duration =
-		IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
 	priv->rx_statistics_jiffies = jiffies;
 
 	/* Choose which receivers/antennas to use */
@@ -1066,8 +1361,8 @@
 	iwl_init_scan_params(priv);
 
 	/* init bt coex */
-	if (cfg(priv)->bt_params &&
-	    cfg(priv)->bt_params->advanced_bt_coexist) {
+	if (priv->cfg->bt_params &&
+	    priv->cfg->bt_params->advanced_bt_coexist) {
 		priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
 		priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
 		priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -1097,7 +1392,7 @@
 	return ret;
 }
 
-static void iwl_uninit_drv(struct iwl_priv *priv)
+void iwl_uninit_drv(struct iwl_priv *priv)
 {
 	iwl_free_geos(priv);
 	iwl_free_channel_map(priv);
@@ -1110,75 +1405,59 @@
 #endif
 }
 
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
-static void iwl_set_hw_params(struct iwl_priv *priv)
+void iwl_set_hw_params(struct iwl_priv *priv)
 {
-	if (cfg(priv)->ht_params)
-		hw_params(priv).use_rts_for_aggregation =
-			cfg(priv)->ht_params->use_rts_for_aggregation;
+	if (priv->cfg->ht_params)
+		priv->hw_params.use_rts_for_aggregation =
+			priv->cfg->ht_params->use_rts_for_aggregation;
 
-	if (iwlagn_mod_params.amsdu_size_8K)
-		hw_params(priv).rx_page_order =
-			get_order(IWL_RX_BUF_SIZE_8K);
-	else
-		hw_params(priv).rx_page_order =
-			get_order(IWL_RX_BUF_SIZE_4K);
-
-	if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
-		hw_params(priv).sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
-
-	hw_params(priv).num_ampdu_queues =
-		cfg(priv)->base_params->num_of_ampdu_queues;
-	hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+		priv->hw_params.sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
 
 	/* Device-specific setup */
-	cfg(priv)->lib->set_hw_params(priv);
+	priv->lib->set_hw_params(priv);
 }
 
 
 
-static void iwl_debug_config(struct iwl_priv *priv)
+/* show what optional capabilities we have */
+void iwl_option_config(struct iwl_priv *priv)
 {
-	dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
 #ifdef CONFIG_IWLWIFI_DEBUG
-		"enabled\n");
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n");
 #else
-		"disabled\n");
-#endif
-	dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-		"enabled\n");
-#else
-		"disabled\n");
-#endif
-	dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
-#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
-		"enabled\n");
-#else
-		"disabled\n");
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG disabled\n");
 #endif
 
-	dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
-#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
-		"enabled\n");
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS enabled\n");
 #else
-		"disabled\n");
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS disabled\n");
 #endif
-	dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_P2P "
-#ifdef CONFIG_IWLWIFI_P2P
-		"enabled\n");
+
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING enabled\n");
 #else
-		"disabled\n");
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
+#endif
+
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE enabled\n");
+#else
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TESTMODE disabled\n");
+#endif
+
+#ifdef CONFIG_IWLWIFI_P2P
+	IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
+#else
+	IWL_INFO(priv, "CONFIG_IWLWIFI_P2P disabled\n");
 #endif
 }
 
 static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
+						 const struct iwl_cfg *cfg,
 						 const struct iwl_fw *fw)
 {
-	int err = 0;
 	struct iwl_priv *priv;
 	struct ieee80211_hw *hw;
 	struct iwl_op_mode *op_mode;
@@ -1193,25 +1472,60 @@
 		STATISTICS_NOTIFICATION,
 		REPLY_TX,
 	};
+	int i;
 
 	/************************
 	 * 1. Allocating HW data
 	 ************************/
 	hw = iwl_alloc_all();
 	if (!hw) {
-		pr_err("%s: Cannot allocate network device\n",
-				cfg(trans)->name);
-		err = -ENOMEM;
+		pr_err("%s: Cannot allocate network device\n", cfg->name);
 		goto out;
 	}
 
 	op_mode = hw->priv;
 	op_mode->ops = &iwl_dvm_ops;
 	priv = IWL_OP_MODE_GET_DVM(op_mode);
-	priv->shrd = trans->shrd;
+	priv->trans = trans;
+	priv->dev = trans->dev;
+	priv->cfg = cfg;
 	priv->fw = fw;
-	/* TODO: remove fw from shared data later */
-	priv->shrd->fw = fw;
+
+	switch (priv->cfg->device_family) {
+	case IWL_DEVICE_FAMILY_1000:
+	case IWL_DEVICE_FAMILY_100:
+		priv->lib = &iwl1000_lib;
+		break;
+	case IWL_DEVICE_FAMILY_2000:
+	case IWL_DEVICE_FAMILY_105:
+		priv->lib = &iwl2000_lib;
+		break;
+	case IWL_DEVICE_FAMILY_2030:
+	case IWL_DEVICE_FAMILY_135:
+		priv->lib = &iwl2030_lib;
+		break;
+	case IWL_DEVICE_FAMILY_5000:
+		priv->lib = &iwl5000_lib;
+		break;
+	case IWL_DEVICE_FAMILY_5150:
+		priv->lib = &iwl5150_lib;
+		break;
+	case IWL_DEVICE_FAMILY_6000:
+	case IWL_DEVICE_FAMILY_6005:
+	case IWL_DEVICE_FAMILY_6000i:
+	case IWL_DEVICE_FAMILY_6050:
+	case IWL_DEVICE_FAMILY_6150:
+		priv->lib = &iwl6000_lib;
+		break;
+	case IWL_DEVICE_FAMILY_6030:
+		priv->lib = &iwl6030_lib;
+		break;
+	default:
+		break;
+	}
+
+	if (WARN_ON(!priv->lib))
+		goto out_free_hw;
 
 	/*
 	 * Populate the state variables that the transport layer needs
@@ -1220,87 +1534,90 @@
 	trans_cfg.op_mode = op_mode;
 	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
 	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+	trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+	if (!iwlwifi_mod_params.wd_disable)
+		trans_cfg.queue_watchdog_timeout =
+			priv->cfg->base_params->wd_timeout;
+	else
+		trans_cfg.queue_watchdog_timeout = IWL_WATCHHDOG_DISABLED;
+	trans_cfg.command_names = iwl_dvm_cmd_strings;
 
 	ucode_flags = fw->ucode_capa.flags;
 
 #ifndef CONFIG_IWLWIFI_P2P
-	ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+	ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
 #endif
 
 	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
 		priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
 		trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+		trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
+		trans_cfg.n_queue_to_fifo =
+			ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
 	} else {
 		priv->sta_key_max_num = STA_KEY_MAX_NUM;
 		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+		trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
+		trans_cfg.n_queue_to_fifo =
+			ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
 	}
 
 	/* Configure transport layer */
-	iwl_trans_configure(trans(priv), &trans_cfg);
+	iwl_trans_configure(priv->trans, &trans_cfg);
 
 	/* At this point both hw and priv are allocated. */
 
-	SET_IEEE80211_DEV(priv->hw, trans(priv)->dev);
+	SET_IEEE80211_DEV(priv->hw, priv->trans->dev);
 
-	/* show what debugging capabilities we have */
-	iwl_debug_config(priv);
+	iwl_option_config(priv);
 
 	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
 
 	/* is antenna coupling more than 35dB ? */
 	priv->bt_ant_couple_ok =
-		(iwlagn_mod_params.ant_coupling >
+		(iwlwifi_mod_params.ant_coupling >
 			IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
 			true : false;
 
 	/* enable/disable bt channel inhibition */
-	priv->bt_ch_announce = iwlagn_mod_params.bt_ch_announce;
+	priv->bt_ch_announce = iwlwifi_mod_params.bt_ch_announce;
 	IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
 		       (priv->bt_ch_announce) ? "On" : "Off");
 
-	if (iwl_alloc_traffic_mem(priv))
-		IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
 	/* these spin locks will be used in apm_ops.init and EEPROM access
 	 * we should init now
 	 */
-	spin_lock_init(&trans(priv)->reg_lock);
 	spin_lock_init(&priv->statistics.lock);
 
 	/***********************
 	 * 2. Read REV register
 	 ***********************/
 	IWL_INFO(priv, "Detected %s, REV=0x%X\n",
-		cfg(priv)->name, trans(priv)->hw_rev);
+		priv->cfg->name, priv->trans->hw_rev);
 
-	err = iwl_trans_start_hw(trans(priv));
-	if (err)
-		goto out_free_traffic_mem;
+	if (iwl_trans_start_hw(priv->trans))
+		goto out_free_hw;
 
-	/*****************
-	 * 3. Read EEPROM
-	 *****************/
-	err = iwl_eeprom_init(trans(priv), trans(priv)->hw_rev);
-	/* Reset chip to save power until we load uCode during "up". */
-	iwl_trans_stop_hw(trans(priv));
-	if (err) {
+	/* Read the EEPROM */
+	if (iwl_eeprom_init(priv, priv->trans->hw_rev)) {
 		IWL_ERR(priv, "Unable to init EEPROM\n");
-		goto out_free_traffic_mem;
+		goto out_free_hw;
 	}
-	err = iwl_eeprom_check_version(priv);
-	if (err)
+	/* Reset chip to save power until we load uCode during "up". */
+	iwl_trans_stop_hw(priv->trans, false);
+
+	if (iwl_eeprom_check_version(priv))
 		goto out_free_eeprom;
 
-	err = iwl_eeprom_init_hw_params(priv);
-	if (err)
+	if (iwl_eeprom_init_hw_params(priv))
 		goto out_free_eeprom;
 
 	/* extract MAC Address */
-	iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr);
+	iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
 	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
 	priv->hw->wiphy->addresses = priv->addresses;
 	priv->hw->wiphy->n_addresses = 1;
-	num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS);
+	num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
 	if (num_mac > 1) {
 		memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
 		       ETH_ALEN);
@@ -1313,7 +1630,7 @@
 	 ************************/
 	iwl_set_hw_params(priv);
 
-	if (!(hw_params(priv).sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
+	if (!(priv->hw_params.sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
 		IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
 		ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
 		/*
@@ -1323,18 +1640,32 @@
 		ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
 		priv->sta_key_max_num = STA_KEY_MAX_NUM;
 		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+		trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
+		trans_cfg.n_queue_to_fifo =
+			ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
 
 		/* Configure transport layer again*/
-		iwl_trans_configure(trans(priv), &trans_cfg);
+		iwl_trans_configure(priv->trans, &trans_cfg);
 	}
 
 	/*******************
 	 * 5. Setup priv
 	 *******************/
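+	/* Default 1:1 mapping for the non-aggregation, non-command HW queues */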
+	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
+		priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
+		if (i < IWLAGN_FIRST_AMPDU_QUEUE &&
+		    i != IWL_DEFAULT_CMD_QUEUE_NUM &&
+		    i != IWL_IPAN_CMD_QUEUE_NUM)
+			priv->queue_to_mac80211[i] = i;
+		atomic_set(&priv->queue_stop_count[i], 0);
+	}
 
-	err = iwl_init_drv(priv);
-	if (err)
+	WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
+						IWLAGN_CMD_FIFO_NUM);
+
+	if (iwl_init_drv(priv))
 		goto out_free_eeprom;
+
 	/* At this point both hw and priv are initialized. */
 
 	/********************
@@ -1367,15 +1698,12 @@
 	 *
 	 * 7. Setup and register with mac80211 and debugfs
 	 **************************************************/
-	err = iwlagn_mac_setup_register(priv, &fw->ucode_capa);
-	if (err)
+	if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
 		goto out_destroy_workqueue;
 
-	err = iwl_dbgfs_register(priv, DRV_NAME);
-	if (err)
+	if (iwl_dbgfs_register(priv, DRV_NAME))
 		IWL_ERR(priv,
-			"failed to create debugfs files. Ignoring error: %d\n",
-			err);
+			"failed to create debugfs files. Ignoring error\n");
 
 	return op_mode;
 
@@ -1384,16 +1712,15 @@
 	priv->workqueue = NULL;
 	iwl_uninit_drv(priv);
 out_free_eeprom:
-	iwl_eeprom_free(priv->shrd);
-out_free_traffic_mem:
-	iwl_free_traffic_mem(priv);
+	iwl_eeprom_free(priv);
+out_free_hw:
 	ieee80211_free_hw(priv->hw);
 out:
 	op_mode = NULL;
 	return op_mode;
 }
 
-static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
+void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
 {
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
 
@@ -1408,9 +1735,9 @@
 
 	/*This will stop the queues, move the device to low power state */
 	priv->ucode_loaded = false;
-	iwl_trans_stop_device(trans(priv));
+	iwl_trans_stop_device(priv->trans);
 
-	iwl_eeprom_free(priv->shrd);
+	iwl_eeprom_free(priv);
 
 	/*netif_stop_queue(dev); */
 	flush_workqueue(priv->workqueue);
@@ -1420,69 +1747,562 @@
 	 * until now... */
 	destroy_workqueue(priv->workqueue);
 	priv->workqueue = NULL;
-	iwl_free_traffic_mem(priv);
 
 	iwl_uninit_drv(priv);
 
 	dev_kfree_skb(priv->beacon_skb);
 
+	iwl_trans_stop_hw(priv->trans, true);
 	ieee80211_free_hw(priv->hw);
 }
 
-static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
+static const char * const desc_lookup_text[] = {
+	"OK",
+	"FAIL",
+	"BAD_PARAM",
+	"BAD_CHECKSUM",
+	"NMI_INTERRUPT_WDG",
+	"SYSASSERT",
+	"FATAL_ERROR",
+	"BAD_COMMAND",
+	"HW_ERROR_TUNE_LOCK",
+	"HW_ERROR_TEMPERATURE",
+	"ILLEGAL_CHAN_FREQ",
+	"VCC_NOT_STABLE",
+	"FH_ERROR",
+	"NMI_INTERRUPT_HOST",
+	"NMI_INTERRUPT_ACTION_PT",
+	"NMI_INTERRUPT_UNKNOWN",
+	"UCODE_VERSION_MISMATCH",
+	"HW_ERROR_ABS_LOCK",
+	"HW_ERROR_CAL_LOCK_FAIL",
+	"NMI_INTERRUPT_INST_ACTION_PT",
+	"NMI_INTERRUPT_DATA_ACTION_PT",
+	"NMI_TRM_HW_ER",
+	"NMI_INTERRUPT_TRM",
+	"NMI_INTERRUPT_BREAK_POINT",
+	"DEBUG_0",
+	"DEBUG_1",
+	"DEBUG_2",
+	"DEBUG_3",
+};
+
+static struct { char *name; u8 num; } advanced_lookup[] = {
+	{ "NMI_INTERRUPT_WDG", 0x34 },
+	{ "SYSASSERT", 0x35 },
+	{ "UCODE_VERSION_MISMATCH", 0x37 },
+	{ "BAD_COMMAND", 0x38 },
+	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+	{ "FATAL_ERROR", 0x3D },
+	{ "NMI_TRM_HW_ERR", 0x46 },
+	{ "NMI_INTERRUPT_TRM", 0x4C },
+	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+	{ "NMI_INTERRUPT_HOST", 0x66 },
+	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
+	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
+	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+	{ "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *desc_lookup(u32 num)
+{
+	int i;
+	int max = ARRAY_SIZE(desc_lookup_text);
+
+	if (num < max)
+		return desc_lookup_text[num];
+
+	max = ARRAY_SIZE(advanced_lookup) - 1;
+	for (i = 0; i < max; i++) {
+		if (advanced_lookup[i].num == num)
+			break;
+	}
+	return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+static void iwl_dump_nic_error_log(struct iwl_priv *priv)
+{
+	struct iwl_trans *trans = priv->trans;
+	u32 base;
+	struct iwl_error_event_table table;
+
+	base = priv->device_pointers.error_event_table;
+	if (priv->cur_ucode == IWL_UCODE_INIT) {
+		if (!base)
+			base = priv->fw->init_errlog_ptr;
+	} else {
+		if (!base)
+			base = priv->fw->inst_errlog_ptr;
+	}
+
+	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+		IWL_ERR(priv,
+			"Not valid error log pointer 0x%08X for %s uCode\n",
+			base,
+			(priv->cur_ucode == IWL_UCODE_INIT)
+					? "Init" : "RT");
+		return;
+	}
+
+	/*TODO: Update dbgfs with ISR error stats obtained below */
+	iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
+
+	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+			priv->status, table.valid);
+	}
+
+	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
+				      table.data1, table.data2, table.line,
+				      table.blink1, table.blink2, table.ilink1,
+				      table.ilink2, table.bcon_time, table.gp1,
+				      table.gp2, table.gp3, table.ucode_ver,
+				      table.hw_ver, table.brd_ver);
+	IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
+		desc_lookup(table.error_id));
+	IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
+	IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
+	IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
+	IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
+	IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
+	IWL_ERR(priv, "0x%08X | data1\n", table.data1);
+	IWL_ERR(priv, "0x%08X | data2\n", table.data2);
+	IWL_ERR(priv, "0x%08X | line\n", table.line);
+	IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
+	IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
+	IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
+	IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
+	IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
+	IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
+	IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
+	IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
+	IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
+	IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
+	IWL_ERR(priv, "0x%08X | isr0\n", table.isr0);
+	IWL_ERR(priv, "0x%08X | isr1\n", table.isr1);
+	IWL_ERR(priv, "0x%08X | isr2\n", table.isr2);
+	IWL_ERR(priv, "0x%08X | isr3\n", table.isr3);
+	IWL_ERR(priv, "0x%08X | isr4\n", table.isr4);
+	IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref);
+	IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event);
+	IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control);
+	IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration);
+	IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+	IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+	IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+	IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp);
+	IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+#define EVENT_START_OFFSET  (4 * sizeof(u32))
+
+/**
+ * iwl_print_event_log - Dump error event log to syslog
+ *
+ */
+static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
+			       u32 num_events, u32 mode,
+			       int pos, char **buf, size_t bufsz)
+{
+	u32 i;
+	u32 base;       /* SRAM byte address of event log header */
+	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+	u32 ptr;        /* SRAM byte address of log data */
+	u32 ev, time, data; /* event log data */
+	unsigned long reg_flags;
+
+	struct iwl_trans *trans = priv->trans;
+
+	if (num_events == 0)
+		return pos;
+
+	base = priv->device_pointers.log_event_table;
+	if (priv->cur_ucode == IWL_UCODE_INIT) {
+		if (!base)
+			base = priv->fw->init_evtlog_ptr;
+	} else {
+		if (!base)
+			base = priv->fw->inst_evtlog_ptr;
+	}
+
+	if (mode == 0)
+		event_size = 2 * sizeof(u32);
+	else
+		event_size = 3 * sizeof(u32);
+
+	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+	/* Make sure device is powered up for SRAM reads */
+	spin_lock_irqsave(&trans->reg_lock, reg_flags);
+	if (unlikely(!iwl_grab_nic_access(trans)))
+		goto out_unlock;
+
+	/* Set starting address; reads will auto-increment */
+	iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
+
+	/* "time" is actually "data" for mode 0 (no timestamp).
+	* place event id # at far right for easier visual parsing. */
+	for (i = 0; i < num_events; i++) {
+		ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+		time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+		if (mode == 0) {
+			/* data, ev */
+			if (bufsz) {
+				pos += scnprintf(*buf + pos, bufsz - pos,
+						"EVT_LOG:0x%08x:%04u\n",
+						time, ev);
+			} else {
+				trace_iwlwifi_dev_ucode_event(trans->dev, 0,
+					time, ev);
+				IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
+					time, ev);
+			}
+		} else {
+			data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+			if (bufsz) {
+				pos += scnprintf(*buf + pos, bufsz - pos,
+						"EVT_LOGT:%010u:0x%08x:%04u\n",
+						 time, data, ev);
+			} else {
+				IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+					time, data, ev);
+				trace_iwlwifi_dev_ucode_event(trans->dev, time,
+					data, ev);
+			}
+		}
+	}
+
+	/* Allow device to power down */
+	iwl_release_nic_access(trans);
+out_unlock:
+	spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
+	return pos;
+}
+
+/**
+ * iwl_print_last_event_logs - Dump the newest # of event log to syslog
+ */
+static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+				    u32 num_wraps, u32 next_entry,
+				    u32 size, u32 mode,
+				    int pos, char **buf, size_t bufsz)
+{
+	/*
+	 * display the newest DEFAULT_DUMP_EVENT_LOG_ENTRIES entries,
+	 * i.e. the entries just before the next one that uCode would fill.
+	 */
+	if (num_wraps) {
+		if (next_entry < size) {
+			pos = iwl_print_event_log(priv,
+						capacity - (size - next_entry),
+						size - next_entry, mode,
+						pos, buf, bufsz);
+			pos = iwl_print_event_log(priv, 0,
+						  next_entry, mode,
+						  pos, buf, bufsz);
+		} else
+			pos = iwl_print_event_log(priv, next_entry - size,
+						  size, mode, pos, buf, bufsz);
+	} else {
+		if (next_entry < size) {
+			pos = iwl_print_event_log(priv, 0, next_entry,
+						  mode, pos, buf, bufsz);
+		} else {
+			pos = iwl_print_event_log(priv, next_entry - size,
+						  size, mode, pos, buf, bufsz);
+		}
+	}
+	return pos;
+}
+
+#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
+
+int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+			    char **buf, bool display)
+{
+	u32 base;       /* SRAM byte address of event log header */
+	u32 capacity;   /* event log capacity in # entries */
+	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
+	u32 num_wraps;  /* # times uCode wrapped to top of log */
+	u32 next_entry; /* index of next entry to be written by uCode */
+	u32 size;       /* # entries that we'll print */
+	u32 logsize;
+	int pos = 0;
+	size_t bufsz = 0;
+	struct iwl_trans *trans = priv->trans;
+
+	base = priv->device_pointers.log_event_table;
+	if (priv->cur_ucode == IWL_UCODE_INIT) {
+		logsize = priv->fw->init_evtlog_size;
+		if (!base)
+			base = priv->fw->init_evtlog_ptr;
+	} else {
+		logsize = priv->fw->inst_evtlog_size;
+		if (!base)
+			base = priv->fw->inst_evtlog_ptr;
+	}
+
+	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+		IWL_ERR(priv,
+			"Invalid event log pointer 0x%08X for %s uCode\n",
+			base,
+			(priv->cur_ucode == IWL_UCODE_INIT)
+					? "Init" : "RT");
+		return -EINVAL;
+	}
+
+	/* event log header */
+	capacity = iwl_read_targ_mem(trans, base);
+	mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
+	num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
+	next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
+
+	if (capacity > logsize) {
+		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
+			"entries\n", capacity, logsize);
+		capacity = logsize;
+	}
+
+	if (next_entry > logsize) {
+		IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
+			next_entry, logsize);
+		next_entry = logsize;
+	}
+
+	size = num_wraps ? capacity : next_entry;
+
+	/* bail out if nothing in log */
+	if (size == 0) {
+		IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
+		return pos;
+	}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
+		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#else
+	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+#endif
+	IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
+		size);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (display) {
+		if (full_log)
+			bufsz = capacity * 48;
+		else
+			bufsz = size * 48;
+		*buf = kmalloc(bufsz, GFP_KERNEL);
+		if (!*buf)
+			return -ENOMEM;
+	}
+	if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
+		/*
+		 * if uCode has wrapped back to top of log,
+		 * start at the oldest entry,
+		 * i.e the next one that uCode would fill.
+		 */
+		if (num_wraps)
+			pos = iwl_print_event_log(priv, next_entry,
+						capacity - next_entry, mode,
+						pos, buf, bufsz);
+		/* (then/else) start at top of log */
+		pos = iwl_print_event_log(priv, 0,
+					  next_entry, mode, pos, buf, bufsz);
+	} else
+		pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+						next_entry, size, mode,
+						pos, buf, bufsz);
+#else
+	pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+					next_entry, size, mode,
+					pos, buf, bufsz);
+#endif
+	return pos;
+}
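
The dump path above walks the uCode event log as a ring buffer: entries are written at next_entry and wrap at capacity, so once the log has wrapped the oldest data sits just past the write pointer. A minimal standalone C sketch of the range selection (not part of the patch; it mirrors iwl_print_last_event_logs and only prints the half-open index ranges that would be dumped):

#include <stdio.h>

/* Print the [start, end) index ranges that would be dumped for the given
 * ring-buffer state.  "size" is the number of entries to show. */
static void sketch_dump_ranges(unsigned int capacity, unsigned int num_wraps,
			       unsigned int next_entry, unsigned int size)
{
	if (num_wraps) {
		if (next_entry < size) {
			/* tail of the buffer first, then the head */
			printf("print [%u, %u)\n",
			       capacity - (size - next_entry), capacity);
			printf("print [0, %u)\n", next_entry);
		} else {
			/* enough recent entries just before the write pointer */
			printf("print [%u, %u)\n", next_entry - size, next_entry);
		}
	} else {
		/* log never wrapped: everything lives in [0, next_entry) */
		unsigned int start = (next_entry < size) ? 0 : next_entry - size;

		printf("print [%u, %u)\n", start, next_entry);
	}
}
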
+
+static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
+{
+	unsigned int reload_msec;
+	unsigned long reload_jiffies;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
+		iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
+#endif
+
+	/* uCode is no longer loaded. */
+	priv->ucode_loaded = false;
+
+	/* Set the FW error flag -- cleared on iwl_down */
+	set_bit(STATUS_FW_ERROR, &priv->status);
+
+	iwl_abort_notification_waits(&priv->notif_wait);
+
+	/* Keep the restart process from trying to send host
+	 * commands by clearing the ready bit */
+	clear_bit(STATUS_READY, &priv->status);
+
+	wake_up(&priv->trans->wait_command_queue);
+
+	if (!ondemand) {
+		/*
+		 * If the firmware keeps reloading, something is seriously
+		 * wrong and the firmware is failing to recover from it.
+		 * Instead of retrying forever, which would fill the syslog
+		 * and hang the system, just stop it.
+		 */
+		reload_jiffies = jiffies;
+		reload_msec = jiffies_to_msecs((long) reload_jiffies -
+					(long) priv->reload_jiffies);
+		priv->reload_jiffies = reload_jiffies;
+		if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
+			priv->reload_count++;
+			if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
+				IWL_ERR(priv, "BUG_ON, Stop restarting\n");
+				return;
+			}
+		} else
+			priv->reload_count = 0;
+	}
+
+	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+		if (iwlwifi_mod_params.restart_fw) {
+			IWL_DEBUG_FW_ERRORS(priv,
+				  "Restarting adapter due to uCode error.\n");
+			queue_work(priv->workqueue, &priv->restart);
+		} else
+			IWL_DEBUG_FW_ERRORS(priv,
+				  "Detected FW error, but not restarting\n");
+	}
+}
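
iwlagn_fw_error() above throttles automatic firmware restarts: a restart that follows the previous one within IWL_MIN_RELOAD_DURATION bumps a counter, and once that counter reaches IWL_MAX_CONTINUE_RELOAD_CNT the driver gives up instead of looping. A small standalone sketch of that policy (not part of the patch; the constants below are made up and only stand in for the driver's macros):

#include <stdbool.h>

#define MIN_RELOAD_DURATION_MS	1000	/* stands in for IWL_MIN_RELOAD_DURATION */
#define MAX_CONTINUE_RELOAD_CNT	4	/* stands in for IWL_MAX_CONTINUE_RELOAD_CNT */

struct reload_state {
	unsigned long last_reload_ms;	/* time of the previous firmware reload */
	unsigned int reload_count;	/* back-to-back reloads seen so far */
};

/* Return true if another restart may be queued, false to stop restarting. */
static bool reload_allowed(struct reload_state *s, unsigned long now_ms)
{
	unsigned long delta_ms = now_ms - s->last_reload_ms;

	s->last_reload_ms = now_ms;
	if (delta_ms <= MIN_RELOAD_DURATION_MS) {
		if (++s->reload_count >= MAX_CONTINUE_RELOAD_CNT)
			return false;
	} else {
		s->reload_count = 0;
	}
	return true;
}
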
+
+void iwl_nic_error(struct iwl_op_mode *op_mode)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+	IWL_ERR(priv, "Loaded firmware version: %s\n",
+		priv->fw->fw_version);
+
+	iwl_dump_nic_error_log(priv);
+	iwl_dump_nic_event_log(priv, false, NULL, false);
+
+	iwlagn_fw_error(priv, false);
+}
+
+void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
 {
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
 
 	if (!iwl_check_for_ct_kill(priv)) {
 		IWL_ERR(priv, "Restarting adapter queue is full\n");
-		iwl_nic_error(op_mode);
+		iwlagn_fw_error(priv, false);
 	}
 }
 
-static void iwl_nic_config(struct iwl_op_mode *op_mode)
+void iwl_nic_config(struct iwl_op_mode *op_mode)
 {
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
 
-	cfg(priv)->lib->nic_config(priv);
+	priv->lib->nic_config(priv);
 }
 
-static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
+static void iwl_wimax_active(struct iwl_op_mode *op_mode)
 {
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
 
-	set_bit(ac, &priv->transport_queue_stop);
-	ieee80211_stop_queue(priv->hw, ac);
+	clear_bit(STATUS_READY, &priv->status);
+	IWL_ERR(priv, "RF is used by WiMAX\n");
 }
 
-static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, u8 ac)
+void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+	int mq = priv->queue_to_mac80211[queue];
 
-	clear_bit(ac, &priv->transport_queue_stop);
+	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+		return;
+
+	if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"queue %d (mac80211 %d) already stopped\n",
+			queue, mq);
+		return;
+	}
+
+	set_bit(mq, &priv->transport_queue_stop);
+	ieee80211_stop_queue(priv->hw, mq);
+}
+
+void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+	int mq = priv->queue_to_mac80211[queue];
+
+	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+		return;
+
+	if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"queue %d (mac80211 %d) already awake\n",
+			queue, mq);
+		return;
+	}
+
+	clear_bit(mq, &priv->transport_queue_stop);
 
 	if (!priv->passive_no_rx)
-		ieee80211_wake_queue(priv->hw, ac);
+		ieee80211_wake_queue(priv->hw, mq);
 }
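
The stop/wake pair above reference-counts each mac80211 queue, because several transport queues can map onto one mac80211 queue: the queue is only stopped on the first stop request and only woken when the last one is released. A stripped-down sketch of that idea, using C11 atomics in place of the kernel's atomic_t (not part of the patch):

#include <stdatomic.h>
#include <stdbool.h>

struct mq_state {
	atomic_int stop_count;	/* transport queues wanting this mac80211 queue stopped */
};

/* Return true only on the 0 -> 1 transition, i.e. when the mac80211
 * queue actually has to be stopped. */
static bool mq_should_stop(struct mq_state *mq)
{
	return atomic_fetch_add(&mq->stop_count, 1) == 0;
}

/* Return true only when the count drops back to 0, i.e. when the
 * mac80211 queue may actually be woken again. */
static bool mq_should_wake(struct mq_state *mq)
{
	return atomic_fetch_sub(&mq->stop_count, 1) == 1;
}
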
 
 void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
 {
-	int ac;
+	int mq;
 
 	if (!priv->passive_no_rx)
 		return;
 
-	for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) {
-		if (!test_bit(ac, &priv->transport_queue_stop)) {
-			IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d");
-			ieee80211_wake_queue(priv->hw, ac);
+	for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
+		if (!test_bit(mq, &priv->transport_queue_stop)) {
+			IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d", mq);
+			ieee80211_wake_queue(priv->hw, mq);
 		} else {
-			IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d");
+			IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d", mq);
 		}
 	}
 
 	priv->passive_no_rx = false;
 }
 
+void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info;
+
+	info = IEEE80211_SKB_CB(skb);
+	kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
+	dev_kfree_skb_any(skb);
+}
+
+void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+	if (state)
+		set_bit(STATUS_RF_KILL_HW, &priv->status);
+	else
+		clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+	wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
+}
+
 const struct iwl_op_mode_ops iwl_dvm_ops = {
 	.start = iwl_op_mode_dvm_start,
 	.stop = iwl_op_mode_dvm_stop,
@@ -1494,6 +2314,7 @@
 	.nic_error = iwl_nic_error,
 	.cmd_queue_full = iwl_cmd_queue_full,
 	.nic_config = iwl_nic_config,
+	.wimax_active = iwl_wimax_active,
 };
 
 /*****************************************************************************
@@ -1544,96 +2365,3 @@
 
 module_exit(iwl_exit);
 module_init(iwl_init);
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-module_param_named(debug, iwlagn_mod_params.debug_level, uint,
-		   S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-
-module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO);
-MODULE_PARM_DESC(11n_disable,
-	"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
-module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
-		   int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
-module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
-
-module_param_named(ucode_alternative,
-		   iwlagn_mod_params.wanted_ucode_alternative,
-		   int, S_IRUGO);
-MODULE_PARM_DESC(ucode_alternative,
-		 "specify ucode alternative to use from ucode file");
-
-module_param_named(antenna_coupling, iwlagn_mod_params.ant_coupling,
-		   int, S_IRUGO);
-MODULE_PARM_DESC(antenna_coupling,
-		 "specify antenna coupling in dB (defualt: 0 dB)");
-
-module_param_named(bt_ch_inhibition, iwlagn_mod_params.bt_ch_announce,
-		   bool, S_IRUGO);
-MODULE_PARM_DESC(bt_ch_inhibition,
-		 "Enable BT channel inhibition (default: enable)");
-
-module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
-MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
-
-module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
-MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
-
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
-MODULE_PARM_DESC(wd_disable,
-		"Disable stuck queue watchdog timer 0=system default, "
-		"1=disable, 2=enable (default: 0)");
-
-/*
- * set bt_coex_active to true, uCode will do kill/defer
- * every time the priority line is asserted (BT is sending signals on the
- * priority line in the PCIx).
- * set bt_coex_active to false, uCode will ignore the BT activity and
- * perform the normal operation
- *
- * User might experience transmit issue on some platform due to WiFi/BT
- * co-exist problem. The possible behaviors are:
- *   Able to scan and finding all the available AP
- *   Not able to associate with any AP
- * On those platforms, WiFi communication can be restored by set
- * "bt_coex_active" module parameter to "false"
- *
- * default: bt_coex_active = true (BT_COEX_ENABLE)
- */
-module_param_named(bt_coex_active, iwlagn_mod_params.bt_coex_active,
-		bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
-
-module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO);
-MODULE_PARM_DESC(led_mode, "0=system default, "
-		"1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
-
-module_param_named(power_save, iwlagn_mod_params.power_save,
-		bool, S_IRUGO);
-MODULE_PARM_DESC(power_save,
-		 "enable WiFi power management (default: disable)");
-
-module_param_named(power_level, iwlagn_mod_params.power_level,
-		int, S_IRUGO);
-MODULE_PARM_DESC(power_level,
-		 "default power save level (range from 1 - 5, default: 1)");
-
-module_param_named(auto_agg, iwlagn_mod_params.auto_agg,
-		bool, S_IRUGO);
-MODULE_PARM_DESC(auto_agg,
-		 "enable agg w/o check traffic load (default: enable)");
-
-/*
- * For now, keep using power level 1 instead of automatically
- * adjusting ...
- */
-module_param_named(no_sleep_autoadjust, iwlagn_mod_params.no_sleep_autoadjust,
-		bool, S_IRUGO);
-MODULE_PARM_DESC(no_sleep_autoadjust,
-		 "don't automatically adjust sleep level "
-		 "according to maximum network latency (default: true)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 3780a03..79c0fe0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -64,6 +64,43 @@
 #define __iwl_agn_h__
 
 #include "iwl-dev.h"
+#include "iwl-config.h"
+
+/* The first 11 queues (0-10) are used otherwise */
+#define IWLAGN_FIRST_AMPDU_QUEUE	11
+
+/* AUX (TX during scan dwell) queue */
+#define IWL_AUX_QUEUE		10
+
+/* device operations */
+extern struct iwl_lib_ops iwl1000_lib;
+extern struct iwl_lib_ops iwl2000_lib;
+extern struct iwl_lib_ops iwl2030_lib;
+extern struct iwl_lib_ops iwl5000_lib;
+extern struct iwl_lib_ops iwl5150_lib;
+extern struct iwl_lib_ops iwl6000_lib;
+extern struct iwl_lib_ops iwl6030_lib;
+
+
+#define TIME_UNIT		1024
+
+/*****************************************************
+* DRIVER STATUS FUNCTIONS
+******************************************************/
+#define STATUS_RF_KILL_HW	0
+#define STATUS_CT_KILL		1
+#define STATUS_ALIVE		2
+#define STATUS_READY		3
+#define STATUS_GEO_CONFIGURED	4
+#define STATUS_EXIT_PENDING	5
+#define STATUS_STATISTICS	6
+#define STATUS_SCANNING		7
+#define STATUS_SCAN_ABORTING	8
+#define STATUS_SCAN_HW		9
+#define STATUS_FW_ERROR		10
+#define STATUS_CHANNEL_SWITCH_PENDING 11
+#define STATUS_SCAN_COMPLETE	12
+#define STATUS_POWER_PMI	13
 
 struct iwl_ucode_capabilities;
 
@@ -80,12 +117,9 @@
 void iwl_down(struct iwl_priv *priv);
 void iwl_cancel_deferred_work(struct iwl_priv *priv);
 void iwlagn_prepare_restart(struct iwl_priv *priv);
-void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
 int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
 				 struct iwl_rx_cmd_buffer *rxb,
 				 struct iwl_device_cmd *cmd);
-void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
-void iwl_nic_error(struct iwl_op_mode *op_mode);
 
 bool iwl_check_for_ct_kill(struct iwl_priv *priv);
 
@@ -103,6 +137,8 @@
 			 u32 flags, u16 len, const void *data);
 
 /* RXON */
+void iwl_connection_init_rx_config(struct iwl_priv *priv,
+				   struct iwl_rxon_context *ctx);
 int iwlagn_set_pan_params(struct iwl_priv *priv);
 int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
 void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -113,11 +149,15 @@
 			     u32 changes);
 void iwlagn_config_ht40(struct ieee80211_conf *conf,
 			struct iwl_rxon_context *ctx);
+void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
+void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+			 struct iwl_rxon_context *ctx);
+void iwl_set_flags_for_band(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    enum ieee80211_band band,
+			    struct ieee80211_vif *vif);
 
 /* uCode */
-int iwlagn_rx_calib_result(struct iwl_priv *priv,
-			    struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd);
 int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
 void iwl_send_prio_tbl(struct iwl_priv *priv);
 int iwl_init_alive_start(struct iwl_priv *priv);
@@ -128,14 +168,25 @@
 int iwl_calib_set(struct iwl_priv *priv,
 		  const struct iwl_calib_hdr *cmd, int len);
 void iwl_calib_free_results(struct iwl_priv *priv);
+int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+			    char **buf, bool display);
+int iwlagn_hw_valid_rtc_data_addr(u32 addr);
 
 /* lib */
 int iwlagn_send_tx_power(struct iwl_priv *priv);
 void iwlagn_temperature(struct iwl_priv *priv);
-u16 iwl_eeprom_calib_version(struct iwl_shared *shrd);
 int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
 void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
 int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
+int iwl_send_statistics_request(struct iwl_priv *priv,
+				u8 flags, bool clear);
+
+static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
+			struct iwl_priv *priv, enum ieee80211_band band)
+{
+	return priv->hw->wiphy->bands[band];
+}
+
 #ifdef CONFIG_PM_SLEEP
 int iwlagn_send_patterns(struct iwl_priv *priv,
 			 struct cfg80211_wowlan *wowlan);
@@ -145,6 +196,7 @@
 /* rx */
 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
 void iwl_setup_rx_handlers(struct iwl_priv *priv);
+void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
 
 
 /* tx */
@@ -189,6 +241,31 @@
 /* scan */
 void iwlagn_post_scan(struct iwl_priv *priv);
 void iwlagn_disable_roc(struct iwl_priv *priv);
+int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
+void iwl_init_scan_params(struct iwl_priv *priv);
+int iwl_scan_cancel(struct iwl_priv *priv);
+void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+void iwl_force_scan_end(struct iwl_priv *priv);
+void iwl_internal_short_hw_scan(struct iwl_priv *priv);
+void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
+void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
+void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
+int __must_check iwl_scan_initiate(struct iwl_priv *priv,
+				   struct ieee80211_vif *vif,
+				   enum iwl_scan_type scan_type,
+				   enum ieee80211_band band);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IWL_ACTIVE_QUIET_TIME       cpu_to_le16(10)  /* msec */
+#define IWL_PLCP_QUIET_THRESH       cpu_to_le16(1)  /* packets */
+
+#define IWL_SCAN_CHECK_WATCHDOG		(HZ * 7)
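
A rough illustration of the quiet-channel rule described in the comment above (the decision itself is made by the uCode during the scan; this standalone sketch only restates the rule and is not part of the patch):

#include <stdbool.h>

/* Leave the channel early if, quiet_time_ms after the probe request, fewer
 * than quiet_thresh frames were heard (a threshold of 0 disables this). */
static bool leave_channel_early(unsigned int frames_heard,
				unsigned int quiet_thresh,	/* cf. IWL_PLCP_QUIET_THRESH */
				unsigned int elapsed_ms,
				unsigned int quiet_time_ms)	/* cf. IWL_ACTIVE_QUIET_TIME */
{
	if (!quiet_thresh)
		return false;
	return elapsed_ms >= quiet_time_ms && frames_heard < quiet_thresh;
}
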
+
 
 /* bt coex */
 void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
@@ -201,6 +278,12 @@
 void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv);
 void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
 
+static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
+{
+	return priv->cfg->bt_params &&
+	       priv->cfg->bt_params->advanced_bt_coexist;
+}
+
 #ifdef CONFIG_IWLWIFI_DEBUG
 const char *iwl_get_tx_fail_reason(u32 status);
 const char *iwl_get_agg_tx_fail_reason(u16 status);
@@ -239,8 +322,6 @@
 u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		    const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
 
-void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-		     u8 sta_id, struct iwl_link_quality_cmd *link_cmd);
 int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		    struct iwl_link_quality_cmd *lq, u8 flags, bool init);
 int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
@@ -248,6 +329,9 @@
 int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		      struct ieee80211_sta *sta);
 
+bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    struct ieee80211_sta_ht_cap *ht_cap);
 
 static inline int iwl_sta_id(struct ieee80211_sta *sta)
 {
@@ -305,9 +389,6 @@
 	return cpu_to_le32(flags|(u32)rate);
 }
 
-/* eeprom */
-void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
-
 extern int iwl_alive_start(struct iwl_priv *priv);
 /* svtool */
 #ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
@@ -386,13 +467,35 @@
 	return iwl_is_ready(priv);
 }
 
+static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
+{
+	if (state)
+		set_bit(STATUS_POWER_PMI, &priv->status);
+	else
+		clear_bit(STATUS_POWER_PMI, &priv->status);
+	iwl_trans_set_pmi(priv->trans, state);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
+void iwl_dbgfs_unregister(struct iwl_priv *priv);
+#else
+static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+{
+	return 0;
+}
+static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
+{
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
 #ifdef CONFIG_IWLWIFI_DEBUG
 #define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...)	\
 do {									\
 	if (!iwl_is_rfkill((m)))					\
 		IWL_ERR(m, fmt, ##args);				\
 	else								\
-		__iwl_err(trans(m)->dev, true,				\
+		__iwl_err((m)->dev, true,				\
 			  !iwl_have_debug_level(IWL_DL_RADIO),		\
 			  fmt, ##args);					\
 } while (0)
@@ -402,8 +505,98 @@
 	if (!iwl_is_rfkill((m)))					\
 		IWL_ERR(m, fmt, ##args);				\
 	else								\
-		__iwl_err(trans(m)->dev, true, true, fmt, ##args);	\
+		__iwl_err((m)->dev, true, true, fmt, ##args);	\
 } while (0)
 #endif				/* CONFIG_IWLWIFI_DEBUG */
 
+extern const char *iwl_dvm_cmd_strings[REPLY_MAX];
+
+static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
+{
+	const char *s = iwl_dvm_cmd_strings[cmd];
+	if (s)
+		return s;
+	return "UNKNOWN";
+}
+
+/* API method exported for mvm hybrid state */
+void iwl_setup_deferred_work(struct iwl_priv *priv);
+int iwl_send_wimax_coex(struct iwl_priv *priv);
+int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
+void iwl_option_config(struct iwl_priv *priv);
+void iwl_set_hw_params(struct iwl_priv *priv);
+void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags);
+int iwl_init_drv(struct iwl_priv *priv);
+void iwl_uninit_drv(struct iwl_priv *priv);
+void iwl_send_bt_config(struct iwl_priv *priv);
+void iwl_rf_kill_ct_config(struct iwl_priv *priv);
+int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+void iwl_teardown_interface(struct iwl_priv *priv,
+			    struct ieee80211_vif *vif,
+			    bool mode_change);
+int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+void iwlagn_check_needed_chains(struct iwl_priv *priv,
+				struct iwl_rxon_context *ctx,
+				struct ieee80211_bss_conf *bss_conf);
+void iwlagn_chain_noise_reset(struct iwl_priv *priv);
+int iwlagn_update_beacon(struct iwl_priv *priv,
+			 struct ieee80211_vif *vif);
+void iwl_tt_handler(struct iwl_priv *priv);
+void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode);
+void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue);
+void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
+void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
+void iwl_nic_error(struct iwl_op_mode *op_mode);
+void iwl_cmd_queue_full(struct iwl_op_mode *op_mode);
+void iwl_nic_config(struct iwl_op_mode *op_mode);
+int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+		       struct ieee80211_sta *sta, bool set);
+void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+			      enum ieee80211_rssi_event rssi_event);
+int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw);
+int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
+void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
+void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue);
+void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+			       struct ieee80211_channel_switch *ch_switch);
+int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
+			 struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta,
+			 enum ieee80211_sta_state old_state,
+			 enum ieee80211_sta_state new_state);
+int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif,
+			    enum ieee80211_ampdu_mlme_action action,
+			    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+			    u8 buf_size);
+int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
+		       struct ieee80211_vif *vif,
+		       struct cfg80211_scan_request *req);
+void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+			   struct ieee80211_vif *vif,
+			   enum sta_notify_cmd cmd,
+			   struct ieee80211_sta *sta);
+void iwlagn_configure_filter(struct ieee80211_hw *hw,
+			     unsigned int changed_flags,
+			     unsigned int *total_flags,
+			     u64 multicast);
+int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+		       struct ieee80211_vif *vif, u16 queue,
+		       const struct ieee80211_tx_queue_params *params);
+void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct cfg80211_gtk_rekey_data *data);
+void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_key_conf *keyconf,
+				struct ieee80211_sta *sta,
+				u32 iv32, u16 *phase1key);
+int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		       struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta,
+		       struct ieee80211_key_conf *key);
+void iwlagn_mac_stop(struct ieee80211_hw *hw);
+void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
 #endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 9ed73e5..83a6930 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -1877,9 +1877,16 @@
 
 #define IWLAGN_BT3_T7_DEFAULT		1
 
+enum iwl_bt_kill_idx {
+	IWL_BT_KILL_DEFAULT = 0,
+	IWL_BT_KILL_OVERRIDE = 1,
+	IWL_BT_KILL_REDUCE = 2,
+};
+
 #define IWLAGN_BT_KILL_ACK_MASK_DEFAULT	cpu_to_le32(0xffff0000)
 #define IWLAGN_BT_KILL_CTS_MASK_DEFAULT	cpu_to_le32(0xffff0000)
 #define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO	cpu_to_le32(0xffffffff)
+#define IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE	cpu_to_le32(0)
 
 #define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT	2
 
@@ -1891,7 +1898,7 @@
 #define IWLAGN_BT_VALID_3W_TIMERS	cpu_to_le16(BIT(3))
 #define IWLAGN_BT_VALID_KILL_ACK_MASK	cpu_to_le16(BIT(4))
 #define IWLAGN_BT_VALID_KILL_CTS_MASK	cpu_to_le16(BIT(5))
-#define IWLAGN_BT_VALID_BT4_TIMES	cpu_to_le16(BIT(6))
+#define IWLAGN_BT_VALID_REDUCED_TX_PWR	cpu_to_le16(BIT(6))
 #define IWLAGN_BT_VALID_3W_LUT		cpu_to_le16(BIT(7))
 
 #define IWLAGN_BT_ALL_VALID_MSK		(IWLAGN_BT_VALID_ENABLE_FLAGS | \
@@ -1900,9 +1907,11 @@
 					IWLAGN_BT_VALID_3W_TIMERS | \
 					IWLAGN_BT_VALID_KILL_ACK_MASK | \
 					IWLAGN_BT_VALID_KILL_CTS_MASK | \
-					IWLAGN_BT_VALID_BT4_TIMES | \
+					IWLAGN_BT_VALID_REDUCED_TX_PWR | \
 					IWLAGN_BT_VALID_3W_LUT)
 
+#define IWLAGN_BT_DECISION_LUT_SIZE	12
+
 struct iwl_basic_bt_cmd {
 	u8 flags;
 	u8 ledtime; /* unused */
@@ -1913,12 +1922,13 @@
 	u8 bt3_prio_sample_time;
 	u8 bt3_timer_t2_value;
 	__le16 bt4_reaction_time; /* unused */
-	__le32 bt3_lookup_table[12];
-	__le16 bt4_decision_time; /* unused */
+	__le32 bt3_lookup_table[IWLAGN_BT_DECISION_LUT_SIZE];
+	u8 reduce_txpower;
+	u8 reserved;
 	__le16 valid;
 };
 
-struct iwl6000_bt_cmd {
+struct iwl_bt_cmd_v1 {
 	struct iwl_basic_bt_cmd basic;
 	u8 prio_boost;
 	/*
@@ -1929,7 +1939,7 @@
 	__le16 rx_prio_boost;	/* SW boost of WiFi rx priority */
 };
 
-struct iwl2000_bt_cmd {
+struct iwl_bt_cmd_v2 {
 	struct iwl_basic_bt_cmd basic;
 	__le32 prio_boost;
 	/*
@@ -3634,6 +3644,9 @@
 		(0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
 
 
+#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD	(-62)
+#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD	(-65)
+
 struct iwl_bt_uart_msg {
 	u8 header;
 	u8 frame1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
new file mode 100644
index 0000000..67b28aa
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -0,0 +1,255 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __IWL_CONFIG_H__
+#define __IWL_CONFIG_H__
+
+#include <linux/types.h>
+#include <net/mac80211.h>
+
+
+enum iwl_device_family {
+	IWL_DEVICE_FAMILY_UNDEFINED,
+	IWL_DEVICE_FAMILY_1000,
+	IWL_DEVICE_FAMILY_100,
+	IWL_DEVICE_FAMILY_2000,
+	IWL_DEVICE_FAMILY_2030,
+	IWL_DEVICE_FAMILY_105,
+	IWL_DEVICE_FAMILY_135,
+	IWL_DEVICE_FAMILY_5000,
+	IWL_DEVICE_FAMILY_5150,
+	IWL_DEVICE_FAMILY_6000,
+	IWL_DEVICE_FAMILY_6000i,
+	IWL_DEVICE_FAMILY_6005,
+	IWL_DEVICE_FAMILY_6030,
+	IWL_DEVICE_FAMILY_6050,
+	IWL_DEVICE_FAMILY_6150,
+};
+
+/*
+ * LED mode
+ *    IWL_LED_DEFAULT:  use device default
+ *    IWL_LED_RF_STATE: turn LED on/off based on RF state
+ *			LED ON  = RF ON
+ *			LED OFF = RF OFF
+ *    IWL_LED_BLINK:    adjust led blink rate based on blink table
+ *    IWL_LED_DISABLE:	led disabled
+ */
+enum iwl_led_mode {
+	IWL_LED_DEFAULT,
+	IWL_LED_RF_STATE,
+	IWL_LED_BLINK,
+	IWL_LED_DISABLE,
+};
+
+/*
+ * This is the threshold value of plcp error rate per 100mSecs.  It is
+ * used to set and check for the validity of plcp_delta.
+ */
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN		1
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF		50
+#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF	100
+#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF	200
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX		255
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE	0
+
+/* TX queue watchdog timeouts in mSecs */
+#define IWL_WATCHHDOG_DISABLED	0
+#define IWL_DEF_WD_TIMEOUT	2000
+#define IWL_LONG_WD_TIMEOUT	10000
+#define IWL_MAX_WD_TIMEOUT	120000
+
+/* Antenna presence definitions */
+#define	ANT_NONE	0x0
+#define	ANT_A		BIT(0)
+#define	ANT_B		BIT(1)
+#define ANT_C		BIT(2)
+#define	ANT_AB		(ANT_A | ANT_B)
+#define	ANT_AC		(ANT_A | ANT_C)
+#define ANT_BC		(ANT_B | ANT_C)
+#define ANT_ABC		(ANT_A | ANT_B | ANT_C)
+
+
+/*
+ * @max_ll_items: max number of OTP blocks
+ * @shadow_ram_support: shadow support for OTP memory
+ * @led_compensation: compensate for the LED on/off time per HW according
+ *	to the deviation, to achieve the desired LED frequency.
+ *	The detailed algorithm is described in iwl-led.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @adv_thermal_throttle: support advanced thermal throttling
+ * @support_ct_kill_exit: support ct kill exit condition
+ * @plcp_delta_threshold: plcp error rate threshold used to trigger
+ *	radio tuning when the receive plcp error rate is too high
+ * @chain_noise_scale: default chain noise scale used for gain computation
+ * @wd_timeout: TX queues watchdog timeout
+ * @max_event_log_size: size of the event log buffer for uCode event logging
+ * @shadow_reg_enable: HW shadow register bit
+ * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @no_idle_support: do not support idle mode
+ */
+struct iwl_base_params {
+	int eeprom_size;
+	int num_of_queues;	/* def: HW dependent */
+	/* for iwl_apm_init() */
+	u32 pll_cfg_val;
+
+	const u16 max_ll_items;
+	const bool shadow_ram_support;
+	u16 led_compensation;
+	bool adv_thermal_throttle;
+	bool support_ct_kill_exit;
+	u8 plcp_delta_threshold;
+	s32 chain_noise_scale;
+	unsigned int wd_timeout;
+	u32 max_event_log_size;
+	const bool shadow_reg_enable;
+	const bool hd_v2;
+	const bool no_idle_support;
+};
+
+/*
+ * @advanced_bt_coexist: support advanced bt coexist
+ * @bt_init_traffic_load: specify initial bt traffic load
+ * @bt_prio_boost: default bt priority boost value
+ * @agg_time_limit: maximum number of uSec in aggregation
+ * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
+ */
+struct iwl_bt_params {
+	bool advanced_bt_coexist;
+	u8 bt_init_traffic_load;
+	u8 bt_prio_boost;
+	u16 agg_time_limit;
+	bool bt_sco_disable;
+	bool bt_session_2;
+};
+/*
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
+ */
+struct iwl_ht_params {
+	const bool ht_greenfield_support; /* if used set to true */
+	bool use_rts_for_aggregation;
+	enum ieee80211_smps_mode smps_mode;
+};
+
+/**
+ * struct iwl_cfg
+ * @name: Official name of the device
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ *	(.ucode) will be added to filename before loading from disk. The
+ *	filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_ok: oldest version of the uCode API that is OK to load
+ *	without a warning, for use in transitions
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @max_inst_size: The maximal length of the fw inst section
+ * @max_data_size: The maximal length of the fw data section
+ * @valid_tx_ant: valid transmit antenna
+ * @valid_rx_ant: valid receive antenna
+ * @eeprom_ver: EEPROM version
+ * @eeprom_calib_ver: EEPROM calibration version
+ * @lib: pointer to the lib ops
+ * @base_params: pointer to basic parameters
+ * @ht_params: pointer to ht parameters
+ * @bt_params: pointer to bt parameters
+ * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ *	don't send it to those
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ * @adv_pm: advanced power management
+ * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
+ * @internal_wimax_coex: internal wifi/wimax combo device
+ * @temp_offset_v2: support v2 of temperature offset calibration
+ *
+ * We enable the driver to be backward compatible wrt. hardware features.
+ * API differences in uCode shouldn't be handled here but through TLVs
+ * and/or the uCode API version instead.
+ */
+struct iwl_cfg {
+	/* params specific to an individual device within a device family */
+	const char *name;
+	const char *fw_name_pre;
+	const unsigned int ucode_api_max;
+	const unsigned int ucode_api_ok;
+	const unsigned int ucode_api_min;
+	const enum iwl_device_family device_family;
+	const u32 max_data_size;
+	const u32 max_inst_size;
+	u8   valid_tx_ant;
+	u8   valid_rx_ant;
+	u16  eeprom_ver;
+	u16  eeprom_calib_ver;
+	/* params not likely to change within a device family */
+	const struct iwl_base_params *base_params;
+	/* params likely to change within a device family */
+	const struct iwl_ht_params *ht_params;
+	const struct iwl_bt_params *bt_params;
+	const bool need_temp_offset_calib; /* if used set to true */
+	const bool no_xtal_calib;
+	enum iwl_led_mode led_mode;
+	const bool adv_pm;
+	const bool rx_with_siso_diversity;
+	const bool internal_wimax_coex;
+	const bool temp_offset_v2;
+};
+
+#endif /* __IWL_CONFIG_H__ */
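
For orientation, a hypothetical device entry built on the structures declared above might look as follows. It assumes the definitions from this iwl-config.h; the name, firmware prefix and numbers are illustrative only and are not taken from the driver's real device tables:

/* Illustrative only -- not a real device configuration. */
static const struct iwl_base_params example_base_params = {
	.eeprom_size		= 2048,
	.num_of_queues		= 20,
	.plcp_delta_threshold	= IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
	.wd_timeout		= IWL_DEF_WD_TIMEOUT,
	.max_event_log_size	= 512,
	.shadow_ram_support	= true,
};

static const struct iwl_cfg example_cfg = {
	.name			= "Example Wireless N 0000",
	.fw_name_pre		= "iwlwifi-0000-",
	.ucode_api_max		= 6,
	.ucode_api_ok		= 6,
	.ucode_api_min		= 4,
	.device_family		= IWL_DEVICE_FAMILY_6000,
	.max_inst_size		= 16 * 1024,
	.max_data_size		= 16 * 1024,
	.valid_tx_ant		= ANT_AB,
	.valid_rx_ant		= ANT_AB,
	.base_params		= &example_base_params,
	.led_mode		= IWL_LED_RF_STATE,
};
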
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
deleted file mode 100644
index 46490d3..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ /dev/null
@@ -1,1480 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-power.h"
-#include "iwl-shared.h"
-#include "iwl-agn.h"
-#include "iwl-trans.h"
-
-const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
-			      struct ieee80211_sta_ht_cap *ht_info,
-			      enum ieee80211_band band)
-{
-	u16 max_bit_rate = 0;
-	u8 rx_chains_num = hw_params(priv).rx_chains_num;
-	u8 tx_chains_num = hw_params(priv).tx_chains_num;
-
-	ht_info->cap = 0;
-	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
-	ht_info->ht_supported = true;
-
-	if (cfg(priv)->ht_params &&
-	    cfg(priv)->ht_params->ht_greenfield_support)
-		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
-	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-	max_bit_rate = MAX_BIT_RATE_20_MHZ;
-	if (hw_params(priv).ht40_channel & BIT(band)) {
-		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
-		ht_info->mcs.rx_mask[4] = 0x01;
-		max_bit_rate = MAX_BIT_RATE_40_MHZ;
-	}
-
-	if (iwlagn_mod_params.amsdu_size_8K)
-		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
-	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
-	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
-
-	ht_info->mcs.rx_mask[0] = 0xFF;
-	if (rx_chains_num >= 2)
-		ht_info->mcs.rx_mask[1] = 0xFF;
-	if (rx_chains_num >= 3)
-		ht_info->mcs.rx_mask[2] = 0xFF;
-
-	/* Highest supported Rx data rate */
-	max_bit_rate *= rx_chains_num;
-	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
-	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
-	/* Tx MCS capabilities */
-	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
-	if (tx_chains_num != rx_chains_num) {
-		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
-		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
-				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
-	}
-}
-
-/**
- * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
- */
-int iwl_init_geos(struct iwl_priv *priv)
-{
-	struct iwl_channel_info *ch;
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_channel *channels;
-	struct ieee80211_channel *geo_ch;
-	struct ieee80211_rate *rates;
-	int i = 0;
-	s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
-
-	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
-	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
-		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
-		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-		return 0;
-	}
-
-	channels = kcalloc(priv->channel_count,
-			   sizeof(struct ieee80211_channel), GFP_KERNEL);
-	if (!channels)
-		return -ENOMEM;
-
-	rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
-			GFP_KERNEL);
-	if (!rates) {
-		kfree(channels);
-		return -ENOMEM;
-	}
-
-	/* 5.2GHz channels start after the 2.4GHz channels */
-	sband = &priv->bands[IEEE80211_BAND_5GHZ];
-	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
-	/* just OFDM */
-	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
-	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
-
-	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
-		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
-					 IEEE80211_BAND_5GHZ);
-
-	sband = &priv->bands[IEEE80211_BAND_2GHZ];
-	sband->channels = channels;
-	/* OFDM & CCK */
-	sband->bitrates = rates;
-	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
-
-	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
-		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
-					 IEEE80211_BAND_2GHZ);
-
-	priv->ieee_channels = channels;
-	priv->ieee_rates = rates;
-
-	for (i = 0;  i < priv->channel_count; i++) {
-		ch = &priv->channel_info[i];
-
-		/* FIXME: might be removed if scan is OK */
-		if (!is_channel_valid(ch))
-			continue;
-
-		sband =  &priv->bands[ch->band];
-
-		geo_ch = &sband->channels[sband->n_channels++];
-
-		geo_ch->center_freq =
-			ieee80211_channel_to_frequency(ch->channel, ch->band);
-		geo_ch->max_power = ch->max_power_avg;
-		geo_ch->max_antenna_gain = 0xff;
-		geo_ch->hw_value = ch->channel;
-
-		if (is_channel_valid(ch)) {
-			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
-				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
-
-			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
-				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
-
-			if (ch->flags & EEPROM_CHANNEL_RADAR)
-				geo_ch->flags |= IEEE80211_CHAN_RADAR;
-
-			geo_ch->flags |= ch->ht40_extension_channel;
-
-			if (ch->max_power_avg > max_tx_power)
-				max_tx_power = ch->max_power_avg;
-		} else {
-			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
-		}
-
-		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
-				ch->channel, geo_ch->center_freq,
-				is_channel_a_band(ch) ?  "5.2" : "2.4",
-				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
-				"restricted" : "valid",
-				 geo_ch->flags);
-	}
-
-	priv->tx_power_device_lmt = max_tx_power;
-	priv->tx_power_user_lmt = max_tx_power;
-	priv->tx_power_next = max_tx_power;
-
-	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
-	     hw_params(priv).sku & EEPROM_SKU_CAP_BAND_52GHZ) {
-		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
-			"Please send your %s to maintainer.\n",
-			trans(priv)->hw_id_str);
-		hw_params(priv).sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
-	}
-
-	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
-		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
-		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);
-
-	set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-
-	return 0;
-}
-
-/*
- * iwl_free_geos - undo allocations in iwl_init_geos
- */
-void iwl_free_geos(struct iwl_priv *priv)
-{
-	kfree(priv->ieee_channels);
-	kfree(priv->ieee_rates);
-	clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
-}
-
-static bool iwl_is_channel_extension(struct iwl_priv *priv,
-				     enum ieee80211_band band,
-				     u16 channel, u8 extension_chan_offset)
-{
-	const struct iwl_channel_info *ch_info;
-
-	ch_info = iwl_get_channel_info(priv, band, channel);
-	if (!is_channel_valid(ch_info))
-		return false;
-
-	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
-		return !(ch_info->ht40_extension_channel &
-					IEEE80211_CHAN_NO_HT40PLUS);
-	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
-		return !(ch_info->ht40_extension_channel &
-					IEEE80211_CHAN_NO_HT40MINUS);
-
-	return false;
-}
-
-bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx,
-			    struct ieee80211_sta_ht_cap *ht_cap)
-{
-	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
-		return false;
-
-	/*
-	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
-	 * the bit will not set if it is pure 40MHz case
-	 */
-	if (ht_cap && !ht_cap->ht_supported)
-		return false;
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-	if (priv->disable_ht40)
-		return false;
-#endif
-
-	return iwl_is_channel_extension(priv, priv->band,
-			le16_to_cpu(ctx->staging.channel),
-			ctx->ht.extension_chan_offset);
-}
-
-static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
-{
-	u16 new_val;
-	u16 beacon_factor;
-
-	/*
-	 * If mac80211 hasn't given us a beacon interval, program
-	 * the default into the device (not checking this here
-	 * would cause the adjustment below to return the maximum
-	 * value, which may break PAN.)
-	 */
-	if (!beacon_val)
-		return DEFAULT_BEACON_INTERVAL;
-
-	/*
-	 * If the beacon interval we obtained from the peer
-	 * is too large, we'll have to wake up more often
-	 * (and in IBSS case, we'll beacon too much)
-	 *
-	 * For example, if max_beacon_val is 4096, and the
-	 * requested beacon interval is 7000, we'll have to
-	 * use 3500 to be able to wake up on the beacons.
-	 *
-	 * This could badly influence beacon detection stats.
-	 */
-
-	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
-	new_val = beacon_val / beacon_factor;
-
-	if (!new_val)
-		new_val = max_beacon_val;
-
-	return new_val;
-}
-
-int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	u64 tsf;
-	s32 interval_tm, rem;
-	struct ieee80211_conf *conf = NULL;
-	u16 beacon_int;
-	struct ieee80211_vif *vif = ctx->vif;
-
-	conf = &priv->hw->conf;
-
-	lockdep_assert_held(&priv->mutex);
-
-	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
-
-	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
-	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
-
-	beacon_int = vif ? vif->bss_conf.beacon_int : 0;
-
-	/*
-	 * TODO: For IBSS we need to get atim_window from mac80211,
-	 *	 for now just always use 0
-	 */
-	ctx->timing.atim_window = 0;
-
-	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
-	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
-	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
-	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
-	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
-		ctx->timing.beacon_interval =
-			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
-		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
-	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
-		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
-		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
-		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
-		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
-		    !ctx->vif->bss_conf.beacon_int)) {
-		ctx->timing.beacon_interval =
-			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
-		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
-	} else {
-		beacon_int = iwl_adjust_beacon_interval(beacon_int,
-			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
-		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
-	}
-
-	ctx->beacon_int = beacon_int;
-
-	tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
-	interval_tm = beacon_int * TIME_UNIT;
-	rem = do_div(tsf, interval_tm);
-	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
-
-	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
-
-	IWL_DEBUG_ASSOC(priv,
-			"beacon interval %d beacon timer %d beacon tim %d\n",
-			le16_to_cpu(ctx->timing.beacon_interval),
-			le32_to_cpu(ctx->timing.beacon_init_val),
-			le16_to_cpu(ctx->timing.atim_window));
-
-	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
-				CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
-}
-
-void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-			   int hw_decrypt)
-{
-	struct iwl_rxon_cmd *rxon = &ctx->staging;
-
-	if (hw_decrypt)
-		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
-	else
-		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
-
-}
-
-/* validate RXON structure is valid */
-int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-	struct iwl_rxon_cmd *rxon = &ctx->staging;
-	u32 errors = 0;
-
-	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
-		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
-			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
-			errors |= BIT(0);
-		}
-		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
-			IWL_WARN(priv, "check 2.4G: wrong radar\n");
-			errors |= BIT(1);
-		}
-	} else {
-		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
-			IWL_WARN(priv, "check 5.2G: not short slot!\n");
-			errors |= BIT(2);
-		}
-		if (rxon->flags & RXON_FLG_CCK_MSK) {
-			IWL_WARN(priv, "check 5.2G: CCK!\n");
-			errors |= BIT(3);
-		}
-	}
-	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
-		IWL_WARN(priv, "mac/bssid mcast!\n");
-		errors |= BIT(4);
-	}
-
-	/* make sure basic rates 6Mbps and 1Mbps are supported */
-	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
-	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
-		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
-		errors |= BIT(5);
-	}
-
-	if (le16_to_cpu(rxon->assoc_id) > 2007) {
-		IWL_WARN(priv, "aid > 2007\n");
-		errors |= BIT(6);
-	}
-
-	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
-			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
-		IWL_WARN(priv, "CCK and short slot\n");
-		errors |= BIT(7);
-	}
-
-	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
-			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
-		IWL_WARN(priv, "CCK and auto detect");
-		errors |= BIT(8);
-	}
-
-	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
-			    RXON_FLG_TGG_PROTECT_MSK)) ==
-			    RXON_FLG_TGG_PROTECT_MSK) {
-		IWL_WARN(priv, "TGg but no auto-detect\n");
-		errors |= BIT(9);
-	}
-
-	if (rxon->channel == 0) {
-		IWL_WARN(priv, "zero channel is invalid\n");
-		errors |= BIT(10);
-	}
-
-	WARN(errors, "Invalid RXON (%#x), channel %d",
-	     errors, le16_to_cpu(rxon->channel));
-
-	return errors ? -EINVAL : 0;
-}
-
-/**
- * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
- * @priv: staging_rxon is compared to active_rxon
- *
- * If the RXON structure is changing enough to require a new tune,
- * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
- * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
- */
-int iwl_full_rxon_required(struct iwl_priv *priv,
-			   struct iwl_rxon_context *ctx)
-{
-	const struct iwl_rxon_cmd *staging = &ctx->staging;
-	const struct iwl_rxon_cmd *active = &ctx->active;
-
-#define CHK(cond)							\
-	if ((cond)) {							\
-		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
-		return 1;						\
-	}
-
-#define CHK_NEQ(c1, c2)						\
-	if ((c1) != (c2)) {					\
-		IWL_DEBUG_INFO(priv, "need full RXON - "	\
-			       #c1 " != " #c2 " - %d != %d\n",	\
-			       (c1), (c2));			\
-		return 1;					\
-	}
-
-	/* These items are only settable from the full RXON command */
-	CHK(!iwl_is_associated_ctx(ctx));
-	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
-	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
-	CHK(compare_ether_addr(staging->wlap_bssid_addr,
-				active->wlap_bssid_addr));
-	CHK_NEQ(staging->dev_type, active->dev_type);
-	CHK_NEQ(staging->channel, active->channel);
-	CHK_NEQ(staging->air_propagation, active->air_propagation);
-	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
-		active->ofdm_ht_single_stream_basic_rates);
-	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
-		active->ofdm_ht_dual_stream_basic_rates);
-	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
-		active->ofdm_ht_triple_stream_basic_rates);
-	CHK_NEQ(staging->assoc_id, active->assoc_id);
-
-	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
-	 * be updated with the RXON_ASSOC command -- however only some
-	 * flag transitions are allowed using RXON_ASSOC */
-
-	/* Check if we are not switching bands */
-	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
-		active->flags & RXON_FLG_BAND_24G_MSK);
-
-	/* Check if we are switching association toggle */
-	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
-		active->filter_flags & RXON_FILTER_ASSOC_MSK);
-
-#undef CHK
-#undef CHK_NEQ
-
-	return 0;
-}
-
-static void _iwl_set_rxon_ht(struct iwl_priv *priv,
-			     struct iwl_ht_config *ht_conf,
-			     struct iwl_rxon_context *ctx)
-{
-	struct iwl_rxon_cmd *rxon = &ctx->staging;
-
-	if (!ctx->ht.enabled) {
-		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
-			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
-			RXON_FLG_HT40_PROT_MSK |
-			RXON_FLG_HT_PROT_MSK);
-		return;
-	}
-
-	/* FIXME: if the definition of ht.protection changed, the "translation"
-	 * will be needed for rxon->flags
-	 */
-	rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
-
-	/* Set up channel bandwidth:
-	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
-	/* clear the HT channel mode before set the mode */
-	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
-			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
-	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
-		/* pure ht40 */
-		if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
-			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
-			/* Note: control channel is opposite of extension channel */
-			switch (ctx->ht.extension_chan_offset) {
-			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-				rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
-				break;
-			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
-				break;
-			}
-		} else {
-			/* Note: control channel is opposite of extension channel */
-			switch (ctx->ht.extension_chan_offset) {
-			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-				rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
-				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
-				break;
-			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
-				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
-				break;
-			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
-			default:
-				/* channel location only valid if in Mixed mode */
-				IWL_ERR(priv, "invalid extension channel offset\n");
-				break;
-			}
-		}
-	} else {
-		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
-	}
-
-	iwlagn_set_rxon_chain(priv, ctx);
-
-	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
-			"extension channel offset 0x%x\n",
-			le32_to_cpu(rxon->flags), ctx->ht.protection,
-			ctx->ht.extension_chan_offset);
-}
-
-void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
-{
-	struct iwl_rxon_context *ctx;
-
-	for_each_context(priv, ctx)
-		_iwl_set_rxon_ht(priv, ht_conf, ctx);
-}
-
-/* Return valid, unused, channel for a passive scan to reset the RF */
-u8 iwl_get_single_channel_number(struct iwl_priv *priv,
-				 enum ieee80211_band band)
-{
-	const struct iwl_channel_info *ch_info;
-	int i;
-	u8 channel = 0;
-	u8 min, max;
-	struct iwl_rxon_context *ctx;
-
-	if (band == IEEE80211_BAND_5GHZ) {
-		min = 14;
-		max = priv->channel_count;
-	} else {
-		min = 0;
-		max = 14;
-	}
-
-	for (i = min; i < max; i++) {
-		bool busy = false;
-
-		for_each_context(priv, ctx) {
-			busy = priv->channel_info[i].channel ==
-				le16_to_cpu(ctx->staging.channel);
-			if (busy)
-				break;
-		}
-
-		if (busy)
-			continue;
-
-		channel = priv->channel_info[i].channel;
-		ch_info = iwl_get_channel_info(priv, band, channel);
-		if (is_channel_valid(ch_info))
-			break;
-	}
-
-	return channel;
-}
-
-/**
- * iwl_set_rxon_channel - Set the band and channel values in staging RXON
- * @ch: requested channel as a pointer to struct ieee80211_channel
-
- * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
- * in the staging RXON flag structure based on the ch->band
- */
-void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
-			 struct iwl_rxon_context *ctx)
-{
-	enum ieee80211_band band = ch->band;
-	u16 channel = ch->hw_value;
-
-	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
-	    (priv->band == band))
-		return;
-
-	ctx->staging.channel = cpu_to_le16(channel);
-	if (band == IEEE80211_BAND_5GHZ)
-		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
-	else
-		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
-
-	priv->band = band;
-
-	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
-
-}
-
-void iwl_set_flags_for_band(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx,
-			    enum ieee80211_band band,
-			    struct ieee80211_vif *vif)
-{
-	if (band == IEEE80211_BAND_5GHZ) {
-		ctx->staging.flags &=
-		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
-		      | RXON_FLG_CCK_MSK);
-		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-	} else {
-		/* Copied from iwl_post_associate() */
-		if (vif && vif->bss_conf.use_short_slot)
-			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-		else
-			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
-		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
-		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
-		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
-	}
-}
-
-/*
- * initialize rxon structure with default values from eeprom
- */
-void iwl_connection_init_rx_config(struct iwl_priv *priv,
-				   struct iwl_rxon_context *ctx)
-{
-	const struct iwl_channel_info *ch_info;
-
-	memset(&ctx->staging, 0, sizeof(ctx->staging));
-
-	if (!ctx->vif) {
-		ctx->staging.dev_type = ctx->unused_devtype;
-	} else switch (ctx->vif->type) {
-	case NL80211_IFTYPE_AP:
-		ctx->staging.dev_type = ctx->ap_devtype;
-		break;
-
-	case NL80211_IFTYPE_STATION:
-		ctx->staging.dev_type = ctx->station_devtype;
-		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
-		break;
-
-	case NL80211_IFTYPE_ADHOC:
-		ctx->staging.dev_type = ctx->ibss_devtype;
-		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
-		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
-						  RXON_FILTER_ACCEPT_GRP_MSK;
-		break;
-
-	default:
-		IWL_ERR(priv, "Unsupported interface type %d\n",
-			ctx->vif->type);
-		break;
-	}
-
-#if 0
-	/* TODO:  Figure out when short_preamble would be set and cache from
-	 * that */
-	if (!hw_to_local(priv->hw)->short_preamble)
-		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-	else
-		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-#endif
-
-	ch_info = iwl_get_channel_info(priv, priv->band,
-				       le16_to_cpu(ctx->active.channel));
-
-	if (!ch_info)
-		ch_info = &priv->channel_info[0];
-
-	ctx->staging.channel = cpu_to_le16(ch_info->channel);
-	priv->band = ch_info->band;
-
-	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
-
-	ctx->staging.ofdm_basic_rates =
-	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-	ctx->staging.cck_basic_rates =
-	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
-	/* clear both MIX and PURE40 mode flag */
-	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
-					RXON_FLG_CHANNEL_MODE_PURE_40);
-	if (ctx->vif)
-		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
-
-	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
-	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
-	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
-}
-
-void iwl_set_rate(struct iwl_priv *priv)
-{
-	const struct ieee80211_supported_band *hw = NULL;
-	struct ieee80211_rate *rate;
-	struct iwl_rxon_context *ctx;
-	int i;
-
-	hw = iwl_get_hw_mode(priv, priv->band);
-	if (!hw) {
-		IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
-		return;
-	}
-
-	priv->active_rate = 0;
-
-	for (i = 0; i < hw->n_bitrates; i++) {
-		rate = &(hw->bitrates[i]);
-		if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
-			priv->active_rate |= (1 << rate->hw_value);
-	}
-
-	IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
-
-	for_each_context(priv, ctx) {
-		ctx->staging.cck_basic_rates =
-		    (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
-		ctx->staging.ofdm_basic_rates =
-		   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-	}
-}
-
-void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
-{
-	/*
-	 * MULTI-FIXME
-	 * See iwlagn_mac_channel_switch.
-	 */
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-		ieee80211_chswitch_done(ctx->vif, is_success);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-void iwl_print_rx_config_cmd(struct iwl_priv *priv,
-			     enum iwl_rxon_context_id ctxid)
-{
-	struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
-	struct iwl_rxon_cmd *rxon = &ctx->staging;
-
-	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
-	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
-	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
-	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
-	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
-			le32_to_cpu(rxon->filter_flags));
-	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
-	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
-			rxon->ofdm_basic_rates);
-	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
-	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
-	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
-	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
-}
-#endif
-
-static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
-{
-	unsigned int reload_msec;
-	unsigned long reload_jiffies;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-	if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
-		iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
-#endif
-
-	/* uCode is no longer loaded. */
-	priv->ucode_loaded = false;
-
-	/* Set the FW error flag -- cleared on iwl_down */
-	set_bit(STATUS_FW_ERROR, &priv->shrd->status);
-
-	/* Cancel currently queued command. */
-	clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
-
-	iwl_abort_notification_waits(&priv->notif_wait);
-
-	/* Keep the restart process from trying to send host
-	 * commands by clearing the ready bit */
-	clear_bit(STATUS_READY, &priv->status);
-
-	wake_up(&trans(priv)->wait_command_queue);
-
-	if (!ondemand) {
-		/*
-		 * If the firmware keeps reloading, it indicates something is
-		 * seriously wrong and that the firmware is having trouble
-		 * recovering from it. Instead of retrying endlessly, which
-		 * would fill the syslog and hang the system, just stop it.
-		 */
-		reload_jiffies = jiffies;
-		reload_msec = jiffies_to_msecs((long) reload_jiffies -
-					(long) priv->reload_jiffies);
-		priv->reload_jiffies = reload_jiffies;
-		if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
-			priv->reload_count++;
-			if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
-				IWL_ERR(priv, "BUG_ON, Stop restarting\n");
-				return;
-			}
-		} else
-			priv->reload_count = 0;
-	}
-
-	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-		if (iwlagn_mod_params.restart_fw) {
-			IWL_DEBUG_FW_ERRORS(priv,
-				  "Restarting adapter due to uCode error.\n");
-			queue_work(priv->workqueue, &priv->restart);
-		} else
-			IWL_DEBUG_FW_ERRORS(priv,
-				  "Detected FW error, but not restarting\n");
-	}
-}
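
A minimal standalone sketch of the same restart-throttling idea follows; the
constants, names and time source below are illustrative, not the driver's.

#include <stdbool.h>

#define MIN_RELOAD_DURATION_MS	1000	/* assumed threshold */
#define MAX_CONTINUE_RELOAD_CNT	4	/* assumed limit */

struct reload_state {
	unsigned long last_ms;	/* time of the previous reload attempt */
	unsigned int count;	/* consecutive quick reloads so far */
};

/* Return true if another firmware restart should be attempted. */
bool reload_allowed(struct reload_state *s, unsigned long now_ms)
{
	unsigned long delta_ms = now_ms - s->last_ms;

	s->last_ms = now_ms;
	if (delta_ms <= MIN_RELOAD_DURATION_MS) {
		/* reloading again too quickly: count it and eventually give up */
		if (++s->count >= MAX_CONTINUE_RELOAD_CNT)
			return false;
	} else {
		/* enough time passed since the last reload: start over */
		s->count = 0;
	}
	return true;
}
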
-
-int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
-{
-	int ret;
-	s8 prev_tx_power;
-	bool defer;
-	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	lockdep_assert_held(&priv->mutex);
-
-	if (priv->tx_power_user_lmt == tx_power && !force)
-		return 0;
-
-	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
-		IWL_WARN(priv,
-			 "Requested user TXPOWER %d below lower limit %d.\n",
-			 tx_power,
-			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
-		return -EINVAL;
-	}
-
-	if (tx_power > priv->tx_power_device_lmt) {
-		IWL_WARN(priv,
-			"Requested user TXPOWER %d above upper limit %d.\n",
-			 tx_power, priv->tx_power_device_lmt);
-		return -EINVAL;
-	}
-
-	if (!iwl_is_ready_rf(priv))
-		return -EIO;
-
-	/* scan complete and commit_rxon use the tx_power_next value,
-	 * so it always needs to be updated with the newest request */
-	priv->tx_power_next = tx_power;
-
-	/* do not set tx power when scanning or channel changing */
-	defer = test_bit(STATUS_SCANNING, &priv->status) ||
-		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
-	if (defer && !force) {
-		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
-		return 0;
-	}
-
-	prev_tx_power = priv->tx_power_user_lmt;
-	priv->tx_power_user_lmt = tx_power;
-
-	ret = iwlagn_send_tx_power(priv);
-
-	/* if setting tx_power fails, restore the original tx power */
-	if (ret) {
-		priv->tx_power_user_lmt = prev_tx_power;
-		priv->tx_power_next = prev_tx_power;
-	}
-	return ret;
-}
-
-void iwl_send_bt_config(struct iwl_priv *priv)
-{
-	struct iwl_bt_cmd bt_cmd = {
-		.lead_time = BT_LEAD_TIME_DEF,
-		.max_kill = BT_MAX_KILL_DEF,
-		.kill_ack_mask = 0,
-		.kill_cts_mask = 0,
-	};
-
-	if (!iwlagn_mod_params.bt_coex_active)
-		bt_cmd.flags = BT_COEX_DISABLE;
-	else
-		bt_cmd.flags = BT_COEX_ENABLE;
-
-	priv->bt_enable_flag = bt_cmd.flags;
-	IWL_DEBUG_INFO(priv, "BT coex %s\n",
-		(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
-
-	if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
-			     CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
-		IWL_ERR(priv, "failed to send BT Coex Config\n");
-}
-
-int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
-{
-	struct iwl_statistics_cmd statistics_cmd = {
-		.configuration_flags =
-			clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
-	};
-
-	if (flags & CMD_ASYNC)
-		return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
-					      CMD_ASYNC,
-					       sizeof(struct iwl_statistics_cmd),
-					       &statistics_cmd);
-	else
-		return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
-					CMD_SYNC,
-					sizeof(struct iwl_statistics_cmd),
-					&statistics_cmd);
-}
-
-
-
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-
-#define IWL_TRAFFIC_DUMP_SIZE	(IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
-
-void iwl_reset_traffic_log(struct iwl_priv *priv)
-{
-	priv->tx_traffic_idx = 0;
-	priv->rx_traffic_idx = 0;
-	if (priv->tx_traffic)
-		memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
-	if (priv->rx_traffic)
-		memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
-}
-
-int iwl_alloc_traffic_mem(struct iwl_priv *priv)
-{
-	u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
-
-	if (iwl_have_debug_level(IWL_DL_TX)) {
-		if (!priv->tx_traffic) {
-			priv->tx_traffic =
-				kzalloc(traffic_size, GFP_KERNEL);
-			if (!priv->tx_traffic)
-				return -ENOMEM;
-		}
-	}
-	if (iwl_have_debug_level(IWL_DL_RX)) {
-		if (!priv->rx_traffic) {
-			priv->rx_traffic =
-				kzalloc(traffic_size, GFP_KERNEL);
-			if (!priv->rx_traffic)
-				return -ENOMEM;
-		}
-	}
-	iwl_reset_traffic_log(priv);
-	return 0;
-}
-
-void iwl_free_traffic_mem(struct iwl_priv *priv)
-{
-	kfree(priv->tx_traffic);
-	priv->tx_traffic = NULL;
-
-	kfree(priv->rx_traffic);
-	priv->rx_traffic = NULL;
-}
-
-void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
-		      u16 length, struct ieee80211_hdr *header)
-{
-	__le16 fc;
-	u16 len;
-
-	if (likely(!iwl_have_debug_level(IWL_DL_TX)))
-		return;
-
-	if (!priv->tx_traffic)
-		return;
-
-	fc = header->frame_control;
-	if (ieee80211_is_data(fc)) {
-		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
-		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
-		memcpy((priv->tx_traffic +
-		       (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
-		       header, len);
-		priv->tx_traffic_idx =
-			(priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
-	}
-}
-
-void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
-		      u16 length, struct ieee80211_hdr *header)
-{
-	__le16 fc;
-	u16 len;
-
-	if (likely(!iwl_have_debug_level(IWL_DL_RX)))
-		return;
-
-	if (!priv->rx_traffic)
-		return;
-
-	fc = header->frame_control;
-	if (ieee80211_is_data(fc)) {
-		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
-		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
-		memcpy((priv->rx_traffic +
-		       (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
-		       header, len);
-		priv->rx_traffic_idx =
-			(priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
-	}
-}
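
Both logging helpers above use the same fixed-slot ring-buffer pattern; a
minimal sketch follows, reusing the sizes defined in the removed iwl-core.h
(256 entries of 64 bytes) but with generic names standing in for the driver's.

#include <string.h>

#define LOG_ENTRIES	256	/* mirrors IWL_TRAFFIC_ENTRIES */
#define LOG_ENTRY_SIZE	64	/* mirrors IWL_TRAFFIC_ENTRY_SIZE */

struct traffic_log {
	unsigned char buf[LOG_ENTRIES * LOG_ENTRY_SIZE];
	unsigned int idx;	/* next slot to (over)write */
};

/* Copy up to one entry's worth of a frame header into the next slot,
 * wrapping around once every slot has been used. */
void log_frame(struct traffic_log *log, const void *hdr, size_t len)
{
	if (len > LOG_ENTRY_SIZE)
		len = LOG_ENTRY_SIZE;
	memcpy(log->buf + log->idx * LOG_ENTRY_SIZE, hdr, len);
	log->idx = (log->idx + 1) % LOG_ENTRIES;
}
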
-
-const char *get_mgmt_string(int cmd)
-{
-	switch (cmd) {
-		IWL_CMD(MANAGEMENT_ASSOC_REQ);
-		IWL_CMD(MANAGEMENT_ASSOC_RESP);
-		IWL_CMD(MANAGEMENT_REASSOC_REQ);
-		IWL_CMD(MANAGEMENT_REASSOC_RESP);
-		IWL_CMD(MANAGEMENT_PROBE_REQ);
-		IWL_CMD(MANAGEMENT_PROBE_RESP);
-		IWL_CMD(MANAGEMENT_BEACON);
-		IWL_CMD(MANAGEMENT_ATIM);
-		IWL_CMD(MANAGEMENT_DISASSOC);
-		IWL_CMD(MANAGEMENT_AUTH);
-		IWL_CMD(MANAGEMENT_DEAUTH);
-		IWL_CMD(MANAGEMENT_ACTION);
-	default:
-		return "UNKNOWN";
-
-	}
-}
-
-const char *get_ctrl_string(int cmd)
-{
-	switch (cmd) {
-		IWL_CMD(CONTROL_BACK_REQ);
-		IWL_CMD(CONTROL_BACK);
-		IWL_CMD(CONTROL_PSPOLL);
-		IWL_CMD(CONTROL_RTS);
-		IWL_CMD(CONTROL_CTS);
-		IWL_CMD(CONTROL_ACK);
-		IWL_CMD(CONTROL_CFEND);
-		IWL_CMD(CONTROL_CFENDACK);
-	default:
-		return "UNKNOWN";
-
-	}
-}
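
The case bodies above have no explicit return statements because IWL_CMD is a
stringification helper defined elsewhere in the driver; the usual shape of
such a macro (assumed here, not quoted from the iwlwifi headers) is:

/* Each use expands to a case label returning the enumerator's name. */
#define IWL_CMD(x) case x: return #x

/* e.g. IWL_CMD(CONTROL_RTS); expands to: case CONTROL_RTS: return "CONTROL_RTS"; */
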
-
-void iwl_clear_traffic_stats(struct iwl_priv *priv)
-{
-	memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
-	memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
-}
-
-/*
- * If CONFIG_IWLWIFI_DEBUGFS is defined, iwl_update_stats() records all
- * MGMT, CTRL and DATA packets on both the TX and RX paths; use debugfs
- * to display the tx/rx statistics.
- * If CONFIG_IWLWIFI_DEBUGFS is not defined, no MGMT and CTRL information
- * is recorded, but DATA packets are still counted because iwl_led.c needs
- * to control the LED blinking based on the number of tx and rx data
- * frames.
- */
-void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
-{
-	struct traffic_stats	*stats;
-
-	if (is_tx)
-		stats = &priv->tx_stats;
-	else
-		stats = &priv->rx_stats;
-
-	if (ieee80211_is_mgmt(fc)) {
-		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
-			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
-			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
-			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
-			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_BEACON):
-			stats->mgmt[MANAGEMENT_BEACON]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_ATIM):
-			stats->mgmt[MANAGEMENT_ATIM]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
-			stats->mgmt[MANAGEMENT_DISASSOC]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_AUTH):
-			stats->mgmt[MANAGEMENT_AUTH]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-			stats->mgmt[MANAGEMENT_DEAUTH]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_ACTION):
-			stats->mgmt[MANAGEMENT_ACTION]++;
-			break;
-		}
-	} else if (ieee80211_is_ctl(fc)) {
-		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
-			stats->ctrl[CONTROL_BACK_REQ]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_BACK):
-			stats->ctrl[CONTROL_BACK]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
-			stats->ctrl[CONTROL_PSPOLL]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_RTS):
-			stats->ctrl[CONTROL_RTS]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_CTS):
-			stats->ctrl[CONTROL_CTS]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_ACK):
-			stats->ctrl[CONTROL_ACK]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_CFEND):
-			stats->ctrl[CONTROL_CFEND]++;
-			break;
-		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
-			stats->ctrl[CONTROL_CFENDACK]++;
-			break;
-		}
-	} else {
-		/* data */
-		stats->data_cnt++;
-		stats->data_bytes += len;
-	}
-}
-#endif
-
-static void iwl_force_rf_reset(struct iwl_priv *priv)
-{
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	if (!iwl_is_any_associated(priv)) {
-		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
-		return;
-	}
-	/*
-	 * There is no easy or better way to force a radio reset; the only
-	 * known method is switching the channel, which forces the radio to
-	 * reset and retune.
-	 * Use an internal short scan (single channel) operation to achieve
-	 * this.
-	 * The driver should reset the radio when a number of consecutive
-	 * missed beacons, or any other uCode error condition, is detected.
-	 */
-	IWL_DEBUG_INFO(priv, "perform radio reset.\n");
-	iwl_internal_short_hw_scan(priv);
-}
-
-
-int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
-{
-	struct iwl_force_reset *force_reset;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return -EINVAL;
-
-	if (mode >= IWL_MAX_FORCE_RESET) {
-		IWL_DEBUG_INFO(priv, "invalid reset request.\n");
-		return -EINVAL;
-	}
-	force_reset = &priv->force_reset[mode];
-	force_reset->reset_request_count++;
-	if (!external) {
-		if (force_reset->last_force_reset_jiffies &&
-		    time_after(force_reset->last_force_reset_jiffies +
-		    force_reset->reset_duration, jiffies)) {
-			IWL_DEBUG_INFO(priv, "force reset rejected\n");
-			force_reset->reset_reject_count++;
-			return -EAGAIN;
-		}
-	}
-	force_reset->reset_success_count++;
-	force_reset->last_force_reset_jiffies = jiffies;
-	IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
-	switch (mode) {
-	case IWL_RF_RESET:
-		iwl_force_rf_reset(priv);
-		break;
-	case IWL_FW_RESET:
-		/*
-		 * If the request is external (e.g. from debugfs), always
-		 * perform it regardless of the module parameter setting.
-		 * If the request is internal (uCode error or a failure
-		 * detected by the driver), the fw_restart module parameter
-		 * needs to be checked before reloading the firmware.
-		 */
-		if (!external && !iwlagn_mod_params.restart_fw) {
-			IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
-				       "module parameter setting\n");
-			break;
-		}
-		IWL_ERR(priv, "On demand firmware reload\n");
-		iwlagn_fw_error(priv, true);
-		break;
-	}
-	return 0;
-}
-
-
-int iwl_cmd_echo_test(struct iwl_priv *priv)
-{
-	int ret;
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_ECHO,
-		.len = { 0 },
-		.flags = CMD_SYNC,
-	};
-
-	ret = iwl_dvm_send_cmd(priv, &cmd);
-	if (ret)
-		IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
-	else
-		IWL_DEBUG_INFO(priv, "echo testing pass\n");
-	return ret;
-}
-
-static inline int iwl_check_stuck_queue(struct iwl_priv *priv, int txq)
-{
-	if (iwl_trans_check_stuck_queue(trans(priv), txq)) {
-		int ret;
-		ret = iwl_force_reset(priv, IWL_FW_RESET, false);
-		return (ret == -EAGAIN) ? 0 : 1;
-	}
-	return 0;
-}
-
-/*
- * Making the watchdog tick a quarter of the timeout ensures we will
- * discover a hung queue between timeout and 1.25*timeout
- */
-#define IWL_WD_TICK(timeout) ((timeout) / 4)
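
A quick worked example of that detection bound, using an assumed 2000 ms
timeout (not a driver value):

/*
 * tick = IWL_WD_TICK(2000) = 500 ms
 *
 * A queue that stalls just after a check is re-examined roughly 500, 1000,
 * 1500 and 2000 ms into the stall; at the 2000 ms check its idle time is
 * still just under the limit, so only the check at 2500 ms (1.25 * timeout)
 * flags it.  A stall that begins just before a check is flagged almost
 * exactly at the timeout, so detection always lands between 1.0x and 1.25x
 * the timeout.
 */
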
-
-/*
- * Watchdog timer callback: we check each tx queue for being stuck; if one
- * is hung we reset the firmware. If everything is fine just rearm the timer.
- */
-void iwl_bg_watchdog(unsigned long data)
-{
-	struct iwl_priv *priv = (struct iwl_priv *)data;
-	int cnt;
-	unsigned long timeout;
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-
-	if (iwl_is_rfkill(priv))
-		return;
-
-	timeout = hw_params(priv).wd_timeout;
-	if (timeout == 0)
-		return;
-
-	/* monitor and check for stuck queues */
-	for (cnt = 0; cnt < cfg(priv)->base_params->num_of_queues; cnt++)
-		if (iwl_check_stuck_queue(priv, cnt))
-			return;
-
-	mod_timer(&priv->watchdog, jiffies +
-		  msecs_to_jiffies(IWL_WD_TICK(timeout)));
-}
-
-void iwl_setup_watchdog(struct iwl_priv *priv)
-{
-	unsigned int timeout = hw_params(priv).wd_timeout;
-
-	if (!iwlagn_mod_params.wd_disable) {
-		/* use system default */
-		if (timeout && !cfg(priv)->base_params->wd_disable)
-			mod_timer(&priv->watchdog,
-				jiffies +
-				msecs_to_jiffies(IWL_WD_TICK(timeout)));
-		else
-			del_timer(&priv->watchdog);
-	} else {
-		/* module parameter overrides the default configuration */
-		if (timeout && iwlagn_mod_params.wd_disable == 2)
-			mod_timer(&priv->watchdog,
-				jiffies +
-				msecs_to_jiffies(IWL_WD_TICK(timeout)));
-		else
-			del_timer(&priv->watchdog);
-	}
-}
-
-/**
- * iwl_beacon_time_mask_low - mask of the lower (internal) bits of the beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits to shift for masking
- */
-static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
-					   u16 tsf_bits)
-{
-	return (1 << tsf_bits) - 1;
-}
-
-/**
- * iwl_beacon_time_mask_high - mask of the higher (extended) bits of the beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits to shift for masking
- */
-static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
-					    u16 tsf_bits)
-{
-	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
-}
-
-/*
- * extended beacon time format
- * time in usec is converted into a 32-bit value in extended:internal format
- * the extended part is the beacon count
- * the internal part is the time in usec within one beacon interval
- */
-u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
-{
-	u32 quot;
-	u32 rem;
-	u32 interval = beacon_interval * TIME_UNIT;
-
-	if (!interval || !usec)
-		return 0;
-
-	quot = (usec / interval) &
-		(iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
-		IWLAGN_EXT_BEACON_TIME_POS);
-	rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
-				   IWLAGN_EXT_BEACON_TIME_POS);
-
-	return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
-}
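
To make the extended:internal packing concrete, here is a small self-contained
sketch; TIME_UNIT matches the definition in the removed iwl-core.h below, but
the 22-bit split (EXT_POS) is only an assumed stand-in for
IWLAGN_EXT_BEACON_TIME_POS.

#include <stdint.h>
#include <stdio.h>

#define TIME_UNIT	1024	/* usec per TU, as in iwl-core.h */
#define EXT_POS		22	/* assumed split position */

/* Pack a time in usec into the extended:internal format described above:
 * the high bits count whole beacon intervals, the low bits hold the usec
 * remainder within the current interval. */
static uint32_t pack_beacon_time(uint32_t usec, uint32_t beacon_interval_tu)
{
	uint32_t interval = beacon_interval_tu * TIME_UNIT;
	uint32_t quot = (usec / interval) & ((1u << (32 - EXT_POS)) - 1);
	uint32_t rem = (usec % interval) & ((1u << EXT_POS) - 1);

	return (quot << EXT_POS) + rem;
}

int main(void)
{
	/* 250000 usec with a 100 TU beacon interval (102400 usec) packs as
	 * 2 whole intervals plus a 45200 usec remainder. */
	uint32_t v = pack_beacon_time(250000, 100);

	printf("beacons=%u rem=%u usec\n",
	       v >> EXT_POS, v & ((1u << EXT_POS) - 1));
	return 0;
}
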
-
-/* base is usually what we get from the uCode with each received frame,
- * i.e. the same as the HW timer counter counting down
- */
-__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
-			   u32 addon, u32 beacon_interval)
-{
-	u32 base_low = base & iwl_beacon_time_mask_low(priv,
-				IWLAGN_EXT_BEACON_TIME_POS);
-	u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
-				IWLAGN_EXT_BEACON_TIME_POS);
-	u32 interval = beacon_interval * TIME_UNIT;
-	u32 res = (base & iwl_beacon_time_mask_high(priv,
-				IWLAGN_EXT_BEACON_TIME_POS)) +
-				(addon & iwl_beacon_time_mask_high(priv,
-				IWLAGN_EXT_BEACON_TIME_POS));
-
-	if (base_low > addon_low)
-		res += base_low - addon_low;
-	else if (base_low < addon_low) {
-		res += interval + base_low - addon_low;
-		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
-	} else
-		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
-
-	return cpu_to_le32(res);
-}
-
-void iwl_nic_error(struct iwl_op_mode *op_mode)
-{
-	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
-	iwlagn_fw_error(priv, false);
-}
-
-void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
-{
-	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
-	if (state)
-		set_bit(STATUS_RF_KILL_HW, &priv->status);
-	else
-		clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-	wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
-}
-
-void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
-{
-	struct ieee80211_tx_info *info;
-
-	info = IEEE80211_SKB_CB(skb);
-	kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
-	dev_kfree_skb_any(skb);
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
deleted file mode 100644
index 635eb68..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ /dev/null
@@ -1,234 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_core_h__
-#define __iwl_core_h__
-
-#include "iwl-dev.h"
-#include "iwl-io.h"
-
-/************************
- * forward declarations *
- ************************/
-struct iwl_host_cmd;
-struct iwl_cmd;
-
-#define TIME_UNIT		1024
-
-struct iwl_lib_ops {
-	/* set hw dependent parameters */
-	void (*set_hw_params)(struct iwl_priv *priv);
-	int (*set_channel_switch)(struct iwl_priv *priv,
-				  struct ieee80211_channel_switch *ch_switch);
-	/* device specific configuration */
-	void (*nic_config)(struct iwl_priv *priv);
-
-	/* eeprom operations (as defined in iwl-eeprom.h) */
-	struct iwl_eeprom_ops eeprom_ops;
-
-	/* temperature */
-	void (*temperature)(struct iwl_priv *priv);
-};
-
-/***************************
- *   L i b                 *
- ***************************/
-
-void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-			   int hw_decrypt);
-int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
-			 struct iwl_rxon_context *ctx);
-void iwl_set_flags_for_band(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx,
-			    enum ieee80211_band band,
-			    struct ieee80211_vif *vif);
-u8 iwl_get_single_channel_number(struct iwl_priv *priv,
-				  enum ieee80211_band band);
-void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
-bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
-			    struct iwl_rxon_context *ctx,
-			    struct ieee80211_sta_ht_cap *ht_cap);
-void iwl_connection_init_rx_config(struct iwl_priv *priv,
-				   struct iwl_rxon_context *ctx);
-void iwl_set_rate(struct iwl_priv *priv);
-int iwl_cmd_echo_test(struct iwl_priv *priv);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_alloc_traffic_mem(struct iwl_priv *priv);
-void iwl_free_traffic_mem(struct iwl_priv *priv);
-void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
-				u16 length, struct ieee80211_hdr *header);
-void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
-				u16 length, struct ieee80211_hdr *header);
-const char *get_mgmt_string(int cmd);
-const char *get_ctrl_string(int cmd);
-void iwl_clear_traffic_stats(struct iwl_priv *priv);
-void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
-		      u16 len);
-void iwl_reset_traffic_log(struct iwl_priv *priv);
-
-#else
-static inline int iwl_alloc_traffic_mem(struct iwl_priv *priv)
-{
-	return 0;
-}
-static inline void iwl_free_traffic_mem(struct iwl_priv *priv)
-{
-}
-static inline void iwl_reset_traffic_log(struct iwl_priv *priv)
-{
-}
-static inline void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
-		      u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
-		      u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
-				    __le16 fc, u16 len)
-{
-}
-#endif
-
-/*****************************************************
-* RX
-******************************************************/
-void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
-
-void iwl_setup_watchdog(struct iwl_priv *priv);
-/*****************************************************
- * TX power
- ****************************************************/
-int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
-
-/*******************************************************************************
- * Scanning
- ******************************************************************************/
-void iwl_init_scan_params(struct iwl_priv *priv);
-int iwl_scan_cancel(struct iwl_priv *priv);
-void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-void iwl_force_scan_end(struct iwl_priv *priv);
-void iwl_internal_short_hw_scan(struct iwl_priv *priv);
-int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
-void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
-void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
-void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
-int __must_check iwl_scan_initiate(struct iwl_priv *priv,
-				   struct ieee80211_vif *vif,
-				   enum iwl_scan_type scan_type,
-				   enum ieee80211_band band);
-
-/* For faster active scanning, scan will move to the next channel if fewer than
- * PLCP_QUIET_THRESH packets are heard on this channel within
- * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
- * time if it's a quiet channel (nothing responded to our probe, and there's
- * no other traffic).
- * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
-#define IWL_ACTIVE_QUIET_TIME       cpu_to_le16(10)  /* msec */
-#define IWL_PLCP_QUIET_THRESH       cpu_to_le16(1)  /* packets */
-
-#define IWL_SCAN_CHECK_WATCHDOG		(HZ * 7)
-
-/* traffic log definitions */
-#define IWL_TRAFFIC_ENTRIES	(256)
-#define IWL_TRAFFIC_ENTRY_SIZE  (64)
-
-/*****************************************************
- *   S e n d i n g     H o s t     C o m m a n d s   *
- *****************************************************/
-
-void iwl_bg_watchdog(unsigned long data);
-u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
-__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
-			   u32 addon, u32 beacon_interval);
-
-
-/*****************************************************
-*  GEOS
-******************************************************/
-int iwl_init_geos(struct iwl_priv *priv);
-void iwl_free_geos(struct iwl_priv *priv);
-
-extern void iwl_send_bt_config(struct iwl_priv *priv);
-extern int iwl_send_statistics_request(struct iwl_priv *priv,
-				       u8 flags, bool clear);
-
-int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-
-static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
-			struct iwl_priv *priv, enum ieee80211_band band)
-{
-	return priv->hw->wiphy->bands[band];
-}
-
-static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
-{
-	return cfg(priv)->bt_params &&
-	       cfg(priv)->bt_params->advanced_bt_coexist;
-}
-
-extern bool bt_siso_mode;
-
-#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 5f96ce1..5975054 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -430,6 +430,9 @@
 #define HBUS_TARG_PRPH_WDAT     (HBUS_BASE+0x04c)
 #define HBUS_TARG_PRPH_RDAT     (HBUS_BASE+0x050)
 
+/* Used to enable DBGM */
+#define HBUS_TARG_TEST_REG	(HBUS_BASE+0x05c)
+
 /*
  * Per-Tx-queue write pointer (index, really!)
  * Indicates index to next TFD that driver will fill (1 past latest filled).
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 059efab..2d1b428 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -63,6 +63,7 @@
 
 #include <linux/interrupt.h>
 #include "iwl-debug.h"
+#include "iwl-devtrace.h"
 
 #define __iwl_fn(fn)						\
 void __iwl_ ##fn(struct device *dev, const char *fmt, ...)	\
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index a6b32a1..8376b84 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -29,10 +29,13 @@
 #ifndef __iwl_debug_h__
 #define __iwl_debug_h__
 
-#include "iwl-shared.h"
-#include "iwl-devtrace.h"
+#include "iwl-modparams.h"
 
-struct iwl_priv;
+
+static inline bool iwl_have_debug_level(u32 level)
+{
+	return iwlwifi_mod_params.debug_level & level;
+}
 
 void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
 		const char *fmt, ...);
@@ -41,10 +44,10 @@
 void __iwl_crit(struct device *dev, const char *fmt, ...);
 
 /* No matter what is m (priv, bus, trans), this will work */
-#define IWL_ERR(m, f, a...) __iwl_err(trans(m)->dev, false, false, f, ## a)
-#define IWL_WARN(m, f, a...) __iwl_warn(trans(m)->dev, f, ## a)
-#define IWL_INFO(m, f, a...) __iwl_info(trans(m)->dev, f, ## a)
-#define IWL_CRIT(m, f, a...) __iwl_crit(trans(m)->dev, f, ## a)
+#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
+#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
+#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
+#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
 
 #if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
 void __iwl_dbg(struct device *dev,
@@ -65,9 +68,9 @@
 } while (0)
 
 #define IWL_DEBUG(m, level, fmt, args...)				\
-	__iwl_dbg(trans(m)->dev, level, false, __func__, fmt, ##args)
+	__iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
 #define IWL_DEBUG_LIMIT(m, level, fmt, args...)				\
-	__iwl_dbg(trans(m)->dev, level, true, __func__, fmt, ##args)
+	__iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 #define iwl_print_hex_dump(m, level, p, len)				\
@@ -80,19 +83,6 @@
 #define iwl_print_hex_dump(m, level, p, len)
 #endif				/* CONFIG_IWLWIFI_DEBUG */
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
-#else
-static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
-	return 0;
-}
-static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
-#endif				/* CONFIG_IWLWIFI_DEBUGFS */
-
 /*
  * To use the debug system:
  *
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 2bbaebd..e7c157e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -37,9 +37,9 @@
 
 #include "iwl-dev.h"
 #include "iwl-debug.h"
-#include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-agn.h"
+#include "iwl-modparams.h"
 
 /* create and remove of files */
 #define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
@@ -111,105 +111,6 @@
 	.llseek = generic_file_llseek,					\
 };
 
-static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
-						char __user *user_buf,
-						size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	char *buf;
-	int pos = 0;
-
-	int cnt;
-	ssize_t ret;
-	const size_t bufsz = 100 +
-		sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
-	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%25s\t\t: %u\n",
-				 get_mgmt_string(cnt),
-				 priv->tx_stats.mgmt[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
-	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%25s\t\t: %u\n",
-				 get_ctrl_string(cnt),
-				 priv->tx_stats.ctrl[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
-			 priv->tx_stats.data_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
-			 priv->tx_stats.data_bytes);
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t iwl_dbgfs_clear_traffic_statistics_write(struct file *file,
-					const char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	u32 clear_flag;
-	char buf[8];
-	int buf_size;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%x", &clear_flag) != 1)
-		return -EFAULT;
-	iwl_clear_traffic_stats(priv);
-
-	return count;
-}
-
-static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
-						char __user *user_buf,
-						size_t count, loff_t *ppos) {
-
-	struct iwl_priv *priv = file->private_data;
-	char *buf;
-	int pos = 0;
-	int cnt;
-	ssize_t ret;
-	const size_t bufsz = 100 +
-		sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
-	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%25s\t\t: %u\n",
-				 get_mgmt_string(cnt),
-				 priv->rx_stats.mgmt[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
-	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
-		pos += scnprintf(buf + pos, bufsz - pos,
-				 "\t%25s\t\t: %u\n",
-				 get_ctrl_string(cnt),
-				 priv->rx_stats.ctrl[cnt]);
-	}
-	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
-	pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
-			 priv->rx_stats.data_cnt);
-	pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
-			 priv->rx_stats.data_bytes);
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
 static ssize_t iwl_dbgfs_sram_read(struct file *file,
 					char __user *user_buf,
 					size_t count, loff_t *ppos)
@@ -230,11 +131,9 @@
 	/* default is to dump the entire data segment */
 	if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
 		priv->dbgfs_sram_offset = 0x800000;
-		if (!priv->ucode_loaded) {
-			IWL_ERR(priv, "No uCode has been loadded.\n");
+		if (!priv->ucode_loaded)
 			return -EINVAL;
-		}
-		img = &priv->fw->img[priv->shrd->ucode_type];
+		img = &priv->fw->img[priv->cur_ucode];
 		priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
 	}
 	len = priv->dbgfs_sram_len;
@@ -259,7 +158,7 @@
 	sram = priv->dbgfs_sram_offset & ~0x3;
 
 	/* read the first u32 from sram */
-	val = iwl_read_targ_mem(trans(priv), sram);
+	val = iwl_read_targ_mem(priv->trans, sram);
 
 	for (; len; len--) {
 		/* put the address at the start of every line */
@@ -278,7 +177,7 @@
 		if (++offset == 4) {
 			sram += 4;
 			offset = 0;
-			val = iwl_read_targ_mem(trans(priv), sram);
+			val = iwl_read_targ_mem(priv->trans, sram);
 		}
 
 		/* put in extra spaces and split lines for human readability */
@@ -369,14 +268,19 @@
 				 i, station->sta.sta.addr,
 				 station->sta.station_flags_msk);
 		pos += scnprintf(buf + pos, bufsz - pos,
-				"TID\tseq_num\trate_n_flags\n");
+				"TID seqno  next_rclmd "
+				"rate_n_flags state txq\n");
 
 		for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
 			tid_data = &priv->tid_data[i][j];
 			pos += scnprintf(buf + pos, bufsz - pos,
-				"%d:\t%#x\t%#x",
+				"%d:  0x%.4x 0x%.4x     0x%.8x   "
+				"%d     %.2d",
 				j, tid_data->seq_number,
-				tid_data->agg.rate_n_flags);
+				tid_data->next_reclaimed,
+				tid_data->agg.rate_n_flags,
+				tid_data->agg.state,
+				tid_data->agg.txq_id);
 
 			if (tid_data->agg.wait_for_ba)
 				pos += scnprintf(buf + pos, bufsz - pos,
@@ -403,30 +307,25 @@
 	const u8 *ptr;
 	char *buf;
 	u16 eeprom_ver;
-	size_t eeprom_len = cfg(priv)->base_params->eeprom_size;
+	size_t eeprom_len = priv->cfg->base_params->eeprom_size;
 	buf_size = 4 * eeprom_len + 256;
 
-	if (eeprom_len % 16) {
-		IWL_ERR(priv, "NVM size is not multiple of 16.\n");
+	if (eeprom_len % 16)
 		return -ENODATA;
-	}
 
-	ptr = priv->shrd->eeprom;
-	if (!ptr) {
-		IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
+	ptr = priv->eeprom;
+	if (!ptr)
 		return -ENOMEM;
-	}
 
 	/* 4 characters for byte 0xYY */
 	buf = kzalloc(buf_size, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
-	eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
+
+	eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
 	pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
 			"version: 0x%x\n",
-			(trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+			(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
 			 ? "OTP" : "EEPROM", eeprom_ver);
 	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
 		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
@@ -456,10 +355,8 @@
 		return -EAGAIN;
 
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
 	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
 	if (supp_band) {
@@ -521,8 +418,6 @@
 	int pos = 0;
 	const size_t bufsz = sizeof(buf);
 
-	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
-		test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status));
 	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
 		test_bit(STATUS_RF_KILL_HW, &priv->status));
 	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
@@ -544,9 +439,9 @@
 	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
 		test_bit(STATUS_SCAN_HW, &priv->status));
 	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
-		test_bit(STATUS_POWER_PMI, &priv->shrd->status));
+		test_bit(STATUS_POWER_PMI, &priv->status));
 	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
-		test_bit(STATUS_FW_ERROR, &priv->shrd->status));
+		test_bit(STATUS_FW_ERROR, &priv->status));
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
@@ -563,16 +458,14 @@
 	ssize_t ret;
 
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
 	for (cnt = 0; cnt < REPLY_MAX; cnt++) {
 		if (priv->rx_handlers_stats[cnt] > 0)
 			pos += scnprintf(buf + pos, bufsz - pos,
 				"\tRx handler[%36s]:\t\t %u\n",
-				get_cmd_string(cnt),
+				iwl_dvm_get_cmd_string(cnt),
 				priv->rx_handlers_stats[cnt]);
 	}
 
@@ -680,11 +573,8 @@
 		return -EFAULT;
 	if (!iwl_is_any_associated(priv))
 		priv->disable_ht40 = ht40 ? true : false;
-	else {
-		IWL_ERR(priv, "Sta associated with AP - "
-			"Change to 40MHz channel support is not allowed\n");
+	else
 		return -EINVAL;
-	}
 
 	return count;
 }
@@ -816,87 +706,6 @@
 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
 DEBUGFS_READ_FILE_OPS(current_sleep_command);
 
-static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	int pos = 0, ofs = 0;
-	int cnt = 0, entry;
-
-	char *buf;
-	int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
-		(cfg(priv)->base_params->num_of_queues * 32 * 8) + 400;
-	const u8 *ptr;
-	ssize_t ret;
-
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate buffer\n");
-		return -ENOMEM;
-	}
-	if (priv->tx_traffic && iwl_have_debug_level(IWL_DL_TX)) {
-		ptr = priv->tx_traffic;
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"Tx Traffic idx: %u\n", priv->tx_traffic_idx);
-		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
-			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
-			     entry++,  ofs += 16) {
-				pos += scnprintf(buf + pos, bufsz - pos,
-						"0x%.4x ", ofs);
-				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
-						   buf + pos, bufsz - pos, 0);
-				pos += strlen(buf + pos);
-				if (bufsz - pos > 0)
-					buf[pos++] = '\n';
-			}
-		}
-	}
-
-	if (priv->rx_traffic && iwl_have_debug_level(IWL_DL_RX)) {
-		ptr = priv->rx_traffic;
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"Rx Traffic idx: %u\n", priv->rx_traffic_idx);
-		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
-			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
-			     entry++,  ofs += 16) {
-				pos += scnprintf(buf + pos, bufsz - pos,
-						"0x%.4x ", ofs);
-				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
-						   buf + pos, bufsz - pos, 0);
-				pos += strlen(buf + pos);
-				if (bufsz - pos > 0)
-					buf[pos++] = '\n';
-			}
-		}
-	}
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-	kfree(buf);
-	return ret;
-}
-
-static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
-					 const char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char buf[8];
-	int buf_size;
-	int traffic_log;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &traffic_log) != 1)
-		return -EFAULT;
-	if (traffic_log == 0)
-		iwl_reset_traffic_log(priv);
-
-	return count;
-}
-
 static const char *fmt_value = "  %-30s %10u\n";
 static const char *fmt_hex   = "  %-30s       0x%02X\n";
 static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
@@ -947,10 +756,8 @@
 		return -EAGAIN;
 
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
 	/*
 	 * the statistic information display here is based on
@@ -1376,10 +1183,8 @@
 		return -EAGAIN;
 
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
 	/* the statistic information display here is based on
 	 * the last statistics notification from uCode
@@ -1536,17 +1341,17 @@
 	if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
 		pos += scnprintf(buf + pos, bufsz - pos,
 			"tx power: (1/2 dB step)\n");
-		if ((hw_params(priv).valid_tx_ant & ANT_A) &&
+		if ((priv->hw_params.valid_tx_ant & ANT_A) &&
 		    tx->tx_power.ant_a)
 			pos += scnprintf(buf + pos, bufsz - pos,
 					fmt_hex, "antenna A:",
 					tx->tx_power.ant_a);
-		if ((hw_params(priv).valid_tx_ant & ANT_B) &&
+		if ((priv->hw_params.valid_tx_ant & ANT_B) &&
 		    tx->tx_power.ant_b)
 			pos += scnprintf(buf + pos, bufsz - pos,
 					fmt_hex, "antenna B:",
 					tx->tx_power.ant_b);
-		if ((hw_params(priv).valid_tx_ant & ANT_C) &&
+		if ((priv->hw_params.valid_tx_ant & ANT_C) &&
 		    tx->tx_power.ant_c)
 			pos += scnprintf(buf + pos, bufsz - pos,
 					fmt_hex, "antenna C:",
@@ -1578,10 +1383,8 @@
 		return -EAGAIN;
 
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
 	/* the statistic information display here is based on
 	 * the last statistics notification from uCode
@@ -1704,16 +1507,11 @@
 	ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
 	mutex_unlock(&priv->mutex);
 
-	if (ret) {
-		IWL_ERR(priv,
-			"Error sending statistics request: %zd\n", ret);
+	if (ret)
 		return -EAGAIN;
-	}
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
 	/*
 	 * the statistic information display here is based on
@@ -1790,10 +1588,8 @@
 		return -EAGAIN;
 
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
 	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
 	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
@@ -1933,10 +1729,8 @@
 
 	data = &priv->sensitivity_data;
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
 	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
 			data->auto_corr_ofdm);
@@ -2014,10 +1808,8 @@
 
 	data = &priv->chain_noise_data;
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(priv, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
 	pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
 			data->active_chains);
@@ -2068,7 +1860,7 @@
 	const size_t bufsz = sizeof(buf);
 	u32 pwrsave_status;
 
-	pwrsave_status = iwl_read32(trans(priv), CSR_GP_CNTRL) &
+	pwrsave_status = iwl_read32(priv->trans, CSR_GP_CNTRL) &
 			CSR_GP_REG_POWER_SAVE_STATUS_MSK;
 
 	pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
@@ -2262,59 +2054,39 @@
 	return count;
 }
 
-static ssize_t iwl_dbgfs_force_reset_read(struct file *file,
-					char __user *user_buf,
-					size_t count, loff_t *ppos)
+static ssize_t iwl_dbgfs_rf_reset_read(struct file *file,
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
 {
 	struct iwl_priv *priv = file->private_data;
-	int i, pos = 0;
+	int pos = 0;
 	char buf[300];
 	const size_t bufsz = sizeof(buf);
-	struct iwl_force_reset *force_reset;
+	struct iwl_rf_reset *rf_reset = &priv->rf_reset;
 
-	for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
-		force_reset = &priv->force_reset[i];
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"Force reset method %d\n", i);
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"\tnumber of reset request: %d\n",
-				force_reset->reset_request_count);
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"\tnumber of reset request success: %d\n",
-				force_reset->reset_success_count);
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"\tnumber of reset request reject: %d\n",
-				force_reset->reset_reject_count);
-		pos += scnprintf(buf + pos, bufsz - pos,
-				"\treset duration: %lu\n",
-				force_reset->reset_duration);
-	}
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"RF reset statistics\n");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\tnumber of reset request: %d\n",
+			rf_reset->reset_request_count);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\tnumber of reset request success: %d\n",
+			rf_reset->reset_success_count);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\tnumber of reset request reject: %d\n",
+			rf_reset->reset_reject_count);
+
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-static ssize_t iwl_dbgfs_force_reset_write(struct file *file,
+static ssize_t iwl_dbgfs_rf_reset_write(struct file *file,
 					const char __user *user_buf,
 					size_t count, loff_t *ppos) {
 
 	struct iwl_priv *priv = file->private_data;
-	char buf[8];
-	int buf_size;
-	int reset, ret;
+	int ret;
 
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &reset) != 1)
-		return -EINVAL;
-	switch (reset) {
-	case IWL_RF_RESET:
-	case IWL_FW_RESET:
-		ret = iwl_force_reset(priv, reset, true);
-		break;
-	default:
-		return -EINVAL;
-	}
+	ret = iwl_force_rf_reset(priv, true);
 	return ret ? ret : count;
 }
 
@@ -2342,29 +2114,6 @@
 	return count;
 }
 
-static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
-					const char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_priv *priv = file->private_data;
-	char buf[8];
-	int buf_size;
-	int timeout;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &timeout) != 1)
-		return -EINVAL;
-	if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
-		timeout = IWL_DEF_WD_TIMEOUT;
-
-	hw_params(priv).wd_timeout = timeout;
-	iwl_setup_watchdog(priv);
-	return count;
-}
-
 static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
 					char __user *user_buf,
 					size_t count, loff_t *ppos) {
@@ -2420,10 +2169,10 @@
 	char buf[40];
 	const size_t bufsz = sizeof(buf);
 
-	if (cfg(priv)->ht_params)
+	if (priv->cfg->ht_params)
 		pos += scnprintf(buf + pos, bufsz - pos,
 			 "use %s for aggregation\n",
-			 (hw_params(priv).use_rts_for_aggregation) ?
+			 (priv->hw_params.use_rts_for_aggregation) ?
 				"rts/cts" : "cts-to-self");
 	else
 		pos += scnprintf(buf + pos, bufsz - pos, "N/A");
@@ -2440,7 +2189,7 @@
 	int buf_size;
 	int rts;
 
-	if (!cfg(priv)->ht_params)
+	if (!priv->cfg->ht_params)
 		return -EINVAL;
 
 	memset(buf, 0, sizeof(buf));
@@ -2450,12 +2199,29 @@
 	if (sscanf(buf, "%d", &rts) != 1)
 		return -EINVAL;
 	if (rts)
-		hw_params(priv).use_rts_for_aggregation = true;
+		priv->hw_params.use_rts_for_aggregation = true;
 	else
-		hw_params(priv).use_rts_for_aggregation = false;
+		priv->hw_params.use_rts_for_aggregation = false;
 	return count;
 }
 
+static int iwl_cmd_echo_test(struct iwl_priv *priv)
+{
+	int ret;
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_ECHO,
+		.len = { 0 },
+		.flags = CMD_SYNC,
+	};
+
+	ret = iwl_dvm_send_cmd(priv, &cmd);
+	if (ret)
+		IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
+	else
+		IWL_DEBUG_INFO(priv, "echo testing pass\n");
+	return ret;
+}
+
 static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
 					const char __user *user_buf,
 					size_t count, loff_t *ppos)
@@ -2473,9 +2239,93 @@
 	return count;
 }
 
-DEBUGFS_READ_FILE_OPS(rx_statistics);
-DEBUGFS_READ_FILE_OPS(tx_statistics);
-DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char *buf;
+	int pos = 0;
+	ssize_t ret = -ENOMEM;
+
+	ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
+	if (buf) {
+		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+		kfree(buf);
+	}
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_log_event_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	u32 event_log_flag;
+	char buf[8];
+	int buf_size;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) -  1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &event_log_flag) != 1)
+		return -EFAULT;
+	if (event_log_flag == 1)
+		iwl_dump_nic_event_log(priv, true, NULL, false);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[120];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "Sensitivity calibrations %s\n",
+			 (priv->calib_disabled &
+					IWL_SENSITIVITY_CALIB_DISABLED) ?
+			 "DISABLED" : "ENABLED");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "Chain noise calibrations %s\n",
+			 (priv->calib_disabled &
+					IWL_CHAIN_NOISE_CALIB_DISABLED) ?
+			 "DISABLED" : "ENABLED");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "Tx power calibrations %s\n",
+			 (priv->calib_disabled &
+					IWL_TX_POWER_CALIB_DISABLED) ?
+			 "DISABLED" : "ENABLED");
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	u32 calib_disabled;
+	int buf_size;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%x", &calib_disabled) != 1)
+		return -EFAULT;
+
+	priv->calib_disabled = calib_disabled;
+
+	return count;
+}
+
 DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
 DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
 DEBUGFS_READ_FILE_OPS(ucode_general_stats);
@@ -2483,20 +2333,20 @@
 DEBUGFS_READ_FILE_OPS(chain_noise);
 DEBUGFS_READ_FILE_OPS(power_save_status);
 DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
-DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
 DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
 DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
 DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
-DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_WRITE_FILE_OPS(rf_reset);
 DEBUGFS_READ_FILE_OPS(rxon_flags);
 DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
 DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
 DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
-DEBUGFS_WRITE_FILE_OPS(wd_timeout);
 DEBUGFS_READ_FILE_OPS(bt_traffic);
 DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
 DEBUGFS_READ_FILE_OPS(reply_tx_error);
 DEBUGFS_WRITE_FILE_OPS(echo_test);
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
 
 /*
  * Create the debugfs files and directories
@@ -2537,15 +2387,11 @@
 	DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
 	DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR);
 
-	DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
-	DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
 	DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
 	DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
-	DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
 	DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
 	DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
-	DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+	DEBUGFS_ADD_FILE(rf_reset, dir_debug, S_IWUSR | S_IRUSR);
 	DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
 	DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
 	DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
@@ -2558,17 +2404,16 @@
 	DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
 	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
 	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
-	DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
 	DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
+	DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
+
 	if (iwl_advanced_bt_coexist(priv))
 		DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
 
-	DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
-			 &priv->disable_sens_cal);
-	DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
-			 &priv->disable_chain_noise_cal);
+	/* Calibrations disabled/enabled status */
+	DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
 
-	if (iwl_trans_dbgfs_register(trans(priv), dir_debug))
+	if (iwl_trans_dbgfs_register(priv->trans, dir_debug))
 		goto err;
 	return 0;
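
A minimal user-space sketch of driving the new calib_disabled entry created above; only the file name and the hex bitmask format come from this patch, while the debugfs mount point and directory layout below are assumptions for illustration.

/* Sketch only: disable two calibrations through the new debugfs file.
 * The path is hypothetical; the real one depends on where debugfs is
 * mounted and on the per-device directory the driver creates.
 */
#include <stdio.h>
#include <stdlib.h>

/* Mirrors the IWL_*_CALIB_DISABLED bits added to iwl-dev.h below. */
#define SENSITIVITY_CALIB_DISABLED	0x1
#define CHAIN_NOISE_CALIB_DISABLED	0x2

int main(void)
{
	const char *path =
		"/sys/kernel/debug/iwlwifi/<dev>/rf/calib_disabled"; /* hypothetical */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* The write handler parses a hex mask: sscanf(buf, "%x", ...). */
	fprintf(f, "%x\n",
		SENSITIVITY_CALIB_DISABLED | CHAIN_NOISE_CALIB_DISABLED);
	fclose(f);
	return EXIT_SUCCESS;
}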
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 16956b7..7006237 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 
+#include "iwl-fw.h"
 #include "iwl-eeprom.h"
 #include "iwl-csr.h"
 #include "iwl-debug.h"
@@ -47,12 +48,9 @@
 #include "iwl-agn-rs.h"
 #include "iwl-agn-tt.h"
 #include "iwl-trans.h"
-#include "iwl-shared.h"
 #include "iwl-op-mode.h"
 #include "iwl-notif-wait.h"
 
-struct iwl_tx_queue;
-
 /* CT-KILL constants */
 #define CT_KILL_THRESHOLD_LEGACY   110 /* in Celsius */
 #define CT_KILL_THRESHOLD	   114 /* in Celsius */
@@ -196,6 +194,7 @@
  * These states relate to a specific RA / TID.
  *
  * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
  * @IWL_AGG_ON: aggregation session is up
  * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
  *	HW queue to be empty from packets for this RA /TID.
@@ -204,6 +203,7 @@
  */
 enum iwl_agg_state {
 	IWL_AGG_OFF = 0,
+	IWL_AGG_STARTING,
 	IWL_AGG_ON,
 	IWL_EMPTYING_HW_QUEUE_ADDBA,
 	IWL_EMPTYING_HW_QUEUE_DELBA,
@@ -220,8 +220,7 @@
  *	Tx response (REPLY_TX), and the block ack notification
  *	(REPLY_COMPRESSED_BA).
  * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session - used by the transport layer.
- *	Needed by the upper layer for debugfs only.
+ * @txq_id: Tx queue used by the BA session
  * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
  *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
  *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -507,44 +506,6 @@
 	u32 unknown;
 };
 
-/* management statistics */
-enum iwl_mgmt_stats {
-	MANAGEMENT_ASSOC_REQ = 0,
-	MANAGEMENT_ASSOC_RESP,
-	MANAGEMENT_REASSOC_REQ,
-	MANAGEMENT_REASSOC_RESP,
-	MANAGEMENT_PROBE_REQ,
-	MANAGEMENT_PROBE_RESP,
-	MANAGEMENT_BEACON,
-	MANAGEMENT_ATIM,
-	MANAGEMENT_DISASSOC,
-	MANAGEMENT_AUTH,
-	MANAGEMENT_DEAUTH,
-	MANAGEMENT_ACTION,
-	MANAGEMENT_MAX,
-};
-/* control statistics */
-enum iwl_ctrl_stats {
-	CONTROL_BACK_REQ =  0,
-	CONTROL_BACK,
-	CONTROL_PSPOLL,
-	CONTROL_RTS,
-	CONTROL_CTS,
-	CONTROL_ACK,
-	CONTROL_CFEND,
-	CONTROL_CFENDACK,
-	CONTROL_MAX,
-};
-
-struct traffic_stats {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-	u32 mgmt[MANAGEMENT_MAX];
-	u32 ctrl[CONTROL_MAX];
-	u32 data_cnt;
-	u64 data_bytes;
-#endif
-};
-
 /*
  * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
  * to perform continuous uCode event logging operation if enabled
@@ -571,24 +532,7 @@
 	int wraps_more_count;
 };
 
-/*
- * This is the threshold value of plcp error rate per 100mSecs.  It is
- * used to set and check for the validity of plcp_delta.
- */
-#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN	(1)
-#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF	(50)
-#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF	(100)
-#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF	(200)
-#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX	(255)
-#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE	(0)
-
 #define IWL_DELAY_NEXT_FORCE_RF_RESET  (HZ*3)
-#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
-
-/* TX queue watchdog timeouts in mSecs */
-#define IWL_DEF_WD_TIMEOUT	(2000)
-#define IWL_LONG_WD_TIMEOUT	(10000)
-#define IWL_MAX_WD_TIMEOUT	(120000)
 
 /* BT Antenna Coupling Threshold (dB) */
 #define IWL_BT_ANTENNA_COUPLING_THRESHOLD	(35)
@@ -598,18 +542,18 @@
 #define IWL_MAX_CONTINUE_RELOAD_CNT	4
 
 
-enum iwl_reset {
-	IWL_RF_RESET = 0,
-	IWL_FW_RESET,
-	IWL_MAX_FORCE_RESET,
-};
-
-struct iwl_force_reset {
+struct iwl_rf_reset {
 	int reset_request_count;
 	int reset_success_count;
 	int reset_reject_count;
-	unsigned long reset_duration;
-	unsigned long last_force_reset_jiffies;
+	unsigned long last_reset_jiffies;
+};
+
+enum iwl_rxon_context_id {
+	IWL_RXON_CTX_BSS,
+	IWL_RXON_CTX_PAN,
+
+	NUM_IWL_RXON_CTX
 };
 
 /* extend beacon time format bit shifting  */
@@ -623,6 +567,10 @@
 struct iwl_rxon_context {
 	struct ieee80211_vif *vif;
 
+	u8 mcast_queue;
+	u8 ac_to_queue[IEEE80211_NUM_ACS];
+	u8 ac_to_fifo[IEEE80211_NUM_ACS];
+
 	/*
 	 * We could use the vif to indicate active, but we
 	 * also need it to be active during disabling when
@@ -677,6 +625,52 @@
 	IWL_SCAN_ROC,
 };
 
+/**
+ * struct iwl_hw_params
+ *
+ * Holds the hardware parameters determined at device init
+ *
+ * @tx_chains_num: Number of TX chains
+ * @rx_chains_num: Number of RX chains
+ * @valid_tx_ant: usable antennas for TX
+ * @valid_rx_ant: usable antennas for RX
+ * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
+ * @sku: sku read from EEPROM
+ * @ct_kill_threshold: temperature threshold - in hw dependent unit
+ * @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
+ *	relevant for 1000, 6000 and up
+ * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
+ */
+struct iwl_hw_params {
+	u8  tx_chains_num;
+	u8  rx_chains_num;
+	u8  valid_tx_ant;
+	u8  valid_rx_ant;
+	u8  ht40_channel;
+	bool use_rts_for_aggregation;
+	u16 sku;
+	u32 ct_kill_threshold;
+	u32 ct_kill_exit_threshold;
+
+	const struct iwl_sensitivity_ranges *sens;
+};
+
+struct iwl_lib_ops {
+	/* set hw dependent parameters */
+	void (*set_hw_params)(struct iwl_priv *priv);
+	int (*set_channel_switch)(struct iwl_priv *priv,
+				  struct ieee80211_channel_switch *ch_switch);
+	/* device specific configuration */
+	void (*nic_config)(struct iwl_priv *priv);
+
+	/* eeprom operations (as defined in iwl-eeprom.h) */
+	struct iwl_eeprom_ops eeprom_ops;
+
+	/* temperature */
+	void (*temperature)(struct iwl_priv *priv);
+};
+
 #ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
 struct iwl_testmode_trace {
 	u32 buff_size;
@@ -701,6 +695,17 @@
 	u8 data[];
 };
 
+/* Calibration disabling bit mask */
+enum {
+	IWL_CALIB_ENABLE_ALL			= 0,
+
+	IWL_SENSITIVITY_CALIB_DISABLED		= BIT(0),
+	IWL_CHAIN_NOISE_CALIB_DISABLED		= BIT(1),
+	IWL_TX_POWER_CALIB_DISABLED		= BIT(2),
+
+	IWL_CALIB_DISABLE_ALL			= 0xFFFFFFFF,
+};
+
 #define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \
 	((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific))
 
@@ -710,9 +715,11 @@
 
 struct iwl_priv {
 
-	/*data shared among all the driver's layers */
-	struct iwl_shared *shrd;
+	struct iwl_trans *trans;
+	struct device *dev;		/* for debug prints only */
+	const struct iwl_cfg *cfg;
 	const struct iwl_fw *fw;
+	const struct iwl_lib_ops *lib;
 	unsigned long status;
 
 	spinlock_t sta_lock;
@@ -720,6 +727,11 @@
 
 	unsigned long transport_queue_stop;
 	bool passive_no_rx;
+#define IWL_INVALID_MAC80211_QUEUE	0xff
+	u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
+	atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
+
+	unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 
 	/* ieee device used by generic ieee processing code */
 	struct ieee80211_hw *hw;
@@ -730,7 +742,10 @@
 
 	struct workqueue_struct *workqueue;
 
+	struct iwl_hw_params hw_params;
+
 	enum ieee80211_band band;
+	u8 valid_contexts;
 
 	void (*pre_rx_handler)(struct iwl_priv *priv,
 			       struct iwl_rx_cmd_buffer *rxb);
@@ -763,8 +778,8 @@
 	/*counters */
 	u32 rx_handlers_stats[REPLY_MAX];
 
-	/* force reset */
-	struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
+	/* rf reset */
+	struct iwl_rf_reset rf_reset;
 
 	/* firmware reload counter and timestamp */
 	unsigned long reload_jiffies;
@@ -810,8 +825,6 @@
 
 	__le16 switch_channel;
 
-	u16 active_rate;
-
 	u8 start_calib;
 	struct iwl_sensitivity_data sensitivity_data;
 	struct iwl_chain_noise_data chain_noise_data;
@@ -825,10 +838,6 @@
 
 	int activity_timer_active;
 
-	/* counts mgmt, ctl, and data packets */
-	struct traffic_stats tx_stats;
-	struct traffic_stats rx_stats;
-
 	struct iwl_power_mgr power_data;
 	struct iwl_tt_mgmt thermal_throttle;
 
@@ -912,6 +921,7 @@
 	__le32 kill_ack_mask;
 	__le32 kill_cts_mask;
 	__le16 bt_valid;
+	bool reduced_txpower;
 	u16 bt_on_thresh;
 	u16 bt_duration;
 	u16 dynamic_frag_thresh;
@@ -948,23 +958,21 @@
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	/* debugfs */
-	u16 tx_traffic_idx;
-	u16 rx_traffic_idx;
-	u8 *tx_traffic;
-	u8 *rx_traffic;
 	struct dentry *debugfs_dir;
 	u32 dbgfs_sram_offset, dbgfs_sram_len;
 	bool disable_ht40;
 	void *wowlan_sram;
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
+	/* eeprom -- this is in the card's little endian byte order */
+	u8 *eeprom;
+	enum iwl_nvm_type nvm_device_type;
+
 	struct work_struct txpower_work;
-	u32 disable_sens_cal;
-	u32 disable_chain_noise_cal;
+	u32 calib_disabled;
 	struct work_struct run_time_calib_work;
 	struct timer_list statistics_periodic;
 	struct timer_list ucode_trace;
-	struct timer_list watchdog;
 
 	struct iwl_event_log event_log;
 
@@ -982,10 +990,18 @@
 	__le64 replay_ctr;
 	__le16 last_seq_ctl;
 	bool have_rekey_data;
+
+	/* device_pointers: pointers to ucode event tables */
+	struct {
+		u32 error_event_table;
+		u32 log_event_table;
+	} device_pointers;
+
+	/* indicator of loaded ucode image */
+	enum iwl_ucode_type cur_ucode;
 }; /*iwl_priv */
 
 extern struct kmem_cache *iwl_tx_cmd_pool;
-extern struct iwl_mod_params iwlagn_mod_params;
 
 static inline struct iwl_rxon_context *
 iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
@@ -998,7 +1014,7 @@
 #define for_each_context(priv, ctx)				\
 	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
 	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
-		if (priv->shrd->valid_contexts & BIT(ctx->ctxid))
+		if (priv->valid_contexts & BIT(ctx->ctxid))
 
 static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
 {
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 6f312c7..3c72bad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -66,10 +66,13 @@
 #include <linux/module.h>
 
 #include "iwl-drv.h"
+#include "iwl-debug.h"
 #include "iwl-trans.h"
-#include "iwl-shared.h"
 #include "iwl-op-mode.h"
 #include "iwl-agn-hw.h"
+#include "iwl-fw.h"
+#include "iwl-config.h"
+#include "iwl-modparams.h"
 
 /* private includes */
 #include "iwl-fw-file.h"
@@ -77,8 +80,10 @@
 /**
  * struct iwl_drv - drv common data
  * @fw: the iwl_fw structure
- * @shrd: pointer to common shared structure
  * @op_mode: the running op_mode
+ * @trans: transport layer
+ * @dev: for debug prints only
+ * @cfg: configuration struct
  * @fw_index: firmware revision to try loading
  * @firmware_name: composite filename of ucode file to load
  * @request_firmware_complete: the firmware has been obtained from user space
@@ -86,8 +91,10 @@
 struct iwl_drv {
 	struct iwl_fw fw;
 
-	struct iwl_shared *shrd;
 	struct iwl_op_mode *op_mode;
+	struct iwl_trans *trans;
+	struct device *dev;
+	const struct iwl_cfg *cfg;
 
 	int fw_index;                   /* firmware we're trying to load */
 	char firmware_name[25];         /* name of firmware file to load */
@@ -110,7 +117,7 @@
 static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
 {
 	if (desc->v_addr)
-		dma_free_coherent(trans(drv)->dev, desc->len,
+		dma_free_coherent(drv->trans->dev, desc->len,
 				  desc->v_addr, desc->p_addr);
 	desc->v_addr = NULL;
 	desc->len = 0;
@@ -138,7 +145,7 @@
 		return -EINVAL;
 	}
 
-	desc->v_addr = dma_alloc_coherent(trans(drv)->dev, sec->size,
+	desc->v_addr = dma_alloc_coherent(drv->trans->dev, sec->size,
 					  &desc->p_addr, GFP_KERNEL);
 	if (!desc->v_addr)
 		return -ENOMEM;
@@ -156,8 +163,7 @@
 
 static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 {
-	const struct iwl_cfg *cfg = cfg(drv);
-	const char *name_pre = cfg->fw_name_pre;
+	const char *name_pre = drv->cfg->fw_name_pre;
 	char tag[8];
 
 	if (first) {
@@ -166,14 +172,14 @@
 		strcpy(tag, UCODE_EXPERIMENTAL_TAG);
 	} else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
 #endif
-		drv->fw_index = cfg->ucode_api_max;
+		drv->fw_index = drv->cfg->ucode_api_max;
 		sprintf(tag, "%d", drv->fw_index);
 	} else {
 		drv->fw_index--;
 		sprintf(tag, "%d", drv->fw_index);
 	}
 
-	if (drv->fw_index < cfg->ucode_api_min) {
+	if (drv->fw_index < drv->cfg->ucode_api_min) {
 		IWL_ERR(drv, "no suitable firmware found!\n");
 		return -ENOENT;
 	}
@@ -186,7 +192,7 @@
 		       drv->firmware_name);
 
 	return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
-				       trans(drv)->dev,
+				       drv->trans->dev,
 				       GFP_KERNEL, drv, iwl_ucode_callback);
 }
 
@@ -284,6 +290,7 @@
 
 	sec->offset = le32_to_cpu(sec_parse->offset);
 	sec->data = sec_parse->data;
+	sec->size = size - sizeof(sec_parse->offset);
 
 	++img->sec_counter;
 
@@ -414,9 +421,6 @@
 	struct iwl_ucode_tlv *tlv;
 	size_t len = ucode_raw->size;
 	const u8 *data;
-	int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
-	int tmp;
-	u64 alternatives;
 	u32 tlv_len;
 	enum iwl_ucode_tlv_type tlv_type;
 	const u8 *tlv_data;
@@ -434,23 +438,6 @@
 		return -EINVAL;
 	}
 
-	/*
-	 * Check which alternatives are present, and "downgrade"
-	 * when the chosen alternative is not present, warning
-	 * the user when that happens. Some files may not have
-	 * any alternatives, so don't warn in that case.
-	 */
-	alternatives = le64_to_cpu(ucode->alternatives);
-	tmp = wanted_alternative;
-	if (wanted_alternative > 63)
-		wanted_alternative = 63;
-	while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
-		wanted_alternative--;
-	if (wanted_alternative && wanted_alternative != tmp)
-		IWL_WARN(drv,
-			 "uCode alternative %d not available, choosing %d\n",
-			 tmp, wanted_alternative);
-
 	drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
 	build = le32_to_cpu(ucode->build);
 
@@ -475,14 +462,11 @@
 	len -= sizeof(*ucode);
 
 	while (len >= sizeof(*tlv)) {
-		u16 tlv_alt;
-
 		len -= sizeof(*tlv);
 		tlv = (void *)data;
 
 		tlv_len = le32_to_cpu(tlv->length);
-		tlv_type = le16_to_cpu(tlv->type);
-		tlv_alt = le16_to_cpu(tlv->alternative);
+		tlv_type = le32_to_cpu(tlv->type);
 		tlv_data = tlv->data;
 
 		if (len < tlv_len) {
@@ -493,14 +477,6 @@
 		len -= ALIGN(tlv_len, 4);
 		data += sizeof(*tlv) + ALIGN(tlv_len, 4);
 
-		/*
-		 * Alternative 0 is always valid.
-		 *
-		 * Skip alternative TLVs that are not selected.
-		 */
-		if (tlv_alt != 0 && tlv_alt != wanted_alternative)
-			continue;
-
 		switch (tlv_type) {
 		case IWL_UCODE_TLV_INST:
 			set_sec_data(pieces, IWL_UCODE_REGULAR,
@@ -755,14 +731,13 @@
 static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
 {
 	struct iwl_drv *drv = context;
-	const struct iwl_cfg *cfg = cfg(drv);
 	struct iwl_fw *fw = &drv->fw;
 	struct iwl_ucode_header *ucode;
 	int err;
 	struct iwl_firmware_pieces pieces;
-	const unsigned int api_max = cfg->ucode_api_max;
-	unsigned int api_ok = cfg->ucode_api_ok;
-	const unsigned int api_min = cfg->ucode_api_min;
+	const unsigned int api_max = drv->cfg->ucode_api_max;
+	unsigned int api_ok = drv->cfg->ucode_api_ok;
+	const unsigned int api_min = drv->cfg->ucode_api_min;
 	u32 api_ver;
 	int i;
 
@@ -838,46 +813,10 @@
 	IWL_INFO(drv, "loaded firmware version %s", drv->fw.fw_version);
 
 	/*
-	 * For any of the failures below (before allocating pci memory)
-	 * we will try to load a version with a smaller API -- maybe the
-	 * user just got a corrupted version of the latest API.
-	 */
-
-	IWL_DEBUG_INFO(drv, "f/w package hdr ucode version raw = 0x%x\n",
-		       drv->fw.ucode_ver);
-	IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n",
-		get_sec_size(&pieces, IWL_UCODE_REGULAR,
-			     IWL_UCODE_SECTION_INST));
-	IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n",
-		get_sec_size(&pieces, IWL_UCODE_REGULAR,
-			     IWL_UCODE_SECTION_DATA));
-	IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n",
-		get_sec_size(&pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
-	IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n",
-		get_sec_size(&pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
-
-	/* Verify that uCode images will fit in card's SRAM */
-	if (get_sec_size(&pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
-							cfg->max_inst_size) {
-		IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
-			get_sec_size(&pieces, IWL_UCODE_REGULAR,
-				     IWL_UCODE_SECTION_INST));
-		goto try_again;
-	}
-
-	if (get_sec_size(&pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
-							cfg->max_data_size) {
-		IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
-			get_sec_size(&pieces, IWL_UCODE_REGULAR,
-				     IWL_UCODE_SECTION_DATA));
-		goto try_again;
-	}
-
-	/*
 	 * In mvm uCode there is no difference between data and instructions
 	 * sections.
 	 */
-	if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, cfg))
+	if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, drv->cfg))
 		goto try_again;
 
 	/* Allocate ucode buffers for card's bus-master loading ... */
@@ -901,14 +840,14 @@
 		fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
 	else
 		fw->init_evtlog_size =
-			cfg->base_params->max_event_log_size;
+			drv->cfg->base_params->max_event_log_size;
 	fw->init_errlog_ptr = pieces.init_errlog_ptr;
 	fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
 	if (pieces.inst_evtlog_size)
 		fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
 	else
 		fw->inst_evtlog_size =
-			cfg->base_params->max_event_log_size;
+			drv->cfg->base_params->max_event_log_size;
 	fw->inst_errlog_ptr = pieces.inst_errlog_ptr;
 
 	/*
@@ -924,7 +863,7 @@
 	release_firmware(ucode_raw);
 	complete(&drv->request_firmware_complete);
 
-	drv->op_mode = iwl_dvm_ops.start(drv->shrd->trans, &drv->fw);
+	drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
 
 	if (!drv->op_mode)
 		goto out_unbind;
@@ -944,42 +883,38 @@
 	release_firmware(ucode_raw);
  out_unbind:
 	complete(&drv->request_firmware_complete);
-	device_release_driver(trans(drv)->dev);
+	device_release_driver(drv->trans->dev);
 }
 
-int iwl_drv_start(struct iwl_shared *shrd,
-		  struct iwl_trans *trans, const struct iwl_cfg *cfg)
+struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
+			      const struct iwl_cfg *cfg)
 {
 	struct iwl_drv *drv;
 	int ret;
 
-	shrd->cfg = cfg;
-
 	drv = kzalloc(sizeof(*drv), GFP_KERNEL);
-	if (!drv) {
-		dev_printk(KERN_ERR, trans->dev, "Couldn't allocate iwl_drv");
-		return -ENOMEM;
-	}
-	drv->shrd = shrd;
-	shrd->drv = drv;
+	if (!drv)
+		return NULL;
+
+	drv->trans = trans;
+	drv->dev = trans->dev;
+	drv->cfg = cfg;
 
 	init_completion(&drv->request_firmware_complete);
 
 	ret = iwl_request_firmware(drv, true);
 
 	if (ret) {
-		dev_printk(KERN_ERR, trans->dev, "Couldn't request the fw");
+		IWL_ERR(trans, "Couldn't request the fw\n");
 		kfree(drv);
-		shrd->drv = NULL;
+		drv = NULL;
 	}
 
-	return ret;
+	return drv;
 }
 
-void iwl_drv_stop(struct iwl_shared *shrd)
+void iwl_drv_stop(struct iwl_drv *drv)
 {
-	struct iwl_drv *drv = shrd->drv;
-
 	wait_for_completion(&drv->request_firmware_complete);
 
 	/* op_mode can be NULL if its start failed */
@@ -989,5 +924,95 @@
 	iwl_dealloc_ucode(drv);
 
 	kfree(drv);
-	shrd->drv = NULL;
 }
+
+
+/* shared module parameters */
+struct iwl_mod_params iwlwifi_mod_params = {
+	.amsdu_size_8K = 1,
+	.restart_fw = 1,
+	.plcp_check = true,
+	.bt_coex_active = true,
+	.power_level = IWL_POWER_INDEX_1,
+	.bt_ch_announce = true,
+	.auto_agg = true,
+	/* the rest are 0 by default */
+};
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+module_param_named(debug, iwlwifi_mod_params.debug_level, uint,
+		   S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
+MODULE_PARM_DESC(11n_disable,
+	"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
+module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
+		   int, S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+
+module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
+		   int, S_IRUGO);
+MODULE_PARM_DESC(antenna_coupling,
+		 "specify antenna coupling in dB (defualt: 0 dB)");
+
+module_param_named(bt_ch_inhibition, iwlwifi_mod_params.bt_ch_announce,
+		   bool, S_IRUGO);
+MODULE_PARM_DESC(bt_ch_inhibition,
+		 "Enable BT channel inhibition (default: enable)");
+
+module_param_named(plcp_check, iwlwifi_mod_params.plcp_check, bool, S_IRUGO);
+MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
+
+module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
+MODULE_PARM_DESC(wd_disable,
+		"Disable stuck queue watchdog timer 0=system default, "
+		"1=disable, 2=enable (default: 0)");
+
+/*
+ * If bt_coex_active is set to true, uCode will do kill/defer
+ * every time the priority line is asserted (BT is sending signals on the
+ * priority line in the PCIx).
+ * If bt_coex_active is set to false, uCode will ignore the BT activity and
+ * perform normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to WiFi/BT
+ * co-existence problems.  The possible behaviors are:
+ *   able to scan and find all the available APs, but
+ *   not able to associate with any AP.
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active,
+		bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
+
+module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode, "0=system default, "
+		"1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
+
+module_param_named(power_save, iwlwifi_mod_params.power_save,
+		bool, S_IRUGO);
+MODULE_PARM_DESC(power_save,
+		 "enable WiFi power management (default: disable)");
+
+module_param_named(power_level, iwlwifi_mod_params.power_level,
+		int, S_IRUGO);
+MODULE_PARM_DESC(power_level,
+		 "default power save level (range from 1 - 5, default: 1)");
+
+module_param_named(auto_agg, iwlwifi_mod_params.auto_agg,
+		bool, S_IRUGO);
+MODULE_PARM_DESC(auto_agg,
+		 "enable agg w/o check traffic load (default: enable)");
+
+module_param_named(5ghz_disable, iwlwifi_mod_params.disable_5ghz,
+		bool, S_IRUGO);
+MODULE_PARM_DESC(5ghz_disable, "disable 5GHz band (default: 0 [enabled])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 3b771c1..2cbf137 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -63,7 +63,12 @@
 #ifndef __iwl_drv_h__
 #define __iwl_drv_h__
 
-#include "iwl-shared.h"
+/* for all modules */
+#define DRV_NAME        "iwlwifi"
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT	"Copyright(c) 2003-2012 Intel Corporation"
+#define DRV_AUTHOR     "<ilw@linux.intel.com>"
+
 
 /**
  * DOC: Driver system flows - drv component
@@ -90,34 +95,32 @@
  * 8) iwl_ucode_callback starts the wifi implementation that matches the fw
  */
 
+struct iwl_drv;
+struct iwl_trans;
+struct iwl_cfg;
 /**
  * iwl_drv_start - start the drv
  *
- * @shrd: the shrd area
  * @trans_ops: the ops of the transport
  * @cfg: device specific constants / virtual functions
  *
- * TODO: review the parameters given to this function
- *
  * starts the driver: fetches the firmware. This should be called by bus
  * specific system flows implementations. For example, the bus specific probe
  * function should do bus related operations only, and then call to this
- * function.
+ * function. It returns the driver object or %NULL if an error occurred.
  */
-int iwl_drv_start(struct iwl_shared *shrd,
-		  struct iwl_trans *trans, const struct iwl_cfg *cfg);
+struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
+			      const struct iwl_cfg *cfg);
 
 /**
  * iwl_drv_stop - stop the drv
  *
- * @shrd: the shrd area
- *
- * TODO: review the parameters given to this function
+ * @drv: the driver object returned by iwl_drv_start()
  *
  * Stop the driver. This should be called by bus specific system flows
  * implementations. For example, the bus specific remove function should first
  * call this function and then do the bus related operations only.
  */
-void iwl_drv_stop(struct iwl_shared *shrd);
+void iwl_drv_stop(struct iwl_drv *drv);
 
 #endif /* __iwl_drv_h__ */
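
To make the flow described in the kerneldoc above concrete, here is a hedged sketch of a bus-specific probe/remove pair using the new iwl_drv_start()/iwl_drv_stop() signatures; the transport allocation helpers are placeholders, not functions from this patch.

/* Sketch only: bus glue keeping the struct iwl_drv handle directly.
 * example_alloc_trans()/example_free_trans() are hypothetical helpers.
 */
static int example_bus_probe(struct pci_dev *pdev, const struct iwl_cfg *cfg)
{
	struct iwl_trans *trans = example_alloc_trans(pdev, cfg); /* placeholder */
	struct iwl_drv *drv;

	if (!trans)
		return -ENOMEM;

	drv = iwl_drv_start(trans, cfg);
	if (!drv) {
		example_free_trans(trans); /* placeholder */
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, drv);
	return 0;
}

static void example_bus_remove(struct pci_dev *pdev)
{
	struct iwl_drv *drv = pci_get_drvdata(pdev);

	iwl_drv_stop(drv);
	/* freeing the transport remains the bus layer's responsibility */
}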
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 23cea42..50c5891 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -68,9 +68,7 @@
 
 #include <net/mac80211.h>
 
-#include "iwl-commands.h"
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-debug.h"
 #include "iwl-agn.h"
 #include "iwl-eeprom.h"
@@ -187,33 +185,33 @@
 
 }
 
-static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
+static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
 {
-	u32 gp = iwl_read32(trans, CSR_EEPROM_GP) &
+	u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP) &
 			   CSR_EEPROM_GP_VALID_MSK;
 	int ret = 0;
 
-	IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp);
+	IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
 	switch (gp) {
 	case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
-		if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
-			IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
+		if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
+			IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
 				gp);
 			ret = -ENOENT;
 		}
 		break;
 	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
 	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
-		if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
-			IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
+		if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
+			IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
 			ret = -ENOENT;
 		}
 		break;
 	case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
 	default:
-		IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, "
+		IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
 			"EEPROM_GP=0x%08x\n",
-			(trans->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+			(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
 			? "OTP" : "EEPROM", gp);
 		ret = -ENOENT;
 		break;
@@ -221,11 +219,11 @@
 	return ret;
 }
 
-u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset)
+u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset)
 {
-	if (!shrd->eeprom)
+	if (!priv->eeprom)
 		return 0;
-	return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8);
+	return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
 }
 
 int iwl_eeprom_check_version(struct iwl_priv *priv)
@@ -233,11 +231,11 @@
 	u16 eeprom_ver;
 	u16 calib_ver;
 
-	eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
-	calib_ver = iwl_eeprom_calib_version(priv->shrd);
+	eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
+	calib_ver = iwl_eeprom_calib_version(priv);
 
-	if (eeprom_ver < cfg(priv)->eeprom_ver ||
-	    calib_ver < cfg(priv)->eeprom_calib_ver)
+	if (eeprom_ver < priv->cfg->eeprom_ver ||
+	    calib_ver < priv->cfg->eeprom_calib_ver)
 		goto err;
 
 	IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
@@ -247,58 +245,115 @@
 err:
 	IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
 		  "CALIB=0x%x < 0x%x\n",
-		  eeprom_ver, cfg(priv)->eeprom_ver,
-		  calib_ver,  cfg(priv)->eeprom_calib_ver);
+		  eeprom_ver, priv->cfg->eeprom_ver,
+		  calib_ver,  priv->cfg->eeprom_calib_ver);
 	return -EINVAL;
 
 }
 
 int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
 {
-	struct iwl_shared *shrd = priv->shrd;
 	u16 radio_cfg;
 
-	hw_params(priv).sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
-	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE &&
-	    !cfg(priv)->ht_params) {
+	priv->hw_params.sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
+	if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE &&
+	    !priv->cfg->ht_params) {
 		IWL_ERR(priv, "Invalid 11n configuration\n");
 		return -EINVAL;
 	}
 
-	if (!hw_params(priv).sku) {
+	if (!priv->hw_params.sku) {
 		IWL_ERR(priv, "Invalid device sku\n");
 		return -EINVAL;
 	}
 
-	IWL_INFO(priv, "Device SKU: 0x%X\n", hw_params(priv).sku);
+	IWL_INFO(priv, "Device SKU: 0x%X\n", priv->hw_params.sku);
 
-	radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
+	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
 
-	hw_params(priv).valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
-	hw_params(priv).valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+	priv->hw_params.valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+	priv->hw_params.valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
 
 	/* check overrides (some devices have wrong EEPROM) */
-	if (cfg(priv)->valid_tx_ant)
-		hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
-	if (cfg(priv)->valid_rx_ant)
-		hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
+	if (priv->cfg->valid_tx_ant)
+		priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+	if (priv->cfg->valid_rx_ant)
+		priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
 
-	if (!hw_params(priv).valid_tx_ant || !hw_params(priv).valid_rx_ant) {
+	if (!priv->hw_params.valid_tx_ant || !priv->hw_params.valid_rx_ant) {
 		IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
-			hw_params(priv).valid_tx_ant,
-			hw_params(priv).valid_rx_ant);
+			priv->hw_params.valid_tx_ant,
+			priv->hw_params.valid_rx_ant);
 		return -EINVAL;
 	}
 
 	IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
-		 hw_params(priv).valid_tx_ant, hw_params(priv).valid_rx_ant);
+		 priv->hw_params.valid_tx_ant, priv->hw_params.valid_rx_ant);
 
 	return 0;
 }
 
-void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
+u16 iwl_eeprom_calib_version(struct iwl_priv *priv)
 {
-	const u8 *addr = iwl_eeprom_query_addr(shrd,
+	struct iwl_eeprom_calib_hdr *hdr;
+
+	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+							EEPROM_CALIB_ALL);
+	return hdr->version;
+}
+
+static u32 eeprom_indirect_address(struct iwl_priv *priv, u32 address)
+{
+	u16 offset = 0;
+
+	if ((address & INDIRECT_ADDRESS) == 0)
+		return address;
+
+	switch (address & INDIRECT_TYPE_MSK) {
+	case INDIRECT_HOST:
+		offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
+		break;
+	case INDIRECT_GENERAL:
+		offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
+		break;
+	case INDIRECT_REGULATORY:
+		offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
+		break;
+	case INDIRECT_TXP_LIMIT:
+		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
+		break;
+	case INDIRECT_TXP_LIMIT_SIZE:
+		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
+		break;
+	case INDIRECT_CALIBRATION:
+		offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
+		break;
+	case INDIRECT_PROCESS_ADJST:
+		offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
+		break;
+	case INDIRECT_OTHERS:
+		offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
+		break;
+	default:
+		IWL_ERR(priv, "illegal indirect type: 0x%X\n",
+			address & INDIRECT_TYPE_MSK);
+		break;
+	}
+
+	/* translate the offset from words to bytes */
+	return (address & ADDRESS_MSK) + (offset << 1);
+}
+
+const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset)
+{
+	u32 address = eeprom_indirect_address(priv, offset);
+	BUG_ON(address >= priv->cfg->base_params->eeprom_size);
+	return &priv->eeprom[address];
+}
+
+void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac)
+{
+	const u8 *addr = iwl_eeprom_query_addr(priv,
 					EEPROM_MAC_ADDRESS);
 	memcpy(mac, addr, ETH_ALEN);
 }
@@ -376,7 +431,7 @@
 		 * CSR auto clock gate disable bit -
 		 * this is only applicable for HW with OTP shadow RAM
 		 */
-		if (cfg(trans)->base_params->shadow_ram_support)
+		if (trans->cfg->base_params->shadow_ram_support)
 			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
 				CSR_RESET_LINK_PWR_MGMT_DISABLED);
 	}
@@ -497,7 +552,7 @@
 		}
 		/* more in the link list, continue */
 		usedblocks++;
-	} while (usedblocks <= cfg(trans)->base_params->max_ll_items);
+	} while (usedblocks <= trans->cfg->base_params->max_ll_items);
 
 	/* OTP has no valid blocks */
 	IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n");
@@ -591,7 +646,6 @@
 
 static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
 {
-	struct iwl_shared *shrd = priv->shrd;
 	struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
 	int idx, entries;
 	__le16 *txp_len;
@@ -600,10 +654,10 @@
 	BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
 
 	/* the length is in 16-bit words, but we want entries */
-	txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS);
+	txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
 	entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
 
-	txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS);
+	txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
 
 	for (idx = 0; idx < entries; idx++) {
 		txp = &txp_array[idx];
@@ -637,7 +691,7 @@
 				 ((txp->delta_20_in_40 & 0xf0) >> 4),
 				 (txp->delta_20_in_40 & 0x0f));
 
-		max_txp_avg = iwl_get_max_txpower_avg(cfg(priv), txp_array, idx,
+		max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
 						      &max_txp_avg_halfdbm);
 
 		/*
@@ -656,66 +710,66 @@
 /**
  * iwl_eeprom_init - read EEPROM contents
  *
- * Load the EEPROM contents from adapter into shrd->eeprom
+ * Load the EEPROM contents from adapter into priv->eeprom
  *
  * NOTE:  This routine uses the non-debug IO access functions.
  */
-int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev)
+int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
 {
 	__le16 *e;
-	u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
+	u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP);
 	int sz;
 	int ret;
 	u16 addr;
 	u16 validblockaddr = 0;
 	u16 cache_addr = 0;
 
-	trans->nvm_device_type = iwl_get_nvm_type(trans, hw_rev);
-	if (trans->nvm_device_type == -ENOENT)
+	priv->nvm_device_type = iwl_get_nvm_type(priv->trans, hw_rev);
+	if (priv->nvm_device_type == -ENOENT)
 		return -ENOENT;
 	/* allocate eeprom */
-	sz = cfg(trans)->base_params->eeprom_size;
-	IWL_DEBUG_EEPROM(trans, "NVM size = %d\n", sz);
-	trans->shrd->eeprom = kzalloc(sz, GFP_KERNEL);
-	if (!trans->shrd->eeprom) {
+	sz = priv->cfg->base_params->eeprom_size;
+	IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
+	priv->eeprom = kzalloc(sz, GFP_KERNEL);
+	if (!priv->eeprom) {
 		ret = -ENOMEM;
 		goto alloc_err;
 	}
-	e = (__le16 *)trans->shrd->eeprom;
+	e = (__le16 *)priv->eeprom;
 
-	ret = iwl_eeprom_verify_signature(trans);
+	ret = iwl_eeprom_verify_signature(priv);
 	if (ret < 0) {
-		IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
 		ret = -ENOENT;
 		goto err;
 	}
 
 	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
-	ret = iwl_eeprom_acquire_semaphore(trans);
+	ret = iwl_eeprom_acquire_semaphore(priv->trans);
 	if (ret < 0) {
-		IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
+		IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
 		ret = -ENOENT;
 		goto err;
 	}
 
-	if (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
+	if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
 
-		ret = iwl_init_otp_access(trans);
+		ret = iwl_init_otp_access(priv->trans);
 		if (ret) {
-			IWL_ERR(trans, "Failed to initialize OTP access.\n");
+			IWL_ERR(priv, "Failed to initialize OTP access.\n");
 			ret = -ENOENT;
 			goto done;
 		}
-		iwl_write32(trans, CSR_EEPROM_GP,
-			    iwl_read32(trans, CSR_EEPROM_GP) &
+		iwl_write32(priv->trans, CSR_EEPROM_GP,
+			    iwl_read32(priv->trans, CSR_EEPROM_GP) &
 			    ~CSR_EEPROM_GP_IF_OWNER_MSK);
 
-		iwl_set_bit(trans, CSR_OTP_GP_REG,
+		iwl_set_bit(priv->trans, CSR_OTP_GP_REG,
 			     CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
 			     CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
 		/* traversing the linked list if no shadow ram supported */
-		if (!cfg(trans)->base_params->shadow_ram_support) {
-			if (iwl_find_otp_image(trans, &validblockaddr)) {
+		if (!priv->cfg->base_params->shadow_ram_support) {
+			if (iwl_find_otp_image(priv->trans, &validblockaddr)) {
 				ret = -ENOENT;
 				goto done;
 			}
@@ -724,7 +778,8 @@
 		     addr += sizeof(u16)) {
 			__le16 eeprom_data;
 
-			ret = iwl_read_otp_word(trans, addr, &eeprom_data);
+			ret = iwl_read_otp_word(priv->trans, addr,
+						&eeprom_data);
 			if (ret)
 				goto done;
 			e[cache_addr / 2] = eeprom_data;
@@ -735,94 +790,93 @@
 		for (addr = 0; addr < sz; addr += sizeof(u16)) {
 			u32 r;
 
-			iwl_write32(trans, CSR_EEPROM_REG,
+			iwl_write32(priv->trans, CSR_EEPROM_REG,
 				    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
 
-			ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+			ret = iwl_poll_bit(priv->trans, CSR_EEPROM_REG,
 						  CSR_EEPROM_REG_READ_VALID_MSK,
 						  CSR_EEPROM_REG_READ_VALID_MSK,
 						  IWL_EEPROM_ACCESS_TIMEOUT);
 			if (ret < 0) {
-				IWL_ERR(trans,
+				IWL_ERR(priv,
 					"Time out reading EEPROM[%d]\n", addr);
 				goto done;
 			}
-			r = iwl_read32(trans, CSR_EEPROM_REG);
+			r = iwl_read32(priv->trans, CSR_EEPROM_REG);
 			e[addr / 2] = cpu_to_le16(r >> 16);
 		}
 	}
 
-	IWL_DEBUG_EEPROM(trans, "NVM Type: %s, version: 0x%x\n",
-		       (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+	IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
+		       (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
 		       ? "OTP" : "EEPROM",
-		       iwl_eeprom_query16(trans->shrd, EEPROM_VERSION));
+		       iwl_eeprom_query16(priv, EEPROM_VERSION));
 
 	ret = 0;
 done:
-	iwl_eeprom_release_semaphore(trans);
+	iwl_eeprom_release_semaphore(priv->trans);
 
 err:
 	if (ret)
-		iwl_eeprom_free(trans->shrd);
+		iwl_eeprom_free(priv);
 alloc_err:
 	return ret;
 }
 
-void iwl_eeprom_free(struct iwl_shared *shrd)
+void iwl_eeprom_free(struct iwl_priv *priv)
 {
-	kfree(shrd->eeprom);
-	shrd->eeprom = NULL;
+	kfree(priv->eeprom);
+	priv->eeprom = NULL;
 }
 
-static void iwl_init_band_reference(const struct iwl_priv *priv,
+static void iwl_init_band_reference(struct iwl_priv *priv,
 			int eep_band, int *eeprom_ch_count,
 			const struct iwl_eeprom_channel **eeprom_ch_info,
 			const u8 **eeprom_ch_index)
 {
-	struct iwl_shared *shrd = priv->shrd;
-	u32 offset = cfg(priv)->lib->
+	u32 offset = priv->lib->
 			eeprom_ops.regulatory_bands[eep_band - 1];
 	switch (eep_band) {
 	case 1:		/* 2.4GHz band */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(shrd, offset);
+				iwl_eeprom_query_addr(priv, offset);
 		*eeprom_ch_index = iwl_eeprom_band_1;
 		break;
 	case 2:		/* 4.9GHz band */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(shrd, offset);
+				iwl_eeprom_query_addr(priv, offset);
 		*eeprom_ch_index = iwl_eeprom_band_2;
 		break;
 	case 3:		/* 5.2GHz band */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(shrd, offset);
+				iwl_eeprom_query_addr(priv, offset);
 		*eeprom_ch_index = iwl_eeprom_band_3;
 		break;
 	case 4:		/* 5.5GHz band */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(shrd, offset);
+				iwl_eeprom_query_addr(priv, offset);
 		*eeprom_ch_index = iwl_eeprom_band_4;
 		break;
 	case 5:		/* 5.7GHz band */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(shrd, offset);
+				iwl_eeprom_query_addr(priv, offset);
 		*eeprom_ch_index = iwl_eeprom_band_5;
 		break;
 	case 6:		/* 2.4GHz ht40 channels */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(shrd, offset);
+				iwl_eeprom_query_addr(priv, offset);
 		*eeprom_ch_index = iwl_eeprom_band_6;
 		break;
 	case 7:		/* 5 GHz ht40 channels */
 		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
 		*eeprom_ch_info = (struct iwl_eeprom_channel *)
-				iwl_eeprom_query_addr(shrd, offset);
+				iwl_eeprom_query_addr(priv, offset);
 		*eeprom_ch_index = iwl_eeprom_band_7;
 		break;
 	default:
@@ -987,9 +1041,9 @@
 	}
 
 	/* Check if we do have HT40 channels */
-	if (cfg(priv)->lib->eeprom_ops.regulatory_bands[5] ==
+	if (priv->lib->eeprom_ops.regulatory_bands[5] ==
 	    EEPROM_REGULATORY_BAND_NO_HT40 &&
-	    cfg(priv)->lib->eeprom_ops.regulatory_bands[6] ==
+	    priv->lib->eeprom_ops.regulatory_bands[6] ==
 	    EEPROM_REGULATORY_BAND_NO_HT40)
 		return 0;
 
@@ -1025,7 +1079,7 @@
 	 * driver need to process addition information
 	 * to determine the max channel tx power limits
 	 */
-	if (cfg(priv)->lib->eeprom_ops.enhanced_txpower)
+	if (priv->lib->eeprom_ops.enhanced_txpower)
 		iwl_eeprom_enhanced_txpower(priv);
 
 	return 0;
@@ -1072,11 +1126,11 @@
 {
 	u16 radio_cfg;
 
-	radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG);
+	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
 
 	/* write radio config values to register */
 	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
-		iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
+		iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
 			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
 			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
 			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));
@@ -1088,7 +1142,7 @@
 		WARN_ON(1);
 
 	/* set CSR_HW_CONFIG_REG for uCode use */
-	iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
+	iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
 		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
 		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
 }
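
As an illustration of the indirect-address resolution implemented by eeprom_indirect_address() above, the hedged sketch below (not part of the patch) resolves a regulatory-band definition into a byte offset in priv->eeprom.

/* Illustration only: EEPROM_6000_REG_BAND_24_HT40_CHANNELS is defined as
 * 0x80 | INDIRECT_ADDRESS | INDIRECT_REGULATORY, so the resolved byte
 * offset is (0x80 & ADDRESS_MSK) + 2 * link, where link is the 16-bit
 * word read from EEPROM_LINK_REGULATORY.
 */
static const u8 *example_ht40_band_24(struct iwl_priv *priv)
{
	u32 address = EEPROM_6000_REG_BAND_24_HT40_CHANNELS;
	u16 link = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);

	return &priv->eeprom[(address & ADDRESS_MSK) + (link << 1)];
}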
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index e4a7583..64bfd94 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -66,8 +66,6 @@
 #include <net/mac80211.h>
 
 struct iwl_priv;
-struct iwl_shared;
-struct iwl_trans;
 
 /*
  * EEPROM access time values:
@@ -208,59 +206,6 @@
 /* 6000 regulatory - indirect access */
 #define EEPROM_6000_REG_BAND_24_HT40_CHANNELS  ((0x80)\
 		| INDIRECT_ADDRESS | INDIRECT_REGULATORY)   /* 14  bytes */
-
-/* 5000 Specific */
-#define EEPROM_5000_TX_POWER_VERSION    (4)
-#define EEPROM_5000_EEPROM_VERSION	(0x11A)
-
-/* 5050 Specific */
-#define EEPROM_5050_TX_POWER_VERSION    (4)
-#define EEPROM_5050_EEPROM_VERSION	(0x21E)
-
-/* 1000 Specific */
-#define EEPROM_1000_TX_POWER_VERSION    (4)
-#define EEPROM_1000_EEPROM_VERSION	(0x15C)
-
-/* 6x00 Specific */
-#define EEPROM_6000_TX_POWER_VERSION    (4)
-#define EEPROM_6000_EEPROM_VERSION	(0x423)
-
-/* 6x50 Specific */
-#define EEPROM_6050_TX_POWER_VERSION    (4)
-#define EEPROM_6050_EEPROM_VERSION	(0x532)
-
-/* 6150 Specific */
-#define EEPROM_6150_TX_POWER_VERSION    (6)
-#define EEPROM_6150_EEPROM_VERSION	(0x553)
-
-/* 6x05 Specific */
-#define EEPROM_6005_TX_POWER_VERSION    (6)
-#define EEPROM_6005_EEPROM_VERSION	(0x709)
-
-/* 6x30 Specific */
-#define EEPROM_6030_TX_POWER_VERSION    (6)
-#define EEPROM_6030_EEPROM_VERSION	(0x709)
-
-/* 2x00 Specific */
-#define EEPROM_2000_TX_POWER_VERSION    (6)
-#define EEPROM_2000_EEPROM_VERSION	(0x805)
-
-/* 6x35 Specific */
-#define EEPROM_6035_TX_POWER_VERSION    (6)
-#define EEPROM_6035_EEPROM_VERSION	(0x753)
-
-
-/* OTP */
-/* lower blocks contain EEPROM image and calibration data */
-#define OTP_LOW_IMAGE_SIZE		(2 * 512 * sizeof(u16)) /* 2 KB */
-/* high blocks contain PAPD data */
-#define OTP_HIGH_IMAGE_SIZE_6x00        (6 * 512 * sizeof(u16)) /* 6 KB */
-#define OTP_HIGH_IMAGE_SIZE_1000        (0x200 * sizeof(u16)) /* 1024 bytes */
-#define OTP_MAX_LL_ITEMS_1000		(3)	/* OTP blocks for 1000 */
-#define OTP_MAX_LL_ITEMS_6x00		(4)	/* OTP blocks for 6x00 */
-#define OTP_MAX_LL_ITEMS_6x50		(7)	/* OTP blocks for 6x50 */
-#define OTP_MAX_LL_ITEMS_2x00		(4)	/* OTP blocks for 2x00 */
-
 /* 2.4 GHz */
 extern const u8 iwl_eeprom_band_1[14];
 
@@ -306,12 +251,14 @@
 };
 
 
-int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev);
-void iwl_eeprom_free(struct iwl_shared *shrd);
-int  iwl_eeprom_check_version(struct iwl_priv *priv);
+int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
+void iwl_eeprom_free(struct iwl_priv *priv);
+int iwl_eeprom_check_version(struct iwl_priv *priv);
 int iwl_eeprom_init_hw_params(struct iwl_priv *priv);
-const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset);
-u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset);
+u16 iwl_eeprom_calib_version(struct iwl_priv *priv);
+const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset);
+u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset);
+void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac);
 int iwl_init_channel_map(struct iwl_priv *priv);
 void iwl_free_channel_map(struct iwl_priv *priv);
 const struct iwl_channel_info *iwl_get_channel_info(
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 9020809..74bce97 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -104,15 +104,29 @@
  * (see struct iwl_tfd_frame).  These 16 pointer registers are offset by 0x04
  * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
  * aligned (address bits 0-7 must be 0).
+ * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
+ * for them are in different places.
  *
  * Bit fields in each pointer register:
  *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
  */
-#define FH_MEM_CBBC_LOWER_BOUND          (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_MEM_CBBC_UPPER_BOUND          (FH_MEM_LOWER_BOUND + 0xA10)
+#define FH_MEM_CBBC_0_15_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_MEM_CBBC_0_15_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xA10)
+#define FH_MEM_CBBC_16_19_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xBF0)
+#define FH_MEM_CBBC_16_19_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_CBBC_20_31_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xB20)
+#define FH_MEM_CBBC_20_31_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xB80)
 
-/* Find TFD CB base pointer for given queue (range 0-15). */
-#define FH_MEM_CBBC_QUEUE(x)  (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
+/* Find TFD CB base pointer for given queue */
+static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
+{
+	if (chnl < 16)
+		return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
+	if (chnl < 20)
+		return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
+	WARN_ON_ONCE(chnl >= 32);
+	return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
+}
 
 
 /**
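
A quick worked illustration (not from the patch) of the three register windows the new FH_MEM_CBBC_QUEUE() helper selects:

/* Illustration only:
 *   FH_MEM_CBBC_QUEUE(5)  == FH_MEM_CBBC_0_15_LOWER_BOUND  + 4 * 5
 *   FH_MEM_CBBC_QUEUE(18) == FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * 2
 *   FH_MEM_CBBC_QUEUE(25) == FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * 5
 * Queues 32 and above trigger the WARN_ON_ONCE() in the helper.
 */
static void example_print_cbbc_regs(void)
{
	unsigned int q;

	for (q = 0; q < 32; q++)
		printk(KERN_DEBUG "queue %u -> CBBC reg offset 0x%x\n",
		       q, FH_MEM_CBBC_QUEUE(q));
}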
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index c924ccb..e715640 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -93,15 +93,7 @@
  * new TLV uCode file layout
  *
  * The new TLV file format contains TLVs, that each specify
- * some piece of data. To facilitate "groups", for example
- * different instruction image with different capabilities,
- * bundled with the same init image, an alternative mechanism
- * is provided:
- * When the alternative field is 0, that means that the item
- * is always valid. When it is non-zero, then it is only
- * valid in conjunction with items of the same alternative,
- * in which case the driver (user) selects one alternative
- * to use.
+ * some piece of data.
  */
 
 enum iwl_ucode_tlv_type {
@@ -132,8 +124,7 @@
 };
 
 struct iwl_ucode_tlv {
-	__le16 type;		/* see above */
-	__le16 alternative;	/* see comment */
+	__le32 type;		/* see above */
 	__le32 length;		/* not including type/length fields */
 	u8 data[0];
 };
@@ -152,7 +143,7 @@
 	u8 human_readable[64];
 	__le32 ver;		/* major/minor/API/serial */
 	__le32 build;
-	__le64 alternatives;	/* bitmask of valid alternatives */
+	__le64 ignore;
 	/*
 	 * The data contained herein has a TLV layout,
 	 * see above for the TLV header and types.
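
For reference, a hedged sketch (not part of the patch) of walking the simplified TLV stream now that the type field is a __le32 and alternative selection is gone; it mirrors the bounds handling of the parser loop in iwl-drv.c above.

/* Sketch only: iterate new-format TLVs in a firmware buffer. */
static int example_walk_tlvs(const u8 *data, size_t len)
{
	while (len >= sizeof(struct iwl_ucode_tlv)) {
		const struct iwl_ucode_tlv *tlv = (const void *)data;
		u32 tlv_len = le32_to_cpu(tlv->length);

		len -= sizeof(*tlv);
		if (len < tlv_len)
			return -EINVAL;		/* truncated TLV */

		pr_debug("TLV type %u, length %u\n",
			 le32_to_cpu(tlv->type), tlv_len);

		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);
	}
	return 0;
}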
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 8e36bdc..2153e4c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -63,6 +63,7 @@
 #ifndef __iwl_fw_h__
 #define __iwl_fw_h__
 #include <linux/types.h>
+#include <net/mac80211.h>
 
 /**
  * enum iwl_ucode_tlv_flag - ucode API flags
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 09b8567..abb3250 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -30,7 +30,6 @@
 #define __iwl_io_h__
 
 #include "iwl-devtrace.h"
-#include "iwl-shared.h"
 #include "iwl-trans.h"
 
 static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 1993a2b..4700041 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -36,11 +36,10 @@
 #include <asm/unaligned.h>
 
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-agn.h"
 #include "iwl-io.h"
 #include "iwl-trans.h"
-#include "iwl-shared.h"
+#include "iwl-modparams.h"
 
 /* Throughput		OFF time(ms)	ON time (ms)
  *	>300			25		25
@@ -71,7 +70,7 @@
 /* Set led register off */
 void iwlagn_led_enable(struct iwl_priv *priv)
 {
-	iwl_write32(trans(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+	iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
 }
 
 /*
@@ -107,9 +106,9 @@
 	};
 	u32 reg;
 
-	reg = iwl_read32(trans(priv), CSR_LED_REG);
+	reg = iwl_read32(priv->trans, CSR_LED_REG);
 	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
-		iwl_write32(trans(priv), CSR_LED_REG,
+		iwl_write32(priv->trans, CSR_LED_REG,
 			    reg & CSR_LED_BSM_CTRL_MSK);
 
 	return iwl_dvm_send_cmd(priv, &cmd);
@@ -138,11 +137,11 @@
 	}
 
 	IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
-			cfg(priv)->base_params->led_compensation);
+			priv->cfg->base_params->led_compensation);
 	led_cmd.on = iwl_blink_compensation(priv, on,
-				cfg(priv)->base_params->led_compensation);
+				priv->cfg->base_params->led_compensation);
 	led_cmd.off = iwl_blink_compensation(priv, off,
-				cfg(priv)->base_params->led_compensation);
+				priv->cfg->base_params->led_compensation);
 
 	ret = iwl_send_led_cmd(priv, &led_cmd);
 	if (!ret) {
@@ -175,7 +174,7 @@
 
 void iwl_leds_init(struct iwl_priv *priv)
 {
-	int mode = iwlagn_mod_params.led_mode;
+	int mode = iwlwifi_mod_params.led_mode;
 	int ret;
 
 	if (mode == IWL_LED_DISABLE) {
@@ -183,7 +182,7 @@
 		return;
 	}
 	if (mode == IWL_LED_DEFAULT)
-		mode = cfg(priv)->led_mode;
+		mode = priv->cfg->led_mode;
 
 	priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
 				   wiphy_name(priv->hw->wiphy));
@@ -207,7 +206,7 @@
 		break;
 	}
 
-	ret = led_classdev_register(trans(priv)->dev, &priv->led);
+	ret = led_classdev_register(priv->trans->dev, &priv->led);
 	if (ret) {
 		kfree(priv->led.name);
 		return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index b6805f8..d33cc9c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -44,13 +44,12 @@
 
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-agn-calib.h"
 #include "iwl-agn.h"
-#include "iwl-shared.h"
 #include "iwl-trans.h"
 #include "iwl-op-mode.h"
+#include "iwl-modparams.h"
 
 /*****************************************************************************
  *
@@ -147,7 +146,13 @@
 		    IEEE80211_HW_AMPDU_AGGREGATION |
 		    IEEE80211_HW_NEED_DTIM_PERIOD |
 		    IEEE80211_HW_SPECTRUM_MGMT |
-		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+		    IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+		    IEEE80211_HW_QUEUE_CONTROL |
+		    IEEE80211_HW_SUPPORTS_PS |
+		    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+		    IEEE80211_HW_SCAN_WHILE_IDLE;
+
+	hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
 
 	/*
 	 * Including the following line will crash some AP's.  This
@@ -156,10 +161,7 @@
 	hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
 	 */
 
-	hw->flags |= IEEE80211_HW_SUPPORTS_PS |
-		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
-	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
+	if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
 		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
 			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;
 
@@ -197,13 +199,13 @@
 			    WIPHY_FLAG_IBSS_RSN;
 
 	if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
-	    trans(priv)->ops->wowlan_suspend &&
-	    device_can_wakeup(trans(priv)->dev)) {
+	    priv->trans->ops->wowlan_suspend &&
+	    device_can_wakeup(priv->trans->dev)) {
 		hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
 					  WIPHY_WOWLAN_DISCONNECT |
 					  WIPHY_WOWLAN_EAP_IDENTITY_REQ |
 					  WIPHY_WOWLAN_RFKILL_RELEASE;
-		if (!iwlagn_mod_params.sw_crypto)
+		if (!iwlwifi_mod_params.sw_crypto)
 			hw->wiphy->wowlan.flags |=
 				WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
 				WIPHY_WOWLAN_GTK_REKEY_FAILURE;
@@ -215,7 +217,7 @@
 					IWLAGN_WOWLAN_MAX_PATTERN_LEN;
 	}
 
-	if (iwlagn_mod_params.power_save)
+	if (iwlwifi_mod_params.power_save)
 		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
 	else
 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -224,8 +226,11 @@
 	/* we create the 802.11 header and a zero-length SSID element */
 	hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
 
-	/* Default value; 4 EDCA QOS priorities */
-	hw->queues = 4;
+	/*
+	 * We don't use all queues: 4 and 9 are unused and any
+	 * aggregation queue gets mapped down to the AC queue.
+	 */
+	hw->queues = IWLAGN_FIRST_AMPDU_QUEUE;
 
 	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
 
@@ -236,7 +241,7 @@
 		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
 			&priv->bands[IEEE80211_BAND_5GHZ];
 
-	hw->wiphy->hw_version = trans(priv)->hw_id;
+	hw->wiphy->hw_version = priv->trans->hw_id;
 
 	iwl_leds_init(priv);
 
@@ -332,7 +337,7 @@
 	return 0;
 }
 
-static void iwlagn_mac_stop(struct ieee80211_hw *hw)
+void iwlagn_mac_stop(struct ieee80211_hw *hw)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
@@ -355,18 +360,18 @@
 	 * even if interface is down, trans->down will leave the RF
 	 * kill interrupt enabled
 	 */
-	iwl_trans_stop_hw(trans(priv));
+	iwl_trans_stop_hw(priv->trans, false);
 
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
-				      struct ieee80211_vif *vif,
-				      struct cfg80211_gtk_rekey_data *data)
+void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct cfg80211_gtk_rekey_data *data)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
-	if (iwlagn_mod_params.sw_crypto)
+	if (iwlwifi_mod_params.sw_crypto)
 		return;
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -388,8 +393,7 @@
 
 #ifdef CONFIG_PM_SLEEP
 
-static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
-			      struct cfg80211_wowlan *wowlan)
+int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
@@ -412,9 +416,9 @@
 	if (ret)
 		goto error;
 
-	device_set_wakeup_enable(trans(priv)->dev, true);
+	device_set_wakeup_enable(priv->trans->dev, true);
 
-	iwl_trans_wowlan_suspend(trans(priv));
+	iwl_trans_wowlan_suspend(priv->trans);
 
 	goto out;
 
@@ -437,27 +441,28 @@
 	unsigned long flags;
 	u32 base, status = 0xffffffff;
 	int ret = -EIO;
-	const struct fw_img *img;
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 	mutex_lock(&priv->mutex);
 
-	iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR,
+	iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
 			  CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
 
-	base = priv->shrd->device_pointers.error_event_table;
+	base = priv->device_pointers.error_event_table;
 	if (iwlagn_hw_valid_rtc_data_addr(base)) {
-		spin_lock_irqsave(&trans(priv)->reg_lock, flags);
-		ret = iwl_grab_nic_access_silent(trans(priv));
+		spin_lock_irqsave(&priv->trans->reg_lock, flags);
+		ret = iwl_grab_nic_access_silent(priv->trans);
 		if (likely(ret == 0)) {
-			iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, base);
-			status = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
-			iwl_release_nic_access(trans(priv));
+			iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
+			status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
+			iwl_release_nic_access(priv->trans);
 		}
-		spin_unlock_irqrestore(&trans(priv)->reg_lock, flags);
+		spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 		if (ret == 0) {
+			const struct fw_img *img;
+
 			img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
 			if (!priv->wowlan_sram) {
 				priv->wowlan_sram =
@@ -467,7 +472,7 @@
 
 			if (priv->wowlan_sram)
 				_iwl_read_targ_mem_words(
-				      trans(priv), 0x800000,
+				      priv->trans, 0x800000,
 				      priv->wowlan_sram,
 				      img->sec[IWL_UCODE_SECTION_DATA].len / 4);
 		}
@@ -479,7 +484,7 @@
 
 	priv->wowlan = false;
 
-	device_set_wakeup_enable(trans(priv)->dev, false);
+	device_set_wakeup_enable(priv->trans->dev, false);
 
 	iwlagn_prepare_restart(priv);
 
@@ -497,7 +502,7 @@
 
 #endif
 
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
@@ -508,21 +513,21 @@
 		dev_kfree_skb_any(skb);
 }
 
-static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
-				       struct ieee80211_vif *vif,
-				       struct ieee80211_key_conf *keyconf,
-				       struct ieee80211_sta *sta,
-				       u32 iv32, u16 *phase1key)
+void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_key_conf *keyconf,
+				struct ieee80211_sta *sta,
+				u32 iv32, u16 *phase1key)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
 	iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
 }
 
-static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-			      struct ieee80211_vif *vif,
-			      struct ieee80211_sta *sta,
-			      struct ieee80211_key_conf *key)
+int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		       struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta,
+		       struct ieee80211_key_conf *key)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -532,7 +537,7 @@
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 
-	if (iwlagn_mod_params.sw_crypto) {
+	if (iwlwifi_mod_params.sw_crypto) {
 		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
 		return -EOPNOTSUPP;
 	}
@@ -622,11 +627,11 @@
 	return ret;
 }
 
-static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
-				   struct ieee80211_vif *vif,
-				   enum ieee80211_ampdu_mlme_action action,
-				   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-				   u8 buf_size)
+int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif,
+			    enum ieee80211_ampdu_mlme_action action,
+			    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+			    u8 buf_size)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	int ret = -EINVAL;
@@ -635,7 +640,7 @@
 	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
 		     sta->addr, tid);
 
-	if (!(hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE))
+	if (!(priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE))
 		return -EACCES;
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -643,7 +648,7 @@
 
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
-		if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
 			break;
 		IWL_DEBUG_HT(priv, "start Rx\n");
 		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -653,7 +658,9 @@
 		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
 		break;
 	case IEEE80211_AMPDU_TX_START:
-		if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+		if (!priv->trans->ops->tx_agg_setup)
+			break;
+		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
 			break;
 		IWL_DEBUG_HT(priv, "start Tx\n");
 		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
@@ -667,7 +674,7 @@
 				     priv->agg_tids_count);
 		}
 		if (!priv->agg_tids_count &&
-		    hw_params(priv).use_rts_for_aggregation) {
+		    priv->hw_params.use_rts_for_aggregation) {
 			/*
 			 * switch off RTS/CTS if it was previously enabled
 			 */
@@ -746,11 +753,11 @@
 	return ret;
 }
 
-static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
-				struct ieee80211_vif *vif,
-				struct ieee80211_sta *sta,
-				enum ieee80211_sta_state old_state,
-				enum ieee80211_sta_state new_state)
+int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
+			 struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta,
+			 enum ieee80211_sta_state old_state,
+			 enum ieee80211_sta_state new_state)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -829,8 +836,8 @@
 	return ret;
 }
 
-static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
-				struct ieee80211_channel_switch *ch_switch)
+void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+			       struct ieee80211_channel_switch *ch_switch)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	const struct iwl_channel_info *ch_info;
@@ -863,7 +870,7 @@
 	if (!iwl_is_associated_ctx(ctx))
 		goto out;
 
-	if (!cfg(priv)->lib->set_channel_switch)
+	if (!priv->lib->set_channel_switch)
 		goto out;
 
 	ch = channel->hw_value;
@@ -892,14 +899,13 @@
 	iwl_set_rxon_ht(priv, ht_conf);
 	iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
 
-	iwl_set_rate(priv);
 	/*
 	 * at this point, staging_rxon has the
 	 * configuration for channel switch
 	 */
 	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
 	priv->switch_channel = cpu_to_le16(ch);
-	if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) {
+	if (priv->lib->set_channel_switch(priv, ch_switch)) {
 		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
 		priv->switch_channel = 0;
 		ieee80211_chswitch_done(ctx->vif, false);
@@ -910,10 +916,25 @@
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static void iwlagn_configure_filter(struct ieee80211_hw *hw,
-				    unsigned int changed_flags,
-				    unsigned int *total_flags,
-				    u64 multicast)
+void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
+{
+	/*
+	 * MULTI-FIXME
+	 * See iwlagn_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+		ieee80211_chswitch_done(ctx->vif, is_success);
+}
+
+void iwlagn_configure_filter(struct ieee80211_hw *hw,
+			     unsigned int changed_flags,
+			     unsigned int *total_flags,
+			     u64 multicast)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	__le32 filter_or = 0, filter_nand = 0;
@@ -960,7 +981,7 @@
 			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
 
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
+void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
@@ -988,7 +1009,7 @@
 		}
 	}
 	IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
-	iwl_trans_wait_tx_queue_empty(trans(priv));
+	iwl_trans_wait_tx_queue_empty(priv->trans);
 done:
 	mutex_unlock(&priv->mutex);
 	IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1003,7 +1024,7 @@
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
 	int err = 0;
 
-	if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
 		return -EOPNOTSUPP;
 
 	if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
@@ -1087,11 +1108,11 @@
 	return err;
 }
 
-static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
-	if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
 		return -EOPNOTSUPP;
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -1104,16 +1125,16 @@
 	return 0;
 }
 
-static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
-			   enum ieee80211_rssi_event rssi_event)
+void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+			      enum ieee80211_rssi_event rssi_event)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 	mutex_lock(&priv->mutex);
 
-	if (cfg(priv)->bt_params &&
-			cfg(priv)->bt_params->advanced_bt_coexist) {
+	if (priv->cfg->bt_params &&
+			priv->cfg->bt_params->advanced_bt_coexist) {
 		if (rssi_event == RSSI_EVENT_LOW)
 			priv->bt_enable_pspoll = true;
 		else if (rssi_event == RSSI_EVENT_HIGH)
@@ -1129,8 +1150,8 @@
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
-			   struct ieee80211_sta *sta, bool set)
+int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+		       struct ieee80211_sta *sta, bool set)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
@@ -1139,9 +1160,9 @@
 	return 0;
 }
 
-static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
-		    struct ieee80211_vif *vif, u16 queue,
-		    const struct ieee80211_tx_queue_params *params)
+int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+		       struct ieee80211_vif *vif, u16 queue,
+		       const struct ieee80211_tx_queue_params *params)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -1183,7 +1204,7 @@
 	return 0;
 }
 
-static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
+int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
@@ -1199,11 +1220,10 @@
 	return iwlagn_commit_rxon(priv, ctx);
 }
 
-static int iwl_setup_interface(struct iwl_priv *priv,
-			       struct iwl_rxon_context *ctx)
+int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 	struct ieee80211_vif *vif = ctx->vif;
-	int err;
+	int err, ac;
 
 	lockdep_assert_held(&priv->mutex);
 
@@ -1223,7 +1243,7 @@
 		return err;
 	}
 
-	if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist &&
+	if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
 	    vif->type == NL80211_IFTYPE_ADHOC) {
 		/*
 		 * pretend to have high BT traffic as long as we
@@ -1233,17 +1253,27 @@
 		priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
 	}
 
+	/* set up queue mappings */
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+		vif->hw_queue[ac] = ctx->ac_to_queue[ac];
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		vif->cab_queue = ctx->mcast_queue;
+	else
+		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+
 	return 0;
 }
 
 static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
-			     struct ieee80211_vif *vif)
+				    struct ieee80211_vif *vif)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
 	struct iwl_rxon_context *tmp, *ctx = NULL;
 	int err;
 	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
+	bool reset = false;
 
 	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
 			   viftype, vif->addr);
@@ -1265,6 +1295,13 @@
 			tmp->interface_modes | tmp->exclusive_interface_modes;
 
 		if (tmp->vif) {
+			/* On reset we need to add the same interface again */
+			if (tmp->vif == vif) {
+				reset = true;
+				ctx = tmp;
+				break;
+			}
+
 			/* check if this busy context is exclusive */
 			if (tmp->exclusive_interface_modes &
 						BIT(tmp->vif->type)) {
@@ -1291,7 +1328,7 @@
 	ctx->vif = vif;
 
 	err = iwl_setup_interface(priv, ctx);
-	if (!err)
+	if (!err || reset)
 		goto out;
 
 	ctx->vif = NULL;
@@ -1303,9 +1340,9 @@
 	return err;
 }
 
-static void iwl_teardown_interface(struct iwl_priv *priv,
-				   struct ieee80211_vif *vif,
-				   bool mode_change)
+void iwl_teardown_interface(struct iwl_priv *priv,
+			    struct ieee80211_vif *vif,
+			    bool mode_change)
 {
 	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 
@@ -1446,9 +1483,9 @@
 	return err;
 }
 
-static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
-		    struct ieee80211_vif *vif,
-		    struct cfg80211_scan_request *req)
+int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
+		       struct ieee80211_vif *vif,
+		       struct cfg80211_scan_request *req)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	int ret;
@@ -1503,7 +1540,7 @@
 	iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
 }
 
-static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
 			   struct ieee80211_vif *vif,
 			   enum sta_notify_cmd cmd,
 			   struct ieee80211_sta *sta)
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
new file mode 100644
index 0000000..d9a86d6
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -0,0 +1,126 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_modparams_h__
+#define __iwl_modparams_h__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <net/mac80211.h>
+
+extern struct iwl_mod_params iwlwifi_mod_params;
+
+enum iwl_power_level {
+	IWL_POWER_INDEX_1,
+	IWL_POWER_INDEX_2,
+	IWL_POWER_INDEX_3,
+	IWL_POWER_INDEX_4,
+	IWL_POWER_INDEX_5,
+	IWL_POWER_NUM
+};
+
+#define IWL_DISABLE_HT_ALL	BIT(0)
+#define IWL_DISABLE_HT_TXAGG	BIT(1)
+#define IWL_DISABLE_HT_RXAGG	BIT(2)
+
+/**
+ * struct iwl_mod_params
+ *
+ * Holds the module parameters
+ *
+ * @sw_crypto: use software encryption instead of hardware, default = 0
+ * @disable_11n: disable 11n capabilities, default = 0,
+ *	use IWL_DISABLE_HT_* constants
+ * @amsdu_size_8K: enable 8K amsdu size, default = 1
+ * @restart_fw: restart firmware, default = 1
+ * @plcp_check: enable plcp health check, default = true
+ * @wd_disable: disable stuck queue watchdog check, default = 0
+ * @bt_coex_active: enable bt coex, default = true
+ * @led_mode: LED mode, default = 0 (system default)
+ * @power_save: enable power save, default = false
+ * @power_level: power level, default = 1
+ * @debug_level: levels are IWL_DL_*
+ * @ant_coupling: antenna coupling in dB, default = 0
+ * @bt_ch_announce: BT channel inhibition, default = enable
+ * @auto_agg: enable agg. without check, default = true
+ * @disable_5ghz: disable 5GHz capability, default = false
+ */
+struct iwl_mod_params {
+	int sw_crypto;
+	unsigned int disable_11n;
+	int amsdu_size_8K;
+	int restart_fw;
+	bool plcp_check;
+	int  wd_disable;
+	bool bt_coex_active;
+	int led_mode;
+	bool power_save;
+	int power_level;
+	u32 debug_level;
+	int ant_coupling;
+	bool bt_ch_announce;
+	bool auto_agg;
+	bool disable_5ghz;
+};
+
+#endif /* __iwl_modparams_h__ */
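
The header above only declares iwlwifi_mod_params; the definition and the module_param wiring live elsewhere in the driver and are not part of this hunk. As a rough sketch of how a couple of the fields would typically be exposed (the parameter names and the file holding them are assumptions):

#include <linux/module.h>
#include "iwl-modparams.h"

/* Hypothetical placement; defaults mirror the kernel-doc above. */
struct iwl_mod_params iwlwifi_mod_params = {
	.amsdu_size_8K	= 1,
	.restart_fw	= 1,
	.plcp_check	= true,
	.bt_coex_active	= true,
	.bt_ch_announce	= true,
	.auto_agg	= true,
};

module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "use software crypto (default 0 = hardware)");

module_param_named(power_save, iwlwifi_mod_params.power_save, bool, S_IRUGO);
MODULE_PARM_DESC(power_save, "enable power save (default: disabled)");
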
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index 88dc4a0..0066b89 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -75,21 +75,45 @@
 void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
 				  struct iwl_rx_packet *pkt)
 {
+	bool triggered = false;
+
 	if (!list_empty(&notif_wait->notif_waits)) {
 		struct iwl_notification_wait *w;
 
 		spin_lock(&notif_wait->notif_wait_lock);
 		list_for_each_entry(w, &notif_wait->notif_waits, list) {
-			if (w->cmd != pkt->hdr.cmd)
+			int i;
+			bool found = false;
+
+			/*
+			 * If it already finished (triggered) or has been
+			 * aborted, then don't evaluate it again to avoid races;
+			 * otherwise the function could be called again even
+			 * though it returned true before.
+			 */
+			if (w->triggered || w->aborted)
 				continue;
-			w->triggered = true;
-			if (w->fn)
-				w->fn(notif_wait, pkt, w->fn_data);
+
+			for (i = 0; i < w->n_cmds; i++) {
+				if (w->cmds[i] == pkt->hdr.cmd) {
+					found = true;
+					break;
+				}
+			}
+			if (!found)
+				continue;
+
+			if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
+				w->triggered = true;
+				triggered = true;
+			}
 		}
 		spin_unlock(&notif_wait->notif_wait_lock);
 
-		wake_up_all(&notif_wait->notif_waitq);
 	}
+
+	if (triggered)
+		wake_up_all(&notif_wait->notif_waitq);
 }
 
 void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
@@ -109,14 +133,18 @@
 void
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
 			   struct iwl_notification_wait *wait_entry,
-			   u8 cmd,
-			   void (*fn)(struct iwl_notif_wait_data *notif_wait,
+			   const u8 *cmds, int n_cmds,
+			   bool (*fn)(struct iwl_notif_wait_data *notif_wait,
 				      struct iwl_rx_packet *pkt, void *data),
 			   void *fn_data)
 {
+	if (WARN_ON(n_cmds > MAX_NOTIF_CMDS))
+		n_cmds = MAX_NOTIF_CMDS;
+
 	wait_entry->fn = fn;
 	wait_entry->fn_data = fn_data;
-	wait_entry->cmd = cmd;
+	wait_entry->n_cmds = n_cmds;
+	memcpy(wait_entry->cmds, cmds, n_cmds);
 	wait_entry->triggered = false;
 	wait_entry->aborted = false;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 5e8af95..82152310 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -72,11 +72,19 @@
 	wait_queue_head_t notif_waitq;
 };
 
+#define MAX_NOTIF_CMDS	5
+
 /**
  * struct iwl_notification_wait - notification wait entry
  * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
+ * @fn: Function called with the notification. If the function
+ *	returns true, the wait is over; if it returns false,
+ *	the waiter stays blocked. If no function is given, any
+ *	of the listed commands will unblock the waiter.
+ * @cmds: command IDs
+ * @n_cmds: number of command IDs
+ * @triggered: waiter should be woken up
+ * @aborted: wait was aborted
  *
  * This structure is not used directly; to wait for a
  * notification, declare it on the stack and call
@@ -93,11 +101,12 @@
 struct iwl_notification_wait {
 	struct list_head list;
 
-	void (*fn)(struct iwl_notif_wait_data *notif_data,
+	bool (*fn)(struct iwl_notif_wait_data *notif_data,
 		   struct iwl_rx_packet *pkt, void *data);
 	void *fn_data;
 
-	u8 cmd;
+	u8 cmds[MAX_NOTIF_CMDS];
+	u8 n_cmds;
 	bool triggered, aborted;
 };
 
@@ -112,8 +121,8 @@
 void __acquires(wait_entry)
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
 			   struct iwl_notification_wait *wait_entry,
-			   u8 cmd,
-			   void (*fn)(struct iwl_notif_wait_data *notif_data,
+			   const u8 *cmds, int n_cmds,
+			   bool (*fn)(struct iwl_notif_wait_data *notif_data,
 				      struct iwl_rx_packet *pkt, void *data),
 			   void *fn_data);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 6ea4163..4ef742b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -69,6 +69,7 @@
 struct iwl_device_cmd;
 struct iwl_rx_cmd_buffer;
 struct iwl_fw;
+struct iwl_cfg;
 
 /**
  * DOC: Operational mode - what is it ?
@@ -111,10 +112,10 @@
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *	HCMD this Rx responds to.
  *	Must be atomic.
- * @queue_full: notifies that a HW queue is full. Ac is the ac of the queue
+ * @queue_full: notifies that a HW queue is full.
  *	Must be atomic
  * @queue_not_full: notifies that a HW queue is not full any more.
- *	Ac is the ac of the queue. Must be atomic
+ *	Must be atomic
  * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
  *	the radio is killed. Must be atomic.
  * @free_skb: allows the transport layer to free skbs that haven't been
@@ -125,20 +126,23 @@
  * @cmd_queue_full: Called when the command queue gets full. Must be atomic.
  * @nic_config: configure NIC, called before firmware is started.
  *	May sleep
+ * @wimax_active: invoked when WiMax becomes active.  Must be atomic.
  */
 struct iwl_op_mode_ops {
 	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
+				     const struct iwl_cfg *cfg,
 				     const struct iwl_fw *fw);
 	void (*stop)(struct iwl_op_mode *op_mode);
 	int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
 		  struct iwl_device_cmd *cmd);
-	void (*queue_full)(struct iwl_op_mode *op_mode, u8 ac);
-	void (*queue_not_full)(struct iwl_op_mode *op_mode, u8 ac);
+	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
+	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
 	void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
 	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
 	void (*nic_error)(struct iwl_op_mode *op_mode);
 	void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
 	void (*nic_config)(struct iwl_op_mode *op_mode);
+	void (*wimax_active)(struct iwl_op_mode *op_mode);
 };
 
 /**
@@ -169,15 +173,16 @@
 	return op_mode->ops->rx(op_mode, rxb, cmd);
 }
 
-static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, u8 ac)
+static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
+					  int queue)
 {
-	op_mode->ops->queue_full(op_mode, ac);
+	op_mode->ops->queue_full(op_mode, queue);
 }
 
 static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
-					      u8 ac)
+					      int queue)
 {
-	op_mode->ops->queue_not_full(op_mode, ac);
+	op_mode->ops->queue_not_full(op_mode, queue);
 }
 
 static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
@@ -208,6 +213,11 @@
 	op_mode->ops->nic_config(op_mode);
 }
 
+static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
+{
+	op_mode->ops->wimax_active(op_mode);
+}
+
 /*****************************************************
 * Op mode layers implementations
 ******************************************************/
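
An op_mode implementation therefore has to provide the reworked hooks: start() now receives the config, the queue callbacks get a HW queue index instead of an AC number, and wimax_active is new. A skeletal ops table as a sketch, with every my_* symbol invented for illustration:

static struct iwl_op_mode *my_start(struct iwl_trans *trans,
				    const struct iwl_cfg *cfg,
				    const struct iwl_fw *fw)
{
	return NULL;	/* a real op_mode allocates and returns itself here */
}

static void my_queue_full(struct iwl_op_mode *op_mode, int queue)
{
	/* stop the mac80211 queue mapped to this HW queue */
}

static void my_queue_not_full(struct iwl_op_mode *op_mode, int queue)
{
	/* wake the mac80211 queue mapped to this HW queue */
}

static void my_wimax_active(struct iwl_op_mode *op_mode)
{
	/* react to WiMax preempting the shared radio */
}

static const struct iwl_op_mode_ops my_op_mode_ops = {
	.start		= my_start,
	.queue_full	= my_queue_full,
	.queue_not_full	= my_queue_not_full,
	.wimax_active	= my_wimax_active,
	/* .stop, .rx, .hw_rf_kill, .free_skb, .nic_error,
	 * .cmd_queue_full and .nic_config omitted for brevity
	 */
};
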
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index c5e339e..0c8a1c2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -60,17 +60,18 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
 
-#include "iwl-io.h"
-#include "iwl-shared.h"
 #include "iwl-trans.h"
-#include "iwl-csr.h"
 #include "iwl-cfg.h"
 #include "iwl-drv.h"
 #include "iwl-trans.h"
+#include "iwl-trans-pcie-int.h"
 
 #define IWL_PCI_DEVICE(dev, subdev, cfg) \
 	.vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
@@ -261,61 +262,46 @@
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT	0x041
 
+#ifndef CONFIG_IWLWIFI_IDI
+
 static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
-	struct iwl_shared *shrd;
 	struct iwl_trans *iwl_trans;
-	int err;
+	struct iwl_trans_pcie *trans_pcie;
 
-	shrd = kzalloc(sizeof(*iwl_trans->shrd), GFP_KERNEL);
-	if (!shrd) {
-		dev_printk(KERN_ERR, &pdev->dev,
-			   "Couldn't allocate iwl_shared");
-		err = -ENOMEM;
-		goto out_free_bus;
-	}
+	iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
+	if (iwl_trans == NULL)
+		return -ENOMEM;
 
-#ifdef CONFIG_IWLWIFI_IDI
-	iwl_trans = iwl_trans_idi_alloc(shrd, pdev, ent);
-#else
-	iwl_trans = iwl_trans_pcie_alloc(shrd, pdev, ent);
-#endif
-	if (iwl_trans == NULL) {
-		err = -ENOMEM;
-		goto out_free_bus;
-	}
-
-	shrd->trans = iwl_trans;
 	pci_set_drvdata(pdev, iwl_trans);
 
-	err = iwl_drv_start(shrd, iwl_trans, cfg);
-	if (err)
+	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
+	trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
+	if (!trans_pcie->drv)
 		goto out_free_trans;
 
 	return 0;
 
 out_free_trans:
-	iwl_trans_free(iwl_trans);
+	iwl_trans_pcie_free(iwl_trans);
 	pci_set_drvdata(pdev, NULL);
-out_free_bus:
-	kfree(shrd);
-	return err;
+	return -EFAULT;
 }
 
 static void __devexit iwl_pci_remove(struct pci_dev *pdev)
 {
-	struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
-	struct iwl_shared *shrd = iwl_trans->shrd;
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	iwl_drv_stop(shrd);
-	iwl_trans_free(shrd->trans);
+	iwl_drv_stop(trans_pcie->drv);
+	iwl_trans_pcie_free(trans);
 
 	pci_set_drvdata(pdev, NULL);
-
-	kfree(shrd);
 }
 
+#endif /* CONFIG_IWLWIFI_IDI */
+
 #ifdef CONFIG_PM_SLEEP
 
 static int iwl_pci_suspend(struct device *device)
@@ -360,6 +346,15 @@
 
 #endif
 
+#ifdef CONFIG_IWLWIFI_IDI
+/*
+ * Defined externally in iwl-idi.c
+ */
+int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+void __devexit iwl_pci_remove(struct pci_dev *pdev);
+
+#endif /* CONFIG_IWLWIFI_IDI */
+
 static struct pci_driver iwl_pci_driver = {
 	.name = DRV_NAME,
 	.id_table = iwl_hw_card_ids,
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
new file mode 100644
index 0000000..f166955
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -0,0 +1,288 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "iwl-debug.h"
+#include "iwl-dev.h"
+
+#include "iwl-phy-db.h"
+
+#define CHANNEL_NUM_SIZE	4	/* size of the channel count field in calib_ch */
+
+struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
+{
+	struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
+					    GFP_KERNEL);
+
+	if (!phy_db)
+		return phy_db;
+
+	phy_db->dev = dev;
+
+	/* TODO: add default values of the phy db. */
+	return phy_db;
+}
+
+/*
+ * get phy db section: returns a pointer to a phy db section specified by
+ * type and channel group id.
+ */
+static struct iwl_phy_db_entry *
+iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
+		       enum iwl_phy_db_section_type type,
+		       u16 chg_id)
+{
+	if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
+		return NULL;
+
+	switch (type) {
+	case IWL_PHY_DB_CFG:
+		return &phy_db->cfg;
+	case IWL_PHY_DB_CALIB_NCH:
+		return &phy_db->calib_nch;
+	case IWL_PHY_DB_CALIB_CH:
+		return &phy_db->calib_ch;
+	case IWL_PHY_DB_CALIB_CHG_PAPD:
+		if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
+			return NULL;
+		return &phy_db->calib_ch_group_papd[chg_id];
+	case IWL_PHY_DB_CALIB_CHG_TXP:
+		if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
+			return NULL;
+		return &phy_db->calib_ch_group_txp[chg_id];
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
+static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
+				    enum iwl_phy_db_section_type type,
+				    u16 chg_id)
+{
+	struct iwl_phy_db_entry *entry =
+				iwl_phy_db_get_section(phy_db, type, chg_id);
+	if (!entry)
+		return;
+
+	kfree(entry->data);
+	entry->data = NULL;
+	entry->size = 0;
+}
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db)
+{
+	int i;
+
+	if (!phy_db)
+		return;
+
+	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
+	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
+	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
+	for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
+		iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
+	for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
+		iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
+
+	kfree(phy_db);
+}
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
+			   enum iwl_phy_db_section_type type, u8 *data,
+			   u16 size, gfp_t alloc_ctx)
+{
+	struct iwl_phy_db_entry *entry;
+	u16 chg_id = 0;
+
+	if (!phy_db)
+		return -EINVAL;
+
+	if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
+	    type == IWL_PHY_DB_CALIB_CHG_TXP)
+		chg_id = le16_to_cpup((__le16 *)data);
+
+	entry = iwl_phy_db_get_section(phy_db, type, chg_id);
+	if (!entry)
+		return -EINVAL;
+
+	kfree(entry->data);
+	entry->data = kmemdup(data, size, alloc_ctx);
+	if (!entry->data) {
+		entry->size = 0;
+		return -ENOMEM;
+	}
+
+	entry->size = size;
+
+	if (type == IWL_PHY_DB_CALIB_CH) {
+		phy_db->channel_num = le32_to_cpup((__le32 *)data);
+		phy_db->channel_size =
+		      (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
+	}
+
+	return 0;
+}
+
+static int is_valid_channel(u16 ch_id)
+{
+	if (ch_id <= 14 ||
+	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
+	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
+	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
+		return 1;
+	return 0;
+}
+
+static u8 ch_id_to_ch_index(u16 ch_id)
+{
+	if (WARN_ON(!is_valid_channel(ch_id)))
+		return 0xff;
+
+	if (ch_id <= 14)
+		return ch_id - 1;
+	if (ch_id <= 64)
+		return (ch_id + 20) / 4;
+	if (ch_id <= 140)
+		return (ch_id - 12) / 4;
+	return (ch_id - 13) / 4;
+}
+
+
+static u16 channel_id_to_papd(u16 ch_id)
+{
+	if (WARN_ON(!is_valid_channel(ch_id)))
+		return 0xff;
+
+	if (1 <= ch_id && ch_id <= 14)
+		return 0;
+	if (36 <= ch_id && ch_id <= 64)
+		return 1;
+	if (100 <= ch_id && ch_id <= 140)
+		return 2;
+	return 3;
+}
+
+static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
+{
+	struct iwl_phy_db_chg_txp *txp_chg;
+	int i;
+	u8 ch_index = ch_id_to_ch_index(ch_id);
+	if (ch_index == 0xff)
+		return 0xff;
+
+	for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
+		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
+		if (!txp_chg)
+			return 0xff;
+		/*
+		 * Look for the first channel group whose max channel is
+		 * higher than the wanted channel.
+		 */
+		if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
+			return i;
+	}
+	return 0xff;
+}
+
+int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
+				enum iwl_phy_db_section_type type, u8 **data,
+				u16 *size, u16 ch_id)
+{
+	struct iwl_phy_db_entry *entry;
+	u32 channel_num;
+	u32 channel_size;
+	u16 ch_group_id = 0;
+	u16 index;
+
+	if (!phy_db)
+		return -EINVAL;
+
+	/* find wanted channel group */
+	if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
+		ch_group_id = channel_id_to_papd(ch_id);
+	else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
+		ch_group_id = channel_id_to_txp(phy_db, ch_id);
+
+	entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
+	if (!entry)
+		return -EINVAL;
+
+	if (type == IWL_PHY_DB_CALIB_CH) {
+		index = ch_id_to_ch_index(ch_id);
+		channel_num = phy_db->channel_num;
+		channel_size = phy_db->channel_size;
+		if (index >= channel_num) {
+			IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
+			return -EINVAL;
+		}
+		*data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
+		*size = channel_size;
+	} else {
+		*data = entry->data;
+		*size = entry->size;
+	}
+	return 0;
+}
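
The channel mapping helpers are pure arithmetic, so their behaviour can be spelled out directly from the code above. A hypothetical self-check, which would have to live in this file since the helpers are static:

static void __maybe_unused iwl_phy_db_mapping_selftest(void)
{
	/* Expected values follow from ch_id_to_ch_index() and
	 * channel_id_to_papd() above.
	 */
	WARN_ON(ch_id_to_ch_index(1) != 0);	/* 2.4 GHz: index = ch_id - 1 */
	WARN_ON(ch_id_to_ch_index(36) != 14);	/* (36 + 20) / 4 */
	WARN_ON(ch_id_to_ch_index(100) != 22);	/* (100 - 12) / 4 */

	WARN_ON(channel_id_to_papd(14) != 0);	/* 2.4 GHz group */
	WARN_ON(channel_id_to_papd(64) != 1);	/* 36..64 */
	WARN_ON(channel_id_to_papd(140) != 2);	/* 100..140 */
	WARN_ON(channel_id_to_papd(149) != 3);	/* above 140 */
}
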
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
new file mode 100644
index 0000000..c34c6a9
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -0,0 +1,129 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_PHYDB_H__
+#define __IWL_PHYDB_H__
+
+#include <linux/types.h>
+
+#define IWL_NUM_PAPD_CH_GROUPS	4
+#define IWL_NUM_TXP_CH_GROUPS	8
+
+struct iwl_phy_db_entry {
+	u16	size;
+	u8	*data;
+};
+
+struct iwl_shared;
+
+/**
+ * struct iwl_phy_db - stores phy configuration and calibration data.
+ *
+ * @cfg: phy configuration.
+ * @calib_nch: non channel specific calibration data.
+ * @calib_ch: channel specific calibration data.
+ * @calib_ch_group_papd: calibration data related to papd channel group.
+ * @calib_ch_group_txp: calibration data related to tx power channel group.
+ */
+struct iwl_phy_db {
+	struct iwl_phy_db_entry	cfg;
+	struct iwl_phy_db_entry	calib_nch;
+	struct iwl_phy_db_entry	calib_ch;
+	struct iwl_phy_db_entry	calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
+	struct iwl_phy_db_entry	calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
+
+	u32 channel_num;
+	u32 channel_size;
+
+	/* for access to the logger */
+	struct device *dev;
+};
+
+enum iwl_phy_db_section_type {
+	IWL_PHY_DB_CFG = 1,
+	IWL_PHY_DB_CALIB_NCH,
+	IWL_PHY_DB_CALIB_CH,
+	IWL_PHY_DB_CALIB_CHG_PAPD,
+	IWL_PHY_DB_CALIB_CHG_TXP,
+	IWL_PHY_DB_MAX
+};
+
+/* for parsing tx power channel group data that comes from the firmware */
+struct iwl_phy_db_chg_txp {
+	__le32 space;
+	__le16 max_channel_idx;
+} __packed;
+
+struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
+			   enum iwl_phy_db_section_type type, u8 *data,
+			   u16 size, gfp_t alloc_ctx);
+
+int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
+				enum iwl_phy_db_section_type type, u8 **data,
+				u16 *size, u16 ch_id);
+
+#endif /* __IWL_PHYDB_H__ */
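
Tying the API above together, the expected life cycle is init, set sections as calibration data arrives from the firmware, look them up by type (and channel, for per-channel data), then free. A hedged usage sketch with a fabricated payload:

#include <linux/device.h>
#include <linux/slab.h>
#include "iwl-phy-db.h"

static int iwl_phy_db_example(struct device *dev)
{
	struct iwl_phy_db *phy_db;
	u8 cfg_blob[8] = { 0 };	/* stand-in for a firmware payload */
	u8 *data;
	u16 size;
	int ret;

	phy_db = iwl_phy_db_init(dev);
	if (!phy_db)
		return -ENOMEM;

	ret = iwl_phy_db_set_section(phy_db, IWL_PHY_DB_CFG,
				     cfg_blob, sizeof(cfg_blob), GFP_KERNEL);
	if (ret)
		goto out;

	/* ch_id is ignored for sections that are not channel specific */
	ret = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG,
					  &data, &size, 0);
out:
	iwl_phy_db_free(phy_db);
	return ret;
}
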
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 958d9d0..8352265 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -37,13 +37,12 @@
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
 #include "iwl-agn.h"
-#include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-commands.h"
 #include "iwl-debug.h"
 #include "iwl-power.h"
 #include "iwl-trans.h"
-#include "iwl-shared.h"
+#include "iwl-modparams.h"
 
 /*
  * Setting power level allows the card to go to sleep when not busy.
@@ -167,7 +166,7 @@
 	u8 skip;
 	u32 slp_itrvl;
 
-	if (cfg(priv)->adv_pm) {
+	if (priv->cfg->adv_pm) {
 		table = apm_range_2;
 		if (period <= IWL_DTIM_RANGE_1_MAX)
 			table = apm_range_1;
@@ -215,13 +214,13 @@
 	else
 		cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
 
-	if (cfg(priv)->base_params->shadow_reg_enable)
+	if (priv->cfg->base_params->shadow_reg_enable)
 		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
 	else
 		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
 
 	if (iwl_advanced_bt_coexist(priv)) {
-		if (!cfg(priv)->bt_params->bt_sco_disable)
+		if (!priv->cfg->bt_params->bt_sco_disable)
 			cmd->flags |= IWL_POWER_BT_SCO_ENA;
 		else
 			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -268,61 +267,6 @@
 	IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
 }
 
-static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
-				     struct iwl_powertable_cmd *cmd,
-				     int dynps_ms, int wakeup_period)
-{
-	/*
-	 * These are the original power level 3 sleep successions. The
-	 * device may behave better with such succession and was also
-	 * only tested with that. Just like the original sleep commands,
-	 * also adjust the succession here to the wakeup_period below.
-	 * The ranges are the same as for the sleep commands, 0-2, 3-9
-	 * and >10, which is selected based on the DTIM interval for
-	 * the sleep index but here we use the wakeup period since that
-	 * is what we need to do for the latency requirements.
-	 */
-	static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 };
-	static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 };
-	static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF };
-	const u8 *slp_succ = slp_succ_r0;
-	int i;
-
-	if (wakeup_period > IWL_DTIM_RANGE_0_MAX)
-		slp_succ = slp_succ_r1;
-	if (wakeup_period > IWL_DTIM_RANGE_1_MAX)
-		slp_succ = slp_succ_r2;
-
-	memset(cmd, 0, sizeof(*cmd));
-
-	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
-		     IWL_POWER_FAST_PD; /* no use seeing frames for others */
-
-	if (priv->power_data.bus_pm)
-		cmd->flags |= IWL_POWER_PCI_PM_MSK;
-
-	if (cfg(priv)->base_params->shadow_reg_enable)
-		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
-	else
-		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
-
-	if (iwl_advanced_bt_coexist(priv)) {
-		if (!cfg(priv)->bt_params->bt_sco_disable)
-			cmd->flags |= IWL_POWER_BT_SCO_ENA;
-		else
-			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
-	}
-
-	cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
-	cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);
-
-	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
-		cmd->sleep_interval[i] =
-			cpu_to_le32(min_t(int, slp_succ[i], wakeup_period));
-
-	IWL_DEBUG_POWER(priv, "Automatic sleep command\n");
-}
-
 static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
 {
 	IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
@@ -350,7 +294,7 @@
 
 	if (priv->wowlan)
 		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
-	else if (!cfg(priv)->base_params->no_idle_support &&
+	else if (!priv->cfg->base_params->no_idle_support &&
 		 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
 		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
 	else if (iwl_tt_is_low_power_state(priv)) {
@@ -363,18 +307,15 @@
 		iwl_static_sleep_cmd(priv, cmd,
 				     priv->power_data.debug_sleep_level_override,
 				     dtimper);
-	else if (iwlagn_mod_params.no_sleep_autoadjust) {
-		if (iwlagn_mod_params.power_level > IWL_POWER_INDEX_1 &&
-		    iwlagn_mod_params.power_level <= IWL_POWER_INDEX_5)
+	else {
+		if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 &&
+		    iwlwifi_mod_params.power_level <= IWL_POWER_INDEX_5)
 			iwl_static_sleep_cmd(priv, cmd,
-				iwlagn_mod_params.power_level, dtimper);
+				iwlwifi_mod_params.power_level, dtimper);
 		else
 			iwl_static_sleep_cmd(priv, cmd,
 				IWL_POWER_INDEX_1, dtimper);
-	} else
-		iwl_power_fill_sleep_cmd(priv, cmd,
-					 priv->hw->conf.dynamic_ps_timeout,
-					 priv->hw->conf.max_sleep_period);
+	}
 }
 
 int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
@@ -403,12 +344,12 @@
 	}
 
 	if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
-		set_bit(STATUS_POWER_PMI, &priv->shrd->status);
+		iwl_dvm_set_pmi(priv, true);
 
 	ret = iwl_set_power(priv, cmd);
 	if (!ret) {
 		if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
-			clear_bit(STATUS_POWER_PMI, &priv->shrd->status);
+			iwl_dvm_set_pmi(priv, false);
 
 		if (update_chains)
 			iwl_update_chain_flags(priv);
@@ -436,7 +377,7 @@
 /* initialize to default */
 void iwl_power_initialize(struct iwl_priv *priv)
 {
-	priv->power_data.bus_pm = trans(priv)->pm_support;
+	priv->power_data.bus_pm = priv->trans->pm_support;
 
 	priv->power_data.debug_sleep_level_override = -1;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 07a19fc..21afc92 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -30,15 +30,6 @@
 
 #include "iwl-commands.h"
 
-enum iwl_power_level {
-	IWL_POWER_INDEX_1,
-	IWL_POWER_INDEX_2,
-	IWL_POWER_INDEX_3,
-	IWL_POWER_INDEX_4,
-	IWL_POWER_INDEX_5,
-	IWL_POWER_NUM
-};
-
 struct iwl_power_mgr {
 	struct iwl_powertable_cmd sleep_cmd;
 	struct iwl_powertable_cmd sleep_cmd_next;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 75dc20b..3b106929 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -223,12 +223,33 @@
 #define SCD_AIT			(SCD_BASE + 0x0c)
 #define SCD_TXFACT		(SCD_BASE + 0x10)
 #define SCD_ACTIVE		(SCD_BASE + 0x14)
-#define SCD_QUEUE_WRPTR(x)	(SCD_BASE + 0x18 + (x) * 4)
-#define SCD_QUEUE_RDPTR(x)	(SCD_BASE + 0x68 + (x) * 4)
 #define SCD_QUEUECHAIN_SEL	(SCD_BASE + 0xe8)
 #define SCD_AGGR_SEL		(SCD_BASE + 0x248)
 #define SCD_INTERRUPT_MASK	(SCD_BASE + 0x108)
-#define SCD_QUEUE_STATUS_BITS(x)	(SCD_BASE + 0x10c + (x) * 4)
+
+static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
+{
+	if (chnl < 20)
+		return SCD_BASE + 0x18 + chnl * 4;
+	WARN_ON_ONCE(chnl >= 32);
+	return SCD_BASE + 0x284 + (chnl - 20) * 4;
+}
+
+static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
+{
+	if (chnl < 20)
+		return SCD_BASE + 0x68 + chnl * 4;
+	WARN_ON_ONCE(chnl >= 32);
+	return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
+}
+
+static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
+{
+	if (chnl < 20)
+		return SCD_BASE + 0x10c + chnl * 4;
+	WARN_ON_ONCE(chnl >= 32);
+	return SCD_BASE + 0x384 + (chnl - 20) * 4;
+}
 
 /*********************** END TX SCHEDULER *************************************/
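
Queues 20-31 have their scheduler registers in a second block, which is why the former SCD_QUEUE_*(x) macros became range-aware inline helpers. An illustrative self-check of the layout they encode (not part of the patch):

static void __maybe_unused iwl_scd_reg_selftest(void)
{
	/* Values follow directly from the inline helpers above. */
	WARN_ON(SCD_QUEUE_WRPTR(0)  != SCD_BASE + 0x18);
	WARN_ON(SCD_QUEUE_WRPTR(19) != SCD_BASE + 0x18 + 19 * 4);
	WARN_ON(SCD_QUEUE_WRPTR(20) != SCD_BASE + 0x284);	/* extended block */
	WARN_ON(SCD_QUEUE_RDPTR(20) != SCD_BASE + 0x2B4);
	WARN_ON(SCD_QUEUE_STATUS_BITS(31) != SCD_BASE + 0x384 + 11 * 4);
}
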
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 902efe4..a8437a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -32,7 +32,6 @@
 
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-agn.h"
 #include "iwl-trans.h"
@@ -69,7 +68,7 @@
 	if (!test_bit(STATUS_READY, &priv->status) ||
 	    !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
 	    !test_bit(STATUS_SCAN_HW, &priv->status) ||
-	    test_bit(STATUS_FW_ERROR, &priv->shrd->status))
+	    test_bit(STATUS_FW_ERROR, &priv->status))
 		return -EIO;
 
 	ret = iwl_dvm_send_cmd(priv, &cmd);
@@ -451,6 +450,46 @@
 	return iwl_limit_dwell(priv, passive);
 }
 
+/* Return a valid, unused channel for a passive scan to reset the RF */
+static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
+				 enum ieee80211_band band)
+{
+	const struct iwl_channel_info *ch_info;
+	int i;
+	u8 channel = 0;
+	u8 min, max;
+	struct iwl_rxon_context *ctx;
+
+	if (band == IEEE80211_BAND_5GHZ) {
+		min = 14;
+		max = priv->channel_count;
+	} else {
+		min = 0;
+		max = 14;
+	}
+
+	for (i = min; i < max; i++) {
+		bool busy = false;
+
+		for_each_context(priv, ctx) {
+			busy = priv->channel_info[i].channel ==
+				le16_to_cpu(ctx->staging.channel);
+			if (busy)
+				break;
+		}
+
+		if (busy)
+			continue;
+
+		channel = priv->channel_info[i].channel;
+		ch_info = iwl_get_channel_info(priv, band, channel);
+		if (is_channel_valid(ch_info))
+			break;
+	}
+
+	return channel;
+}
+
 static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
 					   struct ieee80211_vif *vif,
 					   enum ieee80211_band band,
@@ -633,12 +672,12 @@
 	u16 rx_chain = 0;
 	enum ieee80211_band band;
 	u8 n_probes = 0;
-	u8 rx_ant = hw_params(priv).valid_rx_ant;
+	u8 rx_ant = priv->hw_params.valid_rx_ant;
 	u8 rate;
 	bool is_active = false;
 	int  chan_mod;
 	u8 active_chains;
-	u8 scan_tx_antennas = hw_params(priv).valid_tx_ant;
+	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
 	int ret;
 
 	lockdep_assert_held(&priv->mutex);
@@ -751,8 +790,8 @@
 		 * Internal scans are passive, so we can indiscriminately set
 		 * the BT ignore flag on 2.4 GHz since it applies to TX only.
 		 */
-		if (cfg(priv)->bt_params &&
-		    cfg(priv)->bt_params->advanced_bt_coexist)
+		if (priv->cfg->bt_params &&
+		    priv->cfg->bt_params->advanced_bt_coexist)
 			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
 		break;
 	case IEEE80211_BAND_5GHZ:
@@ -793,12 +832,9 @@
 
 	band = priv->scan_band;
 
-	if (cfg(priv)->scan_rx_antennas[band])
-		rx_ant = cfg(priv)->scan_rx_antennas[band];
-
 	if (band == IEEE80211_BAND_2GHZ &&
-	    cfg(priv)->bt_params &&
-	    cfg(priv)->bt_params->advanced_bt_coexist) {
+	    priv->cfg->bt_params &&
+	    priv->cfg->bt_params->advanced_bt_coexist) {
 		/* transmit 2.4 GHz probes only on first antenna */
 		scan_tx_antennas = first_antenna(scan_tx_antennas);
 	}
@@ -809,8 +845,12 @@
 	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
 	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
 
-	/* In power save mode use one chain, otherwise use all chains */
-	if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) {
+	/*
+	 * In power save mode while associated, use one chain;
+	 * otherwise use all chains.
+	 */
+	if (test_bit(STATUS_POWER_PMI, &priv->status) &&
+	    !(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) {
 		/* rx_ant has been set to all valid chains previously */
 		active_chains = rx_ant &
 				((u8)(priv->chain_noise_data.active_chains));
@@ -822,8 +862,8 @@
 
 		rx_ant = first_antenna(active_chains);
 	}
-	if (cfg(priv)->bt_params &&
-	    cfg(priv)->bt_params->advanced_bt_coexist &&
+	if (priv->cfg->bt_params &&
+	    priv->cfg->bt_params->advanced_bt_coexist &&
 	    priv->bt_full_concurrent) {
 		/* operated as 1x1 in full concurrency mode */
 		rx_ant = first_antenna(rx_ant);
@@ -831,7 +871,7 @@
 
 	/* MIMO is not used here, but value is required */
 	rx_chain |=
-		hw_params(priv).valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+		priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
 	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
 	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
 	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
@@ -944,7 +984,7 @@
 
 void iwl_init_scan_params(struct iwl_priv *priv)
 {
-	u8 ant_idx = fls(hw_params(priv).valid_tx_ant) - 1;
+	u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
 	if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
 		priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
 	if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
deleted file mode 100644
index b515d65..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ /dev/null
@@ -1,435 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_shared_h__
-#define __iwl_shared_h__
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/gfp.h>
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-fw.h"
-
-/**
- * DOC: shared area - role and goal
- *
- * The shared area contains all the data exported by the upper layer to the
- * other layers. Since the bus and transport layer shouldn't dereference
- * iwl_priv, all the data needed by the upper layer and the transport / bus
- * layer must be here.
- * The shared area also holds pointer to all the other layers. This allows a
- * layer to call a function from another layer.
- *
- * NOTE: All the layers hold a pointer to the shared area which must be shrd.
- *	A few macros assume that (_m)->shrd points to the shared area no matter
- *	what _m is.
- *
- * The bus layer gets notifications about enumeration, suspend and resume.
- * For the moment, the bus layer is not a Linux kernel module in itself, and
- * the module_init function of the driver must call the bus specific
- * registration functions. These functions are listed at the end of this file.
- * For the moment, there is only one implementation of this interface: PCI-e.
- * This implementation is iwl-pci.c
- */
-
-struct iwl_priv;
-struct iwl_trans;
-struct iwl_sensitivity_ranges;
-struct iwl_trans_ops;
-
-#define DRV_NAME        "iwlwifi"
-#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT	"Copyright(c) 2003-2012 Intel Corporation"
-#define DRV_AUTHOR     "<ilw@linux.intel.com>"
-
-extern struct iwl_mod_params iwlagn_mod_params;
-
-#define IWL_DISABLE_HT_ALL	BIT(0)
-#define IWL_DISABLE_HT_TXAGG	BIT(1)
-#define IWL_DISABLE_HT_RXAGG	BIT(2)
-
-/**
- * struct iwl_mod_params
- *
- * Holds the module parameters
- *
- * @sw_crypto: using hardware encryption, default = 0
- * @disable_11n: disable 11n capabilities, default = 0,
- *	use IWL_DISABLE_HT_* constants
- * @amsdu_size_8K: enable 8K amsdu size, default = 1
- * @antenna: both antennas (use diversity), default = 0
- * @restart_fw: restart firmware, default = 1
- * @plcp_check: enable plcp health check, default = true
- * @ack_check: disable ack health check, default = false
- * @wd_disable: enable stuck queue check, default = 0
- * @bt_coex_active: enable bt coex, default = true
- * @led_mode: system default, default = 0
- * @no_sleep_autoadjust: disable autoadjust, default = true
- * @power_save: disable power save, default = false
- * @power_level: power level, default = 1
- * @debug_level: levels are IWL_DL_*
- * @ant_coupling: antenna coupling in dB, default = 0
- * @bt_ch_announce: BT channel inhibition, default = enable
- * @wanted_ucode_alternative: ucode alternative to use, default = 1
- * @auto_agg: enable agg. without check, default = true
- */
-struct iwl_mod_params {
-	int sw_crypto;
-	unsigned int disable_11n;
-	int amsdu_size_8K;
-	int antenna;
-	int restart_fw;
-	bool plcp_check;
-	bool ack_check;
-	int  wd_disable;
-	bool bt_coex_active;
-	int led_mode;
-	bool no_sleep_autoadjust;
-	bool power_save;
-	int power_level;
-	u32 debug_level;
-	int ant_coupling;
-	bool bt_ch_announce;
-	int wanted_ucode_alternative;
-	bool auto_agg;
-};
-
-/**
- * struct iwl_hw_params
- *
- * Holds the hardware parameters
- *
- * @num_ampdu_queues: num of ampdu queues
- * @tx_chains_num: Number of TX chains
- * @rx_chains_num: Number of RX chains
- * @valid_tx_ant: usable antennas for TX
- * @valid_rx_ant: usable antennas for RX
- * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
- * @sku: sku read from EEPROM
- * @rx_page_order: Rx buffer page order
- * @ct_kill_threshold: temperature threshold - in hw dependent unit
- * @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
- *	relevant for 1000, 6000 and up
- * @wd_timeout: TX queues watchdog timeout
- * @struct iwl_sensitivity_ranges: range of sensitivity values
- * @use_rts_for_aggregation: use rts/cts protection for HT traffic
- */
-struct iwl_hw_params {
-	u8  num_ampdu_queues;
-	u8  tx_chains_num;
-	u8  rx_chains_num;
-	u8  valid_tx_ant;
-	u8  valid_rx_ant;
-	u8  ht40_channel;
-	bool use_rts_for_aggregation;
-	u16 sku;
-	u32 rx_page_order;
-	u32 ct_kill_threshold;
-	u32 ct_kill_exit_threshold;
-	unsigned int wd_timeout;
-
-	const struct iwl_sensitivity_ranges *sens;
-};
-
-/*
- * LED mode
- *    IWL_LED_DEFAULT:  use device default
- *    IWL_LED_RF_STATE: turn LED on/off based on RF state
- *			LED ON  = RF ON
- *			LED OFF = RF OFF
- *    IWL_LED_BLINK:    adjust led blink rate based on blink table
- *    IWL_LED_DISABLE:	led disabled
- */
-enum iwl_led_mode {
-	IWL_LED_DEFAULT,
-	IWL_LED_RF_STATE,
-	IWL_LED_BLINK,
-	IWL_LED_DISABLE,
-};
-
-/*
- * @max_ll_items: max number of OTP blocks
- * @shadow_ram_support: shadow support for OTP memory
- * @led_compensation: compensate on the led on/off time per HW according
- *	to the deviation to achieve the desired led frequency.
- *	The detail algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @adv_thermal_throttle: support advanced thermal throttling
- * @support_ct_kill_exit: support ct kill exit condition
- * @support_wimax_coexist: support wimax/wifi co-exist
- * @plcp_delta_threshold: plcp error rate threshold used to trigger
- *	radio tuning when there is a high receiving plcp error rate
- * @chain_noise_scale: default chain noise scale used for gain computation
- * @wd_timeout: TX queues watchdog timeout
- * @max_event_log_size: size of the event log buffer for ucode event logging
- * @shadow_reg_enable: HW shadow register bit
- * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
- * @no_idle_support: do not support idle mode
- * @wd_disable: disable watchdog timer
- */
-struct iwl_base_params {
-	int eeprom_size;
-	int num_of_queues;	/* def: HW dependent */
-	int num_of_ampdu_queues;/* def: HW dependent */
-	/* for iwl_apm_init() */
-	u32 pll_cfg_val;
-
-	const u16 max_ll_items;
-	const bool shadow_ram_support;
-	u16 led_compensation;
-	bool adv_thermal_throttle;
-	bool support_ct_kill_exit;
-	const bool support_wimax_coexist;
-	u8 plcp_delta_threshold;
-	s32 chain_noise_scale;
-	unsigned int wd_timeout;
-	u32 max_event_log_size;
-	const bool shadow_reg_enable;
-	const bool hd_v2;
-	const bool no_idle_support;
-	const bool wd_disable;
-};
-
-/*
- * @advanced_bt_coexist: support advanced bt coexist
- * @bt_init_traffic_load: specify initial bt traffic load
- * @bt_prio_boost: default bt priority boost value
- * @agg_time_limit: maximum number of uSec in aggregation
- * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
- */
-struct iwl_bt_params {
-	bool advanced_bt_coexist;
-	u8 bt_init_traffic_load;
-	u8 bt_prio_boost;
-	u16 agg_time_limit;
-	bool bt_sco_disable;
-	bool bt_session_2;
-};
-/*
- * @use_rts_for_aggregation: use rts/cts protection for HT traffic
- */
-struct iwl_ht_params {
-	const bool ht_greenfield_support; /* if used set to true */
-	bool use_rts_for_aggregation;
-	enum ieee80211_smps_mode smps_mode;
-};
-
-/**
- * struct iwl_cfg
- * @name: Official name of the device
- * @fw_name_pre: Firmware filename prefix. The api version and extension
- *	(.ucode) will be added to filename before loading from disk. The
- *	filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- *	without a warning, for use in transitions
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @max_inst_size: The maximal length of the fw inst section
- * @max_data_size: The maximal length of the fw data section
- * @valid_tx_ant: valid transmit antenna
- * @valid_rx_ant: valid receive antenna
- * @eeprom_ver: EEPROM version
- * @eeprom_calib_ver: EEPROM calibration version
- * @lib: pointer to the lib ops
- * @additional_nic_config: additional nic configuration
- * @base_params: pointer to basic parameters
- * @ht_params: pointer to ht parameters
- * @bt_params: pointer to bt parameters
- * @need_temp_offset_calib: need to perform temperature offset calibration
- * @no_xtal_calib: some devices do not need crystal calibration data,
- *	don't send it to those
- * @scan_rx_antennas: available antenna for scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- * @adv_pm: advanced power management
- * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
- * @internal_wimax_coex: internal wifi/wimax combo device
- * @iq_invert: I/Q inversion
- * @temp_offset_v2: support v2 of temperature offset calibration
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version.
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision.
- */
-struct iwl_cfg {
-	/* params specific to an individual device within a device family */
-	const char *name;
-	const char *fw_name_pre;
-	const unsigned int ucode_api_max;
-	const unsigned int ucode_api_ok;
-	const unsigned int ucode_api_min;
-	const u32 max_data_size;
-	const u32 max_inst_size;
-	u8   valid_tx_ant;
-	u8   valid_rx_ant;
-	u16  eeprom_ver;
-	u16  eeprom_calib_ver;
-	const struct iwl_lib_ops *lib;
-	void (*additional_nic_config)(struct iwl_priv *priv);
-	/* params not likely to change within a device family */
-	const struct iwl_base_params *base_params;
-	/* params likely to change within a device family */
-	const struct iwl_ht_params *ht_params;
-	const struct iwl_bt_params *bt_params;
-	const bool need_temp_offset_calib; /* if used set to true */
-	const bool no_xtal_calib;
-	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
-	enum iwl_led_mode led_mode;
-	const bool adv_pm;
-	const bool rx_with_siso_diversity;
-	const bool internal_wimax_coex;
-	const bool iq_invert;
-	const bool temp_offset_v2;
-};
-
-/**
- * struct iwl_shared - shared fields for all the layers of the driver
- *
- * @status: STATUS_*
- * @wowlan: are we running wowlan uCode
- * @valid_contexts: microcode/device supports multiple contexts
- * @bus: pointer to the bus layer data
- * @cfg: see struct iwl_cfg
- * @priv: pointer to the upper layer data
- * @trans: pointer to the transport layer data
- * @nic: pointer to the nic data
- * @hw_params: see struct iwl_hw_params
- * @lock: protect general shared data
- * @eeprom: pointer to the eeprom/OTP image
- * @ucode_type: indicator of loaded ucode image
- * @device_pointers: pointers to ucode event tables
- */
-struct iwl_shared {
-	unsigned long status;
-	u8 valid_contexts;
-
-	const struct iwl_cfg *cfg;
-	struct iwl_trans *trans;
-	void *drv;
-	struct iwl_hw_params hw_params;
-	const struct iwl_fw *fw;
-
-	/* eeprom -- this is in the card's little endian byte order */
-	u8 *eeprom;
-
-	/* ucode related variables */
-	enum iwl_ucode_type ucode_type;
-
-	struct {
-		u32 error_event_table;
-		u32 log_event_table;
-	} device_pointers;
-
-};
-
-/* Whatever _m is (iwl_trans, iwl_priv), these macros will work */
-#define cfg(_m)		((_m)->shrd->cfg)
-#define trans(_m)	((_m)->shrd->trans)
-#define hw_params(_m)	((_m)->shrd->hw_params)
-
-static inline bool iwl_have_debug_level(u32 level)
-{
-	return iwlagn_mod_params.debug_level & level;
-}
-
-enum iwl_rxon_context_id {
-	IWL_RXON_CTX_BSS,
-	IWL_RXON_CTX_PAN,
-
-	NUM_IWL_RXON_CTX
-};
-
-int iwlagn_hw_valid_rtc_data_addr(u32 addr);
-const char *get_cmd_string(u8 cmd);
-
-#define IWL_CMD(x) case x: return #x
-
-/*****************************************************
-* DRIVER STATUS FUNCTIONS
-******************************************************/
-#define STATUS_HCMD_ACTIVE	0	/* host command in progress */
-/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
-#define STATUS_INT_ENABLED	2
-#define STATUS_RF_KILL_HW	3
-#define STATUS_CT_KILL		4
-#define STATUS_INIT		5
-#define STATUS_ALIVE		6
-#define STATUS_READY		7
-#define STATUS_TEMPERATURE	8
-#define STATUS_GEO_CONFIGURED	9
-#define STATUS_EXIT_PENDING	10
-#define STATUS_STATISTICS	12
-#define STATUS_SCANNING		13
-#define STATUS_SCAN_ABORTING	14
-#define STATUS_SCAN_HW		15
-#define STATUS_POWER_PMI	16
-#define STATUS_FW_ERROR		17
-#define STATUS_DEVICE_ENABLED	18
-#define STATUS_CHANNEL_SWITCH_PENDING 19
-#define STATUS_SCAN_COMPLETE	20
-
-#endif /* #__iwl_shared_h__ */
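
The header removed above also carried the driver-wide STATUS_* bit numbers
that were set and tested on the shared status word; after this series the
PCIe transport keeps a much smaller private STATUS_* set (added to
iwl-trans-pcie-int.h below) on its own status field.  Here is a
self-contained sketch of that bit-flag bookkeeping; the demo_* helpers are
plain stand-ins for the kernel's atomic set_bit()/test_bit(), and the flag
values are copied from the removed header purely for illustration.

	#include <stdio.h>

	#define DEMO_STATUS_HCMD_ACTIVE	0	/* host command in progress */
	#define DEMO_STATUS_RF_KILL_HW	3
	#define DEMO_STATUS_READY	7

	/* non-atomic stand-in for set_bit() */
	static void demo_set_bit(int nr, unsigned long *addr)
	{
		*addr |= 1UL << nr;
	}

	/* non-atomic stand-in for test_bit() */
	static int demo_test_bit(int nr, const unsigned long *addr)
	{
		return (*addr >> nr) & 1UL;
	}

	int main(void)
	{
		unsigned long status = 0;

		demo_set_bit(DEMO_STATUS_READY, &status);

		if (demo_test_bit(DEMO_STATUS_READY, &status) &&
		    !demo_test_bit(DEMO_STATUS_RF_KILL_HW, &status))
			printf("device ready, RF not killed (status=0x%lx)\n",
			       status);

		return 0;
	}
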
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index 76f7f92..060aac3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -71,7 +71,6 @@
 #include <net/netlink.h>
 
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-debug.h"
 #include "iwl-io.h"
 #include "iwl-agn.h"
@@ -184,9 +183,10 @@
 			 "Run out of memory for messages to user space ?\n");
 		return;
 	}
-	NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
-	/* the length doesn't include len_n_flags field, so add it manually */
-	NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data);
+	if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
+	    /* the length doesn't include len_n_flags field, so add it manually */
+	    nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
+		goto nla_put_failure;
 	cfg80211_testmode_event(skb, GFP_ATOMIC);
 	return;
 
@@ -218,7 +218,7 @@
 	if (priv->testmode_trace.trace_enabled) {
 		if (priv->testmode_trace.cpu_addr &&
 		    priv->testmode_trace.dma_addr)
-			dma_free_coherent(trans(priv)->dev,
+			dma_free_coherent(priv->trans->dev,
 					priv->testmode_trace.total_size,
 					priv->testmode_trace.cpu_addr,
 					priv->testmode_trace.dma_addr);
@@ -314,8 +314,9 @@
 	memcpy(reply_buf, &(pkt->hdr), reply_len);
 	iwl_free_resp(&cmd);
 
-	NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
-	NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf);
+	if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
+	    nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
+		goto nla_put_failure;
 	return cfg80211_testmode_reply(skb);
 
 nla_put_failure:
@@ -371,7 +372,7 @@
 
 	switch (cmd) {
 	case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
-		val32 = iwl_read_direct32(trans(priv), ofs);
+		val32 = iwl_read_direct32(priv->trans, ofs);
 		IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
 
 		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@@ -379,7 +380,8 @@
 			IWL_ERR(priv, "Memory allocation fail\n");
 			return -ENOMEM;
 		}
-		NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+		if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0)
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -391,7 +393,7 @@
 		} else {
 			val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
 			IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
-			iwl_write_direct32(trans(priv), ofs, val32);
+			iwl_write_direct32(priv->trans, ofs, val32);
 		}
 		break;
 	case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
@@ -401,7 +403,7 @@
 		} else {
 			val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
 			IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
-			iwl_write8(trans(priv), ofs, val8);
+			iwl_write8(priv->trans, ofs, val8);
 		}
 		break;
 	default:
@@ -420,10 +422,13 @@
 static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
 {
 	struct iwl_notification_wait calib_wait;
+	static const u8 calib_complete[] = {
+		CALIBRATION_COMPLETE_NOTIFICATION
+	};
 	int ret;
 
 	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
-				   CALIBRATION_COMPLETE_NOTIFICATION,
+				   calib_complete, ARRAY_SIZE(calib_complete),
 				   NULL, NULL);
 	ret = iwl_init_alive_start(priv);
 	if (ret) {
@@ -461,7 +466,7 @@
 static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
-	struct iwl_trans *trans = trans(priv);
+	struct iwl_trans *trans = priv->trans;
 	struct sk_buff *skb;
 	unsigned char *rsp_data_ptr = NULL;
 	int status = 0, rsp_data_len = 0;
@@ -470,18 +475,19 @@
 
 	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
 	case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
-		rsp_data_ptr = (unsigned char *)cfg(priv)->name;
-		rsp_data_len = strlen(cfg(priv)->name);
+		rsp_data_ptr = (unsigned char *)priv->cfg->name;
+		rsp_data_len = strlen(priv->cfg->name);
 		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
 							rsp_data_len + 20);
 		if (!skb) {
 			IWL_ERR(priv, "Memory allocation fail\n");
 			return -ENOMEM;
 		}
-		NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
-			    IWL_TM_CMD_DEV2APP_SYNC_RSP);
-		NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP,
-			rsp_data_len, rsp_data_ptr);
+		if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+				IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
+		    nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
+			    rsp_data_len, rsp_data_ptr))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0)
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -529,18 +535,19 @@
 		break;
 
 	case IWL_TM_CMD_APP2DEV_GET_EEPROM:
-		if (priv->shrd->eeprom) {
+		if (priv->eeprom) {
 			skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
-				cfg(priv)->base_params->eeprom_size + 20);
+				priv->cfg->base_params->eeprom_size + 20);
 			if (!skb) {
 				IWL_ERR(priv, "Memory allocation fail\n");
 				return -ENOMEM;
 			}
-			NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
-				IWL_TM_CMD_DEV2APP_EEPROM_RSP);
-			NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
-				cfg(priv)->base_params->eeprom_size,
-				priv->shrd->eeprom);
+			if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+					IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
+			    nla_put(skb, IWL_TM_ATTR_EEPROM,
+				    priv->cfg->base_params->eeprom_size,
+				    priv->eeprom))
+				goto nla_put_failure;
 			status = cfg80211_testmode_reply(skb);
 			if (status < 0)
 				IWL_ERR(priv, "Error sending msg : %d\n",
@@ -566,15 +573,16 @@
 			IWL_ERR(priv, "Memory allocation fail\n");
 			return -ENOMEM;
 		}
-		NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION,
-			    priv->fw->ucode_ver);
+		if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
+				priv->fw->ucode_ver))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0)
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
 		break;
 
 	case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
-		devid = trans(priv)->hw_id;
+		devid = priv->trans->hw_id;
 		IWL_INFO(priv, "hw version: 0x%x\n", devid);
 
 		skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@@ -582,7 +590,8 @@
 			IWL_ERR(priv, "Memory allocation fail\n");
 			return -ENOMEM;
 		}
-		NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
+		if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0)
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -598,13 +607,14 @@
 			IWL_ERR(priv, "No uCode has been loaded\n");
 			return -EINVAL;
 		} else {
-			img = &priv->fw->img[priv->shrd->ucode_type];
+			img = &priv->fw->img[priv->cur_ucode];
 			inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
 			data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
 		}
-		NLA_PUT_U32(skb, IWL_TM_ATTR_FW_TYPE, priv->shrd->ucode_type);
-		NLA_PUT_U32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size);
-		NLA_PUT_U32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size);
+		if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
+		    nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
+		    nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0)
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -639,7 +649,7 @@
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	struct sk_buff *skb;
 	int status = 0;
-	struct device *dev = trans(priv)->dev;
+	struct device *dev = priv->trans->dev;
 
 	switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
 	case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
@@ -678,9 +688,10 @@
 			iwl_trace_cleanup(priv);
 			return -ENOMEM;
 		}
-		NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR,
-			sizeof(priv->testmode_trace.dma_addr),
-			(u64 *)&priv->testmode_trace.dma_addr);
+		if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
+			    sizeof(priv->testmode_trace.dma_addr),
+			    (u64 *)&priv->testmode_trace.dma_addr))
+			goto nla_put_failure;
 		status = cfg80211_testmode_reply(skb);
 		if (status < 0) {
 			IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -725,9 +736,10 @@
 			length = priv->testmode_trace.buff_size %
 				DUMP_CHUNK_SIZE;
 
-		NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
-			priv->testmode_trace.trace_addr +
-			(DUMP_CHUNK_SIZE * idx));
+		if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
+			    priv->testmode_trace.trace_addr +
+			    (DUMP_CHUNK_SIZE * idx)))
+			goto nla_put_failure;
 		idx++;
 		cb->args[4] = idx;
 		return 0;
@@ -779,7 +791,7 @@
 
 static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
 {
-	struct iwl_trans *trans = trans(priv);
+	struct iwl_trans *trans = priv->trans;
 	unsigned long flags;
 	int i;
 
@@ -819,7 +831,7 @@
 static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
 	u32 size, unsigned char *buf)
 {
-	struct iwl_trans *trans = trans(priv);
+	struct iwl_trans *trans = priv->trans;
 	u32 val, i;
 	unsigned long flags;
 
@@ -922,9 +934,10 @@
 			length = priv->testmode_mem.buff_size %
 				DUMP_CHUNK_SIZE;
 
-		NLA_PUT(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
-			priv->testmode_mem.buff_addr +
-			(DUMP_CHUNK_SIZE * idx));
+		if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
+			    priv->testmode_mem.buff_addr +
+			    (DUMP_CHUNK_SIZE * idx)))
+			goto nla_put_failure;
 		idx++;
 		cb->args[4] = idx;
 		return 0;
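
The testmode hunks above all follow one conversion: the NLA_PUT()/NLA_PUT_U32()
macros, which hid a goto nla_put_failure on overflow, become nla_put()/
nla_put_u32() calls whose return values are checked explicitly and OR-ed
together before jumping to the failure label.  Below is a minimal sketch of
that error-handling shape; demo_buf and demo_put_u32() are invented stand-ins
for "a put that can fail", not the netlink API.

	#include <stdio.h>
	#include <string.h>

	struct demo_buf {
		unsigned char data[8];
		size_t used;
	};

	/* 0 on success, -1 if the value does not fit (like nla_put_u32()) */
	static int demo_put_u32(struct demo_buf *buf, unsigned int val)
	{
		if (buf->used + sizeof(val) > sizeof(buf->data))
			return -1;
		memcpy(buf->data + buf->used, &val, sizeof(val));
		buf->used += sizeof(val);
		return 0;
	}

	static int demo_fill(struct demo_buf *buf)
	{
		/* mirrors: if (nla_put_u32(...) || nla_put(...)) goto nla_put_failure; */
		if (demo_put_u32(buf, 0x11) ||
		    demo_put_u32(buf, 0x22) ||
		    demo_put_u32(buf, 0x33))	/* third put overflows the 8-byte buffer */
			goto put_failure;
		return 0;

	put_failure:
		printf("attribute did not fit, used=%zu bytes\n", buf->used);
		return -1;
	}

	int main(void)
	{
		struct demo_buf buf = { .used = 0 };

		return demo_fill(&buf) ? 1 : 0;
	}

The explicit checks keep the failure path visible at each call site instead of
hiding it inside a macro, which is what the tree-wide NLA_PUT removal was
about.
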
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 1c2fe87..6213c05 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -34,17 +34,15 @@
 #include <linux/skbuff.h>
 #include <linux/wait.h>
 #include <linux/pci.h>
+#include <linux/timer.h>
 
 #include "iwl-fh.h"
 #include "iwl-csr.h"
-#include "iwl-shared.h"
 #include "iwl-trans.h"
 #include "iwl-debug.h"
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
-struct iwl_tx_queue;
-struct iwl_queue;
 struct iwl_host_cmd;
 
 /*This file includes the declaration that are internal to the
@@ -136,21 +134,14 @@
 	return --index & (n_bd - 1);
 }
 
-/*
- * This queue number is required for proper operation
- * because the ucode will stop/start the scheduler as
- * required.
- */
-#define IWL_IPAN_MCAST_QUEUE		8
-
 struct iwl_cmd_meta {
 	/* only for SYNC commands, iff the reply skb is wanted */
 	struct iwl_host_cmd *source;
 
-	u32 flags;
-
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 	DEFINE_DMA_UNMAP_LEN(len);
+
+	u32 flags;
 };
 
 /*
@@ -188,72 +179,66 @@
 				* space less than this */
 };
 
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct iwl_pcie_tx_queue_entry {
+	struct iwl_device_cmd *cmd;
+	struct sk_buff *skb;
+	struct iwl_cmd_meta meta;
+};
+
 /**
  * struct iwl_tx_queue - Tx Queue for DMA
  * @q: generic Rx/Tx queue descriptor
- * @bd: base of circular buffer of TFDs
- * @cmd: array of command/TX buffer pointers
- * @meta: array of meta data for each command/tx buffer
- * @dma_addr_cmd: physical address of cmd/tx buffer array
- * @txb: array of per-TFD driver data
- * lock: queue lock
- * @time_stamp: time (in jiffies) of last read_ptr change
+ * @tfds: transmit frame descriptors (DMA memory)
+ * @entries: transmit entries (driver state)
+ * @lock: queue lock
+ * @stuck_timer: timer that fires if queue gets stuck
+ * @trans_pcie: pointer back to transport (for timer)
  * @need_update: indicates need to update read/write index
- * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
- * @sta_id: valid if sched_retry is set
- * @tid: valid if sched_retry is set
+ * @active: stores if queue is active
  *
  * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
  * descriptors) and required locking structures.
  */
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
 struct iwl_tx_queue {
 	struct iwl_queue q;
 	struct iwl_tfd *tfds;
-	struct iwl_device_cmd **cmd;
-	struct iwl_cmd_meta *meta;
-	struct sk_buff **skbs;
+	struct iwl_pcie_tx_queue_entry *entries;
 	spinlock_t lock;
-	unsigned long time_stamp;
+	struct timer_list stuck_timer;
+	struct iwl_trans_pcie *trans_pcie;
 	u8 need_update;
-	u8 sched_retry;
 	u8 active;
-	u8 swq_id;
-
-	u16 sta_id;
-	u16 tid;
 };
 
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
  * @rx_replenish: work that will be called when buffers need to be allocated
+ * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @irq - the irq number for the device
  * @irq_requested: true when the irq has been requested
  * @scd_base_addr: scheduler sram base address in SRAM
  * @scd_bc_tbls: pointer to the byte count table of the scheduler
  * @kw: keep warm address
- * @ac_to_fifo: to what fifo is a specific AC mapped?
- * @ac_to_queue: to what tx queue is a specific AC mapped?
- * @mcast_queue:
- * @txq: Tx DMA processing queues
- * @txq_ctx_active_msk: what queue is active
- * queue_stopped: tracks what queue is stopped
- * queue_stop_count: tracks what SW queue is stopped
  * @pci_dev: basic pci-network driver stuff
  * @hw_base: pci hardware address support
  * @ucode_write_complete: indicates that the ucode has been copied.
  * @ucode_write_waitq: wait queue for uCode load
  * @status - transport specific status flags
  * @cmd_queue - command queue number
+ * @rx_buf_size_8k: 8 kB RX buffer size
+ * @rx_page_order: page order for receive buffer size
+ * @wd_timeout: queue watchdog timeout (jiffies)
  */
 struct iwl_trans_pcie {
 	struct iwl_rx_queue rxq;
 	struct work_struct rx_replenish;
 	struct iwl_trans *trans;
+	struct iwl_drv *drv;
 
 	/* INT ICT Table */
 	__le32 *ict_tbl;
@@ -272,16 +257,9 @@
 	struct iwl_dma_ptr scd_bc_tbls;
 	struct iwl_dma_ptr kw;
 
-	const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
-	const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
-	u8 mcast_queue[NUM_IWL_RXON_CTX];
-	u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
-
 	struct iwl_tx_queue *txq;
-	unsigned long txq_ctx_active_msk;
-#define IWL_MAX_HW_QUEUES	32
+	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
-	atomic_t queue_stop_count[4];
 
 	/* PCI bus related data */
 	struct pci_dev *pci_dev;
@@ -293,11 +271,41 @@
 	u8 cmd_queue;
 	u8 n_no_reclaim_cmds;
 	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+	u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
+	u8 n_q_to_fifo;
+
+	bool rx_buf_size_8k;
+	u32 rx_page_order;
+
+	const char **command_names;
+
+	/* queue watchdog */
+	unsigned long wd_timeout;
 };
 
+/*****************************************************
+* DRIVER STATUS FUNCTIONS
+******************************************************/
+#define STATUS_HCMD_ACTIVE	0
+#define STATUS_DEVICE_ENABLED	1
+#define STATUS_TPOWER_PMI	2
+#define STATUS_INT_ENABLED	3
+
 #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
 	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
 
+static inline struct iwl_trans *
+iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
+{
+	return container_of((void *)trans_pcie, struct iwl_trans,
+			    trans_specific);
+}
+
+struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+				       const struct pci_device_id *ent,
+				       const struct iwl_cfg *cfg);
+void iwl_trans_pcie_free(struct iwl_trans *trans);
+
 /*****************************************************
 * RX
 ******************************************************/
@@ -331,15 +339,12 @@
 void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 					   struct iwl_tx_queue *txq,
 					   u16 byte_cnt);
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
-				  int sta_id, int tid);
+void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue);
 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
-			     struct iwl_tx_queue *txq,
-			     int tx_fifo_id, int scd_retry);
-int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
-				 enum iwl_rxon_context_id ctx,
+				   struct iwl_tx_queue *txq,
+				   int tx_fifo_id, bool active);
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
 				 int sta_id, int tid, int frame_limit, u16 ssn);
 void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 	int index, enum dma_data_direction dma_dir);
@@ -350,8 +355,6 @@
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
-			    char **buf, bool display);
 int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
 void iwl_dump_csr(struct iwl_trans *trans);
 
@@ -388,91 +391,28 @@
 	iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
 }
 
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
-	BUG_ON(ac > 3);   /* only have 2 bits */
-	BUG_ON(hwq > 31); /* only use 5 bits */
-
-	txq->swq_id = (hwq << 2) | ac;
-}
-
-static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
-{
-	return txq->swq_id & 0x3;
-}
-
 static inline void iwl_wake_queue(struct iwl_trans *trans,
 				  struct iwl_tx_queue *txq)
 {
-	u8 queue = txq->swq_id;
-	u8 ac = queue & 3;
-	u8 hwq = (queue >> 2) & 0x1f;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
-		if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
-			iwl_op_mode_queue_not_full(trans->op_mode, ac);
-			IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d",
-					    hwq, ac);
-		} else {
-			IWL_DEBUG_TX_QUEUES(trans,
-				"Don't wake hwq %d ac %d stop count %d",
-				hwq, ac,
-				atomic_read(&trans_pcie->queue_stop_count[ac]));
-		}
+	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
+		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
+		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
 	}
 }
 
 static inline void iwl_stop_queue(struct iwl_trans *trans,
 				  struct iwl_tx_queue *txq)
 {
-	u8 queue = txq->swq_id;
-	u8 ac = queue & 3;
-	u8 hwq = (queue >> 2) & 0x1f;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
-		if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
-			iwl_op_mode_queue_full(trans->op_mode, ac);
-			IWL_DEBUG_TX_QUEUES(trans,
-				"Stop hwq %d ac %d stop count %d",
-				hwq, ac,
-				atomic_read(&trans_pcie->queue_stop_count[ac]));
-		} else {
-			IWL_DEBUG_TX_QUEUES(trans,
-				"Don't stop hwq %d ac %d stop count %d",
-				hwq, ac,
-				atomic_read(&trans_pcie->queue_stop_count[ac]));
-		}
-	} else {
-		IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped",
-				    hwq);
-	}
-}
-
-static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
-					int txq_id)
-{
-	set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
-}
-
-static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
-					  int txq_id)
-{
-	clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
+	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
+		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
+		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+	} else
+		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+				    txq->q.id);
 }
 
 static inline int iwl_queue_used(const struct iwl_queue *q, int i)
@@ -487,19 +427,18 @@
 	return index & (q->n_window - 1);
 }
 
-#define IWL_TX_FIFO_BK		0	/* shared */
-#define IWL_TX_FIFO_BE		1
-#define IWL_TX_FIFO_VI		2	/* shared */
-#define IWL_TX_FIFO_VO		3
-#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
-#define IWL_TX_FIFO_BE_IPAN	4
-#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
-#define IWL_TX_FIFO_VO_IPAN	5
-/* re-uses the VO FIFO, uCode will properly flush/schedule */
-#define IWL_TX_FIFO_AUX		5
-#define IWL_TX_FIFO_UNUSED	-1
+static inline const char *
+trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd)
+{
+	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
+		return "UNKNOWN";
+	return trans_pcie->command_names[cmd];
+}
 
-/* AUX (TX during scan dwell) queue */
-#define IWL_AUX_QUEUE		10
+static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
+{
+	return !(iwl_read32(trans, CSR_GP_CNTRL) &
+		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+}
 
 #endif /* __iwl_trans_int_pcie_h__ */
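
The header rework above drops the packed swq_id (AC plus HW queue number) and
the per-AC atomic stop counters in favour of one bit per HW queue in the
queue_stopped[]/queue_used[] bitmaps, flipped with test_and_set_bit()/
test_and_clear_bit() so repeated stop or wake calls stay harmless.  A small
user-space sketch of that scheme follows; the demo_* helpers are plain,
non-atomic stand-ins for the kernel bitops, and the printf calls stand in for
the IWL_DEBUG_TX_QUEUES messages and op_mode notifications.

	#include <stdio.h>

	static unsigned long demo_queue_stopped;	/* bit n set == HW queue n stopped */

	/* non-atomic stand-in for test_and_set_bit() */
	static int demo_test_and_set_bit(int nr, unsigned long *addr)
	{
		int old = (*addr >> nr) & 1UL;

		*addr |= 1UL << nr;
		return old;
	}

	/* non-atomic stand-in for test_and_clear_bit() */
	static int demo_test_and_clear_bit(int nr, unsigned long *addr)
	{
		int old = (*addr >> nr) & 1UL;

		*addr &= ~(1UL << nr);
		return old;
	}

	static void demo_stop_queue(int hwq)
	{
		if (!demo_test_and_set_bit(hwq, &demo_queue_stopped))
			printf("Stop hwq %d\n", hwq);	/* would tell the op_mode it is full */
		else
			printf("hwq %d already stopped\n", hwq);
	}

	static void demo_wake_queue(int hwq)
	{
		if (demo_test_and_clear_bit(hwq, &demo_queue_stopped))
			printf("Wake hwq %d\n", hwq);	/* would tell the op_mode it has room */
	}

	int main(void)
	{
		demo_stop_queue(5);
		demo_stop_queue(5);	/* second stop is a no-op apart from the message */
		demo_wake_queue(5);
		return 0;
	}

Keying everything on the HW queue number is what lets the transport drop the
AC bookkeeping entirely: the op_mode is told which queue filled up, not which
access category.
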
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index 8b1a798..08517d3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -140,14 +140,17 @@
 	if (q->need_update == 0)
 		goto exit_unlock;
 
-	if (cfg(trans)->base_params->shadow_reg_enable) {
+	if (trans->cfg->base_params->shadow_reg_enable) {
 		/* shadow register enabled */
 		/* Device expects a multiple of 8 */
 		q->write_actual = (q->write & ~0x7);
 		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
 	} else {
+		struct iwl_trans_pcie *trans_pcie =
+			IWL_TRANS_GET_PCIE_TRANS(trans);
+
 		/* If power-saving is in use, make sure device is awake */
-		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
+		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
 			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
 
 			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
@@ -271,17 +274,17 @@
 		if (rxq->free_count > RX_LOW_WATERMARK)
 			gfp_mask |= __GFP_NOWARN;
 
-		if (hw_params(trans).rx_page_order > 0)
+		if (trans_pcie->rx_page_order > 0)
 			gfp_mask |= __GFP_COMP;
 
 		/* Alloc a new receive buffer */
 		page = alloc_pages(gfp_mask,
-				  hw_params(trans).rx_page_order);
+				  trans_pcie->rx_page_order);
 		if (!page) {
 			if (net_ratelimit())
 				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
 					   "order: %d\n",
-					   hw_params(trans).rx_page_order);
+					   trans_pcie->rx_page_order);
 
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
 			    net_ratelimit())
@@ -300,7 +303,7 @@
 
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			__free_pages(page, hw_params(trans).rx_page_order);
+			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
@@ -313,7 +316,7 @@
 		rxb->page = page;
 		/* Get physical address of the RB */
 		rxb->page_dma = dma_map_page(trans->dev, page, 0,
-				PAGE_SIZE << hw_params(trans).rx_page_order,
+				PAGE_SIZE << trans_pcie->rx_page_order,
 				DMA_FROM_DEVICE);
 		/* dma address must be no more than 36 bits */
 		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
@@ -362,83 +365,98 @@
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
 	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-	struct iwl_device_cmd *cmd;
 	unsigned long flags;
-	int len, err;
-	u16 sequence;
-	struct iwl_rx_cmd_buffer rxcb;
-	struct iwl_rx_packet *pkt;
-	bool reclaim;
-	int index, cmd_index;
+	bool page_stolen = false;
+	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+	u32 offset = 0;
 
 	if (WARN_ON(!rxb))
 		return;
 
-	dma_unmap_page(trans->dev, rxb->page_dma,
-		       PAGE_SIZE << hw_params(trans).rx_page_order,
-		       DMA_FROM_DEVICE);
+	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
 
-	rxcb._page = rxb->page;
-	pkt = rxb_addr(&rxcb);
+	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
+		struct iwl_rx_packet *pkt;
+		struct iwl_device_cmd *cmd;
+		u16 sequence;
+		bool reclaim;
+		int index, cmd_index, err, len;
+		struct iwl_rx_cmd_buffer rxcb = {
+			._offset = offset,
+			._page = rxb->page,
+			._page_stolen = false,
+			.truesize = max_len,
+		};
 
-	IWL_DEBUG_RX(trans, "%s, 0x%02x\n",
-		     get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+		pkt = rxb_addr(&rxcb);
 
+		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
+			break;
 
-	len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-	len += sizeof(u32); /* account for status word */
-	trace_iwlwifi_dev_rx(trans->dev, pkt, len);
+		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
+			rxcb._offset,
+			trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
+			pkt->hdr.cmd);
 
-	/* Reclaim a command buffer only if this packet is a response
-	 *   to a (driver-originated) command.
-	 * If the packet (e.g. Rx frame) originated from uCode,
-	 *   there is no command buffer to reclaim.
-	 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
-	 *   but apparently a few don't get set; catch them here. */
-	reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
-	if (reclaim) {
-		int i;
+		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+		len += sizeof(u32); /* account for status word */
+		trace_iwlwifi_dev_rx(trans->dev, pkt, len);
 
-		for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
-			if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) {
-				reclaim = false;
-				break;
+		/* Reclaim a command buffer only if this packet is a response
+		 *   to a (driver-originated) command.
+		 * If the packet (e.g. Rx frame) originated from uCode,
+		 *   there is no command buffer to reclaim.
+		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+		 *   but apparently a few don't get set; catch them here. */
+		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
+		if (reclaim) {
+			int i;
+
+			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
+				if (trans_pcie->no_reclaim_cmds[i] ==
+							pkt->hdr.cmd) {
+					reclaim = false;
+					break;
+				}
 			}
 		}
-	}
 
-	sequence = le16_to_cpu(pkt->hdr.sequence);
-	index = SEQ_TO_INDEX(sequence);
-	cmd_index = get_cmd_index(&txq->q, index);
+		sequence = le16_to_cpu(pkt->hdr.sequence);
+		index = SEQ_TO_INDEX(sequence);
+		cmd_index = get_cmd_index(&txq->q, index);
 
-	if (reclaim)
-		cmd = txq->cmd[cmd_index];
-	else
-		cmd = NULL;
-
-	err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
-
-	/*
-	 * XXX: After here, we should always check rxcb._page
-	 * against NULL before touching it or its virtual
-	 * memory (pkt). Because some rx_handler might have
-	 * already taken or freed the pages.
-	 */
-
-	if (reclaim) {
-		/* Invoke any callbacks, transfer the buffer to caller,
-		 * and fire off the (possibly) blocking
-		 * iwl_trans_send_cmd()
-		 * as we reclaim the driver command queue */
-		if (rxcb._page)
-			iwl_tx_cmd_complete(trans, &rxcb, err);
+		if (reclaim)
+			cmd = txq->entries[cmd_index].cmd;
 		else
-			IWL_WARN(trans, "Claim null rxb?\n");
+			cmd = NULL;
+
+		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+
+		/*
+		 * After here, we should always check rxcb._page_stolen,
+		 * if it is true then one of the handlers took the page.
+		 */
+
+		if (reclaim) {
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking
+			 * iwl_trans_send_cmd()
+			 * as we reclaim the driver command queue */
+			if (!rxcb._page_stolen)
+				iwl_tx_cmd_complete(trans, &rxcb, err);
+			else
+				IWL_WARN(trans, "Claim null rxb?\n");
+		}
+
+		page_stolen |= rxcb._page_stolen;
+		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
 	}
 
-	/* page was stolen from us */
-	if (rxcb._page == NULL)
+	/* page was stolen from us -- free our reference */
+	if (page_stolen) {
+		__free_pages(rxb->page, trans_pcie->rx_page_order);
 		rxb->page = NULL;
+	}
 
 	/* Reuse the page if possible. For notification packets and
 	 * SKBs that fail to Rx correctly, add them back into the
@@ -447,7 +465,7 @@
 	if (rxb->page != NULL) {
 		rxb->page_dma =
 			dma_map_page(trans->dev, rxb->page, 0,
-				PAGE_SIZE << hw_params(trans).rx_page_order,
+				PAGE_SIZE << trans_pcie->rx_page_order,
 				DMA_FROM_DEVICE);
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
@@ -520,412 +538,32 @@
 		iwlagn_rx_queue_restock(trans);
 }
 
-static const char * const desc_lookup_text[] = {
-	"OK",
-	"FAIL",
-	"BAD_PARAM",
-	"BAD_CHECKSUM",
-	"NMI_INTERRUPT_WDG",
-	"SYSASSERT",
-	"FATAL_ERROR",
-	"BAD_COMMAND",
-	"HW_ERROR_TUNE_LOCK",
-	"HW_ERROR_TEMPERATURE",
-	"ILLEGAL_CHAN_FREQ",
-	"VCC_NOT_STABLE",
-	"FH_ERROR",
-	"NMI_INTERRUPT_HOST",
-	"NMI_INTERRUPT_ACTION_PT",
-	"NMI_INTERRUPT_UNKNOWN",
-	"UCODE_VERSION_MISMATCH",
-	"HW_ERROR_ABS_LOCK",
-	"HW_ERROR_CAL_LOCK_FAIL",
-	"NMI_INTERRUPT_INST_ACTION_PT",
-	"NMI_INTERRUPT_DATA_ACTION_PT",
-	"NMI_TRM_HW_ER",
-	"NMI_INTERRUPT_TRM",
-	"NMI_INTERRUPT_BREAK_POINT",
-	"DEBUG_0",
-	"DEBUG_1",
-	"DEBUG_2",
-	"DEBUG_3",
-};
-
-static struct { char *name; u8 num; } advanced_lookup[] = {
-	{ "NMI_INTERRUPT_WDG", 0x34 },
-	{ "SYSASSERT", 0x35 },
-	{ "UCODE_VERSION_MISMATCH", 0x37 },
-	{ "BAD_COMMAND", 0x38 },
-	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
-	{ "FATAL_ERROR", 0x3D },
-	{ "NMI_TRM_HW_ERR", 0x46 },
-	{ "NMI_INTERRUPT_TRM", 0x4C },
-	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
-	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
-	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
-	{ "NMI_INTERRUPT_HOST", 0x66 },
-	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
-	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
-	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
-	{ "ADVANCED_SYSASSERT", 0 },
-};
-
-static const char *desc_lookup(u32 num)
-{
-	int i;
-	int max = ARRAY_SIZE(desc_lookup_text);
-
-	if (num < max)
-		return desc_lookup_text[num];
-
-	max = ARRAY_SIZE(advanced_lookup) - 1;
-	for (i = 0; i < max; i++) {
-		if (advanced_lookup[i].num == num)
-			break;
-	}
-	return advanced_lookup[i].name;
-}
-
-#define ERROR_START_OFFSET  (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
-
-static void iwl_dump_nic_error_log(struct iwl_trans *trans)
-{
-	u32 base;
-	struct iwl_error_event_table table;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	base = trans->shrd->device_pointers.error_event_table;
-	if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
-		if (!base)
-			base = trans->shrd->fw->init_errlog_ptr;
-	} else {
-		if (!base)
-			base = trans->shrd->fw->inst_errlog_ptr;
-	}
-
-	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
-		IWL_ERR(trans,
-			"Not valid error log pointer 0x%08X for %s uCode\n",
-			base,
-			(trans->shrd->ucode_type == IWL_UCODE_INIT)
-					? "Init" : "RT");
-		return;
-	}
-
-	iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
-
-	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
-		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
-		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
-			trans->shrd->status, table.valid);
-	}
-
-	trans_pcie->isr_stats.err_code = table.error_id;
-
-	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
-				      table.data1, table.data2, table.line,
-				      table.blink1, table.blink2, table.ilink1,
-				      table.ilink2, table.bcon_time, table.gp1,
-				      table.gp2, table.gp3, table.ucode_ver,
-				      table.hw_ver, table.brd_ver);
-	IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id,
-		desc_lookup(table.error_id));
-	IWL_ERR(trans, "0x%08X | uPc\n", table.pc);
-	IWL_ERR(trans, "0x%08X | branchlink1\n", table.blink1);
-	IWL_ERR(trans, "0x%08X | branchlink2\n", table.blink2);
-	IWL_ERR(trans, "0x%08X | interruptlink1\n", table.ilink1);
-	IWL_ERR(trans, "0x%08X | interruptlink2\n", table.ilink2);
-	IWL_ERR(trans, "0x%08X | data1\n", table.data1);
-	IWL_ERR(trans, "0x%08X | data2\n", table.data2);
-	IWL_ERR(trans, "0x%08X | line\n", table.line);
-	IWL_ERR(trans, "0x%08X | beacon time\n", table.bcon_time);
-	IWL_ERR(trans, "0x%08X | tsf low\n", table.tsf_low);
-	IWL_ERR(trans, "0x%08X | tsf hi\n", table.tsf_hi);
-	IWL_ERR(trans, "0x%08X | time gp1\n", table.gp1);
-	IWL_ERR(trans, "0x%08X | time gp2\n", table.gp2);
-	IWL_ERR(trans, "0x%08X | time gp3\n", table.gp3);
-	IWL_ERR(trans, "0x%08X | uCode version\n", table.ucode_ver);
-	IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
-	IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
-	IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
-
-	IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
-	IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
-	IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
-	IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
-	IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
-	IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
-	IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
-	IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
-	IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
-	IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
-	IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
-	IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
-	IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
-	IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
-}
-
 /**
  * iwl_irq_handle_error - called for HW or SW error interrupt from card
  */
 static void iwl_irq_handle_error(struct iwl_trans *trans)
 {
 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
-	if (cfg(trans)->internal_wimax_coex &&
+	if (trans->cfg->internal_wimax_coex &&
 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
 			APMS_CLK_VAL_MRB_FUNC_MODE) ||
 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
 			APMG_PS_CTRL_VAL_RESET_REQ))) {
-		/*
-		 * Keep the restart process from trying to send host
-		 * commands by clearing the ready bit.
-		 */
-		clear_bit(STATUS_READY, &trans->shrd->status);
-		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+		struct iwl_trans_pcie *trans_pcie;
+
+		trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+		iwl_op_mode_wimax_active(trans->op_mode);
 		wake_up(&trans->wait_command_queue);
-		IWL_ERR(trans, "RF is used by WiMAX\n");
 		return;
 	}
 
-	IWL_ERR(trans, "Loaded firmware version: %s\n",
-		trans->shrd->fw->fw_version);
-
-	iwl_dump_nic_error_log(trans);
 	iwl_dump_csr(trans);
 	iwl_dump_fh(trans, NULL, false);
-	iwl_dump_nic_event_log(trans, false, NULL, false);
 
 	iwl_op_mode_nic_error(trans->op_mode);
 }
 
-#define EVENT_START_OFFSET  (4 * sizeof(u32))
-
-/**
- * iwl_print_event_log - Dump error event log to syslog
- *
- */
-static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
-			       u32 num_events, u32 mode,
-			       int pos, char **buf, size_t bufsz)
-{
-	u32 i;
-	u32 base;       /* SRAM byte address of event log header */
-	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
-	u32 ptr;        /* SRAM byte address of log data */
-	u32 ev, time, data; /* event log data */
-	unsigned long reg_flags;
-
-	if (num_events == 0)
-		return pos;
-
-	base = trans->shrd->device_pointers.log_event_table;
-	if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
-		if (!base)
-			base = trans->shrd->fw->init_evtlog_ptr;
-	} else {
-		if (!base)
-			base = trans->shrd->fw->inst_evtlog_ptr;
-	}
-
-	if (mode == 0)
-		event_size = 2 * sizeof(u32);
-	else
-		event_size = 3 * sizeof(u32);
-
-	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
-
-	/* Make sure device is powered up for SRAM reads */
-	spin_lock_irqsave(&trans->reg_lock, reg_flags);
-	if (unlikely(!iwl_grab_nic_access(trans)))
-		goto out_unlock;
-
-	/* Set starting address; reads will auto-increment */
-	iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
-
-	/* "time" is actually "data" for mode 0 (no timestamp).
-	* place event id # at far right for easier visual parsing. */
-	for (i = 0; i < num_events; i++) {
-		ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
-		time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
-		if (mode == 0) {
-			/* data, ev */
-			if (bufsz) {
-				pos += scnprintf(*buf + pos, bufsz - pos,
-						"EVT_LOG:0x%08x:%04u\n",
-						time, ev);
-			} else {
-				trace_iwlwifi_dev_ucode_event(trans->dev, 0,
-					time, ev);
-				IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
-					time, ev);
-			}
-		} else {
-			data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
-			if (bufsz) {
-				pos += scnprintf(*buf + pos, bufsz - pos,
-						"EVT_LOGT:%010u:0x%08x:%04u\n",
-						 time, data, ev);
-			} else {
-				IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
-					time, data, ev);
-				trace_iwlwifi_dev_ucode_event(trans->dev, time,
-					data, ev);
-			}
-		}
-	}
-
-	/* Allow device to power down */
-	iwl_release_nic_access(trans);
-out_unlock:
-	spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
-	return pos;
-}
-
-/**
- * iwl_print_last_event_logs - Dump the newest # of event log to syslog
- */
-static int iwl_print_last_event_logs(struct iwl_trans *trans, u32 capacity,
-				    u32 num_wraps, u32 next_entry,
-				    u32 size, u32 mode,
-				    int pos, char **buf, size_t bufsz)
-{
-	/*
-	 * display the newest DEFAULT_DUMP_EVENT_LOG_ENTRIES entries,
-	 * i.e. the entries just before the next one that uCode would fill.
-	 */
-	if (num_wraps) {
-		if (next_entry < size) {
-			pos = iwl_print_event_log(trans,
-						capacity - (size - next_entry),
-						size - next_entry, mode,
-						pos, buf, bufsz);
-			pos = iwl_print_event_log(trans, 0,
-						  next_entry, mode,
-						  pos, buf, bufsz);
-		} else
-			pos = iwl_print_event_log(trans, next_entry - size,
-						  size, mode, pos, buf, bufsz);
-	} else {
-		if (next_entry < size) {
-			pos = iwl_print_event_log(trans, 0, next_entry,
-						  mode, pos, buf, bufsz);
-		} else {
-			pos = iwl_print_event_log(trans, next_entry - size,
-						  size, mode, pos, buf, bufsz);
-		}
-	}
-	return pos;
-}
-
-#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
-
-int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
-			    char **buf, bool display)
-{
-	u32 base;       /* SRAM byte address of event log header */
-	u32 capacity;   /* event log capacity in # entries */
-	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
-	u32 num_wraps;  /* # times uCode wrapped to top of log */
-	u32 next_entry; /* index of next entry to be written by uCode */
-	u32 size;       /* # entries that we'll print */
-	u32 logsize;
-	int pos = 0;
-	size_t bufsz = 0;
-
-	base = trans->shrd->device_pointers.log_event_table;
-	if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
-		logsize = trans->shrd->fw->init_evtlog_size;
-		if (!base)
-			base = trans->shrd->fw->init_evtlog_ptr;
-	} else {
-		logsize = trans->shrd->fw->inst_evtlog_size;
-		if (!base)
-			base = trans->shrd->fw->inst_evtlog_ptr;
-	}
-
-	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
-		IWL_ERR(trans,
-			"Invalid event log pointer 0x%08X for %s uCode\n",
-			base,
-			(trans->shrd->ucode_type == IWL_UCODE_INIT)
-					? "Init" : "RT");
-		return -EINVAL;
-	}
-
-	/* event log header */
-	capacity = iwl_read_targ_mem(trans, base);
-	mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
-	num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
-	next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
-
-	if (capacity > logsize) {
-		IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
-			"entries\n", capacity, logsize);
-		capacity = logsize;
-	}
-
-	if (next_entry > logsize) {
-		IWL_ERR(trans, "Log write index %d is bogus, limit to %d\n",
-			next_entry, logsize);
-		next_entry = logsize;
-	}
-
-	size = num_wraps ? capacity : next_entry;
-
-	/* bail out if nothing in log */
-	if (size == 0) {
-		IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
-		return pos;
-	}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-	if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
-		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
-			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#else
-	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
-		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#endif
-	IWL_ERR(trans, "Start IWL Event Log Dump: display last %u entries\n",
-		size);
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-	if (display) {
-		if (full_log)
-			bufsz = capacity * 48;
-		else
-			bufsz = size * 48;
-		*buf = kmalloc(bufsz, GFP_KERNEL);
-		if (!*buf)
-			return -ENOMEM;
-	}
-	if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
-		/*
-		 * if uCode has wrapped back to top of log,
-		 * start at the oldest entry,
-		 * i.e the next one that uCode would fill.
-		 */
-		if (num_wraps)
-			pos = iwl_print_event_log(trans, next_entry,
-						capacity - next_entry, mode,
-						pos, buf, bufsz);
-		/* (then/else) start at top of log */
-		pos = iwl_print_event_log(trans, 0,
-					  next_entry, mode, pos, buf, bufsz);
-	} else
-		pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
-						next_entry, size, mode,
-						pos, buf, bufsz);
-#else
-	pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
-					next_entry, size, mode,
-					pos, buf, bufsz);
-#endif
-	return pos;
-}
-
 /* tasklet for iwlagn interrupt */
 void iwl_irq_tasklet(struct iwl_trans *trans)
 {
@@ -963,7 +601,7 @@
 	if (iwl_have_debug_level(IWL_DL_ISR)) {
 		/* just for debug */
 		inta_mask = iwl_read32(trans, CSR_INT_MASK);
-		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
+		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
 				inta, inta_mask);
 	}
 #endif
@@ -1011,8 +649,7 @@
 	if (inta & CSR_INT_BIT_RF_KILL) {
 		bool hw_rfkill;
 
-		hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
-				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+		hw_rfkill = iwl_is_rfkill_set(trans);
 		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
 				hw_rfkill ? "disable radio" : "enable radio");
 
@@ -1043,7 +680,7 @@
 	if (inta & CSR_INT_BIT_WAKEUP) {
 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
 		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
-		for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++)
+		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
 			iwl_txq_update_write_ptr(trans,
 						 &trans_pcie->txq[i]);
 
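
The RX rework above turns the completion path into a loop over every packet
packed into a single receive page: each packet's size comes from its
len_n_flags word, the offset then advances by the frame-aligned length, and
the walk stops at an FH_RSCSR_FRAME_INVALID marker or the end of the page,
while _page_stolen records whether a handler kept the page.  The sketch below
shows only the offset walk; the page size, alignment and 0xffffffff end
marker are illustrative stand-ins, not the real FH_RSCSR_* layout.

	#include <stdio.h>
	#include <string.h>

	#define DEMO_PAGE_SIZE	64
	#define DEMO_ALIGN	8		/* stand-in for FH_RSCSR_FRAME_ALIGN */
	#define DEMO_INVALID	0xffffffffu	/* stand-in for the invalid-frame marker */

	#define DEMO_ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned char page[DEMO_PAGE_SIZE];
		unsigned int lens[] = { 12, 20 };	/* two packets, then the end marker */
		unsigned int offset = 0, val, i;

		/* build a fake page: length word per packet, frame-aligned spacing */
		memset(page, 0, sizeof(page));
		for (i = 0; i < 2; i++) {
			memcpy(page + offset, &lens[i], sizeof(lens[i]));
			offset += DEMO_ALIGN_UP(lens[i], DEMO_ALIGN);
		}
		val = DEMO_INVALID;
		memcpy(page + offset, &val, sizeof(val));

		/* the walk: mirrors "while (offset + header size < max_len)" above */
		for (offset = 0; offset + sizeof(val) <= DEMO_PAGE_SIZE;
		     offset += DEMO_ALIGN_UP(val, DEMO_ALIGN)) {
			memcpy(&val, page + offset, sizeof(val));
			if (val == DEMO_INVALID)
				break;
			printf("packet at offset %u, len %u\n", offset, val);
		}
		return 0;
	}
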
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index e92972f..21a8a67 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -37,47 +37,12 @@
 #include "iwl-agn-hw.h"
 #include "iwl-op-mode.h"
 #include "iwl-trans-pcie-int.h"
+/* FIXME: need to abstract out TX command (once we know what it looks like) */
+#include "iwl-commands.h"
 
 #define IWL_TX_CRC_SIZE 4
 #define IWL_TX_DELIMITER_SIZE 4
 
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get as from it
- * by way of skb_get_queue_mapping(skb):
- *
- *	VO	0
- *	VI	1
- *	BE	2
- *	BK	3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific code), the AC->hw
- * queue mapping is the identity mapping.
- */
-
-static const u8 tid_to_ac[] = {
-	IEEE80211_AC_BE,
-	IEEE80211_AC_BK,
-	IEEE80211_AC_BK,
-	IEEE80211_AC_BE,
-	IEEE80211_AC_VI,
-	IEEE80211_AC_VI,
-	IEEE80211_AC_VO,
-	IEEE80211_AC_VO
-};
-
-
 /**
  * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
@@ -95,7 +60,7 @@
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;
 	struct iwl_tx_cmd *tx_cmd =
-		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;
+		(void *) txq->entries[txq->q.write_ptr].cmd->payload;
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
@@ -136,13 +101,15 @@
 	if (txq->need_update == 0)
 		return;
 
-	if (cfg(trans)->base_params->shadow_reg_enable) {
+	if (trans->cfg->base_params->shadow_reg_enable) {
 		/* shadow register enabled */
 		iwl_write32(trans, HBUS_TARG_WRPTR,
 			    txq->q.write_ptr | (txq_id << 8));
 	} else {
+		struct iwl_trans_pcie *trans_pcie =
+			IWL_TRANS_GET_PCIE_TRANS(trans);
 		/* if we're trying to save power */
-		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
+		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
 			/* wake up nic if it's powered down ...
 			 * uCode will wake up, and interrupt us again, so next
 			 * time we'll skip this part. */
@@ -256,13 +223,14 @@
 
 	lockdep_assert_held(&txq->lock);
 
-	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);
+	iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
+			 &tfd_tmp[index], dma_dir);
 
 	/* free SKB */
-	if (txq->skbs) {
+	if (txq->entries) {
 		struct sk_buff *skb;
 
-		skb = txq->skbs[index];
+		skb = txq->entries[index].skb;
 
 		/* Can be called from irqs-disabled context
 		 * If skb is not NULL, it means that the whole queue is being
@@ -270,7 +238,7 @@
 		 */
 		if (skb) {
 			iwl_op_mode_free_skb(trans->op_mode, skb);
-			txq->skbs[index] = NULL;
+			txq->entries[index].skb = NULL;
 		}
 	}
 }
@@ -393,7 +361,7 @@
 	u8 sta_id = 0;
 	__le16 bc_ent;
 	struct iwl_tx_cmd *tx_cmd =
-		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;
+		(void *)txq->entries[txq->q.read_ptr].cmd->payload;
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
@@ -448,20 +416,17 @@
 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
 				int txq_id, u32 index)
 {
-	IWL_DEBUG_TX_QUEUES(trans, "Q %d  WrPtr: %d", txq_id, index & 0xff);
+	IWL_DEBUG_TX_QUEUES(trans, "Q %d  WrPtr: %d\n", txq_id, index & 0xff);
 	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
 			(index & 0xff) | (txq_id << 8));
 	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
 }
 
 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
-					struct iwl_tx_queue *txq,
-					int tx_fifo_id, int scd_retry)
+				   struct iwl_tx_queue *txq,
+				   int tx_fifo_id, bool active)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int txq_id = txq->q.id;
-	int active =
-		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
 
 	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
 			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
@@ -469,77 +434,22 @@
 			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
 			SCD_QUEUE_STTS_REG_MSK);
 
-	txq->sched_retry = scd_retry;
-
 	if (active)
-		IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
-			scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
+		IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
+				    txq_id, tx_fifo_id);
 	else
-		IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
-			scd_retry ? "BA" : "AC/CMD", txq_id);
+		IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
 
-static inline int get_ac_from_tid(u16 tid)
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
+				 int sta_id, int tid, int frame_limit, u16 ssn)
 {
-	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-		return tid_to_ac[tid];
-
-	/* no support for TIDs 8-15 yet */
-	return -EINVAL;
-}
-
-static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
-				    u8 ctx, u16 tid)
-{
-	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
-	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-		return ac_to_fifo[tid_to_ac[tid]];
-
-	/* no support for TIDs 8-15 yet */
-	return -EINVAL;
-}
-
-static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
-{
-	if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
-		return false;
-	return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
-		hw_params(trans).num_ampdu_queues);
-}
-
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
-				 enum iwl_rxon_context_id ctx, int sta_id,
-				 int tid, int frame_limit, u16 ssn)
-{
-	int tx_fifo, txq_id;
-	u16 ra_tid;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	unsigned long flags;
+	u16 ra_tid = BUILD_RAxTID(sta_id, tid);
 
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	if (WARN_ON(sta_id == IWL_INVALID_STATION))
-		return;
-	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
-		return;
-
-	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
-	if (WARN_ON(tx_fifo < 0)) {
-		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
-		return;
-	}
-
-	txq_id = trans_pcie->agg_txq[sta_id][tid];
-	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
-		IWL_ERR(trans,
-			"queue number out of range: %d, must be %d to %d\n",
-			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
-			IWLAGN_FIRST_AMPDU_QUEUE +
-			hw_params(trans).num_ampdu_queues - 1);
-		return;
-	}
-
-	ra_tid = BUILD_RAxTID(sta_id, tid);
+	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
+		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
 
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
 
@@ -550,10 +460,10 @@
 	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
 
 	/* Set this queue as a chain-building queue */
-	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));
+	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
 
 	/* enable aggregations for the queue */
-	iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));
+	iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
 
 	/* Place first TFD at index corresponding to start sequence number.
 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
@@ -563,92 +473,42 @@
 
 	/* Set up Tx window size and frame limit for this queue */
 	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
-			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
-			sizeof(u32),
-			((frame_limit <<
-			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
-			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
-			((frame_limit <<
-			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
-			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
 
 	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
 
 	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
 	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
-					tx_fifo, 1);
-
-	trans_pcie->txq[txq_id].sta_id = sta_id;
-	trans_pcie->txq[txq_id].tid = tid;
+				      fifo, true);
 
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 }
 
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
+void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int txq_id;
 
-	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
-	     txq_id++)
-		if (!test_and_set_bit(txq_id,
-					&trans_pcie->txq_ctx_active_msk))
-			return txq_id;
-	return -1;
-}
-
-int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
-				int sta_id, int tid)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int txq_id;
-
-	txq_id = iwlagn_txq_ctx_activate_free(trans);
-	if (txq_id == -1) {
-		IWL_ERR(trans, "No free aggregation queue available\n");
-		return -ENXIO;
-	}
-
-	trans_pcie->agg_txq[sta_id][tid] = txq_id;
-	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
-
-	return 0;
-}
-
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
-
-	if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
-		IWL_ERR(trans,
-			"queue number out of range: %d, must be %d to %d\n",
-			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
-			IWLAGN_FIRST_AMPDU_QUEUE +
-			hw_params(trans).num_ampdu_queues - 1);
-		return -EINVAL;
+	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
+		WARN_ONCE(1, "queue %d not used", txq_id);
+		return;
 	}
 
 	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
 
-	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));
+	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
 
-	trans_pcie->agg_txq[sta_id][tid] = 0;
 	trans_pcie->txq[txq_id].q.read_ptr = 0;
 	trans_pcie->txq[txq_id].q.write_ptr = 0;
-	/* supposes that ssn_idx is valid (!= 0xFFF) */
 	iwl_trans_set_wr_ptrs(trans, txq_id, 0);
 
-	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
-	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
-	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
-	return 0;
+	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));
+
+	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
+				      0, false);
 }
 
 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
@@ -681,11 +541,6 @@
 	int trace_idx;
 #endif
 
-	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
-		IWL_WARN(trans, "fw recovery, no hcmd send\n");
-		return -EIO;
-	}
-
 	copy_size = sizeof(out_cmd->hdr);
 	cmd_size = sizeof(out_cmd->hdr);
 
@@ -726,8 +581,8 @@
 	}
 
 	idx = get_cmd_index(q, q->write_ptr);
-	out_cmd = txq->cmd[idx];
-	out_meta = &txq->meta[idx];
+	out_cmd = txq->entries[idx].cmd;
+	out_meta = &txq->entries[idx].meta;
 
 	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
 	if (cmd->flags & CMD_WANT_SKB)
@@ -753,12 +608,11 @@
 		cmd_dest += cmd->len[i];
 	}
 
-	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
-			"%d bytes at %d[%d]:%d\n",
-			get_cmd_string(out_cmd->hdr.cmd),
-			out_cmd->hdr.cmd,
-			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
-			q->write_ptr, idx, trans_pcie->cmd_queue);
+	IWL_DEBUG_HC(trans,
+		"Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+		trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+		out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
+		q->write_ptr, idx, trans_pcie->cmd_queue);
 
 	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
 				DMA_BIDIRECTIONAL);
@@ -816,6 +670,10 @@
 			       trace_bufs[2], trace_lens[2]);
 #endif
 
+	/* start timer if queue currently empty */
+	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
+		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	iwl_txq_update_write_ptr(trans, txq);
@@ -825,6 +683,22 @@
 	return idx;
 }
 
+static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
+				      struct iwl_tx_queue *txq)
+{
+	if (!trans_pcie->wd_timeout)
+		return;
+
+	/*
+	 * if empty delete timer, otherwise move timer forward
+	 * since we're making progress on this queue
+	 */
+	if (txq->q.read_ptr == txq->q.write_ptr)
+		del_timer(&txq->stuck_timer);
+	else
+		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+}
+
 /**
  * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
  *
@@ -859,6 +733,8 @@
 		}
 
 	}
+
+	iwl_queue_progress(trans_pcie, txq);
 }
 
 /**
@@ -899,10 +775,8 @@
 	spin_lock(&txq->lock);
 
 	cmd_index = get_cmd_index(&txq->q, index);
-	cmd = txq->cmd[cmd_index];
-	meta = &txq->meta[cmd_index];
-
-	txq->time_stamp = jiffies;
+	cmd = txq->entries[cmd_index].cmd;
+	meta = &txq->entries[cmd_index].meta;
 
 	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
 			 DMA_BIDIRECTIONAL);
@@ -913,21 +787,23 @@
 
 		meta->source->resp_pkt = pkt;
 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
-		meta->source->_rx_page_order = hw_params(trans).rx_page_order;
+		meta->source->_rx_page_order = trans_pcie->rx_page_order;
 		meta->source->handler_status = handler_status;
 	}
 
 	iwl_hcmd_queue_reclaim(trans, txq_id, index);
 
 	if (!(meta->flags & CMD_ASYNC)) {
-		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
+		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
 			IWL_WARN(trans,
 				 "HCMD_ACTIVE already clear for command %s\n",
-				 get_cmd_string(cmd->hdr.cmd));
+				 trans_pcie_get_cmd_string(trans_pcie,
+							   cmd->hdr.cmd));
 		}
-		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
-			       get_cmd_string(cmd->hdr.cmd));
+			       trans_pcie_get_cmd_string(trans_pcie,
+							 cmd->hdr.cmd));
 		wake_up(&trans->wait_command_queue);
 	}
 
@@ -940,6 +816,7 @@
 
 static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;
 
 	/* An asynchronous command can not expect an SKB to be set. */
@@ -951,7 +828,7 @@
 	if (ret < 0) {
 		IWL_ERR(trans,
 			"Error sending %s: enqueue_hcmd failed: %d\n",
-			  get_cmd_string(cmd->id), ret);
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
 		return ret;
 	}
 	return 0;
@@ -964,55 +841,51 @@
 	int ret;
 
 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
-			get_cmd_string(cmd->id));
-
-	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
-		IWL_ERR(trans, "Command %s failed: FW Error\n",
-			       get_cmd_string(cmd->id));
-		return -EIO;
-	}
+		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));
 
 	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
-				     &trans->shrd->status))) {
+				     &trans_pcie->status))) {
 		IWL_ERR(trans, "Command %s: a command is already active!\n",
-			get_cmd_string(cmd->id));
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
 		return -EIO;
 	}
 
 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
-			get_cmd_string(cmd->id));
+		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));
 
 	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
 	if (cmd_idx < 0) {
 		ret = cmd_idx;
-		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 		IWL_ERR(trans,
 			"Error sending %s: enqueue_hcmd failed: %d\n",
-			  get_cmd_string(cmd->id), ret);
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
 		return ret;
 	}
 
 	ret = wait_event_timeout(trans->wait_command_queue,
-			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
+			!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status),
 			HOST_COMPLETE_TIMEOUT);
 	if (!ret) {
-		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
+		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
 			struct iwl_tx_queue *txq =
 				&trans_pcie->txq[trans_pcie->cmd_queue];
 			struct iwl_queue *q = &txq->q;
 
 			IWL_ERR(trans,
 				"Error sending %s: time out after %dms.\n",
-				get_cmd_string(cmd->id),
+				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
 				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 
 			IWL_ERR(trans,
 				"Current CMD queue read_ptr %d write_ptr %d\n",
 				q->read_ptr, q->write_ptr);
 
-			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
-			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
-				 "%s\n", get_cmd_string(cmd->id));
+			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+			IWL_DEBUG_INFO(trans,
+				       "Clearing HCMD_ACTIVE for command %s\n",
+				       trans_pcie_get_cmd_string(trans_pcie,
+								 cmd->id));
 			ret = -ETIMEDOUT;
 			goto cancel;
 		}
@@ -1020,7 +893,7 @@
 
 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
 		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
-			  get_cmd_string(cmd->id));
+			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
 		ret = -EIO;
 		goto cancel;
 	}
@@ -1035,8 +908,8 @@
 		 * in later, it will possibly set an invalid
 		 * address (cmd->meta.source).
 		 */
-		trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &=
-							~CMD_WANT_SKB;
+		trans_pcie->txq[trans_pcie->cmd_queue].
+			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
 	}
 
 	if (cmd->resp_pkt) {
@@ -1091,17 +964,20 @@
 	     q->read_ptr != index;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
-		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
+		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
 			continue;
 
-		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);
+		__skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
 
-		txq->skbs[txq->q.read_ptr] = NULL;
+		txq->entries[txq->q.read_ptr].skb = NULL;
 
 		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
 
 		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
 		freed++;
 	}
+
+	iwl_queue_progress(trans_pcie, txq);
+
 	return freed;
 }
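
[Editor's note, not part of the patch: the iwl-trans-pcie-tx.c hunks above drop the old per-queue time_stamp polling and instead arm a per-queue stuck_timer when a command or frame is queued into an empty queue, then push the deadline forward (or delete it) in iwl_queue_progress() as frames are reclaimed. The following is a minimal user-space sketch of that arm/advance/disarm pattern only; every name in it (struct queue, queue_progress, WD_TIMEOUT, ...) is a hypothetical stand-in, not the driver's API.]

/*
 * Sketch of the stuck-queue watchdog pattern: arm a deadline when the
 * queue goes non-empty, move it forward on progress, drop it when empty.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct queue {
	unsigned read_ptr, write_ptr;
	time_t deadline;		/* 0 == watchdog disarmed */
};

#define WD_TIMEOUT 2			/* seconds, stand-in for wd_timeout */

static void queue_enqueue(struct queue *q)
{
	/* start the watchdog only if the queue was empty until now */
	if (q->read_ptr == q->write_ptr)
		q->deadline = time(NULL) + WD_TIMEOUT;
	q->write_ptr++;
}

static void queue_progress(struct queue *q)
{
	/* mirrors iwl_queue_progress(): empty -> disarm, else re-arm */
	if (q->read_ptr == q->write_ptr)
		q->deadline = 0;
	else
		q->deadline = time(NULL) + WD_TIMEOUT;
}

static bool queue_stuck(const struct queue *q)
{
	/* "stuck" means still non-empty and past its deadline */
	return q->deadline && time(NULL) > q->deadline &&
	       q->read_ptr != q->write_ptr;
}

int main(void)
{
	struct queue q = { 0, 0, 0 };

	queue_enqueue(&q);	/* arms the watchdog */
	q.read_ptr++;		/* pretend the reclaim path caught up */
	queue_progress(&q);	/* queue empty again -> watchdog disarmed */
	printf("stuck: %d\n", queue_stuck(&q));
	return 0;
}
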
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 4d7b30d..2e57161 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -68,18 +68,20 @@
 #include <linux/bitops.h>
 #include <linux/gfp.h>
 
+#include "iwl-drv.h"
 #include "iwl-trans.h"
 #include "iwl-trans-pcie-int.h"
 #include "iwl-csr.h"
 #include "iwl-prph.h"
-#include "iwl-shared.h"
 #include "iwl-eeprom.h"
 #include "iwl-agn-hw.h"
+/* FIXME: need to abstract out TX command (once we know what it looks like) */
+#include "iwl-commands.h"
 
 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
 
 #define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie)	\
-	(((1<<cfg(trans)->base_params->num_of_queues) - 1) &\
+	(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
 	(~(1<<(trans_pcie)->cmd_queue)))
 
 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
@@ -132,10 +134,10 @@
 		 * to an SKB, so we need to unmap and free potential storage */
 		if (rxq->pool[i].page != NULL) {
 			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
-				PAGE_SIZE << hw_params(trans).rx_page_order,
+				PAGE_SIZE << trans_pcie->rx_page_order,
 				DMA_FROM_DEVICE);
 			__free_pages(rxq->pool[i].page,
-				     hw_params(trans).rx_page_order);
+				     trans_pcie->rx_page_order);
 			rxq->pool[i].page = NULL;
 		}
 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -145,11 +147,12 @@
 static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
 				 struct iwl_rx_queue *rxq)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 rb_size;
 	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
 	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
 
-	if (iwlagn_mod_params.amsdu_size_8K)
+	if (trans_pcie->rx_buf_size_8k)
 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
 	else
 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
@@ -180,7 +183,6 @@
 			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
 			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
 			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
-			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
 			   rb_size|
 			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
 			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
@@ -299,6 +301,33 @@
 	memset(ptr, 0, sizeof(*ptr));
 }
 
+static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
+{
+	struct iwl_tx_queue *txq = (void *)data;
+	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+
+	spin_lock(&txq->lock);
+	/* check if triggered erroneously */
+	if (txq->q.read_ptr == txq->q.write_ptr) {
+		spin_unlock(&txq->lock);
+		return;
+	}
+	spin_unlock(&txq->lock);
+
+
+	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+		jiffies_to_msecs(trans_pcie->wd_timeout));
+	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+		txq->q.read_ptr, txq->q.write_ptr);
+	IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
+		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
+					& (TFD_QUEUE_SIZE_MAX - 1),
+		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));
+
+	iwl_op_mode_nic_error(trans->op_mode);
+}
+
 static int iwl_trans_txq_alloc(struct iwl_trans *trans,
 				struct iwl_tx_queue *txq, int slots_num,
 				u32 txq_id)
@@ -307,40 +336,31 @@
 	int i;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
+	if (WARN_ON(txq->entries || txq->tfds))
 		return -EINVAL;
 
+	setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
+		    (unsigned long)txq);
+	txq->trans_pcie = trans_pcie;
+
 	txq->q.n_window = slots_num;
 
-	txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
-	txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);
+	txq->entries = kcalloc(slots_num,
+			       sizeof(struct iwl_pcie_tx_queue_entry),
+			       GFP_KERNEL);
 
-	if (!txq->meta || !txq->cmd)
+	if (!txq->entries)
 		goto error;
 
 	if (txq_id == trans_pcie->cmd_queue)
 		for (i = 0; i < slots_num; i++) {
-			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
-						GFP_KERNEL);
-			if (!txq->cmd[i])
+			txq->entries[i].cmd =
+				kmalloc(sizeof(struct iwl_device_cmd),
+					GFP_KERNEL);
+			if (!txq->entries[i].cmd)
 				goto error;
 		}
 
-	/* Alloc driver data array and TFD circular buffer */
-	/* Driver private data, only for Tx (not command) queues,
-	 * not shared with device. */
-	if (txq_id != trans_pcie->cmd_queue) {
-		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
-				    GFP_KERNEL);
-		if (!txq->skbs) {
-			IWL_ERR(trans, "kmalloc for auxiliary BD "
-				  "structures failed\n");
-			goto error;
-		}
-	} else {
-		txq->skbs = NULL;
-	}
-
 	/* Circular buffer of transmit frame descriptors (TFDs),
 	 * shared with device */
 	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
@@ -353,37 +373,22 @@
 
 	return 0;
 error:
-	kfree(txq->skbs);
-	txq->skbs = NULL;
-	/* since txq->cmd has been zeroed,
-	 * all non allocated cmd[i] will be NULL */
-	if (txq->cmd && txq_id == trans_pcie->cmd_queue)
+	if (txq->entries && txq_id == trans_pcie->cmd_queue)
 		for (i = 0; i < slots_num; i++)
-			kfree(txq->cmd[i]);
-	kfree(txq->meta);
-	kfree(txq->cmd);
-	txq->meta = NULL;
-	txq->cmd = NULL;
+			kfree(txq->entries[i].cmd);
+	kfree(txq->entries);
+	txq->entries = NULL;
 
 	return -ENOMEM;
 
 }
 
 static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-		      int slots_num, u32 txq_id)
+			      int slots_num, u32 txq_id)
 {
 	int ret;
 
 	txq->need_update = 0;
-	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
-
-	/*
-	 * For the default queues 0-3, set up the swq_id
-	 * already -- all others need to get one later
-	 * (if they need one at all).
-	 */
-	if (txq_id < 4)
-		iwl_set_swq_id(txq, txq_id, txq_id);
 
 	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
 	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
@@ -461,7 +466,7 @@
 
 	if (txq_id == trans_pcie->cmd_queue)
 		for (i = 0; i < txq->q.n_window; i++)
-			kfree(txq->cmd[i]);
+			kfree(txq->entries[i].cmd);
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd) {
@@ -470,15 +475,10 @@
 		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
 	}
 
-	/* De-alloc array of per-TFD driver data */
-	kfree(txq->skbs);
-	txq->skbs = NULL;
+	kfree(txq->entries);
+	txq->entries = NULL;
 
-	/* deallocate arrays */
-	kfree(txq->cmd);
-	kfree(txq->meta);
-	txq->cmd = NULL;
-	txq->meta = NULL;
+	del_timer_sync(&txq->stuck_timer);
 
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
@@ -497,7 +497,7 @@
 	/* Tx queues */
 	if (trans_pcie->txq) {
 		for (txq_id = 0;
-		     txq_id < cfg(trans)->base_params->num_of_queues; txq_id++)
+		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
 			iwl_tx_queue_free(trans, txq_id);
 	}
 
@@ -522,7 +522,7 @@
 	int txq_id, slots_num;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues *
+	u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
 			sizeof(struct iwlagn_scd_bc_tbl);
 
 	/*It is not allowed to alloc twice, so warn when this happens.
@@ -546,7 +546,7 @@
 		goto error;
 	}
 
-	trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues,
+	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
 				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
 	if (!trans_pcie->txq) {
 		IWL_ERR(trans, "Not enough memory for txq\n");
@@ -555,7 +555,7 @@
 	}
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
 	     txq_id++) {
 		slots_num = (txq_id == trans_pcie->cmd_queue) ?
 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
@@ -601,7 +601,7 @@
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
 	     txq_id++) {
 		slots_num = (txq_id == trans_pcie->cmd_queue) ?
 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
@@ -724,9 +724,9 @@
 	iwl_apm_config(trans);
 
 	/* Configure analog phase-lock-loop before activating to D0A */
-	if (cfg(trans)->base_params->pll_cfg_val)
+	if (trans->cfg->base_params->pll_cfg_val)
 		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
-			    cfg(trans)->base_params->pll_cfg_val);
+			    trans->cfg->base_params->pll_cfg_val);
 
 	/*
 	 * Set "initialization complete" bit to move adapter from
@@ -836,7 +836,7 @@
 	if (iwl_tx_init(trans))
 		return -ENOMEM;
 
-	if (cfg(trans)->base_params->shadow_reg_enable) {
+	if (trans->cfg->base_params->shadow_reg_enable) {
 		/* enable shadow regs in HW */
 		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
 			0x800FFFFF);
@@ -895,59 +895,6 @@
 	return ret;
 }
 
-#define IWL_AC_UNSET -1
-
-struct queue_to_fifo_ac {
-	s8 fifo, ac;
-};
-
-static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
-	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
-	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
-	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
-	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
-	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-};
-
-static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
-	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
-	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
-	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
-	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
-	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
-	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
-	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
-	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
-	{ IWL_TX_FIFO_BE_IPAN, 2, },
-	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
-	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
-};
-
-static const u8 iwlagn_bss_ac_to_fifo[] = {
-	IWL_TX_FIFO_VO,
-	IWL_TX_FIFO_VI,
-	IWL_TX_FIFO_BE,
-	IWL_TX_FIFO_BK,
-};
-static const u8 iwlagn_bss_ac_to_queue[] = {
-	0, 1, 2, 3,
-};
-static const u8 iwlagn_pan_ac_to_fifo[] = {
-	IWL_TX_FIFO_VO_IPAN,
-	IWL_TX_FIFO_VI_IPAN,
-	IWL_TX_FIFO_BE_IPAN,
-	IWL_TX_FIFO_BK_IPAN,
-};
-static const u8 iwlagn_pan_ac_to_queue[] = {
-	7, 6, 5, 4,
-};
-
 /*
  * ucode
  */
@@ -1028,34 +975,21 @@
 				   const struct fw_img *fw)
 {
 	int ret;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill;
 
-	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
-	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
-
-	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
-	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;
-
-	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
-	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
-
 	/* This may fail if AMT took ownership of the device */
 	if (iwl_prepare_card_hw(trans)) {
 		IWL_WARN(trans, "Exit HW not ready\n");
 		return -EIO;
 	}
 
-	/* If platform's RF_KILL switch is NOT set to KILL */
-	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
-				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
-	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+	iwl_enable_rfkill_int(trans);
 
-	if (hw_rfkill) {
-		iwl_enable_rfkill_int(trans);
+	/* If platform's RF_KILL switch is NOT set to KILL */
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+	if (hw_rfkill)
 		return -ERFKILL;
-	}
 
 	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
@@ -1098,9 +1032,7 @@
 
 static void iwl_tx_start(struct iwl_trans *trans)
 {
-	const struct queue_to_fifo_ac *queue_to_fifo;
-	struct iwl_trans_pcie *trans_pcie =
-		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 a;
 	unsigned long flags;
 	int i, chan;
@@ -1121,7 +1053,7 @@
 		iwl_write_targ_mem(trans, a, 0);
 	for (; a < trans_pcie->scd_base_addr +
 	       SCD_TRANS_TBL_OFFSET_QUEUE(
-				cfg(trans)->base_params->num_of_queues);
+				trans->cfg->base_params->num_of_queues);
 	       a += 4)
 		iwl_write_targ_mem(trans, a, 0);
 
@@ -1144,7 +1076,7 @@
 	iwl_write_prph(trans, SCD_AGGR_SEL, 0);
 
 	/* initiate the queues */
-	for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) {
+	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
 		iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
 		iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
 		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
@@ -1161,46 +1093,24 @@
 	}
 
 	iwl_write_prph(trans, SCD_INTERRUPT_MASK,
-			IWL_MASK(0, cfg(trans)->base_params->num_of_queues));
+			IWL_MASK(0, trans->cfg->base_params->num_of_queues));
 
 	/* Activate all Tx DMA/FIFO channels */
 	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
 
-	/* map queues to FIFOs */
-	if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
-		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
-	else
-		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
-
 	iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
 
-	/* make sure all queue are not stopped */
-	memset(&trans_pcie->queue_stopped[0], 0,
-		sizeof(trans_pcie->queue_stopped));
-	for (i = 0; i < 4; i++)
-		atomic_set(&trans_pcie->queue_stop_count[i], 0);
+	/* make sure all queue are not stopped/used */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
 
-	/* reset to 0 to enable all the queue first */
-	trans_pcie->txq_ctx_active_msk = 0;
+	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
+		int fifo = trans_pcie->setup_q_to_fifo[i];
 
-	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
-						IWLAGN_FIRST_AMPDU_QUEUE);
-	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
-						IWLAGN_FIRST_AMPDU_QUEUE);
+		set_bit(i, trans_pcie->queue_used);
 
-	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
-		int fifo = queue_to_fifo[i].fifo;
-		int ac = queue_to_fifo[i].ac;
-
-		iwl_txq_ctx_activate(trans_pcie, i);
-
-		if (fifo == IWL_TX_FIFO_UNUSED)
-			continue;
-
-		if (ac != IWL_AC_UNSET)
-			iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
 		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
-					      fifo, 0);
+					      fifo, true);
 	}
 
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
@@ -1251,7 +1161,7 @@
 	}
 
 	/* Unmap DMA from host system and free skb's */
-	for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
 	     txq_id++)
 		iwl_tx_queue_unmap(trans, txq_id);
 
@@ -1303,6 +1213,8 @@
 	iwl_disable_interrupts(trans);
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
+	iwl_enable_rfkill_int(trans);
+
 	/* wait to make sure we flush pending tasklet*/
 	synchronize_irq(trans_pcie->irq);
 	tasklet_kill(&trans_pcie->irq_tasklet);
@@ -1311,6 +1223,12 @@
 
 	/* stop and reset the on-board processor */
 	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+	/* clear all status bits */
+	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
+	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
+	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
 }
 
 static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
@@ -1325,81 +1243,43 @@
 }
 
 static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
-		u8 sta_id, u8 tid)
+			     struct iwl_device_cmd *dev_cmd, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_tx_queue *txq;
 	struct iwl_queue *q;
-
 	dma_addr_t phys_addr = 0;
 	dma_addr_t txcmd_phys;
 	dma_addr_t scratch_phys;
 	u16 len, firstlen, secondlen;
 	u8 wait_write_ptr = 0;
-	u8 txq_id;
-	bool is_agg = false;
 	__le16 fc = hdr->frame_control;
 	u8 hdr_len = ieee80211_hdrlen(fc);
 	u16 __maybe_unused wifi_seq;
 
-	/*
-	 * Send this frame after DTIM -- there's a special queue
-	 * reserved for this for contexts that support AP mode.
-	 */
-	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
-		txq_id = trans_pcie->mcast_queue[ctx];
-
-		/*
-		 * The microcode will clear the more data
-		 * bit in the last frame it transmits.
-		 */
-		hdr->frame_control |=
-			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
-		txq_id = IWL_AUX_QUEUE;
-	else
-		txq_id =
-		    trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
-
-	/* aggregation is on for this <sta,tid> */
-	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-		WARN_ON(tid >= IWL_MAX_TID_COUNT);
-		txq_id = trans_pcie->agg_txq[sta_id][tid];
-		is_agg = true;
-	}
-
 	txq = &trans_pcie->txq[txq_id];
 	q = &txq->q;
 
+	if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
 	spin_lock(&txq->lock);
 
-	/* In AGG mode, the index in the ring must correspond to the WiFi
-	 * sequence number. This is a HW requirements to help the SCD to parse
-	 * the BA.
-	 * Check here that the packets are in the right place on the ring.
-	 */
-#ifdef CONFIG_IWLWIFI_DEBUG
-	wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
-	WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
-		  "Q: %d WiFi Seq %d tfdNum %d",
-		  txq_id, wifi_seq, q->write_ptr);
-#endif
-
 	/* Set up driver data for this TFD */
-	txq->skbs[q->write_ptr] = skb;
-	txq->cmd[q->write_ptr] = dev_cmd;
+	txq->entries[q->write_ptr].skb = skb;
+	txq->entries[q->write_ptr].cmd = dev_cmd;
 
 	dev_cmd->hdr.cmd = REPLY_TX;
 	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
 				INDEX_TO_SEQ(q->write_ptr)));
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
-	out_meta = &txq->meta[q->write_ptr];
+	out_meta = &txq->entries[q->write_ptr].meta;
 
 	/*
 	 * Use the first empty entry in this queue's command buffer array
@@ -1481,6 +1361,10 @@
 			     &dev_cmd->hdr, firstlen,
 			     skb->data + hdr_len, secondlen);
 
+	/* start timer if queue currently empty */
+	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
+		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+
 	/* Tell device the write index *just past* this latest filled TFD */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	iwl_txq_update_write_ptr(trans, txq);
@@ -1541,8 +1425,10 @@
 
 	iwl_apm_init(trans);
 
-	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
-				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+	/* From now on, the op_mode will be kept updated about RF kill state */
+	iwl_enable_rfkill_int(trans);
+
+	hw_rfkill = iwl_is_rfkill_set(trans);
 	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
 	return err;
@@ -1555,18 +1441,41 @@
 	return err;
 }
 
-static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans)
+static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
+				   bool op_mode_leaving)
 {
+	bool hw_rfkill;
+	unsigned long flags;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
 	iwl_apm_stop(trans);
 
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	iwl_disable_interrupts(trans);
+	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+
 	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
-	/* Even if we stop the HW, we still want the RF kill interrupt */
-	iwl_enable_rfkill_int(trans);
+	if (!op_mode_leaving) {
+		/*
+		 * Even if we stop the HW, we still want the RF kill
+		 * interrupt
+		 */
+		iwl_enable_rfkill_int(trans);
+
+		/*
+		 * Check again since the RF kill state may have changed while
+		 * all the interrupts were disabled, in this case we couldn't
+		 * receive the RF kill interrupt and update the state in the
+		 * op_mode.
+		 */
+		hw_rfkill = iwl_is_rfkill_set(trans);
+		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+	}
 }
 
-static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
-		      int txq_id, int ssn, struct sk_buff_head *skbs)
+static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+				   struct sk_buff_head *skbs)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -1576,35 +1485,15 @@
 
 	spin_lock(&txq->lock);
 
-	txq->time_stamp = jiffies;
-
-	if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
-		     tid != IWL_TID_NON_QOS &&
-		     txq_id != trans_pcie->agg_txq[sta_id][tid])) {
-		/*
-		 * FIXME: this is a uCode bug which need to be addressed,
-		 * log the information and return for now.
-		 * Since it is can possibly happen very often and in order
-		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
-		 */
-		IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
-			"agg_txq[sta_id[tid] %d", txq_id,
-			trans_pcie->agg_txq[sta_id][tid]);
-		spin_unlock(&txq->lock);
-		return 1;
-	}
-
 	if (txq->q.read_ptr != tfd_num) {
-		IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
-				txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
-				tfd_num, ssn);
+		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+				   txq_id, txq->q.read_ptr, tfd_num, ssn);
 		freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
 		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
 			iwl_wake_queue(trans, txq);
 	}
 
 	spin_unlock(&txq->lock);
-	return 0;
 }
 
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1623,7 +1512,7 @@
 }
 
 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
-			      const struct iwl_trans_config *trans_cfg)
+				     const struct iwl_trans_config *trans_cfg)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -1635,9 +1524,31 @@
 	if (trans_pcie->n_no_reclaim_cmds)
 		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
 		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
+
+	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
+
+	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
+		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
+
+	/* at least the command queue must be mapped */
+	WARN_ON(!trans_pcie->n_q_to_fifo);
+
+	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
+	       trans_pcie->n_q_to_fifo * sizeof(u8));
+
+	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
+	if (trans_pcie->rx_buf_size_8k)
+		trans_pcie->rx_page_order = get_order(8 * 1024);
+	else
+		trans_pcie->rx_page_order = get_order(4 * 1024);
+
+	trans_pcie->wd_timeout =
+		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
+
+	trans_pcie->command_names = trans_cfg->command_names;
 }
 
-static void iwl_trans_pcie_free(struct iwl_trans *trans)
+void iwl_trans_pcie_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1656,10 +1567,19 @@
 	pci_release_regions(trans_pcie->pci_dev);
 	pci_disable_device(trans_pcie->pci_dev);
 
-	trans->shrd->trans = NULL;
 	kfree(trans);
 }
 
+static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (state)
+		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+	else
+		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 {
@@ -1670,16 +1590,14 @@
 {
 	bool hw_rfkill;
 
-	hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) &
-				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+	iwl_enable_rfkill_int(trans);
 
-	if (hw_rfkill)
-		iwl_enable_rfkill_int(trans);
-	else
-		iwl_enable_interrupts(trans);
-
+	hw_rfkill = iwl_is_rfkill_set(trans);
 	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
+	if (!hw_rfkill)
+		iwl_enable_interrupts(trans);
+
 	return 0;
 }
 #endif /* CONFIG_PM_SLEEP */
@@ -1696,7 +1614,7 @@
 	int ret = 0;
 
 	/* waiting for all the tx frames complete might take a while */
-	for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
+	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
 		if (cnt == trans_pcie->cmd_queue)
 			continue;
 		txq = &trans_pcie->txq[cnt];
@@ -1714,42 +1632,9 @@
 	return ret;
 }
 
-/*
- * On every watchdog tick we check (latest) time stamp. If it does not
- * change during timeout period and queue is not empty we reset firmware.
- */
-static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
-	struct iwl_queue *q = &txq->q;
-	unsigned long timeout;
-
-	if (q->read_ptr == q->write_ptr) {
-		txq->time_stamp = jiffies;
-		return 0;
-	}
-
-	timeout = txq->time_stamp +
-		  msecs_to_jiffies(hw_params(trans).wd_timeout);
-
-	if (time_after(jiffies, timeout)) {
-		IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
-			hw_params(trans).wd_timeout);
-		IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
-			q->read_ptr, q->write_ptr);
-		IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
-			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt))
-				& (TFD_QUEUE_SIZE_MAX - 1),
-			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
-		return 1;
-	}
-
-	return 0;
-}
-
 static const char *get_fh_string(int cmd)
 {
+#define IWL_CMD(x) case x: return #x
 	switch (cmd) {
 	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
 	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
@@ -1763,6 +1648,7 @@
 	default:
 		return "UNKNOWN";
 	}
+#undef IWL_CMD
 }
 
 int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
@@ -1811,6 +1697,7 @@
 
 static const char *get_csr_string(int cmd)
 {
+#define IWL_CMD(x) case x: return #x
 	switch (cmd) {
 	IWL_CMD(CSR_HW_IF_CONFIG_REG);
 	IWL_CMD(CSR_INT_COALESCING);
@@ -1838,6 +1725,7 @@
 	default:
 		return "UNKNOWN";
 	}
+#undef IWL_CMD
 }
 
 void iwl_dump_csr(struct iwl_trans *trans)
@@ -1938,32 +1826,23 @@
 	int ret;
 	size_t bufsz;
 
-	bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues;
+	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
 
-	if (!trans_pcie->txq) {
-		IWL_ERR(trans, "txq not ready\n");
+	if (!trans_pcie->txq)
 		return -EAGAIN;
-	}
+
 	buf = kzalloc(bufsz, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
-	for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) {
+	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
 		txq = &trans_pcie->txq[cnt];
 		q = &txq->q;
 		pos += scnprintf(buf + pos, bufsz - pos,
-				"hwq %.2d: read=%u write=%u stop=%d"
-				" swq_id=%#.2x (ac %d/hwq %d)\n",
+				"hwq %.2d: read=%u write=%u use=%d stop=%d\n",
 				cnt, q->read_ptr, q->write_ptr,
-				!!test_bit(cnt, trans_pcie->queue_stopped),
-				txq->swq_id, txq->swq_id & 3,
-				(txq->swq_id >> 2) & 0x1f);
-		if (cnt >= 4)
-			continue;
-		/* for the ACs, display the stop count too */
-		pos += scnprintf(buf + pos, bufsz - pos,
-			"        stop-count: %d\n",
-			atomic_read(&trans_pcie->queue_stop_count[cnt]));
+				!!test_bit(cnt, trans_pcie->queue_used),
+				!!test_bit(cnt, trans_pcie->queue_stopped));
 	}
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 	kfree(buf);
@@ -1997,44 +1876,6 @@
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-static ssize_t iwl_dbgfs_log_event_read(struct file *file,
-					 char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct iwl_trans *trans = file->private_data;
-	char *buf;
-	int pos = 0;
-	ssize_t ret = -ENOMEM;
-
-	ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
-	if (buf) {
-		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-		kfree(buf);
-	}
-	return ret;
-}
-
-static ssize_t iwl_dbgfs_log_event_write(struct file *file,
-					const char __user *user_buf,
-					size_t count, loff_t *ppos)
-{
-	struct iwl_trans *trans = file->private_data;
-	u32 event_log_flag;
-	char buf[8];
-	int buf_size;
-
-	memset(buf, 0, sizeof(buf));
-	buf_size = min(count, sizeof(buf) -  1);
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	if (sscanf(buf, "%d", &event_log_flag) != 1)
-		return -EFAULT;
-	if (event_log_flag == 1)
-		iwl_dump_nic_event_log(trans, true, NULL, false);
-
-	return count;
-}
-
 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
 					char __user *user_buf,
 					size_t count, loff_t *ppos) {
@@ -2050,10 +1891,8 @@
 	ssize_t ret;
 
 	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf) {
-		IWL_ERR(trans, "Can not allocate Buffer\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
 	pos += scnprintf(buf + pos, bufsz - pos,
 			"Interrupt Statistics Report:\n");
@@ -2161,12 +2000,26 @@
 	return ret;
 }
 
-DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
+					  const char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+
+	if (!trans->op_mode)
+		return -EAGAIN;
+
+	iwl_op_mode_nic_error(trans->op_mode);
+
+	return count;
+}
+
 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
 DEBUGFS_READ_FILE_OPS(fh_reg);
 DEBUGFS_READ_FILE_OPS(rx_queue);
 DEBUGFS_READ_FILE_OPS(tx_queue);
 DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_WRITE_FILE_OPS(fw_restart);
 
 /*
  * Create the debugfs files and directories
@@ -2177,10 +2030,10 @@
 {
 	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
 	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
-	DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
 	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
 	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
 	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+	DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
 	return 0;
 }
 #else
@@ -2190,7 +2043,7 @@
 
 #endif /*CONFIG_IWLWIFI_DEBUGFS */
 
-const struct iwl_trans_ops trans_ops_pcie = {
+static const struct iwl_trans_ops trans_ops_pcie = {
 	.start_hw = iwl_trans_pcie_start_hw,
 	.stop_hw = iwl_trans_pcie_stop_hw,
 	.fw_alive = iwl_trans_pcie_fw_alive,
@@ -2205,15 +2058,11 @@
 	.reclaim = iwl_trans_pcie_reclaim,
 
 	.tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
-	.tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
 	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
 
-	.free = iwl_trans_pcie_free,
-
 	.dbgfs_register = iwl_trans_pcie_dbgfs_register,
 
 	.wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
-	.check_stuck_queue = iwl_trans_pcie_check_stuck_queue,
 
 #ifdef CONFIG_PM_SLEEP
 	.suspend = iwl_trans_pcie_suspend,
@@ -2223,11 +2072,12 @@
 	.write32 = iwl_trans_pcie_write32,
 	.read32 = iwl_trans_pcie_read32,
 	.configure = iwl_trans_pcie_configure,
+	.set_pmi = iwl_trans_pcie_set_pmi,
 };
 
-struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
-				       struct pci_dev *pdev,
-				       const struct pci_device_id *ent)
+struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+				       const struct pci_device_id *ent,
+				       const struct iwl_cfg *cfg)
 {
 	struct iwl_trans_pcie *trans_pcie;
 	struct iwl_trans *trans;
@@ -2243,7 +2093,7 @@
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	trans->ops = &trans_ops_pcie;
-	trans->shrd = shrd;
+	trans->cfg = cfg;
 	trans_pcie->trans = trans;
 	spin_lock_init(&trans_pcie->irq_lock);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
@@ -2325,6 +2175,7 @@
 
 	/* Initialize the wait queue for commands */
 	init_waitqueue_head(&trans->wait_command_queue);
+	spin_lock_init(&trans->reg_lock);
 
 	return trans;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 0c81cba..79a1e7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -66,8 +66,9 @@
 #include <linux/ieee80211.h>
 #include <linux/mm.h> /* for page_address */
 
-#include "iwl-shared.h"
 #include "iwl-debug.h"
+#include "iwl-config.h"
+#include "iwl-fw.h"
 
 /**
  * DOC: Transport layer - what is it ?
@@ -104,13 +105,6 @@
  *	6) Eventually, the free function will be called.
  */
 
-struct iwl_priv;
-struct iwl_shared;
-struct iwl_op_mode;
-struct fw_img;
-struct sk_buff;
-struct dentry;
-
 /**
  * DOC: Host command section
  *
@@ -162,6 +156,8 @@
 
 
 #define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
+#define FH_RSCSR_FRAME_INVALID		0x55550000
+#define FH_RSCSR_FRAME_ALIGN		0x40
 
 struct iwl_rx_packet {
 	/*
@@ -260,27 +256,43 @@
 
 struct iwl_rx_cmd_buffer {
 	struct page *_page;
+	int _offset;
+	bool _page_stolen;
+	unsigned int truesize;
 };
 
 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
 {
-	return page_address(r->_page);
+	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
+}
+
+static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
+{
+	return r->_offset;
 }
 
 static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
 {
-	struct page *p = r->_page;
-	r->_page = NULL;
-	return p;
+	r->_page_stolen = true;
+	get_page(r->_page);
+	return r->_page;
 }
 
 #define MAX_NO_RECLAIM_CMDS	6
 
+/*
+ * Maximum number of HW queues the transport layer
+ * currently supports
+ */
+#define IWL_MAX_HW_QUEUES		32
+
 /**
  * struct iwl_trans_config - transport configuration
  *
  * @op_mode: pointer to the upper layer.
- *	Must be set before any other call.
+ * @queue_to_fifo: queue to FIFO mapping to set up by
+ *	default
+ * @n_queue_to_fifo: number of queues to set up
  * @cmd_queue: the index of the command queue.
  *	Must be set before start_fw.
  * @no_reclaim_cmds: Some devices erroneously don't set the
@@ -288,14 +300,29 @@
  *	list of such notifications to filter. Max length is
  *	%MAX_NO_RECLAIM_CMDS.
  * @n_no_reclaim_cmds: # of commands in list
+ * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs,
+ *	if unset 4k will be the RX buffer size
+ * @queue_watchdog_timeout: time (in ms) after which queues
+ *	are considered stuck and will trigger device restart
+ * @command_names: array of command names, must be 256 entries
+ *	(one for each command); for debugging only
  */
 struct iwl_trans_config {
 	struct iwl_op_mode *op_mode;
+	const u8 *queue_to_fifo;
+	u8 n_queue_to_fifo;
+
 	u8 cmd_queue;
 	const u8 *no_reclaim_cmds;
 	int n_no_reclaim_cmds;
+
+	bool rx_buf_size_8k;
+	unsigned int queue_watchdog_timeout;
+	const char **command_names;
 };
 
+struct iwl_trans;
+
 /**
  * struct iwl_trans_ops - transport specific operations
  *
@@ -304,7 +331,8 @@
  * @start_hw: starts the HW- from that point on, the HW can send interrupts
  *	May sleep
  * @stop_hw: stops the HW- from that point on, the HW will be in low power but
- *	will still issue interrupt if the HW RF kill is triggered.
+ *	will still issue interrupt if the HW RF kill is triggered unless
+ *	op_mode_leaving is true.
  *	May sleep
  * @start_fw: allocates and inits all the resources for the transport
  *	layer. Also kick a fw image.
@@ -322,18 +350,11 @@
  *	Must be atomic
  * @reclaim: free packet until ssn. Returns a list of freed packets.
  *	Must be atomic
- * @tx_agg_alloc: allocate resources for a TX BA session
- *	Must be atomic
  * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
  *	ready and a successful ADDBA response has been received.
  *	May sleep
  * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
  *	Must be atomic
- * @free: release all the ressource for the transport layer itself such as
- *	irq, tasklet etc... From this point on, the device may not issue
- *	any interrupt (incl. RFKILL).
- *	May sleep
- * @check_stuck_queue: check if a specific queue is stuck
  * @wait_tx_queue_empty: wait until all tx queues are empty
  *	May sleep
  * @dbgfs_register: add the dbgfs files under this directory. Files will be
@@ -346,11 +367,12 @@
  * @configure: configure parameters required by the transport layer from
  *	the op_mode. May be called several times before start_fw, can't be
  *	called after that.
+ * @set_pmi: set the power pmi state
  */
 struct iwl_trans_ops {
 
 	int (*start_hw)(struct iwl_trans *iwl_trans);
-	void (*stop_hw)(struct iwl_trans *iwl_trans);
+	void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
 	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
 	void (*fw_alive)(struct iwl_trans *trans);
 	void (*stop_device)(struct iwl_trans *trans);
@@ -360,23 +382,15 @@
 	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
 	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
-		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
-		u8 sta_id, u8 tid);
-	int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
-			int txq_id, int ssn, struct sk_buff_head *skbs);
+		  struct iwl_device_cmd *dev_cmd, int queue);
+	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
+			struct sk_buff_head *skbs);
 
-	int (*tx_agg_disable)(struct iwl_trans *trans,
-			      int sta_id, int tid);
-	int (*tx_agg_alloc)(struct iwl_trans *trans,
-			    int sta_id, int tid);
-	void (*tx_agg_setup)(struct iwl_trans *trans,
-			     enum iwl_rxon_context_id ctx, int sta_id, int tid,
-			     int frame_limit, u16 ssn);
-
-	void (*free)(struct iwl_trans *trans);
+	void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo,
+			     int sta_id, int tid, int frame_limit, u16 ssn);
+	void (*tx_agg_disable)(struct iwl_trans *trans, int queue);
 
 	int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
-	int (*check_stuck_queue)(struct iwl_trans *trans, int q);
 	int (*wait_tx_queue_empty)(struct iwl_trans *trans);
 #ifdef CONFIG_PM_SLEEP
 	int (*suspend)(struct iwl_trans *trans);
@@ -387,6 +401,7 @@
 	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
 	void (*configure)(struct iwl_trans *trans,
 			  const struct iwl_trans_config *trans_cfg);
+	void (*set_pmi)(struct iwl_trans *trans, bool state);
 };
 
 /**
@@ -405,20 +420,19 @@
  *
  * @ops - pointer to iwl_trans_ops
  * @op_mode - pointer to the op_mode
- * @shrd - pointer to iwl_shared which holds shared data from the upper layer
+ * @cfg - pointer to the configuration
  * @reg_lock - protect hw register access
  * @dev - pointer to struct device * that represents the device
  * @hw_id: a u32 with the ID of the device / subdevice.
  *	Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
- * @nvm_device_type: indicates OTP or eeprom
  * @pm_support: set to true in start_hw if link pm is supported
  * @wait_command_queue: the wait_queue for SYNC host commands
  */
 struct iwl_trans {
 	const struct iwl_trans_ops *ops;
 	struct iwl_op_mode *op_mode;
-	struct iwl_shared *shrd;
+	const struct iwl_cfg *cfg;
 	enum iwl_trans_state state;
 	spinlock_t reg_lock;
 
@@ -427,7 +441,6 @@
 	u32 hw_id;
 	char hw_id_str[52];
 
-	int    nvm_device_type;
 	bool pm_support;
 
 	wait_queue_head_t wait_command_queue;
@@ -456,11 +469,12 @@
 	return trans->ops->start_hw(trans);
 }
 
-static inline void iwl_trans_stop_hw(struct iwl_trans *trans)
+static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
+				     bool op_mode_leaving)
 {
 	might_sleep();
 
-	trans->ops->stop_hw(trans);
+	trans->ops->stop_hw(trans, op_mode_leaving);
 
 	trans->state = IWL_TRANS_NO_FW;
 }
@@ -507,60 +521,42 @@
 }
 
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
-		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
-		u8 sta_id, u8 tid)
-{
-	if (trans->state != IWL_TRANS_FW_ALIVE)
-		IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
-
-	return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
-}
-
-static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
-				 int tid, int txq_id, int ssn,
-				 struct sk_buff_head *skbs)
+			       struct iwl_device_cmd *dev_cmd, int queue)
 {
 	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
 		  "%s bad state = %d", __func__, trans->state);
 
-	return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, skbs);
+	return trans->ops->tx(trans, skb, dev_cmd, queue);
 }
 
-static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
-					    int sta_id, int tid)
+static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
+				     int ssn, struct sk_buff_head *skbs)
 {
 	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
 		  "%s bad state = %d", __func__, trans->state);
 
-	return trans->ops->tx_agg_disable(trans, sta_id, tid);
+	trans->ops->reclaim(trans, queue, ssn, skbs);
 }
 
-static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
-					 int sta_id, int tid)
+static inline void iwl_trans_tx_agg_disable(struct iwl_trans *trans, int queue)
 {
 	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
 		  "%s bad state = %d", __func__, trans->state);
 
-	return trans->ops->tx_agg_alloc(trans, sta_id, tid);
+	trans->ops->tx_agg_disable(trans, queue);
 }
 
-
-static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
-					   enum iwl_rxon_context_id ctx,
-					   int sta_id, int tid,
-					   int frame_limit, u16 ssn)
+static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue,
+					  int fifo, int sta_id, int tid,
+					  int frame_limit, u16 ssn)
 {
 	might_sleep();
 
 	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
 		  "%s bad state = %d", __func__, trans->state);
 
-	trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
-}
-
-static inline void iwl_trans_free(struct iwl_trans *trans)
-{
-	trans->ops->free(trans);
+	trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid,
+				 frame_limit, ssn);
 }
 
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -571,13 +567,6 @@
 	return trans->ops->wait_tx_queue_empty(trans);
 }
 
-static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
-{
-	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-		  "%s bad state = %d", __func__, trans->state);
-
-	return trans->ops->check_stuck_queue(trans, q);
-}
 static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
 					    struct dentry *dir)
 {
@@ -611,20 +600,15 @@
 	return trans->ops->read32(trans, ofs);
 }
 
+static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
+{
+	trans->ops->set_pmi(trans, state);
+}
+
 /*****************************************************
-* Transport layers implementations + their allocation function
+* driver (transport) register/unregister functions
 ******************************************************/
-struct pci_dev;
-struct pci_device_id;
-extern const struct iwl_trans_ops trans_ops_pcie;
-struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
-				       struct pci_dev *pdev,
-				       const struct pci_device_id *ent);
 int __must_check iwl_pci_register_driver(void);
 void iwl_pci_unregister_driver(void);
 
-extern const struct iwl_trans_ops trans_ops_idi;
-struct iwl_trans *iwl_trans_idi_alloc(struct iwl_shared *shrd,
-				      void *pdev_void,
-				      const void *ent_void);
 #endif /* __iwl_trans_h__ */
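
For orientation, a minimal sketch (not part of the patch; the function names and the queue/fifo/sta/tid numbers are invented) of how an op mode would drive the reworked, queue-based transport API declared above:

#include "iwl-trans.h"

/* hypothetical caller; the transport now only needs the HW queue number */
static int example_opmode_tx(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_device_cmd *dev_cmd)
{
	return iwl_trans_tx(trans, skb, dev_cmd, 0);	/* queue 0: placeholder */
}

static void example_opmode_start_agg(struct iwl_trans *trans)
{
	/* queue 10, fifo 3, sta 0, tid 0, frame_limit 64, ssn 0: all invented */
	iwl_trans_tx_agg_setup(trans, 10, 3, 0, 0, 64, 0);
}
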
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
index 2528287..bc40dc6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -31,7 +31,6 @@
 #include <linux/init.h>
 
 #include "iwl-dev.h"
-#include "iwl-core.h"
 #include "iwl-io.h"
 #include "iwl-agn-hw.h"
 #include "iwl-agn.h"
@@ -40,37 +39,6 @@
 #include "iwl-fh.h"
 #include "iwl-op-mode.h"
 
-static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
-	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
-	 0, COEX_UNASSOC_IDLE_FLAGS},
-	{COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
-	 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
-	{COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
-	 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
-	{COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
-	 0, COEX_CALIBRATION_FLAGS},
-	{COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
-	 0, COEX_PERIODIC_CALIBRATION_FLAGS},
-	{COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
-	 0, COEX_CONNECTION_ESTAB_FLAGS},
-	{COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
-	 0, COEX_ASSOCIATED_IDLE_FLAGS},
-	{COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
-	 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
-	{COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
-	 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
-	{COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
-	 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
-	{COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
-	{COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
-	{COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
-	 0, COEX_STAND_ALONE_DEBUG_FLAGS},
-	{COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
-	 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
-	{COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
-	{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
-};
-
 /******************************************************************************
  *
  * uCode download functions
@@ -93,7 +61,7 @@
 {
 	struct iwl_calib_xtal_freq_cmd cmd;
 	__le16 *xtal_calib =
-		(__le16 *)iwl_eeprom_query_addr(priv->shrd, EEPROM_XTAL);
+		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
 
 	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
 	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
@@ -105,8 +73,7 @@
 {
 	struct iwl_calib_temperature_offset_cmd cmd;
 	__le16 *offset_calib =
-		(__le16 *)iwl_eeprom_query_addr(priv->shrd,
-						EEPROM_RAW_TEMPERATURE);
+		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
 
 	memset(&cmd, 0, sizeof(cmd));
 	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
@@ -122,16 +89,15 @@
 static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
 {
 	struct iwl_calib_temperature_offset_v2_cmd cmd;
-	__le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv->shrd,
+	__le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
 				     EEPROM_KELVIN_TEMPERATURE);
 	__le16 *offset_calib_low =
-		(__le16 *)iwl_eeprom_query_addr(priv->shrd,
-						EEPROM_RAW_TEMPERATURE);
+		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
 	struct iwl_eeprom_calib_hdr *hdr;
 
 	memset(&cmd, 0, sizeof(cmd));
 	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
-	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv->shrd,
+	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
 							EEPROM_CALIB_ALL);
 	memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
 		sizeof(*offset_calib_high));
@@ -174,30 +140,12 @@
 	return iwl_dvm_send_cmd(priv, &cmd);
 }
 
-int iwlagn_rx_calib_result(struct iwl_priv *priv,
-			    struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->data;
-	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-
-	/* reduce the size of the length field itself */
-	len -= 4;
-
-	if (iwl_calib_set(priv, hdr, len))
-		IWL_ERR(priv, "Failed to record calibration data %d\n",
-			hdr->op_code);
-
-	return 0;
-}
-
 int iwl_init_alive_start(struct iwl_priv *priv)
 {
 	int ret;
 
-	if (cfg(priv)->bt_params &&
-	    cfg(priv)->bt_params->advanced_bt_coexist) {
+	if (priv->cfg->bt_params &&
+	    priv->cfg->bt_params->advanced_bt_coexist) {
 		/*
 		 * Tell uCode we are ready to perform calibration
 		 * need to perform this before any calibration
@@ -219,8 +167,8 @@
 	 * temperature offset calibration is only needed for runtime ucode,
 	 * so prepare the value now.
 	 */
-	if (cfg(priv)->need_temp_offset_calib) {
-		if (cfg(priv)->temp_offset_v2)
+	if (priv->cfg->need_temp_offset_calib) {
+		if (priv->cfg->temp_offset_v2)
 			return iwl_set_temperature_offset_calib_v2(priv);
 		else
 			return iwl_set_temperature_offset_calib(priv);
@@ -229,29 +177,13 @@
 	return 0;
 }
 
-static int iwl_send_wimax_coex(struct iwl_priv *priv)
+int iwl_send_wimax_coex(struct iwl_priv *priv)
 {
 	struct iwl_wimax_coex_cmd coex_cmd;
 
-	if (cfg(priv)->base_params->support_wimax_coexist) {
-		/* UnMask wake up src at associated sleep */
-		coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
+	/* coexistence is disabled */
+	memset(&coex_cmd, 0, sizeof(coex_cmd));
 
-		/* UnMask wake up src at unassociated sleep */
-		coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
-		memcpy(coex_cmd.sta_prio, cu_priorities,
-			sizeof(struct iwl_wimax_coex_event_entry) *
-			 COEX_NUM_OF_EVENTS);
-
-		/* enabling the coexistence feature */
-		coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
-
-		/* enabling the priorities tables */
-		coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
-	} else {
-		/* coexistence is disabled */
-		memset(&coex_cmd, 0, sizeof(coex_cmd));
-	}
 	return iwl_dvm_send_cmd_pdu(priv,
 				COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
 				sizeof(coex_cmd), &coex_cmd);
@@ -311,7 +243,7 @@
 {
 	int ret;
 
-	iwl_trans_fw_alive(trans(priv));
+	iwl_trans_fw_alive(priv->trans);
 
 	priv->passive_no_rx = false;
 	priv->transport_queue_stop = 0;
@@ -320,7 +252,7 @@
 	if (ret)
 		return ret;
 
-	if (!cfg(priv)->no_xtal_calib) {
+	if (!priv->cfg->no_xtal_calib) {
 		ret = iwl_set_Xtal_calib(priv);
 		if (ret)
 			return ret;
@@ -349,9 +281,9 @@
 		/* read data comes through single port, auto-incr addr */
 		/* NOTE: Use the debugless read so we don't flood kernel log
 		 * if IWL_DL_IO is set */
-		iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR,
+		iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
 			i + fw_desc->offset);
-		val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
+		val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
 		if (val != le32_to_cpu(*image))
 			return -EIO;
 	}
@@ -370,14 +302,14 @@
 
 	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
 
-	iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR,
+	iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
 				fw_desc->offset);
 
 	for (offs = 0;
 	     offs < len && errors < 20;
 	     offs += sizeof(u32), image++) {
 		/* read data comes through single port, auto-incr addr */
-		val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT);
+		val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
 		if (val != le32_to_cpu(*image)) {
 			IWL_ERR(priv, "uCode INST section at "
 				"offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -417,9 +349,8 @@
 	u8 subtype;
 };
 
-static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
-			    struct iwl_rx_packet *pkt,
-			    void *data)
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+			 struct iwl_rx_packet *pkt, void *data)
 {
 	struct iwl_priv *priv =
 		container_of(notif_wait, struct iwl_priv, notif_wait);
@@ -433,13 +364,15 @@
 		       palive->is_valid, palive->ver_type,
 		       palive->ver_subtype);
 
-	priv->shrd->device_pointers.error_event_table =
+	priv->device_pointers.error_event_table =
 		le32_to_cpu(palive->error_event_table_ptr);
-	priv->shrd->device_pointers.log_event_table =
+	priv->device_pointers.log_event_table =
 		le32_to_cpu(palive->log_event_table_ptr);
 
 	alive_data->subtype = palive->ver_subtype;
 	alive_data->valid = palive->is_valid == UCODE_VALID_OK;
+
+	return true;
 }
 
 #define UCODE_ALIVE_TIMEOUT	HZ
@@ -453,9 +386,10 @@
 	const struct fw_img *fw;
 	int ret;
 	enum iwl_ucode_type old_type;
+	static const u8 alive_cmd[] = { REPLY_ALIVE };
 
-	old_type = priv->shrd->ucode_type;
-	priv->shrd->ucode_type = ucode_type;
+	old_type = priv->cur_ucode;
+	priv->cur_ucode = ucode_type;
 	fw = iwl_get_ucode_image(priv, ucode_type);
 
 	priv->ucode_loaded = false;
@@ -463,12 +397,13 @@
 	if (!fw)
 		return -EINVAL;
 
-	iwl_init_notification_wait(&priv->notif_wait, &alive_wait, REPLY_ALIVE,
-				      iwl_alive_fn, &alive_data);
+	iwl_init_notification_wait(&priv->notif_wait, &alive_wait,
+				   alive_cmd, ARRAY_SIZE(alive_cmd),
+				   iwl_alive_fn, &alive_data);
 
-	ret = iwl_trans_start_fw(trans(priv), fw);
+	ret = iwl_trans_start_fw(priv->trans, fw);
 	if (ret) {
-		priv->shrd->ucode_type = old_type;
+		priv->cur_ucode = old_type;
 		iwl_remove_notification(&priv->notif_wait, &alive_wait);
 		return ret;
 	}
@@ -480,13 +415,13 @@
 	ret = iwl_wait_notification(&priv->notif_wait, &alive_wait,
 					UCODE_ALIVE_TIMEOUT);
 	if (ret) {
-		priv->shrd->ucode_type = old_type;
+		priv->cur_ucode = old_type;
 		return ret;
 	}
 
 	if (!alive_data.valid) {
 		IWL_ERR(priv, "Loaded ucode is not valid!\n");
-		priv->shrd->ucode_type = old_type;
+		priv->cur_ucode = old_type;
 		return -EIO;
 	}
 
@@ -498,7 +433,7 @@
 	if (ucode_type != IWL_UCODE_WOWLAN) {
 		ret = iwl_verify_ucode(priv, ucode_type);
 		if (ret) {
-			priv->shrd->ucode_type = old_type;
+			priv->cur_ucode = old_type;
 			return ret;
 		}
 
@@ -510,7 +445,7 @@
 	if (ret) {
 		IWL_WARN(priv,
 			"Could not complete ALIVE transition: %d\n", ret);
-		priv->shrd->ucode_type = old_type;
+		priv->cur_ucode = old_type;
 		return ret;
 	}
 
@@ -519,9 +454,38 @@
 	return 0;
 }
 
+static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
+			      struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_priv *priv = data;
+	struct iwl_calib_hdr *hdr;
+	int len;
+
+	if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
+		WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
+		return true;
+	}
+
+	hdr = (struct iwl_calib_hdr *)pkt->data;
+	len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+	/* reduce the size by the length field itself */
+	len -= sizeof(__le32);
+
+	if (iwl_calib_set(priv, hdr, len))
+		IWL_ERR(priv, "Failed to record calibration data %d\n",
+			hdr->op_code);
+
+	return false;
+}
+
 int iwl_run_init_ucode(struct iwl_priv *priv)
 {
 	struct iwl_notification_wait calib_wait;
+	static const u8 calib_complete[] = {
+		CALIBRATION_RES_NOTIFICATION,
+		CALIBRATION_COMPLETE_NOTIFICATION
+	};
 	int ret;
 
 	lockdep_assert_held(&priv->mutex);
@@ -534,8 +498,8 @@
 		return 0;
 
 	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
-				      CALIBRATION_COMPLETE_NOTIFICATION,
-				      NULL, NULL);
+				   calib_complete, ARRAY_SIZE(calib_complete),
+				   iwlagn_wait_calib, priv);
 
 	/* Will also start the device */
 	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
@@ -561,7 +525,7 @@
 	iwl_remove_notification(&priv->notif_wait, &calib_wait);
  out:
 	/* Whatever happened, stop the device */
-	iwl_trans_stop_device(trans(priv));
+	iwl_trans_stop_device(priv->trans);
 	priv->ucode_loaded = false;
 
 	return ret;
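
The notification-wait change above is easier to see in isolation: the waiter now takes an array of command IDs plus a callback returning bool, where false keeps the waiter armed for further notifications and true completes the wait (as iwlagn_wait_calib does for intermediate calibration results). A minimal sketch, with placeholder command IDs and names:

static bool example_notif_fn(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_rx_packet *pkt, void *data)
{
	if (pkt->hdr.cmd == EXAMPLE_PARTIAL_NOTIFICATION)	/* placeholder ID */
		return false;	/* keep waiting for more notifications */
	return true;		/* done, wake up the waiter */
}

static int example_wait(struct iwl_priv *priv)
{
	static const u8 cmds[] = { EXAMPLE_PARTIAL_NOTIFICATION,
				   EXAMPLE_COMPLETE_NOTIFICATION };
	struct iwl_notification_wait wait;

	iwl_init_notification_wait(&priv->notif_wait, &wait,
				   cmds, ARRAY_SIZE(cmds),
				   example_notif_fn, priv);
	/* ... issue the command that triggers the notifications ... */
	return iwl_wait_notification(&priv->notif_wait, &wait, HZ);
}
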
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index 03f998d..7107ce5 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -1,5 +1,5 @@
 config IWM
-	tristate "Intel Wireless Multicomm 3200 WiFi driver"
+	tristate "Intel Wireless Multicomm 3200 WiFi driver (EXPERIMENTAL)"
 	depends on MMC && EXPERIMENTAL
 	depends on CFG80211
 	select FW_LOADER
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index f7d01bf..eac72f7 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -6,6 +6,7 @@
 libertas-y += main.o
 libertas-y += rx.o
 libertas-y += tx.o
+libertas-y += firmware.o
 libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
 
 usb8xxx-objs += if_usb.o
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index bc951ab..84a3aa7 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -19,6 +19,10 @@
 };
 
 struct lbs_private;
+typedef void (*lbs_fw_cb)(struct lbs_private *priv, int ret,
+		const struct firmware *helper, const struct firmware *mainfw);
+
+struct lbs_private;
 struct sk_buff;
 struct net_device;
 struct cmd_ds_command;
@@ -66,10 +70,13 @@
 u32 lbs_fw_index_to_data_rate(u8 index);
 u8 lbs_data_rate_to_fw_index(u32 rate);
 
-int lbs_get_firmware(struct device *dev, const char *user_helper,
-			const char *user_mainfw, u32 card_model,
+int lbs_get_firmware(struct device *dev, u32 card_model,
 			const struct lbs_fw_table *fw_table,
 			const struct firmware **helper,
 			const struct firmware **mainfw);
+int lbs_get_firmware_async(struct lbs_private *priv, struct device *device,
+			   u32 card_model, const struct lbs_fw_table *fw_table,
+			   lbs_fw_cb callback);
+void lbs_wait_for_firmware_load(struct lbs_private *priv);
 
 #endif
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f3fd447..6720054 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -7,6 +7,7 @@
 #define _LBS_DEV_H_
 
 #include "defs.h"
+#include "decl.h"
 #include "host.h"
 
 #include <linux/kfifo.h>
@@ -180,6 +181,15 @@
 	wait_queue_head_t scan_q;
 	/* Whether the scan was initiated internally and not by cfg80211 */
 	bool internal_scan;
+
+	/* Firmware load */
+	u32 fw_model;
+	wait_queue_head_t fw_waitq;
+	struct device *fw_device;
+	const struct firmware *helper_fw;
+	const struct lbs_fw_table *fw_table;
+	const struct lbs_fw_table *fw_iter;
+	lbs_fw_cb fw_callback;
 };
 
 extern struct cmd_confirm_sleep confirm_sleep;
diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c
new file mode 100644
index 0000000..601f207
--- /dev/null
+++ b/drivers/net/wireless/libertas/firmware.c
@@ -0,0 +1,224 @@
+/*
+ * Firmware loading and handling functions.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include "dev.h"
+#include "decl.h"
+
+static void load_next_firmware_from_table(struct lbs_private *priv);
+
+static void lbs_fw_loaded(struct lbs_private *priv, int ret,
+	const struct firmware *helper, const struct firmware *mainfw)
+{
+	unsigned long flags;
+
+	lbs_deb_fw("firmware load complete, code %d\n", ret);
+
+	/* User must free helper/mainfw */
+	priv->fw_callback(priv, ret, helper, mainfw);
+
+	spin_lock_irqsave(&priv->driver_lock, flags);
+	priv->fw_callback = NULL;
+	wake_up(&priv->fw_waitq);
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+}
+
+static void do_load_firmware(struct lbs_private *priv, const char *name,
+	void (*cb)(const struct firmware *fw, void *context))
+{
+	int ret;
+
+	lbs_deb_fw("Requesting %s\n", name);
+	ret = request_firmware_nowait(THIS_MODULE, true, name,
+			priv->fw_device, GFP_KERNEL, priv, cb);
+	if (ret) {
+		lbs_deb_fw("request_firmware_nowait error %d\n", ret);
+		lbs_fw_loaded(priv, ret, NULL, NULL);
+	}
+}
+
+static void main_firmware_cb(const struct firmware *firmware, void *context)
+{
+	struct lbs_private *priv = context;
+
+	if (!firmware) {
+		/* Failed to find firmware: try next table entry */
+		load_next_firmware_from_table(priv);
+		return;
+	}
+
+	/* Firmware found! */
+	lbs_fw_loaded(priv, 0, priv->helper_fw, firmware);
+}
+
+static void helper_firmware_cb(const struct firmware *firmware, void *context)
+{
+	struct lbs_private *priv = context;
+
+	if (!firmware) {
+		/* Failed to find firmware: try next table entry */
+		load_next_firmware_from_table(priv);
+		return;
+	}
+
+	/* Firmware found! */
+	if (priv->fw_iter->fwname) {
+		priv->helper_fw = firmware;
+		do_load_firmware(priv, priv->fw_iter->fwname, main_firmware_cb);
+	} else {
+		/* No main firmware needed for this helper --> success! */
+		lbs_fw_loaded(priv, 0, firmware, NULL);
+	}
+}
+
+static void load_next_firmware_from_table(struct lbs_private *priv)
+{
+	const struct lbs_fw_table *iter;
+
+	if (!priv->fw_iter)
+		iter = priv->fw_table;
+	else
+		iter = ++priv->fw_iter;
+
+	if (priv->helper_fw) {
+		release_firmware(priv->helper_fw);
+		priv->helper_fw = NULL;
+	}
+
+next:
+	if (!iter->helper) {
+		/* End of table hit. */
+		lbs_fw_loaded(priv, -ENOENT, NULL, NULL);
+		return;
+	}
+
+	if (iter->model != priv->fw_model) {
+		iter++;
+		goto next;
+	}
+
+	priv->fw_iter = iter;
+	do_load_firmware(priv, iter->helper, helper_firmware_cb);
+}
+
+void lbs_wait_for_firmware_load(struct lbs_private *priv)
+{
+	wait_event(priv->fw_waitq, priv->fw_callback == NULL);
+}
+
+/**
+ *  lbs_get_firmware_async - Retrieves firmware asynchronously. Can load
+ *  either both a helper and a main firmware (2-stage), or just the helper.
+ *
+ *  @priv:      Pointer to lbs_private instance
+ *  @device:	A pointer to &device structure
+ *  @card_model: Bus-specific card model ID used to filter firmware table
+ *		elements
+ *  @fw_table:	Table of firmware file names and device model numbers
+ *		terminated by an entry with a NULL helper name
+ *  @callback:	User callback to invoke when firmware load succeeds or fails
+ */
+int lbs_get_firmware_async(struct lbs_private *priv, struct device *device,
+			    u32 card_model, const struct lbs_fw_table *fw_table,
+			    lbs_fw_cb callback)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->driver_lock, flags);
+	if (priv->fw_callback) {
+		lbs_deb_fw("firmware load already in progress\n");
+		spin_unlock_irqrestore(&priv->driver_lock, flags);
+		return -EBUSY;
+	}
+
+	priv->fw_device = device;
+	priv->fw_callback = callback;
+	priv->fw_table = fw_table;
+	priv->fw_iter = NULL;
+	priv->fw_model = card_model;
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+
+	lbs_deb_fw("Starting async firmware load\n");
+	load_next_firmware_from_table(priv);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lbs_get_firmware_async);
+
+/**
+ *  lbs_get_firmware - Retrieves two-stage firmware
+ *
+ *  @dev:     	A pointer to &device structure
+ *  @card_model: Bus-specific card model ID used to filter firmware table
+ *		elements
+ *  @fw_table:	Table of firmware file names and device model numbers
+ *		terminated by an entry with a NULL helper name
+ *  @helper:	On success, the helper firmware; caller must free
+ *  @mainfw:	On success, the main firmware; caller must free
+ *
+ * Deprecated: use lbs_get_firmware_async() instead.
+ *
+ *  returns:		0 on success, non-zero on failure
+ */
+int lbs_get_firmware(struct device *dev, u32 card_model,
+			const struct lbs_fw_table *fw_table,
+			const struct firmware **helper,
+			const struct firmware **mainfw)
+{
+	const struct lbs_fw_table *iter;
+	int ret;
+
+	BUG_ON(helper == NULL);
+	BUG_ON(mainfw == NULL);
+
+	/* Search for firmware to use from the table. */
+	iter = fw_table;
+	while (iter && iter->helper) {
+		if (iter->model != card_model)
+			goto next;
+
+		if (*helper == NULL) {
+			ret = request_firmware(helper, iter->helper, dev);
+			if (ret)
+				goto next;
+
+			/* If the device has one-stage firmware (ie cf8305) and
+			 * we've got it then we don't need to bother with the
+			 * main firmware.
+			 */
+			if (iter->fwname == NULL)
+				return 0;
+		}
+
+		if (*mainfw == NULL) {
+			ret = request_firmware(mainfw, iter->fwname, dev);
+			if (ret) {
+				/* Clear the helper to ensure we don't have
+				 * mismatched firmware pairs.
+				 */
+				release_firmware(*helper);
+				*helper = NULL;
+			}
+		}
+
+		if (*helper && *mainfw)
+			return 0;
+
+  next:
+		iter++;
+	}
+
+	/* Failed */
+	release_firmware(*helper);
+	*helper = NULL;
+	release_firmware(*mainfw);
+	*mainfw = NULL;
+
+	return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(lbs_get_firmware);
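
A minimal usage sketch of the new asynchronous loader (not part of the patch; the table contents, the 0x1234 model ID and the example_* names are invented -- the real conversions follow in the if_cs/if_sdio/if_usb hunks below):

static const struct lbs_fw_table example_fw_table[] = {
	/* { model, helper, fwname }; 0x1234 is a made-up model ID */
	{ 0x1234, "example/helper.bin", "example/main.bin" },
	{ 0, NULL, NULL }	/* NULL helper terminates the table */
};

static void example_fw_done(struct lbs_private *priv, int ret,
			    const struct firmware *helper,
			    const struct firmware *mainfw)
{
	if (ret) {
		pr_err("failed to find firmware (%d)\n", ret);
		return;
	}
	/* ... program helper/mainfw into the hardware, then ... */
	release_firmware(helper);
	release_firmware(mainfw);
}

static int example_probe(struct lbs_private *priv, struct device *dev)
{
	/* returns immediately; example_fw_done runs when the load finishes */
	return lbs_get_firmware_async(priv, dev, 0x1234, example_fw_table,
				      example_fw_done);
}
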
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 234ee88..16beaf3 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -738,6 +738,50 @@
 	return ret;
 }
 
+static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
+				 const struct firmware *helper,
+				 const struct firmware *mainfw)
+{
+	struct if_cs_card *card = priv->card;
+
+	if (ret) {
+		pr_err("failed to find firmware (%d)\n", ret);
+		return;
+	}
+
+	/* Load the firmware */
+	ret = if_cs_prog_helper(card, helper);
+	if (ret == 0 && (card->model != MODEL_8305))
+		ret = if_cs_prog_real(card, mainfw);
+	if (ret)
+		goto out;
+
+	/* Now actually get the IRQ */
+	ret = request_irq(card->p_dev->irq, if_cs_interrupt,
+		IRQF_SHARED, DRV_NAME, card);
+	if (ret) {
+		pr_err("error in request_irq\n");
+		goto out;
+	}
+
+	/*
+	 * Clear any interrupt cause that happened while sending
+	 * firmware/initializing card
+	 */
+	if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK);
+	if_cs_enable_ints(card);
+
+	/* And finally bring the card up */
+	priv->fw_ready = 1;
+	if (lbs_start_card(priv) != 0) {
+		pr_err("could not activate card\n");
+		free_irq(card->p_dev->irq, card);
+	}
+
+out:
+	release_firmware(helper);
+	release_firmware(mainfw);
+}
 
 
 /********************************************************************/
@@ -809,8 +853,6 @@
 	unsigned int prod_id;
 	struct lbs_private *priv;
 	struct if_cs_card *card;
-	const struct firmware *helper = NULL;
-	const struct firmware *mainfw = NULL;
 
 	lbs_deb_enter(LBS_DEB_CS);
 
@@ -890,20 +932,6 @@
 		goto out2;
 	}
 
-	ret = lbs_get_firmware(&p_dev->dev, NULL, NULL, card->model,
-				&fw_table[0], &helper, &mainfw);
-	if (ret) {
-		pr_err("failed to find firmware (%d)\n", ret);
-		goto out2;
-	}
-
-	/* Load the firmware early, before calling into libertas.ko */
-	ret = if_cs_prog_helper(card, helper);
-	if (ret == 0 && (card->model != MODEL_8305))
-		ret = if_cs_prog_real(card, mainfw);
-	if (ret)
-		goto out2;
-
 	/* Make this card known to the libertas driver */
 	priv = lbs_add_card(card, &p_dev->dev);
 	if (!priv) {
@@ -911,37 +939,22 @@
 		goto out2;
 	}
 
-	/* Finish setting up fields in lbs_private */
+	/* Set up fields in lbs_private */
 	card->priv = priv;
 	priv->card = card;
 	priv->hw_host_to_card = if_cs_host_to_card;
 	priv->enter_deep_sleep = NULL;
 	priv->exit_deep_sleep = NULL;
 	priv->reset_deep_sleep_wakeup = NULL;
-	priv->fw_ready = 1;
 
-	/* Now actually get the IRQ */
-	ret = request_irq(p_dev->irq, if_cs_interrupt,
-		IRQF_SHARED, DRV_NAME, card);
+	/* Get firmware */
+	ret = lbs_get_firmware_async(priv, &p_dev->dev, card->model, fw_table,
+				     if_cs_prog_firmware);
 	if (ret) {
-		pr_err("error in request_irq\n");
+		pr_err("failed to find firmware (%d)\n", ret);
 		goto out3;
 	}
 
-	/*
-	 * Clear any interrupt cause that happened while sending
-	 * firmware/initializing card
-	 */
-	if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK);
-	if_cs_enable_ints(card);
-
-	/* And finally bring the card up */
-	if (lbs_start_card(priv) != 0) {
-		pr_err("could not activate card\n");
-		goto out3;
-	}
-
-	ret = 0;
 	goto out;
 
 out3:
@@ -951,11 +964,6 @@
 out1:
 	pcmcia_disable_device(p_dev);
 out:
-	if (helper)
-		release_firmware(helper);
-	if (mainfw)
-		release_firmware(mainfw);
-
 	lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
 	return ret;
 }
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 9804ebc..76caeba 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -65,12 +65,6 @@
  */
 static u8 user_rmmod;
 
-static char *lbs_helper_name = NULL;
-module_param_named(helper_name, lbs_helper_name, charp, 0644);
-
-static char *lbs_fw_name = NULL;
-module_param_named(fw_name, lbs_fw_name, charp, 0644);
-
 static const struct sdio_device_id if_sdio_ids[] = {
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL,
 			SDIO_DEVICE_ID_MARVELL_LIBERTAS) },
@@ -123,11 +117,8 @@
 	int			model;
 	unsigned long		ioport;
 	unsigned int		scratch_reg;
-
-	const char		*helper;
-	const char		*firmware;
-	bool			helper_allocated;
-	bool			firmware_allocated;
+	bool			started;
+	wait_queue_head_t	pwron_waitq;
 
 	u8			buffer[65536] __attribute__((aligned(4)));
 
@@ -140,6 +131,9 @@
 	u8			rx_unit;
 };
 
+static void if_sdio_finish_power_on(struct if_sdio_card *card);
+static int if_sdio_power_off(struct if_sdio_card *card);
+
 /********************************************************************/
 /* I/O                                                              */
 /********************************************************************/
@@ -680,12 +674,39 @@
 	return ret;
 }
 
+static void if_sdio_do_prog_firmware(struct lbs_private *priv, int ret,
+				     const struct firmware *helper,
+				     const struct firmware *mainfw)
+{
+	struct if_sdio_card *card = priv->card;
+
+	if (ret) {
+		pr_err("failed to find firmware (%d)\n", ret);
+		return;
+	}
+
+	ret = if_sdio_prog_helper(card, helper);
+	if (ret)
+		goto out;
+
+	lbs_deb_sdio("Helper firmware loaded\n");
+
+	ret = if_sdio_prog_real(card, mainfw);
+	if (ret)
+		goto out;
+
+	lbs_deb_sdio("Firmware loaded\n");
+	if_sdio_finish_power_on(card);
+
+out:
+	release_firmware(helper);
+	release_firmware(mainfw);
+}
+
 static int if_sdio_prog_firmware(struct if_sdio_card *card)
 {
 	int ret;
 	u16 scratch;
-	const struct firmware *helper = NULL;
-	const struct firmware *mainfw = NULL;
 
 	lbs_deb_enter(LBS_DEB_SDIO);
 
@@ -719,43 +740,18 @@
 	 */
 	if (scratch == IF_SDIO_FIRMWARE_OK) {
 		lbs_deb_sdio("firmware already loaded\n");
-		goto success;
+		if_sdio_finish_power_on(card);
+		return 0;
 	} else if ((card->model == MODEL_8686) && (scratch & 0x7fff)) {
 		lbs_deb_sdio("firmware may be running\n");
-		goto success;
+		if_sdio_finish_power_on(card);
+		return 0;
 	}
 
-	ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name,
-				card->model, &fw_table[0], &helper, &mainfw);
-	if (ret) {
-		pr_err("failed to find firmware (%d)\n", ret);
-		goto out;
-	}
-
-	ret = if_sdio_prog_helper(card, helper);
-	if (ret)
-		goto out;
-
-	lbs_deb_sdio("Helper firmware loaded\n");
-
-	ret = if_sdio_prog_real(card, mainfw);
-	if (ret)
-		goto out;
-
-	lbs_deb_sdio("Firmware loaded\n");
-
-success:
-	sdio_claim_host(card->func);
-	sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE);
-	sdio_release_host(card->func);
-	ret = 0;
+	ret = lbs_get_firmware_async(card->priv, &card->func->dev, card->model,
+				     fw_table, if_sdio_do_prog_firmware);
 
 out:
-	if (helper)
-		release_firmware(helper);
-	if (mainfw)
-		release_firmware(mainfw);
-
 	lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
 	return ret;
 }
@@ -764,10 +760,89 @@
 /* Power management                                                 */
 /********************************************************************/
 
-static int if_sdio_power_on(struct if_sdio_card *card)
+/* Finish power on sequence (after firmware is loaded) */
+static void if_sdio_finish_power_on(struct if_sdio_card *card)
 {
 	struct sdio_func *func = card->func;
 	struct lbs_private *priv = card->priv;
+	int ret;
+
+	sdio_claim_host(func);
+	sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE);
+
+	/*
+	 * Get rx_unit if the chip is SD8688 or newer.
+	 * SD8385 & SD8686 do not have rx_unit.
+	 */
+	if ((card->model != MODEL_8385)
+			&& (card->model != MODEL_8686))
+		card->rx_unit = if_sdio_read_rx_unit(card);
+	else
+		card->rx_unit = 0;
+
+	/*
+	 * Set up the interrupt handler late.
+	 *
+	 * If we set it up earlier, the (buggy) hardware generates a spurious
+	 * interrupt, even before the interrupt has been enabled, with
+	 * CCCR_INTx = 0.
+	 *
+	 * We register the interrupt handler late so that we can handle any
+	 * spurious interrupts, and also to avoid generation of that known
+	 * spurious interrupt in the first place.
+	 */
+	ret = sdio_claim_irq(func, if_sdio_interrupt);
+	if (ret)
+		goto release;
+
+	/*
+	 * Enable interrupts now that everything is set up
+	 */
+	sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret);
+	if (ret)
+		goto release_irq;
+
+	sdio_release_host(func);
+
+	/*
+	 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
+	 */
+	if (card->model == MODEL_8688) {
+		struct cmd_header cmd;
+
+		memset(&cmd, 0, sizeof(cmd));
+
+		lbs_deb_sdio("send function INIT command\n");
+		if (__lbs_cmd(priv, CMD_FUNC_INIT, &cmd, sizeof(cmd),
+				lbs_cmd_copyback, (unsigned long) &cmd))
+			netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
+	}
+
+	priv->fw_ready = 1;
+	wake_up(&card->pwron_waitq);
+
+	if (!card->started) {
+		ret = lbs_start_card(priv);
+		if_sdio_power_off(card);
+		if (ret == 0) {
+			card->started = true;
+			/* Tell PM core that we don't need the card to be
+			 * powered now */
+			pm_runtime_put_noidle(&func->dev);
+		}
+	}
+
+	return;
+
+release_irq:
+	sdio_release_irq(func);
+release:
+	sdio_release_host(func);
+}
+
+static int if_sdio_power_on(struct if_sdio_card *card)
+{
+	struct sdio_func *func = card->func;
 	struct mmc_host *host = func->card->host;
 	int ret;
 
@@ -810,64 +885,13 @@
 
 	sdio_release_host(func);
 	ret = if_sdio_prog_firmware(card);
-	sdio_claim_host(func);
-	if (ret)
-		goto disable;
-
-	/*
-	 * Get rx_unit if the chip is SD8688 or newer.
-	 * SD8385 & SD8686 do not have rx_unit.
-	 */
-	if ((card->model != MODEL_8385)
-			&& (card->model != MODEL_8686))
-		card->rx_unit = if_sdio_read_rx_unit(card);
-	else
-		card->rx_unit = 0;
-
-	/*
-	 * Set up the interrupt handler late.
-	 *
-	 * If we set it up earlier, the (buggy) hardware generates a spurious
-	 * interrupt, even before the interrupt has been enabled, with
-	 * CCCR_INTx = 0.
-	 *
-	 * We register the interrupt handler late so that we can handle any
-	 * spurious interrupts, and also to avoid generation of that known
-	 * spurious interrupt in the first place.
-	 */
-	ret = sdio_claim_irq(func, if_sdio_interrupt);
-	if (ret)
-		goto disable;
-
-	/*
-	 * Enable interrupts now that everything is set up
-	 */
-	sdio_writeb(func, 0x0f, IF_SDIO_H_INT_MASK, &ret);
-	if (ret)
-		goto release_irq;
-
-	sdio_release_host(func);
-
-	/*
-	 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
-	 */
-	if (card->model == MODEL_8688) {
-		struct cmd_header cmd;
-
-		memset(&cmd, 0, sizeof(cmd));
-
-		lbs_deb_sdio("send function INIT command\n");
-		if (__lbs_cmd(priv, CMD_FUNC_INIT, &cmd, sizeof(cmd),
-				lbs_cmd_copyback, (unsigned long) &cmd))
-			netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
+	if (ret) {
+		sdio_disable_func(func);
+		return ret;
 	}
 
-	priv->fw_ready = 1;
-
 	return 0;
 
-release_irq:
-	sdio_release_irq(func);
 disable:
 	sdio_disable_func(func);
 release:
@@ -1074,11 +1098,17 @@
 static int if_sdio_power_restore(struct lbs_private *priv)
 {
 	struct if_sdio_card *card = priv->card;
+	int r;
 
 	/* Make sure the card will not be powered off by runtime PM */
 	pm_runtime_get_sync(&card->func->dev);
 
-	return if_sdio_power_on(card);
+	r = if_sdio_power_on(card);
+	if (r)
+		return r;
+
+	wait_event(card->pwron_waitq, priv->fw_ready);
+	return 0;
 }
 
 
@@ -1179,6 +1209,7 @@
 	spin_lock_init(&card->lock);
 	card->workqueue = create_workqueue("libertas_sdio");
 	INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
+	init_waitqueue_head(&card->pwron_waitq);
 
 	/* Check if we support this card */
 	for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
@@ -1220,14 +1251,6 @@
 	if (ret)
 		goto err_activate_card;
 
-	ret = lbs_start_card(priv);
-	if_sdio_power_off(card);
-	if (ret)
-		goto err_activate_card;
-
-	/* Tell PM core that we don't need the card to be powered now */
-	pm_runtime_put_noidle(&func->dev);
-
 out:
 	lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
 
@@ -1244,10 +1267,6 @@
 		kfree(packet);
 	}
 
-	if (card->helper_allocated)
-		kfree(card->helper);
-	if (card->firmware_allocated)
-		kfree(card->firmware);
 	kfree(card);
 
 	goto out;
@@ -1295,12 +1314,6 @@
 		kfree(packet);
 	}
 
-	if (card->helper_allocated)
-		kfree(card->helper);
-	if (card->firmware_allocated)
-		kfree(card->firmware);
-	kfree(card);
-
 	lbs_deb_leave(LBS_DEB_SDIO);
 }
 
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 50b1ee7..9604a1c 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1064,9 +1064,8 @@
 			goto out;
 		}
 
-		err = lbs_get_firmware(&card->spi->dev, NULL, NULL,
-					card->card_id, &fw_table[0], &helper,
-					&mainfw);
+		err = lbs_get_firmware(&card->spi->dev, card->card_id,
+					&fw_table[0], &helper, &mainfw);
 		if (err) {
 			netdev_err(priv->dev, "failed to find firmware (%d)\n",
 				   err);
@@ -1095,10 +1094,8 @@
 		goto out;
 
 out:
-	if (helper)
-		release_firmware(helper);
-	if (mainfw)
-		release_firmware(mainfw);
+	release_firmware(helper);
+	release_firmware(mainfw);
 
 	lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
 
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 74da5f1..75403e6 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -29,9 +29,6 @@
 
 #define MESSAGE_HEADER_LEN	4
 
-static char *lbs_fw_name = NULL;
-module_param_named(fw_name, lbs_fw_name, charp, 0644);
-
 MODULE_FIRMWARE("libertas/usb8388_v9.bin");
 MODULE_FIRMWARE("libertas/usb8388_v5.bin");
 MODULE_FIRMWARE("libertas/usb8388.bin");
@@ -44,6 +41,16 @@
 	MODEL_8682 = 0x2
 };
 
+/* table of firmware file names */
+static const struct lbs_fw_table fw_table[] = {
+	{ MODEL_8388, "libertas/usb8388_olpc.bin", NULL },
+	{ MODEL_8388, "libertas/usb8388_v9.bin", NULL },
+	{ MODEL_8388, "libertas/usb8388_v5.bin", NULL },
+	{ MODEL_8388, "libertas/usb8388.bin", NULL },
+	{ MODEL_8388, "usb8388.bin", NULL },
+	{ MODEL_8682, "libertas/usb8682.bin", NULL }
+};
+
 static struct usb_device_id if_usb_table[] = {
 	/* Enter the device signature inside */
 	{ USB_DEVICE(0x1286, 0x2001), .driver_info = MODEL_8388 },
@@ -55,10 +62,9 @@
 
 static void if_usb_receive(struct urb *urb);
 static void if_usb_receive_fwload(struct urb *urb);
-static int __if_usb_prog_firmware(struct if_usb_card *cardp,
-					const char *fwname, int cmd);
-static int if_usb_prog_firmware(struct if_usb_card *cardp,
-					const char *fwname, int cmd);
+static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
+				 const struct firmware *fw,
+				 const struct firmware *unused);
 static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type,
 			       uint8_t *payload, uint16_t nb);
 static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
@@ -67,69 +73,6 @@
 static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
 static int if_usb_reset_device(struct if_usb_card *cardp);
 
-/* sysfs hooks */
-
-/*
- *  Set function to write firmware to device's persistent memory
- */
-static ssize_t if_usb_firmware_set(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	struct if_usb_card *cardp = priv->card;
-	int ret;
-
-	BUG_ON(buf == NULL);
-
-	ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW);
-	if (ret == 0)
-		return count;
-
-	return ret;
-}
-
-/*
- * lbs_flash_fw attribute to be exported per ethX interface through sysfs
- * (/sys/class/net/ethX/lbs_flash_fw).  Use this like so to write firmware to
- * the device's persistent memory:
- * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_fw
- */
-static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set);
-
-/**
- * if_usb_boot2_set - write firmware to device's persistent memory
- *
- * @dev: target device
- * @attr: device attributes
- * @buf: firmware buffer to write
- * @count: number of bytes to write
- *
- * returns: number of bytes written or negative error code
- */
-static ssize_t if_usb_boot2_set(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct lbs_private *priv = to_net_dev(dev)->ml_priv;
-	struct if_usb_card *cardp = priv->card;
-	int ret;
-
-	BUG_ON(buf == NULL);
-
-	ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2);
-	if (ret == 0)
-		return count;
-
-	return ret;
-}
-
-/*
- * lbs_flash_boot2 attribute to be exported per ethX interface through sysfs
- * (/sys/class/net/ethX/lbs_flash_boot2).  Use this like so to write firmware
- * to the device's persistent memory:
- * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_boot2
- */
-static DEVICE_ATTR(lbs_flash_boot2, 0200, NULL, if_usb_boot2_set);
-
 /**
  * if_usb_write_bulk_callback - callback function to handle the status
  * of the URB
@@ -256,6 +199,7 @@
 	struct usb_endpoint_descriptor *endpoint;
 	struct lbs_private *priv;
 	struct if_usb_card *cardp;
+	int r = -ENOMEM;
 	int i;
 
 	udev = interface_to_usbdev(intf);
@@ -313,20 +257,10 @@
 		goto dealloc;
 	}
 
-	/* Upload firmware */
-	kparam_block_sysfs_write(fw_name);
-	if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) {
-		kparam_unblock_sysfs_write(fw_name);
-		lbs_deb_usbd(&udev->dev, "FW upload failed\n");
-		goto err_prog_firmware;
-	}
-	kparam_unblock_sysfs_write(fw_name);
-
 	if (!(priv = lbs_add_card(cardp, &intf->dev)))
-		goto err_prog_firmware;
+		goto err_add_card;
 
 	cardp->priv = priv;
-	cardp->priv->fw_ready = 1;
 
 	priv->hw_host_to_card = if_usb_host_to_card;
 	priv->enter_deep_sleep = NULL;
@@ -339,42 +273,25 @@
 
 	cardp->boot2_version = udev->descriptor.bcdDevice;
 
-	if_usb_submit_rx_urb(cardp);
-
-	if (lbs_start_card(priv))
-		goto err_start_card;
-
-	if_usb_setup_firmware(priv);
-
 	usb_get_dev(udev);
 	usb_set_intfdata(intf, cardp);
 
-	if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_fw))
-		netdev_err(priv->dev,
-			   "cannot register lbs_flash_fw attribute\n");
-
-	if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2))
-		netdev_err(priv->dev,
-			   "cannot register lbs_flash_boot2 attribute\n");
-
-	/*
-	 * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware.
-	 */
-	priv->wol_criteria = EHS_REMOVE_WAKEUP;
-	if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
-		priv->ehs_remove_supported = false;
+	r = lbs_get_firmware_async(priv, &udev->dev, cardp->model,
+				   fw_table, if_usb_prog_firmware);
+	if (r)
+		goto err_get_fw;
 
 	return 0;
 
-err_start_card:
+err_get_fw:
 	lbs_remove_card(priv);
-err_prog_firmware:
+err_add_card:
 	if_usb_reset_device(cardp);
 dealloc:
 	if_usb_free(cardp);
 
 error:
-	return -ENOMEM;
+	return r;
 }
 
 /**
@@ -389,9 +306,6 @@
 
 	lbs_deb_enter(LBS_DEB_MAIN);
 
-	device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2);
-	device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_fw);
-
 	cardp->surprise_removed = 1;
 
 	if (priv) {
@@ -912,121 +826,22 @@
 	return ret;
 }
 
-
-/**
-*  if_usb_prog_firmware - programs the firmware subject to cmd
-*
-*  @cardp:	the if_usb_card descriptor
-*  @fwname:	firmware or boot2 image file name
-*  @cmd:	either BOOT_CMD_FW_BY_USB, BOOT_CMD_UPDATE_FW,
-*		or BOOT_CMD_UPDATE_BOOT2.
-*  returns:	0 or error code
-*/
-static int if_usb_prog_firmware(struct if_usb_card *cardp,
-				const char *fwname, int cmd)
+static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
+				 const struct firmware *fw,
+				 const struct firmware *unused)
 {
-	struct lbs_private *priv = cardp->priv;
-	unsigned long flags, caps;
-	int ret;
-
-	caps = priv->fwcapinfo;
-	if (((cmd == BOOT_CMD_UPDATE_FW) && !(caps & FW_CAPINFO_FIRMWARE_UPGRADE)) ||
-	    ((cmd == BOOT_CMD_UPDATE_BOOT2) && !(caps & FW_CAPINFO_BOOT2_UPGRADE)))
-		return -EOPNOTSUPP;
-
-	/* Ensure main thread is idle. */
-	spin_lock_irqsave(&priv->driver_lock, flags);
-	while (priv->cur_cmd != NULL || priv->dnld_sent != DNLD_RES_RECEIVED) {
-		spin_unlock_irqrestore(&priv->driver_lock, flags);
-		if (wait_event_interruptible(priv->waitq,
-				(priv->cur_cmd == NULL &&
-				priv->dnld_sent == DNLD_RES_RECEIVED))) {
-			return -ERESTARTSYS;
-		}
-		spin_lock_irqsave(&priv->driver_lock, flags);
-	}
-	priv->dnld_sent = DNLD_BOOTCMD_SENT;
-	spin_unlock_irqrestore(&priv->driver_lock, flags);
-
-	ret = __if_usb_prog_firmware(cardp, fwname, cmd);
-
-	spin_lock_irqsave(&priv->driver_lock, flags);
-	priv->dnld_sent = DNLD_RES_RECEIVED;
-	spin_unlock_irqrestore(&priv->driver_lock, flags);
-
-	wake_up(&priv->waitq);
-
-	return ret;
-}
-
-/* table of firmware file names */
-static const struct {
-	u32 model;
-	const char *fwname;
-} fw_table[] = {
-	{ MODEL_8388, "libertas/usb8388_v9.bin" },
-	{ MODEL_8388, "libertas/usb8388_v5.bin" },
-	{ MODEL_8388, "libertas/usb8388.bin" },
-	{ MODEL_8388, "usb8388.bin" },
-	{ MODEL_8682, "libertas/usb8682.bin" }
-};
-
-#ifdef CONFIG_OLPC
-
-static int try_olpc_fw(struct if_usb_card *cardp)
-{
-	int retval = -ENOENT;
-
-	/* try the OLPC firmware first; fall back to fw_table list */
-	if (machine_is_olpc() && cardp->model == MODEL_8388)
-		retval = request_firmware(&cardp->fw,
-				"libertas/usb8388_olpc.bin", &cardp->udev->dev);
-	return retval;
-}
-
-#else
-static int try_olpc_fw(struct if_usb_card *cardp) { return -ENOENT; }
-#endif /* !CONFIG_OLPC */
-
-static int get_fw(struct if_usb_card *cardp, const char *fwname)
-{
-	int i;
-
-	/* Try user-specified firmware first */
-	if (fwname)
-		return request_firmware(&cardp->fw, fwname, &cardp->udev->dev);
-
-	/* Handle OLPC firmware */
-	if (try_olpc_fw(cardp) == 0)
-		return 0;
-
-	/* Otherwise search for firmware to use */
-	for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
-		if (fw_table[i].model != cardp->model)
-			continue;
-		if (request_firmware(&cardp->fw, fw_table[i].fwname,
-					&cardp->udev->dev) == 0)
-			return 0;
-	}
-
-	return -ENOENT;
-}
-
-static int __if_usb_prog_firmware(struct if_usb_card *cardp,
-					const char *fwname, int cmd)
-{
+	struct if_usb_card *cardp = priv->card;
 	int i = 0;
 	static int reset_count = 10;
-	int ret = 0;
 
 	lbs_deb_enter(LBS_DEB_USB);
 
-	ret = get_fw(cardp, fwname);
 	if (ret) {
 		pr_err("failed to find firmware (%d)\n", ret);
 		goto done;
 	}
 
+	cardp->fw = fw;
 	if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
 		ret = -EINVAL;
 		goto release_fw;
@@ -1053,7 +868,7 @@
 	do {
 		int j = 0;
 		i++;
-		if_usb_issue_boot_command(cardp, cmd);
+		if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB);
 		/* wait for command response */
 		do {
 			j++;
@@ -1109,13 +924,27 @@
 		goto release_fw;
 	}
 
+	cardp->priv->fw_ready = 1;
+	if_usb_submit_rx_urb(cardp);
+
+	if (lbs_start_card(priv))
+		goto release_fw;
+
+	if_usb_setup_firmware(priv);
+
+	/*
+	 * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware.
+	 */
+	priv->wol_criteria = EHS_REMOVE_WAKEUP;
+	if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
+		priv->ehs_remove_supported = false;
+
  release_fw:
 	release_firmware(cardp->fw);
 	cardp->fw = NULL;
 
  done:
-	lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret);
-	return ret;
+	lbs_deb_leave(LBS_DEB_USB);
 }
 
 
@@ -1128,8 +957,10 @@
 
 	lbs_deb_enter(LBS_DEB_USB);
 
-	if (priv->psstate != PS_STATE_FULL_POWER)
-		return -1;
+	if (priv->psstate != PS_STATE_FULL_POWER) {
+		ret = -1;
+		goto out;
+	}
 
 #ifdef CONFIG_OLPC
 	if (machine_is_olpc()) {
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 957681d..e96ee0a 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -878,6 +878,7 @@
 	priv->is_host_sleep_configured = 0;
 	priv->is_host_sleep_activated = 0;
 	init_waitqueue_head(&priv->host_sleep_q);
+	init_waitqueue_head(&priv->fw_waitq);
 	mutex_init(&priv->lock);
 
 	setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
@@ -1033,7 +1034,11 @@
 	lbs_deb_enter(LBS_DEB_MAIN);
 
 	lbs_remove_mesh(priv);
-	lbs_scan_deinit(priv);
+
+	if (priv->wiphy_registered)
+		lbs_scan_deinit(priv);
+
+	lbs_wait_for_firmware_load(priv);
 
 	/* worker thread destruction blocks on the in-flight command which
 	 * should have been cleared already in lbs_stop_card().
@@ -1128,6 +1133,11 @@
 		goto out;
 	dev = priv->dev;
 
+	/* If the netdev isn't registered, it means that lbs_start_card() was
+	 * never called so we have nothing to do here. */
+	if (dev->reg_state != NETREG_REGISTERED)
+		goto out;
+
 	netif_stop_queue(dev);
 	netif_carrier_off(dev);
 
@@ -1177,111 +1187,6 @@
 }
 EXPORT_SYMBOL_GPL(lbs_notify_command_response);
 
-/**
- *  lbs_get_firmware - Retrieves two-stage firmware
- *
- *  @dev:     	A pointer to &device structure
- *  @user_helper: User-defined helper firmware file
- *  @user_mainfw: User-defined main firmware file
- *  @card_model: Bus-specific card model ID used to filter firmware table
- *		elements
- *  @fw_table:	Table of firmware file names and device model numbers
- *		terminated by an entry with a NULL helper name
- *  @helper:	On success, the helper firmware; caller must free
- *  @mainfw:	On success, the main firmware; caller must free
- *
- *  returns:		0 on success, non-zero on failure
- */
-int lbs_get_firmware(struct device *dev, const char *user_helper,
-			const char *user_mainfw, u32 card_model,
-			const struct lbs_fw_table *fw_table,
-			const struct firmware **helper,
-			const struct firmware **mainfw)
-{
-	const struct lbs_fw_table *iter;
-	int ret;
-
-	BUG_ON(helper == NULL);
-	BUG_ON(mainfw == NULL);
-
-	/* Try user-specified firmware first */
-	if (user_helper) {
-		ret = request_firmware(helper, user_helper, dev);
-		if (ret) {
-			dev_err(dev, "couldn't find helper firmware %s\n",
-				user_helper);
-			goto fail;
-		}
-	}
-	if (user_mainfw) {
-		ret = request_firmware(mainfw, user_mainfw, dev);
-		if (ret) {
-			dev_err(dev, "couldn't find main firmware %s\n",
-				user_mainfw);
-			goto fail;
-		}
-	}
-
-	if (*helper && *mainfw)
-		return 0;
-
-	/* Otherwise search for firmware to use.  If neither the helper or
-	 * the main firmware were specified by the user, then we need to
-	 * make sure that found helper & main are from the same entry in
-	 * fw_table.
-	 */
-	iter = fw_table;
-	while (iter && iter->helper) {
-		if (iter->model != card_model)
-			goto next;
-
-		if (*helper == NULL) {
-			ret = request_firmware(helper, iter->helper, dev);
-			if (ret)
-				goto next;
-
-			/* If the device has one-stage firmware (ie cf8305) and
-			 * we've got it then we don't need to bother with the
-			 * main firmware.
-			 */
-			if (iter->fwname == NULL)
-				return 0;
-		}
-
-		if (*mainfw == NULL) {
-			ret = request_firmware(mainfw, iter->fwname, dev);
-			if (ret && !user_helper) {
-				/* Clear the helper if it wasn't user-specified
-				 * and the main firmware load failed, to ensure
-				 * we don't have mismatched firmware pairs.
-				 */
-				release_firmware(*helper);
-				*helper = NULL;
-			}
-		}
-
-		if (*helper && *mainfw)
-			return 0;
-
-  next:
-		iter++;
-	}
-
-  fail:
-	/* Failed */
-	if (*helper) {
-		release_firmware(*helper);
-		*helper = NULL;
-	}
-	if (*mainfw) {
-		release_firmware(*mainfw);
-		*mainfw = NULL;
-	}
-
-	return -ENOENT;
-}
-EXPORT_SYMBOL_GPL(lbs_get_firmware);
-
 static int __init lbs_init_module(void)
 {
 	lbs_deb_enter(LBS_DEB_MAIN);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index b7ce6a6..03c0c6b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -582,11 +582,13 @@
 		goto nla_put_failure;
 	}
 
-	NLA_PUT(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
-		     sizeof(struct mac_address), data->addresses[1].addr);
+	if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
+		    sizeof(struct mac_address), data->addresses[1].addr))
+		goto nla_put_failure;
 
 	/* We get the skb->data */
-	NLA_PUT(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data);
+	if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
+		goto nla_put_failure;
 
 	/* We get the flags for this transmission, and we translate them to
 	   wmediumd flags  */
@@ -597,7 +599,8 @@
 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
 		hwsim_flags |= HWSIM_TX_CTL_NO_ACK;
 
-	NLA_PUT_U32(skb, HWSIM_ATTR_FLAGS, hwsim_flags);
+	if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
+		goto nla_put_failure;
 
 	/* We get the tx control (rate and retries) info*/
 
@@ -606,12 +609,14 @@
 		tx_attempts[i].count = info->status.rates[i].count;
 	}
 
-	NLA_PUT(skb, HWSIM_ATTR_TX_INFO,
-		     sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
-		     tx_attempts);
+	if (nla_put(skb, HWSIM_ATTR_TX_INFO,
+		    sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
+		    tx_attempts))
+		goto nla_put_failure;
 
 	/* We create a cookie to identify this skb */
-	NLA_PUT_U64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb);
+	if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb))
+		goto nla_put_failure;
 
 	genlmsg_end(skb, msg_head);
 	genlmsg_unicast(&init_net, skb, dst_pid);
@@ -632,6 +637,7 @@
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_rx_status rx_status;
+	struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
 
 	if (data->idle) {
 		wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
@@ -666,6 +672,7 @@
 	spin_lock(&hwsim_radio_lock);
 	list_for_each_entry(data2, &hwsim_radios, list) {
 		struct sk_buff *nskb;
+		struct ieee80211_mgmt *mgmt;
 
 		if (data == data2)
 			continue;
@@ -683,8 +690,18 @@
 
 		if (mac80211_hwsim_addr_match(data2, hdr->addr1))
 			ack = true;
+
+		/* set bcn timestamp relative to receiver mactime */
 		rx_status.mactime =
-			le64_to_cpu(__mac80211_hwsim_get_tsf(data2));
+				le64_to_cpu(__mac80211_hwsim_get_tsf(data2));
+		mgmt = (struct ieee80211_mgmt *) nskb->data;
+		if (ieee80211_is_beacon(mgmt->frame_control) ||
+		    ieee80211_is_probe_resp(mgmt->frame_control))
+			mgmt->u.beacon.timestamp = cpu_to_le64(
+				rx_status.mactime +
+				(data->tsf_offset - data2->tsf_offset) +
+				24 * 8 * 10 / txrate->bitrate);
+
 		memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
 		ieee80211_rx_irqsafe(data2->hw, nskb);
 	}
@@ -698,12 +715,6 @@
 	bool ack;
 	struct ieee80211_tx_info *txi;
 	u32 _pid;
-	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
-	struct mac80211_hwsim_data *data = hw->priv;
-
-	if (ieee80211_is_beacon(mgmt->frame_control) ||
-	    ieee80211_is_probe_resp(mgmt->frame_control))
-		mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
 
 	mac80211_hwsim_monitor_rx(hw, skb);
 
@@ -800,11 +811,9 @@
 				     struct ieee80211_vif *vif)
 {
 	struct ieee80211_hw *hw = arg;
-	struct mac80211_hwsim_data *data = hw->priv;
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
 	u32 _pid;
-	struct ieee80211_mgmt *mgmt;
 
 	hwsim_check_magic(vif);
 
@@ -818,9 +827,6 @@
 		return;
 	info = IEEE80211_SKB_CB(skb);
 
-	mgmt = (struct ieee80211_mgmt *) skb->data;
-	mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
-
 	mac80211_hwsim_monitor_rx(hw, skb);
 
 	/* wmediumd mode check */
@@ -1108,7 +1114,8 @@
 						nla_total_size(sizeof(u32)));
 		if (!skb)
 			return -ENOMEM;
-		NLA_PUT_U32(skb, HWSIM_TM_ATTR_PS, hwsim->ps);
+		if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
+			goto nla_put_failure;
 		return cfg80211_testmode_reply(skb);
 	default:
 		return -EOPNOTSUPP;
@@ -1444,7 +1451,7 @@
 			hwsim_fops_group_read, hwsim_fops_group_write,
 			"%llx\n");
 
-struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
+static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
 			     struct mac_address *addr)
 {
 	struct mac80211_hwsim_data *data;
@@ -1789,9 +1796,11 @@
 			    IEEE80211_HW_SIGNAL_DBM |
 			    IEEE80211_HW_SUPPORTS_STATIC_SMPS |
 			    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
-			    IEEE80211_HW_AMPDU_AGGREGATION;
+			    IEEE80211_HW_AMPDU_AGGREGATION |
+			    IEEE80211_HW_WANT_MONITOR_VIF;
 
-		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+				    WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
 		/* ask mac80211 to reserve space for magic */
 		hw->vif_data_size = sizeof(struct hwsim_vif_priv);
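
The netlink changes in the hwsim hunks above all follow one pattern: the NLA_PUT*() macros, which hid a goto nla_put_failure, are replaced by explicit return-value checks on nla_put*(). A minimal sketch of the pattern, with placeholder attribute IDs:

static int example_fill(struct sk_buff *skb, u32 flags, u64 cookie)
{
	/* EXAMPLE_ATTR_* are invented attribute IDs */
	if (nla_put_u32(skb, EXAMPLE_ATTR_FLAGS, flags))
		goto nla_put_failure;
	if (nla_put_u64(skb, EXAMPLE_ATTR_COOKIE, cookie))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
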
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index a5e182b..fe8ebfe 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -350,25 +350,26 @@
 		ret_len += sizeof(struct mwifiex_ie_types_htcap);
 	}
 
-	if (bss_desc->bcn_ht_info) {
+	if (bss_desc->bcn_ht_oper) {
 		if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
 			ht_info = (struct mwifiex_ie_types_htinfo *) *buffer;
 			memset(ht_info, 0,
 			       sizeof(struct mwifiex_ie_types_htinfo));
 			ht_info->header.type =
-					cpu_to_le16(WLAN_EID_HT_INFORMATION);
+					cpu_to_le16(WLAN_EID_HT_OPERATION);
 			ht_info->header.len =
-				cpu_to_le16(sizeof(struct ieee80211_ht_info));
+				cpu_to_le16(
+					sizeof(struct ieee80211_ht_operation));
 
 			memcpy((u8 *) ht_info +
 			       sizeof(struct mwifiex_ie_types_header),
-			       (u8 *) bss_desc->bcn_ht_info +
+			       (u8 *) bss_desc->bcn_ht_oper +
 			       sizeof(struct ieee_types_header),
 			       le16_to_cpu(ht_info->header.len));
 
 			if (!(sband->ht_cap.cap &
 					IEEE80211_HT_CAP_SUP_WIDTH_20_40))
-				ht_info->ht_info.ht_param &=
+				ht_info->ht_oper.ht_param &=
 					~(IEEE80211_HT_PARAM_CHAN_WIDTH_ANY |
 					IEEE80211_HT_PARAM_CHA_SEC_OFFSET);
 
@@ -385,16 +386,16 @@
 			sizeof(struct mwifiex_ie_types_chan_list_param_set) -
 			sizeof(struct mwifiex_ie_types_header));
 		chan_list->chan_scan_param[0].chan_number =
-			bss_desc->bcn_ht_info->control_chan;
+			bss_desc->bcn_ht_oper->primary_chan;
 		chan_list->chan_scan_param[0].radio_type =
 			mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
 
 		if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
-		    bss_desc->bcn_ht_info->ht_param &
+		    bss_desc->bcn_ht_oper->ht_param &
 		    IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
 			SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
 					  radio_type,
-					  (bss_desc->bcn_ht_info->ht_param &
+					  (bss_desc->bcn_ht_oper->ht_param &
 					  IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
 
 		*buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set);
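
[ Note: the bcn_ht_info/bcn_ht_oper renames in this file track mac80211's
  renaming of struct ieee80211_ht_info to struct ieee80211_ht_operation and
  of WLAN_EID_HT_INFORMATION to WLAN_EID_HT_OPERATION; control_chan becomes
  primary_chan but the semantics are unchanged. A small sketch, not part of
  the diff, of how the secondary-channel offset is still read from the
  renamed element: ]

    /* ht_param keeps the channel-width and secondary-offset bits. */
    static u8 sec_chan_offset(const struct ieee80211_ht_operation *ht_oper)
    {
            return ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
    }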
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 9eefb2a..ab84eb9 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -233,21 +233,27 @@
 
 	skb_push(skb_aggr, headroom);
 
-	/*
-	 * Padding per MSDU will affect the length of next
-	 * packet and hence the exact length of next packet
-	 * is uncertain here.
-	 *
-	 * Also, aggregation of transmission buffer, while
-	 * downloading the data to the card, wont gain much
-	 * on the AMSDU packets as the AMSDU packets utilizes
-	 * the transmission buffer space to the maximum
-	 * (adapter->tx_buf_size).
-	 */
-	tx_param.next_pkt_len = 0;
+	if (adapter->iface_type == MWIFIEX_USB) {
+		adapter->data_sent = true;
+		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
+						   skb_aggr, NULL);
+	} else {
+		/*
+		 * Padding per MSDU will affect the length of next
+		 * packet and hence the exact length of next packet
+		 * is uncertain here.
+		 *
+		 * Also, aggregation of transmission buffer, while
+		 * downloading the data to the card, wont gain much
+		 * on the AMSDU packets as the AMSDU packets utilizes
+		 * the transmission buffer space to the maximum
+		 * (adapter->tx_buf_size).
+		 */
+		tx_param.next_pkt_len = 0;
 
-	ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
-					   skb_aggr, &tx_param);
+		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
+						   skb_aggr, &tx_param);
+	}
 	switch (ret) {
 	case -EBUSY:
 		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 2a078ce..8e384fa 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -10,12 +10,12 @@
 	  mwifiex.
 
 config MWIFIEX_SDIO
-	tristate "Marvell WiFi-Ex Driver for SD8787/SD8797"
+	tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797"
 	depends on MWIFIEX && MMC
 	select FW_LOADER
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8787/8797 chipsets with SDIO interface.
+	  8786/8787/8797 chipsets with SDIO interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_sdio.
@@ -30,3 +30,14 @@
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_pcie.
+
+config MWIFIEX_USB
+	tristate "Marvell WiFi-Ex Driver for USB8797"
+	depends on MWIFIEX && USB
+	select FW_LOADER
+	---help---
+	  This adds support for wireless adapters based on Marvell
+	  Avastar 88W8797 chipset with USB interface.
+
+	  If you choose to build it as a module, it will be called
+	  mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index b0257ad..5c1a46b 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -42,3 +42,6 @@
 
 mwifiex_pcie-y += pcie.o
 obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o
+
+mwifiex_usb-y += usb.o
+obj-$(CONFIG_MWIFIEX_USB) += mwifiex_usb.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 6505038..c78ea87 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -516,25 +516,23 @@
 mwifiex_dump_station_info(struct mwifiex_private *priv,
 			  struct station_info *sinfo)
 {
-	struct mwifiex_ds_get_signal signal;
 	struct mwifiex_rate_cfg rate;
-	int ret = 0;
 
 	sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES |
-		STATION_INFO_RX_PACKETS |
-		STATION_INFO_TX_PACKETS
-		| STATION_INFO_SIGNAL | STATION_INFO_TX_BITRATE;
+			STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS |
+			STATION_INFO_TX_BITRATE |
+			STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
 
 	/* Get signal information from the firmware */
-	memset(&signal, 0, sizeof(struct mwifiex_ds_get_signal));
-	if (mwifiex_get_signal_info(priv, &signal)) {
-		dev_err(priv->adapter->dev, "getting signal information\n");
-		ret = -EFAULT;
+	if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
+				  HostCmd_ACT_GEN_GET, 0, NULL)) {
+		dev_err(priv->adapter->dev, "failed to get signal information\n");
+		return -EFAULT;
 	}
 
 	if (mwifiex_drv_get_data_rate(priv, &rate)) {
 		dev_err(priv->adapter->dev, "getting data rate\n");
-		ret = -EFAULT;
+		return -EFAULT;
 	}
 
 	/* Get DTIM period information from firmware */
@@ -557,11 +555,12 @@
 			sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
 	}
 
+	sinfo->signal_avg = priv->bcn_rssi_avg;
 	sinfo->rx_bytes = priv->stats.rx_bytes;
 	sinfo->tx_bytes = priv->stats.tx_bytes;
 	sinfo->rx_packets = priv->stats.rx_packets;
 	sinfo->tx_packets = priv->stats.tx_packets;
-	sinfo->signal = priv->qual_level;
+	sinfo->signal = priv->bcn_rssi_avg;
 	/* bit rate is in 500 kb/s units. Convert it to 100kb/s units */
 	sinfo->txrate.legacy = rate.rate * 5;
 
@@ -581,7 +580,7 @@
 			priv->curr_bss_params.bss_descriptor.beacon_period;
 	}
 
-	return ret;
+	return 0;
 }
 
 /*
@@ -604,6 +603,23 @@
 	return mwifiex_dump_station_info(priv, sinfo);
 }
 
+/*
+ * CFG802.11 operation handler to dump station information.
+ */
+static int
+mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
+			      int idx, u8 *mac, struct station_info *sinfo)
+{
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+	if (!priv->media_connected || idx)
+		return -ENOENT;
+
+	memcpy(mac, priv->cfg_bssid, ETH_ALEN);
+
+	return mwifiex_dump_station_info(priv, sinfo);
+}
+
 /* Supported rates to be advertised to the cfg80211 */
 
 static struct ieee80211_rate mwifiex_rates[] = {
@@ -750,6 +766,45 @@
 }
 
 /*
+ * CFG802.11 operation handler for connection quality monitoring.
+ *
+ * This function subscribes/unsubscribes HIGH_RSSI and LOW_RSSI
+ * events to FW.
+ */
+static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
+						struct net_device *dev,
+						s32 rssi_thold, u32 rssi_hyst)
+{
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+	struct mwifiex_ds_misc_subsc_evt subsc_evt;
+
+	priv->cqm_rssi_thold = rssi_thold;
+	priv->cqm_rssi_hyst = rssi_hyst;
+
+	memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
+	subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
+
+	/* Subscribe/unsubscribe low and high rssi events */
+	if (rssi_thold && rssi_hyst) {
+		subsc_evt.action = HostCmd_ACT_BITWISE_SET;
+		subsc_evt.bcn_l_rssi_cfg.abs_value = abs(rssi_thold);
+		subsc_evt.bcn_h_rssi_cfg.abs_value = abs(rssi_thold);
+		subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
+		subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
+		return mwifiex_send_cmd_sync(priv,
+					     HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+					     0, 0, &subsc_evt);
+	} else {
+		subsc_evt.action = HostCmd_ACT_BITWISE_CLR;
+		return mwifiex_send_cmd_sync(priv,
+					     HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+					     0, 0, &subsc_evt);
+	}
+
+	return 0;
+}
+
+/*
  * CFG802.11 operation handler for disconnection request.
  *
  * This function does not work when there is already a disconnection
@@ -1107,6 +1162,17 @@
 	priv->user_scan_cfg->num_ssids = request->n_ssids;
 	priv->user_scan_cfg->ssid_list = request->ssids;
 
+	if (request->ie && request->ie_len) {
+		for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
+			if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
+				continue;
+			priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN;
+			memcpy(&priv->vs_ie[i].ie, request->ie,
+			       request->ie_len);
+			break;
+		}
+	}
+
 	for (i = 0; i < request->n_channels; i++) {
 		chan = request->channels[i];
 		priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
@@ -1124,6 +1190,15 @@
 	if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
 		return -EFAULT;
 
+	if (request->ie && request->ie_len) {
+		for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
+			if (priv->vs_ie[i].mask == MWIFIEX_VSIE_MASK_SCAN) {
+				priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_CLEAR;
+				memset(&priv->vs_ie[i].ie, 0,
+				       MWIFIEX_MAX_VSIE_LEN);
+			}
+		}
+	}
 	return 0;
 }
 
@@ -1340,6 +1415,7 @@
 	.connect = mwifiex_cfg80211_connect,
 	.disconnect = mwifiex_cfg80211_disconnect,
 	.get_station = mwifiex_cfg80211_get_station,
+	.dump_station = mwifiex_cfg80211_dump_station,
 	.set_wiphy_params = mwifiex_cfg80211_set_wiphy_params,
 	.set_channel = mwifiex_cfg80211_set_channel,
 	.join_ibss = mwifiex_cfg80211_join_ibss,
@@ -1350,6 +1426,7 @@
 	.set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
 	.set_tx_power = mwifiex_cfg80211_set_tx_power,
 	.set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask,
+	.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
 };
 
 /*
@@ -1365,6 +1442,7 @@
 	void *wdev_priv;
 	struct wireless_dev *wdev;
 	struct ieee80211_sta_ht_cap *ht_info;
+	u8 *country_code;
 
 	wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
 	if (!wdev) {
@@ -1381,6 +1459,7 @@
 	}
 	wdev->iftype = NL80211_IFTYPE_STATION;
 	wdev->wiphy->max_scan_ssids = 10;
+	wdev->wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
 	wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 				       BIT(NL80211_IFTYPE_ADHOC);
 
@@ -1403,8 +1482,8 @@
 	memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
 	wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 
-	/* Reserve space for bss band information */
-	wdev->wiphy->bss_priv_size = sizeof(u8);
+	/* Reserve space for mwifiex specific private data for BSS */
+	wdev->wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
 
 	wdev->wiphy->reg_notifier = mwifiex_reg_notifier;
 
@@ -1427,6 +1506,11 @@
 			"info: successfully registered wiphy device\n");
 	}
 
+	country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
+	if (country_code && regulatory_hint(wdev->wiphy, country_code))
+		dev_err(priv->adapter->dev,
+			"%s: regulatory_hint failed\n", __func__);
+
 	priv->wdev = wdev;
 
 	return ret;
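
[ Note: the new set_cqm_rssi_config handler maps cfg80211's connection
  quality monitor onto firmware beacon-RSSI subscriptions; the other half
  of the feature lives in sta_event.c further down, where EVENT_RSSI_LOW/
  HIGH is turned into a cfg80211_cqm_rssi_notify() call. User space drives
  this through the usual nl80211 CQM attributes (for example iw's
  "cqm rssi <threshold> [hysteresis]" command, assuming a standard iw
  build). A reduced sketch of the event half, not part of this hunk: ]

    /* On a firmware low-RSSI event: tell user space and re-read the
     * averaged RSSI so the thresholds can be re-armed around it. */
    cfg80211_cqm_rssi_notify(priv->netdev,
                             NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, GFP_KERNEL);
    mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
                           HostCmd_ACT_GEN_GET, 0, NULL);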
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 2fe1c33..560871b 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -71,6 +71,37 @@
 
 static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
 
+struct region_code_mapping {
+	u8 code;
+	u8 region[IEEE80211_COUNTRY_STRING_LEN];
+};
+
+static struct region_code_mapping region_code_mapping_t[] = {
+	{ 0x10, "US " }, /* US FCC */
+	{ 0x20, "CA " }, /* IC Canada */
+	{ 0x30, "EU " }, /* ETSI */
+	{ 0x31, "ES " }, /* Spain */
+	{ 0x32, "FR " }, /* France */
+	{ 0x40, "JP " }, /* Japan */
+	{ 0x41, "JP " }, /* Japan */
+	{ 0x50, "CN " }, /* China */
+};
+
+/* This function converts integer code to region string */
+u8 *mwifiex_11d_code_2_region(u8 code)
+{
+	u8 i;
+	u8 size = sizeof(region_code_mapping_t)/
+				sizeof(struct region_code_mapping);
+
+	/* Look for code in mapping table */
+	for (i = 0; i < size; i++)
+		if (region_code_mapping_t[i].code == code)
+			return region_code_mapping_t[i].region;
+
+	return NULL;
+}
+
 /*
  * This function maps an index in supported rates table into
  * the corresponding data rate.
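
[ Note: mwifiex_11d_code_2_region() converts the firmware's numeric region
  code into a country string that cfg80211.c above passes to
  regulatory_hint(). The table walk could equally be written with
  ARRAY_SIZE(); an equivalent sketch, not part of the diff: ]

    u8 *mwifiex_11d_code_2_region(u8 code)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(region_code_mapping_t); i++)
                    if (region_code_mapping_t[i].code == code)
                            return region_code_mapping_t[i].region;

            return NULL;
    }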
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 07f6e00..1710bef 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -139,6 +139,7 @@
 	uint16_t cmd_size;
 	struct timeval tstamp;
 	unsigned long flags;
+	__le32 tmp;
 
 	if (!adapter || !cmd_node)
 		return -1;
@@ -178,15 +179,28 @@
 		le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
 		le16_to_cpu(host_cmd->seq_num));
 
-	skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN);
-
-	ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
-					   cmd_node->cmd_skb, NULL);
-
-	skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN);
+	if (adapter->iface_type == MWIFIEX_USB) {
+		tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
+		skb_push(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN);
+		memcpy(cmd_node->cmd_skb->data, &tmp, MWIFIEX_TYPE_LEN);
+		adapter->cmd_sent = true;
+		ret = adapter->if_ops.host_to_card(adapter,
+						   MWIFIEX_USB_EP_CMD_EVENT,
+						   cmd_node->cmd_skb, NULL);
+		skb_pull(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN);
+		if (ret == -EBUSY)
+			cmd_node->cmd_skb = NULL;
+	} else {
+		skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN);
+		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
+						   cmd_node->cmd_skb, NULL);
+		skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN);
+	}
 
 	if (ret == -1) {
 		dev_err(adapter->dev, "DNLD_CMD: host to card failed\n");
+		if (adapter->iface_type == MWIFIEX_USB)
+			adapter->cmd_sent = false;
 		if (cmd_node->wait_q_enabled)
 			adapter->cmd_wait_q.status = -1;
 		mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
@@ -232,6 +246,9 @@
 	struct mwifiex_opt_sleep_confirm *sleep_cfm_buf =
 				(struct mwifiex_opt_sleep_confirm *)
 						adapter->sleep_cfm->data;
+	struct sk_buff *sleep_cfm_tmp;
+	__le32 tmp;
+
 	priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
 
 	sleep_cfm_buf->seq_num =
@@ -240,10 +257,28 @@
 					 priv->bss_type)));
 	adapter->seq_num++;
 
-	skb_push(adapter->sleep_cfm, INTF_HEADER_LEN);
-	ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
-					   adapter->sleep_cfm, NULL);
-	skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN);
+	if (adapter->iface_type == MWIFIEX_USB) {
+		sleep_cfm_tmp =
+			dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm)
+				      + MWIFIEX_TYPE_LEN);
+		skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm)
+			+ MWIFIEX_TYPE_LEN);
+		tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
+		memcpy(sleep_cfm_tmp->data, &tmp, MWIFIEX_TYPE_LEN);
+		memcpy(sleep_cfm_tmp->data + MWIFIEX_TYPE_LEN,
+		       adapter->sleep_cfm->data,
+		       sizeof(struct mwifiex_opt_sleep_confirm));
+		ret = adapter->if_ops.host_to_card(adapter,
+						   MWIFIEX_USB_EP_CMD_EVENT,
+						   sleep_cfm_tmp, NULL);
+		if (ret != -EBUSY)
+			dev_kfree_skb_any(sleep_cfm_tmp);
+	} else {
+		skb_push(adapter->sleep_cfm, INTF_HEADER_LEN);
+		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
+						   adapter->sleep_cfm, NULL);
+		skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN);
+	}
 
 	if (ret == -1) {
 		dev_err(adapter->dev, "SLEEP_CFM: failed\n");
@@ -343,7 +378,12 @@
 		}
 		if (!cmd_array[i].resp_skb)
 			continue;
-		dev_kfree_skb_any(cmd_array[i].resp_skb);
+
+		if (adapter->iface_type == MWIFIEX_USB)
+			adapter->if_ops.cmdrsp_complete(adapter,
+							cmd_array[i].resp_skb);
+		else
+			dev_kfree_skb_any(cmd_array[i].resp_skb);
 	}
 	/* Release struct cmd_ctrl_node */
 	if (adapter->cmd_pool) {
@@ -1083,6 +1123,7 @@
 						    MWIFIEX_BSS_ROLE_ANY),
 				   false);
 }
+EXPORT_SYMBOL_GPL(mwifiex_process_hs_config);
 
 /*
  * This function handles the command response of a sleep confirm command.
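
[ Note: over USB there is no SDIO/PCIe interface header; instead every
  transfer to the command/event endpoint is prefixed with a 4-byte
  little-endian type word (MWIFIEX_USB_TYPE_CMD here, with the data and
  event markers defined in main.h below). A minimal sketch of that framing
  step, assuming the skb has MWIFIEX_TYPE_LEN bytes of headroom; the helper
  name is illustrative: ]

    static void usb_prefix_type(struct sk_buff *skb, u32 type)
    {
            __le32 tmp = cpu_to_le32(type);

            skb_push(skb, MWIFIEX_TYPE_LEN);           /* reserve 4 bytes  */
            memcpy(skb->data, &tmp, MWIFIEX_TYPE_LEN); /* LE type marker   */
    }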
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 1a84507..a870b58 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -212,7 +212,7 @@
 		p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
 		p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
 		p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
-		p += sprintf(p, "region_code = \"%02x\"\n", info.region_code);
+		p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
 
 		netdev_for_each_mc_addr(ha, netdev)
 			p += sprintf(p, "multicast_address[%d]=\"%pM\"\n",
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index be5fd16..d04aba4 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -53,6 +53,7 @@
 #define MWIFIEX_RATE_BITMAP_MCS127 159
 
 #define MWIFIEX_RX_DATA_BUF_SIZE     (4 * 1024)
+#define MWIFIEX_RX_CMD_BUF_SIZE	     (2 * 1024)
 
 #define MWIFIEX_RTS_MIN_VALUE              (0)
 #define MWIFIEX_RTS_MAX_VALUE              (2347)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index e98fc5a..5f6adeb 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -81,6 +81,11 @@
 #define FIRMWARE_READY_SDIO				0xfedc
 #define FIRMWARE_READY_PCIE				0xfedcba00
 
+enum mwifiex_usb_ep {
+	MWIFIEX_USB_EP_CMD_EVENT = 1,
+	MWIFIEX_USB_EP_DATA = 2,
+};
+
 enum MWIFIEX_802_11_PRIVACY_FILTER {
 	MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL,
 	MWIFIEX_802_11_PRIV_FILTER_8021X_WEP
@@ -92,16 +97,19 @@
 #define TLV_TYPE_KEY_MATERIAL       (PROPRIETARY_TLV_BASE_ID + 0)
 #define TLV_TYPE_CHANLIST           (PROPRIETARY_TLV_BASE_ID + 1)
 #define TLV_TYPE_NUMPROBES          (PROPRIETARY_TLV_BASE_ID + 2)
+#define TLV_TYPE_RSSI_LOW           (PROPRIETARY_TLV_BASE_ID + 4)
 #define TLV_TYPE_PASSTHROUGH        (PROPRIETARY_TLV_BASE_ID + 10)
 #define TLV_TYPE_WMMQSTATUS         (PROPRIETARY_TLV_BASE_ID + 16)
 #define TLV_TYPE_WILDCARDSSID       (PROPRIETARY_TLV_BASE_ID + 18)
 #define TLV_TYPE_TSFTIMESTAMP       (PROPRIETARY_TLV_BASE_ID + 19)
+#define TLV_TYPE_RSSI_HIGH          (PROPRIETARY_TLV_BASE_ID + 22)
 #define TLV_TYPE_AUTH_TYPE          (PROPRIETARY_TLV_BASE_ID + 31)
 #define TLV_TYPE_CHANNELBANDLIST    (PROPRIETARY_TLV_BASE_ID + 42)
 #define TLV_TYPE_RATE_DROP_CONTROL  (PROPRIETARY_TLV_BASE_ID + 82)
 #define TLV_TYPE_RATE_SCOPE         (PROPRIETARY_TLV_BASE_ID + 83)
 #define TLV_TYPE_POWER_GROUP        (PROPRIETARY_TLV_BASE_ID + 84)
 #define TLV_TYPE_WAPI_IE            (PROPRIETARY_TLV_BASE_ID + 94)
+#define TLV_TYPE_MGMT_IE            (PROPRIETARY_TLV_BASE_ID + 105)
 #define TLV_TYPE_AUTO_DS_PARAM      (PROPRIETARY_TLV_BASE_ID + 113)
 #define TLV_TYPE_PS_PARAM           (PROPRIETARY_TLV_BASE_ID + 114)
 
@@ -194,6 +202,7 @@
 #define HostCmd_CMD_802_11_KEY_MATERIAL               0x005e
 #define HostCmd_CMD_802_11_BG_SCAN_QUERY              0x006c
 #define HostCmd_CMD_WMM_GET_STATUS                    0x0071
+#define HostCmd_CMD_802_11_SUBSCRIBE_EVENT            0x0075
 #define HostCmd_CMD_802_11_TX_RATE_QUERY              0x007f
 #define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS     0x0083
 #define HostCmd_CMD_VERSION_EXT                       0x0097
@@ -228,6 +237,8 @@
 #define HostCmd_RET_BIT                       0x8000
 #define HostCmd_ACT_GEN_GET                   0x0000
 #define HostCmd_ACT_GEN_SET                   0x0001
+#define HostCmd_ACT_BITWISE_SET               0x0002
+#define HostCmd_ACT_BITWISE_CLR               0x0003
 #define HostCmd_RESULT_OK                     0x0000
 
 #define HostCmd_ACT_MAC_RX_ON                 0x0001
@@ -813,7 +824,7 @@
 struct mwifiex_bcn_param {
 	u8 bssid[ETH_ALEN];
 	u8 rssi;
-	__le32 timestamp[2];
+	__le64 timestamp;
 	__le16 beacon_period;
 	__le16 cap_info_bitmap;
 } __packed;
@@ -982,8 +993,7 @@
 struct ieee_types_vendor_header {
 	u8 element_id;
 	u8 len;
-	u8 oui[3];
-	u8 oui_type;
+	u8 oui[4];	/* 0~2: oui, 3: oui_type */
 	u8 oui_subtype;
 	u8 version;
 } __packed;
@@ -1007,7 +1017,7 @@
 	struct ieee_types_vendor_header vend_hdr;
 	u8 qos_info_bitmap;
 	u8 reserved;
-	struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_MAX_QUEUES];
+	struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
 } __packed;
 
 struct ieee_types_wmm_info {
@@ -1028,7 +1038,7 @@
 
 struct host_cmd_ds_wmm_get_status {
 	u8 queue_status_tlv[sizeof(struct mwifiex_ie_types_wmm_queue_status) *
-			      IEEE80211_MAX_QUEUES];
+			      IEEE80211_NUM_ACS];
 	u8 wmm_param_tlv[sizeof(struct ieee_types_wmm_parameter) + 2];
 } __packed;
 
@@ -1045,7 +1055,7 @@
 
 struct mwifiex_ie_types_htinfo {
 	struct mwifiex_ie_types_header header;
-	struct ieee80211_ht_info ht_info;
+	struct ieee80211_ht_operation ht_oper;
 } __packed;
 
 struct mwifiex_ie_types_2040bssco {
@@ -1146,6 +1156,17 @@
 	u32 sleep_cookie_addr_hi;
 } __packed;
 
+struct mwifiex_ie_types_rssi_threshold {
+	struct mwifiex_ie_types_header header;
+	u8 abs_value;
+	u8 evt_freq;
+} __packed;
+
+struct host_cmd_ds_802_11_subsc_evt {
+	__le16 action;
+	__le16 events;
+} __packed;
+
 struct host_cmd_ds_command {
 	__le16 command;
 	__le16 size;
@@ -1195,6 +1216,7 @@
 		struct host_cmd_ds_set_bss_mode bss_mode;
 		struct host_cmd_ds_pcie_details pcie_host_spec;
 		struct host_cmd_ds_802_11_eeprom_access eeprom;
+		struct host_cmd_ds_802_11_subsc_evt subsc_evt;
 	} params;
 } __packed;
 
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 54bb483..d440c3e 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -131,6 +131,8 @@
 	priv->wmm_qosinfo = 0;
 	priv->curr_bcn_buf = NULL;
 	priv->curr_bcn_size = 0;
+	priv->wps_ie = NULL;
+	priv->wps_ie_len = 0;
 
 	priv->scan_block = false;
 
@@ -186,10 +188,10 @@
 
 	adapter->cmd_sent = false;
 
-	if (adapter->iface_type == MWIFIEX_PCIE)
-		adapter->data_sent = false;
-	else
+	if (adapter->iface_type == MWIFIEX_SDIO)
 		adapter->data_sent = true;
+	else
+		adapter->data_sent = false;
 
 	adapter->cmd_resp_received = false;
 	adapter->event_received = false;
@@ -377,7 +379,8 @@
 
 	dev_dbg(adapter->dev, "info: free scan table\n");
 
-	adapter->if_ops.cleanup_if(adapter);
+	if (adapter->if_ops.cleanup_if)
+		adapter->if_ops.cleanup_if(adapter);
 
 	if (adapter->sleep_cfm)
 		dev_kfree_skb_any(adapter->sleep_cfm);
@@ -417,6 +420,8 @@
 	spin_lock_init(&adapter->cmd_pending_q_lock);
 	spin_lock_init(&adapter->scan_pending_q_lock);
 
+	skb_queue_head_init(&adapter->usb_rx_data_q);
+
 	for (i = 0; i < adapter->priv_num; ++i) {
 		INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
 		adapter->bss_prio_tbl[i].bss_prio_cur = NULL;
@@ -572,6 +577,7 @@
 	struct mwifiex_private *priv;
 	s32 i;
 	unsigned long flags;
+	struct sk_buff *skb;
 
 	/* mwifiex already shutdown */
 	if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)
@@ -599,6 +605,18 @@
 
 	spin_lock_irqsave(&adapter->mwifiex_lock, flags);
 
+	if (adapter->if_ops.data_complete) {
+		while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) {
+			struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
+
+			priv = adapter->priv[rx_info->bss_num];
+			if (priv)
+				priv->stats.rx_dropped++;
+
+			adapter->if_ops.data_complete(adapter, skb);
+		}
+	}
+
 	/* Free adapter structure */
 	mwifiex_free_adapter(adapter);
 
@@ -628,24 +646,28 @@
 	int ret;
 	u32 poll_num = 1;
 
-	adapter->winner = 0;
+	if (adapter->if_ops.check_fw_status) {
+		adapter->winner = 0;
 
-	/* Check if firmware is already running */
-	ret = adapter->if_ops.check_fw_status(adapter, poll_num);
-	if (!ret) {
-		dev_notice(adapter->dev,
-			   "WLAN FW already running! Skip FW download\n");
-		goto done;
-	}
-	poll_num = MAX_FIRMWARE_POLL_TRIES;
+		/* check if firmware is already running */
+		ret = adapter->if_ops.check_fw_status(adapter, poll_num);
+		if (!ret) {
+			dev_notice(adapter->dev,
+				   "WLAN FW already running! Skip FW dnld\n");
+			goto done;
+		}
 
-	/* Check if we are the winner for downloading FW */
-	if (!adapter->winner) {
-		dev_notice(adapter->dev,
-			   "Other intf already running! Skip FW download\n");
-		poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
-		goto poll_fw;
+		poll_num = MAX_FIRMWARE_POLL_TRIES;
+
+		/* check if we are the winner for downloading FW */
+		if (!adapter->winner) {
+			dev_notice(adapter->dev,
+				   "FW already running! Skip FW dnld\n");
+			poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
+			goto poll_fw;
+		}
 	}
+
 	if (pmfw) {
 		/* Download firmware with helper */
 		ret = adapter->if_ops.prog_fw(adapter, pmfw);
@@ -664,6 +686,8 @@
 	}
 done:
 	/* re-enable host interrupt for mwifiex after fw dnld is successful */
-	adapter->if_ops.enable_int(adapter);
+	if (adapter->if_ops.enable_int)
+		adapter->if_ops.enable_int(adapter);
+
 	return ret;
 }
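
[ Note: several if_ops hooks (check_fw_status, enable_int, cleanup_if, and
  later process_int_status/unregister_dev) become optional here because the
  USB interface does not implement them, so callers now test the pointer
  first; shutdown also drains any RX skbs still queued on usb_rx_data_q and
  accounts them as rx_dropped. The guard pattern, shown once for reference: ]

    if (adapter->if_ops.enable_int)
            adapter->if_ops.enable_int(adapter);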
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 7ca4e82..f0f9552 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -85,34 +85,6 @@
 	u32 wep_icv_error[4];
 };
 
-#define BCN_RSSI_AVG_MASK               0x00000002
-#define BCN_NF_AVG_MASK                 0x00000200
-#define ALL_RSSI_INFO_MASK              0x00000fff
-
-struct mwifiex_ds_get_signal {
-	/*
-	 * Bit0:  Last Beacon RSSI,  Bit1:  Average Beacon RSSI,
-	 * Bit2:  Last Data RSSI,    Bit3:  Average Data RSSI,
-	 * Bit4:  Last Beacon SNR,   Bit5:  Average Beacon SNR,
-	 * Bit6:  Last Data SNR,     Bit7:  Average Data SNR,
-	 * Bit8:  Last Beacon NF,    Bit9:  Average Beacon NF,
-	 * Bit10: Last Data NF,      Bit11: Average Data NF
-	 */
-	u16 selector;
-	s16 bcn_rssi_last;
-	s16 bcn_rssi_avg;
-	s16 data_rssi_last;
-	s16 data_rssi_avg;
-	s16 bcn_snr_last;
-	s16 bcn_snr_avg;
-	s16 data_snr_last;
-	s16 data_snr_avg;
-	s16 bcn_nf_last;
-	s16 bcn_nf_avg;
-	s16 data_nf_last;
-	s16 data_nf_avg;
-};
-
 #define MWIFIEX_MAX_VER_STR_LEN    128
 
 struct mwifiex_ver_ext {
@@ -124,7 +96,7 @@
 	u32 bss_mode;
 	struct cfg80211_ssid ssid;
 	u32 bss_chan;
-	u32 region_code;
+	u8 country_code[3];
 	u32 media_connected;
 	u32 max_power_level;
 	u32 min_power_level;
@@ -308,8 +280,30 @@
 	u8 cmd[MWIFIEX_SIZE_OF_CMD_BUFFER];
 };
 
+#define BITMASK_BCN_RSSI_LOW	BIT(0)
+#define BITMASK_BCN_RSSI_HIGH	BIT(4)
+
+enum subsc_evt_rssi_state {
+	EVENT_HANDLED,
+	RSSI_LOW_RECVD,
+	RSSI_HIGH_RECVD
+};
+
+struct subsc_evt_cfg {
+	u8 abs_value;
+	u8 evt_freq;
+};
+
+struct mwifiex_ds_misc_subsc_evt {
+	u16 action;
+	u16 events;
+	struct subsc_evt_cfg bcn_l_rssi_cfg;
+	struct subsc_evt_cfg bcn_h_rssi_cfg;
+};
+
 #define MWIFIEX_MAX_VSIE_LEN       (256)
 #define MWIFIEX_MAX_VSIE_NUM       (8)
+#define MWIFIEX_VSIE_MASK_CLEAR    0x00
 #define MWIFIEX_VSIE_MASK_SCAN     0x01
 #define MWIFIEX_VSIE_MASK_ASSOC    0x02
 #define MWIFIEX_VSIE_MASK_ADHOC    0x04
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 8f9382b..8a39098 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -118,15 +118,15 @@
 	*buffer += sizeof(tsf_tlv.header);
 
 	/* TSF at the time when beacon/probe_response was received */
-	tsf_val = cpu_to_le64(bss_desc->network_tsf);
+	tsf_val = cpu_to_le64(bss_desc->fw_tsf);
 	memcpy(*buffer, &tsf_val, sizeof(tsf_val));
 	*buffer += sizeof(tsf_val);
 
-	memcpy(&tsf_val, bss_desc->time_stamp, sizeof(tsf_val));
+	tsf_val = cpu_to_le64(bss_desc->timestamp);
 
 	dev_dbg(priv->adapter->dev,
 		"info: %s: TSF offset calc: %016llx - %016llx\n",
-		__func__, tsf_val, bss_desc->network_tsf);
+		__func__, bss_desc->timestamp, bss_desc->fw_tsf);
 
 	memcpy(*buffer, &tsf_val, sizeof(tsf_val));
 	*buffer += sizeof(tsf_val);
@@ -225,6 +225,48 @@
 }
 
 /*
+ * This function appends a WPS IE. It is called from the network join command
+ * preparation routine.
+ *
+ * If the IE buffer has been setup by the application, this routine appends
+ * the buffer as a WPS TLV type to the request.
+ */
+static int
+mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer)
+{
+	int retLen = 0;
+	struct mwifiex_ie_types_header ie_header;
+
+	if (!buffer || !*buffer)
+		return 0;
+
+	/*
+	 * If there is a wps ie buffer setup, append it to the return
+	 * parameter buffer pointer.
+	 */
+	if (priv->wps_ie_len) {
+		dev_dbg(priv->adapter->dev, "cmd: append wps ie %d to %p\n",
+			priv->wps_ie_len, *buffer);
+
+		/* Wrap the generic IE buffer with a pass through TLV type */
+		ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE);
+		ie_header.len = cpu_to_le16(priv->wps_ie_len);
+		memcpy(*buffer, &ie_header, sizeof(ie_header));
+		*buffer += sizeof(ie_header);
+		retLen += sizeof(ie_header);
+
+		memcpy(*buffer, priv->wps_ie, priv->wps_ie_len);
+		*buffer += priv->wps_ie_len;
+		retLen += priv->wps_ie_len;
+
+	}
+
+	kfree(priv->wps_ie);
+	priv->wps_ie_len = 0;
+	return retLen;
+}
+
+/*
  * This function appends a WAPI IE.
  *
  * This function is called from the network join command preparation routine.
@@ -480,6 +522,8 @@
 	if (priv->sec_info.wapi_enabled && priv->wapi_ie_len)
 		mwifiex_cmd_append_wapi_ie(priv, &pos);
 
+	if (priv->wps.session_enable && priv->wps_ie_len)
+		mwifiex_cmd_append_wps_ie(priv, &pos);
 
 	mwifiex_cmd_append_generic_ie(priv, &pos);
 
@@ -932,20 +976,20 @@
 		/* Fill HT INFORMATION */
 		ht_info = (struct mwifiex_ie_types_htinfo *) pos;
 		memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
-		ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION);
+		ht_info->header.type = cpu_to_le16(WLAN_EID_HT_OPERATION);
 		ht_info->header.len =
-				cpu_to_le16(sizeof(struct ieee80211_ht_info));
+			cpu_to_le16(sizeof(struct ieee80211_ht_operation));
 
-		ht_info->ht_info.control_chan =
+		ht_info->ht_oper.primary_chan =
 			(u8) priv->curr_bss_params.bss_descriptor.channel;
 		if (adapter->sec_chan_offset) {
-			ht_info->ht_info.ht_param = adapter->sec_chan_offset;
-			ht_info->ht_info.ht_param |=
+			ht_info->ht_oper.ht_param = adapter->sec_chan_offset;
+			ht_info->ht_oper.ht_param |=
 					IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
 		}
-		ht_info->ht_info.operation_mode =
+		ht_info->ht_oper.operation_mode =
 		     cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-		ht_info->ht_info.basic_set[0] = 0xff;
+		ht_info->ht_oper.basic_set[0] = 0xff;
 		pos += sizeof(struct mwifiex_ie_types_htinfo);
 		cmd_append_size +=
 				sizeof(struct mwifiex_ie_types_htinfo);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9d1b3ca..be0f0e5 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -58,8 +58,9 @@
 	memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops));
 
 	/* card specific initialization has been deferred until now .. */
-	if (adapter->if_ops.init_if(adapter))
-		goto error;
+	if (adapter->if_ops.init_if)
+		if (adapter->if_ops.init_if(adapter))
+			goto error;
 
 	adapter->priv_num = 0;
 
@@ -140,6 +141,7 @@
 {
 	int ret = 0;
 	unsigned long flags;
+	struct sk_buff *skb;
 
 	spin_lock_irqsave(&adapter->main_proc_lock, flags);
 
@@ -161,7 +163,8 @@
 		if (adapter->int_status) {
 			if (adapter->hs_activated)
 				mwifiex_process_hs_config(adapter);
-			adapter->if_ops.process_int_status(adapter);
+			if (adapter->if_ops.process_int_status)
+				adapter->if_ops.process_int_status(adapter);
 		}
 
 		/* Need to wake up the card ? */
@@ -174,6 +177,7 @@
 			adapter->if_ops.wakeup(adapter);
 			continue;
 		}
+
 		if (IS_CARD_RX_RCVD(adapter)) {
 			adapter->pm_wakeup_fw_try = false;
 			if (adapter->ps_state == PS_STATE_SLEEP)
@@ -194,6 +198,11 @@
 			}
 		}
 
+		/* Check Rx data for USB */
+		if (adapter->iface_type == MWIFIEX_USB)
+			while ((skb = skb_dequeue(&adapter->usb_rx_data_q)))
+				mwifiex_handle_rx_packet(adapter, skb);
+
 		/* Check for Cmd Resp */
 		if (adapter->cmd_resp_received) {
 			adapter->cmd_resp_received = false;
@@ -292,33 +301,35 @@
 }
 
 /*
- * This function initializes the hardware and firmware.
+ * This function gets firmware and initializes it.
  *
  * The main initialization steps followed are -
  *      - Download the correct firmware to card
- *      - Allocate and initialize the adapter structure
- *      - Initialize the private structures
  *      - Issue the init commands to firmware
  */
-static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
+static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 {
-	int ret, err;
+	int ret;
+	char fmt[64];
+	struct mwifiex_private *priv;
+	struct mwifiex_adapter *adapter = context;
 	struct mwifiex_fw_image fw;
 
-	memset(&fw, 0, sizeof(struct mwifiex_fw_image));
-
-	err = request_firmware(&adapter->firmware, adapter->fw_name,
-			       adapter->dev);
-	if (err < 0) {
-		dev_err(adapter->dev, "request_firmware() returned"
-				" error code %#x\n", err);
-		ret = -1;
+	if (!firmware) {
+		dev_err(adapter->dev,
+			"Failed to get firmware %s\n", adapter->fw_name);
 		goto done;
 	}
+
+	memset(&fw, 0, sizeof(struct mwifiex_fw_image));
+	adapter->firmware = firmware;
 	fw.fw_buf = (u8 *) adapter->firmware->data;
 	fw.fw_len = adapter->firmware->size;
 
-	ret = mwifiex_dnld_fw(adapter, &fw);
+	if (adapter->if_ops.dnld_fw)
+		ret = adapter->if_ops.dnld_fw(adapter, &fw);
+	else
+		ret = mwifiex_dnld_fw(adapter, &fw);
 	if (ret == -1)
 		goto done;
 
@@ -335,17 +346,54 @@
 	/* Wait for mwifiex_init to complete */
 	wait_event_interruptible(adapter->init_wait_q,
 				 adapter->init_wait_q_woken);
-	if (adapter->hw_status != MWIFIEX_HW_STATUS_READY) {
-		ret = -1;
+	if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
 		goto done;
-	}
-	ret = 0;
 
+	priv = adapter->priv[0];
+	if (mwifiex_register_cfg80211(priv) != 0) {
+		dev_err(adapter->dev, "cannot register with cfg80211\n");
+		goto err_init_fw;
+	}
+
+	rtnl_lock();
+	/* Create station interface by default */
+	if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d",
+				      NL80211_IFTYPE_STATION, NULL, NULL)) {
+		dev_err(adapter->dev, "cannot create default STA interface\n");
+		goto err_add_intf;
+	}
+	rtnl_unlock();
+
+	mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
+	dev_notice(adapter->dev, "driver_version = %s\n", fmt);
+	goto done;
+
+err_add_intf:
+	mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
+	rtnl_unlock();
+err_init_fw:
+	pr_debug("info: %s: unregister device\n", __func__);
+	adapter->if_ops.unregister_dev(adapter);
 done:
-	if (adapter->firmware)
-		release_firmware(adapter->firmware);
-	if (ret)
-		ret = -1;
+	release_firmware(adapter->firmware);
+	complete(&adapter->fw_load);
+	return;
+}
+
+/*
+ * This function initializes the hardware and gets firmware.
+ */
+static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
+{
+	int ret;
+
+	init_completion(&adapter->fw_load);
+	ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
+				      adapter->dev, GFP_KERNEL, adapter,
+				      mwifiex_fw_dpc);
+	if (ret < 0)
+		dev_err(adapter->dev,
+			"request_firmware_nowait() returned error %d\n", ret);
 	return ret;
 }
 
@@ -650,8 +698,6 @@
 		 struct mwifiex_if_ops *if_ops, u8 iface_type)
 {
 	struct mwifiex_adapter *adapter;
-	char fmt[64];
-	struct mwifiex_private *priv;
 
 	if (down_interruptible(sem))
 		goto exit_sem_err;
@@ -692,40 +738,13 @@
 		goto err_init_fw;
 	}
 
-	priv = adapter->priv[0];
-
-	if (mwifiex_register_cfg80211(priv) != 0) {
-		dev_err(adapter->dev, "cannot register netdevice"
-			       " with cfg80211\n");
-			goto err_init_fw;
-	}
-
-	rtnl_lock();
-	/* Create station interface by default */
-	if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d",
-				      NL80211_IFTYPE_STATION, NULL, NULL)) {
-		rtnl_unlock();
-		dev_err(adapter->dev, "cannot create default station"
-				" interface\n");
-		goto err_add_intf;
-	}
-
-	rtnl_unlock();
-
 	up(sem);
-
-	mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
-	dev_notice(adapter->dev, "driver_version = %s\n", fmt);
-
 	return 0;
 
-err_add_intf:
-	rtnl_lock();
-	mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
-	rtnl_unlock();
 err_init_fw:
 	pr_debug("info: %s: unregister device\n", __func__);
-	adapter->if_ops.unregister_dev(adapter);
+	if (adapter->if_ops.unregister_dev)
+		adapter->if_ops.unregister_dev(adapter);
 err_registerdev:
 	adapter->surprise_removed = true;
 	mwifiex_terminate_workqueue(adapter);
@@ -830,7 +849,8 @@
 
 	/* Unregister device */
 	dev_dbg(adapter->dev, "info: unregister device\n");
-	adapter->if_ops.unregister_dev(adapter);
+	if (adapter->if_ops.unregister_dev)
+		adapter->if_ops.unregister_dev(adapter);
 	/* Free adapter structure */
 	dev_dbg(adapter->dev, "info: free adapter\n");
 	mwifiex_free_adapter(adapter);
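
[ Note: firmware is now fetched asynchronously; mwifiex_fw_dpc() completes
  device bring-up (cfg80211 registration and the default "mlan%d" STA
  interface) once the image arrives, and signals adapter->fw_load so the
  SDIO/PCIe remove paths below can wait out an in-flight load. A reduced
  sketch of the pattern; names other than the firmware API are
  illustrative: ]

    static void my_fw_dpc(const struct firmware *fw, void *context)
    {
            struct my_adapter *adapter = context;

            if (fw) {
                    /* ... download the image and finish bring-up ... */
                    release_firmware(fw);
            }
            complete(&adapter->fw_load);   /* unblocks the remove path */
    }

    init_completion(&adapter->fw_load);
    ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
                                  adapter->dev, GFP_KERNEL, adapter,
                                  my_fw_dpc);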
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 35225e9..324ad39 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -92,9 +92,16 @@
 #define MWIFIEX_OUI_NOT_PRESENT			0
 #define MWIFIEX_OUI_PRESENT				1
 
+/*
+ * Do not check for data_received for USB, as data_received
+ * is handled in mwifiex_usb_recv for USB
+ */
 #define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \
-					adapter->event_received || \
-					adapter->data_received)
+				adapter->event_received || \
+				((adapter->iface_type != MWIFIEX_USB) && \
+				adapter->data_received) || \
+				((adapter->iface_type == MWIFIEX_USB) && \
+				!skb_queue_empty(&adapter->usb_rx_data_q)))
 
 #define MWIFIEX_TYPE_CMD				1
 #define MWIFIEX_TYPE_DATA				0
@@ -110,6 +117,11 @@
 
 #define MWIFIEX_EVENT_HEADER_LEN           4
 
+#define MWIFIEX_TYPE_LEN			4
+#define MWIFIEX_USB_TYPE_CMD			0xF00DFACE
+#define MWIFIEX_USB_TYPE_DATA			0xBEADC0DE
+#define MWIFIEX_USB_TYPE_EVENT			0xBEEFFACE
+
 struct mwifiex_dbg {
 	u32 num_cmd_host_to_card_failure;
 	u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -162,6 +174,7 @@
 enum mwifiex_iface_type {
 	MWIFIEX_SDIO,
 	MWIFIEX_PCIE,
+	MWIFIEX_USB
 };
 
 struct mwifiex_add_ba_param {
@@ -201,10 +214,10 @@
 	u32 packets_out[MAX_NUM_TID];
 	/* spin lock to protect ra_list */
 	spinlock_t ra_list_spinlock;
-	struct mwifiex_wmm_ac_status ac_status[IEEE80211_MAX_QUEUES];
-	enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_MAX_QUEUES];
+	struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS];
+	enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_NUM_ACS];
 	u32 drv_pkt_delay_max;
-	u8 queue_priority[IEEE80211_MAX_QUEUES];
+	u8 queue_priority[IEEE80211_NUM_ACS];
 	u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1];	/* UP: 0 to 7 */
 	/* Number of transmit packets queued */
 	atomic_t tx_pkts_queued;
@@ -260,8 +273,8 @@
 	 * BAND_A(0X04): 'a' band
 	 */
 	u16 bss_band;
-	u64 network_tsf;
-	u8 time_stamp[8];
+	u64 fw_tsf;
+	u64 timestamp;
 	union ieee_types_phy_param_set phy_param_set;
 	union ieee_types_ss_param_set ss_param_set;
 	u16 cap_info_bitmap;
@@ -269,7 +282,7 @@
 	u8  disable_11n;
 	struct ieee80211_ht_cap *bcn_ht_cap;
 	u16 ht_cap_offset;
-	struct ieee80211_ht_info *bcn_ht_info;
+	struct ieee80211_ht_operation *bcn_ht_oper;
 	u16 ht_info_offset;
 	u8 *bcn_bss_co_2040;
 	u16 bss_co_2040_offset;
@@ -407,6 +420,8 @@
 	struct host_cmd_ds_802_11_key_material aes_key;
 	u8 wapi_ie[256];
 	u8 wapi_ie_len;
+	u8 *wps_ie;
+	u8 wps_ie_len;
 	u8 wmm_required;
 	u8 wmm_enabled;
 	u8 wmm_qosinfo;
@@ -448,7 +463,6 @@
 	struct dentry *dfs_dev_dir;
 #endif
 	u8 nick_name[16];
-	u8 qual_level, qual_noise;
 	u16 current_key_index;
 	struct semaphore async_sem;
 	u8 scan_pending_on_block;
@@ -459,6 +473,9 @@
 	u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
 	struct wps wps;
 	u8 scan_block;
+	s32 cqm_rssi_thold;
+	u32 cqm_rssi_hyst;
+	u8 subsc_evt_rssi_state;
 };
 
 enum mwifiex_ba_status {
@@ -518,6 +535,11 @@
 	u8 cmd_wait_q_woken;
 };
 
+struct mwifiex_bss_priv {
+	u8 band;
+	u64 fw_tsf;
+};
+
 struct mwifiex_if_ops {
 	int (*init_if) (struct mwifiex_adapter *);
 	void (*cleanup_if) (struct mwifiex_adapter *);
@@ -537,6 +559,8 @@
 	void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
 	int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
 	int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
+	int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
+	int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
 };
 
 struct mwifiex_adapter {
@@ -599,6 +623,7 @@
 	struct list_head scan_pending_q;
 	/* spin lock for scan_pending_q */
 	spinlock_t scan_pending_q_lock;
+	struct sk_buff_head usb_rx_data_q;
 	u32 scan_processing;
 	u16 region_code;
 	struct mwifiex_802_11d_domain_reg domain_reg;
@@ -651,6 +676,7 @@
 	u8 scan_wait_q_woken;
 	struct cmd_ctrl_node *cmd_queued;
 	spinlock_t queue_lock;		/* lock for tx queues */
+	struct completion fw_load;
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -896,8 +922,6 @@
 int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
 int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
 int mwifiex_disable_auto_ds(struct mwifiex_private *priv);
-int mwifiex_get_signal_info(struct mwifiex_private *priv,
-			    struct mwifiex_ds_get_signal *signal);
 int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
 			      struct mwifiex_rate_cfg *rate);
 int mwifiex_request_scan(struct mwifiex_private *priv,
@@ -950,13 +974,10 @@
 int mwifiex_get_bss_info(struct mwifiex_private *,
 			 struct mwifiex_bss_info *);
 int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
-			      u8 *bssid, s32 rssi, u8 *ie_buf,
-			      size_t ie_len, u16 beacon_period,
-			      u16 cap_info_bitmap, u8 band,
+			      struct cfg80211_bss *bss,
 			      struct mwifiex_bssdescriptor *bss_desc);
 int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
-				struct mwifiex_bssdescriptor *bss_entry,
-				u8 *ie_buf, u32 ie_len);
+				    struct mwifiex_bssdescriptor *bss_entry);
 int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
 					struct mwifiex_bssdescriptor *bss_desc);
 
@@ -965,6 +986,7 @@
 					u32 *flags, struct vif_params *params);
 int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev);
 
+u8 *mwifiex_11d_code_2_region(u8 code);
 
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 5867fac..13fbc4e 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -119,6 +119,9 @@
 	if (!adapter || !adapter->priv_num)
 		return;
 
+	/* In case driver is removed when asynchronous FW load is in progress */
+	wait_for_completion(&adapter->fw_load);
+
 	if (user_rmmod) {
 #ifdef CONFIG_PM
 		if (adapter->is_suspended)
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index aff9cd7..74f0457 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1048,10 +1048,8 @@
  * This function parses provided beacon buffer and updates
  * respective fields in bss descriptor structure.
  */
-int
-mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
-				struct mwifiex_bssdescriptor *bss_entry,
-				u8 *ie_buf, u32 ie_len)
+int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+				    struct mwifiex_bssdescriptor *bss_entry)
 {
 	int ret = 0;
 	u8 element_id;
@@ -1073,10 +1071,8 @@
 
 	found_data_rate_ie = false;
 	rate_size = 0;
-	current_ptr = ie_buf;
-	bytes_left = ie_len;
-	bss_entry->beacon_buf = ie_buf;
-	bss_entry->beacon_buf_size = ie_len;
+	current_ptr = bss_entry->beacon_buf;
+	bytes_left = bss_entry->beacon_buf_size;
 
 	/* Process variable IE */
 	while (bytes_left >= 2) {
@@ -1221,9 +1217,9 @@
 					sizeof(struct ieee_types_header) -
 					bss_entry->beacon_buf);
 			break;
-		case WLAN_EID_HT_INFORMATION:
-			bss_entry->bcn_ht_info = (struct ieee80211_ht_info *)
-					(current_ptr +
+		case WLAN_EID_HT_OPERATION:
+			bss_entry->bcn_ht_oper =
+				(struct ieee80211_ht_operation *)(current_ptr +
 					sizeof(struct ieee_types_header));
 			bss_entry->ht_info_offset = (u16) (current_ptr +
 					sizeof(struct ieee_types_header) -
@@ -1447,15 +1443,12 @@
 	return ret;
 }
 
-static int
-mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
-			       s32 rssi, const u8 *ie_buf, size_t ie_len,
-			       u16 beacon_period, u16 cap_info_bitmap, u8 band)
+static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
+					  struct cfg80211_bss *bss)
 {
 	struct mwifiex_bssdescriptor *bss_desc;
 	int ret;
 	unsigned long flags;
-	u8 *beacon_ie;
 
 	/* Allocate and fill new bss descriptor */
 	bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
@@ -1465,16 +1458,7 @@
 		return -ENOMEM;
 	}
 
-	beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL);
-	if (!beacon_ie) {
-		kfree(bss_desc);
-		dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
-		return -ENOMEM;
-	}
-
-	ret = mwifiex_fill_new_bss_desc(priv, bssid, rssi, beacon_ie,
-					ie_len, beacon_period,
-					cap_info_bitmap, band, bss_desc);
+	ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
 	if (ret)
 		goto done;
 
@@ -1493,7 +1477,7 @@
 	priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL;
 	priv->curr_bss_params.bss_descriptor.ht_cap_offset =
 		0;
-	priv->curr_bss_params.bss_descriptor.bcn_ht_info = NULL;
+	priv->curr_bss_params.bss_descriptor.bcn_ht_oper = NULL;
 	priv->curr_bss_params.bss_descriptor.ht_info_offset =
 		0;
 	priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 =
@@ -1514,7 +1498,6 @@
 
 done:
 	kfree(bss_desc);
-	kfree(beacon_ie);
 	return 0;
 }
 
@@ -1620,14 +1603,16 @@
 		const u8 *ie_buf;
 		size_t ie_len;
 		u16 channel = 0;
-		u64 network_tsf = 0;
+		u64 fw_tsf = 0;
 		u16 beacon_size = 0;
 		u32 curr_bcn_bytes;
 		u32 freq;
 		u16 beacon_period;
 		u16 cap_info_bitmap;
 		u8 *current_ptr;
+		u64 timestamp;
 		struct mwifiex_bcn_param *bcn_param;
+		struct mwifiex_bss_priv *bss_priv;
 
 		if (bytes_left >= sizeof(beacon_size)) {
 			/* Extract & convert beacon size from command buffer */
@@ -1667,9 +1652,11 @@
 
 		memcpy(bssid, bcn_param->bssid, ETH_ALEN);
 
-		rssi = (s32) (bcn_param->rssi);
-		dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n", rssi);
+		rssi = (s32) bcn_param->rssi;
+		rssi = (-rssi) * 100;		/* Convert dBm to mBm */
+		dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
 
+		timestamp = le64_to_cpu(bcn_param->timestamp);
 		beacon_period = le16_to_cpu(bcn_param->beacon_period);
 
 		cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
@@ -1709,14 +1696,13 @@
 
 		/*
 		 * If the TSF TLV was appended to the scan results, save this
-		 * entry's TSF value in the networkTSF field.The networkTSF is
-		 * the firmware's TSF value at the time the beacon or probe
-		 * response was received.
+		 * entry's TSF value in the fw_tsf field. It is the firmware's
+		 * TSF value at the time the beacon or probe response was
+		 * received.
 		 */
 		if (tsf_tlv)
-			memcpy(&network_tsf,
-			       &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
-			       sizeof(network_tsf));
+			memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
+			       sizeof(fw_tsf));
 
 		if (channel) {
 			struct ieee80211_channel *chan;
@@ -1739,21 +1725,19 @@
 
 			if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
 				bss = cfg80211_inform_bss(priv->wdev->wiphy,
-					      chan, bssid, network_tsf,
+					      chan, bssid, timestamp,
 					      cap_info_bitmap, beacon_period,
 					      ie_buf, ie_len, rssi, GFP_KERNEL);
-				*(u8 *)bss->priv = band;
-				cfg80211_put_bss(bss);
-
+				bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+				bss_priv->band = band;
+				bss_priv->fw_tsf = fw_tsf;
 				if (priv->media_connected &&
 				    !memcmp(bssid,
 					    priv->curr_bss_params.bss_descriptor
 					    .mac_address, ETH_ALEN))
-					mwifiex_update_curr_bss_params
-							(priv, bssid, rssi,
-							 ie_buf, ie_len,
-							 beacon_period,
-							 cap_info_bitmap, band);
+					mwifiex_update_curr_bss_params(priv,
+								       bss);
+				cfg80211_put_bss(bss);
 			}
 		} else {
 			dev_dbg(adapter->dev, "missing BSS channel IE\n");
@@ -2019,8 +2003,8 @@
 			(curr_bss->beacon_buf +
 			 curr_bss->ht_cap_offset);
 
-	if (curr_bss->bcn_ht_info)
-		curr_bss->bcn_ht_info = (struct ieee80211_ht_info *)
+	if (curr_bss->bcn_ht_oper)
+		curr_bss->bcn_ht_oper = (struct ieee80211_ht_operation *)
 			(curr_bss->beacon_buf +
 			 curr_bss->ht_info_offset);
 
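[ Note: two details worth calling out in the scan changes. First, the RSSI
  reported by the firmware is a positive dBm magnitude, while
  cfg80211_inform_bss() expects mBm on CFG80211_SIGNAL_TYPE_MBM wiphys,
  hence the (-rssi) * 100 conversion. Second, bss_priv_size (set in
  cfg80211.c above) reserves a struct mwifiex_bss_priv behind every
  cfg80211_bss so the band and firmware TSF survive until association.
  Sketch only, reusing the names from the hunks above: ]

    /* firmware RSSI magnitude -> mBm, e.g. 42 -> -4200 (-42.00 dBm) */
    s32 rssi_mbm = -((s32)bcn_param->rssi) * 100;

    /* per-BSS private area reserved via wiphy->bss_priv_size */
    struct mwifiex_bss_priv *p = (struct mwifiex_bss_priv *)bss->priv;
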
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index f8012e2..e037747 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -123,6 +123,9 @@
 	if (!adapter || !adapter->priv_num)
 		return;
 
+	/* In case driver is removed when asynchronous FW load is in progress */
+	wait_for_completion(&adapter->fw_load);
+
 	if (user_rmmod) {
 		if (adapter->is_suspended)
 			mwifiex_sdio_resume(adapter->dev);
@@ -250,6 +253,8 @@
 	return 0;
 }
 
+/* Device ID for SD8786 */
+#define SDIO_DEVICE_ID_MARVELL_8786   (0x9116)
 /* Device ID for SD8787 */
 #define SDIO_DEVICE_ID_MARVELL_8787   (0x9119)
 /* Device ID for SD8797 */
@@ -257,6 +262,7 @@
 
 /* WLAN IDs */
 static const struct sdio_device_id mwifiex_ids[] = {
+	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786)},
 	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
 	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
 	{},
@@ -1596,6 +1602,9 @@
 	adapter->dev = &func->dev;
 
 	switch (func->device) {
+	case SDIO_DEVICE_ID_MARVELL_8786:
+		strcpy(adapter->fw_name, SD8786_DEFAULT_FW_NAME);
+		break;
 	case SDIO_DEVICE_ID_MARVELL_8797:
 		strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
 		break;
@@ -1804,5 +1813,6 @@
 MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
 MODULE_VERSION(SDIO_VERSION);
 MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index a3fb322..2103373 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -28,6 +28,7 @@
 
 #include "main.h"
 
+#define SD8786_DEFAULT_FW_NAME "mrvl/sd8786_uapsta.bin"
 #define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
 #define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
 
@@ -193,7 +194,7 @@
 		a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT -	\
 						a->mp_end_port)));	\
 	a->mpa_tx.pkt_cnt++;						\
-} while (0);
+} while (0)
 
 /* SDIO Tx aggregation limit ? */
 #define MP_TX_AGGR_PKT_LIMIT_REACHED(a)					\
@@ -211,7 +212,7 @@
 	a->mpa_tx.buf_len = 0;						\
 	a->mpa_tx.ports = 0;						\
 	a->mpa_tx.start_port = 0;					\
-} while (0);
+} while (0)
 
 /* SDIO Rx aggregation limit ? */
 #define MP_RX_AGGR_PKT_LIMIT_REACHED(a)					\
@@ -242,7 +243,7 @@
 	a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb;			\
 	a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len;		\
 	a->mpa_rx.pkt_cnt++;						\
-} while (0);
+} while (0)
 
 /* Reset SDIO Rx aggregation buffer parameters */
 #define MP_RX_AGGR_BUF_RESET(a) do {					\
@@ -250,7 +251,7 @@
 	a->mpa_rx.buf_len = 0;						\
 	a->mpa_rx.ports = 0;						\
 	a->mpa_rx.start_port = 0;					\
-} while (0);
+} while (0)
 
 
 /* data structure for SDIO MPA TX */
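
[ Note: the "} while (0);" -> "} while (0)" change matters because the
  caller supplies its own semicolon; with the old form each macro expanded
  to two statements and broke if/else users. Illustration (sketch only,
  do_rx_work() is hypothetical): ]

    if (MP_TX_AGGR_PKT_LIMIT_REACHED(card))
            MP_TX_AGGR_BUF_RESET(card);   /* old form left a stray ";" here */
    else                                  /* ... so this else had no if     */
            do_rx_work(card);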
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 6c8e459..87ed2a1 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -907,6 +907,101 @@
 }
 
 /*
+ * This function prepares command for event subscription, configuration
+ * and query. Events can be subscribed or unsubscribed. Current subscribed
+ * events can be queried. Also, current subscribed events are reported in
+ * every FW response.
+ */
+static int
+mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
+			     struct host_cmd_ds_command *cmd,
+			     struct mwifiex_ds_misc_subsc_evt *subsc_evt_cfg)
+{
+	struct host_cmd_ds_802_11_subsc_evt *subsc_evt = &cmd->params.subsc_evt;
+	struct mwifiex_ie_types_rssi_threshold *rssi_tlv;
+	u16 event_bitmap;
+	u8 *pos;
+
+	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SUBSCRIBE_EVENT);
+	cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_subsc_evt) +
+				S_DS_GEN);
+
+	subsc_evt->action = cpu_to_le16(subsc_evt_cfg->action);
+	dev_dbg(priv->adapter->dev, "cmd: action: %d\n", subsc_evt_cfg->action);
+
+	/*For query requests, no configuration TLV structures are to be added.*/
+	if (subsc_evt_cfg->action == HostCmd_ACT_GEN_GET)
+		return 0;
+
+	subsc_evt->events = cpu_to_le16(subsc_evt_cfg->events);
+
+	event_bitmap = subsc_evt_cfg->events;
+	dev_dbg(priv->adapter->dev, "cmd: event bitmap : %16x\n",
+		event_bitmap);
+
+	if (((subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) ||
+	     (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_SET)) &&
+	    (event_bitmap == 0)) {
+		dev_dbg(priv->adapter->dev, "Error: No event specified "
+			"for bitwise action type\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Append TLV structures for each of the specified events for
+	 * subscribing or re-configuring. This is not required for
+	 * bitwise unsubscribing request.
+	 */
+	if (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR)
+		return 0;
+
+	pos = ((u8 *)subsc_evt) +
+			sizeof(struct host_cmd_ds_802_11_subsc_evt);
+
+	if (event_bitmap & BITMASK_BCN_RSSI_LOW) {
+		rssi_tlv = (struct mwifiex_ie_types_rssi_threshold *) pos;
+
+		rssi_tlv->header.type = cpu_to_le16(TLV_TYPE_RSSI_LOW);
+		rssi_tlv->header.len =
+		    cpu_to_le16(sizeof(struct mwifiex_ie_types_rssi_threshold) -
+				sizeof(struct mwifiex_ie_types_header));
+		rssi_tlv->abs_value = subsc_evt_cfg->bcn_l_rssi_cfg.abs_value;
+		rssi_tlv->evt_freq = subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq;
+
+		dev_dbg(priv->adapter->dev, "Cfg Beacon Low Rssi event, "
+			"RSSI:-%d dBm, Freq:%d\n",
+			subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
+			subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
+
+		pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
+		le16_add_cpu(&cmd->size,
+			     sizeof(struct mwifiex_ie_types_rssi_threshold));
+	}
+
+	if (event_bitmap & BITMASK_BCN_RSSI_HIGH) {
+		rssi_tlv = (struct mwifiex_ie_types_rssi_threshold *) pos;
+
+		rssi_tlv->header.type = cpu_to_le16(TLV_TYPE_RSSI_HIGH);
+		rssi_tlv->header.len =
+		    cpu_to_le16(sizeof(struct mwifiex_ie_types_rssi_threshold) -
+				sizeof(struct mwifiex_ie_types_header));
+		rssi_tlv->abs_value = subsc_evt_cfg->bcn_h_rssi_cfg.abs_value;
+		rssi_tlv->evt_freq = subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq;
+
+		dev_dbg(priv->adapter->dev, "Cfg Beacon High Rssi event, "
+			"RSSI:-%d dBm, Freq:%d\n",
+			subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
+			subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
+
+		pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
+		le16_add_cpu(&cmd->size,
+			     sizeof(struct mwifiex_ie_types_rssi_threshold));
+	}
+
+	return 0;
+}
+
+/*
  * This function prepares the commands before sending them to the firmware.
  *
  * This is a generic function which calls specific command preparation
@@ -1086,6 +1181,9 @@
 	case HostCmd_CMD_PCIE_DESC_DETAILS:
 		ret = mwifiex_cmd_pcie_host_spec(priv, cmd_ptr, cmd_action);
 		break;
+	case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
+		ret = mwifiex_cmd_802_11_subsc_evt(priv, cmd_ptr, data_buf);
+		break;
 	default:
 		dev_err(priv->adapter->dev,
 			"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1195,7 +1293,7 @@
 	if (ret)
 		return -1;
 
-	if (first_sta) {
+	if (first_sta && (priv->adapter->iface_type != MWIFIEX_USB)) {
 		/* Enable auto deep sleep */
 		auto_ds.auto_ds = DEEP_SLEEP_ON;
 		auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
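
[ Note: the subscribe-event command is a fixed host_cmd_ds_802_11_subsc_evt
  header followed by one optional RSSI-threshold TLV per subscribed event,
  with cmd->size grown via le16_add_cpu() as each TLV is appended; a GET
  request carries no TLVs at all. A sketch of a query call using the same
  calling convention as cfg80211.c above (not part of the diff): ]

    struct mwifiex_ds_misc_subsc_evt evt = { .action = HostCmd_ACT_GEN_GET };

    /* the response handler fills evt.events with the current bitmap */
    mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
                          0, 0, &evt);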
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 4da19ed..3aa5424 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -119,11 +119,11 @@
  * calculated SNR values.
  */
 static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
-					struct host_cmd_ds_command *resp,
-					struct mwifiex_ds_get_signal *signal)
+					struct host_cmd_ds_command *resp)
 {
 	struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
 						&resp->params.rssi_info_rsp;
+	struct mwifiex_ds_misc_subsc_evt subsc_evt;
 
 	priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
 	priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
@@ -137,34 +137,29 @@
 	priv->bcn_rssi_avg = le16_to_cpu(rssi_info_rsp->bcn_rssi_avg);
 	priv->bcn_nf_avg = le16_to_cpu(rssi_info_rsp->bcn_nf_avg);
 
-	/* Need to indicate IOCTL complete */
-	if (signal) {
-		memset(signal, 0, sizeof(*signal));
+	if (priv->subsc_evt_rssi_state == EVENT_HANDLED)
+		return 0;
 
-		signal->selector = ALL_RSSI_INFO_MASK;
-
-		/* RSSI */
-		signal->bcn_rssi_last = priv->bcn_rssi_last;
-		signal->bcn_rssi_avg = priv->bcn_rssi_avg;
-		signal->data_rssi_last = priv->data_rssi_last;
-		signal->data_rssi_avg = priv->data_rssi_avg;
-
-		/* SNR */
-		signal->bcn_snr_last =
-			CAL_SNR(priv->bcn_rssi_last, priv->bcn_nf_last);
-		signal->bcn_snr_avg =
-			CAL_SNR(priv->bcn_rssi_avg, priv->bcn_nf_avg);
-		signal->data_snr_last =
-			CAL_SNR(priv->data_rssi_last, priv->data_nf_last);
-		signal->data_snr_avg =
-			CAL_SNR(priv->data_rssi_avg, priv->data_nf_avg);
-
-		/* NF */
-		signal->bcn_nf_last = priv->bcn_nf_last;
-		signal->bcn_nf_avg = priv->bcn_nf_avg;
-		signal->data_nf_last = priv->data_nf_last;
-		signal->data_nf_avg = priv->data_nf_avg;
+	/* Resubscribe low and high rssi events with new thresholds */
+	memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
+	subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
+	subsc_evt.action = HostCmd_ACT_BITWISE_SET;
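+	/* After a LOW event, re-arm the low threshold one hysteresis step
+	 * below the current beacon RSSI average and restore the high
+	 * threshold to the configured CQM level; after a HIGH event the
+	 * mirror image is applied.
+	 */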
+	if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) {
+		subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
+				priv->cqm_rssi_hyst);
+		subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
+	} else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) {
+		subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
+		subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
+				priv->cqm_rssi_hyst);
 	}
+	subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
+	subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
+
+	priv->subsc_evt_rssi_state = EVENT_HANDLED;
+
+	mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+			       0, 0, &subsc_evt);
 
 	return 0;
 }
@@ -785,6 +780,28 @@
 }
 
 /*
+ * This function handles the command response for subscribe event command.
+ */
+static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
+				 struct host_cmd_ds_command *resp,
+				 struct mwifiex_ds_misc_subsc_evt *sub_event)
+{
+	struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
+		(struct host_cmd_ds_802_11_subsc_evt *)&resp->params.subsc_evt;
+
+	/* For every subscribe event command (Get/Set/Clear), FW reports the
+	 * current set of subscribed events */
+	dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
+		le16_to_cpu(cmd_sub_event->events));
+
+	/* Return the subscribed event info for a Get request */
+	if (sub_event)
+		sub_event->events = le16_to_cpu(cmd_sub_event->events);
+
+	return 0;
+}
+
+/*
  * This function handles the command responses.
  *
  * This is a generic function, which calls command specific
@@ -853,7 +870,7 @@
 		ret = mwifiex_ret_get_log(priv, resp, data_buf);
 		break;
 	case HostCmd_CMD_RSSI_INFO:
-		ret = mwifiex_ret_802_11_rssi_info(priv, resp, data_buf);
+		ret = mwifiex_ret_802_11_rssi_info(priv, resp);
 		break;
 	case HostCmd_CMD_802_11_SNMP_MIB:
 		ret = mwifiex_ret_802_11_snmp_mib(priv, resp, data_buf);
@@ -924,6 +941,9 @@
 		break;
 	case HostCmd_CMD_PCIE_DESC_DETAILS:
 		break;
+	case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
+		ret = mwifiex_ret_subsc_evt(priv, resp, data_buf);
+		break;
 	default:
 		dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
 			resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index cc531b5..f6bbb93 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -128,9 +128,6 @@
 		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 	if (netif_carrier_ok(priv->netdev))
 		netif_carrier_off(priv->netdev);
-	/* Reset wireless stats signal info */
-	priv->qual_level = 0;
-	priv->qual_noise = 0;
 }
 
 /*
@@ -317,6 +314,12 @@
 		break;
 
 	case EVENT_RSSI_LOW:
+		cfg80211_cqm_rssi_notify(priv->netdev,
+					 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+					 GFP_KERNEL);
+		mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
+				       HostCmd_ACT_GEN_GET, 0, NULL);
+		priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
 		dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
 		break;
 	case EVENT_SNR_LOW:
@@ -326,6 +329,12 @@
 		dev_dbg(adapter->dev, "event: MAX_FAIL\n");
 		break;
 	case EVENT_RSSI_HIGH:
+		cfg80211_cqm_rssi_notify(priv->netdev,
+					 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+					 GFP_KERNEL);
+		mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
+				       HostCmd_ACT_GEN_GET, 0, NULL);
+		priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
 		dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
 		break;
 	case EVENT_SNR_HIGH:
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index d7b11de..58970e0 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -155,20 +155,29 @@
  * information.
  */
 int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
-			      u8 *bssid, s32 rssi, u8 *ie_buf,
-			      size_t ie_len, u16 beacon_period,
-			      u16 cap_info_bitmap, u8 band,
+			      struct cfg80211_bss *bss,
 			      struct mwifiex_bssdescriptor *bss_desc)
 {
 	int ret;
+	u8 *beacon_ie;
+	struct mwifiex_bss_priv *bss_priv = (void *)bss->priv;
 
-	memcpy(bss_desc->mac_address, bssid, ETH_ALEN);
-	bss_desc->rssi = rssi;
-	bss_desc->beacon_buf = ie_buf;
-	bss_desc->beacon_buf_size = ie_len;
-	bss_desc->beacon_period = beacon_period;
-	bss_desc->cap_info_bitmap = cap_info_bitmap;
-	bss_desc->bss_band = band;
+	beacon_ie = kmemdup(bss->information_elements, bss->len_beacon_ies,
+			    GFP_KERNEL);
+	if (!beacon_ie) {
+		dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
+		return -ENOMEM;
+	}
+
+	memcpy(bss_desc->mac_address, bss->bssid, ETH_ALEN);
+	bss_desc->rssi = bss->signal;
+	bss_desc->beacon_buf = beacon_ie;
+	bss_desc->beacon_buf_size = bss->len_beacon_ies;
+	bss_desc->beacon_period = bss->beacon_interval;
+	bss_desc->cap_info_bitmap = bss->capability;
+	bss_desc->bss_band = bss_priv->band;
+	bss_desc->fw_tsf = bss_priv->fw_tsf;
+	bss_desc->timestamp = bss->tsf;
 	if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
 		dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
 		bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
@@ -180,9 +189,9 @@
 	else
 		bss_desc->bss_mode = NL80211_IFTYPE_STATION;
 
-	ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc,
-					      ie_buf, ie_len);
+	ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
 
+	kfree(beacon_ie);
 	return ret;
 }
 
@@ -197,7 +206,6 @@
 	int ret;
 	struct mwifiex_adapter *adapter = priv->adapter;
 	struct mwifiex_bssdescriptor *bss_desc = NULL;
-	u8 *beacon_ie = NULL;
 
 	priv->scan_block = false;
 
@@ -210,19 +218,7 @@
 			return -ENOMEM;
 		}
 
-		beacon_ie = kmemdup(bss->information_elements,
-					bss->len_beacon_ies, GFP_KERNEL);
-		if (!beacon_ie) {
-			kfree(bss_desc);
-			dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
-			return -ENOMEM;
-		}
-
-		ret = mwifiex_fill_new_bss_desc(priv, bss->bssid, bss->signal,
-						beacon_ie, bss->len_beacon_ies,
-						bss->beacon_interval,
-						bss->capability,
-						*(u8 *)bss->priv, bss_desc);
+		ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
 		if (ret)
 			goto done;
 	}
@@ -269,7 +265,6 @@
 		    (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
 				       ssid, &bss_desc->ssid))) {
 			kfree(bss_desc);
-			kfree(beacon_ie);
 			return 0;
 		}
 
@@ -304,7 +299,6 @@
 
 done:
 	kfree(bss_desc);
-	kfree(beacon_ie);
 	return ret;
 }
 
@@ -468,7 +462,8 @@
 
 	info->bss_chan = bss_desc->channel;
 
-	info->region_code = adapter->region_code;
+	memcpy(info->country_code, priv->country_code,
+	       IEEE80211_COUNTRY_STRING_LEN);
 
 	info->media_connected = priv->media_connected;
 
@@ -996,6 +991,39 @@
 }
 
 /*
+ * IOCTL request handler to set/reset WPS IE.
+ *
+ * The supplied WPS IE is treated as an opaque buffer. Only the first field
+ * is checked to internally enable WPS. If buffer length is zero, the existing
+ * WPS IE is reset.
+ */
+static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
+			       u8 *ie_data_ptr, u16 ie_len)
+{
+	if (ie_len) {
+		if (ie_len > MWIFIEX_MAX_VSIE_LEN) {
+			dev_dbg(priv->adapter->dev,
+				"info: failed to copy WPS IE, too big\n");
+			return -1;
+		}
+		priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
+		if (!priv->wps_ie)
+			return -ENOMEM;
+		memcpy(priv->wps_ie, ie_data_ptr, ie_len);
+		priv->wps_ie_len = ie_len;
+		dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
+			priv->wps_ie_len, priv->wps_ie[0]);
+	} else {
+		kfree(priv->wps_ie);
+		priv->wps_ie_len = ie_len;
+		dev_dbg(priv->adapter->dev,
+			"info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
+	}
+	return 0;
+}
+
+/*
  * IOCTL request handler to set WAPI key.
  *
  * This function prepares the correct firmware command and
@@ -1185,39 +1213,6 @@
 }
 
 /*
- * Sends IOCTL request to get signal information.
- *
- * This function allocates the IOCTL request buffer, fills it
- * with requisite parameters and calls the IOCTL handler.
- */
-int mwifiex_get_signal_info(struct mwifiex_private *priv,
-			    struct mwifiex_ds_get_signal *signal)
-{
-	int status;
-
-	signal->selector = ALL_RSSI_INFO_MASK;
-
-	/* Signal info can be obtained only if connected */
-	if (!priv->media_connected) {
-		dev_dbg(priv->adapter->dev,
-			"info: Can not get signal in disconnected state\n");
-		return -1;
-	}
-
-	status = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
-				       HostCmd_ACT_GEN_GET, 0, signal);
-
-	if (!status) {
-		if (signal->selector & BCN_RSSI_AVG_MASK)
-			priv->qual_level = signal->bcn_rssi_avg;
-		if (signal->selector & BCN_NF_AVG_MASK)
-			priv->qual_noise = signal->bcn_nf_avg;
-	}
-
-	return status;
-}
-
-/*
  * Sends IOCTL request to set encoding parameters.
  *
  * This function allocates the IOCTL request buffer, fills it
@@ -1441,6 +1436,7 @@
 			priv->wps.session_enable = true;
 			dev_dbg(priv->adapter->dev,
 				"info: WPS Session Enabled.\n");
+			ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len);
 		}
 
 		/* Append the passed data to the end of the
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 750b695..02ce3b7 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -145,7 +145,12 @@
 			" rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
 		       local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
 		priv->stats.rx_dropped++;
-		dev_kfree_skb_any(skb);
+
+		if (adapter->if_ops.data_complete)
+			adapter->if_ops.data_complete(adapter, skb);
+		else
+			dev_kfree_skb_any(skb);
+
 		return ret;
 	}
 
@@ -196,8 +201,12 @@
 					     (u8) local_rx_pd->rx_pkt_type,
 					     skb);
 
-	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
-		dev_kfree_skb_any(skb);
+	if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
+		if (adapter->if_ops.data_complete)
+			adapter->if_ops.data_complete(adapter, skb);
+		else
+			dev_kfree_skb_any(skb);
+	}
 
 	if (ret)
 		priv->stats.rx_dropped++;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 7af534f..0a046d3 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -149,10 +149,14 @@
 	local_tx_pd->bss_num = priv->bss_num;
 	local_tx_pd->bss_type = priv->bss_type;
 
-	skb_push(skb, INTF_HEADER_LEN);
-
-	ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
-					   skb, NULL);
+	if (adapter->iface_type == MWIFIEX_USB) {
+		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
+						   skb, NULL);
+	} else {
+		skb_push(skb, INTF_HEADER_LEN);
+		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
+						   skb, NULL);
+	}
 	switch (ret) {
 	case -EBUSY:
 		adapter->data_sent = true;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index d2af8cb..e2faec4 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -77,12 +77,23 @@
 		if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
 			local_tx_pd =
 				(struct txpd *) (head_ptr + INTF_HEADER_LEN);
-
-		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
-						   skb, tx_param);
+		if (adapter->iface_type == MWIFIEX_USB) {
+			adapter->data_sent = true;
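+			/* data_sent is cleared again in
+			 * mwifiex_write_data_complete() once the tx URB
+			 * completes
+			 */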
+			skb_pull(skb, INTF_HEADER_LEN);
+			ret = adapter->if_ops.host_to_card(adapter,
+							   MWIFIEX_USB_EP_DATA,
+							   skb, NULL);
+		} else {
+			ret = adapter->if_ops.host_to_card(adapter,
+							   MWIFIEX_TYPE_DATA,
+							   skb, tx_param);
+		}
 	}
 
 	switch (ret) {
+	case -ENOSR:
+		dev_err(adapter->dev, "data: -ENOSR is returned\n");
+		break;
 	case -EBUSY:
 		if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
 		    (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) {
@@ -135,6 +146,9 @@
 	if (!priv)
 		goto done;
 
+	if (adapter->iface_type == MWIFIEX_USB)
+		adapter->data_sent = false;
+
 	mwifiex_set_trans_start(priv->netdev);
 	if (!status) {
 		priv->stats.tx_packets++;
@@ -162,4 +176,5 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(mwifiex_write_data_complete);
 
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
new file mode 100644
index 0000000..49ebf20
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -0,0 +1,1052 @@
+/*
+ * Marvell Wireless LAN device driver: USB specific handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+#include "usb.h"
+
+#define USB_VERSION	"1.0"
+
+static const char usbdriver_name[] = "usb8797";
+
+static u8 user_rmmod;
+static struct mwifiex_if_ops usb_ops;
+static struct semaphore add_remove_card_sem;
+
+static struct usb_device_id mwifiex_usb_table[] = {
+	{USB_DEVICE(USB8797_VID, USB8797_PID_1)},
+	{USB_DEVICE_AND_INTERFACE_INFO(USB8797_VID, USB8797_PID_2,
+				       USB_CLASS_VENDOR_SPEC,
+				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+	{ }	/* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, mwifiex_usb_table);
+
+static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size);
+
+/* This function handles a received packet. The necessary action is taken
+ * based on whether it carries a command response, an event or data.
+ */
+static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
+			    struct sk_buff *skb, u8 ep)
+{
+	struct device *dev = adapter->dev;
+	u32 recv_type;
+	__le32 tmp;
+
+	if (adapter->hs_activated)
+		mwifiex_process_hs_config(adapter);
+
+	if (skb->len < INTF_HEADER_LEN) {
+		dev_err(dev, "%s: invalid skb->len\n", __func__);
+		return -1;
+	}
+
+	switch (ep) {
+	case MWIFIEX_USB_EP_CMD_EVENT:
+		dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__);
+		skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
+		recv_type = le32_to_cpu(tmp);
+		skb_pull(skb, INTF_HEADER_LEN);
+
+		switch (recv_type) {
+		case MWIFIEX_USB_TYPE_CMD:
+			if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
+				dev_err(dev, "CMD: skb->len too large\n");
+				return -1;
+			} else if (!adapter->curr_cmd) {
+				dev_dbg(dev, "CMD: no curr_cmd\n");
+				if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
+					mwifiex_process_sleep_confirm_resp(
+							adapter, skb->data,
+							skb->len);
+					return 0;
+				}
+				return -1;
+			}
+
+			adapter->curr_cmd->resp_skb = skb;
+			adapter->cmd_resp_received = true;
+			break;
+		case MWIFIEX_USB_TYPE_EVENT:
+			if (skb->len < sizeof(u32)) {
+				dev_err(dev, "EVENT: skb->len too small\n");
+				return -1;
+			}
+			skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
+			adapter->event_cause = le32_to_cpu(tmp);
+			skb_pull(skb, sizeof(u32));
+			dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
+
+			if (skb->len > MAX_EVENT_SIZE) {
+				dev_err(dev, "EVENT: event body too large\n");
+				return -1;
+			}
+
+			skb_copy_from_linear_data(skb, adapter->event_body,
+						  skb->len);
+			adapter->event_received = true;
+			adapter->event_skb = skb;
+			break;
+		default:
+			dev_err(dev, "unknown recv_type %#x\n", recv_type);
+			return -1;
+		}
+		break;
+	case MWIFIEX_USB_EP_DATA:
+		dev_dbg(dev, "%s: EP_DATA\n", __func__);
+		if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
+			dev_err(dev, "DATA: skb->len too large\n");
+			return -1;
+		}
+		skb_queue_tail(&adapter->usb_rx_data_q, skb);
+		adapter->data_received = true;
+		break;
+	default:
+		dev_err(dev, "%s: unknown endport %#x\n", __func__, ep);
+		return -1;
+	}
+
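+	/* -EINPROGRESS tells the rx completion handler that the skb is now
+	 * owned by the command/event/data path and must not be freed there.
+	 */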
+	return -EINPROGRESS;
+}
+
+static void mwifiex_usb_rx_complete(struct urb *urb)
+{
+	struct urb_context *context = (struct urb_context *)urb->context;
+	struct mwifiex_adapter *adapter = context->adapter;
+	struct sk_buff *skb = context->skb;
+	struct usb_card_rec *card;
+	int recv_length = urb->actual_length;
+	int size, status;
+
+	if (!adapter || !adapter->card) {
+		pr_err("mwifiex adapter or card structure is not valid\n");
+		return;
+	}
+
+	card = (struct usb_card_rec *)adapter->card;
+	if (card->rx_cmd_ep == context->ep)
+		atomic_dec(&card->rx_cmd_urb_pending);
+	else
+		atomic_dec(&card->rx_data_urb_pending);
+
+	if (recv_length) {
+		if (urb->status || (adapter->surprise_removed)) {
+			dev_err(adapter->dev,
+				"URB status is failed: %d\n", urb->status);
+			/* Do not free skb in case of command ep */
+			if (card->rx_cmd_ep != context->ep)
+				dev_kfree_skb_any(skb);
+			goto setup_for_next;
+		}
+		if (skb->len > recv_length)
+			skb_trim(skb, recv_length);
+		else
+			skb_put(skb, recv_length - skb->len);
+
+		atomic_inc(&adapter->rx_pending);
+		status = mwifiex_usb_recv(adapter, skb, context->ep);
+
+		dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
+			recv_length, status);
+		if (status == -EINPROGRESS) {
+			queue_work(adapter->workqueue, &adapter->main_work);
+
+			/* urb for data_ep is re-submitted now;
+			 * urb for cmd_ep will be re-submitted from
+			 * mwifiex_usb_cmd_event_complete() once the
+			 * command/event has been processed
+			 */
+			if (card->rx_cmd_ep == context->ep)
+				return;
+		} else {
+			atomic_dec(&adapter->rx_pending);
+			if (status == -1)
+				dev_err(adapter->dev,
+					"received data processing failed!\n");
+
+			/* Do not free skb in case of command ep */
+			if (card->rx_cmd_ep != context->ep)
+				dev_kfree_skb_any(skb);
+		}
+	} else if (urb->status) {
+		if (!adapter->is_suspended) {
+			dev_warn(adapter->dev,
+				 "Card is removed: %d\n", urb->status);
+			adapter->surprise_removed = true;
+		}
+		dev_kfree_skb_any(skb);
+		return;
+	} else {
+		/* Do not free skb in case of command ep */
+		if (card->rx_cmd_ep != context->ep)
+			dev_kfree_skb_any(skb);
+
+		/* fall through to setup_for_next */
+	}
+
+setup_for_next:
+	if (card->rx_cmd_ep == context->ep)
+		size = MWIFIEX_RX_CMD_BUF_SIZE;
+	else
+		size = MWIFIEX_RX_DATA_BUF_SIZE;
+
+	mwifiex_usb_submit_rx_urb(context, size);
+
+	return;
+}
+
+static void mwifiex_usb_tx_complete(struct urb *urb)
+{
+	struct urb_context *context = (struct urb_context *)(urb->context);
+	struct mwifiex_adapter *adapter = context->adapter;
+	struct usb_card_rec *card = adapter->card;
+
+	dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status);
+
+	if (context->ep == card->tx_cmd_ep) {
+		dev_dbg(adapter->dev, "%s: CMD\n", __func__);
+		atomic_dec(&card->tx_cmd_urb_pending);
+		adapter->cmd_sent = false;
+	} else {
+		dev_dbg(adapter->dev, "%s: DATA\n", __func__);
+		atomic_dec(&card->tx_data_urb_pending);
+		mwifiex_write_data_complete(adapter, context->skb,
+					    urb->status ? -1 : 0);
+	}
+
+	queue_work(adapter->workqueue, &adapter->main_work);
+
+	return;
+}
+
+static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
+{
+	struct mwifiex_adapter *adapter = ctx->adapter;
+	struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+
+	if (card->rx_cmd_ep != ctx->ep) {
+		ctx->skb = dev_alloc_skb(size);
+		if (!ctx->skb) {
+			dev_err(adapter->dev,
+				"%s: dev_alloc_skb failed\n", __func__);
+			return -ENOMEM;
+		}
+	}
+
+	usb_fill_bulk_urb(ctx->urb, card->udev,
+			  usb_rcvbulkpipe(card->udev, ctx->ep), ctx->skb->data,
+			  size, mwifiex_usb_rx_complete, (void *)ctx);
+
+	if (card->rx_cmd_ep == ctx->ep)
+		atomic_inc(&card->rx_cmd_urb_pending);
+	else
+		atomic_inc(&card->rx_data_urb_pending);
+
+	if (usb_submit_urb(ctx->urb, GFP_ATOMIC)) {
+		dev_err(adapter->dev, "usb_submit_urb failed\n");
+		dev_kfree_skb_any(ctx->skb);
+		ctx->skb = NULL;
+
+		if (card->rx_cmd_ep == ctx->ep)
+			atomic_dec(&card->rx_cmd_urb_pending);
+		else
+			atomic_dec(&card->rx_data_urb_pending);
+
+		return -1;
+	}
+
+	return 0;
+}
+
+static void mwifiex_usb_free(struct usb_card_rec *card)
+{
+	int i;
+
+	if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
+		usb_kill_urb(card->rx_cmd.urb);
+
+	usb_free_urb(card->rx_cmd.urb);
+	card->rx_cmd.urb = NULL;
+
+	if (atomic_read(&card->rx_data_urb_pending))
+		for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
+			if (card->rx_data_list[i].urb)
+				usb_kill_urb(card->rx_data_list[i].urb);
+
+	for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) {
+		usb_free_urb(card->rx_data_list[i].urb);
+		card->rx_data_list[i].urb = NULL;
+	}
+
+	for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) {
+		usb_free_urb(card->tx_data_list[i].urb);
+		card->tx_data_list[i].urb = NULL;
+	}
+
+	usb_free_urb(card->tx_cmd.urb);
+	card->tx_cmd.urb = NULL;
+
+	return;
+}
+
+/* This function probes an mwifiex device and registers it. It allocates
+ * the card structure and initiates the device registration and
+ * initialization procedure by adding a logical interface.
+ */
+static int mwifiex_usb_probe(struct usb_interface *intf,
+			     const struct usb_device_id *id)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct usb_host_interface *iface_desc = intf->cur_altsetting;
+	struct usb_endpoint_descriptor *epd;
+	int ret, i;
+	struct usb_card_rec *card;
+	u16 id_vendor, id_product, bcd_device, bcd_usb;
+
+	card = kzalloc(sizeof(struct usb_card_rec), GFP_KERNEL);
+	if (!card)
+		return -ENOMEM;
+
+	id_vendor = le16_to_cpu(udev->descriptor.idVendor);
+	id_product = le16_to_cpu(udev->descriptor.idProduct);
+	bcd_device = le16_to_cpu(udev->descriptor.bcdDevice);
+	bcd_usb = le16_to_cpu(udev->descriptor.bcdUSB);
+	pr_debug("info: VID/PID = %X/%X, Boot2 version = %X\n",
+		 id_vendor, id_product, bcd_device);
+
+	/* PID_1 is used for firmware downloading only */
+	if (id_product == USB8797_PID_1)
+		card->usb_boot_state = USB8797_FW_DNLD;
+	else
+		card->usb_boot_state = USB8797_FW_READY;
+
+	card->udev = udev;
+	card->intf = intf;
+
+	pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocl=%#x\n",
+		 bcd_usb, udev->descriptor.bDeviceClass,
+		 udev->descriptor.bDeviceSubClass,
+		 udev->descriptor.bDeviceProtocol);
+
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+		epd = &iface_desc->endpoint[i].desc;
+		if (usb_endpoint_dir_in(epd) &&
+		    usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
+		    usb_endpoint_xfer_bulk(epd)) {
+			pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n",
+				 le16_to_cpu(epd->wMaxPacketSize),
+				 epd->bEndpointAddress);
+			card->rx_cmd_ep = usb_endpoint_num(epd);
+			atomic_set(&card->rx_cmd_urb_pending, 0);
+		}
+		if (usb_endpoint_dir_in(epd) &&
+		    usb_endpoint_num(epd) == MWIFIEX_USB_EP_DATA &&
+		    usb_endpoint_xfer_bulk(epd)) {
+			pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n",
+				 le16_to_cpu(epd->wMaxPacketSize),
+				 epd->bEndpointAddress);
+			card->rx_data_ep = usb_endpoint_num(epd);
+			atomic_set(&card->rx_data_urb_pending, 0);
+		}
+		if (usb_endpoint_dir_out(epd) &&
+		    usb_endpoint_num(epd) == MWIFIEX_USB_EP_DATA &&
+		    usb_endpoint_xfer_bulk(epd)) {
+			pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n",
+				 le16_to_cpu(epd->wMaxPacketSize),
+				 epd->bEndpointAddress);
+			card->tx_data_ep = usb_endpoint_num(epd);
+			atomic_set(&card->tx_data_urb_pending, 0);
+		}
+		if (usb_endpoint_dir_out(epd) &&
+		    usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
+		    usb_endpoint_xfer_bulk(epd)) {
+			pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n",
+				 le16_to_cpu(epd->wMaxPacketSize),
+				 epd->bEndpointAddress);
+			card->tx_cmd_ep = usb_endpoint_num(epd);
+			atomic_set(&card->tx_cmd_urb_pending, 0);
+			card->bulk_out_maxpktsize =
+					le16_to_cpu(epd->wMaxPacketSize);
+		}
+	}
+
+	usb_set_intfdata(intf, card);
+
+	ret = mwifiex_add_card(card, &add_remove_card_sem, &usb_ops,
+			       MWIFIEX_USB);
+	if (ret) {
+		pr_err("%s: mwifiex_add_card failed: %d\n", __func__, ret);
+		usb_reset_device(udev);
+		kfree(card);
+		return ret;
+	}
+
+	usb_get_dev(udev);
+
+	return 0;
+}
+
+/* The kernel needs to suspend all functions separately. Therefore all
+ * registered functions must have drivers with suspend and resume
+ * methods. Failing that, the kernel simply removes the whole card.
+ *
+ * If not already suspended, this function allocates and sends a
+ * 'host sleep activate' request to the firmware and turns off the traffic.
+ */
+static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct usb_card_rec *card = usb_get_intfdata(intf);
+	struct mwifiex_adapter *adapter;
+	int i;
+
+	if (!card || !card->adapter) {
+		pr_err("%s: card or card->adapter is NULL\n", __func__);
+		return 0;
+	}
+	adapter = card->adapter;
+
+	if (unlikely(adapter->is_suspended))
+		dev_warn(adapter->dev, "Device already suspended\n");
+
+	mwifiex_enable_hs(adapter);
+
+	/* The 'is_suspended' flag indicates that the device is suspended.
+	 * It must be set before the usb_kill_urb() calls: in the completion
+	 * handlers, urb->status (-ENOENT) and this flag are used together
+	 * to distinguish a 'suspended' state from a 'disconnect'.
+	 */
+	adapter->is_suspended = true;
+
+	for (i = 0; i < adapter->priv_num; i++)
+		netif_carrier_off(adapter->priv[i]->netdev);
+
+	if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
+		usb_kill_urb(card->rx_cmd.urb);
+
+	if (atomic_read(&card->rx_data_urb_pending))
+		for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
+			if (card->rx_data_list[i].urb)
+				usb_kill_urb(card->rx_data_list[i].urb);
+
+	for (i = 0; i < MWIFIEX_TX_DATA_URB; i++)
+		if (card->tx_data_list[i].urb)
+			usb_kill_urb(card->tx_data_list[i].urb);
+
+	if (card->tx_cmd.urb)
+		usb_kill_urb(card->tx_cmd.urb);
+
+	return 0;
+}
+
+/* The kernel needs to suspend all functions separately. Therefore all
+ * registered functions must have drivers with suspend and resume
+ * methods. Failing that, the kernel simply removes the whole card.
+ *
+ * If not already resumed, this function turns on the traffic and
+ * sends a 'host sleep cancel' request to the firmware.
+ */
+static int mwifiex_usb_resume(struct usb_interface *intf)
+{
+	struct usb_card_rec *card = usb_get_intfdata(intf);
+	struct mwifiex_adapter *adapter;
+	int i;
+
+	if (!card || !card->adapter) {
+		pr_err("%s: card or card->adapter is NULL\n", __func__);
+		return 0;
+	}
+	adapter = card->adapter;
+
+	if (unlikely(!adapter->is_suspended)) {
+		dev_warn(adapter->dev, "Device already resumed\n");
+		return 0;
+	}
+
+	/* Indicate device resumed. The netdev queue will be resumed only
+	 * after the urbs have been re-submitted
+	 */
+	adapter->is_suspended = false;
+
+	if (!atomic_read(&card->rx_data_urb_pending))
+		for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
+			mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
+						  MWIFIEX_RX_DATA_BUF_SIZE);
+
+	if (!atomic_read(&card->rx_cmd_urb_pending)) {
+		card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
+		if (card->rx_cmd.skb)
+			mwifiex_usb_submit_rx_urb(&card->rx_cmd,
+						  MWIFIEX_RX_CMD_BUF_SIZE);
+	}
+
+	for (i = 0; i < adapter->priv_num; i++)
+		if (adapter->priv[i]->media_connected)
+			netif_carrier_on(adapter->priv[i]->netdev);
+
+	/* Disable Host Sleep */
+	if (adapter->hs_activated)
+		mwifiex_cancel_hs(mwifiex_get_priv(adapter,
+						   MWIFIEX_BSS_ROLE_ANY),
+				  MWIFIEX_ASYNC_CMD);
+
+#ifdef CONFIG_PM
+	/* The resume handler may be called due to a remote wakeup;
+	 * force the device out of autosuspend anyway.
+	 */
+	usb_disable_autosuspend(card->udev);
+#endif /* CONFIG_PM */
+
+	return 0;
+}
+
+static void mwifiex_usb_disconnect(struct usb_interface *intf)
+{
+	struct usb_card_rec *card = usb_get_intfdata(intf);
+	struct mwifiex_adapter *adapter;
+	int i;
+
+	if (!card || !card->adapter) {
+		pr_err("%s: card or card->adapter is NULL\n", __func__);
+		return;
+	}
+
+	adapter = card->adapter;
+	if (!adapter->priv_num)
+		return;
+
+	/* In case the driver is removed while an asynchronous FW download
+	 * is in progress, wait for it to complete.
+	 */
+	wait_for_completion(&adapter->fw_load);
+
+	if (user_rmmod) {
+#ifdef CONFIG_PM
+		if (adapter->is_suspended)
+			mwifiex_usb_resume(intf);
+#endif
+		for (i = 0; i < adapter->priv_num; i++)
+			if ((GET_BSS_ROLE(adapter->priv[i]) ==
+			     MWIFIEX_BSS_ROLE_STA) &&
+			    adapter->priv[i]->media_connected)
+				mwifiex_deauthenticate(adapter->priv[i], NULL);
+
+		mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+							  MWIFIEX_BSS_ROLE_ANY),
+					 MWIFIEX_FUNC_SHUTDOWN);
+	}
+
+	mwifiex_usb_free(card);
+
+	dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+	mwifiex_remove_card(adapter, &add_remove_card_sem);
+
+	usb_set_intfdata(intf, NULL);
+	usb_put_dev(interface_to_usbdev(intf));
+	kfree(card);
+
+	return;
+}
+
+static struct usb_driver mwifiex_usb_driver = {
+	.name = usbdriver_name,
+	.probe = mwifiex_usb_probe,
+	.disconnect = mwifiex_usb_disconnect,
+	.id_table = mwifiex_usb_table,
+	.suspend = mwifiex_usb_suspend,
+	.resume = mwifiex_usb_resume,
+	.supports_autosuspend = 1,
+};
+
+static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
+{
+	struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+	int i;
+
+	card->tx_cmd.adapter = adapter;
+	card->tx_cmd.ep = card->tx_cmd_ep;
+
+	card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!card->tx_cmd.urb) {
+		dev_err(adapter->dev, "tx_cmd.urb allocation failed\n");
+		return -ENOMEM;
+	}
+
+	card->tx_data_ix = 0;
+
+	for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) {
+		card->tx_data_list[i].adapter = adapter;
+		card->tx_data_list[i].ep = card->tx_data_ep;
+
+		card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!card->tx_data_list[i].urb) {
+			dev_err(adapter->dev,
+				"tx_data_list[] urb allocation failed\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
+{
+	struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+	int i;
+
+	card->rx_cmd.adapter = adapter;
+	card->rx_cmd.ep = card->rx_cmd_ep;
+
+	card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!card->rx_cmd.urb) {
+		dev_err(adapter->dev, "rx_cmd.urb allocation failed\n");
+		return -ENOMEM;
+	}
+
+	card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
+	if (!card->rx_cmd.skb) {
+		dev_err(adapter->dev, "rx_cmd.skb allocation failed\n");
+		return -ENOMEM;
+	}
+
+	if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE))
+		return -1;
+
+	for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) {
+		card->rx_data_list[i].adapter = adapter;
+		card->rx_data_list[i].ep = card->rx_data_ep;
+
+		card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!card->rx_data_list[i].urb) {
+			dev_err(adapter->dev,
+				"rx_data_list[] urb allocation failed\n");
+			return -1;
+		}
+		if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
+					      MWIFIEX_RX_DATA_BUF_SIZE))
+			return -1;
+	}
+
+	return 0;
+}
+
+static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
+				   u32 *len, u8 ep, u32 timeout)
+{
+	struct usb_card_rec *card = adapter->card;
+	int actual_length, ret;
+
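+	/* If the length is an exact multiple of the bulk-out max packet
+	 * size, pad it by one byte so the transfer ends on a short packet
+	 * (presumably so the firmware can detect end-of-transfer without a
+	 * zero-length packet).
+	 */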
+	if (!(*len % card->bulk_out_maxpktsize))
+		(*len)++;
+
+	/* Send the data block */
+	ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf,
+			   *len, &actual_length, timeout);
+	if (ret) {
+		dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret);
+		ret = -1;
+	}
+
+	*len = actual_length;
+
+	return ret;
+}
+
+static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
+				  u32 *len, u8 ep, u32 timeout)
+{
+	struct usb_card_rec *card = adapter->card;
+	int actual_length, ret;
+
+	/* Receive the data response */
+	ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf,
+			   *len, &actual_length, timeout);
+	if (ret) {
+		dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret);
+		ret = -1;
+	}
+
+	*len = actual_length;
+
+	return ret;
+}
+
+/* This function writes a command/data packet to the card. */
+static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
+				    struct sk_buff *skb,
+				    struct mwifiex_tx_param *tx_param)
+{
+	struct usb_card_rec *card = adapter->card;
+	struct urb_context *context;
+	u8 *data = (u8 *)skb->data;
+	struct urb *tx_urb;
+
+	if (adapter->is_suspended) {
+		dev_err(adapter->dev,
+			"%s: not allowed while suspended\n", __func__);
+		return -1;
+	}
+
+	if (adapter->surprise_removed) {
+		dev_err(adapter->dev, "%s: device removed\n", __func__);
+		return -1;
+	}
+
+	if (ep == card->tx_data_ep &&
+	    atomic_read(&card->tx_data_urb_pending) >= MWIFIEX_TX_DATA_URB) {
+		return -EBUSY;
+	}
+
+	dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep);
+
+	if (ep == card->tx_cmd_ep) {
+		context = &card->tx_cmd;
+	} else {
+		if (card->tx_data_ix >= MWIFIEX_TX_DATA_URB)
+			card->tx_data_ix = 0;
+		context = &card->tx_data_list[card->tx_data_ix++];
+	}
+
+	context->adapter = adapter;
+	context->ep = ep;
+	context->skb = skb;
+	tx_urb = context->urb;
+
+	usb_fill_bulk_urb(tx_urb, card->udev, usb_sndbulkpipe(card->udev, ep),
+			  data, skb->len, mwifiex_usb_tx_complete,
+			  (void *)context);
+
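+	/* Request a zero-length packet when skb->len is an exact multiple
+	 * of the endpoint max packet size, so the device sees the end of
+	 * the transfer.
+	 */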
+	tx_urb->transfer_flags |= URB_ZERO_PACKET;
+
+	if (ep == card->tx_cmd_ep)
+		atomic_inc(&card->tx_cmd_urb_pending);
+	else
+		atomic_inc(&card->tx_data_urb_pending);
+
+	if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
+		dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__);
+		if (ep == card->tx_cmd_ep) {
+			atomic_dec(&card->tx_cmd_urb_pending);
+		} else {
+			atomic_dec(&card->tx_data_urb_pending);
+			if (card->tx_data_ix)
+				card->tx_data_ix--;
+			else
+				card->tx_data_ix = MWIFIEX_TX_DATA_URB;
+		}
+
+		return -1;
+	} else {
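+		/* All data tx URBs are now in flight; report -ENOSR so the
+		 * caller knows no more data can be queued until a tx
+		 * completion runs.
+		 */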
+		if (ep == card->tx_data_ep &&
+		    atomic_read(&card->tx_data_urb_pending) ==
+							MWIFIEX_TX_DATA_URB)
+			return -ENOSR;
+	}
+
+	return -EINPROGRESS;
+}
+
+/* This function registers the USB device and initializes the adapter
+ * parameters.
+ */
+static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
+{
+	struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+
+	card->adapter = adapter;
+	adapter->dev = &card->udev->dev;
+	strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
+
+	return 0;
+}
+
+/* This function reads one block of firmware data. */
+static int mwifiex_get_fw_data(struct mwifiex_adapter *adapter,
+			       u32 offset, u32 len, u8 *buf)
+{
+	if (!buf || !len)
+		return -1;
+
+	if (offset + len > adapter->firmware->size)
+		return -1;
+
+	memcpy(buf, adapter->firmware->data + offset, len);
+
+	return 0;
+}
+
+static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
+				    struct mwifiex_fw_image *fw)
+{
+	int ret = 0;
+	u8 *firmware = fw->fw_buf, *recv_buff;
+	u32 retries = USB8797_FW_MAX_RETRY, dlen;
+	u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
+	struct fw_data *fwdata;
+	struct fw_sync_header sync_fw;
+	u8 check_winner = 1;
+
+	if (!firmware) {
+		dev_err(adapter->dev,
+			"No firmware image found! Terminating download\n");
+		ret = -1;
+		goto fw_exit;
+	}
+
+	/* Allocate memory for transmit */
+	fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL);
+	if (!fwdata) {
+		ret = -ENOMEM;
+		goto fw_exit;
+	}
+
+	/* Allocate memory for receive */
+	recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
+	if (!recv_buff) {
+		ret = -ENOMEM;
+		kfree(fwdata);
+		goto fw_exit;
+	}
+
+	do {
+		/* Send pseudo data to check winner status first */
+		if (check_winner) {
+			memset(&fwdata->fw_hdr, 0, sizeof(struct fw_header));
+			dlen = 0;
+		} else {
+			/* copy the header of the fw_data to get the length */
+			if (firmware)
+				memcpy(&fwdata->fw_hdr, &firmware[tlen],
+				       sizeof(struct fw_header));
+			else
+				mwifiex_get_fw_data(adapter, tlen,
+						    sizeof(struct fw_header),
+						    (u8 *)&fwdata->fw_hdr);
+
+			dlen = le32_to_cpu(fwdata->fw_hdr.data_len);
+			dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd);
+			tlen += sizeof(struct fw_header);
+
+			if (firmware)
+				memcpy(fwdata->data, &firmware[tlen], dlen);
+			else
+				mwifiex_get_fw_data(adapter, tlen, dlen,
+						    (u8 *)fwdata->data);
+
+			fwdata->seq_num = cpu_to_le32(fw_seqnum);
+			tlen += dlen;
+		}
+
+		/* Retry if the send/receive fails or a CRC error occurs */
+		while (retries--) {
+			u8 *buf = (u8 *)fwdata;
+			u32 len = FW_DATA_XMIT_SIZE;
+
+			/* send the firmware block */
+			ret = mwifiex_write_data_sync(adapter, buf, &len,
+						MWIFIEX_USB_EP_CMD_EVENT,
+						MWIFIEX_USB_TIMEOUT);
+			if (ret) {
+				dev_err(adapter->dev,
+					"write_data_sync: failed: %d\n", ret);
+				continue;
+			}
+
+			buf = recv_buff;
+			len = FW_DNLD_RX_BUF_SIZE;
+
+			/* Receive the firmware block response */
+			ret = mwifiex_read_data_sync(adapter, buf, &len,
+						MWIFIEX_USB_EP_CMD_EVENT,
+						MWIFIEX_USB_TIMEOUT);
+			if (ret) {
+				dev_err(adapter->dev,
+					"read_data_sync: failed: %d\n", ret);
+				continue;
+			}
+
+			memcpy(&sync_fw, recv_buff,
+			       sizeof(struct fw_sync_header));
+
+			/* check 1st firmware block resp for highest bit set */
+			if (check_winner) {
+				if (le32_to_cpu(sync_fw.cmd) & 0x80000000) {
+					dev_warn(adapter->dev,
+						 "USB is not the winner %#x\n",
+						 sync_fw.cmd);
+
+					/* returning success */
+					ret = 0;
+					goto cleanup;
+				}
+
+				dev_dbg(adapter->dev,
+					"USB is the winner, start to download FW\n");
+
+				check_winner = 0;
+				break;
+			}
+
+			/* check the firmware block response for CRC errors */
+			if (sync_fw.cmd) {
+				dev_err(adapter->dev,
+					"FW received block with CRC %#x\n",
+					sync_fw.cmd);
+				ret = -1;
+				continue;
+			}
+
+			retries = USB8797_FW_MAX_RETRY;
+			break;
+		}
+		fw_seqnum++;
+	} while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries);
+
+cleanup:
+	dev_dbg(adapter->dev, "%s: %d bytes downloaded\n", __func__, tlen);
+
+	kfree(recv_buff);
+	kfree(fwdata);
+
+	if (retries)
+		ret = 0;
+fw_exit:
+	return ret;
+}
+
+static int mwifiex_usb_dnld_fw(struct mwifiex_adapter *adapter,
+			struct mwifiex_fw_image *fw)
+{
+	int ret;
+	struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+
+	if (card->usb_boot_state == USB8797_FW_DNLD) {
+		ret = mwifiex_prog_fw_w_helper(adapter, fw);
+		if (ret)
+			return -1;
+
+		/* Boot state changes after successful firmware download */
+		if (card->usb_boot_state == USB8797_FW_DNLD)
+			return -1;
+	}
+
+	ret = mwifiex_usb_rx_init(adapter);
+	if (!ret)
+		ret = mwifiex_usb_tx_init(adapter);
+
+	return ret;
+}
+
+static void mwifiex_submit_rx_urb(struct mwifiex_adapter *adapter, u8 ep)
+{
+	struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+
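+	/* Restore the interface header space that receive processing pulled
+	 * off, so the command/event skb can be reused for the next URB.
+	 */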
+	skb_push(card->rx_cmd.skb, INTF_HEADER_LEN);
+	if ((ep == card->rx_cmd_ep) &&
+	    (!atomic_read(&card->rx_cmd_urb_pending)))
+		mwifiex_usb_submit_rx_urb(&card->rx_cmd,
+					  MWIFIEX_RX_CMD_BUF_SIZE);
+
+	return;
+}
+
+static int mwifiex_usb_cmd_event_complete(struct mwifiex_adapter *adapter,
+				       struct sk_buff *skb)
+{
+	atomic_dec(&adapter->rx_pending);
+	mwifiex_submit_rx_urb(adapter, MWIFIEX_USB_EP_CMD_EVENT);
+
+	return 0;
+}
+
+static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter,
+				     struct sk_buff *skb)
+{
+	atomic_dec(&adapter->rx_pending);
+	dev_kfree_skb_any(skb);
+
+	return 0;
+}
+
+/* This function wakes up the card. */
+static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
+{
+	/* Simulation of HS_AWAKE event */
+	adapter->pm_wakeup_fw_try = false;
+	adapter->pm_wakeup_card_req = false;
+	adapter->ps_state = PS_STATE_AWAKE;
+
+	return 0;
+}
+
+static struct mwifiex_if_ops usb_ops = {
+	.register_dev =		mwifiex_register_dev,
+	.wakeup =		mwifiex_pm_wakeup_card,
+	.wakeup_complete =	mwifiex_pm_wakeup_card_complete,
+
+	/* USB specific */
+	.dnld_fw =		mwifiex_usb_dnld_fw,
+	.cmdrsp_complete =	mwifiex_usb_cmd_event_complete,
+	.event_complete =	mwifiex_usb_cmd_event_complete,
+	.data_complete =	mwifiex_usb_data_complete,
+	.host_to_card =		mwifiex_usb_host_to_card,
+};
+
+/* This function initializes the USB driver module.
+ *
+ * It initializes the semaphore and registers the driver with the
+ * USB bus.
+ */
+static int mwifiex_usb_init_module(void)
+{
+	int ret;
+
+	pr_debug("Marvell USB8797 Driver\n");
+
+	sema_init(&add_remove_card_sem, 1);
+
+	ret = usb_register(&mwifiex_usb_driver);
+	if (ret)
+		pr_err("Driver register failed!\n");
+	else
+		pr_debug("info: Driver registered successfully!\n");
+
+	return ret;
+}
+
+/* This function cleans up the USB driver.
+ *
+ * The following major steps are followed in .disconnect for cleanup:
+ *      - Resume the device if it is suspended
+ *      - Disconnect the device if connected
+ *      - Shutdown the firmware
+ *      - Unregister the device from USB bus.
+ */
+static void mwifiex_usb_cleanup_module(void)
+{
+	if (!down_interruptible(&add_remove_card_sem))
+		up(&add_remove_card_sem);
+
+	/* set the flag as user is removing this module */
+	user_rmmod = 1;
+
+	usb_deregister(&mwifiex_usb_driver);
+}
+
+module_init(mwifiex_usb_init_module);
+module_exit(mwifiex_usb_cleanup_module);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION);
+MODULE_VERSION(USB_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE("mrvl/usb8797_uapsta.bin");
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
new file mode 100644
index 0000000..98c4316
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -0,0 +1,99 @@
+/*
+ * This file contains definitions for mwifiex USB interface driver.
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#ifndef _MWIFIEX_USB_H
+#define _MWIFIEX_USB_H
+
+#include <linux/usb.h>
+
+#define USB8797_VID		0x1286
+#define USB8797_PID_1		0x2043
+#define USB8797_PID_2		0x2044
+
+#define USB8797_FW_DNLD		1
+#define USB8797_FW_READY	2
+#define USB8797_FW_MAX_RETRY	3
+
+#define MWIFIEX_TX_DATA_URB	6
+#define MWIFIEX_RX_DATA_URB	6
+#define MWIFIEX_USB_TIMEOUT	100
+
+#define USB8797_DEFAULT_FW_NAME	"mrvl/usb8797_uapsta.bin"
+
+#define FW_DNLD_TX_BUF_SIZE	620
+#define FW_DNLD_RX_BUF_SIZE	2048
+#define FW_HAS_LAST_BLOCK	0x00000004
+
+#define FW_DATA_XMIT_SIZE \
+	(sizeof(struct fw_header) + dlen + sizeof(u32))
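+/* Note: FW_DATA_XMIT_SIZE expands a local variable named 'dlen' at its
+ * point of use (mwifiex_prog_fw_w_helper()).
+ */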
+
+struct urb_context {
+	struct mwifiex_adapter *adapter;
+	struct sk_buff *skb;
+	struct urb *urb;
+	u8 ep;
+};
+
+struct usb_card_rec {
+	struct mwifiex_adapter *adapter;
+	struct usb_device *udev;
+	struct usb_interface *intf;
+	u8 rx_cmd_ep;
+	struct urb_context rx_cmd;
+	atomic_t rx_cmd_urb_pending;
+	struct urb_context rx_data_list[MWIFIEX_RX_DATA_URB];
+	u8 usb_boot_state;
+	u8 rx_data_ep;
+	atomic_t rx_data_urb_pending;
+	u8 tx_data_ep;
+	u8 tx_cmd_ep;
+	atomic_t tx_data_urb_pending;
+	atomic_t tx_cmd_urb_pending;
+	int bulk_out_maxpktsize;
+	struct urb_context tx_cmd;
+	int tx_data_ix;
+	struct urb_context tx_data_list[MWIFIEX_TX_DATA_URB];
+};
+
+struct fw_header {
+	__le32 dnld_cmd;
+	__le32 base_addr;
+	__le32 data_len;
+	__le32 crc;
+};
+
+struct fw_sync_header {
+	__le32 cmd;
+	__le32 seq_num;
+};
+
+struct fw_data {
+	struct fw_header fw_hdr;
+	__le32 seq_num;
+	u8 data[1];
+};
+
+/* This function is called after the card has woken up. */
+static inline int
+mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
+{
+	return 0;
+}
+
+#endif /*_MWIFIEX_USB_H */
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 6b39997..2864c74 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -167,6 +167,28 @@
 	skb->dev = priv->netdev;
 	skb->protocol = eth_type_trans(skb, priv->netdev);
 	skb->ip_summed = CHECKSUM_NONE;
+
+	/* This is required only for 11n over USB, where we allocate a 4K
+	 * buffer per packet so that 4K AMSDU packets can be received. For
+	 * SDIO the buffers are sized to the packet, so no adjustment is
+	 * needed.
+	 *
+	 * Trim the truesize here because each skb is allocated at 4K while
+	 * the received packets are typically only ~2K. Otherwise, when an
+	 * application sizes its socket buffer for the real payload (e.g. a
+	 * 64K datagram arriving as ~1.5K IP fragments), every fragment is
+	 * charged at 4K, the socket limit is hit roughly twice as fast and
+	 * the kernel drops the remaining fragments. This made the
+	 * Filesndl-ht.scr UDP test fail; hence this fix.
+	 */
+	if ((adapter->iface_type == MWIFIEX_USB) &&
+	    (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
+		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
+
 	priv->stats.rx_bytes += skb->len;
 	priv->stats.rx_packets++;
 	if (in_interrupt())
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 5a7316c..429a1de 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1120,11 +1120,19 @@
 	tx_info = MWIFIEX_SKB_TXCB(skb);
 
 	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
-	tx_param.next_pkt_len =
-		((skb_next) ? skb_next->len +
-		 sizeof(struct txpd) : 0);
-	ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, skb,
-					   &tx_param);
+
+	if (adapter->iface_type == MWIFIEX_USB) {
+		adapter->data_sent = true;
+		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
+						   skb, NULL);
+	} else {
+		tx_param.next_pkt_len =
+			((skb_next) ? skb_next->len +
+			 sizeof(struct txpd) : 0);
+		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
+						   skb, &tx_param);
+	}
+
 	switch (ret) {
 	case -EBUSY:
 		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index b48674b..cf7bdc6 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1235,7 +1235,7 @@
 {
 	return priv->capture_beacon &&
 		ieee80211_is_beacon(wh->frame_control) &&
-		!compare_ether_addr(wh->addr3, priv->capture_bssid);
+		ether_addr_equal(wh->addr3, priv->capture_bssid);
 }
 
 static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
@@ -5893,18 +5893,7 @@
 	.shutdown	= __devexit_p(mwl8k_shutdown),
 };
 
-static int __init mwl8k_init(void)
-{
-	return pci_register_driver(&mwl8k_driver);
-}
-
-static void __exit mwl8k_exit(void)
-{
-	pci_unregister_driver(&mwl8k_driver);
-}
-
-module_init(mwl8k_init);
-module_exit(mwl8k_exit);
+module_pci_driver(mwl8k_driver);
 
 MODULE_DESCRIPTION(MWL8K_DESC);
 MODULE_VERSION(MWL8K_VERSION);
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 4df8cf6..400a352 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -379,11 +379,8 @@
 
 void orinoco_uncache_fw(struct orinoco_private *priv)
 {
-	if (priv->cached_pri_fw)
-		release_firmware(priv->cached_pri_fw);
-	if (priv->cached_fw)
-		release_firmware(priv->cached_fw);
-
+	release_firmware(priv->cached_pri_fw);
+	release_firmware(priv->cached_fw);
 	priv->cached_pri_fw = NULL;
 	priv->cached_fw = NULL;
 }
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index ee8af1f..7cffea7 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -796,11 +796,14 @@
 		dev_err(pdev, "Cannot register device (%d).\n", err);
 		return err;
 	}
+	priv->registered = true;
 
 #ifdef CONFIG_P54_LEDS
 	err = p54_init_leds(priv);
-	if (err)
+	if (err) {
+		p54_unregister_common(dev);
 		return err;
+	}
 #endif /* CONFIG_P54_LEDS */
 
 	dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy));
@@ -840,7 +843,11 @@
 	p54_unregister_leds(priv);
 #endif /* CONFIG_P54_LEDS */
 
-	ieee80211_unregister_hw(dev);
+	if (priv->registered) {
+		priv->registered = false;
+		ieee80211_unregister_hw(dev);
+	}
+
 	mutex_destroy(&priv->conf_mutex);
 	mutex_destroy(&priv->eeprom_mutex);
 }
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 452fa3a..40b401e 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -173,6 +173,7 @@
 	struct sk_buff_head tx_pending;
 	struct sk_buff_head tx_queue;
 	struct mutex conf_mutex;
+	bool registered;
 
 	/* memory management (as seen by the firmware) */
 	u32 rx_start;
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 45df728..89318ad 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -667,15 +667,4 @@
 	.driver.pm	= P54P_PM_OPS,
 };
 
-static int __init p54p_init(void)
-{
-	return pci_register_driver(&p54p_driver);
-}
-
-static void __exit p54p_exit(void)
-{
-	pci_unregister_driver(&p54p_driver);
-}
-
-module_init(p54p_init);
-module_exit(p54p_exit);
+module_pci_driver(p54p_driver);
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index f4d28c3..e1eac83 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -117,21 +117,18 @@
 	u32 intf;
 	enum p54u_hw_type type;
 	const char *fw;
-	const char *fw_legacy;
 	char hw[20];
 } p54u_fwlist[__NUM_P54U_HWTYPES] = {
 	{
 		.type = P54U_NET2280,
 		.intf = FW_LM86,
 		.fw = "isl3886usb",
-		.fw_legacy = "isl3890usb",
 		.hw = "ISL3886 + net2280",
 	},
 	{
 		.type = P54U_3887,
 		.intf = FW_LM87,
 		.fw = "isl3887usb",
-		.fw_legacy = "isl3887usb_bare",
 		.hw = "ISL3887",
 	},
 };
@@ -208,6 +205,16 @@
 	usb_kill_anchored_urbs(&priv->submitted);
 }
 
+static void p54u_stop(struct ieee80211_hw *dev)
+{
+	/*
+	 * TODO: figure out how to reliably stop the 3887 and net2280 so
+	 * the hardware is still usable next time we want to start it.
+	 * Until then, we just stop listening to the hardware.
+	 */
+	p54u_free_urbs(dev);
+}
+
 static int p54u_init_urbs(struct ieee80211_hw *dev)
 {
 	struct p54u_priv *priv = dev->priv;
@@ -257,6 +264,16 @@
 	return ret;
 }
 
+static int p54u_open(struct ieee80211_hw *dev)
+{
+	/*
+	 * TODO: Because we don't know how to reliably stop the 3887 and
+	 * the isl3886+net2280 other than by brutally cutting off all
+	 * communication, we have to reinitialize the urbs on every start.
+	 */
+	return p54u_init_urbs(dev);
+}
+
 static __le32 p54u_lm87_chksum(const __le32 *data, size_t length)
 {
 	u32 chk = 0;
@@ -836,72 +853,139 @@
 	return err;
 }
 
-static int p54u_load_firmware(struct ieee80211_hw *dev)
+static int p54_find_type(struct p54u_priv *priv)
 {
-	struct p54u_priv *priv = dev->priv;
-	int err, i;
-
-	BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES);
+	int i;
 
 	for (i = 0; i < __NUM_P54U_HWTYPES; i++)
 		if (p54u_fwlist[i].type == priv->hw_type)
 			break;
-
 	if (i == __NUM_P54U_HWTYPES)
 		return -EOPNOTSUPP;
 
-	err = request_firmware(&priv->fw, p54u_fwlist[i].fw, &priv->udev->dev);
+	return i;
+}
+
+static int p54u_start_ops(struct p54u_priv *priv)
+{
+	struct ieee80211_hw *dev = priv->common.hw;
+	int ret;
+
+	ret = p54_parse_firmware(dev, priv->fw);
+	if (ret)
+		goto err_out;
+
+	ret = p54_find_type(priv);
+	if (ret < 0)
+		goto err_out;
+
+	if (priv->common.fw_interface != p54u_fwlist[ret].intf) {
+		dev_err(&priv->udev->dev, "wrong firmware, please get "
+			"a firmware for \"%s\" and try again.\n",
+			p54u_fwlist[ret].hw);
+		ret = -ENODEV;
+		goto err_out;
+	}
+
+	ret = priv->upload_fw(dev);
+	if (ret)
+		goto err_out;
+
+	ret = p54u_open(dev);
+	if (ret)
+		goto err_out;
+
+	ret = p54_read_eeprom(dev);
+	if (ret)
+		goto err_stop;
+
+	p54u_stop(dev);
+
+	ret = p54_register_common(dev, &priv->udev->dev);
+	if (ret)
+		goto err_stop;
+
+	return 0;
+
+err_stop:
+	p54u_stop(dev);
+
+err_out:
+	/*
+	 * p54u_disconnect will do the rest of the
+	 * cleanup
+	 */
+	return ret;
+}
+
+static void p54u_load_firmware_cb(const struct firmware *firmware,
+				  void *context)
+{
+	struct p54u_priv *priv = context;
+	struct usb_device *udev = priv->udev;
+	int err;
+
+	complete(&priv->fw_wait_load);
+	if (firmware) {
+		priv->fw = firmware;
+		err = p54u_start_ops(priv);
+	} else {
+		err = -ENOENT;
+		dev_err(&udev->dev, "Firmware not found.\n");
+	}
+
+	if (err) {
+		struct device *parent = priv->udev->dev.parent;
+
+		dev_err(&udev->dev, "failed to initialize device (%d)\n", err);
+
+		if (parent)
+			device_lock(parent);
+
+		device_release_driver(&udev->dev);
+		/*
+		 * At this point p54u_disconnect has already freed
+		 * the "priv" context. Do not use it anymore!
+		 */
+		priv = NULL;
+
+		if (parent)
+			device_unlock(parent);
+	}
+
+	usb_put_dev(udev);
+}
+
+static int p54u_load_firmware(struct ieee80211_hw *dev,
+			      struct usb_interface *intf)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct p54u_priv *priv = dev->priv;
+	struct device *device = &udev->dev;
+	int err, i;
+
+	BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES);
+
+	init_completion(&priv->fw_wait_load);
+	i = p54_find_type(priv);
+	if (i < 0)
+		return i;
+
+	dev_info(&priv->udev->dev, "Loading firmware file %s\n",
+	       p54u_fwlist[i].fw);
+
+	usb_get_dev(udev);
+	err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw,
+				      device, GFP_KERNEL, priv,
+				      p54u_load_firmware_cb);
 	if (err) {
 		dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
 					  "(%d)!\n", p54u_fwlist[i].fw, err);
-
-		err = request_firmware(&priv->fw, p54u_fwlist[i].fw_legacy,
-				       &priv->udev->dev);
-		if (err)
-			return err;
 	}
 
-	err = p54_parse_firmware(dev, priv->fw);
-	if (err)
-		goto out;
-
-	if (priv->common.fw_interface != p54u_fwlist[i].intf) {
-		dev_err(&priv->udev->dev, "wrong firmware, please get "
-			"a firmware for \"%s\" and try again.\n",
-			p54u_fwlist[i].hw);
-		err = -EINVAL;
-	}
-
-out:
-	if (err)
-		release_firmware(priv->fw);
-
 	return err;
 }
 
-static int p54u_open(struct ieee80211_hw *dev)
-{
-	struct p54u_priv *priv = dev->priv;
-	int err;
-
-	err = p54u_init_urbs(dev);
-	if (err) {
-		return err;
-	}
-
-	priv->common.open = p54u_init_urbs;
-
-	return 0;
-}
-
-static void p54u_stop(struct ieee80211_hw *dev)
-{
-	/* TODO: figure out how to reliably stop the 3887 and net2280 so
-	   the hardware is still usable next time we want to start it.
-	   until then, we just stop listening to the hardware.. */
-	p54u_free_urbs(dev);
-}
-
 static int __devinit p54u_probe(struct usb_interface *intf,
 				const struct usb_device_id *id)
 {
@@ -969,33 +1053,7 @@
 		priv->common.tx = p54u_tx_net2280;
 		priv->upload_fw = p54u_upload_firmware_net2280;
 	}
-	err = p54u_load_firmware(dev);
-	if (err)
-		goto err_free_dev;
-
-	err = priv->upload_fw(dev);
-	if (err)
-		goto err_free_fw;
-
-	p54u_open(dev);
-	err = p54_read_eeprom(dev);
-	p54u_stop(dev);
-	if (err)
-		goto err_free_fw;
-
-	err = p54_register_common(dev, &udev->dev);
-	if (err)
-		goto err_free_fw;
-
-	return 0;
-
-err_free_fw:
-	release_firmware(priv->fw);
-
-err_free_dev:
-	p54_free_common(dev);
-	usb_set_intfdata(intf, NULL);
-	usb_put_dev(udev);
+	err = p54u_load_firmware(dev, intf);
 	return err;
 }
 
@@ -1007,9 +1065,10 @@
 	if (!dev)
 		return;
 
+	priv = dev->priv;
+	wait_for_completion(&priv->fw_wait_load);
 	p54_unregister_common(dev);
 
-	priv = dev->priv;
 	usb_put_dev(interface_to_usbdev(intf));
 	release_firmware(priv->fw);
 	p54_free_common(dev);
@@ -1072,7 +1131,7 @@
 	.name	= "p54usb",
 	.id_table = p54u_table,
 	.probe = p54u_probe,
-	.disconnect = p54u_disconnect,
+	.disconnect = __devexit_p(p54u_disconnect),
 	.pre_reset = p54u_pre_reset,
 	.post_reset = p54u_post_reset,
 #ifdef CONFIG_PM
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index ed4034a..d273be7 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -143,6 +143,9 @@
 	struct sk_buff_head rx_queue;
 	struct usb_anchor submitted;
 	const struct firmware *fw;
+
+	/* asynchronous firmware callback */
+	struct completion fw_wait_load;
 };
 
 #endif /* P54USB_H */
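
The p54usb hunks above convert firmware loading from a synchronous request_firmware() call in probe to request_firmware_nowait() with a completion, and make disconnect wait on that completion before tearing the driver context down. A minimal sketch of the pattern, using hypothetical demo_* names rather than the p54usb ones:

	/* needs <linux/firmware.h>, <linux/completion.h>, <linux/device.h> */
	struct demo_priv {
		struct completion fw_loaded;
		const struct firmware *fw;
	};

	static void demo_fw_cb(const struct firmware *fw, void *context)
	{
		struct demo_priv *priv = context;

		priv->fw = fw;			/* NULL if no firmware was found */
		complete(&priv->fw_loaded);
	}

	static int demo_start_fw_load(struct device *dev, struct demo_priv *priv)
	{
		init_completion(&priv->fw_loaded);
		/* returns immediately; demo_fw_cb() runs later from a helper thread */
		return request_firmware_nowait(THIS_MODULE, 1, "demo.fw", dev,
					       GFP_KERNEL, priv, demo_fw_cb);
	}

	static void demo_teardown(struct demo_priv *priv)
	{
		/* make sure the callback has run before priv goes away */
		wait_for_completion(&priv->fw_loaded);
		release_firmware(priv->fw);	/* release_firmware(NULL) is a no-op */
	}

The real callback in the hunk above additionally drops the USB device reference it took before scheduling the load and unbinds the driver if the device cannot be brought up.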
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index a08a6f0..82a1cac 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -308,7 +308,7 @@
 		return;
 
 	/* only consider beacons from the associated BSSID */
-	if (compare_ether_addr(hdr->addr3, priv->bssid))
+	if (!ether_addr_equal(hdr->addr3, priv->bssid))
 		return;
 
 	tim = p54_find_ie(skb, WLAN_EID_TIM);
@@ -914,8 +914,7 @@
 	txhdr->hw_queue = queue;
 	txhdr->backlog = priv->tx_stats[queue].len - 1;
 	memset(txhdr->durations, 0, sizeof(txhdr->durations));
-	txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ?
-		2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask;
+	txhdr->tx_antenna = 2 & priv->tx_diversity_mask;
 	if (priv->rxhw == 5) {
 		txhdr->longbow.cts_rate = cts_rate;
 		txhdr->longbow.output_power = cpu_to_le16(priv->output_power);
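
Several hunks in this series replace compare_ether_addr() with ether_addr_equal() from <linux/etherdevice.h>. The sense is inverted: compare_ether_addr() is memcmp-like and returns 0 on a match, while ether_addr_equal() returns true on a match, which is why `if (compare_ether_addr(a, b))` ("addresses differ") becomes `if (!ether_addr_equal(a, b))`. A tiny illustration with a hypothetical helper:

	#include <linux/etherdevice.h>

	/* true if the beacon's BSSID (addr3) matches the BSSID we are associated with */
	static bool demo_beacon_from_our_bss(const u8 *addr3, const u8 *bssid)
	{
		return ether_addr_equal(addr3, bssid);
	}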
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index 9b796ca..a01606b 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -693,8 +693,6 @@
 	return ret;
 }
 
-#define VEC_SIZE(a) ARRAY_SIZE(a)
-
 int
 mgt_commit(islpci_private *priv)
 {
@@ -704,10 +702,10 @@
 	if (islpci_get_state(priv) < PRV_STATE_INIT)
 		return 0;
 
-	rvalue = mgt_commit_list(priv, commit_part1, VEC_SIZE(commit_part1));
+	rvalue = mgt_commit_list(priv, commit_part1, ARRAY_SIZE(commit_part1));
 
 	if (priv->iw_mode != IW_MODE_MONITOR)
-		rvalue |= mgt_commit_list(priv, commit_part2, VEC_SIZE(commit_part2));
+		rvalue |= mgt_commit_list(priv, commit_part2, ARRAY_SIZE(commit_part2));
 
 	u = OID_INL_MODE;
 	rvalue |= mgt_commit_list(priv, &u, 1);
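
The oid_mgt.c cleanup drops the local VEC_SIZE() alias in favour of the standard ARRAY_SIZE() macro from <linux/kernel.h>, which is roughly the following (the real definition also adds a __must_be_array() type check):

	/* number of elements in a statically sized array */
	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))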
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index d66e298..b91d1bb 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -88,49 +88,6 @@
 MODULE_PARM_DESC(workaround_interval,
 	"set stall workaround interval in msecs (0=disabled) (default: 0)");
 
-
-/* various RNDIS OID defs */
-#define OID_GEN_LINK_SPEED			cpu_to_le32(0x00010107)
-#define OID_GEN_RNDIS_CONFIG_PARAMETER		cpu_to_le32(0x0001021b)
-
-#define OID_GEN_XMIT_OK				cpu_to_le32(0x00020101)
-#define OID_GEN_RCV_OK				cpu_to_le32(0x00020102)
-#define OID_GEN_XMIT_ERROR			cpu_to_le32(0x00020103)
-#define OID_GEN_RCV_ERROR			cpu_to_le32(0x00020104)
-#define OID_GEN_RCV_NO_BUFFER			cpu_to_le32(0x00020105)
-
-#define OID_802_3_CURRENT_ADDRESS		cpu_to_le32(0x01010102)
-#define OID_802_3_MULTICAST_LIST		cpu_to_le32(0x01010103)
-#define OID_802_3_MAXIMUM_LIST_SIZE		cpu_to_le32(0x01010104)
-
-#define OID_802_11_BSSID			cpu_to_le32(0x0d010101)
-#define OID_802_11_SSID				cpu_to_le32(0x0d010102)
-#define OID_802_11_INFRASTRUCTURE_MODE		cpu_to_le32(0x0d010108)
-#define OID_802_11_ADD_WEP			cpu_to_le32(0x0d010113)
-#define OID_802_11_REMOVE_WEP			cpu_to_le32(0x0d010114)
-#define OID_802_11_DISASSOCIATE			cpu_to_le32(0x0d010115)
-#define OID_802_11_AUTHENTICATION_MODE		cpu_to_le32(0x0d010118)
-#define OID_802_11_PRIVACY_FILTER		cpu_to_le32(0x0d010119)
-#define OID_802_11_BSSID_LIST_SCAN		cpu_to_le32(0x0d01011a)
-#define OID_802_11_ENCRYPTION_STATUS		cpu_to_le32(0x0d01011b)
-#define OID_802_11_ADD_KEY			cpu_to_le32(0x0d01011d)
-#define OID_802_11_REMOVE_KEY			cpu_to_le32(0x0d01011e)
-#define OID_802_11_ASSOCIATION_INFORMATION	cpu_to_le32(0x0d01011f)
-#define OID_802_11_CAPABILITY			cpu_to_le32(0x0d010122)
-#define OID_802_11_PMKID			cpu_to_le32(0x0d010123)
-#define OID_802_11_NETWORK_TYPES_SUPPORTED	cpu_to_le32(0x0d010203)
-#define OID_802_11_NETWORK_TYPE_IN_USE		cpu_to_le32(0x0d010204)
-#define OID_802_11_TX_POWER_LEVEL		cpu_to_le32(0x0d010205)
-#define OID_802_11_RSSI				cpu_to_le32(0x0d010206)
-#define OID_802_11_RSSI_TRIGGER			cpu_to_le32(0x0d010207)
-#define OID_802_11_FRAGMENTATION_THRESHOLD	cpu_to_le32(0x0d010209)
-#define OID_802_11_RTS_THRESHOLD		cpu_to_le32(0x0d01020a)
-#define OID_802_11_SUPPORTED_RATES		cpu_to_le32(0x0d01020e)
-#define OID_802_11_CONFIGURATION		cpu_to_le32(0x0d010211)
-#define OID_802_11_POWER_MODE			cpu_to_le32(0x0d010216)
-#define OID_802_11_BSSID_LIST			cpu_to_le32(0x0d010217)
-
-
 /* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */
 #define	WL_NOISE	-96	/* typical noise level in dBm */
 #define	WL_SIGMAX	-32	/* typical maximum signal level in dBm */
@@ -149,12 +106,6 @@
 #define BCM4320_DEFAULT_TXPOWER_DBM_50  10
 #define BCM4320_DEFAULT_TXPOWER_DBM_25  7
 
-
-/* codes for "status" field of completion messages */
-#define RNDIS_STATUS_ADAPTER_NOT_READY		cpu_to_le32(0xc0010011)
-#define RNDIS_STATUS_ADAPTER_NOT_OPEN		cpu_to_le32(0xc0010012)
-
-
 /* Known device types */
 #define RNDIS_UNKNOWN	0
 #define RNDIS_BCM4320A	1
@@ -515,7 +466,7 @@
 	int infra_mode;
 	bool connected;
 	u8 bssid[ETH_ALEN];
-	__le32 current_command_oid;
+	u32 current_command_oid;
 
 	/* encryption stuff */
 	u8 encr_tx_key_index;
@@ -670,63 +621,63 @@
 }
 
 #ifdef DEBUG
-static const char *oid_to_string(__le32 oid)
+static const char *oid_to_string(u32 oid)
 {
 	switch (oid) {
 #define OID_STR(oid) case oid: return(#oid)
 		/* from rndis_host.h */
-		OID_STR(OID_802_3_PERMANENT_ADDRESS);
-		OID_STR(OID_GEN_MAXIMUM_FRAME_SIZE);
-		OID_STR(OID_GEN_CURRENT_PACKET_FILTER);
-		OID_STR(OID_GEN_PHYSICAL_MEDIUM);
+		OID_STR(RNDIS_OID_802_3_PERMANENT_ADDRESS);
+		OID_STR(RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE);
+		OID_STR(RNDIS_OID_GEN_CURRENT_PACKET_FILTER);
+		OID_STR(RNDIS_OID_GEN_PHYSICAL_MEDIUM);
 
 		/* from rndis_wlan.c */
-		OID_STR(OID_GEN_LINK_SPEED);
-		OID_STR(OID_GEN_RNDIS_CONFIG_PARAMETER);
+		OID_STR(RNDIS_OID_GEN_LINK_SPEED);
+		OID_STR(RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER);
 
-		OID_STR(OID_GEN_XMIT_OK);
-		OID_STR(OID_GEN_RCV_OK);
-		OID_STR(OID_GEN_XMIT_ERROR);
-		OID_STR(OID_GEN_RCV_ERROR);
-		OID_STR(OID_GEN_RCV_NO_BUFFER);
+		OID_STR(RNDIS_OID_GEN_XMIT_OK);
+		OID_STR(RNDIS_OID_GEN_RCV_OK);
+		OID_STR(RNDIS_OID_GEN_XMIT_ERROR);
+		OID_STR(RNDIS_OID_GEN_RCV_ERROR);
+		OID_STR(RNDIS_OID_GEN_RCV_NO_BUFFER);
 
-		OID_STR(OID_802_3_CURRENT_ADDRESS);
-		OID_STR(OID_802_3_MULTICAST_LIST);
-		OID_STR(OID_802_3_MAXIMUM_LIST_SIZE);
+		OID_STR(RNDIS_OID_802_3_CURRENT_ADDRESS);
+		OID_STR(RNDIS_OID_802_3_MULTICAST_LIST);
+		OID_STR(RNDIS_OID_802_3_MAXIMUM_LIST_SIZE);
 
-		OID_STR(OID_802_11_BSSID);
-		OID_STR(OID_802_11_SSID);
-		OID_STR(OID_802_11_INFRASTRUCTURE_MODE);
-		OID_STR(OID_802_11_ADD_WEP);
-		OID_STR(OID_802_11_REMOVE_WEP);
-		OID_STR(OID_802_11_DISASSOCIATE);
-		OID_STR(OID_802_11_AUTHENTICATION_MODE);
-		OID_STR(OID_802_11_PRIVACY_FILTER);
-		OID_STR(OID_802_11_BSSID_LIST_SCAN);
-		OID_STR(OID_802_11_ENCRYPTION_STATUS);
-		OID_STR(OID_802_11_ADD_KEY);
-		OID_STR(OID_802_11_REMOVE_KEY);
-		OID_STR(OID_802_11_ASSOCIATION_INFORMATION);
-		OID_STR(OID_802_11_CAPABILITY);
-		OID_STR(OID_802_11_PMKID);
-		OID_STR(OID_802_11_NETWORK_TYPES_SUPPORTED);
-		OID_STR(OID_802_11_NETWORK_TYPE_IN_USE);
-		OID_STR(OID_802_11_TX_POWER_LEVEL);
-		OID_STR(OID_802_11_RSSI);
-		OID_STR(OID_802_11_RSSI_TRIGGER);
-		OID_STR(OID_802_11_FRAGMENTATION_THRESHOLD);
-		OID_STR(OID_802_11_RTS_THRESHOLD);
-		OID_STR(OID_802_11_SUPPORTED_RATES);
-		OID_STR(OID_802_11_CONFIGURATION);
-		OID_STR(OID_802_11_POWER_MODE);
-		OID_STR(OID_802_11_BSSID_LIST);
+		OID_STR(RNDIS_OID_802_11_BSSID);
+		OID_STR(RNDIS_OID_802_11_SSID);
+		OID_STR(RNDIS_OID_802_11_INFRASTRUCTURE_MODE);
+		OID_STR(RNDIS_OID_802_11_ADD_WEP);
+		OID_STR(RNDIS_OID_802_11_REMOVE_WEP);
+		OID_STR(RNDIS_OID_802_11_DISASSOCIATE);
+		OID_STR(RNDIS_OID_802_11_AUTHENTICATION_MODE);
+		OID_STR(RNDIS_OID_802_11_PRIVACY_FILTER);
+		OID_STR(RNDIS_OID_802_11_BSSID_LIST_SCAN);
+		OID_STR(RNDIS_OID_802_11_ENCRYPTION_STATUS);
+		OID_STR(RNDIS_OID_802_11_ADD_KEY);
+		OID_STR(RNDIS_OID_802_11_REMOVE_KEY);
+		OID_STR(RNDIS_OID_802_11_ASSOCIATION_INFORMATION);
+		OID_STR(RNDIS_OID_802_11_CAPABILITY);
+		OID_STR(RNDIS_OID_802_11_PMKID);
+		OID_STR(RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED);
+		OID_STR(RNDIS_OID_802_11_NETWORK_TYPE_IN_USE);
+		OID_STR(RNDIS_OID_802_11_TX_POWER_LEVEL);
+		OID_STR(RNDIS_OID_802_11_RSSI);
+		OID_STR(RNDIS_OID_802_11_RSSI_TRIGGER);
+		OID_STR(RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD);
+		OID_STR(RNDIS_OID_802_11_RTS_THRESHOLD);
+		OID_STR(RNDIS_OID_802_11_SUPPORTED_RATES);
+		OID_STR(RNDIS_OID_802_11_CONFIGURATION);
+		OID_STR(RNDIS_OID_802_11_POWER_MODE);
+		OID_STR(RNDIS_OID_802_11_BSSID_LIST);
 #undef OID_STR
 	}
 
 	return "?";
 }
 #else
-static const char *oid_to_string(__le32 oid)
+static const char *oid_to_string(u32 oid)
 {
 	return "?";
 }
@@ -736,7 +687,7 @@
 static int rndis_error_status(__le32 rndis_status)
 {
 	int ret = -EINVAL;
-	switch (rndis_status) {
+	switch (le32_to_cpu(rndis_status)) {
 	case RNDIS_STATUS_SUCCESS:
 		ret = 0;
 		break;
@@ -755,7 +706,7 @@
 	return ret;
 }
 
-static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
+static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
 {
 	struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
 	union {
@@ -782,9 +733,9 @@
 	mutex_lock(&priv->command_lock);
 
 	memset(u.get, 0, sizeof *u.get);
-	u.get->msg_type = RNDIS_MSG_QUERY;
+	u.get->msg_type = cpu_to_le32(RNDIS_MSG_QUERY);
 	u.get->msg_len = cpu_to_le32(sizeof *u.get);
-	u.get->oid = oid;
+	u.get->oid = cpu_to_le32(oid);
 
 	priv->current_command_oid = oid;
 	ret = rndis_command(dev, u.header, buflen);
@@ -839,7 +790,7 @@
 	return ret;
 }
 
-static int rndis_set_oid(struct usbnet *dev, __le32 oid, const void *data,
+static int rndis_set_oid(struct usbnet *dev, u32 oid, const void *data,
 			 int len)
 {
 	struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
@@ -866,9 +817,9 @@
 	mutex_lock(&priv->command_lock);
 
 	memset(u.set, 0, sizeof *u.set);
-	u.set->msg_type = RNDIS_MSG_SET;
+	u.set->msg_type = cpu_to_le32(RNDIS_MSG_SET);
 	u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len);
-	u.set->oid = oid;
+	u.set->oid = cpu_to_le32(oid);
 	u.set->len = cpu_to_le32(len);
 	u.set->offset = cpu_to_le32(sizeof(*u.set) - 8);
 	u.set->handle = cpu_to_le32(0);
@@ -908,7 +859,7 @@
 
 	reset = (void *)priv->command_buffer;
 	memset(reset, 0, sizeof(*reset));
-	reset->msg_type = RNDIS_MSG_RESET;
+	reset->msg_type = cpu_to_le32(RNDIS_MSG_RESET);
 	reset->msg_len = cpu_to_le32(sizeof(*reset));
 	priv->current_command_oid = 0;
 	ret = rndis_command(usbdev, (void *)reset, CONTROL_BUFFER_SIZE);
@@ -994,7 +945,7 @@
 	}
 #endif
 
-	ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER,
+	ret = rndis_set_oid(dev, RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER,
 							infobuf, info_len);
 	if (ret != 0)
 		netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n",
@@ -1031,9 +982,9 @@
 {
 	__le32 tmp;
 
-	/* Note: OID_802_11_BSSID_LIST_SCAN clears internal BSS list. */
+	/* Note: RNDIS_OID_802_11_BSSID_LIST_SCAN clears internal BSS list. */
 	tmp = cpu_to_le32(1);
-	return rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
+	return rndis_set_oid(usbdev, RNDIS_OID_802_11_BSSID_LIST_SCAN, &tmp,
 							sizeof(tmp));
 }
 
@@ -1042,7 +993,8 @@
 	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
 	int ret;
 
-	ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid));
+	ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_SSID,
+			    ssid, sizeof(*ssid));
 	if (ret < 0) {
 		netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret);
 		return ret;
@@ -1059,7 +1011,8 @@
 {
 	int ret;
 
-	ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN);
+	ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_BSSID,
+			    bssid, ETH_ALEN);
 	if (ret < 0) {
 		netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n",
 			    bssid, ret);
@@ -1083,7 +1036,8 @@
 	int ret, len;
 
 	len = ETH_ALEN;
-	ret = rndis_query_oid(usbdev, OID_802_11_BSSID, bssid, &len);
+	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_BSSID,
+			      bssid, &len);
 
 	if (ret != 0)
 		memset(bssid, 0, ETH_ALEN);
@@ -1094,8 +1048,9 @@
 static int get_association_info(struct usbnet *usbdev,
 			struct ndis_80211_assoc_info *info, int len)
 {
-	return rndis_query_oid(usbdev, OID_802_11_ASSOCIATION_INFORMATION,
-				info, &len);
+	return rndis_query_oid(usbdev,
+			RNDIS_OID_802_11_ASSOCIATION_INFORMATION,
+			info, &len);
 }
 
 static bool is_associated(struct usbnet *usbdev)
@@ -1119,7 +1074,9 @@
 	int i, ret = 0;
 
 	if (priv->radio_on) {
-		ret = rndis_set_oid(usbdev, OID_802_11_DISASSOCIATE, NULL, 0);
+		ret = rndis_set_oid(usbdev,
+				RNDIS_OID_802_11_DISASSOCIATE,
+				NULL, 0);
 		if (ret == 0) {
 			priv->radio_on = false;
 			netdev_dbg(usbdev->net, "%s(): radio_on = false\n",
@@ -1181,8 +1138,9 @@
 		return -ENOTSUPP;
 
 	tmp = cpu_to_le32(auth_mode);
-	ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp,
-								sizeof(tmp));
+	ret = rndis_set_oid(usbdev,
+			    RNDIS_OID_802_11_AUTHENTICATION_MODE,
+			    &tmp, sizeof(tmp));
 	if (ret != 0) {
 		netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n",
 			    ret);
@@ -1208,8 +1166,9 @@
 	else
 		tmp = cpu_to_le32(NDIS_80211_PRIV_ACCEPT_ALL);
 
-	return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp,
-								sizeof(tmp));
+	return rndis_set_oid(usbdev,
+			     RNDIS_OID_802_11_PRIVACY_FILTER, &tmp,
+			     sizeof(tmp));
 }
 
 static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
@@ -1234,8 +1193,9 @@
 		encr_mode = NDIS_80211_ENCR_DISABLED;
 
 	tmp = cpu_to_le32(encr_mode);
-	ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp,
-								sizeof(tmp));
+	ret = rndis_set_oid(usbdev,
+			RNDIS_OID_802_11_ENCRYPTION_STATUS, &tmp,
+			sizeof(tmp));
 	if (ret != 0) {
 		netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n",
 			    ret);
@@ -1255,8 +1215,9 @@
 		   __func__, priv->infra_mode);
 
 	tmp = cpu_to_le32(mode);
-	ret = rndis_set_oid(usbdev, OID_802_11_INFRASTRUCTURE_MODE, &tmp,
-								sizeof(tmp));
+	ret = rndis_set_oid(usbdev,
+			    RNDIS_OID_802_11_INFRASTRUCTURE_MODE,
+			    &tmp, sizeof(tmp));
 	if (ret != 0) {
 		netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n",
 			    ret);
@@ -1282,8 +1243,9 @@
 		rts_threshold = 2347;
 
 	tmp = cpu_to_le32(rts_threshold);
-	return rndis_set_oid(usbdev, OID_802_11_RTS_THRESHOLD, &tmp,
-								sizeof(tmp));
+	return rndis_set_oid(usbdev,
+			     RNDIS_OID_802_11_RTS_THRESHOLD,
+			     &tmp, sizeof(tmp));
 }
 
 static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold)
@@ -1296,8 +1258,9 @@
 		frag_threshold = 2346;
 
 	tmp = cpu_to_le32(frag_threshold);
-	return rndis_set_oid(usbdev, OID_802_11_FRAGMENTATION_THRESHOLD, &tmp,
-								sizeof(tmp));
+	return rndis_set_oid(usbdev,
+			RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD,
+			&tmp, sizeof(tmp));
 }
 
 static void set_default_iw_params(struct usbnet *usbdev)
@@ -1333,7 +1296,9 @@
 	dsconfig = ieee80211_dsss_chan_to_freq(channel) * 1000;
 
 	len = sizeof(config);
-	ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
+	ret = rndis_query_oid(usbdev,
+			RNDIS_OID_802_11_CONFIGURATION,
+			&config, &len);
 	if (ret < 0) {
 		netdev_dbg(usbdev->net, "%s(): querying configuration failed\n",
 			   __func__);
@@ -1341,8 +1306,9 @@
 	}
 
 	config.ds_config = cpu_to_le32(dsconfig);
-	ret = rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config,
-								sizeof(config));
+	ret = rndis_set_oid(usbdev,
+			RNDIS_OID_802_11_CONFIGURATION,
+			&config, sizeof(config));
 
 	netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret);
 
@@ -1359,8 +1325,10 @@
 
 	/* Get channel and beacon interval */
 	len = sizeof(config);
-	ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
-	netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
+	ret = rndis_query_oid(usbdev,
+			RNDIS_OID_802_11_CONFIGURATION,
+			&config, &len);
+	netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_CONFIGURATION -> %d\n",
 				__func__, ret);
 	if (ret < 0)
 		return NULL;
@@ -1413,8 +1381,9 @@
 				    ret);
 	}
 
-	ret = rndis_set_oid(usbdev, OID_802_11_ADD_WEP, &ndis_key,
-							sizeof(ndis_key));
+	ret = rndis_set_oid(usbdev,
+			RNDIS_OID_802_11_ADD_WEP, &ndis_key,
+			sizeof(ndis_key));
 	if (ret != 0) {
 		netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n",
 			    index + 1, ret);
@@ -1504,9 +1473,10 @@
 			get_bssid(usbdev, ndis_key.bssid);
 	}
 
-	ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key,
-					le32_to_cpu(ndis_key.size));
-	netdev_dbg(usbdev->net, "%s(): OID_802_11_ADD_KEY -> %08X\n",
+	ret = rndis_set_oid(usbdev,
+			RNDIS_OID_802_11_ADD_KEY, &ndis_key,
+			le32_to_cpu(ndis_key.size));
+	netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_ADD_KEY -> %08X\n",
 		   __func__, ret);
 	if (ret != 0)
 		return ret;
@@ -1594,14 +1564,16 @@
 			memset(remove_key.bssid, 0xff,
 						sizeof(remove_key.bssid));
 
-		ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_KEY, &remove_key,
-							sizeof(remove_key));
+		ret = rndis_set_oid(usbdev,
+				RNDIS_OID_802_11_REMOVE_KEY,
+				&remove_key, sizeof(remove_key));
 		if (ret != 0)
 			return ret;
 	} else {
 		keyindex = cpu_to_le32(index);
-		ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_WEP, &keyindex,
-							sizeof(keyindex));
+		ret = rndis_set_oid(usbdev,
+				RNDIS_OID_802_11_REMOVE_WEP,
+				&keyindex, sizeof(keyindex));
 		if (ret != 0) {
 			netdev_warn(usbdev->net,
 				    "removing encryption key %d failed (%08X)\n",
@@ -1626,14 +1598,14 @@
 	char *mc_addrs = NULL;
 	int mc_count;
 
-	basefilter = filter = RNDIS_PACKET_TYPE_DIRECTED |
-			      RNDIS_PACKET_TYPE_BROADCAST;
+	basefilter = filter = cpu_to_le32(RNDIS_PACKET_TYPE_DIRECTED |
+					  RNDIS_PACKET_TYPE_BROADCAST);
 
 	if (usbdev->net->flags & IFF_PROMISC) {
-		filter |= RNDIS_PACKET_TYPE_PROMISCUOUS |
-			RNDIS_PACKET_TYPE_ALL_LOCAL;
+		filter |= cpu_to_le32(RNDIS_PACKET_TYPE_PROMISCUOUS |
+				      RNDIS_PACKET_TYPE_ALL_LOCAL);
 	} else if (usbdev->net->flags & IFF_ALLMULTI) {
-		filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
+		filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
 	}
 
 	if (filter != basefilter)
@@ -1646,7 +1618,7 @@
 	netif_addr_lock_bh(usbdev->net);
 	mc_count = netdev_mc_count(usbdev->net);
 	if (mc_count > priv->multicast_size) {
-		filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
+		filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
 	} else if (mc_count) {
 		int i = 0;
 
@@ -1669,27 +1641,28 @@
 		goto set_filter;
 
 	if (mc_count) {
-		ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, mc_addrs,
-				    mc_count * ETH_ALEN);
+		ret = rndis_set_oid(usbdev,
+				RNDIS_OID_802_3_MULTICAST_LIST,
+				mc_addrs, mc_count * ETH_ALEN);
 		kfree(mc_addrs);
 		if (ret == 0)
-			filter |= RNDIS_PACKET_TYPE_MULTICAST;
+			filter |= cpu_to_le32(RNDIS_PACKET_TYPE_MULTICAST);
 		else
-			filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
+			filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
 
-		netdev_dbg(usbdev->net, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n",
+		netdev_dbg(usbdev->net, "RNDIS_OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n",
 			   mc_count, priv->multicast_size, ret);
 	}
 
 set_filter:
-	ret = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter,
+	ret = rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, &filter,
 							sizeof(filter));
 	if (ret < 0) {
 		netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n",
 			    le32_to_cpu(filter));
 	}
 
-	netdev_dbg(usbdev->net, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n",
+	netdev_dbg(usbdev->net, "RNDIS_OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n",
 		   le32_to_cpu(filter), ret);
 }
 
@@ -1748,9 +1721,10 @@
 	pmkids->length = cpu_to_le32(len);
 	pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
 
-	ret = rndis_query_oid(usbdev, OID_802_11_PMKID, pmkids, &len);
+	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_PMKID,
+			pmkids, &len);
 	if (ret < 0) {
-		netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d)"
+		netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_PMKID(%d, %d)"
 				" -> %d\n", __func__, len, max_pmkids, ret);
 
 		kfree(pmkids);
@@ -1776,10 +1750,10 @@
 
 	debug_print_pmkids(usbdev, pmkids, __func__);
 
-	ret = rndis_set_oid(usbdev, OID_802_11_PMKID, pmkids,
-						le32_to_cpu(pmkids->length));
+	ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_PMKID, pmkids,
+			    le32_to_cpu(pmkids->length));
 	if (ret < 0) {
-		netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d) -> %d"
+		netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_PMKID(%d, %d) -> %d"
 				"\n", __func__, len, num_pmkids, ret);
 	}
 
@@ -1801,8 +1775,8 @@
 		count = max_pmkids;
 
 	for (i = 0; i < count; i++)
-		if (!compare_ether_addr(pmkids->bssid_info[i].bssid,
-							pmksa->bssid))
+		if (ether_addr_equal(pmkids->bssid_info[i].bssid,
+				     pmksa->bssid))
 			break;
 
 	/* pmkid not found */
@@ -1843,8 +1817,8 @@
 
 	/* update with new pmkid */
 	for (i = 0; i < count; i++) {
-		if (compare_ether_addr(pmkids->bssid_info[i].bssid,
-							pmksa->bssid))
+		if (!ether_addr_equal(pmkids->bssid_info[i].bssid,
+				      pmksa->bssid))
 			continue;
 
 		memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid,
@@ -2113,7 +2087,8 @@
 	 * resizing until it won't get any bigger.
 	 */
 	new_len = len;
-	ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &new_len);
+	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_BSSID_LIST,
+			      buf, &new_len);
 	if (ret != 0 || new_len < sizeof(struct ndis_80211_bssid_list_ex))
 		goto out;
 
@@ -2139,7 +2114,7 @@
 	while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
 		if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
 		    matched) {
-			if (compare_ether_addr(bssid->mac, match_bssid))
+			if (!ether_addr_equal(bssid->mac, match_bssid))
 				*matched = true;
 		}
 
@@ -2511,14 +2486,15 @@
 	memset(sinfo, 0, sizeof(*sinfo));
 
 	len = sizeof(linkspeed);
-	ret = rndis_query_oid(usbdev, OID_GEN_LINK_SPEED, &linkspeed, &len);
+	ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED, &linkspeed, &len);
 	if (ret == 0) {
 		sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000;
 		sinfo->filled |= STATION_INFO_TX_BITRATE;
 	}
 
 	len = sizeof(rssi);
-	ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len);
+	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
+			      &rssi, &len);
 	if (ret == 0) {
 		sinfo->signal = level_to_qual(le32_to_cpu(rssi));
 		sinfo->filled |= STATION_INFO_SIGNAL;
@@ -2531,7 +2507,7 @@
 	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
 	struct usbnet *usbdev = priv->usbdev;
 
-	if (compare_ether_addr(priv->bssid, mac))
+	if (!ether_addr_equal(priv->bssid, mac))
 		return -ENOENT;
 
 	rndis_fill_station_info(usbdev, sinfo);
@@ -2624,7 +2600,8 @@
 	pmkid.length = cpu_to_le32(sizeof(pmkid));
 	pmkid.bssid_info_count = cpu_to_le32(0);
 
-	return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid));
+	return rndis_set_oid(usbdev, RNDIS_OID_802_11_PMKID,
+			     &pmkid, sizeof(pmkid));
 }
 
 static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
@@ -2654,9 +2631,10 @@
 	priv->power_mode = power_mode;
 
 	mode = cpu_to_le32(power_mode);
-	ret = rndis_set_oid(usbdev, OID_802_11_POWER_MODE, &mode, sizeof(mode));
+	ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_POWER_MODE,
+			    &mode, sizeof(mode));
 
-	netdev_dbg(usbdev->net, "%s(): OID_802_11_POWER_MODE -> %d\n",
+	netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_POWER_MODE -> %d\n",
 				__func__, ret);
 
 	return ret;
@@ -2693,10 +2671,11 @@
 	/* Get signal quality, in case of error use rssi=0 and ignore error. */
 	len = sizeof(rssi);
 	rssi = 0;
-	ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len);
+	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
+			      &rssi, &len);
 	signal = level_to_qual(le32_to_cpu(rssi));
 
-	netdev_dbg(usbdev->net, "%s(): OID_802_11_RSSI -> %d, "
+	netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_RSSI -> %d, "
 		   "rssi:%d, qual: %d\n", __func__, ret, le32_to_cpu(rssi),
 		   level_to_qual(le32_to_cpu(rssi)));
 
@@ -2720,8 +2699,9 @@
 	/* Get SSID, in case of error, use zero length SSID and ignore error. */
 	len = sizeof(ssid);
 	memset(&ssid, 0, sizeof(ssid));
-	ret = rndis_query_oid(usbdev, OID_802_11_SSID, &ssid, &len);
-	netdev_dbg(usbdev->net, "%s(): OID_802_11_SSID -> %d, len: %d, ssid: "
+	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_SSID,
+			      &ssid, &len);
+	netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_SSID -> %d, len: %d, ssid: "
 				"'%.32s'\n", __func__, ret,
 				le32_to_cpu(ssid.length), ssid.essid);
 
@@ -2843,7 +2823,7 @@
 	 * NDIS spec says: "If the device is associated, but the associated
 	 *  BSSID is not in its BSSID scan list, then the driver must add an
 	 *  entry for the BSSID at the end of the data that it returns in
-	 *  response to query of OID_802_11_BSSID_LIST."
+	 *  response to query of RNDIS_OID_802_11_BSSID_LIST."
 	 *
 	 * NOTE: Seems to be true for BCM4320b variant, but not BCM4320a.
 	 */
@@ -3095,15 +3075,15 @@
 	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
 	struct rndis_indicate *msg = ind;
 
-	switch (msg->status) {
+	switch (le32_to_cpu(msg->status)) {
 	case RNDIS_STATUS_MEDIA_CONNECT:
-		if (priv->current_command_oid == OID_802_11_ADD_KEY) {
-			/* OID_802_11_ADD_KEY causes sometimes extra
+		if (priv->current_command_oid == RNDIS_OID_802_11_ADD_KEY) {
+			/* RNDIS_OID_802_11_ADD_KEY causes sometimes extra
 			 * "media connect" indications which confuses driver
 			 * and userspace to think that device is
 			 * roaming/reassociating when it isn't.
 			 */
-			netdev_dbg(usbdev->net, "ignored OID_802_11_ADD_KEY triggered 'media connect'\n");
+			netdev_dbg(usbdev->net, "ignored RNDIS_OID_802_11_ADD_KEY triggered 'media connect'\n");
 			return;
 		}
 
@@ -3148,8 +3128,9 @@
 
 	/* determine supported modes */
 	len = sizeof(networks_supported);
-	retval = rndis_query_oid(usbdev, OID_802_11_NETWORK_TYPES_SUPPORTED,
-						&networks_supported, &len);
+	retval = rndis_query_oid(usbdev,
+				 RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED,
+				 &networks_supported, &len);
 	if (retval >= 0) {
 		n = le32_to_cpu(networks_supported.num_items);
 		if (n > 8)
@@ -3173,9 +3154,11 @@
 	/* get device 802.11 capabilities, number of PMKIDs */
 	caps = (struct ndis_80211_capability *)caps_buf;
 	len = sizeof(caps_buf);
-	retval = rndis_query_oid(usbdev, OID_802_11_CAPABILITY, caps, &len);
+	retval = rndis_query_oid(usbdev,
+				 RNDIS_OID_802_11_CAPABILITY,
+				 caps, &len);
 	if (retval >= 0) {
-		netdev_dbg(usbdev->net, "OID_802_11_CAPABILITY -> len %d, "
+		netdev_dbg(usbdev->net, "RNDIS_OID_802_11_CAPABILITY -> len %d, "
 				"ver %d, pmkids %d, auth-encr-pairs %d\n",
 				le32_to_cpu(caps->length),
 				le32_to_cpu(caps->version),
@@ -3247,13 +3230,14 @@
 	}
 
 	len = sizeof(rssi);
-	ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len);
+	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
+			      &rssi, &len);
 	if (ret == 0) {
 		priv->last_qual = level_to_qual(le32_to_cpu(rssi));
 		rndis_do_cqm(usbdev, le32_to_cpu(rssi));
 	}
 
-	netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
+	netdev_dbg(usbdev->net, "dev-poller: RNDIS_OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
 		   ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
 
 	/* Workaround transfer stalls on poor quality links.
@@ -3275,15 +3259,18 @@
 		 * working.
 		 */
 		tmp = cpu_to_le32(1);
-		rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
-								sizeof(tmp));
+		rndis_set_oid(usbdev,
+			      RNDIS_OID_802_11_BSSID_LIST_SCAN,
+			      &tmp, sizeof(tmp));
 
 		len = CONTROL_BUFFER_SIZE;
 		buf = kmalloc(len, GFP_KERNEL);
 		if (!buf)
 			goto end;
 
-		rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len);
+		rndis_query_oid(usbdev,
+				RNDIS_OID_802_11_BSSID_LIST,
+				buf, &len);
 		kfree(buf);
 	}
 
@@ -3465,13 +3452,15 @@
 	 */
 	usbdev->net->netdev_ops = &rndis_wlan_netdev_ops;
 
-	tmp = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST;
-	retval = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &tmp,
-								sizeof(tmp));
+	tmp = cpu_to_le32(RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST);
+	retval = rndis_set_oid(usbdev,
+			       RNDIS_OID_GEN_CURRENT_PACKET_FILTER,
+			       &tmp, sizeof(tmp));
 
 	len = sizeof(tmp);
-	retval = rndis_query_oid(usbdev, OID_802_3_MAXIMUM_LIST_SIZE, &tmp,
-								&len);
+	retval = rndis_query_oid(usbdev,
+				 RNDIS_OID_802_3_MAXIMUM_LIST_SIZE,
+				 &tmp, &len);
 	priv->multicast_size = le32_to_cpu(tmp);
 	if (retval < 0 || priv->multicast_size < 0)
 		priv->multicast_size = 0;
@@ -3601,7 +3590,7 @@
 	/* Set current packet filter zero to block receiving data packets from
 	   device. */
 	filter = 0;
-	rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter,
+	rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, &filter,
 								sizeof(filter));
 
 	return retval;
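
The rndis_wlan conversion above switches the driver to the shared RNDIS_OID_* constants and keeps OIDs and status codes as host-order u32 values internally; byte swapping now happens exactly once at the wire boundary, with cpu_to_le32() when a request is built and le32_to_cpu() when a received field is examined. A hedged sketch of that discipline, using a simplified message layout rather than the real RNDIS structures:

	struct demo_rndis_hdr {
		__le32 msg_type;	/* little-endian on the wire */
		__le32 oid;
	};

	static void demo_build_query(struct demo_rndis_hdr *hdr, u32 oid)
	{
		hdr->msg_type = cpu_to_le32(RNDIS_MSG_QUERY);
		hdr->oid = cpu_to_le32(oid);	/* oid stays host-order in the driver */
	}

	static bool demo_is_media_connect(__le32 wire_status)
	{
		return le32_to_cpu(wire_status) == RNDIS_STATUS_MEDIA_CONNECT;
	}

Keeping the on-wire fields annotated as __le32 while the driver passes plain u32 values around also lets sparse flag any place where a conversion is missing.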
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 3a6b402..5e6b501 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1828,15 +1828,4 @@
 	.resume		= rt2x00pci_resume,
 };
 
-static int __init rt2400pci_init(void)
-{
-	return pci_register_driver(&rt2400pci_driver);
-}
-
-static void __exit rt2400pci_exit(void)
-{
-	pci_unregister_driver(&rt2400pci_driver);
-}
-
-module_init(rt2400pci_init);
-module_exit(rt2400pci_exit);
+module_pci_driver(rt2400pci_driver);
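
Several PCI drivers in this series drop their hand-written init/exit stubs in favour of module_pci_driver(). For reference, the macro (from <linux/pci.h>, built on module_driver()) expands to roughly the boilerplate it replaces:

	/* roughly what module_pci_driver(rt2400pci_driver) generates */
	static int __init rt2400pci_driver_init(void)
	{
		return pci_register_driver(&rt2400pci_driver);
	}
	module_init(rt2400pci_driver_init);

	static void __exit rt2400pci_driver_exit(void)
	{
		pci_unregister_driver(&rt2400pci_driver);
	}
	module_exit(rt2400pci_driver_exit);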
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index dcc0e1f..136b849 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -2119,15 +2119,4 @@
 	.resume		= rt2x00pci_resume,
 };
 
-static int __init rt2500pci_init(void)
-{
-	return pci_register_driver(&rt2500pci_driver);
-}
-
-static void __exit rt2500pci_exit(void)
-{
-	pci_unregister_driver(&rt2500pci_driver);
-}
-
-module_init(rt2500pci_init);
-module_exit(rt2500pci_exit);
+module_pci_driver(rt2500pci_driver);
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 1de9c75..c88fd3e 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1912,7 +1912,7 @@
 	{ USB_DEVICE(0x0b05, 0x1706) },
 	{ USB_DEVICE(0x0b05, 0x1707) },
 	/* Belkin */
-	{ USB_DEVICE(0x050d, 0x7050) },
+	{ USB_DEVICE(0x050d, 0x7050) },	/* FCC ID: K7SF5D7050A ver. 2.x */
 	{ USB_DEVICE(0x050d, 0x7051) },
 	/* Cisco Systems */
 	{ USB_DEVICE(0x13b1, 0x000d) },
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 063bfa8..9348521 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -83,6 +83,7 @@
 #define REV_RT3090E			0x0211
 #define REV_RT3390E			0x0211
 #define REV_RT5390F			0x0502
+#define REV_RT5390R			0x1502
 
 /*
  * Signal information.
@@ -98,9 +99,11 @@
 #define EEPROM_BASE			0x0000
 #define EEPROM_SIZE			0x0110
 #define BBP_BASE			0x0000
-#define BBP_SIZE			0x0080
+#define BBP_SIZE			0x00ff
 #define RF_BASE				0x0004
 #define RF_SIZE				0x0010
+#define RFCSR_BASE			0x0000
+#define RFCSR_SIZE			0x0040
 
 /*
  * Number of TX queues.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 6c0a12e..dfc90d3 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -290,11 +290,25 @@
 		msleep(10);
 	}
 
-	ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
+	ERROR(rt2x00dev, "WPDMA TX/RX busy [0x%08x].\n", reg);
 	return -EACCES;
 }
 EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
 
+void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
+	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
+	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+}
+EXPORT_SYMBOL_GPL(rt2800_disable_wpdma);
+
 static bool rt2800_check_firmware_crc(const u8 *data, const size_t len)
 {
 	u16 fw_crc;
@@ -412,6 +426,8 @@
 		rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
 	}
 
+	rt2800_disable_wpdma(rt2x00dev);
+
 	/*
 	 * Write firmware to the device.
 	 */
@@ -436,10 +452,7 @@
 	 * Disable DMA, will be reenabled later when enabling
 	 * the radio.
 	 */
-	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
-	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+	rt2800_disable_wpdma(rt2x00dev);
 
 	/*
 	 * Initialize firmware.
@@ -823,6 +836,13 @@
 		.word_size	= sizeof(u32),
 		.word_count	= RF_SIZE / sizeof(u32),
 	},
+	.rfcsr	= {
+		.read		= rt2800_rfcsr_read,
+		.write		= rt2800_rfcsr_write,
+		.word_base	= RFCSR_BASE,
+		.word_size	= sizeof(u8),
+		.word_count	= RFCSR_SIZE / sizeof(u8),
+	},
 };
 EXPORT_SYMBOL_GPL(rt2800_rt2x00debug);
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
@@ -2717,13 +2737,7 @@
 	unsigned int i;
 	int ret;
 
-	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
-	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+	rt2800_disable_wpdma(rt2x00dev);
 
 	ret = rt2800_drv_init_registers(rt2x00dev);
 	if (ret)
@@ -3349,6 +3363,13 @@
 			rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
 		}
 
+		/* This chip has hardware antenna diversity */
+		if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
+			rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */
+			rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */
+			rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
+		}
+
 		rt2800_bbp_read(rt2x00dev, 152, &value);
 		if (ant == 0)
 			rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
@@ -3997,10 +4018,7 @@
 {
 	u32 reg;
 
-	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
-	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+	rt2800_disable_wpdma(rt2x00dev);
 
 	/* Wait for DMA, ignore error */
 	rt2800_wait_wpdma_ready(rt2x00dev);
@@ -4287,6 +4305,11 @@
 		rt2x00dev->default_ant.rx = ANTENNA_A;
 	}
 
+	if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
+		rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; /* Unused */
+		rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* Unused */
+	}
+
 	/*
 	 * Determine external LNA informations.
 	 */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 419e36c..18a0b67 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -208,5 +208,6 @@
 			u8 buf_size);
 int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
 		      struct survey_info *survey);
+void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
 
 #endif /* RT2800LIB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 0397bbf..931331d 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -361,7 +361,6 @@
 static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
 {
 	struct queue_entry_priv_pci *entry_priv;
-	u32 reg;
 
 	/*
 	 * Initialize registers.
@@ -394,6 +393,16 @@
 	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0);
 	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0);
 
+	rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR4, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT4, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX4, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX4, 0);
+
+	rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR5, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT5, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX5, 0);
+	rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX5, 0);
+
 	entry_priv = rt2x00dev->rx->entries[0].priv_data;
 	rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma);
 	rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT,
@@ -402,14 +411,7 @@
 				 rt2x00dev->rx[0].limit - 1);
 	rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0);
 
-	/*
-	 * Enable global DMA configuration
-	 */
-	rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
-	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
-	rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
+	rt2800_disable_wpdma(rt2x00dev);
 
 	rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0);
 
@@ -504,8 +506,10 @@
 {
 	int retval;
 
-	if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
-		     rt2800pci_init_queues(rt2x00dev)))
+	/* Wait for DMA, ignore error until we initialize queues. */
+	rt2800_wait_wpdma_ready(rt2x00dev);
+
+	if (unlikely(rt2800pci_init_queues(rt2x00dev)))
 		return -EIO;
 
 	retval = rt2800_enable_radio(rt2x00dev);
@@ -1184,7 +1188,9 @@
 	{ PCI_DEVICE(0x1814, 0x3593) },
 #endif
 #ifdef CONFIG_RT2800PCI_RT53XX
+	{ PCI_DEVICE(0x1814, 0x5362) },
 	{ PCI_DEVICE(0x1814, 0x5390) },
+	{ PCI_DEVICE(0x1814, 0x5392) },
 	{ PCI_DEVICE(0x1814, 0x539a) },
 	{ PCI_DEVICE(0x1814, 0x539f) },
 #endif
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 001735f..5601302 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -922,6 +922,7 @@
 	{ USB_DEVICE(0x1482, 0x3c09) },
 	/* AirTies */
 	{ USB_DEVICE(0x1eda, 0x2012) },
+	{ USB_DEVICE(0x1eda, 0x2210) },
 	{ USB_DEVICE(0x1eda, 0x2310) },
 	/* Allwin */
 	{ USB_DEVICE(0x8516, 0x2070) },
@@ -991,6 +992,7 @@
 	/* DVICO */
 	{ USB_DEVICE(0x0fe9, 0xb307) },
 	/* Edimax */
+	{ USB_DEVICE(0x7392, 0x4085) },
 	{ USB_DEVICE(0x7392, 0x7711) },
 	{ USB_DEVICE(0x7392, 0x7717) },
 	{ USB_DEVICE(0x7392, 0x7718) },
@@ -1066,6 +1068,7 @@
 	/* Philips */
 	{ USB_DEVICE(0x0471, 0x200f) },
 	/* Planex */
+	{ USB_DEVICE(0x2019, 0x5201) },
 	{ USB_DEVICE(0x2019, 0xab25) },
 	{ USB_DEVICE(0x2019, 0xed06) },
 	/* Quanta */
@@ -1134,6 +1137,10 @@
 #ifdef CONFIG_RT2800USB_RT33XX
 	/* Belkin */
 	{ USB_DEVICE(0x050d, 0x945b) },
+	/* Panasonic */
+	{ USB_DEVICE(0x083a, 0xb511) },
+	/* Philips */
+	{ USB_DEVICE(0x0471, 0x20dd) },
 	/* Ralink */
 	{ USB_DEVICE(0x148f, 0x3370) },
 	{ USB_DEVICE(0x148f, 0x8070) },
@@ -1145,6 +1152,8 @@
 	{ USB_DEVICE(0x8516, 0x3572) },
 	/* Askey */
 	{ USB_DEVICE(0x1690, 0x0744) },
+	{ USB_DEVICE(0x1690, 0x0761) },
+	{ USB_DEVICE(0x1690, 0x0764) },
 	/* Cisco */
 	{ USB_DEVICE(0x167b, 0x4001) },
 	/* EnGenius */
@@ -1159,20 +1168,25 @@
 	/* Sitecom */
 	{ USB_DEVICE(0x0df6, 0x0041) },
 	{ USB_DEVICE(0x0df6, 0x0062) },
+	{ USB_DEVICE(0x0df6, 0x0065) },
+	{ USB_DEVICE(0x0df6, 0x0066) },
+	{ USB_DEVICE(0x0df6, 0x0068) },
 	/* Toshiba */
 	{ USB_DEVICE(0x0930, 0x0a07) },
 	/* Zinwell */
 	{ USB_DEVICE(0x5a57, 0x0284) },
 #endif
 #ifdef CONFIG_RT2800USB_RT53XX
-	/* Alpha */
-	{ USB_DEVICE(0x2001, 0x3c15) },
-	{ USB_DEVICE(0x2001, 0x3c19) },
 	/* Arcadyan */
 	{ USB_DEVICE(0x043e, 0x7a12) },
 	/* Azurewave */
 	{ USB_DEVICE(0x13d3, 0x3329) },
 	{ USB_DEVICE(0x13d3, 0x3365) },
+	/* D-Link */
+	{ USB_DEVICE(0x2001, 0x3c15) },
+	{ USB_DEVICE(0x2001, 0x3c19) },
+	{ USB_DEVICE(0x2001, 0x3c1c) },
+	{ USB_DEVICE(0x2001, 0x3c1d) },
 	/* LG innotek */
 	{ USB_DEVICE(0x043e, 0x7a22) },
 	/* Panasonic */
@@ -1224,12 +1238,8 @@
 	{ USB_DEVICE(0x07d1, 0x3c0b) },
 	{ USB_DEVICE(0x07d1, 0x3c17) },
 	{ USB_DEVICE(0x2001, 0x3c17) },
-	/* Edimax */
-	{ USB_DEVICE(0x7392, 0x4085) },
 	/* Encore */
 	{ USB_DEVICE(0x203d, 0x14a1) },
-	/* Fujitsu Stylistic 550 */
-	{ USB_DEVICE(0x1690, 0x0761) },
 	/* Gemtek */
 	{ USB_DEVICE(0x15a9, 0x0010) },
 	/* Gigabyte */
@@ -1250,7 +1260,6 @@
 	{ USB_DEVICE(0x05a6, 0x0101) },
 	{ USB_DEVICE(0x1d4d, 0x0010) },
 	/* Planex */
-	{ USB_DEVICE(0x2019, 0x5201) },
 	{ USB_DEVICE(0x2019, 0xab24) },
 	/* Qcom */
 	{ USB_DEVICE(0x18e8, 0x6259) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 471f87c..ca36ccc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -692,6 +692,8 @@
 	 */
 	CONFIG_CHANNEL_HT40,
 	CONFIG_POWERSAVING,
+	CONFIG_HT_DISABLED,
+	CONFIG_QOS_DISABLED,
 
 	/*
 	 * Mark we currently are sequentially reading TX_STA_FIFO register
@@ -1280,7 +1282,7 @@
 void rt2x00lib_txdone(struct queue_entry *entry,
 		      struct txdone_entry_desc *txdesc);
 void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status);
-void rt2x00lib_rxdone(struct queue_entry *entry);
+void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
 
 /*
  * mac80211 handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 293676b..e7361d9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -217,6 +217,11 @@
 	libconf.conf = conf;
 
 	if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) {
+		if (!conf_is_ht(conf))
+			set_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags);
+		else
+			clear_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags);
+
 		if (conf_is_ht40(conf)) {
 			set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
 			hw_value = rt2x00ht_center_channel(rt2x00dev, conf);
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 78787fc..3bb8caf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -70,6 +70,7 @@
 	 *     - eeprom offset/value files
 	 *     - bbp offset/value files
 	 *     - rf offset/value files
+	 *     - rfcsr offset/value files
 	 *   - queue folder
 	 *     - frame dump file
 	 *     - queue stats file
@@ -89,6 +90,8 @@
 	struct dentry *bbp_val_entry;
 	struct dentry *rf_off_entry;
 	struct dentry *rf_val_entry;
+	struct dentry *rfcsr_off_entry;
+	struct dentry *rfcsr_val_entry;
 	struct dentry *queue_folder;
 	struct dentry *queue_frame_dump_entry;
 	struct dentry *queue_stats_entry;
@@ -131,6 +134,7 @@
 	unsigned int offset_eeprom;
 	unsigned int offset_bbp;
 	unsigned int offset_rf;
+	unsigned int offset_rfcsr;
 };
 
 void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
@@ -525,6 +529,7 @@
 RT2X00DEBUGFS_OPS(eeprom, "0x%.4x\n", u16);
 RT2X00DEBUGFS_OPS(bbp, "0x%.2x\n", u8);
 RT2X00DEBUGFS_OPS(rf, "0x%.8x\n", u32);
+RT2X00DEBUGFS_OPS(rfcsr, "0x%.2x\n", u8);
 
 static ssize_t rt2x00debug_read_dev_flags(struct file *file,
 					  char __user *buf,
@@ -614,7 +619,7 @@
 	const struct rt2x00debug *debug = intf->debug;
 	char *data;
 
-	data = kzalloc(8 * MAX_LINE_LENGTH, GFP_KERNEL);
+	data = kzalloc(9 * MAX_LINE_LENGTH, GFP_KERNEL);
 	if (!data)
 		return NULL;
 
@@ -624,22 +629,22 @@
 	data += sprintf(data, "revision:\t%04x\n", intf->rt2x00dev->chip.rev);
 	data += sprintf(data, "\n");
 	data += sprintf(data, "register\tbase\twords\twordsize\n");
-	data += sprintf(data, "csr\t%d\t%d\t%d\n",
-			debug->csr.word_base,
-			debug->csr.word_count,
-			debug->csr.word_size);
-	data += sprintf(data, "eeprom\t%d\t%d\t%d\n",
-			debug->eeprom.word_base,
-			debug->eeprom.word_count,
-			debug->eeprom.word_size);
-	data += sprintf(data, "bbp\t%d\t%d\t%d\n",
-			debug->bbp.word_base,
-			debug->bbp.word_count,
-			debug->bbp.word_size);
-	data += sprintf(data, "rf\t%d\t%d\t%d\n",
-			debug->rf.word_base,
-			debug->rf.word_count,
-			debug->rf.word_size);
+#define RT2X00DEBUGFS_SPRINTF_REGISTER(__name)			\
+{								\
+	if(debug->__name.read)					\
+		data += sprintf(data, __stringify(__name)	\
+				"\t%d\t%d\t%d\n",		\
+				debug->__name.word_base,	\
+				debug->__name.word_count,	\
+				debug->__name.word_size);	\
+}
+	RT2X00DEBUGFS_SPRINTF_REGISTER(csr);
+	RT2X00DEBUGFS_SPRINTF_REGISTER(eeprom);
+	RT2X00DEBUGFS_SPRINTF_REGISTER(bbp);
+	RT2X00DEBUGFS_SPRINTF_REGISTER(rf);
+	RT2X00DEBUGFS_SPRINTF_REGISTER(rfcsr);
+#undef RT2X00DEBUGFS_SPRINTF_REGISTER
+
 	blob->size = strlen(blob->data);
 
 	return debugfs_create_blob(name, S_IRUSR, intf->driver_folder, blob);
@@ -694,31 +699,34 @@
 	if (IS_ERR(intf->register_folder) || !intf->register_folder)
 		goto exit;
 
-#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name)	\
-({								\
-	(__intf)->__name##_off_entry =				\
-	    debugfs_create_u32(__stringify(__name) "_offset",	\
-			       S_IRUSR | S_IWUSR,		\
-			       (__intf)->register_folder,	\
-			       &(__intf)->offset_##__name);	\
-	if (IS_ERR((__intf)->__name##_off_entry)		\
-			|| !(__intf)->__name##_off_entry)	\
-		goto exit;					\
-								\
-	(__intf)->__name##_val_entry =				\
-	    debugfs_create_file(__stringify(__name) "_value",	\
-				S_IRUSR | S_IWUSR,		\
-				(__intf)->register_folder,	\
-				(__intf), &rt2x00debug_fop_##__name);\
-	if (IS_ERR((__intf)->__name##_val_entry)		\
-			|| !(__intf)->__name##_val_entry)	\
-		goto exit;					\
+#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name)			\
+({										\
+	if(debug->__name.read) {						\
+		(__intf)->__name##_off_entry =					\
+		debugfs_create_u32(__stringify(__name) "_offset",		\
+				       S_IRUSR | S_IWUSR,			\
+				       (__intf)->register_folder,		\
+				       &(__intf)->offset_##__name);		\
+		if (IS_ERR((__intf)->__name##_off_entry)			\
+				|| !(__intf)->__name##_off_entry)		\
+			goto exit;						\
+										\
+		(__intf)->__name##_val_entry =					\
+		debugfs_create_file(__stringify(__name) "_value",		\
+					S_IRUSR | S_IWUSR,			\
+					(__intf)->register_folder,		\
+					(__intf), &rt2x00debug_fop_##__name);	\
+		if (IS_ERR((__intf)->__name##_val_entry)			\
+				|| !(__intf)->__name##_val_entry)		\
+			goto exit;						\
+	}									\
 })
 
 	RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, csr);
 	RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, eeprom);
 	RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, bbp);
 	RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, rf);
+	RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, rfcsr);
 
 #undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY
 
@@ -770,6 +778,8 @@
 	debugfs_remove(intf->queue_stats_entry);
 	debugfs_remove(intf->queue_frame_dump_entry);
 	debugfs_remove(intf->queue_folder);
+	debugfs_remove(intf->rfcsr_val_entry);
+	debugfs_remove(intf->rfcsr_off_entry);
 	debugfs_remove(intf->rf_val_entry);
 	debugfs_remove(intf->rf_off_entry);
 	debugfs_remove(intf->bbp_val_entry);
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h
index fa11409..e11d39b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.h
@@ -65,6 +65,7 @@
 	RT2X00DEBUGFS_REGISTER_ENTRY(eeprom, u16);
 	RT2X00DEBUGFS_REGISTER_ENTRY(bbp, u8);
 	RT2X00DEBUGFS_REGISTER_ENTRY(rf, u32);
+	RT2X00DEBUGFS_REGISTER_ENTRY(rfcsr, u8);
 };
 
 #endif /* RT2X00DEBUG_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 90cc5e7..e5404e5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -391,9 +391,10 @@
 		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
 		tx_info->status.ampdu_len = 1;
 		tx_info->status.ampdu_ack_len = success ? 1 : 0;
-
-		if (!success)
-			tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+		/*
+		 * TODO: Need to tear down BA session here
+		 * if not successful.
+		 */
 	}
 
 	if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
@@ -587,7 +588,7 @@
 	return 0;
 }
 
-void rt2x00lib_rxdone(struct queue_entry *entry)
+void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct rxdone_entry_desc rxdesc;
@@ -607,7 +608,7 @@
 	 * Allocate a new sk_buffer. If no new buffer available, drop the
 	 * received frame and reuse the existing buffer.
 	 */
-	skb = rt2x00queue_alloc_rxskb(entry);
+	skb = rt2x00queue_alloc_rxskb(entry, gfp);
 	if (!skb)
 		goto submit_entry;
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index ca585e3..8679d78 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -124,17 +124,15 @@
 
 void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
 {
-	char dev_name[16];
-	char name[32];
+	char name[36];
 	int retval;
 	unsigned long on_period;
 	unsigned long off_period;
-
-	snprintf(dev_name, sizeof(dev_name), "%s-%s",
-		 rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy));
+	const char *phy_name = wiphy_name(rt2x00dev->hw->wiphy);
 
 	if (rt2x00dev->led_radio.flags & LED_INITIALIZED) {
-		snprintf(name, sizeof(name), "%s::radio", dev_name);
+		snprintf(name, sizeof(name), "%s-%s::radio",
+			 rt2x00dev->ops->name, phy_name);
 
 		retval = rt2x00leds_register_led(rt2x00dev,
 						 &rt2x00dev->led_radio,
@@ -144,7 +142,8 @@
 	}
 
 	if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) {
-		snprintf(name, sizeof(name), "%s::assoc", dev_name);
+		snprintf(name, sizeof(name), "%s-%s::assoc",
+			 rt2x00dev->ops->name, phy_name);
 
 		retval = rt2x00leds_register_led(rt2x00dev,
 						 &rt2x00dev->led_assoc,
@@ -154,7 +153,8 @@
 	}
 
 	if (rt2x00dev->led_qual.flags & LED_INITIALIZED) {
-		snprintf(name, sizeof(name), "%s::quality", dev_name);
+		snprintf(name, sizeof(name), "%s-%s::quality",
+			 rt2x00dev->ops->name, phy_name);
 
 		retval = rt2x00leds_register_led(rt2x00dev,
 						 &rt2x00dev->led_qual,
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 78bd43b..a093598 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -103,7 +103,7 @@
  * rt2x00queue_alloc_rxskb - allocate a skb for RX purposes.
  * @entry: The entry for which the skb will be applicable.
  */
-struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry);
+struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp);
 
 /**
  * rt2x00queue_free_skb - free a skb
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 2df2eb6..b49773e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -709,9 +709,19 @@
 			rt2x00dev->intf_associated--;
 
 		rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
+
+		clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
 	}
 
 	/*
+	 * Check for access points which do not support 802.11e. We have to
+	 * generate data frame sequence numbers in S/W for such an AP, because
+	 * of a H/W bug.
+	 */
+	if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
+		set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
+
+	/*
 	 * When the erp information has changed, we should perform
 	 * additional configuration steps. For all other changes we are done.
 	 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 17148bb..0a4653a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -92,7 +92,7 @@
 		/*
 		 * Send the frame to rt2x00lib for further processing.
 		 */
-		rt2x00lib_rxdone(entry);
+		rt2x00lib_rxdone(entry, GFP_ATOMIC);
 	}
 
 	return !max_rx;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 9b1b2b7..4c662ec 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -33,7 +33,7 @@
 #include "rt2x00.h"
 #include "rt2x00lib.h"
 
-struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
+struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct sk_buff *skb;
@@ -68,7 +68,7 @@
 	/*
 	 * Allocate skbuffer.
 	 */
-	skb = dev_alloc_skb(frame_size + head_size + tail_size);
+	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
 	if (!skb)
 		return NULL;
 
@@ -213,8 +213,19 @@
 
 	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 
-	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags))
-		return;
+	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
+		/*
+		 * rt2800 has a H/W (or F/W) bug: the device incorrectly
+		 * increases the seqno on retransmitted data (non-QOS) frames.
+		 * To work around the problem, let's generate the seqno in
+		 * software if QOS is disabled.
+		 */
+		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
+			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+		else
+			/* H/W will generate sequence number */
+			return;
+	}
 
 	/*
 	 * The hardware is not able to insert a sequence number. Assign a
@@ -320,14 +331,6 @@
 		txdesc->u.ht.wcid = sta_priv->wcid;
 	}
 
-	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */
-
-	/*
-	 * Only one STBC stream is supported for now.
-	 */
-	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
-		txdesc->u.ht.stbc = 1;
-
 	/*
 	 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
 	 * mcs rate to be used
@@ -351,6 +354,24 @@
 			txdesc->u.ht.mcs |= 0x08;
 	}
 
+	if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
+		if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+			txdesc->u.ht.txop = TXOP_SIFS;
+		else
+			txdesc->u.ht.txop = TXOP_BACKOFF;
+
+		/* Left zero on all other settings. */
+		return;
+	}
+
+	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */
+
+	/*
+	 * Only one STBC stream is supported for now.
+	 */
+	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+		txdesc->u.ht.stbc = 1;
+
 	/*
 	 * This frame is eligible for an AMPDU, however, don't aggregate
 	 * frames that are intended to probe a specific tx rate.
@@ -1142,7 +1163,7 @@
 	struct sk_buff *skb;
 
 	for (i = 0; i < queue->limit; i++) {
-		skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
+		skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
 		if (!skb)
 			return -ENOMEM;
 		queue->entries[i].skb = skb;
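
When QoS is disabled, the descriptor hunk above clears ENTRY_TXD_GENERATE_SEQ instead of returning early, so the sequence number is assigned by the driver's software fallback further down in the same function rather than by the buggy hardware counter. In outline, that software path looks like the following sketch (per-interface counter and names simplified, not the exact rt2x00 code):

	/* needs <linux/ieee80211.h>, <linux/atomic.h> */
	static void demo_sw_seqno(struct ieee80211_hdr *hdr, atomic_t *seqno)
	{
		u16 seq;

		/* the sequence number occupies bits 4..15 of seq_ctrl,
		 * so the counter advances in steps of 0x10 */
		seq = (u16)atomic_add_return(0x10, seqno);
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq & IEEE80211_SCTL_SEQ);
	}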
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 66094eb..d357d1e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -358,7 +358,7 @@
 		/*
 		 * Send the frame to rt2x00lib for further processing.
 		 */
-		rt2x00lib_rxdone(entry);
+		rt2x00lib_rxdone(entry, GFP_KERNEL);
 	}
 }
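
The gfp_t plumbing above lets rt2x00lib_rxdone() and rt2x00queue_alloc_rxskb() allocate the replacement RX skb with GFP_ATOMIC when called from the PCI interrupt/tasklet path and with GFP_KERNEL from the USB RX work and the initial queue fill, instead of always using the atomic dev_alloc_skb(). Condensed to its core (wrapper name hypothetical), the allocation now reads:

	static struct sk_buff *demo_alloc_rx_skb(unsigned int frame_size,
						 unsigned int head_size,
						 unsigned int tail_size, gfp_t gfp)
	{
		/* gfp is GFP_ATOMIC in IRQ/tasklet context, GFP_KERNEL otherwise */
		return __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	}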
 
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index e0c6d11..ee22bd7 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -3092,15 +3092,4 @@
 	.resume		= rt2x00pci_resume,
 };
 
-static int __init rt61pci_init(void)
-{
-	return pci_register_driver(&rt61pci_driver);
-}
-
-static void __exit rt61pci_exit(void)
-{
-	pci_unregister_driver(&rt61pci_driver);
-}
-
-module_init(rt61pci_init);
-module_exit(rt61pci_exit);
+module_pci_driver(rt61pci_driver);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index e477a96..1551366 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2412,6 +2412,7 @@
 	{ USB_DEVICE(0x0b05, 0x1723) },
 	{ USB_DEVICE(0x0b05, 0x1724) },
 	/* Belkin */
+	{ USB_DEVICE(0x050d, 0x7050) },	/* FCC ID: K7SF5D7050B ver. 3.x */
 	{ USB_DEVICE(0x050d, 0x705a) },
 	{ USB_DEVICE(0x050d, 0x905b) },
 	{ USB_DEVICE(0x050d, 0x905c) },
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 2f14a5f..2bebcb7 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1173,15 +1173,4 @@
 #endif /* CONFIG_PM */
 };
 
-static int __init rtl8180_init(void)
-{
-	return pci_register_driver(&rtl8180_driver);
-}
-
-static void __exit rtl8180_exit(void)
-{
-	pci_unregister_driver(&rtl8180_driver);
-}
-
-module_init(rtl8180_init);
-module_exit(rtl8180_exit);
+module_pci_driver(rtl8180_driver);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index cf53ac9..d811496 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -294,6 +294,7 @@
 		hdr->retry = cpu_to_le32((info->control.rates[0].count - 1) << 8);
 		hdr->tx_duration =
 			ieee80211_generic_frame_duration(dev, priv->vif,
+							 info->band,
 							 skb->len, txrate);
 		buf = hdr;
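
ieee80211_generic_frame_duration() now takes the band explicitly instead of deriving it from the hardware's current channel, which is why info->band is inserted into the argument list here. For reference, the updated prototype should look roughly like the following (paraphrased from include/net/mac80211.h of this series, so treat it as a sketch rather than the authoritative declaration):

	#include <net/mac80211.h>

	/* Frame duration for the given band, frame length and rate. */
	__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
						struct ieee80211_vif *vif,
						enum ieee80211_band band,
						size_t frame_len,
						struct ieee80211_rate *rate);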
 
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index e54488d..f4c852c 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1460,7 +1460,7 @@
 		return;
 
 	/* and only beacons from the associated BSSID, please */
-	if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid))
+	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
 		return;
 
 	if (rtl_find_221_ie(hw, data, len))
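
Note the inverted sense when converting: compare_ether_addr() is memcmp-like and returns 0 when the two addresses match, whereas ether_addr_equal() returns true on a match, so "if (compare_ether_addr(a, b))" becomes "if (!ether_addr_equal(a, b))" as above. A small sketch (both helpers live in <linux/etherdevice.h>; the example_* name is hypothetical):

	#include <linux/etherdevice.h>

	/*
	 *   old: if (compare_ether_addr(a, b))   // non-zero -> addresses differ
	 *   new: if (!ether_addr_equal(a, b))    // false    -> addresses differ
	 */
	static bool example_is_our_bssid(const u8 *addr3, const u8 *bssid)
	{
		return ether_addr_equal(addr3, bssid);
	}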
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 5c7d579..3d8cc4a 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -328,10 +328,9 @@
 		RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "sta_addr is NULL\n");
 	}
 
-	if ((sta_addr[0]|sta_addr[1]|sta_addr[2]|sta_addr[3]|\
-				sta_addr[4]|sta_addr[5]) == 0) {
+	if (is_zero_ether_addr(sta_addr)) {
 		RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
-			 "sta_addr is 00:00:00:00:00:00\n");
+			 "sta_addr is %pM\n", sta_addr);
 		return;
 	}
 	/* Does STA already exist? */
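
Two small idioms replace the open-coded check here: is_zero_ether_addr() tests all six octets at once, and the %pM printk specifier formats the address itself, so the zero address prints as 00:00:00:00:00:00 instead of a hard-coded string. A minimal sketch with a hypothetical example_* wrapper:

	#include <linux/etherdevice.h>
	#include <linux/printk.h>

	static bool example_sta_addr_valid(const u8 *sta_addr)
	{
		if (is_zero_ether_addr(sta_addr)) {
			/* %pM prints the MAC address, here all zeros. */
			pr_err("sta_addr is %pM\n", sta_addr);
			return false;
		}
		return true;
	}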
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 288b035..2062ea1 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -34,6 +34,7 @@
 #include "ps.h"
 #include "efuse.h"
 #include <linux/export.h>
+#include <linux/kmemleak.h>
 
 static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
 	PCI_VENDOR_ID_INTEL,
@@ -1099,6 +1100,7 @@
 			u32 bufferaddress;
 			if (!skb)
 				return 0;
+			kmemleak_not_leak(skb);
 			entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
 
 			/*skb->dev = dev; */
@@ -1851,14 +1853,6 @@
 	/*like read eeprom and so on */
 	rtlpriv->cfg->ops->read_eeprom_info(hw);
 
-	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
-		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
-		err = -ENODEV;
-		goto fail3;
-	}
-
-	rtlpriv->cfg->ops->init_sw_leds(hw);
-
 	/*aspm */
 	rtl_pci_init_aspm(hw);
 
@@ -1877,6 +1871,14 @@
 		goto fail3;
 	}
 
+	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+		err = -ENODEV;
+		goto fail3;
+	}
+
+	rtlpriv->cfg->ops->init_sw_leds(hw);
+
 	err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
 	if (err) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -1941,6 +1943,7 @@
 		rtl_deinit_deferred_work(hw);
 		rtlpriv->intf_ops->adapter_stop(hw);
 	}
+	rtlpriv->cfg->ops->disable_interrupt(hw);
 
 	/*deinit rfkill */
 	rtl_deinit_rfkill(hw);
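
Two related changes in rtl_pci here: in the probe path the init_sw_vars()/init_sw_leds() block is moved later in the sequence (same code, same "fail3" unwinding, it now runs only after the intervening setup steps have succeeded), and the remove path calls disable_interrupt() before the rfkill teardown so the device stops raising interrupts while it is being dismantled. A self-contained sketch of the probe-ordering idiom; every example_* name is hypothetical and merely stands in for the rtlpriv->cfg->ops callbacks:

	#include <linux/errno.h>

	struct example_dev { int dummy; };

	static int example_setup_hw(struct example_dev *dev)      { return 0; }
	static int example_init_sw_vars(struct example_dev *dev)  { return 0; }
	static void example_init_sw_leds(struct example_dev *dev) { }
	static void example_teardown(struct example_dev *dev)     { }

	static int example_probe(struct example_dev *dev)
	{
		int err;

		err = example_setup_hw(dev);		/* fallible setup first */
		if (err)
			goto fail;

		err = example_init_sw_vars(dev);	/* now runs after setup */
		if (err)
			goto fail;

		example_init_sw_leds(dev);		/* only on full success */
		return 0;

	fail:
		example_teardown(dev);
		return err;
	}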
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 5b9c3b5..5ae2664 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -480,7 +480,7 @@
 		return;
 
 	/* and only beacons from the associated BSSID, please */
-	if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid))
+	if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
 		return;
 
 	rtlpriv->psc.last_beacon = jiffies;
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index c66f08a..d5cbf01 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -225,8 +225,7 @@
 static void rtl_rate_update(void *ppriv,
 			    struct ieee80211_supported_band *sband,
 			    struct ieee80211_sta *sta, void *priv_sta,
-			    u32 changed,
-			    enum nl80211_channel_type oper_chan_type)
+			    u32 changed)
 {
 }
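
mac80211 dropped the oper_chan_type argument from the rate_control_ops .rate_update callback, so rate-control implementations now only receive the "changed" bitmap; rtlwifi's empty stub is adjusted to the new signature above. A conforming no-op stub would look like this (signature mirrors the hunk; the example_ name is hypothetical):

	#include <net/mac80211.h>

	static void example_rate_update(void *priv,
					struct ieee80211_supported_band *sband,
					struct ieee80211_sta *sta, void *priv_sta,
					u32 changed)
	{
		/* nothing to do, mirroring rtl_rate_update() above */
	}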
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 1208b75..f7f48c7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -33,9 +33,6 @@
 #include "../pci.h"
 #include "../base.h"
 
-struct dig_t dm_digtable;
-static struct ps_t dm_pstable;
-
 #define BT_RSSI_STATE_NORMAL_POWER	BIT_OFFSET_LEN_MASK_32(0, 1)
 #define BT_RSSI_STATE_AMDPU_OFF		BIT_OFFSET_LEN_MASK_32(1, 1)
 #define BT_RSSI_STATE_SPECIAL_LOW	BIT_OFFSET_LEN_MASK_32(2, 1)
@@ -163,33 +160,37 @@
 
 static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
 {
-	dm_digtable.dig_enable_flag = true;
-	dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-	dm_digtable.cur_igvalue = 0x20;
-	dm_digtable.pre_igvalue = 0x0;
-	dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
-	dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
-	dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
-	dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
-	dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
-	dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
-	dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
-	dm_digtable.rx_gain_range_max = DM_DIG_MAX;
-	dm_digtable.rx_gain_range_min = DM_DIG_MIN;
-	dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
-	dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
-	dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
-	dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
-	dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+	dm_digtable->dig_enable_flag = true;
+	dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+	dm_digtable->cur_igvalue = 0x20;
+	dm_digtable->pre_igvalue = 0x0;
+	dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+	dm_digtable->presta_connectstate = DIG_STA_DISCONNECT;
+	dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+	dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
+	dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
+	dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+	dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+	dm_digtable->rx_gain_range_max = DM_DIG_MAX;
+	dm_digtable->rx_gain_range_min = DM_DIG_MIN;
+	dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
+	dm_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
+	dm_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+	dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
+	dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
 }
 
 static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 	long rssi_val_min = 0;
 
-	if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
-	    (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
+	if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
+	    (dm_digtable->cursta_connectctate == DIG_STA_CONNECT)) {
 		if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
 			rssi_val_min =
 			    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
@@ -198,10 +199,10 @@
 			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
 		else
 			rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
-	} else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
-		   dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
+	} else if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT ||
+		   dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
 		rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
-	} else if (dm_digtable.curmultista_connectstate ==
+	} else if (dm_digtable->curmultista_connectstate ==
 		   DIG_MULTISTA_CONNECT) {
 		rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
 	}
@@ -260,7 +261,8 @@
 static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u8 value_igi = dm_digtable.cur_igvalue;
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+	u8 value_igi = dm_digtable->cur_igvalue;
 
 	if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
 		value_igi--;
@@ -277,43 +279,44 @@
 	if (rtlpriv->falsealm_cnt.cnt_all > 10000)
 		value_igi = 0x32;
 
-	dm_digtable.cur_igvalue = value_igi;
+	dm_digtable->cur_igvalue = value_igi;
 	rtl92c_dm_write_dig(hw);
 }
 
 static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
-	if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
-		if ((dm_digtable.backoff_val - 2) <
-		    dm_digtable.backoff_val_range_min)
-			dm_digtable.backoff_val =
-			    dm_digtable.backoff_val_range_min;
+	if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable->fa_highthresh) {
+		if ((dm_digtable->backoff_val - 2) <
+		    dm_digtable->backoff_val_range_min)
+			dm_digtable->backoff_val =
+			    dm_digtable->backoff_val_range_min;
 		else
-			dm_digtable.backoff_val -= 2;
-	} else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
-		if ((dm_digtable.backoff_val + 2) >
-		    dm_digtable.backoff_val_range_max)
-			dm_digtable.backoff_val =
-			    dm_digtable.backoff_val_range_max;
+			dm_digtable->backoff_val -= 2;
+	} else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable->fa_lowthresh) {
+		if ((dm_digtable->backoff_val + 2) >
+		    dm_digtable->backoff_val_range_max)
+			dm_digtable->backoff_val =
+			    dm_digtable->backoff_val_range_max;
 		else
-			dm_digtable.backoff_val += 2;
+			dm_digtable->backoff_val += 2;
 	}
 
-	if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
-	    dm_digtable.rx_gain_range_max)
-		dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
-	else if ((dm_digtable.rssi_val_min + 10 -
-		  dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
-		dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
+	if ((dm_digtable->rssi_val_min + 10 - dm_digtable->backoff_val) >
+	    dm_digtable->rx_gain_range_max)
+		dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_max;
+	else if ((dm_digtable->rssi_val_min + 10 -
+		  dm_digtable->backoff_val) < dm_digtable->rx_gain_range_min)
+		dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_min;
 	else
-		dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
-		    dm_digtable.backoff_val;
+		dm_digtable->cur_igvalue = dm_digtable->rssi_val_min + 10 -
+		    dm_digtable->backoff_val;
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
 		 "rssi_val_min = %x backoff_val %x\n",
-		 dm_digtable.rssi_val_min, dm_digtable.backoff_val);
+		 dm_digtable->rssi_val_min, dm_digtable->backoff_val);
 
 	rtl92c_dm_write_dig(hw);
 }
@@ -322,6 +325,7 @@
 {
 	static u8 initialized; /* initialized to false */
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
 	bool multi_sta = false;
@@ -330,68 +334,69 @@
 		multi_sta = true;
 
 	if (!multi_sta ||
-	    dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
+	    dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
 		initialized = false;
-		dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
 		return;
 	} else if (initialized == false) {
 		initialized = true;
-		dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
-		dm_digtable.cur_igvalue = 0x20;
+		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+		dm_digtable->cur_igvalue = 0x20;
 		rtl92c_dm_write_dig(hw);
 	}
 
-	if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
-		if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
-		    (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
+	if (dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) {
+		if ((rssi_strength < dm_digtable->rssi_lowthresh) &&
+		    (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
 
-			if (dm_digtable.dig_ext_port_stage ==
+			if (dm_digtable->dig_ext_port_stage ==
 			    DIG_EXT_PORT_STAGE_2) {
-				dm_digtable.cur_igvalue = 0x20;
+				dm_digtable->cur_igvalue = 0x20;
 				rtl92c_dm_write_dig(hw);
 			}
 
-			dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
-		} else if (rssi_strength > dm_digtable.rssi_highthresh) {
-			dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
+			dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
+		} else if (rssi_strength > dm_digtable->rssi_highthresh) {
+			dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
 			rtl92c_dm_ctrl_initgain_by_fa(hw);
 		}
-	} else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
-		dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
-		dm_digtable.cur_igvalue = 0x20;
+	} else if (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
+		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
+		dm_digtable->cur_igvalue = 0x20;
 		rtl92c_dm_write_dig(hw);
 	}
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
 		 "curmultista_connectstate = %x dig_ext_port_stage %x\n",
-		 dm_digtable.curmultista_connectstate,
-		 dm_digtable.dig_ext_port_stage);
+		 dm_digtable->curmultista_connectstate,
+		 dm_digtable->dig_ext_port_stage);
 }
 
 static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
 		 "presta_connectstate = %x, cursta_connectctate = %x\n",
-		 dm_digtable.presta_connectstate,
-		 dm_digtable.cursta_connectctate);
+		 dm_digtable->presta_connectstate,
+		 dm_digtable->cursta_connectctate);
 
-	if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
-	    || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
-	    || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
+	if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectctate
+	    || dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT
+	    || dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
 
-		if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
-			dm_digtable.rssi_val_min =
+		if (dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
+			dm_digtable->rssi_val_min =
 			    rtl92c_dm_initial_gain_min_pwdb(hw);
 			rtl92c_dm_ctrl_initgain_by_rssi(hw);
 		}
 	} else {
-		dm_digtable.rssi_val_min = 0;
-		dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-		dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
-		dm_digtable.cur_igvalue = 0x20;
-		dm_digtable.pre_igvalue = 0;
+		dm_digtable->rssi_val_min = 0;
+		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+		dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
+		dm_digtable->cur_igvalue = 0x20;
+		dm_digtable->pre_igvalue = 0;
 		rtl92c_dm_write_dig(hw);
 	}
 }
@@ -400,40 +405,41 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
-	if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
-		dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
+	if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+		dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
 
-		if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
-			if (dm_digtable.rssi_val_min <= 25)
-				dm_digtable.cur_cck_pd_state =
+		if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+			if (dm_digtable->rssi_val_min <= 25)
+				dm_digtable->cur_cck_pd_state =
 				    CCK_PD_STAGE_LowRssi;
 			else
-				dm_digtable.cur_cck_pd_state =
+				dm_digtable->cur_cck_pd_state =
 				    CCK_PD_STAGE_HighRssi;
 		} else {
-			if (dm_digtable.rssi_val_min <= 20)
-				dm_digtable.cur_cck_pd_state =
+			if (dm_digtable->rssi_val_min <= 20)
+				dm_digtable->cur_cck_pd_state =
 				    CCK_PD_STAGE_LowRssi;
 			else
-				dm_digtable.cur_cck_pd_state =
+				dm_digtable->cur_cck_pd_state =
 				    CCK_PD_STAGE_HighRssi;
 		}
 	} else {
-		dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+		dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
 	}
 
-	if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
-		if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
+	if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
+		if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
 			if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
-				dm_digtable.cur_cck_fa_state =
+				dm_digtable->cur_cck_fa_state =
 				    CCK_FA_STAGE_High;
 			else
-				dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
+				dm_digtable->cur_cck_fa_state = CCK_FA_STAGE_Low;
 
-			if (dm_digtable.pre_cck_fa_state !=
-			    dm_digtable.cur_cck_fa_state) {
-				if (dm_digtable.cur_cck_fa_state ==
+			if (dm_digtable->pre_cck_fa_state !=
+			    dm_digtable->cur_cck_fa_state) {
+				if (dm_digtable->cur_cck_fa_state ==
 				    CCK_FA_STAGE_Low)
 					rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
 						      0x83);
@@ -441,8 +447,8 @@
 					rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
 						      0xcd);
 
-				dm_digtable.pre_cck_fa_state =
-				    dm_digtable.cur_cck_fa_state;
+				dm_digtable->pre_cck_fa_state =
+				    dm_digtable->cur_cck_fa_state;
 			}
 
 			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
@@ -458,11 +464,11 @@
 				rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
 					      MASKBYTE2, 0xd3);
 		}
-		dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
+		dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
 	}
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n",
-		 dm_digtable.cur_cck_pd_state);
+		 dm_digtable->cur_cck_pd_state);
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n",
 		 IS_92C_SERIAL(rtlhal->version));
@@ -470,31 +476,34 @@
 
 static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
 {
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
 	if (mac->act_scanning)
 		return;
 
 	if (mac->link_state >= MAC80211_LINKED)
-		dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
+		dm_digtable->cursta_connectctate = DIG_STA_CONNECT;
 	else
-		dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+		dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
 
 	rtl92c_dm_initial_gain_sta(hw);
 	rtl92c_dm_initial_gain_multi_sta(hw);
 	rtl92c_dm_cck_packet_detection_thresh(hw);
 
-	dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
+	dm_digtable->presta_connectstate = dm_digtable->cursta_connectctate;
 
 }
 
 static void rtl92c_dm_dig(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
 	if (rtlpriv->dm.dm_initialgain_enable == false)
 		return;
-	if (dm_digtable.dig_enable_flag == false)
+	if (dm_digtable->dig_enable_flag == false)
 		return;
 
 	rtl92c_dm_ctrl_initgain_by_twoport(hw);
@@ -514,23 +523,24 @@
 void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 		 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
-		 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
-		 dm_digtable.backoff_val);
+		 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+		 dm_digtable->backoff_val);
 
-	dm_digtable.cur_igvalue += 2;
-	if (dm_digtable.cur_igvalue > 0x3f)
-		dm_digtable.cur_igvalue = 0x3f;
+	dm_digtable->cur_igvalue += 2;
+	if (dm_digtable->cur_igvalue > 0x3f)
+		dm_digtable->cur_igvalue = 0x3f;
 
-	if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
+	if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
 		rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
-			      dm_digtable.cur_igvalue);
+			      dm_digtable->cur_igvalue);
 		rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
-			      dm_digtable.cur_igvalue);
+			      dm_digtable->cur_igvalue);
 
-		dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
+		dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
 	}
 }
 EXPORT_SYMBOL(rtl92c_dm_write_dig);
@@ -1223,15 +1233,20 @@
 
 static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
 {
-	dm_pstable.pre_ccastate = CCA_MAX;
-	dm_pstable.cur_ccasate = CCA_MAX;
-	dm_pstable.pre_rfstate = RF_MAX;
-	dm_pstable.cur_rfstate = RF_MAX;
-	dm_pstable.rssi_val_min = 0;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
+
+	dm_pstable->pre_ccastate = CCA_MAX;
+	dm_pstable->cur_ccasate = CCA_MAX;
+	dm_pstable->pre_rfstate = RF_MAX;
+	dm_pstable->cur_rfstate = RF_MAX;
+	dm_pstable->rssi_val_min = 0;
 }
 
 void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
 {
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
 	static u8 initialize;
 	static u32 reg_874, reg_c70, reg_85c, reg_a74;
 
@@ -1251,27 +1266,27 @@
 	}
 
 	if (!bforce_in_normal) {
-		if (dm_pstable.rssi_val_min != 0) {
-			if (dm_pstable.pre_rfstate == RF_NORMAL) {
-				if (dm_pstable.rssi_val_min >= 30)
-					dm_pstable.cur_rfstate = RF_SAVE;
+		if (dm_pstable->rssi_val_min != 0) {
+			if (dm_pstable->pre_rfstate == RF_NORMAL) {
+				if (dm_pstable->rssi_val_min >= 30)
+					dm_pstable->cur_rfstate = RF_SAVE;
 				else
-					dm_pstable.cur_rfstate = RF_NORMAL;
+					dm_pstable->cur_rfstate = RF_NORMAL;
 			} else {
-				if (dm_pstable.rssi_val_min <= 25)
-					dm_pstable.cur_rfstate = RF_NORMAL;
+				if (dm_pstable->rssi_val_min <= 25)
+					dm_pstable->cur_rfstate = RF_NORMAL;
 				else
-					dm_pstable.cur_rfstate = RF_SAVE;
+					dm_pstable->cur_rfstate = RF_SAVE;
 			}
 		} else {
-			dm_pstable.cur_rfstate = RF_MAX;
+			dm_pstable->cur_rfstate = RF_MAX;
 		}
 	} else {
-		dm_pstable.cur_rfstate = RF_NORMAL;
+		dm_pstable->cur_rfstate = RF_NORMAL;
 	}
 
-	if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
-		if (dm_pstable.cur_rfstate == RF_SAVE) {
+	if (dm_pstable->pre_rfstate != dm_pstable->cur_rfstate) {
+		if (dm_pstable->cur_rfstate == RF_SAVE) {
 			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
 				      0x1C0000, 0x2);
 			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
@@ -1293,7 +1308,7 @@
 			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
 		}
 
-		dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
+		dm_pstable->pre_rfstate = dm_pstable->cur_rfstate;
 	}
 }
 EXPORT_SYMBOL(rtl92c_dm_rf_saving);
@@ -1301,36 +1316,37 @@
 static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
 	if (((mac->link_state == MAC80211_NOLINK)) &&
 	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
-		dm_pstable.rssi_val_min = 0;
+		dm_pstable->rssi_val_min = 0;
 		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
 	}
 
 	if (mac->link_state == MAC80211_LINKED) {
 		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-			dm_pstable.rssi_val_min =
+			dm_pstable->rssi_val_min =
 			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
 			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
 				 "AP Client PWDB = 0x%lx\n",
-				 dm_pstable.rssi_val_min);
+				 dm_pstable->rssi_val_min);
 		} else {
-			dm_pstable.rssi_val_min =
+			dm_pstable->rssi_val_min =
 			    rtlpriv->dm.undecorated_smoothed_pwdb;
 			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
 				 "STA Default Port PWDB = 0x%lx\n",
-				 dm_pstable.rssi_val_min);
+				 dm_pstable->rssi_val_min);
 		}
 	} else {
-		dm_pstable.rssi_val_min =
+		dm_pstable->rssi_val_min =
 		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
 
 		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
 			 "AP Ext Port PWDB = 0x%lx\n",
-			 dm_pstable.rssi_val_min);
+			 dm_pstable->rssi_val_min);
 	}
 
 	if (IS_92C_SERIAL(rtlhal->version))
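
The file-scope "struct dig_t dm_digtable" and "static struct ps_t dm_pstable" globals are dropped, and every user now goes through per-device instances embedded in struct rtl_priv (the rtlpriv->dm_digtable and rtlpriv->dm_pstable fields referenced throughout the hunks above), so two adapters driven by the same module no longer share DIG and BB power-saving state. A compact sketch of the pattern; the struct layout below is simplified and only the dm_digtable/dm_pstable field names mirror the real ones:

	#include <linux/types.h>

	struct example_dig_state { u8 cur_igvalue; };
	struct example_ps_state  { u8 cur_rfstate; };

	struct example_priv {
		struct example_dig_state dm_digtable;	/* was: global dm_digtable */
		struct example_ps_state  dm_pstable;	/* was: static dm_pstable  */
	};

	static void example_dig_init(struct example_priv *priv)
	{
		/* Take a per-device pointer once, then use "->" everywhere. */
		struct example_dig_state *dig = &priv->dm_digtable;

		dig->cur_igvalue = 0x20;
	}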
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
index 2178e37..518e208 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -91,40 +91,6 @@
 #define TX_POWER_NEAR_FIELD_THRESH_LVL2		74
 #define TX_POWER_NEAR_FIELD_THRESH_LVL1		67
 
-struct ps_t {
-	u8 pre_ccastate;
-	u8 cur_ccasate;
-	u8 pre_rfstate;
-	u8 cur_rfstate;
-	long rssi_val_min;
-};
-
-struct dig_t {
-	u8 dig_enable_flag;
-	u8 dig_ext_port_stage;
-	u32 rssi_lowthresh;
-	u32 rssi_highthresh;
-	u32 fa_lowthresh;
-	u32 fa_highthresh;
-	u8 cursta_connectctate;
-	u8 presta_connectstate;
-	u8 curmultista_connectstate;
-	u8 pre_igvalue;
-	u8 cur_igvalue;
-	char backoff_val;
-	char backoff_val_range_max;
-	char backoff_val_range_min;
-	u8 rx_gain_range_max;
-	u8 rx_gain_range_min;
-	u8 rssi_val_min;
-	u8 pre_cck_pd_state;
-	u8 cur_cck_pd_state;
-	u8 pre_cck_fa_state;
-	u8 cur_cck_fa_state;
-	u8 pre_ccastate;
-	u8 cur_ccasate;
-};
-
 struct swat_t {
 	u8 failure_cnt;
 	u8 try_flag;
@@ -189,7 +155,6 @@
 	DIG_CONNECT_MAX
 };
 
-extern struct dig_t dm_digtable;
 void rtl92c_dm_init(struct ieee80211_hw *hw);
 void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
 void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index c20b3c3..692c8ef 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -34,6 +34,7 @@
 #include "../rtl8192ce/def.h"
 #include "fw_common.h"
 #include <linux/export.h>
+#include <linux/kmemleak.h>
 
 static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
 {
@@ -776,6 +777,8 @@
 	skb = dev_alloc_skb(totalpacketlen);
 	if (!skb)
 		return;
+	kmemleak_not_leak(skb);
+
 	memcpy((u8 *) skb_put(skb, totalpacketlen),
 	       &reserved_page_packet, totalpacketlen);
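
kmemleak_not_leak(), from <linux/kmemleak.h>, tells the kernel's memory-leak detector not to report the object: these buffers are handed off to the hardware (DMA rings, firmware download), presumably leaving the remaining references in places kmemleak does not scan, so without the hint they would show up as false positives. A minimal sketch of the annotation; example_* is hypothetical:

	#include <linux/kmemleak.h>
	#include <linux/skbuff.h>

	static struct sk_buff *example_alloc_hw_buffer(unsigned int len)
	{
		struct sk_buff *skb = dev_alloc_skb(len);

		if (!skb)
			return NULL;

		/* Suppress the false-positive kmemleak report for this buffer. */
		kmemleak_not_leak(skb);
		return skb;
	}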
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 4c01624..cdcad7d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1881,6 +1881,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct dig_t dm_digtable = rtlpriv->dm_digtable;
 
 	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
 		 "--->Cmd(%#x), set_io_inprogress(%d)\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 26747fa..d4a3d03 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -86,40 +86,6 @@
 #define TX_POWER_NEAR_FIELD_THRESH_LVL2		74
 #define TX_POWER_NEAR_FIELD_THRESH_LVL1		67
 
-struct ps_t {
-	u8 pre_ccastate;
-	u8 cur_ccasate;
-	u8 pre_rfstate;
-	u8 cur_rfstate;
-	long rssi_val_min;
-};
-
-struct dig_t {
-	u8 dig_enable_flag;
-	u8 dig_ext_port_stage;
-	u32 rssi_lowthresh;
-	u32 rssi_highthresh;
-	u32 fa_lowthresh;
-	u32 fa_highthresh;
-	u8 cursta_connectctate;
-	u8 presta_connectstate;
-	u8 curmultista_connectstate;
-	u8 pre_igvalue;
-	u8 cur_igvalue;
-	char backoff_val;
-	char backoff_val_range_max;
-	char backoff_val_range_min;
-	u8 rx_gain_range_max;
-	u8 rx_gain_range_min;
-	u8 rssi_val_min;
-	u8 pre_cck_pd_state;
-	u8 cur_cck_pd_state;
-	u8 pre_cck_fa_state;
-	u8 cur_cck_fa_state;
-	u8 pre_ccastate;
-	u8 cur_ccasate;
-};
-
 struct swat_t {
 	u8 failure_cnt;
 	u8 try_flag;
@@ -184,7 +150,6 @@
 	DIG_CONNECT_MAX
 };
 
-extern struct dig_t dm_digtable;
 void rtl92c_dm_init(struct ieee80211_hw *hw);
 void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
 void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 2c3b733..3aa927f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -389,21 +389,4 @@
 	.driver.pm = &rtlwifi_pm_ops,
 };
 
-static int __init rtl92ce_module_init(void)
-{
-	int ret;
-
-	ret = pci_register_driver(&rtl92ce_driver);
-	if (ret)
-		RT_ASSERT(false, "No device found\n");
-
-	return ret;
-}
-
-static void __exit rtl92ce_module_exit(void)
-{
-	pci_unregister_driver(&rtl92ce_driver);
-}
-
-module_init(rtl92ce_module_init);
-module_exit(rtl92ce_module_exit);
+module_pci_driver(rtl92ce_driver);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 37b1363..3af874e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -508,14 +508,14 @@
 
 	packet_matchbssid =
 	    ((IEEE80211_FTYPE_CTL != type) &&
-	     (!compare_ether_addr(mac->bssid,
-				  (c_fc & IEEE80211_FCTL_TODS) ?
-				  hdr->addr1 : (c_fc & IEEE80211_FCTL_FROMDS) ?
-				  hdr->addr2 : hdr->addr3)) &&
+	     ether_addr_equal(mac->bssid,
+			      (c_fc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+			      (c_fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+			      hdr->addr3) &&
 	     (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
 
 	packet_toself = packet_matchbssid &&
-	    (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+	     ether_addr_equal(praddr, rtlefuse->dev_addr);
 
 	if (ieee80211_is_beacon(fc))
 		packet_beacon = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index efb9ab2..c4adb97 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -530,12 +530,7 @@
 	SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
 
 #define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)	\
-do {							\
-	if (_size > TX_DESC_NEXT_DESC_OFFSET)		\
-		memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);	\
-	else						\
-		memset(__pdesc, 0, _size);	\
-} while (0);
+	memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
 
 struct rx_fwinfo_92c {
 	u8 gain_trsw[4];
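
The CLEAR_PCI_TX_DESC_CONTENT rewrite (repeated below for rtl8192de and rtl8192se) collapses the if/else inside a do { } while (0) into a single memset() whose length is clamped with min_t(); it also drops the stray semicolon after "while (0)", which made the old macro an empty extra statement at call sites and therefore unsafe in an if/else without braces. A standalone illustration of the clamp, with a hypothetical offset value:

	#include <linux/kernel.h>	/* min_t() */
	#include <linux/string.h>	/* memset() */

	#define EXAMPLE_NEXT_DESC_OFFSET	40	/* placeholder value */

	/* Clear at most EXAMPLE_NEXT_DESC_OFFSET bytes of the descriptor. */
	#define EXAMPLE_CLEAR_DESC(__pdesc, __size)			\
		memset(__pdesc, 0,					\
		       min_t(size_t, __size, EXAMPLE_NEXT_DESC_OFFSET))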
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 025bdc2..7e91c76 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -1099,14 +1099,14 @@
 	praddr = hdr->addr1;
 	packet_matchbssid =
 	    ((IEEE80211_FTYPE_CTL != type) &&
-	     (!compare_ether_addr(mac->bssid,
-			  (cpu_fc & IEEE80211_FCTL_TODS) ?
-			  hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ?
-			  hdr->addr2 : hdr->addr3)) &&
+	     ether_addr_equal(mac->bssid,
+			      (cpu_fc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+			      (cpu_fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+			      hdr->addr3) &&
 	     (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
 
 	packet_toself = packet_matchbssid &&
-	    (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+	    ether_addr_equal(praddr, rtlefuse->dev_addr);
 	if (ieee80211_is_beacon(fc))
 		packet_beacon = true;
 	_rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 82c85286..7737fb0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -338,6 +338,7 @@
 	{RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/
 
 	/****** 8192CU ********/
+	{RTL_USB_DEVICE(0x050d, 0x1004, rtl92cu_hal_cfg)}, /*Belcom-SurfN300*/
 	{RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/
 	{RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/
 	{RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
index eafdf76..939c905 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
@@ -151,9 +151,6 @@
 
 /* for 92D */
 #define CHIP_92D_SINGLEPHY		BIT(9)
-#define C_CUT_VERSION			BIT(13)
-#define D_CUT_VERSION			((BIT(12)|BIT(13)))
-#define E_CUT_VERSION			BIT(14)
 
 /* Chip specific */
 #define CHIP_BONDING_IDENTIFIER(_value)	(((_value)>>22)&0x3)
@@ -173,7 +170,10 @@
 #define RF_TYPE_1T2R			BIT(4)
 #define RF_TYPE_2T2R			BIT(5)
 #define CHIP_VENDOR_UMC			BIT(7)
-#define B_CUT_VERSION			BIT(12)
+#define CHIP_92D_B_CUT			BIT(12)
+#define CHIP_92D_C_CUT			BIT(13)
+#define CHIP_92D_D_CUT			(BIT(13)|BIT(12))
+#define CHIP_92D_E_CUT			BIT(14)
 
 /* MASK */
 #define IC_TYPE_MASK			(BIT(0)|BIT(1)|BIT(2))
@@ -205,15 +205,13 @@
 					 CHIP_92D) ? true : false)
 #define IS_92D_C_CUT(version)		((IS_92D(version)) ?		\
 				 ((GET_CVID_CUT_VERSION(version) ==	\
-				 0x2000) ? true : false) : false)
+				 CHIP_92D_C_CUT) ? true : false) : false)
 #define IS_92D_D_CUT(version)			((IS_92D(version)) ?	\
 				 ((GET_CVID_CUT_VERSION(version) ==	\
-				 0x3000) ? true : false) : false)
+				 CHIP_92D_D_CUT) ? true : false) : false)
 #define IS_92D_E_CUT(version)		((IS_92D(version)) ?		\
 				 ((GET_CVID_CUT_VERSION(version) ==	\
-				 0x4000) ? true : false) : false)
-#define CHIP_92D_C_CUT			BIT(10)
-#define CHIP_92D_D_CUT			BIT(11)
+				 CHIP_92D_E_CUT) ? true : false) : false)
 
 enum rf_optype {
 	RF_OP_BY_SW_3WIRE = 0,
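
With this change the IS_92D_C/D/E_CUT() helpers compare GET_CVID_CUT_VERSION() against the same named bits that are stored in the version word, rather than against duplicated magic numbers. The mapping itself is unchanged, only named; as a reference, with values inferred from the BIT() definitions above:

	/* CHIP_92D_B_CUT = BIT(12)           = 0x1000
	 * CHIP_92D_C_CUT = BIT(13)           = 0x2000  (was the literal 0x2000)
	 * CHIP_92D_D_CUT = BIT(13) | BIT(12) = 0x3000  (was the literal 0x3000)
	 * CHIP_92D_E_CUT = BIT(14)           = 0x4000  (was the literal 0x4000)
	 */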
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 4737018..a7d63a8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -37,8 +37,6 @@
 
 #define UNDEC_SM_PWDB	entry_min_undecoratedsmoothed_pwdb
 
-struct dig_t de_digtable;
-
 static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
 	0x7f8001fe,		/* 0, +6.0dB */
 	0x788001e2,		/* 1, +5.5dB */
@@ -159,27 +157,30 @@
 
 static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
 {
-	de_digtable.dig_enable_flag = true;
-	de_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-	de_digtable.cur_igvalue = 0x20;
-	de_digtable.pre_igvalue = 0x0;
-	de_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
-	de_digtable.presta_connectstate = DIG_STA_DISCONNECT;
-	de_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
-	de_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
-	de_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
-	de_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
-	de_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
-	de_digtable.rx_gain_range_max = DM_DIG_FA_UPPER;
-	de_digtable.rx_gain_range_min = DM_DIG_FA_LOWER;
-	de_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
-	de_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
-	de_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
-	de_digtable.pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
-	de_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
-	de_digtable.large_fa_hit = 0;
-	de_digtable.recover_cnt = 0;
-	de_digtable.forbidden_igi = DM_DIG_FA_LOWER;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+
+	de_digtable->dig_enable_flag = true;
+	de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+	de_digtable->cur_igvalue = 0x20;
+	de_digtable->pre_igvalue = 0x0;
+	de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
+	de_digtable->presta_connectstate = DIG_STA_DISCONNECT;
+	de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+	de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
+	de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
+	de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+	de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+	de_digtable->rx_gain_range_max = DM_DIG_FA_UPPER;
+	de_digtable->rx_gain_range_min = DM_DIG_FA_LOWER;
+	de_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
+	de_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
+	de_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+	de_digtable->pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
+	de_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
+	de_digtable->large_fa_hit = 0;
+	de_digtable->recover_cnt = 0;
+	de_digtable->forbidden_igi = DM_DIG_FA_LOWER;
 }
 
 static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
@@ -266,68 +267,70 @@
 static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *de_digtable = &rtlpriv->dm_digtable;
 	struct rtl_mac *mac = rtl_mac(rtlpriv);
 
 	/* Determine the minimum RSSI  */
 	if ((mac->link_state < MAC80211_LINKED) &&
 	    (rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
-		de_digtable.min_undecorated_pwdb_for_dm = 0;
+		de_digtable->min_undecorated_pwdb_for_dm = 0;
 		RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
 			 "Not connected to any\n");
 	}
 	if (mac->link_state >= MAC80211_LINKED) {
 		if (mac->opmode == NL80211_IFTYPE_AP ||
 		    mac->opmode == NL80211_IFTYPE_ADHOC) {
-			de_digtable.min_undecorated_pwdb_for_dm =
+			de_digtable->min_undecorated_pwdb_for_dm =
 			    rtlpriv->dm.UNDEC_SM_PWDB;
 			RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
 				 "AP Client PWDB = 0x%lx\n",
 				 rtlpriv->dm.UNDEC_SM_PWDB);
 		} else {
-			de_digtable.min_undecorated_pwdb_for_dm =
+			de_digtable->min_undecorated_pwdb_for_dm =
 			    rtlpriv->dm.undecorated_smoothed_pwdb;
 			RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
 				 "STA Default Port PWDB = 0x%x\n",
-				 de_digtable.min_undecorated_pwdb_for_dm);
+				 de_digtable->min_undecorated_pwdb_for_dm);
 		}
 	} else {
-		de_digtable.min_undecorated_pwdb_for_dm =
+		de_digtable->min_undecorated_pwdb_for_dm =
 		    rtlpriv->dm.UNDEC_SM_PWDB;
 		RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
 			 "AP Ext Port or disconnect PWDB = 0x%x\n",
-			 de_digtable.min_undecorated_pwdb_for_dm);
+			 de_digtable->min_undecorated_pwdb_for_dm);
 	}
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
-		 de_digtable.min_undecorated_pwdb_for_dm);
+		 de_digtable->min_undecorated_pwdb_for_dm);
 }
 
 static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *de_digtable = &rtlpriv->dm_digtable;
 	unsigned long flag = 0;
 
-	if (de_digtable.cursta_connectctate == DIG_STA_CONNECT) {
-		if (de_digtable.pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
-			if (de_digtable.min_undecorated_pwdb_for_dm <= 25)
-				de_digtable.cur_cck_pd_state =
+	if (de_digtable->cursta_connectctate == DIG_STA_CONNECT) {
+		if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
+			if (de_digtable->min_undecorated_pwdb_for_dm <= 25)
+				de_digtable->cur_cck_pd_state =
 							 CCK_PD_STAGE_LOWRSSI;
 			else
-				de_digtable.cur_cck_pd_state =
+				de_digtable->cur_cck_pd_state =
 							 CCK_PD_STAGE_HIGHRSSI;
 		} else {
-			if (de_digtable.min_undecorated_pwdb_for_dm <= 20)
-				de_digtable.cur_cck_pd_state =
+			if (de_digtable->min_undecorated_pwdb_for_dm <= 20)
+				de_digtable->cur_cck_pd_state =
 							 CCK_PD_STAGE_LOWRSSI;
 			else
-				de_digtable.cur_cck_pd_state =
+				de_digtable->cur_cck_pd_state =
 							 CCK_PD_STAGE_HIGHRSSI;
 		}
 	} else {
-		de_digtable.cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
+		de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
 	}
-	if (de_digtable.pre_cck_pd_state != de_digtable.cur_cck_pd_state) {
-		if (de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
+	if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
+		if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
 			rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
 			rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83);
 			rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
@@ -336,13 +339,13 @@
 			rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd);
 			rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
 		}
-		de_digtable.pre_cck_pd_state = de_digtable.cur_cck_pd_state;
+		de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
 	}
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
-		 de_digtable.cursta_connectctate == DIG_STA_CONNECT ?
+		 de_digtable->cursta_connectctate == DIG_STA_CONNECT ?
 		 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
-		 de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
+		 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
 		 "Low RSSI " : "High RSSI ");
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
 		 IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
@@ -352,37 +355,40 @@
 void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *de_digtable = &rtlpriv->dm_digtable;
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 		 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
-		 de_digtable.cur_igvalue, de_digtable.pre_igvalue,
-		 de_digtable.backoff_val);
-	if (de_digtable.dig_enable_flag == false) {
+		 de_digtable->cur_igvalue, de_digtable->pre_igvalue,
+		 de_digtable->backoff_val);
+	if (de_digtable->dig_enable_flag == false) {
 		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
-		de_digtable.pre_igvalue = 0x17;
+		de_digtable->pre_igvalue = 0x17;
 		return;
 	}
-	if (de_digtable.pre_igvalue != de_digtable.cur_igvalue) {
+	if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) {
 		rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
-			      de_digtable.cur_igvalue);
+			      de_digtable->cur_igvalue);
 		rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
-			      de_digtable.cur_igvalue);
-		de_digtable.pre_igvalue = de_digtable.cur_igvalue;
+			      de_digtable->cur_igvalue);
+		de_digtable->pre_igvalue = de_digtable->cur_igvalue;
 	}
 }
 
 static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
 {
+	struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+
 	if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
 	    (rtlpriv->mac80211.vendor == PEER_CISCO)) {
 		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
-		if (de_digtable.last_min_undecorated_pwdb_for_dm >= 50
-		    && de_digtable.min_undecorated_pwdb_for_dm < 50) {
+		if (de_digtable->last_min_undecorated_pwdb_for_dm >= 50
+		    && de_digtable->min_undecorated_pwdb_for_dm < 50) {
 			rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
 			RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 				 "Early Mode Off\n");
-		} else if (de_digtable.last_min_undecorated_pwdb_for_dm <= 55 &&
-			   de_digtable.min_undecorated_pwdb_for_dm > 55) {
+		} else if (de_digtable->last_min_undecorated_pwdb_for_dm <= 55 &&
+			   de_digtable->min_undecorated_pwdb_for_dm > 55) {
 			rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
 			RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 				 "Early Mode On\n");
@@ -396,14 +402,15 @@
 static void rtl92d_dm_dig(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u8 value_igi = de_digtable.cur_igvalue;
+	struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+	u8 value_igi = de_digtable->cur_igvalue;
 	struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
 
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
 	if (rtlpriv->rtlhal.earlymode_enable) {
 		rtl92d_early_mode_enabled(rtlpriv);
-		de_digtable.last_min_undecorated_pwdb_for_dm =
-				 de_digtable.min_undecorated_pwdb_for_dm;
+		de_digtable->last_min_undecorated_pwdb_for_dm =
+				 de_digtable->min_undecorated_pwdb_for_dm;
 	}
 	if (!rtlpriv->dm.dm_initialgain_enable)
 		return;
@@ -421,9 +428,9 @@
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
 	/* Decide the current status and if modify initial gain or not */
 	if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
-		de_digtable.cursta_connectctate = DIG_STA_CONNECT;
+		de_digtable->cursta_connectctate = DIG_STA_CONNECT;
 	else
-		de_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+		de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
 
 	/* adjust initial gain according to false alarm counter */
 	if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
@@ -436,64 +443,64 @@
 		value_igi += 2;
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 		 "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
-		 de_digtable.large_fa_hit, de_digtable.forbidden_igi);
+		 de_digtable->large_fa_hit, de_digtable->forbidden_igi);
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 		 "dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n",
-		 de_digtable.recover_cnt, de_digtable.rx_gain_range_min);
+		 de_digtable->recover_cnt, de_digtable->rx_gain_range_min);
 
 	/* deal with abnorally large false alarm */
 	if (falsealm_cnt->cnt_all > 10000) {
 		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 			 "dm_DIG(): Abnormally false alarm case\n");
 
-		de_digtable.large_fa_hit++;
-		if (de_digtable.forbidden_igi < de_digtable.cur_igvalue) {
-			de_digtable.forbidden_igi = de_digtable.cur_igvalue;
-			de_digtable.large_fa_hit = 1;
+		de_digtable->large_fa_hit++;
+		if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) {
+			de_digtable->forbidden_igi = de_digtable->cur_igvalue;
+			de_digtable->large_fa_hit = 1;
 		}
-		if (de_digtable.large_fa_hit >= 3) {
-			if ((de_digtable.forbidden_igi + 1) > DM_DIG_MAX)
-				de_digtable.rx_gain_range_min = DM_DIG_MAX;
+		if (de_digtable->large_fa_hit >= 3) {
+			if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX)
+				de_digtable->rx_gain_range_min = DM_DIG_MAX;
 			else
-				de_digtable.rx_gain_range_min =
-				    (de_digtable.forbidden_igi + 1);
-			de_digtable.recover_cnt = 3600;	/* 3600=2hr */
+				de_digtable->rx_gain_range_min =
+				    (de_digtable->forbidden_igi + 1);
+			de_digtable->recover_cnt = 3600;	/* 3600=2hr */
 		}
 	} else {
 		/* Recovery mechanism for IGI lower bound */
-		if (de_digtable.recover_cnt != 0) {
-			de_digtable.recover_cnt--;
+		if (de_digtable->recover_cnt != 0) {
+			de_digtable->recover_cnt--;
 		} else {
-			if (de_digtable.large_fa_hit == 0) {
-				if ((de_digtable.forbidden_igi - 1) <
+			if (de_digtable->large_fa_hit == 0) {
+				if ((de_digtable->forbidden_igi - 1) <
 				    DM_DIG_FA_LOWER) {
-					de_digtable.forbidden_igi =
+					de_digtable->forbidden_igi =
 							 DM_DIG_FA_LOWER;
-					de_digtable.rx_gain_range_min =
+					de_digtable->rx_gain_range_min =
 							 DM_DIG_FA_LOWER;
 
 				} else {
-					de_digtable.forbidden_igi--;
-					de_digtable.rx_gain_range_min =
-					    (de_digtable.forbidden_igi + 1);
+					de_digtable->forbidden_igi--;
+					de_digtable->rx_gain_range_min =
+					    (de_digtable->forbidden_igi + 1);
 				}
-			} else if (de_digtable.large_fa_hit == 3) {
-				de_digtable.large_fa_hit = 0;
+			} else if (de_digtable->large_fa_hit == 3) {
+				de_digtable->large_fa_hit = 0;
 			}
 		}
 	}
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 		 "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
-		 de_digtable.large_fa_hit, de_digtable.forbidden_igi);
+		 de_digtable->large_fa_hit, de_digtable->forbidden_igi);
 	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 		 "dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n",
-		 de_digtable.recover_cnt, de_digtable.rx_gain_range_min);
+		 de_digtable->recover_cnt, de_digtable->rx_gain_range_min);
 
 	if (value_igi > DM_DIG_MAX)
 		value_igi = DM_DIG_MAX;
-	else if (value_igi < de_digtable.rx_gain_range_min)
-		value_igi = de_digtable.rx_gain_range_min;
-	de_digtable.cur_igvalue = value_igi;
+	else if (value_igi < de_digtable->rx_gain_range_min)
+		value_igi = de_digtable->rx_gain_range_min;
+	de_digtable->cur_igvalue = value_igi;
 	rtl92d_dm_write_dig(hw);
 	if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
 		rtl92d_dm_cck_packet_detection_thresh(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
index 91030ec..3fea0c1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
@@ -87,55 +87,6 @@
 #define TX_POWER_NEAR_FIELD_THRESH_LVL1		67
 #define INDEX_MAPPING_NUM			13
 
-struct ps_t {
-	u8 pre_ccastate;
-	u8 cur_ccasate;
-
-	u8 pre_rfstate;
-	u8 cur_rfstate;
-
-	long rssi_val_min;
-};
-
-struct dig_t {
-	u8 dig_enable_flag;
-	u8 dig_ext_port_stage;
-
-	u32 rssi_lowthresh;
-	u32 rssi_highthresh;
-
-	u32 fa_lowthresh;
-	u32 fa_highthresh;
-
-	u8 cursta_connectctate;
-	u8 presta_connectstate;
-	u8 curmultista_connectstate;
-
-	u8 pre_igvalue;
-	u8 cur_igvalue;
-
-	char backoff_val;
-	char backoff_val_range_max;
-	char backoff_val_range_min;
-	u8 rx_gain_range_max;
-	u8 rx_gain_range_min;
-	u8 min_undecorated_pwdb_for_dm;
-	long last_min_undecorated_pwdb_for_dm;
-
-	u8 pre_cck_pd_state;
-	u8 cur_cck_pd_state;
-
-	u8 pre_cck_fa_state;
-	u8 cur_cck_fa_state;
-
-	u8 pre_ccastate;
-	u8 cur_ccasate;
-
-	u8 large_fa_hit;
-	u8 forbidden_igi;
-	u32 recover_cnt;
-};
-
 struct swat {
 	u8 failure_cnt;
 	u8 try_flag;
@@ -200,8 +151,6 @@
 	DIG_CONNECT_MAX
 };
 
-extern struct dig_t de_digtable;
-
 void rtl92d_dm_init(struct ieee80211_hw *hw);
 void rtl92d_dm_watchdog(struct ieee80211_hw *hw);
 void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 509f5af..b338d52 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1743,9 +1743,13 @@
 		chipver |= CHIP_92D_D_CUT;
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
 		break;
+	case 0xCC33:
+		chipver |= CHIP_92D_E_CUT;
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
+		break;
 	default:
 		chipver |= CHIP_92D_D_CUT;
-		RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unkown CUT!\n");
+		RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown CUT!\n");
 		break;
 	}
 	rtlpriv->rtlhal.version = chipver;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 28fc5fb..18380a7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3064,6 +3064,7 @@
 static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *de_digtable = &rtlpriv->dm_digtable;
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
 
 	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
@@ -3071,13 +3072,13 @@
 		 rtlphy->current_io_type, rtlphy->set_io_inprogress);
 	switch (rtlphy->current_io_type) {
 	case IO_CMD_RESUME_DM_BY_SCAN:
-		de_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
+		de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
 		rtl92d_dm_write_dig(hw);
 		rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
 		break;
 	case IO_CMD_PAUSE_DM_BY_SCAN:
-		rtlphy->initgain_backup.xaagccore1 = de_digtable.cur_igvalue;
-		de_digtable.cur_igvalue = 0x37;
+		rtlphy->initgain_backup.xaagccore1 = de_digtable->cur_igvalue;
+		de_digtable->cur_igvalue = 0x37;
 		rtl92d_dm_write_dig(hw);
 		break;
 	default:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index a7f6126..1666ef7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -466,12 +466,13 @@
 	type = WLAN_FC_GET_TYPE(fc);
 	praddr = hdr->addr1;
 	packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
-	     (!compare_ether_addr(mac->bssid, (cfc & IEEE80211_FCTL_TODS) ?
-		  hdr->addr1 : (cfc & IEEE80211_FCTL_FROMDS) ?
-		  hdr->addr2 : hdr->addr3)) && (!pstats->hwerror) &&
-		  (!pstats->crc) && (!pstats->icv));
+	     ether_addr_equal(mac->bssid,
+			      (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+			      (cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+			      hdr->addr3) &&
+	     (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
 	packet_toself = packet_matchbssid &&
-			(!compare_ether_addr(praddr, rtlefuse->dev_addr));
+			ether_addr_equal(praddr, rtlefuse->dev_addr);
 	if (ieee80211_is_beacon(fc))
 		packet_beacon = true;
 	_rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index 0dc736c..057a524 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -530,12 +530,8 @@
 	SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
 
 #define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)	\
-do {							\
-	if (_size > TX_DESC_NEXT_DESC_OFFSET)		\
-		memset((void *)__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);	\
-	else						\
-		memset((void *)__pdesc, 0, _size);	\
-} while (0);
+	memset((void *)__pdesc, 0,			\
+	       min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
 
 /* For 92D early mode */
 #define SET_EARLYMODE_PKTNUM(__paddr, __value)		\
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index d1b0a1e..20afec6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -252,12 +252,7 @@
  * the desc is cleared. */
 #define	TX_DESC_NEXT_DESC_OFFSET			36
 #define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)		\
-do {								\
-	if (_size > TX_DESC_NEXT_DESC_OFFSET)			\
-		memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);	\
-	else							\
-		memset(__pdesc, 0, _size);			\
-} while (0);
+	memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
 
 /* Rx Desc */
 #define RX_STATUS_DESC_SIZE				24
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index fbabae1..2e11580 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -35,7 +35,6 @@
 #include "dm.h"
 #include "fw.h"
 
-struct dig_t digtable;
 static const u32 edca_setting_dl[PEER_MAX] = {
 	0xa44f,		/* 0 UNKNOWN */
 	0x5ea44f,	/* 1 REALTEK_90 */
@@ -421,62 +420,64 @@
 static void rtl92s_backoff_enable_flag(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *digtable = &rtlpriv->dm_digtable;
 	struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
 
-	if (falsealm_cnt->cnt_all > digtable.fa_highthresh) {
-		if ((digtable.backoff_val - 6) <
-			digtable.backoffval_range_min)
-			digtable.backoff_val = digtable.backoffval_range_min;
+	if (falsealm_cnt->cnt_all > digtable->fa_highthresh) {
+		if ((digtable->backoff_val - 6) <
+			digtable->backoffval_range_min)
+			digtable->backoff_val = digtable->backoffval_range_min;
 		else
-			digtable.backoff_val -= 6;
-	} else if (falsealm_cnt->cnt_all < digtable.fa_lowthresh) {
-		if ((digtable.backoff_val + 6) >
-			digtable.backoffval_range_max)
-			digtable.backoff_val =
-				 digtable.backoffval_range_max;
+			digtable->backoff_val -= 6;
+	} else if (falsealm_cnt->cnt_all < digtable->fa_lowthresh) {
+		if ((digtable->backoff_val + 6) >
+			digtable->backoffval_range_max)
+			digtable->backoff_val =
+				 digtable->backoffval_range_max;
 		else
-			digtable.backoff_val += 6;
+			digtable->backoff_val += 6;
 	}
 }
 
 static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *digtable = &rtlpriv->dm_digtable;
 	struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
 	static u8 initialized, force_write;
 	u8 initial_gain = 0;
 
-	if ((digtable.pre_sta_connectstate == digtable.cur_sta_connectstate) ||
-		(digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) {
-		if (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) {
+	if ((digtable->pre_sta_connectstate == digtable->cur_sta_connectstate) ||
+		(digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) {
+		if (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) {
 			if (rtlpriv->psc.rfpwr_state != ERFON)
 				return;
 
-			if (digtable.backoff_enable_flag)
+			if (digtable->backoff_enable_flag)
 				rtl92s_backoff_enable_flag(hw);
 			else
-				digtable.backoff_val = DM_DIG_BACKOFF;
+				digtable->backoff_val = DM_DIG_BACKOFF;
 
-			if ((digtable.rssi_val + 10 - digtable.backoff_val) >
-				digtable.rx_gain_range_max)
-				digtable.cur_igvalue =
-						digtable.rx_gain_range_max;
-			else if ((digtable.rssi_val + 10 - digtable.backoff_val)
-				 < digtable.rx_gain_range_min)
-				digtable.cur_igvalue =
-						digtable.rx_gain_range_min;
+			if ((digtable->rssi_val + 10 - digtable->backoff_val) >
+				digtable->rx_gain_range_max)
+				digtable->cur_igvalue =
+						digtable->rx_gain_range_max;
+			else if ((digtable->rssi_val + 10 - digtable->backoff_val)
+				 < digtable->rx_gain_range_min)
+				digtable->cur_igvalue =
+						digtable->rx_gain_range_min;
 			else
-				digtable.cur_igvalue = digtable.rssi_val + 10 -
-						digtable.backoff_val;
+				digtable->cur_igvalue = digtable->rssi_val + 10 -
+						digtable->backoff_val;
 
 			if (falsealm_cnt->cnt_all > 10000)
-				digtable.cur_igvalue =
-					 (digtable.cur_igvalue > 0x33) ?
-					 digtable.cur_igvalue : 0x33;
+				digtable->cur_igvalue =
+					 (digtable->cur_igvalue > 0x33) ?
+					 digtable->cur_igvalue : 0x33;
 
 			if (falsealm_cnt->cnt_all > 16000)
-				digtable.cur_igvalue =
-						 digtable.rx_gain_range_max;
+				digtable->cur_igvalue =
+						 digtable->rx_gain_range_max;
 		/* connected -> connected or disconnected -> disconnected  */
 		} else {
 			/* Firmware control DIG, do nothing in driver dm */
@@ -486,31 +487,31 @@
 		 * disconnected or beforeconnect->(dis)connected */
 	} else {
 		/* Enable FW DIG */
-		digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+		digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
 		rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE);
 
-		digtable.backoff_val = DM_DIG_BACKOFF;
-		digtable.cur_igvalue = rtlpriv->phy.default_initialgain[0];
-		digtable.pre_igvalue = 0;
+		digtable->backoff_val = DM_DIG_BACKOFF;
+		digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0];
+		digtable->pre_igvalue = 0;
 		return;
 	}
 
 	/* Forced writing to prevent from fw-dig overwriting. */
-	if (digtable.pre_igvalue != rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1,
+	if (digtable->pre_igvalue != rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1,
 						  MASKBYTE0))
 		force_write = 1;
 
-	if ((digtable.pre_igvalue != digtable.cur_igvalue) ||
+	if ((digtable->pre_igvalue != digtable->cur_igvalue) ||
 	    !initialized || force_write) {
 		/* Disable FW DIG */
 		rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_DISABLE);
 
-		initial_gain = (u8)digtable.cur_igvalue;
+		initial_gain = (u8)digtable->cur_igvalue;
 
 		/* Set initial gain. */
 		rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0, initial_gain);
 		rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0, initial_gain);
-		digtable.pre_igvalue = digtable.cur_igvalue;
+		digtable->pre_igvalue = digtable->cur_igvalue;
 		initialized = 1;
 		force_write = 0;
 	}
@@ -519,6 +520,7 @@
 static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *digtable = &rtlpriv->dm_digtable;
 
 	if (rtlpriv->mac80211.act_scanning)
 		return;
@@ -526,17 +528,17 @@
 	/* Decide the current status and if modify initial gain or not */
 	if (rtlpriv->mac80211.link_state >= MAC80211_LINKED ||
 	    rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
-		digtable.cur_sta_connectstate = DIG_STA_CONNECT;
+		digtable->cur_sta_connectstate = DIG_STA_CONNECT;
 	else
-		digtable.cur_sta_connectstate = DIG_STA_DISCONNECT;
+		digtable->cur_sta_connectstate = DIG_STA_DISCONNECT;
 
-	digtable.rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb;
+	digtable->rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb;
 
 	/* Change dig mode to rssi */
-	if (digtable.cur_sta_connectstate != DIG_STA_DISCONNECT) {
-		if (digtable.dig_twoport_algorithm ==
+	if (digtable->cur_sta_connectstate != DIG_STA_DISCONNECT) {
+		if (digtable->dig_twoport_algorithm ==
 		    DIG_TWO_PORT_ALGO_FALSE_ALARM) {
-			digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
+			digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
 			rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_MODE_SS);
 		}
 	}
@@ -544,13 +546,14 @@
 	_rtl92s_dm_false_alarm_counter_statistics(hw);
 	_rtl92s_dm_initial_gain_sta_beforeconnect(hw);
 
-	digtable.pre_sta_connectstate = digtable.cur_sta_connectstate;
+	digtable->pre_sta_connectstate = digtable->cur_sta_connectstate;
 }
 
 static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct dig_t *digtable = &rtlpriv->dm_digtable;
 
 	/* 2T2R TP issue */
 	if (rtlphy->rf_type == RF_2T2R)
@@ -559,7 +562,7 @@
 	if (!rtlpriv->dm.dm_initialgain_enable)
 		return;
 
-	if (digtable.dig_enable_flag == false)
+	if (digtable->dig_enable_flag == false)
 		return;
 
 	_rtl92s_dm_ctrl_initgain_bytwoport(hw);
@@ -639,51 +642,52 @@
 static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *digtable = &rtlpriv->dm_digtable;
 
 	/* Disable DIG scheme now.*/
-	digtable.dig_enable_flag = true;
-	digtable.backoff_enable_flag = true;
+	digtable->dig_enable_flag = true;
+	digtable->backoff_enable_flag = true;
 
 	if ((rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) &&
 	    (hal_get_firmwareversion(rtlpriv) >= 0x3c))
-		digtable.dig_algorithm = DIG_ALGO_BY_TOW_PORT;
+		digtable->dig_algorithm = DIG_ALGO_BY_TOW_PORT;
 	else
-		digtable.dig_algorithm =
+		digtable->dig_algorithm =
 			 DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM;
 
-	digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
-	digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
-	/* off=by real rssi value, on=by digtable.rssi_val for new dig */
-	digtable.dig_dbgmode = DM_DBG_OFF;
-	digtable.dig_slgorithm_switch = 0;
+	digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
+	digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+	/* off=by real rssi value, on=by digtable->rssi_val for new dig */
+	digtable->dig_dbgmode = DM_DBG_OFF;
+	digtable->dig_slgorithm_switch = 0;
 
 	/* 2007/10/04 MH Define init gain threshold. */
-	digtable.dig_state = DM_STA_DIG_MAX;
-	digtable.dig_highpwrstate = DM_STA_DIG_MAX;
+	digtable->dig_state = DM_STA_DIG_MAX;
+	digtable->dig_highpwrstate = DM_STA_DIG_MAX;
 
-	digtable.cur_sta_connectstate = DIG_STA_DISCONNECT;
-	digtable.pre_sta_connectstate = DIG_STA_DISCONNECT;
-	digtable.cur_ap_connectstate = DIG_AP_DISCONNECT;
-	digtable.pre_ap_connectstate = DIG_AP_DISCONNECT;
+	digtable->cur_sta_connectstate = DIG_STA_DISCONNECT;
+	digtable->pre_sta_connectstate = DIG_STA_DISCONNECT;
+	digtable->cur_ap_connectstate = DIG_AP_DISCONNECT;
+	digtable->pre_ap_connectstate = DIG_AP_DISCONNECT;
 
-	digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
-	digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
+	digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
+	digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
 
-	digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
-	digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+	digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+	digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
 
-	digtable.rssi_highpower_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
-	digtable.rssi_highpower_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
+	digtable->rssi_highpower_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
+	digtable->rssi_highpower_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
 
 	/* for dig debug rssi value */
-	digtable.rssi_val = 50;
-	digtable.backoff_val = DM_DIG_BACKOFF;
-	digtable.rx_gain_range_max = DM_DIG_MAX;
+	digtable->rssi_val = 50;
+	digtable->backoff_val = DM_DIG_BACKOFF;
+	digtable->rx_gain_range_max = DM_DIG_MAX;
 
-	digtable.rx_gain_range_min = DM_DIG_MIN;
+	digtable->rx_gain_range_min = DM_DIG_MIN;
 
-	digtable.backoffval_range_max = DM_DIG_BACKOFF_MAX;
-	digtable.backoffval_range_min = DM_DIG_BACKOFF_MIN;
+	digtable->backoffval_range_max = DM_DIG_BACKOFF_MAX;
+	digtable->backoffval_range_min = DM_DIG_BACKOFF_MIN;
 }
 
 static void _rtl92s_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
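
The dm.c hunks above replace the file-scope "struct dig_t digtable" global with a
dm_digtable member embedded in each adapter's rtl_priv, taken through a local
pointer at the top of every function, so DIG state is no longer shared between
adapters. Below is a minimal sketch of the same refactoring on a hypothetical
driver (foo_priv, foo_dig and foo_dig_init are invented names, not rtlwifi code):

	#include <linux/types.h>

	/* Before: a single global, implicitly shared by every device:
	 *     static struct foo_dig dig_state;
	 * After: per-device state embedded in the private struct. */
	struct foo_dig {
		bool enable;
		u32 cur_igvalue;
		u32 pre_igvalue;
	};

	struct foo_priv {
		/* ... other per-device state ... */
		struct foo_dig dig;		/* one copy per adapter */
	};

	static void foo_dig_init(struct foo_priv *priv)
	{
		struct foo_dig *dig = &priv->dig;	/* local alias keeps call sites short */

		dig->enable = true;
		dig->cur_igvalue = 0;
		dig->pre_igvalue = 0;
	}
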
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
index e1b19a6..2e9052c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
@@ -29,48 +29,6 @@
 #ifndef	__RTL_92S_DM_H__
 #define __RTL_92S_DM_H__
 
-struct dig_t {
-	u8 dig_enable_flag;
-	u8 dig_algorithm;
-	u8 dig_twoport_algorithm;
-	u8 dig_ext_port_stage;
-	u8 dig_dbgmode;
-	u8 dig_slgorithm_switch;
-
-	long rssi_lowthresh;
-	long rssi_highthresh;
-
-	u32 fa_lowthresh;
-	u32 fa_highthresh;
-
-	long rssi_highpower_lowthresh;
-	long rssi_highpower_highthresh;
-
-	u8 dig_state;
-	u8 dig_highpwrstate;
-	u8 cur_sta_connectstate;
-	u8 pre_sta_connectstate;
-	u8 cur_ap_connectstate;
-	u8 pre_ap_connectstate;
-
-	u8 cur_pd_thstate;
-	u8 pre_pd_thstate;
-	u8 cur_cs_ratiostate;
-	u8 pre_cs_ratiostate;
-
-	u32 pre_igvalue;
-	u32	cur_igvalue;
-
-	u8 backoff_enable_flag;
-	char backoff_val;
-	char backoffval_range_max;
-	char backoffval_range_min;
-	u8 rx_gain_range_max;
-	u8 rx_gain_range_min;
-
-	long rssi_val;
-};
-
 enum dm_dig_alg {
 	DIG_ALGO_BY_FALSE_ALARM = 0,
 	DIG_ALGO_BY_RSSI	= 1,
@@ -154,8 +112,6 @@
 #define	DM_DIG_BACKOFF_MAX		12
 #define	DM_DIG_BACKOFF_MIN		-4
 
-extern struct dig_t digtable;
-
 void rtl92s_dm_watchdog(struct ieee80211_hw *hw);
 void rtl92s_dm_init(struct ieee80211_hw *hw);
 void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
index b4afff62..d53f433 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
@@ -345,7 +345,7 @@
 	do {							\
 		udelay(1000);					\
 		rtlpriv->rtlhal.fwcmd_iomap &= (~_Bit);		\
-	} while (0);
+	} while (0)
 
 #define FW_CMD_IO_UPDATE(rtlpriv, _val)				\
 	rtlpriv->rtlhal.fwcmd_iomap = _val;
@@ -354,13 +354,13 @@
 	do {							\
 		rtl_write_word(rtlpriv, LBUS_MON_ADDR, (u16)_val);	\
 		FW_CMD_IO_UPDATE(rtlpriv, _val);		\
-	} while (0);
+	} while (0)
 
 #define FW_CMD_PARA_SET(rtlpriv, _val)				\
 	do {							\
 		rtl_write_dword(rtlpriv, LBUS_ADDR_MASK, _val);	\
 		rtlpriv->rtlhal.fwcmd_ioparam = _val;		\
-	} while (0);
+	} while (0)
 
 #define FW_CMD_IO_QUERY(rtlpriv)				\
 	(u16)(rtlpriv->rtlhal.fwcmd_iomap)
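
The fw.h change only drops the trailing semicolons after "} while (0)" in the
FW_CMD_* macros. The do/while(0) wrapper exists so a multi-statement macro can be
used like an ordinary statement; keeping the ';' out of the macro body is what
makes that work inside if/else. A standalone illustration (CLEAR_FLAG and flags
are made up for the example; this is plain, compilable user-space C):

	#include <stdio.h>

	/* Correct form: no semicolon after while (0). */
	#define CLEAR_FLAG(flags, bit)		\
		do {				\
			(flags) &= ~(bit);	\
		} while (0)

	int main(void)
	{
		unsigned int flags = 0x3;

		/* Had the macro ended in "while (0);", the expansion below would
		 * become "do { ... } while (0);;" - the stray ';' terminates the
		 * if statement early and the following "else" no longer has an
		 * "if" to bind to, which is a compile error. */
		if (flags & 0x1)
			CLEAR_FLAG(flags, 0x1);
		else
			printf("bit already clear\n");

		printf("flags = 0x%x\n", flags);
		return 0;
	}
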
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 4a49992..8d7099b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -1450,6 +1450,7 @@
 bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct dig_t *digtable = &rtlpriv->dm_digtable;
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
 	u32	fw_param = FW_CMD_IO_PARA_QUERY(rtlpriv);
@@ -1588,16 +1589,16 @@
 				      FW_SS_CTL);
 
 			if (rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE ||
-				!digtable.dig_enable_flag)
+				!digtable->dig_enable_flag)
 				fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
 
 			if ((rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) ||
 			    rtlpriv->dm.dynamic_txpower_enable)
 				fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL;
 
-			if ((digtable.dig_ext_port_stage ==
+			if ((digtable->dig_ext_port_stage ==
 			    DIG_EXT_PORT_STAGE_0) ||
-			    (digtable.dig_ext_port_stage ==
+			    (digtable->dig_ext_port_stage ==
 			    DIG_EXT_PORT_STAGE_1))
 				fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index f1b3600..730bcc9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -450,21 +450,4 @@
 	.driver.pm = &rtlwifi_pm_ops,
 };
 
-static int __init rtl92se_module_init(void)
-{
-	int ret = 0;
-
-	ret = pci_register_driver(&rtl92se_driver);
-	if (ret)
-		RT_ASSERT(false, "No device found\n");
-
-	return ret;
-}
-
-static void __exit rtl92se_module_exit(void)
-{
-	pci_unregister_driver(&rtl92se_driver);
-}
-
-module_init(rtl92se_module_init);
-module_exit(rtl92se_module_exit);
+module_pci_driver(rtl92se_driver);
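
module_pci_driver() generates exactly the boilerplate that was deleted: a
module_init() stub that calls pci_register_driver() and a module_exit() stub that
calls pci_unregister_driver() (it also drops the RT_ASSERT, whose "No device
found" message did not really describe a registration failure). A generic usage
sketch with placeholder names and IDs, not the rtl92se code:

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id foo_pci_ids[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/device */
		{ }
	};
	MODULE_DEVICE_TABLE(pci, foo_pci_ids);

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		return 0;	/* per-device setup would go here */
	}

	static void foo_remove(struct pci_dev *pdev)
	{
	}

	static struct pci_driver foo_pci_driver = {
		.name		= "foo",
		.id_table	= foo_pci_ids,
		.probe		= foo_probe,
		.remove		= foo_remove,
	};

	/* expands to the register/unregister module_init()/module_exit() pair */
	module_pci_driver(foo_pci_driver);

	MODULE_LICENSE("GPL");
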
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 2fd3d13..812b585 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -492,13 +492,14 @@
 	praddr = hdr->addr1;
 
 	packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
-	     (!compare_ether_addr(mac->bssid, (cfc & IEEE80211_FCTL_TODS) ?
-			hdr->addr1 : (cfc & IEEE80211_FCTL_FROMDS) ?
-			hdr->addr2 : hdr->addr3)) && (!pstats->hwerror) &&
-			(!pstats->crc) && (!pstats->icv));
+	     ether_addr_equal(mac->bssid,
+			      (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+			      (cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+			      hdr->addr3) &&
+	     (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
 
 	packet_toself = packet_matchbssid &&
-	    (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+	    ether_addr_equal(praddr, rtlefuse->dev_addr);
 
 	if (ieee80211_is_beacon(fc))
 		packet_beacon = true;
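
The trx.c hunk switches from !compare_ether_addr() to ether_addr_equal().
compare_ether_addr() has memcmp()-style semantics (zero means the two MAC
addresses match), so callers had to negate it; ether_addr_equal() returns true on
a match, which is harder to get wrong. A tiny usage sketch (frame_is_for_us is a
made-up helper):

	#include <linux/etherdevice.h>

	static bool frame_is_for_us(const u8 *da, const u8 *our_addr)
	{
		/* true when the two 6-byte MAC addresses are identical */
		return ether_addr_equal(da, our_addr);
	}
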
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index d04dbda..a6049d7 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -971,11 +971,6 @@
 	rtlpriv->cfg->ops->read_chip_version(hw);
 	/*like read eeprom and so on */
 	rtlpriv->cfg->ops->read_eeprom_info(hw);
-	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
-		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
-		goto error_out;
-	}
-	rtlpriv->cfg->ops->init_sw_leds(hw);
 	err = _rtl_usb_init(hw);
 	if (err)
 		goto error_out;
@@ -987,6 +982,11 @@
 			 "Can't allocate sw for mac80211\n");
 		goto error_out;
 	}
+	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+		goto error_out;
+	}
+	rtlpriv->cfg->ops->init_sw_leds(hw);
 
 	return 0;
 error_out:
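
The usb.c reorder defers init_sw_vars()/init_sw_leds() until _rtl_usb_init() and
the mac80211 allocation have succeeded, so driver-private setup only runs once
the USB plumbing and mac80211 context it may depend on exist. A generic sketch of
dependency-ordered probe steps with step-wise unwinding (all names are
placeholders; the rtlwifi path itself funnels every failure to a single
error_out label):

	struct foo_device;

	int foo_init_bus(struct foo_device *dev);
	void foo_exit_bus(struct foo_device *dev);
	int foo_init_core(struct foo_device *dev);
	void foo_exit_core(struct foo_device *dev);
	int foo_init_sw(struct foo_device *dev);

	static int foo_probe(struct foo_device *dev)
	{
		int err;

		err = foo_init_bus(dev);	/* low-level transport first */
		if (err)
			return err;

		err = foo_init_core(dev);	/* core allocations next */
		if (err)
			goto err_bus;

		err = foo_init_sw(dev);		/* state that relies on the above */
		if (err)
			goto err_core;

		return 0;

	err_core:
		foo_exit_core(dev);
	err_bus:
		foo_exit_bus(dev);
		return err;
	}
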
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 28ebc69..bd816ae 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1592,6 +1592,65 @@
 	char proc_name[20];
 };
 
+struct ps_t {
+	u8 pre_ccastate;
+	u8 cur_ccasate;
+	u8 pre_rfstate;
+	u8 cur_rfstate;
+	long rssi_val_min;
+};
+
+struct dig_t {
+	u32 rssi_lowthresh;
+	u32 rssi_highthresh;
+	u32 fa_lowthresh;
+	u32 fa_highthresh;
+	long last_min_undecorated_pwdb_for_dm;
+	long rssi_highpower_lowthresh;
+	long rssi_highpower_highthresh;
+	u32 recover_cnt;
+	u32 pre_igvalue;
+	u32 cur_igvalue;
+	long rssi_val;
+	u8 dig_enable_flag;
+	u8 dig_ext_port_stage;
+	u8 dig_algorithm;
+	u8 dig_twoport_algorithm;
+	u8 dig_dbgmode;
+	u8 dig_slgorithm_switch;
+	u8 cursta_connectctate;
+	u8 presta_connectstate;
+	u8 curmultista_connectstate;
+	char backoff_val;
+	char backoff_val_range_max;
+	char backoff_val_range_min;
+	u8 rx_gain_range_max;
+	u8 rx_gain_range_min;
+	u8 min_undecorated_pwdb_for_dm;
+	u8 rssi_val_min;
+	u8 pre_cck_pd_state;
+	u8 cur_cck_pd_state;
+	u8 pre_cck_fa_state;
+	u8 cur_cck_fa_state;
+	u8 pre_ccastate;
+	u8 cur_ccasate;
+	u8 large_fa_hit;
+	u8 forbidden_igi;
+	u8 dig_state;
+	u8 dig_highpwrstate;
+	u8 cur_sta_connectstate;
+	u8 pre_sta_connectstate;
+	u8 cur_ap_connectstate;
+	u8 pre_ap_connectstate;
+	u8 cur_pd_thstate;
+	u8 pre_pd_thstate;
+	u8 cur_cs_ratiostate;
+	u8 pre_cs_ratiostate;
+	u8 backoff_enable_flag;
+	char backoffval_range_max;
+	char backoffval_range_min;
+};
+
 struct rtl_priv {
 	struct completion firmware_loading_complete;
 	struct rtl_locks locks;
@@ -1629,6 +1688,10 @@
 	   interface or hardware */
 	unsigned long status;
 
+	/* tables for dm */
+	struct dig_t dm_digtable;
+	struct ps_t dm_pstable;
+
 	/* data buffer pointer for USB reads */
 	__le32 *usb_data;
 	int usb_data_index;
@@ -1958,37 +2021,35 @@
 static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
 				u32 regaddr, u32 bitmask)
 {
-	return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_bbreg(hw,
-								    regaddr,
-								    bitmask);
+	struct rtl_priv *rtlpriv = hw->priv;
+
+	return rtlpriv->cfg->ops->get_bbreg(hw, regaddr, bitmask);
 }
 
 static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
 				 u32 bitmask, u32 data)
 {
-	((struct rtl_priv *)(hw)->priv)->cfg->ops->set_bbreg(hw,
-							     regaddr, bitmask,
-							     data);
+	struct rtl_priv *rtlpriv = hw->priv;
 
+	rtlpriv->cfg->ops->set_bbreg(hw, regaddr, bitmask, data);
 }
 
 static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
 				enum radio_path rfpath, u32 regaddr,
 				u32 bitmask)
 {
-	return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_rfreg(hw,
-								    rfpath,
-								    regaddr,
-								    bitmask);
+	struct rtl_priv *rtlpriv = hw->priv;
+
+	return rtlpriv->cfg->ops->get_rfreg(hw, rfpath, regaddr, bitmask);
 }
 
 static inline void rtl_set_rfreg(struct ieee80211_hw *hw,
 				 enum radio_path rfpath, u32 regaddr,
 				 u32 bitmask, u32 data)
 {
-	((struct rtl_priv *)(hw)->priv)->cfg->ops->set_rfreg(hw,
-							     rfpath, regaddr,
-							     bitmask, data);
+	struct rtl_priv *rtlpriv = hw->priv;
+
+	rtlpriv->cfg->ops->set_rfreg(hw, rfpath, regaddr, bitmask, data);
 }
 
 static inline bool is_hal_stop(struct rtl_hal *rtlhal)
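
The wifi.h accessors now pull hw->priv into a local rtlpriv variable instead of
casting it inline in every expression; since hw->priv is a void pointer, no cast
is needed in C at all, and the remaining line reads as a plain indirection
through cfg->ops. The same cleanup in isolation (foo_ops, foo_priv and get_reg
are invented):

	#include <net/mac80211.h>

	struct foo_ops {
		u32 (*get_reg)(struct ieee80211_hw *hw, u32 addr, u32 mask);
	};

	struct foo_priv {
		const struct foo_ops *ops;
	};

	static inline u32 foo_get_reg(struct ieee80211_hw *hw, u32 addr, u32 mask)
	{
		struct foo_priv *priv = hw->priv;	/* void * assigns without a cast */

		return priv->ops->get_reg(hw, addr, mask);
	}
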
diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig
new file mode 100644
index 0000000..1a72932
--- /dev/null
+++ b/drivers/net/wireless/ti/Kconfig
@@ -0,0 +1,14 @@
+menuconfig WL_TI
+	bool "TI Wireless LAN support"
+	---help---
+	  This section contains support for all the wireless drivers
+	  for Texas Instruments WLAN chips, such as wl1251 and the wl12xx
+	  family.
+
+if WL_TI
+source "drivers/net/wireless/ti/wl1251/Kconfig"
+source "drivers/net/wireless/ti/wl12xx/Kconfig"
+
+# keep last for automatic dependencies
+source "drivers/net/wireless/ti/wlcore/Kconfig"
+endif # WL_TI
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
new file mode 100644
index 0000000..0a56562
--- /dev/null
+++ b/drivers/net/wireless/ti/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_WLCORE)			+= wlcore/
+obj-$(CONFIG_WL12XX)			+= wl12xx/
+obj-$(CONFIG_WL12XX_PLATFORM_DATA)	+= wlcore/
+obj-$(CONFIG_WL1251)			+= wl1251/
diff --git a/drivers/net/wireless/wl1251/Kconfig b/drivers/net/wireless/ti/wl1251/Kconfig
similarity index 100%
rename from drivers/net/wireless/wl1251/Kconfig
rename to drivers/net/wireless/ti/wl1251/Kconfig
diff --git a/drivers/net/wireless/wl1251/Makefile b/drivers/net/wireless/ti/wl1251/Makefile
similarity index 100%
rename from drivers/net/wireless/wl1251/Makefile
rename to drivers/net/wireless/ti/wl1251/Makefile
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
similarity index 100%
rename from drivers/net/wireless/wl1251/acx.c
rename to drivers/net/wireless/ti/wl1251/acx.c
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
similarity index 100%
rename from drivers/net/wireless/wl1251/acx.h
rename to drivers/net/wireless/ti/wl1251/acx.h
diff --git a/drivers/net/wireless/wl1251/boot.c b/drivers/net/wireless/ti/wl1251/boot.c
similarity index 100%
rename from drivers/net/wireless/wl1251/boot.c
rename to drivers/net/wireless/ti/wl1251/boot.c
diff --git a/drivers/net/wireless/wl1251/boot.h b/drivers/net/wireless/ti/wl1251/boot.h
similarity index 100%
rename from drivers/net/wireless/wl1251/boot.h
rename to drivers/net/wireless/ti/wl1251/boot.h
diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
similarity index 100%
rename from drivers/net/wireless/wl1251/cmd.c
rename to drivers/net/wireless/ti/wl1251/cmd.c
diff --git a/drivers/net/wireless/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
similarity index 100%
rename from drivers/net/wireless/wl1251/cmd.h
rename to drivers/net/wireless/ti/wl1251/cmd.h
diff --git a/drivers/net/wireless/wl1251/debugfs.c b/drivers/net/wireless/ti/wl1251/debugfs.c
similarity index 100%
rename from drivers/net/wireless/wl1251/debugfs.c
rename to drivers/net/wireless/ti/wl1251/debugfs.c
diff --git a/drivers/net/wireless/wl1251/debugfs.h b/drivers/net/wireless/ti/wl1251/debugfs.h
similarity index 100%
rename from drivers/net/wireless/wl1251/debugfs.h
rename to drivers/net/wireless/ti/wl1251/debugfs.h
diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
similarity index 100%
rename from drivers/net/wireless/wl1251/event.c
rename to drivers/net/wireless/ti/wl1251/event.c
diff --git a/drivers/net/wireless/wl1251/event.h b/drivers/net/wireless/ti/wl1251/event.h
similarity index 100%
rename from drivers/net/wireless/wl1251/event.h
rename to drivers/net/wireless/ti/wl1251/event.h
diff --git a/drivers/net/wireless/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
similarity index 100%
rename from drivers/net/wireless/wl1251/init.c
rename to drivers/net/wireless/ti/wl1251/init.c
diff --git a/drivers/net/wireless/wl1251/init.h b/drivers/net/wireless/ti/wl1251/init.h
similarity index 100%
rename from drivers/net/wireless/wl1251/init.h
rename to drivers/net/wireless/ti/wl1251/init.h
diff --git a/drivers/net/wireless/wl1251/io.c b/drivers/net/wireless/ti/wl1251/io.c
similarity index 100%
rename from drivers/net/wireless/wl1251/io.c
rename to drivers/net/wireless/ti/wl1251/io.c
diff --git a/drivers/net/wireless/wl1251/io.h b/drivers/net/wireless/ti/wl1251/io.h
similarity index 100%
rename from drivers/net/wireless/wl1251/io.h
rename to drivers/net/wireless/ti/wl1251/io.h
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
similarity index 99%
rename from drivers/net/wireless/wl1251/main.c
rename to drivers/net/wireless/ti/wl1251/main.c
index 41302c7..d1afb8e 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -479,6 +479,7 @@
 	cancel_work_sync(&wl->irq_work);
 	cancel_work_sync(&wl->tx_work);
 	cancel_work_sync(&wl->filter_work);
+	cancel_delayed_work_sync(&wl->elp_work);
 
 	mutex_lock(&wl->mutex);
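
The wl1251 change adds cancel_delayed_work_sync(&wl->elp_work) alongside the
existing cancel_work_sync() calls so the ELP delayed work can neither keep
running nor remain queued once the interface is being stopped. The usual pattern,
in a minimal sketch (struct foo and foo_idle_work are hypothetical):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct foo {
		struct delayed_work idle_work;
		bool idle;
	};

	static void foo_idle_work(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, idle_work.work);

		f->idle = true;		/* power-save action would go here */
	}

	static void foo_start(struct foo *f)
	{
		INIT_DELAYED_WORK(&f->idle_work, foo_idle_work);
		schedule_delayed_work(&f->idle_work, msecs_to_jiffies(100));
	}

	static void foo_stop(struct foo *f)
	{
		/* waits for a running handler and removes a pending one, so it
		 * cannot fire later against torn-down state */
		cancel_delayed_work_sync(&f->idle_work);
	}
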
 
diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c
similarity index 100%
rename from drivers/net/wireless/wl1251/ps.c
rename to drivers/net/wireless/ti/wl1251/ps.c
diff --git a/drivers/net/wireless/wl1251/ps.h b/drivers/net/wireless/ti/wl1251/ps.h
similarity index 100%
rename from drivers/net/wireless/wl1251/ps.h
rename to drivers/net/wireless/ti/wl1251/ps.h
diff --git a/drivers/net/wireless/wl1251/reg.h b/drivers/net/wireless/ti/wl1251/reg.h
similarity index 100%
rename from drivers/net/wireless/wl1251/reg.h
rename to drivers/net/wireless/ti/wl1251/reg.h
diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
similarity index 100%
rename from drivers/net/wireless/wl1251/rx.c
rename to drivers/net/wireless/ti/wl1251/rx.c
diff --git a/drivers/net/wireless/wl1251/rx.h b/drivers/net/wireless/ti/wl1251/rx.h
similarity index 100%
rename from drivers/net/wireless/wl1251/rx.h
rename to drivers/net/wireless/ti/wl1251/rx.h
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
similarity index 99%
rename from drivers/net/wireless/wl1251/sdio.c
rename to drivers/net/wireless/ti/wl1251/sdio.c
index f786942..1b851f6 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -315,8 +315,8 @@
 
 	if (wl->irq)
 		free_irq(wl->irq, wl);
-	kfree(wl_sdio);
 	wl1251_free_hw(wl);
+	kfree(wl_sdio);
 
 	sdio_claim_host(func);
 	sdio_release_irq(func);
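
The sdio.c hunk swaps two lines so the wl1251 core is torn down before the glue
structure it may still reference is freed; calling kfree(wl_sdio) first risked a
use-after-free during wl1251_free_hw(). The general rule - release an object only
after everything that still points at it has been torn down - in a tiny made-up
sketch:

	#include <linux/slab.h>

	struct foo_glue {
		int bus_state;
	};

	struct foo_hw {
		struct foo_glue *glue;	/* back-reference used during teardown */
	};

	static void foo_free_hw(struct foo_hw *hw)
	{
		hw->glue->bus_state = 0;	/* may still dereference the glue */
		kfree(hw);
	}

	static void foo_remove(struct foo_hw *hw, struct foo_glue *glue)
	{
		foo_free_hw(hw);	/* tear down the holder of the reference first */
		kfree(glue);		/* ... then free what it pointed to */
	}
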
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
similarity index 100%
rename from drivers/net/wireless/wl1251/spi.c
rename to drivers/net/wireless/ti/wl1251/spi.c
diff --git a/drivers/net/wireless/wl1251/spi.h b/drivers/net/wireless/ti/wl1251/spi.h
similarity index 100%
rename from drivers/net/wireless/wl1251/spi.h
rename to drivers/net/wireless/ti/wl1251/spi.h
diff --git a/drivers/net/wireless/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
similarity index 100%
rename from drivers/net/wireless/wl1251/tx.c
rename to drivers/net/wireless/ti/wl1251/tx.c
diff --git a/drivers/net/wireless/wl1251/tx.h b/drivers/net/wireless/ti/wl1251/tx.h
similarity index 100%
rename from drivers/net/wireless/wl1251/tx.h
rename to drivers/net/wireless/ti/wl1251/tx.h
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
similarity index 100%
rename from drivers/net/wireless/wl1251/wl1251.h
rename to drivers/net/wireless/ti/wl1251/wl1251.h
diff --git a/drivers/net/wireless/wl1251/wl12xx_80211.h b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
similarity index 100%
rename from drivers/net/wireless/wl1251/wl12xx_80211.h
rename to drivers/net/wireless/ti/wl1251/wl12xx_80211.h
diff --git a/drivers/net/wireless/ti/wl12xx/Kconfig b/drivers/net/wireless/ti/wl12xx/Kconfig
new file mode 100644
index 0000000..5b92329
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/Kconfig
@@ -0,0 +1,8 @@
+config WL12XX
+       tristate "TI wl12xx support"
+       select WLCORE
+       ---help---
+	  This module adds support for wireless adapters based on TI wl1271,
+	  wl1273, wl1281 and wl1283 chipsets. This module does *not* include
+	  support for wl1251.  For wl1251 support, use the separate homonymous
+	   driver instead.
diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile
new file mode 100644
index 0000000..87f64b1
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/Makefile
@@ -0,0 +1,3 @@
+wl12xx-objs	= main.o cmd.o acx.o
+
+obj-$(CONFIG_WL12XX)		+= wl12xx.o
diff --git a/drivers/net/wireless/ti/wl12xx/acx.c b/drivers/net/wireless/ti/wl12xx/acx.c
new file mode 100644
index 0000000..bea06b2
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/acx.c
@@ -0,0 +1,53 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/acx.h"
+
+#include "acx.h"
+
+int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap)
+{
+	struct wl1271_acx_host_config_bitmap *bitmap_conf;
+	int ret;
+
+	bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
+	if (!bitmap_conf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
+
+	ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
+				   bitmap_conf, sizeof(*bitmap_conf));
+	if (ret < 0) {
+		wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
+		goto out;
+	}
+
+out:
+	kfree(bitmap_conf);
+
+	return ret;
+}
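
wl1271_acx_host_if_cfg_bitmap() above follows the usual shape of these firmware
configuration helpers: allocate a zeroed command buffer, fill the little-endian
field, hand it to wl1271_cmd_configure(), and release it on a single out: exit.
Jumping to out: even from the failed-allocation branch is fine because
kfree(NULL) is a no-op. A stripped-down version of the same shape (foo_dev and
foo_send are placeholders, not wlcore API):

	#include <linux/slab.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct foo_dev;
	int foo_send(struct foo_dev *dev, const void *buf, size_t len);

	struct foo_cfg {
		__le32 bitmap;
	};

	static int foo_send_cfg(struct foo_dev *dev, u32 bitmap)
	{
		struct foo_cfg *cfg;
		int ret;

		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg) {
			ret = -ENOMEM;
			goto out;
		}

		cfg->bitmap = cpu_to_le32(bitmap);

		ret = foo_send(dev, cfg, sizeof(*cfg));

	out:
		kfree(cfg);	/* kfree(NULL) is a no-op, so one exit covers both paths */
		return ret;
	}
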
diff --git a/drivers/net/wireless/wl12xx/debugfs.h b/drivers/net/wireless/ti/wl12xx/acx.h
similarity index 60%
copy from drivers/net/wireless/wl12xx/debugfs.h
copy to drivers/net/wireless/ti/wl12xx/acx.h
index 254c5b2..d1f5aba 100644
--- a/drivers/net/wireless/wl12xx/debugfs.h
+++ b/drivers/net/wireless/ti/wl12xx/acx.h
@@ -1,9 +1,8 @@
 /*
- * This file is part of wl1271
+ * This file is part of wl12xx
  *
- * Copyright (C) 2009 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ * Copyright (C) 1998-2009, 2011 Texas Instruments. All rights reserved.
+ * Copyright (C) 2008-2010 Nokia Corporation
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -21,13 +20,17 @@
  *
  */
 
-#ifndef __DEBUGFS_H__
-#define __DEBUGFS_H__
+#ifndef __WL12XX_ACX_H__
+#define __WL12XX_ACX_H__
 
-#include "wl12xx.h"
+#include "../wlcore/wlcore.h"
 
-int wl1271_debugfs_init(struct wl1271 *wl);
-void wl1271_debugfs_exit(struct wl1271 *wl);
-void wl1271_debugfs_reset(struct wl1271 *wl);
+struct wl1271_acx_host_config_bitmap {
+	struct acx_header header;
 
-#endif /* WL1271_DEBUGFS_H */
+	__le32 host_cfg_bitmap;
+} __packed;
+
+int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
+
+#endif /* __WL12XX_ACX_H__ */
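
The new acx.h declares the host-config command as a __packed struct with a __le32
member: __packed stops the compiler from inserting padding, so the in-memory
layout matches the firmware's wire format byte for byte, and the fixed-endian
__le types together with cpu_to_le32() keep the driver correct on big-endian
hosts (and let sparse flag a missed conversion). A small illustration with an
invented command layout:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Hypothetical 8-byte command: 16-bit id, 16-bit length, 32-bit value.
	 * Without __packed the ABI decides whether padding appears between
	 * members; __packed pins the layout the firmware expects. */
	struct foo_fw_cmd {
		__le16 id;
		__le16 len;
		__le32 value;
	} __packed;

	static void foo_fill_cmd(struct foo_fw_cmd *cmd, u32 value)
	{
		cmd->id    = cpu_to_le16(0x19);		/* placeholder command id */
		cmd->len   = cpu_to_le16(sizeof(*cmd));
		cmd->value = cpu_to_le32(value);
	}
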
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c
new file mode 100644
index 0000000..8ffaeb5
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/cmd.c
@@ -0,0 +1,254 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+
+#include "wl12xx.h"
+#include "cmd.h"
+
+int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
+{
+	struct wl1271_ext_radio_parms_cmd *ext_radio_parms;
+	struct wl12xx_priv *priv = wl->priv;
+	struct wl12xx_conf_rf *rf = &priv->conf.rf;
+	int ret;
+
+	if (!wl->nvs)
+		return -ENODEV;
+
+	ext_radio_parms = kzalloc(sizeof(*ext_radio_parms), GFP_KERNEL);
+	if (!ext_radio_parms)
+		return -ENOMEM;
+
+	ext_radio_parms->test.id = TEST_CMD_INI_FILE_RF_EXTENDED_PARAM;
+
+	memcpy(ext_radio_parms->tx_per_channel_power_compensation_2,
+	       rf->tx_per_channel_power_compensation_2,
+	       CONF_TX_PWR_COMPENSATION_LEN_2);
+	memcpy(ext_radio_parms->tx_per_channel_power_compensation_5,
+	       rf->tx_per_channel_power_compensation_5,
+	       CONF_TX_PWR_COMPENSATION_LEN_5);
+
+	wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_EXT_RADIO_PARAM: ",
+		    ext_radio_parms, sizeof(*ext_radio_parms));
+
+	ret = wl1271_cmd_test(wl, ext_radio_parms, sizeof(*ext_radio_parms), 0);
+	if (ret < 0)
+		wl1271_warning("TEST_CMD_INI_FILE_RF_EXTENDED_PARAM failed");
+
+	kfree(ext_radio_parms);
+	return ret;
+}
+
+int wl1271_cmd_general_parms(struct wl1271 *wl)
+{
+	struct wl1271_general_parms_cmd *gen_parms;
+	struct wl1271_ini_general_params *gp =
+		&((struct wl1271_nvs_file *)wl->nvs)->general_params;
+	bool answer = false;
+	int ret;
+
+	if (!wl->nvs)
+		return -ENODEV;
+
+	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+		wl1271_warning("FEM index from INI out of bounds");
+		return -EINVAL;
+	}
+
+	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
+	if (!gen_parms)
+		return -ENOMEM;
+
+	gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
+
+	memcpy(&gen_parms->general_params, gp, sizeof(*gp));
+
+	if (gp->tx_bip_fem_auto_detect)
+		answer = true;
+
+	/* Override the REF CLK from the NVS with the one from platform data */
+	gen_parms->general_params.ref_clock = wl->ref_clock;
+
+	ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
+	if (ret < 0) {
+		wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
+		goto out;
+	}
+
+	gp->tx_bip_fem_manufacturer =
+		gen_parms->general_params.tx_bip_fem_manufacturer;
+
+	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+		wl1271_warning("FEM index from FW out of bounds");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
+		     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+
+out:
+	kfree(gen_parms);
+	return ret;
+}
+
+int wl128x_cmd_general_parms(struct wl1271 *wl)
+{
+	struct wl128x_general_parms_cmd *gen_parms;
+	struct wl128x_ini_general_params *gp =
+		&((struct wl128x_nvs_file *)wl->nvs)->general_params;
+	bool answer = false;
+	int ret;
+
+	if (!wl->nvs)
+		return -ENODEV;
+
+	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+		wl1271_warning("FEM index from ini out of bounds");
+		return -EINVAL;
+	}
+
+	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
+	if (!gen_parms)
+		return -ENOMEM;
+
+	gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
+
+	memcpy(&gen_parms->general_params, gp, sizeof(*gp));
+
+	if (gp->tx_bip_fem_auto_detect)
+		answer = true;
+
+	/* Replace REF and TCXO CLKs with the ones from platform data */
+	gen_parms->general_params.ref_clock = wl->ref_clock;
+	gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
+
+	ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
+	if (ret < 0) {
+		wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
+		goto out;
+	}
+
+	gp->tx_bip_fem_manufacturer =
+		gen_parms->general_params.tx_bip_fem_manufacturer;
+
+	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+		wl1271_warning("FEM index from FW out of bounds");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
+		     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+
+out:
+	kfree(gen_parms);
+	return ret;
+}
+
+int wl1271_cmd_radio_parms(struct wl1271 *wl)
+{
+	struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
+	struct wl1271_radio_parms_cmd *radio_parms;
+	struct wl1271_ini_general_params *gp = &nvs->general_params;
+	int ret;
+
+	if (!wl->nvs)
+		return -ENODEV;
+
+	radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
+	if (!radio_parms)
+		return -ENOMEM;
+
+	radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
+
+	/* 2.4GHz parameters */
+	memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
+	       sizeof(struct wl1271_ini_band_params_2));
+	memcpy(&radio_parms->dyn_params_2,
+	       &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
+	       sizeof(struct wl1271_ini_fem_params_2));
+
+	/* 5GHz parameters */
+	memcpy(&radio_parms->static_params_5,
+	       &nvs->stat_radio_params_5,
+	       sizeof(struct wl1271_ini_band_params_5));
+	memcpy(&radio_parms->dyn_params_5,
+	       &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
+	       sizeof(struct wl1271_ini_fem_params_5));
+
+	wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
+		    radio_parms, sizeof(*radio_parms));
+
+	ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
+	if (ret < 0)
+		wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
+
+	kfree(radio_parms);
+	return ret;
+}
+
+int wl128x_cmd_radio_parms(struct wl1271 *wl)
+{
+	struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
+	struct wl128x_radio_parms_cmd *radio_parms;
+	struct wl128x_ini_general_params *gp = &nvs->general_params;
+	int ret;
+
+	if (!wl->nvs)
+		return -ENODEV;
+
+	radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
+	if (!radio_parms)
+		return -ENOMEM;
+
+	radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
+
+	/* 2.4GHz parameters */
+	memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
+	       sizeof(struct wl128x_ini_band_params_2));
+	memcpy(&radio_parms->dyn_params_2,
+	       &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
+	       sizeof(struct wl128x_ini_fem_params_2));
+
+	/* 5GHz parameters */
+	memcpy(&radio_parms->static_params_5,
+	       &nvs->stat_radio_params_5,
+	       sizeof(struct wl128x_ini_band_params_5));
+	memcpy(&radio_parms->dyn_params_5,
+	       &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
+	       sizeof(struct wl128x_ini_fem_params_5));
+
+	radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
+
+	wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
+		    radio_parms, sizeof(*radio_parms));
+
+	ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
+	if (ret < 0)
+		wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
+
+	kfree(radio_parms);
+	return ret;
+}
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.h b/drivers/net/wireless/ti/wl12xx/cmd.h
new file mode 100644
index 0000000..140a0e8
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/cmd.h
@@ -0,0 +1,112 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 1998-2009, 2011 Texas Instruments. All rights reserved.
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_CMD_H__
+#define __WL12XX_CMD_H__
+
+#include "conf.h"
+
+#define TEST_CMD_INI_FILE_RADIO_PARAM       0x19
+#define TEST_CMD_INI_FILE_GENERAL_PARAM     0x1E
+
+struct wl1271_general_parms_cmd {
+	struct wl1271_cmd_header header;
+
+	struct wl1271_cmd_test_header test;
+
+	struct wl1271_ini_general_params general_params;
+
+	u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+	u8 sr_sen_n_p;
+	u8 sr_sen_n_p_gain;
+	u8 sr_sen_nrn;
+	u8 sr_sen_prn;
+	u8 padding[3];
+} __packed;
+
+struct wl128x_general_parms_cmd {
+	struct wl1271_cmd_header header;
+
+	struct wl1271_cmd_test_header test;
+
+	struct wl128x_ini_general_params general_params;
+
+	u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
+	u8 sr_sen_n_p;
+	u8 sr_sen_n_p_gain;
+	u8 sr_sen_nrn;
+	u8 sr_sen_prn;
+	u8 padding[3];
+} __packed;
+
+struct wl1271_radio_parms_cmd {
+	struct wl1271_cmd_header header;
+
+	struct wl1271_cmd_test_header test;
+
+	/* Static radio parameters */
+	struct wl1271_ini_band_params_2 static_params_2;
+	struct wl1271_ini_band_params_5 static_params_5;
+
+	/* Dynamic radio parameters */
+	struct wl1271_ini_fem_params_2 dyn_params_2;
+	u8 padding2;
+	struct wl1271_ini_fem_params_5 dyn_params_5;
+	u8 padding3[2];
+} __packed;
+
+struct wl128x_radio_parms_cmd {
+	struct wl1271_cmd_header header;
+
+	struct wl1271_cmd_test_header test;
+
+	/* Static radio parameters */
+	struct wl128x_ini_band_params_2 static_params_2;
+	struct wl128x_ini_band_params_5 static_params_5;
+
+	u8 fem_vendor_and_options;
+
+	/* Dynamic radio parameters */
+	struct wl128x_ini_fem_params_2 dyn_params_2;
+	u8 padding2;
+	struct wl128x_ini_fem_params_5 dyn_params_5;
+} __packed;
+
+#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26
+
+struct wl1271_ext_radio_parms_cmd {
+	struct wl1271_cmd_header header;
+
+	struct wl1271_cmd_test_header test;
+
+	u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
+	u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
+	u8 padding[3];
+} __packed;
+
+int wl1271_cmd_general_parms(struct wl1271 *wl);
+int wl128x_cmd_general_parms(struct wl1271 *wl);
+int wl1271_cmd_radio_parms(struct wl1271 *wl);
+int wl128x_cmd_radio_parms(struct wl1271 *wl);
+int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
+
+#endif /* __WL12XX_CMD_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/conf.h b/drivers/net/wireless/ti/wl12xx/conf.h
new file mode 100644
index 0000000..75e2989
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/conf.h
@@ -0,0 +1,50 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_CONF_H__
+#define __WL12XX_CONF_H__
+
+/* these are the number of channels on the band divided by two, rounded up */
+#define CONF_TX_PWR_COMPENSATION_LEN_2 7
+#define CONF_TX_PWR_COMPENSATION_LEN_5 18
+
+struct wl12xx_conf_rf {
+	/*
+	 * Per channel power compensation for 2.4GHz
+	 *
+	 * Range: s8
+	 */
+	u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
+
+	/*
+	 * Per channel power compensation for 5GHz
+	 *
+	 * Range: s8
+	 */
+	u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
+};
+
+struct wl12xx_priv_conf {
+	struct wl12xx_conf_rf rf;
+	struct conf_memory_settings mem_wl127x;
+};
+
+#endif /* __WL12XX_CONF_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
new file mode 100644
index 0000000..d7dd3de
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -0,0 +1,1388 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/err.h>
+
+#include <linux/wl12xx.h>
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/io.h"
+#include "../wlcore/acx.h"
+#include "../wlcore/tx.h"
+#include "../wlcore/rx.h"
+#include "../wlcore/io.h"
+#include "../wlcore/boot.h"
+
+#include "wl12xx.h"
+#include "reg.h"
+#include "cmd.h"
+#include "acx.h"
+
+static struct wlcore_conf wl12xx_conf = {
+	.sg = {
+		.params = {
+			[CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
+			[CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
+			[CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
+			[CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
+			[CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
+			[CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
+			[CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
+			[CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
+			[CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
+			[CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
+			[CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
+			[CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
+			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
+			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
+			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
+			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
+			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
+			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
+			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
+			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
+			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
+			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
+			[CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
+			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
+			[CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
+			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
+			/* active scan params */
+			[CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
+			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
+			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
+			/* passive scan params */
+			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
+			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
+			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
+			/* passive scan in dual antenna params */
+			[CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
+			[CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
+			[CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
+			/* general params */
+			[CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
+			[CONF_SG_ANTENNA_CONFIGURATION] = 0,
+			[CONF_SG_BEACON_MISS_PERCENT] = 60,
+			[CONF_SG_DHCP_TIME] = 5000,
+			[CONF_SG_RXT] = 1200,
+			[CONF_SG_TXT] = 1000,
+			[CONF_SG_ADAPTIVE_RXT_TXT] = 1,
+			[CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
+			[CONF_SG_HV3_MAX_SERVED] = 6,
+			[CONF_SG_PS_POLL_TIMEOUT] = 10,
+			[CONF_SG_UPSD_TIMEOUT] = 10,
+			[CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
+			[CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
+			[CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
+			/* AP params */
+			[CONF_AP_BEACON_MISS_TX] = 3,
+			[CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
+			[CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
+			[CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
+			[CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
+			[CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
+			/* CTS Diluting params */
+			[CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
+			[CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
+		},
+		.state = CONF_SG_PROTECTIVE,
+	},
+	.rx = {
+		.rx_msdu_life_time           = 512000,
+		.packet_detection_threshold  = 0,
+		.ps_poll_timeout             = 15,
+		.upsd_timeout                = 15,
+		.rts_threshold               = IEEE80211_MAX_RTS_THRESHOLD,
+		.rx_cca_threshold            = 0,
+		.irq_blk_threshold           = 0xFFFF,
+		.irq_pkt_threshold           = 0,
+		.irq_timeout                 = 600,
+		.queue_type                  = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
+	},
+	.tx = {
+		.tx_energy_detection         = 0,
+		.sta_rc_conf                 = {
+			.enabled_rates       = 0,
+			.short_retry_limit   = 10,
+			.long_retry_limit    = 10,
+			.aflags              = 0,
+		},
+		.ac_conf_count               = 4,
+		.ac_conf                     = {
+			[CONF_TX_AC_BE] = {
+				.ac          = CONF_TX_AC_BE,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = 3,
+				.tx_op_limit = 0,
+			},
+			[CONF_TX_AC_BK] = {
+				.ac          = CONF_TX_AC_BK,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = 7,
+				.tx_op_limit = 0,
+			},
+			[CONF_TX_AC_VI] = {
+				.ac          = CONF_TX_AC_VI,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = CONF_TX_AIFS_PIFS,
+				.tx_op_limit = 3008,
+			},
+			[CONF_TX_AC_VO] = {
+				.ac          = CONF_TX_AC_VO,
+				.cw_min      = 15,
+				.cw_max      = 63,
+				.aifsn       = CONF_TX_AIFS_PIFS,
+				.tx_op_limit = 1504,
+			},
+		},
+		.max_tx_retries = 100,
+		.ap_aging_period = 300,
+		.tid_conf_count = 4,
+		.tid_conf = {
+			[CONF_TX_AC_BE] = {
+				.queue_id    = CONF_TX_AC_BE,
+				.channel_type = CONF_CHANNEL_TYPE_EDCF,
+				.tsid        = CONF_TX_AC_BE,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[CONF_TX_AC_BK] = {
+				.queue_id    = CONF_TX_AC_BK,
+				.channel_type = CONF_CHANNEL_TYPE_EDCF,
+				.tsid        = CONF_TX_AC_BK,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[CONF_TX_AC_VI] = {
+				.queue_id    = CONF_TX_AC_VI,
+				.channel_type = CONF_CHANNEL_TYPE_EDCF,
+				.tsid        = CONF_TX_AC_VI,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+			[CONF_TX_AC_VO] = {
+				.queue_id    = CONF_TX_AC_VO,
+				.channel_type = CONF_CHANNEL_TYPE_EDCF,
+				.tsid        = CONF_TX_AC_VO,
+				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
+				.ack_policy  = CONF_ACK_POLICY_LEGACY,
+				.apsd_conf   = {0, 0},
+			},
+		},
+		.frag_threshold              = IEEE80211_MAX_FRAG_THRESHOLD,
+		.tx_compl_timeout            = 700,
+		.tx_compl_threshold          = 4,
+		.basic_rate                  = CONF_HW_BIT_RATE_1MBPS,
+		.basic_rate_5                = CONF_HW_BIT_RATE_6MBPS,
+		.tmpl_short_retry_limit      = 10,
+		.tmpl_long_retry_limit       = 10,
+		.tx_watchdog_timeout         = 5000,
+	},
+	.conn = {
+		.wake_up_event               = CONF_WAKE_UP_EVENT_DTIM,
+		.listen_interval             = 1,
+		.suspend_wake_up_event       = CONF_WAKE_UP_EVENT_N_DTIM,
+		.suspend_listen_interval     = 3,
+		.bcn_filt_mode               = CONF_BCN_FILT_MODE_ENABLED,
+		.bcn_filt_ie_count           = 2,
+		.bcn_filt_ie = {
+			[0] = {
+				.ie          = WLAN_EID_CHANNEL_SWITCH,
+				.rule        = CONF_BCN_RULE_PASS_ON_APPEARANCE,
+			},
+			[1] = {
+				.ie          = WLAN_EID_HT_OPERATION,
+				.rule        = CONF_BCN_RULE_PASS_ON_CHANGE,
+			},
+		},
+		.synch_fail_thold            = 10,
+		.bss_lose_timeout            = 100,
+		.beacon_rx_timeout           = 10000,
+		.broadcast_timeout           = 20000,
+		.rx_broadcast_in_ps          = 1,
+		.ps_poll_threshold           = 10,
+		.bet_enable                  = CONF_BET_MODE_ENABLE,
+		.bet_max_consecutive         = 50,
+		.psm_entry_retries           = 8,
+		.psm_exit_retries            = 16,
+		.psm_entry_nullfunc_retries  = 3,
+		.dynamic_ps_timeout          = 40,
+		.forced_ps                   = false,
+		.keep_alive_interval         = 55000,
+		.max_listen_interval         = 20,
+	},
+	.itrim = {
+		.enable = false,
+		.timeout = 50000,
+	},
+	.pm_config = {
+		.host_clk_settling_time = 5000,
+		.host_fast_wakeup_support = false
+	},
+	.roam_trigger = {
+		.trigger_pacing               = 1,
+		.avg_weight_rssi_beacon       = 20,
+		.avg_weight_rssi_data         = 10,
+		.avg_weight_snr_beacon        = 20,
+		.avg_weight_snr_data          = 10,
+	},
+	.scan = {
+		.min_dwell_time_active        = 7500,
+		.max_dwell_time_active        = 30000,
+		.min_dwell_time_passive       = 100000,
+		.max_dwell_time_passive       = 100000,
+		.num_probe_reqs               = 2,
+		.split_scan_timeout           = 50000,
+	},
+	.sched_scan = {
+		/*
+		 * Values are in TU/1000 but since sched scan FW command
+		 * params are in TUs rounding up may occur.
+		 */
+		.base_dwell_time		= 7500,
+		.max_dwell_time_delta		= 22500,
+		/* based on 250bits per probe @1Mbps */
+		.dwell_time_delta_per_probe	= 2000,
+		/* based on 250bits per probe @6Mbps (plus a bit more) */
+		.dwell_time_delta_per_probe_5	= 350,
+		.dwell_time_passive		= 100000,
+		.dwell_time_dfs			= 150000,
+		.num_probe_reqs			= 2,
+		.rssi_threshold			= -90,
+		.snr_threshold			= 0,
+	},
+	.ht = {
+		.rx_ba_win_size = 8,
+		.tx_ba_win_size = 64,
+		.inactivity_timeout = 10000,
+		.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
+	},
+	/*
+	 * Memory config for wl127x chips is given in the
+	 * wl12xx_default_priv_conf struct. The below configuration is
+	 * for wl128x chips.
+	 */
+	.mem = {
+		.num_stations                 = 1,
+		.ssid_profiles                = 1,
+		.rx_block_num                 = 40,
+		.tx_min_block_num             = 40,
+		.dynamic_memory               = 1,
+		.min_req_tx_blocks            = 45,
+		.min_req_rx_blocks            = 22,
+		.tx_min                       = 27,
+	},
+	.fm_coex = {
+		.enable                       = true,
+		.swallow_period               = 5,
+		.n_divider_fref_set_1         = 0xff,       /* default */
+		.n_divider_fref_set_2         = 12,
+		.m_divider_fref_set_1         = 148,
+		.m_divider_fref_set_2         = 0xffff,     /* default */
+		.coex_pll_stabilization_time  = 0xffffffff, /* default */
+		.ldo_stabilization_time       = 0xffff,     /* default */
+		.fm_disturbed_band_margin     = 0xff,       /* default */
+		.swallow_clk_diff             = 0xff,       /* default */
+	},
+	.rx_streaming = {
+		.duration                      = 150,
+		.queues                        = 0x1,
+		.interval                      = 20,
+		.always                        = 0,
+	},
+	.fwlog = {
+		.mode                         = WL12XX_FWLOG_ON_DEMAND,
+		.mem_blocks                   = 2,
+		.severity                     = 0,
+		.timestamp                    = WL12XX_FWLOG_TIMESTAMP_DISABLED,
+		.output                       = WL12XX_FWLOG_OUTPUT_HOST,
+		.threshold                    = 0,
+	},
+	.rate = {
+		.rate_retry_score = 32000,
+		.per_add = 8192,
+		.per_th1 = 2048,
+		.per_th2 = 4096,
+		.max_per = 8100,
+		.inverse_curiosity_factor = 5,
+		.tx_fail_low_th = 4,
+		.tx_fail_high_th = 10,
+		.per_alpha_shift = 4,
+		.per_add_shift = 13,
+		.per_beta1_shift = 10,
+		.per_beta2_shift = 8,
+		.rate_check_up = 2,
+		.rate_check_down = 12,
+		.rate_retry_policy = {
+			0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00,
+		},
+	},
+	.hangover = {
+		.recover_time               = 0,
+		.hangover_period            = 20,
+		.dynamic_mode               = 1,
+		.early_termination_mode     = 1,
+		.max_period                 = 20,
+		.min_period                 = 1,
+		.increase_delta             = 1,
+		.decrease_delta             = 2,
+		.quiet_time                 = 4,
+		.increase_time              = 1,
+		.window_size                = 16,
+	},
+};
+
+static struct wl12xx_priv_conf wl12xx_default_priv_conf = {
+	.rf = {
+		.tx_per_channel_power_compensation_2 = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		},
+		.tx_per_channel_power_compensation_5 = {
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		},
+	},
+	.mem_wl127x = {
+		.num_stations                 = 1,
+		.ssid_profiles                = 1,
+		.rx_block_num                 = 70,
+		.tx_min_block_num             = 40,
+		.dynamic_memory               = 1,
+		.min_req_tx_blocks            = 100,
+		.min_req_rx_blocks            = 22,
+		.tx_min                       = 27,
+	},
+
+};
+
+#define WL12XX_TX_HW_BLOCK_SPARE_DEFAULT        1
+#define WL12XX_TX_HW_BLOCK_GEM_SPARE            2
+#define WL12XX_TX_HW_BLOCK_SIZE                 252
+
+static const u8 wl12xx_rate_to_idx_2ghz[] = {
+	/* MCS rates are used only with 11n */
+	7,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI */
+	7,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS7 */
+	6,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS6 */
+	5,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS5 */
+	4,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS4 */
+	3,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS3 */
+	2,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS2 */
+	1,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS1 */
+	0,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS0 */
+
+	11,                            /* WL12XX_CONF_HW_RXTX_RATE_54   */
+	10,                            /* WL12XX_CONF_HW_RXTX_RATE_48   */
+	9,                             /* WL12XX_CONF_HW_RXTX_RATE_36   */
+	8,                             /* WL12XX_CONF_HW_RXTX_RATE_24   */
+
+	/* TI-specific rate */
+	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_22   */
+
+	7,                             /* WL12XX_CONF_HW_RXTX_RATE_18   */
+	6,                             /* WL12XX_CONF_HW_RXTX_RATE_12   */
+	3,                             /* WL12XX_CONF_HW_RXTX_RATE_11   */
+	5,                             /* WL12XX_CONF_HW_RXTX_RATE_9    */
+	4,                             /* WL12XX_CONF_HW_RXTX_RATE_6    */
+	2,                             /* WL12XX_CONF_HW_RXTX_RATE_5_5  */
+	1,                             /* WL12XX_CONF_HW_RXTX_RATE_2    */
+	0                              /* WL12XX_CONF_HW_RXTX_RATE_1    */
+};
+
+static const u8 wl12xx_rate_to_idx_5ghz[] = {
+	/* MCS rates are used only with 11n */
+	7,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI */
+	7,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS7 */
+	6,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS6 */
+	5,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS5 */
+	4,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS4 */
+	3,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS3 */
+	2,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS2 */
+	1,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS1 */
+	0,                             /* WL12XX_CONF_HW_RXTX_RATE_MCS0 */
+
+	7,                             /* WL12XX_CONF_HW_RXTX_RATE_54   */
+	6,                             /* WL12XX_CONF_HW_RXTX_RATE_48   */
+	5,                             /* WL12XX_CONF_HW_RXTX_RATE_36   */
+	4,                             /* WL12XX_CONF_HW_RXTX_RATE_24   */
+
+	/* TI-specific rate */
+	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_22   */
+
+	3,                             /* WL12XX_CONF_HW_RXTX_RATE_18   */
+	2,                             /* WL12XX_CONF_HW_RXTX_RATE_12   */
+	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_11   */
+	1,                             /* WL12XX_CONF_HW_RXTX_RATE_9    */
+	0,                             /* WL12XX_CONF_HW_RXTX_RATE_6    */
+	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_5_5  */
+	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_2    */
+	CONF_HW_RXTX_RATE_UNSUPPORTED  /* WL12XX_CONF_HW_RXTX_RATE_1    */
+};
+
+static const u8 *wl12xx_band_rate_to_idx[] = {
+	[IEEE80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
+	[IEEE80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
+};
+
+enum wl12xx_hw_rates {
+	WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI = 0,
+	WL12XX_CONF_HW_RXTX_RATE_MCS7,
+	WL12XX_CONF_HW_RXTX_RATE_MCS6,
+	WL12XX_CONF_HW_RXTX_RATE_MCS5,
+	WL12XX_CONF_HW_RXTX_RATE_MCS4,
+	WL12XX_CONF_HW_RXTX_RATE_MCS3,
+	WL12XX_CONF_HW_RXTX_RATE_MCS2,
+	WL12XX_CONF_HW_RXTX_RATE_MCS1,
+	WL12XX_CONF_HW_RXTX_RATE_MCS0,
+	WL12XX_CONF_HW_RXTX_RATE_54,
+	WL12XX_CONF_HW_RXTX_RATE_48,
+	WL12XX_CONF_HW_RXTX_RATE_36,
+	WL12XX_CONF_HW_RXTX_RATE_24,
+	WL12XX_CONF_HW_RXTX_RATE_22,
+	WL12XX_CONF_HW_RXTX_RATE_18,
+	WL12XX_CONF_HW_RXTX_RATE_12,
+	WL12XX_CONF_HW_RXTX_RATE_11,
+	WL12XX_CONF_HW_RXTX_RATE_9,
+	WL12XX_CONF_HW_RXTX_RATE_6,
+	WL12XX_CONF_HW_RXTX_RATE_5_5,
+	WL12XX_CONF_HW_RXTX_RATE_2,
+	WL12XX_CONF_HW_RXTX_RATE_1,
+	WL12XX_CONF_HW_RXTX_RATE_MAX,
+};
+
+static struct wlcore_partition_set wl12xx_ptable[PART_TABLE_LEN] = {
+	[PART_DOWN] = {
+		.mem = {
+			.start = 0x00000000,
+			.size  = 0x000177c0
+		},
+		.reg = {
+			.start = REGISTERS_BASE,
+			.size  = 0x00008800
+		},
+		.mem2 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
+		.mem3 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
+	},
+
+	[PART_BOOT] = { /* in wl12xx we can use a mix of work and down
+			 * partition here */
+		.mem = {
+			.start = 0x00040000,
+			.size  = 0x00014fc0
+		},
+		.reg = {
+			.start = REGISTERS_BASE,
+			.size  = 0x00008800
+		},
+		.mem2 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
+		.mem3 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
+	},
+
+	[PART_WORK] = {
+		.mem = {
+			.start = 0x00040000,
+			.size  = 0x00014fc0
+		},
+		.reg = {
+			.start = REGISTERS_BASE,
+			.size  = 0x0000a000
+		},
+		.mem2 = {
+			.start = 0x003004f8,
+			.size  = 0x00000004
+		},
+		.mem3 = {
+			.start = 0x00040404,
+			.size  = 0x00000000
+		},
+	},
+
+	[PART_DRPW] = {
+		.mem = {
+			.start = 0x00040000,
+			.size  = 0x00014fc0
+		},
+		.reg = {
+			.start = DRPW_BASE,
+			.size  = 0x00006000
+		},
+		.mem2 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		},
+		.mem3 = {
+			.start = 0x00000000,
+			.size  = 0x00000000
+		}
+	}
+};
+
+static const int wl12xx_rtable[REG_TABLE_LEN] = {
+	[REG_ECPU_CONTROL]		= WL12XX_REG_ECPU_CONTROL,
+	[REG_INTERRUPT_NO_CLEAR]	= WL12XX_REG_INTERRUPT_NO_CLEAR,
+	[REG_INTERRUPT_ACK]		= WL12XX_REG_INTERRUPT_ACK,
+	[REG_COMMAND_MAILBOX_PTR]	= WL12XX_REG_COMMAND_MAILBOX_PTR,
+	[REG_EVENT_MAILBOX_PTR]		= WL12XX_REG_EVENT_MAILBOX_PTR,
+	[REG_INTERRUPT_TRIG]		= WL12XX_REG_INTERRUPT_TRIG,
+	[REG_INTERRUPT_MASK]		= WL12XX_REG_INTERRUPT_MASK,
+	[REG_PC_ON_RECOVERY]		= WL12XX_SCR_PAD4,
+	[REG_CHIP_ID_B]			= WL12XX_CHIP_ID_B,
+	[REG_CMD_MBOX_ADDRESS]		= WL12XX_CMD_MBOX_ADDRESS,
+
+	/* data access memory addresses, used with partition translation */
+	[REG_SLV_MEM_DATA]		= WL1271_SLV_MEM_DATA,
+	[REG_SLV_REG_DATA]		= WL1271_SLV_REG_DATA,
+
+	/* raw data access memory addresses */
+	[REG_RAW_FW_STATUS_ADDR]	= FW_STATUS_ADDR,
+};
+
+/* TODO: maybe move to a new header file? */
+#define WL127X_FW_NAME_MULTI	"ti-connectivity/wl127x-fw-4-mr.bin"
+#define WL127X_FW_NAME_SINGLE	"ti-connectivity/wl127x-fw-4-sr.bin"
+#define WL127X_PLT_FW_NAME	"ti-connectivity/wl127x-fw-4-plt.bin"
+
+#define WL128X_FW_NAME_MULTI	"ti-connectivity/wl128x-fw-4-mr.bin"
+#define WL128X_FW_NAME_SINGLE	"ti-connectivity/wl128x-fw-4-sr.bin"
+#define WL128X_PLT_FW_NAME	"ti-connectivity/wl128x-fw-4-plt.bin"
+
+static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
+{
+	if (wl->chip.id != CHIP_ID_1283_PG20) {
+		struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
+		struct wl1271_rx_mem_pool_addr rx_mem_addr;
+
+		/*
+		 * Choose the block we want to read
+		 * For aggregated packets, only the first memory block
+		 * should be retrieved. The FW takes care of the rest.
+		 */
+		u32 mem_block = rx_desc & RX_MEM_BLOCK_MASK;
+
+		rx_mem_addr.addr = (mem_block << 8) +
+			le32_to_cpu(wl_mem_map->packet_memory_pool_start);
+
+		rx_mem_addr.addr_extra = rx_mem_addr.addr + 4;
+
+		wl1271_write(wl, WL1271_SLV_REG_DATA,
+			     &rx_mem_addr, sizeof(rx_mem_addr), false);
+	}
+}
+
+static int wl12xx_identify_chip(struct wl1271 *wl)
+{
+	int ret = 0;
+
+	switch (wl->chip.id) {
+	case CHIP_ID_1271_PG10:
+		wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
+			       wl->chip.id);
+
+		/* clear the alignment quirk, since we don't support it */
+		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
+
+		wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
+		wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
+		wl->mr_fw_name = WL127X_FW_NAME_MULTI;
+		memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
+		       sizeof(wl->conf.mem));
+
+		/* read data preparation is only needed by wl127x */
+		wl->ops->prepare_read = wl127x_prepare_read;
+
+		break;
+
+	case CHIP_ID_1271_PG20:
+		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
+			     wl->chip.id);
+
+		/* clear the alignment quirk, since we don't support it */
+		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
+
+		wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
+		wl->plt_fw_name = WL127X_PLT_FW_NAME;
+		wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
+		wl->mr_fw_name = WL127X_FW_NAME_MULTI;
+		memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
+		       sizeof(wl->conf.mem));
+
+		/* read data preparation is only needed by wl127x */
+		wl->ops->prepare_read = wl127x_prepare_read;
+
+		break;
+
+	case CHIP_ID_1283_PG20:
+		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
+			     wl->chip.id);
+		wl->plt_fw_name = WL128X_PLT_FW_NAME;
+		wl->sr_fw_name = WL128X_FW_NAME_SINGLE;
+		wl->mr_fw_name = WL128X_FW_NAME_MULTI;
+		break;
+	case CHIP_ID_1283_PG10:
+	default:
+		wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
+		ret = -ENODEV;
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
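+/*
+ * Indirect access to the chip's top registers: the target address is
+ * programmed into OCP_POR_CTR and the access is triggered by writing a
+ * read/write command to OCP_CMD.
+ */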
+static void wl12xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
+{
+	/* write (address >> 1) + 0x30000 to OCP_POR_CTR */
+	addr = (addr >> 1) + 0x30000;
+	wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr);
+
+	/* write value to OCP_POR_WDATA */
+	wl1271_write32(wl, WL12XX_OCP_DATA_WRITE, val);
+
+	/* write 1 to OCP_CMD */
+	wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE);
+}
+
+static u16 wl12xx_top_reg_read(struct wl1271 *wl, int addr)
+{
+	u32 val;
+	int timeout = OCP_CMD_LOOP;
+
+	/* write (address >> 1) + 0x30000 to OCP_POR_CTR */
+	addr = (addr >> 1) + 0x30000;
+	wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr);
+
+	/* write 2 to OCP_CMD */
+	wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ);
+
+	/* poll for data ready */
+	do {
+		val = wl1271_read32(wl, WL12XX_OCP_DATA_READ);
+	} while (!(val & OCP_READY_MASK) && --timeout);
+
+	if (!timeout) {
+		wl1271_warning("Top register access timed out.");
+		return 0xffff;
+	}
+
+	/* check data status and return if OK */
+	if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
+		return val & 0xffff;
+	else {
+		wl1271_warning("Top register access returned error.");
+		return 0xffff;
+	}
+}
+
+static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
+{
+	u16 spare_reg;
+
+	/* Mask bits [2] & [8:4] in the sys_clk_cfg register */
+	spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG);
+	if (spare_reg == 0xFFFF)
+		return -EFAULT;
+	spare_reg |= (BIT(3) | BIT(5) | BIT(6));
+	wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+
+	/* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
+	wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG,
+			     WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
+
+	/* Delay execution for 15msec, to let the HW settle */
+	mdelay(15);
+
+	return 0;
+}
+
+static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
+{
+	u16 tcxo_detection;
+
+	tcxo_detection = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG);
+	if (tcxo_detection & TCXO_DET_FAILED)
+		return false;
+
+	return true;
+}
+
+static bool wl128x_is_fref_valid(struct wl1271 *wl)
+{
+	u16 fref_detection;
+
+	fref_detection = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG);
+	if (fref_detection & FREF_CLK_DETECT_FAIL)
+		return false;
+
+	return true;
+}
+
+static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
+{
+	wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
+	wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
+	wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
+
+	return 0;
+}
+
+static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
+{
+	u16 spare_reg;
+	u16 pll_config;
+	u8 input_freq;
+
+	/* Mask bits [3:1] in the sys_clk_cfg register */
+	spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG);
+	if (spare_reg == 0xFFFF)
+		return -EFAULT;
+	spare_reg |= BIT(2);
+	wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+
+	/* Handle special cases of the TCXO clock */
+	if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
+	    wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
+		return wl128x_manually_configure_mcs_pll(wl);
+
+	/* Set the input frequency according to the selected clock source */
+	input_freq = (clk & 1) + 1;
+
+	pll_config = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG);
+	if (pll_config == 0xFFFF)
+		return -EFAULT;
+	pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
+	pll_config |= MCS_PLL_ENABLE_HP;
+	wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
+
+	return 0;
+}
+
+/*
+ * WL128x has two clock inputs - TCXO and FREF.
+ * TCXO is the main clock of the device, while FREF is used to sync
+ * between the GPS and the cellular modem.
+ * In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used
+ * as the WLAN/BT main clock.
+ */
+static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
+{
+	u16 sys_clk_cfg;
+
+	/* For XTAL-only modes, FREF will be used after switching from TCXO */
+	if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
+	    wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
+		if (!wl128x_switch_tcxo_to_fref(wl))
+			return -EINVAL;
+		goto fref_clk;
+	}
+
+	/* Query the HW, to determine which clock source we should use */
+	sys_clk_cfg = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG);
+	if (sys_clk_cfg == 0xFFFF)
+		return -EINVAL;
+	if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
+		goto fref_clk;
+
+	/* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
+	if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
+	    wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
+		if (!wl128x_switch_tcxo_to_fref(wl))
+			return -EINVAL;
+		goto fref_clk;
+	}
+
+	/* TCXO clock is selected */
+	if (!wl128x_is_tcxo_valid(wl))
+		return -EINVAL;
+	*selected_clock = wl->tcxo_clock;
+	goto config_mcs_pll;
+
+fref_clk:
+	/* FREF clock is selected */
+	if (!wl128x_is_fref_valid(wl))
+		return -EINVAL;
+	*selected_clock = wl->ref_clock;
+
+config_mcs_pll:
+	return wl128x_configure_mcs_pll(wl, *selected_clock);
+}
+
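+/* Program the wl127x PLL and clock pads according to the reference clock */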
+static int wl127x_boot_clk(struct wl1271 *wl)
+{
+	u32 pause;
+	u32 clk;
+
+	if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
+		wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION;
+
+	if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
+	    wl->ref_clock == CONF_REF_CLK_38_4_E ||
+	    wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
+		/* ref clk: 19.2/38.4/38.4-XTAL */
+		clk = 0x3;
+	else if (wl->ref_clock == CONF_REF_CLK_26_E ||
+		 wl->ref_clock == CONF_REF_CLK_52_E)
+		/* ref clk: 26/52 */
+		clk = 0x5;
+	else
+		return -EINVAL;
+
+	if (wl->ref_clock != CONF_REF_CLK_19_2_E) {
+		u16 val;
+		/* Set clock type (open drain) */
+		val = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE);
+		val &= FREF_CLK_TYPE_BITS;
+		wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
+
+		/* Set clock pull mode (no pull) */
+		val = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL);
+		val |= NO_PULL;
+		wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val);
+	} else {
+		u16 val;
+		/* Set clock polarity */
+		val = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY);
+		val &= FREF_CLK_POLARITY_BITS;
+		val |= CLK_REQ_OUTN_SEL;
+		wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
+	}
+
+	wl1271_write32(wl, WL12XX_PLL_PARAMETERS, clk);
+
+	pause = wl1271_read32(wl, WL12XX_PLL_PARAMETERS);
+
+	wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
+
+	pause &= ~(WU_COUNTER_PAUSE_VAL);
+	pause |= WU_COUNTER_PAUSE_VAL;
+	wl1271_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause);
+
+	return 0;
+}
+
+static int wl1271_boot_soft_reset(struct wl1271 *wl)
+{
+	unsigned long timeout;
+	u32 boot_data;
+
+	/* perform soft reset */
+	wl1271_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
+
+	/* SOFT_RESET is self-clearing */
+	timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
+	while (1) {
+		boot_data = wl1271_read32(wl, WL12XX_SLV_SOFT_RESET);
+		wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
+		if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
+			break;
+
+		if (time_after(jiffies, timeout)) {
+			/* 1.2 check pWhalBus->uSelfClearTime if the
+			 * timeout was reached */
+			wl1271_error("soft reset timeout");
+			return -1;
+		}
+
+		udelay(SOFT_RESET_STALL_TIME);
+	}
+
+	/* disable Rx/Tx */
+	wl1271_write32(wl, WL12XX_ENABLE, 0x0);
+
+	/* disable auto calibration on start */
+	wl1271_write32(wl, WL12XX_SPARE_A2, 0xffff);
+
+	return 0;
+}
+
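+/*
+ * Program the boot clock, continue the ELP wake-up sequence, set up the
+ * DRPW scratch register and soft-reset the chip before firmware upload.
+ */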
+static int wl12xx_pre_boot(struct wl1271 *wl)
+{
+	int ret = 0;
+	u32 clk;
+	int selected_clock = -1;
+
+	if (wl->chip.id == CHIP_ID_1283_PG20) {
+		ret = wl128x_boot_clk(wl, &selected_clock);
+		if (ret < 0)
+			goto out;
+	} else {
+		ret = wl127x_boot_clk(wl);
+		if (ret < 0)
+			goto out;
+	}
+
+	/* Continue the ELP wake up sequence */
+	wl1271_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+	udelay(500);
+
+	wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+
+	/* Read-modify-write DRPW_SCRATCH_START register (see next state)
+	   to be used by DRPw FW. The RTRIM value will be added by the FW
+	   before taking DRPw out of reset */
+
+	clk = wl1271_read32(wl, WL12XX_DRPW_SCRATCH_START);
+
+	wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
+
+	if (wl->chip.id == CHIP_ID_1283_PG20)
+		clk |= ((selected_clock & 0x3) << 1) << 4;
+	else
+		clk |= (wl->ref_clock << 1) << 4;
+
+	wl1271_write32(wl, WL12XX_DRPW_SCRATCH_START, clk);
+
+	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+
+	/* Disable interrupts */
+	wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+
+	ret = wl1271_boot_soft_reset(wl);
+	if (ret < 0)
+		goto out;
+
+out:
+	return ret;
+}
+
+static void wl12xx_pre_upload(struct wl1271 *wl)
+{
+	u32 tmp;
+
+	/* write the firmware's last address (i.e. its length) to
+	 * ACX_EEPROMLESS_IND_REG */
+	wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
+
+	wl1271_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND);
+
+	tmp = wlcore_read_reg(wl, REG_CHIP_ID_B);
+
+	wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
+
+	/* 6. read the EEPROM parameters */
+	tmp = wl1271_read32(wl, WL12XX_SCR_PAD2);
+
+	/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
+	 * to upload_fw) */
+
+	if (wl->chip.id == CHIP_ID_1283_PG20)
+		wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
+}
+
+static void wl12xx_enable_interrupts(struct wl1271 *wl)
+{
+	u32 polarity;
+
+	polarity = wl12xx_top_reg_read(wl, OCP_REG_POLARITY);
+
+	/* We use HIGH polarity, so unset the LOW bit */
+	polarity &= ~POLARITY_LOW;
+	wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity);
+
+	wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_ALL_EVENTS_VECTOR);
+
+	wlcore_enable_interrupts(wl);
+	wlcore_write_reg(wl, REG_INTERRUPT_MASK,
+			 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
+
+	wl1271_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
+}
+
+static int wl12xx_boot(struct wl1271 *wl)
+{
+	int ret;
+
+	ret = wl12xx_pre_boot(wl);
+	if (ret < 0)
+		goto out;
+
+	ret = wlcore_boot_upload_nvs(wl);
+	if (ret < 0)
+		goto out;
+
+	wl12xx_pre_upload(wl);
+
+	ret = wlcore_boot_upload_firmware(wl);
+	if (ret < 0)
+		goto out;
+
+	ret = wlcore_boot_run_firmware(wl);
+	if (ret < 0)
+		goto out;
+
+	wl12xx_enable_interrupts(wl);
+
+out:
+	return ret;
+}
+
+static void wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
+			       void *buf, size_t len)
+{
+	wl1271_write(wl, cmd_box_addr, buf, len, false);
+	wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD);
+}
+
+static void wl12xx_ack_event(struct wl1271 *wl)
+{
+	wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_EVENT_ACK);
+}
+
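+/*
+ * Number of TX HW memory blocks needed for a frame: the aligned length
+ * rounded up to WL12XX_TX_HW_BLOCK_SIZE, plus the requested spare blocks.
+ */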
+static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
+{
+	u32 blk_size = WL12XX_TX_HW_BLOCK_SIZE;
+	u32 align_len = wlcore_calc_packet_alignment(wl, len);
+
+	return (align_len + blk_size - 1) / blk_size + spare_blks;
+}
+
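+/*
+ * wl128x descriptors carry only the total block count; wl127x descriptors
+ * report the spare (extra) blocks in a separate field.
+ */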
+static void
+wl12xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
+			  u32 blks, u32 spare_blks)
+{
+	if (wl->chip.id == CHIP_ID_1283_PG20) {
+		desc->wl128x_mem.total_mem_blocks = blks;
+	} else {
+		desc->wl127x_mem.extra_blocks = spare_blks;
+		desc->wl127x_mem.total_mem_blocks = blks;
+	}
+}
+
+static void
+wl12xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
+			    struct sk_buff *skb)
+{
+	u32 aligned_len = wlcore_calc_packet_alignment(wl, skb->len);
+
+	if (wl->chip.id == CHIP_ID_1283_PG20) {
+		desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
+		desc->length = cpu_to_le16(aligned_len >> 2);
+
+		wl1271_debug(DEBUG_TX,
+			     "tx_fill_hdr: hlid: %d len: %d life: %d mem: %d extra: %d",
+			     desc->hlid,
+			     le16_to_cpu(desc->length),
+			     le16_to_cpu(desc->life_time),
+			     desc->wl128x_mem.total_mem_blocks,
+			     desc->wl128x_mem.extra_bytes);
+	} else {
+		/* calculate number of padding bytes */
+		int pad = aligned_len - skb->len;
+		desc->tx_attr |=
+			cpu_to_le16(pad << TX_HW_ATTR_OFST_LAST_WORD_PAD);
+
+		/* Store the aligned length in terms of words */
+		desc->length = cpu_to_le16(aligned_len >> 2);
+
+		wl1271_debug(DEBUG_TX,
+			     "tx_fill_hdr: pad: %d hlid: %d len: %d life: %d mem: %d",
+			     pad, desc->hlid,
+			     le16_to_cpu(desc->length),
+			     le16_to_cpu(desc->life_time),
+			     desc->wl127x_mem.total_mem_blocks);
+	}
+}
+
+static enum wl_rx_buf_align
+wl12xx_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
+{
+	if (rx_desc & RX_BUF_UNALIGNED_PAYLOAD)
+		return WLCORE_RX_BUF_UNALIGNED;
+
+	return WLCORE_RX_BUF_ALIGNED;
+}
+
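+/* Payload length is the total length minus the RX descriptor and padding */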
+static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
+				    u32 data_len)
+{
+	struct wl1271_rx_descriptor *desc = rx_data;
+
+	/* invalid packet */
+	if (data_len < sizeof(*desc) ||
+	    data_len < sizeof(*desc) + desc->pad_len)
+		return 0;
+
+	return data_len - sizeof(*desc) - desc->pad_len;
+}
+
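+/* Run TX completion only if the firmware reported new TX results */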
+static void wl12xx_tx_delayed_compl(struct wl1271 *wl)
+{
+	if (wl->fw_status->tx_results_counter == (wl->tx_results_count & 0xff))
+		return;
+
+	wl1271_tx_complete(wl);
+}
+
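+/*
+ * Chip-specific HW init: upload the general/radio parameters and, on
+ * wl128x, configure the host interface bitmap.
+ */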
+static int wl12xx_hw_init(struct wl1271 *wl)
+{
+	int ret;
+
+	if (wl->chip.id == CHIP_ID_1283_PG20) {
+		u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
+
+		ret = wl128x_cmd_general_parms(wl);
+		if (ret < 0)
+			goto out;
+		ret = wl128x_cmd_radio_parms(wl);
+		if (ret < 0)
+			goto out;
+
+		if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
+			/* Enable SDIO padding */
+			host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
+
+		/* Must be before wl1271_acx_init_mem_config() */
+		ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap);
+		if (ret < 0)
+			goto out;
+	} else {
+		ret = wl1271_cmd_general_parms(wl);
+		if (ret < 0)
+			goto out;
+		ret = wl1271_cmd_radio_parms(wl);
+		if (ret < 0)
+			goto out;
+		ret = wl1271_cmd_ext_radio_parms(wl);
+		if (ret < 0)
+			goto out;
+	}
+out:
+	return ret;
+}
+
+static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
+				       struct wl12xx_vif *wlvif)
+{
+	return wlvif->rate_set;
+}
+
+static int wl12xx_identify_fw(struct wl1271 *wl)
+{
+	unsigned int *fw_ver = wl->chip.fw_ver;
+
+	/* Only newer station firmware supports routing fw logs to the host */
+	if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
+	    (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
+		wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
+
+	/* This feature is not yet supported for AP mode */
+	if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
+		wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
+
+	return 0;
+}
+
+static void wl12xx_conf_init(struct wl1271 *wl)
+{
+	struct wl12xx_priv *priv = wl->priv;
+
+	/* apply driver default configuration */
+	memcpy(&wl->conf, &wl12xx_conf, sizeof(wl12xx_conf));
+
+	/* apply default private configuration */
+	memcpy(&priv->conf, &wl12xx_default_priv_conf, sizeof(priv->conf));
+}
+
+static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
+{
+	bool supported = false;
+	u8 major, minor;
+
+	if (wl->chip.id == CHIP_ID_1283_PG20) {
+		major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
+		minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
+
+		/* in wl128x we have the MAC address if the PG is >= (2, 1) */
+		if (major > 2 || (major == 2 && minor >= 1))
+			supported = true;
+	} else {
+		major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver);
+		minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver);
+
+		/* in wl127x we have the MAC address if the PG is >= (3, 1) */
+		if (major == 3 && minor >= 1)
+			supported = true;
+	}
+
+	wl1271_debug(DEBUG_PROBE,
+		     "PG Ver major = %d minor = %d, MAC %s present",
+		     major, minor, supported ? "is" : "is not");
+
+	return supported;
+}
+
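+/*
+ * The BD_ADDR is read from two fuse registers in the DRPW partition and
+ * split into its OUI and NIC parts.
+ */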
+static void wl12xx_get_fuse_mac(struct wl1271 *wl)
+{
+	u32 mac1, mac2;
+
+	wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+
+	mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1);
+	mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2);
+
+	/* these are the two parts of the BD_ADDR */
+	wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
+		((mac1 & 0xff000000) >> 24);
+	wl->fuse_nic_addr = mac1 & 0xffffff;
+
+	wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
+}
+
+static s8 wl12xx_get_pg_ver(struct wl1271 *wl)
+{
+	u32 die_info;
+
+	if (wl->chip.id == CHIP_ID_1283_PG20)
+		die_info = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
+	else
+		die_info = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
+
+	return (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET;
+}
+
+static void wl12xx_get_mac(struct wl1271 *wl)
+{
+	if (wl12xx_mac_in_fuse(wl))
+		wl12xx_get_fuse_mac(wl);
+}
+
+static struct wlcore_ops wl12xx_ops = {
+	.identify_chip		= wl12xx_identify_chip,
+	.identify_fw		= wl12xx_identify_fw,
+	.boot			= wl12xx_boot,
+	.trigger_cmd		= wl12xx_trigger_cmd,
+	.ack_event		= wl12xx_ack_event,
+	.calc_tx_blocks		= wl12xx_calc_tx_blocks,
+	.set_tx_desc_blocks	= wl12xx_set_tx_desc_blocks,
+	.set_tx_desc_data_len	= wl12xx_set_tx_desc_data_len,
+	.get_rx_buf_align	= wl12xx_get_rx_buf_align,
+	.get_rx_packet_len	= wl12xx_get_rx_packet_len,
+	.tx_immediate_compl	= NULL,
+	.tx_delayed_compl	= wl12xx_tx_delayed_compl,
+	.hw_init		= wl12xx_hw_init,
+	.init_vif		= NULL,
+	.sta_get_ap_rate_mask	= wl12xx_sta_get_ap_rate_mask,
+	.get_pg_ver		= wl12xx_get_pg_ver,
+	.get_mac		= wl12xx_get_mac,
+};
+
+static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
+	.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
+	       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
+	.ht_supported = true,
+	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
+	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8,
+	.mcs = {
+		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+		.rx_highest = cpu_to_le16(72),
+		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+		},
+};
+
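+/*
+ * Chip-family glue: allocate the common wlcore hw structure, hook up the
+ * wl12xx-specific ops, partition/register tables and HT capabilities and
+ * then hand over to wlcore_probe().
+ */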
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+	struct wl1271 *wl;
+	struct ieee80211_hw *hw;
+	struct wl12xx_priv *priv;
+
+	hw = wlcore_alloc_hw(sizeof(*priv));
+	if (IS_ERR(hw)) {
+		wl1271_error("can't allocate hw");
+		return PTR_ERR(hw);
+	}
+
+	wl = hw->priv;
+	wl->ops = &wl12xx_ops;
+	wl->ptable = wl12xx_ptable;
+	wl->rtable = wl12xx_rtable;
+	wl->num_tx_desc = 16;
+	wl->normal_tx_spare = WL12XX_TX_HW_BLOCK_SPARE_DEFAULT;
+	wl->gem_tx_spare = WL12XX_TX_HW_BLOCK_GEM_SPARE;
+	wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
+	wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
+	wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
+	wl->fw_status_priv_len = 0;
+	memcpy(&wl->ht_cap, &wl12xx_ht_cap, sizeof(wl12xx_ht_cap));
+	wl12xx_conf_init(wl);
+
+	return wlcore_probe(wl, pdev);
+}
+
+static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
+	{ "wl12xx", 0 },
+	{  } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
+
+static struct platform_driver wl12xx_driver = {
+	.probe		= wl12xx_probe,
+	.remove		= __devexit_p(wlcore_remove),
+	.id_table	= wl12xx_id_table,
+	.driver = {
+		.name	= "wl12xx_driver",
+		.owner	= THIS_MODULE,
+	}
+};
+
+static int __init wl12xx_init(void)
+{
+	return platform_driver_register(&wl12xx_driver);
+}
+module_init(wl12xx_init);
+
+static void __exit wl12xx_exit(void)
+{
+	platform_driver_unregister(&wl12xx_driver);
+}
+module_exit(wl12xx_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
+MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
+MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
+MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
+MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
+MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
+MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/ti/wl12xx/reg.h
similarity index 71%
rename from drivers/net/wireless/wl12xx/reg.h
rename to drivers/net/wireless/ti/wl12xx/reg.h
index 340db32..79ede02 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/ti/wl12xx/reg.h
@@ -33,16 +33,8 @@
 #define REGISTERS_DOWN_SIZE 0x00008800
 #define REGISTERS_WORK_SIZE 0x0000b000
 
-#define HW_ACCESS_ELP_CTRL_REG_ADDR         0x1FFFC
 #define FW_STATUS_ADDR                      (0x14FC0 + 0xA000)
 
-/* ELP register commands */
-#define ELPCTRL_WAKE_UP             0x1
-#define ELPCTRL_WAKE_UP_WLAN_READY  0x5
-#define ELPCTRL_SLEEP               0x0
-/* ELP WLAN_READY bit */
-#define ELPCTRL_WLAN_READY          0x2
-
 /*===============================================
    Host Software Reset - 32bit RW
  ------------------------------------------
@@ -57,14 +49,14 @@
     (not self-clearing), the Wlan hardware
     exits the software reset state.
 ===============================================*/
-#define ACX_REG_SLV_SOFT_RESET         (REGISTERS_BASE + 0x0000)
+#define WL12XX_SLV_SOFT_RESET		(REGISTERS_BASE + 0x0000)
 
 #define WL1271_SLV_REG_DATA            (REGISTERS_BASE + 0x0008)
 #define WL1271_SLV_REG_ADATA           (REGISTERS_BASE + 0x000c)
 #define WL1271_SLV_MEM_DATA            (REGISTERS_BASE + 0x0018)
 
-#define ACX_REG_INTERRUPT_TRIG         (REGISTERS_BASE + 0x0474)
-#define ACX_REG_INTERRUPT_TRIG_H       (REGISTERS_BASE + 0x0478)
+#define WL12XX_REG_INTERRUPT_TRIG         (REGISTERS_BASE + 0x0474)
+#define WL12XX_REG_INTERRUPT_TRIG_H       (REGISTERS_BASE + 0x0478)
 
 /*=============================================
   Host Interrupt Mask Register - 32bit (RW)
@@ -94,7 +86,7 @@
  21-			-
  Default: 0x0001
 *==============================================*/
-#define ACX_REG_INTERRUPT_MASK         (REGISTERS_BASE + 0x04DC)
+#define WL12XX_REG_INTERRUPT_MASK         (REGISTERS_BASE + 0x04DC)
 
 /*=============================================
   Host Interrupt Mask Set 16bit, (Write only)
@@ -125,7 +117,7 @@
  Reading this register doesn't
  effect its content.
 =============================================*/
-#define ACX_REG_INTERRUPT_NO_CLEAR     (REGISTERS_BASE + 0x04E8)
+#define WL12XX_REG_INTERRUPT_NO_CLEAR     (REGISTERS_BASE + 0x04E8)
 
 /*=============================================
   Host Interrupt Status Clear on Read  Register
@@ -148,9 +140,9 @@
  HINT_STS_ND registers, thus making the
  assotiated interrupt inactive. (0-no effect)
 ==============================================*/
-#define ACX_REG_INTERRUPT_ACK          (REGISTERS_BASE + 0x04F0)
+#define WL12XX_REG_INTERRUPT_ACK          (REGISTERS_BASE + 0x04F0)
 
-#define RX_DRIVER_COUNTER_ADDRESS      (REGISTERS_BASE + 0x0538)
+#define WL12XX_REG_RX_DRIVER_COUNTER	(REGISTERS_BASE + 0x0538)
 
 /* Device Configuration registers*/
 #define SOR_CFG                        (REGISTERS_BASE + 0x0800)
@@ -175,9 +167,9 @@
  1 halt eCPU
  0 enable eCPU
  ===============================================*/
-#define ACX_REG_ECPU_CONTROL           (REGISTERS_BASE + 0x0804)
+#define WL12XX_REG_ECPU_CONTROL           (REGISTERS_BASE + 0x0804)
 
-#define HI_CFG                         (REGISTERS_BASE + 0x0808)
+#define WL12XX_HI_CFG			(REGISTERS_BASE + 0x0808)
 
 /*===============================================
  EEPROM Burst Read Start  - 32bit RW
@@ -196,72 +188,67 @@
 *================================================*/
 #define ACX_REG_EE_START               (REGISTERS_BASE + 0x080C)
 
-#define OCP_POR_CTR                    (REGISTERS_BASE + 0x09B4)
-#define OCP_DATA_WRITE                 (REGISTERS_BASE + 0x09B8)
-#define OCP_DATA_READ                  (REGISTERS_BASE + 0x09BC)
-#define OCP_CMD                        (REGISTERS_BASE + 0x09C0)
+#define WL12XX_OCP_POR_CTR		(REGISTERS_BASE + 0x09B4)
+#define WL12XX_OCP_DATA_WRITE		(REGISTERS_BASE + 0x09B8)
+#define WL12XX_OCP_DATA_READ		(REGISTERS_BASE + 0x09BC)
+#define WL12XX_OCP_CMD			(REGISTERS_BASE + 0x09C0)
 
-#define WL1271_HOST_WR_ACCESS          (REGISTERS_BASE + 0x09F8)
+#define WL12XX_HOST_WR_ACCESS		(REGISTERS_BASE + 0x09F8)
 
-#define CHIP_ID_B                      (REGISTERS_BASE + 0x5674)
+#define WL12XX_CHIP_ID_B		(REGISTERS_BASE + 0x5674)
 
-#define CHIP_ID_1271_PG10              (0x4030101)
-#define CHIP_ID_1271_PG20              (0x4030111)
-#define CHIP_ID_1283_PG10              (0x05030101)
-#define CHIP_ID_1283_PG20              (0x05030111)
-
-#define ENABLE                         (REGISTERS_BASE + 0x5450)
+#define WL12XX_ENABLE			(REGISTERS_BASE + 0x5450)
 
 /* Power Management registers */
-#define ELP_CFG_MODE                   (REGISTERS_BASE + 0x5804)
-#define ELP_CMD                        (REGISTERS_BASE + 0x5808)
-#define PLL_CAL_TIME                   (REGISTERS_BASE + 0x5810)
-#define CLK_REQ_TIME                   (REGISTERS_BASE + 0x5814)
-#define CLK_BUF_TIME                   (REGISTERS_BASE + 0x5818)
+#define WL12XX_ELP_CFG_MODE		(REGISTERS_BASE + 0x5804)
+#define WL12XX_ELP_CMD			(REGISTERS_BASE + 0x5808)
+#define WL12XX_PLL_CAL_TIME		(REGISTERS_BASE + 0x5810)
+#define WL12XX_CLK_REQ_TIME		(REGISTERS_BASE + 0x5814)
+#define WL12XX_CLK_BUF_TIME		(REGISTERS_BASE + 0x5818)
 
-#define CFG_PLL_SYNC_CNT               (REGISTERS_BASE + 0x5820)
+#define WL12XX_CFG_PLL_SYNC_CNT		(REGISTERS_BASE + 0x5820)
 
 /* Scratch Pad registers*/
-#define SCR_PAD0                       (REGISTERS_BASE + 0x5608)
-#define SCR_PAD1                       (REGISTERS_BASE + 0x560C)
-#define SCR_PAD2                       (REGISTERS_BASE + 0x5610)
-#define SCR_PAD3                       (REGISTERS_BASE + 0x5614)
-#define SCR_PAD4                       (REGISTERS_BASE + 0x5618)
-#define SCR_PAD4_SET                   (REGISTERS_BASE + 0x561C)
-#define SCR_PAD4_CLR                   (REGISTERS_BASE + 0x5620)
-#define SCR_PAD5                       (REGISTERS_BASE + 0x5624)
-#define SCR_PAD5_SET                   (REGISTERS_BASE + 0x5628)
-#define SCR_PAD5_CLR                   (REGISTERS_BASE + 0x562C)
-#define SCR_PAD6                       (REGISTERS_BASE + 0x5630)
-#define SCR_PAD7                       (REGISTERS_BASE + 0x5634)
-#define SCR_PAD8                       (REGISTERS_BASE + 0x5638)
-#define SCR_PAD9                       (REGISTERS_BASE + 0x563C)
+#define WL12XX_SCR_PAD0			(REGISTERS_BASE + 0x5608)
+#define WL12XX_SCR_PAD1			(REGISTERS_BASE + 0x560C)
+#define WL12XX_SCR_PAD2			(REGISTERS_BASE + 0x5610)
+#define WL12XX_SCR_PAD3			(REGISTERS_BASE + 0x5614)
+#define WL12XX_SCR_PAD4			(REGISTERS_BASE + 0x5618)
+#define WL12XX_SCR_PAD4_SET		(REGISTERS_BASE + 0x561C)
+#define WL12XX_SCR_PAD4_CLR		(REGISTERS_BASE + 0x5620)
+#define WL12XX_SCR_PAD5			(REGISTERS_BASE + 0x5624)
+#define WL12XX_SCR_PAD5_SET		(REGISTERS_BASE + 0x5628)
+#define WL12XX_SCR_PAD5_CLR		(REGISTERS_BASE + 0x562C)
+#define WL12XX_SCR_PAD6			(REGISTERS_BASE + 0x5630)
+#define WL12XX_SCR_PAD7			(REGISTERS_BASE + 0x5634)
+#define WL12XX_SCR_PAD8			(REGISTERS_BASE + 0x5638)
+#define WL12XX_SCR_PAD9			(REGISTERS_BASE + 0x563C)
 
 /* Spare registers*/
-#define SPARE_A1                       (REGISTERS_BASE + 0x0994)
-#define SPARE_A2                       (REGISTERS_BASE + 0x0998)
-#define SPARE_A3                       (REGISTERS_BASE + 0x099C)
-#define SPARE_A4                       (REGISTERS_BASE + 0x09A0)
-#define SPARE_A5                       (REGISTERS_BASE + 0x09A4)
-#define SPARE_A6                       (REGISTERS_BASE + 0x09A8)
-#define SPARE_A7                       (REGISTERS_BASE + 0x09AC)
-#define SPARE_A8                       (REGISTERS_BASE + 0x09B0)
-#define SPARE_B1                       (REGISTERS_BASE + 0x5420)
-#define SPARE_B2                       (REGISTERS_BASE + 0x5424)
-#define SPARE_B3                       (REGISTERS_BASE + 0x5428)
-#define SPARE_B4                       (REGISTERS_BASE + 0x542C)
-#define SPARE_B5                       (REGISTERS_BASE + 0x5430)
-#define SPARE_B6                       (REGISTERS_BASE + 0x5434)
-#define SPARE_B7                       (REGISTERS_BASE + 0x5438)
-#define SPARE_B8                       (REGISTERS_BASE + 0x543C)
+#define WL12XX_SPARE_A1			(REGISTERS_BASE + 0x0994)
+#define WL12XX_SPARE_A2			(REGISTERS_BASE + 0x0998)
+#define WL12XX_SPARE_A3			(REGISTERS_BASE + 0x099C)
+#define WL12XX_SPARE_A4			(REGISTERS_BASE + 0x09A0)
+#define WL12XX_SPARE_A5			(REGISTERS_BASE + 0x09A4)
+#define WL12XX_SPARE_A6			(REGISTERS_BASE + 0x09A8)
+#define WL12XX_SPARE_A7			(REGISTERS_BASE + 0x09AC)
+#define WL12XX_SPARE_A8			(REGISTERS_BASE + 0x09B0)
+#define WL12XX_SPARE_B1			(REGISTERS_BASE + 0x5420)
+#define WL12XX_SPARE_B2			(REGISTERS_BASE + 0x5424)
+#define WL12XX_SPARE_B3			(REGISTERS_BASE + 0x5428)
+#define WL12XX_SPARE_B4			(REGISTERS_BASE + 0x542C)
+#define WL12XX_SPARE_B5			(REGISTERS_BASE + 0x5430)
+#define WL12XX_SPARE_B6			(REGISTERS_BASE + 0x5434)
+#define WL12XX_SPARE_B7			(REGISTERS_BASE + 0x5438)
+#define WL12XX_SPARE_B8			(REGISTERS_BASE + 0x543C)
 
-#define PLL_PARAMETERS                 (REGISTERS_BASE + 0x6040)
-#define WU_COUNTER_PAUSE               (REGISTERS_BASE + 0x6008)
-#define WELP_ARM_COMMAND               (REGISTERS_BASE + 0x6100)
-#define DRPW_SCRATCH_START             (DRPW_BASE + 0x002C)
+#define WL12XX_PLL_PARAMETERS		(REGISTERS_BASE + 0x6040)
+#define WL12XX_WU_COUNTER_PAUSE		(REGISTERS_BASE + 0x6008)
+#define WL12XX_WELP_ARM_COMMAND		(REGISTERS_BASE + 0x6100)
+#define WL12XX_DRPW_SCRATCH_START	(DRPW_BASE + 0x002C)
 
+#define WL12XX_CMD_MBOX_ADDRESS		0x407B4
 
-#define ACX_SLV_SOFT_RESET_BIT   BIT(1)
 #define ACX_REG_EEPROM_START_BIT BIT(1)
 
 /* Command/Information Mailbox Pointers */
@@ -279,7 +266,7 @@
  the host receives the Init Complete interrupt from
  the Wlan hardware.
  ===============================================*/
-#define REG_COMMAND_MAILBOX_PTR				(SCR_PAD0)
+#define WL12XX_REG_COMMAND_MAILBOX_PTR		(WL12XX_SCR_PAD0)
 
 /*===============================================
   Information Mailbox Pointer - 32bit RW
@@ -294,7 +281,7 @@
  until after the host receives the Init Complete interrupt from
  the Wlan hardware.
  ===============================================*/
-#define REG_EVENT_MAILBOX_PTR				(SCR_PAD1)
+#define WL12XX_REG_EVENT_MAILBOX_PTR		(WL12XX_SCR_PAD1)
 
 /*===============================================
  EEPROM Read/Write Request 32bit RW
@@ -365,26 +352,6 @@
 #define ACX_CONT_WIND_MIN_MASK   0x0000007f
 #define ACX_CONT_WIND_MAX        0x03ff0000
 
-/*===============================================
-  HI_CFG Interface Configuration Register Values
-  ------------------------------------------
-  ===============================================*/
-#define HI_CFG_UART_ENABLE          0x00000004
-#define HI_CFG_RST232_ENABLE        0x00000008
-#define HI_CFG_CLOCK_REQ_SELECT     0x00000010
-#define HI_CFG_HOST_INT_ENABLE      0x00000020
-#define HI_CFG_VLYNQ_OUTPUT_ENABLE  0x00000040
-#define HI_CFG_HOST_INT_ACTIVE_LOW  0x00000080
-#define HI_CFG_UART_TX_OUT_GPIO_15  0x00000100
-#define HI_CFG_UART_TX_OUT_GPIO_14  0x00000200
-#define HI_CFG_UART_TX_OUT_GPIO_7   0x00000400
-
-#define HI_CFG_DEF_VAL              \
-	(HI_CFG_UART_ENABLE |        \
-	HI_CFG_RST232_ENABLE |      \
-	HI_CFG_CLOCK_REQ_SELECT |   \
-	HI_CFG_HOST_INT_ENABLE)
-
 #define REF_FREQ_19_2                       0
 #define REF_FREQ_26_0                       1
 #define REF_FREQ_38_4                       2
@@ -400,38 +367,19 @@
 #define LUT_PARAM_BB_PLL_LOOP_FILTER        5
 #define LUT_PARAM_NUM                       6
 
-#define ACX_EEPROMLESS_IND_REG              (SCR_PAD4)
+#define WL12XX_EEPROMLESS_IND		(WL12XX_SCR_PAD4)
 #define USE_EEPROM                          0
-#define SOFT_RESET_MAX_TIME                 1000000
-#define SOFT_RESET_STALL_TIME               1000
 #define NVS_DATA_BUNDARY_ALIGNMENT          4
 
-
-/* Firmware image load chunk size */
-#define CHUNK_SIZE	16384
-
 /* Firmware image header size */
 #define FW_HDR_SIZE 8
 
-#define ECPU_CONTROL_HALT					0x00000101
-
-
 /******************************************************************************
 
     CHANNELS, BAND & REG DOMAINS definitions
 
 ******************************************************************************/
 
-
-enum {
-	RADIO_BAND_2_4GHZ = 0,  /* 2.4 Ghz band */
-	RADIO_BAND_5GHZ = 1,    /* 5 Ghz band */
-	RADIO_BAND_JAPAN_4_9_GHZ = 2,
-	DEFAULT_BAND = RADIO_BAND_2_4GHZ,
-	INVALID_BAND = 0xFE,
-	MAX_RADIO_BANDS = 0xFF
-};
-
 #define SHORT_PREAMBLE_BIT   BIT(0) /* CCK or Barker depending on the rate */
 #define OFDM_RATE_BIT        BIT(6)
 #define PBCC_RATE_BIT        BIT(7)
@@ -465,14 +413,82 @@
 
 ******************************************************************************/
 
+#define OCP_CMD_LOOP		32
+#define OCP_CMD_WRITE		0x1
+#define OCP_CMD_READ		0x2
+#define OCP_READY_MASK		BIT(18)
+#define OCP_STATUS_MASK		(BIT(16) | BIT(17))
+#define OCP_STATUS_NO_RESP	0x00000
+#define OCP_STATUS_OK		0x10000
+#define OCP_STATUS_REQ_FAILED	0x20000
+#define OCP_STATUS_RESP_ERROR	0x30000
 
-/*************************************************************************
+#define OCP_REG_POLARITY     0x0064
+#define OCP_REG_CLK_TYPE     0x0448
+#define OCP_REG_CLK_POLARITY 0x0cb2
+#define OCP_REG_CLK_PULL     0x0cb4
 
-    Interrupt Trigger Register (Host -> WiLink)
+#define POLARITY_LOW         BIT(1)
+#define NO_PULL              (BIT(14) | BIT(15))
 
-**************************************************************************/
+#define FREF_CLK_TYPE_BITS     0xfffffe7f
+#define CLK_REQ_PRCM           0x100
+#define FREF_CLK_POLARITY_BITS 0xfffff8ff
+#define CLK_REQ_OUTN_SEL       0x700
 
-/* Hardware to Embedded CPU Interrupts - first 32-bit register set */
+#define WU_COUNTER_PAUSE_VAL 0x3FF
+
+/* PLL configuration algorithm for wl128x */
+#define SYS_CLK_CFG_REG              0x2200
+/* Bit[0]   -  0-TCXO,  1-FREF */
+#define MCS_PLL_CLK_SEL_FREF         BIT(0)
+/* Bit[3:2] - 01-TCXO, 10-FREF */
+#define WL_CLK_REQ_TYPE_FREF         BIT(3)
+#define WL_CLK_REQ_TYPE_PG2          (BIT(3) | BIT(2))
+/* Bit[4]   -  0-TCXO,  1-FREF */
+#define PRCM_CM_EN_MUX_WLAN_FREF     BIT(4)
+
+#define TCXO_ILOAD_INT_REG           0x2264
+#define TCXO_CLK_DETECT_REG          0x2266
+
+#define TCXO_DET_FAILED              BIT(4)
+
+#define FREF_ILOAD_INT_REG           0x2084
+#define FREF_CLK_DETECT_REG          0x2086
+#define FREF_CLK_DETECT_FAIL         BIT(4)
+
+/* Use this reg for masking during driver access */
+#define WL_SPARE_REG                 0x2320
+#define WL_SPARE_VAL                 BIT(2)
+/* Bit[6:5:3] -  mask wl write SYS_CLK_CFG[8:5:2:4] */
+#define WL_SPARE_MASK_8526           (BIT(6) | BIT(5) | BIT(3))
+
+#define PLL_LOCK_COUNTERS_REG        0xD8C
+#define PLL_LOCK_COUNTERS_COEX       0x0F
+#define PLL_LOCK_COUNTERS_MCS        0xF0
+#define MCS_PLL_OVERRIDE_REG         0xD90
+#define MCS_PLL_CONFIG_REG           0xD92
+#define MCS_SEL_IN_FREQ_MASK         0x0070
+#define MCS_SEL_IN_FREQ_SHIFT        4
+#define MCS_PLL_CONFIG_REG_VAL       0x73
+#define MCS_PLL_ENABLE_HP            (BIT(0) | BIT(1))
+
+#define MCS_PLL_M_REG                0xD94
+#define MCS_PLL_N_REG                0xD96
+#define MCS_PLL_M_REG_VAL            0xC8
+#define MCS_PLL_N_REG_VAL            0x07
+
+#define SDIO_IO_DS                   0xd14
+
+/* SDIO/wSPI DS configuration values */
+enum {
+	HCI_IO_DS_8MA = 0,
+	HCI_IO_DS_4MA = 1, /* default */
+	HCI_IO_DS_6MA = 2,
+	HCI_IO_DS_2MA = 3,
+};
+
+/* end PLL configuration algorithm for wl128x */
 
 /*
  * Host Command Interrupt. Setting this bit masks
@@ -480,7 +496,7 @@
  * the FW that it has sent a command
  * to the Wlan hardware Command Mailbox.
  */
-#define INTR_TRIG_CMD       BIT(0)
+#define WL12XX_INTR_TRIG_CMD		BIT(0)
 
 /*
  * Host Event Acknowlegde Interrupt. The host
@@ -488,42 +504,27 @@
  * the unsolicited information from the event
  * mailbox.
  */
-#define INTR_TRIG_EVENT_ACK BIT(1)
+#define WL12XX_INTR_TRIG_EVENT_ACK	BIT(1)
 
-/*
- * The host sets this bit to inform the Wlan
- * FW that a TX packet is in the XFER
- * Buffer #0.
- */
-#define INTR_TRIG_TX_PROC0 BIT(2)
+/*===============================================
+  HI_CFG Interface Configuration Register Values
+  ------------------------------------------
+  ===============================================*/
+#define HI_CFG_UART_ENABLE          0x00000004
+#define HI_CFG_RST232_ENABLE        0x00000008
+#define HI_CFG_CLOCK_REQ_SELECT     0x00000010
+#define HI_CFG_HOST_INT_ENABLE      0x00000020
+#define HI_CFG_VLYNQ_OUTPUT_ENABLE  0x00000040
+#define HI_CFG_HOST_INT_ACTIVE_LOW  0x00000080
+#define HI_CFG_UART_TX_OUT_GPIO_15  0x00000100
+#define HI_CFG_UART_TX_OUT_GPIO_14  0x00000200
+#define HI_CFG_UART_TX_OUT_GPIO_7   0x00000400
 
-/*
- * The host sets this bit to inform the FW
- * that it read a packet from RX XFER
- * Buffer #0.
- */
-#define INTR_TRIG_RX_PROC0 BIT(3)
-
-#define INTR_TRIG_DEBUG_ACK BIT(4)
-
-#define INTR_TRIG_STATE_CHANGED BIT(5)
-
-
-/* Hardware to Embedded CPU Interrupts - second 32-bit register set */
-
-/*
- * The host sets this bit to inform the FW
- * that it read a packet from RX XFER
- * Buffer #1.
- */
-#define INTR_TRIG_RX_PROC1 BIT(17)
-
-/*
- * The host sets this bit to inform the Wlan
- * hardware that a TX packet is in the XFER
- * Buffer #1.
- */
-#define INTR_TRIG_TX_PROC1 BIT(18)
+#define HI_CFG_DEF_VAL              \
+	(HI_CFG_UART_ENABLE |        \
+	HI_CFG_RST232_ENABLE |      \
+	HI_CFG_CLOCK_REQ_SELECT |   \
+	HI_CFG_HOST_INT_ENABLE)
 
 #define WL127X_REG_FUSE_DATA_2_1	0x050a
 #define WL128X_REG_FUSE_DATA_2_1	0x2152
diff --git a/drivers/net/wireless/wl12xx/debugfs.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
similarity index 64%
copy from drivers/net/wireless/wl12xx/debugfs.h
copy to drivers/net/wireless/ti/wl12xx/wl12xx.h
index 254c5b2..74cd332 100644
--- a/drivers/net/wireless/wl12xx/debugfs.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -1,9 +1,7 @@
 /*
- * This file is part of wl1271
+ * This file is part of wl12xx
  *
- * Copyright (C) 2009 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ * Copyright (C) 2011 Texas Instruments Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -21,13 +19,13 @@
  *
  */
 
-#ifndef __DEBUGFS_H__
-#define __DEBUGFS_H__
+#ifndef __WL12XX_PRIV_H__
+#define __WL12XX_PRIV_H__
 
-#include "wl12xx.h"
+#include "conf.h"
 
-int wl1271_debugfs_init(struct wl1271 *wl);
-void wl1271_debugfs_exit(struct wl1271 *wl);
-void wl1271_debugfs_reset(struct wl1271 *wl);
+struct wl12xx_priv {
+	struct wl12xx_priv_conf conf;
+};
 
-#endif /* WL1271_DEBUGFS_H */
+#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
new file mode 100644
index 0000000..9d04c38
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -0,0 +1,41 @@
+config WLCORE
+	tristate "TI wlcore support"
+	depends on WL_TI && GENERIC_HARDIRQS
+	depends on INET
+	select FW_LOADER
+	---help---
+	  This module contains the main code for TI WLAN chips.  It abstracts
+	  hardware-specific differences among different chipset families.
+	  Each chipset family needs to implement its own lower-level module
+	  that will depend on this module for the common code.
+
+	  If you choose to build a module, it will be called wlcore. Say N if
+	  unsure.
+
+config WLCORE_SPI
+	tristate "TI wlcore SPI support"
+	depends on WLCORE && SPI_MASTER
+	select CRC7
+	---help---
+	  This module adds support for the SPI interface of adapters using
+	  TI WLAN chipsets.  Select this if your platform is using
+	  the SPI bus.
+
+	  If you choose to build a module, it'll be called wlcore_spi.
+	  Say N if unsure.
+
+config WLCORE_SDIO
+	tristate "TI wlcore SDIO support"
+	depends on WLCORE && MMC
+	---help---
+	  This module adds support for the SDIO interface of adapters using
+	  TI WLAN chipsets.  Select this if your platform is using
+	  the SDIO bus.
+
+	  If you choose to build a module, it'll be called wlcore_sdio.
+	  Say N if unsure.
+
+config WL12XX_PLATFORM_DATA
+	bool
+	depends on WLCORE_SDIO != n || WL1251_SDIO != n
+	default y
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
new file mode 100644
index 0000000..d9fba9e
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -0,0 +1,15 @@
+wlcore-objs		= main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
+			  boot.o init.o debugfs.o scan.o
+
+wlcore_spi-objs 	= spi.o
+wlcore_sdio-objs	= sdio.o
+
+wlcore-$(CONFIG_NL80211_TESTMODE)	+= testmode.o
+obj-$(CONFIG_WLCORE)			+= wlcore.o
+obj-$(CONFIG_WLCORE_SPI)		+= wlcore_spi.o
+obj-$(CONFIG_WLCORE_SDIO)		+= wlcore_sdio.o
+
+# small builtin driver bit
+obj-$(CONFIG_WL12XX_PLATFORM_DATA)	+= wl12xx_platform_data.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
similarity index 97%
rename from drivers/net/wireless/wl12xx/acx.c
rename to drivers/net/wireless/ti/wlcore/acx.c
index bc96db0..5912541 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -28,11 +28,11 @@
 #include <linux/spi/spi.h>
 #include <linux/slab.h>
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "debug.h"
 #include "wl12xx_80211.h"
-#include "reg.h"
 #include "ps.h"
+#include "hw_ops.h"
 
 int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 				  u8 wake_up_event, u8 listen_interval)
@@ -757,7 +757,10 @@
 
 	/* configure one AP supported rate class */
 	acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
-	acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set);
+
+	/* the AP policy is HW specific */
+	acx->rate_policy.enabled_rates =
+		cpu_to_le32(wlcore_hw_sta_get_ap_rate_mask(wl, wlvif));
 	acx->rate_policy.short_retry_limit = c->short_retry_limit;
 	acx->rate_policy.long_retry_limit = c->long_retry_limit;
 	acx->rate_policy.aflags = c->aflags;
@@ -969,17 +972,14 @@
 		goto out;
 	}
 
-	if (wl->chip.id == CHIP_ID_1283_PG20)
-		mem = &wl->conf.mem_wl128x;
-	else
-		mem = &wl->conf.mem_wl127x;
+	mem = &wl->conf.mem;
 
 	/* memory config */
 	mem_conf->num_stations = mem->num_stations;
 	mem_conf->rx_mem_block_num = mem->rx_block_num;
 	mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
 	mem_conf->num_ssid_profiles = mem->ssid_profiles;
-	mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
+	mem_conf->total_tx_descriptors = cpu_to_le32(wl->num_tx_desc);
 	mem_conf->dyn_mem_enable = mem->dynamic_memory;
 	mem_conf->tx_free_req = mem->min_req_tx_blocks;
 	mem_conf->rx_free_req = mem->min_req_rx_blocks;
@@ -998,32 +998,6 @@
 	return ret;
 }
 
-int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap)
-{
-	struct wl1271_acx_host_config_bitmap *bitmap_conf;
-	int ret;
-
-	bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
-	if (!bitmap_conf) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
-
-	ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
-				   bitmap_conf, sizeof(*bitmap_conf));
-	if (ret < 0) {
-		wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
-		goto out;
-	}
-
-out:
-	kfree(bitmap_conf);
-
-	return ret;
-}
-
 int wl1271_acx_init_mem_config(struct wl1271 *wl)
 {
 	int ret;
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
similarity index 99%
rename from drivers/net/wireless/wl12xx/acx.h
rename to drivers/net/wireless/ti/wlcore/acx.h
index a28fc04..b2f8883 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -25,7 +25,7 @@
 #ifndef __ACX_H__
 #define __ACX_H__
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "cmd.h"
 
 /*************************************************************************
@@ -824,16 +824,11 @@
 	__le32 period;
 } __packed;
 
+/* TODO: maybe this needs to be moved somewhere else? */
 #define HOST_IF_CFG_RX_FIFO_ENABLE     BIT(0)
 #define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1)
 #define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3)
 
-struct wl1271_acx_host_config_bitmap {
-	struct acx_header header;
-
-	__le32 host_cfg_bitmap;
-} __packed;
-
 enum {
 	WL1271_ACX_TRIG_TYPE_LEVEL = 0,
 	WL1271_ACX_TRIG_TYPE_EDGE,
@@ -1274,7 +1269,6 @@
 int wl1271_acx_tx_config_options(struct wl1271 *wl);
 int wl12xx_acx_mem_cfg(struct wl1271 *wl);
 int wl1271_acx_init_mem_config(struct wl1271 *wl);
-int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
 int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
 int wl1271_acx_smart_reflex(struct wl1271 *wl);
 int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
new file mode 100644
index 0000000..3a2207d
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -0,0 +1,443 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/wl12xx.h>
+#include <linux/export.h>
+
+#include "debug.h"
+#include "acx.h"
+#include "boot.h"
+#include "io.h"
+#include "event.h"
+#include "rx.h"
+#include "hw_ops.h"
+
+static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
+{
+	u32 cpu_ctrl;
+
+	/* 10.5.0 run the firmware (I) */
+	cpu_ctrl = wlcore_read_reg(wl, REG_ECPU_CONTROL);
+
+	/* 10.5.1 run the firmware (II) */
+	cpu_ctrl |= flag;
+	wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl);
+}
+
+static int wlcore_parse_fw_ver(struct wl1271 *wl)
+{
+	int ret;
+
+	ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
+		     &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
+		     &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
+		     &wl->chip.fw_ver[4]);
+
+	if (ret != 5) {
+		wl1271_warning("fw version incorrect value");
+		memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
+		return -EINVAL;
+	}
+
+	ret = wlcore_identify_fw(wl);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int wlcore_boot_fw_version(struct wl1271 *wl)
+{
+	struct wl1271_static_data *static_data;
+	int ret;
+
+	static_data = kmalloc(sizeof(*static_data), GFP_DMA);
+	if (!static_data) {
+		wl1271_error("Couldn't allocate memory for static data!");
+		return -ENOMEM;
+	}
+
+	wl1271_read(wl, wl->cmd_box_addr, static_data, sizeof(*static_data),
+		    false);
+
+	strncpy(wl->chip.fw_ver_str, static_data->fw_version,
+		sizeof(wl->chip.fw_ver_str));
+
+	kfree(static_data);
+
+	/* make sure the string is NULL-terminated */
+	wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+
+	ret = wlcore_parse_fw_ver(wl);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
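+/*
+ * Upload a single firmware chunk, sliding the download partition window
+ * over the destination address as needed and writing CHUNK_SIZE bytes at
+ * a time; the remainder is written last.
+ */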
+static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
+					     size_t fw_data_len, u32 dest)
+{
+	struct wlcore_partition_set partition;
+	int addr, chunk_num, partition_limit;
+	u8 *p, *chunk;
+
+	/* whal_FwCtrl_LoadFwImageSm() */
+
+	wl1271_debug(DEBUG_BOOT, "starting firmware upload");
+
+	wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
+		     fw_data_len, CHUNK_SIZE);
+
+	if ((fw_data_len % 4) != 0) {
+		wl1271_error("firmware length not multiple of four");
+		return -EIO;
+	}
+
+	chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
+	if (!chunk) {
+		wl1271_error("allocation for firmware upload chunk failed");
+		return -ENOMEM;
+	}
+
+	memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition));
+	partition.mem.start = dest;
+	wlcore_set_partition(wl, &partition);
+
+	/* 10.1 set partition limit and chunk num */
+	chunk_num = 0;
+	partition_limit = wl->ptable[PART_DOWN].mem.size;
+
+	while (chunk_num < fw_data_len / CHUNK_SIZE) {
+		/* 10.2 update partition, if needed */
+		addr = dest + (chunk_num + 2) * CHUNK_SIZE;
+		if (addr > partition_limit) {
+			addr = dest + chunk_num * CHUNK_SIZE;
+			partition_limit = chunk_num * CHUNK_SIZE +
+				wl->ptable[PART_DOWN].mem.size;
+			partition.mem.start = addr;
+			wlcore_set_partition(wl, &partition);
+		}
+
+		/* 10.3 upload the chunk */
+		addr = dest + chunk_num * CHUNK_SIZE;
+		p = buf + chunk_num * CHUNK_SIZE;
+		memcpy(chunk, p, CHUNK_SIZE);
+		wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
+			     p, addr);
+		wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
+
+		chunk_num++;
+	}
+
+	/* 10.4 upload the last chunk */
+	addr = dest + chunk_num * CHUNK_SIZE;
+	p = buf + chunk_num * CHUNK_SIZE;
+	memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
+	wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
+		     fw_data_len % CHUNK_SIZE, p, addr);
+	wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
+
+	kfree(chunk);
+	return 0;
+}
+
+int wlcore_boot_upload_firmware(struct wl1271 *wl)
+{
+	u32 chunks, addr, len;
+	int ret = 0;
+	u8 *fw;
+
+	fw = wl->fw;
+	chunks = be32_to_cpup((__be32 *) fw);
+	fw += sizeof(u32);
+
+	wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
+
+	while (chunks--) {
+		addr = be32_to_cpup((__be32 *) fw);
+		fw += sizeof(u32);
+		len = be32_to_cpup((__be32 *) fw);
+		fw += sizeof(u32);
+
+		if (len > 300000) {
+			wl1271_info("firmware chunk too long: %u", len);
+			return -EINVAL;
+		}
+		wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
+			     chunks, addr, len);
+		ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
+		if (ret != 0)
+			break;
+		fw += len;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wlcore_boot_upload_firmware);
+
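+/*
+ * Upload the NVS file: patch in the current MAC address, burst-write the
+ * initial register settings and finally copy the NVS tables to the chip.
+ */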
+int wlcore_boot_upload_nvs(struct wl1271 *wl)
+{
+	size_t nvs_len, burst_len;
+	int i;
+	u32 dest_addr, val;
+	u8 *nvs_ptr, *nvs_aligned;
+
+	if (wl->nvs == NULL)
+		return -ENODEV;
+
+	if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
+		struct wl1271_nvs_file *nvs =
+			(struct wl1271_nvs_file *)wl->nvs;
+		/*
+		 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz
+		 * band configurations) can be removed when those NVS files stop
+		 * floating around.
+		 */
+		if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
+		    wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
+			if (nvs->general_params.dual_mode_select)
+				wl->enable_11a = true;
+		}
+
+		if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
+		    (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
+		     wl->enable_11a)) {
+			wl1271_error("nvs size is not as expected: %zu != %zu",
+				wl->nvs_len, sizeof(struct wl1271_nvs_file));
+			kfree(wl->nvs);
+			wl->nvs = NULL;
+			wl->nvs_len = 0;
+			return -EILSEQ;
+		}
+
+		/* only the first part of the NVS needs to be uploaded */
+		nvs_len = sizeof(nvs->nvs);
+		nvs_ptr = (u8 *) nvs->nvs;
+	} else {
+		struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
+
+		if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) {
+			if (nvs->general_params.dual_mode_select)
+				wl->enable_11a = true;
+		} else {
+			wl1271_error("nvs size is not as expected: %zu != %zu",
+				     wl->nvs_len,
+				     sizeof(struct wl128x_nvs_file));
+			kfree(wl->nvs);
+			wl->nvs = NULL;
+			wl->nvs_len = 0;
+			return -EILSEQ;
+		}
+
+		/* only the first part of the NVS needs to be uploaded */
+		nvs_len = sizeof(nvs->nvs);
+		nvs_ptr = (u8 *)nvs->nvs;
+	}
+
+	/* update current MAC address to NVS */
+	nvs_ptr[11] = wl->addresses[0].addr[0];
+	nvs_ptr[10] = wl->addresses[0].addr[1];
+	nvs_ptr[6] = wl->addresses[0].addr[2];
+	nvs_ptr[5] = wl->addresses[0].addr[3];
+	nvs_ptr[4] = wl->addresses[0].addr[4];
+	nvs_ptr[3] = wl->addresses[0].addr[5];
+
+	/*
+	 * Layout before the actual NVS tables:
+	 * 1 byte : burst length.
+	 * 2 bytes: destination address.
+	 * n bytes: data to burst copy.
+	 *
+	 * This is ended by a 0 length, then the NVS tables.
+	 */
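+	/*
+	 * Illustrative (made-up) example, for reference only: the bytes
+	 * 02 1c 30 aa bb cc dd ee ff 11 22 describe a burst of two 32-bit
+	 * words -- 0xddccbbaa and 0x2211ffee -- written to register offset
+	 * 0x301c (plus the current register partition start); a following
+	 * 00 length byte would terminate the burst list.
+	 */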
+
+	/* FIXME: Do we need to check here whether the LSB is 1? */
+	while (nvs_ptr[0]) {
+		burst_len = nvs_ptr[0];
+		dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));
+
+		/*
+		 * The address translation (wlcore_translate_addr) expects
+		 * absolute chip addresses, so we need to add the register
+		 * partition start address to the destination.
+		 */
+		dest_addr += wl->curr_part.reg.start;
+
+		/* We move our pointer to the data */
+		nvs_ptr += 3;
+
+		for (i = 0; i < burst_len; i++) {
+			if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
+				goto out_badnvs;
+
+			val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
+			       | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
+
+			wl1271_debug(DEBUG_BOOT,
+				     "nvs burst write 0x%x: 0x%x",
+				     dest_addr, val);
+			wl1271_write32(wl, dest_addr, val);
+
+			nvs_ptr += 4;
+			dest_addr += 4;
+		}
+
+		if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+			goto out_badnvs;
+	}
+
+	/*
+	 * We've reached the first zero length; the first NVS table is
+	 * located at an aligned offset that is at least 7 bytes further.
+	 * NOTE: the wl->nvs->nvs element must come first; to simplify the
+	 * casting, we assume it sits at the beginning of the wl->nvs
+	 * structure.
+	 */
+	nvs_ptr = (u8 *)wl->nvs +
+			ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
+
+	if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+		goto out_badnvs;
+
+	nvs_len -= nvs_ptr - (u8 *)wl->nvs;
+
+	/* Now we must set the partition correctly */
+	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+
+	/* Copy the NVS tables to a new block to ensure alignment */
+	nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
+	if (!nvs_aligned)
+		return -ENOMEM;
+
+	/* And finally we upload the NVS tables */
+	wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS,
+			  nvs_aligned, nvs_len, false);
+
+	kfree(nvs_aligned);
+	return 0;
+
+out_badnvs:
+	wl1271_error("nvs data is malformed");
+	return -EILSEQ;
+}
+EXPORT_SYMBOL_GPL(wlcore_boot_upload_nvs);
+
+int wlcore_boot_run_firmware(struct wl1271 *wl)
+{
+	int loop, ret;
+	u32 chip_id, intr;
+
+	/* Make sure we have the boot partition */
+	wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+
+	wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
+
+	chip_id = wlcore_read_reg(wl, REG_CHIP_ID_B);
+
+	wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
+
+	if (chip_id != wl->chip.id) {
+		wl1271_error("chip id doesn't match after firmware boot");
+		return -EIO;
+	}
+
+	/* wait for init to complete */
+	loop = 0;
+	while (loop++ < INIT_LOOP) {
+		udelay(INIT_LOOP_DELAY);
+		intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
+
+		if (intr == 0xffffffff) {
+			wl1271_error("error reading hardware complete "
+				     "init indication");
+			return -EIO;
+		}
+		/* check that ACX_INTR_INIT_COMPLETE is enabled */
+		else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
+			wlcore_write_reg(wl, REG_INTERRUPT_ACK,
+					 WL1271_ACX_INTR_INIT_COMPLETE);
+			break;
+		}
+	}
+
+	if (loop > INIT_LOOP) {
+		wl1271_error("timeout waiting for the hardware to "
+			     "complete initialization");
+		return -EIO;
+	}
+
+	/* get hardware config command mail box */
+	wl->cmd_box_addr = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR);
+
+	wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr);
+
+	/* get hardware config event mail box */
+	wl->mbox_ptr[0] = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR);
+	wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
+
+	wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
+		     wl->mbox_ptr[0], wl->mbox_ptr[1]);
+
+	ret = wlcore_boot_fw_version(wl);
+	if (ret < 0) {
+		wl1271_error("couldn't boot firmware");
+		return ret;
+	}
+
+	/*
+	 * In case of full asynchronous mode, the firmware must be ready to
+	 * receive events from the command mailbox.
+	 */
+
+	/* unmask required mbox events  */
+	wl->event_mask = BSS_LOSE_EVENT_ID |
+		SCAN_COMPLETE_EVENT_ID |
+		ROLE_STOP_COMPLETE_EVENT_ID |
+		RSSI_SNR_TRIGGER_0_EVENT_ID |
+		PSPOLL_DELIVERY_FAILURE_EVENT_ID |
+		SOFT_GEMINI_SENSE_EVENT_ID |
+		PERIODIC_SCAN_REPORT_EVENT_ID |
+		PERIODIC_SCAN_COMPLETE_EVENT_ID |
+		DUMMY_PACKET_EVENT_ID |
+		PEER_REMOVE_COMPLETE_EVENT_ID |
+		BA_SESSION_RX_CONSTRAINT_EVENT_ID |
+		REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
+		INACTIVE_STA_EVENT_ID |
+		MAX_TX_RETRY_EVENT_ID |
+		CHANNEL_SWITCH_COMPLETE_EVENT_ID;
+
+	ret = wl1271_event_unmask(wl);
+	if (ret < 0) {
+		wl1271_error("EVENT mask setting failed");
+		return ret;
+	}
+
+	/* set the working partition to its "running" mode offset */
+	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+
+	/* firmware startup completed */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware);
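+
+/*
+ * Sketch of how a lower driver is expected to chain the helpers above
+ * (illustrative ordering; the chip-specific boot code decides when each
+ * step runs relative to its own setup and error handling):
+ *
+ *	ret = wlcore_boot_upload_nvs(wl);
+ *	if (ret < 0)
+ *		goto out;
+ *	ret = wlcore_boot_upload_firmware(wl);
+ *	if (ret < 0)
+ *		goto out;
+ *	ret = wlcore_boot_run_firmware(wl);
+ */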
diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h
new file mode 100644
index 0000000..094981d
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/boot.h
@@ -0,0 +1,54 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __BOOT_H__
+#define __BOOT_H__
+
+#include "wlcore.h"
+
+int wlcore_boot_upload_firmware(struct wl1271 *wl);
+int wlcore_boot_upload_nvs(struct wl1271 *wl);
+int wlcore_boot_run_firmware(struct wl1271 *wl);
+
+#define WL1271_NO_SUBBANDS 8
+#define WL1271_NO_POWER_LEVELS 4
+#define WL1271_FW_VERSION_MAX_LEN 20
+
+struct wl1271_static_data {
+	u8 mac_address[ETH_ALEN];
+	u8 padding[2];
+	u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
+	u32 hw_version;
+	u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
+};
+
+/* number of times we try to read the INIT interrupt */
+#define INIT_LOOP 20000
+
+/* delay between retries */
+#define INIT_LOOP_DELAY 50
+
+#define WU_COUNTER_PAUSE_VAL 0x3FF
+#define WELP_ARM_COMMAND_VAL 0x4
+
+#endif
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
similarity index 84%
rename from drivers/net/wireless/wl12xx/cmd.c
rename to drivers/net/wireless/ti/wlcore/cmd.c
index 3414fc1..5c4716c 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -28,9 +28,8 @@
 #include <linux/ieee80211.h>
 #include <linux/slab.h>
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "debug.h"
-#include "reg.h"
 #include "io.h"
 #include "acx.h"
 #include "wl12xx_80211.h"
@@ -67,11 +66,15 @@
 
 	wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
 
-	wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
+	/*
+	 * TODO: we just need this because one bit is in a different
+	 * place.  Is there any better way?
+	 */
+	wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
 
 	timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
 
-	intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+	intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
 	while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
 		if (time_after(jiffies, timeout)) {
 			wl1271_error("command complete timeout");
@@ -85,7 +88,7 @@
 		else
 			msleep(1);
 
-		intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+		intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
 	}
 
 	/* read back the status code of the command */
@@ -100,8 +103,7 @@
 		goto fail;
 	}
 
-	wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
-		       WL1271_ACX_INTR_CMD_COMPLETE);
+	wlcore_write_reg(wl, REG_INTERRUPT_ACK, WL1271_ACX_INTR_CMD_COMPLETE);
 	return 0;
 
 fail:
@@ -110,240 +112,18 @@
 	return ret;
 }
 
-int wl1271_cmd_general_parms(struct wl1271 *wl)
-{
-	struct wl1271_general_parms_cmd *gen_parms;
-	struct wl1271_ini_general_params *gp =
-		&((struct wl1271_nvs_file *)wl->nvs)->general_params;
-	bool answer = false;
-	int ret;
-
-	if (!wl->nvs)
-		return -ENODEV;
-
-	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
-		wl1271_warning("FEM index from INI out of bounds");
-		return -EINVAL;
-	}
-
-	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
-	if (!gen_parms)
-		return -ENOMEM;
-
-	gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
-
-	memcpy(&gen_parms->general_params, gp, sizeof(*gp));
-
-	if (gp->tx_bip_fem_auto_detect)
-		answer = true;
-
-	/* Override the REF CLK from the NVS with the one from platform data */
-	gen_parms->general_params.ref_clock = wl->ref_clock;
-
-	ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
-	if (ret < 0) {
-		wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
-		goto out;
-	}
-
-	gp->tx_bip_fem_manufacturer =
-		gen_parms->general_params.tx_bip_fem_manufacturer;
-
-	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
-		wl1271_warning("FEM index from FW out of bounds");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
-		     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
-
-out:
-	kfree(gen_parms);
-	return ret;
-}
-
-int wl128x_cmd_general_parms(struct wl1271 *wl)
-{
-	struct wl128x_general_parms_cmd *gen_parms;
-	struct wl128x_ini_general_params *gp =
-		&((struct wl128x_nvs_file *)wl->nvs)->general_params;
-	bool answer = false;
-	int ret;
-
-	if (!wl->nvs)
-		return -ENODEV;
-
-	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
-		wl1271_warning("FEM index from ini out of bounds");
-		return -EINVAL;
-	}
-
-	gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
-	if (!gen_parms)
-		return -ENOMEM;
-
-	gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
-
-	memcpy(&gen_parms->general_params, gp, sizeof(*gp));
-
-	if (gp->tx_bip_fem_auto_detect)
-		answer = true;
-
-	/* Replace REF and TCXO CLKs with the ones from platform data */
-	gen_parms->general_params.ref_clock = wl->ref_clock;
-	gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
-
-	ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
-	if (ret < 0) {
-		wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
-		goto out;
-	}
-
-	gp->tx_bip_fem_manufacturer =
-		gen_parms->general_params.tx_bip_fem_manufacturer;
-
-	if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
-		wl1271_warning("FEM index from FW out of bounds");
-		ret = -EINVAL;
-		goto out;
-	}
-
-	wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
-		     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
-
-out:
-	kfree(gen_parms);
-	return ret;
-}
-
-int wl1271_cmd_radio_parms(struct wl1271 *wl)
-{
-	struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
-	struct wl1271_radio_parms_cmd *radio_parms;
-	struct wl1271_ini_general_params *gp = &nvs->general_params;
-	int ret;
-
-	if (!wl->nvs)
-		return -ENODEV;
-
-	radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
-	if (!radio_parms)
-		return -ENOMEM;
-
-	radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
-
-	/* 2.4GHz parameters */
-	memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
-	       sizeof(struct wl1271_ini_band_params_2));
-	memcpy(&radio_parms->dyn_params_2,
-	       &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
-	       sizeof(struct wl1271_ini_fem_params_2));
-
-	/* 5GHz parameters */
-	memcpy(&radio_parms->static_params_5,
-	       &nvs->stat_radio_params_5,
-	       sizeof(struct wl1271_ini_band_params_5));
-	memcpy(&radio_parms->dyn_params_5,
-	       &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
-	       sizeof(struct wl1271_ini_fem_params_5));
-
-	wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
-		    radio_parms, sizeof(*radio_parms));
-
-	ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
-	if (ret < 0)
-		wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
-
-	kfree(radio_parms);
-	return ret;
-}
-
-int wl128x_cmd_radio_parms(struct wl1271 *wl)
-{
-	struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
-	struct wl128x_radio_parms_cmd *radio_parms;
-	struct wl128x_ini_general_params *gp = &nvs->general_params;
-	int ret;
-
-	if (!wl->nvs)
-		return -ENODEV;
-
-	radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
-	if (!radio_parms)
-		return -ENOMEM;
-
-	radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
-
-	/* 2.4GHz parameters */
-	memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
-	       sizeof(struct wl128x_ini_band_params_2));
-	memcpy(&radio_parms->dyn_params_2,
-	       &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
-	       sizeof(struct wl128x_ini_fem_params_2));
-
-	/* 5GHz parameters */
-	memcpy(&radio_parms->static_params_5,
-	       &nvs->stat_radio_params_5,
-	       sizeof(struct wl128x_ini_band_params_5));
-	memcpy(&radio_parms->dyn_params_5,
-	       &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
-	       sizeof(struct wl128x_ini_fem_params_5));
-
-	radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
-
-	wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
-		    radio_parms, sizeof(*radio_parms));
-
-	ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
-	if (ret < 0)
-		wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
-
-	kfree(radio_parms);
-	return ret;
-}
-
-int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
-{
-	struct wl1271_ext_radio_parms_cmd *ext_radio_parms;
-	struct conf_rf_settings *rf = &wl->conf.rf;
-	int ret;
-
-	if (!wl->nvs)
-		return -ENODEV;
-
-	ext_radio_parms = kzalloc(sizeof(*ext_radio_parms), GFP_KERNEL);
-	if (!ext_radio_parms)
-		return -ENOMEM;
-
-	ext_radio_parms->test.id = TEST_CMD_INI_FILE_RF_EXTENDED_PARAM;
-
-	memcpy(ext_radio_parms->tx_per_channel_power_compensation_2,
-	       rf->tx_per_channel_power_compensation_2,
-	       CONF_TX_PWR_COMPENSATION_LEN_2);
-	memcpy(ext_radio_parms->tx_per_channel_power_compensation_5,
-	       rf->tx_per_channel_power_compensation_5,
-	       CONF_TX_PWR_COMPENSATION_LEN_5);
-
-	wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_EXT_RADIO_PARAM: ",
-		    ext_radio_parms, sizeof(*ext_radio_parms));
-
-	ret = wl1271_cmd_test(wl, ext_radio_parms, sizeof(*ext_radio_parms), 0);
-	if (ret < 0)
-		wl1271_warning("TEST_CMD_INI_FILE_RF_EXTENDED_PARAM failed");
-
-	kfree(ext_radio_parms);
-	return ret;
-}
-
 /*
  * Poll the mailbox event field until any of the bits in the mask is set or a
  * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
  */
 static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
 {
-	u32 events_vector, event;
+	u32 *events_vector;
+	u32 event;
 	unsigned long timeout;
+	int ret = 0;
+
+	/*
+	 * The underlying bus driver may DMA straight into this buffer, so
+	 * keep it off the stack and in DMA-able memory, and bail out if the
+	 * allocation fails.
+	 */
+	events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA);
+	if (!events_vector)
+		return -ENOMEM;
 
 	timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
 
@@ -351,21 +131,24 @@
 		if (time_after(jiffies, timeout)) {
 			wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
 				     (int)mask);
-			return -ETIMEDOUT;
+			ret = -ETIMEDOUT;
+			goto out;
 		}
 
 		msleep(1);
 
 		/* read from both event fields */
-		wl1271_read(wl, wl->mbox_ptr[0], &events_vector,
-			    sizeof(events_vector), false);
-		event = events_vector & mask;
-		wl1271_read(wl, wl->mbox_ptr[1], &events_vector,
-			    sizeof(events_vector), false);
-		event |= events_vector & mask;
+		wl1271_read(wl, wl->mbox_ptr[0], events_vector,
+			    sizeof(*events_vector), false);
+		event = *events_vector & mask;
+		wl1271_read(wl, wl->mbox_ptr[1], events_vector,
+			    sizeof(*events_vector), false);
+		event |= *events_vector & mask;
 	} while (!event);
 
-	return 0;
+out:
+	kfree(events_vector);
+	return ret;
 }
 
 static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
@@ -522,7 +305,7 @@
 
 	cmd->role_id = wlvif->dev_role_id;
 	if (wlvif->band == IEEE80211_BAND_5GHZ)
-		cmd->band = WL12XX_BAND_5GHZ;
+		cmd->band = WLCORE_BAND_5GHZ;
 	cmd->channel = wlvif->channel;
 
 	if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
@@ -613,7 +396,7 @@
 
 	cmd->role_id = wlvif->role_id;
 	if (wlvif->band == IEEE80211_BAND_5GHZ)
-		cmd->band = WL12XX_BAND_5GHZ;
+		cmd->band = WLCORE_BAND_5GHZ;
 	cmd->channel = wlvif->channel;
 	cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
 	cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
@@ -750,14 +533,14 @@
 
 	switch (wlvif->band) {
 	case IEEE80211_BAND_2GHZ:
-		cmd->band = RADIO_BAND_2_4GHZ;
+		cmd->band = WLCORE_BAND_2_4GHZ;
 		break;
 	case IEEE80211_BAND_5GHZ:
-		cmd->band = RADIO_BAND_5GHZ;
+		cmd->band = WLCORE_BAND_5GHZ;
 		break;
 	default:
 		wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
-		cmd->band = RADIO_BAND_2_4GHZ;
+		cmd->band = WLCORE_BAND_2_4GHZ;
 		break;
 	}
 
@@ -830,7 +613,7 @@
 
 	cmd->role_id = wlvif->role_id;
 	if (wlvif->band == IEEE80211_BAND_5GHZ)
-		cmd->band = WL12XX_BAND_5GHZ;
+		cmd->band = WLCORE_BAND_5GHZ;
 	cmd->channel = wlvif->channel;
 	cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
 	cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
@@ -904,6 +687,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(wl1271_cmd_test);
 
 /**
  * read acx from firmware
@@ -960,6 +744,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(wl1271_cmd_configure);
 
 int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
 {
@@ -1730,10 +1515,10 @@
 	cmd->channel = wlvif->channel;
 	switch (wlvif->band) {
 	case IEEE80211_BAND_2GHZ:
-		cmd->band = RADIO_BAND_2_4GHZ;
+		cmd->band = WLCORE_BAND_2_4GHZ;
 		break;
 	case IEEE80211_BAND_5GHZ:
-		cmd->band = RADIO_BAND_5GHZ;
+		cmd->band = WLCORE_BAND_5GHZ;
 		break;
 	default:
 		wl1271_error("roc - unknown band: %d", (int)wlvif->band);
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
similarity index 87%
rename from drivers/net/wireless/wl12xx/cmd.h
rename to drivers/net/wireless/ti/wlcore/cmd.h
index de217d9..a46ae07 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -25,17 +25,12 @@
 #ifndef __CMD_H__
 #define __CMD_H__
 
-#include "wl12xx.h"
+#include "wlcore.h"
 
 struct acx_header;
 
 int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
 		    size_t res_len);
-int wl1271_cmd_general_parms(struct wl1271 *wl);
-int wl128x_cmd_general_parms(struct wl1271 *wl);
-int wl1271_cmd_radio_parms(struct wl1271 *wl);
-int wl128x_cmd_radio_parms(struct wl1271 *wl);
-int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
 int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
 			   u8 *role_id);
 int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
@@ -262,13 +257,13 @@
 	u8 padding[3];
 } __packed;
 
-enum wl12xx_band {
-	WL12XX_BAND_2_4GHZ		= 0,
-	WL12XX_BAND_5GHZ		= 1,
-	WL12XX_BAND_JAPAN_4_9_GHZ	= 2,
-	WL12XX_BAND_DEFAULT		= WL12XX_BAND_2_4GHZ,
-	WL12XX_BAND_INVALID		= 0x7E,
-	WL12XX_BAND_MAX_RADIO		= 0x7F,
+enum wlcore_band {
+	WLCORE_BAND_2_4GHZ		= 0,
+	WLCORE_BAND_5GHZ		= 1,
+	WLCORE_BAND_JAPAN_4_9_GHZ	= 2,
+	WLCORE_BAND_DEFAULT		= WLCORE_BAND_2_4GHZ,
+	WLCORE_BAND_INVALID		= 0x7E,
+	WLCORE_BAND_MAX_RADIO		= 0x7F,
 };
 
 struct wl12xx_cmd_role_start {
@@ -494,83 +489,6 @@
 
 #define WL1271_PD_REFERENCE_POINT_BAND_B_G  0
 
-#define TEST_CMD_INI_FILE_RADIO_PARAM       0x19
-#define TEST_CMD_INI_FILE_GENERAL_PARAM     0x1E
-#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26
-
-struct wl1271_general_parms_cmd {
-	struct wl1271_cmd_header header;
-
-	struct wl1271_cmd_test_header test;
-
-	struct wl1271_ini_general_params general_params;
-
-	u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
-	u8 sr_sen_n_p;
-	u8 sr_sen_n_p_gain;
-	u8 sr_sen_nrn;
-	u8 sr_sen_prn;
-	u8 padding[3];
-} __packed;
-
-struct wl128x_general_parms_cmd {
-	struct wl1271_cmd_header header;
-
-	struct wl1271_cmd_test_header test;
-
-	struct wl128x_ini_general_params general_params;
-
-	u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
-	u8 sr_sen_n_p;
-	u8 sr_sen_n_p_gain;
-	u8 sr_sen_nrn;
-	u8 sr_sen_prn;
-	u8 padding[3];
-} __packed;
-
-struct wl1271_radio_parms_cmd {
-	struct wl1271_cmd_header header;
-
-	struct wl1271_cmd_test_header test;
-
-	/* Static radio parameters */
-	struct wl1271_ini_band_params_2 static_params_2;
-	struct wl1271_ini_band_params_5 static_params_5;
-
-	/* Dynamic radio parameters */
-	struct wl1271_ini_fem_params_2 dyn_params_2;
-	u8 padding2;
-	struct wl1271_ini_fem_params_5 dyn_params_5;
-	u8 padding3[2];
-} __packed;
-
-struct wl128x_radio_parms_cmd {
-	struct wl1271_cmd_header header;
-
-	struct wl1271_cmd_test_header test;
-
-	/* Static radio parameters */
-	struct wl128x_ini_band_params_2 static_params_2;
-	struct wl128x_ini_band_params_5 static_params_5;
-
-	u8 fem_vendor_and_options;
-
-	/* Dynamic radio parameters */
-	struct wl128x_ini_fem_params_2 dyn_params_2;
-	u8 padding2;
-	struct wl128x_ini_fem_params_5 dyn_params_5;
-} __packed;
-
-struct wl1271_ext_radio_parms_cmd {
-	struct wl1271_cmd_header header;
-
-	struct wl1271_cmd_test_header test;
-
-	u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
-	u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
-	u8 padding[3];
-} __packed;
-
 /*
  * There are three types of disconnections:
  *
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
similarity index 93%
rename from drivers/net/wireless/wl12xx/conf.h
rename to drivers/net/wireless/ti/wlcore/conf.h
index 3e581e1..fef0db4 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -65,36 +65,7 @@
 	CONF_HW_RATE_INDEX_MAX     = CONF_HW_RATE_INDEX_54MBPS,
 };
 
-enum {
-	CONF_HW_RXTX_RATE_MCS7_SGI = 0,
-	CONF_HW_RXTX_RATE_MCS7,
-	CONF_HW_RXTX_RATE_MCS6,
-	CONF_HW_RXTX_RATE_MCS5,
-	CONF_HW_RXTX_RATE_MCS4,
-	CONF_HW_RXTX_RATE_MCS3,
-	CONF_HW_RXTX_RATE_MCS2,
-	CONF_HW_RXTX_RATE_MCS1,
-	CONF_HW_RXTX_RATE_MCS0,
-	CONF_HW_RXTX_RATE_54,
-	CONF_HW_RXTX_RATE_48,
-	CONF_HW_RXTX_RATE_36,
-	CONF_HW_RXTX_RATE_24,
-	CONF_HW_RXTX_RATE_22,
-	CONF_HW_RXTX_RATE_18,
-	CONF_HW_RXTX_RATE_12,
-	CONF_HW_RXTX_RATE_11,
-	CONF_HW_RXTX_RATE_9,
-	CONF_HW_RXTX_RATE_6,
-	CONF_HW_RXTX_RATE_5_5,
-	CONF_HW_RXTX_RATE_2,
-	CONF_HW_RXTX_RATE_1,
-	CONF_HW_RXTX_RATE_MAX,
-	CONF_HW_RXTX_RATE_UNSUPPORTED = 0xff
-};
-
-/* Rates between and including these are MCS rates */
-#define CONF_HW_RXTX_RATE_MCS_MIN CONF_HW_RXTX_RATE_MCS7_SGI
-#define CONF_HW_RXTX_RATE_MCS_MAX CONF_HW_RXTX_RATE_MCS0
+#define CONF_HW_RXTX_RATE_UNSUPPORTED 0xff
 
 enum {
 	CONF_SG_DISABLE = 0,
@@ -1096,16 +1067,31 @@
 };
 
 struct conf_sched_scan_settings {
-	/* minimum time to wait on the channel for active scans (in TUs) */
-	u16 min_dwell_time_active;
+	/*
+	 * The base time to wait on the channel for active scans (in TU/1000).
+	 * The minimum dwell time is calculated according to this:
+	 * min_dwell_time = base + num_of_probes_to_be_sent * delta_per_probe
+	 * The maximum dwell time is calculated according to this:
+	 * max_dwell_time = min_dwell_time + max_dwell_time_delta
+	 */
+	u32 base_dwell_time;
 
-	/* maximum time to wait on the channel for active scans (in TUs) */
-	u16 max_dwell_time_active;
+	/*
+	 * The delta between the min and max dwell times for active scans
+	 * (in TU/1000). The max dwell time is used by the FW once traffic
+	 * is detected on the channel.
+	 */
+	u32 max_dwell_time_delta;
 
-	/* time to wait on the channel for passive scans (in TUs) */
+	/* Delta added to min dwell time for each probe in 2.4 GHz (TU/1000) */
+	u32 dwell_time_delta_per_probe;
+
+	/* Delta added to min dwell time for each probe in 5 GHz (TU/1000) */
+	u32 dwell_time_delta_per_probe_5;
+
+	/* time to wait on the channel for passive scans (in TU/1000) */
 	u32 dwell_time_passive;
 
-	/* time to wait on the channel for DFS scans (in TUs) */
+	/* time to wait on the channel for DFS scans (in TU/1000) */
 	u32 dwell_time_dfs;
 
 	/* number of probe requests to send on each channel in active scans */
@@ -1118,26 +1104,6 @@
 	s8 snr_threshold;
 };
 
-/* these are number of channels on the band divided by two, rounded up */
-#define CONF_TX_PWR_COMPENSATION_LEN_2 7
-#define CONF_TX_PWR_COMPENSATION_LEN_5 18
-
-struct conf_rf_settings {
-	/*
-	 * Per channel power compensation for 2.4GHz
-	 *
-	 * Range: s8
-	 */
-	u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
-
-	/*
-	 * Per channel power compensation for 5GHz
-	 *
-	 * Range: s8
-	 */
-	u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
-};
-
 struct conf_ht_setting {
 	u8 rx_ba_win_size;
 	u8 tx_ba_win_size;
@@ -1286,7 +1252,7 @@
 	u8 window_size;
 };
 
-struct conf_drv_settings {
+struct wlcore_conf {
 	struct conf_sg_settings sg;
 	struct conf_rx_settings rx;
 	struct conf_tx_settings tx;
@@ -1296,16 +1262,13 @@
 	struct conf_roam_trigger_settings roam_trigger;
 	struct conf_scan_settings scan;
 	struct conf_sched_scan_settings sched_scan;
-	struct conf_rf_settings rf;
 	struct conf_ht_setting ht;
-	struct conf_memory_settings mem_wl127x;
-	struct conf_memory_settings mem_wl128x;
+	struct conf_memory_settings mem;
 	struct conf_fm_coex fm_coex;
 	struct conf_rx_streaming_settings rx_streaming;
 	struct conf_fwlog fwlog;
 	struct conf_rate_policy_settings rate;
 	struct conf_hangover_settings hangover;
-	u8 hci_io_ds;
 };
 
 #endif
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/ti/wlcore/debug.h
similarity index 98%
rename from drivers/net/wireless/wl12xx/debug.h
rename to drivers/net/wireless/ti/wlcore/debug.h
index ec0fdc2..6b800b3 100644
--- a/drivers/net/wireless/wl12xx/debug.h
+++ b/drivers/net/wireless/ti/wlcore/debug.h
@@ -52,6 +52,7 @@
 	DEBUG_ADHOC     = BIT(16),
 	DEBUG_AP	= BIT(17),
 	DEBUG_PROBE	= BIT(18),
+	DEBUG_IO	= BIT(19),
 	DEBUG_MASTER	= (DEBUG_ADHOC | DEBUG_AP),
 	DEBUG_ALL	= ~0,
 };
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
similarity index 99%
rename from drivers/net/wireless/wl12xx/debugfs.c
rename to drivers/net/wireless/ti/wlcore/debugfs.c
index 564d495..d5aea1f 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -26,7 +26,7 @@
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "debug.h"
 #include "acx.h"
 #include "ps.h"
@@ -647,6 +647,7 @@
 		VIF_STATE_PRINT_INT(last_rssi_event);
 		VIF_STATE_PRINT_INT(ba_support);
 		VIF_STATE_PRINT_INT(ba_allowed);
+		VIF_STATE_PRINT_INT(is_gem);
 		VIF_STATE_PRINT_LLHEX(tx_security_seq);
 		VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
 	}
diff --git a/drivers/net/wireless/wl12xx/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
similarity index 97%
rename from drivers/net/wireless/wl12xx/debugfs.h
rename to drivers/net/wireless/ti/wlcore/debugfs.h
index 254c5b2..a8d3aef 100644
--- a/drivers/net/wireless/wl12xx/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -24,7 +24,7 @@
 #ifndef __DEBUGFS_H__
 #define __DEBUGFS_H__
 
-#include "wl12xx.h"
+#include "wlcore.h"
 
 int wl1271_debugfs_init(struct wl1271 *wl);
 void wl1271_debugfs_exit(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/ti/wlcore/event.c
similarity index 91%
rename from drivers/net/wireless/wl12xx/event.c
rename to drivers/net/wireless/ti/wlcore/event.c
index c953717..292632d 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -21,9 +21,8 @@
  *
  */
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "debug.h"
-#include "reg.h"
 #include "io.h"
 #include "event.h"
 #include "ps.h"
@@ -98,8 +97,9 @@
 	wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask);
 }
 
-static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
+static int wl1271_event_process(struct wl1271 *wl)
 {
+	struct event_mailbox *mbox = wl->mbox;
 	struct ieee80211_vif *vif;
 	struct wl12xx_vif *wlvif;
 	u32 vector;
@@ -196,7 +196,7 @@
 			bool success;
 
 			if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
-						&wl->flags))
+						&wlvif->flags))
 				continue;
 
 			success = mbox->channel_switch_status ? false : true;
@@ -278,18 +278,8 @@
 	return 0;
 }
 
-void wl1271_event_mbox_config(struct wl1271 *wl)
-{
-	wl->mbox_ptr[0] = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
-	wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
-
-	wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
-		     wl->mbox_ptr[0], wl->mbox_ptr[1]);
-}
-
 int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
 {
-	struct event_mailbox mbox;
 	int ret;
 
 	wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);
@@ -298,16 +288,19 @@
 		return -EINVAL;
 
 	/* first we read the mbox descriptor */
-	wl1271_read(wl, wl->mbox_ptr[mbox_num], &mbox,
-		    sizeof(struct event_mailbox), false);
+	wl1271_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
+		    sizeof(*wl->mbox), false);
 
 	/* process the descriptor */
-	ret = wl1271_event_process(wl, &mbox);
+	ret = wl1271_event_process(wl);
 	if (ret < 0)
 		return ret;
 
-	/* then we let the firmware know it can go on...*/
-	wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
+	/*
+	 * TODO: we just need this because one bit is in a different
+	 * place.  Is there any better way?
+	 */
+	wl->ops->ack_event(wl);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/ti/wlcore/event.h
similarity index 98%
rename from drivers/net/wireless/wl12xx/event.h
rename to drivers/net/wireless/ti/wlcore/event.h
index 057d193..8adf18d 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/ti/wlcore/event.h
@@ -132,8 +132,9 @@
 	u8 reserved_8[9];
 } __packed;
 
+struct wl1271;
+
 int wl1271_event_unmask(struct wl1271 *wl);
-void wl1271_event_mbox_config(struct wl1271 *wl);
 int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
 
 #endif
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
new file mode 100644
index 0000000..9384b4d
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -0,0 +1,122 @@
+/*
+ * This file is part of wlcore
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WLCORE_HW_OPS_H__
+#define __WLCORE_HW_OPS_H__
+
+#include "wlcore.h"
+#include "rx.h"
+
+static inline u32
+wlcore_hw_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
+{
+	if (!wl->ops->calc_tx_blocks)
+		BUG_ON(1);
+
+	return wl->ops->calc_tx_blocks(wl, len, spare_blks);
+}
+
+static inline void
+wlcore_hw_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
+			     u32 blks, u32 spare_blks)
+{
+	if (!wl->ops->set_tx_desc_blocks)
+		BUG_ON(1);
+
+	return wl->ops->set_tx_desc_blocks(wl, desc, blks, spare_blks);
+}
+
+static inline void
+wlcore_hw_set_tx_desc_data_len(struct wl1271 *wl,
+			       struct wl1271_tx_hw_descr *desc,
+			       struct sk_buff *skb)
+{
+	if (!wl->ops->set_tx_desc_data_len)
+		BUG_ON(1);
+
+	wl->ops->set_tx_desc_data_len(wl, desc, skb);
+}
+
+static inline enum wl_rx_buf_align
+wlcore_hw_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
+{
+	if (!wl->ops->get_rx_buf_align)
+		BUG_ON(1);
+
+	return wl->ops->get_rx_buf_align(wl, rx_desc);
+}
+
+static inline void
+wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
+{
+	if (wl->ops->prepare_read)
+		wl->ops->prepare_read(wl, rx_desc, len);
+}
+
+static inline u32
+wlcore_hw_get_rx_packet_len(struct wl1271 *wl, void *rx_data, u32 data_len)
+{
+	if (!wl->ops->get_rx_packet_len)
+		BUG_ON(1);
+
+	return wl->ops->get_rx_packet_len(wl, rx_data, data_len);
+}
+
+static inline void wlcore_hw_tx_delayed_compl(struct wl1271 *wl)
+{
+	if (wl->ops->tx_delayed_compl)
+		wl->ops->tx_delayed_compl(wl);
+}
+
+static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl)
+{
+	if (wl->ops->tx_immediate_compl)
+		wl->ops->tx_immediate_compl(wl);
+}
+
+static inline int
+wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+	if (wl->ops->init_vif)
+		return wl->ops->init_vif(wl, wlvif);
+
+	return 0;
+}
+
+static inline u32
+wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+	if (!wl->ops->sta_get_ap_rate_mask)
+		BUG_ON(1);
+
+	return wl->ops->sta_get_ap_rate_mask(wl, wlvif);
+}
+
+static inline int wlcore_identify_fw(struct wl1271 *wl)
+{
+	if (wl->ops->identify_fw)
+		return wl->ops->identify_fw(wl);
+
+	return 0;
+}
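+
+/*
+ * Minimal hook-up sketch for a lower driver (the function names below are
+ * hypothetical, and struct wlcore_ops is assumed to be the type behind
+ * wl->ops, declared in wlcore.h):
+ *
+ *	static struct wlcore_ops wlxyz_ops = {
+ *		.calc_tx_blocks		= wlxyz_calc_tx_blocks,
+ *		.set_tx_desc_blocks	= wlxyz_set_tx_desc_blocks,
+ *		.set_tx_desc_data_len	= wlxyz_set_tx_desc_data_len,
+ *		.get_rx_buf_align	= wlxyz_get_rx_buf_align,
+ *		.get_rx_packet_len	= wlxyz_get_rx_packet_len,
+ *		.sta_get_ap_rate_mask	= wlxyz_get_ap_rate_mask,
+ *	};
+ *	...
+ *	wl->ops = &wlxyz_ops;
+ *
+ * Ops guarded by a BUG_ON() above are mandatory; the optional ones
+ * (prepare_read, tx_delayed_compl, tx_immediate_compl, init_vif,
+ * identify_fw) may be left NULL.
+ */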
+
+#endif
diff --git a/drivers/net/wireless/wl12xx/ini.h b/drivers/net/wireless/ti/wlcore/ini.h
similarity index 100%
rename from drivers/net/wireless/wl12xx/ini.h
rename to drivers/net/wireless/ti/wlcore/ini.h
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/ti/wlcore/init.c
similarity index 93%
rename from drivers/net/wireless/wl12xx/init.c
rename to drivers/net/wireless/ti/wlcore/init.c
index 203fbeb..9f89255 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -30,9 +30,9 @@
 #include "wl12xx_80211.h"
 #include "acx.h"
 #include "cmd.h"
-#include "reg.h"
 #include "tx.h"
 #include "io.h"
+#include "hw_ops.h"
 
 int wl1271_init_templates_config(struct wl1271 *wl)
 {
@@ -319,7 +319,7 @@
 {
 	int ret;
 
-	if (wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED)
+	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
 		return 0;
 
 	ret = wl12xx_cmd_config_fwlog(wl);
@@ -494,26 +494,6 @@
 	return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
 }
 
-int wl1271_chip_specific_init(struct wl1271 *wl)
-{
-	int ret = 0;
-
-	if (wl->chip.id == CHIP_ID_1283_PG20) {
-		u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
-
-		if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
-			/* Enable SDIO padding */
-			host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
-
-		/* Must be before wl1271_acx_init_mem_config() */
-		ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap);
-		if (ret < 0)
-			goto out;
-	}
-out:
-	return ret;
-}
-
 /* vif-specific initialization */
 static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
@@ -582,10 +562,17 @@
 			if (ret < 0)
 				return ret;
 		} else if (!wl->sta_count) {
-			/* Configure for ELP power saving */
-			ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
-			if (ret < 0)
-				return ret;
+			if (wl->quirks & WLCORE_QUIRK_NO_ELP) {
+				/* Configure for power always on */
+				ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+				if (ret < 0)
+					return ret;
+			} else {
+				/* Configure for ELP power saving */
+				ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+				if (ret < 0)
+					return ret;
+			}
 		}
 	}
 
@@ -652,6 +639,10 @@
 	if (ret < 0)
 		return ret;
 
+	ret = wlcore_hw_init_vif(wl, wlvif);
+	if (ret < 0)
+		return ret;
+
 	return 0;
 }
 
@@ -659,27 +650,8 @@
 {
 	int ret;
 
-	if (wl->chip.id == CHIP_ID_1283_PG20) {
-		ret = wl128x_cmd_general_parms(wl);
-		if (ret < 0)
-			return ret;
-		ret = wl128x_cmd_radio_parms(wl);
-		if (ret < 0)
-			return ret;
-	} else {
-		ret = wl1271_cmd_general_parms(wl);
-		if (ret < 0)
-			return ret;
-		ret = wl1271_cmd_radio_parms(wl);
-		if (ret < 0)
-			return ret;
-		ret = wl1271_cmd_ext_radio_parms(wl);
-		if (ret < 0)
-			return ret;
-	}
-
-	/* Chip-specific init */
-	ret = wl1271_chip_specific_init(wl);
+	/* Chip-specific hw init */
+	ret = wl->ops->hw_init(wl);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/ti/wlcore/init.h
similarity index 98%
rename from drivers/net/wireless/wl12xx/init.h
rename to drivers/net/wireless/ti/wlcore/init.h
index 2da0f40..a45fbfdd 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/ti/wlcore/init.h
@@ -24,7 +24,7 @@
 #ifndef __INIT_H__
 #define __INIT_H__
 
-#include "wl12xx.h"
+#include "wlcore.h"
 
 int wl1271_hw_init_power_auth(struct wl1271 *wl);
 int wl1271_init_templates_config(struct wl1271 *wl);
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
new file mode 100644
index 0000000..7cd0081
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -0,0 +1,173 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+
+#include "wlcore.h"
+#include "debug.h"
+#include "wl12xx_80211.h"
+#include "io.h"
+#include "tx.h"
+
+bool wl1271_set_block_size(struct wl1271 *wl)
+{
+	if (wl->if_ops->set_block_size) {
+		wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
+		return true;
+	}
+
+	return false;
+}
+
+void wlcore_disable_interrupts(struct wl1271 *wl)
+{
+	disable_irq(wl->irq);
+}
+EXPORT_SYMBOL_GPL(wlcore_disable_interrupts);
+
+void wlcore_enable_interrupts(struct wl1271 *wl)
+{
+	enable_irq(wl->irq);
+}
+EXPORT_SYMBOL_GPL(wlcore_enable_interrupts);
+
+int wlcore_translate_addr(struct wl1271 *wl, int addr)
+{
+	struct wlcore_partition_set *part = &wl->curr_part;
+
+	/*
+	 * To translate, first check to which window of addresses the
+	 * particular address belongs. Then subtract the starting address
+	 * of that window from the address. Then, add offset of the
+	 * translated region.
+	 *
+	 * The translated regions occur next to each other in physical device
+	 * memory, so just add the sizes of the preceding address regions to
+	 * get the offset to the new region.
+	 */
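+	/*
+	 * Worked example with made-up numbers: for a partition where
+	 * mem.start = 0x40000, mem.size = 0x20000 and reg.start = 0x300000,
+	 * the register address 0x300004 falls into the second window and
+	 * maps to 0x300004 - 0x300000 + 0x20000 = 0x20004.
+	 */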
+	if ((addr >= part->mem.start) &&
+	    (addr < part->mem.start + part->mem.size))
+		return addr - part->mem.start;
+	else if ((addr >= part->reg.start) &&
+		 (addr < part->reg.start + part->reg.size))
+		return addr - part->reg.start + part->mem.size;
+	else if ((addr >= part->mem2.start) &&
+		 (addr < part->mem2.start + part->mem2.size))
+		return addr - part->mem2.start + part->mem.size +
+			part->reg.size;
+	else if ((addr >= part->mem3.start) &&
+		 (addr < part->mem3.start + part->mem3.size))
+		return addr - part->mem3.start + part->mem.size +
+			part->reg.size + part->mem2.size;
+
+	WARN(1, "HW address 0x%x out of range", addr);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wlcore_translate_addr);
+
+/* Set the partitions to access the chip addresses
+ *
+ * To simplify driver code, a fixed (virtual) memory map is defined for
+ * register and memory addresses. Because in the chipset, in different stages
+ * of operation, those addresses will move around, an address translation
+ * mechanism is required.
+ *
+ * There are four partitions (three memory and one register partition),
+ * which are mapped to two different areas of the hardware memory.
+ *
+ *                                Virtual address
+ *                                     space
+ *
+ *                                    |    |
+ *                                 ...+----+--> mem.start
+ *          Physical address    ...   |    |
+ *               space       ...      |    | [PART_0]
+ *                        ...         |    |
+ *  00000000  <--+----+...         ...+----+--> mem.start + mem.size
+ *               |    |         ...   |    |
+ *               |MEM |      ...      |    |
+ *               |    |   ...         |    |
+ *  mem.size  <--+----+...            |    | (unused area)
+ *               |    |   ...         |    |
+ *               |REG |      ...      |    |
+ *  mem.size     |    |         ...   |    |
+ *      +     <--+----+...         ...+----+--> reg.start
+ *  reg.size     |    |   ...         |    |
+ *               |MEM2|      ...      |    | [PART_1]
+ *               |    |         ...   |    |
+ *                                 ...+----+--> reg.start + reg.size
+ *                                    |    |
+ *
+ */
+void wlcore_set_partition(struct wl1271 *wl,
+			  const struct wlcore_partition_set *p)
+{
+	/* copy partition info */
+	memcpy(&wl->curr_part, p, sizeof(*p));
+
+	wl1271_debug(DEBUG_IO, "mem_start %08X mem_size %08X",
+		     p->mem.start, p->mem.size);
+	wl1271_debug(DEBUG_IO, "reg_start %08X reg_size %08X",
+		     p->reg.start, p->reg.size);
+	wl1271_debug(DEBUG_IO, "mem2_start %08X mem2_size %08X",
+		     p->mem2.start, p->mem2.size);
+	wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X",
+		     p->mem3.start, p->mem3.size);
+
+	wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
+	wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
+	wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
+	wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
+	wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
+	wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
+	/*
+	 * We don't need the size of the last partition, as it is
+	 * automatically calculated based on the total memory size and
+	 * the sizes of the previous partitions.
+	 */
+	wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
+}
+EXPORT_SYMBOL_GPL(wlcore_set_partition);
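+
+/*
+ * Typical usage (mirroring the boot code in this series): switch to the
+ * boot partition, then issue translated accesses against it:
+ *
+ *	wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ *	chip_id = wlcore_read_reg(wl, REG_CHIP_ID_B);
+ */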
+
+void wlcore_select_partition(struct wl1271 *wl, u8 part)
+{
+	wl1271_debug(DEBUG_IO, "setting partition %d", part);
+
+	wlcore_set_partition(wl, &wl->ptable[part]);
+}
+EXPORT_SYMBOL_GPL(wlcore_select_partition);
+
+void wl1271_io_reset(struct wl1271 *wl)
+{
+	if (wl->if_ops->reset)
+		wl->if_ops->reset(wl->dev);
+}
+
+void wl1271_io_init(struct wl1271 *wl)
+{
+	if (wl->if_ops->init)
+		wl->if_ops->init(wl->dev);
+}
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/ti/wlcore/io.h
similarity index 70%
rename from drivers/net/wireless/wl12xx/io.h
rename to drivers/net/wireless/ti/wlcore/io.h
index 4fb3dab..8942954 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -26,7 +26,6 @@
 #define __IO_H__
 
 #include <linux/irqreturn.h>
-#include "reg.h"
 
 #define HW_ACCESS_MEMORY_MAX_RANGE	0x1FFC0
 
@@ -43,15 +42,14 @@
 
 #define HW_ACCESS_PRAM_MAX_RANGE	0x3c000
 
-extern struct wl1271_partition_set wl12xx_part_table[PART_TABLE_LEN];
-
 struct wl1271;
 
-void wl1271_disable_interrupts(struct wl1271 *wl);
-void wl1271_enable_interrupts(struct wl1271 *wl);
+void wlcore_disable_interrupts(struct wl1271 *wl);
+void wlcore_enable_interrupts(struct wl1271 *wl);
 
 void wl1271_io_reset(struct wl1271 *wl);
 void wl1271_io_init(struct wl1271 *wl);
+int wlcore_translate_addr(struct wl1271 *wl, int addr);
 
 /* Raw target IO, address is not translated */
 static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
@@ -66,6 +64,18 @@
 	wl->if_ops->read(wl->dev, addr, buf, len, fixed);
 }
 
+static inline void wlcore_raw_read_data(struct wl1271 *wl, int reg, void *buf,
+					size_t len, bool fixed)
+{
+	wl1271_raw_read(wl, wl->rtable[reg], buf, len, fixed);
+}
+
+static inline void wlcore_raw_write_data(struct wl1271 *wl, int reg, void *buf,
+					 size_t len, bool fixed)
+{
+	wl1271_raw_write(wl, wl->rtable[reg], buf, len, fixed);
+}
+
 static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
 {
 	wl1271_raw_read(wl, addr, &wl->buffer_32,
@@ -81,36 +91,12 @@
 			     sizeof(wl->buffer_32), false);
 }
 
-/* Translated target IO */
-static inline int wl1271_translate_addr(struct wl1271 *wl, int addr)
-{
-	/*
-	 * To translate, first check to which window of addresses the
-	 * particular address belongs. Then subtract the starting address
-	 * of that window from the address. Then, add offset of the
-	 * translated region.
-	 *
-	 * The translated regions occur next to each other in physical device
-	 * memory, so just add the sizes of the preceding address regions to
-	 * get the offset to the new region.
-	 *
-	 * Currently, only the two first regions are addressed, and the
-	 * assumption is that all addresses will fall into either of those
-	 * two.
-	 */
-	if ((addr >= wl->part.reg.start) &&
-	    (addr < wl->part.reg.start + wl->part.reg.size))
-		return addr - wl->part.reg.start + wl->part.mem.size;
-	else
-		return addr - wl->part.mem.start;
-}
-
 static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf,
 			       size_t len, bool fixed)
 {
 	int physical;
 
-	physical = wl1271_translate_addr(wl, addr);
+	physical = wlcore_translate_addr(wl, addr);
 
 	wl1271_raw_read(wl, physical, buf, len, fixed);
 }
@@ -120,11 +106,23 @@
 {
 	int physical;
 
-	physical = wl1271_translate_addr(wl, addr);
+	physical = wlcore_translate_addr(wl, addr);
 
 	wl1271_raw_write(wl, physical, buf, len, fixed);
 }
 
+static inline void wlcore_write_data(struct wl1271 *wl, int reg, void *buf,
+				     size_t len, bool fixed)
+{
+	wl1271_write(wl, wl->rtable[reg], buf, len, fixed);
+}
+
+static inline void wlcore_read_data(struct wl1271 *wl, int reg, void *buf,
+				    size_t len, bool fixed)
+{
+	wl1271_read(wl, wl->rtable[reg], buf, len, fixed);
+}
+
 static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
 				      void *buf, size_t len, bool fixed)
 {
@@ -134,19 +132,30 @@
 	/* Addresses are stored internally as addresses to 32 bytes blocks */
 	addr = hwaddr << 5;
 
-	physical = wl1271_translate_addr(wl, addr);
+	physical = wlcore_translate_addr(wl, addr);
 
 	wl1271_raw_read(wl, physical, buf, len, fixed);
 }
 
 static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
 {
-	return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
+	return wl1271_raw_read32(wl, wlcore_translate_addr(wl, addr));
 }
 
 static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
 {
-	wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
+	wl1271_raw_write32(wl, wlcore_translate_addr(wl, addr), val);
+}
+
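+/*
+ * Indexed register accessors: wl->rtable maps chip-family-independent
+ * register ids (e.g. REG_INTERRUPT_NO_CLEAR, REG_COMMAND_MAILBOX_PTR) to
+ * chip-specific addresses, presumably provided by the lower driver.
+ */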
+static inline u32 wlcore_read_reg(struct wl1271 *wl, int reg)
+{
+	return wl1271_raw_read32(wl,
+				 wlcore_translate_addr(wl, wl->rtable[reg]));
+}
+
+static inline void wlcore_write_reg(struct wl1271 *wl, int reg, u32 val)
+{
+	wl1271_raw_write32(wl, wlcore_translate_addr(wl, wl->rtable[reg]), val);
 }
 
 static inline void wl1271_power_off(struct wl1271 *wl)
@@ -164,13 +173,8 @@
 	return ret;
 }
 
-
-/* Top Register IO */
-void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
-u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
-
-int wl1271_set_partition(struct wl1271 *wl,
-			 struct wl1271_partition_set *p);
+void wlcore_set_partition(struct wl1271 *wl,
+			  const struct wlcore_partition_set *p);
 
 bool wl1271_set_block_size(struct wl1271 *wl);
 
@@ -178,4 +182,6 @@
 
 int wl1271_tx_dummy_packet(struct wl1271 *wl);
 
+void wlcore_select_partition(struct wl1271 *wl, u8 part);
+
 #endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/ti/wlcore/main.c
similarity index 85%
rename from drivers/net/wireless/wl12xx/main.c
rename to drivers/net/wireless/ti/wlcore/main.c
index 3900236..2b0f987 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -35,10 +35,9 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "debug.h"
 #include "wl12xx_80211.h"
-#include "reg.h"
 #include "io.h"
 #include "event.h"
 #include "tx.h"
@@ -50,342 +49,15 @@
 #include "boot.h"
 #include "testmode.h"
 #include "scan.h"
+#include "hw_ops.h"
 
 #define WL1271_BOOT_RETRIES 3
 
-static struct conf_drv_settings default_conf = {
-	.sg = {
-		.params = {
-			[CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
-			[CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
-			[CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
-			[CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
-			[CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
-			[CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
-			[CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
-			[CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
-			[CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
-			[CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
-			[CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
-			[CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
-			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
-			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
-			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
-			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
-			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
-			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
-			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
-			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
-			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
-			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
-			[CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
-			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
-			[CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
-			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
-			/* active scan params */
-			[CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
-			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
-			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
-			/* passive scan params */
-			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
-			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
-			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
-			/* passive scan in dual antenna params */
-			[CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
-			[CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
-			[CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
-			/* general params */
-			[CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
-			[CONF_SG_ANTENNA_CONFIGURATION] = 0,
-			[CONF_SG_BEACON_MISS_PERCENT] = 60,
-			[CONF_SG_DHCP_TIME] = 5000,
-			[CONF_SG_RXT] = 1200,
-			[CONF_SG_TXT] = 1000,
-			[CONF_SG_ADAPTIVE_RXT_TXT] = 1,
-			[CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
-			[CONF_SG_HV3_MAX_SERVED] = 6,
-			[CONF_SG_PS_POLL_TIMEOUT] = 10,
-			[CONF_SG_UPSD_TIMEOUT] = 10,
-			[CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
-			[CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
-			[CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
-			/* AP params */
-			[CONF_AP_BEACON_MISS_TX] = 3,
-			[CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
-			[CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
-			[CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
-			[CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
-			[CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
-			/* CTS Diluting params */
-			[CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
-			[CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
-		},
-		.state = CONF_SG_PROTECTIVE,
-	},
-	.rx = {
-		.rx_msdu_life_time           = 512000,
-		.packet_detection_threshold  = 0,
-		.ps_poll_timeout             = 15,
-		.upsd_timeout                = 15,
-		.rts_threshold               = IEEE80211_MAX_RTS_THRESHOLD,
-		.rx_cca_threshold            = 0,
-		.irq_blk_threshold           = 0xFFFF,
-		.irq_pkt_threshold           = 0,
-		.irq_timeout                 = 600,
-		.queue_type                  = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
-	},
-	.tx = {
-		.tx_energy_detection         = 0,
-		.sta_rc_conf                 = {
-			.enabled_rates       = 0,
-			.short_retry_limit   = 10,
-			.long_retry_limit    = 10,
-			.aflags              = 0,
-		},
-		.ac_conf_count               = 4,
-		.ac_conf                     = {
-			[CONF_TX_AC_BE] = {
-				.ac          = CONF_TX_AC_BE,
-				.cw_min      = 15,
-				.cw_max      = 63,
-				.aifsn       = 3,
-				.tx_op_limit = 0,
-			},
-			[CONF_TX_AC_BK] = {
-				.ac          = CONF_TX_AC_BK,
-				.cw_min      = 15,
-				.cw_max      = 63,
-				.aifsn       = 7,
-				.tx_op_limit = 0,
-			},
-			[CONF_TX_AC_VI] = {
-				.ac          = CONF_TX_AC_VI,
-				.cw_min      = 15,
-				.cw_max      = 63,
-				.aifsn       = CONF_TX_AIFS_PIFS,
-				.tx_op_limit = 3008,
-			},
-			[CONF_TX_AC_VO] = {
-				.ac          = CONF_TX_AC_VO,
-				.cw_min      = 15,
-				.cw_max      = 63,
-				.aifsn       = CONF_TX_AIFS_PIFS,
-				.tx_op_limit = 1504,
-			},
-		},
-		.max_tx_retries = 100,
-		.ap_aging_period = 300,
-		.tid_conf_count = 4,
-		.tid_conf = {
-			[CONF_TX_AC_BE] = {
-				.queue_id    = CONF_TX_AC_BE,
-				.channel_type = CONF_CHANNEL_TYPE_EDCF,
-				.tsid        = CONF_TX_AC_BE,
-				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
-				.ack_policy  = CONF_ACK_POLICY_LEGACY,
-				.apsd_conf   = {0, 0},
-			},
-			[CONF_TX_AC_BK] = {
-				.queue_id    = CONF_TX_AC_BK,
-				.channel_type = CONF_CHANNEL_TYPE_EDCF,
-				.tsid        = CONF_TX_AC_BK,
-				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
-				.ack_policy  = CONF_ACK_POLICY_LEGACY,
-				.apsd_conf   = {0, 0},
-			},
-			[CONF_TX_AC_VI] = {
-				.queue_id    = CONF_TX_AC_VI,
-				.channel_type = CONF_CHANNEL_TYPE_EDCF,
-				.tsid        = CONF_TX_AC_VI,
-				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
-				.ack_policy  = CONF_ACK_POLICY_LEGACY,
-				.apsd_conf   = {0, 0},
-			},
-			[CONF_TX_AC_VO] = {
-				.queue_id    = CONF_TX_AC_VO,
-				.channel_type = CONF_CHANNEL_TYPE_EDCF,
-				.tsid        = CONF_TX_AC_VO,
-				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
-				.ack_policy  = CONF_ACK_POLICY_LEGACY,
-				.apsd_conf   = {0, 0},
-			},
-		},
-		.frag_threshold              = IEEE80211_MAX_FRAG_THRESHOLD,
-		.tx_compl_timeout            = 700,
-		.tx_compl_threshold          = 4,
-		.basic_rate                  = CONF_HW_BIT_RATE_1MBPS,
-		.basic_rate_5                = CONF_HW_BIT_RATE_6MBPS,
-		.tmpl_short_retry_limit      = 10,
-		.tmpl_long_retry_limit       = 10,
-		.tx_watchdog_timeout         = 5000,
-	},
-	.conn = {
-		.wake_up_event               = CONF_WAKE_UP_EVENT_DTIM,
-		.listen_interval             = 1,
-		.suspend_wake_up_event       = CONF_WAKE_UP_EVENT_N_DTIM,
-		.suspend_listen_interval     = 3,
-		.bcn_filt_mode               = CONF_BCN_FILT_MODE_ENABLED,
-		.bcn_filt_ie_count           = 2,
-		.bcn_filt_ie = {
-			[0] = {
-				.ie          = WLAN_EID_CHANNEL_SWITCH,
-				.rule        = CONF_BCN_RULE_PASS_ON_APPEARANCE,
-			},
-			[1] = {
-				.ie          = WLAN_EID_HT_INFORMATION,
-				.rule        = CONF_BCN_RULE_PASS_ON_CHANGE,
-			},
-		},
-		.synch_fail_thold            = 10,
-		.bss_lose_timeout            = 100,
-		.beacon_rx_timeout           = 10000,
-		.broadcast_timeout           = 20000,
-		.rx_broadcast_in_ps          = 1,
-		.ps_poll_threshold           = 10,
-		.bet_enable                  = CONF_BET_MODE_ENABLE,
-		.bet_max_consecutive         = 50,
-		.psm_entry_retries           = 8,
-		.psm_exit_retries            = 16,
-		.psm_entry_nullfunc_retries  = 3,
-		.dynamic_ps_timeout          = 200,
-		.forced_ps                   = false,
-		.keep_alive_interval         = 55000,
-		.max_listen_interval         = 20,
-	},
-	.itrim = {
-		.enable = false,
-		.timeout = 50000,
-	},
-	.pm_config = {
-		.host_clk_settling_time = 5000,
-		.host_fast_wakeup_support = false
-	},
-	.roam_trigger = {
-		.trigger_pacing               = 1,
-		.avg_weight_rssi_beacon       = 20,
-		.avg_weight_rssi_data         = 10,
-		.avg_weight_snr_beacon        = 20,
-		.avg_weight_snr_data          = 10,
-	},
-	.scan = {
-		.min_dwell_time_active        = 7500,
-		.max_dwell_time_active        = 30000,
-		.min_dwell_time_passive       = 100000,
-		.max_dwell_time_passive       = 100000,
-		.num_probe_reqs               = 2,
-		.split_scan_timeout           = 50000,
-	},
-	.sched_scan = {
-		/* sched_scan requires dwell times in TU instead of TU/1000 */
-		.min_dwell_time_active = 30,
-		.max_dwell_time_active = 60,
-		.dwell_time_passive    = 100,
-		.dwell_time_dfs        = 150,
-		.num_probe_reqs        = 2,
-		.rssi_threshold        = -90,
-		.snr_threshold         = 0,
-	},
-	.rf = {
-		.tx_per_channel_power_compensation_2 = {
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		},
-		.tx_per_channel_power_compensation_5 = {
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-		},
-	},
-	.ht = {
-		.rx_ba_win_size = 8,
-		.tx_ba_win_size = 64,
-		.inactivity_timeout = 10000,
-		.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
-	},
-	.mem_wl127x = {
-		.num_stations                 = 1,
-		.ssid_profiles                = 1,
-		.rx_block_num                 = 70,
-		.tx_min_block_num             = 40,
-		.dynamic_memory               = 1,
-		.min_req_tx_blocks            = 100,
-		.min_req_rx_blocks            = 22,
-		.tx_min                       = 27,
-	},
-	.mem_wl128x = {
-		.num_stations                 = 1,
-		.ssid_profiles                = 1,
-		.rx_block_num                 = 40,
-		.tx_min_block_num             = 40,
-		.dynamic_memory               = 1,
-		.min_req_tx_blocks            = 45,
-		.min_req_rx_blocks            = 22,
-		.tx_min                       = 27,
-	},
-	.fm_coex = {
-		.enable                       = true,
-		.swallow_period               = 5,
-		.n_divider_fref_set_1         = 0xff,       /* default */
-		.n_divider_fref_set_2         = 12,
-		.m_divider_fref_set_1         = 148,
-		.m_divider_fref_set_2         = 0xffff,     /* default */
-		.coex_pll_stabilization_time  = 0xffffffff, /* default */
-		.ldo_stabilization_time       = 0xffff,     /* default */
-		.fm_disturbed_band_margin     = 0xff,       /* default */
-		.swallow_clk_diff             = 0xff,       /* default */
-	},
-	.rx_streaming = {
-		.duration                      = 150,
-		.queues                        = 0x1,
-		.interval                      = 20,
-		.always                        = 0,
-	},
-	.fwlog = {
-		.mode                         = WL12XX_FWLOG_ON_DEMAND,
-		.mem_blocks                   = 2,
-		.severity                     = 0,
-		.timestamp                    = WL12XX_FWLOG_TIMESTAMP_DISABLED,
-		.output                       = WL12XX_FWLOG_OUTPUT_HOST,
-		.threshold                    = 0,
-	},
-	.hci_io_ds = HCI_IO_DS_6MA,
-	.rate = {
-		.rate_retry_score = 32000,
-		.per_add = 8192,
-		.per_th1 = 2048,
-		.per_th2 = 4096,
-		.max_per = 8100,
-		.inverse_curiosity_factor = 5,
-		.tx_fail_low_th = 4,
-		.tx_fail_high_th = 10,
-		.per_alpha_shift = 4,
-		.per_add_shift = 13,
-		.per_beta1_shift = 10,
-		.per_beta2_shift = 8,
-		.rate_check_up = 2,
-		.rate_check_down = 12,
-		.rate_retry_policy = {
-			0x00, 0x00, 0x00, 0x00, 0x00,
-			0x00, 0x00, 0x00, 0x00, 0x00,
-			0x00, 0x00, 0x00,
-		},
-	},
-	.hangover = {
-		.recover_time               = 0,
-		.hangover_period            = 20,
-		.dynamic_mode               = 1,
-		.early_termination_mode     = 1,
-		.max_period                 = 20,
-		.min_period                 = 1,
-		.increase_delta             = 1,
-		.decrease_delta             = 2,
-		.quiet_time                 = 4,
-		.increase_time              = 1,
-		.window_size                = 16,
-	},
-};
+#define WL1271_BOOT_RETRIES 3
 
 static char *fwlog_param;
 static bool bug_on_recovery;
+static bool no_recovery;
 
 static void __wl1271_op_remove_interface(struct wl1271 *wl,
 					 struct ieee80211_vif *vif,
@@ -628,22 +300,8 @@
 	mutex_unlock(&wl->mutex);
 }
 
-static void wl1271_conf_init(struct wl1271 *wl)
+static void wlcore_adjust_conf(struct wl1271 *wl)
 {
-
-	/*
-	 * This function applies the default configuration to the driver. This
-	 * function is invoked upon driver load (spi probe.)
-	 *
-	 * The configuration is stored in a run-time structure in order to
-	 * facilitate for run-time adjustment of any of the parameters. Making
-	 * changes to the configuration structure will apply the new values on
-	 * the next interface up (wl1271_op_start.)
-	 */
-
-	/* apply driver default configuration */
-	memcpy(&wl->conf, &default_conf, sizeof(default_conf));
-
 	/* Adjust settings according to optional module parameters */
 	if (fwlog_param) {
 		if (!strcmp(fwlog_param, "continuous")) {
@@ -666,28 +324,7 @@
 {
 	int ret;
 
-	if (wl->chip.id == CHIP_ID_1283_PG20)
-		ret = wl128x_cmd_general_parms(wl);
-	else
-		ret = wl1271_cmd_general_parms(wl);
-	if (ret < 0)
-		return ret;
-
-	if (wl->chip.id == CHIP_ID_1283_PG20)
-		ret = wl128x_cmd_radio_parms(wl);
-	else
-		ret = wl1271_cmd_radio_parms(wl);
-	if (ret < 0)
-		return ret;
-
-	if (wl->chip.id != CHIP_ID_1283_PG20) {
-		ret = wl1271_cmd_ext_radio_parms(wl);
-		if (ret < 0)
-			return ret;
-	}
-
-	/* Chip-specific initializations */
-	ret = wl1271_chip_specific_init(wl);
+	ret = wl->ops->hw_init(wl);
 	if (ret < 0)
 		return ret;
 
@@ -750,7 +387,7 @@
 
 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
 					   struct wl12xx_vif *wlvif,
-					   struct wl12xx_fw_status *status)
+					   struct wl_fw_status *status)
 {
 	struct wl1271_link *lnk;
 	u32 cur_fw_ps_map;
@@ -770,9 +407,10 @@
 
 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
 		lnk = &wl->links[hlid];
-		cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;
+		cnt = status->counters.tx_lnk_free_pkts[hlid] -
+			lnk->prev_freed_pkts;
 
-		lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
+		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
 		lnk->allocated_pkts -= cnt;
 
 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
@@ -781,15 +419,19 @@
 }
 
 static void wl12xx_fw_status(struct wl1271 *wl,
-			     struct wl12xx_fw_status *status)
+			     struct wl_fw_status *status)
 {
 	struct wl12xx_vif *wlvif;
 	struct timespec ts;
 	u32 old_tx_blk_count = wl->tx_blocks_available;
 	int avail, freed_blocks;
 	int i;
+	size_t status_len;
 
-	wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
+	status_len = sizeof(*status) + wl->fw_status_priv_len;
+
+	wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status,
+			     status_len, false);
 
 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
 		     "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -801,10 +443,10 @@
 	for (i = 0; i < NUM_TX_QUEUES; i++) {
 		/* prevent wrap-around in freed-packets counter */
 		wl->tx_allocated_pkts[i] -=
-				(status->tx_released_pkts[i] -
+				(status->counters.tx_released_pkts[i] -
 				wl->tx_pkts_freed[i]) & 0xff;
 
-		wl->tx_pkts_freed[i] = status->tx_released_pkts[i];
+		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
 	}
 
 	/* prevent wrap-around in total blocks counter */
@@ -927,6 +569,9 @@
 		smp_mb__after_clear_bit();
 
 		wl12xx_fw_status(wl, wl->fw_status);
+
+		wlcore_hw_tx_immediate_compl(wl);
+
 		intr = le32_to_cpu(wl->fw_status->intr);
 		intr &= WL1271_INTR_MASK;
 		if (!intr) {
@@ -963,9 +608,7 @@
 			}
 
 			/* check for tx results */
-			if (wl->fw_status->tx_results_counter !=
-			    (wl->tx_results_count & 0xff))
-				wl1271_tx_complete(wl);
+			wlcore_hw_tx_delayed_compl(wl);
 
 			/* Make sure the deferred queues don't get too long */
 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
@@ -1046,10 +689,7 @@
 
 	if (plt) {
 		fw_type = WL12XX_FW_TYPE_PLT;
-		if (wl->chip.id == CHIP_ID_1283_PG20)
-			fw_name = WL128X_PLT_FW_NAME;
-		else
-			fw_name	= WL127X_PLT_FW_NAME;
+		fw_name = wl->plt_fw_name;
 	} else {
 		/*
 		 * we can't call wl12xx_get_vif_count() here because
@@ -1057,16 +697,10 @@
 		 */
 		if (wl->last_vif_count > 1) {
 			fw_type = WL12XX_FW_TYPE_MULTI;
-			if (wl->chip.id == CHIP_ID_1283_PG20)
-				fw_name = WL128X_FW_NAME_MULTI;
-			else
-				fw_name = WL127X_FW_NAME_MULTI;
+			fw_name = wl->mr_fw_name;
 		} else {
 			fw_type = WL12XX_FW_TYPE_NORMAL;
-			if (wl->chip.id == CHIP_ID_1283_PG20)
-				fw_name = WL128X_FW_NAME_SINGLE;
-			else
-				fw_name = WL127X_FW_NAME_SINGLE;
+			fw_name = wl->sr_fw_name;
 		}
 	}
 
@@ -1173,7 +807,7 @@
 	u32 first_addr;
 	u8 *block;
 
-	if ((wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
+	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
 	    (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
 	    (wl->conf.fwlog.mem_blocks == 0))
 		return;
@@ -1239,11 +873,20 @@
 	wl12xx_read_fwlog_panic(wl);
 
 	wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
-		    wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));
+		    wl->chip.fw_ver_str,
+		    wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
 
 	BUG_ON(bug_on_recovery &&
 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
 
+	if (no_recovery) {
+		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
+		clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
+		goto out_unlock;
+	}
+
+	BUG_ON(bug_on_recovery);
+
 	/*
 	 * Advance security sequence number to overcome potential progress
 	 * in the firmware during recovery. This doesn't hurt if the network is
@@ -1290,10 +933,7 @@
 
 static void wl1271_fw_wakeup(struct wl1271 *wl)
 {
-	u32 elp_reg;
-
-	elp_reg = ELPCTRL_WAKE_UP;
-	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
+	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
 }
 
 static int wl1271_setup(struct wl1271 *wl)
@@ -1323,7 +963,7 @@
 	wl1271_io_reset(wl);
 	wl1271_io_init(wl);
 
-	wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]);
+	wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
 
 	/* ELP module wake up */
 	wl1271_fw_wakeup(wl);
@@ -1348,44 +988,18 @@
 	 * negligible, we use the same block size for all different
 	 * chip types.
 	 */
-	if (!wl1271_set_block_size(wl))
-		wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
+	if (wl1271_set_block_size(wl))
+		wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
 
-	switch (wl->chip.id) {
-	case CHIP_ID_1271_PG10:
-		wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
-			       wl->chip.id);
-
-		ret = wl1271_setup(wl);
-		if (ret < 0)
-			goto out;
-		wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
-		break;
-
-	case CHIP_ID_1271_PG20:
-		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
-			     wl->chip.id);
-
-		ret = wl1271_setup(wl);
-		if (ret < 0)
-			goto out;
-		wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
-		break;
-
-	case CHIP_ID_1283_PG20:
-		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
-			     wl->chip.id);
-
-		ret = wl1271_setup(wl);
-		if (ret < 0)
-			goto out;
-		break;
-	case CHIP_ID_1283_PG10:
-	default:
-		wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
-		ret = -ENODEV;
+	ret = wl->ops->identify_chip(wl);
+	if (ret < 0)
 		goto out;
-	}
+
+	/* TODO: make sure the lower driver has set things up correctly */
+
+	ret = wl1271_setup(wl);
+	if (ret < 0)
+		goto out;
 
 	ret = wl12xx_fetch_firmware(wl, plt);
 	if (ret < 0)
@@ -1425,7 +1039,7 @@
 		if (ret < 0)
 			goto power_off;
 
-		ret = wl1271_boot(wl);
+		ret = wl->ops->boot(wl);
 		if (ret < 0)
 			goto power_off;
 
@@ -1454,7 +1068,7 @@
 		   work function will not do anything.) Also, any other
 		   possible concurrent operations will fail due to the
 		   current state, hence the wl1271 struct should be safe. */
-		wl1271_disable_interrupts(wl);
+		wlcore_disable_interrupts(wl);
 		wl1271_flush_deferred_work(wl);
 		cancel_work_sync(&wl->netstack_work);
 		mutex_lock(&wl->mutex);
@@ -1481,7 +1095,7 @@
 	 * Otherwise, the interrupt handler might be called and exit without
 	 * reading the interrupt status.
 	 */
-	wl1271_disable_interrupts(wl);
+	wlcore_disable_interrupts(wl);
 	mutex_lock(&wl->mutex);
 	if (!wl->plt) {
 		mutex_unlock(&wl->mutex);
@@ -1491,7 +1105,7 @@
 		 * may have been disabled when op_stop was called. It will,
 		 * however, balance the above call to disable_interrupts().
 		 */
-		wl1271_enable_interrupts(wl);
+		wlcore_enable_interrupts(wl);
 
 		wl1271_error("cannot power down because not in PLT "
 			     "state: %d", wl->state);
@@ -1652,14 +1266,12 @@
 {
 	int ret = 0;
 
-	mutex_lock(&wl->mutex);
-
 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
-		goto out_unlock;
+		goto out;
 
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
-		goto out_unlock;
+		goto out;
 
 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
 				    wl->conf.conn.suspend_wake_up_event,
@@ -1668,11 +1280,9 @@
 	if (ret < 0)
 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
 
-
 	wl1271_ps_elp_sleep(wl);
 
-out_unlock:
-	mutex_unlock(&wl->mutex);
+out:
 	return ret;
 
 }
@@ -1682,20 +1292,17 @@
 {
 	int ret = 0;
 
-	mutex_lock(&wl->mutex);
-
 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
-		goto out_unlock;
+		goto out;
 
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
-		goto out_unlock;
+		goto out;
 
 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
 
 	wl1271_ps_elp_sleep(wl);
-out_unlock:
-	mutex_unlock(&wl->mutex);
+out:
 	return ret;
 
 }
@@ -1720,10 +1327,9 @@
 	if ((!is_ap) && (!is_sta))
 		return;
 
-	mutex_lock(&wl->mutex);
 	ret = wl1271_ps_elp_wakeup(wl);
 	if (ret < 0)
-		goto out;
+		return;
 
 	if (is_sta) {
 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
@@ -1739,8 +1345,6 @@
 	}
 
 	wl1271_ps_elp_sleep(wl);
-out:
-	mutex_unlock(&wl->mutex);
 }
 
 static int wl1271_op_suspend(struct ieee80211_hw *hw,
@@ -1755,6 +1359,7 @@
 
 	wl1271_tx_flush(wl);
 
+	mutex_lock(&wl->mutex);
 	wl->wow_enabled = true;
 	wl12xx_for_each_wlvif(wl, wlvif) {
 		ret = wl1271_configure_suspend(wl, wlvif);
@@ -1763,6 +1368,7 @@
 			return ret;
 		}
 	}
+	mutex_unlock(&wl->mutex);
 	/* flush any remaining work */
 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
 
@@ -1770,7 +1376,7 @@
 	 * disable and re-enable interrupts in order to flush
 	 * the threaded_irq
 	 */
-	wl1271_disable_interrupts(wl);
+	wlcore_disable_interrupts(wl);
 
 	/*
 	 * set suspended flag to avoid triggering a new threaded_irq
@@ -1778,7 +1384,7 @@
 	 */
 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
 
-	wl1271_enable_interrupts(wl);
+	wlcore_enable_interrupts(wl);
 	flush_work(&wl->tx_work);
 	flush_delayed_work(&wl->elp_work);
 
@@ -1810,12 +1416,15 @@
 		wl1271_debug(DEBUG_MAC80211,
 			     "run postponed irq_work directly");
 		wl1271_irq(0, wl);
-		wl1271_enable_interrupts(wl);
+		wlcore_enable_interrupts(wl);
 	}
+
+	mutex_lock(&wl->mutex);
 	wl12xx_for_each_wlvif(wl, wlvif) {
 		wl1271_configure_resume(wl, wlvif);
 	}
 	wl->wow_enabled = false;
+	mutex_unlock(&wl->mutex);
 
 	return 0;
 }
@@ -1851,7 +1460,7 @@
 	 * Otherwise, the interrupt handler might be called and exit without
 	 * reading the interrupt status.
 	 */
-	wl1271_disable_interrupts(wl);
+	wlcore_disable_interrupts(wl);
 	mutex_lock(&wl->mutex);
 	if (wl->state == WL1271_STATE_OFF) {
 		mutex_unlock(&wl->mutex);
@@ -1861,7 +1470,7 @@
 		 * may have been disabled when op_stop was called. It will,
 		 * however, balance the above call to disable_interrupts().
 		 */
-		wl1271_enable_interrupts(wl);
+		wlcore_enable_interrupts(wl);
 		return;
 	}
 
@@ -1894,7 +1503,6 @@
 	wl->tx_results_count = 0;
 	wl->tx_packets_count = 0;
 	wl->time_offset = 0;
-	wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
 	wl->ap_fw_ps_map = 0;
 	wl->ap_ps_map = 0;
 	wl->sched_scanning = false;
@@ -2067,7 +1675,7 @@
 		if (ret < 0)
 			goto power_off;
 
-		ret = wl1271_boot(wl);
+		ret = wl->ops->boot(wl);
 		if (ret < 0)
 			goto power_off;
 
@@ -2087,7 +1695,7 @@
 		   work function will not do anything.) Also, any other
 		   possible concurrent operations will fail due to the
 		   current state, hence the wl1271 struct should be safe. */
-		wl1271_disable_interrupts(wl);
+		wlcore_disable_interrupts(wl);
 		wl1271_flush_deferred_work(wl);
 		cancel_work_sync(&wl->netstack_work);
 		mutex_lock(&wl->mutex);
@@ -2360,10 +1968,12 @@
 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
 			wl12xx_free_rate_policy(wl,
 						&wlvif->ap.ucast_rate_idx[i]);
+		wl1271_free_ap_keys(wl, wlvif);
 	}
 
+	dev_kfree_skb(wlvif->probereq);
+	wlvif->probereq = NULL;
 	wl12xx_tx_reset_wlvif(wl, wlvif);
-	wl1271_free_ap_keys(wl, wlvif);
 	if (wl->last_wlvif == wlvif)
 		wl->last_wlvif = NULL;
 	list_del(&wlvif->list);
@@ -2946,6 +2556,17 @@
 	int ret;
 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
 
+	/*
+	 * A role set to GEM cipher requires different Tx settings (namely
+	 * spare blocks). Note when we are in this mode so the HW can adjust.
+	 */
+	if (key_type == KEY_GEM) {
+		if (action == KEY_ADD_OR_REPLACE)
+			wlvif->is_gem = true;
+		else if (action == KEY_REMOVE)
+			wlvif->is_gem = false;
+	}
+
 	if (is_ap) {
 		struct wl1271_station *wl_sta;
 		u8 hlid;
@@ -2984,17 +2605,6 @@
 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 		};
 
-		/*
-		 * A STA set to GEM cipher requires 2 tx spare blocks.
-		 * Return to default value when GEM cipher key is removed
-		 */
-		if (key_type == KEY_GEM) {
-			if (action == KEY_ADD_OR_REPLACE)
-				wl->tx_spare_blocks = 2;
-			else if (action == KEY_REMOVE)
-				wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
-		}
-
 		addr = sta ? sta->addr : bcast_addr;
 
 		if (is_zero_ether_addr(addr)) {
@@ -3791,8 +3401,7 @@
 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
 	}
 
-	if (changed & BSS_CHANGED_BSSID &&
-	    (is_ibss || bss_conf->assoc))
+	if (changed & BSS_CHANGED_BSSID)
 		if (!is_zero_ether_addr(bss_conf->bssid)) {
 			ret = wl12xx_cmd_build_null_data(wl, wlvif);
 			if (ret < 0)
@@ -3801,9 +3410,6 @@
 			ret = wl1271_build_qos_null_data(wl, vif);
 			if (ret < 0)
 				goto out;
-
-			/* Need to update the BSSID (for filtering etc) */
-			do_join = true;
 		}
 
 	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
@@ -3830,6 +3436,7 @@
 			int ieoffset;
 			wlvif->aid = bss_conf->aid;
 			wlvif->beacon_int = bss_conf->beacon_int;
+			do_join = true;
 			set_assoc = true;
 
 			/*
@@ -4662,60 +4269,12 @@
 	{ .hw_value = 14, .center_freq = 2484, .max_power = 25 },
 };
 
-/* mapping to indexes for wl1271_rates */
-static const u8 wl1271_rate_to_idx_2ghz[] = {
-	/* MCS rates are used only with 11n */
-	7,                            /* CONF_HW_RXTX_RATE_MCS7_SGI */
-	7,                            /* CONF_HW_RXTX_RATE_MCS7 */
-	6,                            /* CONF_HW_RXTX_RATE_MCS6 */
-	5,                            /* CONF_HW_RXTX_RATE_MCS5 */
-	4,                            /* CONF_HW_RXTX_RATE_MCS4 */
-	3,                            /* CONF_HW_RXTX_RATE_MCS3 */
-	2,                            /* CONF_HW_RXTX_RATE_MCS2 */
-	1,                            /* CONF_HW_RXTX_RATE_MCS1 */
-	0,                            /* CONF_HW_RXTX_RATE_MCS0 */
-
-	11,                            /* CONF_HW_RXTX_RATE_54   */
-	10,                            /* CONF_HW_RXTX_RATE_48   */
-	9,                             /* CONF_HW_RXTX_RATE_36   */
-	8,                             /* CONF_HW_RXTX_RATE_24   */
-
-	/* TI-specific rate */
-	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22   */
-
-	7,                             /* CONF_HW_RXTX_RATE_18   */
-	6,                             /* CONF_HW_RXTX_RATE_12   */
-	3,                             /* CONF_HW_RXTX_RATE_11   */
-	5,                             /* CONF_HW_RXTX_RATE_9    */
-	4,                             /* CONF_HW_RXTX_RATE_6    */
-	2,                             /* CONF_HW_RXTX_RATE_5_5  */
-	1,                             /* CONF_HW_RXTX_RATE_2    */
-	0                              /* CONF_HW_RXTX_RATE_1    */
-};
-
-/* 11n STA capabilities */
-#define HW_RX_HIGHEST_RATE	72
-
-#define WL12XX_HT_CAP { \
-	.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | \
-	       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), \
-	.ht_supported = true, \
-	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
-	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
-	.mcs = { \
-		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
-		.rx_highest = cpu_to_le16(HW_RX_HIGHEST_RATE), \
-		.tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
-		}, \
-}
-
 /* can't be const, mac80211 writes to this */
 static struct ieee80211_supported_band wl1271_band_2ghz = {
 	.channels = wl1271_channels,
 	.n_channels = ARRAY_SIZE(wl1271_channels),
 	.bitrates = wl1271_rates,
 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
-	.ht_cap	= WL12XX_HT_CAP,
 };
 
 /* 5 GHz data rates for WL1273 */
@@ -4784,48 +4343,11 @@
 	{ .hw_value = 165, .center_freq = 5825, .max_power = 25 },
 };
 
-/* mapping to indexes for wl1271_rates_5ghz */
-static const u8 wl1271_rate_to_idx_5ghz[] = {
-	/* MCS rates are used only with 11n */
-	7,                            /* CONF_HW_RXTX_RATE_MCS7_SGI */
-	7,                            /* CONF_HW_RXTX_RATE_MCS7 */
-	6,                            /* CONF_HW_RXTX_RATE_MCS6 */
-	5,                            /* CONF_HW_RXTX_RATE_MCS5 */
-	4,                            /* CONF_HW_RXTX_RATE_MCS4 */
-	3,                            /* CONF_HW_RXTX_RATE_MCS3 */
-	2,                            /* CONF_HW_RXTX_RATE_MCS2 */
-	1,                            /* CONF_HW_RXTX_RATE_MCS1 */
-	0,                            /* CONF_HW_RXTX_RATE_MCS0 */
-
-	7,                             /* CONF_HW_RXTX_RATE_54   */
-	6,                             /* CONF_HW_RXTX_RATE_48   */
-	5,                             /* CONF_HW_RXTX_RATE_36   */
-	4,                             /* CONF_HW_RXTX_RATE_24   */
-
-	/* TI-specific rate */
-	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22   */
-
-	3,                             /* CONF_HW_RXTX_RATE_18   */
-	2,                             /* CONF_HW_RXTX_RATE_12   */
-	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11   */
-	1,                             /* CONF_HW_RXTX_RATE_9    */
-	0,                             /* CONF_HW_RXTX_RATE_6    */
-	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5  */
-	CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2    */
-	CONF_HW_RXTX_RATE_UNSUPPORTED  /* CONF_HW_RXTX_RATE_1    */
-};
-
 static struct ieee80211_supported_band wl1271_band_5ghz = {
 	.channels = wl1271_channels_5ghz,
 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
 	.bitrates = wl1271_rates_5ghz,
 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
-	.ht_cap	= WL12XX_HT_CAP,
-};
-
-static const u8 *wl1271_band_rate_to_idx[] = {
-	[IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
-	[IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
 };
 
 static const struct ieee80211_ops wl1271_ops = {
@@ -4862,18 +4384,18 @@
 };
 
 
-u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band)
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
 {
 	u8 idx;
 
-	BUG_ON(band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
+	BUG_ON(band >= 2);
 
-	if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
+	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
 		wl1271_error("Illegal RX rate from HW: %d", rate);
 		return 0;
 	}
 
-	idx = wl1271_band_rate_to_idx[band][rate];
+	idx = wl->band_rate_to_idx[band][rate];
 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
 		wl1271_error("Unsupported RX rate from HW: %d", rate);
 		return 0;
@@ -5027,34 +4549,6 @@
 	.read = wl1271_sysfs_read_fwlog,
 };
 
-static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
-{
-	bool supported = false;
-	u8 major, minor;
-
-	if (wl->chip.id == CHIP_ID_1283_PG20) {
-		major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
-		minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
-
-		/* in wl128x we have the MAC address if the PG is >= (2, 1) */
-		if (major > 2 || (major == 2 && minor >= 1))
-			supported = true;
-	} else {
-		major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver);
-		minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver);
-
-		/* in wl127x we have the MAC address if the PG is >= (3, 1) */
-		if (major == 3 && minor >= 1)
-			supported = true;
-	}
-
-	wl1271_debug(DEBUG_PROBE,
-		     "PG Ver major = %d minor = %d, MAC %s present",
-		     major, minor, supported ? "is" : "is not");
-
-	return supported;
-}
-
 static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
 					u32 oui, u32 nic, int n)
 {
@@ -5080,47 +4574,23 @@
 	wl->hw->wiphy->addresses = wl->addresses;
 }
 
-static void wl12xx_get_fuse_mac(struct wl1271 *wl)
-{
-	u32 mac1, mac2;
-
-	wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]);
-
-	mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1);
-	mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2);
-
-	/* these are the two parts of the BD_ADDR */
-	wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
-		((mac1 & 0xff000000) >> 24);
-	wl->fuse_nic_addr = mac1 & 0xffffff;
-
-	wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]);
-}
-
 static int wl12xx_get_hw_info(struct wl1271 *wl)
 {
 	int ret;
-	u32 die_info;
 
 	ret = wl12xx_set_power_on(wl);
 	if (ret < 0)
 		goto out;
 
-	wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
+	wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B);
 
-	if (wl->chip.id == CHIP_ID_1283_PG20)
-		die_info = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
-	else
-		die_info = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
+	wl->fuse_oui_addr = 0;
+	wl->fuse_nic_addr = 0;
 
-	wl->hw_pg_ver = (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET;
+	wl->hw_pg_ver = wl->ops->get_pg_ver(wl);
 
-	if (!wl12xx_mac_in_fuse(wl)) {
-		wl->fuse_oui_addr = 0;
-		wl->fuse_nic_addr = 0;
-	} else {
-		wl12xx_get_fuse_mac(wl);
-	}
+	if (wl->ops->get_mac)
+		wl->ops->get_mac(wl);
 
 	wl1271_power_off(wl);
 out:
@@ -5242,7 +4712,8 @@
 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
 		sizeof(struct ieee80211_header);
 
-	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
+				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
 	/* make sure all our channels fit in the scanned_ch bitmask */
 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
@@ -5254,8 +4725,12 @@
 	 */
 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
 	       sizeof(wl1271_band_2ghz));
+	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap,
+	       sizeof(wl->ht_cap));
 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
 	       sizeof(wl1271_band_5ghz));
+	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap,
+	       sizeof(wl->ht_cap));
 
 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
 		&wl->bands[IEEE80211_BAND_2GHZ];
@@ -5279,14 +4754,14 @@
 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
 
-	wl->hw->max_rx_aggregation_subframes = 8;
+	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
 
 	return 0;
 }
 
 #define WL1271_DEFAULT_CHANNEL 0
 
-static struct ieee80211_hw *wl1271_alloc_hw(void)
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
 {
 	struct ieee80211_hw *hw;
 	struct wl1271 *wl;
@@ -5305,6 +4780,13 @@
 	wl = hw->priv;
 	memset(wl, 0, sizeof(*wl));
 
+	wl->priv = kzalloc(priv_size, GFP_KERNEL);
+	if (!wl->priv) {
+		wl1271_error("could not alloc wl priv");
+		ret = -ENOMEM;
+		goto err_priv_alloc;
+	}
+
 	INIT_LIST_HEAD(&wl->wlvif_list);
 
 	wl->hw = hw;
@@ -5341,7 +4823,6 @@
 	wl->quirks = 0;
 	wl->platform_quirks = 0;
 	wl->sched_scanning = false;
-	wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
 	wl->system_hlid = WL12XX_SYSTEM_HLID;
 	wl->active_sta_count = 0;
 	wl->fwlog_size = 0;
@@ -5351,7 +4832,7 @@
 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
 
 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
-	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
+	for (i = 0; i < wl->num_tx_desc; i++)
 		wl->tx_frames[i] = NULL;
 
 	spin_lock_init(&wl->wl_lock);
@@ -5360,9 +4841,6 @@
 	wl->fw_type = WL12XX_FW_TYPE_NONE;
 	mutex_init(&wl->mutex);
 
-	/* Apply default driver configuration. */
-	wl1271_conf_init(wl);
-
 	order = get_order(WL1271_AGGR_BUFFER_SIZE);
 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
 	if (!wl->aggr_buf) {
@@ -5383,8 +4861,17 @@
 		goto err_dummy_packet;
 	}
 
+	wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_DMA);
+	if (!wl->mbox) {
+		ret = -ENOMEM;
+		goto err_fwlog;
+	}
+
 	return hw;
 
+err_fwlog:
+	free_page((unsigned long)wl->fwlog);
+
 err_dummy_packet:
 	dev_kfree_skb(wl->dummy_packet);
 
@@ -5396,14 +4883,18 @@
 
 err_hw:
 	wl1271_debugfs_exit(wl);
+	kfree(wl->priv);
+
+err_priv_alloc:
 	ieee80211_free_hw(hw);
 
 err_hw_alloc:
 
 	return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
 
-static int wl1271_free_hw(struct wl1271 *wl)
+int wlcore_free_hw(struct wl1271 *wl)
 {
 	/* Unblock any fwlog readers */
 	mutex_lock(&wl->mutex);
@@ -5433,10 +4924,12 @@
 	kfree(wl->tx_res_if);
 	destroy_workqueue(wl->freezable_wq);
 
+	kfree(wl->priv);
 	ieee80211_free_hw(wl->hw);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(wlcore_free_hw);
 
 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
 {
@@ -5467,22 +4960,22 @@
 	return IRQ_WAKE_THREAD;
 }
 
-static int __devinit wl12xx_probe(struct platform_device *pdev)
+int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
 {
 	struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
-	struct ieee80211_hw *hw;
-	struct wl1271 *wl;
 	unsigned long irqflags;
-	int ret = -ENODEV;
+	int ret;
 
-	hw = wl1271_alloc_hw();
-	if (IS_ERR(hw)) {
-		wl1271_error("can't allocate hw");
-		ret = PTR_ERR(hw);
-		goto out;
+	if (!wl->ops || !wl->ptable) {
+		ret = -EINVAL;
+		goto out_free_hw;
 	}
 
-	wl = hw->priv;
+	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
+
+	/* adjust some runtime configuration parameters */
+	wlcore_adjust_conf(wl);
+
 	wl->irq = platform_get_irq(pdev, 0);
 	wl->ref_clock = pdata->board_ref_clock;
 	wl->tcxo_clock = pdata->board_tcxo_clock;
@@ -5511,7 +5004,7 @@
 		wl->irq_wake_enabled = true;
 		device_init_wakeup(wl->dev, 1);
 		if (pdata->pwr_in_suspend)
-			hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+			wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
 
 	}
 	disable_irq(wl->irq);
@@ -5545,7 +5038,7 @@
 		goto out_hw_pg_ver;
 	}
 
-	return 0;
+	goto out;
 
 out_hw_pg_ver:
 	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
@@ -5557,13 +5050,14 @@
 	free_irq(wl->irq, wl);
 
 out_free_hw:
-	wl1271_free_hw(wl);
+	wlcore_free_hw(wl);
 
 out:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(wlcore_probe);
 
-static int __devexit wl12xx_remove(struct platform_device *pdev)
+int __devexit wlcore_remove(struct platform_device *pdev)
 {
 	struct wl1271 *wl = platform_get_drvdata(pdev);
 
@@ -5573,38 +5067,11 @@
 	}
 	wl1271_unregister_hw(wl);
 	free_irq(wl->irq, wl);
-	wl1271_free_hw(wl);
+	wlcore_free_hw(wl);
 
 	return 0;
 }
-
-static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
-	{ "wl12xx", 0 },
-	{  } /* Terminating Entry */
-};
-MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
-
-static struct platform_driver wl12xx_driver = {
-	.probe		= wl12xx_probe,
-	.remove		= __devexit_p(wl12xx_remove),
-	.id_table	= wl12xx_id_table,
-	.driver = {
-		.name	= "wl12xx_driver",
-		.owner	= THIS_MODULE,
-	}
-};
-
-static int __init wl12xx_init(void)
-{
-	return platform_driver_register(&wl12xx_driver);
-}
-module_init(wl12xx_init);
-
-static void __exit wl12xx_exit(void)
-{
-	platform_driver_unregister(&wl12xx_driver);
-}
-module_exit(wl12xx_exit);
+EXPORT_SYMBOL_GPL(wlcore_remove);
 
 u32 wl12xx_debug_level = DEBUG_NONE;
 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
@@ -5618,6 +5085,9 @@
 module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
 
+module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
similarity index 97%
rename from drivers/net/wireless/wl12xx/ps.c
rename to drivers/net/wireless/ti/wlcore/ps.c
index 78f598b..756eee2 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -21,7 +21,6 @@
  *
  */
 
-#include "reg.h"
 #include "ps.h"
 #include "io.h"
 #include "tx.h"
@@ -62,7 +61,7 @@
 	}
 
 	wl1271_debug(DEBUG_PSM, "chip to elp");
-	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
+	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
 
 out:
@@ -74,6 +73,9 @@
 {
 	struct wl12xx_vif *wlvif;
 
+	if (wl->quirks & WLCORE_QUIRK_NO_ELP)
+		return;
+
 	/* we shouldn't get consecutive sleep requests */
 	if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
 		return;
@@ -125,7 +127,7 @@
 		wl->elp_compl = &compl;
 	spin_unlock_irqrestore(&wl->wl_lock, flags);
 
-	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
+	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
 
 	if (!pending) {
 		ret = wait_for_completion_timeout(
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/ti/wlcore/ps.h
similarity index 98%
rename from drivers/net/wireless/wl12xx/ps.h
rename to drivers/net/wireless/ti/wlcore/ps.h
index 5f19d4f..de4f9da 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/ti/wlcore/ps.h
@@ -24,7 +24,7 @@
 #ifndef __PS_H__
 #define __PS_H__
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "acx.h"
 
 int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
similarity index 69%
rename from drivers/net/wireless/wl12xx/rx.c
rename to drivers/net/wireless/ti/wlcore/rx.c
index cfa6071..89bd938 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -24,34 +24,36 @@
 #include <linux/gfp.h>
 #include <linux/sched.h>
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "debug.h"
 #include "acx.h"
-#include "reg.h"
 #include "rx.h"
 #include "tx.h"
 #include "io.h"
+#include "hw_ops.h"
 
-static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
-				  u32 drv_rx_counter)
+/*
+ * TODO: this is here just for now, it must be removed when the data
+ * operations are in place.
+ */
+#include "../wl12xx/reg.h"
+
+static u32 wlcore_rx_get_buf_size(struct wl1271 *wl,
+				  u32 rx_pkt_desc)
 {
-	return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
-		RX_MEM_BLOCK_MASK;
+	if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN)
+		return (rx_pkt_desc & ALIGNED_RX_BUF_SIZE_MASK) >>
+		       ALIGNED_RX_BUF_SIZE_SHIFT;
+
+	return (rx_pkt_desc & RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
 }
 
-static u32 wl12xx_rx_get_buf_size(struct wl12xx_fw_status *status,
-				 u32 drv_rx_counter)
+static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
 {
-	return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
-		RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
-}
+	if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN)
+		return ALIGN(pkt_len, WL12XX_BUS_BLOCK_SIZE);
 
-static bool wl12xx_rx_get_unaligned(struct wl12xx_fw_status *status,
-				    u32 drv_rx_counter)
-{
-	/* Convert the value to bool */
-	return !!(le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
-		RX_BUF_UNALIGNED_PAYLOAD);
+	return pkt_len;
 }
 
 static void wl1271_rx_status(struct wl1271 *wl,
@@ -66,10 +68,10 @@
 	else
 		status->band = IEEE80211_BAND_5GHZ;
 
-	status->rate_idx = wl1271_rate_to_idx(desc->rate, status->band);
+	status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band);
 
 	/* 11n support */
-	if (desc->rate <= CONF_HW_RXTX_RATE_MCS0)
+	if (desc->rate <= wl->hw_min_ht_rate)
 		status->flag |= RX_FLAG_HT;
 
 	status->signal = desc->rssi;
@@ -98,7 +100,7 @@
 }
 
 static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
-				 bool unaligned, u8 *hlid)
+				 enum wl_rx_buf_align rx_align, u8 *hlid)
 {
 	struct wl1271_rx_descriptor *desc;
 	struct sk_buff *skb;
@@ -106,8 +108,9 @@
 	u8 *buf;
 	u8 beacon = 0;
 	u8 is_data = 0;
-	u8 reserved = unaligned ? NET_IP_ALIGN : 0;
+	u8 reserved = 0;
 	u16 seq_num;
+	u32 pkt_data_len;
 
 	/*
 	 * In PLT mode we seem to get frames and mac80211 warns about them,
@@ -116,6 +119,16 @@
 	if (unlikely(wl->plt))
 		return -EINVAL;
 
+	pkt_data_len = wlcore_hw_get_rx_packet_len(wl, data, length);
+	if (!pkt_data_len) {
+		wl1271_error("Invalid packet arrived from HW. length %d",
+			     length);
+		return -EINVAL;
+	}
+
+	if (rx_align == WLCORE_RX_BUF_UNALIGNED)
+		reserved = NET_IP_ALIGN;
+
 	/* the data read starts with the descriptor */
 	desc = (struct wl1271_rx_descriptor *) data;
 
@@ -142,8 +155,8 @@
 		return -EINVAL;
 	}
 
-	/* skb length not included rx descriptor */
-	skb = __dev_alloc_skb(length + reserved - sizeof(*desc), GFP_KERNEL);
+	/* skb length not including rx descriptor */
+	skb = __dev_alloc_skb(pkt_data_len + reserved, GFP_KERNEL);
 	if (!skb) {
 		wl1271_error("Couldn't allocate RX frame");
 		return -ENOMEM;
@@ -152,7 +165,7 @@
 	/* reserve the unaligned payload(if any) */
 	skb_reserve(skb, reserved);
 
-	buf = skb_put(skb, length - sizeof(*desc));
+	buf = skb_put(skb, pkt_data_len);
 
 	/*
 	 * Copy packets from aggregation buffer to the skbs without rx
@@ -160,7 +173,10 @@
 	 * packets copy the packets in offset of 2 bytes guarantee IP header
 	 * payload aligned to 4 bytes.
 	 */
-	memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
+	memcpy(buf, data + sizeof(*desc), pkt_data_len);
+	if (rx_align == WLCORE_RX_BUF_PADDED)
+		skb_pull(skb, NET_IP_ALIGN);
+
 	*hlid = desc->hlid;
 
 	hdr = (struct ieee80211_hdr *)skb->data;
@@ -177,36 +193,35 @@
 		     beacon ? "beacon" : "",
 		     seq_num, *hlid);
 
-	skb_trim(skb, skb->len - desc->pad_len);
-
 	skb_queue_tail(&wl->deferred_rx_queue, skb);
 	queue_work(wl->freezable_wq, &wl->netstack_work);
 
 	return is_data;
 }
 
-void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
+void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
 {
-	struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
 	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
 	u32 buf_size;
 	u32 fw_rx_counter  = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
 	u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
 	u32 rx_counter;
-	u32 mem_block;
-	u32 pkt_length;
-	u32 pkt_offset;
+	u32 pkt_len, align_pkt_len;
+	u32 pkt_offset, des;
 	u8 hlid;
-	bool unaligned = false;
+	enum wl_rx_buf_align rx_align;
 
 	while (drv_rx_counter != fw_rx_counter) {
 		buf_size = 0;
 		rx_counter = drv_rx_counter;
 		while (rx_counter != fw_rx_counter) {
-			pkt_length = wl12xx_rx_get_buf_size(status, rx_counter);
-			if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
+			des = le32_to_cpu(status->rx_pkt_descs[rx_counter]);
+			pkt_len = wlcore_rx_get_buf_size(wl, des);
+			align_pkt_len = wlcore_rx_get_align_buf_size(wl,
+								     pkt_len);
+			if (buf_size + align_pkt_len > WL1271_AGGR_BUFFER_SIZE)
 				break;
-			buf_size += pkt_length;
+			buf_size += align_pkt_len;
 			rx_counter++;
 			rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
 		}
@@ -216,38 +231,18 @@
 			break;
 		}
 
-		if (wl->chip.id != CHIP_ID_1283_PG20) {
-			/*
-			 * Choose the block we want to read
-			 * For aggregated packets, only the first memory block
-			 * should be retrieved. The FW takes care of the rest.
-			 */
-			mem_block = wl12xx_rx_get_mem_block(status,
-							    drv_rx_counter);
-
-			wl->rx_mem_pool_addr.addr = (mem_block << 8) +
-			   le32_to_cpu(wl_mem_map->packet_memory_pool_start);
-
-			wl->rx_mem_pool_addr.addr_extra =
-				wl->rx_mem_pool_addr.addr + 4;
-
-			wl1271_write(wl, WL1271_SLV_REG_DATA,
-				     &wl->rx_mem_pool_addr,
-				     sizeof(wl->rx_mem_pool_addr), false);
-		}
-
 		/* Read all available packets at once */
-		wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
-				buf_size, true);
+		des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
+		wlcore_hw_prepare_read(wl, des, buf_size);
+		wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
+				 buf_size, true);
 
 		/* Split data into separate packets */
 		pkt_offset = 0;
 		while (pkt_offset < buf_size) {
-			pkt_length = wl12xx_rx_get_buf_size(status,
-					drv_rx_counter);
-
-			unaligned = wl12xx_rx_get_unaligned(status,
-					drv_rx_counter);
+			des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
+			pkt_len = wlcore_rx_get_buf_size(wl, des);
+			rx_align = wlcore_hw_get_rx_buf_align(wl, des);
 
 			/*
 			 * the handle data call can only fail in memory-outage
@@ -256,7 +251,7 @@
 			 */
 			if (wl1271_rx_handle_data(wl,
 						  wl->aggr_buf + pkt_offset,
-						  pkt_length, unaligned,
+						  pkt_len, rx_align,
 						  &hlid) == 1) {
 				if (hlid < WL12XX_MAX_LINKS)
 					__set_bit(hlid, active_hlids);
@@ -269,7 +264,7 @@
 			wl->rx_counter++;
 			drv_rx_counter++;
 			drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
-			pkt_offset += pkt_length;
+			pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len);
 		}
 	}
 
@@ -277,8 +272,9 @@
 	 * Write the driver's packet counter to the FW. This is only required
 	 * for older hardware revisions
 	 */
-	if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
-		wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
+	if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
+		wl1271_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
+			       wl->rx_counter);
 
 	wl12xx_rearm_rx_streaming(wl, active_hlids);
 }
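
For reference, the length handling that wlcore_rx_get_buf_size() and
wlcore_rx_get_align_buf_size() perform in the loop above can be summarized
in one place. This is only a restatement of the two helpers, using the masks
defined in rx.h below:

	u32 des = le32_to_cpu(status->rx_pkt_descs[i]);
	u32 pkt_len, aligned_len;

	if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN) {
		/* bits 8..23 carry the packet length in bytes; the driver
		 * then pads it to the bus block size for the aggregated read */
		pkt_len = (des & ALIGNED_RX_BUF_SIZE_MASK) >>
			  ALIGNED_RX_BUF_SIZE_SHIFT;
		aligned_len = ALIGN(pkt_len, WL12XX_BUS_BLOCK_SIZE);
	} else {
		/* bits 8..19 carry the length in 4-byte words; shifting the
		 * masked word by 6 instead of 8 converts it back to bytes */
		pkt_len = (des & RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
		aligned_len = pkt_len;
	}

The buf_size accumulation and the pkt_offset bookkeeping use aligned_len,
while the skb itself is sized from whatever the chip-specific
wlcore_hw_get_rx_packet_len() hook reports.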
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
similarity index 91%
rename from drivers/net/wireless/wl12xx/rx.h
rename to drivers/net/wireless/ti/wlcore/rx.h
index 86ba6b1..6e129e2 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -96,9 +96,19 @@
 #define RX_MEM_BLOCK_MASK            0xFF
 #define RX_BUF_SIZE_MASK             0xFFF00
 #define RX_BUF_SIZE_SHIFT_DIV        6
+#define ALIGNED_RX_BUF_SIZE_MASK     0xFFFF00
+#define ALIGNED_RX_BUF_SIZE_SHIFT    8
+
 /* If set, the start of IP payload is not 4 bytes aligned */
 #define RX_BUF_UNALIGNED_PAYLOAD     BIT(20)
 
+/* Describes the alignment state of a Rx buffer */
+enum wl_rx_buf_align {
+	WLCORE_RX_BUF_ALIGNED,
+	WLCORE_RX_BUF_UNALIGNED,
+	WLCORE_RX_BUF_PADDED,
+};
+
 enum {
 	WL12XX_RX_CLASS_UNKNOWN,
 	WL12XX_RX_CLASS_MANAGEMENT,
@@ -126,7 +136,7 @@
 	u8  reserved;
 } __packed;
 
-void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status);
+void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 
 #endif
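
The wl_rx_buf_align enum above replaces the old unaligned-payload bool:
WLCORE_RX_BUF_UNALIGNED tells rx.c to reserve NET_IP_ALIGN before copying,
while WLCORE_RX_BUF_PADDED means the firmware already inserted the two
padding bytes, which rx.c then strips with skb_pull(). A lower driver maps
its descriptor bits onto this enum through the get_rx_buf_align hook; a
minimal sketch for hardware that only reports the RX_BUF_UNALIGNED_PAYLOAD
bit (the function name is an assumption, not part of this patch):

	static enum wl_rx_buf_align
	wl127x_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
	{
		if (rx_desc & RX_BUF_UNALIGNED_PAYLOAD)
			return WLCORE_RX_BUF_UNALIGNED;

		return WLCORE_RX_BUF_ALIGNED;
	}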
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
similarity index 94%
rename from drivers/net/wireless/wl12xx/scan.c
rename to drivers/net/wireless/ti/wlcore/scan.c
index fcba055..ade21a0 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -23,7 +23,7 @@
 
 #include <linux/ieee80211.h>
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "debug.h"
 #include "cmd.h"
 #include "scan.h"
@@ -417,6 +417,23 @@
 	int i, j;
 	u32 flags;
 	bool force_passive = !req->n_ssids;
+	u32 min_dwell_time_active, max_dwell_time_active, delta_per_probe;
+	u32 dwell_time_passive, dwell_time_dfs;
+
+	if (band == IEEE80211_BAND_5GHZ)
+		delta_per_probe = c->dwell_time_delta_per_probe_5;
+	else
+		delta_per_probe = c->dwell_time_delta_per_probe;
+
+	min_dwell_time_active = c->base_dwell_time +
+		 req->n_ssids * c->num_probe_reqs * delta_per_probe;
+
+	max_dwell_time_active = min_dwell_time_active + c->max_dwell_time_delta;
+
+	min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000);
+	max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000);
+	dwell_time_passive = DIV_ROUND_UP(c->dwell_time_passive, 1000);
+	dwell_time_dfs = DIV_ROUND_UP(c->dwell_time_dfs, 1000);
 
 	for (i = 0, j = start;
 	     i < req->n_channels && j < max_channels;
@@ -440,21 +457,24 @@
 				     req->channels[i]->flags);
 			wl1271_debug(DEBUG_SCAN, "max_power %d",
 				     req->channels[i]->max_power);
+			wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
+				     min_dwell_time_active,
+				     max_dwell_time_active);
 
 			if (flags & IEEE80211_CHAN_RADAR) {
 				channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
 
 				channels[j].passive_duration =
-					cpu_to_le16(c->dwell_time_dfs);
+					cpu_to_le16(dwell_time_dfs);
 			} else {
 				channels[j].passive_duration =
-					cpu_to_le16(c->dwell_time_passive);
+					cpu_to_le16(dwell_time_passive);
 			}
 
 			channels[j].min_duration =
-				cpu_to_le16(c->min_dwell_time_active);
+				cpu_to_le16(min_dwell_time_active);
 			channels[j].max_duration =
-				cpu_to_le16(c->max_dwell_time_active);
+				cpu_to_le16(max_dwell_time_active);
 
 			channels[j].tx_power_att = req->channels[i]->max_power;
 			channels[j].channel = req->channels[i]->hw_value;
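
The per-channel active dwell times are no longer copied verbatim from the
configuration; they are derived from it, scale with the number of SSIDs and
probe requests, and are then rounded up from TU/1000 (the units the old
default conf used) to whole TUs. A purely illustrative example with made-up
conf values, base_dwell_time = 7500, dwell_time_delta_per_probe = 1000,
max_dwell_time_delta = 20000 and num_probe_reqs = 2, for a single-SSID
2.4 GHz scan:

	min_dwell_time_active = 7500 + 1 * 2 * 1000 = 9500  (TU/1000)
	max_dwell_time_active = 9500 + 20000        = 29500 (TU/1000)
	DIV_ROUND_UP(9500, 1000)  = 10 TU  ->  channels[j].min_duration
	DIV_ROUND_UP(29500, 1000) = 30 TU  ->  channels[j].max_duration

The passive and DFS dwell times get the same TU/1000 to TU rounding.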
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
similarity index 98%
rename from drivers/net/wireless/wl12xx/scan.h
rename to drivers/net/wireless/ti/wlcore/scan.h
index 96ff457..81ee36a 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -24,7 +24,7 @@
 #ifndef __SCAN_H__
 #define __SCAN_H__
 
-#include "wl12xx.h"
+#include "wlcore.h"
 
 int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
 		const u8 *ssid, size_t ssid_len,
@@ -55,7 +55,7 @@
 #define WL1271_SCAN_BAND_2_4_GHZ 0
 #define WL1271_SCAN_BAND_5_GHZ 1
 
-#define WL1271_SCAN_TIMEOUT    10000 /* msec */
+#define WL1271_SCAN_TIMEOUT    30000 /* msec */
 
 enum {
 	WL1271_SCAN_STATE_IDLE,
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
similarity index 98%
rename from drivers/net/wireless/wl12xx/sdio.c
rename to drivers/net/wireless/ti/wlcore/sdio.c
index 4b3c327..0a72347 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -33,7 +33,7 @@
 #include <linux/wl12xx.h>
 #include <linux/pm_runtime.h>
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "wl12xx_80211.h"
 #include "io.h"
 
@@ -76,7 +76,7 @@
 
 	sdio_claim_host(func);
 
-	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
+	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
 		((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
 		dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
 			addr, ((u8 *)buf)[0]);
@@ -105,7 +105,7 @@
 
 	sdio_claim_host(func);
 
-	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
+	if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
 		sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
 		dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
 			addr, ((u8 *)buf)[0]);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
similarity index 99%
rename from drivers/net/wireless/wl12xx/spi.c
rename to drivers/net/wireless/ti/wlcore/spi.c
index 2fc18a8..553cd3c 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -30,12 +30,10 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "wl12xx_80211.h"
 #include "io.h"
 
-#include "reg.h"
-
 #define WSPI_CMD_READ                 0x40000000
 #define WSPI_CMD_WRITE                0x00000000
 #define WSPI_CMD_FIXED                0x20000000
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
similarity index 96%
rename from drivers/net/wireless/wl12xx/testmode.c
rename to drivers/net/wireless/ti/wlcore/testmode.c
index 1e93bb9..0e59ea2 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -25,10 +25,9 @@
 #include <linux/slab.h>
 #include <net/genetlink.h>
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "debug.h"
 #include "acx.h"
-#include "reg.h"
 #include "ps.h"
 #include "io.h"
 
@@ -116,7 +115,8 @@
 			goto out_sleep;
 		}
 
-		NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
+		if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf))
+			goto nla_put_failure;
 		ret = cfg80211_testmode_reply(skb);
 		if (ret < 0)
 			goto out_sleep;
@@ -178,7 +178,8 @@
 		goto out_free;
 	}
 
-	NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+	if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd))
+		goto nla_put_failure;
 	ret = cfg80211_testmode_reply(skb);
 	if (ret < 0)
 		goto out_free;
@@ -297,7 +298,8 @@
 		goto out;
 	}
 
-	NLA_PUT(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr);
+	if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr))
+		goto nla_put_failure;
 	ret = cfg80211_testmode_reply(skb);
 	if (ret < 0)
 		goto out;
diff --git a/drivers/net/wireless/wl12xx/testmode.h b/drivers/net/wireless/ti/wlcore/testmode.h
similarity index 100%
rename from drivers/net/wireless/wl12xx/testmode.h
rename to drivers/net/wireless/ti/wlcore/testmode.h
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
similarity index 90%
rename from drivers/net/wireless/wl12xx/tx.c
rename to drivers/net/wireless/ti/wlcore/tx.c
index 43ae491..6893bc2 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -25,13 +25,19 @@
 #include <linux/module.h>
 #include <linux/etherdevice.h>
 
-#include "wl12xx.h"
+#include "wlcore.h"
 #include "debug.h"
 #include "io.h"
-#include "reg.h"
 #include "ps.h"
 #include "tx.h"
 #include "event.h"
+#include "hw_ops.h"
+
+/*
+ * TODO: this is here just for now, it must be removed when the data
+ * operations are in place.
+ */
+#include "../wl12xx/reg.h"
 
 static int wl1271_set_default_wep_key(struct wl1271 *wl,
 				      struct wl12xx_vif *wlvif, u8 id)
@@ -56,8 +62,8 @@
 {
 	int id;
 
-	id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS);
-	if (id >= ACX_TX_DESCRIPTORS)
+	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
+	if (id >= wl->num_tx_desc)
 		return -EBUSY;
 
 	__set_bit(id, wl->tx_frames_map);
@@ -69,7 +75,7 @@
 static void wl1271_free_tx_id(struct wl1271 *wl, int id)
 {
 	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
-		if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS))
+		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
 			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 
 		wl->tx_frames[id] = NULL;
@@ -167,14 +173,15 @@
 		return wlvif->dev_hlid;
 }
 
-static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
-						unsigned int packet_length)
+unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
+					  unsigned int packet_length)
 {
-	if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
-		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
-	else
+	if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
 		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
+	else
+		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
 }
+EXPORT_SYMBOL(wlcore_calc_packet_alignment);
 
 static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			      struct sk_buff *skb, u32 extra, u32 buf_offset,
@@ -182,10 +189,9 @@
 {
 	struct wl1271_tx_hw_descr *desc;
 	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
-	u32 len;
 	u32 total_blocks;
 	int id, ret = -EBUSY, ac;
-	u32 spare_blocks = wl->tx_spare_blocks;
+	u32 spare_blocks = wl->normal_tx_spare;
 	bool is_dummy = false;
 
 	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
@@ -196,30 +202,19 @@
 	if (id < 0)
 		return id;
 
-	/* approximate the number of blocks required for this packet
-	   in the firmware */
-	len = wl12xx_calc_packet_alignment(wl, total_len);
-
-	/* in case of a dummy packet, use default amount of spare mem blocks */
-	if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
+	if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
 		is_dummy = true;
-		spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
-	}
+	else if (wlvif->is_gem)
+		spare_blocks = wl->gem_tx_spare;
 
-	total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
-		spare_blocks;
+	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
 
 	if (total_blocks <= wl->tx_blocks_available) {
 		desc = (struct wl1271_tx_hw_descr *)skb_push(
 			skb, total_len - skb->len);
 
-		/* HW descriptor fields change between wl127x and wl128x */
-		if (wl->chip.id == CHIP_ID_1283_PG20) {
-			desc->wl128x_mem.total_mem_blocks = total_blocks;
-		} else {
-			desc->wl127x_mem.extra_blocks = spare_blocks;
-			desc->wl127x_mem.total_mem_blocks = total_blocks;
-		}
+		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
+					     spare_blocks);
 
 		desc->id = id;
 
@@ -256,7 +251,7 @@
 {
 	struct timespec ts;
 	struct wl1271_tx_hw_descr *desc;
-	int aligned_len, ac, rate_idx;
+	int ac, rate_idx;
 	s64 hosttime;
 	u16 tx_attr = 0;
 	__le16 frame_control;
@@ -329,44 +324,16 @@
 	}
 
 	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
-	desc->reserved = 0;
-
-	aligned_len = wl12xx_calc_packet_alignment(wl, skb->len);
-
-	if (wl->chip.id == CHIP_ID_1283_PG20) {
-		desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
-		desc->length = cpu_to_le16(aligned_len >> 2);
-
-		wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
-			     "tx_attr: 0x%x len: %d life: %d mem: %d",
-			     desc->hlid, tx_attr,
-			     le16_to_cpu(desc->length),
-			     le16_to_cpu(desc->life_time),
-			     desc->wl128x_mem.total_mem_blocks);
-	} else {
-		int pad;
-
-		/* Store the aligned length in terms of words */
-		desc->length = cpu_to_le16(aligned_len >> 2);
-
-		/* calculate number of padding bytes */
-		pad = aligned_len - skb->len;
-		tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
-
-		wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
-			     "tx_attr: 0x%x len: %d life: %d mem: %d", pad,
-			     desc->hlid, tx_attr,
-			     le16_to_cpu(desc->length),
-			     le16_to_cpu(desc->life_time),
-			     desc->wl127x_mem.total_mem_blocks);
-	}
 
 	/* for WEP shared auth - no fw encryption is needed */
 	if (ieee80211_is_auth(frame_control) &&
 	    ieee80211_has_protected(frame_control))
 		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
 
+	desc->reserved = 0;
 	desc->tx_attr = cpu_to_le16(tx_attr);
+
+	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
 }
 
 /* caller must hold wl->mutex */
@@ -432,7 +399,7 @@
 	 * In special cases, we want to align to a specific block size
 	 * (eg. for wl128x with SDIO we align to 256).
 	 */
-	total_len = wl12xx_calc_packet_alignment(wl, skb->len);
+	total_len = wlcore_calc_packet_alignment(wl, skb->len);
 
 	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
 	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
@@ -718,8 +685,8 @@
 			 * Flush buffer and try again.
 			 */
 			wl1271_skb_queue_head(wl, wlvif, skb);
-			wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
-				     buf_offset, true);
+			wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
+					  buf_offset, true);
 			sent_packets = true;
 			buf_offset = 0;
 			continue;
@@ -753,8 +720,8 @@
 
 out_ack:
 	if (buf_offset) {
-		wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
-				buf_offset, true);
+		wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
+				  buf_offset, true);
 		sent_packets = true;
 	}
 	if (sent_packets) {
@@ -762,8 +729,8 @@
 		 * Interrupt the firmware with the new packets. This is only
 		 * required for older hardware revisions
 		 */
-		if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
-			wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
+		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
+			wl1271_write32(wl, WL12XX_HOST_WR_ACCESS,
 				       wl->tx_packets_count);
 
 		wl1271_handle_tx_low_watermark(wl);
@@ -792,11 +759,20 @@
 {
 	u8 flags = 0;
 
-	if (rate_class_index >= CONF_HW_RXTX_RATE_MCS_MIN &&
-	    rate_class_index <= CONF_HW_RXTX_RATE_MCS_MAX)
+	/*
+	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
+	 * only it uses Tx-completion.
+	 */
+	if (rate_class_index <= 8)
 		flags |= IEEE80211_TX_RC_MCS;
-	if (rate_class_index == CONF_HW_RXTX_RATE_MCS7_SGI)
+
+	/*
+	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
+	 * only it uses Tx-completion.
+	 */
+	if (rate_class_index == 0)
 		flags |= IEEE80211_TX_RC_SHORT_GI;
+
 	return flags;
 }
 
@@ -813,7 +789,7 @@
 	u8 retries = 0;
 
 	/* check for id legality */
-	if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
+	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
 		wl1271_warning("TX result illegal id: %d", id);
 		return;
 	}
@@ -834,7 +810,7 @@
 	if (result->status == TX_SUCCESS) {
 		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
 			info->flags |= IEEE80211_TX_STAT_ACK;
-		rate = wl1271_rate_to_idx(result->rate_class_index,
+		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
 					  wlvif->band);
 		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
 		retries = result->ack_failures;
@@ -929,6 +905,7 @@
 		wl->tx_results_count++;
 	}
 }
+EXPORT_SYMBOL(wl1271_tx_complete);
 
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
 {
@@ -1006,7 +983,7 @@
 	if (reset_tx_queues)
 		wl1271_handle_tx_low_watermark(wl);
 
-	for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
+	for (i = 0; i < wl->num_tx_desc; i++) {
 		if (wl->tx_frames[i] == NULL)
 			continue;
 
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
similarity index 97%
rename from drivers/net/wireless/wl12xx/tx.h
rename to drivers/net/wireless/ti/wlcore/tx.h
index 5cf8c32..2fd6e5d 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -25,9 +25,6 @@
 #ifndef __TX_H__
 #define __TX_H__
 
-#define TX_HW_BLOCK_SPARE_DEFAULT        1
-#define TX_HW_BLOCK_SIZE                 252
-
 #define TX_HW_MGMT_PKT_LIFETIME_TU       2000
 #define TX_HW_AP_MODE_PKT_LIFETIME_TU    8000
 
@@ -212,7 +209,7 @@
 void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
 void wl1271_tx_flush(struct wl1271 *wl);
-u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
 				enum ieee80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
@@ -224,6 +221,8 @@
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
 void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
+unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
+					  unsigned int packet_length);
 
 /* from main.c */
 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
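The wlcore_calc_packet_alignment() helper exported just above replaces the static wl12xx_calc_packet_alignment() that the aggregation path in tx.c used to call. A minimal sketch of what such a helper looks like, assuming the WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN quirk selects bus-block rounding and that WL12XX_HW_BLOCK_SIZE and WL1271_TX_ALIGN_TO carry the values used elsewhere in this series:

/*
 * Illustrative sketch only; the real body stays in tx.c, and the
 * direction of the quirk test plus the fallback alignment are
 * assumptions, not taken from this diff.
 */
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
		return ALIGN(packet_length, WL12XX_HW_BLOCK_SIZE);

	return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
}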
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wlcore/wl12xx.h
similarity index 68%
rename from drivers/net/wireless/wl12xx/wl12xx.h
rename to drivers/net/wireless/ti/wlcore/wl12xx.h
index 749a15a..a9b220c 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx.h
@@ -89,8 +89,6 @@
 #define WL1271_AP_BSS_INDEX        0
 #define WL1271_AP_DEF_BEACON_EXP   20
 
-#define ACX_TX_DESCRIPTORS         16
-
 #define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
 
 enum wl1271_state {
@@ -105,26 +103,6 @@
 	WL12XX_FW_TYPE_PLT,
 };
 
-enum wl1271_partition_type {
-	PART_DOWN,
-	PART_WORK,
-	PART_DRPW,
-
-	PART_TABLE_LEN
-};
-
-struct wl1271_partition {
-	u32 size;
-	u32 start;
-};
-
-struct wl1271_partition_set {
-	struct wl1271_partition mem;
-	struct wl1271_partition reg;
-	struct wl1271_partition mem2;
-	struct wl1271_partition mem3;
-};
-
 struct wl1271;
 
 enum {
@@ -167,8 +145,21 @@
 
 #define AP_MAX_STATIONS            8
 
+struct wl_fw_packet_counters {
+	/* Cumulative counter of released packets per AC */
+	u8 tx_released_pkts[NUM_TX_QUEUES];
+
+	/* Cumulative counter of freed packets per HLID */
+	u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
+
+	/* Cumulative counter of released Voice memory blocks */
+	u8 tx_voice_released_blks;
+
+	u8 padding[3];
+} __packed;
+
 /* FW status registers */
-struct wl12xx_fw_status {
+struct wl_fw_status {
 	__le32 intr;
 	u8  fw_rx_counter;
 	u8  drv_rx_counter;
@@ -195,16 +186,12 @@
 	/* Size (in Memory Blocks) of TX pool */
 	__le32 tx_total;
 
-	/* Cumulative counter of released packets per AC */
-	u8 tx_released_pkts[NUM_TX_QUEUES];
+	struct wl_fw_packet_counters counters;
 
-	/* Cumulative counter of freed packets per HLID */
-	u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
-
-	/* Cumulative counter of released Voice memory blocks */
-	u8 tx_voice_released_blks;
-	u8 padding_1[3];
 	__le32 log_start_addr;
+
+	/* Private status to be used by the lower drivers */
+	u8 priv[0];
 } __packed;
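The restructured firmware status above ends in a zero-length priv[] member so each chip family can append its own status words; the wlcore.h added later in this diff sizes the allocation with fw_status_priv_len. A hedged sketch of how a lower driver could read such a private tail (the struct and field below are hypothetical, not part of this patch):

/* Hypothetical per-family status block appended after struct wl_fw_status */
struct wlxxx_priv_fw_status {
	__le32 rx_frames_dropped;
} __packed;

static u32 wlxxx_get_rx_dropped(struct wl1271 *wl)
{
	struct wlxxx_priv_fw_status *p =
		(struct wlxxx_priv_fw_status *)wl->fw_status->priv;

	/*
	 * Assumes the lower driver set wl->fw_status_priv_len to
	 * sizeof(*p) before the core allocated fw_status.
	 */
	return le32_to_cpu(p->rx_frames_dropped);
}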
 
 struct wl1271_rx_mem_pool_addr {
@@ -292,214 +279,6 @@
 	u8 ba_bitmap;
 };
 
-struct wl1271 {
-	struct ieee80211_hw *hw;
-	bool mac80211_registered;
-
-	struct device *dev;
-
-	void *if_priv;
-
-	struct wl1271_if_operations *if_ops;
-
-	void (*set_power)(bool enable);
-	int irq;
-	int ref_clock;
-
-	spinlock_t wl_lock;
-
-	enum wl1271_state state;
-	enum wl12xx_fw_type fw_type;
-	bool plt;
-	u8 last_vif_count;
-	struct mutex mutex;
-
-	unsigned long flags;
-
-	struct wl1271_partition_set part;
-
-	struct wl1271_chip chip;
-
-	int cmd_box_addr;
-	int event_box_addr;
-
-	u8 *fw;
-	size_t fw_len;
-	void *nvs;
-	size_t nvs_len;
-
-	s8 hw_pg_ver;
-
-	/* address read from the fuse ROM */
-	u32 fuse_oui_addr;
-	u32 fuse_nic_addr;
-
-	/* we have up to 2 MAC addresses */
-	struct mac_address addresses[2];
-	int channel;
-	u8 system_hlid;
-
-	unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
-	unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
-	unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
-	unsigned long rate_policies_map[
-			BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
-
-	struct list_head wlvif_list;
-
-	u8 sta_count;
-	u8 ap_count;
-
-	struct wl1271_acx_mem_map *target_mem_map;
-
-	/* Accounting for allocated / available TX blocks on HW */
-	u32 tx_blocks_freed;
-	u32 tx_blocks_available;
-	u32 tx_allocated_blocks;
-	u32 tx_results_count;
-
-	/* amount of spare TX blocks to use */
-	u32 tx_spare_blocks;
-
-	/* Accounting for allocated / available Tx packets in HW */
-	u32 tx_pkts_freed[NUM_TX_QUEUES];
-	u32 tx_allocated_pkts[NUM_TX_QUEUES];
-
-	/* Transmitted TX packets counter for chipset interface */
-	u32 tx_packets_count;
-
-	/* Time-offset between host and chipset clocks */
-	s64 time_offset;
-
-	/* Frames scheduled for transmission, not handled yet */
-	int tx_queue_count[NUM_TX_QUEUES];
-	long stopped_queues_map;
-
-	/* Frames received, not handled yet by mac80211 */
-	struct sk_buff_head deferred_rx_queue;
-
-	/* Frames sent, not returned yet to mac80211 */
-	struct sk_buff_head deferred_tx_queue;
-
-	struct work_struct tx_work;
-	struct workqueue_struct *freezable_wq;
-
-	/* Pending TX frames */
-	unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)];
-	struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
-	int tx_frames_cnt;
-
-	/* FW Rx counter */
-	u32 rx_counter;
-
-	/* Rx memory pool address */
-	struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
-
-	/* Intermediate buffer, used for packet aggregation */
-	u8 *aggr_buf;
-
-	/* Reusable dummy packet template */
-	struct sk_buff *dummy_packet;
-
-	/* Network stack work  */
-	struct work_struct netstack_work;
-
-	/* FW log buffer */
-	u8 *fwlog;
-
-	/* Number of valid bytes in the FW log buffer */
-	ssize_t fwlog_size;
-
-	/* Sysfs FW log entry readers wait queue */
-	wait_queue_head_t fwlog_waitq;
-
-	/* Hardware recovery work */
-	struct work_struct recovery_work;
-
-	/* The mbox event mask */
-	u32 event_mask;
-
-	/* Mailbox pointers */
-	u32 mbox_ptr[2];
-
-	/* Are we currently scanning */
-	struct ieee80211_vif *scan_vif;
-	struct wl1271_scan scan;
-	struct delayed_work scan_complete_work;
-
-	bool sched_scanning;
-
-	/* The current band */
-	enum ieee80211_band band;
-
-	struct completion *elp_compl;
-	struct delayed_work elp_work;
-
-	/* in dBm */
-	int power_level;
-
-	struct wl1271_stats stats;
-
-	__le32 buffer_32;
-	u32 buffer_cmd;
-	u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
-
-	struct wl12xx_fw_status *fw_status;
-	struct wl1271_tx_hw_res_if *tx_res_if;
-
-	/* Current chipset configuration */
-	struct conf_drv_settings conf;
-
-	bool sg_enabled;
-
-	bool enable_11a;
-
-	/* Most recently reported noise in dBm */
-	s8 noise;
-
-	/* bands supported by this instance of wl12xx */
-	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
-	int tcxo_clock;
-
-	/*
-	 * wowlan trigger was configured during suspend.
-	 * (currently, only "ANY" trigger is supported)
-	 */
-	bool wow_enabled;
-	bool irq_wake_enabled;
-
-	/*
-	 * AP-mode - links indexed by HLID. The global and broadcast links
-	 * are always active.
-	 */
-	struct wl1271_link links[WL12XX_MAX_LINKS];
-
-	/* AP-mode - a bitmap of links currently in PS mode according to FW */
-	u32 ap_fw_ps_map;
-
-	/* AP-mode - a bitmap of links currently in PS mode in mac80211 */
-	unsigned long ap_ps_map;
-
-	/* Quirks of specific hardware revisions */
-	unsigned int quirks;
-
-	/* Platform limitations */
-	unsigned int platform_quirks;
-
-	/* number of currently active RX BA sessions */
-	int ba_rx_session_count;
-
-	/* AP-mode - number of currently connected stations */
-	int active_sta_count;
-
-	/* last wlvif we transmitted from */
-	struct wl12xx_vif *last_wlvif;
-
-	/* work to fire when Tx is stuck */
-	struct delayed_work tx_watchdog_work;
-};
-
 struct wl1271_station {
 	u8 hlid;
 };
@@ -605,6 +384,9 @@
 	struct work_struct rx_streaming_disable_work;
 	struct timer_list rx_streaming_timer;
 
+	/* does the current role use GEM for encryption (AP or STA) */
+	bool is_gem;
+
 	/*
 	 * This struct must be last!
 	 * data that has to be saved across reconfigs (e.g. recovery)
@@ -679,17 +461,6 @@
 #define HW_BG_RATES_MASK	0xffff
 #define HW_HT_RATES_OFFSET	16
 
-/* Quirks */
-
-/* Each RX/TX transaction requires an end-of-transaction transfer */
-#define WL12XX_QUIRK_END_OF_TRANSACTION		BIT(0)
-
-/* wl127x and SPI don't support SDIO block size alignment */
-#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT	BIT(2)
-
-/* Older firmwares did not implement the FW logger over bus feature */
-#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED	BIT(4)
-
 #define WL12XX_HW_BLOCK_SIZE	256
 
 #endif
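The new is_gem flag above, together with the normal_tx_spare and gem_tx_spare fields introduced in wlcore.h further down, lets the Tx path choose how many spare memory blocks to reserve per descriptor. A rough sketch of that selection, assuming any vif using GEM forces the larger spare count and that the wl12xx_for_each_wlvif() iterator from this header is available:

/*
 * Illustrative only; the helper name is an assumption and the real
 * decision may live in the per-chip calc_tx_blocks() callback.
 */
static u32 wlcore_tx_spare_blocks(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->is_gem)
			return wl->gem_tx_spare;
	}

	return wl->normal_tx_spare;
}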
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
similarity index 100%
rename from drivers/net/wireless/wl12xx/wl12xx_80211.h
rename to drivers/net/wireless/ti/wlcore/wl12xx_80211.h
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c
similarity index 100%
rename from drivers/net/wireless/wl12xx/wl12xx_platform_data.c
rename to drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
new file mode 100644
index 0000000..39f9fad
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -0,0 +1,448 @@
+/*
+ * This file is part of wlcore
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WLCORE_H__
+#define __WLCORE_H__
+
+#include <linux/platform_device.h>
+
+#include "wl12xx.h"
+#include "event.h"
+
+/* The maximum number of Tx descriptors in all chip families */
+#define WLCORE_MAX_TX_DESCRIPTORS 32
+
+/* forward declaration */
+struct wl1271_tx_hw_descr;
+enum wl_rx_buf_align;
+
+struct wlcore_ops {
+	int (*identify_chip)(struct wl1271 *wl);
+	int (*identify_fw)(struct wl1271 *wl);
+	int (*boot)(struct wl1271 *wl);
+	void (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
+			    void *buf, size_t len);
+	void (*ack_event)(struct wl1271 *wl);
+	u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks);
+	void (*set_tx_desc_blocks)(struct wl1271 *wl,
+				   struct wl1271_tx_hw_descr *desc,
+				   u32 blks, u32 spare_blks);
+	void (*set_tx_desc_data_len)(struct wl1271 *wl,
+				     struct wl1271_tx_hw_descr *desc,
+				     struct sk_buff *skb);
+	enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl,
+						 u32 rx_desc);
+	void (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len);
+	u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data,
+				 u32 data_len);
+	void (*tx_delayed_compl)(struct wl1271 *wl);
+	void (*tx_immediate_compl)(struct wl1271 *wl);
+	int (*hw_init)(struct wl1271 *wl);
+	int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+	u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
+				    struct wl12xx_vif *wlvif);
+	s8 (*get_pg_ver)(struct wl1271 *wl);
+	void (*get_mac)(struct wl1271 *wl);
+};
+
+enum wlcore_partitions {
+	PART_DOWN,
+	PART_WORK,
+	PART_BOOT,
+	PART_DRPW,
+	PART_TOP_PRCM_ELP_SOC,
+	PART_PHY_INIT,
+
+	PART_TABLE_LEN,
+};
+
+struct wlcore_partition {
+	u32 size;
+	u32 start;
+};
+
+struct wlcore_partition_set {
+	struct wlcore_partition mem;
+	struct wlcore_partition reg;
+	struct wlcore_partition mem2;
+	struct wlcore_partition mem3;
+};
+
+enum wlcore_registers {
+	/* register addresses, used with partition translation */
+	REG_ECPU_CONTROL,
+	REG_INTERRUPT_NO_CLEAR,
+	REG_INTERRUPT_ACK,
+	REG_COMMAND_MAILBOX_PTR,
+	REG_EVENT_MAILBOX_PTR,
+	REG_INTERRUPT_TRIG,
+	REG_INTERRUPT_MASK,
+	REG_PC_ON_RECOVERY,
+	REG_CHIP_ID_B,
+	REG_CMD_MBOX_ADDRESS,
+
+	/* data access memory addresses, used with partition translation */
+	REG_SLV_MEM_DATA,
+	REG_SLV_REG_DATA,
+
+	/* raw data access memory addresses */
+	REG_RAW_FW_STATUS_ADDR,
+
+	REG_TABLE_LEN,
+};
+
+struct wl1271 {
+	struct ieee80211_hw *hw;
+	bool mac80211_registered;
+
+	struct device *dev;
+
+	void *if_priv;
+
+	struct wl1271_if_operations *if_ops;
+
+	void (*set_power)(bool enable);
+	int irq;
+	int ref_clock;
+
+	spinlock_t wl_lock;
+
+	enum wl1271_state state;
+	enum wl12xx_fw_type fw_type;
+	bool plt;
+	u8 last_vif_count;
+	struct mutex mutex;
+
+	unsigned long flags;
+
+	struct wlcore_partition_set curr_part;
+
+	struct wl1271_chip chip;
+
+	int cmd_box_addr;
+
+	u8 *fw;
+	size_t fw_len;
+	void *nvs;
+	size_t nvs_len;
+
+	s8 hw_pg_ver;
+
+	/* address read from the fuse ROM */
+	u32 fuse_oui_addr;
+	u32 fuse_nic_addr;
+
+	/* we have up to 2 MAC addresses */
+	struct mac_address addresses[2];
+	int channel;
+	u8 system_hlid;
+
+	unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+	unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
+	unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
+	unsigned long rate_policies_map[
+			BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+
+	struct list_head wlvif_list;
+
+	u8 sta_count;
+	u8 ap_count;
+
+	struct wl1271_acx_mem_map *target_mem_map;
+
+	/* Accounting for allocated / available TX blocks on HW */
+	u32 tx_blocks_freed;
+	u32 tx_blocks_available;
+	u32 tx_allocated_blocks;
+	u32 tx_results_count;
+
+	/* Accounting for allocated / available Tx packets in HW */
+	u32 tx_pkts_freed[NUM_TX_QUEUES];
+	u32 tx_allocated_pkts[NUM_TX_QUEUES];
+
+	/* Transmitted TX packets counter for chipset interface */
+	u32 tx_packets_count;
+
+	/* Time-offset between host and chipset clocks */
+	s64 time_offset;
+
+	/* Frames scheduled for transmission, not handled yet */
+	int tx_queue_count[NUM_TX_QUEUES];
+	long stopped_queues_map;
+
+	/* Frames received, not handled yet by mac80211 */
+	struct sk_buff_head deferred_rx_queue;
+
+	/* Frames sent, not returned yet to mac80211 */
+	struct sk_buff_head deferred_tx_queue;
+
+	struct work_struct tx_work;
+	struct workqueue_struct *freezable_wq;
+
+	/* Pending TX frames */
+	unsigned long tx_frames_map[BITS_TO_LONGS(WLCORE_MAX_TX_DESCRIPTORS)];
+	struct sk_buff *tx_frames[WLCORE_MAX_TX_DESCRIPTORS];
+	int tx_frames_cnt;
+
+	/* FW Rx counter */
+	u32 rx_counter;
+
+	/* Rx memory pool address */
+	struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
+
+	/* Intermediate buffer, used for packet aggregation */
+	u8 *aggr_buf;
+
+	/* Reusable dummy packet template */
+	struct sk_buff *dummy_packet;
+
+	/* Network stack work  */
+	struct work_struct netstack_work;
+
+	/* FW log buffer */
+	u8 *fwlog;
+
+	/* Number of valid bytes in the FW log buffer */
+	ssize_t fwlog_size;
+
+	/* Sysfs FW log entry readers wait queue */
+	wait_queue_head_t fwlog_waitq;
+
+	/* Hardware recovery work */
+	struct work_struct recovery_work;
+
+	/* Pointer that holds DMA-friendly block for the mailbox */
+	struct event_mailbox *mbox;
+
+	/* The mbox event mask */
+	u32 event_mask;
+
+	/* Mailbox pointers */
+	u32 mbox_ptr[2];
+
+	/* Are we currently scanning */
+	struct ieee80211_vif *scan_vif;
+	struct wl1271_scan scan;
+	struct delayed_work scan_complete_work;
+
+	bool sched_scanning;
+
+	/* The current band */
+	enum ieee80211_band band;
+
+	struct completion *elp_compl;
+	struct delayed_work elp_work;
+
+	/* in dBm */
+	int power_level;
+
+	struct wl1271_stats stats;
+
+	__le32 buffer_32;
+	u32 buffer_cmd;
+	u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
+
+	struct wl_fw_status *fw_status;
+	struct wl1271_tx_hw_res_if *tx_res_if;
+
+	/* Current chipset configuration */
+	struct wlcore_conf conf;
+
+	bool sg_enabled;
+
+	bool enable_11a;
+
+	/* Most recently reported noise in dBm */
+	s8 noise;
+
+	/* bands supported by this instance of wl12xx */
+	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+	int tcxo_clock;
+
+	/*
+	 * wowlan trigger was configured during suspend.
+	 * (currently, only "ANY" trigger is supported)
+	 */
+	bool wow_enabled;
+	bool irq_wake_enabled;
+
+	/*
+	 * AP-mode - links indexed by HLID. The global and broadcast links
+	 * are always active.
+	 */
+	struct wl1271_link links[WL12XX_MAX_LINKS];
+
+	/* AP-mode - a bitmap of links currently in PS mode according to FW */
+	u32 ap_fw_ps_map;
+
+	/* AP-mode - a bitmap of links currently in PS mode in mac80211 */
+	unsigned long ap_ps_map;
+
+	/* Quirks of specific hardware revisions */
+	unsigned int quirks;
+
+	/* Platform limitations */
+	unsigned int platform_quirks;
+
+	/* number of currently active RX BA sessions */
+	int ba_rx_session_count;
+
+	/* AP-mode - number of currently connected stations */
+	int active_sta_count;
+
+	/* last wlvif we transmitted from */
+	struct wl12xx_vif *last_wlvif;
+
+	/* work to fire when Tx is stuck */
+	struct delayed_work tx_watchdog_work;
+
+	struct wlcore_ops *ops;
+	/* pointer to the lower driver partition table */
+	const struct wlcore_partition_set *ptable;
+	/* pointer to the lower driver register table */
+	const int *rtable;
+	/* name of the firmwares to load - for PLT, single role, multi-role */
+	const char *plt_fw_name;
+	const char *sr_fw_name;
+	const char *mr_fw_name;
+
+	/* per-chip-family private structure */
+	void *priv;
+
+	/* number of TX descriptors the HW supports. */
+	u32 num_tx_desc;
+
+	/* spare Tx blocks for normal/GEM operating modes */
+	u32 normal_tx_spare;
+	u32 gem_tx_spare;
+
+	/* translate HW Tx rates to standard rate-indices */
+	const u8 **band_rate_to_idx;
+
+	/* size of table for HW rates that can be received from chip */
+	u8 hw_tx_rate_tbl_size;
+
+	/* this HW rate and below are considered HT rates for this chip */
+	u8 hw_min_ht_rate;
+
+	/* HW HT (11n) capabilities */
+	struct ieee80211_sta_ht_cap ht_cap;
+
+	/* size of the private FW status data */
+	size_t fw_status_priv_len;
+};
+
+int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
+int __devexit wlcore_remove(struct platform_device *pdev);
+struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size);
+int wlcore_free_hw(struct wl1271 *wl);
+
+/* Firmware image load chunk size */
+#define CHUNK_SIZE	16384
+
+/* Quirks */
+
+/* Each RX/TX transaction requires an end-of-transaction transfer */
+#define WLCORE_QUIRK_END_OF_TRANSACTION		BIT(0)
+
+/* wl127x and SPI don't support SDIO block size alignment */
+#define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN		BIT(2)
+
+/* Aggregated Rx packets are aligned to an SDIO block */
+#define WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN		BIT(3)
+
+/* Older firmwares did not implement the FW logger over bus feature */
+#define WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED	BIT(4)
+
+/* Older firmwares use an old NVS format */
+#define WLCORE_QUIRK_LEGACY_NVS			BIT(5)
+
+/* Some firmwares may not support ELP */
+#define WLCORE_QUIRK_NO_ELP			BIT(6)
+
+/* TODO: move to the lower drivers when all usages are abstracted */
+#define CHIP_ID_1271_PG10              (0x4030101)
+#define CHIP_ID_1271_PG20              (0x4030111)
+#define CHIP_ID_1283_PG10              (0x05030101)
+#define CHIP_ID_1283_PG20              (0x05030111)
+
+/* TODO: move all these common registers and values elsewhere */
+#define HW_ACCESS_ELP_CTRL_REG		0x1FFFC
+
+/* ELP register commands */
+#define ELPCTRL_WAKE_UP             0x1
+#define ELPCTRL_WAKE_UP_WLAN_READY  0x5
+#define ELPCTRL_SLEEP               0x0
+/* ELP WLAN_READY bit */
+#define ELPCTRL_WLAN_READY          0x2
+
+/*************************************************************************
+
+    Interrupt Trigger Register (Host -> WiLink)
+
+**************************************************************************/
+
+/* Hardware to Embedded CPU Interrupts - first 32-bit register set */
+
+/*
+ * The host sets this bit to inform the Wlan
+ * FW that a TX packet is in the XFER
+ * Buffer #0.
+ */
+#define INTR_TRIG_TX_PROC0 BIT(2)
+
+/*
+ * The host sets this bit to inform the FW
+ * that it read a packet from RX XFER
+ * Buffer #0.
+ */
+#define INTR_TRIG_RX_PROC0 BIT(3)
+
+#define INTR_TRIG_DEBUG_ACK BIT(4)
+
+#define INTR_TRIG_STATE_CHANGED BIT(5)
+
+/* Hardware to Embedded CPU Interrupts - second 32-bit register set */
+
+/*
+ * The host sets this bit to inform the FW
+ * that it read a packet from RX XFER
+ * Buffer #1.
+ */
+#define INTR_TRIG_RX_PROC1 BIT(17)
+
+/*
+ * The host sets this bit to inform the Wlan
+ * hardware that a TX packet is in the XFER
+ * Buffer #1.
+ */
+#define INTR_TRIG_TX_PROC1 BIT(18)
+
+#define ACX_SLV_SOFT_RESET_BIT	BIT(1)
+#define SOFT_RESET_MAX_TIME	1000000
+#define SOFT_RESET_STALL_TIME	1000
+
+#define ECPU_CONTROL_HALT	0x00000101
+
+#define WELP_ARM_COMMAND_VAL	0x4
+
+#endif /* __WLCORE_H__ */
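wlcore_alloc_hw() and wlcore_probe() above are the entry points a chip-family driver uses to hook itself into the shared core through the ops, ptable and rtable pointers. A hedged sketch of such a registration; every wl12xx_* symbol and the descriptor count here are illustrative, only the wlcore_* names come from this header:

static struct wlcore_ops wl12xx_ops = {
	.identify_chip	= wl12xx_identify_chip,
	.boot		= wl12xx_boot,
	.calc_tx_blocks	= wl12xx_calc_tx_blocks,
	/* ... remaining callbacks ... */
};

static int __devinit wl12xx_probe(struct platform_device *pdev)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;

	hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv));
	if (IS_ERR(hw))		/* error convention assumed */
		return PTR_ERR(hw);

	wl = hw->priv;		/* wl1271 is assumed to live in hw->priv */
	wl->ops = &wl12xx_ops;
	wl->ptable = wl12xx_ptable;	/* lower-driver partition table */
	wl->rtable = wl12xx_rtable;	/* lower-driver register table */
	wl->num_tx_desc = 16;	/* chip-specific, at most WLCORE_MAX_TX_DESCRIPTORS */

	return wlcore_probe(wl, pdev);
}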
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
deleted file mode 100644
index af08c86..0000000
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ /dev/null
@@ -1,48 +0,0 @@
-menuconfig WL12XX_MENU
-	tristate "TI wl12xx driver support"
-	depends on MAC80211 && EXPERIMENTAL
-	---help---
-	  This will enable TI wl12xx driver support for the following chips:
-	  wl1271, wl1273, wl1281 and wl1283.
-	  The drivers make use of the mac80211 stack.
-
-config WL12XX
-	tristate "TI wl12xx support"
-	depends on WL12XX_MENU && GENERIC_HARDIRQS
-	depends on INET
-	select FW_LOADER
-	---help---
-	  This module adds support for wireless adapters based on TI wl1271 and
-	  TI wl1273 chipsets. This module does *not* include support for wl1251.
-	  For wl1251 support, use the separate homonymous driver instead.
-
-	  If you choose to build a module, it will be called wl12xx. Say N if
-	  unsure.
-
-config WL12XX_SPI
-	tristate "TI wl12xx SPI support"
-	depends on WL12XX && SPI_MASTER
-	select CRC7
-	---help---
-	  This module adds support for the SPI interface of adapters using
-	  TI wl12xx chipsets.  Select this if your platform is using
-	  the SPI bus.
-
-	  If you choose to build a module, it'll be called wl12xx_spi.
-	  Say N if unsure.
-
-config WL12XX_SDIO
-	tristate "TI wl12xx SDIO support"
-	depends on WL12XX && MMC
-	---help---
-	  This module adds support for the SDIO interface of adapters using
-	  TI wl12xx chipsets.  Select this if your platform is using
-	  the SDIO bus.
-
-	  If you choose to build a module, it'll be called wl12xx_sdio.
-	  Say N if unsure.
-
-config WL12XX_PLATFORM_DATA
-	bool
-	depends on WL12XX_SDIO != n || WL1251_SDIO != n
-	default y
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
deleted file mode 100644
index 98f289c..0000000
--- a/drivers/net/wireless/wl12xx/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-wl12xx-objs		= main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
-			  boot.o init.o debugfs.o scan.o
-
-wl12xx_spi-objs 	= spi.o
-wl12xx_sdio-objs	= sdio.o
-
-wl12xx-$(CONFIG_NL80211_TESTMODE)	+= testmode.o
-obj-$(CONFIG_WL12XX)			+= wl12xx.o
-obj-$(CONFIG_WL12XX_SPI)		+= wl12xx_spi.o
-obj-$(CONFIG_WL12XX_SDIO)		+= wl12xx_sdio.o
-
-# small builtin driver bit
-obj-$(CONFIG_WL12XX_PLATFORM_DATA)	+= wl12xx_platform_data.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
deleted file mode 100644
index 954101d..0000000
--- a/drivers/net/wireless/wl12xx/boot.c
+++ /dev/null
@@ -1,786 +0,0 @@
-/*
- * This file is part of wl1271
- *
- * Copyright (C) 2008-2010 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/slab.h>
-#include <linux/wl12xx.h>
-#include <linux/export.h>
-
-#include "debug.h"
-#include "acx.h"
-#include "reg.h"
-#include "boot.h"
-#include "io.h"
-#include "event.h"
-#include "rx.h"
-
-static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
-{
-	u32 cpu_ctrl;
-
-	/* 10.5.0 run the firmware (I) */
-	cpu_ctrl = wl1271_read32(wl, ACX_REG_ECPU_CONTROL);
-
-	/* 10.5.1 run the firmware (II) */
-	cpu_ctrl |= flag;
-	wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
-}
-
-static unsigned int wl12xx_get_fw_ver_quirks(struct wl1271 *wl)
-{
-	unsigned int quirks = 0;
-	unsigned int *fw_ver = wl->chip.fw_ver;
-
-	/* Only new station firmwares support routing fw logs to the host */
-	if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
-	    (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
-		quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED;
-
-	/* This feature is not yet supported for AP mode */
-	if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
-		quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED;
-
-	return quirks;
-}
-
-static void wl1271_parse_fw_ver(struct wl1271 *wl)
-{
-	int ret;
-
-	ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
-		     &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
-		     &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
-		     &wl->chip.fw_ver[4]);
-
-	if (ret != 5) {
-		wl1271_warning("fw version incorrect value");
-		memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
-		return;
-	}
-
-	/* Check if any quirks are needed with older fw versions */
-	wl->quirks |= wl12xx_get_fw_ver_quirks(wl);
-}
-
-static void wl1271_boot_fw_version(struct wl1271 *wl)
-{
-	struct wl1271_static_data static_data;
-
-	wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
-		    false);
-
-	strncpy(wl->chip.fw_ver_str, static_data.fw_version,
-		sizeof(wl->chip.fw_ver_str));
-
-	/* make sure the string is NULL-terminated */
-	wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
-
-	wl1271_parse_fw_ver(wl);
-}
-
-static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
-					     size_t fw_data_len, u32 dest)
-{
-	struct wl1271_partition_set partition;
-	int addr, chunk_num, partition_limit;
-	u8 *p, *chunk;
-
-	/* whal_FwCtrl_LoadFwImageSm() */
-
-	wl1271_debug(DEBUG_BOOT, "starting firmware upload");
-
-	wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
-		     fw_data_len, CHUNK_SIZE);
-
-	if ((fw_data_len % 4) != 0) {
-		wl1271_error("firmware length not multiple of four");
-		return -EIO;
-	}
-
-	chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
-	if (!chunk) {
-		wl1271_error("allocation for firmware upload chunk failed");
-		return -ENOMEM;
-	}
-
-	memcpy(&partition, &wl12xx_part_table[PART_DOWN], sizeof(partition));
-	partition.mem.start = dest;
-	wl1271_set_partition(wl, &partition);
-
-	/* 10.1 set partition limit and chunk num */
-	chunk_num = 0;
-	partition_limit = wl12xx_part_table[PART_DOWN].mem.size;
-
-	while (chunk_num < fw_data_len / CHUNK_SIZE) {
-		/* 10.2 update partition, if needed */
-		addr = dest + (chunk_num + 2) * CHUNK_SIZE;
-		if (addr > partition_limit) {
-			addr = dest + chunk_num * CHUNK_SIZE;
-			partition_limit = chunk_num * CHUNK_SIZE +
-				wl12xx_part_table[PART_DOWN].mem.size;
-			partition.mem.start = addr;
-			wl1271_set_partition(wl, &partition);
-		}
-
-		/* 10.3 upload the chunk */
-		addr = dest + chunk_num * CHUNK_SIZE;
-		p = buf + chunk_num * CHUNK_SIZE;
-		memcpy(chunk, p, CHUNK_SIZE);
-		wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
-			     p, addr);
-		wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
-
-		chunk_num++;
-	}
-
-	/* 10.4 upload the last chunk */
-	addr = dest + chunk_num * CHUNK_SIZE;
-	p = buf + chunk_num * CHUNK_SIZE;
-	memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
-	wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
-		     fw_data_len % CHUNK_SIZE, p, addr);
-	wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
-
-	kfree(chunk);
-	return 0;
-}
-
-static int wl1271_boot_upload_firmware(struct wl1271 *wl)
-{
-	u32 chunks, addr, len;
-	int ret = 0;
-	u8 *fw;
-
-	fw = wl->fw;
-	chunks = be32_to_cpup((__be32 *) fw);
-	fw += sizeof(u32);
-
-	wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
-
-	while (chunks--) {
-		addr = be32_to_cpup((__be32 *) fw);
-		fw += sizeof(u32);
-		len = be32_to_cpup((__be32 *) fw);
-		fw += sizeof(u32);
-
-		if (len > 300000) {
-			wl1271_info("firmware chunk too long: %u", len);
-			return -EINVAL;
-		}
-		wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
-			     chunks, addr, len);
-		ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
-		if (ret != 0)
-			break;
-		fw += len;
-	}
-
-	return ret;
-}
-
-static int wl1271_boot_upload_nvs(struct wl1271 *wl)
-{
-	size_t nvs_len, burst_len;
-	int i;
-	u32 dest_addr, val;
-	u8 *nvs_ptr, *nvs_aligned;
-
-	if (wl->nvs == NULL)
-		return -ENODEV;
-
-	if (wl->chip.id == CHIP_ID_1283_PG20) {
-		struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
-
-		if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) {
-			if (nvs->general_params.dual_mode_select)
-				wl->enable_11a = true;
-		} else {
-			wl1271_error("nvs size is not as expected: %zu != %zu",
-				     wl->nvs_len,
-				     sizeof(struct wl128x_nvs_file));
-			kfree(wl->nvs);
-			wl->nvs = NULL;
-			wl->nvs_len = 0;
-			return -EILSEQ;
-		}
-
-		/* only the first part of the NVS needs to be uploaded */
-		nvs_len = sizeof(nvs->nvs);
-		nvs_ptr = (u8 *)nvs->nvs;
-
-	} else {
-		struct wl1271_nvs_file *nvs =
-			(struct wl1271_nvs_file *)wl->nvs;
-		/*
-		 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz
-		 * band configurations) can be removed when those NVS files stop
-		 * floating around.
-		 */
-		if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
-		    wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
-			if (nvs->general_params.dual_mode_select)
-				wl->enable_11a = true;
-		}
-
-		if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
-		    (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
-		     wl->enable_11a)) {
-			wl1271_error("nvs size is not as expected: %zu != %zu",
-				wl->nvs_len, sizeof(struct wl1271_nvs_file));
-			kfree(wl->nvs);
-			wl->nvs = NULL;
-			wl->nvs_len = 0;
-			return -EILSEQ;
-		}
-
-		/* only the first part of the NVS needs to be uploaded */
-		nvs_len = sizeof(nvs->nvs);
-		nvs_ptr = (u8 *) nvs->nvs;
-	}
-
-	/* update current MAC address to NVS */
-	nvs_ptr[11] = wl->addresses[0].addr[0];
-	nvs_ptr[10] = wl->addresses[0].addr[1];
-	nvs_ptr[6] = wl->addresses[0].addr[2];
-	nvs_ptr[5] = wl->addresses[0].addr[3];
-	nvs_ptr[4] = wl->addresses[0].addr[4];
-	nvs_ptr[3] = wl->addresses[0].addr[5];
-
-	/*
-	 * Layout before the actual NVS tables:
-	 * 1 byte : burst length.
-	 * 2 bytes: destination address.
-	 * n bytes: data to burst copy.
-	 *
-	 * This is ended by a 0 length, then the NVS tables.
-	 */
-
-	/* FIXME: Do we need to check here whether the LSB is 1? */
-	while (nvs_ptr[0]) {
-		burst_len = nvs_ptr[0];
-		dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));
-
-		/*
-		 * Due to our new wl1271_translate_reg_addr function,
-		 * we need to add the REGISTER_BASE to the destination
-		 */
-		dest_addr += REGISTERS_BASE;
-
-		/* We move our pointer to the data */
-		nvs_ptr += 3;
-
-		for (i = 0; i < burst_len; i++) {
-			if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
-				goto out_badnvs;
-
-			val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
-			       | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
-
-			wl1271_debug(DEBUG_BOOT,
-				     "nvs burst write 0x%x: 0x%x",
-				     dest_addr, val);
-			wl1271_write32(wl, dest_addr, val);
-
-			nvs_ptr += 4;
-			dest_addr += 4;
-		}
-
-		if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
-			goto out_badnvs;
-	}
-
-	/*
-	 * We've reached the first zero length, the first NVS table
-	 * is located at an aligned offset which is at least 7 bytes further.
-	 * NOTE: The wl->nvs->nvs element must be first, in order to
-	 * simplify the casting, we assume it is at the beginning of
-	 * the wl->nvs structure.
-	 */
-	nvs_ptr = (u8 *)wl->nvs +
-			ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
-
-	if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
-		goto out_badnvs;
-
-	nvs_len -= nvs_ptr - (u8 *)wl->nvs;
-
-	/* Now we must set the partition correctly */
-	wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
-
-	/* Copy the NVS tables to a new block to ensure alignment */
-	nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
-	if (!nvs_aligned)
-		return -ENOMEM;
-
-	/* And finally we upload the NVS tables */
-	wl1271_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
-
-	kfree(nvs_aligned);
-	return 0;
-
-out_badnvs:
-	wl1271_error("nvs data is malformed");
-	return -EILSEQ;
-}
-
-static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
-{
-	wl1271_enable_interrupts(wl);
-	wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
-		       WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
-	wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
-}
-
-static int wl1271_boot_soft_reset(struct wl1271 *wl)
-{
-	unsigned long timeout;
-	u32 boot_data;
-
-	/* perform soft reset */
-	wl1271_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
-
-	/* SOFT_RESET is self clearing */
-	timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
-	while (1) {
-		boot_data = wl1271_read32(wl, ACX_REG_SLV_SOFT_RESET);
-		wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
-		if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
-			break;
-
-		if (time_after(jiffies, timeout)) {
-			/* 1.2 check pWhalBus->uSelfClearTime if the
-			 * timeout was reached */
-			wl1271_error("soft reset timeout");
-			return -1;
-		}
-
-		udelay(SOFT_RESET_STALL_TIME);
-	}
-
-	/* disable Rx/Tx */
-	wl1271_write32(wl, ENABLE, 0x0);
-
-	/* disable auto calibration on start */
-	wl1271_write32(wl, SPARE_A2, 0xffff);
-
-	return 0;
-}
-
-static int wl1271_boot_run_firmware(struct wl1271 *wl)
-{
-	int loop, ret;
-	u32 chip_id, intr;
-
-	wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
-
-	chip_id = wl1271_read32(wl, CHIP_ID_B);
-
-	wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
-
-	if (chip_id != wl->chip.id) {
-		wl1271_error("chip id doesn't match after firmware boot");
-		return -EIO;
-	}
-
-	/* wait for init to complete */
-	loop = 0;
-	while (loop++ < INIT_LOOP) {
-		udelay(INIT_LOOP_DELAY);
-		intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
-
-		if (intr == 0xffffffff) {
-			wl1271_error("error reading hardware complete "
-				     "init indication");
-			return -EIO;
-		}
-		/* check that ACX_INTR_INIT_COMPLETE is enabled */
-		else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
-			wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
-				       WL1271_ACX_INTR_INIT_COMPLETE);
-			break;
-		}
-	}
-
-	if (loop > INIT_LOOP) {
-		wl1271_error("timeout waiting for the hardware to "
-			     "complete initialization");
-		return -EIO;
-	}
-
-	/* get hardware config command mail box */
-	wl->cmd_box_addr = wl1271_read32(wl, REG_COMMAND_MAILBOX_PTR);
-
-	/* get hardware config event mail box */
-	wl->event_box_addr = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
-
-	/* set the working partition to its "running" mode offset */
-	wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
-
-	wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
-		     wl->cmd_box_addr, wl->event_box_addr);
-
-	wl1271_boot_fw_version(wl);
-
-	/*
-	 * in case of full asynchronous mode the firmware event must be
-	 * ready to receive event from the command mailbox
-	 */
-
-	/* unmask required mbox events  */
-	wl->event_mask = BSS_LOSE_EVENT_ID |
-		SCAN_COMPLETE_EVENT_ID |
-		ROLE_STOP_COMPLETE_EVENT_ID |
-		RSSI_SNR_TRIGGER_0_EVENT_ID |
-		PSPOLL_DELIVERY_FAILURE_EVENT_ID |
-		SOFT_GEMINI_SENSE_EVENT_ID |
-		PERIODIC_SCAN_REPORT_EVENT_ID |
-		PERIODIC_SCAN_COMPLETE_EVENT_ID |
-		DUMMY_PACKET_EVENT_ID |
-		PEER_REMOVE_COMPLETE_EVENT_ID |
-		BA_SESSION_RX_CONSTRAINT_EVENT_ID |
-		REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
-		INACTIVE_STA_EVENT_ID |
-		MAX_TX_RETRY_EVENT_ID |
-		CHANNEL_SWITCH_COMPLETE_EVENT_ID;
-
-	ret = wl1271_event_unmask(wl);
-	if (ret < 0) {
-		wl1271_error("EVENT mask setting failed");
-		return ret;
-	}
-
-	wl1271_event_mbox_config(wl);
-
-	/* firmware startup completed */
-	return 0;
-}
-
-static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
-{
-	u32 polarity;
-
-	polarity = wl1271_top_reg_read(wl, OCP_REG_POLARITY);
-
-	/* We use HIGH polarity, so unset the LOW bit */
-	polarity &= ~POLARITY_LOW;
-	wl1271_top_reg_write(wl, OCP_REG_POLARITY, polarity);
-
-	return 0;
-}
-
-static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
-{
-	u16 spare_reg;
-
-	/* Mask bits [2] & [8:4] in the sys_clk_cfg register */
-	spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG);
-	if (spare_reg == 0xFFFF)
-		return -EFAULT;
-	spare_reg |= (BIT(3) | BIT(5) | BIT(6));
-	wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg);
-
-	/* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
-	wl1271_top_reg_write(wl, SYS_CLK_CFG_REG,
-			     WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
-
-	/* Delay execution for 15msec, to let the HW settle */
-	mdelay(15);
-
-	return 0;
-}
-
-static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
-{
-	u16 tcxo_detection;
-
-	tcxo_detection = wl1271_top_reg_read(wl, TCXO_CLK_DETECT_REG);
-	if (tcxo_detection & TCXO_DET_FAILED)
-		return false;
-
-	return true;
-}
-
-static bool wl128x_is_fref_valid(struct wl1271 *wl)
-{
-	u16 fref_detection;
-
-	fref_detection = wl1271_top_reg_read(wl, FREF_CLK_DETECT_REG);
-	if (fref_detection & FREF_CLK_DETECT_FAIL)
-		return false;
-
-	return true;
-}
-
-static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
-{
-	wl1271_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
-	wl1271_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
-	wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
-
-	return 0;
-}
-
-static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
-{
-	u16 spare_reg;
-	u16 pll_config;
-	u8 input_freq;
-
-	/* Mask bits [3:1] in the sys_clk_cfg register */
-	spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG);
-	if (spare_reg == 0xFFFF)
-		return -EFAULT;
-	spare_reg |= BIT(2);
-	wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg);
-
-	/* Handle special cases of the TCXO clock */
-	if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
-	    wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
-		return wl128x_manually_configure_mcs_pll(wl);
-
-	/* Set the input frequency according to the selected clock source */
-	input_freq = (clk & 1) + 1;
-
-	pll_config = wl1271_top_reg_read(wl, MCS_PLL_CONFIG_REG);
-	if (pll_config == 0xFFFF)
-		return -EFAULT;
-	pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
-	pll_config |= MCS_PLL_ENABLE_HP;
-	wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
-
-	return 0;
-}
-
-/*
- * WL128x has two clocks input - TCXO and FREF.
- * TCXO is the main clock of the device, while FREF is used to sync
- * between the GPS and the cellular modem.
- * In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used
- * as the WLAN/BT main clock.
- */
-static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
-{
-	u16 sys_clk_cfg;
-
-	/* For XTAL-only modes, FREF will be used after switching from TCXO */
-	if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
-	    wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
-		if (!wl128x_switch_tcxo_to_fref(wl))
-			return -EINVAL;
-		goto fref_clk;
-	}
-
-	/* Query the HW, to determine which clock source we should use */
-	sys_clk_cfg = wl1271_top_reg_read(wl, SYS_CLK_CFG_REG);
-	if (sys_clk_cfg == 0xFFFF)
-		return -EINVAL;
-	if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
-		goto fref_clk;
-
-	/* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
-	if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
-	    wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
-		if (!wl128x_switch_tcxo_to_fref(wl))
-			return -EINVAL;
-		goto fref_clk;
-	}
-
-	/* TCXO clock is selected */
-	if (!wl128x_is_tcxo_valid(wl))
-		return -EINVAL;
-	*selected_clock = wl->tcxo_clock;
-	goto config_mcs_pll;
-
-fref_clk:
-	/* FREF clock is selected */
-	if (!wl128x_is_fref_valid(wl))
-		return -EINVAL;
-	*selected_clock = wl->ref_clock;
-
-config_mcs_pll:
-	return wl128x_configure_mcs_pll(wl, *selected_clock);
-}
-
-static int wl127x_boot_clk(struct wl1271 *wl)
-{
-	u32 pause;
-	u32 clk;
-
-	if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
-		wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
-
-	if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
-	    wl->ref_clock == CONF_REF_CLK_38_4_E ||
-	    wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
-		/* ref clk: 19.2/38.4/38.4-XTAL */
-		clk = 0x3;
-	else if (wl->ref_clock == CONF_REF_CLK_26_E ||
-		 wl->ref_clock == CONF_REF_CLK_52_E)
-		/* ref clk: 26/52 */
-		clk = 0x5;
-	else
-		return -EINVAL;
-
-	if (wl->ref_clock != CONF_REF_CLK_19_2_E) {
-		u16 val;
-		/* Set clock type (open drain) */
-		val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
-		val &= FREF_CLK_TYPE_BITS;
-		wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
-
-		/* Set clock pull mode (no pull) */
-		val = wl1271_top_reg_read(wl, OCP_REG_CLK_PULL);
-		val |= NO_PULL;
-		wl1271_top_reg_write(wl, OCP_REG_CLK_PULL, val);
-	} else {
-		u16 val;
-		/* Set clock polarity */
-		val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY);
-		val &= FREF_CLK_POLARITY_BITS;
-		val |= CLK_REQ_OUTN_SEL;
-		wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
-	}
-
-	wl1271_write32(wl, PLL_PARAMETERS, clk);
-
-	pause = wl1271_read32(wl, PLL_PARAMETERS);
-
-	wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
-
-	pause &= ~(WU_COUNTER_PAUSE_VAL);
-	pause |= WU_COUNTER_PAUSE_VAL;
-	wl1271_write32(wl, WU_COUNTER_PAUSE, pause);
-
-	return 0;
-}
-
-/* uploads NVS and firmware */
-int wl1271_load_firmware(struct wl1271 *wl)
-{
-	int ret = 0;
-	u32 tmp, clk;
-	int selected_clock = -1;
-
-	if (wl->chip.id == CHIP_ID_1283_PG20) {
-		ret = wl128x_boot_clk(wl, &selected_clock);
-		if (ret < 0)
-			goto out;
-	} else {
-		ret = wl127x_boot_clk(wl);
-		if (ret < 0)
-			goto out;
-	}
-
-	/* Continue the ELP wake up sequence */
-	wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
-	udelay(500);
-
-	wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]);
-
-	/* Read-modify-write DRPW_SCRATCH_START register (see next state)
-	   to be used by DRPw FW. The RTRIM value will be added by the FW
-	   before taking DRPw out of reset */
-
-	wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
-	clk = wl1271_read32(wl, DRPW_SCRATCH_START);
-
-	wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
-
-	if (wl->chip.id == CHIP_ID_1283_PG20) {
-		clk |= ((selected_clock & 0x3) << 1) << 4;
-	} else {
-		clk |= (wl->ref_clock << 1) << 4;
-	}
-
-	wl1271_write32(wl, DRPW_SCRATCH_START, clk);
-
-	wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
-
-	/* Disable interrupts */
-	wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
-
-	ret = wl1271_boot_soft_reset(wl);
-	if (ret < 0)
-		goto out;
-
-	/* 2. start processing NVS file */
-	ret = wl1271_boot_upload_nvs(wl);
-	if (ret < 0)
-		goto out;
-
-	/* write firmware's last address (i.e. its length) to
-	 * ACX_EEPROMLESS_IND_REG */
-	wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
-
-	wl1271_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG);
-
-	tmp = wl1271_read32(wl, CHIP_ID_B);
-
-	wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
-
-	/* 6. read the EEPROM parameters */
-	tmp = wl1271_read32(wl, SCR_PAD2);
-
-	/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
-	 * to upload_fw) */
-
-	if (wl->chip.id == CHIP_ID_1283_PG20)
-		wl1271_top_reg_write(wl, SDIO_IO_DS, wl->conf.hci_io_ds);
-
-	ret = wl1271_boot_upload_firmware(wl);
-	if (ret < 0)
-		goto out;
-
-out:
-	return ret;
-}
-EXPORT_SYMBOL_GPL(wl1271_load_firmware);
-
-int wl1271_boot(struct wl1271 *wl)
-{
-	int ret;
-
-	/* upload NVS and firmware */
-	ret = wl1271_load_firmware(wl);
-	if (ret)
-		return ret;
-
-	/* 10.5 start firmware */
-	ret = wl1271_boot_run_firmware(wl);
-	if (ret < 0)
-		goto out;
-
-	ret = wl1271_boot_write_irq_polarity(wl);
-	if (ret < 0)
-		goto out;
-
-	wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
-		       WL1271_ACX_ALL_EVENTS_VECTOR);
-
-	/* Enable firmware interrupts now */
-	wl1271_boot_enable_interrupts(wl);
-
-	wl1271_event_mbox_config(wl);
-
-out:
-	return ret;
-}
diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h
deleted file mode 100644
index c3adc09..0000000
--- a/drivers/net/wireless/wl12xx/boot.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * This file is part of wl1271
- *
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#ifndef __BOOT_H__
-#define __BOOT_H__
-
-#include "wl12xx.h"
-
-int wl1271_boot(struct wl1271 *wl);
-int wl1271_load_firmware(struct wl1271 *wl);
-
-#define WL1271_NO_SUBBANDS 8
-#define WL1271_NO_POWER_LEVELS 4
-#define WL1271_FW_VERSION_MAX_LEN 20
-
-struct wl1271_static_data {
-	u8 mac_address[ETH_ALEN];
-	u8 padding[2];
-	u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
-	u32 hw_version;
-	u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
-};
-
-/* number of times we try to read the INIT interrupt */
-#define INIT_LOOP 20000
-
-/* delay between retries */
-#define INIT_LOOP_DELAY 50
-
-#define WU_COUNTER_PAUSE_VAL 0x3FF
-#define WELP_ARM_COMMAND_VAL 0x4
-
-#define OCP_REG_POLARITY     0x0064
-#define OCP_REG_CLK_TYPE     0x0448
-#define OCP_REG_CLK_POLARITY 0x0cb2
-#define OCP_REG_CLK_PULL     0x0cb4
-
-#define CMD_MBOX_ADDRESS     0x407B4
-
-#define POLARITY_LOW         BIT(1)
-#define NO_PULL              (BIT(14) | BIT(15))
-
-#define FREF_CLK_TYPE_BITS     0xfffffe7f
-#define CLK_REQ_PRCM           0x100
-#define FREF_CLK_POLARITY_BITS 0xfffff8ff
-#define CLK_REQ_OUTN_SEL       0x700
-
-/* PLL configuration algorithm for wl128x */
-#define SYS_CLK_CFG_REG              0x2200
-/* Bit[0]   -  0-TCXO,  1-FREF */
-#define MCS_PLL_CLK_SEL_FREF         BIT(0)
-/* Bit[3:2] - 01-TCXO, 10-FREF */
-#define WL_CLK_REQ_TYPE_FREF         BIT(3)
-#define WL_CLK_REQ_TYPE_PG2          (BIT(3) | BIT(2))
-/* Bit[4]   -  0-TCXO,  1-FREF */
-#define PRCM_CM_EN_MUX_WLAN_FREF     BIT(4)
-
-#define TCXO_ILOAD_INT_REG           0x2264
-#define TCXO_CLK_DETECT_REG          0x2266
-
-#define TCXO_DET_FAILED              BIT(4)
-
-#define FREF_ILOAD_INT_REG           0x2084
-#define FREF_CLK_DETECT_REG          0x2086
-#define FREF_CLK_DETECT_FAIL         BIT(4)
-
-/* Use this reg for masking during driver access */
-#define WL_SPARE_REG                 0x2320
-#define WL_SPARE_VAL                 BIT(2)
-/* Bit[6:5:3] -  mask wl write SYS_CLK_CFG[8:5:2:4] */
-#define WL_SPARE_MASK_8526           (BIT(6) | BIT(5) | BIT(3))
-
-#define PLL_LOCK_COUNTERS_REG        0xD8C
-#define PLL_LOCK_COUNTERS_COEX       0x0F
-#define PLL_LOCK_COUNTERS_MCS        0xF0
-#define MCS_PLL_OVERRIDE_REG         0xD90
-#define MCS_PLL_CONFIG_REG           0xD92
-#define MCS_SEL_IN_FREQ_MASK         0x0070
-#define MCS_SEL_IN_FREQ_SHIFT        4
-#define MCS_PLL_CONFIG_REG_VAL       0x73
-#define MCS_PLL_ENABLE_HP            (BIT(0) | BIT(1))
-
-#define MCS_PLL_M_REG                0xD94
-#define MCS_PLL_N_REG                0xD96
-#define MCS_PLL_M_REG_VAL            0xC8
-#define MCS_PLL_N_REG_VAL            0x07
-
-#define SDIO_IO_DS                   0xd14
-
-/* SDIO/wSPI DS configuration values */
-enum {
-	HCI_IO_DS_8MA = 0,
-	HCI_IO_DS_4MA = 1, /* default */
-	HCI_IO_DS_6MA = 2,
-	HCI_IO_DS_2MA = 3,
-};
-
-/* end PLL configuration algorithm for wl128x */
-
-#endif
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
deleted file mode 100644
index c574a3b..0000000
--- a/drivers/net/wireless/wl12xx/io.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * This file is part of wl1271
- *
- * Copyright (C) 2008-2010 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/interrupt.h>
-
-#include "wl12xx.h"
-#include "debug.h"
-#include "wl12xx_80211.h"
-#include "io.h"
-#include "tx.h"
-
-#define OCP_CMD_LOOP  32
-
-#define OCP_CMD_WRITE 0x1
-#define OCP_CMD_READ  0x2
-
-#define OCP_READY_MASK  BIT(18)
-#define OCP_STATUS_MASK (BIT(16) | BIT(17))
-
-#define OCP_STATUS_NO_RESP    0x00000
-#define OCP_STATUS_OK         0x10000
-#define OCP_STATUS_REQ_FAILED 0x20000
-#define OCP_STATUS_RESP_ERROR 0x30000
-
-struct wl1271_partition_set wl12xx_part_table[PART_TABLE_LEN] = {
-	[PART_DOWN] = {
-		.mem = {
-			.start = 0x00000000,
-			.size  = 0x000177c0
-		},
-		.reg = {
-			.start = REGISTERS_BASE,
-			.size  = 0x00008800
-		},
-		.mem2 = {
-			.start = 0x00000000,
-			.size  = 0x00000000
-		},
-		.mem3 = {
-			.start = 0x00000000,
-			.size  = 0x00000000
-		},
-	},
-
-	[PART_WORK] = {
-		.mem = {
-			.start = 0x00040000,
-			.size  = 0x00014fc0
-		},
-		.reg = {
-			.start = REGISTERS_BASE,
-			.size  = 0x0000a000
-		},
-		.mem2 = {
-			.start = 0x003004f8,
-			.size  = 0x00000004
-		},
-		.mem3 = {
-			.start = 0x00040404,
-			.size  = 0x00000000
-		},
-	},
-
-	[PART_DRPW] = {
-		.mem = {
-			.start = 0x00040000,
-			.size  = 0x00014fc0
-		},
-		.reg = {
-			.start = DRPW_BASE,
-			.size  = 0x00006000
-		},
-		.mem2 = {
-			.start = 0x00000000,
-			.size  = 0x00000000
-		},
-		.mem3 = {
-			.start = 0x00000000,
-			.size  = 0x00000000
-		}
-	}
-};
-
-bool wl1271_set_block_size(struct wl1271 *wl)
-{
-	if (wl->if_ops->set_block_size) {
-		wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
-		return true;
-	}
-
-	return false;
-}
-
-void wl1271_disable_interrupts(struct wl1271 *wl)
-{
-	disable_irq(wl->irq);
-}
-
-void wl1271_enable_interrupts(struct wl1271 *wl)
-{
-	enable_irq(wl->irq);
-}
-
-/* Set the SPI partitions to access the chip addresses
- *
- * To simplify driver code, a fixed (virtual) memory map is defined for
- * register and memory addresses. Because in the chipset, in different stages
- * of operation, those addresses will move around, an address translation
- * mechanism is required.
- *
- * There are four partitions (three memory and one register partition),
- * which are mapped to two different areas of the hardware memory.
- *
- *                                Virtual address
- *                                     space
- *
- *                                    |    |
- *                                 ...+----+--> mem.start
- *          Physical address    ...   |    |
- *               space       ...      |    | [PART_0]
- *                        ...         |    |
- *  00000000  <--+----+...         ...+----+--> mem.start + mem.size
- *               |    |         ...   |    |
- *               |MEM |      ...      |    |
- *               |    |   ...         |    |
- *  mem.size  <--+----+...            |    | {unused area)
- *               |    |   ...         |    |
- *               |REG |      ...      |    |
- *  mem.size     |    |         ...   |    |
- *      +     <--+----+...         ...+----+--> reg.start
- *  reg.size     |    |   ...         |    |
- *               |MEM2|      ...      |    | [PART_1]
- *               |    |         ...   |    |
- *                                 ...+----+--> reg.start + reg.size
- *                                    |    |
- *
- */
-int wl1271_set_partition(struct wl1271 *wl,
-			 struct wl1271_partition_set *p)
-{
-	/* copy partition info */
-	memcpy(&wl->part, p, sizeof(*p));
-
-	wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
-		     p->mem.start, p->mem.size);
-	wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
-		     p->reg.start, p->reg.size);
-	wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
-		     p->mem2.start, p->mem2.size);
-	wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
-		     p->mem3.start, p->mem3.size);
-
-	/* write partition info to the chipset */
-	wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
-	wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
-	wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
-	wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
-	wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
-	wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
-	wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(wl1271_set_partition);
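The diagram in the comment above describes a two-partition virtual map, but the function itself only programs the window registers, so the matching address translation is easy to miss. A hedged sketch of that translation, written against the curr_part field that replaces wl->part in this series; the helper name is chosen for illustration:

/*
 * Illustrative translation following the PART_0/PART_1 layout in the
 * comment: memory addresses map to the start of the virtual space,
 * register addresses follow right after the memory window.
 */
static int wlcore_translate_addr(struct wl1271 *wl, int addr)
{
	struct wlcore_partition_set *part = &wl->curr_part;

	if (addr >= part->mem.start &&
	    addr < part->mem.start + part->mem.size)
		return addr - part->mem.start;

	return addr - part->reg.start + part->mem.size;
}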
-
-void wl1271_io_reset(struct wl1271 *wl)
-{
-	if (wl->if_ops->reset)
-		wl->if_ops->reset(wl->dev);
-}
-
-void wl1271_io_init(struct wl1271 *wl)
-{
-	if (wl->if_ops->init)
-		wl->if_ops->init(wl->dev);
-}
-
-void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
-{
-	/* write address >> 1 + 0x30000 to OCP_POR_CTR */
-	addr = (addr >> 1) + 0x30000;
-	wl1271_write32(wl, OCP_POR_CTR, addr);
-
-	/* write value to OCP_POR_WDATA */
-	wl1271_write32(wl, OCP_DATA_WRITE, val);
-
-	/* write 1 to OCP_CMD */
-	wl1271_write32(wl, OCP_CMD, OCP_CMD_WRITE);
-}
-
-u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
-{
-	u32 val;
-	int timeout = OCP_CMD_LOOP;
-
-	/* write address >> 1 + 0x30000 to OCP_POR_CTR */
-	addr = (addr >> 1) + 0x30000;
-	wl1271_write32(wl, OCP_POR_CTR, addr);
-
-	/* write 2 to OCP_CMD */
-	wl1271_write32(wl, OCP_CMD, OCP_CMD_READ);
-
-	/* poll for data ready */
-	do {
-		val = wl1271_read32(wl, OCP_DATA_READ);
-	} while (!(val & OCP_READY_MASK) && --timeout);
-
-	if (!timeout) {
-		wl1271_warning("Top register access timed out.");
-		return 0xffff;
-	}
-
-	/* check data status and return if OK */
-	if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
-		return val & 0xffff;
-	else {
-		wl1271_warning("Top register access returned error.");
-		return 0xffff;
-	}
-}
-
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index cb6204f..e6ec16d 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -266,9 +266,13 @@
 	int in_maxlen;
 	struct pn533_frame *in_frame;
 
-	struct tasklet_struct tasklet;
-	struct pn533_frame *tklt_in_frame;
-	int tklt_in_error;
+	struct sk_buff_head resp_q;
+
+	struct workqueue_struct	*wq;
+	struct work_struct cmd_work;
+	struct work_struct mi_work;
+	struct pn533_frame *wq_in_frame;
+	int wq_in_error;
 
 	pn533_cmd_complete_t cmd_complete;
 	void *cmd_complete_arg;
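The fields above replace the single tasklet with a dedicated workqueue plus an skb queue for collecting multi-fragment responses. A sketch of the matching setup and teardown such a conversion needs in probe() and disconnect(); the exact placement and the mi_work handler name are assumptions, since those hunks are not part of this excerpt:

/* probe(): initialise the new members */
skb_queue_head_init(&dev->resp_q);
INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete);
INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);	/* handler name assumed */
dev->wq = create_singlethread_workqueue("pn533");
if (dev->wq == NULL)
	goto error;

/* disconnect(): drain and free them again */
flush_workqueue(dev->wq);
destroy_workqueue(dev->wq);
skb_queue_purge(&dev->resp_q);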
@@ -383,15 +387,21 @@
 	return (PN533_FRAME_CMD(frame) == PN533_CMD_RESPONSE(cmd));
 }
 
-static void pn533_tasklet_cmd_complete(unsigned long arg)
+
+static void pn533_wq_cmd_complete(struct work_struct *work)
 {
-	struct pn533 *dev = (struct pn533 *) arg;
-	struct pn533_frame *in_frame = dev->tklt_in_frame;
+	struct pn533 *dev = container_of(work, struct pn533, cmd_work);
+	struct pn533_frame *in_frame;
 	int rc;
 
-	if (dev->tklt_in_error)
+	if (dev == NULL)
+		return;
+
+	in_frame = dev->wq_in_frame;
+
+	if (dev->wq_in_error)
 		rc = dev->cmd_complete(dev, dev->cmd_complete_arg, NULL,
-							dev->tklt_in_error);
+							dev->wq_in_error);
 	else
 		rc = dev->cmd_complete(dev, dev->cmd_complete_arg,
 					PN533_FRAME_CMD_PARAMS_PTR(in_frame),
@@ -406,7 +416,7 @@
 	struct pn533 *dev = urb->context;
 	struct pn533_frame *in_frame;
 
-	dev->tklt_in_frame = NULL;
+	dev->wq_in_frame = NULL;
 
 	switch (urb->status) {
 	case 0:
@@ -417,36 +427,36 @@
 	case -ESHUTDOWN:
 		nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
 						" status: %d", urb->status);
-		dev->tklt_in_error = urb->status;
-		goto sched_tasklet;
+		dev->wq_in_error = urb->status;
+		goto sched_wq;
 	default:
 		nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:"
 							" %d", urb->status);
-		dev->tklt_in_error = urb->status;
-		goto sched_tasklet;
+		dev->wq_in_error = urb->status;
+		goto sched_wq;
 	}
 
 	in_frame = dev->in_urb->transfer_buffer;
 
 	if (!pn533_rx_frame_is_valid(in_frame)) {
 		nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
-		dev->tklt_in_error = -EIO;
-		goto sched_tasklet;
+		dev->wq_in_error = -EIO;
+		goto sched_wq;
 	}
 
 	if (!pn533_rx_frame_is_cmd_response(in_frame, dev->cmd)) {
 		nfc_dev_err(&dev->interface->dev, "The received frame is not "
 						"response to the last command");
-		dev->tklt_in_error = -EIO;
-		goto sched_tasklet;
+		dev->wq_in_error = -EIO;
+		goto sched_wq;
 	}
 
 	nfc_dev_dbg(&dev->interface->dev, "Received a valid frame");
-	dev->tklt_in_error = 0;
-	dev->tklt_in_frame = in_frame;
+	dev->wq_in_error = 0;
+	dev->wq_in_frame = in_frame;
 
-sched_tasklet:
-	tasklet_schedule(&dev->tasklet);
+sched_wq:
+	queue_work(dev->wq, &dev->cmd_work);
 }
 
 static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
@@ -471,21 +481,21 @@
 	case -ESHUTDOWN:
 		nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
 						" status: %d", urb->status);
-		dev->tklt_in_error = urb->status;
-		goto sched_tasklet;
+		dev->wq_in_error = urb->status;
+		goto sched_wq;
 	default:
 		nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:"
 							" %d", urb->status);
-		dev->tklt_in_error = urb->status;
-		goto sched_tasklet;
+		dev->wq_in_error = urb->status;
+		goto sched_wq;
 	}
 
 	in_frame = dev->in_urb->transfer_buffer;
 
 	if (!pn533_rx_frame_is_ack(in_frame)) {
 		nfc_dev_err(&dev->interface->dev, "Received an invalid ack");
-		dev->tklt_in_error = -EIO;
-		goto sched_tasklet;
+		dev->wq_in_error = -EIO;
+		goto sched_wq;
 	}
 
 	nfc_dev_dbg(&dev->interface->dev, "Received a valid ack");
@@ -494,15 +504,15 @@
 	if (rc) {
 		nfc_dev_err(&dev->interface->dev, "usb_submit_urb failed with"
 							" result %d", rc);
-		dev->tklt_in_error = rc;
-		goto sched_tasklet;
+		dev->wq_in_error = rc;
+		goto sched_wq;
 	}
 
 	return;
 
-sched_tasklet:
-	dev->tklt_in_frame = NULL;
-	tasklet_schedule(&dev->tasklet);
+sched_wq:
+	dev->wq_in_frame = NULL;
+	queue_work(dev->wq, &dev->cmd_work);
 }
 
 static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
@@ -1249,6 +1259,8 @@
 
 	dev->tgt_active_prot = 0;
 
+	skb_queue_purge(&dev->resp_q);
+
 	pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_RELEASE);
 
 	tg = 1;
@@ -1447,11 +1459,49 @@
 	void *cb_context;
 };
 
+static struct sk_buff *pn533_build_response(struct pn533 *dev)
+{
+	struct sk_buff *skb, *tmp, *t;
+	unsigned int skb_len = 0, tmp_len = 0;
+
+	nfc_dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+	if (skb_queue_empty(&dev->resp_q))
+		return NULL;
+
+	if (skb_queue_len(&dev->resp_q) == 1) {
+		skb = skb_dequeue(&dev->resp_q);
+		goto out;
+	}
+
+	skb_queue_walk_safe(&dev->resp_q, tmp, t)
+		skb_len += tmp->len;
+
+	nfc_dev_dbg(&dev->interface->dev, "%s total length %d\n",
+		    __func__, skb_len);
+
+	skb = alloc_skb(skb_len, GFP_KERNEL);
+	if (skb == NULL)
+		goto out;
+
+	skb_put(skb, skb_len);
+
+	skb_queue_walk_safe(&dev->resp_q, tmp, t) {
+		memcpy(skb->data + tmp_len, tmp->data, tmp->len);
+		tmp_len += tmp->len;
+	}
+
+out:
+	skb_queue_purge(&dev->resp_q);
+
+	return skb;
+}
+
 static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
 						u8 *params, int params_len)
 {
 	struct pn533_data_exchange_arg *arg = _arg;
-	struct sk_buff *skb_resp = arg->skb_resp;
+	struct sk_buff *skb = NULL, *skb_resp = arg->skb_resp;
 	struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data;
 	int err = 0;
 	u8 status;
@@ -1459,15 +1509,13 @@
 
 	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
 
-	dev_kfree_skb_irq(arg->skb_out);
+	dev_kfree_skb(arg->skb_out);
 
 	if (params_len < 0) { /* error */
 		err = params_len;
 		goto error;
 	}
 
-	skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
-
 	status = params[0];
 
 	cmd_ret = status & PN533_CMD_RET_MASK;
@@ -1478,25 +1526,27 @@
 		goto error;
 	}
 
-	if (status & PN533_CMD_MI_MASK) {
-		/* TODO: Implement support to multi-part data exchange */
-		nfc_dev_err(&dev->interface->dev, "Multi-part message not yet"
-								" supported");
-		/* Prevent the other messages from controller */
-		pn533_send_ack(dev, GFP_ATOMIC);
-		err = -ENOSYS;
-		goto error;
-	}
-
+	skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
 	skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN);
 	skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE);
+	skb_queue_tail(&dev->resp_q, skb_resp);
 
-	arg->cb(arg->cb_context, skb_resp, 0);
+	if (status & PN533_CMD_MI_MASK) {
+		queue_work(dev->wq, &dev->mi_work);
+		return -EINPROGRESS;
+	}
+
+	skb = pn533_build_response(dev);
+	if (skb == NULL)
+		goto error;
+
+	arg->cb(arg->cb_context, skb, 0);
 	kfree(arg);
 	return 0;
 
 error:
-	dev_kfree_skb_irq(skb_resp);
+	skb_queue_purge(&dev->resp_q);
+	dev_kfree_skb(skb_resp);
 	arg->cb(arg->cb_context, NULL, err);
 	kfree(arg);
 	return 0;
@@ -1571,6 +1621,68 @@
 	return rc;
 }
 
+static void pn533_wq_mi_recv(struct work_struct *work)
+{
+	struct pn533 *dev = container_of(work, struct pn533, mi_work);
+	struct sk_buff *skb_cmd;
+	struct pn533_data_exchange_arg *arg = dev->cmd_complete_arg;
+	struct pn533_frame *out_frame, *in_frame;
+	struct sk_buff *skb_resp;
+	int skb_resp_len;
+	int rc;
+
+	nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+	/* This is a zero payload size skb */
+	skb_cmd = alloc_skb(PN533_CMD_DATAEXCH_HEAD_LEN + PN533_FRAME_TAIL_SIZE,
+			    GFP_KERNEL);
+	if (skb_cmd == NULL)
+		goto error_cmd;
+
+	skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN);
+
+	rc = pn533_data_exchange_tx_frame(dev, skb_cmd);
+	if (rc)
+		goto error_frame;
+
+	skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
+			PN533_CMD_DATAEXCH_DATA_MAXLEN +
+			PN533_FRAME_TAIL_SIZE;
+	skb_resp = alloc_skb(skb_resp_len, GFP_KERNEL);
+	if (!skb_resp) {
+		rc = -ENOMEM;
+		goto error_frame;
+	}
+
+	in_frame = (struct pn533_frame *) skb_resp->data;
+	out_frame = (struct pn533_frame *) skb_cmd->data;
+
+	arg->skb_resp = skb_resp;
+	arg->skb_out = skb_cmd;
+
+	rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
+					  skb_resp_len,
+					  pn533_data_exchange_complete,
+					  dev->cmd_complete_arg, GFP_KERNEL);
+	if (!rc)
+		return;
+
+	nfc_dev_err(&dev->interface->dev, "Error %d when trying to"
+						" perform data_exchange", rc);
+
+	kfree_skb(skb_resp);
+
+error_frame:
+	kfree_skb(skb_cmd);
+
+error_cmd:
+	pn533_send_ack(dev, GFP_KERNEL);
+
+	kfree(arg);
+
+	up(&dev->cmd_lock);
+}
+
 static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
 								u8 cfgdata_len)
 {
@@ -1668,7 +1780,15 @@
 			NULL, 0,
 			pn533_send_complete, dev);
 
-	tasklet_init(&dev->tasklet, pn533_tasklet_cmd_complete, (ulong)dev);
+	INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete);
+	INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
+	dev->wq = alloc_workqueue("pn533",
+				  WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
+				  1);
+	if (dev->wq == NULL)
+		goto error;
+
+	skb_queue_head_init(&dev->resp_q);
 
 	usb_set_intfdata(interface, dev);
 
@@ -1678,7 +1798,7 @@
 	rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
 								dev->in_maxlen);
 	if (rc)
-		goto kill_tasklet;
+		goto destroy_wq;
 
 	fw_ver = (struct pn533_fw_version *)
 				PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame);
@@ -1694,7 +1814,7 @@
 					   PN533_CMD_DATAEXCH_HEAD_LEN,
 					   PN533_FRAME_TAIL_SIZE);
 	if (!dev->nfc_dev)
-		goto kill_tasklet;
+		goto destroy_wq;
 
 	nfc_set_parent_dev(dev->nfc_dev, &interface->dev);
 	nfc_set_drvdata(dev->nfc_dev, dev);
@@ -1720,8 +1840,8 @@
 
 free_nfc_dev:
 	nfc_free_device(dev->nfc_dev);
-kill_tasklet:
-	tasklet_kill(&dev->tasklet);
+destroy_wq:
+	destroy_workqueue(dev->wq);
 error:
 	kfree(dev->in_frame);
 	usb_free_urb(dev->in_urb);
@@ -1744,7 +1864,9 @@
 	usb_kill_urb(dev->in_urb);
 	usb_kill_urb(dev->out_urb);
 
-	tasklet_kill(&dev->tasklet);
+	destroy_workqueue(dev->wq);
+
+	skb_queue_purge(&dev->resp_q);
 
 	kfree(dev->in_frame);
 	usb_free_urb(dev->in_urb);
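
The pn533 changes above replace the driver's tasklet with a dedicated workqueue so that response processing runs in process context and may sleep (GFP_KERNEL allocations, resubmitting URBs, and so on). A minimal sketch of that general pattern, using hypothetical names (my_dev, my_rx_work_fn) rather than the driver's own symbols:

	struct my_dev {
		struct workqueue_struct *wq;
		struct work_struct rx_work;
	};

	static void my_rx_work_fn(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev, rx_work);

		/* process context: sleeping allocations and sync USB calls are fine */
		pr_debug("processing response for %p\n", dev);
	}

	static int my_init(struct my_dev *dev)
	{
		INIT_WORK(&dev->rx_work, my_rx_work_fn);
		dev->wq = alloc_workqueue("my_dev", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
		return dev->wq ? 0 : -ENOMEM;
	}

	/* from the URB completion handler (atomic context): queue_work(dev->wq, &dev->rx_work); */
	/* on teardown: destroy_workqueue(dev->wq); */
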
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 5806449..d9bfd49 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1260,3 +1260,44 @@
 	return id;
 }
 EXPORT_SYMBOL_GPL(of_alias_get_id);
+
+const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
+			       u32 *pu)
+{
+	const void *curv = cur;
+
+	if (!prop)
+		return NULL;
+
+	if (!cur) {
+		curv = prop->value;
+		goto out_val;
+	}
+
+	curv += sizeof(*cur);
+	if (curv >= prop->value + prop->length)
+		return NULL;
+
+out_val:
+	*pu = be32_to_cpup(curv);
+	return curv;
+}
+EXPORT_SYMBOL_GPL(of_prop_next_u32);
+
+const char *of_prop_next_string(struct property *prop, const char *cur)
+{
+	const void *curv = cur;
+
+	if (!prop)
+		return NULL;
+
+	if (!cur)
+		return prop->value;
+
+	curv += strlen(cur) + 1;
+	if (curv >= prop->value + prop->length)
+		return NULL;
+
+	return curv;
+}
+EXPORT_SYMBOL_GPL(of_prop_next_string);
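
The two helpers added above are cursor-style iterators over property values: pass NULL to get the first element, and the previous return value to get the next one, until NULL is returned. A minimal sketch of walking a u32 property this way (np and the "clock-rates" property name are assumed here purely for illustration):

	struct property *prop;
	const __be32 *cur = NULL;
	u32 rate;

	prop = of_find_property(np, "clock-rates", NULL);
	while ((cur = of_prop_next_u32(prop, cur, &rate)))
		pr_info("supported rate: %u\n", rate);

of_prop_next_string() works the same way, returning successive NUL-terminated strings from the property value.
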
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 483c0adc..2574abd 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -45,6 +45,8 @@
 		for (i=0; i<PHY_MAX_ADDR; i++)
 			mdio->irq[i] = PHY_POLL;
 
+	mdio->dev.of_node = np;
+
 	/* Register the MDIO bus */
 	rc = mdiobus_register(mdio);
 	if (rc)
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 8644d53..42cfcd9 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -44,6 +44,7 @@
 #include <asm/ropes.h>
 #include <asm/mckinley.h>	/* for proc_mckinley_root */
 #include <asm/runway.h>		/* for proc_runway_root */
+#include <asm/page.h>		/* for PAGE0 */
 #include <asm/pdc.h>		/* for PDC_MODEL_* */
 #include <asm/pdcpat.h>		/* for is_pdc_pat() */
 #include <asm/parisc-device.h>
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 083a49f..01c001f 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the PCI bus specific drivers.
 #
 
-obj-y		+= access.o bus.o probe.o remove.o pci.o \
+obj-y		+= access.o bus.o probe.o host-bridge.o remove.o pci.o \
 			pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
 			irq.o vpd.o
 obj-$(CONFIG_PROC_FS) += proc.o
@@ -42,6 +42,7 @@
 obj-$(CONFIG_PARISC) += setup-bus.o
 obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
 obj-$(CONFIG_PPC) += setup-bus.o
+obj-$(CONFIG_FRV) += setup-bus.o
 obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
 obj-$(CONFIG_X86_VISWS) += setup-irq.o
 obj-$(CONFIG_MN10300) += setup-bus.o
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
new file mode 100644
index 0000000..a68dc61
--- /dev/null
+++ b/drivers/pci/host-bridge.c
@@ -0,0 +1,96 @@
+/*
+ * host bridge related code
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+
+#include "pci.h"
+
+static struct pci_bus *find_pci_root_bus(struct pci_dev *dev)
+{
+	struct pci_bus *bus;
+
+	bus = dev->bus;
+	while (bus->parent)
+		bus = bus->parent;
+
+	return bus;
+}
+
+static struct pci_host_bridge *find_pci_host_bridge(struct pci_dev *dev)
+{
+	struct pci_bus *bus = find_pci_root_bus(dev);
+
+	return to_pci_host_bridge(bus->bridge);
+}
+
+void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
+				 void (*release_fn)(struct pci_host_bridge *),
+				 void *release_data)
+{
+	bridge->release_fn = release_fn;
+	bridge->release_data = release_data;
+}
+
+static bool resource_contains(struct resource *res1, struct resource *res2)
+{
+	return res1->start <= res2->start && res1->end >= res2->end;
+}
+
+void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+			     struct resource *res)
+{
+	struct pci_host_bridge *bridge = find_pci_host_bridge(dev);
+	struct pci_host_bridge_window *window;
+	resource_size_t offset = 0;
+
+	list_for_each_entry(window, &bridge->windows, list) {
+		if (resource_type(res) != resource_type(window->res))
+			continue;
+
+		if (resource_contains(window->res, res)) {
+			offset = window->offset;
+			break;
+		}
+	}
+
+	region->start = res->start - offset;
+	region->end = res->end - offset;
+}
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+
+static bool region_contains(struct pci_bus_region *region1,
+			    struct pci_bus_region *region2)
+{
+	return region1->start <= region2->start && region1->end >= region2->end;
+}
+
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			     struct pci_bus_region *region)
+{
+	struct pci_host_bridge *bridge = find_pci_host_bridge(dev);
+	struct pci_host_bridge_window *window;
+	resource_size_t offset = 0;
+
+	list_for_each_entry(window, &bridge->windows, list) {
+		struct pci_bus_region bus_region;
+
+		if (resource_type(res) != resource_type(window->res))
+			continue;
+
+		bus_region.start = window->res->start - window->offset;
+		bus_region.end = window->res->end - window->offset;
+
+		if (region_contains(&bus_region, region)) {
+			offset = window->offset;
+			break;
+		}
+	}
+
+	res->start = region->start + offset;
+	res->end = region->end + offset;
+}
+EXPORT_SYMBOL(pcibios_bus_to_resource);
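
The translation helpers above look up the host bridge window whose range contains the resource (or bus region) being converted and apply that window's offset. As a rough worked example with hypothetical values: if a bridge window maps CPU addresses 0x80000000-0x8fffffff to bus addresses starting at 0, then window->offset is 0x80000000 and a BAR resource starting at CPU address 0x80001000 converts like this (pdev and res are an assumed pci_dev and its BAR resource):

	struct pci_bus_region region;

	/* assume res->start == 0x80001000 and the matching window->offset == 0x80000000 */
	pcibios_resource_to_bus(pdev, &region, res);
	/* region.start is now 0x00001000; pcibios_bus_to_resource() applies the inverse */
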
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 0f150f2..61e2fef 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -200,7 +200,7 @@
 		return PCI_D1;
 	case ACPI_STATE_D2:
 		return PCI_D2;
-	case ACPI_STATE_D3:
+	case ACPI_STATE_D3_HOT:
 		return PCI_D3hot;
 	case ACPI_STATE_D3_COLD:
 		return PCI_D3cold;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 6b54b23..bf0cee6 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -421,6 +421,12 @@
 	pci_msix_shutdown(pci_dev);
 
 	/*
+	 * Turn off the Bus Master bit on the device so that it does not
+	 * continue to do DMA
	 * Turn off the Bus Master bit on the device so that it does not
+	 */
+	pci_disable_device(pci_dev);
+
+	/*
 	 * Devices may be enabled to wake up by runtime PM, but they need not
 	 * be supposed to wake up the system from its "power off" state (e.g.
 	 * ACPI S5).  Therefore disable wakeup for all devices that aren't
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 111569c..8f16900 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/pm_runtime.h>
+#include <asm-generic/pci-bridge.h>
 #include <asm/setup.h>
 #include "pci.h"
 
@@ -3164,18 +3165,12 @@
 	return 0;
 }
 
-static int pci_dev_reset(struct pci_dev *dev, int probe)
+static int __pci_dev_reset(struct pci_dev *dev, int probe)
 {
 	int rc;
 
 	might_sleep();
 
-	if (!probe) {
-		pci_cfg_access_lock(dev);
-		/* block PM suspend, driver probe, etc. */
-		device_lock(&dev->dev);
-	}
-
 	rc = pci_dev_specific_reset(dev, probe);
 	if (rc != -ENOTTY)
 		goto done;
@@ -3194,14 +3189,27 @@
 
 	rc = pci_parent_bus_reset(dev, probe);
 done:
+	return rc;
+}
+
+static int pci_dev_reset(struct pci_dev *dev, int probe)
+{
+	int rc;
+
+	if (!probe) {
+		pci_cfg_access_lock(dev);
+		/* block PM suspend, driver probe, etc. */
+		device_lock(&dev->dev);
+	}
+
+	rc = __pci_dev_reset(dev, probe);
+
 	if (!probe) {
 		device_unlock(&dev->dev);
 		pci_cfg_access_unlock(dev);
 	}
-
 	return rc;
 }
-
 /**
  * __pci_reset_function - reset a PCI device function
  * @dev: PCI device to reset
@@ -3246,7 +3254,7 @@
  */
 int __pci_reset_function_locked(struct pci_dev *dev)
 {
-	return pci_dev_reset(dev, 1);
+	return __pci_dev_reset(dev, 0);
 }
 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
 
@@ -3893,6 +3901,8 @@
 				pcie_bus_config = PCIE_BUS_PERFORMANCE;
 			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
 				pcie_bus_config = PCIE_BUS_PEER2PEER;
+			} else if (!strncmp(str, "pcie_scan_all", 13)) {
+				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
 			} else {
 				printk(KERN_ERR "PCI: Unknown option `%s'\n",
 						str);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 2f589a5..75915b3 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -249,7 +249,7 @@
 	int services = 0, pos;
 	u16 reg16;
 	u32 reg32;
-	int cap_mask;
+	int cap_mask = 0;
 	int err;
 
 	if (pcie_ports_disabled)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 5e1ca3c..658ac97 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -10,18 +10,16 @@
 #include <linux/module.h>
 #include <linux/cpumask.h>
 #include <linux/pci-aspm.h>
+#include <asm-generic/pci-bridge.h>
 #include "pci.h"
 
 #define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
 #define CARDBUS_RESERVE_BUSNR	3
 
-static LIST_HEAD(pci_host_bridges);
-
 /* Ugh.  Need to stop exporting this to modules. */
 LIST_HEAD(pci_root_buses);
 EXPORT_SYMBOL(pci_root_buses);
 
-
 static int find_anything(struct device *dev, void *data)
 {
 	return 1;
@@ -44,82 +42,6 @@
 }
 EXPORT_SYMBOL(no_pci_devices);
 
-static struct pci_host_bridge *pci_host_bridge(struct pci_dev *dev)
-{
-	struct pci_bus *bus;
-	struct pci_host_bridge *bridge;
-
-	bus = dev->bus;
-	while (bus->parent)
-		bus = bus->parent;
-
-	list_for_each_entry(bridge, &pci_host_bridges, list) {
-		if (bridge->bus == bus)
-			return bridge;
-	}
-
-	return NULL;
-}
-
-static bool resource_contains(struct resource *res1, struct resource *res2)
-{
-	return res1->start <= res2->start && res1->end >= res2->end;
-}
-
-void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
-			     struct resource *res)
-{
-	struct pci_host_bridge *bridge = pci_host_bridge(dev);
-	struct pci_host_bridge_window *window;
-	resource_size_t offset = 0;
-
-	list_for_each_entry(window, &bridge->windows, list) {
-		if (resource_type(res) != resource_type(window->res))
-			continue;
-
-		if (resource_contains(window->res, res)) {
-			offset = window->offset;
-			break;
-		}
-	}
-
-	region->start = res->start - offset;
-	region->end = res->end - offset;
-}
-EXPORT_SYMBOL(pcibios_resource_to_bus);
-
-static bool region_contains(struct pci_bus_region *region1,
-			    struct pci_bus_region *region2)
-{
-	return region1->start <= region2->start && region1->end >= region2->end;
-}
-
-void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
-			     struct pci_bus_region *region)
-{
-	struct pci_host_bridge *bridge = pci_host_bridge(dev);
-	struct pci_host_bridge_window *window;
-	struct pci_bus_region bus_region;
-	resource_size_t offset = 0;
-
-	list_for_each_entry(window, &bridge->windows, list) {
-		if (resource_type(res) != resource_type(window->res))
-			continue;
-
-		bus_region.start = window->res->start - window->offset;
-		bus_region.end = window->res->end - window->offset;
-
-		if (region_contains(&bus_region, region)) {
-			offset = window->offset;
-			break;
-		}
-	}
-
-	res->start = region->start + offset;
-	res->end = region->end + offset;
-}
-EXPORT_SYMBOL(pcibios_bus_to_resource);
-
 /*
  * PCI Bus Class
  */
@@ -501,6 +423,19 @@
 	return b;
 }
 
+static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
+{
+	struct pci_host_bridge *bridge;
+
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	if (bridge) {
+		INIT_LIST_HEAD(&bridge->windows);
+		bridge->bus = b;
+	}
+
+	return bridge;
+}
+
 static unsigned char pcix_bus_speed[] = {
 	PCI_SPEED_UNKNOWN,		/* 0 */
 	PCI_SPEED_66MHz_PCIX,		/* 1 */
@@ -1201,7 +1136,14 @@
 
 static void pci_release_bus_bridge_dev(struct device *dev)
 {
-	kfree(dev);
+	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
+
+	if (bridge->release_fn)
+		bridge->release_fn(bridge);
+
+	pci_free_resource_list(&bridge->windows);
+
+	kfree(bridge);
 }
 
 struct pci_dev *alloc_pci_dev(void)
@@ -1395,10 +1337,13 @@
 static int only_one_child(struct pci_bus *bus)
 {
 	struct pci_dev *parent = bus->self;
+
 	if (!parent || !pci_is_pcie(parent))
 		return 0;
-	if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
-	    parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
+	if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+		return 1;
+	if (parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM &&
+	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
 		return 1;
 	return 0;
 }
@@ -1650,28 +1595,19 @@
 	int error;
 	struct pci_host_bridge *bridge;
 	struct pci_bus *b, *b2;
-	struct device *dev;
 	struct pci_host_bridge_window *window, *n;
 	struct resource *res;
 	resource_size_t offset;
 	char bus_addr[64];
 	char *fmt;
 
-	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
-	if (!bridge)
-		return NULL;
 
 	b = pci_alloc_bus();
 	if (!b)
-		goto err_bus;
-
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		goto err_dev;
+		return NULL;
 
 	b->sysdata = sysdata;
 	b->ops = ops;
-
 	b2 = pci_find_bus(pci_domain_nr(b), bus);
 	if (b2) {
 		/* If we already got to this bus through a different bridge, ignore it */
@@ -1679,13 +1615,17 @@
 		goto err_out;
 	}
 
-	dev->parent = parent;
-	dev->release = pci_release_bus_bridge_dev;
-	dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
-	error = device_register(dev);
+	bridge = pci_alloc_host_bridge(b);
+	if (!bridge)
+		goto err_out;
+
+	bridge->dev.parent = parent;
+	bridge->dev.release = pci_release_bus_bridge_dev;
+	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
+	error = device_register(&bridge->dev);
 	if (error)
-		goto dev_reg_err;
-	b->bridge = get_device(dev);
+		goto bridge_dev_reg_err;
+	b->bridge = get_device(&bridge->dev);
 	device_enable_async_suspend(b->bridge);
 	pci_set_bus_of_node(b);
 
@@ -1704,9 +1644,6 @@
 
 	b->number = b->secondary = bus;
 
-	bridge->bus = b;
-	INIT_LIST_HEAD(&bridge->windows);
-
 	if (parent)
 		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
 	else
@@ -1732,25 +1669,18 @@
 	}
 
 	down_write(&pci_bus_sem);
-	list_add_tail(&bridge->list, &pci_host_bridges);
 	list_add_tail(&b->node, &pci_root_buses);
 	up_write(&pci_bus_sem);
 
 	return b;
 
 class_dev_reg_err:
-	device_unregister(dev);
-dev_reg_err:
-	down_write(&pci_bus_sem);
-	list_del(&bridge->list);
-	list_del(&b->node);
-	up_write(&pci_bus_sem);
-err_out:
-	kfree(dev);
-err_dev:
-	kfree(b);
-err_bus:
+	put_device(&bridge->dev);
+	device_unregister(&bridge->dev);
+bridge_dev_reg_err:
 	kfree(bridge);
+err_out:
+	kfree(b);
 	return NULL;
 }
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4bf7102..2a75216 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2626,6 +2626,18 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
 			quirk_msi_intx_disable_bug);
 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
+			quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
+			quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
+			quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
+			quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
+			quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
+			quirk_msi_intx_disable_bug);
 #endif /* CONFIG_PCI_MSI */
 
 /* Allow manual resource allocation for PCI hotplug bridges
@@ -3085,16 +3097,74 @@
 	return 0;
 }
 
+#include "../gpu/drm/i915/i915_reg.h"
+#define MSG_CTL			0x45010
+#define NSDE_PWR_STATE		0xd0100
+#define IGD_OPERATION_TIMEOUT	10000     /* timeout of 10 seconds, in ms */
+
+static int reset_ivb_igd(struct pci_dev *dev, int probe)
+{
+	void __iomem *mmio_base;
+	unsigned long timeout;
+	u32 val;
+
+	if (probe)
+		return 0;
+
+	mmio_base = pci_iomap(dev, 0, 0);
+	if (!mmio_base)
+		return -ENOMEM;
+
+	iowrite32(0x00000002, mmio_base + MSG_CTL);
+
+	/*
+	 * Clobbering the SOUTH_CHICKEN2 register is fine only if the next
+	 * driver loaded sets the right bits. However, this is a reset and
+	 * the bits have already been set by i915 previously, so we clobber
+	 * the SOUTH_CHICKEN2 register directly here.
+	 */
+	iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);
+
+	val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
+	iowrite32(val, mmio_base + PCH_PP_CONTROL);
+
+	timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
+	do {
+		val = ioread32(mmio_base + PCH_PP_STATUS);
+		if ((val & 0xb0000000) == 0)
+			goto reset_complete;
+		msleep(10);
+	} while (time_before(jiffies, timeout));
+	dev_warn(&dev->dev, "timeout during reset\n");
+
+reset_complete:
+	iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);
+
+	pci_iounmap(dev, mmio_base);
+	return 0;
+}
+
 #define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
+#define PCI_DEVICE_ID_INTEL_IVB_M_VGA      0x0156
+#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA     0x0166
 
 static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
 		 reset_intel_82599_sfp_virtfn },
+	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
+		reset_ivb_igd },
+	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
+		reset_ivb_igd },
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
 		reset_intel_generic_dev },
 	{ 0 }
 };
 
+/*
+ * These device-specific reset methods are here rather than in a driver
+ * because when a host assigns a device to a guest VM, the host may need
+ * to reset the device but probably doesn't have a driver for it.
+ */
 int pci_dev_specific_reset(struct pci_dev *dev, int probe)
 {
 	const struct pci_dev_reset_methods *i;
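
Only the opening of pci_dev_specific_reset() is visible in this hunk; roughly, it scans the table above for an entry matching the device, with PCI_ANY_ID acting as a wildcard, and invokes that entry's reset method. A sketch of that lookup, not the verbatim implementation:

	for (i = pci_dev_reset_methods; i->reset; i++) {
		if ((i->vendor == dev->vendor ||
		     i->vendor == (u16)PCI_ANY_ID) &&
		    (i->device == dev->device ||
		     i->device == (u16)PCI_ANY_ID))
			return i->reset(dev, probe);
	}

	return -ENOTTY;
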
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index abfb964..91c1f64 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -4,7 +4,6 @@
 
 config PINCTRL
 	bool
-	depends on EXPERIMENTAL
 
 if PINCTRL
 
@@ -27,6 +26,35 @@
 	help
 	  Say Y here to add some extra checks and diagnostics to PINCTRL calls.
 
+config PINCTRL_IMX
+	bool
+	select PINMUX
+	select PINCONF
+
+config PINCTRL_IMX51
+	bool "IMX51 pinctrl driver"
+	depends on OF
+	depends on SOC_IMX51
+	select PINCTRL_IMX
+	help
+	  Say Y here to enable the imx51 pinctrl driver
+
+config PINCTRL_IMX53
+	bool "IMX53 pinctrl driver"
+	depends on OF
+	depends on SOC_IMX53
+	select PINCTRL_IMX
+	help
+	  Say Y here to enable the imx53 pinctrl driver
+
+config PINCTRL_IMX6Q
+	bool "IMX6Q pinctrl driver"
+	depends on OF
+	depends on SOC_IMX6Q
+	select PINCTRL_IMX
+	help
+	  Say Y here to enable the imx6q pinctrl driver
+
 config PINCTRL_PXA3xx
 	bool
 	select PINMUX
@@ -37,6 +65,21 @@
 	select PINCTRL_PXA3xx
 	select PINCONF
 
+config PINCTRL_MXS
+	bool
+
+config PINCTRL_IMX23
+	bool
+	select PINMUX
+	select PINCONF
+	select PINCTRL_MXS
+
+config PINCTRL_IMX28
+	bool
+	select PINMUX
+	select PINCONF
+	select PINCTRL_MXS
+
 config PINCTRL_PXA168
 	bool "PXA168 pin controller driver"
 	depends on ARCH_MMP
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 6d4150b..515e32f 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -5,9 +5,19 @@
 obj-$(CONFIG_PINCTRL)		+= core.o
 obj-$(CONFIG_PINMUX)		+= pinmux.o
 obj-$(CONFIG_PINCONF)		+= pinconf.o
+ifeq ($(CONFIG_OF),y)
+obj-$(CONFIG_PINCTRL)		+= devicetree.o
+endif
 obj-$(CONFIG_GENERIC_PINCONF)	+= pinconf-generic.o
+obj-$(CONFIG_PINCTRL_IMX)	+= pinctrl-imx.o
+obj-$(CONFIG_PINCTRL_IMX51)	+= pinctrl-imx51.o
+obj-$(CONFIG_PINCTRL_IMX53)	+= pinctrl-imx53.o
+obj-$(CONFIG_PINCTRL_IMX6Q)	+= pinctrl-imx6q.o
 obj-$(CONFIG_PINCTRL_PXA3xx)	+= pinctrl-pxa3xx.o
 obj-$(CONFIG_PINCTRL_MMP2)	+= pinctrl-mmp2.o
+obj-$(CONFIG_PINCTRL_MXS)	+= pinctrl-mxs.o
+obj-$(CONFIG_PINCTRL_IMX23)	+= pinctrl-imx23.o
+obj-$(CONFIG_PINCTRL_IMX28)	+= pinctrl-imx28.o
 obj-$(CONFIG_PINCTRL_PXA168)	+= pinctrl-pxa168.o
 obj-$(CONFIG_PINCTRL_PXA910)	+= pinctrl-pxa910.o
 obj-$(CONFIG_PINCTRL_SIRF)	+= pinctrl-sirf.o
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index df6296c..c3b331b 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -23,9 +23,11 @@
 #include <linux/sysfs.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/machine.h>
 #include "core.h"
+#include "devicetree.h"
 #include "pinmux.h"
 #include "pinconf.h"
 
@@ -41,11 +43,13 @@
 	unsigned num_maps;
 };
 
+static bool pinctrl_dummy_state;
+
 /* Mutex taken by all entry points */
 DEFINE_MUTEX(pinctrl_mutex);
 
 /* Global list of pin control devices (struct pinctrl_dev) */
-static LIST_HEAD(pinctrldev_list);
+LIST_HEAD(pinctrldev_list);
 
 /* List of pin controller handles (struct pinctrl) */
 static LIST_HEAD(pinctrl_list);
@@ -59,6 +63,19 @@
 			_i_ < _maps_node_->num_maps; \
 			i++, _map_ = &_maps_node_->maps[_i_])
 
+/**
+ * pinctrl_provide_dummies() - indicate that the core should provide dummy states
+ *
+ * This function is usually called by platforms that have no pinctrl driver
+ * support but still run shared drivers that use the pinctrl APIs.
+ * After it has been called, the pinctrl core will create a dummy state for
+ * any requested state that is not found, so such drivers can keep going.
+ */
+void pinctrl_provide_dummies(void)
+{
+	pinctrl_dummy_state = true;
+}
+
 const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev)
 {
 	/* We're not allowed to register devices without name */
@@ -124,6 +141,25 @@
 }
 
 /**
+ * pin_get_name() - look up a pin name from a pin id
+ * @pctldev: the pin control device to look up the pin on
+ * @pin: the pin id (local pin controller index) to look up
+ */
+const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin)
+{
+	const struct pin_desc *desc;
+
+	desc = pin_desc_get(pctldev, pin);
+	if (desc == NULL) {
+		dev_err(pctldev->dev, "failed to get pin(%d) name\n",
+			pin);
+		return NULL;
+	}
+
+	return desc->name;
+}
+
+/**
  * pin_is_valid() - check if pin exists on controller
  * @pctldev: the pin control device to check the pin on
  * @pin: pin to check, use the local pin controller index number
@@ -255,7 +291,8 @@
  *
  * Find the pin controller handling a certain GPIO pin from the pinspace of
  * the GPIO subsystem, return the device and the matching GPIO range. Returns
- * negative if the GPIO range could not be found in any device.
+ * -EPROBE_DEFER if the GPIO range could not be found in any device, since it
+ * may not have been registered yet.
  */
 static int pinctrl_get_device_gpio_range(unsigned gpio,
 					 struct pinctrl_dev **outdev,
@@ -275,7 +312,7 @@
 		}
 	}
 
-	return -EINVAL;
+	return -EPROBE_DEFER;
 }
 
 /**
@@ -318,9 +355,10 @@
 			       const char *pin_group)
 {
 	const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+	unsigned ngroups = pctlops->get_groups_count(pctldev);
 	unsigned group_selector = 0;
 
-	while (pctlops->list_groups(pctldev, group_selector) >= 0) {
+	while (group_selector < ngroups) {
 		const char *gname = pctlops->get_group_name(pctldev,
 							    group_selector);
 		if (!strcmp(gname, pin_group)) {
@@ -360,7 +398,7 @@
 	ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
 	if (ret) {
 		mutex_unlock(&pinctrl_mutex);
-		return -EINVAL;
+		return ret;
 	}
 
 	/* Convert to the pin controllers number space */
@@ -516,11 +554,14 @@
 
 	setting->pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
 	if (setting->pctldev == NULL) {
-		dev_err(p->dev, "unknown pinctrl device %s in map entry",
+		dev_info(p->dev, "unknown pinctrl device %s in map entry, deferring probe",
 			map->ctrl_dev_name);
 		kfree(setting);
-		/* Eventually, this should trigger deferred probe */
-		return -ENODEV;
+		/*
+		 * OK let us guess that the driver is not there yet, and
+		 * let's defer obtaining this pinctrl handle to later...
+		 */
+		return -EPROBE_DEFER;
 	}
 
 	switch (map->type) {
@@ -579,6 +620,13 @@
 	}
 	p->dev = dev;
 	INIT_LIST_HEAD(&p->states);
+	INIT_LIST_HEAD(&p->dt_maps);
+
+	ret = pinctrl_dt_to_map(p);
+	if (ret < 0) {
+		kfree(p);
+		return ERR_PTR(ret);
+	}
 
 	devname = dev_name(dev);
 
@@ -662,6 +710,8 @@
 		kfree(state);
 	}
 
+	pinctrl_dt_free_maps(p);
+
 	if (inlist)
 		list_del(&p->node);
 	kfree(p);
@@ -685,8 +735,18 @@
 	struct pinctrl_state *state;
 
 	state = find_state(p, name);
-	if (!state)
-		return ERR_PTR(-ENODEV);
+	if (!state) {
+		if (pinctrl_dummy_state) {
+			/* create dummy state */
+			dev_dbg(p->dev, "using pinctrl dummy state (%s)\n",
+				name);
+			state = create_state(p, name);
+			if (IS_ERR(state))
+				return state;
+		} else {
+			return ERR_PTR(-ENODEV);
+		}
+	}
 
 	return state;
 }
@@ -787,15 +847,63 @@
 }
 EXPORT_SYMBOL_GPL(pinctrl_select_state);
 
+static void devm_pinctrl_release(struct device *dev, void *res)
+{
+	pinctrl_put(*(struct pinctrl **)res);
+}
+
 /**
- * pinctrl_register_mappings() - register a set of pin controller mappings
- * @maps: the pincontrol mappings table to register. This should probably be
- *	marked with __initdata so it can be discarded after boot. This
- *	function will perform a shallow copy for the mapping entries.
- * @num_maps: the number of maps in the mapping table
+ * devm_pinctrl_get() - Resource managed pinctrl_get()
+ * @dev: the device to obtain the handle for
+ *
+ * If there is a need to explicitly destroy the returned struct pinctrl,
+ * devm_pinctrl_put() should be used, rather than plain pinctrl_put().
  */
-int pinctrl_register_mappings(struct pinctrl_map const *maps,
-			      unsigned num_maps)
+struct pinctrl *devm_pinctrl_get(struct device *dev)
+{
+	struct pinctrl **ptr, *p;
+
+	ptr = devres_alloc(devm_pinctrl_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	p = pinctrl_get(dev);
+	if (!IS_ERR(p)) {
+		*ptr = p;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return p;
+}
+EXPORT_SYMBOL_GPL(devm_pinctrl_get);
+
+static int devm_pinctrl_match(struct device *dev, void *res, void *data)
+{
+	struct pinctrl **p = res;
+
+	return *p == data;
+}
+
+/**
+ * devm_pinctrl_put() - Resource managed pinctrl_put()
+ * @p: the pinctrl handle to release
+ *
+ * Deallocate a struct pinctrl obtained via devm_pinctrl_get(). Normally
+ * this function will not need to be called and the resource management
+ * code will ensure that the resource is freed.
+ */
+void devm_pinctrl_put(struct pinctrl *p)
+{
+	WARN_ON(devres_destroy(p->dev, devm_pinctrl_release,
+			       devm_pinctrl_match, p));
+	pinctrl_put(p);
+}
+EXPORT_SYMBOL_GPL(devm_pinctrl_put);
+
+int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
+			 bool dup, bool locked)
 {
 	int i, ret;
 	struct pinctrl_maps *maps_node;
@@ -829,13 +937,13 @@
 		case PIN_MAP_TYPE_MUX_GROUP:
 			ret = pinmux_validate_map(&maps[i], i);
 			if (ret < 0)
-				return 0;
+				return ret;
 			break;
 		case PIN_MAP_TYPE_CONFIGS_PIN:
 		case PIN_MAP_TYPE_CONFIGS_GROUP:
 			ret = pinconf_validate_map(&maps[i], i);
 			if (ret < 0)
-				return 0;
+				return ret;
 			break;
 		default:
 			pr_err("failed to register map %s (%d): invalid type given\n",
@@ -851,20 +959,52 @@
 	}
 
 	maps_node->num_maps = num_maps;
-	maps_node->maps = kmemdup(maps, sizeof(*maps) * num_maps, GFP_KERNEL);
-	if (!maps_node->maps) {
-		pr_err("failed to duplicate mapping table\n");
-		kfree(maps_node);
-		return -ENOMEM;
+	if (dup) {
+		maps_node->maps = kmemdup(maps, sizeof(*maps) * num_maps,
+					  GFP_KERNEL);
+		if (!maps_node->maps) {
+			pr_err("failed to duplicate mapping table\n");
+			kfree(maps_node);
+			return -ENOMEM;
+		}
+	} else {
+		maps_node->maps = maps;
 	}
 
-	mutex_lock(&pinctrl_mutex);
+	if (!locked)
+		mutex_lock(&pinctrl_mutex);
 	list_add_tail(&maps_node->node, &pinctrl_maps);
-	mutex_unlock(&pinctrl_mutex);
+	if (!locked)
+		mutex_unlock(&pinctrl_mutex);
 
 	return 0;
 }
 
+/**
+ * pinctrl_register_mappings() - register a set of pin controller mappings
+ * @maps: the pin control mappings table to register. This should probably be
+ *	marked with __initdata so it can be discarded after boot. This
+ *	function will perform a shallow copy for the mapping entries.
+ * @num_maps: the number of maps in the mapping table
+ */
+int pinctrl_register_mappings(struct pinctrl_map const *maps,
+			      unsigned num_maps)
+{
+	return pinctrl_register_map(maps, num_maps, true, false);
+}
+
+void pinctrl_unregister_map(struct pinctrl_map const *map)
+{
+	struct pinctrl_maps *maps_node;
+
+	list_for_each_entry(maps_node, &pinctrl_maps, node) {
+		if (maps_node->maps == map) {
+			list_del(&maps_node->node);
+			return;
+		}
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 
 static int pinctrl_pins_show(struct seq_file *s, void *what)
@@ -906,15 +1046,17 @@
 {
 	struct pinctrl_dev *pctldev = s->private;
 	const struct pinctrl_ops *ops = pctldev->desc->pctlops;
-	unsigned selector = 0;
+	unsigned ngroups, selector = 0;
 
+	ngroups = ops->get_groups_count(pctldev);
 	mutex_lock(&pinctrl_mutex);
 
 	seq_puts(s, "registered pin groups:\n");
-	while (ops->list_groups(pctldev, selector) >= 0) {
+	while (selector < ngroups) {
 		const unsigned *pins;
 		unsigned num_pins;
 		const char *gname = ops->get_group_name(pctldev, selector);
+		const char *pname;
 		int ret;
 		int i;
 
@@ -924,10 +1066,14 @@
 			seq_printf(s, "%s [ERROR GETTING PINS]\n",
 				   gname);
 		else {
-			seq_printf(s, "group: %s, pins = [ ", gname);
-			for (i = 0; i < num_pins; i++)
-				seq_printf(s, "%d ", pins[i]);
-			seq_puts(s, "]\n");
+			seq_printf(s, "group: %s\n", gname);
+			for (i = 0; i < num_pins; i++) {
+				pname = pin_get_name(pctldev, pins[i]);
+				if (WARN_ON(!pname))
+					return -EINVAL;
+				seq_printf(s, "pin %d (%s)\n", pins[i], pname);
+			}
+			seq_puts(s, "\n");
 		}
 		selector++;
 	}
@@ -1226,11 +1372,14 @@
 	const struct pinctrl_ops *ops = pctldev->desc->pctlops;
 
 	if (!ops ||
-	    !ops->list_groups ||
+	    !ops->get_groups_count ||
 	    !ops->get_group_name ||
 	    !ops->get_group_pins)
 		return -EINVAL;
 
+	if (ops->dt_node_to_map && !ops->dt_free_map)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -1268,37 +1417,29 @@
 	/* check core ops for sanity */
 	ret = pinctrl_check_ops(pctldev);
 	if (ret) {
-		pr_err("%s pinctrl ops lacks necessary functions\n",
-			pctldesc->name);
+		dev_err(dev, "pinctrl ops lacks necessary functions\n");
 		goto out_err;
 	}
 
 	/* If we're implementing pinmuxing, check the ops for sanity */
 	if (pctldesc->pmxops) {
 		ret = pinmux_check_ops(pctldev);
-		if (ret) {
-			pr_err("%s pinmux ops lacks necessary functions\n",
-			       pctldesc->name);
+		if (ret)
 			goto out_err;
-		}
 	}
 
 	/* If we're implementing pinconfig, check the ops for sanity */
 	if (pctldesc->confops) {
 		ret = pinconf_check_ops(pctldev);
-		if (ret) {
-			pr_err("%s pin config ops lacks necessary functions\n",
-			       pctldesc->name);
+		if (ret)
 			goto out_err;
-		}
 	}
 
 	/* Register all the pins */
-	pr_debug("try to register %d pins on %s...\n",
-		 pctldesc->npins, pctldesc->name);
+	dev_dbg(dev, "try to register %d pins ...\n",  pctldesc->npins);
 	ret = pinctrl_register_pins(pctldev, pctldesc->pins, pctldesc->npins);
 	if (ret) {
-		pr_err("error during pin registration\n");
+		dev_err(dev, "error during pin registration\n");
 		pinctrl_free_pindescs(pctldev, pctldesc->pins,
 				      pctldesc->npins);
 		goto out_err;
@@ -1313,8 +1454,15 @@
 		struct pinctrl_state *s =
 			pinctrl_lookup_state_locked(pctldev->p,
 						    PINCTRL_STATE_DEFAULT);
-		if (!IS_ERR(s))
-			pinctrl_select_state_locked(pctldev->p, s);
+		if (IS_ERR(s)) {
+			dev_dbg(dev, "failed to lookup the default state\n");
+		} else {
+			ret = pinctrl_select_state_locked(pctldev->p, s);
+			if (ret) {
+				dev_err(dev,
+					"failed to select default state\n");
+			}
+		}
 	}
 
 	mutex_unlock(&pinctrl_mutex);
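
With the resource-managed helpers added above, a typical consumer driver probe can be as small as the following sketch (foo_probe and the platform device are hypothetical, and error handling is minimal):

	static int foo_probe(struct platform_device *pdev)
	{
		struct pinctrl *p;
		struct pinctrl_state *s;
		int ret;

		p = devm_pinctrl_get(&pdev->dev);
		if (IS_ERR(p))
			return PTR_ERR(p);	/* may be -EPROBE_DEFER */

		s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
		if (IS_ERR(s))
			return PTR_ERR(s);

		ret = pinctrl_select_state(p, s);
		if (ret)
			return ret;

		return 0;
	}

The handle is released automatically by the devres core when the device is unbound, so no explicit pinctrl_put() is needed.
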
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 17ecf65..1f40ff6 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -52,12 +52,15 @@
  * @dev: the device using this pin control handle
  * @states: a list of states for this device
  * @state: the current state
+ * @dt_maps: the mapping table chunks dynamically parsed from device tree for
+ *	this device, if any
  */
 struct pinctrl {
 	struct list_head node;
 	struct device *dev;
 	struct list_head states;
 	struct pinctrl_state *state;
+	struct list_head dt_maps;
 };
 
 /**
@@ -100,7 +103,8 @@
  * struct pinctrl_setting - an individual mux or config setting
  * @node: list node for struct pinctrl_settings's @settings field
  * @type: the type of setting
- * @pctldev: pin control device handling to be programmed
+ * @pctldev: pin control device handling the setting to be programmed. Not
+ *   used for PIN_MAP_TYPE_DUMMY_STATE.
  * @data: Data specific to the setting type
  */
 struct pinctrl_setting {
@@ -144,6 +148,7 @@
 
 struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name);
 int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
+const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin);
 int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
 			       const char *pin_group);
 
@@ -153,4 +158,9 @@
 	return radix_tree_lookup(&pctldev->pin_desc_tree, pin);
 }
 
+int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
+			 bool dup, bool locked);
+void pinctrl_unregister_map(struct pinctrl_map const *map);
+
 extern struct mutex pinctrl_mutex;
+extern struct list_head pinctrldev_list;
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
new file mode 100644
index 0000000..fcb1de4
--- /dev/null
+++ b/drivers/pinctrl/devicetree.c
@@ -0,0 +1,249 @@
+/*
+ * Device tree integration for the pin control subsystem
+ *
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "devicetree.h"
+
+/**
+ * struct pinctrl_dt_map - mapping table chunk parsed from device tree
+ * @node: list node for struct pinctrl's @dt_maps field
+ * @pctldev: the pin controller that allocated this struct, and will free it
+ * @map: the mapping table entries
+ * @num_maps: the number of entries in @map
+ */
+struct pinctrl_dt_map {
+	struct list_head node;
+	struct pinctrl_dev *pctldev;
+	struct pinctrl_map *map;
+	unsigned num_maps;
+};
+
+static void dt_free_map(struct pinctrl_dev *pctldev,
+		     struct pinctrl_map *map, unsigned num_maps)
+{
+	if (pctldev) {
+		struct pinctrl_ops *ops = pctldev->desc->pctlops;
+		ops->dt_free_map(pctldev, map, num_maps);
+	} else {
+		/* There is no pctldev for PIN_MAP_TYPE_DUMMY_STATE */
+		kfree(map);
+	}
+}
+
+void pinctrl_dt_free_maps(struct pinctrl *p)
+{
+	struct pinctrl_dt_map *dt_map, *n1;
+
+	list_for_each_entry_safe(dt_map, n1, &p->dt_maps, node) {
+		pinctrl_unregister_map(dt_map->map);
+		list_del(&dt_map->node);
+		dt_free_map(dt_map->pctldev, dt_map->map,
+			    dt_map->num_maps);
+		kfree(dt_map);
+	}
+
+	of_node_put(p->dev->of_node);
+}
+
+static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
+				   struct pinctrl_dev *pctldev,
+				   struct pinctrl_map *map, unsigned num_maps)
+{
+	int i;
+	struct pinctrl_dt_map *dt_map;
+
+	/* Initialize common mapping table entry fields */
+	for (i = 0; i < num_maps; i++) {
+		map[i].dev_name = dev_name(p->dev);
+		map[i].name = statename;
+		if (pctldev)
+			map[i].ctrl_dev_name = dev_name(pctldev->dev);
+	}
+
+	/* Remember the converted mapping table entries */
+	dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL);
+	if (!dt_map) {
+		dev_err(p->dev, "failed to alloc struct pinctrl_dt_map\n");
+		dt_free_map(pctldev, map, num_maps);
+		return -ENOMEM;
+	}
+
+	dt_map->pctldev = pctldev;
+	dt_map->map = map;
+	dt_map->num_maps = num_maps;
+	list_add_tail(&dt_map->node, &p->dt_maps);
+
+	return pinctrl_register_map(map, num_maps, false, true);
+}
+
+static struct pinctrl_dev *find_pinctrl_by_of_node(struct device_node *np)
+{
+	struct pinctrl_dev *pctldev;
+
+	list_for_each_entry(pctldev, &pinctrldev_list, node)
+		if (pctldev->dev->of_node == np)
+			return pctldev;
+
+	return NULL;
+}
+
+static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
+				struct device_node *np_config)
+{
+	struct device_node *np_pctldev;
+	struct pinctrl_dev *pctldev;
+	struct pinctrl_ops *ops;
+	int ret;
+	struct pinctrl_map *map;
+	unsigned num_maps;
+
+	/* Find the pin controller containing np_config */
+	np_pctldev = of_node_get(np_config);
+	for (;;) {
+		np_pctldev = of_get_next_parent(np_pctldev);
+		if (!np_pctldev || of_node_is_root(np_pctldev)) {
+			dev_info(p->dev, "could not find pctldev for node %s, deferring probe\n",
+				np_config->full_name);
+			of_node_put(np_pctldev);
+			/* OK let's just assume this will appear later then */
+			return -EPROBE_DEFER;
+		}
+		pctldev = find_pinctrl_by_of_node(np_pctldev);
+		if (pctldev)
+			break;
+	}
+	of_node_put(np_pctldev);
+
+	/*
+	 * Call pinctrl driver to parse device tree node, and
+	 * generate mapping table entries
+	 */
+	ops = pctldev->desc->pctlops;
+	if (!ops->dt_node_to_map) {
+		dev_err(p->dev, "pctldev %s doesn't support DT\n",
+			dev_name(pctldev->dev));
+		return -ENODEV;
+	}
+	ret = ops->dt_node_to_map(pctldev, np_config, &map, &num_maps);
+	if (ret < 0)
+		return ret;
+
+	/* Stash the mapping table chunk away for later use */
+	return dt_remember_or_free_map(p, statename, pctldev, map, num_maps);
+}
+
+static int dt_remember_dummy_state(struct pinctrl *p, const char *statename)
+{
+	struct pinctrl_map *map;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map) {
+		dev_err(p->dev, "failed to alloc struct pinctrl_map\n");
+		return -ENOMEM;
+	}
+
+	/* There is no pctldev for PIN_MAP_TYPE_DUMMY_STATE */
+	map->type = PIN_MAP_TYPE_DUMMY_STATE;
+
+	return dt_remember_or_free_map(p, statename, NULL, map, 1);
+}
+
+int pinctrl_dt_to_map(struct pinctrl *p)
+{
+	struct device_node *np = p->dev->of_node;
+	int state, ret;
+	char *propname;
+	struct property *prop;
+	const char *statename;
+	const __be32 *list;
+	int size, config;
+	phandle phandle;
+	struct device_node *np_config;
+
+	/* CONFIG_OF enabled, p->dev not instantiated from DT */
+	if (!np) {
+		dev_dbg(p->dev, "no of_node; not parsing pinctrl DT\n");
+		return 0;
+	}
+
+	/* We may store pointers to property names within the node */
+	of_node_get(np);
+
+	/* For each defined state ID */
+	for (state = 0; ; state++) {
+		/* Retrieve the pinctrl-* property */
+		propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
+		prop = of_find_property(np, propname, &size);
+		kfree(propname);
+		if (!prop)
+			break;
+		list = prop->value;
+		size /= sizeof(*list);
+
+		/* Determine whether pinctrl-names property names the state */
+		ret = of_property_read_string_index(np, "pinctrl-names",
+						    state, &statename);
+		/*
+		 * If not, statename is just the integer state ID. But rather
+		 * than dynamically allocate it and have to free it later,
+		 * just point part way into the property name for the string.
+		 */
+		if (ret < 0) {
+			/* strlen("pinctrl-") == 8 */
+			statename = prop->name + 8;
+		}
+
+		/* For every referenced pin configuration node in it */
+		for (config = 0; config < size; config++) {
+			phandle = be32_to_cpup(list++);
+
+			/* Look up the pin configuration node */
+			np_config = of_find_node_by_phandle(phandle);
+			if (!np_config) {
+				dev_err(p->dev,
+					"prop %s index %i invalid phandle\n",
+					prop->name, config);
+				ret = -EINVAL;
+				goto err;
+			}
+
+			/* Parse the node */
+			ret = dt_to_map_one_config(p, statename, np_config);
+			of_node_put(np_config);
+			if (ret < 0)
+				goto err;
+		}
+
+		/* No entries in DT? Generate a dummy state table entry */
+		if (!size) {
+			ret = dt_remember_dummy_state(p, statename);
+			if (ret < 0)
+				goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	pinctrl_dt_free_maps(p);
+	return ret;
+}
diff --git a/drivers/pinctrl/devicetree.h b/drivers/pinctrl/devicetree.h
new file mode 100644
index 0000000..760bc49
--- /dev/null
+++ b/drivers/pinctrl/devicetree.h
@@ -0,0 +1,35 @@
+/*
+ * Internal interface to pinctrl device tree integration
+ *
+ * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef CONFIG_OF
+
+void pinctrl_dt_free_maps(struct pinctrl *p);
+int pinctrl_dt_to_map(struct pinctrl *p);
+
+#else
+
+static inline int pinctrl_dt_to_map(struct pinctrl *p)
+{
+	return 0;
+}
+
+static inline void pinctrl_dt_free_maps(struct pinctrl *p)
+{
+}
+
+#endif
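
A pin controller driver opts into this device tree integration by implementing the two new pinctrl_ops callbacks that pinctrl_check_ops() now validates. A skeletal sketch with hypothetical foo_* names (includes and the remaining mandatory callbacks are omitted):

	static int foo_dt_node_to_map(struct pinctrl_dev *pctldev,
				      struct device_node *np,
				      struct pinctrl_map **map,
				      unsigned *num_maps)
	{
		/* parse np, allocate *map and fill in *num_maps */
		return 0;
	}

	static void foo_dt_free_map(struct pinctrl_dev *pctldev,
				    struct pinctrl_map *map, unsigned num_maps)
	{
		kfree(map);
	}

	static struct pinctrl_ops foo_pctl_ops = {
		/* .get_groups_count, .get_group_name and .get_group_pins
		 * are also mandatory, see pinctrl_check_ops() */
		.dt_node_to_map	= foo_dt_node_to_map,
		.dt_free_map	= foo_dt_free_map,
	};

The imx driver added later in this series (pinctrl-imx.c) implements this callback as imx_dt_node_to_map().
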
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 7321e86..43f474c 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -28,11 +28,17 @@
 	const struct pinconf_ops *ops = pctldev->desc->confops;
 
 	/* We must be able to read out pin status */
-	if (!ops->pin_config_get && !ops->pin_config_group_get)
+	if (!ops->pin_config_get && !ops->pin_config_group_get) {
+		dev_err(pctldev->dev,
+			"pinconf must be able to read out pin status\n");
 		return -EINVAL;
+	}
 	/* We have to be able to config the pins in SOME way */
-	if (!ops->pin_config_set && !ops->pin_config_group_set)
+	if (!ops->pin_config_set && !ops->pin_config_group_set) {
+		dev_err(pctldev->dev,
+			"pinconf has to be able to set a pin's config\n");
 		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -44,9 +50,9 @@
 		return -EINVAL;
 	}
 
-	if (map->data.configs.num_configs &&
+	if (!map->data.configs.num_configs ||
 			!map->data.configs.configs) {
-		pr_err("failed to register map %s (%d): no configs ptr given\n",
+		pr_err("failed to register map %s (%d): no configs given\n",
 		       map->name, i);
 		return -EINVAL;
 	}
@@ -379,8 +385,16 @@
 
 void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
 {
+	struct pinctrl_dev *pctldev;
+	const struct pinconf_ops *confops;
 	int i;
 
+	pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
+	if (pctldev)
+		confops = pctldev->desc->confops;
+	else
+		confops = NULL;
+
 	switch (map->type) {
 	case PIN_MAP_TYPE_CONFIGS_PIN:
 		seq_printf(s, "pin ");
@@ -394,8 +408,15 @@
 
 	seq_printf(s, "%s\n", map->data.configs.group_or_pin);
 
-	for (i = 0; i < map->data.configs.num_configs; i++)
-		seq_printf(s, "config %08lx\n", map->data.configs.configs[i]);
+	for (i = 0; i < map->data.configs.num_configs; i++) {
+		seq_printf(s, "config ");
+		if (confops && confops->pin_config_config_dbg_show)
+			confops->pin_config_config_dbg_show(pctldev, s,
+						map->data.configs.configs[i]);
+		else
+			seq_printf(s, "%08lx", map->data.configs.configs[i]);
+		seq_printf(s, "\n");
+	}
 }
 
 void pinconf_show_setting(struct seq_file *s,
@@ -403,6 +424,7 @@
 {
 	struct pinctrl_dev *pctldev = setting->pctldev;
 	const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+	const struct pinconf_ops *confops = pctldev->desc->confops;
 	struct pin_desc *desc;
 	int i;
 
@@ -428,8 +450,15 @@
 	 * FIXME: We should really get the pin controller to dump the config
 	 * values, so they can be decoded to something meaningful.
 	 */
-	for (i = 0; i < setting->data.configs.num_configs; i++)
-		seq_printf(s, " %08lx", setting->data.configs.configs[i]);
+	for (i = 0; i < setting->data.configs.num_configs; i++) {
+		seq_printf(s, " ");
+		if (confops && confops->pin_config_config_dbg_show)
+			confops->pin_config_config_dbg_show(pctldev, s,
+				setting->data.configs.configs[i]);
+		else
+			seq_printf(s, "%08lx",
+				   setting->data.configs.configs[i]);
+	}
 
 	seq_printf(s, "\n");
 }
@@ -448,10 +477,14 @@
 static int pinconf_pins_show(struct seq_file *s, void *what)
 {
 	struct pinctrl_dev *pctldev = s->private;
+	const struct pinconf_ops *ops = pctldev->desc->confops;
 	unsigned i, pin;
 
+	if (!ops || !ops->pin_config_get)
+		return 0;
+
 	seq_puts(s, "Pin config settings per pin\n");
-	seq_puts(s, "Format: pin (name): pinmux setting array\n");
+	seq_puts(s, "Format: pin (name): configs\n");
 
 	mutex_lock(&pinctrl_mutex);
 
@@ -495,17 +528,18 @@
 	struct pinctrl_dev *pctldev = s->private;
 	const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
 	const struct pinconf_ops *ops = pctldev->desc->confops;
+	unsigned ngroups = pctlops->get_groups_count(pctldev);
 	unsigned selector = 0;
 
 	if (!ops || !ops->pin_config_group_get)
 		return 0;
 
 	seq_puts(s, "Pin config settings per pin group\n");
-	seq_puts(s, "Format: group (name): pinmux setting array\n");
+	seq_puts(s, "Format: group (name): configs\n");
 
 	mutex_lock(&pinctrl_mutex);
 
-	while (pctlops->list_groups(pctldev, selector) >= 0) {
+	while (selector < ngroups) {
 		const char *gname = pctlops->get_group_name(pctldev, selector);
 
 		seq_printf(s, "%u (%s):", selector, gname);
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
index 54510de..e3ed8cb 100644
--- a/drivers/pinctrl/pinconf.h
+++ b/drivers/pinctrl/pinconf.h
@@ -19,11 +19,6 @@
 			  struct pinctrl_setting *setting);
 void pinconf_free_setting(struct pinctrl_setting const *setting);
 int pinconf_apply_setting(struct pinctrl_setting const *setting);
-void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map);
-void pinconf_show_setting(struct seq_file *s,
-			  struct pinctrl_setting const *setting);
-void pinconf_init_device_debugfs(struct dentry *devroot,
-				 struct pinctrl_dev *pctldev);
 
 /*
  * You will only be interested in these if you're using PINCONF
@@ -61,6 +56,18 @@
 	return 0;
 }
 
+#endif
+
+#if defined(CONFIG_PINCONF) && defined(CONFIG_DEBUG_FS)
+
+void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map);
+void pinconf_show_setting(struct seq_file *s,
+			  struct pinctrl_setting const *setting);
+void pinconf_init_device_debugfs(struct dentry *devroot,
+				 struct pinctrl_dev *pctldev);
+
+#else
+
 static inline void pinconf_show_map(struct seq_file *s,
 				    struct pinctrl_map const *map)
 {
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 0797eba..55697a5 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -174,7 +174,7 @@
 
 
 /* Initial configuration */
-static const struct __initdata u300_gpio_confdata
+static const struct __initconst u300_gpio_confdata
 bs335_gpio_config[BS335_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
 	/* Port 0, pins 0-7 */
 	{
@@ -255,7 +255,7 @@
 	}
 };
 
-static const struct __initdata u300_gpio_confdata
+static const struct __initconst u300_gpio_confdata
 bs365_gpio_config[BS365_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
 	/* Port 0, pins 0-7 */
 	{
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
new file mode 100644
index 0000000..f6e7c67
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -0,0 +1,620 @@
+/*
+ * Core driver for the imx pin controller
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "pinctrl-imx.h"
+
+#define IMX_PMX_DUMP(info, p, m, c, n)		\
+do {						\
+	int i, j;				\
+	pr_debug("Format: Pin Mux Config\n");	\
+	for (i = 0; i < n; i++) {		\
+		j = p[i];			\
+		pr_debug("%s %d 0x%lx\n",	\
+			info->pins[j].name,	\
+			m[i], c[i]);		\
+	}					\
+} while (0)
+
+/* The bits in the CONFIG cell, as defined in the binding doc */
+#define IMX_NO_PAD_CTL	0x80000000	/* no pin config needed */
+#define IMX_PAD_SION 0x40000000		/* set SION */
+
+/**
+ * struct imx_pinctrl - per-controller driver state
+ * @dev: a pointer back to containing device
+ * @pctl: the pin controller device registered with the pinctrl core
+ * @base: the offset to the controller in virtual memory
+ * @info: SoC-specific pin and register descriptions
+ */
+struct imx_pinctrl {
+	struct device *dev;
+	struct pinctrl_dev *pctl;
+	void __iomem *base;
+	const struct imx_pinctrl_soc_info *info;
+};
+
+static const struct imx_pin_reg *imx_find_pin_reg(
+				const struct imx_pinctrl_soc_info *info,
+				unsigned pin, bool is_mux, unsigned mux)
+{
+	const struct imx_pin_reg *pin_reg;
+	int i;
+
+	for (i = 0; i < info->npin_regs; i++) {
+		pin_reg = &info->pin_regs[i];
+		if (pin_reg->pid != pin)
+			continue;
+		/* any entry for this pin will do when only the config register is wanted */
+		if (!is_mux)
+			return pin_reg;
+		if (pin_reg->mux_mode == (mux & IMX_MUX_MASK))
+			return pin_reg;
+	}
+
+	/* reaching here means no matching entry was found */
+	dev_err(info->dev, "Pin(%s): unable to find pin reg map\n",
+		info->pins[pin].name);
+
+	return NULL;
+}
+
+static inline const struct imx_pin_group *imx_pinctrl_find_group_by_name(
+				const struct imx_pinctrl_soc_info *info,
+				const char *name)
+{
+	const struct imx_pin_group *grp = NULL;
+	int i;
+
+	for (i = 0; i < info->ngroups; i++) {
+		if (!strcmp(info->groups[i].name, name)) {
+			grp = &info->groups[i];
+			break;
+		}
+	}
+
+	return grp;
+}
+
+static int imx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+
+	return info->ngroups;
+}
+
+static const char *imx_get_group_name(struct pinctrl_dev *pctldev,
+				       unsigned selector)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+
+	return info->groups[selector].name;
+}
+
+static int imx_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+			       const unsigned **pins,
+			       unsigned *npins)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+
+	if (selector >= info->ngroups)
+		return -EINVAL;
+
+	*pins = info->groups[selector].pins;
+	*npins = info->groups[selector].npins;
+
+	return 0;
+}
+
+static void imx_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+		   unsigned offset)
+{
+	seq_printf(s, "%s", dev_name(pctldev->dev));
+}
+
+static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
+			struct device_node *np,
+			struct pinctrl_map **map, unsigned *num_maps)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+	const struct imx_pin_group *grp;
+	struct pinctrl_map *new_map;
+	struct device_node *parent;
+	int map_num = 1;
+	int i, j;
+
+	/*
+	 * first find the group of this node and check if we need create
+	 * config maps for pins
+	 */
+	grp = imx_pinctrl_find_group_by_name(info, np->name);
+	if (!grp) {
+		dev_err(info->dev, "unable to find group for node %s\n",
+			np->name);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < grp->npins; i++) {
+		if (!(grp->configs[i] & IMX_NO_PAD_CTL))
+			map_num++;
+	}
+
+	new_map = kmalloc(sizeof(struct pinctrl_map) * map_num, GFP_KERNEL);
+	if (!new_map)
+		return -ENOMEM;
+
+	*map = new_map;
+	*num_maps = map_num;
+
+	/* create mux map */
+	parent = of_get_parent(np);
+	if (!parent) {
+		kfree(new_map);
+		return -EINVAL;
+	}
+	new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
+	new_map[0].data.mux.function = parent->name;
+	new_map[0].data.mux.group = np->name;
+	of_node_put(parent);
+
+	/* create config map */
+	new_map++;
+	for (i = 0, j = 0; i < grp->npins; i++) {
+		if (!(grp->configs[i] & IMX_NO_PAD_CTL)) {
+			/* use a separate index so skipped pins leave no holes in the map */
+			new_map[j].type = PIN_MAP_TYPE_CONFIGS_PIN;
+			new_map[j].data.configs.group_or_pin =
+					pin_get_name(pctldev, grp->pins[i]);
+			new_map[j].data.configs.configs = &grp->configs[i];
+			new_map[j].data.configs.num_configs = 1;
+			j++;
+		}
+	}
+
+	dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
+		(*map)->data.mux.function, (*map)->data.mux.group, map_num);
+
+	return 0;
+}
+
+static void imx_dt_free_map(struct pinctrl_dev *pctldev,
+				struct pinctrl_map *map, unsigned num_maps)
+{
+	/* the whole map array comes from a single kmalloc() in imx_dt_node_to_map() */
+	kfree(map);
+}
+
+static struct pinctrl_ops imx_pctrl_ops = {
+	.get_groups_count = imx_get_groups_count,
+	.get_group_name = imx_get_group_name,
+	.get_group_pins = imx_get_group_pins,
+	.pin_dbg_show = imx_pin_dbg_show,
+	.dt_node_to_map = imx_dt_node_to_map,
+	.dt_free_map = imx_dt_free_map,
+};
+
+static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
+			   unsigned group)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+	const struct imx_pin_reg *pin_reg;
+	const unsigned *pins, *mux;
+	unsigned int npins, pin_id;
+	int i;
+
+	/*
+	 * Configure the mux mode for each pin in the group for a specific
+	 * function.
+	 */
+	pins = info->groups[group].pins;
+	npins = info->groups[group].npins;
+	mux = info->groups[group].mux_mode;
+
+	WARN_ON(!pins || !npins || !mux);
+
+	dev_dbg(ipctl->dev, "enable function %s group %s\n",
+		info->functions[selector].name, info->groups[group].name);
+
+	for (i = 0; i < npins; i++) {
+		pin_id = pins[i];
+
+		pin_reg = imx_find_pin_reg(info, pin_id, 1, mux[i]);
+		if (!pin_reg)
+			return -EINVAL;
+
+		if (!pin_reg->mux_reg) {
+			dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
+				info->pins[pin_id].name);
+			return -EINVAL;
+		}
+
+		writel(mux[i], ipctl->base + pin_reg->mux_reg);
+		dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%x\n",
+			pin_reg->mux_reg, mux[i]);
+
+		/* some pins also need select input setting, set it if found */
+		if (pin_reg->input_reg) {
+			writel(pin_reg->input_val, ipctl->base + pin_reg->input_reg);
+			dev_dbg(ipctl->dev,
+				"==>select_input: offset 0x%x val 0x%x\n",
+				pin_reg->input_reg, pin_reg->input_val);
+		}
+	}
+
+	return 0;
+}
+
+static int imx_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+
+	return info->nfunctions;
+}
+
+static const char *imx_pmx_get_func_name(struct pinctrl_dev *pctldev,
+					  unsigned selector)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+
+	return info->functions[selector].name;
+}
+
+static int imx_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+			       const char * const **groups,
+			       unsigned * const num_groups)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+
+	*groups = info->functions[selector].groups;
+	*num_groups = info->functions[selector].num_groups;
+
+	return 0;
+}
+
+static struct pinmux_ops imx_pmx_ops = {
+	.get_functions_count = imx_pmx_get_funcs_count,
+	.get_function_name = imx_pmx_get_func_name,
+	.get_function_groups = imx_pmx_get_groups,
+	.enable = imx_pmx_enable,
+};
+
+static int imx_pinconf_get(struct pinctrl_dev *pctldev,
+			     unsigned pin_id, unsigned long *config)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+	const struct imx_pin_reg *pin_reg;
+
+	pin_reg = imx_find_pin_reg(info, pin_id, 0, 0);
+	if (!pin_reg)
+		return -EINVAL;
+
+	if (!pin_reg->conf_reg) {
+		dev_err(info->dev, "Pin(%s) does not support config function\n",
+			info->pins[pin_id].name);
+		return -EINVAL;
+	}
+
+	*config = readl(ipctl->base + pin_reg->conf_reg);
+
+	return 0;
+}
+
+static int imx_pinconf_set(struct pinctrl_dev *pctldev,
+			     unsigned pin_id, unsigned long config)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+	const struct imx_pin_reg *pin_reg;
+
+	pin_reg = imx_find_pin_reg(info, pin_id, 0, 0);
+	if (!pin_reg)
+		return -EINVAL;
+
+	if (!pin_reg->conf_reg) {
+		dev_err(info->dev, "Pin(%s) does not support config function\n",
+			info->pins[pin_id].name);
+		return -EINVAL;
+	}
+
+	dev_dbg(ipctl->dev, "pinconf set pin %s\n",
+		info->pins[pin_id].name);
+
+	writel(config, ipctl->base + pin_reg->conf_reg);
+	dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%lx\n",
+		pin_reg->conf_reg, config);
+
+	return 0;
+}
+
+static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+				   struct seq_file *s, unsigned pin_id)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+	const struct imx_pin_reg *pin_reg;
+	unsigned long config;
+
+	pin_reg = imx_find_pin_reg(info, pin_id, 0, 0);
+	if (!pin_reg || !pin_reg->conf_reg) {
+		seq_printf(s, "N/A");
+		return;
+	}
+
+	config = readl(ipctl->base + pin_reg->conf_reg);
+	seq_printf(s, "0x%lx", config);
+}
+
+static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
+					 struct seq_file *s, unsigned group)
+{
+	struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+	const struct imx_pinctrl_soc_info *info = ipctl->info;
+	struct imx_pin_group *grp;
+	unsigned long config;
+	const char *name;
+	int i, ret;
+
+	if (group >= info->ngroups)
+		return;
+
+	seq_printf(s, "\n");
+	grp = &info->groups[group];
+	for (i = 0; i < grp->npins; i++) {
+		name = pin_get_name(pctldev, grp->pins[i]);
+		ret = imx_pinconf_get(pctldev, grp->pins[i], &config);
+		if (ret)
+			return;
+		seq_printf(s, "%s: 0x%lx ", name, config);
+	}
+}
+
+static struct pinconf_ops imx_pinconf_ops = {
+	.pin_config_get = imx_pinconf_get,
+	.pin_config_set = imx_pinconf_set,
+	.pin_config_dbg_show = imx_pinconf_dbg_show,
+	.pin_config_group_dbg_show = imx_pinconf_group_dbg_show,
+};
+
+static struct pinctrl_desc imx_pinctrl_desc = {
+	.pctlops = &imx_pctrl_ops,
+	.pmxops = &imx_pmx_ops,
+	.confops = &imx_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+/* decode the pin id and mux mode from a pin function id found in the device tree */
+static int imx_pinctrl_get_pin_id_and_mux(const struct imx_pinctrl_soc_info *info,
+				unsigned int pin_func_id, unsigned int *pin_id,
+				unsigned int *mux)
+{
+	if (pin_func_id >= info->npin_regs)
+		return -EINVAL;
+
+	*pin_id = info->pin_regs[pin_func_id].pid;
+	*mux = info->pin_regs[pin_func_id].mux_mode;
+
+	return 0;
+}
+
+static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
+				struct imx_pin_group *grp,
+				struct imx_pinctrl_soc_info *info,
+				u32 index)
+{
+	unsigned int pin_func_id;
+	int ret, size;
+	const __be32 *list;
+	int i, j;
+	u32 config;
+
+	dev_dbg(info->dev, "group(%d): %s\n", index, np->name);
+
+	/* Initialise group */
+	grp->name = np->name;
+
+	/*
+	 * the binding format is fsl,pins = <PIN_FUNC_ID CONFIG ...>,
+	 * do a sanity check and calculate the number of pins
+	 */
+	list = of_get_property(np, "fsl,pins", &size);
+	if (!list) {
+		dev_err(info->dev, "no fsl,pins property in node %s\n", np->name);
+		return -EINVAL;
+	}
+	size /= sizeof(*list);
+	if (!size || size % 2) {
+		dev_err(info->dev, "wrong pin count: pins and configs must come in pairs\n");
+		return -EINVAL;
+	}
+
+	grp->npins = size / 2;
+	grp->pins = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
+				GFP_KERNEL);
+	grp->mux_mode = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
+				GFP_KERNEL);
+	grp->configs = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned long),
+				GFP_KERNEL);
+	if (!grp->pins || !grp->mux_mode || !grp->configs)
+		return -ENOMEM;
+	for (i = 0, j = 0; i < size; i += 2, j++) {
+		pin_func_id = be32_to_cpu(*list++);
+		ret = imx_pinctrl_get_pin_id_and_mux(info, pin_func_id,
+					&grp->pins[j], &grp->mux_mode[j]);
+		if (ret) {
+			dev_err(info->dev, "invalid pin function id %u\n", pin_func_id);
+			return -EINVAL;
+		}
+		/* SION bit is in mux register */
+		config = be32_to_cpu(*list++);
+		if (config & IMX_PAD_SION)
+			grp->mux_mode[j] |= IOMUXC_CONFIG_SION;
+		grp->configs[j] = config & ~IMX_PAD_SION;
+	}
+
+#ifdef DEBUG
+	IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
+#endif
+	return 0;
+}
+
+static int __devinit imx_pinctrl_parse_functions(struct device_node *np,
+			struct imx_pinctrl_soc_info *info, u32 index)
+{
+	struct device_node *child;
+	struct imx_pmx_func *func;
+	struct imx_pin_group *grp;
+	int ret;
+	static u32 grp_index;
+	u32 i = 0;
+
+	dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);
+
+	func = &info->functions[index];
+
+	/* Initialise function */
+	func->name = np->name;
+	func->num_groups = of_get_child_count(np);
+	if (func->num_groups <= 0) {
+		dev_err(info->dev, "no groups defined\n");
+		return -EINVAL;
+	}
+	func->groups = devm_kzalloc(info->dev,
+			func->num_groups * sizeof(char *), GFP_KERNEL);
+	if (!func->groups)
+		return -ENOMEM;
+
+	for_each_child_of_node(np, child) {
+		func->groups[i] = child->name;
+		grp = &info->groups[grp_index++];
+		ret = imx_pinctrl_parse_groups(child, grp, info, i++);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int __devinit imx_pinctrl_probe_dt(struct platform_device *pdev,
+				struct imx_pinctrl_soc_info *info)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *child;
+	int ret;
+	u32 nfuncs = 0;
+	u32 i = 0;
+
+	if (!np)
+		return -ENODEV;
+
+	nfuncs = of_get_child_count(np);
+	if (nfuncs <= 0) {
+		dev_err(&pdev->dev, "no functions defined\n");
+		return -EINVAL;
+	}
+
+	info->nfunctions = nfuncs;
+	info->functions = devm_kzalloc(&pdev->dev, nfuncs * sizeof(struct imx_pmx_func),
+					GFP_KERNEL);
+	if (!info->functions)
+		return -ENOMEM;
+
+	info->ngroups = 0;
+	for_each_child_of_node(np, child)
+		info->ngroups += of_get_child_count(child);
+	info->groups = devm_kzalloc(&pdev->dev, info->ngroups * sizeof(struct imx_pin_group),
+					GFP_KERNEL);
+	if (!info->groups)
+		return -ENOMEM;
+
+	for_each_child_of_node(np, child) {
+		ret = imx_pinctrl_parse_functions(child, info, i++);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to parse function\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+int __devinit imx_pinctrl_probe(struct platform_device *pdev,
+				struct imx_pinctrl_soc_info *info)
+{
+	struct imx_pinctrl *ipctl;
+	struct resource *res;
+	int ret;
+
+	if (!info || !info->pins || !info->npins
+		  || !info->pin_regs || !info->npin_regs) {
+		dev_err(&pdev->dev, "wrong pinctrl info\n");
+		return -EINVAL;
+	}
+	info->dev = &pdev->dev;
+
+	/* Create state holders etc for this driver */
+	ipctl = devm_kzalloc(&pdev->dev, sizeof(*ipctl), GFP_KERNEL);
+	if (!ipctl)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENOENT;
+
+	ipctl->base = devm_request_and_ioremap(&pdev->dev, res);
+	if (!ipctl->base)
+		return -EBUSY;
+
+	imx_pinctrl_desc.name = dev_name(&pdev->dev);
+	imx_pinctrl_desc.pins = info->pins;
+	imx_pinctrl_desc.npins = info->npins;
+
+	ret = imx_pinctrl_probe_dt(pdev, info);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to probe dt properties\n");
+		return ret;
+	}
+
+	ipctl->info = info;
+	ipctl->dev = info->dev;
+	platform_set_drvdata(pdev, ipctl);
+	ipctl->pctl = pinctrl_register(&imx_pinctrl_desc, &pdev->dev, ipctl);
+	if (!ipctl->pctl) {
+		dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
+		return -EINVAL;
+	}
+
+	dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
+
+	return 0;
+}
+
+int __devexit imx_pinctrl_remove(struct platform_device *pdev)
+{
+	struct imx_pinctrl *ipctl = platform_get_drvdata(pdev);
+
+	pinctrl_unregister(ipctl->pctl);
+
+	return 0;
+}
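
The SION and NO_PAD_CTL handling in pinctrl-imx.c above is easiest to follow with concrete numbers. The standalone sketch below walks one hypothetical <PIN_FUNC_ID CONFIG> pair through the same decode steps performed by imx_pinctrl_parse_groups() and imx_pmx_enable(); the mux mode 0x3 and pad value 0x1b0b0 are invented for illustration and do not come from this series.

#include <stdio.h>

#define IMX_PAD_SION		0x40000000	/* CONFIG cell: also set SION in the mux register */
#define IMX_NO_PAD_CTL		0x80000000	/* CONFIG cell: skip the pad-control write */
#define IOMUXC_CONFIG_SION	(0x1 << 4)	/* SION bit position in the mux register */

int main(void)
{
	/* hypothetical values for one <PIN_FUNC_ID CONFIG> pair */
	unsigned int mux_mode = 0x3;			/* mux mode looked up in the pin_reg table */
	unsigned long config  = IMX_PAD_SION | 0x1b0b0;	/* pad-control word from the CONFIG cell */

	/* same split as imx_pinctrl_parse_groups(): SION travels with the mux value */
	if (config & IMX_PAD_SION)
		mux_mode |= IOMUXC_CONFIG_SION;
	config &= ~IMX_PAD_SION;

	printf("value written to the mux register   : 0x%x\n", mux_mode);

	/* same test as imx_dt_node_to_map(): NO_PAD_CTL suppresses the config map entirely */
	if (config & IMX_NO_PAD_CTL)
		printf("pad-control register                : left untouched\n");
	else
		printf("value written to the config register: 0x%lx\n", config);

	return 0;
}
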
diff --git a/drivers/pinctrl/pinctrl-imx.h b/drivers/pinctrl/pinctrl-imx.h
new file mode 100644
index 0000000..9b65e78
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx.h
@@ -0,0 +1,106 @@
+/*
+ * IMX pinmux core definitions
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DRIVERS_PINCTRL_IMX_H
+#define __DRIVERS_PINCTRL_IMX_H
+
+struct platform_device;
+
+/**
+ * struct imx_pin_group - describes an IMX pin group
+ * @name: the name of this specific pin group
+ * @pins: an array of discrete physical pins used in this group, taken
+ *	from the driver-local pin enumeration space
+ * @npins: the number of pins in this group array, i.e. the number of
+ *	elements in .pins so we can iterate over that array
+ * @mux_mode: the mux mode for each pin in this group. The size of this
+ *	array is the same as pins.
+ * @configs: the config for each pin in this group. The size of this
+ *	array is the same as pins.
+ */
+struct imx_pin_group {
+	const char *name;
+	unsigned int *pins;
+	unsigned npins;
+	unsigned int *mux_mode;
+	unsigned long *configs;
+};
+
+/**
+ * struct imx_pmx_func - describes IMX pinmux functions
+ * @name: the name of this specific function
+ * @groups: corresponding pin groups
+ * @num_groups: the number of groups
+ */
+struct imx_pmx_func {
+	const char *name;
+	const char **groups;
+	unsigned num_groups;
+};
+
+/**
+ * struct imx_pin_reg - describe a pin reg map
+ * The last 3 members are used for select input setting
+ * @pid: pin id
+ * @mux_reg: mux register offset
+ * @conf_reg: config register offset
+ * @mux_mode: mux mode
+ * @input_reg: select input register offset for this mux if any
+ *  0 if no select input setting needed.
+ * @input_val: the value set to select input register
+ */
+struct imx_pin_reg {
+	u16 pid;
+	u16 mux_reg;
+	u16 conf_reg;
+	u8 mux_mode;
+	u16 input_reg;
+	u8 input_val;
+};
+
+struct imx_pinctrl_soc_info {
+	struct device *dev;
+	const struct pinctrl_pin_desc *pins;
+	unsigned int npins;
+	const struct imx_pin_reg *pin_regs;
+	unsigned int npin_regs;
+	struct imx_pin_group *groups;
+	unsigned int ngroups;
+	struct imx_pmx_func *functions;
+	unsigned int nfunctions;
+};
+
+#define NO_MUX		0x0
+#define NO_PAD		0x0
+
+#define IMX_PIN_REG(id, conf, mux, mode, input, val)	\
+	{						\
+		.pid = id,				\
+		.conf_reg = conf,			\
+		.mux_reg = mux,				\
+		.mux_mode  = mode,			\
+		.input_reg = input,			\
+		.input_val = val,			\
+	}
+
+#define IMX_PINCTRL_PIN(pin) PINCTRL_PIN(pin, #pin)
+
+#define PAD_CTL_MASK(len)	((1 << len) - 1)
+#define IMX_MUX_MASK	0x7
+#define IOMUXC_CONFIG_SION	(0x1 << 4)
+
+int imx_pinctrl_probe(struct platform_device *pdev,
+			struct imx_pinctrl_soc_info *info);
+int imx_pinctrl_remove(struct platform_device *pdev);
+#endif /* __DRIVERS_PINCTRL_IMX_H */
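
The structures and the IMX_PIN_REG()/IMX_PINCTRL_PIN() helpers above are meant to be filled in by a small SoC-specific file, which is what the pinctrl-imx51.c addition below does. A minimal sketch of that pattern, with an invented "mxnn" SoC name and made-up register offsets (error handling and the platform_driver boilerplate omitted), might look like this:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>

#include "pinctrl-imx.h"

/* hypothetical pad enumeration for an imaginary "mxnn" SoC */
enum mxnn_pad_ids {
	MXNN_PAD_UART1_TXD = 1,
};

static const struct pinctrl_pin_desc mxnn_pads[] = {
	IMX_PINCTRL_PIN(MXNN_PAD_UART1_TXD),
};

/* pad id, config reg, mux reg, mux mode, select-input reg, select-input value
 * (the offsets below are invented, not taken from any reference manual)
 */
static struct imx_pin_reg mxnn_pin_regs[] = {
	IMX_PIN_REG(MXNN_PAD_UART1_TXD, 0x3f0, 0x05c, 0, 0x000, 0),
};

static struct imx_pinctrl_soc_info mxnn_pinctrl_info = {
	.pins = mxnn_pads,
	.npins = ARRAY_SIZE(mxnn_pads),
	.pin_regs = mxnn_pin_regs,
	.npin_regs = ARRAY_SIZE(mxnn_pin_regs),
};

static int __devinit mxnn_pinctrl_probe(struct platform_device *pdev)
{
	/* the shared core does the resource mapping, DT parsing and registration */
	return imx_pinctrl_probe(pdev, &mxnn_pinctrl_info);
}

The matching imx_pinctrl_remove() call and the of_device_id table would follow the same shape as in the i.MX23/i.MX28 drivers further down.
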
diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/pinctrl-imx23.c
new file mode 100644
index 0000000..75d3eff
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx23.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mxs.h"
+
+enum imx23_pin_enum {
+	GPMI_D00	= PINID(0, 0),
+	GPMI_D01	= PINID(0, 1),
+	GPMI_D02	= PINID(0, 2),
+	GPMI_D03	= PINID(0, 3),
+	GPMI_D04	= PINID(0, 4),
+	GPMI_D05	= PINID(0, 5),
+	GPMI_D06	= PINID(0, 6),
+	GPMI_D07	= PINID(0, 7),
+	GPMI_D08	= PINID(0, 8),
+	GPMI_D09	= PINID(0, 9),
+	GPMI_D10	= PINID(0, 10),
+	GPMI_D11	= PINID(0, 11),
+	GPMI_D12	= PINID(0, 12),
+	GPMI_D13	= PINID(0, 13),
+	GPMI_D14	= PINID(0, 14),
+	GPMI_D15	= PINID(0, 15),
+	GPMI_CLE	= PINID(0, 16),
+	GPMI_ALE	= PINID(0, 17),
+	GPMI_CE2N	= PINID(0, 18),
+	GPMI_RDY0	= PINID(0, 19),
+	GPMI_RDY1	= PINID(0, 20),
+	GPMI_RDY2	= PINID(0, 21),
+	GPMI_RDY3	= PINID(0, 22),
+	GPMI_WPN	= PINID(0, 23),
+	GPMI_WRN	= PINID(0, 24),
+	GPMI_RDN	= PINID(0, 25),
+	AUART1_CTS	= PINID(0, 26),
+	AUART1_RTS	= PINID(0, 27),
+	AUART1_RX	= PINID(0, 28),
+	AUART1_TX	= PINID(0, 29),
+	I2C_SCL		= PINID(0, 30),
+	I2C_SDA		= PINID(0, 31),
+	LCD_D00		= PINID(1, 0),
+	LCD_D01		= PINID(1, 1),
+	LCD_D02		= PINID(1, 2),
+	LCD_D03		= PINID(1, 3),
+	LCD_D04		= PINID(1, 4),
+	LCD_D05		= PINID(1, 5),
+	LCD_D06		= PINID(1, 6),
+	LCD_D07		= PINID(1, 7),
+	LCD_D08		= PINID(1, 8),
+	LCD_D09		= PINID(1, 9),
+	LCD_D10		= PINID(1, 10),
+	LCD_D11		= PINID(1, 11),
+	LCD_D12		= PINID(1, 12),
+	LCD_D13		= PINID(1, 13),
+	LCD_D14		= PINID(1, 14),
+	LCD_D15		= PINID(1, 15),
+	LCD_D16		= PINID(1, 16),
+	LCD_D17		= PINID(1, 17),
+	LCD_RESET	= PINID(1, 18),
+	LCD_RS		= PINID(1, 19),
+	LCD_WR		= PINID(1, 20),
+	LCD_CS		= PINID(1, 21),
+	LCD_DOTCK	= PINID(1, 22),
+	LCD_ENABLE	= PINID(1, 23),
+	LCD_HSYNC	= PINID(1, 24),
+	LCD_VSYNC	= PINID(1, 25),
+	PWM0		= PINID(1, 26),
+	PWM1		= PINID(1, 27),
+	PWM2		= PINID(1, 28),
+	PWM3		= PINID(1, 29),
+	PWM4		= PINID(1, 30),
+	SSP1_CMD	= PINID(2, 0),
+	SSP1_DETECT	= PINID(2, 1),
+	SSP1_DATA0	= PINID(2, 2),
+	SSP1_DATA1	= PINID(2, 3),
+	SSP1_DATA2	= PINID(2, 4),
+	SSP1_DATA3	= PINID(2, 5),
+	SSP1_SCK	= PINID(2, 6),
+	ROTARYA		= PINID(2, 7),
+	ROTARYB		= PINID(2, 8),
+	EMI_A00		= PINID(2, 9),
+	EMI_A01		= PINID(2, 10),
+	EMI_A02		= PINID(2, 11),
+	EMI_A03		= PINID(2, 12),
+	EMI_A04		= PINID(2, 13),
+	EMI_A05		= PINID(2, 14),
+	EMI_A06		= PINID(2, 15),
+	EMI_A07		= PINID(2, 16),
+	EMI_A08		= PINID(2, 17),
+	EMI_A09		= PINID(2, 18),
+	EMI_A10		= PINID(2, 19),
+	EMI_A11		= PINID(2, 20),
+	EMI_A12		= PINID(2, 21),
+	EMI_BA0		= PINID(2, 22),
+	EMI_BA1		= PINID(2, 23),
+	EMI_CASN	= PINID(2, 24),
+	EMI_CE0N	= PINID(2, 25),
+	EMI_CE1N	= PINID(2, 26),
+	GPMI_CE1N	= PINID(2, 27),
+	GPMI_CE0N	= PINID(2, 28),
+	EMI_CKE		= PINID(2, 29),
+	EMI_RASN	= PINID(2, 30),
+	EMI_WEN		= PINID(2, 31),
+	EMI_D00		= PINID(3, 0),
+	EMI_D01		= PINID(3, 1),
+	EMI_D02		= PINID(3, 2),
+	EMI_D03		= PINID(3, 3),
+	EMI_D04		= PINID(3, 4),
+	EMI_D05		= PINID(3, 5),
+	EMI_D06		= PINID(3, 6),
+	EMI_D07		= PINID(3, 7),
+	EMI_D08		= PINID(3, 8),
+	EMI_D09		= PINID(3, 9),
+	EMI_D10		= PINID(3, 10),
+	EMI_D11		= PINID(3, 11),
+	EMI_D12		= PINID(3, 12),
+	EMI_D13		= PINID(3, 13),
+	EMI_D14		= PINID(3, 14),
+	EMI_D15		= PINID(3, 15),
+	EMI_DQM0	= PINID(3, 16),
+	EMI_DQM1	= PINID(3, 17),
+	EMI_DQS0	= PINID(3, 18),
+	EMI_DQS1	= PINID(3, 19),
+	EMI_CLK		= PINID(3, 20),
+	EMI_CLKN	= PINID(3, 21),
+};
+
+static const struct pinctrl_pin_desc imx23_pins[] = {
+	MXS_PINCTRL_PIN(GPMI_D00),
+	MXS_PINCTRL_PIN(GPMI_D01),
+	MXS_PINCTRL_PIN(GPMI_D02),
+	MXS_PINCTRL_PIN(GPMI_D03),
+	MXS_PINCTRL_PIN(GPMI_D04),
+	MXS_PINCTRL_PIN(GPMI_D05),
+	MXS_PINCTRL_PIN(GPMI_D06),
+	MXS_PINCTRL_PIN(GPMI_D07),
+	MXS_PINCTRL_PIN(GPMI_D08),
+	MXS_PINCTRL_PIN(GPMI_D09),
+	MXS_PINCTRL_PIN(GPMI_D10),
+	MXS_PINCTRL_PIN(GPMI_D11),
+	MXS_PINCTRL_PIN(GPMI_D12),
+	MXS_PINCTRL_PIN(GPMI_D13),
+	MXS_PINCTRL_PIN(GPMI_D14),
+	MXS_PINCTRL_PIN(GPMI_D15),
+	MXS_PINCTRL_PIN(GPMI_CLE),
+	MXS_PINCTRL_PIN(GPMI_ALE),
+	MXS_PINCTRL_PIN(GPMI_CE2N),
+	MXS_PINCTRL_PIN(GPMI_RDY0),
+	MXS_PINCTRL_PIN(GPMI_RDY1),
+	MXS_PINCTRL_PIN(GPMI_RDY2),
+	MXS_PINCTRL_PIN(GPMI_RDY3),
+	MXS_PINCTRL_PIN(GPMI_WPN),
+	MXS_PINCTRL_PIN(GPMI_WRN),
+	MXS_PINCTRL_PIN(GPMI_RDN),
+	MXS_PINCTRL_PIN(AUART1_CTS),
+	MXS_PINCTRL_PIN(AUART1_RTS),
+	MXS_PINCTRL_PIN(AUART1_RX),
+	MXS_PINCTRL_PIN(AUART1_TX),
+	MXS_PINCTRL_PIN(I2C_SCL),
+	MXS_PINCTRL_PIN(I2C_SDA),
+	MXS_PINCTRL_PIN(LCD_D00),
+	MXS_PINCTRL_PIN(LCD_D01),
+	MXS_PINCTRL_PIN(LCD_D02),
+	MXS_PINCTRL_PIN(LCD_D03),
+	MXS_PINCTRL_PIN(LCD_D04),
+	MXS_PINCTRL_PIN(LCD_D05),
+	MXS_PINCTRL_PIN(LCD_D06),
+	MXS_PINCTRL_PIN(LCD_D07),
+	MXS_PINCTRL_PIN(LCD_D08),
+	MXS_PINCTRL_PIN(LCD_D09),
+	MXS_PINCTRL_PIN(LCD_D10),
+	MXS_PINCTRL_PIN(LCD_D11),
+	MXS_PINCTRL_PIN(LCD_D12),
+	MXS_PINCTRL_PIN(LCD_D13),
+	MXS_PINCTRL_PIN(LCD_D14),
+	MXS_PINCTRL_PIN(LCD_D15),
+	MXS_PINCTRL_PIN(LCD_D16),
+	MXS_PINCTRL_PIN(LCD_D17),
+	MXS_PINCTRL_PIN(LCD_RESET),
+	MXS_PINCTRL_PIN(LCD_RS),
+	MXS_PINCTRL_PIN(LCD_WR),
+	MXS_PINCTRL_PIN(LCD_CS),
+	MXS_PINCTRL_PIN(LCD_DOTCK),
+	MXS_PINCTRL_PIN(LCD_ENABLE),
+	MXS_PINCTRL_PIN(LCD_HSYNC),
+	MXS_PINCTRL_PIN(LCD_VSYNC),
+	MXS_PINCTRL_PIN(PWM0),
+	MXS_PINCTRL_PIN(PWM1),
+	MXS_PINCTRL_PIN(PWM2),
+	MXS_PINCTRL_PIN(PWM3),
+	MXS_PINCTRL_PIN(PWM4),
+	MXS_PINCTRL_PIN(SSP1_CMD),
+	MXS_PINCTRL_PIN(SSP1_DETECT),
+	MXS_PINCTRL_PIN(SSP1_DATA0),
+	MXS_PINCTRL_PIN(SSP1_DATA1),
+	MXS_PINCTRL_PIN(SSP1_DATA2),
+	MXS_PINCTRL_PIN(SSP1_DATA3),
+	MXS_PINCTRL_PIN(SSP1_SCK),
+	MXS_PINCTRL_PIN(ROTARYA),
+	MXS_PINCTRL_PIN(ROTARYB),
+	MXS_PINCTRL_PIN(EMI_A00),
+	MXS_PINCTRL_PIN(EMI_A01),
+	MXS_PINCTRL_PIN(EMI_A02),
+	MXS_PINCTRL_PIN(EMI_A03),
+	MXS_PINCTRL_PIN(EMI_A04),
+	MXS_PINCTRL_PIN(EMI_A05),
+	MXS_PINCTRL_PIN(EMI_A06),
+	MXS_PINCTRL_PIN(EMI_A07),
+	MXS_PINCTRL_PIN(EMI_A08),
+	MXS_PINCTRL_PIN(EMI_A09),
+	MXS_PINCTRL_PIN(EMI_A10),
+	MXS_PINCTRL_PIN(EMI_A11),
+	MXS_PINCTRL_PIN(EMI_A12),
+	MXS_PINCTRL_PIN(EMI_BA0),
+	MXS_PINCTRL_PIN(EMI_BA1),
+	MXS_PINCTRL_PIN(EMI_CASN),
+	MXS_PINCTRL_PIN(EMI_CE0N),
+	MXS_PINCTRL_PIN(EMI_CE1N),
+	MXS_PINCTRL_PIN(GPMI_CE1N),
+	MXS_PINCTRL_PIN(GPMI_CE0N),
+	MXS_PINCTRL_PIN(EMI_CKE),
+	MXS_PINCTRL_PIN(EMI_RASN),
+	MXS_PINCTRL_PIN(EMI_WEN),
+	MXS_PINCTRL_PIN(EMI_D00),
+	MXS_PINCTRL_PIN(EMI_D01),
+	MXS_PINCTRL_PIN(EMI_D02),
+	MXS_PINCTRL_PIN(EMI_D03),
+	MXS_PINCTRL_PIN(EMI_D04),
+	MXS_PINCTRL_PIN(EMI_D05),
+	MXS_PINCTRL_PIN(EMI_D06),
+	MXS_PINCTRL_PIN(EMI_D07),
+	MXS_PINCTRL_PIN(EMI_D08),
+	MXS_PINCTRL_PIN(EMI_D09),
+	MXS_PINCTRL_PIN(EMI_D10),
+	MXS_PINCTRL_PIN(EMI_D11),
+	MXS_PINCTRL_PIN(EMI_D12),
+	MXS_PINCTRL_PIN(EMI_D13),
+	MXS_PINCTRL_PIN(EMI_D14),
+	MXS_PINCTRL_PIN(EMI_D15),
+	MXS_PINCTRL_PIN(EMI_DQM0),
+	MXS_PINCTRL_PIN(EMI_DQM1),
+	MXS_PINCTRL_PIN(EMI_DQS0),
+	MXS_PINCTRL_PIN(EMI_DQS1),
+	MXS_PINCTRL_PIN(EMI_CLK),
+	MXS_PINCTRL_PIN(EMI_CLKN),
+};
+
+static struct mxs_regs imx23_regs = {
+	.muxsel = 0x100,
+	.drive = 0x200,
+	.pull = 0x400,
+};
+
+static struct mxs_pinctrl_soc_data imx23_pinctrl_data = {
+	.regs = &imx23_regs,
+	.pins = imx23_pins,
+	.npins = ARRAY_SIZE(imx23_pins),
+};
+
+static int __devinit imx23_pinctrl_probe(struct platform_device *pdev)
+{
+	return mxs_pinctrl_probe(pdev, &imx23_pinctrl_data);
+}
+
+static struct of_device_id imx23_pinctrl_of_match[] __devinitdata = {
+	{ .compatible = "fsl,imx23-pinctrl", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx23_pinctrl_of_match);
+
+static struct platform_driver imx23_pinctrl_driver = {
+	.driver = {
+		.name = "imx23-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = imx23_pinctrl_of_match,
+	},
+	.probe = imx23_pinctrl_probe,
+	.remove = __devexit_p(mxs_pinctrl_remove),
+};
+
+static int __init imx23_pinctrl_init(void)
+{
+	return platform_driver_register(&imx23_pinctrl_driver);
+}
+arch_initcall(imx23_pinctrl_init);
+
+static void __exit imx23_pinctrl_exit(void)
+{
+	platform_driver_unregister(&imx23_pinctrl_driver);
+}
+module_exit(imx23_pinctrl_exit);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Freescale i.MX23 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/pinctrl-imx28.c
new file mode 100644
index 0000000..b973026
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx28.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mxs.h"
+
+enum imx28_pin_enum {
+	GPMI_D00	= PINID(0, 0),
+	GPMI_D01	= PINID(0, 1),
+	GPMI_D02	= PINID(0, 2),
+	GPMI_D03	= PINID(0, 3),
+	GPMI_D04	= PINID(0, 4),
+	GPMI_D05	= PINID(0, 5),
+	GPMI_D06	= PINID(0, 6),
+	GPMI_D07	= PINID(0, 7),
+	GPMI_CE0N	= PINID(0, 16),
+	GPMI_CE1N	= PINID(0, 17),
+	GPMI_CE2N	= PINID(0, 18),
+	GPMI_CE3N	= PINID(0, 19),
+	GPMI_RDY0	= PINID(0, 20),
+	GPMI_RDY1	= PINID(0, 21),
+	GPMI_RDY2	= PINID(0, 22),
+	GPMI_RDY3	= PINID(0, 23),
+	GPMI_RDN	= PINID(0, 24),
+	GPMI_WRN	= PINID(0, 25),
+	GPMI_ALE	= PINID(0, 26),
+	GPMI_CLE	= PINID(0, 27),
+	GPMI_RESETN	= PINID(0, 28),
+	LCD_D00		= PINID(1, 0),
+	LCD_D01		= PINID(1, 1),
+	LCD_D02		= PINID(1, 2),
+	LCD_D03		= PINID(1, 3),
+	LCD_D04		= PINID(1, 4),
+	LCD_D05		= PINID(1, 5),
+	LCD_D06		= PINID(1, 6),
+	LCD_D07		= PINID(1, 7),
+	LCD_D08		= PINID(1, 8),
+	LCD_D09		= PINID(1, 9),
+	LCD_D10		= PINID(1, 10),
+	LCD_D11		= PINID(1, 11),
+	LCD_D12		= PINID(1, 12),
+	LCD_D13		= PINID(1, 13),
+	LCD_D14		= PINID(1, 14),
+	LCD_D15		= PINID(1, 15),
+	LCD_D16		= PINID(1, 16),
+	LCD_D17		= PINID(1, 17),
+	LCD_D18		= PINID(1, 18),
+	LCD_D19		= PINID(1, 19),
+	LCD_D20		= PINID(1, 20),
+	LCD_D21		= PINID(1, 21),
+	LCD_D22		= PINID(1, 22),
+	LCD_D23		= PINID(1, 23),
+	LCD_RD_E	= PINID(1, 24),
+	LCD_WR_RWN	= PINID(1, 25),
+	LCD_RS		= PINID(1, 26),
+	LCD_CS		= PINID(1, 27),
+	LCD_VSYNC	= PINID(1, 28),
+	LCD_HSYNC	= PINID(1, 29),
+	LCD_DOTCLK	= PINID(1, 30),
+	LCD_ENABLE	= PINID(1, 31),
+	SSP0_DATA0	= PINID(2, 0),
+	SSP0_DATA1	= PINID(2, 1),
+	SSP0_DATA2	= PINID(2, 2),
+	SSP0_DATA3	= PINID(2, 3),
+	SSP0_DATA4	= PINID(2, 4),
+	SSP0_DATA5	= PINID(2, 5),
+	SSP0_DATA6	= PINID(2, 6),
+	SSP0_DATA7	= PINID(2, 7),
+	SSP0_CMD	= PINID(2, 8),
+	SSP0_DETECT	= PINID(2, 9),
+	SSP0_SCK	= PINID(2, 10),
+	SSP1_SCK	= PINID(2, 12),
+	SSP1_CMD	= PINID(2, 13),
+	SSP1_DATA0	= PINID(2, 14),
+	SSP1_DATA3	= PINID(2, 15),
+	SSP2_SCK	= PINID(2, 16),
+	SSP2_MOSI	= PINID(2, 17),
+	SSP2_MISO	= PINID(2, 18),
+	SSP2_SS0	= PINID(2, 19),
+	SSP2_SS1	= PINID(2, 20),
+	SSP2_SS2	= PINID(2, 21),
+	SSP3_SCK	= PINID(2, 24),
+	SSP3_MOSI	= PINID(2, 25),
+	SSP3_MISO	= PINID(2, 26),
+	SSP3_SS0	= PINID(2, 27),
+	AUART0_RX	= PINID(3, 0),
+	AUART0_TX	= PINID(3, 1),
+	AUART0_CTS	= PINID(3, 2),
+	AUART0_RTS	= PINID(3, 3),
+	AUART1_RX	= PINID(3, 4),
+	AUART1_TX	= PINID(3, 5),
+	AUART1_CTS	= PINID(3, 6),
+	AUART1_RTS	= PINID(3, 7),
+	AUART2_RX	= PINID(3, 8),
+	AUART2_TX	= PINID(3, 9),
+	AUART2_CTS	= PINID(3, 10),
+	AUART2_RTS	= PINID(3, 11),
+	AUART3_RX	= PINID(3, 12),
+	AUART3_TX	= PINID(3, 13),
+	AUART3_CTS	= PINID(3, 14),
+	AUART3_RTS	= PINID(3, 15),
+	PWM0		= PINID(3, 16),
+	PWM1		= PINID(3, 17),
+	PWM2		= PINID(3, 18),
+	SAIF0_MCLK	= PINID(3, 20),
+	SAIF0_LRCLK	= PINID(3, 21),
+	SAIF0_BITCLK	= PINID(3, 22),
+	SAIF0_SDATA0	= PINID(3, 23),
+	I2C0_SCL	= PINID(3, 24),
+	I2C0_SDA	= PINID(3, 25),
+	SAIF1_SDATA0	= PINID(3, 26),
+	SPDIF		= PINID(3, 27),
+	PWM3		= PINID(3, 28),
+	PWM4		= PINID(3, 29),
+	LCD_RESET	= PINID(3, 30),
+	ENET0_MDC	= PINID(4, 0),
+	ENET0_MDIO	= PINID(4, 1),
+	ENET0_RX_EN	= PINID(4, 2),
+	ENET0_RXD0	= PINID(4, 3),
+	ENET0_RXD1	= PINID(4, 4),
+	ENET0_TX_CLK	= PINID(4, 5),
+	ENET0_TX_EN	= PINID(4, 6),
+	ENET0_TXD0	= PINID(4, 7),
+	ENET0_TXD1	= PINID(4, 8),
+	ENET0_RXD2	= PINID(4, 9),
+	ENET0_RXD3	= PINID(4, 10),
+	ENET0_TXD2	= PINID(4, 11),
+	ENET0_TXD3	= PINID(4, 12),
+	ENET0_RX_CLK	= PINID(4, 13),
+	ENET0_COL	= PINID(4, 14),
+	ENET0_CRS	= PINID(4, 15),
+	ENET_CLK	= PINID(4, 16),
+	JTAG_RTCK	= PINID(4, 20),
+	EMI_D00		= PINID(5, 0),
+	EMI_D01		= PINID(5, 1),
+	EMI_D02		= PINID(5, 2),
+	EMI_D03		= PINID(5, 3),
+	EMI_D04		= PINID(5, 4),
+	EMI_D05		= PINID(5, 5),
+	EMI_D06		= PINID(5, 6),
+	EMI_D07		= PINID(5, 7),
+	EMI_D08		= PINID(5, 8),
+	EMI_D09		= PINID(5, 9),
+	EMI_D10		= PINID(5, 10),
+	EMI_D11		= PINID(5, 11),
+	EMI_D12		= PINID(5, 12),
+	EMI_D13		= PINID(5, 13),
+	EMI_D14		= PINID(5, 14),
+	EMI_D15		= PINID(5, 15),
+	EMI_ODT0	= PINID(5, 16),
+	EMI_DQM0	= PINID(5, 17),
+	EMI_ODT1	= PINID(5, 18),
+	EMI_DQM1	= PINID(5, 19),
+	EMI_DDR_OPEN_FB	= PINID(5, 20),
+	EMI_CLK		= PINID(5, 21),
+	EMI_DQS0	= PINID(5, 22),
+	EMI_DQS1	= PINID(5, 23),
+	EMI_DDR_OPEN	= PINID(5, 26),
+	EMI_A00		= PINID(6, 0),
+	EMI_A01		= PINID(6, 1),
+	EMI_A02		= PINID(6, 2),
+	EMI_A03		= PINID(6, 3),
+	EMI_A04		= PINID(6, 4),
+	EMI_A05		= PINID(6, 5),
+	EMI_A06		= PINID(6, 6),
+	EMI_A07		= PINID(6, 7),
+	EMI_A08		= PINID(6, 8),
+	EMI_A09		= PINID(6, 9),
+	EMI_A10		= PINID(6, 10),
+	EMI_A11		= PINID(6, 11),
+	EMI_A12		= PINID(6, 12),
+	EMI_A13		= PINID(6, 13),
+	EMI_A14		= PINID(6, 14),
+	EMI_BA0		= PINID(6, 16),
+	EMI_BA1		= PINID(6, 17),
+	EMI_BA2		= PINID(6, 18),
+	EMI_CASN	= PINID(6, 19),
+	EMI_RASN	= PINID(6, 20),
+	EMI_WEN		= PINID(6, 21),
+	EMI_CE0N	= PINID(6, 22),
+	EMI_CE1N	= PINID(6, 23),
+	EMI_CKE		= PINID(6, 24),
+};
+
+static const struct pinctrl_pin_desc imx28_pins[] = {
+	MXS_PINCTRL_PIN(GPMI_D00),
+	MXS_PINCTRL_PIN(GPMI_D01),
+	MXS_PINCTRL_PIN(GPMI_D02),
+	MXS_PINCTRL_PIN(GPMI_D03),
+	MXS_PINCTRL_PIN(GPMI_D04),
+	MXS_PINCTRL_PIN(GPMI_D05),
+	MXS_PINCTRL_PIN(GPMI_D06),
+	MXS_PINCTRL_PIN(GPMI_D07),
+	MXS_PINCTRL_PIN(GPMI_CE0N),
+	MXS_PINCTRL_PIN(GPMI_CE1N),
+	MXS_PINCTRL_PIN(GPMI_CE2N),
+	MXS_PINCTRL_PIN(GPMI_CE3N),
+	MXS_PINCTRL_PIN(GPMI_RDY0),
+	MXS_PINCTRL_PIN(GPMI_RDY1),
+	MXS_PINCTRL_PIN(GPMI_RDY2),
+	MXS_PINCTRL_PIN(GPMI_RDY3),
+	MXS_PINCTRL_PIN(GPMI_RDN),
+	MXS_PINCTRL_PIN(GPMI_WRN),
+	MXS_PINCTRL_PIN(GPMI_ALE),
+	MXS_PINCTRL_PIN(GPMI_CLE),
+	MXS_PINCTRL_PIN(GPMI_RESETN),
+	MXS_PINCTRL_PIN(LCD_D00),
+	MXS_PINCTRL_PIN(LCD_D01),
+	MXS_PINCTRL_PIN(LCD_D02),
+	MXS_PINCTRL_PIN(LCD_D03),
+	MXS_PINCTRL_PIN(LCD_D04),
+	MXS_PINCTRL_PIN(LCD_D05),
+	MXS_PINCTRL_PIN(LCD_D06),
+	MXS_PINCTRL_PIN(LCD_D07),
+	MXS_PINCTRL_PIN(LCD_D08),
+	MXS_PINCTRL_PIN(LCD_D09),
+	MXS_PINCTRL_PIN(LCD_D10),
+	MXS_PINCTRL_PIN(LCD_D11),
+	MXS_PINCTRL_PIN(LCD_D12),
+	MXS_PINCTRL_PIN(LCD_D13),
+	MXS_PINCTRL_PIN(LCD_D14),
+	MXS_PINCTRL_PIN(LCD_D15),
+	MXS_PINCTRL_PIN(LCD_D16),
+	MXS_PINCTRL_PIN(LCD_D17),
+	MXS_PINCTRL_PIN(LCD_D18),
+	MXS_PINCTRL_PIN(LCD_D19),
+	MXS_PINCTRL_PIN(LCD_D20),
+	MXS_PINCTRL_PIN(LCD_D21),
+	MXS_PINCTRL_PIN(LCD_D22),
+	MXS_PINCTRL_PIN(LCD_D23),
+	MXS_PINCTRL_PIN(LCD_RD_E),
+	MXS_PINCTRL_PIN(LCD_WR_RWN),
+	MXS_PINCTRL_PIN(LCD_RS),
+	MXS_PINCTRL_PIN(LCD_CS),
+	MXS_PINCTRL_PIN(LCD_VSYNC),
+	MXS_PINCTRL_PIN(LCD_HSYNC),
+	MXS_PINCTRL_PIN(LCD_DOTCLK),
+	MXS_PINCTRL_PIN(LCD_ENABLE),
+	MXS_PINCTRL_PIN(SSP0_DATA0),
+	MXS_PINCTRL_PIN(SSP0_DATA1),
+	MXS_PINCTRL_PIN(SSP0_DATA2),
+	MXS_PINCTRL_PIN(SSP0_DATA3),
+	MXS_PINCTRL_PIN(SSP0_DATA4),
+	MXS_PINCTRL_PIN(SSP0_DATA5),
+	MXS_PINCTRL_PIN(SSP0_DATA6),
+	MXS_PINCTRL_PIN(SSP0_DATA7),
+	MXS_PINCTRL_PIN(SSP0_CMD),
+	MXS_PINCTRL_PIN(SSP0_DETECT),
+	MXS_PINCTRL_PIN(SSP0_SCK),
+	MXS_PINCTRL_PIN(SSP1_SCK),
+	MXS_PINCTRL_PIN(SSP1_CMD),
+	MXS_PINCTRL_PIN(SSP1_DATA0),
+	MXS_PINCTRL_PIN(SSP1_DATA3),
+	MXS_PINCTRL_PIN(SSP2_SCK),
+	MXS_PINCTRL_PIN(SSP2_MOSI),
+	MXS_PINCTRL_PIN(SSP2_MISO),
+	MXS_PINCTRL_PIN(SSP2_SS0),
+	MXS_PINCTRL_PIN(SSP2_SS1),
+	MXS_PINCTRL_PIN(SSP2_SS2),
+	MXS_PINCTRL_PIN(SSP3_SCK),
+	MXS_PINCTRL_PIN(SSP3_MOSI),
+	MXS_PINCTRL_PIN(SSP3_MISO),
+	MXS_PINCTRL_PIN(SSP3_SS0),
+	MXS_PINCTRL_PIN(AUART0_RX),
+	MXS_PINCTRL_PIN(AUART0_TX),
+	MXS_PINCTRL_PIN(AUART0_CTS),
+	MXS_PINCTRL_PIN(AUART0_RTS),
+	MXS_PINCTRL_PIN(AUART1_RX),
+	MXS_PINCTRL_PIN(AUART1_TX),
+	MXS_PINCTRL_PIN(AUART1_CTS),
+	MXS_PINCTRL_PIN(AUART1_RTS),
+	MXS_PINCTRL_PIN(AUART2_RX),
+	MXS_PINCTRL_PIN(AUART2_TX),
+	MXS_PINCTRL_PIN(AUART2_CTS),
+	MXS_PINCTRL_PIN(AUART2_RTS),
+	MXS_PINCTRL_PIN(AUART3_RX),
+	MXS_PINCTRL_PIN(AUART3_TX),
+	MXS_PINCTRL_PIN(AUART3_CTS),
+	MXS_PINCTRL_PIN(AUART3_RTS),
+	MXS_PINCTRL_PIN(PWM0),
+	MXS_PINCTRL_PIN(PWM1),
+	MXS_PINCTRL_PIN(PWM2),
+	MXS_PINCTRL_PIN(SAIF0_MCLK),
+	MXS_PINCTRL_PIN(SAIF0_LRCLK),
+	MXS_PINCTRL_PIN(SAIF0_BITCLK),
+	MXS_PINCTRL_PIN(SAIF0_SDATA0),
+	MXS_PINCTRL_PIN(I2C0_SCL),
+	MXS_PINCTRL_PIN(I2C0_SDA),
+	MXS_PINCTRL_PIN(SAIF1_SDATA0),
+	MXS_PINCTRL_PIN(SPDIF),
+	MXS_PINCTRL_PIN(PWM3),
+	MXS_PINCTRL_PIN(PWM4),
+	MXS_PINCTRL_PIN(LCD_RESET),
+	MXS_PINCTRL_PIN(ENET0_MDC),
+	MXS_PINCTRL_PIN(ENET0_MDIO),
+	MXS_PINCTRL_PIN(ENET0_RX_EN),
+	MXS_PINCTRL_PIN(ENET0_RXD0),
+	MXS_PINCTRL_PIN(ENET0_RXD1),
+	MXS_PINCTRL_PIN(ENET0_TX_CLK),
+	MXS_PINCTRL_PIN(ENET0_TX_EN),
+	MXS_PINCTRL_PIN(ENET0_TXD0),
+	MXS_PINCTRL_PIN(ENET0_TXD1),
+	MXS_PINCTRL_PIN(ENET0_RXD2),
+	MXS_PINCTRL_PIN(ENET0_RXD3),
+	MXS_PINCTRL_PIN(ENET0_TXD2),
+	MXS_PINCTRL_PIN(ENET0_TXD3),
+	MXS_PINCTRL_PIN(ENET0_RX_CLK),
+	MXS_PINCTRL_PIN(ENET0_COL),
+	MXS_PINCTRL_PIN(ENET0_CRS),
+	MXS_PINCTRL_PIN(ENET_CLK),
+	MXS_PINCTRL_PIN(JTAG_RTCK),
+	MXS_PINCTRL_PIN(EMI_D00),
+	MXS_PINCTRL_PIN(EMI_D01),
+	MXS_PINCTRL_PIN(EMI_D02),
+	MXS_PINCTRL_PIN(EMI_D03),
+	MXS_PINCTRL_PIN(EMI_D04),
+	MXS_PINCTRL_PIN(EMI_D05),
+	MXS_PINCTRL_PIN(EMI_D06),
+	MXS_PINCTRL_PIN(EMI_D07),
+	MXS_PINCTRL_PIN(EMI_D08),
+	MXS_PINCTRL_PIN(EMI_D09),
+	MXS_PINCTRL_PIN(EMI_D10),
+	MXS_PINCTRL_PIN(EMI_D11),
+	MXS_PINCTRL_PIN(EMI_D12),
+	MXS_PINCTRL_PIN(EMI_D13),
+	MXS_PINCTRL_PIN(EMI_D14),
+	MXS_PINCTRL_PIN(EMI_D15),
+	MXS_PINCTRL_PIN(EMI_ODT0),
+	MXS_PINCTRL_PIN(EMI_DQM0),
+	MXS_PINCTRL_PIN(EMI_ODT1),
+	MXS_PINCTRL_PIN(EMI_DQM1),
+	MXS_PINCTRL_PIN(EMI_DDR_OPEN_FB),
+	MXS_PINCTRL_PIN(EMI_CLK),
+	MXS_PINCTRL_PIN(EMI_DQS0),
+	MXS_PINCTRL_PIN(EMI_DQS1),
+	MXS_PINCTRL_PIN(EMI_DDR_OPEN),
+	MXS_PINCTRL_PIN(EMI_A00),
+	MXS_PINCTRL_PIN(EMI_A01),
+	MXS_PINCTRL_PIN(EMI_A02),
+	MXS_PINCTRL_PIN(EMI_A03),
+	MXS_PINCTRL_PIN(EMI_A04),
+	MXS_PINCTRL_PIN(EMI_A05),
+	MXS_PINCTRL_PIN(EMI_A06),
+	MXS_PINCTRL_PIN(EMI_A07),
+	MXS_PINCTRL_PIN(EMI_A08),
+	MXS_PINCTRL_PIN(EMI_A09),
+	MXS_PINCTRL_PIN(EMI_A10),
+	MXS_PINCTRL_PIN(EMI_A11),
+	MXS_PINCTRL_PIN(EMI_A12),
+	MXS_PINCTRL_PIN(EMI_A13),
+	MXS_PINCTRL_PIN(EMI_A14),
+	MXS_PINCTRL_PIN(EMI_BA0),
+	MXS_PINCTRL_PIN(EMI_BA1),
+	MXS_PINCTRL_PIN(EMI_BA2),
+	MXS_PINCTRL_PIN(EMI_CASN),
+	MXS_PINCTRL_PIN(EMI_RASN),
+	MXS_PINCTRL_PIN(EMI_WEN),
+	MXS_PINCTRL_PIN(EMI_CE0N),
+	MXS_PINCTRL_PIN(EMI_CE1N),
+	MXS_PINCTRL_PIN(EMI_CKE),
+};
+
+static struct mxs_regs imx28_regs = {
+	.muxsel = 0x100,
+	.drive = 0x300,
+	.pull = 0x600,
+};
+
+static struct mxs_pinctrl_soc_data imx28_pinctrl_data = {
+	.regs = &imx28_regs,
+	.pins = imx28_pins,
+	.npins = ARRAY_SIZE(imx28_pins),
+};
+
+static int __devinit imx28_pinctrl_probe(struct platform_device *pdev)
+{
+	return mxs_pinctrl_probe(pdev, &imx28_pinctrl_data);
+}
+
+static struct of_device_id imx28_pinctrl_of_match[] __devinitdata = {
+	{ .compatible = "fsl,imx28-pinctrl", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx28_pinctrl_of_match);
+
+static struct platform_driver imx28_pinctrl_driver = {
+	.driver = {
+		.name = "imx28-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = imx28_pinctrl_of_match,
+	},
+	.probe = imx28_pinctrl_probe,
+	.remove = __devexit_p(mxs_pinctrl_remove),
+};
+
+static int __init imx28_pinctrl_init(void)
+{
+	return platform_driver_register(&imx28_pinctrl_driver);
+}
+arch_initcall(imx28_pinctrl_init);
+
+static void __exit imx28_pinctrl_exit(void)
+{
+	platform_driver_unregister(&imx28_pinctrl_driver);
+}
+module_exit(imx28_pinctrl_exit);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Freescale i.MX28 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/pinctrl-imx51.c
new file mode 100644
index 0000000..689b3c8
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx51.c
@@ -0,0 +1,1322 @@
+/*
+ * imx51 pinctrl driver based on imx pinmux core
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro, Inc.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx51_pads {
+	MX51_PAD_EIM_D16 = 1,
+	MX51_PAD_EIM_D17 = 2,
+	MX51_PAD_EIM_D18 = 3,
+	MX51_PAD_EIM_D19 = 4,
+	MX51_PAD_EIM_D20 = 5,
+	MX51_PAD_EIM_D21 = 6,
+	MX51_PAD_EIM_D22 = 7,
+	MX51_PAD_EIM_D23 = 8,
+	MX51_PAD_EIM_D24 = 9,
+	MX51_PAD_EIM_D25 = 10,
+	MX51_PAD_EIM_D26 = 11,
+	MX51_PAD_EIM_D27 = 12,
+	MX51_PAD_EIM_D28 = 13,
+	MX51_PAD_EIM_D29 = 14,
+	MX51_PAD_EIM_D30 = 15,
+	MX51_PAD_EIM_D31 = 16,
+	MX51_PAD_EIM_A16 = 17,
+	MX51_PAD_EIM_A17 = 18,
+	MX51_PAD_EIM_A18 = 19,
+	MX51_PAD_EIM_A19 = 20,
+	MX51_PAD_EIM_A20 = 21,
+	MX51_PAD_EIM_A21 = 22,
+	MX51_PAD_EIM_A22 = 23,
+	MX51_PAD_EIM_A23 = 24,
+	MX51_PAD_EIM_A24 = 25,
+	MX51_PAD_EIM_A25 = 26,
+	MX51_PAD_EIM_A26 = 27,
+	MX51_PAD_EIM_A27 = 28,
+	MX51_PAD_EIM_EB0 = 29,
+	MX51_PAD_EIM_EB1 = 30,
+	MX51_PAD_EIM_EB2 = 31,
+	MX51_PAD_EIM_EB3 = 32,
+	MX51_PAD_EIM_OE = 33,
+	MX51_PAD_EIM_CS0 = 34,
+	MX51_PAD_EIM_CS1 = 35,
+	MX51_PAD_EIM_CS2 = 36,
+	MX51_PAD_EIM_CS3 = 37,
+	MX51_PAD_EIM_CS4 = 38,
+	MX51_PAD_EIM_CS5 = 39,
+	MX51_PAD_EIM_DTACK = 40,
+	MX51_PAD_EIM_LBA = 41,
+	MX51_PAD_EIM_CRE = 42,
+	MX51_PAD_DRAM_CS1 = 43,
+	MX51_PAD_NANDF_WE_B = 44,
+	MX51_PAD_NANDF_RE_B = 45,
+	MX51_PAD_NANDF_ALE = 46,
+	MX51_PAD_NANDF_CLE = 47,
+	MX51_PAD_NANDF_WP_B = 48,
+	MX51_PAD_NANDF_RB0 = 49,
+	MX51_PAD_NANDF_RB1 = 50,
+	MX51_PAD_NANDF_RB2 = 51,
+	MX51_PAD_NANDF_RB3 = 52,
+	MX51_PAD_GPIO_NAND = 53,
+	MX51_PAD_NANDF_CS0 = 54,
+	MX51_PAD_NANDF_CS1 = 55,
+	MX51_PAD_NANDF_CS2 = 56,
+	MX51_PAD_NANDF_CS3 = 57,
+	MX51_PAD_NANDF_CS4 = 58,
+	MX51_PAD_NANDF_CS5 = 59,
+	MX51_PAD_NANDF_CS6 = 60,
+	MX51_PAD_NANDF_CS7 = 61,
+	MX51_PAD_NANDF_RDY_INT = 62,
+	MX51_PAD_NANDF_D15 = 63,
+	MX51_PAD_NANDF_D14 = 64,
+	MX51_PAD_NANDF_D13 = 65,
+	MX51_PAD_NANDF_D12 = 66,
+	MX51_PAD_NANDF_D11 = 67,
+	MX51_PAD_NANDF_D10 = 68,
+	MX51_PAD_NANDF_D9 = 69,
+	MX51_PAD_NANDF_D8 = 70,
+	MX51_PAD_NANDF_D7 = 71,
+	MX51_PAD_NANDF_D6 = 72,
+	MX51_PAD_NANDF_D5 = 73,
+	MX51_PAD_NANDF_D4 = 74,
+	MX51_PAD_NANDF_D3 = 75,
+	MX51_PAD_NANDF_D2 = 76,
+	MX51_PAD_NANDF_D1 = 77,
+	MX51_PAD_NANDF_D0 = 78,
+	MX51_PAD_CSI1_D8 = 79,
+	MX51_PAD_CSI1_D9 = 80,
+	MX51_PAD_CSI1_D10 = 81,
+	MX51_PAD_CSI1_D11 = 82,
+	MX51_PAD_CSI1_D12 = 83,
+	MX51_PAD_CSI1_D13 = 84,
+	MX51_PAD_CSI1_D14 = 85,
+	MX51_PAD_CSI1_D15 = 86,
+	MX51_PAD_CSI1_D16 = 87,
+	MX51_PAD_CSI1_D17 = 88,
+	MX51_PAD_CSI1_D18 = 89,
+	MX51_PAD_CSI1_D19 = 90,
+	MX51_PAD_CSI1_VSYNC = 91,
+	MX51_PAD_CSI1_HSYNC = 92,
+	MX51_PAD_CSI1_PIXCLK = 93,
+	MX51_PAD_CSI1_MCLK = 94,
+	MX51_PAD_CSI2_D12 = 95,
+	MX51_PAD_CSI2_D13 = 96,
+	MX51_PAD_CSI2_D14 = 97,
+	MX51_PAD_CSI2_D15 = 98,
+	MX51_PAD_CSI2_D16 = 99,
+	MX51_PAD_CSI2_D17 = 100,
+	MX51_PAD_CSI2_D18 = 101,
+	MX51_PAD_CSI2_D19 = 102,
+	MX51_PAD_CSI2_VSYNC = 103,
+	MX51_PAD_CSI2_HSYNC = 104,
+	MX51_PAD_CSI2_PIXCLK = 105,
+	MX51_PAD_I2C1_CLK = 106,
+	MX51_PAD_I2C1_DAT = 107,
+	MX51_PAD_AUD3_BB_TXD = 108,
+	MX51_PAD_AUD3_BB_RXD = 109,
+	MX51_PAD_AUD3_BB_CK = 110,
+	MX51_PAD_AUD3_BB_FS = 111,
+	MX51_PAD_CSPI1_MOSI = 112,
+	MX51_PAD_CSPI1_MISO = 113,
+	MX51_PAD_CSPI1_SS0 = 114,
+	MX51_PAD_CSPI1_SS1 = 115,
+	MX51_PAD_CSPI1_RDY = 116,
+	MX51_PAD_CSPI1_SCLK = 117,
+	MX51_PAD_UART1_RXD = 118,
+	MX51_PAD_UART1_TXD = 119,
+	MX51_PAD_UART1_RTS = 120,
+	MX51_PAD_UART1_CTS = 121,
+	MX51_PAD_UART2_RXD = 122,
+	MX51_PAD_UART2_TXD = 123,
+	MX51_PAD_UART3_RXD = 124,
+	MX51_PAD_UART3_TXD = 125,
+	MX51_PAD_OWIRE_LINE = 126,
+	MX51_PAD_KEY_ROW0 = 127,
+	MX51_PAD_KEY_ROW1 = 128,
+	MX51_PAD_KEY_ROW2 = 129,
+	MX51_PAD_KEY_ROW3 = 130,
+	MX51_PAD_KEY_COL0 = 131,
+	MX51_PAD_KEY_COL1 = 132,
+	MX51_PAD_KEY_COL2 = 133,
+	MX51_PAD_KEY_COL3 = 134,
+	MX51_PAD_KEY_COL4 = 135,
+	MX51_PAD_KEY_COL5 = 136,
+	MX51_PAD_USBH1_CLK = 137,
+	MX51_PAD_USBH1_DIR = 138,
+	MX51_PAD_USBH1_STP = 139,
+	MX51_PAD_USBH1_NXT = 140,
+	MX51_PAD_USBH1_DATA0 = 141,
+	MX51_PAD_USBH1_DATA1 = 142,
+	MX51_PAD_USBH1_DATA2 = 143,
+	MX51_PAD_USBH1_DATA3 = 144,
+	MX51_PAD_USBH1_DATA4 = 145,
+	MX51_PAD_USBH1_DATA5 = 146,
+	MX51_PAD_USBH1_DATA6 = 147,
+	MX51_PAD_USBH1_DATA7 = 148,
+	MX51_PAD_DI1_PIN11 = 149,
+	MX51_PAD_DI1_PIN12 = 150,
+	MX51_PAD_DI1_PIN13 = 151,
+	MX51_PAD_DI1_D0_CS = 152,
+	MX51_PAD_DI1_D1_CS = 153,
+	MX51_PAD_DISPB2_SER_DIN = 154,
+	MX51_PAD_DISPB2_SER_DIO = 155,
+	MX51_PAD_DISPB2_SER_CLK = 156,
+	MX51_PAD_DISPB2_SER_RS = 157,
+	MX51_PAD_DISP1_DAT0 = 158,
+	MX51_PAD_DISP1_DAT1 = 159,
+	MX51_PAD_DISP1_DAT2 = 160,
+	MX51_PAD_DISP1_DAT3 = 161,
+	MX51_PAD_DISP1_DAT4 = 162,
+	MX51_PAD_DISP1_DAT5 = 163,
+	MX51_PAD_DISP1_DAT6 = 164,
+	MX51_PAD_DISP1_DAT7 = 165,
+	MX51_PAD_DISP1_DAT8 = 166,
+	MX51_PAD_DISP1_DAT9 = 167,
+	MX51_PAD_DISP1_DAT10 = 168,
+	MX51_PAD_DISP1_DAT11 = 169,
+	MX51_PAD_DISP1_DAT12 = 170,
+	MX51_PAD_DISP1_DAT13 = 171,
+	MX51_PAD_DISP1_DAT14 = 172,
+	MX51_PAD_DISP1_DAT15 = 173,
+	MX51_PAD_DISP1_DAT16 = 174,
+	MX51_PAD_DISP1_DAT17 = 175,
+	MX51_PAD_DISP1_DAT18 = 176,
+	MX51_PAD_DISP1_DAT19 = 177,
+	MX51_PAD_DISP1_DAT20 = 178,
+	MX51_PAD_DISP1_DAT21 = 179,
+	MX51_PAD_DISP1_DAT22 = 180,
+	MX51_PAD_DISP1_DAT23 = 181,
+	MX51_PAD_DI1_PIN3 = 182,
+	MX51_PAD_DI1_PIN2 = 183,
+	MX51_PAD_DI_GP2 = 184,
+	MX51_PAD_DI_GP3 = 185,
+	MX51_PAD_DI2_PIN4 = 186,
+	MX51_PAD_DI2_PIN2 = 187,
+	MX51_PAD_DI2_PIN3 = 188,
+	MX51_PAD_DI2_DISP_CLK = 189,
+	MX51_PAD_DI_GP4 = 190,
+	MX51_PAD_DISP2_DAT0 = 191,
+	MX51_PAD_DISP2_DAT1 = 192,
+	MX51_PAD_DISP2_DAT2 = 193,
+	MX51_PAD_DISP2_DAT3 = 194,
+	MX51_PAD_DISP2_DAT4 = 195,
+	MX51_PAD_DISP2_DAT5 = 196,
+	MX51_PAD_DISP2_DAT6 = 197,
+	MX51_PAD_DISP2_DAT7 = 198,
+	MX51_PAD_DISP2_DAT8 = 199,
+	MX51_PAD_DISP2_DAT9 = 200,
+	MX51_PAD_DISP2_DAT10 = 201,
+	MX51_PAD_DISP2_DAT11 = 202,
+	MX51_PAD_DISP2_DAT12 = 203,
+	MX51_PAD_DISP2_DAT13 = 204,
+	MX51_PAD_DISP2_DAT14 = 205,
+	MX51_PAD_DISP2_DAT15 = 206,
+	MX51_PAD_SD1_CMD = 207,
+	MX51_PAD_SD1_CLK = 208,
+	MX51_PAD_SD1_DATA0 = 209,
+	MX51_PAD_EIM_DA0 = 210,
+	MX51_PAD_EIM_DA1 = 211,
+	MX51_PAD_EIM_DA2 = 212,
+	MX51_PAD_EIM_DA3 = 213,
+	MX51_PAD_SD1_DATA1 = 214,
+	MX51_PAD_EIM_DA4 = 215,
+	MX51_PAD_EIM_DA5 = 216,
+	MX51_PAD_EIM_DA6 = 217,
+	MX51_PAD_EIM_DA7 = 218,
+	MX51_PAD_SD1_DATA2 = 219,
+	MX51_PAD_EIM_DA10 = 220,
+	MX51_PAD_EIM_DA11 = 221,
+	MX51_PAD_EIM_DA8 = 222,
+	MX51_PAD_EIM_DA9 = 223,
+	MX51_PAD_SD1_DATA3 = 224,
+	MX51_PAD_GPIO1_0 = 225,
+	MX51_PAD_GPIO1_1 = 226,
+	MX51_PAD_EIM_DA12 = 227,
+	MX51_PAD_EIM_DA13 = 228,
+	MX51_PAD_EIM_DA14 = 229,
+	MX51_PAD_EIM_DA15 = 230,
+	MX51_PAD_SD2_CMD = 231,
+	MX51_PAD_SD2_CLK = 232,
+	MX51_PAD_SD2_DATA0 = 233,
+	MX51_PAD_SD2_DATA1 = 234,
+	MX51_PAD_SD2_DATA2 = 235,
+	MX51_PAD_SD2_DATA3 = 236,
+	MX51_PAD_GPIO1_2 = 237,
+	MX51_PAD_GPIO1_3 = 238,
+	MX51_PAD_PMIC_INT_REQ = 239,
+	MX51_PAD_GPIO1_4 = 240,
+	MX51_PAD_GPIO1_5 = 241,
+	MX51_PAD_GPIO1_6 = 242,
+	MX51_PAD_GPIO1_7 = 243,
+	MX51_PAD_GPIO1_8 = 244,
+	MX51_PAD_GPIO1_9 = 245,
+};
+
+/* imx51 register maps */
+static struct imx_pin_reg imx51_pin_regs[] = {
+	IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 5, 0x000, 0), /* MX51_PAD_EIM_D16__AUD4_RXFS */
+	IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 7, 0x8d8, 0), /* MX51_PAD_EIM_D16__AUD5_TXD */
+	IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 0, 0x000, 0), /* MX51_PAD_EIM_D16__EIM_D16 */
+	IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 1, 0x000, 0), /* MX51_PAD_EIM_D16__GPIO2_0 */
+	IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 4, 0x9b4, 0), /* MX51_PAD_EIM_D16__I2C1_SDA */
+	IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 3, 0x000, 0), /* MX51_PAD_EIM_D16__UART2_CTS */
+	IMX_PIN_REG(MX51_PAD_EIM_D16, 0x3f0, 0x05c, 2, 0x000, 0), /* MX51_PAD_EIM_D16__USBH2_DATA0 */
+	IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 7, 0x8d4, 0), /* MX51_PAD_EIM_D17__AUD5_RXD */
+	IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 0, 0x000, 0), /* MX51_PAD_EIM_D17__EIM_D17 */
+	IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 1, 0x000, 0), /* MX51_PAD_EIM_D17__GPIO2_1 */
+	IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 3, 0x9ec, 0), /* MX51_PAD_EIM_D17__UART2_RXD */
+	IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 4, 0x000, 0), /* MX51_PAD_EIM_D17__UART3_CTS */
+	IMX_PIN_REG(MX51_PAD_EIM_D17, 0x3f4, 0x060, 2, 0x000, 0), /* MX51_PAD_EIM_D17__USBH2_DATA1 */
+	IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 7, 0x8e4, 0), /* MX51_PAD_EIM_D18__AUD5_TXC */
+	IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 0, 0x000, 0), /* MX51_PAD_EIM_D18__EIM_D18 */
+	IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 1, 0x000, 0), /* MX51_PAD_EIM_D18__GPIO2_2 */
+	IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 3, 0x000, 0), /* MX51_PAD_EIM_D18__UART2_TXD */
+	IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 4, 0x9f0, 1), /* MX51_PAD_EIM_D18__UART3_RTS */
+	IMX_PIN_REG(MX51_PAD_EIM_D18, 0x3f8, 0x064, 2, 0x000, 0), /* MX51_PAD_EIM_D18__USBH2_DATA2 */
+	IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 5, 0x000, 0), /* MX51_PAD_EIM_D19__AUD4_RXC */
+	IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 7, 0x8e8, 0), /* MX51_PAD_EIM_D19__AUD5_TXFS */
+	IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 0, 0x000, 0), /* MX51_PAD_EIM_D19__EIM_D19 */
+	IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 1, 0x000, 0), /* MX51_PAD_EIM_D19__GPIO2_3 */
+	IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 4, 0x9b0, 0), /* MX51_PAD_EIM_D19__I2C1_SCL */
+	IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 3, 0x9e8, 1), /* MX51_PAD_EIM_D19__UART2_RTS */
+	IMX_PIN_REG(MX51_PAD_EIM_D19, 0x3fc, 0x068, 2, 0x000, 0), /* MX51_PAD_EIM_D19__USBH2_DATA3 */
+	IMX_PIN_REG(MX51_PAD_EIM_D20, 0x400, 0x06c, 5, 0x8c8, 0), /* MX51_PAD_EIM_D20__AUD4_TXD */
+	IMX_PIN_REG(MX51_PAD_EIM_D20, 0x400, 0x06c, 0, 0x000, 0), /* MX51_PAD_EIM_D20__EIM_D20 */
+	IMX_PIN_REG(MX51_PAD_EIM_D20, 0x400, 0x06c, 1, 0x000, 0), /* MX51_PAD_EIM_D20__GPIO2_4 */
+	IMX_PIN_REG(MX51_PAD_EIM_D20, 0x400, 0x06c, 4, 0x000, 0), /* MX51_PAD_EIM_D20__SRTC_ALARM_DEB */
+	IMX_PIN_REG(MX51_PAD_EIM_D20, 0x400, 0x06c, 2, 0x000, 0), /* MX51_PAD_EIM_D20__USBH2_DATA4 */
+	IMX_PIN_REG(MX51_PAD_EIM_D21, 0x404, 0x070, 5, 0x8c4, 0), /* MX51_PAD_EIM_D21__AUD4_RXD */
+	IMX_PIN_REG(MX51_PAD_EIM_D21, 0x404, 0x070, 0, 0x000, 0), /* MX51_PAD_EIM_D21__EIM_D21 */
+	IMX_PIN_REG(MX51_PAD_EIM_D21, 0x404, 0x070, 1, 0x000, 0), /* MX51_PAD_EIM_D21__GPIO2_5 */
+	IMX_PIN_REG(MX51_PAD_EIM_D21, 0x404, 0x070, 3, 0x000, 0), /* MX51_PAD_EIM_D21__SRTC_ALARM_DEB */
+	IMX_PIN_REG(MX51_PAD_EIM_D21, 0x404, 0x070, 2, 0x000, 0), /* MX51_PAD_EIM_D21__USBH2_DATA5 */
+	IMX_PIN_REG(MX51_PAD_EIM_D22, 0x408, 0x074, 5, 0x8cc, 0), /* MX51_PAD_EIM_D22__AUD4_TXC */
+	IMX_PIN_REG(MX51_PAD_EIM_D22, 0x408, 0x074, 0, 0x000, 0), /* MX51_PAD_EIM_D22__EIM_D22 */
+	IMX_PIN_REG(MX51_PAD_EIM_D22, 0x408, 0x074, 1, 0x000, 0), /* MX51_PAD_EIM_D22__GPIO2_6 */
+	IMX_PIN_REG(MX51_PAD_EIM_D22, 0x408, 0x074, 2, 0x000, 0), /* MX51_PAD_EIM_D22__USBH2_DATA6 */
+	IMX_PIN_REG(MX51_PAD_EIM_D23, 0x40c, 0x078, 5, 0x8d0, 0), /* MX51_PAD_EIM_D23__AUD4_TXFS */
+	IMX_PIN_REG(MX51_PAD_EIM_D23, 0x40c, 0x078, 0, 0x000, 0), /* MX51_PAD_EIM_D23__EIM_D23 */
+	IMX_PIN_REG(MX51_PAD_EIM_D23, 0x40c, 0x078, 1, 0x000, 0), /* MX51_PAD_EIM_D23__GPIO2_7 */
+	IMX_PIN_REG(MX51_PAD_EIM_D23, 0x40c, 0x078, 4, 0x000, 0), /* MX51_PAD_EIM_D23__SPDIF_OUT1 */
+	IMX_PIN_REG(MX51_PAD_EIM_D23, 0x40c, 0x078, 2, 0x000, 0), /* MX51_PAD_EIM_D23__USBH2_DATA7 */
+	IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 5, 0x8f8, 0), /* MX51_PAD_EIM_D24__AUD6_RXFS */
+	IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 0, 0x000, 0), /* MX51_PAD_EIM_D24__EIM_D24 */
+	IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 1, 0x000, 0), /* MX51_PAD_EIM_D24__GPIO2_8 */
+	IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 4, 0x9bc, 0), /* MX51_PAD_EIM_D24__I2C2_SDA */
+	IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 3, 0x000, 0), /* MX51_PAD_EIM_D24__UART3_CTS */
+	IMX_PIN_REG(MX51_PAD_EIM_D24, 0x410, 0x07c, 2, 0x000, 0), /* MX51_PAD_EIM_D24__USBOTG_DATA0 */
+	IMX_PIN_REG(MX51_PAD_EIM_D25, 0x414, 0x080, 0, 0x000, 0), /* MX51_PAD_EIM_D25__EIM_D25 */
+	IMX_PIN_REG(MX51_PAD_EIM_D25, 0x414, 0x080, 1, 0x9c8, 0), /* MX51_PAD_EIM_D25__KEY_COL6 */
+	IMX_PIN_REG(MX51_PAD_EIM_D25, 0x414, 0x080, 4, 0x000, 0), /* MX51_PAD_EIM_D25__UART2_CTS */
+	IMX_PIN_REG(MX51_PAD_EIM_D25, 0x414, 0x080, 3, 0x9f4, 0), /* MX51_PAD_EIM_D25__UART3_RXD */
+	IMX_PIN_REG(MX51_PAD_EIM_D25, 0x414, 0x080, 2, 0x000, 0), /* MX51_PAD_EIM_D25__USBOTG_DATA1 */
+	IMX_PIN_REG(MX51_PAD_EIM_D26, 0x418, 0x084, 0, 0x000, 0), /* MX51_PAD_EIM_D26__EIM_D26 */
+	IMX_PIN_REG(MX51_PAD_EIM_D26, 0x418, 0x084, 1, 0x9cc, 0), /* MX51_PAD_EIM_D26__KEY_COL7 */
+	IMX_PIN_REG(MX51_PAD_EIM_D26, 0x418, 0x084, 4, 0x9e8, 3), /* MX51_PAD_EIM_D26__UART2_RTS */
+	IMX_PIN_REG(MX51_PAD_EIM_D26, 0x418, 0x084, 3, 0x000, 0), /* MX51_PAD_EIM_D26__UART3_TXD */
+	IMX_PIN_REG(MX51_PAD_EIM_D26, 0x418, 0x084, 2, 0x000, 0), /* MX51_PAD_EIM_D26__USBOTG_DATA2 */
+	IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 5, 0x8f4, 0), /* MX51_PAD_EIM_D27__AUD6_RXC */
+	IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 0, 0x000, 0), /* MX51_PAD_EIM_D27__EIM_D27 */
+	IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 1, 0x000, 0), /* MX51_PAD_EIM_D27__GPIO2_9 */
+	IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 4, 0x9b8, 0), /* MX51_PAD_EIM_D27__I2C2_SCL */
+	IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 3, 0x9f0, 3), /* MX51_PAD_EIM_D27__UART3_RTS */
+	IMX_PIN_REG(MX51_PAD_EIM_D27, 0x41c, 0x088, 2, 0x000, 0), /* MX51_PAD_EIM_D27__USBOTG_DATA3 */
+	IMX_PIN_REG(MX51_PAD_EIM_D28, 0x420, 0x08c, 5, 0x8f0, 0), /* MX51_PAD_EIM_D28__AUD6_TXD */
+	IMX_PIN_REG(MX51_PAD_EIM_D28, 0x420, 0x08c, 0, 0x000, 0), /* MX51_PAD_EIM_D28__EIM_D28 */
+	IMX_PIN_REG(MX51_PAD_EIM_D28, 0x420, 0x08c, 1, 0x9d0, 0), /* MX51_PAD_EIM_D28__KEY_ROW4 */
+	IMX_PIN_REG(MX51_PAD_EIM_D28, 0x420, 0x08c, 2, 0x000, 0), /* MX51_PAD_EIM_D28__USBOTG_DATA4 */
+	IMX_PIN_REG(MX51_PAD_EIM_D29, 0x424, 0x090, 5, 0x8ec, 0), /* MX51_PAD_EIM_D29__AUD6_RXD */
+	IMX_PIN_REG(MX51_PAD_EIM_D29, 0x424, 0x090, 0, 0x000, 0), /* MX51_PAD_EIM_D29__EIM_D29 */
+	IMX_PIN_REG(MX51_PAD_EIM_D29, 0x424, 0x090, 1, 0x9d4, 0), /* MX51_PAD_EIM_D29__KEY_ROW5 */
+	IMX_PIN_REG(MX51_PAD_EIM_D29, 0x424, 0x090, 2, 0x000, 0), /* MX51_PAD_EIM_D29__USBOTG_DATA5 */
+	IMX_PIN_REG(MX51_PAD_EIM_D30, 0x428, 0x094, 5, 0x8fc, 0), /* MX51_PAD_EIM_D30__AUD6_TXC */
+	IMX_PIN_REG(MX51_PAD_EIM_D30, 0x428, 0x094, 0, 0x000, 0), /* MX51_PAD_EIM_D30__EIM_D30 */
+	IMX_PIN_REG(MX51_PAD_EIM_D30, 0x428, 0x094, 1, 0x9d8, 0), /* MX51_PAD_EIM_D30__KEY_ROW6 */
+	IMX_PIN_REG(MX51_PAD_EIM_D30, 0x428, 0x094, 2, 0x000, 0), /* MX51_PAD_EIM_D30__USBOTG_DATA6 */
+	IMX_PIN_REG(MX51_PAD_EIM_D31, 0x42c, 0x098, 5, 0x900, 0), /* MX51_PAD_EIM_D31__AUD6_TXFS */
+	IMX_PIN_REG(MX51_PAD_EIM_D31, 0x42c, 0x098, 0, 0x000, 0), /* MX51_PAD_EIM_D31__EIM_D31 */
+	IMX_PIN_REG(MX51_PAD_EIM_D31, 0x42c, 0x098, 1, 0x9dc, 0), /* MX51_PAD_EIM_D31__KEY_ROW7 */
+	IMX_PIN_REG(MX51_PAD_EIM_D31, 0x42c, 0x098, 2, 0x000, 0), /* MX51_PAD_EIM_D31__USBOTG_DATA7 */
+	IMX_PIN_REG(MX51_PAD_EIM_A16, 0x430, 0x09c, 0, 0x000, 0), /* MX51_PAD_EIM_A16__EIM_A16 */
+	IMX_PIN_REG(MX51_PAD_EIM_A16, 0x430, 0x09c, 1, 0x000, 0), /* MX51_PAD_EIM_A16__GPIO2_10 */
+	IMX_PIN_REG(MX51_PAD_EIM_A16, 0x430, 0x09c, 7, 0x000, 0), /* MX51_PAD_EIM_A16__OSC_FREQ_SEL0 */
+	IMX_PIN_REG(MX51_PAD_EIM_A17, 0x434, 0x0a0, 0, 0x000, 0), /* MX51_PAD_EIM_A17__EIM_A17 */
+	IMX_PIN_REG(MX51_PAD_EIM_A17, 0x434, 0x0a0, 1, 0x000, 0), /* MX51_PAD_EIM_A17__GPIO2_11 */
+	IMX_PIN_REG(MX51_PAD_EIM_A17, 0x434, 0x0a0, 7, 0x000, 0), /* MX51_PAD_EIM_A17__OSC_FREQ_SEL1 */
+	IMX_PIN_REG(MX51_PAD_EIM_A18, 0x438, 0x0a4, 7, 0x000, 0), /* MX51_PAD_EIM_A18__BOOT_LPB0 */
+	IMX_PIN_REG(MX51_PAD_EIM_A18, 0x438, 0x0a4, 0, 0x000, 0), /* MX51_PAD_EIM_A18__EIM_A18 */
+	IMX_PIN_REG(MX51_PAD_EIM_A18, 0x438, 0x0a4, 1, 0x000, 0), /* MX51_PAD_EIM_A18__GPIO2_12 */
+	IMX_PIN_REG(MX51_PAD_EIM_A19, 0x43c, 0x0a8, 7, 0x000, 0), /* MX51_PAD_EIM_A19__BOOT_LPB1 */
+	IMX_PIN_REG(MX51_PAD_EIM_A19, 0x43c, 0x0a8, 0, 0x000, 0), /* MX51_PAD_EIM_A19__EIM_A19 */
+	IMX_PIN_REG(MX51_PAD_EIM_A19, 0x43c, 0x0a8, 1, 0x000, 0), /* MX51_PAD_EIM_A19__GPIO2_13 */
+	IMX_PIN_REG(MX51_PAD_EIM_A20, 0x440, 0x0ac, 7, 0x000, 0), /* MX51_PAD_EIM_A20__BOOT_UART_SRC0 */
+	IMX_PIN_REG(MX51_PAD_EIM_A20, 0x440, 0x0ac, 0, 0x000, 0), /* MX51_PAD_EIM_A20__EIM_A20 */
+	IMX_PIN_REG(MX51_PAD_EIM_A20, 0x440, 0x0ac, 1, 0x000, 0), /* MX51_PAD_EIM_A20__GPIO2_14 */
+	IMX_PIN_REG(MX51_PAD_EIM_A21, 0x444, 0x0b0, 7, 0x000, 0), /* MX51_PAD_EIM_A21__BOOT_UART_SRC1 */
+	IMX_PIN_REG(MX51_PAD_EIM_A21, 0x444, 0x0b0, 0, 0x000, 0), /* MX51_PAD_EIM_A21__EIM_A21 */
+	IMX_PIN_REG(MX51_PAD_EIM_A21, 0x444, 0x0b0, 1, 0x000, 0), /* MX51_PAD_EIM_A21__GPIO2_15 */
+	IMX_PIN_REG(MX51_PAD_EIM_A22, 0x448, 0x0b4, 0, 0x000, 0), /* MX51_PAD_EIM_A22__EIM_A22 */
+	IMX_PIN_REG(MX51_PAD_EIM_A22, 0x448, 0x0b4, 1, 0x000, 0), /* MX51_PAD_EIM_A22__GPIO2_16 */
+	IMX_PIN_REG(MX51_PAD_EIM_A23, 0x44c, 0x0b8, 7, 0x000, 0), /* MX51_PAD_EIM_A23__BOOT_HPN_EN */
+	IMX_PIN_REG(MX51_PAD_EIM_A23, 0x44c, 0x0b8, 0, 0x000, 0), /* MX51_PAD_EIM_A23__EIM_A23 */
+	IMX_PIN_REG(MX51_PAD_EIM_A23, 0x44c, 0x0b8, 1, 0x000, 0), /* MX51_PAD_EIM_A23__GPIO2_17 */
+	IMX_PIN_REG(MX51_PAD_EIM_A24, 0x450, 0x0bc, 0, 0x000, 0), /* MX51_PAD_EIM_A24__EIM_A24 */
+	IMX_PIN_REG(MX51_PAD_EIM_A24, 0x450, 0x0bc, 1, 0x000, 0), /* MX51_PAD_EIM_A24__GPIO2_18 */
+	IMX_PIN_REG(MX51_PAD_EIM_A24, 0x450, 0x0bc, 2, 0x000, 0), /* MX51_PAD_EIM_A24__USBH2_CLK */
+	IMX_PIN_REG(MX51_PAD_EIM_A25, 0x454, 0x0c0, 6, 0x000, 0), /* MX51_PAD_EIM_A25__DISP1_PIN4 */
+	IMX_PIN_REG(MX51_PAD_EIM_A25, 0x454, 0x0c0, 0, 0x000, 0), /* MX51_PAD_EIM_A25__EIM_A25 */
+	IMX_PIN_REG(MX51_PAD_EIM_A25, 0x454, 0x0c0, 1, 0x000, 0), /* MX51_PAD_EIM_A25__GPIO2_19 */
+	IMX_PIN_REG(MX51_PAD_EIM_A25, 0x454, 0x0c0, 2, 0x000, 0), /* MX51_PAD_EIM_A25__USBH2_DIR */
+	IMX_PIN_REG(MX51_PAD_EIM_A26, 0x458, 0x0c4, 5, 0x9a0, 0), /* MX51_PAD_EIM_A26__CSI1_DATA_EN */
+	IMX_PIN_REG(MX51_PAD_EIM_A26, 0x458, 0x0c4, 6, 0x908, 0), /* MX51_PAD_EIM_A26__DISP2_EXT_CLK */
+	IMX_PIN_REG(MX51_PAD_EIM_A26, 0x458, 0x0c4, 0, 0x000, 0), /* MX51_PAD_EIM_A26__EIM_A26 */
+	IMX_PIN_REG(MX51_PAD_EIM_A26, 0x458, 0x0c4, 1, 0x000, 0), /* MX51_PAD_EIM_A26__GPIO2_20 */
+	IMX_PIN_REG(MX51_PAD_EIM_A26, 0x458, 0x0c4, 2, 0x000, 0), /* MX51_PAD_EIM_A26__USBH2_STP */
+	IMX_PIN_REG(MX51_PAD_EIM_A27, 0x45c, 0x0c8, 5, 0x99c, 0), /* MX51_PAD_EIM_A27__CSI2_DATA_EN */
+	IMX_PIN_REG(MX51_PAD_EIM_A27, 0x45c, 0x0c8, 6, 0x9a4, 0), /* MX51_PAD_EIM_A27__DISP1_PIN1 */
+	IMX_PIN_REG(MX51_PAD_EIM_A27, 0x45c, 0x0c8, 0, 0x000, 0), /* MX51_PAD_EIM_A27__EIM_A27 */
+	IMX_PIN_REG(MX51_PAD_EIM_A27, 0x45c, 0x0c8, 1, 0x000, 0), /* MX51_PAD_EIM_A27__GPIO2_21 */
+	IMX_PIN_REG(MX51_PAD_EIM_A27, 0x45c, 0x0c8, 2, 0x000, 0), /* MX51_PAD_EIM_A27__USBH2_NXT */
+	IMX_PIN_REG(MX51_PAD_EIM_EB0, 0x460, 0x0cc, 0, 0x000, 0), /* MX51_PAD_EIM_EB0__EIM_EB0 */
+	IMX_PIN_REG(MX51_PAD_EIM_EB1, 0x464, 0x0d0, 0, 0x000, 0), /* MX51_PAD_EIM_EB1__EIM_EB1 */
+	IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 6, 0x8e0, 0), /* MX51_PAD_EIM_EB2__AUD5_RXFS */
+	IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 5, 0x000, 0), /* MX51_PAD_EIM_EB2__CSI1_D2 */
+	IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 0, 0x000, 0), /* MX51_PAD_EIM_EB2__EIM_EB2 */
+	IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 3, 0x954, 0), /* MX51_PAD_EIM_EB2__FEC_MDIO */
+	IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 1, 0x000, 0), /* MX51_PAD_EIM_EB2__GPIO2_22 */
+	IMX_PIN_REG(MX51_PAD_EIM_EB2, 0x468, 0x0d4, 7, 0x000, 0), /* MX51_PAD_EIM_EB2__GPT_CMPOUT1 */
+	IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 6, 0x8dc, 0), /* MX51_PAD_EIM_EB3__AUD5_RXC */
+	IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 5, 0x000, 0), /* MX51_PAD_EIM_EB3__CSI1_D3 */
+	IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 0, 0x000, 0), /* MX51_PAD_EIM_EB3__EIM_EB3 */
+	IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 3, 0x95c, 0), /* MX51_PAD_EIM_EB3__FEC_RDATA1 */
+	IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 1, 0x000, 0), /* MX51_PAD_EIM_EB3__GPIO2_23 */
+	IMX_PIN_REG(MX51_PAD_EIM_EB3, 0x46c, 0x0d8, 7, 0x000, 0), /* MX51_PAD_EIM_EB3__GPT_CMPOUT2 */
+	IMX_PIN_REG(MX51_PAD_EIM_OE, 0x470, 0x0dc, 0, 0x000, 0), /* MX51_PAD_EIM_OE__EIM_OE */
+	IMX_PIN_REG(MX51_PAD_EIM_OE, 0x470, 0x0dc, 1, 0x000, 0), /* MX51_PAD_EIM_OE__GPIO2_24 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS0, 0x474, 0x0e0, 0, 0x000, 0), /* MX51_PAD_EIM_CS0__EIM_CS0 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS0, 0x474, 0x0e0, 1, 0x000, 0), /* MX51_PAD_EIM_CS0__GPIO2_25 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS1, 0x478, 0x0e4, 0, 0x000, 0), /* MX51_PAD_EIM_CS1__EIM_CS1 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS1, 0x478, 0x0e4, 1, 0x000, 0), /* MX51_PAD_EIM_CS1__GPIO2_26 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 6, 0x8d8, 1), /* MX51_PAD_EIM_CS2__AUD5_TXD */
+	IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 5, 0x000, 0), /* MX51_PAD_EIM_CS2__CSI1_D4 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 0, 0x000, 0), /* MX51_PAD_EIM_CS2__EIM_CS2 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 3, 0x960, 0), /* MX51_PAD_EIM_CS2__FEC_RDATA2 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 1, 0x000, 0), /* MX51_PAD_EIM_CS2__GPIO2_27 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS2, 0x47c, 0x0e8, 2, 0x000, 0), /* MX51_PAD_EIM_CS2__USBOTG_STP */
+	IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 6, 0x8d4, 1), /* MX51_PAD_EIM_CS3__AUD5_RXD */
+	IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 5, 0x000, 0), /* MX51_PAD_EIM_CS3__CSI1_D5 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 0, 0x000, 0), /* MX51_PAD_EIM_CS3__EIM_CS3 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 3, 0x964, 0), /* MX51_PAD_EIM_CS3__FEC_RDATA3 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 1, 0x000, 0), /* MX51_PAD_EIM_CS3__GPIO2_28 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS3, 0x480, 0x0ec, 2, 0x000, 0), /* MX51_PAD_EIM_CS3__USBOTG_NXT */
+	IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 6, 0x8e4, 1), /* MX51_PAD_EIM_CS4__AUD5_TXC */
+	IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 5, 0x000, 0), /* MX51_PAD_EIM_CS4__CSI1_D6 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 0, 0x000, 0), /* MX51_PAD_EIM_CS4__EIM_CS4 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 3, 0x970, 0), /* MX51_PAD_EIM_CS4__FEC_RX_ER */
+	IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 1, 0x000, 0), /* MX51_PAD_EIM_CS4__GPIO2_29 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS4, 0x484, 0x0f0, 2, 0x000, 0), /* MX51_PAD_EIM_CS4__USBOTG_CLK */
+	IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 6, 0x8e8, 1), /* MX51_PAD_EIM_CS5__AUD5_TXFS */
+	IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 5, 0x000, 0), /* MX51_PAD_EIM_CS5__CSI1_D7 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 4, 0x904, 0), /* MX51_PAD_EIM_CS5__DISP1_EXT_CLK */
+	IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 0, 0x000, 0), /* MX51_PAD_EIM_CS5__EIM_CS5 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 3, 0x950, 0), /* MX51_PAD_EIM_CS5__FEC_CRS */
+	IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 1, 0x000, 0), /* MX51_PAD_EIM_CS5__GPIO2_30 */
+	IMX_PIN_REG(MX51_PAD_EIM_CS5, 0x488, 0x0f4, 2, 0x000, 0), /* MX51_PAD_EIM_CS5__USBOTG_DIR */
+	IMX_PIN_REG(MX51_PAD_EIM_DTACK, 0x48c, 0x0f8, 0, 0x000, 0), /* MX51_PAD_EIM_DTACK__EIM_DTACK */
+	IMX_PIN_REG(MX51_PAD_EIM_DTACK, 0x48c, 0x0f8, 1, 0x000, 0), /* MX51_PAD_EIM_DTACK__GPIO2_31 */
+	IMX_PIN_REG(MX51_PAD_EIM_LBA, 0x494, 0x0fc, 0, 0x000, 0), /* MX51_PAD_EIM_LBA__EIM_LBA */
+	IMX_PIN_REG(MX51_PAD_EIM_LBA, 0x494, 0x0fc, 1, 0x978, 0), /* MX51_PAD_EIM_LBA__GPIO3_1 */
+	IMX_PIN_REG(MX51_PAD_EIM_CRE, 0x4a0, 0x100, 0, 0x000, 0), /* MX51_PAD_EIM_CRE__EIM_CRE */
+	IMX_PIN_REG(MX51_PAD_EIM_CRE, 0x4a0, 0x100, 1, 0x97c, 0), /* MX51_PAD_EIM_CRE__GPIO3_2 */
+	IMX_PIN_REG(MX51_PAD_DRAM_CS1, 0x4d0, 0x104, 0, 0x000, 0), /* MX51_PAD_DRAM_CS1__DRAM_CS1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_WE_B, 0x4e4, 0x108, 3, 0x980, 0), /* MX51_PAD_NANDF_WE_B__GPIO3_3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_WE_B, 0x4e4, 0x108, 0, 0x000, 0), /* MX51_PAD_NANDF_WE_B__NANDF_WE_B */
+	IMX_PIN_REG(MX51_PAD_NANDF_WE_B, 0x4e4, 0x108, 1, 0x000, 0), /* MX51_PAD_NANDF_WE_B__PATA_DIOW */
+	IMX_PIN_REG(MX51_PAD_NANDF_WE_B, 0x4e4, 0x108, 2, 0x93c, 0), /* MX51_PAD_NANDF_WE_B__SD3_DATA0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RE_B, 0x4e8, 0x10c, 3, 0x984, 0), /* MX51_PAD_NANDF_RE_B__GPIO3_4 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RE_B, 0x4e8, 0x10c, 0, 0x000, 0), /* MX51_PAD_NANDF_RE_B__NANDF_RE_B */
+	IMX_PIN_REG(MX51_PAD_NANDF_RE_B, 0x4e8, 0x10c, 1, 0x000, 0), /* MX51_PAD_NANDF_RE_B__PATA_DIOR */
+	IMX_PIN_REG(MX51_PAD_NANDF_RE_B, 0x4e8, 0x10c, 2, 0x940, 0), /* MX51_PAD_NANDF_RE_B__SD3_DATA1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_ALE, 0x4ec, 0x110, 3, 0x988, 0), /* MX51_PAD_NANDF_ALE__GPIO3_5 */
+	IMX_PIN_REG(MX51_PAD_NANDF_ALE, 0x4ec, 0x110, 0, 0x000, 0), /* MX51_PAD_NANDF_ALE__NANDF_ALE */
+	IMX_PIN_REG(MX51_PAD_NANDF_ALE, 0x4ec, 0x110, 1, 0x000, 0), /* MX51_PAD_NANDF_ALE__PATA_BUFFER_EN */
+	IMX_PIN_REG(MX51_PAD_NANDF_CLE, 0x4f0, 0x114, 3, 0x98c, 0), /* MX51_PAD_NANDF_CLE__GPIO3_6 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CLE, 0x4f0, 0x114, 0, 0x000, 0), /* MX51_PAD_NANDF_CLE__NANDF_CLE */
+	IMX_PIN_REG(MX51_PAD_NANDF_CLE, 0x4f0, 0x114, 1, 0x000, 0), /* MX51_PAD_NANDF_CLE__PATA_RESET_B */
+	IMX_PIN_REG(MX51_PAD_NANDF_WP_B, 0x4f4, 0x118, 3, 0x990, 0), /* MX51_PAD_NANDF_WP_B__GPIO3_7 */
+	IMX_PIN_REG(MX51_PAD_NANDF_WP_B, 0x4f4, 0x118, 0, 0x000, 0), /* MX51_PAD_NANDF_WP_B__NANDF_WP_B */
+	IMX_PIN_REG(MX51_PAD_NANDF_WP_B, 0x4f4, 0x118, 1, 0x000, 0), /* MX51_PAD_NANDF_WP_B__PATA_DMACK */
+	IMX_PIN_REG(MX51_PAD_NANDF_WP_B, 0x4f4, 0x118, 2, 0x944, 0), /* MX51_PAD_NANDF_WP_B__SD3_DATA2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB0, 0x4f8, 0x11c, 5, 0x930, 0), /* MX51_PAD_NANDF_RB0__ECSPI2_SS1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB0, 0x4f8, 0x11c, 3, 0x994, 0), /* MX51_PAD_NANDF_RB0__GPIO3_8 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB0, 0x4f8, 0x11c, 0, 0x000, 0), /* MX51_PAD_NANDF_RB0__NANDF_RB0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB0, 0x4f8, 0x11c, 1, 0x000, 0), /* MX51_PAD_NANDF_RB0__PATA_DMARQ */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB0, 0x4f8, 0x11c, 2, 0x948, 0), /* MX51_PAD_NANDF_RB0__SD3_DATA3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 6, 0x91c, 0), /* MX51_PAD_NANDF_RB1__CSPI_MOSI */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 2, 0x000, 0), /* MX51_PAD_NANDF_RB1__ECSPI2_RDY */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 3, 0x000, 0), /* MX51_PAD_NANDF_RB1__GPIO3_9 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 0, 0x000, 0), /* MX51_PAD_NANDF_RB1__NANDF_RB1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 1, 0x000, 0), /* MX51_PAD_NANDF_RB1__PATA_IORDY */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB1, 0x4fc, 0x120, 5, 0x000, 0), /* MX51_PAD_NANDF_RB1__SD4_CMD */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 5, 0x9a8, 0), /* MX51_PAD_NANDF_RB2__DISP2_WAIT */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 2, 0x000, 0), /* MX51_PAD_NANDF_RB2__ECSPI2_SCLK */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 1, 0x94c, 0), /* MX51_PAD_NANDF_RB2__FEC_COL */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 3, 0x000, 0), /* MX51_PAD_NANDF_RB2__GPIO3_10 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 0, 0x000, 0), /* MX51_PAD_NANDF_RB2__NANDF_RB2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 7, 0x000, 0), /* MX51_PAD_NANDF_RB2__USBH3_H3_DP */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB2, 0x500, 0x124, 6, 0xa20, 0), /* MX51_PAD_NANDF_RB2__USBH3_NXT */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 5, 0x000, 0), /* MX51_PAD_NANDF_RB3__DISP1_WAIT */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 2, 0x000, 0), /* MX51_PAD_NANDF_RB3__ECSPI2_MISO */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 1, 0x968, 0), /* MX51_PAD_NANDF_RB3__FEC_RX_CLK */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 3, 0x000, 0), /* MX51_PAD_NANDF_RB3__GPIO3_11 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 0, 0x000, 0), /* MX51_PAD_NANDF_RB3__NANDF_RB3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 6, 0x9f8, 0), /* MX51_PAD_NANDF_RB3__USBH3_CLK */
+	IMX_PIN_REG(MX51_PAD_NANDF_RB3, 0x504, 0x128, 7, 0x000, 0), /* MX51_PAD_NANDF_RB3__USBH3_H3_DM */
+	IMX_PIN_REG(MX51_PAD_GPIO_NAND, 0x514, 0x12c, 0, 0x998, 0), /* MX51_PAD_GPIO_NAND__GPIO_NAND */
+	IMX_PIN_REG(MX51_PAD_GPIO_NAND, 0x514, 0x12c, 1, 0x000, 0), /* MX51_PAD_GPIO_NAND__PATA_INTRQ */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS0, 0x518, 0x130, 3, 0x000, 0), /* MX51_PAD_NANDF_CS0__GPIO3_16 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS0, 0x518, 0x130, 0, 0x000, 0), /* MX51_PAD_NANDF_CS0__NANDF_CS0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS1, 0x51c, 0x134, 3, 0x000, 0), /* MX51_PAD_NANDF_CS1__GPIO3_17 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS1, 0x51c, 0x134, 0, 0x000, 0), /* MX51_PAD_NANDF_CS1__NANDF_CS1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 6, 0x914, 0), /* MX51_PAD_NANDF_CS2__CSPI_SCLK */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 2, 0x000, 0), /* MX51_PAD_NANDF_CS2__FEC_TX_ER */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 3, 0x000, 0), /* MX51_PAD_NANDF_CS2__GPIO3_18 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 0, 0x000, 0), /* MX51_PAD_NANDF_CS2__NANDF_CS2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 1, 0x000, 0), /* MX51_PAD_NANDF_CS2__PATA_CS_0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 5, 0x000, 0), /* MX51_PAD_NANDF_CS2__SD4_CLK */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS2, 0x520, 0x138, 7, 0x000, 0), /* MX51_PAD_NANDF_CS2__USBH3_H1_DP */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 2, 0x000, 0), /* MX51_PAD_NANDF_CS3__FEC_MDC */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 3, 0x000, 0), /* MX51_PAD_NANDF_CS3__GPIO3_19 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 0, 0x000, 0), /* MX51_PAD_NANDF_CS3__NANDF_CS3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 1, 0x000, 0), /* MX51_PAD_NANDF_CS3__PATA_CS_1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 5, 0x000, 0), /* MX51_PAD_NANDF_CS3__SD4_DAT0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS3, 0x524, 0x13c, 7, 0x000, 0), /* MX51_PAD_NANDF_CS3__USBH3_H1_DM */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 2, 0x000, 0), /* MX51_PAD_NANDF_CS4__FEC_TDATA1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 3, 0x000, 0), /* MX51_PAD_NANDF_CS4__GPIO3_20 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 0, 0x000, 0), /* MX51_PAD_NANDF_CS4__NANDF_CS4 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 1, 0x000, 0), /* MX51_PAD_NANDF_CS4__PATA_DA_0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 5, 0x000, 0), /* MX51_PAD_NANDF_CS4__SD4_DAT1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS4, 0x528, 0x140, 7, 0xa24, 0), /* MX51_PAD_NANDF_CS4__USBH3_STP */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 2, 0x000, 0), /* MX51_PAD_NANDF_CS5__FEC_TDATA2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 3, 0x000, 0), /* MX51_PAD_NANDF_CS5__GPIO3_21 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 0, 0x000, 0), /* MX51_PAD_NANDF_CS5__NANDF_CS5 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 1, 0x000, 0), /* MX51_PAD_NANDF_CS5__PATA_DA_1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 5, 0x000, 0), /* MX51_PAD_NANDF_CS5__SD4_DAT2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS5, 0x52c, 0x144, 7, 0xa1c, 0), /* MX51_PAD_NANDF_CS5__USBH3_DIR */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 7, 0x928, 0), /* MX51_PAD_NANDF_CS6__CSPI_SS3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 2, 0x000, 0), /* MX51_PAD_NANDF_CS6__FEC_TDATA3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 3, 0x000, 0), /* MX51_PAD_NANDF_CS6__GPIO3_22 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 0, 0x000, 0), /* MX51_PAD_NANDF_CS6__NANDF_CS6 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 1, 0x000, 0), /* MX51_PAD_NANDF_CS6__PATA_DA_2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS6, 0x530, 0x148, 5, 0x000, 0), /* MX51_PAD_NANDF_CS6__SD4_DAT3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS7, 0x534, 0x14c, 1, 0x000, 0), /* MX51_PAD_NANDF_CS7__FEC_TX_EN */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS7, 0x534, 0x14c, 3, 0x000, 0), /* MX51_PAD_NANDF_CS7__GPIO3_23 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS7, 0x534, 0x14c, 0, 0x000, 0), /* MX51_PAD_NANDF_CS7__NANDF_CS7 */
+	IMX_PIN_REG(MX51_PAD_NANDF_CS7, 0x534, 0x14c, 5, 0x000, 0), /* MX51_PAD_NANDF_CS7__SD3_CLK */
+	IMX_PIN_REG(MX51_PAD_NANDF_RDY_INT, 0x538, 0x150, 2, 0x000, 0), /* MX51_PAD_NANDF_RDY_INT__ECSPI2_SS0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RDY_INT, 0x538, 0x150, 1, 0x974, 0), /* MX51_PAD_NANDF_RDY_INT__FEC_TX_CLK */
+	IMX_PIN_REG(MX51_PAD_NANDF_RDY_INT, 0x538, 0x150, 3, 0x000, 0), /* MX51_PAD_NANDF_RDY_INT__GPIO3_24 */
+	IMX_PIN_REG(MX51_PAD_NANDF_RDY_INT, 0x538, 0x150, 0, 0x938, 0), /* MX51_PAD_NANDF_RDY_INT__NANDF_RDY_INT */
+	IMX_PIN_REG(MX51_PAD_NANDF_RDY_INT, 0x538, 0x150, 5, 0x000, 0), /* MX51_PAD_NANDF_RDY_INT__SD3_CMD */
+	IMX_PIN_REG(MX51_PAD_NANDF_D15, 0x53c, 0x154, 2, 0x000, 0), /* MX51_PAD_NANDF_D15__ECSPI2_MOSI */
+	IMX_PIN_REG(MX51_PAD_NANDF_D15, 0x53c, 0x154, 3, 0x000, 0), /* MX51_PAD_NANDF_D15__GPIO3_25 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D15, 0x53c, 0x154, 0, 0x000, 0), /* MX51_PAD_NANDF_D15__NANDF_D15 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D15, 0x53c, 0x154, 1, 0x000, 0), /* MX51_PAD_NANDF_D15__PATA_DATA15 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D15, 0x53c, 0x154, 5, 0x000, 0), /* MX51_PAD_NANDF_D15__SD3_DAT7 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D14, 0x540, 0x158, 2, 0x934, 0), /* MX51_PAD_NANDF_D14__ECSPI2_SS3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D14, 0x540, 0x158, 3, 0x000, 0), /* MX51_PAD_NANDF_D14__GPIO3_26 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D14, 0x540, 0x158, 0, 0x000, 0), /* MX51_PAD_NANDF_D14__NANDF_D14 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D14, 0x540, 0x158, 1, 0x000, 0), /* MX51_PAD_NANDF_D14__PATA_DATA14 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D14, 0x540, 0x158, 5, 0x000, 0), /* MX51_PAD_NANDF_D14__SD3_DAT6 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D13, 0x544, 0x15c, 2, 0x000, 0), /* MX51_PAD_NANDF_D13__ECSPI2_SS2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D13, 0x544, 0x15c, 3, 0x000, 0), /* MX51_PAD_NANDF_D13__GPIO3_27 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D13, 0x544, 0x15c, 0, 0x000, 0), /* MX51_PAD_NANDF_D13__NANDF_D13 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D13, 0x544, 0x15c, 1, 0x000, 0), /* MX51_PAD_NANDF_D13__PATA_DATA13 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D13, 0x544, 0x15c, 5, 0x000, 0), /* MX51_PAD_NANDF_D13__SD3_DAT5 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D12, 0x548, 0x160, 2, 0x930, 1), /* MX51_PAD_NANDF_D12__ECSPI2_SS1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D12, 0x548, 0x160, 3, 0x000, 0), /* MX51_PAD_NANDF_D12__GPIO3_28 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D12, 0x548, 0x160, 0, 0x000, 0), /* MX51_PAD_NANDF_D12__NANDF_D12 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D12, 0x548, 0x160, 1, 0x000, 0), /* MX51_PAD_NANDF_D12__PATA_DATA12 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D12, 0x548, 0x160, 5, 0x000, 0), /* MX51_PAD_NANDF_D12__SD3_DAT4 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D11, 0x54c, 0x164, 2, 0x96c, 0), /* MX51_PAD_NANDF_D11__FEC_RX_DV */
+	IMX_PIN_REG(MX51_PAD_NANDF_D11, 0x54c, 0x164, 3, 0x000, 0), /* MX51_PAD_NANDF_D11__GPIO3_29 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D11, 0x54c, 0x164, 0, 0x000, 0), /* MX51_PAD_NANDF_D11__NANDF_D11 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D11, 0x54c, 0x164, 1, 0x000, 0), /* MX51_PAD_NANDF_D11__PATA_DATA11 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D11, 0x54c, 0x164, 5, 0x948, 1), /* MX51_PAD_NANDF_D11__SD3_DATA3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D10, 0x550, 0x168, 3, 0x000, 0), /* MX51_PAD_NANDF_D10__GPIO3_30 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D10, 0x550, 0x168, 0, 0x000, 0), /* MX51_PAD_NANDF_D10__NANDF_D10 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D10, 0x550, 0x168, 1, 0x000, 0), /* MX51_PAD_NANDF_D10__PATA_DATA10 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D10, 0x550, 0x168, 5, 0x944, 1), /* MX51_PAD_NANDF_D10__SD3_DATA2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D9, 0x554, 0x16c, 2, 0x958, 0), /* MX51_PAD_NANDF_D9__FEC_RDATA0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D9, 0x554, 0x16c, 3, 0x000, 0), /* MX51_PAD_NANDF_D9__GPIO3_31 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D9, 0x554, 0x16c, 0, 0x000, 0), /* MX51_PAD_NANDF_D9__NANDF_D9 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D9, 0x554, 0x16c, 1, 0x000, 0), /* MX51_PAD_NANDF_D9__PATA_DATA9 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D9, 0x554, 0x16c, 5, 0x940, 1), /* MX51_PAD_NANDF_D9__SD3_DATA1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D8, 0x558, 0x170, 2, 0x000, 0), /* MX51_PAD_NANDF_D8__FEC_TDATA0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D8, 0x558, 0x170, 3, 0x000, 0), /* MX51_PAD_NANDF_D8__GPIO4_0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D8, 0x558, 0x170, 0, 0x000, 0), /* MX51_PAD_NANDF_D8__NANDF_D8 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D8, 0x558, 0x170, 1, 0x000, 0), /* MX51_PAD_NANDF_D8__PATA_DATA8 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D8, 0x558, 0x170, 5, 0x93c, 1), /* MX51_PAD_NANDF_D8__SD3_DATA0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D7, 0x55c, 0x174, 3, 0x000, 0), /* MX51_PAD_NANDF_D7__GPIO4_1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D7, 0x55c, 0x174, 0, 0x000, 0), /* MX51_PAD_NANDF_D7__NANDF_D7 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D7, 0x55c, 0x174, 1, 0x000, 0), /* MX51_PAD_NANDF_D7__PATA_DATA7 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D7, 0x55c, 0x174, 5, 0x9fc, 0), /* MX51_PAD_NANDF_D7__USBH3_DATA0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D6, 0x560, 0x178, 3, 0x000, 0), /* MX51_PAD_NANDF_D6__GPIO4_2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D6, 0x560, 0x178, 0, 0x000, 0), /* MX51_PAD_NANDF_D6__NANDF_D6 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D6, 0x560, 0x178, 1, 0x000, 0), /* MX51_PAD_NANDF_D6__PATA_DATA6 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D6, 0x560, 0x178, 2, 0x000, 0), /* MX51_PAD_NANDF_D6__SD4_LCTL */
+	IMX_PIN_REG(MX51_PAD_NANDF_D6, 0x560, 0x178, 5, 0xa00, 0), /* MX51_PAD_NANDF_D6__USBH3_DATA1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D5, 0x564, 0x17c, 3, 0x000, 0), /* MX51_PAD_NANDF_D5__GPIO4_3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D5, 0x564, 0x17c, 0, 0x000, 0), /* MX51_PAD_NANDF_D5__NANDF_D5 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D5, 0x564, 0x17c, 1, 0x000, 0), /* MX51_PAD_NANDF_D5__PATA_DATA5 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D5, 0x564, 0x17c, 2, 0x000, 0), /* MX51_PAD_NANDF_D5__SD4_WP */
+	IMX_PIN_REG(MX51_PAD_NANDF_D5, 0x564, 0x17c, 5, 0xa04, 0), /* MX51_PAD_NANDF_D5__USBH3_DATA2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D4, 0x568, 0x180, 3, 0x000, 0), /* MX51_PAD_NANDF_D4__GPIO4_4 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D4, 0x568, 0x180, 0, 0x000, 0), /* MX51_PAD_NANDF_D4__NANDF_D4 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D4, 0x568, 0x180, 1, 0x000, 0), /* MX51_PAD_NANDF_D4__PATA_DATA4 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D4, 0x568, 0x180, 2, 0x000, 0), /* MX51_PAD_NANDF_D4__SD4_CD */
+	IMX_PIN_REG(MX51_PAD_NANDF_D4, 0x568, 0x180, 5, 0xa08, 0), /* MX51_PAD_NANDF_D4__USBH3_DATA3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D3, 0x56c, 0x184, 3, 0x000, 0), /* MX51_PAD_NANDF_D3__GPIO4_5 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D3, 0x56c, 0x184, 0, 0x000, 0), /* MX51_PAD_NANDF_D3__NANDF_D3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D3, 0x56c, 0x184, 1, 0x000, 0), /* MX51_PAD_NANDF_D3__PATA_DATA3 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D3, 0x56c, 0x184, 2, 0x000, 0), /* MX51_PAD_NANDF_D3__SD4_DAT4 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D3, 0x56c, 0x184, 5, 0xa0c, 0), /* MX51_PAD_NANDF_D3__USBH3_DATA4 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D2, 0x570, 0x188, 3, 0x000, 0), /* MX51_PAD_NANDF_D2__GPIO4_6 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D2, 0x570, 0x188, 0, 0x000, 0), /* MX51_PAD_NANDF_D2__NANDF_D2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D2, 0x570, 0x188, 1, 0x000, 0), /* MX51_PAD_NANDF_D2__PATA_DATA2 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D2, 0x570, 0x188, 2, 0x000, 0), /* MX51_PAD_NANDF_D2__SD4_DAT5 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D2, 0x570, 0x188, 5, 0xa10, 0), /* MX51_PAD_NANDF_D2__USBH3_DATA5 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D1, 0x574, 0x18c, 3, 0x000, 0), /* MX51_PAD_NANDF_D1__GPIO4_7 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D1, 0x574, 0x18c, 0, 0x000, 0), /* MX51_PAD_NANDF_D1__NANDF_D1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D1, 0x574, 0x18c, 1, 0x000, 0), /* MX51_PAD_NANDF_D1__PATA_DATA1 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D1, 0x574, 0x18c, 2, 0x000, 0), /* MX51_PAD_NANDF_D1__SD4_DAT6 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D1, 0x574, 0x18c, 5, 0xa14, 0), /* MX51_PAD_NANDF_D1__USBH3_DATA6 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D0, 0x578, 0x190, 3, 0x000, 0), /* MX51_PAD_NANDF_D0__GPIO4_8 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D0, 0x578, 0x190, 0, 0x000, 0), /* MX51_PAD_NANDF_D0__NANDF_D0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D0, 0x578, 0x190, 1, 0x000, 0), /* MX51_PAD_NANDF_D0__PATA_DATA0 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D0, 0x578, 0x190, 2, 0x000, 0), /* MX51_PAD_NANDF_D0__SD4_DAT7 */
+	IMX_PIN_REG(MX51_PAD_NANDF_D0, 0x578, 0x190, 5, 0xa18, 0), /* MX51_PAD_NANDF_D0__USBH3_DATA7 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D8, 0x57c, 0x194, 0, 0x000, 0), /* MX51_PAD_CSI1_D8__CSI1_D8 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D8, 0x57c, 0x194, 3, 0x998, 1), /* MX51_PAD_CSI1_D8__GPIO3_12 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D9, 0x580, 0x198, 0, 0x000, 0), /* MX51_PAD_CSI1_D9__CSI1_D9 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D9, 0x580, 0x198, 3, 0x000, 0), /* MX51_PAD_CSI1_D9__GPIO3_13 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D10, 0x584, 0x19c, 0, 0x000, 0), /* MX51_PAD_CSI1_D10__CSI1_D10 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D11, 0x588, 0x1a0, 0, 0x000, 0), /* MX51_PAD_CSI1_D11__CSI1_D11 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D12, 0x58c, 0x1a4, 0, 0x000, 0), /* MX51_PAD_CSI1_D12__CSI1_D12 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D13, 0x590, 0x1a8, 0, 0x000, 0), /* MX51_PAD_CSI1_D13__CSI1_D13 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D14, 0x594, 0x1ac, 0, 0x000, 0), /* MX51_PAD_CSI1_D14__CSI1_D14 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D15, 0x598, 0x1b0, 0, 0x000, 0), /* MX51_PAD_CSI1_D15__CSI1_D15 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D16, 0x59c, 0x1b4, 0, 0x000, 0), /* MX51_PAD_CSI1_D16__CSI1_D16 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D17, 0x5a0, 0x1b8, 0, 0x000, 0), /* MX51_PAD_CSI1_D17__CSI1_D17 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D18, 0x5a4, 0x1bc, 0, 0x000, 0), /* MX51_PAD_CSI1_D18__CSI1_D18 */
+	IMX_PIN_REG(MX51_PAD_CSI1_D19, 0x5a8, 0x1c0, 0, 0x000, 0), /* MX51_PAD_CSI1_D19__CSI1_D19 */
+	IMX_PIN_REG(MX51_PAD_CSI1_VSYNC, 0x5ac, 0x1c4, 0, 0x000, 0), /* MX51_PAD_CSI1_VSYNC__CSI1_VSYNC */
+	IMX_PIN_REG(MX51_PAD_CSI1_VSYNC, 0x5ac, 0x1c4, 3, 0x000, 0), /* MX51_PAD_CSI1_VSYNC__GPIO3_14 */
+	IMX_PIN_REG(MX51_PAD_CSI1_HSYNC, 0x5b0, 0x1c8, 0, 0x000, 0), /* MX51_PAD_CSI1_HSYNC__CSI1_HSYNC */
+	IMX_PIN_REG(MX51_PAD_CSI1_HSYNC, 0x5b0, 0x1c8, 3, 0x000, 0), /* MX51_PAD_CSI1_HSYNC__GPIO3_15 */
+	IMX_PIN_REG(MX51_PAD_CSI1_PIXCLK, 0x5b4, NO_MUX, 0, 0x000, 0), /* MX51_PAD_CSI1_PIXCLK__CSI1_PIXCLK */
+	IMX_PIN_REG(MX51_PAD_CSI1_MCLK, 0x5b8, NO_MUX, 0, 0x000, 0), /* MX51_PAD_CSI1_MCLK__CSI1_MCLK */
+	IMX_PIN_REG(MX51_PAD_CSI2_D12, 0x5bc, 0x1cc, 0, 0x000, 0), /* MX51_PAD_CSI2_D12__CSI2_D12 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D12, 0x5bc, 0x1cc, 3, 0x000, 0), /* MX51_PAD_CSI2_D12__GPIO4_9 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D13, 0x5c0, 0x1d0, 0, 0x000, 0), /* MX51_PAD_CSI2_D13__CSI2_D13 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D13, 0x5c0, 0x1d0, 3, 0x000, 0), /* MX51_PAD_CSI2_D13__GPIO4_10 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D14, 0x5c4, 0x1d4, 0, 0x000, 0), /* MX51_PAD_CSI2_D14__CSI2_D14 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D15, 0x5c8, 0x1d8, 0, 0x000, 0), /* MX51_PAD_CSI2_D15__CSI2_D15 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D16, 0x5cc, 0x1dc, 0, 0x000, 0), /* MX51_PAD_CSI2_D16__CSI2_D16 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D17, 0x5d0, 0x1e0, 0, 0x000, 0), /* MX51_PAD_CSI2_D17__CSI2_D17 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D18, 0x5d4, 0x1e4, 0, 0x000, 0), /* MX51_PAD_CSI2_D18__CSI2_D18 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D18, 0x5d4, 0x1e4, 3, 0x000, 0), /* MX51_PAD_CSI2_D18__GPIO4_11 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D19, 0x5d8, 0x1e8, 0, 0x000, 0), /* MX51_PAD_CSI2_D19__CSI2_D19 */
+	IMX_PIN_REG(MX51_PAD_CSI2_D19, 0x5d8, 0x1e8, 3, 0x000, 0), /* MX51_PAD_CSI2_D19__GPIO4_12 */
+	IMX_PIN_REG(MX51_PAD_CSI2_VSYNC, 0x5dc, 0x1ec, 0, 0x000, 0), /* MX51_PAD_CSI2_VSYNC__CSI2_VSYNC */
+	IMX_PIN_REG(MX51_PAD_CSI2_VSYNC, 0x5dc, 0x1ec, 3, 0x000, 0), /* MX51_PAD_CSI2_VSYNC__GPIO4_13 */
+	IMX_PIN_REG(MX51_PAD_CSI2_HSYNC, 0x5e0, 0x1f0, 0, 0x000, 0), /* MX51_PAD_CSI2_HSYNC__CSI2_HSYNC */
+	IMX_PIN_REG(MX51_PAD_CSI2_HSYNC, 0x5e0, 0x1f0, 3, 0x000, 0), /* MX51_PAD_CSI2_HSYNC__GPIO4_14 */
+	IMX_PIN_REG(MX51_PAD_CSI2_PIXCLK, 0x5e4, 0x1f4, 0, 0x000, 0), /* MX51_PAD_CSI2_PIXCLK__CSI2_PIXCLK */
+	IMX_PIN_REG(MX51_PAD_CSI2_PIXCLK, 0x5e4, 0x1f4, 3, 0x000, 0), /* MX51_PAD_CSI2_PIXCLK__GPIO4_15 */
+	IMX_PIN_REG(MX51_PAD_I2C1_CLK, 0x5e8, 0x1f8, 3, 0x000, 0), /* MX51_PAD_I2C1_CLK__GPIO4_16 */
+	IMX_PIN_REG(MX51_PAD_I2C1_CLK, 0x5e8, 0x1f8, 0, 0x000, 0), /* MX51_PAD_I2C1_CLK__I2C1_CLK */
+	IMX_PIN_REG(MX51_PAD_I2C1_DAT, 0x5ec, 0x1fc, 3, 0x000, 0), /* MX51_PAD_I2C1_DAT__GPIO4_17 */
+	IMX_PIN_REG(MX51_PAD_I2C1_DAT, 0x5ec, 0x1fc, 0, 0x000, 0), /* MX51_PAD_I2C1_DAT__I2C1_DAT */
+	IMX_PIN_REG(MX51_PAD_AUD3_BB_TXD, 0x5f0, 0x200, 0, 0x000, 0), /* MX51_PAD_AUD3_BB_TXD__AUD3_TXD */
+	IMX_PIN_REG(MX51_PAD_AUD3_BB_TXD, 0x5f0, 0x200, 3, 0x000, 0), /* MX51_PAD_AUD3_BB_TXD__GPIO4_18 */
+	IMX_PIN_REG(MX51_PAD_AUD3_BB_RXD, 0x5f4, 0x204, 0, 0x000, 0), /* MX51_PAD_AUD3_BB_RXD__AUD3_RXD */
+	IMX_PIN_REG(MX51_PAD_AUD3_BB_RXD, 0x5f4, 0x204, 3, 0x000, 0), /* MX51_PAD_AUD3_BB_RXD__GPIO4_19 */
+	IMX_PIN_REG(MX51_PAD_AUD3_BB_RXD, 0x5f4, 0x204, 1, 0x9f4, 2), /* MX51_PAD_AUD3_BB_RXD__UART3_RXD */
+	IMX_PIN_REG(MX51_PAD_AUD3_BB_CK, 0x5f8, 0x208, 0, 0x000, 0), /* MX51_PAD_AUD3_BB_CK__AUD3_TXC */
+	IMX_PIN_REG(MX51_PAD_AUD3_BB_CK, 0x5f8, 0x208, 3, 0x000, 0), /* MX51_PAD_AUD3_BB_CK__GPIO4_20 */
+	IMX_PIN_REG(MX51_PAD_AUD3_BB_FS, 0x5fc, 0x20c, 0, 0x000, 0), /* MX51_PAD_AUD3_BB_FS__AUD3_TXFS */
+	IMX_PIN_REG(MX51_PAD_AUD3_BB_FS, 0x5fc, 0x20c, 3, 0x000, 0), /* MX51_PAD_AUD3_BB_FS__GPIO4_21 */
+	IMX_PIN_REG(MX51_PAD_AUD3_BB_FS, 0x5fc, 0x20c, 1, 0x000, 0), /* MX51_PAD_AUD3_BB_FS__UART3_TXD */
+	IMX_PIN_REG(MX51_PAD_CSPI1_MOSI, 0x600, 0x210, 0, 0x000, 0), /* MX51_PAD_CSPI1_MOSI__ECSPI1_MOSI */
+	IMX_PIN_REG(MX51_PAD_CSPI1_MOSI, 0x600, 0x210, 3, 0x000, 0), /* MX51_PAD_CSPI1_MOSI__GPIO4_22 */
+	IMX_PIN_REG(MX51_PAD_CSPI1_MOSI, 0x600, 0x210, 1, 0x9b4, 1), /* MX51_PAD_CSPI1_MOSI__I2C1_SDA */
+	IMX_PIN_REG(MX51_PAD_CSPI1_MISO, 0x604, 0x214, 1, 0x8c4, 1), /* MX51_PAD_CSPI1_MISO__AUD4_RXD */
+	IMX_PIN_REG(MX51_PAD_CSPI1_MISO, 0x604, 0x214, 0, 0x000, 0), /* MX51_PAD_CSPI1_MISO__ECSPI1_MISO */
+	IMX_PIN_REG(MX51_PAD_CSPI1_MISO, 0x604, 0x214, 3, 0x000, 0), /* MX51_PAD_CSPI1_MISO__GPIO4_23 */
+	IMX_PIN_REG(MX51_PAD_CSPI1_SS0, 0x608, 0x218, 1, 0x8cc, 1), /* MX51_PAD_CSPI1_SS0__AUD4_TXC */
+	IMX_PIN_REG(MX51_PAD_CSPI1_SS0, 0x608, 0x218, 0, 0x000, 0), /* MX51_PAD_CSPI1_SS0__ECSPI1_SS0 */
+	IMX_PIN_REG(MX51_PAD_CSPI1_SS0, 0x608, 0x218, 3, 0x000, 0), /* MX51_PAD_CSPI1_SS0__GPIO4_24 */
+	IMX_PIN_REG(MX51_PAD_CSPI1_SS1, 0x60c, 0x21c, 1, 0x8c8, 1), /* MX51_PAD_CSPI1_SS1__AUD4_TXD */
+	IMX_PIN_REG(MX51_PAD_CSPI1_SS1, 0x60c, 0x21c, 0, 0x000, 0), /* MX51_PAD_CSPI1_SS1__ECSPI1_SS1 */
+	IMX_PIN_REG(MX51_PAD_CSPI1_SS1, 0x60c, 0x21c, 3, 0x000, 0), /* MX51_PAD_CSPI1_SS1__GPIO4_25 */
+	IMX_PIN_REG(MX51_PAD_CSPI1_RDY, 0x610, 0x220, 1, 0x8d0, 1), /* MX51_PAD_CSPI1_RDY__AUD4_TXFS */
+	IMX_PIN_REG(MX51_PAD_CSPI1_RDY, 0x610, 0x220, 0, 0x000, 0), /* MX51_PAD_CSPI1_RDY__ECSPI1_RDY */
+	IMX_PIN_REG(MX51_PAD_CSPI1_RDY, 0x610, 0x220, 3, 0x000, 0), /* MX51_PAD_CSPI1_RDY__GPIO4_26 */
+	IMX_PIN_REG(MX51_PAD_CSPI1_SCLK, 0x614, 0x224, 0, 0x000, 0), /* MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK */
+	IMX_PIN_REG(MX51_PAD_CSPI1_SCLK, 0x614, 0x224, 3, 0x000, 0), /* MX51_PAD_CSPI1_SCLK__GPIO4_27 */
+	IMX_PIN_REG(MX51_PAD_CSPI1_SCLK, 0x614, 0x224, 1, 0x9b0, 1), /* MX51_PAD_CSPI1_SCLK__I2C1_SCL */
+	IMX_PIN_REG(MX51_PAD_UART1_RXD, 0x618, 0x228, 3, 0x000, 0), /* MX51_PAD_UART1_RXD__GPIO4_28 */
+	IMX_PIN_REG(MX51_PAD_UART1_RXD, 0x618, 0x228, 0, 0x9e4, 0), /* MX51_PAD_UART1_RXD__UART1_RXD */
+	IMX_PIN_REG(MX51_PAD_UART1_TXD, 0x61c, 0x22c, 3, 0x000, 0), /* MX51_PAD_UART1_TXD__GPIO4_29 */
+	IMX_PIN_REG(MX51_PAD_UART1_TXD, 0x61c, 0x22c, 1, 0x000, 0), /* MX51_PAD_UART1_TXD__PWM2_PWMO */
+	IMX_PIN_REG(MX51_PAD_UART1_TXD, 0x61c, 0x22c, 0, 0x000, 0), /* MX51_PAD_UART1_TXD__UART1_TXD */
+	IMX_PIN_REG(MX51_PAD_UART1_RTS, 0x620, 0x230, 3, 0x000, 0), /* MX51_PAD_UART1_RTS__GPIO4_30 */
+	IMX_PIN_REG(MX51_PAD_UART1_RTS, 0x620, 0x230, 0, 0x9e0, 0), /* MX51_PAD_UART1_RTS__UART1_RTS */
+	IMX_PIN_REG(MX51_PAD_UART1_CTS, 0x624, 0x234, 3, 0x000, 0), /* MX51_PAD_UART1_CTS__GPIO4_31 */
+	IMX_PIN_REG(MX51_PAD_UART1_CTS, 0x624, 0x234, 0, 0x000, 0), /* MX51_PAD_UART1_CTS__UART1_CTS */
+	IMX_PIN_REG(MX51_PAD_UART2_RXD, 0x628, 0x238, 1, 0x000, 0), /* MX51_PAD_UART2_RXD__FIRI_TXD */
+	IMX_PIN_REG(MX51_PAD_UART2_RXD, 0x628, 0x238, 3, 0x000, 0), /* MX51_PAD_UART2_RXD__GPIO1_20 */
+	IMX_PIN_REG(MX51_PAD_UART2_RXD, 0x628, 0x238, 0, 0x9ec, 2), /* MX51_PAD_UART2_RXD__UART2_RXD */
+	IMX_PIN_REG(MX51_PAD_UART2_TXD, 0x62c, 0x23c, 1, 0x000, 0), /* MX51_PAD_UART2_TXD__FIRI_RXD */
+	IMX_PIN_REG(MX51_PAD_UART2_TXD, 0x62c, 0x23c, 3, 0x000, 0), /* MX51_PAD_UART2_TXD__GPIO1_21 */
+	IMX_PIN_REG(MX51_PAD_UART2_TXD, 0x62c, 0x23c, 0, 0x000, 0), /* MX51_PAD_UART2_TXD__UART2_TXD */
+	IMX_PIN_REG(MX51_PAD_UART3_RXD, 0x630, 0x240, 2, 0x000, 0), /* MX51_PAD_UART3_RXD__CSI1_D0 */
+	IMX_PIN_REG(MX51_PAD_UART3_RXD, 0x630, 0x240, 3, 0x000, 0), /* MX51_PAD_UART3_RXD__GPIO1_22 */
+	IMX_PIN_REG(MX51_PAD_UART3_RXD, 0x630, 0x240, 0, 0x000, 0), /* MX51_PAD_UART3_RXD__UART1_DTR */
+	IMX_PIN_REG(MX51_PAD_UART3_RXD, 0x630, 0x240, 1, 0x9f4, 4), /* MX51_PAD_UART3_RXD__UART3_RXD */
+	IMX_PIN_REG(MX51_PAD_UART3_TXD, 0x634, 0x244, 2, 0x000, 0), /* MX51_PAD_UART3_TXD__CSI1_D1 */
+	IMX_PIN_REG(MX51_PAD_UART3_TXD, 0x634, 0x244, 3, 0x000, 0), /* MX51_PAD_UART3_TXD__GPIO1_23 */
+	IMX_PIN_REG(MX51_PAD_UART3_TXD, 0x634, 0x244, 0, 0x000, 0), /* MX51_PAD_UART3_TXD__UART1_DSR */
+	IMX_PIN_REG(MX51_PAD_UART3_TXD, 0x634, 0x244, 1, 0x000, 0), /* MX51_PAD_UART3_TXD__UART3_TXD */
+	IMX_PIN_REG(MX51_PAD_OWIRE_LINE, 0x638, 0x248, 3, 0x000, 0), /* MX51_PAD_OWIRE_LINE__GPIO1_24 */
+	IMX_PIN_REG(MX51_PAD_OWIRE_LINE, 0x638, 0x248, 0, 0x000, 0), /* MX51_PAD_OWIRE_LINE__OWIRE_LINE */
+	IMX_PIN_REG(MX51_PAD_OWIRE_LINE, 0x638, 0x248, 6, 0x000, 0), /* MX51_PAD_OWIRE_LINE__SPDIF_OUT */
+	IMX_PIN_REG(MX51_PAD_KEY_ROW0, 0x63c, 0x24c, 0, 0x000, 0), /* MX51_PAD_KEY_ROW0__KEY_ROW0 */
+	IMX_PIN_REG(MX51_PAD_KEY_ROW1, 0x640, 0x250, 0, 0x000, 0), /* MX51_PAD_KEY_ROW1__KEY_ROW1 */
+	IMX_PIN_REG(MX51_PAD_KEY_ROW2, 0x644, 0x254, 0, 0x000, 0), /* MX51_PAD_KEY_ROW2__KEY_ROW2 */
+	IMX_PIN_REG(MX51_PAD_KEY_ROW3, 0x648, 0x258, 0, 0x000, 0), /* MX51_PAD_KEY_ROW3__KEY_ROW3 */
+	IMX_PIN_REG(MX51_PAD_KEY_COL0, 0x64c, 0x25c, 0, 0x000, 0), /* MX51_PAD_KEY_COL0__KEY_COL0 */
+	IMX_PIN_REG(MX51_PAD_KEY_COL0, 0x64c, 0x25c, 7, 0x90c, 0), /* MX51_PAD_KEY_COL0__PLL1_BYP */
+	IMX_PIN_REG(MX51_PAD_KEY_COL1, 0x650, 0x260, 0, 0x000, 0), /* MX51_PAD_KEY_COL1__KEY_COL1 */
+	IMX_PIN_REG(MX51_PAD_KEY_COL1, 0x650, 0x260, 7, 0x910, 0), /* MX51_PAD_KEY_COL1__PLL2_BYP */
+	IMX_PIN_REG(MX51_PAD_KEY_COL2, 0x654, 0x264, 0, 0x000, 0), /* MX51_PAD_KEY_COL2__KEY_COL2 */
+	IMX_PIN_REG(MX51_PAD_KEY_COL2, 0x654, 0x264, 7, 0x000, 0), /* MX51_PAD_KEY_COL2__PLL3_BYP */
+	IMX_PIN_REG(MX51_PAD_KEY_COL3, 0x658, 0x268, 0, 0x000, 0), /* MX51_PAD_KEY_COL3__KEY_COL3 */
+	IMX_PIN_REG(MX51_PAD_KEY_COL4, 0x65c, 0x26c, 3, 0x9b8, 1), /* MX51_PAD_KEY_COL4__I2C2_SCL */
+	IMX_PIN_REG(MX51_PAD_KEY_COL4, 0x65c, 0x26c, 0, 0x000, 0), /* MX51_PAD_KEY_COL4__KEY_COL4 */
+	IMX_PIN_REG(MX51_PAD_KEY_COL4, 0x65c, 0x26c, 6, 0x000, 0), /* MX51_PAD_KEY_COL4__SPDIF_OUT1 */
+	IMX_PIN_REG(MX51_PAD_KEY_COL4, 0x65c, 0x26c, 1, 0x000, 0), /* MX51_PAD_KEY_COL4__UART1_RI */
+	IMX_PIN_REG(MX51_PAD_KEY_COL4, 0x65c, 0x26c, 2, 0x9f0, 4), /* MX51_PAD_KEY_COL4__UART3_RTS */
+	IMX_PIN_REG(MX51_PAD_KEY_COL5, 0x660, 0x270, 3, 0x9bc, 1), /* MX51_PAD_KEY_COL5__I2C2_SDA */
+	IMX_PIN_REG(MX51_PAD_KEY_COL5, 0x660, 0x270, 0, 0x000, 0), /* MX51_PAD_KEY_COL5__KEY_COL5 */
+	IMX_PIN_REG(MX51_PAD_KEY_COL5, 0x660, 0x270, 1, 0x000, 0), /* MX51_PAD_KEY_COL5__UART1_DCD */
+	IMX_PIN_REG(MX51_PAD_KEY_COL5, 0x660, 0x270, 2, 0x000, 0), /* MX51_PAD_KEY_COL5__UART3_CTS */
+	IMX_PIN_REG(MX51_PAD_USBH1_CLK, 0x678, 0x278, 1, 0x914, 1), /* MX51_PAD_USBH1_CLK__CSPI_SCLK */
+	IMX_PIN_REG(MX51_PAD_USBH1_CLK, 0x678, 0x278, 2, 0x000, 0), /* MX51_PAD_USBH1_CLK__GPIO1_25 */
+	IMX_PIN_REG(MX51_PAD_USBH1_CLK, 0x678, 0x278, 5, 0x9b8, 2), /* MX51_PAD_USBH1_CLK__I2C2_SCL */
+	IMX_PIN_REG(MX51_PAD_USBH1_CLK, 0x678, 0x278, 0, 0x000, 0), /* MX51_PAD_USBH1_CLK__USBH1_CLK */
+	IMX_PIN_REG(MX51_PAD_USBH1_DIR, 0x67c, 0x27c, 1, 0x91c, 1), /* MX51_PAD_USBH1_DIR__CSPI_MOSI */
+	IMX_PIN_REG(MX51_PAD_USBH1_DIR, 0x67c, 0x27c, 2, 0x000, 0), /* MX51_PAD_USBH1_DIR__GPIO1_26 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DIR, 0x67c, 0x27c, 5, 0x9bc, 2), /* MX51_PAD_USBH1_DIR__I2C2_SDA */
+	IMX_PIN_REG(MX51_PAD_USBH1_DIR, 0x67c, 0x27c, 0, 0x000, 0), /* MX51_PAD_USBH1_DIR__USBH1_DIR */
+	IMX_PIN_REG(MX51_PAD_USBH1_STP, 0x680, 0x280, 1, 0x000, 0), /* MX51_PAD_USBH1_STP__CSPI_RDY */
+	IMX_PIN_REG(MX51_PAD_USBH1_STP, 0x680, 0x280, 2, 0x000, 0), /* MX51_PAD_USBH1_STP__GPIO1_27 */
+	IMX_PIN_REG(MX51_PAD_USBH1_STP, 0x680, 0x280, 5, 0x9f4, 6), /* MX51_PAD_USBH1_STP__UART3_RXD */
+	IMX_PIN_REG(MX51_PAD_USBH1_STP, 0x680, 0x280, 0, 0x000, 0), /* MX51_PAD_USBH1_STP__USBH1_STP */
+	IMX_PIN_REG(MX51_PAD_USBH1_NXT, 0x684, 0x284, 1, 0x918, 0), /* MX51_PAD_USBH1_NXT__CSPI_MISO */
+	IMX_PIN_REG(MX51_PAD_USBH1_NXT, 0x684, 0x284, 2, 0x000, 0), /* MX51_PAD_USBH1_NXT__GPIO1_28 */
+	IMX_PIN_REG(MX51_PAD_USBH1_NXT, 0x684, 0x284, 5, 0x000, 0), /* MX51_PAD_USBH1_NXT__UART3_TXD */
+	IMX_PIN_REG(MX51_PAD_USBH1_NXT, 0x684, 0x284, 0, 0x000, 0), /* MX51_PAD_USBH1_NXT__USBH1_NXT */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA0, 0x688, 0x288, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA0__GPIO1_11 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA0, 0x688, 0x288, 1, 0x000, 0), /* MX51_PAD_USBH1_DATA0__UART2_CTS */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA0, 0x688, 0x288, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA0__USBH1_DATA0 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA1, 0x68c, 0x28c, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA1__GPIO1_12 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA1, 0x68c, 0x28c, 1, 0x9ec, 4), /* MX51_PAD_USBH1_DATA1__UART2_RXD */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA1, 0x68c, 0x28c, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA1__USBH1_DATA1 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA2, 0x690, 0x290, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA2__GPIO1_13 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA2, 0x690, 0x290, 1, 0x000, 0), /* MX51_PAD_USBH1_DATA2__UART2_TXD */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA2, 0x690, 0x290, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA2__USBH1_DATA2 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA3, 0x694, 0x294, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA3__GPIO1_14 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA3, 0x694, 0x294, 1, 0x9e8, 5), /* MX51_PAD_USBH1_DATA3__UART2_RTS */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA3, 0x694, 0x294, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA3__USBH1_DATA3 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA4, 0x698, 0x298, 1, 0x000, 0), /* MX51_PAD_USBH1_DATA4__CSPI_SS0 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA4, 0x698, 0x298, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA4__GPIO1_15 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA4, 0x698, 0x298, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA4__USBH1_DATA4 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA5, 0x69c, 0x29c, 1, 0x920, 0), /* MX51_PAD_USBH1_DATA5__CSPI_SS1 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA5, 0x69c, 0x29c, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA5__GPIO1_16 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA5, 0x69c, 0x29c, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA5__USBH1_DATA5 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA6, 0x6a0, 0x2a0, 1, 0x928, 1), /* MX51_PAD_USBH1_DATA6__CSPI_SS3 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA6, 0x6a0, 0x2a0, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA6__GPIO1_17 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA6, 0x6a0, 0x2a0, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA6__USBH1_DATA6 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA7, 0x6a4, 0x2a4, 1, 0x000, 0), /* MX51_PAD_USBH1_DATA7__ECSPI1_SS3 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA7, 0x6a4, 0x2a4, 5, 0x934, 1), /* MX51_PAD_USBH1_DATA7__ECSPI2_SS3 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA7, 0x6a4, 0x2a4, 2, 0x000, 0), /* MX51_PAD_USBH1_DATA7__GPIO1_18 */
+	IMX_PIN_REG(MX51_PAD_USBH1_DATA7, 0x6a4, 0x2a4, 0, 0x000, 0), /* MX51_PAD_USBH1_DATA7__USBH1_DATA7 */
+	IMX_PIN_REG(MX51_PAD_DI1_PIN11, 0x6a8, 0x2a8, 0, 0x000, 0), /* MX51_PAD_DI1_PIN11__DI1_PIN11 */
+	IMX_PIN_REG(MX51_PAD_DI1_PIN11, 0x6a8, 0x2a8, 7, 0x000, 0), /* MX51_PAD_DI1_PIN11__ECSPI1_SS2 */
+	IMX_PIN_REG(MX51_PAD_DI1_PIN11, 0x6a8, 0x2a8, 4, 0x000, 0), /* MX51_PAD_DI1_PIN11__GPIO3_0 */
+	IMX_PIN_REG(MX51_PAD_DI1_PIN12, 0x6ac, 0x2ac, 0, 0x000, 0), /* MX51_PAD_DI1_PIN12__DI1_PIN12 */
+	IMX_PIN_REG(MX51_PAD_DI1_PIN12, 0x6ac, 0x2ac, 4, 0x978, 1), /* MX51_PAD_DI1_PIN12__GPIO3_1 */
+	IMX_PIN_REG(MX51_PAD_DI1_PIN13, 0x6b0, 0x2b0, 0, 0x000, 0), /* MX51_PAD_DI1_PIN13__DI1_PIN13 */
+	IMX_PIN_REG(MX51_PAD_DI1_PIN13, 0x6b0, 0x2b0, 4, 0x97c, 1), /* MX51_PAD_DI1_PIN13__GPIO3_2 */
+	IMX_PIN_REG(MX51_PAD_DI1_D0_CS, 0x6b4, 0x2b4, 0, 0x000, 0), /* MX51_PAD_DI1_D0_CS__DI1_D0_CS */
+	IMX_PIN_REG(MX51_PAD_DI1_D0_CS, 0x6b4, 0x2b4, 4, 0x980, 1), /* MX51_PAD_DI1_D0_CS__GPIO3_3 */
+	IMX_PIN_REG(MX51_PAD_DI1_D1_CS, 0x6b8, 0x2b8, 0, 0x000, 0), /* MX51_PAD_DI1_D1_CS__DI1_D1_CS */
+	IMX_PIN_REG(MX51_PAD_DI1_D1_CS, 0x6b8, 0x2b8, 2, 0x000, 0), /* MX51_PAD_DI1_D1_CS__DISP1_PIN14 */
+	IMX_PIN_REG(MX51_PAD_DI1_D1_CS, 0x6b8, 0x2b8, 3, 0x000, 0), /* MX51_PAD_DI1_D1_CS__DISP1_PIN5 */
+	IMX_PIN_REG(MX51_PAD_DI1_D1_CS, 0x6b8, 0x2b8, 4, 0x984, 1), /* MX51_PAD_DI1_D1_CS__GPIO3_4 */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIN, 0x6bc, 0x2bc, 2, 0x9a4, 1), /* MX51_PAD_DISPB2_SER_DIN__DISP1_PIN1 */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIN, 0x6bc, 0x2bc, 0, 0x9c4, 0), /* MX51_PAD_DISPB2_SER_DIN__DISPB2_SER_DIN */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIN, 0x6bc, 0x2bc, 4, 0x988, 1), /* MX51_PAD_DISPB2_SER_DIN__GPIO3_5 */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIO, 0x6c0, 0x2c0, 3, 0x000, 0), /* MX51_PAD_DISPB2_SER_DIO__DISP1_PIN6 */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIO, 0x6c0, 0x2c0, 0, 0x9c4, 1), /* MX51_PAD_DISPB2_SER_DIO__DISPB2_SER_DIO */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_DIO, 0x6c0, 0x2c0, 4, 0x98c, 1), /* MX51_PAD_DISPB2_SER_DIO__GPIO3_6 */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_CLK, 0x6c4, 0x2c4, 2, 0x000, 0), /* MX51_PAD_DISPB2_SER_CLK__DISP1_PIN17 */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_CLK, 0x6c4, 0x2c4, 3, 0x000, 0), /* MX51_PAD_DISPB2_SER_CLK__DISP1_PIN7 */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_CLK, 0x6c4, 0x2c4, 0, 0x000, 0), /* MX51_PAD_DISPB2_SER_CLK__DISPB2_SER_CLK */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_CLK, 0x6c4, 0x2c4, 4, 0x990, 1), /* MX51_PAD_DISPB2_SER_CLK__GPIO3_7 */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 2, 0x000, 0), /* MX51_PAD_DISPB2_SER_RS__DISP1_EXT_CLK */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 2, 0x000, 0), /* MX51_PAD_DISPB2_SER_RS__DISP1_PIN16 */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 3, 0x000, 0), /* MX51_PAD_DISPB2_SER_RS__DISP1_PIN8 */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 0, 0x000, 0), /* MX51_PAD_DISPB2_SER_RS__DISPB2_SER_RS */
+	IMX_PIN_REG(MX51_PAD_DISPB2_SER_RS, 0x6c8, 0x2c8, 4, 0x994, 1), /* MX51_PAD_DISPB2_SER_RS__GPIO3_8 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT0, 0x6cc, 0x2cc, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT0__DISP1_DAT0 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT1, 0x6d0, 0x2d0, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT1__DISP1_DAT1 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT2, 0x6d4, 0x2d4, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT2__DISP1_DAT2 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT3, 0x6d8, 0x2d8, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT3__DISP1_DAT3 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT4, 0x6dc, 0x2dc, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT4__DISP1_DAT4 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT5, 0x6e0, 0x2e0, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT5__DISP1_DAT5 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT6, 0x6e4, 0x2e4, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT6__BOOT_USB_SRC */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT6, 0x6e4, 0x2e4, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT6__DISP1_DAT6 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT7, 0x6e8, 0x2e8, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT7__BOOT_EEPROM_CFG */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT7, 0x6e8, 0x2e8, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT7__DISP1_DAT7 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT8, 0x6ec, 0x2ec, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT8__BOOT_SRC0 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT8, 0x6ec, 0x2ec, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT8__DISP1_DAT8 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT9, 0x6f0, 0x2f0, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT9__BOOT_SRC1 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT9, 0x6f0, 0x2f0, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT9__DISP1_DAT9 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT10, 0x6f4, 0x2f4, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT10__BOOT_SPARE_SIZE */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT10, 0x6f4, 0x2f4, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT10__DISP1_DAT10 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT11, 0x6f8, 0x2f8, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT11__BOOT_LPB_FREQ2 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT11, 0x6f8, 0x2f8, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT11__DISP1_DAT11 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT12, 0x6fc, 0x2fc, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT12__BOOT_MLC_SEL */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT12, 0x6fc, 0x2fc, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT12__DISP1_DAT12 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT13, 0x700, 0x300, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT13__BOOT_MEM_CTL0 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT13, 0x700, 0x300, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT13__DISP1_DAT13 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT14, 0x704, 0x304, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT14__BOOT_MEM_CTL1 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT14, 0x704, 0x304, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT14__DISP1_DAT14 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT15, 0x708, 0x308, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT15__BOOT_BUS_WIDTH */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT15, 0x708, 0x308, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT15__DISP1_DAT15 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT16, 0x70c, 0x30c, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT16__BOOT_PAGE_SIZE0 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT16, 0x70c, 0x30c, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT16__DISP1_DAT16 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT17, 0x710, 0x310, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT17__BOOT_PAGE_SIZE1 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT17, 0x710, 0x310, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT17__DISP1_DAT17 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT18, 0x714, 0x314, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT18__BOOT_WEIM_MUXED0 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT18, 0x714, 0x314, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT18__DISP1_DAT18 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT18, 0x714, 0x314, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT18__DISP2_PIN11 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT18, 0x714, 0x314, 4, 0x000, 0), /* MX51_PAD_DISP1_DAT18__DISP2_PIN5 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT19, 0x718, 0x318, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT19__BOOT_WEIM_MUXED1 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT19, 0x718, 0x318, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT19__DISP1_DAT19 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT19, 0x718, 0x318, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT19__DISP2_PIN12 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT19, 0x718, 0x318, 4, 0x000, 0), /* MX51_PAD_DISP1_DAT19__DISP2_PIN6 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT20, 0x71c, 0x31c, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT20__BOOT_MEM_TYPE0 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT20, 0x71c, 0x31c, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT20__DISP1_DAT20 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT20, 0x71c, 0x31c, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT20__DISP2_PIN13 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT20, 0x71c, 0x31c, 4, 0x000, 0), /* MX51_PAD_DISP1_DAT20__DISP2_PIN7 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT21, 0x720, 0x320, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT21__BOOT_MEM_TYPE1 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT21, 0x720, 0x320, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT21__DISP1_DAT21 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT21, 0x720, 0x320, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT21__DISP2_PIN14 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT21, 0x720, 0x320, 4, 0x000, 0), /* MX51_PAD_DISP1_DAT21__DISP2_PIN8 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT22, 0x724, 0x324, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT22__BOOT_LPB_FREQ0 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT22, 0x724, 0x324, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT22__DISP1_DAT22 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT22, 0x724, 0x324, 6, 0x000, 0), /* MX51_PAD_DISP1_DAT22__DISP2_D0_CS */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT22, 0x724, 0x324, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT22__DISP2_DAT16 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT23, 0x728, 0x328, 7, 0x000, 0), /* MX51_PAD_DISP1_DAT23__BOOT_LPB_FREQ1 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT23, 0x728, 0x328, 0, 0x000, 0), /* MX51_PAD_DISP1_DAT23__DISP1_DAT23 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT23, 0x728, 0x328, 6, 0x000, 0), /* MX51_PAD_DISP1_DAT23__DISP2_D1_CS */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT23, 0x728, 0x328, 5, 0x000, 0), /* MX51_PAD_DISP1_DAT23__DISP2_DAT17 */
+	IMX_PIN_REG(MX51_PAD_DISP1_DAT23, 0x728, 0x328, 4, 0x000, 0), /* MX51_PAD_DISP1_DAT23__DISP2_SER_CS */
+	IMX_PIN_REG(MX51_PAD_DI1_PIN3, 0x72c, 0x32c, 0, 0x000, 0), /* MX51_PAD_DI1_PIN3__DI1_PIN3 */
+	IMX_PIN_REG(MX51_PAD_DI1_PIN2, 0x734, 0x330, 0, 0x000, 0), /* MX51_PAD_DI1_PIN2__DI1_PIN2 */
+	IMX_PIN_REG(MX51_PAD_DI_GP2, 0x740, 0x338, 0, 0x000, 0), /* MX51_PAD_DI_GP2__DISP1_SER_CLK */
+	IMX_PIN_REG(MX51_PAD_DI_GP2, 0x740, 0x338, 2, 0x9a8, 1), /* MX51_PAD_DI_GP2__DISP2_WAIT */
+	IMX_PIN_REG(MX51_PAD_DI_GP3, 0x744, 0x33c, 3, 0x9a0, 1), /* MX51_PAD_DI_GP3__CSI1_DATA_EN */
+	IMX_PIN_REG(MX51_PAD_DI_GP3, 0x744, 0x33c, 0, 0x9c0, 0), /* MX51_PAD_DI_GP3__DISP1_SER_DIO */
+	IMX_PIN_REG(MX51_PAD_DI_GP3, 0x744, 0x33c, 2, 0x000, 0), /* MX51_PAD_DI_GP3__FEC_TX_ER */
+	IMX_PIN_REG(MX51_PAD_DI2_PIN4, 0x748, 0x340, 3, 0x99c, 1), /* MX51_PAD_DI2_PIN4__CSI2_DATA_EN */
+	IMX_PIN_REG(MX51_PAD_DI2_PIN4, 0x748, 0x340, 0, 0x000, 0), /* MX51_PAD_DI2_PIN4__DI2_PIN4 */
+	IMX_PIN_REG(MX51_PAD_DI2_PIN4, 0x748, 0x340, 2, 0x950, 1), /* MX51_PAD_DI2_PIN4__FEC_CRS */
+	IMX_PIN_REG(MX51_PAD_DI2_PIN2, 0x74c, 0x344, 0, 0x000, 0), /* MX51_PAD_DI2_PIN2__DI2_PIN2 */
+	IMX_PIN_REG(MX51_PAD_DI2_PIN2, 0x74c, 0x344, 2, 0x000, 0), /* MX51_PAD_DI2_PIN2__FEC_MDC */
+	IMX_PIN_REG(MX51_PAD_DI2_PIN3, 0x750, 0x348, 0, 0x000, 0), /* MX51_PAD_DI2_PIN3__DI2_PIN3 */
+	IMX_PIN_REG(MX51_PAD_DI2_PIN3, 0x750, 0x348, 2, 0x954, 1), /* MX51_PAD_DI2_PIN3__FEC_MDIO */
+	IMX_PIN_REG(MX51_PAD_DI2_DISP_CLK, 0x754, 0x34c, 0, 0x000, 0), /* MX51_PAD_DI2_DISP_CLK__DI2_DISP_CLK */
+	IMX_PIN_REG(MX51_PAD_DI2_DISP_CLK, 0x754, 0x34c, 2, 0x95c, 1), /* MX51_PAD_DI2_DISP_CLK__FEC_RDATA1 */
+	IMX_PIN_REG(MX51_PAD_DI_GP4, 0x758, 0x350, 4, 0x000, 0), /* MX51_PAD_DI_GP4__DI2_PIN15 */
+	IMX_PIN_REG(MX51_PAD_DI_GP4, 0x758, 0x350, 0, 0x9c0, 1), /* MX51_PAD_DI_GP4__DISP1_SER_DIN */
+	IMX_PIN_REG(MX51_PAD_DI_GP4, 0x758, 0x350, 3, 0x000, 0), /* MX51_PAD_DI_GP4__DISP2_PIN1 */
+	IMX_PIN_REG(MX51_PAD_DI_GP4, 0x758, 0x350, 2, 0x960, 1), /* MX51_PAD_DI_GP4__FEC_RDATA2 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT0, 0x75c, 0x354, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT0__DISP2_DAT0 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT0, 0x75c, 0x354, 2, 0x964, 1), /* MX51_PAD_DISP2_DAT0__FEC_RDATA3 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT0, 0x75c, 0x354, 4, 0x9c8, 1), /* MX51_PAD_DISP2_DAT0__KEY_COL6 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT0, 0x75c, 0x354, 5, 0x9f4, 8), /* MX51_PAD_DISP2_DAT0__UART3_RXD */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT0, 0x75c, 0x354, 3, 0x9f8, 1), /* MX51_PAD_DISP2_DAT0__USBH3_CLK */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT1, 0x760, 0x358, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT1__DISP2_DAT1 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT1, 0x760, 0x358, 2, 0x970, 1), /* MX51_PAD_DISP2_DAT1__FEC_RX_ER */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT1, 0x760, 0x358, 4, 0x9cc, 1), /* MX51_PAD_DISP2_DAT1__KEY_COL7 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT1, 0x760, 0x358, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT1__UART3_TXD */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT1, 0x760, 0x358, 3, 0xa1c, 1), /* MX51_PAD_DISP2_DAT1__USBH3_DIR */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT2, 0x764, 0x35c, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT2__DISP2_DAT2 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT3, 0x768, 0x360, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT3__DISP2_DAT3 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT4, 0x76c, 0x364, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT4__DISP2_DAT4 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT5, 0x770, 0x368, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT5__DISP2_DAT5 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT6, 0x774, 0x36c, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT6__DISP2_DAT6 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT6, 0x774, 0x36c, 2, 0x000, 0), /* MX51_PAD_DISP2_DAT6__FEC_TDATA1 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT6, 0x774, 0x36c, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT6__GPIO1_19 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT6, 0x774, 0x36c, 4, 0x9d0, 1), /* MX51_PAD_DISP2_DAT6__KEY_ROW4 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT6, 0x774, 0x36c, 3, 0xa24, 1), /* MX51_PAD_DISP2_DAT6__USBH3_STP */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT7, 0x778, 0x370, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT7__DISP2_DAT7 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT7, 0x778, 0x370, 2, 0x000, 0), /* MX51_PAD_DISP2_DAT7__FEC_TDATA2 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT7, 0x778, 0x370, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT7__GPIO1_29 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT7, 0x778, 0x370, 4, 0x9d4, 1), /* MX51_PAD_DISP2_DAT7__KEY_ROW5 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT7, 0x778, 0x370, 3, 0xa20, 1), /* MX51_PAD_DISP2_DAT7__USBH3_NXT */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT8, 0x77c, 0x374, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT8__DISP2_DAT8 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT8, 0x77c, 0x374, 2, 0x000, 0), /* MX51_PAD_DISP2_DAT8__FEC_TDATA3 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT8, 0x77c, 0x374, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT8__GPIO1_30 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT8, 0x77c, 0x374, 4, 0x9d8, 1), /* MX51_PAD_DISP2_DAT8__KEY_ROW6 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT8, 0x77c, 0x374, 3, 0x9fc, 1), /* MX51_PAD_DISP2_DAT8__USBH3_DATA0 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT9, 0x780, 0x378, 4, 0x8f4, 1), /* MX51_PAD_DISP2_DAT9__AUD6_RXC */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT9, 0x780, 0x378, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT9__DISP2_DAT9 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT9, 0x780, 0x378, 2, 0x000, 0), /* MX51_PAD_DISP2_DAT9__FEC_TX_EN */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT9, 0x780, 0x378, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT9__GPIO1_31 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT9, 0x780, 0x378, 3, 0xa00, 1), /* MX51_PAD_DISP2_DAT9__USBH3_DATA1 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT10, 0x784, 0x37c, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT10__DISP2_DAT10 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT10, 0x784, 0x37c, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT10__DISP2_SER_CS */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT10, 0x784, 0x37c, 2, 0x94c, 1), /* MX51_PAD_DISP2_DAT10__FEC_COL */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT10, 0x784, 0x37c, 4, 0x9dc, 1), /* MX51_PAD_DISP2_DAT10__KEY_ROW7 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT10, 0x784, 0x37c, 3, 0xa04, 1), /* MX51_PAD_DISP2_DAT10__USBH3_DATA2 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT11, 0x788, 0x380, 4, 0x8f0, 1), /* MX51_PAD_DISP2_DAT11__AUD6_TXD */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT11, 0x788, 0x380, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT11__DISP2_DAT11 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT11, 0x788, 0x380, 2, 0x968, 1), /* MX51_PAD_DISP2_DAT11__FEC_RX_CLK */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT11, 0x788, 0x380, 7, 0x000, 0), /* MX51_PAD_DISP2_DAT11__GPIO1_10 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT11, 0x788, 0x380, 3, 0xa08, 1), /* MX51_PAD_DISP2_DAT11__USBH3_DATA3 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT12, 0x78c, 0x384, 4, 0x8ec, 1), /* MX51_PAD_DISP2_DAT12__AUD6_RXD */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT12, 0x78c, 0x384, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT12__DISP2_DAT12 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT12, 0x78c, 0x384, 2, 0x96c, 1), /* MX51_PAD_DISP2_DAT12__FEC_RX_DV */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT12, 0x78c, 0x384, 3, 0xa0c, 1), /* MX51_PAD_DISP2_DAT12__USBH3_DATA4 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT13, 0x790, 0x388, 4, 0x8fc, 1), /* MX51_PAD_DISP2_DAT13__AUD6_TXC */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT13, 0x790, 0x388, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT13__DISP2_DAT13 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT13, 0x790, 0x388, 2, 0x974, 1), /* MX51_PAD_DISP2_DAT13__FEC_TX_CLK */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT13, 0x790, 0x388, 3, 0xa10, 1), /* MX51_PAD_DISP2_DAT13__USBH3_DATA5 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT14, 0x794, 0x38c, 4, 0x900, 1), /* MX51_PAD_DISP2_DAT14__AUD6_TXFS */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT14, 0x794, 0x38c, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT14__DISP2_DAT14 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT14, 0x794, 0x38c, 2, 0x958, 1), /* MX51_PAD_DISP2_DAT14__FEC_RDATA0 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT14, 0x794, 0x38c, 3, 0xa14, 1), /* MX51_PAD_DISP2_DAT14__USBH3_DATA6 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT15, 0x798, 0x390, 4, 0x8f8, 1), /* MX51_PAD_DISP2_DAT15__AUD6_RXFS */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT15, 0x798, 0x390, 5, 0x000, 0), /* MX51_PAD_DISP2_DAT15__DISP1_SER_CS */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT15, 0x798, 0x390, 0, 0x000, 0), /* MX51_PAD_DISP2_DAT15__DISP2_DAT15 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT15, 0x798, 0x390, 2, 0x000, 0), /* MX51_PAD_DISP2_DAT15__FEC_TDATA0 */
+	IMX_PIN_REG(MX51_PAD_DISP2_DAT15, 0x798, 0x390, 3, 0xa18, 1), /* MX51_PAD_DISP2_DAT15__USBH3_DATA7 */
+	IMX_PIN_REG(MX51_PAD_SD1_CMD, 0x79c, 0x394, 1, 0x8e0, 1), /* MX51_PAD_SD1_CMD__AUD5_RXFS */
+	IMX_PIN_REG(MX51_PAD_SD1_CMD, 0x79c, 0x394, 2, 0x91c, 2), /* MX51_PAD_SD1_CMD__CSPI_MOSI */
+	IMX_PIN_REG(MX51_PAD_SD1_CMD, 0x79c, 0x394, 0, 0x000, 0), /* MX51_PAD_SD1_CMD__SD1_CMD */
+	IMX_PIN_REG(MX51_PAD_SD1_CLK, 0x7a0, 0x398, 1, 0x8dc, 1), /* MX51_PAD_SD1_CLK__AUD5_RXC */
+	IMX_PIN_REG(MX51_PAD_SD1_CLK, 0x7a0, 0x398, 2, 0x914, 2), /* MX51_PAD_SD1_CLK__CSPI_SCLK */
+	IMX_PIN_REG(MX51_PAD_SD1_CLK, 0x7a0, 0x398, 0, 0x000, 0), /* MX51_PAD_SD1_CLK__SD1_CLK */
+	IMX_PIN_REG(MX51_PAD_SD1_DATA0, 0x7a4, 0x39c, 1, 0x8d8, 2), /* MX51_PAD_SD1_DATA0__AUD5_TXD */
+	IMX_PIN_REG(MX51_PAD_SD1_DATA0, 0x7a4, 0x39c, 2, 0x918, 1), /* MX51_PAD_SD1_DATA0__CSPI_MISO */
+	IMX_PIN_REG(MX51_PAD_SD1_DATA0, 0x7a4, 0x39c, 0, 0x000, 0), /* MX51_PAD_SD1_DATA0__SD1_DATA0 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA0, NO_PAD, 0x01c, 0, 0x000, 0), /* MX51_PAD_EIM_DA0__EIM_DA0 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA1, NO_PAD, 0x020, 0, 0x000, 0), /* MX51_PAD_EIM_DA1__EIM_DA1 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA2, NO_PAD, 0x024, 0, 0x000, 0), /* MX51_PAD_EIM_DA2__EIM_DA2 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA3, NO_PAD, 0x028, 0, 0x000, 0), /* MX51_PAD_EIM_DA3__EIM_DA3 */
+	IMX_PIN_REG(MX51_PAD_SD1_DATA1, 0x7a8, 0x3a0, 1, 0x8d4, 2), /* MX51_PAD_SD1_DATA1__AUD5_RXD */
+	IMX_PIN_REG(MX51_PAD_SD1_DATA1, 0x7a8, 0x3a0, 0, 0x000, 0), /* MX51_PAD_SD1_DATA1__SD1_DATA1 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA4, NO_PAD, 0x02c, 0, 0x000, 0), /* MX51_PAD_EIM_DA4__EIM_DA4 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA5, NO_PAD, 0x030, 0, 0x000, 0), /* MX51_PAD_EIM_DA5__EIM_DA5 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA6, NO_PAD, 0x034, 0, 0x000, 0), /* MX51_PAD_EIM_DA6__EIM_DA6 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA7, NO_PAD, 0x038, 0, 0x000, 0), /* MX51_PAD_EIM_DA7__EIM_DA7 */
+	IMX_PIN_REG(MX51_PAD_SD1_DATA2, 0x7ac, 0x3a4, 1, 0x8e4, 2), /* MX51_PAD_SD1_DATA2__AUD5_TXC */
+	IMX_PIN_REG(MX51_PAD_SD1_DATA2, 0x7ac, 0x3a4, 0, 0x000, 0), /* MX51_PAD_SD1_DATA2__SD1_DATA2 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA10, NO_PAD, 0x044, 0, 0x000, 0), /* MX51_PAD_EIM_DA10__EIM_DA10 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA11, NO_PAD, 0x048, 0, 0x000, 0), /* MX51_PAD_EIM_DA11__EIM_DA11 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA8, NO_PAD, 0x03c, 0, 0x000, 0), /* MX51_PAD_EIM_DA8__EIM_DA8 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA9, NO_PAD, 0x040, 0, 0x000, 0), /* MX51_PAD_EIM_DA9__EIM_DA9 */
+	IMX_PIN_REG(MX51_PAD_SD1_DATA3, 0x7b0, 0x3a8, 1, 0x8e8, 2), /* MX51_PAD_SD1_DATA3__AUD5_TXFS */
+	IMX_PIN_REG(MX51_PAD_SD1_DATA3, 0x7b0, 0x3a8, 2, 0x920, 1), /* MX51_PAD_SD1_DATA3__CSPI_SS1 */
+	IMX_PIN_REG(MX51_PAD_SD1_DATA3, 0x7b0, 0x3a8, 0, 0x000, 0), /* MX51_PAD_SD1_DATA3__SD1_DATA3 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_0, 0x7b4, 0x3ac, 2, 0x924, 0), /* MX51_PAD_GPIO1_0__CSPI_SS2 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_0, 0x7b4, 0x3ac, 1, 0x000, 0), /* MX51_PAD_GPIO1_0__GPIO1_0 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_0, 0x7b4, 0x3ac, 0, 0x000, 0), /* MX51_PAD_GPIO1_0__SD1_CD */
+	IMX_PIN_REG(MX51_PAD_GPIO1_1, 0x7b8, 0x3b0, 2, 0x918, 2), /* MX51_PAD_GPIO1_1__CSPI_MISO */
+	IMX_PIN_REG(MX51_PAD_GPIO1_1, 0x7b8, 0x3b0, 1, 0x000, 0), /* MX51_PAD_GPIO1_1__GPIO1_1 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_1, 0x7b8, 0x3b0, 0, 0x000, 0), /* MX51_PAD_GPIO1_1__SD1_WP */
+	IMX_PIN_REG(MX51_PAD_EIM_DA12, NO_PAD, 0x04c, 0, 0x000, 0), /* MX51_PAD_EIM_DA12__EIM_DA12 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA13, NO_PAD, 0x050, 0, 0x000, 0), /* MX51_PAD_EIM_DA13__EIM_DA13 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA14, NO_PAD, 0x054, 0, 0x000, 0), /* MX51_PAD_EIM_DA14__EIM_DA14 */
+	IMX_PIN_REG(MX51_PAD_EIM_DA15, NO_PAD, 0x058, 0, 0x000, 0), /* MX51_PAD_EIM_DA15__EIM_DA15 */
+	IMX_PIN_REG(MX51_PAD_SD2_CMD, NO_PAD, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */
+	IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 1, 0x9b0, 2), /* MX51_PAD_SD2_CMD__I2C1_SCL */
+	IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 0, 0x000, 0), /* MX51_PAD_SD2_CMD__SD2_CMD */
+	IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 2, 0x914, 3), /* MX51_PAD_SD2_CLK__CSPI_SCLK */
+	IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 1, 0x9b4, 2), /* MX51_PAD_SD2_CLK__I2C1_SDA */
+	IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 0, 0x000, 0), /* MX51_PAD_SD2_CLK__SD2_CLK */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA0, 0x7c4, 0x3bc, 2, 0x918, 3), /* MX51_PAD_SD2_DATA0__CSPI_MISO */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA0, 0x7c4, 0x3bc, 1, 0x000, 0), /* MX51_PAD_SD2_DATA0__SD1_DAT4 */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA0, 0x7c4, 0x3bc, 0, 0x000, 0), /* MX51_PAD_SD2_DATA0__SD2_DATA0 */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA1, 0x7c8, 0x3c0, 1, 0x000, 0), /* MX51_PAD_SD2_DATA1__SD1_DAT5 */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA1, 0x7c8, 0x3c0, 0, 0x000, 0), /* MX51_PAD_SD2_DATA1__SD2_DATA1 */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA1, 0x7c8, 0x3c0, 2, 0x000, 0), /* MX51_PAD_SD2_DATA1__USBH3_H2_DP */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA2, 0x7cc, 0x3c4, 1, 0x000, 0), /* MX51_PAD_SD2_DATA2__SD1_DAT6 */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA2, 0x7cc, 0x3c4, 0, 0x000, 0), /* MX51_PAD_SD2_DATA2__SD2_DATA2 */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA2, 0x7cc, 0x3c4, 2, 0x000, 0), /* MX51_PAD_SD2_DATA2__USBH3_H2_DM */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA3, 0x7d0, 0x3c8, 2, 0x924, 1), /* MX51_PAD_SD2_DATA3__CSPI_SS2 */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA3, 0x7d0, 0x3c8, 1, 0x000, 0), /* MX51_PAD_SD2_DATA3__SD1_DAT7 */
+	IMX_PIN_REG(MX51_PAD_SD2_DATA3, 0x7d0, 0x3c8, 0, 0x000, 0), /* MX51_PAD_SD2_DATA3__SD2_DATA3 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_2, 0x7d4, 0x3cc, 5, 0x000, 0), /* MX51_PAD_GPIO1_2__CCM_OUT_2 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_2, 0x7d4, 0x3cc, 0, 0x000, 0), /* MX51_PAD_GPIO1_2__GPIO1_2 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_2, 0x7d4, 0x3cc, 2, 0x9b8, 3), /* MX51_PAD_GPIO1_2__I2C2_SCL */
+	IMX_PIN_REG(MX51_PAD_GPIO1_2, 0x7d4, 0x3cc, 7, 0x90c, 1), /* MX51_PAD_GPIO1_2__PLL1_BYP */
+	IMX_PIN_REG(MX51_PAD_GPIO1_2, 0x7d4, 0x3cc, 1, 0x000, 0), /* MX51_PAD_GPIO1_2__PWM1_PWMO */
+	IMX_PIN_REG(MX51_PAD_GPIO1_3, 0x7d8, 0x3d0, 0, 0x000, 0), /* MX51_PAD_GPIO1_3__GPIO1_3 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_3, 0x7d8, 0x3d0, 2, 0x9bc, 3), /* MX51_PAD_GPIO1_3__I2C2_SDA */
+	IMX_PIN_REG(MX51_PAD_GPIO1_3, 0x7d8, 0x3d0, 7, 0x910, 1), /* MX51_PAD_GPIO1_3__PLL2_BYP */
+	IMX_PIN_REG(MX51_PAD_GPIO1_3, 0x7d8, 0x3d0, 1, 0x000, 0), /* MX51_PAD_GPIO1_3__PWM2_PWMO */
+	IMX_PIN_REG(MX51_PAD_PMIC_INT_REQ, 0x7fc, 0x3d4, 0, 0x000, 0), /* MX51_PAD_PMIC_INT_REQ__PMIC_INT_REQ */
+	IMX_PIN_REG(MX51_PAD_PMIC_INT_REQ, 0x7fc, 0x3d4, 1, 0x000, 0), /* MX51_PAD_PMIC_INT_REQ__PMIC_PMU_IRQ_B */
+	IMX_PIN_REG(MX51_PAD_GPIO1_4, 0x804, 0x3d8, 4, 0x908, 1), /* MX51_PAD_GPIO1_4__DISP2_EXT_CLK */
+	IMX_PIN_REG(MX51_PAD_GPIO1_4, 0x804, 0x3d8, 3, 0x938, 1), /* MX51_PAD_GPIO1_4__EIM_RDY */
+	IMX_PIN_REG(MX51_PAD_GPIO1_4, 0x804, 0x3d8, 0, 0x000, 0), /* MX51_PAD_GPIO1_4__GPIO1_4 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_4, 0x804, 0x3d8, 2, 0x000, 0), /* MX51_PAD_GPIO1_4__WDOG1_WDOG_B */
+	IMX_PIN_REG(MX51_PAD_GPIO1_5, 0x808, 0x3dc, 6, 0x000, 0), /* MX51_PAD_GPIO1_5__CSI2_MCLK */
+	IMX_PIN_REG(MX51_PAD_GPIO1_5, 0x808, 0x3dc, 3, 0x000, 0), /* MX51_PAD_GPIO1_5__DISP2_PIN16 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_5, 0x808, 0x3dc, 0, 0x000, 0), /* MX51_PAD_GPIO1_5__GPIO1_5 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_5, 0x808, 0x3dc, 2, 0x000, 0), /* MX51_PAD_GPIO1_5__WDOG2_WDOG_B */
+	IMX_PIN_REG(MX51_PAD_GPIO1_6, 0x80c, 0x3e0, 4, 0x000, 0), /* MX51_PAD_GPIO1_6__DISP2_PIN17 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_6, 0x80c, 0x3e0, 0, 0x000, 0), /* MX51_PAD_GPIO1_6__GPIO1_6 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_6, 0x80c, 0x3e0, 3, 0x000, 0), /* MX51_PAD_GPIO1_6__REF_EN_B */
+	IMX_PIN_REG(MX51_PAD_GPIO1_7, 0x810, 0x3e4, 3, 0x000, 0), /* MX51_PAD_GPIO1_7__CCM_OUT_0 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_7, 0x810, 0x3e4, 0, 0x000, 0), /* MX51_PAD_GPIO1_7__GPIO1_7 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_7, 0x810, 0x3e4, 6, 0x000, 0), /* MX51_PAD_GPIO1_7__SD2_WP */
+	IMX_PIN_REG(MX51_PAD_GPIO1_7, 0x810, 0x3e4, 2, 0x000, 0), /* MX51_PAD_GPIO1_7__SPDIF_OUT1 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_8, 0x814, 0x3e8, 2, 0x99c, 2), /* MX51_PAD_GPIO1_8__CSI2_DATA_EN */
+	IMX_PIN_REG(MX51_PAD_GPIO1_8, 0x814, 0x3e8, 0, 0x000, 0), /* MX51_PAD_GPIO1_8__GPIO1_8 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_8, 0x814, 0x3e8, 6, 0x000, 0), /* MX51_PAD_GPIO1_8__SD2_CD */
+	IMX_PIN_REG(MX51_PAD_GPIO1_8, 0x814, 0x3e8, 1, 0x000, 0), /* MX51_PAD_GPIO1_8__USBH3_PWR */
+	IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 3, 0x000, 0), /* MX51_PAD_GPIO1_9__CCM_OUT_1 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 2, 0x000, 0), /* MX51_PAD_GPIO1_9__DISP2_D1_CS */
+	IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 7, 0x000, 0), /* MX51_PAD_GPIO1_9__DISP2_SER_CS */
+	IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 0, 0x000, 0), /* MX51_PAD_GPIO1_9__GPIO1_9 */
+	IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 6, 0x000, 0), /* MX51_PAD_GPIO1_9__SD2_LCTL */
+	IMX_PIN_REG(MX51_PAD_GPIO1_9, 0x818, 0x3ec, 1, 0x000, 0), /* MX51_PAD_GPIO1_9__USBH3_OC */
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx51_pinctrl_pads[] = {
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D16),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D17),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D18),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D19),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D20),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D21),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D22),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D23),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D24),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D25),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D26),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D27),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D28),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D29),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D30),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_D31),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A16),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A17),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A18),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A19),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A20),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A21),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A22),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A23),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A24),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A25),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A26),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_A27),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_EB0),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_EB1),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_EB2),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_EB3),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_OE),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_CS0),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_CS1),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_CS2),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_CS3),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_CS4),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_CS5),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DTACK),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_LBA),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_CRE),
+	IMX_PINCTRL_PIN(MX51_PAD_DRAM_CS1),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_WE_B),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_RE_B),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_ALE),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_CLE),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_WP_B),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_RB0),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_RB1),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_RB2),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_RB3),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO_NAND),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_CS0),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_CS1),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_CS2),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_CS3),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_CS4),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_CS5),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_CS6),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_CS7),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_RDY_INT),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D15),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D14),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D13),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D12),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D11),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D10),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D9),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D8),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D7),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D6),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D5),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D4),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D3),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D2),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D1),
+	IMX_PINCTRL_PIN(MX51_PAD_NANDF_D0),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D8),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D9),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D10),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D11),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D12),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D13),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D14),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D15),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D16),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D17),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D18),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_D19),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_VSYNC),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_HSYNC),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_PIXCLK),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI1_MCLK),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_D12),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_D13),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_D14),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_D15),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_D16),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_D17),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_D18),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_D19),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_VSYNC),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_HSYNC),
+	IMX_PINCTRL_PIN(MX51_PAD_CSI2_PIXCLK),
+	IMX_PINCTRL_PIN(MX51_PAD_I2C1_CLK),
+	IMX_PINCTRL_PIN(MX51_PAD_I2C1_DAT),
+	IMX_PINCTRL_PIN(MX51_PAD_AUD3_BB_TXD),
+	IMX_PINCTRL_PIN(MX51_PAD_AUD3_BB_RXD),
+	IMX_PINCTRL_PIN(MX51_PAD_AUD3_BB_CK),
+	IMX_PINCTRL_PIN(MX51_PAD_AUD3_BB_FS),
+	IMX_PINCTRL_PIN(MX51_PAD_CSPI1_MOSI),
+	IMX_PINCTRL_PIN(MX51_PAD_CSPI1_MISO),
+	IMX_PINCTRL_PIN(MX51_PAD_CSPI1_SS0),
+	IMX_PINCTRL_PIN(MX51_PAD_CSPI1_SS1),
+	IMX_PINCTRL_PIN(MX51_PAD_CSPI1_RDY),
+	IMX_PINCTRL_PIN(MX51_PAD_CSPI1_SCLK),
+	IMX_PINCTRL_PIN(MX51_PAD_UART1_RXD),
+	IMX_PINCTRL_PIN(MX51_PAD_UART1_TXD),
+	IMX_PINCTRL_PIN(MX51_PAD_UART1_RTS),
+	IMX_PINCTRL_PIN(MX51_PAD_UART1_CTS),
+	IMX_PINCTRL_PIN(MX51_PAD_UART2_RXD),
+	IMX_PINCTRL_PIN(MX51_PAD_UART2_TXD),
+	IMX_PINCTRL_PIN(MX51_PAD_UART3_RXD),
+	IMX_PINCTRL_PIN(MX51_PAD_UART3_TXD),
+	IMX_PINCTRL_PIN(MX51_PAD_OWIRE_LINE),
+	IMX_PINCTRL_PIN(MX51_PAD_KEY_ROW0),
+	IMX_PINCTRL_PIN(MX51_PAD_KEY_ROW1),
+	IMX_PINCTRL_PIN(MX51_PAD_KEY_ROW2),
+	IMX_PINCTRL_PIN(MX51_PAD_KEY_ROW3),
+	IMX_PINCTRL_PIN(MX51_PAD_KEY_COL0),
+	IMX_PINCTRL_PIN(MX51_PAD_KEY_COL1),
+	IMX_PINCTRL_PIN(MX51_PAD_KEY_COL2),
+	IMX_PINCTRL_PIN(MX51_PAD_KEY_COL3),
+	IMX_PINCTRL_PIN(MX51_PAD_KEY_COL4),
+	IMX_PINCTRL_PIN(MX51_PAD_KEY_COL5),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_CLK),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_DIR),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_STP),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_NXT),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_DATA0),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_DATA1),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_DATA2),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_DATA3),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_DATA4),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_DATA5),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_DATA6),
+	IMX_PINCTRL_PIN(MX51_PAD_USBH1_DATA7),
+	IMX_PINCTRL_PIN(MX51_PAD_DI1_PIN11),
+	IMX_PINCTRL_PIN(MX51_PAD_DI1_PIN12),
+	IMX_PINCTRL_PIN(MX51_PAD_DI1_PIN13),
+	IMX_PINCTRL_PIN(MX51_PAD_DI1_D0_CS),
+	IMX_PINCTRL_PIN(MX51_PAD_DI1_D1_CS),
+	IMX_PINCTRL_PIN(MX51_PAD_DISPB2_SER_DIN),
+	IMX_PINCTRL_PIN(MX51_PAD_DISPB2_SER_DIO),
+	IMX_PINCTRL_PIN(MX51_PAD_DISPB2_SER_CLK),
+	IMX_PINCTRL_PIN(MX51_PAD_DISPB2_SER_RS),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT0),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT1),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT2),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT3),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT4),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT5),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT6),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT7),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT8),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT9),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT10),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT11),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT12),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT13),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT14),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT15),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT16),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT17),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT18),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT19),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT20),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT21),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT22),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP1_DAT23),
+	IMX_PINCTRL_PIN(MX51_PAD_DI1_PIN3),
+	IMX_PINCTRL_PIN(MX51_PAD_DI1_PIN2),
+	IMX_PINCTRL_PIN(MX51_PAD_DI_GP2),
+	IMX_PINCTRL_PIN(MX51_PAD_DI_GP3),
+	IMX_PINCTRL_PIN(MX51_PAD_DI2_PIN4),
+	IMX_PINCTRL_PIN(MX51_PAD_DI2_PIN2),
+	IMX_PINCTRL_PIN(MX51_PAD_DI2_PIN3),
+	IMX_PINCTRL_PIN(MX51_PAD_DI2_DISP_CLK),
+	IMX_PINCTRL_PIN(MX51_PAD_DI_GP4),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT0),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT1),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT2),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT3),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT4),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT5),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT6),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT7),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT8),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT9),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT10),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT11),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT12),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT13),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT14),
+	IMX_PINCTRL_PIN(MX51_PAD_DISP2_DAT15),
+	IMX_PINCTRL_PIN(MX51_PAD_SD1_CMD),
+	IMX_PINCTRL_PIN(MX51_PAD_SD1_CLK),
+	IMX_PINCTRL_PIN(MX51_PAD_SD1_DATA0),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA0),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA1),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA2),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA3),
+	IMX_PINCTRL_PIN(MX51_PAD_SD1_DATA1),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA4),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA5),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA6),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA7),
+	IMX_PINCTRL_PIN(MX51_PAD_SD1_DATA2),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA10),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA11),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA8),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA9),
+	IMX_PINCTRL_PIN(MX51_PAD_SD1_DATA3),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO1_0),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO1_1),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA12),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA13),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA14),
+	IMX_PINCTRL_PIN(MX51_PAD_EIM_DA15),
+	IMX_PINCTRL_PIN(MX51_PAD_SD2_CMD),
+	IMX_PINCTRL_PIN(MX51_PAD_SD2_CLK),
+	IMX_PINCTRL_PIN(MX51_PAD_SD2_DATA0),
+	IMX_PINCTRL_PIN(MX51_PAD_SD2_DATA1),
+	IMX_PINCTRL_PIN(MX51_PAD_SD2_DATA2),
+	IMX_PINCTRL_PIN(MX51_PAD_SD2_DATA3),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO1_2),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO1_3),
+	IMX_PINCTRL_PIN(MX51_PAD_PMIC_INT_REQ),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO1_4),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO1_5),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO1_6),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO1_7),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO1_8),
+	IMX_PINCTRL_PIN(MX51_PAD_GPIO1_9),
+};
+
+static struct imx_pinctrl_soc_info imx51_pinctrl_info = {
+	.pins = imx51_pinctrl_pads,
+	.npins = ARRAY_SIZE(imx51_pinctrl_pads),
+	.pin_regs = imx51_pin_regs,
+	.npin_regs = ARRAY_SIZE(imx51_pin_regs),
+};
+
+static struct of_device_id imx51_pinctrl_of_match[] __devinitdata = {
+	{ .compatible = "fsl,imx51-iomuxc", },
+	{ /* sentinel */ }
+};
+
+static int __devinit imx51_pinctrl_probe(struct platform_device *pdev)
+{
+	return imx_pinctrl_probe(pdev, &imx51_pinctrl_info);
+}
+
+static struct platform_driver imx51_pinctrl_driver = {
+	.driver = {
+		.name = "imx51-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(imx51_pinctrl_of_match),
+	},
+	.probe = imx51_pinctrl_probe,
+	.remove = __devexit_p(imx_pinctrl_remove),
+};
+
+static int __init imx51_pinctrl_init(void)
+{
+	return platform_driver_register(&imx51_pinctrl_driver);
+}
+arch_initcall(imx51_pinctrl_init);
+
+static void __exit imx51_pinctrl_exit(void)
+{
+	platform_driver_unregister(&imx51_pinctrl_driver);
+}
+module_exit(imx51_pinctrl_exit);
+MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>");
+MODULE_DESCRIPTION("Freescale IMX51 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-imx53.c b/drivers/pinctrl/pinctrl-imx53.c
new file mode 100644
index 0000000..1f49e16
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx53.c
@@ -0,0 +1,1649 @@
+/*
+ * imx53 pinctrl driver based on imx pinmux core
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro, Inc.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx53_pads {
+	MX53_PAD_GPIO_19 = 1,
+	MX53_PAD_KEY_COL0 = 2,
+	MX53_PAD_KEY_ROW0 = 3,
+	MX53_PAD_KEY_COL1 = 4,
+	MX53_PAD_KEY_ROW1 = 5,
+	MX53_PAD_KEY_COL2 = 6,
+	MX53_PAD_KEY_ROW2 = 7,
+	MX53_PAD_KEY_COL3 = 8,
+	MX53_PAD_KEY_ROW3 = 9,
+	MX53_PAD_KEY_COL4 = 10,
+	MX53_PAD_KEY_ROW4 = 11,
+	MX53_PAD_DI0_DISP_CLK = 12,
+	MX53_PAD_DI0_PIN15 = 13,
+	MX53_PAD_DI0_PIN2 = 14,
+	MX53_PAD_DI0_PIN3 = 15,
+	MX53_PAD_DI0_PIN4 = 16,
+	MX53_PAD_DISP0_DAT0 = 17,
+	MX53_PAD_DISP0_DAT1 = 18,
+	MX53_PAD_DISP0_DAT2 = 19,
+	MX53_PAD_DISP0_DAT3 = 20,
+	MX53_PAD_DISP0_DAT4 = 21,
+	MX53_PAD_DISP0_DAT5 = 22,
+	MX53_PAD_DISP0_DAT6 = 23,
+	MX53_PAD_DISP0_DAT7 = 24,
+	MX53_PAD_DISP0_DAT8 = 25,
+	MX53_PAD_DISP0_DAT9 = 26,
+	MX53_PAD_DISP0_DAT10 = 27,
+	MX53_PAD_DISP0_DAT11 = 28,
+	MX53_PAD_DISP0_DAT12 = 29,
+	MX53_PAD_DISP0_DAT13 = 30,
+	MX53_PAD_DISP0_DAT14 = 31,
+	MX53_PAD_DISP0_DAT15 = 32,
+	MX53_PAD_DISP0_DAT16 = 33,
+	MX53_PAD_DISP0_DAT17 = 34,
+	MX53_PAD_DISP0_DAT18 = 35,
+	MX53_PAD_DISP0_DAT19 = 36,
+	MX53_PAD_DISP0_DAT20 = 37,
+	MX53_PAD_DISP0_DAT21 = 38,
+	MX53_PAD_DISP0_DAT22 = 39,
+	MX53_PAD_DISP0_DAT23 = 40,
+	MX53_PAD_CSI0_PIXCLK = 41,
+	MX53_PAD_CSI0_MCLK = 42,
+	MX53_PAD_CSI0_DATA_EN = 43,
+	MX53_PAD_CSI0_VSYNC = 44,
+	MX53_PAD_CSI0_DAT4 = 45,
+	MX53_PAD_CSI0_DAT5 = 46,
+	MX53_PAD_CSI0_DAT6 = 47,
+	MX53_PAD_CSI0_DAT7 = 48,
+	MX53_PAD_CSI0_DAT8 = 49,
+	MX53_PAD_CSI0_DAT9 = 50,
+	MX53_PAD_CSI0_DAT10 = 51,
+	MX53_PAD_CSI0_DAT11 = 52,
+	MX53_PAD_CSI0_DAT12 = 53,
+	MX53_PAD_CSI0_DAT13 = 54,
+	MX53_PAD_CSI0_DAT14 = 55,
+	MX53_PAD_CSI0_DAT15 = 56,
+	MX53_PAD_CSI0_DAT16 = 57,
+	MX53_PAD_CSI0_DAT17 = 58,
+	MX53_PAD_CSI0_DAT18 = 59,
+	MX53_PAD_CSI0_DAT19 = 60,
+	MX53_PAD_EIM_A25 = 61,
+	MX53_PAD_EIM_EB2 = 62,
+	MX53_PAD_EIM_D16 = 63,
+	MX53_PAD_EIM_D17 = 64,
+	MX53_PAD_EIM_D18 = 65,
+	MX53_PAD_EIM_D19 = 66,
+	MX53_PAD_EIM_D20 = 67,
+	MX53_PAD_EIM_D21 = 68,
+	MX53_PAD_EIM_D22 = 69,
+	MX53_PAD_EIM_D23 = 70,
+	MX53_PAD_EIM_EB3 = 71,
+	MX53_PAD_EIM_D24 = 72,
+	MX53_PAD_EIM_D25 = 73,
+	MX53_PAD_EIM_D26 = 74,
+	MX53_PAD_EIM_D27 = 75,
+	MX53_PAD_EIM_D28 = 76,
+	MX53_PAD_EIM_D29 = 77,
+	MX53_PAD_EIM_D30 = 78,
+	MX53_PAD_EIM_D31 = 79,
+	MX53_PAD_EIM_A24 = 80,
+	MX53_PAD_EIM_A23 = 81,
+	MX53_PAD_EIM_A22 = 82,
+	MX53_PAD_EIM_A21 = 83,
+	MX53_PAD_EIM_A20 = 84,
+	MX53_PAD_EIM_A19 = 85,
+	MX53_PAD_EIM_A18 = 86,
+	MX53_PAD_EIM_A17 = 87,
+	MX53_PAD_EIM_A16 = 88,
+	MX53_PAD_EIM_CS0 = 89,
+	MX53_PAD_EIM_CS1 = 90,
+	MX53_PAD_EIM_OE = 91,
+	MX53_PAD_EIM_RW = 92,
+	MX53_PAD_EIM_LBA = 93,
+	MX53_PAD_EIM_EB0 = 94,
+	MX53_PAD_EIM_EB1 = 95,
+	MX53_PAD_EIM_DA0 = 96,
+	MX53_PAD_EIM_DA1 = 97,
+	MX53_PAD_EIM_DA2 = 98,
+	MX53_PAD_EIM_DA3 = 99,
+	MX53_PAD_EIM_DA4 = 100,
+	MX53_PAD_EIM_DA5 = 101,
+	MX53_PAD_EIM_DA6 = 102,
+	MX53_PAD_EIM_DA7 = 103,
+	MX53_PAD_EIM_DA8 = 104,
+	MX53_PAD_EIM_DA9 = 105,
+	MX53_PAD_EIM_DA10 = 106,
+	MX53_PAD_EIM_DA11 = 107,
+	MX53_PAD_EIM_DA12 = 108,
+	MX53_PAD_EIM_DA13 = 109,
+	MX53_PAD_EIM_DA14 = 110,
+	MX53_PAD_EIM_DA15 = 111,
+	MX53_PAD_NANDF_WE_B = 112,
+	MX53_PAD_NANDF_RE_B = 113,
+	MX53_PAD_EIM_WAIT = 114,
+	MX53_PAD_LVDS1_TX3_P = 115,
+	MX53_PAD_LVDS1_TX2_P = 116,
+	MX53_PAD_LVDS1_CLK_P = 117,
+	MX53_PAD_LVDS1_TX1_P = 118,
+	MX53_PAD_LVDS1_TX0_P = 119,
+	MX53_PAD_LVDS0_TX3_P = 120,
+	MX53_PAD_LVDS0_CLK_P = 121,
+	MX53_PAD_LVDS0_TX2_P = 122,
+	MX53_PAD_LVDS0_TX1_P = 123,
+	MX53_PAD_LVDS0_TX0_P = 124,
+	MX53_PAD_GPIO_10 = 125,
+	MX53_PAD_GPIO_11 = 126,
+	MX53_PAD_GPIO_12 = 127,
+	MX53_PAD_GPIO_13 = 128,
+	MX53_PAD_GPIO_14 = 129,
+	MX53_PAD_NANDF_CLE = 130,
+	MX53_PAD_NANDF_ALE = 131,
+	MX53_PAD_NANDF_WP_B = 132,
+	MX53_PAD_NANDF_RB0 = 133,
+	MX53_PAD_NANDF_CS0 = 134,
+	MX53_PAD_NANDF_CS1 = 135,
+	MX53_PAD_NANDF_CS2 = 136,
+	MX53_PAD_NANDF_CS3 = 137,
+	MX53_PAD_FEC_MDIO = 138,
+	MX53_PAD_FEC_REF_CLK = 139,
+	MX53_PAD_FEC_RX_ER = 140,
+	MX53_PAD_FEC_CRS_DV = 141,
+	MX53_PAD_FEC_RXD1 = 142,
+	MX53_PAD_FEC_RXD0 = 143,
+	MX53_PAD_FEC_TX_EN = 144,
+	MX53_PAD_FEC_TXD1 = 145,
+	MX53_PAD_FEC_TXD0 = 146,
+	MX53_PAD_FEC_MDC = 147,
+	MX53_PAD_PATA_DIOW = 148,
+	MX53_PAD_PATA_DMACK = 149,
+	MX53_PAD_PATA_DMARQ = 150,
+	MX53_PAD_PATA_BUFFER_EN = 151,
+	MX53_PAD_PATA_INTRQ = 152,
+	MX53_PAD_PATA_DIOR = 153,
+	MX53_PAD_PATA_RESET_B = 154,
+	MX53_PAD_PATA_IORDY = 155,
+	MX53_PAD_PATA_DA_0 = 156,
+	MX53_PAD_PATA_DA_1 = 157,
+	MX53_PAD_PATA_DA_2 = 158,
+	MX53_PAD_PATA_CS_0 = 159,
+	MX53_PAD_PATA_CS_1 = 160,
+	MX53_PAD_PATA_DATA0 = 161,
+	MX53_PAD_PATA_DATA1 = 162,
+	MX53_PAD_PATA_DATA2 = 163,
+	MX53_PAD_PATA_DATA3 = 164,
+	MX53_PAD_PATA_DATA4 = 165,
+	MX53_PAD_PATA_DATA5 = 166,
+	MX53_PAD_PATA_DATA6 = 167,
+	MX53_PAD_PATA_DATA7 = 168,
+	MX53_PAD_PATA_DATA8 = 169,
+	MX53_PAD_PATA_DATA9 = 170,
+	MX53_PAD_PATA_DATA10 = 171,
+	MX53_PAD_PATA_DATA11 = 172,
+	MX53_PAD_PATA_DATA12 = 173,
+	MX53_PAD_PATA_DATA13 = 174,
+	MX53_PAD_PATA_DATA14 = 175,
+	MX53_PAD_PATA_DATA15 = 176,
+	MX53_PAD_SD1_DATA0 = 177,
+	MX53_PAD_SD1_DATA1 = 178,
+	MX53_PAD_SD1_CMD = 179,
+	MX53_PAD_SD1_DATA2 = 180,
+	MX53_PAD_SD1_CLK = 181,
+	MX53_PAD_SD1_DATA3 = 182,
+	MX53_PAD_SD2_CLK = 183,
+	MX53_PAD_SD2_CMD = 184,
+	MX53_PAD_SD2_DATA3 = 185,
+	MX53_PAD_SD2_DATA2 = 186,
+	MX53_PAD_SD2_DATA1 = 187,
+	MX53_PAD_SD2_DATA0 = 188,
+	MX53_PAD_GPIO_0 = 189,
+	MX53_PAD_GPIO_1 = 190,
+	MX53_PAD_GPIO_9 = 191,
+	MX53_PAD_GPIO_3 = 192,
+	MX53_PAD_GPIO_6 = 193,
+	MX53_PAD_GPIO_2 = 194,
+	MX53_PAD_GPIO_4 = 195,
+	MX53_PAD_GPIO_5 = 196,
+	MX53_PAD_GPIO_7 = 197,
+	MX53_PAD_GPIO_8 = 198,
+	MX53_PAD_GPIO_16 = 199,
+	MX53_PAD_GPIO_17 = 200,
+	MX53_PAD_GPIO_18 = 201,
+};
+
+/* imx53 register maps */
+static struct imx_pin_reg imx53_pin_regs[] = {
+	IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 0, 0x840, 0), /* MX53_PAD_GPIO_19__KPP_COL_5 */
+	IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 1, 0x000, 0), /* MX53_PAD_GPIO_19__GPIO4_5 */
+	IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 2, 0x000, 0), /* MX53_PAD_GPIO_19__CCM_CLKO */
+	IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 3, 0x000, 0), /* MX53_PAD_GPIO_19__SPDIF_OUT1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 4, 0x000, 0), /* MX53_PAD_GPIO_19__RTC_CE_RTC_EXT_TRIG2 */
+	IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 5, 0x000, 0), /* MX53_PAD_GPIO_19__ECSPI1_RDY */
+	IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 6, 0x000, 0), /* MX53_PAD_GPIO_19__FEC_TDATA_3 */
+	IMX_PIN_REG(MX53_PAD_GPIO_19, 0x348, 0x020, 7, 0x000, 0), /* MX53_PAD_GPIO_19__SRC_INT_BOOT */
+	IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 0, 0x000, 0), /* MX53_PAD_KEY_COL0__KPP_COL_0 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 1, 0x000, 0), /* MX53_PAD_KEY_COL0__GPIO4_6 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 2, 0x758, 0), /* MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC */
+	IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 4, 0x000, 0), /* MX53_PAD_KEY_COL0__UART4_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 5, 0x79C, 0), /* MX53_PAD_KEY_COL0__ECSPI1_SCLK */
+	IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 6, 0x000, 0), /* MX53_PAD_KEY_COL0__FEC_RDATA_3 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL0, 0x34C, 0x024, 7, 0x000, 0), /* MX53_PAD_KEY_COL0__SRC_ANY_PU_RST */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 0, 0x000, 0), /* MX53_PAD_KEY_ROW0__KPP_ROW_0 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 1, 0x000, 0), /* MX53_PAD_KEY_ROW0__GPIO4_7 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 2, 0x74C, 0), /* MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 4, 0x890, 1), /* MX53_PAD_KEY_ROW0__UART4_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 5, 0x7A4, 0), /* MX53_PAD_KEY_ROW0__ECSPI1_MOSI */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW0, 0x350, 0x028, 6, 0x000, 0), /* MX53_PAD_KEY_ROW0__FEC_TX_ER */
+	IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 0, 0x000, 0), /* MX53_PAD_KEY_COL1__KPP_COL_1 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 1, 0x000, 0), /* MX53_PAD_KEY_COL1__GPIO4_8 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 2, 0x75C, 0), /* MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS */
+	IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 4, 0x000, 0), /* MX53_PAD_KEY_COL1__UART5_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 5, 0x7A0, 0), /* MX53_PAD_KEY_COL1__ECSPI1_MISO */
+	IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 6, 0x808, 0), /* MX53_PAD_KEY_COL1__FEC_RX_CLK */
+	IMX_PIN_REG(MX53_PAD_KEY_COL1, 0x354, 0x02C, 7, 0x000, 0), /* MX53_PAD_KEY_COL1__USBPHY1_TXREADY */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 0, 0x000, 0), /* MX53_PAD_KEY_ROW1__KPP_ROW_1 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 1, 0x000, 0), /* MX53_PAD_KEY_ROW1__GPIO4_9 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 2, 0x748, 0), /* MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 4, 0x898, 1), /* MX53_PAD_KEY_ROW1__UART5_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 5, 0x7A8, 0), /* MX53_PAD_KEY_ROW1__ECSPI1_SS0 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 6, 0x800, 0), /* MX53_PAD_KEY_ROW1__FEC_COL */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW1, 0x358, 0x030, 7, 0x000, 0), /* MX53_PAD_KEY_ROW1__USBPHY1_RXVALID */
+	IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 0, 0x000, 0), /* MX53_PAD_KEY_COL2__KPP_COL_2 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 1, 0x000, 0), /* MX53_PAD_KEY_COL2__GPIO4_10 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 2, 0x000, 0), /* MX53_PAD_KEY_COL2__CAN1_TXCAN */
+	IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 4, 0x804, 0), /* MX53_PAD_KEY_COL2__FEC_MDIO */
+	IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 5, 0x7AC, 0), /* MX53_PAD_KEY_COL2__ECSPI1_SS1 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 6, 0x000, 0), /* MX53_PAD_KEY_COL2__FEC_RDATA_2 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL2, 0x35C, 0x034, 7, 0x000, 0), /* MX53_PAD_KEY_COL2__USBPHY1_RXACTIVE */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 0, 0x000, 0), /* MX53_PAD_KEY_ROW2__KPP_ROW_2 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 1, 0x000, 0), /* MX53_PAD_KEY_ROW2__GPIO4_11 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 2, 0x760, 0), /* MX53_PAD_KEY_ROW2__CAN1_RXCAN */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 4, 0x000, 0), /* MX53_PAD_KEY_ROW2__FEC_MDC */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 5, 0x7B0, 0), /* MX53_PAD_KEY_ROW2__ECSPI1_SS2 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 6, 0x000, 0), /* MX53_PAD_KEY_ROW2__FEC_TDATA_2 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW2, 0x360, 0x038, 7, 0x000, 0), /* MX53_PAD_KEY_ROW2__USBPHY1_RXERROR */
+	IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 0, 0x000, 0), /* MX53_PAD_KEY_COL3__KPP_COL_3 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 1, 0x000, 0), /* MX53_PAD_KEY_COL3__GPIO4_12 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 2, 0x000, 0), /* MX53_PAD_KEY_COL3__USBOH3_H2_DP */
+	IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 3, 0x870, 0), /* MX53_PAD_KEY_COL3__SPDIF_IN1 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 4, 0x81C, 0), /* MX53_PAD_KEY_COL3__I2C2_SCL */
+	IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 5, 0x7B4, 0), /* MX53_PAD_KEY_COL3__ECSPI1_SS3 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 6, 0x000, 0), /* MX53_PAD_KEY_COL3__FEC_CRS */
+	IMX_PIN_REG(MX53_PAD_KEY_COL3, 0x364, 0x03C, 7, 0x000, 0), /* MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 0, 0x000, 0), /* MX53_PAD_KEY_ROW3__KPP_ROW_3 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 1, 0x000, 0), /* MX53_PAD_KEY_ROW3__GPIO4_13 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 2, 0x000, 0), /* MX53_PAD_KEY_ROW3__USBOH3_H2_DM */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 3, 0x768, 0), /* MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 4, 0x820, 0), /* MX53_PAD_KEY_ROW3__I2C2_SDA */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 5, 0x000, 0), /* MX53_PAD_KEY_ROW3__OSC32K_32K_OUT */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 6, 0x77C, 0), /* MX53_PAD_KEY_ROW3__CCM_PLL4_BYP */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW3, 0x368, 0x040, 7, 0x000, 0), /* MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 0, 0x000, 0), /* MX53_PAD_KEY_COL4__KPP_COL_4 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 1, 0x000, 0), /* MX53_PAD_KEY_COL4__GPIO4_14 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 2, 0x000, 0), /* MX53_PAD_KEY_COL4__CAN2_TXCAN */
+	IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 3, 0x000, 0), /* MX53_PAD_KEY_COL4__IPU_SISG_4 */
+	IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 4, 0x894, 0), /* MX53_PAD_KEY_COL4__UART5_RTS */
+	IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 5, 0x89C, 0), /* MX53_PAD_KEY_COL4__USBOH3_USBOTG_OC */
+	IMX_PIN_REG(MX53_PAD_KEY_COL4, 0x36C, 0x044, 7, 0x000, 0), /* MX53_PAD_KEY_COL4__USBPHY1_LINESTATE_1 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 0, 0x000, 0), /* MX53_PAD_KEY_ROW4__KPP_ROW_4 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 1, 0x000, 0), /* MX53_PAD_KEY_ROW4__GPIO4_15 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 2, 0x764, 0), /* MX53_PAD_KEY_ROW4__CAN2_RXCAN */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 3, 0x000, 0), /* MX53_PAD_KEY_ROW4__IPU_SISG_5 */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 4, 0x000, 0), /* MX53_PAD_KEY_ROW4__UART5_CTS */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 5, 0x000, 0), /* MX53_PAD_KEY_ROW4__USBOH3_USBOTG_PWR */
+	IMX_PIN_REG(MX53_PAD_KEY_ROW4, 0x370, 0x048, 7, 0x000, 0), /* MX53_PAD_KEY_ROW4__USBPHY1_VBUSVALID */
+	IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 0, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK */
+	IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 1, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__GPIO4_16 */
+	IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 2, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__USBOH3_USBH2_DIR */
+	IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 5, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__SDMA_DEBUG_CORE_STATE_0 */
+	IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 6, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__EMI_EMI_DEBUG_0 */
+	IMX_PIN_REG(MX53_PAD_DI0_DISP_CLK, 0x378, 0x04C, 7, 0x000, 0), /* MX53_PAD_DI0_DISP_CLK__USBPHY1_AVALID */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 0, 0x000, 0), /* MX53_PAD_DI0_PIN15__IPU_DI0_PIN15 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 1, 0x000, 0), /* MX53_PAD_DI0_PIN15__GPIO4_17 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 2, 0x000, 0), /* MX53_PAD_DI0_PIN15__AUDMUX_AUD6_TXC */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 5, 0x000, 0), /* MX53_PAD_DI0_PIN15__SDMA_DEBUG_CORE_STATE_1 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 6, 0x000, 0), /* MX53_PAD_DI0_PIN15__EMI_EMI_DEBUG_1 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN15, 0x37C, 0x050, 7, 0x000, 0), /* MX53_PAD_DI0_PIN15__USBPHY1_BVALID */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 0, 0x000, 0), /* MX53_PAD_DI0_PIN2__IPU_DI0_PIN2 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 1, 0x000, 0), /* MX53_PAD_DI0_PIN2__GPIO4_18 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 2, 0x000, 0), /* MX53_PAD_DI0_PIN2__AUDMUX_AUD6_TXD */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 5, 0x000, 0), /* MX53_PAD_DI0_PIN2__SDMA_DEBUG_CORE_STATE_2 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 6, 0x000, 0), /* MX53_PAD_DI0_PIN2__EMI_EMI_DEBUG_2 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN2, 0x380, 0x054, 7, 0x000, 0), /* MX53_PAD_DI0_PIN2__USBPHY1_ENDSESSION */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 0, 0x000, 0), /* MX53_PAD_DI0_PIN3__IPU_DI0_PIN3 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 1, 0x000, 0), /* MX53_PAD_DI0_PIN3__GPIO4_19 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 2, 0x000, 0), /* MX53_PAD_DI0_PIN3__AUDMUX_AUD6_TXFS */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 5, 0x000, 0), /* MX53_PAD_DI0_PIN3__SDMA_DEBUG_CORE_STATE_3 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 6, 0x000, 0), /* MX53_PAD_DI0_PIN3__EMI_EMI_DEBUG_3 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN3, 0x384, 0x058, 7, 0x000, 0), /* MX53_PAD_DI0_PIN3__USBPHY1_IDDIG */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 0, 0x000, 0), /* MX53_PAD_DI0_PIN4__IPU_DI0_PIN4 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 1, 0x000, 0), /* MX53_PAD_DI0_PIN4__GPIO4_20 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 2, 0x000, 0), /* MX53_PAD_DI0_PIN4__AUDMUX_AUD6_RXD */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 3, 0x7FC, 0), /* MX53_PAD_DI0_PIN4__ESDHC1_WP */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 5, 0x000, 0), /* MX53_PAD_DI0_PIN4__SDMA_DEBUG_YIELD */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 6, 0x000, 0), /* MX53_PAD_DI0_PIN4__EMI_EMI_DEBUG_4 */
+	IMX_PIN_REG(MX53_PAD_DI0_PIN4, 0x388, 0x05C, 7, 0x000, 0), /* MX53_PAD_DI0_PIN4__USBPHY1_HOSTDISCONNECT */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT0__IPU_DISP0_DAT_0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT0__GPIO4_21 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 2, 0x780, 0), /* MX53_PAD_DISP0_DAT0__CSPI_SCLK */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT0__USBOH3_USBH2_DATA_0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT0__SDMA_DEBUG_CORE_RUN */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT0__EMI_EMI_DEBUG_5 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT0, 0x38C, 0x060, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT0__USBPHY2_TXREADY */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT1__IPU_DISP0_DAT_1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT1__GPIO4_22 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 2, 0x788, 0), /* MX53_PAD_DISP0_DAT1__CSPI_MOSI */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT1__USBOH3_USBH2_DATA_1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT1__SDMA_DEBUG_EVENT_CHANNEL_SEL */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT1__EMI_EMI_DEBUG_6 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT1, 0x390, 0x064, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT1__USBPHY2_RXVALID */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT2__IPU_DISP0_DAT_2 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT2__GPIO4_23 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 2, 0x784, 0), /* MX53_PAD_DISP0_DAT2__CSPI_MISO */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT2__USBOH3_USBH2_DATA_2 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT2__SDMA_DEBUG_MODE */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT2__EMI_EMI_DEBUG_7 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT2, 0x394, 0x068, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT2__USBPHY2_RXACTIVE */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT3__IPU_DISP0_DAT_3 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT3__GPIO4_24 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 2, 0x78C, 0), /* MX53_PAD_DISP0_DAT3__CSPI_SS0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT3__USBOH3_USBH2_DATA_3 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT3__SDMA_DEBUG_BUS_ERROR */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT3__EMI_EMI_DEBUG_8 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT3, 0x398, 0x06C, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT3__USBPHY2_RXERROR */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT4__IPU_DISP0_DAT_4 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT4__GPIO4_25 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 2, 0x790, 0), /* MX53_PAD_DISP0_DAT4__CSPI_SS1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT4__USBOH3_USBH2_DATA_4 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT4__SDMA_DEBUG_BUS_RWB */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT4__EMI_EMI_DEBUG_9 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT4, 0x39C, 0x070, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT4__USBPHY2_SIECLOCK */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT5__IPU_DISP0_DAT_5 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT5__GPIO4_26 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 2, 0x794, 0), /* MX53_PAD_DISP0_DAT5__CSPI_SS2 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT5__USBOH3_USBH2_DATA_5 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT5__SDMA_DEBUG_MATCHED_DMBUS */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT5__EMI_EMI_DEBUG_10 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT5, 0x3A0, 0x074, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT5__USBPHY2_LINESTATE_0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT6__IPU_DISP0_DAT_6 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT6__GPIO4_27 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 2, 0x798, 0), /* MX53_PAD_DISP0_DAT6__CSPI_SS3 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT6__USBOH3_USBH2_DATA_6 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT6__SDMA_DEBUG_RTBUFFER_WRITE */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT6__EMI_EMI_DEBUG_11 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT6, 0x3A4, 0x078, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT6__USBPHY2_LINESTATE_1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT7__IPU_DISP0_DAT_7 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT7__GPIO4_28 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT7__CSPI_RDY */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT7__USBOH3_USBH2_DATA_7 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT7__SDMA_DEBUG_EVENT_CHANNEL_0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT7__EMI_EMI_DEBUG_12 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT7, 0x3A8, 0x07C, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT7__USBPHY2_VBUSVALID */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT8__IPU_DISP0_DAT_8 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT8__GPIO4_29 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT8__PWM1_PWMO */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT8__WDOG1_WDOG_B */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT8__SDMA_DEBUG_EVENT_CHANNEL_1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT8__EMI_EMI_DEBUG_13 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT8, 0x3AC, 0x080, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT8__USBPHY2_AVALID */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT9__IPU_DISP0_DAT_9 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT9__GPIO4_30 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT9__PWM2_PWMO */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 3, 0x000, 0), /* MX53_PAD_DISP0_DAT9__WDOG2_WDOG_B */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT9__SDMA_DEBUG_EVENT_CHANNEL_2 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT9__EMI_EMI_DEBUG_14 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT9, 0x3B0, 0x084, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT9__USBPHY2_VSTATUS_0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT10__IPU_DISP0_DAT_10 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT10__GPIO4_31 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT10__USBOH3_USBH2_STP */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT10__SDMA_DEBUG_EVENT_CHANNEL_3 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT10__EMI_EMI_DEBUG_15 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT10, 0x3B4, 0x088, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT10__USBPHY2_VSTATUS_1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT11__IPU_DISP0_DAT_11 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT11__GPIO5_5 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT11__USBOH3_USBH2_NXT */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT11__SDMA_DEBUG_EVENT_CHANNEL_4 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT11__EMI_EMI_DEBUG_16 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT11, 0x3B8, 0x08C, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT11__USBPHY2_VSTATUS_2 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT12__IPU_DISP0_DAT_12 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT12__GPIO5_6 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 2, 0x000, 0), /* MX53_PAD_DISP0_DAT12__USBOH3_USBH2_CLK */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT12__SDMA_DEBUG_EVENT_CHANNEL_5 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT12__EMI_EMI_DEBUG_17 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT12, 0x3BC, 0x090, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT12__USBPHY2_VSTATUS_3 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT13__IPU_DISP0_DAT_13 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT13__GPIO5_7 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 3, 0x754, 0), /* MX53_PAD_DISP0_DAT13__AUDMUX_AUD5_RXFS */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT13__SDMA_DEBUG_EVT_CHN_LINES_0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT13__EMI_EMI_DEBUG_18 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT13, 0x3C0, 0x094, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT13__USBPHY2_VSTATUS_4 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT14__IPU_DISP0_DAT_14 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT14__GPIO5_8 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 3, 0x750, 0), /* MX53_PAD_DISP0_DAT14__AUDMUX_AUD5_RXC */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT14__SDMA_DEBUG_EVT_CHN_LINES_1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT14__EMI_EMI_DEBUG_19 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT14, 0x3C4, 0x098, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT14__USBPHY2_VSTATUS_5 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT15__IPU_DISP0_DAT_15 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT15__GPIO5_9 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 2, 0x7AC, 1), /* MX53_PAD_DISP0_DAT15__ECSPI1_SS1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 3, 0x7C8, 0), /* MX53_PAD_DISP0_DAT15__ECSPI2_SS1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT15__SDMA_DEBUG_EVT_CHN_LINES_2 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT15__EMI_EMI_DEBUG_20 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT15, 0x3C8, 0x09C, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT15__USBPHY2_VSTATUS_6 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT16__IPU_DISP0_DAT_16 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT16__GPIO5_10 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 2, 0x7C0, 0), /* MX53_PAD_DISP0_DAT16__ECSPI2_MOSI */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 3, 0x758, 1), /* MX53_PAD_DISP0_DAT16__AUDMUX_AUD5_TXC */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 4, 0x868, 0), /* MX53_PAD_DISP0_DAT16__SDMA_EXT_EVENT_0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT16__SDMA_DEBUG_EVT_CHN_LINES_3 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT16__EMI_EMI_DEBUG_21 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT16, 0x3CC, 0x0A0, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT16__USBPHY2_VSTATUS_7 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT17__IPU_DISP0_DAT_17 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT17__GPIO5_11 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 2, 0x7BC, 0), /* MX53_PAD_DISP0_DAT17__ECSPI2_MISO */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 3, 0x74C, 1), /* MX53_PAD_DISP0_DAT17__AUDMUX_AUD5_TXD */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 4, 0x86C, 0), /* MX53_PAD_DISP0_DAT17__SDMA_EXT_EVENT_1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT17__SDMA_DEBUG_EVT_CHN_LINES_4 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT17, 0x3D0, 0x0A4, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT17__EMI_EMI_DEBUG_22 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT18__IPU_DISP0_DAT_18 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT18__GPIO5_12 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 2, 0x7C4, 0), /* MX53_PAD_DISP0_DAT18__ECSPI2_SS0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 3, 0x75C, 1), /* MX53_PAD_DISP0_DAT18__AUDMUX_AUD5_TXFS */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 4, 0x73C, 0), /* MX53_PAD_DISP0_DAT18__AUDMUX_AUD4_RXFS */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT18__SDMA_DEBUG_EVT_CHN_LINES_5 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT18__EMI_EMI_DEBUG_23 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT18, 0x3D4, 0x0A8, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT18__EMI_WEIM_CS_2 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT19__IPU_DISP0_DAT_19 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT19__GPIO5_13 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 2, 0x7B8, 0), /* MX53_PAD_DISP0_DAT19__ECSPI2_SCLK */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 3, 0x748, 1), /* MX53_PAD_DISP0_DAT19__AUDMUX_AUD5_RXD */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 4, 0x738, 0), /* MX53_PAD_DISP0_DAT19__AUDMUX_AUD4_RXC */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT19__SDMA_DEBUG_EVT_CHN_LINES_6 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT19__EMI_EMI_DEBUG_24 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT19, 0x3D8, 0x0AC, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT19__EMI_WEIM_CS_3 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT20__IPU_DISP0_DAT_20 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT20__GPIO5_14 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 2, 0x79C, 1), /* MX53_PAD_DISP0_DAT20__ECSPI1_SCLK */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 3, 0x740, 0), /* MX53_PAD_DISP0_DAT20__AUDMUX_AUD4_TXC */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT20__SDMA_DEBUG_EVT_CHN_LINES_7 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT20__EMI_EMI_DEBUG_25 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT20, 0x3DC, 0x0B0, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT20__SATA_PHY_TDI */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT21__IPU_DISP0_DAT_21 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT21__GPIO5_15 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 2, 0x7A4, 1), /* MX53_PAD_DISP0_DAT21__ECSPI1_MOSI */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 3, 0x734, 0), /* MX53_PAD_DISP0_DAT21__AUDMUX_AUD4_TXD */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT21__SDMA_DEBUG_BUS_DEVICE_0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT21__EMI_EMI_DEBUG_26 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT21, 0x3E0, 0x0B4, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT21__SATA_PHY_TDO */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT22__IPU_DISP0_DAT_22 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT22__GPIO5_16 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 2, 0x7A0, 1), /* MX53_PAD_DISP0_DAT22__ECSPI1_MISO */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 3, 0x744, 0), /* MX53_PAD_DISP0_DAT22__AUDMUX_AUD4_TXFS */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT22__SDMA_DEBUG_BUS_DEVICE_1 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT22__EMI_EMI_DEBUG_27 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT22, 0x3E4, 0x0B8, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT22__SATA_PHY_TCK */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 0, 0x000, 0), /* MX53_PAD_DISP0_DAT23__IPU_DISP0_DAT_23 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 1, 0x000, 0), /* MX53_PAD_DISP0_DAT23__GPIO5_17 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 2, 0x7A8, 1), /* MX53_PAD_DISP0_DAT23__ECSPI1_SS0 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 3, 0x730, 0), /* MX53_PAD_DISP0_DAT23__AUDMUX_AUD4_RXD */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 5, 0x000, 0), /* MX53_PAD_DISP0_DAT23__SDMA_DEBUG_BUS_DEVICE_2 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 6, 0x000, 0), /* MX53_PAD_DISP0_DAT23__EMI_EMI_DEBUG_28 */
+	IMX_PIN_REG(MX53_PAD_DISP0_DAT23, 0x3E8, 0x0BC, 7, 0x000, 0), /* MX53_PAD_DISP0_DAT23__SATA_PHY_TMS */
+	IMX_PIN_REG(MX53_PAD_CSI0_PIXCLK, 0x3EC, 0x0C0, 0, 0x000, 0), /* MX53_PAD_CSI0_PIXCLK__IPU_CSI0_PIXCLK */
+	IMX_PIN_REG(MX53_PAD_CSI0_PIXCLK, 0x3EC, 0x0C0, 1, 0x000, 0), /* MX53_PAD_CSI0_PIXCLK__GPIO5_18 */
+	IMX_PIN_REG(MX53_PAD_CSI0_PIXCLK, 0x3EC, 0x0C0, 5, 0x000, 0), /* MX53_PAD_CSI0_PIXCLK__SDMA_DEBUG_PC_0 */
+	IMX_PIN_REG(MX53_PAD_CSI0_PIXCLK, 0x3EC, 0x0C0, 6, 0x000, 0), /* MX53_PAD_CSI0_PIXCLK__EMI_EMI_DEBUG_29 */
+	IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 0, 0x000, 0), /* MX53_PAD_CSI0_MCLK__IPU_CSI0_HSYNC */
+	IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 1, 0x000, 0), /* MX53_PAD_CSI0_MCLK__GPIO5_19 */
+	IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 2, 0x000, 0), /* MX53_PAD_CSI0_MCLK__CCM_CSI0_MCLK */
+	IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 5, 0x000, 0), /* MX53_PAD_CSI0_MCLK__SDMA_DEBUG_PC_1 */
+	IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 6, 0x000, 0), /* MX53_PAD_CSI0_MCLK__EMI_EMI_DEBUG_30 */
+	IMX_PIN_REG(MX53_PAD_CSI0_MCLK, 0x3F0, 0x0C4, 7, 0x000, 0), /* MX53_PAD_CSI0_MCLK__TPIU_TRCTL */
+	IMX_PIN_REG(MX53_PAD_CSI0_DATA_EN, 0x3F4, 0x0C8, 0, 0x000, 0), /* MX53_PAD_CSI0_DATA_EN__IPU_CSI0_DATA_EN */
+	IMX_PIN_REG(MX53_PAD_CSI0_DATA_EN, 0x3F4, 0x0C8, 1, 0x000, 0), /* MX53_PAD_CSI0_DATA_EN__GPIO5_20 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DATA_EN, 0x3F4, 0x0C8, 5, 0x000, 0), /* MX53_PAD_CSI0_DATA_EN__SDMA_DEBUG_PC_2 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DATA_EN, 0x3F4, 0x0C8, 6, 0x000, 0), /* MX53_PAD_CSI0_DATA_EN__EMI_EMI_DEBUG_31 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DATA_EN, 0x3F4, 0x0C8, 7, 0x000, 0), /* MX53_PAD_CSI0_DATA_EN__TPIU_TRCLK */
+	IMX_PIN_REG(MX53_PAD_CSI0_VSYNC, 0x3F8, 0x0CC, 0, 0x000, 0), /* MX53_PAD_CSI0_VSYNC__IPU_CSI0_VSYNC */
+	IMX_PIN_REG(MX53_PAD_CSI0_VSYNC, 0x3F8, 0x0CC, 1, 0x000, 0), /* MX53_PAD_CSI0_VSYNC__GPIO5_21 */
+	IMX_PIN_REG(MX53_PAD_CSI0_VSYNC, 0x3F8, 0x0CC, 5, 0x000, 0), /* MX53_PAD_CSI0_VSYNC__SDMA_DEBUG_PC_3 */
+	IMX_PIN_REG(MX53_PAD_CSI0_VSYNC, 0x3F8, 0x0CC, 6, 0x000, 0), /* MX53_PAD_CSI0_VSYNC__EMI_EMI_DEBUG_32 */
+	IMX_PIN_REG(MX53_PAD_CSI0_VSYNC, 0x3F8, 0x0CC, 7, 0x000, 0), /* MX53_PAD_CSI0_VSYNC__TPIU_TRACE_0 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT4__IPU_CSI0_D_4 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT4__GPIO5_22 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 2, 0x840, 1), /* MX53_PAD_CSI0_DAT4__KPP_COL_5 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 3, 0x79C, 2), /* MX53_PAD_CSI0_DAT4__ECSPI1_SCLK */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT4__USBOH3_USBH3_STP */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT4__AUDMUX_AUD3_TXC */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT4__EMI_EMI_DEBUG_33 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT4, 0x3FC, 0x0D0, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT4__TPIU_TRACE_1 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT5__IPU_CSI0_D_5 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT5__GPIO5_23 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 2, 0x84C, 0), /* MX53_PAD_CSI0_DAT5__KPP_ROW_5 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 3, 0x7A4, 2), /* MX53_PAD_CSI0_DAT5__ECSPI1_MOSI */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT5__USBOH3_USBH3_NXT */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT5__AUDMUX_AUD3_TXD */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT5__EMI_EMI_DEBUG_34 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT5, 0x400, 0x0D4, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT5__TPIU_TRACE_2 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT6__IPU_CSI0_D_6 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT6__GPIO5_24 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 2, 0x844, 0), /* MX53_PAD_CSI0_DAT6__KPP_COL_6 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 3, 0x7A0, 2), /* MX53_PAD_CSI0_DAT6__ECSPI1_MISO */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT6__USBOH3_USBH3_CLK */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT6__AUDMUX_AUD3_TXFS */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT6__EMI_EMI_DEBUG_35 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT6, 0x404, 0x0D8, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT6__TPIU_TRACE_3 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT7__IPU_CSI0_D_7 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT7__GPIO5_25 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 2, 0x850, 0), /* MX53_PAD_CSI0_DAT7__KPP_ROW_6 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 3, 0x7A8, 2), /* MX53_PAD_CSI0_DAT7__ECSPI1_SS0 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT7__USBOH3_USBH3_DIR */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT7__AUDMUX_AUD3_RXD */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT7__EMI_EMI_DEBUG_36 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT7, 0x408, 0x0DC, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT7__TPIU_TRACE_4 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT8__IPU_CSI0_D_8 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT8__GPIO5_26 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 2, 0x848, 0), /* MX53_PAD_CSI0_DAT8__KPP_COL_7 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 3, 0x7B8, 1), /* MX53_PAD_CSI0_DAT8__ECSPI2_SCLK */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 5, 0x818, 0), /* MX53_PAD_CSI0_DAT8__I2C1_SDA */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT8, 0x40C, 0x0E0, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT9__GPIO5_27 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 2, 0x854, 0), /* MX53_PAD_CSI0_DAT9__KPP_ROW_7 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 3, 0x7C0, 1), /* MX53_PAD_CSI0_DAT9__ECSPI2_MOSI */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 5, 0x814, 0), /* MX53_PAD_CSI0_DAT9__I2C1_SCL */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT9, 0x410, 0x0E4, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT10__GPIO5_28 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 2, 0x000, 0), /* MX53_PAD_CSI0_DAT10__UART1_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 3, 0x7BC, 1), /* MX53_PAD_CSI0_DAT10__ECSPI2_MISO */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT10__AUDMUX_AUD3_RXC */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT10__SDMA_DEBUG_PC_4 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT10__EMI_EMI_DEBUG_39 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT10, 0x414, 0x0E8, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT10__TPIU_TRACE_7 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT11__IPU_CSI0_D_11 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT11__GPIO5_29 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 2, 0x878, 1), /* MX53_PAD_CSI0_DAT11__UART1_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 3, 0x7C4, 1), /* MX53_PAD_CSI0_DAT11__ECSPI2_SS0 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT11__AUDMUX_AUD3_RXFS */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT11__SDMA_DEBUG_PC_5 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT11__EMI_EMI_DEBUG_40 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT11, 0x418, 0x0EC, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT11__TPIU_TRACE_8 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT12__GPIO5_30 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 2, 0x000, 0), /* MX53_PAD_CSI0_DAT12__UART4_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT12__USBOH3_USBH3_DATA_0 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT12__EMI_EMI_DEBUG_41 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT12, 0x41C, 0x0F0, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT12__TPIU_TRACE_9 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT13__GPIO5_31 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 2, 0x890, 3), /* MX53_PAD_CSI0_DAT13__UART4_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT13__USBOH3_USBH3_DATA_1 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT13__EMI_EMI_DEBUG_42 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT13, 0x420, 0x0F4, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT13__TPIU_TRACE_10 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT14__GPIO6_0 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 2, 0x000, 0), /* MX53_PAD_CSI0_DAT14__UART5_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT14__USBOH3_USBH3_DATA_2 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT14__EMI_EMI_DEBUG_43 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT14, 0x424, 0x0F8, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT14__TPIU_TRACE_11 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT15__GPIO6_1 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 2, 0x898, 3), /* MX53_PAD_CSI0_DAT15__UART5_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT15__USBOH3_USBH3_DATA_3 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT15__EMI_EMI_DEBUG_44 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT15, 0x428, 0x0FC, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT15__TPIU_TRACE_12 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT16__GPIO6_2 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 2, 0x88C, 0), /* MX53_PAD_CSI0_DAT16__UART4_RTS */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT16__USBOH3_USBH3_DATA_4 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT16__EMI_EMI_DEBUG_45 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT16, 0x42C, 0x100, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT16__TPIU_TRACE_13 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT17__GPIO6_3 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 2, 0x000, 0), /* MX53_PAD_CSI0_DAT17__UART4_CTS */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT17__USBOH3_USBH3_DATA_5 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT17__EMI_EMI_DEBUG_46 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT17, 0x430, 0x104, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT17__TPIU_TRACE_14 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT18__GPIO6_4 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 2, 0x894, 2), /* MX53_PAD_CSI0_DAT18__UART5_RTS */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT18__USBOH3_USBH3_DATA_6 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT18__EMI_EMI_DEBUG_47 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT18, 0x434, 0x108, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT18__TPIU_TRACE_15 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 0, 0x000, 0), /* MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 1, 0x000, 0), /* MX53_PAD_CSI0_DAT19__GPIO6_5 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 2, 0x000, 0), /* MX53_PAD_CSI0_DAT19__UART5_CTS */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 4, 0x000, 0), /* MX53_PAD_CSI0_DAT19__USBOH3_USBH3_DATA_7 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 5, 0x000, 0), /* MX53_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 6, 0x000, 0), /* MX53_PAD_CSI0_DAT19__EMI_EMI_DEBUG_48 */
+	IMX_PIN_REG(MX53_PAD_CSI0_DAT19, 0x438, 0x10C, 7, 0x000, 0), /* MX53_PAD_CSI0_DAT19__USBPHY2_BISTOK */
+	IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 0, 0x000, 0), /* MX53_PAD_EIM_A25__EMI_WEIM_A_25 */
+	IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 1, 0x000, 0), /* MX53_PAD_EIM_A25__GPIO5_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 2, 0x000, 0), /* MX53_PAD_EIM_A25__ECSPI2_RDY */
+	IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 3, 0x000, 0), /* MX53_PAD_EIM_A25__IPU_DI1_PIN12 */
+	IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 4, 0x790, 1), /* MX53_PAD_EIM_A25__CSPI_SS1 */
+	IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 6, 0x000, 0), /* MX53_PAD_EIM_A25__IPU_DI0_D1_CS */
+	IMX_PIN_REG(MX53_PAD_EIM_A25, 0x458, 0x110, 7, 0x000, 0), /* MX53_PAD_EIM_A25__USBPHY1_BISTOK */
+	IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 0, 0x000, 0), /* MX53_PAD_EIM_EB2__EMI_WEIM_EB_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 1, 0x000, 0), /* MX53_PAD_EIM_EB2__GPIO2_30 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 2, 0x76C, 0), /* MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK */
+	IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 3, 0x000, 0), /* MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS */
+	IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 4, 0x7A8, 3), /* MX53_PAD_EIM_EB2__ECSPI1_SS0 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB2, 0x45C, 0x114, 5, 0x81C, 1), /* MX53_PAD_EIM_EB2__I2C2_SCL */
+	IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 0, 0x000, 0), /* MX53_PAD_EIM_D16__EMI_WEIM_D_16 */
+	IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 1, 0x000, 0), /* MX53_PAD_EIM_D16__GPIO3_16 */
+	IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 2, 0x000, 0), /* MX53_PAD_EIM_D16__IPU_DI0_PIN5 */
+	IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 3, 0x000, 0), /* MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK */
+	IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 4, 0x79C, 3), /* MX53_PAD_EIM_D16__ECSPI1_SCLK */
+	IMX_PIN_REG(MX53_PAD_EIM_D16, 0x460, 0x118, 5, 0x820, 1), /* MX53_PAD_EIM_D16__I2C2_SDA */
+	IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 0, 0x000, 0), /* MX53_PAD_EIM_D17__EMI_WEIM_D_17 */
+	IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 1, 0x000, 0), /* MX53_PAD_EIM_D17__GPIO3_17 */
+	IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 2, 0x000, 0), /* MX53_PAD_EIM_D17__IPU_DI0_PIN6 */
+	IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 3, 0x830, 0), /* MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN */
+	IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 4, 0x7A0, 3), /* MX53_PAD_EIM_D17__ECSPI1_MISO */
+	IMX_PIN_REG(MX53_PAD_EIM_D17, 0x464, 0x11C, 5, 0x824, 0), /* MX53_PAD_EIM_D17__I2C3_SCL */
+	IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 0, 0x000, 0), /* MX53_PAD_EIM_D18__EMI_WEIM_D_18 */
+	IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 1, 0x000, 0), /* MX53_PAD_EIM_D18__GPIO3_18 */
+	IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 2, 0x000, 0), /* MX53_PAD_EIM_D18__IPU_DI0_PIN7 */
+	IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 3, 0x830, 1), /* MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO */
+	IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 4, 0x7A4, 3), /* MX53_PAD_EIM_D18__ECSPI1_MOSI */
+	IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 5, 0x828, 0), /* MX53_PAD_EIM_D18__I2C3_SDA */
+	IMX_PIN_REG(MX53_PAD_EIM_D18, 0x468, 0x120, 6, 0x000, 0), /* MX53_PAD_EIM_D18__IPU_DI1_D0_CS */
+	IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 0, 0x000, 0), /* MX53_PAD_EIM_D19__EMI_WEIM_D_19 */
+	IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 1, 0x000, 0), /* MX53_PAD_EIM_D19__GPIO3_19 */
+	IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 2, 0x000, 0), /* MX53_PAD_EIM_D19__IPU_DI0_PIN8 */
+	IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 3, 0x000, 0), /* MX53_PAD_EIM_D19__IPU_DISPB1_SER_RS */
+	IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 4, 0x7AC, 2), /* MX53_PAD_EIM_D19__ECSPI1_SS1 */
+	IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 5, 0x000, 0), /* MX53_PAD_EIM_D19__EPIT1_EPITO */
+	IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 6, 0x000, 0), /* MX53_PAD_EIM_D19__UART1_CTS */
+	IMX_PIN_REG(MX53_PAD_EIM_D19, 0x46C, 0x124, 7, 0x8A4, 0), /* MX53_PAD_EIM_D19__USBOH3_USBH2_OC */
+	IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 0, 0x000, 0), /* MX53_PAD_EIM_D20__EMI_WEIM_D_20 */
+	IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 1, 0x000, 0), /* MX53_PAD_EIM_D20__GPIO3_20 */
+	IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 2, 0x000, 0), /* MX53_PAD_EIM_D20__IPU_DI0_PIN16 */
+	IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 3, 0x000, 0), /* MX53_PAD_EIM_D20__IPU_SER_DISP0_CS */
+	IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 4, 0x78C, 1), /* MX53_PAD_EIM_D20__CSPI_SS0 */
+	IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 5, 0x000, 0), /* MX53_PAD_EIM_D20__EPIT2_EPITO */
+	IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 6, 0x874, 1), /* MX53_PAD_EIM_D20__UART1_RTS */
+	IMX_PIN_REG(MX53_PAD_EIM_D20, 0x470, 0x128, 7, 0x000, 0), /* MX53_PAD_EIM_D20__USBOH3_USBH2_PWR */
+	IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 0, 0x000, 0), /* MX53_PAD_EIM_D21__EMI_WEIM_D_21 */
+	IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 1, 0x000, 0), /* MX53_PAD_EIM_D21__GPIO3_21 */
+	IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 2, 0x000, 0), /* MX53_PAD_EIM_D21__IPU_DI0_PIN17 */
+	IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 3, 0x000, 0), /* MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK */
+	IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 4, 0x780, 1), /* MX53_PAD_EIM_D21__CSPI_SCLK */
+	IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 5, 0x814, 1), /* MX53_PAD_EIM_D21__I2C1_SCL */
+	IMX_PIN_REG(MX53_PAD_EIM_D21, 0x474, 0x12C, 6, 0x89C, 1), /* MX53_PAD_EIM_D21__USBOH3_USBOTG_OC */
+	IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 0, 0x000, 0), /* MX53_PAD_EIM_D22__EMI_WEIM_D_22 */
+	IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 1, 0x000, 0), /* MX53_PAD_EIM_D22__GPIO3_22 */
+	IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 2, 0x000, 0), /* MX53_PAD_EIM_D22__IPU_DI0_PIN1 */
+	IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 3, 0x82C, 0), /* MX53_PAD_EIM_D22__IPU_DISPB0_SER_DIN */
+	IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 4, 0x784, 1), /* MX53_PAD_EIM_D22__CSPI_MISO */
+	IMX_PIN_REG(MX53_PAD_EIM_D22, 0x478, 0x130, 6, 0x000, 0), /* MX53_PAD_EIM_D22__USBOH3_USBOTG_PWR */
+	IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 0, 0x000, 0), /* MX53_PAD_EIM_D23__EMI_WEIM_D_23 */
+	IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 1, 0x000, 0), /* MX53_PAD_EIM_D23__GPIO3_23 */
+	IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 2, 0x000, 0), /* MX53_PAD_EIM_D23__UART3_CTS */
+	IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 3, 0x000, 0), /* MX53_PAD_EIM_D23__UART1_DCD */
+	IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 4, 0x000, 0), /* MX53_PAD_EIM_D23__IPU_DI0_D0_CS */
+	IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 5, 0x000, 0), /* MX53_PAD_EIM_D23__IPU_DI1_PIN2 */
+	IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 6, 0x834, 0), /* MX53_PAD_EIM_D23__IPU_CSI1_DATA_EN */
+	IMX_PIN_REG(MX53_PAD_EIM_D23, 0x47C, 0x134, 7, 0x000, 0), /* MX53_PAD_EIM_D23__IPU_DI1_PIN14 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 0, 0x000, 0), /* MX53_PAD_EIM_EB3__EMI_WEIM_EB_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 1, 0x000, 0), /* MX53_PAD_EIM_EB3__GPIO2_31 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 2, 0x884, 1), /* MX53_PAD_EIM_EB3__UART3_RTS */
+	IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 3, 0x000, 0), /* MX53_PAD_EIM_EB3__UART1_RI */
+	IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 5, 0x000, 0), /* MX53_PAD_EIM_EB3__IPU_DI1_PIN3 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 6, 0x838, 0), /* MX53_PAD_EIM_EB3__IPU_CSI1_HSYNC */
+	IMX_PIN_REG(MX53_PAD_EIM_EB3, 0x480, 0x138, 7, 0x000, 0), /* MX53_PAD_EIM_EB3__IPU_DI1_PIN16 */
+	IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 0, 0x000, 0), /* MX53_PAD_EIM_D24__EMI_WEIM_D_24 */
+	IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 1, 0x000, 0), /* MX53_PAD_EIM_D24__GPIO3_24 */
+	IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 2, 0x000, 0), /* MX53_PAD_EIM_D24__UART3_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 3, 0x7B0, 1), /* MX53_PAD_EIM_D24__ECSPI1_SS2 */
+	IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 4, 0x794, 1), /* MX53_PAD_EIM_D24__CSPI_SS2 */
+	IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 5, 0x754, 1), /* MX53_PAD_EIM_D24__AUDMUX_AUD5_RXFS */
+	IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 6, 0x000, 0), /* MX53_PAD_EIM_D24__ECSPI2_SS2 */
+	IMX_PIN_REG(MX53_PAD_EIM_D24, 0x484, 0x13C, 7, 0x000, 0), /* MX53_PAD_EIM_D24__UART1_DTR */
+	IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 0, 0x000, 0), /* MX53_PAD_EIM_D25__EMI_WEIM_D_25 */
+	IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 1, 0x000, 0), /* MX53_PAD_EIM_D25__GPIO3_25 */
+	IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 2, 0x888, 1), /* MX53_PAD_EIM_D25__UART3_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 3, 0x7B4, 1), /* MX53_PAD_EIM_D25__ECSPI1_SS3 */
+	IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 4, 0x798, 1), /* MX53_PAD_EIM_D25__CSPI_SS3 */
+	IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 5, 0x750, 1), /* MX53_PAD_EIM_D25__AUDMUX_AUD5_RXC */
+	IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 6, 0x000, 0), /* MX53_PAD_EIM_D25__ECSPI2_SS3 */
+	IMX_PIN_REG(MX53_PAD_EIM_D25, 0x488, 0x140, 7, 0x000, 0), /* MX53_PAD_EIM_D25__UART1_DSR */
+	IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 0, 0x000, 0), /* MX53_PAD_EIM_D26__EMI_WEIM_D_26 */
+	IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 1, 0x000, 0), /* MX53_PAD_EIM_D26__GPIO3_26 */
+	IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 2, 0x000, 0), /* MX53_PAD_EIM_D26__UART2_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 3, 0x80C, 0), /* MX53_PAD_EIM_D26__FIRI_RXD */
+	IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 4, 0x000, 0), /* MX53_PAD_EIM_D26__IPU_CSI0_D_1 */
+	IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 5, 0x000, 0), /* MX53_PAD_EIM_D26__IPU_DI1_PIN11 */
+	IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 6, 0x000, 0), /* MX53_PAD_EIM_D26__IPU_SISG_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_D26, 0x48C, 0x144, 7, 0x000, 0), /* MX53_PAD_EIM_D26__IPU_DISP1_DAT_22 */
+	IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 0, 0x000, 0), /* MX53_PAD_EIM_D27__EMI_WEIM_D_27 */
+	IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 1, 0x000, 0), /* MX53_PAD_EIM_D27__GPIO3_27 */
+	IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 2, 0x880, 1), /* MX53_PAD_EIM_D27__UART2_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 3, 0x000, 0), /* MX53_PAD_EIM_D27__FIRI_TXD */
+	IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 4, 0x000, 0), /* MX53_PAD_EIM_D27__IPU_CSI0_D_0 */
+	IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 5, 0x000, 0), /* MX53_PAD_EIM_D27__IPU_DI1_PIN13 */
+	IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 6, 0x000, 0), /* MX53_PAD_EIM_D27__IPU_SISG_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_D27, 0x490, 0x148, 7, 0x000, 0), /* MX53_PAD_EIM_D27__IPU_DISP1_DAT_23 */
+	IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 0, 0x000, 0), /* MX53_PAD_EIM_D28__EMI_WEIM_D_28 */
+	IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 1, 0x000, 0), /* MX53_PAD_EIM_D28__GPIO3_28 */
+	IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 2, 0x000, 0), /* MX53_PAD_EIM_D28__UART2_CTS */
+	IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 3, 0x82C, 1), /* MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO */
+	IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 4, 0x788, 1), /* MX53_PAD_EIM_D28__CSPI_MOSI */
+	IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 5, 0x818, 1), /* MX53_PAD_EIM_D28__I2C1_SDA */
+	IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 6, 0x000, 0), /* MX53_PAD_EIM_D28__IPU_EXT_TRIG */
+	IMX_PIN_REG(MX53_PAD_EIM_D28, 0x494, 0x14C, 7, 0x000, 0), /* MX53_PAD_EIM_D28__IPU_DI0_PIN13 */
+	IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 0, 0x000, 0), /* MX53_PAD_EIM_D29__EMI_WEIM_D_29 */
+	IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 1, 0x000, 0), /* MX53_PAD_EIM_D29__GPIO3_29 */
+	IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 2, 0x87C, 1), /* MX53_PAD_EIM_D29__UART2_RTS */
+	IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 3, 0x000, 0), /* MX53_PAD_EIM_D29__IPU_DISPB0_SER_RS */
+	IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 4, 0x78C, 2), /* MX53_PAD_EIM_D29__CSPI_SS0 */
+	IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 5, 0x000, 0), /* MX53_PAD_EIM_D29__IPU_DI1_PIN15 */
+	IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 6, 0x83C, 0), /* MX53_PAD_EIM_D29__IPU_CSI1_VSYNC */
+	IMX_PIN_REG(MX53_PAD_EIM_D29, 0x498, 0x150, 7, 0x000, 0), /* MX53_PAD_EIM_D29__IPU_DI0_PIN14 */
+	IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 0, 0x000, 0), /* MX53_PAD_EIM_D30__EMI_WEIM_D_30 */
+	IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 1, 0x000, 0), /* MX53_PAD_EIM_D30__GPIO3_30 */
+	IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 2, 0x000, 0), /* MX53_PAD_EIM_D30__UART3_CTS */
+	IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 3, 0x000, 0), /* MX53_PAD_EIM_D30__IPU_CSI0_D_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 4, 0x000, 0), /* MX53_PAD_EIM_D30__IPU_DI0_PIN11 */
+	IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 5, 0x000, 0), /* MX53_PAD_EIM_D30__IPU_DISP1_DAT_21 */
+	IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 6, 0x8A0, 0), /* MX53_PAD_EIM_D30__USBOH3_USBH1_OC */
+	IMX_PIN_REG(MX53_PAD_EIM_D30, 0x49C, 0x154, 7, 0x8A4, 1), /* MX53_PAD_EIM_D30__USBOH3_USBH2_OC */
+	IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 0, 0x000, 0), /* MX53_PAD_EIM_D31__EMI_WEIM_D_31 */
+	IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 1, 0x000, 0), /* MX53_PAD_EIM_D31__GPIO3_31 */
+	IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 2, 0x884, 3), /* MX53_PAD_EIM_D31__UART3_RTS */
+	IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 3, 0x000, 0), /* MX53_PAD_EIM_D31__IPU_CSI0_D_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 4, 0x000, 0), /* MX53_PAD_EIM_D31__IPU_DI0_PIN12 */
+	IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 5, 0x000, 0), /* MX53_PAD_EIM_D31__IPU_DISP1_DAT_20 */
+	IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 6, 0x000, 0), /* MX53_PAD_EIM_D31__USBOH3_USBH1_PWR */
+	IMX_PIN_REG(MX53_PAD_EIM_D31, 0x4A0, 0x158, 7, 0x000, 0), /* MX53_PAD_EIM_D31__USBOH3_USBH2_PWR */
+	IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 0, 0x000, 0), /* MX53_PAD_EIM_A24__EMI_WEIM_A_24 */
+	IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 1, 0x000, 0), /* MX53_PAD_EIM_A24__GPIO5_4 */
+	IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 2, 0x000, 0), /* MX53_PAD_EIM_A24__IPU_DISP1_DAT_19 */
+	IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 3, 0x000, 0), /* MX53_PAD_EIM_A24__IPU_CSI1_D_19 */
+	IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 6, 0x000, 0), /* MX53_PAD_EIM_A24__IPU_SISG_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_A24, 0x4A8, 0x15C, 7, 0x000, 0), /* MX53_PAD_EIM_A24__USBPHY2_BVALID */
+	IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 0, 0x000, 0), /* MX53_PAD_EIM_A23__EMI_WEIM_A_23 */
+	IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 1, 0x000, 0), /* MX53_PAD_EIM_A23__GPIO6_6 */
+	IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 2, 0x000, 0), /* MX53_PAD_EIM_A23__IPU_DISP1_DAT_18 */
+	IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 3, 0x000, 0), /* MX53_PAD_EIM_A23__IPU_CSI1_D_18 */
+	IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 6, 0x000, 0), /* MX53_PAD_EIM_A23__IPU_SISG_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_A23, 0x4AC, 0x160, 7, 0x000, 0), /* MX53_PAD_EIM_A23__USBPHY2_ENDSESSION */
+	IMX_PIN_REG(MX53_PAD_EIM_A22, 0x4B0, 0x164, 0, 0x000, 0), /* MX53_PAD_EIM_A22__EMI_WEIM_A_22 */
+	IMX_PIN_REG(MX53_PAD_EIM_A22, 0x4B0, 0x164, 1, 0x000, 0), /* MX53_PAD_EIM_A22__GPIO2_16 */
+	IMX_PIN_REG(MX53_PAD_EIM_A22, 0x4B0, 0x164, 2, 0x000, 0), /* MX53_PAD_EIM_A22__IPU_DISP1_DAT_17 */
+	IMX_PIN_REG(MX53_PAD_EIM_A22, 0x4B0, 0x164, 3, 0x000, 0), /* MX53_PAD_EIM_A22__IPU_CSI1_D_17 */
+	IMX_PIN_REG(MX53_PAD_EIM_A22, 0x4B0, 0x164, 7, 0x000, 0), /* MX53_PAD_EIM_A22__SRC_BT_CFG1_7 */
+	IMX_PIN_REG(MX53_PAD_EIM_A21, 0x4B4, 0x168, 0, 0x000, 0), /* MX53_PAD_EIM_A21__EMI_WEIM_A_21 */
+	IMX_PIN_REG(MX53_PAD_EIM_A21, 0x4B4, 0x168, 1, 0x000, 0), /* MX53_PAD_EIM_A21__GPIO2_17 */
+	IMX_PIN_REG(MX53_PAD_EIM_A21, 0x4B4, 0x168, 2, 0x000, 0), /* MX53_PAD_EIM_A21__IPU_DISP1_DAT_16 */
+	IMX_PIN_REG(MX53_PAD_EIM_A21, 0x4B4, 0x168, 3, 0x000, 0), /* MX53_PAD_EIM_A21__IPU_CSI1_D_16 */
+	IMX_PIN_REG(MX53_PAD_EIM_A21, 0x4B4, 0x168, 7, 0x000, 0), /* MX53_PAD_EIM_A21__SRC_BT_CFG1_6 */
+	IMX_PIN_REG(MX53_PAD_EIM_A20, 0x4B8, 0x16C, 0, 0x000, 0), /* MX53_PAD_EIM_A20__EMI_WEIM_A_20 */
+	IMX_PIN_REG(MX53_PAD_EIM_A20, 0x4B8, 0x16C, 1, 0x000, 0), /* MX53_PAD_EIM_A20__GPIO2_18 */
+	IMX_PIN_REG(MX53_PAD_EIM_A20, 0x4B8, 0x16C, 2, 0x000, 0), /* MX53_PAD_EIM_A20__IPU_DISP1_DAT_15 */
+	IMX_PIN_REG(MX53_PAD_EIM_A20, 0x4B8, 0x16C, 3, 0x000, 0), /* MX53_PAD_EIM_A20__IPU_CSI1_D_15 */
+	IMX_PIN_REG(MX53_PAD_EIM_A20, 0x4B8, 0x16C, 7, 0x000, 0), /* MX53_PAD_EIM_A20__SRC_BT_CFG1_5 */
+	IMX_PIN_REG(MX53_PAD_EIM_A19, 0x4BC, 0x170, 0, 0x000, 0), /* MX53_PAD_EIM_A19__EMI_WEIM_A_19 */
+	IMX_PIN_REG(MX53_PAD_EIM_A19, 0x4BC, 0x170, 1, 0x000, 0), /* MX53_PAD_EIM_A19__GPIO2_19 */
+	IMX_PIN_REG(MX53_PAD_EIM_A19, 0x4BC, 0x170, 2, 0x000, 0), /* MX53_PAD_EIM_A19__IPU_DISP1_DAT_14 */
+	IMX_PIN_REG(MX53_PAD_EIM_A19, 0x4BC, 0x170, 3, 0x000, 0), /* MX53_PAD_EIM_A19__IPU_CSI1_D_14 */
+	IMX_PIN_REG(MX53_PAD_EIM_A19, 0x4BC, 0x170, 7, 0x000, 0), /* MX53_PAD_EIM_A19__SRC_BT_CFG1_4 */
+	IMX_PIN_REG(MX53_PAD_EIM_A18, 0x4C0, 0x174, 0, 0x000, 0), /* MX53_PAD_EIM_A18__EMI_WEIM_A_18 */
+	IMX_PIN_REG(MX53_PAD_EIM_A18, 0x4C0, 0x174, 1, 0x000, 0), /* MX53_PAD_EIM_A18__GPIO2_20 */
+	IMX_PIN_REG(MX53_PAD_EIM_A18, 0x4C0, 0x174, 2, 0x000, 0), /* MX53_PAD_EIM_A18__IPU_DISP1_DAT_13 */
+	IMX_PIN_REG(MX53_PAD_EIM_A18, 0x4C0, 0x174, 3, 0x000, 0), /* MX53_PAD_EIM_A18__IPU_CSI1_D_13 */
+	IMX_PIN_REG(MX53_PAD_EIM_A18, 0x4C0, 0x174, 7, 0x000, 0), /* MX53_PAD_EIM_A18__SRC_BT_CFG1_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_A17, 0x4C4, 0x178, 0, 0x000, 0), /* MX53_PAD_EIM_A17__EMI_WEIM_A_17 */
+	IMX_PIN_REG(MX53_PAD_EIM_A17, 0x4C4, 0x178, 1, 0x000, 0), /* MX53_PAD_EIM_A17__GPIO2_21 */
+	IMX_PIN_REG(MX53_PAD_EIM_A17, 0x4C4, 0x178, 2, 0x000, 0), /* MX53_PAD_EIM_A17__IPU_DISP1_DAT_12 */
+	IMX_PIN_REG(MX53_PAD_EIM_A17, 0x4C4, 0x178, 3, 0x000, 0), /* MX53_PAD_EIM_A17__IPU_CSI1_D_12 */
+	IMX_PIN_REG(MX53_PAD_EIM_A17, 0x4C4, 0x178, 7, 0x000, 0), /* MX53_PAD_EIM_A17__SRC_BT_CFG1_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_A16, 0x4C8, 0x17C, 0, 0x000, 0), /* MX53_PAD_EIM_A16__EMI_WEIM_A_16 */
+	IMX_PIN_REG(MX53_PAD_EIM_A16, 0x4C8, 0x17C, 1, 0x000, 0), /* MX53_PAD_EIM_A16__GPIO2_22 */
+	IMX_PIN_REG(MX53_PAD_EIM_A16, 0x4C8, 0x17C, 2, 0x000, 0), /* MX53_PAD_EIM_A16__IPU_DI1_DISP_CLK */
+	IMX_PIN_REG(MX53_PAD_EIM_A16, 0x4C8, 0x17C, 3, 0x000, 0), /* MX53_PAD_EIM_A16__IPU_CSI1_PIXCLK */
+	IMX_PIN_REG(MX53_PAD_EIM_A16, 0x4C8, 0x17C, 7, 0x000, 0), /* MX53_PAD_EIM_A16__SRC_BT_CFG1_1 */
+	IMX_PIN_REG(MX53_PAD_EIM_CS0, 0x4CC, 0x180, 0, 0x000, 0), /* MX53_PAD_EIM_CS0__EMI_WEIM_CS_0 */
+	IMX_PIN_REG(MX53_PAD_EIM_CS0, 0x4CC, 0x180, 1, 0x000, 0), /* MX53_PAD_EIM_CS0__GPIO2_23 */
+	IMX_PIN_REG(MX53_PAD_EIM_CS0, 0x4CC, 0x180, 2, 0x7B8, 2), /* MX53_PAD_EIM_CS0__ECSPI2_SCLK */
+	IMX_PIN_REG(MX53_PAD_EIM_CS0, 0x4CC, 0x180, 3, 0x000, 0), /* MX53_PAD_EIM_CS0__IPU_DI1_PIN5 */
+	IMX_PIN_REG(MX53_PAD_EIM_CS1, 0x4D0, 0x184, 0, 0x000, 0), /* MX53_PAD_EIM_CS1__EMI_WEIM_CS_1 */
+	IMX_PIN_REG(MX53_PAD_EIM_CS1, 0x4D0, 0x184, 1, 0x000, 0), /* MX53_PAD_EIM_CS1__GPIO2_24 */
+	IMX_PIN_REG(MX53_PAD_EIM_CS1, 0x4D0, 0x184, 2, 0x7C0, 2), /* MX53_PAD_EIM_CS1__ECSPI2_MOSI */
+	IMX_PIN_REG(MX53_PAD_EIM_CS1, 0x4D0, 0x184, 3, 0x000, 0), /* MX53_PAD_EIM_CS1__IPU_DI1_PIN6 */
+	IMX_PIN_REG(MX53_PAD_EIM_OE, 0x4D4, 0x188, 0, 0x000, 0), /* MX53_PAD_EIM_OE__EMI_WEIM_OE */
+	IMX_PIN_REG(MX53_PAD_EIM_OE, 0x4D4, 0x188, 1, 0x000, 0), /* MX53_PAD_EIM_OE__GPIO2_25 */
+	IMX_PIN_REG(MX53_PAD_EIM_OE, 0x4D4, 0x188, 2, 0x7BC, 2), /* MX53_PAD_EIM_OE__ECSPI2_MISO */
+	IMX_PIN_REG(MX53_PAD_EIM_OE, 0x4D4, 0x188, 3, 0x000, 0), /* MX53_PAD_EIM_OE__IPU_DI1_PIN7 */
+	IMX_PIN_REG(MX53_PAD_EIM_OE, 0x4D4, 0x188, 7, 0x000, 0), /* MX53_PAD_EIM_OE__USBPHY2_IDDIG */
+	IMX_PIN_REG(MX53_PAD_EIM_RW, 0x4D8, 0x18C, 0, 0x000, 0), /* MX53_PAD_EIM_RW__EMI_WEIM_RW */
+	IMX_PIN_REG(MX53_PAD_EIM_RW, 0x4D8, 0x18C, 1, 0x000, 0), /* MX53_PAD_EIM_RW__GPIO2_26 */
+	IMX_PIN_REG(MX53_PAD_EIM_RW, 0x4D8, 0x18C, 2, 0x7C4, 2), /* MX53_PAD_EIM_RW__ECSPI2_SS0 */
+	IMX_PIN_REG(MX53_PAD_EIM_RW, 0x4D8, 0x18C, 3, 0x000, 0), /* MX53_PAD_EIM_RW__IPU_DI1_PIN8 */
+	IMX_PIN_REG(MX53_PAD_EIM_RW, 0x4D8, 0x18C, 7, 0x000, 0), /* MX53_PAD_EIM_RW__USBPHY2_HOSTDISCONNECT */
+	IMX_PIN_REG(MX53_PAD_EIM_LBA, 0x4DC, 0x190, 0, 0x000, 0), /* MX53_PAD_EIM_LBA__EMI_WEIM_LBA */
+	IMX_PIN_REG(MX53_PAD_EIM_LBA, 0x4DC, 0x190, 1, 0x000, 0), /* MX53_PAD_EIM_LBA__GPIO2_27 */
+	IMX_PIN_REG(MX53_PAD_EIM_LBA, 0x4DC, 0x190, 2, 0x7C8, 1), /* MX53_PAD_EIM_LBA__ECSPI2_SS1 */
+	IMX_PIN_REG(MX53_PAD_EIM_LBA, 0x4DC, 0x190, 3, 0x000, 0), /* MX53_PAD_EIM_LBA__IPU_DI1_PIN17 */
+	IMX_PIN_REG(MX53_PAD_EIM_LBA, 0x4DC, 0x190, 7, 0x000, 0), /* MX53_PAD_EIM_LBA__SRC_BT_CFG1_0 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 0, 0x000, 0), /* MX53_PAD_EIM_EB0__EMI_WEIM_EB_0 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 1, 0x000, 0), /* MX53_PAD_EIM_EB0__GPIO2_28 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 3, 0x000, 0), /* MX53_PAD_EIM_EB0__IPU_DISP1_DAT_11 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 4, 0x000, 0), /* MX53_PAD_EIM_EB0__IPU_CSI1_D_11 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 5, 0x810, 0), /* MX53_PAD_EIM_EB0__GPC_PMIC_RDY */
+	IMX_PIN_REG(MX53_PAD_EIM_EB0, 0x4E4, 0x194, 7, 0x000, 0), /* MX53_PAD_EIM_EB0__SRC_BT_CFG2_7 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB1, 0x4E8, 0x198, 0, 0x000, 0), /* MX53_PAD_EIM_EB1__EMI_WEIM_EB_1 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB1, 0x4E8, 0x198, 1, 0x000, 0), /* MX53_PAD_EIM_EB1__GPIO2_29 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB1, 0x4E8, 0x198, 3, 0x000, 0), /* MX53_PAD_EIM_EB1__IPU_DISP1_DAT_10 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB1, 0x4E8, 0x198, 4, 0x000, 0), /* MX53_PAD_EIM_EB1__IPU_CSI1_D_10 */
+	IMX_PIN_REG(MX53_PAD_EIM_EB1, 0x4E8, 0x198, 7, 0x000, 0), /* MX53_PAD_EIM_EB1__SRC_BT_CFG2_6 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA0, 0x4EC, 0x19C, 0, 0x000, 0), /* MX53_PAD_EIM_DA0__EMI_NAND_WEIM_DA_0 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA0, 0x4EC, 0x19C, 1, 0x000, 0), /* MX53_PAD_EIM_DA0__GPIO3_0 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA0, 0x4EC, 0x19C, 3, 0x000, 0), /* MX53_PAD_EIM_DA0__IPU_DISP1_DAT_9 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA0, 0x4EC, 0x19C, 4, 0x000, 0), /* MX53_PAD_EIM_DA0__IPU_CSI1_D_9 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA0, 0x4EC, 0x19C, 7, 0x000, 0), /* MX53_PAD_EIM_DA0__SRC_BT_CFG2_5 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA1, 0x4F0, 0x1A0, 0, 0x000, 0), /* MX53_PAD_EIM_DA1__EMI_NAND_WEIM_DA_1 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA1, 0x4F0, 0x1A0, 1, 0x000, 0), /* MX53_PAD_EIM_DA1__GPIO3_1 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA1, 0x4F0, 0x1A0, 3, 0x000, 0), /* MX53_PAD_EIM_DA1__IPU_DISP1_DAT_8 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA1, 0x4F0, 0x1A0, 4, 0x000, 0), /* MX53_PAD_EIM_DA1__IPU_CSI1_D_8 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA1, 0x4F0, 0x1A0, 7, 0x000, 0), /* MX53_PAD_EIM_DA1__SRC_BT_CFG2_4 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA2, 0x4F4, 0x1A4, 0, 0x000, 0), /* MX53_PAD_EIM_DA2__EMI_NAND_WEIM_DA_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA2, 0x4F4, 0x1A4, 1, 0x000, 0), /* MX53_PAD_EIM_DA2__GPIO3_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA2, 0x4F4, 0x1A4, 3, 0x000, 0), /* MX53_PAD_EIM_DA2__IPU_DISP1_DAT_7 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA2, 0x4F4, 0x1A4, 4, 0x000, 0), /* MX53_PAD_EIM_DA2__IPU_CSI1_D_7 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA2, 0x4F4, 0x1A4, 7, 0x000, 0), /* MX53_PAD_EIM_DA2__SRC_BT_CFG2_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA3, 0x4F8, 0x1A8, 0, 0x000, 0), /* MX53_PAD_EIM_DA3__EMI_NAND_WEIM_DA_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA3, 0x4F8, 0x1A8, 1, 0x000, 0), /* MX53_PAD_EIM_DA3__GPIO3_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA3, 0x4F8, 0x1A8, 3, 0x000, 0), /* MX53_PAD_EIM_DA3__IPU_DISP1_DAT_6 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA3, 0x4F8, 0x1A8, 4, 0x000, 0), /* MX53_PAD_EIM_DA3__IPU_CSI1_D_6 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA3, 0x4F8, 0x1A8, 7, 0x000, 0), /* MX53_PAD_EIM_DA3__SRC_BT_CFG2_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA4, 0x4FC, 0x1AC, 0, 0x000, 0), /* MX53_PAD_EIM_DA4__EMI_NAND_WEIM_DA_4 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA4, 0x4FC, 0x1AC, 1, 0x000, 0), /* MX53_PAD_EIM_DA4__GPIO3_4 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA4, 0x4FC, 0x1AC, 3, 0x000, 0), /* MX53_PAD_EIM_DA4__IPU_DISP1_DAT_5 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA4, 0x4FC, 0x1AC, 4, 0x000, 0), /* MX53_PAD_EIM_DA4__IPU_CSI1_D_5 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA4, 0x4FC, 0x1AC, 7, 0x000, 0), /* MX53_PAD_EIM_DA4__SRC_BT_CFG3_7 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA5, 0x500, 0x1B0, 0, 0x000, 0), /* MX53_PAD_EIM_DA5__EMI_NAND_WEIM_DA_5 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA5, 0x500, 0x1B0, 1, 0x000, 0), /* MX53_PAD_EIM_DA5__GPIO3_5 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA5, 0x500, 0x1B0, 3, 0x000, 0), /* MX53_PAD_EIM_DA5__IPU_DISP1_DAT_4 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA5, 0x500, 0x1B0, 4, 0x000, 0), /* MX53_PAD_EIM_DA5__IPU_CSI1_D_4 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA5, 0x500, 0x1B0, 7, 0x000, 0), /* MX53_PAD_EIM_DA5__SRC_BT_CFG3_6 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA6, 0x504, 0x1B4, 0, 0x000, 0), /* MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA6, 0x504, 0x1B4, 1, 0x000, 0), /* MX53_PAD_EIM_DA6__GPIO3_6 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA6, 0x504, 0x1B4, 3, 0x000, 0), /* MX53_PAD_EIM_DA6__IPU_DISP1_DAT_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA6, 0x504, 0x1B4, 4, 0x000, 0), /* MX53_PAD_EIM_DA6__IPU_CSI1_D_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA6, 0x504, 0x1B4, 7, 0x000, 0), /* MX53_PAD_EIM_DA6__SRC_BT_CFG3_5 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA7, 0x508, 0x1B8, 0, 0x000, 0), /* MX53_PAD_EIM_DA7__EMI_NAND_WEIM_DA_7 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA7, 0x508, 0x1B8, 1, 0x000, 0), /* MX53_PAD_EIM_DA7__GPIO3_7 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA7, 0x508, 0x1B8, 3, 0x000, 0), /* MX53_PAD_EIM_DA7__IPU_DISP1_DAT_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA7, 0x508, 0x1B8, 4, 0x000, 0), /* MX53_PAD_EIM_DA7__IPU_CSI1_D_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA7, 0x508, 0x1B8, 7, 0x000, 0), /* MX53_PAD_EIM_DA7__SRC_BT_CFG3_4 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA8, 0x50C, 0x1BC, 0, 0x000, 0), /* MX53_PAD_EIM_DA8__EMI_NAND_WEIM_DA_8 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA8, 0x50C, 0x1BC, 1, 0x000, 0), /* MX53_PAD_EIM_DA8__GPIO3_8 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA8, 0x50C, 0x1BC, 3, 0x000, 0), /* MX53_PAD_EIM_DA8__IPU_DISP1_DAT_1 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA8, 0x50C, 0x1BC, 4, 0x000, 0), /* MX53_PAD_EIM_DA8__IPU_CSI1_D_1 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA8, 0x50C, 0x1BC, 7, 0x000, 0), /* MX53_PAD_EIM_DA8__SRC_BT_CFG3_3 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA9, 0x510, 0x1C0, 0, 0x000, 0), /* MX53_PAD_EIM_DA9__EMI_NAND_WEIM_DA_9 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA9, 0x510, 0x1C0, 1, 0x000, 0), /* MX53_PAD_EIM_DA9__GPIO3_9 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA9, 0x510, 0x1C0, 3, 0x000, 0), /* MX53_PAD_EIM_DA9__IPU_DISP1_DAT_0 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA9, 0x510, 0x1C0, 4, 0x000, 0), /* MX53_PAD_EIM_DA9__IPU_CSI1_D_0 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA9, 0x510, 0x1C0, 7, 0x000, 0), /* MX53_PAD_EIM_DA9__SRC_BT_CFG3_2 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA10, 0x514, 0x1C4, 0, 0x000, 0), /* MX53_PAD_EIM_DA10__EMI_NAND_WEIM_DA_10 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA10, 0x514, 0x1C4, 1, 0x000, 0), /* MX53_PAD_EIM_DA10__GPIO3_10 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA10, 0x514, 0x1C4, 3, 0x000, 0), /* MX53_PAD_EIM_DA10__IPU_DI1_PIN15 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA10, 0x514, 0x1C4, 4, 0x834, 1), /* MX53_PAD_EIM_DA10__IPU_CSI1_DATA_EN */
+	IMX_PIN_REG(MX53_PAD_EIM_DA10, 0x514, 0x1C4, 7, 0x000, 0), /* MX53_PAD_EIM_DA10__SRC_BT_CFG3_1 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA11, 0x518, 0x1C8, 0, 0x000, 0), /* MX53_PAD_EIM_DA11__EMI_NAND_WEIM_DA_11 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA11, 0x518, 0x1C8, 1, 0x000, 0), /* MX53_PAD_EIM_DA11__GPIO3_11 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA11, 0x518, 0x1C8, 3, 0x000, 0), /* MX53_PAD_EIM_DA11__IPU_DI1_PIN2 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA11, 0x518, 0x1C8, 4, 0x838, 1), /* MX53_PAD_EIM_DA11__IPU_CSI1_HSYNC */
+	IMX_PIN_REG(MX53_PAD_EIM_DA12, 0x51C, 0x1CC, 0, 0x000, 0), /* MX53_PAD_EIM_DA12__EMI_NAND_WEIM_DA_12 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA12, 0x51C, 0x1CC, 1, 0x000, 0), /* MX53_PAD_EIM_DA12__GPIO3_12 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA12, 0x51C, 0x1CC, 3, 0x000, 0), /* MX53_PAD_EIM_DA12__IPU_DI1_PIN3 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA12, 0x51C, 0x1CC, 4, 0x83C, 1), /* MX53_PAD_EIM_DA12__IPU_CSI1_VSYNC */
+	IMX_PIN_REG(MX53_PAD_EIM_DA13, 0x520, 0x1D0, 0, 0x000, 0), /* MX53_PAD_EIM_DA13__EMI_NAND_WEIM_DA_13 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA13, 0x520, 0x1D0, 1, 0x000, 0), /* MX53_PAD_EIM_DA13__GPIO3_13 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA13, 0x520, 0x1D0, 3, 0x000, 0), /* MX53_PAD_EIM_DA13__IPU_DI1_D0_CS */
+	IMX_PIN_REG(MX53_PAD_EIM_DA13, 0x520, 0x1D0, 4, 0x76C, 1), /* MX53_PAD_EIM_DA13__CCM_DI1_EXT_CLK */
+	IMX_PIN_REG(MX53_PAD_EIM_DA14, 0x524, 0x1D4, 0, 0x000, 0), /* MX53_PAD_EIM_DA14__EMI_NAND_WEIM_DA_14 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA14, 0x524, 0x1D4, 1, 0x000, 0), /* MX53_PAD_EIM_DA14__GPIO3_14 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA14, 0x524, 0x1D4, 3, 0x000, 0), /* MX53_PAD_EIM_DA14__IPU_DI1_D1_CS */
+	IMX_PIN_REG(MX53_PAD_EIM_DA14, 0x524, 0x1D4, 4, 0x000, 0), /* MX53_PAD_EIM_DA14__CCM_DI0_EXT_CLK */
+	IMX_PIN_REG(MX53_PAD_EIM_DA15, 0x528, 0x1D8, 0, 0x000, 0), /* MX53_PAD_EIM_DA15__EMI_NAND_WEIM_DA_15 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA15, 0x528, 0x1D8, 1, 0x000, 0), /* MX53_PAD_EIM_DA15__GPIO3_15 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA15, 0x528, 0x1D8, 3, 0x000, 0), /* MX53_PAD_EIM_DA15__IPU_DI1_PIN1 */
+	IMX_PIN_REG(MX53_PAD_EIM_DA15, 0x528, 0x1D8, 4, 0x000, 0), /* MX53_PAD_EIM_DA15__IPU_DI1_PIN4 */
+	IMX_PIN_REG(MX53_PAD_NANDF_WE_B, 0x52C, 0x1DC, 0, 0x000, 0), /* MX53_PAD_NANDF_WE_B__EMI_NANDF_WE_B */
+	IMX_PIN_REG(MX53_PAD_NANDF_WE_B, 0x52C, 0x1DC, 1, 0x000, 0), /* MX53_PAD_NANDF_WE_B__GPIO6_12 */
+	IMX_PIN_REG(MX53_PAD_NANDF_RE_B, 0x530, 0x1E0, 0, 0x000, 0), /* MX53_PAD_NANDF_RE_B__EMI_NANDF_RE_B */
+	IMX_PIN_REG(MX53_PAD_NANDF_RE_B, 0x530, 0x1E0, 1, 0x000, 0), /* MX53_PAD_NANDF_RE_B__GPIO6_13 */
+	IMX_PIN_REG(MX53_PAD_EIM_WAIT, 0x534, 0x1E4, 0, 0x000, 0), /* MX53_PAD_EIM_WAIT__EMI_WEIM_WAIT */
+	IMX_PIN_REG(MX53_PAD_EIM_WAIT, 0x534, 0x1E4, 1, 0x000, 0), /* MX53_PAD_EIM_WAIT__GPIO5_0 */
+	IMX_PIN_REG(MX53_PAD_EIM_WAIT, 0x534, 0x1E4, 2, 0x000, 0), /* MX53_PAD_EIM_WAIT__EMI_WEIM_DTACK_B */
+	IMX_PIN_REG(MX53_PAD_LVDS1_TX3_P, NO_PAD, 0x1EC, 0, 0x000, 0), /* MX53_PAD_LVDS1_TX3_P__GPIO6_22 */
+	IMX_PIN_REG(MX53_PAD_LVDS1_TX3_P, NO_PAD, 0x1EC, 1, 0x000, 0), /* MX53_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3 */
+	IMX_PIN_REG(MX53_PAD_LVDS1_TX2_P, NO_PAD, 0x1F0, 0, 0x000, 0), /* MX53_PAD_LVDS1_TX2_P__GPIO6_24 */
+	IMX_PIN_REG(MX53_PAD_LVDS1_TX2_P, NO_PAD, 0x1F0, 1, 0x000, 0), /* MX53_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2 */
+	IMX_PIN_REG(MX53_PAD_LVDS1_CLK_P, NO_PAD, 0x1F4, 0, 0x000, 0), /* MX53_PAD_LVDS1_CLK_P__GPIO6_26 */
+	IMX_PIN_REG(MX53_PAD_LVDS1_CLK_P, NO_PAD, 0x1F4, 1, 0x000, 0), /* MX53_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK */
+	IMX_PIN_REG(MX53_PAD_LVDS1_TX1_P, NO_PAD, 0x1F8, 0, 0x000, 0), /* MX53_PAD_LVDS1_TX1_P__GPIO6_28 */
+	IMX_PIN_REG(MX53_PAD_LVDS1_TX1_P, NO_PAD, 0x1F8, 1, 0x000, 0), /* MX53_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1 */
+	IMX_PIN_REG(MX53_PAD_LVDS1_TX0_P, NO_PAD, 0x1FC, 0, 0x000, 0), /* MX53_PAD_LVDS1_TX0_P__GPIO6_30 */
+	IMX_PIN_REG(MX53_PAD_LVDS1_TX0_P, NO_PAD, 0x1FC, 1, 0x000, 0), /* MX53_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0 */
+	IMX_PIN_REG(MX53_PAD_LVDS0_TX3_P, NO_PAD, 0x200, 0, 0x000, 0), /* MX53_PAD_LVDS0_TX3_P__GPIO7_22 */
+	IMX_PIN_REG(MX53_PAD_LVDS0_TX3_P, NO_PAD, 0x200, 1, 0x000, 0), /* MX53_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3 */
+	IMX_PIN_REG(MX53_PAD_LVDS0_CLK_P, NO_PAD, 0x204, 0, 0x000, 0), /* MX53_PAD_LVDS0_CLK_P__GPIO7_24 */
+	IMX_PIN_REG(MX53_PAD_LVDS0_CLK_P, NO_PAD, 0x204, 1, 0x000, 0), /* MX53_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK */
+	IMX_PIN_REG(MX53_PAD_LVDS0_TX2_P, NO_PAD, 0x208, 0, 0x000, 0), /* MX53_PAD_LVDS0_TX2_P__GPIO7_26 */
+	IMX_PIN_REG(MX53_PAD_LVDS0_TX2_P, NO_PAD, 0x208, 1, 0x000, 0), /* MX53_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2 */
+	IMX_PIN_REG(MX53_PAD_LVDS0_TX1_P, NO_PAD, 0x20C, 0, 0x000, 0), /* MX53_PAD_LVDS0_TX1_P__GPIO7_28 */
+	IMX_PIN_REG(MX53_PAD_LVDS0_TX1_P, NO_PAD, 0x20C, 1, 0x000, 0), /* MX53_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1 */
+	IMX_PIN_REG(MX53_PAD_LVDS0_TX0_P, NO_PAD, 0x210, 0, 0x000, 0), /* MX53_PAD_LVDS0_TX0_P__GPIO7_30 */
+	IMX_PIN_REG(MX53_PAD_LVDS0_TX0_P, NO_PAD, 0x210, 1, 0x000, 0), /* MX53_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0 */
+	IMX_PIN_REG(MX53_PAD_GPIO_10, 0x540, 0x214, 0, 0x000, 0), /* MX53_PAD_GPIO_10__GPIO4_0 */
+	IMX_PIN_REG(MX53_PAD_GPIO_10, 0x540, 0x214, 1, 0x000, 0), /* MX53_PAD_GPIO_10__OSC32k_32K_OUT */
+	IMX_PIN_REG(MX53_PAD_GPIO_11, 0x544, 0x218, 0, 0x000, 0), /* MX53_PAD_GPIO_11__GPIO4_1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_12, 0x548, 0x21C, 0, 0x000, 0), /* MX53_PAD_GPIO_12__GPIO4_2 */
+	IMX_PIN_REG(MX53_PAD_GPIO_13, 0x54C, 0x220, 0, 0x000, 0), /* MX53_PAD_GPIO_13__GPIO4_3 */
+	IMX_PIN_REG(MX53_PAD_GPIO_14, 0x550, 0x224, 0, 0x000, 0), /* MX53_PAD_GPIO_14__GPIO4_4 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CLE, 0x5A0, 0x228, 0, 0x000, 0), /* MX53_PAD_NANDF_CLE__EMI_NANDF_CLE */
+	IMX_PIN_REG(MX53_PAD_NANDF_CLE, 0x5A0, 0x228, 1, 0x000, 0), /* MX53_PAD_NANDF_CLE__GPIO6_7 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CLE, 0x5A0, 0x228, 7, 0x000, 0), /* MX53_PAD_NANDF_CLE__USBPHY1_VSTATUS_0 */
+	IMX_PIN_REG(MX53_PAD_NANDF_ALE, 0x5A4, 0x22C, 0, 0x000, 0), /* MX53_PAD_NANDF_ALE__EMI_NANDF_ALE */
+	IMX_PIN_REG(MX53_PAD_NANDF_ALE, 0x5A4, 0x22C, 1, 0x000, 0), /* MX53_PAD_NANDF_ALE__GPIO6_8 */
+	IMX_PIN_REG(MX53_PAD_NANDF_ALE, 0x5A4, 0x22C, 7, 0x000, 0), /* MX53_PAD_NANDF_ALE__USBPHY1_VSTATUS_1 */
+	IMX_PIN_REG(MX53_PAD_NANDF_WP_B, 0x5A8, 0x230, 0, 0x000, 0), /* MX53_PAD_NANDF_WP_B__EMI_NANDF_WP_B */
+	IMX_PIN_REG(MX53_PAD_NANDF_WP_B, 0x5A8, 0x230, 1, 0x000, 0), /* MX53_PAD_NANDF_WP_B__GPIO6_9 */
+	IMX_PIN_REG(MX53_PAD_NANDF_WP_B, 0x5A8, 0x230, 7, 0x000, 0), /* MX53_PAD_NANDF_WP_B__USBPHY1_VSTATUS_2 */
+	IMX_PIN_REG(MX53_PAD_NANDF_RB0, 0x5AC, 0x234, 0, 0x000, 0), /* MX53_PAD_NANDF_RB0__EMI_NANDF_RB_0 */
+	IMX_PIN_REG(MX53_PAD_NANDF_RB0, 0x5AC, 0x234, 1, 0x000, 0), /* MX53_PAD_NANDF_RB0__GPIO6_10 */
+	IMX_PIN_REG(MX53_PAD_NANDF_RB0, 0x5AC, 0x234, 7, 0x000, 0), /* MX53_PAD_NANDF_RB0__USBPHY1_VSTATUS_3 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS0, 0x5B0, 0x238, 0, 0x000, 0), /* MX53_PAD_NANDF_CS0__EMI_NANDF_CS_0 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS0, 0x5B0, 0x238, 1, 0x000, 0), /* MX53_PAD_NANDF_CS0__GPIO6_11 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS0, 0x5B0, 0x238, 7, 0x000, 0), /* MX53_PAD_NANDF_CS0__USBPHY1_VSTATUS_4 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS1, 0x5B4, 0x23C, 0, 0x000, 0), /* MX53_PAD_NANDF_CS1__EMI_NANDF_CS_1 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS1, 0x5B4, 0x23C, 1, 0x000, 0), /* MX53_PAD_NANDF_CS1__GPIO6_14 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS1, 0x5B4, 0x23C, 6, 0x858, 0), /* MX53_PAD_NANDF_CS1__MLB_MLBCLK */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS1, 0x5B4, 0x23C, 7, 0x000, 0), /* MX53_PAD_NANDF_CS1__USBPHY1_VSTATUS_5 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 0, 0x000, 0), /* MX53_PAD_NANDF_CS2__EMI_NANDF_CS_2 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 1, 0x000, 0), /* MX53_PAD_NANDF_CS2__GPIO6_15 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 2, 0x000, 0), /* MX53_PAD_NANDF_CS2__IPU_SISG_0 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 3, 0x7E4, 0), /* MX53_PAD_NANDF_CS2__ESAI1_TX0 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 4, 0x000, 0), /* MX53_PAD_NANDF_CS2__EMI_WEIM_CRE */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 5, 0x000, 0), /* MX53_PAD_NANDF_CS2__CCM_CSI0_MCLK */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 6, 0x860, 0), /* MX53_PAD_NANDF_CS2__MLB_MLBSIG */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS2, 0x5B8, 0x240, 7, 0x000, 0), /* MX53_PAD_NANDF_CS2__USBPHY1_VSTATUS_6 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 0, 0x000, 0), /* MX53_PAD_NANDF_CS3__EMI_NANDF_CS_3 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 1, 0x000, 0), /* MX53_PAD_NANDF_CS3__GPIO6_16 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 2, 0x000, 0), /* MX53_PAD_NANDF_CS3__IPU_SISG_1 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 3, 0x7E8, 0), /* MX53_PAD_NANDF_CS3__ESAI1_TX1 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 4, 0x000, 0), /* MX53_PAD_NANDF_CS3__EMI_WEIM_A_26 */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 6, 0x85C, 0), /* MX53_PAD_NANDF_CS3__MLB_MLBDAT */
+	IMX_PIN_REG(MX53_PAD_NANDF_CS3, 0x5BC, 0x244, 7, 0x000, 0), /* MX53_PAD_NANDF_CS3__USBPHY1_VSTATUS_7 */
+	IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 0, 0x804, 1), /* MX53_PAD_FEC_MDIO__FEC_MDIO */
+	IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 1, 0x000, 0), /* MX53_PAD_FEC_MDIO__GPIO1_22 */
+	IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 2, 0x7DC, 0), /* MX53_PAD_FEC_MDIO__ESAI1_SCKR */
+	IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 3, 0x800, 1), /* MX53_PAD_FEC_MDIO__FEC_COL */
+	IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 4, 0x000, 0), /* MX53_PAD_FEC_MDIO__RTC_CE_RTC_PS2 */
+	IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 5, 0x000, 0), /* MX53_PAD_FEC_MDIO__SDMA_DEBUG_BUS_DEVICE_3 */
+	IMX_PIN_REG(MX53_PAD_FEC_MDIO, 0x5C4, 0x248, 6, 0x000, 0), /* MX53_PAD_FEC_MDIO__EMI_EMI_DEBUG_49 */
+	IMX_PIN_REG(MX53_PAD_FEC_REF_CLK, 0x5C8, 0x24C, 0, 0x000, 0), /* MX53_PAD_FEC_REF_CLK__FEC_TX_CLK */
+	IMX_PIN_REG(MX53_PAD_FEC_REF_CLK, 0x5C8, 0x24C, 1, 0x000, 0), /* MX53_PAD_FEC_REF_CLK__GPIO1_23 */
+	IMX_PIN_REG(MX53_PAD_FEC_REF_CLK, 0x5C8, 0x24C, 2, 0x7CC, 0), /* MX53_PAD_FEC_REF_CLK__ESAI1_FSR */
+	IMX_PIN_REG(MX53_PAD_FEC_REF_CLK, 0x5C8, 0x24C, 5, 0x000, 0), /* MX53_PAD_FEC_REF_CLK__SDMA_DEBUG_BUS_DEVICE_4 */
+	IMX_PIN_REG(MX53_PAD_FEC_REF_CLK, 0x5C8, 0x24C, 6, 0x000, 0), /* MX53_PAD_FEC_REF_CLK__EMI_EMI_DEBUG_50 */
+	IMX_PIN_REG(MX53_PAD_FEC_RX_ER, 0x5CC, 0x250, 0, 0x000, 0), /* MX53_PAD_FEC_RX_ER__FEC_RX_ER */
+	IMX_PIN_REG(MX53_PAD_FEC_RX_ER, 0x5CC, 0x250, 1, 0x000, 0), /* MX53_PAD_FEC_RX_ER__GPIO1_24 */
+	IMX_PIN_REG(MX53_PAD_FEC_RX_ER, 0x5CC, 0x250, 2, 0x7D4, 0), /* MX53_PAD_FEC_RX_ER__ESAI1_HCKR */
+	IMX_PIN_REG(MX53_PAD_FEC_RX_ER, 0x5CC, 0x250, 3, 0x808, 1), /* MX53_PAD_FEC_RX_ER__FEC_RX_CLK */
+	IMX_PIN_REG(MX53_PAD_FEC_RX_ER, 0x5CC, 0x250, 4, 0x000, 0), /* MX53_PAD_FEC_RX_ER__RTC_CE_RTC_PS3 */
+	IMX_PIN_REG(MX53_PAD_FEC_CRS_DV, 0x5D0, 0x254, 0, 0x000, 0), /* MX53_PAD_FEC_CRS_DV__FEC_RX_DV */
+	IMX_PIN_REG(MX53_PAD_FEC_CRS_DV, 0x5D0, 0x254, 1, 0x000, 0), /* MX53_PAD_FEC_CRS_DV__GPIO1_25 */
+	IMX_PIN_REG(MX53_PAD_FEC_CRS_DV, 0x5D0, 0x254, 2, 0x7E0, 0), /* MX53_PAD_FEC_CRS_DV__ESAI1_SCKT */
+	IMX_PIN_REG(MX53_PAD_FEC_RXD1, 0x5D4, 0x258, 0, 0x000, 0), /* MX53_PAD_FEC_RXD1__FEC_RDATA_1 */
+	IMX_PIN_REG(MX53_PAD_FEC_RXD1, 0x5D4, 0x258, 1, 0x000, 0), /* MX53_PAD_FEC_RXD1__GPIO1_26 */
+	IMX_PIN_REG(MX53_PAD_FEC_RXD1, 0x5D4, 0x258, 2, 0x7D0, 0), /* MX53_PAD_FEC_RXD1__ESAI1_FST */
+	IMX_PIN_REG(MX53_PAD_FEC_RXD1, 0x5D4, 0x258, 3, 0x860, 1), /* MX53_PAD_FEC_RXD1__MLB_MLBSIG */
+	IMX_PIN_REG(MX53_PAD_FEC_RXD1, 0x5D4, 0x258, 4, 0x000, 0), /* MX53_PAD_FEC_RXD1__RTC_CE_RTC_PS1 */
+	IMX_PIN_REG(MX53_PAD_FEC_RXD0, 0x5D8, 0x25C, 0, 0x000, 0), /* MX53_PAD_FEC_RXD0__FEC_RDATA_0 */
+	IMX_PIN_REG(MX53_PAD_FEC_RXD0, 0x5D8, 0x25C, 1, 0x000, 0), /* MX53_PAD_FEC_RXD0__GPIO1_27 */
+	IMX_PIN_REG(MX53_PAD_FEC_RXD0, 0x5D8, 0x25C, 2, 0x7D8, 0), /* MX53_PAD_FEC_RXD0__ESAI1_HCKT */
+	IMX_PIN_REG(MX53_PAD_FEC_RXD0, 0x5D8, 0x25C, 3, 0x000, 0), /* MX53_PAD_FEC_RXD0__OSC32k_32K_OUT */
+	IMX_PIN_REG(MX53_PAD_FEC_TX_EN, 0x5DC, 0x260, 0, 0x000, 0), /* MX53_PAD_FEC_TX_EN__FEC_TX_EN */
+	IMX_PIN_REG(MX53_PAD_FEC_TX_EN, 0x5DC, 0x260, 1, 0x000, 0), /* MX53_PAD_FEC_TX_EN__GPIO1_28 */
+	IMX_PIN_REG(MX53_PAD_FEC_TX_EN, 0x5DC, 0x260, 2, 0x7F0, 0), /* MX53_PAD_FEC_TX_EN__ESAI1_TX3_RX2 */
+	IMX_PIN_REG(MX53_PAD_FEC_TXD1, 0x5E0, 0x264, 0, 0x000, 0), /* MX53_PAD_FEC_TXD1__FEC_TDATA_1 */
+	IMX_PIN_REG(MX53_PAD_FEC_TXD1, 0x5E0, 0x264, 1, 0x000, 0), /* MX53_PAD_FEC_TXD1__GPIO1_29 */
+	IMX_PIN_REG(MX53_PAD_FEC_TXD1, 0x5E0, 0x264, 2, 0x7EC, 0), /* MX53_PAD_FEC_TXD1__ESAI1_TX2_RX3 */
+	IMX_PIN_REG(MX53_PAD_FEC_TXD1, 0x5E0, 0x264, 3, 0x858, 1), /* MX53_PAD_FEC_TXD1__MLB_MLBCLK */
+	IMX_PIN_REG(MX53_PAD_FEC_TXD1, 0x5E0, 0x264, 4, 0x000, 0), /* MX53_PAD_FEC_TXD1__RTC_CE_RTC_PRSC_CLK */
+	IMX_PIN_REG(MX53_PAD_FEC_TXD0, 0x5E4, 0x268, 0, 0x000, 0), /* MX53_PAD_FEC_TXD0__FEC_TDATA_0 */
+	IMX_PIN_REG(MX53_PAD_FEC_TXD0, 0x5E4, 0x268, 1, 0x000, 0), /* MX53_PAD_FEC_TXD0__GPIO1_30 */
+	IMX_PIN_REG(MX53_PAD_FEC_TXD0, 0x5E4, 0x268, 2, 0x7F4, 0), /* MX53_PAD_FEC_TXD0__ESAI1_TX4_RX1 */
+	IMX_PIN_REG(MX53_PAD_FEC_TXD0, 0x5E4, 0x268, 7, 0x000, 0), /* MX53_PAD_FEC_TXD0__USBPHY2_DATAOUT_0 */
+	IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 0, 0x000, 0), /* MX53_PAD_FEC_MDC__FEC_MDC */
+	IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 1, 0x000, 0), /* MX53_PAD_FEC_MDC__GPIO1_31 */
+	IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 2, 0x7F8, 0), /* MX53_PAD_FEC_MDC__ESAI1_TX5_RX0 */
+	IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 3, 0x85C, 1), /* MX53_PAD_FEC_MDC__MLB_MLBDAT */
+	IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 4, 0x000, 0), /* MX53_PAD_FEC_MDC__RTC_CE_RTC_ALARM1_TRIG */
+	IMX_PIN_REG(MX53_PAD_FEC_MDC, 0x5E8, 0x26C, 7, 0x000, 0), /* MX53_PAD_FEC_MDC__USBPHY2_DATAOUT_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_DIOW, 0x5F0, 0x270, 0, 0x000, 0), /* MX53_PAD_PATA_DIOW__PATA_DIOW */
+	IMX_PIN_REG(MX53_PAD_PATA_DIOW, 0x5F0, 0x270, 1, 0x000, 0), /* MX53_PAD_PATA_DIOW__GPIO6_17 */
+	IMX_PIN_REG(MX53_PAD_PATA_DIOW, 0x5F0, 0x270, 3, 0x000, 0), /* MX53_PAD_PATA_DIOW__UART1_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_PATA_DIOW, 0x5F0, 0x270, 7, 0x000, 0), /* MX53_PAD_PATA_DIOW__USBPHY2_DATAOUT_2 */
+	IMX_PIN_REG(MX53_PAD_PATA_DMACK, 0x5F4, 0x274, 0, 0x000, 0), /* MX53_PAD_PATA_DMACK__PATA_DMACK */
+	IMX_PIN_REG(MX53_PAD_PATA_DMACK, 0x5F4, 0x274, 1, 0x000, 0), /* MX53_PAD_PATA_DMACK__GPIO6_18 */
+	IMX_PIN_REG(MX53_PAD_PATA_DMACK, 0x5F4, 0x274, 3, 0x878, 3), /* MX53_PAD_PATA_DMACK__UART1_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_PATA_DMACK, 0x5F4, 0x274, 7, 0x000, 0), /* MX53_PAD_PATA_DMACK__USBPHY2_DATAOUT_3 */
+	IMX_PIN_REG(MX53_PAD_PATA_DMARQ, 0x5F8, 0x278, 0, 0x000, 0), /* MX53_PAD_PATA_DMARQ__PATA_DMARQ */
+	IMX_PIN_REG(MX53_PAD_PATA_DMARQ, 0x5F8, 0x278, 1, 0x000, 0), /* MX53_PAD_PATA_DMARQ__GPIO7_0 */
+	IMX_PIN_REG(MX53_PAD_PATA_DMARQ, 0x5F8, 0x278, 3, 0x000, 0), /* MX53_PAD_PATA_DMARQ__UART2_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_PATA_DMARQ, 0x5F8, 0x278, 5, 0x000, 0), /* MX53_PAD_PATA_DMARQ__CCM_CCM_OUT_0 */
+	IMX_PIN_REG(MX53_PAD_PATA_DMARQ, 0x5F8, 0x278, 7, 0x000, 0), /* MX53_PAD_PATA_DMARQ__USBPHY2_DATAOUT_4 */
+	IMX_PIN_REG(MX53_PAD_PATA_BUFFER_EN, 0x5FC, 0x27C, 0, 0x000, 0), /* MX53_PAD_PATA_BUFFER_EN__PATA_BUFFER_EN */
+	IMX_PIN_REG(MX53_PAD_PATA_BUFFER_EN, 0x5FC, 0x27C, 1, 0x000, 0), /* MX53_PAD_PATA_BUFFER_EN__GPIO7_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_BUFFER_EN, 0x5FC, 0x27C, 3, 0x880, 3), /* MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_PATA_BUFFER_EN, 0x5FC, 0x27C, 5, 0x000, 0), /* MX53_PAD_PATA_BUFFER_EN__CCM_CCM_OUT_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_BUFFER_EN, 0x5FC, 0x27C, 7, 0x000, 0), /* MX53_PAD_PATA_BUFFER_EN__USBPHY2_DATAOUT_5 */
+	IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 0, 0x000, 0), /* MX53_PAD_PATA_INTRQ__PATA_INTRQ */
+	IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 1, 0x000, 0), /* MX53_PAD_PATA_INTRQ__GPIO7_2 */
+	IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 3, 0x000, 0), /* MX53_PAD_PATA_INTRQ__UART2_CTS */
+	IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 4, 0x000, 0), /* MX53_PAD_PATA_INTRQ__CAN1_TXCAN */
+	IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 5, 0x000, 0), /* MX53_PAD_PATA_INTRQ__CCM_CCM_OUT_2 */
+	IMX_PIN_REG(MX53_PAD_PATA_INTRQ, 0x600, 0x280, 7, 0x000, 0), /* MX53_PAD_PATA_INTRQ__USBPHY2_DATAOUT_6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DIOR, 0x604, 0x284, 0, 0x000, 0), /* MX53_PAD_PATA_DIOR__PATA_DIOR */
+	IMX_PIN_REG(MX53_PAD_PATA_DIOR, 0x604, 0x284, 1, 0x000, 0), /* MX53_PAD_PATA_DIOR__GPIO7_3 */
+	IMX_PIN_REG(MX53_PAD_PATA_DIOR, 0x604, 0x284, 3, 0x87C, 3), /* MX53_PAD_PATA_DIOR__UART2_RTS */
+	IMX_PIN_REG(MX53_PAD_PATA_DIOR, 0x604, 0x284, 4, 0x760, 1), /* MX53_PAD_PATA_DIOR__CAN1_RXCAN */
+	IMX_PIN_REG(MX53_PAD_PATA_DIOR, 0x604, 0x284, 7, 0x000, 0), /* MX53_PAD_PATA_DIOR__USBPHY2_DATAOUT_7 */
+	IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 0, 0x000, 0), /* MX53_PAD_PATA_RESET_B__PATA_PATA_RESET_B */
+	IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 1, 0x000, 0), /* MX53_PAD_PATA_RESET_B__GPIO7_4 */
+	IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 2, 0x000, 0), /* MX53_PAD_PATA_RESET_B__ESDHC3_CMD */
+	IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 3, 0x000, 0), /* MX53_PAD_PATA_RESET_B__UART1_CTS */
+	IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 4, 0x000, 0), /* MX53_PAD_PATA_RESET_B__CAN2_TXCAN */
+	IMX_PIN_REG(MX53_PAD_PATA_RESET_B, 0x608, 0x288, 7, 0x000, 0), /* MX53_PAD_PATA_RESET_B__USBPHY1_DATAOUT_0 */
+	IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 0, 0x000, 0), /* MX53_PAD_PATA_IORDY__PATA_IORDY */
+	IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 1, 0x000, 0), /* MX53_PAD_PATA_IORDY__GPIO7_5 */
+	IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 2, 0x000, 0), /* MX53_PAD_PATA_IORDY__ESDHC3_CLK */
+	IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 3, 0x874, 3), /* MX53_PAD_PATA_IORDY__UART1_RTS */
+	IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 4, 0x764, 1), /* MX53_PAD_PATA_IORDY__CAN2_RXCAN */
+	IMX_PIN_REG(MX53_PAD_PATA_IORDY, 0x60C, 0x28C, 7, 0x000, 0), /* MX53_PAD_PATA_IORDY__USBPHY1_DATAOUT_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_0, 0x610, 0x290, 0, 0x000, 0), /* MX53_PAD_PATA_DA_0__PATA_DA_0 */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_0, 0x610, 0x290, 1, 0x000, 0), /* MX53_PAD_PATA_DA_0__GPIO7_6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_0, 0x610, 0x290, 2, 0x000, 0), /* MX53_PAD_PATA_DA_0__ESDHC3_RST */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_0, 0x610, 0x290, 4, 0x864, 0), /* MX53_PAD_PATA_DA_0__OWIRE_LINE */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_0, 0x610, 0x290, 7, 0x000, 0), /* MX53_PAD_PATA_DA_0__USBPHY1_DATAOUT_2 */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_1, 0x614, 0x294, 0, 0x000, 0), /* MX53_PAD_PATA_DA_1__PATA_DA_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_1, 0x614, 0x294, 1, 0x000, 0), /* MX53_PAD_PATA_DA_1__GPIO7_7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_1, 0x614, 0x294, 2, 0x000, 0), /* MX53_PAD_PATA_DA_1__ESDHC4_CMD */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_1, 0x614, 0x294, 4, 0x000, 0), /* MX53_PAD_PATA_DA_1__UART3_CTS */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_1, 0x614, 0x294, 7, 0x000, 0), /* MX53_PAD_PATA_DA_1__USBPHY1_DATAOUT_3 */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_2, 0x618, 0x298, 0, 0x000, 0), /* MX53_PAD_PATA_DA_2__PATA_DA_2 */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_2, 0x618, 0x298, 1, 0x000, 0), /* MX53_PAD_PATA_DA_2__GPIO7_8 */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_2, 0x618, 0x298, 2, 0x000, 0), /* MX53_PAD_PATA_DA_2__ESDHC4_CLK */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_2, 0x618, 0x298, 4, 0x884, 5), /* MX53_PAD_PATA_DA_2__UART3_RTS */
+	IMX_PIN_REG(MX53_PAD_PATA_DA_2, 0x618, 0x298, 7, 0x000, 0), /* MX53_PAD_PATA_DA_2__USBPHY1_DATAOUT_4 */
+	IMX_PIN_REG(MX53_PAD_PATA_CS_0, 0x61C, 0x29C, 0, 0x000, 0), /* MX53_PAD_PATA_CS_0__PATA_CS_0 */
+	IMX_PIN_REG(MX53_PAD_PATA_CS_0, 0x61C, 0x29C, 1, 0x000, 0), /* MX53_PAD_PATA_CS_0__GPIO7_9 */
+	IMX_PIN_REG(MX53_PAD_PATA_CS_0, 0x61C, 0x29C, 4, 0x000, 0), /* MX53_PAD_PATA_CS_0__UART3_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_PATA_CS_0, 0x61C, 0x29C, 7, 0x000, 0), /* MX53_PAD_PATA_CS_0__USBPHY1_DATAOUT_5 */
+	IMX_PIN_REG(MX53_PAD_PATA_CS_1, 0x620, 0x2A0, 0, 0x000, 0), /* MX53_PAD_PATA_CS_1__PATA_CS_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_CS_1, 0x620, 0x2A0, 1, 0x000, 0), /* MX53_PAD_PATA_CS_1__GPIO7_10 */
+	IMX_PIN_REG(MX53_PAD_PATA_CS_1, 0x620, 0x2A0, 4, 0x888, 3), /* MX53_PAD_PATA_CS_1__UART3_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_PATA_CS_1, 0x620, 0x2A0, 7, 0x000, 0), /* MX53_PAD_PATA_CS_1__USBPHY1_DATAOUT_6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 0, 0x000, 0), /* MX53_PAD_PATA_DATA0__PATA_DATA_0 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 1, 0x000, 0), /* MX53_PAD_PATA_DATA0__GPIO2_0 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 3, 0x000, 0), /* MX53_PAD_PATA_DATA0__EMI_NANDF_D_0 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 4, 0x000, 0), /* MX53_PAD_PATA_DATA0__ESDHC3_DAT4 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 5, 0x000, 0), /* MX53_PAD_PATA_DATA0__GPU3d_GPU_DEBUG_OUT_0 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 6, 0x000, 0), /* MX53_PAD_PATA_DATA0__IPU_DIAG_BUS_0 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA0, 0x628, 0x2A4, 7, 0x000, 0), /* MX53_PAD_PATA_DATA0__USBPHY1_DATAOUT_7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 0, 0x000, 0), /* MX53_PAD_PATA_DATA1__PATA_DATA_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 1, 0x000, 0), /* MX53_PAD_PATA_DATA1__GPIO2_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 3, 0x000, 0), /* MX53_PAD_PATA_DATA1__EMI_NANDF_D_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 4, 0x000, 0), /* MX53_PAD_PATA_DATA1__ESDHC3_DAT5 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 5, 0x000, 0), /* MX53_PAD_PATA_DATA1__GPU3d_GPU_DEBUG_OUT_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA1, 0x62C, 0x2A8, 6, 0x000, 0), /* MX53_PAD_PATA_DATA1__IPU_DIAG_BUS_1 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 0, 0x000, 0), /* MX53_PAD_PATA_DATA2__PATA_DATA_2 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 1, 0x000, 0), /* MX53_PAD_PATA_DATA2__GPIO2_2 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 3, 0x000, 0), /* MX53_PAD_PATA_DATA2__EMI_NANDF_D_2 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 4, 0x000, 0), /* MX53_PAD_PATA_DATA2__ESDHC3_DAT6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 5, 0x000, 0), /* MX53_PAD_PATA_DATA2__GPU3d_GPU_DEBUG_OUT_2 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA2, 0x630, 0x2AC, 6, 0x000, 0), /* MX53_PAD_PATA_DATA2__IPU_DIAG_BUS_2 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 0, 0x000, 0), /* MX53_PAD_PATA_DATA3__PATA_DATA_3 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 1, 0x000, 0), /* MX53_PAD_PATA_DATA3__GPIO2_3 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 3, 0x000, 0), /* MX53_PAD_PATA_DATA3__EMI_NANDF_D_3 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 4, 0x000, 0), /* MX53_PAD_PATA_DATA3__ESDHC3_DAT7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 5, 0x000, 0), /* MX53_PAD_PATA_DATA3__GPU3d_GPU_DEBUG_OUT_3 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA3, 0x634, 0x2B0, 6, 0x000, 0), /* MX53_PAD_PATA_DATA3__IPU_DIAG_BUS_3 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 0, 0x000, 0), /* MX53_PAD_PATA_DATA4__PATA_DATA_4 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 1, 0x000, 0), /* MX53_PAD_PATA_DATA4__GPIO2_4 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 3, 0x000, 0), /* MX53_PAD_PATA_DATA4__EMI_NANDF_D_4 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 4, 0x000, 0), /* MX53_PAD_PATA_DATA4__ESDHC4_DAT4 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 5, 0x000, 0), /* MX53_PAD_PATA_DATA4__GPU3d_GPU_DEBUG_OUT_4 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA4, 0x638, 0x2B4, 6, 0x000, 0), /* MX53_PAD_PATA_DATA4__IPU_DIAG_BUS_4 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 0, 0x000, 0), /* MX53_PAD_PATA_DATA5__PATA_DATA_5 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 1, 0x000, 0), /* MX53_PAD_PATA_DATA5__GPIO2_5 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 3, 0x000, 0), /* MX53_PAD_PATA_DATA5__EMI_NANDF_D_5 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 4, 0x000, 0), /* MX53_PAD_PATA_DATA5__ESDHC4_DAT5 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 5, 0x000, 0), /* MX53_PAD_PATA_DATA5__GPU3d_GPU_DEBUG_OUT_5 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA5, 0x63C, 0x2B8, 6, 0x000, 0), /* MX53_PAD_PATA_DATA5__IPU_DIAG_BUS_5 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 0, 0x000, 0), /* MX53_PAD_PATA_DATA6__PATA_DATA_6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 1, 0x000, 0), /* MX53_PAD_PATA_DATA6__GPIO2_6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 3, 0x000, 0), /* MX53_PAD_PATA_DATA6__EMI_NANDF_D_6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 4, 0x000, 0), /* MX53_PAD_PATA_DATA6__ESDHC4_DAT6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 5, 0x000, 0), /* MX53_PAD_PATA_DATA6__GPU3d_GPU_DEBUG_OUT_6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA6, 0x640, 0x2BC, 6, 0x000, 0), /* MX53_PAD_PATA_DATA6__IPU_DIAG_BUS_6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 0, 0x000, 0), /* MX53_PAD_PATA_DATA7__PATA_DATA_7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 1, 0x000, 0), /* MX53_PAD_PATA_DATA7__GPIO2_7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 3, 0x000, 0), /* MX53_PAD_PATA_DATA7__EMI_NANDF_D_7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 4, 0x000, 0), /* MX53_PAD_PATA_DATA7__ESDHC4_DAT7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 5, 0x000, 0), /* MX53_PAD_PATA_DATA7__GPU3d_GPU_DEBUG_OUT_7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA7, 0x644, 0x2C0, 6, 0x000, 0), /* MX53_PAD_PATA_DATA7__IPU_DIAG_BUS_7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 0, 0x000, 0), /* MX53_PAD_PATA_DATA8__PATA_DATA_8 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 1, 0x000, 0), /* MX53_PAD_PATA_DATA8__GPIO2_8 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 2, 0x000, 0), /* MX53_PAD_PATA_DATA8__ESDHC1_DAT4 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 3, 0x000, 0), /* MX53_PAD_PATA_DATA8__EMI_NANDF_D_8 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 4, 0x000, 0), /* MX53_PAD_PATA_DATA8__ESDHC3_DAT0 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 5, 0x000, 0), /* MX53_PAD_PATA_DATA8__GPU3d_GPU_DEBUG_OUT_8 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA8, 0x648, 0x2C4, 6, 0x000, 0), /* MX53_PAD_PATA_DATA8__IPU_DIAG_BUS_8 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 0, 0x000, 0), /* MX53_PAD_PATA_DATA9__PATA_DATA_9 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 1, 0x000, 0), /* MX53_PAD_PATA_DATA9__GPIO2_9 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 2, 0x000, 0), /* MX53_PAD_PATA_DATA9__ESDHC1_DAT5 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 3, 0x000, 0), /* MX53_PAD_PATA_DATA9__EMI_NANDF_D_9 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 4, 0x000, 0), /* MX53_PAD_PATA_DATA9__ESDHC3_DAT1 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 5, 0x000, 0), /* MX53_PAD_PATA_DATA9__GPU3d_GPU_DEBUG_OUT_9 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA9, 0x64C, 0x2C8, 6, 0x000, 0), /* MX53_PAD_PATA_DATA9__IPU_DIAG_BUS_9 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 0, 0x000, 0), /* MX53_PAD_PATA_DATA10__PATA_DATA_10 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 1, 0x000, 0), /* MX53_PAD_PATA_DATA10__GPIO2_10 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 2, 0x000, 0), /* MX53_PAD_PATA_DATA10__ESDHC1_DAT6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 3, 0x000, 0), /* MX53_PAD_PATA_DATA10__EMI_NANDF_D_10 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 4, 0x000, 0), /* MX53_PAD_PATA_DATA10__ESDHC3_DAT2 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 5, 0x000, 0), /* MX53_PAD_PATA_DATA10__GPU3d_GPU_DEBUG_OUT_10 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA10, 0x650, 0x2CC, 6, 0x000, 0), /* MX53_PAD_PATA_DATA10__IPU_DIAG_BUS_10 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 0, 0x000, 0), /* MX53_PAD_PATA_DATA11__PATA_DATA_11 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 1, 0x000, 0), /* MX53_PAD_PATA_DATA11__GPIO2_11 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 2, 0x000, 0), /* MX53_PAD_PATA_DATA11__ESDHC1_DAT7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 3, 0x000, 0), /* MX53_PAD_PATA_DATA11__EMI_NANDF_D_11 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 4, 0x000, 0), /* MX53_PAD_PATA_DATA11__ESDHC3_DAT3 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 5, 0x000, 0), /* MX53_PAD_PATA_DATA11__GPU3d_GPU_DEBUG_OUT_11 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA11, 0x654, 0x2D0, 6, 0x000, 0), /* MX53_PAD_PATA_DATA11__IPU_DIAG_BUS_11 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 0, 0x000, 0), /* MX53_PAD_PATA_DATA12__PATA_DATA_12 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 1, 0x000, 0), /* MX53_PAD_PATA_DATA12__GPIO2_12 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 2, 0x000, 0), /* MX53_PAD_PATA_DATA12__ESDHC2_DAT4 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 3, 0x000, 0), /* MX53_PAD_PATA_DATA12__EMI_NANDF_D_12 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 4, 0x000, 0), /* MX53_PAD_PATA_DATA12__ESDHC4_DAT0 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 5, 0x000, 0), /* MX53_PAD_PATA_DATA12__GPU3d_GPU_DEBUG_OUT_12 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA12, 0x658, 0x2D4, 6, 0x000, 0), /* MX53_PAD_PATA_DATA12__IPU_DIAG_BUS_12 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 0, 0x000, 0), /* MX53_PAD_PATA_DATA13__PATA_DATA_13 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 1, 0x000, 0), /* MX53_PAD_PATA_DATA13__GPIO2_13 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 2, 0x000, 0), /* MX53_PAD_PATA_DATA13__ESDHC2_DAT5 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 3, 0x000, 0), /* MX53_PAD_PATA_DATA13__EMI_NANDF_D_13 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 4, 0x000, 0), /* MX53_PAD_PATA_DATA13__ESDHC4_DAT1 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 5, 0x000, 0), /* MX53_PAD_PATA_DATA13__GPU3d_GPU_DEBUG_OUT_13 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA13, 0x65C, 0x2D8, 6, 0x000, 0), /* MX53_PAD_PATA_DATA13__IPU_DIAG_BUS_13 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 0, 0x000, 0), /* MX53_PAD_PATA_DATA14__PATA_DATA_14 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 1, 0x000, 0), /* MX53_PAD_PATA_DATA14__GPIO2_14 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 2, 0x000, 0), /* MX53_PAD_PATA_DATA14__ESDHC2_DAT6 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 3, 0x000, 0), /* MX53_PAD_PATA_DATA14__EMI_NANDF_D_14 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 4, 0x000, 0), /* MX53_PAD_PATA_DATA14__ESDHC4_DAT2 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 5, 0x000, 0), /* MX53_PAD_PATA_DATA14__GPU3d_GPU_DEBUG_OUT_14 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA14, 0x660, 0x2DC, 6, 0x000, 0), /* MX53_PAD_PATA_DATA14__IPU_DIAG_BUS_14 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 0, 0x000, 0), /* MX53_PAD_PATA_DATA15__PATA_DATA_15 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 1, 0x000, 0), /* MX53_PAD_PATA_DATA15__GPIO2_15 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 2, 0x000, 0), /* MX53_PAD_PATA_DATA15__ESDHC2_DAT7 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 3, 0x000, 0), /* MX53_PAD_PATA_DATA15__EMI_NANDF_D_15 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 4, 0x000, 0), /* MX53_PAD_PATA_DATA15__ESDHC4_DAT3 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 5, 0x000, 0), /* MX53_PAD_PATA_DATA15__GPU3d_GPU_DEBUG_OUT_15 */
+	IMX_PIN_REG(MX53_PAD_PATA_DATA15, 0x664, 0x2E0, 6, 0x000, 0), /* MX53_PAD_PATA_DATA15__IPU_DIAG_BUS_15 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA0, 0x66C, 0x2E4, 0, 0x000, 0), /* MX53_PAD_SD1_DATA0__ESDHC1_DAT0 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA0, 0x66C, 0x2E4, 1, 0x000, 0), /* MX53_PAD_SD1_DATA0__GPIO1_16 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA0, 0x66C, 0x2E4, 3, 0x000, 0), /* MX53_PAD_SD1_DATA0__GPT_CAPIN1 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA0, 0x66C, 0x2E4, 5, 0x784, 2), /* MX53_PAD_SD1_DATA0__CSPI_MISO */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA0, 0x66C, 0x2E4, 7, 0x778, 0), /* MX53_PAD_SD1_DATA0__CCM_PLL3_BYP */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA1, 0x670, 0x2E8, 0, 0x000, 0), /* MX53_PAD_SD1_DATA1__ESDHC1_DAT1 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA1, 0x670, 0x2E8, 1, 0x000, 0), /* MX53_PAD_SD1_DATA1__GPIO1_17 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA1, 0x670, 0x2E8, 3, 0x000, 0), /* MX53_PAD_SD1_DATA1__GPT_CAPIN2 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA1, 0x670, 0x2E8, 5, 0x78C, 3), /* MX53_PAD_SD1_DATA1__CSPI_SS0 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA1, 0x670, 0x2E8, 7, 0x77C, 1), /* MX53_PAD_SD1_DATA1__CCM_PLL4_BYP */
+	IMX_PIN_REG(MX53_PAD_SD1_CMD, 0x674, 0x2EC, 0, 0x000, 0), /* MX53_PAD_SD1_CMD__ESDHC1_CMD */
+	IMX_PIN_REG(MX53_PAD_SD1_CMD, 0x674, 0x2EC, 1, 0x000, 0), /* MX53_PAD_SD1_CMD__GPIO1_18 */
+	IMX_PIN_REG(MX53_PAD_SD1_CMD, 0x674, 0x2EC, 3, 0x000, 0), /* MX53_PAD_SD1_CMD__GPT_CMPOUT1 */
+	IMX_PIN_REG(MX53_PAD_SD1_CMD, 0x674, 0x2EC, 5, 0x788, 2), /* MX53_PAD_SD1_CMD__CSPI_MOSI */
+	IMX_PIN_REG(MX53_PAD_SD1_CMD, 0x674, 0x2EC, 7, 0x770, 0), /* MX53_PAD_SD1_CMD__CCM_PLL1_BYP */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 0, 0x000, 0), /* MX53_PAD_SD1_DATA2__ESDHC1_DAT2 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 1, 0x000, 0), /* MX53_PAD_SD1_DATA2__GPIO1_19 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 2, 0x000, 0), /* MX53_PAD_SD1_DATA2__GPT_CMPOUT2 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 3, 0x000, 0), /* MX53_PAD_SD1_DATA2__PWM2_PWMO */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 4, 0x000, 0), /* MX53_PAD_SD1_DATA2__WDOG1_WDOG_B */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 5, 0x790, 2), /* MX53_PAD_SD1_DATA2__CSPI_SS1 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 6, 0x000, 0), /* MX53_PAD_SD1_DATA2__WDOG1_WDOG_RST_B_DEB */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA2, 0x678, 0x2F0, 7, 0x774, 0), /* MX53_PAD_SD1_DATA2__CCM_PLL2_BYP */
+	IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 0, 0x000, 0), /* MX53_PAD_SD1_CLK__ESDHC1_CLK */
+	IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 1, 0x000, 0), /* MX53_PAD_SD1_CLK__GPIO1_20 */
+	IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 2, 0x000, 0), /* MX53_PAD_SD1_CLK__OSC32k_32K_OUT */
+	IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 3, 0x000, 0), /* MX53_PAD_SD1_CLK__GPT_CLKIN */
+	IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 5, 0x780, 2), /* MX53_PAD_SD1_CLK__CSPI_SCLK */
+	IMX_PIN_REG(MX53_PAD_SD1_CLK, 0x67C, 0x2F4, 7, 0x000, 0), /* MX53_PAD_SD1_CLK__SATA_PHY_DTB_0 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 0, 0x000, 0), /* MX53_PAD_SD1_DATA3__ESDHC1_DAT3 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 1, 0x000, 0), /* MX53_PAD_SD1_DATA3__GPIO1_21 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 2, 0x000, 0), /* MX53_PAD_SD1_DATA3__GPT_CMPOUT3 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 3, 0x000, 0), /* MX53_PAD_SD1_DATA3__PWM1_PWMO */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 4, 0x000, 0), /* MX53_PAD_SD1_DATA3__WDOG2_WDOG_B */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 5, 0x794, 2), /* MX53_PAD_SD1_DATA3__CSPI_SS2 */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 6, 0x000, 0), /* MX53_PAD_SD1_DATA3__WDOG2_WDOG_RST_B_DEB */
+	IMX_PIN_REG(MX53_PAD_SD1_DATA3, 0x680, 0x2F8, 7, 0x000, 0), /* MX53_PAD_SD1_DATA3__SATA_PHY_DTB_1 */
+	IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 0, 0x000, 0), /* MX53_PAD_SD2_CLK__ESDHC2_CLK */
+	IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 1, 0x000, 0), /* MX53_PAD_SD2_CLK__GPIO1_10 */
+	IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 2, 0x840, 2), /* MX53_PAD_SD2_CLK__KPP_COL_5 */
+	IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 3, 0x73C, 1), /* MX53_PAD_SD2_CLK__AUDMUX_AUD4_RXFS */
+	IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 5, 0x780, 3), /* MX53_PAD_SD2_CLK__CSPI_SCLK */
+	IMX_PIN_REG(MX53_PAD_SD2_CLK, 0x688, 0x2FC, 7, 0x000, 0), /* MX53_PAD_SD2_CLK__SCC_RANDOM_V */
+	IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 0, 0x000, 0), /* MX53_PAD_SD2_CMD__ESDHC2_CMD */
+	IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 1, 0x000, 0), /* MX53_PAD_SD2_CMD__GPIO1_11 */
+	IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 2, 0x84C, 1), /* MX53_PAD_SD2_CMD__KPP_ROW_5 */
+	IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 3, 0x738, 1), /* MX53_PAD_SD2_CMD__AUDMUX_AUD4_RXC */
+	IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 5, 0x788, 3), /* MX53_PAD_SD2_CMD__CSPI_MOSI */
+	IMX_PIN_REG(MX53_PAD_SD2_CMD, 0x68C, 0x300, 7, 0x000, 0), /* MX53_PAD_SD2_CMD__SCC_RANDOM */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 0, 0x000, 0), /* MX53_PAD_SD2_DATA3__ESDHC2_DAT3 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 1, 0x000, 0), /* MX53_PAD_SD2_DATA3__GPIO1_12 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 2, 0x844, 1), /* MX53_PAD_SD2_DATA3__KPP_COL_6 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 3, 0x740, 1), /* MX53_PAD_SD2_DATA3__AUDMUX_AUD4_TXC */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 5, 0x794, 3), /* MX53_PAD_SD2_DATA3__CSPI_SS2 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA3, 0x690, 0x304, 7, 0x000, 0), /* MX53_PAD_SD2_DATA3__SJC_DONE */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 0, 0x000, 0), /* MX53_PAD_SD2_DATA2__ESDHC2_DAT2 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 1, 0x000, 0), /* MX53_PAD_SD2_DATA2__GPIO1_13 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 2, 0x850, 1), /* MX53_PAD_SD2_DATA2__KPP_ROW_6 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 3, 0x734, 1), /* MX53_PAD_SD2_DATA2__AUDMUX_AUD4_TXD */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 5, 0x790, 3), /* MX53_PAD_SD2_DATA2__CSPI_SS1 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA2, 0x694, 0x308, 7, 0x000, 0), /* MX53_PAD_SD2_DATA2__SJC_FAIL */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 0, 0x000, 0), /* MX53_PAD_SD2_DATA1__ESDHC2_DAT1 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 1, 0x000, 0), /* MX53_PAD_SD2_DATA1__GPIO1_14 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 2, 0x848, 1), /* MX53_PAD_SD2_DATA1__KPP_COL_7 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 3, 0x744, 0), /* MX53_PAD_SD2_DATA1__AUDMUX_AUD4_TXFS */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 5, 0x78C, 4), /* MX53_PAD_SD2_DATA1__CSPI_SS0 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA1, 0x698, 0x30C, 7, 0x000, 0), /* MX53_PAD_SD2_DATA1__RTIC_SEC_VIO */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 0, 0x000, 0), /* MX53_PAD_SD2_DATA0__ESDHC2_DAT0 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 1, 0x000, 0), /* MX53_PAD_SD2_DATA0__GPIO1_15 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 2, 0x854, 1), /* MX53_PAD_SD2_DATA0__KPP_ROW_7 */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 3, 0x730, 1), /* MX53_PAD_SD2_DATA0__AUDMUX_AUD4_RXD */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 5, 0x784, 3), /* MX53_PAD_SD2_DATA0__CSPI_MISO */
+	IMX_PIN_REG(MX53_PAD_SD2_DATA0, 0x69C, 0x310, 7, 0x000, 0), /* MX53_PAD_SD2_DATA0__RTIC_DONE_INT */
+	IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 0, 0x000, 0), /* MX53_PAD_GPIO_0__CCM_CLKO */
+	IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 1, 0x000, 0), /* MX53_PAD_GPIO_0__GPIO1_0 */
+	IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 2, 0x840, 3), /* MX53_PAD_GPIO_0__KPP_COL_5 */
+	IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 3, 0x000, 0), /* MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK */
+	IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 4, 0x000, 0), /* MX53_PAD_GPIO_0__EPIT1_EPITO */
+	IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 5, 0x000, 0), /* MX53_PAD_GPIO_0__SRTC_ALARM_DEB */
+	IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 6, 0x000, 0), /* MX53_PAD_GPIO_0__USBOH3_USBH1_PWR */
+	IMX_PIN_REG(MX53_PAD_GPIO_0, 0x6A4, 0x314, 7, 0x000, 0), /* MX53_PAD_GPIO_0__CSU_TD */
+	IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 0, 0x7DC, 1), /* MX53_PAD_GPIO_1__ESAI1_SCKR */
+	IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 1, 0x000, 0), /* MX53_PAD_GPIO_1__GPIO1_1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 2, 0x84C, 2), /* MX53_PAD_GPIO_1__KPP_ROW_5 */
+	IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 3, 0x000, 0), /* MX53_PAD_GPIO_1__CCM_SSI_EXT2_CLK */
+	IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 4, 0x000, 0), /* MX53_PAD_GPIO_1__PWM2_PWMO */
+	IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 5, 0x000, 0), /* MX53_PAD_GPIO_1__WDOG2_WDOG_B */
+	IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 6, 0x000, 0), /* MX53_PAD_GPIO_1__ESDHC1_CD */
+	IMX_PIN_REG(MX53_PAD_GPIO_1, 0x6A8, 0x318, 7, 0x000, 0), /* MX53_PAD_GPIO_1__SRC_TESTER_ACK */
+	IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 0, 0x7CC, 1), /* MX53_PAD_GPIO_9__ESAI1_FSR */
+	IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 1, 0x000, 0), /* MX53_PAD_GPIO_9__GPIO1_9 */
+	IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 2, 0x844, 2), /* MX53_PAD_GPIO_9__KPP_COL_6 */
+	IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 3, 0x000, 0), /* MX53_PAD_GPIO_9__CCM_REF_EN_B */
+	IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 4, 0x000, 0), /* MX53_PAD_GPIO_9__PWM1_PWMO */
+	IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 5, 0x000, 0), /* MX53_PAD_GPIO_9__WDOG1_WDOG_B */
+	IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 6, 0x7FC, 1), /* MX53_PAD_GPIO_9__ESDHC1_WP */
+	IMX_PIN_REG(MX53_PAD_GPIO_9, 0x6AC, 0x31C, 7, 0x000, 0), /* MX53_PAD_GPIO_9__SCC_FAIL_STATE */
+	IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 0, 0x7D4, 1), /* MX53_PAD_GPIO_3__ESAI1_HCKR */
+	IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 1, 0x000, 0), /* MX53_PAD_GPIO_3__GPIO1_3 */
+	IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 2, 0x824, 1), /* MX53_PAD_GPIO_3__I2C3_SCL */
+	IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 3, 0x000, 0), /* MX53_PAD_GPIO_3__DPLLIP1_TOG_EN */
+	IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 4, 0x000, 0), /* MX53_PAD_GPIO_3__CCM_CLKO2 */
+	IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 5, 0x000, 0), /* MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 */
+	IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 6, 0x8A0, 1), /* MX53_PAD_GPIO_3__USBOH3_USBH1_OC */
+	IMX_PIN_REG(MX53_PAD_GPIO_3, 0x6B0, 0x320, 7, 0x858, 2), /* MX53_PAD_GPIO_3__MLB_MLBCLK */
+	IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 0, 0x7E0, 1), /* MX53_PAD_GPIO_6__ESAI1_SCKT */
+	IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 1, 0x000, 0), /* MX53_PAD_GPIO_6__GPIO1_6 */
+	IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 2, 0x828, 1), /* MX53_PAD_GPIO_6__I2C3_SDA */
+	IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 3, 0x000, 0), /* MX53_PAD_GPIO_6__CCM_CCM_OUT_0 */
+	IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 4, 0x000, 0), /* MX53_PAD_GPIO_6__CSU_CSU_INT_DEB */
+	IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 5, 0x000, 0), /* MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 6, 0x000, 0), /* MX53_PAD_GPIO_6__ESDHC2_LCTL */
+	IMX_PIN_REG(MX53_PAD_GPIO_6, 0x6B4, 0x324, 7, 0x860, 2), /* MX53_PAD_GPIO_6__MLB_MLBSIG */
+	IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 0, 0x7D0, 1), /* MX53_PAD_GPIO_2__ESAI1_FST */
+	IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 1, 0x000, 0), /* MX53_PAD_GPIO_2__GPIO1_2 */
+	IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 2, 0x850, 2), /* MX53_PAD_GPIO_2__KPP_ROW_6 */
+	IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 3, 0x000, 0), /* MX53_PAD_GPIO_2__CCM_CCM_OUT_1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 4, 0x000, 0), /* MX53_PAD_GPIO_2__CSU_CSU_ALARM_AUT_0 */
+	IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 5, 0x000, 0), /* MX53_PAD_GPIO_2__OBSERVE_MUX_OBSRV_INT_OUT2 */
+	IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 6, 0x000, 0), /* MX53_PAD_GPIO_2__ESDHC2_WP */
+	IMX_PIN_REG(MX53_PAD_GPIO_2, 0x6B8, 0x328, 7, 0x85C, 2), /* MX53_PAD_GPIO_2__MLB_MLBDAT */
+	IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 0, 0x7D8, 1), /* MX53_PAD_GPIO_4__ESAI1_HCKT */
+	IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 1, 0x000, 0), /* MX53_PAD_GPIO_4__GPIO1_4 */
+	IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 2, 0x848, 2), /* MX53_PAD_GPIO_4__KPP_COL_7 */
+	IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 3, 0x000, 0), /* MX53_PAD_GPIO_4__CCM_CCM_OUT_2 */
+	IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 4, 0x000, 0), /* MX53_PAD_GPIO_4__CSU_CSU_ALARM_AUT_1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 5, 0x000, 0), /* MX53_PAD_GPIO_4__OBSERVE_MUX_OBSRV_INT_OUT3 */
+	IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 6, 0x000, 0), /* MX53_PAD_GPIO_4__ESDHC2_CD */
+	IMX_PIN_REG(MX53_PAD_GPIO_4, 0x6BC, 0x32C, 7, 0x000, 0), /* MX53_PAD_GPIO_4__SCC_SEC_STATE */
+	IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 0, 0x7EC, 1), /* MX53_PAD_GPIO_5__ESAI1_TX2_RX3 */
+	IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 1, 0x000, 0), /* MX53_PAD_GPIO_5__GPIO1_5 */
+	IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 2, 0x854, 2), /* MX53_PAD_GPIO_5__KPP_ROW_7 */
+	IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 3, 0x000, 0), /* MX53_PAD_GPIO_5__CCM_CLKO */
+	IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 4, 0x000, 0), /* MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 */
+	IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 5, 0x000, 0), /* MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 */
+	IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 6, 0x824, 2), /* MX53_PAD_GPIO_5__I2C3_SCL */
+	IMX_PIN_REG(MX53_PAD_GPIO_5, 0x6C0, 0x330, 7, 0x770, 1), /* MX53_PAD_GPIO_5__CCM_PLL1_BYP */
+	IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 0, 0x7F4, 1), /* MX53_PAD_GPIO_7__ESAI1_TX4_RX1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 1, 0x000, 0), /* MX53_PAD_GPIO_7__GPIO1_7 */
+	IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 2, 0x000, 0), /* MX53_PAD_GPIO_7__EPIT1_EPITO */
+	IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 3, 0x000, 0), /* MX53_PAD_GPIO_7__CAN1_TXCAN */
+	IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 4, 0x000, 0), /* MX53_PAD_GPIO_7__UART2_TXD_MUX */
+	IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 5, 0x80C, 1), /* MX53_PAD_GPIO_7__FIRI_RXD */
+	IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 6, 0x000, 0), /* MX53_PAD_GPIO_7__SPDIF_PLOCK */
+	IMX_PIN_REG(MX53_PAD_GPIO_7, 0x6C4, 0x334, 7, 0x774, 1), /* MX53_PAD_GPIO_7__CCM_PLL2_BYP */
+	IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 0, 0x7F8, 1), /* MX53_PAD_GPIO_8__ESAI1_TX5_RX0 */
+	IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 1, 0x000, 0), /* MX53_PAD_GPIO_8__GPIO1_8 */
+	IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 2, 0x000, 0), /* MX53_PAD_GPIO_8__EPIT2_EPITO */
+	IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 3, 0x760, 3), /* MX53_PAD_GPIO_8__CAN1_RXCAN */
+	IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 4, 0x880, 5), /* MX53_PAD_GPIO_8__UART2_RXD_MUX */
+	IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 5, 0x000, 0), /* MX53_PAD_GPIO_8__FIRI_TXD */
+	IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 6, 0x000, 0), /* MX53_PAD_GPIO_8__SPDIF_SRCLK */
+	IMX_PIN_REG(MX53_PAD_GPIO_8, 0x6C8, 0x338, 7, 0x778, 1), /* MX53_PAD_GPIO_8__CCM_PLL3_BYP */
+	IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 0, 0x7F0, 1), /* MX53_PAD_GPIO_16__ESAI1_TX3_RX2 */
+	IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 1, 0x000, 0), /* MX53_PAD_GPIO_16__GPIO7_11 */
+	IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 2, 0x000, 0), /* MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT */
+	IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 4, 0x000, 0), /* MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 5, 0x870, 1), /* MX53_PAD_GPIO_16__SPDIF_IN1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 6, 0x828, 2), /* MX53_PAD_GPIO_16__I2C3_SDA */
+	IMX_PIN_REG(MX53_PAD_GPIO_16, 0x6CC, 0x33C, 7, 0x000, 0), /* MX53_PAD_GPIO_16__SJC_DE_B */
+	IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 0, 0x7E4, 1), /* MX53_PAD_GPIO_17__ESAI1_TX0 */
+	IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 1, 0x000, 0), /* MX53_PAD_GPIO_17__GPIO7_12 */
+	IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 2, 0x868, 1), /* MX53_PAD_GPIO_17__SDMA_EXT_EVENT_0 */
+	IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 3, 0x810, 1), /* MX53_PAD_GPIO_17__GPC_PMIC_RDY */
+	IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 4, 0x000, 0), /* MX53_PAD_GPIO_17__RTC_CE_RTC_FSV_TRIG */
+	IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 5, 0x000, 0), /* MX53_PAD_GPIO_17__SPDIF_OUT1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 6, 0x000, 0), /* MX53_PAD_GPIO_17__IPU_SNOOP2 */
+	IMX_PIN_REG(MX53_PAD_GPIO_17, 0x6D0, 0x340, 7, 0x000, 0), /* MX53_PAD_GPIO_17__SJC_JTAG_ACT */
+	IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 0, 0x7E8, 1), /* MX53_PAD_GPIO_18__ESAI1_TX1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 1, 0x000, 0), /* MX53_PAD_GPIO_18__GPIO7_13 */
+	IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 2, 0x86C, 1), /* MX53_PAD_GPIO_18__SDMA_EXT_EVENT_1 */
+	IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 3, 0x864, 1), /* MX53_PAD_GPIO_18__OWIRE_LINE */
+	IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 4, 0x000, 0), /* MX53_PAD_GPIO_18__RTC_CE_RTC_ALARM2_TRIG */
+	IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 5, 0x768, 1), /* MX53_PAD_GPIO_18__CCM_ASRC_EXT_CLK */
+	IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 6, 0x000, 0), /* MX53_PAD_GPIO_18__ESDHC1_LCTL */
+	IMX_PIN_REG(MX53_PAD_GPIO_18, 0x6D4, 0x344, 7, 0x000, 0), /* MX53_PAD_GPIO_18__SRC_SYSTEM_RST */
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx53_pinctrl_pads[] = {
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_19),
+	IMX_PINCTRL_PIN(MX53_PAD_KEY_COL0),
+	IMX_PINCTRL_PIN(MX53_PAD_KEY_ROW0),
+	IMX_PINCTRL_PIN(MX53_PAD_KEY_COL1),
+	IMX_PINCTRL_PIN(MX53_PAD_KEY_ROW1),
+	IMX_PINCTRL_PIN(MX53_PAD_KEY_COL2),
+	IMX_PINCTRL_PIN(MX53_PAD_KEY_ROW2),
+	IMX_PINCTRL_PIN(MX53_PAD_KEY_COL3),
+	IMX_PINCTRL_PIN(MX53_PAD_KEY_ROW3),
+	IMX_PINCTRL_PIN(MX53_PAD_KEY_COL4),
+	IMX_PINCTRL_PIN(MX53_PAD_KEY_ROW4),
+	IMX_PINCTRL_PIN(MX53_PAD_DI0_DISP_CLK),
+	IMX_PINCTRL_PIN(MX53_PAD_DI0_PIN15),
+	IMX_PINCTRL_PIN(MX53_PAD_DI0_PIN2),
+	IMX_PINCTRL_PIN(MX53_PAD_DI0_PIN3),
+	IMX_PINCTRL_PIN(MX53_PAD_DI0_PIN4),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT0),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT1),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT2),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT3),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT4),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT5),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT6),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT7),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT8),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT9),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT10),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT11),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT12),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT13),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT14),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT15),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT16),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT17),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT18),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT19),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT20),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT21),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT22),
+	IMX_PINCTRL_PIN(MX53_PAD_DISP0_DAT23),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_PIXCLK),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_MCLK),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DATA_EN),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_VSYNC),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT4),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT5),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT6),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT7),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT8),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT9),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT10),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT11),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT12),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT13),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT14),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT15),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT16),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT17),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT18),
+	IMX_PINCTRL_PIN(MX53_PAD_CSI0_DAT19),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_A25),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_EB2),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D16),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D17),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D18),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D19),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D20),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D21),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D22),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D23),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_EB3),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D24),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D25),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D26),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D27),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D28),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D29),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D30),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_D31),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_A24),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_A23),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_A22),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_A21),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_A20),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_A19),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_A18),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_A17),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_A16),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_CS0),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_CS1),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_OE),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_RW),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_LBA),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_EB0),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_EB1),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA0),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA1),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA2),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA3),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA4),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA5),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA6),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA7),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA8),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA9),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA10),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA11),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA12),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA13),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA14),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_DA15),
+	IMX_PINCTRL_PIN(MX53_PAD_NANDF_WE_B),
+	IMX_PINCTRL_PIN(MX53_PAD_NANDF_RE_B),
+	IMX_PINCTRL_PIN(MX53_PAD_EIM_WAIT),
+	IMX_PINCTRL_PIN(MX53_PAD_LVDS1_TX3_P),
+	IMX_PINCTRL_PIN(MX53_PAD_LVDS1_TX2_P),
+	IMX_PINCTRL_PIN(MX53_PAD_LVDS1_CLK_P),
+	IMX_PINCTRL_PIN(MX53_PAD_LVDS1_TX1_P),
+	IMX_PINCTRL_PIN(MX53_PAD_LVDS1_TX0_P),
+	IMX_PINCTRL_PIN(MX53_PAD_LVDS0_TX3_P),
+	IMX_PINCTRL_PIN(MX53_PAD_LVDS0_CLK_P),
+	IMX_PINCTRL_PIN(MX53_PAD_LVDS0_TX2_P),
+	IMX_PINCTRL_PIN(MX53_PAD_LVDS0_TX1_P),
+	IMX_PINCTRL_PIN(MX53_PAD_LVDS0_TX0_P),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_10),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_11),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_12),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_13),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_14),
+	IMX_PINCTRL_PIN(MX53_PAD_NANDF_CLE),
+	IMX_PINCTRL_PIN(MX53_PAD_NANDF_ALE),
+	IMX_PINCTRL_PIN(MX53_PAD_NANDF_WP_B),
+	IMX_PINCTRL_PIN(MX53_PAD_NANDF_RB0),
+	IMX_PINCTRL_PIN(MX53_PAD_NANDF_CS0),
+	IMX_PINCTRL_PIN(MX53_PAD_NANDF_CS1),
+	IMX_PINCTRL_PIN(MX53_PAD_NANDF_CS2),
+	IMX_PINCTRL_PIN(MX53_PAD_NANDF_CS3),
+	IMX_PINCTRL_PIN(MX53_PAD_FEC_MDIO),
+	IMX_PINCTRL_PIN(MX53_PAD_FEC_REF_CLK),
+	IMX_PINCTRL_PIN(MX53_PAD_FEC_RX_ER),
+	IMX_PINCTRL_PIN(MX53_PAD_FEC_CRS_DV),
+	IMX_PINCTRL_PIN(MX53_PAD_FEC_RXD1),
+	IMX_PINCTRL_PIN(MX53_PAD_FEC_RXD0),
+	IMX_PINCTRL_PIN(MX53_PAD_FEC_TX_EN),
+	IMX_PINCTRL_PIN(MX53_PAD_FEC_TXD1),
+	IMX_PINCTRL_PIN(MX53_PAD_FEC_TXD0),
+	IMX_PINCTRL_PIN(MX53_PAD_FEC_MDC),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DIOW),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DMACK),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DMARQ),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_BUFFER_EN),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_INTRQ),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DIOR),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_RESET_B),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_IORDY),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DA_0),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DA_1),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DA_2),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_CS_0),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_CS_1),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA0),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA1),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA2),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA3),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA4),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA5),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA6),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA7),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA8),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA9),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA10),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA11),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA12),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA13),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA14),
+	IMX_PINCTRL_PIN(MX53_PAD_PATA_DATA15),
+	IMX_PINCTRL_PIN(MX53_PAD_SD1_DATA0),
+	IMX_PINCTRL_PIN(MX53_PAD_SD1_DATA1),
+	IMX_PINCTRL_PIN(MX53_PAD_SD1_CMD),
+	IMX_PINCTRL_PIN(MX53_PAD_SD1_DATA2),
+	IMX_PINCTRL_PIN(MX53_PAD_SD1_CLK),
+	IMX_PINCTRL_PIN(MX53_PAD_SD1_DATA3),
+	IMX_PINCTRL_PIN(MX53_PAD_SD2_CLK),
+	IMX_PINCTRL_PIN(MX53_PAD_SD2_CMD),
+	IMX_PINCTRL_PIN(MX53_PAD_SD2_DATA3),
+	IMX_PINCTRL_PIN(MX53_PAD_SD2_DATA2),
+	IMX_PINCTRL_PIN(MX53_PAD_SD2_DATA1),
+	IMX_PINCTRL_PIN(MX53_PAD_SD2_DATA0),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_0),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_1),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_9),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_3),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_6),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_2),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_4),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_5),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_7),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_8),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_16),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_17),
+	IMX_PINCTRL_PIN(MX53_PAD_GPIO_18),
+};
+
+static struct imx_pinctrl_soc_info imx53_pinctrl_info = {
+	.pins = imx53_pinctrl_pads,
+	.npins = ARRAY_SIZE(imx53_pinctrl_pads),
+	.pin_regs = imx53_pin_regs,
+	.npin_regs = ARRAY_SIZE(imx53_pin_regs),
+};
+
+static struct of_device_id imx53_pinctrl_of_match[] __devinitdata = {
+	{ .compatible = "fsl,imx53-iomuxc", },
+	{ /* sentinel */ }
+};
+
+static int __devinit imx53_pinctrl_probe(struct platform_device *pdev)
+{
+	return imx_pinctrl_probe(pdev, &imx53_pinctrl_info);
+}
+
+static struct platform_driver imx53_pinctrl_driver = {
+	.driver = {
+		.name = "imx53-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(imx53_pinctrl_of_match),
+	},
+	.probe = imx53_pinctrl_probe,
+	.remove = __devexit_p(imx_pinctrl_remove),
+};
+
+static int __init imx53_pinctrl_init(void)
+{
+	return platform_driver_register(&imx53_pinctrl_driver);
+}
+arch_initcall(imx53_pinctrl_init);
+
+static void __exit imx53_pinctrl_exit(void)
+{
+	platform_driver_unregister(&imx53_pinctrl_driver);
+}
+module_exit(imx53_pinctrl_exit);
+MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>");
+MODULE_DESCRIPTION("Freescale IMX53 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-imx6q.c b/drivers/pinctrl/pinctrl-imx6q.c
new file mode 100644
index 0000000..7737d4d
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx6q.c
@@ -0,0 +1,2331 @@
+/*
+ * imx6q pinctrl driver based on imx pinmux core
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro, Inc.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx6q_pads {
+	MX6Q_PAD_SD2_DAT1 = 0,
+	MX6Q_PAD_SD2_DAT2 = 1,
+	MX6Q_PAD_SD2_DAT0 = 2,
+	MX6Q_PAD_RGMII_TXC = 3,
+	MX6Q_PAD_RGMII_TD0 = 4,
+	MX6Q_PAD_RGMII_TD1 = 5,
+	MX6Q_PAD_RGMII_TD2 = 6,
+	MX6Q_PAD_RGMII_TD3 = 7,
+	MX6Q_PAD_RGMII_RX_CTL = 8,
+	MX6Q_PAD_RGMII_RD0 = 9,
+	MX6Q_PAD_RGMII_TX_CTL = 10,
+	MX6Q_PAD_RGMII_RD1 = 11,
+	MX6Q_PAD_RGMII_RD2 = 12,
+	MX6Q_PAD_RGMII_RD3 = 13,
+	MX6Q_PAD_RGMII_RXC = 14,
+	MX6Q_PAD_EIM_A25 = 15,
+	MX6Q_PAD_EIM_EB2 = 16,
+	MX6Q_PAD_EIM_D16 = 17,
+	MX6Q_PAD_EIM_D17 = 18,
+	MX6Q_PAD_EIM_D18 = 19,
+	MX6Q_PAD_EIM_D19 = 20,
+	MX6Q_PAD_EIM_D20 = 21,
+	MX6Q_PAD_EIM_D21 = 22,
+	MX6Q_PAD_EIM_D22 = 23,
+	MX6Q_PAD_EIM_D23 = 24,
+	MX6Q_PAD_EIM_EB3 = 25,
+	MX6Q_PAD_EIM_D24 = 26,
+	MX6Q_PAD_EIM_D25 = 27,
+	MX6Q_PAD_EIM_D26 = 28,
+	MX6Q_PAD_EIM_D27 = 29,
+	MX6Q_PAD_EIM_D28 = 30,
+	MX6Q_PAD_EIM_D29 = 31,
+	MX6Q_PAD_EIM_D30 = 32,
+	MX6Q_PAD_EIM_D31 = 33,
+	MX6Q_PAD_EIM_A24 = 34,
+	MX6Q_PAD_EIM_A23 = 35,
+	MX6Q_PAD_EIM_A22 = 36,
+	MX6Q_PAD_EIM_A21 = 37,
+	MX6Q_PAD_EIM_A20 = 38,
+	MX6Q_PAD_EIM_A19 = 39,
+	MX6Q_PAD_EIM_A18 = 40,
+	MX6Q_PAD_EIM_A17 = 41,
+	MX6Q_PAD_EIM_A16 = 42,
+	MX6Q_PAD_EIM_CS0 = 43,
+	MX6Q_PAD_EIM_CS1 = 44,
+	MX6Q_PAD_EIM_OE = 45,
+	MX6Q_PAD_EIM_RW = 46,
+	MX6Q_PAD_EIM_LBA = 47,
+	MX6Q_PAD_EIM_EB0 = 48,
+	MX6Q_PAD_EIM_EB1 = 49,
+	MX6Q_PAD_EIM_DA0 = 50,
+	MX6Q_PAD_EIM_DA1 = 51,
+	MX6Q_PAD_EIM_DA2 = 52,
+	MX6Q_PAD_EIM_DA3 = 53,
+	MX6Q_PAD_EIM_DA4 = 54,
+	MX6Q_PAD_EIM_DA5 = 55,
+	MX6Q_PAD_EIM_DA6 = 56,
+	MX6Q_PAD_EIM_DA7 = 57,
+	MX6Q_PAD_EIM_DA8 = 58,
+	MX6Q_PAD_EIM_DA9 = 59,
+	MX6Q_PAD_EIM_DA10 = 60,
+	MX6Q_PAD_EIM_DA11 = 61,
+	MX6Q_PAD_EIM_DA12 = 62,
+	MX6Q_PAD_EIM_DA13 = 63,
+	MX6Q_PAD_EIM_DA14 = 64,
+	MX6Q_PAD_EIM_DA15 = 65,
+	MX6Q_PAD_EIM_WAIT = 66,
+	MX6Q_PAD_EIM_BCLK = 67,
+	MX6Q_PAD_DI0_DISP_CLK = 68,
+	MX6Q_PAD_DI0_PIN15 = 69,
+	MX6Q_PAD_DI0_PIN2 = 70,
+	MX6Q_PAD_DI0_PIN3 = 71,
+	MX6Q_PAD_DI0_PIN4 = 72,
+	MX6Q_PAD_DISP0_DAT0 = 73,
+	MX6Q_PAD_DISP0_DAT1 = 74,
+	MX6Q_PAD_DISP0_DAT2 = 75,
+	MX6Q_PAD_DISP0_DAT3 = 76,
+	MX6Q_PAD_DISP0_DAT4 = 77,
+	MX6Q_PAD_DISP0_DAT5 = 78,
+	MX6Q_PAD_DISP0_DAT6 = 79,
+	MX6Q_PAD_DISP0_DAT7 = 80,
+	MX6Q_PAD_DISP0_DAT8 = 81,
+	MX6Q_PAD_DISP0_DAT9 = 82,
+	MX6Q_PAD_DISP0_DAT10 = 83,
+	MX6Q_PAD_DISP0_DAT11 = 84,
+	MX6Q_PAD_DISP0_DAT12 = 85,
+	MX6Q_PAD_DISP0_DAT13 = 86,
+	MX6Q_PAD_DISP0_DAT14 = 87,
+	MX6Q_PAD_DISP0_DAT15 = 88,
+	MX6Q_PAD_DISP0_DAT16 = 89,
+	MX6Q_PAD_DISP0_DAT17 = 90,
+	MX6Q_PAD_DISP0_DAT18 = 91,
+	MX6Q_PAD_DISP0_DAT19 = 92,
+	MX6Q_PAD_DISP0_DAT20 = 93,
+	MX6Q_PAD_DISP0_DAT21 = 94,
+	MX6Q_PAD_DISP0_DAT22 = 95,
+	MX6Q_PAD_DISP0_DAT23 = 96,
+	MX6Q_PAD_ENET_MDIO = 97,
+	MX6Q_PAD_ENET_REF_CLK = 98,
+	MX6Q_PAD_ENET_RX_ER = 99,
+	MX6Q_PAD_ENET_CRS_DV = 100,
+	MX6Q_PAD_ENET_RXD1 = 101,
+	MX6Q_PAD_ENET_RXD0 = 102,
+	MX6Q_PAD_ENET_TX_EN = 103,
+	MX6Q_PAD_ENET_TXD1 = 104,
+	MX6Q_PAD_ENET_TXD0 = 105,
+	MX6Q_PAD_ENET_MDC = 106,
+	MX6Q_PAD_DRAM_D40 = 107,
+	MX6Q_PAD_DRAM_D41 = 108,
+	MX6Q_PAD_DRAM_D42 = 109,
+	MX6Q_PAD_DRAM_D43 = 110,
+	MX6Q_PAD_DRAM_D44 = 111,
+	MX6Q_PAD_DRAM_D45 = 112,
+	MX6Q_PAD_DRAM_D46 = 113,
+	MX6Q_PAD_DRAM_D47 = 114,
+	MX6Q_PAD_DRAM_SDQS5 = 115,
+	MX6Q_PAD_DRAM_DQM5 = 116,
+	MX6Q_PAD_DRAM_D32 = 117,
+	MX6Q_PAD_DRAM_D33 = 118,
+	MX6Q_PAD_DRAM_D34 = 119,
+	MX6Q_PAD_DRAM_D35 = 120,
+	MX6Q_PAD_DRAM_D36 = 121,
+	MX6Q_PAD_DRAM_D37 = 122,
+	MX6Q_PAD_DRAM_D38 = 123,
+	MX6Q_PAD_DRAM_D39 = 124,
+	MX6Q_PAD_DRAM_DQM4 = 125,
+	MX6Q_PAD_DRAM_SDQS4 = 126,
+	MX6Q_PAD_DRAM_D24 = 127,
+	MX6Q_PAD_DRAM_D25 = 128,
+	MX6Q_PAD_DRAM_D26 = 129,
+	MX6Q_PAD_DRAM_D27 = 130,
+	MX6Q_PAD_DRAM_D28 = 131,
+	MX6Q_PAD_DRAM_D29 = 132,
+	MX6Q_PAD_DRAM_SDQS3 = 133,
+	MX6Q_PAD_DRAM_D30 = 134,
+	MX6Q_PAD_DRAM_D31 = 135,
+	MX6Q_PAD_DRAM_DQM3 = 136,
+	MX6Q_PAD_DRAM_D16 = 137,
+	MX6Q_PAD_DRAM_D17 = 138,
+	MX6Q_PAD_DRAM_D18 = 139,
+	MX6Q_PAD_DRAM_D19 = 140,
+	MX6Q_PAD_DRAM_D20 = 141,
+	MX6Q_PAD_DRAM_D21 = 142,
+	MX6Q_PAD_DRAM_D22 = 143,
+	MX6Q_PAD_DRAM_SDQS2 = 144,
+	MX6Q_PAD_DRAM_D23 = 145,
+	MX6Q_PAD_DRAM_DQM2 = 146,
+	MX6Q_PAD_DRAM_A0 = 147,
+	MX6Q_PAD_DRAM_A1 = 148,
+	MX6Q_PAD_DRAM_A2 = 149,
+	MX6Q_PAD_DRAM_A3 = 150,
+	MX6Q_PAD_DRAM_A4 = 151,
+	MX6Q_PAD_DRAM_A5 = 152,
+	MX6Q_PAD_DRAM_A6 = 153,
+	MX6Q_PAD_DRAM_A7 = 154,
+	MX6Q_PAD_DRAM_A8 = 155,
+	MX6Q_PAD_DRAM_A9 = 156,
+	MX6Q_PAD_DRAM_A10 = 157,
+	MX6Q_PAD_DRAM_A11 = 158,
+	MX6Q_PAD_DRAM_A12 = 159,
+	MX6Q_PAD_DRAM_A13 = 160,
+	MX6Q_PAD_DRAM_A14 = 161,
+	MX6Q_PAD_DRAM_A15 = 162,
+	MX6Q_PAD_DRAM_CAS = 163,
+	MX6Q_PAD_DRAM_CS0 = 164,
+	MX6Q_PAD_DRAM_CS1 = 165,
+	MX6Q_PAD_DRAM_RAS = 166,
+	MX6Q_PAD_DRAM_RESET = 167,
+	MX6Q_PAD_DRAM_SDBA0 = 168,
+	MX6Q_PAD_DRAM_SDBA1 = 169,
+	MX6Q_PAD_DRAM_SDCLK_0 = 170,
+	MX6Q_PAD_DRAM_SDBA2 = 171,
+	MX6Q_PAD_DRAM_SDCKE0 = 172,
+	MX6Q_PAD_DRAM_SDCLK_1 = 173,
+	MX6Q_PAD_DRAM_SDCKE1 = 174,
+	MX6Q_PAD_DRAM_SDODT0 = 175,
+	MX6Q_PAD_DRAM_SDODT1 = 176,
+	MX6Q_PAD_DRAM_SDWE = 177,
+	MX6Q_PAD_DRAM_D0 = 178,
+	MX6Q_PAD_DRAM_D1 = 179,
+	MX6Q_PAD_DRAM_D2 = 180,
+	MX6Q_PAD_DRAM_D3 = 181,
+	MX6Q_PAD_DRAM_D4 = 182,
+	MX6Q_PAD_DRAM_D5 = 183,
+	MX6Q_PAD_DRAM_SDQS0 = 184,
+	MX6Q_PAD_DRAM_D6 = 185,
+	MX6Q_PAD_DRAM_D7 = 186,
+	MX6Q_PAD_DRAM_DQM0 = 187,
+	MX6Q_PAD_DRAM_D8 = 188,
+	MX6Q_PAD_DRAM_D9 = 189,
+	MX6Q_PAD_DRAM_D10 = 190,
+	MX6Q_PAD_DRAM_D11 = 191,
+	MX6Q_PAD_DRAM_D12 = 192,
+	MX6Q_PAD_DRAM_D13 = 193,
+	MX6Q_PAD_DRAM_D14 = 194,
+	MX6Q_PAD_DRAM_SDQS1 = 195,
+	MX6Q_PAD_DRAM_D15 = 196,
+	MX6Q_PAD_DRAM_DQM1 = 197,
+	MX6Q_PAD_DRAM_D48 = 198,
+	MX6Q_PAD_DRAM_D49 = 199,
+	MX6Q_PAD_DRAM_D50 = 200,
+	MX6Q_PAD_DRAM_D51 = 201,
+	MX6Q_PAD_DRAM_D52 = 202,
+	MX6Q_PAD_DRAM_D53 = 203,
+	MX6Q_PAD_DRAM_D54 = 204,
+	MX6Q_PAD_DRAM_D55 = 205,
+	MX6Q_PAD_DRAM_SDQS6 = 206,
+	MX6Q_PAD_DRAM_DQM6 = 207,
+	MX6Q_PAD_DRAM_D56 = 208,
+	MX6Q_PAD_DRAM_SDQS7 = 209,
+	MX6Q_PAD_DRAM_D57 = 210,
+	MX6Q_PAD_DRAM_D58 = 211,
+	MX6Q_PAD_DRAM_D59 = 212,
+	MX6Q_PAD_DRAM_D60 = 213,
+	MX6Q_PAD_DRAM_DQM7 = 214,
+	MX6Q_PAD_DRAM_D61 = 215,
+	MX6Q_PAD_DRAM_D62 = 216,
+	MX6Q_PAD_DRAM_D63 = 217,
+	MX6Q_PAD_KEY_COL0 = 218,
+	MX6Q_PAD_KEY_ROW0 = 219,
+	MX6Q_PAD_KEY_COL1 = 220,
+	MX6Q_PAD_KEY_ROW1 = 221,
+	MX6Q_PAD_KEY_COL2 = 222,
+	MX6Q_PAD_KEY_ROW2 = 223,
+	MX6Q_PAD_KEY_COL3 = 224,
+	MX6Q_PAD_KEY_ROW3 = 225,
+	MX6Q_PAD_KEY_COL4 = 226,
+	MX6Q_PAD_KEY_ROW4 = 227,
+	MX6Q_PAD_GPIO_0 = 228,
+	MX6Q_PAD_GPIO_1 = 229,
+	MX6Q_PAD_GPIO_9 = 230,
+	MX6Q_PAD_GPIO_3 = 231,
+	MX6Q_PAD_GPIO_6 = 232,
+	MX6Q_PAD_GPIO_2 = 233,
+	MX6Q_PAD_GPIO_4 = 234,
+	MX6Q_PAD_GPIO_5 = 235,
+	MX6Q_PAD_GPIO_7 = 236,
+	MX6Q_PAD_GPIO_8 = 237,
+	MX6Q_PAD_GPIO_16 = 238,
+	MX6Q_PAD_GPIO_17 = 239,
+	MX6Q_PAD_GPIO_18 = 240,
+	MX6Q_PAD_GPIO_19 = 241,
+	MX6Q_PAD_CSI0_PIXCLK = 242,
+	MX6Q_PAD_CSI0_MCLK = 243,
+	MX6Q_PAD_CSI0_DATA_EN = 244,
+	MX6Q_PAD_CSI0_VSYNC = 245,
+	MX6Q_PAD_CSI0_DAT4 = 246,
+	MX6Q_PAD_CSI0_DAT5 = 247,
+	MX6Q_PAD_CSI0_DAT6 = 248,
+	MX6Q_PAD_CSI0_DAT7 = 249,
+	MX6Q_PAD_CSI0_DAT8 = 250,
+	MX6Q_PAD_CSI0_DAT9 = 251,
+	MX6Q_PAD_CSI0_DAT10 = 252,
+	MX6Q_PAD_CSI0_DAT11 = 253,
+	MX6Q_PAD_CSI0_DAT12 = 254,
+	MX6Q_PAD_CSI0_DAT13 = 255,
+	MX6Q_PAD_CSI0_DAT14 = 256,
+	MX6Q_PAD_CSI0_DAT15 = 257,
+	MX6Q_PAD_CSI0_DAT16 = 258,
+	MX6Q_PAD_CSI0_DAT17 = 259,
+	MX6Q_PAD_CSI0_DAT18 = 260,
+	MX6Q_PAD_CSI0_DAT19 = 261,
+	MX6Q_PAD_JTAG_TMS = 262,
+	MX6Q_PAD_JTAG_MOD = 263,
+	MX6Q_PAD_JTAG_TRSTB = 264,
+	MX6Q_PAD_JTAG_TDI = 265,
+	MX6Q_PAD_JTAG_TCK = 266,
+	MX6Q_PAD_JTAG_TDO = 267,
+	MX6Q_PAD_LVDS1_TX3_P = 268,
+	MX6Q_PAD_LVDS1_TX2_P = 269,
+	MX6Q_PAD_LVDS1_CLK_P = 270,
+	MX6Q_PAD_LVDS1_TX1_P = 271,
+	MX6Q_PAD_LVDS1_TX0_P = 272,
+	MX6Q_PAD_LVDS0_TX3_P = 273,
+	MX6Q_PAD_LVDS0_CLK_P = 274,
+	MX6Q_PAD_LVDS0_TX2_P = 275,
+	MX6Q_PAD_LVDS0_TX1_P = 276,
+	MX6Q_PAD_LVDS0_TX0_P = 277,
+	MX6Q_PAD_TAMPER = 278,
+	MX6Q_PAD_PMIC_ON_REQ = 279,
+	MX6Q_PAD_PMIC_STBY_REQ = 280,
+	MX6Q_PAD_POR_B = 281,
+	MX6Q_PAD_BOOT_MODE1 = 282,
+	MX6Q_PAD_RESET_IN_B = 283,
+	MX6Q_PAD_BOOT_MODE0 = 284,
+	MX6Q_PAD_TEST_MODE = 285,
+	MX6Q_PAD_SD3_DAT7 = 286,
+	MX6Q_PAD_SD3_DAT6 = 287,
+	MX6Q_PAD_SD3_DAT5 = 288,
+	MX6Q_PAD_SD3_DAT4 = 289,
+	MX6Q_PAD_SD3_CMD = 290,
+	MX6Q_PAD_SD3_CLK = 291,
+	MX6Q_PAD_SD3_DAT0 = 292,
+	MX6Q_PAD_SD3_DAT1 = 293,
+	MX6Q_PAD_SD3_DAT2 = 294,
+	MX6Q_PAD_SD3_DAT3 = 295,
+	MX6Q_PAD_SD3_RST = 296,
+	MX6Q_PAD_NANDF_CLE = 297,
+	MX6Q_PAD_NANDF_ALE = 298,
+	MX6Q_PAD_NANDF_WP_B = 299,
+	MX6Q_PAD_NANDF_RB0 = 300,
+	MX6Q_PAD_NANDF_CS0 = 301,
+	MX6Q_PAD_NANDF_CS1 = 302,
+	MX6Q_PAD_NANDF_CS2 = 303,
+	MX6Q_PAD_NANDF_CS3 = 304,
+	MX6Q_PAD_SD4_CMD = 305,
+	MX6Q_PAD_SD4_CLK = 306,
+	MX6Q_PAD_NANDF_D0 = 307,
+	MX6Q_PAD_NANDF_D1 = 308,
+	MX6Q_PAD_NANDF_D2 = 309,
+	MX6Q_PAD_NANDF_D3 = 310,
+	MX6Q_PAD_NANDF_D4 = 311,
+	MX6Q_PAD_NANDF_D5 = 312,
+	MX6Q_PAD_NANDF_D6 = 313,
+	MX6Q_PAD_NANDF_D7 = 314,
+	MX6Q_PAD_SD4_DAT0 = 315,
+	MX6Q_PAD_SD4_DAT1 = 316,
+	MX6Q_PAD_SD4_DAT2 = 317,
+	MX6Q_PAD_SD4_DAT3 = 318,
+	MX6Q_PAD_SD4_DAT4 = 319,
+	MX6Q_PAD_SD4_DAT5 = 320,
+	MX6Q_PAD_SD4_DAT6 = 321,
+	MX6Q_PAD_SD4_DAT7 = 322,
+	MX6Q_PAD_SD1_DAT1 = 323,
+	MX6Q_PAD_SD1_DAT0 = 324,
+	MX6Q_PAD_SD1_DAT3 = 325,
+	MX6Q_PAD_SD1_CMD = 326,
+	MX6Q_PAD_SD1_DAT2 = 327,
+	MX6Q_PAD_SD1_CLK = 328,
+	MX6Q_PAD_SD2_CLK = 329,
+	MX6Q_PAD_SD2_CMD = 330,
+	MX6Q_PAD_SD2_DAT3 = 331,
+};
+
+/* imx6q register maps */
+static struct imx_pin_reg imx6q_pin_regs[] = {
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 0, 0x0000, 0), /* MX6Q_PAD_SD2_DAT1__USDHC2_DAT1 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 1, 0x0834, 0), /* MX6Q_PAD_SD2_DAT1__ECSPI5_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 2, 0x0000, 0), /* MX6Q_PAD_SD2_DAT1__WEIM_WEIM_CS_2 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 3, 0x07C8, 0), /* MX6Q_PAD_SD2_DAT1__AUDMUX_AUD4_TXFS */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 4, 0x08F0, 0), /* MX6Q_PAD_SD2_DAT1__KPP_COL_7 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT1__GPIO_1_14 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT1__CCM_WAIT */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT1, 0x0360, 0x004C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT1__ANATOP_TESTO_0 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 0, 0x0000, 0), /* MX6Q_PAD_SD2_DAT2__USDHC2_DAT2 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 1, 0x0838, 0), /* MX6Q_PAD_SD2_DAT2__ECSPI5_SS1 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 2, 0x0000, 0), /* MX6Q_PAD_SD2_DAT2__WEIM_WEIM_CS_3 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 3, 0x07B8, 0), /* MX6Q_PAD_SD2_DAT2__AUDMUX_AUD4_TXD */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 4, 0x08F8, 0), /* MX6Q_PAD_SD2_DAT2__KPP_ROW_6 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT2__GPIO_1_13 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT2__CCM_STOP */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT2, 0x0364, 0x0050, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT2__ANATOP_TESTO_1 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 0, 0x0000, 0), /* MX6Q_PAD_SD2_DAT0__USDHC2_DAT0 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 1, 0x082C, 0), /* MX6Q_PAD_SD2_DAT0__ECSPI5_MISO */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 3, 0x07B4, 0), /* MX6Q_PAD_SD2_DAT0__AUDMUX_AUD4_RXD */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 4, 0x08FC, 0), /* MX6Q_PAD_SD2_DAT0__KPP_ROW_7 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT0__GPIO_1_15 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT0__DCIC2_DCIC_OUT */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT0, 0x0368, 0x0054, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT0__TESTO_2 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TXC__USBOH3_H2_DATA */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TXC__ENET_RGMII_TXC */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 2, 0x0918, 0), /* MX6Q_PAD_RGMII_TXC__SPDIF_SPDIF_EXTCLK */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TXC__GPIO_6_19 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TXC__MIPI_CORE_DPHY_IN_0 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TXC, 0x036C, 0x0058, 7, 0x0000, 0), /* MX6Q_PAD_RGMII_TXC__ANATOP_24M_OUT */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD0, 0x0370, 0x005C, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TD0__MIPI_HSI_CRL_TX_RDY */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD0, 0x0370, 0x005C, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TD0__ENET_RGMII_TD0 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD0, 0x0370, 0x005C, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TD0__GPIO_6_20 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD0, 0x0370, 0x005C, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TD0__MIPI_CORE_DPHY_IN_1 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD1, 0x0374, 0x0060, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TD1__MIPI_HSI_CRL_RX_FLG */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD1, 0x0374, 0x0060, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TD1__ENET_RGMII_TD1 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD1, 0x0374, 0x0060, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TD1__GPIO_6_21 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD1, 0x0374, 0x0060, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TD1__MIPI_CORE_DPHY_IN_2 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD1, 0x0374, 0x0060, 7, 0x0000, 0), /* MX6Q_PAD_RGMII_TD1__CCM_PLL3_BYP */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD2, 0x0378, 0x0064, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TD2__MIPI_HSI_CRL_RX_DTA */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD2, 0x0378, 0x0064, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TD2__ENET_RGMII_TD2 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD2, 0x0378, 0x0064, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TD2__GPIO_6_22 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD2, 0x0378, 0x0064, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TD2__MIPI_CORE_DPHY_IN_3 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD2, 0x0378, 0x0064, 7, 0x0000, 0), /* MX6Q_PAD_RGMII_TD2__CCM_PLL2_BYP */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD3, 0x037C, 0x0068, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TD3__MIPI_HSI_CRL_RX_WAK */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD3, 0x037C, 0x0068, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TD3__ENET_RGMII_TD3 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD3, 0x037C, 0x0068, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TD3__GPIO_6_23 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TD3, 0x037C, 0x0068, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TD3__MIPI_CORE_DPHY_IN_4 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RX_CTL, 0x0380, 0x006C, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RX_CTL__USBOH3_H3_DATA */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RX_CTL, 0x0380, 0x006C, 1, 0x0858, 0), /* MX6Q_PAD_RGMII_RX_CTL__RGMII_RX_CTL */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RX_CTL, 0x0380, 0x006C, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RX_CTL__GPIO_6_24 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RX_CTL, 0x0380, 0x006C, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RX_CTL__MIPI_DPHY_IN_5 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD0, 0x0384, 0x0070, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RD0__MIPI_HSI_CRL_RX_RDY */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD0, 0x0384, 0x0070, 1, 0x0848, 0), /* MX6Q_PAD_RGMII_RD0__ENET_RGMII_RD0 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD0, 0x0384, 0x0070, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RD0__GPIO_6_25 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD0, 0x0384, 0x0070, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RD0__MIPI_CORE_DPHY_IN_6 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TX_CTL, 0x0388, 0x0074, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_TX_CTL__USBOH3_H2_STROBE */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TX_CTL, 0x0388, 0x0074, 1, 0x0000, 0), /* MX6Q_PAD_RGMII_TX_CTL__RGMII_TX_CTL */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TX_CTL, 0x0388, 0x0074, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_TX_CTL__GPIO_6_26 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TX_CTL, 0x0388, 0x0074, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_TX_CTL__CORE_DPHY_IN_7 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_TX_CTL, 0x0388, 0x0074, 7, 0x083C, 0), /* MX6Q_PAD_RGMII_TX_CTL__ANATOP_REF_OUT */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD1, 0x038C, 0x0078, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RD1__MIPI_HSI_CTRL_TX_FL */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD1, 0x038C, 0x0078, 1, 0x084C, 0), /* MX6Q_PAD_RGMII_RD1__ENET_RGMII_RD1 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD1, 0x038C, 0x0078, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RD1__GPIO_6_27 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD1, 0x038C, 0x0078, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RD1__CORE_DPHY_TEST_IN_8 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD1, 0x038C, 0x0078, 7, 0x0000, 0), /* MX6Q_PAD_RGMII_RD1__SJC_FAIL */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD2, 0x0390, 0x007C, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RD2__MIPI_HSI_CRL_TX_DTA */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD2, 0x0390, 0x007C, 1, 0x0850, 0), /* MX6Q_PAD_RGMII_RD2__ENET_RGMII_RD2 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD2, 0x0390, 0x007C, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RD2__GPIO_6_28 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD2, 0x0390, 0x007C, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RD2__MIPI_CORE_DPHY_IN_9 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD3, 0x0394, 0x0080, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RD3__MIPI_HSI_CRL_TX_WAK */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD3, 0x0394, 0x0080, 1, 0x0854, 0), /* MX6Q_PAD_RGMII_RD3__ENET_RGMII_RD3 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD3, 0x0394, 0x0080, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RD3__GPIO_6_29 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RD3, 0x0394, 0x0080, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RD3__MIPI_CORE_DPHY_IN10 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RXC, 0x0398, 0x0084, 0, 0x0000, 0), /* MX6Q_PAD_RGMII_RXC__USBOH3_H3_STROBE */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RXC, 0x0398, 0x0084, 1, 0x0844, 0), /* MX6Q_PAD_RGMII_RXC__ENET_RGMII_RXC */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RXC, 0x0398, 0x0084, 5, 0x0000, 0), /* MX6Q_PAD_RGMII_RXC__GPIO_6_30 */
+	IMX_PIN_REG(MX6Q_PAD_RGMII_RXC, 0x0398, 0x0084, 6, 0x0000, 0), /* MX6Q_PAD_RGMII_RXC__MIPI_CORE_DPHY_IN11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A25__WEIM_WEIM_A_25 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A25__ECSPI4_SS1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 2, 0x0000, 0), /* MX6Q_PAD_EIM_A25__ECSPI2_RDY */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A25__IPU1_DI1_PIN12 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A25__IPU1_DI0_D1_CS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A25__GPIO_5_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 6, 0x088C, 0), /* MX6Q_PAD_EIM_A25__HDMI_TX_CEC_LINE */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A25, 0x039C, 0x0088, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A25__PL301_PER1_HBURST_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_EB2__WEIM_WEIM_EB_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 1, 0x0800, 0), /* MX6Q_PAD_EIM_EB2__ECSPI1_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 2, 0x07EC, 0), /* MX6Q_PAD_EIM_EB2__CCM_DI1_EXT_CLK */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 3, 0x08D4, 0), /* MX6Q_PAD_EIM_EB2__IPU2_CSI1_D_19 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 4, 0x0890, 0), /* MX6Q_PAD_EIM_EB2__HDMI_TX_DDC_SCL */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_EB2__GPIO_2_30 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 6, 0x08A0, 0), /* MX6Q_PAD_EIM_EB2__I2C2_SCL */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB2, 0x03A0, 0x008C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_EB2__SRC_BT_CFG_30 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D16__WEIM_WEIM_D_16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 1, 0x07F4, 0), /* MX6Q_PAD_EIM_D16__ECSPI1_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D16__IPU1_DI0_PIN5 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 3, 0x08D0, 0), /* MX6Q_PAD_EIM_D16__IPU2_CSI1_D_18 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 4, 0x0894, 0), /* MX6Q_PAD_EIM_D16__HDMI_TX_DDC_SDA */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D16__GPIO_3_16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D16, 0x03A4, 0x0090, 6, 0x08A4, 0), /* MX6Q_PAD_EIM_D16__I2C2_SDA */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D17__WEIM_WEIM_D_17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 1, 0x07F8, 0), /* MX6Q_PAD_EIM_D17__ECSPI1_MISO */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D17__IPU1_DI0_PIN6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 3, 0x08E0, 0), /* MX6Q_PAD_EIM_D17__IPU2_CSI1_PIXCLK */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D17__DCIC1_DCIC_OUT */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D17__GPIO_3_17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 6, 0x08A8, 0), /* MX6Q_PAD_EIM_D17__I2C3_SCL */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D17, 0x03A8, 0x0094, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D17__PL301_PER1_HBURST_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D18__WEIM_WEIM_D_18 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 1, 0x07FC, 0), /* MX6Q_PAD_EIM_D18__ECSPI1_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D18__IPU1_DI0_PIN7 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 3, 0x08CC, 0), /* MX6Q_PAD_EIM_D18__IPU2_CSI1_D_17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D18__IPU1_DI1_D0_CS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D18__GPIO_3_18 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 6, 0x08AC, 0), /* MX6Q_PAD_EIM_D18__I2C3_SDA */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D18, 0x03AC, 0x0098, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D18__PL301_PER1_HBURST_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D19__WEIM_WEIM_D_19 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 1, 0x0804, 0), /* MX6Q_PAD_EIM_D19__ECSPI1_SS1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D19__IPU1_DI0_PIN8 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 3, 0x08C8, 0), /* MX6Q_PAD_EIM_D19__IPU2_CSI1_D_16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 4, 0x091C, 0), /* MX6Q_PAD_EIM_D19__UART1_CTS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D19__GPIO_3_19 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D19__EPIT1_EPITO */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D19, 0x03B0, 0x009C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D19__PL301_PER1_HRESP */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D20__WEIM_WEIM_D_20 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 1, 0x0824, 0), /* MX6Q_PAD_EIM_D20__ECSPI4_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D20__IPU1_DI0_PIN16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 3, 0x08C4, 0), /* MX6Q_PAD_EIM_D20__IPU2_CSI1_D_15 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 4, 0x091C, 1), /* MX6Q_PAD_EIM_D20__UART1_RTS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D20__GPIO_3_20 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D20, 0x03B4, 0x00A0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D20__EPIT2_EPITO */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D21__WEIM_WEIM_D_21 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D21__ECSPI4_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D21__IPU1_DI0_PIN17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 3, 0x08B4, 0), /* MX6Q_PAD_EIM_D21__IPU2_CSI1_D_11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 4, 0x0944, 0), /* MX6Q_PAD_EIM_D21__USBOH3_USBOTG_OC */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D21__GPIO_3_21 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 6, 0x0898, 0), /* MX6Q_PAD_EIM_D21__I2C1_SCL */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D21, 0x03B8, 0x00A4, 7, 0x0914, 0), /* MX6Q_PAD_EIM_D21__SPDIF_IN1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D22__WEIM_WEIM_D_22 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D22__ECSPI4_MISO */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D22__IPU1_DI0_PIN1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 3, 0x08B0, 0), /* MX6Q_PAD_EIM_D22__IPU2_CSI1_D_10 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D22__USBOH3_USBOTG_PWR */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D22__GPIO_3_22 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D22__SPDIF_OUT1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D22, 0x03BC, 0x00A8, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D22__PL301_PER1_HWRITE */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D23__WEIM_WEIM_D_23 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D23__IPU1_DI0_D0_CS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 2, 0x092C, 0), /* MX6Q_PAD_EIM_D23__UART3_CTS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 3, 0x0000, 0), /* MX6Q_PAD_EIM_D23__UART1_DCD */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 4, 0x08D8, 0), /* MX6Q_PAD_EIM_D23__IPU2_CSI1_DATA_EN */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D23__GPIO_3_23 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D23__IPU1_DI1_PIN2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D23, 0x03C0, 0x00AC, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D23__IPU1_DI1_PIN14 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__WEIM_WEIM_EB_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 1, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__ECSPI4_RDY */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 2, 0x092C, 1), /* MX6Q_PAD_EIM_EB3__UART3_RTS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 3, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__UART1_RI */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 4, 0x08DC, 0), /* MX6Q_PAD_EIM_EB3__IPU2_CSI1_HSYNC */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__GPIO_2_31 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__IPU1_DI1_PIN3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB3, 0x03C4, 0x00B0, 7, 0x0000, 0), /* MX6Q_PAD_EIM_EB3__SRC_BT_CFG_31 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D24__WEIM_WEIM_D_24 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D24__ECSPI4_SS2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D24__UART3_TXD */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 3, 0x0808, 0), /* MX6Q_PAD_EIM_D24__ECSPI1_SS2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D24__ECSPI2_SS2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D24__GPIO_3_24 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 6, 0x07D8, 0), /* MX6Q_PAD_EIM_D24__AUDMUX_AUD5_RXFS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D24, 0x03C8, 0x00B4, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D24__UART1_DTR */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D25__WEIM_WEIM_D_25 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D25__ECSPI4_SS3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 2, 0x0930, 1), /* MX6Q_PAD_EIM_D25__UART3_RXD */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 3, 0x080C, 0), /* MX6Q_PAD_EIM_D25__ECSPI1_SS3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D25__ECSPI2_SS3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D25__GPIO_3_25 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 6, 0x07D4, 0), /* MX6Q_PAD_EIM_D25__AUDMUX_AUD5_RXC */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D25, 0x03CC, 0x00B8, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D25__UART1_DSR */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D26__WEIM_WEIM_D_26 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D26__IPU1_DI1_PIN11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D26__IPU1_CSI0_D_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 3, 0x08C0, 0), /* MX6Q_PAD_EIM_D26__IPU2_CSI1_D_14 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 4, 0x0000, 0), /* MX6Q_PAD_EIM_D26__UART2_TXD */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D26__GPIO_3_26 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D26__IPU1_SISG_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D26, 0x03D0, 0x00BC, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D26__IPU1_DISP1_DAT_22 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D27__WEIM_WEIM_D_27 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D27__IPU1_DI1_PIN13 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D27__IPU1_CSI0_D_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 3, 0x08BC, 0), /* MX6Q_PAD_EIM_D27__IPU2_CSI1_D_13 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 4, 0x0928, 1), /* MX6Q_PAD_EIM_D27__UART2_RXD */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D27__GPIO_3_27 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D27__IPU1_SISG_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D27, 0x03D4, 0x00C0, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D27__IPU1_DISP1_DAT_23 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D28__WEIM_WEIM_D_28 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 1, 0x089C, 0), /* MX6Q_PAD_EIM_D28__I2C1_SDA */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D28__ECSPI4_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 3, 0x08B8, 0), /* MX6Q_PAD_EIM_D28__IPU2_CSI1_D_12 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 4, 0x0924, 0), /* MX6Q_PAD_EIM_D28__UART2_CTS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D28__GPIO_3_28 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D28__IPU1_EXT_TRIG */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D28, 0x03D8, 0x00C4, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D28__IPU1_DI0_PIN13 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D29__WEIM_WEIM_D_29 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D29__IPU1_DI1_PIN15 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 2, 0x0824, 1), /* MX6Q_PAD_EIM_D29__ECSPI4_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 4, 0x0924, 1), /* MX6Q_PAD_EIM_D29__UART2_RTS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D29__GPIO_3_29 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 6, 0x08E4, 0), /* MX6Q_PAD_EIM_D29__IPU2_CSI1_VSYNC */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D29, 0x03DC, 0x00C8, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D29__IPU1_DI0_PIN14 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D30__WEIM_WEIM_D_30 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D30__IPU1_DISP1_DAT_21 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D30__IPU1_DI0_PIN11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 3, 0x0000, 0), /* MX6Q_PAD_EIM_D30__IPU1_CSI0_D_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 4, 0x092C, 2), /* MX6Q_PAD_EIM_D30__UART3_CTS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D30__GPIO_3_30 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 6, 0x0948, 0), /* MX6Q_PAD_EIM_D30__USBOH3_USBH1_OC */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D30, 0x03E0, 0x00CC, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D30__PL301_PER1_HPROT_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_D31__WEIM_WEIM_D_31 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 1, 0x0000, 0), /* MX6Q_PAD_EIM_D31__IPU1_DISP1_DAT_20 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 2, 0x0000, 0), /* MX6Q_PAD_EIM_D31__IPU1_DI0_PIN12 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 3, 0x0000, 0), /* MX6Q_PAD_EIM_D31__IPU1_CSI0_D_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 4, 0x092C, 3), /* MX6Q_PAD_EIM_D31__UART3_RTS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_D31__GPIO_3_31 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_D31__USBOH3_USBH1_PWR */
+	IMX_PIN_REG(MX6Q_PAD_EIM_D31, 0x03E4, 0x00D0, 7, 0x0000, 0), /* MX6Q_PAD_EIM_D31__PL301_PER1_HPROT_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A24__WEIM_WEIM_A_24 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A24__IPU1_DISP1_DAT_19 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 2, 0x08D4, 1), /* MX6Q_PAD_EIM_A24__IPU2_CSI1_D_19 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A24__IPU2_SISG_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A24__IPU1_SISG_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A24__GPIO_5_4 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A24__PL301_PER1_HPROT_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A24, 0x03E8, 0x00D4, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A24__SRC_BT_CFG_24 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A23__WEIM_WEIM_A_23 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A23__IPU1_DISP1_DAT_18 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 2, 0x08D0, 1), /* MX6Q_PAD_EIM_A23__IPU2_CSI1_D_18 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A23__IPU2_SISG_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A23__IPU1_SISG_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A23__GPIO_6_6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A23__PL301_PER1_HPROT_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A23, 0x03EC, 0x00D8, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A23__SRC_BT_CFG_23 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A22__WEIM_WEIM_A_22 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A22__IPU1_DISP1_DAT_17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 2, 0x08CC, 1), /* MX6Q_PAD_EIM_A22__IPU2_CSI1_D_17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A22__GPIO_2_16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A22__TPSMP_HDATA_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A22, 0x03F0, 0x00DC, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A22__SRC_BT_CFG_22 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A21__WEIM_WEIM_A_21 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A21__IPU1_DISP1_DAT_16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 2, 0x08C8, 1), /* MX6Q_PAD_EIM_A21__IPU2_CSI1_D_16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A21__RESERVED_RESERVED */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A21__MIPI_CORE_DPHY_OUT_18 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A21__GPIO_2_17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A21__TPSMP_HDATA_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A21, 0x03F4, 0x00E0, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A21__SRC_BT_CFG_21 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A20__WEIM_WEIM_A_20 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A20__IPU1_DISP1_DAT_15 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 2, 0x08C4, 1), /* MX6Q_PAD_EIM_A20__IPU2_CSI1_D_15 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A20__RESERVED_RESERVED */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A20__MIPI_CORE_DPHY_OUT_19 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A20__GPIO_2_18 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A20__TPSMP_HDATA_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A20, 0x03F8, 0x00E4, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A20__SRC_BT_CFG_20 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A19__WEIM_WEIM_A_19 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A19__IPU1_DISP1_DAT_14 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 2, 0x08C0, 1), /* MX6Q_PAD_EIM_A19__IPU2_CSI1_D_14 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A19__RESERVED_RESERVED */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A19__MIPI_CORE_DPHY_OUT_20 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A19__GPIO_2_19 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A19__TPSMP_HDATA_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A19, 0x03FC, 0x00E8, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A19__SRC_BT_CFG_19 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A18__WEIM_WEIM_A_18 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A18__IPU1_DISP1_DAT_13 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 2, 0x08BC, 1), /* MX6Q_PAD_EIM_A18__IPU2_CSI1_D_13 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A18__RESERVED_RESERVED */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A18__MIPI_CORE_DPHY_OUT_21 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A18__GPIO_2_20 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A18__TPSMP_HDATA_4 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A18, 0x0400, 0x00EC, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A18__SRC_BT_CFG_18 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A17__WEIM_WEIM_A_17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A17__IPU1_DISP1_DAT_12 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 2, 0x08B8, 1), /* MX6Q_PAD_EIM_A17__IPU2_CSI1_D_12 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 3, 0x0000, 0), /* MX6Q_PAD_EIM_A17__RESERVED_RESERVED */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A17__MIPI_CORE_DPHY_OUT_22 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A17__GPIO_2_21 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A17__TPSMP_HDATA_5 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A17, 0x0404, 0x00F0, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A17__SRC_BT_CFG_17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 0, 0x0000, 0), /* MX6Q_PAD_EIM_A16__WEIM_WEIM_A_16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 1, 0x0000, 0), /* MX6Q_PAD_EIM_A16__IPU1_DI1_DISP_CLK */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 2, 0x08E0, 1), /* MX6Q_PAD_EIM_A16__IPU2_CSI1_PIXCLK */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 4, 0x0000, 0), /* MX6Q_PAD_EIM_A16__MIPI_CORE_DPHY_OUT_23 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 5, 0x0000, 0), /* MX6Q_PAD_EIM_A16__GPIO_2_22 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 6, 0x0000, 0), /* MX6Q_PAD_EIM_A16__TPSMP_HDATA_6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_A16, 0x0408, 0x00F4, 7, 0x0000, 0), /* MX6Q_PAD_EIM_A16__SRC_BT_CFG_16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 0, 0x0000, 0), /* MX6Q_PAD_EIM_CS0__WEIM_WEIM_CS_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 1, 0x0000, 0), /* MX6Q_PAD_EIM_CS0__IPU1_DI1_PIN5 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 2, 0x0810, 0), /* MX6Q_PAD_EIM_CS0__ECSPI2_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 4, 0x0000, 0), /* MX6Q_PAD_EIM_CS0__MIPI_CORE_DPHY_OUT_24 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 5, 0x0000, 0), /* MX6Q_PAD_EIM_CS0__GPIO_2_23 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS0, 0x040C, 0x00F8, 6, 0x0000, 0), /* MX6Q_PAD_EIM_CS0__TPSMP_HDATA_7 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 0, 0x0000, 0), /* MX6Q_PAD_EIM_CS1__WEIM_WEIM_CS_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 1, 0x0000, 0), /* MX6Q_PAD_EIM_CS1__IPU1_DI1_PIN6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 2, 0x0818, 0), /* MX6Q_PAD_EIM_CS1__ECSPI2_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 4, 0x0000, 0), /* MX6Q_PAD_EIM_CS1__MIPI_CORE_DPHY_OUT_25 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 5, 0x0000, 0), /* MX6Q_PAD_EIM_CS1__GPIO_2_24 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_CS1, 0x0410, 0x00FC, 6, 0x0000, 0), /* MX6Q_PAD_EIM_CS1__TPSMP_HDATA_8 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 0, 0x0000, 0), /* MX6Q_PAD_EIM_OE__WEIM_WEIM_OE */
+	IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 1, 0x0000, 0), /* MX6Q_PAD_EIM_OE__IPU1_DI1_PIN7 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 2, 0x0814, 0), /* MX6Q_PAD_EIM_OE__ECSPI2_MISO */
+	IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 4, 0x0000, 0), /* MX6Q_PAD_EIM_OE__MIPI_CORE_DPHY_OUT_26 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 5, 0x0000, 0), /* MX6Q_PAD_EIM_OE__GPIO_2_25 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_OE, 0x0414, 0x0100, 6, 0x0000, 0), /* MX6Q_PAD_EIM_OE__TPSMP_HDATA_9 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 0, 0x0000, 0), /* MX6Q_PAD_EIM_RW__WEIM_WEIM_RW */
+	IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 1, 0x0000, 0), /* MX6Q_PAD_EIM_RW__IPU1_DI1_PIN8 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 2, 0x081C, 0), /* MX6Q_PAD_EIM_RW__ECSPI2_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 4, 0x0000, 0), /* MX6Q_PAD_EIM_RW__MIPI_CORE_DPHY_OUT_27 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 5, 0x0000, 0), /* MX6Q_PAD_EIM_RW__GPIO_2_26 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 6, 0x0000, 0), /* MX6Q_PAD_EIM_RW__TPSMP_HDATA_10 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_RW, 0x0418, 0x0104, 7, 0x0000, 0), /* MX6Q_PAD_EIM_RW__SRC_BT_CFG_29 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 0, 0x0000, 0), /* MX6Q_PAD_EIM_LBA__WEIM_WEIM_LBA */
+	IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 1, 0x0000, 0), /* MX6Q_PAD_EIM_LBA__IPU1_DI1_PIN17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 2, 0x0820, 0), /* MX6Q_PAD_EIM_LBA__ECSPI2_SS1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 5, 0x0000, 0), /* MX6Q_PAD_EIM_LBA__GPIO_2_27 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 6, 0x0000, 0), /* MX6Q_PAD_EIM_LBA__TPSMP_HDATA_11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_LBA, 0x041C, 0x0108, 7, 0x0000, 0), /* MX6Q_PAD_EIM_LBA__SRC_BT_CFG_26 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__WEIM_WEIM_EB_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 1, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__IPU1_DISP1_DAT_11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 2, 0x08B4, 1), /* MX6Q_PAD_EIM_EB0__IPU2_CSI1_D_11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 3, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__MIPI_CORE_DPHY_OUT_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 4, 0x07F0, 0), /* MX6Q_PAD_EIM_EB0__CCM_PMIC_RDY */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__GPIO_2_28 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__TPSMP_HDATA_12 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB0, 0x0420, 0x010C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_EB0__SRC_BT_CFG_27 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 0, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__WEIM_WEIM_EB_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 1, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__IPU1_DISP1_DAT_10 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 2, 0x08B0, 1), /* MX6Q_PAD_EIM_EB1__IPU2_CSI1_D_10 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 3, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__MIPI_CORE_DPHY__OUT_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 5, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__GPIO_2_29 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 6, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__TPSMP_HDATA_13 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_EB1, 0x0424, 0x0110, 7, 0x0000, 0), /* MX6Q_PAD_EIM_EB1__SRC_BT_CFG_28 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__WEIM_WEIM_DA_A_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__IPU1_DISP1_DAT_9 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__IPU2_CSI1_D_9 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__MIPI_CORE_DPHY__OUT_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__GPIO_3_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__TPSMP_HDATA_14 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA0, 0x0428, 0x0114, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA0__SRC_BT_CFG_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__WEIM_WEIM_DA_A_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__IPU1_DISP1_DAT_8 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__IPU2_CSI1_D_8 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__MIPI_CORE_DPHY_OUT_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__USBPHY1_TX_LS_MODE */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__GPIO_3_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__TPSMP_HDATA_15 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA1, 0x042C, 0x0118, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA1__SRC_BT_CFG_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__WEIM_WEIM_DA_A_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__IPU1_DISP1_DAT_7 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__IPU2_CSI1_D_7 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__MIPI_CORE_DPHY_OUT_4 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__USBPHY1_TX_HS_MODE */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__GPIO_3_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__TPSMP_HDATA_16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA2, 0x0430, 0x011C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA2__SRC_BT_CFG_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__WEIM_WEIM_DA_A_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__IPU1_DISP1_DAT_6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__IPU2_CSI1_D_6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__MIPI_CORE_DPHY_OUT_5 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__USBPHY1_TX_HIZ */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__GPIO_3_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__TPSMP_HDATA_17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA3, 0x0434, 0x0120, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA3__SRC_BT_CFG_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__WEIM_WEIM_DA_A_4 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__IPU1_DISP1_DAT_5 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__IPU2_CSI1_D_5 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__MIPI_CORE_DPHY_OUT_6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__ANATOP_USBPHY1_TX_EN */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__GPIO_3_4 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__TPSMP_HDATA_18 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA4, 0x0438, 0x0124, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA4__SRC_BT_CFG_4 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__WEIM_WEIM_DA_A_5 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__IPU1_DISP1_DAT_4 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__IPU2_CSI1_D_4 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__MIPI_CORE_DPHY_OUT_7 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__ANATOP_USBPHY1_TX_DP */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__GPIO_3_5 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__TPSMP_HDATA_19 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA5, 0x043C, 0x0128, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA5__SRC_BT_CFG_5 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__WEIM_WEIM_DA_A_6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__IPU1_DISP1_DAT_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__IPU2_CSI1_D_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__MIPI_CORE_DPHY_OUT_8 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__ANATOP_USBPHY1_TX_DN */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__GPIO_3_6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__TPSMP_HDATA_20 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA6, 0x0440, 0x012C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA6__SRC_BT_CFG_6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__WEIM_WEIM_DA_A_7 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__IPU1_DISP1_DAT_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__IPU2_CSI1_D_2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__MIPI_CORE_DPHY_OUT_9 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__GPIO_3_7 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__TPSMP_HDATA_21 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA7, 0x0444, 0x0130, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA7__SRC_BT_CFG_7 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__WEIM_WEIM_DA_A_8 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__IPU1_DISP1_DAT_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__IPU2_CSI1_D_1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__MIPI_CORE_DPHY_OUT_10 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__GPIO_3_8 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__TPSMP_HDATA_22 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA8, 0x0448, 0x0134, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA8__SRC_BT_CFG_8 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__WEIM_WEIM_DA_A_9 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__IPU1_DISP1_DAT_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__IPU2_CSI1_D_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__MIPI_CORE_DPHY_OUT_11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__GPIO_3_9 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__TPSMP_HDATA_23 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA9, 0x044C, 0x0138, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA9__SRC_BT_CFG_9 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__WEIM_WEIM_DA_A_10 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__IPU1_DI1_PIN15 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 2, 0x08D8, 1), /* MX6Q_PAD_EIM_DA10__IPU2_CSI1_DATA_EN */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__MIPI_CORE_DPHY_OUT12 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__GPIO_3_10 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__TPSMP_HDATA_24 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA10, 0x0450, 0x013C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA10__SRC_BT_CFG_10 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__WEIM_WEIM_DA_A_11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__IPU1_DI1_PIN2 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 2, 0x08DC, 1), /* MX6Q_PAD_EIM_DA11__IPU2_CSI1_HSYNC */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__MIPI_CORE_DPHY_OUT13 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__SDMA_DBG_EVT_CHN_6 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__GPIO_3_11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__TPSMP_HDATA_25 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA11, 0x0454, 0x0140, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA11__SRC_BT_CFG_11 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__WEIM_WEIM_DA_A_12 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__IPU1_DI1_PIN3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 2, 0x08E4, 1), /* MX6Q_PAD_EIM_DA12__IPU2_CSI1_VSYNC */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__MIPI_CORE_DPHY_OUT14 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__SDMA_DEBUG_EVT_CHN_3 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__GPIO_3_12 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__TPSMP_HDATA_26 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA12, 0x0458, 0x0144, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA12__SRC_BT_CFG_12 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__WEIM_WEIM_DA_A_13 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__IPU1_DI1_D0_CS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 2, 0x07EC, 1), /* MX6Q_PAD_EIM_DA13__CCM_DI1_EXT_CLK */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__MIPI_CORE_DPHY_OUT15 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__SDMA_DEBUG_EVT_CHN_4 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__GPIO_3_13 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__TPSMP_HDATA_27 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA13, 0x045C, 0x0148, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA13__SRC_BT_CFG_13 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__WEIM_WEIM_DA_A_14 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__IPU1_DI1_D1_CS */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__CCM_DI0_EXT_CLK */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__MIPI_CORE_DPHY_OUT16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 4, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__SDMA_DEBUG_EVT_CHN_5 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__GPIO_3_14 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__TPSMP_HDATA_28 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA14, 0x0460, 0x014C, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA14__SRC_BT_CFG_14 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 0, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__WEIM_WEIM_DA_A_15 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 1, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__IPU1_DI1_PIN1 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 2, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__IPU1_DI1_PIN4 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 3, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__MIPI_CORE_DPHY_OUT17 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 5, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__GPIO_3_15 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 6, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__TPSMP_HDATA_29 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_DA15, 0x0464, 0x0150, 7, 0x0000, 0), /* MX6Q_PAD_EIM_DA15__SRC_BT_CFG_15 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_WAIT, 0x0468, 0x0154, 0, 0x0000, 0), /* MX6Q_PAD_EIM_WAIT__WEIM_WEIM_WAIT */
+	IMX_PIN_REG(MX6Q_PAD_EIM_WAIT, 0x0468, 0x0154, 1, 0x0000, 0), /* MX6Q_PAD_EIM_WAIT__WEIM_WEIM_DTACK_B */
+	IMX_PIN_REG(MX6Q_PAD_EIM_WAIT, 0x0468, 0x0154, 5, 0x0000, 0), /* MX6Q_PAD_EIM_WAIT__GPIO_5_0 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_WAIT, 0x0468, 0x0154, 6, 0x0000, 0), /* MX6Q_PAD_EIM_WAIT__TPSMP_HDATA_30 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_WAIT, 0x0468, 0x0154, 7, 0x0000, 0), /* MX6Q_PAD_EIM_WAIT__SRC_BT_CFG_25 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_BCLK, 0x046C, 0x0158, 0, 0x0000, 0), /* MX6Q_PAD_EIM_BCLK__WEIM_WEIM_BCLK */
+	IMX_PIN_REG(MX6Q_PAD_EIM_BCLK, 0x046C, 0x0158, 1, 0x0000, 0), /* MX6Q_PAD_EIM_BCLK__IPU1_DI1_PIN16 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_BCLK, 0x046C, 0x0158, 5, 0x0000, 0), /* MX6Q_PAD_EIM_BCLK__GPIO_6_31 */
+	IMX_PIN_REG(MX6Q_PAD_EIM_BCLK, 0x046C, 0x0158, 6, 0x0000, 0), /* MX6Q_PAD_EIM_BCLK__TPSMP_HDATA_31 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 0, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__IPU1_DI0_DSP_CLK */
+	IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 1, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__IPU2_DI0_DSP_CLK */
+	IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 3, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__MIPI_CR_DPY_OT28 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 4, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__SDMA_DBG_CR_STA0 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 5, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__GPIO_4_16 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_DISP_CLK, 0x0470, 0x015C, 6, 0x0000, 0), /* MX6Q_PAD_DI0_DISP_CLK__MMDC_DEBUG_0 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 0, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__IPU1_DI0_PIN15 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 1, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__IPU2_DI0_PIN15 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 2, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__AUDMUX_AUD6_TXC */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 3, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__MIPI_CR_DPHY_OUT_29 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 4, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__SDMA_DBG_CORE_STA_1 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 5, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__GPIO_4_17 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN15, 0x0474, 0x0160, 6, 0x0000, 0), /* MX6Q_PAD_DI0_PIN15__MMDC_MMDC_DEBUG_1 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 0, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__IPU1_DI0_PIN2 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 1, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__IPU2_DI0_PIN2 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 2, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__AUDMUX_AUD6_TXD */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 3, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__MIPI_CR_DPHY_OUT_30 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 4, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__SDMA_DBG_CORE_STA_2 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 5, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__GPIO_4_18 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 6, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__MMDC_DEBUG_2 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN2, 0x0478, 0x0164, 7, 0x0000, 0), /* MX6Q_PAD_DI0_PIN2__PL301_PER1_HADDR_9 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 0, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__IPU1_DI0_PIN3 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 1, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__IPU2_DI0_PIN3 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 2, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__AUDMUX_AUD6_TXFS */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 3, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__MIPI_CORE_DPHY_OUT31 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 4, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__SDMA_DBG_CORE_STA_3 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 5, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__GPIO_4_19 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 6, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__MMDC_MMDC_DEBUG_3 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN3, 0x047C, 0x0168, 7, 0x0000, 0), /* MX6Q_PAD_DI0_PIN3__PL301_PER1_HADDR_10 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 0, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__IPU1_DI0_PIN4 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 1, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__IPU2_DI0_PIN4 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 2, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__AUDMUX_AUD6_RXD */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 3, 0x094C, 0), /* MX6Q_PAD_DI0_PIN4__USDHC1_WP */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 4, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__SDMA_DEBUG_YIELD */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 5, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__GPIO_4_20 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 6, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__MMDC_MMDC_DEBUG_4 */
+	IMX_PIN_REG(MX6Q_PAD_DI0_PIN4, 0x0480, 0x016C, 7, 0x0000, 0), /* MX6Q_PAD_DI0_PIN4__PL301_PER1_HADDR_11 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__IPU1_DISP0_DAT_0 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__IPU2_DISP0_DAT_0 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__ECSPI3_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__USDHC1_USDHC_DBG_0 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__SDMA_DBG_CORE_RUN */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__GPIO_4_21 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT0, 0x0484, 0x0170, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT0__MMDC_MMDC_DEBUG_5 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__IPU1_DISP0_DAT_1 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__IPU2_DISP0_DAT_1 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__ECSPI3_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__USDHC1_USDHC_DBG_1 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__SDMA_DBG_EVT_CHNSL */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__GPIO_4_22 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__MMDC_DEBUG_6 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT1, 0x0488, 0x0174, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT1__PL301_PER1_HADR_12 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__IPU1_DISP0_DAT_2 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__IPU2_DISP0_DAT_2 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__ECSPI3_MISO */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__USDHC1_USDHC_DBG_2 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__SDMA_DEBUG_MODE */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__GPIO_4_23 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__MMDC_DEBUG_7 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT2, 0x048C, 0x0178, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT2__PL301_PER1_HADR_13 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__IPU1_DISP0_DAT_3 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__IPU2_DISP0_DAT_3 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__ECSPI3_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__USDHC1_USDHC_DBG_3 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__SDMA_DBG_BUS_ERROR */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__GPIO_4_24 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__MMDC_MMDC_DBG_8 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT3, 0x0490, 0x017C, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT3__PL301_PER1_HADR_14 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__IPU1_DISP0_DAT_4 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__IPU2_DISP0_DAT_4 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__ECSPI3_SS1 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__USDHC1_USDHC_DBG_4 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__SDMA_DEBUG_BUS_RWB */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__GPIO_4_25 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__MMDC_MMDC_DEBUG_9 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT4, 0x0494, 0x0180, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT4__PL301_PER1_HADR_15 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__IPU1_DISP0_DAT_5 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__IPU2_DISP0_DAT_5 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__ECSPI3_SS2 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__AUDMUX_AUD6_RXFS */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__SDMA_DBG_MCH_DMBUS */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__GPIO_4_26 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__MMDC_DEBUG_10 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT5, 0x0498, 0x0184, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT5__PL301_PER1_HADR_16 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__IPU1_DISP0_DAT_6 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__IPU2_DISP0_DAT_6 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__ECSPI3_SS3 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__AUDMUX_AUD6_RXC */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__SDMA_DBG_RTBUF_WRT */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__GPIO_4_27 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__MMDC_DEBUG_11 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT6, 0x049C, 0x0188, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT6__PL301_PER1_HADR_17 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__IPU1_DISP0_DAT_7 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__IPU2_DISP0_DAT_7 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__ECSPI3_RDY */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__USDHC1_USDHC_DBG_5 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__SDMA_DBG_EVT_CHN_0 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__GPIO_4_28 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__MMDC_DEBUG_12 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT7, 0x04A0, 0x018C, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT7__PL301_PER1_HADR_18 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__IPU1_DISP0_DAT_8 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__IPU2_DISP0_DAT_8 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__PWM1_PWMO */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__WDOG1_WDOG_B */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__SDMA_DBG_EVT_CHN_1 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__GPIO_4_29 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__MMDC_DEBUG_13 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT8, 0x04A4, 0x0190, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT8__PL301_PER1_HADR_19 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__IPU1_DISP0_DAT_9 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__IPU2_DISP0_DAT_9 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 2, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__PWM2_PWMO */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__WDOG2_WDOG_B */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__SDMA_DBG_EVT_CHN_2 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__GPIO_4_30 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__MMDC_DEBUG_14 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT9, 0x04A8, 0x0194, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT9__PL301_PER1_HADR_20 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__IPU1_DISP0_DAT_10 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__IPU2_DISP0_DAT_10 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__USDHC1_DBG_6 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__SDMA_DBG_EVT_CHN3 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__GPIO_4_31 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__MMDC_DEBUG_15 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT10, 0x04AC, 0x0198, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT10__PL301_PER1_HADR21 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__IPU1_DISP0_DAT_11 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__IPU2_DISP0_DAT_11 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__USDHC1_USDHC_DBG7 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__SDMA_DBG_EVT_CHN4 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__GPIO_5_5 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__MMDC_DEBUG_16 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT11, 0x04B0, 0x019C, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT11__PL301_PER1_HADR22 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__IPU1_DISP0_DAT_12 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__IPU2_DISP0_DAT_12 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 3, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__RESERVED_RESERVED */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__SDMA_DBG_EVT_CHN5 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__GPIO_5_6 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__MMDC_DEBUG_17 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT12, 0x04B4, 0x01A0, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT12__PL301_PER1_HADR23 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__IPU1_DISP0_DAT_13 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__IPU2_DISP0_DAT_13 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 3, 0x07D8, 1), /* MX6Q_PAD_DISP0_DAT13__AUDMUX_AUD5_RXFS */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__SDMA_DBG_EVT_CHN0 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__GPIO_5_7 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__MMDC_DEBUG_18 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT13, 0x04B8, 0x01A4, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT13__PL301_PER1_HADR24 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT14__IPU1_DISP0_DAT_14 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT14__IPU2_DISP0_DAT_14 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 3, 0x07D4, 1), /* MX6Q_PAD_DISP0_DAT14__AUDMUX_AUD5_RXC */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT14__SDMA_DBG_EVT_CHN1 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT14__GPIO_5_8 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT14, 0x04BC, 0x01A8, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT14__MMDC_DEBUG_19 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__IPU1_DISP0_DAT_15 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__IPU2_DISP0_DAT_15 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 2, 0x0804, 1), /* MX6Q_PAD_DISP0_DAT15__ECSPI1_SS1 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 3, 0x0820, 1), /* MX6Q_PAD_DISP0_DAT15__ECSPI2_SS1 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__SDMA_DBG_EVT_CHN2 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__GPIO_5_9 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__MMDC_DEBUG_20 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT15, 0x04C0, 0x01AC, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT15__PL301_PER1_HADR25 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT16__IPU1_DISP0_DAT_16 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT16__IPU2_DISP0_DAT_16 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 2, 0x0818, 1), /* MX6Q_PAD_DISP0_DAT16__ECSPI2_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 3, 0x07DC, 0), /* MX6Q_PAD_DISP0_DAT16__AUDMUX_AUD5_TXC */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 4, 0x090C, 0), /* MX6Q_PAD_DISP0_DAT16__SDMA_EXT_EVENT_0 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT16__GPIO_5_10 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT16__MMDC_DEBUG_21 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT16, 0x04C4, 0x01B0, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT16__PL301_PER1_HADR26 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT17__IPU1_DISP0_DAT_17 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT17__IPU2_DISP0_DAT_17 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 2, 0x0814, 1), /* MX6Q_PAD_DISP0_DAT17__ECSPI2_MISO */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 3, 0x07D0, 0), /* MX6Q_PAD_DISP0_DAT17__AUDMUX_AUD5_TXD */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 4, 0x0910, 0), /* MX6Q_PAD_DISP0_DAT17__SDMA_EXT_EVENT_1 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT17__GPIO_5_11 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT17__MMDC_DEBUG_22 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT17, 0x04C8, 0x01B4, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT17__PL301_PER1_HADR27 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT18__IPU1_DISP0_DAT_18 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT18__IPU2_DISP0_DAT_18 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 2, 0x081C, 1), /* MX6Q_PAD_DISP0_DAT18__ECSPI2_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 3, 0x07E0, 0), /* MX6Q_PAD_DISP0_DAT18__AUDMUX_AUD5_TXFS */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 4, 0x07C0, 0), /* MX6Q_PAD_DISP0_DAT18__AUDMUX_AUD4_RXFS */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT18__GPIO_5_12 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT18__MMDC_DEBUG_23 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT18, 0x04CC, 0x01B8, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT18__WEIM_WEIM_CS_2 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT19__IPU1_DISP0_DAT_19 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT19__IPU2_DISP0_DAT_19 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 2, 0x0810, 1), /* MX6Q_PAD_DISP0_DAT19__ECSPI2_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 3, 0x07CC, 0), /* MX6Q_PAD_DISP0_DAT19__AUDMUX_AUD5_RXD */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 4, 0x07BC, 0), /* MX6Q_PAD_DISP0_DAT19__AUDMUX_AUD4_RXC */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT19__GPIO_5_13 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT19__MMDC_DEBUG_24 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT19, 0x04D0, 0x01BC, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT19__WEIM_WEIM_CS_3 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__IPU1_DISP0_DAT_20 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__IPU2_DISP0_DAT_20 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 2, 0x07F4, 1), /* MX6Q_PAD_DISP0_DAT20__ECSPI1_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 3, 0x07C4, 0), /* MX6Q_PAD_DISP0_DAT20__AUDMUX_AUD4_TXC */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__SDMA_DBG_EVT_CHN7 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__GPIO_5_14 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__MMDC_DEBUG_25 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT20, 0x04D4, 0x01C0, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT20__PL301_PER1_HADR28 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__IPU1_DISP0_DAT_21 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__IPU2_DISP0_DAT_21 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 2, 0x07FC, 1), /* MX6Q_PAD_DISP0_DAT21__ECSPI1_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 3, 0x07B8, 1), /* MX6Q_PAD_DISP0_DAT21__AUDMUX_AUD4_TXD */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__SDMA_DBG_BUS_DEV0 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__GPIO_5_15 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__MMDC_DEBUG_26 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT21, 0x04D8, 0x01C4, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT21__PL301_PER1_HADR29 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__IPU1_DISP0_DAT_22 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__IPU2_DISP0_DAT_22 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 2, 0x07F8, 1), /* MX6Q_PAD_DISP0_DAT22__ECSPI1_MISO */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 3, 0x07C8, 1), /* MX6Q_PAD_DISP0_DAT22__AUDMUX_AUD4_TXFS */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__SDMA_DBG_BUS_DEV1 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__GPIO_5_16 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__MMDC_DEBUG_27 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT22, 0x04DC, 0x01C8, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT22__PL301_PER1_HADR30 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 0, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__IPU1_DISP0_DAT_23 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 1, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__IPU2_DISP0_DAT_23 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 2, 0x0800, 1), /* MX6Q_PAD_DISP0_DAT23__ECSPI1_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 3, 0x07B4, 1), /* MX6Q_PAD_DISP0_DAT23__AUDMUX_AUD4_RXD */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 4, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__SDMA_DBG_BUS_DEV2 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 5, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__GPIO_5_17 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 6, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__MMDC_DEBUG_28 */
+	IMX_PIN_REG(MX6Q_PAD_DISP0_DAT23, 0x04E0, 0x01CC, 7, 0x0000, 0), /* MX6Q_PAD_DISP0_DAT23__PL301_PER1_HADR31 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 0, 0x0000, 0), /* MX6Q_PAD_ENET_MDIO__RESERVED_RESERVED */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 1, 0x0840, 0), /* MX6Q_PAD_ENET_MDIO__ENET_MDIO */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 2, 0x086C, 0), /* MX6Q_PAD_ENET_MDIO__ESAI1_SCKR */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 3, 0x0000, 0), /* MX6Q_PAD_ENET_MDIO__SDMA_DEBUG_BUS_DEV3 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 4, 0x0000, 0), /* MX6Q_PAD_ENET_MDIO__ENET_1588_EVT1_OUT */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 5, 0x0000, 0), /* MX6Q_PAD_ENET_MDIO__GPIO_1_22 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDIO, 0x04E4, 0x01D0, 6, 0x0000, 0), /* MX6Q_PAD_ENET_MDIO__SPDIF_PLOCK */
+	IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 0, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__RESERVED_RSRVED */
+	IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 1, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__ENET_TX_CLK */
+	IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 2, 0x085C, 0), /* MX6Q_PAD_ENET_REF_CLK__ESAI1_FSR */
+	IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 3, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__SDMA_DBGBUS_DEV4 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 5, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__GPIO_1_23 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 6, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__SPDIF_SRCLK */
+	IMX_PIN_REG(MX6Q_PAD_ENET_REF_CLK, 0x04E8, 0x01D4, 7, 0x0000, 0), /* MX6Q_PAD_ENET_REF_CLK__USBPHY1_RX_SQH */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 1, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ENET_RX_ER */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 2, 0x0864, 0), /* MX6Q_PAD_ENET_RX_ER__ESAI1_HCKR */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 3, 0x0914, 1), /* MX6Q_PAD_ENET_RX_ER__SPDIF_IN1 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 4, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ENET_1588_EVT2_OUT */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 5, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__GPIO_1_24 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 6, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__PHY_TDI */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 7, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__USBPHY1_RX_HS_RXD */
+	IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 0, 0x0000, 0), /* MX6Q_PAD_ENET_CRS_DV__RESERVED_RSRVED */
+	IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 1, 0x0858, 1), /* MX6Q_PAD_ENET_CRS_DV__ENET_RX_EN */
+	IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 2, 0x0870, 0), /* MX6Q_PAD_ENET_CRS_DV__ESAI1_SCKT */
+	IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 3, 0x0918, 1), /* MX6Q_PAD_ENET_CRS_DV__SPDIF_EXTCLK */
+	IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 5, 0x0000, 0), /* MX6Q_PAD_ENET_CRS_DV__GPIO_1_25 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 6, 0x0000, 0), /* MX6Q_PAD_ENET_CRS_DV__PHY_TDO */
+	IMX_PIN_REG(MX6Q_PAD_ENET_CRS_DV, 0x04F0, 0x01DC, 7, 0x0000, 0), /* MX6Q_PAD_ENET_CRS_DV__USBPHY1_RX_FS_RXD */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 0, 0x0908, 0), /* MX6Q_PAD_ENET_RXD1__MLB_MLBSIG */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 1, 0x084C, 1), /* MX6Q_PAD_ENET_RXD1__ENET_RDATA_1 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 2, 0x0860, 0), /* MX6Q_PAD_ENET_RXD1__ESAI1_FST */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 4, 0x0000, 0), /* MX6Q_PAD_ENET_RXD1__ENET_1588_EVT3_OUT */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 5, 0x0000, 0), /* MX6Q_PAD_ENET_RXD1__GPIO_1_26 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 6, 0x0000, 0), /* MX6Q_PAD_ENET_RXD1__PHY_TCK */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD1, 0x04F4, 0x01E0, 7, 0x0000, 0), /* MX6Q_PAD_ENET_RXD1__USBPHY1_RX_DISCON */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RXD0__OSC32K_32K_OUT */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 1, 0x0848, 1), /* MX6Q_PAD_ENET_RXD0__ENET_RDATA_0 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 2, 0x0868, 0), /* MX6Q_PAD_ENET_RXD0__ESAI1_HCKT */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 3, 0x0000, 0), /* MX6Q_PAD_ENET_RXD0__SPDIF_OUT1 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 5, 0x0000, 0), /* MX6Q_PAD_ENET_RXD0__GPIO_1_27 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 6, 0x0000, 0), /* MX6Q_PAD_ENET_RXD0__PHY_TMS */
+	IMX_PIN_REG(MX6Q_PAD_ENET_RXD0, 0x04F8, 0x01E4, 7, 0x0000, 0), /* MX6Q_PAD_ENET_RXD0__USBPHY1_PLL_CK20DIV */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_TX_EN__RESERVED_RSRVED */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 1, 0x0000, 0), /* MX6Q_PAD_ENET_TX_EN__ENET_TX_EN */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 2, 0x0880, 0), /* MX6Q_PAD_ENET_TX_EN__ESAI1_TX3_RX2 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 5, 0x0000, 0), /* MX6Q_PAD_ENET_TX_EN__GPIO_1_28 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 6, 0x0000, 0), /* MX6Q_PAD_ENET_TX_EN__SATA_PHY_TDI */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TX_EN, 0x04FC, 0x01E8, 7, 0x0000, 0), /* MX6Q_PAD_ENET_TX_EN__USBPHY2_RX_SQH */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 0, 0x0900, 0), /* MX6Q_PAD_ENET_TXD1__MLB_MLBCLK */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 1, 0x0000, 0), /* MX6Q_PAD_ENET_TXD1__ENET_TDATA_1 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 2, 0x087C, 0), /* MX6Q_PAD_ENET_TXD1__ESAI1_TX2_RX3 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 4, 0x0000, 0), /* MX6Q_PAD_ENET_TXD1__ENET_1588_EVENT0_IN */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 5, 0x0000, 0), /* MX6Q_PAD_ENET_TXD1__GPIO_1_29 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 6, 0x0000, 0), /* MX6Q_PAD_ENET_TXD1__SATA_PHY_TDO */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD1, 0x0500, 0x01EC, 7, 0x0000, 0), /* MX6Q_PAD_ENET_TXD1__USBPHY2_RX_HS_RXD */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 0, 0x0000, 0), /* MX6Q_PAD_ENET_TXD0__RESERVED_RSRVED */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 1, 0x0000, 0), /* MX6Q_PAD_ENET_TXD0__ENET_TDATA_0 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 2, 0x0884, 0), /* MX6Q_PAD_ENET_TXD0__ESAI1_TX4_RX1 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 5, 0x0000, 0), /* MX6Q_PAD_ENET_TXD0__GPIO_1_30 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 6, 0x0000, 0), /* MX6Q_PAD_ENET_TXD0__SATA_PHY_TCK */
+	IMX_PIN_REG(MX6Q_PAD_ENET_TXD0, 0x0504, 0x01F0, 7, 0x0000, 0), /* MX6Q_PAD_ENET_TXD0__USBPHY2_RX_FS_RXD */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 0, 0x0904, 0), /* MX6Q_PAD_ENET_MDC__MLB_MLBDAT */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 1, 0x0000, 0), /* MX6Q_PAD_ENET_MDC__ENET_MDC */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 2, 0x0888, 0), /* MX6Q_PAD_ENET_MDC__ESAI1_TX5_RX0 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 4, 0x0000, 0), /* MX6Q_PAD_ENET_MDC__ENET_1588_EVENT1_IN */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 5, 0x0000, 0), /* MX6Q_PAD_ENET_MDC__GPIO_1_31 */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 6, 0x0000, 0), /* MX6Q_PAD_ENET_MDC__SATA_PHY_TMS */
+	IMX_PIN_REG(MX6Q_PAD_ENET_MDC, 0x0508, 0x01F4, 7, 0x0000, 0), /* MX6Q_PAD_ENET_MDC__USBPHY2_RX_DISCON */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D40, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D40__MMDC_DRAM_D_40 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D41, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D41__MMDC_DRAM_D_41 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D42, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D42__MMDC_DRAM_D_42 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D43, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D43__MMDC_DRAM_D_43 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D44, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D44__MMDC_DRAM_D_44 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D45, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D45__MMDC_DRAM_D_45 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D46, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D46__MMDC_DRAM_D_46 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D47, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D47__MMDC_DRAM_D_47 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS5, 0x050C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS5__MMDC_DRAM_SDQS_5 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_DQM5, 0x0510, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM5__MMDC_DRAM_DQM_5 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D32, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D32__MMDC_DRAM_D_32 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D33, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D33__MMDC_DRAM_D_33 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D34, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D34__MMDC_DRAM_D_34 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D35, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D35__MMDC_DRAM_D_35 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D36, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D36__MMDC_DRAM_D_36 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D37, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D37__MMDC_DRAM_D_37 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D38, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D38__MMDC_DRAM_D_38 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D39, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D39__MMDC_DRAM_D_39 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_DQM4, 0x0514, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM4__MMDC_DRAM_DQM_4 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS4, 0x0518, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS4__MMDC_DRAM_SDQS_4 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D24, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D24__MMDC_DRAM_D_24 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D25, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D25__MMDC_DRAM_D_25 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D26, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D26__MMDC_DRAM_D_26 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D27, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D27__MMDC_DRAM_D_27 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D28, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D28__MMDC_DRAM_D_28 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D29, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D29__MMDC_DRAM_D_29 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS3, 0x051C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS3__MMDC_DRAM_SDQS_3 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D30, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D30__MMDC_DRAM_D_30 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D31, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D31__MMDC_DRAM_D_31 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_DQM3, 0x0520, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM3__MMDC_DRAM_DQM_3 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D16, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D16__MMDC_DRAM_D_16 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D17, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D17__MMDC_DRAM_D_17 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D18, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D18__MMDC_DRAM_D_18 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D19, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D19__MMDC_DRAM_D_19 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D20, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D20__MMDC_DRAM_D_20 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D21, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D21__MMDC_DRAM_D_21 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D22, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D22__MMDC_DRAM_D_22 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS2, 0x0524, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS2__MMDC_DRAM_SDQS_2 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D23, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D23__MMDC_DRAM_D_23 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_DQM2, 0x0528, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM2__MMDC_DRAM_DQM_2 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A0, 0x052C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A0__MMDC_DRAM_A_0 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A1, 0x0530, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A1__MMDC_DRAM_A_1 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A2, 0x0534, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A2__MMDC_DRAM_A_2 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A3, 0x0538, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A3__MMDC_DRAM_A_3 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A4, 0x053C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A4__MMDC_DRAM_A_4 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A5, 0x0540, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A5__MMDC_DRAM_A_5 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A6, 0x0544, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A6__MMDC_DRAM_A_6 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A7, 0x0548, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A7__MMDC_DRAM_A_7 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A8, 0x054C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A8__MMDC_DRAM_A_8 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A9, 0x0550, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A9__MMDC_DRAM_A_9 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A10, 0x0554, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A10__MMDC_DRAM_A_10 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A11, 0x0558, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A11__MMDC_DRAM_A_11 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A12, 0x055C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A12__MMDC_DRAM_A_12 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A13, 0x0560, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A13__MMDC_DRAM_A_13 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A14, 0x0564, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A14__MMDC_DRAM_A_14 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_A15, 0x0568, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_A15__MMDC_DRAM_A_15 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_CAS, 0x056C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_CAS__MMDC_DRAM_CAS */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_CS0, 0x0570, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_CS0__MMDC_DRAM_CS_0 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_CS1, 0x0574, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_CS1__MMDC_DRAM_CS_1 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_RAS, 0x0578, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_RAS__MMDC_DRAM_RAS */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_RESET, 0x057C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_RESET__MMDC_DRAM_RESET */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDBA0, 0x0580, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDBA0__MMDC_DRAM_SDBA_0 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDBA1, 0x0584, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDBA1__MMDC_DRAM_SDBA_1 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDCLK_0, 0x0588, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDCLK_0__MMDC_DRAM_SDCLK0 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDBA2, 0x058C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDBA2__MMDC_DRAM_SDBA_2 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDCKE0, 0x0590, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDCKE0__MMDC_DRAM_SDCKE_0 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDCLK_1, 0x0594, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDCLK_1__MMDC_DRAM_SDCLK1 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDCKE1, 0x0598, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDCKE1__MMDC_DRAM_SDCKE_1 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDODT0, 0x059C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDODT0__MMDC_DRAM_ODT_0 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDODT1, 0x05A0, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDODT1__MMDC_DRAM_ODT_1 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDWE, 0x05A4, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDWE__MMDC_DRAM_SDWE */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D0, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D0__MMDC_DRAM_D_0 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D1, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D1__MMDC_DRAM_D_1 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D2, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D2__MMDC_DRAM_D_2 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D3, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D3__MMDC_DRAM_D_3 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D4, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D4__MMDC_DRAM_D_4 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D5, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D5__MMDC_DRAM_D_5 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS0, 0x05A8, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS0__MMDC_DRAM_SDQS_0 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D6, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D6__MMDC_DRAM_D_6 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D7, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D7__MMDC_DRAM_D_7 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_DQM0, 0x05AC, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM0__MMDC_DRAM_DQM_0 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D8, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D8__MMDC_DRAM_D_8 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D9, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D9__MMDC_DRAM_D_9 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D10, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D10__MMDC_DRAM_D_10 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D11, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D11__MMDC_DRAM_D_11 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D12, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D12__MMDC_DRAM_D_12 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D13, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D13__MMDC_DRAM_D_13 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D14, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D14__MMDC_DRAM_D_14 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS1, 0x05B0, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS1__MMDC_DRAM_SDQS_1 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D15, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D15__MMDC_DRAM_D_15 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_DQM1, 0x05B4, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM1__MMDC_DRAM_DQM_1 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D48, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D48__MMDC_DRAM_D_48 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D49, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D49__MMDC_DRAM_D_49 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D50, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D50__MMDC_DRAM_D_50 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D51, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D51__MMDC_DRAM_D_51 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D52, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D52__MMDC_DRAM_D_52 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D53, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D53__MMDC_DRAM_D_53 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D54, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D54__MMDC_DRAM_D_54 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D55, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D55__MMDC_DRAM_D_55 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS6, 0x05B8, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS6__MMDC_DRAM_SDQS_6 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_DQM6, 0x05BC, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM6__MMDC_DRAM_DQM_6 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D56, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D56__MMDC_DRAM_D_56 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_SDQS7, 0x05C0, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_SDQS7__MMDC_DRAM_SDQS_7 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D57, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D57__MMDC_DRAM_D_57 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D58, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D58__MMDC_DRAM_D_58 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D59, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D59__MMDC_DRAM_D_59 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D60, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D60__MMDC_DRAM_D_60 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_DQM7, 0x05C4, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_DQM7__MMDC_DRAM_DQM_7 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D61, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D61__MMDC_DRAM_D_61 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D62, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D62__MMDC_DRAM_D_62 */
+	IMX_PIN_REG(MX6Q_PAD_DRAM_D63, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_DRAM_D63__MMDC_DRAM_D_63 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 0, 0x07F4, 2), /* MX6Q_PAD_KEY_COL0__ECSPI1_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 1, 0x0854, 1), /* MX6Q_PAD_KEY_COL0__ENET_RDATA_3 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 2, 0x07DC, 1), /* MX6Q_PAD_KEY_COL0__AUDMUX_AUD5_TXC */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 3, 0x0000, 0), /* MX6Q_PAD_KEY_COL0__KPP_COL_0 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 4, 0x0000, 0), /* MX6Q_PAD_KEY_COL0__UART4_TXD */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 5, 0x0000, 0), /* MX6Q_PAD_KEY_COL0__GPIO_4_6 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 6, 0x0000, 0), /* MX6Q_PAD_KEY_COL0__DCIC1_DCIC_OUT */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL0, 0x05C8, 0x01F8, 7, 0x0000, 0), /* MX6Q_PAD_KEY_COL0__SRC_ANY_PU_RST */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 0, 0x07FC, 2), /* MX6Q_PAD_KEY_ROW0__ECSPI1_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 1, 0x0000, 0), /* MX6Q_PAD_KEY_ROW0__ENET_TDATA_3 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 2, 0x07D0, 1), /* MX6Q_PAD_KEY_ROW0__AUDMUX_AUD5_TXD */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 3, 0x0000, 0), /* MX6Q_PAD_KEY_ROW0__KPP_ROW_0 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 4, 0x0938, 1), /* MX6Q_PAD_KEY_ROW0__UART4_RXD */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 5, 0x0000, 0), /* MX6Q_PAD_KEY_ROW0__GPIO_4_7 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 6, 0x0000, 0), /* MX6Q_PAD_KEY_ROW0__DCIC2_DCIC_OUT */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW0, 0x05CC, 0x01FC, 7, 0x0000, 0), /* MX6Q_PAD_KEY_ROW0__PL301_PER1_HADR_0 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 0, 0x07F8, 2), /* MX6Q_PAD_KEY_COL1__ECSPI1_MISO */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 1, 0x0840, 1), /* MX6Q_PAD_KEY_COL1__ENET_MDIO */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 2, 0x07E0, 1), /* MX6Q_PAD_KEY_COL1__AUDMUX_AUD5_TXFS */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 3, 0x0000, 0), /* MX6Q_PAD_KEY_COL1__KPP_COL_1 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 4, 0x0000, 0), /* MX6Q_PAD_KEY_COL1__UART5_TXD */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 5, 0x0000, 0), /* MX6Q_PAD_KEY_COL1__GPIO_4_8 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 6, 0x0000, 0), /* MX6Q_PAD_KEY_COL1__USDHC1_VSELECT */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL1, 0x05D0, 0x0200, 7, 0x0000, 0), /* MX6Q_PAD_KEY_COL1__PL301MX_PER1_HADR_1 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 0, 0x0800, 2), /* MX6Q_PAD_KEY_ROW1__ECSPI1_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 1, 0x0000, 0), /* MX6Q_PAD_KEY_ROW1__ENET_COL */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 2, 0x07CC, 1), /* MX6Q_PAD_KEY_ROW1__AUDMUX_AUD5_RXD */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 3, 0x0000, 0), /* MX6Q_PAD_KEY_ROW1__KPP_ROW_1 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 4, 0x0940, 1), /* MX6Q_PAD_KEY_ROW1__UART5_RXD */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 5, 0x0000, 0), /* MX6Q_PAD_KEY_ROW1__GPIO_4_9 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 6, 0x0000, 0), /* MX6Q_PAD_KEY_ROW1__USDHC2_VSELECT */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW1, 0x05D4, 0x0204, 7, 0x0000, 0), /* MX6Q_PAD_KEY_ROW1__PL301_PER1_HADDR_2 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 0, 0x0804, 2), /* MX6Q_PAD_KEY_COL2__ECSPI1_SS1 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 1, 0x0850, 1), /* MX6Q_PAD_KEY_COL2__ENET_RDATA_2 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 2, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__CAN1_TXCAN */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 3, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__KPP_COL_2 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 4, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__ENET_MDC */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 5, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__GPIO_4_10 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 6, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__USBOH3_H1_PWRCTL_WKP */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL2, 0x05D8, 0x0208, 7, 0x0000, 0), /* MX6Q_PAD_KEY_COL2__PL301_PER1_HADDR_3 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 0, 0x0808, 1), /* MX6Q_PAD_KEY_ROW2__ECSPI1_SS2 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 1, 0x0000, 0), /* MX6Q_PAD_KEY_ROW2__ENET_TDATA_2 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 2, 0x07E4, 0), /* MX6Q_PAD_KEY_ROW2__CAN1_RXCAN */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 3, 0x0000, 0), /* MX6Q_PAD_KEY_ROW2__KPP_ROW_2 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 4, 0x0000, 0), /* MX6Q_PAD_KEY_ROW2__USDHC2_VSELECT */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 5, 0x0000, 0), /* MX6Q_PAD_KEY_ROW2__GPIO_4_11 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 6, 0x088C, 1), /* MX6Q_PAD_KEY_ROW2__HDMI_TX_CEC_LINE */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW2, 0x05DC, 0x020C, 7, 0x0000, 0), /* MX6Q_PAD_KEY_ROW2__PL301_PER1_HADR_4 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 0, 0x080C, 1), /* MX6Q_PAD_KEY_COL3__ECSPI1_SS3 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 1, 0x0000, 0), /* MX6Q_PAD_KEY_COL3__ENET_CRS */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 2, 0x0890, 1), /* MX6Q_PAD_KEY_COL3__HDMI_TX_DDC_SCL */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 3, 0x0000, 0), /* MX6Q_PAD_KEY_COL3__KPP_COL_3 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 4, 0x08A0, 1), /* MX6Q_PAD_KEY_COL3__I2C2_SCL */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 5, 0x0000, 0), /* MX6Q_PAD_KEY_COL3__GPIO_4_12 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 6, 0x0914, 2), /* MX6Q_PAD_KEY_COL3__SPDIF_IN1 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL3, 0x05E0, 0x0210, 7, 0x0000, 0), /* MX6Q_PAD_KEY_COL3__PL301_PER1_HADR_5 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 0, 0x0000, 0), /* MX6Q_PAD_KEY_ROW3__OSC32K_32K_OUT */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 1, 0x07B0, 0), /* MX6Q_PAD_KEY_ROW3__ASRC_ASRC_EXT_CLK */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 2, 0x0894, 1), /* MX6Q_PAD_KEY_ROW3__HDMI_TX_DDC_SDA */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 3, 0x0000, 0), /* MX6Q_PAD_KEY_ROW3__KPP_ROW_3 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 4, 0x08A4, 1), /* MX6Q_PAD_KEY_ROW3__I2C2_SDA */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 5, 0x0000, 0), /* MX6Q_PAD_KEY_ROW3__GPIO_4_13 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 6, 0x0000, 0), /* MX6Q_PAD_KEY_ROW3__USDHC1_VSELECT */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW3, 0x05E4, 0x0214, 7, 0x0000, 0), /* MX6Q_PAD_KEY_ROW3__PL301_PER1_HADR_6 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 0, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__CAN2_TXCAN */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 1, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__IPU1_SISG_4 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 2, 0x0944, 1), /* MX6Q_PAD_KEY_COL4__USBOH3_USBOTG_OC */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 3, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__KPP_COL_4 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 4, 0x093C, 0), /* MX6Q_PAD_KEY_COL4__UART5_RTS */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 5, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__GPIO_4_14 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 6, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__MMDC_DEBUG_49 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_COL4, 0x05E8, 0x0218, 7, 0x0000, 0), /* MX6Q_PAD_KEY_COL4__PL301_PER1_HADDR_7 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 0, 0x07E8, 0), /* MX6Q_PAD_KEY_ROW4__CAN2_RXCAN */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 1, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__IPU1_SISG_5 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 2, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__USBOH3_USBOTG_PWR */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 3, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__KPP_ROW_4 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 4, 0x093C, 1), /* MX6Q_PAD_KEY_ROW4__UART5_CTS */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 5, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__GPIO_4_15 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 6, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__MMDC_DEBUG_50 */
+	IMX_PIN_REG(MX6Q_PAD_KEY_ROW4, 0x05EC, 0x021C, 7, 0x0000, 0), /* MX6Q_PAD_KEY_ROW4__PL301_PER1_HADR_8 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 0, 0x0000, 0), /* MX6Q_PAD_GPIO_0__CCM_CLKO */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 2, 0x08E8, 0), /* MX6Q_PAD_GPIO_0__KPP_COL_5 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 3, 0x07B0, 1), /* MX6Q_PAD_GPIO_0__ASRC_ASRC_EXT_CLK */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_0__EPIT1_EPITO */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_0__GPIO_1_0 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_0__USBOH3_USBH1_PWR */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_0, 0x05F0, 0x0220, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_0__SNVS_HP_WRAP_SNVS_VIO5 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 0, 0x086C, 1), /* MX6Q_PAD_GPIO_1__ESAI1_SCKR */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_1__WDOG2_WDOG_B */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 2, 0x08F4, 0), /* MX6Q_PAD_GPIO_1__KPP_ROW_5 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_1__PWM2_PWMO */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_1__GPIO_1_1 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_1__USDHC1_CD */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_1__SRC_TESTER_ACK */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 0, 0x085C, 1), /* MX6Q_PAD_GPIO_9__ESAI1_FSR */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_9__WDOG1_WDOG_B */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 2, 0x08EC, 0), /* MX6Q_PAD_GPIO_9__KPP_COL_6 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_9__CCM_REF_EN_B */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_9__PWM1_PWMO */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_9__GPIO_1_9 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 6, 0x094C, 1), /* MX6Q_PAD_GPIO_9__USDHC1_WP */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_9, 0x05F8, 0x0228, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_9__SRC_EARLY_RST */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 0, 0x0864, 1), /* MX6Q_PAD_GPIO_3__ESAI1_HCKR */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_3__OBSERVE_MUX_INT_OUT0 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 2, 0x08A8, 1), /* MX6Q_PAD_GPIO_3__I2C3_SCL */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_3__ANATOP_24M_OUT */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_3__CCM_CLKO2 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_3__GPIO_1_3 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 6, 0x0948, 1), /* MX6Q_PAD_GPIO_3__USBOH3_USBH1_OC */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_3, 0x05FC, 0x022C, 7, 0x0900, 1), /* MX6Q_PAD_GPIO_3__MLB_MLBCLK */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 0, 0x0870, 1), /* MX6Q_PAD_GPIO_6__ESAI1_SCKT */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_6__OBSERVE_MUX_INT_OUT1 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 2, 0x08AC, 1), /* MX6Q_PAD_GPIO_6__I2C3_SDA */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_6__CCM_CCM_OUT_0 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_6__CSU_CSU_INT_DEB */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_6__GPIO_1_6 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_6__USDHC2_LCTL */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_6, 0x0600, 0x0230, 7, 0x0908, 1), /* MX6Q_PAD_GPIO_6__MLB_MLBSIG */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 0, 0x0860, 1), /* MX6Q_PAD_GPIO_2__ESAI1_FST */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_2__OBSERVE_MUX_INT_OUT2 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 2, 0x08F8, 1), /* MX6Q_PAD_GPIO_2__KPP_ROW_6 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_2__CCM_CCM_OUT_1 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_2__CSU_CSU_ALARM_AUT_0 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_2__GPIO_1_2 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_2__USDHC2_WP */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_2, 0x0604, 0x0234, 7, 0x0904, 1), /* MX6Q_PAD_GPIO_2__MLB_MLBDAT */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 0, 0x0868, 1), /* MX6Q_PAD_GPIO_4__ESAI1_HCKT */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_4__OBSERVE_MUX_INT_OUT3 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 2, 0x08F0, 1), /* MX6Q_PAD_GPIO_4__KPP_COL_7 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_4__CCM_CCM_OUT_2 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_4__CSU_CSU_ALARM_AUT_1 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_4__GPIO_1_4 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_4__USDHC2_CD */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_4, 0x0608, 0x0238, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_4__OCOTP_CRL_WRAR_FUSE_LA */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 0, 0x087C, 1), /* MX6Q_PAD_GPIO_5__ESAI1_TX2_RX3 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_5__OBSERVE_MUX_INT_OUT4 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 2, 0x08FC, 1), /* MX6Q_PAD_GPIO_5__KPP_ROW_7 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_5__CCM_CLKO */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_5__GPIO_1_5 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 6, 0x08A8, 2), /* MX6Q_PAD_GPIO_5__I2C3_SCL */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_5, 0x060C, 0x023C, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_5__CHEETAH_EVENTI */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 0, 0x0884, 1), /* MX6Q_PAD_GPIO_7__ESAI1_TX4_RX1 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_7__ECSPI5_RDY */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 2, 0x0000, 0), /* MX6Q_PAD_GPIO_7__EPIT1_EPITO */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_7__CAN1_TXCAN */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_7__UART2_TXD */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_7__GPIO_1_7 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_7__SPDIF_PLOCK */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_7, 0x0610, 0x0240, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_7__USBOH3_OTGUSB_HST_MODE */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 0, 0x0888, 1), /* MX6Q_PAD_GPIO_8__ESAI1_TX5_RX0 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_8__ANATOP_ANATOP_32K_OUT */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 2, 0x0000, 0), /* MX6Q_PAD_GPIO_8__EPIT2_EPITO */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 3, 0x07E4, 1), /* MX6Q_PAD_GPIO_8__CAN1_RXCAN */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 4, 0x0928, 3), /* MX6Q_PAD_GPIO_8__UART2_RXD */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_8__GPIO_1_8 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_8__SPDIF_SRCLK */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_8, 0x0614, 0x0244, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_8__USBOH3_OTG_PWRCTL_WAK */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 0, 0x0880, 1), /* MX6Q_PAD_GPIO_16__ESAI1_TX3_RX2 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_16__ENET_1588_EVENT2_IN */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 2, 0x083C, 1), /* MX6Q_PAD_GPIO_16__ENET_ETHERNET_REF_OUT */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_16__USDHC1_LCTL */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 4, 0x0914, 3), /* MX6Q_PAD_GPIO_16__SPDIF_IN1 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_16__GPIO_7_11 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 6, 0x08AC, 2), /* MX6Q_PAD_GPIO_16__I2C3_SDA */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_16, 0x0618, 0x0248, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_16__SJC_DE_B */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 0, 0x0874, 0), /* MX6Q_PAD_GPIO_17__ESAI1_TX0 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_17__ENET_1588_EVENT3_IN */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 2, 0x07F0, 1), /* MX6Q_PAD_GPIO_17__CCM_PMIC_RDY */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 3, 0x090C, 1), /* MX6Q_PAD_GPIO_17__SDMA_SDMA_EXT_EVENT_0 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_17__SPDIF_OUT1 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_17__GPIO_7_12 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_17, 0x061C, 0x024C, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_17__SJC_JTAG_ACT */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 0, 0x0878, 0), /* MX6Q_PAD_GPIO_18__ESAI1_TX1 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 1, 0x0844, 1), /* MX6Q_PAD_GPIO_18__ENET_RX_CLK */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 2, 0x0000, 0), /* MX6Q_PAD_GPIO_18__USDHC3_VSELECT */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 3, 0x0910, 1), /* MX6Q_PAD_GPIO_18__SDMA_SDMA_EXT_EVENT_1 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 4, 0x07B0, 2), /* MX6Q_PAD_GPIO_18__ASRC_ASRC_EXT_CLK */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_18__GPIO_7_13 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_18__SNVS_HP_WRA_SNVS_VIO5 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_18, 0x0620, 0x0250, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_18__SRC_SYSTEM_RST */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 0, 0x08E8, 1), /* MX6Q_PAD_GPIO_19__KPP_COL_5 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 1, 0x0000, 0), /* MX6Q_PAD_GPIO_19__ENET_1588_EVENT0_OUT */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 2, 0x0000, 0), /* MX6Q_PAD_GPIO_19__SPDIF_OUT1 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_19__CCM_CLKO */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 4, 0x0000, 0), /* MX6Q_PAD_GPIO_19__ECSPI1_RDY */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 5, 0x0000, 0), /* MX6Q_PAD_GPIO_19__GPIO_4_5 */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 6, 0x0000, 0), /* MX6Q_PAD_GPIO_19__ENET_TX_ER */
+	IMX_PIN_REG(MX6Q_PAD_GPIO_19, 0x0624, 0x0254, 7, 0x0000, 0), /* MX6Q_PAD_GPIO_19__SRC_INT_BOOT */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK__PCIE_CTRL_MUX_12 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK__SDMA_DEBUG_PC_0 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK__GPIO_5_18 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK___MMDC_DEBUG_29 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_PIXCLK, 0x0628, 0x0258, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_PIXCLK__CHEETAH_EVENTO */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__PCIE_CTRL_MUX_13 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 3, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__CCM_CLKO */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__SDMA_DEBUG_PC_1 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__GPIO_5_19 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__MMDC_MMDC_DEBUG_30 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_MCLK, 0x062C, 0x025C, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_MCLK__CHEETAH_TRCTL */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__IPU1_CSI0_DA_EN */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__WEIM_WEIM_D_0 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__PCIE_CTRL_MUX_14 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__SDMA_DEBUG_PC_2 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__GPIO_5_20 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__MMDC_DEBUG_31 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DATA_EN, 0x0630, 0x0260, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DATA_EN__CHEETAH_TRCLK */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__WEIM_WEIM_D_1 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__PCIE_CTRL_MUX_15 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__SDMA_DEBUG_PC_3 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__GPIO_5_21 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__MMDC_DEBUG_32 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_VSYNC, 0x0634, 0x0264, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_VSYNC__CHEETAH_TRACE_0 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__IPU1_CSI0_D_4 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__WEIM_WEIM_D_2 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 2, 0x07F4, 3), /* MX6Q_PAD_CSI0_DAT4__ECSPI1_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 3, 0x08E8, 2), /* MX6Q_PAD_CSI0_DAT4__KPP_COL_5 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__AUDMUX_AUD3_TXC */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__GPIO_5_22 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__MMDC_DEBUG_43 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT4, 0x0638, 0x0268, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT4__CHEETAH_TRACE_1 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__IPU1_CSI0_D_5 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__WEIM_WEIM_D_3 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 2, 0x07FC, 3), /* MX6Q_PAD_CSI0_DAT5__ECSPI1_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 3, 0x08F4, 1), /* MX6Q_PAD_CSI0_DAT5__KPP_ROW_5 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__AUDMUX_AUD3_TXD */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__GPIO_5_23 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__MMDC_MMDC_DEBUG_44 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT5, 0x063C, 0x026C, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT5__CHEETAH_TRACE_2 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__IPU1_CSI0_D_6 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__WEIM_WEIM_D_4 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 2, 0x07F8, 3), /* MX6Q_PAD_CSI0_DAT6__ECSPI1_MISO */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 3, 0x08EC, 1), /* MX6Q_PAD_CSI0_DAT6__KPP_COL_6 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__AUDMUX_AUD3_TXFS */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__GPIO_5_24 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__MMDC_MMDC_DEBUG_45 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT6, 0x0640, 0x0270, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT6__CHEETAH_TRACE_3 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__IPU1_CSI0_D_7 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__WEIM_WEIM_D_5 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 2, 0x0800, 3), /* MX6Q_PAD_CSI0_DAT7__ECSPI1_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 3, 0x08F8, 2), /* MX6Q_PAD_CSI0_DAT7__KPP_ROW_6 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__AUDMUX_AUD3_RXD */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__GPIO_5_25 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__MMDC_MMDC_DEBUG_46 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT7, 0x0644, 0x0274, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT7__CHEETAH_TRACE_4 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT8__IPU1_CSI0_D_8 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT8__WEIM_WEIM_D_6 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 2, 0x0810, 2), /* MX6Q_PAD_CSI0_DAT8__ECSPI2_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 3, 0x08F0, 2), /* MX6Q_PAD_CSI0_DAT8__KPP_COL_7 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 4, 0x089C, 1), /* MX6Q_PAD_CSI0_DAT8__I2C1_SDA */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT8__GPIO_5_26 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT8__MMDC_MMDC_DEBUG_47 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT8, 0x0648, 0x0278, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT8__CHEETAH_TRACE_5 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT9__IPU1_CSI0_D_9 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT9__WEIM_WEIM_D_7 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 2, 0x0818, 2), /* MX6Q_PAD_CSI0_DAT9__ECSPI2_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 3, 0x08FC, 2), /* MX6Q_PAD_CSI0_DAT9__KPP_ROW_7 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 4, 0x0898, 1), /* MX6Q_PAD_CSI0_DAT9__I2C1_SCL */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT9__GPIO_5_27 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT9__MMDC_MMDC_DEBUG_48 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT9, 0x064C, 0x027C, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT9__CHEETAH_TRACE_6 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__IPU1_CSI0_D_10 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__AUDMUX_AUD3_RXC */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 2, 0x0814, 2), /* MX6Q_PAD_CSI0_DAT10__ECSPI2_MISO */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 3, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__UART1_TXD */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__SDMA_DEBUG_PC_4 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__GPIO_5_28 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__MMDC_MMDC_DEBUG_33 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT10, 0x0650, 0x0280, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT10__CHEETAH_TRACE_7 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__IPU1_CSI0_D_11 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__AUDMUX_AUD3_RXFS */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 2, 0x081C, 2), /* MX6Q_PAD_CSI0_DAT11__ECSPI2_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 3, 0x0920, 1), /* MX6Q_PAD_CSI0_DAT11__UART1_RXD */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__SDMA_DEBUG_PC_5 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__GPIO_5_29 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__MMDC_MMDC_DEBUG_34 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT11, 0x0654, 0x0284, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT11__CHEETAH_TRACE_8 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__IPU1_CSI0_D_12 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__WEIM_WEIM_D_8 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__PCIE_CTRL_MUX_16 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 3, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__UART4_TXD */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__GPIO_5_30 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__MMDC_MMDC_DEBUG_35 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT12, 0x0658, 0x0288, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT12__CHEETAH_TRACE_9 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__IPU1_CSI0_D_13 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__WEIM_WEIM_D_9 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__PCIE_CTRL_MUX_17 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 3, 0x0938, 3), /* MX6Q_PAD_CSI0_DAT13__UART4_RXD */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__GPIO_5_31 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__MMDC_MMDC_DEBUG_36 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT13, 0x065C, 0x028C, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT13__CHEETAH_TRACE_10 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__IPU1_CSI0_D_14 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__WEIM_WEIM_D_10 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__PCIE_CTRL_MUX_18 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 3, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__UART5_TXD */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__GPIO_6_0 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__MMDC_MMDC_DEBUG_37 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT14, 0x0660, 0x0290, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT14__CHEETAH_TRACE_11 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__IPU1_CSI0_D_15 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__WEIM_WEIM_D_11 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__PCIE_CTRL_MUX_19 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 3, 0x0940, 3), /* MX6Q_PAD_CSI0_DAT15__UART5_RXD */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__GPIO_6_1 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__MMDC_MMDC_DEBUG_38 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT15, 0x0664, 0x0294, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT15__CHEETAH_TRACE_12 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__IPU1_CSI0_D_16 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__WEIM_WEIM_D_12 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__PCIE_CTRL_MUX_20 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 3, 0x0934, 0), /* MX6Q_PAD_CSI0_DAT16__UART4_RTS */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__GPIO_6_2 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__MMDC_MMDC_DEBUG_39 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT16, 0x0668, 0x0298, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT16__CHEETAH_TRACE_13 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__IPU1_CSI0_D_17 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__WEIM_WEIM_D_13 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__PCIE_CTRL_MUX_21 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 3, 0x0934, 1), /* MX6Q_PAD_CSI0_DAT17__UART4_CTS */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__GPIO_6_3 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__MMDC_MMDC_DEBUG_40 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT17, 0x066C, 0x029C, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT17__CHEETAH_TRACE_14 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__IPU1_CSI0_D_18 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__WEIM_WEIM_D_14 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__PCIE_CTRL_MUX_22 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 3, 0x093C, 2), /* MX6Q_PAD_CSI0_DAT18__UART5_RTS */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__GPIO_6_4 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__MMDC_MMDC_DEBUG_41 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT18, 0x0670, 0x02A0, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT18__CHEETAH_TRACE_15 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 0, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__IPU1_CSI0_D_19 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 1, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__WEIM_WEIM_D_15 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 2, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__PCIE_CTRL_MUX_23 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 3, 0x093C, 3), /* MX6Q_PAD_CSI0_DAT19__UART5_CTS */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 4, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 5, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__GPIO_6_5 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 6, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__MMDC_MMDC_DEBUG_42 */
+	IMX_PIN_REG(MX6Q_PAD_CSI0_DAT19, 0x0674, 0x02A4, 7, 0x0000, 0), /* MX6Q_PAD_CSI0_DAT19__ANATOP_TESTO_9 */
+	IMX_PIN_REG(MX6Q_PAD_JTAG_TMS, 0x0678, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_TMS__SJC_TMS */
+	IMX_PIN_REG(MX6Q_PAD_JTAG_MOD, 0x067C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_MOD__SJC_MOD */
+	IMX_PIN_REG(MX6Q_PAD_JTAG_TRSTB, 0x0680, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_TRSTB__SJC_TRSTB */
+	IMX_PIN_REG(MX6Q_PAD_JTAG_TDI, 0x0684, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_TDI__SJC_TDI */
+	IMX_PIN_REG(MX6Q_PAD_JTAG_TCK, 0x0688, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_TCK__SJC_TCK */
+	IMX_PIN_REG(MX6Q_PAD_JTAG_TDO, 0x068C, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_JTAG_TDO__SJC_TDO */
+	IMX_PIN_REG(MX6Q_PAD_LVDS1_TX3_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS1_TX3_P__LDB_LVDS1_TX3 */
+	IMX_PIN_REG(MX6Q_PAD_LVDS1_TX2_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS1_TX2_P__LDB_LVDS1_TX2 */
+	IMX_PIN_REG(MX6Q_PAD_LVDS1_CLK_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS1_CLK_P__LDB_LVDS1_CLK */
+	IMX_PIN_REG(MX6Q_PAD_LVDS1_TX1_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS1_TX1_P__LDB_LVDS1_TX1 */
+	IMX_PIN_REG(MX6Q_PAD_LVDS1_TX0_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS1_TX0_P__LDB_LVDS1_TX0 */
+	IMX_PIN_REG(MX6Q_PAD_LVDS0_TX3_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3 */
+	IMX_PIN_REG(MX6Q_PAD_LVDS0_CLK_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK */
+	IMX_PIN_REG(MX6Q_PAD_LVDS0_TX2_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2 */
+	IMX_PIN_REG(MX6Q_PAD_LVDS0_TX1_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1 */
+	IMX_PIN_REG(MX6Q_PAD_LVDS0_TX0_P, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0 */
+	IMX_PIN_REG(MX6Q_PAD_TAMPER, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_TAMPER__SNVS_LP_WRAP_SNVS_TD1 */
+	IMX_PIN_REG(MX6Q_PAD_PMIC_ON_REQ, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_PMIC_ON_REQ__SNVS_LPWRAP_WKALM */
+	IMX_PIN_REG(MX6Q_PAD_PMIC_STBY_REQ, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_PMIC_STBY_REQ__CCM_PMIC_STBYRQ */
+	IMX_PIN_REG(MX6Q_PAD_POR_B, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_POR_B__SRC_POR_B */
+	IMX_PIN_REG(MX6Q_PAD_BOOT_MODE1, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_BOOT_MODE1__SRC_BOOT_MODE_1 */
+	IMX_PIN_REG(MX6Q_PAD_RESET_IN_B, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_RESET_IN_B__SRC_RESET_B */
+	IMX_PIN_REG(MX6Q_PAD_BOOT_MODE0, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_BOOT_MODE0__SRC_BOOT_MODE_0 */
+	IMX_PIN_REG(MX6Q_PAD_TEST_MODE, NO_PAD, NO_MUX, 0, 0x0000, 0), /* MX6Q_PAD_TEST_MODE__TCU_TEST_MODE */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__USDHC3_DAT7 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 1, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__UART1_TXD */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__PCIE_CTRL_MUX_24 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__USBOH3_UH3_DFD_OUT_0 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__USBOH3_UH2_DFD_OUT_0 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__GPIO_6_17 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__MIPI_CORE_DPHY_IN_12 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT7, 0x0690, 0x02A8, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT7__USBPHY2_CLK20DIV */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__USDHC3_DAT6 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 1, 0x0920, 3), /* MX6Q_PAD_SD3_DAT6__UART1_RXD */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__PCIE_CTRL_MUX_25 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__USBOH3_UH3_DFD_OUT_1 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__USBOH3_UH2_DFD_OUT_1 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__GPIO_6_18 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__MIPI_CORE_DPHY_IN_13 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT6, 0x0694, 0x02AC, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT6__ANATOP_TESTO_10 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__USDHC3_DAT5 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 1, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__UART2_TXD */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__PCIE_CTRL_MUX_26 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__USBOH3_UH3_DFD_OUT_2 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__USBOH3_UH2_DFD_OUT_2 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__GPIO_7_0 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__MIPI_CORE_DPHY_IN_14 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT5, 0x0698, 0x02B0, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT5__ANATOP_TESTO_11 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__USDHC3_DAT4 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 1, 0x0928, 5), /* MX6Q_PAD_SD3_DAT4__UART2_RXD */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__PCIE_CTRL_MUX_27 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__USBOH3_UH3_DFD_OUT_3 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__USBOH3_UH2_DFD_OUT_3 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__GPIO_7_1 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__MIPI_CORE_DPHY_IN_15 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT4, 0x069C, 0x02B4, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT4__ANATOP_TESTO_12 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 0, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__USDHC3_CMD */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 1, 0x0924, 2), /* MX6Q_PAD_SD3_CMD__UART2_CTS */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 2, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__CAN1_TXCAN */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 3, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__USBOH3_UH3_DFD_OUT_4 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 4, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__USBOH3_UH2_DFD_OUT_4 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 5, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__GPIO_7_2 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 6, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__MIPI_CORE_DPHY_IN_16 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CMD, 0x06A0, 0x02B8, 7, 0x0000, 0), /* MX6Q_PAD_SD3_CMD__ANATOP_TESTO_13 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 0, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__USDHC3_CLK */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 1, 0x0924, 3), /* MX6Q_PAD_SD3_CLK__UART2_RTS */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 2, 0x07E4, 2), /* MX6Q_PAD_SD3_CLK__CAN1_RXCAN */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 3, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__USBOH3_UH3_DFD_OUT_5 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 4, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__USBOH3_UH2_DFD_OUT_5 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 5, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__GPIO_7_3 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 6, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__MIPI_CORE_DPHY_IN_17 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_CLK, 0x06A4, 0x02BC, 7, 0x0000, 0), /* MX6Q_PAD_SD3_CLK__ANATOP_TESTO_14 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__USDHC3_DAT0 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 1, 0x091C, 2), /* MX6Q_PAD_SD3_DAT0__UART1_CTS */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__CAN2_TXCAN */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__USBOH3_UH3_DFD_OUT_6 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__USBOH3_UH2_DFD_OUT_6 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__GPIO_7_4 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__MIPI_CORE_DPHY_IN_18 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT0, 0x06A8, 0x02C0, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT0__ANATOP_TESTO_15 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__USDHC3_DAT1 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 1, 0x091C, 3), /* MX6Q_PAD_SD3_DAT1__UART1_RTS */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 2, 0x07E8, 1), /* MX6Q_PAD_SD3_DAT1__CAN2_RXCAN */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__USBOH3_UH3_DFD_OUT_7 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__USBOH3_UH2_DFD_OUT_7 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__GPIO_7_5 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__MIPI_CORE_DPHY_IN_19 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT1, 0x06AC, 0x02C4, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT1__ANATOP_TESTI_0 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__USDHC3_DAT2 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__PCIE_CTRL_MUX_28 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__USBOH3_UH3_DFD_OUT_8 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__USBOH3_UH2_DFD_OUT_8 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__GPIO_7_6 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__MIPI_CORE_DPHY_IN_20 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT2, 0x06B0, 0x02C8, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT2__ANATOP_TESTI_1 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 0, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__USDHC3_DAT3 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 1, 0x092C, 4), /* MX6Q_PAD_SD3_DAT3__UART3_CTS */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 2, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__PCIE_CTRL_MUX_29 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 3, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__USBOH3_UH3_DFD_OUT_9 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 4, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__USBOH3_UH2_DFD_OUT_9 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 5, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__GPIO_7_7 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 6, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__MIPI_CORE_DPHY_IN_21 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_DAT3, 0x06B4, 0x02CC, 7, 0x0000, 0), /* MX6Q_PAD_SD3_DAT3__ANATOP_TESTI_2 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 0, 0x0000, 0), /* MX6Q_PAD_SD3_RST__USDHC3_RST */
+	IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 1, 0x092C, 5), /* MX6Q_PAD_SD3_RST__UART3_RTS */
+	IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 2, 0x0000, 0), /* MX6Q_PAD_SD3_RST__PCIE_CTRL_MUX_30 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 3, 0x0000, 0), /* MX6Q_PAD_SD3_RST__USBOH3_UH3_DFD_OUT_10 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 4, 0x0000, 0), /* MX6Q_PAD_SD3_RST__USBOH3_UH2_DFD_OUT_10 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 5, 0x0000, 0), /* MX6Q_PAD_SD3_RST__GPIO_7_8 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 6, 0x0000, 0), /* MX6Q_PAD_SD3_RST__MIPI_CORE_DPHY_IN_22 */
+	IMX_PIN_REG(MX6Q_PAD_SD3_RST, 0x06B8, 0x02D0, 7, 0x0000, 0), /* MX6Q_PAD_SD3_RST__ANATOP_ANATOP_TESTI_3 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__RAWNAND_CLE */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__IPU2_SISG_4 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__PCIE_CTRL_MUX_31 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__USBOH3_UH3_DFD_OT11 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__USBOH3_UH2_DFD_OT11 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__GPIO_6_7 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__MIPI_CORE_DPHY_IN23 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CLE, 0x06BC, 0x02D4, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_CLE__TPSMP_HTRANS_0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__RAWNAND_ALE */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__USDHC4_RST */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__PCIE_CTRL_MUX_0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__USBOH3_UH3_DFD_OT12 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__USBOH3_UH2_DFD_OT12 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__GPIO_6_8 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__MIPI_CR_DPHY_IN_24 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_ALE, 0x06C0, 0x02D8, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_ALE__TPSMP_HTRANS_1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__RAWNAND_RESETN */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__IPU2_SISG_5 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__PCIE_CTRL__MUX_1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__USBOH3_UH3_DFDOT13 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__USBOH3_UH2_DFDOT13 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__GPIO_6_9 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__MIPI_CR_DPHY_OUT32 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_WP_B, 0x06C4, 0x02DC, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_WP_B__PL301_PER1_HSIZE_0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__RAWNAND_READY0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__IPU2_DI0_PIN1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__PCIE_CTRL_MUX_2 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__USBOH3_UH3_DFD_OT14 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__USBOH3_UH2_DFD_OT14 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__GPIO_6_10 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__MIPI_CR_DPHY_OUT_33 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_RB0, 0x06C8, 0x02E0, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_RB0__PL301_PER1_HSIZE_1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS0, 0x06CC, 0x02E4, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_CS0__RAWNAND_CE0N */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS0, 0x06CC, 0x02E4, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_CS0__USBOH3_UH3_DFD_OT15 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS0, 0x06CC, 0x02E4, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_CS0__USBOH3_UH2_DFD_OT15 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS0, 0x06CC, 0x02E4, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_CS0__GPIO_6_11 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS0, 0x06CC, 0x02E4, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_CS0__PL301_PER1_HSIZE_2 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__RAWNAND_CE1N */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__USDHC4_VSELECT */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__USDHC3_VSELECT */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__PCIE_CTRL_MUX_3 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__GPIO_6_14 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS1, 0x06D0, 0x02E8, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_CS1__PL301_PER1_HRDYOUT */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__RAWNAND_CE2N */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__IPU1_SISG_0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 2, 0x0874, 1), /* MX6Q_PAD_NANDF_CS2__ESAI1_TX0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__WEIM_WEIM_CRE */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__CCM_CLKO2 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__GPIO_6_15 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS2, 0x06D4, 0x02EC, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_CS2__IPU2_SISG_0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__RAWNAND_CE3N */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__IPU1_SISG_1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 2, 0x0878, 1), /* MX6Q_PAD_NANDF_CS3__ESAI1_TX1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__WEIM_WEIM_A_26 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__PCIE_CTRL_MUX_4 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__GPIO_6_16 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__IPU2_SISG_1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_CS3, 0x06D8, 0x02F0, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_CS3__TPSMP_CLK */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 0, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__USDHC4_CMD */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 1, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__RAWNAND_RDN */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 2, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__UART3_TXD */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 4, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__PCIE_CTRL_MUX_5 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 5, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__GPIO_7_9 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CMD, 0x06DC, 0x02F4, 7, 0x0000, 0), /* MX6Q_PAD_SD4_CMD__TPSMP_HDATA_DIR */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CLK, 0x06E0, 0x02F8, 0, 0x0000, 0), /* MX6Q_PAD_SD4_CLK__USDHC4_CLK */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CLK, 0x06E0, 0x02F8, 1, 0x0000, 0), /* MX6Q_PAD_SD4_CLK__RAWNAND_WRN */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CLK, 0x06E0, 0x02F8, 2, 0x0930, 3), /* MX6Q_PAD_SD4_CLK__UART3_RXD */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CLK, 0x06E0, 0x02F8, 4, 0x0000, 0), /* MX6Q_PAD_SD4_CLK__PCIE_CTRL_MUX_6 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_CLK, 0x06E0, 0x02F8, 5, 0x0000, 0), /* MX6Q_PAD_SD4_CLK__GPIO_7_10 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__RAWNAND_D0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__USDHC1_DAT4 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__GPU3D_GPU_DBG_OUT_0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__USBOH3_UH2_DFD_OUT16 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__USBOH3_UH3_DFD_OUT16 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__GPIO_2_0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__IPU1_IPU_DIAG_BUS_0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D0, 0x06E4, 0x02FC, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D0__IPU2_IPU_DIAG_BUS_0 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__RAWNAND_D1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__USDHC1_DAT5 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__GPU3D_GPU_DEBUG_OUT1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__USBOH3_UH2_DFD_OUT17 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__USBOH3_UH3_DFD_OUT17 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__GPIO_2_1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__IPU1_IPU_DIAG_BUS_1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D1, 0x06E8, 0x0300, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D1__IPU2_IPU_DIAG_BUS_1 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__RAWNAND_D2 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__USDHC1_DAT6 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__GPU3D_GPU_DBG_OUT_2 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__USBOH3_UH2_DFD_OUT18 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__USBOH3_UH3_DFD_OUT18 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__GPIO_2_2 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__IPU1_IPU_DIAG_BUS_2 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D2, 0x06EC, 0x0304, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D2__IPU2_IPU_DIAG_BUS_2 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__RAWNAND_D3 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__USDHC1_DAT7 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__GPU3D_GPU_DBG_OUT_3 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__USBOH3_UH2_DFD_OUT19 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__USBOH3_UH3_DFD_OUT19 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__GPIO_2_3 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__IPU1_IPU_DIAG_BUS_3 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D3, 0x06F0, 0x0308, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D3__IPU2_IPU_DIAG_BUS_3 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__RAWNAND_D4 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__USDHC2_DAT4 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__GPU3D_GPU_DBG_OUT_4 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__USBOH3_UH2_DFD_OUT20 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__USBOH3_UH3_DFD_OUT20 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__GPIO_2_4 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__IPU1_IPU_DIAG_BUS_4 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D4, 0x06F4, 0x030C, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D4__IPU2_IPU_DIAG_BUS_4 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__RAWNAND_D5 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__USDHC2_DAT5 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__GPU3D_GPU_DBG_OUT_5 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__USBOH3_UH2_DFD_OUT21 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__USBOH3_UH3_DFD_OUT21 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__GPIO_2_5 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__IPU1_IPU_DIAG_BUS_5 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D5, 0x06F8, 0x0310, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D5__IPU2_IPU_DIAG_BUS_5 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__RAWNAND_D6 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__USDHC2_DAT6 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__GPU3D_GPU_DBG_OUT_6 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__USBOH3_UH2_DFD_OUT22 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__USBOH3_UH3_DFD_OUT22 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__GPIO_2_6 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__IPU1_IPU_DIAG_BUS_6 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D6, 0x06FC, 0x0314, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D6__IPU2_IPU_DIAG_BUS_6 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 0, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__RAWNAND_D7 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 1, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__USDHC2_DAT7 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 2, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__GPU3D_GPU_DBG_OUT_7 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 3, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__USBOH3_UH2_DFD_OUT23 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 4, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__USBOH3_UH3_DFD_OUT23 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 5, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__GPIO_2_7 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 6, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__IPU1_IPU_DIAG_BUS_7 */
+	IMX_PIN_REG(MX6Q_PAD_NANDF_D7, 0x0700, 0x0318, 7, 0x0000, 0), /* MX6Q_PAD_NANDF_D7__IPU2_IPU_DIAG_BUS_7 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__RAWNAND_D8 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__USDHC4_DAT0 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 2, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__RAWNAND_DQS */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__USBOH3_UH2_DFD_OUT24 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__USBOH3_UH3_DFD_OUT24 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__GPIO_2_8 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__IPU1_IPU_DIAG_BUS_8 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT0, 0x0704, 0x031C, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT0__IPU2_IPU_DIAG_BUS_8 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__RAWNAND_D9 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__USDHC4_DAT1 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 2, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__PWM3_PWMO */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__USBOH3_UH2_DFD_OUT25 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__USBOH3_UH3_DFD_OUT25 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__GPIO_2_9 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__IPU1_IPU_DIAG_BUS_9 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT1, 0x0708, 0x0320, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT1__IPU2_IPU_DIAG_BUS_9 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__RAWNAND_D10 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__USDHC4_DAT2 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 2, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__PWM4_PWMO */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__USBOH3_UH2_DFD_OUT26 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__USBOH3_UH3_DFD_OUT26 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__GPIO_2_10 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__IPU1_IPU_DIAG_BUS_10 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT2, 0x070C, 0x0324, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT2__IPU2_IPU_DIAG_BUS_10 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__RAWNAND_D11 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__USDHC4_DAT3 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__USBOH3_UH2_DFD_OUT27 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__USBOH3_UH3_DFD_OUT27 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__GPIO_2_11 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__IPU1_IPU_DIAG_BUS_11 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT3, 0x0710, 0x0328, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT3__IPU2_IPU_DIAG_BUS_11 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__RAWNAND_D12 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__USDHC4_DAT4 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 2, 0x0928, 6), /* MX6Q_PAD_SD4_DAT4__UART2_RXD */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__USBOH3_UH2_DFD_OUT28 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__USBOH3_UH3_DFD_OUT28 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__GPIO_2_12 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__IPU1_IPU_DIAG_BUS_12 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT4, 0x0714, 0x032C, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT4__IPU2_IPU_DIAG_BUS_12 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__RAWNAND_D13 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__USDHC4_DAT5 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 2, 0x0924, 4), /* MX6Q_PAD_SD4_DAT5__UART2_RTS */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__USBOH3_UH2_DFD_OUT29 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__USBOH3_UH3_DFD_OUT29 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__GPIO_2_13 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__IPU1_IPU_DIAG_BUS_13 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT5, 0x0718, 0x0330, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT5__IPU2_IPU_DIAG_BUS_13 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__RAWNAND_D14 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__USDHC4_DAT6 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 2, 0x0924, 5), /* MX6Q_PAD_SD4_DAT6__UART2_CTS */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__USBOH3_UH2_DFD_OUT30 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__USBOH3_UH3_DFD_OUT30 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__GPIO_2_14 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__IPU1_IPU_DIAG_BUS_14 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT6, 0x071C, 0x0334, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT6__IPU2_IPU_DIAG_BUS_14 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 0, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__RAWNAND_D15 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 1, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__USDHC4_DAT7 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 2, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__UART2_TXD */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 3, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__USBOH3_UH2_DFD_OUT31 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 4, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__USBOH3_UH3_DFD_OUT31 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 5, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__GPIO_2_15 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 6, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__IPU1_IPU_DIAG_BUS_15 */
+	IMX_PIN_REG(MX6Q_PAD_SD4_DAT7, 0x0720, 0x0338, 7, 0x0000, 0), /* MX6Q_PAD_SD4_DAT7__IPU2_IPU_DIAG_BUS_15 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 0, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__USDHC1_DAT1 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 1, 0x0834, 1), /* MX6Q_PAD_SD1_DAT1__ECSPI5_SS0 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 2, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__PWM3_PWMO */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 3, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__GPT_CAPIN2 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 4, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__PCIE_CTRL_MUX_7 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 5, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__GPIO_1_17 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 6, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__HDMI_TX_OPHYDTB_0 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT1, 0x0724, 0x033C, 7, 0x0000, 0), /* MX6Q_PAD_SD1_DAT1__ANATOP_TESTO_8 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 0, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__USDHC1_DAT0 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 1, 0x082C, 1), /* MX6Q_PAD_SD1_DAT0__ECSPI5_MISO */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 2, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__CAAM_WRAP_RNG_OSCOBS */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 3, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__GPT_CAPIN1 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 4, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__PCIE_CTRL_MUX_8 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 5, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__GPIO_1_16 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 6, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__HDMI_TX_OPHYDTB_1 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT0, 0x0728, 0x0340, 7, 0x0000, 0), /* MX6Q_PAD_SD1_DAT0__ANATOP_TESTO_7 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 0, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__USDHC1_DAT3 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 1, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__ECSPI5_SS2 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 2, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__GPT_CMPOUT3 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 3, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__PWM1_PWMO */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 4, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__WDOG2_WDOG_B */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 5, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__GPIO_1_21 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 6, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__WDOG2_WDOG_RST_B_DEB */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT3, 0x072C, 0x0344, 7, 0x0000, 0), /* MX6Q_PAD_SD1_DAT3__ANATOP_TESTO_6 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 0, 0x0000, 0), /* MX6Q_PAD_SD1_CMD__USDHC1_CMD */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 1, 0x0830, 0), /* MX6Q_PAD_SD1_CMD__ECSPI5_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 2, 0x0000, 0), /* MX6Q_PAD_SD1_CMD__PWM4_PWMO */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 3, 0x0000, 0), /* MX6Q_PAD_SD1_CMD__GPT_CMPOUT1 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 5, 0x0000, 0), /* MX6Q_PAD_SD1_CMD__GPIO_1_18 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CMD, 0x0730, 0x0348, 7, 0x0000, 0), /* MX6Q_PAD_SD1_CMD__ANATOP_TESTO_5 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 0, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__USDHC1_DAT2 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 1, 0x0838, 1), /* MX6Q_PAD_SD1_DAT2__ECSPI5_SS1 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 2, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__GPT_CMPOUT2 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 3, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__PWM2_PWMO */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 4, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__WDOG1_WDOG_B */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 5, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__GPIO_1_19 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 6, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__WDOG1_WDOG_RST_B_DEB */
+	IMX_PIN_REG(MX6Q_PAD_SD1_DAT2, 0x0734, 0x034C, 7, 0x0000, 0), /* MX6Q_PAD_SD1_DAT2__ANATOP_TESTO_4 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 0, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__USDHC1_CLK */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 1, 0x0828, 0), /* MX6Q_PAD_SD1_CLK__ECSPI5_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 2, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__OSC32K_32K_OUT */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 3, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__GPT_CLKIN */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 5, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__GPIO_1_20 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 6, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__PHY_DTB_0 */
+	IMX_PIN_REG(MX6Q_PAD_SD1_CLK, 0x0738, 0x0350, 7, 0x0000, 0), /* MX6Q_PAD_SD1_CLK__SATA_PHY_DTB_0 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 0, 0x0000, 0), /* MX6Q_PAD_SD2_CLK__USDHC2_CLK */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 1, 0x0828, 1), /* MX6Q_PAD_SD2_CLK__ECSPI5_SCLK */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 2, 0x08E8, 3), /* MX6Q_PAD_SD2_CLK__KPP_COL_5 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 3, 0x07C0, 1), /* MX6Q_PAD_SD2_CLK__AUDMUX_AUD4_RXFS */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 4, 0x0000, 0), /* MX6Q_PAD_SD2_CLK__PCIE_CTRL_MUX_9 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 5, 0x0000, 0), /* MX6Q_PAD_SD2_CLK__GPIO_1_10 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 6, 0x0000, 0), /* MX6Q_PAD_SD2_CLK__PHY_DTB_1 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CLK, 0x073C, 0x0354, 7, 0x0000, 0), /* MX6Q_PAD_SD2_CLK__SATA_PHY_DTB_1 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 0, 0x0000, 0), /* MX6Q_PAD_SD2_CMD__USDHC2_CMD */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 1, 0x0830, 1), /* MX6Q_PAD_SD2_CMD__ECSPI5_MOSI */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 2, 0x08F4, 2), /* MX6Q_PAD_SD2_CMD__KPP_ROW_5 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 3, 0x07BC, 1), /* MX6Q_PAD_SD2_CMD__AUDMUX_AUD4_RXC */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 4, 0x0000, 0), /* MX6Q_PAD_SD2_CMD__PCIE_CTRL_MUX_10 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_CMD, 0x0740, 0x0358, 5, 0x0000, 0), /* MX6Q_PAD_SD2_CMD__GPIO_1_11 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 0, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__USDHC2_DAT3 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 1, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ECSPI5_SS3 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 2, 0x08EC, 2), /* MX6Q_PAD_SD2_DAT3__KPP_COL_6 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 3, 0x07C4, 1), /* MX6Q_PAD_SD2_DAT3__AUDMUX_AUD4_TXC */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 4, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__PCIE_CTRL_MUX_11 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__GPIO_1_12 */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__SJC_DONE */
+	IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 */
+};
+
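Each IMX_PIN_REG() entry above encodes one (pad, mux mode) combination: the pad-control register offset, the mux register offset, the ALT mode number (0..7), and, where a peripheral input has to be steered back from the pad, a daisy-chain input-select register plus the value to write into it (0x0000, 0 when no input select is involved). The macro itself lives in the shared pinctrl-imx.h header, which is not part of this hunk; as a rough sketch, and assuming the field names used there, it simply fills in a per-pin register descriptor along these lines:

	/*
	 * Sketch only -- field names and widths are assumptions; the real
	 * definitions are in drivers/pinctrl/pinctrl-imx.h.
	 */
	struct imx_pin_reg {
		u16 pid;	/* pad id, e.g. MX6Q_PAD_CSI0_DAT12 */
		u16 conf_reg;	/* SW_PAD_CTL offset, e.g. 0x0658 */
		u16 mux_reg;	/* SW_MUX_CTL offset, e.g. 0x0288 */
		u8  mux_mode;	/* ALT function number, 0..7 */
		u16 input_reg;	/* SELECT_INPUT offset, 0 if unused */
		u8  input_val;	/* daisy-chain value for input_reg */
	};

	#define IMX_PIN_REG(id, conf, mux, mode, input, val)		\
		{ .pid = (id), .conf_reg = (conf), .mux_reg = (mux),	\
		  .mux_mode = (mode), .input_reg = (input),		\
		  .input_val = (val), }

The NO_PAD and NO_MUX markers flag pads that have no configurable pad-control or mux register at all (the JTAG, LVDS, TAMPER, PMIC, POR_B, BOOT_MODE, RESET_IN_B and TEST_MODE pads above).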
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx6q_pinctrl_pads[] = {
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD2_DAT1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD2_DAT2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD2_DAT0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_TXC),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_TD0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_TD1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_TD2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_TD3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_RX_CTL),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_RD0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_TX_CTL),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_RD1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_RD2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_RD3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RGMII_RXC),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_A25),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_EB2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D16),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D17),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D18),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D19),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D20),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D21),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D22),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D23),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_EB3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D24),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D25),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D26),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D27),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D28),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D29),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D30),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_D31),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_A24),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_A23),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_A22),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_A21),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_A20),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_A19),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_A18),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_A17),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_A16),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_CS0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_CS1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_OE),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_RW),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_LBA),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_EB0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_EB1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA8),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA9),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA10),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA11),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA12),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA13),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA14),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_DA15),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_WAIT),
+	IMX_PINCTRL_PIN(MX6Q_PAD_EIM_BCLK),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DI0_DISP_CLK),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DI0_PIN15),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DI0_PIN2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DI0_PIN3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DI0_PIN4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT8),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT9),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT10),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT11),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT12),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT13),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT14),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT15),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT16),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT17),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT18),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT19),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT20),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT21),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT22),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DISP0_DAT23),
+	IMX_PINCTRL_PIN(MX6Q_PAD_ENET_MDIO),
+	IMX_PINCTRL_PIN(MX6Q_PAD_ENET_REF_CLK),
+	IMX_PINCTRL_PIN(MX6Q_PAD_ENET_RX_ER),
+	IMX_PINCTRL_PIN(MX6Q_PAD_ENET_CRS_DV),
+	IMX_PINCTRL_PIN(MX6Q_PAD_ENET_RXD1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_ENET_RXD0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_ENET_TX_EN),
+	IMX_PINCTRL_PIN(MX6Q_PAD_ENET_TXD1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_ENET_TXD0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_ENET_MDC),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D40),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D41),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D42),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D43),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D44),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D45),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D46),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D47),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D32),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D33),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D34),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D35),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D36),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D37),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D38),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D39),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D24),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D25),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D26),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D27),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D28),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D29),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D30),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D31),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D16),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D17),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D18),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D19),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D20),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D21),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D22),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D23),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A8),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A9),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A10),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A11),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A12),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A13),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A14),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_A15),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_CAS),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_CS0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_CS1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_RAS),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_RESET),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDBA0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDBA1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDCLK_0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDBA2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDCKE0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDCLK_1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDCKE1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDODT0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDODT1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDWE),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D8),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D9),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D10),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D11),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D12),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D13),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D14),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D15),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D48),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D49),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D50),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D51),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D52),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D53),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D54),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D55),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D56),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_SDQS7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D57),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D58),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D59),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D60),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_DQM7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D61),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D62),
+	IMX_PINCTRL_PIN(MX6Q_PAD_DRAM_D63),
+	IMX_PINCTRL_PIN(MX6Q_PAD_KEY_COL0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_KEY_ROW0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_KEY_COL1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_KEY_ROW1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_KEY_COL2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_KEY_ROW2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_KEY_COL3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_KEY_ROW3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_KEY_COL4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_KEY_ROW4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_9),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_8),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_16),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_17),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_18),
+	IMX_PINCTRL_PIN(MX6Q_PAD_GPIO_19),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_PIXCLK),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_MCLK),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DATA_EN),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_VSYNC),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT8),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT9),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT10),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT11),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT12),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT13),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT14),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT15),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT16),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT17),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT18),
+	IMX_PINCTRL_PIN(MX6Q_PAD_CSI0_DAT19),
+	IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_TMS),
+	IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_MOD),
+	IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_TRSTB),
+	IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_TDI),
+	IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_TCK),
+	IMX_PINCTRL_PIN(MX6Q_PAD_JTAG_TDO),
+	IMX_PINCTRL_PIN(MX6Q_PAD_LVDS1_TX3_P),
+	IMX_PINCTRL_PIN(MX6Q_PAD_LVDS1_TX2_P),
+	IMX_PINCTRL_PIN(MX6Q_PAD_LVDS1_CLK_P),
+	IMX_PINCTRL_PIN(MX6Q_PAD_LVDS1_TX1_P),
+	IMX_PINCTRL_PIN(MX6Q_PAD_LVDS1_TX0_P),
+	IMX_PINCTRL_PIN(MX6Q_PAD_LVDS0_TX3_P),
+	IMX_PINCTRL_PIN(MX6Q_PAD_LVDS0_CLK_P),
+	IMX_PINCTRL_PIN(MX6Q_PAD_LVDS0_TX2_P),
+	IMX_PINCTRL_PIN(MX6Q_PAD_LVDS0_TX1_P),
+	IMX_PINCTRL_PIN(MX6Q_PAD_LVDS0_TX0_P),
+	IMX_PINCTRL_PIN(MX6Q_PAD_TAMPER),
+	IMX_PINCTRL_PIN(MX6Q_PAD_PMIC_ON_REQ),
+	IMX_PINCTRL_PIN(MX6Q_PAD_PMIC_STBY_REQ),
+	IMX_PINCTRL_PIN(MX6Q_PAD_POR_B),
+	IMX_PINCTRL_PIN(MX6Q_PAD_BOOT_MODE1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_RESET_IN_B),
+	IMX_PINCTRL_PIN(MX6Q_PAD_BOOT_MODE0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_TEST_MODE),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_CMD),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_CLK),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_DAT3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD3_RST),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_CLE),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_ALE),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_WP_B),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_RB0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_CS0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_CS1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_CS2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_CS3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD4_CMD),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD4_CLK),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_D0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_D1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_D2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_D3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_D4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_D5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_D6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_NANDF_D7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD4_DAT0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD4_DAT1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD4_DAT2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD4_DAT3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD4_DAT4),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD4_DAT5),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD4_DAT6),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD4_DAT7),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD1_DAT1),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD1_DAT0),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD1_DAT3),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD1_CMD),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD1_DAT2),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD1_CLK),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD2_CLK),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD2_CMD),
+	IMX_PINCTRL_PIN(MX6Q_PAD_SD2_DAT3),
+};
+
+static struct imx_pinctrl_soc_info imx6q_pinctrl_info = {
+	.pins = imx6q_pinctrl_pads,
+	.npins = ARRAY_SIZE(imx6q_pinctrl_pads),
+	.pin_regs = imx6q_pin_regs,
+	.npin_regs = ARRAY_SIZE(imx6q_pin_regs),
+};
+
+static struct of_device_id imx6q_pinctrl_of_match[] __devinitdata = {
+	{ .compatible = "fsl,imx6q-iomuxc", },
+	{ /* sentinel */ }
+};
+
+static int __devinit imx6q_pinctrl_probe(struct platform_device *pdev)
+{
+	return imx_pinctrl_probe(pdev, &imx6q_pinctrl_info);
+}
+
+static struct platform_driver imx6q_pinctrl_driver = {
+	.driver = {
+		.name = "imx6q-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(imx6q_pinctrl_of_match),
+	},
+	.probe = imx6q_pinctrl_probe,
+	.remove = __devexit_p(imx_pinctrl_remove),
+};
+
+static int __init imx6q_pinctrl_init(void)
+{
+	return platform_driver_register(&imx6q_pinctrl_driver);
+}
+arch_initcall(imx6q_pinctrl_init);
+
+static void __exit imx6q_pinctrl_exit(void)
+{
+	platform_driver_unregister(&imx6q_pinctrl_driver);
+}
+module_exit(imx6q_pinctrl_exit);
+MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>");
+MODULE_DESCRIPTION("Freescale IMX6Q pinctrl driver");
+MODULE_LICENSE("GPL v2");
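The tables above are pure data: imx6q_pinctrl_info hands the pad descriptors (for registration with the pinctrl core) and the pin_regs table (for register programming) to imx_pinctrl_probe(), and everything that actually touches the IOMUXC is done by the shared imx core in pinctrl-imx.c, which is not part of this hunk. As a hedged sketch of what that core is expected to do with one table entry when a group is enabled -- the function name, the exact sequence and the zero-sentinel assumption for NO_MUX/NO_PAD are all assumptions here:

	/* Illustrative only: not the real pinctrl-imx.c code. */
	static void imx6q_apply_pin_sketch(void __iomem *iomuxc,
					   const struct imx_pin_reg *r,
					   unsigned long pad_config)
	{
		if (r->mux_reg)			/* skip NO_MUX entries (assumed 0) */
			writel(r->mux_mode, iomuxc + r->mux_reg);
		if (r->input_reg)		/* daisy-chain input select */
			writel(r->input_val, iomuxc + r->input_reg);
		if (r->conf_reg && pad_config)	/* skip NO_PAD entries (assumed 0) */
			writel(pad_config, iomuxc + r->conf_reg);
	}

A board's device tree then only needs an iomuxc node compatible with "fsl,imx6q-iomuxc" for imx6q_pinctrl_probe() to run; the driver is registered at arch_initcall() time so the pin controller is available before ordinary device drivers probe.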
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c
new file mode 100644
index 0000000..556e45a
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-mxs.c
@@ -0,0 +1,528 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "core.h"
+#include "pinctrl-mxs.h"
+
+#define SUFFIX_LEN	4
+
+struct mxs_pinctrl_data {
+	struct device *dev;
+	struct pinctrl_dev *pctl;
+	void __iomem *base;
+	struct mxs_pinctrl_soc_data *soc;
+};
+
+static int mxs_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
+
+	return d->soc->ngroups;
+}
+
+static const char *mxs_get_group_name(struct pinctrl_dev *pctldev,
+				      unsigned group)
+{
+	struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
+
+	return d->soc->groups[group].name;
+}
+
+static int mxs_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
+			      const unsigned **pins, unsigned *num_pins)
+{
+	struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = d->soc->groups[group].pins;
+	*num_pins = d->soc->groups[group].npins;
+
+	return 0;
+}
+
+static void mxs_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+			     unsigned offset)
+{
+	seq_printf(s, " %s", dev_name(pctldev->dev));
+}
+
+static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
+			      struct device_node *np,
+			      struct pinctrl_map **map, unsigned *num_maps)
+{
+	struct pinctrl_map *new_map;
+	char *group = NULL;
+	unsigned new_num = 1;
+	unsigned long config = 0;
+	unsigned long *pconfig;
+	int length = strlen(np->name) + SUFFIX_LEN;
+	bool purecfg = false;
+	u32 val, reg;
+	int ret, i = 0;
+
+	/* Check for pin config node which has no 'reg' property */
+	if (of_property_read_u32(np, "reg", &reg))
+		purecfg = true;
+
+	ret = of_property_read_u32(np, "fsl,drive-strength", &val);
+	if (!ret)
+		config = val | MA_PRESENT;
+	ret = of_property_read_u32(np, "fsl,voltage", &val);
+	if (!ret)
+		config |= val << VOL_SHIFT | VOL_PRESENT;
+	ret = of_property_read_u32(np, "fsl,pull-up", &val);
+	if (!ret)
+		config |= val << PULL_SHIFT | PULL_PRESENT;
+
+	/* Check for group node which has both mux and config settings */
+	if (!purecfg && config)
+		new_num = 2;
+
+	new_map = kzalloc(sizeof(*new_map) * new_num, GFP_KERNEL);
+	if (!new_map)
+		return -ENOMEM;
+
+	if (!purecfg) {
+		new_map[i].type = PIN_MAP_TYPE_MUX_GROUP;
+		new_map[i].data.mux.function = np->name;
+
+		/* Compose group name */
+		group = kzalloc(length, GFP_KERNEL);
+		if (!group)
+			return -ENOMEM;
+		snprintf(group, length, "%s.%d", np->name, reg);
+		new_map[i].data.mux.group = group;
+		i++;
+	}
+
+	if (config) {
+		pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL);
+		if (!pconfig) {
+			ret = -ENOMEM;
+			goto free;
+		}
+
+		new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+		new_map[i].data.configs.group_or_pin = purecfg ? np->name :
+								 group;
+		new_map[i].data.configs.configs = pconfig;
+		new_map[i].data.configs.num_configs = 1;
+	}
+
+	*map = new_map;
+	*num_maps = new_num;
+
+	return 0;
+
+free:
+	kfree(new_map);
+	return ret;
+}
+
+static void mxs_dt_free_map(struct pinctrl_dev *pctldev,
+			    struct pinctrl_map *map, unsigned num_maps)
+{
+	int i;
+
+	for (i = 0; i < num_maps; i++) {
+		if (map[i].type == PIN_MAP_TYPE_MUX_GROUP)
+			kfree(map[i].data.mux.group);
+		if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
+			kfree(map[i].data.configs.configs);
+	}
+
+	kfree(map);
+}
+
+static struct pinctrl_ops mxs_pinctrl_ops = {
+	.get_groups_count = mxs_get_groups_count,
+	.get_group_name = mxs_get_group_name,
+	.get_group_pins = mxs_get_group_pins,
+	.pin_dbg_show = mxs_pin_dbg_show,
+	.dt_node_to_map = mxs_dt_node_to_map,
+	.dt_free_map = mxs_dt_free_map,
+};
+
+static int mxs_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+	struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
+
+	return d->soc->nfunctions;
+}
+
+static const char *mxs_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+					     unsigned function)
+{
+	struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
+
+	return d->soc->functions[function].name;
+}
+
+static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
+				       unsigned group,
+				       const char * const **groups,
+				       unsigned * const num_groups)
+{
+	struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = d->soc->functions[group].groups;
+	*num_groups = d->soc->functions[group].ngroups;
+
+	return 0;
+}
+
+static int mxs_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned selector,
+			      unsigned group)
+{
+	struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
+	struct mxs_group *g = &d->soc->groups[group];
+	void __iomem *reg;
+	u8 bank, shift;
+	u16 pin;
+	int i;
+
+	for (i = 0; i < g->npins; i++) {
+		bank = PINID_TO_BANK(g->pins[i]);
+		pin = PINID_TO_PIN(g->pins[i]);
+		reg = d->base + d->soc->regs->muxsel;
+		reg += bank * 0x20 + pin / 16 * 0x10;
+		shift = pin % 16 * 2;
+
+		writel(0x3 << shift, reg + CLR);
+		writel(g->muxsel[i] << shift, reg + SET);
+	}
+
+	return 0;
+}
+
+static struct pinmux_ops mxs_pinmux_ops = {
+	.get_functions_count = mxs_pinctrl_get_funcs_count,
+	.get_function_name = mxs_pinctrl_get_func_name,
+	.get_function_groups = mxs_pinctrl_get_func_groups,
+	.enable = mxs_pinctrl_enable,
+};
+
+static int mxs_pinconf_get(struct pinctrl_dev *pctldev,
+			   unsigned pin, unsigned long *config)
+{
+	return -ENOTSUPP;
+}
+
+static int mxs_pinconf_set(struct pinctrl_dev *pctldev,
+			   unsigned pin, unsigned long config)
+{
+	return -ENOTSUPP;
+}
+
+static int mxs_pinconf_group_get(struct pinctrl_dev *pctldev,
+				 unsigned group, unsigned long *config)
+{
+	struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
+
+	*config = d->soc->groups[group].config;
+
+	return 0;
+}
+
+static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
+				 unsigned group, unsigned long config)
+{
+	struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
+	struct mxs_group *g = &d->soc->groups[group];
+	void __iomem *reg;
+	u8 ma, vol, pull, bank, shift;
+	u16 pin;
+	int i;
+
+	ma = CONFIG_TO_MA(config);
+	vol = CONFIG_TO_VOL(config);
+	pull = CONFIG_TO_PULL(config);
+
+	for (i = 0; i < g->npins; i++) {
+		bank = PINID_TO_BANK(g->pins[i]);
+		pin = PINID_TO_PIN(g->pins[i]);
+
+		/* drive */
+		reg = d->base + d->soc->regs->drive;
+		reg += bank * 0x40 + pin / 8 * 0x10;
+
+		/* mA */
+		if (config & MA_PRESENT) {
+			shift = pin % 8 * 4;
+			writel(0x3 << shift, reg + CLR);
+			writel(ma << shift, reg + SET);
+		}
+
+		/* vol */
+		if (config & VOL_PRESENT) {
+			shift = pin % 8 * 4 + 2;
+			if (vol)
+				writel(1 << shift, reg + SET);
+			else
+				writel(1 << shift, reg + CLR);
+		}
+
+		/* pull */
+		if (config & PULL_PRESENT) {
+			reg = d->base + d->soc->regs->pull;
+			reg += bank * 0x10;
+			shift = pin;
+			if (pull)
+				writel(1 << shift, reg + SET);
+			else
+				writel(1 << shift, reg + CLR);
+		}
+	}
+
+	/* cache the config value for mxs_pinconf_group_get() */
+	g->config = config;
+
+	return 0;
+}
+
+static void mxs_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+				 struct seq_file *s, unsigned pin)
+{
+	/* Not supported */
+}
+
+static void mxs_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
+				       struct seq_file *s, unsigned group)
+{
+	unsigned long config;
+
+	if (!mxs_pinconf_group_get(pctldev, group, &config))
+		seq_printf(s, "0x%lx", config);
+}
+
+struct pinconf_ops mxs_pinconf_ops = {
+	.pin_config_get = mxs_pinconf_get,
+	.pin_config_set = mxs_pinconf_set,
+	.pin_config_group_get = mxs_pinconf_group_get,
+	.pin_config_group_set = mxs_pinconf_group_set,
+	.pin_config_dbg_show = mxs_pinconf_dbg_show,
+	.pin_config_group_dbg_show = mxs_pinconf_group_dbg_show,
+};
+
+static struct pinctrl_desc mxs_pinctrl_desc = {
+	.pctlops = &mxs_pinctrl_ops,
+	.pmxops = &mxs_pinmux_ops,
+	.confops = &mxs_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+static int __devinit mxs_pinctrl_parse_group(struct platform_device *pdev,
+					     struct device_node *np, int idx,
+					     const char **out_name)
+{
+	struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
+	struct mxs_group *g = &d->soc->groups[idx];
+	struct property *prop;
+	const char *propname = "fsl,pinmux-ids";
+	char *group;
+	int length = strlen(np->name) + SUFFIX_LEN;
+	int i;
+	u32 val;
+
+	group = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
+	if (!group)
+		return -ENOMEM;
+	if (of_property_read_u32(np, "reg", &val))
+		snprintf(group, length, "%s", np->name);
+	else
+		snprintf(group, length, "%s.%d", np->name, val);
+	g->name = group;
+
+	prop = of_find_property(np, propname, &length);
+	if (!prop)
+		return -EINVAL;
+	g->npins = length / sizeof(u32);
+
+	g->pins = devm_kzalloc(&pdev->dev, g->npins * sizeof(*g->pins),
+			       GFP_KERNEL);
+	if (!g->pins)
+		return -ENOMEM;
+
+	g->muxsel = devm_kzalloc(&pdev->dev, g->npins * sizeof(*g->muxsel),
+				 GFP_KERNEL);
+	if (!g->muxsel)
+		return -ENOMEM;
+
+	of_property_read_u32_array(np, propname, g->pins, g->npins);
+	for (i = 0; i < g->npins; i++) {
+		g->muxsel[i] = MUXID_TO_MUXSEL(g->pins[i]);
+		g->pins[i] = MUXID_TO_PINID(g->pins[i]);
+	}
+
+	if (out_name)
+		*out_name = g->name;
+
+	return 0;
+}
+
+static int __devinit mxs_pinctrl_probe_dt(struct platform_device *pdev,
+					  struct mxs_pinctrl_data *d)
+{
+	struct mxs_pinctrl_soc_data *soc = d->soc;
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *child;
+	struct mxs_function *f;
+	const char *gpio_compat = "fsl,mxs-gpio";
+	const char *fn, *fnull = "";
+	int i = 0, idxf = 0, idxg = 0;
+	int ret;
+	u32 val;
+
+	child = of_get_next_child(np, NULL);
+	if (!child) {
+		dev_err(&pdev->dev, "no group is defined\n");
+		return -ENOENT;
+	}
+
+	/* Count total functions and groups */
+	fn = fnull;
+	for_each_child_of_node(np, child) {
+		if (of_device_is_compatible(child, gpio_compat))
+			continue;
+		soc->ngroups++;
+		/* Skip pure pinconf node */
+		if (of_property_read_u32(child, "reg", &val))
+			continue;
+		if (strcmp(fn, child->name)) {
+			fn = child->name;
+			soc->nfunctions++;
+		}
+	}
+
+	soc->functions = devm_kzalloc(&pdev->dev, soc->nfunctions *
+				      sizeof(*soc->functions), GFP_KERNEL);
+	if (!soc->functions)
+		return -ENOMEM;
+
+	soc->groups = devm_kzalloc(&pdev->dev, soc->ngroups *
+				   sizeof(*soc->groups), GFP_KERNEL);
+	if (!soc->groups)
+		return -ENOMEM;
+
+	/* Count groups for each function */
+	fn = fnull;
+	f = &soc->functions[idxf];
+	for_each_child_of_node(np, child) {
+		if (of_device_is_compatible(child, gpio_compat))
+			continue;
+		if (of_property_read_u32(child, "reg", &val))
+			continue;
+		if (strcmp(fn, child->name)) {
+			f = &soc->functions[idxf++];
+			f->name = fn = child->name;
+		}
+		f->ngroups++;
+	}
+
+	/* Get groups for each function */
+	idxf = 0;
+	fn = fnull;
+	for_each_child_of_node(np, child) {
+		if (of_device_is_compatible(child, gpio_compat))
+			continue;
+		if (of_property_read_u32(child, "reg", &val)) {
+			ret = mxs_pinctrl_parse_group(pdev, child,
+						      idxg++, NULL);
+			if (ret)
+				return ret;
+			continue;
+		}
+
+		if (strcmp(fn, child->name)) {
+			f = &soc->functions[idxf++];
+			f->groups = devm_kzalloc(&pdev->dev, f->ngroups *
+						 sizeof(*f->groups),
+						 GFP_KERNEL);
+			if (!f->groups)
+				return -ENOMEM;
+			fn = child->name;
+			i = 0;
+		}
+		ret = mxs_pinctrl_parse_group(pdev, child, idxg++,
+					      &f->groups[i++]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int __devinit mxs_pinctrl_probe(struct platform_device *pdev,
+				struct mxs_pinctrl_soc_data *soc)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct mxs_pinctrl_data *d;
+	int ret;
+
+	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->dev = &pdev->dev;
+	d->soc = soc;
+
+	d->base = of_iomap(np, 0);
+	if (!d->base)
+		return -EADDRNOTAVAIL;
+
+	mxs_pinctrl_desc.pins = d->soc->pins;
+	mxs_pinctrl_desc.npins = d->soc->npins;
+	mxs_pinctrl_desc.name = dev_name(&pdev->dev);
+
+	platform_set_drvdata(pdev, d);
+
+	ret = mxs_pinctrl_probe_dt(pdev, d);
+	if (ret) {
+		dev_err(&pdev->dev, "dt probe failed: %d\n", ret);
+		goto err;
+	}
+
+	d->pctl = pinctrl_register(&mxs_pinctrl_desc, &pdev->dev, d);
+	if (!d->pctl) {
+		dev_err(&pdev->dev, "Couldn't register MXS pinctrl driver\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	iounmap(d->base);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mxs_pinctrl_probe);
+
+int __devexit mxs_pinctrl_remove(struct platform_device *pdev)
+{
+	struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
+
+	pinctrl_unregister(d->pctl);
+	iounmap(d->base);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mxs_pinctrl_remove);
diff --git a/drivers/pinctrl/pinctrl-mxs.h b/drivers/pinctrl/pinctrl-mxs.h
new file mode 100644
index 0000000..fdd88d0b
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-mxs.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __PINCTRL_MXS_H
+#define __PINCTRL_MXS_H
+
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#define SET	0x4
+#define CLR	0x8
+#define TOG	0xc
+
+#define MXS_PINCTRL_PIN(pin)	PINCTRL_PIN(pin, #pin)
+#define PINID(bank, pin)	((bank) * 32 + (pin))
+
+/*
+ * pinmux-id bit field definitions
+ *
+ * bank:	15..12	(4)
+ * pin:		11..4	(8)
+ * muxsel:	3..0	(4)
+ */
+#define MUXID_TO_PINID(m)	PINID((m) >> 12 & 0xf, (m) >> 4 & 0xff)
+#define MUXID_TO_MUXSEL(m)	((m) & 0xf)
+
+#define PINID_TO_BANK(p)	((p) >> 5)
+#define PINID_TO_PIN(p)		((p) % 32)
+
+/*
+ * pin config bit field definitions
+ *
+ * pull-up:	6..5	(2)
+ * voltage:	4..3	(2)
+ * mA:		2..0	(3)
+ *
+ * MSB of each field is presence bit for the config.
+ */
+#define PULL_PRESENT		(1 << 6)
+#define PULL_SHIFT		5
+#define VOL_PRESENT		(1 << 4)
+#define VOL_SHIFT		3
+#define MA_PRESENT		(1 << 2)
+#define MA_SHIFT		0
+#define CONFIG_TO_PULL(c)	((c) >> PULL_SHIFT & 0x1)
+#define CONFIG_TO_VOL(c)	((c) >> VOL_SHIFT & 0x1)
+#define CONFIG_TO_MA(c)		((c) >> MA_SHIFT & 0x3)
+
+struct mxs_function {
+	const char *name;
+	const char **groups;
+	unsigned ngroups;
+};
+
+struct mxs_group {
+	const char *name;
+	unsigned int *pins;
+	unsigned npins;
+	u8 *muxsel;
+	u8 config;
+};
+
+struct mxs_regs {
+	u16 muxsel;
+	u16 drive;
+	u16 pull;
+};
+
+struct mxs_pinctrl_soc_data {
+	const struct mxs_regs *regs;
+	const struct pinctrl_pin_desc *pins;
+	unsigned npins;
+	struct mxs_function *functions;
+	unsigned nfunctions;
+	struct mxs_group *groups;
+	unsigned ngroups;
+};
+
+int mxs_pinctrl_probe(struct platform_device *pdev,
+		      struct mxs_pinctrl_soc_data *soc);
+int mxs_pinctrl_remove(struct platform_device *pdev);
+
+#endif /* __PINCTRL_MXS_H */
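
For illustration (this snippet is not part of the patch), a standalone sketch of the bit-field encodings defined in pinctrl-mxs.h above; the pinmux-id 0x2103, drive-strength 2 and pull-up 1 are arbitrary example values:

/* Decode one hypothetical fsl,pinmux-ids cell and pack one pin config word
 * the way mxs_dt_node_to_map() does, using the macros from pinctrl-mxs.h. */
#include <stdio.h>

#define PINID(bank, pin)	((bank) * 32 + (pin))
#define MUXID_TO_PINID(m)	PINID((m) >> 12 & 0xf, (m) >> 4 & 0xff)
#define MUXID_TO_MUXSEL(m)	((m) & 0xf)
#define PINID_TO_BANK(p)	((p) >> 5)
#define PINID_TO_PIN(p)		((p) % 32)

#define PULL_PRESENT		(1 << 6)
#define PULL_SHIFT		5
#define MA_PRESENT		(1 << 2)
#define MA_SHIFT		0
#define CONFIG_TO_PULL(c)	((c) >> PULL_SHIFT & 0x1)
#define CONFIG_TO_MA(c)		((c) >> MA_SHIFT & 0x3)

int main(void)
{
	unsigned muxid = 0x2103;			/* one fsl,pinmux-ids cell */
	unsigned pinid = MUXID_TO_PINID(muxid);
	unsigned long config = (2 | MA_PRESENT) |	/* fsl,drive-strength = <2> */
			       (1 << PULL_SHIFT | PULL_PRESENT); /* fsl,pull-up = <1> */

	printf("pinid %u (bank %u, pin %u), muxsel %u\n", pinid,
	       PINID_TO_BANK(pinid), PINID_TO_PIN(pinid),
	       MUXID_TO_MUXSEL(muxid));		/* pinid 80 (bank 2, pin 16), muxsel 3 */
	printf("config 0x%lx -> mA code %lu, pull %lu\n", config,
	       CONFIG_TO_MA(config), CONFIG_TO_PULL(config));	/* 0x66 -> 2, 1 */
	return 0;
}
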
diff --git a/drivers/pinctrl/pinctrl-pxa3xx.c b/drivers/pinctrl/pinctrl-pxa3xx.c
index 079dce0..f14cd6b 100644
--- a/drivers/pinctrl/pinctrl-pxa3xx.c
+++ b/drivers/pinctrl/pinctrl-pxa3xx.c
@@ -25,20 +25,18 @@
 	.pin_base	= 0,
 };
 
-static int pxa3xx_list_groups(struct pinctrl_dev *pctrldev, unsigned selector)
+static int pxa3xx_get_groups_count(struct pinctrl_dev *pctrldev)
 {
 	struct pxa3xx_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
-	if (selector >= info->num_grps)
-		return -EINVAL;
-	return 0;
+
+	return info->num_grps;
 }
 
 static const char *pxa3xx_get_group_name(struct pinctrl_dev *pctrldev,
 					 unsigned selector)
 {
 	struct pxa3xx_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
-	if (selector >= info->num_grps)
-		return NULL;
+
 	return info->grps[selector].name;
 }
 
@@ -48,25 +46,23 @@
 				 unsigned *num_pins)
 {
 	struct pxa3xx_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
-	if (selector >= info->num_grps)
-		return -EINVAL;
+
 	*pins = info->grps[selector].pins;
 	*num_pins = info->grps[selector].npins;
 	return 0;
 }
 
 static struct pinctrl_ops pxa3xx_pctrl_ops = {
-	.list_groups	= pxa3xx_list_groups,
+	.get_groups_count = pxa3xx_get_groups_count,
 	.get_group_name	= pxa3xx_get_group_name,
 	.get_group_pins	= pxa3xx_get_group_pins,
 };
 
-static int pxa3xx_pmx_list_func(struct pinctrl_dev *pctrldev, unsigned func)
+static int pxa3xx_pmx_get_funcs_count(struct pinctrl_dev *pctrldev)
 {
 	struct pxa3xx_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
-	if (func >= info->num_funcs)
-		return -EINVAL;
-	return 0;
+
+	return info->num_funcs;
 }
 
 static const char *pxa3xx_pmx_get_func_name(struct pinctrl_dev *pctrldev,
@@ -142,11 +138,6 @@
 	return 0;
 }
 
-static void pxa3xx_pmx_disable(struct pinctrl_dev *pctrldev, unsigned func,
-			       unsigned group)
-{
-}
-
 static int pxa3xx_pmx_request_gpio(struct pinctrl_dev *pctrldev,
 				   struct pinctrl_gpio_range *range,
 				   unsigned pin)
@@ -170,11 +161,10 @@
 }
 
 static struct pinmux_ops pxa3xx_pmx_ops = {
-	.list_functions		= pxa3xx_pmx_list_func,
+	.get_functions_count	= pxa3xx_pmx_get_funcs_count,
 	.get_function_name	= pxa3xx_pmx_get_func_name,
 	.get_function_groups	= pxa3xx_pmx_get_groups,
 	.enable			= pxa3xx_pmx_enable,
-	.disable		= pxa3xx_pmx_disable,
 	.gpio_request_enable	= pxa3xx_pmx_request_gpio,
 };
 
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index 6b3534c..ba15b1a 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -853,18 +853,14 @@
 	SIRFSOC_PIN_GROUP("gpsgrp", gps_pins),
 };
 
-static int sirfsoc_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
+static int sirfsoc_get_groups_count(struct pinctrl_dev *pctldev)
 {
-	if (selector >= ARRAY_SIZE(sirfsoc_pin_groups))
-		return -EINVAL;
-	return 0;
+	return ARRAY_SIZE(sirfsoc_pin_groups);
 }
 
 static const char *sirfsoc_get_group_name(struct pinctrl_dev *pctldev,
 				       unsigned selector)
 {
-	if (selector >= ARRAY_SIZE(sirfsoc_pin_groups))
-		return NULL;
 	return sirfsoc_pin_groups[selector].name;
 }
 
@@ -872,8 +868,6 @@
 			       const unsigned **pins,
 			       unsigned *num_pins)
 {
-	if (selector >= ARRAY_SIZE(sirfsoc_pin_groups))
-		return -EINVAL;
 	*pins = sirfsoc_pin_groups[selector].pins;
 	*num_pins = sirfsoc_pin_groups[selector].num_pins;
 	return 0;
@@ -886,7 +880,7 @@
 }
 
 static struct pinctrl_ops sirfsoc_pctrl_ops = {
-	.list_groups = sirfsoc_list_groups,
+	.get_groups_count = sirfsoc_get_groups_count,
 	.get_group_name = sirfsoc_get_group_name,
 	.get_group_pins = sirfsoc_get_group_pins,
 	.pin_dbg_show = sirfsoc_pin_dbg_show,
@@ -1033,11 +1027,9 @@
 	sirfsoc_pinmux_endisable(spmx, selector, false);
 }
 
-static int sirfsoc_pinmux_list_funcs(struct pinctrl_dev *pmxdev, unsigned selector)
+static int sirfsoc_pinmux_get_funcs_count(struct pinctrl_dev *pmxdev)
 {
-	if (selector >= ARRAY_SIZE(sirfsoc_pmx_functions))
-		return -EINVAL;
-	return 0;
+	return ARRAY_SIZE(sirfsoc_pmx_functions);
 }
 
 static const char *sirfsoc_pinmux_get_func_name(struct pinctrl_dev *pctldev,
@@ -1074,9 +1066,9 @@
 }
 
 static struct pinmux_ops sirfsoc_pinmux_ops = {
-	.list_functions = sirfsoc_pinmux_list_funcs,
 	.enable = sirfsoc_pinmux_enable,
 	.disable = sirfsoc_pinmux_disable,
+	.get_functions_count = sirfsoc_pinmux_get_funcs_count,
 	.get_function_name = sirfsoc_pinmux_get_func_name,
 	.get_function_groups = sirfsoc_pinmux_get_groups,
 	.gpio_request_enable = sirfsoc_pinmux_request_gpio,
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index 9b32968..2c98fba 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -23,9 +23,11 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/pinctrl/pinconf.h>
+#include <linux/slab.h>
 
 #include <mach/pinconf-tegra.h>
 
@@ -53,15 +55,11 @@
 	writel(val, pmx->regs[bank] + reg);
 }
 
-static int tegra_pinctrl_list_groups(struct pinctrl_dev *pctldev,
-				     unsigned group)
+static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
 {
 	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 
-	if (group >= pmx->soc->ngroups)
-		return -EINVAL;
-
-	return 0;
+	return pmx->soc->ngroups;
 }
 
 static const char *tegra_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
@@ -69,9 +67,6 @@
 {
 	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 
-	if (group >= pmx->soc->ngroups)
-		return NULL;
-
 	return pmx->soc->groups[group].name;
 }
 
@@ -82,9 +77,6 @@
 {
 	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 
-	if (group >= pmx->soc->ngroups)
-		return -EINVAL;
-
 	*pins = pmx->soc->groups[group].pins;
 	*num_pins = pmx->soc->groups[group].npins;
 
@@ -98,22 +90,221 @@
 	seq_printf(s, " " DRIVER_NAME);
 }
 
+static int reserve_map(struct pinctrl_map **map, unsigned *reserved_maps,
+		       unsigned *num_maps, unsigned reserve)
+{
+	unsigned old_num = *reserved_maps;
+	unsigned new_num = *num_maps + reserve;
+	struct pinctrl_map *new_map;
+
+	if (old_num >= new_num)
+		return 0;
+
+	new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
+	if (!new_map)
+		return -ENOMEM;
+
+	memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
+
+	*map = new_map;
+	*reserved_maps = new_num;
+
+	return 0;
+}
+
+static int add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
+		       unsigned *num_maps, const char *group,
+		       const char *function)
+{
+	if (*num_maps == *reserved_maps)
+		return -ENOSPC;
+
+	(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+	(*map)[*num_maps].data.mux.group = group;
+	(*map)[*num_maps].data.mux.function = function;
+	(*num_maps)++;
+
+	return 0;
+}
+
+static int add_map_configs(struct pinctrl_map **map, unsigned *reserved_maps,
+			   unsigned *num_maps, const char *group,
+			   unsigned long *configs, unsigned num_configs)
+{
+	unsigned long *dup_configs;
+
+	if (*num_maps == *reserved_maps)
+		return -ENOSPC;
+
+	dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
+			      GFP_KERNEL);
+	if (!dup_configs)
+		return -ENOMEM;
+
+	(*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+	(*map)[*num_maps].data.configs.group_or_pin = group;
+	(*map)[*num_maps].data.configs.configs = dup_configs;
+	(*map)[*num_maps].data.configs.num_configs = num_configs;
+	(*num_maps)++;
+
+	return 0;
+}
+
+static int add_config(unsigned long **configs, unsigned *num_configs,
+		      unsigned long config)
+{
+	unsigned old_num = *num_configs;
+	unsigned new_num = old_num + 1;
+	unsigned long *new_configs;
+
+	new_configs = krealloc(*configs, sizeof(*new_configs) * new_num,
+			       GFP_KERNEL);
+	if (!new_configs)
+		return -ENOMEM;
+
+	new_configs[old_num] = config;
+
+	*configs = new_configs;
+	*num_configs = new_num;
+
+	return 0;
+}
+
+void tegra_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
+			       struct pinctrl_map *map, unsigned num_maps)
+{
+	int i;
+
+	for (i = 0; i < num_maps; i++)
+		if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
+			kfree(map[i].data.configs.configs);
+
+	kfree(map);
+}
+
+static const struct cfg_param {
+	const char *property;
+	enum tegra_pinconf_param param;
+} cfg_params[] = {
+	{"nvidia,pull",			TEGRA_PINCONF_PARAM_PULL},
+	{"nvidia,tristate",		TEGRA_PINCONF_PARAM_TRISTATE},
+	{"nvidia,enable-input",		TEGRA_PINCONF_PARAM_ENABLE_INPUT},
+	{"nvidia,open-drain",		TEGRA_PINCONF_PARAM_OPEN_DRAIN},
+	{"nvidia,lock",			TEGRA_PINCONF_PARAM_LOCK},
+	{"nvidia,io-reset",		TEGRA_PINCONF_PARAM_IORESET},
+	{"nvidia,high-speed-mode",	TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE},
+	{"nvidia,schmitt",		TEGRA_PINCONF_PARAM_SCHMITT},
+	{"nvidia,low-power-mode",	TEGRA_PINCONF_PARAM_LOW_POWER_MODE},
+	{"nvidia,pull-down-strength",	TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH},
+	{"nvidia,pull-up-strength",	TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH},
+	{"nvidia,slew-rate-falling",	TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING},
+	{"nvidia,slew-rate-rising",	TEGRA_PINCONF_PARAM_SLEW_RATE_RISING},
+};
+
+int tegra_pinctrl_dt_subnode_to_map(struct device_node *np,
+				    struct pinctrl_map **map,
+				    unsigned *reserved_maps,
+				    unsigned *num_maps)
+{
+	int ret, i;
+	const char *function;
+	u32 val;
+	unsigned long config;
+	unsigned long *configs = NULL;
+	unsigned num_configs = 0;
+	unsigned reserve;
+	struct property *prop;
+	const char *group;
+
+	ret = of_property_read_string(np, "nvidia,function", &function);
+	if (ret < 0)
+		function = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
+		ret = of_property_read_u32(np, cfg_params[i].property, &val);
+		if (!ret) {
+			config = TEGRA_PINCONF_PACK(cfg_params[i].param, val);
+			ret = add_config(&configs, &num_configs, config);
+			if (ret < 0)
+				goto exit;
+		}
+	}
+
+	reserve = 0;
+	if (function != NULL)
+		reserve++;
+	if (num_configs)
+		reserve++;
+	ret = of_property_count_strings(np, "nvidia,pins");
+	if (ret < 0)
+		goto exit;
+	reserve *= ret;
+
+	ret = reserve_map(map, reserved_maps, num_maps, reserve);
+	if (ret < 0)
+		goto exit;
+
+	of_property_for_each_string(np, "nvidia,pins", prop, group) {
+		if (function) {
+			ret = add_map_mux(map, reserved_maps, num_maps,
+					  group, function);
+			if (ret < 0)
+				goto exit;
+		}
+
+		if (num_configs) {
+			ret = add_map_configs(map, reserved_maps, num_maps,
+					      group, configs, num_configs);
+			if (ret < 0)
+				goto exit;
+		}
+	}
+
+	ret = 0;
+
+exit:
+	kfree(configs);
+	return ret;
+}
+
+int tegra_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+				 struct device_node *np_config,
+				 struct pinctrl_map **map, unsigned *num_maps)
+{
+	unsigned reserved_maps;
+	struct device_node *np;
+	int ret;
+
+	reserved_maps = 0;
+	*map = NULL;
+	*num_maps = 0;
+
+	for_each_child_of_node(np_config, np) {
+		ret = tegra_pinctrl_dt_subnode_to_map(np, map, &reserved_maps,
+						      num_maps);
+		if (ret < 0) {
+			tegra_pinctrl_dt_free_map(pctldev, *map, *num_maps);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static struct pinctrl_ops tegra_pinctrl_ops = {
-	.list_groups = tegra_pinctrl_list_groups,
+	.get_groups_count = tegra_pinctrl_get_groups_count,
 	.get_group_name = tegra_pinctrl_get_group_name,
 	.get_group_pins = tegra_pinctrl_get_group_pins,
 	.pin_dbg_show = tegra_pinctrl_pin_dbg_show,
+	.dt_node_to_map = tegra_pinctrl_dt_node_to_map,
+	.dt_free_map = tegra_pinctrl_dt_free_map,
 };
 
-static int tegra_pinctrl_list_funcs(struct pinctrl_dev *pctldev,
-				    unsigned function)
+static int tegra_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
 {
 	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 
-	if (function >= pmx->soc->nfunctions)
-		return -EINVAL;
-
-	return 0;
+	return pmx->soc->nfunctions;
 }
 
 static const char *tegra_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
@@ -121,9 +312,6 @@
 {
 	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 
-	if (function >= pmx->soc->nfunctions)
-		return NULL;
-
 	return pmx->soc->functions[function].name;
 }
 
@@ -134,9 +322,6 @@
 {
 	struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 
-	if (function >= pmx->soc->nfunctions)
-		return -EINVAL;
-
 	*groups = pmx->soc->functions[function].groups;
 	*num_groups = pmx->soc->functions[function].ngroups;
 
@@ -151,8 +336,6 @@
 	int i;
 	u32 val;
 
-	if (group >= pmx->soc->ngroups)
-		return -EINVAL;
 	g = &pmx->soc->groups[group];
 
 	if (g->mux_reg < 0)
@@ -180,8 +363,6 @@
 	const struct tegra_pingroup *g;
 	u32 val;
 
-	if (group >= pmx->soc->ngroups)
-		return;
 	g = &pmx->soc->groups[group];
 
 	if (g->mux_reg < 0)
@@ -194,7 +375,7 @@
 }
 
 static struct pinmux_ops tegra_pinmux_ops = {
-	.list_functions = tegra_pinctrl_list_funcs,
+	.get_functions_count = tegra_pinctrl_get_funcs_count,
 	.get_function_name = tegra_pinctrl_get_func_name,
 	.get_function_groups = tegra_pinctrl_get_func_groups,
 	.enable = tegra_pinctrl_enable,
@@ -324,8 +505,6 @@
 	s16 reg;
 	u32 val, mask;
 
-	if (group >= pmx->soc->ngroups)
-		return -EINVAL;
 	g = &pmx->soc->groups[group];
 
 	ret = tegra_pinconf_reg(pmx, g, param, &bank, &reg, &bit, &width);
@@ -353,8 +532,6 @@
 	s16 reg;
 	u32 val, mask;
 
-	if (group >= pmx->soc->ngroups)
-		return -EINVAL;
 	g = &pmx->soc->groups[group];
 
 	ret = tegra_pinconf_reg(pmx, g, param, &bank, &reg, &bit, &width);
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 26eb8cc..05d0299 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -836,18 +836,14 @@
 	},
 };
 
-static int u300_list_groups(struct pinctrl_dev *pctldev, unsigned selector)
+static int u300_get_groups_count(struct pinctrl_dev *pctldev)
 {
-	if (selector >= ARRAY_SIZE(u300_pin_groups))
-		return -EINVAL;
-	return 0;
+	return ARRAY_SIZE(u300_pin_groups);
 }
 
 static const char *u300_get_group_name(struct pinctrl_dev *pctldev,
 				       unsigned selector)
 {
-	if (selector >= ARRAY_SIZE(u300_pin_groups))
-		return NULL;
 	return u300_pin_groups[selector].name;
 }
 
@@ -855,8 +851,6 @@
 			       const unsigned **pins,
 			       unsigned *num_pins)
 {
-	if (selector >= ARRAY_SIZE(u300_pin_groups))
-		return -EINVAL;
 	*pins = u300_pin_groups[selector].pins;
 	*num_pins = u300_pin_groups[selector].num_pins;
 	return 0;
@@ -869,7 +863,7 @@
 }
 
 static struct pinctrl_ops u300_pctrl_ops = {
-	.list_groups = u300_list_groups,
+	.get_groups_count = u300_get_groups_count,
 	.get_group_name = u300_get_group_name,
 	.get_group_pins = u300_get_group_pins,
 	.pin_dbg_show = u300_pin_dbg_show,
@@ -991,11 +985,9 @@
 	u300_pmx_endisable(upmx, selector, false);
 }
 
-static int u300_pmx_list_funcs(struct pinctrl_dev *pctldev, unsigned selector)
+static int u300_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
 {
-	if (selector >= ARRAY_SIZE(u300_pmx_functions))
-		return -EINVAL;
-	return 0;
+	return ARRAY_SIZE(u300_pmx_functions);
 }
 
 static const char *u300_pmx_get_func_name(struct pinctrl_dev *pctldev,
@@ -1014,7 +1006,7 @@
 }
 
 static struct pinmux_ops u300_pmx_ops = {
-	.list_functions = u300_pmx_list_funcs,
+	.get_functions_count = u300_pmx_get_funcs_count,
 	.get_function_name = u300_pmx_get_func_name,
 	.get_function_groups = u300_pmx_get_groups,
 	.enable = u300_pmx_enable,
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 4e62783..3d5ac73 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -33,22 +33,25 @@
 int pinmux_check_ops(struct pinctrl_dev *pctldev)
 {
 	const struct pinmux_ops *ops = pctldev->desc->pmxops;
+	unsigned nfuncs;
 	unsigned selector = 0;
 
 	/* Check that we implement required operations */
-	if (!ops->list_functions ||
+	if (!ops ||
+	    !ops->get_functions_count ||
 	    !ops->get_function_name ||
 	    !ops->get_function_groups ||
-	    !ops->enable ||
-	    !ops->disable)
+	    !ops->enable) {
+		dev_err(pctldev->dev, "pinmux ops lacks necessary functions\n");
 		return -EINVAL;
-
+	}
 	/* Check that all functions registered have names */
-	while (ops->list_functions(pctldev, selector) >= 0) {
+	nfuncs = ops->get_functions_count(pctldev);
+	while (selector < nfuncs) {
 		const char *fname = ops->get_function_name(pctldev,
 							   selector);
 		if (!fname) {
-			pr_err("pinmux ops has no name for function%u\n",
+			dev_err(pctldev->dev, "pinmux ops has no name for function%u\n",
 				selector);
 			return -EINVAL;
 		}
@@ -85,20 +88,23 @@
 	const struct pinmux_ops *ops = pctldev->desc->pmxops;
 	int status = -EINVAL;
 
-	dev_dbg(pctldev->dev, "request pin %d for %s\n", pin, owner);
-
 	desc = pin_desc_get(pctldev, pin);
 	if (desc == NULL) {
 		dev_err(pctldev->dev,
-			"pin is not registered so it cannot be requested\n");
+			"pin %d is not registered so it cannot be requested\n",
+			pin);
 		goto out;
 	}
 
+	dev_dbg(pctldev->dev, "request pin %d (%s) for %s\n",
+		pin, desc->name, owner);
+
 	if (gpio_range) {
 		/* There's no need to support multiple GPIO requests */
 		if (desc->gpio_owner) {
 			dev_err(pctldev->dev,
-				"pin already requested\n");
+				"pin %s already requested by %s; cannot claim for %s\n",
+				desc->name, desc->gpio_owner, owner);
 			goto out;
 		}
 
@@ -106,7 +112,8 @@
 	} else {
 		if (desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
 			dev_err(pctldev->dev,
-				"pin already requested\n");
+				"pin %s already requested by %s; cannot claim for %s\n",
+				desc->name, desc->mux_owner, owner);
 			goto out;
 		}
 
@@ -139,8 +146,7 @@
 		status = 0;
 
 	if (status) {
-		dev_err(pctldev->dev, "->request on device %s failed for pin %d\n",
-		       pctldev->desc->name, pin);
+		dev_err(pctldev->dev, "request() failed for pin %d\n", pin);
 		module_put(pctldev->owner);
 	}
 
@@ -157,7 +163,7 @@
 out:
 	if (status)
 		dev_err(pctldev->dev, "pin-%d (%s) status %d\n",
-		       pin, owner, status);
+			pin, owner, status);
 
 	return status;
 }
@@ -287,10 +293,11 @@
 					const char *function)
 {
 	const struct pinmux_ops *ops = pctldev->desc->pmxops;
+	unsigned nfuncs = ops->get_functions_count(pctldev);
 	unsigned selector = 0;
 
 	/* See if this pctldev has this function */
-	while (ops->list_functions(pctldev, selector) >= 0) {
+	while (selector < nfuncs) {
 		const char *fname = ops->get_function_name(pctldev,
 							   selector);
 
@@ -319,18 +326,32 @@
 	const unsigned *pins;
 	unsigned num_pins;
 
-	setting->data.mux.func =
-		pinmux_func_name_to_selector(pctldev, map->data.mux.function);
-	if (setting->data.mux.func < 0)
-		return setting->data.mux.func;
+	if (!pmxops) {
+		dev_err(pctldev->dev, "does not support mux function\n");
+		return -EINVAL;
+	}
+
+	ret = pinmux_func_name_to_selector(pctldev, map->data.mux.function);
+	if (ret < 0) {
+		dev_err(pctldev->dev, "invalid function %s in map table\n",
+			map->data.mux.function);
+		return ret;
+	}
+	setting->data.mux.func = ret;
 
 	ret = pmxops->get_function_groups(pctldev, setting->data.mux.func,
 					  &groups, &num_groups);
-	if (ret < 0)
+	if (ret < 0) {
+		dev_err(pctldev->dev, "can't query groups for function %s\n",
+			map->data.mux.function);
 		return ret;
-	if (!num_groups)
+	}
+	if (!num_groups) {
+		dev_err(pctldev->dev,
+			"function %s can't be selected on any group\n",
+			map->data.mux.function);
 		return -EINVAL;
-
+	}
 	if (map->data.mux.group) {
 		bool found = false;
 		group = map->data.mux.group;
@@ -340,15 +361,23 @@
 				break;
 			}
 		}
-		if (!found)
+		if (!found) {
+			dev_err(pctldev->dev,
+				"invalid group \"%s\" for function \"%s\"\n",
+				group, map->data.mux.function);
 			return -EINVAL;
+		}
 	} else {
 		group = groups[0];
 	}
 
-	setting->data.mux.group = pinctrl_get_group_selector(pctldev, group);
-	if (setting->data.mux.group < 0)
-		return setting->data.mux.group;
+	ret = pinctrl_get_group_selector(pctldev, group);
+	if (ret < 0) {
+		dev_err(pctldev->dev, "invalid group %s in map table\n",
+			map->data.mux.group);
+		return ret;
+	}
+	setting->data.mux.group = ret;
 
 	ret = pctlops->get_group_pins(pctldev, setting->data.mux.group, &pins,
 				      &num_pins);
@@ -364,7 +393,7 @@
 		ret = pin_request(pctldev, pins[i], map->dev_name, NULL);
 		if (ret) {
 			dev_err(pctldev->dev,
-				"could not get request pin %d on device %s\n",
+				"could not request pin %d on device %s\n",
 				pins[i], pinctrl_dev_get_name(pctldev));
 			/* On error release all taken pins */
 			i--; /* this pin just failed */
@@ -467,7 +496,8 @@
 		desc->mux_setting = NULL;
 	}
 
-	ops->disable(pctldev, setting->data.mux.func, setting->data.mux.group);
+	if (ops->disable)
+		ops->disable(pctldev, setting->data.mux.func, setting->data.mux.group);
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -477,11 +507,15 @@
 {
 	struct pinctrl_dev *pctldev = s->private;
 	const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
+	unsigned nfuncs;
 	unsigned func_selector = 0;
 
-	mutex_lock(&pinctrl_mutex);
+	if (!pmxops)
+		return 0;
 
-	while (pmxops->list_functions(pctldev, func_selector) >= 0) {
+	mutex_lock(&pinctrl_mutex);
+	nfuncs = pmxops->get_functions_count(pctldev);
+	while (func_selector < nfuncs) {
 		const char *func = pmxops->get_function_name(pctldev,
 							  func_selector);
 		const char * const *groups;
@@ -515,6 +549,9 @@
 	const struct pinmux_ops *pmxops = pctldev->desc->pmxops;
 	unsigned i, pin;
 
+	if (!pmxops)
+		return 0;
+
 	seq_puts(s, "Pinmux settings per pin\n");
 	seq_puts(s, "Format: pin (name): mux_owner gpio_owner hog?\n");
 
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index 6fc4700..d1a98b1c 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -31,12 +31,6 @@
 int pinmux_enable_setting(struct pinctrl_setting const *setting);
 void pinmux_disable_setting(struct pinctrl_setting const *setting);
 
-void pinmux_show_map(struct seq_file *s, struct pinctrl_map const *map);
-void pinmux_show_setting(struct seq_file *s,
-			 struct pinctrl_setting const *setting);
-void pinmux_init_device_debugfs(struct dentry *devroot,
-				struct pinctrl_dev *pctldev);
-
 #else
 
 static inline int pinmux_check_ops(struct pinctrl_dev *pctldev)
@@ -89,6 +83,18 @@
 {
 }
 
+#endif
+
+#if defined(CONFIG_PINMUX) && defined(CONFIG_DEBUG_FS)
+
+void pinmux_show_map(struct seq_file *s, struct pinctrl_map const *map);
+void pinmux_show_setting(struct seq_file *s,
+			 struct pinctrl_setting const *setting);
+void pinmux_init_device_debugfs(struct dentry *devroot,
+				struct pinctrl_dev *pctldev);
+
+#else
+
 static inline void pinmux_show_map(struct seq_file *s,
 				   struct pinctrl_map const *map)
 {
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index bc8384c..639db4d 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -50,7 +50,7 @@
  */
 #undef START_IN_KERNEL_MODE
 
-#define DRV_VER "0.5.24"
+#define DRV_VER "0.5.26"
 
 /*
  * According to the Atom N270 datasheet,
@@ -83,8 +83,8 @@
 #endif
 
 static unsigned int interval = 10;
-static unsigned int fanon = 63000;
-static unsigned int fanoff = 58000;
+static unsigned int fanon = 60000;
+static unsigned int fanoff = 53000;
 static unsigned int verbose;
 static unsigned int fanstate = ACERHDF_FAN_AUTO;
 static char force_bios[16];
@@ -150,6 +150,8 @@
 	{"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
 	{"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
 	{"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
+	/* LT1005u */
+	{"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
 	/* Acer 1410 */
 	{"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
@@ -161,6 +163,7 @@
 	{"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
+	{"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
 	/* Acer 1810xx */
 	{"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1810T",  "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
@@ -183,29 +186,44 @@
 	{"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1810T",  "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
 	{"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
+	{"Acer", "Aspire 1810T",  "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
 	/* Acer 531 */
+	{"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00} },
 	{"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
+	{"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
+	/* Acer 751 */
+	{"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00} },
+	/* Acer 1825 */
+	{"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
+	{"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
+	/* Acer TravelMate 7730 */
+	{"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00} },
 	/* Gateway */
-	{"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
-	{"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
-	{"Gateway", "LT31",   "v1.3103", 0x55, 0x58, {0x9e, 0x00} },
-	{"Gateway", "LT31",   "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
-	{"Gateway", "LT31",   "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
+	{"Gateway", "AOA110", "v0.3103",  0x55, 0x58, {0x21, 0x00} },
+	{"Gateway", "AOA150", "v0.3103",  0x55, 0x58, {0x20, 0x00} },
+	{"Gateway", "LT31",   "v1.3103",  0x55, 0x58, {0x9e, 0x00} },
+	{"Gateway", "LT31",   "v1.3201",  0x55, 0x58, {0x9e, 0x00} },
+	{"Gateway", "LT31",   "v1.3302",  0x55, 0x58, {0x9e, 0x00} },
+	{"Gateway", "LT31",   "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
 	/* Packard Bell */
-	{"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
-	{"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
-	{"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
-	{"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
-	{"Packard Bell", "DOTMU",  "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
-	{"Packard Bell", "DOTMU",  "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
-	{"Packard Bell", "DOTMU",  "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
-	{"Packard Bell", "DOTMU",  "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
-	{"Packard Bell", "DOTMU",  "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
-	{"Packard Bell", "DOTMU",  "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
-	{"Packard Bell", "DOTMU",  "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
-	{"Packard Bell", "DOTMU",  "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
-	{"Packard Bell", "DOTMA",  "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
-	{"Packard Bell", "DOTMA",  "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOA150",  "v0.3104",  0x55, 0x58, {0x21, 0x00} },
+	{"Packard Bell", "DOA150",  "v0.3105",  0x55, 0x58, {0x20, 0x00} },
+	{"Packard Bell", "AOA110",  "v0.3105",  0x55, 0x58, {0x21, 0x00} },
+	{"Packard Bell", "AOA150",  "v0.3105",  0x55, 0x58, {0x20, 0x00} },
+	{"Packard Bell", "ENBFT",   "V1.3118",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "ENBFT",   "V1.3127",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMU",   "v1.3303",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMU",   "v0.3120",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMU",   "v0.3108",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMU",   "v0.3113",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMU",   "v0.3115",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMU",   "v0.3117",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMU",   "v0.3119",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMU",   "v1.3204",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMA",   "v1.3201",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMA",   "v1.3302",  0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTMA",   "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
+	{"Packard Bell", "DOTVR46", "v1.3308",  0x55, 0x58, {0x9e, 0x00} },
 	/* pewpew-terminator */
 	{"", "", "", 0, 0, {0, 0} }
 };
@@ -701,15 +719,20 @@
 MODULE_AUTHOR("Peter Feuerer");
 MODULE_DESCRIPTION("Aspire One temperature and fan driver");
 MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:");
 MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
 MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1825PTZ:");
 MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
+MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:");
 MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
 MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
 MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
 MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:");
 MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnENBFT*:");
 MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTVR46*:");
 
 module_init(acerhdf_init);
 module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index a05fc9c..e6c08ee 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -212,6 +212,7 @@
 		},
 		.driver_data = &quirk_dell_vostro_v130,
 	},
+	{ }
 };
 
 static struct calling_interface_buffer *buffer;
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index f7ba316e..0ffdb3c 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1565,7 +1565,7 @@
 		ips->poll_turbo_status = true;
 
 	if (!ips_get_i915_syms(ips)) {
-		dev_err(&dev->dev, "failed to get i915 symbols, graphics turbo disabled\n");
+		dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n");
 		ips->gpu_turbo_enabled = false;
 	} else {
 		dev_dbg(&dev->dev, "graphics turbo enabled\n");
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 0a3594c..bcbad84 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -78,7 +78,7 @@
 
 	input_set_capability(input, EV_KEY, KEY_POWER);
 
-	error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+	error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND,
 			DRIVER_NAME, input);
 	if (error) {
 		dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index cd9bc3b..5648dad 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -78,9 +78,13 @@
 	depends on PCH_GBE
 	help
 	  This driver adds support for using the PCH EG20T as a PTP
-	  clock. This clock is only useful if your PTP programs are
-	  getting hardware time stamps on the PTP Ethernet packets
-	  using the SO_TIMESTAMPING API.
+	  clock. The hardware supports time stamping of PTP packets
+	  when using the end-to-end delay (E2E) mechanism. The peer
+	  delay mechanism (P2P) is not supported.
+
+	  This clock is only useful if your PTP programs are getting
+	  hardware time stamps on the PTP Ethernet packets using the
+	  SO_TIMESTAMPING API.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called ptp_pch.
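
As a minimal sketch of the SO_TIMESTAMPING usage the help text refers to (the PTP_V2_L4 filter and the pre-opened UDP socket are assumptions; real programs such as ptp4l negotiate these at run time), a user-space program typically programs the NIC via SIOCSHWTSTAMP and then enables hardware time stamps on its socket:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* fallback for older libc headers */
#endif

/* Request hardware time stamps for PTP event packets on @ifname and
 * enable SO_TIMESTAMPING on @sock.  Error handling is abbreviated. */
static int enable_hw_timestamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
	};
	struct ifreq ifr;
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)	/* program the MAC/PHY */
		return -1;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}
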
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index f519a13..1e528b5 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -304,6 +304,12 @@
 }
 EXPORT_SYMBOL(ptp_clock_event);
 
+int ptp_clock_index(struct ptp_clock *ptp)
+{
+	return ptp->index;
+}
+EXPORT_SYMBOL(ptp_clock_index);
+
 /* module operations */
 
 static void __exit ptp_exit(void)
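
The new ptp_clock_index() helper lets a driver discover the index (the N in /dev/ptpN) of its registered clock so it can be reported to user space; the ptp_ixp46x change below stores it for exactly that purpose. A hedged sketch of how a network driver's ethtool time-stamping callback might use it (struct my_priv, its ptp_clock member and my_get_ts_info are placeholders, not taken from this patch):

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/ptp_clock_kernel.h>

/* Hypothetical driver-private data; only the ptp_clock pointer matters here. */
struct my_priv {
	struct ptp_clock *ptp_clock;
};

static int my_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct my_priv *priv = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(priv->ptp_clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}
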
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index 6f2782b..e03c406 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -284,6 +284,7 @@
 {
 	free_irq(MASTER_IRQ, &ixp_clock);
 	free_irq(SLAVE_IRQ, &ixp_clock);
+	ixp46x_phc_index = -1;
 	ptp_clock_unregister(ixp_clock.ptp_clock);
 }
 
@@ -302,6 +303,8 @@
 	if (IS_ERR(ixp_clock.ptp_clock))
 		return PTR_ERR(ixp_clock.ptp_clock);
 
+	ixp46x_phc_index = ptp_clock_index(ixp_clock.ptp_clock);
+
 	__raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend);
 	__raw_writel(1, &ixp_clock.regs->trgt_lo);
 	__raw_writel(0, &ixp_clock.regs->trgt_hi);
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 375eb04..3a9c17e 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/ptp_clock_kernel.h>
+#include <linux/slab.h>
 
 #define STATION_ADDR_LEN	20
 #define PCI_DEVICE_ID_PCH_1588	0x8819
@@ -261,6 +262,7 @@
 
 	ns = ((u64) hi) << 32;
 	ns |= lo;
+	ns <<= TICKS_NS_SHIFT;
 
 	return ns;
 }
@@ -277,6 +279,7 @@
 
 	ns = ((u64) hi) << 32;
 	ns |= lo;
+	ns <<= TICKS_NS_SHIFT;
 
 	return ns;
 }
@@ -306,7 +309,7 @@
  *				    traffic on the  ethernet interface
 * @addr:	pointer to the colon-separated address to be used.
  */
-static int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
 {
 	s32 i;
 	struct pch_dev *chip = pci_get_drvdata(pdev);
@@ -350,6 +353,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(pch_set_station_address);
 
 /*
  * Interrupt service routine
@@ -649,8 +653,6 @@
 	iowrite32(1, &chip->regs->trgt_lo);
 	iowrite32(0, &chip->regs->trgt_hi);
 	iowrite32(PCH_TSE_TTIPEND, &chip->regs->event);
-	/* Version: IEEE1588 v1 and IEEE1588-2008,  Mode: All Evwnt, Locked  */
-	iowrite32(0x80020000, &chip->regs->ch_control);
 
 	pch_eth_enable_set(chip);
 
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 28b81ae4..c3482b9 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -27,13 +27,8 @@
 	unsigned int	*vol_table;
 	unsigned int	*vol_suspend;
 
-	int	vol_reg;
-	int	vol_shift;
-	int	vol_nbits;
 	int	update_reg;
 	int	update_bit;
-	int	enable_reg;
-	int	enable_bit;
 	int	slope_double;
 };
 
@@ -216,7 +211,7 @@
 	struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
 	int ret = -EINVAL;
 
-	if (info->vol_table && (index < (1 << info->vol_nbits))) {
+	if (info->vol_table && (index < rdev->desc->n_voltages)) {
 		ret = info->vol_table[index];
 		if (info->slope_double)
 			ret <<= 1;
@@ -224,51 +219,16 @@
 	return ret;
 }
 
-static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int pm8607_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
 {
 	struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
-	int i, ret = -ENOENT;
-
-	if (info->slope_double) {
-		min_uV = min_uV >> 1;
-		max_uV = max_uV >> 1;
-	}
-	if (info->vol_table) {
-		for (i = 0; i < (1 << info->vol_nbits); i++) {
-			if (!info->vol_table[i])
-				break;
-			if ((min_uV <= info->vol_table[i])
-				&& (max_uV >= info->vol_table[i])) {
-				ret = i;
-				break;
-			}
-		}
-	}
-	if (ret < 0)
-		pr_err("invalid voltage range (%d %d) uV\n", min_uV, max_uV);
-	return ret;
-}
-
-static int pm8607_set_voltage(struct regulator_dev *rdev,
-			      int min_uV, int max_uV, unsigned *selector)
-{
-	struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
-	uint8_t val, mask;
+	uint8_t val;
 	int ret;
 
-	if (min_uV > max_uV) {
-		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
-		return -EINVAL;
-	}
+	val = (uint8_t)(selector << (ffs(rdev->desc->vsel_mask) - 1));
 
-	ret = choose_voltage(rdev, min_uV, max_uV);
-	if (ret < 0)
-		return -EINVAL;
-	*selector = ret;
-	val = (uint8_t)(ret << info->vol_shift);
-	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
-
-	ret = pm860x_set_bits(info->i2c, info->vol_reg, mask, val);
+	ret = pm860x_set_bits(info->i2c, rdev->desc->vsel_reg,
+			      rdev->desc->vsel_mask, val);
 	if (ret)
 		return ret;
 	switch (info->desc.id) {
@@ -282,60 +242,16 @@
 	return ret;
 }
 
-static int pm8607_get_voltage(struct regulator_dev *rdev)
-{
-	struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
-	uint8_t val, mask;
-	int ret;
-
-	ret = pm860x_reg_read(info->i2c, info->vol_reg);
-	if (ret < 0)
-		return ret;
-
-	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
-	val = ((unsigned char)ret & mask) >> info->vol_shift;
-
-	return pm8607_list_voltage(rdev, val);
-}
-
-static int pm8607_enable(struct regulator_dev *rdev)
-{
-	struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
-
-	return pm860x_set_bits(info->i2c, info->enable_reg,
-			       1 << info->enable_bit,
-			       1 << info->enable_bit);
-}
-
-static int pm8607_disable(struct regulator_dev *rdev)
-{
-	struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
-
-	return pm860x_set_bits(info->i2c, info->enable_reg,
-			       1 << info->enable_bit, 0);
-}
-
-static int pm8607_is_enabled(struct regulator_dev *rdev)
-{
-	struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
-	int ret;
-
-	ret = pm860x_reg_read(info->i2c, info->enable_reg);
-	if (ret < 0)
-		return ret;
-
-	return !!((unsigned char)ret & (1 << info->enable_bit));
-}
-
 static struct regulator_ops pm8607_regulator_ops = {
-	.set_voltage	= pm8607_set_voltage,
-	.get_voltage	= pm8607_get_voltage,
-	.enable		= pm8607_enable,
-	.disable	= pm8607_disable,
-	.is_enabled	= pm8607_is_enabled,
+	.list_voltage	= pm8607_list_voltage,
+	.set_voltage_sel = pm8607_set_voltage_sel,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
 };
 
-#define PM8607_DVC(vreg, nbits, ureg, ubit, ereg, ebit)			\
+#define PM8607_DVC(vreg, ureg, ubit, ereg, ebit)			\
 {									\
 	.desc	= {							\
 		.name	= #vreg,					\
@@ -343,20 +259,20 @@
 		.type	= REGULATOR_VOLTAGE,				\
 		.id	= PM8607_ID_##vreg,				\
 		.owner	= THIS_MODULE,					\
+		.n_voltages = ARRAY_SIZE(vreg##_table),			\
+		.vsel_reg = PM8607_##vreg,				\
+		.vsel_mask = ARRAY_SIZE(vreg##_table) - 1,		\
+		.enable_reg = PM8607_##ereg,				\
+		.enable_mask = 1 << (ebit),				\
 	},								\
-	.vol_reg	= PM8607_##vreg,				\
-	.vol_shift	= (0),						\
-	.vol_nbits	= (nbits),					\
 	.update_reg	= PM8607_##ureg,				\
 	.update_bit	= (ubit),					\
-	.enable_reg	= PM8607_##ereg,				\
-	.enable_bit	= (ebit),					\
 	.slope_double	= (0),						\
 	.vol_table	= (unsigned int *)&vreg##_table,		\
 	.vol_suspend	= (unsigned int *)&vreg##_suspend_table,	\
 }
 
-#define PM8607_LDO(_id, vreg, shift, nbits, ereg, ebit)			\
+#define PM8607_LDO(_id, vreg, shift, ereg, ebit)			\
 {									\
 	.desc	= {							\
 		.name	= "LDO" #_id,					\
@@ -364,35 +280,35 @@
 		.type	= REGULATOR_VOLTAGE,				\
 		.id	= PM8607_ID_LDO##_id,				\
 		.owner	= THIS_MODULE,					\
+		.n_voltages = ARRAY_SIZE(LDO##_id##_table),		\
+		.vsel_reg = PM8607_##vreg,				\
+		.vsel_mask = (ARRAY_SIZE(LDO##_id##_table) - 1) << (shift), \
+		.enable_reg = PM8607_##ereg,				\
+		.enable_mask = 1 << (ebit),				\
 	},								\
-	.vol_reg	= PM8607_##vreg,				\
-	.vol_shift	= (shift),					\
-	.vol_nbits	= (nbits),					\
-	.enable_reg	= PM8607_##ereg,				\
-	.enable_bit	= (ebit),					\
 	.slope_double	= (0),						\
 	.vol_table	= (unsigned int *)&LDO##_id##_table,		\
 	.vol_suspend	= (unsigned int *)&LDO##_id##_suspend_table,	\
 }
 
 static struct pm8607_regulator_info pm8607_regulator_info[] = {
-	PM8607_DVC(BUCK1, 6, GO, 0, SUPPLIES_EN11, 0),
-	PM8607_DVC(BUCK2, 6, GO, 1, SUPPLIES_EN11, 1),
-	PM8607_DVC(BUCK3, 6, GO, 2, SUPPLIES_EN11, 2),
+	PM8607_DVC(BUCK1, GO, 0, SUPPLIES_EN11, 0),
+	PM8607_DVC(BUCK2, GO, 1, SUPPLIES_EN11, 1),
+	PM8607_DVC(BUCK3, GO, 2, SUPPLIES_EN11, 2),
 
-	PM8607_LDO( 1,         LDO1, 0, 2, SUPPLIES_EN11, 3),
-	PM8607_LDO( 2,         LDO2, 0, 3, SUPPLIES_EN11, 4),
-	PM8607_LDO( 3,         LDO3, 0, 3, SUPPLIES_EN11, 5),
-	PM8607_LDO( 4,         LDO4, 0, 3, SUPPLIES_EN11, 6),
-	PM8607_LDO( 5,         LDO5, 0, 2, SUPPLIES_EN11, 7),
-	PM8607_LDO( 6,         LDO6, 0, 3, SUPPLIES_EN12, 0),
-	PM8607_LDO( 7,         LDO7, 0, 3, SUPPLIES_EN12, 1),
-	PM8607_LDO( 8,         LDO8, 0, 3, SUPPLIES_EN12, 2),
-	PM8607_LDO( 9,         LDO9, 0, 3, SUPPLIES_EN12, 3),
-	PM8607_LDO(10,        LDO10, 0, 4, SUPPLIES_EN12, 4),
-	PM8607_LDO(12,        LDO12, 0, 4, SUPPLIES_EN12, 5),
-	PM8607_LDO(13, VIBRATOR_SET, 1, 3,  VIBRATOR_SET, 0),
-	PM8607_LDO(14,        LDO14, 0, 3, SUPPLIES_EN12, 6),
+	PM8607_LDO(1,         LDO1, 0, SUPPLIES_EN11, 3),
+	PM8607_LDO(2,         LDO2, 0, SUPPLIES_EN11, 4),
+	PM8607_LDO(3,         LDO3, 0, SUPPLIES_EN11, 5),
+	PM8607_LDO(4,         LDO4, 0, SUPPLIES_EN11, 6),
+	PM8607_LDO(5,         LDO5, 0, SUPPLIES_EN11, 7),
+	PM8607_LDO(6,         LDO6, 0, SUPPLIES_EN12, 0),
+	PM8607_LDO(7,         LDO7, 0, SUPPLIES_EN12, 1),
+	PM8607_LDO(8,         LDO8, 0, SUPPLIES_EN12, 2),
+	PM8607_LDO(9,         LDO9, 0, SUPPLIES_EN12, 3),
+	PM8607_LDO(10,        LDO10, 0, SUPPLIES_EN12, 4),
+	PM8607_LDO(12,        LDO12, 0, SUPPLIES_EN12, 5),
+	PM8607_LDO(13, VIBRATOR_SET, 1, VIBRATOR_SET, 0),
+	PM8607_LDO(14,        LDO14, 0, SUPPLIES_EN12, 6),
 };
 
 static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
@@ -400,6 +316,7 @@
 	struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
 	struct pm8607_regulator_info *info = NULL;
 	struct regulator_init_data *pdata = pdev->dev.platform_data;
+	struct regulator_config config = { };
 	struct resource *res;
 	int i;
 
@@ -425,9 +342,17 @@
 	if ((i == PM8607_ID_BUCK3) && info->chip->buck3_double)
 		info->slope_double = 1;
 
+	config.dev = &pdev->dev;
+	config.init_data = pdata;
+	config.driver_data = info;
+
+	if (chip->id == CHIP_PM8607)
+		config.regmap = chip->regmap;
+	else
+		config.regmap = chip->regmap_companion;
+
 	/* replace driver_data with info */
-	info->regulator = regulator_register(&info->desc, &pdev->dev,
-					     pdata, info, NULL);
+	info->regulator = regulator_register(&info->desc, &config);
 	if (IS_ERR(info->regulator)) {
 		dev_err(&pdev->dev, "failed to register regulator %s\n",
 			info->desc.name);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 36db5a4..c86b886 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -223,6 +223,16 @@
 	 Say Y here to support the voltage regulators and convertors
 	 on PCF50633
 
+config REGULATOR_RC5T583
+	tristate "RICOH RC5T583 Power regulators"
+	depends on MFD_RC5T583
+	help
+	  Select this option to enable the power regulators of the RICOH
+	  RC5T583 PMIC.
+	  This driver supports control of the device's power rails through
+	  the regulator interface. The device provides multiple DCDC/LDO
+	  outputs which can be controlled over I2C.
+
 config REGULATOR_S5M8767
 	tristate "Samsung S5M8767A voltage regulator"
 	depends on MFD_S5M_CORE
@@ -258,6 +268,18 @@
 	  This driver supports the voltage domain regulators controlled by the
 	  DB8500 PRCMU
 
+config REGULATOR_PALMAS
+	tristate "TI Palmas PMIC Regulators"
+	depends on MFD_PALMAS
+	help
+	  If you wish to control the regulators on the Palmas series of
+	  chips, say Y here. This will enable support for all the software
+	  controllable SMPS/LDO regulators.
+
+	  The regulators available on Palmas series chips vary depending
+	  on the muxing. This is handled automatically in the driver by
+	  reading the mux info from OTP.
+
 config REGULATOR_TPS6105X
 	tristate "TI TPS6105X Power regulators"
 	depends on TPS6105X
@@ -268,11 +290,11 @@
 	  audio amplifiers.
 
 config REGULATOR_TPS62360
-	tristate "TI TPS62360 Power Regulator"
+	tristate "TI TPS6236x Power Regulator"
 	depends on I2C
 	select REGMAP_I2C
 	help
-	  This driver supports TPS62360 voltage regulator chip. This
+	  This driver supports the TPS6236x voltage regulator chips. This
 	  regulator is meant for processor core supply. This chip is
 	  high-frequency synchronous step down dc-dc converter optimized
 	  for battery-powered portable applications.
@@ -294,6 +316,13 @@
 	  three step-down converters and two general-purpose LDO voltage regulators.
 	  It supports TI's software based Class-2 SmartReflex implementation.
 
+config REGULATOR_TPS65090
+	tristate "TI TPS65090 Power regulator"
+	depends on MFD_TPS65090
+	help
+	  This driver provides support for the voltage regulators on the
+	  TI TPS65090 PMIC.
+
 config REGULATOR_TPS65217
 	tristate "TI TPS65217 Power regulators"
 	depends on MFD_TPS65217
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 94b5274..977fd46 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -9,7 +9,6 @@
 obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
 obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
 
-obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
 obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
 obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
 obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
@@ -20,6 +19,7 @@
 obj-$(CONFIG_REGULATOR_DA9052)	+= da9052-regulator.o
 obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
 obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
+obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
 obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
 obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
 obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
@@ -33,13 +33,16 @@
 obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
 obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
 obj-$(CONFIG_REGULATOR_MC13XXX_CORE) +=  mc13xxx-regulator-core.o
+obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
 obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
 obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
+obj-$(CONFIG_REGULATOR_RC5T583)  += rc5t583-regulator.o
 obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
 obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
 obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
 obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65090) += tps65090-regulator.o
 obj-$(CONFIG_REGULATOR_TPS65217) += tps65217-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 9ed5c5d..06776ca 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
@@ -178,6 +177,7 @@
 static int aat2870_regulator_probe(struct platform_device *pdev)
 {
 	struct aat2870_regulator *ri;
+	struct regulator_config config = { 0 };
 	struct regulator_dev *rdev;
 
 	ri = aat2870_get_regulator(pdev->id);
@@ -187,8 +187,11 @@
 	}
 	ri->aat2870 = dev_get_drvdata(pdev->dev.parent);
 
-	rdev = regulator_register(&ri->desc, &pdev->dev,
-				  pdev->dev.platform_data, ri, NULL);
+	config.dev = &pdev->dev;
+	config.driver_data = ri;
+	config.init_data = pdev->dev.platform_data;
+
+	rdev = regulator_register(&ri->desc, &config);
 	if (IS_ERR(rdev)) {
 		dev_err(&pdev->dev, "Failed to register regulator %s\n",
 			ri->desc.name);
@@ -231,3 +234,4 @@
 MODULE_DESCRIPTION("AnalogicTech AAT2870 Regulator");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
+MODULE_ALIAS("platform:aat2870-regulator");
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 042271a..03f4d9c 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -13,7 +13,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/err.h>
-#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/mfd/abx500.h>
@@ -305,53 +304,12 @@
 	return abreg->typ_voltages[regval];
 }
 
-static int ab3100_get_best_voltage_index(struct regulator_dev *reg,
-				   int min_uV, int max_uV)
-{
-	struct ab3100_regulator *abreg = reg->reg_data;
-	int i;
-	int bestmatch;
-	int bestindex;
-
-	/*
-	 * Locate the minimum voltage fitting the criteria on
-	 * this regulator. The switchable voltages are not
-	 * in strict falling order so we need to check them
-	 * all for the best match.
-	 */
-	bestmatch = INT_MAX;
-	bestindex = -1;
-	for (i = 0; i < abreg->voltages_len; i++) {
-		if (abreg->typ_voltages[i] <= max_uV &&
-		    abreg->typ_voltages[i] >= min_uV &&
-		    abreg->typ_voltages[i] < bestmatch) {
-			bestmatch = abreg->typ_voltages[i];
-			bestindex = i;
-		}
-	}
-
-	if (bestindex < 0) {
-		dev_warn(&reg->dev, "requested %d<=x<=%d uV, out of range!\n",
-			 min_uV, max_uV);
-		return -EINVAL;
-	}
-	return bestindex;
-}
-
-static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
-					int min_uV, int max_uV,
-					unsigned *selector)
+static int ab3100_set_voltage_regulator_sel(struct regulator_dev *reg,
+					    unsigned selector)
 {
 	struct ab3100_regulator *abreg = reg->reg_data;
 	u8 regval;
 	int err;
-	int bestindex;
-
-	bestindex = ab3100_get_best_voltage_index(reg, min_uV, max_uV);
-	if (bestindex < 0)
-		return bestindex;
-
-	*selector = bestindex;
 
 	err = abx500_get_register_interruptible(abreg->dev, 0,
 						abreg->regreg, &regval);
@@ -364,7 +322,7 @@
 
 	/* The highest three bits control the variable regulators */
 	regval &= ~0xE0;
-	regval |= (bestindex << 5);
+	regval |= (selector << 5);
 
 	err = abx500_set_register_interruptible(abreg->dev, 0,
 						abreg->regreg, regval);
@@ -392,7 +350,7 @@
 		return -EINVAL;
 
 	/* LDO E and BUCK have special suspend voltages you can set */
-	bestindex = ab3100_get_best_voltage_index(reg, uV, uV);
+	bestindex = regulator_map_voltage_iterate(reg, uV, uV);
 
 	err = abx500_get_register_interruptible(abreg->dev, 0,
 						targetreg, &regval);
@@ -464,7 +422,7 @@
 	.disable     = ab3100_disable_regulator,
 	.is_enabled  = ab3100_is_enabled_regulator,
 	.get_voltage = ab3100_get_voltage_regulator,
-	.set_voltage = ab3100_set_voltage_regulator,
+	.set_voltage_sel = ab3100_set_voltage_regulator_sel,
 	.list_voltage = ab3100_list_voltage_regulator,
 	.enable_time = ab3100_enable_time_regulator,
 };
@@ -474,7 +432,7 @@
 	.disable     = ab3100_disable_regulator,
 	.is_enabled  = ab3100_is_enabled_regulator,
 	.get_voltage = ab3100_get_voltage_regulator,
-	.set_voltage = ab3100_set_voltage_regulator,
+	.set_voltage_sel = ab3100_set_voltage_regulator_sel,
 	.set_suspend_voltage = ab3100_set_suspend_voltage_regulator,
 	.list_voltage = ab3100_list_voltage_regulator,
 	.enable_time = ab3100_enable_time_regulator,
@@ -582,6 +540,7 @@
 static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
 {
 	struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
+	struct regulator_config config = { };
 	int err = 0;
 	u8 data;
 	int i;
@@ -627,15 +586,15 @@
 		reg->dev = &pdev->dev;
 		reg->plfdata = plfdata;
 
+		config.dev = &pdev->dev;
+		config.driver_data = reg;
+		config.init_data = &plfdata->reg_constraints[i];
+
 		/*
 		 * Register the regulator, pass around
 		 * the ab3100_regulator struct
 		 */
-		rdev = regulator_register(&ab3100_regulator_desc[i],
-					  &pdev->dev,
-					  &plfdata->reg_constraints[i],
-					  reg, NULL);
-
+		rdev = regulator_register(&ab3100_regulator_desc[i], &config);
 		if (IS_ERR(rdev)) {
 			err = PTR_ERR(rdev);
 			dev_err(&pdev->dev,
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index c7ee4c1..e1b8c54 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -18,9 +18,12 @@
 #include <linux/platform_device.h>
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/ab8500.h>
+#include <linux/slab.h>
 
 /**
  * struct ab8500_regulator_info - ab8500 regulator information
@@ -234,25 +237,8 @@
 		return val;
 }
 
-static int ab8500_get_best_voltage_index(struct regulator_dev *rdev,
-		int min_uV, int max_uV)
-{
-	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-	int i;
-
-	/* check the supported voltage */
-	for (i = 0; i < info->voltages_len; i++) {
-		if ((info->voltages[i] >= min_uV) &&
-		    (info->voltages[i] <= max_uV))
-			return i;
-	}
-
-	return -EINVAL;
-}
-
-static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
-					int min_uV, int max_uV,
-					unsigned *selector)
+static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev,
+					    unsigned selector)
 {
 	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
@@ -263,18 +249,8 @@
 		return -EINVAL;
 	}
 
-	/* get the appropriate voltages within the range */
-	ret = ab8500_get_best_voltage_index(rdev, min_uV, max_uV);
-	if (ret < 0) {
-		dev_err(rdev_get_dev(rdev),
-				"couldn't get best voltage for regulator\n");
-		return ret;
-	}
-
-	*selector = ret;
-
 	/* set the registers for the request */
-	regval = (u8)ret;
+	regval = (u8)selector;
 	ret = abx500_mask_and_set_register_interruptible(info->dev,
 			info->voltage_bank, info->voltage_reg,
 			info->voltage_mask, regval);
@@ -319,7 +295,7 @@
 	.disable	= ab8500_regulator_disable,
 	.is_enabled	= ab8500_regulator_is_enabled,
 	.get_voltage_sel = ab8500_regulator_get_voltage_sel,
-	.set_voltage	= ab8500_regulator_set_voltage,
+	.set_voltage_sel = ab8500_regulator_set_voltage_sel,
 	.list_voltage	= ab8500_list_voltage,
 	.enable_time	= ab8500_regulator_enable_time,
 	.set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
@@ -735,12 +711,139 @@
 	REG_INIT(AB8500_REGUCTRLDISCH2,		0x04, 0x44, 0x16),
 };
 
+static __devinit int
+ab8500_regulator_init_registers(struct platform_device *pdev, int id, int value)
+{
+	int err;
+
+	if (value & ~ab8500_reg_init[id].mask) {
+		dev_err(&pdev->dev,
+			"Configuration error: value outside mask.\n");
+		return -EINVAL;
+	}
+
+	err = abx500_mask_and_set_register_interruptible(
+		&pdev->dev,
+		ab8500_reg_init[id].bank,
+		ab8500_reg_init[id].addr,
+		ab8500_reg_init[id].mask,
+		value);
+	if (err < 0) {
+		dev_err(&pdev->dev,
+			"Failed to initialize 0x%02x, 0x%02x.\n",
+			ab8500_reg_init[id].bank,
+			ab8500_reg_init[id].addr);
+		return err;
+	}
+
+	dev_vdbg(&pdev->dev,
+		"init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+		ab8500_reg_init[id].bank,
+		ab8500_reg_init[id].addr,
+		ab8500_reg_init[id].mask,
+		value);
+
+	return 0;
+}
+
+static __devinit int ab8500_regulator_register(struct platform_device *pdev,
+					struct regulator_init_data *init_data,
+					int id,
+					struct device_node *np)
+{
+	struct ab8500_regulator_info *info = NULL;
+	struct regulator_config config = { };
+	int err;
+
+	/* assign per-regulator data */
+	info = &ab8500_regulator_info[id];
+	info->dev = &pdev->dev;
+
+	config.dev = &pdev->dev;
+	config.init_data = init_data;
+	config.driver_data = info;
+	config.of_node = np;
+
+	/* fix for hardware before ab8500v2.0 */
+	if (abx500_get_chip_id(info->dev) < 0x20) {
+		if (info->desc.id == AB8500_LDO_AUX3) {
+			info->desc.n_voltages =
+				ARRAY_SIZE(ldo_vauxn_voltages);
+			info->voltages = ldo_vauxn_voltages;
+			info->voltages_len =
+				ARRAY_SIZE(ldo_vauxn_voltages);
+			info->voltage_mask = 0xf;
+		}
+	}
+
+	/* register regulator with framework */
+	info->regulator = regulator_register(&info->desc, &config);
+	if (IS_ERR(info->regulator)) {
+		err = PTR_ERR(info->regulator);
+		dev_err(&pdev->dev, "failed to register regulator %s\n",
+			info->desc.name);
+		/* when we fail, un-register all earlier regulators */
+		while (--id >= 0) {
+			info = &ab8500_regulator_info[id];
+			regulator_unregister(info->regulator);
+		}
+		return err;
+	}
+
+	return 0;
+}
+
+static struct of_regulator_match ab8500_regulator_matches[] = {
+	{ .name	= "LDO-AUX1",    .driver_data = (void *) AB8500_LDO_AUX1, },
+	{ .name	= "LDO-AUX2",    .driver_data = (void *) AB8500_LDO_AUX2, },
+	{ .name	= "LDO-AUX3",    .driver_data = (void *) AB8500_LDO_AUX3, },
+	{ .name	= "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, },
+	{ .name	= "LDO-TVOUT",   .driver_data = (void *) AB8500_LDO_TVOUT, },
+	{ .name = "LDO-USB",     .driver_data = (void *) AB8500_LDO_USB, },
+	{ .name = "LDO-AUDIO",   .driver_data = (void *) AB8500_LDO_AUDIO, },
+	{ .name	= "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
+	{ .name	= "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
+	{ .name	= "LDO-DMIC",    .driver_data = (void *) AB8500_LDO_DMIC, },
+	{ .name	= "LDO-ANA",     .driver_data = (void *) AB8500_LDO_ANA, },
+};
+
+static __devinit int
+ab8500_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
+{
+	int err, i;
+
+	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
+		err = ab8500_regulator_register(
+			pdev, ab8500_regulator_matches[i].init_data,
+			i, ab8500_regulator_matches[i].of_node);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
 {
 	struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
 	struct ab8500_platform_data *pdata;
+	struct device_node *np = pdev->dev.of_node;
 	int i, err;
 
+	if (np) {
+		err = of_regulator_match(&pdev->dev, np,
+					ab8500_regulator_matches,
+					ARRAY_SIZE(ab8500_regulator_matches));
+		if (err < 0) {
+			dev_err(&pdev->dev,
+				"Error parsing regulator init data: %d\n", err);
+			return err;
+		}
+
+		err = ab8500_regulator_of_probe(pdev, np);
+		return err;
+	}
+
 	if (!ab8500) {
 		dev_err(&pdev->dev, "null mfd parent\n");
 		return -EINVAL;
@@ -759,8 +862,7 @@
 
 	/* initialize registers */
 	for (i = 0; i < pdata->num_regulator_reg_init; i++) {
-		int id;
-		u8 value;
+		int id, value;
 
 		id = pdata->regulator_reg_init[i].id;
 		value = pdata->regulator_reg_init[i].value;
@@ -771,70 +873,17 @@
 				"Configuration error: id outside range.\n");
 			return -EINVAL;
 		}
-		if (value & ~ab8500_reg_init[id].mask) {
-			dev_err(&pdev->dev,
-				"Configuration error: value outside mask.\n");
-			return -EINVAL;
-		}
 
-		/* initialize register */
-		err = abx500_mask_and_set_register_interruptible(&pdev->dev,
-			ab8500_reg_init[id].bank,
-			ab8500_reg_init[id].addr,
-			ab8500_reg_init[id].mask,
-			value);
-		if (err < 0) {
-			dev_err(&pdev->dev,
-				"Failed to initialize 0x%02x, 0x%02x.\n",
-				ab8500_reg_init[id].bank,
-				ab8500_reg_init[id].addr);
+		err = ab8500_regulator_init_registers(pdev, id, value);
+		if (err < 0)
 			return err;
-		}
-		dev_vdbg(&pdev->dev,
-			"  init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
-			ab8500_reg_init[id].bank,
-			ab8500_reg_init[id].addr,
-			ab8500_reg_init[id].mask,
-			value);
 	}
 
 	/* register all regulators */
 	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
-		struct ab8500_regulator_info *info = NULL;
-
-		/* assign per-regulator data */
-		info = &ab8500_regulator_info[i];
-		info->dev = &pdev->dev;
-
-		/* fix for hardware before ab8500v2.0 */
-		if (abx500_get_chip_id(info->dev) < 0x20) {
-			if (info->desc.id == AB8500_LDO_AUX3) {
-				info->desc.n_voltages =
-					ARRAY_SIZE(ldo_vauxn_voltages);
-				info->voltages = ldo_vauxn_voltages;
-				info->voltages_len =
-					ARRAY_SIZE(ldo_vauxn_voltages);
-				info->voltage_mask = 0xf;
-			}
-		}
-
-		/* register regulator with framework */
-		info->regulator = regulator_register(&info->desc, &pdev->dev,
-				&pdata->regulator[i], info, NULL);
-		if (IS_ERR(info->regulator)) {
-			err = PTR_ERR(info->regulator);
-			dev_err(&pdev->dev, "failed to register regulator %s\n",
-					info->desc.name);
-			/* when we fail, un-register all earlier regulators */
-			while (--i >= 0) {
-				info = &ab8500_regulator_info[i];
-				regulator_unregister(info->regulator);
-			}
+		err = ab8500_regulator_register(pdev, &pdata->regulator[i], i, NULL);
+		if (err < 0)
 			return err;
-		}
-
-		dev_vdbg(rdev_get_dev(info->regulator),
-			"%s-probed\n", info->desc.name);
 	}
 
 	return 0;
@@ -857,12 +906,18 @@
 	return 0;
 }
 
+static const struct of_device_id ab8500_regulator_match[] = {
+	{ .compatible = "stericsson,ab8500-regulator", },
+	{}
+};
+
 static struct platform_driver ab8500_regulator_driver = {
 	.probe = ab8500_regulator_probe,
 	.remove = __devexit_p(ab8500_regulator_remove),
 	.driver         = {
 		.name   = "ab8500-regulator",
 		.owner  = THIS_MODULE,
+		.of_match_table = ab8500_regulator_match,
 	},
 };
 
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 26d23ad..46d05f3 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -99,8 +99,8 @@
 	if (ad5398_calc_current(chip, selector) > max_uA)
 		return -EINVAL;
 
-	dev_dbg(&client->dev, "changing current %dmA\n",
-		ad5398_calc_current(chip, selector) / 1000);
+	dev_dbg(&client->dev, "changing current %duA\n",
+		ad5398_calc_current(chip, selector));
 
 	/* read chip enable bit */
 	ret = ad5398_read_reg(client, &data);
@@ -184,7 +184,7 @@
 	.is_enabled = ad5398_is_enabled,
 };
 
-static struct regulator_desc ad5398_reg = {
+static const struct regulator_desc ad5398_reg = {
 	.name = "isink",
 	.id = 0,
 	.ops = &ad5398_ops,
@@ -212,6 +212,7 @@
 				const struct i2c_device_id *id)
 {
 	struct regulator_init_data *init_data = client->dev.platform_data;
+	struct regulator_config config = { };
 	struct ad5398_chip_info *chip;
 	const struct ad5398_current_data_format *df =
 			(struct ad5398_current_data_format *)id->driver_data;
@@ -220,10 +221,14 @@
 	if (!init_data)
 		return -EINVAL;
 
-	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
 	if (!chip)
 		return -ENOMEM;
 
+	config.dev = &client->dev;
+	config.init_data = init_data;
+	config.driver_data = chip;
+
 	chip->client = client;
 
 	chip->min_uA = df->min_uA;
@@ -232,8 +237,7 @@
 	chip->current_offset = df->current_offset;
 	chip->current_mask = (chip->current_level - 1) << chip->current_offset;
 
-	chip->rdev = regulator_register(&ad5398_reg, &client->dev,
-					init_data, chip, NULL);
+	chip->rdev = regulator_register(&ad5398_reg, &config);
 	if (IS_ERR(chip->rdev)) {
 		ret = PTR_ERR(chip->rdev);
 		dev_err(&client->dev, "failed to register %s %s\n",
@@ -246,7 +250,6 @@
 	return 0;
 
 err:
-	kfree(chip);
 	return ret;
 }
 
@@ -255,8 +258,6 @@
 	struct ad5398_chip_info *chip = i2c_get_clientdata(client);
 
 	regulator_unregister(chip->rdev);
-	kfree(chip);
-
 	return 0;
 }
 
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 81fd606..49b2112 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -122,6 +122,7 @@
 	struct anatop_regulator *sreg;
 	struct regulator_init_data *initdata;
 	struct anatop *anatopmfd = dev_get_drvdata(pdev->dev.parent);
+	struct regulator_config config = { };
 	int ret = 0;
 
 	initdata = of_get_regulator_init_data(dev, np);
@@ -178,9 +179,13 @@
 	rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage)
 		/ 25000 + 1;
 
+	config.dev = &pdev->dev;
+	config.init_data = initdata;
+	config.driver_data = sreg;
+	config.of_node = pdev->dev.of_node;
+
 	/* register regulator */
-	rdev = regulator_register(rdesc, dev,
-				  initdata, sreg, pdev->dev.of_node);
+	rdev = regulator_register(rdesc, &config);
 	if (IS_ERR(rdev)) {
 		dev_err(dev, "failed to register %s\n",
 			rdesc->name);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e70dd38..7584a74 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -24,6 +24,7 @@
 #include <linux/suspend.h>
 #include <linux/delay.h>
 #include <linux/of.h>
+#include <linux/regmap.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/driver.h>
@@ -74,6 +75,7 @@
 struct regulator {
 	struct device *dev;
 	struct list_head list;
+	unsigned int always_on:1;
 	int uA_load;
 	int min_uV;
 	int max_uV;
@@ -155,6 +157,17 @@
 	return regnode;
 }
 
+static int _regulator_can_change_status(struct regulator_dev *rdev)
+{
+	if (!rdev->constraints)
+		return 0;
+
+	if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS)
+		return 1;
+	else
+		return 0;
+}
+
 /* Platform voltage constraint check */
 static int regulator_check_voltage(struct regulator_dev *rdev,
 				   int *min_uV, int *max_uV)
@@ -649,7 +662,7 @@
 	/* get input voltage */
 	input_uV = 0;
 	if (rdev->supply)
-		input_uV = _regulator_get_voltage(rdev);
+		input_uV = regulator_get_voltage(rdev->supply);
 	if (input_uV <= 0)
 		input_uV = rdev->constraints->input_uV;
 	if (input_uV <= 0)
@@ -673,17 +686,14 @@
 	struct regulator_state *rstate)
 {
 	int ret = 0;
-	bool can_set_state;
-
-	can_set_state = rdev->desc->ops->set_suspend_enable &&
-		rdev->desc->ops->set_suspend_disable;
 
 	/* If we have no suspend mode configration don't set anything;
-	 * only warn if the driver actually makes the suspend mode
-	 * configurable.
+	 * only warn if the driver implements the set_suspend_voltage or
+	 * set_suspend_mode callback.
 	 */
 	if (!rstate->enabled && !rstate->disabled) {
-		if (can_set_state)
+		if (rdev->desc->ops->set_suspend_voltage ||
+		    rdev->desc->ops->set_suspend_mode)
 			rdev_warn(rdev, "No configuration\n");
 		return 0;
 	}
@@ -693,15 +703,13 @@
 		return -EINVAL;
 	}
 
-	if (!can_set_state) {
-		rdev_err(rdev, "no way to set suspend state\n");
-		return -EINVAL;
-	}
-
-	if (rstate->enabled)
+	if (rstate->enabled && rdev->desc->ops->set_suspend_enable)
 		ret = rdev->desc->ops->set_suspend_enable(rdev);
-	else
+	else if (rstate->disabled && rdev->desc->ops->set_suspend_disable)
 		ret = rdev->desc->ops->set_suspend_disable(rdev);
+	else /* OK if set_suspend_enable or set_suspend_disable is NULL */
+		ret = 0;
+
 	if (ret < 0) {
 		rdev_err(rdev, "failed to enabled/disable\n");
 		return ret;
@@ -1146,6 +1154,15 @@
 				   &regulator->max_uV);
 	}
 
+	/*
+	 * Check now if the regulator is an always on regulator - if
+	 * it is then we don't need to do nearly so much work for
+	 * enable/disable calls.
+	 */
+	if (!_regulator_can_change_status(rdev) &&
+	    _regulator_is_enabled(rdev))
+		regulator->always_on = true;
+
 	mutex_unlock(&rdev->mutex);
 	return regulator;
 link_name_err:
@@ -1169,26 +1186,52 @@
 }
 
 static struct regulator_dev *regulator_dev_lookup(struct device *dev,
-							 const char *supply)
+						  const char *supply,
+						  int *ret)
 {
 	struct regulator_dev *r;
 	struct device_node *node;
+	struct regulator_map *map;
+	const char *devname = NULL;
 
 	/* first do a dt based lookup */
 	if (dev && dev->of_node) {
 		node = of_get_regulator(dev, supply);
-		if (node)
+		if (node) {
 			list_for_each_entry(r, &regulator_list, list)
 				if (r->dev.parent &&
 					node == r->dev.of_node)
 					return r;
+		} else {
+			/*
+			 * If we couldn't even get the node then it's
+			 * not just that the device hasn't registered
+			 * yet: there is no node, so the lookup can
+			 * never succeed.
+			 */
+			*ret = -ENODEV;
+		}
 	}
 
 	/* if not found, try doing it non-dt way */
+	if (dev)
+		devname = dev_name(dev);
+
 	list_for_each_entry(r, &regulator_list, list)
 		if (strcmp(rdev_get_name(r), supply) == 0)
 			return r;
 
+	list_for_each_entry(map, &regulator_map_list, list) {
+		/* If the mapping has a device set up it must match */
+		if (map->dev_name &&
+		    (!devname || strcmp(map->dev_name, devname)))
+			continue;
+
+		if (strcmp(map->supply, supply) == 0)
+			return map->regulator;
+	}
+
+
 	return NULL;
 }
 
@@ -1197,7 +1240,6 @@
 					int exclusive)
 {
 	struct regulator_dev *rdev;
-	struct regulator_map *map;
 	struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
 	const char *devname = NULL;
 	int ret;
@@ -1212,22 +1254,10 @@
 
 	mutex_lock(&regulator_list_mutex);
 
-	rdev = regulator_dev_lookup(dev, id);
+	rdev = regulator_dev_lookup(dev, id, &ret);
 	if (rdev)
 		goto found;
 
-	list_for_each_entry(map, &regulator_map_list, list) {
-		/* If the mapping has a device set up it must match */
-		if (map->dev_name &&
-		    (!devname || strcmp(map->dev_name, devname)))
-			continue;
-
-		if (strcmp(map->supply, id) == 0) {
-			rdev = map->regulator;
-			goto found;
-		}
-	}
-
 	if (board_wants_dummy_regulator) {
 		rdev = dummy_regulator_rdev;
 		goto found;
@@ -1431,21 +1461,13 @@
 
 	rc = devres_destroy(regulator->dev, devm_regulator_release,
 			    devm_regulator_match, regulator);
-	WARN_ON(rc);
+	if (rc == 0)
+		regulator_put(regulator);
+	else
+		WARN_ON(rc);
 }
 EXPORT_SYMBOL_GPL(devm_regulator_put);
 
-static int _regulator_can_change_status(struct regulator_dev *rdev)
-{
-	if (!rdev->constraints)
-		return 0;
-
-	if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS)
-		return 1;
-	else
-		return 0;
-}
-
 /* locks held by regulator_enable() */
 static int _regulator_enable(struct regulator_dev *rdev)
 {
@@ -1525,6 +1547,9 @@
 	struct regulator_dev *rdev = regulator->rdev;
 	int ret = 0;
 
+	if (regulator->always_on)
+		return 0;
+
 	if (rdev->supply) {
 		ret = regulator_enable(rdev->supply);
 		if (ret != 0)
@@ -1603,6 +1628,9 @@
 	struct regulator_dev *rdev = regulator->rdev;
 	int ret = 0;
 
+	if (regulator->always_on)
+		return 0;
+
 	mutex_lock(&rdev->mutex);
 	ret = _regulator_disable(rdev);
 	mutex_unlock(&rdev->mutex);
@@ -1711,6 +1739,9 @@
 	struct regulator_dev *rdev = regulator->rdev;
 	int ret;
 
+	if (regulator->always_on)
+		return 0;
+
 	mutex_lock(&rdev->mutex);
 	rdev->deferred_disables++;
 	mutex_unlock(&rdev->mutex);
@@ -1724,6 +1755,61 @@
 }
 EXPORT_SYMBOL_GPL(regulator_disable_deferred);
 
+/**
+ * regulator_is_enabled_regmap - standard is_enabled() for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their is_enabled operation, saving some code.
+ */
+int regulator_is_enabled_regmap(struct regulator_dev *rdev)
+{
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
+	if (ret != 0)
+		return ret;
+
+	return (val & rdev->desc->enable_mask) != 0;
+}
+EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);
+
+/**
+ * regulator_enable_regmap - standard enable() for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their enable() operation, saving some code.
+ */
+int regulator_enable_regmap(struct regulator_dev *rdev)
+{
+	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+				  rdev->desc->enable_mask,
+				  rdev->desc->enable_mask);
+}
+EXPORT_SYMBOL_GPL(regulator_enable_regmap);
+
+/**
+ * regulator_disable_regmap - standard disable() for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their disable() operation, saving some code.
+ */
+int regulator_disable_regmap(struct regulator_dev *rdev)
+{
+	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+				  rdev->desc->enable_mask, 0);
+}
+EXPORT_SYMBOL_GPL(regulator_disable_regmap);
+
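As a sketch of how a driver consumes these helpers (the example_* names, register and bit below are hypothetical, not taken from this series): set the two new descriptor fields and point the ops at the helpers.

	static struct regulator_ops example_ldo_ops = {
		.enable		= regulator_enable_regmap,
		.disable	= regulator_disable_regmap,
		.is_enabled	= regulator_is_enabled_regmap,
	};

	static const struct regulator_desc example_ldo_desc = {
		.name		= "example-ldo",
		.id		= 0,
		.type		= REGULATOR_VOLTAGE,
		.owner		= THIS_MODULE,
		.ops		= &example_ldo_ops,
		.enable_reg	= 0x12,		/* assumed enable register */
		.enable_mask	= BIT(0),	/* assumed enable bit */
	};

The helpers read rdev->regmap, so config.regmap has to be supplied when the descriptor is registered (see the regulator_register() changes further down).
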
 static int _regulator_is_enabled(struct regulator_dev *rdev)
 {
 	/* If we don't know then assume that the regulator is always on */
@@ -1749,6 +1835,9 @@
 {
 	int ret;
 
+	if (regulator->always_on)
+		return 1;
+
 	mutex_lock(&regulator->rdev->mutex);
 	ret = _regulator_is_enabled(regulator->rdev);
 	mutex_unlock(&regulator->rdev->mutex);
@@ -1774,6 +1863,26 @@
 EXPORT_SYMBOL_GPL(regulator_count_voltages);
 
 /**
+ * regulator_list_voltage_linear - List voltages with simple calculation
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with a simple linear mapping between voltages and
+ * selectors can set min_uV and uV_step in the regulator descriptor
+ * and then use this function as their list_voltage() operation,
+ */
+int regulator_list_voltage_linear(struct regulator_dev *rdev,
+				  unsigned int selector)
+{
+	if (selector >= rdev->desc->n_voltages)
+		return -EINVAL;
+
+	return rdev->desc->min_uV + (rdev->desc->uV_step * selector);
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_linear);
+
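A worked example of the linear mapping, with made-up numbers: min_uV = 600000 and uV_step = 12500 list selector 0 as 600000 uV and selector 20 as 850000 uV. The descriptor and ops for such a hypothetical regulator reduce to:

	static struct regulator_ops example_buck_ops = {
		.list_voltage	= regulator_list_voltage_linear,
	};

	static const struct regulator_desc example_buck_desc = {
		.name		= "example-buck",
		.type		= REGULATOR_VOLTAGE,
		.owner		= THIS_MODULE,
		.ops		= &example_buck_ops,
		.n_voltages	= 64,		/* selectors 0..63 */
		.min_uV		= 600000,	/* selector 0 -> 0.6 V */
		.uV_step	= 12500,	/* 12.5 mV per step */
	};
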
+/**
  * regulator_list_voltage - enumerate supported voltages
  * @regulator: regulator source
  * @selector: identify voltage to list
@@ -1837,77 +1946,185 @@
 }
 EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
 
+/**
+ * regulator_get_voltage_sel_regmap - standard get_voltage_sel for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * vsel_reg and vsel_mask fields in their descriptor and then use this
+ * as their get_voltage_sel operation, saving some code.
+ */
+int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev)
+{
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
+	if (ret != 0)
+		return ret;
+
+	val &= rdev->desc->vsel_mask;
+	val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);
+
+/**
+ * regulator_set_voltage_sel_regmap - standard set_voltage_sel for regmap users
+ *
+ * @rdev: regulator to operate on
+ * @sel: Selector to set
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * vsel_reg and vsel_mask fields in their descriptor and then use this
+ * as their set_voltage_sel operation, saving some code.
+ */
+int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel)
+{
+	sel <<= ffs(rdev->desc->vsel_mask) - 1;
+
+	return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+				  rdev->desc->vsel_mask, sel);
+}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap);
+
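The selector field does not have to start at bit 0, since both helpers shift by ffs(vsel_mask) - 1. A sketch with an invented register layout (6-bit selector in bits 2..7 of register 0x20), reusing the linear descriptor fields from the earlier sketch:

	static struct regulator_ops example_dcdc_ops = {
		.get_voltage_sel = regulator_get_voltage_sel_regmap,
		.set_voltage_sel = regulator_set_voltage_sel_regmap,
		.list_voltage	 = regulator_list_voltage_linear,
	};

	static const struct regulator_desc example_dcdc_desc = {
		.name		= "example-dcdc",
		.type		= REGULATOR_VOLTAGE,
		.owner		= THIS_MODULE,
		.ops		= &example_dcdc_ops,
		.n_voltages	= 64,
		.min_uV		= 600000,
		.uV_step	= 12500,
		.vsel_reg	= 0x20,		/* assumed selector register */
		.vsel_mask	= 0x3f << 2,	/* assumed 6-bit field, bits 2..7 */
	};
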
+/**
+ * regulator_map_voltage_iterate - map_voltage() based on list_voltage()
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers implementing set_voltage_sel() and list_voltage() can use
+ * this as their map_voltage() operation.  It will find a suitable
+ * voltage by calling list_voltage() until it gets something in bounds
+ * for the requested voltages.
+ */
+int regulator_map_voltage_iterate(struct regulator_dev *rdev,
+				  int min_uV, int max_uV)
+{
+	int best_val = INT_MAX;
+	int selector = 0;
+	int i, ret;
+
+	/* Find the smallest voltage that falls within the specified
+	 * range.
+	 */
+	for (i = 0; i < rdev->desc->n_voltages; i++) {
+		ret = rdev->desc->ops->list_voltage(rdev, i);
+		if (ret < 0)
+			continue;
+
+		if (ret < best_val && ret >= min_uV && ret <= max_uV) {
+			best_val = ret;
+			selector = i;
+		}
+	}
+
+	if (best_val != INT_MAX)
+		return selector;
+	else
+		return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_iterate);
+
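Drivers with irregular voltage tables (such as the ab3100 driver converted earlier) keep their own list_voltage() and can either name this helper as their map_voltage() explicitly or leave map_voltage NULL, in which case the core falls back to it (see the _regulator_do_set_voltage() rework in the next hunk). A hypothetical sketch:

	/* Invented, non-linear table; the helper simply walks list_voltage(). */
	static const unsigned int example_ldo_volts[] = {
		1200000, 1500000, 1800000, 2500000, 2800000, 3300000,
	};

	static int example_list_voltage(struct regulator_dev *rdev,
					unsigned selector)
	{
		if (selector >= ARRAY_SIZE(example_ldo_volts))
			return -EINVAL;
		return example_ldo_volts[selector];
	}

	static struct regulator_ops example_table_ops = {
		.list_voltage	= example_list_voltage,
		.map_voltage	= regulator_map_voltage_iterate,
		/* .get_voltage_sel/.set_voltage_sel as in the regmap sketches */
	};
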
+/**
+ * regulator_map_voltage_linear - map_voltage() for simple linear mappings
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers providing min_uV and uV_step in their regulator_desc can
+ * use this as their map_voltage() operation.
+ */
+int regulator_map_voltage_linear(struct regulator_dev *rdev,
+				 int min_uV, int max_uV)
+{
+	int ret, voltage;
+
+	if (!rdev->desc->uV_step) {
+		BUG_ON(!rdev->desc->uV_step);
+		return -EINVAL;
+	}
+
+	ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
+	if (ret < 0)
+		return ret;
+
+	/* Map back into a voltage to verify we're still in bounds */
+	voltage = rdev->desc->ops->list_voltage(rdev, ret);
+	if (voltage < min_uV || voltage > max_uV)
+		return -EINVAL;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_linear);
+
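Putting the helpers together, a regmap-based regulator with a linear voltage map can be described without any chip-specific voltage code at all. Note that regulator_map_voltage_linear() rounds up to the next step: with the hypothetical 600000 uV base and 12500 uV step used above, a request for 1105000..1200000 uV maps to selector 41 (1112500 uV). The combined ops table would look like:

	static struct regulator_ops example_regmap_linear_ops = {
		.enable		 = regulator_enable_regmap,
		.disable	 = regulator_disable_regmap,
		.is_enabled	 = regulator_is_enabled_regmap,
		.get_voltage_sel = regulator_get_voltage_sel_regmap,
		.set_voltage_sel = regulator_set_voltage_sel_regmap,
		.list_voltage	 = regulator_list_voltage_linear,
		.map_voltage	 = regulator_map_voltage_linear,
	};
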
 static int _regulator_do_set_voltage(struct regulator_dev *rdev,
 				     int min_uV, int max_uV)
 {
 	int ret;
 	int delay = 0;
+	int best_val;
 	unsigned int selector;
+	int old_selector = -1;
 
 	trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
 
 	min_uV += rdev->constraints->uV_offset;
 	max_uV += rdev->constraints->uV_offset;
 
+	/*
+	 * If we can't obtain the old selector there is not enough
+	 * info to call set_voltage_time_sel().
+	 */
+	if (rdev->desc->ops->set_voltage_time_sel &&
+	    rdev->desc->ops->get_voltage_sel) {
+		old_selector = rdev->desc->ops->get_voltage_sel(rdev);
+		if (old_selector < 0)
+			return old_selector;
+	}
+
 	if (rdev->desc->ops->set_voltage) {
 		ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
 						   &selector);
-
-		if (rdev->desc->ops->list_voltage)
-			selector = rdev->desc->ops->list_voltage(rdev,
-								 selector);
-		else
-			selector = -1;
 	} else if (rdev->desc->ops->set_voltage_sel) {
-		int best_val = INT_MAX;
-		int i;
+		if (rdev->desc->ops->map_voltage)
+			ret = rdev->desc->ops->map_voltage(rdev, min_uV,
+							   max_uV);
+		else
+			ret = regulator_map_voltage_iterate(rdev, min_uV,
+							    max_uV);
 
-		selector = 0;
-
-		/* Find the smallest voltage that falls within the specified
-		 * range.
-		 */
-		for (i = 0; i < rdev->desc->n_voltages; i++) {
-			ret = rdev->desc->ops->list_voltage(rdev, i);
-			if (ret < 0)
-				continue;
-
-			if (ret < best_val && ret >= min_uV && ret <= max_uV) {
-				best_val = ret;
-				selector = i;
-			}
-		}
-
-		/*
-		 * If we can't obtain the old selector there is not enough
-		 * info to call set_voltage_time_sel().
-		 */
-		if (rdev->desc->ops->set_voltage_time_sel &&
-		    rdev->desc->ops->get_voltage_sel) {
-			unsigned int old_selector = 0;
-
-			ret = rdev->desc->ops->get_voltage_sel(rdev);
-			if (ret < 0)
-				return ret;
-			old_selector = ret;
-			ret = rdev->desc->ops->set_voltage_time_sel(rdev,
-						old_selector, selector);
-			if (ret < 0)
-				rdev_warn(rdev, "set_voltage_time_sel() failed: %d\n", ret);
-			else
-				delay = ret;
-		}
-
-		if (best_val != INT_MAX) {
-			ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
-			selector = best_val;
-		} else {
-			ret = -EINVAL;
+		if (ret >= 0) {
+			selector = ret;
+			ret = rdev->desc->ops->set_voltage_sel(rdev, ret);
 		}
 	} else {
 		ret = -EINVAL;
 	}
 
+	if (rdev->desc->ops->list_voltage)
+		best_val = rdev->desc->ops->list_voltage(rdev, selector);
+	else
+		best_val = -1;
+
+	/* Call set_voltage_time_sel() if we obtained the old selector */
+	if (ret == 0 && old_selector >= 0 &&
+	    rdev->desc->ops->set_voltage_time_sel) {
+
+		delay = rdev->desc->ops->set_voltage_time_sel(rdev,
+						old_selector, selector);
+		if (delay < 0) {
+			rdev_warn(rdev, "set_voltage_time_sel() failed: %d\n",
+				  delay);
+			delay = 0;
+		}
+	}
+
 	/* Insert any necessary delays */
 	if (delay >= 1000) {
 		mdelay(delay / 1000);
@@ -1920,7 +2137,7 @@
 		_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
 				     NULL);
 
-	trace_regulator_set_voltage_complete(rdev_get_name(rdev), selector);
+	trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val);
 
 	return ret;
 }
@@ -2324,6 +2541,9 @@
 	 */
 	ret = -EINVAL;
 
+	if (!rdev->desc->ops->set_mode)
+		goto out;
+
 	/* get output voltage */
 	output_uV = _regulator_get_voltage(rdev);
 	if (output_uV <= 0) {
@@ -2525,9 +2745,13 @@
 	int i;
 	int ret = 0;
 
-	for (i = 0; i < num_consumers; i++)
-		async_schedule_domain(regulator_bulk_enable_async,
-				      &consumers[i], &async_domain);
+	for (i = 0; i < num_consumers; i++) {
+		if (consumers[i].consumer->always_on)
+			consumers[i].ret = 0;
+		else
+			async_schedule_domain(regulator_bulk_enable_async,
+					      &consumers[i], &async_domain);
+	}
 
 	async_synchronize_full_domain(&async_domain);
 
@@ -2566,7 +2790,7 @@
 			   struct regulator_bulk_data *consumers)
 {
 	int i;
-	int ret;
+	int ret, r;
 
 	for (i = num_consumers - 1; i >= 0; --i) {
 		ret = regulator_disable(consumers[i].consumer);
@@ -2578,8 +2802,12 @@
 
 err:
 	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
-	for (++i; i < num_consumers; ++i)
-		regulator_enable(consumers[i].consumer);
+	for (++i; i < num_consumers; ++i) {
+		r = regulator_enable(consumers[i].consumer);
+		if (r != 0)
+			pr_err("Failed to re-enable %s: %d\n",
+			       consumers[i].supply, r);
+	}
 
 	return ret;
 }
@@ -2756,10 +2984,6 @@
 			return status;
 	}
 
-	/* suspend mode constraints need multiple supporting methods */
-	if (!(ops->set_suspend_enable && ops->set_suspend_disable))
-		return status;
-
 	status = device_create_file(dev, &dev_attr_suspend_standby_state);
 	if (status < 0)
 		return status;
@@ -2820,28 +3044,29 @@
 /**
  * regulator_register - register regulator
  * @regulator_desc: regulator to register
- * @dev: struct device for the regulator
- * @init_data: platform provided init data, passed through by driver
- * @driver_data: private regulator data
- * @of_node: OpenFirmware node to parse for device tree bindings (may be
- *           NULL).
+ * @config: runtime configuration for regulator
  *
  * Called by regulator drivers to register a regulator.
  * Returns 0 on success.
  */
-struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
-	struct device *dev, const struct regulator_init_data *init_data,
-	void *driver_data, struct device_node *of_node)
+struct regulator_dev *
+regulator_register(const struct regulator_desc *regulator_desc,
+		   const struct regulator_config *config)
 {
 	const struct regulation_constraints *constraints = NULL;
+	const struct regulator_init_data *init_data;
 	static atomic_t regulator_no = ATOMIC_INIT(0);
 	struct regulator_dev *rdev;
+	struct device *dev;
 	int ret, i;
 	const char *supply = NULL;
 
-	if (regulator_desc == NULL)
+	if (regulator_desc == NULL || config == NULL)
 		return ERR_PTR(-EINVAL);
 
+	dev = config->dev;
+	WARN_ON(!dev);
+
 	if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
 		return ERR_PTR(-EINVAL);
 
@@ -2865,6 +3090,8 @@
 		return ERR_PTR(-EINVAL);
 	}
 
+	init_data = config->init_data;
+
 	rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
 	if (rdev == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -2872,9 +3099,10 @@
 	mutex_lock(&regulator_list_mutex);
 
 	mutex_init(&rdev->mutex);
-	rdev->reg_data = driver_data;
+	rdev->reg_data = config->driver_data;
 	rdev->owner = regulator_desc->owner;
 	rdev->desc = regulator_desc;
+	rdev->regmap = config->regmap;
 	INIT_LIST_HEAD(&rdev->consumer_list);
 	INIT_LIST_HEAD(&rdev->list);
 	BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
@@ -2889,7 +3117,7 @@
 
 	/* register with sysfs */
 	rdev->dev.class = &regulator_class;
-	rdev->dev.of_node = of_node;
+	rdev->dev.of_node = config->of_node;
 	rdev->dev.parent = dev;
 	dev_set_name(&rdev->dev, "regulator.%d",
 		     atomic_inc_return(&regulator_no) - 1);
@@ -2922,7 +3150,7 @@
 	if (supply) {
 		struct regulator_dev *r;
 
-		r = regulator_dev_lookup(dev, supply);
+		r = regulator_dev_lookup(dev, supply, &ret);
 
 		if (!r) {
 			dev_err(dev, "Failed to find supply %s\n", supply);
@@ -2935,8 +3163,7 @@
 			goto scrub;
 
 		/* Enable supply if rail is enabled */
-		if (rdev->desc->ops->is_enabled &&
-				rdev->desc->ops->is_enabled(rdev)) {
+		if (_regulator_is_enabled(rdev)) {
 			ret = regulator_enable(rdev->supply);
 			if (ret < 0)
 				goto scrub;
@@ -2968,6 +3195,8 @@
 	unset_regulator_supplies(rdev);
 
 scrub:
+	if (rdev->supply)
+		regulator_put(rdev->supply);
 	kfree(rdev->constraints);
 	device_unregister(&rdev->dev);
 	/* device core frees rdev */
@@ -3066,7 +3295,7 @@
 				goto unlock;
 			if (!ops->disable)
 				goto unlock;
-			if (ops->is_enabled && !ops->is_enabled(rdev))
+			if (!_regulator_is_enabled(rdev))
 				goto unlock;
 
 			error = ops->disable(rdev);
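
The driver conversions that follow all take the same shape against the two-argument regulator_register(); roughly, and with hypothetical example_* names reusing the descriptor sketches above:

	static int __devinit example_regulator_probe(struct platform_device *pdev)
	{
		struct regulator_config config = { };
		struct regulator_dev *rdev;

		config.dev = &pdev->dev;
		config.init_data = pdev->dev.platform_data;
		config.of_node = pdev->dev.of_node;
		/* config.driver_data and config.regmap as the driver requires */

		rdev = regulator_register(&example_ldo_desc, &config);
		if (IS_ERR(rdev))
			return PTR_ERR(rdev);

		platform_set_drvdata(pdev, rdev);
		return 0;
	}
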
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 1851f09..1005f5f 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -76,9 +76,7 @@
 struct da903x_regulator_info {
 	struct regulator_desc desc;
 
-	int	min_uV;
 	int	max_uV;
-	int	step_uV;
 	int	vol_reg;
 	int	vol_shift;
 	int	vol_nbits;
@@ -88,10 +86,6 @@
 	int	enable_bit;
 };
 
-static int da9034_ldo12_data[] = { 1700, 1750, 1800, 1850, 1900, 1950,
-				   2000, 2050, 2700, 2750, 2800, 2850,
-				   2900, 2950, 3000, 3050 };
-
 static inline struct device *to_da903x_dev(struct regulator_dev *rdev)
 {
 	return rdev_get_dev(rdev)->parent->parent;
@@ -100,34 +94,26 @@
 static inline int check_range(struct da903x_regulator_info *info,
 				int min_uV, int max_uV)
 {
-	if (min_uV < info->min_uV || min_uV > info->max_uV)
+	if (min_uV < info->desc.min_uV || min_uV > info->max_uV)
 		return -EINVAL;
 
 	return 0;
 }
 
 /* DA9030/DA9034 common operations */
-static int da903x_set_ldo_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV, unsigned *selector)
+static int da903x_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
 	uint8_t val, mask;
 
-	if (check_range(info, min_uV, max_uV)) {
-		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
-		return -EINVAL;
-	}
-
-	val = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
-	*selector = val;
-	val <<= info->vol_shift;
+	val = selector << info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
 	return da903x_update(da9034_dev, info->vol_reg, val, mask);
 }
 
-static int da903x_get_voltage(struct regulator_dev *rdev)
+static int da903x_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
@@ -141,7 +127,7 @@
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 	val = (val & mask) >> info->vol_shift;
 
-	return info->min_uV + info->step_uV * val;
+	return val;
 }
 
 static int da903x_enable(struct regulator_dev *rdev)
@@ -176,35 +162,16 @@
 	return !!(reg_val & (1 << info->enable_bit));
 }
 
-static int da903x_list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
-	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
-	int ret;
-
-	ret = info->min_uV + info->step_uV * selector;
-	if (ret > info->max_uV)
-		return -EINVAL;
-	return ret;
-}
-
 /* DA9030 specific operations */
-static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev,
-				      int min_uV, int max_uV,
-				      unsigned *selector)
+static int da9030_set_ldo1_15_voltage_sel(struct regulator_dev *rdev,
+					  unsigned selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da903x_dev = to_da903x_dev(rdev);
 	uint8_t val, mask;
 	int ret;
 
-	if (check_range(info, min_uV, max_uV)) {
-		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
-		return -EINVAL;
-	}
-
-	val = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
-	*selector = val;
-	val <<= info->vol_shift;
+	val = selector << info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 	val |= DA9030_LDO_UNLOCK; /* have to set UNLOCK bits */
 	mask |= DA9030_LDO_UNLOCK_MASK;
@@ -217,73 +184,57 @@
 	return da903x_update(da903x_dev, info->vol_reg, val, mask);
 }
 
-static int da9030_set_ldo14_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV,
-				    unsigned *selector)
+static int da9030_map_ldo14_voltage(struct regulator_dev *rdev,
+				    int min_uV, int max_uV)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
-	struct device *da903x_dev = to_da903x_dev(rdev);
-	uint8_t val, mask;
-	int thresh;
+	int thresh, sel;
 
 	if (check_range(info, min_uV, max_uV)) {
 		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
 		return -EINVAL;
 	}
 
-	thresh = (info->max_uV + info->min_uV) / 2;
+	thresh = (info->max_uV + info->desc.min_uV) / 2;
 	if (min_uV < thresh) {
-		val = DIV_ROUND_UP(thresh - min_uV, info->step_uV);
-		val |= 0x4;
+		sel = DIV_ROUND_UP(thresh - min_uV, info->desc.uV_step);
+		sel |= 0x4;
 	} else {
-		val = DIV_ROUND_UP(min_uV - thresh, info->step_uV);
+		sel = DIV_ROUND_UP(min_uV - thresh, info->desc.uV_step);
 	}
 
-	*selector = val;
-	val <<= info->vol_shift;
-	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
-
-	return da903x_update(da903x_dev, info->vol_reg, val, mask);
+	return sel;
 }
 
-static int da9030_get_ldo14_voltage(struct regulator_dev *rdev)
+static int da9030_list_ldo14_voltage(struct regulator_dev *rdev,
+				     unsigned selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
-	struct device *da903x_dev = to_da903x_dev(rdev);
-	uint8_t val, mask;
-	int ret;
+	int volt;
 
-	ret = da903x_read(da903x_dev, info->vol_reg, &val);
-	if (ret)
-		return ret;
-
-	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
-	val = (val & mask) >> info->vol_shift;
-
-	if (val & 0x4)
-		return info->min_uV + info->step_uV * (3 - (val & ~0x4));
+	if (selector & 0x4)
+		volt = rdev->desc->min_uV +
+		       rdev->desc->uV_step * (3 - (selector & ~0x4));
 	else
-		return (info->max_uV + info->min_uV) / 2 +
-			info->step_uV * (val & ~0x4);
+		volt = (info->max_uV + rdev->desc->min_uV) / 2 +
+		       rdev->desc->uV_step * (selector & ~0x4);
+
+	if (volt > info->max_uV)
+		return -EINVAL;
+
+	return volt;
 }
 
 /* DA9034 specific operations */
-static int da9034_set_dvc_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV, unsigned *selector)
+static int da9034_set_dvc_voltage_sel(struct regulator_dev *rdev,
+				      unsigned selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
 	uint8_t val, mask;
 	int ret;
 
-	if (check_range(info, min_uV, max_uV)) {
-		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
-		return -EINVAL;
-	}
-
-	val = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
-	*selector = val;
-	val <<= info->vol_shift;
+	val = selector << info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
 	ret = da903x_update(da9034_dev, info->vol_reg, val, mask);
@@ -295,59 +246,45 @@
 	return ret;
 }
 
-static int da9034_set_ldo12_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV, unsigned *selector)
+static int da9034_map_ldo12_voltage(struct regulator_dev *rdev,
+				    int min_uV, int max_uV)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
-	struct device *da9034_dev = to_da903x_dev(rdev);
-	uint8_t val, mask;
+	int sel;
 
 	if (check_range(info, min_uV, max_uV)) {
 		pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
 		return -EINVAL;
 	}
 
-	val = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
-	val = (val >= 20) ? val - 12 : ((val > 7) ? 8 : val);
-	*selector = val;
-	val <<= info->vol_shift;
-	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
+	sel = DIV_ROUND_UP(min_uV - info->desc.min_uV, info->desc.uV_step);
+	sel = (sel >= 20) ? sel - 12 : ((sel > 7) ? 8 : sel);
 
-	return da903x_update(da9034_dev, info->vol_reg, val, mask);
-}
-
-static int da9034_get_ldo12_voltage(struct regulator_dev *rdev)
-{
-	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
-	struct device *da9034_dev = to_da903x_dev(rdev);
-	uint8_t val, mask;
-	int ret;
-
-	ret = da903x_read(da9034_dev, info->vol_reg, &val);
-	if (ret)
-		return ret;
-
-	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
-	val = (val & mask) >> info->vol_shift;
-
-	if (val >= 8)
-		return 2700000 + info->step_uV * (val - 8);
-
-	return info->min_uV + info->step_uV * val;
+	return sel;
 }
 
 static int da9034_list_ldo12_voltage(struct regulator_dev *rdev,
 				     unsigned selector)
 {
-	if (selector >= ARRAY_SIZE(da9034_ldo12_data))
+	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
+	int volt;
+
+	if (selector >= 8)
+		volt = 2700000 + rdev->desc->uV_step * (selector - 8);
+	else
+		volt = rdev->desc->min_uV + rdev->desc->uV_step * selector;
+
+	if (volt > info->max_uV)
 		return -EINVAL;
-	return da9034_ldo12_data[selector] * 1000;
+
+	return volt;
 }
 
 static struct regulator_ops da903x_regulator_ldo_ops = {
-	.set_voltage	= da903x_set_ldo_voltage,
-	.get_voltage	= da903x_get_voltage,
-	.list_voltage	= da903x_list_voltage,
+	.set_voltage_sel = da903x_set_voltage_sel,
+	.get_voltage_sel = da903x_get_voltage_sel,
+	.list_voltage	= regulator_list_voltage_linear,
+	.map_voltage	= regulator_map_voltage_linear,
 	.enable		= da903x_enable,
 	.disable	= da903x_disable,
 	.is_enabled	= da903x_is_enabled,
@@ -355,9 +292,10 @@
 
 /* NOTE: this is dedicated for the insane DA9030 LDO14 */
 static struct regulator_ops da9030_regulator_ldo14_ops = {
-	.set_voltage	= da9030_set_ldo14_voltage,
-	.get_voltage	= da9030_get_ldo14_voltage,
-	.list_voltage	= da903x_list_voltage,
+	.set_voltage_sel = da903x_set_voltage_sel,
+	.get_voltage_sel = da903x_get_voltage_sel,
+	.list_voltage	= da9030_list_ldo14_voltage,
+	.map_voltage	= da9030_map_ldo14_voltage,
 	.enable		= da903x_enable,
 	.disable	= da903x_disable,
 	.is_enabled	= da903x_is_enabled,
@@ -365,18 +303,20 @@
 
 /* NOTE: this is dedicated for the DA9030 LDO1 and LDO15 that have locks  */
 static struct regulator_ops da9030_regulator_ldo1_15_ops = {
-	.set_voltage	= da9030_set_ldo1_15_voltage,
-	.get_voltage	= da903x_get_voltage,
-	.list_voltage	= da903x_list_voltage,
+	.set_voltage_sel = da9030_set_ldo1_15_voltage_sel,
+	.get_voltage_sel = da903x_get_voltage_sel,
+	.list_voltage	= regulator_list_voltage_linear,
+	.map_voltage	= regulator_map_voltage_linear,
 	.enable		= da903x_enable,
 	.disable	= da903x_disable,
 	.is_enabled	= da903x_is_enabled,
 };
 
 static struct regulator_ops da9034_regulator_dvc_ops = {
-	.set_voltage	= da9034_set_dvc_voltage,
-	.get_voltage	= da903x_get_voltage,
-	.list_voltage	= da903x_list_voltage,
+	.set_voltage_sel = da9034_set_dvc_voltage_sel,
+	.get_voltage_sel = da903x_get_voltage_sel,
+	.list_voltage	= regulator_list_voltage_linear,
+	.map_voltage	= regulator_map_voltage_linear,
 	.enable		= da903x_enable,
 	.disable	= da903x_disable,
 	.is_enabled	= da903x_is_enabled,
@@ -384,9 +324,10 @@
 
 /* NOTE: this is dedicated for the insane LDO12 */
 static struct regulator_ops da9034_regulator_ldo12_ops = {
-	.set_voltage	= da9034_set_ldo12_voltage,
-	.get_voltage	= da9034_get_ldo12_voltage,
+	.set_voltage_sel = da903x_set_voltage_sel,
+	.get_voltage_sel = da903x_get_voltage_sel,
 	.list_voltage	= da9034_list_ldo12_voltage,
+	.map_voltage	= da9034_map_ldo12_voltage,
 	.enable		= da903x_enable,
 	.disable	= da903x_disable,
 	.is_enabled	= da903x_is_enabled,
@@ -401,10 +342,10 @@
 		.id	= _pmic##_ID_LDO##_id,				\
 		.n_voltages = (step) ? ((max - min) / step + 1) : 1,	\
 		.owner	= THIS_MODULE,					\
+		.min_uV	 = (min) * 1000,				\
+		.uV_step = (step) * 1000,				\
 	},								\
-	.min_uV		= (min) * 1000,					\
 	.max_uV		= (max) * 1000,					\
-	.step_uV	= (step) * 1000,				\
 	.vol_reg	= _pmic##_##vreg,				\
 	.vol_shift	= (shift),					\
 	.vol_nbits	= (nbits),					\
@@ -421,10 +362,10 @@
 		.id	= _pmic##_ID_##_id,				\
 		.n_voltages = (step) ? ((max - min) / step + 1) : 1,	\
 		.owner	= THIS_MODULE,					\
+		.min_uV = (min) * 1000,					\
+		.uV_step = (step) * 1000,				\
 	},								\
-	.min_uV		= (min) * 1000,					\
 	.max_uV		= (max) * 1000,					\
-	.step_uV	= (step) * 1000,				\
 	.vol_reg	= _pmic##_##vreg,				\
 	.vol_shift	= (0),						\
 	.vol_nbits	= (nbits),					\
@@ -517,6 +458,7 @@
 {
 	struct da903x_regulator_info *ri = NULL;
 	struct regulator_dev *rdev;
+	struct regulator_config config = { };
 
 	ri = find_regulator_info(pdev->id);
 	if (ri == NULL) {
@@ -527,7 +469,7 @@
 	/* Workaround for the weird LDO12 voltage setting */
 	if (ri->desc.id == DA9034_ID_LDO12) {
 		ri->desc.ops = &da9034_regulator_ldo12_ops;
-		ri->desc.n_voltages = ARRAY_SIZE(da9034_ldo12_data);
+		ri->desc.n_voltages = 16;
 	}
 
 	if (ri->desc.id == DA9030_ID_LDO14)
@@ -536,8 +478,11 @@
 	if (ri->desc.id == DA9030_ID_LDO1 || ri->desc.id == DA9030_ID_LDO15)
 		ri->desc.ops = &da9030_regulator_ldo1_15_ops;
 
-	rdev = regulator_register(&ri->desc, &pdev->dev,
-				  pdev->dev.platform_data, ri, NULL);
+	config.dev = &pdev->dev;
+	config.init_data = pdev->dev.platform_data;
+	config.driver_data = ri;
+
+	rdev = regulator_register(&ri->desc, &config);
 	if (IS_ERR(rdev)) {
 		dev_err(&pdev->dev, "failed to register regulator %s\n",
 				ri->desc.name);
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 09915e8..88976d8 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -19,6 +19,10 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
+#endif
 
 #include <linux/mfd/da9052/da9052.h>
 #include <linux/mfd/da9052/reg.h>
@@ -37,6 +41,22 @@
 #define DA9052_BUCK_ILIM_MASK_EVEN	0x0c
 #define DA9052_BUCK_ILIM_MASK_ODD	0xc0
 
+/* DA9052 REGULATOR IDs */
+#define DA9052_ID_BUCK1		0
+#define DA9052_ID_BUCK2		1
+#define DA9052_ID_BUCK3		2
+#define DA9052_ID_BUCK4		3
+#define DA9052_ID_LDO1		4
+#define DA9052_ID_LDO2		5
+#define DA9052_ID_LDO3		6
+#define DA9052_ID_LDO4		7
+#define DA9052_ID_LDO5		8
+#define DA9052_ID_LDO6		9
+#define DA9052_ID_LDO7		10
+#define DA9052_ID_LDO8		11
+#define DA9052_ID_LDO9		12
+#define DA9052_ID_LDO10		13
+
 static const u32 da9052_current_limits[3][4] = {
 	{700000, 800000, 1000000, 1200000},	/* DA9052-BC BUCKs */
 	{1600000, 2000000, 2400000, 3000000},	/* DA9053-AA/Bx BUCK-CORE */
@@ -50,8 +70,6 @@
 	int step_uV;
 	int min_uV;
 	int max_uV;
-	unsigned char volt_shift;
-	unsigned char en_bit;
 	unsigned char activate_bit;
 };
 
@@ -70,42 +88,6 @@
 	return 0;
 }
 
-static int da9052_regulator_enable(struct regulator_dev *rdev)
-{
-	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
-	struct da9052_regulator_info *info = regulator->info;
-	int offset = rdev_get_id(rdev);
-
-	return da9052_reg_update(regulator->da9052,
-				 DA9052_BUCKCORE_REG + offset,
-				 1 << info->en_bit, 1 << info->en_bit);
-}
-
-static int da9052_regulator_disable(struct regulator_dev *rdev)
-{
-	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
-	struct da9052_regulator_info *info = regulator->info;
-	int offset = rdev_get_id(rdev);
-
-	return da9052_reg_update(regulator->da9052,
-				 DA9052_BUCKCORE_REG + offset,
-				 1 << info->en_bit, 0);
-}
-
-static int da9052_regulator_is_enabled(struct regulator_dev *rdev)
-{
-	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
-	struct da9052_regulator_info *info = regulator->info;
-	int offset = rdev_get_id(rdev);
-	int ret;
-
-	ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
-	if (ret < 0)
-		return ret;
-
-	return ret & (1 << info->en_bit);
-}
-
 static int da9052_dcdc_get_current_limit(struct regulator_dev *rdev)
 {
 	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
@@ -173,36 +155,23 @@
 					 reg_val << 6);
 }
 
-static int da9052_list_buckperi_voltage(struct regulator_dev *rdev,
-					 unsigned int selector)
-{
-	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
-	struct da9052_regulator_info *info = regulator->info;
-	int volt_uV;
-
-	if ((regulator->da9052->chip_id == DA9052) &&
-	    (selector >= DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)) {
-		volt_uV = ((DA9052_BUCK_PERI_REG_MAP_UPTO_3uV * info->step_uV)
-			    + info->min_uV);
-		volt_uV += (selector - DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)
-			    * (DA9052_BUCK_PERI_3uV_STEP);
-	} else
-			volt_uV = (selector * info->step_uV) + info->min_uV;
-
-	if (volt_uV > info->max_uV)
-		return -EINVAL;
-
-	return volt_uV;
-}
-
 static int da9052_list_voltage(struct regulator_dev *rdev,
 				unsigned int selector)
 {
 	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
 	struct da9052_regulator_info *info = regulator->info;
+	int id = rdev_get_id(rdev);
 	int volt_uV;
 
-	volt_uV = info->min_uV + info->step_uV * selector;
+	if ((id == DA9052_ID_BUCK4) && (regulator->da9052->chip_id == DA9052)
+		&& (selector >= DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)) {
+		volt_uV = ((DA9052_BUCK_PERI_REG_MAP_UPTO_3uV * info->step_uV)
+			  + info->min_uV);
+		volt_uV += (selector - DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)
+				    * (DA9052_BUCK_PERI_3uV_STEP);
+	} else {
+		volt_uV = (selector * info->step_uV) + info->min_uV;
+	}
 
 	if (volt_uV > info->max_uV)
 		return -EINVAL;
@@ -210,14 +179,13 @@
 	return volt_uV;
 }
 
-static int da9052_regulator_set_voltage_int(struct regulator_dev *rdev,
-					     int min_uV, int max_uV,
-					     unsigned int *selector)
+static int da9052_map_voltage(struct regulator_dev *rdev,
+			      int min_uV, int max_uV)
 {
 	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
 	struct da9052_regulator_info *info = regulator->info;
-	int offset = rdev_get_id(rdev);
-	int ret;
+	int id = rdev_get_id(rdev);
+	int ret, sel;
 
 	ret = verify_range(info, min_uV, max_uV);
 	if (ret < 0)
@@ -226,281 +194,147 @@
 	if (min_uV < info->min_uV)
 		min_uV = info->min_uV;
 
-	*selector = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
+	if ((id == DA9052_ID_BUCK4) && (regulator->da9052->chip_id == DA9052)
+		&& (min_uV >= DA9052_CONST_3uV)) {
+			sel = DA9052_BUCK_PERI_REG_MAP_UPTO_3uV +
+			      DIV_ROUND_UP(min_uV - DA9052_CONST_3uV,
+					   DA9052_BUCK_PERI_3uV_STEP);
+	} else {
+		sel = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
+	}
 
-	ret = da9052_list_voltage(rdev, *selector);
+	ret = da9052_list_voltage(rdev, sel);
 	if (ret < 0)
 		return ret;
 
-	return da9052_reg_update(regulator->da9052,
-				 DA9052_BUCKCORE_REG + offset,
-				 (1 << info->volt_shift) - 1, *selector);
+	return sel;
 }
 
-static int da9052_set_ldo_voltage(struct regulator_dev *rdev,
-				   int min_uV, int max_uV,
-				   unsigned int *selector)
-{
-	return da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
-}
-
-static int da9052_set_ldo5_6_voltage(struct regulator_dev *rdev,
-				      int min_uV, int max_uV,
-				      unsigned int *selector)
+static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev,
+					    unsigned int selector)
 {
 	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
 	struct da9052_regulator_info *info = regulator->info;
+	int id = rdev_get_id(rdev);
 	int ret;
 
-	ret = da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+	ret = da9052_reg_update(regulator->da9052, rdev->desc->vsel_reg,
+				rdev->desc->vsel_mask, selector);
 	if (ret < 0)
 		return ret;
 
-	/* Some LDOs are DVC controlled which requires enabling of
-	 * the LDO activate bit to implment the changes on the
-	 * LDO output.
-	*/
-	return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
-				 info->activate_bit, info->activate_bit);
-}
-
-static int da9052_set_dcdc_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV,
-				    unsigned int *selector)
-{
-	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
-	struct da9052_regulator_info *info = regulator->info;
-	int ret;
-
-	ret = da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
-	if (ret < 0)
-		return ret;
-
-	/* Some DCDCs are DVC controlled which requires enabling of
-	 * the DCDC activate bit to implment the changes on the
-	 * DCDC output.
-	*/
-	return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
-				 info->activate_bit, info->activate_bit);
-}
-
-static int da9052_get_regulator_voltage_sel(struct regulator_dev *rdev)
-{
-	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
-	struct da9052_regulator_info *info = regulator->info;
-	int offset = rdev_get_id(rdev);
-	int ret;
-
-	ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
-	if (ret < 0)
-		return ret;
-
-	ret &= ((1 << info->volt_shift) - 1);
+	/* Some LDOs and DCDCs are DVC controlled, which requires setting
+	 * the activate bit to implement the changes on the output.
+	 */
+	switch (id) {
+	case DA9052_ID_BUCK1:
+	case DA9052_ID_BUCK2:
+	case DA9052_ID_BUCK3:
+	case DA9052_ID_LDO2:
+	case DA9052_ID_LDO3:
+		ret = da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
+					info->activate_bit, info->activate_bit);
+		break;
+	}
 
 	return ret;
 }
 
-static int da9052_set_buckperi_voltage(struct regulator_dev *rdev, int min_uV,
-					int max_uV, unsigned int *selector)
-{
-	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
-	struct da9052_regulator_info *info = regulator->info;
-	int offset = rdev_get_id(rdev);
-	int ret;
-
-	ret = verify_range(info, min_uV, max_uV);
-	if (ret < 0)
-		return ret;
-
-	if (min_uV < info->min_uV)
-		min_uV = info->min_uV;
-
-	if ((regulator->da9052->chip_id == DA9052) &&
-	    (min_uV >= DA9052_CONST_3uV))
-		*selector = DA9052_BUCK_PERI_REG_MAP_UPTO_3uV +
-			    DIV_ROUND_UP(min_uV - DA9052_CONST_3uV,
-					 DA9052_BUCK_PERI_3uV_STEP);
-	else
-		*selector = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
-
-	ret = da9052_list_buckperi_voltage(rdev, *selector);
-	if (ret < 0)
-		return ret;
-
-	return da9052_reg_update(regulator->da9052,
-				 DA9052_BUCKCORE_REG + offset,
-				 (1 << info->volt_shift) - 1, *selector);
-}
-
-static int da9052_get_buckperi_voltage_sel(struct regulator_dev *rdev)
-{
-	struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
-	struct da9052_regulator_info *info = regulator->info;
-	int offset = rdev_get_id(rdev);
-	int ret;
-
-	ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
-	if (ret < 0)
-		return ret;
-
-	ret &= ((1 << info->volt_shift) - 1);
-
-	return ret;
-}
-
-static struct regulator_ops da9052_buckperi_ops = {
-	.list_voltage = da9052_list_buckperi_voltage,
-	.get_voltage_sel = da9052_get_buckperi_voltage_sel,
-	.set_voltage = da9052_set_buckperi_voltage,
-
-	.get_current_limit = da9052_dcdc_get_current_limit,
-	.set_current_limit = da9052_dcdc_set_current_limit,
-
-	.is_enabled = da9052_regulator_is_enabled,
-	.enable = da9052_regulator_enable,
-	.disable = da9052_regulator_disable,
-};
-
 static struct regulator_ops da9052_dcdc_ops = {
-	.set_voltage = da9052_set_dcdc_voltage,
 	.get_current_limit = da9052_dcdc_get_current_limit,
 	.set_current_limit = da9052_dcdc_set_current_limit,
 
 	.list_voltage = da9052_list_voltage,
-	.get_voltage_sel = da9052_get_regulator_voltage_sel,
-	.is_enabled = da9052_regulator_is_enabled,
-	.enable = da9052_regulator_enable,
-	.disable = da9052_regulator_disable,
-};
-
-static struct regulator_ops da9052_ldo5_6_ops = {
-	.set_voltage = da9052_set_ldo5_6_voltage,
-
-	.list_voltage = da9052_list_voltage,
-	.get_voltage_sel = da9052_get_regulator_voltage_sel,
-	.is_enabled = da9052_regulator_is_enabled,
-	.enable = da9052_regulator_enable,
-	.disable = da9052_regulator_disable,
+	.map_voltage = da9052_map_voltage,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_sel = da9052_regulator_set_voltage_sel,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 };
 
 static struct regulator_ops da9052_ldo_ops = {
-	.set_voltage = da9052_set_ldo_voltage,
-
 	.list_voltage = da9052_list_voltage,
-	.get_voltage_sel = da9052_get_regulator_voltage_sel,
-	.is_enabled = da9052_regulator_is_enabled,
-	.enable = da9052_regulator_enable,
-	.disable = da9052_regulator_disable,
+	.map_voltage = da9052_map_voltage,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_sel = da9052_regulator_set_voltage_sel,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 };
 
-#define DA9052_LDO5_6(_id, step, min, max, sbits, ebits, abits) \
-{\
-	.reg_desc = {\
-		.name = "LDO" #_id,\
-		.ops = &da9052_ldo5_6_ops,\
-		.type = REGULATOR_VOLTAGE,\
-		.id = _id,\
-		.n_voltages = (max - min) / step + 1, \
-		.owner = THIS_MODULE,\
-	},\
-	.min_uV = (min) * 1000,\
-	.max_uV = (max) * 1000,\
-	.step_uV = (step) * 1000,\
-	.volt_shift = (sbits),\
-	.en_bit = (ebits),\
-	.activate_bit = (abits),\
-}
-
 #define DA9052_LDO(_id, step, min, max, sbits, ebits, abits) \
 {\
 	.reg_desc = {\
-		.name = "LDO" #_id,\
+		.name = #_id,\
 		.ops = &da9052_ldo_ops,\
 		.type = REGULATOR_VOLTAGE,\
-		.id = _id,\
+		.id = DA9052_ID_##_id,\
 		.n_voltages = (max - min) / step + 1, \
 		.owner = THIS_MODULE,\
+		.vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
+		.vsel_mask = (1 << (sbits)) - 1,\
+		.enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
+		.enable_mask = 1 << (ebits),\
 	},\
 	.min_uV = (min) * 1000,\
 	.max_uV = (max) * 1000,\
 	.step_uV = (step) * 1000,\
-	.volt_shift = (sbits),\
-	.en_bit = (ebits),\
 	.activate_bit = (abits),\
 }
 
 #define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
 {\
 	.reg_desc = {\
-		.name = "BUCK" #_id,\
+		.name = #_id,\
 		.ops = &da9052_dcdc_ops,\
 		.type = REGULATOR_VOLTAGE,\
-		.id = _id,\
+		.id = DA9052_ID_##_id,\
 		.n_voltages = (max - min) / step + 1, \
 		.owner = THIS_MODULE,\
+		.vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
+		.vsel_mask = (1 << (sbits)) - 1,\
+		.enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
+		.enable_mask = 1 << (ebits),\
 	},\
 	.min_uV = (min) * 1000,\
 	.max_uV = (max) * 1000,\
 	.step_uV = (step) * 1000,\
-	.volt_shift = (sbits),\
-	.en_bit = (ebits),\
-	.activate_bit = (abits),\
-}
-
-#define DA9052_BUCKPERI(_id, step, min, max, sbits, ebits, abits) \
-{\
-	.reg_desc = {\
-		.name = "BUCK" #_id,\
-		.ops = &da9052_buckperi_ops,\
-		.type = REGULATOR_VOLTAGE,\
-		.id = _id,\
-		.n_voltages = (max - min) / step + 1, \
-		.owner = THIS_MODULE,\
-	},\
-	.min_uV = (min) * 1000,\
-	.max_uV = (max) * 1000,\
-	.step_uV = (step) * 1000,\
-	.volt_shift = (sbits),\
-	.en_bit = (ebits),\
 	.activate_bit = (abits),\
 }
 
 static struct da9052_regulator_info da9052_regulator_info[] = {
-	/* Buck1 - 4 */
-	DA9052_DCDC(0, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
-	DA9052_DCDC(1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
-	DA9052_DCDC(2, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
-	DA9052_BUCKPERI(3, 50, 1800, 3600, 5, 6, 0),
-	/* LD01 - LDO10 */
-	DA9052_LDO(4, 50, 600, 1800, 5, 6, 0),
-	DA9052_LDO5_6(5, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
-	DA9052_LDO5_6(6, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
-	DA9052_LDO(7, 25, 1725, 3300, 6, 6, 0),
-	DA9052_LDO(8, 50, 1200, 3600, 6, 6, 0),
-	DA9052_LDO(9, 50, 1200, 3600, 6, 6, 0),
-	DA9052_LDO(10, 50, 1200, 3600, 6, 6, 0),
-	DA9052_LDO(11, 50, 1200, 3600, 6, 6, 0),
-	DA9052_LDO(12, 50, 1250, 3650, 6, 6, 0),
-	DA9052_LDO(13, 50, 1200, 3600, 6, 6, 0),
+	DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+	DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+	DA9052_DCDC(BUCK3, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+	DA9052_DCDC(BUCK4, 50, 1800, 3600, 5, 6, 0),
+	DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0),
+	DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+	DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+	DA9052_LDO(LDO4, 25, 1725, 3300, 6, 6, 0),
+	DA9052_LDO(LDO5, 50, 1200, 3600, 6, 6, 0),
+	DA9052_LDO(LDO6, 50, 1200, 3600, 6, 6, 0),
+	DA9052_LDO(LDO7, 50, 1200, 3600, 6, 6, 0),
+	DA9052_LDO(LDO8, 50, 1200, 3600, 6, 6, 0),
+	DA9052_LDO(LDO9, 50, 1250, 3650, 6, 6, 0),
+	DA9052_LDO(LDO10, 50, 1200, 3600, 6, 6, 0),
 };
 
 static struct da9052_regulator_info da9053_regulator_info[] = {
-	/* Buck1 - 4 */
-	DA9052_DCDC(0, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
-	DA9052_DCDC(1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
-	DA9052_DCDC(2, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
-	DA9052_BUCKPERI(3, 25, 925, 2500, 6, 6, 0),
-	/* LD01 - LDO10 */
-	DA9052_LDO(4, 50, 600, 1800, 5, 6, 0),
-	DA9052_LDO5_6(5, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
-	DA9052_LDO5_6(6, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
-	DA9052_LDO(7, 25, 1725, 3300, 6, 6, 0),
-	DA9052_LDO(8, 50, 1200, 3600, 6, 6, 0),
-	DA9052_LDO(9, 50, 1200, 3600, 6, 6, 0),
-	DA9052_LDO(10, 50, 1200, 3600, 6, 6, 0),
-	DA9052_LDO(11, 50, 1200, 3600, 6, 6, 0),
-	DA9052_LDO(12, 50, 1250, 3650, 6, 6, 0),
-	DA9052_LDO(13, 50, 1200, 3600, 6, 6, 0),
+	DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+	DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+	DA9052_DCDC(BUCK3, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+	DA9052_DCDC(BUCK4, 25, 925, 2500, 6, 6, 0),
+	DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0),
+	DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+	DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+	DA9052_LDO(LDO4, 25, 1725, 3300, 6, 6, 0),
+	DA9052_LDO(LDO5, 50, 1200, 3600, 6, 6, 0),
+	DA9052_LDO(LDO6, 50, 1200, 3600, 6, 6, 0),
+	DA9052_LDO(LDO7, 50, 1200, 3600, 6, 6, 0),
+	DA9052_LDO(LDO8, 50, 1200, 3600, 6, 6, 0),
+	DA9052_LDO(LDO9, 50, 1250, 3650, 6, 6, 0),
+	DA9052_LDO(LDO10, 50, 1200, 3600, 6, 6, 0),
 };
 
 static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
@@ -533,10 +367,10 @@
 
 static int __devinit da9052_regulator_probe(struct platform_device *pdev)
 {
+	struct regulator_config config = { };
 	struct da9052_regulator *regulator;
 	struct da9052 *da9052;
 	struct da9052_pdata *pdata;
-	int ret;
 
 	regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9052_regulator),
 				 GFP_KERNEL);
@@ -551,26 +385,49 @@
 					      pdev->id);
 	if (regulator->info == NULL) {
 		dev_err(&pdev->dev, "invalid regulator ID specified\n");
-		ret = -EINVAL;
-		goto err;
+		return -EINVAL;
 	}
+
+	config.dev = &pdev->dev;
+	config.driver_data = regulator;
+	config.regmap = da9052->regmap;
+	if (pdata && pdata->regulators) {
+		config.init_data = pdata->regulators[pdev->id];
+	} else {
+#ifdef CONFIG_OF
+		struct device_node *nproot = da9052->dev->of_node;
+		struct device_node *np;
+
+		if (!nproot)
+			return -ENODEV;
+
+		nproot = of_find_node_by_name(nproot, "regulators");
+		if (!nproot)
+			return -ENODEV;
+
+		for (np = of_get_next_child(nproot, NULL); np;
+		     np = of_get_next_child(nproot, np)) {
+			if (!of_node_cmp(np->name,
+					 regulator->info->reg_desc.name)) {
+				config.init_data = of_get_regulator_init_data(
+					&pdev->dev, np);
+				break;
+			}
+		}
+#endif
+	}
+
 	regulator->rdev = regulator_register(&regulator->info->reg_desc,
-					     &pdev->dev,
-					     pdata->regulators[pdev->id],
-					     regulator, NULL);
+					     &config);
 	if (IS_ERR(regulator->rdev)) {
 		dev_err(&pdev->dev, "failed to register regulator %s\n",
 			regulator->info->reg_desc.name);
-		ret = PTR_ERR(regulator->rdev);
-		goto err;
+		return PTR_ERR(regulator->rdev);
 	}
 
 	platform_set_drvdata(pdev, regulator);
 
 	return 0;
-err:
-	devm_kfree(&pdev->dev, regulator);
-	return ret;
 }
 
 static int __devexit da9052_regulator_remove(struct platform_device *pdev)
@@ -578,8 +435,6 @@
 	struct da9052_regulator *regulator = platform_get_drvdata(pdev);
 
 	regulator_unregister(regulator->rdev);
-	devm_kfree(&pdev->dev, regulator);
-
 	return 0;
 }
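
The DT support added above looks up each regulator's init data by walking the children of a "regulators" node under the PMIC node. A standalone sketch of that lookup; the helper name and the regulator name argument are placeholders:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>

/* Hypothetical helper mirroring the lookup loop added above. */
static struct regulator_init_data *
example_find_init_data(struct device *dev, struct device_node *pmic_node,
		       const char *name)
{
	struct device_node *nproot, *np;
	struct regulator_init_data *init_data = NULL;

	nproot = of_find_node_by_name(pmic_node, "regulators");
	if (!nproot)
		return NULL;

	for_each_child_of_node(nproot, np) {
		if (!of_node_cmp(np->name, name)) {
			init_data = of_get_regulator_init_data(dev, np);
			of_node_put(np);
			break;
		}
	}

	of_node_put(nproot);
	return init_data;
}
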
 
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 4bd25e7..968f97f 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -17,6 +17,8 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/db8500-prcmu.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/of.h>
 #include <linux/module.h>
 #include "dbx500-prcmu.h"
 
@@ -410,45 +412,120 @@
 	},
 };
 
+static __devinit int db8500_regulator_register(struct platform_device *pdev,
+					struct regulator_init_data *init_data,
+					int id,
+					struct device_node *np)
+{
+	struct dbx500_regulator_info *info;
+	struct regulator_config config = { };
+	int err;
+
+	/* assign per-regulator data */
+	info = &dbx500_regulator_info[id];
+	info->dev = &pdev->dev;
+
+	config.dev = &pdev->dev;
+	config.init_data = init_data;
+	config.driver_data = info;
+	config.of_node = np;
+
+	/* register with the regulator framework */
+	info->rdev = regulator_register(&info->desc, &config);
+	if (IS_ERR(info->rdev)) {
+		err = PTR_ERR(info->rdev);
+		dev_err(&pdev->dev, "failed to register %s: err %i\n",
+			info->desc.name, err);
+
+		/* if failing, unregister all earlier regulators */
+		while (--id >= 0) {
+			info = &dbx500_regulator_info[id];
+			regulator_unregister(info->rdev);
+		}
+		return err;
+	}
+
+	dev_dbg(rdev_get_dev(info->rdev),
+		"regulator-%s-probed\n", info->desc.name);
+
+	return 0;
+}
+
+static struct of_regulator_match db8500_regulator_matches[] = {
+	{ .name	= "db8500-vape",          .driver_data = (void *) DB8500_REGULATOR_VAPE, },
+	{ .name	= "db8500-varm",          .driver_data = (void *) DB8500_REGULATOR_VARM, },
+	{ .name	= "db8500-vmodem",        .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
+	{ .name	= "db8500-vpll",          .driver_data = (void *) DB8500_REGULATOR_VPLL, },
+	{ .name	= "db8500-vsmps1",        .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
+	{ .name	= "db8500-vsmps2",        .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
+	{ .name	= "db8500-vsmps3",        .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
+	{ .name	= "db8500-vrf1",          .driver_data = (void *) DB8500_REGULATOR_VRF1, },
+	{ .name	= "db8500-sva-mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
+	{ .name	= "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
+	{ .name	= "db8500-sva-pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
+	{ .name	= "db8500-sia-mmdsp",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
+	{ .name	= "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
+	{ .name	= "db8500-sia-pipe",      .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
+	{ .name	= "db8500-sga",           .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
+	{ .name	= "db8500-b2r2-mcde",     .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
+	{ .name	= "db8500-esram12",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
+	{ .name	= "db8500-esram12-ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
+	{ .name	= "db8500-esram34",       .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
+	{ .name	= "db8500-esram34-ret",   .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
+};
+
+static __devinit int
+db8500_regulator_of_probe(struct platform_device *pdev,
+			struct device_node *np)
+{
+	int i, err;
+
+	for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+		err = db8500_regulator_register(
+			pdev, db8500_regulator_matches[i].init_data,
+			i, db8500_regulator_matches[i].of_node);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int __devinit db8500_regulator_probe(struct platform_device *pdev)
 {
 	struct regulator_init_data *db8500_init_data =
 					dev_get_platdata(&pdev->dev);
+	struct device_node *np = pdev->dev.of_node;
 	int i, err;
 
 	/* register all regulators */
-	for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
-		struct dbx500_regulator_info *info;
-		struct regulator_init_data *init_data = &db8500_init_data[i];
-
-		/* assign per-regulator data */
-		info = &dbx500_regulator_info[i];
-		info->dev = &pdev->dev;
-
-		/* register with the regulator framework */
-		info->rdev = regulator_register(&info->desc, &pdev->dev,
-				init_data, info, NULL);
-		if (IS_ERR(info->rdev)) {
-			err = PTR_ERR(info->rdev);
-			dev_err(&pdev->dev, "failed to register %s: err %i\n",
-				info->desc.name, err);
-
-			/* if failing, unregister all earlier regulators */
-			while (--i >= 0) {
-				info = &dbx500_regulator_info[i];
-				regulator_unregister(info->rdev);
-			}
+	if (np) {
+		err = of_regulator_match(&pdev->dev, np,
+					db8500_regulator_matches,
+					ARRAY_SIZE(db8500_regulator_matches));
+		if (err < 0) {
+			dev_err(&pdev->dev,
+				"Error parsing regulator init data: %d\n", err);
 			return err;
 		}
 
-		dev_dbg(rdev_get_dev(info->rdev),
-			"regulator-%s-probed\n", info->desc.name);
+		err = db8500_regulator_of_probe(pdev, np);
+		if (err)
+			return err;
+	} else {
+		for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) {
+			err = db8500_regulator_register(pdev,
+							&db8500_init_data[i],
+							i, NULL);
+			if (err)
+				return err;
+		}
 	}
+
 	err = ux500_regulator_debug_init(pdev,
 					 dbx500_regulator_info,
 					 ARRAY_SIZE(dbx500_regulator_info));
-
-	return err;
+	return 0;
 }
 
 static int __exit db8500_regulator_remove(struct platform_device *pdev)
@@ -470,10 +547,16 @@
 	return 0;
 }
 
+static const struct of_device_id db8500_prcmu_regulator_match[] = {
+	{ .compatible = "stericsson,db8500-prcmu-regulator", },
+	{}
+};
+
 static struct platform_driver db8500_regulator_driver = {
 	.driver = {
 		.name = "db8500-prcmu-regulators",
 		.owner = THIS_MODULE,
+		.of_match_table = db8500_prcmu_regulator_match,
 	},
 	.probe = db8500_regulator_probe,
 	.remove = __exit_p(db8500_regulator_remove),
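
of_regulator_match(), as used above, fills a match table from the children of the given node; each matched entry carries the parsed init_data and the of_node to pass on through struct regulator_config. A simplified sketch with made-up node names:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/of_regulator.h>

/* Hypothetical match table; node names are made up. */
static struct of_regulator_match example_matches[] = {
	{ .name = "example-vaux1" },
	{ .name = "example-vaux2" },
};

static int example_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int i, ret;

	ret = of_regulator_match(&pdev->dev, np, example_matches,
				 ARRAY_SIZE(example_matches));
	if (ret < 0)
		return ret;

	for (i = 0; i < ARRAY_SIZE(example_matches); i++) {
		if (!example_matches[i].init_data)
			continue;	/* node absent in this DT */
		/* register here, passing example_matches[i].init_data and
		 * example_matches[i].of_node via struct regulator_config */
	}

	return 0;
}
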
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 0ee00de..86f655c 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -39,10 +39,13 @@
 
 static int __devinit dummy_regulator_probe(struct platform_device *pdev)
 {
+	struct regulator_config config = { };
 	int ret;
 
-	dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
-						  &dummy_initdata, NULL, NULL);
+	config.dev = &pdev->dev;
+	config.init_data = &dummy_initdata;
+
+	dummy_regulator_rdev = regulator_register(&dummy_desc, &config);
 	if (IS_ERR(dummy_regulator_rdev)) {
 		ret = PTR_ERR(dummy_regulator_rdev);
 		pr_err("Failed to register regulator: %d\n", ret);
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 40f3803..f09fe7b 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -25,7 +25,6 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/fixed.h>
 #include <linux/gpio.h>
-#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
@@ -91,6 +90,9 @@
 	if (of_find_property(np, "enable-active-high", NULL))
 		config->enable_high = true;
 
+	if (of_find_property(np, "gpio-open-drain", NULL))
+		config->gpio_is_open_drain = true;
+
 	return config;
 }
 
@@ -105,10 +107,8 @@
 {
 	struct fixed_voltage_data *data = rdev_get_drvdata(dev);
 
-	if (gpio_is_valid(data->gpio)) {
-		gpio_set_value_cansleep(data->gpio, data->enable_high);
-		data->is_enabled = true;
-	}
+	gpio_set_value_cansleep(data->gpio, data->enable_high);
+	data->is_enabled = true;
 
 	return 0;
 }
@@ -117,10 +117,8 @@
 {
 	struct fixed_voltage_data *data = rdev_get_drvdata(dev);
 
-	if (gpio_is_valid(data->gpio)) {
-		gpio_set_value_cansleep(data->gpio, !data->enable_high);
-		data->is_enabled = false;
-	}
+	gpio_set_value_cansleep(data->gpio, !data->enable_high);
+	data->is_enabled = false;
 
 	return 0;
 }
@@ -153,7 +151,7 @@
 	return data->microvolts;
 }
 
-static struct regulator_ops fixed_voltage_ops = {
+static struct regulator_ops fixed_voltage_gpio_ops = {
 	.is_enabled = fixed_voltage_is_enabled,
 	.enable = fixed_voltage_enable,
 	.disable = fixed_voltage_disable,
@@ -162,10 +160,16 @@
 	.list_voltage = fixed_voltage_list_voltage,
 };
 
+static struct regulator_ops fixed_voltage_ops = {
+	.get_voltage = fixed_voltage_get_voltage,
+	.list_voltage = fixed_voltage_list_voltage,
+};
+
 static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
 {
 	struct fixed_voltage_config *config;
 	struct fixed_voltage_data *drvdata;
+	struct regulator_config cfg = { };
 	int ret;
 
 	if (pdev->dev.of_node)
@@ -176,7 +180,8 @@
 	if (!config)
 		return -ENOMEM;
 
-	drvdata = kzalloc(sizeof(struct fixed_voltage_data), GFP_KERNEL);
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(struct fixed_voltage_data),
+			       GFP_KERNEL);
 	if (drvdata == NULL) {
 		dev_err(&pdev->dev, "Failed to allocate device data\n");
 		ret = -ENOMEM;
@@ -191,7 +196,6 @@
 	}
 	drvdata->desc.type = REGULATOR_VOLTAGE;
 	drvdata->desc.owner = THIS_MODULE;
-	drvdata->desc.ops = &fixed_voltage_ops;
 
 	if (config->microvolts)
 		drvdata->desc.n_voltages = 1;
@@ -201,6 +205,7 @@
 	drvdata->startup_delay = config->startup_delay;
 
 	if (gpio_is_valid(config->gpio)) {
+		int gpio_flag;
 		drvdata->enable_high = config->enable_high;
 
 		/* FIXME: Remove below print warning
@@ -218,7 +223,20 @@
 			dev_warn(&pdev->dev,
 				"using GPIO 0 for regulator enable control\n");
 
-		ret = gpio_request(config->gpio, config->supply_name);
+		/*
+		 * set output direction without changing state
+		 * to prevent glitch
+		 */
+		drvdata->is_enabled = config->enabled_at_boot;
+		ret = drvdata->is_enabled ?
+				config->enable_high : !config->enable_high;
+		gpio_flag = ret ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+
+		if (config->gpio_is_open_drain)
+			gpio_flag |= GPIOF_OPEN_DRAIN;
+
+		ret = gpio_request_one(config->gpio, gpio_flag,
+						config->supply_name);
 		if (ret) {
 			dev_err(&pdev->dev,
 			   "Could not obtain regulator enable GPIO %d: %d\n",
@@ -226,31 +244,18 @@
 			goto err_name;
 		}
 
-		/* set output direction without changing state
-		 * to prevent glitch
-		 */
-		drvdata->is_enabled = config->enabled_at_boot;
-		ret = drvdata->is_enabled ?
-				config->enable_high : !config->enable_high;
-
-		ret = gpio_direction_output(config->gpio, ret);
-		if (ret) {
-			dev_err(&pdev->dev,
-			   "Could not configure regulator enable GPIO %d direction: %d\n",
-							config->gpio, ret);
-			goto err_gpio;
-		}
+		drvdata->desc.ops = &fixed_voltage_gpio_ops;
 
 	} else {
-		/* Regulator without GPIO control is considered
-		 * always enabled
-		 */
-		drvdata->is_enabled = true;
+		drvdata->desc.ops = &fixed_voltage_ops;
 	}
 
-	drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
-					  config->init_data, drvdata,
-					  pdev->dev.of_node);
+	cfg.dev = &pdev->dev;
+	cfg.init_data = config->init_data;
+	cfg.driver_data = drvdata;
+	cfg.of_node = pdev->dev.of_node;
+
+	drvdata->dev = regulator_register(&drvdata->desc, &cfg);
 	if (IS_ERR(drvdata->dev)) {
 		ret = PTR_ERR(drvdata->dev);
 		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
@@ -270,7 +275,6 @@
 err_name:
 	kfree(drvdata->desc.name);
 err:
-	kfree(drvdata);
 	return ret;
 }
 
@@ -282,7 +286,6 @@
 	if (gpio_is_valid(drvdata->gpio))
 		gpio_free(drvdata->gpio);
 	kfree(drvdata->desc.name);
-	kfree(drvdata);
 
 	return 0;
 }
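
The glitch-free GPIO setup above relies on gpio_request_one() taking the initial output level (and the open-drain flag) at request time, so the enable line never passes through a default state. A small sketch; the GPIO number and label are placeholder values:

#include <linux/gpio.h>
#include <linux/types.h>

/* GPIO number and label below are placeholders. */
static int example_claim_enable_gpio(bool enable_high, bool enabled_at_boot,
				     bool open_drain)
{
	unsigned long flags;
	bool level = enabled_at_boot ? enable_high : !enable_high;

	/* request and drive the pin to its current level in one step */
	flags = level ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
	if (open_drain)
		flags |= GPIOF_OPEN_DRAIN;

	return gpio_request_one(42, flags, "example-vcc-enable");
}
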
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 42e1cb1..9997d7a 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -30,7 +30,6 @@
 #include <linux/regulator/machine.h>
 #include <linux/regulator/gpio-regulator.h>
 #include <linux/gpio.h>
-#include <linux/delay.h>
 #include <linux/slab.h>
 
 struct gpio_regulator_data {
@@ -105,15 +104,15 @@
 					int min, int max)
 {
 	struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-	int ptr, target, state;
+	int ptr, target = 0, state, best_val = INT_MAX;
 
-	target = -1;
 	for (ptr = 0; ptr < data->nr_states; ptr++)
-		if (data->states[ptr].value >= min &&
-		    data->states[ptr].value <= max)
-			target = data->states[ptr].gpios;
+		if (data->states[ptr].value < best_val &&
+		    data->states[ptr].value >= min &&
+		    data->states[ptr].value <= max) {
+			target = data->states[ptr].gpios;
+			best_val = data->states[ptr].value;
+		}
 
-	if (target < 0)
+	if (best_val == INT_MAX)
 		return -EINVAL;
 
 	for (ptr = 0; ptr < data->nr_gpios; ptr++) {
@@ -172,9 +171,11 @@
 {
 	struct gpio_regulator_config *config = pdev->dev.platform_data;
 	struct gpio_regulator_data *drvdata;
+	struct regulator_config cfg = { };
 	int ptr, ret, state;
 
-	drvdata = kzalloc(sizeof(struct gpio_regulator_data), GFP_KERNEL);
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data),
+			       GFP_KERNEL);
 	if (drvdata == NULL) {
 		dev_err(&pdev->dev, "Failed to allocate device data\n");
 		return -ENOMEM;
@@ -283,8 +284,11 @@
 	}
 	drvdata->state = state;
 
-	drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
-					  config->init_data, drvdata, NULL);
+	cfg.dev = &pdev->dev;
+	cfg.init_data = config->init_data;
+	cfg.driver_data = drvdata;
+
+	drvdata->dev = regulator_register(&drvdata->desc, &cfg);
 	if (IS_ERR(drvdata->dev)) {
 		ret = PTR_ERR(drvdata->dev);
 		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
@@ -307,7 +311,6 @@
 err_name:
 	kfree(drvdata->desc.name);
 err:
-	kfree(drvdata);
 	return ret;
 }
 
@@ -326,7 +329,6 @@
 		gpio_free(drvdata->enable_gpio);
 
 	kfree(drvdata->desc.name);
-	kfree(drvdata);
 
 	return 0;
 }
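
The selection logic above now picks the lowest state that still falls inside the requested range, instead of the last in-range entry. The same idea as a standalone helper; the types and names are invented for illustration:

#include <linux/errno.h>
#include <linux/kernel.h>

/* Invented type: a state maps a value (uV or uA) to a GPIO bit pattern. */
struct example_state {
	int value;
	int gpios;
};

static int example_pick_state(const struct example_state *states, int n,
			      int min, int max)
{
	int i, target = 0, best_val = INT_MAX;

	for (i = 0; i < n; i++) {
		if (states[i].value < best_val &&
		    states[i].value >= min && states[i].value <= max) {
			best_val = states[i].value;
			target = states[i].gpios;
		}
	}

	return best_val == INT_MAX ? -EINVAL : target;
}
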
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 775f5fd..56d273f 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -22,7 +22,6 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/i2c.h>
-#include <linux/delay.h>
 #include <linux/slab.h>
 
 #define	ISL6271A_VOLTAGE_MIN	850000
@@ -36,47 +35,30 @@
 	struct mutex		mtx;
 };
 
-static int isl6271a_get_voltage(struct regulator_dev *dev)
+static int isl6271a_get_voltage_sel(struct regulator_dev *dev)
 {
 	struct isl_pmic *pmic = rdev_get_drvdata(dev);
-	int idx, data;
+	int idx;
 
 	mutex_lock(&pmic->mtx);
 
 	idx = i2c_smbus_read_byte(pmic->client);
-	if (idx < 0) {
+	if (idx < 0)
 		dev_err(&pmic->client->dev, "Error getting voltage\n");
-		data = idx;
-		goto out;
-	}
 
-	/* Convert the data from chip to microvolts */
-	data = ISL6271A_VOLTAGE_MIN + (ISL6271A_VOLTAGE_STEP * (idx & 0xf));
-
-out:
 	mutex_unlock(&pmic->mtx);
-	return data;
+	return idx;
 }
 
-static int isl6271a_set_voltage(struct regulator_dev *dev,
-				int minuV, int maxuV,
-				unsigned *selector)
+static int isl6271a_set_voltage_sel(struct regulator_dev *dev,
+				    unsigned selector)
 {
 	struct isl_pmic *pmic = rdev_get_drvdata(dev);
-	int err, data;
-
-	if (minuV < ISL6271A_VOLTAGE_MIN || minuV > ISL6271A_VOLTAGE_MAX)
-		return -EINVAL;
-	if (maxuV < ISL6271A_VOLTAGE_MIN || maxuV > ISL6271A_VOLTAGE_MAX)
-		return -EINVAL;
-
-	data = DIV_ROUND_UP(minuV - ISL6271A_VOLTAGE_MIN,
-			    ISL6271A_VOLTAGE_STEP);
-	*selector = data;
+	int err;
 
 	mutex_lock(&pmic->mtx);
 
-	err = i2c_smbus_write_byte(pmic->client, data);
+	err = i2c_smbus_write_byte(pmic->client, selector);
 	if (err < 0)
 		dev_err(&pmic->client->dev, "Error setting voltage\n");
 
@@ -84,15 +66,11 @@
 	return err;
 }
 
-static int isl6271a_list_voltage(struct regulator_dev *dev, unsigned selector)
-{
-	return ISL6271A_VOLTAGE_MIN + (ISL6271A_VOLTAGE_STEP * selector);
-}
-
 static struct regulator_ops isl_core_ops = {
-	.get_voltage	= isl6271a_get_voltage,
-	.set_voltage	= isl6271a_set_voltage,
-	.list_voltage	= isl6271a_list_voltage,
+	.get_voltage_sel = isl6271a_get_voltage_sel,
+	.set_voltage_sel = isl6271a_set_voltage_sel,
+	.list_voltage	= regulator_list_voltage_linear,
+	.map_voltage	= regulator_map_voltage_linear,
 };
 
 static int isl6271a_get_fixed_voltage(struct regulator_dev *dev)
@@ -112,7 +90,7 @@
 	.list_voltage	= isl6271a_list_fixed_voltage,
 };
 
-static struct regulator_desc isl_rd[] = {
+static const struct regulator_desc isl_rd[] = {
 	{
 		.name		= "Core Buck",
 		.id		= 0,
@@ -120,6 +98,8 @@
 		.ops		= &isl_core_ops,
 		.type		= REGULATOR_VOLTAGE,
 		.owner		= THIS_MODULE,
+		.min_uV		= ISL6271A_VOLTAGE_MIN,
+		.uV_step	= ISL6271A_VOLTAGE_STEP,
 	}, {
 		.name		= "LDO1",
 		.id		= 1,
@@ -140,6 +120,7 @@
 static int __devinit isl6271a_probe(struct i2c_client *i2c,
 				     const struct i2c_device_id *id)
 {
+	struct regulator_config config = { };
 	struct regulator_init_data *init_data	= i2c->dev.platform_data;
 	struct isl_pmic *pmic;
 	int err, i;
@@ -147,12 +128,7 @@
 	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
 		return -EIO;
 
-	if (!init_data) {
-		dev_err(&i2c->dev, "no platform data supplied\n");
-		return -EIO;
-	}
-
-	pmic = kzalloc(sizeof(struct isl_pmic), GFP_KERNEL);
+	pmic = devm_kzalloc(&i2c->dev, sizeof(struct isl_pmic), GFP_KERNEL);
 	if (!pmic)
 		return -ENOMEM;
 
@@ -161,8 +137,14 @@
 	mutex_init(&pmic->mtx);
 
 	for (i = 0; i < 3; i++) {
-		pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
-						init_data, pmic, NULL);
+		config.dev = &i2c->dev;
+		if (i == 0)
+			config.init_data = init_data;
+		else
+			config.init_data = NULL;
+		config.driver_data = pmic;
+
+		pmic->rdev[i] = regulator_register(&isl_rd[i], &config);
 		if (IS_ERR(pmic->rdev[i])) {
 			dev_err(&i2c->dev, "failed to register %s\n", id->name);
 			err = PTR_ERR(pmic->rdev[i]);
@@ -177,8 +159,6 @@
 error:
 	while (--i >= 0)
 		regulator_unregister(pmic->rdev[i]);
-
-	kfree(pmic);
 	return err;
 }
 
@@ -189,9 +169,6 @@
 
 	for (i = 0; i < 3; i++)
 		regulator_unregister(pmic->rdev[i]);
-
-	kfree(pmic);
-
 	return 0;
 }
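
regulator_list_voltage_linear() and regulator_map_voltage_linear(), used above, derive voltages from the descriptor's min_uV and uV_step (850000 uV and 50000 uV for the ISL6271A core buck). A simplified model of what they compute, not the kernel implementation itself:

#include <linux/errno.h>
#include <linux/kernel.h>

/* list_voltage: selector -> microvolts */
static int example_list_voltage_linear(int min_uV, int uV_step,
				       int n_voltages, int sel)
{
	if (sel < 0 || sel >= n_voltages)
		return -EINVAL;
	return min_uV + sel * uV_step;
}

/* map_voltage: smallest selector whose voltage satisfies the request */
static int example_map_voltage_linear(int min_uV, int uV_step, int n_voltages,
				      int req_min_uV, int req_max_uV)
{
	int sel, uV;

	if (req_min_uV < min_uV)
		req_min_uV = min_uV;

	sel = DIV_ROUND_UP(req_min_uV - min_uV, uV_step);
	uV = example_list_voltage_linear(min_uV, uV_step, n_voltages, sel);
	if (uV < 0 || uV > req_max_uV)
		return -EINVAL;
	return sel;
}
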
 
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 0cfabd3..981bea9 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -124,6 +124,10 @@
 static int lp3971_ldo_list_voltage(struct regulator_dev *dev, unsigned index)
 {
 	int ldo = rdev_get_id(dev) - LP3971_LDO1;
+
+	if (index > LDO_VOL_MAX_IDX)
+		return -EINVAL;
+
 	return 1000 * LDO_VOL_VALUE_MAP(ldo)[index];
 }
 
@@ -168,32 +172,15 @@
 	return 1000 * LDO_VOL_VALUE_MAP(ldo)[val];
 }
 
-static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV,
-				  unsigned int *selector)
+static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
+				      unsigned int selector)
 {
 	struct lp3971 *lp3971 = rdev_get_drvdata(dev);
 	int ldo = rdev_get_id(dev) - LP3971_LDO1;
-	int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
-	const int *vol_map = LDO_VOL_VALUE_MAP(ldo);
-	u16 val;
-
-	if (min_vol < vol_map[LDO_VOL_MIN_IDX] ||
-	    min_vol > vol_map[LDO_VOL_MAX_IDX])
-		return -EINVAL;
-
-	for (val = LDO_VOL_MIN_IDX; val <= LDO_VOL_MAX_IDX; val++)
-		if (vol_map[val] >= min_vol)
-			break;
-
-	if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
-		return -EINVAL;
-
-	*selector = val;
 
 	return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
 			LDO_VOL_CONTR_MASK << LDO_VOL_CONTR_SHIFT(ldo),
-			val << LDO_VOL_CONTR_SHIFT(ldo));
+			selector << LDO_VOL_CONTR_SHIFT(ldo));
 }
 
 static struct regulator_ops lp3971_ldo_ops = {
@@ -202,11 +189,14 @@
 	.enable = lp3971_ldo_enable,
 	.disable = lp3971_ldo_disable,
 	.get_voltage = lp3971_ldo_get_voltage,
-	.set_voltage = lp3971_ldo_set_voltage,
+	.set_voltage_sel = lp3971_ldo_set_voltage_sel,
 };
 
 static int lp3971_dcdc_list_voltage(struct regulator_dev *dev, unsigned index)
 {
+	if (index < BUCK_TARGET_VOL_MIN_IDX || index > BUCK_TARGET_VOL_MAX_IDX)
+		return -EINVAL;
+
 	return 1000 * buck_voltage_map[index];
 }
 
@@ -259,33 +249,15 @@
 	return val;
 }
 
-static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
-				   int min_uV, int max_uV,
-				   unsigned int *selector)
+static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev,
+				       unsigned int selector)
 {
 	struct lp3971 *lp3971 = rdev_get_drvdata(dev);
 	int buck = rdev_get_id(dev) - LP3971_DCDC1;
-	int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
-	const int *vol_map = buck_voltage_map;
-	u16 val;
 	int ret;
 
-	if (min_vol < vol_map[BUCK_TARGET_VOL_MIN_IDX] ||
-	    min_vol > vol_map[BUCK_TARGET_VOL_MAX_IDX])
-		return -EINVAL;
-
-	for (val = BUCK_TARGET_VOL_MIN_IDX; val <= BUCK_TARGET_VOL_MAX_IDX;
-	     val++)
-		if (vol_map[val] >= min_vol)
-			break;
-
-	if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
-		return -EINVAL;
-
-	*selector = val;
-
 	ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
-	       BUCK_TARGET_VOL_MASK, val);
+	       BUCK_TARGET_VOL_MASK, selector);
 	if (ret)
 		return ret;
 
@@ -306,10 +278,10 @@
 	.enable = lp3971_dcdc_enable,
 	.disable = lp3971_dcdc_disable,
 	.get_voltage = lp3971_dcdc_get_voltage,
-	.set_voltage = lp3971_dcdc_set_voltage,
+	.set_voltage_sel = lp3971_dcdc_set_voltage_sel,
 };
 
-static struct regulator_desc regulators[] = {
+static const struct regulator_desc regulators[] = {
 	{
 		.name = "LDO1",
 		.id = LP3971_LDO1,
@@ -449,10 +421,15 @@
 
 	/* Instantiate the regulators */
 	for (i = 0; i < pdata->num_regulators; i++) {
+		struct regulator_config config = { };
 		struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
-		lp3971->rdev[i] = regulator_register(&regulators[reg->id],
-				lp3971->dev, reg->initdata, lp3971, NULL);
 
+		config.dev = lp3971->dev;
+		config.init_data = reg->initdata;
+		config.driver_data = lp3971;
+
+		lp3971->rdev[i] = regulator_register(&regulators[reg->id],
+						     &config);
 		if (IS_ERR(lp3971->rdev[i])) {
 			err = PTR_ERR(lp3971->rdev[i]);
 			dev_err(lp3971->dev, "regulator init failed: %d\n",
@@ -545,23 +522,7 @@
 	.id_table = lp3971_i2c_id,
 };
 
-static int __init lp3971_module_init(void)
-{
-	int ret;
-
-	ret = i2c_add_driver(&lp3971_i2c_driver);
-	if (ret != 0)
-		pr_err("Failed to register I2C driver: %d\n", ret);
-
-	return ret;
-}
-module_init(lp3971_module_init);
-
-static void __exit lp3971_module_exit(void)
-{
-	i2c_del_driver(&lp3971_i2c_driver);
-}
-module_exit(lp3971_module_exit);
+module_i2c_driver(lp3971_i2c_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Marek Szyprowski <m.szyprowski@samsung.com>");
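
module_i2c_driver(), used above, replaces the hand-written init/exit boilerplate. Roughly what the macro stands in for, with the driver structure trimmed to a placeholder:

#include <linux/i2c.h>
#include <linux/module.h>

/* Placeholder driver; .probe/.id_table omitted for brevity. */
static struct i2c_driver example_i2c_driver = {
	.driver = {
		.name = "example",
	},
};

/* Roughly what module_i2c_driver(example_i2c_driver) expands to: */
static int __init example_module_init(void)
{
	return i2c_add_driver(&example_i2c_driver);
}
module_init(example_module_init);

static void __exit example_module_exit(void)
{
	i2c_del_driver(&example_i2c_driver);
}
module_exit(example_module_exit);
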
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 49a15ee..de073df 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -245,6 +245,11 @@
 static int lp3972_ldo_list_voltage(struct regulator_dev *dev, unsigned index)
 {
 	int ldo = rdev_get_id(dev) - LP3972_LDO1;
+
+	if (index < LP3972_LDO_VOL_MIN_IDX(ldo) ||
+	    index > LP3972_LDO_VOL_MAX_IDX(ldo))
+		return -EINVAL;
+
 	return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[index];
 }
 
@@ -292,34 +297,16 @@
 	return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[val];
 }
 
-static int lp3972_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV,
-				  unsigned int *selector)
+static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
+				      unsigned int selector)
 {
 	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
 	int ldo = rdev_get_id(dev) - LP3972_LDO1;
-	int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
-	const int *vol_map = LP3972_LDO_VOL_VALUE_MAP(ldo);
-	u16 val;
 	int shift, ret;
 
-	if (min_vol < vol_map[LP3972_LDO_VOL_MIN_IDX(ldo)] ||
-	    min_vol > vol_map[LP3972_LDO_VOL_MAX_IDX(ldo)])
-		return -EINVAL;
-
-	for (val = LP3972_LDO_VOL_MIN_IDX(ldo);
-		val <= LP3972_LDO_VOL_MAX_IDX(ldo); val++)
-		if (vol_map[val] >= min_vol)
-			break;
-
-	if (val > LP3972_LDO_VOL_MAX_IDX(ldo) || vol_map[val] > max_vol)
-		return -EINVAL;
-
-	*selector = val;
-
 	shift = LP3972_LDO_VOL_CONTR_SHIFT(ldo);
 	ret = lp3972_set_bits(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo),
-		LP3972_LDO_VOL_MASK(ldo) << shift, val << shift);
+		LP3972_LDO_VOL_MASK(ldo) << shift, selector << shift);
 
 	if (ret)
 		return ret;
@@ -355,12 +342,17 @@
 	.enable = lp3972_ldo_enable,
 	.disable = lp3972_ldo_disable,
 	.get_voltage = lp3972_ldo_get_voltage,
-	.set_voltage = lp3972_ldo_set_voltage,
+	.set_voltage_sel = lp3972_ldo_set_voltage_sel,
 };
 
 static int lp3972_dcdc_list_voltage(struct regulator_dev *dev, unsigned index)
 {
 	int buck = rdev_get_id(dev) - LP3972_DCDC1;
+
+	if (index < LP3972_BUCK_VOL_MIN_IDX(buck) ||
+	    index > LP3972_BUCK_VOL_MAX_IDX(buck))
+		return -EINVAL;
+
 	return 1000 * buck_voltage_map[buck][index];
 }
 
@@ -419,34 +411,15 @@
 	return val;
 }
 
-static int lp3972_dcdc_set_voltage(struct regulator_dev *dev,
-				   int min_uV, int max_uV,
-				   unsigned int *selector)
+static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev,
+				       unsigned int selector)
 {
 	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
 	int buck = rdev_get_id(dev) - LP3972_DCDC1;
-	int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
-	const int *vol_map = buck_voltage_map[buck];
-	u16 val;
 	int ret;
 
-	if (min_vol < vol_map[LP3972_BUCK_VOL_MIN_IDX(buck)] ||
-	    min_vol > vol_map[LP3972_BUCK_VOL_MAX_IDX(buck)])
-		return -EINVAL;
-
-	for (val = LP3972_BUCK_VOL_MIN_IDX(buck);
-		val <= LP3972_BUCK_VOL_MAX_IDX(buck); val++)
-		if (vol_map[val] >= min_vol)
-			break;
-
-	if (val > LP3972_BUCK_VOL_MAX_IDX(buck) ||
-	    vol_map[val] > max_vol)
-		return -EINVAL;
-
-	*selector = val;
-
 	ret = lp3972_set_bits(lp3972, LP3972_BUCK_VOL1_REG(buck),
-				LP3972_BUCK_VOL_MASK, val);
+				LP3972_BUCK_VOL_MASK, selector);
 	if (ret)
 		return ret;
 
@@ -468,10 +441,10 @@
 	.enable = lp3972_dcdc_enable,
 	.disable = lp3972_dcdc_disable,
 	.get_voltage = lp3972_dcdc_get_voltage,
-	.set_voltage = lp3972_dcdc_set_voltage,
+	.set_voltage_sel = lp3972_dcdc_set_voltage_sel,
 };
 
-static struct regulator_desc regulators[] = {
+static const struct regulator_desc regulators[] = {
 	{
 		.name = "LDO1",
 		.id = LP3972_LDO1,
@@ -554,9 +527,14 @@
 	/* Instantiate the regulators */
 	for (i = 0; i < pdata->num_regulators; i++) {
 		struct lp3972_regulator_subdev *reg = &pdata->regulators[i];
-		lp3972->rdev[i] = regulator_register(&regulators[reg->id],
-				lp3972->dev, reg->initdata, lp3972, NULL);
+		struct regulator_config config = { };
 
+		config.dev = lp3972->dev;
+		config.init_data = reg->initdata;
+		config.driver_data = lp3972;
+
+		lp3972->rdev[i] = regulator_register(&regulators[reg->id],
+						     &config);
 		if (IS_ERR(lp3972->rdev[i])) {
 			err = PTR_ERR(lp3972->rdev[i]);
 			dev_err(lp3972->dev, "regulator init failed: %d\n",
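
After these conversions the drivers implement set_voltage_sel() instead of set_voltage(): the core validates the range, maps it to a selector via map_voltage()/list_voltage(), and the callback only writes the selector bits. A sketch of such a callback, assuming a regmap-backed chip with placeholder register and mask values:

#include <linux/regmap.h>
#include <linux/regulator/driver.h>

#define EXAMPLE_VSEL_REG	0x10	/* placeholder register */
#define EXAMPLE_VSEL_MASK	0x1f	/* placeholder field */

struct example_chip {			/* hypothetical driver state */
	struct regmap *regmap;
};

static int example_set_voltage_sel(struct regulator_dev *rdev,
				   unsigned int selector)
{
	struct example_chip *chip = rdev_get_drvdata(rdev);

	/* the core has already range-checked and mapped the request */
	return regmap_update_bits(chip->regmap, EXAMPLE_VSEL_REG,
				  EXAMPLE_VSEL_MASK, selector);
}
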
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 282d2ee..b9444ee 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -161,7 +161,7 @@
 	.list_voltage = max1586_v6_list,
 };
 
-static struct regulator_desc max1586_reg[] = {
+static const struct regulator_desc max1586_reg[] = {
 	{
 		.name = "Output_V3",
 		.id = MAX1586_V3,
@@ -185,21 +185,21 @@
 {
 	struct regulator_dev **rdev;
 	struct max1586_platform_data *pdata = client->dev.platform_data;
+	struct regulator_config config = { };
 	struct max1586_data *max1586;
 	int i, id, ret = -ENOMEM;
 
-	max1586 = kzalloc(sizeof(struct max1586_data) +
+	max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data) +
 			sizeof(struct regulator_dev *) * (MAX1586_V6 + 1),
 			GFP_KERNEL);
 	if (!max1586)
-		goto out;
+		return -ENOMEM;
 
 	max1586->client = client;
 
-	if (!pdata->v3_gain) {
-		ret = -EINVAL;
-		goto out_unmap;
-	}
+	if (!pdata->v3_gain)
+		return -EINVAL;
+
 	max1586->min_uV = MAX1586_V3_MIN_UV / 1000 * pdata->v3_gain / 1000;
 	max1586->max_uV = MAX1586_V3_MAX_UV / 1000 * pdata->v3_gain / 1000;
 
@@ -212,9 +212,12 @@
 			dev_err(&client->dev, "invalid regulator id %d\n", id);
 			goto err;
 		}
-		rdev[i] = regulator_register(&max1586_reg[id], &client->dev,
-					     pdata->subdevs[i].platform_data,
-					     max1586, NULL);
+
+		config.dev = &client->dev;
+		config.init_data = pdata->subdevs[i].platform_data;
+		config.driver_data = max1586;
+
+		rdev[i] = regulator_register(&max1586_reg[id], &config);
 		if (IS_ERR(rdev[i])) {
 			ret = PTR_ERR(rdev[i]);
 			dev_err(&client->dev, "failed to register %s\n",
@@ -230,9 +233,6 @@
 err:
 	while (--i >= 0)
 		regulator_unregister(rdev[i]);
-out_unmap:
-	kfree(max1586);
-out:
 	return ret;
 }
 
@@ -244,8 +244,6 @@
 	for (i = 0; i <= MAX1586_V6; i++)
 		if (max1586->rdev[i])
 			regulator_unregister(max1586->rdev[i]);
-	kfree(max1586);
-
 	return 0;
 }
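
devm_kzalloc(), used throughout these probes, ties the allocation's lifetime to the device, so the explicit kfree() calls and unwind labels disappear. A minimal sketch:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_state { int value; };

static int example_alloc(struct device *dev)
{
	struct example_state *st;

	/* freed automatically on probe failure or device unbind */
	st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;		/* nothing to unwind */

	dev_set_drvdata(dev, st);
	return 0;
}
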
 
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 824c650..1f4bb80 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -53,7 +53,6 @@
 	struct device		*dev;
 	struct regmap		*regmap;
 
-	int		vol_reg;
 	unsigned	mode:2;	/* bit[1:0] = VID1, VID0 */
 	unsigned	extclk_freq:2;
 	unsigned	extclk:1;
@@ -61,53 +60,6 @@
 	unsigned	ramp_down:1;
 };
 
-/* I2C operations */
-
-static inline int check_range(int min_uV, int max_uV)
-{
-	if ((min_uV < MAX8649_DCDC_VMIN) || (max_uV > MAX8649_DCDC_VMAX)
-		|| (min_uV > max_uV))
-		return -EINVAL;
-	return 0;
-}
-
-static int max8649_list_voltage(struct regulator_dev *rdev, unsigned index)
-{
-	return (MAX8649_DCDC_VMIN + index * MAX8649_DCDC_STEP);
-}
-
-static int max8649_get_voltage(struct regulator_dev *rdev)
-{
-	struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
-	unsigned int val;
-	unsigned char data;
-	int ret;
-
-	ret = regmap_read(info->regmap, info->vol_reg, &val);
-	if (ret != 0)
-		return ret;
-	data = (unsigned char)val & MAX8649_VOL_MASK;
-	return max8649_list_voltage(rdev, data);
-}
-
-static int max8649_set_voltage(struct regulator_dev *rdev,
-			       int min_uV, int max_uV, unsigned *selector)
-{
-	struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
-	unsigned char data, mask;
-
-	if (check_range(min_uV, max_uV)) {
-		dev_err(info->dev, "invalid voltage range (%d, %d) uV\n",
-			min_uV, max_uV);
-		return -EINVAL;
-	}
-	data = DIV_ROUND_UP(min_uV - MAX8649_DCDC_VMIN, MAX8649_DCDC_STEP);
-	mask = MAX8649_VOL_MASK;
-	*selector = data & mask;
-
-	return regmap_update_bits(info->regmap, info->vol_reg, mask, data);
-}
-
 /* EN_PD means pulldown on EN input */
 static int max8649_enable(struct regulator_dev *rdev)
 {
@@ -145,11 +97,11 @@
 	unsigned int val;
 
 	/* get voltage */
-	ret = regmap_read(info->regmap, info->vol_reg, &val);
+	ret = regmap_read(info->regmap, rdev->desc->vsel_reg, &val);
 	if (ret != 0)
 		return ret;
 	val &= MAX8649_VOL_MASK;
-	voltage = max8649_list_voltage(rdev, (unsigned char)val); /* uV */
+	voltage = regulator_list_voltage_linear(rdev, (unsigned char)val);
 
 	/* get rate */
 	ret = regmap_read(info->regmap, MAX8649_RAMP, &val);
@@ -167,11 +119,11 @@
 
 	switch (mode) {
 	case REGULATOR_MODE_FAST:
-		regmap_update_bits(info->regmap, info->vol_reg, MAX8649_FORCE_PWM,
-				   MAX8649_FORCE_PWM);
+		regmap_update_bits(info->regmap, rdev->desc->vsel_reg,
+				   MAX8649_FORCE_PWM, MAX8649_FORCE_PWM);
 		break;
 	case REGULATOR_MODE_NORMAL:
-		regmap_update_bits(info->regmap, info->vol_reg,
+		regmap_update_bits(info->regmap, rdev->desc->vsel_reg,
 				   MAX8649_FORCE_PWM, 0);
 		break;
 	default:
@@ -186,7 +138,7 @@
 	unsigned int val;
 	int ret;
 
-	ret = regmap_read(info->regmap, info->vol_reg, &val);
+	ret = regmap_read(info->regmap, rdev->desc->vsel_reg, &val);
 	if (ret != 0)
 		return ret;
 	if (val & MAX8649_FORCE_PWM)
@@ -195,9 +147,10 @@
 }
 
 static struct regulator_ops max8649_dcdc_ops = {
-	.set_voltage	= max8649_set_voltage,
-	.get_voltage	= max8649_get_voltage,
-	.list_voltage	= max8649_list_voltage,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.list_voltage	= regulator_list_voltage_linear,
+	.map_voltage	= regulator_map_voltage_linear,
 	.enable		= max8649_enable,
 	.disable	= max8649_disable,
 	.is_enabled	= max8649_is_enabled,
@@ -213,6 +166,9 @@
 	.type		= REGULATOR_VOLTAGE,
 	.n_voltages	= 1 << 6,
 	.owner		= THIS_MODULE,
+	.vsel_mask	= MAX8649_VOL_MASK,
+	.min_uV		= MAX8649_DCDC_VMIN,
+	.uV_step	= MAX8649_DCDC_STEP,
 };
 
 static struct regmap_config max8649_regmap_config = {
@@ -225,21 +181,23 @@
 {
 	struct max8649_platform_data *pdata = client->dev.platform_data;
 	struct max8649_regulator_info *info = NULL;
+	struct regulator_config config = { };
 	unsigned int val;
 	unsigned char data;
 	int ret;
 
-	info = kzalloc(sizeof(struct max8649_regulator_info), GFP_KERNEL);
+	info = devm_kzalloc(&client->dev, sizeof(struct max8649_regulator_info),
+			    GFP_KERNEL);
 	if (!info) {
 		dev_err(&client->dev, "No enough memory\n");
 		return -ENOMEM;
 	}
 
-	info->regmap = regmap_init_i2c(client, &max8649_regmap_config);
+	info->regmap = devm_regmap_init_i2c(client, &max8649_regmap_config);
 	if (IS_ERR(info->regmap)) {
 		ret = PTR_ERR(info->regmap);
 		dev_err(&client->dev, "Failed to allocate register map: %d\n", ret);
-		goto fail;
+		return ret;
 	}
 
 	info->dev = &client->dev;
@@ -248,16 +206,16 @@
 	info->mode = pdata->mode;
 	switch (info->mode) {
 	case 0:
-		info->vol_reg = MAX8649_MODE0;
+		dcdc_desc.vsel_reg = MAX8649_MODE0;
 		break;
 	case 1:
-		info->vol_reg = MAX8649_MODE1;
+		dcdc_desc.vsel_reg = MAX8649_MODE1;
 		break;
 	case 2:
-		info->vol_reg = MAX8649_MODE2;
+		dcdc_desc.vsel_reg = MAX8649_MODE2;
 		break;
 	case 3:
-		info->vol_reg = MAX8649_MODE3;
+		dcdc_desc.vsel_reg = MAX8649_MODE3;
 		break;
 	default:
 		break;
@@ -267,7 +225,7 @@
 	if (ret != 0) {
 		dev_err(info->dev, "Failed to detect ID of MAX8649:%d\n",
 			ret);
-		goto out;
+		return ret;
 	}
 	dev_info(info->dev, "Detected MAX8649 (ID:%x)\n", val);
 
@@ -277,7 +235,8 @@
 	/* enable/disable external clock synchronization */
 	info->extclk = pdata->extclk;
 	data = (info->extclk) ? MAX8649_SYNC_EXTCLK : 0;
-	regmap_update_bits(info->regmap, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
+	regmap_update_bits(info->regmap, dcdc_desc.vsel_reg,
+			   MAX8649_SYNC_EXTCLK, data);
 	if (info->extclk) {
 		/* set external clock frequency */
 		info->extclk_freq = pdata->extclk_freq;
@@ -297,22 +256,18 @@
 				   MAX8649_RAMP_DOWN);
 	}
 
-	info->regulator = regulator_register(&dcdc_desc, &client->dev,
-					     pdata->regulator, info, NULL);
+	config.dev = &client->dev;
+	config.init_data = pdata->regulator;
+	config.driver_data = info;
+
+	info->regulator = regulator_register(&dcdc_desc, &config);
 	if (IS_ERR(info->regulator)) {
 		dev_err(info->dev, "failed to register regulator %s\n",
 			dcdc_desc.name);
-		ret = PTR_ERR(info->regulator);
-		goto out;
+		return PTR_ERR(info->regulator);
 	}
 
-	dev_info(info->dev, "Max8649 regulator device is detected.\n");
 	return 0;
-out:
-	regmap_exit(info->regmap);
-fail:
-	kfree(info);
-	return ret;
 }
 
 static int __devexit max8649_regulator_remove(struct i2c_client *client)
@@ -322,8 +277,6 @@
 	if (info) {
 		if (info->regulator)
 			regulator_unregister(info->regulator);
-		regmap_exit(info->regmap);
-		kfree(info);
 	}
 
 	return 0;
@@ -360,4 +313,3 @@
 MODULE_DESCRIPTION("MAXIM 8649 voltage regulator driver");
 MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
 MODULE_LICENSE("GPL");
-
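
The max8649 conversion above shows the fully regmap-backed pattern: config.regmap plus vsel_reg/vsel_mask (and, in drivers that use them, enable_reg/enable_mask) let the generic helpers do the register work, while min_uV/uV_step feed the linear voltage helpers. A sketch with invented register values; it assumes config.regmap is set at registration time:

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

static struct regulator_ops example_regmap_ops = {
	.list_voltage	 = regulator_list_voltage_linear,
	.map_voltage	 = regulator_map_voltage_linear,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
	.set_voltage_sel = regulator_set_voltage_sel_regmap,
	.enable		 = regulator_enable_regmap,
	.disable	 = regulator_disable_regmap,
	.is_enabled	 = regulator_is_enabled_regmap,
};

/* Register, mask and step values below are hypothetical. */
static const struct regulator_desc example_regmap_desc = {
	.name		= "EXAMPLE-BUCK",
	.ops		= &example_regmap_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.n_voltages	= 64,
	.min_uV		= 750000,
	.uV_step	= 12500,
	.vsel_reg	= 0x02,
	.vsel_mask	= 0x3f,
	.enable_reg	= 0x02,
	.enable_mask	= BIT(6),
};
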
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 4c5b053..8d53174 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -126,42 +126,22 @@
 	return max8660_write(max8660, MAX8660_OVER1, mask, 0);
 }
 
-static int max8660_dcdc_list(struct regulator_dev *rdev, unsigned selector)
-{
-	if (selector > MAX8660_DCDC_MAX_SEL)
-		return -EINVAL;
-	return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
-}
-
-static int max8660_dcdc_get(struct regulator_dev *rdev)
+static int max8660_dcdc_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
+
 	u8 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
 	u8 selector = max8660->shadow_regs[reg];
-	return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
+	return selector;
 }
 
-static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV,
-			    unsigned int *s)
+static int max8660_dcdc_set_voltage_sel(struct regulator_dev *rdev,
+					unsigned int selector)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
-	u8 reg, selector, bits;
+	u8 reg, bits;
 	int ret;
 
-	if (min_uV < MAX8660_DCDC_MIN_UV || min_uV > MAX8660_DCDC_MAX_UV)
-		return -EINVAL;
-	if (max_uV < MAX8660_DCDC_MIN_UV || max_uV > MAX8660_DCDC_MAX_UV)
-		return -EINVAL;
-
-	selector = DIV_ROUND_UP(min_uV - MAX8660_DCDC_MIN_UV,
-				MAX8660_DCDC_STEP);
-
-	ret = max8660_dcdc_list(rdev, selector);
-	if (ret < 0 || ret > max_uV)
-		return -EINVAL;
-
-	*s = selector;
-
 	reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
 	ret = max8660_write(max8660, reg, 0, selector);
 	if (ret)
@@ -174,9 +154,10 @@
 
 static struct regulator_ops max8660_dcdc_ops = {
 	.is_enabled = max8660_dcdc_is_enabled,
-	.list_voltage = max8660_dcdc_list,
-	.set_voltage = max8660_dcdc_set,
-	.get_voltage = max8660_dcdc_get,
+	.list_voltage = regulator_list_voltage_linear,
+	.map_voltage = regulator_map_voltage_linear,
+	.set_voltage_sel = max8660_dcdc_set_voltage_sel,
+	.get_voltage_sel = max8660_dcdc_get_voltage_sel,
 };
 
 
@@ -184,42 +165,20 @@
  * LDO5 functions
  */
 
-static int max8660_ldo5_list(struct regulator_dev *rdev, unsigned selector)
-{
-	if (selector > MAX8660_LDO5_MAX_SEL)
-		return -EINVAL;
-	return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
-}
-
-static int max8660_ldo5_get(struct regulator_dev *rdev)
+static int max8660_ldo5_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
+
 	u8 selector = max8660->shadow_regs[MAX8660_MDTV2];
-
-	return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
+	return selector;
 }
 
-static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV,
-			    unsigned int *s)
+static int max8660_ldo5_set_voltage_sel(struct regulator_dev *rdev,
+					unsigned int selector)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
-	u8 selector;
 	int ret;
 
-	if (min_uV < MAX8660_LDO5_MIN_UV || min_uV > MAX8660_LDO5_MAX_UV)
-		return -EINVAL;
-	if (max_uV < MAX8660_LDO5_MIN_UV || max_uV > MAX8660_LDO5_MAX_UV)
-		return -EINVAL;
-
-	selector = DIV_ROUND_UP(min_uV - MAX8660_LDO5_MIN_UV,
-				MAX8660_LDO5_STEP);
-
-	ret = max8660_ldo5_list(rdev, selector);
-	if (ret < 0 || ret > max_uV)
-		return -EINVAL;
-
-	*s = selector;
-
 	ret = max8660_write(max8660, MAX8660_MDTV2, 0, selector);
 	if (ret)
 		return ret;
@@ -229,9 +188,10 @@
 }
 
 static struct regulator_ops max8660_ldo5_ops = {
-	.list_voltage = max8660_ldo5_list,
-	.set_voltage = max8660_ldo5_set,
-	.get_voltage = max8660_ldo5_get,
+	.list_voltage = regulator_list_voltage_linear,
+	.map_voltage = regulator_map_voltage_linear,
+	.set_voltage_sel = max8660_ldo5_set_voltage_sel,
+	.get_voltage_sel = max8660_ldo5_get_voltage_sel,
 };
 
 
@@ -261,59 +221,38 @@
 	return max8660_write(max8660, MAX8660_OVER2, mask, 0);
 }
 
-static int max8660_ldo67_list(struct regulator_dev *rdev, unsigned selector)
-{
-	if (selector > MAX8660_LDO67_MAX_SEL)
-		return -EINVAL;
-	return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
-}
-
-static int max8660_ldo67_get(struct regulator_dev *rdev)
+static int max8660_ldo67_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
+
 	u8 shift = (rdev_get_id(rdev) == MAX8660_V6) ? 0 : 4;
 	u8 selector = (max8660->shadow_regs[MAX8660_L12VCR] >> shift) & 0xf;
-
-	return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
+	return selector;
 }
 
-static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV,
-			     int max_uV, unsigned int *s)
+static int max8660_ldo67_set_voltage_sel(struct regulator_dev *rdev,
+					 unsigned int selector)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
-	u8 selector;
-	int ret;
-
-	if (min_uV < MAX8660_LDO67_MIN_UV || min_uV > MAX8660_LDO67_MAX_UV)
-		return -EINVAL;
-	if (max_uV < MAX8660_LDO67_MIN_UV || max_uV > MAX8660_LDO67_MAX_UV)
-		return -EINVAL;
-
-	selector = DIV_ROUND_UP(min_uV - MAX8660_LDO67_MIN_UV,
-				MAX8660_LDO67_STEP);
-
-	ret = max8660_ldo67_list(rdev, selector);
-	if (ret < 0 || ret > max_uV)
-		return -EINVAL;
-
-	*s = selector;
 
 	if (rdev_get_id(rdev) == MAX8660_V6)
 		return max8660_write(max8660, MAX8660_L12VCR, 0xf0, selector);
 	else
-		return max8660_write(max8660, MAX8660_L12VCR, 0x0f, selector << 4);
+		return max8660_write(max8660, MAX8660_L12VCR, 0x0f,
+				     selector << 4);
 }
 
 static struct regulator_ops max8660_ldo67_ops = {
 	.is_enabled = max8660_ldo67_is_enabled,
 	.enable = max8660_ldo67_enable,
 	.disable = max8660_ldo67_disable,
-	.list_voltage = max8660_ldo67_list,
-	.get_voltage = max8660_ldo67_get,
-	.set_voltage = max8660_ldo67_set,
+	.list_voltage = regulator_list_voltage_linear,
+	.map_voltage = regulator_map_voltage_linear,
+	.get_voltage_sel = max8660_ldo67_get_voltage_sel,
+	.set_voltage_sel = max8660_ldo67_set_voltage_sel,
 };
 
-static struct regulator_desc max8660_reg[] = {
+static const struct regulator_desc max8660_reg[] = {
 	{
 		.name = "V3(DCDC)",
 		.id = MAX8660_V3,
@@ -321,6 +260,8 @@
 		.type = REGULATOR_VOLTAGE,
 		.n_voltages = MAX8660_DCDC_MAX_SEL + 1,
 		.owner = THIS_MODULE,
+		.min_uV = MAX8660_DCDC_MIN_UV,
+		.uV_step = MAX8660_DCDC_STEP,
 	},
 	{
 		.name = "V4(DCDC)",
@@ -329,6 +270,8 @@
 		.type = REGULATOR_VOLTAGE,
 		.n_voltages = MAX8660_DCDC_MAX_SEL + 1,
 		.owner = THIS_MODULE,
+		.min_uV = MAX8660_DCDC_MIN_UV,
+		.uV_step = MAX8660_DCDC_STEP,
 	},
 	{
 		.name = "V5(LDO)",
@@ -337,6 +280,8 @@
 		.type = REGULATOR_VOLTAGE,
 		.n_voltages = MAX8660_LDO5_MAX_SEL + 1,
 		.owner = THIS_MODULE,
+		.min_uV = MAX8660_LDO5_MIN_UV,
+		.uV_step = MAX8660_LDO5_STEP,
 	},
 	{
 		.name = "V6(LDO)",
@@ -345,6 +290,8 @@
 		.type = REGULATOR_VOLTAGE,
 		.n_voltages = MAX8660_LDO67_MAX_SEL + 1,
 		.owner = THIS_MODULE,
+		.min_uV = MAX8660_LDO67_MIN_UV,
+		.uV_step = MAX8660_LDO67_STEP,
 	},
 	{
 		.name = "V7(LDO)",
@@ -353,6 +300,8 @@
 		.type = REGULATOR_VOLTAGE,
 		.n_voltages = MAX8660_LDO67_MAX_SEL + 1,
 		.owner = THIS_MODULE,
+		.min_uV = MAX8660_LDO67_MIN_UV,
+		.uV_step = MAX8660_LDO67_STEP,
 	},
 };
 
@@ -361,21 +310,20 @@
 {
 	struct regulator_dev **rdev;
 	struct max8660_platform_data *pdata = client->dev.platform_data;
+	struct regulator_config config = { };
 	struct max8660 *max8660;
 	int boot_on, i, id, ret = -EINVAL;
 
 	if (pdata->num_subdevs > MAX8660_V_END) {
 		dev_err(&client->dev, "Too many regulators found!\n");
-		goto out;
+		return -EINVAL;
 	}
 
-	max8660 = kzalloc(sizeof(struct max8660) +
+	max8660 = devm_kzalloc(&client->dev, sizeof(struct max8660) +
 			sizeof(struct regulator_dev *) * MAX8660_V_END,
 			GFP_KERNEL);
-	if (!max8660) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!max8660)
+		return -ENOMEM;
 
 	max8660->client = client;
 	rdev = max8660->rdev;
@@ -404,7 +352,7 @@
 	for (i = 0; i < pdata->num_subdevs; i++) {
 
 		if (!pdata->subdevs[i].platform_data)
-			goto err_free;
+			goto err_out;
 
 		boot_on = pdata->subdevs[i].platform_data->constraints.boot_on;
 
@@ -430,7 +378,7 @@
 		case MAX8660_V7:
 			if (!strcmp(i2c_id->name, "max8661")) {
 				dev_err(&client->dev, "Regulator not on this chip!\n");
-				goto err_free;
+				goto err_out;
 			}
 
 			if (boot_on)
@@ -440,7 +388,7 @@
 		default:
 			dev_err(&client->dev, "invalid regulator %s\n",
 				 pdata->subdevs[i].name);
-			goto err_free;
+			goto err_out;
 		}
 	}
 
@@ -449,9 +397,11 @@
 
 		id = pdata->subdevs[i].id;
 
-		rdev[i] = regulator_register(&max8660_reg[id], &client->dev,
-					     pdata->subdevs[i].platform_data,
-					     max8660, NULL);
+		config.dev = &client->dev;
+		config.init_data = pdata->subdevs[i].platform_data;
+		config.driver_data = max8660;
+
+		rdev[i] = regulator_register(&max8660_reg[id], &config);
 		if (IS_ERR(rdev[i])) {
 			ret = PTR_ERR(rdev[i]);
 			dev_err(&client->dev, "failed to register %s\n",
@@ -461,15 +411,12 @@
 	}
 
 	i2c_set_clientdata(client, max8660);
-	dev_info(&client->dev, "Maxim 8660/8661 regulator driver loaded\n");
 	return 0;
 
 err_unregister:
 	while (--i >= 0)
 		regulator_unregister(rdev[i]);
-err_free:
-	kfree(max8660);
-out:
+err_out:
 	return ret;
 }
 
@@ -481,8 +428,6 @@
 	for (i = 0; i < MAX8660_V_END; i++)
 		if (max8660->rdev[i])
 			regulator_unregister(max8660->rdev[i]);
-	kfree(max8660);
-
 	return 0;
 }
 
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 2f242f4..43dc97ec 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -38,50 +38,20 @@
 	struct i2c_client	*i2c;
 	struct max8925_chip	*chip;
 
-	int	min_uV;
-	int	max_uV;
-	int	step_uV;
 	int	vol_reg;
-	int	vol_shift;
-	int	vol_nbits;
 	int	enable_reg;
 };
 
-static inline int check_range(struct max8925_regulator_info *info,
-			      int min_uV, int max_uV)
-{
-	if (min_uV < info->min_uV || min_uV > info->max_uV)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int max8925_list_voltage(struct regulator_dev *rdev, unsigned index)
+static int max8925_set_voltage_sel(struct regulator_dev *rdev,
+				   unsigned int selector)
 {
 	struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
-	return info->min_uV + index * info->step_uV;
+	unsigned char mask = rdev->desc->n_voltages - 1;
+
+	return max8925_set_bits(info->i2c, info->vol_reg, mask, selector);
 }
 
-static int max8925_set_voltage(struct regulator_dev *rdev,
-			       int min_uV, int max_uV, unsigned int *selector)
-{
-	struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
-	unsigned char data, mask;
-
-	if (check_range(info, min_uV, max_uV)) {
-		dev_err(info->chip->dev, "invalid voltage range (%d, %d) uV\n",
-			min_uV, max_uV);
-		return -EINVAL;
-	}
-	data = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
-	*selector = data;
-	data <<= info->vol_shift;
-	mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
-
-	return max8925_set_bits(info->i2c, info->vol_reg, mask, data);
-}
-
-static int max8925_get_voltage(struct regulator_dev *rdev)
+static int max8925_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
 	unsigned char data, mask;
@@ -90,10 +60,10 @@
 	ret = max8925_reg_read(info->i2c, info->vol_reg);
 	if (ret < 0)
 		return ret;
-	mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
-	data = (ret & mask) >> info->vol_shift;
+	mask = rdev->desc->n_voltages - 1;
+	data = ret & mask;
 
-	return max8925_list_voltage(rdev, data);
+	return data;
 }
 
 static int max8925_enable(struct regulator_dev *rdev)
@@ -163,8 +133,10 @@
 }
 
 static struct regulator_ops max8925_regulator_sdv_ops = {
-	.set_voltage		= max8925_set_voltage,
-	.get_voltage		= max8925_get_voltage,
+	.map_voltage		= regulator_map_voltage_linear,
+	.list_voltage		= regulator_list_voltage_linear,
+	.set_voltage_sel	= max8925_set_voltage_sel,
+	.get_voltage_sel	= max8925_get_voltage_sel,
 	.enable			= max8925_enable,
 	.disable		= max8925_disable,
 	.is_enabled		= max8925_is_enabled,
@@ -174,8 +146,10 @@
 };
 
 static struct regulator_ops max8925_regulator_ldo_ops = {
-	.set_voltage		= max8925_set_voltage,
-	.get_voltage		= max8925_get_voltage,
+	.map_voltage		= regulator_map_voltage_linear,
+	.list_voltage		= regulator_list_voltage_linear,
+	.set_voltage_sel	= max8925_set_voltage_sel,
+	.get_voltage_sel	= max8925_get_voltage_sel,
 	.enable			= max8925_enable,
 	.disable		= max8925_disable,
 	.is_enabled		= max8925_is_enabled,
@@ -189,13 +163,11 @@
 		.type	= REGULATOR_VOLTAGE,			\
 		.id	= MAX8925_ID_SD##_id,			\
 		.owner	= THIS_MODULE,				\
+		.n_voltages = 64,				\
+		.min_uV = min * 1000,				\
+		.uV_step = step * 1000,				\
 	},							\
-	.min_uV		= min * 1000,				\
-	.max_uV		= max * 1000,				\
-	.step_uV	= step * 1000,				\
 	.vol_reg	= MAX8925_SDV##_id,			\
-	.vol_shift	= 0,					\
-	.vol_nbits	= 6,					\
 	.enable_reg	= MAX8925_SDCTL##_id,			\
 }
 
@@ -207,13 +179,11 @@
 		.type	= REGULATOR_VOLTAGE,			\
 		.id	= MAX8925_ID_LDO##_id,			\
 		.owner	= THIS_MODULE,				\
+		.n_voltages = 64,				\
+		.min_uV = min * 1000,				\
+		.uV_step = step * 1000,				\
 	},							\
-	.min_uV		= min * 1000,				\
-	.max_uV		= max * 1000,				\
-	.step_uV	= step * 1000,				\
 	.vol_reg	= MAX8925_LDOVOUT##_id,			\
-	.vol_shift	= 0,					\
-	.vol_nbits	= 6,					\
 	.enable_reg	= MAX8925_LDOCTL##_id,			\
 }
 
@@ -261,6 +231,7 @@
 {
 	struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
 	struct max8925_platform_data *pdata = chip->dev->platform_data;
+	struct regulator_config config = { };
 	struct max8925_regulator_info *ri;
 	struct regulator_dev *rdev;
 
@@ -272,8 +243,11 @@
 	ri->i2c = chip->i2c;
 	ri->chip = chip;
 
-	rdev = regulator_register(&ri->desc, &pdev->dev,
-				  pdata->regulator[pdev->id], ri, NULL);
+	config.dev = &pdev->dev;
+	config.init_data = pdata->regulator[pdev->id];
+	config.driver_data = ri;
+
+	rdev = regulator_register(&ri->desc, &config);
 	if (IS_ERR(rdev)) {
 		dev_err(&pdev->dev, "failed to register regulator %s\n",
 				ri->desc.name);
@@ -319,4 +293,3 @@
 MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
 MODULE_DESCRIPTION("Regulator Driver for Maxim 8925 PMIC");
 MODULE_ALIAS("platform:max8925-regulator");
-
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 75d8940..910c9b2 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -69,11 +69,6 @@
 	return i2c_smbus_write_byte_data(max8952->client, reg, value);
 }
 
-static int max8952_voltage(struct max8952_data *max8952, u8 mode)
-{
-	return (max8952->pdata->dvs_mode[mode] * 10 + 770) * 1000;
-}
-
 static int max8952_list_voltage(struct regulator_dev *rdev,
 		unsigned int selector)
 {
@@ -82,7 +77,7 @@
 	if (rdev_get_id(rdev) != 0)
 		return -EINVAL;
 
-	return max8952_voltage(max8952, selector);
+	return (max8952->pdata->dvs_mode[selector] * 10 + 770) * 1000;
 }
 
 static int max8952_is_enabled(struct regulator_dev *rdev)
@@ -117,7 +112,7 @@
 	return 0;
 }
 
-static int max8952_get_voltage(struct regulator_dev *rdev)
+static int max8952_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct max8952_data *max8952 = rdev_get_drvdata(rdev);
 	u8 vid = 0;
@@ -127,14 +122,13 @@
 	if (max8952->vid1)
 		vid += 2;
 
-	return max8952_voltage(max8952, vid);
+	return vid;
 }
 
-static int max8952_set_voltage(struct regulator_dev *rdev,
-			       int min_uV, int max_uV, unsigned *selector)
+static int max8952_set_voltage_sel(struct regulator_dev *rdev,
+				   unsigned selector)
 {
 	struct max8952_data *max8952 = rdev_get_drvdata(rdev);
-	s8 vid = -1, i;
 
 	if (!gpio_is_valid(max8952->pdata->gpio_vid0) ||
 			!gpio_is_valid(max8952->pdata->gpio_vid1)) {
@@ -142,23 +136,10 @@
 		return -EPERM;
 	}
 
-	for (i = 0; i < MAX8952_NUM_DVS_MODE; i++) {
-		int volt = max8952_voltage(max8952, i);
-
-		/* Set the voltage as low as possible within the range */
-		if (volt <= max_uV && volt >= min_uV)
-			if (vid == -1 || max8952_voltage(max8952, vid) > volt)
-				vid = i;
-	}
-
-	if (vid >= 0 && vid < MAX8952_NUM_DVS_MODE) {
-		max8952->vid0 = (vid % 2 == 1);
-		max8952->vid1 = (((vid >> 1) % 2) == 1);
-		*selector = vid;
-		gpio_set_value(max8952->pdata->gpio_vid0, max8952->vid0);
-		gpio_set_value(max8952->pdata->gpio_vid1, max8952->vid1);
-	} else
-		return -EINVAL;
+	max8952->vid0 = selector & 0x1;
+	max8952->vid1 = (selector >> 1) & 0x1;
+	gpio_set_value(max8952->pdata->gpio_vid0, max8952->vid0);
+	gpio_set_value(max8952->pdata->gpio_vid1, max8952->vid1);
 
 	return 0;
 }
@@ -168,12 +149,12 @@
 	.is_enabled		= max8952_is_enabled,
 	.enable			= max8952_enable,
 	.disable		= max8952_disable,
-	.get_voltage		= max8952_get_voltage,
-	.set_voltage		= max8952_set_voltage,
+	.get_voltage_sel	= max8952_get_voltage_sel,
+	.set_voltage_sel	= max8952_set_voltage_sel,
 	.set_suspend_disable	= max8952_disable,
 };
 
-static struct regulator_desc regulator = {
+static const struct regulator_desc regulator = {
 	.name		= "MAX8952_VOUT",
 	.id		= 0,
 	.n_voltages	= MAX8952_NUM_DVS_MODE,
@@ -187,6 +168,7 @@
 {
 	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
 	struct max8952_platform_data *pdata = client->dev.platform_data;
+	struct regulator_config config = { };
 	struct max8952_data *max8952;
 
 	int ret = 0, err = 0;
@@ -199,7 +181,8 @@
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
 		return -EIO;
 
-	max8952 = kzalloc(sizeof(struct max8952_data), GFP_KERNEL);
+	max8952 = devm_kzalloc(&client->dev, sizeof(struct max8952_data),
+			       GFP_KERNEL);
 	if (!max8952)
 		return -ENOMEM;
 
@@ -207,18 +190,21 @@
 	max8952->dev = &client->dev;
 	max8952->pdata = pdata;
 
-	max8952->rdev = regulator_register(&regulator, max8952->dev,
-			&pdata->reg_data, max8952, NULL);
+	config.dev = max8952->dev;
+	config.init_data = &pdata->reg_data;
+	config.driver_data = max8952;
+
+	max8952->rdev = regulator_register(&regulator, &config);
 
 	if (IS_ERR(max8952->rdev)) {
 		ret = PTR_ERR(max8952->rdev);
 		dev_err(max8952->dev, "regulator init failed (%d)\n", ret);
-		goto err_reg;
+		return ret;
 	}
 
 	max8952->en = !!(pdata->reg_data.constraints.boot_on);
-	max8952->vid0 = (pdata->default_mode % 2) == 1;
-	max8952->vid1 = ((pdata->default_mode >> 1) % 2) == 1;
+	max8952->vid0 = pdata->default_mode & 0x1;
+	max8952->vid1 = (pdata->default_mode >> 1) & 0x1;
 
 	if (gpio_is_valid(pdata->gpio_en)) {
 		if (!gpio_request(pdata->gpio_en, "MAX8952 EN"))
@@ -241,13 +227,13 @@
 			gpio_is_valid(pdata->gpio_vid1)) {
 		if (!gpio_request(pdata->gpio_vid0, "MAX8952 VID0"))
 			gpio_direction_output(pdata->gpio_vid0,
-					(pdata->default_mode) % 2);
+					(pdata->default_mode) & 0x1);
 		else
 			err = 1;
 
 		if (!gpio_request(pdata->gpio_vid1, "MAX8952 VID1"))
 			gpio_direction_output(pdata->gpio_vid1,
-				(pdata->default_mode >> 1) % 2);
+				(pdata->default_mode >> 1) & 0x1);
 		else {
 			if (!err)
 				gpio_free(pdata->gpio_vid0);
@@ -310,10 +296,6 @@
 	i2c_set_clientdata(client, max8952);
 
 	return 0;
-
-err_reg:
-	kfree(max8952);
-	return ret;
 }
 
 static int __devexit max8952_pmic_remove(struct i2c_client *client)
@@ -327,8 +309,6 @@
 	gpio_free(pdata->gpio_vid0);
 	gpio_free(pdata->gpio_vid1);
 	gpio_free(pdata->gpio_en);
-
-	kfree(max8952);
 	return 0;
 }
 
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 9657929..704cd49 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -22,7 +22,6 @@
  */
 
 #include <linux/bug.h>
-#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
@@ -68,29 +67,28 @@
 	int min;
 	int max;
 	int step;
-	unsigned int n_bits;
 };
 
 /* Voltage maps in mV */
 static const struct voltage_map_desc ldo_voltage_map_desc = {
-	.min = 800,	.max = 3950,	.step = 50,	.n_bits = 6,
+	.min = 800,	.max = 3950,	.step = 50,
 }; /* LDO1 ~ 18, 21 all */
 
 static const struct voltage_map_desc buck1245_voltage_map_desc = {
-	.min = 650,	.max = 2225,	.step = 25,	.n_bits = 6,
+	.min = 650,	.max = 2225,	.step = 25,
 }; /* Buck1, 2, 4, 5 */
 
 static const struct voltage_map_desc buck37_voltage_map_desc = {
-	.min = 750,	.max = 3900,	.step = 50,	.n_bits = 6,
+	.min = 750,	.max = 3900,	.step = 50,
 }; /* Buck3, 7 */
 
 /* current map in mA */
 static const struct voltage_map_desc charger_current_map_desc = {
-	.min = 200,	.max = 950,	.step = 50,	.n_bits = 4,
+	.min = 200,	.max = 950,	.step = 50,
 };
 
 static const struct voltage_map_desc topoff_current_map_desc = {
-	.min = 50,	.max = 200,	.step = 10,	.n_bits = 4,
+	.min = 50,	.max = 200,	.step = 10,
 };
 
 static const struct voltage_map_desc *reg_voltage_map[] = {
@@ -279,9 +277,7 @@
 	u8 val;
 
 	ret = max8997_get_enable_register(rdev, &reg, &mask, &pattern);
-	if (ret == -EINVAL)
-		return 1; /* "not controllable" */
-	else if (ret)
+	if (ret)
 		return ret;
 
 	ret = max8997_read_reg(i2c, reg, &val);
@@ -320,6 +316,7 @@
 static int max8997_get_voltage_register(struct regulator_dev *rdev,
 		int *_reg, int *_shift, int *_mask)
 {
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
 	int rid = rdev_get_id(rdev);
 	int reg, shift = 0, mask = 0x3f;
 
@@ -329,9 +326,13 @@
 		break;
 	case MAX8997_BUCK1:
 		reg = MAX8997_REG_BUCK1DVS1;
+		if (max8997->buck1_gpiodvs)
+			reg += max8997->buck125_gpioindex;
 		break;
 	case MAX8997_BUCK2:
 		reg = MAX8997_REG_BUCK2DVS1;
+		if (max8997->buck2_gpiodvs)
+			reg += max8997->buck125_gpioindex;
 		break;
 	case MAX8997_BUCK3:
 		reg = MAX8997_REG_BUCK3DVS;
@@ -341,6 +342,8 @@
 		break;
 	case MAX8997_BUCK5:
 		reg = MAX8997_REG_BUCK5DVS1;
+		if (max8997->buck5_gpiodvs)
+			reg += max8997->buck125_gpioindex;
 		break;
 	case MAX8997_BUCK7:
 		reg = MAX8997_REG_BUCK7DVS;
@@ -376,23 +379,17 @@
 	return 0;
 }
 
-static int max8997_get_voltage(struct regulator_dev *rdev)
+static int max8997_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
 	struct i2c_client *i2c = max8997->iodev->i2c;
 	int reg, shift, mask, ret;
-	int rid = rdev_get_id(rdev);
 	u8 val;
 
 	ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
 	if (ret)
 		return ret;
 
-	if ((rid == MAX8997_BUCK1 && max8997->buck1_gpiodvs) ||
-			(rid == MAX8997_BUCK2 && max8997->buck2_gpiodvs) ||
-			(rid == MAX8997_BUCK5 && max8997->buck5_gpiodvs))
-		reg += max8997->buck125_gpioindex;
-
 	ret = max8997_read_reg(i2c, reg, &val);
 	if (ret)
 		return ret;
@@ -400,22 +397,14 @@
 	val >>= shift;
 	val &= mask;
 
-	if (rdev->desc && rdev->desc->ops && rdev->desc->ops->list_voltage)
-		return rdev->desc->ops->list_voltage(rdev, val);
-
-	/*
-	 * max8997_list_voltage returns value for any rdev with voltage_map,
-	 * which works for "CHARGER" and "CHARGER TOPOFF" that do not have
-	 * list_voltage ops (they are current regulators).
-	 */
-	return max8997_list_voltage(rdev, val);
+	return val;
 }
 
 static inline int max8997_get_voltage_proper_val(
 		const struct voltage_map_desc *desc,
 		int min_vol, int max_vol)
 {
-	int i = 0;
+	int i;
 
 	if (desc == NULL)
 		return -EINVAL;
@@ -423,16 +412,14 @@
 	if (max_vol < desc->min || min_vol > desc->max)
 		return -EINVAL;
 
-	while (desc->min + desc->step * i < min_vol &&
-			desc->min + desc->step * i < desc->max)
-		i++;
+	if (min_vol < desc->min)
+		min_vol = desc->min;
+
+	i = DIV_ROUND_UP(min_vol - desc->min, desc->step);
 
 	if (desc->min + desc->step * i > max_vol)
 		return -EINVAL;
 
-	if (i >= (1 << desc->n_bits))
-		return -EINVAL;
-
 	return i;
 }
 
@@ -499,9 +486,7 @@
 	int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
 	const struct voltage_map_desc *desc;
 	int rid = rdev_get_id(rdev);
-	int reg, shift = 0, mask, ret;
-	int i;
-	u8 org;
+	int i, reg, shift, mask, ret;
 
 	switch (rid) {
 	case MAX8997_LDO1 ... MAX8997_LDO21:
@@ -530,21 +515,50 @@
 	if (ret)
 		return ret;
 
-	max8997_read_reg(i2c, reg, &org);
-	org = (org & mask) >> shift;
-
 	ret = max8997_update_reg(i2c, reg, i << shift, mask << shift);
 	*selector = i;
 
-	if (rid == MAX8997_BUCK1 || rid == MAX8997_BUCK2 ||
-			rid == MAX8997_BUCK4 || rid == MAX8997_BUCK5) {
-		/* If the voltage is increasing */
-		if (org < i)
-			udelay(DIV_ROUND_UP(desc->step * (i - org),
-						max8997->ramp_delay));
+	return ret;
+}
+
+static int max8997_set_voltage_ldobuck_time_sel(struct regulator_dev *rdev,
+						unsigned int old_selector,
+						unsigned int new_selector)
+{
+	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+	int rid = rdev_get_id(rdev);
+	const struct voltage_map_desc *desc = reg_voltage_map[rid];
+
+	/* Delay is required only if the voltage is increasing */
+	if (old_selector >= new_selector)
+		return 0;
+
+	/* No need to delay if gpio_dvs_mode */
+	switch (rid) {
+	case MAX8997_BUCK1:
+		if (max8997->buck1_gpiodvs)
+			return 0;
+		break;
+	case MAX8997_BUCK2:
+		if (max8997->buck2_gpiodvs)
+			return 0;
+		break;
+	case MAX8997_BUCK5:
+		if (max8997->buck5_gpiodvs)
+			return 0;
+		break;
 	}
 
-	return ret;
+	switch (rid) {
+	case MAX8997_BUCK1:
+	case MAX8997_BUCK2:
+	case MAX8997_BUCK4:
+	case MAX8997_BUCK5:
+		return DIV_ROUND_UP(desc->step * (new_selector - old_selector),
+				    max8997->ramp_delay);
+	}
+
+	return 0;
 }
 
 /*
@@ -684,7 +698,7 @@
 		}
 
 		new_val++;
-	} while (desc->min + desc->step + new_val <= desc->max);
+	} while (desc->min + desc->step * new_val <= desc->max);
 
 	new_idx = tmp_idx;
 	new_val = tmp_val;
@@ -751,11 +765,6 @@
 	return ret;
 }
 
-static int max8997_reg_enable_suspend(struct regulator_dev *rdev)
-{
-	return 0;
-}
-
 static int max8997_reg_disable_suspend(struct regulator_dev *rdev)
 {
 	struct max8997_data *max8997 = rdev_get_drvdata(rdev);
@@ -788,9 +797,9 @@
 	.is_enabled		= max8997_reg_is_enabled,
 	.enable			= max8997_reg_enable,
 	.disable		= max8997_reg_disable,
-	.get_voltage		= max8997_get_voltage,
+	.get_voltage_sel	= max8997_get_voltage_sel,
 	.set_voltage		= max8997_set_voltage_ldobuck,
-	.set_suspend_enable	= max8997_reg_enable_suspend,
+	.set_voltage_time_sel	= max8997_set_voltage_ldobuck_time_sel,
 	.set_suspend_disable	= max8997_reg_disable_suspend,
 };
 
@@ -799,9 +808,9 @@
 	.is_enabled		= max8997_reg_is_enabled,
 	.enable			= max8997_reg_enable,
 	.disable		= max8997_reg_disable,
-	.get_voltage		= max8997_get_voltage,
+	.get_voltage_sel	= max8997_get_voltage_sel,
 	.set_voltage		= max8997_set_voltage_buck,
-	.set_suspend_enable	= max8997_reg_enable_suspend,
+	.set_voltage_time_sel	= max8997_set_voltage_ldobuck_time_sel,
 	.set_suspend_disable	= max8997_reg_disable_suspend,
 };
 
@@ -810,7 +819,6 @@
 	.is_enabled		= max8997_reg_is_enabled,
 	.enable			= max8997_reg_enable,
 	.disable		= max8997_reg_disable,
-	.set_suspend_enable	= max8997_reg_enable_suspend,
 	.set_suspend_disable	= max8997_reg_disable_suspend,
 };
 
@@ -819,144 +827,117 @@
 	.is_enabled		= max8997_reg_is_enabled,
 	.enable			= max8997_reg_enable,
 	.disable		= max8997_reg_disable,
-	.get_voltage		= max8997_get_voltage,
+	.get_voltage_sel	= max8997_get_voltage_sel,
 	.set_voltage		= max8997_set_voltage_safeout,
-	.set_suspend_enable	= max8997_reg_enable_suspend,
 	.set_suspend_disable	= max8997_reg_disable_suspend,
 };
 
 static struct regulator_ops max8997_fixedstate_ops = {
 	.list_voltage		= max8997_list_voltage_charger_cv,
-	.get_voltage		= max8997_get_voltage,
+	.get_voltage_sel	= max8997_get_voltage_sel,
 	.set_voltage		= max8997_set_voltage_charger_cv,
 };
 
-static int max8997_set_voltage_ldobuck_wrap(struct regulator_dev *rdev,
-		int min_uV, int max_uV)
+static int max8997_set_current_limit(struct regulator_dev *rdev,
+				     int min_uA, int max_uA)
 {
 	unsigned dummy;
+	int rid = rdev_get_id(rdev);
 
-	return max8997_set_voltage_ldobuck(rdev, min_uV, max_uV, &dummy);
+	if (rid != MAX8997_CHARGER && rid != MAX8997_CHARGER_TOPOFF)
+		return -EINVAL;
+
+	/* Reuse max8997_set_voltage_ldobuck to set current_limit. */
+	return max8997_set_voltage_ldobuck(rdev, min_uA, max_uA, &dummy);
 }
 
+static int max8997_get_current_limit(struct regulator_dev *rdev)
+{
+	int sel, rid = rdev_get_id(rdev);
+
+	if (rid != MAX8997_CHARGER && rid != MAX8997_CHARGER_TOPOFF)
+		return -EINVAL;
+
+	sel = max8997_get_voltage_sel(rdev);
+	if (sel < 0)
+		return sel;
+
+	/* Reuse max8997_list_voltage to get current_limit. */
+	return max8997_list_voltage(rdev, sel);
+}
 
 static struct regulator_ops max8997_charger_ops = {
 	.is_enabled		= max8997_reg_is_enabled,
 	.enable			= max8997_reg_enable,
 	.disable		= max8997_reg_disable,
-	.get_current_limit	= max8997_get_voltage,
-	.set_current_limit	= max8997_set_voltage_ldobuck_wrap,
+	.get_current_limit	= max8997_get_current_limit,
+	.set_current_limit	= max8997_set_current_limit,
 };
 
 static struct regulator_ops max8997_charger_fixedstate_ops = {
-	.is_enabled		= max8997_reg_is_enabled,
-	.get_current_limit	= max8997_get_voltage,
-	.set_current_limit	= max8997_set_voltage_ldobuck_wrap,
+	.get_current_limit	= max8997_get_current_limit,
+	.set_current_limit	= max8997_set_current_limit,
 };
 
-#define regulator_desc_ldo(num)		{	\
-	.name		= "LDO"#num,		\
-	.id		= MAX8997_LDO##num,	\
-	.ops		= &max8997_ldo_ops,	\
+#define MAX8997_VOLTAGE_REGULATOR(_name, _ops) {\
+	.name		= #_name,		\
+	.id		= MAX8997_##_name,	\
+	.ops		= &_ops,		\
 	.type		= REGULATOR_VOLTAGE,	\
 	.owner		= THIS_MODULE,		\
 }
-#define regulator_desc_buck(num)		{	\
-	.name		= "BUCK"#num,		\
-	.id		= MAX8997_BUCK##num,	\
-	.ops		= &max8997_buck_ops,	\
-	.type		= REGULATOR_VOLTAGE,	\
+
+#define MAX8997_CURRENT_REGULATOR(_name, _ops) {\
+	.name		= #_name,		\
+	.id		= MAX8997_##_name,	\
+	.ops		= &_ops,		\
+	.type		= REGULATOR_CURRENT,	\
 	.owner		= THIS_MODULE,		\
 }
 
 static struct regulator_desc regulators[] = {
-	regulator_desc_ldo(1),
-	regulator_desc_ldo(2),
-	regulator_desc_ldo(3),
-	regulator_desc_ldo(4),
-	regulator_desc_ldo(5),
-	regulator_desc_ldo(6),
-	regulator_desc_ldo(7),
-	regulator_desc_ldo(8),
-	regulator_desc_ldo(9),
-	regulator_desc_ldo(10),
-	regulator_desc_ldo(11),
-	regulator_desc_ldo(12),
-	regulator_desc_ldo(13),
-	regulator_desc_ldo(14),
-	regulator_desc_ldo(15),
-	regulator_desc_ldo(16),
-	regulator_desc_ldo(17),
-	regulator_desc_ldo(18),
-	regulator_desc_ldo(21),
-	regulator_desc_buck(1),
-	regulator_desc_buck(2),
-	regulator_desc_buck(3),
-	regulator_desc_buck(4),
-	regulator_desc_buck(5),
-	{
-		.name	= "BUCK6",
-		.id	= MAX8997_BUCK6,
-		.ops	= &max8997_fixedvolt_ops,
-		.type	= REGULATOR_VOLTAGE,
-		.owner	= THIS_MODULE,
-	},
-	regulator_desc_buck(7),
-	{
-		.name	= "EN32KHz_AP",
-		.id	= MAX8997_EN32KHZ_AP,
-		.ops	= &max8997_fixedvolt_ops,
-		.type	= REGULATOR_VOLTAGE,
-		.owner	= THIS_MODULE,
-	}, {
-		.name	= "EN32KHz_CP",
-		.id	= MAX8997_EN32KHZ_CP,
-		.ops	= &max8997_fixedvolt_ops,
-		.type	= REGULATOR_VOLTAGE,
-		.owner	= THIS_MODULE,
-	}, {
-		.name	= "ENVICHG",
-		.id	= MAX8997_ENVICHG,
-		.ops	= &max8997_fixedvolt_ops,
-		.type	= REGULATOR_VOLTAGE,
-		.owner	= THIS_MODULE,
-	}, {
-		.name	= "ESAFEOUT1",
-		.id	= MAX8997_ESAFEOUT1,
-		.ops	= &max8997_safeout_ops,
-		.type	= REGULATOR_VOLTAGE,
-		.owner	 = THIS_MODULE,
-	}, {
-		.name	= "ESAFEOUT2",
-		.id	= MAX8997_ESAFEOUT2,
-		.ops	= &max8997_safeout_ops,
-		.type	= REGULATOR_VOLTAGE,
-		.owner	 = THIS_MODULE,
-	}, {
-		.name	= "CHARGER_CV",
-		.id	= MAX8997_CHARGER_CV,
-		.ops	= &max8997_fixedstate_ops,
-		.type	= REGULATOR_VOLTAGE,
-		.owner	 = THIS_MODULE,
-	}, {
-		.name	= "CHARGER",
-		.id	= MAX8997_CHARGER,
-		.ops	= &max8997_charger_ops,
-		.type	= REGULATOR_CURRENT,
-		.owner	 = THIS_MODULE,
-	}, {
-		.name	= "CHARGER_TOPOFF",
-		.id	= MAX8997_CHARGER_TOPOFF,
-		.ops	= &max8997_charger_fixedstate_ops,
-		.type	= REGULATOR_CURRENT,
-		.owner	 = THIS_MODULE,
-	},
+	MAX8997_VOLTAGE_REGULATOR(LDO1, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO2, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO3, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO4, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO5, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO6, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO7, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO8, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO9, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO10, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO11, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO12, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO13, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO14, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO15, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO16, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO17, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO18, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(LDO21, max8997_ldo_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK1, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK2, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK3, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK4, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK5, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK6, max8997_fixedvolt_ops),
+	MAX8997_VOLTAGE_REGULATOR(BUCK7, max8997_buck_ops),
+	MAX8997_VOLTAGE_REGULATOR(EN32KHZ_AP, max8997_fixedvolt_ops),
+	MAX8997_VOLTAGE_REGULATOR(EN32KHZ_CP, max8997_fixedvolt_ops),
+	MAX8997_VOLTAGE_REGULATOR(ENVICHG, max8997_fixedvolt_ops),
+	MAX8997_VOLTAGE_REGULATOR(ESAFEOUT1, max8997_safeout_ops),
+	MAX8997_VOLTAGE_REGULATOR(ESAFEOUT2, max8997_safeout_ops),
+	MAX8997_VOLTAGE_REGULATOR(CHARGER_CV, max8997_fixedstate_ops),
+	MAX8997_CURRENT_REGULATOR(CHARGER, max8997_charger_ops),
+	MAX8997_CURRENT_REGULATOR(CHARGER_TOPOFF,
+				  max8997_charger_fixedstate_ops),
 };
 
 static __devinit int max8997_pmic_probe(struct platform_device *pdev)
 {
 	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
 	struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+	struct regulator_config config = { };
 	struct regulator_dev **rdev;
 	struct max8997_data *max8997;
 	struct i2c_client *i2c;
@@ -968,16 +949,15 @@
 		return -ENODEV;
 	}
 
-	max8997 = kzalloc(sizeof(struct max8997_data), GFP_KERNEL);
+	max8997 = devm_kzalloc(&pdev->dev, sizeof(struct max8997_data),
+			       GFP_KERNEL);
 	if (!max8997)
 		return -ENOMEM;
 
 	size = sizeof(struct regulator_dev *) * pdata->num_regulators;
-	max8997->rdev = kzalloc(size, GFP_KERNEL);
-	if (!max8997->rdev) {
-		kfree(max8997);
+	max8997->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!max8997->rdev)
 		return -ENOMEM;
-	}
 
 	rdev = max8997->rdev;
 	max8997->dev = &pdev->dev;
@@ -1001,7 +981,7 @@
 					pdata->buck1_voltage[i] / 1000 +
 					buck1245_voltage_map_desc.step);
 		if (ret < 0)
-			goto err_alloc;
+			goto err_out;
 
 		max8997->buck2_vol[i] = ret =
 			max8997_get_voltage_proper_val(
@@ -1010,7 +990,7 @@
 					pdata->buck2_voltage[i] / 1000 +
 					buck1245_voltage_map_desc.step);
 		if (ret < 0)
-			goto err_alloc;
+			goto err_out;
 
 		max8997->buck5_vol[i] = ret =
 			max8997_get_voltage_proper_val(
@@ -1019,7 +999,7 @@
 					pdata->buck5_voltage[i] / 1000 +
 					buck1245_voltage_map_desc.step);
 		if (ret < 0)
-			goto err_alloc;
+			goto err_out;
 
 		if (max_buck1 < max8997->buck1_vol[i])
 			max_buck1 = max8997->buck1_vol[i];
@@ -1052,7 +1032,7 @@
 				!gpio_is_valid(pdata->buck125_gpios[2])) {
 			dev_err(&pdev->dev, "GPIO NOT VALID\n");
 			ret = -EINVAL;
-			goto err_alloc;
+			goto err_out;
 		}
 
 		ret = gpio_request(pdata->buck125_gpios[0],
@@ -1061,7 +1041,7 @@
 			dev_warn(&pdev->dev, "Duplicated gpio request"
 					" on SET1\n");
 		else if (ret)
-			goto err_alloc;
+			goto err_out;
 		else
 			gpio1set = true;
 
@@ -1073,7 +1053,7 @@
 		else if (ret) {
 			if (gpio1set)
 				gpio_free(pdata->buck125_gpios[0]);
-			goto err_alloc;
+			goto err_out;
 		} else
 			gpio2set = true;
 
@@ -1087,7 +1067,7 @@
 				gpio_free(pdata->buck125_gpios[0]);
 			if (gpio2set)
 				gpio_free(pdata->buck125_gpios[1]);
-			goto err_alloc;
+			goto err_out;
 		}
 
 		gpio_direction_output(pdata->buck125_gpios[0],
@@ -1140,8 +1120,11 @@
 		else if (id == MAX8997_CHARGER_CV)
 			regulators[id].n_voltages = 16;
 
-		rdev[i] = regulator_register(&regulators[id], max8997->dev,
-				pdata->regulators[i].initdata, max8997, NULL);
+		config.dev = max8997->dev;
+		config.init_data = pdata->regulators[i].initdata;
+		config.driver_data = max8997;
+
+		rdev[i] = regulator_register(&regulators[id], &config);
 		if (IS_ERR(rdev[i])) {
 			ret = PTR_ERR(rdev[i]);
 			dev_err(max8997->dev, "regulator init failed for %d\n",
@@ -1153,13 +1136,9 @@
 
 	return 0;
 err:
-	for (i = 0; i < max8997->num_regulators; i++)
-		if (rdev[i])
-			regulator_unregister(rdev[i]);
-err_alloc:
-	kfree(max8997->rdev);
-	kfree(max8997);
-
+	while (--i >= 0)
+		regulator_unregister(rdev[i]);
+err_out:
 	return ret;
 }
 
@@ -1170,12 +1149,7 @@
 	int i;
 
 	for (i = 0; i < max8997->num_regulators; i++)
-		if (rdev[i])
-			regulator_unregister(rdev[i]);
-
-	kfree(max8997->rdev);
-	kfree(max8997);
-
+		regulator_unregister(rdev[i]);
 	return 0;
 }
 
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 5890265..18bb58b 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -28,7 +28,6 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
-#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/mfd/max8998.h>
@@ -277,7 +276,7 @@
 	return 0;
 }
 
-static int max8998_get_voltage(struct regulator_dev *rdev)
+static int max8998_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct max8998_data *max8998 = rdev_get_drvdata(rdev);
 	struct i2c_client *i2c = max8998->iodev->i2c;
@@ -295,7 +294,7 @@
 	val >>= shift;
 	val &= mask;
 
-	return max8998_list_voltage(rdev, val);
+	return val;
 }
 
 static int max8998_set_voltage_ldo(struct regulator_dev *rdev,
@@ -306,8 +305,7 @@
 	int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
 	const struct voltage_map_desc *desc;
 	int ldo = rdev_get_id(rdev);
-	int reg, shift = 0, mask, ret;
-	int i = 0;
+	int reg, shift = 0, mask, ret, i;
 
 	if (ldo >= ARRAY_SIZE(ldo_voltage_map))
 		return -EINVAL;
@@ -319,9 +317,10 @@
 	if (max_vol < desc->min || min_vol > desc->max)
 		return -EINVAL;
 
-	while (desc->min + desc->step*i < min_vol &&
-	       desc->min + desc->step*i < desc->max)
-		i++;
+	if (min_vol < desc->min)
+		min_vol = desc->min;
+
+	i = DIV_ROUND_UP(min_vol - desc->min, desc->step);
 
 	if (desc->min + desc->step*i > max_vol)
 		return -EINVAL;
@@ -359,8 +358,7 @@
 	const struct voltage_map_desc *desc;
 	int buck = rdev_get_id(rdev);
 	int reg, shift = 0, mask, ret;
-	int difference = 0, i = 0, j = 0, previous_vol = 0;
-	u8 val = 0;
+	int i, j, previous_sel;
 	static u8 buck1_last_val;
 
 	if (buck >= ARRAY_SIZE(ldo_voltage_map))
@@ -374,9 +372,10 @@
 	if (max_vol < desc->min || min_vol > desc->max)
 		return -EINVAL;
 
-	while (desc->min + desc->step*i < min_vol &&
-	       desc->min + desc->step*i < desc->max)
-		i++;
+	if (min_vol < desc->min)
+		min_vol = desc->min;
+
+	i = DIV_ROUND_UP(min_vol - desc->min, desc->step);
 
 	if (desc->min + desc->step*i > max_vol)
 		return -EINVAL;
@@ -387,13 +386,14 @@
 	if (ret)
 		return ret;
 
-	previous_vol = max8998_get_voltage(rdev);
+	previous_sel = max8998_get_voltage_sel(rdev);
 
 	/* Check if voltage needs to be changed */
 	/* if previous_voltage equal new voltage, return */
-	if (previous_vol == max8998_list_voltage(rdev, i)) {
+	if (previous_sel == i) {
 		dev_dbg(max8998->dev, "No voltage change, old:%d, new:%d\n",
-			previous_vol, max8998_list_voltage(rdev, i));
+			max8998_list_voltage(rdev, previous_sel),
+			max8998_list_voltage(rdev, i));
 		return ret;
 	}
 
@@ -482,19 +482,40 @@
 		break;
 	}
 
+	return ret;
+}
+
+static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
+					     unsigned int old_selector,
+					     unsigned int new_selector)
+{
+	struct max8998_data *max8998 = rdev_get_drvdata(rdev);
+	struct i2c_client *i2c = max8998->iodev->i2c;
+	const struct voltage_map_desc *desc;
+	int buck = rdev_get_id(rdev);
+	u8 val = 0;
+	int difference, ret;
+
+	if (buck < MAX8998_BUCK1 || buck > MAX8998_BUCK4)
+		return -EINVAL;
+
+	desc = ldo_voltage_map[buck];
+
 	/* Voltage stabilization */
-	max8998_read_reg(i2c, MAX8998_REG_ONOFF4, &val);
+	ret = max8998_read_reg(i2c, MAX8998_REG_ONOFF4, &val);
+	if (ret)
+		return ret;
 
 	/* lp3974 hasn't got ENRAMP bit - ramp is assumed as true */
 	/* MAX8998 has ENRAMP bit implemented, so test it*/
 	if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP))
-		return ret;
+		return 0;
 
-	difference = desc->min + desc->step*i - previous_vol/1000;
+	difference = (new_selector - old_selector) * desc->step;
 	if (difference > 0)
-		udelay(difference / ((val & 0x0f) + 1));
+		return difference / ((val & 0x0f) + 1);
 
-	return ret;
+	return 0;
 }
 
 static struct regulator_ops max8998_ldo_ops = {
@@ -502,7 +523,7 @@
 	.is_enabled		= max8998_ldo_is_enabled,
 	.enable			= max8998_ldo_enable,
 	.disable		= max8998_ldo_disable,
-	.get_voltage		= max8998_get_voltage,
+	.get_voltage_sel	= max8998_get_voltage_sel,
 	.set_voltage		= max8998_set_voltage_ldo,
 	.set_suspend_enable	= max8998_ldo_enable,
 	.set_suspend_disable	= max8998_ldo_disable,
@@ -513,8 +534,9 @@
 	.is_enabled		= max8998_ldo_is_enabled,
 	.enable			= max8998_ldo_enable,
 	.disable		= max8998_ldo_disable,
-	.get_voltage		= max8998_get_voltage,
+	.get_voltage_sel	= max8998_get_voltage_sel,
 	.set_voltage		= max8998_set_voltage_buck,
+	.set_voltage_time_sel	= max8998_set_voltage_buck_time_sel,
 	.set_suspend_enable	= max8998_ldo_enable,
 	.set_suspend_disable	= max8998_ldo_disable,
 };
@@ -685,6 +707,7 @@
 {
 	struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
 	struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
+	struct regulator_config config = { };
 	struct regulator_dev **rdev;
 	struct max8998_data *max8998;
 	struct i2c_client *i2c;
@@ -695,16 +718,15 @@
 		return -ENODEV;
 	}
 
-	max8998 = kzalloc(sizeof(struct max8998_data), GFP_KERNEL);
+	max8998 = devm_kzalloc(&pdev->dev, sizeof(struct max8998_data),
+			       GFP_KERNEL);
 	if (!max8998)
 		return -ENOMEM;
 
 	size = sizeof(struct regulator_dev *) * pdata->num_regulators;
-	max8998->rdev = kzalloc(size, GFP_KERNEL);
-	if (!max8998->rdev) {
-		kfree(max8998);
+	max8998->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!max8998->rdev)
 		return -ENOMEM;
-	}
 
 	rdev = max8998->rdev;
 	max8998->dev = &pdev->dev;
@@ -728,14 +750,14 @@
 			printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n");
 			WARN_ON(!pdata->buck1_set1);
 			ret = -EIO;
-			goto err_free_mem;
+			goto err_out;
 		}
 		/* Check if SET2 is not equal to 0 */
 		if (!pdata->buck1_set2) {
 			printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n");
 			WARN_ON(!pdata->buck1_set2);
 			ret = -EIO;
-			goto err_free_mem;
+			goto err_out;
 		}
 
 		gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
@@ -755,7 +777,7 @@
 		max8998->buck1_vol[0] = i;
 		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
 		if (ret)
-			goto err_free_mem;
+			goto err_out;
 
 		/* Set predefined value for BUCK1 register 2 */
 		i = 0;
@@ -767,7 +789,7 @@
 		max8998->buck1_vol[1] = i;
 		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i);
 		if (ret)
-			goto err_free_mem;
+			goto err_out;
 
 		/* Set predefined value for BUCK1 register 3 */
 		i = 0;
@@ -779,7 +801,7 @@
 		max8998->buck1_vol[2] = i;
 		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i);
 		if (ret)
-			goto err_free_mem;
+			goto err_out;
 
 		/* Set predefined value for BUCK1 register 4 */
 		i = 0;
@@ -791,7 +813,7 @@
 		max8998->buck1_vol[3] = i;
 		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i);
 		if (ret)
-			goto err_free_mem;
+			goto err_out;
 
 	}
 
@@ -801,7 +823,7 @@
 			printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n");
 			WARN_ON(!pdata->buck2_set3);
 			ret = -EIO;
-			goto err_free_mem;
+			goto err_out;
 		}
 		gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
 		gpio_direction_output(pdata->buck2_set3,
@@ -816,7 +838,7 @@
 		max8998->buck2_vol[0] = i;
 		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
 		if (ret)
-			goto err_free_mem;
+			goto err_out;
 
 		/* BUCK2 register 2 */
 		i = 0;
@@ -827,7 +849,7 @@
 		max8998->buck2_vol[1] = i;
 		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
 		if (ret)
-			goto err_free_mem;
+			goto err_out;
 	}
 
 	for (i = 0; i < pdata->num_regulators; i++) {
@@ -840,8 +862,12 @@
 			int count = (desc->max - desc->min) / desc->step + 1;
 			regulators[index].n_voltages = count;
 		}
-		rdev[i] = regulator_register(&regulators[index], max8998->dev,
-				pdata->regulators[i].initdata, max8998, NULL);
+
+		config.dev = max8998->dev;
+		config.init_data = pdata->regulators[i].initdata;
+		config.driver_data = max8998;
+
+		rdev[i] = regulator_register(&regulators[index], &config);
 		if (IS_ERR(rdev[i])) {
 			ret = PTR_ERR(rdev[i]);
 			dev_err(max8998->dev, "regulator init failed\n");
@@ -853,14 +879,9 @@
 
 	return 0;
 err:
-	for (i = 0; i < max8998->num_regulators; i++)
-		if (rdev[i])
-			regulator_unregister(rdev[i]);
-
-err_free_mem:
-	kfree(max8998->rdev);
-	kfree(max8998);
-
+	while (--i >= 0)
+		regulator_unregister(rdev[i]);
+err_out:
 	return ret;
 }
 
@@ -871,12 +892,7 @@
 	int i;
 
 	for (i = 0; i < max8998->num_regulators; i++)
-		if (rdev[i])
-			regulator_unregister(rdev[i]);
-
-	kfree(max8998->rdev);
-	kfree(max8998);
-
+		regulator_unregister(rdev[i]);
 	return 0;
 }
 
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 6c0face..7dcdfa2 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -340,6 +340,7 @@
 	struct mc13xxx_regulator_platform_data *pdata =
 		dev_get_platdata(&pdev->dev);
 	struct mc13xxx_regulator_init_data *init_data;
+	struct regulator_config config = { };
 	int i, ret;
 
 	dev_dbg(&pdev->dev, "%s id %d\n", __func__, pdev->id);
@@ -357,11 +358,16 @@
 	priv->mc13xxx = mc13783;
 
 	for (i = 0; i < pdata->num_regulators; i++) {
-		init_data = &pdata->regulators[i];
-		priv->regulators[i] = regulator_register(
-				&mc13783_regulators[init_data->id].desc,
-				&pdev->dev, init_data->init_data, priv, NULL);
+		struct regulator_desc *desc;
 
+		init_data = &pdata->regulators[i];
+		desc = &mc13783_regulators[init_data->id].desc;
+
+		config.dev = &pdev->dev;
+		config.init_data = init_data->init_data;
+		config.driver_data = priv;
+
+		priv->regulators[i] = regulator_register(desc, &config);
 		if (IS_ERR(priv->regulators[i])) {
 			dev_err(&pdev->dev, "failed to register regulator %s\n",
 				mc13783_regulators[i].desc.name);
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 845aa22..970a233 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -428,24 +428,15 @@
 	return val;
 }
 
-static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
-		int min_uV, int max_uV, unsigned *selector)
+static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
+						unsigned selector)
 {
 	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
 	int hi, value, mask, id = rdev_get_id(rdev);
 	u32 valread;
 	int ret;
 
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
-		__func__, id, min_uV, max_uV);
-
-	/* Find the best index */
-	value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
-	dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
-	if (value < 0)
-		return value;
-
-	value = mc13892_regulators[id].voltages[value];
+	value = mc13892_regulators[id].voltages[selector];
 
 	mc13xxx_lock(priv->mc13xxx);
 	ret = mc13xxx_reg_read(priv->mc13xxx,
@@ -480,7 +471,7 @@
 static struct regulator_ops mc13892_sw_regulator_ops = {
 	.is_enabled = mc13xxx_sw_regulator_is_enabled,
 	.list_voltage = mc13xxx_regulator_list_voltage,
-	.set_voltage = mc13892_sw_regulator_set_voltage,
+	.set_voltage_sel = mc13892_sw_regulator_set_voltage_sel,
 	.get_voltage = mc13892_sw_regulator_get_voltage,
 };
 
@@ -528,6 +519,7 @@
 	struct mc13xxx_regulator_platform_data *pdata =
 		dev_get_platdata(&pdev->dev);
 	struct mc13xxx_regulator_init_data *mc13xxx_data;
+	struct regulator_config config = { };
 	int i, ret;
 	int num_regulators = 0;
 	u32 val;
@@ -597,9 +589,12 @@
 		}
 		desc = &mc13892_regulators[id].desc;
 
-		priv->regulators[i] = regulator_register(
-			desc, &pdev->dev, init_data, priv, node);
+		config.dev = &pdev->dev;
+		config.init_data = init_data;
+		config.driver_data = priv;
+		config.of_node = node;
 
+		priv->regulators[i] = regulator_register(desc, &config);
 		if (IS_ERR(priv->regulators[i])) {
 			dev_err(&pdev->dev, "failed to register regulator %s\n",
 				mc13892_regulators[i].desc.name);
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 62dcd0a..4fa9704 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -94,62 +94,18 @@
 }
 EXPORT_SYMBOL_GPL(mc13xxx_regulator_list_voltage);
 
-int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
+static int mc13xxx_regulator_set_voltage_sel(struct regulator_dev *rdev,
+					     unsigned selector)
 {
 	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
 	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
-	int reg_id = rdev_get_id(rdev);
-	int i;
-	int bestmatch;
-	int bestindex;
-
-	/*
-	 * Locate the minimum voltage fitting the criteria on
-	 * this regulator. The switchable voltages are not
-	 * in strict falling order so we need to check them
-	 * all for the best match.
-	 */
-	bestmatch = INT_MAX;
-	bestindex = -1;
-	for (i = 0; i < mc13xxx_regulators[reg_id].desc.n_voltages; i++) {
-		if (mc13xxx_regulators[reg_id].voltages[i] >= min_uV &&
-		    mc13xxx_regulators[reg_id].voltages[i] < bestmatch) {
-			bestmatch = mc13xxx_regulators[reg_id].voltages[i];
-			bestindex = i;
-		}
-	}
-
-	if (bestindex < 0 || bestmatch > max_uV) {
-		dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
-				min_uV, max_uV);
-		return -EINVAL;
-	}
-	return bestindex;
-}
-EXPORT_SYMBOL_GPL(mc13xxx_get_best_voltage_index);
-
-static int mc13xxx_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
-		int max_uV, unsigned *selector)
-{
-	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
-	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
-	int value, id = rdev_get_id(rdev);
+	int id = rdev_get_id(rdev);
 	int ret;
 
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
-		__func__, id, min_uV, max_uV);
-
-	/* Find the best index */
-	value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
-	dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
-	if (value < 0)
-		return value;
-
 	mc13xxx_lock(priv->mc13xxx);
 	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].vsel_reg,
 			mc13xxx_regulators[id].vsel_mask,
-			value << mc13xxx_regulators[id].vsel_shift);
+			selector << mc13xxx_regulators[id].vsel_shift);
 	mc13xxx_unlock(priv->mc13xxx);
 
 	return ret;
@@ -187,7 +143,7 @@
 	.disable = mc13xxx_regulator_disable,
 	.is_enabled = mc13xxx_regulator_is_enabled,
 	.list_voltage = mc13xxx_regulator_list_voltage,
-	.set_voltage = mc13xxx_regulator_set_voltage,
+	.set_voltage_sel = mc13xxx_regulator_set_voltage_sel,
 	.get_voltage = mc13xxx_regulator_get_voltage,
 };
 EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops);
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index b3961c6..044aba4 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -35,8 +35,6 @@
 
 extern int mc13xxx_sw_regulator(struct regulator_dev *rdev);
 extern int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev);
-extern int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
-						int min_uV, int max_uV);
 extern int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
 						unsigned selector);
 extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 679734d..56593b7 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
 
 static void of_get_regulation_constraints(struct device_node *np,
 					struct regulator_init_data **init_data)
@@ -85,3 +86,49 @@
 	return init_data;
 }
 EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
+
+/**
+ * of_regulator_match - extract regulator init data
+ * @dev: device requesting the data
+ * @node: parent device node of the regulators
+ * @matches: match table for the regulators
+ * @num_matches: number of entries in match table
+ *
+ * This function uses a match table specified by the regulator driver and
+ * looks up the corresponding init data in the device tree. Note that the
+ * match table is modified in place.
+ *
+ * Returns the number of matches found or a negative error code on failure.
+ */
+int of_regulator_match(struct device *dev, struct device_node *node,
+		       struct of_regulator_match *matches,
+		       unsigned int num_matches)
+{
+	unsigned int count = 0;
+	unsigned int i;
+
+	if (!dev || !node)
+		return -EINVAL;
+
+	for (i = 0; i < num_matches; i++) {
+		struct of_regulator_match *match = &matches[i];
+		struct device_node *child;
+
+		child = of_find_node_by_name(node, match->name);
+		if (!child)
+			continue;
+
+		match->init_data = of_get_regulator_init_data(dev, child);
+		if (!match->init_data) {
+			dev_err(dev, "failed to parse DT for regulator %s\n",
+				child->name);
+			return -EINVAL;
+		}
+
+		match->of_node = child;
+		count++;
+	}
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(of_regulator_match);
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
new file mode 100644
index 0000000..c4435f6
--- /dev/null
+++ b/drivers/regulator/palmas-regulator.c
@@ -0,0 +1,822 @@
+/*
+ * Driver for Regulator part of Palmas PMIC Chips
+ *
+ * Copyright 2011-2012 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/mfd/palmas.h>
+
+struct regs_info {
+	char	*name;
+	u8	vsel_addr;
+	u8	ctrl_addr;
+	u8	tstep_addr;
+};
+
+static const struct regs_info palmas_regs_info[] = {
+	{
+		.name		= "SMPS12",
+		.vsel_addr	= PALMAS_SMPS12_VOLTAGE,
+		.ctrl_addr	= PALMAS_SMPS12_CTRL,
+		.tstep_addr	= PALMAS_SMPS12_TSTEP,
+	},
+	{
+		.name		= "SMPS123",
+		.vsel_addr	= PALMAS_SMPS12_VOLTAGE,
+		.ctrl_addr	= PALMAS_SMPS12_CTRL,
+		.tstep_addr	= PALMAS_SMPS12_TSTEP,
+	},
+	{
+		.name		= "SMPS3",
+		.vsel_addr	= PALMAS_SMPS3_VOLTAGE,
+		.ctrl_addr	= PALMAS_SMPS3_CTRL,
+	},
+	{
+		.name		= "SMPS45",
+		.vsel_addr	= PALMAS_SMPS45_VOLTAGE,
+		.ctrl_addr	= PALMAS_SMPS45_CTRL,
+		.tstep_addr	= PALMAS_SMPS45_TSTEP,
+	},
+	{
+		.name		= "SMPS457",
+		.vsel_addr	= PALMAS_SMPS45_VOLTAGE,
+		.ctrl_addr	= PALMAS_SMPS45_CTRL,
+		.tstep_addr	= PALMAS_SMPS45_TSTEP,
+	},
+	{
+		.name		= "SMPS6",
+		.vsel_addr	= PALMAS_SMPS6_VOLTAGE,
+		.ctrl_addr	= PALMAS_SMPS6_CTRL,
+		.tstep_addr	= PALMAS_SMPS6_TSTEP,
+	},
+	{
+		.name		= "SMPS7",
+		.vsel_addr	= PALMAS_SMPS7_VOLTAGE,
+		.ctrl_addr	= PALMAS_SMPS7_CTRL,
+	},
+	{
+		.name		= "SMPS8",
+		.vsel_addr	= PALMAS_SMPS8_VOLTAGE,
+		.ctrl_addr	= PALMAS_SMPS8_CTRL,
+		.tstep_addr	= PALMAS_SMPS8_TSTEP,
+	},
+	{
+		.name		= "SMPS9",
+		.vsel_addr	= PALMAS_SMPS9_VOLTAGE,
+		.ctrl_addr	= PALMAS_SMPS9_CTRL,
+	},
+	{
+		.name		= "SMPS10",
+	},
+	{
+		.name		= "LDO1",
+		.vsel_addr	= PALMAS_LDO1_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDO1_CTRL,
+	},
+	{
+		.name		= "LDO2",
+		.vsel_addr	= PALMAS_LDO2_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDO2_CTRL,
+	},
+	{
+		.name		= "LDO3",
+		.vsel_addr	= PALMAS_LDO3_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDO3_CTRL,
+	},
+	{
+		.name		= "LDO4",
+		.vsel_addr	= PALMAS_LDO4_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDO4_CTRL,
+	},
+	{
+		.name		= "LDO5",
+		.vsel_addr	= PALMAS_LDO5_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDO5_CTRL,
+	},
+	{
+		.name		= "LDO6",
+		.vsel_addr	= PALMAS_LDO6_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDO6_CTRL,
+	},
+	{
+		.name		= "LDO7",
+		.vsel_addr	= PALMAS_LDO7_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDO7_CTRL,
+	},
+	{
+		.name		= "LDO8",
+		.vsel_addr	= PALMAS_LDO8_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDO8_CTRL,
+	},
+	{
+		.name		= "LDO9",
+		.vsel_addr	= PALMAS_LDO9_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDO9_CTRL,
+	},
+	{
+		.name		= "LDOLN",
+		.vsel_addr	= PALMAS_LDOLN_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDOLN_CTRL,
+	},
+	{
+		.name		= "LDOUSB",
+		.vsel_addr	= PALMAS_LDOUSB_VOLTAGE,
+		.ctrl_addr	= PALMAS_LDOUSB_CTRL,
+	},
+};
+
+#define SMPS_CTRL_MODE_OFF		0x00
+#define SMPS_CTRL_MODE_ON		0x01
+#define SMPS_CTRL_MODE_ECO		0x02
+#define SMPS_CTRL_MODE_PWM		0x03
+
+/* These values are derived from the data sheet. They are the number of steps
+ * where there is a voltage change; the ranges at the beginning and end of the
+ * register's max/min values where there is no change are omitted.
+ *
+ * So they are basically (maxV-minV)/stepV
+ */
+#define PALMAS_SMPS_NUM_VOLTAGES	116
+#define PALMAS_SMPS10_NUM_VOLTAGES	2
+#define PALMAS_LDO_NUM_VOLTAGES		50
+
+#define SMPS10_VSEL			(1<<3)
+#define SMPS10_BOOST_EN			(1<<2)
+#define SMPS10_BYPASS_EN		(1<<1)
+#define SMPS10_SWITCH_EN		(1<<0)
+
+#define REGULATOR_SLAVE			0
+
+static int palmas_smps_read(struct palmas *palmas, unsigned int reg,
+		unsigned int *dest)
+{
+	unsigned int addr;
+
+	addr = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, reg);
+
+	return regmap_read(palmas->regmap[REGULATOR_SLAVE], addr, dest);
+}
+
+static int palmas_smps_write(struct palmas *palmas, unsigned int reg,
+		unsigned int value)
+{
+	unsigned int addr;
+
+	addr = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, reg);
+
+	return regmap_write(palmas->regmap[REGULATOR_SLAVE], addr, value);
+}
+
+static int palmas_ldo_read(struct palmas *palmas, unsigned int reg,
+		unsigned int *dest)
+{
+	unsigned int addr;
+
+	addr = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, reg);
+
+	return regmap_read(palmas->regmap[REGULATOR_SLAVE], addr, dest);
+}
+
+static int palmas_ldo_write(struct palmas *palmas, unsigned int reg,
+		unsigned int value)
+{
+	unsigned int addr;
+
+	addr = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, reg);
+
+	return regmap_write(palmas->regmap[REGULATOR_SLAVE], addr, value);
+}
+
+static int palmas_is_enabled_smps(struct regulator_dev *dev)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	unsigned int reg;
+
+	palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
+
+	reg &= PALMAS_SMPS12_CTRL_STATUS_MASK;
+	reg >>= PALMAS_SMPS12_CTRL_STATUS_SHIFT;
+
+	return !!(reg);
+}
+
+static int palmas_enable_smps(struct regulator_dev *dev)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	unsigned int reg;
+
+	palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
+
+	reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
+	reg |= SMPS_CTRL_MODE_ON;
+
+	palmas_smps_write(pmic->palmas, palmas_regs_info[id].ctrl_addr, reg);
+
+	return 0;
+}
+
+static int palmas_disable_smps(struct regulator_dev *dev)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	unsigned int reg;
+
+	palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
+
+	reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
+
+	palmas_smps_write(pmic->palmas, palmas_regs_info[id].ctrl_addr, reg);
+
+	return 0;
+}
+
+
+static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	unsigned int reg;
+
+	palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
+	reg &= ~PALMAS_SMPS12_CTRL_STATUS_MASK;
+	reg >>= PALMAS_SMPS12_CTRL_STATUS_SHIFT;
+
+	switch (mode) {
+	case REGULATOR_MODE_NORMAL:
+		reg |= SMPS_CTRL_MODE_ON;
+		break;
+	case REGULATOR_MODE_IDLE:
+		reg |= SMPS_CTRL_MODE_ECO;
+		break;
+	case REGULATOR_MODE_FAST:
+		reg |= SMPS_CTRL_MODE_PWM;
+		break;
+	default:
+		return -EINVAL;
+	}
+	palmas_smps_write(pmic->palmas, palmas_regs_info[id].ctrl_addr, reg);
+
+	return 0;
+}
+
+static unsigned int palmas_get_mode_smps(struct regulator_dev *dev)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	unsigned int reg;
+
+	palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
+	reg &= PALMAS_SMPS12_CTRL_STATUS_MASK;
+	reg >>= PALMAS_SMPS12_CTRL_STATUS_SHIFT;
+
+	switch (reg) {
+	case SMPS_CTRL_MODE_ON:
+		return REGULATOR_MODE_NORMAL;
+	case SMPS_CTRL_MODE_ECO:
+		return REGULATOR_MODE_IDLE;
+	case SMPS_CTRL_MODE_PWM:
+		return REGULATOR_MODE_FAST;
+	}
+
+	return 0;
+}
+
+static int palmas_list_voltage_smps(struct regulator_dev *dev,
+					unsigned selector)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	int mult = 1;
+
+	if (!selector)
+		return 0;
+
+	/* Read the multiplier set in VSEL register to return
+	 * the correct voltage.
+	 */
+	if (pmic->range[id])
+		mult = 2;
+
+	/* Voltage is (0.49V + (selector * 0.01V)) * RANGE
+	 * as defined in data sheet. RANGE is either x1 or x2
+	 */
+	return  (490000 + (selector * 10000)) * mult;
+}
+
+static int palmas_get_voltage_smps_sel(struct regulator_dev *dev)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	int selector;
+	unsigned int reg;
+	unsigned int addr;
+
+	addr = palmas_regs_info[id].vsel_addr;
+
+	palmas_smps_read(pmic->palmas, addr, &reg);
+
+	selector = reg & PALMAS_SMPS12_VOLTAGE_VSEL_MASK;
+
+	/* Adjust selector to match list_voltage ranges */
+	if ((selector > 0) && (selector < 6))
+		selector = 6;
+	if (!selector)
+		selector = 5;
+	if (selector > 121)
+		selector = 121;
+	selector -= 5;
+
+	return selector;
+}
+
+static int palmas_set_voltage_smps_sel(struct regulator_dev *dev,
+		unsigned selector)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	unsigned int reg = 0;
+	unsigned int addr;
+
+	addr = palmas_regs_info[id].vsel_addr;
+
+	/* Make sure we don't change the value of RANGE */
+	if (pmic->range[id])
+		reg |= PALMAS_SMPS12_VOLTAGE_RANGE;
+
+	/* Adjust the Linux selector into the range used by the VSEL register */
+	if (selector)
+		reg |= selector + 5;
+
+	palmas_smps_write(pmic->palmas, addr, reg);
+
+	return 0;
+}
+
+static int palmas_map_voltage_smps(struct regulator_dev *rdev,
+		int min_uV, int max_uV)
+{
+	int ret, voltage;
+
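+	/*
+	 * Selector 1 corresponds to 0.5V in 10mV steps; the RANGE
+	 * multiplier is accounted for by list_voltage() below.
+	 */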
+	ret = ((min_uV - 500000) / 10000) + 1;
+	if (ret < 0)
+		return ret;
+
+	/* Map back into a voltage to verify we're still in bounds */
+	voltage = palmas_list_voltage_smps(rdev, ret);
+	if (voltage < min_uV || voltage > max_uV)
+		return -EINVAL;
+
+	return ret;
+}
+
+static struct regulator_ops palmas_ops_smps = {
+	.is_enabled		= palmas_is_enabled_smps,
+	.enable			= palmas_enable_smps,
+	.disable		= palmas_disable_smps,
+	.set_mode		= palmas_set_mode_smps,
+	.get_mode		= palmas_get_mode_smps,
+	.get_voltage_sel	= palmas_get_voltage_smps_sel,
+	.set_voltage_sel	= palmas_set_voltage_smps_sel,
+	.list_voltage		= palmas_list_voltage_smps,
+	.map_voltage		= palmas_map_voltage_smps,
+};
+
+static int palmas_list_voltage_smps10(struct regulator_dev *dev,
+					unsigned selector)
+{
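+	/* SMPS10 boost output: 3.75V plus 1.25V per selector step */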
+	return 3750000 + (selector * 1250000);
+}
+
+static struct regulator_ops palmas_ops_smps10 = {
+	.is_enabled		= regulator_is_enabled_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
+	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
+	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+	.list_voltage		= palmas_list_voltage_smps10,
+};
+
+static int palmas_is_enabled_ldo(struct regulator_dev *dev)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	unsigned int reg;
+
+	palmas_ldo_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
+
+	reg &= PALMAS_LDO1_CTRL_STATUS;
+
+	return !!(reg);
+}
+
+static int palmas_list_voltage_ldo(struct regulator_dev *dev,
+					unsigned selector)
+{
+	if (!selector)
+		return 0;
+
+	/* Voltage is 0.85V + (selector * 0.05V) */
+	return  850000 + (selector * 50000);
+}
+
+static int palmas_get_voltage_ldo_sel(struct regulator_dev *dev)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	int selector;
+	unsigned int reg;
+	unsigned int addr;
+
+	addr = palmas_regs_info[id].vsel_addr;
+
+	palmas_ldo_read(pmic->palmas, addr, &reg);
+
+	selector = reg & PALMAS_LDO1_VOLTAGE_VSEL_MASK;
+
+	/* Adjust selector to match list_voltage ranges */
+	if (selector > 49)
+		selector = 49;
+
+	return selector;
+}
+
+static int palmas_set_voltage_ldo_sel(struct regulator_dev *dev,
+		unsigned selector)
+{
+	struct palmas_pmic *pmic = rdev_get_drvdata(dev);
+	int id = rdev_get_id(dev);
+	unsigned int reg = 0;
+	unsigned int addr;
+
+	addr = palmas_regs_info[id].vsel_addr;
+
+	reg = selector;
+
+	palmas_ldo_write(pmic->palmas, addr, reg);
+
+	return 0;
+}
+
+static int palmas_map_voltage_ldo(struct regulator_dev *rdev,
+		int min_uV, int max_uV)
+{
+	int ret, voltage;
+
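+	/* Selector 1 corresponds to 0.9V in 50mV steps */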
+	ret = ((min_uV - 900000) / 50000) + 1;
+	if (ret < 0)
+		return ret;
+
+	/* Map back into a voltage to verify we're still in bounds */
+	voltage = palmas_list_voltage_ldo(rdev, ret);
+	if (voltage < min_uV || voltage > max_uV)
+		return -EINVAL;
+
+	return ret;
+}
+
+static struct regulator_ops palmas_ops_ldo = {
+	.is_enabled		= palmas_is_enabled_ldo,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
+	.get_voltage_sel	= palmas_get_voltage_ldo_sel,
+	.set_voltage_sel	= palmas_set_voltage_ldo_sel,
+	.list_voltage		= palmas_list_voltage_ldo,
+	.map_voltage		= palmas_map_voltage_ldo,
+};
+
+/*
+ * Set up the hardware-based sleep configuration of the SMPS/LDO regulators
+ * from the platform data. This is different from the software-based control
+ * supported by the regulator framework as it is controlled by toggling
+ * pins on the PMIC such as PREQ, SYSEN, ...
+ */
+static int palmas_smps_init(struct palmas *palmas, int id,
+		struct palmas_reg_init *reg_init)
+{
+	unsigned int reg;
+	unsigned int addr;
+	int ret;
+
+	addr = palmas_regs_info[id].ctrl_addr;
+
+	ret = palmas_smps_read(palmas, addr, &reg);
+	if (ret)
+		return ret;
+
+	if (id != PALMAS_REG_SMPS10) {
+		if (reg_init->warm_reset)
+			reg |= PALMAS_SMPS12_CTRL_WR_S;
+
+		if (reg_init->roof_floor)
+			reg |= PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN;
+
+		if (reg_init->mode_sleep) {
+			reg &= ~PALMAS_SMPS12_CTRL_MODE_SLEEP_MASK;
+			reg |= reg_init->mode_sleep <<
+					PALMAS_SMPS12_CTRL_MODE_SLEEP_SHIFT;
+		}
+	} else {
+		if (reg_init->mode_sleep) {
+			reg &= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK;
+			reg |= reg_init->mode_sleep <<
+					PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT;
+		}
+
+	}
+	ret = palmas_smps_write(palmas, addr, reg);
+	if (ret)
+		return ret;
+
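+	/* Optionally program the ramp (TSTEP) and initial voltage selector */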
+	if (palmas_regs_info[id].tstep_addr && reg_init->tstep) {
+		addr = palmas_regs_info[id].tstep_addr;
+
+		reg = reg_init->tstep & PALMAS_SMPS12_TSTEP_TSTEP_MASK;
+
+		ret = palmas_smps_write(palmas, addr, reg);
+		if (ret)
+			return ret;
+	}
+
+	if (palmas_regs_info[id].vsel_addr && reg_init->vsel) {
+		addr = palmas_regs_info[id].vsel_addr;
+
+		reg = reg_init->vsel;
+
+		ret = palmas_smps_write(palmas, addr, reg);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int palmas_ldo_init(struct palmas *palmas, int id,
+		struct palmas_reg_init *reg_init)
+{
+	unsigned int reg;
+	unsigned int addr;
+	int ret;
+
+	addr = palmas_regs_info[id].ctrl_addr;
+
+	ret = palmas_ldo_read(palmas, addr, &reg);
+	if (ret)
+		return ret;
+
+	if (reg_init->warm_reset)
+		reg |= PALMAS_LDO1_CTRL_WR_S;
+
+	if (reg_init->mode_sleep)
+		reg |= PALMAS_LDO1_CTRL_MODE_SLEEP;
+
+	ret = palmas_ldo_write(palmas, addr, reg);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static __devinit int palmas_probe(struct platform_device *pdev)
+{
+	struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
+	struct palmas_pmic_platform_data *pdata = pdev->dev.platform_data;
+	struct regulator_dev *rdev;
+	struct regulator_config config = { };
+	struct palmas_pmic *pmic;
+	struct palmas_reg_init *reg_init;
+	int id = 0, ret;
+	unsigned int addr, reg;
+
+	if (!pdata)
+		return -EINVAL;
+	if (!pdata->reg_data)
+		return -EINVAL;
+
+	pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
+	if (!pmic)
+		return -ENOMEM;
+
+	pmic->dev = &pdev->dev;
+	pmic->palmas = palmas;
+	palmas->pmic = pmic;
+	platform_set_drvdata(pdev, pmic);
+
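+	/* Check whether SMPS12/SMPS45 are chained into SMPS123/SMPS457 */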
+	ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, &reg);
+	if (ret)
+		goto err_unregister_regulator;
+
+	if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN)
+		pmic->smps123 = 1;
+
+	if (reg & PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN)
+		pmic->smps457 = 1;
+
+	config.regmap = palmas->regmap[REGULATOR_SLAVE];
+	config.dev = &pdev->dev;
+	config.driver_data = pmic;
+
+	for (id = 0; id < PALMAS_REG_LDO1; id++) {
+
+		/*
+		 * Skip regulators that are not available because
+		 * they are slaved into a combined SMPS.
+		 */
+		switch (id) {
+		case PALMAS_REG_SMPS12:
+		case PALMAS_REG_SMPS3:
+			if (pmic->smps123)
+				continue;
+			break;
+		case PALMAS_REG_SMPS123:
+			if (!pmic->smps123)
+				continue;
+			break;
+		case PALMAS_REG_SMPS45:
+		case PALMAS_REG_SMPS7:
+			if (pmic->smps457)
+				continue;
+			break;
+		case PALMAS_REG_SMPS457:
+			if (!pmic->smps457)
+				continue;
+		}
+
+		/* Register the regulators */
+		pmic->desc[id].name = palmas_regs_info[id].name;
+		pmic->desc[id].id = id;
+
+		if (id != PALMAS_REG_SMPS10) {
+			pmic->desc[id].ops = &palmas_ops_smps;
+			pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES;
+		} else {
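+			/*
+			 * SMPS10 (boost) uses the generic regmap helpers,
+			 * so fill in the register and bitfield details here.
+			 */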
+			pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES;
+			pmic->desc[id].ops = &palmas_ops_smps10;
+			pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
+			pmic->desc[id].vsel_mask = SMPS10_VSEL;
+			pmic->desc[id].enable_reg = PALMAS_SMPS10_STATUS;
+			pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
+		}
+
+		pmic->desc[id].type = REGULATOR_VOLTAGE;
+		pmic->desc[id].owner = THIS_MODULE;
+
+		/* Initialise sleep/init values from platform data */
+		if (pdata && pdata->reg_init) {
+			reg_init = pdata->reg_init[id];
+			if (reg_init) {
+				ret = palmas_smps_init(palmas, id, reg_init);
+				if (ret)
+					goto err_unregister_regulator;
+			}
+		}
+
+		/*
+		 * Read and store the RANGE bit for later use.
+		 * This must be done before the regulator is registered,
+		 * otherwise we error out in probe with unsupportable ranges.
+		 */
+		if (id != PALMAS_REG_SMPS10) {
+			addr = palmas_regs_info[id].vsel_addr;
+
+			ret = palmas_smps_read(pmic->palmas, addr, &reg);
+			if (ret)
+				goto err_unregister_regulator;
+			if (reg & PALMAS_SMPS12_VOLTAGE_RANGE)
+				pmic->range[id] = 1;
+		}
+
+		if (pdata && pdata->reg_data)
+			config.init_data = pdata->reg_data[id];
+		else
+			config.init_data = NULL;
+
+		rdev = regulator_register(&pmic->desc[id], &config);
+		if (IS_ERR(rdev)) {
+			dev_err(&pdev->dev,
+				"failed to register %s regulator\n",
+				pdev->name);
+			ret = PTR_ERR(rdev);
+			goto err_unregister_regulator;
+		}
+
+		/* Save regulator for cleanup */
+		pmic->rdev[id] = rdev;
+	}
+
+	/* Start this loop from the id left from previous loop */
+	for (; id < PALMAS_NUM_REGS; id++) {
+
+		/* Skip regulators that are not available due
+		 * to alternate functions.
+		 */
+
+		/* Register the regulators */
+		pmic->desc[id].name = palmas_regs_info[id].name;
+		pmic->desc[id].id = id;
+		pmic->desc[id].n_voltages = PALMAS_LDO_NUM_VOLTAGES;
+
+		pmic->desc[id].ops = &palmas_ops_ldo;
+
+		pmic->desc[id].type = REGULATOR_VOLTAGE;
+		pmic->desc[id].owner = THIS_MODULE;
+		pmic->desc[id].enable_reg = palmas_regs_info[id].ctrl_addr;
+		pmic->desc[id].enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE;
+
+		if (pdata && pdata->reg_data)
+			config.init_data = pdata->reg_data[id];
+		else
+			config.init_data = NULL;
+
+		rdev = regulator_register(&pmic->desc[id], &config);
+		if (IS_ERR(rdev)) {
+			dev_err(&pdev->dev,
+				"failed to register %s regulator\n",
+				pdev->name);
+			ret = PTR_ERR(rdev);
+			goto err_unregister_regulator;
+		}
+
+		/* Save regulator for cleanup */
+		pmic->rdev[id] = rdev;
+
+		/* Initialise sleep/init values from platform data */
+		if (pdata->reg_init) {
+			reg_init = pdata->reg_init[id];
+			if (reg_init) {
+				ret = palmas_ldo_init(palmas, id, reg_init);
+				if (ret)
+					goto err_unregister_regulator;
+			}
+		}
+	}
+
+	return 0;
+
+err_unregister_regulator:
+	while (--id >= 0)
+		regulator_unregister(pmic->rdev[id]);
+	return ret;
+}
+
+static int __devexit palmas_remove(struct platform_device *pdev)
+{
+	struct palmas_pmic *pmic = platform_get_drvdata(pdev);
+	int id;
+
+	for (id = 0; id < PALMAS_NUM_REGS; id++)
+		regulator_unregister(pmic->rdev[id]);
+
+	return 0;
+}
+
+static struct platform_driver palmas_driver = {
+	.driver = {
+		.name = "palmas-pmic",
+		.owner = THIS_MODULE,
+	},
+	.probe = palmas_probe,
+	.remove = __devexit_p(palmas_remove),
+};
+
+static int __init palmas_init(void)
+{
+	return platform_driver_register(&palmas_driver);
+}
+subsys_initcall(palmas_init);
+
+static void __exit palmas_exit(void)
+{
+	platform_driver_unregister(&palmas_driver);
+}
+module_exit(palmas_exit);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_DESCRIPTION("Palmas voltage regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:palmas-pmic");
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index a5aab1b..8211101 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -150,57 +150,33 @@
 	VREG_INFO(SW2S,  PCAP_REG_LOWPWR,  NA, 20, NA, NA), */
 };
 
-static int pcap_regulator_set_voltage(struct regulator_dev *rdev,
-				      int min_uV, int max_uV,
-				      unsigned *selector)
+static int pcap_regulator_set_voltage_sel(struct regulator_dev *rdev,
+					  unsigned selector)
 {
 	struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
 	void *pcap = rdev_get_drvdata(rdev);
-	int uV;
-	u8 i;
 
 	/* the regulator doesn't support voltage switching */
 	if (vreg->n_voltages == 1)
 		return -EINVAL;
 
-	for (i = 0; i < vreg->n_voltages; i++) {
-		/* For V1 the first is not the best match */
-		if (i == 0 && rdev_get_id(rdev) == V1)
-			i = 1;
-		else if (i + 1 == vreg->n_voltages && rdev_get_id(rdev) == V1)
-			i = 0;
-
-		uV = vreg->voltage_table[i] * 1000;
-		if (min_uV <= uV && uV <= max_uV) {
-			*selector = i;
-			return ezx_pcap_set_bits(pcap, vreg->reg,
-					(vreg->n_voltages - 1) << vreg->index,
-					i << vreg->index);
-		}
-
-		if (i == 0 && rdev_get_id(rdev) == V1)
-			i = vreg->n_voltages - 1;
-	}
-
-	/* the requested voltage range is not supported by this regulator */
-	return -EINVAL;
+	return ezx_pcap_set_bits(pcap, vreg->reg,
+				 (vreg->n_voltages - 1) << vreg->index,
+				 selector << vreg->index);
 }
 
-static int pcap_regulator_get_voltage(struct regulator_dev *rdev)
+static int pcap_regulator_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
 	void *pcap = rdev_get_drvdata(rdev);
 	u32 tmp;
-	int mV;
 
 	if (vreg->n_voltages == 1)
-		return vreg->voltage_table[0] * 1000;
+		return 0;
 
 	ezx_pcap_read(pcap, vreg->reg, &tmp);
 	tmp = ((tmp >> vreg->index) & (vreg->n_voltages - 1));
-	mV = vreg->voltage_table[tmp];
-
-	return mV * 1000;
+	return tmp;
 }
 
 static int pcap_regulator_enable(struct regulator_dev *rdev)
@@ -248,8 +224,8 @@
 
 static struct regulator_ops pcap_regulator_ops = {
 	.list_voltage	= pcap_regulator_list_voltage,
-	.set_voltage	= pcap_regulator_set_voltage,
-	.get_voltage	= pcap_regulator_get_voltage,
+	.set_voltage_sel = pcap_regulator_set_voltage_sel,
+	.get_voltage_sel = pcap_regulator_get_voltage_sel,
 	.enable		= pcap_regulator_enable,
 	.disable	= pcap_regulator_disable,
 	.is_enabled	= pcap_regulator_is_enabled,
@@ -265,7 +241,7 @@
 		.owner		= THIS_MODULE,			\
 	}
 
-static struct regulator_desc pcap_regulators[] = {
+static const struct regulator_desc pcap_regulators[] = {
 	VREG(V1), VREG(V2), VREG(V3), VREG(V4), VREG(V5), VREG(V6), VREG(V7),
 	VREG(V8), VREG(V9), VREG(V10), VREG(VAUX1), VREG(VAUX2), VREG(VAUX3),
 	VREG(VAUX4), VREG(VSIM), VREG(VSIM2), VREG(VVIB), VREG(SW1), VREG(SW2),
@@ -275,9 +251,13 @@
 {
 	struct regulator_dev *rdev;
 	void *pcap = dev_get_drvdata(pdev->dev.parent);
+	struct regulator_config config = { };
 
-	rdev = regulator_register(&pcap_regulators[pdev->id], &pdev->dev,
-				pdev->dev.platform_data, pcap, NULL);
+	config.dev = &pdev->dev;
+	config.init_data = pdev->dev.platform_data;
+	config.driver_data = pcap;
+
+	rdev = regulator_register(&pcap_regulators[pdev->id], &config);
 	if (IS_ERR(rdev))
 		return PTR_ERR(rdev);
 
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 6db46c6..3c9d14c 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -24,35 +24,25 @@
 #include <linux/mfd/pcf50633/core.h>
 #include <linux/mfd/pcf50633/pmic.h>
 
-#define PCF50633_REGULATOR(_name, _id, _n) 		\
-	{					\
-		.name = _name, 			\
-		.id = _id,			\
-		.ops = &pcf50633_regulator_ops,	\
-		.n_voltages = _n, \
-		.type = REGULATOR_VOLTAGE, 	\
-		.owner = THIS_MODULE, 		\
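+/*
+ * The *ENA register always follows the corresponding *OUT register, which
+ * is why enable_reg is derived as *OUT + 1 below.
+ */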
+#define PCF50633_REGULATOR(_name, _id, _n)			\
+	{							\
+		.name = _name,					\
+		.id = PCF50633_REGULATOR_##_id,			\
+		.ops = &pcf50633_regulator_ops,			\
+		.n_voltages = _n,				\
+		.type = REGULATOR_VOLTAGE,			\
+		.owner = THIS_MODULE,				\
+		.vsel_reg = PCF50633_REG_##_id##OUT,		\
+		.vsel_mask = 0xff,				\
+		.enable_reg = PCF50633_REG_##_id##OUT + 1,	\
+		.enable_mask = PCF50633_REGULATOR_ON,		\
 	}
 
-static const u8 pcf50633_regulator_registers[PCF50633_NUM_REGULATORS] = {
-	[PCF50633_REGULATOR_AUTO]	= PCF50633_REG_AUTOOUT,
-	[PCF50633_REGULATOR_DOWN1]	= PCF50633_REG_DOWN1OUT,
-	[PCF50633_REGULATOR_DOWN2]	= PCF50633_REG_DOWN2OUT,
-	[PCF50633_REGULATOR_MEMLDO]	= PCF50633_REG_MEMLDOOUT,
-	[PCF50633_REGULATOR_LDO1]	= PCF50633_REG_LDO1OUT,
-	[PCF50633_REGULATOR_LDO2]	= PCF50633_REG_LDO2OUT,
-	[PCF50633_REGULATOR_LDO3]	= PCF50633_REG_LDO3OUT,
-	[PCF50633_REGULATOR_LDO4]	= PCF50633_REG_LDO4OUT,
-	[PCF50633_REGULATOR_LDO5]	= PCF50633_REG_LDO5OUT,
-	[PCF50633_REGULATOR_LDO6]	= PCF50633_REG_LDO6OUT,
-	[PCF50633_REGULATOR_HCLDO]	= PCF50633_REG_HCLDOOUT,
-};
-
 /* Bits from voltage value */
 static u8 auto_voltage_bits(unsigned int millivolts)
 {
 	if (millivolts < 1800)
-		return 0;
+		return 0x2f;
 	if (millivolts > 3800)
 		return 0xff;
 
@@ -87,6 +77,9 @@
 /* Obtain voltage value from bits */
 static unsigned int auto_voltage_value(u8 bits)
 {
+	/* AUTOOUT: 00000000 to 00101110 are reserved.
+	 * Return 0 for bits in reserved range, which means this selector code
+	 * can't be used on this system */
 	if (bits < 0x2f)
 		return 0;
 
@@ -123,7 +116,7 @@
 
 	millivolts = min_uV / 1000;
 
-	regnr = pcf50633_regulator_registers[regulator_id];
+	regnr = rdev->desc->vsel_reg;
 
 	switch (regulator_id) {
 	case PCF50633_REGULATOR_AUTO:
@@ -154,20 +147,22 @@
 	return pcf50633_reg_write(pcf, regnr, volt_bits);
 }
 
-static int pcf50633_regulator_voltage_value(enum pcf50633_regulator_id id,
-						u8 bits)
+static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev,
+						unsigned int index)
 {
+	int regulator_id = rdev_get_id(rdev);
+
 	int millivolts;
 
-	switch (id) {
+	switch (regulator_id) {
 	case PCF50633_REGULATOR_AUTO:
-		millivolts = auto_voltage_value(bits);
+		millivolts = auto_voltage_value(index);
 		break;
 	case PCF50633_REGULATOR_DOWN1:
-		millivolts = down_voltage_value(bits);
+		millivolts = down_voltage_value(index);
 		break;
 	case PCF50633_REGULATOR_DOWN2:
-		millivolts = down_voltage_value(bits);
+		millivolts = down_voltage_value(index);
 		break;
 	case PCF50633_REGULATOR_LDO1:
 	case PCF50633_REGULATOR_LDO2:
@@ -177,7 +172,7 @@
 	case PCF50633_REGULATOR_LDO6:
 	case PCF50633_REGULATOR_HCLDO:
 	case PCF50633_REGULATOR_MEMLDO:
-		millivolts = ldo_voltage_value(bits);
+		millivolts = ldo_voltage_value(index);
 		break;
 	default:
 		return -EINVAL;
@@ -186,140 +181,44 @@
 	return millivolts * 1000;
 }
 
-static int pcf50633_regulator_get_voltage(struct regulator_dev *rdev)
-{
-	struct pcf50633 *pcf;
-	int regulator_id;
-	u8 volt_bits, regnr;
-
-	pcf = rdev_get_drvdata(rdev);
-
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= PCF50633_NUM_REGULATORS)
-		return -EINVAL;
-
-	regnr = pcf50633_regulator_registers[regulator_id];
-
-	volt_bits = pcf50633_reg_read(pcf, regnr);
-
-	return pcf50633_regulator_voltage_value(regulator_id, volt_bits);
-}
-
-static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev,
-						unsigned int index)
-{
-	struct pcf50633 *pcf;
-	int regulator_id;
-
-	pcf = rdev_get_drvdata(rdev);
-
-	regulator_id = rdev_get_id(rdev);
-
-	switch (regulator_id) {
-	case PCF50633_REGULATOR_AUTO:
-		index += 0x2f;
-		break;
-	default:
-		break;
-	}
-
-	return pcf50633_regulator_voltage_value(regulator_id, index);
-}
-
-static int pcf50633_regulator_enable(struct regulator_dev *rdev)
-{
-	struct pcf50633 *pcf = rdev_get_drvdata(rdev);
-	int regulator_id;
-	u8 regnr;
-
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= PCF50633_NUM_REGULATORS)
-		return -EINVAL;
-
-	/* The *ENA register is always one after the *OUT register */
-	regnr = pcf50633_regulator_registers[regulator_id] + 1;
-
-	return pcf50633_reg_set_bit_mask(pcf, regnr, PCF50633_REGULATOR_ON,
-						       PCF50633_REGULATOR_ON);
-}
-
-static int pcf50633_regulator_disable(struct regulator_dev *rdev)
-{
-	struct pcf50633 *pcf = rdev_get_drvdata(rdev);
-	int regulator_id;
-	u8 regnr;
-
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= PCF50633_NUM_REGULATORS)
-		return -EINVAL;
-
-	/* the *ENA register is always one after the *OUT register */
-	regnr = pcf50633_regulator_registers[regulator_id] + 1;
-
-	return pcf50633_reg_set_bit_mask(pcf, regnr,
-					PCF50633_REGULATOR_ON, 0);
-}
-
-static int pcf50633_regulator_is_enabled(struct regulator_dev *rdev)
-{
-	struct pcf50633 *pcf = rdev_get_drvdata(rdev);
-	int regulator_id = rdev_get_id(rdev);
-	u8 regnr;
-
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= PCF50633_NUM_REGULATORS)
-		return -EINVAL;
-
-	/* the *ENA register is always one after the *OUT register */
-	regnr = pcf50633_regulator_registers[regulator_id] + 1;
-
-	return pcf50633_reg_read(pcf, regnr) & PCF50633_REGULATOR_ON;
-}
-
 static struct regulator_ops pcf50633_regulator_ops = {
 	.set_voltage = pcf50633_regulator_set_voltage,
-	.get_voltage = pcf50633_regulator_get_voltage,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
 	.list_voltage = pcf50633_regulator_list_voltage,
-	.enable = pcf50633_regulator_enable,
-	.disable = pcf50633_regulator_disable,
-	.is_enabled = pcf50633_regulator_is_enabled,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
 };
 
-static struct regulator_desc regulators[] = {
-	[PCF50633_REGULATOR_AUTO] =
-		PCF50633_REGULATOR("auto", PCF50633_REGULATOR_AUTO, 81),
-	[PCF50633_REGULATOR_DOWN1] =
-		PCF50633_REGULATOR("down1", PCF50633_REGULATOR_DOWN1, 96),
-	[PCF50633_REGULATOR_DOWN2] =
-		PCF50633_REGULATOR("down2", PCF50633_REGULATOR_DOWN2, 96),
-	[PCF50633_REGULATOR_LDO1] =
-		PCF50633_REGULATOR("ldo1", PCF50633_REGULATOR_LDO1, 28),
-	[PCF50633_REGULATOR_LDO2] =
-		PCF50633_REGULATOR("ldo2", PCF50633_REGULATOR_LDO2, 28),
-	[PCF50633_REGULATOR_LDO3] =
-		PCF50633_REGULATOR("ldo3", PCF50633_REGULATOR_LDO3, 28),
-	[PCF50633_REGULATOR_LDO4] =
-		PCF50633_REGULATOR("ldo4", PCF50633_REGULATOR_LDO4, 28),
-	[PCF50633_REGULATOR_LDO5] =
-		PCF50633_REGULATOR("ldo5", PCF50633_REGULATOR_LDO5, 28),
-	[PCF50633_REGULATOR_LDO6] =
-		PCF50633_REGULATOR("ldo6", PCF50633_REGULATOR_LDO6, 28),
-	[PCF50633_REGULATOR_HCLDO] =
-		PCF50633_REGULATOR("hcldo", PCF50633_REGULATOR_HCLDO, 28),
-	[PCF50633_REGULATOR_MEMLDO] =
-		PCF50633_REGULATOR("memldo", PCF50633_REGULATOR_MEMLDO, 28),
+static const struct regulator_desc regulators[] = {
+	[PCF50633_REGULATOR_AUTO] = PCF50633_REGULATOR("auto", AUTO, 128),
+	[PCF50633_REGULATOR_DOWN1] = PCF50633_REGULATOR("down1", DOWN1, 96),
+	[PCF50633_REGULATOR_DOWN2] = PCF50633_REGULATOR("down2", DOWN2, 96),
+	[PCF50633_REGULATOR_LDO1] = PCF50633_REGULATOR("ldo1", LDO1, 28),
+	[PCF50633_REGULATOR_LDO2] = PCF50633_REGULATOR("ldo2", LDO2, 28),
+	[PCF50633_REGULATOR_LDO3] = PCF50633_REGULATOR("ldo3", LDO3, 28),
+	[PCF50633_REGULATOR_LDO4] = PCF50633_REGULATOR("ldo4", LDO4, 28),
+	[PCF50633_REGULATOR_LDO5] = PCF50633_REGULATOR("ldo5", LDO5, 28),
+	[PCF50633_REGULATOR_LDO6] = PCF50633_REGULATOR("ldo6", LDO6, 28),
+	[PCF50633_REGULATOR_HCLDO] = PCF50633_REGULATOR("hcldo", HCLDO, 28),
+	[PCF50633_REGULATOR_MEMLDO] = PCF50633_REGULATOR("memldo", MEMLDO, 28),
 };
 
 static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
 {
 	struct regulator_dev *rdev;
 	struct pcf50633 *pcf;
+	struct regulator_config config = { };
 
 	/* Already set by core driver */
 	pcf = dev_to_pcf50633(pdev->dev.parent);
 
-	rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
-				  pdev->dev.platform_data, pcf, NULL);
+	config.dev = &pdev->dev;
+	config.init_data = pdev->dev.platform_data;
+	config.driver_data = pcf;
+	config.regmap = pcf->regmap;
+
+	rdev = regulator_register(&regulators[pdev->id], &config);
 	if (IS_ERR(rdev))
 		return PTR_ERR(rdev);
 
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
new file mode 100644
index 0000000..1d34e64
--- /dev/null
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -0,0 +1,255 @@
+/*
+ * Regulator driver for RICOH RC5T583 power management chip.
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * based on code
+ *      Copyright (C) 2011 RICOH COMPANY,LTD
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/gpio.h>
+#include <linux/mfd/rc5t583.h>
+
+struct rc5t583_regulator_info {
+	int			deepsleep_id;
+
+	/* Regulator register address. */
+	uint8_t			reg_disc_reg;
+	uint8_t			disc_bit;
+	uint8_t			deepsleep_reg;
+
+	/* Regulator-specific turn-on delay and voltage settling time */
+	int			enable_uv_per_us;
+	int			change_uv_per_us;
+
+	/* Used by regulator core */
+	struct regulator_desc	desc;
+};
+
+struct rc5t583_regulator {
+	struct rc5t583_regulator_info *reg_info;
+
+	/* Devices */
+	struct device		*dev;
+	struct rc5t583		*mfd;
+	struct regulator_dev	*rdev;
+};
+
+static int rc5t583_regulator_enable_time(struct regulator_dev *rdev)
+{
+	struct rc5t583_regulator *reg = rdev_get_drvdata(rdev);
+	int vsel = regulator_get_voltage_sel_regmap(rdev);
+	int curr_uV = regulator_list_voltage_linear(rdev, vsel);
+
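+	/* Ramp-up time: current output voltage divided by the enable slew rate */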
+	return DIV_ROUND_UP(curr_uV, reg->reg_info->enable_uv_per_us);
+}
+
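+/* Settling time between two selectors at the fixed DVS slew rate */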
+static int rc5t583_set_voltage_time_sel(struct regulator_dev *rdev,
+		unsigned int old_selector, unsigned int new_selector)
+{
+	struct rc5t583_regulator *reg = rdev_get_drvdata(rdev);
+	int old_uV, new_uV;
+	old_uV = regulator_list_voltage_linear(rdev, old_selector);
+
+	if (old_uV < 0)
+		return old_uV;
+
+	new_uV = regulator_list_voltage_linear(rdev, new_selector);
+	if (new_uV < 0)
+		return new_uV;
+
+	return DIV_ROUND_UP(abs(old_uV - new_uV),
+				reg->reg_info->change_uv_per_us);
+}
+
+static struct regulator_ops rc5t583_ops = {
+	.is_enabled		= regulator_is_enabled_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
+	.enable_time		= rc5t583_regulator_enable_time,
+	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
+	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+	.list_voltage		= regulator_list_voltage_linear,
+	.map_voltage		= regulator_map_voltage_linear,
+	.set_voltage_time_sel	= rc5t583_set_voltage_time_sel,
+};
+
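+/*
+ * _enable_mv sets the enable-time slew rate (enable_uv_per_us) in mV/us;
+ * the voltage-change slew rate is fixed at 40 mV/us for all rails.
+ */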
+#define RC5T583_REG(_id, _en_reg, _en_bit, _disc_reg, _disc_bit, \
+		_vout_mask, _min_mv, _max_mv, _step_uV, _enable_mv) \
+{								\
+	.reg_disc_reg	= RC5T583_REG_##_disc_reg,		\
+	.disc_bit	= _disc_bit,				\
+	.deepsleep_reg	= RC5T583_REG_##_id##DAC_DS,		\
+	.enable_uv_per_us = _enable_mv * 1000,			\
+	.change_uv_per_us = 40 * 1000,				\
+	.deepsleep_id	= RC5T583_DS_##_id,			\
+	.desc = {						\
+		.name = "rc5t583-regulator-"#_id,		\
+		.id = RC5T583_REGULATOR_##_id,			\
+		.n_voltages = (_max_mv - _min_mv) * 1000 / _step_uV + 1, \
+		.ops = &rc5t583_ops,				\
+		.type = REGULATOR_VOLTAGE,			\
+		.owner = THIS_MODULE,				\
+		.vsel_reg = RC5T583_REG_##_id##DAC,		\
+		.vsel_mask = _vout_mask,			\
+		.enable_reg = RC5T583_REG_##_en_reg,		\
+		.enable_mask = BIT(_en_bit),			\
+		.min_uV	= _min_mv * 1000,			\
+		.uV_step = _step_uV,				\
+	},							\
+}
+
+static struct rc5t583_regulator_info rc5t583_reg_info[RC5T583_REGULATOR_MAX] = {
+	RC5T583_REG(DC0, DC0CTL, 0, DC0CTL, 1, 0x7F, 700, 1500, 12500, 4),
+	RC5T583_REG(DC1, DC1CTL, 0, DC1CTL, 1, 0x7F, 700, 1500, 12500, 14),
+	RC5T583_REG(DC2, DC2CTL, 0, DC2CTL, 1, 0x7F, 900, 2400, 12500, 14),
+	RC5T583_REG(DC3, DC3CTL, 0, DC3CTL, 1, 0x7F, 900, 2400, 12500, 14),
+	RC5T583_REG(LDO0, LDOEN2, 0, LDODIS2, 0, 0x7F, 900, 3400, 25000, 160),
+	RC5T583_REG(LDO1, LDOEN2, 1, LDODIS2, 1, 0x7F, 900, 3400, 25000, 160),
+	RC5T583_REG(LDO2, LDOEN2, 2, LDODIS2, 2, 0x7F, 900, 3400, 25000, 160),
+	RC5T583_REG(LDO3, LDOEN2, 3, LDODIS2, 3, 0x7F, 900, 3400, 25000, 160),
+	RC5T583_REG(LDO4, LDOEN2, 4, LDODIS2, 4, 0x3F, 750, 1500, 12500, 133),
+	RC5T583_REG(LDO5, LDOEN2, 5, LDODIS2, 5, 0x7F, 900, 3400, 25000, 267),
+	RC5T583_REG(LDO6, LDOEN2, 6, LDODIS2, 6, 0x7F, 900, 3400, 25000, 133),
+	RC5T583_REG(LDO7, LDOEN2, 7, LDODIS2, 7, 0x7F, 900, 3400, 25000, 233),
+	RC5T583_REG(LDO8, LDOEN1, 0, LDODIS1, 0, 0x7F, 900, 3400, 25000, 233),
+	RC5T583_REG(LDO9, LDOEN1, 1, LDODIS1, 1, 0x7F, 900, 3400, 25000, 133),
+};
+
+static int __devinit rc5t583_regulator_probe(struct platform_device *pdev)
+{
+	struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
+	struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev);
+	struct regulator_init_data *reg_data;
+	struct regulator_config config = { };
+	struct rc5t583_regulator *reg = NULL;
+	struct rc5t583_regulator *regs;
+	struct regulator_dev *rdev;
+	struct rc5t583_regulator_info *ri;
+	int ret;
+	int id;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data, exiting...\n");
+		return -ENODEV;
+	}
+
+	regs = devm_kzalloc(&pdev->dev, RC5T583_REGULATOR_MAX *
+			sizeof(struct rc5t583_regulator), GFP_KERNEL);
+	if (!regs) {
+		dev_err(&pdev->dev, "Memory allocation failed, exiting\n");
+		return -ENOMEM;
+	}
+
+	for (id = 0; id < RC5T583_REGULATOR_MAX; ++id) {
+		reg_data = pdata->reg_init_data[id];
+
+		/* No need to register if there is no regulator data */
+		if (!reg_data)
+			continue;
+
+		reg = &regs[id];
+		ri = &rc5t583_reg_info[id];
+		reg->reg_info = ri;
+		reg->mfd = rc5t583;
+		reg->dev = &pdev->dev;
+
+		if (ri->deepsleep_id == RC5T583_DS_NONE)
+			goto skip_ext_pwr_config;
+
+		ret = rc5t583_ext_power_req_config(rc5t583->dev,
+				ri->deepsleep_id,
+				pdata->regulator_ext_pwr_control[id],
+				pdata->regulator_deepsleep_slot[id]);
+		/*
+		 * Failing to configure external control is not fatal,
+		 * so just warn and continue.
+		 */
+		if (ret < 0)
+			dev_warn(&pdev->dev,
+				"Failed to configure ext control %d\n", id);
+
+skip_ext_pwr_config:
+		config.dev = &pdev->dev;
+		config.init_data = reg_data;
+		config.driver_data = reg;
+		config.regmap = rc5t583->regmap;
+
+		rdev = regulator_register(&ri->desc, &config);
+		if (IS_ERR(rdev)) {
+			dev_err(&pdev->dev, "Failed to register regulator %s\n",
+						ri->desc.name);
+			ret = PTR_ERR(rdev);
+			goto clean_exit;
+		}
+		reg->rdev = rdev;
+	}
+	platform_set_drvdata(pdev, regs);
+	return 0;
+
+clean_exit:
+	while (--id >= 0)
+		regulator_unregister(regs[id].rdev);
+
+	return ret;
+}
+
+static int __devexit rc5t583_regulator_remove(struct platform_device *pdev)
+{
+	struct rc5t583_regulator *regs = platform_get_drvdata(pdev);
+	int id;
+
+	for (id = 0; id < RC5T583_REGULATOR_MAX; ++id)
+		regulator_unregister(regs[id].rdev);
+	return 0;
+}
+
+static struct platform_driver rc5t583_regulator_driver = {
+	.driver	= {
+		.name	= "rc5t583-regulator",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= rc5t583_regulator_probe,
+	.remove		= __devexit_p(rc5t583_regulator_remove),
+};
+
+static int __init rc5t583_regulator_init(void)
+{
+	return platform_driver_register(&rc5t583_regulator_driver);
+}
+subsys_initcall(rc5t583_regulator_init);
+
+static void __exit rc5t583_regulator_exit(void)
+{
+	platform_driver_unregister(&rc5t583_regulator_driver);
+}
+module_exit(rc5t583_regulator_exit);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("RC5T583 regulator driver");
+MODULE_ALIAS("platform:rc5t583-regulator");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 4ca2db0..290d6fc 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -12,7 +12,6 @@
  */
 
 #include <linux/bug.h>
-#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
@@ -28,6 +27,7 @@
 	struct s5m87xx_dev *iodev;
 	int num_regulators;
 	struct regulator_dev **rdev;
+	struct s5m_opmode_data *opmode;
 
 	int ramp_delay;
 	bool buck2_ramp;
@@ -141,9 +141,56 @@
 	return val;
 }
 
-static int s5m8767_get_register(struct regulator_dev *rdev, int *reg)
+static unsigned int s5m8767_opmode_reg[][4] = {
+	/* {OFF, ON, LOWPOWER, SUSPEND} */
+	/* LDO1 ... LDO28 */
+	{0x0, 0x3, 0x2, 0x1}, /* LDO1 */
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x0, 0x0, 0x0},
+	{0x0, 0x3, 0x2, 0x1}, /* LDO5 */
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1}, /* LDO10 */
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1}, /* LDO15 */
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x0, 0x0, 0x0},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1}, /* LDO20 */
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x0, 0x0, 0x0},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1}, /* LDO25 */
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1},
+	{0x0, 0x3, 0x2, 0x1}, /* LDO28 */
+
+	/* BUCK1 ... BUCK9 */
+	{0x0, 0x3, 0x1, 0x1}, /* BUCK1 */
+	{0x0, 0x3, 0x1, 0x1},
+	{0x0, 0x3, 0x1, 0x1},
+	{0x0, 0x3, 0x1, 0x1},
+	{0x0, 0x3, 0x2, 0x1}, /* BUCK5 */
+	{0x0, 0x3, 0x1, 0x1},
+	{0x0, 0x3, 0x1, 0x1},
+	{0x0, 0x3, 0x1, 0x1},
+	{0x0, 0x3, 0x1, 0x1}, /* BUCK9 */
+};
+
+static int s5m8767_get_register(struct regulator_dev *rdev, int *reg,
+				int *enable_ctrl)
 {
 	int reg_id = rdev_get_id(rdev);
+	unsigned int mode;
+	struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
 
 	switch (reg_id) {
 	case S5M8767_LDO1 ... S5M8767_LDO2:
@@ -168,6 +215,8 @@
 		return -EINVAL;
 	}
 
+	mode = s5m8767->opmode[reg_id].mode;
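+	/* Translate the configured operating mode into the ENCTRL field value */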
+	*enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
 	return 0;
 }
 
@@ -175,10 +224,10 @@
 {
 	struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
 	int ret, reg;
-	int mask = 0xc0, pattern = 0xc0;
+	int mask = 0xc0, enable_ctrl;
 	u8 val;
 
-	ret = s5m8767_get_register(rdev, &reg);
+	ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
 	if (ret == -EINVAL)
 		return 1;
 	else if (ret)
@@ -188,33 +237,33 @@
 	if (ret)
 		return ret;
 
-	return (val & mask) == pattern;
+	return (val & mask) == enable_ctrl;
 }
 
 static int s5m8767_reg_enable(struct regulator_dev *rdev)
 {
 	struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
 	int ret, reg;
-	int mask = 0xc0, pattern = 0xc0;
+	int mask = 0xc0, enable_ctrl;
 
-	ret = s5m8767_get_register(rdev, &reg);
+	ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
 	if (ret)
 		return ret;
 
-	return s5m_reg_update(s5m8767->iodev, reg, pattern, mask);
+	return s5m_reg_update(s5m8767->iodev, reg, enable_ctrl, mask);
 }
 
 static int s5m8767_reg_disable(struct regulator_dev *rdev)
 {
 	struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
 	int ret, reg;
-	int  mask = 0xc0, pattern = 0xc0;
+	int  mask = 0xc0, enable_ctrl;
 
-	ret = s5m8767_get_register(rdev, &reg);
+	ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
 	if (ret)
 		return ret;
 
-	return s5m_reg_update(s5m8767->iodev, reg, ~pattern, mask);
+	return s5m_reg_update(s5m8767->iodev, reg, ~mask, mask);
 }
 
 static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg)
@@ -297,7 +346,10 @@
 	if (max_vol < desc->min || min_vol > desc->max)
 		return -EINVAL;
 
-	selector = (min_vol - desc->min) / desc->step;
+	if (min_vol < desc->min)
+		min_vol = desc->min;
+
+	selector = DIV_ROUND_UP(min_vol - desc->min, desc->step);
 
 	if (desc->min + desc->step * selector > max_vol)
 		return -EINVAL;
@@ -305,51 +357,6 @@
 	return selector;
 }
 
-static int s5m8767_set_voltage(struct regulator_dev *rdev,
-				int min_uV, int max_uV, unsigned *selector)
-{
-	struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
-	const struct s5m_voltage_desc *desc;
-	int reg_id = rdev_get_id(rdev);
-	int sel, reg, mask, ret;
-	u8 val;
-
-	switch (reg_id) {
-	case S5M8767_LDO1 ... S5M8767_LDO28:
-		mask = 0x3f;
-		break;
-	case S5M8767_BUCK1 ... S5M8767_BUCK6:
-		mask = 0xff;
-		break;
-	case S5M8767_BUCK7 ... S5M8767_BUCK8:
-		return -EINVAL;
-	case S5M8767_BUCK9:
-		mask = 0xff;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	desc = reg_voltage_map[reg_id];
-
-	sel = s5m8767_convert_voltage_to_sel(desc, min_uV, max_uV);
-	if (sel < 0)
-		return sel;
-
-	ret = s5m8767_get_voltage_register(rdev, &reg);
-	if (ret)
-		return ret;
-
-	s5m_reg_read(s5m8767->iodev, reg, &val);
-	val &= ~mask;
-	val |= sel;
-
-	ret = s5m_reg_write(s5m8767->iodev, reg, val);
-	*selector = sel;
-
-	return ret;
-}
-
 static inline void s5m8767_set_high(struct s5m8767_info *s5m8767)
 {
 	int temp_index = s5m8767->buck_gpioindex;
@@ -368,70 +375,70 @@
 	gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1);
 }
 
-static int s5m8767_set_voltage_buck(struct regulator_dev *rdev,
-				    int min_uV, int max_uV, unsigned *selector)
+static int s5m8767_set_voltage(struct regulator_dev *rdev,
+				int min_uV, int max_uV, unsigned *selector)
 {
 	struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
-	int reg_id = rdev_get_id(rdev);
 	const struct s5m_voltage_desc *desc;
-	int new_val, old_val, i = 0;
-
-	if (reg_id < S5M8767_BUCK1 || reg_id > S5M8767_BUCK6)
-		return -EINVAL;
+	int reg_id = rdev_get_id(rdev);
+	int sel, reg, mask, ret = 0, old_index, index = 0;
+	u8 val;
+	u8 *buck234_vol = NULL;
 
 	switch (reg_id) {
-	case S5M8767_BUCK1:
-		return s5m8767_set_voltage(rdev, min_uV, max_uV, selector);
-	case S5M8767_BUCK2 ... S5M8767_BUCK4:
+	case S5M8767_LDO1 ... S5M8767_LDO28:
+		mask = 0x3f;
 		break;
-	case S5M8767_BUCK5 ... S5M8767_BUCK6:
-		return s5m8767_set_voltage(rdev, min_uV, max_uV, selector);
+	case S5M8767_BUCK1 ... S5M8767_BUCK6:
+		mask = 0xff;
+		if (reg_id == S5M8767_BUCK2 && s5m8767->buck2_gpiodvs)
+			buck234_vol = &s5m8767->buck2_vol[0];
+		else if (reg_id == S5M8767_BUCK3 && s5m8767->buck3_gpiodvs)
+			buck234_vol = &s5m8767->buck3_vol[0];
+		else if (reg_id == S5M8767_BUCK4 && s5m8767->buck4_gpiodvs)
+			buck234_vol = &s5m8767->buck4_vol[0];
+		break;
+	case S5M8767_BUCK7 ... S5M8767_BUCK8:
+		return -EINVAL;
 	case S5M8767_BUCK9:
-		return s5m8767_set_voltage(rdev, min_uV, max_uV, selector);
+		mask = 0xff;
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	desc = reg_voltage_map[reg_id];
-	new_val = s5m8767_convert_voltage_to_sel(desc, min_uV, max_uV);
-	if (new_val < 0)
-		return new_val;
 
-	switch (reg_id) {
-	case S5M8767_BUCK2:
-		if (s5m8767->buck2_gpiodvs) {
-			while (s5m8767->buck2_vol[i] != new_val)
-				i++;
-		} else
-			return s5m8767_set_voltage(rdev, min_uV,
-						   max_uV, selector);
-		break;
-	case S5M8767_BUCK3:
-		if (s5m8767->buck3_gpiodvs) {
-			while (s5m8767->buck3_vol[i] != new_val)
-				i++;
-		} else
-			return s5m8767_set_voltage(rdev, min_uV,
-						   max_uV, selector);
-		break;
-	case S5M8767_BUCK4:
-		if (s5m8767->buck3_gpiodvs) {
-			while (s5m8767->buck4_vol[i] != new_val)
-				i++;
-		} else
-			return s5m8767_set_voltage(rdev, min_uV,
-						   max_uV, selector);
-		break;
+	sel = s5m8767_convert_voltage_to_sel(desc, min_uV, max_uV);
+	if (sel < 0)
+		return sel;
+
+	/* buck234_vol != NULL means to control buck234 voltage via DVS GPIO */
+	if (buck234_vol) {
+		while (*buck234_vol != sel) {
+			buck234_vol++;
+			index++;
+		}
+		old_index = s5m8767->buck_gpioindex;
+		s5m8767->buck_gpioindex = index;
+
+		if (index > old_index)
+			s5m8767_set_high(s5m8767);
+		else
+			s5m8767_set_low(s5m8767);
+	} else {
+		ret = s5m8767_get_voltage_register(rdev, &reg);
+		if (ret)
+			return ret;
+
+		s5m_reg_read(s5m8767->iodev, reg, &val);
+		val = (val & ~mask) | sel;
+
+		ret = s5m_reg_write(s5m8767->iodev, reg, val);
 	}
 
-	old_val = s5m8767->buck_gpioindex;
-	s5m8767->buck_gpioindex = i;
-
-	if (i > old_val)
-		s5m8767_set_high(s5m8767);
-	else
-		s5m8767_set_low(s5m8767);
-
-	*selector = new_val;
-	return 0;
+	*selector = sel;
+	return ret;
 }
 
 static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
@@ -450,7 +457,7 @@
 	return 0;
 }
 
-static struct regulator_ops s5m8767_ldo_ops = {
+static struct regulator_ops s5m8767_ops = {
 	.list_voltage		= s5m8767_list_voltage,
 	.is_enabled		= s5m8767_reg_is_enabled,
 	.enable			= s5m8767_reg_enable,
@@ -460,75 +467,59 @@
 	.set_voltage_time_sel	= s5m8767_set_voltage_time_sel,
 };
 
-static struct regulator_ops s5m8767_buck_ops = {
-	.list_voltage		= s5m8767_list_voltage,
-	.is_enabled		= s5m8767_reg_is_enabled,
-	.enable			= s5m8767_reg_enable,
-	.disable		= s5m8767_reg_disable,
-	.get_voltage_sel	= s5m8767_get_voltage_sel,
-	.set_voltage		= s5m8767_set_voltage_buck,
-	.set_voltage_time_sel	= s5m8767_set_voltage_time_sel,
-};
-
-#define regulator_desc_ldo(num)		{	\
-	.name		= "LDO"#num,		\
-	.id		= S5M8767_LDO##num,	\
-	.ops		= &s5m8767_ldo_ops,	\
-	.type		= REGULATOR_VOLTAGE,	\
-	.owner		= THIS_MODULE,		\
-}
-#define regulator_desc_buck(num)	{	\
-	.name		= "BUCK"#num,		\
-	.id		= S5M8767_BUCK##num,	\
-	.ops		= &s5m8767_buck_ops,	\
+#define s5m8767_regulator_desc(_name) {		\
+	.name		= #_name,		\
+	.id		= S5M8767_##_name,	\
+	.ops		= &s5m8767_ops,		\
 	.type		= REGULATOR_VOLTAGE,	\
 	.owner		= THIS_MODULE,		\
 }
 
 static struct regulator_desc regulators[] = {
-	regulator_desc_ldo(1),
-	regulator_desc_ldo(2),
-	regulator_desc_ldo(3),
-	regulator_desc_ldo(4),
-	regulator_desc_ldo(5),
-	regulator_desc_ldo(6),
-	regulator_desc_ldo(7),
-	regulator_desc_ldo(8),
-	regulator_desc_ldo(9),
-	regulator_desc_ldo(10),
-	regulator_desc_ldo(11),
-	regulator_desc_ldo(12),
-	regulator_desc_ldo(13),
-	regulator_desc_ldo(14),
-	regulator_desc_ldo(15),
-	regulator_desc_ldo(16),
-	regulator_desc_ldo(17),
-	regulator_desc_ldo(18),
-	regulator_desc_ldo(19),
-	regulator_desc_ldo(20),
-	regulator_desc_ldo(21),
-	regulator_desc_ldo(22),
-	regulator_desc_ldo(23),
-	regulator_desc_ldo(24),
-	regulator_desc_ldo(25),
-	regulator_desc_ldo(26),
-	regulator_desc_ldo(27),
-	regulator_desc_ldo(28),
-	regulator_desc_buck(1),
-	regulator_desc_buck(2),
-	regulator_desc_buck(3),
-	regulator_desc_buck(4),
-	regulator_desc_buck(5),
-	regulator_desc_buck(6),
-	regulator_desc_buck(7),
-	regulator_desc_buck(8),
-	regulator_desc_buck(9),
+	s5m8767_regulator_desc(LDO1),
+	s5m8767_regulator_desc(LDO2),
+	s5m8767_regulator_desc(LDO3),
+	s5m8767_regulator_desc(LDO4),
+	s5m8767_regulator_desc(LDO5),
+	s5m8767_regulator_desc(LDO6),
+	s5m8767_regulator_desc(LDO7),
+	s5m8767_regulator_desc(LDO8),
+	s5m8767_regulator_desc(LDO9),
+	s5m8767_regulator_desc(LDO10),
+	s5m8767_regulator_desc(LDO11),
+	s5m8767_regulator_desc(LDO12),
+	s5m8767_regulator_desc(LDO13),
+	s5m8767_regulator_desc(LDO14),
+	s5m8767_regulator_desc(LDO15),
+	s5m8767_regulator_desc(LDO16),
+	s5m8767_regulator_desc(LDO17),
+	s5m8767_regulator_desc(LDO18),
+	s5m8767_regulator_desc(LDO19),
+	s5m8767_regulator_desc(LDO20),
+	s5m8767_regulator_desc(LDO21),
+	s5m8767_regulator_desc(LDO22),
+	s5m8767_regulator_desc(LDO23),
+	s5m8767_regulator_desc(LDO24),
+	s5m8767_regulator_desc(LDO25),
+	s5m8767_regulator_desc(LDO26),
+	s5m8767_regulator_desc(LDO27),
+	s5m8767_regulator_desc(LDO28),
+	s5m8767_regulator_desc(BUCK1),
+	s5m8767_regulator_desc(BUCK2),
+	s5m8767_regulator_desc(BUCK3),
+	s5m8767_regulator_desc(BUCK4),
+	s5m8767_regulator_desc(BUCK5),
+	s5m8767_regulator_desc(BUCK6),
+	s5m8767_regulator_desc(BUCK7),
+	s5m8767_regulator_desc(BUCK8),
+	s5m8767_regulator_desc(BUCK9),
 };
 
 static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
 {
 	struct s5m87xx_dev *iodev = dev_get_drvdata(pdev->dev.parent);
 	struct s5m_platform_data *pdata = dev_get_platdata(iodev->dev);
+	struct regulator_config config = { };
 	struct regulator_dev **rdev;
 	struct s5m8767_info *s5m8767;
 	int i, ret, size;
@@ -586,6 +577,7 @@
 	s5m8767->buck2_ramp = pdata->buck2_ramp_enable;
 	s5m8767->buck3_ramp = pdata->buck3_ramp_enable;
 	s5m8767->buck4_ramp = pdata->buck4_ramp_enable;
+	s5m8767->opmode = pdata->opmode;
 
 	for (i = 0; i < 8; i++) {
 		if (s5m8767->buck2_gpiodvs) {
@@ -723,8 +715,11 @@
 			regulators[id].n_voltages =
 				(desc->max - desc->min) / desc->step + 1;
 
-		rdev[i] = regulator_register(&regulators[id], s5m8767->dev,
-				pdata->regulators[i].initdata, s5m8767, NULL);
+		config.dev = s5m8767->dev;
+		config.init_data = pdata->regulators[i].initdata;
+		config.driver_data = s5m8767;
+
+		rdev[i] = regulator_register(&regulators[id], &config);
 		if (IS_ERR(rdev[i])) {
 			ret = PTR_ERR(rdev[i]);
 			dev_err(s5m8767->dev, "regulator init failed for %d\n",
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index d9278da..d840d84 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -123,7 +123,7 @@
 	.list_voltage	= tps6105x_regulator_list_voltage,
 };
 
-static struct regulator_desc tps6105x_regulator_desc = {
+static const struct regulator_desc tps6105x_regulator_desc = {
 	.name		= "tps6105x-boost",
 	.ops		= &tps6105x_regulator_ops,
 	.type		= REGULATOR_VOLTAGE,
@@ -139,6 +139,7 @@
 {
 	struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
 	struct tps6105x_platform_data *pdata = tps6105x->pdata;
+	struct regulator_config config = { };
 	int ret;
 
 	/* This instance is not set for regulator mode so bail out */
@@ -148,11 +149,13 @@
 		return 0;
 	}
 
+	config.dev = &tps6105x->client->dev;
+	config.init_data = pdata->regulator_data;
+	config.driver_data = tps6105x;
+
 	/* Register regulator with framework */
 	tps6105x->regulator = regulator_register(&tps6105x_regulator_desc,
-					     &tps6105x->client->dev,
-					     pdata->regulator_data, tps6105x,
-					     NULL);
+						 &config);
 	if (IS_ERR(tps6105x->regulator)) {
 		ret = PTR_ERR(tps6105x->regulator);
 		dev_err(&tps6105x->client->dev,
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index e2ec730..e534269 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -1,7 +1,7 @@
 /*
  * tps62360.c -- TI tps62360
  *
- * Driver for processor core supply tps62360 and tps62361B
+ * Driver for processor core supply tps62360, tps62361B, tps62362 and tps62363.
  *
  * Copyright (c) 2012, NVIDIA Corporation.
  *
@@ -26,13 +26,16 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/of_regulator.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/tps62360.h>
 #include <linux/gpio.h>
 #include <linux/i2c.h>
-#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/regmap.h>
 
@@ -46,20 +49,20 @@
 #define REG_RAMPCTRL		6
 #define REG_CHIPID		8
 
-enum chips {TPS62360, TPS62361};
+#define FORCE_PWM_ENABLE	BIT(7)
 
-#define TPS62360_BASE_VOLTAGE	770
+enum chips {TPS62360, TPS62361, TPS62362, TPS62363};
+
+#define TPS62360_BASE_VOLTAGE	770000
 #define TPS62360_N_VOLTAGES	64
 
-#define TPS62361_BASE_VOLTAGE	500
+#define TPS62361_BASE_VOLTAGE	500000
 #define TPS62361_N_VOLTAGES	128
 
 /* tps 62360 chip information */
 struct tps62360_chip {
-	const char *name;
 	struct device *dev;
 	struct regulator_desc desc;
-	struct i2c_client *client;
 	struct regulator_dev *rdev;
 	struct regmap *regmap;
 	int chip_id;
@@ -68,12 +71,12 @@
 	int voltage_base;
 	u8 voltage_reg_mask;
 	bool en_internal_pulldn;
-	bool en_force_pwm;
 	bool en_discharge;
 	bool valid_gpios;
 	int lru_index[4];
 	int curr_vset_vsel[4];
 	int curr_vset_id;
+	int change_uv_per_us;
 };
 
 /*
@@ -99,6 +102,7 @@
 	bool found = false;
 	int new_vset_reg = tps->lru_index[3];
 	int found_index = 3;
+
 	for (i = 0; i < 4; ++i) {
 		if (tps->curr_vset_vsel[tps->lru_index[i]] == req_vsel) {
 			new_vset_reg = tps->lru_index[i];
@@ -117,7 +121,7 @@
 	return found;
 }
 
-static int tps62360_dcdc_get_voltage(struct regulator_dev *dev)
+static int tps62360_dcdc_get_voltage_sel(struct regulator_dev *dev)
 {
 	struct tps62360_chip *tps = rdev_get_drvdata(dev);
 	int vsel;
@@ -126,196 +130,312 @@
 
 	ret = regmap_read(tps->regmap, REG_VSET0 + tps->curr_vset_id, &data);
 	if (ret < 0) {
-		dev_err(tps->dev, "%s: Error in reading register %d\n",
-			__func__, REG_VSET0 + tps->curr_vset_id);
+		dev_err(tps->dev, "%s(): register %d read failed with err %d\n",
+			__func__, REG_VSET0 + tps->curr_vset_id, ret);
 		return ret;
 	}
 	vsel = (int)data & tps->voltage_reg_mask;
-	return (tps->voltage_base + vsel * 10) * 1000;
+	return vsel;
 }
 
-static int tps62360_dcdc_set_voltage(struct regulator_dev *dev,
-	     int min_uV, int max_uV, unsigned *selector)
+static int tps62360_dcdc_set_voltage_sel(struct regulator_dev *dev,
+					 unsigned selector)
 {
 	struct tps62360_chip *tps = rdev_get_drvdata(dev);
-	int vsel;
 	int ret;
 	bool found = false;
 	int new_vset_id = tps->curr_vset_id;
 
-	if (max_uV < min_uV)
-		return -EINVAL;
-
-	if (min_uV >
-		((tps->voltage_base + (tps->desc.n_voltages - 1) * 10) * 1000))
-		return -EINVAL;
-
-	if (max_uV < tps->voltage_base * 1000)
-		return -EINVAL;
-
-	vsel = DIV_ROUND_UP(min_uV - (tps->voltage_base * 1000), 10000);
-	if (selector)
-		*selector = (vsel & tps->voltage_reg_mask);
-
 	/*
 	 * If gpios are available to select the VSET register then least
 	 * recently used register for new configuration.
 	 */
 	if (tps->valid_gpios)
-		found = find_voltage_set_register(tps, vsel, &new_vset_id);
+		found = find_voltage_set_register(tps, selector, &new_vset_id);
 
 	if (!found) {
 		ret = regmap_update_bits(tps->regmap, REG_VSET0 + new_vset_id,
-				tps->voltage_reg_mask, vsel);
+				tps->voltage_reg_mask, selector);
 		if (ret < 0) {
-			dev_err(tps->dev, "%s: Error in updating register %d\n",
-				 __func__, REG_VSET0 + new_vset_id);
+			dev_err(tps->dev,
+				"%s(): register %d update failed with err %d\n",
+				 __func__, REG_VSET0 + new_vset_id, ret);
 			return ret;
 		}
 		tps->curr_vset_id = new_vset_id;
-		tps->curr_vset_vsel[new_vset_id] = vsel;
+		tps->curr_vset_vsel[new_vset_id] = selector;
 	}
 
 	/* Select proper VSET register vio gpios */
 	if (tps->valid_gpios) {
-		gpio_set_value_cansleep(tps->vsel0_gpio,
-					new_vset_id & 0x1);
+		gpio_set_value_cansleep(tps->vsel0_gpio, new_vset_id & 0x1);
 		gpio_set_value_cansleep(tps->vsel1_gpio,
 					(new_vset_id >> 1) & 0x1);
 	}
 	return 0;
 }
 
-static int tps62360_dcdc_list_voltage(struct regulator_dev *dev,
-					unsigned selector)
+static int tps62360_set_voltage_time_sel(struct regulator_dev *rdev,
+		unsigned int old_selector, unsigned int new_selector)
 {
-	struct tps62360_chip *tps = rdev_get_drvdata(dev);
+	struct tps62360_chip *tps = rdev_get_drvdata(rdev);
+	int old_uV, new_uV;
 
-	if (selector >= tps->desc.n_voltages)
-		return -EINVAL;
-	return (tps->voltage_base + selector * 10) * 1000;
+	old_uV = regulator_list_voltage_linear(rdev, old_selector);
+	if (old_uV < 0)
+		return old_uV;
+
+	new_uV = regulator_list_voltage_linear(rdev, new_selector);
+	if (new_uV < 0)
+		return new_uV;
+
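+	/* Delay is the voltage delta divided by the ramp rate from RAMPCTRL */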
+	return DIV_ROUND_UP(abs(old_uV - new_uV), tps->change_uv_per_us);
 }
 
-static struct regulator_ops tps62360_dcdc_ops = {
-	.get_voltage = tps62360_dcdc_get_voltage,
-	.set_voltage = tps62360_dcdc_set_voltage,
-	.list_voltage = tps62360_dcdc_list_voltage,
-};
-
-static int tps62360_init_force_pwm(struct tps62360_chip *tps,
-	struct tps62360_regulator_platform_data *pdata,
-	int vset_id)
+static int tps62360_set_mode(struct regulator_dev *rdev, unsigned int mode)
 {
-	unsigned int data;
+	struct tps62360_chip *tps = rdev_get_drvdata(rdev);
+	int i;
+	int val;
 	int ret;
-	ret = regmap_read(tps->regmap, REG_VSET0 + vset_id, &data);
-	if (ret < 0) {
-		dev_err(tps->dev, "%s() fails in writing reg %d\n",
-			__func__, REG_VSET0 + vset_id);
+
+	/* Enable force PWM mode in FAST mode only. */
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		val = FORCE_PWM_ENABLE;
+		break;
+
+	case REGULATOR_MODE_NORMAL:
+		val = 0;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (!tps->valid_gpios) {
+		ret = regmap_update_bits(tps->regmap,
+			REG_VSET0 + tps->curr_vset_id, FORCE_PWM_ENABLE, val);
+		if (ret < 0)
+			dev_err(tps->dev,
+				"%s(): register %d update failed with err %d\n",
+				__func__, REG_VSET0 + tps->curr_vset_id, ret);
 		return ret;
 	}
-	tps->curr_vset_vsel[vset_id] = data & tps->voltage_reg_mask;
-	if (pdata->en_force_pwm)
-		data |= BIT(7);
-	else
-		data &= ~BIT(7);
-	ret = regmap_write(tps->regmap, REG_VSET0 + vset_id, data);
-	if (ret < 0)
-		dev_err(tps->dev, "%s() fails in writing reg %d\n",
-				__func__, REG_VSET0 + vset_id);
+
+	/*
+	 * If the VSET gpios are in use, any of the four VSET registers can
+	 * become active, so the force-PWM bit must be updated in all of them.
+	 */
+	for (i = 0; i < 4; ++i) {
+		ret = regmap_update_bits(tps->regmap,
+					REG_VSET0 + i, FORCE_PWM_ENABLE, val);
+		if (ret < 0) {
+			dev_err(tps->dev,
+				"%s(): register %d update failed with err %d\n",
+				__func__, REG_VSET0 + i, ret);
+			return ret;
+		}
+	}
 	return ret;
 }
 
-static int tps62360_init_dcdc(struct tps62360_chip *tps,
+static unsigned int tps62360_get_mode(struct regulator_dev *rdev)
+{
+	struct tps62360_chip *tps = rdev_get_drvdata(rdev);
+	unsigned int data;
+	int ret;
+
+	ret = regmap_read(tps->regmap, REG_VSET0 + tps->curr_vset_id, &data);
+	if (ret < 0) {
+		dev_err(tps->dev, "%s(): register %d read failed with err %d\n",
+			__func__, REG_VSET0 + tps->curr_vset_id, ret);
+		return ret;
+	}
+	return (data & FORCE_PWM_ENABLE) ?
+				REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
+}
+
+static struct regulator_ops tps62360_dcdc_ops = {
+	.get_voltage_sel	= tps62360_dcdc_get_voltage_sel,
+	.set_voltage_sel	= tps62360_dcdc_set_voltage_sel,
+	.list_voltage		= regulator_list_voltage_linear,
+	.map_voltage		= regulator_map_voltage_linear,
+	.set_voltage_time_sel	= tps62360_set_voltage_time_sel,
+	.set_mode		= tps62360_set_mode,
+	.get_mode		= tps62360_get_mode,
+};
+
+static int __devinit tps62360_init_dcdc(struct tps62360_chip *tps,
 		struct tps62360_regulator_platform_data *pdata)
 {
 	int ret;
-	int i;
+	unsigned int ramp_ctrl;
 
-	/* Initailize internal pull up/down control */
+	/* Initialize internal pull up/down control */
 	if (tps->en_internal_pulldn)
 		ret = regmap_write(tps->regmap, REG_CONTROL, 0xE0);
 	else
 		ret = regmap_write(tps->regmap, REG_CONTROL, 0x0);
 	if (ret < 0) {
-		dev_err(tps->dev, "%s() fails in writing reg %d\n",
-			__func__, REG_CONTROL);
+		dev_err(tps->dev,
+			"%s(): register %d write failed with err %d\n",
+			__func__, REG_CONTROL, ret);
 		return ret;
 	}
 
-	/* Initailize force PWM mode */
-	if (tps->valid_gpios) {
-		for (i = 0; i < 4; ++i) {
-			ret = tps62360_init_force_pwm(tps, pdata, i);
-			if (ret < 0)
-				return ret;
-		}
-	} else {
-		ret = tps62360_init_force_pwm(tps, pdata, tps->curr_vset_id);
-		if (ret < 0)
-			return ret;
-	}
-
 	/* Reset output discharge path to reduce power consumption */
 	ret = regmap_update_bits(tps->regmap, REG_RAMPCTRL, BIT(2), 0);
-	if (ret < 0)
-		dev_err(tps->dev, "%s() fails in updating reg %d\n",
-			__func__, REG_RAMPCTRL);
+	if (ret < 0) {
+		dev_err(tps->dev,
+			"%s(): register %d update failed with err %d\n",
+			__func__, REG_RAMPCTRL, ret);
+		return ret;
+	}
+
+	/* Get ramp value from ramp control register */
+	ret = regmap_read(tps->regmap, REG_RAMPCTRL, &ramp_ctrl);
+	if (ret < 0) {
+		dev_err(tps->dev,
+			"%s(): register %d read failed with err %d\n",
+			__func__, REG_RAMPCTRL, ret);
+		return ret;
+	}
+	ramp_ctrl = (ramp_ctrl >> 4) & 0x7;
+
+	/* ramp mV/us = 32/(2^ramp_ctrl) */
+	tps->change_uv_per_us = DIV_ROUND_UP(32000, BIT(ramp_ctrl));
 	return ret;
 }
 
 static const struct regmap_config tps62360_regmap_config = {
-	.reg_bits = 8,
-	.val_bits = 8,
+	.reg_bits		= 8,
+	.val_bits		= 8,
+	.max_register		= REG_CHIPID,
+	.cache_type		= REGCACHE_RBTREE,
 };
 
+static struct tps62360_regulator_platform_data *
+	of_get_tps62360_platform_data(struct device *dev)
+{
+	struct tps62360_regulator_platform_data *pdata;
+	struct device_node *np = dev->of_node;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(dev, "Memory alloc failed for platform data\n");
+		return NULL;
+	}
+
+	pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node);
+	if (!pdata->reg_init_data) {
+		dev_err(dev, "Not able to get OF regulator init data\n");
+		return NULL;
+	}
+
+	pdata->vsel0_gpio = of_get_named_gpio(np, "vsel0-gpio", 0);
+	pdata->vsel1_gpio = of_get_named_gpio(np, "vsel1-gpio", 0);
+
+	if (of_find_property(np, "ti,vsel0-state-high", NULL))
+		pdata->vsel0_def_state = 1;
+
+	if (of_find_property(np, "ti,vsel1-state-high", NULL))
+		pdata->vsel1_def_state = 1;
+
+	if (of_find_property(np, "ti,enable-pull-down", NULL))
+		pdata->en_internal_pulldn = true;
+
+	if (of_find_property(np, "ti,enable-vout-discharge", NULL))
+		pdata->en_discharge = true;
+
+	return pdata;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id tps62360_of_match[] = {
+	 { .compatible = "ti,tps62360", .data = (void *)TPS62360},
+	 { .compatible = "ti,tps62361", .data = (void *)TPS62361},
+	 { .compatible = "ti,tps62362", .data = (void *)TPS62362},
+	 { .compatible = "ti,tps62363", .data = (void *)TPS62363},
+	{},
+};
+MODULE_DEVICE_TABLE(of, tps62360_of_match);
+#endif
+
 static int __devinit tps62360_probe(struct i2c_client *client,
 				     const struct i2c_device_id *id)
 {
+	struct regulator_config config = { };
 	struct tps62360_regulator_platform_data *pdata;
 	struct regulator_dev *rdev;
 	struct tps62360_chip *tps;
 	int ret;
 	int i;
+	int chip_id;
 
 	pdata = client->dev.platform_data;
+	chip_id = id->driver_data;
+
+	if (client->dev.of_node) {
+		const struct of_device_id *match;
+		match = of_match_device(of_match_ptr(tps62360_of_match),
+				&client->dev);
+		if (!match) {
+			dev_err(&client->dev, "Error: No device match found\n");
+			return -ENODEV;
+		}
+		chip_id = (int)match->data;
+		if (!pdata)
+			pdata = of_get_tps62360_platform_data(&client->dev);
+	}
+
 	if (!pdata) {
-		dev_err(&client->dev, "%s() Err: Platform data not found\n",
+		dev_err(&client->dev, "%s(): Platform data not found\n",
 						__func__);
 		return -EIO;
 	}
 
 	tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
 	if (!tps) {
-		dev_err(&client->dev, "%s() Err: Memory allocation fails\n",
+		dev_err(&client->dev, "%s(): Memory allocation failed\n",
 						__func__);
 		return -ENOMEM;
 	}
 
-	tps->en_force_pwm = pdata->en_force_pwm;
 	tps->en_discharge = pdata->en_discharge;
 	tps->en_internal_pulldn = pdata->en_internal_pulldn;
 	tps->vsel0_gpio = pdata->vsel0_gpio;
 	tps->vsel1_gpio = pdata->vsel1_gpio;
-	tps->client = client;
 	tps->dev = &client->dev;
-	tps->name = id->name;
-	tps->voltage_base = (id->driver_data == TPS62360) ?
-				TPS62360_BASE_VOLTAGE : TPS62361_BASE_VOLTAGE;
-	tps->voltage_reg_mask = (id->driver_data == TPS62360) ? 0x3F : 0x7F;
+
+	switch (chip_id) {
+	case TPS62360:
+	case TPS62362:
+		tps->voltage_base = TPS62360_BASE_VOLTAGE;
+		tps->voltage_reg_mask = 0x3F;
+		tps->desc.n_voltages = TPS62360_N_VOLTAGES;
+		break;
+	case TPS62361:
+	case TPS62363:
+		tps->voltage_base = TPS62361_BASE_VOLTAGE;
+		tps->voltage_reg_mask = 0x7F;
+		tps->desc.n_voltages = TPS62361_N_VOLTAGES;
+		break;
+	default:
+		return -ENODEV;
+	}
 
 	tps->desc.name = id->name;
 	tps->desc.id = 0;
-	tps->desc.n_voltages = (id->driver_data == TPS62360) ?
-				TPS62360_N_VOLTAGES : TPS62361_N_VOLTAGES;
 	tps->desc.ops = &tps62360_dcdc_ops;
 	tps->desc.type = REGULATOR_VOLTAGE;
 	tps->desc.owner = THIS_MODULE;
-	tps->regmap = regmap_init_i2c(client, &tps62360_regmap_config);
+	tps->desc.min_uV = tps->voltage_base;
+	tps->desc.uV_step = 10000;
+
+	tps->regmap = devm_regmap_init_i2c(client, &tps62360_regmap_config);
 	if (IS_ERR(tps->regmap)) {
 		ret = PTR_ERR(tps->regmap);
-		dev_err(&client->dev, "%s() Err: Failed to allocate register"
-			"map: %d\n", __func__, ret);
+		dev_err(&client->dev,
+			"%s(): regmap allocation failed with err %d\n",
+			__func__, ret);
 		return ret;
 	}
 	i2c_set_clientdata(client, tps);
@@ -326,35 +446,26 @@
 	tps->valid_gpios = false;
 
 	if (gpio_is_valid(tps->vsel0_gpio) && gpio_is_valid(tps->vsel1_gpio)) {
-		ret = gpio_request(tps->vsel0_gpio, "tps62360-vsel0");
+		int gpio_flags;
+		gpio_flags = (pdata->vsel0_def_state) ?
+				GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+		ret = gpio_request_one(tps->vsel0_gpio,
+				gpio_flags, "tps62360-vsel0");
 		if (ret) {
 			dev_err(&client->dev,
-				"Err: Could not obtain vsel0 GPIO %d: %d\n",
-						tps->vsel0_gpio, ret);
-			goto err_gpio0;
-		}
-		ret = gpio_direction_output(tps->vsel0_gpio,
-					pdata->vsel0_def_state);
-		if (ret) {
-			dev_err(&client->dev, "Err: Could not set direction of"
-				"vsel0 GPIO %d: %d\n", tps->vsel0_gpio, ret);
-			gpio_free(tps->vsel0_gpio);
+				"%s(): Could not obtain vsel0 GPIO %d: %d\n",
+				__func__, tps->vsel0_gpio, ret);
 			goto err_gpio0;
 		}
 
-		ret = gpio_request(tps->vsel1_gpio, "tps62360-vsel1");
+		gpio_flags = (pdata->vsel1_def_state) ?
+				GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+		ret = gpio_request_one(tps->vsel1_gpio,
+				gpio_flags, "tps62360-vsel1");
 		if (ret) {
 			dev_err(&client->dev,
-				"Err: Could not obtain vsel1 GPIO %d: %d\n",
-						tps->vsel1_gpio, ret);
-			goto err_gpio1;
-		}
-		ret = gpio_direction_output(tps->vsel1_gpio,
-					pdata->vsel1_def_state);
-		if (ret) {
-			dev_err(&client->dev, "Err: Could not set direction of"
-				"vsel1 GPIO %d: %d\n", tps->vsel1_gpio, ret);
-			gpio_free(tps->vsel1_gpio);
+				"%s(): Could not obtain vsel1 GPIO %d: %d\n",
+				__func__, tps->vsel1_gpio, ret);
 			goto err_gpio1;
 		}
 		tps->valid_gpios = true;
@@ -371,17 +482,22 @@
 
 	ret = tps62360_init_dcdc(tps, pdata);
 	if (ret < 0) {
-		dev_err(tps->dev, "%s() Err: Init fails with = %d\n",
+		dev_err(tps->dev, "%s(): Init failed with err = %d\n",
 				__func__, ret);
 		goto err_init;
 	}
 
+	config.dev = &client->dev;
+	config.init_data = pdata->reg_init_data;
+	config.driver_data = tps;
+	config.of_node = client->dev.of_node;
+
 	/* Register the regulators */
-	rdev = regulator_register(&tps->desc, &client->dev,
-				&pdata->reg_init_data, tps, NULL);
+	rdev = regulator_register(&tps->desc, &config);
 	if (IS_ERR(rdev)) {
-		dev_err(tps->dev, "%s() Err: Failed to register %s\n",
-				__func__, id->name);
+		dev_err(tps->dev,
+			"%s(): regulator register failed with err %s\n",
+			__func__, id->name);
 		ret = PTR_ERR(rdev);
 		goto err_init;
 	}
@@ -396,7 +512,6 @@
 	if (gpio_is_valid(tps->vsel0_gpio))
 		gpio_free(tps->vsel0_gpio);
 err_gpio0:
-	regmap_exit(tps->regmap);
 	return ret;
 }
 
@@ -417,7 +532,6 @@
 		gpio_free(tps->vsel0_gpio);
 
 	regulator_unregister(tps->rdev);
-	regmap_exit(tps->regmap);
 	return 0;
 }
 
@@ -432,13 +546,16 @@
 	/* Configure the output discharge path */
 	st = regmap_update_bits(tps->regmap, REG_RAMPCTRL, BIT(2), BIT(2));
 	if (st < 0)
-		dev_err(tps->dev, "%s() fails in updating reg %d\n",
-			__func__, REG_RAMPCTRL);
+		dev_err(tps->dev,
+			"%s(): register %d update failed with err %d\n",
+			__func__, REG_RAMPCTRL, st);
 }
 
 static const struct i2c_device_id tps62360_id[] = {
 	{.name = "tps62360", .driver_data = TPS62360},
 	{.name = "tps62361", .driver_data = TPS62361},
+	{.name = "tps62362", .driver_data = TPS62362},
+	{.name = "tps62363", .driver_data = TPS62363},
 	{},
 };
 
@@ -448,6 +565,7 @@
 	.driver = {
 		.name = "tps62360",
 		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(tps62360_of_match),
 	},
 	.probe = tps62360_probe,
 	.remove = __devexit_p(tps62360_remove),
@@ -468,5 +586,5 @@
 module_exit(tps62360_cleanup);
 
 MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_DESCRIPTION("TPS62360 voltage regulator driver");
+MODULE_DESCRIPTION("TPS6236x voltage regulator driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 43e4902..f841bd0 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -23,7 +23,6 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/i2c.h>
-#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/regmap.h>
 
@@ -72,7 +71,7 @@
 
 /* LDO_CTRL bitfields */
 #define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id)	((ldo_id)*4)
-#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id)	(0xF0 >> ((ldo_id)*4))
+#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id)	(0x0F << ((ldo_id)*4))
 
 /* Number of step-down converters available */
 #define TPS65023_NUM_DCDC		3
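
The one-line TPS65023_LDO_CTRL_LDOx_MASK change above is easy to miss: the old definition shifted 0xF0 right, which selects the opposite nibble from the one TPS65023_LDO_CTRL_LDOx_SHIFT points at. A standalone check (the macro copies below exist only for this example) makes the fix visible:

#include <stdio.h>

/* Local copies of the old and new macros, for illustration only. */
#define LDOx_SHIFT(ldo_id)	((ldo_id) * 4)
#define OLD_LDOx_MASK(ldo_id)	(0xF0 >> ((ldo_id) * 4))
#define NEW_LDOx_MASK(ldo_id)	(0x0F << ((ldo_id) * 4))

int main(void)
{
	int ldo_id;

	for (ldo_id = 0; ldo_id < 2; ldo_id++)
		printf("LDO%d: shift=%d old mask=0x%02X new mask=0x%02X\n",
		       ldo_id + 1, LDOx_SHIFT(ldo_id),
		       OLD_LDOx_MASK(ldo_id), NEW_LDOx_MASK(ldo_id));
	return 0;
}

The new mask lines up with the shift for both LDOs (bits 3:0 for LDO1, bits 7:4 for LDO2), which is what the regmap_update_bits() call in the reworked tps65023_ldo_set_voltage_sel() below relies on.
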
@@ -139,7 +138,6 @@
 /* PMIC details */
 struct tps_pmic {
 	struct regulator_desc desc[TPS65023_NUM_REGULATOR];
-	struct i2c_client *client;
 	struct regulator_dev *rdev[TPS65023_NUM_REGULATOR];
 	const struct tps_info *info[TPS65023_NUM_REGULATOR];
 	struct regmap *regmap;
@@ -152,96 +150,6 @@
 	u8 core_regulator;
 };
 
-static int tps65023_dcdc_is_enabled(struct regulator_dev *dev)
-{
-	struct tps_pmic *tps = rdev_get_drvdata(dev);
-	int data, dcdc = rdev_get_id(dev);
-	int ret;
-	u8 shift;
-
-	if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
-		return -EINVAL;
-
-	shift = TPS65023_NUM_REGULATOR - dcdc;
-	ret = regmap_read(tps->regmap, TPS65023_REG_REG_CTRL, &data);
-
-	if (ret != 0)
-		return ret;
-	else
-		return (data & 1<<shift) ? 1 : 0;
-}
-
-static int tps65023_ldo_is_enabled(struct regulator_dev *dev)
-{
-	struct tps_pmic *tps = rdev_get_drvdata(dev);
-	int data, ldo = rdev_get_id(dev);
-	int ret;
-	u8 shift;
-
-	if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
-		return -EINVAL;
-
-	shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
-	ret = regmap_read(tps->regmap, TPS65023_REG_REG_CTRL, &data);
-
-	if (ret != 0)
-		return ret;
-	else
-		return (data & 1<<shift) ? 1 : 0;
-}
-
-static int tps65023_dcdc_enable(struct regulator_dev *dev)
-{
-	struct tps_pmic *tps = rdev_get_drvdata(dev);
-	int dcdc = rdev_get_id(dev);
-	u8 shift;
-
-	if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
-		return -EINVAL;
-
-	shift = TPS65023_NUM_REGULATOR - dcdc;
-	return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 1 << shift);
-}
-
-static int tps65023_dcdc_disable(struct regulator_dev *dev)
-{
-	struct tps_pmic *tps = rdev_get_drvdata(dev);
-	int dcdc = rdev_get_id(dev);
-	u8 shift;
-
-	if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
-		return -EINVAL;
-
-	shift = TPS65023_NUM_REGULATOR - dcdc;
-	return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 0);
-}
-
-static int tps65023_ldo_enable(struct regulator_dev *dev)
-{
-	struct tps_pmic *tps = rdev_get_drvdata(dev);
-	int ldo = rdev_get_id(dev);
-	u8 shift;
-
-	if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
-		return -EINVAL;
-
-	shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
-	return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 1 << shift);
-}
-
-static int tps65023_ldo_disable(struct regulator_dev *dev)
-{
-	struct tps_pmic *tps = rdev_get_drvdata(dev);
-	int ldo = rdev_get_id(dev);
-	u8 shift;
-
-	if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
-		return -EINVAL;
-
-	shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
-	return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 0);
-}
-
 static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
 {
 	struct tps_pmic *tps = rdev_get_drvdata(dev);
@@ -261,50 +169,28 @@
 		return tps->info[dcdc]->min_uV;
 }
 
-static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
-				     int min_uV, int max_uV,
-				     unsigned *selector)
+static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
+					 unsigned selector)
 {
 	struct tps_pmic *tps = rdev_get_drvdata(dev);
 	int dcdc = rdev_get_id(dev);
-	int vsel;
 	int ret;
 
 	if (dcdc != tps->core_regulator)
 		return -EINVAL;
-	if (min_uV < tps->info[dcdc]->min_uV
-			|| min_uV > tps->info[dcdc]->max_uV)
-		return -EINVAL;
-	if (max_uV < tps->info[dcdc]->min_uV
-			|| max_uV > tps->info[dcdc]->max_uV)
-		return -EINVAL;
 
-	for (vsel = 0; vsel < tps->info[dcdc]->table_len; vsel++) {
-		int mV = tps->info[dcdc]->table[vsel];
-		int uV = mV * 1000;
-
-		/* Break at the first in-range value */
-		if (min_uV <= uV && uV <= max_uV)
-			break;
-	}
-
-	*selector = vsel;
-
-	if (vsel == tps->info[dcdc]->table_len)
-		goto failed;
-
-	ret = regmap_write(tps->regmap, TPS65023_REG_DEF_CORE, vsel);
+	ret = regmap_write(tps->regmap, TPS65023_REG_DEF_CORE, selector);
+	if (ret)
+		goto out;
 
 	/* Tell the chip that we have changed the value in DEFCORE
 	 * and its time to update the core voltage
 	 */
-	regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
-			TPS65023_REG_CTRL2_GO, TPS65023_REG_CTRL2_GO);
+	ret = regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+				 TPS65023_REG_CTRL2_GO, TPS65023_REG_CTRL2_GO);
 
+out:
 	return ret;
-
-failed:
-	return -EINVAL;
 }
 
 static int tps65023_ldo_get_voltage(struct regulator_dev *dev)
@@ -325,42 +211,15 @@
 	return tps->info[ldo]->table[data] * 1000;
 }
 
-static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
-				    int min_uV, int max_uV, unsigned *selector)
+static int tps65023_ldo_set_voltage_sel(struct regulator_dev *dev,
+					unsigned selector)
 {
 	struct tps_pmic *tps = rdev_get_drvdata(dev);
-	int data, vsel, ldo = rdev_get_id(dev);
-	int ret;
+	int ldo_index = rdev_get_id(dev) - TPS65023_LDO_1;
 
-	if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
-		return -EINVAL;
-
-	if (min_uV < tps->info[ldo]->min_uV || min_uV > tps->info[ldo]->max_uV)
-		return -EINVAL;
-	if (max_uV < tps->info[ldo]->min_uV || max_uV > tps->info[ldo]->max_uV)
-		return -EINVAL;
-
-	for (vsel = 0; vsel < tps->info[ldo]->table_len; vsel++) {
-		int mV = tps->info[ldo]->table[vsel];
-		int uV = mV * 1000;
-
-		/* Break at the first in-range value */
-		if (min_uV <= uV && uV <= max_uV)
-			break;
-	}
-
-	if (vsel == tps->info[ldo]->table_len)
-		return -EINVAL;
-
-	*selector = vsel;
-
-	ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
-	if (ret != 0)
-		return ret;
-
-	data &= TPS65023_LDO_CTRL_LDOx_MASK(ldo - TPS65023_LDO_1);
-	data |= (vsel << (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1)));
-	return regmap_write(tps->regmap, TPS65023_REG_LDO_CTRL, data);
+	return regmap_update_bits(tps->regmap, TPS65023_REG_LDO_CTRL,
+			TPS65023_LDO_CTRL_LDOx_MASK(ldo_index),
+			selector << TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_index));
 }
 
 static int tps65023_dcdc_list_voltage(struct regulator_dev *dev,
@@ -398,21 +257,21 @@
 
 /* Operations permitted on VDCDCx */
 static struct regulator_ops tps65023_dcdc_ops = {
-	.is_enabled = tps65023_dcdc_is_enabled,
-	.enable = tps65023_dcdc_enable,
-	.disable = tps65023_dcdc_disable,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 	.get_voltage = tps65023_dcdc_get_voltage,
-	.set_voltage = tps65023_dcdc_set_voltage,
+	.set_voltage_sel = tps65023_dcdc_set_voltage_sel,
 	.list_voltage = tps65023_dcdc_list_voltage,
 };
 
 /* Operations permitted on LDOx */
 static struct regulator_ops tps65023_ldo_ops = {
-	.is_enabled = tps65023_ldo_is_enabled,
-	.enable = tps65023_ldo_enable,
-	.disable = tps65023_ldo_disable,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 	.get_voltage = tps65023_ldo_get_voltage,
-	.set_voltage = tps65023_ldo_set_voltage,
+	.set_voltage_sel = tps65023_ldo_set_voltage_sel,
 	.list_voltage = tps65023_ldo_list_voltage,
 };
 
@@ -426,6 +285,7 @@
 {
 	const struct tps_driver_data *drv_data = (void *)id->driver_data;
 	const struct tps_info *info = drv_data->info;
+	struct regulator_config config = { };
 	struct regulator_init_data *init_data;
 	struct regulator_dev *rdev;
 	struct tps_pmic *tps;
@@ -443,20 +303,19 @@
 	if (!init_data)
 		return -EIO;
 
-	tps = kzalloc(sizeof(*tps), GFP_KERNEL);
+	tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
 	if (!tps)
 		return -ENOMEM;
 
-	tps->regmap = regmap_init_i2c(client, &tps65023_regmap_config);
+	tps->regmap = devm_regmap_init_i2c(client, &tps65023_regmap_config);
 	if (IS_ERR(tps->regmap)) {
 		error = PTR_ERR(tps->regmap);
 		dev_err(&client->dev, "Failed to allocate register map: %d\n",
 			error);
-		goto fail_alloc;
+		return error;
 	}
 
 	/* common for all regulators */
-	tps->client = client;
 	tps->core_regulator = drv_data->core_regulator;
 
 	for (i = 0; i < TPS65023_NUM_REGULATOR; i++, info++, init_data++) {
@@ -471,9 +330,22 @@
 		tps->desc[i].type = REGULATOR_VOLTAGE;
 		tps->desc[i].owner = THIS_MODULE;
 
+		tps->desc[i].enable_reg = TPS65023_REG_REG_CTRL;
+		if (i == TPS65023_LDO_1)
+			tps->desc[i].enable_mask = 1 << 1;
+		else if (i == TPS65023_LDO_2)
+			tps->desc[i].enable_mask = 1 << 2;
+		else /* DCDCx */
+			tps->desc[i].enable_mask =
+					1 << (TPS65023_NUM_REGULATOR - i);
+
+		config.dev = &client->dev;
+		config.init_data = init_data;
+		config.driver_data = tps;
+		config.regmap = tps->regmap;
+
 		/* Register the regulators */
-		rdev = regulator_register(&tps->desc[i], &client->dev,
-					  init_data, tps, NULL);
+		rdev = regulator_register(&tps->desc[i], &config);
 		if (IS_ERR(rdev)) {
 			dev_err(&client->dev, "failed to register %s\n",
 				id->name);
@@ -496,19 +368,9 @@
  fail:
 	while (--i >= 0)
 		regulator_unregister(tps->rdev[i]);
-
-	regmap_exit(tps->regmap);
- fail_alloc:
-	kfree(tps);
 	return error;
 }
 
-/**
- * tps_65023_remove - TPS65023 driver i2c remove handler
- * @client: i2c driver client device structure
- *
- * Unregister TPS driver as an i2c client device driver
- */
 static int __devexit tps_65023_remove(struct i2c_client *client)
 {
 	struct tps_pmic *tps = i2c_get_clientdata(client);
@@ -516,10 +378,6 @@
 
 	for (i = 0; i < TPS65023_NUM_REGULATOR; i++)
 		regulator_unregister(tps->rdev[i]);
-
-	regmap_exit(tps->regmap);
-	kfree(tps);
-
 	return 0;
 }
 
@@ -638,13 +496,13 @@
 };
 
 static struct tps_driver_data tps65021_drv_data = {
-		.info = tps65021_regs,
-		.core_regulator = TPS65023_DCDC_3,
+	.info = tps65021_regs,
+	.core_regulator = TPS65023_DCDC_3,
 };
 
 static struct tps_driver_data tps65023_drv_data = {
-		.info = tps65023_regs,
-		.core_regulator = TPS65023_DCDC_1,
+	.info = tps65023_regs,
+	.core_regulator = TPS65023_DCDC_1,
 };
 
 static const struct i2c_device_id tps_65023_id[] = {
@@ -669,22 +527,12 @@
 	.id_table = tps_65023_id,
 };
 
-/**
- * tps_65023_init
- *
- * Module init function
- */
 static int __init tps_65023_init(void)
 {
 	return i2c_add_driver(&tps_65023_i2c_driver);
 }
 subsys_initcall(tps_65023_init);
 
-/**
- * tps_65023_cleanup
- *
- * Module exit function
- */
 static void __exit tps_65023_cleanup(void)
 {
 	i2c_del_driver(&tps_65023_i2c_driver);
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 832833f..da38be1 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -23,7 +23,6 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/tps6507x.h>
-#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/mfd/tps6507x.h>
 
@@ -283,7 +282,7 @@
 					1 << shift);
 }
 
-static int tps6507x_pmic_get_voltage(struct regulator_dev *dev)
+static int tps6507x_pmic_get_voltage_sel(struct regulator_dev *dev)
 {
 	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
 	int data, rid = rdev_get_id(dev);
@@ -325,7 +324,7 @@
 		return data;
 
 	data &= mask;
-	return tps->info[rid]->table[data] * 1000;
+	return data;
 }
 
 static int tps6507x_pmic_set_voltage_sel(struct regulator_dev *dev,
@@ -395,7 +394,7 @@
 	.is_enabled = tps6507x_pmic_is_enabled,
 	.enable = tps6507x_pmic_enable,
 	.disable = tps6507x_pmic_disable,
-	.get_voltage = tps6507x_pmic_get_voltage,
+	.get_voltage_sel = tps6507x_pmic_get_voltage_sel,
 	.set_voltage_sel = tps6507x_pmic_set_voltage_sel,
 	.list_voltage = tps6507x_pmic_list_voltage,
 };
@@ -404,6 +403,7 @@
 {
 	struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
 	struct tps_info *info = &tps6507x_pmic_regs[0];
+	struct regulator_config config = { };
 	struct regulator_init_data *init_data;
 	struct regulator_dev *rdev;
 	struct tps6507x_pmic *tps;
@@ -428,7 +428,7 @@
 	if (!init_data)
 		return -EINVAL;
 
-	tps = kzalloc(sizeof(*tps), GFP_KERNEL);
+	tps = devm_kzalloc(&pdev->dev, sizeof(*tps), GFP_KERNEL);
 	if (!tps)
 		return -ENOMEM;
 
@@ -453,8 +453,11 @@
 		tps->desc[i].type = REGULATOR_VOLTAGE;
 		tps->desc[i].owner = THIS_MODULE;
 
-		rdev = regulator_register(&tps->desc[i],
-					tps6507x_dev->dev, init_data, tps, NULL);
+		config.dev = tps6507x_dev->dev;
+		config.init_data = init_data;
+		config.driver_data = tps;
+
+		rdev = regulator_register(&tps->desc[i], &config);
 		if (IS_ERR(rdev)) {
 			dev_err(tps6507x_dev->dev,
 				"failed to register %s regulator\n",
@@ -475,8 +478,6 @@
 fail:
 	while (--i >= 0)
 		regulator_unregister(tps->rdev[i]);
-
-	kfree(tps);
 	return error;
 }
 
@@ -488,9 +489,6 @@
 
 	for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
 		regulator_unregister(tps->rdev[i]);
-
-	kfree(tps);
-
 	return 0;
 }
 
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
new file mode 100644
index 0000000..001ad55
--- /dev/null
+++ b/drivers/regulator/tps65090-regulator.c
@@ -0,0 +1,150 @@
+/*
+ * Regulator driver for tps65090 power management chip.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps65090.h>
+#include <linux/regulator/tps65090-regulator.h>
+
+struct tps65090_regulator {
+	int		id;
+	/* used by regulator core */
+	struct regulator_desc	desc;
+
+	/* Device */
+	struct device		*dev;
+};
+
+static struct regulator_ops tps65090_ops = {
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+};
+
+#define tps65090_REG(_id)				\
+{							\
+	.id		= TPS65090_ID_##_id,		\
+	.desc = {					\
+		.name = tps65090_rails(_id),		\
+		.id = TPS65090_ID_##_id,		\
+		.ops = &tps65090_ops,			\
+		.type = REGULATOR_VOLTAGE,		\
+		.owner = THIS_MODULE,			\
+		.enable_reg = (TPS65090_ID_##_id) + 12,	\
+		.enable_mask = BIT(0),			\
+	},						\
+}
+
+static struct tps65090_regulator TPS65090_regulator[] = {
+	tps65090_REG(DCDC1),
+	tps65090_REG(DCDC2),
+	tps65090_REG(DCDC3),
+	tps65090_REG(FET1),
+	tps65090_REG(FET2),
+	tps65090_REG(FET3),
+	tps65090_REG(FET4),
+	tps65090_REG(FET5),
+	tps65090_REG(FET6),
+	tps65090_REG(FET7),
+};
+
+static inline struct tps65090_regulator *find_regulator_info(int id)
+{
+	struct tps65090_regulator *ri;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(TPS65090_regulator); i++) {
+		ri = &TPS65090_regulator[i];
+		if (ri->desc.id == id)
+			return ri;
+	}
+	return NULL;
+}
+
+static int __devinit tps65090_regulator_probe(struct platform_device *pdev)
+{
+	struct tps65090 *tps65090_mfd = dev_get_drvdata(pdev->dev.parent);
+	struct tps65090_regulator *ri = NULL;
+	struct regulator_config config = { };
+	struct regulator_dev *rdev;
+	struct tps65090_regulator_platform_data *tps_pdata;
+	int id = pdev->id;
+
+	dev_dbg(&pdev->dev, "Probing regulator %d\n", id);
+
+	ri = find_regulator_info(id);
+	if (ri == NULL) {
+		dev_err(&pdev->dev, "invalid regulator ID specified\n");
+		return -EINVAL;
+	}
+	tps_pdata = pdev->dev.platform_data;
+	ri->dev = &pdev->dev;
+
+	config.dev = &pdev->dev;
+	config.init_data = &tps_pdata->regulator;
+	config.driver_data = ri;
+	config.regmap = tps65090_mfd->rmap;
+
+	rdev = regulator_register(&ri->desc, &config);
+	if (IS_ERR(rdev)) {
+		dev_err(&pdev->dev, "failed to register regulator %s\n",
+				ri->desc.name);
+		return PTR_ERR(rdev);
+	}
+
+	platform_set_drvdata(pdev, rdev);
+	return 0;
+}
+
+static int __devexit tps65090_regulator_remove(struct platform_device *pdev)
+{
+	struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+	regulator_unregister(rdev);
+	return 0;
+}
+
+static struct platform_driver tps65090_regulator_driver = {
+	.driver	= {
+		.name	= "tps65090-regulator",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= tps65090_regulator_probe,
+	.remove		= __devexit_p(tps65090_regulator_remove),
+};
+
+static int __init tps65090_regulator_init(void)
+{
+	return platform_driver_register(&tps65090_regulator_driver);
+}
+subsys_initcall(tps65090_regulator_init);
+
+static void __exit tps65090_regulator_exit(void)
+{
+	platform_driver_unregister(&tps65090_regulator_driver);
+}
+module_exit(tps65090_regulator_exit);
+
+MODULE_DESCRIPTION("tps65090 regulator driver");
+MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index e39521b..9d371d2 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -213,54 +213,17 @@
 	return selector;
 }
 
-static int tps65217_pmic_ldo1_set_voltage_sel(struct regulator_dev *dev,
-						unsigned selector)
-{
-	struct tps65217 *tps = rdev_get_drvdata(dev);
-	int ldo = rdev_get_id(dev);
-
-	if (ldo != TPS65217_LDO_1)
-		return -EINVAL;
-
-	if (selector >= tps->info[ldo]->table_len)
-		return -EINVAL;
-
-	/* Set the voltage based on vsel value and write protect level is 2 */
-	return tps65217_set_bits(tps, tps->info[ldo]->set_vout_reg,
-					tps->info[ldo]->set_vout_mask,
-					selector, TPS65217_PROTECT_L2);
-}
-
-static int tps65217_pmic_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV, unsigned *selector)
+static int tps65217_pmic_set_voltage_sel(struct regulator_dev *dev,
+					 unsigned selector)
 {
 	int ret;
 	struct tps65217 *tps = rdev_get_drvdata(dev);
 	unsigned int rid = rdev_get_id(dev);
 
-	/* LDO1 implements set_voltage_sel callback */
-	if (rid == TPS65217_LDO_1)
-		return -EINVAL;
-
-	if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
-		return -EINVAL;
-
-	if (min_uV < tps->info[rid]->min_uV
-		|| min_uV > tps->info[rid]->max_uV)
-		return -EINVAL;
-
-	if (max_uV < tps->info[rid]->min_uV
-		|| max_uV > tps->info[rid]->max_uV)
-		return -EINVAL;
-
-	ret = tps->info[rid]->uv_to_vsel(min_uV, selector);
-	if (ret)
-		return ret;
-
 	/* Set the voltage based on vsel value and write protect level is 2 */
 	ret = tps65217_set_bits(tps, tps->info[rid]->set_vout_reg,
 				tps->info[rid]->set_vout_mask,
-				*selector, TPS65217_PROTECT_L2);
+				selector, TPS65217_PROTECT_L2);
 
 	/* Set GO bit for DCDCx to initiate voltage transistion */
 	switch (rid) {
@@ -274,6 +237,34 @@
 	return ret;
 }
 
+static int tps65217_pmic_map_voltage(struct regulator_dev *dev,
+				     int min_uV, int max_uV)
+{
+
+	struct tps65217 *tps = rdev_get_drvdata(dev);
+	unsigned int sel, rid = rdev_get_id(dev);
+	int ret;
+
+	/* LDO1 uses regulator_map_voltage_iterate() */
+	if (rid == TPS65217_LDO_1)
+		return -EINVAL;
+
+	if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
+		return -EINVAL;
+
+	if (min_uV < tps->info[rid]->min_uV || min_uV > tps->info[rid]->max_uV)
+		return -EINVAL;
+
+	if (max_uV < tps->info[rid]->min_uV || max_uV > tps->info[rid]->max_uV)
+		return -EINVAL;
+
+	ret = tps->info[rid]->uv_to_vsel(min_uV, &sel);
+	if (ret)
+		return ret;
+
+	return sel;
+}
+
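
The tps65217 change above is the same conversion applied throughout this series: the driver no longer searches for a selector inside .set_voltage; instead .map_voltage turns a [min_uV, max_uV] request into a selector and .set_voltage_sel programs it. A self-contained sketch of that split follows, using a made-up voltage table; none of these names or values come from the driver.

#include <stdio.h>

static const int example_table_uV[] = { 900000, 1000000, 1100000, 1200000 };
#define N_VOLTAGES (int)(sizeof(example_table_uV) / sizeof(example_table_uV[0]))

/* map_voltage step: pick the lowest selector whose voltage is in range. */
static int example_map_voltage(int min_uV, int max_uV)
{
	int sel;

	for (sel = 0; sel < N_VOLTAGES; sel++)
		if (example_table_uV[sel] >= min_uV &&
		    example_table_uV[sel] <= max_uV)
			return sel;
	return -1;
}

/* set_voltage_sel step: stand-in for the register write. */
static int example_set_voltage_sel(int sel)
{
	printf("write selector %d (%d uV)\n", sel, example_table_uV[sel]);
	return 0;
}

int main(void)
{
	int sel = example_map_voltage(950000, 1150000);

	if (sel < 0)
		return 1;
	return example_set_voltage_sel(sel);
}

With the 950000..1150000 uV request used in main(), the map step picks selector 1 (1000000 uV) and the set step programs it; the core performs the same two-step sequence with the driver callbacks above.
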
 static int tps65217_pmic_list_voltage(struct regulator_dev *dev,
 					unsigned selector)
 {
@@ -298,8 +289,9 @@
 	.enable			= tps65217_pmic_enable,
 	.disable		= tps65217_pmic_disable,
 	.get_voltage_sel	= tps65217_pmic_get_voltage_sel,
-	.set_voltage		= tps65217_pmic_set_voltage,
+	.set_voltage_sel	= tps65217_pmic_set_voltage_sel,
 	.list_voltage		= tps65217_pmic_list_voltage,
+	.map_voltage		= tps65217_pmic_map_voltage,
 };
 
 /* Operations permitted on LDO1 */
@@ -308,11 +300,11 @@
 	.enable			= tps65217_pmic_enable,
 	.disable		= tps65217_pmic_disable,
 	.get_voltage_sel	= tps65217_pmic_get_voltage_sel,
-	.set_voltage_sel	= tps65217_pmic_ldo1_set_voltage_sel,
+	.set_voltage_sel	= tps65217_pmic_set_voltage_sel,
 	.list_voltage		= tps65217_pmic_list_voltage,
 };
 
-static struct regulator_desc regulators[] = {
+static const struct regulator_desc regulators[] = {
 	TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, tps65217_pmic_ops, 64),
 	TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, tps65217_pmic_ops, 64),
 	TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, tps65217_pmic_ops, 64),
@@ -327,13 +319,17 @@
 	struct regulator_dev *rdev;
 	struct tps65217 *tps;
 	struct tps_info *info = &tps65217_pmic_regs[pdev->id];
+	struct regulator_config config = { };
 
 	/* Already set by core driver */
 	tps = dev_to_tps65217(pdev->dev.parent);
 	tps->info[pdev->id] = info;
 
-	rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
-				  pdev->dev.platform_data, tps, NULL);
+	config.dev = &pdev->dev;
+	config.init_data = pdev->dev.platform_data;
+	config.driver_data = tps;
+
+	rdev = regulator_register(&regulators[pdev->id], &config);
 	if (IS_ERR(rdev))
 		return PTR_ERR(rdev);
 
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 4a421be..b88b3df 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -458,12 +458,10 @@
 		info->voltages[selector] : -EINVAL);
 }
 
-static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
-		       unsigned *selector)
+static int set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
 {
 	const struct supply_info *info;
 	struct tps6524x *hw;
-	unsigned i;
 
 	hw	= rdev_get_drvdata(rdev);
 	info	= &supply_info[rdev_get_id(rdev)];
@@ -471,20 +469,10 @@
 	if (info->flags & FIXED_VOLTAGE)
 		return -EINVAL;
 
-	for (i = 0; i < info->n_voltages; i++)
-		if (min_uV <= info->voltages[i] &&
-		    max_uV >= info->voltages[i])
-			break;
-
-	if (i >= info->n_voltages)
-		i = info->n_voltages - 1;
-
-	*selector = i;
-
-	return write_field(hw, &info->voltage, i);
+	return write_field(hw, &info->voltage, selector);
 }
 
-static int get_voltage(struct regulator_dev *rdev)
+static int get_voltage_sel(struct regulator_dev *rdev)
 {
 	const struct supply_info *info;
 	struct tps6524x *hw;
@@ -502,7 +490,7 @@
 	if (WARN_ON(ret >= info->n_voltages))
 		return -EIO;
 
-	return info->voltages[ret];
+	return ret;
 }
 
 static int set_current_limit(struct regulator_dev *rdev, int min_uA,
@@ -587,8 +575,8 @@
 	.is_enabled		= is_supply_enabled,
 	.enable			= enable_supply,
 	.disable		= disable_supply,
-	.get_voltage		= get_voltage,
-	.set_voltage		= set_voltage,
+	.get_voltage_sel	= get_voltage_sel,
+	.set_voltage_sel	= set_voltage_sel,
 	.list_voltage		= list_voltage,
 	.set_current_limit	= set_current_limit,
 	.get_current_limit	= get_current_limit,
@@ -607,7 +595,6 @@
 		hw->rdev[i] = NULL;
 	}
 	spi_set_drvdata(spi, NULL);
-	kfree(hw);
 	return 0;
 }
 
@@ -617,6 +604,7 @@
 	struct device *dev = &spi->dev;
 	const struct supply_info *info = supply_info;
 	struct regulator_init_data *init_data;
+	struct regulator_config config = { };
 	int ret = 0, i;
 
 	init_data = dev->platform_data;
@@ -625,7 +613,7 @@
 		return -EINVAL;
 	}
 
-	hw = kzalloc(sizeof(struct tps6524x), GFP_KERNEL);
+	hw = devm_kzalloc(&spi->dev, sizeof(struct tps6524x), GFP_KERNEL);
 	if (!hw) {
 		dev_err(dev, "cannot allocate regulator private data\n");
 		return -ENOMEM;
@@ -648,8 +636,11 @@
 		if (info->flags & FIXED_VOLTAGE)
 			hw->desc[i].n_voltages = 1;
 
-		hw->rdev[i] = regulator_register(&hw->desc[i], dev,
-						 init_data, hw, NULL);
+		config.dev = dev;
+		config.init_data = init_data;
+		config.driver_data = hw;
+
+		hw->rdev[i] = regulator_register(&hw->desc[i], &config);
 		if (IS_ERR(hw->rdev[i])) {
 			ret = PTR_ERR(hw->rdev[i]);
 			hw->rdev[i] = NULL;
@@ -673,17 +664,7 @@
 	},
 };
 
-static int __init pmic_driver_init(void)
-{
-	return spi_register_driver(&pmic_driver);
-}
-module_init(pmic_driver_init);
-
-static void __exit pmic_driver_exit(void)
-{
-	spi_unregister_driver(&pmic_driver);
-}
-module_exit(pmic_driver_exit);
+module_spi_driver(pmic_driver);
 
 MODULE_DESCRIPTION("TPS6524X PMIC Driver");
 MODULE_AUTHOR("Cyril Chemparathy");
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index cfc1f16..c0a2145 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -75,8 +75,7 @@
 	return rdev_get_dev(rdev)->parent->parent;
 }
 
-static int tps6586x_ldo_list_voltage(struct regulator_dev *rdev,
-				     unsigned selector)
+static int tps6586x_list_voltage(struct regulator_dev *rdev, unsigned selector)
 {
 	struct tps6586x_regulator *info = rdev_get_drvdata(rdev);
 	int rid = rdev_get_id(rdev);
@@ -89,47 +88,34 @@
 }
 
 
-static int __tps6586x_ldo_set_voltage(struct device *parent,
-				      struct tps6586x_regulator *ri,
-				      int min_uV, int max_uV,
-				      unsigned *selector)
-{
-	int val, uV;
-	uint8_t mask;
-
-	for (val = 0; val < ri->desc.n_voltages; val++) {
-		uV = ri->voltages[val] * 1000;
-
-		/* LDO0 has minimal voltage 1.2 rather than 1.25 */
-		if (ri->desc.id == TPS6586X_ID_LDO_0 && val == 0)
-			uV -= 50 * 1000;
-
-		/* use the first in-range value */
-		if (min_uV <= uV && uV <= max_uV) {
-
-			*selector = val;
-
-			val <<= ri->volt_shift;
-			mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
-
-			return tps6586x_update(parent, ri->volt_reg, val, mask);
-		}
-	}
-
-	return -EINVAL;
-}
-
-static int tps6586x_ldo_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV, unsigned *selector)
+static int tps6586x_set_voltage_sel(struct regulator_dev *rdev,
+				    unsigned selector)
 {
 	struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
 	struct device *parent = to_tps6586x_dev(rdev);
+	int ret, val, rid = rdev_get_id(rdev);
+	uint8_t mask;
 
-	return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
-					  selector);
+	val = selector << ri->volt_shift;
+	mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
+
+	ret = tps6586x_update(parent, ri->volt_reg, val, mask);
+	if (ret)
+		return ret;
+
+	/* Update go bit for DVM regulators */
+	switch (rid) {
+	case TPS6586X_ID_LDO_2:
+	case TPS6586X_ID_LDO_4:
+	case TPS6586X_ID_SM_0:
+	case TPS6586X_ID_SM_1:
+		ret = tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
+		break;
+	}
+	return ret;
 }
 
-static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
+static int tps6586x_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
 	struct device *parent = to_tps6586x_dev(rdev);
@@ -146,22 +132,7 @@
 	if (val >= ri->desc.n_voltages)
 		BUG();
 
-	return ri->voltages[val] * 1000;
-}
-
-static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV, unsigned *selector)
-{
-	struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
-	struct device *parent = to_tps6586x_dev(rdev);
-	int ret;
-
-	ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
-					 selector);
-	if (ret)
-		return ret;
-
-	return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
+	return val;
 }
 
 static int tps6586x_regulator_enable(struct regulator_dev *rdev)
@@ -196,20 +167,10 @@
 	return !!(reg_val & (1 << ri->enable_bit[0]));
 }
 
-static struct regulator_ops tps6586x_regulator_ldo_ops = {
-	.list_voltage = tps6586x_ldo_list_voltage,
-	.get_voltage = tps6586x_ldo_get_voltage,
-	.set_voltage = tps6586x_ldo_set_voltage,
-
-	.is_enabled = tps6586x_regulator_is_enabled,
-	.enable = tps6586x_regulator_enable,
-	.disable = tps6586x_regulator_disable,
-};
-
-static struct regulator_ops tps6586x_regulator_dvm_ops = {
-	.list_voltage = tps6586x_ldo_list_voltage,
-	.get_voltage = tps6586x_ldo_get_voltage,
-	.set_voltage = tps6586x_dvm_set_voltage,
+static struct regulator_ops tps6586x_regulator_ops = {
+	.list_voltage = tps6586x_list_voltage,
+	.get_voltage_sel = tps6586x_get_voltage_sel,
+	.set_voltage_sel = tps6586x_set_voltage_sel,
 
 	.is_enabled = tps6586x_regulator_is_enabled,
 	.enable = tps6586x_regulator_enable,
@@ -241,11 +202,11 @@
 	1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
 };
 
-#define TPS6586X_REGULATOR(_id, vdata, _ops, vreg, shift, nbits,	\
+#define TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits,		\
 			   ereg0, ebit0, ereg1, ebit1)			\
 	.desc	= {							\
 		.name	= "REG-" #_id,					\
-		.ops	= &tps6586x_regulator_##_ops,			\
+		.ops	= &tps6586x_regulator_ops,			\
 		.type	= REGULATOR_VOLTAGE,				\
 		.id	= TPS6586X_ID_##_id,				\
 		.n_voltages = ARRAY_SIZE(tps6586x_##vdata##_voltages),	\
@@ -267,14 +228,14 @@
 #define TPS6586X_LDO(_id, vdata, vreg, shift, nbits,			\
 		     ereg0, ebit0, ereg1, ebit1)			\
 {									\
-	TPS6586X_REGULATOR(_id, vdata, ldo_ops, vreg, shift, nbits,	\
+	TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits,		\
 			   ereg0, ebit0, ereg1, ebit1)			\
 }
 
 #define TPS6586X_DVM(_id, vdata, vreg, shift, nbits,			\
 		     ereg0, ebit0, ereg1, ebit1, goreg, gobit)		\
 {									\
-	TPS6586X_REGULATOR(_id, vdata, dvm_ops, vreg, shift, nbits,	\
+	TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits,		\
 			   ereg0, ebit0, ereg1, ebit1)			\
 	TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit)			\
 }
@@ -384,6 +345,7 @@
 static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
 {
 	struct tps6586x_regulator *ri = NULL;
+	struct regulator_config config = { };
 	struct regulator_dev *rdev;
 	int id = pdev->id;
 	int err;
@@ -400,8 +362,12 @@
 	if (err)
 		return err;
 
-	rdev = regulator_register(&ri->desc, &pdev->dev,
-				  pdev->dev.platform_data, ri, NULL);
+	config.dev = &pdev->dev;
+	config.of_node = pdev->dev.of_node;
+	config.init_data = pdev->dev.platform_data;
+	config.driver_data = ri;
+
+	rdev = regulator_register(&ri->desc, &config);
 	if (IS_ERR(rdev)) {
 		dev_err(&pdev->dev, "failed to register regulator %s\n",
 				ri->desc.name);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 4a37c2b6..8dc3d93 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -20,10 +20,10 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
-#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
 #include <linux/mfd/tps65910.h>
+#include <linux/regulator/of_regulator.h>
 
 #define TPS65910_SUPPLY_STATE_ENABLED	0x1
 #define EXT_SLEEP_CONTROL (TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1 |	\
@@ -94,11 +94,11 @@
 
 static struct tps_info tps65910_regs[] = {
 	{
-		.name = "VRTC",
+		.name = "vrtc",
 		.enable_time_us = 2200,
 	},
 	{
-		.name = "VIO",
+		.name = "vio",
 		.min_uV = 1500000,
 		.max_uV = 3300000,
 		.n_voltages = ARRAY_SIZE(VIO_VSEL_table),
@@ -106,19 +106,19 @@
 		.enable_time_us = 350,
 	},
 	{
-		.name = "VDD1",
+		.name = "vdd1",
 		.min_uV = 600000,
 		.max_uV = 4500000,
 		.enable_time_us = 350,
 	},
 	{
-		.name = "VDD2",
+		.name = "vdd2",
 		.min_uV = 600000,
 		.max_uV = 4500000,
 		.enable_time_us = 350,
 	},
 	{
-		.name = "VDD3",
+		.name = "vdd3",
 		.min_uV = 5000000,
 		.max_uV = 5000000,
 		.n_voltages = ARRAY_SIZE(VDD3_VSEL_table),
@@ -126,7 +126,7 @@
 		.enable_time_us = 200,
 	},
 	{
-		.name = "VDIG1",
+		.name = "vdig1",
 		.min_uV = 1200000,
 		.max_uV = 2700000,
 		.n_voltages = ARRAY_SIZE(VDIG1_VSEL_table),
@@ -134,7 +134,7 @@
 		.enable_time_us = 100,
 	},
 	{
-		.name = "VDIG2",
+		.name = "vdig2",
 		.min_uV = 1000000,
 		.max_uV = 1800000,
 		.n_voltages = ARRAY_SIZE(VDIG2_VSEL_table),
@@ -142,7 +142,7 @@
 		.enable_time_us = 100,
 	},
 	{
-		.name = "VPLL",
+		.name = "vpll",
 		.min_uV = 1000000,
 		.max_uV = 2500000,
 		.n_voltages = ARRAY_SIZE(VPLL_VSEL_table),
@@ -150,7 +150,7 @@
 		.enable_time_us = 100,
 	},
 	{
-		.name = "VDAC",
+		.name = "vdac",
 		.min_uV = 1800000,
 		.max_uV = 2850000,
 		.n_voltages = ARRAY_SIZE(VDAC_VSEL_table),
@@ -158,7 +158,7 @@
 		.enable_time_us = 100,
 	},
 	{
-		.name = "VAUX1",
+		.name = "vaux1",
 		.min_uV = 1800000,
 		.max_uV = 2850000,
 		.n_voltages = ARRAY_SIZE(VAUX1_VSEL_table),
@@ -166,7 +166,7 @@
 		.enable_time_us = 100,
 	},
 	{
-		.name = "VAUX2",
+		.name = "vaux2",
 		.min_uV = 1800000,
 		.max_uV = 3300000,
 		.n_voltages = ARRAY_SIZE(VAUX2_VSEL_table),
@@ -174,7 +174,7 @@
 		.enable_time_us = 100,
 	},
 	{
-		.name = "VAUX33",
+		.name = "vaux33",
 		.min_uV = 1800000,
 		.max_uV = 3300000,
 		.n_voltages = ARRAY_SIZE(VAUX33_VSEL_table),
@@ -182,7 +182,7 @@
 		.enable_time_us = 100,
 	},
 	{
-		.name = "VMMC",
+		.name = "vmmc",
 		.min_uV = 1800000,
 		.max_uV = 3300000,
 		.n_voltages = ARRAY_SIZE(VMMC_VSEL_table),
@@ -193,11 +193,11 @@
 
 static struct tps_info tps65911_regs[] = {
 	{
-		.name = "VRTC",
+		.name = "vrtc",
 		.enable_time_us = 2200,
 	},
 	{
-		.name = "VIO",
+		.name = "vio",
 		.min_uV = 1500000,
 		.max_uV = 3300000,
 		.n_voltages = ARRAY_SIZE(VIO_VSEL_table),
@@ -205,77 +205,77 @@
 		.enable_time_us = 350,
 	},
 	{
-		.name = "VDD1",
+		.name = "vdd1",
 		.min_uV = 600000,
 		.max_uV = 4500000,
 		.n_voltages = 73,
 		.enable_time_us = 350,
 	},
 	{
-		.name = "VDD2",
+		.name = "vdd2",
 		.min_uV = 600000,
 		.max_uV = 4500000,
 		.n_voltages = 73,
 		.enable_time_us = 350,
 	},
 	{
-		.name = "VDDCTRL",
+		.name = "vddctrl",
 		.min_uV = 600000,
 		.max_uV = 1400000,
 		.n_voltages = 65,
 		.enable_time_us = 900,
 	},
 	{
-		.name = "LDO1",
+		.name = "ldo1",
 		.min_uV = 1000000,
 		.max_uV = 3300000,
 		.n_voltages = 47,
 		.enable_time_us = 420,
 	},
 	{
-		.name = "LDO2",
+		.name = "ldo2",
 		.min_uV = 1000000,
 		.max_uV = 3300000,
 		.n_voltages = 47,
 		.enable_time_us = 420,
 	},
 	{
-		.name = "LDO3",
+		.name = "ldo3",
 		.min_uV = 1000000,
 		.max_uV = 3300000,
 		.n_voltages = 24,
 		.enable_time_us = 230,
 	},
 	{
-		.name = "LDO4",
+		.name = "ldo4",
 		.min_uV = 1000000,
 		.max_uV = 3300000,
 		.n_voltages = 47,
 		.enable_time_us = 230,
 	},
 	{
-		.name = "LDO5",
+		.name = "ldo5",
 		.min_uV = 1000000,
 		.max_uV = 3300000,
 		.n_voltages = 24,
 		.enable_time_us = 230,
 	},
 	{
-		.name = "LDO6",
+		.name = "ldo6",
 		.min_uV = 1000000,
 		.max_uV = 3300000,
 		.n_voltages = 24,
 		.enable_time_us = 230,
 	},
 	{
-		.name = "LDO7",
+		.name = "ldo7",
 		.min_uV = 1000000,
 		.max_uV = 3300000,
 		.n_voltages = 24,
 		.enable_time_us = 230,
 	},
 	{
-		.name = "LDO8",
+		.name = "ldo8",
 		.min_uV = 1000000,
 		.max_uV = 3300000,
 		.n_voltages = 24,
@@ -467,48 +467,6 @@
 	}
 }
 
-static int tps65910_is_enabled(struct regulator_dev *dev)
-{
-	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
-	int reg, value, id = rdev_get_id(dev);
-
-	reg = pmic->get_ctrl_reg(id);
-	if (reg < 0)
-		return reg;
-
-	value = tps65910_reg_read(pmic, reg);
-	if (value < 0)
-		return value;
-
-	return value & TPS65910_SUPPLY_STATE_ENABLED;
-}
-
-static int tps65910_enable(struct regulator_dev *dev)
-{
-	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
-	struct tps65910 *mfd = pmic->mfd;
-	int reg, id = rdev_get_id(dev);
-
-	reg = pmic->get_ctrl_reg(id);
-	if (reg < 0)
-		return reg;
-
-	return tps65910_set_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
-}
-
-static int tps65910_disable(struct regulator_dev *dev)
-{
-	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
-	struct tps65910 *mfd = pmic->mfd;
-	int reg, id = rdev_get_id(dev);
-
-	reg = pmic->get_ctrl_reg(id);
-	if (reg < 0)
-		return reg;
-
-	return tps65910_clear_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
-}
-
 static int tps65910_enable_time(struct regulator_dev *dev)
 {
 	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
@@ -621,10 +579,10 @@
 	return -EINVAL;
 }
 
-static int tps65910_get_voltage(struct regulator_dev *dev)
+static int tps65910_get_voltage_sel(struct regulator_dev *dev)
 {
 	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
-	int reg, value, id = rdev_get_id(dev), voltage = 0;
+	int reg, value, id = rdev_get_id(dev);
 
 	reg = pmic->get_ctrl_reg(id);
 	if (reg < 0)
@@ -651,9 +609,7 @@
 		return -EINVAL;
 	}
 
-	voltage = pmic->info[id]->voltage_table[value] * 1000;
-
-	return voltage;
+	return value;
 }
 
 static int tps65910_get_voltage_vdd3(struct regulator_dev *dev)
@@ -661,10 +617,10 @@
 	return 5 * 1000 * 1000;
 }
 
-static int tps65911_get_voltage(struct regulator_dev *dev)
+static int tps65911_get_voltage_sel(struct regulator_dev *dev)
 {
 	struct tps65910_reg *pmic = rdev_get_drvdata(dev);
-	int step_mv, id = rdev_get_id(dev);
+	int id = rdev_get_id(dev);
 	u8 value, reg;
 
 	reg = pmic->get_ctrl_reg(id);
@@ -677,13 +633,6 @@
 	case TPS65911_REG_LDO4:
 		value &= LDO1_SEL_MASK;
 		value >>= LDO_SEL_SHIFT;
-		/* The first 5 values of the selector correspond to 1V */
-		if (value < 5)
-			value = 0;
-		else
-			value -= 4;
-
-		step_mv = 50;
 		break;
 	case TPS65911_REG_LDO3:
 	case TPS65911_REG_LDO5:
@@ -692,23 +641,16 @@
 	case TPS65911_REG_LDO8:
 		value &= LDO3_SEL_MASK;
 		value >>= LDO_SEL_SHIFT;
-		/* The first 3 values of the selector correspond to 1V */
-		if (value < 3)
-			value = 0;
-		else
-			value -= 2;
-
-		step_mv = 100;
 		break;
 	case TPS65910_REG_VIO:
 		value &= LDO_SEL_MASK;
 		value >>= LDO_SEL_SHIFT;
-		return pmic->info[id]->voltage_table[value] * 1000;
+		break;
 	default:
 		return -EINVAL;
 	}
 
-	return (LDO_MIN_VOLT + value * step_mv) * 1000;
+	return value;
 }
 
 static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
@@ -914,9 +856,9 @@
 
 /* Regulator ops (except VRTC) */
 static struct regulator_ops tps65910_ops_dcdc = {
-	.is_enabled		= tps65910_is_enabled,
-	.enable			= tps65910_enable,
-	.disable		= tps65910_disable,
+	.is_enabled		= regulator_is_enabled_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
 	.enable_time		= tps65910_enable_time,
 	.set_mode		= tps65910_set_mode,
 	.get_mode		= tps65910_get_mode,
@@ -927,9 +869,9 @@
 };
 
 static struct regulator_ops tps65910_ops_vdd3 = {
-	.is_enabled		= tps65910_is_enabled,
-	.enable			= tps65910_enable,
-	.disable		= tps65910_disable,
+	.is_enabled		= regulator_is_enabled_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
 	.enable_time		= tps65910_enable_time,
 	.set_mode		= tps65910_set_mode,
 	.get_mode		= tps65910_get_mode,
@@ -938,25 +880,25 @@
 };
 
 static struct regulator_ops tps65910_ops = {
-	.is_enabled		= tps65910_is_enabled,
-	.enable			= tps65910_enable,
-	.disable		= tps65910_disable,
+	.is_enabled		= regulator_is_enabled_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
 	.enable_time		= tps65910_enable_time,
 	.set_mode		= tps65910_set_mode,
 	.get_mode		= tps65910_get_mode,
-	.get_voltage		= tps65910_get_voltage,
+	.get_voltage_sel	= tps65910_get_voltage_sel,
 	.set_voltage_sel	= tps65910_set_voltage_sel,
 	.list_voltage		= tps65910_list_voltage,
 };
 
 static struct regulator_ops tps65911_ops = {
-	.is_enabled		= tps65910_is_enabled,
-	.enable			= tps65910_enable,
-	.disable		= tps65910_disable,
+	.is_enabled		= regulator_is_enabled_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
 	.enable_time		= tps65910_enable_time,
 	.set_mode		= tps65910_set_mode,
 	.get_mode		= tps65910_get_mode,
-	.get_voltage		= tps65911_get_voltage,
+	.get_voltage_sel	= tps65911_get_voltage_sel,
 	.set_voltage_sel	= tps65911_set_voltage_sel,
 	.list_voltage		= tps65911_list_voltage,
 };
@@ -1094,23 +1036,141 @@
 	return ret;
 }
 
+#ifdef CONFIG_OF
+
+static struct of_regulator_match tps65910_matches[] = {
+	{ .name = "vrtc",	.driver_data = (void *) &tps65910_regs[0] },
+	{ .name = "vio",	.driver_data = (void *) &tps65910_regs[1] },
+	{ .name = "vdd1",	.driver_data = (void *) &tps65910_regs[2] },
+	{ .name = "vdd2",	.driver_data = (void *) &tps65910_regs[3] },
+	{ .name = "vdd3",	.driver_data = (void *) &tps65910_regs[4] },
+	{ .name = "vdig1",	.driver_data = (void *) &tps65910_regs[5] },
+	{ .name = "vdig2",	.driver_data = (void *) &tps65910_regs[6] },
+	{ .name = "vpll",	.driver_data = (void *) &tps65910_regs[7] },
+	{ .name = "vdac",	.driver_data = (void *) &tps65910_regs[8] },
+	{ .name = "vaux1",	.driver_data = (void *) &tps65910_regs[9] },
+	{ .name = "vaux2",	.driver_data = (void *) &tps65910_regs[10] },
+	{ .name = "vaux33",	.driver_data = (void *) &tps65910_regs[11] },
+	{ .name = "vmmc",	.driver_data = (void *) &tps65910_regs[12] },
+};
+
+static struct of_regulator_match tps65911_matches[] = {
+	{ .name = "vrtc",	.driver_data = (void *) &tps65911_regs[0] },
+	{ .name = "vio",	.driver_data = (void *) &tps65911_regs[1] },
+	{ .name = "vdd1",	.driver_data = (void *) &tps65911_regs[2] },
+	{ .name = "vdd2",	.driver_data = (void *) &tps65911_regs[3] },
+	{ .name = "vddctrl",	.driver_data = (void *) &tps65911_regs[4] },
+	{ .name = "ldo1",	.driver_data = (void *) &tps65911_regs[5] },
+	{ .name = "ldo2",	.driver_data = (void *) &tps65911_regs[6] },
+	{ .name = "ldo3",	.driver_data = (void *) &tps65911_regs[7] },
+	{ .name = "ldo4",	.driver_data = (void *) &tps65911_regs[8] },
+	{ .name = "ldo5",	.driver_data = (void *) &tps65911_regs[9] },
+	{ .name = "ldo6",	.driver_data = (void *) &tps65911_regs[10] },
+	{ .name = "ldo7",	.driver_data = (void *) &tps65911_regs[11] },
+	{ .name = "ldo8",	.driver_data = (void *) &tps65911_regs[12] },
+};
+
+static struct tps65910_board *tps65910_parse_dt_reg_data(
+		struct platform_device *pdev,
+		struct of_regulator_match **tps65910_reg_matches)
+{
+	struct tps65910_board *pmic_plat_data;
+	struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+	struct device_node *np = pdev->dev.parent->of_node;
+	struct device_node *regulators;
+	struct of_regulator_match *matches;
+	unsigned int prop;
+	int idx = 0, ret, count;
+
+	pmic_plat_data = devm_kzalloc(&pdev->dev, sizeof(*pmic_plat_data),
+					GFP_KERNEL);
+
+	if (!pmic_plat_data) {
+		dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
+		return NULL;
+	}
+
+	regulators = of_find_node_by_name(np, "regulators");
+	if (!regulators) {
+		dev_err(&pdev->dev, "regulator node not found\n");
+		return NULL;
+	}
+
+	switch (tps65910_chip_id(tps65910)) {
+	case TPS65910:
+		count = ARRAY_SIZE(tps65910_matches);
+		matches = tps65910_matches;
+		break;
+	case TPS65911:
+		count = ARRAY_SIZE(tps65911_matches);
+		matches = tps65911_matches;
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid tps chip version\n");
+		return NULL;
+	}
+
+	ret = of_regulator_match(pdev->dev.parent, regulators, matches, count);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
+			ret);
+		return NULL;
+	}
+
+	*tps65910_reg_matches = matches;
+
+	for (idx = 0; idx < count; idx++) {
+		if (!matches[idx].init_data || !matches[idx].of_node)
+			continue;
+
+		pmic_plat_data->tps65910_pmic_init_data[idx] =
+							matches[idx].init_data;
+
+		ret = of_property_read_u32(matches[idx].of_node,
+				"ti,regulator-ext-sleep-control", &prop);
+		if (!ret)
+			pmic_plat_data->regulator_ext_sleep_control[idx] = prop;
+	}
+
+	return pmic_plat_data;
+}
+#else
+static inline struct tps65910_board *tps65910_parse_dt_reg_data(
+			struct platform_device *pdev,
+			struct of_regulator_match **tps65910_reg_matches)
+{
+	*tps65910_reg_matches = NULL;
+	return NULL;
+}
+#endif
+
 static __devinit int tps65910_probe(struct platform_device *pdev)
 {
 	struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+	struct regulator_config config = { };
 	struct tps_info *info;
 	struct regulator_init_data *reg_data;
 	struct regulator_dev *rdev;
 	struct tps65910_reg *pmic;
 	struct tps65910_board *pmic_plat_data;
+	struct of_regulator_match *tps65910_reg_matches = NULL;
 	int i, err;
 
 	pmic_plat_data = dev_get_platdata(tps65910->dev);
-	if (!pmic_plat_data)
-		return -EINVAL;
+	if (!pmic_plat_data && tps65910->dev->of_node)
+		pmic_plat_data = tps65910_parse_dt_reg_data(pdev,
+						&tps65910_reg_matches);
 
-	pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
-	if (!pmic)
+	if (!pmic_plat_data) {
+		dev_err(&pdev->dev, "Platform data not found\n");
+		return -EINVAL;
+	}
+
+	pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
+	if (!pmic) {
+		dev_err(&pdev->dev, "Memory allocation failed for pmic\n");
 		return -ENOMEM;
+	}
 
 	mutex_init(&pmic->mutex);
 	pmic->mfd = tps65910;
@@ -1134,30 +1194,29 @@
 		info = tps65911_regs;
 		break;
 	default:
-		pr_err("Invalid tps chip version\n");
-		kfree(pmic);
+		dev_err(&pdev->dev, "Invalid tps chip version\n");
 		return -ENODEV;
 	}
 
-	pmic->desc = kcalloc(pmic->num_regulators,
+	pmic->desc = devm_kzalloc(&pdev->dev, pmic->num_regulators *
 			sizeof(struct regulator_desc), GFP_KERNEL);
 	if (!pmic->desc) {
-		err = -ENOMEM;
-		goto err_free_pmic;
+		dev_err(&pdev->dev, "Memory alloc fails for desc\n");
+		return -ENOMEM;
 	}
 
-	pmic->info = kcalloc(pmic->num_regulators,
+	pmic->info = devm_kzalloc(&pdev->dev, pmic->num_regulators *
 			sizeof(struct tps_info *), GFP_KERNEL);
 	if (!pmic->info) {
-		err = -ENOMEM;
-		goto err_free_desc;
+		dev_err(&pdev->dev, "Memory alloc fails for info\n");
+		return -ENOMEM;
 	}
 
-	pmic->rdev = kcalloc(pmic->num_regulators,
+	pmic->rdev = devm_kzalloc(&pdev->dev, pmic->num_regulators *
 			sizeof(struct regulator_dev *), GFP_KERNEL);
 	if (!pmic->rdev) {
-		err = -ENOMEM;
-		goto err_free_info;
+		dev_err(&pdev->dev, "Memory alloc fails for rdev\n");
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
@@ -1205,9 +1264,18 @@
 
 		pmic->desc[i].type = REGULATOR_VOLTAGE;
 		pmic->desc[i].owner = THIS_MODULE;
+		pmic->desc[i].enable_reg = pmic->get_ctrl_reg(i);
+		pmic->desc[i].enable_mask = TPS65910_SUPPLY_STATE_ENABLED;
 
-		rdev = regulator_register(&pmic->desc[i],
-				tps65910->dev, reg_data, pmic, NULL);
+		config.dev = tps65910->dev;
+		config.init_data = reg_data;
+		config.driver_data = pmic;
+		config.regmap = tps65910->regmap;
+
+		if (tps65910_reg_matches)
+			config.of_node = tps65910_reg_matches[i].of_node;
+
+		rdev = regulator_register(&pmic->desc[i], &config);
 		if (IS_ERR(rdev)) {
 			dev_err(tps65910->dev,
 				"failed to register %s regulator\n",
@@ -1224,13 +1292,6 @@
 err_unregister_regulator:
 	while (--i >= 0)
 		regulator_unregister(pmic->rdev[i]);
-	kfree(pmic->rdev);
-err_free_info:
-	kfree(pmic->info);
-err_free_desc:
-	kfree(pmic->desc);
-err_free_pmic:
-	kfree(pmic);
 	return err;
 }
 
@@ -1242,10 +1303,6 @@
 	for (i = 0; i < pmic->num_regulators; i++)
 		regulator_unregister(pmic->rdev[i]);
 
-	kfree(pmic->rdev);
-	kfree(pmic->info);
-	kfree(pmic->desc);
-	kfree(pmic);
 	return 0;
 }
 
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index b36799b..18b2a1d 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -20,7 +20,6 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
-#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
 #include <linux/mfd/tps65912.h>
@@ -372,12 +371,14 @@
 	return mode;
 }
 
-static int tps65912_list_voltage_dcdc(struct regulator_dev *dev,
-					unsigned selector)
+static int tps65912_list_voltage(struct regulator_dev *dev, unsigned selector)
 {
 	struct tps65912_reg *pmic = rdev_get_drvdata(dev);
 	int range, voltage = 0, id = rdev_get_id(dev);
 
+	if (id >= TPS65912_REG_LDO1 && id <= TPS65912_REG_LDO10)
+		return tps65912_vsel_to_uv_ldo(selector);
+
 	if (id > TPS65912_REG_DCDC4)
 		return -EINVAL;
 
@@ -404,7 +405,7 @@
 	return voltage;
 }
 
-static int tps65912_get_voltage_dcdc(struct regulator_dev *dev)
+static int tps65912_get_voltage_sel(struct regulator_dev *dev)
 {
 	struct tps65912_reg *pmic = rdev_get_drvdata(dev);
 	struct tps65912 *mfd = pmic->mfd;
@@ -418,7 +419,7 @@
 	vsel = tps65912_reg_read(mfd, reg);
 	vsel &= 0x3F;
 
-	return tps65912_list_voltage_dcdc(dev, vsel);
+	return vsel;
 }
 
 static int tps65912_set_voltage_sel(struct regulator_dev *dev,
@@ -436,32 +437,6 @@
 	return tps65912_reg_write(mfd, reg, selector | value);
 }
 
-static int tps65912_get_voltage_ldo(struct regulator_dev *dev)
-{
-	struct tps65912_reg *pmic = rdev_get_drvdata(dev);
-	struct tps65912 *mfd = pmic->mfd;
-	int id = rdev_get_id(dev);
-	int vsel = 0;
-	u8 reg;
-
-	reg = tps65912_get_sel_register(pmic, id);
-	vsel = tps65912_reg_read(mfd, reg);
-	vsel &= 0x3F;
-
-	return tps65912_vsel_to_uv_ldo(vsel);
-}
-
-static int tps65912_list_voltage_ldo(struct regulator_dev *dev,
-					unsigned selector)
-{
-	int ldo = rdev_get_id(dev);
-
-	if (ldo < TPS65912_REG_LDO1 || ldo > TPS65912_REG_LDO10)
-		return -EINVAL;
-
-	return tps65912_vsel_to_uv_ldo(selector);
-}
-
 /* Operations permitted on DCDCx */
 static struct regulator_ops tps65912_ops_dcdc = {
 	.is_enabled = tps65912_reg_is_enabled,
@@ -469,9 +444,9 @@
 	.disable = tps65912_reg_disable,
 	.set_mode = tps65912_set_mode,
 	.get_mode = tps65912_get_mode,
-	.get_voltage = tps65912_get_voltage_dcdc,
+	.get_voltage_sel = tps65912_get_voltage_sel,
 	.set_voltage_sel = tps65912_set_voltage_sel,
-	.list_voltage = tps65912_list_voltage_dcdc,
+	.list_voltage = tps65912_list_voltage,
 };
 
 /* Operations permitted on LDOx */
@@ -479,14 +454,15 @@
 	.is_enabled = tps65912_reg_is_enabled,
 	.enable = tps65912_reg_enable,
 	.disable = tps65912_reg_disable,
-	.get_voltage = tps65912_get_voltage_ldo,
+	.get_voltage_sel = tps65912_get_voltage_sel,
 	.set_voltage_sel = tps65912_set_voltage_sel,
-	.list_voltage = tps65912_list_voltage_ldo,
+	.list_voltage = tps65912_list_voltage,
 };
 
 static __devinit int tps65912_probe(struct platform_device *pdev)
 {
 	struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
+	struct regulator_config config = { };
 	struct tps_info *info;
 	struct regulator_init_data *reg_data;
 	struct regulator_dev *rdev;
@@ -500,7 +476,7 @@
 
 	reg_data = pmic_plat_data->tps65912_pmic_init_data;
 
-	pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
+	pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
 	if (!pmic)
 		return -ENOMEM;
 
@@ -524,8 +500,12 @@
 		pmic->desc[i].type = REGULATOR_VOLTAGE;
 		pmic->desc[i].owner = THIS_MODULE;
 		range = tps65912_get_range(pmic, i);
-		rdev = regulator_register(&pmic->desc[i],
-					tps65912->dev, reg_data, pmic, NULL);
+
+		config.dev = tps65912->dev;
+		config.init_data = reg_data;
+		config.driver_data = pmic;
+
+		rdev = regulator_register(&pmic->desc[i], &config);
 		if (IS_ERR(rdev)) {
 			dev_err(tps65912->dev,
 				"failed to register %s regulator\n",
@@ -542,8 +522,6 @@
 err:
 	while (--i >= 0)
 		regulator_unregister(pmic->rdev[i]);
-
-	kfree(pmic);
 	return err;
 }
 
@@ -554,8 +532,6 @@
 
 	for (i = 0; i < TPS65912_NUM_REGULATOR; i++)
 		regulator_unregister(tps65912_reg->rdev[i]);
-
-	kfree(tps65912_reg);
 	return 0;
 }
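
The tps65912 conversion above also switches from .get_voltage to
.get_voltage_sel: the callback returns the raw selector read from the VSEL
field and the core translates it through .list_voltage, so the driver no
longer converts to microvolts itself. Roughly, with foo_read() and the FOO_*
constants as hypothetical placeholders:

static int foo_get_voltage_sel(struct regulator_dev *rdev)
{
	int val;

	val = foo_read(rdev, FOO_VSEL_REG);	/* hypothetical register read */
	if (val < 0)
		return val;

	return val & FOO_VSEL_MASK;		/* raw selector, not microvolts */
}

/* The core then reports ops->list_voltage(rdev, selector) to consumers. */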
 
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 9cdfc38..c739071 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
-#include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -175,15 +174,14 @@
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
 	int			grp = 0, val;
 
-	if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
-		grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
-	if (grp < 0)
-		return grp;
-
-	if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+	if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS))) {
+		grp = twlreg_grp(rdev);
+		if (grp < 0)
+			return grp;
 		grp &= P1_GRP_6030;
-	else
+	} else {
 		grp = 1;
+	}
 
 	val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
 	val = TWL6030_CFG_STATE_APP(val);
@@ -197,7 +195,7 @@
 	int			grp;
 	int			ret;
 
-	grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+	grp = twlreg_grp(rdev);
 	if (grp < 0)
 		return grp;
 
@@ -205,8 +203,6 @@
 
 	ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
 
-	udelay(info->delay);
-
 	return ret;
 }
 
@@ -217,26 +213,37 @@
 	int			ret;
 
 	if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
-		grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+		grp = twlreg_grp(rdev);
 	if (grp < 0)
 		return grp;
 
 	ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
 			grp << TWL6030_CFG_STATE_GRP_SHIFT |
 			TWL6030_CFG_STATE_ON);
-
-	udelay(info->delay);
-
 	return ret;
 }
 
+static int twl4030reg_enable_time(struct regulator_dev *rdev)
+{
+	struct twlreg_info	*info = rdev_get_drvdata(rdev);
+
+	return info->delay;
+}
+
+static int twl6030reg_enable_time(struct regulator_dev *rdev)
+{
+	struct twlreg_info	*info = rdev_get_drvdata(rdev);
+
+	return info->delay;
+}
+
 static int twl4030reg_disable(struct regulator_dev *rdev)
 {
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
 	int			grp;
 	int			ret;
 
-	grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+	grp = twlreg_grp(rdev);
 	if (grp < 0)
 		return grp;
 
@@ -348,7 +355,7 @@
 	int val;
 
 	if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
-		grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+		grp = twlreg_grp(rdev);
 
 	if (grp < 0)
 		return grp;
@@ -388,14 +395,12 @@
  * VAUX3 at 3V is incorrectly listed in some TI manuals as unsupported.
  * TI are revising the twl5030/tps659x0 specs to support that 3.0V setting.
  */
-#ifdef CONFIG_TWL4030_ALLOW_UNSUPPORTED
-#define UNSUP_MASK	0x0000
-#else
 #define UNSUP_MASK	0x8000
-#endif
 
 #define UNSUP(x)	(UNSUP_MASK | (x))
-#define IS_UNSUP(x)	(UNSUP_MASK & (x))
+#define IS_UNSUP(info, x)			\
+	((UNSUP_MASK & (x)) &&			\
+	 !((info)->features & TWL4030_ALLOW_UNSUPPORTED))
 #define LDO_MV(x)	(~UNSUP_MASK & (x))
 
 
@@ -469,35 +474,16 @@
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
 	int			mV = info->table[index];
 
-	return IS_UNSUP(mV) ? 0 : (LDO_MV(mV) * 1000);
+	return IS_UNSUP(info, mV) ? 0 : (LDO_MV(mV) * 1000);
 }
 
 static int
-twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
-		       unsigned *selector)
+twl4030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
 {
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
-	int			vsel;
 
-	for (vsel = 0; vsel < info->table_len; vsel++) {
-		int mV = info->table[vsel];
-		int uV;
-
-		if (IS_UNSUP(mV))
-			continue;
-		uV = LDO_MV(mV) * 1000;
-
-		/* REVISIT for VAUX2, first match may not be best/lowest */
-
-		/* use the first in-range value */
-		if (min_uV <= uV && uV <= max_uV) {
-			*selector = vsel;
-			return twlreg_write(info, TWL_MODULE_PM_RECEIVER,
-							VREG_VOLTAGE, vsel);
-		}
-	}
-
-	return -EDOM;
+	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE,
+			    selector);
 }
 
 static int twl4030ldo_get_voltage(struct regulator_dev *rdev)
@@ -516,12 +502,13 @@
 static struct regulator_ops twl4030ldo_ops = {
 	.list_voltage	= twl4030ldo_list_voltage,
 
-	.set_voltage	= twl4030ldo_set_voltage,
+	.set_voltage_sel = twl4030ldo_set_voltage_sel,
 	.get_voltage	= twl4030ldo_get_voltage,
 
 	.enable		= twl4030reg_enable,
 	.disable	= twl4030reg_disable,
 	.is_enabled	= twl4030reg_is_enabled,
+	.enable_time	= twl4030reg_enable_time,
 
 	.set_mode	= twl4030reg_set_mode,
 
@@ -642,6 +629,7 @@
 	.enable		= twl6030reg_enable,
 	.disable	= twl6030reg_disable,
 	.is_enabled	= twl6030reg_is_enabled,
+	.enable_time	= twl6030reg_enable_time,
 
 	.set_mode	= twl6030reg_set_mode,
 
@@ -675,6 +663,7 @@
 	.enable		= twl4030reg_enable,
 	.disable	= twl4030reg_disable,
 	.is_enabled	= twl4030reg_is_enabled,
+	.enable_time	= twl4030reg_enable_time,
 
 	.set_mode	= twl4030reg_set_mode,
 
@@ -689,6 +678,7 @@
 	.enable		= twl6030reg_enable,
 	.disable	= twl6030reg_disable,
 	.is_enabled	= twl6030reg_is_enabled,
+	.enable_time	= twl6030reg_enable_time,
 
 	.set_mode	= twl6030reg_set_mode,
 
@@ -699,6 +689,7 @@
 	.enable		= twl6030reg_enable,
 	.disable	= twl6030reg_disable,
 	.is_enabled	= twl6030reg_is_enabled,
+	.enable_time	= twl6030reg_enable_time,
 	.get_status	= twl6030reg_get_status,
 };
 
@@ -806,10 +797,7 @@
 			vsel = 0;
 		else if ((min_uV >= 600000) && (min_uV <= 1300000)) {
 			int calc_uV;
-			vsel = (min_uV - 600000) / 125;
-			if (vsel % 100)
-				vsel += 100;
-			vsel /= 100;
+			vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
 			vsel++;
 			calc_uV = twl6030smps_list_voltage(rdev, vsel);
 			if (calc_uV > max_uV)
@@ -836,10 +824,7 @@
 			vsel = 0;
 		else if ((min_uV >= 700000) && (min_uV <= 1420000)) {
 			int calc_uV;
-			vsel = (min_uV - 700000) / 125;
-			if (vsel % 100)
-				vsel += 100;
-			vsel /= 100;
+			vsel = DIV_ROUND_UP(min_uV - 700000, 12500);
 			vsel++;
 			calc_uV = twl6030smps_list_voltage(rdev, vsel);
 			if (calc_uV > max_uV)
@@ -862,24 +847,18 @@
 			return -EINVAL;
 		break;
 	case SMPS_EXTENDED_EN:
-		if (min_uV == 0)
+		if (min_uV == 0) {
 			vsel = 0;
-		else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
-			vsel = (min_uV - 1852000) / 386;
-			if (vsel % 100)
-				vsel += 100;
-			vsel /= 100;
+		} else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
+			vsel = DIV_ROUND_UP(min_uV - 1852000, 38600);
 			vsel++;
 		}
 		break;
 	case SMPS_OFFSET_EN|SMPS_EXTENDED_EN:
-		if (min_uV == 0)
+		if (min_uV == 0) {
 			vsel = 0;
-		else if ((min_uV >= 2161000) && (max_uV <= 4321000)) {
-			vsel = (min_uV - 2161000) / 386;
-			if (vsel % 100)
-				vsel += 100;
-			vsel /= 100;
+		} else if ((min_uV >= 2161000) && (max_uV <= 4321000)) {
+			vsel = DIV_ROUND_UP(min_uV - 2161000, 38600);
 			vsel++;
 		}
 		break;
@@ -907,6 +886,7 @@
 	.enable			= twl6030reg_enable,
 	.disable		= twl6030reg_disable,
 	.is_enabled		= twl6030reg_is_enabled,
+	.enable_time		= twl6030reg_enable_time,
 
 	.set_mode		= twl6030reg_set_mode,
 
@@ -1194,6 +1174,7 @@
 	struct regulator_dev		*rdev;
 	struct twl_regulator_driver_data	*drvdata;
 	const struct of_device_id	*match;
+	struct regulator_config		config = { };
 
 	match = of_match_device(twl_of_match, &pdev->dev);
 	if (match) {
@@ -1207,10 +1188,12 @@
 		initdata = pdev->dev.platform_data;
 		for (i = 0, info = NULL; i < ARRAY_SIZE(twl_of_match); i++) {
 			info = twl_of_match[i].data;
-			if (!info || info->desc.id != id)
-				continue;
-			break;
+			if (info && info->desc.id == id)
+				break;
 		}
+		if (i == ARRAY_SIZE(twl_of_match))
+			return -ENODEV;
+
 		drvdata = initdata->driver_data;
 		if (!drvdata)
 			return -EINVAL;
@@ -1273,8 +1256,12 @@
 		break;
 	}
 
-	rdev = regulator_register(&info->desc, &pdev->dev, initdata, info,
-							pdev->dev.of_node);
+	config.dev = &pdev->dev;
+	config.init_data = initdata;
+	config.driver_data = info;
+	config.of_node = pdev->dev.of_node;
+
+	rdev = regulator_register(&info->desc, &config);
 	if (IS_ERR(rdev)) {
 		dev_err(&pdev->dev, "can't register %s, %ld\n",
 				info->desc.name, PTR_ERR(rdev));
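
Two changes recur in the twl hunks: the udelay() calls move out of the enable
paths into .enable_time callbacks (the core waits the returned number of
microseconds after .enable() instead of the driver busy-waiting), and the
open-coded three-step rounding is restated as a ceiling division by the real
12.5 mV step. A worked example of the latter, using the 600 mV SMPS range
from the hunk above:

/*
 * min_uV = 1010000:
 *   DIV_ROUND_UP(1010000 - 600000, 12500) = DIV_ROUND_UP(410000, 12500) = 33
 *   vsel = 33 + 1 = 34                  (selector 1 corresponds to 600000 uV)
 *   voltage(34) = 600000 + (34 - 1) * 12500 = 1012500 uV >= min_uV
 * i.e. the lowest selector whose voltage does not fall below min_uV, which is
 * what the removed "/ 125 ... / 100" sequence was approximating.
 */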
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index 518667e..a7c8deb 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -115,7 +115,9 @@
 	if (!pdata)
 		return -EINVAL;
 
-	drvdata = kzalloc(sizeof(struct userspace_consumer_data), GFP_KERNEL);
+	drvdata = devm_kzalloc(&pdev->dev,
+			       sizeof(struct userspace_consumer_data),
+			       GFP_KERNEL);
 	if (drvdata == NULL)
 		return -ENOMEM;
 
@@ -125,16 +127,16 @@
 
 	mutex_init(&drvdata->lock);
 
-	ret = regulator_bulk_get(&pdev->dev, drvdata->num_supplies,
-				 drvdata->supplies);
+	ret = devm_regulator_bulk_get(&pdev->dev, drvdata->num_supplies,
+				      drvdata->supplies);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to get supplies: %d\n", ret);
-		goto err_alloc_supplies;
+		return ret;
 	}
 
 	ret = sysfs_create_group(&pdev->dev.kobj, &attr_group);
 	if (ret != 0)
-		goto err_create_attrs;
+		return ret;
 
 	if (pdata->init_on) {
 		ret = regulator_bulk_enable(drvdata->num_supplies,
@@ -154,11 +156,6 @@
 err_enable:
 	sysfs_remove_group(&pdev->dev.kobj, &attr_group);
 
-err_create_attrs:
-	regulator_bulk_free(drvdata->num_supplies, drvdata->supplies);
-
-err_alloc_supplies:
-	kfree(drvdata);
 	return ret;
 }
 
@@ -171,9 +168,6 @@
 	if (data->enabled)
 		regulator_bulk_disable(data->num_supplies, data->supplies);
 
-	regulator_bulk_free(data->num_supplies, data->supplies);
-	kfree(data);
-
 	return 0;
 }
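
The userspace-consumer changes lean on device-managed (devm_*) resources:
devres releases them automatically when probe fails or the driver is unbound,
which is why the kfree()/regulator_bulk_free() error labels disappear above.
A minimal sketch of the pattern, with struct foo_data as a hypothetical
placeholder:

static int foo_probe(struct platform_device *pdev)
{
	struct foo_data *data;
	int ret;

	/* Released by devres on probe failure or driver unbind. */
	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Same lifetime rules, so plain returns replace the goto ladder. */
	ret = devm_regulator_bulk_get(&pdev->dev, data->num_supplies,
				      data->supplies);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);
	return 0;
}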
 
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index ee0b161..c038e74 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -121,7 +121,7 @@
 	struct virtual_consumer_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) != 0)
+	if (kstrtol(buf, 10, &val) != 0)
 		return count;
 
 	mutex_lock(&data->lock);
@@ -147,7 +147,7 @@
 	struct virtual_consumer_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) != 0)
+	if (kstrtol(buf, 10, &val) != 0)
 		return count;
 
 	mutex_lock(&data->lock);
@@ -173,7 +173,7 @@
 	struct virtual_consumer_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) != 0)
+	if (kstrtol(buf, 10, &val) != 0)
 		return count;
 
 	mutex_lock(&data->lock);
@@ -199,7 +199,7 @@
 	struct virtual_consumer_data *data = dev_get_drvdata(dev);
 	long val;
 
-	if (strict_strtol(buf, 10, &val) != 0)
+	if (kstrtol(buf, 10, &val) != 0)
 		return count;
 
 	mutex_lock(&data->lock);
@@ -291,18 +291,19 @@
 	struct virtual_consumer_data *drvdata;
 	int ret;
 
-	drvdata = kzalloc(sizeof(struct virtual_consumer_data), GFP_KERNEL);
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(struct virtual_consumer_data),
+			       GFP_KERNEL);
 	if (drvdata == NULL)
 		return -ENOMEM;
 
 	mutex_init(&drvdata->lock);
 
-	drvdata->regulator = regulator_get(&pdev->dev, reg_id);
+	drvdata->regulator = devm_regulator_get(&pdev->dev, reg_id);
 	if (IS_ERR(drvdata->regulator)) {
 		ret = PTR_ERR(drvdata->regulator);
 		dev_err(&pdev->dev, "Failed to obtain supply '%s': %d\n",
 			reg_id, ret);
-		goto err;
+		return ret;
 	}
 
 	ret = sysfs_create_group(&pdev->dev.kobj,
@@ -310,7 +311,7 @@
 	if (ret != 0) {
 		dev_err(&pdev->dev,
 			"Failed to create attribute group: %d\n", ret);
-		goto err_regulator;
+		return ret;
 	}
 
 	drvdata->mode = regulator_get_mode(drvdata->regulator);
@@ -318,12 +319,6 @@
 	platform_set_drvdata(pdev, drvdata);
 
 	return 0;
-
-err_regulator:
-	regulator_put(drvdata->regulator);
-err:
-	kfree(drvdata);
-	return ret;
 }
 
 static int __devexit regulator_virtual_remove(struct platform_device *pdev)
@@ -334,9 +329,6 @@
 
 	if (drvdata->enabled)
 		regulator_disable(drvdata->regulator);
-	regulator_put(drvdata->regulator);
-
-	kfree(drvdata);
 
 	platform_set_drvdata(pdev, NULL);
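
strict_strtol() was a deprecated alias for the kstrto* family; kstrtol() has
the same "returns 0 on success" contract and rejects trailing garbage and
overflow, so the sysfs store handlers keep their existing error check:

	long val;

	/* 0 on success; -EINVAL or -ERANGE otherwise. */
	if (kstrtol(buf, 10, &val) != 0)
		return count;		/* ignore malformed input, as before */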
 
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index ff810e7..a885911 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -35,7 +35,7 @@
 #define WM831X_DCDC_MODE_IDLE    2
 #define WM831X_DCDC_MODE_STANDBY 3
 
-#define WM831X_DCDC_MAX_NAME 6
+#define WM831X_DCDC_MAX_NAME 9
 
 /* Register offsets in control block */
 #define WM831X_DCDC_CONTROL_1     0
@@ -50,6 +50,7 @@
 
 struct wm831x_dcdc {
 	char name[WM831X_DCDC_MAX_NAME];
+	char supply_name[WM831X_DCDC_MAX_NAME];
 	struct regulator_desc desc;
 	int base;
 	struct wm831x *wm831x;
@@ -60,41 +61,6 @@
 	int dvs_vsel;
 };
 
-static int wm831x_dcdc_is_enabled(struct regulator_dev *rdev)
-{
-	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
-	struct wm831x *wm831x = dcdc->wm831x;
-	int mask = 1 << rdev_get_id(rdev);
-	int reg;
-
-	reg = wm831x_reg_read(wm831x, WM831X_DCDC_ENABLE);
-	if (reg < 0)
-		return reg;
-
-	if (reg & mask)
-		return 1;
-	else
-		return 0;
-}
-
-static int wm831x_dcdc_enable(struct regulator_dev *rdev)
-{
-	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
-	struct wm831x *wm831x = dcdc->wm831x;
-	int mask = 1 << rdev_get_id(rdev);
-
-	return wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, mask, mask);
-}
-
-static int wm831x_dcdc_disable(struct regulator_dev *rdev)
-{
-	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
-	struct wm831x *wm831x = dcdc->wm831x;
-	int mask = 1 << rdev_get_id(rdev);
-
-	return wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, mask, 0);
-}
-
 static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
 
 {
@@ -414,9 +380,9 @@
 	.set_current_limit = wm831x_buckv_set_current_limit,
 	.get_current_limit = wm831x_buckv_get_current_limit,
 
-	.is_enabled = wm831x_dcdc_is_enabled,
-	.enable = wm831x_dcdc_enable,
-	.disable = wm831x_dcdc_disable,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 	.get_status = wm831x_dcdc_get_status,
 	.get_mode = wm831x_dcdc_get_mode,
 	.set_mode = wm831x_dcdc_set_mode,
@@ -437,23 +403,17 @@
 	if (!pdata || !pdata->dvs_gpio)
 		return;
 
-	ret = gpio_request(pdata->dvs_gpio, "DCDC DVS");
-	if (ret < 0) {
-		dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
-			dcdc->name, ret);
-		return;
-	}
-
 	/* gpiolib won't let us read the GPIO status so pick the higher
 	 * of the two existing voltages so we take it as platform data.
 	 */
 	dcdc->dvs_gpio_state = pdata->dvs_init_state;
 
-	ret = gpio_direction_output(pdata->dvs_gpio, dcdc->dvs_gpio_state);
+	ret = gpio_request_one(pdata->dvs_gpio,
+			       dcdc->dvs_gpio_state ? GPIOF_INIT_HIGH : 0,
+			       "DCDC DVS");
 	if (ret < 0) {
-		dev_err(wm831x->dev, "Failed to enable %s DVS GPIO: %d\n",
+		dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
 			dcdc->name, ret);
-		gpio_free(pdata->dvs_gpio);
 		return;
 	}
 
@@ -498,6 +458,7 @@
 {
 	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
 	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+	struct regulator_config config = { };
 	int id;
 	struct wm831x_dcdc *dcdc;
 	struct resource *res;
@@ -511,9 +472,6 @@
 
 	dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);
 
-	if (pdata == NULL || pdata->dcdc[id] == NULL)
-		return -ENODEV;
-
 	dcdc = devm_kzalloc(&pdev->dev,  sizeof(struct wm831x_dcdc),
 			    GFP_KERNEL);
 	if (dcdc == NULL) {
@@ -533,11 +491,18 @@
 
 	snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1);
 	dcdc->desc.name = dcdc->name;
+
+	snprintf(dcdc->supply_name, sizeof(dcdc->supply_name),
+		 "DC%dVDD", id + 1);
+	dcdc->desc.supply_name = dcdc->supply_name;
+
 	dcdc->desc.id = id;
 	dcdc->desc.type = REGULATOR_VOLTAGE;
 	dcdc->desc.n_voltages = WM831X_BUCKV_MAX_SELECTOR + 1;
 	dcdc->desc.ops = &wm831x_buckv_ops;
 	dcdc->desc.owner = THIS_MODULE;
+	dcdc->desc.enable_reg = WM831X_DCDC_ENABLE;
+	dcdc->desc.enable_mask = 1 << id;
 
 	ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
 	if (ret < 0) {
@@ -553,11 +518,16 @@
 	}
 	dcdc->dvs_vsel = ret & WM831X_DC1_DVS_VSEL_MASK;
 
-	if (pdata->dcdc[id])
+	if (pdata && pdata->dcdc[id])
 		wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
 
-	dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
-					     pdata->dcdc[id], dcdc, NULL);
+	config.dev = pdev->dev.parent;
+	if (pdata)
+		config.init_data = pdata->dcdc[id];
+	config.driver_data = dcdc;
+	config.regmap = wm831x->regmap;
+
+	dcdc->regulator = regulator_register(&dcdc->desc, &config);
 	if (IS_ERR(dcdc->regulator)) {
 		ret = PTR_ERR(dcdc->regulator);
 		dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -675,29 +645,15 @@
 	return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_buckp_get_voltage_sel(struct regulator_dev *rdev)
-{
-	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
-	struct wm831x *wm831x = dcdc->wm831x;
-	u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
-	int val;
-
-	val = wm831x_reg_read(wm831x, reg);
-	if (val < 0)
-		return val;
-
-	return val & WM831X_DC3_ON_VSEL_MASK;
-}
-
 static struct regulator_ops wm831x_buckp_ops = {
 	.set_voltage = wm831x_buckp_set_voltage,
-	.get_voltage_sel = wm831x_buckp_get_voltage_sel,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
 	.list_voltage = wm831x_buckp_list_voltage,
 	.set_suspend_voltage = wm831x_buckp_set_suspend_voltage,
 
-	.is_enabled = wm831x_dcdc_is_enabled,
-	.enable = wm831x_dcdc_enable,
-	.disable = wm831x_dcdc_disable,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 	.get_status = wm831x_dcdc_get_status,
 	.get_mode = wm831x_dcdc_get_mode,
 	.set_mode = wm831x_dcdc_set_mode,
@@ -708,6 +664,7 @@
 {
 	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
 	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+	struct regulator_config config = { };
 	int id;
 	struct wm831x_dcdc *dcdc;
 	struct resource *res;
@@ -721,9 +678,6 @@
 
 	dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);
 
-	if (pdata == NULL || pdata->dcdc[id] == NULL)
-		return -ENODEV;
-
 	dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
 			    GFP_KERNEL);
 	if (dcdc == NULL) {
@@ -743,14 +697,28 @@
 
 	snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1);
 	dcdc->desc.name = dcdc->name;
+
+	snprintf(dcdc->supply_name, sizeof(dcdc->supply_name),
+		 "DC%dVDD", id + 1);
+	dcdc->desc.supply_name = dcdc->supply_name;
+
 	dcdc->desc.id = id;
 	dcdc->desc.type = REGULATOR_VOLTAGE;
 	dcdc->desc.n_voltages = WM831X_BUCKP_MAX_SELECTOR + 1;
 	dcdc->desc.ops = &wm831x_buckp_ops;
 	dcdc->desc.owner = THIS_MODULE;
+	dcdc->desc.vsel_reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
+	dcdc->desc.vsel_mask = WM831X_DC3_ON_VSEL_MASK;
+	dcdc->desc.enable_reg = WM831X_DCDC_ENABLE;
+	dcdc->desc.enable_mask = 1 << id;
 
-	dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
-					     pdata->dcdc[id], dcdc, NULL);
+	config.dev = pdev->dev.parent;
+	if (pdata)
+		config.init_data = pdata->dcdc[id];
+	config.driver_data = dcdc;
+	config.regmap = wm831x->regmap;
+
+	dcdc->regulator = regulator_register(&dcdc->desc, &config);
 	if (IS_ERR(dcdc->regulator)) {
 		ret = PTR_ERR(dcdc->regulator);
 		dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -832,15 +800,16 @@
 static struct regulator_ops wm831x_boostp_ops = {
 	.get_status = wm831x_boostp_get_status,
 
-	.is_enabled = wm831x_dcdc_is_enabled,
-	.enable = wm831x_dcdc_enable,
-	.disable = wm831x_dcdc_disable,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 };
 
 static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
 {
 	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
 	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+	struct regulator_config config = { };
 	int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
 	struct wm831x_dcdc *dcdc;
 	struct resource *res;
@@ -851,7 +820,7 @@
 	if (pdata == NULL || pdata->dcdc[id] == NULL)
 		return -ENODEV;
 
-	dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
+	dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc), GFP_KERNEL);
 	if (dcdc == NULL) {
 		dev_err(&pdev->dev, "Unable to allocate private data\n");
 		return -ENOMEM;
@@ -873,9 +842,16 @@
 	dcdc->desc.type = REGULATOR_VOLTAGE;
 	dcdc->desc.ops = &wm831x_boostp_ops;
 	dcdc->desc.owner = THIS_MODULE;
+	dcdc->desc.enable_reg = WM831X_DCDC_ENABLE;
+	dcdc->desc.enable_mask = 1 << id;
 
-	dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
-					     pdata->dcdc[id], dcdc, NULL);
+	config.dev = pdev->dev.parent;
+	if (pdata)
+		config.init_data = pdata->dcdc[id];
+	config.driver_data = dcdc;
+	config.regmap = wm831x->regmap;
+
+	dcdc->regulator = regulator_register(&dcdc->desc, &config);
 	if (IS_ERR(dcdc->regulator)) {
 		ret = PTR_ERR(dcdc->regulator);
 		dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -900,7 +876,6 @@
 err_regulator:
 	regulator_unregister(dcdc->regulator);
 err:
-	kfree(dcdc);
 	return ret;
 }
 
@@ -912,7 +887,6 @@
 
 	free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
 	regulator_unregister(dcdc->regulator);
-	kfree(dcdc);
 
 	return 0;
 }
@@ -936,9 +910,9 @@
 #define WM831X_EPE_BASE 6
 
 static struct regulator_ops wm831x_epe_ops = {
-	.is_enabled = wm831x_dcdc_is_enabled,
-	.enable = wm831x_dcdc_enable,
-	.disable = wm831x_dcdc_disable,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 	.get_status = wm831x_dcdc_get_status,
 };
 
@@ -946,16 +920,14 @@
 {
 	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
 	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+	struct regulator_config config = { };
 	int id = pdev->id % ARRAY_SIZE(pdata->epe);
 	struct wm831x_dcdc *dcdc;
 	int ret;
 
 	dev_dbg(&pdev->dev, "Probing EPE%d\n", id + 1);
 
-	if (pdata == NULL || pdata->epe[id] == NULL)
-		return -ENODEV;
-
-	dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
+	dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc), GFP_KERNEL);
 	if (dcdc == NULL) {
 		dev_err(&pdev->dev, "Unable to allocate private data\n");
 		return -ENOMEM;
@@ -972,9 +944,16 @@
 	dcdc->desc.ops = &wm831x_epe_ops;
 	dcdc->desc.type = REGULATOR_VOLTAGE;
 	dcdc->desc.owner = THIS_MODULE;
+	dcdc->desc.enable_reg = WM831X_DCDC_ENABLE;
+	dcdc->desc.enable_mask = 1 << dcdc->desc.id;
 
-	dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
-					     pdata->epe[id], dcdc, NULL);
+	config.dev = pdev->dev.parent;
+	if (pdata)
+		config.init_data = pdata->epe[id];
+	config.driver_data = dcdc;
+	config.regmap = wm831x->regmap;
+
+	dcdc->regulator = regulator_register(&dcdc->desc, &config);
 	if (IS_ERR(dcdc->regulator)) {
 		ret = PTR_ERR(dcdc->regulator);
 		dev_err(wm831x->dev, "Failed to register EPE%d: %d\n",
@@ -987,7 +966,6 @@
 	return 0;
 
 err:
-	kfree(dcdc);
 	return ret;
 }
 
@@ -996,9 +974,7 @@
 	struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
 
 	platform_set_drvdata(pdev, NULL);
-
 	regulator_unregister(dcdc->regulator);
-	kfree(dcdc);
 
 	return 0;
 }
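
The wm831x hunks drop the hand-rolled is_enabled/enable/disable callbacks in
favour of the generic regmap helpers; those only need enable_reg/enable_mask
(and vsel_reg/vsel_mask for the voltage helpers) in the descriptor plus the
regmap passed through regulator_config, while the new desc.supply_name
("DC%dVDD") names the input supply the core should look up for each
regulator. Roughly, with the FOO_* constants as hypothetical placeholders:

static struct regulator_ops foo_ops = {
	.is_enabled	 = regulator_is_enabled_regmap,
	.enable		 = regulator_enable_regmap,
	.disable	 = regulator_disable_regmap,
	.get_voltage_sel = regulator_get_voltage_sel_regmap,
};

static struct regulator_desc foo_desc = {
	.name		= "FOO",
	.supply_name	= "FOOVDD",		/* input supply resolved by the core */
	.ops		= &foo_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.enable_reg	= FOO_ENABLE_REG,	/* register holding the enable bit */
	.enable_mask	= BIT(0),		/* enable bit within that register */
	.vsel_reg	= FOO_ON_CONFIG_REG,	/* register holding the VSEL field */
	.vsel_mask	= FOO_VSEL_MASK,
};

/* ...and in probe: config.regmap = chip->regmap before regulator_register(). */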
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index b414e09..b50ab77 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -154,6 +154,7 @@
 	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
 	struct wm831x_isink *isink;
 	int id = pdev->id % ARRAY_SIZE(pdata->isink);
+	struct regulator_config config = { };
 	struct resource *res;
 	int ret, irq;
 
@@ -189,8 +190,11 @@
 	isink->desc.type = REGULATOR_CURRENT;
 	isink->desc.owner = THIS_MODULE;
 
-	isink->regulator = regulator_register(&isink->desc, &pdev->dev,
-					     pdata->isink[id], isink, NULL);
+	config.dev = pdev->dev.parent;
+	config.init_data = pdata->isink[id];
+	config.driver_data = isink;
+
+	isink->regulator = regulator_register(&isink->desc, &config);
 	if (IS_ERR(isink->regulator)) {
 		ret = PTR_ERR(isink->regulator);
 		dev_err(wm831x->dev, "Failed to register ISINK%d: %d\n",
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 641e9f6..aa1f8b3 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -25,7 +25,7 @@
 #include <linux/mfd/wm831x/regulator.h>
 #include <linux/mfd/wm831x/pdata.h>
 
-#define WM831X_LDO_MAX_NAME 6
+#define WM831X_LDO_MAX_NAME 9
 
 #define WM831X_LDO_CONTROL       0
 #define WM831X_LDO_ON_CONTROL    1
@@ -36,6 +36,7 @@
 
 struct wm831x_ldo {
 	char name[WM831X_LDO_MAX_NAME];
+	char supply_name[WM831X_LDO_MAX_NAME];
 	struct regulator_desc desc;
 	int base;
 	struct wm831x *wm831x;
@@ -46,41 +47,6 @@
  * Shared
  */
 
-static int wm831x_ldo_is_enabled(struct regulator_dev *rdev)
-{
-	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
-	struct wm831x *wm831x = ldo->wm831x;
-	int mask = 1 << rdev_get_id(rdev);
-	int reg;
-
-	reg = wm831x_reg_read(wm831x, WM831X_LDO_ENABLE);
-	if (reg < 0)
-		return reg;
-
-	if (reg & mask)
-		return 1;
-	else
-		return 0;
-}
-
-static int wm831x_ldo_enable(struct regulator_dev *rdev)
-{
-	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
-	struct wm831x *wm831x = ldo->wm831x;
-	int mask = 1 << rdev_get_id(rdev);
-
-	return wm831x_set_bits(wm831x, WM831X_LDO_ENABLE, mask, mask);
-}
-
-static int wm831x_ldo_disable(struct regulator_dev *rdev)
-{
-	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
-	struct wm831x *wm831x = ldo->wm831x;
-	int mask = 1 << rdev_get_id(rdev);
-
-	return wm831x_set_bits(wm831x, WM831X_LDO_ENABLE, mask, 0);
-}
-
 static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
 {
 	struct wm831x_ldo *ldo = data;
@@ -105,7 +71,7 @@
 	/* 0.9-1.6V in 50mV steps */
 	if (selector <= WM831X_GP_LDO_SELECTOR_LOW)
 		return 900000 + (selector * 50000);
-	/* 1.7-3.3V in 50mV steps */
+	/* 1.7-3.3V in 100mV steps */
 	if (selector <= WM831X_GP_LDO_MAX_SELECTOR)
 		return 1600000 + ((selector - WM831X_GP_LDO_SELECTOR_LOW)
 				  * 100000);
@@ -160,22 +126,6 @@
 	return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_gp_ldo_get_voltage_sel(struct regulator_dev *rdev)
-{
-	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
-	struct wm831x *wm831x = ldo->wm831x;
-	int reg = ldo->base + WM831X_LDO_ON_CONTROL;
-	int ret;
-
-	ret = wm831x_reg_read(wm831x, reg);
-	if (ret < 0)
-		return ret;
-
-	ret &= WM831X_LDO1_ON_VSEL_MASK;
-
-	return ret;
-}
-
 static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
@@ -293,7 +243,7 @@
 
 static struct regulator_ops wm831x_gp_ldo_ops = {
 	.list_voltage = wm831x_gp_ldo_list_voltage,
-	.get_voltage_sel = wm831x_gp_ldo_get_voltage_sel,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
 	.set_voltage = wm831x_gp_ldo_set_voltage,
 	.set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage,
 	.get_mode = wm831x_gp_ldo_get_mode,
@@ -301,15 +251,16 @@
 	.get_status = wm831x_gp_ldo_get_status,
 	.get_optimum_mode = wm831x_gp_ldo_get_optimum_mode,
 
-	.is_enabled = wm831x_ldo_is_enabled,
-	.enable = wm831x_ldo_enable,
-	.disable = wm831x_ldo_disable,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 };
 
 static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
 {
 	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
 	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+	struct regulator_config config = { };
 	int id;
 	struct wm831x_ldo *ldo;
 	struct resource *res;
@@ -323,9 +274,6 @@
 
 	dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
 
-	if (pdata == NULL || pdata->ldo[id] == NULL)
-		return -ENODEV;
-
 	ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
 	if (ldo == NULL) {
 		dev_err(&pdev->dev, "Unable to allocate private data\n");
@@ -344,14 +292,28 @@
 
 	snprintf(ldo->name, sizeof(ldo->name), "LDO%d", id + 1);
 	ldo->desc.name = ldo->name;
+
+	snprintf(ldo->supply_name, sizeof(ldo->supply_name),
+		 "LDO%dVDD", id + 1);
+	ldo->desc.supply_name = ldo->supply_name;
+
 	ldo->desc.id = id;
 	ldo->desc.type = REGULATOR_VOLTAGE;
 	ldo->desc.n_voltages = WM831X_GP_LDO_MAX_SELECTOR + 1;
 	ldo->desc.ops = &wm831x_gp_ldo_ops;
 	ldo->desc.owner = THIS_MODULE;
+	ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL;
+	ldo->desc.vsel_mask = WM831X_LDO1_ON_VSEL_MASK;
+	ldo->desc.enable_reg = WM831X_LDO_ENABLE;
+	ldo->desc.enable_mask = 1 << id;
 
-	ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
-					     pdata->ldo[id], ldo, NULL);
+	config.dev = pdev->dev.parent;
+	if (pdata)
+		config.init_data = pdata->ldo[id];
+	config.driver_data = ldo;
+	config.regmap = wm831x->regmap;
+
+	ldo->regulator = regulator_register(&ldo->desc, &config);
 	if (IS_ERR(ldo->regulator)) {
 		ret = PTR_ERR(ldo->regulator);
 		dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -414,7 +376,7 @@
 	/* 1-1.6V in 50mV steps */
 	if (selector <= WM831X_ALDO_SELECTOR_LOW)
 		return 1000000 + (selector * 50000);
-	/* 1.7-3.5V in 50mV steps */
+	/* 1.7-3.5V in 100mV steps */
 	if (selector <= WM831X_ALDO_MAX_SELECTOR)
 		return 1600000 + ((selector - WM831X_ALDO_SELECTOR_LOW)
 				  * 100000);
@@ -468,22 +430,6 @@
 	return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_aldo_get_voltage_sel(struct regulator_dev *rdev)
-{
-	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
-	struct wm831x *wm831x = ldo->wm831x;
-	int reg = ldo->base + WM831X_LDO_ON_CONTROL;
-	int ret;
-
-	ret = wm831x_reg_read(wm831x, reg);
-	if (ret < 0)
-		return ret;
-
-	ret &= WM831X_LDO7_ON_VSEL_MASK;
-
-	return ret;
-}
-
 static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
@@ -559,22 +505,23 @@
 
 static struct regulator_ops wm831x_aldo_ops = {
 	.list_voltage = wm831x_aldo_list_voltage,
-	.get_voltage_sel = wm831x_aldo_get_voltage_sel,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
 	.set_voltage = wm831x_aldo_set_voltage,
 	.set_suspend_voltage = wm831x_aldo_set_suspend_voltage,
 	.get_mode = wm831x_aldo_get_mode,
 	.set_mode = wm831x_aldo_set_mode,
 	.get_status = wm831x_aldo_get_status,
 
-	.is_enabled = wm831x_ldo_is_enabled,
-	.enable = wm831x_ldo_enable,
-	.disable = wm831x_ldo_disable,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 };
 
 static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
 {
 	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
 	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+	struct regulator_config config = { };
 	int id;
 	struct wm831x_ldo *ldo;
 	struct resource *res;
@@ -588,9 +535,6 @@
 
 	dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
 
-	if (pdata == NULL || pdata->ldo[id] == NULL)
-		return -ENODEV;
-
 	ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
 	if (ldo == NULL) {
 		dev_err(&pdev->dev, "Unable to allocate private data\n");
@@ -609,14 +553,28 @@
 
 	snprintf(ldo->name, sizeof(ldo->name), "LDO%d", id + 1);
 	ldo->desc.name = ldo->name;
+
+	snprintf(ldo->supply_name, sizeof(ldo->supply_name),
+		 "LDO%dVDD", id + 1);
+	ldo->desc.supply_name = ldo->supply_name;
+
 	ldo->desc.id = id;
 	ldo->desc.type = REGULATOR_VOLTAGE;
 	ldo->desc.n_voltages = WM831X_ALDO_MAX_SELECTOR + 1;
 	ldo->desc.ops = &wm831x_aldo_ops;
 	ldo->desc.owner = THIS_MODULE;
+	ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL;
+	ldo->desc.vsel_mask = WM831X_LDO7_ON_VSEL_MASK;
+	ldo->desc.enable_reg = WM831X_LDO_ENABLE;
+	ldo->desc.enable_mask = 1 << id;
 
-	ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
-					     pdata->ldo[id], ldo, NULL);
+	config.dev = pdev->dev.parent;
+	if (pdata)
+		config.init_data = pdata->ldo[id];
+	config.driver_data = ldo;
+	config.regmap = wm831x->regmap;
+
+	ldo->regulator = regulator_register(&ldo->desc, &config);
 	if (IS_ERR(ldo->regulator)) {
 		ret = PTR_ERR(ldo->regulator);
 		dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -668,15 +626,6 @@
 
 #define WM831X_ALIVE_LDO_MAX_SELECTOR 0xf
 
-static int wm831x_alive_ldo_list_voltage(struct regulator_dev *rdev,
-				      unsigned int selector)
-{
-	/* 0.8-1.55V in 50mV steps */
-	if (selector <= WM831X_ALIVE_LDO_MAX_SELECTOR)
-		return 800000 + (selector * 50000);
-	return -EINVAL;
-}
-
 static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev,
 					    int reg,
 					    int min_uV, int max_uV,
@@ -688,7 +637,7 @@
 
 	vsel = (min_uV - 800000) / 50000;
 
-	ret = wm831x_alive_ldo_list_voltage(rdev, vsel);
+	ret = regulator_list_voltage_linear(rdev, vsel);
 	if (ret < 0)
 		return ret;
 	if (ret < min_uV || ret > max_uV)
@@ -720,22 +669,6 @@
 	return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_alive_ldo_get_voltage_sel(struct regulator_dev *rdev)
-{
-	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
-	struct wm831x *wm831x = ldo->wm831x;
-	int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL;
-	int ret;
-
-	ret = wm831x_reg_read(wm831x, reg);
-	if (ret < 0)
-		return ret;
-
-	ret &= WM831X_LDO11_ON_VSEL_MASK;
-
-	return ret;
-}
-
 static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
@@ -754,21 +687,22 @@
 }
 
 static struct regulator_ops wm831x_alive_ldo_ops = {
-	.list_voltage = wm831x_alive_ldo_list_voltage,
-	.get_voltage_sel = wm831x_alive_ldo_get_voltage_sel,
+	.list_voltage = regulator_list_voltage_linear,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
 	.set_voltage = wm831x_alive_ldo_set_voltage,
 	.set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage,
 	.get_status = wm831x_alive_ldo_get_status,
 
-	.is_enabled = wm831x_ldo_is_enabled,
-	.enable = wm831x_ldo_enable,
-	.disable = wm831x_ldo_disable,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
 };
 
 static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
 {
 	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
 	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+	struct regulator_config config = { };
 	int id;
 	struct wm831x_ldo *ldo;
 	struct resource *res;
@@ -783,9 +717,6 @@
 
 	dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
 
-	if (pdata == NULL || pdata->ldo[id] == NULL)
-		return -ENODEV;
-
 	ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
 	if (ldo == NULL) {
 		dev_err(&pdev->dev, "Unable to allocate private data\n");
@@ -804,14 +735,30 @@
 
 	snprintf(ldo->name, sizeof(ldo->name), "LDO%d", id + 1);
 	ldo->desc.name = ldo->name;
+
+	snprintf(ldo->supply_name, sizeof(ldo->supply_name),
+		 "LDO%dVDD", id + 1);
+	ldo->desc.supply_name = ldo->supply_name;
+
 	ldo->desc.id = id;
 	ldo->desc.type = REGULATOR_VOLTAGE;
 	ldo->desc.n_voltages = WM831X_ALIVE_LDO_MAX_SELECTOR + 1;
 	ldo->desc.ops = &wm831x_alive_ldo_ops;
 	ldo->desc.owner = THIS_MODULE;
+	ldo->desc.vsel_reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL;
+	ldo->desc.vsel_mask = WM831X_LDO11_ON_VSEL_MASK;
+	ldo->desc.enable_reg = WM831X_LDO_ENABLE;
+	ldo->desc.enable_mask = 1 << id;
+	ldo->desc.min_uV = 800000;
+	ldo->desc.uV_step = 50000;
 
-	ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
-					     pdata->ldo[id], ldo, NULL);
+	config.dev = pdev->dev.parent;
+	if (pdata)
+		config.init_data = pdata->ldo[id];
+	config.driver_data = ldo;
+	config.regmap = wm831x->regmap;
+
+	ldo->regulator = regulator_register(&ldo->desc, &config);
 	if (IS_ERR(ldo->regulator)) {
 		ret = PTR_ERR(ldo->regulator);
 		dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 05ecfb8..94e550d 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1269,7 +1269,7 @@
 	.enable_time = wm8350_isink_enable_time,
 };
 
-static struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
+static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
 	{
 		.name = "DCDC1",
 		.id = WM8350_DCDC_1,
@@ -1398,6 +1398,7 @@
 static int wm8350_regulator_probe(struct platform_device *pdev)
 {
 	struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
+	struct regulator_config config = { };
 	struct regulator_dev *rdev;
 	int ret;
 	u16 val;
@@ -1425,10 +1426,12 @@
 		break;
 	}
 
+	config.dev = &pdev->dev;
+	config.init_data = pdev->dev.platform_data;
+	config.driver_data = dev_get_drvdata(&pdev->dev);
+
 	/* register regulator */
-	rdev = regulator_register(&wm8350_reg[pdev->id], &pdev->dev,
-				  pdev->dev.platform_data,
-				  dev_get_drvdata(&pdev->dev), NULL);
+	rdev = regulator_register(&wm8350_reg[pdev->id], &config);
 	if (IS_ERR(rdev)) {
 		dev_err(&pdev->dev, "failed to register %s\n",
 			wm8350_reg[pdev->id].name);
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 8477153..69a2b7c 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -19,31 +19,6 @@
 #include <linux/regulator/driver.h>
 #include <linux/mfd/wm8400-private.h>
 
-static int wm8400_ldo_is_enabled(struct regulator_dev *dev)
-{
-	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
-	u16 val;
-
-	val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev));
-	return (val & WM8400_LDO1_ENA) != 0;
-}
-
-static int wm8400_ldo_enable(struct regulator_dev *dev)
-{
-	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
-
-	return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev),
-			       WM8400_LDO1_ENA, WM8400_LDO1_ENA);
-}
-
-static int wm8400_ldo_disable(struct regulator_dev *dev)
-{
-	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
-
-	return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev),
-			       WM8400_LDO1_ENA, 0);
-}
-
 static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
 				   unsigned selector)
 {
@@ -56,21 +31,9 @@
 		return 1600000 + ((selector - 14) * 100000);
 }
 
-static int wm8400_ldo_get_voltage_sel(struct regulator_dev *dev)
+static int wm8400_ldo_map_voltage(struct regulator_dev *dev,
+				  int min_uV, int max_uV)
 {
-	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
-	u16 val;
-
-	val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev));
-	val &= WM8400_LDO1_VSEL_MASK;
-
-	return val;
-}
-
-static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV, unsigned *selector)
-{
-	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
 	u16 val;
 
 	if (min_uV < 900000 || min_uV > 3300000)
@@ -94,91 +57,18 @@
 		val += 0xf;
 	}
 
-	*selector = val;
-
-	return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev),
-			       WM8400_LDO1_VSEL_MASK, val);
-}
-
-static struct regulator_ops wm8400_ldo_ops = {
-	.is_enabled = wm8400_ldo_is_enabled,
-	.enable = wm8400_ldo_enable,
-	.disable = wm8400_ldo_disable,
-	.list_voltage = wm8400_ldo_list_voltage,
-	.get_voltage_sel = wm8400_ldo_get_voltage_sel,
-	.set_voltage = wm8400_ldo_set_voltage,
-};
-
-static int wm8400_dcdc_is_enabled(struct regulator_dev *dev)
-{
-	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
-	int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
-	u16 val;
-
-	val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset);
-	return (val & WM8400_DC1_ENA) != 0;
-}
-
-static int wm8400_dcdc_enable(struct regulator_dev *dev)
-{
-	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
-	int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
-
-	return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
-			       WM8400_DC1_ENA, WM8400_DC1_ENA);
-}
-
-static int wm8400_dcdc_disable(struct regulator_dev *dev)
-{
-	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
-	int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
-
-	return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
-			       WM8400_DC1_ENA, 0);
-}
-
-static int wm8400_dcdc_list_voltage(struct regulator_dev *dev,
-				    unsigned selector)
-{
-	if (selector > WM8400_DC1_VSEL_MASK)
-		return -EINVAL;
-
-	return 850000 + (selector * 25000);
-}
-
-static int wm8400_dcdc_get_voltage_sel(struct regulator_dev *dev)
-{
-	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
-	u16 val;
-	int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
-
-	val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset);
-	val &= WM8400_DC1_VSEL_MASK;
-
 	return val;
 }
 
-static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
-				   int min_uV, int max_uV, unsigned *selector)
-{
-	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
-	u16 val;
-	int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2;
-
-	if (min_uV < 850000)
-		return -EINVAL;
-
-	val = DIV_ROUND_UP(min_uV - 850000, 25000);
-
-	if (850000 + (25000 * val) > max_uV)
-		return -EINVAL;
-	BUG_ON(850000 + (25000 * val) < min_uV);
-
-	*selector = val;
-
-	return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
-			       WM8400_DC1_VSEL_MASK, val);
-}
+static struct regulator_ops wm8400_ldo_ops = {
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.list_voltage = wm8400_ldo_list_voltage,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.map_voltage = wm8400_ldo_map_voltage,
+};
 
 static unsigned int wm8400_dcdc_get_mode(struct regulator_dev *dev)
 {
@@ -258,12 +148,12 @@
 }
 
 static struct regulator_ops wm8400_dcdc_ops = {
-	.is_enabled = wm8400_dcdc_is_enabled,
-	.enable = wm8400_dcdc_enable,
-	.disable = wm8400_dcdc_disable,
-	.list_voltage = wm8400_dcdc_list_voltage,
-	.get_voltage_sel = wm8400_dcdc_get_voltage_sel,
-	.set_voltage = wm8400_dcdc_set_voltage,
+	.is_enabled = regulator_is_enabled_regmap,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.list_voltage = regulator_list_voltage_linear,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
 	.get_mode = wm8400_dcdc_get_mode,
 	.set_mode = wm8400_dcdc_set_mode,
 	.get_optimum_mode = wm8400_dcdc_get_optimum_mode,
@@ -274,7 +164,11 @@
 		.name = "LDO1",
 		.id = WM8400_LDO1,
 		.ops = &wm8400_ldo_ops,
+		.enable_reg = WM8400_LDO1_CONTROL,
+		.enable_mask = WM8400_LDO1_ENA,
 		.n_voltages = WM8400_LDO1_VSEL_MASK + 1,
+		.vsel_reg = WM8400_LDO1_CONTROL,
+		.vsel_mask = WM8400_LDO1_VSEL_MASK,
 		.type = REGULATOR_VOLTAGE,
 		.owner = THIS_MODULE,
 	},
@@ -282,15 +176,23 @@
 		.name = "LDO2",
 		.id = WM8400_LDO2,
 		.ops = &wm8400_ldo_ops,
+		.enable_reg = WM8400_LDO2_CONTROL,
+		.enable_mask = WM8400_LDO2_ENA,
 		.n_voltages = WM8400_LDO2_VSEL_MASK + 1,
 		.type = REGULATOR_VOLTAGE,
+		.vsel_reg = WM8400_LDO2_CONTROL,
+		.vsel_mask = WM8400_LDO2_VSEL_MASK,
 		.owner = THIS_MODULE,
 	},
 	{
 		.name = "LDO3",
 		.id = WM8400_LDO3,
 		.ops = &wm8400_ldo_ops,
+		.enable_reg = WM8400_LDO3_CONTROL,
+		.enable_mask = WM8400_LDO3_ENA,
 		.n_voltages = WM8400_LDO3_VSEL_MASK + 1,
+		.vsel_reg = WM8400_LDO3_CONTROL,
+		.vsel_mask = WM8400_LDO3_VSEL_MASK,
 		.type = REGULATOR_VOLTAGE,
 		.owner = THIS_MODULE,
 	},
@@ -298,7 +200,11 @@
 		.name = "LDO4",
 		.id = WM8400_LDO4,
 		.ops = &wm8400_ldo_ops,
+		.enable_reg = WM8400_LDO4_CONTROL,
+		.enable_mask = WM8400_LDO4_ENA,
 		.n_voltages = WM8400_LDO4_VSEL_MASK + 1,
+		.vsel_reg = WM8400_LDO4_CONTROL,
+		.vsel_mask = WM8400_LDO4_VSEL_MASK,
 		.type = REGULATOR_VOLTAGE,
 		.owner = THIS_MODULE,
 	},
@@ -306,7 +212,13 @@
 		.name = "DCDC1",
 		.id = WM8400_DCDC1,
 		.ops = &wm8400_dcdc_ops,
+		.enable_reg = WM8400_DCDC1_CONTROL_1,
+		.enable_mask = WM8400_DC1_ENA_MASK,
 		.n_voltages = WM8400_DC1_VSEL_MASK + 1,
+		.vsel_reg = WM8400_DCDC1_CONTROL_1,
+		.vsel_mask = WM8400_DC1_VSEL_MASK,
+		.min_uV = 850000,
+		.uV_step = 25000,
 		.type = REGULATOR_VOLTAGE,
 		.owner = THIS_MODULE,
 	},
@@ -314,7 +226,13 @@
 		.name = "DCDC2",
 		.id = WM8400_DCDC2,
 		.ops = &wm8400_dcdc_ops,
+		.enable_reg = WM8400_DCDC2_CONTROL_1,
+		.enable_mask = WM8400_DC1_ENA_MASK,
 		.n_voltages = WM8400_DC2_VSEL_MASK + 1,
+		.vsel_reg = WM8400_DCDC2_CONTROL_1,
+		.vsel_mask = WM8400_DC2_VSEL_MASK,
+		.min_uV = 850000,
+		.uV_step = 25000,
 		.type = REGULATOR_VOLTAGE,
 		.owner = THIS_MODULE,
 	},
@@ -323,11 +241,15 @@
 static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
 {
 	struct wm8400 *wm8400 = container_of(pdev, struct wm8400, regulators[pdev->id]);
+	struct regulator_config config = { };
 	struct regulator_dev *rdev;
 
-	rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
-				  pdev->dev.platform_data, wm8400, NULL);
+	config.dev = &pdev->dev;
+	config.init_data = pdev->dev.platform_data;
+	config.driver_data = wm8400;
+	config.regmap = wm8400->regmap;
 
+	rdev = regulator_register(&regulators[pdev->id], &config);
 	if (IS_ERR(rdev))
 		return PTR_ERR(rdev);
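
With .min_uV = 850000 and .uV_step = 25000 in the DCDC descriptors,
regulator_list_voltage_linear() reproduces the removed table arithmetic, and
regulator_get/set_voltage_sel_regmap() handle the VSEL field through the
regmap; only the LDOs keep a custom .list_voltage/.map_voltage because their
range is non-linear. What the linear helper computes for these descriptors,
shown for illustration only:

static int wm8400_dcdc_sel_to_uV(unsigned int selector)
{
	if (selector > WM8400_DC1_VSEL_MASK)	/* i.e. selector >= n_voltages */
		return -EINVAL;

	return 850000 + selector * 25000;	/* 850 mV upwards in 25 mV steps */
}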
 
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 75ed402..9a99431 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -86,36 +86,6 @@
 	return (selector * 100000) + 2400000;
 }
 
-static int wm8994_ldo1_get_voltage_sel(struct regulator_dev *rdev)
-{
-	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
-	int val;
-
-	val = wm8994_reg_read(ldo->wm8994, WM8994_LDO_1);
-	if (val < 0)
-		return val;
-
-	return (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
-}
-
-static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev,
-				   int min_uV, int max_uV, unsigned *s)
-{
-	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
-	int selector, v;
-
-	selector = (min_uV - 2400000) / 100000;
-	v = wm8994_ldo1_list_voltage(rdev, selector);
-	if (v < 0 || v > max_uV)
-		return -EINVAL;
-
-	*s = selector;
-	selector <<= WM8994_LDO1_VSEL_SHIFT;
-
-	return wm8994_set_bits(ldo->wm8994, WM8994_LDO_1,
-			       WM8994_LDO1_VSEL_MASK, selector);
-}
-
 static struct regulator_ops wm8994_ldo1_ops = {
 	.enable = wm8994_ldo_enable,
 	.disable = wm8994_ldo_disable,
@@ -123,8 +93,8 @@
 	.enable_time = wm8994_ldo_enable_time,
 
 	.list_voltage = wm8994_ldo1_list_voltage,
-	.get_voltage_sel = wm8994_ldo1_get_voltage_sel,
-	.set_voltage = wm8994_ldo1_set_voltage,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
 };
 
 static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
@@ -153,51 +123,6 @@
 	}
 }
 
-static int wm8994_ldo2_get_voltage_sel(struct regulator_dev *rdev)
-{
-	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
-	int val;
-
-	val = wm8994_reg_read(ldo->wm8994, WM8994_LDO_2);
-	if (val < 0)
-		return val;
-
-	return (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
-}
-
-static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev,
-				   int min_uV, int max_uV, unsigned *s)
-{
-	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
-	int selector, v;
-
-	switch (ldo->wm8994->type) {
-	case WM8994:
-		selector = (min_uV - 900000) / 100000;
-		break;
-	case WM8958:
-		selector = (min_uV - 1000000) / 100000;
-		break;
-	case WM1811:
-		selector = (min_uV - 950000) / 100000;
-		if (selector == 0)
-			selector = 1;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	v = wm8994_ldo2_list_voltage(rdev, selector);
-	if (v < 0 || v > max_uV)
-		return -EINVAL;
-
-	*s = selector;
-	selector <<= WM8994_LDO2_VSEL_SHIFT;
-
-	return wm8994_set_bits(ldo->wm8994, WM8994_LDO_2,
-			       WM8994_LDO2_VSEL_MASK, selector);
-}
-
 static struct regulator_ops wm8994_ldo2_ops = {
 	.enable = wm8994_ldo_enable,
 	.disable = wm8994_ldo_disable,
@@ -205,16 +130,18 @@
 	.enable_time = wm8994_ldo_enable_time,
 
 	.list_voltage = wm8994_ldo2_list_voltage,
-	.get_voltage_sel = wm8994_ldo2_get_voltage_sel,
-	.set_voltage = wm8994_ldo2_set_voltage,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
 };
 
-static struct regulator_desc wm8994_ldo_desc[] = {
+static const struct regulator_desc wm8994_ldo_desc[] = {
 	{
 		.name = "LDO1",
 		.id = 1,
 		.type = REGULATOR_VOLTAGE,
 		.n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
+		.vsel_reg = WM8994_LDO_1,
+		.vsel_mask = WM8994_LDO1_VSEL_MASK,
 		.ops = &wm8994_ldo1_ops,
 		.owner = THIS_MODULE,
 	},
@@ -223,6 +150,8 @@
 		.id = 2,
 		.type = REGULATOR_VOLTAGE,
 		.n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
+		.vsel_reg = WM8994_LDO_2,
+		.vsel_mask = WM8994_LDO2_VSEL_MASK,
 		.ops = &wm8994_ldo2_ops,
 		.owner = THIS_MODULE,
 	},
@@ -233,14 +162,12 @@
 	struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
 	struct wm8994_pdata *pdata = wm8994->dev->platform_data;
 	int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+	struct regulator_config config = { };
 	struct wm8994_ldo *ldo;
 	int ret;
 
 	dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
 
-	if (!pdata)
-		return -ENODEV;
-
 	ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm8994_ldo), GFP_KERNEL);
 	if (ldo == NULL) {
 		dev_err(&pdev->dev, "Unable to allocate private data\n");
@@ -252,24 +179,22 @@
 	if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) {
 		ldo->enable = pdata->ldo[id].enable;
 
-		ret = gpio_request(ldo->enable, "WM8994 LDO enable");
+		ret = gpio_request_one(ldo->enable, 0, "WM8994 LDO enable");
 		if (ret < 0) {
 			dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n",
 				ret);
 			goto err;
 		}
-
-		ret = gpio_direction_output(ldo->enable, ldo->is_enabled);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "Failed to set GPIO up: %d\n",
-				ret);
-			goto err_gpio;
-		}
 	} else
 		ldo->is_enabled = true;
 
-	ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
-					     pdata->ldo[id].init_data, ldo, NULL);
+	config.dev = wm8994->dev;
+	config.driver_data = ldo;
+	config.regmap = wm8994->regmap;
+	if (pdata)
+		config.init_data = pdata->ldo[id].init_data;
+
+	ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &config);
 	if (IS_ERR(ldo->regulator)) {
 		ret = PTR_ERR(ldo->regulator);
 		dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index ee15c68..e756a0d 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -354,7 +354,7 @@
 {
 	struct rproc *rproc = rvdev->rproc;
 
-	for (i--; i > 0; i--) {
+	for (i--; i >= 0; i--) {
 		struct rproc_vring *rvring = &rvdev->vring[i];
 		int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
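
The remoteproc change is an off-by-one fix in the unwind path: with the old
"i > 0" termination the vring set up at index 0 was never torn down when a
later allocation failed. Minimal illustration, assuming entries 0..i-1 were
initialised before the failure and free_entry() is a hypothetical teardown
helper:

	/* Before: stops at 1, leaking entry 0. */
	for (i--; i > 0; i--)
		free_entry(i);

	/* After: walks back down to and including entry 0. */
	for (i--; i >= 0; i--)
		free_entry(i);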
 
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cd188ab..c293d0c 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -902,6 +902,7 @@
 		}
 		ds1307->nvram->attr.name = "nvram";
 		ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR;
+		sysfs_bin_attr_init(ds1307->nvram);
 		ds1307->nvram->read = ds1307_nvram_read,
 		ds1307->nvram->write = ds1307_nvram_write,
 		ds1307->nvram->size = chip->nvram_size;
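
sysfs_bin_attr_init() must be called on dynamically allocated binary
attributes before they are registered; with CONFIG_DEBUG_LOCK_ALLOC it
initialises the attribute's lockdep class key (statically defined attributes
get theirs at build time). A rough sketch of the required ordering, assuming
nvram was allocated at runtime as in the hunk above and foo_nvram_read/write
are hypothetical accessors:

	sysfs_bin_attr_init(nvram);		/* before sysfs_create_bin_file() */
	nvram->attr.name = "nvram";
	nvram->attr.mode = S_IRUGO | S_IWUSR;
	nvram->read  = foo_nvram_read;
	nvram->write = foo_nvram_write;
	nvram->size  = nvram_size;

	ret = sysfs_create_bin_file(&dev->kobj, nvram);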
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 42f5f82..029e421 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -360,12 +360,11 @@
 						&mpc5200_rtc_ops, THIS_MODULE);
 	}
 
-	rtc->rtc->uie_unsupported = 1;
-
 	if (IS_ERR(rtc->rtc)) {
 		err = PTR_ERR(rtc->rtc);
 		goto out_free_irq;
 	}
+	rtc->rtc->uie_unsupported = 1;
 
 	return 0;
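
rtc_device_register() returns an ERR_PTR()-encoded error rather than NULL on
failure, so dereferencing the result before the IS_ERR() check (as the old
ordering did with uie_unsupported) would fault on the error path. The
corrected shape, with the device name and ops as hypothetical placeholders:

	rtc->rtc = rtc_device_register("foo-rtc", &pdev->dev,
				       &foo_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc->rtc)) {			/* encoded errno, not an object */
		err = PTR_ERR(rtc->rtc);
		goto out_free_irq;
	}
	rtc->rtc->uie_unsupported = 1;		/* only dereference after the check */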
 
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 684ef4b..f027c06 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -312,6 +312,7 @@
 	int ret;
 	struct pl031_local *ldata;
 	struct rtc_class_ops *ops = id->data;
+	unsigned long time;
 
 	ret = amba_request_regions(adev, NULL);
 	if (ret)
@@ -343,6 +344,23 @@
 		writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
 		       ldata->base + RTC_CR);
 
+	/*
+	 * On ST PL031 variants, the RTC reset value does not provide correct
+	 * weekday for 2000-01-01. Correct the erroneous sunday to saturday.
+	 */
+	if (ldata->hw_designer == AMBA_VENDOR_ST) {
+		if (readl(ldata->base + RTC_YDR) == 0x2000) {
+			time = readl(ldata->base + RTC_DR);
+			if ((time &
+			     (RTC_MON_MASK | RTC_MDAY_MASK | RTC_WDAY_MASK))
+			    == 0x02120000) {
+				time = time | (0x7 << RTC_WDAY_SHIFT);
+				writel(0x2000, ldata->base + RTC_YLR);
+				writel(time, ldata->base + RTC_LR);
+			}
+		}
+	}
+
 	ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
 					THIS_MODULE);
 	if (IS_ERR(ldata->rtc)) {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 231a1d8..3650636 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -352,7 +352,17 @@
 
 static int sclp_assign_storage(u16 rn)
 {
-	return do_assign_storage(0x000d0001, rn);
+	unsigned long long start, address;
+	int rc;
+
+	rc = do_assign_storage(0x000d0001, rn);
+	if (rc)
+		goto out;
+	start = address = rn2addr(rn);
+	for (; address < start + rzm; address += PAGE_SIZE)
+		page_set_storage_key(address, PAGE_DEFAULT_KEY, 0);
+out:
+	return rc;
 }
 
 static int sclp_unassign_storage(u16 rn)
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 267b54e..bc6c7cf 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -154,12 +154,6 @@
 	struct tape_request *(*read_block)(struct tape_device *, size_t);
 	struct tape_request *(*write_block)(struct tape_device *, size_t);
 	void (*process_eov)(struct tape_device*);
-#ifdef CONFIG_S390_TAPE_BLOCK
-	/* Block device stuff. */
-	struct tape_request *(*bread)(struct tape_device *, struct request *);
-	void (*check_locate)(struct tape_device *, struct tape_request *);
-	void (*free_bread)(struct tape_request *);
-#endif
 	/* ioctl function for additional ioctls. */
 	int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
 	/* Array of tape commands with TAPE_NR_MTOPS entries */
@@ -182,26 +176,6 @@
 	int block_size;			/*   of size block_size. */
 };
 
-#ifdef CONFIG_S390_TAPE_BLOCK
-/* Block Frontend Data */
-struct tape_blk_data
-{
-	struct tape_device *	device;
-	/* Block device request queue. */
-	struct request_queue *	request_queue;
-	spinlock_t		request_queue_lock;
-
-	/* Task to move entries from block request to CCS request queue. */
-	struct work_struct	requeue_task;
-	atomic_t		requeue_scheduled;
-
-	/* Current position on the tape. */
-	long			block_position;
-	int			medium_changed;
-	struct gendisk *	disk;
-};
-#endif
-
 /* Tape Info */
 struct tape_device {
 	/* entry in tape_device_list */
@@ -248,10 +222,6 @@
 
 	/* Character device frontend data */
 	struct tape_char_data		char_data;
-#ifdef CONFIG_S390_TAPE_BLOCK
-	/* Block dev frontend data */
-	struct tape_blk_data		blk_data;
-#endif
 
 	/* Function to start or stop the next request later. */
 	struct delayed_work		tape_dnr;
@@ -313,19 +283,6 @@
 extern int  tapechar_setup_device(struct tape_device *);
 extern void tapechar_cleanup_device(struct tape_device *);
 
-/* Externals from tape_block.c */
-#ifdef CONFIG_S390_TAPE_BLOCK
-extern int tapeblock_init (void);
-extern void tapeblock_exit(void);
-extern int tapeblock_setup_device(struct tape_device *);
-extern void tapeblock_cleanup_device(struct tape_device *);
-#else
-static inline int tapeblock_init (void) {return 0;}
-static inline void tapeblock_exit (void) {;}
-static inline int tapeblock_setup_device(struct tape_device *t) {return 0;}
-static inline void tapeblock_cleanup_device (struct tape_device *t) {;}
-#endif
-
 /* tape initialisation functions */
 #ifdef CONFIG_PROC_FS
 extern void tape_proc_init (void);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 934ef33..b28de80 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -323,20 +323,6 @@
 	inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
 	sense = irb->ecw;
 
-#ifdef CONFIG_S390_TAPE_BLOCK
-	if (request->op == TO_BLOCK) {
-		/*
-		 * Recovery for block device requests. Set the block_position
-		 * to something invalid and retry.
-		 */
-		device->blk_data.block_position = -1;
-		if (request->retries-- <= 0)
-			return tape_34xx_erp_failed(request, -EIO);
-		else
-			return tape_34xx_erp_retry(request);
-	}
-#endif
-
 	if (
 		sense[0] & SENSE_COMMAND_REJECT &&
 		sense[1] & SENSE_WRITE_PROTECT
@@ -1129,123 +1115,6 @@
 	return tape_do_io_free(device, request);
 }
 
-#ifdef CONFIG_S390_TAPE_BLOCK
-/*
- * Tape block read for 34xx.
- */
-static struct tape_request *
-tape_34xx_bread(struct tape_device *device, struct request *req)
-{
-	struct tape_request *request;
-	struct ccw1 *ccw;
-	int count = 0;
-	unsigned off;
-	char *dst;
-	struct bio_vec *bv;
-	struct req_iterator iter;
-	struct tape_34xx_block_id *	start_block;
-
-	DBF_EVENT(6, "xBREDid:");
-
-	/* Count the number of blocks for the request. */
-	rq_for_each_segment(bv, req, iter)
-		count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
-
-	/* Allocate the ccw request. */
-	request = tape_alloc_request(3+count+1, 8);
-	if (IS_ERR(request))
-		return request;
-
-	/* Setup ccws. */
-	request->op = TO_BLOCK;
-	start_block = (struct tape_34xx_block_id *) request->cpdata;
-	start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
-	DBF_EVENT(6, "start_block = %i\n", start_block->block);
-
-	ccw = request->cpaddr;
-	ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
-
-	/*
-	 * We always setup a nop after the mode set ccw. This slot is
-	 * used in tape_std_check_locate to insert a locate ccw if the
-	 * current tape position doesn't match the start block to be read.
-	 * The second nop will be filled with a read block id which is in
-	 * turn used by tape_34xx_free_bread to populate the segment bid
-	 * table.
-	 */
-	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
-	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
-
-	rq_for_each_segment(bv, req, iter) {
-		dst = kmap(bv->bv_page) + bv->bv_offset;
-		for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
-			ccw->flags = CCW_FLAG_CC;
-			ccw->cmd_code = READ_FORWARD;
-			ccw->count = TAPEBLOCK_HSEC_SIZE;
-			set_normalized_cda(ccw, (void*) __pa(dst));
-			ccw++;
-			dst += TAPEBLOCK_HSEC_SIZE;
-		}
-	}
-
-	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
-	DBF_EVENT(6, "xBREDccwg\n");
-	return request;
-}
-
-static void
-tape_34xx_free_bread (struct tape_request *request)
-{
-	struct ccw1* ccw;
-
-	ccw = request->cpaddr;
-	if ((ccw + 2)->cmd_code == READ_BLOCK_ID) {
-		struct {
-			struct tape_34xx_block_id	cbid;
-			struct tape_34xx_block_id	dbid;
-		} __attribute__ ((packed)) *rbi_data;
-
-		rbi_data = request->cpdata;
-
-		if (request->device)
-			tape_34xx_add_sbid(request->device, rbi_data->cbid);
-	}
-
-	/* Last ccw is a nop and doesn't need clear_normalized_cda */
-	for (; ccw->flags & CCW_FLAG_CC; ccw++)
-		if (ccw->cmd_code == READ_FORWARD)
-			clear_normalized_cda(ccw);
-	tape_free_request(request);
-}
-
-/*
- * check_locate is called just before the tape request is passed to
- * the common io layer for execution. It has to check the current
- * tape position and insert a locate ccw if it doesn't match the
- * start block for the request.
- */
-static void
-tape_34xx_check_locate(struct tape_device *device, struct tape_request *request)
-{
-	struct tape_34xx_block_id *	start_block;
-
-	start_block = (struct tape_34xx_block_id *) request->cpdata;
-	if (start_block->block == device->blk_data.block_position)
-		return;
-
-	DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof);
-	start_block->wrap    = 0;
-	start_block->segment = 1;
-	start_block->format  = (*device->modeset_byte & 0x08) ?
-				TAPE34XX_FMT_3480_XF :
-				TAPE34XX_FMT_3480;
-	start_block->block   = start_block->block + device->bof;
-	tape_34xx_merge_sbid(device, start_block);
-	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
-	tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata);
-}
-#endif
-
 /*
  * List of 3480/3490 magnetic tape commands.
  */
@@ -1295,11 +1164,6 @@
 	.irq = tape_34xx_irq,
 	.read_block = tape_std_read_block,
 	.write_block = tape_std_write_block,
-#ifdef CONFIG_S390_TAPE_BLOCK
-	.bread = tape_34xx_bread,
-	.free_bread = tape_34xx_free_bread,
-	.check_locate = tape_34xx_check_locate,
-#endif
 	.ioctl_fn = tape_34xx_ioctl,
 	.mtop_array = tape_34xx_mtop
 };
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 49c6aab..a5c6614 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -670,92 +670,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_S390_TAPE_BLOCK
-/*
- * Tape Block READ
- */
-static struct tape_request *
-tape_3590_bread(struct tape_device *device, struct request *req)
-{
-	struct tape_request *request;
-	struct ccw1 *ccw;
-	int count = 0, start_block;
-	unsigned off;
-	char *dst;
-	struct bio_vec *bv;
-	struct req_iterator iter;
-
-	DBF_EVENT(6, "xBREDid:");
-	start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
-	DBF_EVENT(6, "start_block = %i\n", start_block);
-
-	rq_for_each_segment(bv, req, iter)
-		count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
-
-	request = tape_alloc_request(2 + count + 1, 4);
-	if (IS_ERR(request))
-		return request;
-	request->op = TO_BLOCK;
-	*(__u32 *) request->cpdata = start_block;
-	ccw = request->cpaddr;
-	ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
-
-	/*
-	 * We always setup a nop after the mode set ccw. This slot is
-	 * used in tape_std_check_locate to insert a locate ccw if the
-	 * current tape position doesn't match the start block to be read.
-	 */
-	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
-
-	rq_for_each_segment(bv, req, iter) {
-		dst = page_address(bv->bv_page) + bv->bv_offset;
-		for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
-			ccw->flags = CCW_FLAG_CC;
-			ccw->cmd_code = READ_FORWARD;
-			ccw->count = TAPEBLOCK_HSEC_SIZE;
-			set_normalized_cda(ccw, (void *) __pa(dst));
-			ccw++;
-			dst += TAPEBLOCK_HSEC_SIZE;
-		}
-		BUG_ON(off > bv->bv_len);
-	}
-	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
-	DBF_EVENT(6, "xBREDccwg\n");
-	return request;
-}
-
-static void
-tape_3590_free_bread(struct tape_request *request)
-{
-	struct ccw1 *ccw;
-
-	/* Last ccw is a nop and doesn't need clear_normalized_cda */
-	for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++)
-		if (ccw->cmd_code == READ_FORWARD)
-			clear_normalized_cda(ccw);
-	tape_free_request(request);
-}
-
-/*
- * check_locate is called just before the tape request is passed to
- * the common io layer for execution. It has to check the current
- * tape position and insert a locate ccw if it doesn't match the
- * start block for the request.
- */
-static void
-tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
-{
-	__u32 *start_block;
-
-	start_block = (__u32 *) request->cpdata;
-	if (*start_block != device->blk_data.block_position) {
-		/* Add the start offset of the file to get the real block. */
-		*start_block += device->bof;
-		tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
-	}
-}
-#endif
-
 static void tape_3590_med_state_set(struct tape_device *device,
 				    struct tape_3590_med_sense *sense)
 {
@@ -1423,20 +1337,6 @@
 {
 	struct tape_3590_sense *sense;
 
-#ifdef CONFIG_S390_TAPE_BLOCK
-	if (request->op == TO_BLOCK) {
-		/*
-		 * Recovery for block device requests. Set the block_position
-		 * to something invalid and retry.
-		 */
-		device->blk_data.block_position = -1;
-		if (request->retries-- <= 0)
-			return tape_3590_erp_failed(device, request, irb, -EIO);
-		else
-			return tape_3590_erp_retry(device, request, irb);
-	}
-#endif
-
 	sense = (struct tape_3590_sense *) irb->ecw;
 
 	DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
@@ -1729,11 +1629,6 @@
 	.irq = tape_3590_irq,
 	.read_block = tape_std_read_block,
 	.write_block = tape_std_write_block,
-#ifdef CONFIG_S390_TAPE_BLOCK
-	.bread = tape_3590_bread,
-	.free_bread = tape_3590_free_bread,
-	.check_locate = tape_3590_check_locate,
-#endif
 	.ioctl_fn = tape_3590_ioctl,
 	.mtop_array = tape_3590_mtop
 };
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 87cd0ab..46886a7 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -161,11 +161,6 @@
 	if (rc)
 		return rc;
 
-#ifdef CONFIG_S390_TAPE_BLOCK
-	/* Changes position. */
-	device->blk_data.medium_changed = 1;
-#endif
-
 	DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
 	/* Let the discipline build the ccw chain. */
 	request = device->discipline->read_block(device, block_size);
@@ -218,11 +213,6 @@
 	if (rc)
 		return rc;
 
-#ifdef CONFIG_S390_TAPE_BLOCK
-	/* Changes position. */
-	device->blk_data.medium_changed = 1;
-#endif
-
 	DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
 	DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
 	/* Let the discipline build the ccw chain. */
@@ -379,9 +369,6 @@
 			case MTBSFM:
 			case MTFSFM:
 			case MTSEEK:
-#ifdef CONFIG_S390_TAPE_BLOCK
-				device->blk_data.medium_changed = 1;
-#endif
 				if (device->required_tapemarks)
 					tape_std_terminate_write(device);
 			default:
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index b3a3e8e..5856186 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -401,9 +401,6 @@
 	rc = tapechar_setup_device(device);
 	if (rc)
 		goto out_minor;
-	rc = tapeblock_setup_device(device);
-	if (rc)
-		goto out_char;
 
 	tape_state_set(device, TS_UNUSED);
 
@@ -411,8 +408,6 @@
 
 	return 0;
 
-out_char:
-	tapechar_cleanup_device(device);
 out_minor:
 	tape_remove_minor(device);
 out_discipline:
@@ -426,7 +421,6 @@
 static void
 tape_cleanup_device(struct tape_device *device)
 {
-	tapeblock_cleanup_device(device);
 	tapechar_cleanup_device(device);
 	device->discipline->cleanup_device(device);
 	module_put(device->discipline->owner);
@@ -785,10 +779,6 @@
 {
 	int rc;
 
-#ifdef CONFIG_S390_TAPE_BLOCK
-	if (request->op == TO_BLOCK)
-		device->discipline->check_locate(device, request);
-#endif
 	rc = ccw_device_start(
 		device->cdev,
 		request->cpaddr,
@@ -1253,7 +1243,7 @@
 }
 
 /*
- * Tape device open function used by tape_char & tape_block frontends.
+ * Tape device open function used by tape_char frontend.
  */
 int
 tape_open(struct tape_device *device)
@@ -1283,7 +1273,7 @@
 }
 
 /*
- * Tape device release function used by tape_char & tape_block frontends.
+ * Tape device release function used by tape_char frontend.
  */
 int
 tape_release(struct tape_device *device)
@@ -1344,7 +1334,6 @@
 	DBF_EVENT(3, "tape init\n");
 	tape_proc_init();
 	tapechar_init ();
-	tapeblock_init ();
 	return 0;
 }
 
@@ -1358,7 +1347,6 @@
 
 	/* Get rid of the frontends */
 	tapechar_exit();
-	tapeblock_exit();
 	tape_proc_cleanup();
 	debug_unregister (TAPE_DBF_AREA);
 }
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5f1dc6f..731470e 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,7 +1,7 @@
 /*
  *  bus driver for ccwgroup
  *
- *  Copyright IBM Corp. 2002, 2009
+ *  Copyright IBM Corp. 2002, 2012
  *
  *  Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *	       Cornelia Huck (cornelia.huck@de.ibm.com)
@@ -15,10 +15,13 @@
 #include <linux/ctype.h>
 #include <linux/dcache.h>
 
+#include <asm/cio.h>
 #include <asm/ccwdev.h>
 #include <asm/ccwgroup.h>
 
-#define CCW_BUS_ID_SIZE		20
+#include "device.h"
+
+#define CCW_BUS_ID_SIZE		10
 
 /* In Linux 2.4, we had a channel device layer called "chandev"
  * that did all sorts of obscure stuff for networking devices.
@@ -27,19 +30,6 @@
  * to devices that use multiple subchannels.
  */
 
-/* a device matches a driver if all its slave devices match the same
- * entry of the driver */
-static int ccwgroup_bus_match(struct device *dev, struct device_driver * drv)
-{
-	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(drv);
-
-	if (gdev->creator_id == gdrv->driver_id)
-		return 1;
-
-	return 0;
-}
-
 static struct bus_type ccwgroup_bus_type;
 
 static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
@@ -254,9 +244,10 @@
 	return 0;
 }
 
-static int __get_next_bus_id(const char **buf, char *bus_id)
+static int __get_next_id(const char **buf, struct ccw_dev_id *id)
 {
-	int rc, len;
+	unsigned int cssid, ssid, devno;
+	int ret = 0, len;
 	char *start, *end;
 
 	start = (char *)*buf;
@@ -271,49 +262,40 @@
 		len = end - start + 1;
 		end++;
 	}
-	if (len < CCW_BUS_ID_SIZE) {
-		strlcpy(bus_id, start, len);
-		rc = 0;
+	if (len <= CCW_BUS_ID_SIZE) {
+		if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
+			ret = -EINVAL;
 	} else
-		rc = -EINVAL;
+		ret = -EINVAL;
+
+	if (!ret) {
+		id->ssid = ssid;
+		id->devno = devno;
+	}
 	*buf = end;
-	return rc;
-}
-
-static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
-{
-	int cssid, ssid, devno;
-
-	/* Must be of form %x.%x.%04x */
-	if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3)
-		return 0;
-	return 1;
+	return ret;
 }
 
 /**
- * ccwgroup_create_from_string() - create and register a ccw group device
- * @root: parent device for the new device
- * @creator_id: identifier of creating driver
- * @cdrv: ccw driver of slave devices
+ * ccwgroup_create_dev() - create and register a ccw group device
+ * @parent: parent device for the new device
+ * @gdrv: driver for the new group device
  * @num_devices: number of slave devices
  * @buf: buffer containing comma separated bus ids of slave devices
  *
- * Create and register a new ccw group device as a child of @root. Slave
- * devices are obtained from the list of bus ids given in @buf and must all
- * belong to @cdrv.
+ * Create and register a new ccw group device as a child of @parent. Slave
+ * devices are obtained from the list of bus ids given in @buf.
  * Returns:
  *  %0 on success and an error code on failure.
  * Context:
  *  non-atomic
  */
-int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
-				struct ccw_driver *cdrv, int num_devices,
-				const char *buf)
+int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
+			int num_devices, const char *buf)
 {
 	struct ccwgroup_device *gdev;
+	struct ccw_dev_id dev_id;
 	int rc, i;
-	char tmp_bus_id[CCW_BUS_ID_SIZE];
-	const char *curr_buf;
 
 	gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
 		       GFP_KERNEL);
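
The new __get_next_id() above replaces the old string copy plus separate validity check with a direct sscanf() of the "cssid.ssid.devno" form; the shrunken CCW_BUS_ID_SIZE of 10 matches the longest accepted id ("xx.x.xxxx") plus its terminator. A stand-alone illustration of the parsing, with a hypothetical parse_bus_id() wrapper and a ccw_dev_id reduced to the fields filled in above:

#include <stdio.h>

struct ccw_dev_id {
	unsigned int ssid;
	unsigned int devno;
};

/* Parse one bus id of the form "xx.x.xxxx" (hex), e.g. "0.0.4711". */
static int parse_bus_id(const char *str, struct ccw_dev_id *id)
{
	unsigned int cssid, ssid, devno;

	if (sscanf(str, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
		return -1;
	id->ssid = ssid;
	id->devno = devno;
	return 0;
}

int main(void)
{
	struct ccw_dev_id id;

	if (!parse_bus_id("0.0.4711", &id))
		printf("ssid=%u devno=0x%04x\n", id.ssid, id.devno);
	return 0;
}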
@@ -323,29 +305,24 @@
 	atomic_set(&gdev->onoff, 0);
 	mutex_init(&gdev->reg_mutex);
 	mutex_lock(&gdev->reg_mutex);
-	gdev->creator_id = creator_id;
 	gdev->count = num_devices;
 	gdev->dev.bus = &ccwgroup_bus_type;
-	gdev->dev.parent = root;
+	gdev->dev.parent = parent;
 	gdev->dev.release = ccwgroup_release;
 	device_initialize(&gdev->dev);
 
-	curr_buf = buf;
-	for (i = 0; i < num_devices && curr_buf; i++) {
-		rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
+	for (i = 0; i < num_devices && buf; i++) {
+		rc = __get_next_id(&buf, &dev_id);
 		if (rc != 0)
 			goto error;
-		if (!__is_valid_bus_id(tmp_bus_id)) {
-			rc = -EINVAL;
-			goto error;
-		}
-		gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
+		gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id);
 		/*
 		 * All devices have to be of the same type in
 		 * order to be grouped.
 		 */
-		if (!gdev->cdev[i]
-		    || gdev->cdev[i]->id.driver_info !=
+		if (!gdev->cdev[i] || !gdev->cdev[i]->drv ||
+		    gdev->cdev[i]->drv != gdev->cdev[0]->drv ||
+		    gdev->cdev[i]->id.driver_info !=
 		    gdev->cdev[0]->id.driver_info) {
 			rc = -EINVAL;
 			goto error;
@@ -361,18 +338,25 @@
 		spin_unlock_irq(gdev->cdev[i]->ccwlock);
 	}
 	/* Check for sufficient number of bus ids. */
-	if (i < num_devices && !curr_buf) {
+	if (i < num_devices) {
 		rc = -EINVAL;
 		goto error;
 	}
 	/* Check for trailing stuff. */
-	if (i == num_devices && strlen(curr_buf) > 0) {
+	if (i == num_devices && strlen(buf) > 0) {
 		rc = -EINVAL;
 		goto error;
 	}
 
 	dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
 	gdev->dev.groups = ccwgroup_attr_groups;
+
+	if (gdrv) {
+		gdev->dev.driver = &gdrv->driver;
+		rc = gdrv->setup ? gdrv->setup(gdev) : 0;
+		if (rc)
+			goto error;
+	}
 	rc = device_add(&gdev->dev);
 	if (rc)
 		goto error;
@@ -397,7 +381,7 @@
 	put_device(&gdev->dev);
 	return rc;
 }
-EXPORT_SYMBOL(ccwgroup_create_from_string);
+EXPORT_SYMBOL(ccwgroup_create_dev);
 
 static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
 			     void *data)
@@ -440,14 +424,6 @@
 
 /************************** driver stuff ******************************/
 
-static int ccwgroup_probe(struct device *dev)
-{
-	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
-
-	return gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
-}
-
 static int ccwgroup_remove(struct device *dev)
 {
 	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
@@ -542,8 +518,6 @@
 
 static struct bus_type ccwgroup_bus_type = {
 	.name   = "ccwgroup",
-	.match  = ccwgroup_bus_match,
-	.probe  = ccwgroup_probe,
 	.remove = ccwgroup_remove,
 	.shutdown = ccwgroup_shutdown,
 	.pm = &ccwgroup_pm_ops,
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index a49c46c..a6ddaed 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -656,51 +656,34 @@
 static int console_subchannel_in_use;
 
 /*
- * Use cio_tpi to get a pending interrupt and call the interrupt handler.
- * Return non-zero if an interrupt was processed, zero otherwise.
+ * Use cio_tsch to update the subchannel status and call the interrupt handler
+ * if status had been pending. Called with the console_subchannel lock held.
  */
-static int cio_tpi(void)
+static void cio_tsch(struct subchannel *sch)
 {
-	struct tpi_info *tpi_info;
-	struct subchannel *sch;
 	struct irb *irb;
 	int irq_context;
 
-	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
-	if (tpi(NULL) != 1)
-		return 0;
-	kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
-	if (tpi_info->adapter_IO) {
-		do_adapter_IO(tpi_info->isc);
-		return 1;
-	}
 	irb = (struct irb *)&S390_lowcore.irb;
 	/* Store interrupt response block to lowcore. */
-	if (tsch(tpi_info->schid, irb) != 0) {
+	if (tsch(sch->schid, irb) != 0)
 		/* Not status pending or not operational. */
-		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
-		return 1;
-	}
-	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
-	if (!sch) {
-		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
-		return 1;
-	}
-	irq_context = in_interrupt();
-	if (!irq_context)
-		local_bh_disable();
-	irq_enter();
-	spin_lock(sch->lock);
+		return;
 	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
+	/* Call interrupt handler with updated status. */
+	irq_context = in_interrupt();
+	if (!irq_context) {
+		local_bh_disable();
+		irq_enter();
+	}
 	if (sch->driver && sch->driver->irq)
 		sch->driver->irq(sch);
 	else
 		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
-	spin_unlock(sch->lock);
-	irq_exit();
-	if (!irq_context)
+	if (!irq_context) {
+		irq_exit();
 		_local_bh_enable();
-	return 1;
+	}
 }
 
 void *cio_get_console_priv(void)
@@ -712,34 +695,16 @@
  * busy wait for the next interrupt on the console
  */
 void wait_cons_dev(void)
-	__releases(console_subchannel.lock)
-	__acquires(console_subchannel.lock)
 {
-	unsigned long cr6      __attribute__ ((aligned (8)));
-	unsigned long save_cr6 __attribute__ ((aligned (8)));
-
-	/* 
-	 * before entering the spinlock we may already have
-	 * processed the interrupt on a different CPU...
-	 */
 	if (!console_subchannel_in_use)
 		return;
 
-	/* disable all but the console isc */
-	__ctl_store (save_cr6, 6, 6);
-	cr6 = 1UL << (31 - CONSOLE_ISC);
-	__ctl_load (cr6, 6, 6);
-
-	do {
-		spin_unlock(console_subchannel.lock);
-		if (!cio_tpi())
-			cpu_relax();
-		spin_lock(console_subchannel.lock);
-	} while (console_subchannel.schib.scsw.cmd.actl != 0);
-	/*
-	 * restore previous isc value
-	 */
-	__ctl_load (save_cr6, 6, 6);
+	while (1) {
+		cio_tsch(&console_subchannel);
+		if (console_subchannel.schib.scsw.cmd.actl == 0)
+			break;
+		udelay_simple(100);
+	}
 }
 
 static int
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 02d0152..f8f952d 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -695,7 +695,17 @@
 	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
 }
 
-static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
+/**
+ * get_ccwdev_by_dev_id() - obtain device from a ccw device id
+ * @dev_id: id of the device to be searched
+ *
+ * This function searches all devices attached to the ccw bus for a device
+ * matching @dev_id.
+ * Returns:
+ *  If a device is found, its reference count is increased and the device
+ *  is returned; else %NULL is returned.
+ */
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
 {
 	struct device *dev;
 
@@ -703,6 +713,7 @@
 
 	return dev ? to_ccwdev(dev) : NULL;
 }
+EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
 
 static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
 {
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 179824b..6bace69 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -101,6 +101,7 @@
 void ccw_device_schedule_sch_unregister(struct ccw_device *);
 int ccw_purge_blacklisted(void);
 void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
+struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);
 
 /* Function prototypes for device status and basic sense stuff. */
 void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 35c685c..7493efa 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -63,7 +63,7 @@
 		"	ipm	%0\n"
 		"	srl	%0,28\n"
 		: "=d" (cc)
-		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
+		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
 	return cc;
 }
 
@@ -74,7 +74,7 @@
  * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
  * @fc: function code to perform
  *
- * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
+ * Returns condition code.
  * Note: For IQDC unicast queues only the highest priority queue is processed.
  */
 static inline int do_siga_output(unsigned long schid, unsigned long mask,
@@ -85,18 +85,16 @@
 	register unsigned long __schid asm("1") = schid;
 	register unsigned long __mask asm("2") = mask;
 	register unsigned long __aob asm("3") = aob;
-	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
+	int cc;
 
 	asm volatile(
 		"	siga	0\n"
-		"0:	ipm	%0\n"
+		"	ipm	%0\n"
 		"	srl	%0,28\n"
-		"1:\n"
-		EX_TABLE(0b, 1b)
-		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
-		  "+d" (__aob)
-		: : "cc", "memory");
-	*bb = ((unsigned int) __fc) >> 31;
+		: "=d" (cc), "+d" (__fc), "+d" (__aob)
+		: "d" (__schid), "d" (__mask)
+		: "cc");
+	*bb = __fc >> 31;
 	return cc;
 }
 
@@ -167,7 +165,7 @@
 
 	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
 	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
-	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
 		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
 	return 0;
 }
@@ -215,7 +213,7 @@
 
 	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
 	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
-	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
 		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
 	return 0;
 }
@@ -313,7 +311,7 @@
 	cc = do_siga_sync(schid, output, input, fc);
 	if (unlikely(cc))
 		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
-	return cc;
+	return (cc) ? -EIO : 0;
 }
 
 static inline int qdio_siga_sync_q(struct qdio_q *q)
@@ -384,7 +382,7 @@
 	cc = do_siga_input(schid, q->mask, fc);
 	if (unlikely(cc))
 		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
-	return cc;
+	return (cc) ? -EIO : 0;
 }
 
 #define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
@@ -443,7 +441,7 @@
 	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
 					SLSB_P_OUTPUT_NOT_INIT;
 
-	q->qdio_error |= QDIO_ERROR_SLSB_STATE;
+	q->qdio_error = QDIO_ERROR_SLSB_STATE;
 
 	/* special handling for no target buffer empty */
 	if ((!q->is_input_q &&
@@ -519,7 +517,7 @@
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_clock_fast();
+	q->timestamp = get_clock();
 
 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -575,7 +573,7 @@
 
 	bufnr = get_inbound_buffer_frontier(q);
 
-	if ((bufnr != q->last_move) || q->qdio_error) {
+	if (bufnr != q->last_move) {
 		q->last_move = bufnr;
 		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
 			q->u.in.timestamp = get_clock();
@@ -790,7 +788,7 @@
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_clock_fast();
+	q->timestamp = get_clock();
 
 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
@@ -863,7 +861,7 @@
 
 	bufnr = get_outbound_buffer_frontier(q);
 
-	if ((bufnr != q->last_move) || q->qdio_error) {
+	if (bufnr != q->last_move) {
 		q->last_move = bufnr;
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
 		return 1;
@@ -894,13 +892,16 @@
 				goto retry;
 			}
 			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
-			cc |= QDIO_ERROR_SIGA_BUSY;
-		} else
+			cc = -EBUSY;
+		} else {
 			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
+			cc = -ENOBUFS;
+		}
 		break;
 	case 1:
 	case 3:
 		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
+		cc = -EIO;
 		break;
 	}
 	if (retries) {
@@ -1090,7 +1091,7 @@
 	}
 
 	count = sub_buf(q->first_to_check, q->first_to_kick);
-	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
 		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
 no_handler:
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
@@ -1691,7 +1692,7 @@
 		      "do%02x b:%02x c:%02x", callflags, bufnr, count);
 
 	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
-		return -EBUSY;
+		return -EIO;
 	if (!count)
 		return 0;
 	if (callflags & QDIO_FLAG_SYNC_INPUT)
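
The qdio hunks above drop the private QDIO_ERROR_SIGA_* return values in favour of plain errnos: SIGA-sync and SIGA-read failures become -EIO, and SIGA-write condition code 2 is split into -EBUSY (busy bit set) and -ENOBUFS. A compact illustration of that mapping, with the retry-on-busy handling of the real code deliberately omitted:

#include <errno.h>

/* Map a SIGA-w condition code plus busy bit to an errno, as above. */
static int siga_output_cc_to_errno(int cc, int busy_bit)
{
	switch (cc) {
	case 0:
		return 0;			/* output initiated */
	case 2:
		return busy_bit ? -EBUSY : -ENOBUFS;
	case 1:
	case 3:
	default:
		return -EIO;			/* subchannel not operational etc. */
	}
}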
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 7e9a72e..b987d46 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -215,7 +215,7 @@
 	register struct ap_queue_status reg1_out asm ("1");
 	register void *reg2 asm ("2") = ind;
 	asm volatile(
-		".long 0xb2af0000"		/* PQAP(RAPQ) */
+		".long 0xb2af0000"		/* PQAP(AQIC) */
 		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
 		:
 		: "cc" );
@@ -232,7 +232,7 @@
 	register unsigned long reg2 asm ("2");
 
 	asm volatile(
-		".long 0xb2af0000\n"
+		".long 0xb2af0000\n"		/* PQAP(TAPQ) */
 		"0:\n"
 		EX_TABLE(0b, 0b)
 		: "+d" (reg0), "+d" (reg1), "=d" (reg2)
@@ -391,7 +391,7 @@
 		reg0 |= 0x400000UL;
 
 	asm volatile (
-		"0: .long 0xb2ad0042\n"		/* DQAP */
+		"0: .long 0xb2ad0042\n"		/* NQAP */
 		"   brc   2,0b"
 		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
 		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
@@ -450,7 +450,7 @@
 
 
 	asm volatile(
-		"0: .long 0xb2ae0064\n"
+		"0: .long 0xb2ae0064\n"		/* DQAP */
 		"   brc   6,0b\n"
 		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
 		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
@@ -836,12 +836,12 @@
 	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
 		list_del_init(&ap_msg->list);
 		ap_dev->pendingq_count--;
-		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
 	}
 	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
 		list_del_init(&ap_msg->list);
 		ap_dev->requestq_count--;
-		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
 	}
 }
 
@@ -1329,7 +1329,7 @@
 				continue;
 			list_del_init(&ap_msg->list);
 			ap_dev->pendingq_count--;
-			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
+			ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
 			break;
 		}
 		if (ap_dev->queue_count > 0)
@@ -1450,10 +1450,10 @@
 			return -EBUSY;
 		case AP_RESPONSE_REQ_FAC_NOT_INST:
 		case AP_RESPONSE_MESSAGE_TOO_BIG:
-			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
+			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
 			return -EINVAL;
 		default:	/* Device is gone. */
-			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
 			return -ENODEV;
 		}
 	} else {
@@ -1471,6 +1471,10 @@
 	unsigned long flags;
 	int rc;
 
+	/* For asynchronous message handling, a valid receive callback
+	 * is required. */
+	BUG_ON(!ap_msg->receive);
+
 	spin_lock_bh(&ap_dev->lock);
 	if (!ap_dev->unregistered) {
 		/* Make room on the queue by polling for finished requests. */
@@ -1482,7 +1486,7 @@
 		if (rc == -ENODEV)
 			ap_dev->unregistered = 1;
 	} else {
-		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
 		rc = -ENODEV;
 	}
 	spin_unlock_bh(&ap_dev->lock);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d960a63..726fc65 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -136,9 +136,6 @@
 
 	int (*probe)(struct ap_device *);
 	void (*remove)(struct ap_device *);
-	/* receive is called from tasklet context */
-	void (*receive)(struct ap_device *, struct ap_message *,
-			struct ap_message *);
 	int request_timeout;		/* request timeout in jiffies */
 };
 
@@ -183,6 +180,9 @@
 
 	void *private;			/* ap driver private pointer. */
 	unsigned int special:1;		/* Used for special commands. */
+	/* receive is called from tasklet context */
+	void (*receive)(struct ap_device *, struct ap_message *,
+			struct ap_message *);
 };
 
 #define AP_DEVICE(dt)					\
@@ -199,6 +199,7 @@
 	ap_msg->psmid = 0;
 	ap_msg->length = 0;
 	ap_msg->special = 0;
+	ap_msg->receive = NULL;
 }
 
 /*
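
The ap_bus changes above move the receive callback from struct ap_driver into struct ap_message, so every queued request carries its own completion handler and the bus code no longer reaches into the driver. A reduced sketch of the resulting ownership (types trimmed to what the example needs, not the full header definitions):

struct ap_device;
struct ap_message;

typedef void (*ap_receive_fn)(struct ap_device *, struct ap_message *,
			      struct ap_message *);

struct ap_message {
	ap_receive_fn receive;	/* set by the sender before queueing */
	void *private;
};

/* Error reporting now only needs the message, not the owning driver. */
static void ap_report_error(struct ap_device *ap_dev, struct ap_message *msg,
			    struct ap_message *err_reply)
{
	if (msg->receive)	/* the real code BUG()s if the callback is missing */
		msg->receive(ap_dev, msg, err_reply);
}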
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 0842867..4681244 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -77,7 +77,6 @@
 static struct ap_driver zcrypt_cex2a_driver = {
 	.probe = zcrypt_cex2a_probe,
 	.remove = zcrypt_cex2a_remove,
-	.receive = zcrypt_cex2a_receive,
 	.ids = zcrypt_cex2a_ids,
 	.request_timeout = CEX2A_CLEANUP_TIME,
 };
@@ -349,6 +348,7 @@
 		ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
+	ap_msg.receive = zcrypt_cex2a_receive;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &work;
@@ -390,6 +390,7 @@
 		ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
+	ap_msg.receive = zcrypt_cex2a_receive;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &work;
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 0effca9..ad7951c 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -67,7 +67,6 @@
 static struct ap_driver zcrypt_pcica_driver = {
 	.probe = zcrypt_pcica_probe,
 	.remove = zcrypt_pcica_remove,
-	.receive = zcrypt_pcica_receive,
 	.ids = zcrypt_pcica_ids,
 	.request_timeout = PCICA_CLEANUP_TIME,
 };
@@ -284,6 +283,7 @@
 	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
+	ap_msg.receive = zcrypt_pcica_receive;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &work;
@@ -322,6 +322,7 @@
 	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
+	ap_msg.receive = zcrypt_pcica_receive;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &work;
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index f9523c0..e5dd335 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -79,7 +79,6 @@
 static struct ap_driver zcrypt_pcicc_driver = {
 	.probe = zcrypt_pcicc_probe,
 	.remove = zcrypt_pcicc_remove,
-	.receive = zcrypt_pcicc_receive,
 	.ids = zcrypt_pcicc_ids,
 	.request_timeout = PCICC_CLEANUP_TIME,
 };
@@ -488,6 +487,7 @@
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
+	ap_msg.receive = zcrypt_pcicc_receive;
 	ap_msg.length = PAGE_SIZE;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
@@ -527,6 +527,7 @@
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
+	ap_msg.receive = zcrypt_pcicc_receive;
 	ap_msg.length = PAGE_SIZE;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index cf1cbd4..f7cc434 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -89,7 +89,6 @@
 static struct ap_driver zcrypt_pcixcc_driver = {
 	.probe = zcrypt_pcixcc_probe,
 	.remove = zcrypt_pcixcc_remove,
-	.receive = zcrypt_pcixcc_receive,
 	.ids = zcrypt_pcixcc_ids,
 	.request_timeout = PCIXCC_CLEANUP_TIME,
 };
@@ -698,6 +697,7 @@
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
+	ap_msg.receive = zcrypt_pcixcc_receive;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &resp_type;
@@ -738,6 +738,7 @@
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
+	ap_msg.receive = zcrypt_pcixcc_receive;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &resp_type;
@@ -778,6 +779,7 @@
 	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
+	ap_msg.receive = zcrypt_pcixcc_receive;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &resp_type;
@@ -818,6 +820,7 @@
 	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
+	ap_msg.receive = zcrypt_pcixcc_receive;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &resp_type;
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 9b66d2d..dfda748 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -4,11 +4,10 @@
 config LCS
 	def_tristate m
 	prompt "Lan Channel Station Interface"
-	depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI)
+	depends on CCW && NETDEVICES && (ETHERNET || FDDI)
 	help
 	   Select this option if you want to use LCS networking on IBM System z.
-	   This device driver supports Token Ring (IEEE 802.5),
-	   FDDI (IEEE 802.7) and Ethernet.
+	   This device driver supports FDDI (IEEE 802.7) and Ethernet.
 	   To compile as a module, choose M. The module name is lcs.
 	   If you do not know what it is, it's safe to choose Y.
 
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index b41fae3..6b1ff90 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -136,7 +136,6 @@
 claw_set_busy(struct net_device *dev)
 {
  ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
- eieio();
 }
 
 static inline void
@@ -144,13 +143,11 @@
 {
 	clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
 	netif_wake_queue(dev);
-	eieio();
 }
 
 static inline int
 claw_check_busy(struct net_device *dev)
 {
-	eieio();
 	return ((struct claw_privbk *) dev->ml_priv)->tbusy;
 }
 
@@ -233,8 +230,6 @@
 static ssize_t claw_rbuff_write(struct device *dev,
 	struct device_attribute *attr,
 	const char *buf, size_t count);
-static int claw_add_files(struct device *dev);
-static void claw_remove_files(struct device *dev);
 
 /*   Functions for System Validate  */
 static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
@@ -267,12 +262,10 @@
 		.owner	= THIS_MODULE,
 		.name	= "claw",
 	},
-        .max_slaves  = 2,
-        .driver_id   = 0xC3D3C1E6,
-        .probe       = claw_probe,
-        .remove      = claw_remove_device,
-        .set_online  = claw_new_device,
-        .set_offline = claw_shutdown_device,
+	.setup	     = claw_probe,
+	.remove      = claw_remove_device,
+	.set_online  = claw_new_device,
+	.set_offline = claw_shutdown_device,
 	.prepare     = claw_pm_prepare,
 };
 
@@ -293,30 +286,24 @@
 	.int_class = IOINT_CLW,
 };
 
-static ssize_t
-claw_driver_group_store(struct device_driver *ddrv, const char *buf,
-			size_t count)
+static ssize_t claw_driver_group_store(struct device_driver *ddrv,
+				       const char *buf,	size_t count)
 {
 	int err;
-	err = ccwgroup_create_from_string(claw_root_dev,
-					  claw_group_driver.driver_id,
-					  &claw_ccw_driver, 2, buf);
+	err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf);
 	return err ? err : count;
 }
-
 static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
 
-static struct attribute *claw_group_attrs[] = {
+static struct attribute *claw_drv_attrs[] = {
 	&driver_attr_group.attr,
 	NULL,
 };
-
-static struct attribute_group claw_group_attr_group = {
-	.attrs = claw_group_attrs,
+static struct attribute_group claw_drv_attr_group = {
+	.attrs = claw_drv_attrs,
 };
-
-static const struct attribute_group *claw_group_attr_groups[] = {
-	&claw_group_attr_group,
+static const struct attribute_group *claw_drv_attr_groups[] = {
+	&claw_drv_attr_group,
 	NULL,
 };
 
@@ -324,60 +311,6 @@
 *       Key functions
 */
 
-/*----------------------------------------------------------------*
- *   claw_probe                                                   *
- *      this function is called for each CLAW device.             *
- *----------------------------------------------------------------*/
-static int
-claw_probe(struct ccwgroup_device *cgdev)
-{
-	int  		rc;
-	struct claw_privbk *privptr=NULL;
-
-	CLAW_DBF_TEXT(2, setup, "probe");
-	if (!get_device(&cgdev->dev))
-		return -ENODEV;
-	privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
-	dev_set_drvdata(&cgdev->dev, privptr);
-	if (privptr == NULL) {
-		probe_error(cgdev);
-		put_device(&cgdev->dev);
-		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
-		return -ENOMEM;
-	}
-	privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
-	privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
-        if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
-                probe_error(cgdev);
-		put_device(&cgdev->dev);
-		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
-                return -ENOMEM;
-        }
-	memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
-	memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
-	memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
-	privptr->p_env->packing = 0;
-	privptr->p_env->write_buffers = 5;
-	privptr->p_env->read_buffers = 5;
-	privptr->p_env->read_size = CLAW_FRAME_SIZE;
-	privptr->p_env->write_size = CLAW_FRAME_SIZE;
-	rc = claw_add_files(&cgdev->dev);
-	if (rc) {
-		probe_error(cgdev);
-		put_device(&cgdev->dev);
-		dev_err(&cgdev->dev, "Creating the /proc files for a new"
-		" CLAW device failed\n");
-		CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
-		return rc;
-	}
-	privptr->p_env->p_priv = privptr;
-        cgdev->cdev[0]->handler = claw_irq_handler;
-	cgdev->cdev[1]->handler = claw_irq_handler;
-	CLAW_DBF_TEXT(2, setup, "prbext 0");
-
-        return 0;
-}  /*  end of claw_probe       */
-
 /*-------------------------------------------------------------------*
  *   claw_tx                                                         *
  *-------------------------------------------------------------------*/
@@ -3093,7 +3026,6 @@
 	dev_info(&cgdev->dev, " will be removed.\n");
 	if (cgdev->state == CCWGROUP_ONLINE)
 		claw_shutdown_device(cgdev);
-	claw_remove_files(&cgdev->dev);
 	kfree(priv->p_mtc_envelope);
 	priv->p_mtc_envelope=NULL;
 	kfree(priv->p_env);
@@ -3321,7 +3253,6 @@
 	CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
 	return count;
 }
-
 static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
 
 static struct attribute *claw_attr[] = {
@@ -3332,40 +3263,73 @@
 	&dev_attr_host_name.attr,
 	NULL,
 };
-
 static struct attribute_group claw_attr_group = {
 	.attrs = claw_attr,
 };
+static const struct attribute_group *claw_attr_groups[] = {
+	&claw_attr_group,
+	NULL,
+};
+static const struct device_type claw_devtype = {
+	.name = "claw",
+	.groups = claw_attr_groups,
+};
 
-static int
-claw_add_files(struct device *dev)
+/*----------------------------------------------------------------*
+ *   claw_probe 						  *
+ *	this function is called for each CLAW device.		  *
+ *----------------------------------------------------------------*/
+static int claw_probe(struct ccwgroup_device *cgdev)
 {
-	CLAW_DBF_TEXT(2, setup, "add_file");
-	return sysfs_create_group(&dev->kobj, &claw_attr_group);
-}
+	struct claw_privbk *privptr = NULL;
 
-static void
-claw_remove_files(struct device *dev)
-{
-	CLAW_DBF_TEXT(2, setup, "rem_file");
-	sysfs_remove_group(&dev->kobj, &claw_attr_group);
-}
+	CLAW_DBF_TEXT(2, setup, "probe");
+	if (!get_device(&cgdev->dev))
+		return -ENODEV;
+	privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
+	dev_set_drvdata(&cgdev->dev, privptr);
+	if (privptr == NULL) {
+		probe_error(cgdev);
+		put_device(&cgdev->dev);
+		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
+		return -ENOMEM;
+	}
+	privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
+	privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
+	if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
+		probe_error(cgdev);
+		put_device(&cgdev->dev);
+		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
+		return -ENOMEM;
+	}
+	memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
+	memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
+	memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
+	privptr->p_env->packing = 0;
+	privptr->p_env->write_buffers = 5;
+	privptr->p_env->read_buffers = 5;
+	privptr->p_env->read_size = CLAW_FRAME_SIZE;
+	privptr->p_env->write_size = CLAW_FRAME_SIZE;
+	privptr->p_env->p_priv = privptr;
+	cgdev->cdev[0]->handler = claw_irq_handler;
+	cgdev->cdev[1]->handler = claw_irq_handler;
+	cgdev->dev.type = &claw_devtype;
+	CLAW_DBF_TEXT(2, setup, "prbext 0");
+
+	return 0;
+}  /*  end of claw_probe       */
 
 /*--------------------------------------------------------------------*
 *    claw_init  and cleanup                                           *
 *---------------------------------------------------------------------*/
 
-static void __exit
-claw_cleanup(void)
+static void __exit claw_cleanup(void)
 {
-	driver_remove_file(&claw_group_driver.driver,
-			   &driver_attr_group);
 	ccwgroup_driver_unregister(&claw_group_driver);
 	ccw_driver_unregister(&claw_ccw_driver);
 	root_device_unregister(claw_root_dev);
 	claw_unregister_debug_facility();
 	pr_info("Driver unloaded\n");
-
 }
 
 /**
@@ -3374,8 +3338,7 @@
  *
  * @return 0 on success, !0 on error.
  */
-static int __init
-claw_init(void)
+static int __init claw_init(void)
 {
 	int ret = 0;
 
@@ -3394,7 +3357,7 @@
 	ret = ccw_driver_register(&claw_ccw_driver);
 	if (ret)
 		goto ccw_err;
-	claw_group_driver.driver.groups = claw_group_attr_groups;
+	claw_group_driver.driver.groups = claw_drv_attr_groups;
 	ret = ccwgroup_driver_register(&claw_group_driver);
 	if (ret)
 		goto ccwgroup_err;
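
The claw conversion above (and the ctcm and lcs conversions that follow) replaces hand-rolled sysfs_create_group()/sysfs_remove_group() calls with a struct device_type whose .groups array the driver core instantiates and tears down automatically once the device is registered. A minimal sketch of the pattern, using a hypothetical "example" attribute:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
static struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};
static const struct attribute_group *example_attr_groups[] = {
	&example_attr_group,
	NULL,
};

/* The driver core creates/removes the files for devices of this type. */
static const struct device_type example_devtype = {
	.name	= "example",
	.groups	= example_attr_groups,
};

/* In the group driver's setup callback, before the device is registered: */
static void example_setup(struct device *dev)
{
	dev->type = &example_devtype;
}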
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 11f3b07..3cd2554 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1296,6 +1296,11 @@
 
 }
 
+static const struct device_type ctcm_devtype = {
+	.name = "ctcm",
+	.groups = ctcm_attr_groups,
+};
+
 /**
  * Add ctcm specific attributes.
  * Add ctcm private data.
@@ -1307,7 +1312,6 @@
 static int ctcm_probe_device(struct ccwgroup_device *cgdev)
 {
 	struct ctcm_priv *priv;
-	int rc;
 
 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
 			"%s %p",
@@ -1324,17 +1328,11 @@
 		put_device(&cgdev->dev);
 		return -ENOMEM;
 	}
-
-	rc = ctcm_add_files(&cgdev->dev);
-	if (rc) {
-		kfree(priv);
-		put_device(&cgdev->dev);
-		return rc;
-	}
 	priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
 	cgdev->cdev[0]->handler = ctcm_irq_handler;
 	cgdev->cdev[1]->handler = ctcm_irq_handler;
 	dev_set_drvdata(&cgdev->dev, priv);
+	cgdev->dev.type = &ctcm_devtype;
 
 	return 0;
 }
@@ -1611,11 +1609,6 @@
 		goto out_dev;
 	}
 
-	if (ctcm_add_attributes(&cgdev->dev)) {
-		result = -ENODEV;
-		goto out_unregister;
-	}
-
 	strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
 
 	dev_info(&dev->dev,
@@ -1629,8 +1622,6 @@
 			priv->channel[CTCM_WRITE]->id, priv->protocol);
 
 	return 0;
-out_unregister:
-	unregister_netdev(dev);
 out_dev:
 	ctcm_free_netdevice(dev);
 out_ccw2:
@@ -1669,7 +1660,6 @@
 		/* Close the device */
 		ctcm_close(dev);
 		dev->flags &= ~IFF_RUNNING;
-		ctcm_remove_attributes(&cgdev->dev);
 		channel_free(priv->channel[CTCM_READ]);
 	} else
 		dev = NULL;
@@ -1711,7 +1701,6 @@
 
 	if (cgdev->state == CCWGROUP_ONLINE)
 		ctcm_shutdown_device(cgdev);
-	ctcm_remove_files(&cgdev->dev);
 	dev_set_drvdata(&cgdev->dev, NULL);
 	kfree(priv);
 	put_device(&cgdev->dev);
@@ -1778,9 +1767,7 @@
 		.owner	= THIS_MODULE,
 		.name	= CTC_DRIVER_NAME,
 	},
-	.max_slaves  = 2,
-	.driver_id   = 0xC3E3C3D4,	/* CTCM */
-	.probe       = ctcm_probe_device,
+	.setup	     = ctcm_probe_device,
 	.remove      = ctcm_remove_device,
 	.set_online  = ctcm_new_device,
 	.set_offline = ctcm_shutdown_device,
@@ -1789,31 +1776,25 @@
 	.restore     = ctcm_pm_resume,
 };
 
-static ssize_t
-ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
-			size_t count)
+static ssize_t ctcm_driver_group_store(struct device_driver *ddrv,
+				       const char *buf,	size_t count)
 {
 	int err;
 
-	err = ccwgroup_create_from_string(ctcm_root_dev,
-					  ctcm_group_driver.driver_id,
-					  &ctcm_ccw_driver, 2, buf);
+	err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf);
 	return err ? err : count;
 }
-
 static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
 
-static struct attribute *ctcm_group_attrs[] = {
+static struct attribute *ctcm_drv_attrs[] = {
 	&driver_attr_group.attr,
 	NULL,
 };
-
-static struct attribute_group ctcm_group_attr_group = {
-	.attrs = ctcm_group_attrs,
+static struct attribute_group ctcm_drv_attr_group = {
+	.attrs = ctcm_drv_attrs,
 };
-
-static const struct attribute_group *ctcm_group_attr_groups[] = {
-	&ctcm_group_attr_group,
+static const struct attribute_group *ctcm_drv_attr_groups[] = {
+	&ctcm_drv_attr_group,
 	NULL,
 };
 
@@ -1829,7 +1810,6 @@
  */
 static void __exit ctcm_exit(void)
 {
-	driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
 	ccwgroup_driver_unregister(&ctcm_group_driver);
 	ccw_driver_unregister(&ctcm_ccw_driver);
 	root_device_unregister(ctcm_root_dev);
@@ -1867,7 +1847,7 @@
 	ret = ccw_driver_register(&ctcm_ccw_driver);
 	if (ret)
 		goto ccw_err;
-	ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
+	ctcm_group_driver.driver.groups = ctcm_drv_attr_groups;
 	ret = ccwgroup_driver_register(&ctcm_group_driver);
 	if (ret)
 		goto ccwgroup_err;
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index 24d5215..b9056a5 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -225,13 +225,7 @@
 int ctcm_open(struct net_device *dev);
 int ctcm_close(struct net_device *dev);
 
-/*
- * prototypes for non-static sysfs functions
- */
-int ctcm_add_attributes(struct device *dev);
-void ctcm_remove_attributes(struct device *dev);
-int ctcm_add_files(struct device *dev);
-void ctcm_remove_files(struct device *dev);
+extern const struct attribute_group *ctcm_attr_groups[];
 
 /*
  * Compatibility macros for busy handling
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 650aec1..0c27ae7 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -13,6 +13,7 @@
 #define KMSG_COMPONENT "ctcm"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/device.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include "ctcm_main.h"
@@ -108,10 +109,12 @@
 }
 
 static ssize_t stats_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
+			  struct device_attribute *attr, char *buf)
 {
+	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
 	struct ctcm_priv *priv = dev_get_drvdata(dev);
-	if (!priv)
+
+	if (!priv || gdev->state != CCWGROUP_ONLINE)
 		return -ENODEV;
 	ctcm_print_statistics(priv);
 	return sprintf(buf, "0\n");
@@ -190,34 +193,14 @@
 	&dev_attr_protocol.attr,
 	&dev_attr_type.attr,
 	&dev_attr_buffer.attr,
+	&dev_attr_stats.attr,
 	NULL,
 };
 
 static struct attribute_group ctcm_attr_group = {
 	.attrs = ctcm_attr,
 };
-
-int ctcm_add_attributes(struct device *dev)
-{
-	int rc;
-
-	rc = device_create_file(dev, &dev_attr_stats);
-
-	return rc;
-}
-
-void ctcm_remove_attributes(struct device *dev)
-{
-	device_remove_file(dev, &dev_attr_stats);
-}
-
-int ctcm_add_files(struct device *dev)
-{
-	return sysfs_create_group(&dev->kobj, &ctcm_attr_group);
-}
-
-void ctcm_remove_files(struct device *dev)
-{
-	sysfs_remove_group(&dev->kobj, &ctcm_attr_group);
-}
-
+const struct attribute_group *ctcm_attr_groups[] = {
+	&ctcm_attr_group,
+	NULL,
+};
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 687efe4..a3adf4b 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -30,7 +30,6 @@
 #include <linux/if.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/trdevice.h>
 #include <linux/fddidevice.h>
 #include <linux/inetdevice.h>
 #include <linux/in.h>
@@ -50,8 +49,7 @@
 #include "lcs.h"
 
 
-#if !defined(CONFIG_ETHERNET) && \
-    !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
+#if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI)
 #error Cannot compile lcs.c without some net devices switched on.
 #endif
 
@@ -1166,10 +1164,7 @@
 lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
 {
 	LCS_DBF_TEXT(4,trace, "getmac");
-	if (dev->type == ARPHRD_IEEE802_TR)
-		ip_tr_mc_map(ipm, mac);
-	else
-		ip_eth_mc_map(ipm, mac);
+	ip_eth_mc_map(ipm, mac);
 }
 
 /**
@@ -1641,12 +1636,6 @@
 		return 0;
 
 #endif
-#ifdef CONFIG_TR
-	card->lan_type = LCS_FRAME_TYPE_TR;
-	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
-	if (rc == 0)
-		return 0;
-#endif
 #ifdef CONFIG_FDDI
 	card->lan_type = LCS_FRAME_TYPE_FDDI;
 	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
@@ -2051,10 +2040,17 @@
 	&dev_attr_recover.attr,
 	NULL,
 };
-
 static struct attribute_group lcs_attr_group = {
 	.attrs = lcs_attrs,
 };
+static const struct attribute_group *lcs_attr_groups[] = {
+	&lcs_attr_group,
+	NULL,
+};
+static const struct device_type lcs_devtype = {
+	.name = "lcs",
+	.groups = lcs_attr_groups,
+};
 
 /**
  * lcs_probe_device is called on establishing a new ccwgroup_device.
@@ -2063,7 +2059,6 @@
 lcs_probe_device(struct ccwgroup_device *ccwgdev)
 {
 	struct lcs_card *card;
-	int ret;
 
 	if (!get_device(&ccwgdev->dev))
 		return -ENODEV;
@@ -2075,12 +2070,6 @@
 		put_device(&ccwgdev->dev);
                 return -ENOMEM;
         }
-	ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
-	if (ret) {
-		lcs_free_card(card);
-		put_device(&ccwgdev->dev);
-		return ret;
-        }
 	dev_set_drvdata(&ccwgdev->dev, card);
 	ccwgdev->cdev[0]->handler = lcs_irq;
 	ccwgdev->cdev[1]->handler = lcs_irq;
@@ -2089,7 +2078,9 @@
 	card->thread_start_mask = 0;
 	card->thread_allowed_mask = 0;
 	card->thread_running_mask = 0;
-        return 0;
+	ccwgdev->dev.type = &lcs_devtype;
+
+	return 0;
 }
 
 static int
@@ -2172,12 +2163,6 @@
 		dev = alloc_etherdev(0);
 		break;
 #endif
-#ifdef CONFIG_TR
-	case LCS_FRAME_TYPE_TR:
-		card->lan_type_trans = tr_type_trans;
-		dev = alloc_trdev(0);
-		break;
-#endif
 #ifdef CONFIG_FDDI
 	case LCS_FRAME_TYPE_FDDI:
 		card->lan_type_trans = fddi_type_trans;
@@ -2323,9 +2308,9 @@
 	}
 	if (card->dev)
 		unregister_netdev(card->dev);
-	sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
 	lcs_cleanup_card(card);
 	lcs_free_card(card);
+	dev_set_drvdata(&ccwgdev->dev, NULL);
 	put_device(&ccwgdev->dev);
 }
 
@@ -2410,9 +2395,7 @@
 		.owner	= THIS_MODULE,
 		.name	= "lcs",
 	},
-	.max_slaves  = 2,
-	.driver_id   = 0xD3C3E2,
-	.probe       = lcs_probe_device,
+	.setup	     = lcs_probe_device,
 	.remove      = lcs_remove_device,
 	.set_online  = lcs_new_device,
 	.set_offline = lcs_shutdown_device,
@@ -2423,30 +2406,24 @@
 	.restore     = lcs_restore,
 };
 
-static ssize_t
-lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
-		       size_t count)
+static ssize_t lcs_driver_group_store(struct device_driver *ddrv,
+				      const char *buf, size_t count)
 {
 	int err;
-	err = ccwgroup_create_from_string(lcs_root_dev,
-					  lcs_group_driver.driver_id,
-					  &lcs_ccw_driver, 2, buf);
+	err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf);
 	return err ? err : count;
 }
-
 static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
 
-static struct attribute *lcs_group_attrs[] = {
+static struct attribute *lcs_drv_attrs[] = {
 	&driver_attr_group.attr,
 	NULL,
 };
-
-static struct attribute_group lcs_group_attr_group = {
-	.attrs = lcs_group_attrs,
+static struct attribute_group lcs_drv_attr_group = {
+	.attrs = lcs_drv_attrs,
 };
-
-static const struct attribute_group *lcs_group_attr_groups[] = {
-	&lcs_group_attr_group,
+static const struct attribute_group *lcs_drv_attr_groups[] = {
+	&lcs_drv_attr_group,
 	NULL,
 };
 
@@ -2470,7 +2447,7 @@
 	rc = ccw_driver_register(&lcs_ccw_driver);
 	if (rc)
 		goto ccw_err;
-	lcs_group_driver.driver.groups = lcs_group_attr_groups;
+	lcs_group_driver.driver.groups = lcs_drv_attr_groups;
 	rc = ccwgroup_driver_register(&lcs_group_driver);
 	if (rc)
 		goto ccwgroup_err;
@@ -2496,8 +2473,6 @@
 {
 	pr_info("Terminating lcs module.\n");
 	LCS_DBF_TEXT(0, trace, "cleanup");
-	driver_remove_file(&lcs_group_driver.driver,
-			   &driver_attr_group);
 	ccwgroup_driver_unregister(&lcs_group_driver);
 	ccw_driver_unregister(&lcs_ccw_driver);
 	root_device_unregister(lcs_root_dev);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index ec7921b..06e8f31 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -13,8 +13,6 @@
 
 #include <linux/if.h>
 #include <linux/if_arp.h>
-#include <linux/if_tr.h>
-#include <linux/trdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/ctype.h>
@@ -676,8 +674,6 @@
 	struct qeth_ipa_info adp; /*Adapter parameters*/
 	struct qeth_routing_info route6;
 	struct qeth_ipa_info ipa6;
-	int broadcast_mode;
-	int macaddr_mode;
 	int fake_broadcast;
 	int add_hhlen;
 	int layer2;
@@ -711,7 +707,16 @@
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
 	int (*recover)(void *ptr);
-	struct ccwgroup_driver *ccwgdriver;
+	int (*setup) (struct ccwgroup_device *);
+	void (*remove) (struct ccwgroup_device *);
+	int (*set_online) (struct ccwgroup_device *);
+	int (*set_offline) (struct ccwgroup_device *);
+	void (*shutdown)(struct ccwgroup_device *);
+	int (*prepare) (struct ccwgroup_device *);
+	void (*complete) (struct ccwgroup_device *);
+	int (*freeze)(struct ccwgroup_device *);
+	int (*thaw) (struct ccwgroup_device *);
+	int (*restore)(struct ccwgroup_device *);
 };
 
 struct qeth_vlan_vid {
@@ -775,7 +780,7 @@
 	struct qeth_perf_stats perf_stats;
 	int read_or_write_problem;
 	struct qeth_osn_info osn_info;
-	struct qeth_discipline discipline;
+	struct qeth_discipline *discipline;
 	atomic_t force_alloc_skb;
 	struct service_level qeth_service_level;
 	struct qdio_ssqd_desc ssqd;
@@ -841,16 +846,15 @@
 	return card->info.diagass_support & (__u32)cmd;
 }
 
-extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
-extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
+extern struct qeth_discipline qeth_l2_discipline;
+extern struct qeth_discipline qeth_l3_discipline;
+extern const struct attribute_group *qeth_generic_attr_groups[];
+extern const struct attribute_group *qeth_osn_attr_groups[];
+
 const char *qeth_get_cardname_short(struct qeth_card *);
 int qeth_realloc_buffer_pool(struct qeth_card *, int);
 int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
 void qeth_core_free_discipline(struct qeth_card *);
-int qeth_core_create_device_attributes(struct device *);
-void qeth_core_remove_device_attributes(struct device *);
-int qeth_core_create_osn_attributes(struct device *);
-void qeth_core_remove_osn_attributes(struct device *);
 void qeth_buffer_reclaim_work(struct work_struct *);
 
 /* exports for qeth discipline device drivers */
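
The qeth_discipline structure above now carries the ccwgroup callbacks as function pointers instead of a nested ccwgroup_driver, and the layer-2/layer-3 modules export their own instance of it. A minimal sketch of this ops-table plus symbol_get() pattern, using hypothetical example_* names (the real lookup is done by qeth_core_load_discipline() in qeth_core_main.c below):

	#include <linux/kmod.h>
	#include <linux/module.h>
	#include <asm/ccwgroup.h>

	struct example_discipline {
		int (*setup)(struct ccwgroup_device *gdev);
		void (*remove)(struct ccwgroup_device *gdev);
	};

	/* Exported (EXPORT_SYMBOL_GPL) by a hypothetical "example_l2" module. */
	extern struct example_discipline example_l2_discipline;

	static int example_load_discipline(struct example_discipline **disc)
	{
		/* Autoload the sub-module if needed and pin its ops table. */
		*disc = try_then_request_module(symbol_get(example_l2_discipline),
						"example_l2");
		return *disc ? 0 : -EINVAL;
	}

	static void example_free_discipline(struct example_discipline **disc)
	{
		symbol_put(example_l2_discipline);	/* drop the module reference */
		*disc = NULL;
	}

The core then only ever calls through the pointer (disc->setup(gdev) and so on), which is what the qeth_core_* wrappers below do after checking card->discipline for NULL.
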
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 120955c..e118e1e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1329,8 +1329,6 @@
 {
 	card->options.route4.type = NO_ROUTER;
 	card->options.route6.type = NO_ROUTER;
-	card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
-	card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
 	card->options.fake_broadcast = 0;
 	card->options.add_hhlen = DEFAULT_ADD_HHLEN;
 	card->options.performance_stats = 0;
@@ -1365,7 +1363,7 @@
 	    card->write.state != CH_STATE_UP)
 		return;
 	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
-		ts = kthread_run(card->discipline.recover, (void *)card,
+		ts = kthread_run(card->discipline->recover, (void *)card,
 				"qeth_recover");
 		if (IS_ERR(ts)) {
 			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
@@ -1672,7 +1670,8 @@
 {
 	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
 
-	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
+	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
+	    (prcd[76] == 0xF5 || prcd[76] == 0xF6)) {
 		card->info.blkt.time_total = 250;
 		card->info.blkt.inter_packet = 5;
 		card->info.blkt.inter_packet_jumbo = 15;
@@ -3338,7 +3337,7 @@
 	if (rc) {
 		queue->card->stats.tx_errors += count;
 		/* ignore temporary SIGA errors without busy condition */
-		if (rc == QDIO_ERROR_SIGA_TARGET)
+		if (rc == -ENOBUFS)
 			return;
 		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
 		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
@@ -3532,7 +3531,7 @@
 	int i;
 
 	QETH_CARD_TEXT(card, 6, "qdouhdl");
-	if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
+	if (qdio_error & QDIO_ERROR_FATAL) {
 		QETH_CARD_TEXT(card, 2, "achkcond");
 		netif_stop_queue(card->dev);
 		qeth_schedule_recovery(card);
@@ -4540,7 +4539,8 @@
 		goto out_offline;
 	}
 	qeth_configure_unitaddr(card, prcd);
-	qeth_configure_blkt_default(card, prcd);
+	if (ddev_offline)
+		qeth_configure_blkt_default(card, prcd);
 	kfree(prcd);
 
 	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
@@ -4627,7 +4627,7 @@
 		goto out_free_in_sbals;
 	}
 	for (i = 0; i < card->qdio.no_in_queues; ++i)
-		queue_start_poll[i] = card->discipline.start_poll;
+		queue_start_poll[i] = card->discipline->start_poll;
 
 	qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
 
@@ -4651,8 +4651,8 @@
 	init_data.qib_param_field        = qib_param_field;
 	init_data.no_input_qs            = card->qdio.no_in_queues;
 	init_data.no_output_qs           = card->qdio.no_out_queues;
-	init_data.input_handler          = card->discipline.input_handler;
-	init_data.output_handler         = card->discipline.output_handler;
+	init_data.input_handler          = card->discipline->input_handler;
+	init_data.output_handler         = card->discipline->output_handler;
 	init_data.queue_start_poll_array = queue_start_poll;
 	init_data.int_parm               = (unsigned long) card;
 	init_data.input_sbal_addr_array  = (void **) in_sbal_ptrs;
@@ -4737,13 +4737,6 @@
 	.remove = ccwgroup_remove_ccwdev,
 };
 
-static int qeth_core_driver_group(const char *buf, struct device *root_dev,
-				unsigned long driver_id)
-{
-	return ccwgroup_create_from_string(root_dev, driver_id,
-					   &qeth_ccw_driver, 3, buf);
-}
-
 int qeth_core_hardsetup_card(struct qeth_card *card)
 {
 	int retries = 0;
@@ -4909,11 +4902,7 @@
 		break;
 	case QETH_HEADER_TYPE_LAYER3:
 		skb_len = (*hdr)->hdr.l3.length;
-		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
-		    (card->info.link_type == QETH_LINK_TYPE_HSTR))
-			headroom = TR_HLEN;
-		else
-			headroom = ETH_HLEN;
+		headroom = ETH_HLEN;
 		break;
 	case QETH_HEADER_TYPE_OSN:
 		skb_len = (*hdr)->hdr.osn.pdu_length;
@@ -5044,17 +5033,15 @@
 	mutex_lock(&qeth_mod_mutex);
 	switch (discipline) {
 	case QETH_DISCIPLINE_LAYER3:
-		card->discipline.ccwgdriver = try_then_request_module(
-			symbol_get(qeth_l3_ccwgroup_driver),
-			"qeth_l3");
+		card->discipline = try_then_request_module(
+			symbol_get(qeth_l3_discipline), "qeth_l3");
 		break;
 	case QETH_DISCIPLINE_LAYER2:
-		card->discipline.ccwgdriver = try_then_request_module(
-			symbol_get(qeth_l2_ccwgroup_driver),
-			"qeth_l2");
+		card->discipline = try_then_request_module(
+			symbol_get(qeth_l2_discipline), "qeth_l2");
 		break;
 	}
-	if (!card->discipline.ccwgdriver) {
+	if (!card->discipline) {
 		dev_err(&card->gdev->dev, "There is no kernel module to "
 			"support discipline %d\n", discipline);
 		rc = -EINVAL;
@@ -5066,12 +5053,21 @@
 void qeth_core_free_discipline(struct qeth_card *card)
 {
 	if (card->options.layer2)
-		symbol_put(qeth_l2_ccwgroup_driver);
+		symbol_put(qeth_l2_discipline);
 	else
-		symbol_put(qeth_l3_ccwgroup_driver);
-	card->discipline.ccwgdriver = NULL;
+		symbol_put(qeth_l3_discipline);
+	card->discipline = NULL;
 }
 
+static const struct device_type qeth_generic_devtype = {
+	.name = "qeth_generic",
+	.groups = qeth_generic_attr_groups,
+};
+static const struct device_type qeth_osn_devtype = {
+	.name = "qeth_osn",
+	.groups = qeth_osn_attr_groups,
+};
+
 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card;
@@ -5126,18 +5122,17 @@
 	}
 
 	if (card->info.type == QETH_CARD_TYPE_OSN)
-		rc = qeth_core_create_osn_attributes(dev);
+		gdev->dev.type = &qeth_osn_devtype;
 	else
-		rc = qeth_core_create_device_attributes(dev);
-	if (rc)
-		goto err_dbf;
+		gdev->dev.type = &qeth_generic_devtype;
+
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_OSN:
 	case QETH_CARD_TYPE_OSM:
 		rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
 		if (rc)
-			goto err_attr;
-		rc = card->discipline.ccwgdriver->probe(card->gdev);
+			goto err_dbf;
+		rc = card->discipline->setup(card->gdev);
 		if (rc)
 			goto err_disc;
 	case QETH_CARD_TYPE_OSD:
@@ -5155,11 +5150,6 @@
 
 err_disc:
 	qeth_core_free_discipline(card);
-err_attr:
-	if (card->info.type == QETH_CARD_TYPE_OSN)
-		qeth_core_remove_osn_attributes(dev);
-	else
-		qeth_core_remove_device_attributes(dev);
 err_dbf:
 	debug_unregister(card->debug);
 err_card:
@@ -5176,14 +5166,8 @@
 
 	QETH_DBF_TEXT(SETUP, 2, "removedv");
 
-	if (card->info.type == QETH_CARD_TYPE_OSN) {
-		qeth_core_remove_osn_attributes(&gdev->dev);
-	} else {
-		qeth_core_remove_device_attributes(&gdev->dev);
-	}
-
-	if (card->discipline.ccwgdriver) {
-		card->discipline.ccwgdriver->remove(gdev);
+	if (card->discipline) {
+		card->discipline->remove(gdev);
 		qeth_core_free_discipline(card);
 	}
 
@@ -5203,7 +5187,7 @@
 	int rc = 0;
 	int def_discipline;
 
-	if (!card->discipline.ccwgdriver) {
+	if (!card->discipline) {
 		if (card->info.type == QETH_CARD_TYPE_IQD)
 			def_discipline = QETH_DISCIPLINE_LAYER3;
 		else
@@ -5211,11 +5195,11 @@
 		rc = qeth_core_load_discipline(card, def_discipline);
 		if (rc)
 			goto err;
-		rc = card->discipline.ccwgdriver->probe(card->gdev);
+		rc = card->discipline->setup(card->gdev);
 		if (rc)
 			goto err;
 	}
-	rc = card->discipline.ccwgdriver->set_online(gdev);
+	rc = card->discipline->set_online(gdev);
 err:
 	return rc;
 }
@@ -5223,58 +5207,52 @@
 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	return card->discipline.ccwgdriver->set_offline(gdev);
+	return card->discipline->set_offline(gdev);
 }
 
 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline.ccwgdriver &&
-	    card->discipline.ccwgdriver->shutdown)
-		card->discipline.ccwgdriver->shutdown(gdev);
+	if (card->discipline && card->discipline->shutdown)
+		card->discipline->shutdown(gdev);
 }
 
 static int qeth_core_prepare(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline.ccwgdriver &&
-	    card->discipline.ccwgdriver->prepare)
-		return card->discipline.ccwgdriver->prepare(gdev);
+	if (card->discipline && card->discipline->prepare)
+		return card->discipline->prepare(gdev);
 	return 0;
 }
 
 static void qeth_core_complete(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline.ccwgdriver &&
-	    card->discipline.ccwgdriver->complete)
-		card->discipline.ccwgdriver->complete(gdev);
+	if (card->discipline && card->discipline->complete)
+		card->discipline->complete(gdev);
 }
 
 static int qeth_core_freeze(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline.ccwgdriver &&
-	    card->discipline.ccwgdriver->freeze)
-		return card->discipline.ccwgdriver->freeze(gdev);
+	if (card->discipline && card->discipline->freeze)
+		return card->discipline->freeze(gdev);
 	return 0;
 }
 
 static int qeth_core_thaw(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline.ccwgdriver &&
-	    card->discipline.ccwgdriver->thaw)
-		return card->discipline.ccwgdriver->thaw(gdev);
+	if (card->discipline && card->discipline->thaw)
+		return card->discipline->thaw(gdev);
 	return 0;
 }
 
 static int qeth_core_restore(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline.ccwgdriver &&
-	    card->discipline.ccwgdriver->restore)
-		return card->discipline.ccwgdriver->restore(gdev);
+	if (card->discipline && card->discipline->restore)
+		return card->discipline->restore(gdev);
 	return 0;
 }
 
@@ -5283,8 +5261,7 @@
 		.owner = THIS_MODULE,
 		.name = "qeth",
 	},
-	.driver_id = 0xD8C5E3C8,
-	.probe = qeth_core_probe_device,
+	.setup = qeth_core_probe_device,
 	.remove = qeth_core_remove_device,
 	.set_online = qeth_core_set_online,
 	.set_offline = qeth_core_set_offline,
@@ -5296,21 +5273,30 @@
 	.restore = qeth_core_restore,
 };
 
-static ssize_t
-qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf,
-			   size_t count)
+static ssize_t qeth_core_driver_group_store(struct device_driver *ddrv,
+					    const char *buf, size_t count)
 {
 	int err;
-	err = qeth_core_driver_group(buf, qeth_core_root_dev,
-					qeth_core_ccwgroup_driver.driver_id);
-	if (err)
-		return err;
-	else
-		return count;
-}
 
+	err = ccwgroup_create_dev(qeth_core_root_dev,
+				  &qeth_core_ccwgroup_driver, 3, buf);
+
+	return err ? err : count;
+}
 static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
 
+static struct attribute *qeth_drv_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+static struct attribute_group qeth_drv_attr_group = {
+	.attrs = qeth_drv_attrs,
+};
+static const struct attribute_group *qeth_drv_attr_groups[] = {
+	&qeth_drv_attr_group,
+	NULL,
+};
+
 static struct {
 	const char str[ETH_GSTRING_LEN];
 } qeth_ethtool_stats_keys[] = {
@@ -5548,49 +5534,41 @@
 	rc = qeth_register_dbf_views();
 	if (rc)
 		goto out_err;
-	rc = ccw_driver_register(&qeth_ccw_driver);
-	if (rc)
-		goto ccw_err;
-	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
-	if (rc)
-		goto ccwgroup_err;
-	rc = driver_create_file(&qeth_core_ccwgroup_driver.driver,
-				&driver_attr_group);
-	if (rc)
-		goto driver_err;
 	qeth_core_root_dev = root_device_register("qeth");
 	rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
 	if (rc)
 		goto register_err;
-
 	qeth_core_header_cache = kmem_cache_create("qeth_hdr",
 			sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
 	if (!qeth_core_header_cache) {
 		rc = -ENOMEM;
 		goto slab_err;
 	}
-
 	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
 			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
 	if (!qeth_qdio_outbuf_cache) {
 		rc = -ENOMEM;
 		goto cqslab_err;
 	}
+	rc = ccw_driver_register(&qeth_ccw_driver);
+	if (rc)
+		goto ccw_err;
+	qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups;
+	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
+	if (rc)
+		goto ccwgroup_err;
 
 	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&qeth_ccw_driver);
+ccw_err:
+	kmem_cache_destroy(qeth_qdio_outbuf_cache);
 cqslab_err:
 	kmem_cache_destroy(qeth_core_header_cache);
 slab_err:
 	root_device_unregister(qeth_core_root_dev);
 register_err:
-	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
-			   &driver_attr_group);
-driver_err:
-	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
-ccwgroup_err:
-	ccw_driver_unregister(&qeth_ccw_driver);
-ccw_err:
-	QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc);
 	qeth_unregister_dbf_views();
 out_err:
 	pr_err("Initializing the qeth device driver failed\n");
@@ -5599,13 +5577,11 @@
 
 static void __exit qeth_core_exit(void)
 {
-	root_device_unregister(qeth_core_root_dev);
-	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
-			   &driver_attr_group);
 	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
 	ccw_driver_unregister(&qeth_ccw_driver);
 	kmem_cache_destroy(qeth_qdio_outbuf_cache);
 	kmem_cache_destroy(qeth_core_header_cache);
+	root_device_unregister(qeth_core_root_dev);
 	qeth_unregister_dbf_views();
 	pr_info("core functions removed\n");
 }
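
The init/exit rework above also stops creating the driver-level "group" attribute by hand with driver_create_file()/driver_remove_file(); the attribute now sits in an attribute_group array assigned to driver.groups before registration, so the driver core creates and removes the sysfs file itself. A minimal sketch of that pattern with hypothetical demo_* names:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t demo_group_store(struct device_driver *ddrv,
					const char *buf, size_t count)
	{
		/* e.g. hand buf to ccwgroup_create_dev(), as done above */
		return count;
	}
	static DRIVER_ATTR(group, 0200, NULL, demo_group_store);

	static struct attribute *demo_drv_attrs[] = {
		&driver_attr_group.attr,
		NULL,
	};
	static struct attribute_group demo_drv_attr_group = {
		.attrs = demo_drv_attrs,
	};
	static const struct attribute_group *demo_drv_attr_groups[] = {
		&demo_drv_attr_group,
		NULL,
	};

	/* before registering the (hypothetical) demo_driver:
	 *	demo_driver.driver.groups = demo_drv_attr_groups;
	 *	return ccwgroup_driver_register(&demo_driver);
	 */

The per-device attributes get the same treatment through device_type (qeth_generic_devtype/qeth_osn_devtype), which removes the error handling that qeth_core_create_device_attributes() used to need.
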
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index ff41e42..a11b30c 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -70,16 +70,6 @@
 	QETH_LINK_TYPE_ATM_NATIVE   = 0x90,
 };
 
-enum qeth_tr_macaddr_modes {
-	QETH_TR_MACADDR_NONCANONICAL = 0,
-	QETH_TR_MACADDR_CANONICAL    = 1,
-};
-
-enum qeth_tr_broadcast_modes {
-	QETH_TR_BROADCAST_ALLRINGS = 0,
-	QETH_TR_BROADCAST_LOCAL    = 1,
-};
-
 /*
  * Routing stuff
  */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 0a8e86c..f163af5 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -434,8 +434,8 @@
 		goto out;
 	else {
 		card->info.mac_bits  = 0;
-		if (card->discipline.ccwgdriver) {
-			card->discipline.ccwgdriver->remove(card->gdev);
+		if (card->discipline) {
+			card->discipline->remove(card->gdev);
 			qeth_core_free_discipline(card);
 		}
 	}
@@ -444,7 +444,7 @@
 	if (rc)
 		goto out;
 
-	rc = card->discipline.ccwgdriver->probe(card->gdev);
+	rc = card->discipline->setup(card->gdev);
 out:
 	mutex_unlock(&card->discipline_mutex);
 	return rc ? rc : count;
@@ -693,7 +693,6 @@
 	&dev_attr_inter_jumbo.attr,
 	NULL,
 };
-
 static struct attribute_group qeth_device_blkt_group = {
 	.name = "blkt",
 	.attrs = qeth_blkt_device_attrs,
@@ -716,11 +715,16 @@
 	&dev_attr_hw_trap.attr,
 	NULL,
 };
-
 static struct attribute_group qeth_device_attr_group = {
 	.attrs = qeth_device_attrs,
 };
 
+const struct attribute_group *qeth_generic_attr_groups[] = {
+	&qeth_device_attr_group,
+	&qeth_device_blkt_group,
+	NULL,
+};
+
 static struct attribute *qeth_osn_device_attrs[] = {
 	&dev_attr_state.attr,
 	&dev_attr_chpid.attr,
@@ -730,37 +734,10 @@
 	&dev_attr_recover.attr,
 	NULL,
 };
-
 static struct attribute_group qeth_osn_device_attr_group = {
 	.attrs = qeth_osn_device_attrs,
 };
-
-int qeth_core_create_device_attributes(struct device *dev)
-{
-	int ret;
-	ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group);
-	if (ret)
-		return ret;
-	ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group);
-	if (ret)
-		sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
-
-	return 0;
-}
-
-void qeth_core_remove_device_attributes(struct device *dev)
-{
-	sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
-	sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
-}
-
-int qeth_core_create_osn_attributes(struct device *dev)
-{
-	return sysfs_create_group(&dev->kobj, &qeth_osn_device_attr_group);
-}
-
-void qeth_core_remove_osn_attributes(struct device *dev)
-{
-	sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
-	return;
-}
+const struct attribute_group *qeth_osn_attr_groups[] = {
+	&qeth_osn_device_attr_group,
+	NULL,
+};
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0e7c29d..4269865 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -882,12 +882,6 @@
 	INIT_LIST_HEAD(&card->mc_list);
 	card->options.layer2 = 1;
 	card->info.hwtrap = 0;
-	card->discipline.start_poll = qeth_qdio_start_poll;
-	card->discipline.input_handler = (qdio_handler_t *)
-		qeth_qdio_input_handler;
-	card->discipline.output_handler = (qdio_handler_t *)
-		qeth_qdio_output_handler;
-	card->discipline.recover = qeth_l2_recover;
 	return 0;
 }
 
@@ -1227,8 +1221,12 @@
 	return rc;
 }
 
-struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
-	.probe = qeth_l2_probe_device,
+struct qeth_discipline qeth_l2_discipline = {
+	.start_poll = qeth_qdio_start_poll,
+	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
+	.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
+	.recover = qeth_l2_recover,
+	.setup = qeth_l2_probe_device,
 	.remove = qeth_l2_remove_device,
 	.set_online = qeth_l2_set_online,
 	.set_offline = qeth_l2_set_offline,
@@ -1237,7 +1235,7 @@
 	.thaw = qeth_l2_pm_resume,
 	.restore = qeth_l2_pm_resume,
 };
-EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);
+EXPORT_SYMBOL_GPL(qeth_l2_discipline);
 
 static int qeth_osn_send_control_data(struct qeth_card *card, int len,
 			   struct qeth_cmd_buffer *iob)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index f859216..7be5e97 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -976,57 +976,6 @@
 	return ct | QETH_CAST_UNICAST;
 }
 
-static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
-					__u32 mode)
-{
-	int rc;
-	struct qeth_cmd_buffer *iob;
-	struct qeth_ipa_cmd *cmd;
-
-	QETH_CARD_TEXT(card, 4, "adpmode");
-
-	iob = qeth_get_adapter_cmd(card, command,
-				   sizeof(struct qeth_ipacmd_setadpparms));
-	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-	cmd->data.setadapterparms.data.mode = mode;
-	rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
-			       NULL);
-	return rc;
-}
-
-static int qeth_l3_setadapter_hstr(struct qeth_card *card)
-{
-	int rc;
-
-	QETH_CARD_TEXT(card, 4, "adphstr");
-
-	if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
-		rc = qeth_l3_send_setadp_mode(card,
-					IPA_SETADP_SET_BROADCAST_MODE,
-					card->options.broadcast_mode);
-		if (rc)
-			QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on "
-				   "device %s: x%x\n",
-				   CARD_BUS_ID(card), rc);
-		rc = qeth_l3_send_setadp_mode(card,
-					IPA_SETADP_ALTER_MAC_ADDRESS,
-					card->options.macaddr_mode);
-		if (rc)
-			QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on "
-				   "device %s: x%x\n", CARD_BUS_ID(card), rc);
-		return rc;
-	}
-	if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
-		QETH_DBF_MESSAGE(2, "set adapter parameters not available "
-			   "to set broadcast mode, using ALLRINGS "
-			   "on device %s:\n", CARD_BUS_ID(card));
-	if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
-		QETH_DBF_MESSAGE(2, "set adapter parameters not available "
-			   "to set macaddr mode, using NONCANONICAL "
-			   "on device %s:\n", CARD_BUS_ID(card));
-	return 0;
-}
-
 static int qeth_l3_setadapter_parms(struct qeth_card *card)
 {
 	int rc;
@@ -1052,10 +1001,6 @@
 				" address failed\n");
 	}
 
-	if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
-	    (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
-		rc = qeth_l3_setadapter_hstr(card);
-
 	return rc;
 }
 
@@ -1671,10 +1616,7 @@
 static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
 				struct net_device *dev)
 {
-	if (dev->type == ARPHRD_IEEE802_TR)
-		ip_tr_mc_map(ipm, mac);
-	else
-		ip_eth_mc_map(ipm, mac);
+	ip_eth_mc_map(ipm, mac);
 }
 
 static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
@@ -1922,8 +1864,6 @@
 #endif
 			case __constant_htons(ETH_P_IP):
 				ip_hdr = (struct iphdr *)skb->data;
-				(card->dev->type == ARPHRD_IEEE802_TR) ?
-				ip_tr_mc_map(ip_hdr->daddr, tg_addr):
 				ip_eth_mc_map(ip_hdr->daddr, tg_addr);
 				break;
 			default:
@@ -1959,12 +1899,7 @@
 				tg_addr, "FAKELL", card->dev->addr_len);
 	}
 
-#ifdef CONFIG_TR
-	if (card->dev->type == ARPHRD_IEEE802_TR)
-		skb->protocol = tr_type_trans(skb, card->dev);
-	else
-#endif
-		skb->protocol = eth_type_trans(skb, card->dev);
+	skb->protocol = eth_type_trans(skb, card->dev);
 
 	if (hdr->hdr.l3.ext_flags &
 	    (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
@@ -2138,7 +2073,7 @@
 		struct net_device *netdev;
 
 		rcu_read_lock();
-		netdev = __vlan_find_dev_deep(dev, vid);
+		netdev = __vlan_find_dev_deep(card->dev, vid);
 		rcu_read_unlock();
 		if (netdev == dev) {
 			rc = QETH_VLAN_CARD;
@@ -2883,13 +2818,7 @@
 			hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
 		memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
 	} else {
-		/* passthrough */
-		if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
-			!memcmp(skb->data + sizeof(struct qeth_hdr) +
-			sizeof(__u16), skb->dev->broadcast, 6)) {
-			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
-						QETH_HDR_PASSTHRU;
-		} else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
+		if (!memcmp(skb->data + sizeof(struct qeth_hdr),
 			    skb->dev->broadcast, 6)) {
 			/* broadcast? */
 			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
@@ -3031,10 +2960,7 @@
 			skb_pull(new_skb, ETH_HLEN);
 	} else {
 		if (ipv == 4) {
-			if (card->dev->type == ARPHRD_IEEE802_TR)
-				skb_pull(new_skb, TR_HLEN);
-			else
-				skb_pull(new_skb, ETH_HLEN);
+			skb_pull(new_skb, ETH_HLEN);
 		}
 
 		if (ipv != 4 && vlan_tx_tag_present(new_skb)) {
@@ -3318,12 +3244,8 @@
 	    card->info.type == QETH_CARD_TYPE_OSX) {
 		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
 		    (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
-#ifdef CONFIG_TR
-			card->dev = alloc_trdev(0);
-#endif
-			if (!card->dev)
-				return -ENODEV;
-			card->dev->netdev_ops = &qeth_l3_netdev_ops;
+			pr_info("qeth_l3: ignoring TR device\n");
+			return -ENODEV;
 		} else {
 			card->dev = alloc_etherdev(0);
 			if (!card->dev)
@@ -3376,12 +3298,6 @@
 	qeth_l3_create_device_attributes(&gdev->dev);
 	card->options.layer2 = 0;
 	card->info.hwtrap = 0;
-	card->discipline.start_poll = qeth_qdio_start_poll;
-	card->discipline.input_handler = (qdio_handler_t *)
-		qeth_qdio_input_handler;
-	card->discipline.output_handler = (qdio_handler_t *)
-		qeth_qdio_output_handler;
-	card->discipline.recover = qeth_l3_recover;
 	return 0;
 }
 
@@ -3656,8 +3572,12 @@
 	return rc;
 }
 
-struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
-	.probe = qeth_l3_probe_device,
+struct qeth_discipline qeth_l3_discipline = {
+	.start_poll = qeth_qdio_start_poll,
+	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
+	.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
+	.recover = qeth_l3_recover,
+	.setup = qeth_l3_probe_device,
 	.remove = qeth_l3_remove_device,
 	.set_online = qeth_l3_set_online,
 	.set_offline = qeth_l3_set_offline,
@@ -3666,7 +3586,7 @@
 	.thaw = qeth_l3_pm_resume,
 	.restore = qeth_l3_pm_resume,
 };
-EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver);
+EXPORT_SYMBOL_GPL(qeth_l3_discipline);
 
 static int qeth_l3_ip_event(struct notifier_block *this,
 			    unsigned long event, void *ptr)
@@ -3680,9 +3600,9 @@
 		return NOTIFY_DONE;
 
 	card = qeth_l3_get_card_from_dev(dev);
-	QETH_CARD_TEXT(card, 3, "ipevent");
 	if (!card)
 		return NOTIFY_DONE;
+	QETH_CARD_TEXT(card, 3, "ipevent");
 
 	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
 	if (addr != NULL) {
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index d979bb2..4cafedf 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -175,116 +175,6 @@
 static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
 		   qeth_l3_dev_fake_broadcast_store);
 
-static ssize_t qeth_l3_dev_broadcast_mode_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
-	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
-		return sprintf(buf, "n/a\n");
-
-	return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
-				     QETH_TR_BROADCAST_ALLRINGS)?
-		       "all rings":"local");
-}
-
-static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct qeth_card *card = dev_get_drvdata(dev);
-	char *tmp;
-	int rc = 0;
-
-	if (!card)
-		return -EINVAL;
-
-	mutex_lock(&card->conf_mutex);
-	if ((card->state != CARD_STATE_DOWN) &&
-	    (card->state != CARD_STATE_RECOVER)) {
-		rc = -EPERM;
-		goto out;
-	}
-
-	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
-	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
-		rc = -EINVAL;
-		goto out;
-	}
-
-	tmp = strsep((char **) &buf, "\n");
-
-	if (!strcmp(tmp, "local"))
-		card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
-	else if (!strcmp(tmp, "all_rings"))
-		card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
-	else
-		rc = -EINVAL;
-out:
-	mutex_unlock(&card->conf_mutex);
-	return rc ? rc : count;
-}
-
-static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show,
-		   qeth_l3_dev_broadcast_mode_store);
-
-static ssize_t qeth_l3_dev_canonical_macaddr_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
-	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
-		return sprintf(buf, "n/a\n");
-
-	return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
-				     QETH_TR_MACADDR_CANONICAL)? 1:0);
-}
-
-static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct qeth_card *card = dev_get_drvdata(dev);
-	char *tmp;
-	int i, rc = 0;
-
-	if (!card)
-		return -EINVAL;
-
-	mutex_lock(&card->conf_mutex);
-	if ((card->state != CARD_STATE_DOWN) &&
-	    (card->state != CARD_STATE_RECOVER)) {
-		rc = -EPERM;
-		goto out;
-	}
-
-	if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
-	      (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
-		rc = -EINVAL;
-		goto out;
-	}
-
-	i = simple_strtoul(buf, &tmp, 16);
-	if ((i == 0) || (i == 1))
-		card->options.macaddr_mode = i?
-			QETH_TR_MACADDR_CANONICAL :
-			QETH_TR_MACADDR_NONCANONICAL;
-	else
-		rc = -EINVAL;
-out:
-	mutex_unlock(&card->conf_mutex);
-	return rc ? rc : count;
-}
-
-static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
-		   qeth_l3_dev_canonical_macaddr_store);
-
 static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -458,8 +348,6 @@
 	&dev_attr_route4.attr,
 	&dev_attr_route6.attr,
 	&dev_attr_fake_broadcast.attr,
-	&dev_attr_broadcast_mode.attr,
-	&dev_attr_canonical_macaddr.attr,
 	&dev_attr_sniffer.attr,
 	&dev_attr_hsuid.attr,
 	NULL,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 29684c8..bea04e5 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -408,6 +408,7 @@
 config SCSI_HPSA
 	tristate "HP Smart Array SCSI driver"
 	depends on PCI && SCSI
+	select CHECK_SIGNATURE
 	help
 	  This driver supports HP Smart Array Controllers (circa 2009).
 	  It is a SCSI alternative to the cciss driver, which is a block
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2bee515..7628206 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -424,6 +424,8 @@
 static int aac_src_ioremap(struct aac_dev *dev, u32 size)
 {
 	if (!size) {
+		iounmap(dev->regs.src.bar1);
+		dev->regs.src.bar1 = NULL;
 		iounmap(dev->regs.src.bar0);
 		dev->base = dev->regs.src.bar0 = NULL;
 		return 0;
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 04a154f..df740cb 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -572,7 +572,7 @@
 }
 
 
-int __init atari_scsi_detect(struct scsi_host_template *host)
+static int __init atari_scsi_detect(struct scsi_host_template *host)
 {
 	static int called = 0;
 	struct Scsi_Host *instance;
@@ -724,7 +724,7 @@
 	return 1;
 }
 
-int atari_scsi_release(struct Scsi_Host *sh)
+static int atari_scsi_release(struct Scsi_Host *sh)
 {
 	if (IS_A_TT())
 		free_irq(IRQ_TT_MFP_SCSI, sh);
@@ -734,17 +734,21 @@
 	return 1;
 }
 
-void __init atari_scsi_setup(char *str, int *ints)
+#ifndef MODULE
+static int __init atari_scsi_setup(char *str)
 {
 	/* Format of atascsi parameter is:
 	 *   atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
 	 * Defaults depend on TT or Falcon, hostid determined at run time.
 	 * Negative values mean don't change.
 	 */
+	int ints[6];
+
+	get_options(str, ARRAY_SIZE(ints), ints);
 
 	if (ints[0] < 1) {
 		printk("atari_scsi_setup: no arguments!\n");
-		return;
+		return 0;
 	}
 
 	if (ints[0] >= 1) {
@@ -777,9 +781,14 @@
 			setup_use_tagged_queuing = !!ints[5];
 	}
 #endif
+
+	return 1;
 }
 
-int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
+__setup("atascsi=", atari_scsi_setup);
+#endif /* !MODULE */
+
+static int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
 {
 	int rv;
 	struct NCR5380_hostdata *hostdata =
@@ -852,7 +861,7 @@
 #endif
 
 
-const char *atari_scsi_info(struct Scsi_Host *host)
+static const char *atari_scsi_info(struct Scsi_Host *host)
 {
 	/* atari_scsi_detect() is verbose enough... */
 	static const char string[] = "Atari native SCSI";
@@ -862,8 +871,9 @@
 
 #if defined(REAL_DMA)
 
-unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, void *data,
-				   unsigned long count, int dir)
+static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
+					  void *data, unsigned long count,
+					  int dir)
 {
 	unsigned long addr = virt_to_phys(data);
 
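atari_scsi now registers its own boot-option handler with __setup() and parses the comma-separated integers via get_options(), instead of exporting atari_scsi_setup(char *, int *) for someone else to call. A stand-alone sketch of that kernel-parameter pattern (the option name "demoopt" and the array size are illustrative only):

	#include <linux/init.h>
	#include <linux/kernel.h>

	static int demo_ints[6];	/* demo_ints[0] = number of values parsed */

	#ifndef MODULE
	static int __init demo_setup(char *str)
	{
		/* Fills demo_ints[1..n] from "demoopt=a,b,c,..."; [0] holds n. */
		get_options(str, ARRAY_SIZE(demo_ints), demo_ints);
		return 1;	/* option handled */
	}
	__setup("demoopt=", demo_setup);
	#endif /* !MODULE */

With such a hook in place, a command line like atascsi=16,8,255,7,1 (values chosen purely as an example of the <can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> format described in the comment above) ends up in ints[1]..ints[5].
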
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h
index efadb8d..bd52df7 100644
--- a/drivers/scsi/atari_scsi.h
+++ b/drivers/scsi/atari_scsi.h
@@ -18,11 +18,6 @@
 /* (I_HAVE_OVERRUNS stuff removed) */
 
 #ifndef ASM
-int atari_scsi_detect (struct scsi_host_template *);
-const char *atari_scsi_info (struct Scsi_Host *);
-int atari_scsi_reset (Scsi_Cmnd *, unsigned int);
-int atari_scsi_release (struct Scsi_Host *);
-
 /* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. Higher
  * values should work, too; try it! (but cmd_per_lun costs memory!) */
 
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 1d7b976..a50b6a9 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -132,10 +132,6 @@
 		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +	\
 			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
 
-/* Byte offset into the page corresponding to given address */
-#define OFFSET_IN_PAGE(addr)						\
-		((size_t)(addr) & (PAGE_SIZE_4K-1))
-
 /* Returns bit offset within a DWORD of a bitfield */
 #define AMAP_BIT_OFFSET(_struct, field)					\
 		(((size_t)&(((_struct *)0)->field))%32)
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index cdb1536..d2e9e93 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -15,6 +15,8 @@
  * Costa Mesa, CA 92626
  */
 
+#include <scsi/iscsi_proto.h>
+
 #include "be.h"
 #include "be_mgmt.h"
 #include "be_main.h"
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 8b40a5b4..b0b36c6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -23,7 +23,7 @@
  * firmware in the BE. These requests are communicated to the processor
  * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
  * WRB inside a MAILBOX.
- * The commands are serviced by the ARM processor in the BladeEngine's MPU.
+ * The commands are serviced by the ARM processor in the OneConnect's MPU.
  */
 struct be_sge {
 	u32 pa_lo;
@@ -163,7 +163,8 @@
 #define OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES        3
 #define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG		7
 #define OPCODE_COMMON_ISCSI_NTWK_SET_VLAN		14
-#define OPCODE_COMMON_ISCSI_NTWK_CONFIGURE_STATELESS_IP_ADDR	17
+#define OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR	17
+#define OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR	18
 #define OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR		21
 #define OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY	22
 #define OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY 23
@@ -274,15 +275,15 @@
 	struct	mgmt_auth_method_format auth_data;
 } __packed;
 
-struct ip_address_format {
+struct ip_addr_format {
 	u16 size_of_structure;
 	u8 reserved;
 	u8 ip_type;
-	u8 ip_address[16];
+	u8 addr[16];
 	u32 rsvd0;
 } __packed;
 
-struct	mgmt_conn_info {
+struct mgmt_conn_info {
 	u32	connection_handle;
 	u32	connection_status;
 	u16	src_port;
@@ -290,9 +291,9 @@
 	u16	dest_port_redirected;
 	u16	cid;
 	u32	estimated_throughput;
-	struct	ip_address_format	src_ipaddr;
-	struct	ip_address_format	dest_ipaddr;
-	struct	ip_address_format	dest_ipaddr_redirected;
+	struct	ip_addr_format	src_ipaddr;
+	struct	ip_addr_format	dest_ipaddr;
+	struct	ip_addr_format	dest_ipaddr_redirected;
 	struct	mgmt_conn_login_options	negotiated_login_options;
 } __packed;
 
@@ -322,43 +323,115 @@
 	struct	mgmt_conn_info	conn_list[1];
 } __packed;
 
-struct  be_cmd_req_get_session {
+struct be_cmd_get_session_req {
 	struct be_cmd_req_hdr hdr;
 	u32 session_handle;
 } __packed;
 
-struct  be_cmd_resp_get_session {
+struct be_cmd_get_session_resp {
 	struct be_cmd_resp_hdr hdr;
 	struct mgmt_session_info session_info;
 } __packed;
 
 struct mac_addr {
-	u16 size_of_struct;
+	u16 size_of_structure;
 	u8 addr[ETH_ALEN];
 } __packed;
 
-struct be_cmd_req_get_boot_target {
+struct be_cmd_get_boot_target_req {
 	struct be_cmd_req_hdr hdr;
 } __packed;
 
-struct be_cmd_resp_get_boot_target {
+struct be_cmd_get_boot_target_resp {
 	struct be_cmd_resp_hdr hdr;
 	u32  boot_session_count;
 	int  boot_session_handle;
 };
 
-struct be_cmd_req_mac_query {
+struct be_cmd_mac_query_req {
 	struct be_cmd_req_hdr hdr;
 	u8 type;
 	u8 permanent;
 	u16 if_id;
 } __packed;
 
-struct be_cmd_resp_mac_query {
+struct be_cmd_get_mac_resp {
 	struct be_cmd_resp_hdr hdr;
 	struct mac_addr mac;
 };
 
+struct be_ip_addr_subnet_format {
+	u16 size_of_structure;
+	u8 ip_type;
+	u8 ipv6_prefix_length;
+	u8 addr[16];
+	u8 subnet_mask[16];
+	u32 rsvd0;
+} __packed;
+
+struct be_cmd_get_if_info_req {
+	struct be_cmd_req_hdr hdr;
+	u32 interface_hndl;
+	u32 ip_type;
+} __packed;
+
+struct be_cmd_get_if_info_resp {
+	struct be_cmd_req_hdr hdr;
+	u32 interface_hndl;
+	u32 vlan_priority;
+	u32 ip_addr_count;
+	u32 dhcp_state;
+	struct be_ip_addr_subnet_format ip_addr;
+} __packed;
+
+struct be_ip_addr_record {
+	u32 action;
+	u32 interface_hndl;
+	struct be_ip_addr_subnet_format ip_addr;
+	u32 status;
+} __packed;
+
+struct be_ip_addr_record_params {
+	u32 record_entry_count;
+	struct be_ip_addr_record ip_record;
+} __packed;
+
+struct be_cmd_set_ip_addr_req {
+	struct be_cmd_req_hdr hdr;
+	struct be_ip_addr_record_params ip_params;
+} __packed;
+
+struct be_cmd_set_dhcp_req {
+	struct be_cmd_req_hdr hdr;
+	u32 interface_hndl;
+	u32 ip_type;
+	u32 flags;
+	u32 retry_count;
+} __packed;
+
+struct be_cmd_rel_dhcp_req {
+	struct be_cmd_req_hdr hdr;
+	u32 interface_hndl;
+	u32 ip_type;
+} __packed;
+
+struct be_cmd_set_def_gateway_req {
+	struct be_cmd_req_hdr hdr;
+	u32 action;
+	struct ip_addr_format ip_addr;
+} __packed;
+
+struct be_cmd_get_def_gateway_req {
+	struct be_cmd_req_hdr hdr;
+	u32 ip_type;
+} __packed;
+
+struct be_cmd_get_def_gateway_resp {
+	struct be_cmd_req_hdr hdr;
+	struct ip_addr_format ip_addr;
+} __packed;
+
 /******************** Create CQ ***************************/
 /**
  * Pseudo amap definition in which each bit of the actual structure is defined
@@ -489,7 +562,7 @@
 
 #define ETH_ALEN	6
 
-struct be_cmd_req_get_mac_addr {
+struct be_cmd_get_nic_conf_req {
 	struct be_cmd_req_hdr hdr;
 	u32 nic_port_count;
 	u32 speed;
@@ -501,7 +574,7 @@
 	u32 rsvd[23];
 };
 
-struct be_cmd_resp_get_mac_addr {
+struct be_cmd_get_nic_conf_resp {
 	struct be_cmd_resp_hdr hdr;
 	u32 nic_port_count;
 	u32 speed;
@@ -513,6 +586,39 @@
 	u32 rsvd[23];
 };
 
+#define BEISCSI_ALIAS_LEN 32
+
+struct be_cmd_hba_name {
+	struct be_cmd_req_hdr hdr;
+	u16 flags;
+	u16 rsvd0;
+	u8 initiator_name[ISCSI_NAME_LEN];
+	u8 initiator_alias[BEISCSI_ALIAS_LEN];
+} __packed;
+
+struct be_cmd_ntwk_link_status_req {
+	struct be_cmd_req_hdr hdr;
+	u32 rsvd0;
+} __packed;
+
+/*** Port Speed Values ***/
+#define BE2ISCSI_LINK_SPEED_ZERO	0x00
+#define BE2ISCSI_LINK_SPEED_10MBPS	0x01
+#define BE2ISCSI_LINK_SPEED_100MBPS	0x02
+#define BE2ISCSI_LINK_SPEED_1GBPS	0x03
+#define BE2ISCSI_LINK_SPEED_10GBPS	0x04
+struct be_cmd_ntwk_link_status_resp {
+	struct be_cmd_resp_hdr hdr;
+	u8 phys_port;
+	u8 mac_duplex;
+	u8 mac_speed;
+	u8 mac_fault;
+	u8 mgmt_mac_duplex;
+	u8 mgmt_mac_speed;
+	u16 qos_link_speed;
+	u32 logical_link_speed;
+} __packed;
+
 int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
 			  struct be_queue_info *eq, int eq_delay);
 
@@ -530,11 +636,8 @@
 int be_poll_mcc(struct be_ctrl_info *ctrl);
 int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
 				      struct beiscsi_hba *phba);
-unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
-unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba);
-unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba,
-				  u32 boot_session_handle,
-				  struct be_dma_mem *nonemb_cmd);
+unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
+unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
 
 void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
 /*ISCSI Functions */
@@ -715,7 +818,7 @@
 
 struct tcp_connect_and_offload_in {
 	struct be_cmd_req_hdr hdr;
-	struct ip_address_format ip_address;
+	struct ip_addr_format ip_address;
 	u16 tcp_port;
 	u16 cid;
 	u16 cq_id;
@@ -792,13 +895,14 @@
 	u32 function_caps;
 } __packed;
 
-struct be_all_if_id {
+struct be_cmd_get_all_if_id_req {
 	struct be_cmd_req_hdr hdr;
 	u32 if_count;
 	u32 if_hndl_list[1];
 } __packed;
 
 #define ISCSI_OPCODE_SCSI_DATA_OUT		5
+#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
 #define OPCODE_COMMON_MODIFY_EQ_DELAY		41
 #define OPCODE_COMMON_ISCSI_CLEANUP		59
 #define	OPCODE_COMMON_TCP_UPLOAD		56
@@ -810,6 +914,8 @@
 #define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
 #define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
 #define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET	52
+#define OPCODE_COMMON_WRITE_FLASH		96
+#define OPCODE_COMMON_READ_FLASH		97
 
 /* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
 #define CMD_ISCSI_COMMAND_INVALIDATE		1
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 33c8f09..43f3503 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -23,6 +23,8 @@
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_netlink.h>
+#include <net/netlink.h>
 #include <scsi/scsi.h>
 
 #include "be_iscsi.h"
@@ -207,6 +209,301 @@
 	return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
 }
 
+static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
+{
+	if (phba->ipv4_iface)
+		return 0;
+
+	phba->ipv4_iface = iscsi_create_iface(phba->shost,
+					      &beiscsi_iscsi_transport,
+					      ISCSI_IFACE_TYPE_IPV4,
+					      0, 0);
+	if (!phba->ipv4_iface) {
+		shost_printk(KERN_ERR, phba->shost, "Could not "
+			     "create default IPv4 address.\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
+{
+	if (phba->ipv6_iface)
+		return 0;
+
+	phba->ipv6_iface = iscsi_create_iface(phba->shost,
+					      &beiscsi_iscsi_transport,
+					      ISCSI_IFACE_TYPE_IPV6,
+					      0, 0);
+	if (!phba->ipv6_iface) {
+		shost_printk(KERN_ERR, phba->shost, "Could not "
+			     "create default IPv6 address.\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
+{
+	struct be_cmd_get_if_info_resp if_info;
+
+	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info))
+		beiscsi_create_ipv4_iface(phba);
+
+	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info))
+		beiscsi_create_ipv6_iface(phba);
+}
+
+void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
+{
+	if (phba->ipv6_iface)
+		iscsi_destroy_iface(phba->ipv6_iface);
+	if (phba->ipv4_iface)
+		iscsi_destroy_iface(phba->ipv4_iface);
+}
+
+static int
+beiscsi_set_static_ip(struct Scsi_Host *shost,
+		struct iscsi_iface_param_info *iface_param,
+		void *data, uint32_t dt_len)
+{
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct iscsi_iface_param_info *iface_ip = NULL;
+	struct iscsi_iface_param_info *iface_subnet = NULL;
+	struct nlattr *nla;
+	int ret;
+
+	switch (iface_param->param) {
+	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+		if (nla)
+			iface_ip = nla_data(nla);
+
+		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+		if (nla)
+			iface_subnet = nla_data(nla);
+		break;
+	case ISCSI_NET_PARAM_IPV4_ADDR:
+		iface_ip = iface_param;
+		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+		if (nla)
+			iface_subnet = nla_data(nla);
+		break;
+	case ISCSI_NET_PARAM_IPV4_SUBNET:
+		iface_subnet = iface_param;
+		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+		if (nla)
+			iface_ip = nla_data(nla);
+		break;
+	default:
+		shost_printk(KERN_ERR, shost, "Unsupported param %d\n",
+			     iface_param->param);
+	}
+
+	if (!iface_ip || !iface_subnet) {
+		shost_printk(KERN_ERR, shost, "IP and Subnet Mask required\n");
+		return -EINVAL;
+	}
+
+	ret = mgmt_set_ip(phba, iface_ip, iface_subnet,
+			ISCSI_BOOTPROTO_STATIC);
+
+	return ret;
+}
+
+static int
+beiscsi_set_ipv4(struct Scsi_Host *shost,
+		struct iscsi_iface_param_info *iface_param,
+		void *data, uint32_t dt_len)
+{
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	int ret = 0;
+
+	/* Check the param */
+	switch (iface_param->param) {
+	case ISCSI_NET_PARAM_IPV4_GW:
+		ret = mgmt_set_gateway(phba, iface_param);
+		break;
+	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
+			ret = mgmt_set_ip(phba, iface_param,
+					NULL, ISCSI_BOOTPROTO_DHCP);
+		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
+			ret = beiscsi_set_static_ip(shost, iface_param,
+						    data, dt_len);
+		else
+			shost_printk(KERN_ERR, shost, "Invalid BOOTPROTO: %d\n",
+					iface_param->value[0]);
+		break;
+	case ISCSI_NET_PARAM_IFACE_ENABLE:
+		if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
+			ret = beiscsi_create_ipv4_iface(phba);
+		else
+			iscsi_destroy_iface(phba->ipv4_iface);
+		break;
+	case ISCSI_NET_PARAM_IPV4_SUBNET:
+	case ISCSI_NET_PARAM_IPV4_ADDR:
+		ret = beiscsi_set_static_ip(shost, iface_param,
+					    data, dt_len);
+		break;
+	default:
+		shost_printk(KERN_ERR, shost, "Param %d not supported\n",
+			     iface_param->param);
+	}
+
+	return ret;
+}
+
+static int
+beiscsi_set_ipv6(struct Scsi_Host *shost,
+		struct iscsi_iface_param_info *iface_param,
+		void *data, uint32_t dt_len)
+{
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	int ret = 0;
+
+	switch (iface_param->param) {
+	case ISCSI_NET_PARAM_IFACE_ENABLE:
+		if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
+			ret = beiscsi_create_ipv6_iface(phba);
+		else {
+			iscsi_destroy_iface(phba->ipv6_iface);
+			ret = 0;
+		}
+		break;
+	case ISCSI_NET_PARAM_IPV6_ADDR:
+		ret = mgmt_set_ip(phba, iface_param, NULL,
+				  ISCSI_BOOTPROTO_STATIC);
+		break;
+	default:
+		shost_printk(KERN_ERR, shost, "Param %d not supported\n",
+			     iface_param->param);
+	}
+
+	return ret;
+}
+
+int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+		void *data, uint32_t dt_len)
+{
+	struct iscsi_iface_param_info *iface_param = NULL;
+	struct nlattr *attrib;
+	uint32_t rm_len = dt_len;
+	int ret = 0;
+
+	nla_for_each_attr(attrib, data, dt_len, rm_len) {
+		iface_param = nla_data(attrib);
+
+		if (iface_param->param_type != ISCSI_NET_PARAM)
+			continue;
+
+		/*
+		 * BE2ISCSI only supports 1 interface
+		 */
+		if (iface_param->iface_num) {
+			shost_printk(KERN_ERR, shost, "Invalid iface_num %d."
+				     "Only iface_num 0 is supported.\n",
+				     iface_param->iface_num);
+			return -EINVAL;
+		}
+
+		switch (iface_param->iface_type) {
+		case ISCSI_IFACE_TYPE_IPV4:
+			ret = beiscsi_set_ipv4(shost, iface_param,
+					       data, dt_len);
+			break;
+		case ISCSI_IFACE_TYPE_IPV6:
+			ret = beiscsi_set_ipv6(shost, iface_param,
+					       data, dt_len);
+			break;
+		default:
+			shost_printk(KERN_ERR, shost,
+				     "Invalid iface type :%d passed\n",
+				     iface_param->iface_type);
+			break;
+		}
+
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
+		struct iscsi_iface *iface, int param,
+		char *buf)
+{
+	struct be_cmd_get_if_info_resp if_info;
+	int len, ip_type = BE2_IPV4;
+
+	memset(&if_info, 0, sizeof(if_info));
+
+	if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+		ip_type = BE2_IPV6;
+
+	len = mgmt_get_if_info(phba, ip_type, &if_info);
+	if (len)
+		return len;
+
+	switch (param) {
+	case ISCSI_NET_PARAM_IPV4_ADDR:
+		len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr);
+		break;
+	case ISCSI_NET_PARAM_IPV6_ADDR:
+		len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr);
+		break;
+	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+		if (!if_info.dhcp_state)
+			len = sprintf(buf, "static");
+		else
+			len = sprintf(buf, "dhcp");
+		break;
+	case ISCSI_NET_PARAM_IPV4_SUBNET:
+		len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	return len;
+}
+
+int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+		enum iscsi_param_type param_type,
+		int param, char *buf)
+{
+	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct be_cmd_get_def_gateway_resp gateway;
+	int len = -ENOSYS;
+
+	switch (param) {
+	case ISCSI_NET_PARAM_IPV4_ADDR:
+	case ISCSI_NET_PARAM_IPV4_SUBNET:
+	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+	case ISCSI_NET_PARAM_IPV6_ADDR:
+		len = be2iscsi_get_if_param(phba, iface, param, buf);
+		break;
+	case ISCSI_NET_PARAM_IFACE_ENABLE:
+		len = sprintf(buf, "enabled");
+		break;
+	case ISCSI_NET_PARAM_IPV4_GW:
+		memset(&gateway, 0, sizeof(gateway));
+		len = mgmt_get_gateway(phba, BE2_IPV4, &gateway);
+		if (!len)
+			len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr);
+		break;
+	default:
+		len = -ENOSYS;
+	}
+
+	return len;
+}
+
 /**
  * beiscsi_ep_get_param - get the iscsi parameter
  * @ep: pointer to iscsi ep
@@ -221,7 +518,7 @@
 	struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
 	int len = 0;
 
-	SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_get_param, param= %d\n", param);
 
 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
@@ -279,6 +576,121 @@
 }
 
 /**
+ * beiscsi_get_initname - Read Initiator Name from flash
+ * @buf: buffer pointer
+ * @phba: The device priv structure instance
+ *
+ * returns number of bytes
+ */
+static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
+{
+	int rc;
+	unsigned int tag, wrb_num;
+	unsigned short status, extd_status;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_hba_name *resp;
+	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+	tag = be_cmd_get_initname(phba);
+	if (!tag) {
+		SE_DEBUG(DBG_LVL_1, "Getting Initiator Name Failed\n");
+		return -EBUSY;
+	} else
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+				phba->ctrl.mcc_numtag[tag]);
+
+	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+
+	if (status || extd_status) {
+		SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
+				"status = %d extd_status = %d\n",
+				status, extd_status);
+		free_mcc_tag(&phba->ctrl, tag);
+		return -EAGAIN;
+	}
+	wrb = queue_get_wrb(mccq, wrb_num);
+	free_mcc_tag(&phba->ctrl, tag);
+	resp = embedded_payload(wrb);
+	rc = sprintf(buf, "%s\n", resp->initiator_name);
+	return rc;
+}
+
+/**
+ * beiscsi_get_port_state - Get the Port State
+ * @shost : pointer to scsi_host structure
+ *
+ * Sets the iSCSI transport class port state from the adapter state.
+ */
+static void beiscsi_get_port_state(struct Scsi_Host *shost)
+{
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct iscsi_cls_host *ihost = shost->shost_data;
+
+	ihost->port_state = (phba->state == BE_ADAPTER_UP) ?
+		ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
+}
+
+/**
+ * beiscsi_get_port_speed  - Get the Port Speed from Adapter
+ * @shost : pointer to scsi_host structure
+ *
+ * returns Success/Failure
+ */
+static int beiscsi_get_port_speed(struct Scsi_Host *shost)
+{
+	unsigned int tag, wrb_num;
+	unsigned short status, extd_status;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_ntwk_link_status_resp *resp;
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct iscsi_cls_host *ihost = shost->shost_data;
+	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+	tag = be_cmd_get_port_speed(phba);
+	if (!tag) {
+		SE_DEBUG(DBG_LVL_1, "Getting Port Speed Failed\n");
+		return -EBUSY;
+	} else
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+				phba->ctrl.mcc_numtag[tag]);
+
+	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+
+	if (status || extd_status) {
+		SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with "
+				"status = %d extd_status = %d\n",
+				status, extd_status);
+		free_mcc_tag(&phba->ctrl, tag);
+		return -EAGAIN;
+	}
+	wrb = queue_get_wrb(mccq, wrb_num);
+	free_mcc_tag(&phba->ctrl, tag);
+	resp = embedded_payload(wrb);
+
+	switch (resp->mac_speed) {
+	case BE2ISCSI_LINK_SPEED_10MBPS:
+		ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
+		break;
+	case BE2ISCSI_LINK_SPEED_100MBPS:
+		ihost->port_speed = ISCSI_PORT_SPEED_100MBPS;
+		break;
+	case BE2ISCSI_LINK_SPEED_1GBPS:
+		ihost->port_speed = ISCSI_PORT_SPEED_1GBPS;
+		break;
+	case BE2ISCSI_LINK_SPEED_10GBPS:
+		ihost->port_speed = ISCSI_PORT_SPEED_10GBPS;
+		break;
+	default:
+		ihost->port_speed = ISCSI_PORT_SPEED_UNKNOWN;
+	}
+	return 0;
+}
+
+/**
  * beiscsi_get_host_param - get the iscsi parameter
  * @shost: pointer to scsi_host structure
  * @param: parameter type identifier
@@ -301,6 +713,27 @@
 			return status;
 		}
 		break;
+	case ISCSI_HOST_PARAM_INITIATOR_NAME:
+		status = beiscsi_get_initname(buf, phba);
+		if (status < 0) {
+			SE_DEBUG(DBG_LVL_1,
+					"Retreiving Initiator Name Failed\n");
+			return status;
+		}
+		break;
+	case ISCSI_HOST_PARAM_PORT_STATE:
+		beiscsi_get_port_state(shost);
+		status = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
+		break;
+	case ISCSI_HOST_PARAM_PORT_SPEED:
+		status = beiscsi_get_port_speed(shost);
+		if (status) {
+			SE_DEBUG(DBG_LVL_1,
+					"Retreiving Port Speed Failed\n");
+			return status;
+		}
+		status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
+		break;
 	default:
 		return iscsi_host_get_param(shost, param, buf);
 	}
@@ -309,46 +742,21 @@
 
 int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
 {
-	struct be_cmd_resp_get_mac_addr *resp;
-	struct be_mcc_wrb *wrb;
-	unsigned int tag, wrb_num;
-	unsigned short status, extd_status;
-	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+	struct be_cmd_get_nic_conf_resp resp;
 	int rc;
 
-	if (phba->read_mac_address)
-		return sysfs_format_mac(buf, phba->mac_address,
-					ETH_ALEN);
+	if (strlen(phba->mac_address))
+		return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 
-	tag = be_cmd_get_mac_addr(phba);
-	if (!tag) {
-		SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
-		return -EBUSY;
-	} else
-		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
-					 phba->ctrl.mcc_numtag[tag]);
+	memset(&resp, 0, sizeof(resp));
+	rc = mgmt_get_nic_conf(phba, &resp);
+	if (rc)
+		return rc;
 
-	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
-	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
-	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
-	if (status || extd_status) {
-		SE_DEBUG(DBG_LVL_1, "Failed to get be_cmd_get_mac_addr"
-				    " status = %d extd_status = %d\n",
-				    status, extd_status);
-		free_mcc_tag(&phba->ctrl, tag);
-		return -EAGAIN;
-	}
-	wrb = queue_get_wrb(mccq, wrb_num);
-	free_mcc_tag(&phba->ctrl, tag);
-	resp = embedded_payload(wrb);
-	memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
-	rc = sysfs_format_mac(buf, phba->mac_address,
-			       ETH_ALEN);
-	phba->read_mac_address = 1;
-	return rc;
+	memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);
+	return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 }
 
-
 /**
  * beiscsi_conn_get_stats - get the iscsi stats
  * @cls_conn: pointer to iscsi cls conn
@@ -736,11 +1144,24 @@
 umode_t be2iscsi_attr_is_visible(int param_type, int param)
 {
 	switch (param_type) {
+	case ISCSI_NET_PARAM:
+		switch (param) {
+		case ISCSI_NET_PARAM_IFACE_ENABLE:
+		case ISCSI_NET_PARAM_IPV4_ADDR:
+		case ISCSI_NET_PARAM_IPV4_SUBNET:
+		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
+		case ISCSI_NET_PARAM_IPV4_GW:
+		case ISCSI_NET_PARAM_IPV6_ADDR:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
 	case ISCSI_HOST_PARAM:
 		switch (param) {
 		case ISCSI_HOST_PARAM_HWADDRESS:
-		case ISCSI_HOST_PARAM_IPADDRESS:
 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
+		case ISCSI_HOST_PARAM_PORT_STATE:
+		case ISCSI_HOST_PARAM_PORT_SPEED:
 			return S_IRUGO;
 		default:
 			return 0;
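
be2iscsi_iface_set_param() above receives the interface settings as a block of netlink attributes and walks them with nla_for_each_attr(), pulling each iscsi_iface_param_info payload out with nla_data() (nla_find() locates a companion attribute such as the subnet mask). Reduced to a hedged sketch with a hypothetical dispatch step:

	#include <net/netlink.h>
	#include <scsi/iscsi_if.h>

	static int demo_walk_iface_params(void *data, uint32_t dt_len)
	{
		struct iscsi_iface_param_info *info;
		struct nlattr *attrib;
		uint32_t rem = dt_len;

		nla_for_each_attr(attrib, data, dt_len, rem) {
			info = nla_data(attrib);	/* attribute payload */
			if (info->param_type != ISCSI_NET_PARAM)
				continue;
			/* dispatch on info->iface_type and info->param here */
		}
		return 0;
	}
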
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 5c45be1..8b826fc 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -25,6 +25,21 @@
 
 #define BE2_IPV4  0x1
 #define BE2_IPV6  0x10
+#define BE2_DHCP_V4 0x05
+
+#define NON_BLOCKING 0x0
+#define BLOCKING 0x1
+
+void beiscsi_create_def_ifaces(struct beiscsi_hba *phba);
+
+void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba);
+
+int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+			     enum iscsi_param_type param_type,
+			     int param, char *buf);
+
+int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+			     void *data, uint32_t count);
 
 umode_t be2iscsi_attr_is_visible(int param_type, int param);
 
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 375756f..0b1d99c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -28,8 +28,11 @@
 #include <linux/semaphore.h>
 #include <linux/iscsi_boot_sysfs.h>
 #include <linux/module.h>
+#include <linux/bsg-lib.h>
 
 #include <scsi/libiscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
+#include <scsi/scsi_netlink.h>
 #include <scsi/scsi_transport_iscsi.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_cmnd.h>
@@ -48,7 +51,8 @@
 
 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_VERSION(BUILD_STR);
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 module_param(be_iopoll_budget, int, 0);
 module_param(enable_msix, int, 0);
@@ -147,15 +151,15 @@
 	struct invalidate_command_table *inv_tbl;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int cid, tag, i, num_invalidate;
-	int rc = FAILED;
 
 	/* invalidate iocbs */
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
 	spin_lock_bh(&session->lock);
-	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
-		goto unlock;
-
+	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
+		spin_unlock_bh(&session->lock);
+		return FAILED;
+	}
 	conn = session->leadconn;
 	beiscsi_conn = conn->dd_data;
 	phba = beiscsi_conn->phba;
@@ -208,9 +212,6 @@
 	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
 			    nonemb_cmd.va, nonemb_cmd.dma);
 	return iscsi_eh_device_reset(sc);
-unlock:
-	spin_unlock_bh(&session->lock);
-	return rc;
 }
 
 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
@@ -230,10 +231,10 @@
 	case ISCSI_BOOT_TGT_IP_ADDR:
 		if (boot_conn->dest_ipaddr.ip_type == 0x1)
 			rc = sprintf(buf, "%pI4\n",
-				(char *)&boot_conn->dest_ipaddr.ip_address);
+				(char *)&boot_conn->dest_ipaddr.addr);
 		else
 			rc = sprintf(str, "%pI6\n",
-				(char *)&boot_conn->dest_ipaddr.ip_address);
+				(char *)&boot_conn->dest_ipaddr.addr);
 		break;
 	case ISCSI_BOOT_TGT_PORT:
 		rc = sprintf(str, "%d\n", boot_conn->dest_port);
@@ -311,12 +312,8 @@
 		rc = sprintf(str, "0\n");
 		break;
 	case ISCSI_BOOT_ETH_MAC:
-		rc  = beiscsi_get_macaddr(buf, phba);
-		if (rc < 0) {
-			SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
-			return rc;
-		}
-	break;
+		rc  = beiscsi_get_macaddr(str, phba);
+		break;
 	default:
 		rc = -ENOSYS;
 		break;
@@ -394,7 +391,7 @@
 
 static struct scsi_host_template beiscsi_sht = {
 	.module = THIS_MODULE,
-	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
+	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
 	.proc_name = DRV_NAME,
 	.queuecommand = iscsi_queuecommand,
 	.change_queue_depth = iscsi_change_queue_depth,
@@ -409,6 +406,8 @@
 	.max_sectors = BEISCSI_MAX_SECTORS,
 	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
 	.use_clustering = ENABLE_CLUSTERING,
+	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
+
 };
 
 static struct scsi_transport_template *beiscsi_scsi_transport;
@@ -435,6 +434,7 @@
 	phba->shost = shost;
 	phba->pcidev = pci_dev_get(pcidev);
 	pci_set_drvdata(pcidev, phba);
+	phba->interface_handle = 0xFFFFFFFF;
 
 	if (iscsi_host_add(shost, &phba->pcidev->dev))
 		goto free_devices;
@@ -544,8 +544,7 @@
 						  &mbox_mem_alloc->dma);
 	if (!mbox_mem_alloc->va) {
 		beiscsi_unmap_pci_function(phba);
-		status = -ENOMEM;
-		return status;
+		return -ENOMEM;
 	}
 
 	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
@@ -1252,9 +1251,9 @@
 	task = pwrb_handle->pio_handle;
 
 	io_task = task->dd_data;
-	spin_lock(&phba->mgmt_sgl_lock);
+	spin_lock_bh(&phba->mgmt_sgl_lock);
 	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
-	spin_unlock(&phba->mgmt_sgl_lock);
+	spin_unlock_bh(&phba->mgmt_sgl_lock);
 	spin_lock_bh(&session->lock);
 	free_wrb_handle(phba, pwrb_context, pwrb_handle);
 	spin_unlock_bh(&session->lock);
@@ -1370,8 +1369,6 @@
 	struct be_bus_address phys_addr;
 	struct list_head *pbusy_list;
 	struct async_pdu_handle *pasync_handle = NULL;
-	int buffer_len = 0;
-	unsigned char buffer_index = -1;
 	unsigned char is_header = 0;
 
 	phys_addr.u.a32.address_lo =
@@ -1392,22 +1389,11 @@
 		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
 			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
 			index) / 32] & PDUCQE_INDEX_MASK));
-
-		buffer_len = (unsigned int)(phys_addr.u.a64.address -
-				pasync_ctx->async_header.pa_base.u.a64.address);
-
-		buffer_index = buffer_len /
-				pasync_ctx->async_header.buffer_size;
-
 		break;
 	case UNSOL_DATA_NOTIFY:
 		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
 					dw[offsetof(struct amap_i_t_dpdu_cqe,
 					index) / 32] & PDUCQE_INDEX_MASK));
-		buffer_len = (unsigned long)(phys_addr.u.a64.address -
-					pasync_ctx->async_data.pa_base.u.
-					a64.address);
-		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
 		break;
 	default:
 		pbusy_list = NULL;
@@ -1418,11 +1404,9 @@
 		return NULL;
 	}
 
-	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
 	WARN_ON(list_empty(pbusy_list));
 	list_for_each_entry(pasync_handle, pbusy_list, link) {
-		WARN_ON(pasync_handle->consumed);
-		if (pasync_handle->index == buffer_index)
+		if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
 			break;
 	}
 
@@ -1449,15 +1433,13 @@
 	unsigned int num_entries, writables = 0;
 	unsigned int *pep_read_ptr, *pwritables;
 
-
+	num_entries = pasync_ctx->num_entries;
 	if (is_header) {
 		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
 		pwritables = &pasync_ctx->async_header.writables;
-		num_entries = pasync_ctx->async_header.num_entries;
 	} else {
 		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
 		pwritables = &pasync_ctx->async_data.writables;
-		num_entries = pasync_ctx->async_data.num_entries;
 	}
 
 	while ((*pep_read_ptr) != cq_index) {
@@ -1491,14 +1473,13 @@
 	return 0;
 }
 
-static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
+static void hwi_free_async_msg(struct beiscsi_hba *phba,
 				       unsigned int cri)
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_async_pdu_context *pasync_ctx;
 	struct async_pdu_handle *pasync_handle, *tmp_handle;
 	struct list_head *plist;
-	unsigned int i = 0;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
@@ -1508,23 +1489,20 @@
 	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
 		list_del(&pasync_handle->link);
 
-		if (i == 0) {
+		if (pasync_handle->is_header) {
 			list_add_tail(&pasync_handle->link,
 				      &pasync_ctx->async_header.free_list);
 			pasync_ctx->async_header.free_entries++;
-			i++;
 		} else {
 			list_add_tail(&pasync_handle->link,
 				      &pasync_ctx->async_data.free_list);
 			pasync_ctx->async_data.free_entries++;
-			i++;
 		}
 	}
 
 	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
 	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
 	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
-	return 0;
 }
 
 static struct phys_addr *
@@ -1557,16 +1535,15 @@
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+	num_entries = pasync_ctx->num_entries;
 
 	if (is_header) {
-		num_entries = pasync_ctx->async_header.num_entries;
 		writables = min(pasync_ctx->async_header.writables,
 				pasync_ctx->async_header.free_entries);
 		pfree_link = pasync_ctx->async_header.free_list.next;
 		host_write_num = pasync_ctx->async_header.host_write_ptr;
 		ring_id = phwi_ctrlr->default_pdu_hdr.id;
 	} else {
-		num_entries = pasync_ctx->async_data.num_entries;
 		writables = min(pasync_ctx->async_data.writables,
 				pasync_ctx->async_data.free_entries);
 		pfree_link = pasync_ctx->async_data.free_list.next;
@@ -1673,7 +1650,7 @@
 			}
 			memcpy(pfirst_buffer + offset,
 			       pasync_handle->pbuffer, buf_len);
-			offset = buf_len;
+			offset += buf_len;
 		}
 		index++;
 	}
@@ -1682,10 +1659,9 @@
 					   (beiscsi_conn->beiscsi_conn_cid -
 					    phba->fw_config.iscsi_cid_start),
 					    phdr, hdr_len, pfirst_buffer,
-					    buf_len);
+					    offset);
 
-	if (status == 0)
-		hwi_free_async_msg(phba, cri);
+	hwi_free_async_msg(phba, cri);
 	return 0;
 }
 
@@ -2229,7 +2205,7 @@
 	struct mem_array *mem_arr, *mem_arr_orig;
 	unsigned int i, j, alloc_size, curr_alloc_size;
 
-	phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
+	phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
 	if (!phba->phwi_ctrlr)
 		return -ENOMEM;
 
@@ -2349,27 +2325,21 @@
 	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
 }
 
-static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
+static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 {
 	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
-	struct wrb_handle *pwrb_handle;
+	struct wrb_handle *pwrb_handle = NULL;
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_wrb_context *pwrb_context;
-	struct iscsi_wrb *pwrb;
-	unsigned int num_cxn_wrbh;
-	unsigned int num_cxn_wrb, j, idx, index;
+	struct iscsi_wrb *pwrb = NULL;
+	unsigned int num_cxn_wrbh = 0;
+	unsigned int num_cxn_wrb = 0, j, idx = 0, index;
 
 	mem_descr_wrbh = phba->init_mem;
 	mem_descr_wrbh += HWI_MEM_WRBH;
 
 	mem_descr_wrb = phba->init_mem;
 	mem_descr_wrb += HWI_MEM_WRB;
-
-	idx = 0;
-	pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
-	num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
-			((sizeof(struct wrb_handle)) *
-			 phba->params.wrbs_per_cxn));
 	phwi_ctrlr = phba->phwi_ctrlr;
 
 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
@@ -2377,12 +2347,32 @@
 		pwrb_context->pwrb_handle_base =
 				kzalloc(sizeof(struct wrb_handle *) *
 					phba->params.wrbs_per_cxn, GFP_KERNEL);
+		if (!pwrb_context->pwrb_handle_base) {
+			shost_printk(KERN_ERR, phba->shost,
+					"Mem Alloc Failed. Failing to load\n");
+			goto init_wrb_hndl_failed;
+		}
 		pwrb_context->pwrb_handle_basestd =
 				kzalloc(sizeof(struct wrb_handle *) *
 					phba->params.wrbs_per_cxn, GFP_KERNEL);
+		if (!pwrb_context->pwrb_handle_basestd) {
+			shost_printk(KERN_ERR, phba->shost,
+					"Mem Alloc Failed. Failing to load\n");
+			goto init_wrb_hndl_failed;
+		}
+		if (!num_cxn_wrbh) {
+			pwrb_handle =
+				mem_descr_wrbh->mem_array[idx].virtual_address;
+			num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
+					((sizeof(struct wrb_handle)) *
+					 phba->params.wrbs_per_cxn));
+			idx++;
+		}
+		pwrb_context->alloc_index = 0;
+		pwrb_context->wrb_handles_available = 0;
+		pwrb_context->free_index = 0;
+
 		if (num_cxn_wrbh) {
-			pwrb_context->alloc_index = 0;
-			pwrb_context->wrb_handles_available = 0;
 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
 				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
 				pwrb_context->pwrb_handle_basestd[j] =
@@ -2391,36 +2381,20 @@
 				pwrb_handle->wrb_index = j;
 				pwrb_handle++;
 			}
-			pwrb_context->free_index = 0;
-			num_cxn_wrbh--;
-		} else {
-			idx++;
-			pwrb_handle =
-			    mem_descr_wrbh->mem_array[idx].virtual_address;
-			num_cxn_wrbh =
-			    ((mem_descr_wrbh->mem_array[idx].size) /
-			     ((sizeof(struct wrb_handle)) *
-			      phba->params.wrbs_per_cxn));
-			pwrb_context->alloc_index = 0;
-			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
-				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
-				pwrb_context->pwrb_handle_basestd[j] =
-				    pwrb_handle;
-				pwrb_context->wrb_handles_available++;
-				pwrb_handle->wrb_index = j;
-				pwrb_handle++;
-			}
-			pwrb_context->free_index = 0;
 			num_cxn_wrbh--;
 		}
 	}
 	idx = 0;
-	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
-	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
-		      ((sizeof(struct iscsi_wrb) *
-			phba->params.wrbs_per_cxn));
 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
 		pwrb_context = &phwi_ctrlr->wrb_context[index];
+		if (!num_cxn_wrb) {
+			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
+			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
+				((sizeof(struct iscsi_wrb) *
+				  phba->params.wrbs_per_cxn));
+			idx++;
+		}
+
 		if (num_cxn_wrb) {
 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
 				pwrb_handle = pwrb_context->pwrb_handle_base[j];
@@ -2428,20 +2402,16 @@
 				pwrb++;
 			}
 			num_cxn_wrb--;
-		} else {
-			idx++;
-			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
-			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
-				      ((sizeof(struct iscsi_wrb) *
-					phba->params.wrbs_per_cxn));
-			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
-				pwrb_handle = pwrb_context->pwrb_handle_base[j];
-				pwrb_handle->pwrb = pwrb;
-				pwrb++;
-			}
-			num_cxn_wrb--;
 		}
 	}
+	return 0;
+init_wrb_hndl_failed:
+	for (j = index; j > 0; j--) {
+		pwrb_context = &phwi_ctrlr->wrb_context[j];
+		kfree(pwrb_context->pwrb_handle_base);
+		kfree(pwrb_context->pwrb_handle_basestd);
+	}
+	return -ENOMEM;
 }
 
 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
@@ -2450,7 +2420,7 @@
 	struct hba_parameters *p = &phba->params;
 	struct hwi_async_pdu_context *pasync_ctx;
 	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
-	unsigned int index;
+	unsigned int index, idx, num_per_mem, num_async_data;
 	struct be_mem_descriptor *mem_descr;
 
 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
@@ -2462,10 +2432,8 @@
 	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
 	memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
-	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
-	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
-	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
-	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
+	pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
+	pasync_ctx->buffer_size = p->defpdu_hdr_sz;
 
 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
@@ -2510,19 +2478,6 @@
 	pasync_ctx->async_header.writables = 0;
 	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
 
-	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
-	if (mem_descr->mem_array[0].virtual_address) {
-		SE_DEBUG(DBG_LVL_8,
-			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
-			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
-	} else
-		shost_printk(KERN_WARNING, phba->shost,
-			    "No Virtual address\n");
-	pasync_ctx->async_data.va_base =
-			mem_descr->mem_array[0].virtual_address;
-	pasync_ctx->async_data.pa_base.u.a64.address =
-			mem_descr->mem_array[0].bus_address.u.a64.address;
 
 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 	mem_descr += HWI_MEM_ASYNC_DATA_RING;
@@ -2553,6 +2508,25 @@
 	pasync_data_h =
 		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
 
+	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
+	if (mem_descr->mem_array[0].virtual_address) {
+		SE_DEBUG(DBG_LVL_8,
+			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
+			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
+	} else
+		shost_printk(KERN_WARNING, phba->shost,
+			    "No Virtual address\n");
+	idx = 0;
+	pasync_ctx->async_data.va_base =
+			mem_descr->mem_array[idx].virtual_address;
+	pasync_ctx->async_data.pa_base.u.a64.address =
+			mem_descr->mem_array[idx].bus_address.u.a64.address;
+
+	num_async_data = ((mem_descr->mem_array[idx].size) /
+				phba->params.defpdu_data_sz);
+	num_per_mem = 0;
+
 	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
 		pasync_header_h->cri = -1;
 		pasync_header_h->index = (char)index;
@@ -2578,14 +2552,29 @@
 		pasync_data_h->cri = -1;
 		pasync_data_h->index = (char)index;
 		INIT_LIST_HEAD(&pasync_data_h->link);
+
+		if (!num_async_data) {
+			num_per_mem = 0;
+			idx++;
+			pasync_ctx->async_data.va_base =
+				mem_descr->mem_array[idx].virtual_address;
+			pasync_ctx->async_data.pa_base.u.a64.address =
+				mem_descr->mem_array[idx].
+				bus_address.u.a64.address;
+
+			num_async_data = ((mem_descr->mem_array[idx].size) /
+					phba->params.defpdu_data_sz);
+		}
 		pasync_data_h->pbuffer =
 			(void *)((unsigned long)
 			(pasync_ctx->async_data.va_base) +
-			(p->defpdu_data_sz * index));
+			(p->defpdu_data_sz * num_per_mem));
 
 		pasync_data_h->pa.u.a64.address =
 		    pasync_ctx->async_data.pa_base.u.a64.address +
-		    (p->defpdu_data_sz * index);
+		    (p->defpdu_data_sz * num_per_mem);
+		num_per_mem++;
+		num_async_data--;
 
 		list_add_tail(&pasync_data_h->link,
 			      &pasync_ctx->async_data.free_list);
@@ -2913,9 +2902,11 @@
 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
 {
 	struct be_dma_mem *mem = &q->dma_mem;
-	if (mem->va)
+	if (mem->va) {
 		pci_free_consistent(phba->pcidev, mem->size,
 			mem->va, mem->dma);
+		mem->va = NULL;
+	}
 }
 
 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
@@ -3215,7 +3206,7 @@
 error:
 	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
 	hwi_cleanup(phba);
-	return -ENOMEM;
+	return status;
 }
 
 static int hwi_init_controller(struct beiscsi_hba *phba)
@@ -3236,7 +3227,9 @@
 	}
 
 	iscsi_init_global_templates(phba);
-	beiscsi_init_wrb_handle(phba);
+	if (beiscsi_init_wrb_handle(phba))
+		return -ENOMEM;
+
 	hwi_init_async_pdu_ctx(phba);
 	if (hwi_init_port(phba) != 0) {
 		shost_printk(KERN_ERR, phba->shost,
@@ -3288,7 +3281,7 @@
 
 free_init:
 	beiscsi_free_mem(phba);
-	return -ENOMEM;
+	return ret;
 }
 
 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
@@ -3475,8 +3468,8 @@
 
 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
 {
-	struct be_cmd_resp_get_boot_target *boot_resp;
-	struct be_cmd_resp_get_session *session_resp;
+	struct be_cmd_get_boot_target_resp *boot_resp;
+	struct be_cmd_get_session_resp *session_resp;
 	struct be_mcc_wrb *wrb;
 	struct be_dma_mem nonemb_cmd;
 	unsigned int tag, wrb_num;
@@ -3484,9 +3477,9 @@
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 	int ret = -ENOMEM;
 
-	tag = beiscsi_get_boot_target(phba);
+	tag = mgmt_get_boot_target(phba);
 	if (!tag) {
-		SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
+		SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed\n");
 		return -EAGAIN;
 	} else
 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -3496,7 +3489,7 @@
 	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
 	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
 	if (status || extd_status) {
-		SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
+		SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed"
 				    " status = %d extd_status = %d\n",
 				    status, extd_status);
 		free_mcc_tag(&phba->ctrl, tag);
@@ -3522,8 +3515,8 @@
 	}
 
 	memset(nonemb_cmd.va, 0, sizeof(*session_resp));
-	tag = beiscsi_get_session_info(phba,
-		boot_resp->boot_session_handle, &nonemb_cmd);
+	tag = mgmt_get_session_info(phba, boot_resp->boot_session_handle,
+				    &nonemb_cmd);
 	if (!tag) {
 		SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
 			" Failed\n");
@@ -3696,6 +3689,57 @@
 	kfree(phba->ep_array);
 }
 
+static void beiscsi_cleanup_task(struct iscsi_task *task)
+{
+	struct beiscsi_io_task *io_task = task->dd_data;
+	struct iscsi_conn *conn = task->conn;
+	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
+	struct hwi_wrb_context *pwrb_context;
+	struct hwi_controller *phwi_ctrlr;
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
+			- phba->fw_config.iscsi_cid_start];
+
+	if (io_task->cmd_bhs) {
+		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
+			      io_task->bhs_pa.u.a64.address);
+		io_task->cmd_bhs = NULL;
+	}
+
+	if (task->sc) {
+		if (io_task->pwrb_handle) {
+			free_wrb_handle(phba, pwrb_context,
+					io_task->pwrb_handle);
+			io_task->pwrb_handle = NULL;
+		}
+
+		if (io_task->psgl_handle) {
+			spin_lock(&phba->io_sgl_lock);
+			free_io_sgl_handle(phba, io_task->psgl_handle);
+			spin_unlock(&phba->io_sgl_lock);
+			io_task->psgl_handle = NULL;
+		}
+	} else {
+		if (!beiscsi_conn->login_in_progress) {
+			if (io_task->pwrb_handle) {
+				free_wrb_handle(phba, pwrb_context,
+						io_task->pwrb_handle);
+				io_task->pwrb_handle = NULL;
+			}
+			if (io_task->psgl_handle) {
+				spin_lock(&phba->mgmt_sgl_lock);
+				free_mgmt_sgl_handle(phba,
+						     io_task->psgl_handle);
+				spin_unlock(&phba->mgmt_sgl_lock);
+				io_task->psgl_handle = NULL;
+			}
+		}
+	}
+}
+
 void
 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 			   struct beiscsi_offload_params *params)
@@ -3704,12 +3748,19 @@
 	struct iscsi_target_context_update_wrb *pwrb = NULL;
 	struct be_mem_descriptor *mem_descr;
 	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	struct iscsi_task *task = beiscsi_conn->task;
+	struct iscsi_session *session = task->conn->session;
 	u32 doorbell = 0;
 
 	/*
 	 * We can always use 0 here because it is reserved by libiscsi for
 	 * login/startup related tasks.
 	 */
+	beiscsi_conn->login_in_progress = 0;
+	spin_lock_bh(&session->lock);
+	beiscsi_cleanup_task(task);
+	spin_unlock_bh(&session->lock);
+
 	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
 				       phba->fw_config.iscsi_cid_start));
 	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
@@ -3823,7 +3874,7 @@
 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
 	task->hdr_max = sizeof(struct be_cmd_bhs);
 	io_task->psgl_handle = NULL;
-	io_task->psgl_handle = NULL;
+	io_task->pwrb_handle = NULL;
 
 	if (task->sc) {
 		spin_lock(&phba->io_sgl_lock);
@@ -3865,6 +3916,7 @@
 				io_task->pwrb_handle =
 						beiscsi_conn->plogin_wrb_handle;
 			}
+			beiscsi_conn->task = task;
 		} else {
 			spin_lock(&phba->mgmt_sgl_lock);
 			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@@ -3907,53 +3959,11 @@
 	io_task->pwrb_handle = NULL;
 	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
 		      io_task->bhs_pa.u.a64.address);
+	io_task->cmd_bhs = NULL;
 	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
 	return -ENOMEM;
 }
 
-static void beiscsi_cleanup_task(struct iscsi_task *task)
-{
-	struct beiscsi_io_task *io_task = task->dd_data;
-	struct iscsi_conn *conn = task->conn;
-	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
-	struct beiscsi_hba *phba = beiscsi_conn->phba;
-	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
-	struct hwi_wrb_context *pwrb_context;
-	struct hwi_controller *phwi_ctrlr;
-
-	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
-			- phba->fw_config.iscsi_cid_start];
-	if (io_task->pwrb_handle) {
-		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
-		io_task->pwrb_handle = NULL;
-	}
-
-	if (io_task->cmd_bhs) {
-		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
-			      io_task->bhs_pa.u.a64.address);
-	}
-
-	if (task->sc) {
-		if (io_task->psgl_handle) {
-			spin_lock(&phba->io_sgl_lock);
-			free_io_sgl_handle(phba, io_task->psgl_handle);
-			spin_unlock(&phba->io_sgl_lock);
-			io_task->psgl_handle = NULL;
-		}
-	} else {
-		if (task->hdr &&
-		   ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
-			return;
-		if (io_task->psgl_handle) {
-			spin_lock(&phba->mgmt_sgl_lock);
-			free_mgmt_sgl_handle(phba, io_task->psgl_handle);
-			spin_unlock(&phba->mgmt_sgl_lock);
-			io_task->psgl_handle = NULL;
-		}
-	}
-}
-
 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
 			  unsigned int num_sg, unsigned int xferlen,
 			  unsigned int writedir)
@@ -3993,7 +4003,8 @@
 	       &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
 
 	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
-		      cpu_to_be16(*(unsigned short *)&io_task->cmd_bhs->iscsi_hdr.lun));
+		      cpu_to_be16(*(unsigned short *)
+				  &io_task->cmd_bhs->iscsi_hdr.lun));
 	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
 	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
 		      io_task->pwrb_handle->wrb_index);
@@ -4126,6 +4137,76 @@
 	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
 }
 
+/**
+ * beiscsi_bsg_request - handle bsg request from ISCSI transport
+ * @job: job to handle
+ */
+static int beiscsi_bsg_request(struct bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct beiscsi_hba *phba;
+	struct iscsi_bsg_request *bsg_req = job->request;
+	int rc = -EINVAL;
+	unsigned int tag;
+	struct be_dma_mem nonemb_cmd;
+	struct be_cmd_resp_hdr *resp;
+	struct iscsi_bsg_reply *bsg_reply = job->reply;
+	unsigned short status, extd_status;
+
+	shost = iscsi_job_to_shost(job);
+	phba = iscsi_host_priv(shost);
+
+	switch (bsg_req->msgcode) {
+	case ISCSI_BSG_HST_VENDOR:
+		nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+					job->request_payload.payload_len,
+					&nonemb_cmd.dma);
+		if (nonemb_cmd.va == NULL) {
+			SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for "
+				 "beiscsi_bsg_request\n");
+			return -EIO;
+		}
+		tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
+						  &nonemb_cmd);
+		if (!tag) {
+			SE_DEBUG(DBG_LVL_1, "mgmt_vendor_specific_fw_cmd Failed\n");
+			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+					    nonemb_cmd.va, nonemb_cmd.dma);
+			return -EAGAIN;
+		} else
+			wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+						 phba->ctrl.mcc_numtag[tag]);
+		extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+		status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+		free_mcc_tag(&phba->ctrl, tag);
+		resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
+		sg_copy_from_buffer(job->reply_payload.sg_list,
+				    job->reply_payload.sg_cnt,
+				    nonemb_cmd.va, (resp->response_length
+				    + sizeof(*resp)));
+		bsg_reply->reply_payload_rcv_len = resp->response_length;
+		bsg_reply->result = status;
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+				    nonemb_cmd.va, nonemb_cmd.dma);
+		if (status || extd_status) {
+			SE_DEBUG(DBG_LVL_1, "mgmt_vendor_specific_fw_cmd Failed"
+				 " status = %d extd_status = %d\n",
+				 status, extd_status);
+			return -EIO;
+		}
+		break;
+
+	default:
+		SE_DEBUG(DBG_LVL_1, "Unsupported bsg command: 0x%x\n",
+			 bsg_req->msgcode);
+		break;
+	}
+
+	return rc;
+}
+
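
Like beiscsi_get_boot_info() earlier in this file, the new bsg handler above waits on the MCC tag and then unpacks the packed completion word stored in ctrl.mcc_numtag[]: the status sits in bits 0-7, the extended status in bits 8-15, and (where a response WRB is needed) the WRB index in bits 16-23. A stand-alone sketch of that decoding, using arbitrary example values:

    #include <stdio.h>
    #include <stdint.h>

    /* decode a packed MCC completion word with the driver's masks */
    static void decode_mcc_word(uint32_t word)
    {
            unsigned int wrb_num     = (word & 0x00FF0000) >> 16;
            unsigned int extd_status = (word & 0x0000FF00) >> 8;
            unsigned int status      =  word & 0x000000FF;

            printf("wrb=%u extd_status=%u status=%u\n",
                   wrb_num, extd_status, status);
    }

    int main(void)
    {
            decode_mcc_word(0x00120000);    /* WRB 0x12, success */
            decode_mcc_word(0x00000201);    /* extended status 2, status 1 */
            return 0;
    }
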
 static void beiscsi_quiesce(struct beiscsi_hba *phba)
 {
 	struct hwi_controller *phwi_ctrlr;
@@ -4183,6 +4264,7 @@
 		return;
 	}
 
+	beiscsi_destroy_def_ifaces(phba);
 	beiscsi_quiesce(phba);
 	iscsi_boot_destroy_kset(phba->boot_kset);
 	iscsi_host_remove(phba->shost);
@@ -4267,8 +4349,11 @@
 	phba->num_cpus = num_cpus;
 	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
 
-	if (enable_msix)
+	if (enable_msix) {
 		beiscsi_msix_enable(phba);
+		if (!phba->msix_enabled)
+			phba->num_cpus = 1;
+	}
 	ret = be_ctrl_init(phba, pcidev);
 	if (ret) {
 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -4366,8 +4451,9 @@
 		 * iscsi boot.
 		 */
 		shost_printk(KERN_ERR, phba->shost, "Could not set up "
-			     "iSCSI boot info.");
+			     "iSCSI boot info.\n");
 
+	beiscsi_create_def_ifaces(phba);
 	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
 	return 0;
 
@@ -4418,6 +4504,8 @@
 	.bind_conn = beiscsi_conn_bind,
 	.destroy_conn = iscsi_conn_teardown,
 	.attr_is_visible = be2iscsi_attr_is_visible,
+	.set_iface_param = be2iscsi_iface_set_param,
+	.get_iface_param = be2iscsi_iface_get_param,
 	.set_param = beiscsi_set_param,
 	.get_conn_param = iscsi_conn_get_param,
 	.get_session_param = iscsi_session_get_param,
@@ -4435,6 +4523,7 @@
 	.ep_poll = beiscsi_ep_poll,
 	.ep_disconnect = beiscsi_ep_disconnect,
 	.session_recovery_timedout = iscsi_session_recovery_timedout,
+	.bsg_request = beiscsi_bsg_request,
 };
 
 static struct pci_driver beiscsi_pci_driver = {
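
One more note on the beiscsi_init_wrb_handle() rework above: instead of pre-loading the first HWI_MEM_WRBH/HWI_MEM_WRB chunk and special-casing exhaustion inside the loop, both loops now advance to the next mem_array entry whenever the running handle count reaches zero. A toy user-space sketch of that allocation pattern (the chunk capacities below are invented):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical: how many connections each memory chunk can serve */
            int conns_per_chunk[] = { 2, 1, 1 };
            int idx = 0, avail = 0;

            for (int conn = 0; conn < 4; conn++) {
                    if (!avail)             /* mirrors "if (!num_cxn_wrbh)" */
                            avail = conns_per_chunk[idx++];
                    printf("connection %d served from chunk %d\n", conn, idx - 1);
                    avail--;
            }
            return 0;
    }
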
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index b4a06d5..40fea6e 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -34,9 +34,9 @@
 
 #include "be.h"
 #define DRV_NAME		"be2iscsi"
-#define BUILD_STR		"4.1.239.0"
-#define BE_NAME			"ServerEngines BladeEngine2" \
-				"Linux iSCSI Driver version" BUILD_STR
+#define BUILD_STR		"4.2.162.0"
+#define BE_NAME			"Emulex OneConnect" \
+				"Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
 
 #define BE_VENDOR_ID		0x19A2
@@ -316,6 +316,8 @@
 	struct iscsi_endpoint **ep_array;
 	struct iscsi_boot_kset *boot_kset;
 	struct Scsi_Host *shost;
+	struct iscsi_iface *ipv4_iface;
+	struct iscsi_iface *ipv6_iface;
 	struct {
 		/**
 		 * group together since they are used most frequently
@@ -345,7 +347,7 @@
 	struct work_struct work_cqs;	/* The work being queued */
 	struct be_ctrl_info ctrl;
 	unsigned int generation;
-	unsigned int read_mac_address;
+	unsigned int interface_handle;
 	struct mgmt_session_info boot_sess;
 	struct invalidate_command_table inv_tbl[128];
 
@@ -525,8 +527,6 @@
 
 		unsigned int free_entries;
 		unsigned int busy_entries;
-		unsigned int buffer_size;
-		unsigned int num_entries;
 
 		struct list_head free_list;
 	} async_header;
@@ -543,11 +543,12 @@
 
 		unsigned int free_entries;
 		unsigned int busy_entries;
-		unsigned int buffer_size;
 		struct list_head free_list;
-		unsigned int num_entries;
 	} async_data;
 
+	unsigned int buffer_size;
+	unsigned int num_entries;
+
 	/**
 	 * This is a varying size list! Do not add anything
 	 * after this entry!!
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 44762cf..01bb04c 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -17,15 +17,17 @@
  * Costa Mesa, CA 92626
  */
 
+#include <linux/bsg-lib.h>
+#include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
 #include "be_mgmt.h"
 #include "be_iscsi.h"
-#include <scsi/scsi_transport_iscsi.h>
 
-unsigned int beiscsi_get_boot_target(struct beiscsi_hba *phba)
+unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct be_mcc_wrb *wrb;
-	struct be_cmd_req_get_mac_addr *req;
+	struct be_cmd_get_boot_target_req *req;
 	unsigned int tag = 0;
 
 	SE_DEBUG(DBG_LVL_8, "In bescsi_get_boot_target\n");
@@ -42,22 +44,22 @@
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
 			   OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
-			   sizeof(*req));
+			   sizeof(struct be_cmd_get_boot_target_resp));
 
 	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
 	return tag;
 }
 
-unsigned int beiscsi_get_session_info(struct beiscsi_hba *phba,
-				  u32 boot_session_handle,
-				  struct be_dma_mem *nonemb_cmd)
+unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
+				   u32 boot_session_handle,
+				   struct be_dma_mem *nonemb_cmd)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct be_mcc_wrb *wrb;
 	unsigned int tag = 0;
-	struct  be_cmd_req_get_session *req;
-	struct be_cmd_resp_get_session *resp;
+	struct  be_cmd_get_session_req *req;
+	struct be_cmd_get_session_resp *resp;
 	struct be_sge *sge;
 
 	SE_DEBUG(DBG_LVL_8, "In beiscsi_get_session_info\n");
@@ -187,6 +189,72 @@
 	return status;
 }
 
+unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
+					 struct beiscsi_hba *phba,
+					 struct bsg_job *job,
+					 struct be_dma_mem *nonemb_cmd)
+{
+	struct be_cmd_resp_hdr *resp;
+	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
+	struct be_sge *mcc_sge = nonembedded_sgl(wrb);
+	unsigned int tag = 0;
+	struct iscsi_bsg_request *bsg_req = job->request;
+	struct be_bsg_vendor_cmd *req = nonemb_cmd->va;
+	unsigned short region, sector_size, sector, offset;
+
+	nonemb_cmd->size = job->request_payload.payload_len;
+	memset(nonemb_cmd->va, 0, nonemb_cmd->size);
+	resp = nonemb_cmd->va;
+	region =  bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	sector_size =  bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+	sector =  bsg_req->rqst_data.h_vendor.vendor_cmd[3];
+	offset =  bsg_req->rqst_data.h_vendor.vendor_cmd[4];
+	req->region = region;
+	req->sector = sector;
+	req->offset = offset;
+	spin_lock(&ctrl->mbox_lock);
+	memset(wrb, 0, sizeof(*wrb));
+
+	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
+	case BEISCSI_WRITE_FLASH:
+		offset = sector * sector_size + offset;
+		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+				   OPCODE_COMMON_WRITE_FLASH, sizeof(*req));
+		sg_copy_to_buffer(job->request_payload.sg_list,
+				  job->request_payload.sg_cnt,
+				  nonemb_cmd->va + offset, job->request_len);
+		break;
+	case BEISCSI_READ_FLASH:
+		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+			   OPCODE_COMMON_READ_FLASH, sizeof(*req));
+		break;
+	default:
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Unsupported cmd = 0x%x\n\n", bsg_req->rqst_data.
+			     h_vendor.vendor_cmd[0]);
+		spin_unlock(&ctrl->mbox_lock);
+		return -ENOSYS;
+	}
+
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+
+	be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
+			   job->request_payload.sg_cnt);
+	mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+	mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+	mcc_sge->len = cpu_to_le32(nonemb_cmd->size);
+	wrb->tag0 |= tag;
+
+	be_mcc_notify(phba);
+
+	spin_unlock(&ctrl->mbox_lock);
+	return tag;
+}
+
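
mgmt_vendor_specific_fw_cmd() above takes region, sector size, sector and offset from vendor_cmd[1]..[4] and, for BEISCSI_WRITE_FLASH, folds the sector/offset pair into a byte offset with sector * sector_size + offset before copying the payload in. A trivial stand-alone check of that arithmetic with made-up values:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical sector geometry taken from vendor_cmd[2..4] */
            unsigned short sector_size = 512, sector = 8, offset = 16;

            unsigned int byte_offset = sector * sector_size + offset;

            printf("flash write starts at byte offset %u\n", byte_offset); /* 4112 */
            return 0;
    }
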
 int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
@@ -328,7 +396,6 @@
 			 struct sockaddr *dst_addr,
 			 struct beiscsi_endpoint *beiscsi_ep,
 			 struct be_dma_mem *nonemb_cmd)
-
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
@@ -374,17 +441,17 @@
 	if (dst_addr->sa_family == PF_INET) {
 		__be32 s_addr = daddr_in->sin_addr.s_addr;
 		req->ip_address.ip_type = BE2_IPV4;
-		req->ip_address.ip_address[0] = s_addr & 0x000000ff;
-		req->ip_address.ip_address[1] = (s_addr & 0x0000ff00) >> 8;
-		req->ip_address.ip_address[2] = (s_addr & 0x00ff0000) >> 16;
-		req->ip_address.ip_address[3] = (s_addr & 0xff000000) >> 24;
+		req->ip_address.addr[0] = s_addr & 0x000000ff;
+		req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8;
+		req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16;
+		req->ip_address.addr[3] = (s_addr & 0xff000000) >> 24;
 		req->tcp_port = ntohs(daddr_in->sin_port);
 		beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
 		beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
 		beiscsi_ep->ip_type = BE2_IPV4;
 	} else if (dst_addr->sa_family == PF_INET6) {
 		req->ip_address.ip_type = BE2_IPV6;
-		memcpy(&req->ip_address.ip_address,
+		memcpy(&req->ip_address.addr,
 		       &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
 		req->tcp_port = ntohs(daddr_in6->sin6_port);
 		beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
@@ -419,14 +486,399 @@
 	return tag;
 }
 
-unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
+unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb;
-	struct be_cmd_req_get_mac_addr *req;
-	unsigned int tag = 0;
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_cmd_get_all_if_id_req *req = embedded_payload(wrb);
+	struct be_cmd_get_all_if_id_req *pbe_allid = req;
+	int status = 0;
 
-	SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n");
+	memset(wrb, 0, sizeof(*wrb));
+
+	spin_lock(&ctrl->mbox_lock);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+			   OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
+			   sizeof(*req));
+	status = be_mbox_notify(ctrl);
+	if (!status)
+		phba->interface_handle = pbe_allid->if_hndl_list[0];
+	else {
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Failed in mgmt_get_all_if_id\n");
+	}
+	spin_unlock(&ctrl->mbox_lock);
+
+	return status;
+}
+
+static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
+				struct be_dma_mem *nonemb_cmd, void *resp_buf,
+				int resp_buf_len)
+{
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
+	unsigned short status, extd_status;
+	struct be_sge *sge;
+	unsigned int tag;
+	int rc = 0;
+
+	spin_lock(&ctrl->mbox_lock);
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		rc = -ENOMEM;
+		goto free_cmd;
+	}
+	memset(wrb, 0, sizeof(*wrb));
+	wrb->tag0 |= tag;
+	sge = nonembedded_sgl(wrb);
+
+	be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
+	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(nonemb_cmd->size);
+
+	be_mcc_notify(phba);
+	spin_unlock(&ctrl->mbox_lock);
+
+	wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+				 phba->ctrl.mcc_numtag[tag]);
+
+	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+	if (status || extd_status) {
+		SE_DEBUG(DBG_LVL_1,
+			 "mgmt_exec_nonemb_cmd Failed status = %d"
+			 "extd_status = %d\n", status, extd_status);
+		rc = -EIO;
+		goto free_tag;
+	}
+
+	if (resp_buf)
+		memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
+
+free_tag:
+	free_mcc_tag(&phba->ctrl, tag);
+free_cmd:
+	pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
+			    nonemb_cmd->va, nonemb_cmd->dma);
+	return rc;
+}
+
+static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
+			       int iscsi_cmd, int size)
+{
+	cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size),
+				       &cmd->dma);
+	if (!cmd->va) {
+		SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
+		return -ENOMEM;
+	}
+	memset(cmd->va, 0, sizeof(size));
+	cmd->size = size;
+	be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
+	return 0;
+}
+
+static int
+mgmt_static_ip_modify(struct beiscsi_hba *phba,
+		      struct be_cmd_get_if_info_resp *if_info,
+		      struct iscsi_iface_param_info *ip_param,
+		      struct iscsi_iface_param_info *subnet_param,
+		      uint32_t ip_action)
+{
+	struct be_cmd_set_ip_addr_req *req;
+	struct be_dma_mem nonemb_cmd;
+	uint32_t ip_type;
+	int rc;
+
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+				 sizeof(*req));
+	if (rc)
+		return rc;
+
+	ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+		BE2_IPV6 : BE2_IPV4 ;
+
+	req = nonemb_cmd.va;
+	req->ip_params.record_entry_count = 1;
+	req->ip_params.ip_record.action = ip_action;
+	req->ip_params.ip_record.interface_hndl =
+		phba->interface_handle;
+	req->ip_params.ip_record.ip_addr.size_of_structure =
+		sizeof(struct be_ip_addr_subnet_format);
+	req->ip_params.ip_record.ip_addr.ip_type = ip_type;
+
+	if (ip_action == IP_ACTION_ADD) {
+		memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
+		       ip_param->len);
+
+		if (subnet_param)
+			memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+			       subnet_param->value, subnet_param->len);
+	} else {
+		memcpy(req->ip_params.ip_record.ip_addr.addr,
+		       if_info->ip_addr.addr, ip_param->len);
+
+		memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+		       if_info->ip_addr.subnet_mask, ip_param->len);
+	}
+
+	rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+	if (rc < 0)
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Failed to Modify existing IP Address\n");
+	return rc;
+}
+
+static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
+			       uint32_t gtway_action, uint32_t param_len)
+{
+	struct be_cmd_set_def_gateway_req *req;
+	struct be_dma_mem nonemb_cmd;
+	int rt_val;
+
+
+	rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
+				sizeof(*req));
+	if (rt_val)
+		return rt_val;
+
+	req = nonemb_cmd.va;
+	req->action = gtway_action;
+	req->ip_addr.ip_type = BE2_IPV4;
+
+	memcpy(req->ip_addr.addr, gt_addr, param_len);
+
+	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+}
+
+int mgmt_set_ip(struct beiscsi_hba *phba,
+		struct iscsi_iface_param_info *ip_param,
+		struct iscsi_iface_param_info *subnet_param,
+		uint32_t boot_proto)
+{
+	struct be_cmd_get_def_gateway_resp gtway_addr_set;
+	struct be_cmd_get_if_info_resp if_info;
+	struct be_cmd_set_dhcp_req *dhcpreq;
+	struct be_cmd_rel_dhcp_req *reldhcp;
+	struct be_dma_mem nonemb_cmd;
+	uint8_t *gtway_addr;
+	uint32_t ip_type;
+	int rc;
+
+	if (mgmt_get_all_if_id(phba))
+		return -EIO;
+
+	memset(&if_info, 0, sizeof(if_info));
+	ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+		BE2_IPV6 : BE2_IPV4 ;
+
+	rc = mgmt_get_if_info(phba, ip_type, &if_info);
+	if (rc)
+		return rc;
+
+	if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+		if (if_info.dhcp_state) {
+			shost_printk(KERN_WARNING, phba->shost,
+				     "DHCP Already Enabled\n");
+			return 0;
+		}
+		/* ip_param->len is 1 in the DHCP case. Set the
+		 * proper IP length here, as it is used later
+		 * when freeing the static IP.
+		 */
+		ip_param->len = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
+				IP_V6_LEN : IP_V4_LEN;
+
+	} else {
+		if (if_info.dhcp_state) {
+
+			memset(&if_info, 0, sizeof(if_info));
+			rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
+				sizeof(*reldhcp));
+
+			if (rc)
+				return rc;
+
+			reldhcp = nonemb_cmd.va;
+			reldhcp->interface_hndl = phba->interface_handle;
+			reldhcp->ip_type = ip_type;
+
+			rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+			if (rc < 0) {
+				shost_printk(KERN_WARNING, phba->shost,
+					     "Failed to Delete existing dhcp\n");
+				return rc;
+			}
+		}
+	}
+
+	/* Delete the Static IP Set */
+	if (if_info.ip_addr.addr[0]) {
+		rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL,
+					   IP_ACTION_DEL);
+		if (rc)
+			return rc;
+	}
+
+	/* Delete the Gateway settings if mode change is to DHCP */
+	if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+		memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
+		rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
+		if (rc) {
+			shost_printk(KERN_WARNING, phba->shost,
+				     "Failed to Get Gateway Addr\n");
+			return rc;
+		}
+
+		if (gtway_addr_set.ip_addr.addr[0]) {
+			gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
+			rc = mgmt_modify_gateway(phba, gtway_addr,
+						 IP_ACTION_DEL, IP_V4_LEN);
+
+			if (rc) {
+				shost_printk(KERN_WARNING, phba->shost,
+					     "Failed to clear Gateway Addr Set\n");
+				return rc;
+			}
+		}
+	}
+
+	/* Set Adapter to DHCP/Static Mode */
+	if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
+		rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+			OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
+			sizeof(*dhcpreq));
+		if (rc)
+			return rc;
+
+		dhcpreq = nonemb_cmd.va;
+		dhcpreq->flags = BLOCKING;
+		dhcpreq->retry_count = 1;
+		dhcpreq->interface_hndl = phba->interface_handle;
+		dhcpreq->ip_type = BE2_DHCP_V4;
+
+		return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+	} else {
+		return mgmt_static_ip_modify(phba, &if_info, ip_param,
+					     subnet_param, IP_ACTION_ADD);
+	}
+
+	return rc;
+}
+
+int mgmt_set_gateway(struct beiscsi_hba *phba,
+		     struct iscsi_iface_param_info *gateway_param)
+{
+	struct be_cmd_get_def_gateway_resp gtway_addr_set;
+	uint8_t *gtway_addr;
+	int rt_val;
+
+	memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
+	rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
+	if (rt_val) {
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Failed to Get Gateway Addr\n");
+		return rt_val;
+	}
+
+	if (gtway_addr_set.ip_addr.addr[0]) {
+		gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
+		rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
+					     gateway_param->len);
+		if (rt_val) {
+			shost_printk(KERN_WARNING, phba->shost,
+				     "Failed to clear Gateway Addr Set\n");
+			return rt_val;
+		}
+	}
+
+	gtway_addr = (uint8_t *)&gateway_param->value;
+	rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_ADD,
+				     gateway_param->len);
+
+	if (rt_val)
+		shost_printk(KERN_WARNING, phba->shost,
+			     "Failed to Set Gateway Addr\n");
+
+	return rt_val;
+}
+
+int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
+		     struct be_cmd_get_def_gateway_resp *gateway)
+{
+	struct be_cmd_get_def_gateway_req *req;
+	struct be_dma_mem nonemb_cmd;
+	int rc;
+
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
+				 sizeof(*gateway));
+	if (rc)
+		return rc;
+
+	req = nonemb_cmd.va;
+	req->ip_type = ip_type;
+
+	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, gateway,
+				    sizeof(*gateway));
+}
+
+int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
+		     struct be_cmd_get_if_info_resp *if_info)
+{
+	struct be_cmd_get_if_info_req *req;
+	struct be_dma_mem nonemb_cmd;
+	int rc;
+
+	if (mgmt_get_all_if_id(phba))
+		return -EIO;
+
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
+				 sizeof(*if_info));
+	if (rc)
+		return rc;
+
+	req = nonemb_cmd.va;
+	req->interface_hndl = phba->interface_handle;
+	req->ip_type = ip_type;
+
+	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info,
+				    sizeof(*if_info));
+}
+
+int mgmt_get_nic_conf(struct beiscsi_hba *phba,
+		      struct be_cmd_get_nic_conf_resp *nic)
+{
+	struct be_dma_mem nonemb_cmd;
+	int rc;
+
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
+				 sizeof(*nic));
+	if (rc)
+		return rc;
+
+	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, nic, sizeof(*nic));
+}
+
+
+
+unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
+{
+	unsigned int tag = 0;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_hba_name *req;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+
 	spin_lock(&ctrl->mbox_lock);
 	tag = alloc_mcc_tag(phba);
 	if (!tag) {
@@ -438,12 +890,38 @@
 	req = embedded_payload(wrb);
 	wrb->tag0 |= tag;
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
-			   OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
-			   sizeof(*req));
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+			OPCODE_ISCSI_INI_CFG_GET_HBA_NAME,
+			sizeof(*req));
 
 	be_mcc_notify(phba);
 	spin_unlock(&ctrl->mbox_lock);
 	return tag;
 }
 
+unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
+{
+	unsigned int tag = 0;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_ntwk_link_status_req *req;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+
+	spin_lock(&ctrl->mbox_lock);
+	tag = alloc_mcc_tag(phba);
+	if (!tag) {
+		spin_unlock(&ctrl->mbox_lock);
+		return tag;
+	}
+
+	wrb = wrb_from_mccq(phba);
+	req = embedded_payload(wrb);
+	wrb->tag0 |= tag;
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+			sizeof(*req));
+
+	be_mcc_notify(phba);
+	spin_unlock(&ctrl->mbox_lock);
+	return tag;
+}
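
For reference, the mgmt_open_connection() hunk earlier in this file fills req->ip_address.addr[] by masking the network-byte-order s_addr one octet at a time. The sketch below repeats that masking in user space; it assumes a little-endian host, where the low byte of the stored value is the first octet on the wire, and the target address is invented:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            struct in_addr ia;
            uint8_t addr[4];
            uint32_t s_addr;

            inet_pton(AF_INET, "192.168.10.20", &ia);   /* hypothetical target */
            s_addr = ia.s_addr;     /* network byte order, like sin_addr.s_addr */

            addr[0] = s_addr & 0x000000ff;              /* first octet on LE hosts */
            addr[1] = (s_addr & 0x0000ff00) >> 8;
            addr[2] = (s_addr & 0x00ff0000) >> 16;
            addr[3] = (s_addr & 0xff000000) >> 24;

            printf("%u.%u.%u.%u\n", addr[0], addr[1], addr[2], addr[3]);
            return 0;
    }
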
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 0842882..5c2e376 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -20,11 +20,16 @@
 #ifndef _BEISCSI_MGMT_
 #define _BEISCSI_MGMT_
 
-#include <linux/types.h>
-#include <linux/list.h>
+#include <scsi/scsi_bsg_iscsi.h>
 #include "be_iscsi.h"
 #include "be_main.h"
 
+#define IP_ACTION_ADD	0x01
+#define IP_ACTION_DEL	0x02
+
+#define IP_V6_LEN	16
+#define IP_V4_LEN	4
+
 /**
  * Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field
@@ -98,6 +103,10 @@
 				struct invalidate_command_table *inv_tbl,
 				unsigned int num_invalidate, unsigned int cid,
 				struct be_dma_mem *nonemb_cmd);
+unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
+					 struct beiscsi_hba *phba,
+					 struct bsg_job *job,
+					 struct be_dma_mem *nonemb_cmd);
 
 struct iscsi_invalidate_connection_params_in {
 	struct be_cmd_req_hdr hdr;
@@ -204,6 +213,13 @@
 	struct mgmt_controller_attributes params;
 } __packed;
 
+struct be_bsg_vendor_cmd {
+	struct be_cmd_req_hdr hdr;
+	unsigned short region;
+	unsigned short offset;
+	unsigned short sector;
+} __packed;
+
 /* configuration management */
 
 #define GET_MGMT_CONTROLLER_WS(phba)    (phba->pmgmt_ws)
@@ -219,12 +235,15 @@
 				/* the CMD_RESPONSE_HEADER  */
 
 #define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\
-    pa->lo = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
+	pa->lo = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
 					bus_address.u.a32.address_lo;  \
-    pa->hi = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
+	pa->hi = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
 					bus_address.u.a32.address_hi;  \
 }
 
+#define BEISCSI_WRITE_FLASH 0
+#define BEISCSI_READ_FLASH 1
+
 struct beiscsi_endpoint {
 	struct beiscsi_hba *phba;
 	struct beiscsi_sess *sess;
@@ -248,4 +267,27 @@
 					 unsigned short issue_reset,
 					 unsigned short savecfg_flag);
 
+int mgmt_set_ip(struct beiscsi_hba *phba,
+		struct iscsi_iface_param_info *ip_param,
+		struct iscsi_iface_param_info *subnet_param,
+		uint32_t boot_proto);
+
+unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
+
+unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
+				   u32 boot_session_handle,
+				   struct be_dma_mem *nonemb_cmd);
+
+int mgmt_get_nic_conf(struct beiscsi_hba *phba,
+		      struct be_cmd_get_nic_conf_resp *mac);
+
+int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
+		     struct be_cmd_get_if_info_resp *if_info);
+
+int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
+		     struct be_cmd_get_def_gateway_resp *gateway);
+
+int mgmt_set_gateway(struct beiscsi_hba *phba,
+		     struct iscsi_iface_param_info *gateway_param);
+
 #endif
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index e75e07d..51c9e13 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -799,9 +799,6 @@
 				      enum bfa_lport_role roles,
 				      struct bfad_vf_s *vf_drv,
 				      struct bfad_vport_s *vp_drv);
-void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
-			  struct bfad_vf_s *vf_drv,
-			  struct bfad_vport_s *vp_drv);
 
 /*
  * vport callbacks
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 5d2a130..937000d 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -616,7 +616,7 @@
 	__port_action[port->fabric->fab_type].online(port);
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
-	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
 		"Logical port online: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 	bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
@@ -639,12 +639,12 @@
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 	if (bfa_sm_cmp_state(port->fabric,
 			bfa_fcs_fabric_sm_online) == BFA_TRUE) {
-		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
+		BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
 		"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 		bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
 	} else {
-		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
+		BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
 		"Logical port taken offline: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
 		bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
@@ -709,14 +709,10 @@
 	bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
 
 	/* Base port will be deleted by the OS driver */
-	if (port->vport) {
-		bfa_fcb_lport_delete(port->fcs->bfad, port->port_cfg.roles,
-				port->fabric->vf_drv,
-				port->vport ? port->vport->vport_drv : NULL);
+	if (port->vport)
 		bfa_fcs_vport_delete_comp(port->vport);
-	} else {
+	else
 		bfa_wc_down(&port->fabric->wc);
-	}
 }
 
 
@@ -5714,17 +5710,23 @@
 			(struct bfad_vport_s *)vport->vport_drv;
 
 	bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);
-
-	if (vport_drv->comp_del)
-		complete(vport_drv->comp_del);
-	else
-		kfree(vport_drv);
-
 	bfa_lps_delete(vport->lps);
+
+	if (vport_drv->comp_del) {
+		complete(vport_drv->comp_del);
+		return;
+	}
+
+	/*
+	 * We queue the vport delete work to the IM work_q from here.
+	 * The memory for the bfad_vport_s is freed from the FC function
+	 * template vport_delete entry point.
+	 */
+	if (vport_drv)
+		bfad_im_port_delete(vport_drv->drv_port.bfad,
+				&vport_drv->drv_port);
 }
 
-
-
 /*
  *  fcs_vport_public FCS virtual port public interfaces
  */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 404fd10..2e4b0be 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -456,23 +456,6 @@
 	return port_drv;
 }
 
-void
-bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
-		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
-{
-	struct bfad_port_s    *port_drv;
-
-	/* this will be only called from rmmod context */
-	if (vp_drv && !vp_drv->comp_del) {
-		port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
-				((vf_drv) ? (&(vf_drv)->base_port) :
-				(&(bfad)->pport));
-		bfa_trc(bfad, roles);
-		if (roles & BFA_LPORT_ROLE_FCP_IM)
-			bfad_im_port_delete(bfad, port_drv);
-	}
-}
-
 /*
  * FCS RPORT alloc callback, after successful PLOGI by FCS
  */
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 7b1ecd2..8b6c6bf 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -497,6 +497,7 @@
 	if (im_port->flags & BFAD_PORT_DELETE) {
 		bfad_scsi_host_free(bfad, im_port);
 		list_del(&vport->list_entry);
+		kfree(vport);
 		return 0;
 	}
 
@@ -758,25 +759,10 @@
 	else if (!strcmp(model, "Brocade-804"))
 		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
 			"Brocade 8Gbps FC HBA for HP Bladesystem C-class");
-	else if (!strcmp(model, "Brocade-902") ||
-		 !strcmp(model, "Brocade-1741"))
+	else if (!strcmp(model, "Brocade-1741"))
 		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
 			"Brocade 10Gbps CNA for Dell M-Series Blade Servers");
-	else if (strstr(model, "Brocade-1560")) {
-		if (nports == 1)
-			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-				"Brocade 16Gbps PCIe single port FC HBA");
-		else
-			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-				"Brocade 16Gbps PCIe dual port FC HBA");
-	} else if (strstr(model, "Brocade-1710")) {
-		if (nports == 1)
-			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-				"Brocade 10Gbps single port CNA");
-		else
-			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
-				"Brocade 10Gbps dual port CNA");
-	} else if (strstr(model, "Brocade-1860")) {
+	else if (strstr(model, "Brocade-1860")) {
 		if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
 			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
 				"Brocade 10Gbps single port CNA");
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 495a841..25093a0 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,6 +1,6 @@
 /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index 72118db..dc0a08e 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,6 +1,6 @@
 /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 0bd70e8..0c53c28 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
 /* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index f9d6f41..ece47e5 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
 /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 4927cca..8b68167 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
 /* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -18,8 +18,8 @@
 static u32 adapter_count;
 
 #define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.7.0.3"
-#define DRV_MODULE_RELDATE	"Jun 15, 2011"
+#define DRV_MODULE_VERSION	"2.7.2.2"
+#define DRV_MODULE_RELDATE	"Apr 25, 2012"
 
 static char version[] __devinitdata =
 		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1a44b45..f8d516b 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
 /*
  * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2011 Broadcom Corporation
+ * Copyright (c) 2006 - 2012 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -2244,6 +2244,7 @@
 	.eh_device_reset_handler = iscsi_eh_device_reset,
 	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.change_queue_depth	= iscsi_change_queue_depth,
+	.target_alloc		= iscsi_target_alloc,
 	.can_queue		= 2048,
 	.max_sectors		= 127,
 	.cmd_per_lun		= 128,
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index 83a77f7..c61cf7a 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,6 +1,6 @@
 /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2004 - 2011 Broadcom Corporation
+ * Copyright (c) 2004 - 2012 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 04c5cea..fda9cde 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -55,11 +55,16 @@
 #define ALUA_FAILOVER_TIMEOUT		(60 * HZ)
 #define ALUA_FAILOVER_RETRIES		5
 
+/* flags passed from user level */
+#define ALUA_OPTIMIZE_STPG		1
+
 struct alua_dh_data {
 	int			group_id;
 	int			rel_port;
 	int			tpgs;
 	int			state;
+	int			pref;
+	unsigned		flags; /* used for optimizing STPG */
 	unsigned char		inq[ALUA_INQUIRY_SIZE];
 	unsigned char		*buff;
 	int			bufflen;
@@ -554,14 +559,16 @@
 	for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
 		if (h->group_id == (ucp[2] << 8) + ucp[3]) {
 			h->state = ucp[0] & 0x0f;
+			h->pref = ucp[0] >> 7;
 			valid_states = ucp[1];
 		}
 		off = 8 + (ucp[7] * 4);
 	}
 
 	sdev_printk(KERN_INFO, sdev,
-		    "%s: port group %02x state %c supports %c%c%c%c%c%c%c\n",
+		    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
 		    ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
+		    h->pref ? "preferred" : "non-preferred",
 		    valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
 		    valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
 		    valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
@@ -621,6 +628,37 @@
 out:
 	return err;
 }
+/*
+ * alua_set_params - set/unset the optimize flag
+ * @sdev: device on the path to be activated
+ * params - parameters in the following format
+ *      "no_of_params\0param1\0param2\0param3\0...\0"
+ * For example, to set the flag pass the following parameters
+ * from multipath.conf
+ *     hardware_handler        "2 alua 1"
+ */
+static int alua_set_params(struct scsi_device *sdev, const char *params)
+{
+	struct alua_dh_data *h = get_alua_data(sdev);
+	unsigned int optimize = 0, argc;
+	const char *p = params;
+	int result = SCSI_DH_OK;
+
+	if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
+		return -EINVAL;
+
+	while (*p++)
+		;
+	if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
+		return -EINVAL;
+
+	if (optimize)
+		h->flags |= ALUA_OPTIMIZE_STPG;
+	else
+		h->flags &= ~ALUA_OPTIMIZE_STPG;
+
+	return result;
+}
 
 /*
  * alua_activate - activate a path
@@ -637,14 +675,37 @@
 {
 	struct alua_dh_data *h = get_alua_data(sdev);
 	int err = SCSI_DH_OK;
+	int stpg = 0;
 
 	err = alua_rtpg(sdev, h);
 	if (err != SCSI_DH_OK)
 		goto out;
 
-	if (h->tpgs & TPGS_MODE_EXPLICIT &&
-	    h->state != TPGS_STATE_OPTIMIZED &&
-	    h->state != TPGS_STATE_LBA_DEPENDENT) {
+	if (h->tpgs & TPGS_MODE_EXPLICIT) {
+		switch (h->state) {
+		case TPGS_STATE_NONOPTIMIZED:
+			stpg = 1;
+			if ((h->flags & ALUA_OPTIMIZE_STPG) &&
+			    (!h->pref) &&
+			    (h->tpgs & TPGS_MODE_IMPLICIT))
+				stpg = 0;
+			break;
+		case TPGS_STATE_STANDBY:
+			stpg = 1;
+			break;
+		case TPGS_STATE_UNAVAILABLE:
+		case TPGS_STATE_OFFLINE:
+			err = SCSI_DH_IO;
+			break;
+		case TPGS_STATE_TRANSITIONING:
+			err = SCSI_DH_RETRY;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (stpg) {
 		h->callback_fn = fn;
 		h->callback_data = data;
 		err = submit_stpg(h);
@@ -698,6 +759,7 @@
 	.prep_fn = alua_prep_fn,
 	.check_sense = alua_check_sense,
 	.activate = alua_activate,
+	.set_params = alua_set_params,
 	.match = alua_match,
 };
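
[Illustration, not part of the patch: a minimal user-space sketch of parsing a handler-parameter string in the "no_of_params\0param1\0..." format that alua_set_params() above expects. The buffer contents are a hypothetical stand-in for what a multipath hardware_handler "2 alua 1" line would hand down; the exact string is built by the caller and is assumed here, not taken from the patch.]

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical buffer: the count "1", a NUL, then the single
         * parameter "1" (enable optimize), mirroring the format that
         * alua_set_params() parses. */
        static const char params[] = "1\0" "1";
        const char *p = params;
        unsigned int argc, optimize;

        if (sscanf(params, "%u", &argc) != 1 || argc != 1)
            return 1;
        while (*p++)        /* skip past the count field */
            ;
        if (sscanf(p, "%u", &optimize) != 1 || optimize > 1)
            return 1;
        printf("optimize STPG: %s\n", optimize ? "on" : "off");
        return 0;
    }
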
 
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 335e851..76e3d0b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -411,20 +411,18 @@
 }
 
 /**
- * fcoe_interface_cleanup() - Clean up a FCoE interface
+ * fcoe_interface_remove() - remove FCoE interface from netdev
  * @fcoe: The FCoE interface to be cleaned up
  *
  * Caller must be holding the RTNL mutex
  */
-static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+static void fcoe_interface_remove(struct fcoe_interface *fcoe)
 {
 	struct net_device *netdev = fcoe->netdev;
 	struct fcoe_ctlr *fip = &fcoe->ctlr;
 	u8 flogi_maddr[ETH_ALEN];
 	const struct net_device_ops *ops;
 
-	rtnl_lock();
-
 	/*
 	 * Don't listen for Ethernet packets anymore.
 	 * synchronize_net() ensures that the packet handlers are not running
@@ -453,12 +451,28 @@
 			FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
 					" specific feature for LLD.\n");
 	}
+	fcoe->removed = 1;
+}
 
+
+/**
+ * fcoe_interface_cleanup() - Clean up a FCoE interface
+ * @fcoe: The FCoE interface to be cleaned up
+ */
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+{
+	struct net_device *netdev = fcoe->netdev;
+	struct fcoe_ctlr *fip = &fcoe->ctlr;
+
+	rtnl_lock();
+	if (!fcoe->removed)
+		fcoe_interface_remove(fcoe);
 	rtnl_unlock();
 
 	/* Release the self-reference taken during fcoe_interface_create() */
 	/* tear-down the FCoE controller */
 	fcoe_ctlr_destroy(fip);
+	scsi_host_put(fcoe->ctlr.lp->host);
 	kfree(fcoe);
 	dev_put(netdev);
 	module_put(THIS_MODULE);
@@ -522,13 +536,11 @@
 	struct fcoe_port *port = lport_priv(lport);
 	struct fcoe_interface *fcoe = port->priv;
 
-	rtnl_lock();
 	if (!is_zero_ether_addr(port->data_src_addr))
 		dev_uc_del(fcoe->netdev, port->data_src_addr);
 	if (!is_zero_ether_addr(addr))
 		dev_uc_add(fcoe->netdev, addr);
 	memcpy(port->data_src_addr, addr, ETH_ALEN);
-	rtnl_unlock();
 }
 
 /**
@@ -941,6 +953,10 @@
 	rtnl_lock();
 	if (!is_zero_ether_addr(port->data_src_addr))
 		dev_uc_del(netdev, port->data_src_addr);
+	if (lport->vport)
+		synchronize_net();
+	else
+		fcoe_interface_remove(fcoe);
 	rtnl_unlock();
 
 	/* Free queued packets for the per-CPU receive threads */
@@ -959,8 +975,12 @@
 	/* Free memory used by statistical counters */
 	fc_lport_free_stats(lport);
 
-	/* Release the Scsi_Host */
-	scsi_host_put(lport->host);
+	/*
+	 * Release the Scsi_Host for a vport, but hold on to the
+	 * master lport until its fcoe interface is fully cleaned up.
+	 */
+	if (lport->vport)
+		scsi_host_put(lport->host);
 }
 
 /**
@@ -2274,10 +2294,9 @@
 			continue;
 
 		skb = dev_alloc_skb(0);
-		if (!skb) {
-			spin_unlock_bh(&pp->fcoe_rx_list.lock);
+		if (!skb)
 			continue;
-		}
+
 		skb->destructor = fcoe_percpu_flush_done;
 
 		spin_lock_bh(&pp->fcoe_rx_list.lock);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 3c2733a..96ac938 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -71,7 +71,8 @@
  * @ctlr:	      The FCoE controller (for FIP)
  * @oem:	      The offload exchange manager for all local port
  *		      instances associated with this port
- * This structure is 1:1 with a net devive.
+ * @removed:	      Indicates fcoe interface removed from net device
+ * This structure is 1:1 with a net device.
  */
 struct fcoe_interface {
 	struct list_head   list;
@@ -81,6 +82,7 @@
 	struct packet_type fip_packet_type;
 	struct fcoe_ctlr   ctlr;
 	struct fc_exch_mgr *oem;
+	u8	removed;
 };
 
 #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 249a106..5a4c725 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1883,7 +1883,13 @@
 	frame = (struct fip_frame *)skb->data;
 	memset(frame, 0, len);
 	memcpy(frame->eth.h_dest, dest, ETH_ALEN);
-	memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+
+	if (sub == FIP_SC_VN_BEACON) {
+		hton24(frame->eth.h_source, FIP_VN_FC_MAP);
+		hton24(frame->eth.h_source + 3, fip->port_id);
+	} else {
+		memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+	}
 	frame->eth.h_proto = htons(ETH_P_FIP);
 
 	frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
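
[Illustration, not part of the patch: how the VN2VN beacon source MAC above is composed, i.e. a 24-bit FC-MAP OUI in the top three bytes and the 24-bit port ID in the bottom three. The hton24() helper and both constants below are local stand-ins; the real FIP_VN_FC_MAP value and fip->port_id come from the FIP headers and the login state, so treat them as assumptions.]

    #include <stdio.h>
    #include <stdint.h>

    /* Local stand-in for libfc's hton24(): store a 24-bit value big-endian. */
    static void hton24(uint8_t *p, uint32_t v)
    {
        p[0] = (v >> 16) & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = v & 0xff;
    }

    int main(void)
    {
        uint32_t fc_map = 0x0efd00;     /* assumed VN2VN FC-MAP, for illustration */
        uint32_t port_id = 0x123456;    /* made-up 24-bit N_Port ID */
        uint8_t mac[6];

        hton24(mac, fc_map);            /* upper 3 bytes: FC-MAP */
        hton24(mac + 3, port_id);       /* lower 3 bytes: port ID */
        printf("beacon source MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }
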
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 351dc0b..a3a056a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -218,6 +218,9 @@
 
 	if (!shost->shost_gendev.parent)
 		shost->shost_gendev.parent = dev ? dev : &platform_bus;
+	if (!dma_dev)
+		dma_dev = shost->shost_gendev.parent;
+
 	shost->dma_dev = dma_dev;
 
 	error = device_add(&shost->shost_gendev);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 500e20d..796482b 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -159,6 +159,7 @@
 	int qdepth, int reason);
 
 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
+static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
 static int hpsa_slave_alloc(struct scsi_device *sdev);
 static void hpsa_slave_destroy(struct scsi_device *sdev);
 
@@ -171,7 +172,7 @@
 static void calc_bucket_map(int *bucket, int num_buckets,
 	int nsgs, int *bucket_map);
 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
-static inline u32 next_command(struct ctlr_info *h);
+static inline u32 next_command(struct ctlr_info *h, u8 q);
 static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
 	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
 	u64 *cfg_offset);
@@ -180,6 +181,7 @@
 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
 static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
 	void __iomem *vaddr, int wait_for_ready);
+static inline void finish_cmd(struct CommandList *c);
 #define BOARD_NOT_READY 0
 #define BOARD_READY 1
 
@@ -234,6 +236,16 @@
 	return 1;
 }
 
+static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
+{
+	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
+		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
+		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
+		return 0;
+	dev_warn(&h->pdev->dev, HPSA "device busy");
+	return 1;
+}
+
 static ssize_t host_store_rescan(struct device *dev,
 				 struct device_attribute *attr,
 				 const char *buf, size_t count)
@@ -368,7 +380,7 @@
 }
 
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
-	"UNKNOWN"
+	"1(ADM)", "UNKNOWN"
 };
 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
 
@@ -497,6 +509,7 @@
 	.change_queue_depth	= hpsa_change_queue_depth,
 	.this_id		= -1,
 	.use_clustering		= ENABLE_CLUSTERING,
+	.eh_abort_handler	= hpsa_eh_abort_handler,
 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
 	.ioctl			= hpsa_ioctl,
 	.slave_alloc		= hpsa_slave_alloc,
@@ -516,24 +529,28 @@
 	list_add_tail(&c->list, list);
 }
 
-static inline u32 next_command(struct ctlr_info *h)
+static inline u32 next_command(struct ctlr_info *h, u8 q)
 {
 	u32 a;
+	struct reply_pool *rq = &h->reply_queue[q];
+	unsigned long flags;
 
 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
-		return h->access.command_completed(h);
+		return h->access.command_completed(h, q);
 
-	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
-		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
-		(h->reply_pool_head)++;
+	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+		a = rq->head[rq->current_entry];
+		rq->current_entry++;
+		spin_lock_irqsave(&h->lock, flags);
 		h->commands_outstanding--;
+		spin_unlock_irqrestore(&h->lock, flags);
 	} else {
 		a = FIFO_EMPTY;
 	}
 	/* Check for wraparound */
-	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
-		h->reply_pool_head = h->reply_pool;
-		h->reply_pool_wraparound ^= 1;
+	if (rq->current_entry == h->max_commands) {
+		rq->current_entry = 0;
+		rq->wraparound ^= 1;
 	}
 	return a;
 }
@@ -544,8 +561,41 @@
  */
 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
 {
-	if (likely(h->transMethod & CFGTBL_Trans_Performant))
+	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+		if (likely(h->msix_vector))
+			c->Header.ReplyQueue =
+				smp_processor_id() % h->nreply_queues;
+	}
+}
+
+static int is_firmware_flash_cmd(u8 *cdb)
+{
+	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
+}
+
+/*
+ * During firmware flash, the heartbeat register may not update as frequently
+ * as it should.  So we dial down lockup detection during firmware flash. and
+ * dial it back up when firmware flash completes.
+ */
+#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
+#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
+static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
+		struct CommandList *c)
+{
+	if (!is_firmware_flash_cmd(c->Request.CDB))
+		return;
+	atomic_inc(&h->firmware_flash_in_progress);
+	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
+}
+
+static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
+		struct CommandList *c)
+{
+	if (is_firmware_flash_cmd(c->Request.CDB) &&
+		atomic_dec_and_test(&h->firmware_flash_in_progress))
+		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
 }
 
 static void enqueue_cmd_and_start_io(struct ctlr_info *h,
@@ -554,11 +604,12 @@
 	unsigned long flags;
 
 	set_performant_mode(h, c);
+	dial_down_lockup_detection_during_fw_flash(h, c);
 	spin_lock_irqsave(&h->lock, flags);
 	addQ(&h->reqQ, c);
 	h->Qdepth++;
-	start_io(h);
 	spin_unlock_irqrestore(&h->lock, flags);
+	start_io(h);
 }
 
 static inline void removeQ(struct CommandList *c)
@@ -1193,7 +1244,7 @@
 				break;
 			}
 			/* Must be some other type of check condition */
-			dev_warn(&h->pdev->dev, "cp %p has check condition: "
+			dev_dbg(&h->pdev->dev, "cp %p has check condition: "
 					"unknown type: "
 					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
 					"Returning result: 0x%x, "
@@ -1370,16 +1421,24 @@
 	}
 }
 
+#define MAX_DRIVER_CMD_RETRIES 25
 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
 	struct CommandList *c, int data_direction)
 {
-	int retry_count = 0;
+	int backoff_time = 10, retry_count = 0;
 
 	do {
 		memset(c->err_info, 0, sizeof(*c->err_info));
 		hpsa_scsi_do_simple_cmd_core(h, c);
 		retry_count++;
-	} while (check_for_unit_attention(h, c) && retry_count <= 3);
+		if (retry_count > 3) {
+			msleep(backoff_time);
+			if (backoff_time < 1000)
+				backoff_time *= 2;
+		}
+	} while ((check_for_unit_attention(h, c) ||
+			check_for_busy(h, c)) &&
+			retry_count <= MAX_DRIVER_CMD_RETRIES);
 	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
 }
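
[Illustration, not part of the patch: the delay schedule produced by the retry loop above when every attempt keeps returning busy or a unit attention. Doubling stops once the delay reaches 1000 ms, so the per-retry sleep tops out at 1280 ms and the worst case spends roughly 22 seconds sleeping over the full retry budget.]

    #include <stdio.h>

    #define MAX_DRIVER_CMD_RETRIES 25

    int main(void)
    {
        int backoff_time = 10, retry_count = 0, total = 0;

        /* Mirror the loop in hpsa_scsi_do_simple_cmd_with_retry(),
         * assuming the command stays busy for every attempt. */
        do {
            retry_count++;
            if (retry_count > 3) {
                printf("retry %2d: sleep %4d ms\n", retry_count, backoff_time);
                total += backoff_time;
                if (backoff_time < 1000)
                    backoff_time *= 2;
            }
        } while (retry_count <= MAX_DRIVER_CMD_RETRIES);
        printf("total sleep: %d ms\n", total);
        return 0;
    }
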
 
@@ -2065,9 +2124,8 @@
 		done(cmd);
 		return 0;
 	}
-	/* Need a lock as this is being allocated from the pool */
-	c = cmd_alloc(h);
 	spin_unlock_irqrestore(&h->lock, flags);
+	c = cmd_alloc(h);
 	if (c == NULL) {			/* trouble... */
 		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -2334,6 +2392,261 @@
 	return FAILED;
 }
 
+static void swizzle_abort_tag(u8 *tag)
+{
+	u8 original_tag[8];
+
+	memcpy(original_tag, tag, 8);
+	tag[0] = original_tag[3];
+	tag[1] = original_tag[2];
+	tag[2] = original_tag[1];
+	tag[3] = original_tag[0];
+	tag[4] = original_tag[7];
+	tag[5] = original_tag[6];
+	tag[6] = original_tag[5];
+	tag[7] = original_tag[4];
+}
+
+static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
+	struct CommandList *abort, int swizzle)
+{
+	int rc = IO_OK;
+	struct CommandList *c;
+	struct ErrorInfo *ei;
+
+	c = cmd_special_alloc(h);
+	if (c == NULL) {	/* trouble... */
+		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		return -ENOMEM;
+	}
+
+	fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
+	if (swizzle)
+		swizzle_abort_tag(&c->Request.CDB[4]);
+	hpsa_scsi_do_simple_cmd_core(h, c);
+	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
+		__func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
+	/* no unmap needed here because no data xfer. */
+
+	ei = c->err_info;
+	switch (ei->CommandStatus) {
+	case CMD_SUCCESS:
+		break;
+	case CMD_UNABORTABLE: /* Very common, don't make noise. */
+		rc = -1;
+		break;
+	default:
+		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
+			__func__, abort->Header.Tag.upper,
+			abort->Header.Tag.lower);
+		hpsa_scsi_interpret_error(c);
+		rc = -1;
+		break;
+	}
+	cmd_special_free(h, c);
+	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
+		abort->Header.Tag.upper, abort->Header.Tag.lower);
+	return rc;
+}
+
+/*
+ * hpsa_find_cmd_in_queue
+ *
+ * Used to determine whether a command (find) is still present
+ * in queue_head.
+ *
+ * This is used to avoid unnecessary aborts.  Commands in h->reqQ have
+ * not yet been submitted, and so can be aborted by the driver without
+ * sending an abort to the hardware.
+ *
+ * Returns pointer to command if found in queue, NULL otherwise.
+ */
+static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
+			struct scsi_cmnd *find, struct list_head *queue_head)
+{
+	unsigned long flags;
+	struct CommandList *c = NULL;	/* ptr into cmpQ */
+
+	if (!find)
+		return NULL;
+	spin_lock_irqsave(&h->lock, flags);
+	list_for_each_entry(c, queue_head, list) {
+		if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
+			continue;
+		if (c->scsi_cmd == find) {
+			spin_unlock_irqrestore(&h->lock, flags);
+			return c;
+		}
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	return NULL;
+}
+
+static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
+					u8 *tag, struct list_head *queue_head)
+{
+	unsigned long flags;
+	struct CommandList *c;
+
+	spin_lock_irqsave(&h->lock, flags);
+	list_for_each_entry(c, queue_head, list) {
+		if (memcmp(&c->Header.Tag, tag, 8) != 0)
+			continue;
+		spin_unlock_irqrestore(&h->lock, flags);
+		return c;
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	return NULL;
+}
+
+/* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
+ * tell which kind we're dealing with, so we send the abort both ways.  There
+ * shouldn't be any collisions between swizzled and unswizzled tags due to the
+ * way we construct our tags but we check anyway in case the assumptions which
+ * make this true someday become false.
+ */
+static int hpsa_send_abort_both_ways(struct ctlr_info *h,
+	unsigned char *scsi3addr, struct CommandList *abort)
+{
+	u8 swizzled_tag[8];
+	struct CommandList *c;
+	int rc = 0, rc2 = 0;
+
+	/* we do not expect to find the swizzled tag in our queue, but
+	 * check anyway just to be sure the assumptions which make this
+	 * the case still hold.
+	 */
+	memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
+	swizzle_abort_tag(swizzled_tag);
+	c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
+	if (c != NULL) {
+		dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
+		return hpsa_send_abort(h, scsi3addr, abort, 0);
+	}
+	rc = hpsa_send_abort(h, scsi3addr, abort, 0);
+
+	/* if the command is still in our queue, we can't conclude that it was
+	 * aborted (it might have just completed normally) but in any case
+	 * we don't need to try to abort it another way.
+	 */
+	c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
+	if (c)
+		rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
+	return rc && rc2;
+}
+
+/* Send an abort for the specified command.
+ *	If the device and controller support it,
+ *		send a task abort request.
+ */
+static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
+{
+
+	int i, rc;
+	struct ctlr_info *h;
+	struct hpsa_scsi_dev_t *dev;
+	struct CommandList *abort; /* pointer to command to be aborted */
+	struct CommandList *found;
+	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
+	char msg[256];		/* For debug messaging. */
+	int ml = 0;
+
+	/* Find the controller of the command to be aborted */
+	h = sdev_to_hba(sc->device);
+	if (WARN(h == NULL,
+			"ABORT REQUEST FAILED, Controller lookup failed.\n"))
+		return FAILED;
+
+	/* Check that controller supports some kind of task abort */
+	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
+		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+		return FAILED;
+
+	memset(msg, 0, sizeof(msg));
+	ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
+		h->scsi_host->host_no, sc->device->channel,
+		sc->device->id, sc->device->lun);
+
+	/* Find the device of the command to be aborted */
+	dev = sc->device->hostdata;
+	if (!dev) {
+		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
+				msg);
+		return FAILED;
+	}
+
+	/* Get SCSI command to be aborted */
+	abort = (struct CommandList *) sc->host_scribble;
+	if (abort == NULL) {
+		dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
+				msg);
+		return FAILED;
+	}
+
+	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
+		abort->Header.Tag.upper, abort->Header.Tag.lower);
+	as  = (struct scsi_cmnd *) abort->scsi_cmd;
+	if (as != NULL)
+		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
+			as->cmnd[0], as->serial_number);
+	dev_dbg(&h->pdev->dev, "%s\n", msg);
+	dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
+		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
+
+	/* Search reqQ to See if command is queued but not submitted,
+	 * if so, complete the command with aborted status and remove
+	 * it from the reqQ.
+	 */
+	found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
+	if (found) {
+		found->err_info->CommandStatus = CMD_ABORTED;
+		finish_cmd(found);
+		dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
+				msg);
+		return SUCCESS;
+	}
+
+	/* Not in reqQ; if it is also not in cmpQ, it must have already completed */
+	found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
+	if (!found)  {
+		dev_dbg(&h->pdev->dev, "%s Request FAILED (not known to driver).\n",
+				msg);
+		return SUCCESS;
+	}
+
+	/*
+	 * Command is in flight, or possibly already completed
+	 * by the firmware (but not to the scsi mid layer) but we can't
+	 * distinguish which.  Send the abort down.
+	 */
+	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
+	if (rc != 0) {
+		dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
+		dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
+			h->scsi_host->host_no,
+			dev->bus, dev->target, dev->lun);
+		return FAILED;
+	}
+	dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
+
+	/* If the abort(s) above completed and actually aborted the
+	 * command, then the command to be aborted should already be
+	 * completed.  If not, wait around a bit more to see if it
+	 * manages to complete normally.
+	 */
+#define ABORT_COMPLETE_WAIT_SECS 30
+	for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
+		found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
+		if (!found)
+			return SUCCESS;
+		msleep(100);
+	}
+	dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
+		msg, ABORT_COMPLETE_WAIT_SECS);
+	return FAILED;
+}
+
+
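
[Illustration, not part of the patch: a stand-alone version of the tag swizzle used by hpsa_send_abort_both_ways(), applied to a made-up 8-byte tag so the byte reordering is visible.]

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Same permutation as swizzle_abort_tag() above: each 4-byte half of
     * the tag is byte-reversed. */
    static void swizzle_abort_tag(uint8_t *tag)
    {
        uint8_t o[8];

        memcpy(o, tag, 8);
        tag[0] = o[3]; tag[1] = o[2]; tag[2] = o[1]; tag[3] = o[0];
        tag[4] = o[7]; tag[5] = o[6]; tag[6] = o[5]; tag[7] = o[4];
    }

    int main(void)
    {
        uint8_t tag[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
        int i;

        swizzle_abort_tag(tag);
        for (i = 0; i < 8; i++)
            printf("%02x ", tag[i]);    /* 44 33 22 11 88 77 66 55 */
        printf("\n");
        return 0;
    }
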
 /*
  * For operations that cannot sleep, a command block is allocated at init,
  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
@@ -2346,14 +2659,21 @@
 	int i;
 	union u64bit temp64;
 	dma_addr_t cmd_dma_handle, err_dma_handle;
+	unsigned long flags;
 
+	spin_lock_irqsave(&h->lock, flags);
 	do {
 		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
-		if (i == h->nr_cmds)
+		if (i == h->nr_cmds) {
+			spin_unlock_irqrestore(&h->lock, flags);
 			return NULL;
+		}
 	} while (test_and_set_bit
 		 (i & (BITS_PER_LONG - 1),
 		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+	h->nr_allocs++;
+	spin_unlock_irqrestore(&h->lock, flags);
+
 	c = h->cmd_pool + i;
 	memset(c, 0, sizeof(*c));
 	cmd_dma_handle = h->cmd_pool_dhandle
@@ -2362,7 +2682,6 @@
 	memset(c->err_info, 0, sizeof(*c->err_info));
 	err_dma_handle = h->errinfo_pool_dhandle
 	    + i * sizeof(*c->err_info);
-	h->nr_allocs++;
 
 	c->cmdindex = i;
 
@@ -2418,11 +2737,14 @@
 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
 {
 	int i;
+	unsigned long flags;
 
 	i = c - h->cmd_pool;
+	spin_lock_irqsave(&h->lock, flags);
 	clear_bit(i & (BITS_PER_LONG - 1),
 		  h->cmd_pool_bits + (i / BITS_PER_LONG));
 	h->nr_frees++;
+	spin_unlock_irqrestore(&h->lock, flags);
 }
 
 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
@@ -2866,6 +3188,7 @@
 	int cmd_type)
 {
 	int pci_dir = XFER_NONE;
+	struct CommandList *a; /* for commands to be aborted */
 
 	c->cmd_type = CMD_IOCTL_PEND;
 	c->Header.ReplyQueue = 0;
@@ -2949,8 +3272,35 @@
 			c->Request.CDB[5] = 0x00;
 			c->Request.CDB[6] = 0x00;
 			c->Request.CDB[7] = 0x00;
+			break;
+		case  HPSA_ABORT_MSG:
+			a = buff;       /* point to command to be aborted */
+			dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
+				a->Header.Tag.upper, a->Header.Tag.lower,
+				c->Header.Tag.upper, c->Header.Tag.lower);
+			c->Request.CDBLen = 16;
+			c->Request.Type.Type = TYPE_MSG;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.Timeout = 0; /* Don't time out */
+			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
+			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
+			c->Request.CDB[2] = 0x00; /* reserved */
+			c->Request.CDB[3] = 0x00; /* reserved */
+			/* Tag to abort goes in CDB[4]-CDB[11] */
+			c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
+			c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
+			c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
+			c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
+			c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
+			c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
+			c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
+			c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
+			c->Request.CDB[12] = 0x00; /* reserved */
+			c->Request.CDB[13] = 0x00; /* reserved */
+			c->Request.CDB[14] = 0x00; /* reserved */
+			c->Request.CDB[15] = 0x00; /* reserved */
 		break;
-
 		default:
 			dev_warn(&h->pdev->dev, "unknown message type %d\n",
 				cmd);
@@ -2998,7 +3348,9 @@
 static void start_io(struct ctlr_info *h)
 {
 	struct CommandList *c;
+	unsigned long flags;
 
+	spin_lock_irqsave(&h->lock, flags);
 	while (!list_empty(&h->reqQ)) {
 		c = list_entry(h->reqQ.next, struct CommandList, list);
 		/* can't do anything if fifo is full */
@@ -3011,17 +3363,28 @@
 		removeQ(c);
 		h->Qdepth--;
 
-		/* Tell the controller execute command */
-		h->access.submit_command(h, c);
-
 		/* Put job onto the completed Q */
 		addQ(&h->cmpQ, c);
+
+		/* Must increment commands_outstanding before unlocking
+		 * and submitting, to avoid a race when checking for the
+		 * fifo-full condition.
+		 */
+		h->commands_outstanding++;
+		if (h->commands_outstanding > h->max_outstanding)
+			h->max_outstanding = h->commands_outstanding;
+
+		/* Tell the controller to execute the command */
+		spin_unlock_irqrestore(&h->lock, flags);
+		h->access.submit_command(h, c);
+		spin_lock_irqsave(&h->lock, flags);
 	}
+	spin_unlock_irqrestore(&h->lock, flags);
 }
 
-static inline unsigned long get_next_completion(struct ctlr_info *h)
+static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
 {
-	return h->access.command_completed(h);
+	return h->access.command_completed(h, q);
 }
 
 static inline bool interrupt_pending(struct ctlr_info *h)
@@ -3045,9 +3408,14 @@
 	return 0;
 }
 
-static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
+static inline void finish_cmd(struct CommandList *c)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->h->lock, flags);
 	removeQ(c);
+	spin_unlock_irqrestore(&c->h->lock, flags);
+	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
 	if (likely(c->cmd_type == CMD_SCSI))
 		complete_scsi_command(c);
 	else if (c->cmd_type == CMD_IOCTL_PEND)
@@ -3075,36 +3443,38 @@
 }
 
 /* process completion of an indexed ("direct lookup") command */
-static inline u32 process_indexed_cmd(struct ctlr_info *h,
+static inline void process_indexed_cmd(struct ctlr_info *h,
 	u32 raw_tag)
 {
 	u32 tag_index;
 	struct CommandList *c;
 
 	tag_index = hpsa_tag_to_index(raw_tag);
-	if (bad_tag(h, tag_index, raw_tag))
-		return next_command(h);
-	c = h->cmd_pool + tag_index;
-	finish_cmd(c, raw_tag);
-	return next_command(h);
+	if (!bad_tag(h, tag_index, raw_tag)) {
+		c = h->cmd_pool + tag_index;
+		finish_cmd(c);
+	}
 }
 
 /* process completion of a non-indexed command */
-static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
+static inline void process_nonindexed_cmd(struct ctlr_info *h,
 	u32 raw_tag)
 {
 	u32 tag;
 	struct CommandList *c = NULL;
+	unsigned long flags;
 
 	tag = hpsa_tag_discard_error_bits(h, raw_tag);
+	spin_lock_irqsave(&h->lock, flags);
 	list_for_each_entry(c, &h->cmpQ, list) {
 		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
-			finish_cmd(c, raw_tag);
-			return next_command(h);
+			spin_unlock_irqrestore(&h->lock, flags);
+			finish_cmd(c);
+			return;
 		}
 	}
+	spin_unlock_irqrestore(&h->lock, flags);
 	bad_tag(h, h->nr_cmds + 1, raw_tag);
-	return next_command(h);
 }
 
 /* Some controllers, like p400, will give us one interrupt
@@ -3126,10 +3496,20 @@
 	return 1;
 }
 
-static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
+/*
+ * Convert &h->q[x] (passed to interrupt handlers) back to h.
+ * Relies on (h->q[x] == x) being true for x such that
+ * 0 <= x < MAX_REPLY_QUEUES.
+ */
+static struct ctlr_info *queue_to_hba(u8 *queue)
 {
-	struct ctlr_info *h = dev_id;
-	unsigned long flags;
+	return container_of((queue - *queue), struct ctlr_info, q[0]);
+}
+
+static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
+{
+	struct ctlr_info *h = queue_to_hba(queue);
+	u8 q = *(u8 *) queue;
 	u32 raw_tag;
 
 	if (ignore_bogus_interrupt(h))
@@ -3137,74 +3517,68 @@
 
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
 	while (interrupt_pending(h)) {
-		raw_tag = get_next_completion(h);
+		raw_tag = get_next_completion(h, q);
 		while (raw_tag != FIFO_EMPTY)
-			raw_tag = next_command(h);
+			raw_tag = next_command(h, q);
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
+static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
 {
-	struct ctlr_info *h = dev_id;
-	unsigned long flags;
+	struct ctlr_info *h = queue_to_hba(queue);
 	u32 raw_tag;
+	u8 q = *(u8 *) queue;
 
 	if (ignore_bogus_interrupt(h))
 		return IRQ_NONE;
 
-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
-	raw_tag = get_next_completion(h);
+	raw_tag = get_next_completion(h, q);
 	while (raw_tag != FIFO_EMPTY)
-		raw_tag = next_command(h);
-	spin_unlock_irqrestore(&h->lock, flags);
+		raw_tag = next_command(h, q);
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
+static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
 {
-	struct ctlr_info *h = dev_id;
-	unsigned long flags;
+	struct ctlr_info *h = queue_to_hba((u8 *) queue);
 	u32 raw_tag;
+	u8 q = *(u8 *) queue;
 
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
 	while (interrupt_pending(h)) {
-		raw_tag = get_next_completion(h);
+		raw_tag = get_next_completion(h, q);
 		while (raw_tag != FIFO_EMPTY) {
-			if (hpsa_tag_contains_index(raw_tag))
-				raw_tag = process_indexed_cmd(h, raw_tag);
+			if (likely(hpsa_tag_contains_index(raw_tag)))
+				process_indexed_cmd(h, raw_tag);
 			else
-				raw_tag = process_nonindexed_cmd(h, raw_tag);
+				process_nonindexed_cmd(h, raw_tag);
+			raw_tag = next_command(h, q);
 		}
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
+static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
 {
-	struct ctlr_info *h = dev_id;
-	unsigned long flags;
+	struct ctlr_info *h = queue_to_hba(queue);
 	u32 raw_tag;
+	u8 q = *(u8 *) queue;
 
-	spin_lock_irqsave(&h->lock, flags);
 	h->last_intr_timestamp = get_jiffies_64();
-	raw_tag = get_next_completion(h);
+	raw_tag = get_next_completion(h, q);
 	while (raw_tag != FIFO_EMPTY) {
-		if (hpsa_tag_contains_index(raw_tag))
-			raw_tag = process_indexed_cmd(h, raw_tag);
+		if (likely(hpsa_tag_contains_index(raw_tag)))
+			process_indexed_cmd(h, raw_tag);
 		else
-			raw_tag = process_nonindexed_cmd(h, raw_tag);
+			process_nonindexed_cmd(h, raw_tag);
+		raw_tag = next_command(h, q);
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }
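
[Illustration, not part of the patch: why passing &h->q[i] to request_irq() is enough for the handlers above to recover both the queue number and the ctlr_info. Since h->q[i] == i, subtracting *queue steps the pointer back to &h->q[0], and container_of() then yields h. The struct below is a stripped-down stand-in, not the real ctlr_info.]

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define MAX_REPLY_QUEUES 8

    /* Minimal stand-in for the kernel's container_of(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ctlr_info {
        int ctlr;
        uint8_t q[MAX_REPLY_QUEUES];    /* h->q[i] == i, set before request_irq() */
    };

    static struct ctlr_info *queue_to_hba(uint8_t *queue)
    {
        /* *queue is the queue number, so queue - *queue is &h->q[0]. */
        return container_of(queue - *queue, struct ctlr_info, q[0]);
    }

    int main(void)
    {
        struct ctlr_info h = { .ctlr = 42 };
        int i;

        for (i = 0; i < MAX_REPLY_QUEUES; i++)
            h.q[i] = (uint8_t)i;
        printf("queue %u belongs to controller %d\n",
               h.q[3], queue_to_hba(&h.q[3])->ctlr);
        return 0;
    }
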
 
@@ -3638,10 +4012,13 @@
 static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
 {
 #ifdef CONFIG_PCI_MSI
-	int err;
-	struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
-	{0, 2}, {0, 3}
-	};
+	int err, i;
+	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
+
+	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
+		hpsa_msix_entries[i].vector = 0;
+		hpsa_msix_entries[i].entry = i;
+	}
 
 	/* Some boards advertise MSI but don't really support it */
 	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
@@ -3649,12 +4026,11 @@
 		goto default_int_mode;
 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
 		dev_info(&h->pdev->dev, "MSIX\n");
-		err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
+		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
+						MAX_REPLY_QUEUES);
 		if (!err) {
-			h->intr[0] = hpsa_msix_entries[0].vector;
-			h->intr[1] = hpsa_msix_entries[1].vector;
-			h->intr[2] = hpsa_msix_entries[2].vector;
-			h->intr[3] = hpsa_msix_entries[3].vector;
+			for (i = 0; i < MAX_REPLY_QUEUES; i++)
+				h->intr[i] = hpsa_msix_entries[i].vector;
 			h->msix_vector = 1;
 			return;
 		}
@@ -3705,14 +4081,6 @@
 	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
 }
 
-static inline bool hpsa_board_disabled(struct pci_dev *pdev)
-{
-	u16 command;
-
-	(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
-	return ((command & PCI_COMMAND_MEMORY) == 0);
-}
-
 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 	unsigned long *memory_bar)
 {
@@ -3838,14 +4206,14 @@
 		h->maxsgentries = 31; /* default to traditional values */
 		h->chainsize = 0;
 	}
+
+	/* Find out what task management functions are supported and cache */
+	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
 }
 
 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
 {
-	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
-	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
-	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
-	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
+	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
 		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
 		return false;
 	}
@@ -3932,11 +4300,6 @@
 	h->product_name = products[prod_index].product_name;
 	h->access = *(products[prod_index].access);
 
-	if (hpsa_board_disabled(h->pdev)) {
-		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
-		return -ENODEV;
-	}
-
 	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
 			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
 
@@ -3946,6 +4309,9 @@
 		return err;
 	}
 
+	/* Enable bus mastering (pci_disable_device may disable this) */
+	pci_set_master(h->pdev);
+
 	err = pci_request_regions(h->pdev, HPSA);
 	if (err) {
 		dev_err(&h->pdev->dev,
@@ -3987,10 +4353,7 @@
 		iounmap(h->cfgtable);
 	if (h->vaddr)
 		iounmap(h->vaddr);
-	/*
-	 * Deliberately omit pci_disable_device(): it does something nasty to
-	 * Smart Array controllers that pci_enable_device does not undo
-	 */
+	pci_disable_device(h->pdev);
 	pci_release_regions(h->pdev);
 	return err;
 }
@@ -4081,14 +4444,33 @@
 	irqreturn_t (*msixhandler)(int, void *),
 	irqreturn_t (*intxhandler)(int, void *))
 {
-	int rc;
+	int rc, i;
 
-	if (h->msix_vector || h->msi_vector)
-		rc = request_irq(h->intr[h->intr_mode], msixhandler,
-				0, h->devname, h);
-	else
-		rc = request_irq(h->intr[h->intr_mode], intxhandler,
-				IRQF_SHARED, h->devname, h);
+	/*
+	 * initialize h->q[x] = x so that interrupt handlers know which
+	 * queue to process.
+	 */
+	for (i = 0; i < MAX_REPLY_QUEUES; i++)
+		h->q[i] = (u8) i;
+
+	if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
+		/* If performant mode and MSI-X, use multiple reply queues */
+		for (i = 0; i < MAX_REPLY_QUEUES; i++)
+			rc = request_irq(h->intr[i], msixhandler,
+					0, h->devname,
+					&h->q[i]);
+	} else {
+		/* Use single reply pool */
+		if (h->msix_vector || h->msi_vector) {
+			rc = request_irq(h->intr[h->intr_mode],
+				msixhandler, 0, h->devname,
+				&h->q[h->intr_mode]);
+		} else {
+			rc = request_irq(h->intr[h->intr_mode],
+				intxhandler, IRQF_SHARED, h->devname,
+				&h->q[h->intr_mode]);
+		}
+	}
 	if (rc) {
 		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
 		       h->intr[h->intr_mode], h->devname);
@@ -4121,15 +4503,38 @@
 	return 0;
 }
 
+static void free_irqs(struct ctlr_info *h)
+{
+	int i;
+
+	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+		/* Single reply queue, only one irq to free */
+		i = h->intr_mode;
+		free_irq(h->intr[i], &h->q[i]);
+		return;
+	}
+
+	for (i = 0; i < MAX_REPLY_QUEUES; i++)
+		free_irq(h->intr[i], &h->q[i]);
+}
+
+static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
+{
+	free_irqs(h);
+#ifdef CONFIG_PCI_MSI
+	if (h->msix_vector) {
+		if (h->pdev->msix_enabled)
+			pci_disable_msix(h->pdev);
+	} else if (h->msi_vector) {
+		if (h->pdev->msi_enabled)
+			pci_disable_msi(h->pdev);
+	}
+#endif /* CONFIG_PCI_MSI */
+}
+
 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 {
-	free_irq(h->intr[h->intr_mode], h);
-#ifdef CONFIG_PCI_MSI
-	if (h->msix_vector)
-		pci_disable_msix(h->pdev);
-	else if (h->msi_vector)
-		pci_disable_msi(h->pdev);
-#endif /* CONFIG_PCI_MSI */
+	hpsa_free_irqs_and_disable_msix(h);
 	hpsa_free_sg_chain_blocks(h);
 	hpsa_free_cmd_pool(h);
 	kfree(h->blockFetchTable);
@@ -4165,7 +4570,7 @@
 	while (!list_empty(list)) {
 		c = list_entry(list->next, struct CommandList, list);
 		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
-		finish_cmd(c, c->Header.Tag.lower);
+		finish_cmd(c);
 	}
 }
 
@@ -4188,9 +4593,6 @@
 	spin_unlock_irqrestore(&h->lock, flags);
 }
 
-#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
-#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
-
 static void detect_controller_lockup(struct ctlr_info *h)
 {
 	u64 now;
@@ -4201,7 +4603,7 @@
 	now = get_jiffies_64();
 	/* If we've received an interrupt recently, we're ok. */
 	if (time_after64(h->last_intr_timestamp +
-				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+				(h->heartbeat_sample_interval), now))
 		return;
 
 	/*
@@ -4210,7 +4612,7 @@
 	 * otherwise don't care about signals in this thread.
 	 */
 	if (time_after64(h->last_heartbeat_timestamp +
-				(HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
+				(h->heartbeat_sample_interval), now))
 		return;
 
 	/* If heartbeat has not changed since we last looked, we're not ok. */
@@ -4252,6 +4654,7 @@
 {
 	unsigned long flags;
 
+	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
 	spin_lock_irqsave(&lockup_detector_lock, flags);
 	list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
 	spin_unlock_irqrestore(&lockup_detector_lock, flags);
@@ -4391,7 +4794,7 @@
 		spin_lock_irqsave(&h->lock, flags);
 		h->access.set_intr_mask(h, HPSA_INTR_OFF);
 		spin_unlock_irqrestore(&h->lock, flags);
-		free_irq(h->intr[h->intr_mode], h);
+		free_irqs(h);
 		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
 					hpsa_intx_discard_completions);
 		if (rc) {
@@ -4441,7 +4844,7 @@
 clean4:
 	hpsa_free_sg_chain_blocks(h);
 	hpsa_free_cmd_pool(h);
-	free_irq(h->intr[h->intr_mode], h);
+	free_irqs(h);
 clean2:
 clean1:
 	kfree(h);
@@ -4484,13 +4887,7 @@
 	 */
 	hpsa_flush_cache(h);
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
-	free_irq(h->intr[h->intr_mode], h);
-#ifdef CONFIG_PCI_MSI
-	if (h->msix_vector)
-		pci_disable_msix(h->pdev);
-	else if (h->msi_vector)
-		pci_disable_msi(h->pdev);
-#endif				/* CONFIG_PCI_MSI */
+	hpsa_free_irqs_and_disable_msix(h);
 }
 
 static void __devexit hpsa_free_device_info(struct ctlr_info *h)
@@ -4529,10 +4926,7 @@
 	kfree(h->cmd_pool_bits);
 	kfree(h->blockFetchTable);
 	kfree(h->hba_inquiry_data);
-	/*
-	 * Deliberately omit pci_disable_device(): it does something nasty to
-	 * Smart Array controllers that pci_enable_device does not undo
-	 */
+	pci_disable_device(pdev);
 	pci_release_regions(pdev);
 	pci_set_drvdata(pdev, NULL);
 	kfree(h);
@@ -4627,11 +5021,8 @@
 	 * 10 = 6 s/g entry or 24k
 	 */
 
-	h->reply_pool_wraparound = 1; /* spec: init to 1 */
-
 	/* Controller spec: zero out this buffer. */
 	memset(h->reply_pool, 0, h->reply_pool_size);
-	h->reply_pool_head = h->reply_pool;
 
 	bft[7] = SG_ENTRIES_IN_CMD + 4;
 	calc_bucket_map(bft, ARRAY_SIZE(bft),
@@ -4641,12 +5032,19 @@
 
 	/* size of controller ring buffer */
 	writel(h->max_commands, &h->transtable->RepQSize);
-	writel(1, &h->transtable->RepQCount);
+	writel(h->nreply_queues, &h->transtable->RepQCount);
 	writel(0, &h->transtable->RepQCtrAddrLow32);
 	writel(0, &h->transtable->RepQCtrAddrHigh32);
-	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
-	writel(0, &h->transtable->RepQAddr0High32);
-	writel(CFGTBL_Trans_Performant | use_short_tags,
+
+	for (i = 0; i < h->nreply_queues; i++) {
+		writel(0, &h->transtable->RepQAddr[i].upper);
+		writel(h->reply_pool_dhandle +
+			(h->max_commands * sizeof(u64) * i),
+			&h->transtable->RepQAddr[i].lower);
+	}
+
+	writel(CFGTBL_Trans_Performant | use_short_tags |
+		CFGTBL_Trans_enable_directed_msix,
 		&(h->cfgtable->HostWrite.TransportRequest));
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 	hpsa_wait_for_mode_change_ack(h);
@@ -4664,6 +5062,7 @@
 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 {
 	u32 trans_support;
+	int i;
 
 	if (hpsa_simple_mode)
 		return;
@@ -4672,12 +5071,20 @@
 	if (!(trans_support & PERFORMANT_MODE))
 		return;
 
+	h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
 	hpsa_get_max_perf_mode_cmds(h);
 	/* Performant mode ring buffer and supporting data structures */
-	h->reply_pool_size = h->max_commands * sizeof(u64);
+	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
 	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
 				&(h->reply_pool_dhandle));
 
+	for (i = 0; i < h->nreply_queues; i++) {
+		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
+		h->reply_queue[i].size = h->max_commands;
+		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
+		h->reply_queue[i].current_entry = 0;
+	}
+
 	/* Need a block fetch table for performant mode */
 	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
 				sizeof(u32)), GFP_KERNEL);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 7b28d54..9816479 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -34,7 +34,7 @@
 	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
 	unsigned long (*fifo_full)(struct ctlr_info *h);
 	bool (*intr_pending)(struct ctlr_info *h);
-	unsigned long (*command_completed)(struct ctlr_info *h);
+	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
 };
 
 struct hpsa_scsi_dev_t {
@@ -48,6 +48,13 @@
 	unsigned char raid_level;	/* from inquiry page 0xC1 */
 };
 
+struct reply_pool {
+	u64 *head;
+	size_t size;
+	u8 wraparound;
+	u32 current_entry;
+};
+
 struct ctlr_info {
 	int	ctlr;
 	char	devname[8];
@@ -68,7 +75,7 @@
 #	define DOORBELL_INT	1
 #	define SIMPLE_MODE_INT	2
 #	define MEMQ_MODE_INT	3
-	unsigned int intr[4];
+	unsigned int intr[MAX_REPLY_QUEUES];
 	unsigned int msix_vector;
 	unsigned int msi_vector;
 	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
@@ -78,7 +85,6 @@
 	struct list_head reqQ;
 	struct list_head cmpQ;
 	unsigned int Qdepth;
-	unsigned int maxQsinceinit;
 	unsigned int maxSG;
 	spinlock_t lock;
 	int maxsgentries;
@@ -111,20 +117,45 @@
 	unsigned long transMethod;
 
 	/*
-	 * Performant mode completion buffer
+	 * Performant mode completion buffers
 	 */
 	u64 *reply_pool;
-	dma_addr_t reply_pool_dhandle;
-	u64 *reply_pool_head;
 	size_t reply_pool_size;
-	unsigned char reply_pool_wraparound;
+	struct reply_pool reply_queue[MAX_REPLY_QUEUES];
+	u8 nreply_queues;
+	dma_addr_t reply_pool_dhandle;
 	u32 *blockFetchTable;
 	unsigned char *hba_inquiry_data;
 	u64 last_intr_timestamp;
 	u32 last_heartbeat;
 	u64 last_heartbeat_timestamp;
+	u32 heartbeat_sample_interval;
+	atomic_t firmware_flash_in_progress;
 	u32 lockup_detected;
 	struct list_head lockup_list;
+	/* Address of h->q[x] is passed to intr handler to know which queue */
+	u8 q[MAX_REPLY_QUEUES];
+	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
+#define HPSATMF_BITS_SUPPORTED  (1 << 0)
+#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
+#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
+#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
+#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
+#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
+#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
+#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
+#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
+#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
+#define HPSATMF_MASK_SUPPORTED  (1 << 16)
+#define HPSATMF_LOG_LUN_RESET   (1 << 17)
+#define HPSATMF_LOG_NEX_RESET   (1 << 18)
+#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
+#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
+#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
+#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
+#define HPSATMF_LOG_QRY_TASK    (1 << 23)
+#define HPSATMF_LOG_QRY_TSET    (1 << 24)
+#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
 };
 #define HPSA_ABORT_MSG 0
 #define HPSA_DEVICE_RESET_MSG 1
@@ -216,9 +247,6 @@
 		c->Header.Tag.lower);
 	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
 	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
-	h->commands_outstanding++;
-	if (h->commands_outstanding > h->max_outstanding)
-		h->max_outstanding = h->commands_outstanding;
 }
 
 /*
@@ -254,16 +282,17 @@
 	}
 }
 
-static unsigned long SA5_performant_completed(struct ctlr_info *h)
+static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
 {
-	unsigned long register_value = FIFO_EMPTY;
+	struct reply_pool *rq = &h->reply_queue[q];
+	unsigned long flags, register_value = FIFO_EMPTY;
 
-	/* flush the controller write of the reply queue by reading
-	 * outbound doorbell status register.
-	 */
-	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
 	/* msi auto clears the interrupt pending bit. */
 	if (!(h->msi_vector || h->msix_vector)) {
+		/* flush the controller write of the reply queue by reading
+		 * outbound doorbell status register.
+		 */
+		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
 		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
 		/* Do a read in order to flush the write to the controller
 		 * (as per spec.)
@@ -271,19 +300,20 @@
 		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
 	}
 
-	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
-		register_value = *(h->reply_pool_head);
-		(h->reply_pool_head)++;
+	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
+		register_value = rq->head[rq->current_entry];
+		rq->current_entry++;
+		spin_lock_irqsave(&h->lock, flags);
 		h->commands_outstanding--;
+		spin_unlock_irqrestore(&h->lock, flags);
 	} else {
 		register_value = FIFO_EMPTY;
 	}
 	/* Check for wraparound */
-	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
-		h->reply_pool_head = h->reply_pool;
-		h->reply_pool_wraparound ^= 1;
+	if (rq->current_entry == h->max_commands) {
+		rq->current_entry = 0;
+		rq->wraparound ^= 1;
 	}
-
 	return register_value;
 }
 
@@ -303,13 +333,18 @@
  *   returns value read from hardware.
  *     returns FIFO_EMPTY if there is nothing to read
  */
-static unsigned long SA5_completed(struct ctlr_info *h)
+static unsigned long SA5_completed(struct ctlr_info *h,
+	__attribute__((unused)) u8 q)
 {
 	unsigned long register_value
 		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
+	unsigned long flags;
 
-	if (register_value != FIFO_EMPTY)
+	if (register_value != FIFO_EMPTY) {
+		spin_lock_irqsave(&h->lock, flags);
 		h->commands_outstanding--;
+		spin_unlock_irqrestore(&h->lock, flags);
+	}
 
 #ifdef HPSA_DEBUG
 	if (register_value != FIFO_EMPTY)
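
[Illustration, not part of the patch: the phase/wraparound-bit convention behind struct reply_pool and the consumer logic in next_command()/SA5_performant_completed(). Bit 0 of each posted entry carries the producer's current pass, so an entry is new only while its low bit matches the consumer's wraparound flag; reaching the end of the ring flips the flag. The producer below stands in for the controller, and the tag encoding (payload shifted left by one) is purely for the example.]

    #include <stdio.h>
    #include <stdint.h>

    #define RING_SIZE 4    /* stands in for h->max_commands */

    struct reply_ring {
        uint64_t head[RING_SIZE];
        unsigned int current_entry;
        uint8_t wraparound;    /* spec: init to 1, ring zeroed */
    };

    /* Producer (the controller, in the real driver): post an entry whose
     * low bit carries the current pass's phase. */
    static void post(struct reply_ring *rq, unsigned int *prod,
                     uint8_t *phase, uint64_t tag)
    {
        rq->head[*prod] = (tag & ~1ULL) | *phase;
        if (++*prod == RING_SIZE) {
            *prod = 0;
            *phase ^= 1;
        }
    }

    /* Consumer: same valid-entry test and wrap handling as the driver. */
    static int consume(struct reply_ring *rq, uint64_t *tag)
    {
        if ((rq->head[rq->current_entry] & 1) != rq->wraparound)
            return 0;    /* FIFO empty */
        *tag = rq->head[rq->current_entry++];
        if (rq->current_entry == RING_SIZE) {
            rq->current_entry = 0;
            rq->wraparound ^= 1;
        }
        return 1;
    }

    int main(void)
    {
        struct reply_ring rq = { .current_entry = 0, .wraparound = 1 };
        unsigned int prod = 0;
        uint8_t phase = 1;
        uint64_t tag;
        int i;

        for (i = 1; i <= 3; i++)
            post(&rq, &prod, &phase, (uint64_t)i << 1);
        while (consume(&rq, &tag))
            printf("completed %llu\n", (unsigned long long)(tag >> 1));
        for (i = 4; i <= 6; i++)    /* second batch wraps the ring */
            post(&rq, &prod, &phase, (uint64_t)i << 1);
        while (consume(&rq, &tag))
            printf("completed %llu\n", (unsigned long long)(tag >> 1));
        return 0;
    }
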
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 8049815..a894f2e 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -82,6 +82,29 @@
 #define TYPE_CMD				0x00
 #define TYPE_MSG				0x01
 
+/* Message Types  */
+#define HPSA_TASK_MANAGEMENT    0x00
+#define HPSA_RESET              0x01
+#define HPSA_SCAN               0x02
+#define HPSA_NOOP               0x03
+
+#define HPSA_CTLR_RESET_TYPE    0x00
+#define HPSA_BUS_RESET_TYPE     0x01
+#define HPSA_TARGET_RESET_TYPE  0x03
+#define HPSA_LUN_RESET_TYPE     0x04
+#define HPSA_NEXUS_RESET_TYPE   0x05
+
+/* Task Management Functions */
+#define HPSA_TMF_ABORT_TASK     0x00
+#define HPSA_TMF_ABORT_TASK_SET 0x01
+#define HPSA_TMF_CLEAR_ACA      0x02
+#define HPSA_TMF_CLEAR_TASK_SET 0x03
+#define HPSA_TMF_QUERY_TASK     0x04
+#define HPSA_TMF_QUERY_TASK_SET 0x05
+#define HPSA_TMF_QUERY_ASYNCEVENT 0x06
+
+
+
 /* config space register offsets */
 #define CFG_VENDORID            0x00
 #define CFG_DEVICEID            0x02
@@ -106,6 +129,7 @@
 #define CFGTBL_Trans_Simple     0x00000002l
 #define CFGTBL_Trans_Performant 0x00000004l
 #define CFGTBL_Trans_use_short_tags 0x20000000l
+#define CFGTBL_Trans_enable_directed_msix (1 << 30)
 
 #define CFGTBL_BusType_Ultra2   0x00000001l
 #define CFGTBL_BusType_Ultra3   0x00000002l
@@ -162,6 +186,7 @@
 #define BMIC_WRITE 0x27
 #define BMIC_CACHE_FLUSH 0xc2
 #define HPSA_CACHE_FLUSH 0x01	/* C2 was already being used by HPSA */
+#define BMIC_FLASH_FIRMWARE 0xF7
 
 /* Command List Structure */
 union SCSI3Addr {
@@ -337,11 +362,17 @@
 	u32		MaxPhysicalDevices;
 	u32		MaxPhysicalDrivesPerLogicalUnit;
 	u32		MaxPerformantModeCommands;
-	u8		reserved[0x78 - 0x58];
+	u32		MaxBlockFetch;
+	u32		PowerConservationSupport;
+	u32		PowerConservationEnable;
+	u32		TMFSupportFlags;
+	u8		TMFTagMask[8];
+	u8		reserved[0x78 - 0x70];
 	u32		misc_fw_support; /* offset 0x78 */
 #define			MISC_FW_DOORBELL_RESET (0x02)
 #define			MISC_FW_DOORBELL_RESET2 (0x010)
 	u8		driver_version[32];
+
 };
 
 #define NUM_BLOCKFETCH_ENTRIES 8
@@ -351,8 +382,8 @@
 	u32            RepQCount;
 	u32            RepQCtrAddrLow32;
 	u32            RepQCtrAddrHigh32;
-	u32            RepQAddr0Low32;
-	u32            RepQAddr0High32;
+#define MAX_REPLY_QUEUES 8
+	struct vals32  RepQAddr[MAX_REPLY_QUEUES];
 };
 
 struct hpsa_pci_info {
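
[Illustration, not part of the patch: how the fill_cmd() HPSA_ABORT_MSG case earlier in this series lays the tag of the command to be aborted into CDB bytes 4..11, least-significant byte first within each 32-bit half. The tag value here is made up; the two defines are copied from the hunk above.]

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define HPSA_TASK_MANAGEMENT 0x00
    #define HPSA_TMF_ABORT_TASK  0x00

    /* Mirror the CDB layout built by fill_cmd() for HPSA_ABORT_MSG. */
    static void build_abort_cdb(uint8_t cdb[16], uint32_t lower, uint32_t upper)
    {
        int i;

        memset(cdb, 0, 16);
        cdb[0] = HPSA_TASK_MANAGEMENT;
        cdb[1] = HPSA_TMF_ABORT_TASK;
        for (i = 0; i < 4; i++) {
            cdb[4 + i] = (lower >> (8 * i)) & 0xff;    /* tag, low half  */
            cdb[8 + i] = (upper >> (8 * i)) & 0xff;    /* tag, high half */
        }
    }

    int main(void)
    {
        uint8_t cdb[16];
        int i;

        build_abort_cdb(cdb, 0x11223344, 0x55667788);  /* hypothetical tag */
        for (i = 0; i < 16; i++)
            printf("%02x ", cdb[i]);
        printf("\n");    /* 00 00 00 00 44 33 22 11 88 77 66 55 00 00 00 00 */
        return 0;
    }
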
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e002cd4..467dc38 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4549,8 +4549,12 @@
 	ENTER;
 	if (sdev->sdev_target)
 		sata_port = sdev->sdev_target->hostdata;
-	if (sata_port)
+	if (sata_port) {
 		rc = ata_sas_port_init(sata_port->ap);
+		if (rc == 0)
+			rc = ata_sas_sync_probe(sata_port->ap);
+	}
+
 	if (rc)
 		ipr_slave_destroy(sdev);
 
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d4bf9c1..45385f5 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -192,22 +192,27 @@
 
 static bool sci_controller_isr(struct isci_host *ihost)
 {
-	if (sci_controller_completion_queue_has_entries(ihost)) {
+	if (sci_controller_completion_queue_has_entries(ihost))
 		return true;
-	} else {
-		/*
-		 * we have a spurious interrupt it could be that we have already
-		 * emptied the completion queue from a previous interrupt */
-		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
 
-		/*
-		 * There is a race in the hardware that could cause us not to be notified
-		 * of an interrupt completion if we do not take this step.  We will mask
-		 * then unmask the interrupts so if there is another interrupt pending
-		 * the clearing of the interrupt source we get the next interrupt message. */
+	/* We have a spurious interrupt; it could be that we have already
+	 * emptied the completion queue from a previous interrupt.
+	 * FIXME: really!?
+	 */
+	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+	/* There is a race in the hardware that could cause us not to be
+	 * notified of an interrupt completion if we do not take this
+	 * step.  We will mask then unmask the interrupts so that if
+	 * another interrupt is pending after the clearing of the
+	 * interrupt source, we get the next interrupt message.
+	 */
+	spin_lock(&ihost->scic_lock);
+	if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
 		writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
 		writel(0, &ihost->smu_registers->interrupt_mask);
 	}
+	spin_unlock(&ihost->scic_lock);
 
 	return false;
 }
@@ -642,7 +647,6 @@
 	if (completion_status != SCI_SUCCESS)
 		dev_info(&ihost->pdev->dev,
 			"controller start timed out, continuing...\n");
-	isci_host_change_state(ihost, isci_ready);
 	clear_bit(IHOST_START_PENDING, &ihost->flags);
 	wake_up(&ihost->eventq);
 }
@@ -657,12 +661,7 @@
 
 	sas_drain_work(ha);
 
-	dev_dbg(&ihost->pdev->dev,
-		"%s: ihost->status = %d, time = %ld\n",
-		 __func__, isci_host_get_state(ihost), time);
-
 	return 1;
-
 }
 
 /**
@@ -704,14 +703,15 @@
 
 static void sci_controller_enable_interrupts(struct isci_host *ihost)
 {
-	BUG_ON(ihost->smu_registers == NULL);
+	set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
 	writel(0, &ihost->smu_registers->interrupt_mask);
 }
 
 void sci_controller_disable_interrupts(struct isci_host *ihost)
 {
-	BUG_ON(ihost->smu_registers == NULL);
+	clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
 	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+	readl(&ihost->smu_registers->interrupt_mask); /* flush */
 }
 
 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
@@ -822,7 +822,7 @@
 	       &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
 }
 
-static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
 {
 	if (ihost->sm.current_state_id == SCIC_STARTING) {
 		/*
@@ -849,6 +849,7 @@
 	case SCI_PHY_SUB_AWAIT_SATA_POWER:
 	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
 	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+	case SCI_PHY_SUB_AWAIT_OSSP_EN:
 	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
 	case SCI_PHY_SUB_FINAL:
 		return true;
@@ -857,6 +858,39 @@
 	}
 }
 
+bool is_controller_start_complete(struct isci_host *ihost)
+{
+	int i;
+
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		struct isci_phy *iphy = &ihost->phys[i];
+		u32 state = iphy->sm.current_state_id;
+
+		/* in apc mode we need to check every phy, in
+		 * mpc mode we only need to check phys that have
+		 * been configured into a port
+		 */
+		if (is_port_config_apc(ihost))
+			/* pass */;
+		else if (!phy_get_non_dummy_port(iphy))
+			continue;
+
+		/* The controller start operation is complete iff:
+		 * - all links have been given an opportunity to start
+		 * - have no indication of a connected device
+		 * - have an indication of a connected device and it has
+		 *   finished the link training process.
+		 */
+		if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+		    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+		    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+		    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
+			return false;
+	}
+
+	return true;
+}
+
 /**
  * sci_controller_start_next_phy - start phy
  * @scic: controller
@@ -877,36 +911,7 @@
 		return status;
 
 	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
-		bool is_controller_start_complete = true;
-		u32 state;
-		u8 index;
-
-		for (index = 0; index < SCI_MAX_PHYS; index++) {
-			iphy = &ihost->phys[index];
-			state = iphy->sm.current_state_id;
-
-			if (!phy_get_non_dummy_port(iphy))
-				continue;
-
-			/* The controller start operation is complete iff:
-			 * - all links have been given an opportunity to start
-			 * - have no indication of a connected device
-			 * - have an indication of a connected device and it has
-			 *   finished the link training process.
-			 */
-			if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
-			    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
-			    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
-			    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
-				is_controller_start_complete = false;
-				break;
-			}
-		}
-
-		/*
-		 * The controller has successfully finished the start process.
-		 * Inform the SCI Core user and transition to the READY state. */
-		if (is_controller_start_complete == true) {
+		if (is_controller_start_complete(ihost)) {
 			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
 			sci_del_timer(&ihost->phy_timer);
 			ihost->phy_startup_timer_pending = false;
@@ -987,9 +992,8 @@
 	u16 index;
 
 	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
-		dev_warn(&ihost->pdev->dev,
-			 "SCIC Controller start operation requested in "
-			 "invalid state\n");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1053,9 +1057,8 @@
 	spin_unlock_irq(&ihost->scic_lock);
 }
 
-static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
+static void isci_host_stop_complete(struct isci_host *ihost)
 {
-	isci_host_change_state(ihost, isci_stopped);
 	sci_controller_disable_interrupts(ihost);
 	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
 	wake_up(&ihost->eventq);
@@ -1074,6 +1077,32 @@
 	writel(0, &ihost->smu_registers->interrupt_mask);
 }
 
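+/**
+ * ireq_done() - hand a completed request back to libsas
+ * @ihost: owning host
+ * @ireq: request being retired
+ * @task: sas_task attached to @ireq
+ *
+ * Requests that completed in the target are returned via ->task_done(),
+ * other completions are routed to the error handler via sas_task_abort().
+ * Neither callback is made when the request is on the abort path or the
+ * task was already aborted; abort-path waiters are woken instead, and the
+ * tag is released unless IREQ_NO_AUTO_FREE_TAG is set.
+ */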
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
+{
+	task->lldd_task = NULL;
+	if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
+	    !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+		if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
+			/* Normal notification (task_done) */
+			dev_dbg(&ihost->pdev->dev,
+				"%s: Normal - ireq/task = %p/%p\n",
+				__func__, ireq, task);
+
+			task->task_done(task);
+		} else {
+			dev_dbg(&ihost->pdev->dev,
+				"%s: Error - ireq/task = %p/%p\n",
+				__func__, ireq, task);
+
+			sas_task_abort(task);
+		}
+	}
+	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+		wake_up_all(&ihost->eventq);
+
+	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+		isci_free_tag(ihost, ireq->io_tag);
+}
+
 /**
  * isci_host_completion_routine() - This function is the delayed service
  *    routine that calls the sci core library's completion handler. It's
@@ -1082,107 +1111,15 @@
  * @data: This parameter specifies the ISCI host object
  *
  */
-static void isci_host_completion_routine(unsigned long data)
+void isci_host_completion_routine(unsigned long data)
 {
 	struct isci_host *ihost = (struct isci_host *)data;
-	struct list_head    completed_request_list;
-	struct list_head    errored_request_list;
-	struct list_head    *current_position;
-	struct list_head    *next_position;
-	struct isci_request *request;
-	struct isci_request *next_request;
-	struct sas_task     *task;
 	u16 active;
 
-	INIT_LIST_HEAD(&completed_request_list);
-	INIT_LIST_HEAD(&errored_request_list);
-
 	spin_lock_irq(&ihost->scic_lock);
-
 	sci_controller_completion_handler(ihost);
-
-	/* Take the lists of completed I/Os from the host. */
-
-	list_splice_init(&ihost->requests_to_complete,
-			 &completed_request_list);
-
-	/* Take the list of errored I/Os from the host. */
-	list_splice_init(&ihost->requests_to_errorback,
-			 &errored_request_list);
-
 	spin_unlock_irq(&ihost->scic_lock);
 
-	/* Process any completions in the lists. */
-	list_for_each_safe(current_position, next_position,
-			   &completed_request_list) {
-
-		request = list_entry(current_position, struct isci_request,
-				     completed_node);
-		task = isci_request_access_task(request);
-
-		/* Normal notification (task_done) */
-		dev_dbg(&ihost->pdev->dev,
-			"%s: Normal - request/task = %p/%p\n",
-			__func__,
-			request,
-			task);
-
-		/* Return the task to libsas */
-		if (task != NULL) {
-
-			task->lldd_task = NULL;
-			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-
-				/* If the task is already in the abort path,
-				* the task_done callback cannot be called.
-				*/
-				task->task_done(task);
-			}
-		}
-
-		spin_lock_irq(&ihost->scic_lock);
-		isci_free_tag(ihost, request->io_tag);
-		spin_unlock_irq(&ihost->scic_lock);
-	}
-	list_for_each_entry_safe(request, next_request, &errored_request_list,
-				 completed_node) {
-
-		task = isci_request_access_task(request);
-
-		/* Use sas_task_abort */
-		dev_warn(&ihost->pdev->dev,
-			 "%s: Error - request/task = %p/%p\n",
-			 __func__,
-			 request,
-			 task);
-
-		if (task != NULL) {
-
-			/* Put the task into the abort path if it's not there
-			 * already.
-			 */
-			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
-				sas_task_abort(task);
-
-		} else {
-			/* This is a case where the request has completed with a
-			 * status such that it needed further target servicing,
-			 * but the sas_task reference has already been removed
-			 * from the request.  Since it was errored, it was not
-			 * being aborted, so there is nothing to do except free
-			 * it.
-			 */
-
-			spin_lock_irq(&ihost->scic_lock);
-			/* Remove the request from the remote device's list
-			* of pending requests.
-			*/
-			list_del_init(&request->dev_node);
-			isci_free_tag(ihost, request->io_tag);
-			spin_unlock_irq(&ihost->scic_lock);
-		}
-	}
-
 	/* the coalescence timeout doubles at each encoding step, so
 	 * update it based on the ilog2 value of the outstanding requests
 	 */
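
A minimal sketch of that update (assuming the kernel's ilog2() helper from
<linux/log2.h>; "active" here stands for the count of outstanding requests):

	static u32 coalesce_step(u16 active)
	{
		/* 1 request -> step 0, 2-3 -> 1, 4-7 -> 2, ... */
		return active ? ilog2(active) : 0;
	}
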
@@ -1213,9 +1150,8 @@
 static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
 {
 	if (ihost->sm.current_state_id != SCIC_READY) {
-		dev_warn(&ihost->pdev->dev,
-			 "SCIC Controller stop operation requested in "
-			 "invalid state\n");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1241,7 +1177,7 @@
 	switch (ihost->sm.current_state_id) {
 	case SCIC_RESET:
 	case SCIC_READY:
-	case SCIC_STOPPED:
+	case SCIC_STOPPING:
 	case SCIC_FAILED:
 		/*
 		 * The reset operation is not a graceful cleanup, just
@@ -1250,13 +1186,50 @@
 		sci_change_state(&ihost->sm, SCIC_RESETTING);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(&ihost->pdev->dev,
-			 "SCIC Controller reset operation requested in "
-			 "invalid state\n");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
 
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+	u32 index;
+	enum sci_status status;
+	enum sci_status phy_status;
+
+	status = SCI_SUCCESS;
+
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		phy_status = sci_phy_stop(&ihost->phys[index]);
+
+		if (phy_status != SCI_SUCCESS &&
+		    phy_status != SCI_FAILURE_INVALID_STATE) {
+			status = SCI_FAILURE;
+
+			dev_warn(&ihost->pdev->dev,
+				 "%s: Controller stop operation failed to stop "
+				 "phy %d because of status %d.\n",
+				 __func__,
+				 ihost->phys[index].phy_index, phy_status);
+		}
+	}
+
+	return status;
+}
+
+/**
+ * isci_host_deinit - shutdown frame reception and dma
+ * @ihost: host to take down
+ *
+ * This is called in either the driver shutdown or the suspend path.  In
+ * the shutdown case libsas went through port teardown and normal device
+ * removal (i.e. physical links stayed up to service scsi_device removal
+ * commands).  In the suspend case we disable the hardware without
+ * notifying libsas of the link down events since we want libsas to
+ * remember the domain across the suspend/resume cycle
+ */
 void isci_host_deinit(struct isci_host *ihost)
 {
 	int i;
@@ -1265,17 +1238,6 @@
 	for (i = 0; i < isci_gpio_count(ihost); i++)
 		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
 
-	isci_host_change_state(ihost, isci_stopping);
-	for (i = 0; i < SCI_MAX_PORTS; i++) {
-		struct isci_port *iport = &ihost->ports[i];
-		struct isci_remote_device *idev, *d;
-
-		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
-			if (test_bit(IDEV_ALLOCATED, &idev->flags))
-				isci_remote_device_stop(ihost, idev);
-		}
-	}
-
 	set_bit(IHOST_STOP_PENDING, &ihost->flags);
 
 	spin_lock_irq(&ihost->scic_lock);
@@ -1284,12 +1246,21 @@
 
 	wait_for_stop(ihost);
 
+	/* phy stop is after controller stop to allow port and device to
+	 * go idle before shutting down the phys, but the expectation is
+	 * that i/o has been shut off well before we reach this
+	 * function.
+	 */
+	sci_controller_stop_phys(ihost);
+
 	/* disable sgpio: where the above wait should give time for the
 	 * enclosure to sample the gpios going inactive
 	 */
 	writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
 
+	spin_lock_irq(&ihost->scic_lock);
 	sci_controller_reset(ihost);
+	spin_unlock_irq(&ihost->scic_lock);
 
 	/* Cancel any/all outstanding port timers */
 	for (i = 0; i < ihost->logical_port_entries; i++) {
@@ -1328,29 +1299,6 @@
 	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
 }
 
-static void isci_user_parameters_get(struct sci_user_parameters *u)
-{
-	int i;
-
-	for (i = 0; i < SCI_MAX_PHYS; i++) {
-		struct sci_phy_user_params *u_phy = &u->phys[i];
-
-		u_phy->max_speed_generation = phy_gen;
-
-		/* we are not exporting these for now */
-		u_phy->align_insertion_frequency = 0x7f;
-		u_phy->in_connection_align_insertion_frequency = 0xff;
-		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
-	}
-
-	u->stp_inactivity_timeout = stp_inactive_to;
-	u->ssp_inactivity_timeout = ssp_inactive_to;
-	u->stp_max_occupancy_timeout = stp_max_occ_to;
-	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
-	u->no_outbound_task_timeout = no_outbound_task_to;
-	u->max_concurr_spinup = max_concurr_spinup;
-}
-
 static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
 {
 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
@@ -1510,32 +1458,6 @@
 	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
 }
 
-static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
-{
-	u32 index;
-	enum sci_status status;
-	enum sci_status phy_status;
-
-	status = SCI_SUCCESS;
-
-	for (index = 0; index < SCI_MAX_PHYS; index++) {
-		phy_status = sci_phy_stop(&ihost->phys[index]);
-
-		if (phy_status != SCI_SUCCESS &&
-		    phy_status != SCI_FAILURE_INVALID_STATE) {
-			status = SCI_FAILURE;
-
-			dev_warn(&ihost->pdev->dev,
-				 "%s: Controller stop operation failed to stop "
-				 "phy %d because of status %d.\n",
-				 __func__,
-				 ihost->phys[index].phy_index, phy_status);
-		}
-	}
-
-	return status;
-}
-
 static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
 {
 	u32 index;
@@ -1595,10 +1517,11 @@
 {
 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
 
-	/* Stop all of the components for this controller */
-	sci_controller_stop_phys(ihost);
-	sci_controller_stop_ports(ihost);
 	sci_controller_stop_devices(ihost);
+	sci_controller_stop_ports(ihost);
+
+	if (!sci_controller_has_remote_devices_stopping(ihost))
+		isci_host_stop_complete(ihost);
 }
 
 static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
@@ -1624,6 +1547,9 @@
 
 	/* The write to the UFQGP clears the UFQPR */
 	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+
+	/* clear all interrupts */
+	writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
 }
 
 static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
@@ -1655,59 +1581,9 @@
 		.enter_state = sci_controller_stopping_state_enter,
 		.exit_state = sci_controller_stopping_state_exit,
 	},
-	[SCIC_STOPPED] = {},
 	[SCIC_FAILED] = {}
 };
 
-static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
-{
-	/* these defaults are overridden by the platform / firmware */
-	u16 index;
-
-	/* Default to APC mode. */
-	ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
-
-	/* Default to APC mode. */
-	ihost->oem_parameters.controller.max_concurr_spin_up = 1;
-
-	/* Default to no SSC operation. */
-	ihost->oem_parameters.controller.do_enable_ssc = false;
-
-	/* Default to short cables on all phys. */
-	ihost->oem_parameters.controller.cable_selection_mask = 0;
-
-	/* Initialize all of the port parameter information to narrow ports. */
-	for (index = 0; index < SCI_MAX_PORTS; index++) {
-		ihost->oem_parameters.ports[index].phy_mask = 0;
-	}
-
-	/* Initialize all of the phy parameter information. */
-	for (index = 0; index < SCI_MAX_PHYS; index++) {
-		/* Default to 3G (i.e. Gen 2). */
-		ihost->user_parameters.phys[index].max_speed_generation =
-			SCIC_SDS_PARM_GEN2_SPEED;
-
-		/* the frequencies cannot be 0 */
-		ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
-		ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
-		ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
-
-		/*
-		 * Previous Vitesse based expanders had a arbitration issue that
-		 * is worked around by having the upper 32-bits of SAS address
-		 * with a value greater then the Vitesse company identifier.
-		 * Hence, usage of 0x5FCFFFFF. */
-		ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
-		ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
-	}
-
-	ihost->user_parameters.stp_inactivity_timeout = 5;
-	ihost->user_parameters.ssp_inactivity_timeout = 5;
-	ihost->user_parameters.stp_max_occupancy_timeout = 5;
-	ihost->user_parameters.ssp_max_occupancy_timeout = 20;
-	ihost->user_parameters.no_outbound_task_timeout = 2;
-}
-
 static void controller_timeout(unsigned long data)
 {
 	struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1724,7 +1600,7 @@
 		sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
 	else if (sm->current_state_id == SCIC_STOPPING) {
 		sci_change_state(sm, SCIC_FAILED);
-		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
+		isci_host_stop_complete(ihost);
 	} else	/* / @todo Now what do we want to do in this case? */
 		dev_err(&ihost->pdev->dev,
 			"%s: Controller timer fired when controller was not "
@@ -1764,9 +1640,6 @@
 
 	sci_init_timer(&ihost->timer, controller_timeout);
 
-	/* Initialize the User and OEM parameters to default values. */
-	sci_controller_set_default_config_parameters(ihost);
-
 	return sci_controller_reset(ihost);
 }
 
@@ -1846,27 +1719,6 @@
 	return 0;
 }
 
-static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
-{
-	u32 state = ihost->sm.current_state_id;
-	struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
-
-	if (state == SCIC_RESET ||
-	    state == SCIC_INITIALIZING ||
-	    state == SCIC_INITIALIZED) {
-		u8 oem_version = pci_info->orom ? pci_info->orom->hdr.version :
-			ISCI_ROM_VER_1_0;
-
-		if (sci_oem_parameters_validate(&ihost->oem_parameters,
-						oem_version))
-			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
-		return SCI_SUCCESS;
-	}
-
-	return SCI_FAILURE_INVALID_STATE;
-}
-
 static u8 max_spin_up(struct isci_host *ihost)
 {
 	if (ihost->user_parameters.max_concurr_spinup)
@@ -1914,7 +1766,7 @@
 		ihost->power_control.phys_granted_power++;
 		sci_phy_consume_power_handler(iphy);
 
-		if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+		if (iphy->protocol == SAS_PROTOCOL_SSP) {
 			u8 j;
 
 			for (j = 0; j < SCI_MAX_PHYS; j++) {
@@ -1988,7 +1840,7 @@
 				       sizeof(current_phy->frame_rcvd.iaf.sas_addr));
 
 			if (current_phy->sm.current_state_id == SCI_PHY_READY &&
-			    current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+			    current_phy->protocol == SAS_PROTOCOL_SSP &&
 			    other == 0) {
 				sci_phy_consume_power_handler(iphy);
 				break;
@@ -2279,9 +2131,8 @@
 	unsigned long i, state, val;
 
 	if (ihost->sm.current_state_id != SCIC_RESET) {
-		dev_warn(&ihost->pdev->dev,
-			 "SCIC Controller initialize operation requested "
-			 "in invalid state\n");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -2384,96 +2235,76 @@
 	return result;
 }
 
-static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
-					       struct sci_user_parameters *sci_parms)
+static int sci_controller_dma_alloc(struct isci_host *ihost)
 {
-	u32 state = ihost->sm.current_state_id;
+	struct device *dev = &ihost->pdev->dev;
+	size_t size;
+	int i;
 
-	if (state == SCIC_RESET ||
-	    state == SCIC_INITIALIZING ||
-	    state == SCIC_INITIALIZED) {
-		u16 index;
+	/* detect re-initialization */
+	if (ihost->completion_queue)
+		return 0;
 
-		/*
-		 * Validate the user parameters.  If they are not legal, then
-		 * return a failure.
-		 */
-		for (index = 0; index < SCI_MAX_PHYS; index++) {
-			struct sci_phy_user_params *user_phy;
+	size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
+	ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
+						      GFP_KERNEL);
+	if (!ihost->completion_queue)
+		return -ENOMEM;
 
-			user_phy = &sci_parms->phys[index];
+	size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
+	ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
+							       GFP_KERNEL);
 
-			if (!((user_phy->max_speed_generation <=
-						SCIC_SDS_PARM_MAX_SPEED) &&
-			      (user_phy->max_speed_generation >
-						SCIC_SDS_PARM_NO_SPEED)))
-				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+	if (!ihost->remote_node_context_table)
+		return -ENOMEM;
 
-			if (user_phy->in_connection_align_insertion_frequency <
-					3)
-				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+	size = ihost->task_context_entries * sizeof(struct scu_task_context);
+	ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
+							GFP_KERNEL);
+	if (!ihost->task_context_table)
+		return -ENOMEM;
 
-			if ((user_phy->in_connection_align_insertion_frequency <
-						3) ||
-			    (user_phy->align_insertion_frequency == 0) ||
-			    (user_phy->
-				notify_enable_spin_up_insertion_frequency ==
-						0))
-				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-		}
+	size = SCI_UFI_TOTAL_SIZE;
+	ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
+	if (!ihost->ufi_buf)
+		return -ENOMEM;
 
-		if ((sci_parms->stp_inactivity_timeout == 0) ||
-		    (sci_parms->ssp_inactivity_timeout == 0) ||
-		    (sci_parms->stp_max_occupancy_timeout == 0) ||
-		    (sci_parms->ssp_max_occupancy_timeout == 0) ||
-		    (sci_parms->no_outbound_task_timeout == 0))
-			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+		struct isci_request *ireq;
+		dma_addr_t dma;
 
-		memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+		ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
+		if (!ireq)
+			return -ENOMEM;
 
-		return SCI_SUCCESS;
+		ireq->tc = &ihost->task_context_table[i];
+		ireq->owning_controller = ihost;
+		ireq->request_daddr = dma;
+		ireq->isci_host = ihost;
+		ihost->reqs[i] = ireq;
 	}
 
-	return SCI_FAILURE_INVALID_STATE;
+	return 0;
 }
 
 static int sci_controller_mem_init(struct isci_host *ihost)
 {
-	struct device *dev = &ihost->pdev->dev;
-	dma_addr_t dma;
-	size_t size;
-	int err;
+	int err = sci_controller_dma_alloc(ihost);
 
-	size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
-	ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
-	if (!ihost->completion_queue)
-		return -ENOMEM;
-
-	writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
-	writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
-
-	size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
-	ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
-							       GFP_KERNEL);
-	if (!ihost->remote_node_context_table)
-		return -ENOMEM;
-
-	writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
-	writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
-
-	size = ihost->task_context_entries * sizeof(struct scu_task_context),
-	ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
-	if (!ihost->task_context_table)
-		return -ENOMEM;
-
-	ihost->task_context_dma = dma;
-	writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
-	writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
-
-	err = sci_unsolicited_frame_control_construct(ihost);
 	if (err)
 		return err;
 
+	writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
+	writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
+
+	writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
+	writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
+
+	writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
+	writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
+
+	sci_unsolicited_frame_control_construct(ihost);
+
 	/*
 	 * Inform the silicon as to the location of the UF headers and
 	 * address table.
@@ -2491,22 +2322,22 @@
 	return 0;
 }
 
+/**
+ * isci_host_init - (re-)initialize hardware and internal (private) state
+ * @ihost: host to init
+ *
+ * Any public-facing objects (like asd_sas_port and asd_sas_phys), or
+ * one-time initialization objects like locks and waitqueues, are
+ * not touched (they are initialized in isci_host_alloc)
+ */
 int isci_host_init(struct isci_host *ihost)
 {
-	int err = 0, i;
+	int i, err;
 	enum sci_status status;
-	struct sci_user_parameters sci_user_params;
-	struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
 
-	spin_lock_init(&ihost->state_lock);
-	spin_lock_init(&ihost->scic_lock);
-	init_waitqueue_head(&ihost->eventq);
-
-	isci_host_change_state(ihost, isci_starting);
-
-	status = sci_controller_construct(ihost, scu_base(ihost),
-					  smu_base(ihost));
-
+	spin_lock_irq(&ihost->scic_lock);
+	status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
+	spin_unlock_irq(&ihost->scic_lock);
 	if (status != SCI_SUCCESS) {
 		dev_err(&ihost->pdev->dev,
 			"%s: sci_controller_construct failed - status = %x\n",
@@ -2515,48 +2346,6 @@
 		return -ENODEV;
 	}
 
-	ihost->sas_ha.dev = &ihost->pdev->dev;
-	ihost->sas_ha.lldd_ha = ihost;
-
-	/*
-	 * grab initial values stored in the controller object for OEM and USER
-	 * parameters
-	 */
-	isci_user_parameters_get(&sci_user_params);
-	status = sci_user_parameters_set(ihost, &sci_user_params);
-	if (status != SCI_SUCCESS) {
-		dev_warn(&ihost->pdev->dev,
-			 "%s: sci_user_parameters_set failed\n",
-			 __func__);
-		return -ENODEV;
-	}
-
-	/* grab any OEM parameters specified in orom */
-	if (pci_info->orom) {
-		status = isci_parse_oem_parameters(&ihost->oem_parameters,
-						   pci_info->orom,
-						   ihost->id);
-		if (status != SCI_SUCCESS) {
-			dev_warn(&ihost->pdev->dev,
-				 "parsing firmware oem parameters failed\n");
-			return -EINVAL;
-		}
-	}
-
-	status = sci_oem_parameters_set(ihost);
-	if (status != SCI_SUCCESS) {
-		dev_warn(&ihost->pdev->dev,
-				"%s: sci_oem_parameters_set failed\n",
-				__func__);
-		return -ENODEV;
-	}
-
-	tasklet_init(&ihost->completion_tasklet,
-		     isci_host_completion_routine, (unsigned long)ihost);
-
-	INIT_LIST_HEAD(&ihost->requests_to_complete);
-	INIT_LIST_HEAD(&ihost->requests_to_errorback);
-
 	spin_lock_irq(&ihost->scic_lock);
 	status = sci_controller_initialize(ihost);
 	spin_unlock_irq(&ihost->scic_lock);
@@ -2572,43 +2361,12 @@
 	if (err)
 		return err;
 
-	for (i = 0; i < SCI_MAX_PORTS; i++)
-		isci_port_init(&ihost->ports[i], ihost, i);
-
-	for (i = 0; i < SCI_MAX_PHYS; i++)
-		isci_phy_init(&ihost->phys[i], ihost, i);
-
 	/* enable sgpio */
 	writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
 	for (i = 0; i < isci_gpio_count(ihost); i++)
 		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
 	writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
 
-	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
-		struct isci_remote_device *idev = &ihost->devices[i];
-
-		INIT_LIST_HEAD(&idev->reqs_in_process);
-		INIT_LIST_HEAD(&idev->node);
-	}
-
-	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
-		struct isci_request *ireq;
-		dma_addr_t dma;
-
-		ireq = dmam_alloc_coherent(&ihost->pdev->dev,
-					   sizeof(struct isci_request), &dma,
-					   GFP_KERNEL);
-		if (!ireq)
-			return -ENOMEM;
-
-		ireq->tc = &ihost->task_context_table[i];
-		ireq->owning_controller = ihost;
-		spin_lock_init(&ireq->state_lock);
-		ireq->request_daddr = dma;
-		ireq->isci_host = ihost;
-		ihost->reqs[i] = ireq;
-	}
-
 	return 0;
 }
 
@@ -2654,7 +2412,7 @@
 	}
 }
 
-static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
 {
 	u32 index;
 
@@ -2680,7 +2438,7 @@
 	}
 
 	if (!sci_controller_has_remote_devices_stopping(ihost))
-		sci_change_state(&ihost->sm, SCIC_STOPPED);
+		isci_host_stop_complete(ihost);
 }
 
 void sci_controller_post_request(struct isci_host *ihost, u32 request)
@@ -2842,7 +2600,8 @@
 	enum sci_status status;
 
 	if (ihost->sm.current_state_id != SCIC_READY) {
-		dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -2866,22 +2625,26 @@
 	enum sci_status status;
 
 	if (ihost->sm.current_state_id != SCIC_READY) {
-		dev_warn(&ihost->pdev->dev,
-			 "invalid state to terminate request\n");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
-
 	status = sci_io_request_terminate(ireq);
-	if (status != SCI_SUCCESS)
-		return status;
 
-	/*
-	 * Utilize the original post context command and or in the POST_TC_ABORT
-	 * request sub-type.
-	 */
-	sci_controller_post_request(ihost,
-				    ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
-	return SCI_SUCCESS;
+	dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
+		__func__, status, ireq, ireq->flags);
+
+	if ((status == SCI_SUCCESS) &&
+	    !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
+	    !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
+		/* Utilize the original post context command and or in the
+		 * POST_TC_ABORT request sub-type.
+		 */
+		sci_controller_post_request(
+			ihost, ireq->post_context |
+				SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+	}
+	return status;
 }
 
 /**
@@ -2915,7 +2678,8 @@
 		clear_bit(IREQ_ACTIVE, &ireq->flags);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -2926,7 +2690,8 @@
 	struct isci_host *ihost = ireq->owning_controller;
 
 	if (ihost->sm.current_state_id != SCIC_READY) {
-		dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
+		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+			 __func__, ihost->sm.current_state_id);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index adbad69..9ab58e0 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -55,6 +55,7 @@
 #ifndef _SCI_HOST_H_
 #define _SCI_HOST_H_
 
+#include <scsi/sas_ata.h>
 #include "remote_device.h"
 #include "phy.h"
 #include "isci.h"
@@ -108,6 +109,8 @@
 typedef void (*port_config_fn)(struct isci_host *,
 			       struct sci_port_configuration_agent *,
 			       struct isci_port *, struct isci_phy *);
+bool is_port_config_apc(struct isci_host *ihost);
+bool is_controller_start_complete(struct isci_host *ihost);
 
 struct sci_port_configuration_agent {
 	u16 phy_configured_mask;
@@ -157,13 +160,17 @@
 	struct sci_power_control power_control;
 	u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
 	struct scu_task_context *task_context_table;
-	dma_addr_t task_context_dma;
+	dma_addr_t tc_dma;
 	union scu_remote_node_context *remote_node_context_table;
+	dma_addr_t rnc_dma;
 	u32 *completion_queue;
+	dma_addr_t cq_dma;
 	u32 completion_queue_get;
 	u32 logical_port_entries;
 	u32 remote_node_entries;
 	u32 task_context_entries;
+	void *ufi_buf;
+	dma_addr_t ufi_dma;
 	struct sci_unsolicited_frame_control uf_control;
 
 	/* phy startup */
@@ -190,17 +197,13 @@
 	struct asd_sas_port sas_ports[SCI_MAX_PORTS];
 	struct sas_ha_struct sas_ha;
 
-	spinlock_t state_lock;
 	struct pci_dev *pdev;
-	enum isci_status status;
 	#define IHOST_START_PENDING 0
 	#define IHOST_STOP_PENDING 1
+	#define IHOST_IRQ_ENABLED 2
 	unsigned long flags;
 	wait_queue_head_t eventq;
-	struct Scsi_Host *shost;
 	struct tasklet_struct completion_tasklet;
-	struct list_head requests_to_complete;
-	struct list_head requests_to_errorback;
 	spinlock_t scic_lock;
 	struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
 	struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
@@ -274,13 +277,6 @@
 	SCIC_STOPPING,
 
 	/**
-	 * This state indicates that the controller has successfully been stopped.
-	 * In this state no new IO operations are permitted.
-	 * This state is entered from the STOPPING state.
-	 */
-	SCIC_STOPPED,
-
-	/**
 	 * This state indicates that the controller could not successfully be
 	 * initialized.  In this state no new IO operations are permitted.
 	 * This state is entered from the INITIALIZING state.
@@ -309,32 +305,16 @@
 	return pci_get_drvdata(pdev);
 }
 
+static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
+{
+	return ihost->sas_ha.core.shost;
+}
+
 #define for_each_isci_host(id, ihost, pdev) \
 	for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
 	     id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
 	     ihost = to_pci_info(pdev)->hosts[++id])
 
-static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
-{
-	return isci_host->status;
-}
-
-static inline void isci_host_change_state(struct isci_host *isci_host,
-					  enum isci_status status)
-{
-	unsigned long flags;
-
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: isci_host = %p, state = 0x%x",
-		__func__,
-		isci_host,
-		status);
-	spin_lock_irqsave(&isci_host->state_lock, flags);
-	isci_host->status = status;
-	spin_unlock_irqrestore(&isci_host->state_lock, flags);
-
-}
-
 static inline void wait_for_start(struct isci_host *ihost)
 {
 	wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
@@ -360,6 +340,11 @@
 	return dev->port->ha->lldd_ha;
 }
 
+static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev)
+{
+	return dev_to_ihost(idev->domain_dev);
+}
+
 /* we always use protocol engine group zero */
 #define ISCI_PEG 0
 
@@ -378,8 +363,7 @@
 {
 	struct domain_device *dev = idev->domain_dev;
 
-	if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
-	    !idev->is_direct_attached)
+	if (dev_is_sata(dev) && dev->parent)
 		return SCU_STP_REMOTE_NODE_COUNT;
 	return SCU_SSP_REMOTE_NODE_COUNT;
 }
@@ -475,36 +459,17 @@
 	struct isci_remote_device *idev,
 	u16 node_id);
 
-struct isci_request *sci_request_by_tag(struct isci_host *ihost,
-					     u16 io_tag);
-
-void sci_controller_power_control_queue_insert(
-	struct isci_host *ihost,
-	struct isci_phy *iphy);
-
-void sci_controller_power_control_queue_remove(
-	struct isci_host *ihost,
-	struct isci_phy *iphy);
-
-void sci_controller_link_up(
-	struct isci_host *ihost,
-	struct isci_port *iport,
-	struct isci_phy *iphy);
-
-void sci_controller_link_down(
-	struct isci_host *ihost,
-	struct isci_port *iport,
-	struct isci_phy *iphy);
-
-void sci_controller_remote_device_stopped(
-	struct isci_host *ihost,
-	struct isci_remote_device *idev);
-
-void sci_controller_copy_task_context(
-	struct isci_host *ihost,
-	struct isci_request *ireq);
-
-void sci_controller_register_setup(struct isci_host *ihost);
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag);
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+					       struct isci_phy *iphy);
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+					       struct isci_phy *iphy);
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+			    struct isci_phy *iphy);
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+			      struct isci_phy *iphy);
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+					  struct isci_remote_device *idev);
 
 enum sci_status sci_controller_continue_io(struct isci_request *ireq);
 int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
@@ -512,29 +477,14 @@
 u16 isci_alloc_tag(struct isci_host *ihost);
 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
 void isci_tci_free(struct isci_host *ihost, u16 tci);
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task);
 
 int isci_host_init(struct isci_host *);
-
-void isci_host_init_controller_names(
-	struct isci_host *isci_host,
-	unsigned int controller_idx);
-
-void isci_host_deinit(
-	struct isci_host *);
-
-void isci_host_port_link_up(
-	struct isci_host *,
-	struct isci_port *,
-	struct isci_phy *);
-int isci_host_dev_found(struct domain_device *);
-
-void isci_host_remote_device_start_complete(
-	struct isci_host *,
-	struct isci_remote_device *,
-	enum sci_status);
-
-void sci_controller_disable_interrupts(
-	struct isci_host *ihost);
+void isci_host_completion_routine(unsigned long data);
+void isci_host_deinit(struct isci_host *);
+void sci_controller_disable_interrupts(struct isci_host *ihost);
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost);
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status);
 
 enum sci_status sci_controller_start_io(
 	struct isci_host *ihost,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 5137db5..47e28b5 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -271,13 +271,12 @@
 	if (!isci_host)
 		return;
 
-	shost = isci_host->shost;
-
 	sas_unregister_ha(&isci_host->sas_ha);
 
-	sas_remove_host(isci_host->shost);
-	scsi_remove_host(isci_host->shost);
-	scsi_host_put(isci_host->shost);
+	shost = to_shost(isci_host);
+	sas_remove_host(shost);
+	scsi_remove_host(shost);
+	scsi_host_put(shost);
 }
 
 static int __devinit isci_pci_init(struct pci_dev *pdev)
@@ -397,38 +396,199 @@
 	return err;
 }
 
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+	int i;
+
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		struct sci_phy_user_params *u_phy = &u->phys[i];
+
+		u_phy->max_speed_generation = phy_gen;
+
+		/* we are not exporting these for now */
+		u_phy->align_insertion_frequency = 0x7f;
+		u_phy->in_connection_align_insertion_frequency = 0xff;
+		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+	}
+
+	u->stp_inactivity_timeout = stp_inactive_to;
+	u->ssp_inactivity_timeout = ssp_inactive_to;
+	u->stp_max_occupancy_timeout = stp_max_occ_to;
+	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+	u->no_outbound_task_timeout = no_outbound_task_to;
+	u->max_concurr_spinup = max_concurr_spinup;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+					       struct sci_user_parameters *sci_parms)
+{
+	u16 index;
+
+	/*
+	 * Validate the user parameters.  If they are not legal, then
+	 * return a failure.
+	 */
+	for (index = 0; index < SCI_MAX_PHYS; index++) {
+		struct sci_phy_user_params *u;
+
+		u = &sci_parms->phys[index];
+
+		if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) &&
+		      (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
+			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+		if (u->in_connection_align_insertion_frequency < 3)
+			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+		if ((u->in_connection_align_insertion_frequency < 3) ||
+		    (u->align_insertion_frequency == 0) ||
+		    (u->notify_enable_spin_up_insertion_frequency == 0))
+			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+	}
+
+	if ((sci_parms->stp_inactivity_timeout == 0) ||
+	    (sci_parms->ssp_inactivity_timeout == 0) ||
+	    (sci_parms->stp_max_occupancy_timeout == 0) ||
+	    (sci_parms->ssp_max_occupancy_timeout == 0) ||
+	    (sci_parms->no_outbound_task_timeout == 0))
+		return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+	memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+	return SCI_SUCCESS;
+}
+
+static void sci_oem_defaults(struct isci_host *ihost)
+{
+	/* these defaults are overridden by the platform / firmware */
+	struct sci_user_parameters *user = &ihost->user_parameters;
+	struct sci_oem_params *oem = &ihost->oem_parameters;
+	int i;
+
+	/* Default to APC mode. */
+	oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+	/* Default to one concurrent spin-up. */
+	oem->controller.max_concurr_spin_up = 1;
+
+	/* Default to no SSC operation. */
+	oem->controller.do_enable_ssc = false;
+
+	/* Default to short cables on all phys. */
+	oem->controller.cable_selection_mask = 0;
+
+	/* Initialize all of the port parameter information to narrow ports. */
+	for (i = 0; i < SCI_MAX_PORTS; i++)
+		oem->ports[i].phy_mask = 0;
+
+	/* Initialize all of the phy parameter information. */
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		/* Default to 3G (i.e. Gen 2). */
+		user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED;
+
+		/* the frequencies cannot be 0 */
+		user->phys[i].align_insertion_frequency = 0x7f;
+		user->phys[i].in_connection_align_insertion_frequency = 0xff;
+		user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33;
+
+		/* Previous Vitesse-based expanders had an arbitration issue that
+		 * is worked around by making the upper 32-bits of the SAS address
+		 * a value greater than the Vitesse company identifier.
+		 * Hence, usage of 0x5FCFFFFF.
+		 */
+		oem->phys[i].sas_address.low = 0x1 + ihost->id;
+		oem->phys[i].sas_address.high = 0x5FCFFFFF;
+	}
+
+	user->stp_inactivity_timeout = 5;
+	user->ssp_inactivity_timeout = 5;
+	user->stp_max_occupancy_timeout = 5;
+	user->ssp_max_occupancy_timeout = 20;
+	user->no_outbound_task_timeout = 2;
+}
+
 static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
 {
-	struct isci_host *isci_host;
+	struct isci_orom *orom = to_pci_info(pdev)->orom;
+	struct sci_user_parameters sci_user_params;
+	u8 oem_version = ISCI_ROM_VER_1_0;
+	struct isci_host *ihost;
 	struct Scsi_Host *shost;
-	int err;
+	int err, i;
 
-	isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
-	if (!isci_host)
+	ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL);
+	if (!ihost)
 		return NULL;
 
-	isci_host->pdev = pdev;
-	isci_host->id = id;
+	ihost->pdev = pdev;
+	ihost->id = id;
+	spin_lock_init(&ihost->scic_lock);
+	init_waitqueue_head(&ihost->eventq);
+	ihost->sas_ha.dev = &ihost->pdev->dev;
+	ihost->sas_ha.lldd_ha = ihost;
+	tasklet_init(&ihost->completion_tasklet,
+		     isci_host_completion_routine, (unsigned long)ihost);
+
+	/* validate module parameters */
+	/* TODO: kill struct sci_user_parameters and reference directly */
+	sci_oem_defaults(ihost);
+	isci_user_parameters_get(&sci_user_params);
+	if (sci_user_parameters_set(ihost, &sci_user_params)) {
+		dev_warn(&pdev->dev,
+			 "%s: sci_user_parameters_set failed\n", __func__);
+		return NULL;
+	}
+
+	/* sanity check platform (or 'firmware') oem parameters */
+	if (orom) {
+		if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) {
+			dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n");
+			return NULL;
+		}
+		ihost->oem_parameters = orom->ctrl[id];
+		oem_version = orom->hdr.version;
+	}
+
+	/* validate oem parameters (platform, firmware, or built-in defaults) */
+	if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) {
+		dev_warn(&pdev->dev, "oem parameter validation failed\n");
+		return NULL;
+	}
+
+	for (i = 0; i < SCI_MAX_PORTS; i++) {
+		struct isci_port *iport = &ihost->ports[i];
+
+		INIT_LIST_HEAD(&iport->remote_dev_list);
+		iport->isci_host = ihost;
+	}
+
+	for (i = 0; i < SCI_MAX_PHYS; i++)
+		isci_phy_init(&ihost->phys[i], ihost, i);
+
+	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+		struct isci_remote_device *idev = &ihost->devices[i];
+
+		INIT_LIST_HEAD(&idev->node);
+	}
 
 	shost = scsi_host_alloc(&isci_sht, sizeof(void *));
 	if (!shost)
 		return NULL;
-	isci_host->shost = shost;
 
 	dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
 		 "{%s, %s, %s, %s}\n",
-		 (is_cable_select_overridden() ? "* " : ""), isci_host->id,
-		 lookup_cable_names(decode_cable_selection(isci_host, 3)),
-		 lookup_cable_names(decode_cable_selection(isci_host, 2)),
-		 lookup_cable_names(decode_cable_selection(isci_host, 1)),
-		 lookup_cable_names(decode_cable_selection(isci_host, 0)));
+		 (is_cable_select_overridden() ? "* " : ""), ihost->id,
+		 lookup_cable_names(decode_cable_selection(ihost, 3)),
+		 lookup_cable_names(decode_cable_selection(ihost, 2)),
+		 lookup_cable_names(decode_cable_selection(ihost, 1)),
+		 lookup_cable_names(decode_cable_selection(ihost, 0)));
 
-	err = isci_host_init(isci_host);
+	err = isci_host_init(ihost);
 	if (err)
 		goto err_shost;
 
-	SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
-	isci_host->sas_ha.core.shost = shost;
+	SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
+	ihost->sas_ha.core.shost = shost;
 	shost->transportt = isci_transport_template;
 
 	shost->max_id = ~0;
@@ -439,11 +599,11 @@
 	if (err)
 		goto err_shost;
 
-	err = isci_register_sas_ha(isci_host);
+	err = isci_register_sas_ha(ihost);
 	if (err)
 		goto err_shost_remove;
 
-	return isci_host;
+	return ihost;
 
  err_shost_remove:
 	scsi_remove_host(shost);
@@ -476,7 +636,7 @@
 	if (!orom)
 		orom = isci_request_oprom(pdev);
 
-	for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+	for (i = 0; orom && i < num_controllers(pdev); i++) {
 		if (sci_oem_parameters_validate(&orom->ctrl[i],
 						orom->hdr.version)) {
 			dev_warn(&pdev->dev,
@@ -525,11 +685,11 @@
 		pci_info->hosts[i] = h;
 
 		/* turn on DIF support */
-		scsi_host_set_prot(h->shost,
+		scsi_host_set_prot(to_shost(h),
 				   SHOST_DIF_TYPE1_PROTECTION |
 				   SHOST_DIF_TYPE2_PROTECTION |
 				   SHOST_DIF_TYPE3_PROTECTION);
-		scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC);
+		scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
 	}
 
 	err = isci_setup_interrupts(pdev);
@@ -537,7 +697,7 @@
 		goto err_host_alloc;
 
 	for_each_isci_host(i, isci_host, pdev)
-		scsi_scan_host(isci_host->shost);
+		scsi_scan_host(to_shost(isci_host));
 
 	return 0;
 
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index fab3586..18f43d4 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -580,7 +580,7 @@
 
 	sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
 
-	iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
+	iphy->protocol = SAS_PROTOCOL_SSP;
 }
 
 static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
@@ -591,7 +591,7 @@
 	 */
 	sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
 
-	iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+	iphy->protocol = SAS_PROTOCOL_SATA;
 }
 
 /**
@@ -668,6 +668,19 @@
 		phy_to_host(iphy)->id, iphy->phy_index, \
 		phy_state_name(state), phy_event_name(code), code)
 
+
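+/* Read-modify-write of the COMSAS negation time (NEGTIME) field in the link
+ * layer's transmit_comsas_signal register.  The event handlers below use it
+ * to extend the timeout after an identify timeout and to restore the default
+ * on link failure.
+ */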
+void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
+{
+	u32 val;
+
+	/* Extend timeout */
+	val = readl(&iphy->link_layer_registers->transmit_comsas_signal);
+	val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK);
+	val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout);
+
+	writel(val, &iphy->link_layer_registers->transmit_comsas_signal);
+}
+
 enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 {
 	enum sci_phy_states state = iphy->sm.current_state_id;
@@ -683,6 +696,13 @@
 			sci_phy_start_sata_link_training(iphy);
 			iphy->is_in_link_training = true;
 			break;
+		case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+			/* Extend timeout value */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+			/* Start the oob/sn state machine over again */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
 		default:
 			phy_event_dbg(iphy, state, event_code);
 			return SCI_FAILURE;
@@ -717,9 +737,19 @@
 			sci_phy_start_sata_link_training(iphy);
 			break;
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
+		case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+			/* Extend the timeout value */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+			/* Start the oob/sn state machine over again */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
 		default:
 			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE;
@@ -740,7 +770,14 @@
 			sci_phy_start_sata_link_training(iphy);
 			break;
 		case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+			/* Extend the timeout value */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+			/* Start the oob/sn state machine over again */
+			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+			break;
 		case SCU_EVENT_LINK_FAILURE:
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
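+			/* fall through - both events restart the OOB/SN state machine */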
 		case SCU_EVENT_HARD_RESET_RECEIVED:
 			/* Start the oob/sn state machine over again */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
@@ -753,6 +790,9 @@
 	case SCI_PHY_SUB_AWAIT_SAS_POWER:
 		switch (scu_get_event_code(event_code)) {
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
@@ -764,6 +804,9 @@
 	case SCI_PHY_SUB_AWAIT_SATA_POWER:
 		switch (scu_get_event_code(event_code)) {
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
@@ -788,6 +831,9 @@
 	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
 		switch (scu_get_event_code(event_code)) {
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
@@ -797,7 +843,7 @@
 			 */
 			break;
 		case SCU_EVENT_SATA_PHY_DETECTED:
-			iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+			iphy->protocol = SAS_PROTOCOL_SATA;
 
 			/* We have received the SATA PHY notification change state */
 			sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
@@ -836,6 +882,9 @@
 						       SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
 			break;
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
@@ -859,6 +908,9 @@
 			break;
 
 		case SCU_EVENT_LINK_FAILURE:
+			/* Change the timeout value to default */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
@@ -871,16 +923,26 @@
 	case SCI_PHY_READY:
 		switch (scu_get_event_code(event_code)) {
 		case SCU_EVENT_LINK_FAILURE:
+			/* Set default timeout */
+			scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
 			/* Link failure change state back to the starting state */
 			sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 			break;
 		case SCU_EVENT_BROADCAST_CHANGE:
+		case SCU_EVENT_BROADCAST_SES:
+		case SCU_EVENT_BROADCAST_RESERVED0:
+		case SCU_EVENT_BROADCAST_RESERVED1:
+		case SCU_EVENT_BROADCAST_EXPANDER:
+		case SCU_EVENT_BROADCAST_AEN:
 			/* Broadcast change received. Notify the port. */
 			if (phy_get_non_dummy_port(iphy) != NULL)
 				sci_port_broadcast_change_received(iphy->owning_port, iphy);
 			else
 				iphy->bcn_received_while_port_unassigned = true;
 			break;
+		case SCU_EVENT_BROADCAST_RESERVED3:
+		case SCU_EVENT_BROADCAST_RESERVED4:
 		default:
 			phy_event_warn(iphy, state, event_code);
 			return SCI_FAILURE_INVALID_STATE;
@@ -1215,7 +1277,7 @@
 	scu_link_layer_start_oob(iphy);
 
 	/* We don't know what kind of phy we are going to be just yet */
-	iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+	iphy->protocol = SAS_PROTOCOL_NONE;
 	iphy->bcn_received_while_port_unassigned = false;
 
 	if (iphy->sm.previous_state_id == SCI_PHY_READY)
@@ -1250,7 +1312,7 @@
 	 */
 	sci_port_deactivate_phy(iphy->owning_port, iphy, false);
 
-	if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+	if (iphy->protocol == SAS_PROTOCOL_SSP) {
 		scu_link_layer_tx_hard_reset(iphy);
 	} else {
 		/* The SCU does not need to have a discrete reset state so
@@ -1316,7 +1378,7 @@
 	iphy->owning_port = iport;
 	iphy->phy_index = phy_index;
 	iphy->bcn_received_while_port_unassigned = false;
-	iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+	iphy->protocol = SAS_PROTOCOL_NONE;
 	iphy->link_layer_registers = NULL;
 	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
 
@@ -1380,12 +1442,14 @@
 	switch (func) {
 	case PHY_FUNC_DISABLE:
 		spin_lock_irqsave(&ihost->scic_lock, flags);
+		scu_link_layer_start_oob(iphy);
 		sci_phy_stop(iphy);
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 		break;
 
 	case PHY_FUNC_LINK_RESET:
 		spin_lock_irqsave(&ihost->scic_lock, flags);
+		scu_link_layer_start_oob(iphy);
 		sci_phy_stop(iphy);
 		sci_phy_start(iphy);
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 0e45833..45fecfa 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -76,13 +76,6 @@
  */
 #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT  250
 
-enum sci_phy_protocol {
-	SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
-	SCIC_SDS_PHY_PROTOCOL_SAS,
-	SCIC_SDS_PHY_PROTOCOL_SATA,
-	SCIC_SDS_MAX_PHY_PROTOCOLS
-};
-
 /**
  * isci_phy - hba local phy infrastructure
  * @sm:
@@ -95,7 +88,7 @@
 	struct sci_base_state_machine sm;
 	struct isci_port *owning_port;
 	enum sas_linkrate max_negotiated_speed;
-	enum sci_phy_protocol protocol;
+	enum sas_protocol protocol;
 	u8 phy_index;
 	bool bcn_received_while_port_unassigned;
 	bool is_in_link_training;
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 5fada73..2fb85bf 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -184,7 +184,7 @@
 
 	sci_port_get_properties(iport, &properties);
 
-	if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
+	if (iphy->protocol == SAS_PROTOCOL_SATA) {
 		u64 attached_sas_address;
 
 		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -204,7 +204,7 @@
 
 		memcpy(&iphy->sas_phy.attached_sas_addr,
 		       &attached_sas_address, sizeof(attached_sas_address));
-	} else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+	} else if (iphy->protocol == SAS_PROTOCOL_SSP) {
 		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
 		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
 
@@ -251,10 +251,10 @@
 		if (isci_phy->sas_phy.port &&
 		    isci_phy->sas_phy.port->num_phys == 1) {
 			/* change the state for all devices on this port.  The
-			 * next task sent to this device will be returned as
-			 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
-			 * remove the target
-			 */
+			 * next task sent to this device will be returned as
+			 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
+			 * remove the target
+			 */
 			list_for_each_entry(isci_device,
 					    &isci_port->remote_dev_list,
 					    node) {
@@ -517,7 +517,7 @@
 	 */
 	iphy = sci_port_get_a_connected_phy(iport);
 	if (iphy) {
-		if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
+		if (iphy->protocol != SAS_PROTOCOL_SATA) {
 			sci_phy_get_attached_sas_address(iphy, sas);
 		} else {
 			sci_phy_get_sas_address(iphy, sas);
@@ -624,7 +624,7 @@
 {
 	struct isci_host *ihost = iport->owning_controller;
 
-	if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
+	if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
 		sci_phy_resume(iphy);
 
 	iport->active_phy_mask |= 1 << iphy->phy_index;
@@ -751,12 +751,10 @@
  * wide ports and direct attached phys.  Since there are no wide ported SATA
  * devices this could become an invalid port configuration.
  */
-bool sci_port_link_detected(
-	struct isci_port *iport,
-	struct isci_phy *iphy)
+bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
 {
 	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
-	    (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+	    (iphy->protocol == SAS_PROTOCOL_SATA)) {
 		if (sci_port_is_wide(iport)) {
 			sci_port_invalid_link_up(iport, iphy);
 			return false;
@@ -1201,6 +1199,8 @@
 	enum sci_status status;
 	enum sci_port_states state;
 
+	sci_port_bcn_enable(iport);
+
 	state = iport->sm.current_state_id;
 	switch (state) {
 	case SCI_PORT_STOPPED: {
@@ -1548,6 +1548,29 @@
 	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
 }
 
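+/* Reference-counted control of the per-phy hang detection timeout: a non-zero
+ * @timeout takes a reference and is written to every active phy, a zero
+ * @timeout drops a reference and is only written out once the last user has
+ * released it.
+ */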
+void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
+{
+	int phy_index;
+	u32 phy_mask = iport->active_phy_mask;
+
+	if (timeout)
+		++iport->hang_detect_users;
+	else if (iport->hang_detect_users > 1)
+		--iport->hang_detect_users;
+	else
+		iport->hang_detect_users = 0;
+
+	if (timeout || (iport->hang_detect_users == 0)) {
+		for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+			if ((phy_mask >> phy_index) & 1) {
+				writel(timeout,
+				       &iport->phy_table[phy_index]
+					  ->link_layer_registers
+					  ->link_layer_hang_detection_timeout);
+			}
+		}
+	}
+}
+
 /* --------------------------------------------------------------------------- */
 
 static const struct sci_base_state sci_port_state_table[] = {
@@ -1596,6 +1619,7 @@
 
 	iport->started_request_count = 0;
 	iport->assigned_device_count = 0;
+	iport->hang_detect_users = 0;
 
 	iport->reserved_rni = SCU_DUMMY_INDEX;
 	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
@@ -1608,13 +1632,6 @@
 		iport->phy_table[index] = NULL;
 }
 
-void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
-{
-	INIT_LIST_HEAD(&iport->remote_dev_list);
-	INIT_LIST_HEAD(&iport->domain_dev_list);
-	iport->isci_host = ihost;
-}
-
 void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
 {
 	struct isci_host *ihost = iport->owning_controller;
@@ -1671,17 +1688,6 @@
 			__func__, iport, status);
 
 	}
-
-	/* If the hard reset for the port has failed, consider this
-	 * the same as link failures on all phys in the port.
-	 */
-	if (ret != TMF_RESP_FUNC_COMPLETE) {
-
-		dev_err(&ihost->pdev->dev,
-			"%s: iport = %p; hard reset failed "
-			"(0x%x) - driving explicit link fail for all phys\n",
-			__func__, iport, iport->hard_reset_status);
-	}
 	return ret;
 }
 
@@ -1740,7 +1746,7 @@
 	struct isci_host *ihost = phy->ha->lldd_ha;
 	struct isci_phy *iphy = to_iphy(phy);
 	struct asd_sas_port *port = phy->port;
-	struct isci_port *iport;
+	struct isci_port *iport = NULL;
 	unsigned long flags;
 	int i;
 
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 6b56240..861e8f7 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -97,7 +97,6 @@
 struct isci_port {
 	struct isci_host *isci_host;
 	struct list_head remote_dev_list;
-	struct list_head domain_dev_list;
 	#define IPORT_RESET_PENDING 0
 	unsigned long state;
 	enum sci_status hard_reset_status;
@@ -112,6 +111,7 @@
 	u16 reserved_tag;
 	u32 started_request_count;
 	u32 assigned_device_count;
+	u32 hang_detect_users;
 	u32 not_ready_reason;
 	struct isci_phy *phy_table[SCI_MAX_PHYS];
 	struct isci_host *owning_controller;
@@ -270,14 +270,13 @@
 	struct isci_port *iport,
 	struct sci_sas_address *sas_address);
 
+void sci_port_set_hang_detection_timeout(
+	struct isci_port *isci_port,
+	u32 timeout);
+
 void isci_port_formed(struct asd_sas_phy *);
 void isci_port_deformed(struct asd_sas_phy *);
 
-void isci_port_init(
-	struct isci_port *port,
-	struct isci_host *host,
-	int index);
-
 int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
 				 struct isci_phy *iphy);
 int isci_ata_check_ready(struct domain_device *dev);
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 6d1e954..cd962da 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
 
 #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT    (10)
 #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT    (10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (250)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (1000)
 
 enum SCIC_SDS_APC_ACTIVITY {
 	SCIC_SDS_APC_SKIP_PHY,
@@ -472,13 +472,9 @@
  * down event or a link up event where we can not yet tell to which a phy
  * belongs.
  */
-static void sci_apc_agent_start_timer(
-	struct sci_port_configuration_agent *port_agent,
-	u32 timeout)
+static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent,
+				      u32 timeout)
 {
-	if (port_agent->timer_pending)
-		sci_del_timer(&port_agent->timer);
-
 	port_agent->timer_pending = true;
 	sci_mod_timer(&port_agent->timer, timeout);
 }
@@ -697,6 +693,9 @@
 						   &ihost->phys[index], false);
 	}
 
+	if (is_controller_start_complete(ihost))
+		sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+
 done:
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 }
@@ -732,6 +731,11 @@
 	}
 }
 
+bool is_port_config_apc(struct isci_host *ihost)
+{
+	return ihost->port_agent.link_up_handler == sci_apc_agent_link_up;
+}
+
 enum sci_status sci_port_configuration_agent_initialize(
 	struct isci_host *ihost,
 	struct sci_port_configuration_agent *port_agent)
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 9b8117b..4d95654 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -112,18 +112,6 @@
 	return rom;
 }
 
-enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
-					  struct isci_orom *orom, int scu_index)
-{
-	/* check for valid inputs */
-	if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
-	    scu_index > orom->hdr.num_elements || !oem)
-		return -EINVAL;
-
-	*oem = orom->ctrl[scu_index];
-	return 0;
-}
-
 struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
 {
 	struct isci_orom *orom = NULL, *data;
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index bb0e9d4..e08b578 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -156,8 +156,6 @@
 
 struct isci_orom;
 struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
-enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
-					  struct isci_orom *orom, int scu_index);
 struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
 struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
 
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 7eb0ccd..97f3ceb 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -1239,6 +1239,14 @@
 #define SCU_SAS_LLCTL_GEN_BIT(name) \
 	SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
 
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT                     (0xF0)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED                    (0x1FF)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT                       (0)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK                        (0x3FF)
+
+#define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \
+	SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value)
+
 
 /* #define SCU_FRXHECR_DCNT_OFFSET      0x00B0 */
 #define SCU_PSZGCR_OFFSET           0x00E4
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 8f501b0..c3aa6c5 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -72,46 +72,11 @@
 }
 #undef C
 
-/**
- * isci_remote_device_not_ready() - This function is called by the ihost when
- *    the remote device is not ready. We mark the isci device as ready (not
- *    "ready_for_io") and signal the waiting proccess.
- * @isci_host: This parameter specifies the isci host object.
- * @isci_device: This parameter specifies the remote device
- *
- * sci_lock is held on entrance to this function.
- */
-static void isci_remote_device_not_ready(struct isci_host *ihost,
-				  struct isci_remote_device *idev, u32 reason)
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+					  enum sci_remote_node_suspension_reasons reason)
 {
-	struct isci_request *ireq;
-
-	dev_dbg(&ihost->pdev->dev,
-		"%s: isci_device = %p\n", __func__, idev);
-
-	switch (reason) {
-	case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
-		set_bit(IDEV_GONE, &idev->flags);
-		break;
-	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
-		set_bit(IDEV_IO_NCQERROR, &idev->flags);
-
-		/* Kill all outstanding requests for the device. */
-		list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
-
-			dev_dbg(&ihost->pdev->dev,
-				"%s: isci_device = %p request = %p\n",
-				__func__, idev, ireq);
-
-			sci_controller_terminate_request(ihost,
-							  idev,
-							  ireq);
-		}
-		/* Fall through into the default case... */
-	default:
-		clear_bit(IDEV_IO_READY, &idev->flags);
-		break;
-	}
+	return sci_remote_node_context_suspend(&idev->rnc, reason,
+					       SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
 }
 
 /**
@@ -133,6 +98,225 @@
 		wake_up(&ihost->eventq);
 }
 
+static enum sci_status sci_remote_device_terminate_req(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	int check_abort,
+	struct isci_request *ireq)
+{
+	if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
+	    (ireq->target_device != idev) ||
+	    (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
+		return SCI_SUCCESS;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
+		__func__, idev, idev->flags, ireq, ireq->target_device);
+
+	set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
+
+	return sci_controller_terminate_request(ihost, idev, ireq);
+}
+
+static enum sci_status sci_remote_device_terminate_reqs_checkabort(
+	struct isci_remote_device *idev,
+	int chk)
+{
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+	enum sci_status status  = SCI_SUCCESS;
+	u32 i;
+
+	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+		struct isci_request *ireq = ihost->reqs[i];
+		enum sci_status s;
+
+		s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
+		if (s != SCI_SUCCESS)
+			status = s;
+	}
+	return status;
+}
+
+static bool isci_compare_suspendcount(
+	struct isci_remote_device *idev,
+	u32 localcount)
+{
+	smp_rmb();
+
+	/* Check for a change in the suspend count, or the RNC
+	 * being destroyed.
+	 */
+	return (localcount != idev->rnc.suspend_count)
+	    || sci_remote_node_context_is_being_destroyed(&idev->rnc);
+}
+
+static bool isci_check_reqterm(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq,
+	u32 localcount)
+{
+	unsigned long flags;
+	bool res;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	res = isci_compare_suspendcount(idev, localcount)
+		&& !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	return res;
+}
+
+static bool isci_check_devempty(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	u32 localcount)
+{
+	unsigned long flags;
+	bool res;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	res = isci_compare_suspendcount(idev, localcount)
+		&& idev->started_request_count == 0;
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	return res;
+}
+
+enum sci_status isci_remote_device_terminate_requests(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq)
+{
+	enum sci_status status = SCI_SUCCESS;
+	unsigned long flags;
+	u32 rnc_suspend_count;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
+	if (isci_get_device(idev) == NULL) {
+		dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
+			__func__, idev);
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		status = SCI_FAILURE;
+	} else {
+		/* If already suspended, don't wait for another suspension. */
+		smp_rmb();
+		rnc_suspend_count
+			= sci_remote_node_context_is_suspended(&idev->rnc)
+				? 0 : idev->rnc.suspend_count;
+
+		dev_dbg(&ihost->pdev->dev,
+			"%s: idev=%p, ireq=%p; started_request_count=%d, "
+				"rnc_suspend_count=%d, rnc.suspend_count=%d"
+				"about to wait\n",
+			__func__, idev, ireq, idev->started_request_count,
+			rnc_suspend_count, idev->rnc.suspend_count);
+
+		#define MAX_SUSPEND_MSECS 10000
+		if (ireq) {
+			/* Terminate a specific TC. */
+			set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
+			sci_remote_device_terminate_req(ihost, idev, 0, ireq);
+			spin_unlock_irqrestore(&ihost->scic_lock, flags);
+			if (!wait_event_timeout(ihost->eventq,
+						isci_check_reqterm(ihost, idev, ireq,
+								   rnc_suspend_count),
+						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
+
+				dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
+					 __func__, ihost->id);
+				dev_dbg(&ihost->pdev->dev,
+					 "%s: ******* Timeout waiting for "
+					 "suspend; idev=%p, current state %s; "
+					 "started_request_count=%d, flags=%lx\n\t"
+					 "rnc_suspend_count=%d, rnc.suspend_count=%d "
+					 "RNC: current state %s, current "
+					 "suspend_type %x dest state %d;\n"
+					 "ireq=%p, ireq->flags = %lx\n",
+					 __func__, idev,
+					 dev_state_name(idev->sm.current_state_id),
+					 idev->started_request_count, idev->flags,
+					 rnc_suspend_count, idev->rnc.suspend_count,
+					 rnc_state_name(idev->rnc.sm.current_state_id),
+					 idev->rnc.suspend_type,
+					 idev->rnc.destination_state,
+					 ireq, ireq->flags);
+			}
+			spin_lock_irqsave(&ihost->scic_lock, flags);
+			clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
+			if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+				isci_free_tag(ihost, ireq->io_tag);
+			spin_unlock_irqrestore(&ihost->scic_lock, flags);
+		} else {
+			/* Terminate all TCs. */
+			sci_remote_device_terminate_requests(idev);
+			spin_unlock_irqrestore(&ihost->scic_lock, flags);
+			if (!wait_event_timeout(ihost->eventq,
+						isci_check_devempty(ihost, idev,
+								    rnc_suspend_count),
+						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
+
+				dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
+					 __func__, ihost->id);
+				dev_dbg(&ihost->pdev->dev,
+					"%s: ******* Timeout waiting for "
+					"suspend; idev=%p, current state %s; "
+					"started_request_count=%d, flags=%lx\n\t"
+					"rnc_suspend_count=%d, "
+					"RNC: current state %s, "
+					"rnc.suspend_count=%d, current "
+					"suspend_type %x dest state %d\n",
+					__func__, idev,
+					dev_state_name(idev->sm.current_state_id),
+					idev->started_request_count, idev->flags,
+					rnc_suspend_count,
+					rnc_state_name(idev->rnc.sm.current_state_id),
+					idev->rnc.suspend_count,
+					idev->rnc.suspend_type,
+					idev->rnc.destination_state);
+			}
+		}
+		dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
+			__func__, idev);
+		isci_put_device(idev);
+	}
+	return status;
+}
+
+/**
+ * isci_remote_device_not_ready() - This function is called by the ihost when
+ *    the remote device is not ready. We mark the isci device as ready (not
+ *    "ready_for_io") and signal the waiting process.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_device: This parameter specifies the remote device
+ *
+ * sci_lock is held on entrance to this function.
+ */
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+					 struct isci_remote_device *idev,
+					 u32 reason)
+{
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
+
+	switch (reason) {
+	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+		set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+		/* Suspend the remote device so the I/O can be terminated. */
+		sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
+
+		/* Kill all outstanding requests for the device. */
+		sci_remote_device_terminate_requests(idev);
+
+		/* Fall through into the default case... */
+	default:
+		clear_bit(IDEV_IO_READY, &idev->flags);
+		break;
+	}
+}
+
 /* called once the remote node context is ready to be freed.
  * The remote device can now report that its stop operation is complete. none
  */
@@ -144,26 +328,10 @@
 	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
 }
 
-static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
+enum sci_status sci_remote_device_terminate_requests(
+	struct isci_remote_device *idev)
 {
-	struct isci_host *ihost = idev->owning_port->owning_controller;
-	enum sci_status status  = SCI_SUCCESS;
-	u32 i;
-
-	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
-		struct isci_request *ireq = ihost->reqs[i];
-		enum sci_status s;
-
-		if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
-		    ireq->target_device != idev)
-			continue;
-
-		s = sci_controller_terminate_request(ihost, idev, ireq);
-		if (s != SCI_SUCCESS)
-			status = s;
-	}
-
-	return status;
+	return sci_remote_device_terminate_reqs_checkabort(idev, 0);
 }
 
 enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
@@ -201,13 +369,16 @@
 	case SCI_SMP_DEV_IDLE:
 	case SCI_SMP_DEV_CMD:
 		sci_change_state(sm, SCI_DEV_STOPPING);
-		if (idev->started_request_count == 0) {
+		if (idev->started_request_count == 0)
 			sci_remote_node_context_destruct(&idev->rnc,
-							      rnc_destruct_done, idev);
-			return SCI_SUCCESS;
-		} else
-			return sci_remote_device_terminate_requests(idev);
-		break;
+							 rnc_destruct_done,
+							 idev);
+		else {
+			sci_remote_device_suspend(
+				idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+			sci_remote_device_terminate_requests(idev);
+		}
+		return SCI_SUCCESS;
 	case SCI_DEV_STOPPING:
 		/* All requests should have been terminated, but if there is an
 		 * attempt to stop a device already in the stopping state, then
@@ -265,22 +436,6 @@
 	return SCI_SUCCESS;
 }
 
-enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
-					       u32 suspend_type)
-{
-	struct sci_base_state_machine *sm = &idev->sm;
-	enum sci_remote_device_states state = sm->current_state_id;
-
-	if (state != SCI_STP_DEV_CMD) {
-		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
-			 __func__, dev_state_name(state));
-		return SCI_FAILURE_INVALID_STATE;
-	}
-
-	return sci_remote_node_context_suspend(&idev->rnc,
-						    suspend_type, NULL, NULL);
-}
-
 enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
 						     u32 frame_index)
 {
@@ -412,9 +567,9 @@
 enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
 						     u32 event_code)
 {
+	enum sci_status status;
 	struct sci_base_state_machine *sm = &idev->sm;
 	enum sci_remote_device_states state = sm->current_state_id;
-	enum sci_status status;
 
 	switch (scu_get_event_type(event_code)) {
 	case SCU_EVENT_TYPE_RNC_OPS_MISC:
@@ -427,9 +582,7 @@
 			status = SCI_SUCCESS;
 
 			/* Suspend the associated RNC */
-			sci_remote_node_context_suspend(&idev->rnc,
-							      SCI_SOFTWARE_SUSPENSION,
-							      NULL, NULL);
+			sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
 
 			dev_dbg(scirdev_to_dev(idev),
 				"%s: device: %p event code: %x: %s\n",
@@ -455,6 +608,10 @@
 	if (status != SCI_SUCCESS)
 		return status;
 
+	/* Decode device-specific states that may require an RNC resume during
+	 * normal operation.  When the abort path is active, these resumes are
+	 * managed when the abort path exits.
+	 */
 	if (state == SCI_STP_DEV_ATAPI_ERROR) {
 		/* For ATAPI error state resume the RNC right away. */
 		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
@@ -743,10 +900,6 @@
 		if (status != SCI_SUCCESS)
 			return status;
 
-		status = sci_remote_node_context_start_task(&idev->rnc, ireq);
-		if (status != SCI_SUCCESS)
-			goto out;
-
 		status = sci_request_start(ireq);
 		if (status != SCI_SUCCESS)
 			goto out;
@@ -765,11 +918,11 @@
 		 * the correct action when the remote node context is suspended
 		 * and later resumed.
 		 */
-		sci_remote_node_context_suspend(&idev->rnc,
-				SCI_SOFTWARE_SUSPENSION, NULL, NULL);
-		sci_remote_node_context_resume(&idev->rnc,
-				sci_remote_device_continue_request,
-						    idev);
+		sci_remote_device_suspend(idev,
+					  SCI_SW_SUSPEND_LINKHANG_DETECT);
+
+		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
+				sci_remote_device_continue_request, idev);
 
 	out:
 		sci_remote_device_start_request(idev, ireq, status);
@@ -783,7 +936,9 @@
 		if (status != SCI_SUCCESS)
 			return status;
 
-		status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+		/* Resume the RNC as needed: */
+		status = sci_remote_node_context_start_task(&idev->rnc, ireq,
+							    NULL, NULL);
 		if (status != SCI_SUCCESS)
 			break;
 
@@ -892,7 +1047,7 @@
 	 * here should go through isci_remote_device_nuke_requests.
 	 * If we hit this condition, we will need a way to complete
 	 * io requests in process */
-	BUG_ON(!list_empty(&idev->reqs_in_process));
+	BUG_ON(idev->started_request_count > 0);
 
 	sci_remote_device_destruct(idev);
 	list_del_init(&idev->node);
@@ -954,14 +1109,21 @@
 static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
 {
 	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
 
-	sci_remote_node_context_suspend(
-		&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_device = %p\n", __func__, idev);
+
+	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
 }
 
 static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
 {
 	struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_device = %p\n", __func__, idev);
 
 	sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
 }
@@ -1113,33 +1275,20 @@
 {
 	enum sci_status status;
 	struct sci_port_properties properties;
-	struct domain_device *dev = idev->domain_dev;
 
 	sci_remote_device_construct(iport, idev);
 
-	/*
-	 * This information is request to determine how many remote node context
-	 * entries will be needed to store the remote node.
-	 */
-	idev->is_direct_attached = true;
-
 	sci_port_get_properties(iport, &properties);
 	/* Get accurate port width from port's phy mask for a DA device. */
 	idev->device_port_width = hweight32(properties.phy_mask);
 
 	status = sci_controller_allocate_remote_node_context(iport->owning_controller,
-								  idev,
-								  &idev->rnc.remote_node_index);
+							     idev,
+							     &idev->rnc.remote_node_index);
 
 	if (status != SCI_SUCCESS)
 		return status;
 
-	if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
-	    (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
-		/* pass */;
-	else
-		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
-
 	idev->connection_rate = sci_port_get_max_allowed_speed(iport);
 
 	return SCI_SUCCESS;
@@ -1171,19 +1320,13 @@
 	if (status != SCI_SUCCESS)
 		return status;
 
-	if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
-	    (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
-		/* pass */;
-	else
-		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
-
-	/*
-	 * For SAS-2 the physical link rate is actually a logical link
+	/* For SAS-2 the physical link rate is actually a logical link
 	 * rate that incorporates multiplexing.  The SCU doesn't
 	 * incorporate multiplexing and for the purposes of the
 	 * connection the logical link rate is that same as the
 	 * physical.  Furthermore, the SAS-2 and SAS-1.1 fields overlay
-	 * one another, so this code works for both situations. */
+	 * one another, so this code works for both situations.
+	 */
 	idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
 					 dev->linkrate);
 
@@ -1193,6 +1336,105 @@
 	return SCI_SUCCESS;
 }
 
+enum sci_status sci_remote_device_resume(
+	struct isci_remote_device *idev,
+	scics_sds_remote_node_context_callback cb_fn,
+	void *cb_p)
+{
+	enum sci_status status;
+
+	status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
+	if (status != SCI_SUCCESS)
+		dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
+			__func__, status);
+	return status;
+}
+
+static void isci_remote_device_resume_from_abort_complete(void *cbparam)
+{
+	struct isci_remote_device *idev = cbparam;
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+	scics_sds_remote_node_context_callback abort_resume_cb =
+		idev->abort_resume_cb;
+
+	dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
+		__func__, abort_resume_cb);
+
+	if (abort_resume_cb != NULL) {
+		idev->abort_resume_cb = NULL;
+		abort_resume_cb(idev->abort_resume_cbparam);
+	}
+	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+	wake_up(&ihost->eventq);
+}
+
+static bool isci_remote_device_test_resume_done(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev)
+{
+	unsigned long flags;
+	bool done;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
+		|| test_bit(IDEV_STOP_PENDING, &idev->flags)
+		|| sci_remote_node_context_is_being_destroyed(&idev->rnc);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	return done;
+}
+
+void isci_remote_device_wait_for_resume_from_abort(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev)
+{
+	dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
+		 __func__, idev);
+
+	#define MAX_RESUME_MSECS 10000
+	if (!wait_event_timeout(ihost->eventq,
+				isci_remote_device_test_resume_done(ihost, idev),
+				msecs_to_jiffies(MAX_RESUME_MSECS))) {
+
+		dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
+			 "resume: %p\n", __func__, idev);
+	}
+	clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+
+	dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
+		 __func__, idev);
+}
+
+enum sci_status isci_remote_device_resume_from_abort(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev)
+{
+	unsigned long flags;
+	enum sci_status status = SCI_SUCCESS;
+	int destroyed;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	/* Preserve any current resume callbacks, for instance from other
+	 * resumptions.
+	 */
+	idev->abort_resume_cb = idev->rnc.user_callback;
+	idev->abort_resume_cbparam = idev->rnc.user_cookie;
+	set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+	clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
+	destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
+	if (!destroyed)
+		status = sci_remote_device_resume(
+			idev, isci_remote_device_resume_from_abort_complete,
+			idev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+	if (!destroyed && (status == SCI_SUCCESS))
+		isci_remote_device_wait_for_resume_from_abort(ihost, idev);
+	else
+		clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+
+	return status;
+}
+
 /**
  * sci_remote_device_start() - This method will start the supplied remote
  *    device.  This method enables normal IO requests to flow through to the
@@ -1207,7 +1449,7 @@
  * the device when there have been no phys added to it.
  */
 static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
-						u32 timeout)
+					       u32 timeout)
 {
 	struct sci_base_state_machine *sm = &idev->sm;
 	enum sci_remote_device_states state = sm->current_state_id;
@@ -1219,9 +1461,8 @@
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
-	status = sci_remote_node_context_resume(&idev->rnc,
-						     remote_device_resume_done,
-						     idev);
+	status = sci_remote_device_resume(idev, remote_device_resume_done,
+					  idev);
 	if (status != SCI_SUCCESS)
 		return status;
 
@@ -1259,20 +1500,6 @@
 	return status;
 }
 
-void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
-{
-	DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
-
-	dev_dbg(&ihost->pdev->dev,
-		"%s: idev = %p\n", __func__, idev);
-
-	/* Cleanup all requests pending for this device. */
-	isci_terminate_pending_requests(ihost, idev);
-
-	dev_dbg(&ihost->pdev->dev,
-		"%s: idev = %p, done\n", __func__, idev);
-}
-
 /**
  * This function builds the isci_remote_device when a libsas dev_found message
  *    is received.
@@ -1297,10 +1524,6 @@
 		dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
 		return NULL;
 	}
-
-	if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
-		return NULL;
-
 	if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
 		return NULL;
 
@@ -1342,14 +1565,8 @@
 	spin_lock_irqsave(&ihost->scic_lock, flags);
 	idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
 	set_bit(IDEV_GONE, &idev->flags);
-	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
-	/* Kill all outstanding requests. */
-	isci_remote_device_nuke_requests(ihost, idev);
 
 	set_bit(IDEV_STOP_PENDING, &idev->flags);
-
-	spin_lock_irqsave(&ihost->scic_lock, flags);
 	status = sci_remote_device_stop(idev, 50);
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
@@ -1359,6 +1576,9 @@
 	else
 		wait_for_device_stop(ihost, idev);
 
+	dev_dbg(&ihost->pdev->dev,
+		"%s: isci_device = %p, waiting done.\n", __func__, idev);
+
 	return status;
 }
 
@@ -1434,3 +1654,73 @@
 
 	return status == SCI_SUCCESS ? 0 : -ENODEV;
 }
+
+enum sci_status isci_remote_device_suspend_terminate(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq)
+{
+	unsigned long flags;
+	enum sci_status status;
+
+	/* Put the device into suspension. */
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
+	sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	/* Terminate and wait for the completions. */
+	status = isci_remote_device_terminate_requests(ihost, idev, ireq);
+	if (status != SCI_SUCCESS)
+		dev_dbg(&ihost->pdev->dev,
+			"%s: isci_remote_device_terminate_requests(%p) "
+				"returned %d!\n",
+			__func__, idev, status);
+
+	/* NOTE: RNC resumption is left to the caller! */
+	return status;
+}
+
+int isci_remote_device_is_safe_to_abort(
+	struct isci_remote_device *idev)
+{
+	return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
+}
+
+enum sci_status sci_remote_device_abort_requests_pending_abort(
+	struct isci_remote_device *idev)
+{
+	return sci_remote_device_terminate_reqs_checkabort(idev, 1);
+}
+
+enum sci_status isci_remote_device_reset_complete(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev)
+{
+	unsigned long flags;
+	enum sci_status status;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	status = sci_remote_device_reset_complete(idev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	return status;
+}
+
+void isci_dev_set_hang_detection_timeout(
+	struct isci_remote_device *idev,
+	u32 timeout)
+{
+	if (dev_is_sata(idev->domain_dev)) {
+		if (timeout) {
+			if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
+					     &idev->flags))
+				return;  /* Already enabled. */
+		} else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
+					       &idev->flags))
+			return;  /* Not enabled. */
+
+		sci_port_set_hang_detection_timeout(idev->owning_port,
+						    timeout);
+	}
+}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 58637ee..7674caa 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -85,27 +85,38 @@
 	#define IDEV_GONE 3
 	#define IDEV_IO_READY 4
 	#define IDEV_IO_NCQERROR 5
+	#define IDEV_RNC_LLHANG_ENABLED 6
+	#define IDEV_ABORT_PATH_ACTIVE 7
+	#define IDEV_ABORT_PATH_RESUME_PENDING 8
 	unsigned long flags;
 	struct kref kref;
 	struct isci_port *isci_port;
 	struct domain_device *domain_dev;
 	struct list_head node;
-	struct list_head reqs_in_process;
 	struct sci_base_state_machine sm;
 	u32 device_port_width;
 	enum sas_linkrate connection_rate;
-	bool is_direct_attached;
 	struct isci_port *owning_port;
 	struct sci_remote_node_context rnc;
 	/* XXX unify with device reference counting and delete */
 	u32 started_request_count;
 	struct isci_request *working_request;
 	u32 not_ready_reason;
+	scics_sds_remote_node_context_callback abort_resume_cb;
+	void *abort_resume_cbparam;
 };
 
 #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
 
 /* device reference routines must be called under sci_lock */
+static inline struct isci_remote_device *isci_get_device(
+	struct isci_remote_device *idev)
+{
+	if (idev)
+		kref_get(&idev->kref);
+	return idev;
+}
+
 static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
 {
 	struct isci_remote_device *idev = dev->lldd_dev;
@@ -302,6 +313,8 @@
 		idev->started_request_count--;
 }
 
+void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout);
+
 enum sci_status sci_remote_device_frame_handler(
 	struct isci_remote_device *idev,
 	u32 frame_index);
@@ -325,12 +338,50 @@
 	struct isci_remote_device *idev,
 	struct isci_request *ireq);
 
-enum sci_status sci_remote_device_suspend(
-	struct isci_remote_device *idev,
-	u32 suspend_type);
-
 void sci_remote_device_post_request(
 	struct isci_remote_device *idev,
 	u32 request);
 
+enum sci_status sci_remote_device_terminate_requests(
+	struct isci_remote_device *idev);
+
+int isci_remote_device_is_safe_to_abort(
+	struct isci_remote_device *idev);
+
+enum sci_status
+sci_remote_device_abort_requests_pending_abort(
+	struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_suspend(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev);
+
+enum sci_status sci_remote_device_resume(
+	struct isci_remote_device *idev,
+	scics_sds_remote_node_context_callback cb_fn,
+	void *cb_p);
+
+enum sci_status isci_remote_device_resume_from_abort(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_reset(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_reset_complete(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_suspend_terminate(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+
+enum sci_status isci_remote_device_terminate_requests(
+	struct isci_host *ihost,
+	struct isci_remote_device *idev,
+	struct isci_request *ireq);
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+					  enum sci_remote_node_suspension_reasons reason);
 #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 3a94634..1910100 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -52,7 +52,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
-
+#include <scsi/sas_ata.h>
 #include "host.h"
 #include "isci.h"
 #include "remote_device.h"
@@ -90,6 +90,15 @@
 	return false;
 }
 
+bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
+{
+	u32 current_state = sci_rnc->sm.current_state_id;
+
+	if (current_state == SCI_RNC_TX_RX_SUSPENDED)
+		return true;
+	return false;
+}
+
 static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
 {
 	if (id < ihost->remote_node_entries &&
@@ -131,7 +140,7 @@
 
 	rnc->ssp.arbitration_wait_time = 0;
 
-	if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+	if (dev_is_sata(dev)) {
 		rnc->ssp.connection_occupancy_timeout =
 			ihost->user_parameters.stp_max_occupancy_timeout;
 		rnc->ssp.connection_inactivity_timeout =
@@ -151,7 +160,6 @@
 	rnc->ssp.oaf_source_zone_group = 0;
 	rnc->ssp.oaf_more_compatibility_features = 0;
 }
-
 /**
  *
  * @sci_rnc:
@@ -165,23 +173,30 @@
 static void sci_remote_node_context_setup_to_resume(
 	struct sci_remote_node_context *sci_rnc,
 	scics_sds_remote_node_context_callback callback,
-	void *callback_parameter)
+	void *callback_parameter,
+	enum sci_remote_node_context_destination_state dest_param)
 {
-	if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
-		sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
-		sci_rnc->user_callback     = callback;
-		sci_rnc->user_cookie       = callback_parameter;
+	if (sci_rnc->destination_state != RNC_DEST_FINAL) {
+		sci_rnc->destination_state = dest_param;
+		if (callback != NULL) {
+			sci_rnc->user_callback = callback;
+			sci_rnc->user_cookie   = callback_parameter;
+		}
 	}
 }
 
-static void sci_remote_node_context_setup_to_destory(
+static void sci_remote_node_context_setup_to_destroy(
 	struct sci_remote_node_context *sci_rnc,
 	scics_sds_remote_node_context_callback callback,
 	void *callback_parameter)
 {
-	sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
+	struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));
+
+	sci_rnc->destination_state = RNC_DEST_FINAL;
 	sci_rnc->user_callback     = callback;
 	sci_rnc->user_cookie       = callback_parameter;
+
+	wake_up(&ihost->eventq);
 }
 
 /**
@@ -203,9 +218,19 @@
 
 static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
 {
-	if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+	switch (rnc->destination_state) {
+	case RNC_DEST_READY:
+	case RNC_DEST_SUSPENDED_RESUME:
+		rnc->destination_state = RNC_DEST_READY;
+		/* Fall through... */
+	case RNC_DEST_FINAL:
 		sci_remote_node_context_resume(rnc, rnc->user_callback,
-						    rnc->user_cookie);
+					       rnc->user_cookie);
+		break;
+	default:
+		rnc->destination_state = RNC_DEST_UNSPECIFIED;
+		break;
+	}
 }
 
 static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
@@ -219,13 +244,12 @@
 
 	rnc_buffer->ssp.is_valid = true;
 
-	if (!idev->is_direct_attached &&
-	    (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
+	if (dev_is_sata(dev) && dev->parent) {
 		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
 	} else {
 		sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
 
-		if (idev->is_direct_attached)
+		if (!dev->parent)
 			sci_port_setup_transports(idev->owning_port,
 						  sci_rnc->remote_node_index);
 	}
@@ -248,13 +272,18 @@
 static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
 {
 	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+	struct isci_remote_device *idev = rnc_to_dev(rnc);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
 
 	/* Check to see if we have gotten back to the initial state because
 	 * someone requested to destroy the remote node context object.
 	 */
 	if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
-		rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+		rnc->destination_state = RNC_DEST_UNSPECIFIED;
 		sci_remote_node_context_notify_user(rnc);
+
+		smp_wmb();
+		wake_up(&ihost->eventq);
 	}
 }
 
@@ -269,6 +298,8 @@
 {
 	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
 
+	/* Terminate all outstanding requests. */
+	sci_remote_device_terminate_requests(rnc_to_dev(rnc));
 	sci_remote_node_context_invalidate_context_buffer(rnc);
 }
 
@@ -287,10 +318,8 @@
 	 * resume because of a target reset we also need to update
 	 * the STPTLDARNI register with the RNi of the device
 	 */
-	if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
-	    idev->is_direct_attached)
-		sci_port_setup_transports(idev->owning_port,
-					       rnc->remote_node_index);
+	if (dev_is_sata(dev) && !dev->parent)
+		sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
 
 	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
 }
@@ -298,10 +327,22 @@
 static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
 {
 	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+	enum sci_remote_node_context_destination_state dest_select;
+	int tell_user = 1;
 
-	rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+	dest_select = rnc->destination_state;
+	rnc->destination_state = RNC_DEST_UNSPECIFIED;
 
-	if (rnc->user_callback)
+	if ((dest_select == RNC_DEST_SUSPENDED) ||
+	    (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
+		sci_remote_node_context_suspend(
+			rnc, rnc->suspend_reason,
+			SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
+
+		if (dest_select == RNC_DEST_SUSPENDED_RESUME)
+			tell_user = 0;  /* Wait until ready again. */
+	}
+	if (tell_user)
 		sci_remote_node_context_notify_user(rnc);
 }
 
@@ -315,10 +356,34 @@
 static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
 {
 	struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+	struct isci_remote_device *idev = rnc_to_dev(rnc);
+	struct isci_host *ihost = idev->owning_port->owning_controller;
+	u32 new_count = rnc->suspend_count + 1;
 
+	if (new_count == 0)
+		rnc->suspend_count = 1;
+	else
+		rnc->suspend_count = new_count;
+	smp_wmb();
+
+	/* Terminate outstanding requests pending abort. */
+	sci_remote_device_abort_requests_pending_abort(idev);
+
+	wake_up(&ihost->eventq);
 	sci_remote_node_context_continue_state_transitions(rnc);
 }
 
+static void sci_remote_node_context_await_suspend_state_exit(
+	struct sci_base_state_machine *sm)
+{
+	struct sci_remote_node_context *rnc
+		= container_of(sm, typeof(*rnc), sm);
+	struct isci_remote_device *idev = rnc_to_dev(rnc);
+
+	if (dev_is_sata(idev->domain_dev))
+		isci_dev_set_hang_detection_timeout(idev, 0);
+}
+
 static const struct sci_base_state sci_remote_node_context_state_table[] = {
 	[SCI_RNC_INITIAL] = {
 		.enter_state = sci_remote_node_context_initial_state_enter,
@@ -341,7 +406,9 @@
 	[SCI_RNC_TX_RX_SUSPENDED] = {
 		.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
 	},
-	[SCI_RNC_AWAIT_SUSPENSION] = { },
+	[SCI_RNC_AWAIT_SUSPENSION] = {
+		.exit_state = sci_remote_node_context_await_suspend_state_exit,
+	},
 };
 
 void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
@@ -350,7 +417,7 @@
 	memset(rnc, 0, sizeof(struct sci_remote_node_context));
 
 	rnc->remote_node_index = remote_node_index;
-	rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+	rnc->destination_state = RNC_DEST_UNSPECIFIED;
 
 	sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
 }
@@ -359,6 +426,7 @@
 							   u32 event_code)
 {
 	enum scis_sds_remote_node_context_states state;
+	u32 next_state;
 
 	state = sci_rnc->sm.current_state_id;
 	switch (state) {
@@ -373,18 +441,18 @@
 		break;
 	case SCI_RNC_INVALIDATING:
 		if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
-			if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
-				state = SCI_RNC_INITIAL;
+			if (sci_rnc->destination_state == RNC_DEST_FINAL)
+				next_state = SCI_RNC_INITIAL;
 			else
-				state = SCI_RNC_POSTING;
-			sci_change_state(&sci_rnc->sm, state);
+				next_state = SCI_RNC_POSTING;
+			sci_change_state(&sci_rnc->sm, next_state);
 		} else {
 			switch (scu_get_event_type(event_code)) {
 			case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
 			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
 				/* We really dont care if the hardware is going to suspend
 				 * the device since it's being invalidated anyway */
-				dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
 					"%s: SCIC Remote Node Context 0x%p was "
 					"suspeneded by hardware while being "
 					"invalidated.\n", __func__, sci_rnc);
@@ -403,7 +471,7 @@
 			case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
 				/* We really dont care if the hardware is going to suspend
 				 * the device since it's being resumed anyway */
-				dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
 					"%s: SCIC Remote Node Context 0x%p was "
 					"suspeneded by hardware while being resumed.\n",
 					__func__, sci_rnc);
@@ -417,11 +485,11 @@
 		switch (scu_get_event_type(event_code)) {
 		case SCU_EVENT_TL_RNC_SUSPEND_TX:
 			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
-			sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+			sci_rnc->suspend_type = scu_get_event_type(event_code);
 			break;
 		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
 			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
-			sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+			sci_rnc->suspend_type = scu_get_event_type(event_code);
 			break;
 		default:
 			goto out;
@@ -430,27 +498,29 @@
 	case SCI_RNC_AWAIT_SUSPENSION:
 		switch (scu_get_event_type(event_code)) {
 		case SCU_EVENT_TL_RNC_SUSPEND_TX:
-			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
-			sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+			next_state = SCI_RNC_TX_SUSPENDED;
 			break;
 		case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
-			sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
-			sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+			next_state = SCI_RNC_TX_RX_SUSPENDED;
 			break;
 		default:
 			goto out;
 		}
+		if (sci_rnc->suspend_type == scu_get_event_type(event_code))
+			sci_change_state(&sci_rnc->sm, next_state);
 		break;
 	default:
 		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
-			 "%s: invalid state %d\n", __func__, state);
+			 "%s: invalid state: %s\n", __func__,
+			 rnc_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 	return SCI_SUCCESS;
 
  out:
 	dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
-		 "%s: code: %#x state: %d\n", __func__, event_code, state);
+		 "%s: code: %#x state: %s\n", __func__, event_code,
+		 rnc_state_name(state));
 	return SCI_FAILURE;
 
 }
@@ -464,20 +534,23 @@
 	state = sci_rnc->sm.current_state_id;
 	switch (state) {
 	case SCI_RNC_INVALIDATING:
-		sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
 		return SCI_SUCCESS;
 	case SCI_RNC_POSTING:
 	case SCI_RNC_RESUMING:
 	case SCI_RNC_READY:
 	case SCI_RNC_TX_SUSPENDED:
 	case SCI_RNC_TX_RX_SUSPENDED:
-	case SCI_RNC_AWAIT_SUSPENSION:
-		sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
 		sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
 		return SCI_SUCCESS;
+	case SCI_RNC_AWAIT_SUSPENSION:
+		sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+		return SCI_SUCCESS;
 	case SCI_RNC_INITIAL:
 		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
-			 "%s: invalid state %d\n", __func__, state);
+			 "%s: invalid state: %s\n", __func__,
+			 rnc_state_name(state));
 		/* We have decided that the destruct request on the remote node context
 		 * can not fail since it is either in the initial/destroyed state or is
 		 * can be destroyed.
@@ -485,35 +558,101 @@
 		return SCI_SUCCESS;
 	default:
 		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
-			 "%s: invalid state %d\n", __func__, state);
+			 "%s: invalid state %s\n", __func__,
+			 rnc_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
 
-enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
-						     u32 suspend_type,
-						     scics_sds_remote_node_context_callback cb_fn,
-						     void *cb_p)
+enum sci_status sci_remote_node_context_suspend(
+			struct sci_remote_node_context *sci_rnc,
+			enum sci_remote_node_suspension_reasons suspend_reason,
+			u32 suspend_type)
 {
-	enum scis_sds_remote_node_context_states state;
+	enum scis_sds_remote_node_context_states state
+		= sci_rnc->sm.current_state_id;
+	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+	enum sci_status status = SCI_FAILURE_INVALID_STATE;
+	enum sci_remote_node_context_destination_state dest_param =
+		RNC_DEST_UNSPECIFIED;
 
-	state = sci_rnc->sm.current_state_id;
-	if (state != SCI_RNC_READY) {
+	dev_dbg(scirdev_to_dev(idev),
+		"%s: current state %s, current suspend_type %x dest state %d,"
+			" arg suspend_reason %d, arg suspend_type %x",
+		__func__, rnc_state_name(state), sci_rnc->suspend_type,
+		sci_rnc->destination_state, suspend_reason,
+		suspend_type);
+
+	/* Disable automatic state continuations if explicitly suspending. */
+	if ((suspend_reason == SCI_HW_SUSPEND) ||
+	    (sci_rnc->destination_state == RNC_DEST_FINAL))
+		dest_param = sci_rnc->destination_state;
+
+	switch (state) {
+	case SCI_RNC_READY:
+		break;
+	case SCI_RNC_INVALIDATING:
+		if (sci_rnc->destination_state == RNC_DEST_FINAL) {
+			dev_warn(scirdev_to_dev(idev),
+				 "%s: already destroying %p\n",
+				 __func__, sci_rnc);
+			return SCI_FAILURE_INVALID_STATE;
+		}
+		/* Fall through and handle like SCI_RNC_POSTING */
+	case SCI_RNC_RESUMING:
+		/* Fall through and handle like SCI_RNC_POSTING */
+	case SCI_RNC_POSTING:
+		/* Set the destination state to AWAIT - this signals the
+		 * entry into the SCI_RNC_READY state that a suspension
+		 * needs to be done immediately.
+		 */
+		if (sci_rnc->destination_state != RNC_DEST_FINAL)
+			sci_rnc->destination_state = RNC_DEST_SUSPENDED;
+		sci_rnc->suspend_type = suspend_type;
+		sci_rnc->suspend_reason = suspend_reason;
+		return SCI_SUCCESS;
+
+	case SCI_RNC_TX_SUSPENDED:
+		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
+			status = SCI_SUCCESS;
+		break;
+	case SCI_RNC_TX_RX_SUSPENDED:
+		if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
+			status = SCI_SUCCESS;
+		break;
+	case SCI_RNC_AWAIT_SUSPENSION:
+		if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
+		    || (suspend_type == sci_rnc->suspend_type))
+			return SCI_SUCCESS;
+		break;
+	default:
 		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
-			 "%s: invalid state %d\n", __func__, state);
+			 "%s: invalid state %s\n", __func__,
+			 rnc_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
+	sci_rnc->destination_state = dest_param;
+	sci_rnc->suspend_type = suspend_type;
+	sci_rnc->suspend_reason = suspend_reason;
 
-	sci_rnc->user_callback   = cb_fn;
-	sci_rnc->user_cookie     = cb_p;
-	sci_rnc->suspension_code = suspend_type;
+	if (status == SCI_SUCCESS) { /* Already in the destination state? */
+		struct isci_host *ihost = idev->owning_port->owning_controller;
 
-	if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
-		sci_remote_device_post_request(rnc_to_dev(sci_rnc),
-						    SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
+		wake_up_all(&ihost->eventq); /* Let observers look. */
+		return SCI_SUCCESS;
 	}
+	if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
+	    (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {
 
-	sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
+		if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
+			isci_dev_set_hang_detection_timeout(idev, 0x00000001);
+
+		sci_remote_device_post_request(
+			idev, SCI_SOFTWARE_SUSPEND_CMD);
+	}
+	if (state != SCI_RNC_AWAIT_SUSPENSION)
+		sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
+
 	return SCI_SUCCESS;
 }
 
@@ -522,56 +661,86 @@
 						    void *cb_p)
 {
 	enum scis_sds_remote_node_context_states state;
+	struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
 
 	state = sci_rnc->sm.current_state_id;
+	dev_dbg(scirdev_to_dev(idev),
+		"%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
+			"dev resume path %s\n",
+		__func__, rnc_state_name(state), cb_fn, cb_p,
+		sci_rnc->destination_state,
+		test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
+			? "<abort active>" : "<normal>");
+
 	switch (state) {
 	case SCI_RNC_INITIAL:
 		if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
 			return SCI_FAILURE_INVALID_STATE;
 
-		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
-		sci_remote_node_context_construct_buffer(sci_rnc);
-		sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn,	cb_p,
+							RNC_DEST_READY);
+		if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
+			sci_remote_node_context_construct_buffer(sci_rnc);
+			sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+		}
 		return SCI_SUCCESS;
+
 	case SCI_RNC_POSTING:
 	case SCI_RNC_INVALIDATING:
 	case SCI_RNC_RESUMING:
-		if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
-			return SCI_FAILURE_INVALID_STATE;
-
-		sci_rnc->user_callback = cb_fn;
-		sci_rnc->user_cookie   = cb_p;
+		/* We are still waiting to post when a resume was
+		 * requested.
+		 */
+		switch (sci_rnc->destination_state) {
+		case RNC_DEST_SUSPENDED:
+		case RNC_DEST_SUSPENDED_RESUME:
+			/* Previously waiting to suspend after posting.
+			 * Now continue onto resumption.
+			 */
+			sci_remote_node_context_setup_to_resume(
+				sci_rnc, cb_fn, cb_p,
+				RNC_DEST_SUSPENDED_RESUME);
+			break;
+		default:
+			sci_remote_node_context_setup_to_resume(
+				sci_rnc, cb_fn, cb_p,
+				RNC_DEST_READY);
+			break;
+		}
 		return SCI_SUCCESS;
-	case SCI_RNC_TX_SUSPENDED: {
-		struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
-		struct domain_device *dev = idev->domain_dev;
 
-		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
-
-		/* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
-		if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
-			sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
-		else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
-			if (idev->is_direct_attached) {
-				/* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
-				sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
-			} else {
-				sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
-			}
-		} else
-			return SCI_FAILURE;
-		return SCI_SUCCESS;
-	}
+	case SCI_RNC_TX_SUSPENDED:
 	case SCI_RNC_TX_RX_SUSPENDED:
-		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
-		sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
-		return SCI_FAILURE_INVALID_STATE;
+		{
+			struct domain_device *dev = idev->domain_dev;
+			/* If this is an expander attached SATA device we must
+			 * invalidate and repost the RNC since this is the only
+			 * way to clear the TCi to NCQ tag mapping table for
+			 * the RNi. All other device types we can just resume.
+			 */
+			sci_remote_node_context_setup_to_resume(
+				sci_rnc, cb_fn, cb_p, RNC_DEST_READY);
+
+			if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
+				if ((dev_is_sata(dev) && dev->parent) ||
+				    (sci_rnc->destination_state == RNC_DEST_FINAL))
+					sci_change_state(&sci_rnc->sm,
+							 SCI_RNC_INVALIDATING);
+				else
+					sci_change_state(&sci_rnc->sm,
+							 SCI_RNC_RESUMING);
+			}
+		}
+		return SCI_SUCCESS;
+
 	case SCI_RNC_AWAIT_SUSPENSION:
-		sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+		sci_remote_node_context_setup_to_resume(
+			sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
 		return SCI_SUCCESS;
 	default:
 		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
-			 "%s: invalid state %d\n", __func__, state);
+			 "%s: invalid state %s\n", __func__,
+			 rnc_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -590,35 +759,51 @@
 	case SCI_RNC_TX_RX_SUSPENDED:
 	case SCI_RNC_AWAIT_SUSPENSION:
 		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
-			 "%s: invalid state %d\n", __func__, state);
+			 "%s: invalid state %s\n", __func__,
+			 rnc_state_name(state));
 		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
 	default:
-		break;
+		dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			"%s: invalid state %s\n", __func__,
+			rnc_state_name(state));
+		return SCI_FAILURE_INVALID_STATE;
 	}
-	dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
-		"%s: requested to start IO while still resuming, %d\n",
-		__func__, state);
-	return SCI_FAILURE_INVALID_STATE;
 }
 
-enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
-							struct isci_request *ireq)
+enum sci_status sci_remote_node_context_start_task(
+	struct sci_remote_node_context *sci_rnc,
+	struct isci_request *ireq,
+	scics_sds_remote_node_context_callback cb_fn,
+	void *cb_p)
+{
+	enum sci_status status = sci_remote_node_context_resume(sci_rnc,
+								cb_fn, cb_p);
+	if (status != SCI_SUCCESS)
+		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+			"%s: resume failed: %d\n", __func__, status);
+	return status;
+}
+
+int sci_remote_node_context_is_safe_to_abort(
+	struct sci_remote_node_context *sci_rnc)
 {
 	enum scis_sds_remote_node_context_states state;
 
 	state = sci_rnc->sm.current_state_id;
 	switch (state) {
+	case SCI_RNC_INVALIDATING:
+	case SCI_RNC_TX_RX_SUSPENDED:
+		return 1;
+	case SCI_RNC_POSTING:
 	case SCI_RNC_RESUMING:
 	case SCI_RNC_READY:
-	case SCI_RNC_AWAIT_SUSPENSION:
-		return SCI_SUCCESS;
 	case SCI_RNC_TX_SUSPENDED:
-	case SCI_RNC_TX_RX_SUSPENDED:
-		sci_remote_node_context_resume(sci_rnc, NULL, NULL);
-		return SCI_SUCCESS;
+	case SCI_RNC_AWAIT_SUSPENSION:
+	case SCI_RNC_INITIAL:
+		return 0;
 	default:
 		dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
 			 "%s: invalid state %d\n", __func__, state);
-		return SCI_FAILURE_INVALID_STATE;
+		return 0;
 	}
 }
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index a241e0f..a703b9c 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -75,8 +75,13 @@
  */
 #define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX    0x0FFF
 
-#define SCU_HARDWARE_SUSPENSION  (0)
-#define SCI_SOFTWARE_SUSPENSION  (1)
+enum sci_remote_node_suspension_reasons {
+	SCI_HW_SUSPEND,
+	SCI_SW_SUSPEND_NORMAL,
+	SCI_SW_SUSPEND_LINKHANG_DETECT
+};
+#define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX
+#define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX
 
 struct isci_request;
 struct isci_remote_device;
@@ -137,9 +142,13 @@
  * node context.
  */
 enum sci_remote_node_context_destination_state {
-	SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
-	SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
-	SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
+	RNC_DEST_UNSPECIFIED,
+	RNC_DEST_READY,
+	RNC_DEST_FINAL,
+	RNC_DEST_SUSPENDED,       /* Set when suspended during post/invalidate */
+	RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting
+				   * or invalidating and already suspending.
+				   */
 };
 
 /**
@@ -156,10 +165,12 @@
 	u16 remote_node_index;
 
 	/**
-	 * This field is the recored suspension code or the reason for the remote node
+	 * This field is the recorded suspension type of the remote node
 	 * context suspension.
 	 */
-	u32 suspension_code;
+	u32 suspend_type;
+	enum sci_remote_node_suspension_reasons suspend_reason;
+	u32 suspend_count;
 
 	/**
 	 * This field is true if the remote node context is resuming from its current
@@ -193,6 +204,8 @@
 bool sci_remote_node_context_is_ready(
 	struct sci_remote_node_context *sci_rnc);
 
+bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc);
+
 enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
 							   u32 event_code);
 enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
@@ -200,14 +213,24 @@
 						      void *callback_parameter);
 enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
 						     u32 suspend_type,
-						     scics_sds_remote_node_context_callback cb_fn,
-						     void *cb_p);
+						     u32 suspension_code);
 enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
 						    scics_sds_remote_node_context_callback cb_fn,
 						    void *cb_p);
 enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
-							struct isci_request *ireq);
+						   struct isci_request *ireq,
+						   scics_sds_remote_node_context_callback cb_fn,
+						   void *cb_p);
 enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
 						      struct isci_request *ireq);
+int sci_remote_node_context_is_safe_to_abort(
+	struct sci_remote_node_context *sci_rnc);
 
+static inline bool sci_remote_node_context_is_being_destroyed(
+	struct sci_remote_node_context *sci_rnc)
+{
+	return (sci_rnc->destination_state == RNC_DEST_FINAL)
+		|| ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL)
+		    && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED));
+}
 #endif  /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
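The new destination states combine with the state-machine state in the sci_remote_node_context_is_being_destroyed() inline added above. A minimal standalone sketch of that predicate, using simplified stand-in enums rather than the driver's real types:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the enums shown in the hunk above. */
enum rnc_dest { RNC_DEST_UNSPECIFIED, RNC_DEST_READY, RNC_DEST_FINAL,
		RNC_DEST_SUSPENDED, RNC_DEST_SUSPENDED_RESUME };
enum rnc_state { SCI_RNC_INITIAL, SCI_RNC_POSTING, SCI_RNC_READY };

struct rnc { enum rnc_state state; enum rnc_dest dest; };

/* "Being destroyed" means either an explicit FINAL destination, or the
 * state machine is back at INITIAL with no destination recorded at all. */
static bool rnc_is_being_destroyed(const struct rnc *rnc)
{
	return rnc->dest == RNC_DEST_FINAL ||
	       (rnc->state == SCI_RNC_INITIAL &&
		rnc->dest == RNC_DEST_UNSPECIFIED);
}

int main(void)
{
	struct rnc a = { SCI_RNC_READY,   RNC_DEST_FINAL };
	struct rnc b = { SCI_RNC_INITIAL, RNC_DEST_UNSPECIFIED };
	struct rnc c = { SCI_RNC_READY,   RNC_DEST_READY };

	printf("%d %d %d\n", rnc_is_being_destroyed(&a),
	       rnc_is_being_destroyed(&b),
	       rnc_is_being_destroyed(&c)); /* prints: 1 1 0 */
	return 0;
}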
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 2def1e3..7a0431c 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -92,11 +92,11 @@
 	if (idx == 0) {
 		offset = (void *) &ireq->tc->sgl_pair_ab -
 			 (void *) &ihost->task_context_table[0];
-		return ihost->task_context_dma + offset;
+		return ihost->tc_dma + offset;
 	} else if (idx == 1) {
 		offset = (void *) &ireq->tc->sgl_pair_cd -
 			 (void *) &ihost->task_context_table[0];
-		return ihost->task_context_dma + offset;
+		return ihost->tc_dma + offset;
 	}
 
 	return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
@@ -730,7 +730,7 @@
 {
 	struct sas_task *task = isci_request_access_task(ireq);
 
-	ireq->protocol = SCIC_SSP_PROTOCOL;
+	ireq->protocol = SAS_PROTOCOL_SSP;
 
 	scu_ssp_io_request_construct_task_context(ireq,
 						  task->data_dir,
@@ -763,7 +763,7 @@
 	bool copy = false;
 	struct sas_task *task = isci_request_access_task(ireq);
 
-	ireq->protocol = SCIC_STP_PROTOCOL;
+	ireq->protocol = SAS_PROTOCOL_STP;
 
 	copy = (task->data_dir == DMA_NONE) ? false : true;
 
@@ -863,6 +863,8 @@
 
 	switch (state) {
 	case SCI_REQ_CONSTRUCTED:
+		/* Set to make sure no HW terminate posting is done: */
+		set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
 		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
 		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
 		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
@@ -883,8 +885,7 @@
 	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
 	case SCI_REQ_ATAPI_WAIT_D2H:
 	case SCI_REQ_ATAPI_WAIT_TC_COMP:
-		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
-		return SCI_SUCCESS;
+		/* Fall through and change state to ABORTING... */
 	case SCI_REQ_TASK_WAIT_TC_RESP:
 		/* The task frame was already confirmed to have been
 		 * sent by the SCU HW.  Since the state machine is
@@ -893,20 +894,21 @@
 		 * and don't wait for the task response.
 		 */
 		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
-		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
-		return SCI_SUCCESS;
+		/* Fall through and handle like ABORTING... */
 	case SCI_REQ_ABORTING:
-		/* If a request has a termination requested twice, return
-		 * a failure indication, since HW confirmation of the first
-		 * abort is still outstanding.
+		if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
+			set_bit(IREQ_PENDING_ABORT, &ireq->flags);
+		else
+			clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
+		/* If the request is only waiting on the remote device
+		 * suspension, return SUCCESS so the caller will wait too.
 		 */
+		return SCI_SUCCESS;
 	case SCI_REQ_COMPLETED:
 	default:
 		dev_warn(&ireq->owning_controller->pdev->dev,
 			 "%s: SCIC IO Request requested to abort while in wrong "
-			 "state %d\n",
-			 __func__,
-			 ireq->sm.current_state_id);
+			 "state %d\n", __func__, ireq->sm.current_state_id);
 		break;
 	}
 
@@ -1070,7 +1072,7 @@
 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
-		if (ireq->protocol == SCIC_STP_PROTOCOL) {
+		if (ireq->protocol == SAS_PROTOCOL_STP) {
 			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
 					   SCU_COMPLETION_TL_STATUS_SHIFT;
 			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
@@ -2117,7 +2119,7 @@
 		 */
 		if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
 			sci_remote_device_suspend(ireq->target_device,
-				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+						  SCI_SW_SUSPEND_NORMAL);
 
 			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
 			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
@@ -2138,13 +2140,6 @@
 	/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
 	 * - this comes only for B0
 	 */
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
-		sci_remote_device_suspend(ireq->target_device,
-			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
-		/* Fall through to the default case */
 	default:
 		/* All other completion status cause the IO to be complete. */
 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
@@ -2262,15 +2257,151 @@
 	return status;
 }
 
+static int sci_request_smp_completion_status_is_tx_suspend(
+	unsigned int completion_status)
+{
+	switch (completion_status) {
+	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+		return 1;
+	}
+	return 0;
+}
+
+static int sci_request_smp_completion_status_is_tx_rx_suspend(
+	unsigned int completion_status)
+{
+	return 0; /* There are no Tx/Rx SMP suspend conditions. */
+}
+
+static int sci_request_ssp_completion_status_is_tx_suspend(
+	unsigned int completion_status)
+{
+	switch (completion_status) {
+	case SCU_TASK_DONE_TX_RAW_CMD_ERR:
+	case SCU_TASK_DONE_LF_ERR:
+	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+		return 1;
+	}
+	return 0;
+}
+
+static int sci_request_ssp_completion_status_is_tx_rx_suspend(
+	unsigned int completion_status)
+{
+	return 0; /* There are no Tx/Rx SSP suspend conditions. */
+}
+
+static int sci_request_stpsata_completion_status_is_tx_suspend(
+	unsigned int completion_status)
+{
+	switch (completion_status) {
+	case SCU_TASK_DONE_TX_RAW_CMD_ERR:
+	case SCU_TASK_DONE_LL_R_ERR:
+	case SCU_TASK_DONE_LL_PERR:
+	case SCU_TASK_DONE_REG_ERR:
+	case SCU_TASK_DONE_SDB_ERR:
+	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+		return 1;
+	}
+	return 0;
+}
+
+
+static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
+	unsigned int completion_status)
+{
+	switch (completion_status) {
+	case SCU_TASK_DONE_LF_ERR:
+	case SCU_TASK_DONE_LL_SY_TERM:
+	case SCU_TASK_DONE_LL_LF_TERM:
+	case SCU_TASK_DONE_BREAK_RCVD:
+	case SCU_TASK_DONE_INV_FIS_LEN:
+	case SCU_TASK_DONE_UNEXP_FIS:
+	case SCU_TASK_DONE_UNEXP_SDBFIS:
+	case SCU_TASK_DONE_MAX_PLD_ERR:
+		return 1;
+	}
+	return 0;
+}
+
+static void sci_request_handle_suspending_completions(
+	struct isci_request *ireq,
+	u32 completion_code)
+{
+	int is_tx = 0;
+	int is_tx_rx = 0;
+
+	switch (ireq->protocol) {
+	case SAS_PROTOCOL_SMP:
+		is_tx = sci_request_smp_completion_status_is_tx_suspend(
+			completion_code);
+		is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
+			completion_code);
+		break;
+	case SAS_PROTOCOL_SSP:
+		is_tx = sci_request_ssp_completion_status_is_tx_suspend(
+			completion_code);
+		is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
+			completion_code);
+		break;
+	case SAS_PROTOCOL_STP:
+		is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
+			completion_code);
+		is_tx_rx =
+			sci_request_stpsata_completion_status_is_tx_rx_suspend(
+				completion_code);
+		break;
+	default:
+		dev_warn(&ireq->isci_host->pdev->dev,
+			 "%s: request %p has no valid protocol\n",
+			 __func__, ireq);
+		break;
+	}
+	if (is_tx || is_tx_rx) {
+		BUG_ON(is_tx && is_tx_rx);
+
+		sci_remote_node_context_suspend(
+			&ireq->target_device->rnc,
+			SCI_HW_SUSPEND,
+			(is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
+				   : SCU_EVENT_TL_RNC_SUSPEND_TX);
+	}
+}
+
 enum sci_status
 sci_io_request_tc_completion(struct isci_request *ireq,
-				  u32 completion_code)
+			     u32 completion_code)
 {
 	enum sci_base_request_states state;
 	struct isci_host *ihost = ireq->owning_controller;
 
 	state = ireq->sm.current_state_id;
 
+	/* Decode those completions that signal upcoming suspension events. */
+	sci_request_handle_suspending_completions(
+		ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
+
 	switch (state) {
 	case SCI_REQ_STARTED:
 		return request_started_state_tc_event(ireq, completion_code);
@@ -2362,9 +2493,6 @@
  * @request: This parameter is the completed isci_request object.
  * @response_ptr: This parameter specifies the service response for the I/O.
  * @status_ptr: This parameter specifies the exec status for the I/O.
- * @complete_to_host_ptr: This parameter specifies the action to be taken by
- *    the LLDD with respect to completing this request or forcing an abort
- *    condition on the I/O.
  * @open_rej_reason: This parameter specifies the encoded reason for the
  *    abandon-class reject.
  *
@@ -2375,14 +2503,12 @@
 	struct sas_task *task,
 	enum service_response *response_ptr,
 	enum exec_status *status_ptr,
-	enum isci_completion_selection *complete_to_host_ptr,
 	enum sas_open_rej_reason open_rej_reason)
 {
 	/* Task in the target is done. */
 	set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
 	*response_ptr                     = SAS_TASK_UNDELIVERED;
 	*status_ptr                       = SAS_OPEN_REJECT;
-	*complete_to_host_ptr             = isci_perform_normal_io_completion;
 	task->task_status.open_rej_reason = open_rej_reason;
 }
 
@@ -2392,9 +2518,6 @@
  * @request: This parameter is the completed isci_request object.
  * @response_ptr: This parameter specifies the service response for the I/O.
  * @status_ptr: This parameter specifies the exec status for the I/O.
- * @complete_to_host_ptr: This parameter specifies the action to be taken by
- *    the LLDD with respect to completing this request or forcing an abort
- *    condition on the I/O.
  *
  * none.
  */
@@ -2403,8 +2526,7 @@
 	struct isci_request *request,
 	struct sas_task *task,
 	enum service_response *response_ptr,
-	enum exec_status *status_ptr,
-	enum isci_completion_selection *complete_to_host_ptr)
+	enum exec_status *status_ptr)
 {
 	unsigned int cstatus;
 
@@ -2445,9 +2567,6 @@
 				*status_ptr = SAS_ABORTED_TASK;
 
 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
-			*complete_to_host_ptr =
-				isci_perform_normal_io_completion;
 		} else {
 			/* Task in the target is not done. */
 			*response_ptr = SAS_TASK_UNDELIVERED;
@@ -2458,9 +2577,6 @@
 				*status_ptr = SAM_STAT_TASK_ABORTED;
 
 			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
-			*complete_to_host_ptr =
-				isci_perform_error_io_completion;
 		}
 
 		break;
@@ -2489,8 +2605,6 @@
 			*status_ptr = SAS_ABORTED_TASK;
 
 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
-		*complete_to_host_ptr = isci_perform_normal_io_completion;
 		break;
 
 
@@ -2501,7 +2615,7 @@
 
 		isci_request_set_open_reject_status(
 			request, task, response_ptr, status_ptr,
-			complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
+			SAS_OREJ_WRONG_DEST);
 		break;
 
 	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
@@ -2511,56 +2625,56 @@
 		 */
 		isci_request_set_open_reject_status(
 			request, task, response_ptr, status_ptr,
-			complete_to_host_ptr, SAS_OREJ_RESV_AB0);
+			SAS_OREJ_RESV_AB0);
 		break;
 
 	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
 
 		isci_request_set_open_reject_status(
 			request, task, response_ptr, status_ptr,
-			complete_to_host_ptr, SAS_OREJ_RESV_AB1);
+			SAS_OREJ_RESV_AB1);
 		break;
 
 	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
 
 		isci_request_set_open_reject_status(
 			request, task, response_ptr, status_ptr,
-			complete_to_host_ptr, SAS_OREJ_RESV_AB2);
+			SAS_OREJ_RESV_AB2);
 		break;
 
 	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
 
 		isci_request_set_open_reject_status(
 			request, task, response_ptr, status_ptr,
-			complete_to_host_ptr, SAS_OREJ_RESV_AB3);
+			SAS_OREJ_RESV_AB3);
 		break;
 
 	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
 
 		isci_request_set_open_reject_status(
 			request, task, response_ptr, status_ptr,
-			complete_to_host_ptr, SAS_OREJ_BAD_DEST);
+			SAS_OREJ_BAD_DEST);
 		break;
 
 	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
 
 		isci_request_set_open_reject_status(
 			request, task, response_ptr, status_ptr,
-			complete_to_host_ptr, SAS_OREJ_STP_NORES);
+			SAS_OREJ_STP_NORES);
 		break;
 
 	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
 
 		isci_request_set_open_reject_status(
 			request, task, response_ptr, status_ptr,
-			complete_to_host_ptr, SAS_OREJ_EPROTO);
+			SAS_OREJ_EPROTO);
 		break;
 
 	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
 
 		isci_request_set_open_reject_status(
 			request, task, response_ptr, status_ptr,
-			complete_to_host_ptr, SAS_OREJ_CONN_RATE);
+			SAS_OREJ_CONN_RATE);
 		break;
 
 	case SCU_TASK_DONE_LL_R_ERR:
@@ -2592,97 +2706,14 @@
 		*response_ptr = SAS_TASK_UNDELIVERED;
 		*status_ptr = SAM_STAT_TASK_ABORTED;
 
-		if (task->task_proto == SAS_PROTOCOL_SMP) {
+		if (task->task_proto == SAS_PROTOCOL_SMP)
 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
-			*complete_to_host_ptr = isci_perform_normal_io_completion;
-		} else {
+		else
 			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
-			*complete_to_host_ptr = isci_perform_error_io_completion;
-		}
 		break;
 	}
 }
 
-/**
- * isci_task_save_for_upper_layer_completion() - This function saves the
- *    request for later completion to the upper layer driver.
- * @host: This parameter is a pointer to the host on which the the request
- *    should be queued (either as an error or success).
- * @request: This parameter is the completed request.
- * @response: This parameter is the response code for the completed task.
- * @status: This parameter is the status code for the completed task.
- *
- * none.
- */
-static void isci_task_save_for_upper_layer_completion(
-	struct isci_host *host,
-	struct isci_request *request,
-	enum service_response response,
-	enum exec_status status,
-	enum isci_completion_selection task_notification_selection)
-{
-	struct sas_task *task = isci_request_access_task(request);
-
-	task_notification_selection
-		= isci_task_set_completion_status(task, response, status,
-						  task_notification_selection);
-
-	/* Tasks aborted specifically by a call to the lldd_abort_task
-	 * function should not be completed to the host in the regular path.
-	 */
-	switch (task_notification_selection) {
-
-	case isci_perform_normal_io_completion:
-		/* Normal notification (task_done) */
-
-		/* Add to the completed list. */
-		list_add(&request->completed_node,
-			 &host->requests_to_complete);
-
-		/* Take the request off the device's pending request list. */
-		list_del_init(&request->dev_node);
-		break;
-
-	case isci_perform_aborted_io_completion:
-		/* No notification to libsas because this request is
-		 * already in the abort path.
-		 */
-		/* Wake up whatever process was waiting for this
-		 * request to complete.
-		 */
-		WARN_ON(request->io_request_completion == NULL);
-
-		if (request->io_request_completion != NULL) {
-
-			/* Signal whoever is waiting that this
-			* request is complete.
-			*/
-			complete(request->io_request_completion);
-		}
-		break;
-
-	case isci_perform_error_io_completion:
-		/* Use sas_task_abort */
-		/* Add to the aborted list. */
-		list_add(&request->completed_node,
-			 &host->requests_to_errorback);
-		break;
-
-	default:
-		/* Add to the error to libsas list. */
-		list_add(&request->completed_node,
-			 &host->requests_to_errorback);
-		break;
-	}
-	dev_dbg(&host->pdev->dev,
-		"%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
-		__func__, task_notification_selection, task,
-		(task) ? task->task_status.resp : 0, response,
-		(task) ? task->task_status.stat : 0, status);
-}
-
 static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
 {
 	struct task_status_struct *ts = &task->task_status;
@@ -2715,295 +2746,164 @@
 	struct isci_remote_device *idev = request->target_device;
 	enum service_response response = SAS_TASK_UNDELIVERED;
 	enum exec_status status = SAS_ABORTED_TASK;
-	enum isci_request_status request_status;
-	enum isci_completion_selection complete_to_host
-		= isci_perform_normal_io_completion;
 
 	dev_dbg(&ihost->pdev->dev,
-		"%s: request = %p, task = %p,\n"
+		"%s: request = %p, task = %p, "
 		"task->data_dir = %d completion_status = 0x%x\n",
-		__func__,
-		request,
-		task,
-		task->data_dir,
-		completion_status);
+		__func__, request, task, task->data_dir, completion_status);
 
-	spin_lock(&request->state_lock);
-	request_status = request->status;
+	/* The request is done from an SCU HW perspective. */
 
-	/* Decode the request status.  Note that if the request has been
-	 * aborted by a task management function, we don't care
-	 * what the status is.
-	 */
-	switch (request_status) {
+	/* This is an active request being completed from the core. */
+	switch (completion_status) {
 
-	case aborted:
-		/* "aborted" indicates that the request was aborted by a task
-		 * management function, since once a task management request is
-		 * perfomed by the device, the request only completes because
-		 * of the subsequent driver terminate.
-		 *
-		 * Aborted also means an external thread is explicitly managing
-		 * this request, so that we do not complete it up the stack.
-		 *
-		 * The target is still there (since the TMF was successful).
-		 */
+	case SCI_IO_FAILURE_RESPONSE_VALID:
+		dev_dbg(&ihost->pdev->dev,
+			"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
+			__func__, request, task);
+
+		if (sas_protocol_ata(task->task_proto)) {
+			isci_process_stp_response(task, &request->stp.rsp);
+		} else if (SAS_PROTOCOL_SSP == task->task_proto) {
+
+			/* crack the iu response buffer. */
+			resp_iu = &request->ssp.rsp;
+			isci_request_process_response_iu(task, resp_iu,
+							 &ihost->pdev->dev);
+
+		} else if (SAS_PROTOCOL_SMP == task->task_proto) {
+
+			dev_err(&ihost->pdev->dev,
+				"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
+					"SAS_PROTOCOL_SMP protocol\n",
+				__func__);
+
+		} else
+			dev_err(&ihost->pdev->dev,
+				"%s: unknown protocol\n", __func__);
+
+		/* use the task status set in the task struct by the
+		* isci_request_process_response_iu call.
+		*/
 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		response = task->task_status.resp;
+		status = task->task_status.stat;
+		break;
+
+	case SCI_IO_SUCCESS:
+	case SCI_IO_SUCCESS_IO_DONE_EARLY:
+
 		response = SAS_TASK_COMPLETE;
+		status   = SAM_STAT_GOOD;
+		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+		if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+
+			/* This was an SSP / STP / SATA transfer.
+			* There is a possibility that less data than
+			* the maximum was transferred.
+			*/
+			u32 transferred_length = sci_req_tx_bytes(request);
+
+			task->task_status.residual
+				= task->total_xfer_len - transferred_length;
+
+			/* If there were residual bytes, call this an
+			* underrun.
+			*/
+			if (task->task_status.residual != 0)
+				status = SAS_DATA_UNDERRUN;
+
+			dev_dbg(&ihost->pdev->dev,
+				"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
+				__func__, status);
+
+		} else
+			dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
+				__func__);
+		break;
+
+	case SCI_IO_FAILURE_TERMINATED:
+
+		dev_dbg(&ihost->pdev->dev,
+			"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
+			__func__, request, task);
+
+		/* The request was terminated explicitly. */
+		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		response = SAS_TASK_UNDELIVERED;
 
 		/* See if the device has been/is being stopped. Note
-		 * that we ignore the quiesce state, since we are
-		 * concerned about the actual device state.
-		 */
+		* that we ignore the quiesce state, since we are
+		* concerned about the actual device state.
+		*/
+		if (!idev)
+			status = SAS_DEVICE_UNKNOWN;
+		else
+			status = SAS_ABORTED_TASK;
+		break;
+
+	case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
+
+		isci_request_handle_controller_specific_errors(idev, request,
+							       task, &response,
+							       &status);
+		break;
+
+	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
+		/* This is a special case, in that the I/O completion
+		* is telling us that the device needs a reset.
+		* In order for the device reset condition to be
+		* noticed, the I/O has to be handled in the error
+		* handler.  Set the reset flag and cause the
+		* SCSI error thread to be scheduled.
+		*/
+		spin_lock_irqsave(&task->task_state_lock, task_flags);
+		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+		spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+
+		/* Fail the I/O. */
+		response = SAS_TASK_UNDELIVERED;
+		status = SAM_STAT_TASK_ABORTED;
+
+		clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+		break;
+
+	case SCI_FAILURE_RETRY_REQUIRED:
+
+		/* Fail the I/O so it can be retried. */
+		response = SAS_TASK_UNDELIVERED;
 		if (!idev)
 			status = SAS_DEVICE_UNKNOWN;
 		else
 			status = SAS_ABORTED_TASK;
 
-		complete_to_host = isci_perform_aborted_io_completion;
-		/* This was an aborted request. */
-
-		spin_unlock(&request->state_lock);
-		break;
-
-	case aborting:
-		/* aborting means that the task management function tried and
-		 * failed to abort the request. We need to note the request
-		 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
-		 * target as down.
-		 *
-		 * Aborting also means an external thread is explicitly managing
-		 * this request, so that we do not complete it up the stack.
-		 */
 		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-		response = SAS_TASK_UNDELIVERED;
-
-		if (!idev)
-			/* The device has been /is being stopped. Note that
-			 * we ignore the quiesce state, since we are
-			 * concerned about the actual device state.
-			 */
-			status = SAS_DEVICE_UNKNOWN;
-		else
-			status = SAS_PHY_DOWN;
-
-		complete_to_host = isci_perform_aborted_io_completion;
-
-		/* This was an aborted request. */
-
-		spin_unlock(&request->state_lock);
 		break;
 
-	case terminating:
-
-		/* This was an terminated request.  This happens when
-		 * the I/O is being terminated because of an action on
-		 * the device (reset, tear down, etc.), and the I/O needs
-		 * to be completed up the stack.
-		 */
-		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-		response = SAS_TASK_UNDELIVERED;
-
-		/* See if the device has been/is being stopped. Note
-		 * that we ignore the quiesce state, since we are
-		 * concerned about the actual device state.
-		 */
-		if (!idev)
-			status = SAS_DEVICE_UNKNOWN;
-		else
-			status = SAS_ABORTED_TASK;
-
-		complete_to_host = isci_perform_aborted_io_completion;
-
-		/* This was a terminated request. */
-
-		spin_unlock(&request->state_lock);
-		break;
-
-	case dead:
-		/* This was a terminated request that timed-out during the
-		 * termination process.  There is no task to complete to
-		 * libsas.
-		 */
-		complete_to_host = isci_perform_normal_io_completion;
-		spin_unlock(&request->state_lock);
-		break;
 
 	default:
+		/* Catch any otherwise unhandled error codes here. */
+		dev_dbg(&ihost->pdev->dev,
+			"%s: invalid completion code: 0x%x - "
+				"isci_request = %p\n",
+			__func__, completion_status, request);
 
-		/* The request is done from an SCU HW perspective. */
-		request->status = completed;
+		response = SAS_TASK_UNDELIVERED;
 
-		spin_unlock(&request->state_lock);
+		/* See if the device has been/is being stopped. Note
+		* that we ignore the quiesce state, since we are
+		* concerned about the actual device state.
+		*/
+		if (!idev)
+			status = SAS_DEVICE_UNKNOWN;
+		else
+			status = SAS_ABORTED_TASK;
 
-		/* This is an active request being completed from the core. */
-		switch (completion_status) {
-
-		case SCI_IO_FAILURE_RESPONSE_VALID:
-			dev_dbg(&ihost->pdev->dev,
-				"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
-				__func__,
-				request,
-				task);
-
-			if (sas_protocol_ata(task->task_proto)) {
-				isci_process_stp_response(task, &request->stp.rsp);
-			} else if (SAS_PROTOCOL_SSP == task->task_proto) {
-
-				/* crack the iu response buffer. */
-				resp_iu = &request->ssp.rsp;
-				isci_request_process_response_iu(task, resp_iu,
-								 &ihost->pdev->dev);
-
-			} else if (SAS_PROTOCOL_SMP == task->task_proto) {
-
-				dev_err(&ihost->pdev->dev,
-					"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
-					"SAS_PROTOCOL_SMP protocol\n",
-					__func__);
-
-			} else
-				dev_err(&ihost->pdev->dev,
-					"%s: unknown protocol\n", __func__);
-
-			/* use the task status set in the task struct by the
-			 * isci_request_process_response_iu call.
-			 */
+		if (SAS_PROTOCOL_SMP == task->task_proto)
 			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-			response = task->task_status.resp;
-			status = task->task_status.stat;
-			break;
-
-		case SCI_IO_SUCCESS:
-		case SCI_IO_SUCCESS_IO_DONE_EARLY:
-
-			response = SAS_TASK_COMPLETE;
-			status   = SAM_STAT_GOOD;
-			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
-			if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
-
-				/* This was an SSP / STP / SATA transfer.
-				 * There is a possibility that less data than
-				 * the maximum was transferred.
-				 */
-				u32 transferred_length = sci_req_tx_bytes(request);
-
-				task->task_status.residual
-					= task->total_xfer_len - transferred_length;
-
-				/* If there were residual bytes, call this an
-				 * underrun.
-				 */
-				if (task->task_status.residual != 0)
-					status = SAS_DATA_UNDERRUN;
-
-				dev_dbg(&ihost->pdev->dev,
-					"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
-					__func__,
-					status);
-
-			} else
-				dev_dbg(&ihost->pdev->dev,
-					"%s: SCI_IO_SUCCESS\n",
-					__func__);
-
-			break;
-
-		case SCI_IO_FAILURE_TERMINATED:
-			dev_dbg(&ihost->pdev->dev,
-				"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
-				__func__,
-				request,
-				task);
-
-			/* The request was terminated explicitly.  No handling
-			 * is needed in the SCSI error handler path.
-			 */
-			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-			response = SAS_TASK_UNDELIVERED;
-
-			/* See if the device has been/is being stopped. Note
-			 * that we ignore the quiesce state, since we are
-			 * concerned about the actual device state.
-			 */
-			if (!idev)
-				status = SAS_DEVICE_UNKNOWN;
-			else
-				status = SAS_ABORTED_TASK;
-
-			complete_to_host = isci_perform_normal_io_completion;
-			break;
-
-		case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
-
-			isci_request_handle_controller_specific_errors(
-				idev, request, task, &response, &status,
-				&complete_to_host);
-
-			break;
-
-		case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
-			/* This is a special case, in that the I/O completion
-			 * is telling us that the device needs a reset.
-			 * In order for the device reset condition to be
-			 * noticed, the I/O has to be handled in the error
-			 * handler.  Set the reset flag and cause the
-			 * SCSI error thread to be scheduled.
-			 */
-			spin_lock_irqsave(&task->task_state_lock, task_flags);
-			task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
-			spin_unlock_irqrestore(&task->task_state_lock, task_flags);
-
-			/* Fail the I/O. */
-			response = SAS_TASK_UNDELIVERED;
-			status = SAM_STAT_TASK_ABORTED;
-
-			complete_to_host = isci_perform_error_io_completion;
+		else
 			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-			break;
-
-		case SCI_FAILURE_RETRY_REQUIRED:
-
-			/* Fail the I/O so it can be retried. */
-			response = SAS_TASK_UNDELIVERED;
-			if (!idev)
-				status = SAS_DEVICE_UNKNOWN;
-			else
-				status = SAS_ABORTED_TASK;
-
-			complete_to_host = isci_perform_normal_io_completion;
-			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-			break;
-
-
-		default:
-			/* Catch any otherwise unhandled error codes here. */
-			dev_dbg(&ihost->pdev->dev,
-				 "%s: invalid completion code: 0x%x - "
-				 "isci_request = %p\n",
-				 __func__, completion_status, request);
-
-			response = SAS_TASK_UNDELIVERED;
-
-			/* See if the device has been/is being stopped. Note
-			 * that we ignore the quiesce state, since we are
-			 * concerned about the actual device state.
-			 */
-			if (!idev)
-				status = SAS_DEVICE_UNKNOWN;
-			else
-				status = SAS_ABORTED_TASK;
-
-			if (SAS_PROTOCOL_SMP == task->task_proto) {
-				set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-				complete_to_host = isci_perform_normal_io_completion;
-			} else {
-				clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-				complete_to_host = isci_perform_error_io_completion;
-			}
-			break;
-		}
 		break;
 	}
 
@@ -3038,10 +2938,18 @@
 		break;
 	}
 
-	/* Put the completed request on the correct list */
-	isci_task_save_for_upper_layer_completion(ihost, request, response,
-						  status, complete_to_host
-						  );
+	spin_lock_irqsave(&task->task_state_lock, task_flags);
+
+	task->task_status.resp = response;
+	task->task_status.stat = status;
+
+	if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
+		/* Normal notification (task_done) */
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+					    SAS_TASK_STATE_PENDING);
+	}
+	spin_unlock_irqrestore(&task->task_state_lock, task_flags);
 
 	/* complete the io request to the core. */
 	sci_controller_complete_io(ihost, request->target_device, request);
@@ -3051,6 +2959,8 @@
 	 * task to recognize the already completed case.
 	 */
 	set_bit(IREQ_TERMINATED, &request->flags);
+
+	ireq_done(ihost, request, task);
 }
 
 static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
@@ -3169,7 +3079,7 @@
 	sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
 
 	ireq->target_device = idev;
-	ireq->protocol = SCIC_NO_PROTOCOL;
+	ireq->protocol = SAS_PROTOCOL_NONE;
 	ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
 
 	ireq->sci_status   = SCI_SUCCESS;
@@ -3193,7 +3103,7 @@
 
 	if (dev->dev_type == SAS_END_DEV)
 		/* pass */;
-	else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+	else if (dev_is_sata(dev))
 		memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
 	else if (dev_is_expander(dev))
 		/* pass */;
@@ -3215,10 +3125,15 @@
 	/* Build the common part of the request */
 	sci_general_request_construct(ihost, idev, ireq);
 
-	if (dev->dev_type == SAS_END_DEV ||
-	    dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+	if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
 		set_bit(IREQ_TMF, &ireq->flags);
 		memset(ireq->tc, 0, sizeof(struct scu_task_context));
+
+		/* Set the protocol indicator. */
+		if (dev_is_sata(dev))
+			ireq->protocol = SAS_PROTOCOL_STP;
+		else
+			ireq->protocol = SAS_PROTOCOL_SSP;
 	} else
 		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
 
@@ -3311,7 +3226,7 @@
 	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
 		return SCI_FAILURE;
 
-	ireq->protocol = SCIC_SMP_PROTOCOL;
+	ireq->protocol = SAS_PROTOCOL_SMP;
 
 	/* byte swap the smp request. */
 
@@ -3496,9 +3411,6 @@
 	ireq->io_request_completion = NULL;
 	ireq->flags = 0;
 	ireq->num_sg_entries = 0;
-	INIT_LIST_HEAD(&ireq->completed_node);
-	INIT_LIST_HEAD(&ireq->dev_node);
-	isci_request_change_state(ireq, allocated);
 
 	return ireq;
 }
@@ -3582,26 +3494,15 @@
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 		return status;
 	}
-
 	/* Either I/O started OK, or the core has signaled that
 	 * the device needs a target reset.
-	 *
-	 * In either case, hold onto the I/O for later.
-	 *
-	 * Update it's status and add it to the list in the
-	 * remote device object.
 	 */
-	list_add(&ireq->dev_node, &idev->reqs_in_process);
-
-	if (status == SCI_SUCCESS) {
-		isci_request_change_state(ireq, started);
-	} else {
+	if (status != SCI_SUCCESS) {
 		/* The request did not really start in the
 		 * hardware, so clear the request handle
 		 * here so no terminations will be done.
 		 */
 		set_bit(IREQ_TERMINATED, &ireq->flags);
-		isci_request_change_state(ireq, completed);
 	}
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 057f237..aff9531 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -61,30 +61,6 @@
 #include "scu_task_context.h"
 
 /**
- * struct isci_request_status - This enum defines the possible states of an I/O
- *    request.
- *
- *
- */
-enum isci_request_status {
-	unallocated = 0x00,
-	allocated   = 0x01,
-	started     = 0x02,
-	completed   = 0x03,
-	aborting    = 0x04,
-	aborted     = 0x05,
-	terminating = 0x06,
-	dead        = 0x07
-};
-
-enum sci_request_protocol {
-	SCIC_NO_PROTOCOL,
-	SCIC_SMP_PROTOCOL,
-	SCIC_SSP_PROTOCOL,
-	SCIC_STP_PROTOCOL
-}; /* XXX remove me, use sas_task.{dev|task_proto} instead */;
-
-/**
  * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
  * @pio_len - number of bytes requested at PIO setup
  * @status - pio setup ending status value to tell us if we need
@@ -104,11 +80,14 @@
 };
 
 struct isci_request {
-	enum isci_request_status status;
 	#define IREQ_COMPLETE_IN_TARGET 0
 	#define IREQ_TERMINATED 1
 	#define IREQ_TMF 2
 	#define IREQ_ACTIVE 3
+	#define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */
+	#define IREQ_TC_ABORT_POSTED 5
+	#define IREQ_ABORT_PATH_ACTIVE 6
+	#define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */
 	unsigned long flags;
 	/* XXX kill ttype and ttype_ptr, allocate full sas_task */
 	union ttype_ptr_union {
@@ -116,11 +95,6 @@
 		struct isci_tmf *tmf_task_ptr;  /* When ttype==tmf_task */
 	} ttype_ptr;
 	struct isci_host *isci_host;
-	/* For use in the requests_to_{complete|abort} lists: */
-	struct list_head completed_node;
-	/* For use in the reqs_in_process list: */
-	struct list_head dev_node;
-	spinlock_t state_lock;
 	dma_addr_t request_daddr;
 	dma_addr_t zero_scatter_daddr;
 	unsigned int num_sg_entries;
@@ -140,7 +114,7 @@
 	struct isci_host *owning_controller;
 	struct isci_remote_device *target_device;
 	u16 io_tag;
-	enum sci_request_protocol protocol;
+	enum sas_protocol protocol;
 	u32 scu_status; /* hardware result */
 	u32 sci_status; /* upper layer disposition */
 	u32 post_context;
@@ -309,92 +283,6 @@
 	return ireq->request_daddr + (requested_addr - base_addr);
 }
 
-/**
- * isci_request_change_state() - This function sets the status of the request
- *    object.
- * @request: This parameter points to the isci_request object
- * @status: This Parameter is the new status of the object
- *
- */
-static inline enum isci_request_status
-isci_request_change_state(struct isci_request *isci_request,
-			  enum isci_request_status status)
-{
-	enum isci_request_status old_state;
-	unsigned long flags;
-
-	dev_dbg(&isci_request->isci_host->pdev->dev,
-		"%s: isci_request = %p, state = 0x%x\n",
-		__func__,
-		isci_request,
-		status);
-
-	BUG_ON(isci_request == NULL);
-
-	spin_lock_irqsave(&isci_request->state_lock, flags);
-	old_state = isci_request->status;
-	isci_request->status = status;
-	spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
-	return old_state;
-}
-
-/**
- * isci_request_change_started_to_newstate() - This function sets the status of
- *    the request object.
- * @request: This parameter points to the isci_request object
- * @status: This Parameter is the new status of the object
- *
- * state previous to any change.
- */
-static inline enum isci_request_status
-isci_request_change_started_to_newstate(struct isci_request *isci_request,
-					struct completion *completion_ptr,
-					enum isci_request_status newstate)
-{
-	enum isci_request_status old_state;
-	unsigned long flags;
-
-	spin_lock_irqsave(&isci_request->state_lock, flags);
-
-	old_state = isci_request->status;
-
-	if (old_state == started || old_state == aborting) {
-		BUG_ON(isci_request->io_request_completion != NULL);
-
-		isci_request->io_request_completion = completion_ptr;
-		isci_request->status = newstate;
-	}
-
-	spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
-	dev_dbg(&isci_request->isci_host->pdev->dev,
-		"%s: isci_request = %p, old_state = 0x%x\n",
-		__func__,
-		isci_request,
-		old_state);
-
-	return old_state;
-}
-
-/**
- * isci_request_change_started_to_aborted() - This function sets the status of
- *    the request object.
- * @request: This parameter points to the isci_request object
- * @completion_ptr: This parameter is saved as the kernel completion structure
- *    signalled when the old request completes.
- *
- * state previous to any change.
- */
-static inline enum isci_request_status
-isci_request_change_started_to_aborted(struct isci_request *isci_request,
-				       struct completion *completion_ptr)
-{
-	return isci_request_change_started_to_newstate(isci_request,
-						       completion_ptr,
-						       aborted);
-}
-
 #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
 
 #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
@@ -404,8 +292,6 @@
 					       u16 tag);
 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
 			 struct sas_task *task, u16 tag);
-void isci_terminate_pending_requests(struct isci_host *ihost,
-				     struct isci_remote_device *idev);
 enum sci_status
 sci_task_request_construct(struct isci_host *ihost,
 			    struct isci_remote_device *idev,
@@ -421,5 +307,4 @@
 		task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
 
 }
-
 #endif /* !defined(_ISCI_REQUEST_H_) */
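The request.h change replaces the old isci_request_status enum and its state lock with atomic flag bits on ireq->flags. A minimal userspace sketch of that flag-bit model, using the bit positions defined above but plain (non-atomic) bit operations in place of the kernel's set_bit()/test_bit()/clear_bit():

#include <stdbool.h>
#include <stdio.h>

/* Bit positions matching the IREQ_* defines above (illustrative only). */
enum {
	IREQ_COMPLETE_IN_TARGET = 0,
	IREQ_TERMINATED         = 1,
	IREQ_TMF                = 2,
	IREQ_ACTIVE             = 3,
	IREQ_PENDING_ABORT      = 4,
	IREQ_TC_ABORT_POSTED    = 5,
	IREQ_ABORT_PATH_ACTIVE  = 6,
	IREQ_NO_AUTO_FREE_TAG   = 7,
};

/* Plain stand-ins for the kernel bit helpers. */
static void flag_set(unsigned long *flags, int bit)   { *flags |= 1UL << bit; }
static void flag_clear(unsigned long *flags, int bit) { *flags &= ~(1UL << bit); }
static bool flag_test(unsigned long flags, int bit)   { return flags & (1UL << bit); }

int main(void)
{
	unsigned long flags = 0;

	/* An abort issued while the target device is not yet suspended is
	 * tracked with a pending marker rather than an 'aborting' state. */
	flag_set(&flags, IREQ_PENDING_ABORT);
	printf("pending abort: %d\n", flag_test(flags, IREQ_PENDING_ABORT));

	/* Once the suspension completes, the pending marker is dropped. */
	flag_clear(&flags, IREQ_PENDING_ABORT);
	flag_set(&flags, IREQ_COMPLETE_IN_TARGET);
	printf("complete in target: %d\n",
	       flag_test(flags, IREQ_COMPLETE_IN_TARGET));
	return 0;
}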
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
index c8b329c..071cb74 100644
--- a/drivers/scsi/isci/scu_completion_codes.h
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -224,6 +224,7 @@
  * 32-bit value like we want, each immediate value must be cast to a u32.
  */
 #define SCU_TASK_DONE_GOOD                                  ((u32)0x00)
+#define SCU_TASK_DONE_TX_RAW_CMD_ERR                        ((u32)0x08)
 #define SCU_TASK_DONE_CRC_ERR                               ((u32)0x14)
 #define SCU_TASK_DONE_CHECK_RESPONSE                        ((u32)0x14)
 #define SCU_TASK_DONE_GEN_RESPONSE                          ((u32)0x15)
@@ -237,6 +238,7 @@
 #define SCU_TASK_DONE_LL_LF_TERM                            ((u32)0x1A)
 #define SCU_TASK_DONE_DATA_LEN_ERR                          ((u32)0x1A)
 #define SCU_TASK_DONE_LL_CL_TERM                            ((u32)0x1B)
+#define SCU_TASK_DONE_BREAK_RCVD                            ((u32)0x1B)
 #define SCU_TASK_DONE_LL_ABORT_ERR                          ((u32)0x1B)
 #define SCU_TASK_DONE_SEQ_INV_TYPE                          ((u32)0x1C)
 #define SCU_TASK_DONE_UNEXP_XR                              ((u32)0x1C)
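The two new completion codes above feed the suspend classification added in request.c: some task-context completion statuses suspend only the Tx side of the remote node, others suspend both Tx and Rx. A cut-down sketch of that classification, using local copies of only the values visible in this hunk (not the driver's full code table):

#include <stdio.h>

/* Local copies of values visible above, for illustration only. */
#define SCU_TASK_DONE_GOOD            ((unsigned)0x00)
#define SCU_TASK_DONE_TX_RAW_CMD_ERR  ((unsigned)0x08)
#define SCU_TASK_DONE_LL_LF_TERM      ((unsigned)0x1A)
#define SCU_TASK_DONE_BREAK_RCVD      ((unsigned)0x1B)

enum suspend_kind { SUSPEND_NONE, SUSPEND_TX, SUSPEND_TX_RX };

/* Simplified STP/SATA classification: raw command errors suspend Tx only,
 * while link-level terminations and received BREAKs suspend Tx and Rx. */
static enum suspend_kind stpsata_suspend_kind(unsigned status)
{
	switch (status) {
	case SCU_TASK_DONE_TX_RAW_CMD_ERR:
		return SUSPEND_TX;
	case SCU_TASK_DONE_LL_LF_TERM:
	case SCU_TASK_DONE_BREAK_RCVD:
		return SUSPEND_TX_RX;
	default:
		return SUSPEND_NONE;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       stpsata_suspend_kind(SCU_TASK_DONE_GOOD),
	       stpsata_suspend_kind(SCU_TASK_DONE_TX_RAW_CMD_ERR),
	       stpsata_suspend_kind(SCU_TASK_DONE_BREAK_RCVD)); /* prints: 0 1 2 */
	return 0;
}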
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 374254e..6bc74eb 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -78,54 +78,25 @@
 			     enum exec_status status)
 
 {
-	enum isci_completion_selection disposition;
+	unsigned long flags;
 
-	disposition = isci_perform_normal_io_completion;
-	disposition = isci_task_set_completion_status(task, response, status,
-						      disposition);
+	/* Normal notification (task_done) */
+	dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n",
+		__func__, task, response, status);
 
-	/* Tasks aborted specifically by a call to the lldd_abort_task
-	 * function should not be completed to the host in the regular path.
-	 */
-	switch (disposition) {
-	case isci_perform_normal_io_completion:
-		/* Normal notification (task_done) */
-		dev_dbg(&ihost->pdev->dev,
-			"%s: Normal - task = %p, response=%d, "
-			"status=%d\n",
-			__func__, task, response, status);
+	spin_lock_irqsave(&task->task_state_lock, flags);
 
-		task->lldd_task = NULL;
-		task->task_done(task);
-		break;
+	task->task_status.resp = response;
+	task->task_status.stat = status;
 
-	case isci_perform_aborted_io_completion:
-		/*
-		 * No notification because this request is already in the
-		 * abort path.
-		 */
-		dev_dbg(&ihost->pdev->dev,
-			"%s: Aborted - task = %p, response=%d, "
-			"status=%d\n",
-			__func__, task, response, status);
-		break;
+	/* Normal notification (task_done) */
+	task->task_state_flags |= SAS_TASK_STATE_DONE;
+	task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+				    SAS_TASK_STATE_PENDING);
+	task->lldd_task = NULL;
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-	case isci_perform_error_io_completion:
-		/* Use sas_task_abort */
-		dev_dbg(&ihost->pdev->dev,
-			"%s: Error - task = %p, response=%d, "
-			"status=%d\n",
-			__func__, task, response, status);
-		sas_task_abort(task);
-		break;
-
-	default:
-		dev_dbg(&ihost->pdev->dev,
-			"%s: isci task notification default case!",
-			__func__);
-		sas_task_abort(task);
-		break;
-	}
+	task->task_done(task);
 }
 
 #define for_each_sas_task(num, task) \
@@ -289,60 +260,6 @@
 	return ireq;
 }
 
-/**
-* isci_request_mark_zombie() - This function must be called with scic_lock held.
-*/
-static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
-{
-	struct completion *tmf_completion = NULL;
-	struct completion *req_completion;
-
-	/* Set the request state to "dead". */
-	ireq->status = dead;
-
-	req_completion = ireq->io_request_completion;
-	ireq->io_request_completion = NULL;
-
-	if (test_bit(IREQ_TMF, &ireq->flags)) {
-		/* Break links with the TMF request. */
-		struct isci_tmf *tmf = isci_request_access_tmf(ireq);
-
-		/* In the case where a task request is dying,
-		 * the thread waiting on the complete will sit and
-		 * timeout unless we wake it now.  Since the TMF
-		 * has a default error status, complete it here
-		 * to wake the waiting thread.
-		 */
-		if (tmf) {
-			tmf_completion = tmf->complete;
-			tmf->complete = NULL;
-		}
-		ireq->ttype_ptr.tmf_task_ptr = NULL;
-		dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
-			__func__, tmf->tmf_code, tmf->io_tag);
-	} else {
-		/* Break links with the sas_task - the callback is done
-		 * elsewhere.
-		 */
-		struct sas_task *task = isci_request_access_task(ireq);
-
-		if (task)
-			task->lldd_task = NULL;
-
-		ireq->ttype_ptr.io_task_ptr = NULL;
-	}
-
-	dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
-		 ireq->io_tag);
-
-	/* Don't force waiting threads to timeout. */
-	if (req_completion)
-		complete(req_completion);
-
-	if (tmf_completion != NULL)
-		complete(tmf_completion);
-}
-
 static int isci_task_execute_tmf(struct isci_host *ihost,
 				 struct isci_remote_device *idev,
 				 struct isci_tmf *tmf, unsigned long timeout_ms)
@@ -400,17 +317,11 @@
 		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 		goto err_tci;
 	}
-
-	if (tmf->cb_state_func != NULL)
-		tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
-
-	isci_request_change_state(ireq, started);
-
-	/* add the request to the remote device request list. */
-	list_add(&ireq->dev_node, &idev->reqs_in_process);
-
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
+	/* The RNC must be unsuspended before the TMF can get a response. */
+	isci_remote_device_resume_from_abort(ihost, idev);
+
 	/* Wait for the TMF to complete, or a timeout. */
 	timeleft = wait_for_completion_timeout(&completion,
 					       msecs_to_jiffies(timeout_ms));
@@ -419,32 +330,7 @@
 		/* The TMF did not complete - this could be because
 		 * of an unplug.  Terminate the TMF request now.
 		 */
-		spin_lock_irqsave(&ihost->scic_lock, flags);
-
-		if (tmf->cb_state_func != NULL)
-			tmf->cb_state_func(isci_tmf_timed_out, tmf,
-					   tmf->cb_data);
-
-		sci_controller_terminate_request(ihost, idev, ireq);
-
-		spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
-		timeleft = wait_for_completion_timeout(
-			&completion,
-			msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
-
-		if (!timeleft) {
-			/* Strange condition - the termination of the TMF
-			 * request timed-out.
-			 */
-			spin_lock_irqsave(&ihost->scic_lock, flags);
-
-			/* If the TMF status has not changed, kill it. */
-			if (tmf->status == SCI_FAILURE_TIMEOUT)
-				isci_request_mark_zombie(ihost, ireq);
-
-			spin_unlock_irqrestore(&ihost->scic_lock, flags);
-		}
+		isci_remote_device_suspend_terminate(ihost, idev, ireq);
 	}
 
 	isci_print_tmf(ihost, tmf);
@@ -476,315 +362,21 @@
 }
 
 static void isci_task_build_tmf(struct isci_tmf *tmf,
-				enum isci_tmf_function_codes code,
-				void (*tmf_sent_cb)(enum isci_tmf_cb_state,
-						    struct isci_tmf *,
-						    void *),
-				void *cb_data)
+				enum isci_tmf_function_codes code)
 {
 	memset(tmf, 0, sizeof(*tmf));
-
-	tmf->tmf_code      = code;
-	tmf->cb_state_func = tmf_sent_cb;
-	tmf->cb_data       = cb_data;
+	tmf->tmf_code = code;
 }
 
 static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
 					   enum isci_tmf_function_codes code,
-					   void (*tmf_sent_cb)(enum isci_tmf_cb_state,
-							       struct isci_tmf *,
-							       void *),
 					   struct isci_request *old_request)
 {
-	isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
+	isci_task_build_tmf(tmf, code);
 	tmf->io_tag = old_request->io_tag;
 }
 
 /**
- * isci_task_validate_request_to_abort() - This function checks the given I/O
- *    against the "started" state.  If the request is still "started", it's
- *    state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
- *    BEFORE CALLING THIS FUNCTION.
- * @isci_request: This parameter specifies the request object to control.
- * @isci_host: This parameter specifies the ISCI host object
- * @isci_device: This is the device to which the request is pending.
- * @aborted_io_completion: This is a completion structure that will be added to
- *    the request in case it is changed to aborting; this completion is
- *    triggered when the request is fully completed.
- *
- * Either "started" on successful change of the task status to "aborted", or
- * "unallocated" if the task cannot be controlled.
- */
-static enum isci_request_status isci_task_validate_request_to_abort(
-	struct isci_request *isci_request,
-	struct isci_host *isci_host,
-	struct isci_remote_device *isci_device,
-	struct completion *aborted_io_completion)
-{
-	enum isci_request_status old_state = unallocated;
-
-	/* Only abort the task if it's in the
-	 *  device's request_in_process list
-	 */
-	if (isci_request && !list_empty(&isci_request->dev_node)) {
-		old_state = isci_request_change_started_to_aborted(
-			isci_request, aborted_io_completion);
-
-	}
-
-	return old_state;
-}
-
-static int isci_request_is_dealloc_managed(enum isci_request_status stat)
-{
-	switch (stat) {
-	case aborted:
-	case aborting:
-	case terminating:
-	case completed:
-	case dead:
-		return true;
-	default:
-		return false;
-	}
-}
-
-/**
- * isci_terminate_request_core() - This function will terminate the given
- *    request, and wait for it to complete.  This function must only be called
- *    from a thread that can wait.  Note that the request is terminated and
- *    completed (back to the host, if started there).
- * @ihost: This SCU.
- * @idev: The target.
- * @isci_request: The I/O request to be terminated.
- *
- */
-static void isci_terminate_request_core(struct isci_host *ihost,
-					struct isci_remote_device *idev,
-					struct isci_request *isci_request)
-{
-	enum sci_status status      = SCI_SUCCESS;
-	bool was_terminated         = false;
-	bool needs_cleanup_handling = false;
-	unsigned long     flags;
-	unsigned long     termination_completed = 1;
-	struct completion *io_request_completion;
-
-	dev_dbg(&ihost->pdev->dev,
-		"%s: device = %p; request = %p\n",
-		__func__, idev, isci_request);
-
-	spin_lock_irqsave(&ihost->scic_lock, flags);
-
-	io_request_completion = isci_request->io_request_completion;
-
-	/* Note that we are not going to control
-	 * the target to abort the request.
-	 */
-	set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
-
-	/* Make sure the request wasn't just sitting around signalling
-	 * device condition (if the request handle is NULL, then the
-	 * request completed but needed additional handling here).
-	 */
-	if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
-		was_terminated = true;
-		needs_cleanup_handling = true;
-		status = sci_controller_terminate_request(ihost,
-							   idev,
-							   isci_request);
-	}
-	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
-	/*
-	 * The only time the request to terminate will
-	 * fail is when the io request is completed and
-	 * being aborted.
-	 */
-	if (status != SCI_SUCCESS) {
-		dev_dbg(&ihost->pdev->dev,
-			"%s: sci_controller_terminate_request"
-			" returned = 0x%x\n",
-			__func__, status);
-
-		isci_request->io_request_completion = NULL;
-
-	} else {
-		if (was_terminated) {
-			dev_dbg(&ihost->pdev->dev,
-				"%s: before completion wait (%p/%p)\n",
-				__func__, isci_request, io_request_completion);
-
-			/* Wait here for the request to complete. */
-			termination_completed
-				= wait_for_completion_timeout(
-				   io_request_completion,
-				   msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
-
-			if (!termination_completed) {
-
-				/* The request to terminate has timed out.  */
-				spin_lock_irqsave(&ihost->scic_lock, flags);
-
-				/* Check for state changes. */
-				if (!test_bit(IREQ_TERMINATED,
-					      &isci_request->flags)) {
-
-					/* The best we can do is to have the
-					 * request die a silent death if it
-					 * ever really completes.
-					 */
-					isci_request_mark_zombie(ihost,
-								 isci_request);
-					needs_cleanup_handling = true;
-				} else
-					termination_completed = 1;
-
-				spin_unlock_irqrestore(&ihost->scic_lock,
-						       flags);
-
-				if (!termination_completed) {
-
-					dev_dbg(&ihost->pdev->dev,
-						"%s: *** Timeout waiting for "
-						"termination(%p/%p)\n",
-						__func__, io_request_completion,
-						isci_request);
-
-					/* The request can no longer be referenced
-					 * safely since it may go away if the
-					 * termination every really does complete.
-					 */
-					isci_request = NULL;
-				}
-			}
-			if (termination_completed)
-				dev_dbg(&ihost->pdev->dev,
-					"%s: after completion wait (%p/%p)\n",
-					__func__, isci_request, io_request_completion);
-		}
-
-		if (termination_completed) {
-
-			isci_request->io_request_completion = NULL;
-
-			/* Peek at the status of the request.  This will tell
-			 * us if there was special handling on the request such that it
-			 * needs to be detached and freed here.
-			 */
-			spin_lock_irqsave(&isci_request->state_lock, flags);
-
-			needs_cleanup_handling
-				= isci_request_is_dealloc_managed(
-					isci_request->status);
-
-			spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
-		}
-		if (needs_cleanup_handling) {
-
-			dev_dbg(&ihost->pdev->dev,
-				"%s: cleanup isci_device=%p, request=%p\n",
-				__func__, idev, isci_request);
-
-			if (isci_request != NULL) {
-				spin_lock_irqsave(&ihost->scic_lock, flags);
-				isci_free_tag(ihost, isci_request->io_tag);
-				isci_request_change_state(isci_request, unallocated);
-				list_del_init(&isci_request->dev_node);
-				spin_unlock_irqrestore(&ihost->scic_lock, flags);
-			}
-		}
-	}
-}
-
-/**
- * isci_terminate_pending_requests() - This function will change the all of the
- *    requests on the given device's state to "aborting", will terminate the
- *    requests, and wait for them to complete.  This function must only be
- *    called from a thread that can wait.  Note that the requests are all
- *    terminated and completed (back to the host, if started there).
- * @isci_host: This parameter specifies SCU.
- * @idev: This parameter specifies the target.
- *
- */
-void isci_terminate_pending_requests(struct isci_host *ihost,
-				     struct isci_remote_device *idev)
-{
-	struct completion request_completion;
-	enum isci_request_status old_state;
-	unsigned long flags;
-	LIST_HEAD(list);
-
-	spin_lock_irqsave(&ihost->scic_lock, flags);
-	list_splice_init(&idev->reqs_in_process, &list);
-
-	/* assumes that isci_terminate_request_core deletes from the list */
-	while (!list_empty(&list)) {
-		struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
-
-		/* Change state to "terminating" if it is currently
-		 * "started".
-		 */
-		old_state = isci_request_change_started_to_newstate(ireq,
-								    &request_completion,
-								    terminating);
-		switch (old_state) {
-		case started:
-		case completed:
-		case aborting:
-			break;
-		default:
-			/* termination in progress, or otherwise dispositioned.
-			 * We know the request was on 'list' so should be safe
-			 * to move it back to reqs_in_process
-			 */
-			list_move(&ireq->dev_node, &idev->reqs_in_process);
-			ireq = NULL;
-			break;
-		}
-
-		if (!ireq)
-			continue;
-		spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
-		init_completion(&request_completion);
-
-		dev_dbg(&ihost->pdev->dev,
-			 "%s: idev=%p request=%p; task=%p old_state=%d\n",
-			 __func__, idev, ireq,
-			(!test_bit(IREQ_TMF, &ireq->flags)
-				? isci_request_access_task(ireq)
-				: NULL),
-			old_state);
-
-		/* If the old_state is started:
-		 * This request was not already being aborted. If it had been,
-		 * then the aborting I/O (ie. the TMF request) would not be in
-		 * the aborting state, and thus would be terminated here.  Note
-		 * that since the TMF completion's call to the kernel function
-		 * "complete()" does not happen until the pending I/O request
-		 * terminate fully completes, we do not have to implement a
-		 * special wait here for already aborting requests - the
-		 * termination of the TMF request will force the request
-		 * to finish it's already started terminate.
-		 *
-		 * If old_state == completed:
-		 * This request completed from the SCU hardware perspective
-		 * and now just needs cleaning up in terms of freeing the
-		 * request and potentially calling up to libsas.
-		 *
-		 * If old_state == aborting:
-		 * This request has already gone through a TMF timeout, but may
-		 * not have been terminated; needs cleaning up at least.
-		 */
-		isci_terminate_request_core(ihost, idev, ireq);
-		spin_lock_irqsave(&ihost->scic_lock, flags);
-	}
-	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-}
-
-/**
  * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain
  *    Template functions.
  * @lun: This parameter specifies the lun to be reset.
@@ -807,7 +399,7 @@
 	 * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or
 	 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
 	 */
-	isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
+	isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset);
 
 	#define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
 	ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
@@ -826,42 +418,44 @@
 
 int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
 {
-	struct isci_host *isci_host = dev_to_ihost(dev);
-	struct isci_remote_device *isci_device;
+	struct isci_host *ihost = dev_to_ihost(dev);
+	struct isci_remote_device *idev;
 	unsigned long flags;
-	int ret;
+	int ret = TMF_RESP_FUNC_COMPLETE;
 
-	spin_lock_irqsave(&isci_host->scic_lock, flags);
-	isci_device = isci_lookup_device(dev);
-	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_get_device(dev->lldd_dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-	dev_dbg(&isci_host->pdev->dev,
+	dev_dbg(&ihost->pdev->dev,
 		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
-		 __func__, dev, isci_host, isci_device);
+		__func__, dev, ihost, idev);
 
-	if (!isci_device) {
-		/* If the device is gone, stop the escalations. */
-		dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);
+	if (!idev) {
+		/* If the device is gone, escalate to I_T_Nexus_Reset. */
+		dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);
 
-		ret = TMF_RESP_FUNC_COMPLETE;
+		ret = TMF_RESP_FUNC_FAILED;
 		goto out;
 	}
 
-	/* Send the task management part of the reset. */
-	if (dev_is_sata(dev)) {
-		sas_ata_schedule_reset(dev);
-		ret = TMF_RESP_FUNC_COMPLETE;
-	} else
-		ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
-
-	/* If the LUN reset worked, all the I/O can now be terminated. */
-	if (ret == TMF_RESP_FUNC_COMPLETE)
-		/* Terminate all I/O now. */
-		isci_terminate_pending_requests(isci_host,
-						isci_device);
-
+	/* Suspend the RNC, kill all TCs */
+	if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+	    != SCI_SUCCESS) {
+		/* The suspend/terminate only fails if isci_get_device fails */
+		ret = TMF_RESP_FUNC_FAILED;
+		goto out;
+	}
+	/* All pending I/Os have been terminated and cleaned up. */
+	if (!test_bit(IDEV_GONE, &idev->flags)) {
+		if (dev_is_sata(dev))
+			sas_ata_schedule_reset(dev);
+		else
+			/* Send the task management part of the reset. */
+			ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
+	}
  out:
-	isci_put_device(isci_device);
+	isci_put_device(idev);
 	return ret;
 }
 
@@ -882,63 +476,6 @@
 /* Task Management Functions. Must be called from process context.	 */
 
 /**
- * isci_abort_task_process_cb() - This is a helper function for the abort task
- *    TMF command.  It manages the request state with respect to the successful
- *    transmission / completion of the abort task request.
- * @cb_state: This parameter specifies when this function was called - after
- *    the TMF request has been started and after it has timed-out.
- * @tmf: This parameter specifies the TMF in progress.
- *
- *
- */
-static void isci_abort_task_process_cb(
-	enum isci_tmf_cb_state cb_state,
-	struct isci_tmf *tmf,
-	void *cb_data)
-{
-	struct isci_request *old_request;
-
-	old_request = (struct isci_request *)cb_data;
-
-	dev_dbg(&old_request->isci_host->pdev->dev,
-		"%s: tmf=%p, old_request=%p\n",
-		__func__, tmf, old_request);
-
-	switch (cb_state) {
-
-	case isci_tmf_started:
-		/* The TMF has been started.  Nothing to do here, since the
-		 * request state was already set to "aborted" by the abort
-		 * task function.
-		 */
-		if ((old_request->status != aborted)
-			&& (old_request->status != completed))
-			dev_dbg(&old_request->isci_host->pdev->dev,
-				"%s: Bad request status (%d): tmf=%p, old_request=%p\n",
-				__func__, old_request->status, tmf, old_request);
-		break;
-
-	case isci_tmf_timed_out:
-
-		/* Set the task's state to "aborting", since the abort task
-		 * function thread set it to "aborted" (above) in anticipation
-		 * of the task management request working correctly.  Since the
-		 * timeout has now fired, the TMF request failed.  We set the
-		 * state such that the request completion will indicate the
-		 * device is no longer present.
-		 */
-		isci_request_change_state(old_request, aborting);
-		break;
-
-	default:
-		dev_dbg(&old_request->isci_host->pdev->dev,
-			"%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
-			__func__, cb_state, tmf, old_request);
-		break;
-	}
-}
-
-/**
  * isci_task_abort_task() - This function is one of the SAS Domain Template
  *    functions. This function is called by libsas to abort a specified task.
  * @task: This parameter specifies the SAS task to abort.
@@ -947,22 +484,20 @@
  */
 int isci_task_abort_task(struct sas_task *task)
 {
-	struct isci_host *isci_host = dev_to_ihost(task->dev);
+	struct isci_host *ihost = dev_to_ihost(task->dev);
 	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
 	struct isci_request       *old_request = NULL;
-	enum isci_request_status  old_state;
-	struct isci_remote_device *isci_device = NULL;
+	struct isci_remote_device *idev = NULL;
 	struct isci_tmf           tmf;
 	int                       ret = TMF_RESP_FUNC_FAILED;
 	unsigned long             flags;
-	int                       perform_termination = 0;
 
 	/* Get the isci_request reference from the task.  Note that
 	 * this check does not depend on the pending request list
 	 * in the device, because tasks driving resets may land here
 	 * after completion in the core.
 	 */
-	spin_lock_irqsave(&isci_host->scic_lock, flags);
+	spin_lock_irqsave(&ihost->scic_lock, flags);
 	spin_lock(&task->task_state_lock);
 
 	old_request = task->lldd_task;
@@ -971,20 +506,29 @@
 	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
 	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
 	    old_request)
-		isci_device = isci_lookup_device(task->dev);
+		idev = isci_get_device(task->dev->lldd_dev);
 
 	spin_unlock(&task->task_state_lock);
-	spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: dev = %p, task = %p, old_request == %p\n",
-		__func__, isci_device, task, old_request);
+	dev_warn(&ihost->pdev->dev,
+		 "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
+		 __func__, idev,
+		 (dev_is_sata(task->dev) ? "STP/SATA"
+					 : ((dev_is_expander(task->dev))
+						? "SMP"
+						: "SSP")),
+		 ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
+			   ? " IDEV_GONE"
+			   : "")
+			 : " <NULL>"),
+		 task, old_request);
 
 	/* Device reset conditions signalled in task_state_flags are the
 	 * responsibility of libsas to observe at the start of the error
 	 * handler thread.
 	 */
-	if (!isci_device || !old_request) {
+	if (!idev || !old_request) {
 		/* The request has already completed and there
 		* is nothing to do here other than to set the task
 		* done bit, and indicate that the task abort function
@@ -998,108 +542,72 @@
 
 		ret = TMF_RESP_FUNC_COMPLETE;
 
-		dev_dbg(&isci_host->pdev->dev,
-			"%s: abort task not needed for %p\n",
-			__func__, task);
+		dev_warn(&ihost->pdev->dev,
+			 "%s: abort task not needed for %p\n",
+			 __func__, task);
 		goto out;
 	}
-
-	spin_lock_irqsave(&isci_host->scic_lock, flags);
-
-	/* Check the request status and change to "aborted" if currently
-	 * "starting"; if true then set the I/O kernel completion
-	 * struct that will be triggered when the request completes.
-	 */
-	old_state = isci_task_validate_request_to_abort(
-				old_request, isci_host, isci_device,
-				&aborted_io_completion);
-	if ((old_state != started) &&
-	    (old_state != completed) &&
-	    (old_state != aborting)) {
-
-		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
-
-		/* The request was already being handled by someone else (because
-		* they got to set the state away from started).
-		*/
-		dev_dbg(&isci_host->pdev->dev,
-			"%s:  device = %p; old_request %p already being aborted\n",
-			__func__,
-			isci_device, old_request);
-		ret = TMF_RESP_FUNC_COMPLETE;
+	/* Suspend the RNC, kill the TC */
+	if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
+	    != SCI_SUCCESS) {
+		dev_warn(&ihost->pdev->dev,
+			 "%s: isci_remote_device_reset_terminate(dev=%p, "
+				 "req=%p, task=%p) failed\n",
+			 __func__, idev, old_request, task);
+		ret = TMF_RESP_FUNC_FAILED;
 		goto out;
 	}
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+
 	if (task->task_proto == SAS_PROTOCOL_SMP ||
 	    sas_protocol_ata(task->task_proto) ||
-	    test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+	    test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
+	    test_bit(IDEV_GONE, &idev->flags)) {
 
-		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
-		dev_dbg(&isci_host->pdev->dev,
-			"%s: %s request"
-			" or complete_in_target (%d), thus no TMF\n",
-			__func__,
-			((task->task_proto == SAS_PROTOCOL_SMP)
-				? "SMP"
-				: (sas_protocol_ata(task->task_proto)
-					? "SATA/STP"
-					: "<other>")
-			 ),
-			test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
+		/* No task to send, so explicitly resume the device here */
+		isci_remote_device_resume_from_abort(ihost, idev);
 
-		if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
-			spin_lock_irqsave(&task->task_state_lock, flags);
-			task->task_state_flags |= SAS_TASK_STATE_DONE;
-			task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
-						    SAS_TASK_STATE_PENDING);
-			spin_unlock_irqrestore(&task->task_state_lock, flags);
-			ret = TMF_RESP_FUNC_COMPLETE;
-		} else {
-			spin_lock_irqsave(&task->task_state_lock, flags);
-			task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
-						    SAS_TASK_STATE_PENDING);
-			spin_unlock_irqrestore(&task->task_state_lock, flags);
-		}
+		dev_warn(&ihost->pdev->dev,
+			 "%s: %s request"
+				 " or complete_in_target (%d), "
+				 "or IDEV_GONE (%d), thus no TMF\n",
+			 __func__,
+			 ((task->task_proto == SAS_PROTOCOL_SMP)
+			  ? "SMP"
+			  : (sas_protocol_ata(task->task_proto)
+				? "SATA/STP"
+				: "<other>")
+			  ),
+			 test_bit(IREQ_COMPLETE_IN_TARGET,
+				  &old_request->flags),
+			 test_bit(IDEV_GONE, &idev->flags));
 
-		/* STP and SMP devices are not sent a TMF, but the
-		 * outstanding I/O request is terminated below.  This is
-		 * because SATA/STP and SMP discovery path timeouts directly
-		 * call the abort task interface for cleanup.
-		 */
-		perform_termination = 1;
+		spin_lock_irqsave(&task->task_state_lock, flags);
+		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+					    SAS_TASK_STATE_PENDING);
+		task->task_state_flags |= SAS_TASK_STATE_DONE;
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
 
+		ret = TMF_RESP_FUNC_COMPLETE;
 	} else {
 		/* Fill in the tmf structure */
 		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
-					       isci_abort_task_process_cb,
 					       old_request);
 
-		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+		spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
+		/* Send the task management request. */
 		#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
-		ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+		ret = isci_task_execute_tmf(ihost, idev, &tmf,
 					    ISCI_ABORT_TASK_TIMEOUT_MS);
-
-		if (ret == TMF_RESP_FUNC_COMPLETE)
-			perform_termination = 1;
-		else
-			dev_dbg(&isci_host->pdev->dev,
-				"%s: isci_task_send_tmf failed\n", __func__);
 	}
-	if (perform_termination) {
-		set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
-
-		/* Clean up the request on our side, and wait for the aborted
-		 * I/O to complete.
-		 */
-		isci_terminate_request_core(isci_host, isci_device,
-					    old_request);
-	}
-
-	/* Make sure we do not leave a reference to aborted_io_completion */
-	old_request->io_request_completion = NULL;
- out:
-	isci_put_device(isci_device);
+out:
+	dev_warn(&ihost->pdev->dev,
+		 "%s: Done; dev = %p, task = %p , old_request == %p\n",
+		 __func__, idev, task, old_request);
+	isci_put_device(idev);
 	return ret;
 }
 
@@ -1195,14 +703,11 @@
 {
 	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
 	struct completion *tmf_complete = NULL;
-	struct completion *request_complete = ireq->io_request_completion;
 
 	dev_dbg(&ihost->pdev->dev,
 		"%s: request = %p, status=%d\n",
 		__func__, ireq, completion_status);
 
-	isci_request_change_state(ireq, completed);
-
 	set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
 
 	if (tmf) {
@@ -1226,20 +731,11 @@
 	 */
 	set_bit(IREQ_TERMINATED, &ireq->flags);
 
-	/* As soon as something is in the terminate path, deallocation is
-	 * managed there.  Note that the final non-managed state of a task
-	 * request is "completed".
-	 */
-	if ((ireq->status == completed) ||
-	    !isci_request_is_dealloc_managed(ireq->status)) {
-		isci_request_change_state(ireq, unallocated);
-		isci_free_tag(ihost, ireq->io_tag);
-		list_del_init(&ireq->dev_node);
-	}
+	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+		wake_up_all(&ihost->eventq);
 
-	/* "request_complete" is set if the task was being terminated. */
-	if (request_complete)
-		complete(request_complete);
+	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+		isci_free_tag(ihost, ireq->io_tag);
 
 	/* The task management part completes last. */
 	if (tmf_complete)
@@ -1250,48 +746,38 @@
 			     struct domain_device *dev,
 			     struct isci_remote_device *idev)
 {
-	int rc;
-	unsigned long flags;
-	enum sci_status status;
+	int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1;
 	struct sas_phy *phy = sas_get_local_phy(dev);
 	struct isci_port *iport = dev->port->lldd_port;
 
 	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
 
-	spin_lock_irqsave(&ihost->scic_lock, flags);
-	status = sci_remote_device_reset(idev);
-	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
-	if (status != SCI_SUCCESS) {
-		dev_dbg(&ihost->pdev->dev,
-			 "%s: sci_remote_device_reset(%p) returned %d!\n",
-			 __func__, idev, status);
+	/* Suspend the RNC, terminate all outstanding TCs. */
+	if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+	    != SCI_SUCCESS) {
 		rc = TMF_RESP_FUNC_FAILED;
 		goto out;
 	}
+	/* Note that since the termination for outstanding requests succeeded,
+	 * this function will return success.  This is because the resets will
+	 * only fail if the device has been removed (i.e. hotplug), and the
+	 * primary duty of this function is to clean up tasks, so that is the
+	 * relevant status.
+	 */
+	if (!test_bit(IDEV_GONE, &idev->flags)) {
+		if (scsi_is_sas_phy_local(phy)) {
+			struct isci_phy *iphy = &ihost->phys[phy->number];
 
-	if (scsi_is_sas_phy_local(phy)) {
-		struct isci_phy *iphy = &ihost->phys[phy->number];
-
-		rc = isci_port_perform_hard_reset(ihost, iport, iphy);
-	} else
-		rc = sas_phy_reset(phy, !dev_is_sata(dev));
-
-	/* Terminate in-progress I/O now. */
-	isci_remote_device_nuke_requests(ihost, idev);
-
-	/* Since all pending TCs have been cleaned, resume the RNC. */
-	spin_lock_irqsave(&ihost->scic_lock, flags);
-	status = sci_remote_device_reset_complete(idev);
-	spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
-	if (status != SCI_SUCCESS) {
-		dev_dbg(&ihost->pdev->dev,
-			 "%s: sci_remote_device_reset_complete(%p) "
-			 "returned %d!\n", __func__, idev, status);
+			reset_stat = isci_port_perform_hard_reset(ihost, iport,
+								  iphy);
+		} else
+			reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
 	}
+	/* Explicitly resume the RNC here, since there was no task sent. */
+	isci_remote_device_resume_from_abort(ihost, idev);
 
-	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
+	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n",
+		__func__, idev, reset_stat);
  out:
 	sas_put_local_phy(phy);
 	return rc;
@@ -1305,7 +791,7 @@
 	int ret;
 
 	spin_lock_irqsave(&ihost->scic_lock, flags);
-	idev = isci_lookup_device(dev);
+	idev = isci_get_device(dev->lldd_dev);
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
 	if (!idev) {
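The completion path in this file now signals the abort path through ihost->eventq (wake_up_all() after clearing IREQ_ABORT_PATH_ACTIVE) instead of completing a per-request struct completion. The wait side is not shown in these hunks, so the following is only a userspace sketch of that flag-plus-waitqueue handshake, with pthread primitives standing in for the kernel wait queue and the names chosen for illustration.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t eventq = PTHREAD_COND_INITIALIZER; /* ~ ihost->eventq */
	static bool abort_path_active = true; /* ~ IREQ_ABORT_PATH_ACTIVE */

	static void *request_complete(void *unused)
	{
		(void)unused;
		pthread_mutex_lock(&lock);
		abort_path_active = false;       /* ~ test_and_clear_bit() */
		pthread_cond_broadcast(&eventq); /* ~ wake_up_all(&ihost->eventq) */
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, request_complete, NULL);

		/* abort path: sleep until the flag is observed clear */
		pthread_mutex_lock(&lock);
		while (abort_path_active)
			pthread_cond_wait(&eventq, &lock);
		pthread_mutex_unlock(&lock);

		pthread_join(t, NULL);
		printf("abort path released\n");
		return 0;
	}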
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 7b6d0e3..9c06cba 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -63,19 +63,6 @@
 struct isci_request;
 
 /**
- * enum isci_tmf_cb_state - This enum defines the possible states in which the
- *    TMF callback function is invoked during the TMF execution process.
- *
- *
- */
-enum isci_tmf_cb_state {
-
-	isci_tmf_init_state = 0,
-	isci_tmf_started,
-	isci_tmf_timed_out
-};
-
-/**
  * enum isci_tmf_function_codes - This enum defines the possible preparations
  *    of task management requests.
  *
@@ -87,6 +74,7 @@
 	isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
 	isci_tmf_ssp_lun_reset  = TMF_LU_RESET,
 };
+
 /**
  * struct isci_tmf - This class represents the task management object which
  *    acts as an interface to libsas for processing task management requests
@@ -106,15 +94,6 @@
 	u16 io_tag;
 	enum isci_tmf_function_codes tmf_code;
 	int status;
-
-	/* The optional callback function allows the user process to
-	 * track the TMF transmit / timeout conditions.
-	 */
-	void (*cb_state_func)(
-		enum isci_tmf_cb_state,
-		struct isci_tmf *, void *);
-	void *cb_data;
-
 };
 
 static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
@@ -208,113 +187,4 @@
 	struct scsi_cmnd *scsi_cmd,
 	void (*donefunc)(struct scsi_cmnd *));
 
-/**
- * enum isci_completion_selection - This enum defines the possible actions to
- *    take with respect to a given request's notification back to libsas.
- *
- *
- */
-enum isci_completion_selection {
-
-	isci_perform_normal_io_completion,      /* Normal notify (task_done) */
-	isci_perform_aborted_io_completion,     /* No notification.   */
-	isci_perform_error_io_completion        /* Use sas_task_abort */
-};
-
-/**
- * isci_task_set_completion_status() - This function sets the completion status
- *    for the request.
- * @task: This parameter is the completed request.
- * @response: This parameter is the response code for the completed task.
- * @status: This parameter is the status code for the completed task.
- *
-* @return The new notification mode for the request.
-*/
-static inline enum isci_completion_selection
-isci_task_set_completion_status(
-	struct sas_task *task,
-	enum service_response response,
-	enum exec_status status,
-	enum isci_completion_selection task_notification_selection)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&task->task_state_lock, flags);
-
-	/* If a device reset is being indicated, make sure the I/O
-	* is in the error path.
-	*/
-	if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
-		/* Fail the I/O to make sure it goes into the error path. */
-		response = SAS_TASK_UNDELIVERED;
-		status = SAM_STAT_TASK_ABORTED;
-
-		task_notification_selection = isci_perform_error_io_completion;
-	}
-	task->task_status.resp = response;
-	task->task_status.stat = status;
-
-	switch (task->task_proto) {
-
-	case SAS_PROTOCOL_SATA:
-	case SAS_PROTOCOL_STP:
-	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-
-		if (task_notification_selection
-		    == isci_perform_error_io_completion) {
-			/* SATA/STP I/O has it's own means of scheduling device
-			* error handling on the normal path.
-			*/
-			task_notification_selection
-				= isci_perform_normal_io_completion;
-		}
-		break;
-	default:
-		break;
-	}
-
-	switch (task_notification_selection) {
-
-	case isci_perform_error_io_completion:
-
-		if (task->task_proto == SAS_PROTOCOL_SMP) {
-			/* There is no error escalation in the SMP case.
-			 * Convert to a normal completion to avoid the
-			 * timeout in the discovery path and to let the
-			 * next action take place quickly.
-			 */
-			task_notification_selection
-				= isci_perform_normal_io_completion;
-
-			/* Fall through to the normal case... */
-		} else {
-			/* Use sas_task_abort */
-			/* Leave SAS_TASK_STATE_DONE clear
-			 * Leave SAS_TASK_AT_INITIATOR set.
-			 */
-			break;
-		}
-
-	case isci_perform_aborted_io_completion:
-		/* This path can occur with task-managed requests as well as
-		 * requests terminated because of LUN or device resets.
-		 */
-		/* Fall through to the normal case... */
-	case isci_perform_normal_io_completion:
-		/* Normal notification (task_done) */
-		task->task_state_flags |= SAS_TASK_STATE_DONE;
-		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
-					    SAS_TASK_STATE_PENDING);
-		break;
-	default:
-		WARN_ONCE(1, "unknown task_notification_selection: %d\n",
-			 task_notification_selection);
-		break;
-	}
-
-	spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-	return task_notification_selection;
-
-}
 #endif /* !defined(_SCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
index 16f88ab..04a6d0d 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.c
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -57,31 +57,19 @@
 #include "unsolicited_frame_control.h"
 #include "registers.h"
 
-int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
+void sci_unsolicited_frame_control_construct(struct isci_host *ihost)
 {
 	struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
 	struct sci_unsolicited_frame *uf;
-	u32 buf_len, header_len, i;
-	dma_addr_t dma;
-	size_t size;
-	void *virt;
-
-	/*
-	 * Prepare all of the memory sizes for the UF headers, UF address
-	 * table, and UF buffers themselves.
-	 */
-	buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
-	header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
-	size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
+	dma_addr_t dma = ihost->ufi_dma;
+	void *virt = ihost->ufi_buf;
+	int i;
 
 	/*
 	 * The Unsolicited Frame buffers are set at the start of the UF
 	 * memory descriptor entry. The headers and address table will be
 	 * placed after the buffers.
 	 */
-	virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
-	if (!virt)
-		return -ENOMEM;
 
 	/*
 	 * Program the location of the UF header table into the SCU.
@@ -93,8 +81,8 @@
 	 *   headers, since we program the UF address table pointers to
 	 *   NULL.
 	 */
-	uf_control->headers.physical_address = dma + buf_len;
-	uf_control->headers.array = virt + buf_len;
+	uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE;
+	uf_control->headers.array = virt + SCI_UFI_BUF_SIZE;
 
 	/*
 	 * Program the location of the UF address table into the SCU.
@@ -103,8 +91,8 @@
 	 *   byte boundary already due to above programming headers being on a
 	 *   64-bit boundary and headers are on a 64-bytes in size.
 	 */
-	uf_control->address_table.physical_address = dma + buf_len + header_len;
-	uf_control->address_table.array = virt + buf_len + header_len;
+	uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
+	uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
 	uf_control->get = 0;
 
 	/*
@@ -135,8 +123,6 @@
 		virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
 		dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
 	}
-
-	return 0;
 }
 
 enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
index 75d8966..1bc551e 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.h
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -257,9 +257,13 @@
 
 };
 
+#define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE)
+#define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header))
+#define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64))
+
 struct isci_host;
 
-int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
+void sci_unsolicited_frame_control_construct(struct isci_host *ihost);
 
 enum sci_status sci_unsolicited_frame_control_get_header(
 	struct sci_unsolicited_frame_control *uf_control,
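The new SCI_UFI_* macros pin down the layout of the single unsolicited-frame allocation: frame buffers first, then headers, then the 64-bit address table, which is exactly how sci_unsolicited_frame_control_construct() derives its offsets above. A small sketch of that offset arithmetic; the element counts and sizes below are placeholders, not the real SCU_* constants.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_UF      16   /* placeholder for SCU_MAX_UNSOLICITED_FRAMES */
	#define UF_BUF_SIZE 1024 /* placeholder for SCU_UNSOLICITED_FRAME_BUFFER_SIZE */
	#define UF_HDR_SIZE 64   /* placeholder per-frame header size */

	int main(void)
	{
		size_t buf_len = MAX_UF * UF_BUF_SIZE;        /* ~ SCI_UFI_BUF_SIZE */
		size_t hdr_len = MAX_UF * UF_HDR_SIZE;        /* ~ SCI_UFI_HDR_SIZE */
		size_t tbl_len = MAX_UF * sizeof(uint64_t);   /* address table */
		size_t total   = buf_len + hdr_len + tbl_len; /* ~ SCI_UFI_TOTAL_SIZE */

		printf("buffers  at offset 0, %zu bytes\n", buf_len);
		printf("headers  at offset %zu, %zu bytes\n", buf_len, hdr_len);
		printf("addr tbl at offset %zu, %zu bytes\n", buf_len + hdr_len, tbl_len);
		printf("total allocation: %zu bytes\n", total);
		return 0;
	}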
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 453a740..9220861 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -662,7 +662,7 @@
 
 	/* setup Socket parameters */
 	sk = sock->sk;
-	sk->sk_reuse = 1;
+	sk->sk_reuse = SK_CAN_REUSE;
 	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
 	sk->sk_allocation = GFP_ATOMIC;
 
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index ef9560d..c1402fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -648,6 +648,7 @@
 	lport->tt.fcp_abort_io(lport);
 	lport->tt.disc_stop_final(lport);
 	lport->tt.exch_mgr_reset(lport, 0, 0);
+	cancel_delayed_work_sync(&lport->retry_work);
 	fc_fc4_del_lport(lport);
 	return 0;
 }
@@ -1564,7 +1565,6 @@
 
 	switch (lport->state) {
 	case LPORT_ST_DISABLED:
-		WARN_ON(1);
 		break;
 	case LPORT_ST_READY:
 		break;
@@ -1742,17 +1742,19 @@
 
 	mfs = ntohs(flp->fl_csp.sp_bb_data) &
 		FC_SP_BB_DATA_MASK;
-	if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
-	    mfs <= lport->mfs) {
-		lport->mfs = mfs;
-		fc_host_maxframe_size(lport->host) = mfs;
-	} else {
+
+	if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
 		FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
 			     "lport->mfs:%hu\n", mfs, lport->mfs);
 		fc_lport_error(lport, fp);
 		goto err;
 	}
 
+	if (mfs <= lport->mfs) {
+		lport->mfs = mfs;
+		fc_host_maxframe_size(lport->host) = mfs;
+	}
+
 	csp_flags = ntohs(flp->fl_csp.sp_features);
 	r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
 	e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
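The reworked FLOGI response handling rejects an mfs outside the spec range outright and otherwise only ever shrinks the local maximum frame size. A minimal sketch of that validate-then-clamp rule, with placeholder range constants rather than the fc_fs.h values:

	#include <stdio.h>

	#define MIN_MAX_PAYLOAD 256  /* placeholder for FC_SP_MIN_MAX_PAYLOAD */
	#define MAX_MAX_PAYLOAD 2112 /* placeholder for FC_SP_MAX_MAX_PAYLOAD */

	static int apply_flogi_mfs(unsigned int *lport_mfs, unsigned int mfs)
	{
		if (mfs < MIN_MAX_PAYLOAD || mfs > MAX_MAX_PAYLOAD)
			return -1;        /* bad response: error out, keep lport_mfs */
		if (mfs <= *lport_mfs)
			*lport_mfs = mfs; /* never grow beyond the local limit */
		return 0;
	}

	int main(void)
	{
		unsigned int lport_mfs = 2048;
		int rc;

		rc = apply_flogi_mfs(&lport_mfs, 4096); /* out of range: rejected */
		printf("mfs=4096 -> rc=%d, lport_mfs=%u\n", rc, lport_mfs);

		rc = apply_flogi_mfs(&lport_mfs, 1024); /* in range and smaller: applied */
		printf("mfs=1024 -> rc=%d, lport_mfs=%u\n", rc, lport_mfs);
		return 0;
	}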
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index bc0cecc..441d88a 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -546,11 +546,12 @@
 	.port_ops = &sas_sata_ops
 };
 
-int sas_ata_init_host_and_port(struct domain_device *found_dev)
+int sas_ata_init(struct domain_device *found_dev)
 {
 	struct sas_ha_struct *ha = found_dev->port->ha;
 	struct Scsi_Host *shost = ha->core.shost;
 	struct ata_port *ap;
+	int rc;
 
 	ata_host_init(&found_dev->sata_dev.ata_host,
 		      ha->dev,
@@ -567,8 +568,11 @@
 	ap->private_data = found_dev;
 	ap->cbl = ATA_CBL_SATA;
 	ap->scsi_host = shost;
-	/* publish initialized ata port */
-	smp_wmb();
+	rc = ata_sas_port_init(ap);
+	if (rc) {
+		ata_sas_port_destroy(ap);
+		return rc;
+	}
 	found_dev->sata_dev.ap = ap;
 
 	return 0;
@@ -648,18 +652,13 @@
 void sas_probe_sata(struct asd_sas_port *port)
 {
 	struct domain_device *dev, *n;
-	int err;
 
 	mutex_lock(&port->ha->disco_mutex);
-	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
+	list_for_each_entry(dev, &port->disco_list, disco_list_node) {
 		if (!dev_is_sata(dev))
 			continue;
 
-		err = sas_ata_init_host_and_port(dev);
-		if (err)
-			sas_fail_probe(dev, __func__, err);
-		else
-			ata_sas_async_port_init(dev->sata_dev.ap);
+		ata_sas_async_probe(dev->sata_dev.ap);
 	}
 	mutex_unlock(&port->ha->disco_mutex);
 
@@ -718,18 +717,6 @@
 	sas_put_device(dev);
 }
 
-static bool sas_ata_dev_eh_valid(struct domain_device *dev)
-{
-	struct ata_port *ap;
-
-	if (!dev_is_sata(dev))
-		return false;
-	ap = dev->sata_dev.ap;
-	/* consume fully initialized ata ports */
-	smp_rmb();
-	return !!ap;
-}
-
 void sas_ata_strategy_handler(struct Scsi_Host *shost)
 {
 	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
@@ -753,7 +740,7 @@
 
 		spin_lock(&port->dev_list_lock);
 		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
-			if (!sas_ata_dev_eh_valid(dev))
+			if (!dev_is_sata(dev))
 				continue;
 			async_schedule_domain(async_sas_ata_eh, dev, &async);
 		}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 3646796..629a086 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -72,6 +72,7 @@
 	struct asd_sas_phy *phy;
 	struct sas_rphy *rphy;
 	struct domain_device *dev;
+	int rc = -ENODEV;
 
 	dev = sas_alloc_device();
 	if (!dev)
@@ -110,9 +111,16 @@
 
 	sas_init_dev(dev);
 
+	dev->port = port;
 	switch (dev->dev_type) {
-	case SAS_END_DEV:
 	case SATA_DEV:
+		rc = sas_ata_init(dev);
+		if (rc) {
+			rphy = NULL;
+			break;
+		}
+		/* fall through */
+	case SAS_END_DEV:
 		rphy = sas_end_device_alloc(port->port);
 		break;
 	case EDGE_DEV:
@@ -131,19 +139,14 @@
 
 	if (!rphy) {
 		sas_put_device(dev);
-		return -ENODEV;
+		return rc;
 	}
 
-	spin_lock_irq(&port->phy_list_lock);
-	list_for_each_entry(phy, &port->phy_list, port_phy_el)
-		sas_phy_set_target(phy, dev);
-	spin_unlock_irq(&port->phy_list_lock);
 	rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
 	memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
 	sas_fill_in_rphy(dev, rphy);
 	sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
 	port->port_dev = dev;
-	dev->port = port;
 	dev->linkrate = port->linkrate;
 	dev->min_linkrate = port->linkrate;
 	dev->max_linkrate = port->linkrate;
@@ -155,6 +158,7 @@
 	sas_device_set_phy(dev, port->port);
 
 	dev->rphy = rphy;
+	get_device(&dev->rphy->dev);
 
 	if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
 		list_add_tail(&dev->disco_list_node, &port->disco_list);
@@ -164,6 +168,11 @@
 		spin_unlock_irq(&port->dev_list_lock);
 	}
 
+	spin_lock_irq(&port->phy_list_lock);
+	list_for_each_entry(phy, &port->phy_list, port_phy_el)
+		sas_phy_set_target(phy, dev);
+	spin_unlock_irq(&port->phy_list_lock);
+
 	return 0;
 }
 
@@ -205,8 +214,7 @@
 static void sas_probe_devices(struct work_struct *work)
 {
 	struct domain_device *dev, *n;
-	struct sas_discovery_event *ev =
-		container_of(work, struct sas_discovery_event, work);
+	struct sas_discovery_event *ev = to_sas_discovery_event(work);
 	struct asd_sas_port *port = ev->port;
 
 	clear_bit(DISCE_PROBE, &port->disc.pending);
@@ -255,6 +263,9 @@
 {
 	struct domain_device *dev = container_of(kref, typeof(*dev), kref);
 
+	put_device(&dev->rphy->dev);
+	dev->rphy = NULL;
+
 	if (dev->parent)
 		sas_put_device(dev->parent);
 
@@ -291,8 +302,7 @@
 static void sas_destruct_devices(struct work_struct *work)
 {
 	struct domain_device *dev, *n;
-	struct sas_discovery_event *ev =
-		container_of(work, struct sas_discovery_event, work);
+	struct sas_discovery_event *ev = to_sas_discovery_event(work);
 	struct asd_sas_port *port = ev->port;
 
 	clear_bit(DISCE_DESTRUCT, &port->disc.pending);
@@ -302,7 +312,6 @@
 
 		sas_remove_children(&dev->rphy->dev);
 		sas_rphy_delete(dev->rphy);
-		dev->rphy = NULL;
 		sas_unregister_common_dev(port, dev);
 	}
 }
@@ -314,11 +323,11 @@
 		/* this rphy never saw sas_rphy_add */
 		list_del_init(&dev->disco_list_node);
 		sas_rphy_free(dev->rphy);
-		dev->rphy = NULL;
 		sas_unregister_common_dev(port, dev);
+		return;
 	}
 
-	if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
+	if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
 		sas_rphy_unlink(dev->rphy);
 		list_move_tail(&dev->disco_list_node, &port->destroy_list);
 		sas_discover_event(dev->port, DISCE_DESTRUCT);
@@ -377,8 +386,7 @@
 {
 	struct domain_device *dev;
 	int error = 0;
-	struct sas_discovery_event *ev =
-		container_of(work, struct sas_discovery_event, work);
+	struct sas_discovery_event *ev = to_sas_discovery_event(work);
 	struct asd_sas_port *port = ev->port;
 
 	clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);
@@ -419,8 +427,6 @@
 
 	if (error) {
 		sas_rphy_free(dev->rphy);
-		dev->rphy = NULL;
-
 		list_del_init(&dev->disco_list_node);
 		spin_lock_irq(&port->dev_list_lock);
 		list_del_init(&dev->dev_list_node);
@@ -437,8 +443,7 @@
 static void sas_revalidate_domain(struct work_struct *work)
 {
 	int res = 0;
-	struct sas_discovery_event *ev =
-		container_of(work, struct sas_discovery_event, work);
+	struct sas_discovery_event *ev = to_sas_discovery_event(work);
 	struct asd_sas_port *port = ev->port;
 	struct sas_ha_struct *ha = port->ha;
 
@@ -466,21 +471,25 @@
 
 /* ---------- Events ---------- */
 
-static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work)
+static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
 {
-	/* chained work is not subject to SA_HA_DRAINING or SAS_HA_REGISTERED */
-	scsi_queue_work(ha->core.shost, work);
+	/* chained work is not subject to SAS_HA_DRAINING or
+	 * SAS_HA_REGISTERED, because it is either submitted in the
+	 * workqueue, or known to be submitted from a context that is
+	 * not racing against draining
+	 */
+	scsi_queue_work(ha->core.shost, &sw->work);
 }
 
 static void sas_chain_event(int event, unsigned long *pending,
-			    struct work_struct *work,
+			    struct sas_work *sw,
 			    struct sas_ha_struct *ha)
 {
 	if (!test_and_set_bit(event, pending)) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&ha->state_lock, flags);
-		sas_chain_work(ha, work);
+		sas_chain_work(ha, sw);
 		spin_unlock_irqrestore(&ha->state_lock, flags);
 	}
 }
@@ -519,7 +528,7 @@
 
 	disc->pending = 0;
 	for (i = 0; i < DISC_NUM_EVENTS; i++) {
-		INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
+		INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
 		disc->disc_work[i].port = port;
 	}
 }
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 16639bb..4e4292d 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -27,19 +27,21 @@
 #include "sas_internal.h"
 #include "sas_dump.h"
 
-void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work)
+void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
 {
 	if (!test_bit(SAS_HA_REGISTERED, &ha->state))
 		return;
 
-	if (test_bit(SAS_HA_DRAINING, &ha->state))
-		list_add(&work->entry, &ha->defer_q);
-	else
-		scsi_queue_work(ha->core.shost, work);
+	if (test_bit(SAS_HA_DRAINING, &ha->state)) {
+		/* add it to the defer list, if not already pending */
+		if (list_empty(&sw->drain_node))
+			list_add(&sw->drain_node, &ha->defer_q);
+	} else
+		scsi_queue_work(ha->core.shost, &sw->work);
 }
 
 static void sas_queue_event(int event, unsigned long *pending,
-			    struct work_struct *work,
+			    struct sas_work *work,
 			    struct sas_ha_struct *ha)
 {
 	if (!test_and_set_bit(event, pending)) {
@@ -55,7 +57,7 @@
 void __sas_drain_work(struct sas_ha_struct *ha)
 {
 	struct workqueue_struct *wq = ha->core.shost->work_q;
-	struct work_struct *w, *_w;
+	struct sas_work *sw, *_sw;
 
 	set_bit(SAS_HA_DRAINING, &ha->state);
 	/* flush submitters */
@@ -66,9 +68,9 @@
 
 	spin_lock_irq(&ha->state_lock);
 	clear_bit(SAS_HA_DRAINING, &ha->state);
-	list_for_each_entry_safe(w, _w, &ha->defer_q, entry) {
-		list_del_init(&w->entry);
-		sas_queue_work(ha, w);
+	list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
+		list_del_init(&sw->drain_node);
+		sas_queue_work(ha, sw);
 	}
 	spin_unlock_irq(&ha->state_lock);
 }
@@ -151,7 +153,7 @@
 	int i;
 
 	for (i = 0; i < HA_NUM_EVENTS; i++) {
-		INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
+		INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
 		sas_ha->ha_events[i].ha = sas_ha;
 	}
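sas_queue_work() now parks deferred work on ha->defer_q at most once per item, keyed off list_empty(&sw->drain_node), and __sas_drain_work() requeues everything once the drain clears. A self-contained sketch of that defer-while-draining pattern; the singly linked list and names below stand in for list_head and the real libsas structures.

	#include <stdbool.h>
	#include <stdio.h>

	struct sketch_work {
		const char *name;
		struct sketch_work *drain_next; /* next entry on the defer list */
		bool on_defer_list;             /* ~ !list_empty(&sw->drain_node) */
	};

	static struct sketch_work *defer_q;
	static bool ha_draining = true;         /* ~ SAS_HA_DRAINING */

	static void queue_work_now(struct sketch_work *sw)
	{
		printf("queued: %s\n", sw->name);
	}

	static void sketch_queue_work(struct sketch_work *sw)
	{
		if (ha_draining) {
			if (!sw->on_defer_list) {   /* defer each item at most once */
				sw->drain_next = defer_q;
				defer_q = sw;
				sw->on_defer_list = true;
			}
			return;
		}
		queue_work_now(sw);
	}

	static void sketch_drain_complete(void)
	{
		ha_draining = false;
		while (defer_q) {                   /* requeue everything deferred */
			struct sketch_work *sw = defer_q;

			defer_q = sw->drain_next;
			sw->on_defer_list = false;
			sketch_queue_work(sw);
		}
	}

	int main(void)
	{
		struct sketch_work phy_ev = { .name = "phy event" };
		struct sketch_work port_ev = { .name = "port event" };

		sketch_queue_work(&phy_ev);  /* deferred */
		sketch_queue_work(&phy_ev);  /* already pending: not queued twice */
		sketch_queue_work(&port_ev); /* deferred */
		sketch_drain_complete();     /* each event runs exactly once */
		return 0;
	}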
 
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 05acd9e..caa0525 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -202,6 +202,7 @@
 	u8 sas_addr[SAS_ADDR_SIZE];
 	struct smp_resp *resp = rsp;
 	struct discover_resp *dr = &resp->disc;
+	struct sas_ha_struct *ha = dev->port->ha;
 	struct expander_device *ex = &dev->ex_dev;
 	struct ex_phy *phy = &ex->ex_phy[phy_id];
 	struct sas_rphy *rphy = dev->rphy;
@@ -209,6 +210,8 @@
 	char *type;
 
 	if (new_phy) {
+		if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)))
+			return;
 		phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
 
 		/* FIXME: error_handling */
@@ -233,6 +236,8 @@
 	memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
 
 	phy->attached_dev_type = to_dev_type(dr);
+	if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
+		goto out;
 	phy->phy_id = phy_id;
 	phy->linkrate = dr->linkrate;
 	phy->attached_sata_host = dr->attached_sata_host;
@@ -240,7 +245,14 @@
 	phy->attached_sata_ps   = dr->attached_sata_ps;
 	phy->attached_iproto = dr->iproto << 1;
 	phy->attached_tproto = dr->tproto << 1;
-	memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
+	/* help some expanders that fail to zero sas_address in the 'no
+	 * device' case
+	 */
+	if (phy->attached_dev_type == NO_DEVICE ||
+	    phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
+		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+	else
+		memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
 	phy->attached_phy_id = dr->attached_phy_id;
 	phy->phy_change_count = dr->change_count;
 	phy->routing_attr = dr->routing_attr;
@@ -266,6 +278,7 @@
 			return;
 		}
 
+ out:
 	switch (phy->attached_dev_type) {
 	case SATA_PENDING:
 		type = "stp pending";
@@ -304,7 +317,15 @@
 	else
 		return;
 
-	SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
+	/* if the attached device type changed and ata_eh is active,
+	 * make sure we run revalidation when eh completes (see:
+	 * sas_enable_revalidation)
+	 */
+	if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
+		set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending);
+
+	SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
+		    test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
 		    SAS_ADDR(dev->sas_addr), phy->phy_id,
 		    sas_route_char(dev, phy), phy->linkrate,
 		    SAS_ADDR(phy->attached_sas_addr), type);
@@ -776,13 +797,16 @@
 		if (res)
 			goto out_free;
 
+		sas_init_dev(child);
+		res = sas_ata_init(child);
+		if (res)
+			goto out_free;
 		rphy = sas_end_device_alloc(phy->port);
-		if (unlikely(!rphy))
+		if (!rphy)
 			goto out_free;
 
-		sas_init_dev(child);
-
 		child->rphy = rphy;
+		get_device(&rphy->dev);
 
 		list_add_tail(&child->disco_list_node, &parent->port->disco_list);
 
@@ -806,6 +830,7 @@
 		sas_init_dev(child);
 
 		child->rphy = rphy;
+		get_device(&rphy->dev);
 		sas_fill_in_rphy(child, rphy);
 
 		list_add_tail(&child->disco_list_node, &parent->port->disco_list);
@@ -830,8 +855,6 @@
 
  out_list_del:
 	sas_rphy_free(child->rphy);
-	child->rphy = NULL;
-
 	list_del(&child->disco_list_node);
 	spin_lock_irq(&parent->port->dev_list_lock);
 	list_del(&child->dev_list_node);
@@ -911,6 +934,7 @@
 	}
 	port = parent->port;
 	child->rphy = rphy;
+	get_device(&rphy->dev);
 	edev = rphy_to_expander_device(rphy);
 	child->dev_type = phy->attached_dev_type;
 	kref_get(&parent->kref);
@@ -934,6 +958,7 @@
 
 	res = sas_discover_expander(child);
 	if (res) {
+		sas_rphy_delete(rphy);
 		spin_lock_irq(&parent->port->dev_list_lock);
 		list_del(&child->dev_list_node);
 		spin_unlock_irq(&parent->port->dev_list_lock);
@@ -1718,9 +1743,17 @@
 		int phy_change_count = 0;
 
 		res = sas_get_phy_change_count(dev, i, &phy_change_count);
-		if (res)
-			goto out;
-		else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
+		switch (res) {
+		case SMP_RESP_PHY_VACANT:
+		case SMP_RESP_NO_PHY:
+			continue;
+		case SMP_RESP_FUNC_ACC:
+			break;
+		default:
+			return res;
+		}
+
+		if (phy_change_count != ex->ex_phy[i].phy_change_count) {
 			if (update)
 				ex->ex_phy[i].phy_change_count =
 					phy_change_count;
@@ -1728,8 +1761,7 @@
 			return 0;
 		}
 	}
-out:
-	return res;
+	return 0;
 }
 
 static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 120bff6..10cb5ae 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -94,8 +94,7 @@
 
 void sas_hae_reset(struct work_struct *work)
 {
-	struct sas_ha_event *ev =
-		container_of(work, struct sas_ha_event, work);
+	struct sas_ha_event *ev = to_sas_ha_event(work);
 	struct sas_ha_struct *ha = ev->ha;
 
 	clear_bit(HAE_RESET, &ha->pending);
@@ -369,14 +368,14 @@
 
 static void phy_reset_work(struct work_struct *work)
 {
-	struct sas_phy_data *d = container_of(work, typeof(*d), reset_work);
+	struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);
 
 	d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
 }
 
 static void phy_enable_work(struct work_struct *work)
 {
-	struct sas_phy_data *d = container_of(work, typeof(*d), enable_work);
+	struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);
 
 	d->enable_result = sas_phy_enable(d->phy, d->enable);
 }
@@ -389,8 +388,8 @@
 		return -ENOMEM;
 
 	mutex_init(&d->event_lock);
-	INIT_WORK(&d->reset_work, phy_reset_work);
-	INIT_WORK(&d->enable_work, phy_enable_work);
+	INIT_SAS_WORK(&d->reset_work, phy_reset_work);
+	INIT_SAS_WORK(&d->enable_work, phy_enable_work);
 	d->phy = phy;
 	phy->hostdata = d;
 
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index f05c638..507e4cf 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -45,10 +45,10 @@
 	struct mutex event_lock;
 	int hard_reset;
 	int reset_result;
-	struct work_struct reset_work;
+	struct sas_work reset_work;
 	int enable;
 	int enable_result;
-	struct work_struct enable_work;
+	struct sas_work enable_work;
 };
 
 void sas_scsi_recover_host(struct Scsi_Host *shost);
@@ -80,7 +80,7 @@
 void sas_porte_link_reset_err(struct work_struct *work);
 void sas_porte_timer_event(struct work_struct *work);
 void sas_porte_hard_reset(struct work_struct *work);
-void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work);
+void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
 
 int sas_notify_lldd_dev_found(struct domain_device *);
 void sas_notify_lldd_dev_gone(struct domain_device *);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index dcfd4a9..521422e 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -32,8 +32,7 @@
 
 static void sas_phye_loss_of_signal(struct work_struct *work)
 {
-	struct asd_sas_event *ev =
-		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
 	clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
@@ -43,8 +42,7 @@
 
 static void sas_phye_oob_done(struct work_struct *work)
 {
-	struct asd_sas_event *ev =
-		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
 	clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
@@ -53,8 +51,7 @@
 
 static void sas_phye_oob_error(struct work_struct *work)
 {
-	struct asd_sas_event *ev =
-		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 	struct sas_ha_struct *sas_ha = phy->ha;
 	struct asd_sas_port *port = phy->port;
@@ -85,8 +82,7 @@
 
 static void sas_phye_spinup_hold(struct work_struct *work)
 {
-	struct asd_sas_event *ev =
-		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 	struct sas_ha_struct *sas_ha = phy->ha;
 	struct sas_internal *i =
@@ -127,14 +123,12 @@
 		phy->error = 0;
 		INIT_LIST_HEAD(&phy->port_phy_el);
 		for (k = 0; k < PORT_NUM_EVENTS; k++) {
-			INIT_WORK(&phy->port_events[k].work,
-				  sas_port_event_fns[k]);
+			INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
 			phy->port_events[k].phy = phy;
 		}
 
 		for (k = 0; k < PHY_NUM_EVENTS; k++) {
-			INIT_WORK(&phy->phy_events[k].work,
-				  sas_phy_event_fns[k]);
+			INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
 			phy->phy_events[k].phy = phy;
 		}
 
@@ -144,8 +138,7 @@
 		spin_lock_init(&phy->sas_prim_lock);
 		phy->frame_rcvd_size = 0;
 
-		phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev,
-					 i);
+		phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i);
 		if (!phy->phy)
 			return -ENOMEM;
 
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index eb19c01..e884a8c 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -123,7 +123,7 @@
 	spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
 
 	if (!port->port) {
-		port->port = sas_port_alloc(phy->phy->dev.parent, phy->id);
+		port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
 		BUG_ON(!port->port);
 		sas_port_add(port->port);
 	}
@@ -208,8 +208,7 @@
 
 void sas_porte_bytes_dmaed(struct work_struct *work)
 {
-	struct asd_sas_event *ev =
-		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
 	clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
@@ -219,8 +218,7 @@
 
 void sas_porte_broadcast_rcvd(struct work_struct *work)
 {
-	struct asd_sas_event *ev =
-		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 	unsigned long flags;
 	u32 prim;
@@ -237,8 +235,7 @@
 
 void sas_porte_link_reset_err(struct work_struct *work)
 {
-	struct asd_sas_event *ev =
-		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
 	clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
@@ -248,8 +245,7 @@
 
 void sas_porte_timer_event(struct work_struct *work)
 {
-	struct asd_sas_event *ev =
-		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
 	clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
@@ -259,8 +255,7 @@
 
 void sas_porte_hard_reset(struct work_struct *work)
 {
-	struct asd_sas_event *ev =
-		container_of(work, struct asd_sas_event, work);
+	struct asd_sas_event *ev = to_asd_sas_event(work);
 	struct asd_sas_phy *phy = ev->phy;
 
 	clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
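The handlers above recover their event from the work_struct handed in by the workqueue via to_asd_sas_event()/to_sas_discovery_event()/to_sas_ha_event(); these are presumably container_of() wrappers over the sas_work now embedded in each event type. A self-contained sketch of that recovery step; the struct layout and helper name are illustrative, not the libsas.h definitions.

	#include <stddef.h>
	#include <stdio.h>

	struct work_struct { int pending; };   /* stand-in for the kernel type */

	struct sas_work {
		struct work_struct work;       /* what the workqueue hands back */
	};

	struct sketch_sas_event {
		struct sas_work work;          /* embedded, as in the new libsas */
		const char *phy_name;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct sketch_sas_event *to_sketch_event(struct work_struct *w)
	{
		return container_of(w, struct sketch_sas_event, work.work);
	}

	int main(void)
	{
		struct sketch_sas_event ev = { .phy_name = "phy0" };
		struct work_struct *w = &ev.work.work; /* what a handler receives */

		printf("recovered event for %s\n", to_sketch_event(w)->phy_name);
		return 0;
	}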
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 3a1ffdd..e5da6da 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -93,6 +93,9 @@
 /* lpfc wait event data ready flag */
 #define LPFC_DATA_READY		(1<<0)
 
+/* queue dump line buffer size */
+#define LPFC_LBUF_SZ		128
+
 enum lpfc_polling_flags {
 	ENABLE_FCP_RING_POLLING = 0x1,
 	DISABLE_FCP_RING_INT    = 0x2
@@ -620,6 +623,7 @@
 #define HBA_AER_ENABLED		0x1000 /* AER enabled with HBA */
 #define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */
 #define HBA_RRQ_ACTIVE		0x4000 /* process the rrq active list */
+#define HBA_FCP_IOQ_FLUSH	0x8000 /* FCP I/O queues being flushed */
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
 
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 141e4b4..253d9a8 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009-2011 Emulex.  All rights reserved.           *
+ * Copyright (C) 2009-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -599,6 +599,7 @@
 
 	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
 	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context_un.ndlp = ndlp;
 	cmdiocbq->context2 = rspiocbq;
 	dd_data->type = TYPE_IOCB;
 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -3978,6 +3979,7 @@
 		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
 			switch (opcode) {
 			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
+			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 						"3106 Handled SLI_CONFIG "
 						"subsys_comn, opcode:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index edfe61fc..67f7d0a 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2010 Emulex.  All rights reserved.                *
+ * Copyright (C) 2010-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -249,6 +249,7 @@
 #define COMN_OPCODE_READ_OBJECT_LIST	0xAD
 #define COMN_OPCODE_DELETE_OBJECT	0xAE
 #define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES	0x79
+#define COMN_OPCODE_GET_CNTL_ATTRIBUTES	0x20
 	uint32_t timeout;
 	uint32_t request_length;
 	uint32_t word9;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 330dd71..9b2a16f 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -254,6 +254,7 @@
 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
 			struct lpfc_sli_ring *, uint32_t);
 
+struct lpfc_iocbq *__lpfc_sli_get_iocbq(struct lpfc_hba *);
 struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
 void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
 uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
@@ -460,6 +461,7 @@
 int lpfc_issue_reg_vfi(struct lpfc_vport *);
 int lpfc_issue_unreg_vfi(struct lpfc_vport *);
 int lpfc_selective_reset(struct lpfc_hba *);
-int lpfc_sli4_read_config(struct lpfc_hba *phba);
-int lpfc_scsi_buf_update(struct lpfc_hba *phba);
-void lpfc_sli4_node_prep(struct lpfc_hba *phba);
+int lpfc_sli4_read_config(struct lpfc_hba *);
+void lpfc_sli4_node_prep(struct lpfc_hba *);
+int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
+void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index af04b0d..3217d63 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -4466,3 +4466,49 @@
 #endif
 	return;
 }
+
+/*
+ * Driver debug utility routines outside of debugfs. The debug utility
+ * routines implemented here are intended to be used in the instrumented
+ * debug driver for debugging host or port issues.
+ */
+
+/**
+ * lpfc_debug_dump_all_queues - dump all the queues associated with an HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps entries of all the queues associated with the @phba.
+ **/
+void
+lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
+{
+	int fcp_wqidx;
+
+	/*
+	 * Dump Work Queues (WQs)
+	 */
+	lpfc_debug_dump_mbx_wq(phba);
+	lpfc_debug_dump_els_wq(phba);
+
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+		lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
+
+	lpfc_debug_dump_hdr_rq(phba);
+	lpfc_debug_dump_dat_rq(phba);
+	/*
+	 * Dump Complete Queues (CQs)
+	 */
+	lpfc_debug_dump_mbx_cq(phba);
+	lpfc_debug_dump_els_cq(phba);
+
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+		lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
+
+	/*
+	 * Dump Event Queues (EQs)
+	 */
+	lpfc_debug_dump_sp_eq(phba);
+
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+		lpfc_debug_dump_fcp_eq(phba, fcp_wqidx);
+}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index f83bd94..616c400 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -267,3 +267,421 @@
 #define LPFC_DISC_TRC_DISCOVERY		0xef    /* common mask for general
 						 * discovery */
 #endif /* H_LPFC_DEBUG_FS */
+
+
+/*
+ * Driver debug utility routines outside of debugfs. The debug utility
+ * routines implemented here are intended to be used in the instrumented
+ * debug driver for debugging host or port issues.
+ */
+
+/**
+ * lpfc_debug_dump_qe - dump a specific entry from a queue
+ * @q: Pointer to the queue descriptor.
+ * @idx: Index to the entry on the queue.
+ *
+ * This function dumps an entry indexed by @idx from a queue specified by the
+ * queue descriptor @q.
+ **/
+static inline void
+lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
+{
+	char line_buf[LPFC_LBUF_SZ];
+	int i, esize, qe_word_cnt, len;
+	uint32_t *pword;
+
+	/* sanity checks */
+	if (!q)
+		return;
+	if (idx >= q->entry_count)
+		return;
+
+	esize = q->entry_size;
+	qe_word_cnt = esize / sizeof(uint32_t);
+	pword = q->qe[idx].address;
+
+	len = 0;
+	len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "QE[%04d]: ", idx);
+	if (qe_word_cnt > 8)
+		printk(KERN_ERR "%s\n", line_buf);
+
+	for (i = 0; i < qe_word_cnt; i++) {
+		if (!(i % 8)) {
+			if (i != 0)
+				printk(KERN_ERR "%s\n", line_buf);
+			if (qe_word_cnt > 8) {
+				len = 0;
+				memset(line_buf, 0, LPFC_LBUF_SZ);
+				len += snprintf(line_buf+len, LPFC_LBUF_SZ-len,
+						"%03d: ", i);
+			}
+		}
+		len += snprintf(line_buf+len, LPFC_LBUF_SZ-len, "%08x ",
+				((uint32_t)*pword) & 0xffffffff);
+		pword++;
+	}
+	if (qe_word_cnt <= 8 || (i - 1) % 8)
+		printk(KERN_ERR "%s\n", line_buf);
+}
+
+/**
+ * lpfc_debug_dump_q - dump all entries from a specific queue
+ * @q: Pointer to the queue descriptor.
+ *
+ * This function dumps all entries from a queue specified by the queue
+ * descriptor @q.
+ **/
+static inline void
+lpfc_debug_dump_q(struct lpfc_queue *q)
+{
+	int idx, entry_count;
+
+	/* sanity check */
+	if (!q)
+		return;
+
+	dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev,
+		"%d: [qid:%d, type:%d, subtype:%d, "
+		"qe_size:%d, qe_count:%d, "
+		"host_index:%d, port_index:%d]\n",
+		(q->phba)->brd_no,
+		q->queue_id, q->type, q->subtype,
+		q->entry_size, q->entry_count,
+		q->host_index, q->hba_index);
+	entry_count = q->entry_count;
+	for (idx = 0; idx < entry_count; idx++)
+		lpfc_debug_dump_qe(q, idx);
+	printk(KERN_ERR "\n");
+}
+
+/**
+ * lpfc_debug_dump_fcp_wq - dump all entries from a fcp work queue
+ * @phba: Pointer to HBA context object.
+ * @fcp_wqidx: Index to a FCP work queue.
+ *
+ * This function dumps all entries from a FCP work queue specified by the
+ * @fcp_wqidx.
+ **/
+static inline void
+lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
+{
+	/* sanity check */
+	if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+		return;
+
+	printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
+		fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id);
+	lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[fcp_wqidx]);
+}
+
+/**
+ * lpfc_debug_dump_fcp_cq - dump all entries from a fcp work queue's cmpl queue
+ * @phba: Pointer to HBA context object.
+ * @fcp_wqidx: Index to a FCP work queue.
+ *
+ * This function dumps all entries from a FCP complete queue which is
+ * associated to the FCP work queue specified by the @fcp_wqidx.
+ **/
+static inline void
+lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
+{
+	int fcp_cqidx, fcp_cqid;
+
+	/* sanity check */
+	if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+		return;
+
+	fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
+	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+		if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
+			break;
+	if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+		return;
+
+	printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n",
+		fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+		fcp_cqidx, fcp_cqid);
+	lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[fcp_cqidx]);
+}
+
+/**
+ * lpfc_debug_dump_fcp_eq - dump all entries from a fcp work queue's evt queue
+ * @phba: Pointer to HBA context object.
+ * @fcp_wqidx: Index to a FCP work queue.
+ *
+ * This function dumps all entries from a FCP event queue which is
+ * associated to the FCP work queue specified by the @fcp_wqidx.
+ **/
+static inline void
+lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
+{
+	struct lpfc_queue *qdesc;
+	int fcp_eqidx, fcp_eqid;
+	int fcp_cqidx, fcp_cqid;
+
+	/* sanity check */
+	if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+		return;
+	fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
+	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+		if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
+			break;
+	if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+		return;
+
+	if (phba->cfg_fcp_eq_count == 0) {
+		fcp_eqidx = -1;
+		fcp_eqid = phba->sli4_hba.sp_eq->queue_id;
+		qdesc = phba->sli4_hba.sp_eq;
+	} else {
+		fcp_eqidx = fcp_cqidx;
+		fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id;
+		qdesc = phba->sli4_hba.fp_eq[fcp_eqidx];
+	}
+
+	printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
+		"EQ[Idx:%d|Qid:%d]\n",
+		fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+		fcp_cqidx, fcp_cqid, fcp_eqidx, fcp_eqid);
+	lpfc_debug_dump_q(qdesc);
+}
+
+/**
+ * lpfc_debug_dump_els_wq - dump all entries from the els work queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the ELS work queue.
+ **/
+static inline void
+lpfc_debug_dump_els_wq(struct lpfc_hba *phba)
+{
+	printk(KERN_ERR "ELS WQ: WQ[Qid:%d]:\n",
+		phba->sli4_hba.els_wq->queue_id);
+	lpfc_debug_dump_q(phba->sli4_hba.els_wq);
+}
+
+/**
+ * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the MBOX work queue.
+ **/
+static inline void
+lpfc_debug_dump_mbx_wq(struct lpfc_hba *phba)
+{
+	printk(KERN_ERR "MBX WQ: WQ[Qid:%d]\n",
+		phba->sli4_hba.mbx_wq->queue_id);
+	lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
+}
+
+/**
+ * lpfc_debug_dump_dat_rq - dump all entries from the receive data queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the receive data queue.
+ **/
+static inline void
+lpfc_debug_dump_dat_rq(struct lpfc_hba *phba)
+{
+	printk(KERN_ERR "DAT RQ: RQ[Qid:%d]\n",
+		phba->sli4_hba.dat_rq->queue_id);
+	lpfc_debug_dump_q(phba->sli4_hba.dat_rq);
+}
+
+/**
+ * lpfc_debug_dump_hdr_rq - dump all entries from the receive header queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the receive header queue.
+ **/
+static inline void
+lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba)
+{
+	printk(KERN_ERR "HDR RQ: RQ[Qid:%d]\n",
+		phba->sli4_hba.hdr_rq->queue_id);
+	lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
+}
+
+/**
+ * lpfc_debug_dump_els_cq - dump all entries from the els complete queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the els complete queue.
+ **/
+static inline void
+lpfc_debug_dump_els_cq(struct lpfc_hba *phba)
+{
+	printk(KERN_ERR "ELS CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
+		phba->sli4_hba.els_wq->queue_id,
+		phba->sli4_hba.els_cq->queue_id);
+	lpfc_debug_dump_q(phba->sli4_hba.els_cq);
+}
+
+/**
+ * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the mbox complete queue.
+ **/
+static inline void
+lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
+{
+	printk(KERN_ERR "MBX CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
+		phba->sli4_hba.mbx_wq->queue_id,
+		phba->sli4_hba.mbx_cq->queue_id);
+	lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
+}
+
+/**
+ * lpfc_debug_dump_sp_eq - dump all entries from the slow-path event queue
+ * @phba: Pointer to HBA context object.
+ *
+ * This function dumps all entries from the slow-path event queue.
+ **/
+static inline void
+lpfc_debug_dump_sp_eq(struct lpfc_hba *phba)
+{
+	printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->"
+		"EQ[Qid:%d]:\n",
+		phba->sli4_hba.mbx_wq->queue_id,
+		phba->sli4_hba.els_wq->queue_id,
+		phba->sli4_hba.mbx_cq->queue_id,
+		phba->sli4_hba.els_cq->queue_id,
+		phba->sli4_hba.sp_eq->queue_id);
+	lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
+}
+
+/**
+ * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Work queue identifier.
+ *
+ * This function dumps all entries from a work queue identified by the queue
+ * identifier.
+ **/
+static inline void
+lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
+{
+	int wq_idx;
+
+	for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++)
+		if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
+			break;
+	if (wq_idx < phba->cfg_fcp_wq_count) {
+		printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
+		return;
+	}
+
+	if (phba->sli4_hba.els_wq->queue_id == qid) {
+		printk(KERN_ERR "ELS WQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.els_wq);
+	}
+}
+
+/**
+ * lpfc_debug_dump_mq_by_id - dump all entries from a mbox queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Mbox work queue identifier.
+ *
+ * This function dumps all entries from a mbox work queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid)
+{
+	if (phba->sli4_hba.mbx_wq->queue_id == qid) {
+		printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
+	}
+}
+
+/**
+ * lpfc_debug_dump_rq_by_id - dump all entries from a receive queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Receive queue identifier.
+ *
+ * This function dumps all entries from a receive queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid)
+{
+	if (phba->sli4_hba.hdr_rq->queue_id == qid) {
+		printk(KERN_ERR "HDR RQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.hdr_rq);
+		return;
+	}
+	if (phba->sli4_hba.dat_rq->queue_id == qid) {
+		printk(KERN_ERR "DAT RQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.dat_rq);
+	}
+}
+
+/**
+ * lpfc_debug_dump_cq_by_id - dump all entries from a cmpl queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Complete queue identifier.
+ *
+ * This function dumps all entries from a complete queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
+{
+	int cq_idx = 0;
+
+	do {
+		if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
+			break;
+	} while (++cq_idx < phba->cfg_fcp_eq_count);
+
+	if (cq_idx < phba->cfg_fcp_eq_count) {
+		printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
+		return;
+	}
+
+	if (phba->sli4_hba.els_cq->queue_id == qid) {
+		printk(KERN_ERR "ELS CQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.els_cq);
+		return;
+	}
+
+	if (phba->sli4_hba.mbx_cq->queue_id == qid) {
+		printk(KERN_ERR "MBX CQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
+	}
+}
+
+/**
+ * lpfc_debug_dump_eq_by_id - dump all entries from an event queue by queue id
+ * @phba: Pointer to HBA context object.
+ * @qid: Event queue identifier.
+ *
+ * This function dumps all entries from an event queue identified by the
+ * queue identifier.
+ **/
+static inline void
+lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
+{
+	int eq_idx;
+
+	for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) {
+		if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid)
+			break;
+	}
+
+	if (eq_idx < phba->cfg_fcp_eq_count) {
+		printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]);
+		return;
+	}
+
+	if (phba->sli4_hba.sp_eq->queue_id == qid) {
+		printk(KERN_ERR "SP EQ[Qid:%d]\n", qid);
+		lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
+	}
+}
+
+void lpfc_debug_dump_all_queues(struct lpfc_hba *);
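
All of the *_by_id helpers above share one dispatch shape: scan the per-index fast-path queue array for a matching queue_id, then fall back to the singleton slow-path queue. The following is a minimal, self-contained sketch of that shape; the types, names and ids are hypothetical, not the lpfc structures.

#include <stdio.h>

struct dbg_queue {
	int queue_id;
	const char *name;
};

/* Scan the fast-path array first, then fall back to the slow-path queue. */
static const struct dbg_queue *
dbg_find_queue_by_id(const struct dbg_queue *fast, int fast_cnt,
		     const struct dbg_queue *slow, int qid)
{
	int i;

	for (i = 0; i < fast_cnt; i++)
		if (fast[i].queue_id == qid)
			return &fast[i];
	if (slow && slow->queue_id == qid)
		return slow;
	return NULL;
}

int main(void)
{
	struct dbg_queue fp[] = { { 10, "FCP EQ0" }, { 11, "FCP EQ1" } };
	struct dbg_queue sp = { 42, "SP EQ" };
	const struct dbg_queue *q = dbg_find_queue_by_id(fp, 2, &sp, 42);

	printf("qid 42 -> %s\n", q ? q->name : "not found");
	return 0;
}

Returning a pointer (or NULL) keeps the found/not-found decision in one place, which is what the index-versus-count comparisons in the dumpers above express.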
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 3407b39..d54ae19 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -230,27 +230,43 @@
 
 	INIT_LIST_HEAD(&pbuflist->list);
 
-	icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
-	icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
-	icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
-	icmd->un.elsreq64.remoteID = did;	/* DID */
 	if (expectRsp) {
+		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
 		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+
+		icmd->un.elsreq64.remoteID = did;		/* DID */
 		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
 		icmd->ulpTimeout = phba->fc_ratov * 2;
 	} else {
-		icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
 		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
 	}
 	icmd->ulpBdeCount = 1;
 	icmd->ulpLe = 1;
 	icmd->ulpClass = CLASS3;
 
-	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
-		icmd->un.elsreq64.myID = vport->fc_myDID;
+	/*
+	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
+	 * For SLI4, since the driver controls VPIs, we also want to include
+	 * all ELS pt2pt protocol traffic.
+	 */
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
+		((phba->sli_rev == LPFC_SLI_REV4) &&
+		    (vport->fc_flag & FC_PT2PT))) {
 
-		/* For ELS_REQUEST64_CR, use the VPI by default */
-		icmd->ulpContext = phba->vpi_ids[vport->vpi];
+		if (expectRsp) {
+			icmd->un.elsreq64.myID = vport->fc_myDID;
+
+			/* For ELS_REQUEST64_CR, use the VPI by default */
+			icmd->ulpContext = phba->vpi_ids[vport->vpi];
+		}
+
 		icmd->ulpCt_h = 0;
 		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
 		if (elscmd == ELS_CMD_ECHO)
@@ -438,9 +454,10 @@
 	int rc = 0;
 
 	sp = &phba->fc_fabparam;
-	/* move forward in case of SLI4 FC port loopback test */
+	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
-	    !(phba->link_flag & LS_LOOPBACK_MODE)) {
+	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
+	    !(vport->fc_flag & FC_PT2PT)) {
 		ndlp = lpfc_findnode_did(vport, Fabric_DID);
 		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 			rc = -ENODEV;
@@ -707,14 +724,17 @@
 			lpfc_sli4_unreg_all_rpis(vport);
 			lpfc_mbx_unreg_vpi(vport);
 			spin_lock_irq(shost->host_lock);
-			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-			/*
-			* If VPI is unreged, driver need to do INIT_VPI
-			* before re-registering
-			*/
 			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
 			spin_unlock_irq(shost->host_lock);
 		}
+
+		/*
+		 * For SLI3 and SLI4, the VPI needs to be reregistered in
+		 * response to this fabric parameter change event.
+		 */
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+		spin_unlock_irq(shost->host_lock);
 	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
 			/*
@@ -817,6 +837,17 @@
 			mempool_free(mbox, phba->mbox_mem_pool);
 			goto fail;
 		}
+
+		/*
+		 * For SLI4, the VFI/VPI are registered AFTER the
+		 * Nport with the higher WWPN sends the PLOGI with
+		 * an assigned NPortId.
+		 */
+
+		/* not equal */
+		if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
+			lpfc_issue_reg_vfi(vport);
+
 		/* Decrement ndlp reference count indicating that ndlp can be
 		 * safely released when other references to it are done.
 		 */
@@ -2972,7 +3003,7 @@
 			 * ABTS we cannot generate and RRQ.
 			 */
 			lpfc_set_rrq_active(phba, ndlp,
-					 cmdiocb->sli4_xritag, 0, 0);
+					 cmdiocb->sli4_lxritag, 0, 0);
 		}
 		break;
 	case IOSTAT_LOCAL_REJECT:
@@ -3803,10 +3834,11 @@
 	/* Xmit ELS ACC response tag <ulpIoTag> */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 			 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
-			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
+			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
+			 "fc_flag x%x\n",
 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
-			 ndlp->nlp_rpi);
+			 ndlp->nlp_rpi, vport->fc_flag);
 	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_LOGO_ACC;
@@ -4936,8 +4968,6 @@
 		return 1;
 	}
 
-	did = Fabric_DID;
-
 	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
 		/* For a FLOGI we accept, then if our portname is greater
 		 * then the remote portname we initiate Nport login.
@@ -4976,26 +5006,82 @@
 			spin_lock_irq(shost->host_lock);
 			vport->fc_flag |= FC_PT2PT_PLOGI;
 			spin_unlock_irq(shost->host_lock);
+
+			/* If we have the high WWPN we can assign our own
+			 * myDID; otherwise, we have to WAIT for a PLOGI
+			 * from the remote NPort to find out what it
+			 * will be.
+			 */
+			vport->fc_myDID = PT2PT_LocalID;
 		}
+
+		/*
+		 * The vport state should go to LPFC_FLOGI only
+		 * AFTER we issue a FLOGI, not receive one.
+		 */
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_PT2PT;
 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
 		spin_unlock_irq(shost->host_lock);
+
+		/*
+		 * We temporarily set fc_myDID to make it look like we are
+		 * a Fabric. This is done just so we end up with the right
+		 * did / sid on the FLOGI ACC rsp.
+		 */
+		did = vport->fc_myDID;
+		vport->fc_myDID = Fabric_DID;
+
 	} else {
 		/* Reject this request because invalid parameters */
 		stat.un.b.lsRjtRsvd0 = 0;
 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
 		stat.un.b.vendorUnique = 0;
+
+		/*
+		 * We temporarily set fc_myDID to make it look like we are
+		 * a Fabric. This is done just so we end up with the right
+		 * did / sid on the FLOGI LS_RJT rsp.
+		 */
+		did = vport->fc_myDID;
+		vport->fc_myDID = Fabric_DID;
+
 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
 			NULL);
+
+	/* Now let's put fc_myDID back to what it's supposed to be */
+		vport->fc_myDID = did;
+
 		return 1;
 	}
 
 	/* Send back ACC */
 	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
 
+	/* Now let's put fc_myDID back to what it's supposed to be */
+	vport->fc_myDID = did;
+
+	if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!mbox)
+			goto fail;
+
+		lpfc_config_link(phba, mbox);
+
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->vport = vport;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			goto fail;
+		}
+	}
+
 	return 0;
+fail:
+	return 1;
 }
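
The pt2pt FLOGI handling above leans on a save/override/restore of vport->fc_myDID, so that the ACC or LS_RJT is built with the fabric well-known address as its did/sid and the real local id comes back afterwards. A standalone sketch of that pattern, with hypothetical types and a stand-in FABRIC_DID constant:

#include <stdio.h>

#define FABRIC_DID 0xfffffe	/* fabric well-known address, for illustration */

struct port_ctx {
	unsigned int my_did;
};

static void send_flogi_rsp(const struct port_ctx *p)
{
	printf("response built with sid 0x%06x\n", p->my_did);
}

static void rcv_flogi(struct port_ctx *p)
{
	unsigned int saved_did = p->my_did;

	/* temporarily look like a fabric so the rsp carries the right did/sid */
	p->my_did = FABRIC_DID;
	send_flogi_rsp(p);

	/* put my_did back to what it is supposed to be */
	p->my_did = saved_did;
}

int main(void)
{
	struct port_ctx p = { .my_did = 0x0000ef };

	rcv_flogi(&p);
	printf("my_did restored to 0x%06x\n", p.my_did);
	return 0;
}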
 
 /**
@@ -5176,7 +5262,6 @@
 	}
 
 	cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
-	mempool_free(pmb, phba->mbox_mem_pool);
 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
 				     lpfc_max_els_tries, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_ACC);
@@ -5184,8 +5269,10 @@
 	/* Decrement the ndlp reference count from previous mbox command */
 	lpfc_nlp_put(ndlp);
 
-	if (!elsiocb)
+	if (!elsiocb) {
+		mempool_free(pmb, phba->mbox_mem_pool);
 		return;
+	}
 
 	icmd = &elsiocb->iocb;
 	icmd->ulpContext = rxid;
@@ -5202,7 +5289,7 @@
 	rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
 	rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
 	rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
-
+	mempool_free(pmb, phba->mbox_mem_pool);
 	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
 			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
@@ -5586,7 +5673,7 @@
 	pcmd += sizeof(uint32_t);
 	els_rrq = (struct RRQ *) pcmd;
 
-	bf_set(rrq_oxid, els_rrq, rrq->xritag);
+	bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
 	bf_set(rrq_rxid, els_rrq, rrq->rxid);
 	bf_set(rrq_did, els_rrq, vport->fc_myDID);
 	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
@@ -7873,7 +7960,9 @@
 			sglq_entry->state = SGL_FREED;
 			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
-			lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+			lpfc_set_rrq_active(phba, ndlp,
+				sglq_entry->sli4_lxritag,
+				rxid, 1);
 
 			/* Check if TXQ queue needs to be serviced */
 			if (pring->txq_cnt)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index b507536..5bb269e 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -713,6 +713,7 @@
 	int rc;
 
 	set_user_nice(current, -20);
+	current->flags |= PF_NOFREEZE;
 	phba->data_flags = 0;
 
 	while (!kthread_should_stop()) {
@@ -1094,7 +1095,7 @@
 	/* Start discovery by sending a FLOGI. port_state is identically
 	 * LPFC_FLOGI while waiting for FLOGI cmpl
 	 */
-	if (vport->port_state != LPFC_FLOGI)
+	if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
 		lpfc_initial_flogi(vport);
 	return;
 
@@ -2881,9 +2882,14 @@
 	}
 
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
-		/* For private loop just start discovery and we are done. */
-		if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
-		    !(vport->fc_flag & FC_PUBLIC_LOOP)) {
+		/*
+		 * For private loop or for NPort pt2pt,
+		 * just start discovery and we are done.
+		 */
+		if ((vport->fc_flag & FC_PT2PT) ||
+		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {
+
 			/* Use loop map to make discovery list */
 			lpfc_disc_list_loopmap(vport);
 			/* Start discovery */
@@ -5490,9 +5496,9 @@
 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
 
 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
-			"0279 lpfc_nlp_release: ndlp:x%p "
+			"0279 lpfc_nlp_release: ndlp:x%p did %x "
 			"usgmap:x%x refcnt:%d\n",
-			(void *)ndlp, ndlp->nlp_usg_map,
+			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
 			atomic_read(&ndlp->kref.refcount));
 
 	/* remove ndlp from action. */
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5f280b5..41bb1d2 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -3374,6 +3374,9 @@
 	WORD5 w5;		/* Header control/status word */
 } XMT_SEQ_FIELDS64;
 
+/* This word is remote ports D_ID for XMIT_ELS_RSP64 */
+#define xmit_els_remoteID xrsqRo
+
 /* IOCB Command template for 64 bit RCV_SEQUENCE64 */
 typedef struct {
 	struct ulp_bde64 rcvBde;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 91f0976..f1946df 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -228,19 +228,15 @@
 #define lpfc_idx_rsrc_rdy_MASK		0x00000001
 #define lpfc_idx_rsrc_rdy_WORD		word0
 #define LPFC_IDX_RSRC_RDY		1
-#define lpfc_xri_rsrc_rdy_SHIFT		1
-#define lpfc_xri_rsrc_rdy_MASK		0x00000001
-#define lpfc_xri_rsrc_rdy_WORD		word0
-#define LPFC_XRI_RSRC_RDY		1
-#define lpfc_rpi_rsrc_rdy_SHIFT		2
+#define lpfc_rpi_rsrc_rdy_SHIFT		1
 #define lpfc_rpi_rsrc_rdy_MASK		0x00000001
 #define lpfc_rpi_rsrc_rdy_WORD		word0
 #define LPFC_RPI_RSRC_RDY		1
-#define lpfc_vpi_rsrc_rdy_SHIFT		3
+#define lpfc_vpi_rsrc_rdy_SHIFT		2
 #define lpfc_vpi_rsrc_rdy_MASK		0x00000001
 #define lpfc_vpi_rsrc_rdy_WORD		word0
 #define LPFC_VPI_RSRC_RDY		1
-#define lpfc_vfi_rsrc_rdy_SHIFT		4
+#define lpfc_vfi_rsrc_rdy_SHIFT		3
 #define lpfc_vfi_rsrc_rdy_MASK		0x00000001
 #define lpfc_vfi_rsrc_rdy_WORD		word0
 #define LPFC_VFI_RSRC_RDY		1
@@ -3299,7 +3295,13 @@
 struct xmit_els_rsp64_wqe {
 	struct ulp_bde64 bde;
 	uint32_t response_payload_len;
-	uint32_t rsvd4;
+	uint32_t word4;
+#define els_rsp64_sid_SHIFT         0
+#define els_rsp64_sid_MASK          0x00FFFFFF
+#define els_rsp64_sid_WORD          word4
+#define els_rsp64_sp_SHIFT          24
+#define els_rsp64_sp_MASK           0x00000001
+#define els_rsp64_sp_WORD           word4
 	struct wqe_did wqe_dest;
 	struct wqe_common wqe_com; /* words 6-11 */
 	uint32_t word12;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 9598fdc..411ed48 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -64,8 +64,8 @@
 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
 static int lpfc_setup_endian_order(struct lpfc_hba *);
 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
-static void lpfc_free_sgl_list(struct lpfc_hba *);
-static int lpfc_init_sgl_list(struct lpfc_hba *);
+static void lpfc_free_els_sgl_list(struct lpfc_hba *);
+static void lpfc_init_sgl_list(struct lpfc_hba *);
 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
 static void lpfc_free_active_sgl(struct lpfc_hba *);
 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
@@ -2767,47 +2767,14 @@
 }
 
 /**
- * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine goes through all the scsi buffers in the system and updates the
- * Physical XRIs assigned to the SCSI buffer because these may change after any
- * firmware reset
- *
- * Return codes
- *   0 - successful (for now, it always returns 0)
- **/
-int
-lpfc_scsi_buf_update(struct lpfc_hba *phba)
-{
-	struct lpfc_scsi_buf *sb, *sb_next;
-
-	spin_lock_irq(&phba->hbalock);
-	spin_lock(&phba->scsi_buf_list_lock);
-	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
-		sb->cur_iocbq.sli4_xritag =
-			phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
-		set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
-		phba->sli4_hba.max_cfg_param.xri_used++;
-		phba->sli4_hba.xri_count++;
-	}
-	spin_unlock(&phba->scsi_buf_list_lock);
-	spin_unlock_irq(&phba->hbalock);
-	return 0;
-}
-
-/**
  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is to free all the SCSI buffers and IOCBs from the driver
  * list back to kernel. It is called from lpfc_pci_remove_one to free
  * the internal resources before the device is removed from the system.
- *
- * Return codes
- *   0 - successful (for now, it always returns 0)
  **/
-static int
+static void
 lpfc_scsi_free(struct lpfc_hba *phba)
 {
 	struct lpfc_scsi_buf *sb, *sb_next;
@@ -2833,7 +2800,178 @@
 	}
 
 	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * scsi sgl lists, and then goes through all sgls to update the physical
+ * XRIs assigned due to port function reset. During port initialization, the
+ * current els and allocated scsi sgl lists are empty.
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
+	struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
+	uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
+	LIST_HEAD(els_sgl_list);
+	LIST_HEAD(scsi_sgl_list);
+	int rc;
+
+	/*
+	 * update on pci function's els xri-sgl list
+	 */
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
+		/* els xri-sgl expanded */
+		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3157 ELS xri-sgl count increased from "
+				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
+				els_xri_cnt);
+		/* allocate the additional els sgls */
+		for (i = 0; i < xri_cnt; i++) {
+			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
+					     GFP_KERNEL);
+			if (sglq_entry == NULL) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"2562 Failure to allocate an "
+						"ELS sgl entry:%d\n", i);
+				rc = -ENOMEM;
+				goto out_free_mem;
+			}
+			sglq_entry->buff_type = GEN_BUFF_TYPE;
+			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
+							   &sglq_entry->phys);
+			if (sglq_entry->virt == NULL) {
+				kfree(sglq_entry);
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"2563 Failure to allocate an "
+						"ELS mbuf:%d\n", i);
+				rc = -ENOMEM;
+				goto out_free_mem;
+			}
+			sglq_entry->sgl = sglq_entry->virt;
+			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
+			sglq_entry->state = SGL_FREED;
+			list_add_tail(&sglq_entry->list, &els_sgl_list);
+		}
+		spin_lock(&phba->hbalock);
+		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
+		spin_unlock(&phba->hbalock);
+	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
+		/* els xri-sgl shrunk */
+		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3158 ELS xri-sgl count decreased from "
+				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
+				els_xri_cnt);
+		spin_lock_irq(&phba->hbalock);
+		list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
+		spin_unlock_irq(&phba->hbalock);
+		/* release extra els sgls from list */
+		for (i = 0; i < xri_cnt; i++) {
+			list_remove_head(&els_sgl_list,
+					 sglq_entry, struct lpfc_sglq, list);
+			if (sglq_entry) {
+				lpfc_mbuf_free(phba, sglq_entry->virt,
+					       sglq_entry->phys);
+				kfree(sglq_entry);
+			}
+		}
+		spin_lock_irq(&phba->hbalock);
+		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
+		spin_unlock_irq(&phba->hbalock);
+	} else
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3163 ELS xri-sgl count unchanged: %d\n",
+				els_xri_cnt);
+	phba->sli4_hba.els_xri_cnt = els_xri_cnt;
+
+	/* update xris to els sgls on the list */
+	sglq_entry = NULL;
+	sglq_entry_next = NULL;
+	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+				 &phba->sli4_hba.lpfc_sgl_list, list) {
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2400 Failed to allocate xri for "
+					"ELS sgl\n");
+			rc = -ENOMEM;
+			goto out_free_mem;
+		}
+		sglq_entry->sli4_lxritag = lxri;
+		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+	}
+
+	/*
+	 * update on pci function's allocated scsi xri-sgl list
+	 */
+	phba->total_scsi_bufs = 0;
+
+	/* maximum number of xris available for scsi buffers */
+	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
+				      els_xri_cnt;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2401 Current allocated SCSI xri-sgl count:%d, "
+			"maximum SCSI xri count:%d\n",
+			phba->sli4_hba.scsi_xri_cnt,
+			phba->sli4_hba.scsi_xri_max);
+
+	spin_lock_irq(&phba->scsi_buf_list_lock);
+	list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
+	spin_unlock_irq(&phba->scsi_buf_list_lock);
+
+	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
+		/* max scsi xri shrunk below the allocated scsi buffers */
+		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
+					phba->sli4_hba.scsi_xri_max;
+		/* release the extra allocated scsi buffers */
+		for (i = 0; i < scsi_xri_cnt; i++) {
+			list_remove_head(&scsi_sgl_list, psb,
+					 struct lpfc_scsi_buf, list);
+			pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
+				      psb->dma_handle);
+			kfree(psb);
+		}
+		spin_lock_irq(&phba->scsi_buf_list_lock);
+		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
+		spin_unlock_irq(&phba->scsi_buf_list_lock);
+	}
+
+	/* update xris associated to remaining allocated scsi buffers */
+	psb = NULL;
+	psb_next = NULL;
+	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2560 Failed to allocate xri for "
+					"scsi buffer\n");
+			rc = -ENOMEM;
+			goto out_free_mem;
+		}
+		psb->cur_iocbq.sli4_lxritag = lxri;
+		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+	}
+	spin_lock(&phba->scsi_buf_list_lock);
+	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
+	spin_unlock(&phba->scsi_buf_list_lock);
+
 	return 0;
+
+out_free_mem:
+	lpfc_free_els_sgl_list(phba);
+	lpfc_scsi_free(phba);
+	return rc;
 }
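
The grow/shrink arms of lpfc_sli4_xri_sgl_update() above reduce to bringing a pool up or down to a newly required count: allocate the delta when the count grew, free the surplus when it shrank, then re-stamp whatever remains. A self-contained sketch of just the resize step, with a hypothetical node type and error handling trimmed:

#include <stdio.h>
#include <stdlib.h>

struct pool_node {
	struct pool_node *next;
};

/* Bring the pool from cur_cnt entries to req_cnt entries. */
static int resize_pool(struct pool_node **head, int cur_cnt, int req_cnt)
{
	struct pool_node *n;
	int i;

	if (req_cnt > cur_cnt) {			/* pool expanded */
		for (i = 0; i < req_cnt - cur_cnt; i++) {
			n = calloc(1, sizeof(*n));
			if (!n)
				return cur_cnt + i;	/* partial growth */
			n->next = *head;
			*head = n;
		}
	} else {					/* pool shrank */
		for (i = 0; i < cur_cnt - req_cnt; i++) {
			n = *head;
			*head = n->next;
			free(n);
		}
	}
	return req_cnt;
}

int main(void)
{
	struct pool_node *pool = NULL;
	int cnt;

	cnt = resize_pool(&pool, 0, 4);		/* grow  0 -> 4 */
	cnt = resize_pool(&pool, cnt, 2);	/* shrink 4 -> 2 */
	printf("pool now holds %d entries\n", cnt);
	resize_pool(&pool, cnt, 0);		/* release the rest */
	return 0;
}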
 
 /**
@@ -4636,18 +4774,15 @@
 	if (rc)
 		goto out_free_bsmbx;
 
-	/* Initialize and populate the iocb list per host */
-	rc = lpfc_init_sgl_list(phba);
-	if (rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1400 Failed to initialize sgl list.\n");
-		goto out_destroy_cq_event_pool;
-	}
+	/* Initialize sgl lists per host */
+	lpfc_init_sgl_list(phba);
+
+	/* Allocate and initialize active sgl array */
 	rc = lpfc_init_active_sgl_array(phba);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1430 Failed to initialize sgl list.\n");
-		goto out_free_sgl_list;
+		goto out_destroy_cq_event_pool;
 	}
 	rc = lpfc_sli4_init_rpi_hdrs(phba);
 	if (rc) {
@@ -4722,8 +4857,6 @@
 	lpfc_sli4_remove_rpi_hdrs(phba);
 out_free_active_sgl:
 	lpfc_free_active_sgl(phba);
-out_free_sgl_list:
-	lpfc_free_sgl_list(phba);
 out_destroy_cq_event_pool:
 	lpfc_sli4_cq_event_pool_destroy(phba);
 out_free_bsmbx:
@@ -4760,10 +4893,7 @@
 
 	/* Free the ELS sgl list */
 	lpfc_free_active_sgl(phba);
-	lpfc_free_sgl_list(phba);
-
-	/* Free the SCSI sgl management array */
-	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+	lpfc_free_els_sgl_list(phba);
 
 	/* Free the completion queue EQ event pool */
 	lpfc_sli4_cq_event_release_all(phba);
@@ -4990,29 +5120,42 @@
 }
 
 /**
- * lpfc_free_sgl_list - Free sgl list.
+ * lpfc_free_sgl_list - Free a given sgl list.
  * @phba: pointer to lpfc hba data structure.
+ * @sglq_list: pointer to the head of sgl list.
  *
- * This routine is invoked to free the driver's sgl list and memory.
+ * This routine is invoked to free a given sgl list and its memory.
  **/
-static void
-lpfc_free_sgl_list(struct lpfc_hba *phba)
+void
+lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
 {
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+
+	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
+		list_del(&sglq_entry->list);
+		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
+		kfree(sglq_entry);
+	}
+}
+
+/**
+ * lpfc_free_els_sgl_list - Free els sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's els sgl list and memory.
+ **/
+static void
+lpfc_free_els_sgl_list(struct lpfc_hba *phba)
+{
 	LIST_HEAD(sglq_list);
 
+	/* Retrieve all els sgls from driver list */
 	spin_lock_irq(&phba->hbalock);
 	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
 	spin_unlock_irq(&phba->hbalock);
 
-	list_for_each_entry_safe(sglq_entry, sglq_next,
-				 &sglq_list, list) {
-		list_del(&sglq_entry->list);
-		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
-		kfree(sglq_entry);
-		phba->sli4_hba.total_sglq_bufs--;
-	}
-	kfree(phba->sli4_hba.lpfc_els_sgl_array);
+	/* Now free the sgl list */
+	lpfc_free_sgl_list(phba, &sglq_list);
 }
 
 /**
@@ -5057,99 +5200,19 @@
  * This routine is invoked to allocate and initizlize the driver's sgl
  * list and set up the sgl xritag tag array accordingly.
  *
- * Return codes
- *	0 - successful
- *	other values - error
  **/
-static int
+static void
 lpfc_init_sgl_list(struct lpfc_hba *phba)
 {
-	struct lpfc_sglq *sglq_entry = NULL;
-	int i;
-	int els_xri_cnt;
-
-	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-				"2400 ELS XRI count %d.\n",
-				els_xri_cnt);
 	/* Initialize and populate the sglq list per host/VF. */
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
 
-	/* Sanity check on XRI management */
-	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2562 No room left for SCSI XRI allocation: "
-				"max_xri=%d, els_xri=%d\n",
-				phba->sli4_hba.max_cfg_param.max_xri,
-				els_xri_cnt);
-		return -ENOMEM;
-	}
+	/* els xri-sgl bookkeeping */
+	phba->sli4_hba.els_xri_cnt = 0;
 
-	/* Allocate memory for the ELS XRI management array */
-	phba->sli4_hba.lpfc_els_sgl_array =
-			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
-			GFP_KERNEL);
-
-	if (!phba->sli4_hba.lpfc_els_sgl_array) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2401 Failed to allocate memory for ELS "
-				"XRI management array of size %d.\n",
-				els_xri_cnt);
-		return -ENOMEM;
-	}
-
-	/* Keep the SCSI XRI into the XRI management array */
-	phba->sli4_hba.scsi_xri_max =
-			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+	/* scsi xri-buffer bookkeeping */
 	phba->sli4_hba.scsi_xri_cnt = 0;
-	phba->sli4_hba.lpfc_scsi_psb_array =
-			kzalloc((sizeof(struct lpfc_scsi_buf *) *
-			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
-
-	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2563 Failed to allocate memory for SCSI "
-				"XRI management array of size %d.\n",
-				phba->sli4_hba.scsi_xri_max);
-		kfree(phba->sli4_hba.lpfc_els_sgl_array);
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < els_xri_cnt; i++) {
-		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
-		if (sglq_entry == NULL) {
-			printk(KERN_ERR "%s: only allocated %d sgls of "
-				"expected %d count. Unloading driver.\n",
-				__func__, i, els_xri_cnt);
-			goto out_free_mem;
-		}
-
-		sglq_entry->buff_type = GEN_BUFF_TYPE;
-		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
-		if (sglq_entry->virt == NULL) {
-			kfree(sglq_entry);
-			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
-				"Unloading driver.\n", __func__);
-			goto out_free_mem;
-		}
-		sglq_entry->sgl = sglq_entry->virt;
-		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
-
-		/* The list order is used by later block SGL registraton */
-		spin_lock_irq(&phba->hbalock);
-		sglq_entry->state = SGL_FREED;
-		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
-		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
-		phba->sli4_hba.total_sglq_bufs++;
-		spin_unlock_irq(&phba->hbalock);
-	}
-	return 0;
-
-out_free_mem:
-	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
-	lpfc_free_sgl_list(phba);
-	return -ENOMEM;
 }
 
 /**
@@ -7320,9 +7383,11 @@
 					phba->sli4_hba.u.if_type2.ERR2regaddr);
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"2890 Port error detected during port "
-					"reset(%d): port status reg 0x%x, "
+					"reset(%d): wait_tmo:%d ms, "
+					"port status reg 0x%x, "
 					"error 1=0x%x, error 2=0x%x\n",
-					num_resets, reg_data.word0,
+					num_resets, rdy_chk*10,
+					reg_data.word0,
 					phba->work_status[0],
 					phba->work_status[1]);
 				rc = -ENODEV;
@@ -8694,8 +8759,11 @@
 	/* Release all the vports against this physical port */
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+				continue;
 			fc_vport_terminate(vports[i]->fc_vport);
+		}
 	lpfc_destroy_vport_work_array(phba, vports);
 
 	/* Remove FC host and then SCSI host with the physical port */
@@ -9115,8 +9183,12 @@
 			return 50;
 		else if (max_xri <= 1024)
 			return 100;
-		else
+		else if (max_xri <= 1536)
 			return 150;
+		else if (max_xri <= 2048)
+			return 200;
+		else
+			return 250;
 	} else
 		return 0;
 }
@@ -9455,8 +9527,11 @@
 	/* Release all the vports against this physical port */
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
-		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+				continue;
 			fc_vport_terminate(vports[i]->fc_vport);
+		}
 	lpfc_destroy_vport_work_array(phba, vports);
 
 	/* Remove FC host and then SCSI host with the physical port */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 15ca2a9a..9133a97 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -367,8 +367,10 @@
 		return 1;
 	}
 
+	/* Check for Nport to NPort pt2pt protocol */
 	if ((vport->fc_flag & FC_PT2PT) &&
 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+
 		/* rcv'ed PLOGI decides what our NPortId will be */
 		vport->fc_myDID = icmd->un.rcvels.parmRo;
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -382,6 +384,13 @@
 			mempool_free(mbox, phba->mbox_mem_pool);
 			goto out;
 		}
+		/*
+		 * For SLI4, the VFI/VPI are registered AFTER the
+		 * Nport with the higher WWPN sends us a PLOGI with
+		 * our assigned NPortId.
+		 */
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_issue_reg_vfi(vport);
 
 		lpfc_can_disctmo(vport);
 	}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 88f3a83..66e0906 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -399,6 +399,14 @@
 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
 	num_cmd_success = atomic_read(&phba->num_cmd_success);
 
+	/*
+	 * The error and success command counters are global per
+	 * driver instance.  If another handler has already
+	 * operated on this error event, just exit.
+	 */
+	if (num_rsrc_err == 0)
+		return;
+
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@@ -688,7 +696,8 @@
 			rrq_empty = list_empty(&phba->active_rrq_list);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			if (ndlp) {
-				lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+				lpfc_set_rrq_active(phba, ndlp,
+					psb->cur_iocbq.sli4_lxritag, rxid, 1);
 				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
 			}
 			lpfc_release_scsi_buf_s4(phba, psb);
@@ -718,11 +727,137 @@
 }
 
 /**
- * lpfc_sli4_repost_scsi_sgl_list - Repsot the Scsi buffers sgl pages as block
+ * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
+ * @phba: pointer to lpfc hba data structure.
+ * @post_sblist: pointer to the scsi buffer list.
+ *
+ * This routine walks a list of scsi buffers that was passed in. It attempts
+ * to construct blocks of scsi buffer sgls that contain contiguous xris and
+ * uses the non-embedded SGL block post mailbox commands to post them to the
+ * port. For a single SCSI buffer sgl with a non-contiguous xri, if any, it
+ * uses the embedded SGL post mailbox command for posting. The @post_sblist
+ * passed in must be a local list, so no lock is needed when manipulating
+ * the list.
+ *
+ * Returns: 0 = failure, non-zero number of successfully posted buffers.
+ **/
+int
+lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
+			     struct list_head *post_sblist, int sb_count)
+{
+	struct lpfc_scsi_buf *psb, *psb_next;
+	int status;
+	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
+	dma_addr_t pdma_phys_bpl1;
+	int last_xritag = NO_XRI;
+	LIST_HEAD(prep_sblist);
+	LIST_HEAD(blck_sblist);
+	LIST_HEAD(scsi_sblist);
+
+	/* sanity check */
+	if (sb_count <= 0)
+		return -EINVAL;
+
+	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
+		list_del_init(&psb->list);
+		block_cnt++;
+		if ((last_xritag != NO_XRI) &&
+		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
+			/* a hole in xri block, form a sgl posting block */
+			list_splice_init(&prep_sblist, &blck_sblist);
+			post_cnt = block_cnt - 1;
+			/* prepare list for next posting block */
+			list_add_tail(&psb->list, &prep_sblist);
+			block_cnt = 1;
+		} else {
+			/* prepare list for next posting block */
+			list_add_tail(&psb->list, &prep_sblist);
+			/* enough sgls for non-embed sgl mbox command */
+			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+				list_splice_init(&prep_sblist, &blck_sblist);
+				post_cnt = block_cnt;
+				block_cnt = 0;
+			}
+		}
+		num_posting++;
+		last_xritag = psb->cur_iocbq.sli4_xritag;
+
+		/* end of repost sgl list condition for SCSI buffers */
+		if (num_posting == sb_count) {
+			if (post_cnt == 0) {
+				/* last sgl posting block */
+				list_splice_init(&prep_sblist, &blck_sblist);
+				post_cnt = block_cnt;
+			} else if (block_cnt == 1) {
+				/* last single sgl with non-contiguous xri */
+				if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+					pdma_phys_bpl1 = psb->dma_phys_bpl +
+								SGL_PAGE_SIZE;
+				else
+					pdma_phys_bpl1 = 0;
+				status = lpfc_sli4_post_sgl(phba,
+						psb->dma_phys_bpl,
+						pdma_phys_bpl1,
+						psb->cur_iocbq.sli4_xritag);
+				if (status) {
+					/* failure, put on abort scsi list */
+					psb->exch_busy = 1;
+				} else {
+					/* success, put on SCSI buffer list */
+					psb->exch_busy = 0;
+					psb->status = IOSTAT_SUCCESS;
+					num_posted++;
+				}
+				/* success, put on SCSI buffer sgl list */
+				list_add_tail(&psb->list, &scsi_sblist);
+			}
+		}
+
+		/* continue until a non-embedded page worth of sgls is collected */
+		if (post_cnt == 0)
+			continue;
+
+		/* post block of SCSI buffer list sgls */
+		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
+						       post_cnt);
+
+		/* don't reset xritag due to hole in xri block */
+		if (block_cnt == 0)
+			last_xritag = NO_XRI;
+
+		/* reset SCSI buffer post count for next round of posting */
+		post_cnt = 0;
+
+		/* put SCSI buffers with posted sgls on the SCSI buffer sgl list */
+		while (!list_empty(&blck_sblist)) {
+			list_remove_head(&blck_sblist, psb,
+					 struct lpfc_scsi_buf, list);
+			if (status) {
+				/* failure, put on abort scsi list */
+				psb->exch_busy = 1;
+			} else {
+				/* success, put on SCSI buffer list */
+				psb->exch_busy = 0;
+				psb->status = IOSTAT_SUCCESS;
+				num_posted++;
+			}
+			list_add_tail(&psb->list, &scsi_sblist);
+		}
+	}
+	/* Push SCSI buffers with posted sgls to the available list */
+	while (!list_empty(&scsi_sblist)) {
+		list_remove_head(&scsi_sblist, psb,
+				 struct lpfc_scsi_buf, list);
+		lpfc_release_scsi_buf_s4(phba, psb);
+	}
+	return num_posted;
+}
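
The walk in lpfc_sli4_post_scsi_sgl_list() above is essentially a block-forming pass over the xri sequence: keep appending buffers to the current block until a hole in the xri numbering or the non-embedded mailbox limit is hit, then flush the block and start a new one. A minimal userspace sketch of that boundary logic follows; the names are hypothetical, and the driver's special case of posting a lone non-contiguous xri through an embedded mailbox is folded into the same flush here.

#include <stdio.h>

#define MAX_BLOCK 8	/* stand-in for LPFC_NEMBED_MBOX_SGL_CNT */

static void post_block(const int *xri, int start, int cnt)
{
	printf("post block of %d sgls starting at xri %d\n", cnt, xri[start]);
}

static void post_all(const int *xri, int n)
{
	int start = 0, i;

	for (i = 1; i <= n; i++) {
		/* a hole means the next xri is not contiguous with the last */
		int hole = (i < n) && (xri[i] != xri[i - 1] + 1);
		int full = (i - start) == MAX_BLOCK;

		if (i == n || hole || full) {
			post_block(xri, start, i - start);
			start = i;
		}
	}
}

int main(void)
{
	/* one hole after 103: expect blocks {100..103} and {200..201} */
	int xri[] = { 100, 101, 102, 103, 200, 201 };

	post_all(xri, 6);
	return 0;
}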
+
+/**
+ * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine walks the list of scsi buffers that have been allocated and
- * repost them to the HBA by using SGL block post. This is needed after a
+ * repost them to the port by using SGL block post. This is needed after a
  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
  * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
  * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
@@ -732,57 +867,21 @@
 int
 lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
 {
-	struct lpfc_scsi_buf *psb;
-	int index, status, bcnt = 0, rcnt = 0, rc = 0;
-	LIST_HEAD(sblist);
+	LIST_HEAD(post_sblist);
+	int num_posted, rc = 0;
 
-	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
-		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
-		if (psb) {
-			/* Remove from SCSI buffer list */
-			list_del(&psb->list);
-			/* Add it to a local SCSI buffer list */
-			list_add_tail(&psb->list, &sblist);
-			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
-				bcnt = rcnt;
-				rcnt = 0;
-			}
-		} else
-			/* A hole present in the XRI array, need to skip */
-			bcnt = rcnt;
+	/* move all SCSI buffers that need reposting onto a local list */
+	spin_lock(&phba->scsi_buf_list_lock);
+	list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
+	spin_unlock(&phba->scsi_buf_list_lock);
 
-		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
-			/* End of XRI array for SCSI buffer, complete */
-			bcnt = rcnt;
-
-		/* Continue until collect up to a nembed page worth of sgls */
-		if (bcnt == 0)
-			continue;
-		/* Now, post the SCSI buffer list sgls as a block */
-		if (!phba->sli4_hba.extents_in_use)
-			status = lpfc_sli4_post_scsi_sgl_block(phba,
-							&sblist,
-							bcnt);
-		else
-			status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
-							&sblist,
-							bcnt);
-		/* Reset SCSI buffer count for next round of posting */
-		bcnt = 0;
-		while (!list_empty(&sblist)) {
-			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
-					 list);
-			if (status) {
-				/* Put this back on the abort scsi list */
-				psb->exch_busy = 1;
-				rc++;
-			} else {
-				psb->exch_busy = 0;
-				psb->status = IOSTAT_SUCCESS;
-			}
-			/* Put it back into the SCSI buffer list */
-			lpfc_release_scsi_buf_s4(phba, psb);
-		}
+	/* post the list of scsi buffer sgls to the port if available */
+	if (!list_empty(&post_sblist)) {
+		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
+						phba->sli4_hba.scsi_xri_cnt);
+		/* failed to post any scsi buffer, return error */
+		if (num_posted == 0)
+			rc = -EIO;
 	}
 	return rc;
 }
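
lpfc_sli4_repost_scsi_sgl_list() now uses the splice-to-local-list idiom: the shared buffer list is emptied onto a local head while the lock is held, and the slow posting work then runs on the local copy with the lock dropped. A small sketch of that idiom using pthreads and a hand-rolled singly linked list; it is illustrative only and does not use the driver's lists or locks.

#include <pthread.h>
#include <stdio.h>

struct buf_node {
	struct buf_node *next;
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf_node *shared_list;

static int post_list(struct buf_node *head)
{
	int posted = 0;

	for (; head; head = head->next, posted++)
		printf("posting buffer %d\n", head->id);
	return posted;
}

static int repost_all(void)
{
	struct buf_node *local;

	pthread_mutex_lock(&list_lock);
	local = shared_list;		/* splice to a local head ...        */
	shared_list = NULL;		/* ... leaving the shared list empty */
	pthread_mutex_unlock(&list_lock);

	return local ? post_list(local) : 0;
}

int main(void)
{
	static struct buf_node b = { NULL, 2 }, a = { &b, 1 };

	shared_list = &a;
	printf("posted %d buffers\n", repost_all());
	return 0;
}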
@@ -792,12 +891,13 @@
  * @vport: The virtual port for which this call being executed.
  * @num_to_allocate: The requested number of buffers to allocate.
  *
- * This routine allocates a scsi buffer for device with SLI-4 interface spec,
+ * This routine allocates scsi buffers for a device with SLI-4 interface spec,
  * the scsi buffer contains all the necessary information needed to initiate
- * a SCSI I/O.
+ * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and
+ * putting them on a list, it posts them to the port by using SGL block post.
  *
  * Return codes:
- *   int - number of scsi buffers that were allocated.
+ *   int - number of scsi buffers that were allocated and posted.
  *   0 = failure, less than num_to_alloc is a partial failure.
  **/
 static int
@@ -810,22 +910,21 @@
 	dma_addr_t pdma_phys_fcp_cmd;
 	dma_addr_t pdma_phys_fcp_rsp;
 	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
-	uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
-	int status = 0, index;
-	int bcnt;
-	int non_sequential_xri = 0;
-	LIST_HEAD(sblist);
+	uint16_t iotag, lxri = 0;
+	int bcnt, num_posted;
+	LIST_HEAD(prep_sblist);
+	LIST_HEAD(post_sblist);
+	LIST_HEAD(scsi_sblist);
 
 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
 		if (!psb)
 			break;
-
 		/*
-		 * Get memory from the pci pool to map the virt space to pci bus
-		 * space for an I/O.  The DMA buffer includes space for the
-		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
-		 * necessary to support the sg_tablesize.
+		 * Get memory from the pci pool to map the virt space to
+		 * pci bus space for an I/O. The DMA buffer includes space
+		 * for the struct fcp_cmnd, struct fcp_rsp and the number
+		 * of bde's necessary to support the sg_tablesize.
 		 */
 		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
 						GFP_KERNEL, &psb->dma_handle);
@@ -833,8 +932,6 @@
 			kfree(psb);
 			break;
 		}
-
-		/* Initialize virtual ptrs to dma_buf region. */
 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
 		/* Allocate iotag for psb->cur_iocbq. */
@@ -855,16 +952,7 @@
 		}
 		psb->cur_iocbq.sli4_lxritag = lxri;
 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
-		if (last_xritag != NO_XRI
-			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
-			non_sequential_xri = 1;
-		} else
-			list_add_tail(&psb->list, &sblist);
-		last_xritag = psb->cur_iocbq.sli4_xritag;
-
-		index = phba->sli4_hba.scsi_xri_cnt++;
 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
-
 		psb->fcp_bpl = psb->data;
 		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
 			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
@@ -880,9 +968,9 @@
 		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
 
 		/*
-		 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance
-		 * are sg list bdes.  Initialize the first two and leave the
-		 * rest for queuecommand.
+		 * The first two bdes are the FCP_CMD and FCP_RSP.
+		 * The balance are sg list bdes. Initialize the
+		 * first two and leave the rest for queuecommand.
 		 */
 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
@@ -917,62 +1005,31 @@
 		iocb->ulpBdeCount = 1;
 		iocb->ulpLe = 1;
 		iocb->ulpClass = CLASS3;
-		psb->cur_iocbq.context1  = psb;
+		psb->cur_iocbq.context1 = psb;
 		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
 			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
 		else
 			pdma_phys_bpl1 = 0;
 		psb->dma_phys_bpl = pdma_phys_bpl;
-		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
-		if (non_sequential_xri) {
-			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
-						pdma_phys_bpl1,
-						psb->cur_iocbq.sli4_xritag);
-			if (status) {
-				/* Put this back on the abort scsi list */
-				psb->exch_busy = 1;
-			} else {
-				psb->exch_busy = 0;
-				psb->status = IOSTAT_SUCCESS;
-			}
-			/* Put it back into the SCSI buffer list */
-			lpfc_release_scsi_buf_s4(phba, psb);
-			break;
-		}
-	}
-	if (bcnt) {
-		if (!phba->sli4_hba.extents_in_use)
-			status = lpfc_sli4_post_scsi_sgl_block(phba,
-								&sblist,
-								bcnt);
-		else
-			status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
-								&sblist,
-								bcnt);
 
-		if (status) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-					"3021 SCSI SGL post error %d\n",
-					status);
-			bcnt = 0;
-		}
-		/* Reset SCSI buffer count for next round of posting */
-		while (!list_empty(&sblist)) {
-			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
-				 list);
-			if (status) {
-				/* Put this back on the abort scsi list */
-				psb->exch_busy = 1;
-			} else {
-				psb->exch_busy = 0;
-				psb->status = IOSTAT_SUCCESS;
-			}
-			/* Put it back into the SCSI buffer list */
-			lpfc_release_scsi_buf_s4(phba, psb);
-		}
+		/* add the scsi buffer to a post list */
+		list_add_tail(&psb->list, &post_sblist);
+		spin_lock_irq(&phba->scsi_buf_list_lock);
+		phba->sli4_hba.scsi_xri_cnt++;
+		spin_unlock_irq(&phba->scsi_buf_list_lock);
 	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
+			"3021 Allocated %d out of %d requested new SCSI "
+			"buffers\n", bcnt, num_to_alloc);
 
-	return bcnt + non_sequential_xri;
+	/* post the list of scsi buffer sgls to the port if available */
+	if (!list_empty(&post_sblist))
+		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
+							  &post_sblist, bcnt);
+	else
+		num_posted = 0;
+
+	return num_posted;
 }
 
 /**
@@ -1043,7 +1100,7 @@
 	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
 							list) {
 		if (lpfc_test_rrq_active(phba, ndlp,
-					 lpfc_cmd->cur_iocbq.sli4_xritag))
+					 lpfc_cmd->cur_iocbq.sli4_lxritag))
 			continue;
 		list_del(&lpfc_cmd->list);
 		found = 1;
@@ -1897,7 +1954,9 @@
 	dma_addr_t physaddr;
 	int i = 0, num_bde = 0, status;
 	int datadir = sc->sc_data_direction;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint32_t rc;
+#endif
 	uint32_t checking = 1;
 	uint32_t reftag;
 	unsigned blksize;
@@ -2034,7 +2093,9 @@
 	int datadir = sc->sc_data_direction;
 	unsigned char pgdone = 0, alldone = 0;
 	unsigned blksize;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint32_t rc;
+#endif
 	uint32_t checking = 1;
 	uint32_t reftag;
 	uint8_t txop, rxop;
@@ -2253,7 +2314,9 @@
 	uint32_t reftag;
 	unsigned blksize;
 	uint8_t txop, rxop;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint32_t rc;
+#endif
 	uint32_t checking = 1;
 	uint32_t dma_len;
 	uint32_t dma_offset = 0;
@@ -2383,7 +2446,9 @@
 	uint32_t reftag;
 	uint8_t txop, rxop;
 	uint32_t dma_len;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint32_t rc;
+#endif
 	uint32_t checking = 1;
 	uint32_t dma_offset = 0;
 	int num_sge = 0;
@@ -3604,11 +3669,16 @@
 			logit = LOG_FCP | LOG_FCP_UNDER;
 		lpfc_printf_vlog(vport, KERN_WARNING, logit,
 			 "9030 FCP cmd x%x failed <%d/%d> "
-			 "status: x%x result: x%x Data: x%x x%x\n",
+			 "status: x%x result: x%x "
+			 "sid: x%x did: x%x oxid: x%x "
+			 "Data: x%x x%x\n",
 			 cmd->cmnd[0],
 			 cmd->device ? cmd->device->id : 0xffff,
 			 cmd->device ? cmd->device->lun : 0xffff,
 			 lpfc_cmd->status, lpfc_cmd->result,
+			 vport->fc_myDID, pnode->nlp_DID,
+			 phba->sli_rev == LPFC_SLI_REV4 ?
+			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
 			 pIocbOut->iocb.ulpContext,
 			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
 
@@ -3689,8 +3759,8 @@
 				 * ABTS we cannot generate and RRQ.
 				 */
 				lpfc_set_rrq_active(phba, pnode,
-						lpfc_cmd->cur_iocbq.sli4_xritag,
-						0, 0);
+					lpfc_cmd->cur_iocbq.sli4_lxritag,
+					0, 0);
 			}
 		/* else: fall through */
 		default:
@@ -4348,8 +4418,20 @@
 	ret = fc_block_scsi_eh(cmnd);
 	if (ret)
 		return ret;
+
+	spin_lock_irq(&phba->hbalock);
+	/* driver queued commands are in process of being flushed */
+	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			"3168 SCSI Layer abort requested I/O has been "
+			"flushed by LLD.\n");
+		return FAILED;
+	}
+
 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
 	if (!lpfc_cmd) {
+		spin_unlock_irq(&phba->hbalock);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
 			 "x%x ID %d LUN %d\n",
@@ -4357,23 +4439,34 @@
 		return SUCCESS;
 	}
 
+	iocb = &lpfc_cmd->cur_iocbq;
+	/* the command is in process of being cancelled */
+	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			"3169 SCSI Layer abort requested I/O has been "
+			"cancelled by LLD.\n");
+		return FAILED;
+	}
 	/*
 	 * If pCmd field of the corresponding lpfc_scsi_buf structure
 	 * points to a different SCSI command, then the driver has
 	 * already completed this command, but the midlayer did not
-	 * see the completion before the eh fired.  Just return
-	 * SUCCESS.
+	 * see the completion before the eh fired. Just return SUCCESS.
 	 */
-	iocb = &lpfc_cmd->cur_iocbq;
-	if (lpfc_cmd->pCmd != cmnd)
-		goto out;
+	if (lpfc_cmd->pCmd != cmnd) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			"3170 SCSI Layer abort requested I/O has been "
+			"completed by LLD.\n");
+		goto out_unlock;
+	}
 
 	BUG_ON(iocb->context1 != lpfc_cmd);
 
-	abtsiocb = lpfc_sli_get_iocbq(phba);
+	abtsiocb = __lpfc_sli_get_iocbq(phba);
 	if (abtsiocb == NULL) {
 		ret = FAILED;
-		goto out;
+		goto out_unlock;
 	}
 
 	/*
@@ -4405,6 +4498,9 @@
 
 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
 	abtsiocb->vport = vport;
+	/* no longer need the lock after this point */
+	spin_unlock_irq(&phba->hbalock);
+
 	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
 	    IOCB_ERROR) {
 		lpfc_sli_release_iocbq(phba, abtsiocb);
@@ -4421,10 +4517,7 @@
 	wait_event_timeout(waitq,
 			  (lpfc_cmd->pCmd != cmnd),
 			   (2*vport->cfg_devloss_tmo*HZ));
-
-	spin_lock_irq(shost->host_lock);
 	lpfc_cmd->waitq = NULL;
-	spin_unlock_irq(shost->host_lock);
 
 	if (lpfc_cmd->pCmd == cmnd) {
 		ret = FAILED;
@@ -4434,8 +4527,11 @@
 				 "LUN %d\n",
 				 ret, cmnd->device->id, cmnd->device->lun);
 	}
+	goto out;
 
- out:
+out_unlock:
+	spin_unlock_irq(&phba->hbalock);
+out:
 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
 			 "LUN %d\n", ret, cmnd->device->id,
@@ -4863,6 +4959,43 @@
 }
 
 /**
+ * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a host reset on the adapter port. It brings the HBA
+ * offline, performs a board restart, and then brings the board back online.
+ * The lpfc_offline call invokes lpfc_sli_hba_down, which aborts and locally
+ * rejects all outstanding SCSI commands to the host, with the errors returned
+ * back to the SCSI mid-level. As this is the SCSI mid-level's last resort
+ * of error handling, it only returns an error if resetting the adapter
+ * is not successful; in all other cases, it returns success.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int rc, ret = SUCCESS;
+
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	rc = lpfc_sli_brdrestart(phba);
+	if (rc)
+		ret = FAILED;
+	lpfc_online(phba);
+	lpfc_unblock_mgmt_io(phba);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"3172 SCSI layer issued Host Reset Data: x%x\n", ret);
+	return ret;
+}
+
+/**
  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
  * @sdev: Pointer to scsi_device.
  *
@@ -4994,6 +5127,7 @@
 	.eh_device_reset_handler = lpfc_device_reset_handler,
 	.eh_target_reset_handler = lpfc_target_reset_handler,
 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
+	.eh_host_reset_handler  = lpfc_host_reset_handler,
 	.slave_alloc		= lpfc_slave_alloc,
 	.slave_configure	= lpfc_slave_configure,
 	.slave_destroy		= lpfc_slave_destroy,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index dbaf5b9..b4720a1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -67,6 +67,8 @@
 				      struct hbq_dmabuf *);
 static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
 				    struct lpfc_cqe *);
+static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
+				       int);
 
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -500,7 +502,7 @@
  * allocation is successful, it returns pointer to the newly
  * allocated iocb object else it returns NULL.
  **/
-static struct lpfc_iocbq *
+struct lpfc_iocbq *
 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 {
 	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
@@ -875,6 +877,9 @@
 	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
 			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
 		ndlp = piocbq->context_un.ndlp;
+	else  if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
+			(piocbq->iocb_flag & LPFC_IO_LIBDFC))
+		ndlp = piocbq->context_un.ndlp;
 	else
 		ndlp = piocbq->context1;
 
@@ -883,7 +888,7 @@
 	while (!found) {
 		if (!sglq)
 			return NULL;
-		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
 			/* This xri has an rrq outstanding for this DID.
 			 * put it back in the list and get another xri.
 			 */
@@ -1257,7 +1262,7 @@
 			struct lpfc_iocbq *piocb)
 {
 	list_add_tail(&piocb->list, &pring->txcmplq);
-	piocb->iocb_flag |= LPFC_IO_ON_Q;
+	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
 	pring->txcmplq_cnt++;
 	if (pring->txcmplq_cnt > pring->txcmplq_max)
 		pring->txcmplq_max = pring->txcmplq_cnt;
@@ -2556,9 +2561,9 @@
 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 		list_del_init(&cmd_iocb->list);
-		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
 			pring->txcmplq_cnt--;
-			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
 		}
 		return cmd_iocb;
 	}
@@ -2591,14 +2596,14 @@
 
 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
-		list_del_init(&cmd_iocb->list);
-		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
-			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+			/* remove from txcmpl queue list */
+			list_del_init(&cmd_iocb->list);
+			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
 			pring->txcmplq_cnt--;
+			return cmd_iocb;
 		}
-		return cmd_iocb;
 	}
-
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0372 iotag x%x is out off range: max iotag (x%x)\n",
 			iotag, phba->sli.last_iotag);
@@ -3466,6 +3471,9 @@
 	/* Retrieve everything on the txcmplq */
 	list_splice_init(&pring->txcmplq, &txcmplq);
 	pring->txcmplq_cnt = 0;
+
+	/* Indicate the I/O queues are flushed */
+	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Flush the txq */
@@ -3877,6 +3885,7 @@
 {
 	struct lpfc_sli *psli = &phba->sli;
 	uint16_t cfg_value;
+	int rc;
 
 	/* Reset HBA */
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3905,12 +3914,12 @@
 
 	/* Perform FCoE PCI function reset */
 	lpfc_sli4_queue_destroy(phba);
-	lpfc_pci_function_reset(phba);
+	rc = lpfc_pci_function_reset(phba);
 
 	/* Restore PCI cmd register */
 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
 
-	return 0;
+	return rc;
 }
 
 /**
@@ -4002,6 +4011,7 @@
 {
 	struct lpfc_sli *psli = &phba->sli;
 	uint32_t hba_aer_enabled;
+	int rc;
 
 	/* Restart HBA */
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -4011,7 +4021,7 @@
 	/* Take PCIe device Advanced Error Reporting (AER) state */
 	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
 
-	lpfc_sli4_brdreset(phba);
+	rc = lpfc_sli4_brdreset(phba);
 
 	spin_lock_irq(&phba->hbalock);
 	phba->pport->stopped = 0;
@@ -4028,7 +4038,7 @@
 
 	lpfc_hba_down_post(phba);
 
-	return 0;
+	return rc;
 }
 
 /**
@@ -4967,7 +4977,12 @@
 			      &rsrc_info->u.rsp);
 	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
 			     &rsrc_info->u.rsp);
- err_exit:
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"3162 Retrieved extents type-%d from port: count:%d, "
+			"size:%d\n", type, *extnt_count, *extnt_size);
+
+err_exit:
 	mempool_free(mbox, phba->mbox_mem_pool);
 	return rc;
 }
@@ -5051,7 +5066,7 @@
  *   0: if successful
  **/
 static int
-lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
 			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
 {
 	int rc = 0;
@@ -5060,7 +5075,7 @@
 	uint32_t alloc_len, mbox_tmo;
 
 	/* Calculate the total requested length of the dma memory */
-	req_len = *extnt_cnt * sizeof(uint16_t);
+	req_len = extnt_cnt * sizeof(uint16_t);
 
 	/*
 	 * Calculate the size of an embedded mailbox.  The uint32_t
@@ -5075,7 +5090,7 @@
 	 */
 	*emb = LPFC_SLI4_MBX_EMBED;
 	if (req_len > emb_len) {
-		req_len = *extnt_cnt * sizeof(uint16_t) +
+		req_len = extnt_cnt * sizeof(uint16_t) +
 			sizeof(union lpfc_sli4_cfg_shdr) +
 			sizeof(uint32_t);
 		*emb = LPFC_SLI4_MBX_NEMBED;
@@ -5091,7 +5106,7 @@
 			"size (x%x)\n", alloc_len, req_len);
 		return -ENOMEM;
 	}
-	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
+	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
 	if (unlikely(rc))
 		return -EIO;
 
@@ -5149,17 +5164,15 @@
 		return -ENOMEM;
 	}
 
-	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
-			"2903 Available Resource Extents "
-			"for resource type 0x%x: Count: 0x%x, "
-			"Size 0x%x\n", type, rsrc_cnt,
-			rsrc_size);
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
+			"2903 Post resource extents type-0x%x: "
+			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
 
 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)
 		return -ENOMEM;
 
-	rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
+	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
 	if (unlikely(rc)) {
 		rc = -EIO;
 		goto err_exit;
@@ -5250,6 +5263,7 @@
 			rc = -ENOMEM;
 			goto err_exit;
 		}
+		phba->sli4_hba.max_cfg_param.xri_used = 0;
 		phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
 						 sizeof(uint16_t),
 						 GFP_KERNEL);
@@ -5420,7 +5434,6 @@
 	case LPFC_RSC_TYPE_FCOE_XRI:
 		kfree(phba->sli4_hba.xri_bmask);
 		kfree(phba->sli4_hba.xri_ids);
-		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
 				    &phba->sli4_hba.lpfc_xri_blk_list, list) {
 			list_del_init(&rsrc_blk->list);
@@ -5612,7 +5625,6 @@
 			goto free_vpi_ids;
 		}
 		phba->sli4_hba.max_cfg_param.xri_used = 0;
-		phba->sli4_hba.xri_count = 0;
 		phba->sli4_hba.xri_ids = kzalloc(count *
 						 sizeof(uint16_t),
 						 GFP_KERNEL);
@@ -5694,7 +5706,6 @@
 		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 		kfree(phba->sli4_hba.xri_bmask);
 		kfree(phba->sli4_hba.xri_ids);
-		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
 		kfree(phba->sli4_hba.vfi_bmask);
 		kfree(phba->sli4_hba.vfi_ids);
 		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
@@ -5853,6 +5864,149 @@
 }
 
 /**
+ * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as a block
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of els buffers that have been allocated and
+ * reposts them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. It attempts to construct blocks
+ * of els buffer sgls which contain contiguous xris and uses the non-embedded
+ * SGL block post mailbox commands to post them to the port. For a single els
+ * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
+ * mailbox command for posting.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+static int
+lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL;
+	struct lpfc_sglq *sglq_entry_next = NULL;
+	struct lpfc_sglq *sglq_entry_first = NULL;
+	int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
+	int last_xritag = NO_XRI;
+	LIST_HEAD(prep_sgl_list);
+	LIST_HEAD(blck_sgl_list);
+	LIST_HEAD(allc_sgl_list);
+	LIST_HEAD(post_sgl_list);
+	LIST_HEAD(free_sgl_list);
+
+	spin_lock(&phba->hbalock);
+	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
+	spin_unlock(&phba->hbalock);
+
+	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+				 &allc_sgl_list, list) {
+		list_del_init(&sglq_entry->list);
+		block_cnt++;
+		if ((last_xritag != NO_XRI) &&
+		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
+			/* a hole in xri block, form a sgl posting block */
+			list_splice_init(&prep_sgl_list, &blck_sgl_list);
+			post_cnt = block_cnt - 1;
+			/* prepare list for next posting block */
+			list_add_tail(&sglq_entry->list, &prep_sgl_list);
+			block_cnt = 1;
+		} else {
+			/* prepare list for next posting block */
+			list_add_tail(&sglq_entry->list, &prep_sgl_list);
+			/* enough sgls for non-embed sgl mbox command */
+			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+				list_splice_init(&prep_sgl_list,
+						 &blck_sgl_list);
+				post_cnt = block_cnt;
+				block_cnt = 0;
+			}
+		}
+		num_posted++;
+
+		/* keep track of last sgl's xritag */
+		last_xritag = sglq_entry->sli4_xritag;
+
+		/* end of repost sgl list condition for els buffers */
+		if (num_posted == phba->sli4_hba.els_xri_cnt) {
+			if (post_cnt == 0) {
+				list_splice_init(&prep_sgl_list,
+						 &blck_sgl_list);
+				post_cnt = block_cnt;
+			} else if (block_cnt == 1) {
+				status = lpfc_sli4_post_sgl(phba,
+						sglq_entry->phys, 0,
+						sglq_entry->sli4_xritag);
+				if (!status) {
+					/* successful, put sgl to posted list */
+					list_add_tail(&sglq_entry->list,
+						      &post_sgl_list);
+				} else {
+					/* Failure, put sgl to free list */
+					lpfc_printf_log(phba, KERN_WARNING,
+						LOG_SLI,
+						"3159 Failed to post els "
+						"sgl, xritag:x%x\n",
+						sglq_entry->sli4_xritag);
+					list_add_tail(&sglq_entry->list,
+						      &free_sgl_list);
+					spin_lock_irq(&phba->hbalock);
+					phba->sli4_hba.els_xri_cnt--;
+					spin_unlock_irq(&phba->hbalock);
+				}
+			}
+		}
+
+		/* continue until a non-embedded page worth of sgls */
+		if (post_cnt == 0)
+			continue;
+
+		/* post the els buffer list sgls as a block */
+		status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
+						     post_cnt);
+
+		if (!status) {
+			/* success, put sgl list to posted sgl list */
+			list_splice_init(&blck_sgl_list, &post_sgl_list);
+		} else {
+			/* Failure, put sgl list to free sgl list */
+			sglq_entry_first = list_first_entry(&blck_sgl_list,
+							    struct lpfc_sglq,
+							    list);
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"3160 Failed to post els sgl-list, "
+					"xritag:x%x-x%x\n",
+					sglq_entry_first->sli4_xritag,
+					(sglq_entry_first->sli4_xritag +
+					 post_cnt - 1));
+			list_splice_init(&blck_sgl_list, &free_sgl_list);
+			spin_lock_irq(&phba->hbalock);
+			phba->sli4_hba.els_xri_cnt -= post_cnt;
+			spin_unlock_irq(&phba->hbalock);
+		}
+
+		/* don't reset xritag due to hole in xri block */
+		if (block_cnt == 0)
+			last_xritag = NO_XRI;
+
+		/* reset els sgl post count for next round of posting */
+		post_cnt = 0;
+	}
+
+	/* free the els sgls failed to post */
+	lpfc_free_sgl_list(phba, &free_sgl_list);
+
+	/* push els sgls posted to the available list */
+	if (!list_empty(&post_sgl_list)) {
+		spin_lock(&phba->hbalock);
+		list_splice_init(&post_sgl_list,
+				 &phba->sli4_hba.lpfc_sgl_list);
+		spin_unlock(&phba->hbalock);
+	} else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3161 Failure to post els sgl to port.\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
  * lpfc_sli4_hba_setup - SLI4 device intialization PCI function
  * @phba: Pointer to HBA context object.
  *
@@ -5923,6 +6077,8 @@
 	else
 		phba->hba_flag &= ~HBA_FIP_SUPPORT;
 
+	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
+
 	if (phba->sli_rev != LPFC_SLI_REV4) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 			"0376 READ_REV Error. SLI Level %d "
@@ -6063,8 +6219,6 @@
 				"rc = x%x\n", rc);
 		goto out_free_mbox;
 	}
-	/* update physical xri mappings in the scsi buffers */
-	lpfc_scsi_buf_update(phba);
 
 	/* Read the port's service parameters. */
 	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
@@ -6105,28 +6259,26 @@
 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
 
-	/* Register SGL pool to the device using non-embedded mailbox command */
-	if (!phba->sli4_hba.extents_in_use) {
-		rc = lpfc_sli4_post_els_sgl_list(phba);
-		if (unlikely(rc)) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-					"0582 Error %d during els sgl post "
-					"operation\n", rc);
-			rc = -ENODEV;
-			goto out_free_mbox;
-		}
-	} else {
-		rc = lpfc_sli4_post_els_sgl_list_ext(phba);
-		if (unlikely(rc)) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-					"2560 Error %d during els sgl post "
-					"operation\n", rc);
-			rc = -ENODEV;
-			goto out_free_mbox;
-		}
+	/* update host els and scsi xri-sgl sizes and mappings */
+	rc = lpfc_sli4_xri_sgl_update(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"1400 Failed to update xri-sgl size and "
+				"mapping: %d\n", rc);
+		goto out_free_mbox;
 	}
 
-	/* Register SCSI SGL pool to the device */
+	/* register the els sgl pool to the port */
+	rc = lpfc_sli4_repost_els_sgl_list(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0582 Error %d during els sgl post "
+				"operation\n", rc);
+		rc = -ENODEV;
+		goto out_free_mbox;
+	}
+
+	/* register the allocated scsi sgl pool to the port */
 	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
 	if (unlikely(rc)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7060,14 +7212,19 @@
 		if (rc != MBX_SUCCESS)
 			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
 					"(%d):2541 Mailbox command x%x "
-					"(x%x/x%x) cannot issue Data: "
-					"x%x x%x\n",
+					"(x%x/x%x) failure: "
+					"mqe_sta: x%x mcqe_sta: x%x/x%x "
+					"Data: x%x x%x\n,",
 					mboxq->vport ? mboxq->vport->vpi : 0,
 					mboxq->u.mb.mbxCommand,
 					lpfc_sli_config_mbox_subsys_get(phba,
 									mboxq),
 					lpfc_sli_config_mbox_opcode_get(phba,
 									mboxq),
+					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
+					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
+					bf_get(lpfc_mcqe_ext_status,
+					       &mboxq->mcqe),
 					psli->sli_flag, flag);
 		return rc;
 	} else if (flag == MBX_POLL) {
@@ -7086,18 +7243,22 @@
 			/* Successfully blocked, now issue sync mbox cmd */
 			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 			if (rc != MBX_SUCCESS)
-				lpfc_printf_log(phba, KERN_ERR,
+				lpfc_printf_log(phba, KERN_WARNING,
 					LOG_MBOX | LOG_SLI,
-					"(%d):2597 Mailbox command "
-					"x%x (x%x/x%x) cannot issue "
-					"Data: x%x x%x\n",
-					mboxq->vport ?
-					mboxq->vport->vpi : 0,
+					"(%d):2597 Sync Mailbox command "
+					"x%x (x%x/x%x) failure: "
+					"mqe_sta: x%x mcqe_sta: x%x/x%x "
+					"Data: x%x x%x\n,",
+					mboxq->vport ? mboxq->vport->vpi : 0,
 					mboxq->u.mb.mbxCommand,
 					lpfc_sli_config_mbox_subsys_get(phba,
 									mboxq),
 					lpfc_sli_config_mbox_opcode_get(phba,
 									mboxq),
+					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
+					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
+					bf_get(lpfc_mcqe_ext_status,
+					       &mboxq->mcqe),
 					psli->sli_flag, flag);
 			/* Unblock the async mailbox posting afterward */
 			lpfc_sli4_async_mbox_unblock(phba);
@@ -7712,7 +7873,10 @@
 
 	switch (iocbq->iocb.ulpCommand) {
 	case CMD_ELS_REQUEST64_CR:
-		ndlp = (struct lpfc_nodelist *)iocbq->context1;
+		if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
+			ndlp = iocbq->context_un.ndlp;
+		else
+			ndlp = (struct lpfc_nodelist *)iocbq->context1;
 		if (!iocbq->iocb.ulpLe) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2007 Only Limited Edition cmd Format"
@@ -7751,9 +7915,13 @@
 				bf_set(els_req64_sp, &wqe->els_req, 1);
 				bf_set(els_req64_sid, &wqe->els_req,
 					iocbq->vport->fc_myDID);
+				if ((*pcmd == ELS_CMD_FLOGI) &&
+					!(phba->fc_topology ==
+						LPFC_TOPOLOGY_LOOP))
+					bf_set(els_req64_sid, &wqe->els_req, 0);
 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
-					phba->vpi_ids[phba->pport->vpi]);
+					phba->vpi_ids[iocbq->vport->vpi]);
 			} else if (pcmd && iocbq->context1) {
 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
@@ -7908,11 +8076,25 @@
 		/* words0-2 BDE memcpy */
 		/* word3 iocb=iotag32 wqe=response_payload_len */
 		wqe->xmit_els_rsp.response_payload_len = xmit_len;
-		/* word4 iocb=did wge=rsvd. */
-		wqe->xmit_els_rsp.rsvd4 = 0;
+		/* word4 */
+		wqe->xmit_els_rsp.word4 = 0;
 		/* word5 iocb=rsvd wge=did */
 		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
-			 iocbq->iocb.un.elsreq64.remoteID);
+			 iocbq->iocb.un.xseq64.xmit_els_remoteID);
+
+		if_type = bf_get(lpfc_sli_intf_if_type,
+					&phba->sli4_hba.sli_intf);
+		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+			if (iocbq->vport->fc_flag & FC_PT2PT) {
+				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+					iocbq->vport->fc_myDID);
+				if (iocbq->vport->fc_myDID == Fabric_DID) {
+					bf_set(wqe_els_did,
+						&wqe->xmit_els_rsp.wqe_dest, 0);
+				}
+			}
+		}
 		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
 		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
 		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
@@ -7932,11 +8114,11 @@
 		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
 					iocbq->context2)->virt);
 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
-				bf_set(els_req64_sp, &wqe->els_req, 1);
-				bf_set(els_req64_sid, &wqe->els_req,
+				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
 					iocbq->vport->fc_myDID);
-				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
-				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+				bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
+				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
 					phba->vpi_ids[phba->pport->vpi]);
 		}
 		command_type = OTHER_COMMAND;
@@ -13080,9 +13262,7 @@
 	} else {
 		set_bit(xri, phba->sli4_hba.xri_bmask);
 		phba->sli4_hba.max_cfg_param.xri_used++;
-		phba->sli4_hba.xri_count++;
 	}
-
 	spin_unlock_irq(&phba->hbalock);
 	return xri;
 }
@@ -13098,7 +13278,6 @@
 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
 {
 	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
-		phba->sli4_hba.xri_count--;
 		phba->sli4_hba.max_cfg_param.xri_used--;
 	}
 }
@@ -13134,46 +13313,45 @@
 	uint16_t xri_index;
 
 	xri_index = lpfc_sli4_alloc_xri(phba);
-	if (xri_index != NO_XRI)
-		return xri_index;
-
-	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-			"2004 Failed to allocate XRI.last XRITAG is %d"
-			" Max XRI is %d, Used XRI is %d\n",
-			xri_index,
-			phba->sli4_hba.max_cfg_param.max_xri,
-			phba->sli4_hba.max_cfg_param.xri_used);
-	return NO_XRI;
+	if (xri_index == NO_XRI)
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2004 Failed to allocate XRI.last XRITAG is %d"
+				" Max XRI is %d, Used XRI is %d\n",
+				xri_index,
+				phba->sli4_hba.max_cfg_param.max_xri,
+				phba->sli4_hba.max_cfg_param.xri_used);
+	return xri_index;
 }
 
 /**
  * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
  * @phba: pointer to lpfc hba data structure.
+ * @post_sgl_list: pointer to els sgl entry list.
+ * @count: number of els sgl entries on the list.
  *
  * This routine is invoked to post a block of driver's sgl pages to the
  * HBA using non-embedded mailbox command. No Lock is held. This routine
  * is only called when the driver is loading and after all IO has been
  * stopped.
  **/
-int
-lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
+static int
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
+			    struct list_head *post_sgl_list,
+			    int post_cnt)
 {
-	struct lpfc_sglq *sglq_entry;
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
 	struct sgl_page_pairs *sgl_pg_pairs;
 	void *viraddr;
 	LPFC_MBOXQ_t *mbox;
 	uint32_t reqlen, alloclen, pg_pairs;
 	uint32_t mbox_tmo;
-	uint16_t xritag_start = 0, lxri = 0;
-	int els_xri_cnt, rc = 0;
+	uint16_t xritag_start = 0;
+	int rc = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
 
-	/* The number of sgls to be posted */
-	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-
-	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+	reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 	if (reqlen > SLI4_PAGE_SIZE) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -13203,25 +13381,8 @@
 	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
 
-	for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
-		sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
-
-		/*
-		 * Assign the sglq a physical xri only if the driver has not
-		 * initialized those resources.  A port reset only needs
-		 * the sglq's posted.
-		 */
-		if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
-		    LPFC_XRI_RSRC_RDY) {
-			lxri = lpfc_sli4_next_xritag(phba);
-			if (lxri == NO_XRI) {
-				lpfc_sli4_mbox_cmd_free(phba, mbox);
-				return -ENOMEM;
-			}
-			sglq_entry->sli4_lxritag = lxri;
-			sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
-		}
-
+	pg_pairs = 0;
+	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
 		/* Set up the sge entry */
 		sgl_pg_pairs->sgl_pg0_addr_lo =
 				cpu_to_le32(putPaddrLow(sglq_entry->phys));
@@ -13236,11 +13397,12 @@
 		if (pg_pairs == 0)
 			xritag_start = sglq_entry->sli4_xritag;
 		sgl_pg_pairs++;
+		pg_pairs++;
 	}
 
 	/* Complete initialization and perform endian conversion. */
 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
-	bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
 	sgl->word0 = cpu_to_le32(sgl->word0);
 	if (!phba->sli4_hba.intr_enable)
 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
@@ -13260,183 +13422,6 @@
 				shdr_status, shdr_add_status, rc);
 		rc = -ENXIO;
 	}
-
-	if (rc == 0)
-		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
-		       LPFC_XRI_RSRC_RDY);
-	return rc;
-}
-
-/**
- * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to post a block of driver's sgl pages to the
- * HBA using non-embedded mailbox command. No Lock is held. This routine
- * is only called when the driver is loading and after all IO has been
- * stopped.
- **/
-int
-lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
-{
-	struct lpfc_sglq *sglq_entry;
-	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
-	struct sgl_page_pairs *sgl_pg_pairs;
-	void *viraddr;
-	LPFC_MBOXQ_t *mbox;
-	uint32_t reqlen, alloclen, index;
-	uint32_t mbox_tmo;
-	uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
-	uint16_t xritag_start = 0, lxri = 0;
-	struct lpfc_rsrc_blks *rsrc_blk;
-	int cnt, ttl_cnt, rc = 0;
-	int loop_cnt;
-	uint32_t shdr_status, shdr_add_status;
-	union lpfc_sli4_cfg_shdr *shdr;
-
-	/* The number of sgls to be posted */
-	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-
-	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
-		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-	if (reqlen > SLI4_PAGE_SIZE) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"2989 Block sgl registration required DMA "
-				"size (%d) great than a page\n", reqlen);
-		return -ENOMEM;
-	}
-
-	cnt = 0;
-	ttl_cnt = 0;
-	post_els_xri_cnt = els_xri_cnt;
-	list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
-			    list) {
-		rsrc_start = rsrc_blk->rsrc_start;
-		rsrc_size = rsrc_blk->rsrc_size;
-
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"3014 Working ELS Extent start %d, cnt %d\n",
-				rsrc_start, rsrc_size);
-
-		loop_cnt = min(post_els_xri_cnt, rsrc_size);
-		if (loop_cnt < post_els_xri_cnt) {
-			post_els_xri_cnt -= loop_cnt;
-			ttl_cnt += loop_cnt;
-		} else
-			ttl_cnt += post_els_xri_cnt;
-
-		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-		if (!mbox)
-			return -ENOMEM;
-		/*
-		 * Allocate DMA memory and set up the non-embedded mailbox
-		 * command.
-		 */
-		alloclen = lpfc_sli4_config(phba, mbox,
-					LPFC_MBOX_SUBSYSTEM_FCOE,
-					LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
-					reqlen, LPFC_SLI4_MBX_NEMBED);
-		if (alloclen < reqlen) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2987 Allocated DMA memory size (%d) "
-					"is less than the requested DMA memory "
-					"size (%d)\n", alloclen, reqlen);
-			lpfc_sli4_mbox_cmd_free(phba, mbox);
-			return -ENOMEM;
-		}
-
-		/* Set up the SGL pages in the non-embedded DMA pages */
-		viraddr = mbox->sge_array->addr[0];
-		sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
-		sgl_pg_pairs = &sgl->sgl_pg_pairs;
-
-		/*
-		 * The starting resource may not begin at zero. Control
-		 * the loop variants via the block resource parameters,
-		 * but handle the sge pointers with a zero-based index
-		 * that doesn't get reset per loop pass.
-		 */
-		for (index = rsrc_start;
-		     index < rsrc_start + loop_cnt;
-		     index++) {
-			sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
-
-			/*
-			 * Assign the sglq a physical xri only if the driver
-			 * has not initialized those resources.  A port reset
-			 * only needs the sglq's posted.
-			 */
-			if (bf_get(lpfc_xri_rsrc_rdy,
-				   &phba->sli4_hba.sli4_flags) !=
-				   LPFC_XRI_RSRC_RDY) {
-				lxri = lpfc_sli4_next_xritag(phba);
-				if (lxri == NO_XRI) {
-					lpfc_sli4_mbox_cmd_free(phba, mbox);
-					rc = -ENOMEM;
-					goto err_exit;
-				}
-				sglq_entry->sli4_lxritag = lxri;
-				sglq_entry->sli4_xritag =
-						phba->sli4_hba.xri_ids[lxri];
-			}
-
-			/* Set up the sge entry */
-			sgl_pg_pairs->sgl_pg0_addr_lo =
-				cpu_to_le32(putPaddrLow(sglq_entry->phys));
-			sgl_pg_pairs->sgl_pg0_addr_hi =
-				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
-			sgl_pg_pairs->sgl_pg1_addr_lo =
-				cpu_to_le32(putPaddrLow(0));
-			sgl_pg_pairs->sgl_pg1_addr_hi =
-				cpu_to_le32(putPaddrHigh(0));
-
-			/* Track the starting physical XRI for the mailbox. */
-			if (index == rsrc_start)
-				xritag_start = sglq_entry->sli4_xritag;
-			sgl_pg_pairs++;
-			cnt++;
-		}
-
-		/* Complete initialization and perform endian conversion. */
-		rsrc_blk->rsrc_used += loop_cnt;
-		bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
-		bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
-		sgl->word0 = cpu_to_le32(sgl->word0);
-
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"3015 Post ELS Extent SGL, start %d, "
-				"cnt %d, used %d\n",
-				xritag_start, loop_cnt, rsrc_blk->rsrc_used);
-		if (!phba->sli4_hba.intr_enable)
-			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-		else {
-			mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
-			rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
-		}
-		shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
-		shdr_status = bf_get(lpfc_mbox_hdr_status,
-				     &shdr->response);
-		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
-					 &shdr->response);
-		if (rc != MBX_TIMEOUT)
-			lpfc_sli4_mbox_cmd_free(phba, mbox);
-		if (shdr_status || shdr_add_status || rc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"2988 POST_SGL_BLOCK mailbox "
-					"command failed status x%x "
-					"add_status x%x mbx status x%x\n",
-					shdr_status, shdr_add_status, rc);
-			rc = -ENXIO;
-			goto err_exit;
-		}
-		if (ttl_cnt >= els_xri_cnt)
-			break;
-	}
-
- err_exit:
-	if (rc == 0)
-		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
-		       LPFC_XRI_RSRC_RDY);
 	return rc;
 }
 
@@ -13452,8 +13437,9 @@
  *
  **/
 int
-lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
-			      int cnt)
+lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
+			      struct list_head *sblist,
+			      int count)
 {
 	struct lpfc_scsi_buf *psb;
 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
@@ -13469,7 +13455,7 @@
 	union lpfc_sli4_cfg_shdr *shdr;
 
 	/* Calculate the requested length of the dma memory */
-	reqlen = cnt * sizeof(struct sgl_page_pairs) +
+	reqlen = count * sizeof(struct sgl_page_pairs) +
 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 	if (reqlen > SLI4_PAGE_SIZE) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -13553,169 +13539,6 @@
 }
 
 /**
- * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
- * @phba: pointer to lpfc hba data structure.
- * @sblist: pointer to scsi buffer list.
- * @count: number of scsi buffers on the list.
- *
- * This routine is invoked to post a block of @count scsi sgl pages from a
- * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
- * No Lock is held.
- *
- **/
-int
-lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
-				int cnt)
-{
-	struct lpfc_scsi_buf *psb = NULL;
-	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
-	struct sgl_page_pairs *sgl_pg_pairs;
-	void *viraddr;
-	LPFC_MBOXQ_t *mbox;
-	uint32_t reqlen, alloclen, pg_pairs;
-	uint32_t mbox_tmo;
-	uint16_t xri_start = 0, scsi_xri_start;
-	uint16_t rsrc_range;
-	int rc = 0, avail_cnt;
-	uint32_t shdr_status, shdr_add_status;
-	dma_addr_t pdma_phys_bpl1;
-	union lpfc_sli4_cfg_shdr *shdr;
-	struct lpfc_rsrc_blks *rsrc_blk;
-	uint32_t xri_cnt = 0;
-
-	/* Calculate the total requested length of the dma memory */
-	reqlen = cnt * sizeof(struct sgl_page_pairs) +
-		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-	if (reqlen > SLI4_PAGE_SIZE) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"2932 Block sgl registration required DMA "
-				"size (%d) great than a page\n", reqlen);
-		return -ENOMEM;
-	}
-
-	/*
-	 * The use of extents requires the driver to post the sgl headers
-	 * in multiple postings to meet the contiguous resource assignment.
-	 */
-	psb = list_prepare_entry(psb, sblist, list);
-	scsi_xri_start = phba->sli4_hba.scsi_xri_start;
-	list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
-			    list) {
-		rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
-		if (rsrc_range < scsi_xri_start)
-			continue;
-		else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
-			continue;
-		else
-			avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
-
-		reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
-			sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-		/*
-		 * Allocate DMA memory and set up the non-embedded mailbox
-		 * command. The mbox is used to post an SGL page per loop
-		 * but the DMA memory has a use-once semantic so the mailbox
-		 * is used and freed per loop pass.
-		 */
-		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-		if (!mbox) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2933 Failed to allocate mbox cmd "
-					"memory\n");
-			return -ENOMEM;
-		}
-		alloclen = lpfc_sli4_config(phba, mbox,
-					LPFC_MBOX_SUBSYSTEM_FCOE,
-					LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
-					reqlen,
-					LPFC_SLI4_MBX_NEMBED);
-		if (alloclen < reqlen) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2934 Allocated DMA memory size (%d) "
-					"is less than the requested DMA memory "
-					"size (%d)\n", alloclen, reqlen);
-			lpfc_sli4_mbox_cmd_free(phba, mbox);
-			return -ENOMEM;
-		}
-
-		/* Get the first SGE entry from the non-embedded DMA memory */
-		viraddr = mbox->sge_array->addr[0];
-
-		/* Set up the SGL pages in the non-embedded DMA pages */
-		sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
-		sgl_pg_pairs = &sgl->sgl_pg_pairs;
-
-		/* pg_pairs tracks posted SGEs per loop iteration. */
-		pg_pairs = 0;
-		list_for_each_entry_continue(psb, sblist, list) {
-			/* Set up the sge entry */
-			sgl_pg_pairs->sgl_pg0_addr_lo =
-				cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
-			sgl_pg_pairs->sgl_pg0_addr_hi =
-				cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
-			if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-				pdma_phys_bpl1 = psb->dma_phys_bpl +
-					SGL_PAGE_SIZE;
-			else
-				pdma_phys_bpl1 = 0;
-			sgl_pg_pairs->sgl_pg1_addr_lo =
-				cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
-			sgl_pg_pairs->sgl_pg1_addr_hi =
-				cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
-			/* Keep the first xri for this extent. */
-			if (pg_pairs == 0)
-				xri_start = psb->cur_iocbq.sli4_xritag;
-			sgl_pg_pairs++;
-			pg_pairs++;
-			xri_cnt++;
-
-			/*
-			 * Track two exit conditions - the loop has constructed
-			 * all of the caller's SGE pairs or all available
-			 * resource IDs in this extent are consumed.
-			 */
-			if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
-				break;
-		}
-		rsrc_blk->rsrc_used += pg_pairs;
-		bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
-		bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
-
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"3016 Post SCSI Extent SGL, start %d, cnt %d "
-				"blk use %d\n",
-				xri_start, pg_pairs, rsrc_blk->rsrc_used);
-		/* Perform endian conversion if necessary */
-		sgl->word0 = cpu_to_le32(sgl->word0);
-		if (!phba->sli4_hba.intr_enable)
-			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-		else {
-			mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
-			rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
-		}
-		shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
-		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
-					 &shdr->response);
-		if (rc != MBX_TIMEOUT)
-			lpfc_sli4_mbox_cmd_free(phba, mbox);
-		if (shdr_status || shdr_add_status || rc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"2935 POST_SGL_BLOCK mailbox command "
-					"failed status x%x add_status x%x "
-					"mbx status x%x\n",
-					shdr_status, shdr_add_status, rc);
-			return -ENXIO;
-		}
-
-		/* Post only what is requested. */
-		if (xri_cnt >= cnt)
-			break;
-	}
-	return rc;
-}
-
-/**
  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
  * @phba: pointer to lpfc_hba struct that the frame was received on
  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
@@ -13839,8 +13662,13 @@
 	uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
 			fc_hdr->fh_d_id[1] << 8 |
 			fc_hdr->fh_d_id[2]);
+
 	if (did == Fabric_DID)
 		return phba->pport;
+	if ((phba->pport->fc_flag & FC_PT2PT) &&
+		!(phba->link_state == LPFC_HBA_READY))
+		return phba->pport;
+
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
@@ -14133,7 +13961,6 @@
 	return NO_XRI;
 }
 
-
 /**
  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
  * @phba: Pointer to HBA context object.
@@ -14148,7 +13975,7 @@
 {
 	struct lpfc_iocbq *ctiocb = NULL;
 	struct lpfc_nodelist *ndlp;
-	uint16_t oxid, rxid;
+	uint16_t oxid, rxid, xri, lxri;
 	uint32_t sid, fctl;
 	IOCB_t *icmd;
 	int rc;
@@ -14167,8 +13994,6 @@
 				"SID:x%x\n", oxid, sid);
 		return;
 	}
-	if (lpfc_sli4_xri_inrange(phba, rxid))
-		lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
 
 	/* Allocate buffer for rsp iocb */
 	ctiocb = lpfc_sli_get_iocbq(phba);
@@ -14199,13 +14024,24 @@
 	ctiocb->sli4_lxritag = NO_XRI;
 	ctiocb->sli4_xritag = NO_XRI;
 
+	if (fctl & FC_FC_EX_CTX)
+		/* Exchange responder sent the abort so we
+		 * own the oxid.
+		 */
+		xri = oxid;
+	else
+		xri = rxid;
+	lxri = lpfc_sli4_xri_inrange(phba, xri);
+	if (lxri != NO_XRI)
+		lpfc_set_rrq_active(phba, ndlp, lxri,
+			(xri == oxid) ? rxid : oxid, 0);
 	/* If the oxid maps to the FCP XRI range or if it is out of range,
 	 * send a BLS_RJT.  The driver no longer has that exchange.
 	 * Override the IOCB for a BA_RJT.
 	 */
-	if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
+	if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
 		    phba->sli4_hba.max_cfg_param.xri_base) ||
-	    oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
+	    xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
 		    phba->sli4_hba.max_cfg_param.xri_base)) {
 		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
 		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
@@ -14377,7 +14213,15 @@
 		/* Initialize the first IOCB. */
 		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
 		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
-		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
+
+		/* Check FC Header to see what TYPE of frame we are rcv'ing */
+		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
+			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
+			first_iocbq->iocb.un.rcvels.parmRo =
+				sli4_did_from_fc_hdr(fc_hdr);
+			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
+		} else
+			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
 		first_iocbq->iocb.ulpContext = NO_XRI;
 		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
 			be16_to_cpu(fc_hdr->fh_ox_id);
@@ -14507,6 +14351,7 @@
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_vport *vport;
 	uint32_t fcfi;
+	uint32_t did;
 
 	/* Process each received buffer */
 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
@@ -14522,12 +14367,32 @@
 	else
 		fcfi = bf_get(lpfc_rcqe_fcf_id,
 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
+
 	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
-	if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+	if (!vport) {
 		/* throw out the frame */
 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 		return;
 	}
+
+	/* d_id this frame is directed to */
+	did = sli4_did_from_fc_hdr(fc_hdr);
+
+	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
+	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
+		(did != Fabric_DID)) {
+		/*
+		 * Throw out the frame if we are not pt2pt.
+		 * The pt2pt protocol allows for discovery frames
+		 * to be received without a registered VPI.
+		 */
+		if (!(vport->fc_flag & FC_PT2PT) ||
+			(phba->link_state == LPFC_HBA_READY)) {
+			lpfc_in_buf_free(phba, &dmabuf->dbuf);
+			return;
+		}
+	}
+
 	/* Handle the basic abort sequence (BA_ABTS) event */
 	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
 		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
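
The xri-grouping behaviour documented in lpfc_sli4_repost_els_sgl_list earlier in this file is easy to lose in the list-splicing details, so here is a minimal stand-alone sketch of just the grouping decision: contiguous xri runs become non-embedded block posts (capped at the non-embedded mailbox limit, LPFC_NEMBED_MBOX_SGL_CNT in the driver), and a run of one falls back to an embedded post. The BLOCK_MAX value and the sample xri array below are invented for illustration, not driver values.

#include <stdio.h>

#define BLOCK_MAX 4	/* illustrative stand-in for the non-embedded sgl limit */

/* Walk a sorted list of XRI tags and emit maximal runs of contiguous
 * tags, capped at BLOCK_MAX entries per run.  A run of length 1 is
 * "posted" with the embedded command; longer runs use a block post. */
static void post_blocks(const int *xri, int n)
{
	int start = 0;

	for (int i = 1; i <= n; i++) {
		int hole = (i == n) || (xri[i] != xri[i - 1] + 1);
		int full = (i - start) == BLOCK_MAX;

		if (!hole && !full)
			continue;

		if (i - start == 1)
			printf("embedded post: xri %d\n", xri[start]);
		else
			printf("block post: xri %d-%d (%d sgls)\n",
			       xri[start], xri[i - 1], i - start);
		start = i;
	}
}

int main(void)
{
	/* two contiguous runs separated by a hole, plus a singleton */
	int xri[] = { 10, 11, 12, 13, 14, 20, 21, 30 };

	post_blocks(xri, (int)(sizeof(xri) / sizeof(xri[0])));
	return 0;
}
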
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 3290b8e..2626f58 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -68,7 +68,7 @@
 #define LPFC_EXCHANGE_BUSY	0x40    /* SLI4 hba reported XB in response */
 #define LPFC_USE_FCPWQIDX	0x80    /* Submit to specified FCPWQ index */
 #define DSS_SECURITY_OP		0x100	/* security IO */
-#define LPFC_IO_ON_Q		0x200	/* The IO is still on the TXCMPLQ */
+#define LPFC_IO_ON_TXCMPLQ	0x200	/* The IO is still on the TXCMPLQ */
 #define LPFC_IO_DIF		0x400	/* T10 DIF IO */
 
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c19d139..a4a7708 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -75,11 +75,19 @@
 	 (fc_hdr)->fh_s_id[1] <<  8 | \
 	 (fc_hdr)->fh_s_id[2])
 
+#define sli4_did_from_fc_hdr(fc_hdr)  \
+	((fc_hdr)->fh_d_id[0] << 16 | \
+	 (fc_hdr)->fh_d_id[1] <<  8 | \
+	 (fc_hdr)->fh_d_id[2])
+
 #define sli4_fctl_from_fc_hdr(fc_hdr)  \
 	((fc_hdr)->fh_f_ctl[0] << 16 | \
 	 (fc_hdr)->fh_f_ctl[1] <<  8 | \
 	 (fc_hdr)->fh_f_ctl[2])
 
+#define sli4_type_from_fc_hdr(fc_hdr)  \
+	((fc_hdr)->fh_type)
+
 #define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
 
 enum lpfc_sli4_queue_type {
@@ -493,14 +501,12 @@
 	uint16_t next_rpi;
 	uint16_t scsi_xri_max;
 	uint16_t scsi_xri_cnt;
+	uint16_t els_xri_cnt;
 	uint16_t scsi_xri_start;
 	struct list_head lpfc_free_sgl_list;
 	struct list_head lpfc_sgl_list;
-	struct lpfc_sglq **lpfc_els_sgl_array;
 	struct list_head lpfc_abts_els_sgl_list;
-	struct lpfc_scsi_buf **lpfc_scsi_psb_array;
 	struct list_head lpfc_abts_scsi_buf_list;
-	uint32_t total_sglq_bufs;
 	struct lpfc_sglq **lpfc_sglq_active_list;
 	struct list_head lpfc_rpi_hdr_list;
 	unsigned long *rpi_bmask;
@@ -509,7 +515,6 @@
 	struct list_head lpfc_rpi_blk_list;
 	unsigned long *xri_bmask;
 	uint16_t *xri_ids;
-	uint16_t xri_count;
 	struct list_head lpfc_xri_blk_list;
 	unsigned long *vfi_bmask;
 	uint16_t *vfi_ids;
@@ -614,11 +619,7 @@
 int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
 uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
 int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
-int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
-int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
 int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
-int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
-				    int);
 struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
 struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
 void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
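
The new sli4_did_from_fc_hdr() macro above builds the 24-bit FC destination ID from the three fh_d_id bytes, which is what the unsolicited-receive path now compares against Fabric_DID. A tiny self-contained illustration follows; the struct is a simplification of the real fc_frame_header, and the sample bytes encode the well-known fabric address 0xFFFFFE.

#include <stdio.h>
#include <stdint.h>

/* Simplified view of the 3-byte D_ID field in an FC frame header,
 * mirroring what sli4_did_from_fc_hdr() extracts. */
struct fc_hdr_did {
	uint8_t fh_d_id[3];
};

static uint32_t did_from_hdr(const struct fc_hdr_did *h)
{
	/* assemble the 24-bit destination ID, most significant byte first */
	return (uint32_t)h->fh_d_id[0] << 16 |
	       (uint32_t)h->fh_d_id[1] << 8 |
	       (uint32_t)h->fh_d_id[2];
}

int main(void)
{
	/* 0xFFFFFE is the well-known fabric login address */
	struct fc_hdr_did h = { { 0xFF, 0xFF, 0xFE } };

	printf("did = 0x%06X\n", (unsigned)did_from_hdr(&h));
	return 0;
}
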
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 25cefc2..59c57a4 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.30"
+#define LPFC_DRIVER_VERSION "8.3.31"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e5f416f..e8f8926 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"00.00.06.14-rc1"
-#define MEGASAS_RELDATE				"Jan. 6, 2012"
-#define MEGASAS_EXT_VERSION			"Fri. Jan. 6 17:00:00 PDT 2012"
+#define MEGASAS_VERSION				"00.00.06.15-rc1"
+#define MEGASAS_RELDATE				"Mar. 19, 2012"
+#define MEGASAS_EXT_VERSION			"Mon. Mar. 19 17:00:00 PDT 2012"
 
 /*
  * Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 8b300be..dc27598 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : v00.00.06.14-rc1
+ *  Version : v00.00.06.15-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 294abb0..e3d251a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -362,15 +362,20 @@
 	/* assume this IO needs the full row - we'll adjust if not true */
 	regSize             = stripSize;
 
-	/* If IO spans more than 1 strip, fp is not possible
-	   FP is not possible for writes on non-0 raid levels
-	   FP is not possible if LD is not capable */
-	if (num_strips > 1 || (!isRead && raid->level != 0) ||
-	    !raid->capability.fpCapable) {
+	/* Check if we can send this I/O via FastPath */
+	if (raid->capability.fpCapable) {
+		if (isRead)
+			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
+					      ((num_strips == 1) ||
+					       raid->capability.
+					       fpReadAcrossStripe));
+		else
+			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
+					      ((num_strips == 1) ||
+					       raid->capability.
+					       fpWriteAcrossStripe));
+	} else
 		io_info->fpOkForIo = FALSE;
-	} else {
-		io_info->fpOkForIo = TRUE;
-	}
 
 	if (numRows == 1) {
 		/* single-strip IOs can always lock only the data needed */
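
The hunk above replaces the old blanket rule (no FastPath for multi-strip I/O, for writes on non-RAID0 levels, or for LDs that are not capable) with per-direction capability checks that also honour the new across-stripe bits. A condensed sketch of the new predicate is shown below; the struct and field names are simplified stand-ins for raid->capability, not the driver's actual layout.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative capability bits, loosely following raid->capability. */
struct ld_caps {
	bool fp_capable;
	bool fp_read_capable;
	bool fp_write_capable;
	bool fp_read_across_stripe;
	bool fp_write_across_stripe;
};

/* Mirror of the new eligibility test: FastPath is allowed per direction,
 * and multi-strip I/O additionally needs the matching across-stripe bit. */
static bool fp_ok_for_io(const struct ld_caps *c, bool is_read,
			 unsigned int num_strips)
{
	if (!c->fp_capable)
		return false;
	if (is_read)
		return c->fp_read_capable &&
		       (num_strips == 1 || c->fp_read_across_stripe);
	return c->fp_write_capable &&
	       (num_strips == 1 || c->fp_write_across_stripe);
}

int main(void)
{
	struct ld_caps caps = {
		.fp_capable = true,
		.fp_read_capable = true,
		.fp_write_capable = true,
		.fp_read_across_stripe = true,
		.fp_write_across_stripe = false,
	};

	printf("multi-strip read : %d\n", fp_ok_for_io(&caps, true, 3));
	printf("multi-strip write: %d\n", fp_ok_for_io(&caps, false, 3));
	return 0;
}
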
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index bfd87fa..a610cf1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -634,9 +634,7 @@
 		fusion->reply_frames_desc_phys;
 	IOCInitMessage->SystemRequestFrameBaseAddress =
 		fusion->io_request_frames_phys;
-	/* Set to 0 for none or 1 MSI-X vectors */
-	IOCInitMessage->HostMSIxVectors = (instance->msix_vectors > 0 ?
-					   instance->msix_vectors : 0);
+	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
 	init_frame = (struct megasas_init_frame *)cmd->frame;
 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
 
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index a01f0aa..a80f322 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.22
+ *  mpi2.h Version:  02.00.23
  *
  *  Version History
  *  ---------------
@@ -71,6 +71,7 @@
  *  03-09-11  02.00.20  Bumped MPI2_HEADER_VERSION_UNIT.
  *  05-25-11  02.00.21  Bumped MPI2_HEADER_VERSION_UNIT.
  *  08-24-11  02.00.22  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  11-18-11  02.00.23  Bumped MPI2_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -96,7 +97,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x16)
+#define MPI2_HEADER_VERSION_UNIT            (0x17)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -480,7 +481,7 @@
     MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR  RAIDAcceleratorSuccess;
     U64                                             Words;
 } MPI2_REPLY_DESCRIPTORS_UNION, MPI2_POINTER PTR_MPI2_REPLY_DESCRIPTORS_UNION,
-  Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
+Mpi2ReplyDescriptorsUnion_t, MPI2_POINTER pMpi2ReplyDescriptorsUnion_t;
 
 
 
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 3a023da..737fa8c 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.21
+ *    mpi2_cnfg.h Version:  02.00.22
  *
  *  Version History
  *  ---------------
@@ -146,7 +146,9 @@
  *                      Added SpinupFlags field containing a Disable Spin-up
  *                      bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
  *                      SAS IO Unit Page 4.
-
+ *  11-18-11  02.00.22  Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ *                      Added UEFIVersion field to BIOS Page 1 and defined new
+ *                      BiosOptions bits.
  *  --------------------------------------------------------------------------
  */
 
@@ -1131,9 +1133,10 @@
 } MPI2_CONFIG_PAGE_IOC_6, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IOC_6,
   Mpi2IOCPage6_t, MPI2_POINTER pMpi2IOCPage6_t;
 
-#define MPI2_IOCPAGE6_PAGEVERSION                       (0x04)
+#define MPI2_IOCPAGE6_PAGEVERSION                       (0x05)
 
 /* defines for IOC Page 6 CapabilitiesFlags */
+#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT      (0x00000020)
 #define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT          (0x00000010)
 #define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT           (0x00000008)
 #define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT          (0x00000004)
@@ -1204,24 +1207,29 @@
 
 typedef struct _MPI2_CONFIG_PAGE_BIOS_1
 {
-    MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x00 */
-    U32                     BiosOptions;                /* 0x04 */
-    U32                     IOCSettings;                /* 0x08 */
-    U32                     Reserved1;                  /* 0x0C */
-    U32                     DeviceSettings;             /* 0x10 */
-    U16                     NumberOfDevices;            /* 0x14 */
-    U16                     Reserved2;                  /* 0x16 */
-    U16                     IOTimeoutBlockDevicesNonRM; /* 0x18 */
-    U16                     IOTimeoutSequential;        /* 0x1A */
-    U16                     IOTimeoutOther;             /* 0x1C */
-    U16                     IOTimeoutBlockDevicesRM;    /* 0x1E */
+	MPI2_CONFIG_PAGE_HEADER Header;                     /* 0x00 */
+	U32                     BiosOptions;                /* 0x04 */
+	U32                     IOCSettings;                /* 0x08 */
+	U32                     Reserved1;                  /* 0x0C */
+	U32                     DeviceSettings;             /* 0x10 */
+	U16                     NumberOfDevices;            /* 0x14 */
+	U16                     UEFIVersion;                /* 0x16 */
+	U16                     IOTimeoutBlockDevicesNonRM; /* 0x18 */
+	U16                     IOTimeoutSequential;        /* 0x1A */
+	U16                     IOTimeoutOther;             /* 0x1C */
+	U16                     IOTimeoutBlockDevicesRM;    /* 0x1E */
 } MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1,
   Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t;
 
-#define MPI2_BIOSPAGE1_PAGEVERSION                      (0x04)
+#define MPI2_BIOSPAGE1_PAGEVERSION                      (0x05)
 
 /* values for BIOS Page 1 BiosOptions field */
-#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS             (0x00000001)
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION   (0x00000006)
+#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII              (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII             (0x00000002)
+#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII       (0x00000004)
+
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS                 (0x00000001)
 
 /* values for BIOS Page 1 IOCSettings field */
 #define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE      (0x00030000)
@@ -1248,6 +1256,13 @@
 #define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN        (0x00000002)
 #define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN         (0x00000001)
 
+/* defines for BIOS Page 1 UEFIVersion field */
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK              (0xFF00)
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT             (8)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK              (0x00FF)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT             (0)
+
+
 
 /* BIOS Page 2 */
 
@@ -2216,6 +2231,27 @@
 
 
 
+/* SAS IO Unit Page 16 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 {
+	MPI2_CONFIG_EXTENDED_PAGE_HEADER  Header;                  /* 0x00 */
+	U64                         TimeStamp;                     /* 0x08 */
+	U32                         Reserved1;                     /* 0x10 */
+	U32                         Reserved2;                     /* 0x14 */
+	U32                         FastPathPendedRequests;        /* 0x18 */
+	U32                         FastPathUnPendedRequests;      /* 0x1C */
+	U32                         FastPathHostRequestStarts;     /* 0x20 */
+	U32                         FastPathFirmwareRequestStarts; /* 0x24 */
+	U32                         FastPathHostCompletions;       /* 0x28 */
+	U32                         FastPathFirmwareCompletions;   /* 0x2C */
+	U32                         NonFastPathRequestStarts;      /* 0x30 */
+	U32                         NonFastPathHostCompletions;    /* 0x34 */
+} MPI2_CONFIG_PAGE_SASIOUNIT16,
+MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
+Mpi2SasIOUnitPage16_t, MPI2_POINTER pMpi2SasIOUnitPage16_t;
+
+#define MPI2_SASIOUNITPAGE16_PAGEVERSION    (0x00)
+
 
 /****************************************************************************
 *   SAS Expander Config Pages
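
BIOS Page 1 gains a 16-bit UEFIVersion field plus major/minor mask and shift defines in the hunk above. Decoding it is a plain mask-and-shift, sketched below with the defines copied from the header and a made-up example value.

#include <stdio.h>
#include <stdint.h>

/* Mask/shift values as added to mpi2_cnfg.h for BIOS Page 1 UEFIVersion */
#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK	(0xFF00)
#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT	(8)
#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK	(0x00FF)
#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT	(0)

int main(void)
{
	uint16_t uefi_version = 0x020A;	/* example value, not real hardware data */

	unsigned major = (uefi_version & MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK)
			 >> MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT;
	unsigned minor = (uefi_version & MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK)
			 >> MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT;

	printf("UEFI version %u.%u\n", major, minor);	/* prints 2.10 */
	return 0;
}
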
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 8a59a77..6102ef2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -699,6 +699,11 @@
 	u16 ioc_status;
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+	if (unlikely(!mpi_reply)) {
+		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+			ioc->name, __FILE__, __LINE__, __func__);
+		return;
+	}
 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
@@ -930,16 +935,18 @@
 		else if (request_desript_type ==
 		    MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
 			goto next;
-		if (smid)
+		if (smid) {
 			cb_idx = _base_get_cb_idx(ioc, smid);
-		if (smid && cb_idx != 0xFF) {
-			rc = mpt_callbacks[cb_idx](ioc, smid, msix_index,
-			    reply);
+		if ((likely(cb_idx < MPT_MAX_CALLBACKS))
+			    && (likely(mpt_callbacks[cb_idx] != NULL))) {
+				rc = mpt_callbacks[cb_idx](ioc, smid,
+				    msix_index, reply);
 			if (reply)
-				_base_display_reply_info(ioc, smid, msix_index,
-				    reply);
+				_base_display_reply_info(ioc, smid,
+				    msix_index, reply);
 			if (rc)
 				mpt2sas_base_free_smid(ioc, smid);
+			}
 		}
 		if (!smid)
 			_base_async_event(ioc, msix_index, reply);
@@ -3343,7 +3350,7 @@
 	}
 
 	pfacts = &ioc->pfacts[port];
-	memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t));
+	memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
 	pfacts->PortNumber = mpi_reply.PortNumber;
 	pfacts->VP_ID = mpi_reply.VP_ID;
 	pfacts->VF_ID = mpi_reply.VF_ID;
@@ -3385,7 +3392,7 @@
 	}
 
 	facts = &ioc->facts;
-	memset(facts, 0, sizeof(Mpi2IOCFactsReply_t));
+	memset(facts, 0, sizeof(struct mpt2sas_facts));
 	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
 	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
 	facts->VP_ID = mpi_reply.VP_ID;
@@ -4153,7 +4160,8 @@
 	if (ioc->is_driver_loading) {
 		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
 		    == 0x80) {
-			hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
+			hide_flag = (u8) (
+			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
 			    MFG_PAGE10_HIDE_SSDS_MASK);
 			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
 				ioc->mfg_pg10_hide_flag = hide_flag;
@@ -4262,7 +4270,7 @@
 		goto out_free_resources;
 
 	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
-	    sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
+	    sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
 	if (!ioc->pfacts) {
 		r = -ENOMEM;
 		goto out_free_resources;
@@ -4279,7 +4287,6 @@
 		goto out_free_resources;
 
 	init_waitqueue_head(&ioc->reset_wq);
-
 	/* allocate memory pd handle bitmask list */
 	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
 	if (ioc->facts.MaxDevHandle % 8)
@@ -4290,7 +4297,12 @@
 		r = -ENOMEM;
 		goto out_free_resources;
 	}
-
+	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
+	    GFP_KERNEL);
+	if (!ioc->blocking_handles) {
+		r = -ENOMEM;
+		goto out_free_resources;
+	}
 	ioc->fwfault_debug = mpt2sas_fwfault_debug;
 
 	/* base internal command bits */
@@ -4377,6 +4389,7 @@
 	if (ioc->is_warpdrive)
 		kfree(ioc->reply_post_host_index);
 	kfree(ioc->pd_handles);
+	kfree(ioc->blocking_handles);
 	kfree(ioc->tm_cmds.reply);
 	kfree(ioc->transport_cmds.reply);
 	kfree(ioc->scsih_cmds.reply);
@@ -4418,6 +4431,7 @@
 	if (ioc->is_warpdrive)
 		kfree(ioc->reply_post_host_index);
 	kfree(ioc->pd_handles);
+	kfree(ioc->blocking_handles);
 	kfree(ioc->pfacts);
 	kfree(ioc->ctl_cmds.reply);
 	kfree(ioc->ctl_cmds.sense);
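
The new blocking_handles allocation above reuses pd_handles_sz, i.e. one bit per possible device handle rounded up to whole bytes. The stand-alone sketch below shows that sizing rule and the usual set/test operations on such a byte-array bitmap; the MaxDevHandle value is an arbitrary example, and the helpers are illustrative rather than the kernel's bitmap API.

#include <stdio.h>
#include <stdlib.h>

/* Size a byte-array bitmap for max_dev_handle handles, rounding up to
 * whole bytes exactly like the pd_handles_sz computation above. */
static size_t bitmap_bytes(unsigned int max_dev_handle)
{
	size_t sz = max_dev_handle / 8;

	if (max_dev_handle % 8)
		sz++;
	return sz;
}

static void set_handle(unsigned char *bm, unsigned int handle)
{
	bm[handle / 8] |= 1u << (handle % 8);
}

static int test_handle(const unsigned char *bm, unsigned int handle)
{
	return !!(bm[handle / 8] & (1u << (handle % 8)));
}

int main(void)
{
	unsigned int max_dev_handle = 1000;	/* example value only */
	size_t sz = bitmap_bytes(max_dev_handle);
	unsigned char *blocking = calloc(1, sz);

	if (!blocking)
		return 1;

	set_handle(blocking, 37);
	printf("bitmap of %zu bytes, handle 37 blocked: %d\n",
	       sz, test_handle(blocking, 37));
	free(blocking);
	return 0;
}
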
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index c7459fd..b6dd3a5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"12.100.00.00"
-#define MPT2SAS_MAJOR_VERSION		12
+#define MPT2SAS_DRIVER_VERSION		"13.100.00.00"
+#define MPT2SAS_MAJOR_VERSION		13
 #define MPT2SAS_MINOR_VERSION		100
 #define MPT2SAS_BUILD_VERSION		00
 #define MPT2SAS_RELEASE_VERSION		00
@@ -720,6 +720,7 @@
  * @io_missing_delay: time for IO completed by fw when PDR enabled
  * @device_missing_delay: time for device missing by fw when PDR enabled
  * @sas_id : used for setting volume target IDs
+ * @blocking_handles: bitmask used to identify which devices need blocking
  * @pd_handles : bitmask for PD handles
  * @pd_handles_sz : size of pd_handle bitmask
  * @config_page_sz: config page size
@@ -889,7 +890,7 @@
 	u8		io_missing_delay;
 	u16		device_missing_delay;
 	int		sas_id;
-
+	void		*blocking_handles;
 	void		*pd_handles;
 	u16		pd_handles_sz;
 
@@ -1058,7 +1059,8 @@
 void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
 void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
 void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
-void mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
+void mpt2sas_device_remove_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+		u64 sas_address);
 struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
     u16 handle);
 struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 3b9a28e..49bdd2d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -620,11 +620,10 @@
  * @ioc: per adapter object
  * @karg - (struct mpt2_ioctl_command)
  * @mf - pointer to mf in user space
- * @state - NON_BLOCKING or BLOCKING
  */
 static long
-_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
-    struct mpt2_ioctl_command karg, void __user *mf, enum block_state state)
+_ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,
+	void __user *mf)
 {
 	MPI2RequestHeader_t *mpi_request = NULL, *request;
 	MPI2DefaultReply_t *mpi_reply;
@@ -647,11 +646,6 @@
 
 	issue_reset = 0;
 
-	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
-		return -EAGAIN;
-	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
-		return -ERESTARTSYS;
-
 	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
 		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
 		    ioc->name, __func__);
@@ -871,8 +865,16 @@
 		if (smp_request->PassthroughFlags &
 		    MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
 			data = (u8 *)&smp_request->SGL;
-		else
+		else {
+			if (unlikely(data_out == NULL)) {
+				printk(KERN_ERR "failure at %s:%d/%s()!\n",
+				    __FILE__, __LINE__, __func__);
+				mpt2sas_base_free_smid(ioc, smid);
+				ret = -EINVAL;
+				goto out;
+			}
 			data = data_out;
+		}
 
 		if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
 			ioc->ioc_link_reset_in_progress = 1;
@@ -985,7 +987,8 @@
 		ret = -ENODATA;
 		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
 		    mpi_request->Function ==
-		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+		    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
 			printk(MPT2SAS_INFO_FMT "issue target reset: handle "
 			    "= (0x%04x)\n", ioc->name,
 			    le16_to_cpu(mpi_request->FunctionDependent1));
@@ -1013,27 +1016,24 @@
 
 	kfree(mpi_request);
 	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
-	mutex_unlock(&ioc->ctl_cmds.mutex);
 	return ret;
 }
 
 /**
  * _ctl_getiocinfo - main handler for MPT2IOCINFO opcode
+ * @ioc: per adapter object
  * @arg - user space buffer containing ioctl content
  */
 static long
-_ctl_getiocinfo(void __user *arg)
+_ctl_getiocinfo(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_ioctl_iocinfo karg;
-	struct MPT2SAS_ADAPTER *ioc;
 
 	if (copy_from_user(&karg, arg, sizeof(karg))) {
 		printk(KERN_ERR "failure at %s:%d/%s()!\n",
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
 	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
@@ -1069,21 +1069,19 @@
 
 /**
  * _ctl_eventquery - main handler for MPT2EVENTQUERY opcode
+ * @ioc: per adapter object
  * @arg - user space buffer containing ioctl content
  */
 static long
-_ctl_eventquery(void __user *arg)
+_ctl_eventquery(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_ioctl_eventquery karg;
-	struct MPT2SAS_ADAPTER *ioc;
 
 	if (copy_from_user(&karg, arg, sizeof(karg))) {
 		printk(KERN_ERR "failure at %s:%d/%s()!\n",
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
 	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
@@ -1102,21 +1100,19 @@
 
 /**
  * _ctl_eventenable - main handler for MPT2EVENTENABLE opcode
+ * @ioc: per adapter object
  * @arg - user space buffer containing ioctl content
  */
 static long
-_ctl_eventenable(void __user *arg)
+_ctl_eventenable(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_ioctl_eventenable karg;
-	struct MPT2SAS_ADAPTER *ioc;
 
 	if (copy_from_user(&karg, arg, sizeof(karg))) {
 		printk(KERN_ERR "failure at %s:%d/%s()!\n",
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
 	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
@@ -1142,13 +1138,13 @@
 
 /**
  * _ctl_eventreport - main handler for MPT2EVENTREPORT opcode
+ * @ioc: per adapter object
  * @arg - user space buffer containing ioctl content
  */
 static long
-_ctl_eventreport(void __user *arg)
+_ctl_eventreport(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_ioctl_eventreport karg;
-	struct MPT2SAS_ADAPTER *ioc;
 	u32 number_bytes, max_events, max;
 	struct mpt2_ioctl_eventreport __user *uarg = arg;
 
@@ -1157,8 +1153,6 @@
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
 	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
@@ -1188,13 +1182,13 @@
 
 /**
  * _ctl_do_reset - main handler for MPT2HARDRESET opcode
+ * @ioc: per adapter object
  * @arg - user space buffer containing ioctl content
  */
 static long
-_ctl_do_reset(void __user *arg)
+_ctl_do_reset(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_ioctl_diag_reset karg;
-	struct MPT2SAS_ADAPTER *ioc;
 	int retval;
 
 	if (copy_from_user(&karg, arg, sizeof(karg))) {
@@ -1202,8 +1196,6 @@
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
 	if (ioc->shost_recovery || ioc->pci_error_recovery ||
 		ioc->is_driver_loading)
@@ -1292,13 +1284,13 @@
 
 /**
  * _ctl_btdh_mapping - main handler for MPT2BTDHMAPPING opcode
+ * @ioc: per adapter object
  * @arg - user space buffer containing ioctl content
  */
 static long
-_ctl_btdh_mapping(void __user *arg)
+_ctl_btdh_mapping(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_ioctl_btdh_mapping karg;
-	struct MPT2SAS_ADAPTER *ioc;
 	int rc;
 
 	if (copy_from_user(&karg, arg, sizeof(karg))) {
@@ -1306,8 +1298,6 @@
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
 	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
@@ -1576,17 +1566,16 @@
 
 /**
  * _ctl_diag_register - application register with driver
+ * @ioc: per adapter object
  * @arg - user space buffer containing ioctl content
- * @state - NON_BLOCKING or BLOCKING
  *
  * This will allow the driver to setup any required buffers that will be
  * needed by firmware to communicate with the driver.
  */
 static long
-_ctl_diag_register(void __user *arg, enum block_state state)
+_ctl_diag_register(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_diag_register karg;
-	struct MPT2SAS_ADAPTER *ioc;
 	long rc;
 
 	if (copy_from_user(&karg, arg, sizeof(karg))) {
@@ -1594,30 +1583,23 @@
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
-	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
-		return -EAGAIN;
-	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
-		return -ERESTARTSYS;
 	rc = _ctl_diag_register_2(ioc, &karg);
-	mutex_unlock(&ioc->ctl_cmds.mutex);
 	return rc;
 }
 
 /**
  * _ctl_diag_unregister - application unregister with driver
+ * @ioc: per adapter object
  * @arg - user space buffer containing ioctl content
  *
  * This will allow the driver to cleanup any memory allocated for diag
  * messages and to free up any resources.
  */
 static long
-_ctl_diag_unregister(void __user *arg)
+_ctl_diag_unregister(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_diag_unregister karg;
-	struct MPT2SAS_ADAPTER *ioc;
 	void *request_data;
 	dma_addr_t request_data_dma;
 	u32 request_data_sz;
@@ -1628,8 +1610,6 @@
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
 	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
@@ -1678,6 +1658,7 @@
 
 /**
  * _ctl_diag_query - query relevant info associated with diag buffers
+ * @ioc: per adapter object
  * @arg - user space buffer containing ioctl content
  *
  * The application will send only buffer_type and unique_id.  Driver will
@@ -1685,10 +1666,9 @@
  * 0x00, the driver will return info specified by Buffer Type.
  */
 static long
-_ctl_diag_query(void __user *arg)
+_ctl_diag_query(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_diag_query karg;
-	struct MPT2SAS_ADAPTER *ioc;
 	void *request_data;
 	int i;
 	u8 buffer_type;
@@ -1698,8 +1678,6 @@
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
 	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
@@ -1866,17 +1844,15 @@
 /**
  * _ctl_diag_release - request to send Diag Release Message to firmware
  * @arg - user space buffer containing ioctl content
- * @state - NON_BLOCKING or BLOCKING
  *
  * This allows ownership of the specified buffer to be returned to the driver,
  * allowing an application to read the buffer without fear that firmware is
  * overwriting information in the buffer.
  */
 static long
-_ctl_diag_release(void __user *arg, enum block_state state)
+_ctl_diag_release(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_diag_release karg;
-	struct MPT2SAS_ADAPTER *ioc;
 	void *request_data;
 	int rc;
 	u8 buffer_type;
@@ -1887,8 +1863,6 @@
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
 	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
@@ -1942,32 +1916,25 @@
 		return 0;
 	}
 
-	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
-		return -EAGAIN;
-	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
-		return -ERESTARTSYS;
-
 	rc = _ctl_send_release(ioc, buffer_type, &issue_reset);
 
 	if (issue_reset)
 		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
 		    FORCE_BIG_HAMMER);
 
-	mutex_unlock(&ioc->ctl_cmds.mutex);
 	return rc;
 }
 
 /**
  * _ctl_diag_read_buffer - request for copy of the diag buffer
+ * @ioc: per adapter object
  * @arg - user space buffer containing ioctl content
- * @state - NON_BLOCKING or BLOCKING
  */
 static long
-_ctl_diag_read_buffer(void __user *arg, enum block_state state)
+_ctl_diag_read_buffer(struct MPT2SAS_ADAPTER *ioc, void __user *arg)
 {
 	struct mpt2_diag_read_buffer karg;
 	struct mpt2_diag_read_buffer __user *uarg = arg;
-	struct MPT2SAS_ADAPTER *ioc;
 	void *request_data, *diag_data;
 	Mpi2DiagBufferPostRequest_t *mpi_request;
 	Mpi2DiagBufferPostReply_t *mpi_reply;
@@ -1983,8 +1950,6 @@
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
 
 	dctlprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
@@ -2055,10 +2020,6 @@
 	}
 	/* Get a free request frame and save the message context.
 	*/
-	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
-		return -EAGAIN;
-	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
-		return -ERESTARTSYS;
 
 	if (ioc->ctl_cmds.status != MPT2_CMD_NOT_USED) {
 		printk(MPT2SAS_ERR_FMT "%s: ctl_cmd in use\n",
@@ -2139,152 +2100,26 @@
  out:
 
 	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
-	mutex_unlock(&ioc->ctl_cmds.mutex);
 	return rc;
 }
 
-/**
- * _ctl_ioctl_main - main ioctl entry point
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg -
- */
-static long
-_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg)
-{
-	enum block_state state;
-	long ret = -EINVAL;
-
-	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING :
-	    BLOCKING;
-
-	switch (cmd) {
-	case MPT2IOCINFO:
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_iocinfo))
-			ret = _ctl_getiocinfo(arg);
-		break;
-	case MPT2COMMAND:
-	{
-		struct mpt2_ioctl_command karg;
-		struct mpt2_ioctl_command __user *uarg;
-		struct MPT2SAS_ADAPTER *ioc;
-
-		if (copy_from_user(&karg, arg, sizeof(karg))) {
-			printk(KERN_ERR "failure at %s:%d/%s()!\n",
-			    __FILE__, __LINE__, __func__);
-			return -EFAULT;
-		}
-
-		if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
-		    !ioc)
-			return -ENODEV;
-
-		if (ioc->shost_recovery || ioc->pci_error_recovery ||
-				ioc->is_driver_loading)
-			return -EAGAIN;
-
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
-			uarg = arg;
-			ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
-		}
-		break;
-	}
-	case MPT2EVENTQUERY:
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventquery))
-			ret = _ctl_eventquery(arg);
-		break;
-	case MPT2EVENTENABLE:
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventenable))
-			ret = _ctl_eventenable(arg);
-		break;
-	case MPT2EVENTREPORT:
-		ret = _ctl_eventreport(arg);
-		break;
-	case MPT2HARDRESET:
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_diag_reset))
-			ret = _ctl_do_reset(arg);
-		break;
-	case MPT2BTDHMAPPING:
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_btdh_mapping))
-			ret = _ctl_btdh_mapping(arg);
-		break;
-	case MPT2DIAGREGISTER:
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_register))
-			ret = _ctl_diag_register(arg, state);
-		break;
-	case MPT2DIAGUNREGISTER:
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_unregister))
-			ret = _ctl_diag_unregister(arg);
-		break;
-	case MPT2DIAGQUERY:
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_query))
-			ret = _ctl_diag_query(arg);
-		break;
-	case MPT2DIAGRELEASE:
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_release))
-			ret = _ctl_diag_release(arg, state);
-		break;
-	case MPT2DIAGREADBUFFER:
-		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_read_buffer))
-			ret = _ctl_diag_read_buffer(arg, state);
-		break;
-	default:
-	{
-		struct mpt2_ioctl_command karg;
-		struct MPT2SAS_ADAPTER *ioc;
-
-		if (copy_from_user(&karg, arg, sizeof(karg))) {
-			printk(KERN_ERR "failure at %s:%d/%s()!\n",
-			    __FILE__, __LINE__, __func__);
-			return -EFAULT;
-		}
-
-		if (_ctl_verify_adapter(karg.hdr.ioc_number, &ioc) == -1 ||
-		    !ioc)
-			return -ENODEV;
-
-		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT
-		    "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
-		break;
-	}
-	}
-	return ret;
-}
-
-/**
- * _ctl_ioctl - main ioctl entry point (unlocked)
- * @file - (struct file)
- * @cmd - ioctl opcode
- * @arg -
- */
-static long
-_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	long ret;
-
-	mutex_lock(&_ctl_mutex);
-	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
-	mutex_unlock(&_ctl_mutex);
-	return ret;
-}
 
 #ifdef CONFIG_COMPAT
 /**
  * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
- * @file - (struct file)
+ * @ioc: per adapter object
  * @cmd - ioctl opcode
  * @arg - (struct mpt2_ioctl_command32)
  *
  * MPT2COMMAND32 - Handle 32bit applications running on 64bit os.
  */
 static long
-_ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
+_ctl_compat_mpt_command(struct MPT2SAS_ADAPTER *ioc, unsigned cmd,
+	void __user *arg)
 {
 	struct mpt2_ioctl_command32 karg32;
 	struct mpt2_ioctl_command32 __user *uarg;
 	struct mpt2_ioctl_command karg;
-	struct MPT2SAS_ADAPTER *ioc;
-	enum block_state state;
 
 	if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
 		return -EINVAL;
@@ -2296,12 +2131,6 @@
 		    __FILE__, __LINE__, __func__);
 		return -EFAULT;
 	}
-	if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
-		return -ENODEV;
-
-	if (ioc->shost_recovery || ioc->pci_error_recovery ||
-			ioc->is_driver_loading)
-		return -EAGAIN;
 
 	memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
 	karg.hdr.ioc_number = karg32.hdr.ioc_number;
@@ -2317,11 +2146,143 @@
 	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
 	karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
 	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
+	return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+}
+#endif
+
+/**
+ * _ctl_ioctl_main - main ioctl entry point
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg -
+ * @compat - handles 32bit applications running on 64bit os
+ */
+static long
+_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
+	u8 compat)
+{
+	struct MPT2SAS_ADAPTER *ioc;
+	struct mpt2_ioctl_header ioctl_header;
+	enum block_state state;
+	long ret = -EINVAL;
+
+	/* get IOCTL header */
+	if (copy_from_user(&ioctl_header, (char __user *)arg,
+	    sizeof(struct mpt2_ioctl_header))) {
+		printk(KERN_ERR "failure at %s:%d/%s()!\n",
+		    __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+
+	if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
+		return -ENODEV;
+	if (ioc->shost_recovery || ioc->pci_error_recovery ||
+	    ioc->is_driver_loading)
+		return -EAGAIN;
+
 	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
-	return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
+	if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex))
+		return -EAGAIN;
+	else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
+		return -ERESTARTSYS;
+
+	switch (cmd) {
+	case MPT2IOCINFO:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_iocinfo))
+			ret = _ctl_getiocinfo(ioc, arg);
+		break;
+#ifdef CONFIG_COMPAT
+	case MPT2COMMAND32:
+#endif
+	case MPT2COMMAND:
+	{
+		struct mpt2_ioctl_command __user *uarg;
+		struct mpt2_ioctl_command karg;
+#ifdef CONFIG_COMPAT
+		if (compat) {
+			ret = _ctl_compat_mpt_command(ioc, cmd, arg);
+			break;
+		}
+#endif
+		if (copy_from_user(&karg, arg, sizeof(karg))) {
+			printk(KERN_ERR "failure at %s:%d/%s()!\n",
+			    __FILE__, __LINE__, __func__);
+			ret = -EFAULT;
+			break;
+		}
+
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
+			uarg = arg;
+			ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+		}
+		break;
+	}
+	case MPT2EVENTQUERY:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventquery))
+			ret = _ctl_eventquery(ioc, arg);
+		break;
+	case MPT2EVENTENABLE:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_eventenable))
+			ret = _ctl_eventenable(ioc, arg);
+		break;
+	case MPT2EVENTREPORT:
+		ret = _ctl_eventreport(ioc, arg);
+		break;
+	case MPT2HARDRESET:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_diag_reset))
+			ret = _ctl_do_reset(ioc, arg);
+		break;
+	case MPT2BTDHMAPPING:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_btdh_mapping))
+			ret = _ctl_btdh_mapping(ioc, arg);
+		break;
+	case MPT2DIAGREGISTER:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_register))
+			ret = _ctl_diag_register(ioc, arg);
+		break;
+	case MPT2DIAGUNREGISTER:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_unregister))
+			ret = _ctl_diag_unregister(ioc, arg);
+		break;
+	case MPT2DIAGQUERY:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_query))
+			ret = _ctl_diag_query(ioc, arg);
+		break;
+	case MPT2DIAGRELEASE:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_release))
+			ret = _ctl_diag_release(ioc, arg);
+		break;
+	case MPT2DIAGREADBUFFER:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_diag_read_buffer))
+			ret = _ctl_diag_read_buffer(ioc, arg);
+		break;
+	default:
+
+		dctlprintk(ioc, printk(MPT2SAS_INFO_FMT
+		    "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+		break;
+	}
+
+	mutex_unlock(&ioc->ctl_cmds.mutex);
+	return ret;
 }
 
 /**
+ * _ctl_ioctl - main ioctl entry point (unlocked)
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg -
+ */
+static long
+_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0);
+	return ret;
+}
+#ifdef CONFIG_COMPAT
+/**
  * _ctl_ioctl_compat - main ioctl entry point (compat)
  * @file -
  * @cmd -
@@ -2334,12 +2295,7 @@
 {
 	long ret;
 
-	mutex_lock(&_ctl_mutex);
-	if (cmd == MPT2COMMAND32)
-		ret = _ctl_compat_mpt_command(file, cmd, arg);
-	else
-		ret = _ctl_ioctl_main(file, cmd, (void __user *)arg);
-	mutex_unlock(&_ctl_mutex);
+	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1);
 	return ret;
 }
 #endif
@@ -2884,7 +2840,7 @@
 	struct mpt2_diag_register diag_register;
 	u8 issue_reset = 0;
 
-	if (sscanf(buf, "%s", str) != 1)
+	if (sscanf(buf, "%9s", str) != 1)
 		return -EINVAL;
 
 	if (!strcmp(str, "post")) {
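
Net effect of the mpt2sas_ctl.c changes: adapter lookup and the per-adapter
ctl_cmds.mutex are now taken exactly once in _ctl_ioctl_main(), instead of in
every per-opcode handler, and the old global _ctl_mutex is gone. A condensed
sketch of that entry-point pattern, using only the structures visible in the
diff (dispatch_opcode() stands in for the opcode switch and is not a real
driver function):

    #include <linux/fs.h>
    #include <linux/uaccess.h>
    #include "mpt2sas_base.h"
    #include "mpt2sas_ctl.h"	/* struct mpt2_ioctl_header */

    /* Stand-in for the per-opcode switch; not a real driver function. */
    static long dispatch_opcode(struct MPT2SAS_ADAPTER *ioc, unsigned int cmd,
            void __user *arg);

    static long example_ioctl_entry(struct file *file, unsigned int cmd,
            void __user *arg)
    {
            struct mpt2_ioctl_header hdr;
            struct MPT2SAS_ADAPTER *ioc;
            long ret;

            /* Every opcode starts with the same ioctl header. */
            if (copy_from_user(&hdr, arg, sizeof(hdr)))
                    return -EFAULT;

            /* Resolve the adapter once, up front. */
            if (_ctl_verify_adapter(hdr.ioc_number, &ioc) == -1 || !ioc)
                    return -ENODEV;

            /* Serialize ioctls per adapter, honouring O_NONBLOCK. */
            if (file->f_flags & O_NONBLOCK) {
                    if (!mutex_trylock(&ioc->ctl_cmds.mutex))
                            return -EAGAIN;
            } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
                    return -ERESTARTSYS;
            }

            ret = dispatch_opcode(ioc, cmd, arg);

            mutex_unlock(&ioc->ctl_cmds.mutex);
            return ret;
    }

This is why the mutex_trylock()/mutex_lock_interruptible() pairs and the trailing
mutex_unlock() calls could be deleted from _ctl_do_mpt_command(),
_ctl_diag_register(), _ctl_diag_release() and _ctl_diag_read_buffer().
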
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index d953a57..76973e8 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -579,14 +579,12 @@
 		return;
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	if (mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-	    sas_device->sas_address)) {
-		list_del(&sas_device->list);
-		kfree(sas_device);
-	}
+	list_del(&sas_device->list);
+	kfree(sas_device);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 }
 
+
 /**
  * _scsih_sas_device_add - insert sas_device to the list.
  * @ioc: per adapter object
@@ -645,8 +643,8 @@
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	_scsih_determine_boot_device(ioc, sas_device, 0);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 }
 
 /**
@@ -755,7 +753,6 @@
  * @ioc: per adapter object
  * @raid_device: raid_device object
  *
- * This is removed from the raid_device_list link list.
  */
 static void
 _scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc,
@@ -765,7 +762,6 @@
 
 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
 	list_del(&raid_device->list);
-	memset(raid_device, 0, sizeof(struct _raid_device));
 	kfree(raid_device);
 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 }
@@ -1199,10 +1195,10 @@
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
 	   sas_device_priv_data->sas_target->sas_address);
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	if (sas_device && sas_device->device_info &
 	    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
 		max_depth = MPT2SAS_SATA_QUEUE_DEPTH;
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
  not_sata:
 
@@ -1299,7 +1295,8 @@
 			sas_target_priv_data->handle = raid_device->handle;
 			sas_target_priv_data->sas_address = raid_device->wwid;
 			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
-			sas_target_priv_data->raid_device = raid_device;
+			if (ioc->is_warpdrive)
+				sas_target_priv_data->raid_device = raid_device;
 			raid_device->starget = starget;
 		}
 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
@@ -1465,12 +1462,12 @@
 /**
  * _scsih_display_sata_capabilities - sata capabilities
  * @ioc: per adapter object
- * @sas_device: the sas_device object
+ * @handle: device handle
  * @sdev: scsi device struct
  */
 static void
 _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
-    struct _sas_device *sas_device, struct scsi_device *sdev)
+	u16 handle, struct scsi_device *sdev)
 {
 	Mpi2ConfigReply_t mpi_reply;
 	Mpi2SasDevicePage0_t sas_device_pg0;
@@ -1479,7 +1476,7 @@
 	u32 device_info;
 
 	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
-	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, sas_device->handle))) {
+	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
 		return;
@@ -1537,27 +1534,40 @@
 	Mpi2RaidVolPage0_t vol_pg0;
 	Mpi2ConfigReply_t mpi_reply;
 	u32 volume_status_flags;
-	u8 percent_complete = 0;
+	u8 percent_complete;
+	u16 handle;
+
+	percent_complete = 0;
+	handle = 0;
+	if (ioc->is_warpdrive)
+		goto out;
 
 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
 	    sdev->channel);
+	if (raid_device) {
+		handle = raid_device->handle;
+		percent_complete = raid_device->percent_complete;
+	}
 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 
-	if (!raid_device || ioc->is_warpdrive)
+	if (!handle)
 		goto out;
 
 	if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
-	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
 	     sizeof(Mpi2RaidVolPage0_t))) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
+		percent_complete = 0;
 		goto out;
 	}
 
 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
-	if (volume_status_flags & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)
-		percent_complete = raid_device->percent_complete;
+	if (!(volume_status_flags &
+	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
+		percent_complete = 0;
+
  out:
 	raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
 }
@@ -1577,17 +1587,20 @@
 	Mpi2ConfigReply_t mpi_reply;
 	u32 volstate;
 	enum raid_state state = RAID_STATE_UNKNOWN;
+	u16 handle = 0;
 
 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
 	    sdev->channel);
+	if (raid_device)
+		handle = raid_device->handle;
 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 
 	if (!raid_device)
 		goto out;
 
 	if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
-	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
 	     sizeof(Mpi2RaidVolPage0_t))) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
@@ -1620,14 +1633,14 @@
 /**
  * _scsih_set_level - set raid level
  * @sdev: scsi device struct
- * @raid_device: raid_device object
+ * @volume_type: volume type
  */
 static void
-_scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device)
+_scsih_set_level(struct scsi_device *sdev, u8 volume_type)
 {
 	enum raid_level level = RAID_LEVEL_UNKNOWN;
 
-	switch (raid_device->volume_type) {
+	switch (volume_type) {
 	case MPI2_RAID_VOL_TYPE_RAID0:
 		level = RAID_LEVEL_0;
 		break;
@@ -1722,6 +1735,7 @@
 	struct _raid_device *raid_device;
 	u16 handle;
 	u16 ioc_status;
+	unsigned long flags;
 
 	handle = 0xFFFF;
 	while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
@@ -1731,9 +1745,11 @@
 		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
 			break;
 		handle = le16_to_cpu(vol_pg1.DevHandle);
+		spin_lock_irqsave(&ioc->raid_device_lock, flags);
 		raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
 		if (raid_device)
 			raid_device->direct_io_enabled = 0;
+		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 	}
 	return;
 }
@@ -1838,7 +1854,8 @@
 		if (mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
 		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
 		    vol_pg0->PhysDisk[count].PhysDiskNum) ||
-		    pd_pg0.DevHandle == MPT2SAS_INVALID_DEVICE_HANDLE) {
+		     le16_to_cpu(pd_pg0.DevHandle) ==
+		    MPT2SAS_INVALID_DEVICE_HANDLE) {
 			printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
 			    "disabled for the drive with handle(0x%04x) member"
 			    "handle retrieval failed for member number=%d\n",
@@ -1968,19 +1985,21 @@
 	u8 ssp_target = 0;
 	char *ds = "";
 	char *r_level = "";
+	u16 handle, volume_handle = 0;
+	u64 volume_wwid = 0;
 
 	qdepth = 1;
 	sas_device_priv_data = sdev->hostdata;
 	sas_device_priv_data->configured_lun = 1;
 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
 	sas_target_priv_data = sas_device_priv_data->sas_target;
+	handle = sas_target_priv_data->handle;
 
 	/* raid volume handling */
 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
 
 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
-		raid_device = _scsih_raid_device_find_by_handle(ioc,
-		     sas_target_priv_data->handle);
+		raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 		if (!raid_device) {
 			dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
@@ -1989,8 +2008,6 @@
 			return 1;
 		}
 
-		_scsih_get_volume_capabilities(ioc, raid_device);
-
 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
 			dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
 			    "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
@@ -2058,68 +2075,67 @@
 		_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
 		/* raid transport support */
 		if (!ioc->is_warpdrive)
-			_scsih_set_level(sdev, raid_device);
+			_scsih_set_level(sdev, raid_device->volume_type);
 		return 0;
 	}
 
 	/* non-raid handling */
+	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+		if (mpt2sas_config_get_volume_handle(ioc, handle,
+		    &volume_handle)) {
+			dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+			    "failure at %s:%d/%s()!\n", ioc->name,
+			    __FILE__, __LINE__, __func__));
+			return 1;
+		}
+		if (volume_handle && mpt2sas_config_get_volume_wwid(ioc,
+		    volume_handle, &volume_wwid)) {
+			dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
+			    "failure at %s:%d/%s()!\n", ioc->name,
+			    __FILE__, __LINE__, __func__));
+			return 1;
+		}
+	}
+
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
 	   sas_device_priv_data->sas_target->sas_address);
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-	if (sas_device) {
-		if (sas_target_priv_data->flags &
-		    MPT_TARGET_FLAGS_RAID_COMPONENT) {
-			if (mpt2sas_config_get_volume_handle(ioc,
-			    sas_device->handle, &sas_device->volume_handle)) {
-				dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
-				    "failure at %s:%d/%s()!\n", ioc->name,
-				    __FILE__, __LINE__, __func__));
-				return 1;
-			}
-			if (sas_device->volume_handle &&
-			    mpt2sas_config_get_volume_wwid(ioc,
-			    sas_device->volume_handle,
-			    &sas_device->volume_wwid)) {
-				dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
-				    "failure at %s:%d/%s()!\n", ioc->name,
-				    __FILE__, __LINE__, __func__));
-				return 1;
-			}
-		}
-		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
-			qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
-			ssp_target = 1;
-			ds = "SSP";
-		} else {
-			qdepth = MPT2SAS_SATA_QUEUE_DEPTH;
-			if (sas_device->device_info &
-			    MPI2_SAS_DEVICE_INFO_STP_TARGET)
-				ds = "STP";
-			else if (sas_device->device_info &
-			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
-				ds = "SATA";
-		}
-
-		sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
-		    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
-		    ds, sas_device->handle,
-		    (unsigned long long)sas_device->sas_address,
-		    sas_device->phy,
-		    (unsigned long long)sas_device->device_name);
-		sdev_printk(KERN_INFO, sdev, "%s: "
-		    "enclosure_logical_id(0x%016llx), slot(%d)\n", ds,
-		    (unsigned long long) sas_device->enclosure_logical_id,
-		    sas_device->slot);
-
-		if (!ssp_target)
-			_scsih_display_sata_capabilities(ioc, sas_device, sdev);
-	} else {
+	if (!sas_device) {
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
-		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
-		    __func__));
+			"failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+			__LINE__, __func__));
 		return 1;
 	}
+	sas_device->volume_handle = volume_handle;
+	sas_device->volume_wwid = volume_wwid;
+	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+		qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
+		ssp_target = 1;
+		ds = "SSP";
+	} else {
+		qdepth = MPT2SAS_SATA_QUEUE_DEPTH;
+		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+			ds = "STP";
+		else if (sas_device->device_info &
+		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+			ds = "SATA";
+	}
+	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
+	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
+	    ds, sas_device->handle,
+	    (unsigned long long)sas_device->sas_address,
+	    sas_device->phy,
+	    (unsigned long long)sas_device->device_name);
+	sdev_printk(KERN_INFO, sdev, "%s: "
+	    "enclosure_logical_id(0x%016llx), slot(%d)\n", ds,
+	    (unsigned long long) sas_device->enclosure_logical_id,
+	    sas_device->slot);
+
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	if (!ssp_target)
+		_scsih_display_sata_capabilities(ioc, handle, sdev);
+
 
 	_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
 
@@ -2899,7 +2915,7 @@
  * During device pull we need to appropriately set the sdev state.
  */
 static void
-_scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
 {
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
 	struct scsi_device *sdev;
@@ -2910,10 +2926,12 @@
 			continue;
 		if (!sas_device_priv_data->block)
 			continue;
-		if (sas_device_priv_data->sas_target->handle == handle) {
+		if (sas_device_priv_data->sas_target->sas_address ==
+								sas_address) {
 			dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
 			    MPT2SAS_INFO_FMT "SDEV_RUNNING: "
-			    "handle(0x%04x)\n", ioc->name, handle));
+			    "sas address(0x%016llx)\n", ioc->name,
+				(unsigned long long)sas_address));
 			sas_device_priv_data->block = 0;
 			scsi_internal_device_unblock(sdev);
 		}
@@ -3006,10 +3024,10 @@
 			sas_device =
 			    mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
 			   mpt2sas_port->remote_identify.sas_address);
+			if (sas_device)
+				set_bit(sas_device->handle,
+				    ioc->blocking_handles);
 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-			if (!sas_device)
-				continue;
-			_scsih_block_io_device(ioc, sas_device->handle);
 		}
 	}
 
@@ -3020,12 +3038,9 @@
 		    SAS_EDGE_EXPANDER_DEVICE ||
 		    mpt2sas_port->remote_identify.device_type ==
 		    SAS_FANOUT_EXPANDER_DEVICE) {
-
-			spin_lock_irqsave(&ioc->sas_node_lock, flags);
 			expander_sibling =
 			    mpt2sas_scsih_expander_find_by_sas_address(
 			    ioc, mpt2sas_port->remote_identify.sas_address);
-			spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 			_scsih_block_io_to_children_attached_to_ex(ioc,
 			    expander_sibling);
 		}
@@ -3124,7 +3139,7 @@
 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "setting delete flag: "
 		"handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle,
 			(unsigned long long)sas_address));
-		_scsih_ublock_io_device(ioc, handle);
+		_scsih_ublock_io_device(ioc, sas_address);
 		sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;
 	}
 
@@ -3174,16 +3189,19 @@
 _scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
     u8 msix_index, u32 reply)
 {
-#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 	Mpi2SasIoUnitControlReply_t *mpi_reply =
 	    mpt2sas_base_get_reply_virt_addr(ioc, reply);
-#endif
-	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
-	    "sc_complete:handle(0x%04x), (open) "
-	    "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
-	    ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
-	    le16_to_cpu(mpi_reply->IOCStatus),
-	    le32_to_cpu(mpi_reply->IOCLogInfo)));
+	if (likely(mpi_reply)) {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+		"sc_complete:handle(0x%04x), (open) "
+		"smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+		ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+		le16_to_cpu(mpi_reply->IOCStatus),
+		le32_to_cpu(mpi_reply->IOCLogInfo)));
+	} else {
+		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+	}
 	return 1;
 }
 
@@ -3262,7 +3280,11 @@
 		   "progress!\n", __func__, ioc->name));
 		return 1;
 	}
-
+	if (unlikely(!mpi_reply)) {
+		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return 1;
+	}
 	mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
@@ -3325,7 +3347,11 @@
 		    "operational\n", __func__, ioc->name));
 		return 1;
 	}
-
+	if (unlikely(!mpi_reply)) {
+		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return 1;
+	}
 	mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
@@ -3441,14 +3467,20 @@
 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
 		return;
 	}
-
-	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING
-	 || event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) {
+	if (event_data->ExpStatus ==
+	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
+		/* put expander attached devices into blocking state */
 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
 		sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
 		    expander_handle);
-		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
+		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+		do {
+			handle = find_first_bit(ioc->blocking_handles,
+			    ioc->facts.MaxDevHandle);
+			if (handle < ioc->facts.MaxDevHandle)
+				_scsih_block_io_device(ioc, handle);
+		} while (test_and_clear_bit(handle, ioc->blocking_handles));
 	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
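
In the delay-not-responding path above, the affected handles are only recorded in
ioc->blocking_handles (under sas_device_lock, inside
_scsih_block_io_to_children_attached_to_ex()), and the bitmap is drained after
sas_node_lock has been dropped, so the actual blocking of I/O happens with no
spinlock held. A simplified equivalent of that drain loop (not the literal driver
code):

    static void example_drain_blocking_handles(struct MPT2SAS_ADAPTER *ioc)
    {
            unsigned long handle;

            for (;;) {
                    handle = find_first_bit(ioc->blocking_handles,
                        ioc->facts.MaxDevHandle);
                    if (handle >= ioc->facts.MaxDevHandle)
                            break;
                    _scsih_block_io_device(ioc, handle);
                    clear_bit(handle, ioc->blocking_handles);
            }
    }
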
 
@@ -4446,8 +4478,8 @@
 	    != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
 		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 		ioc->scsi_lookup[smid - 1].scmd = scmd;
-		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 		_scsih_scsi_direct_io_set(ioc, smid, 0);
+		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
 		mpi_request->DevHandle =
 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -5020,13 +5052,11 @@
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
 	    sas_address);
-	if (!sas_expander) {
-		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
-		return;
-	}
-	list_del(&sas_expander->list);
+	if (sas_expander)
+		list_del(&sas_expander->list);
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
-	_scsih_expander_node_remove(ioc, sas_expander);
+	if (sas_expander)
+		_scsih_expander_node_remove(ioc, sas_expander);
 }
 
 /**
@@ -5106,6 +5136,7 @@
 	struct MPT2SAS_TARGET *sas_target_priv_data;
 	u32 device_info;
 
+
 	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
 		return;
@@ -5139,21 +5170,24 @@
 		sas_target_priv_data->handle = handle;
 		sas_device->handle = handle;
 	}
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
 	/* check if device is present */
 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
 		printk(MPT2SAS_ERR_FMT "device is not present "
 		    "handle(0x%04x), flags!!!\n", ioc->name, handle);
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		return;
 	}
 
 	/* check if there were any issues with discovery */
 	if (_scsih_check_access_status(ioc, sas_address, handle,
-	    sas_device_pg0.AccessStatus))
+	    sas_device_pg0.AccessStatus)) {
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		return;
-	_scsih_ublock_io_device(ioc, handle);
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	_scsih_ublock_io_device(ioc, sas_address);
 
 }
 
@@ -5280,54 +5314,71 @@
 _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
     struct _sas_device *sas_device)
 {
-	struct _sas_device sas_device_backup;
 	struct MPT2SAS_TARGET *sas_target_priv_data;
 
-	if (!sas_device)
-		return;
-
-	memcpy(&sas_device_backup, sas_device, sizeof(struct _sas_device));
-	_scsih_sas_device_remove(ioc, sas_device);
-
 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
 	    "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
-	    sas_device_backup.handle, (unsigned long long)
-	    sas_device_backup.sas_address));
+		sas_device->handle, (unsigned long long)
+	    sas_device->sas_address));
 
-	if (sas_device_backup.starget && sas_device_backup.starget->hostdata) {
-		sas_target_priv_data = sas_device_backup.starget->hostdata;
+	if (sas_device->starget && sas_device->starget->hostdata) {
+		sas_target_priv_data = sas_device->starget->hostdata;
 		sas_target_priv_data->deleted = 1;
-		_scsih_ublock_io_device(ioc, sas_device_backup.handle);
+		_scsih_ublock_io_device(ioc, sas_device->sas_address);
 		sas_target_priv_data->handle =
 		     MPT2SAS_INVALID_DEVICE_HANDLE;
 	}
 
-	_scsih_ublock_io_device(ioc, sas_device_backup.handle);
-
 	if (!ioc->hide_drives)
 		mpt2sas_transport_port_remove(ioc,
-		    sas_device_backup.sas_address,
-		    sas_device_backup.sas_address_parent);
+		    sas_device->sas_address,
+		    sas_device->sas_address_parent);
 
 	printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
-	    "(0x%016llx)\n", ioc->name, sas_device_backup.handle,
-	    (unsigned long long) sas_device_backup.sas_address);
+	    "(0x%016llx)\n", ioc->name, sas_device->handle,
+	    (unsigned long long) sas_device->sas_address);
 
 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: "
 	    "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
-	    sas_device_backup.handle, (unsigned long long)
-	    sas_device_backup.sas_address));
+	    sas_device->handle, (unsigned long long)
+	    sas_device->sas_address));
+	kfree(sas_device);
+}
+/**
+ * _scsih_device_remove_by_handle - removing device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+static void
+_scsih_device_remove_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	if (ioc->shost_recovery)
+		return;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	if (sas_device)
+		list_del(&sas_device->list);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	if (sas_device)
+		_scsih_remove_device(ioc, sas_device);
 }
 
 /**
- * mpt2sas_device_remove - removing device object
+ * mpt2sas_device_remove_by_sas_address - removing device object by sas address
  * @ioc: per adapter object
- * @sas_address: expander sas_address
+ * @sas_address: device sas_address
  *
  * Return nothing.
  */
 void
-mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
+mpt2sas_device_remove_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
+	u64 sas_address)
 {
 	struct _sas_device *sas_device;
 	unsigned long flags;
@@ -5338,14 +5389,12 @@
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
 	    sas_address);
-	if (!sas_device) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		return;
-	}
+	if (sas_device)
+		list_del(&sas_device->list);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-	_scsih_remove_device(ioc, sas_device);
+	if (sas_device)
+		_scsih_remove_device(ioc, sas_device);
 }
-
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 /**
  * _scsih_sas_topology_change_event_debug - debug for topology event
@@ -5442,7 +5491,6 @@
 	u16 reason_code;
 	u8 phy_number, max_phys;
 	struct _sas_node *sas_expander;
-	struct _sas_device *sas_device;
 	u64 sas_address;
 	unsigned long flags;
 	u8 link_rate, prev_link_rate;
@@ -5477,15 +5525,17 @@
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
 	    parent_handle);
-	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 	if (sas_expander) {
 		sas_address = sas_expander->sas_address;
 		max_phys = sas_expander->num_phys;
 	} else if (parent_handle < ioc->sas_hba.num_phys) {
 		sas_address = ioc->sas_hba.sas_address;
 		max_phys = ioc->sas_hba.num_phys;
-	} else
+	} else {
+		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 		return;
+	}
+	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 
 	/* handle siblings events */
 	for (i = 0; i < event_data->NumEntries; i++) {
@@ -5540,16 +5590,7 @@
 			break;
 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
 
-			spin_lock_irqsave(&ioc->sas_device_lock, flags);
-			sas_device = _scsih_sas_device_find_by_handle(ioc,
-			    handle);
-			if (!sas_device) {
-				spin_unlock_irqrestore(&ioc->sas_device_lock,
-				    flags);
-				break;
-			}
-			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-			_scsih_remove_device(ioc, sas_device);
+			_scsih_device_remove_by_handle(ioc, handle);
 			break;
 		}
 	}
@@ -5672,20 +5713,24 @@
 	sas_address = le64_to_cpu(event_data->SASAddress);
 	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
 	    sas_address);
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
-	if (!sas_device || !sas_device->starget)
+	if (!sas_device || !sas_device->starget) {
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		return;
+	}
 
 	target_priv_data = sas_device->starget->hostdata;
-	if (!target_priv_data)
+	if (!target_priv_data) {
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		return;
+	}
 
 	if (event_data->ReasonCode ==
 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
 		target_priv_data->tm_busy = 1;
 	else
 		target_priv_data->tm_busy = 0;
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 }
 
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -5950,30 +5995,6 @@
 }
 
 /**
- * _scsih_reprobe_target - reprobing target
- * @starget: scsi target struct
- * @no_uld_attach: sdev->no_uld_attach flag setting
- *
- * Note: no_uld_attach flag determines whether the disk device is attached
- * to block layer. A value of `1` means to not attach.
- **/
-static void
-_scsih_reprobe_target(struct scsi_target *starget, int no_uld_attach)
-{
-	struct MPT2SAS_TARGET *sas_target_priv_data;
-
-	if (starget == NULL)
-		return;
-	sas_target_priv_data = starget->hostdata;
-	if (no_uld_attach)
-		sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
-	else
-		sas_target_priv_data->flags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
-
-	starget_for_each_device(starget, no_uld_attach ? (void *)1 : NULL,
-	    _scsih_reprobe_lun);
-}
-/**
  * _scsih_sas_volume_add - add new volume
  * @ioc: per adapter object
  * @element: IR config element data
@@ -6024,8 +6045,11 @@
 		    raid_device->id, 0);
 		if (rc)
 			_scsih_raid_device_remove(ioc, raid_device);
-	} else
+	} else {
+		spin_lock_irqsave(&ioc->raid_device_lock, flags);
 		_scsih_determine_boot_device(ioc, raid_device, 1);
+		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+	}
 }
 
 /**
@@ -6042,21 +6066,25 @@
 	struct _raid_device *raid_device;
 	unsigned long flags;
 	struct MPT2SAS_TARGET *sas_target_priv_data;
+	struct scsi_target *starget = NULL;
 
 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
 	raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
-	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-	if (!raid_device)
-		return;
-	if (raid_device->starget) {
-		sas_target_priv_data = raid_device->starget->hostdata;
-		sas_target_priv_data->deleted = 1;
-		scsi_remove_target(&raid_device->starget->dev);
+	if (raid_device) {
+		if (raid_device->starget) {
+			starget = raid_device->starget;
+			sas_target_priv_data = starget->hostdata;
+			sas_target_priv_data->deleted = 1;
+		}
+		printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
+		    "(0x%016llx)\n", ioc->name,  raid_device->handle,
+		    (unsigned long long) raid_device->wwid);
+		list_del(&raid_device->list);
+		kfree(raid_device);
 	}
-	printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
-	    "(0x%016llx)\n", ioc->name,  raid_device->handle,
-	    (unsigned long long) raid_device->wwid);
-	_scsih_raid_device_remove(ioc, raid_device);
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+	if (starget)
+		scsi_remove_target(&starget->dev);
 }
 
 /**
@@ -6072,20 +6100,31 @@
     Mpi2EventIrConfigElement_t *element)
 {
 	struct _sas_device *sas_device;
+	struct scsi_target *starget = NULL;
+	struct MPT2SAS_TARGET *sas_target_priv_data;
 	unsigned long flags;
 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	if (sas_device) {
+		sas_device->volume_handle = 0;
+		sas_device->volume_wwid = 0;
+		clear_bit(handle, ioc->pd_handles);
+		if (sas_device->starget && sas_device->starget->hostdata) {
+			starget = sas_device->starget;
+			sas_target_priv_data = starget->hostdata;
+			sas_target_priv_data->flags &=
+			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+		}
+	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	if (!sas_device)
 		return;
 
 	/* exposing raid component */
-	sas_device->volume_handle = 0;
-	sas_device->volume_wwid = 0;
-	clear_bit(handle, ioc->pd_handles);
-	_scsih_reprobe_target(sas_device->starget, 0);
+	if (starget)
+		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
 }
 
 /**
@@ -6101,23 +6140,38 @@
     Mpi2EventIrConfigElement_t *element)
 {
 	struct _sas_device *sas_device;
+	struct scsi_target *starget = NULL;
+	struct MPT2SAS_TARGET *sas_target_priv_data;
 	unsigned long flags;
 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+	u16 volume_handle = 0;
+	u64 volume_wwid = 0;
+
+	mpt2sas_config_get_volume_handle(ioc, handle, &volume_handle);
+	if (volume_handle)
+		mpt2sas_config_get_volume_wwid(ioc, volume_handle,
+		    &volume_wwid);
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	if (sas_device) {
+		set_bit(handle, ioc->pd_handles);
+		if (sas_device->starget && sas_device->starget->hostdata) {
+			starget = sas_device->starget;
+			sas_target_priv_data = starget->hostdata;
+			sas_target_priv_data->flags |=
+			    MPT_TARGET_FLAGS_RAID_COMPONENT;
+			sas_device->volume_handle = volume_handle;
+			sas_device->volume_wwid = volume_wwid;
+		}
+	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 	if (!sas_device)
 		return;
 
 	/* hiding raid component */
-	mpt2sas_config_get_volume_handle(ioc, handle,
-	    &sas_device->volume_handle);
-	mpt2sas_config_get_volume_wwid(ioc, sas_device->volume_handle,
-	    &sas_device->volume_wwid);
-	set_bit(handle, ioc->pd_handles);
-	_scsih_reprobe_target(sas_device->starget, 1);
-
+	if (starget)
+		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
 }
 
 /**
@@ -6132,16 +6186,9 @@
 _scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc,
     Mpi2EventIrConfigElement_t *element)
 {
-	struct _sas_device *sas_device;
-	unsigned long flags;
 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
 
-	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-	if (!sas_device)
-		return;
-	_scsih_remove_device(ioc, sas_device);
+	_scsih_device_remove_by_handle(ioc, handle);
 }
 
 /**
@@ -6583,18 +6630,13 @@
 	/* code added for raid transport support */
 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
 
-		handle = le16_to_cpu(event_data->VolDevHandle);
-
 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
+		handle = le16_to_cpu(event_data->VolDevHandle);
 		raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
-		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-
-		if (!raid_device)
-			return;
-
-		if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC)
+		if (raid_device)
 			raid_device->percent_complete =
 			    event_data->PercentComplete;
+		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 	}
 }
 
@@ -6761,13 +6803,18 @@
 			 * required data for Direct IO
 			 */
 			_scsih_init_warpdrive_properties(ioc, raid_device);
-			if (raid_device->handle == handle)
+			spin_lock_irqsave(&ioc->raid_device_lock, flags);
+			if (raid_device->handle == handle) {
+				spin_unlock_irqrestore(&ioc->raid_device_lock,
+				    flags);
 				return;
+			}
 			printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
 			    raid_device->handle);
 			raid_device->handle = handle;
 			if (sas_target_priv_data)
 				sas_target_priv_data->handle = handle;
+			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 			return;
 		}
 	}
@@ -6939,58 +6986,56 @@
 _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
 {
 	struct _sas_device *sas_device, *sas_device_next;
-	struct _sas_node *sas_expander;
+	struct _sas_node *sas_expander, *sas_expander_next;
 	struct _raid_device *raid_device, *raid_device_next;
+	struct list_head tmp_list;
+	unsigned long flags;
 
 	printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n",
 	    ioc->name);
 
+	/* removing unresponding end devices */
+	printk(MPT2SAS_INFO_FMT "removing unresponding devices: end-devices\n",
+	    ioc->name);
 	list_for_each_entry_safe(sas_device, sas_device_next,
 	    &ioc->sas_device_list, list) {
-		if (sas_device->responding) {
+		if (!sas_device->responding)
+			mpt2sas_device_remove_by_sas_address(ioc,
+				sas_device->sas_address);
+		else
 			sas_device->responding = 0;
-			continue;
-		}
-		if (sas_device->starget)
-			starget_printk(KERN_INFO, sas_device->starget,
-			    "removing: handle(0x%04x), sas_addr(0x%016llx), "
-			    "enclosure logical id(0x%016llx), slot(%d)\n",
-			    sas_device->handle,
-			    (unsigned long long)sas_device->sas_address,
-			    (unsigned long long)
-			    sas_device->enclosure_logical_id,
-			    sas_device->slot);
-		_scsih_remove_device(ioc, sas_device);
 	}
 
-	if (!ioc->ir_firmware)
-		goto retry_expander_search;
-
-	list_for_each_entry_safe(raid_device, raid_device_next,
-	    &ioc->raid_device_list, list) {
-		if (raid_device->responding) {
-			raid_device->responding = 0;
-			continue;
+	/* removing unresponding volumes */
+	if (ioc->ir_firmware) {
+		printk(MPT2SAS_INFO_FMT "removing unresponding devices: "
+		    "volumes\n", ioc->name);
+		list_for_each_entry_safe(raid_device, raid_device_next,
+		    &ioc->raid_device_list, list) {
+			if (!raid_device->responding)
+				_scsih_sas_volume_delete(ioc,
+				    raid_device->handle);
+			else
+				raid_device->responding = 0;
 		}
-		if (raid_device->starget) {
-			starget_printk(KERN_INFO, raid_device->starget,
-			    "removing: handle(0x%04x), wwid(0x%016llx)\n",
-			      raid_device->handle,
-			    (unsigned long long)raid_device->wwid);
-			scsi_remove_target(&raid_device->starget->dev);
-		}
-		_scsih_raid_device_remove(ioc, raid_device);
 	}
-
- retry_expander_search:
-	sas_expander = NULL;
-	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
-		if (sas_expander->responding) {
+	/* removing unresponding expanders */
+	printk(MPT2SAS_INFO_FMT "removing unresponding devices: expanders\n",
+	    ioc->name);
+	spin_lock_irqsave(&ioc->sas_node_lock, flags);
+	INIT_LIST_HEAD(&tmp_list);
+	list_for_each_entry_safe(sas_expander, sas_expander_next,
+	    &ioc->sas_expander_list, list) {
+		if (!sas_expander->responding)
+			list_move_tail(&sas_expander->list, &tmp_list);
+		else
 			sas_expander->responding = 0;
-			continue;
-		}
-		mpt2sas_expander_remove(ioc, sas_expander->sas_address);
-		goto retry_expander_search;
+	}
+	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
+	    list) {
+		list_del(&sas_expander->list);
+		_scsih_expander_node_remove(ioc, sas_expander);
 	}
 	printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n",
 	    ioc->name);
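
_scsih_remove_unresponding_sas_devices() now migrates stale expanders onto a local
list while sas_node_lock is held and tears them down only after the lock is
dropped, so that the teardown, which can sleep, never runs under the spinlock. The
same idiom condensed from the hunk above into a standalone helper, using the
on-stack LIST_HEAD() initializer instead of a separate INIT_LIST_HEAD() call
(the function name is illustrative):

    static void example_prune_stale_expanders(struct MPT2SAS_ADAPTER *ioc)
    {
            struct _sas_node *sas_expander, *next;
            unsigned long flags;
            LIST_HEAD(stale);

            spin_lock_irqsave(&ioc->sas_node_lock, flags);
            list_for_each_entry_safe(sas_expander, next,
                &ioc->sas_expander_list, list) {
                    if (!sas_expander->responding)
                            list_move_tail(&sas_expander->list, &stale);
                    else
                            sas_expander->responding = 0;
            }
            spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

            /* Lock dropped: safe to do the potentially sleeping teardown. */
            list_for_each_entry_safe(sas_expander, next, &stale, list) {
                    list_del(&sas_expander->list);
                    _scsih_expander_node_remove(ioc, sas_expander);
            }
    }
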
@@ -7043,6 +7088,7 @@
 	struct _sas_device *sas_device;
 	struct _sas_node *expander_device;
 	static struct _raid_device *raid_device;
+	unsigned long flags;
 
 	printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
 
@@ -7057,8 +7103,10 @@
 		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
 			break;
 		handle = le16_to_cpu(expander_pg0.DevHandle);
+		spin_lock_irqsave(&ioc->sas_node_lock, flags);
 		expander_device = mpt2sas_scsih_expander_find_by_sas_address(
 		    ioc, le64_to_cpu(expander_pg0.SASAddress));
+		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 		if (expander_device)
 			_scsih_refresh_expander_links(ioc, expander_device,
 			    handle);
@@ -7080,7 +7128,9 @@
 			break;
 		phys_disk_num = pd_pg0.PhysDiskNum;
 		handle = le16_to_cpu(pd_pg0.DevHandle);
+		spin_lock_irqsave(&ioc->sas_device_lock, flags);
 		sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		if (sas_device)
 			continue;
 		if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
@@ -7107,8 +7157,10 @@
 		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
 			break;
 		handle = le16_to_cpu(volume_pg1.DevHandle);
+		spin_lock_irqsave(&ioc->raid_device_lock, flags);
 		raid_device = _scsih_raid_device_find_by_wwid(ioc,
 		    le64_to_cpu(volume_pg1.WWID));
+		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 		if (raid_device)
 			continue;
 		if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
@@ -7140,8 +7192,10 @@
 		if (!(_scsih_is_end_device(
 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
 			continue;
+		spin_lock_irqsave(&ioc->sas_device_lock, flags);
 		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
 		    le64_to_cpu(sas_device_pg0.SASAddress));
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 		if (sas_device)
 			continue;
 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
@@ -7235,7 +7289,7 @@
 
 	switch (fw_event->event) {
 	case MPT2SAS_REMOVE_UNRESPONDING_DEVICES:
-		while (scsi_host_in_recovery(ioc->shost))
+		while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
 			ssleep(1);
 		_scsih_remove_unresponding_sas_devices(ioc);
 		_scsih_scan_for_devices_after_reset(ioc);
@@ -7313,6 +7367,13 @@
 		return 1;
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+
+	if (unlikely(!mpi_reply)) {
+		printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return 1;
+	}
+
 	event = le16_to_cpu(mpi_reply->Event);
 
 	switch (event) {
@@ -7353,14 +7414,14 @@
 	case MPI2_EVENT_LOG_ENTRY_ADDED:
 	{
 		Mpi2EventDataLogEntryAdded_t *log_entry;
-		u32 *log_code;
+		__le32 *log_code;
 
 		if (!ioc->is_warpdrive)
 			break;
 
 		log_entry = (Mpi2EventDataLogEntryAdded_t *)
 		    mpi_reply->EventData;
-		log_code = (u32 *)log_entry->LogData;
+		log_code = (__le32 *)log_entry->LogData;
 
 		if (le16_to_cpu(log_entry->LogEntryQualifier)
 		    != MPT2_WARPDRIVE_LOGENTRY)
@@ -7487,7 +7548,7 @@
 			return;
 		if (mpt2sas_port->remote_identify.device_type ==
 		    SAS_END_DEVICE)
-			mpt2sas_device_remove(ioc,
+			mpt2sas_device_remove_by_sas_address(ioc,
 			    mpt2sas_port->remote_identify.sas_address);
 		else if (mpt2sas_port->remote_identify.device_type ==
 		    SAS_EDGE_EXPANDER_DEVICE ||
@@ -7661,7 +7722,7 @@
 	   &ioc->sas_hba.sas_port_list, port_list) {
 		if (mpt2sas_port->remote_identify.device_type ==
 		    SAS_END_DEVICE)
-			mpt2sas_device_remove(ioc,
+			mpt2sas_device_remove_by_sas_address(ioc,
 			    mpt2sas_port->remote_identify.sas_address);
 		else if (mpt2sas_port->remote_identify.device_type ==
 		    SAS_EDGE_EXPANDER_DEVICE ||
@@ -7733,11 +7794,11 @@
 		if (rc)
 			_scsih_raid_device_remove(ioc, raid_device);
 	} else {
+		spin_lock_irqsave(&ioc->sas_device_lock, flags);
 		sas_device = device;
 		handle = sas_device->handle;
 		sas_address_parent = sas_device->sas_address_parent;
 		sas_address = sas_device->sas_address;
-		spin_lock_irqsave(&ioc->sas_device_lock, flags);
 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
@@ -8061,8 +8122,8 @@
  out_thread_fail:
 	list_del(&ioc->list);
 	scsi_remove_host(shost);
-	scsi_host_put(shost);
  out_add_shost_fail:
+	scsi_host_put(shost);
 	return -ENODEV;
 }
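
The unresponding-device cleanup above follows a standard kernel pattern: entries that stopped responding are moved onto a private list while sas_node_lock is held, and the teardown, which may sleep, runs only after the lock is dropped. A minimal sketch of that pattern, with hypothetical struct and function names rather than the driver's own:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct node {
	struct list_head list;
	bool responding;
};

static void prune_unresponding(struct list_head *live, spinlock_t *lock)
{
	struct node *n, *next;
	unsigned long flags;
	LIST_HEAD(stale);

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(n, next, live, list) {
		if (!n->responding)
			list_move_tail(&n->list, &stale); /* defer teardown */
	}
	spin_unlock_irqrestore(lock, flags);

	/* lock dropped: work that may sleep is safe here */
	list_for_each_entry_safe(n, next, &stale, list) {
		list_del(&n->list);
		kfree(n);
	}
}
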
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 8310474..c6cf20f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -163,12 +163,15 @@
 		return -EIO;
 	}
 
-	memset(identify, 0, sizeof(*identify));
+	memset(identify, 0, sizeof(struct sas_identify));
 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
 
 	/* sas_address */
 	identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
 
+	/* phy number of the parent device this device is linked to */
+	identify->phy_identifier = sas_device_pg0.PhyNum;
+
 	/* device_type */
 	switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
 	case MPI2_SAS_DEVICE_INFO_NO_DEVICE:
@@ -484,7 +487,7 @@
 
 	ioc->logging_level |= MPT_DEBUG_TRANSPORT;
 	if (device_type == SAS_END_DEVICE)
-		mpt2sas_device_remove(ioc, sas_address);
+		mpt2sas_device_remove_by_sas_address(ioc, sas_address);
 	else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
 	    device_type == SAS_FANOUT_EXPANDER_DEVICE)
 		mpt2sas_expander_remove(ioc, sas_address);
@@ -792,9 +795,10 @@
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	sas_node = _transport_sas_node_find_by_sas_address(ioc,
 	    sas_address_parent);
-	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
-	if (!sas_node)
+	if (!sas_node) {
+		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 		return;
+	}
 	list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list,
 	    port_list) {
 		if (mpt2sas_port->remote_identify.sas_address != sas_address)
@@ -804,8 +808,10 @@
 		goto out;
 	}
  out:
-	if (!found)
+	if (!found) {
+		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 		return;
+	}
 
 	for (i = 0; i < sas_node->num_phys; i++) {
 		if (sas_node->phy[i].remote_identify.sas_address == sas_address)
@@ -813,6 +819,7 @@
 			    sizeof(struct sas_identify));
 	}
 
+	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 	list_for_each_entry_safe(mpt2sas_phy, next_phy,
 	    &mpt2sas_port->phy_list, port_siblings) {
 		if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
@@ -986,12 +993,14 @@
 
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
-	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
-	if (!sas_node)
+	if (!sas_node) {
+		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 		return;
+	}
 
 	mpt2sas_phy = &sas_node->phy[phy_number];
 	mpt2sas_phy->attached_handle = handle;
+	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 	if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
 		_transport_set_identify(ioc, handle,
 		    &mpt2sas_phy->remote_identify);
@@ -1310,17 +1319,20 @@
 	struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
 	struct _sas_device *sas_device;
 	unsigned long flags;
+	int rc;
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
 	    rphy->identify.sas_address);
+	if (sas_device) {
+		*identifier = sas_device->enclosure_logical_id;
+		rc = 0;
+	} else {
+		*identifier = 0;
+		rc = -ENXIO;
+	}
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
-	if (!sas_device)
-		return -ENXIO;
-
-	*identifier = sas_device->enclosure_logical_id;
-	return 0;
+	return rc;
 }
 
 /**
@@ -1335,16 +1347,17 @@
 	struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
 	struct _sas_device *sas_device;
 	unsigned long flags;
+	int rc;
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
 	    rphy->identify.sas_address);
+	if (sas_device)
+		rc = sas_device->slot;
+	else
+		rc = -ENXIO;
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
-	if (!sas_device)
-		return -ENXIO;
-
-	return sas_device->slot;
+	return rc;
 }
 
 /* phy control request structure */
@@ -1629,11 +1642,13 @@
 {
 	struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
 	Mpi2ConfigReply_t mpi_reply;
 	u16 ioc_status;
 	u16 sz;
 	int rc = 0;
 	unsigned long flags;
+	int i, discovery_active;
 
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	if (_transport_sas_node_find_by_sas_address(ioc,
@@ -1651,7 +1666,50 @@
 
 	/* handle hba phys */
 
-	/* sas_iounit page 1 */
+	/* read sas_iounit page 0 */
+	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+	    sizeof(Mpi2SasIOUnit0PhyData_t));
+	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+	if (!sas_iounit_pg0) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+	if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+	    sas_iounit_pg0, sz))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		rc = -ENXIO;
+		goto out;
+	}
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* unable to enable/disable phys when discovery is active */
+	for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
+		if (sas_iounit_pg0->PhyData[i].PortFlags &
+		    MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
+			printk(MPT2SAS_ERR_FMT "discovery is active on "
+			    "port = %d, phy = %d: unable to enable/disable "
+			    "phys, try again later!\n", ioc->name,
+			    sas_iounit_pg0->PhyData[i].Port, i);
+			discovery_active = 1;
+		}
+	}
+
+	if (discovery_active) {
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	/* read sas_iounit page 1 */
 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
 	    sizeof(Mpi2SasIOUnit1PhyData_t));
 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
@@ -1676,7 +1734,18 @@
 		rc = -EIO;
 		goto out;
 	}
-
+	/* copy Port/PortFlags/PhyFlags from page 0 */
+	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+		sas_iounit_pg1->PhyData[i].Port =
+		    sas_iounit_pg0->PhyData[i].Port;
+		sas_iounit_pg1->PhyData[i].PortFlags =
+		    (sas_iounit_pg0->PhyData[i].PortFlags &
+		    MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
+		sas_iounit_pg1->PhyData[i].PhyFlags =
+		    (sas_iounit_pg0->PhyData[i].PhyFlags &
+		    (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED +
+		    MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
+	}
 	if (enable)
 		sas_iounit_pg1->PhyData[phy->number].PhyFlags
 		    &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
@@ -1692,6 +1761,7 @@
 
  out:
 	kfree(sas_iounit_pg1);
+	kfree(sas_iounit_pg0);
 	return rc;
 }
 
@@ -1828,7 +1898,7 @@
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
 	Mpi2SmpPassthroughRequest_t *mpi_request;
 	Mpi2SmpPassthroughReply_t *mpi_reply;
-	int rc;
+	int rc, i;
 	u16 smid;
 	u32 ioc_state;
 	unsigned long timeleft;
@@ -1837,24 +1907,20 @@
 	u8 issue_reset = 0;
 	dma_addr_t dma_addr_in = 0;
 	dma_addr_t dma_addr_out = 0;
+	dma_addr_t pci_dma_in = 0;
+	dma_addr_t pci_dma_out = 0;
+	void *pci_addr_in = NULL;
+	void *pci_addr_out = NULL;
 	u16 wait_state_count;
 	struct request *rsp = req->next_rq;
+	struct bio_vec *bvec = NULL;
 
 	if (!rsp) {
 		printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
 		    "missing\n", ioc->name, __func__);
 		return -EINVAL;
 	}
-
-	/* do we need to support multiple segments? */
-	if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
-		printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
-		    "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
-		    blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
-		return -EINVAL;
-	}
-
-	if (ioc->shost_recovery) {
+	if (ioc->shost_recovery || ioc->pci_error_recovery) {
 		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
 		    __func__, ioc->name);
 		return -EFAULT;
@@ -1872,6 +1938,59 @@
 	}
 	ioc->transport_cmds.status = MPT2_CMD_PENDING;
 
+	/* Check if the request is split across multiple segments */
+	if (req->bio->bi_vcnt > 1) {
+		u32 offset = 0;
+
+		/* Allocate memory and copy the request */
+		pci_addr_out = pci_alloc_consistent(ioc->pdev,
+		    blk_rq_bytes(req), &pci_dma_out);
+		if (!pci_addr_out) {
+			printk(MPT2SAS_INFO_FMT "%s(): PCI Addr out = NULL\n",
+			    ioc->name, __func__);
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		bio_for_each_segment(bvec, req->bio, i) {
+			memcpy(pci_addr_out + offset,
+			    page_address(bvec->bv_page) + bvec->bv_offset,
+			    bvec->bv_len);
+			offset += bvec->bv_len;
+		}
+	} else {
+		dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
+		    blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
+		if (!dma_addr_out) {
+			printk(MPT2SAS_INFO_FMT "%s(): DMA Addr out = NULL\n",
+			    ioc->name, __func__);
+			rc = -ENOMEM;
+			goto free_pci;
+		}
+	}
+
+	/* Check if the response needs to be populated across
+	 * multiple segments */
+	if (rsp->bio->bi_vcnt > 1) {
+		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
+		    &pci_dma_in);
+		if (!pci_addr_in) {
+			printk(MPT2SAS_INFO_FMT "%s(): PCI Addr in = NULL\n",
+			    ioc->name, __func__);
+			rc = -ENOMEM;
+			goto unmap;
+		}
+	} else {
+		dma_addr_in =  pci_map_single(ioc->pdev, bio_data(rsp->bio),
+		    blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
+		if (!dma_addr_in) {
+			printk(MPT2SAS_INFO_FMT "%s(): DMA Addr in = NULL\n",
+			    ioc->name, __func__);
+			rc = -ENOMEM;
+			goto unmap;
+		}
+	}
+
 	wait_state_count = 0;
 	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
@@ -1880,7 +1999,7 @@
 			    "%s: failed due to ioc not operational\n",
 			    ioc->name, __func__);
 			rc = -EFAULT;
-			goto out;
+			goto unmap;
 		}
 		ssleep(1);
 		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
@@ -1897,7 +2016,7 @@
 		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
 		    ioc->name, __func__);
 		rc = -EAGAIN;
-		goto out;
+		goto unmap;
 	}
 
 	rc = 0;
@@ -1919,16 +2038,14 @@
 	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
 	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-	dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
-		blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
-	if (!dma_addr_out) {
-		mpt2sas_base_free_smid(ioc, smid);
-		goto unmap;
+	if (req->bio->bi_vcnt > 1) {
+		ioc->base_add_sg_single(psge, sgl_flags |
+		    (blk_rq_bytes(req) - 4), pci_dma_out);
+	} else {
+		ioc->base_add_sg_single(psge, sgl_flags |
+		    (blk_rq_bytes(req) - 4), dma_addr_out);
 	}
 
-	ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
-	    dma_addr_out);
-
 	/* incr sgel */
 	psge += ioc->sge_size;
 
@@ -1937,16 +2054,14 @@
 	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
 	    MPI2_SGE_FLAGS_END_OF_LIST);
 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-	dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
-				     blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
-	if (!dma_addr_in) {
-		mpt2sas_base_free_smid(ioc, smid);
-		goto unmap;
+	if (rsp->bio->bi_vcnt > 1) {
+		ioc->base_add_sg_single(psge, sgl_flags |
+		    (blk_rq_bytes(rsp) + 4), pci_dma_in);
+	} else {
+		ioc->base_add_sg_single(psge, sgl_flags |
+		    (blk_rq_bytes(rsp) + 4), dma_addr_in);
 	}
 
-	ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
-	    dma_addr_in);
-
 	dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
 	    "sending smp request\n", ioc->name, __func__));
 
@@ -1982,6 +2097,27 @@
 		req->resid_len = 0;
 		rsp->resid_len -=
 		    le16_to_cpu(mpi_reply->ResponseDataLength);
+		/* check if the resp needs to be copied from the allocated
+		 * pci mem */
+		if (rsp->bio->bi_vcnt > 1) {
+			u32 offset = 0;
+			u32 bytes_to_copy =
+			    le16_to_cpu(mpi_reply->ResponseDataLength);
+			bio_for_each_segment(bvec, rsp->bio, i) {
+				if (bytes_to_copy <= bvec->bv_len) {
+					memcpy(page_address(bvec->bv_page) +
+					    bvec->bv_offset, pci_addr_in +
+					    offset, bytes_to_copy);
+					break;
+				} else {
+					memcpy(page_address(bvec->bv_page) +
+					    bvec->bv_offset, pci_addr_in +
+					    offset, bvec->bv_len);
+					bytes_to_copy -= bvec->bv_len;
+				}
+				offset += bvec->bv_len;
+			}
+		}
 	} else {
 		dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT
 		    "%s - no reply\n", ioc->name, __func__));
@@ -2003,6 +2139,15 @@
 		pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
 		    PCI_DMA_BIDIRECTIONAL);
 
+ free_pci:
+	if (pci_addr_out)
+		pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
+		    pci_dma_out);
+
+	if (pci_addr_in)
+		pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
+		    pci_dma_in);
+
  out:
 	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
 	mutex_unlock(&ioc->transport_cmds.mutex);
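
The get_enclosure_identifier() and get_bay_identifier() rewrites above keep sas_device_lock held while the looked-up field is copied out, and return a single rc afterwards instead of dereferencing the entry once the lock is gone. A stripped-down sketch of that shape, with hypothetical types and a hypothetical lookup callback:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct dev_entry {
	u64 enclosure_id;
};

static int read_enclosure_id(spinlock_t *lock,
			     struct dev_entry *(*find)(u64 addr),
			     u64 addr, u64 *id)
{
	struct dev_entry *e;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(lock, flags);
	e = find(addr);
	if (e) {
		*id = e->enclosure_id;	/* copy out under the lock */
		rc = 0;
	} else {
		*id = 0;
		rc = -ENXIO;
	}
	spin_unlock_irqrestore(lock, flags);
	return rc;
}
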
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 944afad..c3d20c8 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -66,9 +66,10 @@
 
 /* driver compile-time configuration */
 #define	PM8001_MAX_CCB		 512	/* max ccbs supported */
+#define PM8001_MPI_QUEUE         1024   /* maximum mpi queue entries */
 #define	PM8001_MAX_INB_NUM	 1
 #define	PM8001_MAX_OUTB_NUM	 1
-#define	PM8001_CAN_QUEUE	 128	/* SCSI Queue depth */
+#define	PM8001_CAN_QUEUE	 508	/* SCSI Queue depth */
 
 /* unchangeable hardware details */
 #define	PM8001_MAX_PHYS		 8	/* max. possible phys */
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 9d82ee5..bf54aaf 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -192,7 +192,7 @@
 	pm8001_ha->main_cfg_tbl.fatal_err_interrupt		= 0x01;
 	for (i = 0; i < qn; i++) {
 		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
-			0x00000100 | (0x00000040 << 16) | (0x00<<30);
+			PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
 		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[IB].phys_addr_hi;
 		pm8001_ha->inbnd_q_tbl[i].lower_base_addr	=
@@ -218,7 +218,7 @@
 	}
 	for (i = 0; i < qn; i++) {
 		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
-			256 | (64 << 16) | (1<<30);
+			PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
 		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[OB].phys_addr_hi;
 		pm8001_ha->outbnd_q_tbl[i].lower_base_addr	=
@@ -1245,7 +1245,7 @@
 	/* Stores the new consumer index */
 	consumer_index = pm8001_read_32(circularQ->ci_virt);
 	circularQ->consumer_index = cpu_to_le32(consumer_index);
-	if (((circularQ->producer_idx + bcCount) % 256) ==
+	if (((circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE) ==
 		le32_to_cpu(circularQ->consumer_index)) {
 		*messagePtr = NULL;
 		return -1;
@@ -1253,7 +1253,8 @@
 	/* get memory IOMB buffer address */
 	offset = circularQ->producer_idx * 64;
 	/* increment to next bcCount element */
-	circularQ->producer_idx = (circularQ->producer_idx + bcCount) % 256;
+	circularQ->producer_idx = (circularQ->producer_idx + bcCount)
+				% PM8001_MPI_QUEUE;
 	/* Adds that distance to the base of the region virtual address plus
 	the message header size*/
 	msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt	+ offset);
@@ -1326,7 +1327,8 @@
 		return 0;
 	}
 	/* free the circular queue buffer elements associated with the message*/
-	circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256;
+	circularQ->consumer_idx = (circularQ->consumer_idx + bc)
+				% PM8001_MPI_QUEUE;
 	/* update the CI of outbound queue */
 	pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
 		circularQ->consumer_idx);
@@ -1383,7 +1385,8 @@
 					circularQ->consumer_idx =
 						(circularQ->consumer_idx +
 						((le32_to_cpu(msgHeader_tmp)
-						>> 24) & 0x1f)) % 256;
+						 >> 24) & 0x1f))
+							% PM8001_MPI_QUEUE;
 					msgHeader_tmp = 0;
 					pm8001_write_32(msgHeader, 0, 0);
 					/* update the CI of outbound queue */
@@ -1396,7 +1399,7 @@
 				circularQ->consumer_idx =
 					(circularQ->consumer_idx +
 					((le32_to_cpu(msgHeader_tmp) >> 24) &
-					0x1f)) % 256;
+					0x1f)) % PM8001_MPI_QUEUE;
 				msgHeader_tmp = 0;
 				pm8001_write_32(msgHeader, 0, 0);
 				/* update the CI of outbound queue */
@@ -3357,7 +3360,7 @@
 	struct fw_control_ex	fw_control_context;
 	struct fw_flash_Update_resp *ppayload =
 		(struct fw_flash_Update_resp *)(piomb + 4);
-	u32 tag = ppayload->tag;
+	u32 tag = le32_to_cpu(ppayload->tag);
 	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
 	status = le32_to_cpu(ppayload->status);
 	memcpy(&fw_control_context,
@@ -3703,8 +3706,8 @@
  */
 static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
-	u32 pHeader = (u32)*(u32 *)piomb;
-	u8 opc = (u8)(pHeader & 0xFFF);
+	__le32 pHeader = *(__le32 *)piomb;
+	u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
 
 	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
 
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 1a4611e..d437309 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -599,7 +599,7 @@
  *
  */
 struct fw_flash_Update_resp {
-	dma_addr_t	tag;
+	__le32	tag;
 	__le32	status;
 	u32	reserved[13];
 } __attribute__((packed, aligned(4)));
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 36efaa7..0267c22 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -235,15 +235,15 @@
 	pm8001_ha->memoryMap.region[PI].alignment = 4;
 
 	/* MPI Memory region 5 inbound queues */
-	pm8001_ha->memoryMap.region[IB].num_elements = 256;
+	pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
 	pm8001_ha->memoryMap.region[IB].element_size = 64;
-	pm8001_ha->memoryMap.region[IB].total_len = 256 * 64;
+	pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
 	pm8001_ha->memoryMap.region[IB].alignment = 64;
 
-	/* MPI Memory region 6 inbound queues */
-	pm8001_ha->memoryMap.region[OB].num_elements = 256;
+	/* MPI Memory region 6 outbound queues */
+	pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
 	pm8001_ha->memoryMap.region[OB].element_size = 64;
-	pm8001_ha->memoryMap.region[OB].total_len = 256 * 64;
+	pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
 	pm8001_ha->memoryMap.region[OB].alignment = 64;
 
 	/* Memory region write DMA*/
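
The pm8001 changes above replace the hard-coded ring depth of 256 with PM8001_MPI_QUEUE wherever the producer and consumer indices wrap, and size the IB/OB memory regions to match. A small sketch of the wrap arithmetic, using a placeholder depth constant:

#include <linux/types.h>

#define QUEUE_DEPTH	1024	/* stands in for PM8001_MPI_QUEUE */

static int ring_claim(u32 *producer_idx, u32 consumer_idx, u32 count)
{
	/* the producer may not catch up to the consumer: that state means full */
	if (((*producer_idx + count) % QUEUE_DEPTH) == consumer_idx)
		return -1;
	*producer_idx = (*producer_idx + count) % QUEUE_DEPTH;
	return 0;
}
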
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f74cc06..bc3cc6d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1367,6 +1367,9 @@
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
 
+	if (ha->flags.isp82xx_reset_hdlr_active)
+		return -EBUSY;
+
 	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
 	if (rval)
 		return rval;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 897731b..62324a1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,7 +15,7 @@
  * | Mailbox commands             |       0x113e       | 0x112c-0x112e  |
  * |                              |                    | 0x113a         |
  * | Device Discovery             |       0x2086       | 0x2020-0x2022  |
- * | Queue Command and IO tracing |       0x302f       | 0x3006,0x3008  |
+ * | Queue Command and IO tracing |       0x3030       | 0x3006,0x3008  |
  * |                              |                    | 0x302d-0x302e  |
  * | DPC Thread                   |       0x401c       |		|
  * | Async Events                 |       0x505d       | 0x502b-0x502f  |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index f79844c..ce42288 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1715,13 +1715,24 @@
 				res = DID_ERROR << 16;
 				break;
 			}
-		} else {
+		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
+			    lscsi_status != SAM_STAT_BUSY) {
+			/*
+			 * A SCSI status of task set full or busy is
+			 * considered to mean the task did not complete.
+			 */
+
 			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
 			    "Dropped frame(s) detected (0x%x "
-			    "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
+			    "of 0x%x bytes).\n", resid,
+			    scsi_bufflen(cp));
 
 			res = DID_ERROR << 16 | lscsi_status;
 			goto check_scsi_status;
+		} else {
+			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
+			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
+			    scsi_status, lscsi_status);
 		}
 
 		res = DID_OK << 16 | lscsi_status;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index f052853..de722a9 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3125,6 +3125,7 @@
 		ql_log(ql_log_info, vha, 0x00b7,
 		    "HW State: COLD/RE-INIT.\n");
 		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+		qla82xx_set_rst_ready(ha);
 		if (ql2xmdenable) {
 			if (qla82xx_md_collect(vha))
 				ql_log(ql_log_warn, vha, 0xb02c,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a2f9992..7db8033 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3577,9 +3577,25 @@
 						continue;
 					/* Attempt a retry. */
 					status = 1;
-				} else
+				} else {
 					status = qla2x00_fabric_login(vha,
 					    fcport, &next_loopid);
+					if (status ==  QLA_SUCCESS) {
+						int status2;
+						uint8_t opts;
+
+						opts = 0;
+						if (fcport->flags &
+						    FCF_FCP2_DEVICE)
+							opts |= BIT_1;
+						status2 =
+						    qla2x00_get_port_database(
+							vha, fcport,
+							opts);
+						if (status2 != QLA_SUCCESS)
+							status = 1;
+					}
+				}
 			} else
 				status = qla2x00_local_device_login(vha,
 								fcport);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3c13c0a..a683e76 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1017,6 +1017,9 @@
 	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
 		return;
 
+	if (ha->flags.isp82xx_reset_hdlr_active)
+		return;
+
 	ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
 	    ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
 	if (hdr.version == __constant_cpu_to_le16(0xffff))
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 29d780c..f5fdb16 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.07.13-k"
+#define QLA2XXX_VERSION      "8.04.00.03-k"
 
 #define QLA_DRIVER_MAJOR_VER	8
-#define QLA_DRIVER_MINOR_VER	3
-#define QLA_DRIVER_PATCH_VER	7
+#define QLA_DRIVER_MINOR_VER	4
+#define QLA_DRIVER_PATCH_VER	0
 #define QLA_DRIVER_BETA_VER	3
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 07322ec..61c82a3 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -90,6 +90,12 @@
 EXPORT_SYMBOL(scsi_logging_level);
 #endif
 
+#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD)
+/* sd and scsi_pm need to coordinate flushing async actions */
+LIST_HEAD(scsi_sd_probe_domain);
+EXPORT_SYMBOL(scsi_sd_probe_domain);
+#endif
+
 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
  * You may not alter any existing entry (although adding new ones is
  * encouraged once assigned by ANSI/INCITS T10
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ead6405..62ddfd3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1638,7 +1638,7 @@
 					 request_fn_proc *request_fn)
 {
 	struct request_queue *q;
-	struct device *dev = shost->shost_gendev.parent;
+	struct device *dev = shost->dma_dev;
 
 	q = blk_init_queue(request_fn, NULL);
 	if (!q)
@@ -2348,10 +2348,14 @@
  *
  *	Must be called with user context, may sleep.
  */
-void
-scsi_device_resume(struct scsi_device *sdev)
+void scsi_device_resume(struct scsi_device *sdev)
 {
-	if(scsi_device_set_state(sdev, SDEV_RUNNING))
+	/* check if the device state was mutated prior to resume, and if
+	 * so assume the state is being managed elsewhere (for example
+	 * device deleted during suspend)
+	 */
+	if (sdev->sdev_state != SDEV_QUIESCE ||
+	    scsi_device_set_state(sdev, SDEV_RUNNING))
 		return;
 	scsi_run_queue(sdev->request_queue);
 }
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index c467064..f661a41 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -97,7 +97,7 @@
 {
 	if (scsi_is_sdev_device(dev)) {
 		/* sd probing uses async_schedule.  Wait until it finishes. */
-		async_synchronize_full();
+		async_synchronize_full_domain(&scsi_sd_probe_domain);
 
 	} else if (scsi_is_host_device(dev)) {
 		/* Wait until async scanning is finished */
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index be4fa6d..07ce3f5 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -163,6 +163,8 @@
 static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
 #endif /* CONFIG_PM_RUNTIME */
 
+extern struct list_head scsi_sd_probe_domain;
+
 /* 
  * internal scsi timeout functions: for use by mid-layer and transport
  * classes.
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 80fbe2a..5797604 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2808,17 +2808,20 @@
 						  FC_RPORT_DEVLOSS_PENDING |
 						  FC_RPORT_DEVLOSS_CALLBK_DONE);
 
+				spin_unlock_irqrestore(shost->host_lock, flags);
+
 				/* if target, initiate a scan */
 				if (rport->scsi_target_id != -1) {
+					scsi_target_unblock(&rport->dev);
+
+					spin_lock_irqsave(shost->host_lock,
+							  flags);
 					rport->flags |= FC_RPORT_SCAN_PENDING;
 					scsi_queue_work(shost,
 							&rport->scan_work);
 					spin_unlock_irqrestore(shost->host_lock,
 							flags);
-					scsi_target_unblock(&rport->dev);
-				} else
-					spin_unlock_irqrestore(shost->host_lock,
-							flags);
+				}
 
 				fc_bsg_goose_queue(rport);
 
@@ -2876,16 +2879,17 @@
 			if (fci->f->dd_fcrport_size)
 				memset(rport->dd_data, 0,
 						fci->f->dd_fcrport_size);
+			spin_unlock_irqrestore(shost->host_lock, flags);
 
-			if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
+			if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
+				scsi_target_unblock(&rport->dev);
+
 				/* initiate a scan of the target */
+				spin_lock_irqsave(shost->host_lock, flags);
 				rport->flags |= FC_RPORT_SCAN_PENDING;
 				scsi_queue_work(shost, &rport->scan_work);
 				spin_unlock_irqrestore(shost->host_lock, flags);
-				scsi_target_unblock(&rport->dev);
-			} else
-				spin_unlock_irqrestore(shost->host_lock, flags);
-
+			}
 			return rport;
 		}
 	}
@@ -3083,12 +3087,12 @@
 		/* ensure any stgt delete functions are done */
 		fc_flush_work(shost);
 
+		scsi_target_unblock(&rport->dev);
 		/* initiate a scan of the target */
 		spin_lock_irqsave(shost->host_lock, flags);
 		rport->flags |= FC_RPORT_SCAN_PENDING;
 		scsi_queue_work(shost, &rport->scan_work);
 		spin_unlock_irqrestore(shost->host_lock, flags);
-		scsi_target_unblock(&rport->dev);
 	}
 }
 EXPORT_SYMBOL(fc_remote_port_rolechg);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index a2715c3..cf08071 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1010,10 +1010,10 @@
 	u8 *buffer;
 	const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
 
-	if (unlikely(scsi_device_get(sdev)))
+	if (unlikely(spi_dv_in_progress(starget)))
 		return;
 
-	if (unlikely(spi_dv_in_progress(starget)))
+	if (unlikely(scsi_device_get(sdev)))
 		return;
 	spi_dv_in_progress(starget) = 1;
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5ba5c2a..6f0a4c6 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -65,6 +65,7 @@
 #include <scsi/scsicam.h>
 
 #include "sd.h"
+#include "scsi_priv.h"
 #include "scsi_logging.h"
 
 MODULE_AUTHOR("Eric Youngdale");
@@ -2722,7 +2723,7 @@
 	dev_set_drvdata(dev, sdkp);
 
 	get_device(&sdkp->dev);	/* prevent release before async_schedule */
-	async_schedule(sd_probe_async, sdkp);
+	async_schedule_domain(sd_probe_async, sdkp, &scsi_sd_probe_domain);
 
 	return 0;
 
@@ -2756,7 +2757,7 @@
 	sdkp = dev_get_drvdata(dev);
 	scsi_autopm_get_device(sdkp->device);
 
-	async_synchronize_full();
+	async_synchronize_full_domain(&scsi_sd_probe_domain);
 	blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
 	blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
 	device_del(&sdkp->dev);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index eacd46b..9c5c5f2 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -104,7 +104,7 @@
 static int sg_add(struct device *, struct class_interface *);
 static void sg_remove(struct device *, struct class_interface *);
 
-static DEFINE_MUTEX(sg_mutex);
+static DEFINE_SPINLOCK(sg_open_exclusive_lock);
 
 static DEFINE_IDR(sg_index_idr);
 static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock
@@ -137,13 +137,15 @@
 	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
 	char orphan;		/* 1 -> drop on sight, 0 -> normal */
 	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
-	volatile char done;	/* 0->before bh, 1->before read, 2->read */
+	/* done protected by rq_list_lock */
+	char done;		/* 0->before bh, 1->before read, 2->read */
 	struct request *rq;
 	struct bio *bio;
 	struct execute_work ew;
 } Sg_request;
 
 typedef struct sg_fd {		/* holds the state of a file descriptor */
+	/* sfd_siblings is protected by sg_index_lock */
 	struct list_head sfd_siblings;
 	struct sg_device *parentdp;	/* owning device */
 	wait_queue_head_t read_wait;	/* queue read until command done */
@@ -157,7 +159,6 @@
 	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
 	char low_dma;		/* as in parent but possibly overridden to 1 */
 	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
-	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
 	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
 	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
 	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
@@ -171,9 +172,11 @@
 	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
 	int sg_tablesize;	/* adapter's max scatter-gather table size */
 	u32 index;		/* device index number */
+	/* sfds is protected by sg_index_lock */
 	struct list_head sfds;
 	volatile char detached;	/* 0->attached, 1->detached pending removal */
-	volatile char exclude;	/* opened for exclusive access */
+	/* exclude protected by sg_open_exclusive_lock */
+	char exclude;		/* opened for exclusive access */
 	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
 	struct gendisk *disk;
 	struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
@@ -221,6 +224,38 @@
 	return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
 }
 
+static int get_exclude(Sg_device *sdp)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+	ret = sdp->exclude;
+	spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+	return ret;
+}
+
+static int set_exclude(Sg_device *sdp, char val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+	sdp->exclude = val;
+	spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+	return val;
+}
+
+static int sfds_list_empty(Sg_device *sdp)
+{
+	unsigned long flags;
+	int ret;
+
+	read_lock_irqsave(&sg_index_lock, flags);
+	ret = list_empty(&sdp->sfds);
+	read_unlock_irqrestore(&sg_index_lock, flags);
+	return ret;
+}
+
 static int
 sg_open(struct inode *inode, struct file *filp)
 {
@@ -232,7 +267,6 @@
 	int res;
 	int retval;
 
-	mutex_lock(&sg_mutex);
 	nonseekable_open(inode, filp);
 	SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
 	sdp = sg_get_dev(dev);
@@ -264,25 +298,22 @@
 			retval = -EPERM; /* Can't lock it with read only access */
 			goto error_out;
 		}
-		if (!list_empty(&sdp->sfds) && (flags & O_NONBLOCK)) {
+		if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
 			retval = -EBUSY;
 			goto error_out;
 		}
-		res = 0;
-		__wait_event_interruptible(sdp->o_excl_wait,
-					   ((!list_empty(&sdp->sfds) || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
+		res = wait_event_interruptible(sdp->o_excl_wait,
+					   ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
 		if (res) {
 			retval = res;	/* -ERESTARTSYS because signal hit process */
 			goto error_out;
 		}
-	} else if (sdp->exclude) {	/* some other fd has an exclusive lock on dev */
+	} else if (get_exclude(sdp)) {	/* some other fd has an exclusive lock on dev */
 		if (flags & O_NONBLOCK) {
 			retval = -EBUSY;
 			goto error_out;
 		}
-		res = 0;
-		__wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
-					   res);
+		res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
 		if (res) {
 			retval = res;	/* -ERESTARTSYS because signal hit process */
 			goto error_out;
@@ -292,7 +323,7 @@
 		retval = -ENODEV;
 		goto error_out;
 	}
-	if (list_empty(&sdp->sfds)) {	/* no existing opens on this device */
+	if (sfds_list_empty(sdp)) {	/* no existing opens on this device */
 		sdp->sgdebug = 0;
 		q = sdp->device->request_queue;
 		sdp->sg_tablesize = queue_max_segments(q);
@@ -301,7 +332,7 @@
 		filp->private_data = sfp;
 	else {
 		if (flags & O_EXCL) {
-			sdp->exclude = 0;	/* undo if error */
+			set_exclude(sdp, 0);	/* undo if error */
 			wake_up_interruptible(&sdp->o_excl_wait);
 		}
 		retval = -ENOMEM;
@@ -317,7 +348,6 @@
 sg_put:
 	if (sdp)
 		sg_put_dev(sdp);
-	mutex_unlock(&sg_mutex);
 	return retval;
 }
 
@@ -332,9 +362,7 @@
 		return -ENXIO;
 	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
 
-	sfp->closed = 1;
-
-	sdp->exclude = 0;
+	set_exclude(sdp, 0);
 	wake_up_interruptible(&sdp->o_excl_wait);
 
 	scsi_autopm_put_device(sdp->device);
@@ -398,19 +426,14 @@
 			retval = -EAGAIN;
 			goto free_old_hdr;
 		}
-		while (1) {
-			retval = 0; /* following macro beats race condition */
-			__wait_event_interruptible(sfp->read_wait,
-				(sdp->detached ||
-				(srp = sg_get_rq_mark(sfp, req_pack_id))), 
-				retval);
-			if (sdp->detached) {
-				retval = -ENODEV;
-				goto free_old_hdr;
-			}
-			if (0 == retval)
-				break;
-
+		retval = wait_event_interruptible(sfp->read_wait,
+			(sdp->detached ||
+			(srp = sg_get_rq_mark(sfp, req_pack_id))));
+		if (sdp->detached) {
+			retval = -ENODEV;
+			goto free_old_hdr;
+		}
+		if (retval) {
 			/* -ERESTARTSYS as signal hit process */
 			goto free_old_hdr;
 		}
@@ -771,7 +794,18 @@
 	return 0;
 }
 
-static int
+static int srp_done(Sg_fd *sfp, Sg_request *srp)
+{
+	unsigned long flags;
+	int ret;
+
+	read_lock_irqsave(&sfp->rq_list_lock, flags);
+	ret = srp->done;
+	read_unlock_irqrestore(&sfp->rq_list_lock, flags);
+	return ret;
+}
+
+static long
 sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 {
 	void __user *p = (void __user *)arg;
@@ -791,40 +825,30 @@
 
 	switch (cmd_in) {
 	case SG_IO:
-		{
-			int blocking = 1;	/* ignore O_NONBLOCK flag */
-
-			if (sdp->detached)
-				return -ENODEV;
-			if (!scsi_block_when_processing_errors(sdp->device))
-				return -ENXIO;
-			if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
-				return -EFAULT;
-			result =
-			    sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
-					 blocking, read_only, 1, &srp);
-			if (result < 0)
-				return result;
-			while (1) {
-				result = 0;	/* following macro to beat race condition */
-				__wait_event_interruptible(sfp->read_wait,
-					(srp->done || sdp->detached),
-					result);
-				if (sdp->detached)
-					return -ENODEV;
-				write_lock_irq(&sfp->rq_list_lock);
-				if (srp->done) {
-					srp->done = 2;
-					write_unlock_irq(&sfp->rq_list_lock);
-					break;
-				}
-				srp->orphan = 1;
-				write_unlock_irq(&sfp->rq_list_lock);
-				return result;	/* -ERESTARTSYS because signal hit process */
-			}
+		if (sdp->detached)
+			return -ENODEV;
+		if (!scsi_block_when_processing_errors(sdp->device))
+			return -ENXIO;
+		if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
+			return -EFAULT;
+		result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
+				 1, read_only, 1, &srp);
+		if (result < 0)
+			return result;
+		result = wait_event_interruptible(sfp->read_wait,
+			(srp_done(sfp, srp) || sdp->detached));
+		if (sdp->detached)
+			return -ENODEV;
+		write_lock_irq(&sfp->rq_list_lock);
+		if (srp->done) {
+			srp->done = 2;
+			write_unlock_irq(&sfp->rq_list_lock);
 			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
 			return (result < 0) ? result : 0;
 		}
+		srp->orphan = 1;
+		write_unlock_irq(&sfp->rq_list_lock);
+		return result;	/* -ERESTARTSYS because signal hit process */
 	case SG_SET_TIMEOUT:
 		result = get_user(val, ip);
 		if (result)
@@ -1091,18 +1115,6 @@
 	}
 }
 
-static long
-sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
-{
-	int ret;
-
-	mutex_lock(&sg_mutex);
-	ret = sg_ioctl(filp, cmd_in, arg);
-	mutex_unlock(&sg_mutex);
-
-	return ret;
-}
-
 #ifdef CONFIG_COMPAT
 static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 {
@@ -1136,8 +1148,11 @@
 	int count = 0;
 	unsigned long iflags;
 
-	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
-	    || sfp->closed)
+	sfp = filp->private_data;
+	if (!sfp)
+		return POLLERR;
+	sdp = sfp->parentdp;
+	if (!sdp)
 		return POLLERR;
 	poll_wait(filp, &sfp->read_wait, wait);
 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
@@ -1347,7 +1362,7 @@
 	.read = sg_read,
 	.write = sg_write,
 	.poll = sg_poll,
-	.unlocked_ioctl = sg_unlocked_ioctl,
+	.unlocked_ioctl = sg_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = sg_compat_ioctl,
 #endif
@@ -2312,7 +2327,7 @@
 	const struct file_operations * fops;
 };
 
-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
 	{"allow_dio", &adio_fops},
 	{"debug", &debug_fops},
 	{"def_reserved_size", &dressz_fops},
@@ -2332,7 +2347,7 @@
 	if (!sg_proc_sgp)
 		return 1;
 	for (k = 0; k < num_leaves; ++k) {
-		struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
+		const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
 		umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
 		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
 	}
@@ -2533,9 +2548,9 @@
 			   fp->reserve.bufflen,
 			   (int) fp->reserve.k_use_sg,
 			   (int) fp->low_dma);
-		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
+		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
 			   (int) fp->cmd_q, (int) fp->force_packid,
-			   (int) fp->keep_orphan, (int) fp->closed);
+			   (int) fp->keep_orphan);
 		for (m = 0, srp = fp->headrp;
 				srp != NULL;
 				++m, srp = srp->nextrp) {
@@ -2612,7 +2627,7 @@
 			     scsidp->lun,
 			     scsidp->host->hostt->emulated);
 		seq_printf(s, " sg_tablesize=%d excl=%d\n",
-			   sdp->sg_tablesize, sdp->exclude);
+			   sdp->sg_tablesize, get_exclude(sdp));
 		sg_proc_debug_helper(s, sdp);
 	}
 	read_unlock_irqrestore(&sg_index_lock, iflags);
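
The sg.c conversion from the open-coded __wait_event_interruptible() loops to wait_event_interruptible() depends on reading the done flag under rq_list_lock, so the helper can serve directly as the wait condition. In sketch form, with illustrative types:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct request_state {
	rwlock_t lock;
	char done;
	wait_queue_head_t wait;
};

static int request_done(struct request_state *r)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&r->lock, flags);
	ret = r->done;
	read_unlock_irqrestore(&r->lock, flags);
	return ret;
}

/*
 * Caller side, roughly:
 *	rc = wait_event_interruptible(r->wait, request_done(r));
 *	if (rc)
 *		return rc;	(-ERESTARTSYS: a signal hit the process)
 */
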
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index ea35632..b548923 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -35,8 +35,8 @@
 /* The tape buffer descriptor. */
 struct st_buffer {
 	unsigned char dma;	/* DMA-able buffer */
-	unsigned char do_dio;   /* direct i/o set up? */
 	unsigned char cleared;  /* internal buffer cleared after open? */
+	unsigned short do_dio;  /* direct i/o set up? */
 	int buffer_size;
 	int buffer_blocks;
 	int buffer_bytes;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 83a1972..528d52b 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -785,12 +785,22 @@
 	/*
 	 * If there is an error; offline the device since all
 	 * error recovery strategies would have already been
-	 * deployed on the host side.
+	 * deployed on the host side. However, if the command
+	 * was a pass-through command, handle it appropriately.
 	 */
-	if (vm_srb->srb_status == SRB_STATUS_ERROR)
-		scmnd->result = DID_TARGET_FAILURE << 16;
-	else
-		scmnd->result = vm_srb->scsi_status;
+	scmnd->result = vm_srb->scsi_status;
+
+	if (vm_srb->srb_status == SRB_STATUS_ERROR) {
+		switch (scmnd->cmnd[0]) {
+		case ATA_16:
+		case ATA_12:
+			set_host_byte(scmnd, DID_PASSTHROUGH);
+			break;
+		default:
+			set_host_byte(scmnd, DID_TARGET_FAILURE);
+		}
+	}
+
 
 	/*
 	 * If the LUN is invalid; remove the device.
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 52b96e8..4e010b7 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1032,11 +1032,11 @@
 		return -EIO;
 
 	/* Configure UTRL and UTMRL base address registers */
-	writel(hba->utrdl_dma_addr,
-	       (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
 	writel(lower_32_bits(hba->utrdl_dma_addr),
+	       (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
+	writel(upper_32_bits(hba->utrdl_dma_addr),
 	       (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
-	writel(hba->utmrdl_dma_addr,
+	writel(lower_32_bits(hba->utmrdl_dma_addr),
 	       (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
 	writel(upper_32_bits(hba->utmrdl_dma_addr),
 	       (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
@@ -1160,7 +1160,7 @@
 		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
 		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
 
-		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL ||
+		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
 		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
 			task_result = FAILED;
 	} else {
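
The ufshcd fix above programs each 64-bit list base address as a lower/upper pair of 32-bit register writes instead of writing the raw dma_addr_t into the low register. The pattern, with placeholder register offsets:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define LIST_BASE_L	0x70	/* placeholder offsets, not the UFSHCI map */
#define LIST_BASE_H	0x74

static void program_list_base(void __iomem *mmio, dma_addr_t addr)
{
	writel(lower_32_bits(addr), mmio + LIST_BASE_L);
	writel(upper_32_bits(addr), mmio + LIST_BASE_H);
}
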
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index efccd72..1b38431 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -175,7 +175,8 @@
 
 	if (cmd->comp)
 		complete_all(cmd->comp);
-	mempool_free(cmd, virtscsi_cmd_pool);
+	else
+		mempool_free(cmd, virtscsi_cmd_pool);
 }
 
 static void virtscsi_ctrl_done(struct virtqueue *vq)
@@ -311,21 +312,22 @@
 static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
 {
 	DECLARE_COMPLETION_ONSTACK(comp);
-	int ret;
+	int ret = FAILED;
 
 	cmd->comp = &comp;
-	ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
-			       sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
-			       GFP_NOIO);
-	if (ret < 0)
-		return FAILED;
+	if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
+			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
+			      GFP_NOIO) < 0)
+		goto out;
 
 	wait_for_completion(&comp);
-	if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK &&
-	    cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
-		return FAILED;
+	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
+	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
+		ret = SUCCESS;
 
-	return SUCCESS;
+out:
+	mempool_free(cmd, virtscsi_cmd_pool);
+	return ret;
 }
 
 static int virtscsi_device_reset(struct scsi_cmnd *sc)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3ed7483..00c0240 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -74,7 +74,7 @@
 	  This selects a driver for the Atmel SPI Controller, present on
 	  many AT32 (AVR32) and AT91 (ARM) chips.
 
-config SPI_BFIN
+config SPI_BFIN5XX
 	tristate "SPI controller driver for ADI Blackfin5xx"
 	depends on BLACKFIN
 	help
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index a1d48e0..9d75d21 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -15,7 +15,7 @@
 obj-$(CONFIG_SPI_ATH79)			+= spi-ath79.o
 obj-$(CONFIG_SPI_AU1550)		+= spi-au1550.o
 obj-$(CONFIG_SPI_BCM63XX)		+= spi-bcm63xx.o
-obj-$(CONFIG_SPI_BFIN)			+= spi-bfin5xx.o
+obj-$(CONFIG_SPI_BFIN5XX)		+= spi-bfin5xx.o
 obj-$(CONFIG_SPI_BFIN_SPORT)		+= spi-bfin-sport.o
 obj-$(CONFIG_SPI_BITBANG)		+= spi-bitbang.o
 obj-$(CONFIG_SPI_BUTTERFLY)		+= spi-butterfly.o
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index f01b264..7491971 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -1,7 +1,7 @@
 /*
  * Broadcom BCM63xx SPI controller support
  *
- * Copyright (C) 2009-2011 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2009-2012 Florian Fainelli <florian@openwrt.org>
  * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
  *
  * This program is free software; you can redistribute it and/or
@@ -30,6 +30,8 @@
 #include <linux/spi/spi.h>
 #include <linux/completion.h>
 #include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
 
 #include <bcm63xx_dev_spi.h>
 
@@ -37,8 +39,6 @@
 #define DRV_VER		"0.1.2"
 
 struct bcm63xx_spi {
-	spinlock_t		lock;
-	int			stopping;
 	struct completion	done;
 
 	void __iomem		*regs;
@@ -96,17 +96,12 @@
 	{   391000, SPI_CLK_0_391MHZ }
 };
 
-static int bcm63xx_spi_setup_transfer(struct spi_device *spi,
-				      struct spi_transfer *t)
+static int bcm63xx_spi_check_transfer(struct spi_device *spi,
+					struct spi_transfer *t)
 {
-	struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
 	u8 bits_per_word;
-	u8 clk_cfg, reg;
-	u32 hz;
-	int i;
 
 	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
-	hz = (t) ? t->speed_hz : spi->max_speed_hz;
 	if (bits_per_word != 8) {
 		dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
 			__func__, bits_per_word);
@@ -119,6 +114,19 @@
 		return -EINVAL;
 	}
 
+	return 0;
+}
+
+static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
+				      struct spi_transfer *t)
+{
+	struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+	u32 hz;
+	u8 clk_cfg, reg;
+	int i;
+
+	hz = (t) ? t->speed_hz : spi->max_speed_hz;
+
 	/* Find the closest clock configuration */
 	for (i = 0; i < SPI_CLK_MASK; i++) {
 		if (hz <= bcm63xx_spi_freq_table[i][0]) {
@@ -139,8 +147,6 @@
 	bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
 	dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
 		clk_cfg, hz);
-
-	return 0;
 }
 
 /* the spi->mode bits understood by this driver: */
@@ -153,9 +159,6 @@
 
 	bs = spi_master_get_devdata(spi->master);
 
-	if (bs->stopping)
-		return -ESHUTDOWN;
-
 	if (!spi->bits_per_word)
 		spi->bits_per_word = 8;
 
@@ -165,7 +168,7 @@
 		return -EINVAL;
 	}
 
-	ret = bcm63xx_spi_setup_transfer(spi, NULL);
+	ret = bcm63xx_spi_check_transfer(spi, NULL);
 	if (ret < 0) {
 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
 			spi->mode & ~MODEBITS);
@@ -190,28 +193,29 @@
 	bs->remaining_bytes -= size;
 }
 
-static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
+					struct spi_transfer *t)
 {
 	struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
 	u16 msg_ctl;
 	u16 cmd;
 
+	/* Disable the CMD_DONE interrupt */
+	bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+
 	dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
 		t->tx_buf, t->rx_buf, t->len);
 
 	/* Transmitter is inhibited */
 	bs->tx_ptr = t->tx_buf;
 	bs->rx_ptr = t->rx_buf;
-	init_completion(&bs->done);
 
 	if (t->tx_buf) {
 		bs->remaining_bytes = t->len;
 		bcm63xx_spi_fill_tx_fifo(bs);
 	}
 
-	/* Enable the command done interrupt which
-	 * we use to determine completion of a command */
-	bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
+	init_completion(&bs->done);
 
 	/* Fill in the Message control register */
 	msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
@@ -230,33 +234,76 @@
 	cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
 	cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
 	bcm_spi_writew(bs, cmd, SPI_CMD);
-	wait_for_completion(&bs->done);
 
-	/* Disable the CMD_DONE interrupt */
-	bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+	/* Enable the CMD_DONE interrupt */
+	bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
 
 	return t->len - bs->remaining_bytes;
 }
 
-static int bcm63xx_transfer(struct spi_device *spi, struct spi_message *m)
+static int bcm63xx_spi_prepare_transfer(struct spi_master *master)
 {
-	struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+	struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+
+	pm_runtime_get_sync(&bs->pdev->dev);
+
+	return 0;
+}
+
+static int bcm63xx_spi_unprepare_transfer(struct spi_master *master)
+{
+	struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+
+	pm_runtime_put(&bs->pdev->dev);
+
+	return 0;
+}
+
+static int bcm63xx_spi_transfer_one(struct spi_master *master,
+					struct spi_message *m)
+{
+	struct bcm63xx_spi *bs = spi_master_get_devdata(master);
 	struct spi_transfer *t;
-	int ret = 0;
-
-	if (unlikely(list_empty(&m->transfers)))
-		return -EINVAL;
-
-	if (bs->stopping)
-		return -ESHUTDOWN;
+	struct spi_device *spi = m->spi;
+	int status = 0;
+	unsigned int timeout = 0;
 
 	list_for_each_entry(t, &m->transfers, transfer_list) {
-		ret += bcm63xx_txrx_bufs(spi, t);
+		unsigned int len = t->len;
+		u8 rx_tail;
+
+		status = bcm63xx_spi_check_transfer(spi, t);
+		if (status < 0)
+			goto exit;
+
+		/* configure adapter for a new transfer */
+		bcm63xx_spi_setup_transfer(spi, t);
+
+		while (len) {
+			/* send the data */
+			len -= bcm63xx_txrx_bufs(spi, t);
+
+			timeout = wait_for_completion_timeout(&bs->done, HZ);
+			if (!timeout) {
+				status = -ETIMEDOUT;
+				goto exit;
+			}
+
+			/* read out all data */
+			rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
+			/* number of bytes received into the RX FIFO */
+			/* Read out all the data */
+			if (rx_tail)
+				memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
+		}
+
+		m->actual_length += t->len;
 	}
+exit:
+	m->status = status;
+	spi_finalize_current_message(master);
 
-	m->complete(m->context);
-
-	return ret;
+	return 0;
 }
 
 /* This driver supports single master mode only. Hence
@@ -267,39 +314,15 @@
 	struct spi_master *master = (struct spi_master *)dev_id;
 	struct bcm63xx_spi *bs = spi_master_get_devdata(master);
 	u8 intr;
-	u16 cmd;
 
 	/* Read interupts and clear them immediately */
 	intr = bcm_spi_readb(bs, SPI_INT_STATUS);
 	bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
 	bcm_spi_writeb(bs, 0, SPI_INT_MASK);
 
-	/* A tansfer completed */
-	if (intr & SPI_INTR_CMD_DONE) {
-		u8 rx_tail;
-
-		rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
-
-		/* Read out all the data */
-		if (rx_tail)
-			memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
-
-		/* See if there is more data to send */
-		if (bs->remaining_bytes > 0) {
-			bcm63xx_spi_fill_tx_fifo(bs);
-
-			/* Start the transfer */
-			bcm_spi_writew(bs, SPI_HD_W << SPI_MSG_TYPE_SHIFT,
-				       SPI_MSG_CTL);
-			cmd = bcm_spi_readw(bs, SPI_CMD);
-			cmd |= SPI_CMD_START_IMMEDIATE;
-			cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
-			bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
-			bcm_spi_writew(bs, cmd, SPI_CMD);
-		} else {
-			complete(&bs->done);
-		}
-	}
+	/* A transfer completed */
+	if (intr & SPI_INTR_CMD_DONE)
+		complete(&bs->done);
 
 	return IRQ_HANDLED;
 }
@@ -345,7 +368,6 @@
 	}
 
 	bs = spi_master_get_devdata(master);
-	init_completion(&bs->done);
 
 	platform_set_drvdata(pdev, master);
 	bs->pdev = pdev;
@@ -379,12 +401,13 @@
 	master->bus_num = pdata->bus_num;
 	master->num_chipselect = pdata->num_chipselect;
 	master->setup = bcm63xx_spi_setup;
-	master->transfer = bcm63xx_transfer;
+	master->prepare_transfer_hardware = bcm63xx_spi_prepare_transfer;
+	master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer;
+	master->transfer_one_message = bcm63xx_spi_transfer_one;
+	master->mode_bits = MODEBITS;
 	bs->speed_hz = pdata->speed_hz;
-	bs->stopping = 0;
 	bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
 	bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
-	spin_lock_init(&bs->lock);
 
 	/* Initialize hardware */
 	clk_enable(bs->clk);
@@ -418,18 +441,16 @@
 	struct spi_master *master = platform_get_drvdata(pdev);
 	struct bcm63xx_spi *bs = spi_master_get_devdata(master);
 
+	spi_unregister_master(master);
+
 	/* reset spi block */
 	bcm_spi_writeb(bs, 0, SPI_INT_MASK);
-	spin_lock(&bs->lock);
-	bs->stopping = 1;
 
 	/* HW shutdown */
 	clk_disable(bs->clk);
 	clk_put(bs->clk);
 
-	spin_unlock(&bs->lock);
 	platform_set_drvdata(pdev, 0);
-	spi_unregister_master(master);
 
 	return 0;
 }
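
The spi-bcm63xx rework above drops the driver's private transfer() path in favor of the SPI core's message queue: prepare/unprepare hardware hooks for runtime PM plus a transfer_one_message() callback that always ends in spi_finalize_current_message(). A bare-bones sketch of that wiring, with placeholder callback bodies:

#include <linux/spi/spi.h>

static int sketch_prepare(struct spi_master *master)
{
	/* e.g. pm_runtime_get_sync() on the controller device */
	return 0;
}

static int sketch_unprepare(struct spi_master *master)
{
	/* e.g. pm_runtime_put() on the controller device */
	return 0;
}

static int sketch_transfer_one(struct spi_master *master,
			       struct spi_message *m)
{
	struct spi_transfer *t;
	int status = 0;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		/* push t->tx_buf / drain into t->rx_buf here */
		m->actual_length += t->len;
	}

	m->status = status;
	spi_finalize_current_message(master);	/* always complete the msg */
	return 0;
}

/*
 * probe() wiring, roughly:
 *	master->prepare_transfer_hardware = sketch_prepare;
 *	master->unprepare_transfer_hardware = sketch_unprepare;
 *	master->transfer_one_message = sketch_transfer_one;
 */
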
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 248a2cc..1fe5119 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -252,19 +252,15 @@
 bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
 {
 	struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
-	unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15);
 
 	bfin_sport_spi_disable(drv_data);
 	dev_dbg(drv_data->dev, "restoring spi ctl state\n");
 
 	bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
-	bfin_write(&drv_data->regs->tcr2, bits);
 	bfin_write(&drv_data->regs->tclkdiv, chip->baud);
-	bfin_write(&drv_data->regs->tfsdiv, bits);
 	SSYNC();
 
 	bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
-	bfin_write(&drv_data->regs->rcr2, bits);
 	SSYNC();
 
 	bfin_sport_spi_cs_active(chip);
@@ -420,11 +416,15 @@
 	drv_data->cs_change = transfer->cs_change;
 
 	/* Bits per word setup */
-	bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
-	if (bits_per_word == 8)
-		drv_data->ops = &bfin_sport_transfer_ops_u8;
-	else
+	bits_per_word = transfer->bits_per_word ? :
+		message->spi->bits_per_word ? : 8;
+	if (bits_per_word % 16 == 0)
 		drv_data->ops = &bfin_sport_transfer_ops_u16;
+	else
+		drv_data->ops = &bfin_sport_transfer_ops_u8;
+	bfin_write(&drv_data->regs->tcr2, bits_per_word - 1);
+	bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1);
+	bfin_write(&drv_data->regs->rcr2, bits_per_word - 1);
 
 	drv_data->state = RUNNING_STATE;
 
@@ -598,11 +598,12 @@
 			}
 			chip->cs_chg_udelay = chip_info->cs_chg_udelay;
 			chip->idle_tx_val = chip_info->idle_tx_val;
-			spi->bits_per_word = chip_info->bits_per_word;
 		}
 	}
 
-	if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
+	if (spi->bits_per_word % 8) {
+		dev_err(&spi->dev, "%d bits_per_word is not supported\n",
+				spi->bits_per_word);
 		ret = -EINVAL;
 		goto error;
 	}
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 3b83ff8..9bb4d4a 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -396,7 +396,7 @@
 		/* last read */
 		if (drv_data->rx) {
 			dev_dbg(&drv_data->pdev->dev, "last read\n");
-			if (n_bytes % 2) {
+			if (!(n_bytes % 2)) {
 				u16 *buf = (u16 *)drv_data->rx;
 				for (loop = 0; loop < n_bytes / 2; loop++)
 					*buf++ = bfin_read(&drv_data->regs->rdbr);
@@ -424,7 +424,7 @@
 	if (drv_data->rx && drv_data->tx) {
 		/* duplex */
 		dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n");
-		if (n_bytes % 2) {
+		if (!(n_bytes % 2)) {
 			u16 *buf = (u16 *)drv_data->rx;
 			u16 *buf2 = (u16 *)drv_data->tx;
 			for (loop = 0; loop < n_bytes / 2; loop++) {
@@ -442,7 +442,7 @@
 	} else if (drv_data->rx) {
 		/* read */
 		dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n");
-		if (n_bytes % 2) {
+		if (!(n_bytes % 2)) {
 			u16 *buf = (u16 *)drv_data->rx;
 			for (loop = 0; loop < n_bytes / 2; loop++) {
 				*buf++ = bfin_read(&drv_data->regs->rdbr);
@@ -458,7 +458,7 @@
 	} else if (drv_data->tx) {
 		/* write */
 		dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n");
-		if (n_bytes % 2) {
+		if (!(n_bytes % 2)) {
 			u16 *buf = (u16 *)drv_data->tx;
 			for (loop = 0; loop < n_bytes / 2; loop++) {
 				bfin_read(&drv_data->regs->rdbr);
@@ -587,6 +587,7 @@
 	if (message->state == DONE_STATE) {
 		dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n");
 		message->status = 0;
+		bfin_spi_flush(drv_data);
 		bfin_spi_giveback(drv_data);
 		return;
 	}
@@ -870,8 +871,10 @@
 		message->actual_length += drv_data->len_in_bytes;
 		/* Move to next transfer of this msg */
 		message->state = bfin_spi_next_transfer(drv_data);
-		if (drv_data->cs_change)
+		if (drv_data->cs_change && message->state != DONE_STATE) {
+			bfin_spi_flush(drv_data);
 			bfin_spi_cs_deactive(drv_data, chip);
+		}
 	}
 
 	/* Schedule next transfer tasklet */
@@ -1026,7 +1029,6 @@
 		chip->cs_chg_udelay = chip_info->cs_chg_udelay;
 		chip->idle_tx_val = chip_info->idle_tx_val;
 		chip->pio_interrupt = chip_info->pio_interrupt;
-		spi->bits_per_word = chip_info->bits_per_word;
 	} else {
 		/* force a default base state */
 		chip->ctl_reg &= bfin_ctl_reg;
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 6db2887..e805507 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -545,13 +545,12 @@
  * in case of failure.
  */
 static struct dma_async_tx_descriptor *
-ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
+ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
 {
 	struct spi_transfer *t = espi->current_msg->state;
 	struct dma_async_tx_descriptor *txd;
 	enum dma_slave_buswidth buswidth;
 	struct dma_slave_config conf;
-	enum dma_transfer_direction slave_dirn;
 	struct scatterlist *sg;
 	struct sg_table *sgt;
 	struct dma_chan *chan;
@@ -567,14 +566,13 @@
 	memset(&conf, 0, sizeof(conf));
 	conf.direction = dir;
 
-	if (dir == DMA_FROM_DEVICE) {
+	if (dir == DMA_DEV_TO_MEM) {
 		chan = espi->dma_rx;
 		buf = t->rx_buf;
 		sgt = &espi->rx_sgt;
 
 		conf.src_addr = espi->sspdr_phys;
 		conf.src_addr_width = buswidth;
-		slave_dirn = DMA_DEV_TO_MEM;
 	} else {
 		chan = espi->dma_tx;
 		buf = t->tx_buf;
@@ -582,7 +580,6 @@
 
 		conf.dst_addr = espi->sspdr_phys;
 		conf.dst_addr_width = buswidth;
-		slave_dirn = DMA_MEM_TO_DEV;
 	}
 
 	ret = dmaengine_slave_config(chan, &conf);
@@ -633,8 +630,7 @@
 	if (!nents)
 		return ERR_PTR(-ENOMEM);
 
-	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents,
-					slave_dirn, DMA_CTRL_ACK);
+	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
 	if (!txd) {
 		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
 		return ERR_PTR(-ENOMEM);
@@ -651,12 +647,12 @@
  * unmapped.
  */
 static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
-				  enum dma_data_direction dir)
+				  enum dma_transfer_direction dir)
 {
 	struct dma_chan *chan;
 	struct sg_table *sgt;
 
-	if (dir == DMA_FROM_DEVICE) {
+	if (dir == DMA_DEV_TO_MEM) {
 		chan = espi->dma_rx;
 		sgt = &espi->rx_sgt;
 	} else {
@@ -677,16 +673,16 @@
 	struct spi_message *msg = espi->current_msg;
 	struct dma_async_tx_descriptor *rxd, *txd;
 
-	rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
+	rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
 	if (IS_ERR(rxd)) {
 		dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
 		msg->status = PTR_ERR(rxd);
 		return;
 	}
 
-	txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
+	txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
 	if (IS_ERR(txd)) {
-		ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+		ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
 		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
 		msg->status = PTR_ERR(txd);
 		return;
@@ -705,8 +701,8 @@
 
 	wait_for_completion(&espi->wait);
 
-	ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
-	ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+	ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
+	ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
 }
 
 /**
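
With the ep93xx change above, a single enum dma_transfer_direction value now selects the DMA channel, fills the slave config, and is passed unchanged to dmaengine_prep_slave_sg(), instead of being translated from enum dma_data_direction. A small userspace model of that selection, using stand-in types rather than the dmaengine structures:

    #include <stdio.h>

    /* Stand-ins for the dmaengine direction values; not the kernel enum itself. */
    enum dma_transfer_direction { DMA_MEM_TO_DEV, DMA_DEV_TO_MEM };

    struct chan_conf {
        const char *chan;       /* which DMA channel is used          */
        const char *addr_field; /* which config field gets sspdr_phys */
    };

    /* One direction value drives the channel choice and the slave config;
     * the same value would then be handed to dmaengine_prep_slave_sg(). */
    static struct chan_conf prepare(enum dma_transfer_direction dir)
    {
        struct chan_conf c;

        if (dir == DMA_DEV_TO_MEM) {
            c.chan = "dma_rx";
            c.addr_field = "src_addr";
        } else {
            c.chan = "dma_tx";
            c.addr_field = "dst_addr";
        }
        return c;
    }

    int main(void)
    {
        enum dma_transfer_direction dirs[] = { DMA_DEV_TO_MEM, DMA_MEM_TO_DEV };
        int i;

        for (i = 0; i < 2; i++) {
            struct chan_conf c = prepare(dirs[i]);

            printf("dir=%d -> chan=%s, conf.%s = sspdr_phys\n",
                   dirs[i], c.chan, c.addr_field);
        }
        return 0;
    }
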
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 09c925a..400ae21 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1667,9 +1667,15 @@
 	/* cpsdvsr = 254 & scr = 255 */
 	min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);
 
-	if (!((freq <= max_tclk) && (freq >= min_tclk))) {
+	if (freq > max_tclk)
+		dev_warn(&pl022->adev->dev,
+			"Max speed that can be programmed is %d Hz, you requested %d\n",
+			max_tclk, freq);
+
+	if (freq < min_tclk) {
 		dev_err(&pl022->adev->dev,
-			"controller data is incorrect: out of range frequency");
+			"Requested frequency: %d Hz is less than minimum possible %d Hz\n",
+			freq, min_tclk);
 		return -EINVAL;
 	}
 
@@ -1681,26 +1687,37 @@
 		while (scr <= SCR_MAX) {
 			tmp = spi_rate(rate, cpsdvsr, scr);
 
-			if (tmp > freq)
+			if (tmp > freq) {
+				/* we need lower freq */
 				scr++;
+				continue;
+			}
+
 			/*
-			 * If found exact value, update and break.
-			 * If found more closer value, update and continue.
+			 * If the exact value is found, mark it found and break.
+			 * If a closer value is found, update and break.
 			 */
-			else if ((tmp == freq) || (tmp > best_freq)) {
+			if (tmp > best_freq) {
 				best_freq = tmp;
 				best_cpsdvsr = cpsdvsr;
 				best_scr = scr;
 
 				if (tmp == freq)
-					break;
+					found = 1;
 			}
-			scr++;
+			/*
+			 * An increased scr would only give lower rates, which
+			 * are not required.
+			 */
+			break;
 		}
 		cpsdvsr += 2;
 		scr = SCR_MIN;
 	}
 
+	WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate\n",
+			freq);
+
 	clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
 	clk_freq->scr = (u8) (best_scr & 0xFF);
 	dev_dbg(&pl022->adev->dev,
@@ -1823,9 +1840,12 @@
 	} else
 		chip->cs_control = chip_info->cs_control;
 
-	if (bits <= 3) {
-		/* PL022 doesn't support less than 4-bits */
+	/* Check bits per word with vendor specific range */
+	if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
 		status = -ENOTSUPP;
+		dev_err(&spi->dev, "illegal data size for this controller!\n");
+		dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
+				pl022->vendor->max_bpw);
 		goto err_config_params;
 	} else if (bits <= 8) {
 		dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
@@ -1838,20 +1858,10 @@
 		chip->read = READING_U16;
 		chip->write = WRITING_U16;
 	} else {
-		if (pl022->vendor->max_bpw >= 32) {
-			dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
-			chip->n_bytes = 4;
-			chip->read = READING_U32;
-			chip->write = WRITING_U32;
-		} else {
-			dev_err(&spi->dev,
-				"illegal data size for this controller!\n");
-			dev_err(&spi->dev,
-				"a standard pl022 can only handle "
-				"1 <= n <= 16 bit words\n");
-			status = -ENOTSUPP;
-			goto err_config_params;
-		}
+		dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
+		chip->n_bytes = 4;
+		chip->read = READING_U32;
+		chip->write = WRITING_U32;
 	}
 
 	/* Now Initialize all register settings required for this chip */
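
The reworked pl022 search above stops stepping scr as soon as the candidate rate drops to or below the request, since larger scr values can only lower it further, and keeps the closest rate seen so far. A standalone model of that search, assuming the usual PL022 relation rate = sspclk / (cpsdvsr * (1 + scr)) with even cpsdvsr in [2, 254] and scr in [0, 255]:

    #include <stdio.h>

    #define CPSDVR_MIN 2
    #define CPSDVR_MAX 254
    #define SCR_MIN    0
    #define SCR_MAX    255

    /* Assumed PL022 effective-rate formula: sspclk / (cpsdvsr * (1 + scr)). */
    static unsigned int spi_rate(unsigned int rate, unsigned int cpsdvsr,
                                 unsigned int scr)
    {
        return rate / (cpsdvsr * (1 + scr));
    }

    /* Best-effort search mirroring the driver: pick the highest achievable
     * rate that does not exceed the requested frequency. */
    static int find_divisors(unsigned int rate, unsigned int freq,
                             unsigned int *best_cpsdvsr, unsigned int *best_scr)
    {
        unsigned int best_freq = 0, cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
        int found = 0;

        while ((cpsdvsr <= CPSDVR_MAX) && !found) {
            while (scr <= SCR_MAX) {
                unsigned int tmp = spi_rate(rate, cpsdvsr, scr);

                if (tmp > freq) {
                    /* still too fast, we need a lower rate */
                    scr++;
                    continue;
                }
                if (tmp > best_freq) {
                    best_freq = tmp;
                    *best_cpsdvsr = cpsdvsr;
                    *best_scr = scr;
                    if (tmp == freq)
                        found = 1;
                }
                /* a larger scr only lowers the rate further */
                break;
            }
            cpsdvsr += 2;
            scr = SCR_MIN;
        }
        return best_freq ? 0 : -1;
    }

    int main(void)
    {
        unsigned int cpsdvsr = 0, scr = 0;

        if (!find_divisors(48000000, 1000000, &cpsdvsr, &scr))
            printf("cpsdvsr=%u scr=%u -> %u Hz\n", cpsdvsr, scr,
                   spi_rate(48000000, cpsdvsr, scr));
        return 0;
    }
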
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 400df8c..d91751f 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -36,6 +36,7 @@
 #include <linux/prefetch.h>
 #include <linux/ratelimit.h>
 #include <linux/smp.h>
+#include <linux/interrupt.h>
 #include <net/dst.h>
 #ifdef CONFIG_XFRM
 #include <linux/xfrm.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 56d74dc..5877b2c 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -32,6 +32,7 @@
 #include <linux/ip.h>
 #include <linux/ratelimit.h>
 #include <linux/string.h>
+#include <linux/interrupt.h>
 #include <net/dst.h>
 #ifdef CONFIG_XFRM
 #include <linux/xfrm.h>
@@ -344,7 +345,7 @@
 	}
 	if (unlikely
 	    (skb->truesize !=
-	     sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
+	     sizeof(*skb) + skb_end_offset(skb))) {
 		/*
 		   printk("TX buffer truesize has been changed\n");
 		 */
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 9112cd8..60cba81 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -31,6 +31,7 @@
 #include <linux/etherdevice.h>
 #include <linux/phy.h>
 #include <linux/slab.h>
+#include <linux/interrupt.h>
 
 #include <net/dst.h>
 
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index 2b45d3d..04cd57f 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -383,8 +383,6 @@
 		pd->tx_pool = &f->link;
 		pd->tx_pool_count++;
 		f = 0;
-	} else {
-		kfree(f);
 	}
 	spin_unlock_bh(&pd->tx_frame_lock);
 	if (f)
diff --git a/drivers/staging/ramster/cluster/tcp.c b/drivers/staging/ramster/cluster/tcp.c
index 3af1b2c..b9721c1 100644
--- a/drivers/staging/ramster/cluster/tcp.c
+++ b/drivers/staging/ramster/cluster/tcp.c
@@ -2106,7 +2106,7 @@
 	r2net_listen_sock = sock;
 	INIT_WORK(&r2net_listen_work, r2net_accept_many);
 
-	sock->sk->sk_reuse = 1;
+	sock->sk->sk_reuse = SK_CAN_REUSE;
 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
 	if (ret < 0) {
 		printk(KERN_ERR "ramster: Error %d while binding socket at "
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 7862513..9cf29fc 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -79,10 +79,6 @@
 #define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
 #define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
 
-#define OMAP343X_CTRL_REGADDR(reg) \
-	OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
-
-
 /* Forward Declarations: */
 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
 static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
@@ -418,19 +414,27 @@
 
 		/* Assert RST1 i.e only the RST only for DSP megacell */
 		if (!status) {
+			/*
+			 * XXX: this ioremapping MUST be removed once the
+			 * ctrl function is made available.
+			 */
+			void __iomem *ctrl = ioremap(OMAP343X_CTRL_BASE, SZ_4K);
+			if (!ctrl)
+				return -ENOMEM;
+
 			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
 					OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
 					OMAP2_RM_RSTCTRL);
 			/* Mask address with 1K for compatibility */
 			__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
-					OMAP343X_CTRL_REGADDR(
-					OMAP343X_CONTROL_IVA2_BOOTADDR));
+					ctrl + OMAP343X_CONTROL_IVA2_BOOTADDR);
 			/*
 			 * Set bootmode to self loop if dsp_debug flag is true
 			 */
 			__raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
-					OMAP343X_CTRL_REGADDR(
-					OMAP343X_CONTROL_IVA2_BOOTMOD));
+					ctrl + OMAP343X_CONTROL_IVA2_BOOTMOD);
+
+			iounmap(ctrl);
 		}
 	}
 	if (!status) {
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
index 70055c8..870f934 100644
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -53,7 +53,10 @@
 	int ret = 0;
 
 	dsp_wdt.sm_wdt = NULL;
-	dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE);
+	dsp_wdt.reg_base = ioremap(OMAP34XX_WDT3_BASE, SZ_4K);
+	if (!dsp_wdt.reg_base)
+		return -ENOMEM;
+
 	tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0);
 
 	dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
@@ -99,6 +102,9 @@
 	dsp_wdt.fclk = NULL;
 	dsp_wdt.iclk = NULL;
 	dsp_wdt.sm_wdt = NULL;
+
+	if (dsp_wdt.reg_base)
+		iounmap(dsp_wdt.reg_base);
 	dsp_wdt.reg_base = NULL;
 }
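
Both tidspbridge changes above replace a fixed OMAP2_L4_IO_ADDRESS() mapping with an explicit ioremap()/iounmap() pair and return -ENOMEM when the mapping cannot be set up. A minimal kernel-module sketch of that pattern; the base address and size below are placeholders, not the driver's actual OMAP3 register blocks:

    #include <linux/module.h>
    #include <linux/io.h>

    /* Placeholder register block for illustration only; the real code maps
     * OMAP34XX_WDT3_BASE and the OMAP343X control registers. */
    #define EXAMPLE_REG_BASE 0x48314000
    #define EXAMPLE_REG_SIZE 0x1000

    static void __iomem *reg_base;

    static int __init example_init(void)
    {
        /* Map the registers explicitly instead of relying on a fixed
         * virtual mapping, and fail cleanly when no mapping is available. */
        reg_base = ioremap(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE);
        if (!reg_base)
            return -ENOMEM;

        return 0;
    }

    static void __exit example_exit(void)
    {
        if (reg_base)
            iounmap(reg_base);
        reg_base = NULL;
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
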
 
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 3ed2c8f..7048e01 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -2,7 +2,7 @@
 	bool "Dynamic compression of swap pages and clean pagecache pages"
 	# X86 dependency is because zsmalloc uses non-portable pte/tlb
 	# functions
-	depends on (CLEANCACHE || FRONTSWAP) && CRYPTO && X86
+	depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y && X86
 	select ZSMALLOC
 	select CRYPTO_LZO
 	default n
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 8b1d5e6..d57d10c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -27,8 +27,10 @@
 #include <asm/unaligned.h>
 #include <scsi/scsi_device.h>
 #include <scsi/iscsi_proto.h>
+#include <scsi/scsi_tcq.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_parameters.h"
@@ -593,7 +595,7 @@
 	kfree(iscsit_global);
 }
 
-int iscsit_add_reject(
+static int iscsit_add_reject(
 	u8 reason,
 	int fail_conn,
 	unsigned char *buf,
@@ -622,7 +624,7 @@
 	}
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	cmd->i_state = ISTATE_SEND_REJECT;
@@ -669,7 +671,7 @@
 
 	if (add_to_conn) {
 		spin_lock_bh(&conn->cmd_lock);
-		list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 		spin_unlock_bh(&conn->cmd_lock);
 	}
 
@@ -685,9 +687,7 @@
 
 /*
  * Map some portion of the allocated scatterlist to an iovec, suitable for
- * kernel sockets to copy data in/out. This handles both pages and slab-allocated
- * buffers, since we have been tricky and mapped t_mem_sg to the buffer in
- * either case (see iscsit_alloc_buffs)
+ * kernel sockets to copy data in/out.
  */
 static int iscsit_map_iovec(
 	struct iscsi_cmd *cmd,
@@ -700,10 +700,9 @@
 	unsigned int page_off;
 
 	/*
-	 * We have a private mapping of the allocated pages in t_mem_sg.
-	 * At this point, we also know each contains a page.
+	 * We know each entry in t_data_sg contains a page.
 	 */
-	sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE];
+	sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
 	page_off = (data_offset % PAGE_SIZE);
 
 	cmd->first_data_sg = sg;
@@ -744,7 +743,7 @@
 	conn->exp_statsn = exp_statsn;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 		spin_lock(&cmd->istate_lock);
 		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
 		    (cmd->stat_sn < exp_statsn)) {
@@ -761,8 +760,7 @@
 
 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
 {
-	u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
-				cmd->se_cmd.t_data_nents;
+	u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
 
 	iov_count += ISCSI_IOV_DATA_BUFFER;
 
@@ -776,64 +774,6 @@
 	return 0;
 }
 
-static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
-{
-	struct scatterlist *sgl;
-	u32 length = cmd->se_cmd.data_length;
-	int nents = DIV_ROUND_UP(length, PAGE_SIZE);
-	int i = 0, j = 0, ret;
-	/*
-	 * If no SCSI payload is present, allocate the default iovecs used for
-	 * iSCSI PDU Header
-	 */
-	if (!length)
-		return iscsit_allocate_iovecs(cmd);
-
-	sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL);
-	if (!sgl)
-		return -ENOMEM;
-
-	sg_init_table(sgl, nents);
-
-	while (length) {
-		int buf_size = min_t(int, length, PAGE_SIZE);
-		struct page *page;
-
-		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-		if (!page)
-			goto page_alloc_failed;
-
-		sg_set_page(&sgl[i], page, buf_size, 0);
-
-		length -= buf_size;
-		i++;
-	}
-
-	cmd->t_mem_sg = sgl;
-	cmd->t_mem_sg_nents = nents;
-
-	/* BIDI ops not supported */
-
-	/* Tell the core about our preallocated memory */
-	transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0);
-	/*
-	 * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd
-	 * so that cmd->se_cmd.t_tasks_se_num has been set.
-	 */
-        ret = iscsit_allocate_iovecs(cmd);
-        if (ret < 0)
-		return -ENOMEM;
-
-	return 0;
-
-page_alloc_failed:
-	while (j < i)
-		__free_page(sg_page(&sgl[j++]));
-
-	kfree(sgl);
-	return -ENOMEM;
-}
-
 static int iscsit_handle_scsi_cmd(
 	struct iscsi_conn *conn,
 	unsigned char *buf)
@@ -842,6 +782,8 @@
 	int	dump_immediate_data = 0, send_check_condition = 0, payload_length;
 	struct iscsi_cmd	*cmd = NULL;
 	struct iscsi_scsi_req *hdr;
+	int iscsi_task_attr;
+	int sam_task_attr;
 
 	spin_lock_bh(&conn->sess->session_stats_lock);
 	conn->sess->cmd_pdus++;
@@ -958,15 +900,30 @@
 			 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
 			  DMA_NONE;
 
-	cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction,
-				(hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK));
+	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 	if (!cmd)
 		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
-					buf, conn);
+					 buf, conn);
 
-	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
-		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
-		hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
+	cmd->data_direction = data_direction;
+	iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
+	/*
+	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
+	 */
+	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
+	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
+		sam_task_attr = MSG_SIMPLE_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
+		sam_task_attr = MSG_ORDERED_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
+		sam_task_attr = MSG_HEAD_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
+		sam_task_attr = MSG_ACA_TAG;
+	else {
+		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
+			" MSG_SIMPLE_TAG\n", iscsi_task_attr);
+		sam_task_attr = MSG_SIMPLE_TAG;
+	}
 
 	cmd->iscsi_opcode	= ISCSI_OP_SCSI_CMD;
 	cmd->i_state		= ISTATE_NEW_CMD;
@@ -1003,6 +960,17 @@
 	}
 
 	/*
+	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+	 */
+	transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops,
+			conn->sess->se_sess, hdr->data_length, cmd->data_direction,
+			sam_task_attr, &cmd->sense_buffer[0]);
+
+	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
+		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
+		hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
+
+	/*
 	 * The CDB is going to an se_device_t.
 	 */
 	ret = transport_lookup_cmd_lun(&cmd->se_cmd,
@@ -1016,13 +984,8 @@
 		send_check_condition = 1;
 		goto attach_cmd;
 	}
-	/*
-	 * The Initiator Node has access to the LUN (the addressing method
-	 * is handled inside of iscsit_get_lun_for_cmd()).  Now it's time to
-	 * allocate 1->N transport tasks (depending on sector count and
-	 * maximum request size the physical HBA(s) can handle.
-	 */
-	transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
+
+	transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
 	if (transport_ret == -ENOMEM) {
 		return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1035,9 +998,7 @@
 		 */
 		send_check_condition = 1;
 	} else {
-		cmd->data_length = cmd->se_cmd.data_length;
-
-		if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
+		if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0)
 			return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
 				1, 1, buf, cmd);
@@ -1045,18 +1006,15 @@
 
 attach_cmd:
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 	/*
 	 * Check if we need to delay processing because of ALUA
 	 * Active/NonOptimized primary access state..
 	 */
 	core_alua_check_nonop_delay(&cmd->se_cmd);
-	/*
-	 * Allocate and setup SGL used with transport_generic_map_mem_to_cmd().
-	 * also call iscsit_allocate_iovecs()
-	 */
-	ret = iscsit_alloc_buffs(cmd);
+
+	ret = iscsit_allocate_iovecs(cmd);
 	if (ret < 0)
 		return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1303,10 +1261,10 @@
 	se_cmd = &cmd->se_cmd;
 	iscsit_mod_dataout_timer(cmd);
 
-	if ((hdr->offset + payload_length) > cmd->data_length) {
+	if ((hdr->offset + payload_length) > cmd->se_cmd.data_length) {
 		pr_err("DataOut Offset: %u, Length %u greater than"
 			" iSCSI Command EDTL %u, protocol error.\n",
-			hdr->offset, payload_length, cmd->data_length);
+			hdr->offset, payload_length, cmd->se_cmd.data_length);
 		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
 				1, 0, buf, cmd);
 	}
@@ -1442,7 +1400,7 @@
 		return 0;
 	else if (ret == DATAOUT_SEND_R2T) {
 		iscsit_set_dataout_sequence_values(cmd);
-		iscsit_build_r2ts_for_cmd(cmd, conn, 0);
+		iscsit_build_r2ts_for_cmd(cmd, conn, false);
 	} else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
 		/*
 		 * Handle extra special case for out of order
@@ -1617,7 +1575,7 @@
 		 * Initiator is expecting a NopIN ping reply,
 		 */
 		spin_lock_bh(&conn->cmd_lock);
-		list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
@@ -1723,10 +1681,75 @@
 	    (hdr->refcmdsn != ISCSI_RESERVED_TAG))
 		hdr->refcmdsn = ISCSI_RESERVED_TAG;
 
-	cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
+	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 	if (!cmd)
 		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-					1, buf, conn);
+					 1, buf, conn);
+
+	cmd->data_direction = DMA_NONE;
+
+	cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
+	if (!cmd->tmr_req) {
+		pr_err("Unable to allocate memory for"
+			" Task Management command!\n");
+		return iscsit_add_reject_from_cmd(
+			ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+			1, 1, buf, cmd);
+	}
+
+	/*
+	 * TASK_REASSIGN for ERL=2 / connection stays inside of
+	 * LIO-Target $FABRIC_MOD
+	 */
+	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+
+		u8 tcm_function;
+		int ret;
+
+		transport_init_se_cmd(&cmd->se_cmd,
+				      &lio_target_fabric_configfs->tf_ops,
+				      conn->sess->se_sess, 0, DMA_NONE,
+				      MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
+
+		switch (function) {
+		case ISCSI_TM_FUNC_ABORT_TASK:
+			tcm_function = TMR_ABORT_TASK;
+			break;
+		case ISCSI_TM_FUNC_ABORT_TASK_SET:
+			tcm_function = TMR_ABORT_TASK_SET;
+			break;
+		case ISCSI_TM_FUNC_CLEAR_ACA:
+			tcm_function = TMR_CLEAR_ACA;
+			break;
+		case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+			tcm_function = TMR_CLEAR_TASK_SET;
+			break;
+		case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+			tcm_function = TMR_LUN_RESET;
+			break;
+		case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+			tcm_function = TMR_TARGET_WARM_RESET;
+			break;
+		case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+			tcm_function = TMR_TARGET_COLD_RESET;
+			break;
+		default:
+			pr_err("Unknown iSCSI TMR Function:"
+			       " 0x%02x\n", function);
+			return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+				1, 1, buf, cmd);
+		}
+
+		ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
+					 tcm_function, GFP_KERNEL);
+		if (ret < 0)
+			return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+				1, 1, buf, cmd);
+
+		cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
+	}
 
 	cmd->iscsi_opcode	= ISCSI_OP_SCSI_TMFUNC;
 	cmd->i_state		= ISTATE_SEND_TASKMGTRSP;
@@ -1804,7 +1827,7 @@
 		se_tmr->call_transport = 1;
 attach:
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -1980,7 +2003,7 @@
 	cmd->data_direction	= DMA_NONE;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
@@ -2168,7 +2191,7 @@
 		logout_remove = 1;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
@@ -2178,7 +2201,7 @@
 	 * Immediate commands are executed, well, immediately.
 	 * Non-Immediate Logout Commands are executed in CmdSN order.
 	 */
-	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+	if (cmd->immediate_cmd) {
 		int ret = iscsit_execute_cmd(cmd, 0);
 
 		if (ret < 0)
@@ -2336,7 +2359,7 @@
 
 	cmd->write_data_done += length;
 
-	if (cmd->write_data_done == cmd->data_length) {
+	if (cmd->write_data_done == cmd->se_cmd.data_length) {
 		spin_lock_bh(&cmd->istate_lock);
 		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
 		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -2381,7 +2404,7 @@
 	cmd->i_state = ISTATE_SEND_ASYNCMSG;
 
 	spin_lock_bh(&conn_p->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list);
 	spin_unlock_bh(&conn_p->cmd_lock);
 
 	iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
@@ -2434,10 +2457,19 @@
 	return 0;
 }
 
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+		wait_for_completion_interruptible_timeout(
+					&conn->tx_half_close_comp,
+					ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
+	}
+}
+
 static int iscsit_send_data_in(
 	struct iscsi_cmd *cmd,
-	struct iscsi_conn *conn,
-	int *eodr)
+	struct iscsi_conn *conn)
 {
 	int iov_ret = 0, set_statsn = 0;
 	u32 iov_count = 0, tx_size = 0;
@@ -2445,6 +2477,8 @@
 	struct iscsi_datain_req *dr;
 	struct iscsi_data_rsp *hdr;
 	struct kvec *iov;
+	int eodr = 0;
+	int ret;
 
 	memset(&datain, 0, sizeof(struct iscsi_datain));
 	dr = iscsit_get_datain_values(cmd, &datain);
@@ -2457,11 +2491,11 @@
 	/*
 	 * Be paranoid and double check the logic for now.
 	 */
-	if ((datain.offset + datain.length) > cmd->data_length) {
+	if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
 		pr_err("Command ITT: 0x%08x, datain.offset: %u and"
 			" datain.length: %u exceeds cmd->data_length: %u\n",
 			cmd->init_task_tag, datain.offset, datain.length,
-				cmd->data_length);
+				cmd->se_cmd.data_length);
 		return -1;
 	}
 
@@ -2577,13 +2611,26 @@
 		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
 		ntohl(hdr->offset), datain.length, conn->cid);
 
+	/* sendpage is preferred but can't insert markers */
+	if (!conn->conn_ops->IFMarker)
+		ret = iscsit_fe_sendpage_sg(cmd, conn);
+	else
+		ret = iscsit_send_tx_data(cmd, conn, 0);
+
+	iscsit_unmap_iovec(cmd);
+
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
 	if (dr->dr_complete) {
-		*eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
+		eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
 				2 : 1;
 		iscsit_free_datain_req(cmd, dr);
 	}
 
-	return 0;
+	return eodr;
 }
 
 static int iscsit_send_logout_response(
@@ -2715,6 +2762,7 @@
 {
 	int tx_size = ISCSI_HDR_LEN;
 	struct iscsi_nopin *hdr;
+	int ret;
 
 	hdr			= (struct iscsi_nopin *) cmd->pdu;
 	memset(hdr, 0, ISCSI_HDR_LEN);
@@ -2747,6 +2795,17 @@
 	pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
 		" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
 
+	ret = iscsit_send_tx_data(cmd, conn, 1);
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
+	spin_lock_bh(&cmd->istate_lock);
+	cmd->i_state = want_response ?
+		ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS;
+	spin_unlock_bh(&cmd->istate_lock);
+
 	return 0;
 }
 
@@ -2837,13 +2896,14 @@
 	return 0;
 }
 
-int iscsit_send_r2t(
+static int iscsit_send_r2t(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn)
 {
 	int tx_size = 0;
 	struct iscsi_r2t *r2t;
 	struct iscsi_r2t_rsp *hdr;
+	int ret;
 
 	r2t = iscsit_get_r2t_from_list(cmd);
 	if (!r2t)
@@ -2899,19 +2959,27 @@
 	r2t->sent_r2t = 1;
 	spin_unlock_bh(&cmd->r2t_lock);
 
+	ret = iscsit_send_tx_data(cmd, conn, 1);
+	if (ret < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		return ret;
+	}
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	iscsit_start_dataout_timer(cmd, conn);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+
 	return 0;
 }
 
 /*
- *	type 0: Normal Operation.
- *	type 1: Called from Storage Transport.
- *	type 2: Called from iscsi_task_reassign_complete_write() for
- *	        connection recovery.
+ *	@recovery: If called from iscsi_task_reassign_complete_write() for
+ *		connection recovery.
  */
 int iscsit_build_r2ts_for_cmd(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn,
-	int type)
+	bool recovery)
 {
 	int first_r2t = 1;
 	u32 offset = 0, xfer_len = 0;
@@ -2922,32 +2990,37 @@
 		return 0;
 	}
 
-	if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2))
-		if (cmd->r2t_offset < cmd->write_data_done)
-			cmd->r2t_offset = cmd->write_data_done;
+	if (conn->sess->sess_ops->DataSequenceInOrder &&
+	    !recovery)
+		cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done);
 
 	while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
 		if (conn->sess->sess_ops->DataSequenceInOrder) {
 			offset = cmd->r2t_offset;
 
-			if (first_r2t && (type == 2)) {
-				xfer_len = ((offset +
-					     (conn->sess->sess_ops->MaxBurstLength -
-					     cmd->next_burst_len) >
-					     cmd->data_length) ?
-					    (cmd->data_length - offset) :
-					    (conn->sess->sess_ops->MaxBurstLength -
-					     cmd->next_burst_len));
+			if (first_r2t && recovery) {
+				int new_data_end = offset +
+					conn->sess->sess_ops->MaxBurstLength -
+					cmd->next_burst_len;
+
+				if (new_data_end > cmd->se_cmd.data_length)
+					xfer_len = cmd->se_cmd.data_length - offset;
+				else
+					xfer_len =
+						conn->sess->sess_ops->MaxBurstLength -
+						cmd->next_burst_len;
 			} else {
-				xfer_len = ((offset +
-					     conn->sess->sess_ops->MaxBurstLength) >
-					     cmd->data_length) ?
-					     (cmd->data_length - offset) :
-					     conn->sess->sess_ops->MaxBurstLength;
+				int new_data_end = offset +
+					conn->sess->sess_ops->MaxBurstLength;
+
+				if (new_data_end > cmd->se_cmd.data_length)
+					xfer_len = cmd->se_cmd.data_length - offset;
+				else
+					xfer_len = conn->sess->sess_ops->MaxBurstLength;
 			}
 			cmd->r2t_offset += xfer_len;
 
-			if (cmd->r2t_offset == cmd->data_length)
+			if (cmd->r2t_offset == cmd->se_cmd.data_length)
 				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
 		} else {
 			struct iscsi_seq *seq;
@@ -3179,6 +3252,8 @@
 	return ret;
 }
 
+#define SENDTARGETS_BUF_LIMIT 32768U
+
 static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 {
 	char *payload = NULL;
@@ -3187,12 +3262,10 @@
 	struct iscsi_tiqn *tiqn;
 	struct iscsi_tpg_np *tpg_np;
 	int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
-	unsigned char buf[256];
+	unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
 
-	buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ?
-			32768 : conn->conn_ops->MaxRecvDataSegmentLength;
-
-	memset(buf, 0, 256);
+	buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
+			 SENDTARGETS_BUF_LIMIT);
 
 	payload = kzalloc(buffer_len, GFP_KERNEL);
 	if (!payload) {
@@ -3408,18 +3481,6 @@
 	return 0;
 }
 
-static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
-{
-	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
-	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
-		wait_for_completion_interruptible_timeout(
-					&conn->tx_half_close_comp,
-					ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
-	}
-}
-
-#ifdef CONFIG_SMP
-
 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
 {
 	struct iscsi_thread_set *ts = conn->thread_set;
@@ -3433,10 +3494,6 @@
 	 * execute upon.
 	 */
 	ord = ts->thread_id % cpumask_weight(cpu_online_mask);
-#if 0
-	pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from"
-			" thread_id: %d\n", ord, ts->thread_id);
-#endif
 	for_each_online_cpu(cpu) {
 		if (ord-- == 0) {
 			cpumask_set_cpu(cpu, conn->conn_cpumask);
@@ -3476,34 +3533,196 @@
 	 */
 	memset(buf, 0, 128);
 	cpumask_scnprintf(buf, 128, conn->conn_cpumask);
-#if 0
-	pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():"
-			" %s for %s\n", buf, p->comm);
-#endif
 	set_cpus_allowed_ptr(p, conn->conn_cpumask);
 }
 
-#else
-
-void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+static int handle_immediate_queue(struct iscsi_conn *conn)
 {
-	return;
+	struct iscsi_queue_req *qr;
+	struct iscsi_cmd *cmd;
+	u8 state;
+	int ret;
+
+	while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) {
+		atomic_set(&conn->check_immediate_queue, 0);
+		cmd = qr->cmd;
+		state = qr->state;
+		kmem_cache_free(lio_qr_cache, qr);
+
+		switch (state) {
+		case ISTATE_SEND_R2T:
+			ret = iscsit_send_r2t(cmd, conn);
+			if (ret < 0)
+				goto err;
+			break;
+		case ISTATE_REMOVE:
+			if (cmd->data_direction == DMA_TO_DEVICE)
+				iscsit_stop_dataout_timer(cmd);
+
+			spin_lock_bh(&conn->cmd_lock);
+			list_del(&cmd->i_conn_node);
+			spin_unlock_bh(&conn->cmd_lock);
+
+			iscsit_free_cmd(cmd);
+			continue;
+		case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+			iscsit_mod_nopin_response_timer(conn);
+			ret = iscsit_send_unsolicited_nopin(cmd,
+							    conn, 1);
+			if (ret < 0)
+				goto err;
+			break;
+		case ISTATE_SEND_NOPIN_NO_RESPONSE:
+			ret = iscsit_send_unsolicited_nopin(cmd,
+							    conn, 0);
+			if (ret < 0)
+				goto err;
+			break;
+		default:
+			pr_err("Unknown Opcode: 0x%02x ITT:"
+			       " 0x%08x, i_state: %d on CID: %hu\n",
+			       cmd->iscsi_opcode, cmd->init_task_tag, state,
+			       conn->cid);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	return -1;
 }
 
-#define iscsit_thread_check_cpumask(X, Y, Z) ({})
-#endif /* CONFIG_SMP */
+static int handle_response_queue(struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr;
+	struct iscsi_cmd *cmd;
+	u8 state;
+	int ret;
+
+	while ((qr = iscsit_get_cmd_from_response_queue(conn))) {
+		cmd = qr->cmd;
+		state = qr->state;
+		kmem_cache_free(lio_qr_cache, qr);
+
+check_rsp_state:
+		switch (state) {
+		case ISTATE_SEND_DATAIN:
+			ret = iscsit_send_data_in(cmd, conn);
+			if (ret < 0)
+				goto err;
+			else if (!ret)
+				/* more drs */
+				goto check_rsp_state;
+			else if (ret == 1) {
+				/* all done */
+				spin_lock_bh(&cmd->istate_lock);
+				cmd->i_state = ISTATE_SENT_STATUS;
+				spin_unlock_bh(&cmd->istate_lock);
+				continue;
+			} else if (ret == 2) {
+				/* Still must send status,
+				   SCF_TRANSPORT_TASK_SENSE was set */
+				spin_lock_bh(&cmd->istate_lock);
+				cmd->i_state = ISTATE_SEND_STATUS;
+				spin_unlock_bh(&cmd->istate_lock);
+				state = ISTATE_SEND_STATUS;
+				goto check_rsp_state;
+			}
+
+			break;
+		case ISTATE_SEND_STATUS:
+		case ISTATE_SEND_STATUS_RECOVERY:
+			ret = iscsit_send_status(cmd, conn);
+			break;
+		case ISTATE_SEND_LOGOUTRSP:
+			ret = iscsit_send_logout_response(cmd, conn);
+			break;
+		case ISTATE_SEND_ASYNCMSG:
+			ret = iscsit_send_conn_drop_async_message(
+				cmd, conn);
+			break;
+		case ISTATE_SEND_NOPIN:
+			ret = iscsit_send_nopin_response(cmd, conn);
+			break;
+		case ISTATE_SEND_REJECT:
+			ret = iscsit_send_reject(cmd, conn);
+			break;
+		case ISTATE_SEND_TASKMGTRSP:
+			ret = iscsit_send_task_mgt_rsp(cmd, conn);
+			if (ret != 0)
+				break;
+			ret = iscsit_tmr_post_handler(cmd, conn);
+			if (ret != 0)
+				iscsit_fall_back_to_erl0(conn->sess);
+			break;
+		case ISTATE_SEND_TEXTRSP:
+			ret = iscsit_send_text_rsp(cmd, conn);
+			break;
+		default:
+			pr_err("Unknown Opcode: 0x%02x ITT:"
+			       " 0x%08x, i_state: %d on CID: %hu\n",
+			       cmd->iscsi_opcode, cmd->init_task_tag,
+			       state, conn->cid);
+			goto err;
+		}
+		if (ret < 0)
+			goto err;
+
+		if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
+			iscsit_tx_thread_wait_for_tcp(conn);
+			iscsit_unmap_iovec(cmd);
+			goto err;
+		}
+		iscsit_unmap_iovec(cmd);
+
+		switch (state) {
+		case ISTATE_SEND_LOGOUTRSP:
+			if (!iscsit_logout_post_handler(cmd, conn))
+				goto restart;
+			/* fall through */
+		case ISTATE_SEND_STATUS:
+		case ISTATE_SEND_ASYNCMSG:
+		case ISTATE_SEND_NOPIN:
+		case ISTATE_SEND_STATUS_RECOVERY:
+		case ISTATE_SEND_TEXTRSP:
+		case ISTATE_SEND_TASKMGTRSP:
+			spin_lock_bh(&cmd->istate_lock);
+			cmd->i_state = ISTATE_SENT_STATUS;
+			spin_unlock_bh(&cmd->istate_lock);
+			break;
+		case ISTATE_SEND_REJECT:
+			if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
+				cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
+				complete(&cmd->reject_comp);
+				goto err;
+			}
+			complete(&cmd->reject_comp);
+			break;
+		default:
+			pr_err("Unknown Opcode: 0x%02x ITT:"
+			       " 0x%08x, i_state: %d on CID: %hu\n",
+			       cmd->iscsi_opcode, cmd->init_task_tag,
+			       cmd->i_state, conn->cid);
+			goto err;
+		}
+
+		if (atomic_read(&conn->check_immediate_queue))
+			break;
+	}
+
+	return 0;
+
+err:
+	return -1;
+restart:
+	return -EAGAIN;
+}
 
 int iscsi_target_tx_thread(void *arg)
 {
-	u8 state;
-	int eodr = 0;
 	int ret = 0;
-	int sent_status = 0;
-	int use_misc = 0;
-	int map_sg = 0;
-	struct iscsi_cmd *cmd = NULL;
 	struct iscsi_conn *conn;
-	struct iscsi_queue_req *qr = NULL;
 	struct iscsi_thread_set *ts = arg;
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
@@ -3516,7 +3735,7 @@
 	if (!conn)
 		goto out;
 
-	eodr = map_sg = ret = sent_status = use_misc = 0;
+	ret = 0;
 
 	while (!kthread_should_stop()) {
 		/*
@@ -3531,251 +3750,15 @@
 		     signal_pending(current))
 			goto transport_err;
 
-get_immediate:
-		qr = iscsit_get_cmd_from_immediate_queue(conn);
-		if (qr) {
-			atomic_set(&conn->check_immediate_queue, 0);
-			cmd = qr->cmd;
-			state = qr->state;
-			kmem_cache_free(lio_qr_cache, qr);
+		ret = handle_immediate_queue(conn);
+		if (ret < 0)
+			goto transport_err;
 
-			spin_lock_bh(&cmd->istate_lock);
-			switch (state) {
-			case ISTATE_SEND_R2T:
-				spin_unlock_bh(&cmd->istate_lock);
-				ret = iscsit_send_r2t(cmd, conn);
-				break;
-			case ISTATE_REMOVE:
-				spin_unlock_bh(&cmd->istate_lock);
-
-				if (cmd->data_direction == DMA_TO_DEVICE)
-					iscsit_stop_dataout_timer(cmd);
-
-				spin_lock_bh(&conn->cmd_lock);
-				list_del(&cmd->i_list);
-				spin_unlock_bh(&conn->cmd_lock);
-
-				iscsit_free_cmd(cmd);
-				goto get_immediate;
-			case ISTATE_SEND_NOPIN_WANT_RESPONSE:
-				spin_unlock_bh(&cmd->istate_lock);
-				iscsit_mod_nopin_response_timer(conn);
-				ret = iscsit_send_unsolicited_nopin(cmd,
-						conn, 1);
-				break;
-			case ISTATE_SEND_NOPIN_NO_RESPONSE:
-				spin_unlock_bh(&cmd->istate_lock);
-				ret = iscsit_send_unsolicited_nopin(cmd,
-						conn, 0);
-				break;
-			default:
-				pr_err("Unknown Opcode: 0x%02x ITT:"
-				" 0x%08x, i_state: %d on CID: %hu\n",
-				cmd->iscsi_opcode, cmd->init_task_tag, state,
-				conn->cid);
-				spin_unlock_bh(&cmd->istate_lock);
-				goto transport_err;
-			}
-			if (ret < 0) {
-				conn->tx_immediate_queue = 0;
-				goto transport_err;
-			}
-
-			if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
-				conn->tx_immediate_queue = 0;
-				iscsit_tx_thread_wait_for_tcp(conn);
-				goto transport_err;
-			}
-
-			spin_lock_bh(&cmd->istate_lock);
-			switch (state) {
-			case ISTATE_SEND_R2T:
-				spin_unlock_bh(&cmd->istate_lock);
-				spin_lock_bh(&cmd->dataout_timeout_lock);
-				iscsit_start_dataout_timer(cmd, conn);
-				spin_unlock_bh(&cmd->dataout_timeout_lock);
-				break;
-			case ISTATE_SEND_NOPIN_WANT_RESPONSE:
-				cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE;
-				spin_unlock_bh(&cmd->istate_lock);
-				break;
-			case ISTATE_SEND_NOPIN_NO_RESPONSE:
-				cmd->i_state = ISTATE_SENT_STATUS;
-				spin_unlock_bh(&cmd->istate_lock);
-				break;
-			default:
-				pr_err("Unknown Opcode: 0x%02x ITT:"
-					" 0x%08x, i_state: %d on CID: %hu\n",
-					cmd->iscsi_opcode, cmd->init_task_tag,
-					state, conn->cid);
-				spin_unlock_bh(&cmd->istate_lock);
-				goto transport_err;
-			}
-			goto get_immediate;
-		} else
-			conn->tx_immediate_queue = 0;
-
-get_response:
-		qr = iscsit_get_cmd_from_response_queue(conn);
-		if (qr) {
-			cmd = qr->cmd;
-			state = qr->state;
-			kmem_cache_free(lio_qr_cache, qr);
-
-			spin_lock_bh(&cmd->istate_lock);
-check_rsp_state:
-			switch (state) {
-			case ISTATE_SEND_DATAIN:
-				spin_unlock_bh(&cmd->istate_lock);
-				ret = iscsit_send_data_in(cmd, conn,
-							  &eodr);
-				map_sg = 1;
-				break;
-			case ISTATE_SEND_STATUS:
-			case ISTATE_SEND_STATUS_RECOVERY:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_status(cmd, conn);
-				break;
-			case ISTATE_SEND_LOGOUTRSP:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_logout_response(cmd, conn);
-				break;
-			case ISTATE_SEND_ASYNCMSG:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_conn_drop_async_message(
-						cmd, conn);
-				break;
-			case ISTATE_SEND_NOPIN:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_nopin_response(cmd, conn);
-				break;
-			case ISTATE_SEND_REJECT:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_reject(cmd, conn);
-				break;
-			case ISTATE_SEND_TASKMGTRSP:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_task_mgt_rsp(cmd, conn);
-				if (ret != 0)
-					break;
-				ret = iscsit_tmr_post_handler(cmd, conn);
-				if (ret != 0)
-					iscsit_fall_back_to_erl0(conn->sess);
-				break;
-			case ISTATE_SEND_TEXTRSP:
-				spin_unlock_bh(&cmd->istate_lock);
-				use_misc = 1;
-				ret = iscsit_send_text_rsp(cmd, conn);
-				break;
-			default:
-				pr_err("Unknown Opcode: 0x%02x ITT:"
-					" 0x%08x, i_state: %d on CID: %hu\n",
-					cmd->iscsi_opcode, cmd->init_task_tag,
-					state, conn->cid);
-				spin_unlock_bh(&cmd->istate_lock);
-				goto transport_err;
-			}
-			if (ret < 0) {
-				conn->tx_response_queue = 0;
-				goto transport_err;
-			}
-
-			if (map_sg && !conn->conn_ops->IFMarker) {
-				if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
-					conn->tx_response_queue = 0;
-					iscsit_tx_thread_wait_for_tcp(conn);
-					iscsit_unmap_iovec(cmd);
-					goto transport_err;
-				}
-			} else {
-				if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) {
-					conn->tx_response_queue = 0;
-					iscsit_tx_thread_wait_for_tcp(conn);
-					iscsit_unmap_iovec(cmd);
-					goto transport_err;
-				}
-			}
-			map_sg = 0;
-			iscsit_unmap_iovec(cmd);
-
-			spin_lock_bh(&cmd->istate_lock);
-			switch (state) {
-			case ISTATE_SEND_DATAIN:
-				if (!eodr)
-					goto check_rsp_state;
-
-				if (eodr == 1) {
-					cmd->i_state = ISTATE_SENT_LAST_DATAIN;
-					sent_status = 1;
-					eodr = use_misc = 0;
-				} else if (eodr == 2) {
-					cmd->i_state = state =
-							ISTATE_SEND_STATUS;
-					sent_status = 0;
-					eodr = use_misc = 0;
-					goto check_rsp_state;
-				}
-				break;
-			case ISTATE_SEND_STATUS:
-				use_misc = 0;
-				sent_status = 1;
-				break;
-			case ISTATE_SEND_ASYNCMSG:
-			case ISTATE_SEND_NOPIN:
-			case ISTATE_SEND_STATUS_RECOVERY:
-			case ISTATE_SEND_TEXTRSP:
-				use_misc = 0;
-				sent_status = 1;
-				break;
-			case ISTATE_SEND_REJECT:
-				use_misc = 0;
-				if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
-					cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
-					spin_unlock_bh(&cmd->istate_lock);
-					complete(&cmd->reject_comp);
-					goto transport_err;
-				}
-				complete(&cmd->reject_comp);
-				break;
-			case ISTATE_SEND_TASKMGTRSP:
-				use_misc = 0;
-				sent_status = 1;
-				break;
-			case ISTATE_SEND_LOGOUTRSP:
-				spin_unlock_bh(&cmd->istate_lock);
-				if (!iscsit_logout_post_handler(cmd, conn))
-					goto restart;
-				spin_lock_bh(&cmd->istate_lock);
-				use_misc = 0;
-				sent_status = 1;
-				break;
-			default:
-				pr_err("Unknown Opcode: 0x%02x ITT:"
-					" 0x%08x, i_state: %d on CID: %hu\n",
-					cmd->iscsi_opcode, cmd->init_task_tag,
-					cmd->i_state, conn->cid);
-				spin_unlock_bh(&cmd->istate_lock);
-				goto transport_err;
-			}
-
-			if (sent_status) {
-				cmd->i_state = ISTATE_SENT_STATUS;
-				sent_status = 0;
-			}
-			spin_unlock_bh(&cmd->istate_lock);
-
-			if (atomic_read(&conn->check_immediate_queue))
-				goto get_immediate;
-
-			goto get_response;
-		} else
-			conn->tx_response_queue = 0;
+		ret = handle_response_queue(conn);
+		if (ret == -EAGAIN)
+			goto restart;
+		else if (ret < 0)
+			goto transport_err;
 	}
 
 transport_err:
@@ -3952,9 +3935,9 @@
 	 * has been reset -> returned sleeping pre-handler state.
 	 */
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
 
-		list_del(&cmd->i_list);
+		list_del(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		iscsit_increment_maxcmdsn(cmd, sess);
@@ -3972,7 +3955,7 @@
 	struct iscsi_cmd *cmd;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 		if (cmd->data_direction == DMA_TO_DEVICE)
 			iscsit_stop_dataout_timer(cmd);
 	}
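
In the reworked TX path above, iscsit_send_data_in() now returns the end-of-data-in state itself: 0 while more DataIN remains, 1 when the command is complete, and 2 when SCF_TRANSPORT_TASK_SENSE still requires a separate status PDU, and handle_response_queue() loops on that value. A small userspace model of that dispatch, with a stub standing in for the real send routine:

    #include <stdio.h>

    enum rsp_state { SEND_DATAIN, SEND_STATUS, SENT_STATUS };

    /* Stub for the send routine: returns 0 while more DataIN remains, 1 when
     * done, and 2 when a separate status PDU is still needed because sense
     * data is pending. */
    static int send_data_in_stub(int *remaining, int want_sense)
    {
        if (*remaining > 0) {
            (*remaining)--;
            return 0;
        }
        return want_sense ? 2 : 1;
    }

    static enum rsp_state handle_datain(int remaining, int want_sense)
    {
        enum rsp_state state = SEND_DATAIN;

        while (state == SEND_DATAIN) {
            int ret = send_data_in_stub(&remaining, want_sense);

            if (ret == 0)
                continue;            /* more DataIN to send        */
            else if (ret == 1)
                state = SENT_STATUS; /* status piggybacked, done   */
            else if (ret == 2)
                state = SEND_STATUS; /* explicit status PDU needed */
        }
        return state;
    }

    int main(void)
    {
        printf("no sense:   final state %d\n", handle_datain(3, 0));
        printf("with sense: final state %d\n", handle_datain(3, 1));
        return 0;
    }
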
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 5db2dde..12abb4c 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -18,8 +18,7 @@
 extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
-extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, bool recovery);
 extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
 extern int iscsi_target_tx_thread(void *);
 extern int iscsi_target_rx_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 00c58cc..69dc8e3 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1538,7 +1538,7 @@
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
 
 	if (!cmd->immediate_data && !cmd->unsolicited_data)
-		return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
+		return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false);
 
 	return 0;
 }
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 2aaee7e..1c70144 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -296,12 +296,11 @@
 	u32			runlength;
 	u32			data_length;
 	u32			data_offset;
-	u32			data_offset_end;
 	u32			data_sn;
 	u32			next_burst_len;
 	u32			read_data_done;
 	u32			seq_send_order;
-	struct list_head	dr_list;
+	struct list_head	cmd_datain_node;
 } ____cacheline_aligned;
 
 struct iscsi_ooo_cmdsn {
@@ -381,8 +380,6 @@
 	u32			buf_ptr_size;
 	/* Used to store DataDigest */
 	u32			data_crc;
-	/* Total size in bytes associated with command */
-	u32			data_length;
 	/* Counter for MaxOutstandingR2T */
 	u32			outstanding_r2ts;
 	/* Next R2T Offset when DataSequenceInOrder=Yes */
@@ -464,16 +461,13 @@
 	/* Session the command is part of,  used for connection recovery */
 	struct iscsi_session	*sess;
 	/* list_head for connection list */
-	struct list_head	i_list;
+	struct list_head	i_conn_node;
 	/* The TCM I/O descriptor that is accessed via container_of() */
 	struct se_cmd		se_cmd;
 	/* Sense buffer that will be mapped into outgoing status */
 #define ISCSI_SENSE_BUFFER_LEN          (TRANSPORT_SENSE_BUFFER + 2)
 	unsigned char		sense_buffer[ISCSI_SENSE_BUFFER_LEN];
 
-	struct scatterlist	*t_mem_sg;
-	u32			t_mem_sg_nents;
-
 	u32			padding;
 	u8			pad_bytes[4];
 
@@ -500,8 +494,6 @@
 	u8			network_transport;
 	enum iscsi_timer_flags_table nopin_timer_flags;
 	enum iscsi_timer_flags_table nopin_response_timer_flags;
-	u8			tx_immediate_queue;
-	u8			tx_response_queue;
 	/* Used to know what thread encountered a transport failure */
 	u8			which_thread;
 	/* connection id assigned by the Initiator */
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index 8c04951..848fee7 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -37,7 +37,7 @@
 				" struct iscsi_datain_req\n");
 		return NULL;
 	}
-	INIT_LIST_HEAD(&dr->dr_list);
+	INIT_LIST_HEAD(&dr->cmd_datain_node);
 
 	return dr;
 }
@@ -45,14 +45,14 @@
 void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
 {
 	spin_lock(&cmd->datain_lock);
-	list_add_tail(&dr->dr_list, &cmd->datain_list);
+	list_add_tail(&dr->cmd_datain_node, &cmd->datain_list);
 	spin_unlock(&cmd->datain_lock);
 }
 
 void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
 {
 	spin_lock(&cmd->datain_lock);
-	list_del(&dr->dr_list);
+	list_del(&dr->cmd_datain_node);
 	spin_unlock(&cmd->datain_lock);
 
 	kmem_cache_free(lio_dr_cache, dr);
@@ -63,8 +63,8 @@
 	struct iscsi_datain_req *dr, *dr_tmp;
 
 	spin_lock(&cmd->datain_lock);
-	list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) {
-		list_del(&dr->dr_list);
+	list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, cmd_datain_node) {
+		list_del(&dr->cmd_datain_node);
 		kmem_cache_free(lio_dr_cache, dr);
 	}
 	spin_unlock(&cmd->datain_lock);
@@ -72,17 +72,14 @@
 
 struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
 {
-	struct iscsi_datain_req *dr;
-
 	if (list_empty(&cmd->datain_list)) {
 		pr_err("cmd->datain_list is empty for ITT:"
 			" 0x%08x\n", cmd->init_task_tag);
 		return NULL;
 	}
-	list_for_each_entry(dr, &cmd->datain_list, dr_list)
-		break;
 
-	return dr;
+	return list_first_entry(&cmd->datain_list, struct iscsi_datain_req,
+				cmd_datain_node);
 }
 
 /*
@@ -113,7 +110,7 @@
 	read_data_done = (!dr->recovery) ?
 			cmd->read_data_done : dr->read_data_done;
 
-	read_data_left = (cmd->data_length - read_data_done);
+	read_data_left = (cmd->se_cmd.data_length - read_data_done);
 	if (!read_data_left) {
 		pr_err("ITT: 0x%08x read_data_left is zero!\n",
 				cmd->init_task_tag);
@@ -212,7 +209,7 @@
 	seq_send_order = (!dr->recovery) ?
 			cmd->seq_send_order : dr->seq_send_order;
 
-	read_data_left = (cmd->data_length - read_data_done);
+	read_data_left = (cmd->se_cmd.data_length - read_data_done);
 	if (!read_data_left) {
 		pr_err("ITT: 0x%08x read_data_left is zero!\n",
 				cmd->init_task_tag);
@@ -231,8 +228,8 @@
 	offset = (seq->offset + seq->next_burst_len);
 
 	if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
-	     cmd->data_length) {
-		datain->length = (cmd->data_length - offset);
+	     cmd->se_cmd.data_length) {
+		datain->length = (cmd->se_cmd.data_length - offset);
 		datain->offset = offset;
 
 		datain->flags |= ISCSI_FLAG_CMD_FINAL;
@@ -264,7 +261,7 @@
 		}
 	}
 
-	if ((read_data_done + datain->length) == cmd->data_length)
+	if ((read_data_done + datain->length) == cmd->se_cmd.data_length)
 		datain->flags |= ISCSI_FLAG_DATA_STATUS;
 
 	datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
@@ -333,7 +330,7 @@
 	read_data_done = (!dr->recovery) ?
 			cmd->read_data_done : dr->read_data_done;
 
-	read_data_left = (cmd->data_length - read_data_done);
+	read_data_left = (cmd->se_cmd.data_length - read_data_done);
 	if (!read_data_left) {
 		pr_err("ITT: 0x%08x read_data_left is zero!\n",
 				cmd->init_task_tag);
@@ -344,7 +341,7 @@
 	if (!pdu)
 		return dr;
 
-	if ((read_data_done + pdu->length) == cmd->data_length) {
+	if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) {
 		pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
 		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
 			pdu->flags |= ISCSI_FLAG_DATA_ACK;
@@ -433,7 +430,7 @@
 	seq_send_order = (!dr->recovery) ?
 			cmd->seq_send_order : dr->seq_send_order;
 
-	read_data_left = (cmd->data_length - read_data_done);
+	read_data_left = (cmd->se_cmd.data_length - read_data_done);
 	if (!read_data_left) {
 		pr_err("ITT: 0x%08x read_data_left is zero!\n",
 				cmd->init_task_tag);
@@ -463,7 +460,7 @@
 	} else
 		seq->next_burst_len += pdu->length;
 
-	if ((read_data_done + pdu->length) == cmd->data_length)
+	if ((read_data_done + pdu->length) == cmd->se_cmd.data_length)
 		pdu->flags |= ISCSI_FLAG_DATA_STATUS;
 
 	pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 1ab0560..1a02016 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -48,9 +48,9 @@
 	if (cmd->unsolicited_data) {
 		cmd->seq_start_offset = cmd->write_data_done;
 		cmd->seq_end_offset = (cmd->write_data_done +
-			(cmd->data_length >
+			(cmd->se_cmd.data_length >
 			 conn->sess->sess_ops->FirstBurstLength) ?
-			conn->sess->sess_ops->FirstBurstLength : cmd->data_length);
+			conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length);
 		return;
 	}
 
@@ -59,15 +59,15 @@
 
 	if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
 		cmd->seq_start_offset = cmd->write_data_done;
-		cmd->seq_end_offset = (cmd->data_length >
+		cmd->seq_end_offset = (cmd->se_cmd.data_length >
 			conn->sess->sess_ops->MaxBurstLength) ?
 			(cmd->write_data_done +
-			conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
+			conn->sess->sess_ops->MaxBurstLength) : cmd->se_cmd.data_length;
 	} else {
 		cmd->seq_start_offset = cmd->seq_end_offset;
 		cmd->seq_end_offset = ((cmd->seq_end_offset +
 			conn->sess->sess_ops->MaxBurstLength) >=
-			cmd->data_length) ? cmd->data_length :
+			cmd->se_cmd.data_length) ? cmd->se_cmd.data_length :
 			(cmd->seq_end_offset +
 			 conn->sess->sess_ops->MaxBurstLength);
 	}
@@ -182,13 +182,13 @@
 		if (!conn->sess->sess_ops->DataPDUInOrder)
 			goto out;
 
-		if ((first_burst_len != cmd->data_length) &&
+		if ((first_burst_len != cmd->se_cmd.data_length) &&
 		    (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
 			pr_err("Unsolicited non-immediate data"
 			" received %u does not equal FirstBurstLength: %u, and"
 			" does not equal ExpXferLen %u.\n", first_burst_len,
 				conn->sess->sess_ops->FirstBurstLength,
-				cmd->data_length);
+				cmd->se_cmd.data_length);
 			transport_send_check_condition_and_sense(&cmd->se_cmd,
 					TCM_INCORRECT_AMOUNT_OF_DATA, 0);
 			return DATAOUT_CANNOT_RECOVER;
@@ -201,10 +201,10 @@
 				conn->sess->sess_ops->FirstBurstLength);
 			return DATAOUT_CANNOT_RECOVER;
 		}
-		if (first_burst_len == cmd->data_length) {
+		if (first_burst_len == cmd->se_cmd.data_length) {
 			pr_err("Command ITT: 0x%08x reached"
 			" ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
-			" error.\n", cmd->init_task_tag, cmd->data_length);
+			" error.\n", cmd->init_task_tag, cmd->se_cmd.data_length);
 			return DATAOUT_CANNOT_RECOVER;
 		}
 	}
@@ -294,7 +294,7 @@
 			if ((next_burst_len <
 			     conn->sess->sess_ops->MaxBurstLength) &&
 			   ((cmd->write_data_done + payload_length) <
-			     cmd->data_length)) {
+			     cmd->se_cmd.data_length)) {
 				pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
 				" before end of DataOUT sequence, protocol"
 				" error.\n", cmd->init_task_tag);
@@ -319,7 +319,7 @@
 				return DATAOUT_CANNOT_RECOVER;
 			}
 			if ((cmd->write_data_done + payload_length) ==
-					cmd->data_length) {
+					cmd->se_cmd.data_length) {
 				pr_err("Command ITT: 0x%08x reached"
 				" last DataOUT PDU in sequence but ISCSI_FLAG_"
 				"CMD_FINAL is not set, protocol error.\n",
@@ -640,9 +640,12 @@
 
 	cmd->write_data_done += payload_length;
 
-	return (cmd->write_data_done == cmd->data_length) ?
-		DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
-		DATAOUT_SEND_R2T : DATAOUT_NORMAL;
+	if (cmd->write_data_done == cmd->se_cmd.data_length)
+		return DATAOUT_SEND_TO_TRANSPORT;
+	else if (send_r2t)
+		return DATAOUT_SEND_R2T;
+	else
+		return DATAOUT_NORMAL;
 }
 
 static int iscsit_dataout_post_crc_failed(
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 006f605..ecdd46d 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -279,11 +279,9 @@
 		 * seq->first_datasn and seq->last_datasn have not been set.
 		 */
 		if (!seq->sent) {
-#if 0
 			pr_err("Ignoring non-sent sequence 0x%08x ->"
 				" 0x%08x\n\n", seq->first_datasn,
 				seq->last_datasn);
-#endif
 			continue;
 		}
 
@@ -294,11 +292,10 @@
 		 */
 		if ((seq->first_datasn < begrun) &&
 				(seq->last_datasn < begrun)) {
-#if 0
 			pr_err("Pre BegRun sequence 0x%08x ->"
 				" 0x%08x\n", seq->first_datasn,
 				seq->last_datasn);
-#endif
+
 			read_data_done += cmd->seq_list[i].xfer_len;
 			seq->next_burst_len = seq->pdu_send_order = 0;
 			continue;
@@ -309,11 +306,10 @@
 		 */
 		if ((seq->first_datasn <= begrun) &&
 				(seq->last_datasn >= begrun)) {
-#if 0
 			pr_err("Found sequence begrun: 0x%08x in"
 				" 0x%08x -> 0x%08x\n", begrun,
 				seq->first_datasn, seq->last_datasn);
-#endif
+
 			seq_send_order = seq->seq_send_order;
 			data_sn = seq->first_datasn;
 			seq->next_burst_len = seq->pdu_send_order = 0;
@@ -369,10 +365,9 @@
 		 */
 		if ((seq->first_datasn > begrun) ||
 				(seq->last_datasn > begrun)) {
-#if 0
 			pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
 					seq->first_datasn, seq->last_datasn);
-#endif
+
 			seq->next_burst_len = seq->pdu_send_order = 0;
 			continue;
 		}
@@ -526,7 +521,7 @@
 		found_cmd = 0;
 
 		spin_lock_bh(&conn->cmd_lock);
-		list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+		list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 			if (cmd->stat_sn == begrun) {
 				found_cmd = 1;
 				break;
@@ -987,7 +982,7 @@
 					return 0;
 
 				iscsit_set_dataout_sequence_values(cmd);
-				iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0);
+				iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false);
 			}
 			return 0;
 		}
@@ -1121,8 +1116,8 @@
 	if (cmd->unsolicited_data) {
 		*offset = 0;
 		*length = (conn->sess->sess_ops->FirstBurstLength >
-			   cmd->data_length) ?
-			   cmd->data_length :
+			   cmd->se_cmd.data_length) ?
+			   cmd->se_cmd.data_length :
 			   conn->sess->sess_ops->FirstBurstLength;
 		return 0;
 	}
@@ -1193,8 +1188,8 @@
 		if (conn->sess->sess_ops->DataPDUInOrder) {
 			pdu_offset = cmd->write_data_done;
 			if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
-			     cmd->next_burst_len)) > cmd->data_length)
-				pdu_length = (cmd->data_length -
+			     cmd->next_burst_len)) > cmd->se_cmd.data_length)
+				pdu_length = (cmd->se_cmd.data_length -
 					cmd->write_data_done);
 			else
 				pdu_length = (conn->sess->sess_ops->MaxBurstLength -
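
Several hunks in iscsi_target_erl1.c clamp the unsolicited-data window to the smaller of FirstBurstLength and the command's expected transfer length (now read from se_cmd.data_length). A small standalone sketch of that clamp, with hypothetical names:

/* Userspace sketch of the first-burst window computation above. */
#include <stdio.h>

static unsigned int first_burst_window(unsigned int first_burst_length,
				       unsigned int data_length)
{
	/* The unsolicited burst never exceeds the total expected transfer. */
	return data_length < first_burst_length ? data_length
						: first_burst_length;
}

int main(void)
{
	/* 64 KiB FirstBurstLength, 16 KiB command: window is 16 KiB. */
	printf("%u\n", first_burst_window(65536, 16384));
	/* 64 KiB FirstBurstLength, 1 MiB command: window is 64 KiB. */
	printf("%u\n", first_burst_window(65536, 1048576));
	return 0;
}
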
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 1af1f21..65aac14 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -138,9 +138,9 @@
 
 		spin_lock(&cr->conn_recovery_cmd_lock);
 		list_for_each_entry_safe(cmd, cmd_tmp,
-				&cr->conn_recovery_cmd_list, i_list) {
+				&cr->conn_recovery_cmd_list, i_conn_node) {
 
-			list_del(&cmd->i_list);
+			list_del(&cmd->i_conn_node);
 			cmd->conn = NULL;
 			spin_unlock(&cr->conn_recovery_cmd_lock);
 			iscsit_free_cmd(cmd);
@@ -160,9 +160,9 @@
 
 		spin_lock(&cr->conn_recovery_cmd_lock);
 		list_for_each_entry_safe(cmd, cmd_tmp,
-				&cr->conn_recovery_cmd_list, i_list) {
+				&cr->conn_recovery_cmd_list, i_conn_node) {
 
-			list_del(&cmd->i_list);
+			list_del(&cmd->i_conn_node);
 			cmd->conn = NULL;
 			spin_unlock(&cr->conn_recovery_cmd_lock);
 			iscsit_free_cmd(cmd);
@@ -220,7 +220,7 @@
 	}
 	cr = cmd->cr;
 
-	list_del(&cmd->i_list);
+	list_del(&cmd->i_conn_node);
 	return --cr->cmd_count;
 }
 
@@ -234,7 +234,7 @@
 
 	spin_lock(&cr->conn_recovery_cmd_lock);
 	list_for_each_entry_safe(cmd, cmd_tmp,
-			&cr->conn_recovery_cmd_list, i_list) {
+			&cr->conn_recovery_cmd_list, i_conn_node) {
 
 		if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
 		     (cmd->deferred_i_state != ISTATE_REMOVE)) ||
@@ -297,11 +297,11 @@
 	mutex_unlock(&sess->cmdsn_mutex);
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
 		if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
 			continue;
 
-		list_del(&cmd->i_list);
+		list_del(&cmd->i_conn_node);
 
 		spin_unlock_bh(&conn->cmd_lock);
 		iscsit_free_cmd(cmd);
@@ -339,14 +339,14 @@
 	/*
 	 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
 	 * ISCSI_OP_NOOP_OUT opcodes.  For all other opcodes call
-	 * list_del(&cmd->i_list); to release the command to the
+	 * list_del(&cmd->i_conn_node); to release the command to the
 	 * session pool and remove it from the connection's list.
 	 *
 	 * Also stop the DataOUT timer, which will be restarted after
 	 * sending the TMR response.
 	 */
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
 
 		if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
 		    (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
@@ -355,7 +355,7 @@
 				" CID: %hu\n", cmd->iscsi_opcode,
 				cmd->init_task_tag, cmd->cmd_sn, conn->cid);
 
-			list_del(&cmd->i_list);
+			list_del(&cmd->i_conn_node);
 			spin_unlock_bh(&conn->cmd_lock);
 			iscsit_free_cmd(cmd);
 			spin_lock_bh(&conn->cmd_lock);
@@ -375,7 +375,7 @@
 		 */
 		if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
 		     (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
-			list_del(&cmd->i_list);
+			list_del(&cmd->i_conn_node);
 			spin_unlock_bh(&conn->cmd_lock);
 			iscsit_free_cmd(cmd);
 			spin_lock_bh(&conn->cmd_lock);
@@ -397,7 +397,7 @@
 
 		cmd->sess = conn->sess;
 
-		list_del(&cmd->i_list);
+		list_del(&cmd->i_conn_node);
 		spin_unlock_bh(&conn->cmd_lock);
 
 		iscsit_free_all_datain_reqs(cmd);
@@ -407,7 +407,7 @@
 		 * Add the struct iscsi_cmd to the connection recovery cmd list
 		 */
 		spin_lock(&cr->conn_recovery_cmd_lock);
-		list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list);
+		list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list);
 		spin_unlock(&cr->conn_recovery_cmd_lock);
 
 		spin_lock_bh(&conn->cmd_lock);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index eb05c9d..ed5241e 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -803,14 +803,6 @@
 
 	value = simple_strtoul(value_ptr, &tmpptr, 0);
 
-/* #warning FIXME: Fix this */
-#if 0
-	if (strspn(endptr, WHITE_SPACE) != strlen(endptr)) {
-		pr_err("Illegal value \"%s\" for \"%s\".\n",
-			value, param->name);
-		return -1;
-	}
-#endif
 	if (IS_TYPERANGE_0_TO_2(param)) {
 		if ((value < 0) || (value > 2)) {
 			pr_err("Illegal value for \"%s\", must be"
@@ -1045,13 +1037,6 @@
 			tmp2 = strchr(acceptor_values, ',');
 			if (tmp2)
 				*tmp2 = '\0';
-			if (!acceptor_values || !proposer_values) {
-				if (tmp1)
-					*tmp1 = ',';
-				if (tmp2)
-					*tmp2 = ',';
-				return NULL;
-			}
 			if (!strcmp(acceptor_values, proposer_values)) {
 				if (tmp2)
 					*tmp2 = ',';
@@ -1061,8 +1046,6 @@
 				*tmp2++ = ',';
 
 			acceptor_values = tmp2;
-			if (!acceptor_values)
-				break;
 		} while (acceptor_values);
 		if (tmp1)
 			*tmp1++ = ',';
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index fc69408..85a306e 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -24,11 +24,13 @@
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_util.h"
+#include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
 
 #define OFFLOAD_BUF_SIZE	32768
 
-void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
+#ifdef DEBUG
+static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
 {
 	int i;
 	struct iscsi_seq *seq;
@@ -46,7 +48,7 @@
 	}
 }
 
-void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
+static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
 {
 	int i;
 	struct iscsi_pdu *pdu;
@@ -61,6 +63,10 @@
 			pdu->length, pdu->pdu_send_order, pdu->seq_no);
 	}
 }
+#else
+static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) {}
+static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) {}
+#endif
 
 static void iscsit_ordered_seq_lists(
 	struct iscsi_cmd *cmd,
@@ -135,11 +141,11 @@
 			seq_count++;
 			continue;
 		}
-		array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+		array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
 		if (!array) {
 			pr_err("Unable to allocate memory"
 				" for random array.\n");
-			return -1;
+			return -ENOMEM;
 		}
 		iscsit_create_random_array(array, seq_count);
 
@@ -155,11 +161,11 @@
 	}
 
 	if (seq_count) {
-		array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+		array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
 		if (!array) {
 			pr_err("Unable to allocate memory for"
 				" random array.\n");
-			return -1;
+			return -ENOMEM;
 		}
 		iscsit_create_random_array(array, seq_count);
 
@@ -187,10 +193,10 @@
 	if (!seq_count)
 		return 0;
 
-	array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+	array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
 	if (!array) {
 		pr_err("Unable to allocate memory for random array.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	iscsit_create_random_array(array, seq_count);
 
@@ -221,11 +227,10 @@
 
 	if ((bl->type == PDULIST_UNSOLICITED) ||
 	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
-		unsolicited_data_length = (cmd->data_length >
-			conn->sess->sess_ops->FirstBurstLength) ?
-			conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+		unsolicited_data_length = min(cmd->se_cmd.data_length,
+			conn->sess->sess_ops->FirstBurstLength);
 
-	while (offset < cmd->data_length) {
+	while (offset < cmd->se_cmd.data_length) {
 		*pdu_count += 1;
 
 		if (check_immediate) {
@@ -239,10 +244,10 @@
 		}
 		if (unsolicited_data_length > 0) {
 			if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
-					>= cmd->data_length) {
+					>= cmd->se_cmd.data_length) {
 				unsolicited_data_length -=
-					(cmd->data_length - offset);
-				offset += (cmd->data_length - offset);
+					(cmd->se_cmd.data_length - offset);
+				offset += (cmd->se_cmd.data_length - offset);
 				continue;
 			}
 			if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
@@ -263,8 +268,8 @@
 			continue;
 		}
 		if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
-		     cmd->data_length) {
-			offset += (cmd->data_length - offset);
+		     cmd->se_cmd.data_length) {
+			offset += (cmd->se_cmd.data_length - offset);
 			continue;
 		}
 		if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
@@ -283,10 +288,10 @@
 
 
 /*
- *	Builds PDU and/or Sequence list,  called while DataSequenceInOrder=No
- *	and DataPDUInOrder=No.
+ *	Builds PDU and/or Sequence list, called when DataSequenceInOrder=No
+ *	or DataPDUInOrder=No.
  */
-static int iscsit_build_pdu_and_seq_list(
+static int iscsit_do_build_pdu_and_seq_lists(
 	struct iscsi_cmd *cmd,
 	struct iscsi_build_list *bl)
 {
@@ -306,11 +311,10 @@
 
 	if ((bl->type == PDULIST_UNSOLICITED) ||
 	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
-		unsolicited_data_length = (cmd->data_length >
-			conn->sess->sess_ops->FirstBurstLength) ?
-			conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+		unsolicited_data_length = min(cmd->se_cmd.data_length,
+			conn->sess->sess_ops->FirstBurstLength);
 
-	while (offset < cmd->data_length) {
+	while (offset < cmd->se_cmd.data_length) {
 		pdu_count++;
 		if (!datapduinorder) {
 			pdu[i].offset = offset;
@@ -346,21 +350,21 @@
 		if (unsolicited_data_length > 0) {
 			if ((offset +
 			     conn->conn_ops->MaxRecvDataSegmentLength) >=
-			     cmd->data_length) {
+			     cmd->se_cmd.data_length) {
 				if (!datapduinorder) {
 					pdu[i].type = PDUTYPE_UNSOLICITED;
 					pdu[i].length =
-						(cmd->data_length - offset);
+						(cmd->se_cmd.data_length - offset);
 				}
 				if (!datasequenceinorder) {
 					seq[seq_no].type = SEQTYPE_UNSOLICITED;
 					seq[seq_no].pdu_count = pdu_count;
 					seq[seq_no].xfer_len = (burstlength +
-						(cmd->data_length - offset));
+						(cmd->se_cmd.data_length - offset));
 				}
 				unsolicited_data_length -=
-						(cmd->data_length - offset);
-				offset += (cmd->data_length - offset);
+						(cmd->se_cmd.data_length - offset);
+				offset += (cmd->se_cmd.data_length - offset);
 				continue;
 			}
 			if ((offset +
@@ -402,18 +406,18 @@
 			continue;
 		}
 		if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
-		     cmd->data_length) {
+		     cmd->se_cmd.data_length) {
 			if (!datapduinorder) {
 				pdu[i].type = PDUTYPE_NORMAL;
-				pdu[i].length = (cmd->data_length - offset);
+				pdu[i].length = (cmd->se_cmd.data_length - offset);
 			}
 			if (!datasequenceinorder) {
 				seq[seq_no].type = SEQTYPE_NORMAL;
 				seq[seq_no].pdu_count = pdu_count;
 				seq[seq_no].xfer_len = (burstlength +
-					(cmd->data_length - offset));
+					(cmd->se_cmd.data_length - offset));
 			}
-			offset += (cmd->data_length - offset);
+			offset += (cmd->se_cmd.data_length - offset);
 			continue;
 		}
 		if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
@@ -464,9 +468,8 @@
 			} else
 				iscsit_ordered_seq_lists(cmd, bl->type);
 		}
-#if 0
+
 		iscsit_dump_seq_list(cmd);
-#endif
 	}
 	if (!datapduinorder) {
 		if (bl->data_direction & ISCSI_PDU_WRITE) {
@@ -484,50 +487,86 @@
 			} else
 				iscsit_ordered_pdu_lists(cmd, bl->type);
 		}
-#if 0
+
 		iscsit_dump_pdu_list(cmd);
-#endif
 	}
 
 	return 0;
 }
 
-/*
- *	Only called while DataSequenceInOrder=No or DataPDUInOrder=No.
- */
-int iscsit_do_build_list(
+int iscsit_build_pdu_and_seq_lists(
 	struct iscsi_cmd *cmd,
-	struct iscsi_build_list *bl)
+	u32 immediate_data_length)
 {
+	struct iscsi_build_list bl;
 	u32 pdu_count = 0, seq_count = 1;
 	struct iscsi_conn *conn = cmd->conn;
 	struct iscsi_pdu *pdu = NULL;
 	struct iscsi_seq *seq = NULL;
 
-	iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count);
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na;
+
+	/*
+	 * Nothing to do when both DataSequenceInOrder and DataPDUInOrder are set
+	 */
+	if (sess->sess_ops->DataSequenceInOrder &&
+	    sess->sess_ops->DataPDUInOrder)
+		return 0;
+
+	if (cmd->data_direction == DMA_NONE)
+		return 0;
+
+	na = iscsit_tpg_get_node_attrib(sess);
+	memset(&bl, 0, sizeof(struct iscsi_build_list));
+
+	if (cmd->data_direction == DMA_FROM_DEVICE) {
+		bl.data_direction = ISCSI_PDU_READ;
+		bl.type = PDULIST_NORMAL;
+		if (na->random_datain_pdu_offsets)
+			bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
+		if (na->random_datain_seq_offsets)
+			bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
+	} else {
+		bl.data_direction = ISCSI_PDU_WRITE;
+		bl.immediate_data_length = immediate_data_length;
+		if (na->random_r2t_offsets)
+			bl.randomize |= RANDOM_R2T_OFFSETS;
+
+		if (!cmd->immediate_data && !cmd->unsolicited_data)
+			bl.type = PDULIST_NORMAL;
+		else if (cmd->immediate_data && !cmd->unsolicited_data)
+			bl.type = PDULIST_IMMEDIATE;
+		else if (!cmd->immediate_data && cmd->unsolicited_data)
+			bl.type = PDULIST_UNSOLICITED;
+		else if (cmd->immediate_data && cmd->unsolicited_data)
+			bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
+	}
+
+	iscsit_determine_counts_for_list(cmd, &bl, &seq_count, &pdu_count);
 
 	if (!conn->sess->sess_ops->DataSequenceInOrder) {
-		seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC);
+		seq = kcalloc(seq_count, sizeof(struct iscsi_seq), GFP_ATOMIC);
 		if (!seq) {
 			pr_err("Unable to allocate struct iscsi_seq list\n");
-			return -1;
+			return -ENOMEM;
 		}
 		cmd->seq_list = seq;
 		cmd->seq_count = seq_count;
 	}
 
 	if (!conn->sess->sess_ops->DataPDUInOrder) {
-		pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC);
+		pdu = kcalloc(pdu_count, sizeof(struct iscsi_pdu), GFP_ATOMIC);
 		if (!pdu) {
 			pr_err("Unable to allocate struct iscsi_pdu list.\n");
 			kfree(seq);
-			return -1;
+			return -ENOMEM;
 		}
 		cmd->pdu_list = pdu;
 		cmd->pdu_count = pdu_count;
 	}
 
-	return iscsit_build_pdu_and_seq_list(cmd, bl);
+	return iscsit_do_build_pdu_and_seq_lists(cmd, &bl);
 }
 
 struct iscsi_pdu *iscsit_get_pdu_holder(
@@ -572,13 +611,12 @@
 		pdu = &cmd->pdu_list[cmd->pdu_start];
 
 		for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
-#if 0
 			pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
 				"_send_order: %d, pdu[i].offset: %d,"
 				" pdu[i].length: %d\n", pdu[i].seq_no,
 				pdu[i].pdu_send_order, pdu[i].offset,
 				pdu[i].length);
-#endif
+
 			if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
 				cmd->pdu_send_order++;
 				return &pdu[i];
@@ -601,11 +639,11 @@
 			pr_err("struct iscsi_seq is NULL!\n");
 			return NULL;
 		}
-#if 0
+
 		pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
 			" seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
 			seq->seq_no);
-#endif
+
 		pdu = &cmd->pdu_list[seq->pdu_start];
 
 		if (seq->pdu_send_order == seq->pdu_count) {
@@ -645,12 +683,11 @@
 	}
 
 	for (i = 0; i < cmd->seq_count; i++) {
-#if 0
 		pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
 			"xfer_len: %d, seq_list[i].seq_no %u\n",
 			cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
 			cmd->seq_list[i].seq_no);
-#endif
+
 		if ((cmd->seq_list[i].orig_offset +
 				cmd->seq_list[i].xfer_len) >=
 				(offset + length))
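
The allocation changes above swap kzalloc(n * size) for kcalloc(n, size) and return -ENOMEM instead of a bare -1. kcalloc checks the multiplication for overflow before allocating; a userspace sketch of the same idea, with a hypothetical alloc_array() wrapper around calloc():

/* Overflow-checked, zeroing array allocation, mirroring kcalloc's intent. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array(size_t nmemb, size_t size)
{
	if (size && nmemb > SIZE_MAX / size) {
		errno = ENOMEM;		/* nmemb * size would overflow: refuse */
		return NULL;
	}
	return calloc(nmemb, size);	/* zeroed, like kzalloc/kcalloc */
}

int main(void)
{
	uint32_t *array = alloc_array(128, sizeof(uint32_t));

	if (!array)
		return -ENOMEM;		/* report a real errno, not -1 */
	array[0] = 42;
	printf("%u\n", array[0]);
	free(array);
	return 0;
}
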
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
index 0d52a10..d5b1537 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -78,7 +78,7 @@
 	u32		xfer_len;
 } ____cacheline_aligned;
 
-extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *);
+extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
 extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
 extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
 extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index e01da9d..f4e640b 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -78,10 +78,7 @@
 {
 	struct iscsi_session *sess = conn->sess;
 	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
-#if 0
-	struct iscsi_init_task_mgt_cmnd *hdr =
-		(struct iscsi_init_task_mgt_cmnd *) buf;
-#endif
+
 	if (!na->tmr_warm_reset) {
 		pr_err("TMR Opcode TARGET_WARM_RESET authorization"
 			" failed for Initiator Node: %s\n",
@@ -216,7 +213,7 @@
 	iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	cmd->i_state = ISTATE_SEND_NOPIN;
@@ -272,9 +269,9 @@
 		offset = cmd->next_burst_len = cmd->write_data_done;
 
 		if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
-		     cmd->data_length) {
+		     cmd->se_cmd.data_length) {
 			no_build_r2ts = 1;
-			length = (cmd->data_length - offset);
+			length = (cmd->se_cmd.data_length - offset);
 		} else
 			length = (conn->sess->sess_ops->FirstBurstLength - offset);
 
@@ -292,7 +289,7 @@
 	/*
 	 * iscsit_build_r2ts_for_cmd() can handle the rest from here.
 	 */
-	return iscsit_build_r2ts_for_cmd(cmd, conn, 2);
+	return iscsit_build_r2ts_for_cmd(cmd, conn, true);
 }
 
 static int iscsit_task_reassign_complete_read(
@@ -385,7 +382,7 @@
 	iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 4eba86d..b42cdeb 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -163,7 +163,7 @@
 	}
 
 	cmd->conn	= conn;
-	INIT_LIST_HEAD(&cmd->i_list);
+	INIT_LIST_HEAD(&cmd->i_conn_node);
 	INIT_LIST_HEAD(&cmd->datain_list);
 	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
 	init_completion(&cmd->reject_comp);
@@ -176,174 +176,6 @@
 	return cmd;
 }
 
-/*
- * Called from iscsi_handle_scsi_cmd()
- */
-struct iscsi_cmd *iscsit_allocate_se_cmd(
-	struct iscsi_conn *conn,
-	u32 data_length,
-	int data_direction,
-	int iscsi_task_attr)
-{
-	struct iscsi_cmd *cmd;
-	struct se_cmd *se_cmd;
-	int sam_task_attr;
-
-	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
-	if (!cmd)
-		return NULL;
-
-	cmd->data_direction = data_direction;
-	cmd->data_length = data_length;
-	/*
-	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
-	 */
-	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
-	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
-		sam_task_attr = MSG_SIMPLE_TAG;
-	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
-		sam_task_attr = MSG_ORDERED_TAG;
-	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
-		sam_task_attr = MSG_HEAD_TAG;
-	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
-		sam_task_attr = MSG_ACA_TAG;
-	else {
-		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
-			" MSG_SIMPLE_TAG\n", iscsi_task_attr);
-		sam_task_attr = MSG_SIMPLE_TAG;
-	}
-
-	se_cmd = &cmd->se_cmd;
-	/*
-	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
-	 */
-	transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
-			conn->sess->se_sess, data_length, data_direction,
-			sam_task_attr, &cmd->sense_buffer[0]);
-	return cmd;
-}
-
-struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
-	struct iscsi_conn *conn,
-	u8 function)
-{
-	struct iscsi_cmd *cmd;
-	struct se_cmd *se_cmd;
-	int rc;
-	u8 tcm_function;
-
-	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
-	if (!cmd)
-		return NULL;
-
-	cmd->data_direction = DMA_NONE;
-
-	cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
-	if (!cmd->tmr_req) {
-		pr_err("Unable to allocate memory for"
-			" Task Management command!\n");
-		goto out;
-	}
-	/*
-	 * TASK_REASSIGN for ERL=2 / connection stays inside of
-	 * LIO-Target $FABRIC_MOD
-	 */
-	if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
-		return cmd;
-
-	se_cmd = &cmd->se_cmd;
-	/*
-	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
-	 */
-	transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
-				conn->sess->se_sess, 0, DMA_NONE,
-				MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
-
-	switch (function) {
-	case ISCSI_TM_FUNC_ABORT_TASK:
-		tcm_function = TMR_ABORT_TASK;
-		break;
-	case ISCSI_TM_FUNC_ABORT_TASK_SET:
-		tcm_function = TMR_ABORT_TASK_SET;
-		break;
-	case ISCSI_TM_FUNC_CLEAR_ACA:
-		tcm_function = TMR_CLEAR_ACA;
-		break;
-	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
-		tcm_function = TMR_CLEAR_TASK_SET;
-		break;
-	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
-		tcm_function = TMR_LUN_RESET;
-		break;
-	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
-		tcm_function = TMR_TARGET_WARM_RESET;
-		break;
-	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
-		tcm_function = TMR_TARGET_COLD_RESET;
-		break;
-	default:
-		pr_err("Unknown iSCSI TMR Function:"
-			" 0x%02x\n", function);
-		goto out;
-	}
-
-	rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL);
-	if (rc < 0)
-		goto out;
-
-	cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
-
-	return cmd;
-out:
-	iscsit_release_cmd(cmd);
-	return NULL;
-}
-
-int iscsit_decide_list_to_build(
-	struct iscsi_cmd *cmd,
-	u32 immediate_data_length)
-{
-	struct iscsi_build_list bl;
-	struct iscsi_conn *conn = cmd->conn;
-	struct iscsi_session *sess = conn->sess;
-	struct iscsi_node_attrib *na;
-
-	if (sess->sess_ops->DataSequenceInOrder &&
-	    sess->sess_ops->DataPDUInOrder)
-		return 0;
-
-	if (cmd->data_direction == DMA_NONE)
-		return 0;
-
-	na = iscsit_tpg_get_node_attrib(sess);
-	memset(&bl, 0, sizeof(struct iscsi_build_list));
-
-	if (cmd->data_direction == DMA_FROM_DEVICE) {
-		bl.data_direction = ISCSI_PDU_READ;
-		bl.type = PDULIST_NORMAL;
-		if (na->random_datain_pdu_offsets)
-			bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
-		if (na->random_datain_seq_offsets)
-			bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
-	} else {
-		bl.data_direction = ISCSI_PDU_WRITE;
-		bl.immediate_data_length = immediate_data_length;
-		if (na->random_r2t_offsets)
-			bl.randomize |= RANDOM_R2T_OFFSETS;
-
-		if (!cmd->immediate_data && !cmd->unsolicited_data)
-			bl.type = PDULIST_NORMAL;
-		else if (cmd->immediate_data && !cmd->unsolicited_data)
-			bl.type = PDULIST_IMMEDIATE;
-		else if (!cmd->immediate_data && cmd->unsolicited_data)
-			bl.type = PDULIST_UNSOLICITED;
-		else if (cmd->immediate_data && cmd->unsolicited_data)
-			bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
-	}
-
-	return iscsit_do_build_list(cmd, &bl);
-}
-
 struct iscsi_seq *iscsit_get_seq_holder_for_datain(
 	struct iscsi_cmd *cmd,
 	u32 seq_send_order)
@@ -502,14 +334,14 @@
 	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
 		return 0;
 
-	if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
+	if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
 	    ((cmd->first_burst_len + payload_length) !=
 	      conn->sess->sess_ops->FirstBurstLength)) {
 		pr_err("Unsolicited non-immediate data received %u"
 			" does not equal FirstBurstLength: %u, and does"
 			" not equal ExpXferLen %u.\n",
 			(cmd->first_burst_len + payload_length),
-			conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
+			conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
 		transport_send_check_condition_and_sense(se_cmd,
 				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
 		return -1;
@@ -524,7 +356,7 @@
 	struct iscsi_cmd *cmd;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 		if (cmd->init_task_tag == init_task_tag) {
 			spin_unlock_bh(&conn->cmd_lock);
 			return cmd;
@@ -545,7 +377,7 @@
 	struct iscsi_cmd *cmd;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 		if (cmd->init_task_tag == init_task_tag) {
 			spin_unlock_bh(&conn->cmd_lock);
 			return cmd;
@@ -568,7 +400,7 @@
 	struct iscsi_cmd *cmd = NULL;
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 		if (cmd->targ_xfer_tag == targ_xfer_tag) {
 			spin_unlock_bh(&conn->cmd_lock);
 			return cmd;
@@ -596,7 +428,7 @@
 	spin_lock(&sess->cr_i_lock);
 	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
 		spin_lock(&cr->conn_recovery_cmd_lock);
-		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
 			if (cmd->init_task_tag == init_task_tag) {
 				spin_unlock(&cr->conn_recovery_cmd_lock);
 				spin_unlock(&sess->cr_i_lock);
@@ -616,7 +448,7 @@
 	spin_lock(&sess->cr_a_lock);
 	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
 		spin_lock(&cr->conn_recovery_cmd_lock);
-		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
 			if (cmd->init_task_tag == init_task_tag) {
 				spin_unlock(&cr->conn_recovery_cmd_lock);
 				spin_unlock(&sess->cr_a_lock);
@@ -813,7 +645,6 @@
 void iscsit_release_cmd(struct iscsi_cmd *cmd)
 {
 	struct iscsi_conn *conn = cmd->conn;
-	int i;
 
 	iscsit_free_r2ts_from_list(cmd);
 	iscsit_free_all_datain_reqs(cmd);
@@ -824,11 +655,6 @@
 	kfree(cmd->tmr_req);
 	kfree(cmd->iov_data);
 
-	for (i = 0; i < cmd->t_mem_sg_nents; i++)
-		__free_page(sg_page(&cmd->t_mem_sg[i]));
-
-	kfree(cmd->t_mem_sg);
-
 	if (conn) {
 		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
 		iscsit_remove_cmd_from_response_queue(cmd, conn);
@@ -1038,7 +864,7 @@
 	spin_unlock_bh(&conn->sess->ttt_lock);
 
 	spin_lock_bh(&conn->cmd_lock);
-	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);
 
 	if (want_response)
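
The removed iscsit_allocate_se_cmd() mapped iSCSI task attributes onto SAM task attributes before initializing the se_cmd descriptor; that setup now happens elsewhere in the command RX path. A userspace sketch of the mapping it performed, with illustrative enum values standing in for the kernel's ISCSI_ATTR_* and MSG_*_TAG constants:

#include <stdio.h>

enum iscsi_attr { ATTR_UNTAGGED, ATTR_SIMPLE, ATTR_ORDERED,
		  ATTR_HEAD_OF_QUEUE, ATTR_ACA };
enum sam_attr { SAM_SIMPLE, SAM_ORDERED, SAM_HEAD, SAM_ACA };

static enum sam_attr iscsi_to_sam_attr(enum iscsi_attr attr)
{
	switch (attr) {
	case ATTR_ORDERED:
		return SAM_ORDERED;
	case ATTR_HEAD_OF_QUEUE:
		return SAM_HEAD;
	case ATTR_ACA:
		return SAM_ACA;
	case ATTR_UNTAGGED:
	case ATTR_SIMPLE:
	default:
		/* Unknown or untagged attributes fall back to SIMPLE. */
		return SAM_SIMPLE;
	}
}

int main(void)
{
	printf("%d\n", iscsi_to_sam_attr(ATTR_ORDERED));
	return 0;
}
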
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 835bf7d..e1c729b 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -9,9 +9,6 @@
 extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
 extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
 extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
-extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int);
-extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8);
-extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
 extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index a9b4eee..38dfac2 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -213,7 +213,7 @@
 	 * associated read buffers, go ahead and do that here for type
 	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
 	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
-	 * by target core in transport_generic_allocate_tasks() ->
+	 * by target core in target_setup_cmd_from_cdb() ->
 	 * transport_generic_cmd_sequencer().
 	 */
 	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
@@ -227,7 +227,7 @@
 		}
 	}
 
-	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+	ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
 	if (ret == -ENOMEM) {
 		transport_send_check_condition_and_sense(se_cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index c7746a3..e624b83 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -59,26 +59,31 @@
  *
  * See spc4r17 section 6.27
  */
-int target_emulate_report_target_port_groups(struct se_task *task)
+int target_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	unsigned char *buf;
-	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
-				    Target port group descriptor */
+	u32 rd_len = 0, off;
+	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
 	/*
-	 * Need at least 4 bytes of response data or else we can't
-	 * even fit the return data length.
+	 * Skip over the RESERVED area to the first Target port group
+	 * descriptor, depending on the PARAMETER DATA FORMAT type.
 	 */
-	if (cmd->data_length < 4) {
-		pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
-			" too small\n", cmd->data_length);
+	if (ext_hdr != 0)
+		off = 8;
+	else
+		off = 4;
+
+	if (cmd->data_length < off) {
+		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
+			" small for %s header\n", cmd->data_length,
+			(ext_hdr) ? "extended" : "normal");
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
 		return -EINVAL;
 	}
-
 	buf = transport_kmap_data_sg(cmd);
 
 	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
@@ -159,15 +164,34 @@
 	/*
 	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 	 */
-	buf[0] = ((rd_len >> 24) & 0xff);
-	buf[1] = ((rd_len >> 16) & 0xff);
-	buf[2] = ((rd_len >> 8) & 0xff);
-	buf[3] = (rd_len & 0xff);
+	put_unaligned_be32(rd_len, &buf[0]);
 
+	/*
+	 * Fill in the Extended header parameter data format if requested
+	 */
+	if (ext_hdr != 0) {
+		buf[4] = 0x10;
+		/*
+		 * Set the implicit transition time (in seconds) for the application
+		 * client to use as a base for its transition timeout value.
+		 *
+		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
+		 * this CDB was received upon to determine this value individually
+		 * for the ALUA target port group.
+		 */
+		port = cmd->se_lun->lun_sep;
+		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+		if (tg_pt_gp_mem) {
+			spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+			tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+			if (tg_pt_gp)
+				buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs;
+			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		}
+	}
 	transport_kunmap_data_sg(cmd);
 
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
@@ -176,9 +200,8 @@
  *
  * See spc4r17 section 6.35
  */
-int target_emulate_set_target_port_groups(struct se_task *task)
+int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
 	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
@@ -351,8 +374,7 @@
 
 out:
 	transport_kunmap_data_sg(cmd);
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
@@ -391,7 +413,7 @@
 	case RECEIVE_DIAGNOSTIC:
 	case SEND_DIAGNOSTIC:
 	case MAINTENANCE_IN:
-		switch (cdb[1]) {
+		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
@@ -433,7 +455,7 @@
 	case INQUIRY:
 	case REPORT_LUNS:
 	case MAINTENANCE_IN:
-		switch (cdb[1]) {
+		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
@@ -473,7 +495,7 @@
 	case INQUIRY:
 	case REPORT_LUNS:
 	case MAINTENANCE_IN:
-		switch (cdb[1]) {
+		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
@@ -1359,6 +1381,7 @@
 	 */
 	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
 	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+	tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
 
 	if (def_group) {
 		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
@@ -1855,6 +1878,37 @@
 	return count;
 }
 
+ssize_t core_alua_show_implict_trans_secs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs);
+}
+
+ssize_t core_alua_store_implict_trans_secs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract implict_trans_secs\n");
+		return -EINVAL;
+	}
+	if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
+		pr_err("Passed implict_trans_secs: %lu, exceeds"
+			" ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp,
+			ALUA_MAX_IMPLICT_TRANS_SECS);
+		return  -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
+
+	return count;
+}
+
 ssize_t core_alua_show_preferred_bit(
 	struct t10_alua_tg_pt_gp *tg_pt_gp,
 	char *page)
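
target_emulate_report_target_port_groups() now honours the extended parameter data format: descriptors start at offset 8 instead of 4, byte 4 carries the format/validity bits, byte 5 carries the implicit transition time hint, and the RETURN DATA LENGTH is stored with put_unaligned_be32(). A standalone sketch of that header layout, with a hand-rolled big-endian store and made-up example values; it is an illustration, not the in-kernel implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_be32(uint32_t val, uint8_t *p)
{
	p[0] = val >> 24;
	p[1] = val >> 16;
	p[2] = val >> 8;
	p[3] = val;
}

int main(void)
{
	uint8_t buf[16];
	int ext_hdr = 1;			/* EXT HDR bit, cdb[1] & 0x20 */
	unsigned int off = ext_hdr ? 8 : 4;	/* descriptors start here */
	uint32_t rd_len = 32;			/* bytes of descriptor data */

	memset(buf, 0, sizeof(buf));
	put_be32(rd_len, &buf[0]);		/* RETURN DATA LENGTH */
	if (ext_hdr) {
		buf[4] = 0x10;			/* implicit transition time valid */
		buf[5] = 30;			/* e.g. a 30 s transition hint */
	}
	printf("descriptors begin at offset %u\n", off);
	return 0;
}
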
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index c5b4ecd..f920c17 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -52,6 +52,12 @@
 #define ALUA_DEFAULT_TRANS_DELAY_MSECS			0
 #define ALUA_MAX_TRANS_DELAY_MSECS			30000 /* 30 seconds */
 /*
+ * Used for the recommended application client implicit transition timeout
+ * in seconds, returned by REPORT_TARGET_PORT_GROUPS with the extended header.
+ */
+#define ALUA_DEFAULT_IMPLICT_TRANS_SECS			0
+#define ALUA_MAX_IMPLICT_TRANS_SECS			255
+/*
  * Used by core_alua_update_tpg_primary_metadata() and
  * core_alua_update_tpg_secondary_metadata()
  */
@@ -66,8 +72,8 @@
 extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
-extern int target_emulate_report_target_port_groups(struct se_task *);
-extern int target_emulate_set_target_port_groups(struct se_task *);
+extern int target_emulate_report_target_port_groups(struct se_cmd *);
+extern int target_emulate_set_target_port_groups(struct se_cmd *);
 extern int core_alua_check_nonop_delay(struct se_cmd *);
 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
 				struct se_device *, struct se_port *,
@@ -107,6 +113,10 @@
 					char *);
 extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
 					const char *, size_t);
+extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
 extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
 					char *);
 extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 30a6770..9888693 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -432,6 +432,7 @@
 target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_device *dev = cmd->se_dev;
+	u32 max_sectors;
 	int have_tp = 0;
 
 	/*
@@ -456,7 +457,9 @@
 	/*
 	 * Set MAXIMUM TRANSFER LENGTH
 	 */
-	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]);
+	max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
+			  dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+	put_unaligned_be32(max_sectors, &buf[8]);
 
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH
@@ -598,9 +601,8 @@
 	return 0;
 }
 
-int target_emulate_inquiry(struct se_task *task)
+int target_emulate_inquiry(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
 	unsigned char *buf, *map_buf;
@@ -664,16 +666,13 @@
 	}
 	transport_kunmap_data_sg(cmd);
 
-	if (!ret) {
-		task->task_scsi_status = GOOD;
-		transport_complete_task(task, 1);
-	}
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
 	return ret;
 }
 
-int target_emulate_readcapacity(struct se_task *task)
+int target_emulate_readcapacity(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	unsigned char *buf;
 	unsigned long long blocks_long = dev->transport->get_blocks(dev);
@@ -697,14 +696,12 @@
 
 	transport_kunmap_data_sg(cmd);
 
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
-int target_emulate_readcapacity_16(struct se_task *task)
+int target_emulate_readcapacity_16(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	unsigned char *buf;
 	unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -732,8 +729,7 @@
 
 	transport_kunmap_data_sg(cmd);
 
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
@@ -872,9 +868,8 @@
 	}
 }
 
-int target_emulate_modesense(struct se_task *task)
+int target_emulate_modesense(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	char *cdb = cmd->t_task_cdb;
 	unsigned char *rbuf;
@@ -947,14 +942,12 @@
 	memcpy(rbuf, buf, offset);
 	transport_kunmap_data_sg(cmd);
 
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
-int target_emulate_request_sense(struct se_task *task)
+int target_emulate_request_sense(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	unsigned char *cdb = cmd->t_task_cdb;
 	unsigned char *buf;
 	u8 ua_asc = 0, ua_ascq = 0;
@@ -1008,8 +1001,7 @@
 
 end:
 	transport_kunmap_data_sg(cmd);
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
@@ -1017,9 +1009,8 @@
  * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
  * Note this is not used for TCM/pSCSI passthrough
  */
-int target_emulate_unmap(struct se_task *task)
+int target_emulate_unmap(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	unsigned char *buf, *ptr = NULL;
 	unsigned char *cdb = &cmd->t_task_cdb[0];
@@ -1066,10 +1057,8 @@
 
 err:
 	transport_kunmap_data_sg(cmd);
-	if (!ret) {
-		task->task_scsi_status = GOOD;
-		transport_complete_task(task, 1);
-	}
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
 	return ret;
 }
 
@@ -1077,9 +1066,8 @@
  * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
  * Note this is not used for TCM/pSCSI passthrough
  */
-int target_emulate_write_same(struct se_task *task)
+int target_emulate_write_same(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	sector_t range;
 	sector_t lba = cmd->t_task_lba;
@@ -1118,79 +1106,25 @@
 		return ret;
 	}
 
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
 
-int target_emulate_synchronize_cache(struct se_task *task)
+int target_emulate_synchronize_cache(struct se_cmd *cmd)
 {
-	struct se_device *dev = task->task_se_cmd->se_dev;
-	struct se_cmd *cmd = task->task_se_cmd;
-
-	if (!dev->transport->do_sync_cache) {
+	if (!cmd->se_dev->transport->do_sync_cache) {
 		pr_err("SYNCHRONIZE_CACHE emulation not supported"
-			" for: %s\n", dev->transport->name);
+			" for: %s\n", cmd->se_dev->transport->name);
 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
 		return -ENOSYS;
 	}
 
-	dev->transport->do_sync_cache(task);
+	cmd->se_dev->transport->do_sync_cache(cmd);
 	return 0;
 }
 
-int target_emulate_noop(struct se_task *task)
+int target_emulate_noop(struct se_cmd *cmd)
 {
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	target_complete_cmd(cmd, GOOD);
 	return 0;
 }
-
-/*
- * Write a CDB into @cdb that is based on the one the intiator sent us,
- * but updated to only cover the sectors that the current task handles.
- */
-void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
-{
-	struct se_cmd *cmd = task->task_se_cmd;
-	unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb);
-
-	memcpy(cdb, cmd->t_task_cdb, cdb_len);
-	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
-		unsigned long long lba = task->task_lba;
-		u32 sectors = task->task_sectors;
-
-		switch (cdb_len) {
-		case 6:
-			/* 21-bit LBA and 8-bit sectors */
-			cdb[1] = (lba >> 16) & 0x1f;
-			cdb[2] = (lba >> 8) & 0xff;
-			cdb[3] = lba & 0xff;
-			cdb[4] = sectors & 0xff;
-			break;
-		case 10:
-			/* 32-bit LBA and 16-bit sectors */
-			put_unaligned_be32(lba, &cdb[2]);
-			put_unaligned_be16(sectors, &cdb[7]);
-			break;
-		case 12:
-			/* 32-bit LBA and 32-bit sectors */
-			put_unaligned_be32(lba, &cdb[2]);
-			put_unaligned_be32(sectors, &cdb[6]);
-			break;
-		case 16:
-			/* 64-bit LBA and 32-bit sectors */
-			put_unaligned_be64(lba, &cdb[2]);
-			put_unaligned_be32(sectors, &cdb[10]);
-			break;
-		case 32:
-			/* 64-bit LBA and 32-bit sectors, extended CDB */
-			put_unaligned_be64(lba, &cdb[12]);
-			put_unaligned_be32(sectors, &cdb[28]);
-			break;
-		default:
-			BUG();
-		}
-	}
-}
-EXPORT_SYMBOL(target_get_task_cdb);
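
target_get_task_cdb() is deleted above because, with struct se_task gone, backends no longer consume per-task rewritten CDBs. For reference, a userspace sketch of the READ(10) encoding it used to emit for 10-byte CDBs (32-bit LBA at byte 2, 16-bit transfer length at byte 7, both big-endian); the helper name is hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void build_read10(uint8_t *cdb, uint32_t lba, uint16_t sectors)
{
	memset(cdb, 0, 10);
	cdb[0] = 0x28;			/* READ(10) opcode */
	cdb[2] = lba >> 24;		/* LOGICAL BLOCK ADDRESS, big-endian */
	cdb[3] = lba >> 16;
	cdb[4] = lba >> 8;
	cdb[5] = lba;
	cdb[7] = sectors >> 8;		/* TRANSFER LENGTH, big-endian */
	cdb[8] = sectors;
}

int main(void)
{
	uint8_t cdb[10];

	build_read10(cdb, 0x12345678, 128);
	printf("%02x %02x %02x %02x %02x\n",
	       cdb[2], cdb[3], cdb[4], cdb[5], cdb[8]);
	return 0;
}
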
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index cbb6653..801efa8 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -683,9 +683,6 @@
 DEF_DEV_ATTRIB_RO(hw_max_sectors);
 SE_DEV_ATTR_RO(hw_max_sectors);
 
-DEF_DEV_ATTRIB(max_sectors);
-SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
-
 DEF_DEV_ATTRIB(fabric_max_sectors);
 SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
 
@@ -727,7 +724,6 @@
 	&target_core_dev_attrib_hw_block_size.attr,
 	&target_core_dev_attrib_block_size.attr,
 	&target_core_dev_attrib_hw_max_sectors.attr,
-	&target_core_dev_attrib_max_sectors.attr,
 	&target_core_dev_attrib_fabric_max_sectors.attr,
 	&target_core_dev_attrib_optimal_sectors.attr,
 	&target_core_dev_attrib_hw_queue_depth.attr,
@@ -2451,6 +2447,26 @@
 SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
 
 /*
+ * implict_trans_secs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_implict_trans_secs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_implict_trans_secs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR);
+
+/*
  * preferred
  */
 
@@ -2574,6 +2590,7 @@
 	&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
 	&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
 	&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
+	&target_core_alua_tg_pt_gp_implict_trans_secs.attr,
 	&target_core_alua_tg_pt_gp_preferred.attr,
 	&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
 	&target_core_alua_tg_pt_gp_members.attr,
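
The new implict_trans_secs configfs attribute follows the usual store pattern: parse the user string, range-check it against ALUA_MAX_IMPLICT_TRANS_SECS, then commit. A userspace sketch of that pattern, with strtoul() standing in for strict_strtoul() and a hypothetical store_implicit_trans_secs() helper:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_IMPLICIT_TRANS_SECS 255	/* mirrors ALUA_MAX_IMPLICT_TRANS_SECS */

static int store_implicit_trans_secs(const char *page, int *out)
{
	char *end;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(page, &end, 0);
	if (errno || end == page)
		return -EINVAL;		/* not a number */
	if (tmp > MAX_IMPLICIT_TRANS_SECS)
		return -EINVAL;		/* out of range */
	*out = (int)tmp;
	return 0;
}

int main(void)
{
	int secs;

	if (!store_implicit_trans_secs("30", &secs))
		printf("implicit transition hint: %d s\n", secs);
	return 0;
}
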
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index aa62677..5ad9728 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -643,9 +643,8 @@
 	lun->lun_se_dev = NULL;
 }
 
-int target_report_luns(struct se_task *se_task)
+int target_report_luns(struct se_cmd *se_cmd)
 {
-	struct se_cmd *se_cmd = se_task->task_se_cmd;
 	struct se_dev_entry *deve;
 	struct se_session *se_sess = se_cmd->se_sess;
 	unsigned char *buf;
@@ -696,8 +695,7 @@
 	buf[3] = (lun_count & 0xff);
 	transport_kunmap_data_sg(se_cmd);
 
-	se_task->task_scsi_status = GOOD;
-	transport_complete_task(se_task, 1);
+	target_complete_cmd(se_cmd, GOOD);
 	return 0;
 }
 
@@ -878,15 +876,12 @@
 	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
 	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
 	/*
-	 * max_sectors is based on subsystem plugin dependent requirements.
+	 * Align max_hw_sectors down so that I/O transfers stay PAGE_SIZE aligned
 	 */
-	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
-	/*
-	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-	 */
-	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+	limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
 						limits->logical_block_size);
-	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
+	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+
 	/*
 	 * Set fabric_max_sectors, which is reported in block limits
 	 * VPD page (B0h).
@@ -1170,64 +1165,6 @@
 	return 0;
 }
 
-int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
-{
-	int force = 0; /* Force setting for VDEVS */
-
-	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-		pr_err("dev[%p]: Unable to change SE Device"
-			" max_sectors while dev_export_obj: %d count exists\n",
-			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
-		return -EINVAL;
-	}
-	if (!max_sectors) {
-		pr_err("dev[%p]: Illegal ZERO value for"
-			" max_sectors\n", dev);
-		return -EINVAL;
-	}
-	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
-		pr_err("dev[%p]: Passed max_sectors: %u less than"
-			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
-				DA_STATUS_MAX_SECTORS_MIN);
-		return -EINVAL;
-	}
-	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
-			pr_err("dev[%p]: Passed max_sectors: %u"
-				" greater than TCM/SE_Device max_sectors:"
-				" %u\n", dev, max_sectors,
-				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
-			 return -EINVAL;
-		}
-	} else {
-		if (!force && (max_sectors >
-				 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
-			pr_err("dev[%p]: Passed max_sectors: %u"
-				" greater than TCM/SE_Device max_sectors"
-				": %u, use force=1 to override.\n", dev,
-				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
-			return -EINVAL;
-		}
-		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-			pr_err("dev[%p]: Passed max_sectors: %u"
-				" greater than DA_STATUS_MAX_SECTORS_MAX:"
-				" %u\n", dev, max_sectors,
-				DA_STATUS_MAX_SECTORS_MAX);
-			return -EINVAL;
-		}
-	}
-	/*
-	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
-	 */
-	max_sectors = se_dev_align_max_sectors(max_sectors,
-				dev->se_sub_dev->se_dev_attrib.block_size);
-
-	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
-	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
-			dev, max_sectors);
-	return 0;
-}
-
 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
 {
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
@@ -1341,7 +1278,6 @@
 	u32 lun)
 {
 	struct se_lun *lun_p;
-	u32 lun_access = 0;
 	int rc;
 
 	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
@@ -1354,12 +1290,8 @@
 	if (IS_ERR(lun_p))
 		return lun_p;
 
-	if (dev->dev_flags & DF_READ_ONLY)
-		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
-	else
-		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
-
-	rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
+	rc = core_tpg_post_addlun(tpg, lun_p,
+				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
 	if (rc < 0)
 		return ERR_PTR(rc);
 
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 7ed58e2..686dba1 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -133,15 +133,10 @@
 		ret = PTR_ERR(dev_p);
 		goto fail;
 	}
-#if 0
-	if (di->no_create_file)
-		flags = O_RDWR | O_LARGEFILE;
-	else
-		flags = O_RDWR | O_CREAT | O_LARGEFILE;
-#else
+
+	/* O_DIRECT too? */
 	flags = O_RDWR | O_CREAT | O_LARGEFILE;
-#endif
-/*	flags |= O_DIRECT; */
+
 	/*
 	 * If fd_buffered_io=1 has not been set explicitly (the default),
 	 * use O_SYNC to force FILEIO writes to disk.
@@ -169,6 +164,7 @@
 	inode = file->f_mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
 		struct request_queue *q;
+		unsigned long long dev_size;
 		/*
 		 * Setup the local scope queue_limits from struct request_queue->limits
 		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
@@ -183,13 +179,12 @@
 		 * one (1) logical sector from underlying struct block_device
 		 */
 		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
-		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
+		dev_size = (i_size_read(file->f_mapping->host) -
 				       fd_dev->fd_block_size);
 
 		pr_debug("FILEIO: Using size: %llu bytes from struct"
 			" block_device blocks: %llu logical_block_size: %d\n",
-			fd_dev->fd_dev_size,
-			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
+			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
 			fd_dev->fd_block_size);
 	} else {
 		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
@@ -249,53 +244,33 @@
 	kfree(fd_dev);
 }
 
-static inline struct fd_request *FILE_REQ(struct se_task *task)
+static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents)
 {
-	return container_of(task, struct fd_request, fd_task);
-}
-
-
-static struct se_task *
-fd_alloc_task(unsigned char *cdb)
-{
-	struct fd_request *fd_req;
-
-	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
-	if (!fd_req) {
-		pr_err("Unable to allocate struct fd_request\n");
-		return NULL;
-	}
-
-	return &fd_req->fd_task;
-}
-
-static int fd_do_readv(struct se_task *task)
-{
-	struct fd_request *req = FILE_REQ(task);
-	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+	struct se_device *se_dev = cmd->se_dev;
 	struct fd_dev *dev = se_dev->dev_ptr;
 	struct file *fd = dev->fd_file;
-	struct scatterlist *sg = task->task_sg;
+	struct scatterlist *sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
-	loff_t pos = (task->task_lba *
+	loff_t pos = (cmd->t_task_lba *
 		      se_dev->se_sub_dev->se_dev_attrib.block_size);
 	int ret = 0, i;
 
-	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
 	if (!iov) {
 		pr_err("Unable to allocate fd_do_readv iov[]\n");
 		return -ENOMEM;
 	}
 
-	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		iov[i].iov_len = sg->length;
 		iov[i].iov_base = sg_virt(sg);
 	}
 
 	old_fs = get_fs();
 	set_fs(get_ds());
-	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
+	ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
 	set_fs(old_fs);
 
 	kfree(iov);
@@ -305,10 +280,10 @@
 	 * block_device.
 	 */
 	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
-		if (ret < 0 || ret != task->task_size) {
+		if (ret < 0 || ret != cmd->data_length) {
 			pr_err("vfs_readv() returned %d,"
 				" expecting %d for S_ISBLK\n", ret,
-				(int)task->task_size);
+				(int)cmd->data_length);
 			return (ret < 0 ? ret : -EINVAL);
 		}
 	} else {
@@ -322,38 +297,38 @@
 	return 1;
 }
 
-static int fd_do_writev(struct se_task *task)
+static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents)
 {
-	struct fd_request *req = FILE_REQ(task);
-	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+	struct se_device *se_dev = cmd->se_dev;
 	struct fd_dev *dev = se_dev->dev_ptr;
 	struct file *fd = dev->fd_file;
-	struct scatterlist *sg = task->task_sg;
+	struct scatterlist *sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
-	loff_t pos = (task->task_lba *
+	loff_t pos = (cmd->t_task_lba *
 		      se_dev->se_sub_dev->se_dev_attrib.block_size);
 	int ret, i = 0;
 
-	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
 	if (!iov) {
 		pr_err("Unable to allocate fd_do_writev iov[]\n");
 		return -ENOMEM;
 	}
 
-	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		iov[i].iov_len = sg->length;
 		iov[i].iov_base = sg_virt(sg);
 	}
 
 	old_fs = get_fs();
 	set_fs(get_ds());
-	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
+	ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
 	set_fs(old_fs);
 
 	kfree(iov);
 
-	if (ret < 0 || ret != task->task_size) {
+	if (ret < 0 || ret != cmd->data_length) {
 		pr_err("vfs_writev() returned %d\n", ret);
 		return (ret < 0 ? ret : -EINVAL);
 	}
@@ -361,9 +336,8 @@
 	return 1;
 }
 
-static void fd_emulate_sync_cache(struct se_task *task)
+static void fd_emulate_sync_cache(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct fd_dev *fd_dev = dev->dev_ptr;
 	int immed = (cmd->t_task_cdb[1] & 0x2);
@@ -375,7 +349,7 @@
 	 * for this SYNCHRONIZE_CACHE op
 	 */
 	if (immed)
-		transport_complete_sync_cache(cmd, 1);
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
 
 	/*
 	 * Determine if we will be flushing the entire device.
@@ -395,33 +369,37 @@
 	if (ret != 0)
 		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
 
-	if (!immed)
-		transport_complete_sync_cache(cmd, ret == 0);
+	if (immed)
+		return;
+
+	if (ret) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+	} else {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+	}
 }
 
-/*
- * WRITE Force Unit Access (FUA) emulation on a per struct se_task
- * LBA range basis..
- */
-static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+static void fd_emulate_write_fua(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct fd_dev *fd_dev = dev->dev_ptr;
-	loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
-	loff_t end = start + task->task_size;
+	loff_t start = cmd->t_task_lba *
+		dev->se_sub_dev->se_dev_attrib.block_size;
+	loff_t end = start + cmd->data_length;
 	int ret;
 
 	pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
-			task->task_lba, task->task_size);
+		cmd->t_task_lba, cmd->data_length);
 
 	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 	if (ret != 0)
 		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
 }
 
-static int fd_do_task(struct se_task *task)
+static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, enum dma_data_direction data_direction)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	int ret = 0;
 
@@ -429,10 +407,10 @@
 	 * Call vectorized fileio functions to map struct scatterlist
 	 * physical memory addresses to struct iovec virtual memory.
 	 */
-	if (task->task_data_direction == DMA_FROM_DEVICE) {
-		ret = fd_do_readv(task);
+	if (data_direction == DMA_FROM_DEVICE) {
+		ret = fd_do_readv(cmd, sgl, sgl_nents);
 	} else {
-		ret = fd_do_writev(task);
+		ret = fd_do_writev(cmd, sgl, sgl_nents);
 
 		if (ret > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
@@ -443,7 +421,7 @@
 			 * and return some sense data to let the initiator
 			 * know the FUA WRITE cache sync failed..?
 			 */
-			fd_emulate_write_fua(cmd, task);
+			fd_emulate_write_fua(cmd);
 		}
 
 	}
@@ -452,24 +430,11 @@
 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		return ret;
 	}
-	if (ret) {
-		task->task_scsi_status = GOOD;
-		transport_complete_task(task, 1);
-	}
+	if (ret)
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return 0;
 }
 
-/*	fd_free_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_task(struct se_task *task)
-{
-	struct fd_request *req = FILE_REQ(task);
-
-	kfree(req);
-}
-
 enum {
 	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
 };
@@ -605,10 +570,20 @@
 static sector_t fd_get_blocks(struct se_device *dev)
 {
 	struct fd_dev *fd_dev = dev->dev_ptr;
-	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
-			dev->se_sub_dev->se_dev_attrib.block_size);
+	struct file *f = fd_dev->fd_file;
+	struct inode *i = f->f_mapping->host;
+	unsigned long long dev_size;
+	/*
+	 * When using a file that references an underlying struct block_device,
+	 * ensure dev_size is always based on the current inode size in order
+	 * to handle underlying block_device resize operations.
+	 */
+	if (S_ISBLK(i->i_mode))
+		dev_size = (i_size_read(i) - fd_dev->fd_block_size);
+	else
+		dev_size = fd_dev->fd_dev_size;
 
-	return blocks_long;
+	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
 }
 
 static struct se_subsystem_api fileio_template = {
@@ -622,10 +597,8 @@
 	.allocate_virtdevice	= fd_allocate_virtdevice,
 	.create_virtdevice	= fd_create_virtdevice,
 	.free_device		= fd_free_device,
-	.alloc_task		= fd_alloc_task,
-	.do_task		= fd_do_task,
+	.execute_cmd		= fd_execute_cmd,
 	.do_sync_cache		= fd_emulate_sync_cache,
-	.free_task		= fd_free_task,
 	.check_configfs_dev_params = fd_check_configfs_dev_params,
 	.set_configfs_dev_params = fd_set_configfs_dev_params,
 	.show_configfs_dev_params = fd_show_configfs_dev_params,
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 59e6e73..fbd59ef 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -12,10 +12,6 @@
 #define RRF_EMULATE_CDB		0x01
 #define RRF_GOT_LBA		0x02
 
-struct fd_request {
-	struct se_task	fd_task;
-};
-
 #define FBDF_HAS_PATH		0x01
 #define FBDF_HAS_SIZE		0x02
 #define FDBD_USE_BUFFERED_IO	0x04
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 2ec299e..fd47950 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -189,26 +189,6 @@
 	kfree(ib_dev);
 }
 
-static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
-{
-	return container_of(task, struct iblock_req, ib_task);
-}
-
-static struct se_task *
-iblock_alloc_task(unsigned char *cdb)
-{
-	struct iblock_req *ib_req;
-
-	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
-	if (!ib_req) {
-		pr_err("Unable to allocate memory for struct iblock_req\n");
-		return NULL;
-	}
-
-	atomic_set(&ib_req->pending, 1);
-	return &ib_req->ib_task;
-}
-
 static unsigned long long iblock_emulate_read_cap_with_block_size(
 	struct se_device *dev,
 	struct block_device *bd,
@@ -295,8 +275,16 @@
 	if (err)
 		pr_err("IBLOCK: cache flush failed: %d\n", err);
 
-	if (cmd)
-		transport_complete_sync_cache(cmd, err == 0);
+	if (cmd) {
+		if (err) {
+			cmd->scsi_sense_reason =
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
+		} else {
+			target_complete_cmd(cmd, SAM_STAT_GOOD);
+		}
+	}
+
 	bio_put(bio);
 }
 
@@ -304,9 +292,8 @@
  * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
  * always flush the whole cache.
  */
-static void iblock_emulate_sync_cache(struct se_task *task)
+static void iblock_emulate_sync_cache(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
 	int immed = (cmd->t_task_cdb[1] & 0x2);
 	struct bio *bio;
@@ -316,7 +303,7 @@
 	 * for this SYNCHRONIZE_CACHE op.
 	 */
 	if (immed)
-		transport_complete_sync_cache(cmd, 1);
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
 
 	bio = bio_alloc(GFP_KERNEL, 0);
 	bio->bi_end_io = iblock_end_io_flush;
@@ -335,11 +322,6 @@
 	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
 }
 
-static void iblock_free_task(struct se_task *task)
-{
-	kfree(IBLOCK_REQ(task));
-}
-
 enum {
 	Opt_udev_path, Opt_force, Opt_err
 };
@@ -448,19 +430,35 @@
 	return bl;
 }
 
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr = cmd->priv;
+	u8 status;
+
+	if (!atomic_dec_and_test(&ibr->pending))
+		return;
+
+	if (atomic_read(&ibr->ib_bio_err_cnt))
+		status = SAM_STAT_CHECK_CONDITION;
+	else
+		status = SAM_STAT_GOOD;
+
+	target_complete_cmd(cmd, status);
+	kfree(ibr);
+}
+
 static void iblock_bio_destructor(struct bio *bio)
 {
-	struct se_task *task = bio->bi_private;
-	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
+	struct se_cmd *cmd = bio->bi_private;
+	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
 
 	bio_free(bio, ib_dev->ibd_bio_set);
 }
 
 static struct bio *
-iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 {
-	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
-	struct iblock_req *ib_req = IBLOCK_REQ(task);
+	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
 	struct bio *bio;
 
 	/*
@@ -476,19 +474,11 @@
 		return NULL;
 	}
 
-	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
-		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
-	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
-
 	bio->bi_bdev = ib_dev->ibd_bd;
-	bio->bi_private = task;
+	bio->bi_private = cmd;
 	bio->bi_destructor = iblock_bio_destructor;
 	bio->bi_end_io = &iblock_bio_done;
 	bio->bi_sector = lba;
-	atomic_inc(&ib_req->pending);
-
-	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
-	pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending));
 	return bio;
 }
 
@@ -503,20 +493,21 @@
 	blk_finish_plug(&plug);
 }
 
-static int iblock_do_task(struct se_task *task)
+static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, enum dma_data_direction data_direction)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	struct iblock_req *ibr = IBLOCK_REQ(task);
+	struct iblock_req *ibr;
 	struct bio *bio;
 	struct bio_list list;
 	struct scatterlist *sg;
-	u32 i, sg_num = task->task_sg_nents;
+	u32 sg_num = sgl_nents;
 	sector_t block_lba;
 	unsigned bio_cnt;
 	int rw;
+	int i;
 
-	if (task->task_data_direction == DMA_TO_DEVICE) {
+	if (data_direction == DMA_TO_DEVICE) {
 		/*
 		 * Force data to disk if we pretend to not have a volatile
 		 * write cache, or the initiator set the Force Unit Access bit.
@@ -532,17 +523,17 @@
 	}
 
 	/*
-	 * Do starting conversion up from non 512-byte blocksize with
-	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
+	 * Convert the blocksize advertised to the initiator to the 512 byte
+	 * units unconditionally used by the Linux block layer.
 	 */
 	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
-		block_lba = (task->task_lba << 3);
+		block_lba = (cmd->t_task_lba << 3);
 	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
-		block_lba = (task->task_lba << 2);
+		block_lba = (cmd->t_task_lba << 2);
 	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
-		block_lba = (task->task_lba << 1);
+		block_lba = (cmd->t_task_lba << 1);
 	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
-		block_lba = task->task_lba;
+		block_lba = cmd->t_task_lba;
 	else {
 		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
 				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
@@ -550,17 +541,22 @@
 		return -ENOSYS;
 	}
 
-	bio = iblock_get_bio(task, block_lba, sg_num);
-	if (!bio) {
-		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return -ENOMEM;
-	}
+	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!ibr)
+		goto fail;
+	cmd->priv = ibr;
+
+	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+	if (!bio)
+		goto fail_free_ibr;
 
 	bio_list_init(&list);
 	bio_list_add(&list, bio);
+
+	atomic_set(&ibr->pending, 2);
 	bio_cnt = 1;
 
-	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		/*
 		 * XXX: if the length the device accepts is shorter than the
 		 *	length of the S/G list entry this will cause an
@@ -573,9 +569,11 @@
 				bio_cnt = 0;
 			}
 
-			bio = iblock_get_bio(task, block_lba, sg_num);
+			bio = iblock_get_bio(cmd, block_lba, sg_num);
 			if (!bio)
-				goto fail;
+				goto fail_put_bios;
+
+			atomic_inc(&ibr->pending);
 			bio_list_add(&list, bio);
 			bio_cnt++;
 		}
@@ -586,17 +584,16 @@
 	}
 
 	iblock_submit_bios(&list, rw);
-
-	if (atomic_dec_and_test(&ibr->pending)) {
-		transport_complete_task(task,
-				!atomic_read(&ibr->ib_bio_err_cnt));
-	}
+	iblock_complete_cmd(cmd);
 	return 0;
 
-fail:
+fail_put_bios:
 	while ((bio = bio_list_pop(&list)))
 		bio_put(bio);
+fail_free_ibr:
+	kfree(ibr);
 	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+fail:
 	return -ENOMEM;
 }
 
@@ -621,8 +618,8 @@
 
 static void iblock_bio_done(struct bio *bio, int err)
 {
-	struct se_task *task = bio->bi_private;
-	struct iblock_req *ibr = IBLOCK_REQ(task);
+	struct se_cmd *cmd = bio->bi_private;
+	struct iblock_req *ibr = cmd->priv;
 
 	/*
 	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
@@ -642,14 +639,7 @@
 
 	bio_put(bio);
 
-	if (!atomic_dec_and_test(&ibr->pending))
-		return;
-
-	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
-		 task, bio, task->task_lba,
-		 (unsigned long long)bio->bi_sector, err);
-
-	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
+	iblock_complete_cmd(cmd);
 }
 
 static struct se_subsystem_api iblock_template = {
@@ -663,11 +653,9 @@
 	.allocate_virtdevice	= iblock_allocate_virtdevice,
 	.create_virtdevice	= iblock_create_virtdevice,
 	.free_device		= iblock_free_device,
-	.alloc_task		= iblock_alloc_task,
-	.do_task		= iblock_do_task,
+	.execute_cmd		= iblock_execute_cmd,
 	.do_discard		= iblock_do_discard,
 	.do_sync_cache		= iblock_emulate_sync_cache,
-	.free_task		= iblock_free_task,
 	.check_configfs_dev_params = iblock_check_configfs_dev_params,
 	.set_configfs_dev_params = iblock_set_configfs_dev_params,
 	.show_configfs_dev_params = iblock_show_configfs_dev_params,
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index e929370..66cf7b9 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -7,7 +7,6 @@
 #define IBLOCK_LBA_SHIFT	9
 
 struct iblock_req {
-	struct se_task ib_task;
 	atomic_t pending;
 	atomic_t ib_bio_err_cnt;
 } ____cacheline_aligned;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 21c0563..165e824 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -5,15 +5,15 @@
 extern struct t10_alua_lu_gp *default_lu_gp;
 
 /* target_core_cdb.c */
-int	target_emulate_inquiry(struct se_task *task);
-int	target_emulate_readcapacity(struct se_task *task);
-int	target_emulate_readcapacity_16(struct se_task *task);
-int	target_emulate_modesense(struct se_task *task);
-int	target_emulate_request_sense(struct se_task *task);
-int	target_emulate_unmap(struct se_task *task);
-int	target_emulate_write_same(struct se_task *task);
-int	target_emulate_synchronize_cache(struct se_task *task);
-int	target_emulate_noop(struct se_task *task);
+int	target_emulate_inquiry(struct se_cmd *cmd);
+int	target_emulate_readcapacity(struct se_cmd *cmd);
+int	target_emulate_readcapacity_16(struct se_cmd *cmd);
+int	target_emulate_modesense(struct se_cmd *cmd);
+int	target_emulate_request_sense(struct se_cmd *cmd);
+int	target_emulate_unmap(struct se_cmd *cmd);
+int	target_emulate_write_same(struct se_cmd *cmd);
+int	target_emulate_synchronize_cache(struct se_cmd *cmd);
+int	target_emulate_noop(struct se_cmd *cmd);
 
 /* target_core_device.c */
 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
@@ -28,7 +28,7 @@
 		struct se_lun *);
 void	core_dev_unexport(struct se_device *, struct se_portal_group *,
 		struct se_lun *);
-int	target_report_luns(struct se_task *);
+int	target_report_luns(struct se_cmd *);
 void	se_release_device_for_hba(struct se_device *);
 void	se_release_vpd_for_dev(struct se_device *);
 int	se_free_virtual_device(struct se_device *, struct se_hba *);
@@ -104,8 +104,7 @@
 u32	scsi_get_new_index(scsi_index_t);
 void	transport_subsystem_check_init(void);
 void	transport_cmd_finish_abort(struct se_cmd *, int);
-void	__transport_remove_task_from_execute_queue(struct se_task *,
-		struct se_device *);
+void	__target_remove_from_execute_list(struct se_cmd *);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 void	transport_dump_dev_state(struct se_device *, char *, int *);
 void	transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -114,7 +113,7 @@
 int	transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
 int	transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
 int	transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
-bool	target_stop_task(struct se_task *task, unsigned long *flags);
+bool	target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
 int	transport_clear_lun_from_sessions(struct se_lun *);
 void	transport_send_task_abort(struct se_cmd *);
 
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 86f0c3b..8556499 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -193,9 +193,8 @@
 	return 0;
 }
 
-int target_scsi2_reservation_release(struct se_task *task)
+int target_scsi2_reservation_release(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct se_session *sess = cmd->se_sess;
 	struct se_portal_group *tpg = sess->se_tpg;
@@ -220,6 +219,9 @@
 	if (dev->dev_reserved_node_acl != sess->se_node_acl)
 		goto out_unlock;
 
+	if (dev->dev_res_bin_isid != sess->sess_bin_isid)
+		goto out_unlock;
+
 	dev->dev_reserved_node_acl = NULL;
 	dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
 	if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
@@ -234,16 +236,13 @@
 out_unlock:
 	spin_unlock(&dev->dev_reservation_lock);
 out:
-	if (!ret) {
-		task->task_scsi_status = GOOD;
-		transport_complete_task(task, 1);
-	}
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
 	return ret;
 }
 
-int target_scsi2_reservation_reserve(struct se_task *task)
+int target_scsi2_reservation_reserve(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	struct se_session *sess = cmd->se_sess;
 	struct se_portal_group *tpg = sess->se_tpg;
@@ -304,10 +303,8 @@
 out_unlock:
 	spin_unlock(&dev->dev_reservation_lock);
 out:
-	if (!ret) {
-		task->task_scsi_status = GOOD;
-		transport_complete_task(task, 1);
-	}
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
 	return ret;
 }
 
@@ -500,11 +497,10 @@
 	 * statement.
 	 */
 	if (!ret && !other_cdb) {
-#if 0
 		pr_debug("Allowing explict CDB: 0x%02x for %s"
 			" reservation holder\n", cdb[0],
 			core_scsi3_pr_dump_type(pr_reg_type));
-#endif
+
 		return ret;
 	}
 	/*
@@ -532,14 +528,14 @@
 			 * as we expect registered non-reservation holding
 			 * nexuses to issue CDBs.
 			 */
-#if 0
+
 			if (!registered_nexus) {
 				pr_debug("Allowing implict CDB: 0x%02x"
 					" for %s reservation on unregistered"
 					" nexus\n", cdb[0],
 					core_scsi3_pr_dump_type(pr_reg_type));
 			}
-#endif
+
 			return 0;
 		}
 	} else if ((reg_only) || (all_reg)) {
@@ -548,11 +544,11 @@
 			 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
 			 * allow commands from registered nexuses.
 			 */
-#if 0
+
 			pr_debug("Allowing implict CDB: 0x%02x for %s"
 				" reservation\n", cdb[0],
 				core_scsi3_pr_dump_type(pr_reg_type));
-#endif
+
 			return 0;
 		}
 	}
@@ -1666,12 +1662,12 @@
 			ret = -EINVAL;
 			goto out;
 		}
-#if 0
+
 		pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
 			" tid_len: %d for %s + %s\n",
 			dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
 			tpdl, tid_len, i_str, iport_ptr);
-#endif
+
 		if (tid_len > tpdl) {
 			pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
 				" %u for Transport ID: %s\n", tid_len, ptr);
@@ -1714,12 +1710,12 @@
 			ret = -EINVAL;
 			goto out;
 		}
-#if 0
+
 		pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
 			" dest_se_deve mapped_lun: %u\n",
 			dest_tpg->se_tpg_tfo->get_fabric_name(),
 			dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
-#endif
+
 		/*
 		 * Skip any TransportIDs that already have a registration for
 		 * this target port.
@@ -3473,10 +3469,10 @@
 
 	buf = transport_kmap_data_sg(cmd);
 	proto_ident = (buf[24] & 0x0f);
-#if 0
+
 	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
 			" 0x%02x\n", proto_ident);
-#endif
+
 	if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
 		pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
 			" proto_ident: 0x%02x does not match ident: 0x%02x"
@@ -3575,11 +3571,11 @@
 		ret = -EINVAL;
 		goto out;
 	}
-#if 0
+
 	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
 		" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
 		dest_node_acl->initiatorname);
-#endif
+
 	/*
 	 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
 	 * PORT IDENTIFIER.
@@ -3603,12 +3599,12 @@
 		ret = -EINVAL;
 		goto out;
 	}
-#if 0
+
 	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
 		" ACL for dest_se_deve->mapped_lun: %u\n",
 		dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
 		dest_se_deve->mapped_lun);
-#endif
+
 	/*
 	 * A persistent reservation needs to already exist in order to
 	 * successfully complete the REGISTER_AND_MOVE service action..
@@ -3799,9 +3795,8 @@
 /*
  * See spc4r17 section 6.14 Table 170
  */
-int target_scsi3_emulate_pr_out(struct se_task *task)
+int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	unsigned char *cdb = &cmd->t_task_cdb[0];
 	unsigned char *buf;
 	u64 res_key, sa_res_key;
@@ -3941,10 +3936,8 @@
 	}
 
 out:
-	if (!ret) {
-		task->task_scsi_status = GOOD;
-		transport_complete_task(task, 1);
-	}
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
 	return ret;
 }
 
@@ -4299,9 +4292,8 @@
 	return 0;
 }
 
-int target_scsi3_emulate_pr_in(struct se_task *task)
+int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	int ret;
 
 	/*
@@ -4342,10 +4334,8 @@
 		break;
 	}
 
-	if (!ret) {
-		task->task_scsi_status = GOOD;
-		transport_complete_task(task, 1);
-	}
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
 	return ret;
 }
 
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 7a233fe..af6c460 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -47,8 +47,8 @@
 
 extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
 			char *, u32);
-extern int target_scsi2_reservation_release(struct se_task *task);
-extern int target_scsi2_reservation_reserve(struct se_task *task);
+extern int target_scsi2_reservation_release(struct se_cmd *);
+extern int target_scsi2_reservation_reserve(struct se_cmd *);
 extern int core_scsi3_alloc_aptpl_registration(
 			struct t10_reservation *, u64,
 			unsigned char *, unsigned char *, u32,
@@ -61,8 +61,8 @@
 extern void core_scsi3_free_all_registrations(struct se_device *);
 extern unsigned char *core_scsi3_pr_dump_type(int);
 
-extern int target_scsi3_emulate_pr_in(struct se_task *task);
-extern int target_scsi3_emulate_pr_out(struct se_task *task);
+extern int target_scsi3_emulate_pr_in(struct se_cmd *);
+extern int target_scsi3_emulate_pr_out(struct se_cmd *);
 extern int core_setup_reservations(struct se_device *, int);
 
 #endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 94c905f..4ce2cf6 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -663,22 +663,12 @@
 	kfree(pdv);
 }
 
-static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
+static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
 {
-	return container_of(task, struct pscsi_plugin_task, pscsi_task);
-}
-
-
-/*	pscsi_transport_complete():
- *
- *
- */
-static int pscsi_transport_complete(struct se_task *task)
-{
-	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
+	struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
 	struct scsi_device *sd = pdv->pdv_sd;
 	int result;
-	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_plugin_task *pt = cmd->priv;
 	unsigned char *cdb = &pt->pscsi_cdb[0];
 
 	result = pt->pscsi_result;
@@ -688,12 +678,11 @@
 	 */
 	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
 	     (status_byte(result) << 1) == SAM_STAT_GOOD) {
-		if (!task->task_se_cmd->se_deve)
+		if (!cmd->se_deve)
 			goto after_mode_sense;
 
-		if (task->task_se_cmd->se_deve->lun_flags &
-				TRANSPORT_LUNFLAGS_READ_ONLY) {
-			unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
+		if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
+			unsigned char *buf = transport_kmap_data_sg(cmd);
 
 			if (cdb[0] == MODE_SENSE_10) {
 				if (!(buf[3] & 0x80))
@@ -703,7 +692,7 @@
 					buf[2] |= 0x80;
 			}
 
-			transport_kunmap_data_sg(task->task_se_cmd);
+			transport_kunmap_data_sg(cmd);
 		}
 	}
 after_mode_sense:
@@ -722,7 +711,6 @@
 	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
 	      (status_byte(result) << 1) == SAM_STAT_GOOD) {
 		unsigned char *buf;
-		struct scatterlist *sg = task->task_sg;
 		u16 bdl;
 		u32 blocksize;
 
@@ -757,35 +745,6 @@
 	return 0;
 }
 
-static struct se_task *
-pscsi_alloc_task(unsigned char *cdb)
-{
-	struct pscsi_plugin_task *pt;
-
-	/*
-	 * Dynamically alloc cdb space, since it may be larger than
-	 * TCM_MAX_COMMAND_SIZE
-	 */
-	pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
-	if (!pt) {
-		pr_err("Unable to allocate struct pscsi_plugin_task\n");
-		return NULL;
-	}
-
-	return &pt->pscsi_task;
-}
-
-static void pscsi_free_task(struct se_task *task)
-{
-	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
-
-	/*
-	 * We do not release the bio(s) here associated with this task, as
-	 * this is handled by bio_put() and pscsi_bi_endio().
-	 */
-	kfree(pt);
-}
-
 enum {
 	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
 	Opt_scsi_lun_id, Opt_err
@@ -958,26 +917,25 @@
 	return bio;
 }
 
-static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
+static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, enum dma_data_direction data_direction,
 		struct bio **hbio)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
-	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
-	u32 task_sg_num = task->task_sg_nents;
+	struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
 	struct bio *bio = NULL, *tbio = NULL;
 	struct page *page;
 	struct scatterlist *sg;
-	u32 data_len = task->task_size, i, len, bytes, off;
-	int nr_pages = (task->task_size + task_sg[0].offset +
+	u32 data_len = cmd->data_length, i, len, bytes, off;
+	int nr_pages = (cmd->data_length + sgl[0].offset +
 			PAGE_SIZE - 1) >> PAGE_SHIFT;
 	int nr_vecs = 0, rc;
-	int rw = (task->task_data_direction == DMA_TO_DEVICE);
+	int rw = (data_direction == DMA_TO_DEVICE);
 
 	*hbio = NULL;
 
 	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
 
-	for_each_sg(task_sg, sg, task_sg_num, i) {
+	for_each_sg(sgl, sg, sgl_nents, i) {
 		page = sg_page(sg);
 		off = sg->offset;
 		len = sg->length;
@@ -1009,7 +967,7 @@
 				 * Set *hbio pointer to handle the case:
 				 * nr_pages > BIO_MAX_PAGES, where additional
 				 * bios need to be added to complete a given
-				 * struct se_task
+				 * command.
 				 */
 				if (!*hbio)
 					*hbio = tbio = bio;
@@ -1049,7 +1007,7 @@
 		}
 	}
 
-	return task->task_sg_nents;
+	return sgl_nents;
 fail:
 	while (*hbio) {
 		bio = *hbio;
@@ -1061,52 +1019,61 @@
 	return -ENOMEM;
 }
 
-static int pscsi_do_task(struct se_task *task)
+static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, enum dma_data_direction data_direction)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
-	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
-	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+	struct pscsi_plugin_task *pt;
 	struct request *req;
 	struct bio *hbio;
 	int ret;
 
-	target_get_task_cdb(task, pt->pscsi_cdb);
+	/*
+	 * Dynamically alloc cdb space, since it may be larger than
+	 * TCM_MAX_COMMAND_SIZE
+	 */
+	pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
+	if (!pt) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -ENOMEM;
+	}
+	cmd->priv = pt;
 
-	if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+	memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
+		scsi_command_size(cmd->t_task_cdb));
+
+	if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
 		req = blk_get_request(pdv->pdv_sd->request_queue,
-				(task->task_data_direction == DMA_TO_DEVICE),
+				(data_direction == DMA_TO_DEVICE),
 				GFP_KERNEL);
 		if (!req || IS_ERR(req)) {
 			pr_err("PSCSI: blk_get_request() failed: %ld\n",
 					req ? IS_ERR(req) : -ENOMEM);
 			cmd->scsi_sense_reason =
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-			return -ENODEV;
+			goto fail;
 		}
 	} else {
-		BUG_ON(!task->task_size);
+		BUG_ON(!cmd->data_length);
 
-		/*
-		 * Setup the main struct request for the task->task_sg[] payload
-		 */
-		ret = pscsi_map_sg(task, task->task_sg, &hbio);
+		ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
 		if (ret < 0) {
 			cmd->scsi_sense_reason =
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-			return ret;
+			goto fail;
 		}
 
 		req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
 				       GFP_KERNEL);
 		if (IS_ERR(req)) {
 			pr_err("pSCSI: blk_make_request() failed\n");
-			goto fail;
+			goto fail_free_bio;
 		}
 	}
 
 	req->cmd_type = REQ_TYPE_BLOCK_PC;
 	req->end_io = pscsi_req_done;
-	req->end_io_data = task;
+	req->end_io_data = cmd;
 	req->cmd_len = scsi_command_size(pt->pscsi_cdb);
 	req->cmd = &pt->pscsi_cdb[0];
 	req->sense = &pt->pscsi_sense[0];
@@ -1118,12 +1085,12 @@
 	req->retries = PS_RETRY;
 
 	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
-			(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
+			(cmd->sam_task_attr == MSG_HEAD_TAG),
 			pscsi_req_done);
 
 	return 0;
 
-fail:
+fail_free_bio:
 	while (hbio) {
 		struct bio *bio = hbio;
 		hbio = hbio->bi_next;
@@ -1131,16 +1098,14 @@
 		bio_endio(bio, 0);	/* XXX: should be error */
 	}
 	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+fail:
+	kfree(pt);
 	return -ENOMEM;
 }
 
-/*	pscsi_get_sense_buffer():
- *
- *
- */
-static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
+static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd)
 {
-	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_plugin_task *pt = cmd->priv;
 
 	return pt->pscsi_sense;
 }
@@ -1180,48 +1145,36 @@
 	return 0;
 }
 
-/*	pscsi_handle_SAM_STATUS_failures():
- *
- *
- */
-static inline void pscsi_process_SAM_status(
-	struct se_task *task,
-	struct pscsi_plugin_task *pt)
+static void pscsi_req_done(struct request *req, int uptodate)
 {
-	task->task_scsi_status = status_byte(pt->pscsi_result);
-	if (task->task_scsi_status) {
-		task->task_scsi_status <<= 1;
-		pr_debug("PSCSI Status Byte exception at task: %p CDB:"
-			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+	struct se_cmd *cmd = req->end_io_data;
+	struct pscsi_plugin_task *pt = cmd->priv;
+
+	pt->pscsi_result = req->errors;
+	pt->pscsi_resid = req->resid_len;
+
+	cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
+	if (cmd->scsi_status) {
+		pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
 			pt->pscsi_result);
 	}
 
 	switch (host_byte(pt->pscsi_result)) {
 	case DID_OK:
-		transport_complete_task(task, (!task->task_scsi_status));
+		target_complete_cmd(cmd, cmd->scsi_status);
 		break;
 	default:
-		pr_debug("PSCSI Host Byte exception at task: %p CDB:"
-			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+		pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
 			pt->pscsi_result);
-		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-		task->task_se_cmd->scsi_sense_reason =
-					TCM_UNSUPPORTED_SCSI_OPCODE;
-		transport_complete_task(task, 0);
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 		break;
 	}
-}
 
-static void pscsi_req_done(struct request *req, int uptodate)
-{
-	struct se_task *task = req->end_io_data;
-	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
-
-	pt->pscsi_result = req->errors;
-	pt->pscsi_resid = req->resid_len;
-
-	pscsi_process_SAM_status(task, pt);
 	__blk_put_request(req->q, req);
+	kfree(pt);
 }
 
 static struct se_subsystem_api pscsi_template = {
@@ -1235,9 +1188,7 @@
 	.create_virtdevice	= pscsi_create_virtdevice,
 	.free_device		= pscsi_free_device,
 	.transport_complete	= pscsi_transport_complete,
-	.alloc_task		= pscsi_alloc_task,
-	.do_task		= pscsi_do_task,
-	.free_task		= pscsi_free_task,
+	.execute_cmd		= pscsi_execute_cmd,
 	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
 	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
 	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 43f1c41..bc1e5e11 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -22,7 +22,6 @@
 #include <linux/kobject.h>
 
 struct pscsi_plugin_task {
-	struct se_task pscsi_task;
 	unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
 	int	pscsi_direction;
 	int	pscsi_result;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 8b68f7b..d0ceb87 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -64,9 +64,6 @@
 	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
 		" Generic Target Core Stack %s\n", hba->hba_id,
 		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
-	pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
-		" MaxSectors: %u\n", hba->hba_id,
-		rd_host->rd_host_id, RD_MAX_SECTORS);
 
 	return 0;
 }
@@ -199,10 +196,7 @@
 	return 0;
 }
 
-static void *rd_allocate_virtdevice(
-	struct se_hba *hba,
-	const char *name,
-	int rd_direct)
+static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
 {
 	struct rd_dev *rd_dev;
 	struct rd_host *rd_host = hba->hba_ptr;
@@ -214,25 +208,12 @@
 	}
 
 	rd_dev->rd_host = rd_host;
-	rd_dev->rd_direct = rd_direct;
 
 	return rd_dev;
 }
 
-static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
-{
-	return rd_allocate_virtdevice(hba, name, 0);
-}
-
-/*	rd_create_virtdevice():
- *
- *
- */
-static struct se_device *rd_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p,
-	int rd_direct)
+static struct se_device *rd_create_virtdevice(struct se_hba *hba,
+		struct se_subsystem_dev *se_dev, void *p)
 {
 	struct se_device *dev;
 	struct se_dev_limits dev_limits;
@@ -247,13 +228,12 @@
 	if (ret < 0)
 		goto fail;
 
-	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
-	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
-						RD_MCP_VERSION);
+	snprintf(prod, 16, "RAMDISK-MCP");
+	snprintf(rev, 4, "%s", RD_MCP_VERSION);
 
 	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
-	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
-	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
+	dev_limits.limits.max_hw_sectors = UINT_MAX;
+	dev_limits.limits.max_sectors = UINT_MAX;
 	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
 	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
 
@@ -264,12 +244,10 @@
 		goto fail;
 
 	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
-	rd_dev->rd_queue_depth = dev->queue_depth;
 
-	pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
 		" %u pages in %u tables, %lu total bytes\n",
-		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
-		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
 		rd_dev->sg_table_count,
 		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
 
@@ -280,18 +258,6 @@
 	return ERR_PTR(ret);
 }
 
-static struct se_device *rd_MEMCPY_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
-{
-	return rd_create_virtdevice(hba, se_dev, p, 0);
-}
-
-/*	rd_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
 static void rd_free_device(void *p)
 {
 	struct rd_dev *rd_dev = p;
@@ -300,29 +266,6 @@
 	kfree(rd_dev);
 }
 
-static inline struct rd_request *RD_REQ(struct se_task *task)
-{
-	return container_of(task, struct rd_request, rd_task);
-}
-
-static struct se_task *
-rd_alloc_task(unsigned char *cdb)
-{
-	struct rd_request *rd_req;
-
-	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
-	if (!rd_req) {
-		pr_err("Unable to allocate struct rd_request\n");
-		return NULL;
-	}
-
-	return &rd_req->rd_task;
-}
-
-/*	rd_get_sg_table():
- *
- *
- */
 static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
 {
 	u32 i;
@@ -341,31 +284,41 @@
 	return NULL;
 }
 
-static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
+static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+		u32 sgl_nents, enum dma_data_direction data_direction)
 {
-	struct se_task *task = &req->rd_task;
-	struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
+	struct se_device *se_dev = cmd->se_dev;
+	struct rd_dev *dev = se_dev->dev_ptr;
 	struct rd_dev_sg_table *table;
 	struct scatterlist *rd_sg;
 	struct sg_mapping_iter m;
-	u32 rd_offset = req->rd_offset;
+	u32 rd_offset;
+	u32 rd_size;
+	u32 rd_page;
 	u32 src_len;
+	u64 tmp;
 
-	table = rd_get_sg_table(dev, req->rd_page);
+	tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
+	rd_offset = do_div(tmp, PAGE_SIZE);
+	rd_page = tmp;
+	rd_size = cmd->data_length;
+
+	table = rd_get_sg_table(dev, rd_page);
 	if (!table)
 		return -EINVAL;
 
-	rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
+	rd_sg = &table->sg_table[rd_page - table->page_start_offset];
 
 	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
-			dev->rd_dev_id, read_rd ? "Read" : "Write",
-			task->task_lba, req->rd_size, req->rd_page,
-			rd_offset);
+			dev->rd_dev_id,
+			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
+			cmd->t_task_lba, rd_size, rd_page, rd_offset);
 
 	src_len = PAGE_SIZE - rd_offset;
-	sg_miter_start(&m, task->task_sg, task->task_sg_nents,
-			read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
-	while (req->rd_size) {
+	sg_miter_start(&m, sgl, sgl_nents,
+			data_direction == DMA_FROM_DEVICE ?
+				SG_MITER_TO_SG : SG_MITER_FROM_SG);
+	while (rd_size) {
 		u32 len;
 		void *rd_addr;
 
@@ -375,13 +328,13 @@
 
 		rd_addr = sg_virt(rd_sg) + rd_offset;
 
-		if (read_rd)
+		if (data_direction == DMA_FROM_DEVICE)
 			memcpy(m.addr, rd_addr, len);
 		else
 			memcpy(rd_addr, m.addr, len);
 
-		req->rd_size -= len;
-		if (!req->rd_size)
+		rd_size -= len;
+		if (!rd_size)
 			continue;
 
 		src_len -= len;
@@ -391,15 +344,15 @@
 		}
 
 		/* rd page completed, next one please */
-		req->rd_page++;
+		rd_page++;
 		rd_offset = 0;
 		src_len = PAGE_SIZE;
-		if (req->rd_page <= table->page_end_offset) {
+		if (rd_page <= table->page_end_offset) {
 			rd_sg++;
 			continue;
 		}
 
-		table = rd_get_sg_table(dev, req->rd_page);
+		table = rd_get_sg_table(dev, rd_page);
 		if (!table) {
 			sg_miter_stop(&m);
 			return -EINVAL;
@@ -409,43 +362,11 @@
 		rd_sg = table->sg_table;
 	}
 	sg_miter_stop(&m);
+
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return 0;
 }
 
-/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static int rd_MEMCPY_do_task(struct se_task *task)
-{
-	struct se_device *dev = task->task_se_cmd->se_dev;
-	struct rd_request *req = RD_REQ(task);
-	u64 tmp;
-	int ret;
-
-	tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
-	req->rd_offset = do_div(tmp, PAGE_SIZE);
-	req->rd_page = tmp;
-	req->rd_size = task->task_size;
-
-	ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
-	if (ret != 0)
-		return ret;
-
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
-	return 0;
-}
-
-/*	rd_free_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void rd_free_task(struct se_task *task)
-{
-	kfree(RD_REQ(task));
-}
-
 enum {
 	Opt_rd_pages, Opt_err
 };
@@ -512,9 +433,8 @@
 	char *b)
 {
 	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
-	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
-			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
-			"rd_direct" : "rd_mcp");
+	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
+			rd_dev->rd_dev_id);
 	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
 			"  SG_table_count: %u\n", rd_dev->rd_page_count,
 			PAGE_SIZE, rd_dev->sg_table_count);
@@ -545,12 +465,10 @@
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
 	.attach_hba		= rd_attach_hba,
 	.detach_hba		= rd_detach_hba,
-	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
-	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
+	.allocate_virtdevice	= rd_allocate_virtdevice,
+	.create_virtdevice	= rd_create_virtdevice,
 	.free_device		= rd_free_device,
-	.alloc_task		= rd_alloc_task,
-	.do_task		= rd_MEMCPY_do_task,
-	.free_task		= rd_free_task,
+	.execute_cmd		= rd_execute_cmd,
 	.check_configfs_dev_params = rd_check_configfs_dev_params,
 	.set_configfs_dev_params = rd_set_configfs_dev_params,
 	.show_configfs_dev_params = rd_show_configfs_dev_params,
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 784e56a..2145812 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -2,7 +2,6 @@
 #define TARGET_CORE_RD_H
 
 #define RD_HBA_VERSION		"v4.0"
-#define RD_DR_VERSION		"4.0"
 #define RD_MCP_VERSION		"4.0"
 
 /* Largest piece of memory kmalloc can allocate */
@@ -10,28 +9,11 @@
 #define RD_DEVICE_QUEUE_DEPTH	32
 #define RD_MAX_DEVICE_QUEUE_DEPTH 128
 #define RD_BLOCKSIZE		512
-#define RD_MAX_SECTORS		1024
 
 /* Used in target_core_init_configfs() for virtual LUN 0 access */
 int __init rd_module_init(void);
 void rd_module_exit(void);
 
-#define RRF_EMULATE_CDB		0x01
-#define RRF_GOT_LBA		0x02
-
-struct rd_request {
-	struct se_task	rd_task;
-
-	/* Offset from start of page */
-	u32		rd_offset;
-	/* Starting page in Ramdisk for request */
-	u32		rd_page;
-	/* Total number of pages needed for request */
-	u32		rd_page_count;
-	/* Scatterlist count */
-	u32		rd_size;
-} ____cacheline_aligned;
-
 struct rd_dev_sg_table {
 	u32		page_start_offset;
 	u32		page_end_offset;
@@ -42,7 +24,6 @@
 #define RDF_HAS_PAGE_COUNT	0x01
 
 struct rd_dev {
-	int		rd_direct;
 	u32		rd_flags;
 	/* Unique Ramdisk Device ID in Ramdisk HBA */
 	u32		rd_dev_id;
@@ -50,7 +31,6 @@
 	u32		rd_page_count;
 	/* Number of SG tables in sg_table_array */
 	u32		sg_table_count;
-	u32		rd_queue_depth;
 	/* Array of rd_dev_sg_table_t containing scatterlists */
 	struct rd_dev_sg_table *sg_table_array;
 	/* Ramdisk HBA device is connected to */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index f015839..84caf1b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -244,7 +244,7 @@
 	}
 }
 
-static void core_tmr_drain_task_list(
+static void core_tmr_drain_state_list(
 	struct se_device *dev,
 	struct se_cmd *prout_cmd,
 	struct se_node_acl *tmr_nacl,
@@ -252,12 +252,13 @@
 	struct list_head *preempt_and_abort_list)
 {
 	LIST_HEAD(drain_task_list);
-	struct se_cmd *cmd;
-	struct se_task *task, *task_tmp;
+	struct se_cmd *cmd, *next;
 	unsigned long flags;
 	int fe_count;
+
 	/*
-	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
+	 * Complete outstanding commands with TASK_ABORTED SAM status.
+	 *
 	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
 	 * for TMR LUN_RESET:
 	 *
@@ -278,56 +279,43 @@
 	 * in the Control Mode Page.
 	 */
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
-				t_state_list) {
-		if (!task->task_se_cmd) {
-			pr_err("task->task_se_cmd is NULL!\n");
-			continue;
-		}
-		cmd = task->task_se_cmd;
-
+	list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
 		/*
 		 * For PREEMPT_AND_ABORT usage, only process commands
 		 * with a matching reservation key.
 		 */
 		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
 			continue;
+
 		/*
 		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
 		 */
 		if (prout_cmd == cmd)
 			continue;
 
-		list_move_tail(&task->t_state_list, &drain_task_list);
-		task->t_state_active = false;
-		/*
-		 * Remove from task execute list before processing drain_task_list
-		 */
-		if (!list_empty(&task->t_execute_list))
-			__transport_remove_task_from_execute_queue(task, dev);
+		list_move_tail(&cmd->state_list, &drain_task_list);
+		cmd->state_active = false;
+
+		if (!list_empty(&cmd->execute_list))
+			__target_remove_from_execute_list(cmd);
 	}
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
 	while (!list_empty(&drain_task_list)) {
-		task = list_entry(drain_task_list.next, struct se_task, t_state_list);
-		list_del(&task->t_state_list);
-		cmd = task->task_se_cmd;
+		cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
+		list_del(&cmd->state_list);
 
-		pr_debug("LUN_RESET: %s cmd: %p task: %p"
+		pr_debug("LUN_RESET: %s cmd: %p"
 			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
 			"cdb: 0x%02x\n",
-			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
+			(preempt_and_abort_list) ? "Preempt" : "", cmd,
 			cmd->se_tfo->get_task_tag(cmd), 0,
 			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
 			cmd->t_task_cdb[0]);
 		pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
-			" t_task_cdbs: %d t_task_cdbs_left: %d"
-			" t_task_cdbs_sent: %d -- CMD_T_ACTIVE: %d"
+			" -- CMD_T_ACTIVE: %d"
 			" CMD_T_STOP: %d CMD_T_SENT: %d\n",
 			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
-			cmd->t_task_list_num,
-			atomic_read(&cmd->t_task_cdbs_left),
-			atomic_read(&cmd->t_task_cdbs_sent),
 			(cmd->transport_state & CMD_T_ACTIVE) != 0,
 			(cmd->transport_state & CMD_T_STOP) != 0,
 			(cmd->transport_state & CMD_T_SENT) != 0);
@@ -343,20 +331,13 @@
 			cancel_work_sync(&cmd->work);
 
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
-		target_stop_task(task, &flags);
+		target_stop_cmd(cmd, &flags);
 
-		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
-				" t_task_cdbs_ex_left: %d\n", task, dev,
-				atomic_read(&cmd->t_task_cdbs_ex_left));
-			continue;
-		}
 		fe_count = atomic_read(&cmd->t_fe_count);
 
 		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
 			pr_debug("LUN_RESET: got CMD_T_ACTIVE for"
-				" task: %p, t_fe_count: %d dev: %p\n", task,
+				" cdb: %p, t_fe_count: %d dev: %p\n", cmd,
 				fe_count, dev);
 			cmd->transport_state |= CMD_T_ABORTED;
 			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -364,8 +345,8 @@
 			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 			continue;
 		}
-		pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for task: %p,"
-			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
+		pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p,"
+			" t_fe_count: %d dev: %p\n", cmd, fe_count, dev);
 		cmd->transport_state |= CMD_T_ABORTED;
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
@@ -384,13 +365,11 @@
 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
 	struct se_cmd *cmd, *tcmd;
 	unsigned long flags;
+
 	/*
-	 * Release all commands remaining in the struct se_device cmd queue.
+	 * Release all commands remaining in the per-device command queue.
 	 *
-	 * This follows the same logic as above for the struct se_device
-	 * struct se_task state list, where commands are returned with
-	 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
-	 * reference, otherwise the struct se_cmd is released.
+	 * This follows the same logic as above for the state list.
 	 */
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
@@ -466,7 +445,7 @@
 		dev->transport->name, tas);
 
 	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
-	core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
+	core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
 				preempt_and_abort_list);
 	core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
 				preempt_and_abort_list);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 70c3ffb..8bd58e2 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -60,7 +60,6 @@
 	int i;
 	struct se_dev_entry *deve;
 	struct se_lun *lun;
-	struct se_lun_acl *acl, *acl_tmp;
 
 	spin_lock_irq(&nacl->device_list_lock);
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -81,28 +80,7 @@
 		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
 			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
 
-		spin_lock(&lun->lun_acl_lock);
-		list_for_each_entry_safe(acl, acl_tmp,
-					&lun->lun_acl_list, lacl_list) {
-			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
-			    (acl->mapped_lun == deve->mapped_lun))
-				break;
-		}
-
-		if (!acl) {
-			pr_err("Unable to locate struct se_lun_acl for %s,"
-				" mapped_lun: %u\n", nacl->initiatorname,
-				deve->mapped_lun);
-			spin_unlock(&lun->lun_acl_lock);
-			spin_lock_irq(&nacl->device_list_lock);
-			continue;
-		}
-
-		list_del(&acl->lacl_list);
-		spin_unlock(&lun->lun_acl_lock);
-
 		spin_lock_irq(&nacl->device_list_lock);
-		kfree(acl);
 	}
 	spin_unlock_irq(&nacl->device_list_lock);
 }
@@ -175,10 +153,7 @@
 		 * demo_mode_write_protect is ON, or READ_ONLY;
 		 */
 		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
-			if (dev->dev_flags & DF_READ_ONLY)
-				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
-			else
-				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
 		} else {
 			/*
 			 * Allow only optical drives to issue R/W in default RO
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 443704f..b05fdc0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -72,7 +72,6 @@
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev);
-static void transport_free_dev_tasks(struct se_cmd *cmd);
 static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
@@ -331,9 +330,9 @@
 }
 EXPORT_SYMBOL(target_get_session);
 
-int target_put_session(struct se_session *se_sess)
+void target_put_session(struct se_session *se_sess)
 {
-	return kref_put(&se_sess->sess_kref, target_release_session);
+	kref_put(&se_sess->sess_kref, target_release_session);
 }
 EXPORT_SYMBOL(target_put_session);
 
@@ -444,31 +443,23 @@
 /*
  * Called with cmd->t_state_lock held.
  */
-static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
+static void target_remove_from_state_list(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_task *task;
 	unsigned long flags;
 
 	if (!dev)
 		return;
 
-	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		if (task->task_flags & TF_ACTIVE)
-			continue;
+	if (cmd->transport_state & CMD_T_BUSY)
+		return;
 
-		spin_lock_irqsave(&dev->execute_task_lock, flags);
-		if (task->t_state_active) {
-			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
-				cmd->se_tfo->get_task_tag(cmd), dev, task);
-
-			list_del(&task->t_state_list);
-			atomic_dec(&cmd->t_task_cdbs_ex_left);
-			task->t_state_active = false;
-		}
-		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	if (cmd->state_active) {
+		list_del(&cmd->state_list);
+		cmd->state_active = false;
 	}
-
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
 /*	transport_cmd_check_stop():
@@ -497,7 +488,7 @@
 
 		cmd->transport_state &= ~CMD_T_ACTIVE;
 		if (transport_off == 2)
-			transport_all_task_dev_remove_state(cmd);
+			target_remove_from_state_list(cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 		complete(&cmd->transport_lun_stop_comp);
@@ -513,7 +504,7 @@
 			cmd->se_tfo->get_task_tag(cmd));
 
 		if (transport_off == 2)
-			transport_all_task_dev_remove_state(cmd);
+			target_remove_from_state_list(cmd);
 
 		/*
 		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
@@ -529,7 +520,7 @@
 	if (transport_off) {
 		cmd->transport_state &= ~CMD_T_ACTIVE;
 		if (transport_off == 2) {
-			transport_all_task_dev_remove_state(cmd);
+			target_remove_from_state_list(cmd);
 			/*
 			 * Clear struct se_cmd->se_lun before the transport_off == 2
 			 * handoff to fabric module.
@@ -577,7 +568,7 @@
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
-		transport_all_task_dev_remove_state(cmd);
+		target_remove_from_state_list(cmd);
 	}
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
@@ -669,29 +660,6 @@
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 }
 
-/*
- * Completion function used by TCM subsystem plugins (such as FILEIO)
- * for queueing up response from struct se_subsystem_api->do_task()
- */
-void transport_complete_sync_cache(struct se_cmd *cmd, int good)
-{
-	struct se_task *task = list_entry(cmd->t_task_list.next,
-				struct se_task, t_list);
-
-	if (good) {
-		cmd->scsi_status = SAM_STAT_GOOD;
-		task->task_scsi_status = GOOD;
-	} else {
-		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-		task->task_se_cmd->scsi_sense_reason =
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-	}
-
-	transport_complete_task(task, good);
-}
-EXPORT_SYMBOL(transport_complete_sync_cache);
-
 static void target_complete_failure_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -699,40 +667,32 @@
 	transport_generic_request_failure(cmd);
 }
 
-/*	transport_complete_task():
- *
- *	Called from interrupt and non interrupt context depending
- *	on the transport plugin.
- */
-void transport_complete_task(struct se_task *task, int success)
+void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
+	int success = scsi_status == GOOD;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	task->task_flags &= ~TF_ACTIVE;
+	cmd->scsi_status = scsi_status;
 
-	/*
-	 * See if any sense data exists, if so set the TASK_SENSE flag.
-	 * Also check for any other post completion work that needs to be
-	 * done by the plugins.
-	 */
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	cmd->transport_state &= ~CMD_T_BUSY;
+
 	if (dev && dev->transport->transport_complete) {
-		if (dev->transport->transport_complete(task) != 0) {
+		if (dev->transport->transport_complete(cmd,
+				cmd->t_data_sg) != 0) {
 			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
-			task->task_flags |= TF_HAS_SENSE;
 			success = 1;
 		}
 	}
 
 	/*
-	 * See if we are waiting for outstanding struct se_task
-	 * to complete for an exception condition
+	 * See if we are waiting to complete for an exception condition.
 	 */
-	if (task->task_flags & TF_REQUEST_STOP) {
+	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		complete(&task->task_stop_comp);
+		complete(&cmd->task_stop_comp);
 		return;
 	}
 
@@ -740,15 +700,6 @@
 		cmd->transport_state |= CMD_T_FAILED;
 
 	/*
-	 * Decrement the outstanding t_task_cdbs_left count.  The last
-	 * struct se_task from struct se_cmd will complete itself into the
-	 * device queue depending upon int success.
-	 */
-	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-	/*
 	 * Check for case where an explict ABORT_TASK has been received
 	 * and transport_wait_for_tasks() will be waiting for completion..
 	 */
@@ -770,157 +721,77 @@
 
 	queue_work(target_completion_wq, &cmd->work);
 }
-EXPORT_SYMBOL(transport_complete_task);
+EXPORT_SYMBOL(target_complete_cmd);
 
-/*
- * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
- * struct se_task list are ready to be added to the active execution list
- * struct se_device
-
- * Called with se_dev_t->execute_task_lock called.
- */
-static inline int transport_add_task_check_sam_attr(
-	struct se_task *task,
-	struct se_task *task_prev,
-	struct se_device *dev)
-{
-	/*
-	 * No SAM Task attribute emulation enabled, add to tail of
-	 * execution queue
-	 */
-	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
-		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
-		return 0;
-	}
-	/*
-	 * HEAD_OF_QUEUE attribute for received CDB, which means
-	 * the first task that is associated with a struct se_cmd goes to
-	 * head of the struct se_device->execute_task_list, and task_prev
-	 * after that for each subsequent task
-	 */
-	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
-		list_add(&task->t_execute_list,
-				(task_prev != NULL) ?
-				&task_prev->t_execute_list :
-				&dev->execute_task_list);
-
-		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
-				" in execution queue\n",
-				task->task_se_cmd->t_task_cdb[0]);
-		return 1;
-	}
-	/*
-	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
-	 * transitioned from Dermant -> Active state, and are added to the end
-	 * of the struct se_device->execute_task_list
-	 */
-	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
-	return 0;
-}
-
-/*	__transport_add_task_to_execute_queue():
- *
- *	Called with se_dev_t->execute_task_lock called.
- */
-static void __transport_add_task_to_execute_queue(
-	struct se_task *task,
-	struct se_task *task_prev,
-	struct se_device *dev)
-{
-	int head_of_queue;
-
-	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
-	atomic_inc(&dev->execute_tasks);
-
-	if (task->t_state_active)
-		return;
-	/*
-	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
-	 * state list as well.  Running with SAM Task Attribute emulation
-	 * will always return head_of_queue == 0 here
-	 */
-	if (head_of_queue)
-		list_add(&task->t_state_list, (task_prev) ?
-				&task_prev->t_state_list :
-				&dev->state_task_list);
-	else
-		list_add_tail(&task->t_state_list, &dev->state_task_list);
-
-	task->t_state_active = true;
-
-	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
-		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
-		task, dev);
-}
-
-static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
+static void target_add_to_state_list(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	struct se_task *task;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		spin_lock(&dev->execute_task_lock);
-		if (!task->t_state_active) {
-			list_add_tail(&task->t_state_list,
-				      &dev->state_task_list);
-			task->t_state_active = true;
-
-			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
-				task->task_se_cmd->se_tfo->get_task_tag(
-				task->task_se_cmd), task, dev);
-		}
-		spin_unlock(&dev->execute_task_lock);
-	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
-static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
-{
-	struct se_device *dev = cmd->se_dev;
-	struct se_task *task, *task_prev = NULL;
-
-	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		if (!list_empty(&task->t_execute_list))
-			continue;
-		/*
-		 * __transport_add_task_to_execute_queue() handles the
-		 * SAM Task Attribute emulation if enabled
-		 */
-		__transport_add_task_to_execute_queue(task, task_prev, dev);
-		task_prev = task;
-	}
-}
-
-static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
-{
-	unsigned long flags;
-	struct se_device *dev = cmd->se_dev;
-
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	__transport_add_tasks_from_cmd(cmd);
+	if (!cmd->state_active) {
+		list_add_tail(&cmd->state_list, &dev->state_list);
+		cmd->state_active = true;
+	}
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
-void __transport_remove_task_from_execute_queue(struct se_task *task,
-		struct se_device *dev)
+static void __target_add_to_execute_list(struct se_cmd *cmd)
 {
-	list_del_init(&task->t_execute_list);
-	atomic_dec(&dev->execute_tasks);
+	struct se_device *dev = cmd->se_dev;
+	bool head_of_queue = false;
+
+	if (!list_empty(&cmd->execute_list))
+		return;
+
+	if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
+	    cmd->sam_task_attr == MSG_HEAD_TAG)
+		head_of_queue = true;
+
+	if (head_of_queue)
+		list_add(&cmd->execute_list, &dev->execute_list);
+	else
+		list_add_tail(&cmd->execute_list, &dev->execute_list);
+
+	atomic_inc(&dev->execute_tasks);
+
+	if (cmd->state_active)
+		return;
+
+	if (head_of_queue)
+		list_add(&cmd->state_list, &dev->state_list);
+	else
+		list_add_tail(&cmd->state_list, &dev->state_list);
+
+	cmd->state_active = true;
 }
 
-static void transport_remove_task_from_execute_queue(
-	struct se_task *task,
-	struct se_device *dev)
+static void target_add_to_execute_list(struct se_cmd *cmd)
 {
 	unsigned long flags;
+	struct se_device *dev = cmd->se_dev;
 
-	if (WARN_ON(list_empty(&task->t_execute_list)))
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	__target_add_to_execute_list(cmd);
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+void __target_remove_from_execute_list(struct se_cmd *cmd)
+{
+	list_del_init(&cmd->execute_list);
+	atomic_dec(&cmd->se_dev->execute_tasks);
+}
+
+static void target_remove_from_execute_list(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned long flags;
+
+	if (WARN_ON(list_empty(&cmd->execute_list)))
 		return;
 
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	__transport_remove_task_from_execute_queue(task, dev);
+	__target_remove_from_execute_list(cmd);
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
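
The helpers above replace the old per-task execute and state queues with list heads embedded directly in struct se_cmd. A minimal sketch of the queueing rule they implement, for illustration only (the helper name is invented; the struct fields, SAM_TASK_ATTR_EMULATED and MSG_HEAD_TAG are the ones used in the hunk above, and the caller is assumed to hold dev->execute_task_lock as the real code does):

	/* Sketch: HEAD_OF_QUEUE commands go to the front of the device
	 * execute list, but only while SAM task-attribute emulation is
	 * active; everything else is appended in arrival order. */
	static void sketch_queue_cmd(struct se_device *dev, struct se_cmd *cmd)
	{
		bool head = dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
			    cmd->sam_task_attr == MSG_HEAD_TAG;

		if (head)
			list_add(&cmd->execute_list, &dev->execute_list);
		else
			list_add_tail(&cmd->execute_list, &dev->execute_list);
	}
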
 
@@ -999,8 +870,9 @@
 
 	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
 		atomic_read(&dev->execute_tasks), dev->queue_depth);
-	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
-		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
+	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
+		dev->se_sub_dev->se_dev_attrib.block_size,
+		dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
 	*bl += sprintf(b + *bl, "        ");
 }
 
@@ -1344,9 +1216,9 @@
 	INIT_LIST_HEAD(&dev->dev_list);
 	INIT_LIST_HEAD(&dev->dev_sep_list);
 	INIT_LIST_HEAD(&dev->dev_tmr_list);
-	INIT_LIST_HEAD(&dev->execute_task_list);
+	INIT_LIST_HEAD(&dev->execute_list);
 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
-	INIT_LIST_HEAD(&dev->state_task_list);
+	INIT_LIST_HEAD(&dev->state_list);
 	INIT_LIST_HEAD(&dev->qf_cmd_list);
 	spin_lock_init(&dev->execute_task_lock);
 	spin_lock_init(&dev->delayed_cmd_lock);
@@ -1457,6 +1329,7 @@
 	case VERIFY_16: /* SBC - VRProtect */
 	case WRITE_VERIFY: /* SBC - VRProtect */
 	case WRITE_VERIFY_12: /* SBC - VRProtect */
+	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
 		break;
 	default:
 		cdb[1] &= 0x1f; /* clear logical unit number */
@@ -1464,29 +1337,6 @@
 	}
 }
 
-static struct se_task *
-transport_generic_get_task(struct se_cmd *cmd,
-		enum dma_data_direction data_direction)
-{
-	struct se_task *task;
-	struct se_device *dev = cmd->se_dev;
-
-	task = dev->transport->alloc_task(cmd->t_task_cdb);
-	if (!task) {
-		pr_err("Unable to allocate struct se_task\n");
-		return NULL;
-	}
-
-	INIT_LIST_HEAD(&task->t_list);
-	INIT_LIST_HEAD(&task->t_execute_list);
-	INIT_LIST_HEAD(&task->t_state_list);
-	init_completion(&task->task_stop_comp);
-	task->task_se_cmd = cmd;
-	task->task_data_direction = data_direction;
-
-	return task;
-}
-
 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
 
 /*
@@ -1507,11 +1357,13 @@
 	INIT_LIST_HEAD(&cmd->se_qf_node);
 	INIT_LIST_HEAD(&cmd->se_queue_node);
 	INIT_LIST_HEAD(&cmd->se_cmd_list);
-	INIT_LIST_HEAD(&cmd->t_task_list);
+	INIT_LIST_HEAD(&cmd->execute_list);
+	INIT_LIST_HEAD(&cmd->state_list);
 	init_completion(&cmd->transport_lun_fe_stop_comp);
 	init_completion(&cmd->transport_lun_stop_comp);
 	init_completion(&cmd->t_transport_stop_comp);
 	init_completion(&cmd->cmd_wait_comp);
+	init_completion(&cmd->task_stop_comp);
 	spin_lock_init(&cmd->t_state_lock);
 	cmd->transport_state = CMD_T_DEV_ACTIVE;
 
@@ -1521,6 +1373,8 @@
 	cmd->data_direction = data_direction;
 	cmd->sam_task_attr = task_attr;
 	cmd->sense_buffer = sense_buffer;
+
+	cmd->state_active = false;
 }
 EXPORT_SYMBOL(transport_init_se_cmd);
 
@@ -1550,11 +1404,11 @@
 	return 0;
 }
 
-/*	transport_generic_allocate_tasks():
+/*	target_setup_cmd_from_cdb():
  *
  *	Called from fabric RX Thread.
  */
-int transport_generic_allocate_tasks(
+int target_setup_cmd_from_cdb(
 	struct se_cmd *cmd,
 	unsigned char *cdb)
 {
@@ -1620,7 +1474,7 @@
 	spin_unlock(&cmd->se_lun->lun_sep_lock);
 	return 0;
 }
-EXPORT_SYMBOL(transport_generic_allocate_tasks);
+EXPORT_SYMBOL(target_setup_cmd_from_cdb);
 
 /*
  * Used by fabric module frontends to queue tasks directly.
@@ -1701,6 +1555,8 @@
 	 */
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
 				data_length, data_dir, task_attr, sense);
+	if (flags & TARGET_SCF_UNKNOWN_SIZE)
+		se_cmd->unknown_data_length = 1;
 	/*
 	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
 	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
@@ -1726,11 +1582,18 @@
 	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
 	 * allocate the necessary tasks to complete the received CDB+data
 	 */
-	rc = transport_generic_allocate_tasks(se_cmd, cdb);
+	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
 	if (rc != 0) {
 		transport_generic_request_failure(se_cmd);
 		return;
 	}
+
+	/*
+	 * Check if we need to delay processing because of ALUA
+	 * Active/NonOptimized primary access state..
+	 */
+	core_alua_check_nonop_delay(se_cmd);
+
 	/*
 	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
 	 * for immediate execution of READs, otherwise wait for
@@ -1872,72 +1735,30 @@
 EXPORT_SYMBOL(transport_generic_handle_tmr);
 
 /*
- * If the task is active, request it to be stopped and sleep until it
+ * If the cmd is active, request it to be stopped and sleep until it
  * has completed.
  */
-bool target_stop_task(struct se_task *task, unsigned long *flags)
+bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
 {
-	struct se_cmd *cmd = task->task_se_cmd;
 	bool was_active = false;
 
-	if (task->task_flags & TF_ACTIVE) {
-		task->task_flags |= TF_REQUEST_STOP;
+	if (cmd->transport_state & CMD_T_BUSY) {
+		cmd->transport_state |= CMD_T_REQUEST_STOP;
 		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
 
-		pr_debug("Task %p waiting to complete\n", task);
-		wait_for_completion(&task->task_stop_comp);
-		pr_debug("Task %p stopped successfully\n", task);
+		pr_debug("cmd %p waiting to complete\n", cmd);
+		wait_for_completion(&cmd->task_stop_comp);
+		pr_debug("cmd %p stopped successfully\n", cmd);
 
 		spin_lock_irqsave(&cmd->t_state_lock, *flags);
-		atomic_dec(&cmd->t_task_cdbs_left);
-		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
+		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
+		cmd->transport_state &= ~CMD_T_BUSY;
 		was_active = true;
 	}
 
 	return was_active;
 }
 
-static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
-{
-	struct se_task *task, *task_tmp;
-	unsigned long flags;
-	int ret = 0;
-
-	pr_debug("ITT[0x%08x] - Stopping tasks\n",
-		cmd->se_tfo->get_task_tag(cmd));
-
-	/*
-	 * No tasks remain in the execution queue
-	 */
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task_list, t_list) {
-		pr_debug("Processing task %p\n", task);
-		/*
-		 * If the struct se_task has not been sent and is not active,
-		 * remove the struct se_task from the execution queue.
-		 */
-		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
-			spin_unlock_irqrestore(&cmd->t_state_lock,
-					flags);
-			transport_remove_task_from_execute_queue(task,
-					cmd->se_dev);
-
-			pr_debug("Task %p removed from execute queue\n", task);
-			spin_lock_irqsave(&cmd->t_state_lock, flags);
-			continue;
-		}
-
-		if (!target_stop_task(task, &flags)) {
-			pr_debug("Task %p - did nothing\n", task);
-			ret++;
-		}
-	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	return ret;
-}
-
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
@@ -1951,13 +1772,7 @@
 	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
 		cmd->se_tfo->get_cmd_state(cmd),
 		cmd->t_state, cmd->scsi_sense_reason);
-	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
-		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
-		" CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
-		cmd->t_task_list_num,
-		atomic_read(&cmd->t_task_cdbs_left),
-		atomic_read(&cmd->t_task_cdbs_sent),
-		atomic_read(&cmd->t_task_cdbs_ex_left),
+	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
 		(cmd->transport_state & CMD_T_ACTIVE) != 0,
 		(cmd->transport_state & CMD_T_STOP) != 0,
 		(cmd->transport_state & CMD_T_SENT) != 0);
@@ -2156,7 +1971,7 @@
  * Called from fabric module context in transport_generic_new_cmd() and
  * transport_generic_process_write()
  */
-static int transport_execute_tasks(struct se_cmd *cmd)
+static void transport_execute_tasks(struct se_cmd *cmd)
 {
 	int add_tasks;
 	struct se_device *se_dev = cmd->se_dev;
@@ -2170,71 +1985,52 @@
 		 * attribute for the tasks of the received struct se_cmd CDB
 		 */
 		add_tasks = transport_execute_task_attr(cmd);
-		if (!add_tasks)
-			goto execute_tasks;
-		/*
-		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
-		 * adds associated se_tasks while holding dev->execute_task_lock
-		 * before I/O dispatch to avoid a double spinlock access.
-		 */
-		__transport_execute_tasks(se_dev, cmd);
-		return 0;
+		if (add_tasks) {
+			__transport_execute_tasks(se_dev, cmd);
+			return;
+		}
 	}
-
-execute_tasks:
 	__transport_execute_tasks(se_dev, NULL);
-	return 0;
 }
 
-/*
- * Called to check struct se_device tcq depth window, and once open pull struct se_task
- * from struct se_device->execute_task_list and
- *
- * Called from transport_processing_thread()
- */
 static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
 {
 	int error;
 	struct se_cmd *cmd = NULL;
-	struct se_task *task = NULL;
 	unsigned long flags;
 
 check_depth:
 	spin_lock_irq(&dev->execute_task_lock);
 	if (new_cmd != NULL)
-		__transport_add_tasks_from_cmd(new_cmd);
+		__target_add_to_execute_list(new_cmd);
 
-	if (list_empty(&dev->execute_task_list)) {
+	if (list_empty(&dev->execute_list)) {
 		spin_unlock_irq(&dev->execute_task_lock);
 		return 0;
 	}
-	task = list_first_entry(&dev->execute_task_list,
-				struct se_task, t_execute_list);
-	__transport_remove_task_from_execute_queue(task, dev);
+	cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list);
+	__target_remove_from_execute_list(cmd);
 	spin_unlock_irq(&dev->execute_task_lock);
 
-	cmd = task->task_se_cmd;
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	task->task_flags |= (TF_ACTIVE | TF_SENT);
-	atomic_inc(&cmd->t_task_cdbs_sent);
-
-	if (atomic_read(&cmd->t_task_cdbs_sent) ==
-	    cmd->t_task_list_num)
-		cmd->transport_state |= CMD_T_SENT;
+	cmd->transport_state |= CMD_T_BUSY;
+	cmd->transport_state |= CMD_T_SENT;
 
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	if (cmd->execute_task)
-		error = cmd->execute_task(task);
-	else
-		error = dev->transport->do_task(task);
+	if (cmd->execute_cmd)
+		error = cmd->execute_cmd(cmd);
+	else {
+		error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
+				cmd->t_data_nents, cmd->data_direction);
+	}
+
 	if (error != 0) {
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
-		task->task_flags &= ~TF_ACTIVE;
+		cmd->transport_state &= ~CMD_T_BUSY;
 		cmd->transport_state &= ~CMD_T_SENT;
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-		transport_stop_tasks_for_cmd(cmd);
 		transport_generic_request_failure(cmd);
 	}
 
@@ -2392,12 +2188,12 @@
 		} else /* bytes */
 			return sectors;
 	}
-#if 0
+
 	pr_debug("Returning block_size: %u, sectors: %u == %u for"
-			" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
-			dev->se_sub_dev->se_dev_attrib.block_size * sectors,
-			dev->transport->name);
-#endif
+		" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
+		sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
+		dev->transport->name);
+
 	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
 }
 
@@ -2462,7 +2258,6 @@
 {
 	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
 	struct se_device *dev = cmd->se_dev;
-	struct se_task *task = NULL, *task_tmp;
 	unsigned long flags;
 	u32 offset = 0;
 
@@ -2477,44 +2272,37 @@
 		return 0;
 	}
 
-	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task_list, t_list) {
-		if (!(task->task_flags & TF_HAS_SENSE))
-			continue;
+	if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
+		goto out;
 
-		if (!dev->transport->get_sense_buffer) {
-			pr_err("dev->transport->get_sense_buffer"
-					" is NULL\n");
-			continue;
-		}
-
-		sense_buffer = dev->transport->get_sense_buffer(task);
-		if (!sense_buffer) {
-			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
-				" sense buffer for task with sense\n",
-				cmd->se_tfo->get_task_tag(cmd), task);
-			continue;
-		}
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
-				TRANSPORT_SENSE_BUFFER);
-
-		memcpy(&buffer[offset], sense_buffer,
-				TRANSPORT_SENSE_BUFFER);
-		cmd->scsi_status = task->task_scsi_status;
-		/* Automatically padded */
-		cmd->scsi_sense_length =
-				(TRANSPORT_SENSE_BUFFER + offset);
-
-		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
-				" and sense\n",
-			dev->se_hba->hba_id, dev->transport->name,
-				cmd->scsi_status);
-		return 0;
+	if (!dev->transport->get_sense_buffer) {
+		pr_err("dev->transport->get_sense_buffer is NULL\n");
+		goto out;
 	}
+
+	sense_buffer = dev->transport->get_sense_buffer(cmd);
+	if (!sense_buffer) {
+		pr_err("ITT 0x%08x cmd %p: Unable to locate"
+			" sense buffer for task with sense\n",
+			cmd->se_tfo->get_task_tag(cmd), cmd);
+		goto out;
+	}
+
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
+	offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);
+
+	memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);
+
+	/* Automatically padded */
+	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
+
+	pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
+		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
+	return 0;
+
+out:
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	return -1;
 }
 
@@ -2581,7 +2369,7 @@
  *	Generic Command Sequencer that should work for most DAS transport
  *	drivers.
  *
- *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
+ *	Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD
  *	RX Thread.
  *
  *	FIXME: Need to support other SCSI OPCODES where as well.
@@ -2615,11 +2403,10 @@
 		 * by the ALUA primary or secondary access state..
 		 */
 		if (ret > 0) {
-#if 0
 			pr_debug("[%s]: ALUA TG Port not available,"
 				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
 				cmd->se_tfo->get_fabric_name(), alua_ascq);
-#endif
+
 			transport_set_sense_codes(cmd, 0x04, alua_ascq);
 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
@@ -2695,6 +2482,7 @@
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
 		break;
 	case WRITE_10:
+	case WRITE_VERIFY:
 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
 		if (sector_ret)
 			goto out_unsupported_cdb;
@@ -2796,7 +2584,7 @@
 			if (target_check_write_same_discard(&cdb[10], dev) < 0)
 				goto out_unsupported_cdb;
 			if (!passthrough)
-				cmd->execute_task = target_emulate_write_same;
+				cmd->execute_cmd = target_emulate_write_same;
 			break;
 		default:
 			pr_err("VARIABLE_LENGTH_CMD service action"
@@ -2810,9 +2598,9 @@
 			/*
 			 * Check for emulated MI_REPORT_TARGET_PGS.
 			 */
-			if (cdb[1] == MI_REPORT_TARGET_PGS &&
+			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
 			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
-				cmd->execute_task =
+				cmd->execute_cmd =
 					target_emulate_report_target_port_groups;
 			}
 			size = (cdb[6] << 24) | (cdb[7] << 16) |
@@ -2835,13 +2623,13 @@
 		size = cdb[4];
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		if (!passthrough)
-			cmd->execute_task = target_emulate_modesense;
+			cmd->execute_cmd = target_emulate_modesense;
 		break;
 	case MODE_SENSE_10:
 		size = (cdb[7] << 8) + cdb[8];
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		if (!passthrough)
-			cmd->execute_task = target_emulate_modesense;
+			cmd->execute_cmd = target_emulate_modesense;
 		break;
 	case GPCMD_READ_BUFFER_CAPACITY:
 	case GPCMD_SEND_OPC:
@@ -2863,13 +2651,13 @@
 		break;
 	case PERSISTENT_RESERVE_IN:
 		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
-			cmd->execute_task = target_scsi3_emulate_pr_in;
+			cmd->execute_cmd = target_scsi3_emulate_pr_in;
 		size = (cdb[7] << 8) + cdb[8];
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		break;
 	case PERSISTENT_RESERVE_OUT:
 		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
-			cmd->execute_task = target_scsi3_emulate_pr_out;
+			cmd->execute_cmd = target_scsi3_emulate_pr_out;
 		size = (cdb[7] << 8) + cdb[8];
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		break;
@@ -2890,7 +2678,7 @@
 			 */
 			if (cdb[1] == MO_SET_TARGET_PGS &&
 			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
-				cmd->execute_task =
+				cmd->execute_cmd =
 					target_emulate_set_target_port_groups;
 			}
 
@@ -2912,7 +2700,7 @@
 			cmd->sam_task_attr = MSG_HEAD_TAG;
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		if (!passthrough)
-			cmd->execute_task = target_emulate_inquiry;
+			cmd->execute_cmd = target_emulate_inquiry;
 		break;
 	case READ_BUFFER:
 		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
@@ -2922,7 +2710,7 @@
 		size = READ_CAP_LEN;
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		if (!passthrough)
-			cmd->execute_task = target_emulate_readcapacity;
+			cmd->execute_cmd = target_emulate_readcapacity;
 		break;
 	case READ_MEDIA_SERIAL_NUMBER:
 	case SECURITY_PROTOCOL_IN:
@@ -2934,7 +2722,7 @@
 		switch (cmd->t_task_cdb[1] & 0x1f) {
 		case SAI_READ_CAPACITY_16:
 			if (!passthrough)
-				cmd->execute_task =
+				cmd->execute_cmd =
 					target_emulate_readcapacity_16;
 			break;
 		default:
@@ -2977,7 +2765,7 @@
 		size = cdb[4];
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		if (!passthrough)
-			cmd->execute_task = target_emulate_request_sense;
+			cmd->execute_cmd = target_emulate_request_sense;
 		break;
 	case READ_ELEMENT_STATUS:
 		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
@@ -3006,7 +2794,7 @@
 		 * emulation disabled.
 		 */
 		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
-			cmd->execute_task = target_scsi2_reservation_reserve;
+			cmd->execute_cmd = target_scsi2_reservation_reserve;
 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
 		break;
 	case RELEASE:
@@ -3021,7 +2809,7 @@
 			size = cmd->data_length;
 
 		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
-			cmd->execute_task = target_scsi2_reservation_release;
+			cmd->execute_cmd = target_scsi2_reservation_release;
 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
 		break;
 	case SYNCHRONIZE_CACHE:
@@ -3053,13 +2841,13 @@
 			if (transport_cmd_get_valid_sectors(cmd) < 0)
 				goto out_invalid_cdb_field;
 		}
-		cmd->execute_task = target_emulate_synchronize_cache;
+		cmd->execute_cmd = target_emulate_synchronize_cache;
 		break;
 	case UNMAP:
 		size = get_unaligned_be16(&cdb[7]);
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		if (!passthrough)
-			cmd->execute_task = target_emulate_unmap;
+			cmd->execute_cmd = target_emulate_unmap;
 		break;
 	case WRITE_SAME_16:
 		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
@@ -3079,7 +2867,7 @@
 		if (target_check_write_same_discard(&cdb[1], dev) < 0)
 			goto out_unsupported_cdb;
 		if (!passthrough)
-			cmd->execute_task = target_emulate_write_same;
+			cmd->execute_cmd = target_emulate_write_same;
 		break;
 	case WRITE_SAME:
 		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
@@ -3102,7 +2890,7 @@
 		if (target_check_write_same_discard(&cdb[1], dev) < 0)
 			goto out_unsupported_cdb;
 		if (!passthrough)
-			cmd->execute_task = target_emulate_write_same;
+			cmd->execute_cmd = target_emulate_write_same;
 		break;
 	case ALLOW_MEDIUM_REMOVAL:
 	case ERASE:
@@ -3115,7 +2903,7 @@
 	case WRITE_FILEMARKS:
 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
 		if (!passthrough)
-			cmd->execute_task = target_emulate_noop;
+			cmd->execute_cmd = target_emulate_noop;
 		break;
 	case GPCMD_CLOSE_TRACK:
 	case INITIALIZE_ELEMENT_STATUS:
@@ -3125,7 +2913,7 @@
 		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
 		break;
 	case REPORT_LUNS:
-		cmd->execute_task = target_report_luns;
+		cmd->execute_cmd = target_report_luns;
 		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
 		/*
 		 * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
@@ -3135,6 +2923,42 @@
 			cmd->sam_task_attr = MSG_HEAD_TAG;
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		break;
+	case GET_EVENT_STATUS_NOTIFICATION:
+		size = (cdb[7] << 8) | cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case ATA_16:
+		/* Only support ATA passthrough to pSCSI backends.. */
+		if (!passthrough)
+			goto out_unsupported_cdb;
+
+		/* T_LENGTH */
+		switch (cdb[2] & 0x3) {
+		case 0x0:
+			sectors = 0;
+			break;
+		case 0x1:
+			sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
+			break;
+		case 0x2:
+			sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
+			break;
+		case 0x3:
+			pr_err("T_LENGTH=0x3 not supported for ATA_16\n");
+			goto out_invalid_cdb_field;
+		}
+
+		/* BYTE_BLOCK */
+		if (cdb[2] & 0x4) {
+			/* BLOCK T_TYPE: 512 or sector */
+			size = sectors * ((cdb[2] & 0x10) ?
+				dev->se_sub_dev->se_dev_attrib.block_size : 512);
+		} else {
+			/* BYTE */
+			size = sectors;
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
 	default:
 		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
 			" 0x%02x, sending CHECK_CONDITION.\n",
@@ -3142,6 +2966,9 @@
 		goto out_unsupported_cdb;
 	}
 
+	if (cmd->unknown_data_length)
+		cmd->data_length = size;
+
 	if (size != cmd->data_length) {
 		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
@@ -3177,15 +3004,25 @@
 		cmd->data_length = size;
 	}
 
-	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
-	    sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
-		printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
-				   cdb[0], sectors);
-		goto out_invalid_cdb_field;
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
+			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+				" big sectors %u exceeds fabric_max_sectors:"
+				" %u\n", cdb[0], sectors,
+				su_dev->se_dev_attrib.fabric_max_sectors);
+			goto out_invalid_cdb_field;
+		}
+		if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
+			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+				" big sectors %u exceeds backend hw_max_sectors:"
+				" %u\n", cdb[0], sectors,
+				su_dev->se_dev_attrib.hw_max_sectors);
+			goto out_invalid_cdb_field;
+		}
 	}
 
 	/* reject any command that we don't have a handler for */
-	if (!(passthrough || cmd->execute_task ||
+	if (!(passthrough || cmd->execute_cmd ||
 	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
 		goto out_unsupported_cdb;
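
As a worked example of the two limits enforced above (the helper is illustrative, not part of the patch): a 1 MiB READ on a 512-byte-block device is 2048 sectors, and it is rejected unless both fabric_max_sectors and the backend's hw_max_sectors are at least that large.

	/* Sketch of the bound the two checks above enforce on a data CDB. */
	static bool sketch_sectors_within_limits(u32 sectors, u32 fabric_max_sectors,
						 u32 hw_max_sectors)
	{
		return sectors <= fabric_max_sectors && sectors <= hw_max_sectors;
	}
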
 
@@ -3250,7 +3087,7 @@
 			cmd_p->t_task_cdb[0],
 			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
 
-		transport_add_tasks_from_cmd(cmd_p);
+		target_add_to_execute_list(cmd_p);
 		new_active_tasks++;
 
 		spin_lock(&dev->delayed_cmd_lock);
@@ -3346,10 +3183,6 @@
 		if (transport_get_sense_data(cmd) < 0)
 			reason = TCM_NON_EXISTENT_LUN;
 
-		/*
-		 * Only set when an struct se_task->task_scsi_status returned
-		 * a non GOOD status.
-		 */
 		if (cmd->scsi_status) {
 			ret = transport_send_check_condition_and_sense(
 					cmd, reason, 1);
@@ -3424,33 +3257,6 @@
 	transport_handle_queue_full(cmd, cmd->se_dev);
 }
 
-static void transport_free_dev_tasks(struct se_cmd *cmd)
-{
-	struct se_task *task, *task_tmp;
-	unsigned long flags;
-	LIST_HEAD(dispose_list);
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	list_for_each_entry_safe(task, task_tmp,
-				&cmd->t_task_list, t_list) {
-		if (!(task->task_flags & TF_ACTIVE))
-			list_move_tail(&task->t_list, &dispose_list);
-	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	while (!list_empty(&dispose_list)) {
-		task = list_first_entry(&dispose_list, struct se_task, t_list);
-
-		if (task->task_sg != cmd->t_data_sg &&
-		    task->task_sg != cmd->t_bidi_data_sg)
-			kfree(task->task_sg);
-
-		list_del(&task->t_list);
-
-		cmd->se_dev->transport->free_task(task);
-	}
-}
-
 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
 {
 	struct scatterlist *sg;
@@ -3511,7 +3317,6 @@
 static void transport_put_cmd(struct se_cmd *cmd)
 {
 	unsigned long flags;
-	int free_tasks = 0;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (atomic_read(&cmd->t_fe_count)) {
@@ -3519,21 +3324,12 @@
 			goto out_busy;
 	}
 
-	if (atomic_read(&cmd->t_se_count)) {
-		if (!atomic_dec_and_test(&cmd->t_se_count))
-			goto out_busy;
-	}
-
 	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
-		transport_all_task_dev_remove_state(cmd);
-		free_tasks = 1;
+		target_remove_from_state_list(cmd);
 	}
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	if (free_tasks != 0)
-		transport_free_dev_tasks(cmd);
-
 	transport_free_pages(cmd);
 	transport_release_cmd(cmd);
 	return;
@@ -3683,245 +3479,14 @@
 	return -ENOMEM;
 }
 
-/* Reduce sectors if they are too long for the device */
-static inline sector_t transport_limit_task_sectors(
-	struct se_device *dev,
-	unsigned long long lba,
-	sector_t sectors)
-{
-	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
-
-	if (dev->transport->get_device_type(dev) == TYPE_DISK)
-		if ((lba + sectors) > transport_dev_end_lba(dev))
-			sectors = ((transport_dev_end_lba(dev) - lba) + 1);
-
-	return sectors;
-}
-
-
 /*
- * This function can be used by HW target mode drivers to create a linked
- * scatterlist from all contiguously allocated struct se_task->task_sg[].
- * This is intended to be called during the completion path by TCM Core
- * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
- */
-void transport_do_task_sg_chain(struct se_cmd *cmd)
-{
-	struct scatterlist *sg_first = NULL;
-	struct scatterlist *sg_prev = NULL;
-	int sg_prev_nents = 0;
-	struct scatterlist *sg;
-	struct se_task *task;
-	u32 chained_nents = 0;
-	int i;
-
-	BUG_ON(!cmd->se_tfo->task_sg_chaining);
-
-	/*
-	 * Walk the struct se_task list and setup scatterlist chains
-	 * for each contiguously allocated struct se_task->task_sg[].
-	 */
-	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		if (!task->task_sg)
-			continue;
-
-		if (!sg_first) {
-			sg_first = task->task_sg;
-			chained_nents = task->task_sg_nents;
-		} else {
-			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
-			chained_nents += task->task_sg_nents;
-		}
-		/*
-		 * For the padded tasks, use the extra SGL vector allocated
-		 * in transport_allocate_data_tasks() for the sg_prev_nents
-		 * offset into sg_chain() above.
-		 *
-		 * We do not need the padding for the last task (or a single
-		 * task), but in that case we will never use the sg_prev_nents
-		 * value below which would be incorrect.
-		 */
-		sg_prev_nents = (task->task_sg_nents + 1);
-		sg_prev = task->task_sg;
-	}
-	/*
-	 * Setup the starting pointer and total t_tasks_sg_linked_no including
-	 * padding SGs for linking and to mark the end.
-	 */
-	cmd->t_tasks_sg_chained = sg_first;
-	cmd->t_tasks_sg_chained_no = chained_nents;
-
-	pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
-		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
-		cmd->t_tasks_sg_chained_no);
-
-	for_each_sg(cmd->t_tasks_sg_chained, sg,
-			cmd->t_tasks_sg_chained_no, i) {
-
-		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
-			i, sg, sg_page(sg), sg->length, sg->offset);
-		if (sg_is_chain(sg))
-			pr_debug("SG: %p sg_is_chain=1\n", sg);
-		if (sg_is_last(sg))
-			pr_debug("SG: %p sg_is_last=1\n", sg);
-	}
-}
-EXPORT_SYMBOL(transport_do_task_sg_chain);
-
-/*
- * Break up cmd into chunks transport can handle
- */
-static int
-transport_allocate_data_tasks(struct se_cmd *cmd,
-	enum dma_data_direction data_direction,
-	struct scatterlist *cmd_sg, unsigned int sgl_nents)
-{
-	struct se_device *dev = cmd->se_dev;
-	int task_count, i;
-	unsigned long long lba;
-	sector_t sectors, dev_max_sectors;
-	u32 sector_size;
-
-	if (transport_cmd_get_valid_sectors(cmd) < 0)
-		return -EINVAL;
-
-	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
-	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
-
-	WARN_ON(cmd->data_length % sector_size);
-
-	lba = cmd->t_task_lba;
-	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
-	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
-
-	/*
-	 * If we need just a single task reuse the SG list in the command
-	 * and avoid a lot of work.
-	 */
-	if (task_count == 1) {
-		struct se_task *task;
-		unsigned long flags;
-
-		task = transport_generic_get_task(cmd, data_direction);
-		if (!task)
-			return -ENOMEM;
-
-		task->task_sg = cmd_sg;
-		task->task_sg_nents = sgl_nents;
-
-		task->task_lba = lba;
-		task->task_sectors = sectors;
-		task->task_size = task->task_sectors * sector_size;
-
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
-		list_add_tail(&task->t_list, &cmd->t_task_list);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-		return task_count;
-	}
-
-	for (i = 0; i < task_count; i++) {
-		struct se_task *task;
-		unsigned int task_size, task_sg_nents_padded;
-		struct scatterlist *sg;
-		unsigned long flags;
-		int count;
-
-		task = transport_generic_get_task(cmd, data_direction);
-		if (!task)
-			return -ENOMEM;
-
-		task->task_lba = lba;
-		task->task_sectors = min(sectors, dev_max_sectors);
-		task->task_size = task->task_sectors * sector_size;
-
-		/*
-		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
-		 * in order to calculate the number per task SGL entries
-		 */
-		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
-		/*
-		 * Check if the fabric module driver is requesting that all
-		 * struct se_task->task_sg[] be chained together..  If so,
-		 * then allocate an extra padding SG entry for linking and
-		 * marking the end of the chained SGL for every task except
-		 * the last one for (task_count > 1) operation, or skipping
-		 * the extra padding for the (task_count == 1) case.
-		 */
-		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
-			task_sg_nents_padded = (task->task_sg_nents + 1);
-		} else
-			task_sg_nents_padded = task->task_sg_nents;
-
-		task->task_sg = kmalloc(sizeof(struct scatterlist) *
-					task_sg_nents_padded, GFP_KERNEL);
-		if (!task->task_sg) {
-			cmd->se_dev->transport->free_task(task);
-			return -ENOMEM;
-		}
-
-		sg_init_table(task->task_sg, task_sg_nents_padded);
-
-		task_size = task->task_size;
-
-		/* Build new sgl, only up to task_size */
-		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
-			if (cmd_sg->length > task_size)
-				break;
-
-			*sg = *cmd_sg;
-			task_size -= cmd_sg->length;
-			cmd_sg = sg_next(cmd_sg);
-		}
-
-		lba += task->task_sectors;
-		sectors -= task->task_sectors;
-
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
-		list_add_tail(&task->t_list, &cmd->t_task_list);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-	}
-
-	return task_count;
-}
-
-static int
-transport_allocate_control_task(struct se_cmd *cmd)
-{
-	struct se_task *task;
-	unsigned long flags;
-
-	/* Workaround for handling zero-length control CDBs */
-	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
-	    !cmd->data_length)
-		return 0;
-
-	task = transport_generic_get_task(cmd, cmd->data_direction);
-	if (!task)
-		return -ENOMEM;
-
-	task->task_sg = cmd->t_data_sg;
-	task->task_size = cmd->data_length;
-	task->task_sg_nents = cmd->t_data_nents;
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	list_add_tail(&task->t_list, &cmd->t_task_list);
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	/* Success! Return number of tasks allocated */
-	return 1;
-}
-
-/*
- * Allocate any required ressources to execute the command, and either place
- * it on the execution queue if possible.  For writes we might not have the
- * payload yet, thus notify the fabric via a call to ->write_pending instead.
+ * Allocate any required resources to execute the command.  For writes we
+ * might not have the payload yet, so notify the fabric via a call to
+ * ->write_pending instead. Otherwise place it on the execution queue.
  */
 int transport_generic_new_cmd(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	int task_cdbs, task_cdbs_bidi = 0;
-	int set_counts = 1;
 	int ret = 0;
 
 	/*
@@ -3936,35 +3501,9 @@
 			goto out_fail;
 	}
 
-	/*
-	 * For BIDI command set up the read tasks first.
-	 */
-	if (cmd->t_bidi_data_sg &&
-	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
-		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
-
-		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
-				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
-				cmd->t_bidi_data_nents);
-		if (task_cdbs_bidi <= 0)
-			goto out_fail;
-
-		atomic_inc(&cmd->t_fe_count);
-		atomic_inc(&cmd->t_se_count);
-		set_counts = 0;
-	}
-
-	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
-		task_cdbs = transport_allocate_data_tasks(cmd,
-					cmd->data_direction, cmd->t_data_sg,
-					cmd->t_data_nents);
-	} else {
-		task_cdbs = transport_allocate_control_task(cmd);
-	}
-
-	if (task_cdbs < 0)
-		goto out_fail;
-	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+	/* Workaround for handling zero-length control CDBs */
+	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+	    !cmd->data_length) {
 		spin_lock_irq(&cmd->t_state_lock);
 		cmd->t_state = TRANSPORT_COMPLETE;
 		cmd->transport_state |= CMD_T_ACTIVE;
@@ -3982,29 +3521,31 @@
 		return 0;
 	}
 
-	if (set_counts) {
-		atomic_inc(&cmd->t_fe_count);
-		atomic_inc(&cmd->t_se_count);
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
+
+		if (transport_cmd_get_valid_sectors(cmd) < 0)
+			return -EINVAL;
+
+		BUG_ON(cmd->data_length % attr->block_size);
+		BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
+			attr->hw_max_sectors);
 	}
 
-	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
-	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
-	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
+	atomic_inc(&cmd->t_fe_count);
 
 	/*
-	 * For WRITEs, let the fabric know its buffer is ready..
-	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
-	 * will be added to the struct se_device execution queue after its WRITE
-	 * data has arrived. (ie: It gets handled by the transport processing
-	 * thread a second time)
+	 * For WRITEs, let the fabric know its buffer is ready.
+	 *
+	 * The command will be added to the execution queue after its write
+	 * data has arrived.
 	 */
 	if (cmd->data_direction == DMA_TO_DEVICE) {
-		transport_add_tasks_to_state_queue(cmd);
+		target_add_to_state_list(cmd);
 		return transport_generic_write_pending(cmd);
 	}
 	/*
-	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
-	 * to the execution queue.
+	 * Everything else but a WRITE, add the command to the execution queue.
 	 */
 	transport_execute_tasks(cmd);
 	return 0;
@@ -4091,8 +3632,6 @@
 		if (cmd->se_lun)
 			transport_lun_remove_cmd(cmd);
 
-		transport_free_dev_tasks(cmd);
-
 		transport_put_cmd(cmd);
 	}
 }
@@ -4233,7 +3772,8 @@
 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 {
 	unsigned long flags;
-	int ret;
+	int ret = 0;
+
 	/*
 	 * If the frontend has already requested this struct se_cmd to
 	 * be stopped, we can safely ignore this struct se_cmd.
@@ -4253,10 +3793,21 @@
 
 	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
 
-	ret = transport_stop_tasks_for_cmd(cmd);
+	// XXX: audit task_flags checks.
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if ((cmd->transport_state & CMD_T_BUSY) &&
+	    (cmd->transport_state & CMD_T_SENT)) {
+		if (!target_stop_cmd(cmd, &flags))
+			ret++;
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	} else {
+		spin_unlock_irqrestore(&cmd->t_state_lock,
+				flags);
+		target_remove_from_execute_list(cmd);
+	}
 
-	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
-			" %d\n", cmd, cmd->t_task_list_num, ret);
+	pr_debug("ConfigFS: cmd: %p stop tasks ret:"
+			" %d\n", cmd, ret);
 	if (!ret) {
 		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
 				cmd->se_tfo->get_task_tag(cmd));
@@ -4328,10 +3879,9 @@
 			goto check_cond;
 		}
 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
-		transport_all_task_dev_remove_state(cmd);
+		target_remove_from_state_list(cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
 
-		transport_free_dev_tasks(cmd);
 		/*
 		 * The Storage engine stopped this struct se_cmd before it was
 		 * send to the fabric frontend for delivery back to the
@@ -4444,7 +3994,7 @@
 		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 
-		transport_all_task_dev_remove_state(cmd);
+		target_remove_from_state_list(cmd);
 		/*
 		 * At this point, the frontend who was the originator of this
 		 * struct se_cmd, now owns the structure and can be released through
@@ -4710,12 +4260,12 @@
 		if (!send_status ||
 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
 			return 1;
-#if 0
+
 		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
 			" status for CDB: 0x%02x ITT: 0x%08x\n",
 			cmd->t_task_cdb[0],
 			cmd->se_tfo->get_task_tag(cmd));
-#endif
+
 		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
 		cmd->se_tfo->queue_status(cmd);
 		ret = 1;
@@ -4748,11 +4298,11 @@
 		}
 	}
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-#if 0
+
 	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
 		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
 		cmd->se_tfo->get_task_tag(cmd));
-#endif
+
 	cmd->se_tfo->queue_status(cmd);
 }
 
@@ -4865,7 +4415,7 @@
 	}
 
 out:
-	WARN_ON(!list_empty(&dev->state_task_list));
+	WARN_ON(!list_empty(&dev->state_list));
 	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
 	dev->process_thread = NULL;
 	return 0;
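
The ATA_16 case added to the command sequencer above decodes the expected transfer length straight from the passthrough CDB. A compact sketch of that decode with one worked example, for illustration (the helper name is invented; the bit handling mirrors the switch in the hunk):

	/*
	 * T_LENGTH (cdb[2] bits 1:0) selects which CDB field carries the
	 * count, the EXTEND bit (cdb[1] bit 0) pulls in that field's high
	 * byte, and BYTE_BLOCK/T_TYPE (cdb[2] bits 2 and 4) choose the
	 * unit: bytes, 512-byte blocks, or device logical blocks.
	 *
	 * Example: cdb[1] = 0x00, cdb[2] = 0x06 (T_LENGTH = 2, BYTE_BLOCK = 1,
	 * T_TYPE = 0), cdb[6] = 8  ->  sectors = 8, size = 8 * 512 = 4096.
	 */
	static u32 sketch_ata16_size(const u8 *cdb, u32 block_size)
	{
		u32 sectors;

		switch (cdb[2] & 0x3) {
		case 0x0:
			sectors = 0;
			break;
		case 0x1:
			sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
			break;
		case 0x2:
			sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
			break;
		default:
			return 0;	/* T_LENGTH = 0x3 is rejected above */
		}

		if (cdb[2] & 0x4)	/* BYTE_BLOCK: count is in blocks */
			return sectors * ((cdb[2] & 0x10) ? block_size : 512);

		return sectors;		/* count is in bytes */
	}
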
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index a375f25..f03fb97 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -215,20 +215,10 @@
 		 */
 		if ((ep->xid <= lport->lro_xid) &&
 		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
-			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
-				/*
-				 * cmd may have been broken up into multiple
-				 * tasks. Link their sgs together so we can
-				 * operate on them all at once.
-				 */
-				transport_do_task_sg_chain(se_cmd);
-				cmd->sg = se_cmd->t_tasks_sg_chained;
-				cmd->sg_cnt =
-					se_cmd->t_tasks_sg_chained_no;
-			}
-			if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
-							    cmd->sg,
-							    cmd->sg_cnt))
+			if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) &&
+			    lport->tt.ddp_target(lport, ep->xid,
+						 se_cmd->t_data_sg,
+						 se_cmd->t_data_nents))
 				cmd->was_ddp_setup = 1;
 		}
 	}
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 2948dc9..9501844 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -576,9 +576,6 @@
 	}
 	fabric->tf_ops = ft_fabric_ops;
 
-	/* Allowing support for task_sg_chaining */
-	fabric->tf_ops.task_sg_chaining = 1;
-
 	/*
 	 * Setup default attribute lists for various fabric->tf_cit_tmpl
 	 */
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index dc7c0db..071a505 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -228,7 +228,7 @@
 				"payload, Frame will be dropped if"
 				"'Sequence Initiative' bit in f_ctl is"
 				"not set\n", __func__, ep->xid, f_ctl,
-				cmd->sg, cmd->sg_cnt);
+				se_cmd->t_data_sg, se_cmd->t_data_nents);
 		/*
 		 * Invalidate HW DDP context if it was setup for respective
 		 * command. Invalidation of HW DDP context is required in both
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 08ebe90..654755a 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -469,7 +469,7 @@
 	tty = NULL;
 	if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
 		if (!ZS_IS_OPEN(uap_a)) {
-			pmz_debug("ChanA interrupt while open !\n");
+			pmz_debug("ChanA interrupt while not open !\n");
 			goto skip_a;
 		}
 		write_zsreg(uap_a, R0, RES_H_IUS);
@@ -493,8 +493,8 @@
 	spin_lock(&uap_b->port.lock);
 	tty = NULL;
 	if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
-		if (!ZS_IS_OPEN(uap_a)) {
-			pmz_debug("ChanB interrupt while open !\n");
+		if (!ZS_IS_OPEN(uap_b)) {
+			pmz_debug("ChanB interrupt while not open !\n");
 			goto skip_b;
 		}
 		write_zsreg(uap_b, R0, RES_H_IUS);
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 86dd1e3..3b0c4e3 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1085,15 +1085,21 @@
  *
  *	Handle console start. This is a wrapper for the VT layer
  *	so that we can keep kbd knowledge internal
+ *
+ *	FIXME: We eventually need to hold the kbd lock here to protect
+ *	the LED updating. We can't do it yet because fn_hold calls stop_tty
+ *	and start_tty under the kbd_event_lock, while normal tty paths
+ *	don't hold the lock. We probably need to split out an LED lock
+ *	but not during an -rc release!
  */
 void vt_kbd_con_start(int console)
 {
 	struct kbd_struct * kbd = kbd_table + console;
-	unsigned long flags;
-	spin_lock_irqsave(&kbd_event_lock, flags);
+/*	unsigned long flags; */
+/*	spin_lock_irqsave(&kbd_event_lock, flags); */
 	clr_vc_kbd_led(kbd, VC_SCROLLOCK);
 	set_leds();
-	spin_unlock_irqrestore(&kbd_event_lock, flags);
+/*	spin_unlock_irqrestore(&kbd_event_lock, flags); */
 }
 
 /**
@@ -1102,22 +1108,28 @@
  *
  *	Handle console stop. This is a wrapper for the VT layer
  *	so that we can keep kbd knowledge internal
+ *
+ *	FIXME: We eventually need to hold the kbd lock here to protect
+ *	the LED updating. We can't do it yet because fn_hold calls stop_tty
+ *	and start_tty under the kbd_event_lock, while normal tty paths
+ *	don't hold the lock. We probably need to split out an LED lock
+ *	but not during an -rc release!
  */
 void vt_kbd_con_stop(int console)
 {
 	struct kbd_struct * kbd = kbd_table + console;
-	unsigned long flags;
-	spin_lock_irqsave(&kbd_event_lock, flags);
+/*	unsigned long flags; */
+/*	spin_lock_irqsave(&kbd_event_lock, flags); */
 	set_vc_kbd_led(kbd, VC_SCROLLOCK);
 	set_leds();
-	spin_unlock_irqrestore(&kbd_event_lock, flags);
+/*	spin_unlock_irqrestore(&kbd_event_lock, flags); */
 }
 
 /*
  * This is the tasklet that updates LED state on all keyboards
  * attached to the box. The reason we use tasklet is that we
  * need to handle the scenario when keyboard handler is not
- * registered yet but we already getting updates form VT to
+ * registered yet but we are already getting updates from the VT to
  * update led state.
  */
 static void kbd_bh(unsigned long dummy)
@@ -2032,7 +2044,7 @@
 		kbd->default_ledflagstate = ((arg >> 4) & 7);
 		set_leds();
                 spin_unlock_irqrestore(&kbd_event_lock, flags);
-		break;
+		return 0;
 
 	/* the ioctls below only set the lights, not the functions */
 	/* for those, see KDGKBLED and KDSKBLED above */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c6f6560..0bb2b32 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -157,8 +157,9 @@
 	spin_lock(&desc->iuspin);
 	desc->werr = urb->status;
 	spin_unlock(&desc->iuspin);
-	clear_bit(WDM_IN_USE, &desc->flags);
 	kfree(desc->outbuf);
+	desc->outbuf = NULL;
+	clear_bit(WDM_IN_USE, &desc->flags);
 	wake_up(&desc->wait);
 }
 
@@ -338,7 +339,7 @@
 	if (we < 0)
 		return -EIO;
 
-	desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
+	buf = kmalloc(count, GFP_KERNEL);
 	if (!buf) {
 		rv = -ENOMEM;
 		goto outnl;
@@ -406,10 +407,12 @@
 	req->wIndex = desc->inum;
 	req->wLength = cpu_to_le16(count);
 	set_bit(WDM_IN_USE, &desc->flags);
+	desc->outbuf = buf;
 
 	rv = usb_submit_urb(desc->command, GFP_KERNEL);
 	if (rv < 0) {
 		kfree(buf);
+		desc->outbuf = NULL;
 		clear_bit(WDM_IN_USE, &desc->flags);
 		dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
 	} else {
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 622b4a4..57ed9e4 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -493,6 +493,15 @@
 
 	pci_save_state(pci_dev);
 
+	/*
+	 * Some systems crash if an EHCI controller is in D3 during
+	 * a sleep transition.  We have to leave such controllers in D0.
+	 */
+	if (hcd->broken_pci_sleep) {
+		dev_dbg(dev, "Staying in PCI D0\n");
+		return retval;
+	}
+
 	/* If the root hub is dead rather than suspended, disallow remote
 	 * wakeup.  usb_hc_died() should ensure that both hosts are marked as
 	 * dying, so we only need to check the primary roothub.
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 2633f75..569b33e 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -798,6 +798,16 @@
 	  Say "y" to link the driver statically, or "m" to build
 	  a dynamically linked module called "g_mass_storage".
 
+config USB_GADGET_TARGET
+	tristate "USB Gadget Target Fabric Module"
+	depends on TARGET_CORE
+	help
+	  This fabric is a USB gadget. Two USB protocols are supported:
+	  BBB or BOT (Bulk Only Transport) and UAS (USB Attached SCSI). BOT is
+	  advertised on alternative interface 0 (primary) and UAS on
+	  alternative interface 1. Both protocols can work on USB 2.0 and
+	  USB 3.0. UAS makes use of the USB 3.0 streams feature.
+
 config USB_G_SERIAL
 	tristate "Serial Gadget (with CDC ACM and CDC OBEX support)"
 	help
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index b7f6eef..fc5b8368 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -52,6 +52,7 @@
 g_webcam-y			:= webcam.o
 g_ncm-y				:= ncm.o
 g_acm_ms-y			:= acm_ms.o
+g_tcm_usb_gadget-y		:= tcm_usb_gadget.o
 
 obj-$(CONFIG_USB_ZERO)		+= g_zero.o
 obj-$(CONFIG_USB_AUDIO)		+= g_audio.o
@@ -71,3 +72,4 @@
 obj-$(CONFIG_USB_G_WEBCAM)	+= g_webcam.o
 obj-$(CONFIG_USB_G_NCM)		+= g_ncm.o
 obj-$(CONFIG_USB_G_ACM_MS)	+= g_acm_ms.o
+obj-$(CONFIG_USB_GADGET_TARGET)	+= tcm_usb_gadget.o
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index a6dfd21..170cbe8 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -927,7 +927,6 @@
 
 	dum->driver = NULL;
 
-	dummy_pullup(&dum->gadget, 0);
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a371e96..cb8c162 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2189,7 +2189,7 @@
 		common->data_size_from_cmnd = 0;
 		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
 		reply = check_command(common, common->cmnd_size,
-				      DATA_DIR_UNKNOWN, 0xff, 0, unknown);
+				      DATA_DIR_UNKNOWN, ~0, 0, unknown);
 		if (reply == 0) {
 			common->curlun->sense_data = SS_INVALID_COMMAND;
 			reply = -EINVAL;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 5234365..d4f823f 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -637,7 +637,7 @@
 
 	DBG(cdev, "%s\n", __func__);
 
-	rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3,
+	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3,
 				bitrate(cdev->gadget) / 100);
 	rndis_signal_connect(rndis->config);
 }
@@ -648,7 +648,7 @@
 
 	DBG(geth->func.config->cdev, "%s\n", __func__);
 
-	rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
+	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
 	rndis_signal_disconnect(rndis->config);
 }
 
@@ -765,7 +765,7 @@
 		goto fail;
 	rndis->config = status;
 
-	rndis_set_param_medium(rndis->config, NDIS_MEDIUM_802_3, 0);
+	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
 	rndis_set_host_mac(rndis->config, rndis->ethaddr);
 
 #if 0
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 4fac569..a896d73 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -2579,7 +2579,7 @@
 		fsg->data_size_from_cmnd = 0;
 		sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
 		if ((reply = check_command(fsg, fsg->cmnd_size,
-				DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
+				DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
 			fsg->curlun->sense_data = SS_INVALID_COMMAND;
 			reply = -EINVAL;
 		}
diff --git a/drivers/usb/gadget/ndis.h b/drivers/usb/gadget/ndis.h
index b0e52fc..a19f72d 100644
--- a/drivers/usb/gadget/ndis.h
+++ b/drivers/usb/gadget/ndis.h
@@ -15,11 +15,6 @@
 #ifndef _LINUX_NDIS_H
 #define _LINUX_NDIS_H
 
-
-#define NDIS_STATUS_MULTICAST_FULL	  0xC0010009
-#define NDIS_STATUS_MULTICAST_EXISTS      0xC001000A
-#define NDIS_STATUS_MULTICAST_NOT_FOUND   0xC001000B
-
 enum NDIS_DEVICE_POWER_STATE {
 	NdisDeviceStateUnspecified = 0,
 	NdisDeviceStateD0,
@@ -35,11 +30,6 @@
 	enum NDIS_DEVICE_POWER_STATE  MinLinkChangeWakeUp;
 };
 
-/* NDIS_PNP_CAPABILITIES.Flags constants */
-#define NDIS_DEVICE_WAKE_UP_ENABLE                0x00000001
-#define NDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE  0x00000002
-#define NDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE   0x00000004
-
 struct NDIS_PNP_CAPABILITIES {
 	__le32					Flags;
 	struct NDIS_PM_WAKE_UP_CAPABILITIES	WakeUpCapabilities;
@@ -54,158 +44,4 @@
 	__le32	PatternFlags;
 };
 
-
-/* Required Object IDs (OIDs) */
-#define OID_GEN_SUPPORTED_LIST            0x00010101
-#define OID_GEN_HARDWARE_STATUS           0x00010102
-#define OID_GEN_MEDIA_SUPPORTED           0x00010103
-#define OID_GEN_MEDIA_IN_USE              0x00010104
-#define OID_GEN_MAXIMUM_LOOKAHEAD         0x00010105
-#define OID_GEN_MAXIMUM_FRAME_SIZE        0x00010106
-#define OID_GEN_LINK_SPEED                0x00010107
-#define OID_GEN_TRANSMIT_BUFFER_SPACE     0x00010108
-#define OID_GEN_RECEIVE_BUFFER_SPACE      0x00010109
-#define OID_GEN_TRANSMIT_BLOCK_SIZE       0x0001010A
-#define OID_GEN_RECEIVE_BLOCK_SIZE        0x0001010B
-#define OID_GEN_VENDOR_ID                 0x0001010C
-#define OID_GEN_VENDOR_DESCRIPTION        0x0001010D
-#define OID_GEN_CURRENT_PACKET_FILTER     0x0001010E
-#define OID_GEN_CURRENT_LOOKAHEAD         0x0001010F
-#define OID_GEN_DRIVER_VERSION            0x00010110
-#define OID_GEN_MAXIMUM_TOTAL_SIZE        0x00010111
-#define OID_GEN_PROTOCOL_OPTIONS          0x00010112
-#define OID_GEN_MAC_OPTIONS               0x00010113
-#define OID_GEN_MEDIA_CONNECT_STATUS      0x00010114
-#define OID_GEN_MAXIMUM_SEND_PACKETS      0x00010115
-#define OID_GEN_VENDOR_DRIVER_VERSION     0x00010116
-#define OID_GEN_SUPPORTED_GUIDS           0x00010117
-#define OID_GEN_NETWORK_LAYER_ADDRESSES   0x00010118
-#define OID_GEN_TRANSPORT_HEADER_OFFSET   0x00010119
-#define OID_GEN_MACHINE_NAME              0x0001021A
-#define OID_GEN_RNDIS_CONFIG_PARAMETER    0x0001021B
-#define OID_GEN_VLAN_ID                   0x0001021C
-
-/* Optional OIDs */
-#define OID_GEN_MEDIA_CAPABILITIES        0x00010201
-#define OID_GEN_PHYSICAL_MEDIUM           0x00010202
-
-/* Required statistics OIDs */
-#define OID_GEN_XMIT_OK                   0x00020101
-#define OID_GEN_RCV_OK                    0x00020102
-#define OID_GEN_XMIT_ERROR                0x00020103
-#define OID_GEN_RCV_ERROR                 0x00020104
-#define OID_GEN_RCV_NO_BUFFER             0x00020105
-
-/* Optional statistics OIDs */
-#define OID_GEN_DIRECTED_BYTES_XMIT       0x00020201
-#define OID_GEN_DIRECTED_FRAMES_XMIT      0x00020202
-#define OID_GEN_MULTICAST_BYTES_XMIT      0x00020203
-#define OID_GEN_MULTICAST_FRAMES_XMIT     0x00020204
-#define OID_GEN_BROADCAST_BYTES_XMIT      0x00020205
-#define OID_GEN_BROADCAST_FRAMES_XMIT     0x00020206
-#define OID_GEN_DIRECTED_BYTES_RCV        0x00020207
-#define OID_GEN_DIRECTED_FRAMES_RCV       0x00020208
-#define OID_GEN_MULTICAST_BYTES_RCV       0x00020209
-#define OID_GEN_MULTICAST_FRAMES_RCV      0x0002020A
-#define OID_GEN_BROADCAST_BYTES_RCV       0x0002020B
-#define OID_GEN_BROADCAST_FRAMES_RCV      0x0002020C
-#define OID_GEN_RCV_CRC_ERROR             0x0002020D
-#define OID_GEN_TRANSMIT_QUEUE_LENGTH     0x0002020E
-#define OID_GEN_GET_TIME_CAPS             0x0002020F
-#define OID_GEN_GET_NETCARD_TIME          0x00020210
-#define OID_GEN_NETCARD_LOAD              0x00020211
-#define OID_GEN_DEVICE_PROFILE            0x00020212
-#define OID_GEN_INIT_TIME_MS              0x00020213
-#define OID_GEN_RESET_COUNTS              0x00020214
-#define OID_GEN_MEDIA_SENSE_COUNTS        0x00020215
-#define OID_GEN_FRIENDLY_NAME             0x00020216
-#define OID_GEN_MINIPORT_INFO             0x00020217
-#define OID_GEN_RESET_VERIFY_PARAMETERS   0x00020218
-
-/* IEEE 802.3 (Ethernet) OIDs */
-#define NDIS_802_3_MAC_OPTION_PRIORITY    0x00000001
-
-#define OID_802_3_PERMANENT_ADDRESS       0x01010101
-#define OID_802_3_CURRENT_ADDRESS         0x01010102
-#define OID_802_3_MULTICAST_LIST          0x01010103
-#define OID_802_3_MAXIMUM_LIST_SIZE       0x01010104
-#define OID_802_3_MAC_OPTIONS             0x01010105
-#define OID_802_3_RCV_ERROR_ALIGNMENT     0x01020101
-#define OID_802_3_XMIT_ONE_COLLISION      0x01020102
-#define OID_802_3_XMIT_MORE_COLLISIONS    0x01020103
-#define OID_802_3_XMIT_DEFERRED           0x01020201
-#define OID_802_3_XMIT_MAX_COLLISIONS     0x01020202
-#define OID_802_3_RCV_OVERRUN             0x01020203
-#define OID_802_3_XMIT_UNDERRUN           0x01020204
-#define OID_802_3_XMIT_HEARTBEAT_FAILURE  0x01020205
-#define OID_802_3_XMIT_TIMES_CRS_LOST     0x01020206
-#define OID_802_3_XMIT_LATE_COLLISIONS    0x01020207
-
-/* OID_GEN_MINIPORT_INFO constants */
-#define NDIS_MINIPORT_BUS_MASTER                      0x00000001
-#define NDIS_MINIPORT_WDM_DRIVER                      0x00000002
-#define NDIS_MINIPORT_SG_LIST                         0x00000004
-#define NDIS_MINIPORT_SUPPORTS_MEDIA_QUERY            0x00000008
-#define NDIS_MINIPORT_INDICATES_PACKETS               0x00000010
-#define NDIS_MINIPORT_IGNORE_PACKET_QUEUE             0x00000020
-#define NDIS_MINIPORT_IGNORE_REQUEST_QUEUE            0x00000040
-#define NDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS        0x00000080
-#define NDIS_MINIPORT_INTERMEDIATE_DRIVER             0x00000100
-#define NDIS_MINIPORT_IS_NDIS_5                       0x00000200
-#define NDIS_MINIPORT_IS_CO                           0x00000400
-#define NDIS_MINIPORT_DESERIALIZE                     0x00000800
-#define NDIS_MINIPORT_REQUIRES_MEDIA_POLLING          0x00001000
-#define NDIS_MINIPORT_SUPPORTS_MEDIA_SENSE            0x00002000
-#define NDIS_MINIPORT_NETBOOT_CARD                    0x00004000
-#define NDIS_MINIPORT_PM_SUPPORTED                    0x00008000
-#define NDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE  0x00010000
-#define NDIS_MINIPORT_USES_SAFE_BUFFER_APIS           0x00020000
-#define NDIS_MINIPORT_HIDDEN                          0x00040000
-#define NDIS_MINIPORT_SWENUM                          0x00080000
-#define NDIS_MINIPORT_SURPRISE_REMOVE_OK              0x00100000
-#define NDIS_MINIPORT_NO_HALT_ON_SUSPEND              0x00200000
-#define NDIS_MINIPORT_HARDWARE_DEVICE                 0x00400000
-#define NDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS    0x00800000
-#define NDIS_MINIPORT_64BITS_DMA                      0x01000000
-
-#define NDIS_MEDIUM_802_3		0x00000000
-#define NDIS_MEDIUM_802_5		0x00000001
-#define NDIS_MEDIUM_FDDI		0x00000002
-#define NDIS_MEDIUM_WAN			0x00000003
-#define NDIS_MEDIUM_LOCAL_TALK		0x00000004
-#define NDIS_MEDIUM_DIX			0x00000005
-#define NDIS_MEDIUM_ARCENT_RAW		0x00000006
-#define NDIS_MEDIUM_ARCENT_878_2	0x00000007
-#define NDIS_MEDIUM_ATM			0x00000008
-#define NDIS_MEDIUM_WIRELESS_LAN	0x00000009
-#define NDIS_MEDIUM_IRDA		0x0000000A
-#define NDIS_MEDIUM_BPC			0x0000000B
-#define NDIS_MEDIUM_CO_WAN		0x0000000C
-#define NDIS_MEDIUM_1394		0x0000000D
-
-#define NDIS_PACKET_TYPE_DIRECTED	0x00000001
-#define NDIS_PACKET_TYPE_MULTICAST	0x00000002
-#define NDIS_PACKET_TYPE_ALL_MULTICAST	0x00000004
-#define NDIS_PACKET_TYPE_BROADCAST	0x00000008
-#define NDIS_PACKET_TYPE_SOURCE_ROUTING	0x00000010
-#define NDIS_PACKET_TYPE_PROMISCUOUS	0x00000020
-#define NDIS_PACKET_TYPE_SMT		0x00000040
-#define NDIS_PACKET_TYPE_ALL_LOCAL	0x00000080
-#define NDIS_PACKET_TYPE_GROUP		0x00000100
-#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL	0x00000200
-#define NDIS_PACKET_TYPE_FUNCTIONAL	0x00000400
-#define NDIS_PACKET_TYPE_MAC_FRAME	0x00000800
-
-#define NDIS_MEDIA_STATE_CONNECTED	0x00000000
-#define NDIS_MEDIA_STATE_DISCONNECTED	0x00000001
-
-#define NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA     0x00000001
-#define NDIS_MAC_OPTION_RECEIVE_SERIALIZED      0x00000002
-#define NDIS_MAC_OPTION_TRANSFERS_NOT_PEND      0x00000004
-#define NDIS_MAC_OPTION_NO_LOOPBACK             0x00000008
-#define NDIS_MAC_OPTION_FULL_DUPLEX             0x00000010
-#define NDIS_MAC_OPTION_EOTX_INDICATION         0x00000020
-#define NDIS_MAC_OPTION_8021P_PRIORITY          0x00000040
-#define NDIS_MAC_OPTION_RESERVED                0x80000000
-
 #endif /* _LINUX_NDIS_H */
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 73a934a..b35babe 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -73,65 +73,65 @@
 static const u32 oid_supported_list[] =
 {
 	/* the general stuff */
-	OID_GEN_SUPPORTED_LIST,
-	OID_GEN_HARDWARE_STATUS,
-	OID_GEN_MEDIA_SUPPORTED,
-	OID_GEN_MEDIA_IN_USE,
-	OID_GEN_MAXIMUM_FRAME_SIZE,
-	OID_GEN_LINK_SPEED,
-	OID_GEN_TRANSMIT_BLOCK_SIZE,
-	OID_GEN_RECEIVE_BLOCK_SIZE,
-	OID_GEN_VENDOR_ID,
-	OID_GEN_VENDOR_DESCRIPTION,
-	OID_GEN_VENDOR_DRIVER_VERSION,
-	OID_GEN_CURRENT_PACKET_FILTER,
-	OID_GEN_MAXIMUM_TOTAL_SIZE,
-	OID_GEN_MEDIA_CONNECT_STATUS,
-	OID_GEN_PHYSICAL_MEDIUM,
+	RNDIS_OID_GEN_SUPPORTED_LIST,
+	RNDIS_OID_GEN_HARDWARE_STATUS,
+	RNDIS_OID_GEN_MEDIA_SUPPORTED,
+	RNDIS_OID_GEN_MEDIA_IN_USE,
+	RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
+	RNDIS_OID_GEN_LINK_SPEED,
+	RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE,
+	RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE,
+	RNDIS_OID_GEN_VENDOR_ID,
+	RNDIS_OID_GEN_VENDOR_DESCRIPTION,
+	RNDIS_OID_GEN_VENDOR_DRIVER_VERSION,
+	RNDIS_OID_GEN_CURRENT_PACKET_FILTER,
+	RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE,
+	RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
+	RNDIS_OID_GEN_PHYSICAL_MEDIUM,
 
 	/* the statistical stuff */
-	OID_GEN_XMIT_OK,
-	OID_GEN_RCV_OK,
-	OID_GEN_XMIT_ERROR,
-	OID_GEN_RCV_ERROR,
-	OID_GEN_RCV_NO_BUFFER,
+	RNDIS_OID_GEN_XMIT_OK,
+	RNDIS_OID_GEN_RCV_OK,
+	RNDIS_OID_GEN_XMIT_ERROR,
+	RNDIS_OID_GEN_RCV_ERROR,
+	RNDIS_OID_GEN_RCV_NO_BUFFER,
 #ifdef	RNDIS_OPTIONAL_STATS
-	OID_GEN_DIRECTED_BYTES_XMIT,
-	OID_GEN_DIRECTED_FRAMES_XMIT,
-	OID_GEN_MULTICAST_BYTES_XMIT,
-	OID_GEN_MULTICAST_FRAMES_XMIT,
-	OID_GEN_BROADCAST_BYTES_XMIT,
-	OID_GEN_BROADCAST_FRAMES_XMIT,
-	OID_GEN_DIRECTED_BYTES_RCV,
-	OID_GEN_DIRECTED_FRAMES_RCV,
-	OID_GEN_MULTICAST_BYTES_RCV,
-	OID_GEN_MULTICAST_FRAMES_RCV,
-	OID_GEN_BROADCAST_BYTES_RCV,
-	OID_GEN_BROADCAST_FRAMES_RCV,
-	OID_GEN_RCV_CRC_ERROR,
-	OID_GEN_TRANSMIT_QUEUE_LENGTH,
+	RNDIS_OID_GEN_DIRECTED_BYTES_XMIT,
+	RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT,
+	RNDIS_OID_GEN_MULTICAST_BYTES_XMIT,
+	RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT,
+	RNDIS_OID_GEN_BROADCAST_BYTES_XMIT,
+	RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT,
+	RNDIS_OID_GEN_DIRECTED_BYTES_RCV,
+	RNDIS_OID_GEN_DIRECTED_FRAMES_RCV,
+	RNDIS_OID_GEN_MULTICAST_BYTES_RCV,
+	RNDIS_OID_GEN_MULTICAST_FRAMES_RCV,
+	RNDIS_OID_GEN_BROADCAST_BYTES_RCV,
+	RNDIS_OID_GEN_BROADCAST_FRAMES_RCV,
+	RNDIS_OID_GEN_RCV_CRC_ERROR,
+	RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH,
 #endif	/* RNDIS_OPTIONAL_STATS */
 
 	/* mandatory 802.3 */
 	/* the general stuff */
-	OID_802_3_PERMANENT_ADDRESS,
-	OID_802_3_CURRENT_ADDRESS,
-	OID_802_3_MULTICAST_LIST,
-	OID_802_3_MAC_OPTIONS,
-	OID_802_3_MAXIMUM_LIST_SIZE,
+	RNDIS_OID_802_3_PERMANENT_ADDRESS,
+	RNDIS_OID_802_3_CURRENT_ADDRESS,
+	RNDIS_OID_802_3_MULTICAST_LIST,
+	RNDIS_OID_802_3_MAC_OPTIONS,
+	RNDIS_OID_802_3_MAXIMUM_LIST_SIZE,
 
 	/* the statistical stuff */
-	OID_802_3_RCV_ERROR_ALIGNMENT,
-	OID_802_3_XMIT_ONE_COLLISION,
-	OID_802_3_XMIT_MORE_COLLISIONS,
+	RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT,
+	RNDIS_OID_802_3_XMIT_ONE_COLLISION,
+	RNDIS_OID_802_3_XMIT_MORE_COLLISIONS,
 #ifdef	RNDIS_OPTIONAL_STATS
-	OID_802_3_XMIT_DEFERRED,
-	OID_802_3_XMIT_MAX_COLLISIONS,
-	OID_802_3_RCV_OVERRUN,
-	OID_802_3_XMIT_UNDERRUN,
-	OID_802_3_XMIT_HEARTBEAT_FAILURE,
-	OID_802_3_XMIT_TIMES_CRS_LOST,
-	OID_802_3_XMIT_LATE_COLLISIONS,
+	RNDIS_OID_802_3_XMIT_DEFERRED,
+	RNDIS_OID_802_3_XMIT_MAX_COLLISIONS,
+	RNDIS_OID_802_3_RCV_OVERRUN,
+	RNDIS_OID_802_3_XMIT_UNDERRUN,
+	RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE,
+	RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST,
+	RNDIS_OID_802_3_XMIT_LATE_COLLISIONS,
 #endif	/* RNDIS_OPTIONAL_STATS */
 
 #ifdef	RNDIS_PM
@@ -200,8 +200,8 @@
 	/* general oids (table 4-1) */
 
 	/* mandatory */
-	case OID_GEN_SUPPORTED_LIST:
-		pr_debug("%s: OID_GEN_SUPPORTED_LIST\n", __func__);
+	case RNDIS_OID_GEN_SUPPORTED_LIST:
+		pr_debug("%s: RNDIS_OID_GEN_SUPPORTED_LIST\n", __func__);
 		length = sizeof(oid_supported_list);
 		count  = length / sizeof(u32);
 		for (i = 0; i < count; i++)
@@ -210,8 +210,8 @@
 		break;
 
 	/* mandatory */
-	case OID_GEN_HARDWARE_STATUS:
-		pr_debug("%s: OID_GEN_HARDWARE_STATUS\n", __func__);
+	case RNDIS_OID_GEN_HARDWARE_STATUS:
+		pr_debug("%s: RNDIS_OID_GEN_HARDWARE_STATUS\n", __func__);
 		/* Bogus question!
 		 * Hardware must be ready to receive high level protocols.
 		 * BTW:
@@ -223,23 +223,23 @@
 		break;
 
 	/* mandatory */
-	case OID_GEN_MEDIA_SUPPORTED:
-		pr_debug("%s: OID_GEN_MEDIA_SUPPORTED\n", __func__);
+	case RNDIS_OID_GEN_MEDIA_SUPPORTED:
+		pr_debug("%s: RNDIS_OID_GEN_MEDIA_SUPPORTED\n", __func__);
 		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
 		retval = 0;
 		break;
 
 	/* mandatory */
-	case OID_GEN_MEDIA_IN_USE:
-		pr_debug("%s: OID_GEN_MEDIA_IN_USE\n", __func__);
+	case RNDIS_OID_GEN_MEDIA_IN_USE:
+		pr_debug("%s: RNDIS_OID_GEN_MEDIA_IN_USE\n", __func__);
 		/* one medium, one transport... (maybe you do it better) */
 		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
 		retval = 0;
 		break;
 
 	/* mandatory */
-	case OID_GEN_MAXIMUM_FRAME_SIZE:
-		pr_debug("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__);
+	case RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE:
+		pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__);
 		if (rndis_per_dev_params[configNr].dev) {
 			*outbuf = cpu_to_le32(
 				rndis_per_dev_params[configNr].dev->mtu);
@@ -248,11 +248,11 @@
 		break;
 
 	/* mandatory */
-	case OID_GEN_LINK_SPEED:
+	case RNDIS_OID_GEN_LINK_SPEED:
 		if (rndis_debug > 1)
-			pr_debug("%s: OID_GEN_LINK_SPEED\n", __func__);
+			pr_debug("%s: RNDIS_OID_GEN_LINK_SPEED\n", __func__);
 		if (rndis_per_dev_params[configNr].media_state
-				== NDIS_MEDIA_STATE_DISCONNECTED)
+				== RNDIS_MEDIA_STATE_DISCONNECTED)
 			*outbuf = cpu_to_le32(0);
 		else
 			*outbuf = cpu_to_le32(
@@ -261,8 +261,8 @@
 		break;
 
 	/* mandatory */
-	case OID_GEN_TRANSMIT_BLOCK_SIZE:
-		pr_debug("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__);
+	case RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE:
+		pr_debug("%s: RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__);
 		if (rndis_per_dev_params[configNr].dev) {
 			*outbuf = cpu_to_le32(
 				rndis_per_dev_params[configNr].dev->mtu);
@@ -271,8 +271,8 @@
 		break;
 
 	/* mandatory */
-	case OID_GEN_RECEIVE_BLOCK_SIZE:
-		pr_debug("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__);
+	case RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE:
+		pr_debug("%s: RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__);
 		if (rndis_per_dev_params[configNr].dev) {
 			*outbuf = cpu_to_le32(
 				rndis_per_dev_params[configNr].dev->mtu);
@@ -281,16 +281,16 @@
 		break;
 
 	/* mandatory */
-	case OID_GEN_VENDOR_ID:
-		pr_debug("%s: OID_GEN_VENDOR_ID\n", __func__);
+	case RNDIS_OID_GEN_VENDOR_ID:
+		pr_debug("%s: RNDIS_OID_GEN_VENDOR_ID\n", __func__);
 		*outbuf = cpu_to_le32(
 			rndis_per_dev_params[configNr].vendorID);
 		retval = 0;
 		break;
 
 	/* mandatory */
-	case OID_GEN_VENDOR_DESCRIPTION:
-		pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
+	case RNDIS_OID_GEN_VENDOR_DESCRIPTION:
+		pr_debug("%s: RNDIS_OID_GEN_VENDOR_DESCRIPTION\n", __func__);
 		if (rndis_per_dev_params[configNr].vendorDescr) {
 			length = strlen(rndis_per_dev_params[configNr].
 					vendorDescr);
@@ -303,38 +303,38 @@
 		retval = 0;
 		break;
 
-	case OID_GEN_VENDOR_DRIVER_VERSION:
-		pr_debug("%s: OID_GEN_VENDOR_DRIVER_VERSION\n", __func__);
+	case RNDIS_OID_GEN_VENDOR_DRIVER_VERSION:
+		pr_debug("%s: RNDIS_OID_GEN_VENDOR_DRIVER_VERSION\n", __func__);
 		/* Created as LE */
 		*outbuf = rndis_driver_version;
 		retval = 0;
 		break;
 
 	/* mandatory */
-	case OID_GEN_CURRENT_PACKET_FILTER:
-		pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __func__);
+	case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:
+		pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER\n", __func__);
 		*outbuf = cpu_to_le32(*rndis_per_dev_params[configNr].filter);
 		retval = 0;
 		break;
 
 	/* mandatory */
-	case OID_GEN_MAXIMUM_TOTAL_SIZE:
-		pr_debug("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__);
+	case RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE:
+		pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__);
 		*outbuf = cpu_to_le32(RNDIS_MAX_TOTAL_SIZE);
 		retval = 0;
 		break;
 
 	/* mandatory */
-	case OID_GEN_MEDIA_CONNECT_STATUS:
+	case RNDIS_OID_GEN_MEDIA_CONNECT_STATUS:
 		if (rndis_debug > 1)
-			pr_debug("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __func__);
+			pr_debug("%s: RNDIS_OID_GEN_MEDIA_CONNECT_STATUS\n", __func__);
 		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr]
 						.media_state);
 		retval = 0;
 		break;
 
-	case OID_GEN_PHYSICAL_MEDIUM:
-		pr_debug("%s: OID_GEN_PHYSICAL_MEDIUM\n", __func__);
+	case RNDIS_OID_GEN_PHYSICAL_MEDIUM:
+		pr_debug("%s: RNDIS_OID_GEN_PHYSICAL_MEDIUM\n", __func__);
 		*outbuf = cpu_to_le32(0);
 		retval = 0;
 		break;
@@ -343,20 +343,20 @@
 	 * of MS-Windows expect OIDs that aren't specified there.  Other
 	 * versions emit undefined RNDIS messages. DOCUMENT ALL THESE!
 	 */
-	case OID_GEN_MAC_OPTIONS:		/* from WinME */
-		pr_debug("%s: OID_GEN_MAC_OPTIONS\n", __func__);
+	case RNDIS_OID_GEN_MAC_OPTIONS:		/* from WinME */
+		pr_debug("%s: RNDIS_OID_GEN_MAC_OPTIONS\n", __func__);
 		*outbuf = cpu_to_le32(
-			  NDIS_MAC_OPTION_RECEIVE_SERIALIZED
-			| NDIS_MAC_OPTION_FULL_DUPLEX);
+			  RNDIS_MAC_OPTION_RECEIVE_SERIALIZED
+			| RNDIS_MAC_OPTION_FULL_DUPLEX);
 		retval = 0;
 		break;
 
 	/* statistics OIDs (table 4-2) */
 
 	/* mandatory */
-	case OID_GEN_XMIT_OK:
+	case RNDIS_OID_GEN_XMIT_OK:
 		if (rndis_debug > 1)
-			pr_debug("%s: OID_GEN_XMIT_OK\n", __func__);
+			pr_debug("%s: RNDIS_OID_GEN_XMIT_OK\n", __func__);
 		if (stats) {
 			*outbuf = cpu_to_le32(stats->tx_packets
 				- stats->tx_errors - stats->tx_dropped);
@@ -365,9 +365,9 @@
 		break;
 
 	/* mandatory */
-	case OID_GEN_RCV_OK:
+	case RNDIS_OID_GEN_RCV_OK:
 		if (rndis_debug > 1)
-			pr_debug("%s: OID_GEN_RCV_OK\n", __func__);
+			pr_debug("%s: RNDIS_OID_GEN_RCV_OK\n", __func__);
 		if (stats) {
 			*outbuf = cpu_to_le32(stats->rx_packets
 				- stats->rx_errors - stats->rx_dropped);
@@ -376,9 +376,9 @@
 		break;
 
 	/* mandatory */
-	case OID_GEN_XMIT_ERROR:
+	case RNDIS_OID_GEN_XMIT_ERROR:
 		if (rndis_debug > 1)
-			pr_debug("%s: OID_GEN_XMIT_ERROR\n", __func__);
+			pr_debug("%s: RNDIS_OID_GEN_XMIT_ERROR\n", __func__);
 		if (stats) {
 			*outbuf = cpu_to_le32(stats->tx_errors);
 			retval = 0;
@@ -386,9 +386,9 @@
 		break;
 
 	/* mandatory */
-	case OID_GEN_RCV_ERROR:
+	case RNDIS_OID_GEN_RCV_ERROR:
 		if (rndis_debug > 1)
-			pr_debug("%s: OID_GEN_RCV_ERROR\n", __func__);
+			pr_debug("%s: RNDIS_OID_GEN_RCV_ERROR\n", __func__);
 		if (stats) {
 			*outbuf = cpu_to_le32(stats->rx_errors);
 			retval = 0;
@@ -396,8 +396,8 @@
 		break;
 
 	/* mandatory */
-	case OID_GEN_RCV_NO_BUFFER:
-		pr_debug("%s: OID_GEN_RCV_NO_BUFFER\n", __func__);
+	case RNDIS_OID_GEN_RCV_NO_BUFFER:
+		pr_debug("%s: RNDIS_OID_GEN_RCV_NO_BUFFER\n", __func__);
 		if (stats) {
 			*outbuf = cpu_to_le32(stats->rx_dropped);
 			retval = 0;
@@ -407,8 +407,8 @@
 	/* ieee802.3 OIDs (table 4-3) */
 
 	/* mandatory */
-	case OID_802_3_PERMANENT_ADDRESS:
-		pr_debug("%s: OID_802_3_PERMANENT_ADDRESS\n", __func__);
+	case RNDIS_OID_802_3_PERMANENT_ADDRESS:
+		pr_debug("%s: RNDIS_OID_802_3_PERMANENT_ADDRESS\n", __func__);
 		if (rndis_per_dev_params[configNr].dev) {
 			length = ETH_ALEN;
 			memcpy(outbuf,
@@ -419,8 +419,8 @@
 		break;
 
 	/* mandatory */
-	case OID_802_3_CURRENT_ADDRESS:
-		pr_debug("%s: OID_802_3_CURRENT_ADDRESS\n", __func__);
+	case RNDIS_OID_802_3_CURRENT_ADDRESS:
+		pr_debug("%s: RNDIS_OID_802_3_CURRENT_ADDRESS\n", __func__);
 		if (rndis_per_dev_params[configNr].dev) {
 			length = ETH_ALEN;
 			memcpy(outbuf,
@@ -431,23 +431,23 @@
 		break;
 
 	/* mandatory */
-	case OID_802_3_MULTICAST_LIST:
-		pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__);
+	case RNDIS_OID_802_3_MULTICAST_LIST:
+		pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__);
 		/* Multicast base address only */
 		*outbuf = cpu_to_le32(0xE0000000);
 		retval = 0;
 		break;
 
 	/* mandatory */
-	case OID_802_3_MAXIMUM_LIST_SIZE:
-		pr_debug("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __func__);
+	case RNDIS_OID_802_3_MAXIMUM_LIST_SIZE:
+		pr_debug("%s: RNDIS_OID_802_3_MAXIMUM_LIST_SIZE\n", __func__);
 		/* Multicast base address only */
 		*outbuf = cpu_to_le32(1);
 		retval = 0;
 		break;
 
-	case OID_802_3_MAC_OPTIONS:
-		pr_debug("%s: OID_802_3_MAC_OPTIONS\n", __func__);
+	case RNDIS_OID_802_3_MAC_OPTIONS:
+		pr_debug("%s: RNDIS_OID_802_3_MAC_OPTIONS\n", __func__);
 		*outbuf = cpu_to_le32(0);
 		retval = 0;
 		break;
@@ -455,8 +455,8 @@
 	/* ieee802.3 statistics OIDs (table 4-4) */
 
 	/* mandatory */
-	case OID_802_3_RCV_ERROR_ALIGNMENT:
-		pr_debug("%s: OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__);
+	case RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT:
+		pr_debug("%s: RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__);
 		if (stats) {
 			*outbuf = cpu_to_le32(stats->rx_frame_errors);
 			retval = 0;
@@ -464,15 +464,15 @@
 		break;
 
 	/* mandatory */
-	case OID_802_3_XMIT_ONE_COLLISION:
-		pr_debug("%s: OID_802_3_XMIT_ONE_COLLISION\n", __func__);
+	case RNDIS_OID_802_3_XMIT_ONE_COLLISION:
+		pr_debug("%s: RNDIS_OID_802_3_XMIT_ONE_COLLISION\n", __func__);
 		*outbuf = cpu_to_le32(0);
 		retval = 0;
 		break;
 
 	/* mandatory */
-	case OID_802_3_XMIT_MORE_COLLISIONS:
-		pr_debug("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __func__);
+	case RNDIS_OID_802_3_XMIT_MORE_COLLISIONS:
+		pr_debug("%s: RNDIS_OID_802_3_XMIT_MORE_COLLISIONS\n", __func__);
 		*outbuf = cpu_to_le32(0);
 		retval = 0;
 		break;
@@ -516,7 +516,7 @@
 
 	params = &rndis_per_dev_params[configNr];
 	switch (OID) {
-	case OID_GEN_CURRENT_PACKET_FILTER:
+	case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:
 
 		/* these NDIS_PACKET_TYPE_* bitflags are shared with
 		 * cdc_filter; it's not RNDIS-specific
@@ -525,7 +525,7 @@
 		 *	MULTICAST, ALL_MULTICAST, BROADCAST
 		 */
 		*params->filter = (u16)get_unaligned_le32(buf);
-		pr_debug("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n",
+		pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER %08x\n",
 			__func__, *params->filter);
 
 		/* this call has a significant side effect:  it's
@@ -545,9 +545,9 @@
 		}
 		break;
 
-	case OID_802_3_MULTICAST_LIST:
+	case RNDIS_OID_802_3_MULTICAST_LIST:
 		/* I think we can ignore this */
-		pr_debug("%s: OID_802_3_MULTICAST_LIST\n", __func__);
+		pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__);
 		retval = 0;
 		break;
 
@@ -577,7 +577,7 @@
 		return -ENOMEM;
 	resp = (rndis_init_cmplt_type *)r->buf;
 
-	resp->MessageType = cpu_to_le32(REMOTE_NDIS_INITIALIZE_CMPLT);
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_INIT_C);
 	resp->MessageLength = cpu_to_le32(52);
 	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
 	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
@@ -621,7 +621,7 @@
 		return -ENOMEM;
 	resp = (rndis_query_cmplt_type *)r->buf;
 
-	resp->MessageType = cpu_to_le32(REMOTE_NDIS_QUERY_CMPLT);
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_QUERY_C);
 	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
 
 	if (gen_ndis_query_resp(configNr, le32_to_cpu(buf->OID),
@@ -668,7 +668,7 @@
 	pr_debug("\n");
 #endif
 
-	resp->MessageType = cpu_to_le32(REMOTE_NDIS_SET_CMPLT);
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_SET_C);
 	resp->MessageLength = cpu_to_le32(16);
 	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
 	if (gen_ndis_set_resp(configNr, le32_to_cpu(buf->OID),
@@ -692,7 +692,7 @@
 		return -ENOMEM;
 	resp = (rndis_reset_cmplt_type *)r->buf;
 
-	resp->MessageType = cpu_to_le32(REMOTE_NDIS_RESET_CMPLT);
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_RESET_C);
 	resp->MessageLength = cpu_to_le32(16);
 	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
 	/* resent information */
@@ -716,8 +716,7 @@
 		return -ENOMEM;
 	resp = (rndis_keepalive_cmplt_type *)r->buf;
 
-	resp->MessageType = cpu_to_le32(
-			REMOTE_NDIS_KEEPALIVE_CMPLT);
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C);
 	resp->MessageLength = cpu_to_le32(16);
 	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
 	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
@@ -745,7 +744,7 @@
 		return -ENOMEM;
 	resp = (rndis_indicate_status_msg_type *)r->buf;
 
-	resp->MessageType = cpu_to_le32(REMOTE_NDIS_INDICATE_STATUS_MSG);
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_INDICATE);
 	resp->MessageLength = cpu_to_le32(20);
 	resp->Status = cpu_to_le32(status);
 	resp->StatusBufferLength = cpu_to_le32(0);
@@ -758,7 +757,7 @@
 int rndis_signal_connect(int configNr)
 {
 	rndis_per_dev_params[configNr].media_state
-			= NDIS_MEDIA_STATE_CONNECTED;
+			= RNDIS_MEDIA_STATE_CONNECTED;
 	return rndis_indicate_status_msg(configNr,
 					  RNDIS_STATUS_MEDIA_CONNECT);
 }
@@ -766,7 +765,7 @@
 int rndis_signal_disconnect(int configNr)
 {
 	rndis_per_dev_params[configNr].media_state
-			= NDIS_MEDIA_STATE_DISCONNECTED;
+			= RNDIS_MEDIA_STATE_DISCONNECTED;
 	return rndis_indicate_status_msg(configNr,
 					  RNDIS_STATUS_MEDIA_DISCONNECT);
 }
@@ -817,15 +816,15 @@
 
 	/* For USB: responses may take up to 10 seconds */
 	switch (MsgType) {
-	case REMOTE_NDIS_INITIALIZE_MSG:
-		pr_debug("%s: REMOTE_NDIS_INITIALIZE_MSG\n",
+	case RNDIS_MSG_INIT:
+		pr_debug("%s: RNDIS_MSG_INIT\n",
 			__func__);
 		params->state = RNDIS_INITIALIZED;
 		return rndis_init_response(configNr,
 					(rndis_init_msg_type *)buf);
 
-	case REMOTE_NDIS_HALT_MSG:
-		pr_debug("%s: REMOTE_NDIS_HALT_MSG\n",
+	case RNDIS_MSG_HALT:
+		pr_debug("%s: RNDIS_MSG_HALT\n",
 			__func__);
 		params->state = RNDIS_UNINITIALIZED;
 		if (params->dev) {
@@ -834,24 +833,24 @@
 		}
 		return 0;
 
-	case REMOTE_NDIS_QUERY_MSG:
+	case RNDIS_MSG_QUERY:
 		return rndis_query_response(configNr,
 					(rndis_query_msg_type *)buf);
 
-	case REMOTE_NDIS_SET_MSG:
+	case RNDIS_MSG_SET:
 		return rndis_set_response(configNr,
 					(rndis_set_msg_type *)buf);
 
-	case REMOTE_NDIS_RESET_MSG:
-		pr_debug("%s: REMOTE_NDIS_RESET_MSG\n",
+	case RNDIS_MSG_RESET:
+		pr_debug("%s: RNDIS_MSG_RESET\n",
 			__func__);
 		return rndis_reset_response(configNr,
 					(rndis_reset_msg_type *)buf);
 
-	case REMOTE_NDIS_KEEPALIVE_MSG:
+	case RNDIS_MSG_KEEPALIVE:
 		/* For USB: host does this every 5 seconds */
 		if (rndis_debug > 1)
-			pr_debug("%s: REMOTE_NDIS_KEEPALIVE_MSG\n",
+			pr_debug("%s: RNDIS_MSG_KEEPALIVE\n",
 				__func__);
 		return rndis_keepalive_response(configNr,
 						 (rndis_keepalive_msg_type *)
@@ -963,7 +962,7 @@
 		return;
 	header = (void *)skb_push(skb, sizeof(*header));
 	memset(header, 0, sizeof *header);
-	header->MessageType = cpu_to_le32(REMOTE_NDIS_PACKET_MSG);
+	header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET);
 	header->MessageLength = cpu_to_le32(skb->len);
 	header->DataOffset = cpu_to_le32(36);
 	header->DataLength = cpu_to_le32(skb->len - sizeof(*header));
@@ -1031,7 +1030,7 @@
 	__le32 *tmp = (void *)skb->data;
 
 	/* MessageType, MessageLength */
-	if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG)
+	if (cpu_to_le32(RNDIS_MSG_PACKET)
 			!= get_unaligned(tmp++)) {
 		dev_kfree_skb_any(skb);
 		return -EINVAL;
@@ -1173,7 +1172,7 @@
 		rndis_per_dev_params[i].used = 0;
 		rndis_per_dev_params[i].state = RNDIS_UNINITIALIZED;
 		rndis_per_dev_params[i].media_state
-				= NDIS_MEDIA_STATE_DISCONNECTED;
+				= RNDIS_MEDIA_STATE_DISCONNECTED;
 		INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
 	}
 
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index 907c330..0647f2f 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -15,58 +15,12 @@
 #ifndef _LINUX_RNDIS_H
 #define _LINUX_RNDIS_H
 
+#include <linux/rndis.h>
 #include "ndis.h"
 
 #define RNDIS_MAXIMUM_FRAME_SIZE	1518
 #define RNDIS_MAX_TOTAL_SIZE		1558
 
-/* Remote NDIS Versions */
-#define RNDIS_MAJOR_VERSION		1
-#define RNDIS_MINOR_VERSION		0
-
-/* Status Values */
-#define RNDIS_STATUS_SUCCESS		0x00000000U	/* Success           */
-#define RNDIS_STATUS_FAILURE		0xC0000001U	/* Unspecified error */
-#define RNDIS_STATUS_INVALID_DATA	0xC0010015U	/* Invalid data      */
-#define RNDIS_STATUS_NOT_SUPPORTED	0xC00000BBU	/* Unsupported request */
-#define RNDIS_STATUS_MEDIA_CONNECT	0x4001000BU	/* Device connected  */
-#define RNDIS_STATUS_MEDIA_DISCONNECT	0x4001000CU	/* Device disconnected */
-/* For all not specified status messages:
- * RNDIS_STATUS_Xxx -> NDIS_STATUS_Xxx
- */
-
-/* Message Set for Connectionless (802.3) Devices */
-#define REMOTE_NDIS_PACKET_MSG		0x00000001U
-#define REMOTE_NDIS_INITIALIZE_MSG	0x00000002U	/* Initialize device */
-#define REMOTE_NDIS_HALT_MSG		0x00000003U
-#define REMOTE_NDIS_QUERY_MSG		0x00000004U
-#define REMOTE_NDIS_SET_MSG		0x00000005U
-#define REMOTE_NDIS_RESET_MSG		0x00000006U
-#define REMOTE_NDIS_INDICATE_STATUS_MSG	0x00000007U
-#define REMOTE_NDIS_KEEPALIVE_MSG	0x00000008U
-
-/* Message completion */
-#define REMOTE_NDIS_INITIALIZE_CMPLT	0x80000002U
-#define REMOTE_NDIS_QUERY_CMPLT		0x80000004U
-#define REMOTE_NDIS_SET_CMPLT		0x80000005U
-#define REMOTE_NDIS_RESET_CMPLT		0x80000006U
-#define REMOTE_NDIS_KEEPALIVE_CMPLT	0x80000008U
-
-/* Device Flags */
-#define RNDIS_DF_CONNECTIONLESS		0x00000001U
-#define RNDIS_DF_CONNECTION_ORIENTED	0x00000002U
-
-#define RNDIS_MEDIUM_802_3		0x00000000U
-
-/* from drivers/net/sk98lin/h/skgepnmi.h */
-#define OID_PNP_CAPABILITIES			0xFD010100
-#define OID_PNP_SET_POWER			0xFD010101
-#define OID_PNP_QUERY_POWER			0xFD010102
-#define OID_PNP_ADD_WAKE_UP_PATTERN		0xFD010103
-#define OID_PNP_REMOVE_WAKE_UP_PATTERN		0xFD010104
-#define OID_PNP_ENABLE_WAKE_UP			0xFD010106
-
-
 typedef struct rndis_init_msg_type
 {
 	__le32	MessageType;
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
new file mode 100644
index 0000000..c46439c
--- /dev/null
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -0,0 +1,2480 @@
+/* Target based USB-Gadget
+ *
+ * UAS protocol handling, target callbacks, configfs handling,
+ * BBB (USB Mass Storage Class Bulk-Only) transport protocol handling.
+ *
+ * Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
+ * License: GPLv2 as published by FSF.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/storage.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+#include <asm/unaligned.h>
+
+#include "usbstring.c"
+#include "epautoconf.c"
+#include "config.c"
+#include "composite.c"
+
+#include "tcm_usb_gadget.h"
+
+static struct target_fabric_configfs *usbg_fabric_configfs;
+
+static inline struct f_uas *to_f_uas(struct usb_function *f)
+{
+	return container_of(f, struct f_uas, function);
+}
+
+static void usbg_cmd_release(struct kref *);
+
+static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
+{
+	kref_put(&cmd->ref, usbg_cmd_release);
+}
+
+/* Start bot.c code */
+
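+/*
+ * Queue a read for the next CBW on the bulk-out endpoint, unless one is
+ * already pending (tracked via the USBG_BOT_CMD_PEND flag).
+ */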
+static int bot_enqueue_cmd_cbw(struct f_uas *fu)
+{
+	int ret;
+
+	if (fu->flags & USBG_BOT_CMD_PEND)
+		return 0;
+
+	ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
+	if (!ret)
+		fu->flags |= USBG_BOT_CMD_PEND;
+	return ret;
+}
+
+static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct usbg_cmd *cmd = req->context;
+	struct f_uas *fu = cmd->fu;
+
+	usbg_cleanup_cmd(cmd);
+	if (req->status < 0) {
+		pr_err("ERR %s(%d)\n", __func__, __LINE__);
+		return;
+	}
+
+	/* CSW completed, wait for next CBW */
+	bot_enqueue_cmd_cbw(fu);
+}
+
+static void bot_enqueue_sense_code(struct f_uas *fu, struct usbg_cmd *cmd)
+{
+	struct bulk_cs_wrap *csw = &fu->bot_status.csw;
+	int ret;
+	u8 *sense;
+	unsigned int csw_stat;
+
+	csw_stat = cmd->csw_code;
+
+	/*
+	 * We can't send SENSE as a response. So we take the ASC & ASCQ from
+	 * our sense buffer, queue the CSW and hope the host sends a
+	 * REQUEST_SENSE command to learn why we failed.
+	 */
+	sense = cmd->sense_iu.sense;
+
+	csw->Tag = cmd->bot_tag;
+	csw->Status = csw_stat;
+	fu->bot_status.req->context = cmd;
+	ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
+	if (ret)
+		pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
+}
+
+static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
+{
+	struct usbg_cmd *cmd = req->context;
+	struct f_uas *fu = cmd->fu;
+
+	if (req->status < 0)
+		pr_err("ERR %s(%d)\n", __func__, __LINE__);
+
+	if (cmd->data_len) {
+		if (cmd->data_len > ep->maxpacket) {
+			req->length = ep->maxpacket;
+			cmd->data_len -= ep->maxpacket;
+		} else {
+			req->length = cmd->data_len;
+			cmd->data_len = 0;
+		}
+
+		usb_ep_queue(ep, req, GFP_ATOMIC);
+		return;
+	}
+	bot_enqueue_sense_code(fu, cmd);
+}
+
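+/*
+ * Report a failed (or phase-error) command: if a data stage is outstanding,
+ * pad or drain it in maxpacket-sized chunks via bot_err_compl() first, then
+ * queue the CSW carrying the residue.
+ */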
+static void bot_send_bad_status(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct bulk_cs_wrap *csw = &fu->bot_status.csw;
+	struct usb_request *req;
+	struct usb_ep *ep;
+
+	csw->Residue = cpu_to_le32(cmd->data_len);
+
+	if (cmd->data_len) {
+		if (cmd->is_read) {
+			ep = fu->ep_in;
+			req = fu->bot_req_in;
+		} else {
+			ep = fu->ep_out;
+			req = fu->bot_req_out;
+		}
+
+		if (cmd->data_len > fu->ep_in->maxpacket) {
+			req->length = ep->maxpacket;
+			cmd->data_len -= ep->maxpacket;
+		} else {
+			req->length = cmd->data_len;
+			cmd->data_len = 0;
+		}
+		req->complete = bot_err_compl;
+		req->context = cmd;
+		req->buf = fu->cmd.buf;
+		usb_ep_queue(ep, req, GFP_KERNEL);
+	} else {
+		bot_enqueue_sense_code(fu, cmd);
+	}
+}
+
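+/*
+ * Send the CSW for a completed command. A GOOD status with all data moved is
+ * sent directly; a GOOD status where the host expected data we never moved,
+ * or any non-GOOD status, is routed through bot_send_bad_status() instead.
+ */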
+static int bot_send_status(struct usbg_cmd *cmd, bool moved_data)
+{
+	struct f_uas *fu = cmd->fu;
+	struct bulk_cs_wrap *csw = &fu->bot_status.csw;
+	int ret;
+
+	if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) {
+		if (!moved_data && cmd->data_len) {
+			/*
+			 * The host wants to move data, we don't. Fill / empty
+			 * the pipe and then send the CSW with the residue set.
+			 */
+			cmd->csw_code = US_BULK_STAT_OK;
+			bot_send_bad_status(cmd);
+			return 0;
+		}
+
+		csw->Tag = cmd->bot_tag;
+		csw->Residue = cpu_to_le32(0);
+		csw->Status = US_BULK_STAT_OK;
+		fu->bot_status.req->context = cmd;
+
+		ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
+		if (ret)
+			pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
+	} else {
+		cmd->csw_code = US_BULK_STAT_FAIL;
+		bot_send_bad_status(cmd);
+	}
+	return 0;
+}
+
+/*
+ * Called after command (no data transfer) or after the write (to device)
+ * operation is completed
+ */
+static int bot_send_status_response(struct usbg_cmd *cmd)
+{
+	bool moved_data = false;
+
+	if (!cmd->is_read)
+		moved_data = true;
+	return bot_send_status(cmd, moved_data);
+}
+
+/* Read request completed, now we have to send the CSW */
+static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
+{
+	struct usbg_cmd *cmd = req->context;
+
+	if (req->status < 0)
+		pr_err("ERR %s(%d)\n", __func__, __LINE__);
+
+	bot_send_status(cmd, true);
+}
+
+static int bot_send_read_response(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct usb_gadget *gadget = fuas_to_gadget(fu);
+	int ret;
+
+	if (!cmd->data_len) {
+		cmd->csw_code = US_BULK_STAT_PHASE;
+		bot_send_bad_status(cmd);
+		return 0;
+	}
+
+	if (!gadget->sg_supported) {
+		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+		if (!cmd->data_buf)
+			return -ENOMEM;
+
+		sg_copy_to_buffer(se_cmd->t_data_sg,
+				se_cmd->t_data_nents,
+				cmd->data_buf,
+				se_cmd->data_length);
+
+		fu->bot_req_in->buf = cmd->data_buf;
+	} else {
+		fu->bot_req_in->buf = NULL;
+		fu->bot_req_in->num_sgs = se_cmd->t_data_nents;
+		fu->bot_req_in->sg = se_cmd->t_data_sg;
+	}
+
+	fu->bot_req_in->complete = bot_read_compl;
+	fu->bot_req_in->length = se_cmd->data_length;
+	fu->bot_req_in->context = cmd;
+	ret = usb_ep_queue(fu->ep_in, fu->bot_req_in, GFP_ATOMIC);
+	if (ret)
+		pr_err("%s(%d)\n", __func__, __LINE__);
+	return 0;
+}
+
+static void usbg_data_write_cmpl(struct usb_ep *, struct usb_request *);
+static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
+
+static int bot_send_write_request(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct usb_gadget *gadget = fuas_to_gadget(fu);
+	int ret;
+
+	init_completion(&cmd->write_complete);
+	cmd->fu = fu;
+
+	if (!cmd->data_len) {
+		cmd->csw_code = US_BULK_STAT_PHASE;
+		return -EINVAL;
+	}
+
+	if (!gadget->sg_supported) {
+		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
+		if (!cmd->data_buf)
+			return -ENOMEM;
+
+		fu->bot_req_out->buf = cmd->data_buf;
+	} else {
+		fu->bot_req_out->buf = NULL;
+		fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
+		fu->bot_req_out->sg = se_cmd->t_data_sg;
+	}
+
+	fu->bot_req_out->complete = usbg_data_write_cmpl;
+	fu->bot_req_out->length = se_cmd->data_length;
+	fu->bot_req_out->context = cmd;
+
+	ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
+	if (ret)
+		goto cleanup;
+	ret = usb_ep_queue(fu->ep_out, fu->bot_req_out, GFP_KERNEL);
+	if (ret)
+		pr_err("%s(%d)\n", __func__, __LINE__);
+
+	wait_for_completion(&cmd->write_complete);
+	transport_generic_process_write(se_cmd);
+cleanup:
+	return ret;
+}
+
+static int bot_submit_command(struct f_uas *, void *, unsigned int);
+
+static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_uas *fu = req->context;
+	int ret;
+
+	fu->flags &= ~USBG_BOT_CMD_PEND;
+
+	if (req->status < 0)
+		return;
+
+	ret = bot_submit_command(fu, req->buf, req->actual);
+	if (ret)
+		pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
+}
+
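+/*
+ * Allocate the requests used in BOT mode (data-in, data-out, CBW and CSW),
+ * wire up their buffers and completion handlers and queue the first CBW
+ * read. All allocations are unwound on failure.
+ */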
+static int bot_prepare_reqs(struct f_uas *fu)
+{
+	int ret;
+
+	fu->bot_req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
+	if (!fu->bot_req_in)
+		goto err;
+
+	fu->bot_req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+	if (!fu->bot_req_out)
+		goto err_out;
+
+	fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+	if (!fu->cmd.req)
+		goto err_cmd;
+
+	fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
+	if (!fu->bot_status.req)
+		goto err_sts;
+
+	fu->bot_status.req->buf = &fu->bot_status.csw;
+	fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
+	fu->bot_status.req->complete = bot_status_complete;
+	fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
+
+	fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
+	if (!fu->cmd.buf)
+		goto err_buf;
+
+	fu->cmd.req->complete = bot_cmd_complete;
+	fu->cmd.req->buf = fu->cmd.buf;
+	fu->cmd.req->length = fu->ep_out->maxpacket;
+	fu->cmd.req->context = fu;
+
+	ret = bot_enqueue_cmd_cbw(fu);
+	if (ret)
+		goto err_queue;
+	return 0;
+err_queue:
+	kfree(fu->cmd.buf);
+	fu->cmd.buf = NULL;
+err_buf:
+	usb_ep_free_request(fu->ep_in, fu->bot_status.req);
+err_sts:
+	usb_ep_free_request(fu->ep_out, fu->cmd.req);
+	fu->cmd.req = NULL;
+err_cmd:
+	usb_ep_free_request(fu->ep_out, fu->bot_req_out);
+	fu->bot_req_out = NULL;
+err_out:
+	usb_ep_free_request(fu->ep_in, fu->bot_req_in);
+	fu->bot_req_in = NULL;
+err:
+	pr_err("BOT: endpoint setup failed\n");
+	return -ENOMEM;
+}
+
+void bot_cleanup_old_alt(struct f_uas *fu)
+{
+	if (!(fu->flags & USBG_ENABLED))
+		return;
+
+	usb_ep_disable(fu->ep_in);
+	usb_ep_disable(fu->ep_out);
+
+	if (!fu->bot_req_in)
+		return;
+
+	usb_ep_free_request(fu->ep_in, fu->bot_req_in);
+	usb_ep_free_request(fu->ep_out, fu->bot_req_out);
+	usb_ep_free_request(fu->ep_out, fu->cmd.req);
+	usb_ep_free_request(fu->ep_out, fu->bot_status.req);
+
+	kfree(fu->cmd.buf);
+
+	fu->bot_req_in = NULL;
+	fu->bot_req_out = NULL;
+	fu->cmd.req = NULL;
+	fu->bot_status.req = NULL;
+	fu->cmd.buf = NULL;
+}
+
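+/*
+ * Switch to the BOT alternate setting: enable both bulk endpoints for the
+ * current connection speed and prepare the BOT requests.
+ */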
+static void bot_set_alt(struct f_uas *fu)
+{
+	struct usb_function *f = &fu->function;
+	struct usb_gadget *gadget = f->config->cdev->gadget;
+	int ret;
+
+	fu->flags = USBG_IS_BOT;
+
+	config_ep_by_speed(gadget, f, fu->ep_in);
+	ret = usb_ep_enable(fu->ep_in);
+	if (ret)
+		goto err_b_in;
+
+	config_ep_by_speed(gadget, f, fu->ep_out);
+	ret = usb_ep_enable(fu->ep_out);
+	if (ret)
+		goto err_b_out;
+
+	ret = bot_prepare_reqs(fu);
+	if (ret)
+		goto err_wq;
+	fu->flags |= USBG_ENABLED;
+	pr_info("Using the BOT protocol\n");
+	return;
+err_wq:
+	usb_ep_disable(fu->ep_out);
+err_b_out:
+	usb_ep_disable(fu->ep_in);
+err_b_in:
+	fu->flags = USBG_IS_BOT;
+}
+
+static int usbg_bot_setup(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_uas *fu = to_f_uas(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+	int luns;
+	u8 *ret_lun;
+
+	switch (ctrl->bRequest) {
+	case US_BULK_GET_MAX_LUN:
+		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS |
+					USB_RECIP_INTERFACE))
+			return -ENOTSUPP;
+
+		if (w_length < 1)
+			return -EINVAL;
+		if (w_value != 0)
+			return -EINVAL;
+		luns = atomic_read(&fu->tpg->tpg_port_count);
+		if (!luns) {
+			pr_err("No LUNs configured?\n");
+			return -EINVAL;
+		}
+		/*
+		 * If 4 LUNs are present we return 3, i.e. LUNs 0..3 can be
+		 * accessed. The upper limit is 0xf.
+		 */
+		luns--;
+		if (luns > 0xf) {
+			pr_info_once("Limiting the number of luns to 16\n");
+			luns = 0xf;
+		}
+		ret_lun = cdev->req->buf;
+		*ret_lun = luns;
+		cdev->req->length = 1;
+		return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		break;
+
+	case US_BULK_RESET_REQUEST:
+		/* XXX maybe we should remove previous requests for IN + OUT */
+		bot_enqueue_cmd_cbw(fu);
+		return 0;
+		break;
+	}
+	return -ENOTSUPP;
+}
+
+/* Start uas.c code */
+
+static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
+{
+	/* We have either all three allocated or none */
+	if (!stream->req_in)
+		return;
+
+	usb_ep_free_request(fu->ep_in, stream->req_in);
+	usb_ep_free_request(fu->ep_out, stream->req_out);
+	usb_ep_free_request(fu->ep_status, stream->req_status);
+
+	stream->req_in = NULL;
+	stream->req_out = NULL;
+	stream->req_status = NULL;
+}
+
+static void uasp_free_cmdreq(struct f_uas *fu)
+{
+	usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
+	kfree(fu->cmd.buf);
+	fu->cmd.req = NULL;
+	fu->cmd.buf = NULL;
+}
+
+static void uasp_cleanup_old_alt(struct f_uas *fu)
+{
+	int i;
+
+	if (!(fu->flags & USBG_ENABLED))
+		return;
+
+	usb_ep_disable(fu->ep_in);
+	usb_ep_disable(fu->ep_out);
+	usb_ep_disable(fu->ep_status);
+	usb_ep_disable(fu->ep_cmd);
+
+	for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
+		uasp_cleanup_one_stream(fu, &fu->stream[i]);
+	uasp_free_cmdreq(fu);
+}
+
+static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
+
+static int uasp_prepare_r_request(struct usbg_cmd *cmd)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct f_uas *fu = cmd->fu;
+	struct usb_gadget *gadget = fuas_to_gadget(fu);
+	struct uas_stream *stream = cmd->stream;
+
+	if (!gadget->sg_supported) {
+		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+		if (!cmd->data_buf)
+			return -ENOMEM;
+
+		sg_copy_to_buffer(se_cmd->t_data_sg,
+				se_cmd->t_data_nents,
+				cmd->data_buf,
+				se_cmd->data_length);
+
+		stream->req_in->buf = cmd->data_buf;
+	} else {
+		stream->req_in->buf = NULL;
+		stream->req_in->num_sgs = se_cmd->t_data_nents;
+		stream->req_in->sg = se_cmd->t_data_sg;
+	}
+
+	stream->req_in->complete = uasp_status_data_cmpl;
+	stream->req_in->length = se_cmd->data_length;
+	stream->req_in->context = cmd;
+
+	cmd->state = UASP_SEND_STATUS;
+	return 0;
+}
+
+static void uasp_prepare_status(struct usbg_cmd *cmd)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct sense_iu *iu = &cmd->sense_iu;
+	struct uas_stream *stream = cmd->stream;
+
+	cmd->state = UASP_QUEUE_COMMAND;
+	iu->iu_id = IU_ID_STATUS;
+	iu->tag = cpu_to_be16(cmd->tag);
+
+	/*
+	 * iu->status_qual = cpu_to_be16(STATUS QUALIFIER SAM-4. Where R U?);
+	 */
+	iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
+	iu->status = se_cmd->scsi_status;
+	stream->req_status->context = cmd;
+	stream->req_status->length = se_cmd->scsi_sense_length + 16;
+	stream->req_status->buf = iu;
+	stream->req_status->complete = uasp_status_data_cmpl;
+}
+
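+/*
+ * Completion handler driving the per-command state machine: SEND_DATA queues
+ * the data-in transfer, RECEIVE_DATA queues the data-out transfer,
+ * SEND_STATUS queues the sense IU and QUEUE_COMMAND releases the command and
+ * re-arms the command endpoint.
+ */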
+static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
+{
+	struct usbg_cmd *cmd = req->context;
+	struct uas_stream *stream = cmd->stream;
+	struct f_uas *fu = cmd->fu;
+	int ret;
+
+	if (req->status < 0)
+		goto cleanup;
+
+	switch (cmd->state) {
+	case UASP_SEND_DATA:
+		ret = uasp_prepare_r_request(cmd);
+		if (ret)
+			goto cleanup;
+		ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+		break;
+
+	case UASP_RECEIVE_DATA:
+		ret = usbg_prepare_w_request(cmd, stream->req_out);
+		if (ret)
+			goto cleanup;
+		ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+		break;
+
+	case UASP_SEND_STATUS:
+		uasp_prepare_status(cmd);
+		ret = usb_ep_queue(fu->ep_status, stream->req_status,
+				GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+		break;
+
+	case UASP_QUEUE_COMMAND:
+		usbg_cleanup_cmd(cmd);
+		usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+		break;
+
+	default:
+		BUG();
+	}
+	return;
+
+cleanup:
+	usbg_cleanup_cmd(cmd);
+}
+
+static int uasp_send_status_response(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct uas_stream *stream = cmd->stream;
+	struct sense_iu *iu = &cmd->sense_iu;
+
+	iu->tag = cpu_to_be16(cmd->tag);
+	stream->req_status->complete = uasp_status_data_cmpl;
+	stream->req_status->context = cmd;
+	cmd->fu = fu;
+	uasp_prepare_status(cmd);
+	return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
+}
+
+static int uasp_send_read_response(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct uas_stream *stream = cmd->stream;
+	struct sense_iu *iu = &cmd->sense_iu;
+	int ret;
+
+	cmd->fu = fu;
+
+	iu->tag = cpu_to_be16(cmd->tag);
+	if (fu->flags & USBG_USE_STREAMS) {
+
+		ret = uasp_prepare_r_request(cmd);
+		if (ret)
+			goto out;
+		ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
+		if (ret) {
+			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+			kfree(cmd->data_buf);
+			cmd->data_buf = NULL;
+		}
+
+	} else {
+
+		iu->iu_id = IU_ID_READ_READY;
+		iu->tag = cpu_to_be16(cmd->tag);
+
+		stream->req_status->complete = uasp_status_data_cmpl;
+		stream->req_status->context = cmd;
+
+		cmd->state = UASP_SEND_DATA;
+		stream->req_status->buf = iu;
+		stream->req_status->length = sizeof(struct iu);
+
+		ret = usb_ep_queue(fu->ep_status, stream->req_status,
+				GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+	}
+out:
+	return ret;
+}
+
+static int uasp_send_write_request(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct uas_stream *stream = cmd->stream;
+	struct sense_iu *iu = &cmd->sense_iu;
+	int ret;
+
+	init_completion(&cmd->write_complete);
+	cmd->fu = fu;
+
+	iu->tag = cpu_to_be16(cmd->tag);
+
+	if (fu->flags & USBG_USE_STREAMS) {
+
+		ret = usbg_prepare_w_request(cmd, stream->req_out);
+		if (ret)
+			goto cleanup;
+		ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d)\n", __func__, __LINE__);
+
+	} else {
+
+		iu->iu_id = IU_ID_WRITE_READY;
+		iu->tag = cpu_to_be16(cmd->tag);
+
+		stream->req_status->complete = uasp_status_data_cmpl;
+		stream->req_status->context = cmd;
+
+		cmd->state = UASP_RECEIVE_DATA;
+		stream->req_status->buf = iu;
+		stream->req_status->length = sizeof(struct iu);
+
+		ret = usb_ep_queue(fu->ep_status, stream->req_status,
+				GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d)\n", __func__, __LINE__);
+	}
+
+	wait_for_completion(&cmd->write_complete);
+	transport_generic_process_write(se_cmd);
+cleanup:
+	return ret;
+}
+
+static int usbg_submit_command(struct f_uas *, void *, unsigned int);
+
+static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_uas *fu = req->context;
+	int ret;
+
+	if (req->status < 0)
+		return;
+
+	ret = usbg_submit_command(fu, req->buf, req->actual);
+	/*
+	 * Once we tune for performance, enqueue the command req here again so
+	 * we can receive a second command while we are processing this one.
+	 * Pay attention to properly syncing the STATUS endpoint with DATA IN
+	 * and OUT so we don't break HS.
+	 */
+	if (!ret)
+		return;
+	usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+}
+
+static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
+{
+	stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
+	if (!stream->req_in)
+		goto out;
+
+	stream->req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+	if (!stream->req_out)
+		goto err_out;
+
+	stream->req_status = usb_ep_alloc_request(fu->ep_status, GFP_KERNEL);
+	if (!stream->req_status)
+		goto err_sts;
+
+	return 0;
+err_sts:
+	usb_ep_free_request(fu->ep_status, stream->req_status);
+	stream->req_status = NULL;
+err_out:
+	usb_ep_free_request(fu->ep_out, stream->req_out);
+	stream->req_out = NULL;
+out:
+	return -ENOMEM;
+}
+
+static int uasp_alloc_cmd(struct f_uas *fu)
+{
+	fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
+	if (!fu->cmd.req)
+		goto err;
+
+	fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
+	if (!fu->cmd.buf)
+		goto err_buf;
+
+	fu->cmd.req->complete = uasp_cmd_complete;
+	fu->cmd.req->buf = fu->cmd.buf;
+	fu->cmd.req->length = fu->ep_cmd->maxpacket;
+	fu->cmd.req->context = fu;
+	return 0;
+
+err_buf:
+	usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
+err:
+	return -ENOMEM;
+}
+
+static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
+{
+	int i;
+
+	for (i = 0; i < max_streams; i++) {
+		struct uas_stream *s = &fu->stream[i];
+
+		s->req_in->stream_id = i + 1;
+		s->req_out->stream_id = i + 1;
+		s->req_status->stream_id = i + 1;
+	}
+}
+
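+/*
+ * Allocate a request triple (in/out/status) per stream (a single triple if
+ * streams are not in use) plus the command request, assign the stream IDs
+ * and queue the first command read.
+ */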
+static int uasp_prepare_reqs(struct f_uas *fu)
+{
+	int ret;
+	int i;
+	int max_streams;
+
+	if (fu->flags & USBG_USE_STREAMS)
+		max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
+	else
+		max_streams = 1;
+
+	for (i = 0; i < max_streams; i++) {
+		ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
+		if (ret)
+			goto err_cleanup;
+	}
+
+	ret = uasp_alloc_cmd(fu);
+	if (ret)
+		goto err_free_stream;
+	uasp_setup_stream_res(fu, max_streams);
+
+	ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+	if (ret)
+		goto err_free_stream;
+
+	return 0;
+
+err_free_stream:
+	uasp_free_cmdreq(fu);
+
+err_cleanup:
+	if (i) {
+		do {
+			uasp_cleanup_one_stream(fu, &fu->stream[i - 1]);
+			i--;
+		} while (i);
+	}
+	pr_err("UASP: endpoint setup failed\n");
+	return ret;
+}
+
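+/*
+ * Switch to the UAS alternate setting: enable the data-in, data-out, command
+ * and status endpoints for the current speed, use bulk streams when running
+ * at SuperSpeed and prepare the per-stream requests.
+ */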
+static void uasp_set_alt(struct f_uas *fu)
+{
+	struct usb_function *f = &fu->function;
+	struct usb_gadget *gadget = f->config->cdev->gadget;
+	int ret;
+
+	fu->flags = USBG_IS_UAS;
+
+	if (gadget->speed == USB_SPEED_SUPER)
+		fu->flags |= USBG_USE_STREAMS;
+
+	config_ep_by_speed(gadget, f, fu->ep_in);
+	ret = usb_ep_enable(fu->ep_in);
+	if (ret)
+		goto err_b_in;
+
+	config_ep_by_speed(gadget, f, fu->ep_out);
+	ret = usb_ep_enable(fu->ep_out);
+	if (ret)
+		goto err_b_out;
+
+	config_ep_by_speed(gadget, f, fu->ep_cmd);
+	ret = usb_ep_enable(fu->ep_cmd);
+	if (ret)
+		goto err_cmd;
+	config_ep_by_speed(gadget, f, fu->ep_status);
+	ret = usb_ep_enable(fu->ep_status);
+	if (ret)
+		goto err_status;
+
+	ret = uasp_prepare_reqs(fu);
+	if (ret)
+		goto err_wq;
+	fu->flags |= USBG_ENABLED;
+
+	pr_info("Using the UAS protocol\n");
+	return;
+err_wq:
+	usb_ep_disable(fu->ep_status);
+err_status:
+	usb_ep_disable(fu->ep_cmd);
+err_cmd:
+	usb_ep_disable(fu->ep_out);
+err_b_out:
+	usb_ep_disable(fu->ep_in);
+err_b_in:
+	fu->flags = 0;
+}
+
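+/*
+ * Map the SCSI opcode in the CDB to a DMA data direction. Opcodes that are
+ * not listed are rejected with -EINVAL.
+ */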
+static int get_cmd_dir(const unsigned char *cdb)
+{
+	int ret;
+
+	switch (cdb[0]) {
+	case READ_6:
+	case READ_10:
+	case READ_12:
+	case READ_16:
+	case INQUIRY:
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+	case SERVICE_ACTION_IN:
+	case MAINTENANCE_IN:
+	case PERSISTENT_RESERVE_IN:
+	case SECURITY_PROTOCOL_IN:
+	case ACCESS_CONTROL_IN:
+	case REPORT_LUNS:
+	case READ_BLOCK_LIMITS:
+	case READ_POSITION:
+	case READ_CAPACITY:
+	case READ_TOC:
+	case READ_FORMAT_CAPACITIES:
+	case REQUEST_SENSE:
+		ret = DMA_FROM_DEVICE;
+		break;
+
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_12:
+	case WRITE_16:
+	case MODE_SELECT:
+	case MODE_SELECT_10:
+	case WRITE_VERIFY:
+	case WRITE_VERIFY_12:
+	case PERSISTENT_RESERVE_OUT:
+	case MAINTENANCE_OUT:
+	case SECURITY_PROTOCOL_OUT:
+	case ACCESS_CONTROL_OUT:
+		ret = DMA_TO_DEVICE;
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+	case TEST_UNIT_READY:
+	case SYNCHRONIZE_CACHE:
+	case START_STOP:
+	case ERASE:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case SPACE:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+		ret = DMA_NONE;
+		break;
+	default:
+		pr_warn("target: Unknown data direction for SCSI Opcode "
+				"0x%02x\n", cdb[0]);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
+{
+	struct usbg_cmd *cmd = req->context;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+
+	if (req->status < 0) {
+		pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
+		goto cleanup;
+	}
+
+	if (req->num_sgs == 0) {
+		sg_copy_from_buffer(se_cmd->t_data_sg,
+				se_cmd->t_data_nents,
+				cmd->data_buf,
+				se_cmd->data_length);
+	}
+
+	complete(&cmd->write_complete);
+	return;
+
+cleanup:
+	usbg_cleanup_cmd(cmd);
+}
+
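+/*
+ * Prepare a data-out (write) request: hand the se_cmd scatterlist to the UDC
+ * if it supports SG, otherwise bounce through a temporary buffer that
+ * usbg_data_write_cmpl() copies back into the scatterlist on completion.
+ */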
+static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct f_uas *fu = cmd->fu;
+	struct usb_gadget *gadget = fuas_to_gadget(fu);
+
+	if (!gadget->sg_supported) {
+		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+		if (!cmd->data_buf)
+			return -ENOMEM;
+
+		req->buf = cmd->data_buf;
+	} else {
+		req->buf = NULL;
+		req->num_sgs = se_cmd->t_data_nents;
+		req->sg = se_cmd->t_data_sg;
+	}
+
+	req->complete = usbg_data_write_cmpl;
+	req->length = se_cmd->data_length;
+	req->context = cmd;
+	return 0;
+}
+
+static int usbg_send_status_response(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	struct f_uas *fu = cmd->fu;
+
+	if (fu->flags & USBG_IS_BOT)
+		return bot_send_status_response(cmd);
+	else
+		return uasp_send_status_response(cmd);
+}
+
+static int usbg_send_write_request(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	struct f_uas *fu = cmd->fu;
+
+	if (fu->flags & USBG_IS_BOT)
+		return bot_send_write_request(cmd);
+	else
+		return uasp_send_write_request(cmd);
+}
+
+static int usbg_send_read_response(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	struct f_uas *fu = cmd->fu;
+
+	if (fu->flags & USBG_IS_BOT)
+		return bot_send_read_response(cmd);
+	else
+		return uasp_send_read_response(cmd);
+}
+
+static void usbg_cmd_work(struct work_struct *work)
+{
+	struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
+	struct se_cmd *se_cmd;
+	struct tcm_usbg_nexus *tv_nexus;
+	struct usbg_tpg *tpg;
+	int dir;
+
+	se_cmd = &cmd->se_cmd;
+	tpg = cmd->fu->tpg;
+	tv_nexus = tpg->tpg_nexus;
+	dir = get_cmd_dir(cmd->cmd_buf);
+	if (dir < 0) {
+		transport_init_se_cmd(se_cmd,
+				tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+				tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+				cmd->prio_attr, cmd->sense_iu.sense);
+
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+		usbg_cleanup_cmd(cmd);
+		return;
+	}
+
+	target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+			cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
+			0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE);
+}
+
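+/*
+ * Parse a UAS command IU: copy the CDB, pick the stream matching the tag,
+ * translate the task attribute and defer the actual submission to
+ * usbg_cmd_work() on the tpg workqueue.
+ */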
+static int usbg_submit_command(struct f_uas *fu,
+		void *cmdbuf, unsigned int len)
+{
+	struct command_iu *cmd_iu = cmdbuf;
+	struct usbg_cmd *cmd;
+	struct usbg_tpg *tpg;
+	struct se_cmd *se_cmd;
+	struct tcm_usbg_nexus *tv_nexus;
+	u32 cmd_len;
+	int ret;
+
+	if (cmd_iu->iu_id != IU_ID_COMMAND) {
+		pr_err("Unsupported type %d\n", cmd_iu->iu_id);
+		return -EINVAL;
+	}
+
+	cmd = kzalloc(sizeof *cmd, GFP_ATOMIC);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->fu = fu;
+
+	/* XXX until I figure out why I can't free it on complete */
+	kref_init(&cmd->ref);
+	kref_get(&cmd->ref);
+
+	tpg = fu->tpg;
+	cmd_len = (cmd_iu->len & ~0x3) + 16;
+	if (cmd_len > USBG_MAX_CMD)
+		goto err;
+
+	memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
+
+	cmd->tag = be16_to_cpup(&cmd_iu->tag);
+	if (fu->flags & USBG_USE_STREAMS) {
+		if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
+			goto err;
+		if (!cmd->tag)
+			cmd->stream = &fu->stream[0];
+		else
+			cmd->stream = &fu->stream[cmd->tag - 1];
+	} else {
+		cmd->stream = &fu->stream[0];
+	}
+
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		pr_err("Missing nexus, ignoring command\n");
+		goto err;
+	}
+
+	switch (cmd_iu->prio_attr & 0x7) {
+	case UAS_HEAD_TAG:
+		cmd->prio_attr = MSG_HEAD_TAG;
+		break;
+	case UAS_ORDERED_TAG:
+		cmd->prio_attr = MSG_ORDERED_TAG;
+		break;
+	case UAS_ACA:
+		cmd->prio_attr = MSG_ACA_TAG;
+		break;
+	default:
+		pr_debug_once("Unsupported prio_attr: %02x.\n",
+				cmd_iu->prio_attr);
+	case UAS_SIMPLE_TAG:
+		cmd->prio_attr = MSG_SIMPLE_TAG;
+		break;
+	}
+
+	se_cmd = &cmd->se_cmd;
+	cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
+
+	INIT_WORK(&cmd->work, usbg_cmd_work);
+	ret = queue_work(tpg->workqueue, &cmd->work);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+err:
+	kfree(cmd);
+	return -EINVAL;
+}
+
+static void bot_cmd_work(struct work_struct *work)
+{
+	struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
+	struct se_cmd *se_cmd;
+	struct tcm_usbg_nexus *tv_nexus;
+	struct usbg_tpg *tpg;
+	int dir;
+
+	se_cmd = &cmd->se_cmd;
+	tpg = cmd->fu->tpg;
+	tv_nexus = tpg->tpg_nexus;
+	dir = get_cmd_dir(cmd->cmd_buf);
+	if (dir < 0) {
+		transport_init_se_cmd(se_cmd,
+				tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+				tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+				cmd->prio_attr, cmd->sense_iu.sense);
+
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+		usbg_cleanup_cmd(cmd);
+		return;
+	}
+
+	target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+			cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
+			cmd->data_len, cmd->prio_attr, dir, 0);
+}
+
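+/*
+ * Parse a BOT CBW: validate its signature and length, copy the CDB, record
+ * tag, LUN, direction and transfer length, then defer the submission to
+ * bot_cmd_work() on the tpg workqueue.
+ */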
+static int bot_submit_command(struct f_uas *fu,
+		void *cmdbuf, unsigned int len)
+{
+	struct bulk_cb_wrap *cbw = cmdbuf;
+	struct usbg_cmd *cmd;
+	struct usbg_tpg *tpg;
+	struct se_cmd *se_cmd;
+	struct tcm_usbg_nexus *tv_nexus;
+	u32 cmd_len;
+	int ret;
+
+	if (cbw->Signature != cpu_to_le32(US_BULK_CB_SIGN)) {
+		pr_err("Wrong signature on CBW\n");
+		return -EINVAL;
+	}
+	if (len != 31) {
+		pr_err("Wrong length for CBW\n");
+		return -EINVAL;
+	}
+
+	cmd_len = cbw->Length;
+	if (cmd_len < 1 || cmd_len > 16)
+		return -EINVAL;
+
+	cmd = kzalloc(sizeof *cmd, GFP_ATOMIC);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->fu = fu;
+
+	/* XXX until I figure out why I can't free it on complete */
+	kref_init(&cmd->ref);
+	kref_get(&cmd->ref);
+
+	tpg = fu->tpg;
+
+	memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
+
+	cmd->bot_tag = cbw->Tag;
+
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		pr_err("Missing nexus, ignoring command\n");
+		goto err;
+	}
+
+	cmd->prio_attr = MSG_SIMPLE_TAG;
+	se_cmd = &cmd->se_cmd;
+	cmd->unpacked_lun = cbw->Lun;
+	cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
+	cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
+
+	INIT_WORK(&cmd->work, bot_cmd_work);
+	ret = queue_work(tpg->workqueue, &cmd->work);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+err:
+	kfree(cmd);
+	return -EINVAL;
+}
+
+/* Start fabric.c code */
+
+static int usbg_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int usbg_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static char *usbg_get_fabric_name(void)
+{
+	return "usb_gadget";
+}
+
+static u8 usbg_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	struct usbg_tport *tport = tpg->tport;
+	u8 proto_id;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+	default:
+		proto_id = sas_get_fabric_proto_ident(se_tpg);
+		break;
+	}
+
+	return proto_id;
+}
+
+static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	struct usbg_tport *tport = tpg->tport;
+
+	return &tport->tport_name[0];
+}
+
+static u16 usbg_get_tag(struct se_portal_group *se_tpg)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	return tpg->tport_tpgt;
+}
+
+static u32 usbg_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static u32 usbg_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	struct usbg_tport *tport = tpg->tport;
+	int ret = 0;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+	default:
+		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+		break;
+	}
+
+	return ret;
+}
+
+static u32 usbg_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	struct usbg_tport *tport = tpg->tport;
+	int ret = 0;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+	default:
+		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+		break;
+	}
+
+	return ret;
+}
+
+static char *usbg_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	struct usbg_tport *tport = tpg->tport;
+	char *tid = NULL;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+	default:
+		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	}
+
+	return tid;
+}
+
+static struct se_node_acl *usbg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+	struct usbg_nacl *nacl;
+
+	nacl = kzalloc(sizeof(struct usbg_nacl), GFP_KERNEL);
+	if (!nacl) {
+		printk(KERN_ERR "Unable to allocate struct usbg_nacl\n");
+		return NULL;
+	}
+
+	return &nacl->se_node_acl;
+}
+
+static void usbg_release_fabric_acl(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl)
+{
+	struct usbg_nacl *nacl = container_of(se_nacl,
+			struct usbg_nacl, se_node_acl);
+	kfree(nacl);
+}
+
+static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int usbg_new_cmd(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	int ret;
+
+	ret = target_setup_cmd_from_cdb(se_cmd, cmd->cmd_buf);
+	if (ret)
+		return ret;
+
+	return transport_generic_map_mem_to_cmd(se_cmd, NULL, 0, NULL, 0);
+}
+
+static void usbg_cmd_release(struct kref *ref)
+{
+	struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
+			ref);
+
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+static void usbg_release_cmd(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	kfree(cmd->data_buf);
+	kfree(cmd);
+	return;
+}
+
+static int usbg_shutdown_session(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static void usbg_close_session(struct se_session *se_sess)
+{
+	return;
+}
+
+static u32 usbg_sess_get_index(struct se_session *se_sess)
+{
+	return 0;
+}
+
+/*
+ * XXX Error recovery: return != 0 if we expect writes. Dunno when that could be
+ */
+static int usbg_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static void usbg_set_default_node_attrs(struct se_node_acl *nacl)
+{
+	return;
+}
+
+static u32 usbg_get_task_tag(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	struct f_uas *fu = cmd->fu;
+
+	if (fu->flags & USBG_IS_BOT)
+		return le32_to_cpu(cmd->bot_tag);
+	else
+		return cmd->tag;
+}
+
+static int usbg_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int usbg_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static u16 usbg_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+	return 0;
+}
+
+static u16 usbg_get_fabric_sense_len(void)
+{
+	return 0;
+}
+
+static const char *usbg_check_wwn(const char *name)
+{
+	const char *n;
+	unsigned int len;
+
+	n = strstr(name, "naa.");
+	if (!n)
+		return NULL;
+	n += 4;
+	len = strlen(n);
+	if (len == 0 || len > USBG_NAMELEN - 1)
+		return NULL;
+	return n;
+}
+
+static struct se_node_acl *usbg_make_nodeacl(
+	struct se_portal_group *se_tpg,
+	struct config_group *group,
+	const char *name)
+{
+	struct se_node_acl *se_nacl, *se_nacl_new;
+	struct usbg_nacl *nacl;
+	u64 wwpn = 0;
+	u32 nexus_depth;
+	const char *wnn_name;
+
+	wnn_name = usbg_check_wwn(name);
+	if (!wnn_name)
+		return ERR_PTR(-EINVAL);
+	se_nacl_new = usbg_alloc_fabric_acl(se_tpg);
+	if (!(se_nacl_new))
+		return ERR_PTR(-ENOMEM);
+
+	nexus_depth = 1;
+	/*
+	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+	 */
+	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+				name, nexus_depth);
+	if (IS_ERR(se_nacl)) {
+		usbg_release_fabric_acl(se_tpg, se_nacl_new);
+		return se_nacl;
+	}
+	/*
+	 * Locate our struct usbg_nacl and set the FC Nport WWPN
+	 */
+	nacl = container_of(se_nacl, struct usbg_nacl, se_node_acl);
+	nacl->iport_wwpn = wwpn;
+	snprintf(nacl->iport_name, sizeof(nacl->iport_name), "%s", name);
+	return se_nacl;
+}
+
+static void usbg_drop_nodeacl(struct se_node_acl *se_acl)
+{
+	struct usbg_nacl *nacl = container_of(se_acl,
+				struct usbg_nacl, se_node_acl);
+	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
+	kfree(nacl);
+}
+
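+/*
+ * The composite gadget framework can only instantiate one gadget at a time,
+ * so this fabric supports exactly one TPG; usbg_make_tpg() returns -EBUSY
+ * for any further TPG.
+ */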
+struct usbg_tpg *the_only_tpg_I_currently_have;
+
+static struct se_portal_group *usbg_make_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
+			tport_wwn);
+	struct usbg_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
+		return ERR_PTR(-EINVAL);
+	if (the_only_tpg_I_currently_have) {
+		pr_err("Until the gadget framework can't handle multiple\n");
+		pr_err("gadgets, you can't do this here.\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	tpg = kzalloc(sizeof(struct usbg_tpg), GFP_KERNEL);
+	if (!tpg) {
+		printk(KERN_ERR "Unable to allocate struct usbg_tpg");
+		return ERR_PTR(-ENOMEM);
+	}
+	mutex_init(&tpg->tpg_mutex);
+	atomic_set(&tpg->tpg_port_count, 0);
+	tpg->workqueue = alloc_workqueue("tcm_usb_gadget", 0, 1);
+	if (!tpg->workqueue) {
+		kfree(tpg);
+		return NULL;
+	}
+
+	tpg->tport = tport;
+	tpg->tport_tpgt = tpgt;
+
+	ret = core_tpg_register(&usbg_fabric_configfs->tf_ops, wwn,
+				&tpg->se_tpg, tpg,
+				TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		destroy_workqueue(tpg->workqueue);
+		kfree(tpg);
+		return NULL;
+	}
+	the_only_tpg_I_currently_have = tpg;
+	return &tpg->se_tpg;
+}
+
+static void usbg_drop_tpg(struct se_portal_group *se_tpg)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+
+	core_tpg_deregister(se_tpg);
+	destroy_workqueue(tpg->workqueue);
+	kfree(tpg);
+	the_only_tpg_I_currently_have = NULL;
+}
+
+static struct se_wwn *usbg_make_tport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct usbg_tport *tport;
+	const char *wnn_name;
+	u64 wwpn = 0;
+
+	wnn_name = usbg_check_wwn(name);
+	if (!wnn_name)
+		return ERR_PTR(-EINVAL);
+
+	tport = kzalloc(sizeof(struct usbg_tport), GFP_KERNEL);
+	if (!tport) {
+		printk(KERN_ERR "Unable to allocate struct usbg_tport\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tport->tport_wwpn = wwpn;
+	snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
+	return &tport->tport_wwn;
+}
+
+static void usbg_drop_tport(struct se_wwn *wwn)
+{
+	struct usbg_tport *tport = container_of(wwn,
+				struct usbg_tport, tport_wwn);
+	kfree(tport);
+}
+
+/*
+ * If somebody feels like dropping the version property, go ahead.
+ */
+static ssize_t usbg_wwn_show_attr_version(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	return sprintf(page, "usb-gadget fabric module\n");
+}
+TF_WWN_ATTR_RO(usbg, version);
+
+static struct configfs_attribute *usbg_wwn_attrs[] = {
+	&usbg_wwn_version.attr,
+	NULL,
+};
+
+static ssize_t tcm_usbg_tpg_show_enable(
+		struct se_portal_group *se_tpg,
+		char *page)
+{
+	struct usbg_tpg  *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tpg->gadget_connect);
+}
+
+static int usbg_attach(struct usbg_tpg *);
+static void usbg_detach(struct usbg_tpg *);
+
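+/*
+ * Writing 1 to the configfs "enable" attribute registers the composite
+ * gadget via usbg_attach(); writing 0 unregisters it again via usbg_detach().
+ */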
+static ssize_t tcm_usbg_tpg_store_enable(
+		struct se_portal_group *se_tpg,
+		const char *page,
+		size_t count)
+{
+	struct usbg_tpg  *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+	unsigned long op;
+	ssize_t ret;
+
+	ret = kstrtoul(page, 0, &op);
+	if (ret < 0)
+		return -EINVAL;
+	if (op > 1)
+		return -EINVAL;
+
+	if (op && tpg->gadget_connect)
+		goto out;
+	if (!op && !tpg->gadget_connect)
+		goto out;
+
+	if (op) {
+		ret = usbg_attach(tpg);
+		if (ret)
+			goto out;
+	} else {
+		usbg_detach(tpg);
+	}
+	tpg->gadget_connect = op;
+out:
+	return count;
+}
+TF_TPG_BASE_ATTR(tcm_usbg, enable, S_IRUGO | S_IWUSR);
+
+static ssize_t tcm_usbg_tpg_show_nexus(
+		struct se_portal_group *se_tpg,
+		char *page)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+	struct tcm_usbg_nexus *tv_nexus;
+	ssize_t ret;
+
+	mutex_lock(&tpg->tpg_mutex);
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		ret = -ENODEV;
+		goto out;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%s\n",
+			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+out:
+	mutex_unlock(&tpg->tpg_mutex);
+	return ret;
+}
+
+static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
+{
+	struct se_portal_group *se_tpg;
+	struct tcm_usbg_nexus *tv_nexus;
+	int ret;
+
+	mutex_lock(&tpg->tpg_mutex);
+	if (tpg->tpg_nexus) {
+		ret = -EEXIST;
+		pr_debug("tpg->tpg_nexus already exists\n");
+		goto err_unlock;
+	}
+	se_tpg = &tpg->se_tpg;
+
+	ret = -ENOMEM;
+	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
+	if (!tv_nexus) {
+		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
+		goto err_unlock;
+	}
+	tv_nexus->tvn_se_sess = transport_init_session();
+	if (IS_ERR(tv_nexus->tvn_se_sess))
+		goto err_free;
+
+	/*
+	 * Since we are running in 'demo mode' this call will generate a
+	 * struct se_node_acl for the usb-gadget struct se_portal_group with
+	 * the SCSI Initiator port name of the passed configfs group 'name'.
+	 */
+	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+			se_tpg, name);
+	if (!tv_nexus->tvn_se_sess->se_node_acl) {
+		pr_debug("core_tpg_check_initiator_node_acl() failed"
+				" for %s\n", name);
+		goto err_session;
+	}
+	/*
+	 * Now register the usb-gadget virtual I_T Nexus as active with the
+	 * call to __transport_register_session()
+	 */
+	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
+			tv_nexus->tvn_se_sess, tv_nexus);
+	tpg->tpg_nexus = tv_nexus;
+	mutex_unlock(&tpg->tpg_mutex);
+	return 0;
+
+err_session:
+	transport_free_session(tv_nexus->tvn_se_sess);
+err_free:
+	kfree(tv_nexus);
+err_unlock:
+	mutex_unlock(&tpg->tpg_mutex);
+	return ret;
+}
+
+static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
+{
+	struct se_session *se_sess;
+	struct tcm_usbg_nexus *tv_nexus;
+	int ret = -ENODEV;
+
+	mutex_lock(&tpg->tpg_mutex);
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus)
+		goto out;
+
+	se_sess = tv_nexus->tvn_se_sess;
+	if (!se_sess)
+		goto out;
+
+	if (atomic_read(&tpg->tpg_port_count)) {
+		ret = -EPERM;
+		pr_err("Unable to remove Host I_T Nexus with"
+				" active TPG port count: %d\n",
+				atomic_read(&tpg->tpg_port_count));
+		goto out;
+	}
+
+	pr_debug("Removing I_T Nexus to Initiator Port: %s\n",
+			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+	/*
+	 * Release the SCSI I_T Nexus to the emulated usb-gadget Target Port
+	 */
+	transport_deregister_session(tv_nexus->tvn_se_sess);
+	tpg->tpg_nexus = NULL;
+
+	kfree(tv_nexus);
+	ret = 0;
+out:
+	mutex_unlock(&tpg->tpg_mutex);
+	return ret;
+}
+
+static ssize_t tcm_usbg_tpg_store_nexus(
+		struct se_portal_group *se_tpg,
+		const char *page,
+		size_t count)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+	unsigned char i_port[USBG_NAMELEN], *ptr;
+	int ret;
+
+	if (!strncmp(page, "NULL", 4)) {
+		ret = tcm_usbg_drop_nexus(tpg);
+		return (!ret) ? count : ret;
+	}
+	if (strlen(page) > USBG_NAMELEN) {
+		pr_err("Emulated NAA Sas Address: %s, exceeds"
+				" max: %d\n", page, USBG_NAMELEN);
+		return -EINVAL;
+	}
+	snprintf(i_port, USBG_NAMELEN, "%s", page);
+
+	ptr = strstr(i_port, "naa.");
+	if (!ptr) {
+		pr_err("Missing 'naa.' prefix\n");
+		return -EINVAL;
+	}
+
+	if (i_port[strlen(i_port) - 1] == '\n')
+		i_port[strlen(i_port) - 1] = '\0';
+
+	ret = tcm_usbg_make_nexus(tpg, &i_port[4]);
+	if (ret < 0)
+		return ret;
+	return count;
+}
+TF_TPG_BASE_ATTR(tcm_usbg, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *usbg_base_attrs[] = {
+	&tcm_usbg_tpg_enable.attr,
+	&tcm_usbg_tpg_nexus.attr,
+	NULL,
+};
+
+static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+
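+	/*
+	 * Track the number of linked LUNs; tcm_usbg_drop_nexus() refuses to
+	 * tear down the I_T nexus while this count is non-zero.
+	 */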
+	atomic_inc(&tpg->tpg_port_count);
+	smp_mb__after_atomic_inc();
+	return 0;
+}
+
+static void usbg_port_unlink(struct se_portal_group *se_tpg,
+		struct se_lun *se_lun)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+
+	atomic_dec(&tpg->tpg_port_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int usbg_check_stop_free(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+
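+	/*
+	 * Drop our command reference; once the last reference is gone,
+	 * usbg_cmd_release() returns the command to the target core.
+	 * Returning 1 tells the core that this fabric frees the command.
+	 */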
+	kref_put(&cmd->ref, usbg_cmd_release);
+	return 1;
+}
+
+static struct target_core_fabric_ops usbg_ops = {
+	.get_fabric_name		= usbg_get_fabric_name,
+	.get_fabric_proto_ident		= usbg_get_fabric_proto_ident,
+	.tpg_get_wwn			= usbg_get_fabric_wwn,
+	.tpg_get_tag			= usbg_get_tag,
+	.tpg_get_default_depth		= usbg_get_default_depth,
+	.tpg_get_pr_transport_id	= usbg_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= usbg_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= usbg_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= usbg_check_true,
+	.tpg_check_demo_mode_cache	= usbg_check_false,
+	.tpg_check_demo_mode_write_protect = usbg_check_false,
+	.tpg_check_prod_mode_write_protect = usbg_check_false,
+	.tpg_alloc_fabric_acl		= usbg_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= usbg_release_fabric_acl,
+	.tpg_get_inst_index		= usbg_tpg_get_inst_index,
+	.new_cmd_map			= usbg_new_cmd,
+	.release_cmd			= usbg_release_cmd,
+	.shutdown_session		= usbg_shutdown_session,
+	.close_session			= usbg_close_session,
+	.sess_get_index			= usbg_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= usbg_send_write_request,
+	.write_pending_status		= usbg_write_pending_status,
+	.set_default_node_attributes	= usbg_set_default_node_attrs,
+	.get_task_tag			= usbg_get_task_tag,
+	.get_cmd_state			= usbg_get_cmd_state,
+	.queue_data_in			= usbg_send_read_response,
+	.queue_status			= usbg_send_status_response,
+	.queue_tm_rsp			= usbg_queue_tm_rsp,
+	.get_fabric_sense_len		= usbg_get_fabric_sense_len,
+	.set_fabric_sense_len		= usbg_set_fabric_sense_len,
+	.check_stop_free		= usbg_check_stop_free,
+
+	.fabric_make_wwn		= usbg_make_tport,
+	.fabric_drop_wwn		= usbg_drop_tport,
+	.fabric_make_tpg		= usbg_make_tpg,
+	.fabric_drop_tpg		= usbg_drop_tpg,
+	.fabric_post_link		= usbg_port_link,
+	.fabric_pre_unlink		= usbg_port_unlink,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_make_nodeacl		= usbg_make_nodeacl,
+	.fabric_drop_nodeacl		= usbg_drop_nodeacl,
+};
+
+static int usbg_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric;
+	int ret;
+
+	fabric = target_fabric_configfs_init(THIS_MODULE, "usb_gadget");
+	if (IS_ERR(fabric)) {
+		printk(KERN_ERR "target_fabric_configfs_init() failed\n");
+		return PTR_ERR(fabric);
+	}
+
+	fabric->tf_ops = usbg_ops;
+	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		printk(KERN_ERR "target_fabric_configfs_register() failed"
+				" for usb-gadget\n");
+		return ret;
+	}
+	usbg_fabric_configfs = fabric;
+	return 0;
+}
+
+static void usbg_deregister_configfs(void)
+{
+	if (!(usbg_fabric_configfs))
+		return;
+
+	target_fabric_configfs_deregister(usbg_fabric_configfs);
+	usbg_fabric_configfs = NULL;
+}
+
+/* Start gadget.c code */
+
+static struct usb_interface_descriptor bot_intf_desc = {
+	.bLength =              sizeof(bot_intf_desc),
+	.bDescriptorType =      USB_DT_INTERFACE,
+	.bNumEndpoints =        2,
+	.bAlternateSetting =	USB_G_ALT_INT_BBB,
+	.bInterfaceClass =      USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass =   USB_SC_SCSI,
+	.bInterfaceProtocol =   USB_PR_BULK,
+	.iInterface =           USB_G_STR_INT_BBB,
+};
+
+static struct usb_interface_descriptor uasp_intf_desc = {
+	.bLength =		sizeof(uasp_intf_desc),
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	4,
+	.bAlternateSetting =	USB_G_ALT_INT_UAS,
+	.bInterfaceClass =	USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass =	USB_SC_SCSI,
+	.bInterfaceProtocol =	USB_PR_UAS,
+	.iInterface =		USB_G_STR_INT_UAS,
+};
+
+static struct usb_endpoint_descriptor uasp_bi_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_bi_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_bi_pipe_desc = {
+	.bLength =		sizeof(uasp_bi_pipe_desc),
+	.bDescriptorType =	USB_DT_PIPE_USAGE,
+	.bPipeID =		DATA_IN_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
+	.bLength =		sizeof(uasp_bi_ep_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst =		0,
+	.bmAttributes =		UASP_SS_EP_COMP_LOG_STREAMS,
+	.wBytesPerInterval =	0,
+};
+
+static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
+	.bLength =		sizeof(bot_bi_ep_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst =		0,
+};
+
+static struct usb_endpoint_descriptor uasp_bo_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_bo_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_bo_pipe_desc = {
+	.bLength =		sizeof(uasp_bo_pipe_desc),
+	.bDescriptorType =	USB_DT_PIPE_USAGE,
+	.bPipeID =		DATA_OUT_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
+	.bLength =		sizeof(uasp_bo_ep_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bmAttributes =		UASP_SS_EP_COMP_LOG_STREAMS,
+};
+
+static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
+	.bLength =		sizeof(bot_bo_ep_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_endpoint_descriptor uasp_status_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_status_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_status_pipe_desc = {
+	.bLength =		sizeof(uasp_status_pipe_desc),
+	.bDescriptorType =	USB_DT_PIPE_USAGE,
+	.bPipeID =		STATUS_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_status_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
+	.bLength =		sizeof(uasp_status_in_ep_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bmAttributes =		UASP_SS_EP_COMP_LOG_STREAMS,
+};
+
+static struct usb_endpoint_descriptor uasp_cmd_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_cmd_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_cmd_pipe_desc = {
+	.bLength =		sizeof(uasp_cmd_pipe_desc),
+	.bDescriptorType =	USB_DT_PIPE_USAGE,
+	.bPipeID =		CMD_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_cmd_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_cmd_comp_desc = {
+	.bLength =		sizeof(uasp_cmd_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_descriptor_header *uasp_fs_function_desc[] = {
+	(struct usb_descriptor_header *) &bot_intf_desc,
+	(struct usb_descriptor_header *) &uasp_fs_bi_desc,
+	(struct usb_descriptor_header *) &uasp_fs_bo_desc,
+
+	(struct usb_descriptor_header *) &uasp_intf_desc,
+	(struct usb_descriptor_header *) &uasp_fs_bi_desc,
+	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_fs_bo_desc,
+	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_fs_status_desc,
+	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_fs_cmd_desc,
+	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *uasp_hs_function_desc[] = {
+	(struct usb_descriptor_header *) &bot_intf_desc,
+	(struct usb_descriptor_header *) &uasp_bi_desc,
+	(struct usb_descriptor_header *) &uasp_bo_desc,
+
+	(struct usb_descriptor_header *) &uasp_intf_desc,
+	(struct usb_descriptor_header *) &uasp_bi_desc,
+	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_bo_desc,
+	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_status_desc,
+	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_cmd_desc,
+	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *uasp_ss_function_desc[] = {
+	(struct usb_descriptor_header *) &bot_intf_desc,
+	(struct usb_descriptor_header *) &uasp_ss_bi_desc,
+	(struct usb_descriptor_header *) &bot_bi_ep_comp_desc,
+	(struct usb_descriptor_header *) &uasp_ss_bo_desc,
+	(struct usb_descriptor_header *) &bot_bo_ep_comp_desc,
+
+	(struct usb_descriptor_header *) &uasp_intf_desc,
+	(struct usb_descriptor_header *) &uasp_ss_bi_desc,
+	(struct usb_descriptor_header *) &uasp_bi_ep_comp_desc,
+	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_ss_bo_desc,
+	(struct usb_descriptor_header *) &uasp_bo_ep_comp_desc,
+	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_ss_status_desc,
+	(struct usb_descriptor_header *) &uasp_status_in_ep_comp_desc,
+	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_ss_cmd_desc,
+	(struct usb_descriptor_header *) &uasp_cmd_comp_desc,
+	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
+	NULL,
+};
+
+#define UAS_VENDOR_ID	0x0525	/* NetChip */
+#define UAS_PRODUCT_ID	0xa4a5	/* Linux-USB File-backed Storage Gadget */
+
+static struct usb_device_descriptor usbg_device_desc = {
+	.bLength =		sizeof(usbg_device_desc),
+	.bDescriptorType =	USB_DT_DEVICE,
+	.bcdUSB =		cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+	.idVendor =		cpu_to_le16(UAS_VENDOR_ID),
+	.idProduct =		cpu_to_le16(UAS_PRODUCT_ID),
+	.iManufacturer =	USB_G_STR_MANUFACTOR,
+	.iProduct =		USB_G_STR_PRODUCT,
+	.iSerialNumber =	USB_G_STR_SERIAL,
+
+	.bNumConfigurations =   1,
+};
+
+static struct usb_string	usbg_us_strings[] = {
+	{ USB_G_STR_MANUFACTOR,	"Target Manufactor"},
+	{ USB_G_STR_PRODUCT,	"Target Product"},
+	{ USB_G_STR_SERIAL,	"000000000001"},
+	{ USB_G_STR_CONFIG,	"default config"},
+	{ USB_G_STR_INT_UAS,	"USB Attached SCSI"},
+	{ USB_G_STR_INT_BBB,	"Bulk Only Transport"},
+	{ },
+};
+
+static struct usb_gadget_strings usbg_stringtab = {
+	.language = 0x0409,
+	.strings = usbg_us_strings,
+};
+
+static struct usb_gadget_strings *usbg_strings[] = {
+	&usbg_stringtab,
+	NULL,
+};
+
+static int guas_unbind(struct usb_composite_dev *cdev)
+{
+	return 0;
+}
+
+static struct usb_configuration usbg_config_driver = {
+	.label                  = "Linux Target",
+	.bConfigurationValue    = 1,
+	.iConfiguration		= USB_G_STR_CONFIG,
+	.bmAttributes           = USB_CONFIG_ATT_SELFPOWER,
+};
+
+static void give_back_ep(struct usb_ep **pep)
+{
+	struct usb_ep *ep = *pep;
+	if (!ep)
+		return;
+	ep->driver_data = NULL;
+}
+
+static int usbg_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_uas		*fu = to_f_uas(f);
+	struct usb_gadget	*gadget = c->cdev->gadget;
+	struct usb_ep		*ep;
+	int			iface;
+
+	iface = usb_interface_id(c, f);
+	if (iface < 0)
+		return iface;
+
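+	/* Both alternate settings (BOT and UAS) share this interface number. */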
+	bot_intf_desc.bInterfaceNumber = iface;
+	uasp_intf_desc.bInterfaceNumber = iface;
+	fu->iface = iface;
+	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
+			&uasp_bi_ep_comp_desc);
+	if (!ep)
+		goto ep_fail;
+
+	ep->driver_data = fu;
+	fu->ep_in = ep;
+
+	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
+			&uasp_bo_ep_comp_desc);
+	if (!ep)
+		goto ep_fail;
+	ep->driver_data = fu;
+	fu->ep_out = ep;
+
+	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
+			&uasp_status_in_ep_comp_desc);
+	if (!ep)
+		goto ep_fail;
+	ep->driver_data = fu;
+	fu->ep_status = ep;
+
+	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
+			&uasp_cmd_comp_desc);
+	if (!ep)
+		goto ep_fail;
+	ep->driver_data = fu;
+	fu->ep_cmd = ep;
+
+	/* Assume endpoint addresses are the same for both speeds */
+	uasp_bi_desc.bEndpointAddress =	uasp_ss_bi_desc.bEndpointAddress;
+	uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
+	uasp_status_desc.bEndpointAddress =
+		uasp_ss_status_desc.bEndpointAddress;
+	uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+
+	uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
+	uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
+	uasp_fs_status_desc.bEndpointAddress =
+		uasp_ss_status_desc.bEndpointAddress;
+	uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+
+	return 0;
+ep_fail:
+	pr_err("Can't claim all required eps\n");
+
+	give_back_ep(&fu->ep_in);
+	give_back_ep(&fu->ep_out);
+	give_back_ep(&fu->ep_status);
+	give_back_ep(&fu->ep_cmd);
+	return -ENOTSUPP;
+}
+
+static void usbg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_uas *fu = to_f_uas(f);
+
+	kfree(fu);
+}
+
+struct guas_setup_wq {
+	struct work_struct work;
+	struct f_uas *fu;
+	unsigned int alt;
+};
+
+static void usbg_delayed_set_alt(struct work_struct *wq)
+{
+	struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
+			work);
+	struct f_uas *fu = work->fu;
+	int alt = work->alt;
+
+	kfree(work);
+
+	if (fu->flags & USBG_IS_BOT)
+		bot_cleanup_old_alt(fu);
+	if (fu->flags & USBG_IS_UAS)
+		uasp_cleanup_old_alt(fu);
+
+	if (alt == USB_G_ALT_INT_BBB)
+		bot_set_alt(fu);
+	else if (alt == USB_G_ALT_INT_UAS)
+		uasp_set_alt(fu);
+	usb_composite_setup_continue(fu->function.config->cdev);
+}
+
+static int usbg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_uas *fu = to_f_uas(f);
+
+	if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
+		struct guas_setup_wq *work;
+
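+		/*
+		 * set_alt() is called in atomic context, so the endpoint
+		 * (re)configuration is deferred to a workqueue.  Returning
+		 * USB_GADGET_DELAYED_STATUS postpones the control status
+		 * stage until usb_composite_setup_continue() runs from
+		 * usbg_delayed_set_alt().
+		 */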
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work)
+			return -ENOMEM;
+		INIT_WORK(&work->work, usbg_delayed_set_alt);
+		work->fu = fu;
+		work->alt = alt;
+		schedule_work(&work->work);
+		return USB_GADGET_DELAYED_STATUS;
+	}
+	return -EOPNOTSUPP;
+}
+
+static void usbg_disable(struct usb_function *f)
+{
+	struct f_uas *fu = to_f_uas(f);
+
+	if (fu->flags & USBG_IS_UAS)
+		uasp_cleanup_old_alt(fu);
+	else if (fu->flags & USBG_IS_BOT)
+		bot_cleanup_old_alt(fu);
+	fu->flags = 0;
+}
+
+static int usbg_setup(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_uas *fu = to_f_uas(f);
+
+	if (!(fu->flags & USBG_IS_BOT))
+		return -EOPNOTSUPP;
+
+	return usbg_bot_setup(f, ctrl);
+}
+
+static int usbg_cfg_bind(struct usb_configuration *c)
+{
+	struct f_uas *fu;
+	int ret;
+
+	fu = kzalloc(sizeof(*fu), GFP_KERNEL);
+	if (!fu)
+		return -ENOMEM;
+	fu->function.name = "Target Function";
+	fu->function.descriptors = uasp_fs_function_desc;
+	fu->function.hs_descriptors = uasp_hs_function_desc;
+	fu->function.ss_descriptors = uasp_ss_function_desc;
+	fu->function.bind = usbg_bind;
+	fu->function.unbind = usbg_unbind;
+	fu->function.set_alt = usbg_set_alt;
+	fu->function.setup = usbg_setup;
+	fu->function.disable = usbg_disable;
+	fu->tpg = the_only_tpg_I_currently_have;
+
+	ret = usb_add_function(c, &fu->function);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	kfree(fu);
+	return ret;
+}
+
+static int usb_target_bind(struct usb_composite_dev *cdev)
+{
+	int ret;
+
+	ret = usb_add_config(cdev, &usbg_config_driver,
+			usbg_cfg_bind);
+	return ret;
+}
+
+static struct usb_composite_driver usbg_driver = {
+	.name           = "g_target",
+	.dev            = &usbg_device_desc,
+	.strings        = usbg_strings,
+	.max_speed      = USB_SPEED_SUPER,
+	.unbind         = guas_unbind,
+};
+
+static int usbg_attach(struct usbg_tpg *tpg)
+{
+	return usb_composite_probe(&usbg_driver, usb_target_bind);
+}
+
+static void usbg_detach(struct usbg_tpg *tpg)
+{
+	usb_composite_unregister(&usbg_driver);
+}
+
+static int __init usb_target_gadget_init(void)
+{
+	int ret;
+
+	ret = usbg_register_configfs();
+	return ret;
+}
+module_init(usb_target_gadget_init);
+
+static void __exit usb_target_gadget_exit(void)
+{
+	usbg_deregister_configfs();
+}
+module_exit(usb_target_gadget_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
+MODULE_DESCRIPTION("usb-gadget fabric");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/tcm_usb_gadget.h b/drivers/usb/gadget/tcm_usb_gadget.h
new file mode 100644
index 0000000..bb18999
--- /dev/null
+++ b/drivers/usb/gadget/tcm_usb_gadget.h
@@ -0,0 +1,146 @@
+#ifndef __TARGET_USB_GADGET_H__
+#define __TARGET_USB_GADGET_H__
+
+#include <linux/kref.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/uas.h>
+#include <linux/usb/storage.h>
+#include <scsi/scsi.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#define USBG_NAMELEN 32
+
+#define fuas_to_gadget(f)	((f)->function.config->cdev->gadget)
+#define UASP_SS_EP_COMP_LOG_STREAMS 4
+#define UASP_SS_EP_COMP_NUM_STREAMS (1 << UASP_SS_EP_COMP_LOG_STREAMS)
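+/*
+ * The SuperSpeed bulk endpoint companions advertise 2^LOG_STREAMS = 16
+ * streams; struct f_uas carries one struct uas_stream per stream.
+ */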
+
+#define USB_G_STR_MANUFACTOR    1
+#define USB_G_STR_PRODUCT       2
+#define USB_G_STR_SERIAL        3
+#define USB_G_STR_CONFIG        4
+#define USB_G_STR_INT_UAS       5
+#define USB_G_STR_INT_BBB       6
+
+#define USB_G_ALT_INT_BBB       0
+#define USB_G_ALT_INT_UAS       1
+
+struct usbg_nacl {
+	/* Binary World Wide unique Port Name for SAS Initiator port */
+	u64 iport_wwpn;
+	/* ASCII formatted WWPN for SAS Initiator port */
+	char iport_name[USBG_NAMELEN];
+	/* Returned by usbg_make_nodeacl() */
+	struct se_node_acl se_node_acl;
+};
+
+struct tcm_usbg_nexus {
+	struct se_session *tvn_se_sess;
+};
+
+struct usbg_tpg {
+	struct mutex tpg_mutex;
+	/* SAS port target portal group tag for TCM */
+	u16 tport_tpgt;
+	/* Pointer back to usbg_tport */
+	struct usbg_tport *tport;
+	struct workqueue_struct *workqueue;
+	/* Returned by usbg_make_tpg() */
+	struct se_portal_group se_tpg;
+	u32 gadget_connect;
+	struct tcm_usbg_nexus *tpg_nexus;
+	atomic_t tpg_port_count;
+};
+
+struct usbg_tport {
+	/* SCSI protocol the tport is providing */
+	u8 tport_proto_id;
+	/* Binary World Wide unique Port Name for SAS Target port */
+	u64 tport_wwpn;
+	/* ASCII formatted WWPN for SAS Target port */
+	char tport_name[USBG_NAMELEN];
+	/* Returned by usbg_make_tport() */
+	struct se_wwn tport_wwn;
+};
+
+enum uas_state {
+	UASP_SEND_DATA,
+	UASP_RECEIVE_DATA,
+	UASP_SEND_STATUS,
+	UASP_QUEUE_COMMAND,
+};
+
+#define USBG_MAX_CMD    64
+struct usbg_cmd {
+	/* common */
+	u8 cmd_buf[USBG_MAX_CMD];
+	u32 data_len;
+	struct work_struct work;
+	int unpacked_lun;
+	struct se_cmd se_cmd;
+	void *data_buf; /* used if no sg support available */
+	struct f_uas *fu;
+	struct completion write_complete;
+	struct kref ref;
+
+	/* UAS only */
+	u16 tag;
+	u16 prio_attr;
+	struct sense_iu sense_iu;
+	enum uas_state state;
+	struct uas_stream *stream;
+
+	/* BOT only */
+	__le32 bot_tag;
+	unsigned int csw_code;
+	unsigned is_read:1;
+};
+
+struct uas_stream {
+	struct usb_request	*req_in;
+	struct usb_request	*req_out;
+	struct usb_request	*req_status;
+};
+
+struct usbg_cdb {
+	struct usb_request	*req;
+	void			*buf;
+};
+
+struct bot_status {
+	struct usb_request	*req;
+	struct bulk_cs_wrap	csw;
+};
+
+struct f_uas {
+	struct usbg_tpg		*tpg;
+	struct usb_function	function;
+	u16			iface;
+
+	u32			flags;
+#define USBG_ENABLED		(1 << 0)
+#define USBG_IS_UAS		(1 << 1)
+#define USBG_USE_STREAMS	(1 << 2)
+#define USBG_IS_BOT		(1 << 3)
+#define USBG_BOT_CMD_PEND	(1 << 4)
+
+	struct usbg_cdb		cmd;
+	struct usb_ep		*ep_in;
+	struct usb_ep		*ep_out;
+
+	/* UAS */
+	struct usb_ep		*ep_status;
+	struct usb_ep		*ep_cmd;
+	struct uas_stream	stream[UASP_SS_EP_COMP_NUM_STREAMS];
+
+	/* BOT */
+	struct bot_status	bot_status;
+	struct usb_request	*bot_req_in;
+	struct usb_request	*bot_req_out;
+};
+
+extern struct usbg_tpg *the_only_tpg_I_currently_have;
+
+#endif
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 2fa9865..e5e44f8 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -263,8 +263,8 @@
 
 	if (udc_is_newstyle(udc)) {
 		udc->driver->disconnect(udc->gadget);
-		udc->driver->unbind(udc->gadget);
 		usb_gadget_disconnect(udc->gadget);
+		udc->driver->unbind(udc->gadget);
 		usb_gadget_udc_stop(udc->gadget, udc->driver);
 	} else {
 		usb_gadget_stop(udc->gadget, udc->driver);
@@ -415,9 +415,9 @@
 			usb_gadget_udc_start(udc->gadget, udc->driver);
 		usb_gadget_connect(udc->gadget);
 	} else if (sysfs_streq(buf, "disconnect")) {
+		usb_gadget_disconnect(udc->gadget);
 		if (udc_is_newstyle(udc))
 			usb_gadget_udc_stop(udc->gadget, udc->driver);
-		usb_gadget_disconnect(udc->gadget);
 	} else {
 		dev_err(dev, "unsupported command '%s'\n", buf);
 		return -EINVAL;
diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
index bc78c60..ca4e03a 100644
--- a/drivers/usb/gadget/uvc.h
+++ b/drivers/usb/gadget/uvc.h
@@ -28,7 +28,7 @@
 
 struct uvc_request_data
 {
-	unsigned int length;
+	__s32 length;
 	__u8 data[60];
 };
 
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index f6e083b..54d7ca5 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -39,7 +39,7 @@
 	if (data->length < 0)
 		return usb_ep_set_halt(cdev->gadget->ep0);
 
-	req->length = min(uvc->event_length, data->length);
+	req->length = min_t(unsigned int, uvc->event_length, data->length);
 	req->zero = data->length < uvc->event_length;
 	req->dma = DMA_ADDR_INVALID;
 
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 01bb7241d..fe8dc06 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -144,6 +144,14 @@
 			hcd->has_tt = 1;
 			tdi_reset(ehci);
 		}
+		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
+			/* EHCI #1 or #2 on 6 Series/C200 Series chipset */
+			if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
+				ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
+				hcd->broken_pci_sleep = 1;
+				device_set_wakeup_capable(&pdev->dev, false);
+			}
+		}
 		break;
 	case PCI_VENDOR_ID_TDI:
 		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 8618336..f214a80 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -24,6 +24,7 @@
 #include <linux/gpio.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
 
 #include <mach/usb_phy.h>
 #include <mach/iomap.h>
@@ -37,9 +38,7 @@
 	struct clk *emc_clk;
 	struct usb_phy *transceiver;
 	int host_resumed;
-	int bus_suspended;
 	int port_resuming;
-	int power_down_on_bus_suspend;
 	enum tegra_usb_phy_port_speed port_speed;
 };
 
@@ -273,120 +272,6 @@
 	up_write(&ehci_cf_port_reset_rwsem);
 }
 
-static int tegra_usb_suspend(struct usb_hcd *hcd)
-{
-	struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
-	struct ehci_regs __iomem *hw = tegra->ehci->regs;
-	unsigned long flags;
-
-	spin_lock_irqsave(&tegra->ehci->lock, flags);
-
-	tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
-	ehci_halt(tegra->ehci);
-	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
-	spin_unlock_irqrestore(&tegra->ehci->lock, flags);
-
-	tegra_ehci_power_down(hcd);
-	return 0;
-}
-
-static int tegra_usb_resume(struct usb_hcd *hcd)
-{
-	struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
-	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
-	struct ehci_regs __iomem *hw = ehci->regs;
-	unsigned long val;
-
-	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-	tegra_ehci_power_up(hcd);
-
-	if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
-		/* Wait for the phy to detect new devices
-		 * before we restart the controller */
-		msleep(10);
-		goto restart;
-	}
-
-	/* Force the phy to keep data lines in suspend state */
-	tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
-
-	/* Enable host mode */
-	tdi_reset(ehci);
-
-	/* Enable Port Power */
-	val = readl(&hw->port_status[0]);
-	val |= PORT_POWER;
-	writel(val, &hw->port_status[0]);
-	udelay(10);
-
-	/* Check if the phy resume from LP0. When the phy resume from LP0
-	 * USB register will be reset. */
-	if (!readl(&hw->async_next)) {
-		/* Program the field PTC based on the saved speed mode */
-		val = readl(&hw->port_status[0]);
-		val &= ~PORT_TEST(~0);
-		if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
-			val |= PORT_TEST_FORCE;
-		else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
-			val |= PORT_TEST(6);
-		else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
-			val |= PORT_TEST(7);
-		writel(val, &hw->port_status[0]);
-		udelay(10);
-
-		/* Disable test mode by setting PTC field to NORMAL_OP */
-		val = readl(&hw->port_status[0]);
-		val &= ~PORT_TEST(~0);
-		writel(val, &hw->port_status[0]);
-		udelay(10);
-	}
-
-	/* Poll until CCS is enabled */
-	if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
-						 PORT_CONNECT, 2000)) {
-		pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
-		goto restart;
-	}
-
-	/* Poll until PE is enabled */
-	if (handshake(ehci, &hw->port_status[0], PORT_PE,
-						 PORT_PE, 2000)) {
-		pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
-		goto restart;
-	}
-
-	/* Clear the PCI status, to avoid an interrupt taken upon resume */
-	val = readl(&hw->status);
-	val |= STS_PCD;
-	writel(val, &hw->status);
-
-	/* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
-	val = readl(&hw->port_status[0]);
-	if ((val & PORT_POWER) && (val & PORT_PE)) {
-		val |= PORT_SUSPEND;
-		writel(val, &hw->port_status[0]);
-
-		/* Wait until port suspend completes */
-		if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
-							 PORT_SUSPEND, 1000)) {
-			pr_err("%s: timeout waiting for PORT_SUSPEND\n",
-								__func__);
-			goto restart;
-		}
-	}
-
-	tegra_ehci_phy_restore_end(tegra->phy);
-	return 0;
-
-restart:
-	if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
-		tegra_ehci_phy_restore_end(tegra->phy);
-
-	tegra_ehci_restart(hcd);
-	return 0;
-}
-
 static void tegra_ehci_shutdown(struct usb_hcd *hcd)
 {
 	struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
@@ -434,36 +319,6 @@
 	return retval;
 }
 
-#ifdef CONFIG_PM
-static int tegra_ehci_bus_suspend(struct usb_hcd *hcd)
-{
-	struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
-	int error_status = 0;
-
-	error_status = ehci_bus_suspend(hcd);
-	if (!error_status && tegra->power_down_on_bus_suspend) {
-		tegra_usb_suspend(hcd);
-		tegra->bus_suspended = 1;
-	}
-
-	return error_status;
-}
-
-static int tegra_ehci_bus_resume(struct usb_hcd *hcd)
-{
-	struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
-
-	if (tegra->bus_suspended && tegra->power_down_on_bus_suspend) {
-		tegra_usb_resume(hcd);
-		tegra->bus_suspended = 0;
-	}
-
-	tegra_usb_phy_preresume(tegra->phy);
-	tegra->port_resuming = 1;
-	return ehci_bus_resume(hcd);
-}
-#endif
-
 struct temp_buffer {
 	void *kmalloc_ptr;
 	void *old_xfer_buffer;
@@ -574,8 +429,8 @@
 	.hub_control		= tegra_ehci_hub_control,
 	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
 #ifdef CONFIG_PM
-	.bus_suspend		= tegra_ehci_bus_suspend,
-	.bus_resume		= tegra_ehci_bus_resume,
+	.bus_suspend		= ehci_bus_suspend,
+	.bus_resume		= ehci_bus_resume,
 #endif
 	.relinquish_port	= ehci_relinquish_port,
 	.port_handed_over	= ehci_port_handed_over,
@@ -603,11 +458,187 @@
 		dev_err(&pdev->dev, "can't enable vbus\n");
 		return err;
 	}
-	gpio_set_value(gpio, 1);
 
 	return err;
 }
 
+#ifdef CONFIG_PM
+
+static int controller_suspend(struct device *dev)
+{
+	struct tegra_ehci_hcd *tegra =
+			platform_get_drvdata(to_platform_device(dev));
+	struct ehci_hcd	*ehci = tegra->ehci;
+	struct usb_hcd *hcd = ehci_to_hcd(ehci);
+	struct ehci_regs __iomem *hw = ehci->regs;
+	unsigned long flags;
+
+	if (time_before(jiffies, ehci->next_statechange))
+		msleep(10);
+
+	spin_lock_irqsave(&ehci->lock, flags);
+
+	tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
+	ehci_halt(ehci);
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
+
+	tegra_ehci_power_down(hcd);
+	return 0;
+}
+
+static int controller_resume(struct device *dev)
+{
+	struct tegra_ehci_hcd *tegra =
+			platform_get_drvdata(to_platform_device(dev));
+	struct ehci_hcd	*ehci = tegra->ehci;
+	struct usb_hcd *hcd = ehci_to_hcd(ehci);
+	struct ehci_regs __iomem *hw = ehci->regs;
+	unsigned long val;
+
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	tegra_ehci_power_up(hcd);
+
+	if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
+		/* Wait for the phy to detect new devices
+		 * before we restart the controller */
+		msleep(10);
+		goto restart;
+	}
+
+	/* Force the phy to keep data lines in suspend state */
+	tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
+
+	/* Enable host mode */
+	tdi_reset(ehci);
+
+	/* Enable Port Power */
+	val = readl(&hw->port_status[0]);
+	val |= PORT_POWER;
+	writel(val, &hw->port_status[0]);
+	udelay(10);
+
+	/* Check if the phy resume from LP0. When the phy resume from LP0
+	 * USB register will be reset. */
+	if (!readl(&hw->async_next)) {
+		/* Program the field PTC based on the saved speed mode */
+		val = readl(&hw->port_status[0]);
+		val &= ~PORT_TEST(~0);
+		if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
+			val |= PORT_TEST_FORCE;
+		else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
+			val |= PORT_TEST(6);
+		else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
+			val |= PORT_TEST(7);
+		writel(val, &hw->port_status[0]);
+		udelay(10);
+
+		/* Disable test mode by setting PTC field to NORMAL_OP */
+		val = readl(&hw->port_status[0]);
+		val &= ~PORT_TEST(~0);
+		writel(val, &hw->port_status[0]);
+		udelay(10);
+	}
+
+	/* Poll until CCS is enabled */
+	if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
+						 PORT_CONNECT, 2000)) {
+		pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
+		goto restart;
+	}
+
+	/* Poll until PE is enabled */
+	if (handshake(ehci, &hw->port_status[0], PORT_PE,
+						 PORT_PE, 2000)) {
+		pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
+		goto restart;
+	}
+
+	/* Clear the PCI status, to avoid an interrupt taken upon resume */
+	val = readl(&hw->status);
+	val |= STS_PCD;
+	writel(val, &hw->status);
+
+	/* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
+	val = readl(&hw->port_status[0]);
+	if ((val & PORT_POWER) && (val & PORT_PE)) {
+		val |= PORT_SUSPEND;
+		writel(val, &hw->port_status[0]);
+
+		/* Wait until port suspend completes */
+		if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
+							 PORT_SUSPEND, 1000)) {
+			pr_err("%s: timeout waiting for PORT_SUSPEND\n",
+								__func__);
+			goto restart;
+		}
+	}
+
+	tegra_ehci_phy_restore_end(tegra->phy);
+	goto done;
+
+ restart:
+	if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
+		tegra_ehci_phy_restore_end(tegra->phy);
+
+	tegra_ehci_restart(hcd);
+
+ done:
+	tegra_usb_phy_preresume(tegra->phy);
+	tegra->port_resuming = 1;
+	return 0;
+}
+
+static int tegra_ehci_suspend(struct device *dev)
+{
+	struct tegra_ehci_hcd *tegra =
+			platform_get_drvdata(to_platform_device(dev));
+	struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+	int rc = 0;
+
+	/*
+	 * When system sleep is supported and USB controller wakeup is
+	 * implemented: If the controller is runtime-suspended and the
+	 * wakeup setting needs to be changed, call pm_runtime_resume().
+	 */
+	if (HCD_HW_ACCESSIBLE(hcd))
+		rc = controller_suspend(dev);
+	return rc;
+}
+
+static int tegra_ehci_resume(struct device *dev)
+{
+	int rc;
+
+	rc = controller_resume(dev);
+	if (rc == 0) {
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+	}
+	return rc;
+}
+
+static int tegra_ehci_runtime_suspend(struct device *dev)
+{
+	return controller_suspend(dev);
+}
+
+static int tegra_ehci_runtime_resume(struct device *dev)
+{
+	return controller_resume(dev);
+}
+
+static const struct dev_pm_ops tegra_ehci_pm_ops = {
+	.suspend	= tegra_ehci_suspend,
+	.resume		= tegra_ehci_resume,
+	.runtime_suspend = tegra_ehci_runtime_suspend,
+	.runtime_resume	= tegra_ehci_runtime_resume,
+};
+
+#endif
+
 static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
 
 static int tegra_ehci_probe(struct platform_device *pdev)
@@ -722,7 +753,6 @@
 	}
 
 	tegra->host_resumed = 1;
-	tegra->power_down_on_bus_suspend = pdata->power_down_on_bus_suspend;
 	tegra->ehci = hcd_to_ehci(hcd);
 
 	irq = platform_get_irq(pdev, 0);
@@ -746,6 +776,14 @@
 		goto fail;
 	}
 
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+
+	/* Don't skip the pm_runtime_forbid call if wakeup isn't working */
+	/* if (!pdata->power_down_on_bus_suspend) */
+		pm_runtime_forbid(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
 	return err;
 
 fail:
@@ -772,33 +810,6 @@
 	return err;
 }
 
-#ifdef CONFIG_PM
-static int tegra_ehci_resume(struct platform_device *pdev)
-{
-	struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
-	struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
-
-	if (tegra->bus_suspended)
-		return 0;
-
-	return tegra_usb_resume(hcd);
-}
-
-static int tegra_ehci_suspend(struct platform_device *pdev, pm_message_t state)
-{
-	struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
-	struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
-
-	if (tegra->bus_suspended)
-		return 0;
-
-	if (time_before(jiffies, tegra->ehci->next_statechange))
-		msleep(10);
-
-	return tegra_usb_suspend(hcd);
-}
-#endif
-
 static int tegra_ehci_remove(struct platform_device *pdev)
 {
 	struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
@@ -807,6 +818,10 @@
 	if (tegra == NULL || hcd == NULL)
 		return -EINVAL;
 
+	pm_runtime_get_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+
 #ifdef CONFIG_USB_OTG_UTILS
 	if (tegra->transceiver) {
 		otg_set_host(tegra->transceiver->otg, NULL);
@@ -847,13 +862,12 @@
 static struct platform_driver tegra_ehci_driver = {
 	.probe		= tegra_ehci_probe,
 	.remove		= tegra_ehci_remove,
-#ifdef CONFIG_PM
-	.suspend	= tegra_ehci_suspend,
-	.resume		= tegra_ehci_resume,
-#endif
 	.shutdown	= tegra_ehci_hcd_shutdown,
 	.driver		= {
 		.name	= "tegra-ehci",
 		.of_match_table = tegra_ehci_of_match,
+#ifdef CONFIG_PM
+		.pm	= &tegra_ehci_pm_ops,
+#endif
 	}
 };
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 97ab975..768b4b5 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -386,7 +386,7 @@
 	usb_nop_xceiv_register();
 	musb->xceiv = usb_get_transceiver();
 	if (!musb->xceiv)
-		return -ENODEV;
+		goto unregister;
 
 	musb->mregs += DAVINCI_BASE_OFFSET;
 
@@ -444,6 +444,7 @@
 
 fail:
 	usb_put_transceiver(musb->xceiv);
+unregister:
 	usb_nop_xceiv_unregister();
 	return -ENODEV;
 }
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 93de517..f4a40f0 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -449,7 +449,7 @@
 	 * We added this flag to forcefully disable double
 	 * buffering until we get it working.
 	 */
-	unsigned                double_buffer_not_ok:1 __deprecated;
+	unsigned                double_buffer_not_ok:1;
 
 	struct musb_hdrc_config	*config;
 
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 1d5eda2..f7c1c8e 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -40,7 +40,7 @@
 #if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
 	&& !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
 	&& !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN) \
-	&& !defined(CONFIG_MIPS)
+	&& !defined(CONFIG_MIPS) && !defined(CONFIG_M68K)
 static inline void readsl(const void __iomem *addr, void *buf, int len)
 	{ insl((unsigned long)addr, buf, len); }
 static inline void readsw(const void __iomem *addr, void *buf, int len)
diff --git a/drivers/usb/otg/gpio_vbus.c b/drivers/usb/otg/gpio_vbus.c
index 3ece43a..a0a2178 100644
--- a/drivers/usb/otg/gpio_vbus.c
+++ b/drivers/usb/otg/gpio_vbus.c
@@ -96,7 +96,7 @@
 	struct gpio_vbus_data *gpio_vbus =
 		container_of(work, struct gpio_vbus_data, work);
 	struct gpio_vbus_mach_info *pdata = gpio_vbus->dev->platform_data;
-	int gpio;
+	int gpio, status;
 
 	if (!gpio_vbus->phy.otg->gadget)
 		return;
@@ -108,7 +108,9 @@
 	 */
 	gpio = pdata->gpio_pullup;
 	if (is_vbus_powered(pdata)) {
+		status = USB_EVENT_VBUS;
 		gpio_vbus->phy.state = OTG_STATE_B_PERIPHERAL;
+		gpio_vbus->phy.last_event = status;
 		usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget);
 
 		/* drawing a "unit load" is *always* OK, except for OTG */
@@ -117,6 +119,9 @@
 		/* optionally enable D+ pullup */
 		if (gpio_is_valid(gpio))
 			gpio_set_value(gpio, !pdata->gpio_pullup_inverted);
+
+		atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
+					   status, gpio_vbus->phy.otg->gadget);
 	} else {
 		/* optionally disable D+ pullup */
 		if (gpio_is_valid(gpio))
@@ -125,7 +130,12 @@
 		set_vbus_draw(gpio_vbus, 0);
 
 		usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget);
+		status = USB_EVENT_NONE;
 		gpio_vbus->phy.state = OTG_STATE_B_IDLE;
+		gpio_vbus->phy.last_event = status;
+
+		atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
+					   status, gpio_vbus->phy.otg->gadget);
 	}
 }
 
@@ -287,6 +297,9 @@
 			irq, err);
 		goto err_irq;
 	}
+
+	ATOMIC_INIT_NOTIFIER_HEAD(&gpio_vbus->phy.notifier);
+
 	INIT_WORK(&gpio_vbus->work, gpio_vbus_work);
 
 	gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw");
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 1f21d2a..f82a739 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -24,6 +24,7 @@
 #include <linux/if_arp.h>
 #include <linux/if_tun.h>
 #include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
 
 #include <net/sock.h>
 
@@ -166,7 +167,7 @@
 	if (wmem < sock->sk->sk_sndbuf / 2)
 		tx_poll_stop(net);
 	hdr_size = vq->vhost_hlen;
-	zcopy = vhost_sock_zcopy(sock);
+	zcopy = vq->ubufs;
 
 	for (;;) {
 		/* Release DMAs done buffers first */
@@ -257,7 +258,8 @@
 					UIO_MAXIOV;
 			}
 			vhost_discard_vq_desc(vq, 1);
-			tx_poll_start(net, sock);
+			if (err == -EAGAIN || err == -ENOBUFS)
+				tx_poll_start(net, sock);
 			break;
 		}
 		if (err != len)
@@ -265,6 +267,8 @@
 				 " len %d != %zd\n", err, len);
 		if (!zcopy)
 			vhost_add_used_and_signal(&net->dev, vq, head, 0);
+		else
+			vhost_zerocopy_signal_used(vq);
 		total_len += len;
 		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
 			vhost_poll_queue(&vq->poll);
@@ -283,8 +287,12 @@
 
 	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
 	head = skb_peek(&sk->sk_receive_queue);
-	if (likely(head))
+	if (likely(head)) {
 		len = head->len;
+		if (vlan_tx_tag_present(head))
+			len += VLAN_HLEN;
+	}
+
 	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
 	return len;
 }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 51e4c1e..94dbd25 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1603,6 +1603,7 @@
 	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
 	struct vhost_virtqueue *vq = ubufs->vq;
 
+	vhost_poll_queue(&vq->poll);
 	/* set len = 1 to mark this desc buffers done DMA */
 	vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
 	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
index 86922ac..353c02f 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/fb.h>
+#include <linux/gpio.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/types.h>
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 6468a29..39571f9 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -22,7 +22,9 @@
 #include <linux/font.h>
 
 #include <asm/hardware.h>
+#include <asm/page.h>
 #include <asm/parisc-device.h>
+#include <asm/pdc.h>
 #include <asm/cacheflush.h>
 #include <asm/grfioctl.h>
 
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 26e83d7..b0e2a42 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -73,7 +73,7 @@
 	struct uvesafb_task *utask;
 	struct uvesafb_ktask *task;
 
-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN))
 		return;
 
 	if (msg->seq >= UVESAFB_TASKS_MAX)
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index cb4529c..b7f5173 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -365,7 +365,7 @@
 	struct fb_info *fb_info;
 	int fb_size;
 	int val;
-	int ret;
+	int ret = 0;
 
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (info == NULL) {
@@ -458,26 +458,31 @@
 	xenfb_init_shared_page(info, fb_info);
 
 	ret = xenfb_connect_backend(dev, info);
-	if (ret < 0)
-		goto error;
+	if (ret < 0) {
+		xenbus_dev_fatal(dev, ret, "xenfb_connect_backend");
+		goto error_fb;
+	}
 
 	ret = register_framebuffer(fb_info);
 	if (ret) {
-		fb_deferred_io_cleanup(fb_info);
-		fb_dealloc_cmap(&fb_info->cmap);
-		framebuffer_release(fb_info);
 		xenbus_dev_fatal(dev, ret, "register_framebuffer");
-		goto error;
+		goto error_fb;
 	}
 	info->fb_info = fb_info;
 
 	xenfb_make_preferred_console();
 	return 0;
 
- error_nomem:
-	ret = -ENOMEM;
-	xenbus_dev_fatal(dev, ret, "allocating device memory");
- error:
+error_fb:
+	fb_deferred_io_cleanup(fb_info);
+	fb_dealloc_cmap(&fb_info->cmap);
+	framebuffer_release(fb_info);
+error_nomem:
+	if (!ret) {
+		ret = -ENOMEM;
+		xenbus_dev_fatal(dev, ret, "allocating device memory");
+	}
+error:
 	xenfb_remove(dev);
 	return ret;
 }
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c2d05a8..8807fe5 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -390,6 +390,7 @@
 	/* There might be pages left in the balloon: free them. */
 	while (vb->num_pages)
 		leak_balloon(vb, vb->num_pages);
+	update_balloon_size(vb);
 
 	/* Now we reset the device so we can clean up the queues. */
 	vdev->config->reset(vdev);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 6e414b5..23885f2d 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -434,16 +434,16 @@
 {
 	reload = SECS_TO_TICKS(soft_margin);
 	iowrite16(reload, hpwdt_timer_reg);
-	iowrite16(0x85, hpwdt_timer_con);
+	iowrite8(0x85, hpwdt_timer_con);
 }
 
 static void hpwdt_stop(void)
 {
 	unsigned long data;
 
-	data = ioread16(hpwdt_timer_con);
+	data = ioread8(hpwdt_timer_con);
 	data &= 0xFE;
-	iowrite16(data, hpwdt_timer_con);
+	iowrite8(data, hpwdt_timer_con);
 }
 
 static void hpwdt_ping(void)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 9424313..ea20c51 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -183,15 +183,17 @@
 	depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
 	default m
 	help
-          This ACPI processor uploads Power Management information to the Xen hypervisor.
+          This ACPI processor uploads Power Management information to the Xen
+	  hypervisor.
 
-	  To do that the driver parses the Power Management data and uploads said
-	  information to the Xen hypervisor. Then the Xen hypervisor can select the
-          proper Cx and Pxx states. It also registers itslef as the SMM so that
-          other drivers (such as ACPI cpufreq scaling driver) will not load.
+	  To do that the driver parses the Power Management data and uploads
+	  said information to the Xen hypervisor. Then the Xen hypervisor can
+	  select the proper Cx and Pxx states. It also registers itself as the
+	  SMM so that other drivers (such as ACPI cpufreq scaling driver) will
+	  not load.
 
-          To compile this driver as a module, choose M here: the
-          module will be called xen_acpi_processor  If you do not know what to choose,
-          select M here. If the CPUFREQ drivers are built in, select Y here.
+          To compile this driver as a module, choose M here: the module will be
+	  called xen_acpi_processor. If you do not know what to choose, select
+	  M here. If the CPUFREQ drivers are built in, select Y here.
 
 endmenu
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 4b33acd..0a8a17c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -274,7 +274,7 @@
 
 static bool pirq_check_eoi_map(unsigned irq)
 {
-	return test_bit(irq, pirq_eoi_map);
+	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
 }
 
 static bool pirq_needs_eoi_flag(unsigned irq)
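
pirq_check_eoi_map() is handed a Linux irq number, but pirq_eoi_map is a
bitmap indexed by the Xen physical IRQ (pirq), so the lookup has to go through
pirq_from_irq() first. A small sketch of that "translate before the bitmap
lookup" pattern under an assumed translation table; the example_* names are
illustrative only:

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical Linux irq -> Xen pirq translation table. */
static unsigned int example_irq_to_pirq[64];

/* The bitmap is keyed on the pirq number; indexing it with the raw Linux
 * irq (the bug fixed above) consults the wrong bit. */
static bool example_needs_eoi(unsigned int irq, const unsigned long *pirq_map)
{
	return test_bit(example_irq_to_pirq[irq], pirq_map);
}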
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 174b565..0b48579 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -128,7 +128,10 @@
 			pr_debug("     C%d: %s %d uS\n",
 				 cx->type, cx->desc, (u32)cx->latency);
 		}
-	} else
+	} else if (ret != -EINVAL)
+		/* EINVAL means the ACPI ID is incorrect - meaning the ACPI
+		 * table is referencing a non-existing CPU - which can happen
+		 * with broken ACPI tables. */
 		pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n",
 		       ret, _pr->acpi_id);
 
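
With this change -EINVAL from the hypervisor is treated as an expected
consequence of ACPI tables that name CPUs which do not actually exist, and
only other error codes are logged. A minimal sketch of that "filter one
expected errno" pattern; the example_* helpers are hypothetical stand-ins, not
code from this driver:

#include <linux/errno.h>
#include <linux/printk.h>

/* Hypothetical stand-in for the upload hypercall; returns 0 or -errno. */
static int example_upload_cx(unsigned int acpi_id)
{
	return -EINVAL;
}

/* Report hypervisor errors, but stay quiet about -EINVAL, which here just
 * means the ACPI table referenced a non-existing CPU. */
static void example_upload(unsigned int acpi_id)
{
	int ret = example_upload_cx(acpi_id);

	if (ret && ret != -EINVAL)
		pr_err("Hypervisor error (%d) for ACPI CPU%u\n", ret, acpi_id);
}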
diff --git a/firmware/3com/3C359.bin.ihex b/firmware/3com/3C359.bin.ihex
deleted file mode 100644
index 781bac3..0000000
--- a/firmware/3com/3C359.bin.ihex
+++ /dev/null
@@ -1,1573 +0,0 @@
-:10000000FE3A0000000000000000000000000000B8
-:1000100000000000000000000000000000000000E0
-:1000200000000000000000000000000000000000D0
-:1000300000000000000000000000000000000000C0
-:1000400000000000000030332F30322F39392031CA
-:10005000373A3133000000000000000000000000CB
-:1000600030313233343536373839414243444546EE
-:10007000000007FF0200FE9F0600007C48000070A1
-:100080008200FFFF8600FFFF8800FFFF9A00FFFF4E
-:10009000FFFF1100C000FFFFFFFF11223344556630
-:1000A00033434F4D20424142451140C000FFFFFF06
-:1000B000FF1122334455665374617274206F6620B9
-:1000C0004C4C43206672616D652E2020546F746124
-:1000D0006C20646174612073697A6520697320788B
-:1000E000787820202042414245E8D201833EF7340F
-:1000F000007521E84100833EF734007517E882005F
-:10010000833EF73400750DE8BF00833EF734007579
-:1001100003E84102C31EB800F08ED833F6B9008060
-:1001200033DBAD03D8E2FB1FB8000083FB00740390
-:10013000B82200A3F734C3FABA5600B0FFEE33C0BA
-:100140008EC033F6B9FF7F833EFF340074088D3EC6
-:100150003061D1EF2BCF268B1C26C704FFFF2683EF
-:100160003CFF751726C704000026833C00750C264B
-:10017000891C4646E2E0B80000EB03B82400A3F770
-:1001800034C3FAB4D79E733A753879367B349FB14D
-:1001900005D2EC732DB040D0E071277925D0E07303
-:1001A000217B1F32C0751B32E49E721674147812C4
-:1001B0007A109FD2EC720BD0E470077505B800007E
-:1001C000EB03B82600A3F734C3FABA5A0033C0EFE2
-:1001D000EFEFEFB000E656B000E654BA5200B801B7
-:1001E00001EFE8CA003C01757FE88300BA5200B80D
-:1001F0000202EFE8B9003C02756EE87A00BA5200DC
-:10020000B80404EFE8A8003C04755DE87100BA5238
-:1002100000B80808EFE897003C08754CE86800BA99
-:100220005200B81010EFE886003C10753BE85F0004
-:10023000BA5200B82020EFE875003C20752AE85635
-:1002400000BA5200B84040EFE864003C407519E83D
-:100250004D00BA5200B88080EFE853003C8075082A
-:10026000E84400B80000EB03B82800A3F734C3BA91
-:100270005A00B80080EFC3BA5A00B80180EFC3BA81
-:100280005A00B80280EFC3BA5A00B80380EFC3BA6D
-:100290005A00B80480EFC3BA5A00B80580EFC3BA59
-:1002A0005A00B80680EFC3BA5A00B80780EFC3B946
-:1002B000FFFFE458E4543C0075034975F7C3FA3274
-:1002C000C0E656E4563C007403E98200B0FFE656EF
-:1002D000E4563CFF7578BA5200B8FFFFEFED3CFFE3
-:1002E000756CB800FFEFED3C007563B0FFE654E4B9
-:1002F000543CFF755932C0E654E4543C00754FB08D
-:100300000FE650E450240F3C0F7543B000E650E474
-:1003100050240F3C0075378CC88EC0BE7000268BF1
-:1003200014268B5C02B80000EFED23C33D0000757E
-:100330001DB8FFFF23C3EF8BC8ED23C33BC1750E70
-:1003400083C60426833CFF75D5B80000EB03B82AAA
-:1003500000A3F734C3FA33C0BF0020B91700F3ABD2
-:10036000BF0030B91700F3ABBF0022B94000F3ABB8
-:10037000BF0032B94000F3ABFC1E8CC88ED833C02E
-:100380008EC0BE9200BF0020B91700F3A4BEA90022
-:10039000BF0022B94000F3A41FC706FB346400BAB3
-:1003A0000800B80F00EFE88201E89B01720DC70654
-:1003B000F7342C00C706F9340400C3BA0A0033C06E
-:1003C000EFE89801E8B501B81700BA9C00EFB80053
-:1003D00010BA9A00EFB81700A90100740140BA8C56
-:1003E00000EFB80018BA8600EFB80C00BA8200EF30
-:1003F000BA0200ED25F9FF0D0200EFBA060033C086
-:10040000EFBA0400B86000EFBA0000B81800EFBA05
-:100410008000B9FFFFEDA901007504E2F8EB3EBAD8
-:100420000A00EDA900407435A90020743033C0EFF4
-:1004300051B9C800E2FE591E061F268B0E023083FA
-:10044000F91775184949BE0220BF0630F3A61F23CD
-:10045000C9750AFF0EFB347412E94DFF1FB82C005A
-:10046000BB0000A3F734891EF934C3C706FB34640C
-:1004700000E8D300720DC706F7342C00C706F93424
-:100480000400C3E8D600E8F300B80300BA8200EF26
-:10049000B84080BA9800EFB80011BA9600EFB840A3
-:1004A00000A90100740140BA9200EFB80019BA8E99
-:1004B00000EFBA0200ED25F9FF0D0600EFBA0600C5
-:1004C00033C0EFBA0000B81800EFBA8000B9FFFFE0
-:1004D000EDA920007504E2F8EB43BA0A00EDA9008B
-:1004E00040743AA90020743533C0EF51B9C800E216
-:1004F000FE591E061F268B0E023283F940751D49D8
-:1005000049BE0222BF0632F3A61F23C9750FFF0E94
-:10051000FB347403E95AFFB80000EB0B1FB82C0042
-:10052000BB0200891EF934A3F734C3BA0200B80035
-:100530009CEFBA0000B80084EF33C0EFBA0A00EFB6
-:10054000BA0E0033C0EFC3BA0A00B9FFFFED2500B1
-:10055000603D00607404E2F5F8C3F9C3B000E656EC
-:10056000B800FFBA5200EFB9FFFFBA5800ED25EF0F
-:10057000007408BA5A0033C0EFE2EFC3BA8000ED4E
-:10058000BA8400EFBA8000EDC30000000000000054
-:10059000C606EC341533C08ED88EC01E8CC8BE4043
-:1005A00054BF60FE8ED8B91000F3A41FC706803672
-:1005B0001035C7068C3630358D063835A33035A357
-:1005C0003235053301A33435C70636355001C70629
-:1005D000843680FEC7068836C0FEC606C2FEFFC649
-:1005E00006933680C606923600C60680FE80C70691
-:1005F00082FE5450C70684FE2B4DE5CEA90200753D
-:1006000008C60681FE23E90500C60681FE22A1F781
-:1006100034A386FEB8483486E0A388FE8D064E34A7
-:1006200086E0A38AFEB8583486E0A38CFEB89C34DA
-:1006300086E0A38EFE8D06200386E0A390FE33C0E5
-:10064000BA7200EF33C0BA7400EFBA7600EFB88028
-:10065000FE86E0BA7200EFE8BF07BA0C01B840406E
-:10066000EFEDBA6A00B80300C1E0080D0300EFB96E
-:100670000A00E89400BA6A00B80300C1E008EFA1DC
-:100680003234A3A233C706A63304008D06A033C1BB
-:10069000E804CD39C7069036FFFFE9E300630D6635
-:1006A0000D660D8A0DE60E75122E0F030F500F60AA
-:1006B0000D600D600DED0FE912600D600D600D60B5
-:1006C0000D600D2210600D600D600D600DFE10605C
-:1006D0000D600D600D600D600D600DAF0F321037B5
-:1006E0000D600D600D600D600D600D600D600D60A2
-:1006F0000D600D600D600D600D600D600D600D6092
-:100700000D640E000F9509600A49BBFFFFBA6A002D
-:10071000EDA900207438803E80FE127531E84A0051
-:10072000A13234A3A233C706A63304008D06A0333A
-:10073000C1E804CD39E82200C706F3344600C706F5
-:10074000F534FFFFC7069036FFFF58E932004B83B0
-:10075000FB0075B983F90075B0C352BA6A00B803DB
-:1007600000C1E0080D0300EF5AC352BA6A00B80393
-:1007700000C1E008EF5AC3000000000000000000C4
-:10078000688007A19036CD358B3624022EFFA43524
-:100790000AFA8A2694368826E834C606943600FB80
-:1007A00022E47501C3F6C420747DF6C40874058084
-:1007B0000E9236048026E834D7C41E8436268B3742
-:1007C00081E6FF0083FE207605B001E9280053068C
-:1007D000D1E62EFF949D06075B268847023CFF74F6
-:1007E000073CFE7511E93B00F6069236087534F6B3
-:1007F00006923604742D80269236F3803E9536009C
-:10080000752126803F057513C60695360026807F24
-:1008100006007407268B4704A29536BA0C01B8402F
-:1008200040EFED8A26E834F6C4107503E95B00F664
-:10083000C4047405800E9236018026E834EBC43E71
-:100840008836268B3583E67F83FE12720826C645DE
-:100850000201E9240083C620D1E62EFF949D06C440
-:100860003E8836268845023CFF750EF60692360114
-:100870007414F606923602750D80269236FCBA0C78
-:1008800001B82020EFED8A26E834F6C408742280EF
-:1008900026E834F7800E923604F606923608741174
-:1008A00080269236F3BA0C01B84040EFED8A26E874
-:1008B00034F6C40474228026E834FB800E9236019C
-:1008C000F606923602751180269236FEBA0C01B8F1
-:1008D0002020EFED8A26E834F6C40174678026E80C
-:1008E00034FE803EE8FF007439803EE8FF04743235
-:1008F000803EE8FF017521E580A90007740ABA9ED1
-:1009000000B80002EFE9EFFFC606E8FF03BA0C01EA
-:10091000B80808EFEDE92800803EE8FF037406E917
-:100920001E00E90000BA1001B80202EFEDE5000D6B
-:100930001800E700E5820D0200E782C606E8FF0422
-:100940008A26E834F6C402740D8026E834FD802639
-:100950009236BFE84F0BFAA0E83408069436C60674
-:10096000E83400FBC3E8E70FC41E84362EFF1601EF
-:100970000726884702E97EFEE82D10C41E84362E25
-:10098000FF16030726884702E96BFE8E0626022E15
-:10099000FF160707C3C3833EF53400740FFF0EF341
-:1009A000347509E8C4FDC706F5340000F606933631
-:1009B000207430A1C2343B06E934A3E934742480A6
-:1009C0003E953600751DF706E63420007412A92006
-:1009D00000740D8326C234DF8326E934DFE9030087
-:1009E000E8DD09BA0601ED8BD081E200C0C1EA0E54
-:1009F00003167434C1E002110672347304FF0674E6
-:100A000034BA0201ED8BD081E200C0C1EA0E0316B8
-:100A10007034C1E00211066E347304FF067034C7EF
-:100A200006A6330400C706AA3300008D06A033C112
-:100A3000E804CD39C39509950965097809950995A3
-:100A4000099107950996098B0995099509950995C5
-:100A500009950995098BC08BC08BC08BC08BC0904A
-:100A6000F6069336207503E9CC008CC0408EC02674
-:100A70008B0E060086E926890E06008CC2C1E204B0
-:100A8000BE0E0026A10400D0E024C08AE0C0EC0421
-:100A90000AC426A2050026A10800A900C07403E923
-:100AA0009E0026F6061000807503E90A0026A016AF
-:100AB00000241F32E403F0803EEC3406725C803E7A
-:100AC00095360075668BFA33DB8EC326891D268822
-:100AD0005D045150C41E8C36B90F0033C0E82109A3
-:100AE00058590BDB7434FE0EE63A26C6078126C63B
-:100AF00047010026C64702FF26C747040000268993
-:100B00004F0A86F2268957062689770826C647099E
-:100B10000026C6470C02E88C09C3FF06EC338CC0E4
-:100B2000488EC0FAE89710FBE9EBFF8CC0488EC0F6
-:100B3000FAE88A10FBC38CC08EC0FAE88010FBC3B1
-:100B4000803E9536007503E9C200BF080026F60610
-:100B5000100080750503FEE90C0026A01600241F76
-:100B600032E403F003FEA095363C007503E99C00D7
-:100B70003C01740B3C0274143C03741DE98D00C6E7
-:100B800006963601E83C017227E98000C6069636D3
-:100B900002E88300721AE97300C606963601E8225D
-:100BA00001720DC606963602E86C007203E95C001D
-:100BB000530650C41E8C36B90B0033C0E8420858A7
-:100BC00026C6078226C64702FF8D06E0FE86C4269B
-:100BD000894706A0963626884708E8C808075B8339
-:100BE00026AD36FEA1AD36E704BA1001B88080EF1D
-:100BF000EDBA1001B80202EFED52BAE000B84110B0
-:100C0000EF5AB89C03CD39C6069536008CC0488E85
-:100C1000C0FAE8A90FFBC31E061F0633C08EC08BA7
-:100C2000F08D3E20F351B10A26837D0C01752A57C1
-:100C300026837D0E007406E82F00E90300E86607AE
-:100C40005F731633C08ED8268B4D128D75208D3E66
-:100C5000E0FEF3A459071FF9C3FEC9740781C7203A
-:100C600001E9C4FF59071FF8C35150535652573377
-:100C7000DB268A5D0E268B4D128D7D205A87D72666
-:100C80008A451487D74232FF80FF087508FECB22C1
-:100C9000DB75EA33DB23DB7406FEC7D0C8730C5068
-:100CA000268A053804587403E90A0049464723C9CF
-:100CB000740AE9D3FF5A5E5B5859F8C35A5E5B5811
-:100CC00059F9C31E061F0633C08EC086CD2BCE8BAE
-:100CD000F78BC133C9803CFF741680F90673093263
-:100CE000C94648742EE9EDFF3D6000730CE923000E
-:100CF000FEC14648741DE9DCFFB810008D3E183473
-:100D000032EDB106F3A67403E908004823C0740766
-:100D1000E9E9FF071FF8C38D36183433C08ED88D2C
-:100D20003EE0FEB81000B9060056F3A45E483D0050
-:100D30000075F3071FF9C3FF06E433C606EB340062
-:100D4000268B450686E0C1E80448068EC0FE06E60E
-:100D50003AFAE8690EFB07B0FFC30000000000008C
-:100D6000B001C3B000C3F6069336207503B004C3C8
-:100D70008B0E973681E18030268B4704257FCF0B81
-:100D8000C1A39736A3E634B000C3F60693362074A9
-:100D900003B003C3268B4708A39736A3E634268AFD
-:100DA0004720A2FD343C017506C706A13600002687
-:100DB0008A4721A2FE34268B470AA31834A358344D
-:100DC000268B470CA31A34A35A34268B470EA31C38
-:100DD00034A35C34C6062A34C0268B4714257FFF13
-:100DE00009062C34268B471625FFFE25FFFC090635
-:100DF0002E34C6060034C0268B4710A30234268B3F
-:100E00004712A304340653E8840A5B073D000075CB
-:100E100007800E923608B0FEC3B90001A1AC33338F
-:100E2000D2F7F9A3AE33914933D2F7E905003BA3DA
-:100E30004634BF003B893E4434BA6800B8E0E0EF76
-:100E4000A1AE33E762A1AE33BA0801EFA14434E7A3
-:100E500064A14434BA0A01EFB800012D04000D006A
-:100E600010E792C33D0000740A26894707E8833AD9
-:100E7000B007C3A1AE332689472BA1443426894746
-:100E80002DA146342689472F800E933620A188361F
-:100E900086E026894708A1843686E02689470AA18C
-:100EA000803686E02689470CB860FE86E0268947B2
-:100EB0000EA0A136268847108B36883626C64402F7
-:100EC000FFE59EA90008740CBA8400ED0D0800EF40
-:100ED000BA8E00EFE50225F9FFE702BA1001B80269
-:100EE00002EFEDB000C3F6069336207503B001C3E0
-:100EF000802693369FE88D0A800E923608B0FEC396
-:100F0000B000C3F6069336207503B004C3C6062AA4
-:100F100034C0268B4706257FFFA32C34268B470839
-:100F200025FFFE25FFFCA32E34CD52B000C3F606EC
-:100F30009336207503B004C3C6060034C0268B4721
-:100F400006A30234268B4708A30434CD52B000C355
-:100F5000F6069336207503B004C3578D7F0651B94A
-:100F6000070033C0F3AB598D7F06A17A34030639ED
-:100F700037268805A1953726884502A180340306C7
-:100F8000763426884507A1C63426884509A1D8337A
-:100F90002688450A33C0A37A34A33937A39537A3EB
-:100FA0008034A37634A3C634A3D8335FB000C3F62D
-:100FB000069336207503B004C3268B4F0483F906CD
-:100FC000741283F904740D83F900740883F90274B0
-:100FD00003B001C3890EE83A8326AB36F9090EAB9C
-:100FE00036E50225F9FF0BC1E702B000C3F6069310
-:100FF00036207503B004C3268B4F0480F9FF7408B4
-:1010000080F9007410B001C3830EAD3602A1AD3675
-:10101000E704E90A008326AD36FDA1AD36E704B04A
-:1010200000C3F6069336207503B004C3E8D504B0B8
-:1010300000C3F6069336807503B001C326837F068E
-:10104000057503E99D00268B5704268B47082681EA
-:101050007F0600807508ED2689470AE99D002683F2
-:101060007F06017504EFE9920026817F06018075F5
-:1010700009EFED2689470AE9810026837F0602757C
-:101080000726214704E9730026817F060280750C3C
-:1010900026214704ED2689470AE95F0026837F065B
-:1010A00003750726094704E9510026817F0603805E
-:1010B000750C26094704ED2689470AE93D00268379
-:1010C0007F0604750726314704E92F0026817F0635
-:1010D0000480750C26314704ED2689470AE91B0078
-:1010E000B001C3FA53268B4F080BC9740C8D1EE058
-:1010F000FEE852FF83C308E2F85BFBB000C3F606CC
-:10110000933680750AF6069336207503B001C38DB9
-:101110003EE0FEE500268905E50226894502A1ADEF
-:101120003626894504E50626894506E508268945CB
-:1011300008E50A2689450AE50E2689450CE5482674
-:1011400089450EE54A26894510E54C26894512A1B8
-:10115000B73626894514E55026894516E552268975
-:101160004518E5542689451AE5562689451CE55853
-:101170002689451EE56226894520E56426894522A3
-:10118000E56626894524E56826894526E56A268997
-:101190004528E56C2689452AE5702689452CE572A7
-:1011A0002689452EE57426894530E576268945321F
-:1011B000E57C26894534E57E26894536E580268905
-:1011C0004538E5822689453AE5862689453CE58805
-:1011D0002689453EE59A26894540E59E2689454271
-:1011E000E5CC26894544E5CE26894546E5D02689C5
-:1011F0004548E5D22689454ABA0001ED1106663414
-:101200007304FF0668342689454CBA0201EDC1E03B
-:101210000211066E347304FF0670342689454EBAF7
-:101220000401ED11066A347304FF066C3426894507
-:1012300050BA0601EDC1E002110672347304FF06D4
-:10124000743426894552BA0801ED26894554BA0AF4
-:1012500001ED26894556BA0C01ED26894558BA0E8E
-:1012600001ED01067A342689455EBA1001ED268922
-:10127000455CB000C3F6069336807407F6069336D5
-:10128000207503B001C326807F06007530803E952F
-:1012900036007452C6069536008326AD36FEA1ADE3
-:1012A00036E704BA1001B88080EFEDBA1001B80239
-:1012B00002EFEDBAE000B80010EFB000C3268B4794
-:1012C000043D000074203D0300771BBA1001B802F2
-:1012D00000EFBAE000B80110EF830EAD3601A1AD0A
-:1012E00036E704B000C3B006C3F606933680750334
-:1012F000B001C326837F0401740A26837F0402742D
-:1013000019B006C326837F060C77F626837F0A6012
-:1013100077EFE81000720BB046C3E84E007203B0DE
-:1013200046C3B000C351B10A8B3E20F326837D0C27
-:10133000027503E90E00FEC9740781C72001E9EBBD
-:10134000FF59F8C3578D7D0E8D7706B91200F3A4AF
-:101350008D7D208D36E0FE268B4D12F3A4FF060115
-:10136000355F26C7450C010059F9C351B10A8D3EBE
-:1013700020F38D36E0FE26837D0C01751B57E82592
-:10138000005F731433C0B92001F3AA26C7450C02CD
-:1013900000FF0E013559F9C3FEC9740781C720014A
-:1013A000E9D3FF59F8C351268B4D128D7D20F3A64A
-:1013B000740359F8C359F9C300000000000000008D
-:1013C000803EEC34067233FF06F03350C41E8C3678
-:1013D000B90F0033C0E82900588126C234DF7F816D
-:1013E00026E934DF7F0BDB741126C6078426C64747
-:1013F00002FF26894706E8AC00C3FF06EA33E9F599
-:10140000FF57268B3F03F9263B7F027416263B7F4E
-:10141000047C2A3D000075138D7F0803F9263B7F6D
-:10142000027C14FF06DE3333DB5FC3268B7F02268C
-:10143000893F03F9E9060026893F26290F26C705BB
-:10144000FFFF26873F26890D8D5D02508BFB83E9C8
-:101450000233C0F3AA58FE0EEC345FC38B7C023B10
-:101460003C742F833DFF750B8D7C08897C02833D86
-:10147000FF741E8A45023C81750C803EEB3400747B
-:101480000533C0E90B008B0D014C028D750283E919
-:1014900002C3803EEC3406720533C0E9F3FFFF0659
-:1014A000EE33E9BEFFF6069236407401C35756513B
-:1014B000528B368C36E8A4FF7503E91A00E91C004C
-:1014C000FE06EC34C43E8036F3A4800E923640BA59
-:1014D0000C01B88080EFED5A595E5FC3FF06E03320
-:1014E000803C81750CFF06E233C606EB3401E9CF80
-:1014F000FF803C847507FF06E633E9C3FFFF06E87B
-:1015000033E9BCFF8D3EE0FEA17234C706723400A1
-:10151000008905A17434C70674340000894502BAF5
-:101520000401ED894504C745060000A16E34C706D5
-:101530006E340000894508A17034C706703400007D
-:1015400089450ABA0001ED89450CC7450E000032F5
-:10155000E4BA0E01EC894510A17E34C7067E340042
-:1015600000894512A18C34C7068C340000894514CB
-:10157000A18A34C7068A340000894516A17C34C785
-:10158000067C340000894518A18834C706883400D9
-:101590000089451AA1CA33C706CA33000089451C11
-:1015A000A17834C7067834000089451EA1C634C727
-:1015B00006C6340000894520C3000000000000007A
-:1015C000FA33C08ED88EC0B8A001C1E8048ED08D89
-:1015D000268000E80001E810EB8B1EF7348B16F92B
-:1015E000348B36FF3433C0B9EFFF8D3E14002BCF60
-:1015F0002BCED1E9F3AB891EF7348916F93483FE7B
-:1016000000740CB9EFFFBF80FE2BCFD1E9F3ABB96B
-:10161000FFFF81E9003B83FE007403E91B00511EBC
-:10162000B800E08ED833F68D3E00D8B9000CF3A593
-:101630001F59BEFFFF81EE00D82BCE81E100FF894C
-:101640000EAC338D062002C1E804A332348ED036AE
-:10165000C7061E00801836C7062200FF7F36C70661
-:101660000A00FFFF36C7061C0080008D06A002C1DD
-:10167000E804A330348ED036C7061E00502836C783
-:10168000060A00FFFF36C7061C008000B8A001C193
-:10169000E804A33434A3F2338ED08D268000B80042
-:1016A00090E7028D3E70018BC7C1E804B903008941
-:1016B000450E894502C705FFFF83C710050100E2FB
-:1016C000EEE85B01E5CEA3B536E82100E84501A1CF
-:1016D00032348CCBCD370E58A900F0740733F6891D
-:1016E00036FF34C38D3630618936FF34C333C08B47
-:1016F000D08BF2B968002E80BCAC17807501EF83E7
-:10170000C20246E2F1B80200E750B95A0033FFC7FF
-:101710000565188C4D0283C704E2F433C08EC08C7B
-:10172000C88ED88D3E80008D369C17B90800E837EA
-:10173000008D3620218D3EC000B90D00E829008DB6
-:101740003E4001B90A00E81F00E84B0E33C08ED8B6
-:10175000C7064E376F17E748E74CB8409CE74AE5A5
-:101760004890B80070E748C3A583C702E2FAC3E512
-:101770004CC35051565752061E33C08ED8E558D12F
-:10178000E073118BF0D1E633C08ED88BB480008328
-:10179000C60BFFE61F075A5F5E5958CF581CE41C62
-:1017A0006C1C8E1AC01F401A441C6518808080FF74
-:1017B00080030280FFFFFFFFFFFFFFFFFFFFFFFF30
-:1017C000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF29
-:1017D0008003034380800280420302FF0301030170
-:1017E00001030203FFFFFFFFFFFFFFFF02030103EF
-:1017F00003FF0101FF01FF0101030303FFFFFFFFDF
-:10180000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE8
-:10181000FFFFFF02B80F00E784B80FF8E782C3B9F3
-:101820000800890EE63A8D0620038BD0C1E804A398
-:1018300090018BC28BD8C1E8048EC005610026A33D
-:101840000000A1303426A3020083C314D1EB268903
-:101850001E080081C21006E2D926C7060000FFFF5D
-:101860008C069201C35051565752061E33C08ED873
-:10187000E75AFF06BE33BAD200EDCF0000000000E9
-:101880008CCBA13034CD37E906EDB83200C3E88CFB
-:1018900001FE06E234E8210175F0E8530E810EAF37
-:1018A0003600C0C706AD366000F706E63480007526
-:1018B0001AF706E63400087409C706AB360B00E9D0
-:1018C0000F00C706AB360300E90600C706AB3611AA
-:1018D0009CC706A9361800F706E6348000750DF798
-:1018E00006B53602007405830EA93620A1A936E795
-:1018F00000A1AB36E702F706E6348000742EE8F26A
-:101900002F33C00D4100E756A1B1360D0010E70896
-:10191000A1B336E70AA1AF36E706B84000E74E3379
-:10192000C0E70EC70626020000E92300C7064E37AF
-:101930003F208E06303426F7060A00008074072602
-:10194000810E08000080C606E03401B80000C3FE26
-:1019500006E134C606E03400A126020BC07401C3C0
-:10196000E80400B80000C3A1A936E7008B1EAB361F
-:1019700083E306E50225F9FF0BC30D1000E702A182
-:10198000AD36E704C3B80A00E784FE06E534C606B0
-:10199000E334018E06303426F7060A00004074074F
-:1019A00026810E08000040C3C7064E376F17FE069B
-:1019B000E434C606E33400C3C3F606183480750D5C
-:1019C000A118340B061A340B061C347501C3A12E62
-:1019D0003425FFFE8B16E73681E200010BC2A32EF1
-:1019E000348D161000BF0000B908008B850034EF5D
-:1019F00083C2108B850234EF83C2108B850434EFD1
-:101A000083C2E283C7064975E2B800008EC0BE00FB
-:101A100034BFB936B91800F3A5B80000C333C08E7F
-:101A2000C08D3EB033B90800F3AB8D3E3E34B903F0
-:101A300000F3ABC300000000000000000000000045
-:101A40005051565752061E33C08ED8E75AFF06BA79
-:101A500033E5560D2000E756BA7A00ED0826943695
-:101A600033C0B10832ED068EC08D3EE0FFF3AA8E82
-:101A700006323426810E0800000207E55625DFFFF6
-:101A8000E756E9F8FC00BD1B101BD91AF31A505198
-:101A9000565752061E33C08ED8E75AFF06B6335348
-:101AA0000651E580A3B4338BD88BC8251000A3ED75
-:101AB000340BC07414FF068034803EFE340074037F
-:101AC000E90600B88000E89D0483E303D1E32EFF1C
-:101AD00097861A59075BE9A4FCBA20008E063C34AD
-:101AE000833E3C34007503E9F000C7063C34000037
-:101AF000E92A00BA10008E063A34833E3A34007563
-:101B000003E9D5FFC7063A340000E81000E9C9FF31
-:101B1000BA10008E063A34C7063A34000026A114E3
-:101B20000026A30C0026A1160026A30E0026C6063A
-:101B30000A0000C1EA0223D1741CBA200026C7069D
-:101B40000E00EA05260B160C002689160C00FF066F
-:101B50008634FF06DC3326A10C00A9003774162654
-:101B6000C6060A0002A900307404FF067A34FF0694
-:101B7000DA33E94900C0EC0783168A340024073CB5
-:101B8000077504FF068C34FF067E34A130348CC305
-:101B90008EC08EDB26830E0800408CD82687061662
-:101BA0000026833E1400FF740A8EC0268C1E00009F
-:101BB000E90500268C1E140033C08ED8C3C38CC028
-:101BC000870692013DFFFF740D8ED88C060000330E
-:101BD000C08ED8E904008C069001E80100C306839A
-:101BE0003E9001FF7429833E3A34007511BA860095
-:101BF000E81E008C063A34833E9001FF7411833E48
-:101C00003C3400750ABA8800E806008C063C3407AC
-:101C1000C3A190018EC026A10800EF26A1000026D6
-:101C2000C7060000FFFFA390013DFFFF7503A392CD
-:101C300001833EED3400740BB81000E784C706ED55
-:101C4000340000C35051565752061E33C08ED8E799
-:101C50005AFF06BC33E925FB5051565752061E3336
-:101C6000C08ED8E75AFF06B033E911FB50515657E2
-:101C700052061E33C08ED8E75AFF06B43306FF065D
-:101C80007634803EFE3400740407E9F0FAB8800030
-:101C9000E8D30207E9E6FA000000000000000000B7
-:101CA000C61D081D911E5D1E731E891E911EA81D56
-:101CB000911E911EAF1EAF1E151D151D911E991F61
-:101CC000000000000000000000040000000200000E
-:101CD00000010010000100400000000000010000B1
-:101CE00007E999FA5051565752061E33C08ED8E76D
-:101CF0005AFF06B2330668F61CE506A3B2338BF032
-:101D000083E61E2EFFA4A01CE50CA980007406E843
-:101D1000A401E506C353E50C8BD8A9010074148314
-:101D20003EE03A00740D8E063834E8BF06C706E080
-:101D30003A0000E5000D1800E700E5020D1100E78C
-:101D4000028BC35BA901007401C38BD0B80008E704
-:101D5000848BC28E06383426A30C008BD0C1E003DE
-:101D60008316883400FF067C3426833E06000A75FD
-:101D7000218BC22540183D4000740C3D00107512A7
-:101D800026FE0E0A00740BF706EF3420007503E9F7
-:101D90005A068CC0268E06020026830E08002026D6
-:101DA000A3120026A31000C3FF06C433E50CA9014B
-:101DB000007501C3A9F0077401C3FF06D433E50021
-:101DC0000D1800E700C3FF06CA33803EA036087531
-:101DD000148E06303426F7060A00000874072681A0
-:101DE0000E08000008E58225FDFFE782E50C50E5BE
-:101DF00080250007A3E43AE58C250080A3E23A5849
-:101E0000A902007525833EE23A00751E833EE43A3E
-:101E1000007517E5080D000425FF04E708E86A01CE
-:101E2000E5820D0200E782E92100E81A06803EE81B
-:101E3000FF00740A803EE8FF047403E90D00C60643
-:101E4000E8FF01BA0C01B80808EFED803E9F3606A6
-:101E50007505830E993640B80001E90901FF06CCEB
-:101E6000338126AF36FFF7A1AF36E706FF06C6344B
-:101E7000E91E00FF06CE33FF0695378126AF36FFF9
-:101E8000EFA1AF36E706E90800FF06D033FF067A78
-:101E900034FF06D233D1E68E0630342E8B84C01C3C
-:101EA00026090608002E8B84C21C09066637C3E586
-:101EB0000CA98000745650E8F00058A9000175077D
-:101EC000FF06C633E90800FF067834FF06C833E58D
-:101ED0008225FDFFE782E86E05BA1001ED803EE83D
-:101EE000FF00740A803EE8FF047403E91D00C60683
-:101EF000E8FF01BA0C01B80808EFEDE90D00C606CD
-:101F0000E8FF03BA0C01B80808EFEDC3A90100749B
-:101F10001CE82C00833EE03A00740F068E0638342D
-:101F2000E8C904C706E03A000007E95D008BD08EDF
-:101F300006383426A30C00E8060068691DE94A004B
-:101F4000A90004740AB80004FF06D833E91700A9F1
-:101F50000001740AFF063937B80001E90800A9102A
-:101F600000B81000741D090666378CC08E06303428
-:101F700026F7060A000001740726810E08000001FA
-:101F80008EC0C3FF06C233E9F8FFE5000D1800E775
-:101F900000E5020D1100E702C358E943FDE5080D15
-:101FA000000425FF04E708E9E0FFE50EA900087535
-:101FB00001C3E9F5FF000000000000000000000080
-:101FC0005051565752061E33C08ED8E75AFF06B8F6
-:101FD00033E548065357FF164E375F5B833E80015B
-:101FE000FF74588E06800126FF0E0800754D26A14D
-:101FF0000000A3800126C7060000FFFF8CC0268ECC
-:1020000006020026810E080080008BD02687061A63
-:102010000026833E1800FF740A8EC0268916000031
-:10202000E905002689161800833E8001FF740C8E96
-:1020300006800126833E08000074B307E93EF7E5F9
-:102040004C90E502A90020740D25FFDF0D0100E78B
-:10205000020D0020E702E50A8BD8A3F43325C3570D
-:102060000D0010E70AF7069B3600807437F7C300AF
-:10207000807406F7C30008745D8126C2347FFFC7F1
-:102080000635370500B88003CD3981269B36FF7FA2
-:10209000C7060F370400F7069B3640007506C706D3
-:1020A0000F370300F7069B360020742AF7C3000899
-:1020B0007424803E9D36067C1DFF069434830E6694
-:1020C00037208E06303426F7060A000001740726F2
-:1020D000810E08000001F7C30020753BF7069A3710
-:1020E0008000740BFF06893733C0E70EE90400FF58
-:1020F000063B37F7069B360020741C80269E36FF71
-:1021000075158E06303426F7060A00000874072677
-:10211000810E08000008C3C300000000000000009A
-:1021200002230223022302230323DD220223FD21B3
-:102130000223A424F32402238D227A23022397244A
-:102140001B247524022302238E25FB8E067E01FBB1
-:1021500026833E0000FF74F2268E060000FA268BCE
-:102160001E080026231E0A0074E58CC08ED0268B24
-:102170002602008C16F23322FF756A26A11C008A03
-:10218000E38ADC22D8750DD0E824F80AC075F2B0D5
-:1021900080E9EDFFD0E824F80AC07502B08032E48F
-:1021A00026A31C00F7C3080075472E8A9FC5252E5D
-:1021B0008BBFC52680C310268E1D268C1E06008B65
-:1021C000160000C7060000FFFF26891583FAFF7579
-:1021D0000A2E8B97CD26262116080033C08ED826CE
-:1021E000891E0400C38ADFB7002E8A9FC525E9E057
-:1021F000FF2683260800F783C310E9DEFF60061E72
-:102200006887256A001F8E06F2338B0E3434390E30
-:10221000F233740E26810E0A00000226810E080099
-:1022200000022689260200A3F2338ED08D2680007C
-:10223000368926020036891E200036C706080000AF
-:1022400000B90400BE00002E8BBCC52636C705FFB2
-:10225000FF36C74502FFFF83C602E2EB8E067E0112
-:10226000368B0E22008CC026833E0000FF268E0691
-:1022700000007407263B0E22007DEA368C06000023
-:102280008EC0268C160000FB36FF2E1E00061E6830
-:102290008B256A001F2609360800F7C600FF740167
-:1022A000C356522E8BB4C52581E6FF002E8BB4C5D4
-:1022B000268CC28EC026C7060000FFFF8EC2268372
-:1022C0003CFF740F8BD0268754028EC226A30000D9
-:1022D000E90700268944022689045A5EC3061E685F
-:1022E0008B256A001F8E06F23326A30A0026892654
-:1022F0000200A134348ED08D2680008C16F233E992
-:102300004DFECF501E525333C08ED826833E04005C
-:10231000FF26C706040000007403E91A00833EE6A6
-:102320003A027613FF06D6338CC08E063234BE4096
-:1023300000683A23E95EFFE884F85B5A1F58CFE84B
-:10234000E10026C606180010268A1E2900881E1BDA
-:102350003726C7060C00FF7F26A10E00E79C26A1AA
-:102360000800E79AE50080FB0874090D18ACE70047
-:10237000071F58CF0D1800E9F4FF501E0633C08E1A
-:10238000D8833EA1360075B7268B3606002EFF9403
-:10239000DC23071F58CFE88A00E5000D1800E7008E
-:1023A000E84900C353F706EF342000752DE58C256E
-:1023B00000708BD8E58C2500703BC374058BD8E981
-:1023C000F2FF3D00307510E50225EFFFE702C7067A
-:1023D000E03AFFFFE90300E812005BC3A323962362
-:1023E000A423A4239623A4239623962326A029007E
-:1023F000A21B3726C7060C00FF7F26A10E00E79C14
-:1024000026A10800E79AE50025FF53268B36060033
-:1024100083E60E2E0B84AD25E700C3061E688B25D0
-:102420006A001F830EEF3420830E9B3608E50025DB
-:10243000EFFF0D0800E700E500A910007501C3E5F6
-:1024400000A9100075F9C350535156061E33C08EB3
-:10245000D8B80500E784E5080D000425FF04E70867
-:10246000E5000D1800E700E5020D1100E7021F0767
-:102470005E595B58C3501E33C08ED8C706EF340078
-:102480000083269B36F7E5000D1800E700E5020DF6
-:102490001100E7021F58CF60061E6887256A001FDB
-:1024A000E816F5C3061E688B256A001F8EC02683BA
-:1024B0003E0A00007403E8430026C7060A00FFFF37
-:1024C000268B1606008E1E8E018CD88BCA833E008A
-:1024D00000FF8E1E0000740A2B16080073EB290EF5
-:1024E000080026890E0800268C1E00008ED88C0657
-:1024F0000000C360061E6887256A001F8EC08BC857
-:102500008E1E8E0126C7060A0000008CD8833E006E
-:1025100000FF74253B0E00008E1E000075ED8ED866
-:1025200026A10000A300003DFFFF74568ED826A10F
-:10253000080001060800E94900268E1E0200BE18A8
-:1025400000833CFF743C390C74198E1CBE00008360
-:102550003E0000FF742C390E000074078E1E000030
-:10256000E9ECFF26A10000890433C98ED93DFFFFA5
-:10257000751083FE18750B268E1E0200812608003A
-:102580007FFF33C08ED8C31F0761CF1F07CF600600
-:102590001E6887256A001FE506251E003D1E007582
-:1025A000F6B90800E558E75A23C0E0F8C300000078
-:1025B000000000000000AC000000A8008C02040035
-:1025C0000008102000FF0E0C0C0A0A0A0A0808086E
-:1025D0000808080808060606060606060606060691
-:1025E00006060606060404040404040404040404A1
-:1025F000040404040404040404040404040404049B
-:1026000004040404040202020202020202020202A0
-:10261000020202020202020202020202020202029A
-:10262000020202020202020202020202020202028A
-:10263000020202020202020202020202020202027A
-:102640000202020202000000000000000000000080
-:10265000000000000000000000000000000000007A
-:10266000000000000000000000000000000000006A
-:10267000000000000000000000000000000000005A
-:10268000000000000000000000000000000000004A
-:10269000000000000000000000000000000000003A
-:1026A000000000000000000000000000000000002A
-:1026B000000000000000000000000000000000001A
-:1026C00000000000001800140010000C00FF7FFF45
-:1026D000BFFFDFFFEFFFF7FFFBFFFDFFFE7FFFBF49
-:1026E000FFDFFFEFFFF7FFFBFFFDFFFEFF00000036
-:1026F000803EE234017603E9A500B80000E74EB958
-:102700002800E2FEC606453702BF3F282E8B45084B
-:10271000E74EB92800E2FE2E8B1DC706B3364011E6
-:10272000C706B1362700C70646370200C706483736
-:102730006400F706B5360200751C2E0B5D0281267B
-:10274000B336FFFEC706B1369C00C7064637080001
-:10275000C70648379001891EB736891EFE33BE2052
-:10276000008BC3E74EB92800E2FE2E8B4504E74EEE
-:10277000B92800E2FEE54E8BCB2E2345062E234DD5
-:10278000063AC174364E75D9803E453700740BC683
-:1027900006453700BF2F28E972FFC606453701F707
-:1027A00006B53602007414E5CE25FDFFE7CEE843FA
-:1027B00000E5CE0D0200E7CEE83900803EE23401AC
-:1027C0007601C3B8EA05E78CFAE812F4FB8D06D06F
-:1027D000398BD8C1E804A338348EC0A1303426A385
-:1027E000020026C7060000FFFF83C318D1EB26892D
-:1027F0001E0800C3E5020D0040E702E5000D0400DD
-:10280000E700B80000E70AE50AA900807514E508AA
-:102810000D0010E708E50A0D0008B90500E70AE217
-:10282000FCC3E5080D0010B90500E708E2FCC3048D
-:102830000C2000010C7EFF000C0200100040000C78
-:10284000C6010000C0F7FF00C002001000400000F9
-:1028500033C08ED88D3E72498D36B037B914008B97
-:102860001E3034895C022E8B45028944062E8B056E
-:1028700089440483C70483C610E2E8C6069E360E68
-:10288000E8FD26688328A1AA02CD35833EA1360043
-:102890007403E93B2733FF8E06A6028B36A4022E73
-:1028A000FFA42E30830E993604C70637370100C6C1
-:1028B00006CA3401E97D19803EA0360874E68026F8
-:1028C0009E36FF751AF7069B3600207412F7069B9A
-:1028D000360300750A830E663710C606A03608E96F
-:1028E000FB01803E9E360275CEC606A03606E9EC98
-:1028F00001C3E9E80126C7060A00000026FF2604F6
-:1029000000A1D1362639061A007522A1D336263900
-:10291000061C007518A1D5362639061E00750E2630
-:10292000F7060C0040007405830E663740810EAF39
-:10293000360010A1AF36E706803E9D36027506CD03
-:1029400034E9A21AC3F7069B361000755426F60622
-:102950000A00FF754C26A0190024C03C4075118068
-:102960003E953600743B26C7060400FFFFE93100A0
-:10297000E8F104F7069B360300742F8BD8B87D036B
-:10298000CD3A8BC3C606A03606F7069B3602007505
-:1029900005C606A03604810E9B36800083269B3632
-:1029A000FCE92301E8871DE933015026A10C00252D
-:1029B00007003D07007503E984003D05007503E944
-:1029C0007C00833EE83A047475833EE83A02746EF4
-:1029D000F706E63418807503E96A00F706E6340066
-:1029E00080743526803E290002752D5156578D364C
-:1029F0003E348D3E2000B90600F3A65F5E59744553
-:102A000026A12000A33E3426A12200A3403426A103
-:102A10002400A34234E92600F706E6340800740BCC
-:102A200026803E1900007403E91300F706E634100F
-:102A300000741226A02800C0E80422C0740726C72C
-:102A4000060400FFFF5823C07403E957FF81269B4B
-:102A500036FFFE83FE067F2426A120003B06D136EA
-:102A6000751A26A122003B06D336751026A1240034
-:102A70003B06D5367506810E9B36000126A1200047
-:102A8000257FFFA3B83426A12200A3BA3426A124AF
-:102A900000A3BC348BC686C4A3C034D1E680FC0935
-:102AA0007403E8AA1C8BC62EFFA4304926A10C0093
-:102AB0003DFF7F740F26FF2604008E063834E8366B
-:102AC00006CD50C3E91600CD34E91100CD34893666
-:102AD0003D37A19D36A33F37C606A0360CE88E00D1
-:102AE000A19F3622E47532F7064C370100752AF6AD
-:102AF000069D3680740788269E36E931003A069D89
-:102B000036A39D3674288BF02EFFA40D2B4429EE9E
-:102B1000421944CD442F455A453A269E367501C385
-:102B200032C086C48BF0A29E362EFFA420498B2E85
-:102B3000993623ED7501C3BF0100BE000085FD7508
-:102B40001A46D1E7E9F6FF2A0029002800270025C8
-:102B50000005000700260006002000F7D7213E9957
-:102B600036D1E62E8BB4472BE94FFFE956FF80267E
-:102B70009E36FF7517F7064C370100750FF6069D58
-:102B800036807408F7066637FFFF7507C706663795
-:102B90000000C3F70641370100750BB87F03CD393C
-:102BA000C7064137010033F6B80040850666377422
-:102BB0002180BC5437FF7404FE84543780BC9634A3
-:102BC000FF7404FE84963431066637833E66370010
-:102BD000740546D1E873D4C3A1F433A90088740BFB
-:102BE000A9001075098B1E4337FFE3E9D700C7061C
-:102BF00035370500C70643371E2CF706F4330008A7
-:102C00007406C7064337102CB88003CD39E9CDFED2
-:102C1000A9000874D9FF0E353775EDE96600A900E3
-:102C20000875CBFF0E353775DF810EC234C000F654
-:102C3000069D36807448810E9B360080F7069B36D1
-:102C40000100741EB87D03CD3A810E9B368000834F
-:102C5000269B36FEC7060F370200C606A03604E9DB
-:102C60007BFE803EA036047507833E0F3701750555
-:102C7000C606A03606C7060F370200E95FFEBE0291
-:102C800000E94AFE80269E36FF753AF6069D36809C
-:102C9000742DF7069B360020752BC606A03606FF5E
-:102CA000069434830E6637208E06303426F7060AE3
-:102CB000000001740726810E08000001E90600BE2D
-:102CC0000400E909FE810EAF360008A1AF36E70621
-:102CD000E50AA90080740E8126AF36FFF7A1AF3652
-:102CE000E706E909FFE9F5FDC70641370000830E55
-:102CF000993602E9E7FD80269E36FF751DF7069B93
-:102D00003600407505830E993608830E993620816A
-:102D1000269B36FFBFB88503CD39E9C0FD803E9EB6
-:102D200036067407803E9E360A7534F6069D368058
-:102D30007506BE0700E996FDC606A03604833E0F61
-:102D40003702741BC7060F370400803E9E36067597
-:102D50000EF7069B3640007506C7060F370300E9DD
-:102D60007BFD803E9D36047512810EC2340040FF0B
-:102D7000069234C606A03606E962FDBE0500E94D9E
-:102D8000FDF6069D36807519830EC23404BE06001A
-:102D9000E93BFD80269E36FF75C5FF063137E90009
-:102DA000008326C234BFC606A03606E92FFDE50A19
-:102DB0005025C3BFE70A5880269E36FF750DA9002F
-:102DC000407508C606A03606E912FDB88303CD3962
-:102DD000C3B87C03CD39F706F43300107509C70674
-:102DE00033370200E9F6FCFF0E33377403E9EDFCDC
-:102DF000FF068E34E8F719830EC23408BE0300E9DB
-:102E0000CCFC0000000000000000000400040405E9
-:102E1000040404000300030300000000000000009D
-:102E20000004000808050808080003000303000068
-:102E3000020404040400000800000A1400001A0040
-:102E40001C001E2000000441060B08C2FFE704031B
-:102E500006040405040604870403060404854EA240
-:102E600004CF04CDC706A2370000C706A63700006E
-:102E700026A12000257FFFA3F53626A12200A3F777
-:102E80003626A12400A3F936E83B198BF0268B0ED9
-:102E90000E002BC883E90EB8018083F9047C51260B
-:102EA0008A542888161C3740268B6C2686CD3BCD4D
-:102EB00086CD890EA43775384032FF268A5C29807A
-:102EC000FB15772580FB0A742080FB01741BB80476
-:102ED000802E3A97022E74072E3A97182E751133CA
-:102EE000C080FB09754F8BF3C326C7060400FFFFA4
-:102EF0005052A1A43786C4263B0626007C32268188
-:102F00003E260000047E298D742A268B1422D2745A
-:102F10001F80E6BF80FE097517C706A23701008033
-:102F2000FA04750C268B4402A3033786C4A3D0345D
-:102F30005A58E9B1FFBD72372E8A872E2E22C074EF
-:102F40001605442E8BF82E8B053E89460083C5025C
-:102F500083C70222E47DEF8D742A83E9047503E9B7
-:102F6000A100268B1422D27503E97C00C706A63780
-:102F70000100BF72378B0583C70280E6BF80E43F44
-:102F800080FE09752280FA04755EC706A23701002B
-:102F9000268B4402A3033786C4A3D03486C4C70655
-:102FA000A6370000E947003BFD7E15268B04A840AC
-:102FB0007406B80780E938FF32C0268B04E92E007A
-:102FC0003AF475B1C745FE000080FE22750D3AD077
-:102FD0007716C706A6370000E913003AD07509C76F
-:102FE00006A6370000E90600B80580E902FF32F6C0
-:102FF00003F22BCAB8058023C97603E964FF740382
-:10300000E9EDFE33C0BF72378B1547473BFD7F1B91
-:10301000F6C6807416F706A63701007406B8088055
-:10302000E9C3FEF6C64074E0B80780E9B8FE7D4209
-:10303000A34544294429B728E228EE2BF228F52895
-:103040000129AC2A4429442944294429442900005F
-:10305000733600000336C535833545350735D23420
-:1030600045340000000000000000000000000000E7
-:103070000000A6380000E03800000000000000005A
-:103080000000000000000000000000000000000040
-:10309000F2330000A6336033FD32BC3277323C326B
-:1030A000FB316A310A31E0E0101010E0E0E0E000AE
-:1030B0000000000000000000000000000000000010
-:1030C000000000000000E000E0E0E0E0E0E0E0E020
-:1030D000E033FF26F6061A0080741B2680261A00AD
-:1030E0007F268B3E260083E71F740B26800E200070
-:1030F0008026013E0E00C3602E8B84A63026A318C6
-:1031000000D1E62EFF94503061C326C7060400C4E8
-:103110002A26C7060E00160026C706060006002649
-:10312000C606190000E8BF05E8980526C706260070
-:10313000000826C60628004026C60629002ABF2AFF
-:103140000026C6050426C645012AA1933733DBA90C
-:1031500040007502B301A900107402B788A90008E5
-:10316000740380CF4426895D02C3830EC2342026B7
-:10317000C70604006B2B26C7060E00300026C706C4
-:1031800006000A0026C7060A00040026C606190023
-:1031900000E86905E82C0526C7062600002226C699
-:1031A0000628006026C606290029BF2A0026C60573
-:1031B0000826C645012D8D7D02BE5437B90300F3A4
-:1031C000A526C6050826C645012E8D7D02BE5A37A6
-:1031D000B90300F3A5E8D405E86405B90600BE54B8
-:1031E000378D2E2C00268B4600290483C60283C50A
-:1031F0000283F90475024545E2EBC326C7060400C5
-:10320000C42A26C7060E00240026C70606000600AC
-:1032100026C606190000E8E404E8A70426C7062627
-:1032200000001626C60628006026C606290028BF0C
-:103230002A00E85B06E87405E80405C326C706040F
-:1032400000C42A26C7060E001A0026C70606000676
-:103250000026C606190000E8A304E8660426C7068F
-:103260002600000C26C60628006026C60629002770
-:10327000BF2A00E82105C326C7060400C42A26C7C2
-:10328000060E00200026C70606000A0026C7060A0A
-:1032900000040026C606190000E84B04E8240426B2
-:1032A000C7062600001226C60628004026C60629A4
-:1032B0000026BF2A00E8F404E88404C326C70604F5
-:1032C00000C42A26C7060E00340026C706060006DC
-:1032D0000026C606190000E80D04E8E60326C70626
-:1032E0002600002626C60628004026C606290025F8
-:1032F000BF2A00E8B604E84604E8FA04C326C70675
-:103300000400C42A26C7060E003800A1A237500BBD
-:10331000C0750726C7060E00340026C7060600063D
-:103320000026C606190000E89903E8A4FD26C74553
-:1033300026002A580BC0750626C745260026A11C64
-:1033400037C1E0042688452826C645292483C72A94
-:10335000E82904E8A004E82205E8F803E80904C322
-:1033600026C7060400C42A26C7060E00320026C758
-:10337000060600060026C606190000E84503E850C8
-:10338000FD26C745260024A11C37C1E00426884538
-:103390002826C645292383C72AE8E003E86C04E809
-:1033A0008A04E89C04C326C7060400C42A26C7066C
-:1033B0000E00340026C7060600060026C6061900C1
-:1033C00000E8FF02E80AFD26C745260026A11C37B3
-:1033D000C1E0042688452826C645292283C72AE855
-:1033E0009A03E8C703E85703E8F803E87804E88A93
-:1033F00004C326C7060400744526C7060E003E0017
-:1034000026C7060600060026C7060A00040026C6D0
-:1034100006190000E8FC02E8A902833E8D37037517
-:10342000019026C7062600003026C6062800502632
-:10343000C606290020BF2A00E8D003E80103E8B54A
-:1034400003E89F03C326C70604006143B9F0008365
-:10345000E90226890E0E0026C7060600020026C6CF
-:103460000619000026C7061A00000026C7061C0021
-:10347000000026C7061E000000E8470283E90E860A
-:10348000CD26890E260086CD26C60628000026C633
-:1034900006290008BF2A0083E90426890D26C645AF
-:1034A00001268D7D0283E902BB0100B830304B75E7
-:1034B00017BB0A008AC4268805B03180C40180FC8D
-:1034C0003A750AB461E90500268805040147497583
-:1034D000DDC326C7060400044526C7060E001200F9
-:1034E00026C7060600060026C606190001E8E50103
-:1034F000E8D00126C7062600000426C606280000DC
-:1035000026C606290007C326C7060400C42A26C704
-:10351000060E00200026C7060600060026C606196D
-:103520000006E80402E89B0126C7062600001226D2
-:10353000C60628000026C606290006BF2A00E86B3A
-:1035400002E8FB01C326C7060400C42A26C7060EEC
-:1035500000200026C7060600060026C6061900053C
-:10356000E8C601E85D0126C7062600001226C60649
-:1035700028000026C606290005BF2A00E82D02E81B
-:10358000BD01C3FF06823426C70604003D4126C79D
-:10359000060E00200026C70606000E0026C60619E5
-:1035A0000004E88401E81B0126C706260000122655
-:1035B000C60628000026C606290004BF2A00E8EB3C
-:1035C00001E87B01C326C7060400674226C7060E32
-:1035D00000200026C7060600080026C606190003BC
-:1035E000E84601E8DD0026C7062600001226C606CA
-:1035F00028000026C606290003BF2A00E8AD01E81E
-:103600003D01C3FF06843426C7060400674226C76F
-:10361000060E00240026C7060600080026C6061966
-:103620000002E80401E89B0026C7062600001626D3
-:10363000C60628000026C606290002BF2A0026C6A4
-:10364000050426C6450101A10F3786E0F6066F374F
-:1036500001750F3906CC3474098BD8B88903CD397C
-:103660008BC3A3CC34268945028D7D04E83D01E857
-:10367000CD00C326C7060400C42A26C7060E001CB8
-:1036800000A1A237500BC0750726C7060E00180010
-:1036900026C7060600060026C606190000E8230015
-:1036A000E82EFA26C74526000E580BC0750626C719
-:1036B0004526000A26C645290083C72AE8BD00E83A
-:1036C000FF00C3565751B90300BED136BF2000F3E7
-:1036D000A5595F5EC3565751B90300BED136BF1A14
-:1036E00000F3A5595F5EC326C7061A00C00026C7AF
-:1036F000061C00000026C7061E000010C326C706D1
-:103700001A00C00026C7061C00000026C7061E00BF
-:103710000008C326C7061A00C00026C7061C000002
-:103720000026C7061E000002C326C7061A00C000F6
-:1037300026C7061C00FFFF26C7061E00FFFFC32684
-:10374000C6050826C64501028D7D02BE0537B903B0
-:1037500000F3A5C326C6050426C6450106A10D37FC
-:10376000268945028D7D04C326C6050426C645016B
-:1037700007A10B372689450283C704C3A1A2370BD3
-:10378000C0741326C6050426C6450109A1033726C1
-:1037900089450283C704C326C6050826C64501021B
-:1037A0008D7D02BE0537B90300F3A5C326C6050605
-:1037B00026C645010B8D7D02BEEF36B90200F3A58A
-:1037C000C326C6050626C6450120A16837268945B9
-:1037D00002A16A3726886505C1E00426884504836E
-:1037E000C706C326C6050426C645012126C74502CD
-:1037F000000083C704C326C6051426C64501228DD2
-:103800007D02BE1F37B90900F3A5C326C6050C26E5
-:10381000C64501238D7D021E0E1F8D364054B9030F
-:1038200000F3A533C0B90200F3AB1FC326C60508D9
-:1038300026C64501288D7D02BED136B90300F3A509
-:10384000C326C6050826C6450129A1C23486E0263E
-:10385000894502A19B362689450426884506268887
-:1038600045078D7D08C326C6050626C645012B8D56
-:103870007D02BEBB36B90200F3A5C326C6050626E7
-:10388000C645012C8D7D02BEE536B90200F3A5C305
-:1038900026C6050426C6450130A1373786E02689AD
-:1038A00045028D7D04C326C7060E001E0026C706EE
-:1038B0000600020026C606190000E86CFEE803FEBA
-:1038C00026C7062600001026C60628003026C60693
-:1038D000290011BF2A00E83500E84500E85500C37B
-:1038E00026C7060E00120026C7060600020026C6DE
-:1038F00006190000E832FEE8C9FD26C706260000CA
-:103900000426C60628003026C606290013C326C68C
-:10391000050426C645010C26C74502000183C704DD
-:10392000C326C6050426C645010E26C74502000269
-:1039300083C704C326C6050426C645012126C745FC
-:1039400002000083C704C300000000000000000064
-:10395000B339C939833AB339B339B3391C3A1C3A4C
-:10396000A3B634A1E936A31137A3D234A1EB36A311
-:103970001337A3D434A1ED36A31537A3D634A10150
-:1039800037A3CE34A1F736A31737A3DC34A1F93619
-:10399000A31937A3DE34F7069B360200750C33C03B
-:1039A000A09E368BF02EFFA45039E90F01BE070010
-:1039B000E919F1F6069D368074F3C606A03602C6F4
-:1039C000066E3708C606703702B88803CD39F6068A
-:1039D0006F3701754AA1D1363A06E93675413A2664
-:1039E000EA36753BA1D3363A06EB3675323A26EC09
-:1039F00036752CA1D5363A06ED3675233A26EE36C5
-:103A0000751DC606703702FE0E6E37750FB8880337
-:103A1000CD3A830E9B3612C606A0360CE9A8F0A15B
-:103A20000537263B0620007540A10737263B0622B6
-:103A3000007536A10937263B062400752CA09E365A
-:103A40003C02750826F6061800087547C6066E374C
-:103A500008FE0E7037751CC606703702E5020D01B0
-:103A60000425EFFFE702E95EF0C606703702C606DE
-:103A70006E3708E50225FFFB0D010025EFFFE70289
-:103A8000E944F0F7069B360001742526F606180077
-:103A90000875ED81269B367FFFB88903CD3AB8843F
-:103AA00003CD3AC606A036068326C234AFE917F026
-:103AB000A101373A260F377FC7E9F7FE83269B36E9
-:103AC000ECE82A0D810E9B368000BBFF7FCD53C6EC
-:103AD00006A03602E9F0EF830E9B3611C606A0362B
-:103AE0000CE9F9EF443B2C3BC72A6B3B443BC72A0C
-:103AF000C72AC72AA3B634810EC2340020F7064174
-:103B0000370100741B8CC3C70641370000B87F0320
-:103B1000CD3A33C08EC0BF5437B90600F3AB8EC365
-:103B200033C0A09E368BF02EFFA4E43AF7069B36F6
-:103B3000000175218326C234BFA1A936E700A19BED
-:103B400036E90900A19B3681269B36FFDFA90020BC
-:103B50007506E96E00E96FEF830E993604C70637E4
-:103B6000370100C606CA3401E95800830E9B36406F
-:103B7000E85800A105373B06E9367537A107373B02
-:103B800006EB36752EA109373B06ED367525FE0E80
-:103B90007137751CB88703CD3A830E993610A15042
-:103BA00037C7065037000009069936C606A0360802
-:103BB000E914EF830E993604C70637370300C606AB
-:103BC000CA3403C606A0360AE9FCEEA1D136263B6C
-:103BD0000620007515A1D336263B0622007512A1DA
-:103BE000D536263B062400750FC38D362000E90B21
-:103BF000008D362200E904008D36240083C402F7CC
-:103C000006E63401007415263A047708720E263A47
-:103C100064017208C606A03606E9ABEEE87C0A8CA1
-:103C2000C03DFFFF741B26C60618001026C70604F9
-:103C300000493C26C70606000C00CD50B94E00E2F4
-:103C4000FEC606A0360AE994EEE97BEE8F3C063DFF
-:103C5000063D063DD23CEA3C063D063DA3B6348116
-:103C600026C234AFDFC7064C370000B88A03CD3A0E
-:103C7000803E9D3604750C803E9E36067405C60651
-:103C80009F360633C0A09E368BF02EFFA44C3CF727
-:103C9000069B360020750E81269B36FFBFB88B032E
-:103CA000CD3AE95400F7069B3600017403E917EE9C
-:103CB000C70637370200C606CA3402830E99360497
-:103CC000830E503704F6069D3680752AE81F0BE9EF
-:103CD0002700F7069B36000175D3C7063737020069
-:103CE000C606CA3402830E993604C606A03600F60C
-:103CF000069D36807403E8DE0A81269B367CFFBB76
-:103D0000FFFFCD53CD54E9BEEDA3B634E8AD01B805
-:103D10008603CD39C7064C3700008126C234AFDF99
-:103D2000F6069D36807434F7069B3600207456F7ED
-:103D3000069B3600017427E83501721CBE004085E1
-:103D400036C23475080936C234FF069234E88B0156
-:103D50007306810E99368000E96CEDE9B500C7065F
-:103D600037370200C606CA3402830E993604830E22
-:103D7000503704803E9E36087403E85A0AE8EF0084
-:103D800072D6E9C8FF803E9E360A7512C606A03676
-:103D900000F7069B3608007402CD54E8390A8126E4
-:103DA0009B36FFBFE8C80072AFB88B03CD39E99CE2
-:103DB000FFF6069E36FF7558A3B634E8FE0081264E
-:103DC000C234FFBFF6069D36807448F7069B360066
-:103DD000207422F7069B3600407508E89100723087
-:103DE000E9220026A10C00A960007524810E663727
-:103DF0000008E9D2ECC7064C370000E871007210E9
-:103E0000B88B03CD39E8D3007306810E9936800054
-:103E1000E9B4EC803E9D3604750C803E9E360674F7
-:103E200046C6069F3606F7069B360001740C803E98
-:103E30009D36087505C6069F360AE8320072D1E83D
-:103E40009900803E9D36087513810E99368000F7E3
-:103E5000069B3600207508B88B03CD39E968ECC69F
-:103E6000069F360AE960ECB88603CD3AE958EC269D
-:103E7000A10C00A9600074088126C234FFBFF9C3F9
-:103E8000F7069B3600407413810E66370008E84A37
-:103E9000007306810E99368000F9C3810E9B3600AF
-:103EA0004080266F37FE81269B367FFFC606A036F0
-:103EB00000F8C3810E99360001E921EC26A120000B
-:103EC000A3FB36A3AA3426A12200A3FD36A3AC345B
-:103ED00026A12400A3FF36A3AE34C3A10537263B99
-:103EE0000620007519A10737263B062200750FA191
-:103EF0000937263B0624007505E80200F8C3511E69
-:103F0000068BC78D362000BF0537B903001E061F7C
-:103F100007F3A58BF88D362000BFA034B90300F35A
-:103F2000A5071F598BF8A10737A3A634A10937A30A
-:103F3000A834F9C3C606B63401E98BEBE887088BD1
-:103F4000F00512002629060E00268B442A263A0682
-:103F50000E00755B26832E0E000280FC277550260E
-:103F60008B442CA9FFFF75478BFE33C026F6453CDA
-:103F7000807406268A453A241F03F826807D450969
-:103F8000752D8CC28E0638348EDA8B0E0E00268983
-:103F90000E0E008D742CBF1800F3A433C08ED826EB
-:103FA000C7060400B53F26C70606000600CD50B878
-:103FB0000680E9EFE926A10C00A39337830E99361A
-:103FC00001E900EB26803E1C00FF752F26803E1E77
-:103FD00000FF752726F7060C004000751BA1D1369F
-:103FE00026A31A00A1D33626A31C00A1D53626A3EA
-:103FF0001E00B80A80E83607E9E2EAFF069034BE00
-:104000000A00C606B63401F6069D36807505830E95
-:10401000C23401E9B6EA803E9D360A750F26A10C2E
-:10402000002507003D04007503E87900A1F33686FA
-:10403000E0E71EA3E33681260B37000381260D3708
-:104040007B7F830E0D3748E81E0026A10C00250754
-:10405000003D0400740926F7060C0020007506B820
-:104060000100E93FE9E95FEAC70641370000B87F90
-:1040700003CD3AA11D37A3C43486E0687F031FA394
-:10408000060033C08ED8A10B37A3B234A10D37A3DD
-:10409000B434A1F336A3C834A1EF36A39C34A1F104
-:1040A00036A39E34C3800E9D3680BE0000E8B40760
-:1040B000B87B03CD3AB87C03CD39C706333702004D
-:1040C000A1E536E72EA1E736E73EB88203CD3AF701
-:1040D000069B3600207503E8FD06A1D336A3EF3614
-:1040E000A39C34A1D536A3F136A39E34C3F6069D16
-:1040F00036807431BE2200E91700F6069D368074C2
-:1041000024BE2300E90A00F6069D36807417BE24FB
-:104110000056E8A8058CC03DFFFF5E7405E8D7EFA8
-:10412000CD50E91FE8E99FE9000000000000000011
-:10413000B88403CD3AB88A03CD39E9F700803EA0B0
-:104140003608752EA9D007752CA1B1360D0004E7ED
-:1041500008E50025FF73E700B88A03CD3AE8C306F7
-:1041600033C0E70EE50A25C317E70ACD54C606A0FB
-:104170003600E968E9BE0400E93FE983269B36BFC3
-:10418000C606713703B88603CD3AB88803CD3AB86E
-:104190008303CD3AB88703CD39810EC2340020E9BC
-:1041A0009200E84906B88703CD39BBFF7FCD53B8ED
-:1041B0008403CD3AB88803CD3AB88B03CD3AB8839F
-:1041C00003CD3AB88603CD3AB88503CD3AC3E500AE
-:1041D00025FF53E700830EC234408326C234EFE844
-:1041E0000C06BBFF7FCD53B88A03CD3AB88503CD0B
-:1041F0003AB88603CD3AB88303CD3AB88703CD3AAF
-:10420000B88B03CD3AB88403CD3AB88903CD3AC30D
-:10421000830EC23450E81804E8D305F6066F370160
-:104220007512B88903CD39833E0F37007506C7066E
-:104230000F370400A19D3680FC087405B88403CDB7
-:1042400039E5020D010825EFFFE702A19D3686E062
-:1042500032E48BF0D1EE33C00D20000906AD36A15B
-:10426000AD36E704E953E8E95AE833C0A01B37D17B
-:10427000E03A06A0367503E9BAFFE960E8C70641EF
-:10428000370000E8C1E1E86A0633C00D4100E75697
-:10429000A1B1360D0010E708E50225F9FF0D030076
-:1042A000E702A1B336E70AA1AF36E706A1AD36E7CC
-:1042B00004E87C03E89F03C7061D3700C8C7060B48
-:1042C000370003C7060D377B7F33C0A39936A39B06
-:1042D00036A39D36A39F36A34C37A3F336A3EF3600
-:1042E000A3F136E882FDC6069F3602E9EFE7E50254
-:1042F0000D018825EFFF0D00400D0004E702E8F2F4
-:1043000005E50A0D4000E70A33C0A38137A38537CE
-:10431000A38337A38737A38937E5000D0084E7001F
-:10432000B88C03CD39B88000CD35C706AA02FFFF8F
-:10433000E50025FF7BE700810E9A378000B87E03F9
-:10434000CD3933C0E70EBE08008E063834E8A7ED3D
-:104350008326EF34DFFF068137CD50830EEF342004
-:10436000C3F7069A378000743DA9D0077410A900DE
-:1043700004741233C0E70EFF068737E9D2FFFF0649
-:104380008537E9CBFFFF068337E9C4FF83269A37D9
-:104390007FA18937030687373D05007F01C3BBFF37
-:1043A0007FCD53E90000E50225FFFB25EFFF0D015E
-:1043B00000E702A183373B0646377F2AA185373BBA
-:1043C0000648377C21A18937030687373D05007FE2
-:1043D00015C6069F3604E50225FFF70D010025EFFF
-:1043E000FFE702E9F7E6BE0100F7069B360300741B
-:1043F0000A83269B36FC830EC23404E9D0E6B87BE0
-:1044000003CD39E5020D016025EFFFE702C706F194
-:10441000342003B88E03CD39C38126C2347FFF8098
-:104420000E6F3701F7069B36030074D2B87B03CDBD
-:104430003AB87D03CD3983269B36EF33C0B08AA2CC
-:104440009F36A29D36C7064C370100C7060F3704BA
-:1044500000F7069B3640007506C7060F370300B805
-:104460008D03CD39E800D5E5020D014025EFFF8B26
-:10447000D8B87C03CD39C706333702008BC30D0093
-:104480002025F9FF0B06E83AE702C3FF0EF1347569
-:1044900001C3E54EA901007512E500A900047505E8
-:1044A0000D0004E700B88E03CD39C3E500A9000470
-:1044B00074F325FFFBE700E9EBFFC606A036048393
-:1044C000269B36FC810E9B368000E910E6B88E03F1
-:1044D000CD3ACD54810EAF360018A1AF36E706B8FD
-:1044E0007B03CD39A1D336A38F37A1D536A391371E
-:1044F000C7068B370200C7068D370200830E993638
-:1045000040E9D9E5803E9F36067515A9D00775ECC0
-:10451000250018750EFF0E8B3775E1C6069F36080D
-:10452000E9BAE5FF0E8D3775D3BE0800E99FE5B8FF
-:104530007B03CD39F7069B3600207408C6069F36EC
-:104540000AE90D00F7069B360040740BB88B03CDCB
-:1045500039810E99368000E983E5B87B03CD39C7F0
-:10456000068B370400C7068D370400810E9936008C
-:1045700002E969E5F6069D3680751BA9D00775EB43
-:10458000A90018750CFF0E8D3775E0E817FBE94C94
-:10459000E5B88203CD39C3FF0E8B3775CEBE090057
-:1045A000E92BE5C7063D370000C7069B360000E84B
-:1045B0003C028126AF36FFE7A1AF36E70681269B96
-:1045C00036FF7FE5020D010025EFFF25FFDFE70243
-:1045D000BBFF7FCD5333C0A39D36A39F36E8500069
-:1045E000E87300B88103CD39C3F7069B3603007426
-:1045F0000DC6069F3602C606A03600E9DFE4830E2C
-:104600009B3610C70699360000E8E702E5560D0212
-:1046100000E756C706A80200008B363D37E8440283
-:10462000C606A0360EE9B5E4000000000000000058
-:1046300006B88A03CD3AB88503CD3AB88603CD3A99
-:10464000B88303CD3AB88703CD3AB88B03CD3AB8D7
-:104650008803CD3A07C306B88803CD3AB87B03CDAB
-:104660003AB88203CD3AB87F03CD3AB87C03CD3A4D
-:10467000B87E03CD3AB88003CD3AB88103CD3AB8BD
-:104680008403CD3AB88903CD3AB87D03CD3AB88DCD
-:1046900003CD3AC7064137000007C3068E063834FB
-:1046A0001F8B0E0E0026890E0E00BE1800BF1800CC
-:1046B000F3A4061E07CD340733C08ED8C326F606F2
-:1046C000200080744433C026A02600241F8BF026CF
-:1046D0008B5C28891E6A37068E0638341FC0E304B7
-:1046E00026885C288BC6B90600BE2000BF1A00F3DE
-:1046F000A48BC883C706F3A426812626001F802624
-:10470000813626000080E9A9FF268B1E2800891E1D
-:104710006A37068E0638341FC0E30426881E280038
-:10472000B90600BE2000BF1A00F3A4E984FF86C4C6
-:10473000A36837E887FFF7066A370F007410803EDA
-:104740009E36007509BE0000E8ACE9CD50C3C350E9
-:10475000560633C026F606200080740626A02600E2
-:10476000241F8BF0268B5C2686FB83EB04744F831F
-:10477000C62A8CC08ED8B9070033C08EC0BF72372E
-:10478000F3AB33C98A0C80F9007503E930003BD9DB
-:104790007303E929002BD98A4401253F0074193D90
-:1047A0000B007D14D1E08BF82E8BBD5C498D74021B
-:1047B00083E902F3A4E9020003F123DB75C433C0EB
-:1047C0008ED8075E58C333C026F6062000807406D4
-:1047D00026A02600241FC3E50A25C3BFE70AB88622
-:1047E00003CD39B88303CD3981269B367CDFB8856C
-:1047F00003CD3AE50225FFF30D010025EFFFE702A7
-:10480000E50025FF53E700A1E73625FFFEA3E736C5
-:10481000E73E83269936CF810EAF360010A1AF3622
-:10482000E706C3E5020D010C25EFFFE702A1E7361D
-:104830000D0001E73EA3E736810E9B360020830E74
-:1048400099362081269B367CBF810EAF360010A1A1
-:10485000AF36E706B88603CD39B88503CD39B883BE
-:1048600003CD3AC30BF67549068E063234803EE01E
-:104870003401751B26893606008E06323426F7066B
-:104880000A000020740726810E0800002007C3805C
-:104890003EE33401751926893606008E0632342629
-:1048A000F7060A000010740726810E0800001007A2
-:1048B000C3E9B4FF50515733C0B906008EC0BFD111
-:1048C00036F3AE5F740C26F6060000C07504F85986
-:1048D00058C3F9E9F9FF8B050B45020B4504C35298
-:1048E00050E506251E003D1E0075F6B80180E75A0A
-:1048F000585AC3E8E9FF50E50225FF7F0D01002566
-:10490000EFFFE7020D0080E702A1AD36E704A1AF9B
-:1049100036E70658C3000000000000000000000059
-:104920002E2BCE4110427B413041A241AF4544295C
-:10493000C72AC72A6039F43A5C3C093DB13D343F8F
-:10494000C72A3C3FC72AC43F16401640ED40FA40F4
-:104950000741C72AC72AC72AC72AD65200000137EB
-:10496000E936F336EF361D370D370B379C370337F3
-:10497000FB36622D4006D12DF401BA4440068C432B
-:104980006400E82CC800D82B0500E9455000974585
-:10499000FA00AE2D04016A420200F62CBC02932DEF
-:1049A000DC051D2D6400A12D1400D73A0807812DC8
-:1049B0006400B33E020030436400C52CF4018B4414
-:1049C00002000000000000000000000000000000E5
-:1049D000803EFD3402740CE82005C706A1360000B5
-:1049E000E99AF8FF06C033E810058B363D37E873C7
-:1049F000FEC3CD34E9E805C706A3360000C706416B
-:104A0000370000E8EDFE33C00D4100E756A1B13696
-:104A10000D0010E708A1B336E70AA1AF36E706A1FB
-:104A2000AD36E704E82B09C7061D3700C8C7060BDB
-:104A3000370003C7060D377B7F33C0A39B36A39D8A
-:104A400036C7064C370100C6069E36FFC706053737
-:104A50000000C70607370000C70609370000A3F3A8
-:104A600036A3EF36A3F136E8FEF5E50225F9FF0D92
-:104A700003000D008825EFFF0D00400D0004E70244
-:104A8000B88F03CD39B88000CD35C706AA02FFFF25
-:104A9000A1A936A3A7360D00A40D0008E700A3A91D
-:104AA00036C706A3360100C706A5360C00833EA50F
-:104AB00036007509C7063D370500E913FFFF0EA54F
-:104AC00036BE1100E82205B89003CD39C3833EA35A
-:104AD000360174D9C3B89003CD3A26A02B00268B9B
-:104AE0001E2C00CD34833EA336017403E9F0043C50
-:104AF0000F751E81FB0002751826A12000A3053743
-:104B000026A12200A3073726A12400A30937E9091B
-:104B100000C7063D370100E9B6FEC706A33602000E
-:104B2000C6069E36FFE8CBFDE81CD933C0A3853707
-:104B3000A38337A38737A38937B89103CD39B880CA
-:104B400000CD35C706AA02FFFFE50025FF53E700A9
-:104B5000810E9A378000B89203CD3933C0E70EBE7C
-:104B600008008E063834E88EE526C70604007D4B23
-:104B70008326EF34DFCD50830EEF3420C3F7069A3F
-:104B80003780007432A9D007740CA90004740E3366
-:104B9000C0E70EE9DAFFFF068537E9D3FFFF06839A
-:104BA00037E9CCFFC7063D370100E936FE83269A78
-:104BB000377FBBFF7FCD53E5000D00ACE700E5027A
-:104BC00025FFFB25EFFF25FFF70D0100E702A1837D
-:104BD000373B0646377FCDA185373B0648377CC437
-:104BE000C706A3360300BE1300E8FD03B89303CD48
-:104BF00039B89403CD39B89603CD39B89503CD397A
-:104C0000BE0600E8E303E9D603833EA3360374013E
-:104C1000C3BE1300E8D203B89403CD39C3B89403DC
-:104C2000CD3A26A02B00268B1E2C00CD34833EA32C
-:104C300036037403E9A8033C0D753E83FB00753908
-:104C4000E5020D0020E702B89303CD3AC706A3366C
-:104C50000400BE0000E80CFCC6069D3680C6069E19
-:104C60003600C70633370200B89A03CD39E8FC0096
-:104C7000C7064C370000E96603C7063D370800E960
-:104C800061FD833EA336037509C7063D370500E97C
-:104C900051FDE94A03833EA336047412833EA336D2
-:104CA00005740BCD34C7063D370700E935FDC7064F
-:104CB000A3360600C6069E36FFB89A03CD3AB899C9
-:104CC00003CD3AB89603CD3AB89703CD39B89803D7
-:104CD000CD39B89B03CD39E918FDCD34833EA336D9
-:104CE000047718833EA336037508F7069B36000148
-:104CF0007509C7063D370100E9E8FCE9E102CD345A
-:104D0000833EA336027709C7063D370100E9D3FC8D
-:104D1000833EA336047705B89603CD39E9C00283F4
-:104D20003EA33603751026A10C00250700503D0454
-:104D3000007503E83600A1F33686E0E71EA3E336EC
-:104D400081260B37000381260D377B7F830E0D37BD
-:104D500048E814F3583D0400740926F7060C0020B7
-:104D6000007506B80100E97A02E986FCA1E536E79C
-:104D70002EA1E736E73EA1D336A39C34A1D536A3B6
-:104D80009E34C326803E1C00FF752F26803E1E00E9
-:104D9000FF752726F7060C004000751BA1D13626AB
-:104DA000A31A00A1D33626A31C00A1D53626A31E24
-:104DB00000B80A80E92C02E938FCFF069034BE0AEC
-:104DC00000C606B63401F6069D36807505830EC210
-:104DD0003401CD34E90CFC833EA336037509C706C4
-:104DE0003D370500E9FCFBE5020D03000D00880DD1
-:104DF00000400D0004E702C706A3360500C6069E64
-:104E000036FFBE0200E8E101B88903CD3AB89A0343
-:104E1000CD3AB89903CD39B89703CD39B89803CDB9
-:104E200039E9BB01833EA33603740A833EA33604EB
-:104E30007403E9AA01BE0600E8AE01B89503CD39B6
-:104E4000E99C01833EA336057403E99201BE02008A
-:104E5000E89601B89903CD39E98401C7060F3705F3
-:104E600000E97B01E50225FFDFE702C706A336075D
-:104E700000C7060F370500E96501E8D504C6069DA1
-:104E80003600C7069B360000C7060F370500C70669
-:104E9000A8020000C7064C370100E50225F9FF0D06
-:104EA00003000D008825EFFF0D00400D0004E70210
-:104EB000E967FCB89A03CD39F706F4330010750999
-:104EC000C70633370200E91601FF0E33377403E9D2
-:104ED0000D01FF068E34830EC23408C7063D37032A
-:104EE00000E9FFFAC35250BAE000B80010EF585A78
-:104EF000C3C7063D370000E9E9FAFAE85404B88070
-:104F0000038EC026C7060400D82BB87F038EC026A8
-:104F1000C7060400E82C33C08EC0A1A736A3A9366B
-:104F2000A1A936E700A1AB36E702C70605370000A6
-:104F3000C70607370000C70609370000C6069D36BA
-:104F400000C6069E36FFC7069B360000C706A3367E
-:104F50000000C7060F370000C706A8020000C706FA
-:104F60004C3701008126AF36FFE7A1AF36E706BB1D
-:104F7000FF7FCD53E87CF9E5560D0200E756FBC3F1
-:104F80008D3EC0538D36F038B90E008B1E303489FB
-:104F90005C022E8B45028944062E8B0589440483CE
-:104FA000C70483C610E2E8B880038EC026C7060493
-:104FB00000E251B87F038EC026C7060400B2523308
-:104FC000C08EC0C706A1360100C7060F370500C353
-:104FD00033FF8E06A6028B36A4022EFFA4A053E850
-:104FE0008CDBC3E848F7E9F6FF8E063834E807E1C2
-:104FF00026C7060400DF4FCD50C326C7060A0000AF
-:105000000026FF260400CD34E9D4FFA1D13626398D
-:10501000061A007522A1D3362639061C007518A180
-:10502000D5362639061E00750E26F7060C00400000
-:105030007405830E663740810EAF360010A1AF367F
-:10504000E706833EA336027505CD34E956FB833E61
-:10505000A3360074B1833EA3360577AA26F6060A66
-:1050600000FF75A2E8FDDD50F6069336207503E9D2
-:105070008C0026A10C002507003D07007503E9768A
-:10508000003D05007503E96E00F706E634188075EB
-:1050900003E96A00F706E6340080743526803E296D
-:1050A0000002752D5156578D363E348D3E2000B985
-:1050B0000600F3A65F5E59754526A12000A33E3485
-:1050C00026A12200A3403426A12400A34234E926CD
-:1050D00000F706E6340800740B26803E19000074C1
-:1050E00003E91300F706E6341000741226A0280026
-:1050F000C0E80422C0740726C7060400FFFF582337
-:10510000C07403E9DDFE81269B36FFFE26A1200048
-:105110003B06D136751A26A122003B06D336751000
-:1051200026A124003B06D5367506810E9B3600016C
-:1051300026A12000257FFFA3B83426A12200A3BA10
-:105140003426A12400A3BC348BC686C4A3C034D1AA
-:10515000E680FC097403E8F6F5A105370B0607376E
-:105160000B060937743E26A120003B06053775174C
-:1051700026A122003B060737750D26A124003B0619
-:1051800009377503E91D0026A02800240F3C03748D
-:105190001B3C00750F833EA336047410F7069B3644
-:1051A000000174082EFF94F853E933FECD34C7068E
-:1051B0003D370100E92CF8833EA336057410833E89
-:1051C000A336017E0983EE162EFF942454C3CD34FA
-:1051D000C326A10C003DFF7F740526FF260400E9CD
-:1051E000FDFDA1F433A90088740BA9001075098B8B
-:1051F0001E4337FFE3E99700C70635370500C706AA
-:1052000043372852F706F43300087406C7064337BD
-:105210001A52B88003CD39E9C5FDA9000874D9FF39
-:105220000E353775EDE93000A9000875CBFF0E3556
-:105230003775DF810EC234C000F6069D3680740FCC
-:10524000810E9B360080C7060F370200E990FDC72C
-:10525000063D370200E98BF780269E36FF7530F653
-:10526000069D36807420FF069434830E6637208EA8
-:1052700006303426F7060A000001740726810E085E
-:10528000000001E90900C7063D370400E954F78131
-:105290000EAF360008A1AF36E706E50AA900807414
-:1052A0000E8126AF36FFF7A1AF36E706E949FFE9E1
-:1052B0002DFDC70641370000BE2900E82BFDE91E81
-:1052C000FDCD34833EA336047709C7063D37010080
-:1052D000E910F7E909FDCD34C3C7069B360000E8A5
-:1052E0000CF58126AF36FFE7A1AF36E70681269B96
-:1052F00036FF7FE5020D010025EFFF25FFDFE70206
-:10530000BBFF7FCD5333C0A39D36A39F36E820F368
-:10531000E843F3830E9B3610C70699360000E8D2A7
-:10532000F5E5560D0200E756C706A8020000BE00CC
-:1053300000E830F5C606A0360EB89C03CD39B8801B
-:1053400000CD35C706AA02FFFFC706A1360100E956
-:10535000A5F606B88F03CD3AB89003CD3AB89103BD
-:10536000CD3AB89203CD3AB89303CD3AB89403CD71
-:105370003AB89503CD3AB89603CD3AB89703CD3AEB
-:10538000B89803CD3AB89903CD3AB89A03CD3AB854
-:105390009B03CD3AB87F03CD3AB88003CD3A07C31B
-:1053A000F749F14EDF4FDF4FDF4FDF4FF851DF4F4F
-:1053B000FA4F0B50D151DF4FDF4FDF4FDF4FDF4F41
-:1053C000E44E0600CD4A0400E44E1900AD4BFA004D
-:1053D000824C0807094C1400244E6400D74DF40198
-:1053E000644EBC027A4EE803434E0200B34EF40111
-:1053F0005B4EF401E54E140006500650954CC15228
-:10540000C152FE4CDA4C0650065006500650B751B9
-:10541000B751B751B751B751B7510650D54A065099
-:105420001D4C0650834D1F4D1F4DED40FA40074166
-:1054300037372E3737202079792F79792F797920CE
-:1054400030312E3930202030322F31372F3939206A
-:10545000000000000000000000000000000000004C
-:10546000000000000000000000000000000000003C
-:10547000000000000000000000000000000000002C
-:10548000000000000000000000000000000000001C
-:10549000000000000000000000000000000000000C
-:1054A00000000000000000000000000000000000FC
-:1054B00000000000000000000000000000000000EC
-:1054C00000000000000000000000000000000000DC
-:1054D00000000000000000000000000000000000CC
-:1054E00000000000000000000000000000000000BC
-:1054F00000000000000000000000000000000000AC
-:10550000000000000000000000000000000000009B
-:10551000000000000000000000000000000000008B
-:10552000000000000000000000000000000000007B
-:10553000000000000000000000000000000000006B
-:10554000000000000000000000000000000000005B
-:10555000000000000000000000000000000000004B
-:10556000000000000000000000000000000000003B
-:10557000000000000000000000000000000000002B
-:10558000000000000000000000000000000000001B
-:10559000000000000000000000000000000000000B
-:1055A00000000000000000000000000000000000FB
-:1055B00000000000000000000000000000000000EB
-:1055C00000000000000000000000000000000000DB
-:1055D00000000000000000000000000000000000CB
-:1055E00000000000000000000000000000000000BB
-:1055F00000000000000000000000000000000000AB
-:10560000000000000000000000000000000000009A
-:10561000000000000000000000000000000000008A
-:10562000000000000000000000000000000000007A
-:10563000000000000000000000000000000000006A
-:10564000000000000000000000000000000000005A
-:10565000000000000000000000000000000000004A
-:10566000000000000000000000000000000000003A
-:10567000000000000000000000000000000000002A
-:10568000000000000000000000000000000000001A
-:10569000000000000000000000000000000000000A
-:1056A00000000000000000000000000000000000FA
-:1056B00000000000000000000000000000000000EA
-:1056C00000000000000000000000000000000000DA
-:1056D00000000000000000000000000000000000CA
-:1056E00000000000000000000000000000000000BA
-:1056F00000000000000000000000000000000000AA
-:105700000000000000000000000000000000000099
-:105710000000000000000000000000000000000089
-:105720000000000000000000000000000000000079
-:105730000000000000000000000000000000000069
-:105740000000000000000000000000000000000059
-:105750000000000000000000000000000000000049
-:105760000000000000000000000000000000000039
-:105770000000000000000000000000000000000029
-:105780000000000000000000000000000000000019
-:105790000000000000000000000000000000000009
-:1057A00000000000000000000000000000000000F9
-:1057B00000000000000000000000000000000000E9
-:1057C00000000000000000000000000000000000D9
-:1057D00000000000000000000000000000000000C9
-:1057E00000000000000000000000000000000000B9
-:1057F00000000000000000000000000000000000A9
-:105800000000000000000000000000000000000098
-:105810000000000000000000000000000000000088
-:105820000000000000000000000000000000000078
-:105830000000000000000000000000000000000068
-:105840000000000000000000000000000000000058
-:105850000000000000000000000000000000000048
-:105860000000000000000000000000000000000038
-:105870000000000000000000000000000000000028
-:105880000000000000000000000000000000000018
-:105890000000000000000000000000000000000008
-:1058A00000000000000000000000000000000000F8
-:1058B00000000000000000000000000000000000E8
-:1058C00000000000000000000000000000000000D8
-:1058D00000000000000000000000000000000000C8
-:1058E00000000000000000000000000000000000B8
-:1058F00000000000000000000000000000000000A8
-:105900000000000000000000000000000000000097
-:105910000000000000000000000000000000000087
-:105920000000000000000000000000000000000077
-:105930000000000000000000000000000000000067
-:105940000000000000000000000000000000000057
-:105950000000000000000000000000000000000047
-:105960000000000000000000000000000000000037
-:105970000000000000000000000000000000000027
-:105980000000000000000000000000000000000017
-:105990000000000000000000000000000000000007
-:1059A00000000000000000000000000000000000F7
-:1059B00000000000000000000000000000000000E7
-:1059C00000000000000000000000000000000000D7
-:1059D00000000000000000000000000000000000C7
-:1059E00000000000000000000000000000000000B7
-:1059F00000000000000000000000000000000000A7
-:105A00000000000000000000000000000000000096
-:105A10000000000000000000000000000000000086
-:105A20000000000000000000000000000000000076
-:105A30000000000000000000000000000000000066
-:105A40000000000000000000000000000000000056
-:105A50000000000000000000000000000000000046
-:105A60000000000000000000000000000000000036
-:105A70000000000000000000000000000000000026
-:105A80000000000000000000000000000000000016
-:105A90000000000000000000000000000000000006
-:105AA00000000000000000000000000000000000F6
-:105AB00000000000000000000000000000000000E6
-:105AC00000000000000000000000000000000000D6
-:105AD00000000000000000000000000000000000C6
-:105AE00000000000000000000000000000000000B6
-:105AF00000000000000000000000000000000000A6
-:105B00000000000000000000000000000000000095
-:105B10000000000000000000000000000000000085
-:105B20000000000000000000000000000000000075
-:105B30000000000000000000000000000000000065
-:105B40000000000000000000000000000000000055
-:105B50000000000000000000000000000000000045
-:105B60000000000000000000000000000000000035
-:105B70000000000000000000000000000000000025
-:105B80000000000000000000000000000000000015
-:105B90000000000000000000000000000000000005
-:105BA00000000000000000000000000000000000F5
-:105BB00000000000000000000000000000000000E5
-:105BC00000000000000000000000000000000000D5
-:105BD00000000000000000000000000000000000C5
-:105BE00000000000000000000000000000000000B5
-:105BF00000000000000000000000000000000000A5
-:105C00000000000000000000000000000000000094
-:105C10000000000000000000000000000000000084
-:105C20000000000000000000000000000000000074
-:105C30000000000000000000000000000000000064
-:105C40000000000000000000000000000000000054
-:105C50000000000000000000000000000000000044
-:105C60000000000000000000000000000000000034
-:105C70000000000000000000000000000000000024
-:105C80000000000000000000000000000000000014
-:105C90000000000000000000000000000000000004
-:105CA00000000000000000000000000000000000F4
-:105CB00000000000000000000000000000000000E4
-:105CC00000000000000000000000000000000000D4
-:105CD00000000000000000000000000000000000C4
-:105CE00000000000000000000000000000000000B4
-:105CF00000000000000000000000000000000000A4
-:105D00000000000000000000000000000000000093
-:105D10000000000000000000000000000000000083
-:105D20000000000000000000000000000000000073
-:105D30000000000000000000000000000000000063
-:105D40000000000000000000000000000000000053
-:105D50000000000000000000000000000000000043
-:105D60000000000000000000000000000000000033
-:105D70000000000000000000000000000000000023
-:105D80000000000000000000000000000000000013
-:105D90000000000000000000000000000000000003
-:105DA00000000000000000000000000000000000F3
-:105DB00000000000000000000000000000000000E3
-:105DC00000000000000000000000000000000000D3
-:105DD00000000000000000000000000000000000C3
-:105DE00000000000000000000000000000000000B3
-:105DF00000000000000000000000000000000000A3
-:105E00000000000000000000000000000000000092
-:105E10000000000000000000000000000000000082
-:105E20000000000000000000000000000000000072
-:105E30000000000000000000000000000000000062
-:105E40000000000000000000000000000000000052
-:105E50000000000000000000000000000000000042
-:105E60000000000000000000000000000000000032
-:105E70000000000000000000000000000000000022
-:105E80000000000000000000000000000000000012
-:105E90000000000000000000000000000000000002
-:105EA00000000000000000000000000000000000F2
-:105EB00000000000000000000000000000000000E2
-:105EC00000000000000000000000000000000000D2
-:105ED00000000000000000000000000000000000C2
-:105EE00000000000000000000000000000000000B2
-:105EF00000000000000000000000000000000000A2
-:105F00000000000000000000000000000000000091
-:105F10000000000000000000000000000000000081
-:105F20000000000000000000000000000000000071
-:105F30000000000000000000000000000000000061
-:105F40000000000000000000000000000000000051
-:105F50000000000000000000000000000000000041
-:105F60000000000000000000000000000000000031
-:105F70000000000000000000000000000000000021
-:105F80000000000000000000000000000000000011
-:105F90000000000000000000000000000000000001
-:105FA00000000000000000000000000000000000F1
-:105FB00000000000000000000000000000000000E1
-:105FC00000000000000000000000000000000000D1
-:105FD00000000000000000000000000000000000C1
-:105FE00000000000000000000000000000000000B1
-:105FF00000000000000000000000000000000000A1
-:106000000000000000000000000000000000000090
-:106010000000000000000000000000000000000080
-:106020000000000000000000000000000000000070
-:106030000000000000000000000000000000000060
-:106040000000000000000000000000000000000050
-:106050000000000000000000000000000000000040
-:106060000000000000000000000000000000000030
-:106070000000000000000000000000000000000020
-:106080000000000000000000000000000000000010
-:106090000000000000000000000000000000000000
-:1060A00000000000000000000000000000000000F0
-:1060B00000000000000000000000000000000000E0
-:1060C00000000000000000000000000000000000D0
-:1060D00000000000000000000000000000000000C0
-:1060E00000000000000000000000000000000000B0
-:1060F00000000000000000000000000000000000A0
-:10610000000000000000000000000000000000008F
-:10611000000000000000000000000000000000007F
-:1061200090EAC01500000000000000000000130607
-:00000001FF
-/*
- * The firmware this driver downloads into the tokenring card is a
- * separate program and is not GPL'd source code, even though the Linux
- * side driver and the routine that loads this data into the card are.
- *
- * This firmware is licensed to you strictly for use in conjunction
- * with the use of 3Com 3C359 TokenRing adapters. There is no
- * waranty expressed or implied about its fitness for any purpose.
- */
-
-/* 3c359_microcode.mac: 3Com 3C359 Tokenring microcode.
- *
- * Notes:
- *  - Loaded from xl_init upon adapter initialization.
- *
- * Available from 3Com as part of their standard 3C359 driver.
- */
diff --git a/firmware/Makefile b/firmware/Makefile
index 0d15a3d..344713b 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -26,7 +26,6 @@
 else
 acenic-objs := acenic/tg1.bin acenic/tg2.bin
 endif
-fw-shipped-$(CONFIG_3C359) += 3com/3C359.bin
 fw-shipped-$(CONFIG_ACENIC) += $(acenic-objs)
 fw-shipped-$(CONFIG_ADAPTEC_STARFIRE) += adaptec/starfire_rx.bin \
 					 adaptec/starfire_tx.bin
@@ -86,7 +85,6 @@
 					 qlogic/12160.bin
 fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
 fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw
-fw-shipped-$(CONFIG_SMCTR) += tr_smctr.bin
 fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp
 fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \
 				     ess/maestro3_assp_minisrc.fw
diff --git a/firmware/WHENCE b/firmware/WHENCE
index 182ecb6..8388f02 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -89,18 +89,6 @@
   Copyright (C) 2001 Qlogic Corporation (www.qlogic.com)
 
 --------------------------------------------------------------------------
-Driver: smctr -- SMC ISA/MCA Token Ring adapter
-
-File: tr_smctr.bin
-Info: MCT.BIN v6.3C1 03/01/95
-
-Original licence info:
-
- * This firmware is licensed to you strictly for use in conjunction
- * with the use of SMC TokenRing adapters. There is no waranty
- * expressed or implied about its fitness for any purpose.
-
---------------------------------------------------------------------------
 
 Driver: kaweth -- USB KLSI KL5USB101-based Ethernet device
 
@@ -567,32 +555,6 @@
 
 --------------------------------------------------------------------------
 
-Driver: 3C359 - 3Com 3C359 Token Link Velocity XL adapter
-
-File: 3com/3C359.bin
-
-Licence:
-/*
- * The firmware this driver downloads into the tokenring card is a
- * separate program and is not GPL'd source code, even though the Linux
- * side driver and the routine that loads this data into the card are.
- *
- * This firmware is licensed to you strictly for use in conjunction
- * with the use of 3Com 3C359 TokenRing adapters. There is no
- * waranty expressed or implied about its fitness for any purpose.
- */
-/* 3c359_microcode.mac: 3Com 3C359 Tokenring microcode.
- *
- * Notes:
- *  - Loaded from xl_init upon adapter initialization.
- *
- * Available from 3Com as part of their standard 3C359 driver.
- */
-
-Found in hex form in kernel source.
-
---------------------------------------------------------------------------
-
 Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter
 
 File: cis/LA-PCM.cis
diff --git a/firmware/tr_smctr.bin.ihex b/firmware/tr_smctr.bin.ihex
deleted file mode 100644
index 6797451..0000000
--- a/firmware/tr_smctr.bin.ihex
+++ /dev/null
@@ -1,477 +0,0 @@
-:10000000BC1D123B63B4E900001F000101000205A2
-:10001000010006030100040901000A070100080BA2
-:1000200001000C000000000F0100100D01000E1374
-:10003000010014110100120000050015010016193D
-:1000400001001A1701001800000E00000001000056
-:100050000004001B01001C0000070000000F00004E
-:10006000000B001D01001E0000080000000200003F
-:10007000000C000000060000000D0000000300005E
-:10008000000A00000009000478C6BC0194049380B3
-:10009000C84062E9DA1C2C1555555555555555582B
-:1000A0000BE9E5D595C19D77CEBBA06E1C05F67713
-:1000B000C602FA9670E81DC0170E02FA587DC05F9E
-:1000C00072CEECA4C384907A30CD8D7919E76C247C
-:1000D000279C08390738A84A4CEA4D989B244CC005
-:1000E00026D3E7545A4DF24C0C13234990326EA498
-:1000F000DF9371137726E126F8260C4C12260809A7
-:10010000828260A9307936B0B2A8A772648F9B331F
-:1001100033F9B839D51173AA75265D2651932A494A
-:1001200094C99589BC4DC89B809BA099064C862696
-:10013000589BA49B9937626C679B3330BF366661CE
-:10014000BF36ECC5BD66825A5031D59D9818293C02
-:1001500098864C17263E2CB8693B492EB408431AA2
-:10016000A4F9B351F110F343CD086F6379B3330EA3
-:100170001398499804DA7CE05279310C982E4DACF2
-:100180002C8414EE4CFE675EE49A7529D7A9353AA3
-:10019000945BD59B58B4AF7566AF14A9EF40952515
-:1001A00008B9AD42FCD8D98C330E1398661E45AC05
-:1001B000B00C42D3CCA61262DEB4B180497DA2DE7F
-:1001C000B418C02484E654F5834601681A630CC64B
-:1001D0001264FA4C351C2C0EAAAAAAAAAAAAAAAA88
-:1001E000AAAAAAAAAAADD70270E04CF3A1C1D5C0B1
-:1001F0003CB96939604E58770267933C99E4CF382F
-:100200001C972E401B903146A35E0E88346A35E061
-:10021000E8AA351AA9F51546A3EA7D4AA351AA9F73
-:100220007054A6572EB4CDC8A30CC1DAC6E1CB7A60
-:10023000D41C68FFCF55A8C02D851117442A300B58
-:100240004A88C24DB520D5260169516952195260BC
-:100250001695168296549805A545F3DD6AF9281877
-:10026000EF003030514E445D12D143E6126F9EBA1A
-:10027000CCDF25031DE006060A30CCA9EB2D008655
-:10028000A612654F56D665495F3DE837C940C77825
-:100290000181828C33184980AE40C518059C6D18C9
-:1002A000660EF3A0C61262DEF504B4AC6BC61991FB
-:1002B0007305482E72948073A1C8473666642F3642
-:1002C0006664079902918E72D10F9D063173A0C3A7
-:1002D000516A1A20BF3A0C2C7387435E600223FCDC
-:1002E000E0D635EF9EF5EF92818EF0030305186698
-:1002F00045CC0B482E700A4039D0E4239B3332178B
-:100300009B333203CC8548C73814A5CE297ED280D2
-:10031000A1A8B448882FCE830B1CE1D0D7980488BD
-:1003200087CE963173A58FF38358D7BE7B82AF9269
-:10033000818EF0030305186645CC1520B9C8290045
-:10034000E743908E6CCCC85E6CCCC80F3205231C82
-:10035000E450D45A17882FCE8310F9D023173A04CB
-:1003600035E600221639C3A3FCE0D635E0BFF41809
-:10037000F22D4D43516E5A221F30D417E74191732D
-:1003800005482E776900E743908E6CCCC85E6CCC34
-:10039000C80F3205231CEF4C4E0604C99E0BFF41CB
-:1003A0008F22D4D43516E5A221F35A82FCE8322EEE
-:1003B00060A905CE1348073A1C8473666642F3664B
-:1003C000664079902918E70A989C0A9EB5125C7CD1
-:1003D000C3318B982A7CD3ED38E9D34E74ED499E16
-:1003E0000BFF418F22D4D43516E5A22DEB45338F78
-:1003F000FCF7A05F25031DE40E060A30CC0CF3EBDE
-:1004000040DE61A870920A00E1241E00E1241E0073
-:10041000E1241E00E1241E00E1241E010F982A0B96
-:10042000F3A0C8B9A2A4173A6900E743908E7548B3
-:100430005E706901E6005231CC1814A5CC09829493
-:10044000730CA091F525CC070684849F30A2A47D6F
-:100450005075A665014A8EB4CCC435547566A49710
-:100460007A895053138019E3495C6DCEA940350653
-:1004700078D25706F1B32A8D972362925D69991C51
-:100480006A36E6CD46126F9EE1ABE4A30CC0DEAC4B
-:10049000D40D281BD012A500F84BAD332806A0DEE2
-:1004A00014973A895DC00DE30690925D699866B92C
-:1004B0001995E4A8CF9D331849BE7B86AF928C3343
-:1004C00024140CF4832421C270BFF418F22D4D4380
-:1004D000516E5A221F32A82FCE8322E605A4173A66
-:1004E0006900E743908E75485E706901E642A46337
-:1004F0009802294B9A2978E9405313818132678207
-:10050000FFD063C8B5350D45AE50087CE0D05F9D87
-:100510000645CC01A4173A6900E743908E75485E02
-:10052000706901E659A463981C52973B30528E7D46
-:100530002A091F51EBA4A40AB99487AEC531380229
-:10054000FFD063C8B5350D45AE50087CEA20BF3AF0
-:100550000C8B9A16905CE9A4039D0E4239D5217943
-:1005600095480F300A918E60EB297300095404CA34
-:1005700082655265E4CA226572650932E099724C5F
-:10058000C4E00BFF418F22D4D43516B94021F38A41
-:1005900082FCE8322E60A905CE9A4039D0E4239D32
-:1005A00052179954619901E640A4639804B1849864
-:1005B00018EF2D0305313802FFD063C8B5350D455E
-:1005C000B968887CE0505F9D0645CC81482E713427
-:1005D0008F48014815210521E90A5203CE5A4639B0
-:1005E000CF478E60AB1AF35343EB3524B81B30076B
-:1005F000098A742F7E41741E1D0D874649D595D1F9
-:10060000D5D5BBA94E829D053A0A7414E829D0427B
-:10061000745BCE50C40745BCE20C40745BCE8304CF
-:10062000F9954D13635E6F313BA08BA2C5398D7870
-:100630003A22A0006BC1D1546016D991A2E7438C35
-:1006400024DC1CE05117396B3BCC4B422E6B50BF66
-:100650003636654F7A185525789823E7503EF38152
-:100660004C026D3E7153AF78A9D4A629B1BCD9997B
-:10067000B28E628F222E7516B0B2AB23281654525A
-:1006800031BCD999B28E6619022E7516502CA9C8A4
-:10069000C6F520D3E47F4F9C0AD6167F90EE4CEB34
-:1006A000CFE288BA2F4286AEBDE5A7529F93637909
-:1006B000EB3308F9945247CD99256F3A0C13E65560
-:1006C000344C5A4DB52395A548115A0A4395AC2C84
-:1006D000BA240549B1BCCAA7726C6BC5BDE83169C3
-:1006E000525D0612653EB1504C7D4FAC0A300B3660
-:1006F0006411738A838E75129F7BD29958EE822E75
-:1007000077A0E39D5D4FBC2A532953DE9324BAB3EF
-:1007100036AA4AC679D4B9DE625A11735050BF372F
-:10072000366F1323BA0C24CEBDE2A752B28E6B6093
-:10073000622E751330ACA059CA646379B333651C5B
-:10074000CC32045CEA2CA059DF231BD4835247DD52
-:100750007996D49EB3524BA25A1A8D5D7B82A752D2
-:10076000B28E6619022E7516502C8C321D7B8EA708
-:1007700052B1BCD9999804DA7CE2ACFE6619022E1B
-:100780006550BF336664FE7418864C1726D6165221
-:100790003918DE7ACCC23E651491F36649086E833F
-:1007A0000933AF31ED0D9D0612622A318D6DE7419F
-:1007B000827CCAA68987092E29B1AF1039D66497E1
-:1007C000301D42759344028C24D27AB350F68905C9
-:1007D000435E6198C02C92253C8B2489490549E7EA
-:1007E0000CB98498B7AD3344AE5A5186609F38A98E
-:1007F000A26C6BC48EF45E49461262DEB4CD215CFD
-:10080000B4A30CC13E7229A26C6BC6126247F0E819
-:10081000C33204354092A4828810927CCBD42FA49A
-:1008200002118498B7AD3344AE5A5186609F38A9FF
-:10083000A26C6BC48EF45E494408493E65EA17D247
-:100840000108C24C5BD699A42B9694619827CE459B
-:10085000344D8D78810927CCBD12286C58AFB6F382
-:10086000A0C13E655344D8D7928E7D4BC2FA612613
-:10087000063AB36B030549E70CB96F5A66955CB449
-:10088000A30CC13E7029A26EA4DF9371137726E1F9
-:1008900026F826C6BC9473F92F0BE9849818EACC85
-:1008A000EC0C15279C32FF3D56AF928B7AD335D591
-:1008B000CB4A30CC13E7029A26C6BC947341979179
-:1008C000F483CE0420628B0516498C24C0C7569051
-:1008D000C0C15279C32E5BD5A672D294FAAD58C866
-:1008E000FA9F54B3324BB954A651866B79D0609FAE
-:1008F0003205344D8D7A4D1E7AB35100A93D59A869
-:100900007B4482A1AF4A8D52A95241494F3A2E40B1
-:10091000A49950BE90085279C32E61262DEB4CD07D
-:1009200015CB4A30CC13E7029A26C6BC48FE1D25DB
-:1009300046A954A920A4A79D1720524CA85F48049B
-:100940002309316F5A6680AE5A5186609F3814D1A0
-:100950003635E4A79D1720524CA2450D8B15F49116
-:10096000DE8BC928C24C5BD699A95CB4A30CD6F324
-:10097000A0C13E640A689B1AF16D4CAA92E03694BD
-:10098000709B297813AEB3AA85D44375093AC9EB95
-:100990003524B81B328E13487E4EFD40FD40FD408D
-:1009A000FD40FD40FC13F421F917458A300B335FFD
-:1009B00083A22A300B335F83A2A8C02DB32070928C
-:1009C000139ADE741827CCAA689B1AF70745518042
-:1009D0005B66470738A823E751113FE0E8854601E9
-:1009E0006D990612654F7A2024BAB33215257BAD76
-:1009F0003378AE0E73D047CEA730CC44FF83A2A885
-:100A0000C02CD991C1D11518059B3208BA2C518040
-:100A100059B3207092E29889FDBCEE1890FC8BA22D
-:100A2000C52B0D783A22A561AF074551805B66441E
-:100A30009EB3524B83ADC709BE1F9F74655D0A17F5
-:100A40007CABA0C24C3849122E384907A30CC13EDA
-:100A5000655344D8D7ADE700324B9B33344A03008B
-:100A60009D25CE8324B819998C02124BA199D8C028
-:100A7000274973CFF93CF47CE79804E92E7F39E3EA
-:100A80004F4653C06013A4B9E53C03DE8F9CF300CE
-:100A90009C6FCF3E85F9A336021E6038923E631AE2
-:100AA000109FCF181092BCD0A40CDCC00F9C9734C0
-:100AB00062B6E7F3F3A5CF1842341CC2CAFA8E68B7
-:100AC0005206AF3CA30DBF9E50E1D173CAE03AFC81
-:100AD000C1091A1E6A5C5B8E634E7773CC6167DD59
-:100AE000E66C48D1F31B24695108D4421BF467D14A
-:100AF000804E2FD08CD83009C21E801C46013A4748
-:100B0000D031A106013A7F4630211804E95E8429DC
-:100B100000C027CDD0007C9804F92E84628C027D21
-:100B2000BA3E7E4C027D2E8C61083009F41D0165B1
-:100B300073009F51D085201804FABD194618C027AC
-:100B4000DFD194384C027D174657013009F5FA0180
-:100B50000906013E87A14B88C027DC740D39D300FC
-:100B60009F73D030B39804FBBD06C483009F47D069
-:100B70003648CC0271BF3F9A17E63F0821E692A49F
-:100B80008F9A1031A7F310B184AF3AACDCF773F24F
-:100B90005CC62ADB9E7E7E97310863D0737B43A8B8
-:100BA000E63D34EAF3E315BF9F185F45CFE89F5F4A
-:100BB0009A5B03D0F3D3CE371CD00FBB9E68783B33
-:100BC000BCCA31E8F9A20212A27351086FD1F346F0
-:100BD0000138BF40FC23009C21E84951804E91F42C
-:100BE000210319804E9FD0216306013A568C02746E
-:100BF000FE75495E63D34A54423513A7D1804E95A2
-:100C0000E81E9A4C027CDD1BB9E6013E4BA062A3B4
-:100C1000009F6E8CFCF3009F4BA04218CC027D0716
-:100C200043DA13009F51D03D349804FABD1C628C06
-:100C3000027DFD1C6173009F45D1F44E6013EBF4FF
-:100C400025B033009F43D1A79C1804FB8E8403E991
-:100C5000804FB9E843C13009F77A0A319804FA3E67
-:100C6000844041804E82E7418709230423009D058B
-:100C7000CE961C248C108C0274173A043849182123
-:100C80001804E82E7450E12460846013A0B9D411D4
-:100C9000C248C108C0274173A82384918211804EA5
-:100CA00082E7528E12460846013A0B9D401C248C66
-:100CB000108C0274173A090E12460846013A0B9836
-:100CC0006A1C24B0E11804E82E6B50E1258708C0A7
-:100CD000274173054384961C23009D05CCAA1C2440
-:100CE000B0E11804E82E70687092C3846013E54484
-:100CF000F9409D05CE5A1C24B0E11804F9D13E708C
-:100D000027CF13E5442CA042CB89F2213A0B9C0A51
-:100D10001C24B0E11804F9D10B3810B3C4213936C2
-:100D20005C42C8842B79D061C2741524BAD331E5F2
-:100D300059082908E066634295128100290BC151C8
-:100D400024B81999902290B418A0914101414141D1
-:100D50005283CA4028682908BA16109C990B5694E9
-:100D600090521574C0271A2AD29025D3009D28AB23
-:100D70004A42174C0270D4842E9804E12A42174C40
-:100D8000027082904BA60138514842E9804E15A46A
-:100D90002174C0270FA412E9804E82AC80ACA0ACB5
-:100DA000A959E5644565CAC84ACE0ACE4ACE95918E
-:100DB000959495932925C0CCCC88A4975636647217
-:100DC00090548A9C4508B9B766129309C9B2748ECB
-:100DD000BA6013E5348EBA6013E4748EBA6013E51A
-:100DE000691D74C027CA291D74C027CED225D3001F
-:100DF0009F38A44BA6013E5E912E9804F915225D02
-:100E00003009F3E912E9804F905225D3009DC5487F
-:100E100025D3009C45CECD09C9B21A44BA6013E768
-:100E2000348974C0271C27B79C80C2D776599B93FE
-:100E30000C64C31D1BF4454BC7C63A37E8814BC74A
-:100E4000C63A37E8914BC7C632618EB3BCC34A225B
-:100E5000E6B5249771C987B431AE73A2CF39D25D9C
-:100E6000044442C0D6DE710616BBDBCE830C64C3DD
-:100E70001D311304F9954D133293635E6614CC292A
-:100E80002A5330A6614CC299853A72CCC299850624
-:100E90001BB30A661414249985330A08B186614C81
-:100EA000C2842168733B30A661414EA5985330AC93
-:100EB0005976614CC2B08DD6614CC2B02CF6614CF3
-:100EC000C2B18CA5985330AC0F24CC2998560F286A
-:100ED0006615921A1985330ACA850CC2998565C3AD
-:100EE000D985330ACE7086614CC2B397710C993B99
-:100EF000CC83580BEA779D064ABE047460E0D14E5D
-:100F0000384C3EEE3EEE3EEE3EEE30BBCAE11F7781
-:100F10001F771F771F7727708FBB800E11F771F730
-:100F20007C6F3CB33602FB8DE655707F2D246955EE
-:100F30004F58A9231F54F78A95252B750CCCAC5616
-:100F400051CC51E445CEA21239C0A0AF566A497FB8
-:100F5000028C09F80BEBAF56766752B28E69A71177
-:100F600073A8B1BCCAA0A936502C98E70AF566A4AC
-:100F700097E25A3027BAF7834EA5330A66158DE6F5
-:100F80005539D2A7AC546016701B728E628F222E18
-:100F9000751602FB8DE60A953D62A300B701B553B5
-:100FA000DE2A5494ADD43332B15947314791173AC0
-:100FB0008848E702B017DC679D4B8DE752AA7BD4C7
-:100FC000AA92BDD699BC5602FB8CF36666C6F36640
-:100FD0006662992AF8186870B08A0D5555555552B1
-:100FE00032E1405C380BEA9B87017DC05F7017DC03
-:100FF00005F5DC9B017D614D80BEA77982A21F5063
-:10100000152A8F8B1CE5A5138458E702915405021D
-:101010004BBD221A947F9C1AC05F421A21D180597D
-:10102000C06D1C2C0A83555555555555555555556C
-:1010300055541CB85C6E179C2F385E70E7B85E7014
-:10104000BCE179C2F385E70BCE179C299C299C292A
-:101050009C230F5814EE357726219305C9B017D27B
-:101060001D188A219305C9B017D187AC0A740FAE39
-:10107000F55A82A3E43A3114BBD7599974A21930B6
-:101080005C9B017D187AC0A740F843D4638925D0C2
-:1010900010D61C6A10F5558925D151661F51F5915E
-:1010A000492E8915986AA3E08A9465640E1317384F
-:1010B000A8864C1726C05F461EB028631F087A8C8E
-:1010C0007124BA021AD00D421EAAB124BA2A2D31B7
-:1010D000F51F587492E8875A6352DEF451694A3E0C
-:1010E00009694650F0E131730545BD598D8B4A7C45
-:1010F000D3ED38E9D34E74ED443260B93602FA5B71
-:10110000DE8A2D29D0E121F5A39221F219305C9BD2
-:10111000017D21F5A0C6016701B445CEA51239D4E1
-:101120001C05F440A1C2C3506AAAAAAAAAAAAAAAE4
-:10113000AAAAAAAAAAAA81AF869F191BE781F3656A
-:10114000F280BE7017DFDF380BEB0DC380BEA70F38
-:10115000954F5A94C02CD8B1A7CE5A1173A83AC251
-:10116000CCB63017DC6F35A9804DA7CE2A1879C5CB
-:1011700049DE61A822E75033F9986408B99542FC2A
-:10118000CCD9953D62A248D448E70288B9C1A0E312
-:101190009D4E62E6CCC66BCE8310C982E4DAC2C82B
-:1011A0001EC3B93602FAA9EB4E3030FA0DF0A9EBA6
-:1011B00040B90FAA7AD2C2C8FAA7AD410A47D53DB5
-:1011C00068ACF1F54F5A97547D4FA8AA551F11737B
-:1011D0005AB017DE5D59A925D0552A46BCB822AEB3
-:1011E00045293E14FAE19994CA4ABE3DD699925DCA
-:1011F0001517C8D7DC15178A401F0A9EACC9654968
-:101200005C1D10684A3E5BDE83169580BE91745863
-:10121000A4007C38E7563017DF75A6649745209DFB
-:10122000035F70545E291DF0A9EACC865495C1D1A4
-:1012300006830FAA7BD0654945BDE962D291DF04E0
-:101240005D16291C7D4FAC1A471AA9F5676653280D
-:10125000B7BD2C5A523BE3DD59A925D1A8AC086B88
-:10126000EE08ABC5202F854F566675495C1C181DCE
-:1012700081C26405F080BE355CD017C255F0957C04
-:10128000255F080BE1017C7BAB3524BA1055931A1E
-:10129000FB822AF148D7C2A7AB31B2A4AC639D4A06
-:1012A0008D7C7BAB3524BA1054308D7DC11578AC64
-:1012B0006F5A94601AE379D4AA4F854F5666D54980
-:1012C00058C73A9549F045D1629486BC1D13D29017
-:1012D000FFCF7A83F25031DE006060A11735A85F3E
-:1012E0009B1B3707441A300B380DBC1CE0D047CE8F
-:1012F000A0AA7AA1986A92953D6831805B80DAA9AC
-:10130000EF41952516F7A58B4AC679B333602FAA0E
-:101310009EB15180599ECAA7AC0A300B67B2ADD5B9
-:10132000DA925D17A300B32D956E08A958A1173A5C
-:101330008B017D54F78E9525081CE05602FBC1D128
-:10134000151805926B3C1D1228C02CA56C11701746
-:10135000B2384D80BEE02FB4EC4AEDB39E02FB8064
-:10136000BEE02FB139933E6DE710609F32A9A26CA9
-:1013700005F440E60A953D6A2300B380DAA7D62A31
-:10138000030D7017D22E76294FBC54A6516F7A5890
-:10139000B4AC05F48BA2F40E350D492EB4CC18A5CF
-:1013A000C8F84A9723E1052E47C28A5C8F85697287
-:1013B0003E1F4AC3551F5643328CA35E60A845CEDC
-:1013C0000D602FA3849DD8F017D22E0E1B2384D836
-:1013D0000BEB89F380BEE02FBB3985DF2203E701E9
-:1013E0007DC05F7017D11738145BD6A2740D4B7A8D
-:1013F000B33196946BCC3523D749481573290F5DCB
-:101400008AC05F4D79843580BE881CC3529F59685D
-:10141000C02CE036AA7BCD4A92BEF3814A7D5B594F
-:1014200094CA1C24EEC780BE881CC3529F5968C052
-:101430002CE036AA7BCD4A92BEF38143849C7B3854
-:101440000BEBAF70D4EA53009B4F9C5430F38A945B
-:10145000FAB6B3299422E61A85F9B05993F9D2C4A1
-:101460003260B936B0B390D977261C2722E896B4FB
-:1014700023EA9EB511805965862073968D79AD5803
-:101480000BE917448A4A07D77A82A190FAEF0154F0
-:10149000BA50D4591E2CE9F38A99856B0B23159702
-:1014A00072611730D42C738748AA028125DE910D12
-:1014B0004AC05F7ED280A53EB2D0C86B80BE881C79
-:1014C000EA0917441A371A917458A371AF074454A4
-:1014D0006E35E0E8AA640F90FAD06300B380DA2C8E
-:1014E000738748AA028125DE910D4AC05F48BA275A
-:1014F000A300B701B74F9CB46BCC3516F566632DCE
-:10150000291EBA4A40AB99487AEC508B9C0822FCC1
-:10151000F9B2553D62A92351239C0A3C730D445CEA
-:10152000E15071CEA11FE7156B0B25ED0B93602FDA
-:10153000AA9EAC3665495F7A2050087FEF3914497E
-:10154000011181046040CC59C0AD23EB41B081F260
-:101550003A41AA5043E4D48654A087C152CA9301A9
-:1015600032549D2402000052AF1646A7916708B47A
-:101570000451F16519B46E2DC0AD490092571B742A
-:10158000455F2351B7440A1006A36E8B6B081F19E1
-:10159000D1E680828054042A4591A9E459C22D01E4
-:1015A000140450D3FC558461D980512FE21F465F4B
-:1015B00040E020154ABC591A9E459C22D01148CBC8
-:1015C000E81408015415E2C8D4F22CE116808A46CA
-:1015D0005F527CD9A8F888D05A3CD25C5B80DAA7ED
-:1015E000D65A0886A45D17A0C3522E88A8221F537E
-:1015F000EADACCA650E127763C05F54FAB6B329981
-:1016000043849C7B380BE927ACD492E00EDA384D4A
-:1016100080BEE67D50BA51AE66EFBCDC7B871E0211
-:10162000FA93E6CD47C443CD0F349DA300B05501D6
-:10163000AE038404CE01D0E17002800E89E9221F3E
-:10164000E0E896B011F4C2CE036A442DC06D48059F
-:10165000B80DA300B776D5DEB150DC7D77BC54BAA7
-:10166000527F5814340F9AF381580BEAEF581460E4
-:1016700016A56C2EF7814BA56F7D5DEEB52E95807E
-:10168000BEF073BD047CEAFEEB4CDE2953DD6A54E8
-:1016900094A9EA0A8C02D64C3C05F400EACD56AF78
-:1016A000C047D29C8D29CAE02FAEBD75999D4AF9DD
-:1016B000EF517C940C77801818292AF8E0E8AA30BA
-:1016C0000B2A987C1D1151805954C351F51B3324AA
-:1016D000BB82A5195C1D1028C02C9AC7C1D1228CD1
-:1016E00002C994645C0CD68E13602FB80BEA30E309
-:1016F000C05F48DC780BE800E3C05F6C38D52E355E
-:101700004F5A8A61AA9F561B32994642C8010C451E
-:10171000CEA517E6C6CEA9EB151646A24738144348
-:101720002622E73D602FAA9EB512E07F017DE3E708
-:101730000293F995445CE5A0E39D4A7F9C54A9EB94
-:10174000510546B9FCC01B222E64542FCD46CCA7B0
-:10175000D586CCA65055C645CE5A0E39D4A7F9C564
-:101760004A9EB5118059C06DCFE600D9117322A1F0
-:101770007E6A36653EAC366532B017DD3E72D27990
-:10178000310C982E4C20732A8FF38AADE741827C6E
-:10179000CAA689B5859FB0F017D51F5454251AA83D
-:1017A000FF2A946511D74944D5CCA055D8AE0E88F0
-:1017B0001460164D6322E07286384D80BEE02FB86B
-:1017C0000BEE02FB8138F017D7D71E02FAFAE3C0FE
-:1017D0005F4C85900218C85B80DA300B701B4C227E
-:1017E000D34C33038C2E4C4326D0F56366D095A79B
-:1017F000CE45330AD61642386EE4CEBD592CD2AB54
-:10180000BA949DE61AB017D54F5A8B091A88B9C5F4
-:10181000424730D43216728865BD599925A5602F8C
-:10182000B860F308B74A1A8FAB0D994651AF38A884
-:101830008E9065135218A054B1422E61A848E72D2E
-:1018400016F7A805A5602FA475D251357328157613
-:101850002B83A20518059358C8B806286384D80BB3
-:10186000EE02FB80BEE02FA043A7017D4CE3C05FEA
-:101870007017DC05F4642DC06D1805B80DA5BD6AA0
-:101880002386AA9EB511A46AA3EA8A8D23E117389C
-:101890003469719845A6986A3EAC36651946BCE233
-:1018A000A23A41944D48628152C516F7A88B4A541A
-:1018B000F5A88C02DC06D1039CB4A9EE0A95252A72
-:1018C0007AAD46016701B5D7AC0A300B6C4935E6F5
-:1018D000B567F3006C88B99150BF311B32A7B86867
-:1018E00095257BAD3378A7CD3ED38E9D34E74ED47E
-:1018F00022E706848E60A8FF38AB839C2A08F9D4BF
-:101900002063BC1A060AC05F4642DC06D1805B80B9
-:10191000DA22E61A848E72D16F5A80871AAA7AD494
-:1019200048C8D547D5152323E11738348CBA4B7BEB
-:10193000D402D28C22DC06D51F561B328CA35E71DA
-:10194000511D20CA26A43140A962B017DF9EF4B70A
-:10195000C940C778018182B83839491C26C05F70F8
-:1019600017D4ABE12AF84ABE12AF8F974FCBA7012D
-:101970007DDA80AA91647F4A81D522C8FE828025C3
-:1019800048B23EBBDC352E9407E88A9C03E24BA5A7
-:1019900077ABB332E94BBD598684977A04BA53E1E9
-:1019A00032EF50D4E63553EB029CC7D77AB330D22E
-:1019B0005DEA02E9445D1628C02CE0369174455971
-:1019C00018D54FAC0AC435308B38692BBD5998698E
-:1019D0002EF512E958674AEF50D58E3E1CA4B0CEC2
-:1019E00093216E1A481FA22AC30D577AB30D092EF0
-:1019F000F4435D288B832092384D80BEE02FAC17D6
-:101A000049B3A582E93EE93674E02FA6CE9C05F4E1
-:101A1000C22C8C52577AD48D48FAEF50D5AE35533C
-:101A2000EB028621AAEF56661A4BBD44BA50C4E9B0
-:101A300053EB028681F5DEA1A8621F5DFEA25D293F
-:101A400077A86A618D40FD11530C6AA7D60530C78F
-:101A5000D77FA9574A5DEB481B0C7C8B9D8A53EFBF
-:101A60006694CA54F5A0C6016E036A9F5676653225
-:101A70008B7BD2C5A5602FAA7D65A300B701B4C832
-:101A80005A078FED01D527916701B48B9C541C73C5
-:101A9000A8845CC150BF365660AB8C8B9C541C73C1
-:101AA000A8845CC150BF36566C05F553D6A2300BE6
-:101AB000295B19FCF69445CF150BF33CB32A7AC584
-:101AC0004601648A31239C0A5DEA34332E95C7CEE1
-:101AD0002A4FE65020B9310C9BEF391445CE45070B
-:101AE0001CEA4687AB1B3684A75EAC966752B017DC
-:101AF000DCFE7B4A22E78A85F99E59977A8D0CCBCA
-:101B0000A527F3A0443260B937DE72288B9C8A0E79
-:101B100039D48C05F7E7B82AF92818EF0030305788
-:101B200007440A508FF07391411F3A9045C0BB188B
-:101B3000E13602FBFB9E02FAEEE7F5CF017D105C79
-:101B4000F017D105CF017D53EB2D1805B80DA64236
-:101B5000DC06D31735A88B9C0A0E39D40CFE7B4AC1
-:101B600022E6550BF331B3602FBC7CE2A4FE655135
-:101B70001738141C73A819FCF69445CCAA17E66311
-:101B8000660AB8CC85A158F6A23548487F4A89959F
-:101B90002121FD0502549E45910E3C05F507405557
-:101BA00048523E86A07548523EB5004A9C006BC71D
-:101BB000CE4527F32A843735DEA0AB231AAEF58352
-:101BC0005918D743DE2AD094EBDE053A959FCCC353
-:101BD0002045CCAA17E666CC43264FE741222E705B
-:101BE0006838E753E02FABBC12D2E9580BEAA7AD37
-:101BF00045A11FC05F7839C8A08F9D481C24EEC73F
-:101C000080BEBAF56D6649770D4EA53009B4F9C5A9
-:101C1000430F38A93F9D02FBCE4511739141C73A4E
-:101C2000919FCF69445CF150BF33CB32A7AC549045
-:101C30008D448E702977A8D0CCBA56B0B29D8C86D0
-:101C40004C172677261C271C249E2361BE8E124F1C
-:101C500011871CEA5C05F5D7B86A752977AB0D9931
-:101C600074A54F72A0AA4AC6F36666C63982AF75DC
-:101C7000A66F146BCE05707396823E7528E13AA765
-:101C8000AD44601652B61D7AB6B324BB86A75298EF
-:101C900004DA7CE2A1879C55F79CB5AC2C9533B94E
-:101CA0003105D953D6A2300B295B022E615A17E6B3
-:101CB0009CB32A7AC54021A891CE0527F3A5886454
-:101CC000C172654F58140C8D7EF381445CEF41C79F
-:101CD0003ABE02FAA9EACECCA92953D6A24647DDDC
-:101CE0007AC0A30086E29B29788B810998709B2992
-:101CF000795DD972ED94BCB976133B2A5DB29795A4
-:101D00002ED94BCA7D5B5994CA1C24EEC794BCC023
-:101D100026D3E7150C3CE2ACFE7B4A22E78A85F924
-:101D20009E59977A8D0CCBA527F3A0417262193783
-:101D3000DE70288B9C8A0E39D48D0F56366D094E75
-:101D4000BD592CCEA56B0B22D99DC9B297BEF3818C
-:101D50004A7D65A300938F672978C24DC1D1068261
-:101D600031AF07383411F3A82A9EA8661AA4A54FEC
-:101D70005A0C118FAA7BD0654945BDE962D2B19E4C
-:101D80006CCCC6198709C38E75411F3AA513D5556A
-:101D900055555555555555555555555555555555F3
-:101DA00055555555555555555555555555555555E3
-:0E1DB00055555555555555555555555ACC90C8
-:00000001FF
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index e95d1b6..0225742 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -33,7 +33,7 @@
 config BINFMT_ELF_FDPIC
 	bool "Kernel support for FDPIC ELF binaries"
 	default y
-	depends on (FRV || BLACKFIN || (SUPERH32 && !MMU))
+	depends on (FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X)
 	help
 	  ELF FDPIC binaries are based on ELF, but allow the individual load
 	  segments of a binary to be located in memory independently of each
diff --git a/fs/aio.c b/fs/aio.c
index 67a6db3..e7f2fad 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1456,6 +1456,10 @@
 	if (ret < 0)
 		goto out;
 
+	ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
+	if (ret < 0)
+		goto out;
+
 	kiocb->ki_nr_segs = kiocb->ki_nbytes;
 	kiocb->ki_cur_seg = 0;
 	/* ki_nbytes/left now reflect bytes instead of segs */
@@ -1467,11 +1471,17 @@
 	return ret;
 }
 
-static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
+static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb)
 {
+	int bytes;
+
+	bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
+	if (bytes < 0)
+		return bytes;
+
 	kiocb->ki_iovec = &kiocb->ki_inline_vec;
 	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
-	kiocb->ki_iovec->iov_len = kiocb->ki_left;
+	kiocb->ki_iovec->iov_len = bytes;
 	kiocb->ki_nr_segs = 1;
 	kiocb->ki_cur_seg = 0;
 	return 0;
@@ -1496,10 +1506,7 @@
 		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
 			kiocb->ki_left)))
 			break;
-		ret = security_file_permission(file, MAY_READ);
-		if (unlikely(ret))
-			break;
-		ret = aio_setup_single_vector(kiocb);
+		ret = aio_setup_single_vector(READ, file, kiocb);
 		if (ret)
 			break;
 		ret = -EINVAL;
@@ -1514,10 +1521,7 @@
 		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
 			kiocb->ki_left)))
 			break;
-		ret = security_file_permission(file, MAY_WRITE);
-		if (unlikely(ret))
-			break;
-		ret = aio_setup_single_vector(kiocb);
+		ret = aio_setup_single_vector(WRITE, file, kiocb);
 		if (ret)
 			break;
 		ret = -EINVAL;
@@ -1528,9 +1532,6 @@
 		ret = -EBADF;
 		if (unlikely(!(file->f_mode & FMODE_READ)))
 			break;
-		ret = security_file_permission(file, MAY_READ);
-		if (unlikely(ret))
-			break;
 		ret = aio_setup_vectored_rw(READ, kiocb, compat);
 		if (ret)
 			break;
@@ -1542,9 +1543,6 @@
 		ret = -EBADF;
 		if (unlikely(!(file->f_mode & FMODE_WRITE)))
 			break;
-		ret = security_file_permission(file, MAY_WRITE);
-		if (unlikely(ret))
-			break;
 		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
 		if (ret)
 			break;
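
The aio hunks above replace the open-coded security_file_permission() calls with rw_verify_area(), which both performs the permission check and returns the number of bytes the caller is actually allowed to transfer; aio_setup_single_vector() then sizes its single iovec from that return value rather than from the raw ki_left. A minimal user-space sketch of that clamp, with a hypothetical check_area() standing in for rw_verify_area():

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for rw_verify_area(): returns how many bytes the
 * caller may transfer (possibly fewer than requested), or a negative
 * error code. */
static long check_area(long requested, long limit)
{
	if (requested < 0)
		return -EINVAL;
	return requested > limit ? limit : requested;
}

/* Same shape as the patched aio_setup_single_vector(): the iovec length
 * comes from the verified byte count, not from the raw request. */
static long setup_single_vector(long requested, long limit, long *iov_len)
{
	long bytes = check_area(requested, limit);

	if (bytes < 0)
		return bytes;
	*iov_len = bytes;
	return 0;
}

int main(void)
{
	long len;

	if (setup_single_vector(8192, 4096, &len) == 0)
		printf("iov_len clamped to %ld bytes\n", len);	/* 4096 */
	return 0;
}

The point of the shape is that the verified count, not the requested one, ends up as the iovec length.
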
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index eb1cc92..908e184 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -110,7 +110,6 @@
 	int sub_version;
 	int min_proto;
 	int max_proto;
-	int compat_daemon;
 	unsigned long exp_timeout;
 	unsigned int type;
 	int reghost_enabled;
@@ -270,6 +269,17 @@
 struct autofs_info *autofs4_new_ino(struct autofs_sb_info *);
 void autofs4_clean_ino(struct autofs_info *);
 
+static inline int autofs_prepare_pipe(struct file *pipe)
+{
+	if (!pipe->f_op || !pipe->f_op->write)
+		return -EINVAL;
+	if (!S_ISFIFO(pipe->f_dentry->d_inode->i_mode))
+		return -EINVAL;
+	/* We want a packet pipe */
+	pipe->f_flags |= O_DIRECT;
+	return 0;
+}
+
 /* Queue management functions */
 
 int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify);
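
autofs_prepare_pipe(), added above, rejects anything that is not a writable FIFO and then sets O_DIRECT on the pipe, putting it into packet mode so each daemon notification is read as one discrete message; that is what lets the waitq.c hunks further down drop the 32/64-bit packet-size workaround. A small user-space sketch of the same packet-mode behaviour, relying only on the pipe(7)/fcntl(2) semantics available from Linux 3.4 onward:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char buf[64];
	ssize_t n;

	if (pipe(fds) < 0)
		return 1;
	/* Switch the pipe to packet mode, as autofs_prepare_pipe() does on
	 * the kernel side by OR-ing O_DIRECT into f_flags. */
	if (fcntl(fds[1], F_SETFL, O_DIRECT) < 0)
		return 1;

	if (write(fds[1], "first", 5) != 5 || write(fds[1], "second", 6) != 6)
		return 1;

	/* A large read returns exactly one packet, so a reader using an
	 * over-sized buffer still gets one whole message. */
	n = read(fds[0], buf, sizeof(buf));
	printf("got %zd bytes: %.*s\n", n, (int)n, buf);	/* 5 bytes */
	return 0;
}

On kernels without packet-mode pipes the fcntl() simply fails, which is why the sketch bails out at that point.
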
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 9dacb85..aa9103f 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -376,7 +376,7 @@
 			err = -EBADF;
 			goto out;
 		}
-		if (!pipe->f_op || !pipe->f_op->write) {
+		if (autofs_prepare_pipe(pipe) < 0) {
 			err = -EPIPE;
 			fput(pipe);
 			goto out;
@@ -385,7 +385,6 @@
 		sbi->pipefd = pipefd;
 		sbi->pipe = pipe;
 		sbi->catatonic = 0;
-		sbi->compat_daemon = is_compat_task();
 	}
 out:
 	mutex_unlock(&sbi->wq_mutex);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index d8dc002..6e488eb 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -19,7 +19,6 @@
 #include <linux/parser.h>
 #include <linux/bitops.h>
 #include <linux/magic.h>
-#include <linux/compat.h>
 #include "autofs_i.h"
 #include <linux/module.h>
 
@@ -225,7 +224,6 @@
 	set_autofs_type_indirect(&sbi->type);
 	sbi->min_proto = 0;
 	sbi->max_proto = 0;
-	sbi->compat_daemon = is_compat_task();
 	mutex_init(&sbi->wq_mutex);
 	mutex_init(&sbi->pipe_mutex);
 	spin_lock_init(&sbi->fs_lock);
@@ -292,7 +290,7 @@
 		printk("autofs: could not open pipe file descriptor\n");
 		goto fail_dput;
 	}
-	if (!pipe->f_op || !pipe->f_op->write)
+	if (autofs_prepare_pipe(pipe) < 0)
 		goto fail_fput;
 	sbi->pipe = pipe;
 	sbi->pipefd = pipefd;
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 9c098db..da8876d 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -91,24 +91,7 @@
 
 	return (bytes > 0);
 }
-
-/*
- * The autofs_v5 packet was misdesigned.
- *
- * The packets are identical on x86-32 and x86-64, but have different
- * alignment. Which means that 'sizeof()' will give different results.
- * Fix it up for the case of running 32-bit user mode on a 64-bit kernel.
- */
-static noinline size_t autofs_v5_packet_size(struct autofs_sb_info *sbi)
-{
-	size_t pktsz = sizeof(struct autofs_v5_packet);
-#if defined(CONFIG_X86_64) && defined(CONFIG_COMPAT)
-	if (sbi->compat_daemon > 0)
-		pktsz -= 4;
-#endif
-	return pktsz;
-}
-
+	
 static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 				 struct autofs_wait_queue *wq,
 				 int type)
@@ -172,7 +155,8 @@
 	{
 		struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
 
-		pktsz = autofs_v5_packet_size(sbi);
+		pktsz = sizeof(*packet);
+
 		packet->wait_queue_token = wq->wait_queue_token;
 		packet->len = wq->name.len;
 		memcpy(packet->name, wq->name.name, wq->name.len);
diff --git a/fs/bio.c b/fs/bio.c
index e453924..84da885 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -505,9 +505,14 @@
 int bio_get_nr_vecs(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	return min_t(unsigned,
+	int nr_pages;
+
+	nr_pages = min_t(unsigned,
 		     queue_max_segments(q),
 		     queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
+
+	return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
+
 }
 EXPORT_SYMBOL(bio_get_nr_vecs);
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e08f6a20..ba11c30 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -70,7 +70,7 @@
 	spin_unlock(&dst->wb.list_lock);
 }
 
-static sector_t max_block(struct block_device *bdev)
+sector_t blkdev_max_block(struct block_device *bdev)
 {
 	sector_t retval = ~((sector_t)0);
 	loff_t sz = i_size_read(bdev->bd_inode);
@@ -163,7 +163,7 @@
 blkdev_get_block(struct inode *inode, sector_t iblock,
 		struct buffer_head *bh, int create)
 {
-	if (iblock >= max_block(I_BDEV(inode))) {
+	if (iblock >= blkdev_max_block(I_BDEV(inode))) {
 		if (create)
 			return -EIO;
 
@@ -185,7 +185,7 @@
 blkdev_get_blocks(struct inode *inode, sector_t iblock,
 		struct buffer_head *bh, int create)
 {
-	sector_t end_block = max_block(I_BDEV(inode));
+	sector_t end_block = blkdev_max_block(I_BDEV(inode));
 	unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
 
 	if ((iblock + max_blocks) > end_block) {
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f4e9074..bcec067 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -22,6 +22,7 @@
 #include "ulist.h"
 #include "transaction.h"
 #include "delayed-ref.h"
+#include "locking.h"
 
 /*
  * this structure records all encountered refs on the way up to the root
@@ -893,18 +894,22 @@
 	s64 bytes_left = size - 1;
 	struct extent_buffer *eb = eb_in;
 	struct btrfs_key found_key;
+	int leave_spinning = path->leave_spinning;
 
 	if (bytes_left >= 0)
 		dest[bytes_left] = '\0';
 
+	path->leave_spinning = 1;
 	while (1) {
 		len = btrfs_inode_ref_name_len(eb, iref);
 		bytes_left -= len;
 		if (bytes_left >= 0)
 			read_extent_buffer(eb, dest + bytes_left,
 						(unsigned long)(iref + 1), len);
-		if (eb != eb_in)
+		if (eb != eb_in) {
+			btrfs_tree_read_unlock_blocking(eb);
 			free_extent_buffer(eb);
+		}
 		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
 		if (ret > 0)
 			ret = -ENOENT;
@@ -919,8 +924,11 @@
 		slot = path->slots[0];
 		eb = path->nodes[0];
 		/* make sure we can use eb after releasing the path */
-		if (eb != eb_in)
+		if (eb != eb_in) {
 			atomic_inc(&eb->refs);
+			btrfs_tree_read_lock(eb);
+			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+		}
 		btrfs_release_path(path);
 
 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
@@ -931,6 +939,7 @@
 	}
 
 	btrfs_release_path(path);
+	path->leave_spinning = leave_spinning;
 
 	if (ret)
 		return ERR_PTR(ret);
@@ -1247,7 +1256,7 @@
 				struct btrfs_path *path,
 				iterate_irefs_t *iterate, void *ctx)
 {
-	int ret;
+	int ret = 0;
 	int slot;
 	u32 cur;
 	u32 len;
@@ -1259,7 +1268,8 @@
 	struct btrfs_inode_ref *iref;
 	struct btrfs_key found_key;
 
-	while (1) {
+	while (!ret) {
+		path->leave_spinning = 1;
 		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
 					&found_key);
 		if (ret < 0)
@@ -1275,6 +1285,8 @@
 		eb = path->nodes[0];
 		/* make sure we can use eb after releasing the path */
 		atomic_inc(&eb->refs);
+		btrfs_tree_read_lock(eb);
+		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 		btrfs_release_path(path);
 
 		item = btrfs_item_nr(eb, slot);
@@ -1288,13 +1300,12 @@
 				 (unsigned long long)found_key.objectid,
 				 (unsigned long long)fs_root->objectid);
 			ret = iterate(parent, iref, eb, ctx);
-			if (ret) {
-				free_extent_buffer(eb);
+			if (ret)
 				break;
-			}
 			len = sizeof(*iref) + name_len;
 			iref = (struct btrfs_inode_ref *)((char *)iref + len);
 		}
+		btrfs_tree_read_unlock_blocking(eb);
 		free_extent_buffer(eb);
 	}
 
@@ -1414,6 +1425,8 @@
 
 void free_ipath(struct inode_fs_paths *ipath)
 {
+	if (!ipath)
+		return;
 	kfree(ipath->fspath);
 	kfree(ipath);
 }
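
The backref.c hunks keep the extent buffer both referenced and read-locked (with the lock set blocking) across btrfs_release_path(), so its contents cannot change or disappear while the path that found it is dropped, and they restore leave_spinning on the way out. A rough user-space analogue of that pin-then-drop-the-lookup-lock pattern, using pthreads and entirely hypothetical names (build with -lpthread):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

/* Miniature of the pattern in the backref.c hunks: pin an object
 * (reference + read lock) before dropping the lookup lock, so the object
 * stays valid and stable while the lookup state is released. */
struct node {
	pthread_rwlock_t lock;
	int refs;
	int payload;
};

static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node the_node = {
	.lock = PTHREAD_RWLOCK_INITIALIZER,
	.refs = 1,
	.payload = 42,
};

static struct node *lookup_and_pin(void)
{
	struct node *n;

	pthread_mutex_lock(&lookup_lock);
	n = &the_node;
	n->refs++;				/* like atomic_inc(&eb->refs) */
	pthread_rwlock_rdlock(&n->lock);	/* like btrfs_tree_read_lock(eb) */
	pthread_mutex_unlock(&lookup_lock);	/* like btrfs_release_path(path) */
	return n;
}

static void unpin(struct node *n)
{
	pthread_rwlock_unlock(&n->lock);	/* like btrfs_tree_read_unlock_blocking(eb) */
	pthread_mutex_lock(&lookup_lock);
	n->refs--;				/* like free_extent_buffer(eb) */
	pthread_mutex_unlock(&lookup_lock);
}

int main(void)
{
	struct node *n = lookup_and_pin();

	printf("payload %d read under read lock, refs=%d\n", n->payload, n->refs);
	unpin(n);
	return 0;
}
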
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index e801f22..4106264 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -220,10 +220,12 @@
  */
 static void add_root_to_dirty_list(struct btrfs_root *root)
 {
+	spin_lock(&root->fs_info->trans_lock);
 	if (root->track_dirty && list_empty(&root->dirty_list)) {
 		list_add(&root->dirty_list,
 			 &root->fs_info->dirty_cowonly_roots);
 	}
+	spin_unlock(&root->fs_info->trans_lock);
 }
 
 /*
@@ -723,7 +725,7 @@
 
 		cur = btrfs_find_tree_block(root, blocknr, blocksize);
 		if (cur)
-			uptodate = btrfs_buffer_uptodate(cur, gen);
+			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
 		else
 			uptodate = 0;
 		if (!cur || !uptodate) {
@@ -1358,7 +1360,12 @@
 		block1 = btrfs_node_blockptr(parent, slot - 1);
 		gen = btrfs_node_ptr_generation(parent, slot - 1);
 		eb = btrfs_find_tree_block(root, block1, blocksize);
-		if (eb && btrfs_buffer_uptodate(eb, gen))
+		/*
+		 * if we get -eagain from btrfs_buffer_uptodate, we
+		 * don't want to return eagain here.  That will loop
+		 * forever
+		 */
+		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
 			block1 = 0;
 		free_extent_buffer(eb);
 	}
@@ -1366,7 +1373,7 @@
 		block2 = btrfs_node_blockptr(parent, slot + 1);
 		gen = btrfs_node_ptr_generation(parent, slot + 1);
 		eb = btrfs_find_tree_block(root, block2, blocksize);
-		if (eb && btrfs_buffer_uptodate(eb, gen))
+		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
 			block2 = 0;
 		free_extent_buffer(eb);
 	}
@@ -1504,8 +1511,9 @@
 
 	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
 	if (tmp) {
-		if (btrfs_buffer_uptodate(tmp, 0)) {
-			if (btrfs_buffer_uptodate(tmp, gen)) {
+		/* first we do an atomic uptodate check */
+		if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
+			if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
 				/*
 				 * we found an up to date block without
 				 * sleeping, return
@@ -1523,8 +1531,9 @@
 			free_extent_buffer(tmp);
 			btrfs_set_path_blocking(p);
 
+			/* now we're allowed to do a blocking uptodate check */
 			tmp = read_tree_block(root, blocknr, blocksize, gen);
-			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
+			if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
 				*eb_ret = tmp;
 				return 0;
 			}
@@ -1559,7 +1568,7 @@
 		 * and give up so that our caller doesn't loop forever
 		 * on our EAGAINs.
 		 */
-		if (!btrfs_buffer_uptodate(tmp, 0))
+		if (!btrfs_buffer_uptodate(tmp, 0, 0))
 			ret = -EIO;
 		free_extent_buffer(tmp);
 	}
@@ -4043,7 +4052,7 @@
 			tmp = btrfs_find_tree_block(root, blockptr,
 					    btrfs_level_size(root, level - 1));
 
-			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
+			if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
 				free_extent_buffer(tmp);
 				break;
 			}
@@ -4166,7 +4175,8 @@
 				struct extent_buffer *cur;
 				cur = btrfs_find_tree_block(root, blockptr,
 					    btrfs_level_size(root, level - 1));
-				if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
+				if (!cur ||
+				    btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
 					slot++;
 					if (cur)
 						free_extent_buffer(cur);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3f65a81..8fd7233 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1078,7 +1078,7 @@
 	 * is required instead of the faster short fsync log commits
 	 */
 	u64 last_trans_log_full_commit;
-	unsigned long mount_opt:21;
+	unsigned long mount_opt;
 	unsigned long compress_type:4;
 	u64 max_inline;
 	u64 alloc_start;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 20196f4..a7ffc88 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -323,7 +323,8 @@
  * in the wrong place.
  */
 static int verify_parent_transid(struct extent_io_tree *io_tree,
-				 struct extent_buffer *eb, u64 parent_transid)
+				 struct extent_buffer *eb, u64 parent_transid,
+				 int atomic)
 {
 	struct extent_state *cached_state = NULL;
 	int ret;
@@ -331,6 +332,9 @@
 	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
 		return 0;
 
+	if (atomic)
+		return -EAGAIN;
+
 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
 			 0, &cached_state);
 	if (extent_buffer_uptodate(eb) &&
@@ -372,7 +376,8 @@
 		ret = read_extent_buffer_pages(io_tree, eb, start,
 					       WAIT_COMPLETE,
 					       btree_get_extent, mirror_num);
-		if (!ret && !verify_parent_transid(io_tree, eb, parent_transid))
+		if (!ret && !verify_parent_transid(io_tree, eb,
+						   parent_transid, 0))
 			break;
 
 		/*
@@ -383,17 +388,16 @@
 		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
 			break;
 
-		if (!failed_mirror) {
-			failed = 1;
-			printk(KERN_ERR "failed mirror was %d\n", eb->failed_mirror);
-			failed_mirror = eb->failed_mirror;
-		}
-
 		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
 					      eb->start, eb->len);
 		if (num_copies == 1)
 			break;
 
+		if (!failed_mirror) {
+			failed = 1;
+			failed_mirror = eb->read_mirror;
+		}
+
 		mirror_num++;
 		if (mirror_num == failed_mirror)
 			mirror_num++;
@@ -564,7 +568,7 @@
 }
 
 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
-			       struct extent_state *state)
+			       struct extent_state *state, int mirror)
 {
 	struct extent_io_tree *tree;
 	u64 found_start;
@@ -589,6 +593,7 @@
 	if (!reads_done)
 		goto err;
 
+	eb->read_mirror = mirror;
 	if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
 		ret = -EIO;
 		goto err;
@@ -652,7 +657,7 @@
 
 	eb = (struct extent_buffer *)page->private;
 	set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
-	eb->failed_mirror = failed_mirror;
+	eb->read_mirror = failed_mirror;
 	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
 		btree_readahead_hook(root, eb, eb->start, -EIO);
 	return -EIO;	/* we fixed nothing */
@@ -1202,7 +1207,7 @@
 	root->commit_root = NULL;
 	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
 				     blocksize, generation);
-	if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
+	if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
 		free_extent_buffer(root->node);
 		root->node = NULL;
 		return -EIO;
@@ -2254,9 +2259,9 @@
 		goto fail_sb_buffer;
 	}
 
-	if (sectorsize < PAGE_SIZE) {
-		printk(KERN_WARNING "btrfs: Incompatible sector size "
-		       "found on %s\n", sb->s_id);
+	if (sectorsize != PAGE_SIZE) {
+		printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
+		       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
 		goto fail_sb_buffer;
 	}
 
@@ -3143,7 +3148,8 @@
 	return 0;
 }
 
-int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
+int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
+			  int atomic)
 {
 	int ret;
 	struct inode *btree_inode = buf->pages[0]->mapping->host;
@@ -3153,7 +3159,9 @@
 		return ret;
 
 	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
-				    parent_transid);
+				    parent_transid, atomic);
+	if (ret == -EAGAIN)
+		return ret;
 	return !ret;
 }
 
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index a7ace1a..ab1830a 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -66,7 +66,8 @@
 void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
 void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
-int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
+int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
+			  int atomic);
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
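
The new atomic argument threaded through verify_parent_transid() and btrfs_buffer_uptodate() lets callers that cannot sleep bail out with -EAGAIN instead of blocking on lock_extent_bits(). A minimal userspace sketch of that calling convention follows; struct buf and verify_generation() are invented names, and a pthread mutex stands in for the extent-state lock.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for a metadata buffer and its blocking lock. */
struct buf {
	pthread_mutex_t lock;
	unsigned long generation;
};

/*
 * Compare the cached generation with the expected one.  When 'atomic' is
 * non-zero the caller cannot sleep, so instead of blocking on the lock we
 * return -EAGAIN and let the caller retry from a context that may block.
 */
static int verify_generation(struct buf *b, unsigned long expected, int atomic)
{
	int ret;

	if (!expected || b->generation == expected)
		return 0;

	if (atomic)
		return -EAGAIN;		/* never sleep in atomic context */

	pthread_mutex_lock(&b->lock);	/* blocking path, may sleep */
	ret = (b->generation == expected) ? 0 : -EIO;
	pthread_mutex_unlock(&b->lock);
	return ret;
}

int main(void)
{
	struct buf b = { PTHREAD_MUTEX_INITIALIZER, 42 };

	printf("atomic:   %d\n", verify_generation(&b, 7, 1));	/* -EAGAIN */
	printf("blocking: %d\n", verify_generation(&b, 7, 0));	/* -EIO */
	return 0;
}

A caller that sees -EAGAIN is expected to retry from a context where sleeping is allowed, which is why btrfs_buffer_uptodate() above forwards -EAGAIN unchanged rather than folding it into its usual boolean return.
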
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2b35f8d..49fd7b6 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2301,6 +2301,7 @@
 
 				if (ret) {
 					printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+					spin_lock(&delayed_refs->lock);
 					return ret;
 				}
 
@@ -2331,6 +2332,7 @@
 
 		if (ret) {
 			printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+			spin_lock(&delayed_refs->lock);
 			return ret;
 		}
 
@@ -3769,13 +3771,10 @@
 		 */
 		if (current->journal_info)
 			return -EAGAIN;
-		ret = wait_event_interruptible(space_info->wait,
-					       !space_info->flush);
-		/* Must have been interrupted, return */
-		if (ret) {
-			printk(KERN_DEBUG "btrfs: %s returning -EINTR\n", __func__);
+		ret = wait_event_killable(space_info->wait, !space_info->flush);
+		/* Must have been killed, return */
+		if (ret)
 			return -EINTR;
-		}
 
 		spin_lock(&space_info->lock);
 	}
@@ -4215,8 +4214,8 @@
 
 	num_bytes = calc_global_metadata_size(fs_info);
 
-	spin_lock(&block_rsv->lock);
 	spin_lock(&sinfo->lock);
+	spin_lock(&block_rsv->lock);
 
 	block_rsv->size = num_bytes;
 
@@ -4242,8 +4241,8 @@
 		block_rsv->full = 1;
 	}
 
-	spin_unlock(&sinfo->lock);
 	spin_unlock(&block_rsv->lock);
+	spin_unlock(&sinfo->lock);
 }
 
 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
@@ -6569,7 +6568,7 @@
 			goto skip;
 	}
 
-	if (!btrfs_buffer_uptodate(next, generation)) {
+	if (!btrfs_buffer_uptodate(next, generation, 0)) {
 		btrfs_tree_unlock(next);
 		free_extent_buffer(next);
 		next = NULL;
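
Two of the hunks above only reorder spin_lock()/spin_unlock() pairs so that sinfo->lock is always taken before block_rsv->lock and released in the opposite order. Keeping one global acquisition order is the standard way to rule out ABBA deadlocks; a small pthread sketch of the rule, with illustrative names (sinfo_lock and rsv_lock here are not btrfs symbols):

#include <pthread.h>
#include <stdio.h>

/* Two locks that several code paths may need to hold at the same time. */
static pthread_mutex_t sinfo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rsv_lock   = PTHREAD_MUTEX_INITIALIZER;

/*
 * Every path that needs both locks takes them in the same global order
 * (sinfo_lock first, then rsv_lock) and drops them in reverse.  If one
 * path took rsv_lock first, two threads could each hold one lock and
 * wait forever for the other one (an ABBA deadlock).
 */
static void update_reservation(long *size, long new_size)
{
	pthread_mutex_lock(&sinfo_lock);
	pthread_mutex_lock(&rsv_lock);

	*size = new_size;

	pthread_mutex_unlock(&rsv_lock);
	pthread_mutex_unlock(&sinfo_lock);
}

int main(void)
{
	long reserved = 0;

	update_reservation(&reserved, 4096);
	printf("reserved = %ld\n", reserved);
	return 0;
}
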
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cd4b5e4..c9018a0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -402,20 +402,28 @@
 	return 0;
 }
 
+static struct extent_state *next_state(struct extent_state *state)
+{
+	struct rb_node *next = rb_next(&state->rb_node);
+	if (next)
+		return rb_entry(next, struct extent_state, rb_node);
+	else
+		return NULL;
+}
+
 /*
  * utility function to clear some bits in an extent state struct.
- * it will optionally wake up any one waiting on this state (wake == 1), or
- * forcibly remove the state from the tree (delete == 1).
+ * it will optionally wake up any one waiting on this state (wake == 1)
  *
  * If no bits are set on the state struct after clearing things, the
  * struct is freed and removed from the tree
  */
-static int clear_state_bit(struct extent_io_tree *tree,
-			    struct extent_state *state,
-			    int *bits, int wake)
+static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
+					    struct extent_state *state,
+					    int *bits, int wake)
 {
+	struct extent_state *next;
 	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
-	int ret = state->state & bits_to_clear;
 
 	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
 		u64 range = state->end - state->start + 1;
@@ -427,6 +435,7 @@
 	if (wake)
 		wake_up(&state->wq);
 	if (state->state == 0) {
+		next = next_state(state);
 		if (state->tree) {
 			rb_erase(&state->rb_node, &tree->state);
 			state->tree = NULL;
@@ -436,8 +445,9 @@
 		}
 	} else {
 		merge_state(tree, state);
+		next = next_state(state);
 	}
-	return ret;
+	return next;
 }
 
 static struct extent_state *
@@ -476,7 +486,6 @@
 	struct extent_state *state;
 	struct extent_state *cached;
 	struct extent_state *prealloc = NULL;
-	struct rb_node *next_node;
 	struct rb_node *node;
 	u64 last_end;
 	int err;
@@ -528,14 +537,11 @@
 	WARN_ON(state->end < start);
 	last_end = state->end;
 
-	if (state->end < end && !need_resched())
-		next_node = rb_next(&state->rb_node);
-	else
-		next_node = NULL;
-
 	/* the state doesn't have the wanted bits, go ahead */
-	if (!(state->state & bits))
+	if (!(state->state & bits)) {
+		state = next_state(state);
 		goto next;
+	}
 
 	/*
 	 *     | ---- desired range ---- |
@@ -593,16 +599,13 @@
 		goto out;
 	}
 
-	clear_state_bit(tree, state, &bits, wake);
+	state = clear_state_bit(tree, state, &bits, wake);
 next:
 	if (last_end == (u64)-1)
 		goto out;
 	start = last_end + 1;
-	if (start <= end && next_node) {
-		state = rb_entry(next_node, struct extent_state,
-				 rb_node);
+	if (start <= end && state && !need_resched())
 		goto hit_next;
-	}
 	goto search_again;
 
 out:
@@ -2301,7 +2304,7 @@
 	u64 start;
 	u64 end;
 	int whole_page;
-	int failed_mirror;
+	int mirror;
 	int ret;
 
 	if (err)
@@ -2340,20 +2343,18 @@
 		}
 		spin_unlock(&tree->lock);
 
+		mirror = (int)(unsigned long)bio->bi_bdev;
 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
 			ret = tree->ops->readpage_end_io_hook(page, start, end,
-							      state);
+							      state, mirror);
 			if (ret)
 				uptodate = 0;
 			else
 				clean_io_failure(start, page);
 		}
 
-		if (!uptodate)
-			failed_mirror = (int)(unsigned long)bio->bi_bdev;
-
 		if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
-			ret = tree->ops->readpage_io_failed_hook(page, failed_mirror);
+			ret = tree->ops->readpage_io_failed_hook(page, mirror);
 			if (!ret && !err &&
 			    test_bit(BIO_UPTODATE, &bio->bi_flags))
 				uptodate = 1;
@@ -2368,8 +2369,7 @@
 			 * can't handle the error it will return -EIO and we
 			 * remain responsible for that page.
 			 */
-			ret = bio_readpage_error(bio, page, start, end,
-							failed_mirror, NULL);
+			ret = bio_readpage_error(bio, page, start, end, mirror, NULL);
 			if (ret == 0) {
 				uptodate =
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -4120,6 +4120,7 @@
 			if (atomic_inc_not_zero(&exists->refs)) {
 				spin_unlock(&mapping->private_lock);
 				unlock_page(p);
+				page_cache_release(p);
 				mark_extent_buffer_accessed(exists);
 				goto free_eb;
 			}
@@ -4199,8 +4200,7 @@
 			unlock_page(eb->pages[i]);
 	}
 
-	if (!atomic_dec_and_test(&eb->refs))
-		return exists;
+	WARN_ON(!atomic_dec_and_test(&eb->refs));
 	btrfs_release_extent_buffer(eb);
 	return exists;
 }
@@ -4462,7 +4462,7 @@
 	}
 
 	clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
-	eb->failed_mirror = 0;
+	eb->read_mirror = 0;
 	atomic_set(&eb->io_pages, num_reads);
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
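
clear_state_bit() now hands the next extent state back through the new next_state() helper, so the clearing loop can keep walking the range without the up-front rb_next() bookkeeping it used to do. The same shape, reduced to a singly linked list of hypothetical records (struct state and clear_state_bits() are made-up names for the sketch):

#include <stdio.h>

/* A simplified stand-in for an extent state record. */
struct state {
	unsigned long start, end;
	unsigned int bits;
	struct state *next;
};

/* Return the record that follows 'st', or NULL at the end of the list. */
static struct state *next_state(struct state *st)
{
	return st ? st->next : NULL;
}

/*
 * Clear 'bits' in one record and hand back the following record, so the
 * caller can keep walking the range without searching the structure
 * again for every element it just modified.
 */
static struct state *clear_state_bits(struct state *st, unsigned int bits)
{
	struct state *next = next_state(st);

	st->bits &= ~bits;
	return next;
}

int main(void)
{
	struct state c = { 200, 299, 0x3, NULL };
	struct state b = { 100, 199, 0x1, &c };
	struct state a = {   0,  99, 0x3, &b };

	for (struct state *st = &a; st; st = clear_state_bits(st, 0x1))
		printf("clearing bit in [%lu, %lu]\n", st->start, st->end);

	printf("remaining bits: %x %x %x\n", a.bits, b.bits, c.bits);
	return 0;
}
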
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index faf10eb5..b516c3b 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -79,7 +79,7 @@
 					u64 start, u64 end,
 				       struct extent_state *state);
 	int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
-				    struct extent_state *state);
+				    struct extent_state *state, int mirror);
 	int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
 				      struct extent_state *state, int uptodate);
 	void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
@@ -135,7 +135,7 @@
 	spinlock_t refs_lock;
 	atomic_t refs;
 	atomic_t io_pages;
-	int failed_mirror;
+	int read_mirror;
 	struct list_head leak_list;
 	struct rcu_head rcu_head;
 	pid_t lock_owner;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index d83260d..53bf2d7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -567,6 +567,7 @@
 	int extent_type;
 	int recow;
 	int ret;
+	int modify_tree = -1;
 
 	if (drop_cache)
 		btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -575,10 +576,13 @@
 	if (!path)
 		return -ENOMEM;
 
+	if (start >= BTRFS_I(inode)->disk_i_size)
+		modify_tree = 0;
+
 	while (1) {
 		recow = 0;
 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
-					       search_start, -1);
+					       search_start, modify_tree);
 		if (ret < 0)
 			break;
 		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
@@ -634,7 +638,8 @@
 		}
 
 		search_start = max(key.offset, start);
-		if (recow) {
+		if (recow || !modify_tree) {
+			modify_tree = -1;
 			btrfs_release_path(path);
 			continue;
 		}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 115bc05..61b16c6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1947,7 +1947,7 @@
  * extent_io.c will try to find good copies for us.
  */
 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
-			       struct extent_state *state)
+			       struct extent_state *state, int mirror)
 {
 	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
 	struct inode *inode = page->mapping->host;
@@ -4069,7 +4069,7 @@
 	BTRFS_I(inode)->dummy_inode = 1;
 
 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
-	inode->i_op = &simple_dir_inode_operations;
+	inode->i_op = &btrfs_dir_ro_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
@@ -4140,14 +4140,18 @@
 static int btrfs_dentry_delete(const struct dentry *dentry)
 {
 	struct btrfs_root *root;
+	struct inode *inode = dentry->d_inode;
 
-	if (!dentry->d_inode && !IS_ROOT(dentry))
-		dentry = dentry->d_parent;
+	if (!inode && !IS_ROOT(dentry))
+		inode = dentry->d_parent->d_inode;
 
-	if (dentry->d_inode) {
-		root = BTRFS_I(dentry->d_inode)->root;
+	if (inode) {
+		root = BTRFS_I(inode)->root;
 		if (btrfs_root_refs(&root->root_item) == 0)
 			return 1;
+
+		if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+			return 1;
 	}
 	return 0;
 }
@@ -4188,7 +4192,6 @@
 	struct btrfs_path *path;
 	struct list_head ins_list;
 	struct list_head del_list;
-	struct qstr q;
 	int ret;
 	struct extent_buffer *leaf;
 	int slot;
@@ -4279,7 +4282,6 @@
 
 		while (di_cur < di_total) {
 			struct btrfs_key location;
-			struct dentry *tmp;
 
 			if (verify_dir_item(root, leaf, di))
 				break;
@@ -4300,35 +4302,15 @@
 			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
 			btrfs_dir_item_key_to_cpu(leaf, di, &location);
 
-			q.name = name_ptr;
-			q.len = name_len;
-			q.hash = full_name_hash(q.name, q.len);
-			tmp = d_lookup(filp->f_dentry, &q);
-			if (!tmp) {
-				struct btrfs_key *newkey;
 
-				newkey = kzalloc(sizeof(struct btrfs_key),
-						 GFP_NOFS);
-				if (!newkey)
-					goto no_dentry;
-				tmp = d_alloc(filp->f_dentry, &q);
-				if (!tmp) {
-					kfree(newkey);
-					dput(tmp);
-					goto no_dentry;
-				}
-				memcpy(newkey, &location,
-				       sizeof(struct btrfs_key));
-				tmp->d_fsdata = newkey;
-				tmp->d_flags |= DCACHE_NEED_LOOKUP;
-				d_rehash(tmp);
-				dput(tmp);
-			} else {
-				dput(tmp);
-			}
-no_dentry:
 			/* is this a reference to our own snapshot? If so
-			 * skip it
+			 * skip it.
+			 *
+			 * In contrast to old kernels, we insert the snapshot's
+			 * dir item and dir index after it has been created, so
+			 * we won't find a reference to our own snapshot. We
+			 * still keep the following code for backward
+			 * compatibility.
 			 */
 			if (location.type == BTRFS_ROOT_ITEM_KEY &&
 			    location.objectid == root->root_key.objectid) {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 18cc23d..14f8e1f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2262,7 +2262,10 @@
 	di_args->bytes_used = dev->bytes_used;
 	di_args->total_bytes = dev->total_bytes;
 	memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
-	strncpy(di_args->path, dev->name, sizeof(di_args->path));
+	if (dev->name)
+		strncpy(di_args->path, dev->name, sizeof(di_args->path));
+	else
+		di_args->path[0] = '\0';
 
 out:
 	if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 4f69028..086e6bd 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -252,7 +252,7 @@
 
 struct btrfs_ioctl_ino_path_args {
 	__u64				inum;		/* in */
-	__u32				size;		/* in */
+	__u64				size;		/* in */
 	__u64				reserved[4];
 	/* struct btrfs_data_container	*fspath;	   out */
 	__u64				fspath;		/* out */
@@ -260,7 +260,7 @@
 
 struct btrfs_ioctl_logical_ino_args {
 	__u64				logical;	/* in */
-	__u32				size;		/* in */
+	__u64				size;		/* in */
 	__u64				reserved[4];
 	/* struct btrfs_data_container	*inodes;	out   */
 	__u64				inodes;
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index dc5d331..ac5d010 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -250,14 +250,12 @@
 					  struct btrfs_bio *bbio)
 {
 	int ret;
-	int looped = 0;
 	struct reada_zone *zone;
 	struct btrfs_block_group_cache *cache = NULL;
 	u64 start;
 	u64 end;
 	int i;
 
-again:
 	zone = NULL;
 	spin_lock(&fs_info->reada_lock);
 	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
@@ -274,9 +272,6 @@
 		spin_unlock(&fs_info->reada_lock);
 	}
 
-	if (looped)
-		return NULL;
-
 	cache = btrfs_lookup_block_group(fs_info, logical);
 	if (!cache)
 		return NULL;
@@ -307,13 +302,15 @@
 	ret = radix_tree_insert(&dev->reada_zones,
 				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
 				zone);
-	spin_unlock(&fs_info->reada_lock);
 
-	if (ret) {
+	if (ret == -EEXIST) {
 		kfree(zone);
-		looped = 1;
-		goto again;
+		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
+					     logical >> PAGE_CACHE_SHIFT, 1);
+		if (ret == 1)
+			kref_get(&zone->refcnt);
 	}
+	spin_unlock(&fs_info->reada_lock);
 
 	return zone;
 }
@@ -323,26 +320,26 @@
 					      struct btrfs_key *top, int level)
 {
 	int ret;
-	int looped = 0;
 	struct reada_extent *re = NULL;
+	struct reada_extent *re_exist = NULL;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 	struct btrfs_bio *bbio = NULL;
 	struct btrfs_device *dev;
+	struct btrfs_device *prev_dev;
 	u32 blocksize;
 	u64 length;
 	int nzones = 0;
 	int i;
 	unsigned long index = logical >> PAGE_CACHE_SHIFT;
 
-again:
 	spin_lock(&fs_info->reada_lock);
 	re = radix_tree_lookup(&fs_info->reada_tree, index);
 	if (re)
 		kref_get(&re->refcnt);
 	spin_unlock(&fs_info->reada_lock);
 
-	if (re || looped)
+	if (re)
 		return re;
 
 	re = kzalloc(sizeof(*re), GFP_NOFS);
@@ -398,16 +395,31 @@
 	/* insert extent in reada_tree + all per-device trees, all or nothing */
 	spin_lock(&fs_info->reada_lock);
 	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
-	if (ret) {
+	if (ret == -EEXIST) {
+		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
+		BUG_ON(!re_exist);
+		kref_get(&re_exist->refcnt);
 		spin_unlock(&fs_info->reada_lock);
-		if (ret != -ENOMEM) {
-			/* someone inserted the extent in the meantime */
-			looped = 1;
-		}
 		goto error;
 	}
+	if (ret) {
+		spin_unlock(&fs_info->reada_lock);
+		goto error;
+	}
+	prev_dev = NULL;
 	for (i = 0; i < nzones; ++i) {
 		dev = bbio->stripes[i].dev;
+		if (dev == prev_dev) {
+			/*
+			 * in case of DUP, just add the first zone. As both
+			 * are on the same device, there's nothing to gain
+			 * from adding both.
+			 * Also, it wouldn't work, as the tree is per device
+			 * and adding would fail with EEXIST
+			 */
+			continue;
+		}
+		prev_dev = dev;
 		ret = radix_tree_insert(&dev->reada_extents, index, re);
 		if (ret) {
 			while (--i >= 0) {
@@ -450,9 +462,7 @@
 	}
 	kfree(bbio);
 	kfree(re);
-	if (looped)
-		goto again;
-	return NULL;
+	return re_exist;
 }
 
 static void reada_kref_dummy(struct kref *kr)
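
Both radix_tree_insert() call sites above now treat -EEXIST as "somebody else won the race": they look up the entry that is already present, take a reference on it, and return it, all under fs_info->reada_lock, instead of freeing everything and looping back to the top. A toy insert-or-reuse table showing that control flow; the fixed-size array, table_insert() and get_entry() are inventions for the sketch:

#include <errno.h>
#include <stdio.h>

#define SLOTS 16

struct entry {
	unsigned long index;
	int refs;
	int used;
};

static struct entry table[SLOTS];

/* Insert a copy of *e at 'index'; fail with -EEXIST if the slot is taken. */
static int table_insert(unsigned long index, struct entry *e)
{
	struct entry *slot = &table[index % SLOTS];

	if (slot->used)
		return -EEXIST;
	*slot = *e;
	return 0;
}

static struct entry *table_lookup(unsigned long index)
{
	struct entry *slot = &table[index % SLOTS];

	return slot->used ? slot : NULL;
}

/*
 * Insert-or-reuse: if another path already inserted an entry for this
 * index, take a reference on the existing one and return it instead of
 * looping back and redoing the whole creation path.
 */
static struct entry *get_entry(unsigned long index)
{
	struct entry new = { index, 1, 1 };

	if (table_insert(index, &new) == -EEXIST) {
		struct entry *existing = table_lookup(index);

		existing->refs++;
		return existing;
	}
	return table_lookup(index);
}

int main(void)
{
	struct entry *a = get_entry(5);
	struct entry *b = get_entry(5);	/* hits the -EEXIST path */

	printf("same entry: %d, refs: %d\n", a == b, b->refs);
	return 0;
}
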
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 017281d..646ee21 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1279,7 +1279,9 @@
 		if (rb_node)
 			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
 	} else {
+		spin_lock(&root->fs_info->trans_lock);
 		list_del_init(&root->root_list);
+		spin_unlock(&root->fs_info->trans_lock);
 		kfree(node);
 	}
 	return 0;
@@ -3811,7 +3813,7 @@
 
 		ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5);
 		if (ret < 0) {
-			if (ret != -EAGAIN) {
+			if (ret != -ENOSPC) {
 				err = ret;
 				WARN_ON(1);
 				break;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index bc015f7..2f3d6f9 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -998,6 +998,7 @@
 			page = sblock->pagev + page_index;
 			page->logical = logical;
 			page->physical = bbio->stripes[mirror_index].physical;
+			/* for missing devices, bdev is NULL */
 			page->bdev = bbio->stripes[mirror_index].dev->bdev;
 			page->mirror_num = mirror_index + 1;
 			page->page = alloc_page(GFP_NOFS);
@@ -1042,6 +1043,12 @@
 		struct scrub_page *page = sblock->pagev + page_num;
 		DECLARE_COMPLETION_ONSTACK(complete);
 
+		if (page->bdev == NULL) {
+			page->io_error = 1;
+			sblock->no_io_error_seen = 0;
+			continue;
+		}
+
 		BUG_ON(!page->page);
 		bio = bio_alloc(GFP_NOFS, 1);
 		if (!bio)
@@ -1257,12 +1264,6 @@
 	if (memcmp(csum, on_disk_csum, sdev->csum_size))
 		fail = 1;
 
-	if (fail) {
-		spin_lock(&sdev->stat_lock);
-		++sdev->stat.csum_errors;
-		spin_unlock(&sdev->stat_lock);
-	}
-
 	return fail;
 }
 
@@ -1335,15 +1336,6 @@
 	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
 		++crc_fail;
 
-	if (crc_fail || fail) {
-		spin_lock(&sdev->stat_lock);
-		if (crc_fail)
-			++sdev->stat.csum_errors;
-		if (fail)
-			++sdev->stat.verify_errors;
-		spin_unlock(&sdev->stat_lock);
-	}
-
 	return fail || crc_fail;
 }
 
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8d5d380..c5f8fca 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -815,7 +815,6 @@
 		return 0;
 	}
 
-	btrfs_start_delalloc_inodes(root, 0);
 	btrfs_wait_ordered_extents(root, 0, 0);
 
 	trans = btrfs_start_transaction(root, 0);
@@ -1148,13 +1147,15 @@
 		if (ret)
 			goto restore;
 	} else {
-		if (fs_info->fs_devices->rw_devices == 0)
+		if (fs_info->fs_devices->rw_devices == 0) {
 			ret = -EACCES;
 			goto restore;
+		}
 
-		if (btrfs_super_log_root(fs_info->super_copy) != 0)
+		if (btrfs_super_log_root(fs_info->super_copy) != 0) {
 			ret = -EINVAL;
 			goto restore;
+		}
 
 		ret = btrfs_cleanup_fs_roots(fs_info);
 		if (ret)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 11b77a5..3642225 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -73,8 +73,10 @@
 
 	cur_trans = root->fs_info->running_transaction;
 	if (cur_trans) {
-		if (cur_trans->aborted)
+		if (cur_trans->aborted) {
+			spin_unlock(&root->fs_info->trans_lock);
 			return cur_trans->aborted;
+		}
 		atomic_inc(&cur_trans->use_count);
 		atomic_inc(&cur_trans->num_writers);
 		cur_trans->num_joined++;
@@ -1400,6 +1402,7 @@
 	ret = commit_fs_roots(trans, root);
 	if (ret) {
 		mutex_unlock(&root->fs_info->tree_log_mutex);
+		mutex_unlock(&root->fs_info->reloc_mutex);
 		goto cleanup_transaction;
 	}
 
@@ -1411,6 +1414,7 @@
 	ret = commit_cowonly_roots(trans, root);
 	if (ret) {
 		mutex_unlock(&root->fs_info->tree_log_mutex);
+		mutex_unlock(&root->fs_info->reloc_mutex);
 		goto cleanup_transaction;
 	}
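
The transaction-join and commit hunks above all restore the same invariant: every early return taken after a lock is acquired must drop that lock (and any mutex) again before returning. The usual idiom is to funnel error exits through one unlock label, sketched here with a pthread mutex and invented names:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int aborted = 1;		/* pretend the transaction was aborted */

/*
 * Every return path that runs after the lock is taken must drop it
 * again.  Funnelling all exits through one unlock label makes it hard
 * to leave an error path holding the lock, which is exactly the bug
 * class fixed above.
 */
static int join_transaction(void)
{
	int ret = 0;

	pthread_mutex_lock(&state_lock);

	if (aborted) {
		ret = -EIO;	/* error: bail out, but still unlock */
		goto out_unlock;
	}

	/* ... normal work under the lock would go here ... */

out_unlock:
	pthread_mutex_unlock(&state_lock);
	return ret;
}

int main(void)
{
	printf("join_transaction() = %d\n", join_transaction());

	/* The lock must be free again, so this must not deadlock. */
	pthread_mutex_lock(&state_lock);
	pthread_mutex_unlock(&state_lock);
	printf("lock released correctly on the error path\n");
	return 0;
}
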
 
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d017283..eb1ae90 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -279,7 +279,7 @@
 						log->fs_info->extent_root,
 						eb->start, eb->len);
 
-	if (btrfs_buffer_uptodate(eb, gen)) {
+	if (btrfs_buffer_uptodate(eb, gen, 0)) {
 		if (wc->write)
 			btrfs_write_tree_block(eb);
 		if (wc->wait)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 759d0248..1411b99 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3324,12 +3324,14 @@
 	stripe_size = devices_info[ndevs-1].max_avail;
 	num_stripes = ndevs * dev_stripes;
 
-	if (stripe_size * num_stripes > max_chunk_size * ncopies) {
+	if (stripe_size * ndevs > max_chunk_size * ncopies) {
 		stripe_size = max_chunk_size * ncopies;
-		do_div(stripe_size, num_stripes);
+		do_div(stripe_size, ndevs);
 	}
 
 	do_div(stripe_size, dev_stripes);
+
+	/* align to BTRFS_STRIPE_LEN */
 	do_div(stripe_size, BTRFS_STRIPE_LEN);
 	stripe_size *= BTRFS_STRIPE_LEN;
 
@@ -3805,10 +3807,11 @@
 		else if (mirror_num)
 			stripe_index += mirror_num - 1;
 		else {
+			int old_stripe_index = stripe_index;
 			stripe_index = find_live_mirror(map, stripe_index,
 					      map->sub_stripes, stripe_index +
 					      current->pid % map->sub_stripes);
-			mirror_num = stripe_index + 1;
+			mirror_num = stripe_index - old_stripe_index + 1;
 		}
 	} else {
 		/*
@@ -4350,8 +4353,10 @@
 
 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
 				   root->fs_info->bdev_holder);
-	if (ret)
+	if (ret) {
+		free_fs_devices(fs_devices);
 		goto out;
+	}
 
 	if (!fs_devices->seeding) {
 		__btrfs_close_devices(fs_devices);
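
The chunk allocator hunk caps stripe_size by dividing by ndevs rather than num_stripes, and then rounds the result down to a multiple of BTRFS_STRIPE_LEN with the do_div()/multiply pair. The rounding itself is plain integer align-down; a standalone version, with 64K assumed here purely for illustration:

#include <stdio.h>

#define STRIPE_LEN 65536UL	/* stand-in value for the stripe length */

/* Round 'size' down to the nearest multiple of 'align' (align > 0). */
static unsigned long align_down(unsigned long size, unsigned long align)
{
	return (size / align) * align;
}

int main(void)
{
	unsigned long stripe_size = 1000000;	/* arbitrary example value */

	stripe_size = align_down(stripe_size, STRIPE_LEN);
	printf("aligned stripe size: %lu (= %lu stripes)\n",
	       stripe_size, stripe_size / STRIPE_LEN);
	return 0;
}
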
diff --git a/fs/buffer.c b/fs/buffer.c
index 36d6665..ad5938c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -921,6 +921,7 @@
 	struct buffer_head *head = page_buffers(page);
 	struct buffer_head *bh = head;
 	int uptodate = PageUptodate(page);
+	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
 
 	do {
 		if (!buffer_mapped(bh)) {
@@ -929,7 +930,8 @@
 			bh->b_blocknr = block;
 			if (uptodate)
 				set_buffer_uptodate(bh);
-			set_buffer_mapped(bh);
+			if (block < end_block)
+				set_buffer_mapped(bh);
 		}
 		block++;
 		bh = bh->b_this_page;
@@ -985,7 +987,6 @@
 	return page;
 
 failed:
-	BUG();
 	unlock_page(page);
 	page_cache_release(page);
 	return NULL;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d342128..541ef81 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -370,13 +370,13 @@
 				   (int)(srcaddr->sa_family));
 	}
 
-	seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
+	seq_printf(s, ",uid=%u", cifs_sb->mnt_uid);
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
 		seq_printf(s, ",forceuid");
 	else
 		seq_printf(s, ",noforceuid");
 
-	seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
+	seq_printf(s, ",gid=%u", cifs_sb->mnt_gid);
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
 		seq_printf(s, ",forcegid");
 	else
@@ -434,11 +434,15 @@
 		seq_printf(s, ",noperm");
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
 		seq_printf(s, ",strictcache");
+	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
+		seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid);
+	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
+		seq_printf(s, ",backupgid=%u", cifs_sb->mnt_backupgid);
 
-	seq_printf(s, ",rsize=%d", cifs_sb->rsize);
-	seq_printf(s, ",wsize=%d", cifs_sb->wsize);
+	seq_printf(s, ",rsize=%u", cifs_sb->rsize);
+	seq_printf(s, ",wsize=%u", cifs_sb->wsize);
 	/* convert actimeo and display it in seconds */
-		seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
+	seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
 
 	return 0;
 }
@@ -695,7 +699,7 @@
 	 * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
 	 * the cached file length
 	 */
-	if (origin != SEEK_SET || origin != SEEK_CUR) {
+	if (origin != SEEK_SET && origin != SEEK_CUR) {
 		int rc;
 		struct inode *inode = file->f_path.dentry->d_inode;
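
The llseek fix above is the classic "x != A || x != B" mistake: a value can never equal two different constants at once, so the old test was true for every origin and the file length was revalidated even for SEEK_SET and SEEK_CUR. A short demonstration of both forms of the test:

#include <stdio.h>

int main(void)
{
	const int origins[] = { SEEK_SET, SEEK_CUR, SEEK_END };

	for (int i = 0; i < 3; i++) {
		int origin = origins[i];

		/* Buggy test: true for every possible origin. */
		int buggy = (origin != SEEK_SET || origin != SEEK_CUR);

		/* Fixed test: true only when origin is neither SEEK_SET
		 * nor SEEK_CUR, i.e. only then must we revalidate. */
		int fixed = (origin != SEEK_SET && origin != SEEK_CUR);

		printf("origin=%d  buggy=%d  fixed=%d\n", origin, buggy, fixed);
	}
	return 0;
}
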
 
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d1389bb..6536535 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -125,5 +125,5 @@
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "1.77"
+#define CIFS_VERSION   "1.78"
 #endif				/* _CIFSFS_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index f52c5ab..da2f544 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4844,8 +4844,12 @@
 		max_len = data_end - temp;
 		node->node_name = cifs_strndup_from_utf16(temp, max_len,
 						is_unicode, nls_codepage);
-		if (!node->node_name)
+		if (!node->node_name) {
 			rc = -ENOMEM;
+			goto parse_DFS_referrals_exit;
+		}
+
+		ref++;
 	}
 
 parse_DFS_referrals_exit:
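
The referral-parsing fix does two things: the loop now jumps to the common exit label when cifs_strndup_from_utf16() fails instead of continuing with a NULL name, and the missing ref++ means it no longer keeps parsing the first referral for every node. The same loop shape in miniature, with made-up types (struct referral, struct node) and strdup() standing in for the UTF-16 duplication:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct referral { const char *name; };
struct node { char *node_name; };

/*
 * Walk an array of referrals, duplicating each name.  The cursor must
 * advance at the end of every iteration, and an allocation failure must
 * jump to the common exit label rather than carrying on.
 */
static int parse_referrals(const struct referral *ref, int count,
			   struct node *nodes)
{
	int i, rc = 0;

	for (i = 0; i < count; i++) {
		nodes[i].node_name = strdup(ref->name);
		if (!nodes[i].node_name) {
			rc = -ENOMEM;
			goto out;	/* bail out, don't keep parsing */
		}
		ref++;			/* advance to the next referral */
	}

out:
	return rc;
}

int main(void)
{
	struct referral refs[] = { { "\\server\\a" }, { "\\server\\b" } };
	struct node nodes[2] = { { NULL }, { NULL } };

	if (parse_referrals(refs, 2, nodes) == 0)
		printf("parsed: %s, %s\n", nodes[0].node_name, nodes[1].node_name);

	free(nodes[0].node_name);
	free(nodes[1].node_name);
	return 0;
}
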
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f31dc9a..e0b56d7 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -164,7 +164,8 @@
 	{ Opt_sign, "sign" },
 	{ Opt_seal, "seal" },
 	{ Opt_direct, "direct" },
-	{ Opt_direct, "forceddirectio" },
+	{ Opt_direct, "directio" },
+	{ Opt_direct, "forcedirectio" },
 	{ Opt_strictcache, "strictcache" },
 	{ Opt_noac, "noac" },
 	{ Opt_fsc, "fsc" },
@@ -215,6 +216,8 @@
 
 	{ Opt_ignore, "cred" },
 	{ Opt_ignore, "credentials" },
+	{ Opt_ignore, "cred=%s" },
+	{ Opt_ignore, "credentials=%s" },
 	{ Opt_ignore, "guest" },
 	{ Opt_ignore, "rw" },
 	{ Opt_ignore, "ro" },
@@ -2183,6 +2186,7 @@
 	tcp_ses->session_estab = false;
 	tcp_ses->sequence_number = 0;
 	tcp_ses->lstrp = jiffies;
+	spin_lock_init(&tcp_ses->req_lock);
 	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
 	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
 	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
@@ -3228,10 +3232,6 @@
 
 	cifs_sb->mnt_uid = pvolume_info->linux_uid;
 	cifs_sb->mnt_gid = pvolume_info->linux_gid;
-	if (pvolume_info->backupuid_specified)
-		cifs_sb->mnt_backupuid = pvolume_info->backupuid;
-	if (pvolume_info->backupgid_specified)
-		cifs_sb->mnt_backupgid = pvolume_info->backupgid;
 	cifs_sb->mnt_file_mode = pvolume_info->file_mode;
 	cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
 	cFYI(1, "file mode: 0x%hx  dir mode: 0x%hx",
@@ -3262,10 +3262,14 @@
 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
 	if (pvolume_info->cifs_acl)
 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
-	if (pvolume_info->backupuid_specified)
+	if (pvolume_info->backupuid_specified) {
 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
-	if (pvolume_info->backupgid_specified)
+		cifs_sb->mnt_backupuid = pvolume_info->backupuid;
+	}
+	if (pvolume_info->backupgid_specified) {
 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
+		cifs_sb->mnt_backupgid = pvolume_info->backupgid;
+	}
 	if (pvolume_info->override_uid)
 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
 	if (pvolume_info->override_gid)
@@ -3614,22 +3618,6 @@
 	return volume_info;
 }
 
-/* make sure ra_pages is a multiple of rsize */
-static inline unsigned int
-cifs_ra_pages(struct cifs_sb_info *cifs_sb)
-{
-	unsigned int reads;
-	unsigned int rsize_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
-
-	if (rsize_pages >= default_backing_dev_info.ra_pages)
-		return default_backing_dev_info.ra_pages;
-	else if (rsize_pages == 0)
-		return rsize_pages;
-
-	reads = default_backing_dev_info.ra_pages / rsize_pages;
-	return reads * rsize_pages;
-}
-
 int
 cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
 {
@@ -3717,7 +3705,7 @@
 	cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info);
 
 	/* tune readahead according to rsize */
-	cifs_sb->bdi.ra_pages = cifs_ra_pages(cifs_sb);
+	cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
 
 remote_path_check:
 #ifdef CONFIG_CIFS_DFS_UPCALL
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d172c8e..ec4e9a2 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -668,12 +668,19 @@
 			return 0;
 		else {
 			/*
-			 * Forcibly invalidate automounting directory inodes
-			 * (remote DFS directories) so to have them
-			 * instantiated again for automount
+			 * If the inode wasn't known to be a dfs entry when
+			 * the dentry was instantiated, such as when created
+			 * via ->readdir(), it needs to be set now since the
+			 * attributes will have been updated by
+			 * cifs_revalidate_dentry().
 			 */
-			if (IS_AUTOMOUNT(direntry->d_inode))
-				return 0;
+			if (IS_AUTOMOUNT(direntry->d_inode) &&
+			   !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
+				spin_lock(&direntry->d_lock);
+				direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
+				spin_unlock(&direntry->d_lock);
+			}
+
 			return 1;
 		}
 	}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fae765d..81725e9 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2178,7 +2178,7 @@
 	unsigned long nr_pages, i;
 	size_t copied, len, cur_len;
 	ssize_t total_written = 0;
-	loff_t offset = *poffset;
+	loff_t offset;
 	struct iov_iter it;
 	struct cifsFileInfo *open_file;
 	struct cifs_tcon *tcon;
@@ -2200,6 +2200,7 @@
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	open_file = file->private_data;
 	tcon = tlink_tcon(open_file->tlink);
+	offset = *poffset;
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 		pid = open_file->pid;
diff --git a/fs/dcache.c b/fs/dcache.c
index b60ddc4..8c1ab8f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -141,18 +141,25 @@
  * Compare 2 name strings, return 0 if they match, otherwise non-zero.
  * The strings are both count bytes long, and count is non-zero.
  */
-static inline int dentry_cmp(const unsigned char *cs, size_t scount,
-				const unsigned char *ct, size_t tcount)
-{
 #ifdef CONFIG_DCACHE_WORD_ACCESS
-	unsigned long a,b,mask;
 
-	if (unlikely(scount != tcount))
-		return 1;
+#include <asm/word-at-a-time.h>
+/*
+ * NOTE! 'cs' and 'scount' come from a dentry, so it has an
+ * aligned allocation for this particular component. We don't
+ * strictly need the load_unaligned_zeropad() safety, but it
+ * doesn't hurt either.
+ *
+ * In contrast, 'ct' and 'tcount' can be from a pathname, and do
+ * need the careful unaligned handling.
+ */
+static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
+{
+	unsigned long a,b,mask;
 
 	for (;;) {
 		a = *(unsigned long *)cs;
-		b = *(unsigned long *)ct;
+		b = load_unaligned_zeropad(ct);
 		if (tcount < sizeof(unsigned long))
 			break;
 		if (unlikely(a != b))
@@ -165,10 +172,12 @@
 	}
 	mask = ~(~0ul << tcount*8);
 	return unlikely(!!((a ^ b) & mask));
-#else
-	if (scount != tcount)
-		return 1;
+}
 
+#else
+
+static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
+{
 	do {
 		if (*cs != *ct)
 			return 1;
@@ -177,7 +186,32 @@
 		tcount--;
 	} while (tcount);
 	return 0;
+}
+
 #endif
+
+static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
+{
+	const unsigned char *cs;
+	/*
+	 * Be careful about RCU walk racing with rename:
+	 * use ACCESS_ONCE to fetch the name pointer.
+	 *
+	 * NOTE! Even if a rename will mean that the length
+	 * was not loaded atomically, we don't care. The
+	 * RCU walk will check the sequence count eventually,
+	 * and catch it. And we won't overrun the buffer,
+	 * because we're reading the name pointer atomically,
+	 * and a dentry name is guaranteed to be properly
+	 * terminated with a NUL byte.
+	 *
+	 * End result: even if 'len' is wrong, we'll exit
+	 * early because the data cannot match (there can
+	 * be no NUL in the ct/tcount data)
+	 */
+	cs = ACCESS_ONCE(dentry->d_name.name);
+	smp_read_barrier_depends();
+	return dentry_string_cmp(cs, ct, tcount);
 }
 
 static void __d_free(struct rcu_head *head)
@@ -1240,6 +1274,13 @@
 	if (!dentry)
 		return NULL;
 
+	/*
+	 * We guarantee that the inline name is always NUL-terminated.
+	 * This way the memcpy() done by the name switching in rename
+	 * will still always have a NUL at the end, even if we might
+	 * be overwriting an internal NUL character
+	 */
+	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
 	if (name->len > DNAME_INLINE_LEN-1) {
 		dname = kmalloc(name->len + 1, GFP_KERNEL);
 		if (!dname) {
@@ -1249,13 +1290,16 @@
 	} else  {
 		dname = dentry->d_iname;
 	}	
-	dentry->d_name.name = dname;
 
 	dentry->d_name.len = name->len;
 	dentry->d_name.hash = name->hash;
 	memcpy(dname, name->name, name->len);
 	dname[name->len] = 0;
 
+	/* Make sure we always see the terminating NUL character */
+	smp_wmb();
+	dentry->d_name.name = dname;
+
 	dentry->d_count = 1;
 	dentry->d_flags = 0;
 	spin_lock_init(&dentry->d_lock);
@@ -1421,18 +1465,18 @@
 	}
 
 	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
-		struct qstr *qstr = &alias->d_name;
-
 		/*
 		 * Don't need alias->d_lock here, because aliases with
 		 * d_parent == entry->d_parent are not subject to name or
 		 * parent changes, because the parent inode i_mutex is held.
 		 */
-		if (qstr->hash != hash)
+		if (alias->d_name.hash != hash)
 			continue;
 		if (alias->d_parent != entry->d_parent)
 			continue;
-		if (dentry_cmp(qstr->name, qstr->len, name, len))
+		if (alias->d_name.len != len)
+			continue;
+		if (dentry_cmp(alias, name, len))
 			continue;
 		__dget(alias);
 		return alias;
@@ -1471,7 +1515,7 @@
 	struct dentry *res = NULL;
 
 	if (root_inode) {
-		static const struct qstr name = { .name = "/", .len = 1 };
+		static const struct qstr name = QSTR_INIT("/", 1);
 
 		res = __d_alloc(root_inode->i_sb, &name);
 		if (res)
@@ -1709,6 +1753,48 @@
 }
 EXPORT_SYMBOL(d_add_ci);
 
+/*
+ * Do the slow-case of the dentry name compare.
+ *
+ * Unlike the dentry_cmp() function, we need to atomically
+ * load the name, length and inode information, so that the
+ * filesystem can rely on them, and can use the 'name' and
+ * 'len' information without worrying about walking off the
+ * end of memory etc.
+ *
+ * Thus the read_seqcount_retry() and the "duplicate" info
+ * in arguments (the low-level filesystem should not look
+ * at the dentry inode or name contents directly, since
+ * rename can change them while we're in RCU mode).
+ */
+enum slow_d_compare {
+	D_COMP_OK,
+	D_COMP_NOMATCH,
+	D_COMP_SEQRETRY,
+};
+
+static noinline enum slow_d_compare slow_dentry_cmp(
+		const struct dentry *parent,
+		struct inode *inode,
+		struct dentry *dentry,
+		unsigned int seq,
+		const struct qstr *name)
+{
+	int tlen = dentry->d_name.len;
+	const char *tname = dentry->d_name.name;
+	struct inode *i = dentry->d_inode;
+
+	if (read_seqcount_retry(&dentry->d_seq, seq)) {
+		cpu_relax();
+		return D_COMP_SEQRETRY;
+	}
+	if (parent->d_op->d_compare(parent, inode,
+				dentry, i,
+				tlen, tname, name))
+		return D_COMP_NOMATCH;
+	return D_COMP_OK;
+}
+
 /**
  * __d_lookup_rcu - search for a dentry (racy, store-free)
  * @parent: parent dentry
@@ -1735,15 +1821,17 @@
  * the returned dentry, so long as its parent's seqlock is checked after the
  * child is looked up. Thus, an interlocking stepping of sequence lock checks
  * is formed, giving integrity down the path walk.
+ *
+ * NOTE! The caller *has* to check the resulting dentry against the sequence
+ * number we've returned before using any of the resulting dentry state!
  */
 struct dentry *__d_lookup_rcu(const struct dentry *parent,
 				const struct qstr *name,
-				unsigned *seqp, struct inode **inode)
+				unsigned *seqp, struct inode *inode)
 {
-	unsigned int len = name->len;
-	unsigned int hash = name->hash;
+	u64 hashlen = name->hash_len;
 	const unsigned char *str = name->name;
-	struct hlist_bl_head *b = d_hash(parent, hash);
+	struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
 	struct hlist_bl_node *node;
 	struct dentry *dentry;
 
@@ -1769,49 +1857,47 @@
 	 */
 	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
 		unsigned seq;
-		struct inode *i;
-		const char *tname;
-		int tlen;
-
-		if (dentry->d_name.hash != hash)
-			continue;
 
 seqretry:
-		seq = read_seqcount_begin(&dentry->d_seq);
+		/*
+		 * The dentry sequence count protects us from concurrent
+		 * renames, and thus protects inode, parent and name fields.
+		 *
+		 * The caller must perform a seqcount check in order
+		 * to do anything useful with the returned dentry,
+		 * including using the 'd_inode' pointer.
+		 *
+		 * NOTE! We do a "raw" seqcount_begin here. That means that
+		 * we don't wait for the sequence count to stabilize if it
+		 * is in the middle of a sequence change. If we do the slow
+		 * dentry compare, we will do seqretries until it is stable,
+		 * and if we end up with a successful lookup, we actually
+		 * want to exit RCU lookup anyway.
+		 */
+		seq = raw_seqcount_begin(&dentry->d_seq);
 		if (dentry->d_parent != parent)
 			continue;
 		if (d_unhashed(dentry))
 			continue;
-		tlen = dentry->d_name.len;
-		tname = dentry->d_name.name;
-		i = dentry->d_inode;
-		prefetch(tname);
-		/*
-		 * This seqcount check is required to ensure name and
-		 * len are loaded atomically, so as not to walk off the
-		 * edge of memory when walking. If we could load this
-		 * atomically some other way, we could drop this check.
-		 */
-		if (read_seqcount_retry(&dentry->d_seq, seq))
-			goto seqretry;
-		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
-			if (parent->d_op->d_compare(parent, *inode,
-						dentry, i,
-						tlen, tname, name))
-				continue;
-		} else {
-			if (dentry_cmp(tname, tlen, str, len))
-				continue;
-		}
-		/*
-		 * No extra seqcount check is required after the name
-		 * compare. The caller must perform a seqcount check in
-		 * order to do anything useful with the returned dentry
-		 * anyway.
-		 */
 		*seqp = seq;
-		*inode = i;
-		return dentry;
+
+		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
+			if (dentry->d_name.hash != hashlen_hash(hashlen))
+				continue;
+			switch (slow_dentry_cmp(parent, inode, dentry, seq, name)) {
+			case D_COMP_OK:
+				return dentry;
+			case D_COMP_NOMATCH:
+				continue;
+			default:
+				goto seqretry;
+			}
+		}
+
+		if (dentry->d_name.hash_len != hashlen)
+			continue;
+		if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
+			return dentry;
 	}
 	return NULL;
 }
@@ -1890,8 +1976,6 @@
 	rcu_read_lock();
 	
 	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
-		const char *tname;
-		int tlen;
 
 		if (dentry->d_name.hash != hash)
 			continue;
@@ -1906,15 +1990,17 @@
 		 * It is safe to compare names since d_move() cannot
 		 * change the qstr (protected by d_lock).
 		 */
-		tlen = dentry->d_name.len;
-		tname = dentry->d_name.name;
 		if (parent->d_flags & DCACHE_OP_COMPARE) {
+			int tlen = dentry->d_name.len;
+			const char *tname = dentry->d_name.name;
 			if (parent->d_op->d_compare(parent, parent->d_inode,
 						dentry, dentry->d_inode,
 						tlen, tname, name))
 				goto next;
 		} else {
-			if (dentry_cmp(tname, tlen, str, len))
+			if (dentry->d_name.len != len)
+				goto next;
+			if (dentry_cmp(dentry, str, len))
 				goto next;
 		}
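
dentry_string_cmp() above compares names one unsigned long at a time and masks off the bytes past the end of the tail word. A userspace approximation of the fast path, with two loud assumptions: a little-endian machine (so the first tcount bytes are the low bytes of the word) and buffers padded so that reading a whole word past the compared bytes is defined, which is the guarantee the kernel gets from dentry allocation plus load_unaligned_zeropad():

#include <stdio.h>
#include <string.h>

static int word_cmp(const unsigned char *cs, const unsigned char *ct,
		    unsigned int tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		memcpy(&a, cs, sizeof(a));	/* stand-in for the word loads */
		memcpy(&b, ct, sizeof(b));
		if (tcount < sizeof(unsigned long))
			break;
		if (a != b)
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	/* Keep only the low 'tcount' bytes of the final words (little-endian). */
	mask = ~(~0ul << tcount * 8);
	return !!((a ^ b) & mask);
}

int main(void)
{
	/* Fixed-size, zero-padded buffers so the over-read above is defined. */
	unsigned char a[16] = "filename", b[16] = "filename", c[16] = "filenamX";

	printf("%d %d\n", word_cmp(a, b, 8), word_cmp(a, c, 8));	/* 0 1 */
	return 0;
}

memcpy() is used for the loads here to keep the sketch free of the alignment and aliasing issues that the kernel version handles by other means.
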
 
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 739b098..c0b3c70 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1663,8 +1663,10 @@
 	if (op == EPOLL_CTL_ADD) {
 		if (is_file_epoll(tfile)) {
 			error = -ELOOP;
-			if (ep_loop_check(ep, tfile) != 0)
+			if (ep_loop_check(ep, tfile) != 0) {
+				clear_tfile_check_list();
 				goto error_tgt_fput;
+			}
 		} else
 			list_add(&tfile->f_tfile_llink, &tfile_check_list);
 	}
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index dffb865..f663a67 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -79,7 +79,7 @@
 
 struct dentry *ext2_get_parent(struct dentry *child)
 {
-	struct qstr dotdot = {.name = "..", .len = 2};
+	struct qstr dotdot = QSTR_INIT("..", 2);
 	unsigned long ino = ext2_inode_by_name(child->d_inode, &dotdot);
 	if (!ino)
 		return ERR_PTR(-ENOENT);
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index d7940b2..eeb63df 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1045,7 +1045,7 @@
 struct dentry *ext3_get_parent(struct dentry *child)
 {
 	unsigned long ino;
-	struct qstr dotdot = {.name = "..", .len = 2};
+	struct qstr dotdot = QSTR_INIT("..", 2);
 	struct ext3_dir_entry_2 * de;
 	struct buffer_head *bh;
 
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 349d7b3..e2a3f4b 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1052,10 +1052,7 @@
 struct dentry *ext4_get_parent(struct dentry *child)
 {
 	__u32 ino;
-	static const struct qstr dotdot = {
-		.name = "..",
-		.len = 2,
-	};
+	static const struct qstr dotdot = QSTR_INIT("..", 2);
 	struct ext4_dir_entry_2 * de;
 	struct buffer_head *bh;
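
The ext2/ext3/ext4 and dcache hunks replace open-coded struct qstr initializers with QSTR_INIT(name, len). Centralizing the initializer in one macro matters here because the length is now read together with the hash (the hash_len lookups in __d_lookup_rcu() above), so the initializers should not depend on the exact field layout. A cut-down illustration with a hypothetical struct and macro name:

#include <stdio.h>

/* A cut-down qstr: just a name pointer and its length. */
struct qstr_like {
	const char *name;
	unsigned int len;
};

/*
 * Hypothetical equivalent of the QSTR_INIT() helper used above: one
 * macro keeps every initializer consistent even if the struct layout
 * changes underneath it.
 */
#define QSTR_LIKE_INIT(n, l) { .name = (n), .len = (l) }

int main(void)
{
	static const struct qstr_like dotdot = QSTR_LIKE_INIT("..", 2);
	static const struct qstr_like root   = QSTR_LIKE_INIT("/", 1);

	printf("%.*s has length %u\n", (int)dotdot.len, dotdot.name, dotdot.len);
	printf("%.*s has length %u\n", (int)root.len, root.name, root.len);
	return 0;
}
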
 
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 230eb0f..bd4a589 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -73,12 +73,8 @@
 	int error = 0;
 
 	if (mode != inode->i_mode) {
-		struct iattr iattr;
-
-		iattr.ia_valid = ATTR_MODE;
-		iattr.ia_mode = mode;
-
-		error = gfs2_setattr_simple(inode, &iattr);
+		inode->i_mode = mode;
+		mark_inode_dirty(inode);
 	}
 
 	return error;
@@ -126,9 +122,7 @@
 		return PTR_ERR(acl);
 	if (!acl) {
 		mode &= ~current_umask();
-		if (mode != inode->i_mode)
-			error = gfs2_set_mode(inode, mode);
-		return error;
+		return gfs2_set_mode(inode, mode);
 	}
 
 	if (S_ISDIR(inode->i_mode)) {
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 9b2ff0e..e80a464 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -36,8 +36,8 @@
 #include "glops.h"
 
 
-void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
-			    unsigned int from, unsigned int to)
+static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
+				   unsigned int from, unsigned int to)
 {
 	struct buffer_head *head = page_buffers(page);
 	unsigned int bsize = head->b_size;
@@ -517,15 +517,14 @@
 /**
  * gfs2_internal_read - read an internal file
  * @ip: The gfs2 inode
- * @ra_state: The readahead state (or NULL for no readahead)
  * @buf: The buffer to fill
  * @pos: The file position
  * @size: The amount to read
  *
  */
 
-int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
-                       char *buf, loff_t *pos, unsigned size)
+int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
+                       unsigned size)
 {
 	struct address_space *mapping = ip->i_inode.i_mapping;
 	unsigned long index = *pos / PAGE_CACHE_SIZE;
@@ -943,8 +942,8 @@
 	clear_buffer_dirty(bh);
 	bd = bh->b_private;
 	if (bd) {
-		if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
-			list_del_init(&bd->bd_le.le_list);
+		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
+			list_del_init(&bd->bd_list);
 		else
 			gfs2_remove_from_journal(bh, current->journal_info, 0);
 	}
@@ -1084,10 +1083,9 @@
 		bd = bh->b_private;
 		if (bd) {
 			gfs2_assert_warn(sdp, bd->bd_bh == bh);
-			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
-			if (!list_empty(&bd->bd_le.le_list)) {
+			if (!list_empty(&bd->bd_list)) {
 				if (!buffer_pinned(bh))
-					list_del_init(&bd->bd_le.le_list);
+					list_del_init(&bd->bd_list);
 				else
 					bd = NULL;
 			}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 03c04fe..dab5409 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -324,7 +324,7 @@
 		if (!dblock)
 			return x + 1;
 
-		ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, 0, &mp->mp_bh[x+1]);
+		ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]);
 		if (ret)
 			return ret;
 	}
@@ -882,7 +882,7 @@
 		top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
 		bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
 	} else {
-		error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh);
+		error = gfs2_meta_indirect_buffer(ip, height, block, &bh);
 		if (error)
 			return error;
 
@@ -1169,6 +1169,7 @@
 	struct buffer_head *dibh;
 	struct gfs2_qadata *qa = NULL;
 	int error;
+	int unstuff = 0;
 
 	if (gfs2_is_stuffed(ip) &&
 	    (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
@@ -1183,13 +1184,14 @@
 		error = gfs2_inplace_reserve(ip, 1);
 		if (error)
 			goto do_grow_qunlock;
+		unstuff = 1;
 	}
 
 	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0);
 	if (error)
 		goto do_grow_release;
 
-	if (qa) {
+	if (unstuff) {
 		error = gfs2_unstuff_dinode(ip, NULL);
 		if (error)
 			goto do_end_trans;
@@ -1208,7 +1210,7 @@
 do_end_trans:
 	gfs2_trans_end(sdp);
 do_grow_release:
-	if (qa) {
+	if (unstuff) {
 		gfs2_inplace_release(ip);
 do_grow_qunlock:
 		gfs2_quota_unlock(ip);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index a836056..8aaeb07 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -821,7 +821,7 @@
 	struct buffer_head *bh;
 	struct gfs2_leaf *leaf;
 	struct gfs2_dirent *dent;
-	struct qstr name = { .name = "", .len = 0, .hash = 0 };
+	struct qstr name = { .name = "" };
 
 	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 	if (error)
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index a3d2c9e..31b199f 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -558,14 +558,14 @@
 }
 
 /**
- * gfs2_close - called to close a struct file
+ * gfs2_release - called to close a struct file
  * @inode: the inode the struct file belongs to
  * @file: the struct file being closed
  *
  * Returns: errno
  */
 
-static int gfs2_close(struct inode *inode, struct file *file)
+static int gfs2_release(struct inode *inode, struct file *file)
 {
 	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
 	struct gfs2_file *fp;
@@ -1005,7 +1005,7 @@
 	.unlocked_ioctl	= gfs2_ioctl,
 	.mmap		= gfs2_mmap,
 	.open		= gfs2_open,
-	.release	= gfs2_close,
+	.release	= gfs2_release,
 	.fsync		= gfs2_fsync,
 	.lock		= gfs2_lock,
 	.flock		= gfs2_flock,
@@ -1019,7 +1019,7 @@
 	.readdir	= gfs2_readdir,
 	.unlocked_ioctl	= gfs2_ioctl,
 	.open		= gfs2_open,
-	.release	= gfs2_close,
+	.release	= gfs2_release,
 	.fsync		= gfs2_fsync,
 	.lock		= gfs2_lock,
 	.flock		= gfs2_flock,
@@ -1037,7 +1037,7 @@
 	.unlocked_ioctl	= gfs2_ioctl,
 	.mmap		= gfs2_mmap,
 	.open		= gfs2_open,
-	.release	= gfs2_close,
+	.release	= gfs2_release,
 	.fsync		= gfs2_fsync,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
@@ -1049,7 +1049,7 @@
 	.readdir	= gfs2_readdir,
 	.unlocked_ioctl	= gfs2_ioctl,
 	.open		= gfs2_open,
-	.release	= gfs2_close,
+	.release	= gfs2_release,
 	.fsync		= gfs2_fsync,
 	.llseek		= default_llseek,
 };
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 1656df7a..4bdcf37 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -94,7 +94,6 @@
 	/* A shortened, inline version of gfs2_trans_begin() */
 	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
 	tr.tr_ip = (unsigned long)__builtin_return_address(0);
-	INIT_LIST_HEAD(&tr.tr_list_buf);
 	gfs2_log_reserve(sdp, tr.tr_reserved);
 	BUG_ON(current->journal_info);
 	current->journal_info = &tr;
@@ -379,11 +378,6 @@
 	if (error)
 		return error;
 
-	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
-		brelse(dibh);
-		return -EIO;
-	}
-
 	error = gfs2_dinode_in(ip, dibh->b_data);
 	brelse(dibh);
 	clear_bit(GIF_INVALID, &ip->i_flags);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 47d0bda..aa9949e 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -26,7 +26,7 @@
 #define DIO_METADATA	0x00000020
 
 struct gfs2_log_operations;
-struct gfs2_log_element;
+struct gfs2_bufdata;
 struct gfs2_holder;
 struct gfs2_glock;
 struct gfs2_quota_data;
@@ -52,7 +52,7 @@
  */
 
 struct gfs2_log_operations {
-	void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_log_element *le);
+	void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
 	void (*lo_before_commit) (struct gfs2_sbd *sdp);
 	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
 	void (*lo_before_scan) (struct gfs2_jdesc *jd,
@@ -64,11 +64,6 @@
 	const char *lo_name;
 };
 
-struct gfs2_log_element {
-	struct list_head le_list;
-	const struct gfs2_log_operations *le_ops;
-};
-
 #define GBF_FULL 1
 
 struct gfs2_bitmap {
@@ -118,15 +113,10 @@
 struct gfs2_bufdata {
 	struct buffer_head *bd_bh;
 	struct gfs2_glock *bd_gl;
+	u64 bd_blkno;
 
-	union {
-		struct list_head list_tr;
-		u64 blkno;
-	} u;
-#define bd_list_tr u.list_tr
-#define bd_blkno u.blkno
-
-	struct gfs2_log_element bd_le;
+	struct list_head bd_list;
+	const struct gfs2_log_operations *bd_ops;
 
 	struct gfs2_ail *bd_ail;
 	struct list_head bd_ail_st_list;
@@ -411,13 +401,10 @@
 
 	int tr_touched;
 
-	unsigned int tr_num_buf;
 	unsigned int tr_num_buf_new;
 	unsigned int tr_num_databuf_new;
 	unsigned int tr_num_buf_rm;
 	unsigned int tr_num_databuf_rm;
-	struct list_head tr_list_buf;
-
 	unsigned int tr_num_revoke;
 	unsigned int tr_num_revoke_rm;
 };
@@ -699,7 +686,6 @@
 
 	struct list_head sd_log_le_buf;
 	struct list_head sd_log_le_revoke;
-	struct list_head sd_log_le_rg;
 	struct list_head sd_log_le_databuf;
 	struct list_head sd_log_le_ordered;
 
@@ -716,7 +702,9 @@
 
 	struct rw_semaphore sd_log_flush_lock;
 	atomic_t sd_log_in_flight;
+	struct bio *sd_log_bio;
 	wait_queue_head_t sd_log_flush_wait;
+	int sd_log_error;
 
 	unsigned int sd_log_flush_head;
 	u64 sd_log_flush_wrapped;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 276e7b5..c53c747 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -17,10 +17,7 @@
 
 extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
 extern int gfs2_internal_read(struct gfs2_inode *ip,
-			      struct file_ra_state *ra_state,
 			      char *buf, loff_t *pos, unsigned size);
-extern void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
-				   unsigned int from, unsigned int to);
 extern void gfs2_set_aops(struct inode *inode);
 
 static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 4752ead..f4beeb9 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -32,8 +32,6 @@
 #include "dir.h"
 #include "trace_gfs2.h"
 
-#define PULL 1
-
 /**
  * gfs2_struct2blk - compute stuff
  * @sdp: the filesystem
@@ -359,18 +357,6 @@
 	return 0;
 }
 
-u64 gfs2_log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
-{
-	struct gfs2_journal_extent *je;
-
-	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
-		if (lbn >= je->lblock && lbn < je->lblock + je->blocks)
-			return je->dblock + lbn - je->lblock;
-	}
-
-	return -1;
-}
-
 /**
  * log_distance - Compute distance between two journal blocks
  * @sdp: The GFS2 superblock
@@ -466,17 +452,6 @@
 	return tail;
 }
 
-void gfs2_log_incr_head(struct gfs2_sbd *sdp)
-{
-	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
-	       (sdp->sd_log_flush_head != sdp->sd_log_head));
-
-	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
-		sdp->sd_log_flush_head = 0;
-		sdp->sd_log_flush_wrapped = 1;
-	}
-}
-
 static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
 {
 	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
@@ -511,8 +486,8 @@
 {
 	struct gfs2_bufdata *bda, *bdb;
 
-	bda = list_entry(a, struct gfs2_bufdata, bd_le.le_list);
-	bdb = list_entry(b, struct gfs2_bufdata, bd_le.le_list);
+	bda = list_entry(a, struct gfs2_bufdata, bd_list);
+	bdb = list_entry(b, struct gfs2_bufdata, bd_list);
 
 	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
 		return -1;
@@ -530,8 +505,8 @@
 	gfs2_log_lock(sdp);
 	list_sort(NULL, &sdp->sd_log_le_ordered, &bd_cmp);
 	while (!list_empty(&sdp->sd_log_le_ordered)) {
-		bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_le.le_list);
-		list_move(&bd->bd_le.le_list, &written);
+		bd = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_bufdata, bd_list);
+		list_move(&bd->bd_list, &written);
 		bh = bd->bd_bh;
 		if (!buffer_dirty(bh))
 			continue;
@@ -558,7 +533,7 @@
 
 	gfs2_log_lock(sdp);
 	while (!list_empty(&sdp->sd_log_le_ordered)) {
-		bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_le.le_list);
+		bd = list_entry(sdp->sd_log_le_ordered.prev, struct gfs2_bufdata, bd_list);
 		bh = bd->bd_bh;
 		if (buffer_locked(bh)) {
 			get_bh(bh);
@@ -568,7 +543,7 @@
 			gfs2_log_lock(sdp);
 			continue;
 		}
-		list_del_init(&bd->bd_le.le_list);
+		list_del_init(&bd->bd_list);
 	}
 	gfs2_log_unlock(sdp);
 }
@@ -580,25 +555,19 @@
  * Returns: the initialized log buffer descriptor
  */
 
-static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
+static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 {
-	u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
-	struct buffer_head *bh;
 	struct gfs2_log_header *lh;
 	unsigned int tail;
 	u32 hash;
-
-	bh = sb_getblk(sdp->sd_vfs, blkno);
-	lock_buffer(bh);
-	memset(bh->b_data, 0, bh->b_size);
-	set_buffer_uptodate(bh);
-	clear_buffer_dirty(bh);
+	int rw = WRITE_FLUSH_FUA | REQ_META;
+	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+	lh = page_address(page);
+	clear_page(lh);
 
 	gfs2_ail1_empty(sdp);
 	tail = current_tail(sdp);
 
-	lh = (struct gfs2_log_header *)bh->b_data;
-	memset(lh, 0, sizeof(struct gfs2_log_header));
 	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
 	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
 	lh->lh_header.__pad0 = cpu_to_be64(0);
@@ -608,31 +577,22 @@
 	lh->lh_flags = cpu_to_be32(flags);
 	lh->lh_tail = cpu_to_be32(tail);
 	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
-	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
+	hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
 	lh->lh_hash = cpu_to_be32(hash);
 
-	bh->b_end_io = end_buffer_write_sync;
-	get_bh(bh);
 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
 		gfs2_ordered_wait(sdp);
 		log_flush_wait(sdp);
-		submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
-	} else {
-		submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
+		rw = WRITE_SYNC | REQ_META | REQ_PRIO;
 	}
-	wait_on_buffer(bh);
 
-	if (!buffer_uptodate(bh))
-		gfs2_io_error_bh(sdp, bh);
-	brelse(bh);
+	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
+	gfs2_log_write_page(sdp, page);
+	gfs2_log_flush_bio(sdp, rw);
+	log_flush_wait(sdp);
 
 	if (sdp->sd_log_tail != tail)
 		log_pull_tail(sdp, tail);
-	else
-		gfs2_assert_withdraw(sdp, !pull);
-
-	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
-	gfs2_log_incr_head(sdp);
 }
 
 /**
@@ -678,15 +638,14 @@
 
 	gfs2_ordered_write(sdp);
 	lops_before_commit(sdp);
+	gfs2_log_flush_bio(sdp, WRITE);
 
 	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
-		log_write_header(sdp, 0, 0);
+		log_write_header(sdp, 0);
 	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
-		gfs2_log_lock(sdp);
 		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
 		trace_gfs2_log_blocks(sdp, -1);
-		gfs2_log_unlock(sdp);
-		log_write_header(sdp, 0, PULL);
+		log_write_header(sdp, 0);
 	}
 	lops_after_commit(sdp, ai);
 
@@ -735,21 +694,6 @@
 	gfs2_log_unlock(sdp);
 }
 
-static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
-{
-	struct list_head *head = &tr->tr_list_buf;
-	struct gfs2_bufdata *bd;
-
-	gfs2_log_lock(sdp);
-	while (!list_empty(head)) {
-		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
-		list_del_init(&bd->bd_list_tr);
-		tr->tr_num_buf--;
-	}
-	gfs2_log_unlock(sdp);
-	gfs2_assert_warn(sdp, !tr->tr_num_buf);
-}
-
 /**
  * gfs2_log_commit - Commit a transaction to the log
  * @sdp: the filesystem
@@ -768,8 +712,6 @@
 void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
 	log_refund(sdp, tr);
-	buf_lo_incore_commit(sdp, tr);
-
 	up_read(&sdp->sd_log_flush_lock);
 
 	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
@@ -798,8 +740,7 @@
 	sdp->sd_log_flush_head = sdp->sd_log_head;
 	sdp->sd_log_flush_wrapped = 0;
 
-	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT,
-			 (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL);
+	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);
 
 	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
 	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
@@ -854,11 +795,9 @@
 	struct gfs2_sbd *sdp = data;
 	unsigned long t = 1;
 	DEFINE_WAIT(wait);
-	unsigned preflush;
 
 	while (!kthread_should_stop()) {
 
-		preflush = atomic_read(&sdp->sd_log_pinned);
 		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
 			gfs2_ail1_empty(sdp);
 			gfs2_log_flush(sdp, NULL);
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index ff07454..3fd5215 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -52,8 +52,6 @@
 			    unsigned int ssize);
 
 extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
-extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
-extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp, unsigned int lbn);
 extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
 extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
 extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 6b1efb5..852c1be 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -127,146 +127,277 @@
 	atomic_dec(&sdp->sd_log_pinned);
 }
 
-
-static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
+static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
 {
-	return (struct gfs2_log_descriptor *)bh->b_data;
+	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
+	       (sdp->sd_log_flush_head != sdp->sd_log_head));
+
+	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
+		sdp->sd_log_flush_head = 0;
+		sdp->sd_log_flush_wrapped = 1;
+	}
 }
 
-static inline __be64 *bh_log_ptr(struct buffer_head *bh)
+static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
 {
-	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
-	return (__force __be64 *)(ld + 1);
-}
+	unsigned int lbn = sdp->sd_log_flush_head;
+	struct gfs2_journal_extent *je;
+	u64 block;
 
-static inline __be64 *bh_ptr_end(struct buffer_head *bh)
-{
-	return (__force __be64 *)(bh->b_data + bh->b_size);
+	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
+		if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
+			block = je->dblock + lbn - je->lblock;
+			gfs2_log_incr_head(sdp);
+			return block;
+		}
+	}
+
+	return -1;
 }
 
 /**
- * gfs2_log_write_endio - End of I/O for a log buffer
- * @bh: The buffer head
- * @uptodate: I/O Status
+ * gfs2_end_log_write_bh - end log write of pagecache data with buffers
+ * @sdp: The superblock
+ * @bvec: The bio_vec
+ * @error: The i/o status
+ *
+ * This finds the relevant buffers and unlocks them and sets the
+ * error flag according to the status of the i/o request. This is
+ * used when the log is writing data which has an in-place version
+ * that is pinned in the pagecache.
+ */
+
+static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
+				  int error)
+{
+	struct buffer_head *bh, *next;
+	struct page *page = bvec->bv_page;
+	unsigned size;
+
+	bh = page_buffers(page);
+	size = bvec->bv_len;
+	while (bh_offset(bh) < bvec->bv_offset)
+		bh = bh->b_this_page;
+	do {
+		if (error)
+			set_buffer_write_io_error(bh);
+		unlock_buffer(bh);
+		next = bh->b_this_page;
+		size -= bh->b_size;
+		brelse(bh);
+		bh = next;
+	} while(bh && size);
+}
+
+/**
+ * gfs2_end_log_write - end of i/o to the log
+ * @bio: The bio
+ * @error: Status of i/o request
+ *
+ * Each bio_vec contains either data from the pagecache or data
+ * relating to the log itself. Here we iterate over the bio_vec
+ * array, processing both kinds of data.
  *
  */
 
-static void gfs2_log_write_endio(struct buffer_head *bh, int uptodate)
+static void gfs2_end_log_write(struct bio *bio, int error)
 {
-	struct gfs2_sbd *sdp = bh->b_private;
-	bh->b_private = NULL;
+	struct gfs2_sbd *sdp = bio->bi_private;
+	struct bio_vec *bvec;
+	struct page *page;
+	int i;
 
-	end_buffer_write_sync(bh, uptodate);
+	if (error) {
+		sdp->sd_log_error = error;
+		fs_err(sdp, "Error %d writing to log\n", error);
+	}
+
+	bio_for_each_segment(bvec, bio, i) {
+		page = bvec->bv_page;
+		if (page_has_buffers(page))
+			gfs2_end_log_write_bh(sdp, bvec, error);
+		else
+			mempool_free(page, gfs2_page_pool);
+	}
+
+	bio_put(bio);
 	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
 		wake_up(&sdp->sd_log_flush_wait);
 }
 
 /**
- * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
- * @sdp: The GFS2 superblock
+ * gfs2_log_flush_bio - Submit any pending log bio
+ * @sdp: The superblock
+ * @rw: The rw flags
  *
- * tReturns: the buffer_head
+ * Submit any pending part-built or full bio to the block device. If
+ * there is no pending bio, then this is a no-op.
  */
 
-static struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
+void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
 {
-	u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
-	struct buffer_head *bh;
-
-	bh = sb_getblk(sdp->sd_vfs, blkno);
-	lock_buffer(bh);
-	memset(bh->b_data, 0, bh->b_size);
-	set_buffer_uptodate(bh);
-	clear_buffer_dirty(bh);
-	gfs2_log_incr_head(sdp);
-	atomic_inc(&sdp->sd_log_in_flight);
-	bh->b_private = sdp;
-	bh->b_end_io = gfs2_log_write_endio;
-
-	return bh;
+	if (sdp->sd_log_bio) {
+		atomic_inc(&sdp->sd_log_in_flight);
+		submit_bio(rw, sdp->sd_log_bio);
+		sdp->sd_log_bio = NULL;
+	}
 }
 
 /**
- * gfs2_fake_write_endio - 
- * @bh: The buffer head
- * @uptodate: The I/O Status
+ * gfs2_log_alloc_bio - Allocate a new bio for log writing
+ * @sdp: The superblock
+ * @blkno: The next device block number we want to write to
  *
+ * This should never be called when there is a cached bio in the
+ * super block. When it returns, there will be a cached bio in the
+ * super block which will have as many bio_vecs as the device is
+ * happy to handle.
+ *
+ * Returns: Newly allocated bio
  */
 
-static void gfs2_fake_write_endio(struct buffer_head *bh, int uptodate)
+static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
 {
-	struct buffer_head *real_bh = bh->b_private;
-	struct gfs2_bufdata *bd = real_bh->b_private;
-	struct gfs2_sbd *sdp = bd->bd_gl->gl_sbd;
+	struct super_block *sb = sdp->sd_vfs;
+	unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
+	struct bio *bio;
 
-	end_buffer_write_sync(bh, uptodate);
-	mempool_free(bh, gfs2_bh_pool);
-	unlock_buffer(real_bh);
-	brelse(real_bh);
-	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
-		wake_up(&sdp->sd_log_flush_wait);
+	BUG_ON(sdp->sd_log_bio);
+
+	while (1) {
+		bio = bio_alloc(GFP_NOIO, nrvecs);
+		if (likely(bio))
+			break;
+		nrvecs = max(nrvecs/2, 1U);
+	}
+
+	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
+	bio->bi_bdev = sb->s_bdev;
+	bio->bi_end_io = gfs2_end_log_write;
+	bio->bi_private = sdp;
+
+	sdp->sd_log_bio = bio;
+
+	return bio;
 }
 
 /**
- * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
+ * gfs2_log_get_bio - Get cached log bio, or allocate a new one
+ * @sdp: The superblock
+ * @blkno: The device block number we want to write to
+ *
+ * If there is a cached bio and the next block number is sequential
+ * with the previous one, return it; otherwise flush the bio to the
+ * device. If there is no cached bio, or we have just flushed it,
+ * allocate a new one.
+ *
+ * Returns: The bio to use for log writes
+ */
+
+static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
+{
+	struct bio *bio = sdp->sd_log_bio;
+	u64 nblk;
+
+	if (bio) {
+		nblk = bio->bi_sector + bio_sectors(bio);
+		nblk >>= sdp->sd_fsb2bb_shift;
+		if (blkno == nblk)
+			return bio;
+		gfs2_log_flush_bio(sdp, WRITE);
+	}
+
+	return gfs2_log_alloc_bio(sdp, blkno);
+}
+
+
+/**
+ * gfs2_log_write - write to log
  * @sdp: the filesystem
- * @data: the data the buffer_head should point to
+ * @page: the page to write
+ * @size: the size of the data to write
+ * @offset: the offset within the page 
  *
- * Returns: the log buffer descriptor
+ * Try to add the page segment to the current bio. If that fails,
+ * submit the current bio to the device and create a new one, and
+ * then add the page segment to that.
  */
 
-static struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
-				      struct buffer_head *real)
+static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
+			   unsigned size, unsigned offset)
 {
-	u64 blkno = gfs2_log_bmap(sdp, sdp->sd_log_flush_head);
-	struct buffer_head *bh;
+	u64 blkno = gfs2_log_bmap(sdp);
+	struct bio *bio;
+	int ret;
 
-	bh = mempool_alloc(gfs2_bh_pool, GFP_NOFS);
-	atomic_set(&bh->b_count, 1);
-	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate) | (1 << BH_Lock);
-	set_bh_page(bh, real->b_page, bh_offset(real));
-	bh->b_blocknr = blkno;
-	bh->b_size = sdp->sd_sb.sb_bsize;
-	bh->b_bdev = sdp->sd_vfs->s_bdev;
-	bh->b_private = real;
-	bh->b_end_io = gfs2_fake_write_endio;
-
-	gfs2_log_incr_head(sdp);
-	atomic_inc(&sdp->sd_log_in_flight);
-
-	return bh;
+	bio = gfs2_log_get_bio(sdp, blkno);
+	ret = bio_add_page(bio, page, size, offset);
+	if (ret == 0) {
+		gfs2_log_flush_bio(sdp, WRITE);
+		bio = gfs2_log_alloc_bio(sdp, blkno);
+		ret = bio_add_page(bio, page, size, offset);
+		WARN_ON(ret == 0);
+	}
 }
 
-static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
+/**
+ * gfs2_log_write_bh - write a buffer's content to the log
+ * @sdp: The super block
+ * @bh: The buffer pointing to the in-place location
+ * 
+ * This writes the content of the buffer to the next available location
+ * in the log. The buffer will be unlocked once the i/o to the log has
+ * completed.
+ */
+
+static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
 {
-	struct buffer_head *bh = gfs2_log_get_buf(sdp);
-	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
+	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
+}
+
+/**
+ * gfs2_log_write_page - write one block stored in a page, into the log
+ * @sdp: The superblock
+ * @page: The struct page
+ *
+ * This writes the first block-sized part of the page into the log. Note
+ * that the page must have been allocated from the gfs2_page_pool mempool
+ * and that after this has been called, ownership has been transferred and
+ * the page may be freed at any time.
+ */
+
+void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
+{
+	struct super_block *sb = sdp->sd_vfs;
+	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
+}
+
+static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
+				      u32 ld_length, u32 ld_data1)
+{
+	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+	struct gfs2_log_descriptor *ld = page_address(page);
+	clear_page(ld);
 	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
 	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
 	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
 	ld->ld_type = cpu_to_be32(ld_type);
-	ld->ld_length = 0;
-	ld->ld_data1 = 0;
+	ld->ld_length = cpu_to_be32(ld_length);
+	ld->ld_data1 = cpu_to_be32(ld_data1);
 	ld->ld_data2 = 0;
-	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
-	return bh;
+	return page;
 }
 
-static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 {
-	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
 	struct gfs2_meta_header *mh;
 	struct gfs2_trans *tr;
 
 	lock_buffer(bd->bd_bh);
 	gfs2_log_lock(sdp);
-	if (!list_empty(&bd->bd_list_tr))
-		goto out;
 	tr = current->journal_info;
 	tr->tr_touched = 1;
-	tr->tr_num_buf++;
-	list_add(&bd->bd_list_tr, &tr->tr_list_buf);
-	if (!list_empty(&le->le_list))
+	if (!list_empty(&bd->bd_list))
 		goto out;
 	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
 	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
@@ -276,62 +407,86 @@
 	mh->__pad0 = cpu_to_be64(0);
 	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
 	sdp->sd_log_num_buf++;
-	list_add(&le->le_list, &sdp->sd_log_le_buf);
+	list_add(&bd->bd_list, &sdp->sd_log_le_buf);
 	tr->tr_num_buf_new++;
 out:
 	gfs2_log_unlock(sdp);
 	unlock_buffer(bd->bd_bh);
 }
 
-static void buf_lo_before_commit(struct gfs2_sbd *sdp)
+static void gfs2_check_magic(struct buffer_head *bh)
 {
-	struct buffer_head *bh;
+	void *kaddr;
+	__be32 *ptr;
+
+	clear_buffer_escaped(bh);
+	kaddr = kmap_atomic(bh->b_page);
+	ptr = kaddr + bh_offset(bh);
+	if (*ptr == cpu_to_be32(GFS2_MAGIC))
+		set_buffer_escaped(bh);
+	kunmap_atomic(kaddr);
+}
+
+static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
+				unsigned int total, struct list_head *blist,
+				bool is_databuf)
+{
 	struct gfs2_log_descriptor *ld;
 	struct gfs2_bufdata *bd1 = NULL, *bd2;
-	unsigned int total;
-	unsigned int limit;
+	struct page *page;
 	unsigned int num;
 	unsigned n;
 	__be64 *ptr;
 
-	limit = buf_limit(sdp);
-	/* for 4k blocks, limit = 503 */
-
 	gfs2_log_lock(sdp);
-	total = sdp->sd_log_num_buf;
-	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
+	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
 	while(total) {
 		num = total;
 		if (total > limit)
 			num = limit;
 		gfs2_log_unlock(sdp);
-		bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
+		page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA, num + 1, num);
+		ld = page_address(page);
 		gfs2_log_lock(sdp);
-		ld = bh_log_desc(bh);
-		ptr = bh_log_ptr(bh);
-		ld->ld_length = cpu_to_be32(num + 1);
-		ld->ld_data1 = cpu_to_be32(num);
+		ptr = (__be64 *)(ld + 1);
 
 		n = 0;
-		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
-					     bd_le.le_list) {
+		list_for_each_entry_continue(bd1, blist, bd_list) {
 			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
+			if (is_databuf) {
+				gfs2_check_magic(bd1->bd_bh);
+				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
+			}
 			if (++n >= num)
 				break;
 		}
 
 		gfs2_log_unlock(sdp);
-		submit_bh(WRITE_SYNC, bh);
+		gfs2_log_write_page(sdp, page);
 		gfs2_log_lock(sdp);
 
 		n = 0;
-		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
-					     bd_le.le_list) {
+		list_for_each_entry_continue(bd2, blist, bd_list) {
 			get_bh(bd2->bd_bh);
 			gfs2_log_unlock(sdp);
 			lock_buffer(bd2->bd_bh);
-			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
-			submit_bh(WRITE_SYNC, bh);
+
+			if (buffer_escaped(bd2->bd_bh)) {
+				void *kaddr;
+				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+				ptr = page_address(page);
+				kaddr = kmap_atomic(bd2->bd_bh->b_page);
+				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
+				       bd2->bd_bh->b_size);
+				kunmap_atomic(kaddr);
+				*(__be32 *)ptr = 0;
+				clear_buffer_escaped(bd2->bd_bh);
+				unlock_buffer(bd2->bd_bh);
+				brelse(bd2->bd_bh);
+				gfs2_log_write_page(sdp, page);
+			} else {
+				gfs2_log_write_bh(sdp, bd2->bd_bh);
+			}
 			gfs2_log_lock(sdp);
 			if (++n >= num)
 				break;
@@ -343,14 +498,22 @@
 	gfs2_log_unlock(sdp);
 }
 
+static void buf_lo_before_commit(struct gfs2_sbd *sdp)
+{
+	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
+
+	gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
+			   &sdp->sd_log_le_buf, 0);
+}
+
 static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
 {
 	struct list_head *head = &sdp->sd_log_le_buf;
 	struct gfs2_bufdata *bd;
 
 	while (!list_empty(head)) {
-		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
-		list_del_init(&bd->bd_le.le_list);
+		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
+		list_del_init(&bd->bd_list);
 		sdp->sd_log_num_buf--;
 
 		gfs2_unpin(sdp, bd->bd_bh, ai);
@@ -437,9 +600,8 @@
 	        jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
 }
 
-static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 {
-	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
 	struct gfs2_glock *gl = bd->bd_gl;
 	struct gfs2_trans *tr;
 
@@ -449,48 +611,48 @@
 	sdp->sd_log_num_revoke++;
 	atomic_inc(&gl->gl_revokes);
 	set_bit(GLF_LFLUSH, &gl->gl_flags);
-	list_add(&le->le_list, &sdp->sd_log_le_revoke);
+	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
 }
 
 static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 {
 	struct gfs2_log_descriptor *ld;
 	struct gfs2_meta_header *mh;
-	struct buffer_head *bh;
 	unsigned int offset;
 	struct list_head *head = &sdp->sd_log_le_revoke;
 	struct gfs2_bufdata *bd;
+	struct page *page;
+	unsigned int length;
 
 	if (!sdp->sd_log_num_revoke)
 		return;
 
-	bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
-	ld = bh_log_desc(bh);
-	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
-						    sizeof(u64)));
-	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
+	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
+	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
+	ld = page_address(page);
 	offset = sizeof(struct gfs2_log_descriptor);
 
-	list_for_each_entry(bd, head, bd_le.le_list) {
+	list_for_each_entry(bd, head, bd_list) {
 		sdp->sd_log_num_revoke--;
 
 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
-			submit_bh(WRITE_SYNC, bh);
 
-			bh = gfs2_log_get_buf(sdp);
-			mh = (struct gfs2_meta_header *)bh->b_data;
+			gfs2_log_write_page(sdp, page);
+			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
+			mh = page_address(page);
+			clear_page(mh);
 			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
 			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
 			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
 			offset = sizeof(struct gfs2_meta_header);
 		}
 
-		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
+		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
 		offset += sizeof(u64);
 	}
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 
-	submit_bh(WRITE_SYNC, bh);
+	gfs2_log_write_page(sdp, page);
 }
 
 static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
@@ -500,8 +662,8 @@
 	struct gfs2_glock *gl;
 
 	while (!list_empty(head)) {
-		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
-		list_del_init(&bd->bd_le.le_list);
+		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
+		list_del_init(&bd->bd_list);
 		gl = bd->bd_gl;
 		atomic_dec(&gl->gl_revokes);
 		clear_bit(GLF_LFLUSH, &gl->gl_flags);
@@ -604,108 +766,33 @@
  *    blocks, which isn't an enormous overhead but twice as much as
  *    for normal metadata blocks.
  */
-static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 {
-	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
 	struct gfs2_trans *tr = current->journal_info;
 	struct address_space *mapping = bd->bd_bh->b_page->mapping;
 	struct gfs2_inode *ip = GFS2_I(mapping->host);
 
 	lock_buffer(bd->bd_bh);
 	gfs2_log_lock(sdp);
-	if (tr) {
-		if (!list_empty(&bd->bd_list_tr))
-			goto out;
+	if (tr)
 		tr->tr_touched = 1;
-		if (gfs2_is_jdata(ip)) {
-			tr->tr_num_buf++;
-			list_add(&bd->bd_list_tr, &tr->tr_list_buf);
-		}
-	}
-	if (!list_empty(&le->le_list))
+	if (!list_empty(&bd->bd_list))
 		goto out;
-
 	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
 	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
 	if (gfs2_is_jdata(ip)) {
 		gfs2_pin(sdp, bd->bd_bh);
 		tr->tr_num_databuf_new++;
 		sdp->sd_log_num_databuf++;
-		list_add_tail(&le->le_list, &sdp->sd_log_le_databuf);
+		list_add_tail(&bd->bd_list, &sdp->sd_log_le_databuf);
 	} else {
-		list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
+		list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered);
 	}
 out:
 	gfs2_log_unlock(sdp);
 	unlock_buffer(bd->bd_bh);
 }
 
-static void gfs2_check_magic(struct buffer_head *bh)
-{
-	void *kaddr;
-	__be32 *ptr;
-
-	clear_buffer_escaped(bh);
-	kaddr = kmap_atomic(bh->b_page);
-	ptr = kaddr + bh_offset(bh);
-	if (*ptr == cpu_to_be32(GFS2_MAGIC))
-		set_buffer_escaped(bh);
-	kunmap_atomic(kaddr);
-}
-
-static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
-			      struct list_head *list, struct list_head *done,
-			      unsigned int n)
-{
-	struct buffer_head *bh1;
-	struct gfs2_log_descriptor *ld;
-	struct gfs2_bufdata *bd;
-	__be64 *ptr;
-
-	if (!bh)
-		return;
-
-	ld = bh_log_desc(bh);
-	ld->ld_length = cpu_to_be32(n + 1);
-	ld->ld_data1 = cpu_to_be32(n);
-
-	ptr = bh_log_ptr(bh);
-	
-	get_bh(bh);
-	submit_bh(WRITE_SYNC, bh);
-	gfs2_log_lock(sdp);
-	while(!list_empty(list)) {
-		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
-		list_move_tail(&bd->bd_le.le_list, done);
-		get_bh(bd->bd_bh);
-		while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
-			gfs2_log_incr_head(sdp);
-			ptr += 2;
-		}
-		gfs2_log_unlock(sdp);
-		lock_buffer(bd->bd_bh);
-		if (buffer_escaped(bd->bd_bh)) {
-			void *kaddr;
-			bh1 = gfs2_log_get_buf(sdp);
-			kaddr = kmap_atomic(bd->bd_bh->b_page);
-			memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
-			       bh1->b_size);
-			kunmap_atomic(kaddr);
-			*(__be32 *)bh1->b_data = 0;
-			clear_buffer_escaped(bd->bd_bh);
-			unlock_buffer(bd->bd_bh);
-			brelse(bd->bd_bh);
-		} else {
-			bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
-		}
-		submit_bh(WRITE_SYNC, bh1);
-		gfs2_log_lock(sdp);
-		ptr += 2;
-	}
-	gfs2_log_unlock(sdp);
-	brelse(bh);
-}
-
 /**
  * databuf_lo_before_commit - Scan the data buffers, writing as we go
  *
@@ -713,37 +800,10 @@
 
 static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 {
-	struct gfs2_bufdata *bd = NULL;
-	struct buffer_head *bh = NULL;
-	unsigned int n = 0;
-	__be64 *ptr = NULL, *end = NULL;
-	LIST_HEAD(processed);
-	LIST_HEAD(in_progress);
+	unsigned int limit = buf_limit(sdp) / 2;
 
-	gfs2_log_lock(sdp);
-	while (!list_empty(&sdp->sd_log_le_databuf)) {
-		if (ptr == end) {
-			gfs2_log_unlock(sdp);
-			gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
-			n = 0;
-			bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
-			ptr = bh_log_ptr(bh);
-			end = bh_ptr_end(bh) - 1;
-			gfs2_log_lock(sdp);
-			continue;
-		}
-		bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
-		list_move_tail(&bd->bd_le.le_list, &in_progress);
-		gfs2_check_magic(bd->bd_bh);
-		*ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
-		*ptr++ = cpu_to_be64(buffer_escaped(bh) ? 1 : 0);
-		n++;
-	}
-	gfs2_log_unlock(sdp);
-	gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
-	gfs2_log_lock(sdp);
-	list_splice(&processed, &sdp->sd_log_le_databuf);
-	gfs2_log_unlock(sdp);
+	gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
+			   &sdp->sd_log_le_databuf, 1);
 }
 
 static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
@@ -822,8 +882,8 @@
 	struct gfs2_bufdata *bd;
 
 	while (!list_empty(head)) {
-		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
-		list_del_init(&bd->bd_le.le_list);
+		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
+		list_del_init(&bd->bd_list);
 		sdp->sd_log_num_databuf--;
 		gfs2_unpin(sdp, bd->bd_bh, ai);
 	}
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 3c0b273..954a330 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -27,6 +27,8 @@
 extern const struct gfs2_log_operations gfs2_databuf_lops;
 
 extern const struct gfs2_log_operations *gfs2_log_ops[];
+extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
+extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw);
 
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
 {
@@ -44,17 +46,17 @@
 	return limit;
 }
 
-static inline void lops_init_le(struct gfs2_log_element *le,
+static inline void lops_init_le(struct gfs2_bufdata *bd,
 				const struct gfs2_log_operations *lops)
 {
-	INIT_LIST_HEAD(&le->le_list);
-	le->le_ops = lops;
+	INIT_LIST_HEAD(&bd->bd_list);
+	bd->bd_ops = lops;
 }
 
-static inline void lops_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+static inline void lops_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 {
-	if (le->le_ops->lo_add)
-		le->le_ops->lo_add(sdp, le);
+	if (bd->bd_ops->lo_add)
+		bd->bd_ops->lo_add(sdp, bd);
 }
 
 static inline void lops_before_commit(struct gfs2_sbd *sdp)
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 754426b..6cdb0f2 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -70,16 +70,6 @@
 	address_space_init_once(mapping);
 }
 
-static void *gfs2_bh_alloc(gfp_t mask, void *data)
-{
-	return alloc_buffer_head(mask);
-}
-
-static void gfs2_bh_free(void *ptr, void *data)
-{
-	return free_buffer_head(ptr);
-}
-
 /**
  * init_gfs2_fs - Register GFS2 as a filesystem
  *
@@ -143,6 +133,12 @@
 	if (!gfs2_quotad_cachep)
 		goto fail;
 
+	gfs2_rsrv_cachep = kmem_cache_create("gfs2_mblk",
+					     sizeof(struct gfs2_blkreserv),
+					       0, 0, NULL);
+	if (!gfs2_rsrv_cachep)
+		goto fail;
+
 	register_shrinker(&qd_shrinker);
 
 	error = register_filesystem(&gfs2_fs_type);
@@ -164,8 +160,8 @@
 	if (!gfs2_control_wq)
 		goto fail_recovery;
 
-	gfs2_bh_pool = mempool_create(1024, gfs2_bh_alloc, gfs2_bh_free, NULL);
-	if (!gfs2_bh_pool)
+	gfs2_page_pool = mempool_create_page_pool(64, 0);
+	if (!gfs2_page_pool)
 		goto fail_control;
 
 	gfs2_register_debugfs();
@@ -186,6 +182,9 @@
 	unregister_shrinker(&qd_shrinker);
 	gfs2_glock_exit();
 
+	if (gfs2_rsrv_cachep)
+		kmem_cache_destroy(gfs2_rsrv_cachep);
+
 	if (gfs2_quotad_cachep)
 		kmem_cache_destroy(gfs2_quotad_cachep);
 
@@ -225,7 +224,8 @@
 
 	rcu_barrier();
 
-	mempool_destroy(gfs2_bh_pool);
+	mempool_destroy(gfs2_page_pool);
+	kmem_cache_destroy(gfs2_rsrv_cachep);
 	kmem_cache_destroy(gfs2_quotad_cachep);
 	kmem_cache_destroy(gfs2_rgrpd_cachep);
 	kmem_cache_destroy(gfs2_bufdata_cachep);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 181586e..6c1e5d1 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -293,11 +293,10 @@
 	bd->bd_bh = bh;
 	bd->bd_gl = gl;
 
-	INIT_LIST_HEAD(&bd->bd_list_tr);
 	if (meta)
-		lops_init_le(&bd->bd_le, &gfs2_buf_lops);
+		lops_init_le(bd, &gfs2_buf_lops);
 	else
-		lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
+		lops_init_le(bd, &gfs2_databuf_lops);
 	bh->b_private = bd;
 
 	if (meta)
@@ -313,7 +312,7 @@
 	if (test_clear_buffer_pinned(bh)) {
 		trace_gfs2_pin(bd, 0);
 		atomic_dec(&sdp->sd_log_pinned);
-		list_del_init(&bd->bd_le.le_list);
+		list_del_init(&bd->bd_list);
 		if (meta) {
 			gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
 			sdp->sd_log_num_buf--;
@@ -375,33 +374,24 @@
  * @ip: The GFS2 inode
  * @height: The level of this buf in the metadata (indir addr) tree (if any)
  * @num: The block number (device relative) of the buffer
- * @new: Non-zero if we may create a new buffer
  * @bhp: the buffer is returned here
  *
  * Returns: errno
  */
 
 int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
-			      int new, struct buffer_head **bhp)
+			      struct buffer_head **bhp)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_glock *gl = ip->i_gl;
 	struct buffer_head *bh;
 	int ret = 0;
+	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
 
-	if (new) {
-		BUG_ON(height == 0);
-		bh = gfs2_meta_new(gl, num);
-		gfs2_trans_add_bh(ip->i_gl, bh, 1);
-		gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
-		gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
-	} else {
-		u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
-		ret = gfs2_meta_read(gl, num, DIO_WAIT, &bh);
-		if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
-			brelse(bh);
-			ret = -EIO;
-		}
+	ret = gfs2_meta_read(gl, num, DIO_WAIT, &bh);
+	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
+		brelse(bh);
+		ret = -EIO;
 	}
 	*bhp = bh;
 	return ret;
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 22c5265..c30973b 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -65,12 +65,12 @@
 void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
 
 int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
-			      int new, struct buffer_head **bhp);
+			      struct buffer_head **bhp);
 
 static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
 					 struct buffer_head **bhp)
 {
-	return gfs2_meta_indirect_buffer(ip, 0, ip->i_no_addr, 0, bhp);
+	return gfs2_meta_indirect_buffer(ip, 0, ip->i_no_addr, bhp);
 }
 
 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 6f3a18f..c5871ae 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -99,7 +99,6 @@
 	atomic_set(&sdp->sd_log_pinned, 0);
 	INIT_LIST_HEAD(&sdp->sd_log_le_buf);
 	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
-	INIT_LIST_HEAD(&sdp->sd_log_le_rg);
 	INIT_LIST_HEAD(&sdp->sd_log_le_databuf);
 	INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
 
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 6019da3..b97178e 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -652,7 +652,7 @@
 	}
 
 	memset(&q, 0, sizeof(struct gfs2_quota));
-	err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
+	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
 	if (err < 0)
 		return err;
 
@@ -744,7 +744,7 @@
 		i_size_write(inode, size);
 	inode->i_mtime = inode->i_atime = CURRENT_TIME;
 	mark_inode_dirty(inode);
-	return err;
+	return 0;
 
 unlock_out:
 	unlock_page(page);
@@ -852,7 +852,7 @@
 
 	memset(&q, 0, sizeof(struct gfs2_quota));
 	pos = qd2offset(qd);
-	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
+	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
 	if (error < 0)
 		return error;
 
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 3df65c9..f74fb9b 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -70,15 +70,15 @@
 
 /**
  * gfs2_setbit - Set a bit in the bitmaps
- * @buffer: the buffer that holds the bitmaps
- * @buflen: the length (in bytes) of the buffer
+ * @rgd: the resource group descriptor
+ * @buf2: the clone buffer that holds the bitmaps
+ * @bi: the bitmap structure
  * @block: the block to set
  * @new_state: the new state of the block
  *
  */
 
-static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1,
-			       unsigned char *buf2, unsigned int offset,
+static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2,
 			       struct gfs2_bitmap *bi, u32 block,
 			       unsigned char new_state)
 {
@@ -86,8 +86,8 @@
 	unsigned int buflen = bi->bi_len;
 	const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
 
-	byte1 = buf1 + offset + (block / GFS2_NBBY);
-	end = buf1 + offset + buflen;
+	byte1 = bi->bi_bh->b_data + bi->bi_offset + (block / GFS2_NBBY);
+	end = bi->bi_bh->b_data + bi->bi_offset + buflen;
 
 	BUG_ON(byte1 >= end);
 
@@ -110,7 +110,7 @@
 	*byte1 ^= (cur_state ^ new_state) << bit;
 
 	if (buf2) {
-		byte2 = buf2 + offset + (block / GFS2_NBBY);
+		byte2 = buf2 + bi->bi_offset + (block / GFS2_NBBY);
 		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
 		*byte2 ^= (cur_state ^ new_state) << bit;
 	}
@@ -118,6 +118,7 @@
 
 /**
  * gfs2_testbit - test a bit in the bitmaps
+ * @rgd: the resource group descriptor
  * @buffer: the buffer that holds the bitmaps
  * @buflen: the length (in bytes) of the buffer
  * @block: the block to read
@@ -179,7 +180,7 @@
 /**
  * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
  *       a block in a given allocation state.
- * @buffer: the buffer that holds the bitmaps
+ * @buf: the buffer that holds the bitmaps
  * @len: the length (in bytes) of the buffer
  * @goal: start search at this block's bit-pair (within @buffer)
  * @state: GFS2_BLKST_XXX the state of the block we're looking for.
@@ -231,6 +232,7 @@
 
 /**
  * gfs2_bitcount - count the number of bits in a certain state
+ * @rgd: the resource group descriptor
  * @buffer: the buffer that holds the bitmaps
  * @buflen: the length (in bytes) of the buffer
  * @state: the state of the block we're looking for
@@ -264,7 +266,6 @@
 
 /**
  * gfs2_rgrp_verify - Verify that a resource group is consistent
- * @sdp: the filesystem
  * @rgd: the rgrp
  *
  */
@@ -322,7 +323,8 @@
 /**
  * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
  * @sdp: The GFS2 superblock
- * @n: The data block number
+ * @blk: The data block number
+ * @exact: True if this needs to be an exact match
  *
  * Returns: The resource group, or NULL if not found
  */
@@ -380,7 +382,7 @@
 
 /**
  * gfs2_rgrpd_get_next - get the next RG
- * @rgd: A RG
+ * @rgd: the resource group descriptor
  *
  * Returns: The next rgrp
  */
@@ -529,6 +531,7 @@
 
 /**
  * gfs2_ri_total - Total up the file system space, according to the rindex.
+ * @sdp: the filesystem
  *
  */
 u64 gfs2_ri_total(struct gfs2_sbd *sdp)
@@ -537,16 +540,14 @@
 	struct inode *inode = sdp->sd_rindex;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	char buf[sizeof(struct gfs2_rindex)];
-	struct file_ra_state ra_state;
 	int error, rgrps;
 
-	file_ra_state_init(&ra_state, inode->i_mapping);
 	for (rgrps = 0;; rgrps++) {
 		loff_t pos = rgrps * sizeof(struct gfs2_rindex);
 
 		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
 			break;
-		error = gfs2_internal_read(ip, &ra_state, buf, &pos,
+		error = gfs2_internal_read(ip, buf, &pos,
 					   sizeof(struct gfs2_rindex));
 		if (error != sizeof(struct gfs2_rindex))
 			break;
@@ -582,13 +583,12 @@
 
 /**
  * read_rindex_entry - Pull in a new resource index entry from the disk
- * @gl: The glock covering the rindex inode
+ * @ip: Pointer to the rindex inode
  *
  * Returns: 0 on success, > 0 on EOF, error code otherwise
  */
 
-static int read_rindex_entry(struct gfs2_inode *ip,
-			     struct file_ra_state *ra_state)
+static int read_rindex_entry(struct gfs2_inode *ip)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
@@ -599,7 +599,7 @@
 	if (pos >= i_size_read(&ip->i_inode))
 		return 1;
 
-	error = gfs2_internal_read(ip, ra_state, (char *)&buf, &pos,
+	error = gfs2_internal_read(ip, (char *)&buf, &pos,
 				   sizeof(struct gfs2_rindex));
 
 	if (error != sizeof(struct gfs2_rindex))
@@ -655,13 +655,10 @@
 static int gfs2_ri_update(struct gfs2_inode *ip)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	struct inode *inode = &ip->i_inode;
-	struct file_ra_state ra_state;
 	int error;
 
-	file_ra_state_init(&ra_state, inode->i_mapping);
 	do {
-		error = read_rindex_entry(ip, &ra_state);
+		error = read_rindex_entry(ip);
 	} while (error == 0);
 
 	if (error < 0)
@@ -741,7 +738,7 @@
 
 /**
  * gfs2_rgrp_go_lock - Read in a RG's header and bitmaps
- * @rgd: the struct gfs2_rgrpd describing the RG to read in
+ * @gh: The glock holder for the resource group
  *
  * Read in all of a Resource Group's header and bitmap blocks.
  * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
@@ -801,7 +798,7 @@
 
 /**
  * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
- * @rgd: the struct gfs2_rgrpd describing the RG to read in
+ * @gh: The glock holder for the resource group
  *
  */
 
@@ -1002,11 +999,13 @@
  * Returns: the struct gfs2_qadata
  */
 
-static struct gfs2_blkreserv *gfs2_blkrsv_get(struct gfs2_inode *ip)
+static int gfs2_blkrsv_get(struct gfs2_inode *ip)
 {
 	BUG_ON(ip->i_res != NULL);
-	ip->i_res = kzalloc(sizeof(struct gfs2_blkreserv), GFP_NOFS);
-	return ip->i_res;
+	ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
+	if (!ip->i_res)
+		return -ENOMEM;
+	return 0;
 }
 
 /**
@@ -1038,6 +1037,8 @@
 /**
  * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
  * @rgd: The rgrp
+ * @last_unlinked: block address of the last dinode we unlinked
+ * @skip: block address we should explicitly not unlink
  *
  * Returns: 0 if no error
  *          The inode, if one has been found, in inode.
@@ -1102,7 +1103,7 @@
 /**
  * get_local_rgrp - Choose and lock a rgrp for allocation
  * @ip: the inode to reserve space for
- * @rgp: the chosen and locked rgrp
+ * @last_unlinked: the last unlinked block
  *
  * Try to acquire rgrp in way which avoids contending with others.
  *
@@ -1164,13 +1165,14 @@
 static void gfs2_blkrsv_put(struct gfs2_inode *ip)
 {
 	BUG_ON(ip->i_res == NULL);
-	kfree(ip->i_res);
+	kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
 	ip->i_res = NULL;
 }
 
 /**
  * gfs2_inplace_reserve - Reserve space in the filesystem
  * @ip: the inode to reserve space for
+ * @requested: the number of blocks to be reserved
  *
  * Returns: errno
  */
@@ -1179,14 +1181,15 @@
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_blkreserv *rs;
-	int error = 0;
+	int error;
 	u64 last_unlinked = NO_BLOCK;
 	int tries = 0;
 
-	rs = gfs2_blkrsv_get(ip);
-	if (!rs)
-		return -ENOMEM;
+	error = gfs2_blkrsv_get(ip);
+	if (error)
+		return error;
 
+	rs = ip->i_res;
 	rs->rs_requested = requested;
 	if (gfs2_assert_warn(sdp, requested)) {
 		error = -EINVAL;
@@ -1268,7 +1271,6 @@
  * @rgd: the resource group descriptor
  * @goal: the goal block within the RG (start here to search for avail block)
  * @state: GFS2_BLKST_XXX the before-allocation state to find
- * @dinode: TRUE if the first block we allocate is for a dinode
  * @rbi: address of the pointer to the bitmap containing the block found
  *
  * Walk rgrp's bitmap to find bits that represent a block in @state.
@@ -1282,13 +1284,12 @@
  * Returns: the block number found relative to the bitmap rbi
  */
 
-static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
-			unsigned char state,
+static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, unsigned char state,
 			struct gfs2_bitmap **rbi)
 {
 	struct gfs2_bitmap *bi = NULL;
 	const u32 length = rgd->rd_length;
-	u32 blk = BFITNOENT;
+	u32 biblk = BFITNOENT;
 	unsigned int buf, x;
 	const u8 *buffer = NULL;
 
@@ -1325,8 +1326,8 @@
 		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
 			buffer = bi->bi_clone + bi->bi_offset;
 
-		blk = gfs2_bitfit(buffer, bi->bi_len, goal, state);
-		if (blk != BFITNOENT)
+		biblk = gfs2_bitfit(buffer, bi->bi_len, goal, state);
+		if (biblk != BFITNOENT)
 			break;
 
 		if ((goal == 0) && (state == GFS2_BLKST_FREE))
@@ -1339,10 +1340,10 @@
 		goal = 0;
 	}
 
-	if (blk != BFITNOENT)
+	if (biblk != BFITNOENT)
 		*rbi = bi;
 
-	return blk;
+	return biblk;
 }
 
 /**
@@ -1367,8 +1368,8 @@
 	*n = 0;
 	buffer = bi->bi_bh->b_data + bi->bi_offset;
 	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
-	gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
-		    bi, blk, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
+	gfs2_setbit(rgd, bi->bi_clone, bi, blk,
+		    dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
 	(*n)++;
 	goal = blk;
 	while (*n < elen) {
@@ -1378,8 +1379,7 @@
 		if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
 		    GFS2_BLKST_FREE)
 			break;
-		gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
-			    bi, goal, GFS2_BLKST_USED);
+		gfs2_setbit(rgd, bi->bi_clone, bi, goal, GFS2_BLKST_USED);
 		(*n)++;
 	}
 	blk = gfs2_bi2rgd_blk(bi, blk);
@@ -1436,8 +1436,7 @@
 			       bi->bi_len);
 		}
 		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
-		gfs2_setbit(rgd, bi->bi_bh->b_data, NULL, bi->bi_offset,
-			    bi, buf_blk, new_state);
+		gfs2_setbit(rgd, NULL, bi, buf_blk, new_state);
 	}
 
 	return rgd;
@@ -1557,7 +1556,7 @@
 				  ip->i_inode.i_gid);
 
 	rgd->rd_free_clone -= *nblocks;
-	trace_gfs2_block_alloc(ip, block, *nblocks,
+	trace_gfs2_block_alloc(ip, rgd, block, *nblocks,
 			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
 	*bn = block;
 	return 0;
@@ -1584,7 +1583,7 @@
 	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
 	if (!rgd)
 		return;
-	trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE);
+	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
 	rgd->rd_free += blen;
 	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
@@ -1622,7 +1621,7 @@
 	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
 	if (!rgd)
 		return;
-	trace_gfs2_block_alloc(ip, blkno, 1, GFS2_BLKST_UNLINKED);
+	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
 	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 }
@@ -1652,7 +1651,7 @@
 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
 {
 	gfs2_free_uninit_di(rgd, ip->i_no_addr);
-	trace_gfs2_block_alloc(ip, ip->i_no_addr, 1, GFS2_BLKST_FREE);
+	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
 	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
 	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
 }
@@ -1752,7 +1751,6 @@
  *      and initialize an array of glock holders for them
  * @rlist: the list of resource groups
  * @state: the lock state to acquire the RG lock in
- * @flags: the modifier flags for the holder structures
  *
  * FIXME: Don't use NOFAIL
  *
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index dfa89cd..1b8b815 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -457,10 +457,10 @@
 /* Keep track of blocks as they are allocated/freed */
 TRACE_EVENT(gfs2_block_alloc,
 
-	TP_PROTO(const struct gfs2_inode *ip, u64 block, unsigned len,
-		u8 block_state),
+	TP_PROTO(const struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+		 u64 block, unsigned len, u8 block_state),
 
-	TP_ARGS(ip, block, len, block_state),
+	TP_ARGS(ip, rgd, block, len, block_state),
 
 	TP_STRUCT__entry(
 		__field(        dev_t,  dev                     )
@@ -468,6 +468,8 @@
 		__field(	u64,	inum			)
 		__field(	u32,	len			)
 		__field(	u8,	block_state		)
+		__field(        u64,	rd_addr			)
+		__field(        u32,	rd_free_clone		)
 	),
 
 	TP_fast_assign(
@@ -476,14 +478,18 @@
 		__entry->inum		= ip->i_no_addr;
 		__entry->len		= len;
 		__entry->block_state	= block_state;
+		__entry->rd_addr	= rgd->rd_addr;
+		__entry->rd_free_clone	= rgd->rd_free_clone;
 	),
 
-	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s",
+	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long long)__entry->inum,
 		  (unsigned long long)__entry->start,
 		  (unsigned long)__entry->len,
-		  block_state_name(__entry->block_state))
+		  block_state_name(__entry->block_state),
+		  (unsigned long long)__entry->rd_addr,
+		  __entry->rd_free_clone)
 );
 
 #endif /* _TRACE_GFS2_H */
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 86ac75d..ad3e2fb 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -50,8 +50,6 @@
 	if (revokes)
 		tr->tr_reserved += gfs2_struct2blk(sdp, revokes,
 						   sizeof(u64));
-	INIT_LIST_HEAD(&tr->tr_list_buf);
-
 	gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh);
 
 	error = gfs2_glock_nq(&tr->tr_t_gh);
@@ -93,10 +91,21 @@
 	up_read(&sdp->sd_log_flush_lock);
 }
 
+static void gfs2_print_trans(const struct gfs2_trans *tr)
+{
+	print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n", tr->tr_ip);
+	printk(KERN_WARNING "GFS2: blocks=%u revokes=%u reserved=%u touched=%d\n",
+	       tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, tr->tr_touched);
+	printk(KERN_WARNING "GFS2: Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
+	       tr->tr_num_buf_new, tr->tr_num_buf_rm,
+	       tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
+	       tr->tr_num_revoke, tr->tr_num_revoke_rm);
+}
+
 void gfs2_trans_end(struct gfs2_sbd *sdp)
 {
 	struct gfs2_trans *tr = current->journal_info;
-
+	s64 nbuf;
 	BUG_ON(!tr);
 	current->journal_info = NULL;
 
@@ -110,16 +119,13 @@
 		return;
 	}
 
-	if (gfs2_assert_withdraw(sdp, tr->tr_num_buf <= tr->tr_blocks)) {
-		fs_err(sdp, "tr_num_buf = %u, tr_blocks = %u ",
-		       tr->tr_num_buf, tr->tr_blocks);
-		print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n", tr->tr_ip);
-	}
-	if (gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes)) {
-		fs_err(sdp, "tr_num_revoke = %u, tr_revokes = %u ",
-		       tr->tr_num_revoke, tr->tr_revokes);
-		print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n", tr->tr_ip);
-	}
+	nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new;
+	nbuf -= tr->tr_num_buf_rm;
+	nbuf -= tr->tr_num_databuf_rm;
+
+	if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) &&
+				       (tr->tr_num_revoke <= tr->tr_revokes)))
+		gfs2_print_trans(tr);
 
 	gfs2_log_commit(sdp, tr);
 	if (tr->tr_t_gh.gh_gl) {
@@ -152,16 +158,16 @@
 		gfs2_attach_bufdata(gl, bh, meta);
 		bd = bh->b_private;
 	}
-	lops_add(sdp, &bd->bd_le);
+	lops_add(sdp, bd);
 }
 
 void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 {
-	BUG_ON(!list_empty(&bd->bd_le.le_list));
+	BUG_ON(!list_empty(&bd->bd_list));
 	BUG_ON(!list_empty(&bd->bd_ail_st_list));
 	BUG_ON(!list_empty(&bd->bd_ail_gl_list));
-	lops_init_le(&bd->bd_le, &gfs2_revoke_lops);
-	lops_add(sdp, &bd->bd_le);
+	lops_init_le(bd, &gfs2_revoke_lops);
+	lops_add(sdp, bd);
 }
 
 void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
@@ -171,9 +177,9 @@
 	unsigned int n = len;
 
 	gfs2_log_lock(sdp);
-	list_for_each_entry_safe(bd, tmp, &sdp->sd_log_le_revoke, bd_le.le_list) {
+	list_for_each_entry_safe(bd, tmp, &sdp->sd_log_le_revoke, bd_list) {
 		if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
-			list_del_init(&bd->bd_le.le_list);
+			list_del_init(&bd->bd_list);
 			gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
 			sdp->sd_log_num_revoke--;
 			kmem_cache_free(gfs2_bufdata_cachep, bd);
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 9e7765e..f00d7c5 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -25,7 +25,8 @@
 struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
 struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
 struct kmem_cache *gfs2_quotad_cachep __read_mostly;
-mempool_t *gfs2_bh_pool __read_mostly;
+struct kmem_cache *gfs2_rsrv_cachep __read_mostly;
+mempool_t *gfs2_page_pool __read_mostly;
 
 void gfs2_assert_i(struct gfs2_sbd *sdp)
 {
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index a4ce76c..3586b0d 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -152,7 +152,8 @@
 extern struct kmem_cache *gfs2_bufdata_cachep;
 extern struct kmem_cache *gfs2_rgrpd_cachep;
 extern struct kmem_cache *gfs2_quotad_cachep;
-extern mempool_t *gfs2_bh_pool;
+extern struct kmem_cache *gfs2_rsrv_cachep;
+extern mempool_t *gfs2_page_pool;
 
 static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
 					   unsigned int *p)
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 4dfbfec..ec2a9c2 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -366,6 +366,10 @@
 	err = hfs_brec_find(&src_fd);
 	if (err)
 		goto out;
+	if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
+		err = -EIO;
+		goto out;
+	}
 
 	hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
 				src_fd.entrylength);
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 88e155f..26b53fb 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -150,6 +150,11 @@
 		filp->f_pos++;
 		/* fall through */
 	case 1:
+		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
+			err = -EIO;
+			goto out;
+		}
+
 		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
 			fd.entrylength);
 		if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
@@ -181,6 +186,12 @@
 			err = -EIO;
 			goto out;
 		}
+
+		if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
+			err = -EIO;
+			goto out;
+		}
+
 		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
 			fd.entrylength);
 		type = be16_to_cpu(entry.type);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 28cf06e..001ef01 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -485,6 +485,7 @@
 		inode->i_fop = &simple_dir_operations;
 		/* directory inodes start off with i_nlink == 2 (for "." entry) */
 		inc_nlink(inode);
+		lockdep_annotate_inode_mutex_key(inode);
 	}
 	return inode;
 }
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index ad271c7..5a2dec2 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -234,8 +234,8 @@
 			return 0;
 
 		jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n");
-		spin_lock(&c->erase_completion_lock);
 		mutex_lock(&c->alloc_sem);
+		spin_lock(&c->erase_completion_lock);
 	}
 
 	/* First, work out which block we're garbage-collecting */
diff --git a/fs/libfs.c b/fs/libfs.c
index 18d08f5..f86ec27 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -68,7 +68,7 @@
 
 int dcache_dir_open(struct inode *inode, struct file *file)
 {
-	static struct qstr cursor_name = {.len = 1, .name = "."};
+	static struct qstr cursor_name = QSTR_INIT(".", 1);
 
 	file->private_data = d_alloc(file->f_path.dentry, &cursor_name);
 
@@ -225,7 +225,7 @@
 	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
 	struct dentry *dentry;
 	struct inode *root;
-	struct qstr d_name = {.name = name, .len = strlen(name)};
+	struct qstr d_name = QSTR_INIT(name, strlen(name));
 
 	if (IS_ERR(s))
 		return ERR_CAST(s);
diff --git a/fs/namei.c b/fs/namei.c
index 0062dd1..f9e883c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -116,47 +116,37 @@
  * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
  * PATH_MAX includes the nul terminator --RR.
  */
-static int do_getname(const char __user *filename, char *page)
-{
-	int retval;
-	unsigned long len = PATH_MAX;
-
-	if (!segment_eq(get_fs(), KERNEL_DS)) {
-		if ((unsigned long) filename >= TASK_SIZE)
-			return -EFAULT;
-		if (TASK_SIZE - (unsigned long) filename < PATH_MAX)
-			len = TASK_SIZE - (unsigned long) filename;
-	}
-
-	retval = strncpy_from_user(page, filename, len);
-	if (retval > 0) {
-		if (retval < len)
-			return 0;
-		return -ENAMETOOLONG;
-	} else if (!retval)
-		retval = -ENOENT;
-	return retval;
-}
-
 static char *getname_flags(const char __user *filename, int flags, int *empty)
 {
-	char *result = __getname();
-	int retval;
+	char *result = __getname(), *err;
+	int len;
 
-	if (!result)
+	if (unlikely(!result))
 		return ERR_PTR(-ENOMEM);
 
-	retval = do_getname(filename, result);
-	if (retval < 0) {
-		if (retval == -ENOENT && empty)
+	len = strncpy_from_user(result, filename, PATH_MAX);
+	err = ERR_PTR(len);
+	if (unlikely(len < 0))
+		goto error;
+
+	/* The empty path is special. */
+	if (unlikely(!len)) {
+		if (empty)
 			*empty = 1;
-		if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) {
-			__putname(result);
-			return ERR_PTR(retval);
-		}
+		err = ERR_PTR(-ENOENT);
+		if (!(flags & LOOKUP_EMPTY))
+			goto error;
 	}
-	audit_getname(result);
-	return result;
+
+	err = ERR_PTR(-ENAMETOOLONG);
+	if (likely(len < PATH_MAX)) {
+		audit_getname(result);
+		return result;
+	}
+
+error:
+	__putname(result);
+	return err;
 }
 
 char *getname(const char __user * filename)
@@ -1154,12 +1144,25 @@
 	 */
 	if (nd->flags & LOOKUP_RCU) {
 		unsigned seq;
-		*inode = nd->inode;
-		dentry = __d_lookup_rcu(parent, name, &seq, inode);
+		dentry = __d_lookup_rcu(parent, name, &seq, nd->inode);
 		if (!dentry)
 			goto unlazy;
 
-		/* Memory barrier in read_seqcount_begin of child is enough */
+		/*
+		 * This sequence count validates that the inode matches
+		 * the dentry name information from lookup.
+		 */
+		*inode = dentry->d_inode;
+		if (read_seqcount_retry(&dentry->d_seq, seq))
+			return -ECHILD;
+
+		/*
+		 * This sequence count validates that the parent had no
+		 * changes while we did the lookup of the dentry above.
+		 *
+		 * The memory barrier in read_seqcount_begin of child is
+		 * enough, so we can use __read_seqcount_retry here.
+		 */
 		if (__read_seqcount_retry(&parent->d_seq, nd->seq))
 			return -ECHILD;
 		nd->seq = seq;
@@ -1429,7 +1432,7 @@
 	unsigned long hash = 0;
 
 	for (;;) {
-		a = *(unsigned long *)name;
+		a = load_unaligned_zeropad(name);
 		if (len < sizeof(unsigned long))
 			break;
 		hash += a;
@@ -1459,7 +1462,7 @@
 	do {
 		hash = (hash + a) * 9;
 		len += sizeof(unsigned long);
-		a = *(unsigned long *)(name+len);
+		a = load_unaligned_zeropad(name+len);
 		/* Do we have any NUL or '/' bytes in this word? */
 		mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/'));
 	} while (!mask);
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 9c94297..7f6a23f 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -38,6 +38,8 @@
 #include <linux/buffer_head.h>	/* various write calls */
 #include <linux/prefetch.h>
 
+#include "../pnfs.h"
+#include "../internal.h"
 #include "blocklayout.h"
 
 #define NFSDBG_FACILITY	NFSDBG_PNFS_LD
@@ -868,7 +870,7 @@
 	 * GETDEVICEINFO's maxcount
 	 */
 	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
-	max_pages = max_resp_sz >> PAGE_SHIFT;
+	max_pages = nfs_page_array_len(0, max_resp_sz);
 	dprintk("%s max_resp_sz %u max_pages %d\n",
 		__func__, max_resp_sz, max_pages);
 
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index da7b5e4..60f7e4e 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1729,7 +1729,8 @@
  */
 struct nfs_server *nfs_clone_server(struct nfs_server *source,
 				    struct nfs_fh *fh,
-				    struct nfs_fattr *fattr)
+				    struct nfs_fattr *fattr,
+				    rpc_authflavor_t flavor)
 {
 	struct nfs_server *server;
 	struct nfs_fattr *fattr_fsinfo;
@@ -1758,7 +1759,7 @@
 
 	error = nfs_init_server_rpcclient(server,
 			source->client->cl_timeout,
-			source->client->cl_auth->au_flavor);
+			flavor);
 	if (error < 0)
 		goto out_free_server;
 	if (!IS_ERR(source->client_acl))
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 4aaf031..eedd24d 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -477,10 +477,7 @@
 static
 void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
 {
-	struct qstr filename = {
-		.len = entry->len,
-		.name = entry->name,
-	};
+	struct qstr filename = QSTR_INIT(entry->name, entry->len);
 	struct dentry *dentry;
 	struct dentry *alias;
 	struct inode *dir = parent->d_inode;
@@ -1429,7 +1426,7 @@
 	}
 
 	open_flags = nd->intent.open.flags;
-	attr.ia_valid = 0;
+	attr.ia_valid = ATTR_OPEN;
 
 	ctx = create_nfs_open_context(dentry, open_flags);
 	res = ERR_CAST(ctx);
@@ -1536,7 +1533,7 @@
 	if (IS_ERR(ctx))
 		goto out;
 
-	attr.ia_valid = 0;
+	attr.ia_valid = ATTR_OPEN;
 	if (openflags & O_TRUNC) {
 		attr.ia_valid |= ATTR_SIZE;
 		attr.ia_size = 0;
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b7f348b..ba3019f 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -554,12 +554,16 @@
 	struct nfs_client *clp;
 	int error = 0;
 
+	if (!try_module_get(THIS_MODULE))
+		return 0;
+
 	while ((clp = nfs_get_client_for_event(sb->s_fs_info, event))) {
 		error = __rpc_pipefs_event(clp, event, sb);
 		nfs_put_client(clp);
 		if (error)
 			break;
 	}
+	module_put(THIS_MODULE);
 	return error;
 }
 
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 2476dc6..b777bda 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -165,7 +165,8 @@
 extern void nfs_free_server(struct nfs_server *server);
 extern struct nfs_server *nfs_clone_server(struct nfs_server *,
 					   struct nfs_fh *,
-					   struct nfs_fattr *);
+					   struct nfs_fattr *,
+					   rpc_authflavor_t);
 extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
 extern int nfs4_check_client_ready(struct nfs_client *clp);
 extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
@@ -186,10 +187,10 @@
 
 /* nfs4namespace.c */
 #ifdef CONFIG_NFS_V4
-extern struct vfsmount *nfs_do_refmount(struct dentry *dentry);
+extern struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry);
 #else
 static inline
-struct vfsmount *nfs_do_refmount(struct dentry *dentry)
+struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
 {
 	return ERR_PTR(-ENOENT);
 }
@@ -234,7 +235,6 @@
 /* nfs4proc.c */
 #ifdef CONFIG_NFS_V4
 extern struct rpc_procinfo nfs4_procedures[];
-void nfs_fixup_secinfo_attributes(struct nfs_fattr *, struct nfs_fh *);
 #endif
 
 extern int nfs4_init_ds_session(struct nfs_client *clp);
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 1807866..d51868e 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -148,66 +148,31 @@
 	return pseudoflavor;
 }
 
-static int nfs_negotiate_security(const struct dentry *parent,
-				  const struct dentry *dentry,
-				  rpc_authflavor_t *flavor)
+static struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
+					      struct qstr *name,
+					      struct nfs_fh *fh,
+					      struct nfs_fattr *fattr)
 {
-	struct page *page;
-	struct nfs4_secinfo_flavors *flavors;
-	int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
-	int ret = -EPERM;
-
-	secinfo = NFS_PROTO(parent->d_inode)->secinfo;
-	if (secinfo != NULL) {
-		page = alloc_page(GFP_KERNEL);
-		if (!page) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		flavors = page_address(page);
-		ret = secinfo(parent->d_inode, &dentry->d_name, flavors);
-		*flavor = nfs_find_best_sec(flavors);
-		put_page(page);
-	}
-
-out:
-	return ret;
-}
-
-static int nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent,
-			       struct dentry *dentry, struct path *path,
-			       struct nfs_fh *fh, struct nfs_fattr *fattr,
-			       rpc_authflavor_t *flavor)
-{
-	struct rpc_clnt *clone;
-	struct rpc_auth *auth;
 	int err;
 
-	err = nfs_negotiate_security(parent, path->dentry, flavor);
-	if (err < 0)
-		goto out;
-	clone  = rpc_clone_client(server->client);
-	auth   = rpcauth_create(*flavor, clone);
-	if (!auth) {
-		err = -EIO;
-		goto out_shutdown;
-	}
-	err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode,
-						  &path->dentry->d_name,
-						  fh, fattr);
-out_shutdown:
-	rpc_shutdown_client(clone);
-out:
-	return err;
+	if (NFS_PROTO(dir)->version == 4)
+		return nfs4_proc_lookup_mountpoint(dir, name, fh, fattr);
+
+	err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
+	if (err)
+		return ERR_PTR(err);
+	return rpc_clone_client(NFS_SERVER(dir)->client);
 }
 #else /* CONFIG_NFS_V4 */
-static inline int nfs_lookup_with_sec(struct nfs_server *server,
-				      struct dentry *parent, struct dentry *dentry,
-				      struct path *path, struct nfs_fh *fh,
-				      struct nfs_fattr *fattr,
-				      rpc_authflavor_t *flavor)
+static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
+						     struct qstr *name,
+						     struct nfs_fh *fh,
+						     struct nfs_fattr *fattr)
 {
-	return -EPERM;
+	int err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
+	if (err)
+		return ERR_PTR(err);
+	return rpc_clone_client(NFS_SERVER(dir)->client);
 }
 #endif /* CONFIG_NFS_V4 */
 
@@ -226,12 +191,10 @@
 struct vfsmount *nfs_d_automount(struct path *path)
 {
 	struct vfsmount *mnt;
-	struct nfs_server *server = NFS_SERVER(path->dentry->d_inode);
 	struct dentry *parent;
 	struct nfs_fh *fh = NULL;
 	struct nfs_fattr *fattr = NULL;
-	int err;
-	rpc_authflavor_t flavor = RPC_AUTH_UNIX;
+	struct rpc_clnt *client;
 
 	dprintk("--> nfs_d_automount()\n");
 
@@ -249,21 +212,19 @@
 
 	/* Look it up again to get its attributes */
 	parent = dget_parent(path->dentry);
-	err = server->nfs_client->rpc_ops->lookup(server->client, parent->d_inode,
-						  &path->dentry->d_name,
-						  fh, fattr);
-	if (err == -EPERM && NFS_PROTO(parent->d_inode)->secinfo != NULL)
-		err = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr, &flavor);
+	client = nfs_lookup_mountpoint(parent->d_inode, &path->dentry->d_name, fh, fattr);
 	dput(parent);
-	if (err != 0) {
-		mnt = ERR_PTR(err);
+	if (IS_ERR(client)) {
+		mnt = ERR_CAST(client);
 		goto out;
 	}
 
 	if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
-		mnt = nfs_do_refmount(path->dentry);
+		mnt = nfs_do_refmount(client, path->dentry);
 	else
-		mnt = nfs_do_submount(path->dentry, fh, fattr, flavor);
+		mnt = nfs_do_submount(path->dentry, fh, fattr, client->cl_auth->au_flavor);
+	rpc_shutdown_client(client);
+
 	if (IS_ERR(mnt))
 		goto out;
 
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 5242eae..75c6829 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -398,8 +398,7 @@
 {
 	struct nfs_removeargs arg = {
 		.fh = NFS_FH(dir),
-		.name.len = name->len,
-		.name.name = name->name,
+		.name = *name,
 	};
 	struct nfs_removeres res;
 	struct rpc_message msg = {
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 97ecc86..8d75021 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -59,6 +59,7 @@
 
 #define NFS_SEQID_CONFIRMED 1
 struct nfs_seqid_counter {
+	ktime_t create_time;
 	int owner_id;
 	int flags;
 	u32 counter;
@@ -204,6 +205,9 @@
 extern const struct dentry_operations nfs4_dentry_operations;
 extern const struct inode_operations nfs4_dir_inode_operations;
 
+/* nfs4namespace.c */
+struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
+
 /* nfs4proc.c */
 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
@@ -212,8 +216,11 @@
 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
-extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
-		struct nfs4_fs_locations *fs_locations, struct page *page);
+extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struct qstr *,
+				  struct nfs4_fs_locations *, struct page *);
+extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, struct qstr *,
+			    struct nfs_fh *, struct nfs_fattr *);
+extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
 extern int nfs4_release_lockowner(struct nfs4_lock_state *);
 extern const struct xattr_handler *nfs4_xattr_handlers[];
 
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index a866bbd..c9cff9a 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -699,7 +699,7 @@
 	 * GETDEVICEINFO's maxcount
 	 */
 	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
-	max_pages = max_resp_sz >> PAGE_SHIFT;
+	max_pages = nfs_page_array_len(0, max_resp_sz);
 	dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
 		__func__, inode, max_resp_sz, max_pages);
 
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 9c8eca31..a7f3ded 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -52,6 +52,30 @@
 }
 
 /*
+ * return the path component of "<server>:<path>"
+ *  nfspath - the "<server>:<path>" string
+ *  end - one past the last char that could contain "<server>:"
+ * returns NULL on failure
+ */
+static char *nfs_path_component(const char *nfspath, const char *end)
+{
+	char *p;
+
+	if (*nfspath == '[') {
+		/* parse [] escaped IPv6 addrs */
+		p = strchr(nfspath, ']');
+		if (p != NULL && ++p < end && *p == ':')
+			return p + 1;
+	} else {
+		/* otherwise split on first colon */
+		p = strchr(nfspath, ':');
+		if (p != NULL && p < end)
+			return p + 1;
+	}
+	return NULL;
+}
+
+/*
  * Determine the mount path as a string
  */
 static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen)
@@ -59,9 +83,9 @@
 	char *limit;
 	char *path = nfs_path(&limit, dentry, buffer, buflen);
 	if (!IS_ERR(path)) {
-		char *colon = strchr(path, ':');
-		if (colon && colon < limit)
-			path = colon + 1;
+		char *path_component = nfs_path_component(path, limit);
+		if (path_component)
+			return path_component;
 	}
 	return path;
 }
@@ -108,6 +132,58 @@
 	return ret;
 }
 
+static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
+{
+	struct page *page;
+	struct nfs4_secinfo_flavors *flavors;
+	rpc_authflavor_t flavor;
+	int err;
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+	flavors = page_address(page);
+
+	err = nfs4_proc_secinfo(inode, name, flavors);
+	if (err < 0) {
+		flavor = err;
+		goto out;
+	}
+
+	flavor = nfs_find_best_sec(flavors);
+
+out:
+	put_page(page);
+	return flavor;
+}
+
+/*
+ * Please call rpc_shutdown_client() when you are done with this client.
+ */
+struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *inode,
+					struct qstr *name)
+{
+	struct rpc_clnt *clone;
+	struct rpc_auth *auth;
+	rpc_authflavor_t flavor;
+
+	flavor = nfs4_negotiate_security(inode, name);
+	if (flavor < 0)
+		return ERR_PTR(flavor);
+
+	clone = rpc_clone_client(clnt);
+	if (IS_ERR(clone))
+		return clone;
+
+	auth = rpcauth_create(flavor, clone);
+	if (!auth) {
+		rpc_shutdown_client(clone);
+		clone = ERR_PTR(-EIO);
+	}
+
+	return clone;
+}
+
 static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
 				     char *page, char *page2,
 				     const struct nfs4_fs_location *location)
@@ -224,7 +300,7 @@
  * @dentry - dentry of referral
  *
  */
-struct vfsmount *nfs_do_refmount(struct dentry *dentry)
+struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
 {
 	struct vfsmount *mnt = ERR_PTR(-ENOMEM);
 	struct dentry *parent;
@@ -250,7 +326,7 @@
 	dprintk("%s: getting locations for %s/%s\n",
 		__func__, parent->d_name.name, dentry->d_name.name);
 
-	err = nfs4_proc_fs_locations(parent->d_inode, &dentry->d_name, fs_locations, page);
+	err = nfs4_proc_fs_locations(client, parent->d_inode, &dentry->d_name, fs_locations, page);
 	dput(parent);
 	if (err != 0 ||
 	    fs_locations->nlocations <= 0 ||
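
The bracket handling in nfs_path_component() above is easiest to see with a concrete input. A minimal user-space sketch of the same split (not kernel code; the sample strings are made up for illustration):

#include <stdio.h>
#include <string.h>

/* Split "<server>:<path>" the way nfs_path_component() does, honouring
 * []-escaped IPv6 server addresses so their colons are not mistaken for
 * the server/path separator.
 */
static const char *path_component(const char *nfspath)
{
	const char *p;

	if (*nfspath == '[') {
		p = strchr(nfspath, ']');
		if (p && p[1] == ':')
			return p + 2;		/* skip "]" and ":" */
	} else {
		p = strchr(nfspath, ':');
		if (p)
			return p + 1;		/* skip the first ":" */
	}
	return NULL;				/* no "<server>:" prefix */
}

int main(void)
{
	printf("%s\n", path_component("[fd00::1]:/export/home"));	/* prints /export/home */
	printf("%s\n", path_component("server:/export"));		/* prints /export */
	return 0;
}
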
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f82bde0..ab985f6 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -838,7 +838,8 @@
 	p->o_arg.open_flags = flags;
 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
 	p->o_arg.clientid = server->nfs_client->cl_clientid;
-	p->o_arg.id = sp->so_seqid.owner_id;
+	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
+	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
 	p->o_arg.name = &dentry->d_name;
 	p->o_arg.server = server;
 	p->o_arg.bitmask = server->attr_bitmask;
@@ -1466,8 +1467,7 @@
 			goto unlock_no_action;
 		rcu_read_unlock();
 	}
-	/* Update sequence id. */
-	data->o_arg.id = sp->so_seqid.owner_id;
+	/* Update client id. */
 	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
@@ -1954,10 +1954,19 @@
 	};
 	int err;
 	do {
-		err = nfs4_handle_exception(server,
-				_nfs4_do_setattr(inode, cred, fattr, sattr, state),
-				&exception);
+		err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
+		switch (err) {
+		case -NFS4ERR_OPENMODE:
+			if (state && !(state->state & FMODE_WRITE)) {
+				err = -EBADF;
+				if (sattr->ia_valid & ATTR_OPEN)
+					err = -EACCES;
+				goto out;
+			}
+		}
+		err = nfs4_handle_exception(server, err, &exception);
 	} while (exception.retry);
+out:
 	return err;
 }
 
@@ -2368,8 +2377,9 @@
  * Note that we'll actually follow the referral later when
  * we detect fsid mismatch in inode revalidation
  */
-static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
-			     struct nfs_fattr *fattr, struct nfs_fh *fhandle)
+static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
+			     const struct qstr *name, struct nfs_fattr *fattr,
+			     struct nfs_fh *fhandle)
 {
 	int status = -ENOMEM;
 	struct page *page = NULL;
@@ -2382,7 +2392,7 @@
 	if (locations == NULL)
 		goto out;
 
-	status = nfs4_proc_fs_locations(dir, name, locations, page);
+	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
 	if (status != 0)
 		goto out;
 	/* Make sure server returned a different fsid for the referral */
@@ -2519,37 +2529,82 @@
 	return status;
 }
 
-void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh)
+static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
 {
-	memset(fh, 0, sizeof(struct nfs_fh));
-	fattr->fsid.major = 1;
 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
-		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT;
+		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
 	fattr->nlink = 2;
 }
 
+static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
+				   struct qstr *name, struct nfs_fh *fhandle,
+				   struct nfs_fattr *fattr)
+{
+	struct nfs4_exception exception = { };
+	struct rpc_clnt *client = *clnt;
+	int err;
+	do {
+		err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
+		switch (err) {
+		case -NFS4ERR_BADNAME:
+			err = -ENOENT;
+			goto out;
+		case -NFS4ERR_MOVED:
+			err = nfs4_get_referral(client, dir, name, fattr, fhandle);
+			goto out;
+		case -NFS4ERR_WRONGSEC:
+			err = -EPERM;
+			if (client != *clnt)
+				goto out;
+
+			client = nfs4_create_sec_client(client, dir, name);
+			if (IS_ERR(client))
+				return PTR_ERR(client);
+
+			exception.retry = 1;
+			break;
+		default:
+			err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
+		}
+	} while (exception.retry);
+
+out:
+	if (err == 0)
+		*clnt = client;
+	else if (client != *clnt)
+		rpc_shutdown_client(client);
+
+	return err;
+}
+
 static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
-	struct nfs4_exception exception = { };
-	int err;
-	do {
-		int status;
+	int status;
+	struct rpc_clnt *client = NFS_CLIENT(dir);
 
-		status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr);
-		switch (status) {
-		case -NFS4ERR_BADNAME:
-			return -ENOENT;
-		case -NFS4ERR_MOVED:
-			return nfs4_get_referral(dir, name, fattr, fhandle);
-		case -NFS4ERR_WRONGSEC:
-			nfs_fixup_secinfo_attributes(fattr, fhandle);
-		}
-		err = nfs4_handle_exception(NFS_SERVER(dir),
-				status, &exception);
-	} while (exception.retry);
-	return err;
+	status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
+	if (client != NFS_CLIENT(dir)) {
+		rpc_shutdown_client(client);
+		nfs_fixup_secinfo_attributes(fattr);
+	}
+	return status;
+}
+
+struct rpc_clnt *
+nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
+			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+	int status;
+	struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
+
+	status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
+	if (status < 0) {
+		rpc_shutdown_client(client);
+		return ERR_PTR(status);
+	}
+	return client;
 }
 
 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
@@ -2727,8 +2782,7 @@
 	struct nfs_server *server = NFS_SERVER(dir);
 	struct nfs_removeargs args = {
 		.fh = NFS_FH(dir),
-		.name.len = name->len,
-		.name.name = name->name,
+		.name = *name,
 		.bitmask = server->attr_bitmask,
 	};
 	struct nfs_removeres res = {
@@ -3619,16 +3673,16 @@
 	return ret;
 }
 
-static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
+static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
 {
 	struct nfs4_cached_acl *acl;
 
-	if (buf && acl_len <= PAGE_SIZE) {
+	if (pages && acl_len <= PAGE_SIZE) {
 		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
 		if (acl == NULL)
 			goto out;
 		acl->cached = 1;
-		memcpy(acl->data, buf, acl_len);
+		_copy_from_pages(acl->data, pages, pgbase, acl_len);
 	} else {
 		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
 		if (acl == NULL)
@@ -3661,7 +3715,6 @@
 	struct nfs_getaclres res = {
 		.acl_len = buflen,
 	};
-	void *resp_buf;
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
 		.rpc_argp = &args,
@@ -3675,24 +3728,27 @@
 	if (npages == 0)
 		npages = 1;
 
+	/* Add an extra page to handle the bitmap returned */
+	npages++;
+
 	for (i = 0; i < npages; i++) {
 		pages[i] = alloc_page(GFP_KERNEL);
 		if (!pages[i])
 			goto out_free;
 	}
-	if (npages > 1) {
-		/* for decoding across pages */
-		res.acl_scratch = alloc_page(GFP_KERNEL);
-		if (!res.acl_scratch)
-			goto out_free;
-	}
+
+	/* for decoding across pages */
+	res.acl_scratch = alloc_page(GFP_KERNEL);
+	if (!res.acl_scratch)
+		goto out_free;
+
 	args.acl_len = npages * PAGE_SIZE;
 	args.acl_pgbase = 0;
+
 	/* Let decode_getfacl know not to fail if the ACL data is larger than
 	 * the page we send as a guess */
 	if (buf == NULL)
 		res.acl_flags |= NFS4_ACL_LEN_REQUEST;
-	resp_buf = page_address(pages[0]);
 
 	dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
 		__func__, buf, buflen, npages, args.acl_len);
@@ -3703,9 +3759,9 @@
 
 	acl_len = res.acl_len - res.acl_data_offset;
 	if (acl_len > args.acl_len)
-		nfs4_write_cached_acl(inode, NULL, acl_len);
+		nfs4_write_cached_acl(inode, NULL, 0, acl_len);
 	else
-		nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
+		nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
 				      acl_len);
 	if (buf) {
 		ret = -ERANGE;
@@ -4558,7 +4614,9 @@
 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
 {
 	struct nfs_server *server = NFS_SERVER(state->inode);
-	struct nfs4_exception exception = { };
+	struct nfs4_exception exception = {
+		.inode = state->inode,
+	};
 	int err;
 
 	do {
@@ -4576,7 +4634,9 @@
 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
 {
 	struct nfs_server *server = NFS_SERVER(state->inode);
-	struct nfs4_exception exception = { };
+	struct nfs4_exception exception = {
+		.inode = state->inode,
+	};
 	int err;
 
 	err = nfs4_set_lock_state(state, request);
@@ -4676,6 +4736,7 @@
 {
 	struct nfs4_exception exception = {
 		.state = state,
+		.inode = state->inode,
 	};
 	int err;
 
@@ -4721,6 +4782,20 @@
 
 	if (state == NULL)
 		return -ENOLCK;
+	/*
+	 * Don't rely on the VFS having checked the file open mode,
+	 * since it won't do this for flock() locks.
+	 */
+	switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
+	case F_RDLCK:
+		if (!(filp->f_mode & FMODE_READ))
+			return -EBADF;
+		break;
+	case F_WRLCK:
+		if (!(filp->f_mode & FMODE_WRITE))
+			return -EBADF;
+	}
+
 	do {
 		status = nfs4_proc_setlk(state, cmd, request);
 		if ((status != -EAGAIN) || IS_SETLK(cmd))
@@ -4891,8 +4966,10 @@
 	fattr->nlink = 2;
 }
 
-int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
-		struct nfs4_fs_locations *fs_locations, struct page *page)
+static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
+				   const struct qstr *name,
+				   struct nfs4_fs_locations *fs_locations,
+				   struct page *page)
 {
 	struct nfs_server *server = NFS_SERVER(dir);
 	u32 bitmask[2] = {
@@ -4926,11 +5003,26 @@
 	nfs_fattr_init(&fs_locations->fattr);
 	fs_locations->server = server;
 	fs_locations->nlocations = 0;
-	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+	status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
 	dprintk("%s: returned status = %d\n", __func__, status);
 	return status;
 }
 
+int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
+			   const struct qstr *name,
+			   struct nfs4_fs_locations *fs_locations,
+			   struct page *page)
+{
+	struct nfs4_exception exception = { };
+	int err;
+	do {
+		err = nfs4_handle_exception(NFS_SERVER(dir),
+				_nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
+				&exception);
+	} while (exception.retry);
+	return err;
+}
+
 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
 {
 	int status;
@@ -4953,8 +5045,8 @@
 	return status;
 }
 
-static int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
-		struct nfs4_secinfo_flavors *flavors)
+int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
+		      struct nfs4_secinfo_flavors *flavors)
 {
 	struct nfs4_exception exception = { };
 	int err;
@@ -5029,10 +5121,9 @@
 	nfs4_construct_boot_verifier(clp, &verifier);
 
 	args.id_len = scnprintf(args.id, sizeof(args.id),
-				"%s/%s.%s/%u",
+				"%s/%s/%u",
 				clp->cl_ipaddr,
-				init_utsname()->nodename,
-				init_utsname()->domainname,
+				clp->cl_rpcclient->cl_nodename,
 				clp->cl_rpcclient->cl_auth->au_flavor);
 
 	res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0f43414..7f0fcfc 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -393,6 +393,7 @@
 static void
 nfs4_init_seqid_counter(struct nfs_seqid_counter *sc)
 {
+	sc->create_time = ktime_get();
 	sc->flags = 0;
 	sc->counter = 0;
 	spin_lock_init(&sc->lock);
@@ -434,13 +435,17 @@
 static void
 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
 {
-	if (!RB_EMPTY_NODE(&sp->so_server_node)) {
+	struct rb_node *rb_node = &sp->so_server_node;
+
+	if (!RB_EMPTY_NODE(rb_node)) {
 		struct nfs_server *server = sp->so_server;
 		struct nfs_client *clp = server->nfs_client;
 
 		spin_lock(&clp->cl_lock);
-		rb_erase(&sp->so_server_node, &server->state_owners);
-		RB_CLEAR_NODE(&sp->so_server_node);
+		if (!RB_EMPTY_NODE(rb_node)) {
+			rb_erase(rb_node, &server->state_owners);
+			RB_CLEAR_NODE(rb_node);
+		}
 		spin_unlock(&clp->cl_lock);
 	}
 }
@@ -516,6 +521,14 @@
 /**
  * nfs4_put_state_owner - Release a nfs4_state_owner
  * @sp: state owner data to release
+ *
+ * Note that we keep released state owners on an LRU
+ * list.
+ * This caches valid state owners so that they can be
+ * reused, to avoid the OPEN_CONFIRM on minor version 0.
+ * It also pins the uniquifier of dropped state owners for
+ * a while, to ensure that those state owner names are
+ * never reused.
  */
 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 {
@@ -525,15 +538,9 @@
 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
 		return;
 
-	if (!RB_EMPTY_NODE(&sp->so_server_node)) {
-		sp->so_expires = jiffies;
-		list_add_tail(&sp->so_lru, &server->state_owners_lru);
-		spin_unlock(&clp->cl_lock);
-	} else {
-		nfs4_remove_state_owner_locked(sp);
-		spin_unlock(&clp->cl_lock);
-		nfs4_free_state_owner(sp);
-	}
+	sp->so_expires = jiffies;
+	list_add_tail(&sp->so_lru, &server->state_owners_lru);
+	spin_unlock(&clp->cl_lock);
 }
 
 /**
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c74fdb1..c54aae3 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -74,7 +74,7 @@
 /* lock,open owner id:
  * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT  >> 2)
  */
-#define open_owner_id_maxsz	(1 + 1 + 4)
+#define open_owner_id_maxsz	(1 + 2 + 1 + 1 + 2)
 #define lock_owner_id_maxsz	(1 + 1 + 4)
 #define decode_lockowner_maxsz	(1 + XDR_QUADLEN(IDMAP_NAMESZ))
 #define compound_encode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
@@ -1340,12 +1340,13 @@
  */
 	encode_nfs4_seqid(xdr, arg->seqid);
 	encode_share_access(xdr, arg->fmode);
-	p = reserve_space(xdr, 32);
+	p = reserve_space(xdr, 36);
 	p = xdr_encode_hyper(p, arg->clientid);
-	*p++ = cpu_to_be32(20);
+	*p++ = cpu_to_be32(24);
 	p = xdr_encode_opaque_fixed(p, "open id:", 8);
 	*p++ = cpu_to_be32(arg->server->s_dev);
-	xdr_encode_hyper(p, arg->id);
+	*p++ = cpu_to_be32(arg->id.uniquifier);
+	xdr_encode_hyper(p, arg->id.create_time);
 }
 
 static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
@@ -4257,8 +4258,6 @@
 	status = decode_attr_error(xdr, bitmap, &err);
 	if (status < 0)
 		goto xdr_error;
-	if (err == -NFS4ERR_WRONGSEC)
-		nfs_fixup_secinfo_attributes(fattr, fh);
 
 	status = decode_attr_filehandle(xdr, bitmap, fh);
 	if (status < 0)
@@ -4901,11 +4900,19 @@
 		 bitmap[3] = {0};
 	struct kvec *iov = req->rq_rcv_buf.head;
 	int status;
+	size_t page_len = xdr->buf->page_len;
 
 	res->acl_len = 0;
 	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
 		goto out;
+
 	bm_p = xdr->p;
+	res->acl_data_offset = be32_to_cpup(bm_p) + 2;
+	res->acl_data_offset <<= 2;
+	/* Check if the acl data starts beyond the allocated buffer */
+	if (res->acl_data_offset > page_len)
+		return -ERANGE;
+
 	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
 		goto out;
 	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
@@ -4915,28 +4922,24 @@
 		return -EIO;
 	if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
 		size_t hdrlen;
-		u32 recvd;
 
 		/* The bitmap (xdr len + bitmaps) and the attr xdr len words
 		 * are stored with the acl data to handle the problem of
 		 * variable length bitmaps.*/
 		xdr->p = bm_p;
-		res->acl_data_offset = be32_to_cpup(bm_p) + 2;
-		res->acl_data_offset <<= 2;
 
 		/* We ignore &savep and don't do consistency checks on
 		 * the attr length.  Let userspace figure it out.... */
 		hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
 		attrlen += res->acl_data_offset;
-		recvd = req->rq_rcv_buf.len - hdrlen;
-		if (attrlen > recvd) {
+		if (attrlen > page_len) {
 			if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
 				/* getxattr interface called with a NULL buf */
 				res->acl_len = attrlen;
 				goto out;
 			}
-			dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
-					attrlen, recvd);
+			dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
+					attrlen, page_len);
 			return -EINVAL;
 		}
 		xdr_read_pages(xdr, attrlen);
@@ -5089,16 +5092,13 @@
 	return -EINVAL;
 }
 
-static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
+static int decode_secinfo_common(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
 {
 	struct nfs4_secinfo_flavor *sec_flavor;
 	int status;
 	__be32 *p;
 	int i, num_flavors;
 
-	status = decode_op_hdr(xdr, OP_SECINFO);
-	if (status)
-		goto out;
 	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(!p))
 		goto out_overflow;
@@ -5124,6 +5124,7 @@
 		res->flavors->num_flavors++;
 	}
 
+	status = 0;
 out:
 	return status;
 out_overflow:
@@ -5131,7 +5132,23 @@
 	return -EIO;
 }
 
+static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
+{
+	int status = decode_op_hdr(xdr, OP_SECINFO);
+	if (status)
+		return status;
+	return decode_secinfo_common(xdr, res);
+}
+
 #if defined(CONFIG_NFS_V4_1)
+static int decode_secinfo_no_name(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
+{
+	int status = decode_op_hdr(xdr, OP_SECINFO_NO_NAME);
+	if (status)
+		return status;
+	return decode_secinfo_common(xdr, res);
+}
+
 static int decode_exchange_id(struct xdr_stream *xdr,
 			      struct nfs41_exchange_id_res *res)
 {
@@ -6816,7 +6833,7 @@
 	status = decode_putrootfh(xdr);
 	if (status)
 		goto out;
-	status = decode_secinfo(xdr, res);
+	status = decode_secinfo_no_name(xdr, res);
 out:
 	return status;
 }
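
For reference, the numbers in the encode_openhdr() hunk above add up as follows (read straight off the diff, no new code): the 36 reserved bytes are the 8-byte clientid, a 4-byte opaque length word and 24 bytes of opaque owner data, the owner being "open id:" (8) + s_dev (4) + uniquifier (4) + create_time (8). The old layout was 8 + 4 + 20, with a single 8-byte id in place of the uniquifier/create_time pair, which is also why open_owner_id_maxsz grows from 6 XDR words (1 + 1 + 4) to 7 (1 + 2 + 1 + 1 + 2).
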
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index 8d45f1c..595c5fc 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -604,7 +604,6 @@
 {
 	struct objlayout_deviceinfo *odi;
 	struct pnfs_device pd;
-	struct super_block *sb;
 	struct page *page, **pages;
 	u32 *p;
 	int err;
@@ -623,7 +622,6 @@
 	pd.pglen = PAGE_SIZE;
 	pd.mincount = 0;
 
-	sb = pnfslay->plh_inode->i_sb;
 	err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd);
 	dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err);
 	if (err)
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index b5d4515..38512bc 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -587,7 +587,7 @@
 
 	/* allocate pages for xdr post processing */
 	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
-	max_pages = max_resp_sz >> PAGE_SHIFT;
+	max_pages = nfs_page_array_len(0, max_resp_sz);
 
 	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
 	if (!pages)
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index b63b6f4..d6408b6 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -335,8 +335,7 @@
 {
 	struct nfs_removeargs arg = {
 		.fh = NFS_FH(dir),
-		.name.len = name->len,
-		.name.name = name->name,
+		.name = *name,
 	};
 	struct rpc_message msg = { 
 		.rpc_proc = &nfs_procedures[NFSPROC_REMOVE],
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 9a0e8ef4..0a4be28 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -322,7 +322,7 @@
 	while (!list_empty(res)) {
 		data = list_entry(res->next, struct nfs_read_data, list);
 		list_del(&data->list);
-		nfs_readdata_free(data);
+		nfs_readdata_release(data);
 	}
 	nfs_readpage_release(req);
 	return -ENOMEM;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 37412f7..4ac7fca 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2428,7 +2428,7 @@
 	dprintk("--> nfs_xdev_mount()\n");
 
 	/* create a new volume representation */
-	server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr);
+	server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
 	if (IS_ERR(server)) {
 		error = PTR_ERR(server);
 		goto out_err_noserver;
@@ -2767,11 +2767,15 @@
 	char *root_devname;
 	size_t len;
 
-	len = strlen(hostname) + 3;
+	len = strlen(hostname) + 5;
 	root_devname = kmalloc(len, GFP_KERNEL);
 	if (root_devname == NULL)
 		return ERR_PTR(-ENOMEM);
-	snprintf(root_devname, len, "%s:/", hostname);
+	/* Does hostname need to be enclosed in brackets? */
+	if (strchr(hostname, ':'))
+		snprintf(root_devname, len, "[%s]:/", hostname);
+	else
+		snprintf(root_devname, len, "%s:/", hostname);
 	root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
 	kfree(root_devname);
 	return root_mnt;
@@ -2951,7 +2955,7 @@
 	dprintk("--> nfs4_xdev_mount()\n");
 
 	/* create a new volume representation */
-	server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr);
+	server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
 	if (IS_ERR(server)) {
 		error = PTR_ERR(server);
 		goto out_err_noserver;
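
A note on the nfs_devname() hunk above: the allocation grows from strlen(hostname) + 3 to + 5 because the bracketed form "[host]:/" needs room for '[', ']', ':', '/' and the terminating NUL, while the plain "host:/" form only needed ':', '/' and NUL. The brackets are added whenever the hostname itself contains a colon, i.e. when it is an IPv6 literal, matching the bracket-aware parsing added in fs/nfs/nfs4namespace.c.
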
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 2c68818..c074623 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -682,7 +682,8 @@
 		req->wb_bytes = rqend - req->wb_offset;
 out_unlock:
 	spin_unlock(&inode->i_lock);
-	nfs_clear_request_commit(req);
+	if (req)
+		nfs_clear_request_commit(req);
 	return req;
 out_flushme:
 	spin_unlock(&inode->i_lock);
@@ -1018,7 +1019,7 @@
 	while (!list_empty(res)) {
 		data = list_entry(res->next, struct nfs_write_data, list);
 		list_del(&data->list);
-		nfs_writedata_free(data);
+		nfs_writedata_release(data);
 	}
 	nfs_redirty_request(req);
 	return -ENOMEM;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 4767429..ed3f920 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -577,7 +577,7 @@
 	struct cld_net *cn = nn->cld_net;
 
 	if (mlen != sizeof(*cmsg)) {
-		dprintk("%s: got %lu bytes, expected %lu\n", __func__, mlen,
+		dprintk("%s: got %zu bytes, expected %zu\n", __func__, mlen,
 			sizeof(*cmsg));
 		return -EINVAL;
 	}
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index fce2bbe..0bb2c20 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -441,7 +441,7 @@
 {
 	unsigned long ino;
 	struct inode *inode;
-	struct qstr dotdot = {.name = "..", .len = 2};
+	struct qstr dotdot = QSTR_INIT("..", 2);
 	struct nilfs_root *root;
 
 	ino = nilfs_inode_by_name(child->d_inode, &dotdot);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 044e7b5..1bfe880 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -2005,7 +2005,7 @@
 	o2net_listen_sock = sock;
 	INIT_WORK(&o2net_listen_work, o2net_accept_many);
 
-	sock->sk->sk_reuse = 1;
+	sock->sk->sk_reuse = SK_CAN_REUSE;
 	ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
 	if (ret < 0) {
 		printk(KERN_ERR "o2net: Error %d while binding socket at "
diff --git a/fs/pipe.c b/fs/pipe.c
index 25feaa3..fec5e4a 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -346,6 +346,16 @@
 	.get = generic_pipe_buf_get,
 };
 
+static const struct pipe_buf_operations packet_pipe_buf_ops = {
+	.can_merge = 0,
+	.map = generic_pipe_buf_map,
+	.unmap = generic_pipe_buf_unmap,
+	.confirm = generic_pipe_buf_confirm,
+	.release = anon_pipe_buf_release,
+	.steal = generic_pipe_buf_steal,
+	.get = generic_pipe_buf_get,
+};
+
 static ssize_t
 pipe_read(struct kiocb *iocb, const struct iovec *_iov,
 	   unsigned long nr_segs, loff_t pos)
@@ -407,6 +417,13 @@
 			ret += chars;
 			buf->offset += chars;
 			buf->len -= chars;
+
+			/* Was it a packet buffer? Clean up and exit */
+			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
+				total_len = chars;
+				buf->len = 0;
+			}
+
 			if (!buf->len) {
 				buf->ops = NULL;
 				ops->release(pipe, buf);
@@ -459,6 +476,11 @@
 	return ret;
 }
 
+static inline int is_packetized(struct file *file)
+{
+	return (file->f_flags & O_DIRECT) != 0;
+}
+
 static ssize_t
 pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 	    unsigned long nr_segs, loff_t ppos)
@@ -593,6 +615,11 @@
 			buf->ops = &anon_pipe_buf_ops;
 			buf->offset = 0;
 			buf->len = chars;
+			buf->flags = 0;
+			if (is_packetized(filp)) {
+				buf->ops = &packet_pipe_buf_ops;
+				buf->flags = PIPE_BUF_FLAG_PACKET;
+			}
 			pipe->nrbufs = ++bufs;
 			pipe->tmp_page = NULL;
 
@@ -1013,7 +1040,7 @@
 		goto err_dentry;
 	f->f_mapping = inode->i_mapping;
 
-	f->f_flags = O_WRONLY | (flags & O_NONBLOCK);
+	f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
 	f->f_version = 0;
 
 	return f;
@@ -1057,7 +1084,7 @@
 	int error;
 	int fdw, fdr;
 
-	if (flags & ~(O_CLOEXEC | O_NONBLOCK))
+	if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
 		return -EINVAL;
 
 	fw = create_write_pipe(flags);
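
A minimal user-space sketch of the packetized pipe mode enabled above (it assumes a kernel that accepts O_DIRECT in pipe2(); the byte counts in the comments follow from the one-packet-per-read semantics):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char buf[64];
	ssize_t n;

	if (pipe2(fds, O_DIRECT) < 0) {
		perror("pipe2");		/* older kernels reject O_DIRECT here */
		return 1;
	}

	write(fds[1], "first", 5);		/* becomes one packet */
	write(fds[1], "second", 6);		/* becomes a second packet */

	n = read(fds[0], buf, sizeof(buf));	/* returns 5 ("first") despite the large buffer */
	printf("first read: %zd bytes\n", n);
	n = read(fds[0], buf, sizeof(buf));	/* returns 6 ("second") */
	printf("second read: %zd bytes\n", n);
	return 0;
}
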
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 1c8b280..57b8159 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1799,10 +1799,15 @@
 	if (task) {
 		files = get_files_struct(task);
 		if (files) {
+			struct file *file;
 			rcu_read_lock();
-			if (fcheck_files(files, fd)) {
+			file = fcheck_files(files, fd);
+			if (file) {
+				unsigned i_mode, f_mode = file->f_mode;
+
 				rcu_read_unlock();
 				put_files_struct(files);
+
 				if (task_dumpable(task)) {
 					rcu_read_lock();
 					cred = __task_cred(task);
@@ -1813,7 +1818,14 @@
 					inode->i_uid = 0;
 					inode->i_gid = 0;
 				}
-				inode->i_mode &= ~(S_ISUID | S_ISGID);
+
+				i_mode = S_IFLNK;
+				if (f_mode & FMODE_READ)
+					i_mode |= S_IRUSR | S_IXUSR;
+				if (f_mode & FMODE_WRITE)
+					i_mode |= S_IWUSR | S_IXUSR;
+				inode->i_mode = i_mode;
+
 				security_task_to_inode(task, inode);
 				put_task_struct(task);
 				return 1;
@@ -1837,8 +1849,6 @@
 	struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
 	unsigned fd = *(const unsigned *)ptr;
-	struct file *file;
-	struct files_struct *files;
  	struct inode *inode;
  	struct proc_inode *ei;
 	struct dentry *error = ERR_PTR(-ENOENT);
@@ -1848,25 +1858,6 @@
 		goto out;
 	ei = PROC_I(inode);
 	ei->fd = fd;
-	files = get_files_struct(task);
-	if (!files)
-		goto out_iput;
-	inode->i_mode = S_IFLNK;
-
-	/*
-	 * We are not taking a ref to the file structure, so we must
-	 * hold ->file_lock.
-	 */
-	spin_lock(&files->file_lock);
-	file = fcheck_files(files, fd);
-	if (!file)
-		goto out_unlock;
-	if (file->f_mode & FMODE_READ)
-		inode->i_mode |= S_IRUSR | S_IXUSR;
-	if (file->f_mode & FMODE_WRITE)
-		inode->i_mode |= S_IWUSR | S_IXUSR;
-	spin_unlock(&files->file_lock);
-	put_files_struct(files);
 
 	inode->i_op = &proc_pid_link_inode_operations;
 	inode->i_size = 64;
@@ -1879,12 +1870,6 @@
 
  out:
 	return error;
-out_unlock:
-	spin_unlock(&files->file_lock);
-	put_files_struct(files);
-out_iput:
-	iput(inode);
-	goto out;
 }
 
 static struct dentry *proc_lookupfd_common(struct inode *dir,
@@ -2177,16 +2162,16 @@
 		goto out;
 
 	result = ERR_PTR(-EACCES);
-	if (lock_trace(task))
+	if (!ptrace_may_access(task, PTRACE_MODE_READ))
 		goto out_put_task;
 
 	result = ERR_PTR(-ENOENT);
 	if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
-		goto out_unlock;
+		goto out_put_task;
 
 	mm = get_task_mm(task);
 	if (!mm)
-		goto out_unlock;
+		goto out_put_task;
 
 	down_read(&mm->mmap_sem);
 	vma = find_exact_vma(mm, vm_start, vm_end);
@@ -2198,8 +2183,6 @@
 out_no_vma:
 	up_read(&mm->mmap_sem);
 	mmput(mm);
-out_unlock:
-	unlock_trace(task);
 out_put_task:
 	put_task_struct(task);
 out:
@@ -2233,7 +2216,7 @@
 		goto out;
 
 	ret = -EACCES;
-	if (lock_trace(task))
+	if (!ptrace_may_access(task, PTRACE_MODE_READ))
 		goto out_put_task;
 
 	ret = 0;
@@ -2241,12 +2224,12 @@
 	case 0:
 		ino = inode->i_ino;
 		if (filldir(dirent, ".", 1, 0, ino, DT_DIR) < 0)
-			goto out_unlock;
+			goto out_put_task;
 		filp->f_pos++;
 	case 1:
 		ino = parent_ino(dentry);
 		if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
-			goto out_unlock;
+			goto out_put_task;
 		filp->f_pos++;
 	default:
 	{
@@ -2257,7 +2240,7 @@
 
 		mm = get_task_mm(task);
 		if (!mm)
-			goto out_unlock;
+			goto out_put_task;
 		down_read(&mm->mmap_sem);
 
 		nr_files = 0;
@@ -2287,7 +2270,7 @@
 					flex_array_free(fa);
 				up_read(&mm->mmap_sem);
 				mmput(mm);
-				goto out_unlock;
+				goto out_put_task;
 			}
 			for (i = 0, vma = mm->mmap, pos = 2; vma;
 					vma = vma->vm_next) {
@@ -2332,8 +2315,6 @@
 	}
 	}
 
-out_unlock:
-	unlock_trace(task);
 out_put_task:
 	put_task_struct(task);
 out:
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2b9a760..1030a71 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -597,9 +597,6 @@
 		if (!page)
 			continue;
 
-		if (PageReserved(page))
-			continue;
-
 		/* Clear accessed and referenced bits. */
 		ptep_test_and_clear_young(vma, addr, pte);
 		ClearPageReferenced(page);
@@ -750,6 +747,8 @@
 	else if (pte_present(pte))
 		*pme = make_pme(PM_PFRAME(pte_pfn(pte))
 				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
+	else
+		*pme = make_pme(PM_NOT_PRESENT);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -764,6 +763,8 @@
 	if (pmd_present(pmd))
 		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
 				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
+	else
+		*pme = make_pme(PM_NOT_PRESENT);
 }
 #else
 static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
@@ -804,8 +805,10 @@
 
 		/* check to see if we've left 'vma' behind
 		 * and need a new, higher one */
-		if (vma && (addr >= vma->vm_end))
+		if (vma && (addr >= vma->vm_end)) {
 			vma = find_vma(walk->mm, addr);
+			pme = make_pme(PM_NOT_PRESENT);
+		}
 
 		/* check that 'vma' actually covers this address,
 		 * and that it isn't a huge page vma */
@@ -833,6 +836,8 @@
 	if (pte_present(pte))
 		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
 				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
+	else
+		*pme = make_pme(PM_NOT_PRESENT);
 }
 
 /* This function walks within one hugetlb entry in the single call */
@@ -842,7 +847,7 @@
 {
 	struct pagemapread *pm = walk->private;
 	int err = 0;
-	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);
+	pagemap_entry_t pme;
 
 	for (; addr != end; addr += PAGE_SIZE) {
 		int offset = (addr & ~hmask) >> PAGE_SHIFT;
diff --git a/fs/stat.c b/fs/stat.c
index c733dc5..0cef336 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -57,12 +57,13 @@
 
 int vfs_fstat(unsigned int fd, struct kstat *stat)
 {
-	struct file *f = fget(fd);
+	int fput_needed;
+	struct file *f = fget_light(fd, &fput_needed);
 	int error = -EBADF;
 
 	if (f) {
 		error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat);
-		fput(f);
+		fput_light(f, fput_needed);
 	}
 	return error;
 }
@@ -190,24 +191,32 @@
 
 #endif /* __ARCH_WANT_OLD_STAT */
 
+#if BITS_PER_LONG == 32
+#  define choose_32_64(a,b) a
+#else
+#  define choose_32_64(a,b) b
+#endif
+
+#define valid_dev(x)  choose_32_64(old_valid_dev,new_valid_dev)(x)
+#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
+
+#ifndef INIT_STRUCT_STAT_PADDING
+#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
+#endif
+
 static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 {
 	struct stat tmp;
 
-#if BITS_PER_LONG == 32
-	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
+	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
 		return -EOVERFLOW;
-#else
-	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
+#if BITS_PER_LONG == 32
+	if (stat->size > MAX_NON_LFS)
 		return -EOVERFLOW;
 #endif
 
-	memset(&tmp, 0, sizeof(tmp));
-#if BITS_PER_LONG == 32
-	tmp.st_dev = old_encode_dev(stat->dev);
-#else
-	tmp.st_dev = new_encode_dev(stat->dev);
-#endif
+	INIT_STRUCT_STAT_PADDING(tmp);
+	tmp.st_dev = encode_dev(stat->dev);
 	tmp.st_ino = stat->ino;
 	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
 		return -EOVERFLOW;
@@ -217,15 +226,7 @@
 		return -EOVERFLOW;
 	SET_UID(tmp.st_uid, stat->uid);
 	SET_GID(tmp.st_gid, stat->gid);
-#if BITS_PER_LONG == 32
-	tmp.st_rdev = old_encode_dev(stat->rdev);
-#else
-	tmp.st_rdev = new_encode_dev(stat->rdev);
-#endif
-#if BITS_PER_LONG == 32
-	if (stat->size > MAX_NON_LFS)
-		return -EOVERFLOW;
-#endif	
+	tmp.st_rdev = encode_dev(stat->rdev);
 	tmp.st_size = stat->size;
 	tmp.st_atime = stat->atime.tv_sec;
 	tmp.st_mtime = stat->mtime.tv_sec;
@@ -327,11 +328,15 @@
 /* ---------- LFS-64 ----------- */
 #ifdef __ARCH_WANT_STAT64
 
+#ifndef INIT_STRUCT_STAT64_PADDING
+#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
+#endif
+
 static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
 {
 	struct stat64 tmp;
 
-	memset(&tmp, 0, sizeof(struct stat64));
+	INIT_STRUCT_STAT64_PADDING(tmp);
 #ifdef CONFIG_MIPS
 	/* mips has weird padding, so we don't get 64 bits there */
 	if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
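
A quick expansion of the new stat helpers above, for reference: choose_32_64(a,b) picks its first argument when BITS_PER_LONG == 32 and its second otherwise, so valid_dev()/encode_dev() become old_valid_dev()/old_encode_dev() on 32-bit builds and the new_* variants on 64-bit ones, which is exactly what the removed per-field #if blocks spelled out by hand; INIT_STRUCT_STAT_PADDING() still defaults to the full memset unless an architecture overrides it.
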
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 16ad84d..abd5133 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -2361,7 +2361,7 @@
 			 * by passing 'ubifs_tnc_remove_nm()' the same key but
 			 * an unmatchable name.
 			 */
-			struct qstr noname = { .len = 0, .name = "" };
+			struct qstr noname = { .name = "" };
 
 			err = dbg_check_tnc(c, 0);
 			mutex_unlock(&c->tnc_mutex);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 85b2722..7a8bafa 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -298,7 +298,7 @@
 {
 	struct inode *inode, *host = dentry->d_inode;
 	struct ubifs_info *c = host->i_sb->s_fs_info;
-	struct qstr nm = { .name = name, .len = strlen(name) };
+	struct qstr nm = QSTR_INIT(name, strlen(name));
 	struct ubifs_dent_node *xent;
 	union ubifs_key key;
 	int err, type;
@@ -361,7 +361,7 @@
 {
 	struct inode *inode, *host = dentry->d_inode;
 	struct ubifs_info *c = host->i_sb->s_fs_info;
-	struct qstr nm = { .name = name, .len = strlen(name) };
+	struct qstr nm = QSTR_INIT(name, strlen(name));
 	struct ubifs_inode *ui;
 	struct ubifs_dent_node *xent;
 	union ubifs_key key;
@@ -524,7 +524,7 @@
 {
 	struct inode *inode, *host = dentry->d_inode;
 	struct ubifs_info *c = host->i_sb->s_fs_info;
-	struct qstr nm = { .name = name, .len = strlen(name) };
+	struct qstr nm = QSTR_INIT(name, strlen(name));
 	struct ubifs_dent_node *xent;
 	union ubifs_key key;
 	int err;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 38de8f2..a165c66 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -1193,7 +1193,7 @@
 {
 	struct kernel_lb_addr tloc;
 	struct inode *inode = NULL;
-	struct qstr dotdot = {.name = "..", .len = 2};
+	struct qstr dotdot = QSTR_INIT("..", 2);
 	struct fileIdentDesc cfi;
 	struct udf_fileident_bh fibh;
 
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index ac8e279..302f340 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -146,10 +146,7 @@
 
 static struct dentry *ufs_get_parent(struct dentry *child)
 {
-	struct qstr dot_dot = {
-		.name	= "..",
-		.len	= 2,
-	};
+	struct qstr dot_dot = QSTR_INIT("..", 2);
 	ino_t ino;
 
 	ino = ufs_inode_by_name(child->d_inode, &dot_dot);
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index eba6604..e8bcc47 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -499,9 +499,10 @@
 #define ACPI_STATE_D0                   (u8) 0
 #define ACPI_STATE_D1                   (u8) 1
 #define ACPI_STATE_D2                   (u8) 2
-#define ACPI_STATE_D3                   (u8) 3
-#define ACPI_STATE_D3_COLD              (u8) 4
-#define ACPI_D_STATES_MAX               ACPI_STATE_D3_COLD
+#define ACPI_STATE_D3_HOT               (u8) 3
+#define ACPI_STATE_D3                   (u8) 4
+#define ACPI_STATE_D3_COLD              ACPI_STATE_D3
+#define ACPI_D_STATES_MAX               ACPI_STATE_D3
 #define ACPI_D_STATE_COUNT              5
 
 #define ACPI_STATE_C0                   (u8) 0
diff --git a/include/asm-generic/pci-bridge.h b/include/asm-generic/pci-bridge.h
index a5b5d5a..20db2e5 100644
--- a/include/asm-generic/pci-bridge.h
+++ b/include/asm-generic/pci-bridge.h
@@ -30,6 +30,12 @@
 	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,
 	/* ... except for domain 0 */
 	PCI_COMPAT_DOMAIN_0	= 0x00000020,
+
+	/* PCIe downstream ports are bridges that normally lead to only a
+	 * device 0, but if this is set, we scan all possible devices, not
+	 * just device 0.
+	 */
+	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,
 };
 
 #ifdef CONFIG_PCI
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 0dd4e87..5e5e386 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -35,6 +35,14 @@
 #define __ARCH_SI_BAND_T long
 #endif
 
+#ifndef __ARCH_SI_CLOCK_T
+#define __ARCH_SI_CLOCK_T __kernel_clock_t
+#endif
+
+#ifndef __ARCH_SI_ATTRIBUTES
+#define __ARCH_SI_ATTRIBUTES
+#endif
+
 #ifndef HAVE_ARCH_SIGINFO_T
 
 typedef struct siginfo {
@@ -72,8 +80,8 @@
 			__kernel_pid_t _pid;	/* which child */
 			__ARCH_SI_UID_T _uid;	/* sender's uid */
 			int _status;		/* exit code */
-			__kernel_clock_t _utime;
-			__kernel_clock_t _stime;
+			__ARCH_SI_CLOCK_T _utime;
+			__ARCH_SI_CLOCK_T _stime;
 		} _sigchld;
 
 		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
@@ -91,7 +99,7 @@
 			int _fd;
 		} _sigpoll;
 	} _sifields;
-} siginfo_t;
+} __ARCH_SI_ATTRIBUTES siginfo_t;
 
 #endif
 
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
index 0fd28e0..c749af9 100644
--- a/include/asm-generic/statfs.h
+++ b/include/asm-generic/statfs.h
@@ -15,7 +15,7 @@
  * with a 10' pole.
  */
 #ifndef __statfs_word
-#if BITS_PER_LONG == 64
+#if __BITS_PER_LONG == 64
 #define __statfs_word long
 #else
 #define __statfs_word __u32
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 3c9b616..b5d568f 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -167,7 +167,6 @@
 header-y += if_bonding.h
 header-y += if_bridge.h
 header-y += if_cablemodem.h
-header-y += if_ec.h
 header-y += if_eql.h
 header-y += if_ether.h
 header-y += if_fc.h
@@ -186,7 +185,6 @@
 header-y += if_slip.h
 header-y += if_strip.h
 header-y += if_team.h
-header-y += if_tr.h
 header-y += if_tun.h
 header-y += if_tunnel.h
 header-y += if_vlan.h
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 8d54f79..d364171 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -63,6 +63,14 @@
 void amba_device_put(struct amba_device *);
 int amba_device_add(struct amba_device *, struct resource *);
 int amba_device_register(struct amba_device *, struct resource *);
+struct amba_device *amba_apb_device_add(struct device *parent, const char *name,
+					resource_size_t base, size_t size,
+					int irq1, int irq2, void *pdata,
+					unsigned int periphid);
+struct amba_device *amba_ahb_device_add(struct device *parent, const char *name,
+					resource_size_t base, size_t size,
+					int irq1, int irq2, void *pdata,
+					unsigned int periphid);
 void amba_device_unregister(struct amba_device *);
 struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
 int amba_request_regions(struct amba_device *, const char *);
diff --git a/include/linux/atmlec.h b/include/linux/atmlec.h
index 39c917f..302791e 100644
--- a/include/linux/atmlec.h
+++ b/include/linux/atmlec.h
@@ -21,13 +21,6 @@
 /* Maximum number of LEC interfaces (tweakable) */
 #define MAX_LEC_ITF 48
 
-/*
- * From the total of MAX_LEC_ITF, last NUM_TR_DEVS are reserved for Token Ring.
- * E.g. if MAX_LEC_ITF = 48 and NUM_TR_DEVS = 8, then lec0-lec39 are for
- * Ethernet ELANs and lec40-lec47 are for Token Ring ELANS.
- */
-#define NUM_TR_DEVS 8
-
 typedef enum {
 	l_set_mac_addr,
 	l_del_mac_addr,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2aa2466..4d4ac24 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,9 +1,10 @@
 #ifndef _LINUX_BLKDEV_H
 #define _LINUX_BLKDEV_H
 
+#include <linux/sched.h>
+
 #ifdef CONFIG_BLOCK
 
-#include <linux/sched.h>
 #include <linux/major.h>
 #include <linux/genhd.h>
 #include <linux/list.h>
diff --git a/include/linux/clk.h b/include/linux/clk.h
index b025272..70cf722 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -101,6 +101,26 @@
 struct clk *clk_get(struct device *dev, const char *id);
 
 /**
+ * devm_clk_get - lookup and obtain a managed reference to a clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Returns a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno.  The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer.  (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * devm_clk_get should not be called from within interrupt context.
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
+ */
+struct clk *devm_clk_get(struct device *dev, const char *id);
+
+/**
  * clk_prepare - prepare a clock source
  * @clk: clock source
  *
@@ -206,6 +226,18 @@
  */
 void clk_put(struct clk *clk);
 
+/**
+ * devm_clk_put	- "free" a managed clock source
+ * @dev: device used to acquire the clock
+ * @clk: clock source acquired with devm_clk_get()
+ *
+ * Note: drivers must ensure that all clk_enable calls made on this
+ * clock source are balanced by clk_disable calls prior to calling
+ * this function.
+ *
+ * clk_put should not be called from within interrupt context.
+ */
+void devm_clk_put(struct device *dev, struct clk *clk);
 
 /*
  * The remaining APIs are optional for machine class support.
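
A minimal sketch of how a platform driver might use the managed helpers declared above. The "foo" driver is hypothetical; only devm_clk_get(), devm_clk_put() and the existing clk_prepare()/clk_enable() family come from this header:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);	/* released automatically on unbind */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare(clk);
	if (ret)
		return ret;

	ret = clk_enable(clk);
	if (ret) {
		clk_unprepare(clk);
		return ret;
	}

	platform_set_drvdata(pdev, clk);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct clk *clk = platform_get_drvdata(pdev);

	clk_disable(clk);
	clk_unprepare(clk);
	/* no clk_put() needed: the devres core drops the reference for us */
	return 0;
}

Because the reference is device-managed, the error and remove paths only have to undo prepare/enable; the final put happens when the device is unbound, or earlier via an explicit devm_clk_put().
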
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index d9a4fd0..a6a6f60 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -40,4 +40,7 @@
 void clkdev_add_table(struct clk_lookup *, size_t);
 int clk_add_alias(const char *, const char *, char *, struct device *);
 
+int clk_register_clkdev(struct clk *, const char *, const char *, ...);
+int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
+
 #endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 7e11f14..094789f 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -25,6 +25,13 @@
 
 #define IS_ROOT(x) ((x) == (x)->d_parent)
 
+/* The hash is always the low bits of hash_len */
+#ifdef __LITTLE_ENDIAN
+ #define HASH_LEN_DECLARE u32 hash; u32 len;
+#else
+ #define HASH_LEN_DECLARE u32 len; u32 hash;
+#endif
+
 /*
  * "quick string" -- eases parameter passing, but more importantly
  * saves "metadata" about the string (ie length and the hash).
@@ -33,11 +40,19 @@
  * dentry.
  */
 struct qstr {
-	unsigned int hash;
-	unsigned int len;
+	union {
+		struct {
+			HASH_LEN_DECLARE;
+		};
+		u64 hash_len;
+	};
 	const unsigned char *name;
 };
 
+#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
+#define hashlen_hash(hashlen) ((u32) (hashlen))
+#define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
+
 struct dentry_stat_t {
 	int nr_dentry;
 	int nr_unused;
@@ -282,7 +297,7 @@
 extern struct dentry *__d_lookup(struct dentry *, struct qstr *);
 extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
 				const struct qstr *name,
-				unsigned *seq, struct inode **inode);
+				unsigned *seq, struct inode *inode);
 
 /**
  * __d_rcu_to_refcount - take a refcount on dentry if sequence check is ok
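
Illustration only, assuming the header above: QSTR_INIT() fills in name and len and leaves the hash to be computed later, and because HASH_LEN_DECLARE orders the two 32-bit halves by endianness, hashlen_hash()/hashlen_len() recover hash and len from the combined hash_len on either layout:

#include <linux/dcache.h>

static void qstr_demo(void)
{
	struct qstr dotdot = QSTR_INIT("..", 2);	/* .name = "..", .len = 2, .hash = 0 */
	u64 hl = dotdot.hash_len;

	/* hashlen_len(hl) == dotdot.len (2), hashlen_hash(hl) == dotdot.hash */
	(void)hashlen_len(hl);
	(void)hashlen_hash(hl);
}
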
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index 65a2562..6bb4338 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -67,6 +67,17 @@
 	__u8	reco_prio_tc[IEEE_8021QAZ_MAX_TCS];
 };
 
+/* This structure contains rate limit extension to the IEEE 802.1Qaz ETS
+ * managed object.
+ * Values are 64 bits long and specified in Kbps to enable usage over both
+ * slow and very fast networks.
+ *
+ * @tc_maxrate: maximal tc tx bandwidth indexed by traffic class
+ */
+struct ieee_maxrate {
+	__u64	tc_maxrate[IEEE_8021QAZ_MAX_TCS];
+};
+
 /* This structure contains the IEEE 802.1Qaz PFC managed object
  *
  * @pfc_cap: Indicates the number of traffic classes on the local device
@@ -321,6 +332,7 @@
 	DCB_ATTR_IEEE_PEER_ETS,
 	DCB_ATTR_IEEE_PEER_PFC,
 	DCB_ATTR_IEEE_PEER_APP,
+	DCB_ATTR_IEEE_MAXRATE,
 	__DCB_ATTR_IEEE_MAX
 };
 #define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index eaf95a0..d16294e 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -549,6 +549,8 @@
 	return NULL;
 }
 
+extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_DCCP_H */
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 731a609..b029d1a 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -114,91 +114,6 @@
 	};
 };
 
-#ifdef CONFIG_IRQ_REMAP
-extern int intr_remapping_enabled;
-extern int intr_remapping_supported(void);
-extern int enable_intr_remapping(void);
-extern void disable_intr_remapping(void);
-extern int reenable_intr_remapping(int);
-
-extern int get_irte(int irq, struct irte *entry);
-extern int modify_irte(int irq, struct irte *irte_modified);
-extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
-extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
-   			u16 sub_handle);
-extern int map_irq_to_irte_handle(int irq, u16 *sub_handle);
-extern int free_irte(int irq);
-
-extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
-extern struct intel_iommu *map_ioapic_to_ir(int apic);
-extern struct intel_iommu *map_hpet_to_ir(u8 id);
-extern int set_ioapic_sid(struct irte *irte, int apic);
-extern int set_hpet_sid(struct irte *irte, u8 id);
-extern int set_msi_sid(struct irte *irte, struct pci_dev *dev);
-#else
-static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
-{
-	return -1;
-}
-static inline int modify_irte(int irq, struct irte *irte_modified)
-{
-	return -1;
-}
-static inline int free_irte(int irq)
-{
-	return -1;
-}
-static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle)
-{
-	return -1;
-}
-static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
-			       u16 sub_handle)
-{
-	return -1;
-}
-static inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
-{
-	return NULL;
-}
-static inline struct intel_iommu *map_ioapic_to_ir(int apic)
-{
-	return NULL;
-}
-static inline struct intel_iommu *map_hpet_to_ir(unsigned int hpet_id)
-{
-	return NULL;
-}
-static inline int set_ioapic_sid(struct irte *irte, int apic)
-{
-	return 0;
-}
-static inline int set_hpet_sid(struct irte *irte, u8 id)
-{
-	return -1;
-}
-static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
-{
-	return 0;
-}
-
-#define intr_remapping_enabled		(0)
-
-static inline int enable_intr_remapping(void)
-{
-	return -1;
-}
-
-static inline void disable_intr_remapping(void)
-{
-}
-
-static inline int reenable_intr_remapping(int eim)
-{
-	return 0;
-}
-#endif
-
 enum {
 	IRQ_REMAP_XAPIC_MODE,
 	IRQ_REMAP_X2APIC_MODE,
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 88ec806..ec45ccd 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -554,7 +554,18 @@
 #define EFI_VARIABLE_NON_VOLATILE       0x0000000000000001
 #define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002
 #define EFI_VARIABLE_RUNTIME_ACCESS     0x0000000000000004
+#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x0000000000000008
+#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x0000000000000010
+#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
+#define EFI_VARIABLE_APPEND_WRITE	0x0000000000000040
 
+#define EFI_VARIABLE_MASK 	(EFI_VARIABLE_NON_VOLATILE | \
+				EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+				EFI_VARIABLE_RUNTIME_ACCESS | \
+				EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
+				EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \
+				EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \
+				EFI_VARIABLE_APPEND_WRITE)
 /*
  * The type of search to perform when calling boottime->locate_handle
  */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 8a18358..3d406e0 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -18,8 +18,6 @@
  *		as published by the Free Software Foundation; either version
  *		2 of the License, or (at your option) any later version.
  *
- *	WARNING: This move may well be temporary. This file will get merged with others RSN.
- *
  */
 #ifndef _LINUX_ETHERDEVICE_H
 #define _LINUX_ETHERDEVICE_H
@@ -59,7 +57,7 @@
  *
  * Return true if the address is all zeroes.
  */
-static inline int is_zero_ether_addr(const u8 *addr)
+static inline bool is_zero_ether_addr(const u8 *addr)
 {
 	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
 }
@@ -71,7 +69,7 @@
  * Return true if the address is a multicast address.
  * By definition the broadcast address is also a multicast address.
  */
-static inline int is_multicast_ether_addr(const u8 *addr)
+static inline bool is_multicast_ether_addr(const u8 *addr)
 {
 	return 0x01 & addr[0];
 }
@@ -82,7 +80,7 @@
  *
  * Return true if the address is a local address.
  */
-static inline int is_local_ether_addr(const u8 *addr)
+static inline bool is_local_ether_addr(const u8 *addr)
 {
 	return 0x02 & addr[0];
 }
@@ -93,7 +91,7 @@
  *
  * Return true if the address is the broadcast address.
  */
-static inline int is_broadcast_ether_addr(const u8 *addr)
+static inline bool is_broadcast_ether_addr(const u8 *addr)
 {
 	return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff;
 }
@@ -104,7 +102,7 @@
  *
  * Return true if the address is a unicast address.
  */
-static inline int is_unicast_ether_addr(const u8 *addr)
+static inline bool is_unicast_ether_addr(const u8 *addr)
 {
 	return !is_multicast_ether_addr(addr);
 }
@@ -118,7 +116,7 @@
  *
  * Return true if the address is valid.
  */
-static inline int is_valid_ether_addr(const u8 *addr)
+static inline bool is_valid_ether_addr(const u8 *addr)
 {
 	/* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to
 	 * explicitly check for it here. */
@@ -159,7 +157,8 @@
  * @addr1: Pointer to a six-byte array containing the Ethernet address
  * @addr2: Pointer other six-byte array containing the Ethernet address
  *
- * Compare two ethernet addresses, returns 0 if equal
+ * Compare two Ethernet addresses, returns 0 if equal, non-zero otherwise.
+ * Unlike memcmp(), it doesn't return a value suitable for sorting.
  */
 static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
 {
@@ -170,6 +169,18 @@
 	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
 }
 
+/**
+ * ether_addr_equal - Compare two Ethernet addresses
+ * @addr1: Pointer to a six-byte array containing the Ethernet address
+ * @addr2: Pointer to another six-byte array containing the Ethernet address
+ *
+ * Compare two Ethernet addresses, returns true if equal
+ */
+static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+	return !compare_ether_addr(addr1, addr2);
+}
+
 static inline unsigned long zap_last_2bytes(unsigned long value)
 {
 #ifdef __BIG_ENDIAN
@@ -180,34 +191,34 @@
 }
 
 /**
- * compare_ether_addr_64bits - Compare two Ethernet addresses
+ * ether_addr_equal_64bits - Compare two Ethernet addresses
  * @addr1: Pointer to an array of 8 bytes
  * @addr2: Pointer to an other array of 8 bytes
  *
- * Compare two ethernet addresses, returns 0 if equal.
- * Same result than "memcmp(addr1, addr2, ETH_ALEN)" but without conditional
- * branches, and possibly long word memory accesses on CPU allowing cheap
- * unaligned memory reads.
- * arrays = { byte1, byte2, byte3, byte4, byte6, byte7, pad1, pad2}
+ * Compare two Ethernet addresses, returns true if equal, false otherwise.
  *
- * Please note that alignment of addr1 & addr2 is only guaranted to be 16 bits.
+ * The function doesn't need any conditional branches and possibly uses
+ * word memory accesses on CPUs that allow cheap unaligned memory reads.
+ * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
+ *
+ * Please note that the alignment of addr1 & addr2 is only guaranteed to be 16 bits.
  */
 
-static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
-						 const u8 addr2[6+2])
+static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
+					   const u8 addr2[6+2])
 {
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 	unsigned long fold = ((*(unsigned long *)addr1) ^
 			      (*(unsigned long *)addr2));
 
 	if (sizeof(fold) == 8)
-		return zap_last_2bytes(fold) != 0;
+		return zap_last_2bytes(fold) == 0;
 
 	fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^
 				(*(unsigned long *)(addr2 + 4)));
-	return fold != 0;
+	return fold == 0;
 #else
-	return compare_ether_addr(addr1, addr2);
+	return ether_addr_equal(addr1, addr2);
 #endif
 }
 
@@ -219,23 +230,23 @@
  * Compare passed address with all addresses of the device. Return true if the
  * address if one of the device addresses.
  *
- * Note that this function calls compare_ether_addr_64bits() so take care of
+ * Note that this function calls ether_addr_equal_64bits() so take care of
  * the right padding.
  */
 static inline bool is_etherdev_addr(const struct net_device *dev,
 				    const u8 addr[6 + 2])
 {
 	struct netdev_hw_addr *ha;
-	int res = 1;
+	bool res = false;
 
 	rcu_read_lock();
 	for_each_dev_addr(dev, ha) {
-		res = compare_ether_addr_64bits(addr, ha->addr);
-		if (!res)
+		res = ether_addr_equal_64bits(addr, ha->addr);
+		if (res)
 			break;
 	}
 	rcu_read_unlock();
-	return !res;
+	return res;
 }
 #endif	/* __KERNEL__ */
 
@@ -244,7 +255,7 @@
  * @a: Pointer to Ethernet header
  * @b: Pointer to Ethernet header
  *
- * Compare two ethernet headers, returns 0 if equal.
+ * Compare two Ethernet headers, returns 0 if equal.
  * This assumes that the network header (i.e., IP header) is 4-byte
  * aligned OR the platform can handle unaligned access.  This is the
  * case for all packets coming into netif_receive_skb or similar
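
A quick, hedged sketch of how the new boolean helpers read at a call site; the skb/netdev receive-path context here is illustrative and not part of the patch, only ether_addr_equal() and is_multicast_ether_addr() come from this header:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: decide whether a received frame is addressed to this device. */
static bool addressed_to_us(const struct sk_buff *skb,
			    const struct net_device *dev)
{
	const struct ethhdr *eth = eth_hdr(skb);

	if (is_multicast_ether_addr(eth->h_dest))
		return true;
	/* boolean sense: true means "equal", unlike compare_ether_addr() */
	return ether_addr_equal(eth->h_dest, dev->dev_addr);
}

Note that ether_addr_equal_64bits() additionally requires both arrays to be padded to 8 bytes (addr[6+2]), as the kernel-doc above states.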
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index f5647b5..e17fa71 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -137,6 +137,23 @@
 };
 
 /**
+ * struct ethtool_modinfo - plugin module eeprom information
+ * @cmd: %ETHTOOL_GMODULEINFO
+ * @type: Standard the module information conforms to, one of %ETH_MODULE_SFF_xxxx
+ * @eeprom_len: Length of the eeprom
+ *
+ * This structure is used to return the information needed to properly
+ * size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM.
+ * The type code indicates the eeprom data format.
+ */
+struct ethtool_modinfo {
+	__u32   cmd;
+	__u32   type;
+	__u32   eeprom_len;
+	__u32   reserved[8];
+};
+
+/**
  * struct ethtool_coalesce - coalescing parameters for IRQs and stats updates
  * @cmd: ETHTOOL_{G,S}COALESCE
  * @rx_coalesce_usecs: How many usecs to delay an RX interrupt after
@@ -661,12 +678,17 @@
  * 	%ETHTOOL_SET_DUMP
  * @version: FW version of the dump, filled in by driver
  * @flag: driver dependent flag for dump setting, filled in by driver during
- * 	  get and filled in by ethtool for set operation
+ *        get and filled in by ethtool for set operation.
+ *        flag must be initialized to the ETH_FW_DUMP_DISABLE value when
+ *        firmware dump is disabled.
  * @len: length of dump data, used as the length of the user buffer on entry to
  * 	 %ETHTOOL_GET_DUMP_DATA and this is returned as dump length by driver
  * 	 for %ETHTOOL_GET_DUMP_FLAG command
  * @data: data collected for get dump data operation
  */
+
+#define ETH_FW_DUMP_DISABLE 0
+
 struct ethtool_dump {
 	__u32	cmd;
 	__u32	version;
@@ -726,6 +748,29 @@
 	struct ethtool_set_features_block features[0];
 };
 
+/**
+ * struct ethtool_ts_info - holds a device's timestamping and PHC association
+ * @cmd: command number = %ETHTOOL_GET_TS_INFO
+ * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
+ * @phc_index: device index of the associated PHC, or -1 if there is none
+ * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ *
+ * The bits in the 'tx_types' and 'rx_filters' fields correspond to
+ * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
+ * respectively.  For example, if the device supports HWTSTAMP_TX_ON,
+ * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ */
+struct ethtool_ts_info {
+	__u32	cmd;
+	__u32	so_timestamping;
+	__s32	phc_index;
+	__u32	tx_types;
+	__u32	tx_reserved[3];
+	__u32	rx_filters;
+	__u32	rx_reserved[3];
+};
+
 /*
  * %ETHTOOL_SFEATURES changes features present in features[].valid to the
  * values of corresponding bits in features[].requested. Bits in .requested
@@ -788,6 +833,7 @@
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
 
 /**
  * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
@@ -893,6 +939,12 @@
  * 		   and flag of the device.
  * @get_dump_data: Get dump data.
  * @set_dump: Set dump specific flags to the device.
+ * @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
+ *	Drivers supporting transmit time stamps in software should set this to
+ *	ethtool_op_get_ts_info().
+ * @get_module_info: Get the size and type of the eeprom contained within
+ *	a plug-in module.
+ * @get_module_eeprom: Get the eeprom information from the plug-in module
  *
  * All operations are optional (i.e. the function pointer may be set
  * to %NULL) and callers must take this into account.  Callers must
@@ -954,6 +1006,12 @@
 	int	(*get_dump_data)(struct net_device *,
 				 struct ethtool_dump *, void *);
 	int	(*set_dump)(struct net_device *, struct ethtool_dump *);
+	int	(*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
+	int     (*get_module_info)(struct net_device *,
+				   struct ethtool_modinfo *);
+	int     (*get_module_eeprom)(struct net_device *,
+				     struct ethtool_eeprom *, u8 *);
+
 
 };
 #endif /* __KERNEL__ */
@@ -1028,6 +1086,9 @@
 #define ETHTOOL_SET_DUMP	0x0000003e /* Set dump settings */
 #define ETHTOOL_GET_DUMP_FLAG	0x0000003f /* Get dump settings */
 #define ETHTOOL_GET_DUMP_DATA	0x00000040 /* Get dump data */
+#define ETHTOOL_GET_TS_INFO	0x00000041 /* Get time stamping and PHC info */
+#define ETHTOOL_GMODULEINFO	0x00000042 /* Get plug-in module information */
+#define ETHTOOL_GMODULEEEPROM	0x00000043 /* Get plug-in module eeprom */
 
 /* compatibility with older code */
 #define SPARC_ETH_GSET		ETHTOOL_GSET
@@ -1177,6 +1238,12 @@
 #define RX_CLS_LOC_FIRST	0xfffffffe
 #define RX_CLS_LOC_LAST		0xfffffffd
 
+/* EEPROM Standards for plug in modules */
+#define ETH_MODULE_SFF_8079		0x1
+#define ETH_MODULE_SFF_8079_LEN		256
+#define ETH_MODULE_SFF_8472		0x2
+#define ETH_MODULE_SFF_8472_LEN		512
+
 /* Reset flags */
 /* The reset() operation must clear the flags for the components which
  * were actually reset.  On successful return, the flags indicate the
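
Drivers that only support software transmit timestamps can plug in the new generic ethtool_op_get_ts_info() directly; drivers with a PTP hardware clock fill struct ethtool_ts_info themselves. A hedged sketch of the latter follows; the SOF_TIMESTAMPING_* and HWTSTAMP_* constants come from <linux/net_tstamp.h>, while the function name and phc_index value are illustrative:

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>

/* Sketch: report hardware RX/TX timestamping plus the associated PHC. */
static int example_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = 0;	/* this device's PTP clock index, -1 if none */
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

The driver would then hook this up as .get_ts_info in its ethtool_ops, alongside .get_module_info/.get_module_eeprom for the new plug-in module EEPROM queries.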
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 8eeb205..7209099 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -126,7 +126,8 @@
 #define SKF_AD_HATYPE	28
 #define SKF_AD_RXHASH	32
 #define SKF_AD_CPU	36
-#define SKF_AD_MAX	40
+#define SKF_AD_ALU_XOR_X	40
+#define SKF_AD_MAX	44
 #define SKF_NET_OFF   (-0x100000)
 #define SKF_LL_OFF    (-0x200000)
 
@@ -153,6 +154,9 @@
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
 extern unsigned int sk_run_filter(const struct sk_buff *skb,
 				  const struct sock_filter *filter);
+extern int sk_unattached_filter_create(struct sk_filter **pfp,
+				       struct sock_fprog *fprog);
+extern void sk_unattached_filter_destroy(struct sk_filter *fp);
 extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_detach_filter(struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
@@ -228,6 +232,7 @@
 	BPF_S_ANC_HATYPE,
 	BPF_S_ANC_RXHASH,
 	BPF_S_ANC_CPU,
+	BPF_S_ANC_ALU_XOR_X,
 };
 
 #endif /* __KERNEL__ */
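
The new sk_unattached_filter_create()/sk_unattached_filter_destroy() pair lets kernel code run a classic BPF program without binding it to a socket. A minimal, hedged sketch; the trivial "accept everything" program and the helper name are illustrative, only the create/destroy signatures come from this header:

#include <linux/kernel.h>
#include <linux/filter.h>

/* "Return 0xffff" == accept (up to) the whole packet. */
static struct sock_filter accept_all[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffff),
};

static struct sk_filter *example_build_filter(void)
{
	struct sock_fprog fprog = {
		.len	= ARRAY_SIZE(accept_all),
		.filter	= accept_all,
	};
	struct sk_filter *fp;

	if (sk_unattached_filter_create(&fp, &fprog))
		return NULL;
	return fp;	/* later released with sk_unattached_filter_destroy(fp) */
}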
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8de6755..25c40b9 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2051,6 +2051,7 @@
 extern struct block_device *bdget(dev_t);
 extern struct block_device *bdgrab(struct block_device *bdev);
 extern void bd_set_size(struct block_device *, loff_t size);
+extern sector_t blkdev_max_block(struct block_device *bdev);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
 extern void invalidate_bdev(struct block_device *);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5f3f3be..176a939 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -179,6 +179,7 @@
 	TRACE_EVENT_FL_RECORDED_CMD_BIT,
 	TRACE_EVENT_FL_CAP_ANY_BIT,
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
+	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
 };
 
 enum {
@@ -187,6 +188,7 @@
 	TRACE_EVENT_FL_RECORDED_CMD	= (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
 	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
+	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
 };
 
 struct ftrace_event_call {
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e61d319..017a7fb 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -222,12 +222,6 @@
 	}
 }
 
-static inline char *part_unpack_uuid(const u8 *uuid, char *out)
-{
-	sprintf(out, "%pU", uuid);
-	return out;
-}
-
 static inline int disk_max_parts(struct gendisk *disk)
 {
 	if (disk->flags & GENHD_FL_EXT_DEVT)
diff --git a/include/linux/gpio-pxa.h b/include/linux/gpio-pxa.h
index 05071ee..d755b28 100644
--- a/include/linux/gpio-pxa.h
+++ b/include/linux/gpio-pxa.h
@@ -13,4 +13,8 @@
 
 extern int pxa_irq_to_gpio(int irq);
 
+struct pxa_gpio_platform_data {
+	int (*gpio_set_wake)(unsigned int gpio, unsigned int on);
+};
+
 #endif /* __GPIO_PXA_H */
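
The new pxa_gpio_platform_data lets board code hand the PXA GPIO driver a machine-specific wake-configuration hook. A hedged sketch of how a board file might provide it; the callback body and names are assumptions, only the structure and callback signature come from this header:

#include <linux/gpio-pxa.h>

/* Sketch: forward GPIO wake requests to the platform's wakeup logic. */
static int board_gpio_set_wake(unsigned int gpio, unsigned int on)
{
	/* program the SoC wakeup-enable logic for this GPIO here */
	return 0;
}

static struct pxa_gpio_platform_data board_gpio_pdata = {
	.gpio_set_wake = board_gpio_set_wake,
};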
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5852545..6af8738 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -274,6 +274,33 @@
 	u32 bytes_avail_towrite;
 };
 
+
+/*
+ *
+ * hv_get_ringbuffer_availbytes()
+ *
+ * Get number of bytes available to read and to write to
+ * for the specified ring buffer
+ */
+static inline void
+hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
+			  u32 *read, u32 *write)
+{
+	u32 read_loc, write_loc, dsize;
+
+	smp_read_barrier_depends();
+
+	/* Capture the read/write indices before they changed */
+	read_loc = rbi->ring_buffer->read_index;
+	write_loc = rbi->ring_buffer->write_index;
+	dsize = rbi->ring_datasize;
+
+	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+		read_loc - write_loc;
+	*read = dsize - *write;
+}
+
+
 /*
  * We use the same version numbering for all Hyper-V modules.
  *
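
hv_get_ringbuffer_availbytes() splits a ring buffer into its readable and writable parts from a single snapshot of the indices. A hedged usage sketch; where the hv_ring_buffer_info pointer comes from (e.g. a channel's inbound ring) and the threshold are assumptions:

#include <linux/hyperv.h>

/* Sketch: check whether enough payload has arrived to bother reading. */
static bool example_ring_has_data(struct hv_ring_buffer_info *rbi, u32 needed)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(rbi, &avail_read, &avail_write);
	return avail_read >= needed;
}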
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 1f90de0..3993477 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -171,8 +171,6 @@
 TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
 TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
 
-#define TWL6025_SUBCLASS	BIT(4)  /* TWL6025 has changed registers */
-
 /*
  * Read and write single 8-bit registers
  */
@@ -746,6 +744,17 @@
 	void		*data;
 	unsigned long	features;
 };
+/* chip-specific feature flags, for twl_regulator_driver_data.features */
+#define TWL4030_VAUX2		BIT(0)	/* pre-5030 voltage ranges */
+#define TPS_SUBSET		BIT(1)	/* tps659[23]0 have fewer LDOs */
+#define TWL5031			BIT(2)  /* twl5031 has different registers */
+#define TWL6030_CLASS		BIT(3)	/* TWL6030 class */
+#define TWL6025_SUBCLASS	BIT(4)  /* TWL6025 has changed registers */
+#define TWL4030_ALLOW_UNSUPPORTED BIT(5) /* Some voltages are possible
+					  * but not officially supported.
+					  * This flag is necessary to
+					  * enable them.
+					  */
 
 /*----------------------------------------------------------------------*/
 
diff --git a/include/linux/ibmtr.h b/include/linux/ibmtr.h
deleted file mode 100644
index 06695b7..0000000
--- a/include/linux/ibmtr.h
+++ /dev/null
@@ -1,373 +0,0 @@
-#ifndef __LINUX_IBMTR_H__
-#define __LINUX_IBMTR_H__
-
-/* Definitions for an IBM Token Ring card. */
-/* This file is distributed under the GNU GPL   */
-
-/* ported to the Alpha architecture 02/20/96 (just used the HZ macro) */
-
-#define TR_RETRY_INTERVAL	(30*HZ)	/* 500 on PC = 5 s */
-#define TR_RST_TIME		(msecs_to_jiffies(50))	/* 5 on PC = 50 ms */
-#define TR_BUSY_INTERVAL	(msecs_to_jiffies(200))	/* 5 on PC = 200 ms */
-#define TR_SPIN_INTERVAL	(3*HZ)	/* 3 seconds before init timeout */
-
-#define TR_ISA 1
-#define TR_MCA 2
-#define TR_ISAPNP 3
-#define NOTOK 0
-
-#define IBMTR_SHARED_RAM_SIZE 0x10000
-#define IBMTR_IO_EXTENT 4
-#define IBMTR_MAX_ADAPTERS 4
-
-#define CHANNEL_ID      0X1F30
-#define AIP             0X1F00
-#define AIPADAPTYPE     0X1FA0
-#define AIPDATARATE     0X1FA2
-#define AIPEARLYTOKEN   0X1FA4
-#define AIPAVAILSHRAM   0X1FA6
-#define AIPSHRAMPAGE    0X1FA8
-#define AIP4MBDHB       0X1FAA
-#define AIP16MBDHB      0X1FAC
-#define AIPFID		0X1FBA
-
-#define ADAPTRESET      0x1     /* Control Adapter reset (add to base) */
-#define ADAPTRESETREL   0x2     /* Release Adapter from reset ( """)  */
-#define ADAPTINTREL	0x3 	/* Adapter interrupt release */
-
-#define GLOBAL_INT_ENABLE 0x02f0
-
-/* MMIO bits 0-4 select register */
-#define RRR_EVEN       0x00 /* Shared RAM relocation registers - even and odd */
-/* Used to set the starting address of shared RAM  */
-/* Bits 1 through 7 of this register map to bits 13 through 19 of the shared
-   RAM address.*/
-/* ie: 0x02 sets RAM address to ...ato!  issy su wazzoo !! GODZILLA!!! */
-#define RRR_ODD         0x01
-/* Bits 2 and 3 of this register can be read to determine shared RAM size */
-/* 00 for 8k, 01 for 16k, 10 for 32k, 11 for 64k  */
-#define WRBR_EVEN       0x02    /* Write region base registers - even and odd */
-#define WRBR_ODD        0x03
-#define WWOR_EVEN       0x04    /* Write window open registers - even and odd */
-#define WWOR_ODD        0x05
-#define WWCR_EVEN       0x06   /* Write window close registers - even and odd */
-#define WWCR_ODD        0x07
-
-/* Interrupt status registers - PC system  - even and odd */
-#define ISRP_EVEN       0x08
-
-#define TCR_INT    0x10    /* Bit 4 - Timer interrupt.  The TVR_EVEN timer has
-                                                                   expired. */
-#define ERR_INT	   0x08    /* Bit 3 - Error interrupt.  The adapter has had an
-                                                            internal error. */
-#define ACCESS_INT 0x04    /* Bit 2 - Access interrupt.  You have attempted to
-				      write to an invalid area of shared RAM
-				      or an invalid register within the MMIO. */
-/* In addition, the following bits within ISRP_EVEN can be turned on or off   */
-/* by you to control the interrupt processing:   */
-#define INT_ENABLE 0x40 /* Bit 6 - Interrupt enable.  If 0, no interrupts will
-                                   occur.  If 1, interrupts will occur normally.
-                                                         Normally set to 1.  */
-/* Bit 0 - Primary or alternate adapter.  Set to zero if this adapter is the
-		primary adapter, 1 if this adapter is the alternate adapter. */
-
-
-#define ISRP_ODD        0x09
-
-#define ADAP_CHK_INT 0x40 /* Bit 6 - Adapter check.  the adapter has
-                             encountered a serious problem and has closed
-                             itself.  Whoa.  */
-#define SRB_RESP_INT 0x20 /* Bit 5 - SRB response.  The adapter has accepted
-                             an SRB request and set the return code within
-                             the SRB. */
-#define ASB_FREE_INT 0x10 /* Bit 4 - ASB free.  The adapter has read the ASB
-                             and this area can be safely reused. This interrupt
-                             is only used if your application has set the ASB
-                             free request bit in ISRA_ODD or if an error was
-                             detected in your response. */
-#define ARB_CMD_INT  0x08 /* Bit 3 - ARB command.  The adapter has given you a
-                             command for action.  The command is located in the
-                             ARB area of shared memory. */
-#define SSB_RESP_INT 0x04 /* Bit 2 - SSB response.  The adapter has posted a
-                             response to your SRB (the response is located in
-                             the SSB area of shared memory). */
-/* Bit 1 - Bridge frame forward complete. */
-
-
-
-#define ISRA_EVEN 0x0A /*Interrupt status registers - adapter  - even and odd */
-/* Bit 7 - Internal parity error (on adapter's internal bus) */
-/* Bit 6 - Timer interrupt pending */
-/* Bit 5 - Access interrupt (attempt by adapter to access illegal address) */
-/* Bit 4 - Adapter microcode problem (microcode dead-man timer expired) */
-/* Bit 3 - Adapter processor check status */
-/* Bit 2 - Reserved */
-/* Bit 1 - Adapter hardware interrupt mask (prevents internal interrupts) */
-/* Bit 0 - Adapter software interrupt mask (prevents internal software ints) */
-
-#define ISRA_ODD        0x0B
-#define CMD_IN_SRB  0x20 /* Bit 5  - Indicates that you have placed a new
-                           command in the SRB and are ready for the adapter to
-                           process the command. */
-#define RESP_IN_ASB 0x10 /* Bit 4 - Indicates that you have placed a response
-                            (an ASB) in the shared RAM which is available for
-                            the adapter's use. */
-/* Bit 3 - Indicates that you are ready to put an SRB in the shared RAM, but
-	that a previous command is still pending.  The adapter will then
-	interrupt you when the previous command is completed */
-/* Bit 2 - Indicates that you are ready to put an ASB in the shared RAM, but
-	that a previous ASB is still pending.  The adapter will then interrupt
-	you when the previous ASB is copied.  */
-#define ARB_FREE 0x2
-#define SSB_FREE 0x1
-
-#define TCR_EVEN        0x0C    /* Timer control registers - even and odd */
-#define TCR_ODD         0x0D
-#define TVR_EVEN        0x0E    /* Timer value registers - even and odd */
-#define TVR_ODD         0x0F
-#define SRPR_EVEN       0x18    /* Shared RAM paging registers - even and odd */
-#define SRPR_ENABLE_PAGING 0xc0
-#define SRPR_ODD        0x19	/* Not used. */
-#define TOKREAD         0x60
-#define TOKOR           0x40
-#define TOKAND          0x20
-#define TOKWRITE        0x00
-
-/* MMIO bits 5-6 select operation */
-/* 00 is used to write to a register */
-/* 01 is used to bitwise AND a byte with a register */
-/* 10 is used to bitwise OR a byte with a register  */
-/* 11 is used to read from a register */
-
-/* MMIO bits 7-8 select area of interest.. see below */
-/* 00 selects attachment control area. */
-/* 01 is reserved. */
-/* 10 selects adapter identification area A containing the adapter encoded
-	address. */
-/* 11 selects the adapter identification area B containing test patterns. */
-
-#define PCCHANNELID 5049434F3631313039393020
-#define MCCHANNELID 4D4152533633583435313820
-
-#define ACA_OFFSET 0x1e00
-#define ACA_SET 0x40
-#define ACA_RESET 0x20
-#define ACA_RW 0x00
-
-#ifdef ENABLE_PAGING
-#define SET_PAGE(x) (writeb((x), ti->mmio + ACA_OFFSET+ ACA_RW + SRPR_EVEN))
-#else
-#define SET_PAGE(x)
-#endif
-
-/* do_tok_int possible values */
-#define FIRST_INT 1
-#define NOT_FIRST 2
-
-typedef enum {	CLOSED,	OPEN } open_state;
-//staic const char *printstate[] = { "CLOSED","OPEN"};
-
-struct tok_info {
-	unsigned char irq;
-	void __iomem *mmio;
-	unsigned char hw_address[32];
-	unsigned char adapter_type;
-	unsigned char data_rate;
-	unsigned char token_release;
-	unsigned char avail_shared_ram;
-	unsigned char shared_ram_paging;
-        unsigned char turbo;
-	unsigned short dhb_size4mb;
-	unsigned short rbuf_len4;
-	unsigned short rbuf_cnt4;
-	unsigned short maxmtu4;
-	unsigned short dhb_size16mb;
-	unsigned short rbuf_len16;
-	unsigned short rbuf_cnt16;
-	unsigned short maxmtu16;
-	/* Additions by David Morris       */
-	unsigned char do_tok_int;
-	wait_queue_head_t wait_for_reset;
-	unsigned char sram_base;
-	/* Additions by Peter De Schrijver */
-	unsigned char page_mask;          /* mask to select RAM page to Map*/
-	unsigned char mapped_ram_size;    /* size of RAM page */
-	__u32 sram_phys;          /* Shared memory base address */
-	void __iomem *sram_virt;          /* Shared memory base address */
-	void __iomem *init_srb;   /* Initial System Request Block address */
-	void __iomem *srb;                /* System Request Block address */
-	void __iomem *ssb;                /* System Status Block address */
-	void __iomem *arb;                /* Adapter Request Block address */
-	void __iomem *asb;                /* Adapter Status Block address */
-        __u8  init_srb_page;
-        __u8  srb_page;
-        __u8  ssb_page;
-        __u8  arb_page;
-        __u8  asb_page;
-	unsigned short exsap_station_id;
-	unsigned short global_int_enable;
-	struct sk_buff *current_skb;
-
-	unsigned char auto_speedsave;
-	open_state			open_status, sap_status;
-	enum {MANUAL, AUTOMATIC}	open_mode;
-	enum {FAIL, RESTART, REOPEN}	open_action;
-	enum {NO, YES}			open_failure;
-	unsigned char readlog_pending;
-	unsigned short adapter_int_enable; /* Adapter-specific int enable */
-        struct timer_list tr_timer;
-	unsigned char ring_speed;
-	spinlock_t lock;		/* SMP protection */
-};
-
-/* token ring adapter commands */
-#define DIR_INTERRUPT 		0x00 /* struct srb_interrupt */
-#define DIR_MOD_OPEN_PARAMS 	0x01
-#define DIR_OPEN_ADAPTER 	0x03 /* struct dir_open_adapter */
-#define DIR_CLOSE_ADAPTER   	0x04
-#define DIR_SET_GRP_ADDR    	0x06
-#define DIR_SET_FUNC_ADDR   	0x07 /* struct srb_set_funct_addr */
-#define DIR_READ_LOG 		0x08 /* struct srb_read_log */
-#define DLC_OPEN_SAP 		0x15 /* struct dlc_open_sap */
-#define DLC_CLOSE_SAP       	0x16
-#define DATA_LOST 		0x20 /* struct asb_rec */
-#define REC_DATA 		0x81 /* struct arb_rec_req */
-#define XMIT_DATA_REQ 		0x82 /* struct arb_xmit_req */
-#define DLC_STATUS 		0x83 /* struct arb_dlc_status */
-#define RING_STAT_CHANGE    	0x84 /* struct dlc_open_sap ??? */
-
-/* DIR_OPEN_ADAPTER options */
-#define OPEN_PASS_BCON_MAC 0x0100
-#define NUM_RCV_BUF 2
-#define RCV_BUF_LEN 1024
-#define DHB_LENGTH 2048
-#define NUM_DHB 2
-#define DLC_MAX_SAP 2
-#define DLC_MAX_STA 1
-
-/* DLC_OPEN_SAP options */
-#define MAX_I_FIELD 0x0088
-#define SAP_OPEN_IND_SAP 0x04
-#define SAP_OPEN_PRIORITY 0x20
-#define SAP_OPEN_STATION_CNT 0x1
-#define XMIT_DIR_FRAME 0x0A
-#define XMIT_UI_FRAME  0x0d
-#define XMIT_XID_CMD   0x0e
-#define XMIT_TEST_CMD  0x11
-
-/* srb close return code */
-#define SIGNAL_LOSS  0x8000
-#define HARD_ERROR   0x4000
-#define XMIT_BEACON  0x1000
-#define LOBE_FAULT   0x0800
-#define AUTO_REMOVAL 0x0400
-#define REMOVE_RECV  0x0100
-#define LOG_OVERFLOW 0x0080
-#define RING_RECOVER 0x0020
-
-struct srb_init_response {
-	unsigned char command;
-	unsigned char init_status;
-	unsigned char init_status_2;
-	unsigned char reserved[3];
-	__u16 bring_up_code;
-	__u16 encoded_address;
-	__u16 level_address;
-	__u16 adapter_address;
-	__u16 parms_address;
-	__u16 mac_address;
-};
-
-struct dir_open_adapter {
-	unsigned char command;
-	char reserved[7];
-	__u16 open_options;
-	unsigned char node_address[6];
-	unsigned char group_address[4];
-	unsigned char funct_address[4];
-	__u16 num_rcv_buf;
-	__u16 rcv_buf_len;
-	__u16 dhb_length;
-	unsigned char num_dhb;
-	char reserved2;
-	unsigned char dlc_max_sap;
-	unsigned char dlc_max_sta;
-	unsigned char dlc_max_gsap;
-	unsigned char dlc_max_gmem;
-	unsigned char dlc_t1_tick_1;
-	unsigned char dlc_t2_tick_1;
-	unsigned char dlc_ti_tick_1;
-	unsigned char dlc_t1_tick_2;
-	unsigned char dlc_t2_tick_2;
-	unsigned char dlc_ti_tick_2;
-	unsigned char product_id[18];
-};
-
-struct dlc_open_sap {
-	unsigned char command;
-	unsigned char reserved1;
-	unsigned char ret_code;
-	unsigned char reserved2;
-	__u16 station_id;
-	unsigned char timer_t1;
-	unsigned char timer_t2;
-	unsigned char timer_ti;
-	unsigned char maxout;
-	unsigned char maxin;
-	unsigned char maxout_incr;
-	unsigned char max_retry_count;
-	unsigned char gsap_max_mem;
-	__u16 max_i_field;
-	unsigned char sap_value;
-	unsigned char sap_options;
-	unsigned char station_count;
-	unsigned char sap_gsap_mem;
-	unsigned char gsap[0];
-};
-
-struct srb_xmit {
-	unsigned char command;
-	unsigned char cmd_corr;
-	unsigned char ret_code;
-	unsigned char reserved1;
-	__u16 station_id;
-};
-
-struct arb_rec_req {
-	unsigned char command;
-	unsigned char reserved1[3];
-	__u16 station_id;
-	__u16 rec_buf_addr;
-	unsigned char lan_hdr_len;
-	unsigned char dlc_hdr_len;
-	__u16 frame_len;
-	unsigned char msg_type;
-};
-
-struct asb_rec {
-	unsigned char command;
-	unsigned char reserved1;
-	unsigned char ret_code;
-	unsigned char reserved2;
-	__u16 station_id;
-	__u16 rec_buf_addr;
-};
-
-struct rec_buf {
-  	unsigned char reserved1[2];
-	__u16 buf_ptr;
-	unsigned char reserved2;
-	unsigned char receive_fs;
-	__u16 buf_len;
-	unsigned char data[0];
-};
-
-struct srb_set_funct_addr {
-	unsigned char command;
-	unsigned char reserved1;
-	unsigned char ret_code;
-	unsigned char reserved2[3];
-	unsigned char funct_address[4];
-};
-
-#endif
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 210e2c3..ce9af89 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -640,9 +640,9 @@
 	u8 rann_hopcount;
 	u8 rann_ttl;
 	u8 rann_addr[6];
-	u32 rann_seq;
-	u32 rann_interval;
-	u32 rann_metric;
+	__le32 rann_seq;
+	__le32 rann_interval;
+	__le32 rann_metric;
 } __attribute__ ((packed));
 
 enum ieee80211_rann_flags {
@@ -1007,13 +1007,13 @@
 };
 
 /**
- * struct ieee80211_ht_info - HT information
+ * struct ieee80211_ht_operation - HT operation IE
  *
- * This structure is the "HT information element" as
- * described in 802.11n D5.0 7.3.2.58
+ * This structure is the "HT operation element" as
+ * described in 802.11n-2009 7.3.2.57
  */
-struct ieee80211_ht_info {
-	u8 control_chan;
+struct ieee80211_ht_operation {
+	u8 primary_chan;
 	u8 ht_param;
 	__le16 operation_mode;
 	__le16 stbc_param;
@@ -1027,8 +1027,6 @@
 #define		IEEE80211_HT_PARAM_CHA_SEC_BELOW	0x03
 #define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY		0x04
 #define IEEE80211_HT_PARAM_RIFS_MODE			0x08
-#define IEEE80211_HT_PARAM_SPSMP_SUPPORT		0x10
-#define IEEE80211_HT_PARAM_SERV_INTERVAL_GRAN		0xE0
 
 /* for operation_mode */
 #define IEEE80211_HT_OP_MODE_PROTECTION			0x0003
@@ -1301,7 +1299,7 @@
 	WLAN_EID_EXT_SUPP_RATES = 50,
 
 	WLAN_EID_HT_CAPABILITY = 45,
-	WLAN_EID_HT_INFORMATION = 61,
+	WLAN_EID_HT_OPERATION = 61,
 
 	WLAN_EID_RSN = 48,
 	WLAN_EID_MMIE = 76,
@@ -1441,6 +1439,18 @@
 #define WLAN_TDLS_SNAP_RFTYPE	0x2
 
 /**
+ * enum - mesh synchronization method identifier
+ *
+ * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
+ * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
+ * that will be specified in a vendor specific information element
+ */
+enum {
+	IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
+	IEEE80211_SYNC_METHOD_VENDOR = 255,
+};
+
+/**
  * enum - mesh path selection protocol identifier
  *
  * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
@@ -1448,7 +1458,7 @@
  * be specified in a vendor specific information element
  */
 enum {
-	IEEE80211_PATH_PROTOCOL_HWMP = 0,
+	IEEE80211_PATH_PROTOCOL_HWMP = 1,
 	IEEE80211_PATH_PROTOCOL_VENDOR = 255,
 };
 
@@ -1460,7 +1470,7 @@
  * specified in a vendor specific information element
  */
 enum {
-	IEEE80211_PATH_METRIC_AIRTIME = 0,
+	IEEE80211_PATH_METRIC_AIRTIME = 1,
 	IEEE80211_PATH_METRIC_VENDOR = 255,
 };
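
With the rename from ieee80211_ht_info to ieee80211_ht_operation, the primary channel field is now primary_chan and the multi-byte members stay little-endian as on the wire. A hedged sketch of reading the element; how the IE data was located in the frame is assumed, only the structure layout and the IEEE80211_HT_OP_MODE_PROTECTION mask come from this header:

#include <linux/kernel.h>
#include <linux/ieee80211.h>

/* Sketch: pull the primary channel and protection mode out of an
 * HT Operation element that was already located in a management frame. */
static void example_parse_ht_oper(const u8 *ie_data, size_t ie_len)
{
	const struct ieee80211_ht_operation *ht_oper;
	u16 op_mode;

	if (ie_len < sizeof(*ht_oper))
		return;
	ht_oper = (const struct ieee80211_ht_operation *)ie_data;
	op_mode = le16_to_cpu(ht_oper->operation_mode);

	pr_debug("primary channel %u, protection %u\n",
		 ht_oper->primary_chan,
		 op_mode & IEEE80211_HT_OP_MODE_PROTECTION);
}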
 
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 6d722f4..26cb3c2 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -82,11 +82,12 @@
 #define ARPHRD_FCPL	786		/* Fibrechannel public loop	*/
 #define ARPHRD_FCFABRIC	787		/* Fibrechannel fabric		*/
 	/* 787->799 reserved for fibrechannel media types */
-#define ARPHRD_IEEE802_TR 800		/* Magic type ident for TR	*/
+/* 800 used to be used for token ring */
 #define ARPHRD_IEEE80211 801		/* IEEE 802.11			*/
 #define ARPHRD_IEEE80211_PRISM 802	/* IEEE 802.11 + Prism2 header  */
 #define ARPHRD_IEEE80211_RADIOTAP 803	/* IEEE 802.11 + radiotap header */
 #define ARPHRD_IEEE802154	  804
+#define ARPHRD_IEEE802154_MONITOR 805	/* IEEE 802.15.4 network monitor */
 
 #define ARPHRD_PHONET	820		/* PhoNet media type		*/
 #define ARPHRD_PHONET_PIPE 821		/* PhoNet pipe header		*/
diff --git a/include/linux/if_ec.h b/include/linux/if_ec.h
deleted file mode 100644
index d85f9f4..0000000
--- a/include/linux/if_ec.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* Definitions for Econet sockets. */
-
-#ifndef __LINUX_IF_EC
-#define __LINUX_IF_EC
-
-/* User visible stuff. Glibc provides its own but libc5 folk will use these */
-
-struct ec_addr {
-  unsigned char station;		/* Station number.  */
-  unsigned char net;			/* Network number.  */
-};
-
-struct sockaddr_ec {
-  unsigned short sec_family;
-  unsigned char port;			/* Port number.  */
-  unsigned char cb;			/* Control/flag byte.  */
-  unsigned char type;			/* Type of message.  */
-  struct ec_addr addr;
-  unsigned long cookie;
-};
-
-#define ECTYPE_PACKET_RECEIVED		0	/* Packet received */
-#define ECTYPE_TRANSMIT_STATUS		0x10	/* Transmit completed, 
-						   low nibble holds status */
-
-#define ECTYPE_TRANSMIT_OK		1
-#define ECTYPE_TRANSMIT_NOT_LISTENING	2
-#define ECTYPE_TRANSMIT_NET_ERROR	3
-#define ECTYPE_TRANSMIT_NO_CLOCK	4
-#define ECTYPE_TRANSMIT_LINE_JAMMED	5
-#define ECTYPE_TRANSMIT_NOT_PRESENT	6
-
-#ifdef __KERNEL__
-
-#define EC_HLEN				6
-
-/* This is what an Econet frame looks like on the wire. */
-struct ec_framehdr {
-  unsigned char dst_stn;
-  unsigned char dst_net;
-  unsigned char src_stn;
-  unsigned char src_net;
-  unsigned char cb;
-  unsigned char port;
-};
-
-struct econet_sock {
-  /* struct sock has to be the first member of econet_sock */
-  struct sock	sk;
-  unsigned char cb;
-  unsigned char port;
-  unsigned char station;
-  unsigned char net;
-  unsigned short num;
-};
-
-static inline struct econet_sock *ec_sk(const struct sock *sk)
-{
-	return (struct econet_sock *)sk;
-}
-
-struct ec_device {
-  unsigned char station, net;		/* Econet protocol address */
-};
-
-#endif
-
-#endif
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 4b24ff4..f715750 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -138,6 +138,8 @@
 	IFLA_GROUP,		/* Group the device belongs to */
 	IFLA_NET_NS_FD,
 	IFLA_EXT_MASK,		/* Extended info mask, VFs, etc */
+	IFLA_PROMISCUITY,	/* Promiscuity count: > 0 means acts PROMISC */
+#define IFLA_PROMISCUITY IFLA_PROMISCUITY
 	__IFLA_MAX
 };
 
@@ -253,6 +255,7 @@
 enum {
 	IFLA_MACVLAN_UNSPEC,
 	IFLA_MACVLAN_MODE,
+	IFLA_MACVLAN_FLAGS,
 	__IFLA_MACVLAN_MAX,
 };
 
@@ -265,6 +268,8 @@
 	MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */
 };
 
+#define MACVLAN_FLAG_NOPROMISC	1
+
 /* SR-IOV virtual function management section */
 
 enum {
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index d103dca..f65e8d2 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -60,6 +60,7 @@
 	struct net_device	*lowerdev;
 	struct macvlan_pcpu_stats __percpu *pcpu_stats;
 	enum macvlan_mode	mode;
+	u16			flags;
 	int (*receive)(struct sk_buff *skb);
 	int (*forward)(struct net_device *dev, struct sk_buff *skb);
 	struct macvtap_queue	*taps[MAX_MACVTAP_QUEUES];
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index 23cefa1..b477541 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -19,10 +19,11 @@
 
 #ifdef __KERNEL__
 #include <linux/in.h>
+#include <linux/in6.h>
 #endif
 
 /* Structure used to connect() the socket to a particular tunnel UDP
- * socket.
+ * socket over IPv4.
  */
 struct pppol2tp_addr {
 	__kernel_pid_t	pid;		/* pid that owns the fd.
@@ -35,6 +36,20 @@
 	__u16 d_tunnel, d_session;	/* For sending outgoing packets */
 };
 
+/* Structure used to connect() the socket to a particular tunnel UDP
+ * socket over IPv6.
+ */
+struct pppol2tpin6_addr {
+	__kernel_pid_t	pid;		/* pid that owns the fd.
+					 * 0 => current */
+	int	fd;			/* FD of UDP socket to use */
+
+	__u16 s_tunnel, s_session;	/* For matching incoming packets */
+	__u16 d_tunnel, d_session;	/* For sending outgoing packets */
+
+	struct sockaddr_in6 addr;	/* IP address and port to send to */
+};
+
 /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
  * bits. So we need a different sockaddr structure.
  */
@@ -49,6 +64,17 @@
 	__u32 d_tunnel, d_session;	/* For sending outgoing packets */
 };
 
+struct pppol2tpv3in6_addr {
+	__kernel_pid_t	pid;		/* pid that owns the fd.
+					 * 0 => current */
+	int	fd;			/* FD of UDP or IP socket to use */
+
+	__u32 s_tunnel, s_session;	/* For matching incoming packets */
+	__u32 d_tunnel, d_session;	/* For sending outgoing packets */
+
+	struct sockaddr_in6 addr;	/* IP address and port to send to */
+};
+
 /* Socket options:
  * DEBUG	- bitmask of debug message categories
  * SENDSEQ	- 0 => don't send packets with sequence numbers
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index b5f927f..09c474c 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -70,7 +70,7 @@
 		struct pppoe_addr  pppoe;
 		struct pptp_addr   pptp;
 	} sa_addr;
-} __attribute__((packed));
+} __packed;
 
 /* The use of the above union isn't viable because the size of this
  * struct must stay fixed over time -- applications use sizeof(struct
@@ -81,7 +81,13 @@
 	__kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
 	unsigned int    sa_protocol;    /* protocol identifier */
 	struct pppol2tp_addr pppol2tp;
-} __attribute__((packed));
+} __packed;
+
+struct sockaddr_pppol2tpin6 {
+	__kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
+	unsigned int    sa_protocol;    /* protocol identifier */
+	struct pppol2tpin6_addr pppol2tp;
+} __packed;
 
 /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
  * bits. So we need a different sockaddr structure.
@@ -90,7 +96,13 @@
 	__kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
 	unsigned int    sa_protocol;    /* protocol identifier */
 	struct pppol2tpv3_addr pppol2tp;
-} __attribute__((packed));
+} __packed;
+
+struct sockaddr_pppol2tpv3in6 {
+	__kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
+	unsigned int    sa_protocol;    /* protocol identifier */
+	struct pppol2tpv3in6_addr pppol2tp;
+} __packed;
 
 /*********************************************************************
  *
@@ -140,7 +152,7 @@
 	__be16 sid;
 	__be16 length;
 	struct pppoe_tag tag[0];
-} __attribute__((packed));
+} __packed;
 
 /* Length of entire PPPoE + PPP header */
 #define PPPOE_SES_HLEN	8
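
The new *in6 sockaddr variants mirror their IPv4 counterparts but carry a sockaddr_in6 for the tunnel peer. A hedged user-space sketch of connecting a PPP-over-L2TP session over IPv6; the tunnel UDP socket fd, tunnel/session IDs, port and peer address are placeholders, and the pppox_fd is assumed to have been created with socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <linux/if_pppox.h>

/* Sketch: connect a PPPoL2TP session socket to an IPv6 tunnel peer. */
static int example_connect_l2tp6(int tunnel_fd, int pppox_fd)
{
	struct sockaddr_pppol2tpin6 sax;

	memset(&sax, 0, sizeof(sax));
	sax.sa_family = AF_PPPOX;
	sax.sa_protocol = PX_PROTO_OL2TP;
	sax.pppol2tp.fd = tunnel_fd;		/* UDP socket to the peer */
	sax.pppol2tp.s_tunnel = 1;		/* placeholder IDs */
	sax.pppol2tp.s_session = 1;
	sax.pppol2tp.d_tunnel = 2;
	sax.pppol2tp.d_session = 2;
	sax.pppol2tp.addr.sin6_family = AF_INET6;
	sax.pppol2tp.addr.sin6_port = htons(1701);
	inet_pton(AF_INET6, "2001:db8::1", &sax.pppol2tp.addr.sin6_addr);

	return connect(pppox_fd, (struct sockaddr *)&sax, sizeof(sax));
}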
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 58404b0c..8185f57a 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -28,10 +28,28 @@
 
 struct team_port {
 	struct net_device *dev;
-	struct hlist_node hlist; /* node in hash list */
+	struct hlist_node hlist; /* node in enabled ports hash list */
 	struct list_head list; /* node in ordinary list */
 	struct team *team;
-	int index;
+	int index; /* index of enabled port. If disabled, it's set to -1 */
+
+	bool linkup; /* either state.linkup or user.linkup */
+
+	struct {
+		bool linkup;
+		u32 speed;
+		u8 duplex;
+	} state;
+
+	/* Values set by userspace */
+	struct {
+		bool linkup;
+		bool linkup_enabled;
+	} user;
+
+	/* Custom gennetlink interface related flags */
+	bool changed;
+	bool removed;
 
 	/*
 	 * A place for storing original values of the device before it
@@ -42,14 +60,6 @@
 		unsigned int mtu;
 	} orig;
 
-	bool linkup;
-	u32 speed;
-	u8 duplex;
-
-	/* Custom gennetlink interface related flags */
-	bool changed;
-	bool removed;
-
 	struct rcu_head rcu;
 };
 
@@ -68,18 +78,30 @@
 enum team_option_type {
 	TEAM_OPTION_TYPE_U32,
 	TEAM_OPTION_TYPE_STRING,
+	TEAM_OPTION_TYPE_BINARY,
+	TEAM_OPTION_TYPE_BOOL,
+};
+
+struct team_gsetter_ctx {
+	union {
+		u32 u32_val;
+		const char *str_val;
+		struct {
+			const void *ptr;
+			u32 len;
+		} bin_val;
+		bool bool_val;
+	} data;
+	struct team_port *port;
 };
 
 struct team_option {
 	struct list_head list;
 	const char *name;
+	bool per_port;
 	enum team_option_type type;
-	int (*getter)(struct team *team, void *arg);
-	int (*setter)(struct team *team, void *arg);
-
-	/* Custom gennetlink interface related flags */
-	bool changed;
-	bool removed;
+	int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
+	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
 };
 
 struct team_mode {
@@ -103,13 +125,15 @@
 	struct mutex lock; /* used for overall locking, e.g. port lists write */
 
 	/*
-	 * port lists with port count
+	 * List of enabled ports and their count
 	 */
-	int port_count;
-	struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES];
-	struct list_head port_list;
+	int en_port_count;
+	struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];
+
+	struct list_head port_list; /* list of all ports */
 
 	struct list_head option_list;
+	struct list_head option_inst_list; /* list of option instances */
 
 	const struct team_mode *mode;
 	struct team_mode_ops ops;
@@ -119,7 +143,7 @@
 static inline struct hlist_head *team_port_index_hash(struct team *team,
 						      int port_index)
 {
-	return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
+	return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
 }
 
 static inline struct team_port *team_get_port_by_index(struct team *team,
@@ -216,6 +240,7 @@
 	TEAM_ATTR_OPTION_TYPE,		/* u8 */
 	TEAM_ATTR_OPTION_DATA,		/* dynamic */
 	TEAM_ATTR_OPTION_REMOVED,	/* flag */
+	TEAM_ATTR_OPTION_PORT_IFINDEX,	/* u32 */ /* for per-port options */
 
 	__TEAM_ATTR_OPTION_MAX,
 	TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
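
Option getters and setters now exchange values through struct team_gsetter_ctx instead of a raw pointer, which is what makes boolean and per-port options possible. A hedged sketch of a boolean option as a team mode might define it; keeping the state in a static variable is purely for illustration (a real mode would use its per-team private data), and registering the array via team_options_register() follows the in-tree modes rather than anything shown in this header:

#include <linux/if_team.h>

static bool example_verbose;	/* illustration only; real modes use per-team state */

static int verbose_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.bool_val = example_verbose;
	return 0;
}

static int verbose_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	example_verbose = ctx->data.bool_val;
	return 0;
}

static const struct team_option example_options[] = {
	{
		.name	= "verbose",
		.type	= TEAM_OPTION_TYPE_BOOL,
		.getter	= verbose_get,
		.setter	= verbose_set,
	},
};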
diff --git a/include/linux/if_tr.h b/include/linux/if_tr.h
deleted file mode 100644
index fc23aeb..0000000
--- a/include/linux/if_tr.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * INET		An implementation of the TCP/IP protocol suite for the LINUX
- *		operating system.  INET is implemented using the  BSD Socket
- *		interface as the means of communication with the user level.
- *
- *		Global definitions for the Token-Ring IEEE 802.5 interface.
- *
- * Version:	@(#)if_tr.h	0.0	07/11/94
- *
- * Author:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *		Donald Becker, <becker@super.org>
- *		Peter De Schrijver, <stud11@cc4.kuleuven.ac.be>
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
- */
-#ifndef _LINUX_IF_TR_H
-#define _LINUX_IF_TR_H
-
-#include <linux/types.h>
-#include <asm/byteorder.h>	/* For __be16 */
-
-/* IEEE 802.5 Token-Ring magic constants.  The frame sizes omit the preamble
-   and FCS/CRC (frame check sequence). */
-#define TR_ALEN		6		/* Octets in one token-ring addr */
-#define TR_HLEN 	(sizeof(struct trh_hdr)+sizeof(struct trllc))
-#define AC		0x10
-#define LLC_FRAME 	0x40
-
-/* LLC and SNAP constants */
-#define EXTENDED_SAP 	0xAA
-#define UI_CMD       	0x03
-
-/* This is an Token-Ring frame header. */
-struct trh_hdr {
-	__u8  ac;			/* access control field */
-	__u8  fc;			/* frame control field */
-	__u8  daddr[TR_ALEN];		/* destination address */
-	__u8  saddr[TR_ALEN];		/* source address */
-	__be16 rcf;			/* route control field */
-	__be16 rseg[8];			/* routing registers */
-};
-
-#ifdef __KERNEL__
-#include <linux/skbuff.h>
-
-static inline struct trh_hdr *tr_hdr(const struct sk_buff *skb)
-{
-	return (struct trh_hdr *)skb_mac_header(skb);
-}
-#endif
-
-/* This is an Token-Ring LLC structure */
-struct trllc {
-	__u8  dsap;			/* destination SAP */
-	__u8  ssap;			/* source SAP */
-	__u8  llc;			/* LLC control field */
-	__u8  protid[3];		/* protocol id */
-	__be16 ethertype;		/* ether type field */
-};
-
-/* Token-Ring statistics collection data. */
-struct tr_statistics {
-	unsigned long rx_packets;       /* total packets received	*/
-	unsigned long tx_packets;	/* total packets transmitted	*/
-	unsigned long rx_bytes;		/* total bytes received   	*/
-	unsigned long tx_bytes;		/* total bytes transmitted	*/
-	unsigned long rx_errors;	/* bad packets received		*/
-	unsigned long tx_errors;	/* packet transmit problems	*/
-	unsigned long rx_dropped;	/* no space in linux buffers	*/
-	unsigned long tx_dropped;	/* no space available in linux	*/
-	unsigned long multicast;	/* multicast packets received	*/
-	unsigned long transmit_collision;
-
-	/* detailed Token-Ring errors. See IBM Token-Ring Network
-	   Architecture for more info */
-
-	unsigned long line_errors;
-	unsigned long internal_errors;
-	unsigned long burst_errors;
-	unsigned long A_C_errors;
-	unsigned long abort_delimiters;
-	unsigned long lost_frames;
-	unsigned long recv_congest_count;
-	unsigned long frame_copied_errors;
-	unsigned long frequency_errors;
-	unsigned long token_errors;
-	unsigned long dummy1;
-};
-
-/* source routing stuff */
-#define TR_RII 			0x80
-#define TR_RCF_DIR_BIT 		0x80
-#define TR_RCF_LEN_MASK 	0x1f00
-#define TR_RCF_BROADCAST 	0x8000	/* all-routes broadcast */
-#define TR_RCF_LIMITED_BROADCAST 0xC000	/* single-route broadcast */
-#define TR_RCF_FRAME2K 		0x20
-#define TR_RCF_BROADCAST_MASK 	0xC000
-#define TR_MAXRIFLEN 		18
-
-#endif	/* _LINUX_IF_TR_H */
diff --git a/include/linux/in6.h b/include/linux/in6.h
index 5c83d9e..cba469b 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -142,7 +142,7 @@
 /*
  *	IPv6 TLV options.
  */
-#define IPV6_TLV_PAD0		0
+#define IPV6_TLV_PAD1		0
 #define IPV6_TLV_PADN		1
 #define IPV6_TLV_ROUTERALERT	5
 #define IPV6_TLV_JUMBO		194
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index e885ba23..589e0e7 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -223,5 +223,12 @@
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *));
 
+/* True if any part of r1 overlaps r2 */
+static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+{
+       return (r1->start <= r2->end && r1->end >= r2->start);
+}
+
+
 #endif /* __ASSEMBLY__ */
 #endif	/* _LINUX_IOPORT_H */
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index 4deb383..8a2d438 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -89,6 +89,7 @@
 #define IP_VS_CONN_F_TEMPLATE	0x1000		/* template, not connection */
 #define IP_VS_CONN_F_ONE_PACKET	0x2000		/* forward only one packet */
 
+/* Initial bits allowed in backup server */
 #define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \
 				  IP_VS_CONN_F_NOOUTPUT | \
 				  IP_VS_CONN_F_INACTIVE | \
@@ -97,6 +98,10 @@
 				  IP_VS_CONN_F_TEMPLATE \
 				 )
 
+/* Bits allowed to update in backup server */
+#define IP_VS_CONN_F_BACKUP_UPD_MASK (IP_VS_CONN_F_INACTIVE | \
+				      IP_VS_CONN_F_SEQ_MASK)
+
 /* Flags that are not sent to backup server start from bit 16 */
 #define IP_VS_CONN_F_NFCT	(1 << 16)	/* use netfilter conntrack */
 
@@ -125,8 +130,8 @@
 
 	/* virtual service options */
 	char			sched_name[IP_VS_SCHEDNAME_MAXLEN];
-	unsigned		flags;		/* virtual service flags */
-	unsigned		timeout;	/* persistent timeout in sec */
+	unsigned int		flags;		/* virtual service flags */
+	unsigned int		timeout;	/* persistent timeout in sec */
 	__be32			netmask;	/* persistent netmask */
 };
 
@@ -137,7 +142,7 @@
 	__be16			port;
 
 	/* real server options */
-	unsigned		conn_flags;	/* connection flags */
+	unsigned int		conn_flags;	/* connection flags */
 	int			weight;		/* destination weight */
 
 	/* thresholds for active connections */
@@ -187,8 +192,8 @@
 
 	/* service options */
 	char			sched_name[IP_VS_SCHEDNAME_MAXLEN];
-	unsigned		flags;          /* virtual service flags */
-	unsigned		timeout;	/* persistent timeout */
+	unsigned int		flags;          /* virtual service flags */
+	unsigned int		timeout;	/* persistent timeout */
 	__be32			netmask;	/* persistent netmask */
 
 	/* number of real servers */
@@ -202,7 +207,7 @@
 struct ip_vs_dest_entry {
 	__be32			addr;		/* destination address */
 	__be16			port;
-	unsigned		conn_flags;	/* connection flags */
+	unsigned int		conn_flags;	/* connection flags */
 	int			weight;		/* destination weight */
 
 	__u32		u_threshold;	/* upper threshold */
diff --git a/include/linux/ipx.h b/include/linux/ipx.h
index 3d48014..8f02439 100644
--- a/include/linux/ipx.h
+++ b/include/linux/ipx.h
@@ -38,7 +38,7 @@
 #define IPX_FRAME_8022		2
 #define IPX_FRAME_ETHERII	3
 #define IPX_FRAME_8023		4
-#define IPX_FRAME_TR_8022       5 /* obsolete */
+/* obsolete token ring was	5 */
 	unsigned char ipx_special;
 #define IPX_SPECIAL_NONE	0
 #define IPX_PRIMARY		1
diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h
index e77d7f9..7eab668 100644
--- a/include/linux/l2tp.h
+++ b/include/linux/l2tp.h
@@ -11,6 +11,7 @@
 #include <linux/socket.h>
 #ifdef __KERNEL__
 #include <linux/in.h>
+#include <linux/in6.h>
 #else
 #include <netinet/in.h>
 #endif
@@ -39,6 +40,22 @@
 			      sizeof(__u32)];
 };
 
+/**
+ * struct sockaddr_l2tpip6 - the sockaddr structure for L2TP-over-IPv6 sockets
+ * @l2tp_family:  address family number AF_L2TPIP.
+ * @l2tp_addr:    protocol specific address information
+ * @l2tp_conn_id: connection id of tunnel
+ */
+struct sockaddr_l2tpip6 {
+	/* The first fields must match struct sockaddr_in6 */
+	__kernel_sa_family_t l2tp_family; /* AF_INET6 */
+	__be16		l2tp_unused;	/* INET port number (unused) */
+	__be32		l2tp_flowinfo;	/* IPv6 flow information */
+	struct in6_addr	l2tp_addr;	/* IPv6 address */
+	__u32		l2tp_scope_id;	/* scope id (new in RFC2553) */
+	__u32		l2tp_conn_id;	/* Connection ID of tunnel */
+};
+
 /*****************************************************************************
  *  NETLINK_GENERIC netlink family.
  *****************************************************************************/
@@ -108,6 +125,8 @@
 	L2TP_ATTR_MTU,			/* u16 */
 	L2TP_ATTR_MRU,			/* u16 */
 	L2TP_ATTR_STATS,		/* nested */
+	L2TP_ATTR_IP6_SADDR,		/* struct in6_addr */
+	L2TP_ATTR_IP6_DADDR,		/* struct in6_addr */
 	__L2TP_ATTR_MAX,
 };
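
struct sockaddr_l2tpip6 is laid out so its leading fields match sockaddr_in6, with the tunnel connection ID appended. A hedged user-space sketch of binding an L2TP-over-IPv6 socket; the use of IPPROTO_L2TP (115) with SOCK_DGRAM and the connection ID value are assumptions about how the new l2tp_ip6 family is driven, only the sockaddr layout comes from this header:

#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <linux/l2tp.h>

#ifndef IPPROTO_L2TP
#define IPPROTO_L2TP 115
#endif

/* Sketch: bind an L2TPv3-over-IPv6 socket to a local connection ID. */
static int example_bind_l2tp_ip6(void)
{
	struct sockaddr_l2tpip6 addr;
	int fd;

	fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.l2tp_family = AF_INET6;
	addr.l2tp_addr = in6addr_any;
	addr.l2tp_conn_id = 42;		/* local tunnel connection ID */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;
	return fd;
}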
 
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 42378d6..e926df7 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -996,7 +996,8 @@
 extern void ata_sas_port_destroy(struct ata_port *);
 extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
 					   struct ata_port_info *, struct Scsi_Host *);
-extern int ata_sas_async_port_init(struct ata_port *);
+extern void ata_sas_async_probe(struct ata_port *ap);
+extern int ata_sas_sync_probe(struct ata_port *ap);
 extern int ata_sas_port_init(struct ata_port *);
 extern int ata_sas_port_start(struct ata_port *ap);
 extern void ata_sas_port_stop(struct ata_port *ap);
diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h
index 4af8414..d0752ec 100644
--- a/include/linux/mISDNhw.h
+++ b/include/linux/mISDNhw.h
@@ -72,7 +72,9 @@
 #define FLG_LL_OK		24
 #define FLG_LL_CONN		25
 #define FLG_DTMFSEND		26
-
+#define FLG_TX_EMPTY		27
+/* stop sending received data upstream */
+#define FLG_RX_OFF		28
 /* workq events */
 #define FLG_RECVQUEUE		30
 #define	FLG_PHCHANGE		31
@@ -135,10 +137,14 @@
 #define HW_TESTRX_RAW	0x9602
 #define HW_TESTRX_HDLC	0x9702
 #define HW_TESTRX_OFF	0x9802
+#define HW_TIMER3_IND	0x9902
+#define HW_TIMER3_VALUE	0x9a00
+#define HW_TIMER3_VMASK	0x00FF
 
 struct layer1;
 extern int	l1_event(struct layer1 *, u_int);
 
+#define MISDN_BCH_FILL_SIZE	4
 
 struct bchannel {
 	struct mISDNchannel	ch;
@@ -150,8 +156,14 @@
 	int			slot;	/* multiport card channel slot */
 	struct timer_list	timer;
 	/* receive data */
+	u8			fill[MISDN_BCH_FILL_SIZE];
 	struct sk_buff		*rx_skb;
-	int			maxlen;
+	unsigned short		maxlen;
+	unsigned short		init_maxlen; /* initial value */
+	unsigned short		next_maxlen; /* pending value */
+	unsigned short		minlen; /* for transparent data */
+	unsigned short		init_minlen; /* initial value */
+	unsigned short		next_minlen; /* pending value */
 	/* send data */
 	struct sk_buff		*next_skb;
 	struct sk_buff		*tx_skb;
@@ -163,23 +175,26 @@
 	int			err_crc;
 	int			err_tx;
 	int			err_rx;
+	int			dropcnt;
 };
 
 extern int	mISDN_initdchannel(struct dchannel *, int, void *);
-extern int	mISDN_initbchannel(struct bchannel *, int);
+extern int	mISDN_initbchannel(struct bchannel *, unsigned short,
+				   unsigned short);
 extern int	mISDN_freedchannel(struct dchannel *);
 extern void	mISDN_clear_bchannel(struct bchannel *);
 extern int	mISDN_freebchannel(struct bchannel *);
+extern int	mISDN_ctrl_bchannel(struct bchannel *, struct mISDN_ctrl_req *);
 extern void	queue_ch_frame(struct mISDNchannel *, u_int,
 			int, struct sk_buff *);
 extern int	dchannel_senddata(struct dchannel *, struct sk_buff *);
 extern int	bchannel_senddata(struct bchannel *, struct sk_buff *);
+extern int      bchannel_get_rxbuf(struct bchannel *, int);
 extern void	recv_Dchannel(struct dchannel *);
 extern void	recv_Echannel(struct dchannel *, struct dchannel *);
-extern void	recv_Bchannel(struct bchannel *, unsigned int id);
+extern void	recv_Bchannel(struct bchannel *, unsigned int, bool);
 extern void	recv_Dchannel_skb(struct dchannel *, struct sk_buff *);
 extern void	recv_Bchannel_skb(struct bchannel *, struct sk_buff *);
-extern void	confirm_Bsend(struct bchannel *bch);
 extern int	get_next_bframe(struct bchannel *);
 extern int	get_next_dframe(struct dchannel *);
 
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index b5e7f22..246a352 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -37,7 +37,7 @@
  */
 #define	MISDN_MAJOR_VERSION	1
 #define	MISDN_MINOR_VERSION	1
-#define MISDN_RELEASE		21
+#define MISDN_RELEASE		29
 
 /* primitives for information exchange
  * general format
@@ -115,6 +115,11 @@
 #define MDL_ERROR_IND		0x1F04
 #define MDL_ERROR_RSP		0x5F04
 
+/* intern layer 2 */
+#define DL_TIMER200_IND		0x7004
+#define DL_TIMER203_IND		0x7304
+#define DL_INTERN_MSG		0x7804
+
 /* DL_INFORMATION_IND types */
 #define DL_INFO_L2_CONNECT	0x0001
 #define DL_INFO_L2_REMOVED	0x0002
@@ -360,6 +365,7 @@
 #define MISDN_CTRL_LOOP			0x0001
 #define MISDN_CTRL_CONNECT		0x0002
 #define MISDN_CTRL_DISCONNECT		0x0004
+#define MISDN_CTRL_RX_BUFFER		0x0008
 #define MISDN_CTRL_PCMCONNECT		0x0010
 #define MISDN_CTRL_PCMDISCONNECT	0x0020
 #define MISDN_CTRL_SETPEER		0x0040
@@ -367,6 +373,7 @@
 #define MISDN_CTRL_RX_OFF		0x0100
 #define MISDN_CTRL_FILL_EMPTY		0x0200
 #define MISDN_CTRL_GETPEER		0x0400
+#define MISDN_CTRL_L1_TIMER3		0x0800
 #define MISDN_CTRL_HW_FEATURES_OP	0x2000
 #define MISDN_CTRL_HW_FEATURES		0x2001
 #define MISDN_CTRL_HFC_OP		0x4000
@@ -381,6 +388,12 @@
 #define MISDN_CTRL_HFC_WD_INIT		0x4009
 #define MISDN_CTRL_HFC_WD_RESET		0x400A
 
+/* special RX buffer value for MISDN_CTRL_RX_BUFFER: request.p1 is the minimum
+ * buffer size, request.p2 the maximum. Using MISDN_CTRL_RX_SIZE_IGNORE will
+ * not change the value, but still read back the actual setting.
+ */
+#define MISDN_CTRL_RX_SIZE_IGNORE	-1
+
 /* socket options */
 #define MISDN_TIME_STAMP		0x0001
 
@@ -585,6 +598,7 @@
 extern void	set_channel_address(struct mISDNchannel *, u_int, u_int);
 extern void	mISDN_clock_update(struct mISDNclock *, int, struct timeval *);
 extern unsigned short mISDN_clock_get(void);
+extern const char *mISDNDevName4ch(struct mISDNchannel *);
 
 #endif /* __KERNEL__ */
 #endif /* mISDNIF_H */
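
MISDN_CTRL_RX_BUFFER lets a channel user negotiate both receive-buffer bounds in one control request, with MISDN_CTRL_RX_SIZE_IGNORE leaving a bound unchanged while still reading back the current value. A hedged sketch of filling such a request; the op/p1/p2 members follow the existing mISDN control convention, and how the request is then handed to the channel's control hook is left out:

#include <linux/string.h>
#include <linux/mISDNif.h>

/* Sketch: raise only the maximum RX buffer size, keep the minimum as-is. */
static void example_fill_rx_buffer_req(struct mISDN_ctrl_req *cq)
{
	memset(cq, 0, sizeof(*cq));
	cq->op = MISDN_CTRL_RX_BUFFER;
	cq->p1 = MISDN_CTRL_RX_SIZE_IGNORE;	/* minimum: leave unchanged */
	cq->p2 = 2048;				/* maximum: request 2048 bytes */
	/* then pass the request through the channel's control operation */
}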
diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h
new file mode 100644
index 0000000..a243dbb
--- /dev/null
+++ b/include/linux/mdio-mux.h
@@ -0,0 +1,21 @@
+/*
+ * MDIO bus multiplexer framework.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium, Inc.
+ */
+#ifndef __LINUX_MDIO_MUX_H
+#define __LINUX_MDIO_MUX_H
+#include <linux/device.h>
+
+int mdio_mux_init(struct device *dev,
+		  int (*switch_fn) (int cur, int desired, void *data),
+		  void **mux_handle,
+		  void *data);
+
+void mdio_mux_uninit(void *mux_handle);
+
+#endif /* __LINUX_MDIO_MUX_H */
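
mdio_mux_init() takes a callback that the framework invokes whenever it needs to steer the parent MDIO bus towards a different child bus. A hedged sketch of the callback and registration; the driver structure, names and probe context are assumptions, only the mdio_mux_init()/mdio_mux_uninit() signatures come from this header:

#include <linux/device.h>
#include <linux/mdio-mux.h>

struct example_mux {
	void *mux_handle;
	/* plus whatever state is needed to flip the mux, e.g. GPIOs */
};

/* Called by the framework before it accesses a child bus. */
static int example_switch_fn(int current_child, int desired_child, void *data)
{
	struct example_mux *mux = data;

	if (current_child == desired_child)
		return 0;	/* already routed correctly */

	/* program the hardware so MDIO traffic reaches desired_child */
	(void)mux;
	return 0;
}

static int example_register(struct device *dev, struct example_mux *mux)
{
	return mdio_mux_init(dev, example_switch_fn, &mux->mux_handle, mux);
}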
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 7ffbd6e..8313cd9 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -80,6 +80,7 @@
 	struct regmap *regmap;
 
 	int irq_base;
+	struct regmap_irq_chip_data *irq_data;
 	u8 chip_id;
 
 	int chip_irq;
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
new file mode 100644
index 0000000..9cbc642
--- /dev/null
+++ b/include/linux/mfd/palmas.h
@@ -0,0 +1,2620 @@
+/*
+ * TI Palmas
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_PALMAS_H
+#define __LINUX_MFD_PALMAS_H
+
+#include <linux/usb/otg.h>
+#include <linux/leds.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#define PALMAS_NUM_CLIENTS		3
+
+struct palmas_pmic;
+
+struct palmas {
+	struct device *dev;
+
+	struct i2c_client *i2c_clients[PALMAS_NUM_CLIENTS];
+	struct regmap *regmap[PALMAS_NUM_CLIENTS];
+
+	/* Stored chip id */
+	int id;
+
+	/* IRQ Data */
+	int irq;
+	u32 irq_mask;
+	struct mutex irq_lock;
+	struct regmap_irq_chip_data *irq_data;
+
+	/* Child Devices */
+	struct palmas_pmic *pmic;
+
+	/* GPIO MUXing */
+	u8 gpio_muxed;
+	u8 led_muxed;
+	u8 pwm_muxed;
+};
+
+struct palmas_reg_init {
+	/* warm_reset controls the voltage levels after a warm reset
+	 *
+	 * 0: reload default values from OTP on warm reset
+	 * 1: maintain voltage from VSEL on warm reset
+	 */
+	int warm_reset;
+
+	/* roof_floor controls whether the regulator uses the i2c style
+	 * of DVS or uses the method where a GPIO or other control method is
+	 * attached to the NSLEEP/ENABLE1/ENABLE2 pins
+	 *
+	 * For SMPS
+	 *
+	 * 0: i2c selection of voltage
+	 * 1: pin selection of voltage.
+	 *
+	 * For LDO unused
+	 */
+	int roof_floor;
+
+	/* sleep_mode is the mode loaded to MODE_SLEEP bits as defined in
+	 * the data sheet.
+	 *
+	 * For SMPS
+	 *
+	 * 0: Off
+	 * 1: AUTO
+	 * 2: ECO
+	 * 3: Forced PWM
+	 *
+	 * For LDO
+	 *
+	 * 0: Off
+	 * 1: On
+	 */
+	int mode_sleep;
+
+	/* tstep is the timestep loaded to the TSTEP register
+	 *
+	 * For SMPS
+	 *
+	 * 0: Jump (no slope control)
+	 * 1: 10mV/us
+	 * 2: 5mV/us
+	 * 3: 2.5mV/us
+	 *
+	 * For LDO unused
+	 */
+	int tstep;
+
+	/* voltage_sel is the bitfield loaded onto the SMPSX_VOLTAGE
+	 * register. Set this if the default voltage set in OTP needs
+	 * to be overridden.
+	 */
+	u8 vsel;
+
+};
+
+struct palmas_pmic_platform_data {
+	/* An array of pointers to regulator init data indexed by regulator
+	 * ID
+	 */
+	struct regulator_init_data **reg_data;
+
+	/* An array of pointers to structures containing sleep mode and DVS
+	 * configuration for regulators indexed by ID
+	 */
+	struct palmas_reg_init **reg_init;
+
+	/* use LDO6 for vibrator control */
+	int ldo6_vibrator;
+
+
+};
+
+struct palmas_platform_data {
+	int gpio_base;
+
+	/* bit value to be loaded to the POWER_CTRL register */
+	u8 power_ctrl;
+
+	/*
+	 * boolean to select if we want to configure muxing here
+	 * then the two value to load into the registers if true
+	 */
+	int mux_from_pdata;
+	u8 pad1, pad2;
+
+	struct palmas_pmic_platform_data *pmic_pdata;
+};
+
+/* Define the palmas IRQ numbers */
+enum palmas_irqs {
+	/* INT1 registers */
+	PALMAS_CHARG_DET_N_VBUS_OVV_IRQ,
+	PALMAS_PWRON_IRQ,
+	PALMAS_LONG_PRESS_KEY_IRQ,
+	PALMAS_RPWRON_IRQ,
+	PALMAS_PWRDOWN_IRQ,
+	PALMAS_HOTDIE_IRQ,
+	PALMAS_VSYS_MON_IRQ,
+	PALMAS_VBAT_MON_IRQ,
+	/* INT2 registers */
+	PALMAS_RTC_ALARM_IRQ,
+	PALMAS_RTC_TIMER_IRQ,
+	PALMAS_WDT_IRQ,
+	PALMAS_BATREMOVAL_IRQ,
+	PALMAS_RESET_IN_IRQ,
+	PALMAS_FBI_BB_IRQ,
+	PALMAS_SHORT_IRQ,
+	PALMAS_VAC_ACOK_IRQ,
+	/* INT3 registers */
+	PALMAS_GPADC_AUTO_0_IRQ,
+	PALMAS_GPADC_AUTO_1_IRQ,
+	PALMAS_GPADC_EOC_SW_IRQ,
+	PALMAS_GPADC_EOC_RT_IRQ,
+	PALMAS_ID_OTG_IRQ,
+	PALMAS_ID_IRQ,
+	PALMAS_VBUS_OTG_IRQ,
+	PALMAS_VBUS_IRQ,
+	/* INT4 registers */
+	PALMAS_GPIO_0_IRQ,
+	PALMAS_GPIO_1_IRQ,
+	PALMAS_GPIO_2_IRQ,
+	PALMAS_GPIO_3_IRQ,
+	PALMAS_GPIO_4_IRQ,
+	PALMAS_GPIO_5_IRQ,
+	PALMAS_GPIO_6_IRQ,
+	PALMAS_GPIO_7_IRQ,
+	/* Total number of IRQs */
+	PALMAS_NUM_IRQ,
+};
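+
+/*
+ * Illustrative sketch only: once the core has registered an IRQ chip
+ * behind palmas->irq_data, a child driver could translate one of the
+ * enum values above into a Linux virq with the generic regmap helper:
+ *
+ *	int virq = regmap_irq_get_virq(palmas->irq_data, PALMAS_RTC_ALARM_IRQ);
+ *
+ *	if (virq > 0)
+ *		ret = request_threaded_irq(virq, NULL, alarm_handler,
+ *					   IRQF_ONESHOT, "palmas-rtc", data);
+ *
+ * "alarm_handler", "data" and "ret" are placeholders for this example.
+ */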
+
+enum palmas_regulators {
+	/* SMPS regulators */
+	PALMAS_REG_SMPS12,
+	PALMAS_REG_SMPS123,
+	PALMAS_REG_SMPS3,
+	PALMAS_REG_SMPS45,
+	PALMAS_REG_SMPS457,
+	PALMAS_REG_SMPS6,
+	PALMAS_REG_SMPS7,
+	PALMAS_REG_SMPS8,
+	PALMAS_REG_SMPS9,
+	PALMAS_REG_SMPS10,
+	/* LDO regulators */
+	PALMAS_REG_LDO1,
+	PALMAS_REG_LDO2,
+	PALMAS_REG_LDO3,
+	PALMAS_REG_LDO4,
+	PALMAS_REG_LDO5,
+	PALMAS_REG_LDO6,
+	PALMAS_REG_LDO7,
+	PALMAS_REG_LDO8,
+	PALMAS_REG_LDO9,
+	PALMAS_REG_LDOLN,
+	PALMAS_REG_LDOUSB,
+	/* Total number of regulators */
+	PALMAS_NUM_REGS,
+};
+
+struct palmas_pmic {
+	struct palmas *palmas;
+	struct device *dev;
+	struct regulator_desc desc[PALMAS_NUM_REGS];
+	struct regulator_dev *rdev[PALMAS_NUM_REGS];
+	struct mutex mutex;
+
+	int smps123;
+	int smps457;
+
+	int range[PALMAS_REG_SMPS10];
+};
+
+/* defines so we can store the mux settings */
+#define PALMAS_GPIO_0_MUXED					(1 << 0)
+#define PALMAS_GPIO_1_MUXED					(1 << 1)
+#define PALMAS_GPIO_2_MUXED					(1 << 2)
+#define PALMAS_GPIO_3_MUXED					(1 << 3)
+#define PALMAS_GPIO_4_MUXED					(1 << 4)
+#define PALMAS_GPIO_5_MUXED					(1 << 5)
+#define PALMAS_GPIO_6_MUXED					(1 << 6)
+#define PALMAS_GPIO_7_MUXED					(1 << 7)
+
+#define PALMAS_LED1_MUXED					(1 << 0)
+#define PALMAS_LED2_MUXED					(1 << 1)
+
+#define PALMAS_PWM1_MUXED					(1 << 0)
+#define PALMAS_PWM2_MUXED					(1 << 1)
+
+/* helper macros to get the correct slave number and register address */
+#define PALMAS_BASE_TO_SLAVE(x)		(((x) >> 8) - 1)
+#define PALMAS_BASE_TO_REG(x, y)	(((x) & 0xff) + (y))
+
+/* Base addresses of IP blocks in Palmas */
+#define PALMAS_SMPS_DVS_BASE					0x20
+#define PALMAS_RTC_BASE						0x100
+#define PALMAS_VALIDITY_BASE					0x118
+#define PALMAS_SMPS_BASE					0x120
+#define PALMAS_LDO_BASE						0x150
+#define PALMAS_DVFS_BASE					0x180
+#define PALMAS_PMU_CONTROL_BASE					0x1A0
+#define PALMAS_RESOURCE_BASE					0x1D4
+#define PALMAS_PU_PD_OD_BASE					0x1F4
+#define PALMAS_LED_BASE						0x200
+#define PALMAS_INTERRUPT_BASE					0x210
+#define PALMAS_USB_OTG_BASE					0x250
+#define PALMAS_VIBRATOR_BASE					0x270
+#define PALMAS_GPIO_BASE					0x280
+#define PALMAS_USB_BASE						0x290
+#define PALMAS_GPADC_BASE					0x2C0
+#define PALMAS_TRIM_GPADC_BASE					0x3CD
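+
+/*
+ * Worked example for the helper macros above: PALMAS_SMPS_BASE is 0x120,
+ * so PALMAS_BASE_TO_SLAVE(PALMAS_SMPS_BASE) is (0x120 >> 8) - 1 = 0,
+ * i.e. the first i2c client/regmap, and
+ * PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, PALMAS_SMPS12_VOLTAGE), with
+ * PALMAS_SMPS12_VOLTAGE (0x3) defined below, is 0x20 + 0x3 = 0x23, the
+ * register address within that slave.
+ */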
+
+/* Registers for function RTC */
+#define PALMAS_SECONDS_REG					0x0
+#define PALMAS_MINUTES_REG					0x1
+#define PALMAS_HOURS_REG					0x2
+#define PALMAS_DAYS_REG						0x3
+#define PALMAS_MONTHS_REG					0x4
+#define PALMAS_YEARS_REG					0x5
+#define PALMAS_WEEKS_REG					0x6
+#define PALMAS_ALARM_SECONDS_REG				0x8
+#define PALMAS_ALARM_MINUTES_REG				0x9
+#define PALMAS_ALARM_HOURS_REG					0xA
+#define PALMAS_ALARM_DAYS_REG					0xB
+#define PALMAS_ALARM_MONTHS_REG					0xC
+#define PALMAS_ALARM_YEARS_REG					0xD
+#define PALMAS_RTC_CTRL_REG					0x10
+#define PALMAS_RTC_STATUS_REG					0x11
+#define PALMAS_RTC_INTERRUPTS_REG				0x12
+#define PALMAS_RTC_COMP_LSB_REG					0x13
+#define PALMAS_RTC_COMP_MSB_REG					0x14
+#define PALMAS_RTC_RES_PROG_REG					0x15
+#define PALMAS_RTC_RESET_STATUS_REG				0x16
+
+/* Bit definitions for SECONDS_REG */
+#define PALMAS_SECONDS_REG_SEC1_MASK				0x70
+#define PALMAS_SECONDS_REG_SEC1_SHIFT				4
+#define PALMAS_SECONDS_REG_SEC0_MASK				0x0f
+#define PALMAS_SECONDS_REG_SEC0_SHIFT				0
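+
+/*
+ * Illustrative sketch only: the RTC time/alarm registers are BCD coded,
+ * with the tens digit in the *1 field and the units digit in the *0
+ * field, so a raw SECONDS_REG value could be decoded as:
+ *
+ *	tens = (val & PALMAS_SECONDS_REG_SEC1_MASK) >>
+ *			PALMAS_SECONDS_REG_SEC1_SHIFT;
+ *	units = (val & PALMAS_SECONDS_REG_SEC0_MASK) >>
+ *			PALMAS_SECONDS_REG_SEC0_SHIFT;
+ *	seconds = tens * 10 + units;
+ *
+ * which, for in-range values, matches bcd2bin() from <linux/bcd.h>.
+ */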
+
+/* Bit definitions for MINUTES_REG */
+#define PALMAS_MINUTES_REG_MIN1_MASK				0x70
+#define PALMAS_MINUTES_REG_MIN1_SHIFT				4
+#define PALMAS_MINUTES_REG_MIN0_MASK				0x0f
+#define PALMAS_MINUTES_REG_MIN0_SHIFT				0
+
+/* Bit definitions for HOURS_REG */
+#define PALMAS_HOURS_REG_PM_NAM					0x80
+#define PALMAS_HOURS_REG_PM_NAM_SHIFT				7
+#define PALMAS_HOURS_REG_HOUR1_MASK				0x30
+#define PALMAS_HOURS_REG_HOUR1_SHIFT				4
+#define PALMAS_HOURS_REG_HOUR0_MASK				0x0f
+#define PALMAS_HOURS_REG_HOUR0_SHIFT				0
+
+/* Bit definitions for DAYS_REG */
+#define PALMAS_DAYS_REG_DAY1_MASK				0x30
+#define PALMAS_DAYS_REG_DAY1_SHIFT				4
+#define PALMAS_DAYS_REG_DAY0_MASK				0x0f
+#define PALMAS_DAYS_REG_DAY0_SHIFT				0
+
+/* Bit definitions for MONTHS_REG */
+#define PALMAS_MONTHS_REG_MONTH1				0x10
+#define PALMAS_MONTHS_REG_MONTH1_SHIFT				4
+#define PALMAS_MONTHS_REG_MONTH0_MASK				0x0f
+#define PALMAS_MONTHS_REG_MONTH0_SHIFT				0
+
+/* Bit definitions for YEARS_REG */
+#define PALMAS_YEARS_REG_YEAR1_MASK				0xf0
+#define PALMAS_YEARS_REG_YEAR1_SHIFT				4
+#define PALMAS_YEARS_REG_YEAR0_MASK				0x0f
+#define PALMAS_YEARS_REG_YEAR0_SHIFT				0
+
+/* Bit definitions for WEEKS_REG */
+#define PALMAS_WEEKS_REG_WEEK_MASK				0x07
+#define PALMAS_WEEKS_REG_WEEK_SHIFT				0
+
+/* Bit definitions for ALARM_SECONDS_REG */
+#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC1_MASK		0x70
+#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC1_SHIFT		4
+#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC0_MASK		0x0f
+#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC0_SHIFT		0
+
+/* Bit definitions for ALARM_MINUTES_REG */
+#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN1_MASK		0x70
+#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN1_SHIFT		4
+#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN0_MASK		0x0f
+#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN0_SHIFT		0
+
+/* Bit definitions for ALARM_HOURS_REG */
+#define PALMAS_ALARM_HOURS_REG_ALARM_PM_NAM			0x80
+#define PALMAS_ALARM_HOURS_REG_ALARM_PM_NAM_SHIFT		7
+#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR1_MASK			0x30
+#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR1_SHIFT		4
+#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR0_MASK			0x0f
+#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR0_SHIFT		0
+
+/* Bit definitions for ALARM_DAYS_REG */
+#define PALMAS_ALARM_DAYS_REG_ALARM_DAY1_MASK			0x30
+#define PALMAS_ALARM_DAYS_REG_ALARM_DAY1_SHIFT			4
+#define PALMAS_ALARM_DAYS_REG_ALARM_DAY0_MASK			0x0f
+#define PALMAS_ALARM_DAYS_REG_ALARM_DAY0_SHIFT			0
+
+/* Bit definitions for ALARM_MONTHS_REG */
+#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH1			0x10
+#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH1_SHIFT		4
+#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH0_MASK		0x0f
+#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH0_SHIFT		0
+
+/* Bit definitions for ALARM_YEARS_REG */
+#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR1_MASK			0xf0
+#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR1_SHIFT		4
+#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR0_MASK			0x0f
+#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR0_SHIFT		0
+
+/* Bit definitions for RTC_CTRL_REG */
+#define PALMAS_RTC_CTRL_REG_RTC_V_OPT				0x80
+#define PALMAS_RTC_CTRL_REG_RTC_V_OPT_SHIFT			7
+#define PALMAS_RTC_CTRL_REG_GET_TIME				0x40
+#define PALMAS_RTC_CTRL_REG_GET_TIME_SHIFT			6
+#define PALMAS_RTC_CTRL_REG_SET_32_COUNTER			0x20
+#define PALMAS_RTC_CTRL_REG_SET_32_COUNTER_SHIFT		5
+#define PALMAS_RTC_CTRL_REG_TEST_MODE				0x10
+#define PALMAS_RTC_CTRL_REG_TEST_MODE_SHIFT			4
+#define PALMAS_RTC_CTRL_REG_MODE_12_24				0x08
+#define PALMAS_RTC_CTRL_REG_MODE_12_24_SHIFT			3
+#define PALMAS_RTC_CTRL_REG_AUTO_COMP				0x04
+#define PALMAS_RTC_CTRL_REG_AUTO_COMP_SHIFT			2
+#define PALMAS_RTC_CTRL_REG_ROUND_30S				0x02
+#define PALMAS_RTC_CTRL_REG_ROUND_30S_SHIFT			1
+#define PALMAS_RTC_CTRL_REG_STOP_RTC				0x01
+#define PALMAS_RTC_CTRL_REG_STOP_RTC_SHIFT			0
+
+/* Bit definitions for RTC_STATUS_REG */
+#define PALMAS_RTC_STATUS_REG_POWER_UP				0x80
+#define PALMAS_RTC_STATUS_REG_POWER_UP_SHIFT			7
+#define PALMAS_RTC_STATUS_REG_ALARM				0x40
+#define PALMAS_RTC_STATUS_REG_ALARM_SHIFT			6
+#define PALMAS_RTC_STATUS_REG_EVENT_1D				0x20
+#define PALMAS_RTC_STATUS_REG_EVENT_1D_SHIFT			5
+#define PALMAS_RTC_STATUS_REG_EVENT_1H				0x10
+#define PALMAS_RTC_STATUS_REG_EVENT_1H_SHIFT			4
+#define PALMAS_RTC_STATUS_REG_EVENT_1M				0x08
+#define PALMAS_RTC_STATUS_REG_EVENT_1M_SHIFT			3
+#define PALMAS_RTC_STATUS_REG_EVENT_1S				0x04
+#define PALMAS_RTC_STATUS_REG_EVENT_1S_SHIFT			2
+#define PALMAS_RTC_STATUS_REG_RUN				0x02
+#define PALMAS_RTC_STATUS_REG_RUN_SHIFT				1
+
+/* Bit definitions for RTC_INTERRUPTS_REG */
+#define PALMAS_RTC_INTERRUPTS_REG_IT_SLEEP_MASK_EN		0x10
+#define PALMAS_RTC_INTERRUPTS_REG_IT_SLEEP_MASK_EN_SHIFT	4
+#define PALMAS_RTC_INTERRUPTS_REG_IT_ALARM			0x08
+#define PALMAS_RTC_INTERRUPTS_REG_IT_ALARM_SHIFT		3
+#define PALMAS_RTC_INTERRUPTS_REG_IT_TIMER			0x04
+#define PALMAS_RTC_INTERRUPTS_REG_IT_TIMER_SHIFT		2
+#define PALMAS_RTC_INTERRUPTS_REG_EVERY_MASK			0x03
+#define PALMAS_RTC_INTERRUPTS_REG_EVERY_SHIFT			0
+
+/* Bit definitions for RTC_COMP_LSB_REG */
+#define PALMAS_RTC_COMP_LSB_REG_RTC_COMP_LSB_MASK		0xff
+#define PALMAS_RTC_COMP_LSB_REG_RTC_COMP_LSB_SHIFT		0
+
+/* Bit definitions for RTC_COMP_MSB_REG */
+#define PALMAS_RTC_COMP_MSB_REG_RTC_COMP_MSB_MASK		0xff
+#define PALMAS_RTC_COMP_MSB_REG_RTC_COMP_MSB_SHIFT		0
+
+/* Bit definitions for RTC_RES_PROG_REG */
+#define PALMAS_RTC_RES_PROG_REG_SW_RES_PROG_MASK		0x3f
+#define PALMAS_RTC_RES_PROG_REG_SW_RES_PROG_SHIFT		0
+
+/* Bit definitions for RTC_RESET_STATUS_REG */
+#define PALMAS_RTC_RESET_STATUS_REG_RESET_STATUS		0x01
+#define PALMAS_RTC_RESET_STATUS_REG_RESET_STATUS_SHIFT		0
+
+/* Registers for function BACKUP */
+#define PALMAS_BACKUP0						0x0
+#define PALMAS_BACKUP1						0x1
+#define PALMAS_BACKUP2						0x2
+#define PALMAS_BACKUP3						0x3
+#define PALMAS_BACKUP4						0x4
+#define PALMAS_BACKUP5						0x5
+#define PALMAS_BACKUP6						0x6
+#define PALMAS_BACKUP7						0x7
+
+/* Bit definitions for BACKUP0 */
+#define PALMAS_BACKUP0_BACKUP_MASK				0xff
+#define PALMAS_BACKUP0_BACKUP_SHIFT				0
+
+/* Bit definitions for BACKUP1 */
+#define PALMAS_BACKUP1_BACKUP_MASK				0xff
+#define PALMAS_BACKUP1_BACKUP_SHIFT				0
+
+/* Bit definitions for BACKUP2 */
+#define PALMAS_BACKUP2_BACKUP_MASK				0xff
+#define PALMAS_BACKUP2_BACKUP_SHIFT				0
+
+/* Bit definitions for BACKUP3 */
+#define PALMAS_BACKUP3_BACKUP_MASK				0xff
+#define PALMAS_BACKUP3_BACKUP_SHIFT				0
+
+/* Bit definitions for BACKUP4 */
+#define PALMAS_BACKUP4_BACKUP_MASK				0xff
+#define PALMAS_BACKUP4_BACKUP_SHIFT				0
+
+/* Bit definitions for BACKUP5 */
+#define PALMAS_BACKUP5_BACKUP_MASK				0xff
+#define PALMAS_BACKUP5_BACKUP_SHIFT				0
+
+/* Bit definitions for BACKUP6 */
+#define PALMAS_BACKUP6_BACKUP_MASK				0xff
+#define PALMAS_BACKUP6_BACKUP_SHIFT				0
+
+/* Bit definitions for BACKUP7 */
+#define PALMAS_BACKUP7_BACKUP_MASK				0xff
+#define PALMAS_BACKUP7_BACKUP_SHIFT				0
+
+/* Registers for function SMPS */
+#define PALMAS_SMPS12_CTRL					0x0
+#define PALMAS_SMPS12_TSTEP					0x1
+#define PALMAS_SMPS12_FORCE					0x2
+#define PALMAS_SMPS12_VOLTAGE					0x3
+#define PALMAS_SMPS3_CTRL					0x4
+#define PALMAS_SMPS3_VOLTAGE					0x7
+#define PALMAS_SMPS45_CTRL					0x8
+#define PALMAS_SMPS45_TSTEP					0x9
+#define PALMAS_SMPS45_FORCE					0xA
+#define PALMAS_SMPS45_VOLTAGE					0xB
+#define PALMAS_SMPS6_CTRL					0xC
+#define PALMAS_SMPS6_TSTEP					0xD
+#define PALMAS_SMPS6_FORCE					0xE
+#define PALMAS_SMPS6_VOLTAGE					0xF
+#define PALMAS_SMPS7_CTRL					0x10
+#define PALMAS_SMPS7_VOLTAGE					0x13
+#define PALMAS_SMPS8_CTRL					0x14
+#define PALMAS_SMPS8_TSTEP					0x15
+#define PALMAS_SMPS8_FORCE					0x16
+#define PALMAS_SMPS8_VOLTAGE					0x17
+#define PALMAS_SMPS9_CTRL					0x18
+#define PALMAS_SMPS9_VOLTAGE					0x1B
+#define PALMAS_SMPS10_CTRL					0x1C
+#define PALMAS_SMPS10_STATUS					0x1F
+#define PALMAS_SMPS_CTRL					0x24
+#define PALMAS_SMPS_PD_CTRL					0x25
+#define PALMAS_SMPS_DITHER_EN					0x26
+#define PALMAS_SMPS_THERMAL_EN					0x27
+#define PALMAS_SMPS_THERMAL_STATUS				0x28
+#define PALMAS_SMPS_SHORT_STATUS				0x29
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN			0x2A
+#define PALMAS_SMPS_POWERGOOD_MASK1				0x2B
+#define PALMAS_SMPS_POWERGOOD_MASK2				0x2C
+
+/* Bit definitions for SMPS12_CTRL */
+#define PALMAS_SMPS12_CTRL_WR_S					0x80
+#define PALMAS_SMPS12_CTRL_WR_S_SHIFT				7
+#define PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN			0x40
+#define PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN_SHIFT			6
+#define PALMAS_SMPS12_CTRL_STATUS_MASK				0x30
+#define PALMAS_SMPS12_CTRL_STATUS_SHIFT				4
+#define PALMAS_SMPS12_CTRL_MODE_SLEEP_MASK			0x0c
+#define PALMAS_SMPS12_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK			0x03
+#define PALMAS_SMPS12_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for SMPS12_TSTEP */
+#define PALMAS_SMPS12_TSTEP_TSTEP_MASK				0x03
+#define PALMAS_SMPS12_TSTEP_TSTEP_SHIFT				0
+
+/* Bit definitions for SMPS12_FORCE */
+#define PALMAS_SMPS12_FORCE_CMD					0x80
+#define PALMAS_SMPS12_FORCE_CMD_SHIFT				7
+#define PALMAS_SMPS12_FORCE_VSEL_MASK				0x7f
+#define PALMAS_SMPS12_FORCE_VSEL_SHIFT				0
+
+/* Bit definitions for SMPS12_VOLTAGE */
+#define PALMAS_SMPS12_VOLTAGE_RANGE				0x80
+#define PALMAS_SMPS12_VOLTAGE_RANGE_SHIFT			7
+#define PALMAS_SMPS12_VOLTAGE_VSEL_MASK				0x7f
+#define PALMAS_SMPS12_VOLTAGE_VSEL_SHIFT			0
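+
+/*
+ * Illustrative sketch only (assumes a valid struct palmas *palmas): the
+ * MASK/SHIFT pairs in this file can be combined with the base address
+ * macros and regmap, e.g. to read the SMPS12 VSEL field:
+ *
+ *	unsigned int reg, val;
+ *	int slave = PALMAS_BASE_TO_SLAVE(PALMAS_SMPS_BASE);
+ *
+ *	reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, PALMAS_SMPS12_VOLTAGE);
+ *	if (!regmap_read(palmas->regmap[slave], reg, &val))
+ *		val = (val & PALMAS_SMPS12_VOLTAGE_VSEL_MASK) >>
+ *				PALMAS_SMPS12_VOLTAGE_VSEL_SHIFT;
+ */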
+
+/* Bit definitions for SMPS3_CTRL */
+#define PALMAS_SMPS3_CTRL_WR_S					0x80
+#define PALMAS_SMPS3_CTRL_WR_S_SHIFT				7
+#define PALMAS_SMPS3_CTRL_STATUS_MASK				0x30
+#define PALMAS_SMPS3_CTRL_STATUS_SHIFT				4
+#define PALMAS_SMPS3_CTRL_MODE_SLEEP_MASK			0x0c
+#define PALMAS_SMPS3_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_SMPS3_CTRL_MODE_ACTIVE_MASK			0x03
+#define PALMAS_SMPS3_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for SMPS3_VOLTAGE */
+#define PALMAS_SMPS3_VOLTAGE_RANGE				0x80
+#define PALMAS_SMPS3_VOLTAGE_RANGE_SHIFT			7
+#define PALMAS_SMPS3_VOLTAGE_VSEL_MASK				0x7f
+#define PALMAS_SMPS3_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for SMPS45_CTRL */
+#define PALMAS_SMPS45_CTRL_WR_S					0x80
+#define PALMAS_SMPS45_CTRL_WR_S_SHIFT				7
+#define PALMAS_SMPS45_CTRL_ROOF_FLOOR_EN			0x40
+#define PALMAS_SMPS45_CTRL_ROOF_FLOOR_EN_SHIFT			6
+#define PALMAS_SMPS45_CTRL_STATUS_MASK				0x30
+#define PALMAS_SMPS45_CTRL_STATUS_SHIFT				4
+#define PALMAS_SMPS45_CTRL_MODE_SLEEP_MASK			0x0c
+#define PALMAS_SMPS45_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_SMPS45_CTRL_MODE_ACTIVE_MASK			0x03
+#define PALMAS_SMPS45_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for SMPS45_TSTEP */
+#define PALMAS_SMPS45_TSTEP_TSTEP_MASK				0x03
+#define PALMAS_SMPS45_TSTEP_TSTEP_SHIFT				0
+
+/* Bit definitions for SMPS45_FORCE */
+#define PALMAS_SMPS45_FORCE_CMD					0x80
+#define PALMAS_SMPS45_FORCE_CMD_SHIFT				7
+#define PALMAS_SMPS45_FORCE_VSEL_MASK				0x7f
+#define PALMAS_SMPS45_FORCE_VSEL_SHIFT				0
+
+/* Bit definitions for SMPS45_VOLTAGE */
+#define PALMAS_SMPS45_VOLTAGE_RANGE				0x80
+#define PALMAS_SMPS45_VOLTAGE_RANGE_SHIFT			7
+#define PALMAS_SMPS45_VOLTAGE_VSEL_MASK				0x7f
+#define PALMAS_SMPS45_VOLTAGE_VSEL_SHIFT			0
+
+/* Bit definitions for SMPS6_CTRL */
+#define PALMAS_SMPS6_CTRL_WR_S					0x80
+#define PALMAS_SMPS6_CTRL_WR_S_SHIFT				7
+#define PALMAS_SMPS6_CTRL_ROOF_FLOOR_EN				0x40
+#define PALMAS_SMPS6_CTRL_ROOF_FLOOR_EN_SHIFT			6
+#define PALMAS_SMPS6_CTRL_STATUS_MASK				0x30
+#define PALMAS_SMPS6_CTRL_STATUS_SHIFT				4
+#define PALMAS_SMPS6_CTRL_MODE_SLEEP_MASK			0x0c
+#define PALMAS_SMPS6_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_SMPS6_CTRL_MODE_ACTIVE_MASK			0x03
+#define PALMAS_SMPS6_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for SMPS6_TSTEP */
+#define PALMAS_SMPS6_TSTEP_TSTEP_MASK				0x03
+#define PALMAS_SMPS6_TSTEP_TSTEP_SHIFT				0
+
+/* Bit definitions for SMPS6_FORCE */
+#define PALMAS_SMPS6_FORCE_CMD					0x80
+#define PALMAS_SMPS6_FORCE_CMD_SHIFT				7
+#define PALMAS_SMPS6_FORCE_VSEL_MASK				0x7f
+#define PALMAS_SMPS6_FORCE_VSEL_SHIFT				0
+
+/* Bit definitions for SMPS6_VOLTAGE */
+#define PALMAS_SMPS6_VOLTAGE_RANGE				0x80
+#define PALMAS_SMPS6_VOLTAGE_RANGE_SHIFT			7
+#define PALMAS_SMPS6_VOLTAGE_VSEL_MASK				0x7f
+#define PALMAS_SMPS6_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for SMPS7_CTRL */
+#define PALMAS_SMPS7_CTRL_WR_S					0x80
+#define PALMAS_SMPS7_CTRL_WR_S_SHIFT				7
+#define PALMAS_SMPS7_CTRL_STATUS_MASK				0x30
+#define PALMAS_SMPS7_CTRL_STATUS_SHIFT				4
+#define PALMAS_SMPS7_CTRL_MODE_SLEEP_MASK			0x0c
+#define PALMAS_SMPS7_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_SMPS7_CTRL_MODE_ACTIVE_MASK			0x03
+#define PALMAS_SMPS7_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for SMPS7_VOLTAGE */
+#define PALMAS_SMPS7_VOLTAGE_RANGE				0x80
+#define PALMAS_SMPS7_VOLTAGE_RANGE_SHIFT			7
+#define PALMAS_SMPS7_VOLTAGE_VSEL_MASK				0x7f
+#define PALMAS_SMPS7_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for SMPS8_CTRL */
+#define PALMAS_SMPS8_CTRL_WR_S					0x80
+#define PALMAS_SMPS8_CTRL_WR_S_SHIFT				7
+#define PALMAS_SMPS8_CTRL_ROOF_FLOOR_EN				0x40
+#define PALMAS_SMPS8_CTRL_ROOF_FLOOR_EN_SHIFT			6
+#define PALMAS_SMPS8_CTRL_STATUS_MASK				0x30
+#define PALMAS_SMPS8_CTRL_STATUS_SHIFT				4
+#define PALMAS_SMPS8_CTRL_MODE_SLEEP_MASK			0x0c
+#define PALMAS_SMPS8_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_SMPS8_CTRL_MODE_ACTIVE_MASK			0x03
+#define PALMAS_SMPS8_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for SMPS8_TSTEP */
+#define PALMAS_SMPS8_TSTEP_TSTEP_MASK				0x03
+#define PALMAS_SMPS8_TSTEP_TSTEP_SHIFT				0
+
+/* Bit definitions for SMPS8_FORCE */
+#define PALMAS_SMPS8_FORCE_CMD					0x80
+#define PALMAS_SMPS8_FORCE_CMD_SHIFT				7
+#define PALMAS_SMPS8_FORCE_VSEL_MASK				0x7f
+#define PALMAS_SMPS8_FORCE_VSEL_SHIFT				0
+
+/* Bit definitions for SMPS8_VOLTAGE */
+#define PALMAS_SMPS8_VOLTAGE_RANGE				0x80
+#define PALMAS_SMPS8_VOLTAGE_RANGE_SHIFT			7
+#define PALMAS_SMPS8_VOLTAGE_VSEL_MASK				0x7f
+#define PALMAS_SMPS8_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for SMPS9_CTRL */
+#define PALMAS_SMPS9_CTRL_WR_S					0x80
+#define PALMAS_SMPS9_CTRL_WR_S_SHIFT				7
+#define PALMAS_SMPS9_CTRL_STATUS_MASK				0x30
+#define PALMAS_SMPS9_CTRL_STATUS_SHIFT				4
+#define PALMAS_SMPS9_CTRL_MODE_SLEEP_MASK			0x0c
+#define PALMAS_SMPS9_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_SMPS9_CTRL_MODE_ACTIVE_MASK			0x03
+#define PALMAS_SMPS9_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for SMPS9_VOLTAGE */
+#define PALMAS_SMPS9_VOLTAGE_RANGE				0x80
+#define PALMAS_SMPS9_VOLTAGE_RANGE_SHIFT			7
+#define PALMAS_SMPS9_VOLTAGE_VSEL_MASK				0x7f
+#define PALMAS_SMPS9_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for SMPS10_CTRL */
+#define PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK			0xf0
+#define PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT			4
+#define PALMAS_SMPS10_CTRL_MODE_ACTIVE_MASK			0x0f
+#define PALMAS_SMPS10_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for SMPS10_STATUS */
+#define PALMAS_SMPS10_STATUS_STATUS_MASK			0x0f
+#define PALMAS_SMPS10_STATUS_STATUS_SHIFT			0
+
+/* Bit definitions for SMPS_CTRL */
+#define PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN			0x20
+#define PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN_SHIFT		5
+#define PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN			0x10
+#define PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN_SHIFT		4
+#define PALMAS_SMPS_CTRL_SMPS45_PHASE_CTRL_MASK			0x0c
+#define PALMAS_SMPS_CTRL_SMPS45_PHASE_CTRL_SHIFT		2
+#define PALMAS_SMPS_CTRL_SMPS123_PHASE_CTRL_MASK		0x03
+#define PALMAS_SMPS_CTRL_SMPS123_PHASE_CTRL_SHIFT		0
+
+/* Bit definitions for SMPS_PD_CTRL */
+#define PALMAS_SMPS_PD_CTRL_SMPS9				0x40
+#define PALMAS_SMPS_PD_CTRL_SMPS9_SHIFT				6
+#define PALMAS_SMPS_PD_CTRL_SMPS8				0x20
+#define PALMAS_SMPS_PD_CTRL_SMPS8_SHIFT				5
+#define PALMAS_SMPS_PD_CTRL_SMPS7				0x10
+#define PALMAS_SMPS_PD_CTRL_SMPS7_SHIFT				4
+#define PALMAS_SMPS_PD_CTRL_SMPS6				0x08
+#define PALMAS_SMPS_PD_CTRL_SMPS6_SHIFT				3
+#define PALMAS_SMPS_PD_CTRL_SMPS45				0x04
+#define PALMAS_SMPS_PD_CTRL_SMPS45_SHIFT			2
+#define PALMAS_SMPS_PD_CTRL_SMPS3				0x02
+#define PALMAS_SMPS_PD_CTRL_SMPS3_SHIFT				1
+#define PALMAS_SMPS_PD_CTRL_SMPS12				0x01
+#define PALMAS_SMPS_PD_CTRL_SMPS12_SHIFT			0
+
+/* Bit definitions for SMPS_THERMAL_EN */
+#define PALMAS_SMPS_THERMAL_EN_SMPS9				0x40
+#define PALMAS_SMPS_THERMAL_EN_SMPS9_SHIFT			6
+#define PALMAS_SMPS_THERMAL_EN_SMPS8				0x20
+#define PALMAS_SMPS_THERMAL_EN_SMPS8_SHIFT			5
+#define PALMAS_SMPS_THERMAL_EN_SMPS6				0x08
+#define PALMAS_SMPS_THERMAL_EN_SMPS6_SHIFT			3
+#define PALMAS_SMPS_THERMAL_EN_SMPS457				0x04
+#define PALMAS_SMPS_THERMAL_EN_SMPS457_SHIFT			2
+#define PALMAS_SMPS_THERMAL_EN_SMPS123				0x01
+#define PALMAS_SMPS_THERMAL_EN_SMPS123_SHIFT			0
+
+/* Bit definitions for SMPS_THERMAL_STATUS */
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS9			0x40
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS9_SHIFT			6
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS8			0x20
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS8_SHIFT			5
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS6			0x08
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS6_SHIFT			3
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS457			0x04
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS457_SHIFT		2
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS123			0x01
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS123_SHIFT		0
+
+/* Bit definitions for SMPS_SHORT_STATUS */
+#define PALMAS_SMPS_SHORT_STATUS_SMPS10				0x80
+#define PALMAS_SMPS_SHORT_STATUS_SMPS10_SHIFT			7
+#define PALMAS_SMPS_SHORT_STATUS_SMPS9				0x40
+#define PALMAS_SMPS_SHORT_STATUS_SMPS9_SHIFT			6
+#define PALMAS_SMPS_SHORT_STATUS_SMPS8				0x20
+#define PALMAS_SMPS_SHORT_STATUS_SMPS8_SHIFT			5
+#define PALMAS_SMPS_SHORT_STATUS_SMPS7				0x10
+#define PALMAS_SMPS_SHORT_STATUS_SMPS7_SHIFT			4
+#define PALMAS_SMPS_SHORT_STATUS_SMPS6				0x08
+#define PALMAS_SMPS_SHORT_STATUS_SMPS6_SHIFT			3
+#define PALMAS_SMPS_SHORT_STATUS_SMPS45				0x04
+#define PALMAS_SMPS_SHORT_STATUS_SMPS45_SHIFT			2
+#define PALMAS_SMPS_SHORT_STATUS_SMPS3				0x02
+#define PALMAS_SMPS_SHORT_STATUS_SMPS3_SHIFT			1
+#define PALMAS_SMPS_SHORT_STATUS_SMPS12				0x01
+#define PALMAS_SMPS_SHORT_STATUS_SMPS12_SHIFT			0
+
+/* Bit definitions for SMPS_NEGATIVE_CURRENT_LIMIT_EN */
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS9		0x40
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS9_SHIFT	6
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS8		0x20
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS8_SHIFT	5
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS7		0x10
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS7_SHIFT	4
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS6		0x08
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS6_SHIFT	3
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS45		0x04
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS45_SHIFT	2
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3		0x02
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3_SHIFT	1
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS12		0x01
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS12_SHIFT	0
+
+/* Bit definitions for SMPS_POWERGOOD_MASK1 */
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS10			0x80
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS10_SHIFT		7
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS9			0x40
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS9_SHIFT			6
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS8			0x20
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS8_SHIFT			5
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS7			0x10
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS7_SHIFT			4
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS6			0x08
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS6_SHIFT			3
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS45			0x04
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS45_SHIFT		2
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS3			0x02
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS3_SHIFT			1
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS12			0x01
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS12_SHIFT		0
+
+/* Bit definitions for SMPS_POWERGOOD_MASK2 */
+#define PALMAS_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT	0x80
+#define PALMAS_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT_SHIFT	7
+#define PALMAS_SMPS_POWERGOOD_MASK2_GPIO_7			0x04
+#define PALMAS_SMPS_POWERGOOD_MASK2_GPIO_7_SHIFT		2
+#define PALMAS_SMPS_POWERGOOD_MASK2_VBUS			0x02
+#define PALMAS_SMPS_POWERGOOD_MASK2_VBUS_SHIFT			1
+#define PALMAS_SMPS_POWERGOOD_MASK2_ACOK			0x01
+#define PALMAS_SMPS_POWERGOOD_MASK2_ACOK_SHIFT			0
+
+/* Registers for function LDO */
+#define PALMAS_LDO1_CTRL					0x0
+#define PALMAS_LDO1_VOLTAGE					0x1
+#define PALMAS_LDO2_CTRL					0x2
+#define PALMAS_LDO2_VOLTAGE					0x3
+#define PALMAS_LDO3_CTRL					0x4
+#define PALMAS_LDO3_VOLTAGE					0x5
+#define PALMAS_LDO4_CTRL					0x6
+#define PALMAS_LDO4_VOLTAGE					0x7
+#define PALMAS_LDO5_CTRL					0x8
+#define PALMAS_LDO5_VOLTAGE					0x9
+#define PALMAS_LDO6_CTRL					0xA
+#define PALMAS_LDO6_VOLTAGE					0xB
+#define PALMAS_LDO7_CTRL					0xC
+#define PALMAS_LDO7_VOLTAGE					0xD
+#define PALMAS_LDO8_CTRL					0xE
+#define PALMAS_LDO8_VOLTAGE					0xF
+#define PALMAS_LDO9_CTRL					0x10
+#define PALMAS_LDO9_VOLTAGE					0x11
+#define PALMAS_LDOLN_CTRL					0x12
+#define PALMAS_LDOLN_VOLTAGE					0x13
+#define PALMAS_LDOUSB_CTRL					0x14
+#define PALMAS_LDOUSB_VOLTAGE					0x15
+#define PALMAS_LDO_CTRL						0x1A
+#define PALMAS_LDO_PD_CTRL1					0x1B
+#define PALMAS_LDO_PD_CTRL2					0x1C
+#define PALMAS_LDO_SHORT_STATUS1				0x1D
+#define PALMAS_LDO_SHORT_STATUS2				0x1E
+
+/* Bit definitions for LDO1_CTRL */
+#define PALMAS_LDO1_CTRL_WR_S					0x80
+#define PALMAS_LDO1_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDO1_CTRL_STATUS					0x10
+#define PALMAS_LDO1_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDO1_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDO1_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDO1_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDO1_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDO1_VOLTAGE */
+#define PALMAS_LDO1_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDO1_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for LDO2_CTRL */
+#define PALMAS_LDO2_CTRL_WR_S					0x80
+#define PALMAS_LDO2_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDO2_CTRL_STATUS					0x10
+#define PALMAS_LDO2_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDO2_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDO2_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDO2_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDO2_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDO2_VOLTAGE */
+#define PALMAS_LDO2_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDO2_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for LDO3_CTRL */
+#define PALMAS_LDO3_CTRL_WR_S					0x80
+#define PALMAS_LDO3_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDO3_CTRL_STATUS					0x10
+#define PALMAS_LDO3_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDO3_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDO3_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDO3_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDO3_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDO3_VOLTAGE */
+#define PALMAS_LDO3_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDO3_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for LDO4_CTRL */
+#define PALMAS_LDO4_CTRL_WR_S					0x80
+#define PALMAS_LDO4_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDO4_CTRL_STATUS					0x10
+#define PALMAS_LDO4_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDO4_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDO4_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDO4_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDO4_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDO4_VOLTAGE */
+#define PALMAS_LDO4_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDO4_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for LDO5_CTRL */
+#define PALMAS_LDO5_CTRL_WR_S					0x80
+#define PALMAS_LDO5_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDO5_CTRL_STATUS					0x10
+#define PALMAS_LDO5_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDO5_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDO5_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDO5_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDO5_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDO5_VOLTAGE */
+#define PALMAS_LDO5_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDO5_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for LDO6_CTRL */
+#define PALMAS_LDO6_CTRL_WR_S					0x80
+#define PALMAS_LDO6_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDO6_CTRL_LDO_VIB_EN				0x40
+#define PALMAS_LDO6_CTRL_LDO_VIB_EN_SHIFT			6
+#define PALMAS_LDO6_CTRL_STATUS					0x10
+#define PALMAS_LDO6_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDO6_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDO6_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDO6_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDO6_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDO6_VOLTAGE */
+#define PALMAS_LDO6_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDO6_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for LDO7_CTRL */
+#define PALMAS_LDO7_CTRL_WR_S					0x80
+#define PALMAS_LDO7_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDO7_CTRL_STATUS					0x10
+#define PALMAS_LDO7_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDO7_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDO7_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDO7_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDO7_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDO7_VOLTAGE */
+#define PALMAS_LDO7_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDO7_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for LDO8_CTRL */
+#define PALMAS_LDO8_CTRL_WR_S					0x80
+#define PALMAS_LDO8_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDO8_CTRL_LDO_TRACKING_EN			0x40
+#define PALMAS_LDO8_CTRL_LDO_TRACKING_EN_SHIFT			6
+#define PALMAS_LDO8_CTRL_STATUS					0x10
+#define PALMAS_LDO8_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDO8_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDO8_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDO8_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDO8_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDO8_VOLTAGE */
+#define PALMAS_LDO8_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDO8_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for LDO9_CTRL */
+#define PALMAS_LDO9_CTRL_WR_S					0x80
+#define PALMAS_LDO9_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDO9_CTRL_LDO_BYPASS_EN				0x40
+#define PALMAS_LDO9_CTRL_LDO_BYPASS_EN_SHIFT			6
+#define PALMAS_LDO9_CTRL_STATUS					0x10
+#define PALMAS_LDO9_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDO9_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDO9_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDO9_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDO9_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDO9_VOLTAGE */
+#define PALMAS_LDO9_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDO9_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for LDOLN_CTRL */
+#define PALMAS_LDOLN_CTRL_WR_S					0x80
+#define PALMAS_LDOLN_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDOLN_CTRL_STATUS				0x10
+#define PALMAS_LDOLN_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDOLN_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDOLN_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDOLN_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDOLN_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDOLN_VOLTAGE */
+#define PALMAS_LDOLN_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDOLN_VOLTAGE_VSEL_SHIFT				0
+
+/* Bit definitions for LDOUSB_CTRL */
+#define PALMAS_LDOUSB_CTRL_WR_S					0x80
+#define PALMAS_LDOUSB_CTRL_WR_S_SHIFT				7
+#define PALMAS_LDOUSB_CTRL_STATUS				0x10
+#define PALMAS_LDOUSB_CTRL_STATUS_SHIFT				4
+#define PALMAS_LDOUSB_CTRL_MODE_SLEEP				0x04
+#define PALMAS_LDOUSB_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_LDOUSB_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_LDOUSB_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for LDOUSB_VOLTAGE */
+#define PALMAS_LDOUSB_VOLTAGE_VSEL_MASK				0x3f
+#define PALMAS_LDOUSB_VOLTAGE_VSEL_SHIFT			0
+
+/* Bit definitions for LDO_CTRL */
+#define PALMAS_LDO_CTRL_LDOUSB_ON_VBUS_VSYS			0x01
+#define PALMAS_LDO_CTRL_LDOUSB_ON_VBUS_VSYS_SHIFT		0
+
+/* Bit definitions for LDO_PD_CTRL1 */
+#define PALMAS_LDO_PD_CTRL1_LDO8				0x80
+#define PALMAS_LDO_PD_CTRL1_LDO8_SHIFT				7
+#define PALMAS_LDO_PD_CTRL1_LDO7				0x40
+#define PALMAS_LDO_PD_CTRL1_LDO7_SHIFT				6
+#define PALMAS_LDO_PD_CTRL1_LDO6				0x20
+#define PALMAS_LDO_PD_CTRL1_LDO6_SHIFT				5
+#define PALMAS_LDO_PD_CTRL1_LDO5				0x10
+#define PALMAS_LDO_PD_CTRL1_LDO5_SHIFT				4
+#define PALMAS_LDO_PD_CTRL1_LDO4				0x08
+#define PALMAS_LDO_PD_CTRL1_LDO4_SHIFT				3
+#define PALMAS_LDO_PD_CTRL1_LDO3				0x04
+#define PALMAS_LDO_PD_CTRL1_LDO3_SHIFT				2
+#define PALMAS_LDO_PD_CTRL1_LDO2				0x02
+#define PALMAS_LDO_PD_CTRL1_LDO2_SHIFT				1
+#define PALMAS_LDO_PD_CTRL1_LDO1				0x01
+#define PALMAS_LDO_PD_CTRL1_LDO1_SHIFT				0
+
+/* Bit definitions for LDO_PD_CTRL2 */
+#define PALMAS_LDO_PD_CTRL2_LDOUSB				0x04
+#define PALMAS_LDO_PD_CTRL2_LDOUSB_SHIFT			2
+#define PALMAS_LDO_PD_CTRL2_LDOLN				0x02
+#define PALMAS_LDO_PD_CTRL2_LDOLN_SHIFT				1
+#define PALMAS_LDO_PD_CTRL2_LDO9				0x01
+#define PALMAS_LDO_PD_CTRL2_LDO9_SHIFT				0
+
+/* Bit definitions for LDO_SHORT_STATUS1 */
+#define PALMAS_LDO_SHORT_STATUS1_LDO8				0x80
+#define PALMAS_LDO_SHORT_STATUS1_LDO8_SHIFT			7
+#define PALMAS_LDO_SHORT_STATUS1_LDO7				0x40
+#define PALMAS_LDO_SHORT_STATUS1_LDO7_SHIFT			6
+#define PALMAS_LDO_SHORT_STATUS1_LDO6				0x20
+#define PALMAS_LDO_SHORT_STATUS1_LDO6_SHIFT			5
+#define PALMAS_LDO_SHORT_STATUS1_LDO5				0x10
+#define PALMAS_LDO_SHORT_STATUS1_LDO5_SHIFT			4
+#define PALMAS_LDO_SHORT_STATUS1_LDO4				0x08
+#define PALMAS_LDO_SHORT_STATUS1_LDO4_SHIFT			3
+#define PALMAS_LDO_SHORT_STATUS1_LDO3				0x04
+#define PALMAS_LDO_SHORT_STATUS1_LDO3_SHIFT			2
+#define PALMAS_LDO_SHORT_STATUS1_LDO2				0x02
+#define PALMAS_LDO_SHORT_STATUS1_LDO2_SHIFT			1
+#define PALMAS_LDO_SHORT_STATUS1_LDO1				0x01
+#define PALMAS_LDO_SHORT_STATUS1_LDO1_SHIFT			0
+
+/* Bit definitions for LDO_SHORT_STATUS2 */
+#define PALMAS_LDO_SHORT_STATUS2_LDOVANA			0x08
+#define PALMAS_LDO_SHORT_STATUS2_LDOVANA_SHIFT			3
+#define PALMAS_LDO_SHORT_STATUS2_LDOUSB				0x04
+#define PALMAS_LDO_SHORT_STATUS2_LDOUSB_SHIFT			2
+#define PALMAS_LDO_SHORT_STATUS2_LDOLN				0x02
+#define PALMAS_LDO_SHORT_STATUS2_LDOLN_SHIFT			1
+#define PALMAS_LDO_SHORT_STATUS2_LDO9				0x01
+#define PALMAS_LDO_SHORT_STATUS2_LDO9_SHIFT			0
+
+/* Registers for function PMU_CONTROL */
+#define PALMAS_DEV_CTRL						0x0
+#define PALMAS_POWER_CTRL					0x1
+#define PALMAS_VSYS_LO						0x2
+#define PALMAS_VSYS_MON						0x3
+#define PALMAS_VBAT_MON						0x4
+#define PALMAS_WATCHDOG						0x5
+#define PALMAS_BOOT_STATUS					0x6
+#define PALMAS_BATTERY_BOUNCE					0x7
+#define PALMAS_BACKUP_BATTERY_CTRL				0x8
+#define PALMAS_LONG_PRESS_KEY					0x9
+#define PALMAS_OSC_THERM_CTRL					0xA
+#define PALMAS_BATDEBOUNCING					0xB
+#define PALMAS_SWOFF_HWRST					0xF
+#define PALMAS_SWOFF_COLDRST					0x10
+#define PALMAS_SWOFF_STATUS					0x11
+#define PALMAS_PMU_CONFIG					0x12
+#define PALMAS_SPARE						0x14
+#define PALMAS_PMU_SECONDARY_INT				0x15
+#define PALMAS_SW_REVISION					0x17
+#define PALMAS_EXT_CHRG_CTRL					0x18
+#define PALMAS_PMU_SECONDARY_INT2				0x19
+
+/* Bit definitions for DEV_CTRL */
+#define PALMAS_DEV_CTRL_DEV_STATUS_MASK				0x0c
+#define PALMAS_DEV_CTRL_DEV_STATUS_SHIFT			2
+#define PALMAS_DEV_CTRL_SW_RST					0x02
+#define PALMAS_DEV_CTRL_SW_RST_SHIFT				1
+#define PALMAS_DEV_CTRL_DEV_ON					0x01
+#define PALMAS_DEV_CTRL_DEV_ON_SHIFT				0
+
+/* Bit definitions for POWER_CTRL */
+#define PALMAS_POWER_CTRL_ENABLE2_MASK				0x04
+#define PALMAS_POWER_CTRL_ENABLE2_MASK_SHIFT			2
+#define PALMAS_POWER_CTRL_ENABLE1_MASK				0x02
+#define PALMAS_POWER_CTRL_ENABLE1_MASK_SHIFT			1
+#define PALMAS_POWER_CTRL_NSLEEP_MASK				0x01
+#define PALMAS_POWER_CTRL_NSLEEP_MASK_SHIFT			0
+
+/* Bit definitions for VSYS_LO */
+#define PALMAS_VSYS_LO_THRESHOLD_MASK				0x1f
+#define PALMAS_VSYS_LO_THRESHOLD_SHIFT				0
+
+/* Bit definitions for VSYS_MON */
+#define PALMAS_VSYS_MON_ENABLE					0x80
+#define PALMAS_VSYS_MON_ENABLE_SHIFT				7
+#define PALMAS_VSYS_MON_THRESHOLD_MASK				0x3f
+#define PALMAS_VSYS_MON_THRESHOLD_SHIFT				0
+
+/* Bit definitions for VBAT_MON */
+#define PALMAS_VBAT_MON_ENABLE					0x80
+#define PALMAS_VBAT_MON_ENABLE_SHIFT				7
+#define PALMAS_VBAT_MON_THRESHOLD_MASK				0x3f
+#define PALMAS_VBAT_MON_THRESHOLD_SHIFT				0
+
+/* Bit definitions for WATCHDOG */
+#define PALMAS_WATCHDOG_LOCK					0x20
+#define PALMAS_WATCHDOG_LOCK_SHIFT				5
+#define PALMAS_WATCHDOG_ENABLE					0x10
+#define PALMAS_WATCHDOG_ENABLE_SHIFT				4
+#define PALMAS_WATCHDOG_MODE					0x08
+#define PALMAS_WATCHDOG_MODE_SHIFT				3
+#define PALMAS_WATCHDOG_TIMER_MASK				0x07
+#define PALMAS_WATCHDOG_TIMER_SHIFT				0
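+
+/*
+ * Illustrative sketch only: enabling the watchdog with a given period
+ * could use regmap_update_bits() on the PMU_CONTROL block:
+ *
+ *	unsigned int reg = PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE,
+ *					      PALMAS_WATCHDOG);
+ *	int slave = PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE);
+ *
+ *	regmap_update_bits(palmas->regmap[slave], reg,
+ *			   PALMAS_WATCHDOG_ENABLE | PALMAS_WATCHDOG_TIMER_MASK,
+ *			   PALMAS_WATCHDOG_ENABLE | timer_period);
+ *
+ * "timer_period" (0-7) is a placeholder for this example.
+ */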
+
+/* Bit definitions for BOOT_STATUS */
+#define PALMAS_BOOT_STATUS_BOOT1				0x02
+#define PALMAS_BOOT_STATUS_BOOT1_SHIFT				1
+#define PALMAS_BOOT_STATUS_BOOT0				0x01
+#define PALMAS_BOOT_STATUS_BOOT0_SHIFT				0
+
+/* Bit definitions for BATTERY_BOUNCE */
+#define PALMAS_BATTERY_BOUNCE_BB_DELAY_MASK			0x3f
+#define PALMAS_BATTERY_BOUNCE_BB_DELAY_SHIFT			0
+
+/* Bit definitions for BACKUP_BATTERY_CTRL */
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_18_15			0x80
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_18_15_SHIFT		7
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_SLP			0x40
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_SLP_SHIFT		6
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_OFF			0x20
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_OFF_SHIFT		5
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_PWEN			0x10
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_PWEN_SHIFT		4
+#define PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG		0x08
+#define PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG_SHIFT	3
+#define PALMAS_BACKUP_BATTERY_CTRL_BB_SEL_MASK			0x06
+#define PALMAS_BACKUP_BATTERY_CTRL_BB_SEL_SHIFT			1
+#define PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN			0x01
+#define PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN_SHIFT		0
+
+/* Bit definitions for LONG_PRESS_KEY */
+#define PALMAS_LONG_PRESS_KEY_LPK_LOCK				0x80
+#define PALMAS_LONG_PRESS_KEY_LPK_LOCK_SHIFT			7
+#define PALMAS_LONG_PRESS_KEY_LPK_INT_CLR			0x10
+#define PALMAS_LONG_PRESS_KEY_LPK_INT_CLR_SHIFT			4
+#define PALMAS_LONG_PRESS_KEY_LPK_TIME_MASK			0x0c
+#define PALMAS_LONG_PRESS_KEY_LPK_TIME_SHIFT			2
+#define PALMAS_LONG_PRESS_KEY_PWRON_DEBOUNCE_MASK		0x03
+#define PALMAS_LONG_PRESS_KEY_PWRON_DEBOUNCE_SHIFT		0
+
+/* Bit definitions for OSC_THERM_CTRL */
+#define PALMAS_OSC_THERM_CTRL_VANA_ON_IN_SLEEP			0x80
+#define PALMAS_OSC_THERM_CTRL_VANA_ON_IN_SLEEP_SHIFT		7
+#define PALMAS_OSC_THERM_CTRL_INT_MASK_IN_SLEEP			0x40
+#define PALMAS_OSC_THERM_CTRL_INT_MASK_IN_SLEEP_SHIFT		6
+#define PALMAS_OSC_THERM_CTRL_RC15MHZ_ON_IN_SLEEP		0x20
+#define PALMAS_OSC_THERM_CTRL_RC15MHZ_ON_IN_SLEEP_SHIFT		5
+#define PALMAS_OSC_THERM_CTRL_THERM_OFF_IN_SLEEP		0x10
+#define PALMAS_OSC_THERM_CTRL_THERM_OFF_IN_SLEEP_SHIFT		4
+#define PALMAS_OSC_THERM_CTRL_THERM_HD_SEL_MASK			0x0c
+#define PALMAS_OSC_THERM_CTRL_THERM_HD_SEL_SHIFT		2
+#define PALMAS_OSC_THERM_CTRL_OSC_BYPASS			0x02
+#define PALMAS_OSC_THERM_CTRL_OSC_BYPASS_SHIFT			1
+#define PALMAS_OSC_THERM_CTRL_OSC_HPMODE			0x01
+#define PALMAS_OSC_THERM_CTRL_OSC_HPMODE_SHIFT			0
+
+/* Bit definitions for BATDEBOUNCING */
+#define PALMAS_BATDEBOUNCING_BAT_DEB_BYPASS			0x80
+#define PALMAS_BATDEBOUNCING_BAT_DEB_BYPASS_SHIFT		7
+#define PALMAS_BATDEBOUNCING_BINS_DEB_MASK			0x78
+#define PALMAS_BATDEBOUNCING_BINS_DEB_SHIFT			3
+#define PALMAS_BATDEBOUNCING_BEXT_DEB_MASK			0x07
+#define PALMAS_BATDEBOUNCING_BEXT_DEB_SHIFT			0
+
+/* Bit definitions for SWOFF_HWRST */
+#define PALMAS_SWOFF_HWRST_PWRON_LPK				0x80
+#define PALMAS_SWOFF_HWRST_PWRON_LPK_SHIFT			7
+#define PALMAS_SWOFF_HWRST_PWRDOWN				0x40
+#define PALMAS_SWOFF_HWRST_PWRDOWN_SHIFT			6
+#define PALMAS_SWOFF_HWRST_WTD					0x20
+#define PALMAS_SWOFF_HWRST_WTD_SHIFT				5
+#define PALMAS_SWOFF_HWRST_TSHUT				0x10
+#define PALMAS_SWOFF_HWRST_TSHUT_SHIFT				4
+#define PALMAS_SWOFF_HWRST_RESET_IN				0x08
+#define PALMAS_SWOFF_HWRST_RESET_IN_SHIFT			3
+#define PALMAS_SWOFF_HWRST_SW_RST				0x04
+#define PALMAS_SWOFF_HWRST_SW_RST_SHIFT				2
+#define PALMAS_SWOFF_HWRST_VSYS_LO				0x02
+#define PALMAS_SWOFF_HWRST_VSYS_LO_SHIFT			1
+#define PALMAS_SWOFF_HWRST_GPADC_SHUTDOWN			0x01
+#define PALMAS_SWOFF_HWRST_GPADC_SHUTDOWN_SHIFT			0
+
+/* Bit definitions for SWOFF_COLDRST */
+#define PALMAS_SWOFF_COLDRST_PWRON_LPK				0x80
+#define PALMAS_SWOFF_COLDRST_PWRON_LPK_SHIFT			7
+#define PALMAS_SWOFF_COLDRST_PWRDOWN				0x40
+#define PALMAS_SWOFF_COLDRST_PWRDOWN_SHIFT			6
+#define PALMAS_SWOFF_COLDRST_WTD				0x20
+#define PALMAS_SWOFF_COLDRST_WTD_SHIFT				5
+#define PALMAS_SWOFF_COLDRST_TSHUT				0x10
+#define PALMAS_SWOFF_COLDRST_TSHUT_SHIFT			4
+#define PALMAS_SWOFF_COLDRST_RESET_IN				0x08
+#define PALMAS_SWOFF_COLDRST_RESET_IN_SHIFT			3
+#define PALMAS_SWOFF_COLDRST_SW_RST				0x04
+#define PALMAS_SWOFF_COLDRST_SW_RST_SHIFT			2
+#define PALMAS_SWOFF_COLDRST_VSYS_LO				0x02
+#define PALMAS_SWOFF_COLDRST_VSYS_LO_SHIFT			1
+#define PALMAS_SWOFF_COLDRST_GPADC_SHUTDOWN			0x01
+#define PALMAS_SWOFF_COLDRST_GPADC_SHUTDOWN_SHIFT		0
+
+/* Bit definitions for SWOFF_STATUS */
+#define PALMAS_SWOFF_STATUS_PWRON_LPK				0x80
+#define PALMAS_SWOFF_STATUS_PWRON_LPK_SHIFT			7
+#define PALMAS_SWOFF_STATUS_PWRDOWN				0x40
+#define PALMAS_SWOFF_STATUS_PWRDOWN_SHIFT			6
+#define PALMAS_SWOFF_STATUS_WTD					0x20
+#define PALMAS_SWOFF_STATUS_WTD_SHIFT				5
+#define PALMAS_SWOFF_STATUS_TSHUT				0x10
+#define PALMAS_SWOFF_STATUS_TSHUT_SHIFT				4
+#define PALMAS_SWOFF_STATUS_RESET_IN				0x08
+#define PALMAS_SWOFF_STATUS_RESET_IN_SHIFT			3
+#define PALMAS_SWOFF_STATUS_SW_RST				0x04
+#define PALMAS_SWOFF_STATUS_SW_RST_SHIFT			2
+#define PALMAS_SWOFF_STATUS_VSYS_LO				0x02
+#define PALMAS_SWOFF_STATUS_VSYS_LO_SHIFT			1
+#define PALMAS_SWOFF_STATUS_GPADC_SHUTDOWN			0x01
+#define PALMAS_SWOFF_STATUS_GPADC_SHUTDOWN_SHIFT		0
+
+/* Bit definitions for PMU_CONFIG */
+#define PALMAS_PMU_CONFIG_MULTI_CELL_EN				0x40
+#define PALMAS_PMU_CONFIG_MULTI_CELL_EN_SHIFT			6
+#define PALMAS_PMU_CONFIG_SPARE_MASK				0x30
+#define PALMAS_PMU_CONFIG_SPARE_SHIFT				4
+#define PALMAS_PMU_CONFIG_SWOFF_DLY_MASK			0x0c
+#define PALMAS_PMU_CONFIG_SWOFF_DLY_SHIFT			2
+#define PALMAS_PMU_CONFIG_GATE_RESET_OUT			0x02
+#define PALMAS_PMU_CONFIG_GATE_RESET_OUT_SHIFT			1
+#define PALMAS_PMU_CONFIG_AUTODEVON				0x01
+#define PALMAS_PMU_CONFIG_AUTODEVON_SHIFT			0
+
+/* Bit definitions for SPARE */
+#define PALMAS_SPARE_SPARE_MASK					0xf8
+#define PALMAS_SPARE_SPARE_SHIFT				3
+#define PALMAS_SPARE_REGEN3_OD					0x04
+#define PALMAS_SPARE_REGEN3_OD_SHIFT				2
+#define PALMAS_SPARE_REGEN2_OD					0x02
+#define PALMAS_SPARE_REGEN2_OD_SHIFT				1
+#define PALMAS_SPARE_REGEN1_OD					0x01
+#define PALMAS_SPARE_REGEN1_OD_SHIFT				0
+
+/* Bit definitions for PMU_SECONDARY_INT */
+#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_INT_SRC		0x80
+#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_INT_SRC_SHIFT		7
+#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_INT_SRC		0x40
+#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_INT_SRC_SHIFT	6
+#define PALMAS_PMU_SECONDARY_INT_BB_INT_SRC			0x20
+#define PALMAS_PMU_SECONDARY_INT_BB_INT_SRC_SHIFT		5
+#define PALMAS_PMU_SECONDARY_INT_FBI_INT_SRC			0x10
+#define PALMAS_PMU_SECONDARY_INT_FBI_INT_SRC_SHIFT		4
+#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_MASK			0x08
+#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_MASK_SHIFT		3
+#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_MASK		0x04
+#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_MASK_SHIFT		2
+#define PALMAS_PMU_SECONDARY_INT_BB_MASK			0x02
+#define PALMAS_PMU_SECONDARY_INT_BB_MASK_SHIFT			1
+#define PALMAS_PMU_SECONDARY_INT_FBI_MASK			0x01
+#define PALMAS_PMU_SECONDARY_INT_FBI_MASK_SHIFT			0
+
+/* Bit definitions for SW_REVISION */
+#define PALMAS_SW_REVISION_SW_REVISION_MASK			0xff
+#define PALMAS_SW_REVISION_SW_REVISION_SHIFT			0
+
+/* Bit definitions for EXT_CHRG_CTRL */
+#define PALMAS_EXT_CHRG_CTRL_VBUS_OVV_STATUS			0x80
+#define PALMAS_EXT_CHRG_CTRL_VBUS_OVV_STATUS_SHIFT		7
+#define PALMAS_EXT_CHRG_CTRL_CHARG_DET_N_STATUS			0x40
+#define PALMAS_EXT_CHRG_CTRL_CHARG_DET_N_STATUS_SHIFT		6
+#define PALMAS_EXT_CHRG_CTRL_VSYS_DEBOUNCE_DELAY		0x08
+#define PALMAS_EXT_CHRG_CTRL_VSYS_DEBOUNCE_DELAY_SHIFT		3
+#define PALMAS_EXT_CHRG_CTRL_CHRG_DET_N				0x04
+#define PALMAS_EXT_CHRG_CTRL_CHRG_DET_N_SHIFT			2
+#define PALMAS_EXT_CHRG_CTRL_AUTO_ACA_EN			0x02
+#define PALMAS_EXT_CHRG_CTRL_AUTO_ACA_EN_SHIFT			1
+#define PALMAS_EXT_CHRG_CTRL_AUTO_LDOUSB_EN			0x01
+#define PALMAS_EXT_CHRG_CTRL_AUTO_LDOUSB_EN_SHIFT		0
+
+/* Bit definitions for PMU_SECONDARY_INT2 */
+#define PALMAS_PMU_SECONDARY_INT2_DVFS2_INT_SRC			0x20
+#define PALMAS_PMU_SECONDARY_INT2_DVFS2_INT_SRC_SHIFT		5
+#define PALMAS_PMU_SECONDARY_INT2_DVFS1_INT_SRC			0x10
+#define PALMAS_PMU_SECONDARY_INT2_DVFS1_INT_SRC_SHIFT		4
+#define PALMAS_PMU_SECONDARY_INT2_DVFS2_MASK			0x02
+#define PALMAS_PMU_SECONDARY_INT2_DVFS2_MASK_SHIFT		1
+#define PALMAS_PMU_SECONDARY_INT2_DVFS1_MASK			0x01
+#define PALMAS_PMU_SECONDARY_INT2_DVFS1_MASK_SHIFT		0
+
+/* Registers for function RESOURCE */
+#define PALMAS_CLK32KG_CTRL					0x0
+#define PALMAS_CLK32KGAUDIO_CTRL				0x1
+#define PALMAS_REGEN1_CTRL					0x2
+#define PALMAS_REGEN2_CTRL					0x3
+#define PALMAS_SYSEN1_CTRL					0x4
+#define PALMAS_SYSEN2_CTRL					0x5
+#define PALMAS_NSLEEP_RES_ASSIGN				0x6
+#define PALMAS_NSLEEP_SMPS_ASSIGN				0x7
+#define PALMAS_NSLEEP_LDO_ASSIGN1				0x8
+#define PALMAS_NSLEEP_LDO_ASSIGN2				0x9
+#define PALMAS_ENABLE1_RES_ASSIGN				0xA
+#define PALMAS_ENABLE1_SMPS_ASSIGN				0xB
+#define PALMAS_ENABLE1_LDO_ASSIGN1				0xC
+#define PALMAS_ENABLE1_LDO_ASSIGN2				0xD
+#define PALMAS_ENABLE2_RES_ASSIGN				0xE
+#define PALMAS_ENABLE2_SMPS_ASSIGN				0xF
+#define PALMAS_ENABLE2_LDO_ASSIGN1				0x10
+#define PALMAS_ENABLE2_LDO_ASSIGN2				0x11
+#define PALMAS_REGEN3_CTRL					0x12
+
+/* Bit definitions for CLK32KG_CTRL */
+#define PALMAS_CLK32KG_CTRL_STATUS				0x10
+#define PALMAS_CLK32KG_CTRL_STATUS_SHIFT			4
+#define PALMAS_CLK32KG_CTRL_MODE_SLEEP				0x04
+#define PALMAS_CLK32KG_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_CLK32KG_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_CLK32KG_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for CLK32KGAUDIO_CTRL */
+#define PALMAS_CLK32KGAUDIO_CTRL_STATUS				0x10
+#define PALMAS_CLK32KGAUDIO_CTRL_STATUS_SHIFT			4
+#define PALMAS_CLK32KGAUDIO_CTRL_RESERVED3			0x08
+#define PALMAS_CLK32KGAUDIO_CTRL_RESERVED3_SHIFT		3
+#define PALMAS_CLK32KGAUDIO_CTRL_MODE_SLEEP			0x04
+#define PALMAS_CLK32KGAUDIO_CTRL_MODE_SLEEP_SHIFT		2
+#define PALMAS_CLK32KGAUDIO_CTRL_MODE_ACTIVE			0x01
+#define PALMAS_CLK32KGAUDIO_CTRL_MODE_ACTIVE_SHIFT		0
+
+/* Bit definitions for REGEN1_CTRL */
+#define PALMAS_REGEN1_CTRL_STATUS				0x10
+#define PALMAS_REGEN1_CTRL_STATUS_SHIFT				4
+#define PALMAS_REGEN1_CTRL_MODE_SLEEP				0x04
+#define PALMAS_REGEN1_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_REGEN1_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_REGEN1_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for REGEN2_CTRL */
+#define PALMAS_REGEN2_CTRL_STATUS				0x10
+#define PALMAS_REGEN2_CTRL_STATUS_SHIFT				4
+#define PALMAS_REGEN2_CTRL_MODE_SLEEP				0x04
+#define PALMAS_REGEN2_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_REGEN2_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_REGEN2_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for SYSEN1_CTRL */
+#define PALMAS_SYSEN1_CTRL_STATUS				0x10
+#define PALMAS_SYSEN1_CTRL_STATUS_SHIFT				4
+#define PALMAS_SYSEN1_CTRL_MODE_SLEEP				0x04
+#define PALMAS_SYSEN1_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_SYSEN1_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_SYSEN1_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for SYSEN2_CTRL */
+#define PALMAS_SYSEN2_CTRL_STATUS				0x10
+#define PALMAS_SYSEN2_CTRL_STATUS_SHIFT				4
+#define PALMAS_SYSEN2_CTRL_MODE_SLEEP				0x04
+#define PALMAS_SYSEN2_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_SYSEN2_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_SYSEN2_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Bit definitions for NSLEEP_RES_ASSIGN */
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN3				0x40
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN3_SHIFT			6
+#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KGAUDIO			0x20
+#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KGAUDIO_SHIFT		5
+#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KG			0x10
+#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KG_SHIFT			4
+#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN2				0x08
+#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN2_SHIFT			3
+#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN1				0x04
+#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN1_SHIFT			2
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN2				0x02
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN2_SHIFT			1
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN1				0x01
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN1_SHIFT			0
+
+/* Bit definitions for NSLEEP_SMPS_ASSIGN */
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS10			0x80
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS10_SHIFT			7
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS9				0x40
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS9_SHIFT			6
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS8				0x20
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS8_SHIFT			5
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS7				0x10
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS7_SHIFT			4
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS6				0x08
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS6_SHIFT			3
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS45			0x04
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS45_SHIFT			2
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS3				0x02
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS3_SHIFT			1
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS12			0x01
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS12_SHIFT			0
+
+/* Bit definitions for NSLEEP_LDO_ASSIGN1 */
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO8				0x80
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO8_SHIFT			7
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO7				0x40
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO7_SHIFT			6
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO6				0x20
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO6_SHIFT			5
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO5				0x10
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO5_SHIFT			4
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO4				0x08
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO4_SHIFT			3
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO3				0x04
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO3_SHIFT			2
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO2				0x02
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO2_SHIFT			1
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO1				0x01
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO1_SHIFT			0
+
+/* Bit definitions for NSLEEP_LDO_ASSIGN2 */
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOUSB			0x04
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOUSB_SHIFT			2
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOLN				0x02
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOLN_SHIFT			1
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDO9				0x01
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDO9_SHIFT			0
+
+/* Bit definitions for ENABLE1_RES_ASSIGN */
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN3			0x40
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN3_SHIFT			6
+#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KGAUDIO			0x20
+#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KGAUDIO_SHIFT		5
+#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KG			0x10
+#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KG_SHIFT			4
+#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN2			0x08
+#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN2_SHIFT			3
+#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN1			0x04
+#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN1_SHIFT			2
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN2			0x02
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN2_SHIFT			1
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN1			0x01
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN1_SHIFT			0
+
+/* Bit definitions for ENABLE1_SMPS_ASSIGN */
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS10			0x80
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS10_SHIFT			7
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS9			0x40
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS9_SHIFT			6
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS8			0x20
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS8_SHIFT			5
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS7			0x10
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS7_SHIFT			4
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS6			0x08
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS6_SHIFT			3
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS45			0x04
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS45_SHIFT			2
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS3			0x02
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS3_SHIFT			1
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS12			0x01
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS12_SHIFT			0
+
+/* Bit definitions for ENABLE1_LDO_ASSIGN1 */
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO8				0x80
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO8_SHIFT			7
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO7				0x40
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO7_SHIFT			6
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO6				0x20
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO6_SHIFT			5
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO5				0x10
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO5_SHIFT			4
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO4				0x08
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO4_SHIFT			3
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO3				0x04
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO3_SHIFT			2
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO2				0x02
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO2_SHIFT			1
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO1				0x01
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO1_SHIFT			0
+
+/* Bit definitions for ENABLE1_LDO_ASSIGN2 */
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOUSB			0x04
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOUSB_SHIFT			2
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOLN			0x02
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOLN_SHIFT			1
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDO9				0x01
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDO9_SHIFT			0
+
+/* Bit definitions for ENABLE2_RES_ASSIGN */
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN3			0x40
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN3_SHIFT			6
+#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KGAUDIO			0x20
+#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KGAUDIO_SHIFT		5
+#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KG			0x10
+#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KG_SHIFT			4
+#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN2			0x08
+#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN2_SHIFT			3
+#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN1			0x04
+#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN1_SHIFT			2
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN2			0x02
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN2_SHIFT			1
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN1			0x01
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN1_SHIFT			0
+
+/* Bit definitions for ENABLE2_SMPS_ASSIGN */
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS10			0x80
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS10_SHIFT			7
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS9			0x40
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS9_SHIFT			6
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS8			0x20
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS8_SHIFT			5
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS7			0x10
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS7_SHIFT			4
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS6			0x08
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS6_SHIFT			3
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS45			0x04
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS45_SHIFT			2
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS3			0x02
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS3_SHIFT			1
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS12			0x01
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS12_SHIFT			0
+
+/* Bit definitions for ENABLE2_LDO_ASSIGN1 */
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO8				0x80
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO8_SHIFT			7
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO7				0x40
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO7_SHIFT			6
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO6				0x20
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO6_SHIFT			5
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO5				0x10
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO5_SHIFT			4
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO4				0x08
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO4_SHIFT			3
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO3				0x04
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO3_SHIFT			2
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO2				0x02
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO2_SHIFT			1
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO1				0x01
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO1_SHIFT			0
+
+/* Bit definitions for ENABLE2_LDO_ASSIGN2 */
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOUSB			0x04
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOUSB_SHIFT			2
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOLN			0x02
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOLN_SHIFT			1
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDO9				0x01
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDO9_SHIFT			0
+
+/* Bit definitions for REGEN3_CTRL */
+#define PALMAS_REGEN3_CTRL_STATUS				0x10
+#define PALMAS_REGEN3_CTRL_STATUS_SHIFT				4
+#define PALMAS_REGEN3_CTRL_MODE_SLEEP				0x04
+#define PALMAS_REGEN3_CTRL_MODE_SLEEP_SHIFT			2
+#define PALMAS_REGEN3_CTRL_MODE_ACTIVE				0x01
+#define PALMAS_REGEN3_CTRL_MODE_ACTIVE_SHIFT			0
+
+/* Registers for function PAD_CONTROL */
+#define PALMAS_PU_PD_INPUT_CTRL1				0x0
+#define PALMAS_PU_PD_INPUT_CTRL2				0x1
+#define PALMAS_PU_PD_INPUT_CTRL3				0x2
+#define PALMAS_OD_OUTPUT_CTRL					0x4
+#define PALMAS_POLARITY_CTRL					0x5
+#define PALMAS_PRIMARY_SECONDARY_PAD1				0x6
+#define PALMAS_PRIMARY_SECONDARY_PAD2				0x7
+#define PALMAS_I2C_SPI						0x8
+#define PALMAS_PU_PD_INPUT_CTRL4				0x9
+#define PALMAS_PRIMARY_SECONDARY_PAD3				0xA
+
+/* Bit definitions for PU_PD_INPUT_CTRL1 */
+#define PALMAS_PU_PD_INPUT_CTRL1_RESET_IN_PD			0x40
+#define PALMAS_PU_PD_INPUT_CTRL1_RESET_IN_PD_SHIFT		6
+#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PU			0x20
+#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PU_SHIFT		5
+#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PD			0x10
+#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PD_SHIFT		4
+#define PALMAS_PU_PD_INPUT_CTRL1_PWRDOWN_PD			0x04
+#define PALMAS_PU_PD_INPUT_CTRL1_PWRDOWN_PD_SHIFT		2
+#define PALMAS_PU_PD_INPUT_CTRL1_NRESWARM_PU			0x02
+#define PALMAS_PU_PD_INPUT_CTRL1_NRESWARM_PU_SHIFT		1
+
+/* Bit definitions for PU_PD_INPUT_CTRL2 */
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PU			0x20
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PU_SHIFT		5
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PD			0x10
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PD_SHIFT		4
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PU			0x08
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PU_SHIFT		3
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PD			0x04
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PD_SHIFT		2
+#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PU			0x02
+#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PU_SHIFT		1
+#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PD			0x01
+#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PD_SHIFT		0
+
+/* Bit definitions for PU_PD_INPUT_CTRL3 */
+#define PALMAS_PU_PD_INPUT_CTRL3_ACOK_PD			0x40
+#define PALMAS_PU_PD_INPUT_CTRL3_ACOK_PD_SHIFT			6
+#define PALMAS_PU_PD_INPUT_CTRL3_CHRG_DET_N_PD			0x10
+#define PALMAS_PU_PD_INPUT_CTRL3_CHRG_DET_N_PD_SHIFT		4
+#define PALMAS_PU_PD_INPUT_CTRL3_POWERHOLD_PD			0x04
+#define PALMAS_PU_PD_INPUT_CTRL3_POWERHOLD_PD_SHIFT		2
+#define PALMAS_PU_PD_INPUT_CTRL3_MSECURE_PD			0x01
+#define PALMAS_PU_PD_INPUT_CTRL3_MSECURE_PD_SHIFT		0
+
+/* Bit definitions for OD_OUTPUT_CTRL */
+#define PALMAS_OD_OUTPUT_CTRL_PWM_2_OD				0x80
+#define PALMAS_OD_OUTPUT_CTRL_PWM_2_OD_SHIFT			7
+#define PALMAS_OD_OUTPUT_CTRL_VBUSDET_OD			0x40
+#define PALMAS_OD_OUTPUT_CTRL_VBUSDET_OD_SHIFT			6
+#define PALMAS_OD_OUTPUT_CTRL_PWM_1_OD				0x20
+#define PALMAS_OD_OUTPUT_CTRL_PWM_1_OD_SHIFT			5
+#define PALMAS_OD_OUTPUT_CTRL_INT_OD				0x08
+#define PALMAS_OD_OUTPUT_CTRL_INT_OD_SHIFT			3
+
+/* Bit definitions for POLARITY_CTRL */
+#define PALMAS_POLARITY_CTRL_INT_POLARITY			0x80
+#define PALMAS_POLARITY_CTRL_INT_POLARITY_SHIFT			7
+#define PALMAS_POLARITY_CTRL_ENABLE2_POLARITY			0x40
+#define PALMAS_POLARITY_CTRL_ENABLE2_POLARITY_SHIFT		6
+#define PALMAS_POLARITY_CTRL_ENABLE1_POLARITY			0x20
+#define PALMAS_POLARITY_CTRL_ENABLE1_POLARITY_SHIFT		5
+#define PALMAS_POLARITY_CTRL_NSLEEP_POLARITY			0x10
+#define PALMAS_POLARITY_CTRL_NSLEEP_POLARITY_SHIFT		4
+#define PALMAS_POLARITY_CTRL_RESET_IN_POLARITY			0x08
+#define PALMAS_POLARITY_CTRL_RESET_IN_POLARITY_SHIFT		3
+#define PALMAS_POLARITY_CTRL_GPIO_3_CHRG_DET_N_POLARITY		0x04
+#define PALMAS_POLARITY_CTRL_GPIO_3_CHRG_DET_N_POLARITY_SHIFT	2
+#define PALMAS_POLARITY_CTRL_POWERGOOD_USB_PSEL_POLARITY	0x02
+#define PALMAS_POLARITY_CTRL_POWERGOOD_USB_PSEL_POLARITY_SHIFT	1
+#define PALMAS_POLARITY_CTRL_PWRDOWN_POLARITY			0x01
+#define PALMAS_POLARITY_CTRL_PWRDOWN_POLARITY_SHIFT		0
+
+/* Bit definitions for PRIMARY_SECONDARY_PAD1 */
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3			0x80
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3_SHIFT		7
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_MASK		0x60
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT		5
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK		0x18
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT		3
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0			0x04
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0_SHIFT		2
+#define PALMAS_PRIMARY_SECONDARY_PAD1_VAC			0x02
+#define PALMAS_PRIMARY_SECONDARY_PAD1_VAC_SHIFT			1
+#define PALMAS_PRIMARY_SECONDARY_PAD1_POWERGOOD			0x01
+#define PALMAS_PRIMARY_SECONDARY_PAD1_POWERGOOD_SHIFT		0
+
+/* Bit definitions for PRIMARY_SECONDARY_PAD2 */
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK		0x30
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_SHIFT		4
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6			0x08
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6_SHIFT		3
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK		0x06
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_SHIFT		1
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4			0x01
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4_SHIFT		0
+
+/* Bit definitions for I2C_SPI */
+#define PALMAS_I2C_SPI_I2C2OTP_EN				0x80
+#define PALMAS_I2C_SPI_I2C2OTP_EN_SHIFT				7
+#define PALMAS_I2C_SPI_I2C2OTP_PAGESEL				0x40
+#define PALMAS_I2C_SPI_I2C2OTP_PAGESEL_SHIFT			6
+#define PALMAS_I2C_SPI_ID_I2C2					0x20
+#define PALMAS_I2C_SPI_ID_I2C2_SHIFT				5
+#define PALMAS_I2C_SPI_I2C_SPI					0x10
+#define PALMAS_I2C_SPI_I2C_SPI_SHIFT				4
+#define PALMAS_I2C_SPI_ID_I2C1_MASK				0x0f
+#define PALMAS_I2C_SPI_ID_I2C1_SHIFT				0
+
+/* Bit definitions for PU_PD_INPUT_CTRL4 */
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_DAT_PD			0x40
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_DAT_PD_SHIFT		6
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_CLK_PD			0x10
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_CLK_PD_SHIFT		4
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_DAT_PD			0x04
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_DAT_PD_SHIFT		2
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_CLK_PD			0x01
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_CLK_PD_SHIFT		0
+
+/* Bit definitions for PRIMARY_SECONDARY_PAD3 */
+#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2			0x02
+#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2_SHIFT		1
+#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1			0x01
+#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1_SHIFT		0
+
+/* Registers for function LED_PWM */
+#define PALMAS_LED_PERIOD_CTRL					0x0
+#define PALMAS_LED_CTRL						0x1
+#define PALMAS_PWM_CTRL1					0x2
+#define PALMAS_PWM_CTRL2					0x3
+
+/* Bit definitions for LED_PERIOD_CTRL */
+#define PALMAS_LED_PERIOD_CTRL_LED_2_PERIOD_MASK		0x38
+#define PALMAS_LED_PERIOD_CTRL_LED_2_PERIOD_SHIFT		3
+#define PALMAS_LED_PERIOD_CTRL_LED_1_PERIOD_MASK		0x07
+#define PALMAS_LED_PERIOD_CTRL_LED_1_PERIOD_SHIFT		0
+
+/* Bit definitions for LED_CTRL */
+#define PALMAS_LED_CTRL_LED_2_SEQ				0x20
+#define PALMAS_LED_CTRL_LED_2_SEQ_SHIFT				5
+#define PALMAS_LED_CTRL_LED_1_SEQ				0x10
+#define PALMAS_LED_CTRL_LED_1_SEQ_SHIFT				4
+#define PALMAS_LED_CTRL_LED_2_ON_TIME_MASK			0x0c
+#define PALMAS_LED_CTRL_LED_2_ON_TIME_SHIFT			2
+#define PALMAS_LED_CTRL_LED_1_ON_TIME_MASK			0x03
+#define PALMAS_LED_CTRL_LED_1_ON_TIME_SHIFT			0
+
+/* Bit definitions for PWM_CTRL1 */
+#define PALMAS_PWM_CTRL1_PWM_FREQ_EN				0x02
+#define PALMAS_PWM_CTRL1_PWM_FREQ_EN_SHIFT			1
+#define PALMAS_PWM_CTRL1_PWM_FREQ_SEL				0x01
+#define PALMAS_PWM_CTRL1_PWM_FREQ_SEL_SHIFT			0
+
+/* Bit definitions for PWM_CTRL2 */
+#define PALMAS_PWM_CTRL2_PWM_DUTY_SEL_MASK			0xff
+#define PALMAS_PWM_CTRL2_PWM_DUTY_SEL_SHIFT			0
+
+/* Registers for function INTERRUPT */
+#define PALMAS_INT1_STATUS					0x0
+#define PALMAS_INT1_MASK					0x1
+#define PALMAS_INT1_LINE_STATE					0x2
+#define PALMAS_INT1_EDGE_DETECT1_RESERVED			0x3
+#define PALMAS_INT1_EDGE_DETECT2_RESERVED			0x4
+#define PALMAS_INT2_STATUS					0x5
+#define PALMAS_INT2_MASK					0x6
+#define PALMAS_INT2_LINE_STATE					0x7
+#define PALMAS_INT2_EDGE_DETECT1_RESERVED			0x8
+#define PALMAS_INT2_EDGE_DETECT2_RESERVED			0x9
+#define PALMAS_INT3_STATUS					0xA
+#define PALMAS_INT3_MASK					0xB
+#define PALMAS_INT3_LINE_STATE					0xC
+#define PALMAS_INT3_EDGE_DETECT1_RESERVED			0xD
+#define PALMAS_INT3_EDGE_DETECT2_RESERVED			0xE
+#define PALMAS_INT4_STATUS					0xF
+#define PALMAS_INT4_MASK					0x10
+#define PALMAS_INT4_LINE_STATE					0x11
+#define PALMAS_INT4_EDGE_DETECT1				0x12
+#define PALMAS_INT4_EDGE_DETECT2				0x13
+#define PALMAS_INT_CTRL						0x14
+
+/* Bit definitions for INT1_STATUS */
+#define PALMAS_INT1_STATUS_VBAT_MON				0x80
+#define PALMAS_INT1_STATUS_VBAT_MON_SHIFT			7
+#define PALMAS_INT1_STATUS_VSYS_MON				0x40
+#define PALMAS_INT1_STATUS_VSYS_MON_SHIFT			6
+#define PALMAS_INT1_STATUS_HOTDIE				0x20
+#define PALMAS_INT1_STATUS_HOTDIE_SHIFT				5
+#define PALMAS_INT1_STATUS_PWRDOWN				0x10
+#define PALMAS_INT1_STATUS_PWRDOWN_SHIFT			4
+#define PALMAS_INT1_STATUS_RPWRON				0x08
+#define PALMAS_INT1_STATUS_RPWRON_SHIFT				3
+#define PALMAS_INT1_STATUS_LONG_PRESS_KEY			0x04
+#define PALMAS_INT1_STATUS_LONG_PRESS_KEY_SHIFT			2
+#define PALMAS_INT1_STATUS_PWRON				0x02
+#define PALMAS_INT1_STATUS_PWRON_SHIFT				1
+#define PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV			0x01
+#define PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV_SHIFT		0
+
+/* Bit definitions for INT1_MASK */
+#define PALMAS_INT1_MASK_VBAT_MON				0x80
+#define PALMAS_INT1_MASK_VBAT_MON_SHIFT				7
+#define PALMAS_INT1_MASK_VSYS_MON				0x40
+#define PALMAS_INT1_MASK_VSYS_MON_SHIFT				6
+#define PALMAS_INT1_MASK_HOTDIE					0x20
+#define PALMAS_INT1_MASK_HOTDIE_SHIFT				5
+#define PALMAS_INT1_MASK_PWRDOWN				0x10
+#define PALMAS_INT1_MASK_PWRDOWN_SHIFT				4
+#define PALMAS_INT1_MASK_RPWRON					0x08
+#define PALMAS_INT1_MASK_RPWRON_SHIFT				3
+#define PALMAS_INT1_MASK_LONG_PRESS_KEY				0x04
+#define PALMAS_INT1_MASK_LONG_PRESS_KEY_SHIFT			2
+#define PALMAS_INT1_MASK_PWRON					0x02
+#define PALMAS_INT1_MASK_PWRON_SHIFT				1
+#define PALMAS_INT1_MASK_CHARG_DET_N_VBUS_OVV			0x01
+#define PALMAS_INT1_MASK_CHARG_DET_N_VBUS_OVV_SHIFT		0
+
+/* Bit definitions for INT1_LINE_STATE */
+#define PALMAS_INT1_LINE_STATE_VBAT_MON				0x80
+#define PALMAS_INT1_LINE_STATE_VBAT_MON_SHIFT			7
+#define PALMAS_INT1_LINE_STATE_VSYS_MON				0x40
+#define PALMAS_INT1_LINE_STATE_VSYS_MON_SHIFT			6
+#define PALMAS_INT1_LINE_STATE_HOTDIE				0x20
+#define PALMAS_INT1_LINE_STATE_HOTDIE_SHIFT			5
+#define PALMAS_INT1_LINE_STATE_PWRDOWN				0x10
+#define PALMAS_INT1_LINE_STATE_PWRDOWN_SHIFT			4
+#define PALMAS_INT1_LINE_STATE_RPWRON				0x08
+#define PALMAS_INT1_LINE_STATE_RPWRON_SHIFT			3
+#define PALMAS_INT1_LINE_STATE_LONG_PRESS_KEY			0x04
+#define PALMAS_INT1_LINE_STATE_LONG_PRESS_KEY_SHIFT		2
+#define PALMAS_INT1_LINE_STATE_PWRON				0x02
+#define PALMAS_INT1_LINE_STATE_PWRON_SHIFT			1
+#define PALMAS_INT1_LINE_STATE_CHARG_DET_N_VBUS_OVV		0x01
+#define PALMAS_INT1_LINE_STATE_CHARG_DET_N_VBUS_OVV_SHIFT	0
+
+/* Bit definitions for INT2_STATUS */
+#define PALMAS_INT2_STATUS_VAC_ACOK				0x80
+#define PALMAS_INT2_STATUS_VAC_ACOK_SHIFT			7
+#define PALMAS_INT2_STATUS_SHORT				0x40
+#define PALMAS_INT2_STATUS_SHORT_SHIFT				6
+#define PALMAS_INT2_STATUS_FBI_BB				0x20
+#define PALMAS_INT2_STATUS_FBI_BB_SHIFT				5
+#define PALMAS_INT2_STATUS_RESET_IN				0x10
+#define PALMAS_INT2_STATUS_RESET_IN_SHIFT			4
+#define PALMAS_INT2_STATUS_BATREMOVAL				0x08
+#define PALMAS_INT2_STATUS_BATREMOVAL_SHIFT			3
+#define PALMAS_INT2_STATUS_WDT					0x04
+#define PALMAS_INT2_STATUS_WDT_SHIFT				2
+#define PALMAS_INT2_STATUS_RTC_TIMER				0x02
+#define PALMAS_INT2_STATUS_RTC_TIMER_SHIFT			1
+#define PALMAS_INT2_STATUS_RTC_ALARM				0x01
+#define PALMAS_INT2_STATUS_RTC_ALARM_SHIFT			0
+
+/* Bit definitions for INT2_MASK */
+#define PALMAS_INT2_MASK_VAC_ACOK				0x80
+#define PALMAS_INT2_MASK_VAC_ACOK_SHIFT				7
+#define PALMAS_INT2_MASK_SHORT					0x40
+#define PALMAS_INT2_MASK_SHORT_SHIFT				6
+#define PALMAS_INT2_MASK_FBI_BB					0x20
+#define PALMAS_INT2_MASK_FBI_BB_SHIFT				5
+#define PALMAS_INT2_MASK_RESET_IN				0x10
+#define PALMAS_INT2_MASK_RESET_IN_SHIFT				4
+#define PALMAS_INT2_MASK_BATREMOVAL				0x08
+#define PALMAS_INT2_MASK_BATREMOVAL_SHIFT			3
+#define PALMAS_INT2_MASK_WDT					0x04
+#define PALMAS_INT2_MASK_WDT_SHIFT				2
+#define PALMAS_INT2_MASK_RTC_TIMER				0x02
+#define PALMAS_INT2_MASK_RTC_TIMER_SHIFT			1
+#define PALMAS_INT2_MASK_RTC_ALARM				0x01
+#define PALMAS_INT2_MASK_RTC_ALARM_SHIFT			0
+
+/* Bit definitions for INT2_LINE_STATE */
+#define PALMAS_INT2_LINE_STATE_VAC_ACOK				0x80
+#define PALMAS_INT2_LINE_STATE_VAC_ACOK_SHIFT			7
+#define PALMAS_INT2_LINE_STATE_SHORT				0x40
+#define PALMAS_INT2_LINE_STATE_SHORT_SHIFT			6
+#define PALMAS_INT2_LINE_STATE_FBI_BB				0x20
+#define PALMAS_INT2_LINE_STATE_FBI_BB_SHIFT			5
+#define PALMAS_INT2_LINE_STATE_RESET_IN				0x10
+#define PALMAS_INT2_LINE_STATE_RESET_IN_SHIFT			4
+#define PALMAS_INT2_LINE_STATE_BATREMOVAL			0x08
+#define PALMAS_INT2_LINE_STATE_BATREMOVAL_SHIFT			3
+#define PALMAS_INT2_LINE_STATE_WDT				0x04
+#define PALMAS_INT2_LINE_STATE_WDT_SHIFT			2
+#define PALMAS_INT2_LINE_STATE_RTC_TIMER			0x02
+#define PALMAS_INT2_LINE_STATE_RTC_TIMER_SHIFT			1
+#define PALMAS_INT2_LINE_STATE_RTC_ALARM			0x01
+#define PALMAS_INT2_LINE_STATE_RTC_ALARM_SHIFT			0
+
+/* Bit definitions for INT3_STATUS */
+#define PALMAS_INT3_STATUS_VBUS					0x80
+#define PALMAS_INT3_STATUS_VBUS_SHIFT				7
+#define PALMAS_INT3_STATUS_VBUS_OTG				0x40
+#define PALMAS_INT3_STATUS_VBUS_OTG_SHIFT			6
+#define PALMAS_INT3_STATUS_ID					0x20
+#define PALMAS_INT3_STATUS_ID_SHIFT				5
+#define PALMAS_INT3_STATUS_ID_OTG				0x10
+#define PALMAS_INT3_STATUS_ID_OTG_SHIFT				4
+#define PALMAS_INT3_STATUS_GPADC_EOC_RT				0x08
+#define PALMAS_INT3_STATUS_GPADC_EOC_RT_SHIFT			3
+#define PALMAS_INT3_STATUS_GPADC_EOC_SW				0x04
+#define PALMAS_INT3_STATUS_GPADC_EOC_SW_SHIFT			2
+#define PALMAS_INT3_STATUS_GPADC_AUTO_1				0x02
+#define PALMAS_INT3_STATUS_GPADC_AUTO_1_SHIFT			1
+#define PALMAS_INT3_STATUS_GPADC_AUTO_0				0x01
+#define PALMAS_INT3_STATUS_GPADC_AUTO_0_SHIFT			0
+
+/* Bit definitions for INT3_MASK */
+#define PALMAS_INT3_MASK_VBUS					0x80
+#define PALMAS_INT3_MASK_VBUS_SHIFT				7
+#define PALMAS_INT3_MASK_VBUS_OTG				0x40
+#define PALMAS_INT3_MASK_VBUS_OTG_SHIFT				6
+#define PALMAS_INT3_MASK_ID					0x20
+#define PALMAS_INT3_MASK_ID_SHIFT				5
+#define PALMAS_INT3_MASK_ID_OTG					0x10
+#define PALMAS_INT3_MASK_ID_OTG_SHIFT				4
+#define PALMAS_INT3_MASK_GPADC_EOC_RT				0x08
+#define PALMAS_INT3_MASK_GPADC_EOC_RT_SHIFT			3
+#define PALMAS_INT3_MASK_GPADC_EOC_SW				0x04
+#define PALMAS_INT3_MASK_GPADC_EOC_SW_SHIFT			2
+#define PALMAS_INT3_MASK_GPADC_AUTO_1				0x02
+#define PALMAS_INT3_MASK_GPADC_AUTO_1_SHIFT			1
+#define PALMAS_INT3_MASK_GPADC_AUTO_0				0x01
+#define PALMAS_INT3_MASK_GPADC_AUTO_0_SHIFT			0
+
+/* Bit definitions for INT3_LINE_STATE */
+#define PALMAS_INT3_LINE_STATE_VBUS				0x80
+#define PALMAS_INT3_LINE_STATE_VBUS_SHIFT			7
+#define PALMAS_INT3_LINE_STATE_VBUS_OTG				0x40
+#define PALMAS_INT3_LINE_STATE_VBUS_OTG_SHIFT			6
+#define PALMAS_INT3_LINE_STATE_ID				0x20
+#define PALMAS_INT3_LINE_STATE_ID_SHIFT				5
+#define PALMAS_INT3_LINE_STATE_ID_OTG				0x10
+#define PALMAS_INT3_LINE_STATE_ID_OTG_SHIFT			4
+#define PALMAS_INT3_LINE_STATE_GPADC_EOC_RT			0x08
+#define PALMAS_INT3_LINE_STATE_GPADC_EOC_RT_SHIFT		3
+#define PALMAS_INT3_LINE_STATE_GPADC_EOC_SW			0x04
+#define PALMAS_INT3_LINE_STATE_GPADC_EOC_SW_SHIFT		2
+#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_1			0x02
+#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_1_SHIFT		1
+#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_0			0x01
+#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_0_SHIFT		0
+
+/* Bit definitions for INT4_STATUS */
+#define PALMAS_INT4_STATUS_GPIO_7				0x80
+#define PALMAS_INT4_STATUS_GPIO_7_SHIFT				7
+#define PALMAS_INT4_STATUS_GPIO_6				0x40
+#define PALMAS_INT4_STATUS_GPIO_6_SHIFT				6
+#define PALMAS_INT4_STATUS_GPIO_5				0x20
+#define PALMAS_INT4_STATUS_GPIO_5_SHIFT				5
+#define PALMAS_INT4_STATUS_GPIO_4				0x10
+#define PALMAS_INT4_STATUS_GPIO_4_SHIFT				4
+#define PALMAS_INT4_STATUS_GPIO_3				0x08
+#define PALMAS_INT4_STATUS_GPIO_3_SHIFT				3
+#define PALMAS_INT4_STATUS_GPIO_2				0x04
+#define PALMAS_INT4_STATUS_GPIO_2_SHIFT				2
+#define PALMAS_INT4_STATUS_GPIO_1				0x02
+#define PALMAS_INT4_STATUS_GPIO_1_SHIFT				1
+#define PALMAS_INT4_STATUS_GPIO_0				0x01
+#define PALMAS_INT4_STATUS_GPIO_0_SHIFT				0
+
+/* Bit definitions for INT4_MASK */
+#define PALMAS_INT4_MASK_GPIO_7					0x80
+#define PALMAS_INT4_MASK_GPIO_7_SHIFT				7
+#define PALMAS_INT4_MASK_GPIO_6					0x40
+#define PALMAS_INT4_MASK_GPIO_6_SHIFT				6
+#define PALMAS_INT4_MASK_GPIO_5					0x20
+#define PALMAS_INT4_MASK_GPIO_5_SHIFT				5
+#define PALMAS_INT4_MASK_GPIO_4					0x10
+#define PALMAS_INT4_MASK_GPIO_4_SHIFT				4
+#define PALMAS_INT4_MASK_GPIO_3					0x08
+#define PALMAS_INT4_MASK_GPIO_3_SHIFT				3
+#define PALMAS_INT4_MASK_GPIO_2					0x04
+#define PALMAS_INT4_MASK_GPIO_2_SHIFT				2
+#define PALMAS_INT4_MASK_GPIO_1					0x02
+#define PALMAS_INT4_MASK_GPIO_1_SHIFT				1
+#define PALMAS_INT4_MASK_GPIO_0					0x01
+#define PALMAS_INT4_MASK_GPIO_0_SHIFT				0
+
+/* Bit definitions for INT4_LINE_STATE */
+#define PALMAS_INT4_LINE_STATE_GPIO_7				0x80
+#define PALMAS_INT4_LINE_STATE_GPIO_7_SHIFT			7
+#define PALMAS_INT4_LINE_STATE_GPIO_6				0x40
+#define PALMAS_INT4_LINE_STATE_GPIO_6_SHIFT			6
+#define PALMAS_INT4_LINE_STATE_GPIO_5				0x20
+#define PALMAS_INT4_LINE_STATE_GPIO_5_SHIFT			5
+#define PALMAS_INT4_LINE_STATE_GPIO_4				0x10
+#define PALMAS_INT4_LINE_STATE_GPIO_4_SHIFT			4
+#define PALMAS_INT4_LINE_STATE_GPIO_3				0x08
+#define PALMAS_INT4_LINE_STATE_GPIO_3_SHIFT			3
+#define PALMAS_INT4_LINE_STATE_GPIO_2				0x04
+#define PALMAS_INT4_LINE_STATE_GPIO_2_SHIFT			2
+#define PALMAS_INT4_LINE_STATE_GPIO_1				0x02
+#define PALMAS_INT4_LINE_STATE_GPIO_1_SHIFT			1
+#define PALMAS_INT4_LINE_STATE_GPIO_0				0x01
+#define PALMAS_INT4_LINE_STATE_GPIO_0_SHIFT			0
+
+/* Bit definitions for INT4_EDGE_DETECT1 */
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_RISING			0x80
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_RISING_SHIFT		7
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_FALLING			0x40
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_FALLING_SHIFT		6
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_RISING			0x20
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_RISING_SHIFT		5
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_FALLING			0x10
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_FALLING_SHIFT		4
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_RISING			0x08
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_RISING_SHIFT		3
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_FALLING			0x04
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_FALLING_SHIFT		2
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_RISING			0x02
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_RISING_SHIFT		1
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_FALLING			0x01
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_FALLING_SHIFT		0
+
+/* Bit definitions for INT4_EDGE_DETECT2 */
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_RISING			0x80
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_RISING_SHIFT		7
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_FALLING			0x40
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_FALLING_SHIFT		6
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_RISING			0x20
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_RISING_SHIFT		5
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_FALLING			0x10
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_FALLING_SHIFT		4
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_RISING			0x08
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_RISING_SHIFT		3
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_FALLING			0x04
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_FALLING_SHIFT		2
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_RISING			0x02
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_RISING_SHIFT		1
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_FALLING			0x01
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_FALLING_SHIFT		0
+
+/* Bit definitions for INT_CTRL */
+#define PALMAS_INT_CTRL_INT_PENDING				0x04
+#define PALMAS_INT_CTRL_INT_PENDING_SHIFT			2
+#define PALMAS_INT_CTRL_INT_CLEAR				0x01
+#define PALMAS_INT_CTRL_INT_CLEAR_SHIFT				0
+
+/* Registers for function USB_OTG */
+#define PALMAS_USB_WAKEUP					0x3
+#define PALMAS_USB_VBUS_CTRL_SET				0x4
+#define PALMAS_USB_VBUS_CTRL_CLR				0x5
+#define PALMAS_USB_ID_CTRL_SET					0x6
+#define PALMAS_USB_ID_CTRL_CLEAR				0x7
+#define PALMAS_USB_VBUS_INT_SRC					0x8
+#define PALMAS_USB_VBUS_INT_LATCH_SET				0x9
+#define PALMAS_USB_VBUS_INT_LATCH_CLR				0xA
+#define PALMAS_USB_VBUS_INT_EN_LO_SET				0xB
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR				0xC
+#define PALMAS_USB_VBUS_INT_EN_HI_SET				0xD
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR				0xE
+#define PALMAS_USB_ID_INT_SRC					0xF
+#define PALMAS_USB_ID_INT_LATCH_SET				0x10
+#define PALMAS_USB_ID_INT_LATCH_CLR				0x11
+#define PALMAS_USB_ID_INT_EN_LO_SET				0x12
+#define PALMAS_USB_ID_INT_EN_LO_CLR				0x13
+#define PALMAS_USB_ID_INT_EN_HI_SET				0x14
+#define PALMAS_USB_ID_INT_EN_HI_CLR				0x15
+#define PALMAS_USB_OTG_ADP_CTRL					0x16
+#define PALMAS_USB_OTG_ADP_HIGH					0x17
+#define PALMAS_USB_OTG_ADP_LOW					0x18
+#define PALMAS_USB_OTG_ADP_RISE					0x19
+#define PALMAS_USB_OTG_REVISION					0x1A
+
+/* Bit definitions for USB_WAKEUP */
+#define PALMAS_USB_WAKEUP_ID_WK_UP_COMP				0x01
+#define PALMAS_USB_WAKEUP_ID_WK_UP_COMP_SHIFT			0
+
+/* Bit definitions for USB_VBUS_CTRL_SET */
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_CHRG_VSYS			0x80
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_CHRG_VSYS_SHIFT		7
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_DISCHRG			0x20
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_DISCHRG_SHIFT		5
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SRC			0x10
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SRC_SHIFT		4
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SINK			0x08
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SINK_SHIFT		3
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP			0x04
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP_SHIFT		2
+
+/* Bit definitions for USB_VBUS_CTRL_CLR */
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_CHRG_VSYS			0x80
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_CHRG_VSYS_SHIFT		7
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_DISCHRG			0x20
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_DISCHRG_SHIFT		5
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SRC			0x10
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SRC_SHIFT		4
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SINK			0x08
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SINK_SHIFT		3
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_ACT_COMP			0x04
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_ACT_COMP_SHIFT		2
+
+/* Bit definitions for USB_ID_CTRL_SET */
+#define PALMAS_USB_ID_CTRL_SET_ID_PU_220K			0x80
+#define PALMAS_USB_ID_CTRL_SET_ID_PU_220K_SHIFT			7
+#define PALMAS_USB_ID_CTRL_SET_ID_PU_100K			0x40
+#define PALMAS_USB_ID_CTRL_SET_ID_PU_100K_SHIFT			6
+#define PALMAS_USB_ID_CTRL_SET_ID_GND_DRV			0x20
+#define PALMAS_USB_ID_CTRL_SET_ID_GND_DRV_SHIFT			5
+#define PALMAS_USB_ID_CTRL_SET_ID_SRC_16U			0x10
+#define PALMAS_USB_ID_CTRL_SET_ID_SRC_16U_SHIFT			4
+#define PALMAS_USB_ID_CTRL_SET_ID_SRC_5U			0x08
+#define PALMAS_USB_ID_CTRL_SET_ID_SRC_5U_SHIFT			3
+#define PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP			0x04
+#define PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP_SHIFT		2
+
+/* Bit definitions for USB_ID_CTRL_CLEAR */
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_220K			0x80
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_220K_SHIFT		7
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_100K			0x40
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_100K_SHIFT		6
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_GND_DRV			0x20
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_GND_DRV_SHIFT		5
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_16U			0x10
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_16U_SHIFT		4
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_5U			0x08
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_5U_SHIFT		3
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_ACT_COMP			0x04
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_ACT_COMP_SHIFT		2
+
+/* Bit definitions for USB_VBUS_INT_SRC */
+#define PALMAS_USB_VBUS_INT_SRC_VOTG_SESS_VLD			0x80
+#define PALMAS_USB_VBUS_INT_SRC_VOTG_SESS_VLD_SHIFT		7
+#define PALMAS_USB_VBUS_INT_SRC_VADP_PRB			0x40
+#define PALMAS_USB_VBUS_INT_SRC_VADP_PRB_SHIFT			6
+#define PALMAS_USB_VBUS_INT_SRC_VADP_SNS			0x20
+#define PALMAS_USB_VBUS_INT_SRC_VADP_SNS_SHIFT			5
+#define PALMAS_USB_VBUS_INT_SRC_VA_VBUS_VLD			0x08
+#define PALMAS_USB_VBUS_INT_SRC_VA_VBUS_VLD_SHIFT		3
+#define PALMAS_USB_VBUS_INT_SRC_VA_SESS_VLD			0x04
+#define PALMAS_USB_VBUS_INT_SRC_VA_SESS_VLD_SHIFT		2
+#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_VLD			0x02
+#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_VLD_SHIFT		1
+#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_END			0x01
+#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_END_SHIFT		0
+
+/* Bit definitions for USB_VBUS_INT_LATCH_SET */
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VOTG_SESS_VLD		0x80
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VOTG_SESS_VLD_SHIFT	7
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_PRB			0x40
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_PRB_SHIFT		6
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_SNS			0x20
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_SNS_SHIFT		5
+#define PALMAS_USB_VBUS_INT_LATCH_SET_ADP			0x10
+#define PALMAS_USB_VBUS_INT_LATCH_SET_ADP_SHIFT			4
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_VBUS_VLD		0x08
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_VBUS_VLD_SHIFT		3
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_SESS_VLD		0x04
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_SESS_VLD_SHIFT		2
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_VLD		0x02
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_VLD_SHIFT		1
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_END		0x01
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_END_SHIFT		0
+
+/* Bit definitions for USB_VBUS_INT_LATCH_CLR */
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VOTG_SESS_VLD		0x80
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VOTG_SESS_VLD_SHIFT	7
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_PRB			0x40
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_PRB_SHIFT		6
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_SNS			0x20
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_SNS_SHIFT		5
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_ADP			0x10
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_ADP_SHIFT			4
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_VBUS_VLD		0x08
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_VBUS_VLD_SHIFT		3
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_SESS_VLD		0x04
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_SESS_VLD_SHIFT		2
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_VLD		0x02
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_VLD_SHIFT		1
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_END		0x01
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_END_SHIFT		0
+
+/* Bit definitions for USB_VBUS_INT_EN_LO_SET */
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VOTG_SESS_VLD		0x80
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VOTG_SESS_VLD_SHIFT	7
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_PRB			0x40
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_PRB_SHIFT		6
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_SNS			0x20
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_SNS_SHIFT		5
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_VBUS_VLD		0x08
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_VBUS_VLD_SHIFT		3
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_SESS_VLD		0x04
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_SESS_VLD_SHIFT		2
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_VLD		0x02
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_VLD_SHIFT		1
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_END		0x01
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_END_SHIFT		0
+
+/* Bit definitions for USB_VBUS_INT_EN_LO_CLR */
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VOTG_SESS_VLD		0x80
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VOTG_SESS_VLD_SHIFT	7
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_PRB			0x40
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_PRB_SHIFT		6
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_SNS			0x20
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_SNS_SHIFT		5
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_VBUS_VLD		0x08
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_VBUS_VLD_SHIFT		3
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_SESS_VLD		0x04
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_SESS_VLD_SHIFT		2
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_VLD		0x02
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_VLD_SHIFT		1
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_END		0x01
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_END_SHIFT		0
+
+/* Bit definitions for USB_VBUS_INT_EN_HI_SET */
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VOTG_SESS_VLD		0x80
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VOTG_SESS_VLD_SHIFT	7
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_PRB			0x40
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_PRB_SHIFT		6
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_SNS			0x20
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_SNS_SHIFT		5
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_ADP			0x10
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_ADP_SHIFT			4
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_VBUS_VLD		0x08
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_VBUS_VLD_SHIFT		3
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_SESS_VLD		0x04
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_SESS_VLD_SHIFT		2
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_VLD		0x02
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_VLD_SHIFT		1
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_END		0x01
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_END_SHIFT		0
+
+/* Bit definitions for USB_VBUS_INT_EN_HI_CLR */
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VOTG_SESS_VLD		0x80
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VOTG_SESS_VLD_SHIFT	7
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_PRB			0x40
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_PRB_SHIFT		6
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_SNS			0x20
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_SNS_SHIFT		5
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_ADP			0x10
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_ADP_SHIFT			4
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_VBUS_VLD		0x08
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_VBUS_VLD_SHIFT		3
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_SESS_VLD		0x04
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_SESS_VLD_SHIFT		2
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_VLD		0x02
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_VLD_SHIFT		1
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_END		0x01
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_END_SHIFT		0
+
+/* Bit definitions for USB_ID_INT_SRC */
+#define PALMAS_USB_ID_INT_SRC_ID_FLOAT				0x10
+#define PALMAS_USB_ID_INT_SRC_ID_FLOAT_SHIFT			4
+#define PALMAS_USB_ID_INT_SRC_ID_A				0x08
+#define PALMAS_USB_ID_INT_SRC_ID_A_SHIFT			3
+#define PALMAS_USB_ID_INT_SRC_ID_B				0x04
+#define PALMAS_USB_ID_INT_SRC_ID_B_SHIFT			2
+#define PALMAS_USB_ID_INT_SRC_ID_C				0x02
+#define PALMAS_USB_ID_INT_SRC_ID_C_SHIFT			1
+#define PALMAS_USB_ID_INT_SRC_ID_GND				0x01
+#define PALMAS_USB_ID_INT_SRC_ID_GND_SHIFT			0
+
+/* Bit definitions for USB_ID_INT_LATCH_SET */
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_FLOAT			0x10
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_FLOAT_SHIFT		4
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_A			0x08
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_A_SHIFT			3
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_B			0x04
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_B_SHIFT			2
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_C			0x02
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_C_SHIFT			1
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_GND			0x01
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_GND_SHIFT		0
+
+/* Bit definitions for USB_ID_INT_LATCH_CLR */
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_FLOAT			0x10
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_FLOAT_SHIFT		4
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_A			0x08
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_A_SHIFT			3
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_B			0x04
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_B_SHIFT			2
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_C			0x02
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_C_SHIFT			1
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_GND			0x01
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_GND_SHIFT		0
+
+/* Bit definitions for USB_ID_INT_EN_LO_SET */
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_FLOAT			0x10
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_FLOAT_SHIFT		4
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_A			0x08
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_A_SHIFT			3
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_B			0x04
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_B_SHIFT			2
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_C			0x02
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_C_SHIFT			1
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_GND			0x01
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_GND_SHIFT		0
+
+/* Bit definitions for USB_ID_INT_EN_LO_CLR */
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_FLOAT			0x10
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_FLOAT_SHIFT		4
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_A			0x08
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_A_SHIFT			3
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_B			0x04
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_B_SHIFT			2
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_C			0x02
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_C_SHIFT			1
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_GND			0x01
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_GND_SHIFT		0
+
+/* Bit definitions for USB_ID_INT_EN_HI_SET */
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT			0x10
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT_SHIFT		4
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_A			0x08
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_A_SHIFT			3
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_B			0x04
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_B_SHIFT			2
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_C			0x02
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_C_SHIFT			1
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_GND			0x01
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_GND_SHIFT		0
+
+/* Bit definitions for USB_ID_INT_EN_HI_CLR */
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT			0x10
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT_SHIFT		4
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_A			0x08
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_A_SHIFT			3
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_B			0x04
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_B_SHIFT			2
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_C			0x02
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_C_SHIFT			1
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND			0x01
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND_SHIFT		0
+
+/* Bit definitions for USB_OTG_ADP_CTRL */
+#define PALMAS_USB_OTG_ADP_CTRL_ADP_EN				0x04
+#define PALMAS_USB_OTG_ADP_CTRL_ADP_EN_SHIFT			2
+#define PALMAS_USB_OTG_ADP_CTRL_ADP_MODE_MASK			0x03
+#define PALMAS_USB_OTG_ADP_CTRL_ADP_MODE_SHIFT			0
+
+/* Bit definitions for USB_OTG_ADP_HIGH */
+#define PALMAS_USB_OTG_ADP_HIGH_T_ADP_HIGH_MASK			0xff
+#define PALMAS_USB_OTG_ADP_HIGH_T_ADP_HIGH_SHIFT		0
+
+/* Bit definitions for USB_OTG_ADP_LOW */
+#define PALMAS_USB_OTG_ADP_LOW_T_ADP_LOW_MASK			0xff
+#define PALMAS_USB_OTG_ADP_LOW_T_ADP_LOW_SHIFT			0
+
+/* Bit definitions for USB_OTG_ADP_RISE */
+#define PALMAS_USB_OTG_ADP_RISE_T_ADP_RISE_MASK			0xff
+#define PALMAS_USB_OTG_ADP_RISE_T_ADP_RISE_SHIFT		0
+
+/* Bit definitions for USB_OTG_REVISION */
+#define PALMAS_USB_OTG_REVISION_OTG_REV				0x01
+#define PALMAS_USB_OTG_REVISION_OTG_REV_SHIFT			0
+
+/* Registers for function VIBRATOR */
+#define PALMAS_VIBRA_CTRL					0x0
+
+/* Bit definitions for VIBRA_CTRL */
+#define PALMAS_VIBRA_CTRL_PWM_DUTY_SEL_MASK			0x06
+#define PALMAS_VIBRA_CTRL_PWM_DUTY_SEL_SHIFT			1
+#define PALMAS_VIBRA_CTRL_PWM_FREQ_SEL				0x01
+#define PALMAS_VIBRA_CTRL_PWM_FREQ_SEL_SHIFT			0
+
+/* Registers for function GPIO */
+#define PALMAS_GPIO_DATA_IN					0x0
+#define PALMAS_GPIO_DATA_DIR					0x1
+#define PALMAS_GPIO_DATA_OUT					0x2
+#define PALMAS_GPIO_DEBOUNCE_EN					0x3
+#define PALMAS_GPIO_CLEAR_DATA_OUT				0x4
+#define PALMAS_GPIO_SET_DATA_OUT				0x5
+#define PALMAS_PU_PD_GPIO_CTRL1					0x6
+#define PALMAS_PU_PD_GPIO_CTRL2					0x7
+#define PALMAS_OD_OUTPUT_GPIO_CTRL				0x8
+
+/* Bit definitions for GPIO_DATA_IN */
+#define PALMAS_GPIO_DATA_IN_GPIO_7_IN				0x80
+#define PALMAS_GPIO_DATA_IN_GPIO_7_IN_SHIFT			7
+#define PALMAS_GPIO_DATA_IN_GPIO_6_IN				0x40
+#define PALMAS_GPIO_DATA_IN_GPIO_6_IN_SHIFT			6
+#define PALMAS_GPIO_DATA_IN_GPIO_5_IN				0x20
+#define PALMAS_GPIO_DATA_IN_GPIO_5_IN_SHIFT			5
+#define PALMAS_GPIO_DATA_IN_GPIO_4_IN				0x10
+#define PALMAS_GPIO_DATA_IN_GPIO_4_IN_SHIFT			4
+#define PALMAS_GPIO_DATA_IN_GPIO_3_IN				0x08
+#define PALMAS_GPIO_DATA_IN_GPIO_3_IN_SHIFT			3
+#define PALMAS_GPIO_DATA_IN_GPIO_2_IN				0x04
+#define PALMAS_GPIO_DATA_IN_GPIO_2_IN_SHIFT			2
+#define PALMAS_GPIO_DATA_IN_GPIO_1_IN				0x02
+#define PALMAS_GPIO_DATA_IN_GPIO_1_IN_SHIFT			1
+#define PALMAS_GPIO_DATA_IN_GPIO_0_IN				0x01
+#define PALMAS_GPIO_DATA_IN_GPIO_0_IN_SHIFT			0
+
+/* Bit definitions for GPIO_DATA_DIR */
+#define PALMAS_GPIO_DATA_DIR_GPIO_7_DIR				0x80
+#define PALMAS_GPIO_DATA_DIR_GPIO_7_DIR_SHIFT			7
+#define PALMAS_GPIO_DATA_DIR_GPIO_6_DIR				0x40
+#define PALMAS_GPIO_DATA_DIR_GPIO_6_DIR_SHIFT			6
+#define PALMAS_GPIO_DATA_DIR_GPIO_5_DIR				0x20
+#define PALMAS_GPIO_DATA_DIR_GPIO_5_DIR_SHIFT			5
+#define PALMAS_GPIO_DATA_DIR_GPIO_4_DIR				0x10
+#define PALMAS_GPIO_DATA_DIR_GPIO_4_DIR_SHIFT			4
+#define PALMAS_GPIO_DATA_DIR_GPIO_3_DIR				0x08
+#define PALMAS_GPIO_DATA_DIR_GPIO_3_DIR_SHIFT			3
+#define PALMAS_GPIO_DATA_DIR_GPIO_2_DIR				0x04
+#define PALMAS_GPIO_DATA_DIR_GPIO_2_DIR_SHIFT			2
+#define PALMAS_GPIO_DATA_DIR_GPIO_1_DIR				0x02
+#define PALMAS_GPIO_DATA_DIR_GPIO_1_DIR_SHIFT			1
+#define PALMAS_GPIO_DATA_DIR_GPIO_0_DIR				0x01
+#define PALMAS_GPIO_DATA_DIR_GPIO_0_DIR_SHIFT			0
+
+/* Bit definitions for GPIO_DATA_OUT */
+#define PALMAS_GPIO_DATA_OUT_GPIO_7_OUT				0x80
+#define PALMAS_GPIO_DATA_OUT_GPIO_7_OUT_SHIFT			7
+#define PALMAS_GPIO_DATA_OUT_GPIO_6_OUT				0x40
+#define PALMAS_GPIO_DATA_OUT_GPIO_6_OUT_SHIFT			6
+#define PALMAS_GPIO_DATA_OUT_GPIO_5_OUT				0x20
+#define PALMAS_GPIO_DATA_OUT_GPIO_5_OUT_SHIFT			5
+#define PALMAS_GPIO_DATA_OUT_GPIO_4_OUT				0x10
+#define PALMAS_GPIO_DATA_OUT_GPIO_4_OUT_SHIFT			4
+#define PALMAS_GPIO_DATA_OUT_GPIO_3_OUT				0x08
+#define PALMAS_GPIO_DATA_OUT_GPIO_3_OUT_SHIFT			3
+#define PALMAS_GPIO_DATA_OUT_GPIO_2_OUT				0x04
+#define PALMAS_GPIO_DATA_OUT_GPIO_2_OUT_SHIFT			2
+#define PALMAS_GPIO_DATA_OUT_GPIO_1_OUT				0x02
+#define PALMAS_GPIO_DATA_OUT_GPIO_1_OUT_SHIFT			1
+#define PALMAS_GPIO_DATA_OUT_GPIO_0_OUT				0x01
+#define PALMAS_GPIO_DATA_OUT_GPIO_0_OUT_SHIFT			0
+
+/* Bit definitions for GPIO_DEBOUNCE_EN */
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_7_DEBOUNCE_EN		0x80
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_7_DEBOUNCE_EN_SHIFT	7
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_6_DEBOUNCE_EN		0x40
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_6_DEBOUNCE_EN_SHIFT	6
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_5_DEBOUNCE_EN		0x20
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_5_DEBOUNCE_EN_SHIFT	5
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_4_DEBOUNCE_EN		0x10
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_4_DEBOUNCE_EN_SHIFT	4
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_3_DEBOUNCE_EN		0x08
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_3_DEBOUNCE_EN_SHIFT	3
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_2_DEBOUNCE_EN		0x04
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_2_DEBOUNCE_EN_SHIFT	2
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_1_DEBOUNCE_EN		0x02
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_1_DEBOUNCE_EN_SHIFT	1
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_0_DEBOUNCE_EN		0x01
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_0_DEBOUNCE_EN_SHIFT	0
+
+/* Bit definitions for GPIO_CLEAR_DATA_OUT */
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_7_CLEAR_DATA_OUT	0x80
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_7_CLEAR_DATA_OUT_SHIFT	7
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_6_CLEAR_DATA_OUT	0x40
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_6_CLEAR_DATA_OUT_SHIFT	6
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_5_CLEAR_DATA_OUT	0x20
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_5_CLEAR_DATA_OUT_SHIFT	5
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_4_CLEAR_DATA_OUT	0x10
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_4_CLEAR_DATA_OUT_SHIFT	4
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_3_CLEAR_DATA_OUT	0x08
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_3_CLEAR_DATA_OUT_SHIFT	3
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_2_CLEAR_DATA_OUT	0x04
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_2_CLEAR_DATA_OUT_SHIFT	2
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_1_CLEAR_DATA_OUT	0x02
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_1_CLEAR_DATA_OUT_SHIFT	1
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_0_CLEAR_DATA_OUT	0x01
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_0_CLEAR_DATA_OUT_SHIFT	0
+
+/* Bit definitions for GPIO_SET_DATA_OUT */
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_7_SET_DATA_OUT		0x80
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_7_SET_DATA_OUT_SHIFT	7
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_6_SET_DATA_OUT		0x40
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_6_SET_DATA_OUT_SHIFT	6
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_5_SET_DATA_OUT		0x20
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_5_SET_DATA_OUT_SHIFT	5
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_4_SET_DATA_OUT		0x10
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_4_SET_DATA_OUT_SHIFT	4
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_3_SET_DATA_OUT		0x08
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_3_SET_DATA_OUT_SHIFT	3
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_2_SET_DATA_OUT		0x04
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_2_SET_DATA_OUT_SHIFT	2
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_1_SET_DATA_OUT		0x02
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_1_SET_DATA_OUT_SHIFT	1
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_0_SET_DATA_OUT		0x01
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_0_SET_DATA_OUT_SHIFT	0
+
+/* Bit definitions for PU_PD_GPIO_CTRL1 */
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_3_PD			0x40
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_3_PD_SHIFT			6
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PU			0x20
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PU_SHIFT			5
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PD			0x10
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PD_SHIFT			4
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PU			0x08
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PU_SHIFT			3
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PD			0x04
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PD_SHIFT			2
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_0_PD			0x01
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_0_PD_SHIFT			0
+
+/* Bit definitions for PU_PD_GPIO_CTRL2 */
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_7_PD			0x40
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_7_PD_SHIFT			6
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PU			0x20
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PU_SHIFT			5
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PD			0x10
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PD_SHIFT			4
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PU			0x08
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PU_SHIFT			3
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PD			0x04
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PD_SHIFT			2
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PU			0x02
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PU_SHIFT			1
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PD			0x01
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PD_SHIFT			0
+
+/* Bit definitions for OD_OUTPUT_GPIO_CTRL */
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_5_OD			0x20
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_5_OD_SHIFT		5
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_2_OD			0x04
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_2_OD_SHIFT		2
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_1_OD			0x02
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_1_OD_SHIFT		1
+
+/* Registers for function GPADC */
+#define PALMAS_GPADC_CTRL1					0x0
+#define PALMAS_GPADC_CTRL2					0x1
+#define PALMAS_GPADC_RT_CTRL					0x2
+#define PALMAS_GPADC_AUTO_CTRL					0x3
+#define PALMAS_GPADC_STATUS					0x4
+#define PALMAS_GPADC_RT_SELECT					0x5
+#define PALMAS_GPADC_RT_CONV0_LSB				0x6
+#define PALMAS_GPADC_RT_CONV0_MSB				0x7
+#define PALMAS_GPADC_AUTO_SELECT				0x8
+#define PALMAS_GPADC_AUTO_CONV0_LSB				0x9
+#define PALMAS_GPADC_AUTO_CONV0_MSB				0xA
+#define PALMAS_GPADC_AUTO_CONV1_LSB				0xB
+#define PALMAS_GPADC_AUTO_CONV1_MSB				0xC
+#define PALMAS_GPADC_SW_SELECT					0xD
+#define PALMAS_GPADC_SW_CONV0_LSB				0xE
+#define PALMAS_GPADC_SW_CONV0_MSB				0xF
+#define PALMAS_GPADC_THRES_CONV0_LSB				0x10
+#define PALMAS_GPADC_THRES_CONV0_MSB				0x11
+#define PALMAS_GPADC_THRES_CONV1_LSB				0x12
+#define PALMAS_GPADC_THRES_CONV1_MSB				0x13
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN				0x14
+#define PALMAS_GPADC_SMPS_VSEL_MONITORING			0x15
+
+/* Bit definitions for GPADC_CTRL1 */
+#define PALMAS_GPADC_CTRL1_RESERVED_MASK			0xc0
+#define PALMAS_GPADC_CTRL1_RESERVED_SHIFT			6
+#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_MASK			0x30
+#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_SHIFT		4
+#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_MASK			0x0c
+#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_SHIFT		2
+#define PALMAS_GPADC_CTRL1_BAT_REMOVAL_DET			0x02
+#define PALMAS_GPADC_CTRL1_BAT_REMOVAL_DET_SHIFT		1
+#define PALMAS_GPADC_CTRL1_GPADC_FORCE				0x01
+#define PALMAS_GPADC_CTRL1_GPADC_FORCE_SHIFT			0
+
+/* Bit definitions for GPADC_CTRL2 */
+#define PALMAS_GPADC_CTRL2_RESERVED_MASK			0x06
+#define PALMAS_GPADC_CTRL2_RESERVED_SHIFT			1
+
+/* Bit definitions for GPADC_RT_CTRL */
+#define PALMAS_GPADC_RT_CTRL_EXTEND_DELAY			0x02
+#define PALMAS_GPADC_RT_CTRL_EXTEND_DELAY_SHIFT			1
+#define PALMAS_GPADC_RT_CTRL_START_POLARITY			0x01
+#define PALMAS_GPADC_RT_CTRL_START_POLARITY_SHIFT		0
+
+/* Bit definitions for GPADC_AUTO_CTRL */
+#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1			0x80
+#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1_SHIFT		7
+#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0			0x40
+#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0_SHIFT		6
+#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN			0x20
+#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN_SHIFT		5
+#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN			0x10
+#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN_SHIFT		4
+#define PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_MASK		0x0f
+#define PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_SHIFT		0
+
+/* Bit definitions for GPADC_STATUS */
+#define PALMAS_GPADC_STATUS_GPADC_AVAILABLE			0x10
+#define PALMAS_GPADC_STATUS_GPADC_AVAILABLE_SHIFT		4
+
+/* Bit definitions for GPADC_RT_SELECT */
+#define PALMAS_GPADC_RT_SELECT_RT_CONV_EN			0x80
+#define PALMAS_GPADC_RT_SELECT_RT_CONV_EN_SHIFT			7
+#define PALMAS_GPADC_RT_SELECT_RT_CONV0_SEL_MASK		0x0f
+#define PALMAS_GPADC_RT_SELECT_RT_CONV0_SEL_SHIFT		0
+
+/* Bit definitions for GPADC_RT_CONV0_LSB */
+#define PALMAS_GPADC_RT_CONV0_LSB_RT_CONV0_LSB_MASK		0xff
+#define PALMAS_GPADC_RT_CONV0_LSB_RT_CONV0_LSB_SHIFT		0
+
+/* Bit definitions for GPADC_RT_CONV0_MSB */
+#define PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_MASK		0x0f
+#define PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_SHIFT		0
+
+/* Bit definitions for GPADC_AUTO_SELECT */
+#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV1_SEL_MASK		0xf0
+#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV1_SEL_SHIFT		4
+#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV0_SEL_MASK		0x0f
+#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV0_SEL_SHIFT		0
+
+/* Bit definitions for GPADC_AUTO_CONV0_LSB */
+#define PALMAS_GPADC_AUTO_CONV0_LSB_AUTO_CONV0_LSB_MASK		0xff
+#define PALMAS_GPADC_AUTO_CONV0_LSB_AUTO_CONV0_LSB_SHIFT	0
+
+/* Bit definitions for GPADC_AUTO_CONV0_MSB */
+#define PALMAS_GPADC_AUTO_CONV0_MSB_AUTO_CONV0_MSB_MASK		0x0f
+#define PALMAS_GPADC_AUTO_CONV0_MSB_AUTO_CONV0_MSB_SHIFT	0
+
+/* Bit definitions for GPADC_AUTO_CONV1_LSB */
+#define PALMAS_GPADC_AUTO_CONV1_LSB_AUTO_CONV1_LSB_MASK		0xff
+#define PALMAS_GPADC_AUTO_CONV1_LSB_AUTO_CONV1_LSB_SHIFT	0
+
+/* Bit definitions for GPADC_AUTO_CONV1_MSB */
+#define PALMAS_GPADC_AUTO_CONV1_MSB_AUTO_CONV1_MSB_MASK		0x0f
+#define PALMAS_GPADC_AUTO_CONV1_MSB_AUTO_CONV1_MSB_SHIFT	0
+
+/* Bit definitions for GPADC_SW_SELECT */
+#define PALMAS_GPADC_SW_SELECT_SW_CONV_EN			0x80
+#define PALMAS_GPADC_SW_SELECT_SW_CONV_EN_SHIFT			7
+#define PALMAS_GPADC_SW_SELECT_SW_START_CONV0			0x10
+#define PALMAS_GPADC_SW_SELECT_SW_START_CONV0_SHIFT		4
+#define PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_MASK		0x0f
+#define PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_SHIFT		0
+
+/* Bit definitions for GPADC_SW_CONV0_LSB */
+#define PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_MASK		0xff
+#define PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_SHIFT		0
+
+/* Bit definitions for GPADC_SW_CONV0_MSB */
+#define PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_MASK		0x0f
+#define PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_SHIFT		0
+
+/* Bit definitions for GPADC_THRES_CONV0_LSB */
+#define PALMAS_GPADC_THRES_CONV0_LSB_THRES_CONV0_LSB_MASK	0xff
+#define PALMAS_GPADC_THRES_CONV0_LSB_THRES_CONV0_LSB_SHIFT	0
+
+/* Bit definitions for GPADC_THRES_CONV0_MSB */
+#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_POL		0x80
+#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_POL_SHIFT	7
+#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_MSB_MASK	0x0f
+#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_MSB_SHIFT	0
+
+/* Bit definitions for GPADC_THRES_CONV1_LSB */
+#define PALMAS_GPADC_THRES_CONV1_LSB_THRES_CONV1_LSB_MASK	0xff
+#define PALMAS_GPADC_THRES_CONV1_LSB_THRES_CONV1_LSB_SHIFT	0
+
+/* Bit definitions for GPADC_THRES_CONV1_MSB */
+#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_POL		0x80
+#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_POL_SHIFT	7
+#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_MSB_MASK	0x0f
+#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_MSB_SHIFT	0
+
+/* Bit definitions for GPADC_SMPS_ILMONITOR_EN */
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_EN		0x20
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_EN_SHIFT	5
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_REXT		0x10
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_REXT_SHIFT	4
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_SEL_MASK	0x0f
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_SEL_SHIFT	0
+
+/* Bit definitions for GPADC_SMPS_VSEL_MONITORING */
+#define PALMAS_GPADC_SMPS_VSEL_MONITORING_ACTIVE_PHASE		0x80
+#define PALMAS_GPADC_SMPS_VSEL_MONITORING_ACTIVE_PHASE_SHIFT	7
+#define PALMAS_GPADC_SMPS_VSEL_MONITORING_SMPS_VSEL_MONITORING_MASK	0x7f
+#define PALMAS_GPADC_SMPS_VSEL_MONITORING_SMPS_VSEL_MONITORING_SHIFT	0
+
+/* Registers for function GPADC */
+#define PALMAS_GPADC_TRIM1					0x0
+#define PALMAS_GPADC_TRIM2					0x1
+#define PALMAS_GPADC_TRIM3					0x2
+#define PALMAS_GPADC_TRIM4					0x3
+#define PALMAS_GPADC_TRIM5					0x4
+#define PALMAS_GPADC_TRIM6					0x5
+#define PALMAS_GPADC_TRIM7					0x6
+#define PALMAS_GPADC_TRIM8					0x7
+#define PALMAS_GPADC_TRIM9					0x8
+#define PALMAS_GPADC_TRIM10					0x9
+#define PALMAS_GPADC_TRIM11					0xA
+#define PALMAS_GPADC_TRIM12					0xB
+#define PALMAS_GPADC_TRIM13					0xC
+#define PALMAS_GPADC_TRIM14					0xD
+#define PALMAS_GPADC_TRIM15					0xE
+#define PALMAS_GPADC_TRIM16					0xF
+
+#endif /*  __LINUX_MFD_PALMAS_H */
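The Palmas register blocks above follow the usual mask/shift convention: a field is
recovered by ANDing the raw register byte with the *_MASK macro and shifting right
by the matching *_SHIFT. A minimal sketch, not part of the patch (the helper name is
an assumption):

	/* Illustrative only: extract the 4-bit MSB portion of a GPADC
	 * real-time conversion result using the macros defined above. */
	static inline unsigned int palmas_gpadc_rt_msb(unsigned int raw)
	{
		return (raw & PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_MASK) >>
			PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_SHIFT;
	}
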
diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h
index 0b64b19..c42fe92 100644
--- a/include/linux/mfd/rc5t583.h
+++ b/include/linux/mfd/rc5t583.h
@@ -250,6 +250,26 @@
 	RC5T583_EXT_PWRREQ2_CONTROL = 0x2,
 };
 
+enum {
+	RC5T583_REGULATOR_DC0,
+	RC5T583_REGULATOR_DC1,
+	RC5T583_REGULATOR_DC2,
+	RC5T583_REGULATOR_DC3,
+	RC5T583_REGULATOR_LDO0,
+	RC5T583_REGULATOR_LDO1,
+	RC5T583_REGULATOR_LDO2,
+	RC5T583_REGULATOR_LDO3,
+	RC5T583_REGULATOR_LDO4,
+	RC5T583_REGULATOR_LDO5,
+	RC5T583_REGULATOR_LDO6,
+	RC5T583_REGULATOR_LDO7,
+	RC5T583_REGULATOR_LDO8,
+	RC5T583_REGULATOR_LDO9,
+
+	/* Should be last entry */
+	RC5T583_REGULATOR_MAX,
+};
+
 struct rc5t583 {
 	struct device	*dev;
 	struct regmap	*regmap;
@@ -273,11 +293,20 @@
  * The board specific data is provided through this structure.
 * @irq_base: IRQ base number on which this device registers its interrupts.
  * @enable_shutdown: Enable shutdown through the input pin "shutdown".
+ * @regulator_deepsleep_slot: The slot number on which the regulator goes to
+ *		sleep when the device enters sleep mode.
+ * @regulator_ext_pwr_control: External power request regulator control. The
+ *		regulator output enable/disable is controlled by the external
+ *		power request input state.
+ * @reg_init_data: Regulator init data.
  */
 
 struct rc5t583_platform_data {
 	int		irq_base;
 	bool		enable_shutdown;
+	int		regulator_deepsleep_slot[RC5T583_REGULATOR_MAX];
+	unsigned long	regulator_ext_pwr_control[RC5T583_REGULATOR_MAX];
+	struct regulator_init_data *reg_init_data[RC5T583_REGULATOR_MAX];
 };
 
 static inline int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val)
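The three new per-regulator arrays in rc5t583_platform_data are indexed by the
RC5T583_REGULATOR_* ids added above. A hedged board-file sketch, not taken from the
patch (the init data and the DC0/PWRREQ2 choice are placeholders):

	#include <linux/regulator/machine.h>

	static struct regulator_init_data rc5t583_dc0_init;	/* placeholder */

	static struct rc5t583_platform_data rc5t583_pdata = {
		.irq_base		= -1,	/* board specific */
		.enable_shutdown	= true,
		/* Park DC0 in deep-sleep slot 0 and let the external
		 * PWRREQ2 input gate its output. */
		.regulator_deepsleep_slot[RC5T583_REGULATOR_DC0] = 0,
		.regulator_ext_pwr_control[RC5T583_REGULATOR_DC0] =
						RC5T583_EXT_PWRREQ2_CONTROL,
		.reg_init_data[RC5T583_REGULATOR_DC0] = &rc5t583_dc0_init,
	};
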
diff --git a/include/linux/mfd/s5m87xx/s5m-core.h b/include/linux/mfd/s5m87xx/s5m-core.h
index a7480b5..21603b4 100644
--- a/include/linux/mfd/s5m87xx/s5m-core.h
+++ b/include/linux/mfd/s5m87xx/s5m-core.h
@@ -335,6 +335,7 @@
 
 struct s5m_platform_data {
 	struct s5m_regulator_data	*regulators;
+	struct s5m_opmode_data		*opmode;
 	int				device_type;
 	int				num_regulators;
 
diff --git a/include/linux/mfd/s5m87xx/s5m-pmic.h b/include/linux/mfd/s5m87xx/s5m-pmic.h
index a72a5d2..7c719f2 100644
--- a/include/linux/mfd/s5m87xx/s5m-pmic.h
+++ b/include/linux/mfd/s5m87xx/s5m-pmic.h
@@ -58,6 +58,8 @@
 	S5M8767_REG_MAX,
 };
 
+#define S5M8767_ENCTRL_SHIFT  6
+
 /* S5M8763 regulator ids */
 enum s5m8763_regulators {
 	S5M8763_LDO1,
@@ -97,4 +99,31 @@
 	struct regulator_init_data	*initdata;
 };
 
+/*
+ * s5m_opmode_data - regulator operation mode data
+ * @id: regulator id
+ * @mode: regulator operation mode
+ */
+struct s5m_opmode_data {
+	int id;
+	int mode;
+};
+
+/*
+ * s5m regulator operation mode
+ * S5M_OPMODE_OFF	Regulator always OFF
+ * S5M_OPMODE_ON	Regulator always ON
+ * S5M_OPMODE_LOWPOWER  Regulator is on in low-power mode
+ * S5M_OPMODE_SUSPEND   Regulator is changed by PWREN pin
+ *			If PWREN is high, regulator is on
+ *			If PWREN is low, regulator is off
+ */
+
+enum s5m_opmode {
+	S5M_OPMODE_OFF,
+	S5M_OPMODE_ON,
+	S5M_OPMODE_LOWPOWER,
+	S5M_OPMODE_SUSPEND,
+};
+
 #endif /*  __LINUX_MFD_S5M_PMIC_H */
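The new opmode hook in s5m_platform_data points at an array of struct s5m_opmode_data
entries keyed by regulator id. A minimal sketch, assuming the S5M8763_LDO1 id from the
enum above (the table contents are illustrative):

	static struct s5m_opmode_data board_opmodes[] = {
		/* Keep LDO1 under PWREN control; add one entry per
		 * regulator that needs a non-default mode. */
		{ .id = S5M8763_LDO1, .mode = S5M_OPMODE_SUSPEND },
	};

	static struct s5m_platform_data board_s5m_pdata = {
		.opmode		= board_opmodes,
		/* .regulators, .num_regulators, .device_type as before */
	};
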
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
index 38e31c5..6bc31d8 100644
--- a/include/linux/mfd/tps65090.h
+++ b/include/linux/mfd/tps65090.h
@@ -22,6 +22,19 @@
 #ifndef __LINUX_MFD_TPS65090_H
 #define __LINUX_MFD_TPS65090_H
 
+#include <linux/irq.h>
+
+struct tps65090 {
+	struct mutex		lock;
+	struct device		*dev;
+	struct i2c_client	*client;
+	struct regmap		*rmap;
+	struct irq_chip		irq_chip;
+	struct mutex		irq_lock;
+	int			irq_base;
+	unsigned int		id;
+};
+
 struct tps65090_subdev_info {
 	int		id;
 	const char	*name;
diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h
index b19176e..f350fd0 100644
--- a/include/linux/mfd/tps6586x.h
+++ b/include/linux/mfd/tps6586x.h
@@ -68,6 +68,7 @@
 	int		id;
 	const char	*name;
 	void		*platform_data;
+	struct device_node *of_node;
 };
 
 struct tps6586x_platform_data {
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 9eff2a3..6695c3e 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -17,6 +17,7 @@
 
 #include <linux/mutex.h>
 #include <linux/interrupt.h>
+#include <linux/regmap.h>
 
 enum wm8994_type {
 	WM8994 = 0,
@@ -26,7 +27,6 @@
 
 struct regulator_dev;
 struct regulator_bulk_data;
-struct regmap;
 
 #define WM8994_NUM_GPIO_REGS 11
 #define WM8994_NUM_LDO_REGS   2
@@ -94,17 +94,17 @@
 				     irq_handler_t handler, const char *name,
 				     void *data)
 {
-	if (!wm8994->irq_base)
+	if (!wm8994->irq_data)
 		return -EINVAL;
-	return request_threaded_irq(wm8994->irq_base + irq, NULL, handler,
-				    IRQF_TRIGGER_RISING, name,
+	return request_threaded_irq(regmap_irq_get_virq(wm8994->irq_data, irq),
+				    NULL, handler, IRQF_TRIGGER_RISING, name,
 				    data);
 }
 static inline void wm8994_free_irq(struct wm8994 *wm8994, int irq, void *data)
 {
-	if (!wm8994->irq_base)
+	if (!wm8994->irq_data)
 		return;
-	free_irq(wm8994->irq_base + irq, data);
+	free_irq(regmap_irq_get_virq(wm8994->irq_data, irq), data);
 }
 
 int wm8994_irq_init(struct wm8994 *wm8994);
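With the move to regmap-irq, wm8994_request_irq() maps the chip-local IRQ index to a
Linux virtual IRQ via regmap_irq_get_virq() before calling request_threaded_irq(). A
hedged caller sketch (the handler, name and wiring function are placeholders):

	static irqreturn_t example_wm8994_handler(int irq, void *data)
	{
		/* acknowledge/handle the event */
		return IRQ_HANDLED;
	}

	static int example_wire_irq(struct wm8994 *wm8994, int irq, void *ctx)
	{
		/* 'irq' is a chip-local WM8994_IRQ_* index; the helper maps
		 * it to a virtual IRQ internally via regmap_irq_get_virq(). */
		return wm8994_request_irq(wm8994, irq, example_wm8994_handler,
					  "example", ctx);
	}
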
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 9958ff2..1f3860a 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -150,6 +150,10 @@
 	/* statistics commands */
 	MLX4_CMD_QUERY_IF_STAT	 = 0X54,
 	MLX4_CMD_SET_IF_STAT	 = 0X55,
+
+	/* set port opcode modifiers */
+	MLX4_SET_PORT_PRIO2TC = 0x8,
+	MLX4_SET_PORT_SCHEDULER  = 0x9,
 };
 
 enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 834c96c..6e27fa9 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -98,6 +98,12 @@
 	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55
 };
 
+enum {
+	MLX4_DEV_CAP_FLAG2_RSS			= 1LL <<  0,
+	MLX4_DEV_CAP_FLAG2_RSS_TOP		= 1LL <<  1,
+	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL <<  2
+};
+
 #define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
 
 enum {
@@ -292,11 +298,13 @@
 	u32			max_msg_sz;
 	u32			page_size_cap;
 	u64			flags;
+	u64			flags2;
 	u32			bmme_flags;
 	u32			reserved_lkey;
 	u16			stat_rate_support;
 	u8			port_width_cap[MLX4_MAX_PORTS + 1];
 	int			max_gso_sz;
+	int			max_rss_tbl_sz;
 	int                     reserved_qps_cnt[MLX4_NUM_QP_REGION];
 	int			reserved_qps;
 	int                     reserved_qps_base[MLX4_NUM_QP_REGION];
@@ -628,6 +636,9 @@
 			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 			   u8 promisc);
+int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
+int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
+		u8 *pg, u16 *ratelimit);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
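The two new SET_PORT helpers take per-priority arrays for ETS/QoS configuration. A
minimal sketch of a 1:1 priority-to-TC mapping, assuming the conventional eight
802.1p priorities (the port number and array size are illustrative, not mandated by
the prototype):

	static int example_prio2tc_identity(struct mlx4_dev *dev, u8 port)
	{
		u8 prio2tc[8];
		int i;

		/* Map each of the eight 802.1p priorities to the traffic
		 * class with the same index. */
		for (i = 0; i < 8; i++)
			prio2tc[i] = i;

		return mlx4_SET_PORT_PRIO2TC(dev, port, prio2tc);
	}
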
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 091f9e7..338388b 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -139,7 +139,8 @@
 	u8			rgid[16];
 	u8			sched_queue;
 	u8			vlan_index;
-	u8			reserved3[2];
+	u8			feup;
+	u8			reserved3;
 	u8			reserved4[2];
 	u8			dmac[6];
 };
@@ -233,7 +234,8 @@
 	u8			owner;
 	u8			reserved1[2];
 	u8			opcode;
-	u8			reserved2[3];
+	__be16			sched_prio;
+	u8			reserved2;
 	u8			size;
 	/*
 	 * [17]    VL15
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 74aa71b..7d5c37f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -896,10 +896,8 @@
 		unsigned long size);
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-void unmap_vmas(struct mmu_gather *tlb,
-		struct vm_area_struct *start_vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
-		struct zap_details *);
+void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+		unsigned long start, unsigned long end);
 
 /**
  * mm_walk - callbacks for walk_page_range
diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h
index b188f68..275e5d6 100644
--- a/include/linux/neighbour.h
+++ b/include/linux/neighbour.h
@@ -33,6 +33,9 @@
 #define NTF_PROXY	0x08	/* == ATF_PUBL */
 #define NTF_ROUTER	0x80
 
+#define NTF_SELF	0x02
+#define NTF_MASTER	0x04
+
 /*
  *	Neighbor Cache Entry States.
  */
diff --git a/include/linux/net.h b/include/linux/net.h
index be60c7f..2d7510f 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -250,6 +250,29 @@
 #define		     sockfd_put(sock) fput(sock->file)
 extern int	     net_ratelimit(void);
 
+#define net_ratelimited_function(function, ...)			\
+do {								\
+	if (net_ratelimit())					\
+		function(__VA_ARGS__);				\
+} while (0)
+
+#define net_emerg_ratelimited(fmt, ...)				\
+	net_ratelimited_function(pr_emerg, fmt, ##__VA_ARGS__)
+#define net_alert_ratelimited(fmt, ...)				\
+	net_ratelimited_function(pr_alert, fmt, ##__VA_ARGS__)
+#define net_crit_ratelimited(fmt, ...)				\
+	net_ratelimited_function(pr_crit, fmt, ##__VA_ARGS__)
+#define net_err_ratelimited(fmt, ...)				\
+	net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__)
+#define net_notice_ratelimited(fmt, ...)			\
+	net_ratelimited_function(pr_notice, fmt, ##__VA_ARGS__)
+#define net_warn_ratelimited(fmt, ...)				\
+	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
+#define net_info_ratelimited(fmt, ...)				\
+	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#define net_dbg_ratelimited(fmt, ...)				\
+	net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+
 #define net_random()		random32()
 #define net_srandom(seed)	srandom32((__force u32)seed)
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5cbaa20..e7fd468 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -54,6 +54,7 @@
 #include <net/netprio_cgroup.h>
 
 #include <linux/netdev_features.h>
+#include <linux/neighbour.h>
 
 struct netpoll_info;
 struct device;
@@ -288,7 +289,7 @@
 struct header_ops {
 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
 			   unsigned short type, const void *daddr,
-			   const void *saddr, unsigned len);
+			   const void *saddr, unsigned int len);
 	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
 	int	(*rebuild)(struct sk_buff *skb);
 	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
@@ -905,6 +906,16 @@
  *	feature set might be less than what was returned by ndo_fix_features()).
  *	Must return >0 or -errno if it changed dev->features itself.
  *
+ * int (*ndo_fdb_add)(struct ndmsg *ndm, struct net_device *dev,
+ *		      unsigned char *addr, u16 flags)
+ *	Adds an FDB entry to dev for addr.
+ * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
+ *		      unsigned char *addr)
+ *	Deletes the FDB entry from dev corresponding to addr.
+ * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
+ *		       struct net_device *dev, int idx)
+ *	Used to add FDB entries to dump requests. Implementers should add
+ *	entries to skb and update idx with the number of entries.
  */
 struct net_device_ops {
 	int			(*ndo_init)(struct net_device *dev);
@@ -1002,6 +1013,18 @@
 						    netdev_features_t features);
 	int			(*ndo_neigh_construct)(struct neighbour *n);
 	void			(*ndo_neigh_destroy)(struct neighbour *n);
+
+	int			(*ndo_fdb_add)(struct ndmsg *ndm,
+					       struct net_device *dev,
+					       unsigned char *addr,
+					       u16 flags);
+	int			(*ndo_fdb_del)(struct ndmsg *ndm,
+					       struct net_device *dev,
+					       unsigned char *addr);
+	int			(*ndo_fdb_dump)(struct sk_buff *skb,
+						struct netlink_callback *cb,
+						struct net_device *dev,
+						int idx);
 };
 
 /*
@@ -1132,7 +1155,6 @@
 	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
 	struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
 	struct inet6_dev __rcu	*ip6_ptr;       /* IPv6 specific data */
-	void			*ec_ptr;	/* Econet specific data	*/
 	void			*ax25_ptr;	/* AX.25 specific data */
 	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
 						   assign before registering */
@@ -1403,15 +1425,6 @@
 	return 0;
 }
 
-#ifndef CONFIG_NET_NS
-static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
-	skb->dev = dev;
-}
-#else /* CONFIG_NET_NS */
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
-#endif
-
 static inline bool netdev_uses_trailer_tags(struct net_device *dev)
 {
 #ifdef CONFIG_NET_DSA_TAG_TRAILER
@@ -1486,6 +1499,8 @@
 
 	/* Free the skb? */
 	int free;
+#define NAPI_GRO_FREE		  1
+#define NAPI_GRO_FREE_STOLEN_HEAD 2
 };
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
@@ -1689,7 +1704,7 @@
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 				  unsigned short type,
 				  const void *daddr, const void *saddr,
-				  unsigned len)
+				  unsigned int len)
 {
 	if (!dev->header_ops || !dev->header_ops->create)
 		return 0;
@@ -1740,7 +1755,7 @@
 	unsigned int		input_queue_head;
 	unsigned int		input_queue_tail;
 #endif
-	unsigned		dropped;
+	unsigned int		dropped;
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
 };
@@ -1925,7 +1940,7 @@
 }
 
 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
-					     unsigned pkts, unsigned bytes)
+					     unsigned int pkts, unsigned int bytes)
 {
 #ifdef CONFIG_BQL
 	if (unlikely(!bytes))
@@ -1949,7 +1964,7 @@
 }
 
 static inline void netdev_completed_queue(struct net_device *dev,
-					  unsigned pkts, unsigned bytes)
+					  unsigned int pkts, unsigned int bytes)
 {
 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
 }
@@ -2127,7 +2142,6 @@
 extern gro_result_t	napi_frags_finish(struct napi_struct *napi,
 					  struct sk_buff *skb,
 					  gro_result_t ret);
-extern struct sk_buff *	napi_frags_skb(struct napi_struct *napi);
 extern gro_result_t	napi_gro_frags(struct napi_struct *napi);
 
 static inline void napi_free_frags(struct napi_struct *napi)
@@ -2144,9 +2158,9 @@
 extern bool		dev_valid_name(const char *name);
 extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
 extern int		dev_ethtool(struct net *net, struct ifreq *);
-extern unsigned		dev_get_flags(const struct net_device *);
+extern unsigned int	dev_get_flags(const struct net_device *);
 extern int		__dev_change_flags(struct net_device *, unsigned int flags);
-extern int		dev_change_flags(struct net_device *, unsigned);
+extern int		dev_change_flags(struct net_device *, unsigned int);
 extern void		__dev_notify_flags(struct net_device *, unsigned int old_flags);
 extern int		dev_change_name(struct net_device *, const char *);
 extern int		dev_set_alias(struct net_device *, const char *, size_t);
@@ -2546,6 +2560,7 @@
 
 /* Functions used for unicast addresses handling */
 extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
+extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr);
 extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
 extern int dev_uc_sync(struct net_device *to, struct net_device *from);
 extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
@@ -2555,6 +2570,7 @@
 /* Functions used for multicast addresses handling */
 extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
 extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
+extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr);
 extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
 extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
 extern int dev_mc_sync(struct net_device *to, struct net_device *from);
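The new ndo_fdb_add/del/dump hooks let a device manage FDB entries pushed down via
rtnetlink, and dev_uc_add_excl()/dev_mc_add_excl() give drivers a way to install such
addresses exclusively. A sketch of an ndo_fdb_add implementation (driver name and
policy are invented, loosely modelled on how an embedded-switch NIC might use it):

static int foo_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev,
			   unsigned char *addr, u16 flags)
{
	int err = -EINVAL;

	/* Only handle entries targeted at the device itself */
	if (!(ndm->ndm_flags & NTF_SELF))
		return err;

	if (is_unicast_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);

	return err;
}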
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 29734be..ff9c84c 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -154,12 +154,6 @@
 int nf_register_sockopt(struct nf_sockopt_ops *reg);
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
 
-#ifdef CONFIG_SYSCTL
-/* Sysctl registration */
-extern struct ctl_path nf_net_netfilter_sysctl_path[];
-extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
-#endif /* CONFIG_SYSCTL */
-
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
 #if defined(CONFIG_JUMP_LABEL)
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 2f8e18a..2edc64c 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -411,26 +411,32 @@
 #define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
 #define ipset_nest_end(skb, start)  nla_nest_end(skb, start)
 
-#define NLA_PUT_IPADDR4(skb, type, ipaddr)			\
-do {								\
-	struct nlattr *__nested = ipset_nest_start(skb, type);	\
-								\
-	if (!__nested)						\
-		goto nla_put_failure;				\
-	NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);	\
-	ipset_nest_end(skb, __nested);				\
-} while (0)
+static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
+{
+	struct nlattr *__nested = ipset_nest_start(skb, type);
+	int ret;
 
-#define NLA_PUT_IPADDR6(skb, type, ipaddrptr)			\
-do {								\
-	struct nlattr *__nested = ipset_nest_start(skb, type);	\
-								\
-	if (!__nested)						\
-		goto nla_put_failure;				\
-	NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6,			\
-		sizeof(struct in6_addr), ipaddrptr);		\
-	ipset_nest_end(skb, __nested);				\
-} while (0)
+	if (!__nested)
+		return -EMSGSIZE;
+	ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
+	if (!ret)
+		ipset_nest_end(skb, __nested);
+	return ret;
+}
+
+static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, const struct in6_addr *ipaddrptr)
+{
+	struct nlattr *__nested = ipset_nest_start(skb, type);
+	int ret;
+
+	if (!__nested)
+		return -EMSGSIZE;
+	ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6,
+		      sizeof(struct in6_addr), ipaddrptr);
+	if (!ret)
+		ipset_nest_end(skb, __nested);
+	return ret;
+}
 
 /* Get address from skbuff */
 static inline __be32
@@ -472,8 +478,8 @@
 
 #define IP_SET_OP_GET_BYNAME	0x00000006	/* Get set index by name */
 struct ip_set_req_get_set {
-	unsigned op;
-	unsigned version;
+	unsigned int op;
+	unsigned int version;
 	union ip_set_name_index set;
 };
 
@@ -482,8 +488,8 @@
 
 #define IP_SET_OP_VERSION	0x00000100	/* Ask kernel version */
 struct ip_set_req_version {
-	unsigned op;
-	unsigned version;
+	unsigned int op;
+	unsigned int version;
 };
 
 #endif /*_IP_SET_H */
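Because the old NLA_PUT_IPADDR* macros jumped to nla_put_failure internally, callers of
the new inline helpers must check the return value themselves; a hedged example
(attribute choice is illustrative):

	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, ip) ||
	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &ip6))
		goto nla_put_failure;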
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index 05a5d72..b114d35 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -99,6 +99,22 @@
 #endif
 };
 
+static size_t
+htable_size(u8 hbits)
+{
+	size_t hsize;
+
+	/* We must fit both into u32 in jhash and size_t */
+	if (hbits > 31)
+		return 0;
+	hsize = jhash_size(hbits);
+	if ((((size_t)-1) - sizeof(struct htable))/sizeof(struct hbucket)
+	    < hsize)
+		return 0;
+
+	return hsize * sizeof(struct hbucket) + sizeof(struct htable);
+}
+
 /* Compute htable_bits from the user input parameter hashsize */
 static u8
 htable_bits(u32 hashsize)
@@ -594,17 +610,20 @@
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
-	NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
-		      htonl(jhash_size(h->table->htable_bits)));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
+	if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
+			  htonl(jhash_size(h->table->htable_bits))) ||
+	    nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
+		goto nla_put_failure;
 #ifdef IP_SET_HASH_WITH_NETMASK
-	if (h->netmask != HOST_MASK)
-		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
+	if (h->netmask != HOST_MASK &&
+	    nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
+		goto nla_put_failure;
 #endif
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
-	if (with_timeout(h->timeout))
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
+	if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
+	    (with_timeout(h->timeout) &&
+	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout))))
+		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
 	return 0;
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 4792320..41d9cfa 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -30,6 +30,10 @@
 {
 	unsigned int timeout = ip_set_get_h32(tb);
 
+	/* Normalize to fit into jiffies */
+	if (timeout > UINT_MAX/MSEC_PER_SEC)
+		timeout = UINT_MAX/MSEC_PER_SEC;
+
 	/* Userspace supplied TIMEOUT parameter: adjust crazy size */
 	return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
 }
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 0d3dd66..d146872 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -83,6 +83,10 @@
 	/* Conntrack is a fake untracked entry */
 	IPS_UNTRACKED_BIT = 12,
 	IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
+
+	/* Conntrack got a helper explicitly attached via CT target. */
+	IPS_HELPER_BIT = 13,
+	IPS_HELPER = (1 << IPS_HELPER_BIT),
 };
 
 /* Connection tracking event types */
diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
index f35b6b4..b0821f4 100644
--- a/include/linux/netfilter/nf_conntrack_h323_types.h
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -7,12 +7,12 @@
 
 typedef struct TransportAddress_ipAddress {	/* SEQUENCE */
 	int options;		/* No use */
-	unsigned ip;
+	unsigned int ip;
 } TransportAddress_ipAddress;
 
 typedef struct TransportAddress_ip6Address {	/* SEQUENCE */
 	int options;		/* No use */
-	unsigned ip;
+	unsigned int ip;
 } TransportAddress_ip6Address;
 
 typedef struct TransportAddress {	/* CHOICE */
@@ -96,12 +96,12 @@
 
 typedef struct UnicastAddress_iPAddress {	/* SEQUENCE */
 	int options;		/* No use */
-	unsigned network;
+	unsigned int network;
 } UnicastAddress_iPAddress;
 
 typedef struct UnicastAddress_iP6Address {	/* SEQUENCE */
 	int options;		/* No use */
-	unsigned network;
+	unsigned int network;
 } UnicastAddress_iP6Address;
 
 typedef struct UnicastAddress {	/* CHOICE */
@@ -698,7 +698,7 @@
 	} options;
 	RegistrationRequest_callSignalAddress callSignalAddress;
 	RegistrationRequest_rasAddress rasAddress;
-	unsigned timeToLive;
+	unsigned int timeToLive;
 } RegistrationRequest;
 
 typedef struct RegistrationConfirm_callSignalAddress {	/* SEQUENCE OF */
@@ -730,7 +730,7 @@
 		eRegistrationConfirm_genericData = (1 << 12),
 	} options;
 	RegistrationConfirm_callSignalAddress callSignalAddress;
-	unsigned timeToLive;
+	unsigned int timeToLive;
 } RegistrationConfirm;
 
 typedef struct UnregistrationRequest_callSignalAddress {	/* SEQUENCE OF */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 6fd1f0d..a1048c1 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -80,7 +80,7 @@
 extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
 
 extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
-extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group,
+extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group,
 			  int echo, gfp_t flags);
 extern int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error);
 extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags);
diff --git a/include/linux/netfilter/xt_HMARK.h b/include/linux/netfilter/xt_HMARK.h
new file mode 100644
index 0000000..abb1650
--- /dev/null
+++ b/include/linux/netfilter/xt_HMARK.h
@@ -0,0 +1,45 @@
+#ifndef XT_HMARK_H_
+#define XT_HMARK_H_
+
+#include <linux/types.h>
+
+enum {
+	XT_HMARK_SADDR_MASK,
+	XT_HMARK_DADDR_MASK,
+	XT_HMARK_SPI,
+	XT_HMARK_SPI_MASK,
+	XT_HMARK_SPORT,
+	XT_HMARK_DPORT,
+	XT_HMARK_SPORT_MASK,
+	XT_HMARK_DPORT_MASK,
+	XT_HMARK_PROTO_MASK,
+	XT_HMARK_RND,
+	XT_HMARK_MODULUS,
+	XT_HMARK_OFFSET,
+	XT_HMARK_CT,
+	XT_HMARK_METHOD_L3,
+	XT_HMARK_METHOD_L3_4,
+};
+#define XT_HMARK_FLAG(flag)	(1 << flag)
+
+union hmark_ports {
+	struct {
+		__u16	src;
+		__u16	dst;
+	} p16;
+	__u32	v32;
+};
+
+struct xt_hmark_info {
+	union nf_inet_addr	src_mask;
+	union nf_inet_addr	dst_mask;
+	union hmark_ports	port_mask;
+	union hmark_ports	port_set;
+	__u32			flags;
+	__u16			proto_mask;
+	__u32			hashrnd;
+	__u32			hmodulus;
+	__u32			hoffset;	/* Mark offset to start from */
+};
+
+#endif /* XT_HMARK_H_ */
diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h
index b1925b5..c42e52f 100644
--- a/include/linux/netfilter/xt_hashlimit.h
+++ b/include/linux/netfilter/xt_hashlimit.h
@@ -6,7 +6,11 @@
 /* timings are in milliseconds. */
 #define XT_HASHLIMIT_SCALE 10000
 /* 1/10,000 sec period => max of 10,000/sec.  Min rate is then 429490
-   seconds, or one every 59 hours. */
+ * seconds, or one packet every 59 hours.
+ */
+
+/* packet length accounting is done in 16-byte steps */
+#define XT_HASHLIMIT_BYTE_SHIFT 4
 
 /* details of this structure hidden by the implementation */
 struct xt_hashlimit_htable;
@@ -17,7 +21,13 @@
 	XT_HASHLIMIT_HASH_SIP = 1 << 2,
 	XT_HASHLIMIT_HASH_SPT = 1 << 3,
 	XT_HASHLIMIT_INVERT   = 1 << 4,
+	XT_HASHLIMIT_BYTES    = 1 << 5,
 };
+#ifdef __KERNEL__
+#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
+			  XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \
+			  XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES)
+#endif
 
 struct hashlimit_cfg {
 	__u32 mode;	  /* bitmask of XT_HASHLIMIT_HASH_* */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 0ddd161..31d2844 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -104,9 +104,18 @@
 	} daddr;
 };
 
+static inline void br_drop_fake_rtable(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	if (dst && (dst->flags & DST_FAKE_RTABLE))
+		skb_dst_drop(skb);
+}
+
 #else
 #define nf_bridge_maybe_copy_header(skb)	(0)
 #define nf_bridge_pad(skb)			(0)
+#define br_drop_fake_rtable(skb)	        do { } while (0)
 #endif /* CONFIG_BRIDGE_NETFILTER */
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
index 31f8bec..c61b8fb 100644
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -1,4 +1,3 @@
-header-y += ip_queue.h
 header-y += ip_tables.h
 header-y += ipt_CLUSTERIP.h
 header-y += ipt_ECN.h
diff --git a/include/linux/netfilter_ipv4/ip_queue.h b/include/linux/netfilter_ipv4/ip_queue.h
deleted file mode 100644
index a03507f..0000000
--- a/include/linux/netfilter_ipv4/ip_queue.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * This is a module which is used for queueing IPv4 packets and
- * communicating with userspace via netlink.
- *
- * (C) 2000 James Morris, this code is GPL.
- */
-#ifndef _IP_QUEUE_H
-#define _IP_QUEUE_H
-
-#ifdef __KERNEL__
-#ifdef DEBUG_IPQ
-#define QDEBUG(x...) printk(KERN_DEBUG ## x)
-#else
-#define QDEBUG(x...)
-#endif  /* DEBUG_IPQ */
-#else
-#include <net/if.h>
-#endif	/* ! __KERNEL__ */
-
-/* Messages sent from kernel */
-typedef struct ipq_packet_msg {
-	unsigned long packet_id;	/* ID of queued packet */
-	unsigned long mark;		/* Netfilter mark value */
-	long timestamp_sec;		/* Packet arrival time (seconds) */
-	long timestamp_usec;		/* Packet arrvial time (+useconds) */
-	unsigned int hook;		/* Netfilter hook we rode in on */
-	char indev_name[IFNAMSIZ];	/* Name of incoming interface */
-	char outdev_name[IFNAMSIZ];	/* Name of outgoing interface */
-	__be16 hw_protocol;		/* Hardware protocol (network order) */
-	unsigned short hw_type;		/* Hardware type */
-	unsigned char hw_addrlen;	/* Hardware address length */
-	unsigned char hw_addr[8];	/* Hardware address */
-	size_t data_len;		/* Length of packet data */
-	unsigned char payload[0];	/* Optional packet data */
-} ipq_packet_msg_t;
-
-/* Messages sent from userspace */
-typedef struct ipq_mode_msg {
-	unsigned char value;		/* Requested mode */
-	size_t range;			/* Optional range of packet requested */
-} ipq_mode_msg_t;
-
-typedef struct ipq_verdict_msg {
-	unsigned int value;		/* Verdict to hand to netfilter */
-	unsigned long id;		/* Packet ID for this verdict */
-	size_t data_len;		/* Length of replacement data */
-	unsigned char payload[0];	/* Optional replacement packet */
-} ipq_verdict_msg_t;
-
-typedef struct ipq_peer_msg {
-	union {
-		ipq_verdict_msg_t verdict;
-		ipq_mode_msg_t mode;
-	} msg;
-} ipq_peer_msg_t;
-
-/* Packet delivery modes */
-enum {
-	IPQ_COPY_NONE,		/* Initial mode, packets are dropped */
-	IPQ_COPY_META,		/* Copy metadata */
-	IPQ_COPY_PACKET		/* Copy metadata + packet (range) */
-};
-#define IPQ_COPY_MAX IPQ_COPY_PACKET
-
-/* Types of messages */
-#define IPQM_BASE	0x10	/* standard netlink messages below this */
-#define IPQM_MODE	(IPQM_BASE + 1)		/* Mode request from peer */
-#define IPQM_VERDICT	(IPQM_BASE + 2)		/* Verdict from peer */ 
-#define IPQM_PACKET	(IPQM_BASE + 3)		/* Packet from kernel */
-#define IPQM_MAX	(IPQM_BASE + 4)
-
-#endif /*_IP_QUEUE_H*/
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 1bc898b..08c2cbb 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -298,9 +298,14 @@
 	       (nexthdr == IPPROTO_DSTOPTS);
 }
 
+enum {
+	IP6T_FH_F_FRAG	= (1 << 0),
+	IP6T_FH_F_AUTH	= (1 << 1),
+};
+
 /* find specified header and get offset to it */
 extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
-			 int target, unsigned short *fragoff);
+			 int target, unsigned short *fragoff, int *fragflg);
 
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index a2092f5..0f628ff 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -7,7 +7,7 @@
 #define NETLINK_ROUTE		0	/* Routing/device hook				*/
 #define NETLINK_UNUSED		1	/* Unused number				*/
 #define NETLINK_USERSOCK	2	/* Reserved for user mode socket protocols 	*/
-#define NETLINK_FIREWALL	3	/* Firewalling hook				*/
+#define NETLINK_FIREWALL	3	/* Unused number, formerly ip_queue		*/
 #define NETLINK_SOCK_DIAG	4	/* socket monitoring				*/
 #define NETLINK_NFLOG		5	/* netfilter/iptables ULOG */
 #define NETLINK_XFRM		6	/* ipsec */
diff --git a/include/linux/nfc.h b/include/linux/nfc.h
index 39c1fcf..0ae9b58 100644
--- a/include/linux/nfc.h
+++ b/include/linux/nfc.h
@@ -70,6 +70,7 @@
 	NFC_EVENT_TARGETS_FOUND,
 	NFC_EVENT_DEVICE_ADDED,
 	NFC_EVENT_DEVICE_REMOVED,
+	NFC_EVENT_TARGET_LOST,
 /* private: internal use only */
 	__NFC_CMD_AFTER_LAST
 };
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index bfd0d1b..7ba3551 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -312,6 +312,11 @@
 	int rpc_status;
 };
 
+struct stateowner_id {
+	__u64	create_time;
+	__u32	uniquifier;
+};
+
 /*
  * Arguments to the open call.
  */
@@ -321,7 +326,7 @@
 	int			open_flags;
 	fmode_t			fmode;
 	__u64                   clientid;
-	__u64                   id;
+	struct stateowner_id	id;
 	union {
 		struct {
 			struct iattr *  attrs;    /* UNCHECKED, GUARDED */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index e474f6e..2540e86 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -548,6 +548,11 @@
  * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether
  *      No Acknowledgement Policy should be applied.
  *
+ * @NL80211_CMD_CH_SWITCH_NOTIFY: An AP or GO may decide to switch channels
+ *	independently of the userspace SME; send this event to indicate that
+ *	%NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with
+ *	%NL80211_ATTR_WIPHY_CHANNEL_TYPE.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -689,6 +694,8 @@
 
 	NL80211_CMD_SET_NOACK_MAP,
 
+	NL80211_CMD_CH_SWITCH_NOTIFY,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -1685,6 +1692,7 @@
  * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected
  * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update.
  * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32)
+ * @NL80211_STA_INFO_T_OFFSET: timing offset with respect to this STA (s64)
  * @__NL80211_STA_INFO_AFTER_LAST: internal
  * @NL80211_STA_INFO_MAX: highest possible station info attribute
  */
@@ -1708,6 +1716,7 @@
 	NL80211_STA_INFO_CONNECTED_TIME,
 	NL80211_STA_INFO_STA_FLAGS,
 	NL80211_STA_INFO_BEACON_LOSS,
+	NL80211_STA_INFO_T_OFFSET,
 
 	/* keep last */
 	__NL80211_STA_INFO_AFTER_LAST,
@@ -2142,6 +2151,11 @@
  *
  * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
  *
+ * @NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR: maximum number of neighbors
+ * to synchronize to for the 11s default synchronization method (see 11C.12.2.2)
+ *
+ * @NL80211_MESHCONF_HT_OPMODE: set mesh HT protection mode.
+ *
  * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
  */
 enum nl80211_meshconf_params {
@@ -2166,6 +2180,8 @@
 	NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
 	NL80211_MESHCONF_FORWARDING,
 	NL80211_MESHCONF_RSSI_THRESHOLD,
+	NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
+	NL80211_MESHCONF_HT_OPMODE,
 
 	/* keep last */
 	__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2205,6 +2221,11 @@
  * complete (unsecured) mesh peering without the need of a userspace daemon.
  *
  * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
+ *
+ * @NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC: Enable this option to use a
+ * vendor-specific synchronization method or disable it to use the default
+ * neighbor offset synchronization.
+ *
  * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
  */
 enum nl80211_mesh_setup_params {
@@ -2214,6 +2235,7 @@
 	NL80211_MESH_SETUP_IE,
 	NL80211_MESH_SETUP_USERSPACE_AUTH,
 	NL80211_MESH_SETUP_USERSPACE_AMPE,
+	NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC,
 
 	/* keep last */
 	__NL80211_MESH_SETUP_ATTR_AFTER_LAST,
@@ -2223,7 +2245,7 @@
 /**
  * enum nl80211_txq_attr - TX queue parameter attributes
  * @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved
- * @NL80211_TXQ_ATTR_QUEUE: TX queue identifier (NL80211_TXQ_Q_*)
+ * @NL80211_TXQ_ATTR_AC: AC identifier (NL80211_AC_*)
  * @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning
  *	disabled
  * @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form
@@ -2236,7 +2258,7 @@
  */
 enum nl80211_txq_attr {
 	__NL80211_TXQ_ATTR_INVALID,
-	NL80211_TXQ_ATTR_QUEUE,
+	NL80211_TXQ_ATTR_AC,
 	NL80211_TXQ_ATTR_TXOP,
 	NL80211_TXQ_ATTR_CWMIN,
 	NL80211_TXQ_ATTR_CWMAX,
@@ -2247,13 +2269,21 @@
 	NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1
 };
 
-enum nl80211_txq_q {
-	NL80211_TXQ_Q_VO,
-	NL80211_TXQ_Q_VI,
-	NL80211_TXQ_Q_BE,
-	NL80211_TXQ_Q_BK
+enum nl80211_ac {
+	NL80211_AC_VO,
+	NL80211_AC_VI,
+	NL80211_AC_BE,
+	NL80211_AC_BK,
+	NL80211_NUM_ACS
 };
 
+/* backward compat */
+#define NL80211_TXQ_ATTR_QUEUE	NL80211_TXQ_ATTR_AC
+#define NL80211_TXQ_Q_VO	NL80211_AC_VO
+#define NL80211_TXQ_Q_VI	NL80211_AC_VI
+#define NL80211_TXQ_Q_BE	NL80211_AC_BE
+#define NL80211_TXQ_Q_BK	NL80211_AC_BK
+
 enum nl80211_channel_type {
 	NL80211_CHAN_NO_HT,
 	NL80211_CHAN_HT20,
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 33d9f51..5a3db3a 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -68,6 +68,7 @@
 	IEEE802154_ATTR_CHANNEL_PAGE_LIST,
 
 	IEEE802154_ATTR_PHY_NAME,
+	IEEE802154_ATTR_DEV_TYPE,
 
 	__IEEE802154_ATTR_MAX,
 };
@@ -126,4 +127,23 @@
 
 #define IEEE802154_CMD_MAX (__IEEE802154_CMD_MAX - 1)
 
+enum {
+	__IEEE802154_DEV_INVALID = -1,
+
+	 /* TODO:
+	 * Nowadays three device types are supported by this stack in the
+	 * linux-zigbee project: WPAN = 0, MONITOR = 1 and SMAC = 2.
+	 *
+	 * Since this stack implementation has existed for many years, it is
+	 * definitely a bad idea to change the assigned values, because they
+	 * are already used by third-party userspace software such as
+	 * iz-tools and wireshark.
+	 *
+	 * Currently only the monitor device is added and is initialized
+	 * to '1' for compatibility.
+	 */
+	IEEE802154_DEV_MONITOR = 1,
+
+	__IEEE802154_DEV_MAX,
+};
+
 #endif
diff --git a/include/linux/of.h b/include/linux/of.h
index fa7fb1d..2ec1083 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -193,6 +193,17 @@
 	for (child = of_get_next_child(parent, NULL); child != NULL; \
 	     child = of_get_next_child(parent, child))
 
+static inline int of_get_child_count(const struct device_node *np)
+{
+	struct device_node *child;
+	int num = 0;
+
+	for_each_child_of_node(np, child)
+		num++;
+
+	return num;
+}
+
 extern struct device_node *of_find_node_with_property(
 	struct device_node *from, const char *prop_name);
 #define for_each_node_with_property(dn, prop_name) \
@@ -259,6 +270,37 @@
 #endif
 
 #define of_match_ptr(_ptr)	(_ptr)
+
+/*
+ * struct property *prop;
+ * const __be32 *p;
+ * u32 u;
+ *
+ * of_property_for_each_u32(np, "propname", prop, p, u)
+ *         printk("U32 value: %x\n", u);
+ */
+const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
+			       u32 *pu);
+#define of_property_for_each_u32(np, propname, prop, p, u)	\
+	for (prop = of_find_property(np, propname, NULL),	\
+		p = of_prop_next_u32(prop, NULL, &u);		\
+		p;						\
+		p = of_prop_next_u32(prop, p, &u))
+
+/*
+ * struct property *prop;
+ * const char *s;
+ *
+ * of_property_for_each_string(np, "propname", prop, s)
+ *         printk("String value: %s\n", s);
+ */
+const char *of_prop_next_string(struct property *prop, const char *cur);
+#define of_property_for_each_string(np, propname, prop, s)	\
+	for (prop = of_find_property(np, propname, NULL),	\
+		s = of_prop_next_string(prop, NULL);		\
+		s;						\
+		s = of_prop_next_string(prop, s))
+
 #else /* CONFIG_OF */
 
 static inline bool of_have_populated_dt(void)
@@ -269,6 +311,11 @@
 #define for_each_child_of_node(parent, child) \
 	while (0)
 
+static inline int of_get_child_count(const struct device_node *np)
+{
+	return 0;
+}
+
 static inline int of_device_is_compatible(const struct device_node *device,
 					  const char *name)
 {
@@ -349,6 +396,10 @@
 
 #define of_match_ptr(_ptr)	NULL
 #define of_match_node(_matches, _node)	NULL
+#define of_property_for_each_u32(np, propname, prop, p, u) \
+	while (0)
+#define of_property_for_each_string(np, propname, prop, s) \
+	while (0)
 #endif /* CONFIG_OF */
 
 /**
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 53b94e0..912c27a 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -22,4 +22,6 @@
 					 void (*hndlr)(struct net_device *),
 					 phy_interface_t iface);
 
+extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
+
 #endif /* __LINUX_OF_MDIO_H */
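of_mdio_find_bus() lets a MAC driver locate an MDIO bus that was registered from another
node of the device tree. A sketch, assuming a made-up "mdio-bus" phandle property:

	struct device_node *mdio_np;
	struct mii_bus *bus;

	mdio_np = of_parse_phandle(dev->of_node, "mdio-bus", 0);
	if (!mdio_np)
		return -ENODEV;

	bus = of_mdio_find_bus(mdio_np);
	if (!bus)
		return -EPROBE_DEFER;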
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e444f5b..17b7b5b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -375,11 +375,18 @@
 };
 
 struct pci_host_bridge {
-	struct list_head list;
+	struct device dev;
 	struct pci_bus *bus;		/* root bus */
 	struct list_head windows;	/* pci_host_bridge_windows */
+	void (*release_fn)(struct pci_host_bridge *);
+	void *release_data;
 };
 
+#define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
+void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
+		     void (*release_fn)(struct pci_host_bridge *),
+		     void *release_data);
+
 /*
  * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
  * to P2P or CardBus bridge windows) go in a table.  Additional ones (for
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6fe0a37..c291cae 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -412,6 +412,9 @@
 	/* Clears up any memory if needed */
 	void (*remove)(struct phy_device *phydev);
 
+	/* Handles ethtool queries for hardware time stamping. */
+	int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
+
 	/* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */
 	int  (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr);
 
@@ -477,7 +480,6 @@
 	return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
 }
 
-int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
 struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
 int phy_device_register(struct phy_device *phy);
 int phy_init_hw(struct phy_device *phydev);
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 191e726..6dd96fb 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -36,6 +36,9 @@
 							const char *name);
 extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
 
+extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
+extern void devm_pinctrl_put(struct pinctrl *p);
+
 #else /* !CONFIG_PINCTRL */
 
 static inline int pinctrl_request_gpio(unsigned gpio)
@@ -79,6 +82,15 @@
 	return 0;
 }
 
+static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev)
+{
+	return NULL;
+}
+
+static inline void devm_pinctrl_put(struct pinctrl *p)
+{
+}
+
 #endif /* CONFIG_PINCTRL */
 
 static inline struct pinctrl * __must_check pinctrl_get_select(
@@ -113,6 +125,38 @@
 	return pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT);
 }
 
+static inline struct pinctrl * __must_check devm_pinctrl_get_select(
+					struct device *dev, const char *name)
+{
+	struct pinctrl *p;
+	struct pinctrl_state *s;
+	int ret;
+
+	p = devm_pinctrl_get(dev);
+	if (IS_ERR(p))
+		return p;
+
+	s = pinctrl_lookup_state(p, name);
+	if (IS_ERR(s)) {
+		devm_pinctrl_put(p);
+		return ERR_PTR(PTR_ERR(s));
+	}
+
+	ret = pinctrl_select_state(p, s);
+	if (ret < 0) {
+		devm_pinctrl_put(p);
+		return ERR_PTR(ret);
+	}
+
+	return p;
+}
+
+static inline struct pinctrl * __must_check devm_pinctrl_get_select_default(
+					struct device *dev)
+{
+	return devm_pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT);
+}
+
 #ifdef CONFIG_PINCONF
 
 extern int pin_config_get(const char *dev_name, const char *name,
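The devm_* consumer helpers mirror pinctrl_get_select(), but tie the handle's lifetime to
the device so no explicit put is needed on error or remove paths. Typical probe-time
usage might look like this sketch:

	struct pinctrl *pinctrl;

	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(pinctrl))
		dev_warn(&pdev->dev, "pins are not configured by pinctrl\n");
	/* no devm_pinctrl_put() needed; devres releases it on detach */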
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index e4d1de7..7d22ab0 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -154,7 +154,7 @@
 
 extern int pinctrl_register_mappings(struct pinctrl_map const *map,
 				unsigned num_maps);
-
+extern void pinctrl_provide_dummies(void);
 #else
 
 static inline int pinctrl_register_mappings(struct pinctrl_map const *map,
@@ -163,5 +163,8 @@
 	return 0;
 }
 
-#endif /* !CONFIG_PINMUX */
+static inline void pinctrl_provide_dummies(void)
+{
+}
+#endif /* !CONFIG_PINCTRL */
 #endif
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index ec431f0..e7a7201 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -25,7 +25,6 @@
  * @pin_config_get: get the config of a certain pin, if the requested config
  *	is not available on this controller this should return -ENOTSUPP
  *	and if it is available but disabled it should return -EINVAL
- * @pin_config_get: get the config of a certain pin
  * @pin_config_set: configure an individual pin
  * @pin_config_group_get: get configurations for an entire pin group
  * @pin_config_group_set: configure all pins in a group
@@ -33,6 +32,8 @@
  *	per-device info for a certain pin in debugfs
  * @pin_config_group_dbg_show: optional debugfs display hook that will provide
  *	per-device info for a certain group in debugfs
+ * @pin_config_config_dbg_show: optional debugfs display hook that will decode
+ *	and display a driver's pin configuration parameter
  */
 struct pinconf_ops {
 #ifdef CONFIG_GENERIC_PINCONF
@@ -56,6 +57,9 @@
 	void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev,
 					   struct seq_file *s,
 					   unsigned selector);
+	void (*pin_config_config_dbg_show) (struct pinctrl_dev *pctldev,
+					    struct seq_file *s,
+					    unsigned long config);
 };
 
 #endif
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 4e9f078..3b894a6 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -21,9 +21,11 @@
 
 struct device;
 struct pinctrl_dev;
+struct pinctrl_map;
 struct pinmux_ops;
 struct pinconf_ops;
 struct gpio_chip;
+struct device_node;
 
 /**
  * struct pinctrl_pin_desc - boards/machines provide information on their
@@ -64,17 +66,24 @@
 /**
  * struct pinctrl_ops - global pin control operations, to be implemented by
  * pin controller drivers.
- * @list_groups: list the number of selectable named groups available
- *	in this pinmux driver, the core will begin on 0 and call this
- *	repeatedly as long as it returns >= 0 to enumerate the groups
+ * @get_groups_count: Returns the count of total number of groups registered.
  * @get_group_name: return the group name of the pin group
  * @get_group_pins: return an array of pins corresponding to a certain
  *	group selector @pins, and the size of the array in @num_pins
  * @pin_dbg_show: optional debugfs display hook that will provide per-device
  *	info for a certain pin in debugfs
+ * @dt_node_to_map: parse a device tree "pin configuration node", and create
+ *	mapping table entries for it. These are returned through the @map and
+ *	@num_maps output parameters. This function is optional, and may be
+ *	omitted for pinctrl drivers that do not support device tree.
+ * @dt_free_map: free mapping table entries created via @dt_node_to_map. The
+ *	top-level @map pointer must be freed, along with any dynamically
+ *	allocated members of the mapping table entries themselves. This
+ *	function is optional, and may be omitted for pinctrl drivers that do
+ *	not support device tree.
  */
 struct pinctrl_ops {
-	int (*list_groups) (struct pinctrl_dev *pctldev, unsigned selector);
+	int (*get_groups_count) (struct pinctrl_dev *pctldev);
 	const char *(*get_group_name) (struct pinctrl_dev *pctldev,
 				       unsigned selector);
 	int (*get_group_pins) (struct pinctrl_dev *pctldev,
@@ -83,6 +92,11 @@
 			       unsigned *num_pins);
 	void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s,
 			  unsigned offset);
+	int (*dt_node_to_map) (struct pinctrl_dev *pctldev,
+			       struct device_node *np_config,
+			       struct pinctrl_map **map, unsigned *num_maps);
+	void (*dt_free_map) (struct pinctrl_dev *pctldev,
+			     struct pinctrl_map *map, unsigned num_maps);
 };
 
 /**
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 47e9237..1818dcb 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -23,15 +23,14 @@
 /**
  * struct pinmux_ops - pinmux operations, to be implemented by pin controller
  * drivers that support pinmuxing
- * @request: called by the core to see if a certain pin can be made available
+ * @request: called by the core to see if a certain pin can be made
  *	available for muxing. This is called by the core to acquire the pins
  *	before selecting any actual mux setting across a function. The driver
  *	is allowed to answer "no" by returning a negative error code
  * @free: the reverse function of the request() callback, frees a pin after
  *	being requested
- * @list_functions: list the number of selectable named functions available
- *	in this pinmux driver, the core will begin on 0 and call this
- *	repeatedly as long as it returns >= 0 to enumerate mux settings
+ * @get_functions_count: returns number of selectable named functions available
+ *	in this pinmux driver
  * @get_function_name: return the function name of the muxing selector,
  *	called by the core to figure out which mux setting it shall map a
  *	certain device to
@@ -62,7 +61,7 @@
 struct pinmux_ops {
 	int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
 	int (*free) (struct pinctrl_dev *pctldev, unsigned offset);
-	int (*list_functions) (struct pinctrl_dev *pctldev, unsigned selector);
+	int (*get_functions_count) (struct pinctrl_dev *pctldev);
 	const char *(*get_function_name) (struct pinctrl_dev *pctldev,
 					  unsigned selector);
 	int (*get_function_groups) (struct pinctrl_dev *pctldev,
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 6d626ff..e1ac1ce 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -6,6 +6,7 @@
 #define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
 #define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
 #define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
+#define PIPE_BUF_FLAG_PACKET	0x08	/* read() as a packet */
 
 /**
  *	struct pipe_buffer - a linux kernel pipe buffer
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 410b33d..32aef0a 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -509,6 +509,7 @@
 	TCA_NETEM_CORRUPT,
 	TCA_NETEM_LOSS,
 	TCA_NETEM_RATE,
+	TCA_NETEM_ECN,
 	__TCA_NETEM_MAX,
 };
 
@@ -654,4 +655,84 @@
 	__u32 lmax;
 };
 
+/* CODEL */
+
+enum {
+	TCA_CODEL_UNSPEC,
+	TCA_CODEL_TARGET,
+	TCA_CODEL_LIMIT,
+	TCA_CODEL_INTERVAL,
+	TCA_CODEL_ECN,
+	__TCA_CODEL_MAX
+};
+
+#define TCA_CODEL_MAX	(__TCA_CODEL_MAX - 1)
+
+struct tc_codel_xstats {
+	__u32	maxpacket; /* largest packet we've seen so far */
+	__u32	count;	   /* how many drops we've done since the last time we
+			    * entered dropping state
+			    */
+	__u32	lastcount; /* count at entry to dropping state */
+	__u32	ldelay;    /* in-queue delay seen by most recently dequeued packet */
+	__s32	drop_next; /* time to drop next packet */
+	__u32	drop_overlimit; /* number of times the max qdisc packet limit was hit */
+	__u32	ecn_mark;  /* number of packets we ECN marked instead of dropped */
+	__u32	dropping;  /* are we in dropping state ? */
+};
+
+/* FQ_CODEL */
+
+enum {
+	TCA_FQ_CODEL_UNSPEC,
+	TCA_FQ_CODEL_TARGET,
+	TCA_FQ_CODEL_LIMIT,
+	TCA_FQ_CODEL_INTERVAL,
+	TCA_FQ_CODEL_ECN,
+	TCA_FQ_CODEL_FLOWS,
+	TCA_FQ_CODEL_QUANTUM,
+	__TCA_FQ_CODEL_MAX
+};
+
+#define TCA_FQ_CODEL_MAX	(__TCA_FQ_CODEL_MAX - 1)
+
+enum {
+	TCA_FQ_CODEL_XSTATS_QDISC,
+	TCA_FQ_CODEL_XSTATS_CLASS,
+};
+
+struct tc_fq_codel_qd_stats {
+	__u32	maxpacket;	/* largest packet we've seen so far */
+	__u32	drop_overlimit; /* number of times the max qdisc
+				 * packet limit was hit
+				 */
+	__u32	ecn_mark;	/* number of packets we ECN marked
+				 * instead of being dropped
+				 */
+	__u32	new_flow_count; /* number of times packets
+				 * created a 'new flow'
+				 */
+	__u32	new_flows_len;	/* count of flows in new list */
+	__u32	old_flows_len;	/* count of flows in old list */
+};
+
+struct tc_fq_codel_cl_stats {
+	__s32	deficit;
+	__u32	ldelay;		/* in-queue delay seen by most recently
+				 * dequeued packet
+				 */
+	__u32	count;
+	__u32	lastcount;
+	__u32	dropping;
+	__s32	drop_next;
+};
+
+struct tc_fq_codel_xstats {
+	__u32	type;
+	union {
+		struct tc_fq_codel_qd_stats qdisc_stats;
+		struct tc_fq_codel_cl_stats class_stats;
+	};
+};
+
 #endif
diff --git a/include/linux/platform_data/wiznet.h b/include/linux/platform_data/wiznet.h
new file mode 100644
index 0000000..b5d8c19
--- /dev/null
+++ b/include/linux/platform_data/wiznet.h
@@ -0,0 +1,24 @@
+/*
+ * Ethernet driver for the WIZnet W5x00 chip.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef PLATFORM_DATA_WIZNET_H
+#define PLATFORM_DATA_WIZNET_H
+
+#include <linux/if_ether.h>
+
+struct wiznet_platform_data {
+	int	link_gpio;
+	u8	mac_addr[ETH_ALEN];
+};
+
+#ifndef CONFIG_WIZNET_BUS_SHIFT
+#define CONFIG_WIZNET_BUS_SHIFT 0
+#endif
+
+#define W5100_BUS_DIRECT_SIZE	(0x8000 << CONFIG_WIZNET_BUS_SHIFT)
+#define W5300_BUS_DIRECT_SIZE	(0x0400 << CONFIG_WIZNET_BUS_SHIFT)
+
+#endif /* PLATFORM_DATA_WIZNET_H */
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index dd2e44f..945704c 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -136,4 +136,12 @@
 extern void ptp_clock_event(struct ptp_clock *ptp,
 			    struct ptp_clock_event *event);
 
+/**
+ * ptp_clock_index() - obtain the device index of a PTP clock
+ *
+ * @ptp:    The clock obtained from ptp_clock_register().
+ */
+
+extern int ptp_clock_index(struct ptp_clock *ptp);
+
 #endif
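ptp_clock_index() is intended for drivers that must report which /dev/ptpN device they
own, typically from the ethtool get_ts_info() callback. A sketch (the driver-private
structure and its ptp_clock member are invented):

static int foo_get_ts_info(struct net_device *ndev,
			   struct ethtool_ts_info *info)
{
	struct foo_priv *priv = netdev_priv(ndev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = priv->ptp_clock ?
			  ptp_clock_index(priv->ptp_clock) : -1;
	return 0;
}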
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index a90abb6..56af22e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -46,7 +46,13 @@
 /**
  * Configuration for the register map of a device.
  *
+ * @name: Optional name of the regmap. Useful when a device has multiple
+ *        register regions.
+ *
  * @reg_bits: Number of bits in a register address, mandatory.
+ * @reg_stride: The register address stride. Valid register addresses are a
+ *              multiple of this value. If set to 0, a value of 1 will be
+ *              used.
  * @pad_bits: Number of bits of padding between register and value.
  * @val_bits: Number of bits in a register value, mandatory.
  *
@@ -70,6 +76,9 @@
  * @write_flag_mask: Mask to be set in the top byte of the register when doing
  *                   a write. If both read_flag_mask and write_flag_mask are
  *                   empty the regmap_bus default masks are used.
+ * @use_single_rw: If set, converts the bulk read and write operations into
+ *		    a series of single read and write operations. This is useful
+ *		    for devices that do not support bulk read and write.
  *
  * @cache_type: The actual cache type.
  * @reg_defaults_raw: Power on reset values for registers (for use with
@@ -77,7 +86,10 @@
  * @num_reg_defaults_raw: Number of elements in reg_defaults_raw.
  */
 struct regmap_config {
+	const char *name;
+
 	int reg_bits;
+	int reg_stride;
 	int pad_bits;
 	int val_bits;
 
@@ -95,20 +107,25 @@
 
 	u8 read_flag_mask;
 	u8 write_flag_mask;
+
+	bool use_single_rw;
 };
 
-typedef int (*regmap_hw_write)(struct device *dev, const void *data,
+typedef int (*regmap_hw_write)(void *context, const void *data,
 			       size_t count);
-typedef int (*regmap_hw_gather_write)(struct device *dev,
+typedef int (*regmap_hw_gather_write)(void *context,
 				      const void *reg, size_t reg_len,
 				      const void *val, size_t val_len);
-typedef int (*regmap_hw_read)(struct device *dev,
+typedef int (*regmap_hw_read)(void *context,
 			      const void *reg_buf, size_t reg_size,
 			      void *val_buf, size_t val_size);
+typedef void (*regmap_hw_free_context)(void *context);
 
 /**
  * Description of a hardware bus for the register map infrastructure.
  *
+ * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
+ *           to perform locking.
  * @write: Write operation.
  * @gather_write: Write operation with split register/value, return -ENOTSUPP
  *                if not implemented  on a given device.
@@ -118,31 +135,42 @@
  *                  a read.
  */
 struct regmap_bus {
+	bool fast_io;
 	regmap_hw_write write;
 	regmap_hw_gather_write gather_write;
 	regmap_hw_read read;
+	regmap_hw_free_context free_context;
 	u8 read_flag_mask;
 };
 
 struct regmap *regmap_init(struct device *dev,
 			   const struct regmap_bus *bus,
+			   void *bus_context,
 			   const struct regmap_config *config);
 struct regmap *regmap_init_i2c(struct i2c_client *i2c,
 			       const struct regmap_config *config);
 struct regmap *regmap_init_spi(struct spi_device *dev,
 			       const struct regmap_config *config);
+struct regmap *regmap_init_mmio(struct device *dev,
+				void __iomem *regs,
+				const struct regmap_config *config);
 
 struct regmap *devm_regmap_init(struct device *dev,
 				const struct regmap_bus *bus,
+				void *bus_context,
 				const struct regmap_config *config);
 struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
 				    const struct regmap_config *config);
 struct regmap *devm_regmap_init_spi(struct spi_device *dev,
 				    const struct regmap_config *config);
+struct regmap *devm_regmap_init_mmio(struct device *dev,
+				     void __iomem *regs,
+				     const struct regmap_config *config);
 
 void regmap_exit(struct regmap *map);
 int regmap_reinit_cache(struct regmap *map,
 			const struct regmap_config *config);
+struct regmap *dev_get_regmap(struct device *dev, const char *name);
 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
 int regmap_raw_write(struct regmap *map, unsigned int reg,
 		     const void *val, size_t val_len);
@@ -191,6 +219,7 @@
  * @status_base: Base status register address.
  * @mask_base:   Base mask register address.
  * @ack_base:    Base ack address.  If zero then the chip is clear on read.
+ * @irq_reg_stride:  Stride to use for chips where registers are not contiguous.
  *
  * @num_regs:    Number of registers in each control bank.
  * @irqs:        Descriptors for individual IRQs.  Interrupt numbers are
@@ -203,6 +232,7 @@
 	unsigned int status_base;
 	unsigned int mask_base;
 	unsigned int ack_base;
+	unsigned int irq_reg_stride;
 
 	int num_regs;
 
@@ -217,6 +247,7 @@
 			struct regmap_irq_chip_data **data);
 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
+int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq);
 
 #else
 
@@ -327,6 +358,13 @@
 	return -EINVAL;
 }
 
+static inline struct regmap *dev_get_regmap(struct device *dev,
+					    const char *name)
+{
+	WARN_ONCE(1, "regmap API is disabled");
+	return NULL;
+}
+
 #endif
 
 #endif
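A sketch of how a driver might combine the new MMIO bus support with reg_stride,
assuming `base` is an already-ioremapped register window and the register layout is
invented for illustration:

static const struct regmap_config foo_regmap_config = {
	.name = "foo-core",
	.reg_bits = 32,
	.reg_stride = 4,	/* registers sit on 32-bit boundaries */
	.val_bits = 32,
};

	map = devm_regmap_init_mmio(&pdev->dev, base, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_write(map, 0x04, 0x1);	/* e.g. hit an enable bit */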
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fa8b55b..b0432cc 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -19,6 +19,7 @@
 #include <linux/notifier.h>
 #include <linux/regulator/consumer.h>
 
+struct regmap;
 struct regulator_dev;
 struct regulator_init_data;
 
@@ -45,6 +46,7 @@
  *               The driver should select the voltage closest to min_uV.
  * @set_voltage_sel: Set the voltage for the regulator using the specified
  *                   selector.
+ * @map_voltage: Convert a voltage into a selector
  * @get_voltage: Return the currently configured voltage for the regulator.
  * @get_voltage_sel: Return the currently configured voltage selector for the
  *                   regulator.
@@ -90,6 +92,7 @@
 	/* get/set regulator voltage */
 	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV,
 			    unsigned *selector);
+	int (*map_voltage)(struct regulator_dev *, int min_uV, int max_uV);
 	int (*set_voltage_sel) (struct regulator_dev *, unsigned selector);
 	int (*get_voltage) (struct regulator_dev *);
 	int (*get_voltage_sel) (struct regulator_dev *);
@@ -148,19 +151,30 @@
 };
 
 /**
- * struct regulator_desc - Regulator descriptor
+ * struct regulator_desc - Static regulator descriptor
  *
- * Each regulator registered with the core is described with a structure of
- * this type.
+ * Each regulator registered with the core is described with a
+ * structure of this type and a struct regulator_config.  This
+ * structure contains the non-varying parts of the regulator
+ * description.
  *
  * @name: Identifying name for the regulator.
  * @supply_name: Identifying the regulator supply
  * @id: Numerical identifier for the regulator.
- * @n_voltages: Number of selectors available for ops.list_voltage().
  * @ops: Regulator operations table.
  * @irq: Interrupt number for the regulator.
  * @type: Indicates if the regulator is a voltage or current regulator.
  * @owner: Module providing the regulator, used for refcounting.
+ *
+ * @n_voltages: Number of selectors available for ops.list_voltage().
+ *
+ * @min_uV: Voltage given by the lowest selector (if linear mapping)
+ * @uV_step: Voltage increase with each selector (if linear mapping)
+ *
+ * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
+ * @vsel_mask: Mask for register bitfield used for selector
+ * @enable_reg: Register for control when using regmap enable/disable ops
+ * @enable_mask: Mask for control when using regmap enable/disable ops
  */
 struct regulator_desc {
 	const char *name;
@@ -171,6 +185,36 @@
 	int irq;
 	enum regulator_type type;
 	struct module *owner;
+
+	unsigned int min_uV;
+	unsigned int uV_step;
+
+	unsigned int vsel_reg;
+	unsigned int vsel_mask;
+	unsigned int enable_reg;
+	unsigned int enable_mask;
+};
+
+/**
+ * struct regulator_config - Dynamic regulator descriptor
+ *
+ * Each regulator registered with the core is described with a
+ * structure of this type and a struct regulator_desc.  This structure
+ * contains the runtime variable parts of the regulator description.
+ *
+ * @dev: struct device for the regulator
+ * @init_data: platform provided init data, passed through by driver
+ * @driver_data: private regulator data
+ * @of_node: OpenFirmware node to parse for device tree bindings (may be
+ *           NULL).
+ * @regmap: regmap to use for core regmap helpers
+ */
+struct regulator_config {
+	struct device *dev;
+	const struct regulator_init_data *init_data;
+	void *driver_data;
+	struct device_node *of_node;
+	struct regmap *regmap;
 };
 
 /*
@@ -184,7 +228,7 @@
  * no other direct access).
  */
 struct regulator_dev {
-	struct regulator_desc *desc;
+	const struct regulator_desc *desc;
 	int exclusive;
 	u32 use_count;
 	u32 open_count;
@@ -201,6 +245,7 @@
 	struct device dev;
 	struct regulation_constraints *constraints;
 	struct regulator *supply;	/* for tree */
+	struct regmap *regmap;
 
 	struct delayed_work disable_work;
 	int deferred_disables;
@@ -210,9 +255,9 @@
 	struct dentry *debugfs;
 };
 
-struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
-	struct device *dev, const struct regulator_init_data *init_data,
-	void *driver_data, struct device_node *of_node);
+struct regulator_dev *
+regulator_register(const struct regulator_desc *regulator_desc,
+		   const struct regulator_config *config);
 void regulator_unregister(struct regulator_dev *rdev);
 
 int regulator_notifier_call_chain(struct regulator_dev *rdev,
@@ -224,6 +269,18 @@
 
 int regulator_mode_to_status(unsigned int);
 
+int regulator_list_voltage_linear(struct regulator_dev *rdev,
+				  unsigned int selector);
+int regulator_map_voltage_linear(struct regulator_dev *rdev,
+				  int min_uV, int max_uV);
+int regulator_map_voltage_iterate(struct regulator_dev *rdev,
+				  int min_uV, int max_uV);
+int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev);
+int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel);
+int regulator_is_enabled_regmap(struct regulator_dev *rdev);
+int regulator_enable_regmap(struct regulator_dev *rdev);
+int regulator_disable_regmap(struct regulator_dev *rdev);
+
 void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
 
 #endif
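Taken together, the desc/config split and the regmap helpers let a simple linear-range
regulator be described almost entirely as data. A sketch under the new API (register
addresses, voltage range and names are invented):

static struct regulator_ops foo_ldo_ops = {
	.list_voltage		= regulator_list_voltage_linear,
	.map_voltage		= regulator_map_voltage_linear,
	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
	.enable			= regulator_enable_regmap,
	.disable		= regulator_disable_regmap,
	.is_enabled		= regulator_is_enabled_regmap,
};

static const struct regulator_desc foo_ldo_desc = {
	.name		= "foo-ldo1",
	.id		= 0,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.ops		= &foo_ldo_ops,
	.n_voltages	= 32,
	.min_uV		= 800000,
	.uV_step	= 50000,
	.vsel_reg	= 0x10,
	.vsel_mask	= 0x1f,
	.enable_reg	= 0x12,
	.enable_mask	= BIT(0),
};

	struct regulator_config cfg = {
		.dev = &pdev->dev,
		.init_data = pdata->init_data,
		.regmap = foo->regmap,
	};

	rdev = regulator_register(&foo_ldo_desc, &cfg);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);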
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 936a7d8..f83f744 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -26,6 +26,12 @@
  * @gpio:		GPIO to use for enable control
  * 			set to -EINVAL if not used
  * @startup_delay:	Start-up time in microseconds
+ * @gpio_is_open_drain: GPIO pin is open drain or normal type.
+ *			If it is open drain type then HIGH is set by
+ *			configuring the gpio as input (relying on the
+ *			external pull-up) and LOW is set by driving the
+ *			gpio as an output low. For the non-open-drain
+ *			case, the gpio is kept as an output and driven
+ *			low/high accordingly.
  * @enable_high:	Polarity of enable GPIO
  *			1 = Active high, 0 = Active low
  * @enabled_at_boot:	Whether regulator has been enabled at
@@ -43,6 +49,7 @@
 	int microvolts;
 	int gpio;
 	unsigned startup_delay;
+	unsigned gpio_is_open_drain:1;
 	unsigned enable_high:1;
 	unsigned enabled_at_boot:1;
 	struct regulator_init_data *init_data;
diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h
index 769704f..f921796 100644
--- a/include/linux/regulator/of_regulator.h
+++ b/include/linux/regulator/of_regulator.h
@@ -6,10 +6,20 @@
 #ifndef __LINUX_OF_REG_H
 #define __LINUX_OF_REG_H
 
+struct of_regulator_match {
+	const char *name;
+	void *driver_data;
+	struct regulator_init_data *init_data;
+	struct device_node *of_node;
+};
+
 #if defined(CONFIG_OF)
 extern struct regulator_init_data
 	*of_get_regulator_init_data(struct device *dev,
 				    struct device_node *node);
+extern int of_regulator_match(struct device *dev, struct device_node *node,
+			      struct of_regulator_match *matches,
+			      unsigned int num_matches);
 #else
 static inline struct regulator_init_data
 	*of_get_regulator_init_data(struct device *dev,
@@ -17,6 +27,14 @@
 {
 	return NULL;
 }
+
+static inline int of_regulator_match(struct device *dev,
+				     struct device_node *node,
+				     struct of_regulator_match *matches,
+				     unsigned int num_matches)
+{
+	return 0;
+}
 #endif /* CONFIG_OF */
 
 #endif /* __LINUX_OF_REG_H */
diff --git a/include/linux/regulator/tps62360.h b/include/linux/regulator/tps62360.h
index 6a5c1b2..a4c4939 100644
--- a/include/linux/regulator/tps62360.h
+++ b/include/linux/regulator/tps62360.h
@@ -26,13 +26,10 @@
 #ifndef __LINUX_REGULATOR_TPS62360_H
 #define __LINUX_REGULATOR_TPS62360_H
 
-#include <linux/regulator/machine.h>
-
 /*
  * struct tps62360_regulator_platform_data - tps62360 regulator platform data.
  *
  * @reg_init_data: The regulator init data.
- * @en_force_pwm: Enable force pwm or not.
 * @en_discharge: Enable discharging of the output capacitor via the
 *                internal register.
 * @en_internal_pulldn: Enable the internal pull-down or not.
@@ -44,8 +41,7 @@
  * @vsel1_def_state: Default state of vsel1. 1 if it is high else 0.
  */
 struct tps62360_regulator_platform_data {
-	struct regulator_init_data reg_init_data;
-	bool en_force_pwm;
+	struct regulator_init_data *reg_init_data;
 	bool en_discharge;
 	bool en_internal_pulldn;
 	int vsel0_gpio;
diff --git a/include/linux/regulator/tps65090-regulator.h b/include/linux/regulator/tps65090-regulator.h
new file mode 100644
index 0000000..0fa04b6
--- /dev/null
+++ b/include/linux/regulator/tps65090-regulator.h
@@ -0,0 +1,50 @@
+/*
+ * Regulator driver interface for TI TPS65090 PMIC family
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __REGULATOR_TPS65090_H
+#define __REGULATOR_TPS65090_H
+
+#include <linux/regulator/machine.h>
+
+#define tps65090_rails(_name) "tps65090_"#_name
+
+enum {
+	TPS65090_ID_DCDC1,
+	TPS65090_ID_DCDC2,
+	TPS65090_ID_DCDC3,
+	TPS65090_ID_FET1,
+	TPS65090_ID_FET2,
+	TPS65090_ID_FET3,
+	TPS65090_ID_FET4,
+	TPS65090_ID_FET5,
+	TPS65090_ID_FET6,
+	TPS65090_ID_FET7,
+};
+
+/*
+ * struct tps65090_regulator_platform_data - tps65090 regulator platform data
+ *
+ * @regulator: The regulator init data.
+ */
+
+struct tps65090_regulator_platform_data {
+	struct regulator_init_data regulator;
+};
+
+#endif	/* __REGULATOR_TPS65090_H */
diff --git a/include/linux/rndis.h b/include/linux/rndis.h
new file mode 100644
index 0000000..0c8dc71
--- /dev/null
+++ b/include/linux/rndis.h
@@ -0,0 +1,390 @@
+/*
+ * Remote Network Driver Interface Specification (RNDIS)
+ * definitions of the magic numbers used by this protocol
+ */
+
+/* Remote NDIS Versions */
+#define RNDIS_MAJOR_VERSION		0x00000001
+#define RNDIS_MINOR_VERSION		0x00000000
+
+/* Device Flags */
+#define RNDIS_DF_CONNECTIONLESS		0x00000001U
+#define RNDIS_DF_CONNECTION_ORIENTED	0x00000002U
+#define RNDIS_DF_RAW_DATA		0x00000004U
+
+/*
+ * Codes for "msg_type" field of rndis messages;
+ * only the data channel uses packet messages (maybe batched);
+ * everything else goes on the control channel.
+ */
+#define RNDIS_MSG_COMPLETION	0x80000000
+#define RNDIS_MSG_PACKET	0x00000001	/* 1-N packets */
+#define RNDIS_MSG_INIT		0x00000002
+#define RNDIS_MSG_INIT_C	(RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION)
+#define RNDIS_MSG_HALT		0x00000003
+#define RNDIS_MSG_QUERY		0x00000004
+#define RNDIS_MSG_QUERY_C	(RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION)
+#define RNDIS_MSG_SET		0x00000005
+#define RNDIS_MSG_SET_C		(RNDIS_MSG_SET|RNDIS_MSG_COMPLETION)
+#define RNDIS_MSG_RESET		0x00000006
+#define RNDIS_MSG_RESET_C	(RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION)
+#define RNDIS_MSG_INDICATE	0x00000007
+#define RNDIS_MSG_KEEPALIVE	0x00000008
+#define RNDIS_MSG_KEEPALIVE_C	(RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION)
+/*
+ * Reserved message type for private communication between lower-layer host
+ * driver and remote device, if necessary.
+ */
+#define RNDIS_MSG_BUS		0xff000001
+
+/* codes for "status" field of completion messages */
+#define	RNDIS_STATUS_SUCCESS			0x00000000
+#define RNDIS_STATUS_PENDING			0x00000103
+
+/*  Status codes */
+#define RNDIS_STATUS_NOT_RECOGNIZED		0x00010001
+#define RNDIS_STATUS_NOT_COPIED			0x00010002
+#define RNDIS_STATUS_NOT_ACCEPTED		0x00010003
+#define RNDIS_STATUS_CALL_ACTIVE		0x00010007
+
+#define RNDIS_STATUS_ONLINE			0x40010003
+#define RNDIS_STATUS_RESET_START		0x40010004
+#define RNDIS_STATUS_RESET_END			0x40010005
+#define RNDIS_STATUS_RING_STATUS		0x40010006
+#define RNDIS_STATUS_CLOSED			0x40010007
+#define RNDIS_STATUS_WAN_LINE_UP		0x40010008
+#define RNDIS_STATUS_WAN_LINE_DOWN		0x40010009
+#define RNDIS_STATUS_WAN_FRAGMENT		0x4001000A
+#define	RNDIS_STATUS_MEDIA_CONNECT		0x4001000B
+#define	RNDIS_STATUS_MEDIA_DISCONNECT		0x4001000C
+#define RNDIS_STATUS_HARDWARE_LINE_UP		0x4001000D
+#define RNDIS_STATUS_HARDWARE_LINE_DOWN		0x4001000E
+#define RNDIS_STATUS_INTERFACE_UP		0x4001000F
+#define RNDIS_STATUS_INTERFACE_DOWN		0x40010010
+#define RNDIS_STATUS_MEDIA_BUSY			0x40010011
+#define	RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION	0x40010012
+#define RNDIS_STATUS_WW_INDICATION		RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION
+#define RNDIS_STATUS_LINK_SPEED_CHANGE		0x40010013L
+
+#define RNDIS_STATUS_NOT_RESETTABLE		0x80010001
+#define RNDIS_STATUS_SOFT_ERRORS		0x80010003
+#define RNDIS_STATUS_HARD_ERRORS		0x80010004
+#define RNDIS_STATUS_BUFFER_OVERFLOW		0x80000005
+
+#define	RNDIS_STATUS_FAILURE			0xC0000001
+#define RNDIS_STATUS_RESOURCES			0xC000009A
+#define	RNDIS_STATUS_NOT_SUPPORTED		0xc00000BB
+#define RNDIS_STATUS_CLOSING			0xC0010002
+#define RNDIS_STATUS_BAD_VERSION		0xC0010004
+#define RNDIS_STATUS_BAD_CHARACTERISTICS	0xC0010005
+#define RNDIS_STATUS_ADAPTER_NOT_FOUND		0xC0010006
+#define RNDIS_STATUS_OPEN_FAILED		0xC0010007
+#define RNDIS_STATUS_DEVICE_FAILED		0xC0010008
+#define RNDIS_STATUS_MULTICAST_FULL		0xC0010009
+#define RNDIS_STATUS_MULTICAST_EXISTS		0xC001000A
+#define RNDIS_STATUS_MULTICAST_NOT_FOUND	0xC001000B
+#define RNDIS_STATUS_REQUEST_ABORTED		0xC001000C
+#define RNDIS_STATUS_RESET_IN_PROGRESS		0xC001000D
+#define RNDIS_STATUS_CLOSING_INDICATING		0xC001000E
+#define RNDIS_STATUS_INVALID_PACKET		0xC001000F
+#define RNDIS_STATUS_OPEN_LIST_FULL		0xC0010010
+#define RNDIS_STATUS_ADAPTER_NOT_READY		0xC0010011
+#define RNDIS_STATUS_ADAPTER_NOT_OPEN		0xC0010012
+#define RNDIS_STATUS_NOT_INDICATING		0xC0010013
+#define RNDIS_STATUS_INVALID_LENGTH		0xC0010014
+#define	RNDIS_STATUS_INVALID_DATA		0xC0010015
+#define RNDIS_STATUS_BUFFER_TOO_SHORT		0xC0010016
+#define RNDIS_STATUS_INVALID_OID		0xC0010017
+#define RNDIS_STATUS_ADAPTER_REMOVED		0xC0010018
+#define RNDIS_STATUS_UNSUPPORTED_MEDIA		0xC0010019
+#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE	0xC001001A
+#define RNDIS_STATUS_FILE_NOT_FOUND		0xC001001B
+#define RNDIS_STATUS_ERROR_READING_FILE		0xC001001C
+#define RNDIS_STATUS_ALREADY_MAPPED		0xC001001D
+#define RNDIS_STATUS_RESOURCE_CONFLICT		0xC001001E
+#define RNDIS_STATUS_NO_CABLE			0xC001001F
+
+#define RNDIS_STATUS_INVALID_SAP		0xC0010020
+#define RNDIS_STATUS_SAP_IN_USE			0xC0010021
+#define RNDIS_STATUS_INVALID_ADDRESS		0xC0010022
+#define RNDIS_STATUS_VC_NOT_ACTIVATED		0xC0010023
+#define RNDIS_STATUS_DEST_OUT_OF_ORDER		0xC0010024
+#define RNDIS_STATUS_VC_NOT_AVAILABLE		0xC0010025
+#define RNDIS_STATUS_CELLRATE_NOT_AVAILABLE	0xC0010026
+#define RNDIS_STATUS_INCOMPATABLE_QOS		0xC0010027
+#define RNDIS_STATUS_AAL_PARAMS_UNSUPPORTED	0xC0010028
+#define RNDIS_STATUS_NO_ROUTE_TO_DESTINATION	0xC0010029
+
+#define RNDIS_STATUS_TOKEN_RING_OPEN_ERROR	0xC0011000
+
+/* codes for RNDIS_OID_GEN_PHYSICAL_MEDIUM */
+#define	RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED	0x00000000
+#define	RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN	0x00000001
+#define	RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM	0x00000002
+#define	RNDIS_PHYSICAL_MEDIUM_PHONE_LINE	0x00000003
+#define	RNDIS_PHYSICAL_MEDIUM_POWER_LINE	0x00000004
+#define	RNDIS_PHYSICAL_MEDIUM_DSL		0x00000005
+#define	RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL	0x00000006
+#define	RNDIS_PHYSICAL_MEDIUM_1394		0x00000007
+#define	RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN	0x00000008
+#define	RNDIS_PHYSICAL_MEDIUM_MAX		0x00000009
+
+/*  Remote NDIS medium types. */
+#define RNDIS_MEDIUM_UNSPECIFIED		0x00000000
+#define RNDIS_MEDIUM_802_3			0x00000000
+#define RNDIS_MEDIUM_802_5			0x00000001
+#define RNDIS_MEDIUM_FDDI			0x00000002
+#define RNDIS_MEDIUM_WAN			0x00000003
+#define RNDIS_MEDIUM_LOCAL_TALK			0x00000004
+#define RNDIS_MEDIUM_ARCNET_RAW			0x00000006
+#define RNDIS_MEDIUM_ARCNET_878_2		0x00000007
+#define RNDIS_MEDIUM_ATM			0x00000008
+#define RNDIS_MEDIUM_WIRELESS_LAN		0x00000009
+#define RNDIS_MEDIUM_IRDA			0x0000000A
+#define RNDIS_MEDIUM_BPC			0x0000000B
+#define RNDIS_MEDIUM_CO_WAN			0x0000000C
+#define RNDIS_MEDIUM_1394			0x0000000D
+/* Not a real medium, defined as an upper-bound */
+#define RNDIS_MEDIUM_MAX			0x0000000E
+
+/* Remote NDIS medium connection states. */
+#define RNDIS_MEDIA_STATE_CONNECTED		0x00000000
+#define RNDIS_MEDIA_STATE_DISCONNECTED		0x00000001
+
+/* packet filter bits used by RNDIS_OID_GEN_CURRENT_PACKET_FILTER */
+#define RNDIS_PACKET_TYPE_DIRECTED		0x00000001
+#define RNDIS_PACKET_TYPE_MULTICAST		0x00000002
+#define RNDIS_PACKET_TYPE_ALL_MULTICAST		0x00000004
+#define RNDIS_PACKET_TYPE_BROADCAST		0x00000008
+#define RNDIS_PACKET_TYPE_SOURCE_ROUTING	0x00000010
+#define RNDIS_PACKET_TYPE_PROMISCUOUS		0x00000020
+#define RNDIS_PACKET_TYPE_SMT			0x00000040
+#define RNDIS_PACKET_TYPE_ALL_LOCAL		0x00000080
+#define RNDIS_PACKET_TYPE_GROUP			0x00001000
+#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL	0x00002000
+#define RNDIS_PACKET_TYPE_FUNCTIONAL		0x00004000
+#define RNDIS_PACKET_TYPE_MAC_FRAME		0x00008000
+
+/* RNDIS_OID_GEN_MINIPORT_INFO constants */
+#define RNDIS_MINIPORT_BUS_MASTER			0x00000001
+#define RNDIS_MINIPORT_WDM_DRIVER			0x00000002
+#define RNDIS_MINIPORT_SG_LIST				0x00000004
+#define RNDIS_MINIPORT_SUPPORTS_MEDIA_QUERY		0x00000008
+#define RNDIS_MINIPORT_INDICATES_PACKETS		0x00000010
+#define RNDIS_MINIPORT_IGNORE_PACKET_QUEUE		0x00000020
+#define RNDIS_MINIPORT_IGNORE_REQUEST_QUEUE		0x00000040
+#define RNDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS		0x00000080
+#define RNDIS_MINIPORT_INTERMEDIATE_DRIVER		0x00000100
+#define RNDIS_MINIPORT_IS_NDIS_5			0x00000200
+#define RNDIS_MINIPORT_IS_CO				0x00000400
+#define RNDIS_MINIPORT_DESERIALIZE			0x00000800
+#define RNDIS_MINIPORT_REQUIRES_MEDIA_POLLING		0x00001000
+#define RNDIS_MINIPORT_SUPPORTS_MEDIA_SENSE		0x00002000
+#define RNDIS_MINIPORT_NETBOOT_CARD			0x00004000
+#define RNDIS_MINIPORT_PM_SUPPORTED			0x00008000
+#define RNDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE	0x00010000
+#define RNDIS_MINIPORT_USES_SAFE_BUFFER_APIS		0x00020000
+#define RNDIS_MINIPORT_HIDDEN				0x00040000
+#define RNDIS_MINIPORT_SWENUM				0x00080000
+#define RNDIS_MINIPORT_SURPRISE_REMOVE_OK		0x00100000
+#define RNDIS_MINIPORT_NO_HALT_ON_SUSPEND		0x00200000
+#define RNDIS_MINIPORT_HARDWARE_DEVICE			0x00400000
+#define RNDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS	0x00800000
+#define RNDIS_MINIPORT_64BITS_DMA			0x01000000
+
+#define RNDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA		0x00000001
+#define RNDIS_MAC_OPTION_RECEIVE_SERIALIZED		0x00000002
+#define RNDIS_MAC_OPTION_TRANSFERS_NOT_PEND		0x00000004
+#define RNDIS_MAC_OPTION_NO_LOOPBACK			0x00000008
+#define RNDIS_MAC_OPTION_FULL_DUPLEX			0x00000010
+#define RNDIS_MAC_OPTION_EOTX_INDICATION		0x00000020
+#define RNDIS_MAC_OPTION_8021P_PRIORITY			0x00000040
+#define RNDIS_MAC_OPTION_RESERVED			0x80000000
+
+/* Object Identifiers used by NdisRequest Query/Set Information */
+/* General (Required) Objects */
+#define RNDIS_OID_GEN_SUPPORTED_LIST		0x00010101
+#define RNDIS_OID_GEN_HARDWARE_STATUS		0x00010102
+#define RNDIS_OID_GEN_MEDIA_SUPPORTED		0x00010103
+#define RNDIS_OID_GEN_MEDIA_IN_USE		0x00010104
+#define RNDIS_OID_GEN_MAXIMUM_LOOKAHEAD		0x00010105
+#define RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE	0x00010106
+#define RNDIS_OID_GEN_LINK_SPEED		0x00010107
+#define RNDIS_OID_GEN_TRANSMIT_BUFFER_SPACE	0x00010108
+#define RNDIS_OID_GEN_RECEIVE_BUFFER_SPACE	0x00010109
+#define RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE	0x0001010A
+#define RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE	0x0001010B
+#define RNDIS_OID_GEN_VENDOR_ID			0x0001010C
+#define RNDIS_OID_GEN_VENDOR_DESCRIPTION	0x0001010D
+#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER	0x0001010E
+#define RNDIS_OID_GEN_CURRENT_LOOKAHEAD		0x0001010F
+#define RNDIS_OID_GEN_DRIVER_VERSION		0x00010110
+#define RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE	0x00010111
+#define RNDIS_OID_GEN_PROTOCOL_OPTIONS		0x00010112
+#define RNDIS_OID_GEN_MAC_OPTIONS		0x00010113
+#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS	0x00010114
+#define RNDIS_OID_GEN_MAXIMUM_SEND_PACKETS	0x00010115
+#define RNDIS_OID_GEN_VENDOR_DRIVER_VERSION	0x00010116
+#define RNDIS_OID_GEN_SUPPORTED_GUIDS		0x00010117
+#define RNDIS_OID_GEN_NETWORK_LAYER_ADDRESSES	0x00010118
+#define RNDIS_OID_GEN_TRANSPORT_HEADER_OFFSET	0x00010119
+#define RNDIS_OID_GEN_PHYSICAL_MEDIUM		0x00010202
+#define RNDIS_OID_GEN_MACHINE_NAME		0x0001021A
+#define RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER	0x0001021B
+#define RNDIS_OID_GEN_VLAN_ID			0x0001021C
+
+/* Optional OIDs */
+#define RNDIS_OID_GEN_MEDIA_CAPABILITIES	0x00010201
+
+/* Required statistics OIDs */
+#define RNDIS_OID_GEN_XMIT_OK			0x00020101
+#define RNDIS_OID_GEN_RCV_OK			0x00020102
+#define RNDIS_OID_GEN_XMIT_ERROR		0x00020103
+#define RNDIS_OID_GEN_RCV_ERROR			0x00020104
+#define RNDIS_OID_GEN_RCV_NO_BUFFER		0x00020105
+
+/* Optional statistics OIDs */
+#define RNDIS_OID_GEN_DIRECTED_BYTES_XMIT	0x00020201
+#define RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT	0x00020202
+#define RNDIS_OID_GEN_MULTICAST_BYTES_XMIT	0x00020203
+#define RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT	0x00020204
+#define RNDIS_OID_GEN_BROADCAST_BYTES_XMIT	0x00020205
+#define RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT	0x00020206
+#define RNDIS_OID_GEN_DIRECTED_BYTES_RCV	0x00020207
+#define RNDIS_OID_GEN_DIRECTED_FRAMES_RCV	0x00020208
+#define RNDIS_OID_GEN_MULTICAST_BYTES_RCV	0x00020209
+#define RNDIS_OID_GEN_MULTICAST_FRAMES_RCV	0x0002020A
+#define RNDIS_OID_GEN_BROADCAST_BYTES_RCV	0x0002020B
+#define RNDIS_OID_GEN_BROADCAST_FRAMES_RCV	0x0002020C
+
+#define RNDIS_OID_GEN_RCV_CRC_ERROR		0x0002020D
+#define RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH	0x0002020E
+
+#define RNDIS_OID_GEN_GET_TIME_CAPS		0x0002020F
+#define RNDIS_OID_GEN_GET_NETCARD_TIME		0x00020210
+
+#define RNDIS_OID_GEN_NETCARD_LOAD		0x00020211
+#define RNDIS_OID_GEN_DEVICE_PROFILE		0x00020212
+#define RNDIS_OID_GEN_INIT_TIME_MS		0x00020213
+#define RNDIS_OID_GEN_RESET_COUNTS		0x00020214
+#define RNDIS_OID_GEN_MEDIA_SENSE_COUNTS	0x00020215
+#define RNDIS_OID_GEN_FRIENDLY_NAME		0x00020216
+#define RNDIS_OID_GEN_MINIPORT_INFO		0x00020217
+#define RNDIS_OID_GEN_RESET_VERIFY_PARAMETERS	0x00020218
+
+/* These are connection-oriented general OIDs. */
+/* These replace the above OIDs for connection-oriented media. */
+#define RNDIS_OID_GEN_CO_SUPPORTED_LIST		0x00010101
+#define RNDIS_OID_GEN_CO_HARDWARE_STATUS	0x00010102
+#define RNDIS_OID_GEN_CO_MEDIA_SUPPORTED	0x00010103
+#define RNDIS_OID_GEN_CO_MEDIA_IN_USE		0x00010104
+#define RNDIS_OID_GEN_CO_LINK_SPEED		0x00010105
+#define RNDIS_OID_GEN_CO_VENDOR_ID		0x00010106
+#define RNDIS_OID_GEN_CO_VENDOR_DESCRIPTION	0x00010107
+#define RNDIS_OID_GEN_CO_DRIVER_VERSION		0x00010108
+#define RNDIS_OID_GEN_CO_PROTOCOL_OPTIONS	0x00010109
+#define RNDIS_OID_GEN_CO_MAC_OPTIONS		0x0001010A
+#define RNDIS_OID_GEN_CO_MEDIA_CONNECT_STATUS	0x0001010B
+#define RNDIS_OID_GEN_CO_VENDOR_DRIVER_VERSION	0x0001010C
+#define RNDIS_OID_GEN_CO_MINIMUM_LINK_SPEED	0x0001010D
+
+#define RNDIS_OID_GEN_CO_GET_TIME_CAPS		0x00010201
+#define RNDIS_OID_GEN_CO_GET_NETCARD_TIME	0x00010202
+
+/* These are connection-oriented statistics OIDs. */
+#define RNDIS_OID_GEN_CO_XMIT_PDUS_OK		0x00020101
+#define RNDIS_OID_GEN_CO_RCV_PDUS_OK		0x00020102
+#define RNDIS_OID_GEN_CO_XMIT_PDUS_ERROR	0x00020103
+#define RNDIS_OID_GEN_CO_RCV_PDUS_ERROR		0x00020104
+#define RNDIS_OID_GEN_CO_RCV_PDUS_NO_BUFFER	0x00020105
+
+
+#define RNDIS_OID_GEN_CO_RCV_CRC_ERROR		0x00020201
+#define RNDIS_OID_GEN_CO_TRANSMIT_QUEUE_LENGTH	0x00020202
+#define RNDIS_OID_GEN_CO_BYTES_XMIT		0x00020203
+#define RNDIS_OID_GEN_CO_BYTES_RCV		0x00020204
+#define RNDIS_OID_GEN_CO_BYTES_XMIT_OUTSTANDING	0x00020205
+#define RNDIS_OID_GEN_CO_NETCARD_LOAD		0x00020206
+
+/* These are objects for Connection-oriented media call-managers. */
+#define RNDIS_OID_CO_ADD_PVC			0xFF000001
+#define RNDIS_OID_CO_DELETE_PVC			0xFF000002
+#define RNDIS_OID_CO_GET_CALL_INFORMATION	0xFF000003
+#define RNDIS_OID_CO_ADD_ADDRESS		0xFF000004
+#define RNDIS_OID_CO_DELETE_ADDRESS		0xFF000005
+#define RNDIS_OID_CO_GET_ADDRESSES		0xFF000006
+#define RNDIS_OID_CO_ADDRESS_CHANGE		0xFF000007
+#define RNDIS_OID_CO_SIGNALING_ENABLED		0xFF000008
+#define RNDIS_OID_CO_SIGNALING_DISABLED		0xFF000009
+
+/* 802.3 Objects (Ethernet) */
+#define RNDIS_OID_802_3_PERMANENT_ADDRESS	0x01010101
+#define RNDIS_OID_802_3_CURRENT_ADDRESS		0x01010102
+#define RNDIS_OID_802_3_MULTICAST_LIST		0x01010103
+#define RNDIS_OID_802_3_MAXIMUM_LIST_SIZE	0x01010104
+#define RNDIS_OID_802_3_MAC_OPTIONS		0x01010105
+
+#define RNDIS_802_3_MAC_OPTION_PRIORITY		0x00000001
+
+#define RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT	0x01020101
+#define RNDIS_OID_802_3_XMIT_ONE_COLLISION	0x01020102
+#define RNDIS_OID_802_3_XMIT_MORE_COLLISIONS	0x01020103
+
+#define RNDIS_OID_802_3_XMIT_DEFERRED		0x01020201
+#define RNDIS_OID_802_3_XMIT_MAX_COLLISIONS	0x01020202
+#define RNDIS_OID_802_3_RCV_OVERRUN		0x01020203
+#define RNDIS_OID_802_3_XMIT_UNDERRUN		0x01020204
+#define RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE	0x01020205
+#define RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST	0x01020206
+#define RNDIS_OID_802_3_XMIT_LATE_COLLISIONS	0x01020207
+
+#define RNDIS_OID_802_11_BSSID				0x0d010101
+#define RNDIS_OID_802_11_SSID				0x0d010102
+#define RNDIS_OID_802_11_INFRASTRUCTURE_MODE		0x0d010108
+#define RNDIS_OID_802_11_ADD_WEP			0x0d010113
+#define RNDIS_OID_802_11_REMOVE_WEP			0x0d010114
+#define RNDIS_OID_802_11_DISASSOCIATE			0x0d010115
+#define RNDIS_OID_802_11_AUTHENTICATION_MODE		0x0d010118
+#define RNDIS_OID_802_11_PRIVACY_FILTER			0x0d010119
+#define RNDIS_OID_802_11_BSSID_LIST_SCAN		0x0d01011a
+#define RNDIS_OID_802_11_ENCRYPTION_STATUS		0x0d01011b
+#define RNDIS_OID_802_11_ADD_KEY			0x0d01011d
+#define RNDIS_OID_802_11_REMOVE_KEY			0x0d01011e
+#define RNDIS_OID_802_11_ASSOCIATION_INFORMATION	0x0d01011f
+#define RNDIS_OID_802_11_CAPABILITY			0x0d010122
+#define RNDIS_OID_802_11_PMKID				0x0d010123
+#define RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED	0x0d010203
+#define RNDIS_OID_802_11_NETWORK_TYPE_IN_USE		0x0d010204
+#define RNDIS_OID_802_11_TX_POWER_LEVEL			0x0d010205
+#define RNDIS_OID_802_11_RSSI				0x0d010206
+#define RNDIS_OID_802_11_RSSI_TRIGGER			0x0d010207
+#define RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD	0x0d010209
+#define RNDIS_OID_802_11_RTS_THRESHOLD			0x0d01020a
+#define RNDIS_OID_802_11_SUPPORTED_RATES		0x0d01020e
+#define RNDIS_OID_802_11_CONFIGURATION			0x0d010211
+#define RNDIS_OID_802_11_POWER_MODE			0x0d010216
+#define RNDIS_OID_802_11_BSSID_LIST			0x0d010217
+
+/* Plug and Play capabilities */
+#define RNDIS_OID_PNP_CAPABILITIES		0xFD010100
+#define RNDIS_OID_PNP_SET_POWER			0xFD010101
+#define RNDIS_OID_PNP_QUERY_POWER		0xFD010102
+#define RNDIS_OID_PNP_ADD_WAKE_UP_PATTERN	0xFD010103
+#define RNDIS_OID_PNP_REMOVE_WAKE_UP_PATTERN	0xFD010104
+#define RNDIS_OID_PNP_ENABLE_WAKE_UP		0xFD010106
+
+/* RNDIS_PNP_CAPABILITIES.Flags constants */
+#define RNDIS_DEVICE_WAKE_UP_ENABLE			0x00000001
+#define RNDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE	0x00000002
+#define RNDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE	0x00000004
+
+#define REMOTE_CONDIS_MP_CREATE_VC_MSG		0x00008001
+#define REMOTE_CONDIS_MP_DELETE_VC_MSG		0x00008002
+#define REMOTE_CONDIS_MP_ACTIVATE_VC_MSG	0x00008005
+#define REMOTE_CONDIS_MP_DEACTIVATE_VC_MSG	0x00008006
+#define REMOTE_CONDIS_INDICATE_STATUS_MSG	0x00008007
+
+#define REMOTE_CONDIS_MP_CREATE_VC_CMPLT	0x80008001
+#define REMOTE_CONDIS_MP_DELETE_VC_CMPLT	0x80008002
+#define REMOTE_CONDIS_MP_ACTIVATE_VC_CMPLT	0x80008005
+#define REMOTE_CONDIS_MP_DEACTIVATE_VC_CMPLT	0x80008006
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 577592e..2c1de89 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -801,6 +801,10 @@
 	return table;
 }
 
+extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
+			     struct netlink_callback *cb,
+			     struct net_device *dev,
+			     int idx);
 #endif /* __KERNEL__ */
 
 
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index c6db9fb..600060e2 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -141,7 +141,7 @@
 	unsigned ret;
 
 repeat:
-	ret = s->sequence;
+	ret = ACCESS_ONCE(s->sequence);
 	if (unlikely(ret & 1)) {
 		cpu_relax();
 		goto repeat;
@@ -166,6 +166,27 @@
 }
 
 /**
+ * raw_seqcount_begin - begin a seq-read critical section
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * raw_seqcount_begin opens a read critical section of the given seqcount.
+ * Validity of the critical section is tested by calling
+ * read_seqcount_retry().
+ *
+ * Unlike read_seqcount_begin(), this function will not wait for the count
+ * to stabilize. If a writer is active when the section begins,
+ * read_seqcount_retry() will fail instead of the read stabilizing at the
+ * beginning of the critical section.
+ */
+static inline unsigned raw_seqcount_begin(const seqcount_t *s)
+{
+	unsigned ret = ACCESS_ONCE(s->sequence);
+	smp_rmb();
+	return ret & ~1;
+}
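+
+/*
+ * Illustrative sketch (not part of this patch): for some seqcount_t s,
+ * callers pair raw_seqcount_begin() with read_seqcount_retry() in the
+ * usual retry loop, e.g.
+ *
+ *	unsigned seq;
+ *
+ *	do {
+ *		seq = raw_seqcount_begin(&s);
+ *		... speculatively read the data protected by s ...
+ *	} while (read_seqcount_retry(&s, seq));
+ */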
+
+/**
  * __read_seqcount_retry - end a seq-read critical section (without barrier)
  * @s: pointer to seqcount_t
  * @start: count, from read_seqcount_begin
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 775292a..0e50171 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -117,11 +117,11 @@
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 struct nf_bridge_info {
-	atomic_t use;
-	struct net_device *physindev;
-	struct net_device *physoutdev;
-	unsigned int mask;
-	unsigned long data[32 / sizeof(unsigned long)];
+	atomic_t		use;
+	unsigned int		mask;
+	struct net_device	*physindev;
+	struct net_device	*physoutdev;
+	unsigned long		data[32 / sizeof(unsigned long)];
 };
 #endif
 
@@ -470,7 +470,8 @@
 	__u8			wifi_acked_valid:1;
 	__u8			wifi_acked:1;
 	__u8			no_fcs:1;
-	/* 9/11 bit hole (depending on ndisc_nodetype presence) */
+	__u8			head_frag:1;
+	/* 8/10 bit hole (depending on ndisc_nodetype presence) */
 	kmemcheck_bitfield_end(flags2);
 
 #ifdef CONFIG_NET_DMA
@@ -560,9 +561,15 @@
 extern void kfree_skb(struct sk_buff *skb);
 extern void consume_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
+extern struct kmem_cache *skbuff_head_cache;
+
+extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+			     bool *fragstolen, int *delta_truesize);
+
 extern struct sk_buff *__alloc_skb(unsigned int size,
 				   gfp_t priority, int fclone, int node);
-extern struct sk_buff *build_skb(void *data);
+extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
 static inline struct sk_buff *alloc_skb(unsigned int size,
 					gfp_t priority)
 {
@@ -643,11 +650,21 @@
 {
 	return skb->head + skb->end;
 }
+
+static inline unsigned int skb_end_offset(const struct sk_buff *skb)
+{
+	return skb->end;
+}
 #else
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
 	return skb->end;
 }
+
+static inline unsigned int skb_end_offset(const struct sk_buff *skb)
+{
+	return skb->end - skb->head;
+}
 #endif
 
 /* Internal */
@@ -881,10 +898,11 @@
  */
 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
 {
-	struct sk_buff *list = ((const struct sk_buff *)list_)->next;
-	if (list == (struct sk_buff *)list_)
-		list = NULL;
-	return list;
+	struct sk_buff *skb = list_->next;
+
+	if (skb == (struct sk_buff *)list_)
+		skb = NULL;
+	return skb;
 }
 
 /**
@@ -900,6 +918,7 @@
 		const struct sk_buff_head *list_)
 {
 	struct sk_buff *next = skb->next;
+
 	if (next == (struct sk_buff *)list_)
 		next = NULL;
 	return next;
@@ -920,10 +939,12 @@
  */
 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
 {
-	struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
-	if (list == (struct sk_buff *)list_)
-		list = NULL;
-	return list;
+	struct sk_buff *skb = list_->prev;
+
+	if (skb == (struct sk_buff *)list_)
+		skb = NULL;
+	return skb;
 }
 
 /**
@@ -1020,7 +1041,7 @@
 }
 
 /**
- *	skb_queue_splice - join two skb lists and reinitialise the emptied list
+ *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
  *	@list: the new list to add
  *	@head: the place to add it in the first list
  *
@@ -1051,7 +1072,7 @@
 }
 
 /**
- *	skb_queue_splice_tail - join two skb lists and reinitialise the emptied list
+ *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
  *	@list: the new list to add
  *	@head: the place to add it in the first list
  *
@@ -1664,31 +1685,11 @@
 		kfree_skb(skb);
 }
 
-/**
- *	__dev_alloc_skb - allocate an skbuff for receiving
- *	@length: length to allocate
- *	@gfp_mask: get_free_pages mask, passed to alloc_skb
- *
- *	Allocate a new &sk_buff and assign it a usage count of one. The
- *	buffer has unspecified headroom built in. Users should allocate
- *	the headroom they think they need without accounting for the
- *	built in space. The built in space is used for optimisations.
- *
- *	%NULL is returned if there is no free memory.
- */
-static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
-					      gfp_t gfp_mask)
-{
-	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
-	if (likely(skb))
-		skb_reserve(skb, NET_SKB_PAD);
-	return skb;
-}
-
-extern struct sk_buff *dev_alloc_skb(unsigned int length);
+extern void *netdev_alloc_frag(unsigned int fragsz);
 
 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-		unsigned int length, gfp_t gfp_mask);
+					  unsigned int length,
+					  gfp_t gfp_mask);
 
 /**
  *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -1704,11 +1705,25 @@
  *	allocates memory it can be called from an interrupt.
  */
 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
-		unsigned int length)
+					       unsigned int length)
 {
 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
 }
 
+/* legacy helper around __netdev_alloc_skb() */
+static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+					      gfp_t gfp_mask)
+{
+	return __netdev_alloc_skb(NULL, length, gfp_mask);
+}
+
+/* legacy helper around netdev_alloc_skb() */
+static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+	return netdev_alloc_skb(NULL, length);
+}
+
 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
 		unsigned int length, gfp_t gfp)
 {
@@ -1963,8 +1978,8 @@
 	return -EFAULT;
 }
 
-static inline int skb_can_coalesce(struct sk_buff *skb, int i,
-				   const struct page *page, int off)
+static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
+				    const struct page *page, int off)
 {
 	if (i) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1972,7 +1987,7 @@
 		return page == skb_frag_page(frag) &&
 		       off == frag->page_offset + skb_frag_size(frag);
 	}
-	return 0;
+	return false;
 }
 
 static inline int __skb_linearize(struct sk_buff *skb)
@@ -2552,7 +2567,7 @@
 		return false;
 
 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_pointer(skb) - skb->head < skb_size)
+	if (skb_end_offset(skb) < skb_size)
 		return false;
 
 	if (skb_shared(skb) || skb_cloned(skb))
@@ -2560,5 +2575,19 @@
 
 	return true;
 }
+
+/**
+ * skb_head_is_locked - Determine if the skb->head is locked down
+ * @skb: skb to check
+ *
+ * The head on skbs built around a head frag can be removed if they are
+ * not cloned.  This function returns true if the skb head is locked down
+ * due to either being allocated via kmalloc, or by being a clone with
+ * multiple references to the head.
+ */
+static inline bool skb_head_is_locked(const struct sk_buff *skb)
+{
+	return !skb->head_frag || skb_cloned(skb);
+}
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 251729a..db4bae7 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -32,8 +32,8 @@
 	int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
 };
 
-int sock_diag_register(struct sock_diag_handler *h);
-void sock_diag_unregister(struct sock_diag_handler *h);
+int sock_diag_register(const struct sock_diag_handler *h);
+void sock_diag_unregister(const struct sock_diag_handler *h);
 
 void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b84bbd4..25d6322 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -68,13 +68,13 @@
 	__kernel_size_t	msg_iovlen;	/* Number of blocks		*/
 	void 	*	msg_control;	/* Per protocol magic (eg BSD file descriptor passing) */
 	__kernel_size_t	msg_controllen;	/* Length of cmsg list */
-	unsigned	msg_flags;
+	unsigned int	msg_flags;
 };
 
 /* For recvmmsg/sendmmsg */
 struct mmsghdr {
 	struct msghdr   msg_hdr;
-	unsigned        msg_len;
+	unsigned int        msg_len;
 };
 
 /*
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 98679b0..fa702ae 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -254,7 +254,7 @@
  *	driver is finished with this message, it must call
  *	spi_finalize_current_message() so the subsystem can issue the next
  *	transfer
- * @prepare_transfer_hardware: there are currently no more messages on the
+ * @unprepare_transfer_hardware: there are currently no more messages on the
  *	queue so the subsystem notifies the driver that it may relax the
  *	hardware by issuing this call
  *
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 0dddc9e..b69bdb1 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,6 +28,51 @@
 
 #include <linux/platform_device.h>
 
+#define STMMAC_RX_COE_NONE	0
+#define STMMAC_RX_COE_TYPE1	1
+#define STMMAC_RX_COE_TYPE2	2
+
+/* Define the macros for CSR clock range parameters to be passed by
+ * platform code.
+ * This could also be configured at run time using CPU freq framework. */
+
+/* MDC Clock Selection defines */
+#define	STMMAC_CSR_60_100M	0x0	/* MDC = clk_csr_i/42 */
+#define	STMMAC_CSR_100_150M	0x1	/* MDC = clk_csr_i/62 */
+#define	STMMAC_CSR_20_35M	0x2	/* MDC = clk_csr_i/16 */
+#define	STMMAC_CSR_35_60M	0x3	/* MDC = clk_csr_i/26 */
+#define	STMMAC_CSR_150_250M	0x4	/* MDC = clk_csr_i/102 */
+#define	STMMAC_CSR_250_300M	0x5	/* MDC = clk_csr_i/122 */
+
+/* The MDC clock can be set higher than the IEEE 802.3
+ * specified frequency limit of 2.5 MHz by programming a clock divider
+ * value different from the ones defined above. The resulting MDIO
+ * clock frequency of 12.5 MHz is applicable for interfacing chips
+ * that support higher MDC clocks.
+ * The MDC clock selection macros below are defined for an MDC clock
+ * rate of 12.5 MHz, corresponding to the following selection.
+ */
+#define STMMAC_CSR_I_4		0x8	/* clk_csr_i/4 */
+#define STMMAC_CSR_I_6		0x9	/* clk_csr_i/6 */
+#define STMMAC_CSR_I_8		0xA	/* clk_csr_i/8 */
+#define STMMAC_CSR_I_10		0xB	/* clk_csr_i/10 */
+#define STMMAC_CSR_I_12		0xC	/* clk_csr_i/12 */
+#define STMMAC_CSR_I_14		0xD	/* clk_csr_i/14 */
+#define STMMAC_CSR_I_16		0xE	/* clk_csr_i/16 */
+#define STMMAC_CSR_I_18		0xF	/* clk_csr_i/18 */
+
+/* AXI DMA Burst length supported */
+#define DMA_AXI_BLEN_4		(1 << 1)
+#define DMA_AXI_BLEN_8		(1 << 2)
+#define DMA_AXI_BLEN_16		(1 << 3)
+#define DMA_AXI_BLEN_32		(1 << 4)
+#define DMA_AXI_BLEN_64		(1 << 5)
+#define DMA_AXI_BLEN_128	(1 << 6)
+#define DMA_AXI_BLEN_256	(1 << 7)
+#define DMA_AXI_BLEN_ALL (DMA_AXI_BLEN_4 | DMA_AXI_BLEN_8 | DMA_AXI_BLEN_16 \
+			| DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \
+			| DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256)
+
 /* Platform data for platform device structure's platform_data field */
 
 struct stmmac_mdio_bus_data {
@@ -38,16 +83,25 @@
 	int probed_phy_irq;
 };
 
+struct stmmac_dma_cfg {
+	int pbl;
+	int fixed_burst;
+	int mixed_burst;
+	int burst_len;
+};
+
 struct plat_stmmacenet_data {
+	char *phy_bus_name;
 	int bus_id;
 	int phy_addr;
 	int interface;
 	struct stmmac_mdio_bus_data *mdio_bus_data;
-	int pbl;
+	struct stmmac_dma_cfg *dma_cfg;
 	int clk_csr;
 	int has_gmac;
 	int enh_desc;
 	int tx_coe;
+	int rx_coe;
 	int bugged_jumbo;
 	int pmt;
 	int force_sf_dma_mode;
@@ -56,6 +110,7 @@
 	int (*init)(struct platform_device *pdev);
 	void (*exit)(struct platform_device *pdev);
 	void *custom_cfg;
+	void *custom_data;
 	void *bsp_priv;
 };
 #endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b6c62d2..d9b42c5be 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -106,6 +106,22 @@
 #define TCP_THIN_LINEAR_TIMEOUTS 16      /* Use linear timeouts for thin streams*/
 #define TCP_THIN_DUPACK         17      /* Fast retrans. after 1 dupack */
 #define TCP_USER_TIMEOUT	18	/* How long for loss retry before timeout */
+#define TCP_REPAIR		19	/* TCP sock is under repair right now */
+#define TCP_REPAIR_QUEUE	20
+#define TCP_QUEUE_SEQ		21
+#define TCP_REPAIR_OPTIONS	22
+
+struct tcp_repair_opt {
+	__u32	opt_code;
+	__u32	opt_val;
+};
+
+enum {
+	TCP_NO_QUEUE,
+	TCP_RECV_QUEUE,
+	TCP_SEND_QUEUE,
+	TCP_QUEUES_NR,
+};
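+
+/*
+ * Illustrative sketch (not part of this patch): a checkpoint/restore tool
+ * would typically switch a socket into repair mode and then pick the
+ * queue to operate on, e.g.
+ *
+ *	int on = 1;
+ *	int q = TCP_RECV_QUEUE;
+ *
+ *	setsockopt(fd, SOL_TCP, TCP_REPAIR, &on, sizeof(on));
+ *	setsockopt(fd, SOL_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
+ */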
 
 /* for TCP_INFO socket option */
 #define TCPI_OPT_TIMESTAMPS	1
@@ -353,7 +369,11 @@
 	u8	nonagle     : 4,/* Disable Nagle algorithm?             */
 		thin_lto    : 1,/* Use linear timeouts for thin streams */
 		thin_dupack : 1,/* Fast retransmit on first dupack      */
-		unused      : 2;
+		repair      : 1,
+		unused      : 1;
+	u8	repair_queue;
+	u8	do_early_retrans:1,/* Enable RFC5827 early-retransmit  */
+		early_retrans_delayed:1; /* Delayed ER timer installed */
 
 /* RTT measurement */
 	u32	srtt;		/* smoothed round trip time << 3	*/
diff --git a/include/linux/trdevice.h b/include/linux/trdevice.h
deleted file mode 100644
index bfc84a7..0000000
--- a/include/linux/trdevice.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * INET		An implementation of the TCP/IP protocol suite for the LINUX
- *		operating system.  NET  is implemented using the  BSD Socket
- *		interface as the means of communication with the user level.
- *
- *		Definitions for the Token-ring handlers.
- *
- * Version:	@(#)eth.h	1.0.4	05/13/93
- *
- * Authors:	Ross Biro
- *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *
- *		Relocated to include/linux where it belongs by Alan Cox 
- *							<gw4pts@gw4pts.ampr.org>
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
- *
- *	WARNING: This move may well be temporary. This file will get merged with others RSN.
- *
- */
-#ifndef _LINUX_TRDEVICE_H
-#define _LINUX_TRDEVICE_H
-
-
-#include <linux/if_tr.h>
-
-#ifdef __KERNEL__
-extern __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev);
-extern void tr_source_route(struct sk_buff *skb, struct trh_hdr *trh, struct net_device *dev);
-extern struct net_device *alloc_trdev(int sizeof_priv);
-
-#endif
-
-#endif	/* _LINUX_TRDEVICE_H */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 5de4157..d28cc78 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -126,6 +126,8 @@
 	unsigned		wireless:1;	/* Wireless USB HCD */
 	unsigned		authorized_default:1;
 	unsigned		has_tt:1;	/* Integrated TT in root hub */
+	unsigned		broken_pci_sleep:1;	/* Don't put the
+			controller in PCI-D3 for system sleep */
 
 	unsigned int		irq;		/* irq allocated */
 	void __iomem		*regs;		/* device memory/io */
diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h
index 88fceb7..d44ef85 100644
--- a/include/linux/usb/rndis_host.h
+++ b/include/linux/usb/rndis_host.h
@@ -20,6 +20,8 @@
 #ifndef	__LINUX_USB_RNDIS_HOST_H
 #define	__LINUX_USB_RNDIS_HOST_H
 
+#include <linux/rndis.h>
+
 /*
  * CONTROL uses CDC "encapsulated commands" with funky notifications.
  *  - control-out:  SEND_ENCAPSULATED
@@ -49,47 +51,6 @@
  */
 #define	RNDIS_CONTROL_TIMEOUT_MS	(5 * 1000)
 
-#define RNDIS_MSG_COMPLETION	cpu_to_le32(0x80000000)
-
-/* codes for "msg_type" field of rndis messages;
- * only the data channel uses packet messages (maybe batched);
- * everything else goes on the control channel.
- */
-#define RNDIS_MSG_PACKET	cpu_to_le32(0x00000001)	/* 1-N packets */
-#define RNDIS_MSG_INIT		cpu_to_le32(0x00000002)
-#define RNDIS_MSG_INIT_C	(RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION)
-#define RNDIS_MSG_HALT		cpu_to_le32(0x00000003)
-#define RNDIS_MSG_QUERY		cpu_to_le32(0x00000004)
-#define RNDIS_MSG_QUERY_C	(RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION)
-#define RNDIS_MSG_SET		cpu_to_le32(0x00000005)
-#define RNDIS_MSG_SET_C		(RNDIS_MSG_SET|RNDIS_MSG_COMPLETION)
-#define RNDIS_MSG_RESET		cpu_to_le32(0x00000006)
-#define RNDIS_MSG_RESET_C	(RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION)
-#define RNDIS_MSG_INDICATE	cpu_to_le32(0x00000007)
-#define RNDIS_MSG_KEEPALIVE	cpu_to_le32(0x00000008)
-#define RNDIS_MSG_KEEPALIVE_C	(RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION)
-
-/* codes for "status" field of completion messages */
-#define	RNDIS_STATUS_SUCCESS			cpu_to_le32(0x00000000)
-#define	RNDIS_STATUS_FAILURE			cpu_to_le32(0xc0000001)
-#define	RNDIS_STATUS_INVALID_DATA		cpu_to_le32(0xc0010015)
-#define	RNDIS_STATUS_NOT_SUPPORTED		cpu_to_le32(0xc00000bb)
-#define	RNDIS_STATUS_MEDIA_CONNECT		cpu_to_le32(0x4001000b)
-#define	RNDIS_STATUS_MEDIA_DISCONNECT		cpu_to_le32(0x4001000c)
-#define	RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION	cpu_to_le32(0x40010012)
-
-/* codes for OID_GEN_PHYSICAL_MEDIUM */
-#define	RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED	cpu_to_le32(0x00000000)
-#define	RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN	cpu_to_le32(0x00000001)
-#define	RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM	cpu_to_le32(0x00000002)
-#define	RNDIS_PHYSICAL_MEDIUM_PHONE_LINE	cpu_to_le32(0x00000003)
-#define	RNDIS_PHYSICAL_MEDIUM_POWER_LINE	cpu_to_le32(0x00000004)
-#define	RNDIS_PHYSICAL_MEDIUM_DSL		cpu_to_le32(0x00000005)
-#define	RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL	cpu_to_le32(0x00000006)
-#define	RNDIS_PHYSICAL_MEDIUM_1394		cpu_to_le32(0x00000007)
-#define	RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN	cpu_to_le32(0x00000008)
-#define	RNDIS_PHYSICAL_MEDIUM_MAX		cpu_to_le32(0x00000009)
-
 struct rndis_data_hdr {
 	__le32	msg_type;		/* RNDIS_MSG_PACKET */
 	__le32	msg_len;		/* rndis_data_hdr + data_len + pad */
@@ -222,29 +183,6 @@
 	__le32	status;
 } __attribute__ ((packed));
 
-/* NOTE:  about 30 OIDs are "mandatory" for peripherals to support ... and
- * there are gobs more that may optionally be supported.  We'll avoid as much
- * of that mess as possible.
- */
-#define OID_802_3_PERMANENT_ADDRESS	cpu_to_le32(0x01010101)
-#define OID_GEN_MAXIMUM_FRAME_SIZE	cpu_to_le32(0x00010106)
-#define OID_GEN_CURRENT_PACKET_FILTER	cpu_to_le32(0x0001010e)
-#define OID_GEN_PHYSICAL_MEDIUM		cpu_to_le32(0x00010202)
-
-/* packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */
-#define RNDIS_PACKET_TYPE_DIRECTED		cpu_to_le32(0x00000001)
-#define RNDIS_PACKET_TYPE_MULTICAST		cpu_to_le32(0x00000002)
-#define RNDIS_PACKET_TYPE_ALL_MULTICAST		cpu_to_le32(0x00000004)
-#define RNDIS_PACKET_TYPE_BROADCAST		cpu_to_le32(0x00000008)
-#define RNDIS_PACKET_TYPE_SOURCE_ROUTING	cpu_to_le32(0x00000010)
-#define RNDIS_PACKET_TYPE_PROMISCUOUS		cpu_to_le32(0x00000020)
-#define RNDIS_PACKET_TYPE_SMT			cpu_to_le32(0x00000040)
-#define RNDIS_PACKET_TYPE_ALL_LOCAL		cpu_to_le32(0x00000080)
-#define RNDIS_PACKET_TYPE_GROUP			cpu_to_le32(0x00001000)
-#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL	cpu_to_le32(0x00002000)
-#define RNDIS_PACKET_TYPE_FUNCTIONAL		cpu_to_le32(0x00004000)
-#define RNDIS_PACKET_TYPE_MAC_FRAME		cpu_to_le32(0x00008000)
-
 /* default filter used with RNDIS devices */
 #define RNDIS_DEFAULT_FILTER ( \
 	RNDIS_PACKET_TYPE_DIRECTED | \
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 605b0aa..76f4396 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -191,7 +191,8 @@
 enum skb_state {
 	illegal = 0,
 	tx_start, tx_done,
-	rx_start, rx_done, rx_cleanup
+	rx_start, rx_done, rx_cleanup,
+	unlink_start
 };
 
 struct skb_data {	/* skb->cb is one of these */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 970d5a2..2470f54 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -49,8 +49,11 @@
 #define VIRTIO_NET_F_CTRL_RX	18	/* Control channel RX mode support */
 #define VIRTIO_NET_F_CTRL_VLAN	19	/* Control channel VLAN filtering */
 #define VIRTIO_NET_F_CTRL_RX_EXTRA 20	/* Extra RX mode control support */
+#define VIRTIO_NET_F_GUEST_ANNOUNCE 21	/* Guest can announce device on the
+					 * network */
 
 #define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
+#define VIRTIO_NET_S_ANNOUNCE	2	/* Announcement is needed */
 
 struct virtio_net_config {
 	/* The config defining mac address (if VIRTIO_NET_F_MAC) */
@@ -152,4 +155,15 @@
  #define VIRTIO_NET_CTRL_VLAN_ADD             0
  #define VIRTIO_NET_CTRL_VLAN_DEL             1
 
+/*
+ * Control link announce acknowledgement
+ *
+ * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
+ * the driver has received the notification; the device will clear the
+ * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
+ * this command.
+ */
+#define VIRTIO_NET_CTRL_ANNOUNCE       3
+ #define VIRTIO_NET_CTRL_ANNOUNCE_ACK         0
+
 #endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 03b90cdc..06f8e38 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -26,13 +26,14 @@
 		PGFREE, PGACTIVATE, PGDEACTIVATE,
 		PGFAULT, PGMAJFAULT,
 		FOR_ALL_ZONES(PGREFILL),
-		FOR_ALL_ZONES(PGSTEAL),
+		FOR_ALL_ZONES(PGSTEAL_KSWAPD),
+		FOR_ALL_ZONES(PGSTEAL_DIRECT),
 		FOR_ALL_ZONES(PGSCAN_KSWAPD),
 		FOR_ALL_ZONES(PGSCAN_DIRECT),
 #ifdef CONFIG_NUMA
 		PGSCAN_ZONE_RECLAIM_FAILED,
 #endif
-		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+		PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
 		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
 		KSWAPD_SKIP_CONGESTION_WAIT,
 		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index b5c2b6c..cad374b 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -59,7 +59,8 @@
 struct soc_camera_host {
 	struct v4l2_device v4l2_dev;
 	struct list_head list;
-	unsigned char nr;				/* Host number */
+	struct mutex host_lock;		/* Protect during probing */
+	unsigned char nr;		/* Host number */
 	void *priv;
 	const char *drv_name;
 	struct soc_camera_host_ops *ops;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 757a176..f2b801c 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -92,7 +92,7 @@
 					const struct in6_addr *addr);
 
 static inline unsigned long addrconf_timeout_fixup(u32 timeout,
-						    unsigned unit)
+						   unsigned int unit)
 {
 	if (timeout == 0xffffffff)
 		return ~0UL;
@@ -131,9 +131,9 @@
 extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
 			     const struct in6_addr *addr);
 extern void ipv6_sock_mc_close(struct sock *sk);
-extern int inet6_mc_check(struct sock *sk,
-			  const struct in6_addr *mc_addr,
-			  const struct in6_addr *src_addr);
+extern bool inet6_mc_check(struct sock *sk,
+			   const struct in6_addr *mc_addr,
+			   const struct in6_addr *src_addr);
 
 extern int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
 extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
@@ -146,10 +146,10 @@
 extern void ipv6_mc_destroy_dev(struct inet6_dev *idev);
 extern void addrconf_dad_failure(struct inet6_ifaddr *ifp);
 
-extern int ipv6_chk_mcast_addr(struct net_device *dev,
-			       const struct in6_addr *group,
-			       const struct in6_addr *src_addr);
-extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr);
+extern bool ipv6_chk_mcast_addr(struct net_device *dev,
+				const struct in6_addr *group,
+				const struct in6_addr *src_addr);
+extern bool ipv6_is_mld(struct sk_buff *skb, int nexthdr);
 
 extern void addrconf_prefix_rcv(struct net_device *dev,
 				u8 *opt, int len, bool sllao);
@@ -163,8 +163,8 @@
 
 extern int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
 extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
-extern int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
-			       const struct in6_addr *addr);
+extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
+				const struct in6_addr *addr);
 
 
 /* Device notifier */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index ca68e2c..2ee33da 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -22,7 +22,7 @@
 struct unix_address {
 	atomic_t	refcnt;
 	int		len;
-	unsigned	hash;
+	unsigned int	hash;
 	struct sockaddr_un name[0];
 };
 
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 94e09d3..5d23521 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -215,7 +215,7 @@
 	struct ax25_dev		*next;
 	struct net_device	*dev;
 	struct net_device	*forward;
-	struct ctl_table	*systable;
+	struct ctl_table_header *sysheader;
 	int			values[AX25_MAX_VALUES];
 #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
 	ax25_dama_info		dama;
@@ -441,11 +441,11 @@
 
 /* sysctl_net_ax25.c */
 #ifdef CONFIG_SYSCTL
-extern void ax25_register_sysctl(void);
-extern void ax25_unregister_sysctl(void);
+extern int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
+extern void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
 #else
-static inline void ax25_register_sysctl(void) {};
-static inline void ax25_unregister_sysctl(void) {};
+static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; }
+static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {}
 #endif /* CONFIG_SYSCTL */
 
 #endif
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 262ebd1..a65910b 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -191,6 +191,7 @@
 	struct list_head accept_q;
 	struct sock *parent;
 	u32 defer_setup;
+	bool suspended;
 };
 
 struct bt_sock_list {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 6822d25..db1c5df 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -314,6 +314,7 @@
 
 	__u8		remote_cap;
 	__u8		remote_auth;
+	bool		flush_key;
 
 	unsigned int	sent;
 
@@ -980,7 +981,7 @@
 int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
-		      u8 persistent);
+		      bool persistent);
 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
 			  u8 *dev_class);
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index 6db8ecf..439dadc 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -123,12 +123,21 @@
 	bool piggy_desc;
 };
 
+/* Priority mapping */
+enum {
+	CFHSI_PRIO_CTL = 0,
+	CFHSI_PRIO_VI,
+	CFHSI_PRIO_VO,
+	CFHSI_PRIO_BEBK,
+	CFHSI_PRIO_LAST,
+};
+
 /* Structure implemented by CAIF HSI drivers. */
 struct cfhsi {
 	struct caif_dev_common cfdev;
 	struct net_device *ndev;
 	struct platform_device *pdev;
-	struct sk_buff_head qhead;
+	struct sk_buff_head qhead[CFHSI_PRIO_LAST];
 	struct cfhsi_drv drv;
 	struct cfhsi_dev *dev;
 	int tx_state;
@@ -151,8 +160,14 @@
 	wait_queue_head_t wake_up_wait;
 	wait_queue_head_t wake_down_wait;
 	wait_queue_head_t flush_fifo_wait;
-	struct timer_list timer;
+	struct timer_list inactivity_timer;
 	struct timer_list rx_slowpath_timer;
+
+	/* TX aggregation */
+	unsigned long aggregation_timeout;
+	int aggregation_len;
+	struct timer_list aggregation_timer;
+
 	unsigned long bits;
 };
 
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index 6bd200a..83a89ba 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -188,11 +188,18 @@
  */
 void *cfpkt_tonative(struct cfpkt *pkt);
 
-
 /*
  * Returns packet information for a packet.
  * pkt Packet to get info from;
  * @return Packet information
  */
 struct caif_payload_info *cfpkt_info(struct cfpkt *pkt);
+
+/** cfpkt_set_prio - set priority for a CAIF packet.
+ *
+ * @pkt: The CAIF packet to be adjusted.
+ * @prio: one of TC_PRIO_ constants.
+ */
+void cfpkt_set_prio(struct cfpkt *pkt, int prio);
+
 #endif				/* CFPKT_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 83d800c..adb2320 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -521,6 +521,7 @@
  * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
  * @STATION_INFO_STA_FLAGS: @sta_flags filled
  * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled
+ * @STATION_INFO_T_OFFSET: @t_offset filled
  */
 enum station_info_flags {
 	STATION_INFO_INACTIVE_TIME	= 1<<0,
@@ -542,7 +543,8 @@
 	STATION_INFO_CONNECTED_TIME	= 1<<16,
 	STATION_INFO_ASSOC_REQ_IES	= 1<<17,
 	STATION_INFO_STA_FLAGS		= 1<<18,
-	STATION_INFO_BEACON_LOSS_COUNT	= 1<<19
+	STATION_INFO_BEACON_LOSS_COUNT	= 1<<19,
+	STATION_INFO_T_OFFSET		= 1<<20,
 };
 
 /**
@@ -643,6 +645,7 @@
  * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
  * @sta_flags: station flags mask & values
  * @beacon_loss_count: Number of times beacon loss event has triggered.
+ * @t_offset: Time offset of the station relative to this host.
  */
 struct station_info {
 	u32 filled;
@@ -671,6 +674,7 @@
 	size_t assoc_req_ies_len;
 
 	u32 beacon_loss_count;
+	s64 t_offset;
 
 	/*
 	 * Note: Add a new enum station_info_flags value for each new field and
@@ -798,6 +802,8 @@
 	/* ttl used in path selection information elements */
 	u8  element_ttl;
 	bool auto_open_plinks;
+	/* neighbor offset synchronization */
+	u32 dot11MeshNbrOffsetMaxNeighbor;
 	/* HWMP parameters */
 	u8  dot11MeshHWMPmaxPREQretries;
 	u32 path_refresh_time;
@@ -815,12 +821,14 @@
 	bool  dot11MeshGateAnnouncementProtocol;
 	bool dot11MeshForwarding;
 	s32 rssi_threshold;
+	u16 ht_opmode;
 };
 
 /**
  * struct mesh_setup - 802.11s mesh setup configuration
  * @mesh_id: the mesh ID
  * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes
+ * @sync_method: which synchronization method to use
  * @path_sel_proto: which path selection protocol to use
  * @path_metric: which metric to use
  * @ie: vendor information elements (optional)
@@ -834,8 +842,9 @@
 struct mesh_setup {
 	const u8 *mesh_id;
 	u8 mesh_id_len;
-	u8  path_sel_proto;
-	u8  path_metric;
+	u8 sync_method;
+	u8 path_sel_proto;
+	u8 path_metric;
 	const u8 *ie;
 	u8 ie_len;
 	bool is_authenticated;
@@ -845,7 +854,7 @@
 
 /**
  * struct ieee80211_txq_params - TX queue parameters
- * @queue: TX queue identifier (NL80211_TXQ_Q_*)
+ * @ac: AC identifier
  * @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled
  * @cwmin: Minimum contention window [a value of the form 2^n-1 in the range
  *	1..32767]
@@ -854,7 +863,7 @@
  * @aifs: Arbitration interframe space [0..255]
  */
 struct ieee80211_txq_params {
-	enum nl80211_txq_q queue;
+	enum nl80211_ac ac;
 	u16 txop;
 	u16 cwmin;
 	u16 cwmax;
@@ -1336,6 +1345,9 @@
  *	be %NULL or contain the enabled Wake-on-Wireless triggers that are
  *	configured for the device.
  * @resume: wiphy device needs to be resumed
+ * @set_wakeup: Called when WoWLAN is enabled/disabled, use this callback
+ *	to call device_set_wakeup_enable() to enable/disable wakeup from
+ *	the device.
  *
  * @add_virtual_intf: create a new virtual interface with the given name,
  *	must set the struct wireless_dev's iftype. Beware: You must create
@@ -1503,10 +1515,21 @@
  *	later passes to cfg80211_probe_status().
  *
  * @set_noack_map: Set the NoAck Map for the TIDs.
+ *
+ * @get_et_sset_count:  Ethtool API to get string-set count.
+ *	See @ethtool_ops.get_sset_count
+ *
+ * @get_et_stats:  Ethtool API to get a set of u64 stats.
+ *	See @ethtool_ops.get_ethtool_stats
+ *
+ * @get_et_strings:  Ethtool API to get a set of strings to describe stats
+ *	and perhaps other supported types of ethtool data-sets.
+ *	See @ethtool_ops.get_strings
  */
 struct cfg80211_ops {
 	int	(*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
 	int	(*resume)(struct wiphy *wiphy);
+	void	(*set_wakeup)(struct wiphy *wiphy, bool enabled);
 
 	struct net_device * (*add_virtual_intf)(struct wiphy *wiphy,
 						char *name,
@@ -1698,7 +1721,15 @@
 				  struct net_device *dev,
 				  u16 noack_map);
 
-	struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy);
+	struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy,
+					       enum nl80211_channel_type *type);
+
+	int	(*get_et_sset_count)(struct wiphy *wiphy,
+				     struct net_device *dev, int sset);
+	void	(*get_et_stats)(struct wiphy *wiphy, struct net_device *dev,
+				struct ethtool_stats *stats, u64 *data);
+	void	(*get_et_strings)(struct wiphy *wiphy, struct net_device *dev,
+				  u32 sset, u8 *data);
 };
 
 /*
@@ -1732,10 +1763,6 @@
  *	hints read the documentation for regulatory_hint_found_beacon()
  * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
  *	wiphy at all
- * @WIPHY_FLAG_ENFORCE_COMBINATIONS: Set this flag to enforce interface
- *	combinations for this device. This flag is used for backward
- *	compatibility only until all drivers advertise combinations and
- *	they will always be enforced.
  * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
  *	by default -- this flag will be set depending on the kernel's default
  *	on wiphy_new(), but can be changed by the driver if it has a good
@@ -1780,7 +1807,7 @@
 	WIPHY_FLAG_IBSS_RSN			= BIT(8),
 	WIPHY_FLAG_MESH_AUTH			= BIT(10),
 	WIPHY_FLAG_SUPPORTS_SCHED_SCAN		= BIT(11),
-	WIPHY_FLAG_ENFORCE_COMBINATIONS		= BIT(12),
+	/* use hole at 12 */
 	WIPHY_FLAG_SUPPORTS_FW_ROAM		= BIT(13),
 	WIPHY_FLAG_AP_UAPSD			= BIT(14),
 	WIPHY_FLAG_SUPPORTS_TDLS		= BIT(15),
@@ -3343,6 +3370,17 @@
 				 enum nl80211_channel_type channel_type);
 
 /*
+ * cfg80211_ch_switch_notify - update wdev channel and notify userspace
+ * @dev: the device which switched channels
+ * @freq: new channel frequency (in MHz)
+ * @type: channel type
+ *
+ * Acquires wdev_lock, so must only be called from sleepable driver context!
+ */
+void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
+			       enum nl80211_channel_type type);
+
+/*
  * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units)
  * @rate: given rate_info to calculate bitrate from
  *
diff --git a/include/net/codel.h b/include/net/codel.h
new file mode 100644
index 0000000..550debf
--- /dev/null
+++ b/include/net/codel.h
@@ -0,0 +1,342 @@
+#ifndef __NET_SCHED_CODEL_H
+#define __NET_SCHED_CODEL_H
+
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+#include <linux/reciprocal_div.h>
+
+/* Controlling Queue Delay (CoDel) algorithm
+ * =========================================
+ * Source : Kathleen Nichols and Van Jacobson
+ * http://queue.acm.org/detail.cfm?id=2209336
+ *
+ * Implemented on linux by Dave Taht and Eric Dumazet
+ */
+
+
+/* CoDel uses a 1024 nsec clock, encoded in u32
+ * This gives a range of 2199 seconds, because of signed compares
+ */
+typedef u32 codel_time_t;
+typedef s32 codel_tdiff_t;
+#define CODEL_SHIFT 10
+#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)
+
+static inline codel_time_t codel_get_time(void)
+{
+	u64 ns = ktime_to_ns(ktime_get());
+
+	return ns >> CODEL_SHIFT;
+}
+
+#define codel_time_after(a, b)		((s32)(a) - (s32)(b) > 0)
+#define codel_time_after_eq(a, b)	((s32)(a) - (s32)(b) >= 0)
+#define codel_time_before(a, b)		((s32)(a) - (s32)(b) < 0)
+#define codel_time_before_eq(a, b)	((s32)(a) - (s32)(b) <= 0)
+
+/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
+struct codel_skb_cb {
+	codel_time_t enqueue_time;
+};
+
+static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
+{
+	qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
+	return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
+{
+	return get_codel_cb(skb)->enqueue_time;
+}
+
+static void codel_set_enqueue_time(struct sk_buff *skb)
+{
+	get_codel_cb(skb)->enqueue_time = codel_get_time();
+}
+
+static inline u32 codel_time_to_us(codel_time_t val)
+{
+	u64 valns = ((u64)val << CODEL_SHIFT);
+
+	do_div(valns, NSEC_PER_USEC);
+	return (u32)valns;
+}
+
+/**
+ * struct codel_params - contains codel parameters
+ * @target:	target queue size (in time units)
+ * @interval:	width of moving time window
+ * @ecn:	is Explicit Congestion Notification enabled
+ */
+struct codel_params {
+	codel_time_t	target;
+	codel_time_t	interval;
+	bool		ecn;
+};
+
+/**
+ * struct codel_vars - contains codel variables
+ * @count:		how many drops we've done since the last time we
+ *			entered dropping state
+ * @lastcount:		count at entry to dropping state
+ * @dropping:		set to true if in dropping state
+ * @rec_inv_sqrt:	reciprocal value of sqrt(count) >> 1
+ * @first_above_time:	when we went (or will go) continuously above target
+ *			for interval
+ * @drop_next:		time to drop next packet, or when we dropped last
+ * @ldelay:		sojourn time of last dequeued packet
+ */
+struct codel_vars {
+	u32		count;
+	u32		lastcount;
+	bool		dropping;
+	u16		rec_inv_sqrt;
+	codel_time_t	first_above_time;
+	codel_time_t	drop_next;
+	codel_time_t	ldelay;
+};
+
+#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */
+/* needed shift to get a Q0.32 number from rec_inv_sqrt */
+#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS)
+
+/**
+ * struct codel_stats - contains codel shared variables and stats
+ * @maxpacket:	largest packet we've seen so far
+ * @drop_count:	temp count of dropped packets in dequeue()
+ * @ecn_mark:	number of packets we ECN marked instead of dropping
+ */
+struct codel_stats {
+	u32		maxpacket;
+	u32		drop_count;
+	u32		ecn_mark;
+};
+
+static void codel_params_init(struct codel_params *params)
+{
+	params->interval = MS2TIME(100);
+	params->target = MS2TIME(5);
+	params->ecn = false;
+}
+
+static void codel_vars_init(struct codel_vars *vars)
+{
+	memset(vars, 0, sizeof(*vars));
+}
+
+static void codel_stats_init(struct codel_stats *stats)
+{
+	stats->maxpacket = 256;
+}
+
+/*
+ * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
+ * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
+ *
+ * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
+ */
+static void codel_Newton_step(struct codel_vars *vars)
+{
+	u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
+	u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
+	u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
+
+	val >>= 2; /* avoid overflow in following multiply */
+	val = (val * invsqrt) >> (32 - 2 + 1);
+
+	vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
+}
+
+/*
+ * CoDel control_law is t + interval/sqrt(count)
+ * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
+ * both sqrt() and divide operation.
+ */
+static codel_time_t codel_control_law(codel_time_t t,
+				      codel_time_t interval,
+				      u32 rec_inv_sqrt)
+{
+	return t + reciprocal_divide(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
+}
+
+
+static bool codel_should_drop(const struct sk_buff *skb,
+			      struct Qdisc *sch,
+			      struct codel_vars *vars,
+			      struct codel_params *params,
+			      struct codel_stats *stats,
+			      codel_time_t now)
+{
+	bool ok_to_drop;
+
+	if (!skb) {
+		vars->first_above_time = 0;
+		return false;
+	}
+
+	vars->ldelay = now - codel_get_enqueue_time(skb);
+	sch->qstats.backlog -= qdisc_pkt_len(skb);
+
+	if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
+		stats->maxpacket = qdisc_pkt_len(skb);
+
+	if (codel_time_before(vars->ldelay, params->target) ||
+	    sch->qstats.backlog <= stats->maxpacket) {
+		/* went below - stay below for at least interval */
+		vars->first_above_time = 0;
+		return false;
+	}
+	ok_to_drop = false;
+	if (vars->first_above_time == 0) {
+		/* just went above from below. If we stay above
+		 * for at least interval we'll say it's ok to drop
+		 */
+		vars->first_above_time = now + params->interval;
+	} else if (codel_time_after(now, vars->first_above_time)) {
+		ok_to_drop = true;
+	}
+	return ok_to_drop;
+}
+
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
+						struct Qdisc *sch);
+
+static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+				     struct codel_params *params,
+				     struct codel_vars *vars,
+				     struct codel_stats *stats,
+				     codel_skb_dequeue_t dequeue_func)
+{
+	struct sk_buff *skb = dequeue_func(vars, sch);
+	codel_time_t now;
+	bool drop;
+
+	if (!skb) {
+		vars->dropping = false;
+		return skb;
+	}
+	now = codel_get_time();
+	drop = codel_should_drop(skb, sch, vars, params, stats, now);
+	if (vars->dropping) {
+		if (!drop) {
+			/* sojourn time below target - leave dropping state */
+			vars->dropping = false;
+		} else if (codel_time_after_eq(now, vars->drop_next)) {
+			/* It's time for the next drop. Drop the current
+			 * packet and dequeue the next. The dequeue might
+			 * take us out of dropping state.
+			 * If not, schedule the next drop.
+			 * A large backlog might result in drop rates so high
+			 * that the next drop should happen now,
+			 * hence the while loop.
+			 */
+			while (vars->dropping &&
+			       codel_time_after_eq(now, vars->drop_next)) {
+				vars->count++; /* don't care about possible wrap
+						* since there is no more divide
+						*/
+				codel_Newton_step(vars);
+				if (params->ecn && INET_ECN_set_ce(skb)) {
+					stats->ecn_mark++;
+					vars->drop_next =
+						codel_control_law(vars->drop_next,
+								  params->interval,
+								  vars->rec_inv_sqrt);
+					goto end;
+				}
+				qdisc_drop(skb, sch);
+				stats->drop_count++;
+				skb = dequeue_func(vars, sch);
+				if (!codel_should_drop(skb, sch,
+						       vars, params, stats, now)) {
+					/* leave dropping state */
+					vars->dropping = false;
+				} else {
+					/* and schedule the next drop */
+					vars->drop_next =
+						codel_control_law(vars->drop_next,
+								  params->interval,
+								  vars->rec_inv_sqrt);
+				}
+			}
+		}
+	} else if (drop) {
+		if (params->ecn && INET_ECN_set_ce(skb)) {
+			stats->ecn_mark++;
+		} else {
+			qdisc_drop(skb, sch);
+			stats->drop_count++;
+
+			skb = dequeue_func(vars, sch);
+			drop = codel_should_drop(skb, sch, vars, params,
+						 stats, now);
+		}
+		vars->dropping = true;
+		/* if min went above target close to when we last went below it
+		 * assume that the drop rate that controlled the queue on the
+		 * last cycle is a good starting point to control it now.
+		 */
+		if (codel_time_before(now - vars->drop_next,
+				      16 * params->interval)) {
+			vars->count = (vars->count - vars->lastcount) | 1;
+			/* we don't care if the rec_inv_sqrt approximation
+			 * is not very precise:
+			 * Next Newton steps will correct it quadratically.
+			 */
+			codel_Newton_step(vars);
+		} else {
+			vars->count = 1;
+			vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
+		}
+		vars->lastcount = vars->count;
+		vars->drop_next = codel_control_law(now, params->interval,
+						    vars->rec_inv_sqrt);
+	}
+end:
+	return skb;
+}
+#endif
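As a usage sketch (hypothetical toy_* names -- the real consumers are the codel/fq_codel qdiscs added elsewhere in this series), a qdisc timestamps packets at enqueue and lets codel_dequeue() decide on drops at dequeue time:

#include <net/pkt_sched.h>
#include <net/codel.h>

struct toy_codel_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
};

static int toy_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* record the timestamp codel_should_drop() turns into a sojourn time */
	codel_set_enqueue_time(skb);
	return qdisc_enqueue_tail(skb, sch);
}

/* dequeue callback handed to codel_dequeue(); codel_should_drop() already
 * adjusts qstats.backlog, so this only pulls the head skb off the list
 */
static struct sk_buff *toy_dequeue_head(struct codel_vars *vars,
					struct Qdisc *sch)
{
	return __skb_dequeue(&sch->q);
}

static struct sk_buff *toy_codel_dequeue(struct Qdisc *sch)
{
	struct toy_codel_data *q = qdisc_priv(sch);

	/* a full qdisc would also report q->stats.drop_count back to the
	 * qdisc tree after this call
	 */
	return codel_dequeue(sch, &q->params, &q->vars, &q->stats,
			     toy_dequeue_head);
}

static int toy_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct toy_codel_data *q = qdisc_priv(sch);

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	return 0;
}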
diff --git a/include/net/compat.h b/include/net/compat.h
index a974ae9..6e95653 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -42,12 +42,12 @@
 
 extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
 extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int);
-extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned);
+extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned int);
 extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
-					   unsigned, unsigned);
-extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned);
+					   unsigned int, unsigned int);
+extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned int);
 extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
-					   unsigned, unsigned,
+					   unsigned int, unsigned int,
 					   struct compat_timespec __user *);
 extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
 extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index f55c980..fc5d5dc 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -48,6 +48,8 @@
 	/* IEEE 802.1Qaz std */
 	int (*ieee_getets) (struct net_device *, struct ieee_ets *);
 	int (*ieee_setets) (struct net_device *, struct ieee_ets *);
+	int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
+	int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
 	int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
 	int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
 	int (*ieee_getapp) (struct net_device *, struct dcb_app *);
diff --git a/include/net/dn.h b/include/net/dn.h
index 814af0b..c88bf4e 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -199,7 +199,7 @@
 	fld->fld_dport = scp->addrrem;
 }
 
-extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu);
+extern unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
 
 #define DN_MENUVER_ACC 0x01
 #define DN_MENUVER_USR 0x02
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index 782ef7c..1ee9d4b 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -31,7 +31,7 @@
 
 struct dn_fib_nh {
 	struct net_device	*nh_dev;
-	unsigned		nh_flags;
+	unsigned int		nh_flags;
 	unsigned char		nh_scope;
 	int			nh_weight;
 	int			nh_power;
@@ -45,7 +45,7 @@
 	int 			fib_treeref;
 	atomic_t		fib_clntref;
 	int			fib_dead;
-	unsigned		fib_flags;
+	unsigned int		fib_flags;
 	int			fib_protocol;
 	__le16			fib_prefsrc;
 	__u32			fib_priority;
@@ -140,7 +140,7 @@
  */
 extern void dn_fib_rules_init(void);
 extern void dn_fib_rules_cleanup(void);
-extern unsigned dnet_addr_type(__le16 addr);
+extern unsigned int dnet_addr_type(__le16 addr);
 extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
 
 extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 81712cf..c507e05 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -76,8 +76,8 @@
 	__le16 rt_src_map;
 	__le16 rt_dst_map;
 
-	unsigned rt_flags;
-	unsigned rt_type;
+	unsigned int rt_flags;
+	unsigned int rt_type;
 };
 
 static inline bool dn_is_input_route(struct dn_route *rt)
diff --git a/include/net/dst.h b/include/net/dst.h
index ff4da42..bed833d 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -59,6 +59,7 @@
 #define DST_NOCACHE		0x0010
 #define DST_NOCOUNT		0x0020
 #define DST_NOPEER		0x0040
+#define DST_FAKE_RTABLE		0x0080
 
 	short			error;
 	short			obsolete;
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index e1c2ee0..3682a0a 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -12,7 +12,7 @@
 struct dst_ops {
 	unsigned short		family;
 	__be16			protocol;
-	unsigned		gc_thresh;
+	unsigned int		gc_thresh;
 
 	int			(*gc)(struct dst_ops *ops);
 	struct dst_entry *	(*check)(struct dst_entry *, __u32 cookie);
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 75d6156..9ac2524 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -25,7 +25,7 @@
 
 struct icmp_err {
   int		errno;
-  unsigned	fatal:1;
+  unsigned int	fatal:1;
 };
 
 extern const struct icmp_err icmp_err_convert[];
@@ -41,7 +41,6 @@
 
 extern void	icmp_send(struct sk_buff *skb_in,  int type, int code, __be32 info);
 extern int	icmp_rcv(struct sk_buff *skb);
-extern int	icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 extern int	icmp_init(void);
 extern void	icmp_out_count(struct net *net, unsigned char type);
 
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 5743055..d104c88 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -1,7 +1,7 @@
 /*
  * An interface between IEEE802.15.4 device and rest of the kernel.
  *
- * Copyright (C) 2007, 2008, 2009 Siemens AG
+ * Copyright (C) 2007-2012 Siemens AG
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2
@@ -21,11 +21,14 @@
  * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
  * Maxim Osipov <maxim.osipov@siemens.com>
  * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
  */
 
 #ifndef IEEE802154_NETDEVICE_H
 #define IEEE802154_NETDEVICE_H
 
+#include <net/af_ieee802154.h>
+
 /*
  * A control block of skb passed between the ARPHRD_IEEE802154 device
  * and other stack parts.
@@ -110,12 +113,26 @@
 	u8 (*get_bsn)(const struct net_device *dev);
 };
 
-static inline struct ieee802154_mlme_ops *ieee802154_mlme_ops(
-		const struct net_device *dev)
+/* The IEEE 802.15.4 standard defines two types of devices:
+ * - FFD - full functionality device
+ * - RFD - reduced functionality device
+ *
+ * So two sets of MLME operations are needed.
+ */
+struct ieee802154_reduced_mlme_ops {
+	struct wpan_phy *(*get_phy)(const struct net_device *dev);
+};
+
+static inline struct ieee802154_mlme_ops *
+ieee802154_mlme_ops(const struct net_device *dev)
+{
+	return dev->ml_priv;
+}
+
+static inline struct ieee802154_reduced_mlme_ops *
+ieee802154_reduced_mlme_ops(const struct net_device *dev)
 {
 	return dev->ml_priv;
 }
 
 #endif
-
-
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 51a7031..9356322 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -120,7 +120,7 @@
 	unsigned char		mca_crcount;
 	unsigned long		mca_sfcount[2];
 	struct timer_list	mca_timer;
-	unsigned		mca_flags;
+	unsigned int		mca_flags;
 	int			mca_users;
 	atomic_t		mca_refcnt;
 	spinlock_t		mca_lock;
@@ -209,60 +209,6 @@
 	memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32));
 }
 
-static inline void ipv6_tr_mc_map(const struct in6_addr *addr, char *buf)
-{
-	/* All nodes FF01::1, FF02::1, FF02::1:FFxx:xxxx */
-
-	if (((addr->s6_addr[0] == 0xFF) &&
-	    ((addr->s6_addr[1] == 0x01) || (addr->s6_addr[1] == 0x02)) &&
-	     (addr->s6_addr16[1] == 0) &&
-	     (addr->s6_addr32[1] == 0) &&
-	     (addr->s6_addr32[2] == 0) &&
-	     (addr->s6_addr16[6] == 0) &&
-	     (addr->s6_addr[15] == 1)) ||
-	    ((addr->s6_addr[0] == 0xFF) &&
-	     (addr->s6_addr[1] == 0x02) &&
-	     (addr->s6_addr16[1] == 0) &&
-	     (addr->s6_addr32[1] == 0) &&
-	     (addr->s6_addr16[4] == 0) &&
-	     (addr->s6_addr[10] == 0) &&
-	     (addr->s6_addr[11] == 1) &&
-	     (addr->s6_addr[12] == 0xff)))
-	{
-		buf[0]=0xC0;
-		buf[1]=0x00;
-		buf[2]=0x01;
-		buf[3]=0x00;
-		buf[4]=0x00;
-		buf[5]=0x00;
-	/* All routers FF0x::2 */
-	} else if ((addr->s6_addr[0] ==0xff) &&
-		((addr->s6_addr[1] & 0xF0) == 0) &&
-		(addr->s6_addr16[1] == 0) &&
-		(addr->s6_addr32[1] == 0) &&
-		(addr->s6_addr32[2] == 0) &&
-		(addr->s6_addr16[6] == 0) &&
-		(addr->s6_addr[15] == 2))
-	{
-		buf[0]=0xC0;
-		buf[1]=0x00;
-		buf[2]=0x02;
-		buf[3]=0x00;
-		buf[4]=0x00;
-		buf[5]=0x00;
-	} else {
-		unsigned char i ; 
-		
-		i = addr->s6_addr[15] & 7 ; 
-		buf[0]=0xC0;
-		buf[1]=0x00;
-		buf[2]=0x00;
-		buf[3]=0x01 << i ; 
-		buf[4]=0x00;
-		buf[5]=0x00;
-	}
-}
-
 static inline void ipv6_arcnet_mc_map(const struct in6_addr *addr, char *buf)
 {
 	buf[0] = 0x00;
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 3207e58..1866a67 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -23,7 +23,7 @@
 struct sockaddr;
 
 extern int inet6_csk_bind_conflict(const struct sock *sk,
-				   const struct inet_bind_bucket *tb);
+				   const struct inet_bind_bucket *tb, bool relax);
 
 extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
 					     const struct request_sock *req);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index dbf9aab..7d83f90 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -45,6 +45,7 @@
 				      struct dst_entry *dst);
 	struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it);
 	u16	    net_header_len;
+	u16	    net_frag_header_len;
 	u16	    sockaddr_len;
 	int	    (*setsockopt)(struct sock *sk, int level, int optname, 
 				  char __user *optval, unsigned int optlen);
@@ -60,7 +61,7 @@
 #endif
 	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
 	int	    (*bind_conflict)(const struct sock *sk,
-				     const struct inet_bind_bucket *tb);
+				     const struct inet_bind_bucket *tb, bool relax);
 };
 
 /** inet_connection_sock - INET connection oriented sock
@@ -245,7 +246,7 @@
 						const __be32 raddr,
 						const __be32 laddr);
 extern int inet_csk_bind_conflict(const struct sock *sk,
-				  const struct inet_bind_bucket *tb);
+				  const struct inet_bind_bucket *tb, bool relax);
 extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
 extern struct dst_entry* inet_csk_route_req(struct sock *sk,
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 16ff29a..2431cf8 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -46,8 +46,7 @@
 						void *arg);
 	void			(*destructor)(struct inet_frag_queue *);
 	void			(*skb_free)(struct sk_buff *);
-	int			(*match)(struct inet_frag_queue *q,
-						void *arg);
+	bool			(*match)(struct inet_frag_queue *q, void *arg);
 	void			(*frag_expire)(unsigned long data);
 };
 
diff --git a/include/net/ip.h b/include/net/ip.h
index b53d65f..83e0619 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -141,23 +141,6 @@
 extern int		ip4_datagram_connect(struct sock *sk, 
 					     struct sockaddr *uaddr, int addr_len);
 
-/*
- *	Map a multicast IP onto multicast MAC for type Token Ring.
- *      This conforms to RFC1469 Option 2 Multicasting i.e.
- *      using a functional address to transmit / receive 
- *      multicast packets.
- */
-
-static inline void ip_tr_mc_map(__be32 addr, char *buf)
-{
-	buf[0]=0xC0;
-	buf[1]=0x00;
-	buf[2]=0x00;
-	buf[3]=0x04;
-	buf[4]=0x00;
-	buf[5]=0x00;
-}
-
 struct ip_reply_arg {
 	struct kvec iov[1];   
 	int	    flags;
@@ -222,9 +205,6 @@
 
 extern int sysctl_ip_nonlocal_bind;
 
-extern struct ctl_path net_core_path[];
-extern struct ctl_path net_ipv4_ctl_path[];
-
 /* From inetpeer.c */
 extern int inet_peer_threshold;
 extern int inet_peer_minttl;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2ad92ca..37c1a1e 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -146,7 +146,7 @@
 
 extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
 extern void rt6_ifdown(struct net *net, struct net_device *dev);
-extern void rt6_mtu_change(struct net_device *dev, unsigned mtu);
+extern void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
 extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
 
 
@@ -175,7 +175,7 @@
 	spin_unlock(&sk->sk_dst_lock);
 }
 
-static inline int ipv6_unicast_destination(struct sk_buff *skb)
+static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
 {
 	struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
 
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 10422ef..78df0866 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -49,7 +49,7 @@
 	struct net_device	*nh_dev;
 	struct hlist_node	nh_hash;
 	struct fib_info		*nh_parent;
-	unsigned		nh_flags;
+	unsigned int		nh_flags;
 	unsigned char		nh_scope;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	int			nh_weight;
@@ -74,7 +74,7 @@
 	struct net		*fib_net;
 	int			fib_treeref;
 	atomic_t		fib_clntref;
-	unsigned		fib_flags;
+	unsigned int		fib_flags;
 	unsigned char		fib_dead;
 	unsigned char		fib_protocol;
 	unsigned char		fib_scope;
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 2bdee51..d6146b4 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -10,7 +10,6 @@
 
 #include <asm/types.h>                  /* for __uXX types */
 
-#include <linux/sysctl.h>               /* for ctl_path */
 #include <linux/list.h>                 /* for struct list_head */
 #include <linux/spinlock.h>             /* for struct rwlock_t */
 #include <linux/atomic.h>                 /* for struct atomic_t */
@@ -393,7 +392,7 @@
 
 	void (*exit)(struct ip_vs_protocol *pp);
 
-	void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
+	int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
 
 	void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
 
@@ -505,6 +504,7 @@
 						 * state transition triggerd
 						 * synchronization
 						 */
+	unsigned long		sync_endtime;	/* jiffies + sent_retries */
 
 	/* Control members */
 	struct ip_vs_conn       *control;       /* Master control connection */
@@ -580,8 +580,8 @@
 	/* virtual service options */
 	char			*sched_name;
 	char			*pe_name;
-	unsigned		flags;		/* virtual service flags */
-	unsigned		timeout;	/* persistent timeout in sec */
+	unsigned int		flags;		/* virtual service flags */
+	unsigned int		timeout;	/* persistent timeout in sec */
 	u32			netmask;	/* persistent netmask */
 };
 
@@ -592,7 +592,7 @@
 	u16			port;
 
 	/* real server options */
-	unsigned		conn_flags;	/* connection flags */
+	unsigned int		conn_flags;	/* connection flags */
 	int			weight;		/* destination weight */
 
 	/* thresholds for active connections */
@@ -616,8 +616,8 @@
 	union nf_inet_addr	addr;	  /* IP address for virtual service */
 	__be16			port;	  /* port number for the service */
 	__u32                   fwmark;   /* firewall mark of the service */
-	unsigned		flags;	  /* service status flags */
-	unsigned		timeout;  /* persistent timeout in ticks */
+	unsigned int		flags;	  /* service status flags */
+	unsigned int		timeout;  /* persistent timeout in ticks */
 	__be32			netmask;  /* grouping granularity */
 	struct net		*net;
 
@@ -647,7 +647,7 @@
 	u16			af;		/* address family */
 	__be16			port;		/* port number of the server */
 	union nf_inet_addr	addr;		/* IP address of the server */
-	volatile unsigned	flags;		/* dest status flags */
+	volatile unsigned int	flags;		/* dest status flags */
 	atomic_t		conn_flags;	/* flags to copy to conn */
 	atomic_t		weight;		/* server weight */
 
@@ -784,6 +784,16 @@
 	void (*timeout_change)(struct ip_vs_app *app, int flags);
 };
 
+struct ipvs_master_sync_state {
+	struct list_head	sync_queue;
+	struct ip_vs_sync_buff	*sync_buff;
+	int			sync_queue_len;
+	unsigned int		sync_queue_delay;
+	struct task_struct	*master_thread;
+	struct delayed_work	master_wakeup_work;
+	struct netns_ipvs	*ipvs;
+};
+
 /* IPVS in network namespace */
 struct netns_ipvs {
 	int			gen;		/* Generation */
@@ -870,10 +880,15 @@
 #endif
 	int			sysctl_snat_reroute;
 	int			sysctl_sync_ver;
+	int			sysctl_sync_ports;
+	int			sysctl_sync_qlen_max;
+	int			sysctl_sync_sock_size;
 	int			sysctl_cache_bypass;
 	int			sysctl_expire_nodest_conn;
 	int			sysctl_expire_quiescent_template;
 	int			sysctl_sync_threshold[2];
+	unsigned int		sysctl_sync_refresh_period;
+	int			sysctl_sync_retries;
 	int			sysctl_nat_icmp_send;
 
 	/* ip_vs_lblc */
@@ -889,13 +904,11 @@
 	spinlock_t		est_lock;
 	struct timer_list	est_timer;	/* Estimation timer */
 	/* ip_vs_sync */
-	struct list_head	sync_queue;
 	spinlock_t		sync_lock;
-	struct ip_vs_sync_buff  *sync_buff;
+	struct ipvs_master_sync_state *ms;
 	spinlock_t		sync_buff_lock;
-	struct sockaddr_in	sync_mcast_addr;
-	struct task_struct	*master_thread;
-	struct task_struct	*backup_thread;
+	struct task_struct	**backup_threads;
+	int			threads_mask;
 	int			send_mesg_maxlen;
 	int			recv_mesg_maxlen;
 	volatile int		sync_state;
@@ -912,6 +925,14 @@
 #define DEFAULT_SYNC_THRESHOLD	3
 #define DEFAULT_SYNC_PERIOD	50
 #define DEFAULT_SYNC_VER	1
+#define DEFAULT_SYNC_REFRESH_PERIOD	(0U * HZ)
+#define DEFAULT_SYNC_RETRIES		0
+#define IPVS_SYNC_WAKEUP_RATE	8
+#define IPVS_SYNC_QLEN_MAX	(IPVS_SYNC_WAKEUP_RATE * 4)
+#define IPVS_SYNC_SEND_DELAY	(HZ / 50)
+#define IPVS_SYNC_CHECK_PERIOD	HZ
+#define IPVS_SYNC_FLUSH_TIME	(HZ * 2)
+#define IPVS_SYNC_PORTS_MAX	(1 << 6)
 
 #ifdef CONFIG_SYSCTL
 
@@ -922,7 +943,17 @@
 
 static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
 {
-	return ipvs->sysctl_sync_threshold[1];
+	return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]);
+}
+
+static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
+{
+	return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period);
+}
+
+static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
+{
+	return ipvs->sysctl_sync_retries;
 }
 
 static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
@@ -930,6 +961,21 @@
 	return ipvs->sysctl_sync_ver;
 }
 
+static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
+{
+	return ACCESS_ONCE(ipvs->sysctl_sync_ports);
+}
+
+static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
+{
+	return ipvs->sysctl_sync_qlen_max;
+}
+
+static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
+{
+	return ipvs->sysctl_sync_sock_size;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -942,18 +988,43 @@
 	return DEFAULT_SYNC_PERIOD;
 }
 
+static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
+{
+	return DEFAULT_SYNC_REFRESH_PERIOD;
+}
+
+static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
+{
+	return DEFAULT_SYNC_RETRIES & 3;
+}
+
 static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
 {
 	return DEFAULT_SYNC_VER;
 }
 
+static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
+{
+	return 1;
+}
+
+static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
+{
+	return IPVS_SYNC_QLEN_MAX;
+}
+
+static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
+{
+	return 0;
+}
+
 #endif
 
 /*
  *      IPVS core functions
  *      (from ip_vs_core.c)
  */
-extern const char *ip_vs_proto_name(unsigned proto);
+extern const char *ip_vs_proto_name(unsigned int proto);
 extern void ip_vs_init_hash_table(struct list_head *table, int rows);
 #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
 
@@ -1014,7 +1085,7 @@
 
 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
 				  const union nf_inet_addr *daddr,
-				  __be16 dport, unsigned flags,
+				  __be16 dport, unsigned int flags,
 				  struct ip_vs_dest *dest, __u32 fwmark);
 extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 
@@ -1184,10 +1255,8 @@
  *      IPVS control data and functions (from ip_vs_ctl.c)
  */
 extern struct ip_vs_stats ip_vs_stats;
-extern const struct ctl_path net_vs_ctl_path[];
 extern int sysctl_ip_vs_sync_ver;
 
-extern void ip_vs_sync_switch_mode(struct net *net, int mode);
 extern struct ip_vs_service *
 ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
 		  const union nf_inet_addr *vaddr, __be16 vport);
@@ -1203,6 +1272,8 @@
 
 extern int ip_vs_use_count_inc(void);
 extern void ip_vs_use_count_dec(void);
+extern int ip_vs_register_nl_ioctl(void);
+extern void ip_vs_unregister_nl_ioctl(void);
 extern int ip_vs_control_init(void);
 extern void ip_vs_control_cleanup(void);
 extern struct ip_vs_dest *
@@ -1219,7 +1290,7 @@
 extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
 			     __u8 syncid);
 extern int stop_sync_thread(struct net *net, int state);
-extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp);
+extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
 
 
 /*
diff --git a/include/net/ipip.h b/include/net/ipip.h
index a32654d..a93cf6d 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -54,8 +54,10 @@
 									\
 	err = ip_local_out(skb);					\
 	if (likely(net_xmit_eval(err) == 0)) {				\
+		u64_stats_update_begin(&(stats1)->syncp);		\
 		(stats1)->tx_bytes += pkt_len;				\
 		(stats1)->tx_packets++;					\
+		u64_stats_update_end(&(stats1)->syncp);			\
 	} else {							\
 		(stats2)->tx_errors++;					\
 		(stats2)->tx_aborted_errors++;				\
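The hunk above puts the tunnel TX counters inside a u64_stats write section; for reference, a hedged sketch of the matching reader loop (the toy_tunnel_stats layout is assumed here, not quoted from this patch):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct toy_tunnel_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

static void toy_read_tx_stats(const struct toy_tunnel_stats *tstats,
			      u64 *packets, u64 *bytes)
{
	unsigned int start;

	/* retry if a writer ran u64_stats_update_begin/end() in between,
	 * so the 64-bit counters read consistently on 32-bit hosts too
	 */
	do {
		start = u64_stats_fetch_begin(&tstats->syncp);
		*packets = tstats->tx_packets;
		*bytes   = tstats->tx_bytes;
	} while (u64_stats_fetch_retry(&tstats->syncp, start));
}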
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e4170a2..aecf884 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -113,7 +113,6 @@
 
 /* sysctls */
 extern int sysctl_mld_max_msf;
-extern struct ctl_path net_ipv6_ctl_path[];
 
 #define _DEVINC(net, statname, modifier, idev, field)			\
 ({									\
@@ -264,7 +263,7 @@
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
 					  struct ipv6_txoptions *opt);
 
-extern int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb);
+extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
 
 int ip6_frag_nqueues(struct net *net);
 int ip6_frag_mem(struct net *net);
@@ -333,8 +332,8 @@
 	addr->s6_addr32[3] = w4;
 }
 
-static inline int ipv6_addr_equal(const struct in6_addr *a1,
-				  const struct in6_addr *a2)
+static inline bool ipv6_addr_equal(const struct in6_addr *a1,
+				   const struct in6_addr *a2)
 {
 	return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
 		(a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
@@ -342,27 +341,27 @@
 		(a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
 }
 
-static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2,
-				      unsigned int prefixlen)
+static inline bool __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2,
+				       unsigned int prefixlen)
 {
-	unsigned pdw, pbi;
+	unsigned int pdw, pbi;
 
 	/* check complete u32 in prefix */
 	pdw = prefixlen >> 5;
 	if (pdw && memcmp(a1, a2, pdw << 2))
-		return 0;
+		return false;
 
 	/* check incomplete u32 in prefix */
 	pbi = prefixlen & 0x1f;
 	if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi))))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
-static inline int ipv6_prefix_equal(const struct in6_addr *a1,
-				    const struct in6_addr *a2,
-				    unsigned int prefixlen)
+static inline bool ipv6_prefix_equal(const struct in6_addr *a1,
+				     const struct in6_addr *a2,
+				     unsigned int prefixlen)
 {
 	return __ipv6_prefix_equal(a1->s6_addr32, a2->s6_addr32,
 				   prefixlen);
@@ -388,21 +387,21 @@
 };
 
 void ip6_frag_init(struct inet_frag_queue *q, void *a);
-int ip6_frag_match(struct inet_frag_queue *q, void *a);
+bool ip6_frag_match(struct inet_frag_queue *q, void *a);
 
-static inline int ipv6_addr_any(const struct in6_addr *a)
+static inline bool ipv6_addr_any(const struct in6_addr *a)
 {
 	return (a->s6_addr32[0] | a->s6_addr32[1] |
 		a->s6_addr32[2] | a->s6_addr32[3]) == 0;
 }
 
-static inline int ipv6_addr_loopback(const struct in6_addr *a)
+static inline bool ipv6_addr_loopback(const struct in6_addr *a)
 {
 	return (a->s6_addr32[0] | a->s6_addr32[1] |
 		a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0;
 }
 
-static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
+static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
 {
 	return (a->s6_addr32[0] | a->s6_addr32[1] |
 		 (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0;
@@ -412,7 +411,7 @@
  * Check for a RFC 4843 ORCHID address
  * (Overlay Routable Cryptographic Hash Identifiers)
  */
-static inline int ipv6_addr_orchid(const struct in6_addr *a)
+static inline bool ipv6_addr_orchid(const struct in6_addr *a)
 {
 	return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
 }
@@ -560,7 +559,7 @@
 extern int			ipv6_skip_exthdr(const struct sk_buff *, int start,
 					         u8 *nexthdrp, __be16 *frag_offp);
 
-extern int 			ipv6_ext_hdr(u8 nexthdr);
+extern bool			ipv6_ext_hdr(u8 nexthdr);
 
 extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
 
@@ -661,8 +660,6 @@
 extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
 extern int ipv6_sysctl_register(void);
 extern void ipv6_sysctl_unregister(void);
-extern int ipv6_static_sysctl_register(void);
-extern void ipv6_static_sysctl_unregister(void);
 #endif
 
 #endif /* _NET_IPV6_H */
diff --git a/include/net/lapb.h b/include/net/lapb.h
index fd2bf57..df892a9 100644
--- a/include/net/lapb.h
+++ b/include/net/lapb.h
@@ -149,4 +149,10 @@
  */
 #define	LAPB_DEBUG	0
 
+#define lapb_dbg(level, fmt, ...)			\
+do {							\
+	if (level < LAPB_DEBUG)				\
+		pr_debug(fmt, ##__VA_ARGS__);		\
+} while (0)
+
 #endif
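A hypothetical call site in one of the LAPB core files that include this header; the message only reaches pr_debug() when the file raises LAPB_DEBUG above the level passed here (it defaults to 0, i.e. silent):

#include <linux/netdevice.h>
#include <net/lapb.h>

static void toy_lapb_trace(struct net_device *dev, int state)
{
	lapb_dbg(0, "(%p) S%d: T1 timer expired\n", dev, state);
}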
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
index 23a4093..6ca3113 100644
--- a/include/net/llc_c_ev.h
+++ b/include/net/llc_c_ev.h
@@ -264,6 +264,6 @@
 static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
 {
 	return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
-	       (unsigned)sk->sk_rcvbuf;
+	       (unsigned int)sk->sk_rcvbuf;
 }
 #endif /* LLC_C_EV_H */
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index f57e7d4..5a93d13 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -13,7 +13,6 @@
  */
 
 #include <linux/if_ether.h>
-#include <linux/if_tr.h>
 
 /* Lengths of frame formats */
 #define LLC_PDU_LEN_I	4       /* header and 2 control bytes */
@@ -253,10 +252,6 @@
 {
 	if (skb->protocol == htons(ETH_P_802_2))
 		memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
-	else if (skb->protocol == htons(ETH_P_TR_802_2)) {
-		memcpy(sa, tr_hdr(skb)->saddr, ETH_ALEN);
-		*sa &= 0x7F;
-	}
 }
 
 /**
@@ -270,8 +265,6 @@
 {
 	if (skb->protocol == htons(ETH_P_802_2))
 		memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
-	else if (skb->protocol == htons(ETH_P_TR_802_2))
-		memcpy(da, tr_hdr(skb)->daddr, ETH_ALEN);
 }
 
 /**
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 9210bdc..4d6e6c6 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -95,9 +95,11 @@
  * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues.
  */
 enum ieee80211_max_queues {
-	IEEE80211_MAX_QUEUES =		4,
+	IEEE80211_MAX_QUEUES =		16,
 };
 
+#define IEEE80211_INVAL_HW_QUEUE	0xff
+
 /**
  * enum ieee80211_ac_numbers - AC numbers as used in mac80211
  * @IEEE80211_AC_VO: voice
@@ -244,7 +246,7 @@
  * @channel_type: Channel type for this BSS -- the hardware might be
  *	configured for HT40+ while this BSS only uses no-HT, for
  *	example.
- * @ht_operation_mode: HT operation mode (like in &struct ieee80211_ht_info).
+ * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
  *	This field is only valid when the channel type is one of the HT types.
  * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
  *	implies disabled
@@ -522,7 +524,7 @@
  *
  * @flags: transmit info flags, defined above
  * @band: the band to transmit on (use for checking for races)
- * @antenna_sel_tx: antenna to use, 0 for automatic diversity
+ * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
  * @ack_frame_id: internal frame ID for TX status, used internally
  * @control: union for control data
  * @status: union for status data
@@ -538,7 +540,7 @@
 	u32 flags;
 	u8 band;
 
-	u8 antenna_sel_tx;
+	u8 hw_queue;
 
 	u16 ack_frame_id;
 
@@ -564,7 +566,8 @@
 			u8 ampdu_ack_len;
 			int ack_signal;
 			u8 ampdu_len;
-			/* 15 bytes free */
+			u8 antenna;
+			/* 14 bytes free */
 		} status;
 		struct {
 			struct ieee80211_tx_rate driver_rates[
@@ -888,6 +891,8 @@
  *	these need to be set (or cleared) when the interface is added
  *	or, if supported by the driver, the interface type is changed
  *	at runtime, mac80211 will never touch this field
+ * @hw_queue: hardware queue for each AC
+ * @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
  * @drv_priv: data area for driver use, will always be aligned to
  *	sizeof(void *).
  */
@@ -896,7 +901,12 @@
 	struct ieee80211_bss_conf bss_conf;
 	u8 addr[ETH_ALEN];
 	bool p2p;
+
+	u8 cab_queue;
+	u8 hw_queue[IEEE80211_NUM_ACS];
+
 	u32 driver_flags;
+
 	/* must be last */
 	u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
 };
@@ -1174,6 +1184,15 @@
  * @IEEE80211_HW_SCAN_WHILE_IDLE: The device can do hw scan while
  *	being idle (i.e. mac80211 doesn't have to go idle-off during the
  *	the scan).
+ *
+ * @IEEE80211_HW_WANT_MONITOR_VIF: The driver would like to be informed of
+ *	a virtual monitor interface when monitor interfaces are the only
+ *	active interfaces.
+ *
+ * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
+ *	queue mapping in order to use different queues (not just one per AC)
+ *	for different virtual interfaces. See the doc section on HW queue
+ *	control for more details.
  */
 enum ieee80211_hw_flags {
 	IEEE80211_HW_HAS_RATE_CONTROL			= 1<<0,
@@ -1190,13 +1209,13 @@
 	IEEE80211_HW_PS_NULLFUNC_STACK			= 1<<11,
 	IEEE80211_HW_SUPPORTS_DYNAMIC_PS		= 1<<12,
 	IEEE80211_HW_MFP_CAPABLE			= 1<<13,
-	/* reuse bit 14 */
+	IEEE80211_HW_WANT_MONITOR_VIF			= 1<<14,
 	IEEE80211_HW_SUPPORTS_STATIC_SMPS		= 1<<15,
 	IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS		= 1<<16,
 	IEEE80211_HW_SUPPORTS_UAPSD			= 1<<17,
 	IEEE80211_HW_REPORTS_TX_ACK_STATUS		= 1<<18,
 	IEEE80211_HW_CONNECTION_MONITOR			= 1<<19,
-	/* reuse bit 20 */
+	IEEE80211_HW_QUEUE_CONTROL			= 1<<20,
 	IEEE80211_HW_SUPPORTS_PER_STA_GTK		= 1<<21,
 	IEEE80211_HW_AP_LINK_PS				= 1<<22,
 	IEEE80211_HW_TX_AMPDU_SETUP_IN_HW		= 1<<23,
@@ -1266,6 +1285,9 @@
  * @max_tx_aggregation_subframes: maximum number of subframes in an
  *	aggregate an HT driver will transmit, used by the peer as a
  *	hint to size its reorder buffer.
+ *
+ * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
+ *	(if %IEEE80211_HW_QUEUE_CONTROL is set)
  */
 struct ieee80211_hw {
 	struct ieee80211_conf conf;
@@ -1286,6 +1308,7 @@
 	u8 max_rate_tries;
 	u8 max_rx_aggregation_subframes;
 	u8 max_tx_aggregation_subframes;
+	u8 offchannel_tx_hw_queue;
 };
 
 /**
@@ -1694,6 +1717,61 @@
  */
 
 /**
+ * DOC: HW queue control
+ *
+ * Before HW queue control was introduced, mac80211 only had a single static
+ * assignment of per-interface AC software queues to hardware queues. This
+ * was problematic for a few reasons:
+ * 1) off-channel transmissions might get stuck behind other frames
+ * 2) multiple virtual interfaces couldn't be handled correctly
+ * 3) after-DTIM frames could get stuck behind other frames
+ *
+ * To solve this, hardware typically uses multiple different queues for all
+ * the different usages, and this needs to be propagated into mac80211 so it
+ * won't have the same problem with the software queues.
+ *
+ * Therefore, mac80211 now offers the %IEEE80211_HW_QUEUE_CONTROL capability
+ * flag that tells it that the driver implements its own queue control. To do
+ * so, the driver will set up the various queues in each &struct ieee80211_vif
+ * and the offchannel queue in &struct ieee80211_hw. In response, mac80211 will
+ * use those queue IDs in the hw_queue field of &struct ieee80211_tx_info and
+ * if necessary will queue the frame on the right software queue that mirrors
+ * the hardware queue.
+ * Additionally, the driver has to then use these HW queue IDs for the queue
+ * management functions (ieee80211_stop_queue() et al.)
+ *
+ * The driver is free to set up the queue mappings as needed, multiple virtual
+ * interfaces may map to the same hardware queues if needed. The setup has to
+ * happen during add_interface or change_interface callbacks. For example, a
+ * driver supporting station+station and station+AP modes might decide to have
+ * 10 hardware queues to handle different scenarios:
+ *
+ * 4 AC HW queues for 1st vif: 0, 1, 2, 3
+ * 4 AC HW queues for 2nd vif: 4, 5, 6, 7
+ * after-DTIM queue for AP:   8
+ * off-channel queue:         9
+ *
+ * It would then set up the hardware like this:
+ *   hw.offchannel_tx_hw_queue = 9
+ *
+ * and the first virtual interface that is added as follows:
+ *   vif.hw_queue[IEEE80211_AC_VO] = 0
+ *   vif.hw_queue[IEEE80211_AC_VI] = 1
+ *   vif.hw_queue[IEEE80211_AC_BE] = 2
+ *   vif.hw_queue[IEEE80211_AC_BK] = 3
+ *   vif.cab_queue = 8 // if AP mode, otherwise %IEEE80211_INVAL_HW_QUEUE
+ * and the second virtual interface with 4-7.
+ *
+ * If queue 6 gets full, for example, mac80211 would only stop the second
+ * virtual interface's BE queue since virtual interface queues are per AC.
+ *
+ * Note that the vif.cab_queue value should be set to %IEEE80211_INVAL_HW_QUEUE
+ * whenever the queue is not used (i.e. the interface is not in AP mode) if the
+ * queue could potentially be shared since mac80211 will look at cab_queue when
+ * a queue is stopped/woken even if the interface is not in AP mode.
+ */
+
+/**
  * enum ieee80211_filter_flags - hardware filter flags
  *
  * These flags determine what the filter in hardware should be
@@ -1780,6 +1858,18 @@
 };
 
 /**
+ * enum ieee80211_rate_control_changed - flags to indicate what changed
+ *
+ * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
+ *	to this station changed.
+ * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
+ */
+enum ieee80211_rate_control_changed {
+	IEEE80211_RC_BW_CHANGED		= BIT(0),
+	IEEE80211_RC_SMPS_CHANGED	= BIT(1),
+};
+
+/**
  * struct ieee80211_ops - callbacks from mac80211 to the driver
  *
  * This structure contains various callbacks that the driver may
@@ -1980,6 +2070,14 @@
  *	up the list of states.
  *	The callback can sleep.
  *
+ * @sta_rc_update: Notifies the driver of changes to the bitrates that can be
+ *	used to transmit to the station. The changes are advertised with bits
+ *	from &enum ieee80211_rate_control_changed and the values are reflected
+ *	in the station data. This callback should only be used when the driver
+ *	uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since
+ *	otherwise the rate control algorithm is notified directly.
+ *	Must be atomic.
+ *
  * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
  *	bursting) for a hardware TX queue.
  *	Returns a negative error code on failure.
@@ -2125,6 +2223,14 @@
  *	The @tids parameter is a bitmap and tells the driver which TIDs the
  *	frames will be on; it will at most have two bits set.
  *	This callback must be atomic.
+ *
+ * @get_et_sset_count:  Ethtool API to get string-set count.
+ *
+ * @get_et_stats:  Ethtool API to get a set of u64 stats.
+ *
+ * @get_et_strings:  Ethtool API to get a set of strings to describe stats
+ *	and perhaps other supported types of ethtool data-sets.
+ *
  */
 struct ieee80211_ops {
 	void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -2135,6 +2241,7 @@
 #ifdef CONFIG_PM
 	int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
 	int (*resume)(struct ieee80211_hw *hw);
+	void (*set_wakeup)(struct ieee80211_hw *hw, bool enabled);
 #endif
 	int (*add_interface)(struct ieee80211_hw *hw,
 			     struct ieee80211_vif *vif);
@@ -2196,8 +2303,12 @@
 			 struct ieee80211_sta *sta,
 			 enum ieee80211_sta_state old_state,
 			 enum ieee80211_sta_state new_state);
+	void (*sta_rc_update)(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_sta *sta,
+			      u32 changed);
 	int (*conf_tx)(struct ieee80211_hw *hw,
-		       struct ieee80211_vif *vif, u16 queue,
+		       struct ieee80211_vif *vif, u16 ac,
 		       const struct ieee80211_tx_queue_params *params);
 	u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
 	void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -2250,6 +2361,15 @@
 					u16 tids, int num_frames,
 					enum ieee80211_frame_release_type reason,
 					bool more_data);
+
+	int	(*get_et_sset_count)(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif, int sset);
+	void	(*get_et_stats)(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ethtool_stats *stats, u64 *data);
+	void	(*get_et_strings)(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif,
+				  u32 sset, u8 *data);
 };
 
 /**
@@ -2844,6 +2964,7 @@
  */
 __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
 					struct ieee80211_vif *vif,
+					enum ieee80211_band band,
 					size_t frame_len,
 					struct ieee80211_rate *rate);
 
@@ -3512,19 +3633,6 @@
 /* Rate control API */
 
 /**
- * enum rate_control_changed - flags to indicate which parameter changed
- *
- * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have
- *	changed, rate control algorithm can update its internal state if needed.
- * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed, the rate
- *	control algorithm needs to adjust accordingly.
- */
-enum rate_control_changed {
-	IEEE80211_RC_HT_CHANGED		= BIT(0),
-	IEEE80211_RC_SMPS_CHANGED	= BIT(1),
-};
-
-/**
  * struct ieee80211_tx_rate_control - rate control information for/from RC algo
  *
  * @hw: The hardware the algorithm is invoked for.
@@ -3569,9 +3677,8 @@
 	void (*rate_init)(void *priv, struct ieee80211_supported_band *sband,
 			  struct ieee80211_sta *sta, void *priv_sta);
 	void (*rate_update)(void *priv, struct ieee80211_supported_band *sband,
-			    struct ieee80211_sta *sta,
-			    void *priv_sta, u32 changed,
-			    enum nl80211_channel_type oper_chan_type);
+			    struct ieee80211_sta *sta, void *priv_sta,
+			    u32 changed);
 	void (*free_sta)(void *priv, struct ieee80211_sta *sta,
 			 void *priv_sta);
 
@@ -3706,8 +3813,20 @@
 
 void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
 
-int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb);
+int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
+			    struct sk_buff *skb, bool need_basic);
 
 int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif,
-				struct sk_buff *skb);
+				struct sk_buff *skb, bool need_basic);
+
+/**
+ * ieee80211_ave_rssi - report the average rssi for the specified interface
+ *
+ * @vif: the specified virtual interface
+ *
+ * This function returns the average rssi value for the requested interface.
+ * It assumes that the given vif is valid.
+ */
+int ieee80211_ave_rssi(struct ieee80211_vif *vif);
+
 #endif /* MAC80211_H */
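Tying the mac80211 changes above together, a hedged sketch of the queue bookkeeping a driver advertising IEEE80211_HW_QUEUE_CONTROL might do, following the 10-queue example from the HW queue control DOC section; the xx_* names are illustrative only:

#include <net/mac80211.h>

#define XX_AP_CAB_QUEUE		8
#define XX_OFFCHANNEL_QUEUE	9

static void xx_setup_hw_queues(struct ieee80211_hw *hw)
{
	hw->flags |= IEEE80211_HW_QUEUE_CONTROL;
	hw->queues = 10;
	hw->offchannel_tx_hw_queue = XX_OFFCHANNEL_QUEUE;
}

/* hypothetical helper: a real driver would track which block of four AC
 * queues is free; this sketch always hands out the first block
 */
static int xx_assign_queue_base(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif)
{
	return 0;
}

static int xx_add_interface(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif)
{
	int base = xx_assign_queue_base(hw, vif);
	int ac;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		vif->hw_queue[ac] = base + ac;

	/* the after-DTIM queue only exists for AP interfaces */
	if (vif->type == NL80211_IFTYPE_AP)
		vif->cab_queue = XX_AP_CAB_QUEUE;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}

With this in place, mac80211 stops and wakes the per-vif software queues based on the HW queue IDs the driver uses with ieee80211_stop_queue()/ieee80211_wake_queue().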
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
new file mode 100644
index 0000000..c9f8ab5
--- /dev/null
+++ b/include/net/mac802154.h
@@ -0,0 +1,136 @@
+/*
+ * IEEE802.15.4-2003 specification
+ *
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef NET_MAC802154_H
+#define NET_MAC802154_H
+
+#include <net/af_ieee802154.h>
+
+/* The following flags are used to indicate changed address settings from
+ * the stack to the hardware.
+ */
+
+/* indicates that the Short Address changed */
+#define IEEE802515_AFILT_SADDR_CHANGED		0x00000001
+/* indicates that the IEEE Address changed */
+#define IEEE802515_AFILT_IEEEADDR_CHANGED	0x00000002
+/* indicates that the PAN ID changed */
+#define IEEE802515_AFILT_PANID_CHANGED		0x00000004
+/* indicates that PAN Coordinator status changed */
+#define	IEEE802515_AFILT_PANC_CHANGED		0x00000008
+
+struct ieee802154_hw_addr_filt {
+	__le16	pan_id;		/* Each independent PAN selects a unique
+				 * identifier. This PAN id allows communication
+				 * between devices within a network using short
+				 * addresses and enables transmissions between
+				 * devices across independent networks.
+				 */
+	__le16	short_addr;
+	u8	ieee_addr[IEEE802154_ADDR_LEN];
+	u8	pan_coord;
+};
+
+struct ieee802154_dev {
+	/* filled by the driver */
+	int	extra_tx_headroom;
+	u32	flags;
+	struct	device *parent;
+
+	/* filled by mac802154 core */
+	struct	ieee802154_hw_addr_filt hw_filt;
+	void	*priv;
+	struct	wpan_phy *phy;
+};
+
+/* Checksum is in hardware and is omitted from a packet
+ *
+ * The following flags are used to indicate hardware capabilities to
+ * the stack. Generally, flags here should be defined so that the
+ * simplest hardware doesn't need to set any particular flags. There
+ * are some exceptions to this rule, however, so you are advised to
+ * review these flags carefully.
+ */
+
+/* Indicates that the receiver omits FCS and the transmitter will add FCS on its own. */
+#define	IEEE802154_HW_OMIT_CKSUM	0x00000001
+/* Indicates that receiver will autorespond with ACK frames. */
+#define	IEEE802154_HW_AACK		0x00000002
+
+/* struct ieee802154_ops - callbacks from mac802154 to the driver
+ *
+ * This structure contains various callbacks that the driver may
+ * handle or, in some cases, must handle, for example to transmit
+ * a frame.
+ *
+ * start: Handler that 802.15.4 module calls for device initialization.
+ *	  This function is called before the first interface is attached.
+ *
+ * stop:  Handler that 802.15.4 module calls for device cleanup.
+ *	  This function is called after the last interface is removed.
+ *
+ * xmit:  Handler that 802.15.4 module calls for each transmitted frame.
+ *	  skb contains the buffer starting from the IEEE 802.15.4 header.
+ *	  The low-level driver should send the frame based on available
+ *	  configuration.
+ *	  This function should return zero or negative errno. Called with
+ *	  pib_lock held.
+ *
+ * ed:    Handler that 802.15.4 module calls for Energy Detection.
+ *	  This function should place the value for detected energy
+ *	  (usually device-dependent) in the level pointer and return
+ *	  either zero or negative errno. Called with pib_lock held.
+ *
+ * set_channel:
+ *	  Set the radio to listen on the specified channel.
+ *	  Returns either zero, or negative errno. Called with pib_lock held.
+ *
+ * set_hw_addr_filt:
+ *	  Set the hardware address filter to the specified addresses.
+ *	  Returns either zero, or negative errno.
+ */
+struct ieee802154_ops {
+	struct module	*owner;
+	int		(*start)(struct ieee802154_dev *dev);
+	void		(*stop)(struct ieee802154_dev *dev);
+	int		(*xmit)(struct ieee802154_dev *dev,
+				struct sk_buff *skb);
+	int		(*ed)(struct ieee802154_dev *dev, u8 *level);
+	int		(*set_channel)(struct ieee802154_dev *dev,
+				       int page,
+				       int channel);
+	int		(*set_hw_addr_filt)(struct ieee802154_dev *dev,
+					  struct ieee802154_hw_addr_filt *filt,
+					    unsigned long changed);
+	int		(*ieee_addr)(struct ieee802154_dev *dev,
+				     u8 addr[IEEE802154_ADDR_LEN]);
+};
+
+/* Basic interface to register ieee802154 device */
+struct ieee802154_dev *
+ieee802154_alloc_device(size_t priv_data_lex, struct ieee802154_ops *ops);
+void ieee802154_free_device(struct ieee802154_dev *dev);
+int ieee802154_register_device(struct ieee802154_dev *dev);
+void ieee802154_unregister_device(struct ieee802154_dev *dev);
+
+void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb,
+			   u8 lqi);
+
+#endif /* NET_MAC802154_H */
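A hedged sketch of the driver side of this new interface -- allocate a device, fill in ieee802154_ops, register; the xx_* callbacks are stubs standing in for real radio accesses, and received frames would be handed up with ieee802154_rx_irqsafe():

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/mac802154.h>

struct xx_radio {
	struct ieee802154_dev *dev;
	/* bus and register handles would live here */
};

static int xx_start(struct ieee802154_dev *dev) { return 0; }
static void xx_stop(struct ieee802154_dev *dev) { }

static int xx_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
{
	/* push skb->data (starting at the 802.15.4 header) to the radio */
	return 0;
}

static int xx_ed(struct ieee802154_dev *dev, u8 *level)
{
	*level = 0;	/* report the measured energy here */
	return 0;
}

static int xx_set_channel(struct ieee802154_dev *dev, int page, int channel)
{
	return 0;
}

static struct ieee802154_ops xx_ops = {
	.owner		= THIS_MODULE,
	.start		= xx_start,
	.stop		= xx_stop,
	.xmit		= xx_xmit,
	.ed		= xx_ed,
	.set_channel	= xx_set_channel,
};

static int xx_probe(void)
{
	struct xx_radio *priv;
	struct ieee802154_dev *dev;

	dev = ieee802154_alloc_device(sizeof(*priv), &xx_ops);
	if (!dev)
		return -ENOMEM;

	priv = dev->priv;
	priv->dev = dev;
	dev->extra_tx_headroom = 0;
	dev->flags = IEEE802154_HW_OMIT_CKSUM;

	return ieee802154_register_device(dev);
}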
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 6f9c25a..c02b6ad 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -34,6 +34,7 @@
 	__ND_OPT_ARRAY_MAX,
 	ND_OPT_ROUTE_INFO = 24,		/* RFC4191 */
 	ND_OPT_RDNSS = 25,		/* RFC5006 */
+	ND_OPT_DNSSL = 31,		/* RFC6106 */
 	__ND_OPT_MAX
 };
 
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 34c996f..6cdfeed 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -195,7 +195,6 @@
 #define NEIGH_UPDATE_F_ADMIN			0x80000000
 
 extern void			neigh_table_init(struct neigh_table *tbl);
-extern void			neigh_table_init_no_netlink(struct neigh_table *tbl);
 extern int			neigh_table_clear(struct neigh_table *tbl);
 extern struct neighbour *	neigh_lookup(struct neigh_table *tbl,
 					     const void *pkey,
@@ -323,7 +322,7 @@
 #ifdef CONFIG_BRIDGE_NETFILTER
 static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 {
-	unsigned seq, hh_alen;
+	unsigned int seq, hh_alen;
 
 	do {
 		seq = read_seqbegin(&hh->hh_lock);
@@ -336,7 +335,7 @@
 
 static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
 {
-	unsigned seq;
+	unsigned int seq;
 	int hh_len;
 
 	do {
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index ee547c1..ac9195e 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -279,14 +279,25 @@
 extern int register_pernet_device(struct pernet_operations *);
 extern void unregister_pernet_device(struct pernet_operations *);
 
-struct ctl_path;
 struct ctl_table;
 struct ctl_table_header;
 
-extern struct ctl_table_header *register_net_sysctl_table(struct net *net,
-	const struct ctl_path *path, struct ctl_table *table);
-extern struct ctl_table_header *register_net_sysctl_rotable(
-	const struct ctl_path *path, struct ctl_table *table);
+#ifdef CONFIG_SYSCTL
+extern int net_sysctl_init(void);
+extern struct ctl_table_header *register_net_sysctl(struct net *net,
+	const char *path, struct ctl_table *table);
 extern void unregister_net_sysctl_table(struct ctl_table_header *header);
+#else
+static inline int net_sysctl_init(void) { return 0; }
+static inline struct ctl_table_header *register_net_sysctl(struct net *net,
+	const char *path, struct ctl_table *table)
+{
+	return NULL;
+}
+static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
+{
+}
+#endif
+
 
 #endif /* __NET_NET_NAMESPACE_H */
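
For callers, the visible effect of this change is that the old ctl_path array is
replaced by a plain string path. A minimal sketch of a new-style registration,
with a hypothetical "net/foo" hierarchy and table:

	static int foo_bar_sysctl;
	static struct ctl_table_header *foo_hdr;

	static struct ctl_table foo_table[] = {
		{
			.procname	= "bar",
			.data		= &foo_bar_sysctl,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static int __net_init foo_sysctl_register(struct net *net)
	{
		foo_hdr = register_net_sysctl(net, "net/foo", foo_table);
		return foo_hdr ? 0 : -ENOMEM;
	}

Note that with CONFIG_SYSCTL disabled the inline stub above returns NULL, so a
caller written exactly like this sketch would report -ENOMEM; real callers
typically build their sysctl setup conditionally.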
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index ab86036..cce7f6a 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -321,14 +321,8 @@
 extern unsigned int nf_conntrack_hash_rnd;
 void init_nf_conntrack_hash_rnd(void);
 
-#define NF_CT_STAT_INC(net, count)	\
-	__this_cpu_inc((net)->ct.stat->count)
-#define NF_CT_STAT_INC_ATOMIC(net, count)		\
-do {							\
-	local_bh_disable();				\
-	__this_cpu_inc((net)->ct.stat->count);		\
-	local_bh_enable();				\
-} while (0)
+#define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
+#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
 
 #define MODULE_ALIAS_NFCT_HELPER(helper) \
         MODULE_ALIAS("nfct-helper-" helper)
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 5767dc2..1d18894 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -60,8 +60,8 @@
 	return nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
 }
 
-extern int nf_conntrack_helper_init(void);
-extern void nf_conntrack_helper_fini(void);
+extern int nf_conntrack_helper_init(struct net *net);
+extern void nf_conntrack_helper_fini(struct net *net);
 
 extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
 				       unsigned int protoff,
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index e8010f4..9699c02 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -65,7 +65,7 @@
 
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header	*ctl_table_header;
-	struct ctl_path		*ctl_table_path;
+	const char		*ctl_table_path;
 	struct ctl_table	*ctl_table;
 #endif /* CONFIG_SYSCTL */
 
diff --git a/include/net/netlink.h b/include/net/netlink.h
index f394fe5..785f37a 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -102,20 +102,6 @@
  *   nla_put_flag(skb, type)		add flag attribute to skb
  *   nla_put_msecs(skb, type, jiffies)	add msecs attribute to skb
  *
- * Exceptions Based Attribute Construction:
- *   NLA_PUT(skb, type, len, data)	add attribute to skb
- *   NLA_PUT_U8(skb, type, value)	add u8 attribute to skb
- *   NLA_PUT_U16(skb, type, value)	add u16 attribute to skb
- *   NLA_PUT_U32(skb, type, value)	add u32 attribute to skb
- *   NLA_PUT_U64(skb, type, value)	add u64 attribute to skb
- *   NLA_PUT_STRING(skb, type, str)	add string attribute to skb
- *   NLA_PUT_FLAG(skb, type)		add flag attribute to skb
- *   NLA_PUT_MSECS(skb, type, jiffies)	add msecs attribute to skb
- *
- *   The meaning of these functions is equal to their lower case
- *   variants but they jump to the label nla_put_failure in case
- *   of a failure.
- *
  * Nested Attributes Construction:
  *   nla_nest_start(skb, type)		start a nested attribute
  *   nla_nest_end(skb, nla)		finalize a nested attribute
@@ -772,6 +758,39 @@
 }
 
 /**
+ * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
+{
+	return nla_put(skb, attrtype, sizeof(__be16), &value);
+}
+
+/**
+ * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
+{
+	return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
+{
+	return nla_put(skb, attrtype, sizeof(__le16), &value);
+}
+
+/**
  * nla_put_u32 - Add a u32 netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
@@ -783,7 +802,40 @@
 }
 
 /**
- * nla_put_64 - Add a u64 netlink attribute to a socket buffer
+ * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
+{
+	return nla_put(skb, attrtype, sizeof(__be32), &value);
+}
+
+/**
+ * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
+{
+	return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
+{
+	return nla_put(skb, attrtype, sizeof(__le32), &value);
+}
+
+/**
+ * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
  * @value: numeric value
@@ -794,6 +846,39 @@
 }
 
 /**
+ * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
+{
+	return nla_put(skb, attrtype, sizeof(__be64), &value);
+}
+
+/**
+ * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
+{
+	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+}
+
+/**
+ * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
+{
+	return nla_put(skb, attrtype, sizeof(__le64), &value);
+}
+
+/**
  * nla_put_string - Add a string netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
@@ -828,60 +913,6 @@
 	return nla_put(skb, attrtype, sizeof(u64), &tmp);
 }
 
-#define NLA_PUT(skb, attrtype, attrlen, data) \
-	do { \
-		if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
-			goto nla_put_failure; \
-	} while(0)
-
-#define NLA_PUT_TYPE(skb, type, attrtype, value) \
-	do { \
-		type __tmp = value; \
-		NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
-	} while(0)
-
-#define NLA_PUT_U8(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, u8, attrtype, value)
-
-#define NLA_PUT_U16(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, u16, attrtype, value)
-
-#define NLA_PUT_LE16(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, __le16, attrtype, value)
-
-#define NLA_PUT_BE16(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, __be16, attrtype, value)
-
-#define NLA_PUT_NET16(skb, attrtype, value) \
-	NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value)
-
-#define NLA_PUT_U32(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, u32, attrtype, value)
-
-#define NLA_PUT_BE32(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, __be32, attrtype, value)
-
-#define NLA_PUT_NET32(skb, attrtype, value) \
-	NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value)
-
-#define NLA_PUT_U64(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, u64, attrtype, value)
-
-#define NLA_PUT_BE64(skb, attrtype, value) \
-	NLA_PUT_TYPE(skb, __be64, attrtype, value)
-
-#define NLA_PUT_NET64(skb, attrtype, value) \
-	NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value)
-
-#define NLA_PUT_STRING(skb, attrtype, value) \
-	NLA_PUT(skb, attrtype, strlen(value) + 1, value)
-
-#define NLA_PUT_FLAG(skb, attrtype) \
-	NLA_PUT(skb, attrtype, 0, NULL)
-
-#define NLA_PUT_MSECS(skb, attrtype, jiffies) \
-	NLA_PUT_U64(skb, attrtype, jiffies_to_msecs(jiffies))
-
 /**
  * nla_get_u32 - return payload of u32 attribute
  * @nla: u32 netlink attribute
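
With the NLA_PUT*() exception macros removed, attribute construction checks the
return value of the typed helpers explicitly and branches to the error label
itself, as the include/net/xfrm.h hunk further down does for XFRMA_MARK. For a
hypothetical u32 attribute the conversion looks like:

	/* before:  NLA_PUT_U32(skb, FOO_ATTR_MTU, mtu); */
	if (nla_put_u32(skb, FOO_ATTR_MTU, mtu))
		goto nla_put_failure;

FOO_ATTR_MTU and mtu are placeholders; the pattern is the same for the
byte-order variants added above.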
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 7a911ec..a053a19 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -26,11 +26,14 @@
 	int			sysctl_tstamp;
 	int			sysctl_checksum;
 	unsigned int		sysctl_log_invalid; /* Log invalid packets */
+	int			sysctl_auto_assign_helper;
+	bool			auto_assign_helper_warned;
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header	*sysctl_header;
 	struct ctl_table_header	*acct_sysctl_header;
 	struct ctl_table_header	*tstamp_sysctl_header;
 	struct ctl_table_header	*event_sysctl_header;
+	struct ctl_table_header	*helper_sysctl_header;
 #endif
 	char			*slabname;
 };
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 548d78f..c06ac58 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -5,7 +5,7 @@
 
 struct net;
 
-static inline unsigned net_hash_mix(struct net *net)
+static inline unsigned int net_hash_mix(struct net *net)
 {
 #ifdef CONFIG_NET_NS
 	/*
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 81abfcb2..b42be53 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -12,7 +12,9 @@
 
 struct netns_sysctl_ipv6 {
 #ifdef CONFIG_SYSCTL
-	struct ctl_table_header *table;
+	struct ctl_table_header *hdr;
+	struct ctl_table_header *route_hdr;
+	struct ctl_table_header *icmp_hdr;
 	struct ctl_table_header *frags_hdr;
 #endif
 	int bindv6only;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
new file mode 100644
index 0000000..aca65a5
--- /dev/null
+++ b/include/net/nfc/hci.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2011  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __NET_HCI_H
+#define __NET_HCI_H
+
+#include <linux/skbuff.h>
+
+#include <net/nfc/nfc.h>
+
+struct nfc_hci_dev;
+
+struct nfc_hci_ops {
+	int (*open) (struct nfc_hci_dev *hdev);
+	void (*close) (struct nfc_hci_dev *hdev);
+	int (*hci_ready) (struct nfc_hci_dev *hdev);
+	int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
+	int (*start_poll) (struct nfc_hci_dev *hdev, u32 protocols);
+	int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate,
+				 struct nfc_target *target);
+	int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
+					   struct nfc_target *target);
+	int (*data_exchange) (struct nfc_hci_dev *hdev,
+			      struct nfc_target *target,
+			      struct sk_buff *skb, struct sk_buff **res_skb);
+};
+
+#define NFC_HCI_MAX_CUSTOM_GATES	15
+struct nfc_hci_init_data {
+	u8 gate_count;
+	u8 gates[NFC_HCI_MAX_CUSTOM_GATES];
+	char session_id[9];
+};
+
+typedef int (*xmit) (struct sk_buff *skb, void *cb_data);
+
+#define NFC_HCI_MAX_GATES		256
+
+struct nfc_hci_dev {
+	struct nfc_dev *ndev;
+
+	u32 max_data_link_payload;
+
+	struct mutex msg_tx_mutex;
+
+	struct list_head msg_tx_queue;
+
+	struct workqueue_struct *msg_tx_wq;
+	struct work_struct msg_tx_work;
+
+	struct timer_list cmd_timer;
+	struct hci_msg *cmd_pending_msg;
+
+	struct sk_buff_head rx_hcp_frags;
+
+	struct workqueue_struct *msg_rx_wq;
+	struct work_struct msg_rx_work;
+
+	struct sk_buff_head msg_rx_queue;
+
+	struct nfc_hci_ops *ops;
+
+	struct nfc_hci_init_data init_data;
+
+	void *clientdata;
+
+	u8 gate2pipe[NFC_HCI_MAX_GATES];
+
+	bool poll_started;
+	struct nfc_target *targets;
+	int target_count;
+
+	u8 sw_romlib;
+	u8 sw_patch;
+	u8 sw_flashlib_major;
+	u8 sw_flashlib_minor;
+
+	u8 hw_derivative;
+	u8 hw_version;
+	u8 hw_mpw;
+	u8 hw_software;
+	u8 hw_bsid;
+};
+
+/* hci device allocation */
+struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
+					    struct nfc_hci_init_data *init_data,
+					    u32 protocols,
+					    int tx_headroom,
+					    int tx_tailroom,
+					    int max_link_payload);
+void nfc_hci_free_device(struct nfc_hci_dev *hdev);
+
+int nfc_hci_register_device(struct nfc_hci_dev *hdev);
+void nfc_hci_unregister_device(struct nfc_hci_dev *hdev);
+
+void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
+void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
+
+/* Host IDs */
+#define NFC_HCI_HOST_CONTROLLER_ID	0x00
+#define NFC_HCI_TERMINAL_HOST_ID	0x01
+#define NFC_HCI_UICC_HOST_ID		0x02
+
+/* Host Controller Gates and registry indexes */
+#define NFC_HCI_ADMIN_GATE 0x00
+#define NFC_HCI_ADMIN_SESSION_IDENTITY	0x01
+#define NFC_HCI_ADMIN_MAX_PIPE		0x02
+#define NFC_HCI_ADMIN_WHITELIST		0x03
+#define NFC_HCI_ADMIN_HOST_LIST		0x04
+
+#define NFC_HCI_LOOPBACK_GATE		0x04
+
+#define NFC_HCI_ID_MGMT_GATE		0x05
+#define NFC_HCI_ID_MGMT_VERSION_SW	0x01
+#define NFC_HCI_ID_MGMT_VERSION_HW	0x03
+#define NFC_HCI_ID_MGMT_VENDOR_NAME	0x04
+#define NFC_HCI_ID_MGMT_MODEL_ID	0x05
+#define NFC_HCI_ID_MGMT_HCI_VERSION	0x02
+#define NFC_HCI_ID_MGMT_GATES_LIST	0x06
+
+#define NFC_HCI_LINK_MGMT_GATE		0x06
+#define NFC_HCI_LINK_MGMT_REC_ERROR	0x01
+
+#define NFC_HCI_RF_READER_B_GATE			0x11
+#define NFC_HCI_RF_READER_B_PUPI			0x03
+#define NFC_HCI_RF_READER_B_APPLICATION_DATA		0x04
+#define NFC_HCI_RF_READER_B_AFI				0x02
+#define NFC_HCI_RF_READER_B_HIGHER_LAYER_RESPONSE	0x01
+#define NFC_HCI_RF_READER_B_HIGHER_LAYER_DATA		0x05
+
+#define NFC_HCI_RF_READER_A_GATE		0x13
+#define NFC_HCI_RF_READER_A_UID			0x02
+#define NFC_HCI_RF_READER_A_ATQA		0x04
+#define NFC_HCI_RF_READER_A_APPLICATION_DATA	0x05
+#define NFC_HCI_RF_READER_A_SAK			0x03
+#define NFC_HCI_RF_READER_A_FWI_SFGT		0x06
+#define NFC_HCI_RF_READER_A_DATARATE_MAX	0x01
+
+#define NFC_HCI_TYPE_A_SEL_PROT(x)		(((x) & 0x60) >> 5)
+#define NFC_HCI_TYPE_A_SEL_PROT_MIFARE		0
+#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443	1
+#define NFC_HCI_TYPE_A_SEL_PROT_DEP		2
+#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP	3
+
+/* Generic events */
+#define NFC_HCI_EVT_HCI_END_OF_OPERATION	0x01
+#define NFC_HCI_EVT_POST_DATA			0x02
+#define NFC_HCI_EVT_HOT_PLUG			0x03
+
+/* Reader RF gates events */
+#define NFC_HCI_EVT_READER_REQUESTED	0x10
+#define NFC_HCI_EVT_END_OPERATION	0x11
+
+/* Reader Application gate events */
+#define NFC_HCI_EVT_TARGET_DISCOVERED	0x10
+
+/* receiving messages from lower layer */
+void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
+			   struct sk_buff *skb);
+void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+			  struct sk_buff *skb);
+void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
+			    struct sk_buff *skb);
+void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb);
+
+/* connecting to gates and sending hci instructions */
+int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate);
+int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate);
+int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev);
+int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+		      struct sk_buff **skb);
+int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+		      const u8 *param, size_t param_len);
+int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+		     const u8 *param, size_t param_len, struct sk_buff **skb);
+int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
+			  const u8 *param, size_t param_len);
+int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+		       const u8 *param, size_t param_len);
+
+#endif /* __NET_HCI_H */
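
A driver built on this header allocates an HCI device around its nfc_hci_ops and
registers it with the NFC core. The sketch below is illustrative only; the my_*
callbacks are hypothetical and the headroom/payload parameters are placeholders:

	static struct nfc_hci_ops my_hci_ops = {
		.open		= my_open,
		.close		= my_close,
		.xmit		= my_xmit,
		.start_poll	= my_start_poll,
	};

	static int my_hci_probe(u32 protocols)
	{
		struct nfc_hci_init_data init_data = { .gate_count = 0 };
		struct nfc_hci_dev *hdev;
		int rc;

		strcpy(init_data.session_id, "12345678");	/* 8 chars + NUL */

		hdev = nfc_hci_allocate_device(&my_hci_ops, &init_data,
					       protocols, 0, 0, 0);
		if (!hdev)
			return -ENOMEM;

		rc = nfc_hci_register_device(hdev);
		if (rc)
			nfc_hci_free_device(hdev);
		return rc;
	}

Frames coming back from the hardware are handed to the core with
nfc_hci_recv_frame().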
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index bac070b..9a2505a 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -62,10 +62,12 @@
 	int (*data_exchange)(struct nfc_dev *dev, u32 target_idx,
 			     struct sk_buff *skb, data_exchange_cb_t cb,
 			     void *cb_context);
+	int (*check_presence)(struct nfc_dev *dev, u32 target_idx);
 };
 
 #define NFC_TARGET_IDX_ANY -1
 #define NFC_MAX_GT_LEN 48
+#define NFC_TARGET_IDX_NONE 0xffffffff
 
 struct nfc_target {
 	u32 idx;
@@ -78,6 +80,8 @@
 	u8 sensb_res[NFC_SENSB_RES_MAXSIZE];
 	u8 sensf_res_len;
 	u8 sensf_res[NFC_SENSF_RES_MAXSIZE];
+	u8 hci_reader_gate;
+	u8 logical_idx;
 };
 
 struct nfc_genl_data {
@@ -86,7 +90,8 @@
 };
 
 struct nfc_dev {
-	unsigned idx;
+	unsigned int idx;
+	u32 target_next_idx;
 	struct nfc_target *targets;
 	int n_targets;
 	int targets_generation;
@@ -94,7 +99,7 @@
 	struct device dev;
 	bool dev_up;
 	bool polling;
-	bool remote_activated;
+	u32 activated_target_idx;
 	bool dep_link_up;
 	u32 dep_rf_mode;
 	struct nfc_genl_data genl_data;
@@ -103,6 +108,10 @@
 	int tx_headroom;
 	int tx_tailroom;
 
+	struct timer_list check_pres_timer;
+	struct workqueue_struct *check_pres_wq;
+	struct work_struct check_pres_work;
+
 	struct nfc_ops *ops;
 };
 #define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
@@ -181,6 +190,7 @@
 
 int nfc_targets_found(struct nfc_dev *dev,
 		      struct nfc_target *targets, int ntargets);
+int nfc_target_lost(struct nfc_dev *dev, u32 target_idx);
 
 int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
 		       u8 comm_mode, u8 rf_mode);
diff --git a/include/net/nfc/shdlc.h b/include/net/nfc/shdlc.h
new file mode 100644
index 0000000..1071987
--- /dev/null
+++ b/include/net/nfc/shdlc.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __NFC_SHDLC_H
+#define __NFC_SHDLC_H
+
+struct nfc_shdlc;
+
+struct nfc_shdlc_ops {
+	int (*open) (struct nfc_shdlc *shdlc);
+	void (*close) (struct nfc_shdlc *shdlc);
+	int (*hci_ready) (struct nfc_shdlc *shdlc);
+	int (*xmit) (struct nfc_shdlc *shdlc, struct sk_buff *skb);
+	int (*start_poll) (struct nfc_shdlc *shdlc, u32 protocols);
+	int (*target_from_gate) (struct nfc_shdlc *shdlc, u8 gate,
+				 struct nfc_target *target);
+	int (*complete_target_discovered) (struct nfc_shdlc *shdlc, u8 gate,
+					   struct nfc_target *target);
+	int (*data_exchange) (struct nfc_shdlc *shdlc,
+			      struct nfc_target *target,
+			      struct sk_buff *skb, struct sk_buff **res_skb);
+};
+
+enum shdlc_state {
+	SHDLC_DISCONNECTED = 0,
+	SHDLC_CONNECTING = 1,
+	SHDLC_NEGOCIATING = 2,
+	SHDLC_CONNECTED = 3
+};
+
+struct nfc_shdlc {
+	struct mutex state_mutex;
+	enum shdlc_state state;
+	int hard_fault;
+
+	struct nfc_hci_dev *hdev;
+
+	wait_queue_head_t *connect_wq;
+	int connect_tries;
+	int connect_result;
+	struct timer_list connect_timer;	/* aka T3 in spec 10.6.1 */
+
+	u8 w;				/* window size */
+	bool srej_support;
+
+	struct timer_list t1_timer;	/* send ack timeout */
+	bool t1_active;
+
+	struct timer_list t2_timer;	/* guard/retransmit timeout */
+	bool t2_active;
+
+	int ns;				/* next seq num for send */
+	int nr;				/* next expected seq num for receive */
+	int dnr;			/* oldest sent unacked seq num */
+
+	struct sk_buff_head rcv_q;
+
+	struct sk_buff_head send_q;
+	bool rnr;			/* other side is not ready to receive */
+
+	struct sk_buff_head ack_pending_q;
+
+	struct workqueue_struct *sm_wq;
+	struct work_struct sm_work;
+
+	struct nfc_shdlc_ops *ops;
+
+	int client_headroom;
+	int client_tailroom;
+
+	void *clientdata;
+};
+
+void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb);
+
+struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
+				     struct nfc_hci_init_data *init_data,
+				     u32 protocols,
+				     int tx_headroom, int tx_tailroom,
+				     int max_link_payload, const char *devname);
+
+void nfc_shdlc_free(struct nfc_shdlc *shdlc);
+
+void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata);
+void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc);
+struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc);
+
+#endif /* __NFC_SHDLC_H */
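
The shdlc layer wraps the same callback set behind nfc_shdlc_ops and owns the
underlying HCI device. A hedged sketch of the allocation step, with hypothetical
my_* callbacks and placeholder sizing parameters:

	static struct nfc_shdlc_ops my_shdlc_ops = {
		.open	= my_open,
		.close	= my_close,
		.xmit	= my_xmit,
	};

	static int my_shdlc_probe(struct nfc_hci_init_data *init_data,
				  u32 protocols)
	{
		struct nfc_shdlc *shdlc;

		shdlc = nfc_shdlc_allocate(&my_shdlc_ops, init_data, protocols,
					   0, 0, 0, "my_shdlc_driver");
		if (!shdlc)
			return -ENOMEM;

		nfc_shdlc_set_clientdata(shdlc, NULL);
		return 0;
	}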
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index fffdc60..66f5ac3 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -107,7 +107,7 @@
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
-static inline unsigned psched_mtu(const struct net_device *dev)
+static inline unsigned int psched_mtu(const struct net_device *dev)
 {
 	return dev->mtu + dev->hard_header_len;
 }
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index cf75772..e7ea660 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -5,7 +5,7 @@
 
 void raw6_icmp_error(struct sk_buff *, int nexthdr,
 		u8 type, u8 code, int inner_offset, __be32);
-int raw6_local_deliver(struct sk_buff *, int);
+bool raw6_local_deliver(struct sk_buff *, int);
 
 extern int			rawv6_rcv(struct sock *sk,
 					  struct sk_buff *skb);
diff --git a/include/net/route.h b/include/net/route.h
index b1c0d5b..ed2b78e 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -50,7 +50,7 @@
 	__be32			rt_key_src;
 
 	int			rt_genid;
-	unsigned		rt_flags;
+	unsigned int		rt_flags;
 	__u16			rt_type;
 	__u8			rt_key_tos;
 
@@ -185,8 +185,8 @@
 					  unsigned short new_mtu, struct net_device *dev);
 extern void		ip_rt_send_redirect(struct sk_buff *skb);
 
-extern unsigned		inet_addr_type(struct net *net, __be32 addr);
-extern unsigned		inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
+extern unsigned int		inet_addr_type(struct net *net, __be32 addr);
+extern unsigned int		inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
 extern void		ip_rt_multicast_event(struct in_device *);
 extern int		ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 extern void		ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 3702939..bbcfd09 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -41,9 +41,11 @@
  *	@get_size: Function to calculate required room for dumping device
  *		   specific netlink attributes
  *	@fill_info: Function to dump device specific netlink attributes
- *	@get_xstats_size: Function to calculate required room for dumping devic
+ *	@get_xstats_size: Function to calculate required room for dumping device
  *			  specific statistics
  *	@fill_xstats: Function to dump device specific statistics
+ *	@get_tx_queues: Function to determine number of transmit queues to create when
+ *		        creating a new device.
  */
 struct rtnl_link_ops {
 	struct list_head	list;
@@ -75,9 +77,8 @@
 	size_t			(*get_xstats_size)(const struct net_device *dev);
 	int			(*fill_xstats)(struct sk_buff *skb,
 					       const struct net_device *dev);
-	int			(*get_tx_queues)(struct net *net, struct nlattr *tb[],
-						 unsigned int *tx_queues,
-						 unsigned int *real_tx_queues);
+	int			(*get_tx_queues)(struct net *net,
+						 struct nlattr *tb[]);
 };
 
 extern int	__rtnl_link_register(struct rtnl_link_ops *ops);
@@ -94,7 +95,7 @@
  * 	@fill_link_af: Function to fill IFLA_AF_SPEC with address family
  * 		       specific netlink attributes.
  * 	@get_link_af_size: Function to calculate size of address family specific
- * 			   netlink attributes exlusive the container attribute.
+ * 			   netlink attributes.
  *	@validate_link_af: Validate a IFLA_AF_SPEC attribute, must check attr
  *			   for invalid configuration settings.
  * 	@set_link_af: Function to parse a IFLA_AF_SPEC attribute and modify
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 6ee44b2..a2ef814 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -704,4 +704,17 @@
 	addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
 }
 
+/* The cookie is always 0 since this is how it's used in the
+ * pmtu code.
+ */
+static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
+{
+	if (t->dst && !dst_check(t->dst, 0)) {
+		dst_release(t->dst);
+		t->dst = NULL;
+	}
+
+	return t->dst;
+}
+
 #endif /* __net_sctp_h__ */
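
The helper above lets callers revalidate the cached route before trusting its
metrics. A minimal sketch of the intended usage, assuming (as in this kernel's
sctp code) that the transport caches its path MTU in t->pathmtu:

	/* Illustrative only, not part of the patch. */
	static inline u32 my_transport_mtu(struct sctp_transport *t)
	{
		struct dst_entry *dst = sctp_transport_dst_check(t);

		return dst ? dst_mtu(dst) : t->pathmtu;
	}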
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 88949a9..e4652fe 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1145,10 +1145,10 @@
 	/* Data pending that has never been transmitted.  */
 	struct list_head out_chunk_list;
 
-	unsigned out_qlen;	/* Total length of queued data chunks. */
+	unsigned int out_qlen;	/* Total length of queued data chunks. */
 
 	/* Error of send failed, may used in SCTP_SEND_FAILED event. */
-	unsigned error;
+	unsigned int error;
 
 	/* These are control chunks we want to send.  */
 	struct list_head control_chunk_list;
@@ -2000,8 +2000,8 @@
 __u32 sctp_association_get_next_tsn(struct sctp_association *);
 
 void sctp_assoc_sync_pmtu(struct sctp_association *);
-void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned);
-void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned);
+void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
 void sctp_assoc_set_primary(struct sctp_association *,
 			    struct sctp_transport *);
 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
diff --git a/include/net/sock.h b/include/net/sock.h
index 188532e..da93155 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -97,7 +97,7 @@
 #else
 /* Validate arguments and do nothing */
 static inline __printf(2, 3)
-void SOCK_DEBUG(struct sock *sk, const char *msg, ...)
+void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
 {
 }
 #endif
@@ -372,11 +372,22 @@
 	void			(*sk_data_ready)(struct sock *sk, int bytes);
 	void			(*sk_write_space)(struct sock *sk);
 	void			(*sk_error_report)(struct sock *sk);
-  	int			(*sk_backlog_rcv)(struct sock *sk,
-						  struct sk_buff *skb);  
+	int			(*sk_backlog_rcv)(struct sock *sk,
+						  struct sk_buff *skb);
 	void                    (*sk_destruct)(struct sock *sk);
 };
 
+/*
+ * SK_CAN_REUSE and SK_NO_REUSE on a socket indicate whether or not its
+ * port may be reused by someone else. SK_FORCE_REUSE on a socket means
+ * that the socket will reuse everybody else's port without looking at
+ * the other socket's sk_reuse value.
+ */
+
+#define SK_NO_REUSE	0
+#define SK_CAN_REUSE	1
+#define SK_FORCE_REUSE	2
+
 static inline int sk_peek_offset(struct sock *sk, int flags)
 {
 	if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
@@ -443,40 +454,40 @@
 		NULL;
 }
 
-static inline int sk_unhashed(const struct sock *sk)
+static inline bool sk_unhashed(const struct sock *sk)
 {
 	return hlist_unhashed(&sk->sk_node);
 }
 
-static inline int sk_hashed(const struct sock *sk)
+static inline bool sk_hashed(const struct sock *sk)
 {
 	return !sk_unhashed(sk);
 }
 
-static __inline__ void sk_node_init(struct hlist_node *node)
+static inline void sk_node_init(struct hlist_node *node)
 {
 	node->pprev = NULL;
 }
 
-static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
+static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
 {
 	node->pprev = NULL;
 }
 
-static __inline__ void __sk_del_node(struct sock *sk)
+static inline void __sk_del_node(struct sock *sk)
 {
 	__hlist_del(&sk->sk_node);
 }
 
 /* NB: equivalent to hlist_del_init_rcu */
-static __inline__ int __sk_del_node_init(struct sock *sk)
+static inline bool __sk_del_node_init(struct sock *sk)
 {
 	if (sk_hashed(sk)) {
 		__sk_del_node(sk);
 		sk_node_init(&sk->sk_node);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 /* Grab socket reference count. This operation is valid only
@@ -498,9 +509,9 @@
 	atomic_dec(&sk->sk_refcnt);
 }
 
-static __inline__ int sk_del_node_init(struct sock *sk)
+static inline bool sk_del_node_init(struct sock *sk)
 {
-	int rc = __sk_del_node_init(sk);
+	bool rc = __sk_del_node_init(sk);
 
 	if (rc) {
 		/* paranoid for a while -acme */
@@ -511,18 +522,18 @@
 }
 #define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)
 
-static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
+static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
 {
 	if (sk_hashed(sk)) {
 		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
-static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
+static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
 {
-	int rc = __sk_nulls_del_node_init_rcu(sk);
+	bool rc = __sk_nulls_del_node_init_rcu(sk);
 
 	if (rc) {
 		/* paranoid for a while -acme */
@@ -532,40 +543,40 @@
 	return rc;
 }
 
-static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
+static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
 {
 	hlist_add_head(&sk->sk_node, list);
 }
 
-static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
+static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
 {
 	sock_hold(sk);
 	__sk_add_node(sk, list);
 }
 
-static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 {
 	sock_hold(sk);
 	hlist_add_head_rcu(&sk->sk_node, list);
 }
 
-static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
 	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
-static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
 	sock_hold(sk);
 	__sk_nulls_add_node_rcu(sk, list);
 }
 
-static __inline__ void __sk_del_bind_node(struct sock *sk)
+static inline void __sk_del_bind_node(struct sock *sk)
 {
 	__hlist_del(&sk->sk_bind_node);
 }
 
-static __inline__ void sk_add_bind_node(struct sock *sk,
+static inline void sk_add_bind_node(struct sock *sk,
 					struct hlist_head *list)
 {
 	hlist_add_head(&sk->sk_bind_node, list);
@@ -639,7 +650,7 @@
 	__clear_bit(flag, &sk->sk_flags);
 }
 
-static inline int sock_flag(struct sock *sk, enum sock_flags flag)
+static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
 {
 	return test_bit(flag, &sk->sk_flags);
 }
@@ -654,7 +665,7 @@
 	sk->sk_ack_backlog++;
 }
 
-static inline int sk_acceptq_is_full(struct sock *sk)
+static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
 	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
 }
@@ -662,19 +673,19 @@
 /*
  * Compute minimal free write space needed to queue new packets.
  */
-static inline int sk_stream_min_wspace(struct sock *sk)
+static inline int sk_stream_min_wspace(const struct sock *sk)
 {
 	return sk->sk_wmem_queued >> 1;
 }
 
-static inline int sk_stream_wspace(struct sock *sk)
+static inline int sk_stream_wspace(const struct sock *sk)
 {
 	return sk->sk_sndbuf - sk->sk_wmem_queued;
 }
 
 extern void sk_stream_write_space(struct sock *sk);
 
-static inline int sk_stream_memory_free(struct sock *sk)
+static inline bool sk_stream_memory_free(const struct sock *sk)
 {
 	return sk->sk_wmem_queued < sk->sk_sndbuf;
 }
@@ -699,17 +710,19 @@
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+				     unsigned int limit)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize > sk->sk_rcvbuf;
+	return qsize > limit;
 }
 
 /* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+					      unsigned int limit)
 {
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, limit))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
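
sk_rcvqueues_full() and sk_add_backlog() now take the limit to enforce instead of
hard-coding sk_rcvbuf, so each protocol passes the bound it wants. A hedged
sketch of a receive path after this change; my_proto_rcv is hypothetical and the
choice of limit is up to the protocol:

	static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
	{
		unsigned int limit = sk->sk_rcvbuf;	/* protocol's choice */
		int rc = 0;

		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk))
			rc = sk_backlog_rcv(sk, skb);
		else if (sk_add_backlog(sk, skb, limit))
			kfree_skb(skb);		/* backlog over limit: drop */
		bh_unlock_sock(sk);

		return rc;
	}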
@@ -796,26 +809,26 @@
  * transport -> network interface is defined by struct inet_proto
  */
 struct proto {
-	void			(*close)(struct sock *sk, 
+	void			(*close)(struct sock *sk,
 					long timeout);
 	int			(*connect)(struct sock *sk,
-				        struct sockaddr *uaddr, 
+					struct sockaddr *uaddr,
 					int addr_len);
 	int			(*disconnect)(struct sock *sk, int flags);
 
-	struct sock *		(*accept) (struct sock *sk, int flags, int *err);
+	struct sock *		(*accept)(struct sock *sk, int flags, int *err);
 
 	int			(*ioctl)(struct sock *sk, int cmd,
 					 unsigned long arg);
 	int			(*init)(struct sock *sk);
 	void			(*destroy)(struct sock *sk);
 	void			(*shutdown)(struct sock *sk, int how);
-	int			(*setsockopt)(struct sock *sk, int level, 
+	int			(*setsockopt)(struct sock *sk, int level,
 					int optname, char __user *optval,
 					unsigned int optlen);
-	int			(*getsockopt)(struct sock *sk, int level, 
-					int optname, char __user *optval, 
-					int __user *option);  	 
+	int			(*getsockopt)(struct sock *sk, int level,
+					int optname, char __user *optval,
+					int __user *option);
 #ifdef CONFIG_COMPAT
 	int			(*compat_setsockopt)(struct sock *sk,
 					int level,
@@ -832,14 +845,14 @@
 					   struct msghdr *msg, size_t len);
 	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
 					   struct msghdr *msg,
-					size_t len, int noblock, int flags, 
-					int *addr_len);
+					   size_t len, int noblock, int flags,
+					   int *addr_len);
 	int			(*sendpage)(struct sock *sk, struct page *page,
 					int offset, size_t size, int flags);
-	int			(*bind)(struct sock *sk, 
+	int			(*bind)(struct sock *sk,
 					struct sockaddr *uaddr, int addr_len);
 
-	int			(*backlog_rcv) (struct sock *sk, 
+	int			(*backlog_rcv) (struct sock *sk,
 						struct sk_buff *skb);
 
 	/* Keeping track of sk's, looking them up, and port selection methods. */
@@ -1129,9 +1142,9 @@
 	struct proto *prot = sk->sk_prot;
 
 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated);
+		return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
 
-	return percpu_counter_sum_positive(prot->sockets_allocated);
+	return percpu_counter_read_positive(prot->sockets_allocated);
 }
 
 static inline int
@@ -1160,7 +1173,7 @@
 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
 #else
-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
 		int inc)
 {
 }
@@ -1247,24 +1260,24 @@
 	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
 }
 
-static inline int sk_has_account(struct sock *sk)
+static inline bool sk_has_account(struct sock *sk)
 {
 	/* return true if protocol supports memory accounting */
 	return !!sk->sk_prot->memory_allocated;
 }
 
-static inline int sk_wmem_schedule(struct sock *sk, int size)
+static inline bool sk_wmem_schedule(struct sock *sk, int size)
 {
 	if (!sk_has_account(sk))
-		return 1;
+		return true;
 	return size <= sk->sk_forward_alloc ||
 		__sk_mem_schedule(sk, size, SK_MEM_SEND);
 }
 
-static inline int sk_rmem_schedule(struct sock *sk, int size)
+static inline bool sk_rmem_schedule(struct sock *sk, int size)
 {
 	if (!sk_has_account(sk))
-		return 1;
+		return true;
 	return size <= sk->sk_forward_alloc ||
 		__sk_mem_schedule(sk, size, SK_MEM_RECV);
 }
@@ -1329,7 +1342,7 @@
  * Mark both the sk_lock and the sk_lock.slock as a
  * per-address-family lock class.
  */
-#define sock_lock_init_class_and_name(sk, sname, skey, name, key) 	\
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
 do {									\
 	sk->sk_lock.owned = 0;						\
 	init_waitqueue_head(&sk->sk_lock.wq);				\
@@ -1337,7 +1350,7 @@
 	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
 			sizeof((sk)->sk_lock));				\
 	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
-		       	(skey), (sname));				\
+				(skey), (sname));				\
 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
 } while (0)
 
@@ -1397,13 +1410,13 @@
 						unsigned int optlen);
 
 extern int			sock_getsockopt(struct socket *sock, int level,
-						int op, char __user *optval, 
+						int op, char __user *optval,
 						int __user *optlen);
-extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
+extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
 						     unsigned long size,
 						     int noblock,
 						     int *errcode);
-extern struct sk_buff 		*sock_alloc_send_pskb(struct sock *sk,
+extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
 						      unsigned long header_len,
 						      unsigned long data_len,
 						      int noblock,
@@ -1425,7 +1438,7 @@
  * Functions to fill in entries in struct proto_ops when a protocol
  * does not implement a particular function.
  */
-extern int                      sock_no_bind(struct socket *, 
+extern int                      sock_no_bind(struct socket *,
 					     struct sockaddr *, int);
 extern int                      sock_no_connect(struct socket *,
 						struct sockaddr *, int, int);
@@ -1454,7 +1467,7 @@
 					     struct vm_area_struct *vma);
 extern ssize_t			sock_no_sendpage(struct socket *sock,
 						struct page *page,
-						int offset, size_t size, 
+						int offset, size_t size,
 						int flags);
 
 /*
@@ -1477,7 +1490,7 @@
 /*
  *	Default socket callbacks and setup code
  */
- 
+
 /* Initialise core socket variables */
 extern void sock_init_data(struct socket *sock, struct sock *sk);
 
@@ -1677,7 +1690,7 @@
 
 extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
-static inline int sk_can_gso(const struct sock *sk)
+static inline bool sk_can_gso(const struct sock *sk)
 {
 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
 }
@@ -1794,7 +1807,7 @@
  *
  * Returns true if socket has write or read allocations
  */
-static inline int sk_has_allocations(const struct sock *sk)
+static inline bool sk_has_allocations(const struct sock *sk)
 {
 	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
 }
@@ -1833,9 +1846,7 @@
  */
 static inline bool wq_has_sleeper(struct socket_wq *wq)
 {
-
-	/*
-	 * We need to be sure we are in sync with the
+	/* We need to be sure we are in sync with the
 	 * add_wait_queue modifications to the wait queue.
 	 *
 	 * This memory barrier is paired in the sock_poll_wait.
@@ -1857,22 +1868,21 @@
 {
 	if (!poll_does_not_wait(p) && wait_address) {
 		poll_wait(filp, wait_address, p);
-		/*
-		 * We need to be sure we are in sync with the
+		/* We need to be sure we are in sync with the
 		 * socket flags modification.
 		 *
 		 * This memory barrier is paired in the wq_has_sleeper.
-		*/
+		 */
 		smp_mb();
 	}
 }
 
 /*
- * 	Queue a received datagram if it will fit. Stream and sequenced
+ *	Queue a received datagram if it will fit. Stream and sequenced
  *	protocols can't normally use this as they need to fit buffers in
  *	and play with them.
  *
- * 	Inlined as it's very short and called for pretty much every
+ *	Inlined as it's very short and called for pretty much every
  *	packet ever received.
  */
 
@@ -1898,10 +1908,10 @@
 	sk_mem_charge(sk, skb->truesize);
 }
 
-extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
+extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 			   unsigned long expires);
 
-extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
+extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
@@ -1910,7 +1920,7 @@
 /*
  *	Recover an error report and clear atomically
  */
- 
+
 static inline int sock_error(struct sock *sk)
 {
 	int err;
@@ -1926,7 +1936,7 @@
 
 	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
-		if (amt < 0) 
+		if (amt < 0)
 			amt = 0;
 	}
 	return amt;
@@ -1970,7 +1980,7 @@
 /*
  *	Default write policy as shown to user space via poll/select/SIGIO
  */
-static inline int sock_writeable(const struct sock *sk) 
+static inline bool sock_writeable(const struct sock *sk)
 {
 	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
 }
@@ -1980,12 +1990,12 @@
 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
 
-static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
+static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
 {
 	return noblock ? 0 : sk->sk_rcvtimeo;
 }
 
-static inline long sock_sndtimeo(const struct sock *sk, int noblock)
+static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
 {
 	return noblock ? 0 : sk->sk_sndtimeo;
 }
@@ -2008,7 +2018,7 @@
 extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
 	struct sk_buff *skb);
 
-static __inline__ void
+static inline void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
 {
 	ktime_t kt = skb->tstamp;
@@ -2049,7 +2059,7 @@
 			   (1UL << SOCK_RCVTSTAMP)			| \
 			   (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)	| \
 			   (1UL << SOCK_TIMESTAMPING_SOFTWARE)		| \
-			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) 	| \
+			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE)	| \
 			   (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
 
 	if (sk->sk_flags & FLAGS_TS_OR_DROPS)
@@ -2078,7 +2088,7 @@
  * locked so that the sk_buff queue operation is ok.
 */
 #ifdef CONFIG_NET_DMA
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
 	if (!copied_early)
@@ -2087,7 +2097,7 @@
 		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
 }
 #else
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
 	__kfree_skb(skb);
@@ -2134,8 +2144,8 @@
 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
 extern int sock_get_timestampns(struct sock *, struct timespec __user *);
 
-/* 
- *	Enable debug/info messages 
+/*
+ *	Enable debug/info messages
  */
 extern int net_msg_warn;
 #define NETDEBUG(fmt, args...) \
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f75a04d..e79aa48 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -123,7 +123,7 @@
 #endif
 #define TCP_RTO_MAX	((unsigned)(120*HZ))
 #define TCP_RTO_MIN	((unsigned)(HZ/5))
-#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC2988bis initial RTO value	*/
+#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
 #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
 						 * used as a fallback RTO for the
 						 * initial data transmission if no
@@ -252,6 +252,7 @@
 extern int sysctl_tcp_cookie_size;
 extern int sysctl_tcp_thin_linear_timeouts;
 extern int sysctl_tcp_thin_dupack;
+extern int sysctl_tcp_early_retrans;
 
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
@@ -262,14 +263,14 @@
  * and worry about wraparound (automatic with unsigned arithmetic).
  */
 
-static inline int before(__u32 seq1, __u32 seq2)
+static inline bool before(__u32 seq1, __u32 seq2)
 {
         return (__s32)(seq1-seq2) < 0;
 }
 #define after(seq2, seq1) 	before(seq1, seq2)
 
 /* is s2<=s1<=s3 ? */
-static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
+static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 {
 	return seq3 - seq2 >= seq1 - seq2;
 }
@@ -304,7 +305,7 @@
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */
-static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 {
 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
@@ -366,13 +367,6 @@
 #define	TCP_ECN_DEMAND_CWR	4
 #define	TCP_ECN_SEEN		8
 
-static __inline__ void
-TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
-{
-	if (sysctl_tcp_ecn && th->ece && th->cwr)
-		inet_rsk(req)->ecn_ok = 1;
-}
-
 enum tcp_tw_status {
 	TCP_TW_SUCCESS = 0,
 	TCP_TW_RST = 1,
@@ -389,12 +383,13 @@
 				   struct request_sock **prev);
 extern int tcp_child_process(struct sock *parent, struct sock *child,
 			     struct sk_buff *skb);
-extern int tcp_use_frto(struct sock *sk);
+extern bool tcp_use_frto(struct sock *sk);
 extern void tcp_enter_frto(struct sock *sk);
 extern void tcp_enter_loss(struct sock *sk, int how);
 extern void tcp_clear_retrans(struct tcp_sock *tp);
 extern void tcp_update_metrics(struct sock *sk);
 extern void tcp_close(struct sock *sk, long timeout);
+extern void tcp_init_sock(struct sock *sk);
 extern unsigned int tcp_poll(struct file * file, struct socket *sock,
 			     struct poll_table_struct *wait);
 extern int tcp_getsockopt(struct sock *sk, int level, int optname,
@@ -435,6 +430,9 @@
 					struct request_values *rvp);
 extern int tcp_disconnect(struct sock *sk, int flags);
 
+void tcp_connect_init(struct sock *sk);
+void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
+int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 
 /* From syncookies.c */
 extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
@@ -472,7 +470,7 @@
 
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 				      int nonagle);
-extern int tcp_may_send_now(struct sock *sk);
+extern bool tcp_may_send_now(struct sock *sk);
 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 extern void tcp_retransmit_timer(struct sock *sk);
 extern void tcp_xmit_retransmit_queue(struct sock *);
@@ -486,15 +484,17 @@
 extern void tcp_send_fin(struct sock *sk);
 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 extern int tcp_send_synack(struct sock *);
-extern int tcp_syn_flood_action(struct sock *sk,
-				const struct sk_buff *skb,
-				const char *proto);
+extern bool tcp_syn_flood_action(struct sock *sk,
+				 const struct sk_buff *skb,
+				 const char *proto);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
 
 /* tcp_input.c */
 extern void tcp_cwnd_application_limited(struct sock *sk);
+extern void tcp_resume_early_retransmit(struct sock *sk);
+extern void tcp_rearm_rto(struct sock *sk);
 
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
@@ -540,8 +540,8 @@
 
 extern void tcp_initialize_rcv_mss(struct sock *sk);
 
-extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu);
-extern int tcp_mss_to_mtu(const struct sock *sk, int mss);
+extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
+extern int tcp_mss_to_mtu(struct sock *sk, int mss);
 extern void tcp_mtup_init(struct sock *sk);
 extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
 
@@ -609,6 +609,8 @@
  */
 extern u32 __tcp_select_window(struct sock *sk);
 
+void tcp_send_window_probe(struct sock *sk);
+
 /* TCP timestamps are only 32-bits, this causes a slight
  * complication on 64-bit systems since we store a snapshot
  * of jiffies in the buffer control blocks below.  We decided
@@ -645,21 +647,38 @@
 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
 	__u32		when;		/* used to compute rtt's	*/
 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
+
 	__u8		sacked;		/* State flags for SACK/FACK.	*/
 #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
 #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
 #define TCPCB_LOST		0x04	/* SKB is lost			*/
 #define TCPCB_TAGBITS		0x07	/* All tag bits			*/
-	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
-	/* 1 byte hole */
 #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
 #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 
+	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
+	/* 1 byte hole */
 	__u32		ack_seq;	/* Sequence number ACK'd	*/
 };
 
 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
 
+/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
+ *
+ * If we receive a SYN packet with these bits set, it means a network is
+ * playing bad games with TOS bits. In order to avoid possible false congestion
+ * notifications, we disable TCP ECN negotiation.
+ */
+static inline void
+TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb)
+{
+	const struct tcphdr *th = tcp_hdr(skb);
+
+	if (sysctl_tcp_ecn && th->ece && th->cwr &&
+	    INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
+		inet_rsk(req)->ecn_ok = 1;
+}
+
 /* Due to TSO, an SKB can be composed of multiple actual
  * packets.  To keep these tracked properly, we use this.
  */
@@ -775,12 +794,12 @@
 	return tp->rx_opt.sack_ok;
 }
 
-static inline int tcp_is_reno(const struct tcp_sock *tp)
+static inline bool tcp_is_reno(const struct tcp_sock *tp)
 {
 	return !tcp_is_sack(tp);
 }
 
-static inline int tcp_is_fack(const struct tcp_sock *tp)
+static inline bool tcp_is_fack(const struct tcp_sock *tp)
 {
 	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 }
@@ -790,6 +809,21 @@
 	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
 }
 
+/* TCP early-retransmit (ER) is similar to but more conservative than
+ * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
+ */
+static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
+{
+	tp->do_early_retrans = sysctl_tcp_early_retrans &&
+		!sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3;
+	tp->early_retrans_delayed = 0;
+}
+
+static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
+{
+	tp->do_early_retrans = 0;
+}
+
 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
 {
 	return tp->sacked_out + tp->lost_out;
@@ -867,7 +901,7 @@
 {
 	return tp->snd_una + tp->snd_wnd;
 }
-extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 
 static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
 				       const struct sk_buff *skb)
@@ -910,7 +944,7 @@
 	return __skb_checksum_complete(skb);
 }
 
-static inline int tcp_checksum_complete(struct sk_buff *skb)
+static inline bool tcp_checksum_complete(struct sk_buff *skb)
 {
 	return !skb_csum_unnecessary(skb) &&
 		__tcp_checksum_complete(skb);
@@ -940,12 +974,12 @@
  *
  * NOTE: is this not too big to inline?
  */
-static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
-		return 0;
+		return false;
 
 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
 	tp->ucopy.memory += skb->truesize;
@@ -969,7 +1003,7 @@
 						  (3 * tcp_rto_min(sk)) / 4,
 						  TCP_RTO_MAX);
 	}
-	return 1;
+	return true;
 }
 
 
@@ -1074,28 +1108,28 @@
 	return fin_timeout;
 }
 
-static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
-				 int paws_win)
+static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
+				  int paws_win)
 {
 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
-		return 1;
+		return true;
 	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
-		return 1;
+		return true;
 	/*
 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
 	 * then following tcp messages have valid values. Ignore 0 value,
 	 * or else 'negative' tsval might forbid us to accept their packets.
 	 */
 	if (!rx_opt->ts_recent)
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
-static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
-				  int rst)
+static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
+				   int rst)
 {
 	if (tcp_paws_check(rx_opt, 0))
-		return 0;
+		return false;
 
 	/* RST segments are not recommended to carry timestamp,
 	   and, if they do, it is recommended to ignore PAWS because
@@ -1110,8 +1144,8 @@
 	   However, we can relax time bounds for RST segments to MSL.
 	 */
 	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 static inline void tcp_mib_init(struct net *net)
@@ -1226,7 +1260,7 @@
 
 extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
 extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
-				 unsigned header_len);
+				 unsigned int header_len);
 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 			    const struct tcp_md5sig_key *key);
 
@@ -1349,7 +1383,7 @@
 	__skb_unlink(skb, &sk->sk_write_queue);
 }
 
-static inline int tcp_write_queue_empty(struct sock *sk)
+static inline bool tcp_write_queue_empty(struct sock *sk)
 {
 	return skb_queue_empty(&sk->sk_write_queue);
 }
@@ -1406,7 +1440,7 @@
 /* Determines whether this is a thin stream (which may suffer from
  * increased latency). Used to trigger latency-reducing mechanisms.
  */
-static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
+static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
 {
 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
 }
diff --git a/include/net/udp.h b/include/net/udp.h
index 5d606d9..065f379 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -81,7 +81,7 @@
 extern struct udp_table udp_table;
 extern void udp_table_init(struct udp_table *, const char *);
 static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
-					     struct net *net, unsigned num)
+					     struct net *net, unsigned int num)
 {
 	return &table->hash[udp_hashfn(net, num, table->mask)];
 }
@@ -267,4 +267,8 @@
 extern int udp4_ufo_send_check(struct sk_buff *skb);
 extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	netdev_features_t features);
+extern void udp_encap_enable(void);
+#if IS_ENABLED(CONFIG_IPV6)
+extern void udpv6_encap_enable(void);
+#endif
 #endif	/* _UDP_H */
diff --git a/include/net/wimax.h b/include/net/wimax.h
index 322ff4f..bbb74f9 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -423,8 +423,8 @@
 	int (*op_reset)(struct wimax_dev *wimax_dev);
 
 	struct rfkill *rfkill;
-	unsigned rf_hw;
-	unsigned rf_sw;
+	unsigned int rf_hw;
+	unsigned int rf_sw;
 	char name[32];
 
 	struct dentry *debugfs_dentry;
diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h
index ff27f1b..b52bda8 100644
--- a/include/net/wpan-phy.h
+++ b/include/net/wpan-phy.h
@@ -25,6 +25,14 @@
 #include <linux/mutex.h>
 #include <linux/bug.h>
 
+/* According to the IEEE 802.15.4 standard, the 5 most significant bits of the
+ * 32-bit channel bitmap shall be used as an integer value to specify one of
+ * 32 possible channel pages. The lower 27 bits of the channel bitmap shall be
+ * used as a bit mask to specify channel numbers within a channel page.
+ */
+#define WPAN_NUM_CHANNELS	27
+#define WPAN_NUM_PAGES		32
+
 struct wpan_phy {
 	struct mutex pib_lock;
 
@@ -43,7 +51,7 @@
 	int idx;
 
 	struct net_device *(*add_iface)(struct wpan_phy *phy,
-			const char *name);
+					const char *name, int type);
 	void (*del_iface)(struct wpan_phy *phy, struct net_device *dev);
 
 	char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
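
The WPAN_NUM_CHANNELS/WPAN_NUM_PAGES split above packs a channel page and a
per-page channel mask into one 32-bit word. A minimal sketch of decoding such
a word, assuming the upper 5 bits carry the page and the lower 27 bits the
channel mask; the helper names below are hypothetical and not part of the
kernel API:

#include <stdint.h>
#include <stdbool.h>

#define WPAN_NUM_CHANNELS	27
#define WPAN_NUM_PAGES		32

/* Page index lives in the bits above the 27 channel bits (0..31). */
static inline unsigned int wpan_word_page(uint32_t word)
{
	return word >> WPAN_NUM_CHANNELS;
}

/* True if channel 'chan' (0..26) is set in the per-page mask. */
static inline bool wpan_word_has_channel(uint32_t word, unsigned int chan)
{
	return chan < WPAN_NUM_CHANNELS &&
	       (word & (UINT32_C(1) << chan));
}
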
diff --git a/include/net/x25.h b/include/net/x25.h
index a06119a..b4a8a89 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -305,7 +305,7 @@
 #endif /* CONFIG_SYSCTL */
 
 struct x25_skb_cb {
-	unsigned flags;
+	unsigned int flags;
 };
 #define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb))
 
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 96239e7..e0a55df 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -886,15 +886,15 @@
 	return port;
 }
 
-extern int xfrm_selector_match(const struct xfrm_selector *sel,
-			       const struct flowi *fl,
-			       unsigned short family);
+extern bool xfrm_selector_match(const struct xfrm_selector *sel,
+				const struct flowi *fl,
+				unsigned short family);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 /*	If neither has a context --> match
  * 	Otherwise, both must have a context and the sids, doi, alg must match
  */
-static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
+static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
 {
 	return ((!s1 && !s2) ||
 		(s1 && s2 &&
@@ -903,9 +903,9 @@
 		 (s1->ctx_alg == s2->ctx_alg)));
 }
 #else
-static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
+static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
 {
-	return 1;
+	return true;
 }
 #endif
 
@@ -1682,8 +1682,9 @@
 
 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
 {
-	if (m->m | m->v)
-		NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
+	if ((m->m | m->v) &&
+	    nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index b513f57..3d81b90 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -160,7 +160,7 @@
 
 typedef u64 __bitwise ib_sa_comp_mask;
 
-#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n))
+#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << (n)))
 
 /*
  * ib_sa_hdr and ib_sa_mad structures must be packed because they have
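
The extra parentheses added to IB_SA_COMP_MASK() matter because << binds
tighter than &, |, and ?:. A small worked example, using plain unsigned long
long in place of the __bitwise ib_sa_comp_mask and cpu_to_be64() types:

#define MASK_UNPARENTHESIZED(n)	(1ull << n)
#define MASK_PARENTHESIZED(n)	(1ull << (n))

/*
 * With i == 3:
 *   MASK_UNPARENTHESIZED(i & 7) expands to (1ull << i & 7),
 *   which parses as ((1ull << 3) & 7) == 0;
 *   MASK_PARENTHESIZED(i & 7)   expands to (1ull << (i & 7)) == 8.
 */

So callers that pass an expression rather than a bare constant now get the
intended bit.
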
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index c3cca5a..07996af 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -605,7 +605,7 @@
 	IB_QPT_UD,
 	IB_QPT_RAW_IPV6,
 	IB_QPT_RAW_ETHERTYPE,
-	/* Save 8 for RAW_PACKET */
+	IB_QPT_RAW_PACKET = 8,
 	IB_QPT_XRC_INI = 9,
 	IB_QPT_XRC_TGT,
 	IB_QPT_MAX
@@ -964,7 +964,7 @@
 	struct ib_srq	       *srq;
 	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
 	struct list_head	xrcd_list;
-	atomic_t		usecnt; /* count times opened */
+	atomic_t		usecnt; /* count times opened, mcast attaches */
 	struct list_head	open_list;
 	struct ib_qp           *real_qp;
 	struct ib_uobject      *uobject;
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index 988ba06..c1260d8 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -661,6 +661,8 @@
 
 #define ISCSI_DEF_TIME2WAIT			2
 
+#define ISCSI_NAME_LEN				224
+
 /************************* RFC 3720 End *****************************/
 
 #endif /* ISCSI_PROTO_H */
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 5f5ed1b..f4f1c96 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -217,11 +217,29 @@
 	struct kref kref;
 };
 
-struct sas_discovery_event {
+struct sas_work {
+	struct list_head drain_node;
 	struct work_struct work;
+};
+
+static inline void INIT_SAS_WORK(struct sas_work *sw, void (*fn)(struct work_struct *))
+{
+	INIT_WORK(&sw->work, fn);
+	INIT_LIST_HEAD(&sw->drain_node);
+}
+
+struct sas_discovery_event {
+	struct sas_work work;
 	struct asd_sas_port *port;
 };
 
+static inline struct sas_discovery_event *to_sas_discovery_event(struct work_struct *work)
+{
+	struct sas_discovery_event *ev = container_of(work, typeof(*ev), work.work);
+
+	return ev;
+}
+
 struct sas_discovery {
 	struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
 	unsigned long    pending;
@@ -244,7 +262,7 @@
 	struct list_head destroy_list;
 	enum   sas_linkrate linkrate;
 
-	struct work_struct work;
+	struct sas_work work;
 
 /* public: */
 	int id;
@@ -270,10 +288,17 @@
 };
 
 struct asd_sas_event {
-	struct work_struct work;
+	struct sas_work work;
 	struct asd_sas_phy *phy;
 };
 
+static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
+{
+	struct asd_sas_event *ev = container_of(work, typeof(*ev), work.work);
+
+	return ev;
+}
+
 /* The phy pretty much is controlled by the LLDD.
  * The class only reads those fields.
  */
@@ -333,10 +358,17 @@
 };
 
 struct sas_ha_event {
-	struct work_struct work;
+	struct sas_work work;
 	struct sas_ha_struct *ha;
 };
 
+static inline struct sas_ha_event *to_sas_ha_event(struct work_struct *work)
+{
+	struct sas_ha_event *ev = container_of(work, typeof(*ev), work.work);
+
+	return ev;
+}
+
 enum sas_ha_state {
 	SAS_HA_REGISTERED,
 	SAS_HA_DRAINING,
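
The new to_sas_ha_event()/to_asd_sas_event()/to_sas_discovery_event() helpers
all follow the same container_of walk: the workqueue hands the callback the
inner work_struct, and the helper steps back out through the embedded sas_work
to the enclosing event. A self-contained sketch of that shape, with local
stand-in types and a local container_of macro rather than the kernel's:

#include <stddef.h>

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct_sketch { int pending; };

struct sas_work_sketch {
	struct work_struct_sketch work;		/* handed to the callback */
};

struct asd_sas_event_sketch {
	struct sas_work_sketch work;		/* sas_work now wraps work_struct */
	void *phy;
};

/* Same shape as container_of(work, typeof(*ev), work.work) above. */
static inline struct asd_sas_event_sketch *
to_event_sketch(struct work_struct_sketch *w)
{
	return container_of_sketch(w, struct asd_sas_event_sketch, work.work);
}
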
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index a577a83..be3eb0b 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -103,6 +103,7 @@
 };
 
 enum sas_protocol {
+	SAS_PROTOCOL_NONE		= 0,
 	SAS_PROTOCOL_SATA		= 0x01,
 	SAS_PROTOCOL_SMP		= 0x02,
 	SAS_PROTOCOL_STP		= 0x04,
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index cdccd2e..77670e8 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -37,7 +37,7 @@
 }
 
 int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
-int sas_ata_init_host_and_port(struct domain_device *found_dev);
+int sas_ata_init(struct domain_device *dev);
 void sas_ata_task_abort(struct sas_task *task);
 void sas_ata_strategy_handler(struct Scsi_Host *shost);
 void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
@@ -52,7 +52,7 @@
 {
 	return 0;
 }
-static inline int sas_ata_init_host_and_port(struct domain_device *found_dev)
+static inline int sas_ata_init(struct domain_device *dev)
 {
 	return 0;
 }
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 8c9ff1b..2d7db85 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -23,12 +23,11 @@
 	struct se_device *(*create_virtdevice)(struct se_hba *,
 				struct se_subsystem_dev *, void *);
 	void (*free_device)(void *);
-	int (*transport_complete)(struct se_task *task);
-	struct se_task *(*alloc_task)(unsigned char *cdb);
-	int (*do_task)(struct se_task *);
+	int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *);
+	int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32,
+			enum dma_data_direction);
 	int (*do_discard)(struct se_device *, sector_t, u32);
-	void (*do_sync_cache)(struct se_task *);
-	void (*free_task)(struct se_task *);
+	void (*do_sync_cache)(struct se_cmd *);
 	ssize_t (*check_configfs_dev_params)(struct se_hba *,
 			struct se_subsystem_dev *);
 	ssize_t (*set_configfs_dev_params)(struct se_hba *,
@@ -38,7 +37,7 @@
 	u32 (*get_device_rev)(struct se_device *);
 	u32 (*get_device_type)(struct se_device *);
 	sector_t (*get_blocks)(struct se_device *);
-	unsigned char *(*get_sense_buffer)(struct se_task *);
+	unsigned char *(*get_sense_buffer)(struct se_cmd *);
 };
 
 int	transport_subsystem_register(struct se_subsystem_api *);
@@ -48,10 +47,7 @@
 		struct se_subsystem_api *, struct se_subsystem_dev *, u32,
 		void *, struct se_dev_limits *, const char *, const char *);
 
-void	transport_complete_sync_cache(struct se_cmd *, int);
-void	transport_complete_task(struct se_task *, int);
-
-void	target_get_task_cdb(struct se_task *, unsigned char *);
+void	target_complete_cmd(struct se_cmd *, u8);
 
 void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int	transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index aaccc5f..dc35d86 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -73,9 +73,8 @@
 /*
  * struct se_device->dev_flags
  */
-#define DF_READ_ONLY				0x00000001
-#define DF_SPC2_RESERVATIONS			0x00000002
-#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000004
+#define DF_SPC2_RESERVATIONS			0x00000001
+#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000002
 
 /* struct se_dev_attrib sanity values */
 /* Default max_unmap_lba_count */
@@ -141,14 +140,6 @@
 	TRANSPORT_TPG_TYPE_DISCOVERY = 1,
 };
 
-/* struct se_task->task_flags */
-enum se_task_flags {
-	TF_ACTIVE		= (1 << 0),
-	TF_SENT			= (1 << 1),
-	TF_REQUEST_STOP		= (1 << 2),
-	TF_HAS_SENSE		= (1 << 3),
-};
-
 /* Special transport agnostic struct se_cmd->t_states */
 enum transport_state_table {
 	TRANSPORT_NO_STATE	= 0,
@@ -234,6 +225,7 @@
 enum target_sc_flags_table {
 	TARGET_SCF_BIDI_OP		= 0x01,
 	TARGET_SCF_ACK_KREF		= 0x02,
+	TARGET_SCF_UNKNOWN_SIZE		= 0x04,
 };
 
 /* fabric independent task management function values */
@@ -338,6 +330,7 @@
 	int	tg_pt_gp_alua_access_type;
 	int	tg_pt_gp_nonop_delay_msecs;
 	int	tg_pt_gp_trans_delay_msecs;
+	int	tg_pt_gp_implict_trans_secs;
 	int	tg_pt_gp_pref;
 	int	tg_pt_gp_write_metadata;
 	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
@@ -485,23 +478,6 @@
 	wait_queue_head_t	thread_wq;
 };
 
-struct se_task {
-	unsigned long long	task_lba;
-	u32			task_sectors;
-	u32			task_size;
-	struct se_cmd		*task_se_cmd;
-	struct scatterlist	*task_sg;
-	u32			task_sg_nents;
-	u16			task_flags;
-	u8			task_scsi_status;
-	enum dma_data_direction	task_data_direction;
-	struct list_head	t_list;
-	struct list_head	t_execute_list;
-	struct list_head	t_state_list;
-	bool			t_state_active;
-	struct completion	task_stop_comp;
-};
-
 struct se_tmr_req {
 	/* Task Management function to be performed */
 	u8			function;
@@ -538,6 +514,7 @@
 	/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
 	unsigned		check_release:1;
 	unsigned		cmd_wait_set:1;
+	unsigned		unknown_data_length:1;
 	/* See se_cmd_flags_table */
 	u32			se_cmd_flags;
 	u32			se_ordered_id;
@@ -565,18 +542,13 @@
 	struct completion	cmd_wait_comp;
 	struct kref		cmd_kref;
 	struct target_core_fabric_ops *se_tfo;
-	int (*execute_task)(struct se_task *);
+	int (*execute_cmd)(struct se_cmd *);
 	void (*transport_complete_callback)(struct se_cmd *);
 
 	unsigned char		*t_task_cdb;
 	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
 	unsigned long long	t_task_lba;
-	u32			t_tasks_sg_chained_no;
 	atomic_t		t_fe_count;
-	atomic_t		t_se_count;
-	atomic_t		t_task_cdbs_left;
-	atomic_t		t_task_cdbs_ex_left;
-	atomic_t		t_task_cdbs_sent;
 	unsigned int		transport_state;
 #define CMD_T_ABORTED		(1 << 0)
 #define CMD_T_ACTIVE		(1 << 1)
@@ -588,11 +560,12 @@
 #define CMD_T_LUN_STOP		(1 << 7)
 #define CMD_T_LUN_FE_STOP	(1 << 8)
 #define CMD_T_DEV_ACTIVE	(1 << 9)
+#define CMD_T_REQUEST_STOP	(1 << 10)
+#define CMD_T_BUSY		(1 << 11)
 	spinlock_t		t_state_lock;
 	struct completion	t_transport_stop_comp;
 	struct completion	transport_lun_fe_stop_comp;
 	struct completion	transport_lun_stop_comp;
-	struct scatterlist	*t_tasks_sg_chained;
 
 	struct work_struct	work;
 
@@ -602,10 +575,15 @@
 	struct scatterlist	*t_bidi_data_sg;
 	unsigned int		t_bidi_data_nents;
 
-	/* Used for BIDI READ */
-	struct list_head	t_task_list;
-	u32			t_task_list_num;
+	struct list_head	execute_list;
+	struct list_head	state_list;
+	bool			state_active;
 
+	/* old task stop completion, consider merging with some of the above */
+	struct completion	task_stop_comp;
+
+	/* backend private data */
+	void			*priv;
 };
 
 struct se_ua {
@@ -731,7 +709,6 @@
 	u32		hw_block_size;
 	u32		block_size;
 	u32		hw_max_sectors;
-	u32		max_sectors;
 	u32		fabric_max_sectors;
 	u32		optimal_sectors;
 	u32		hw_queue_depth;
@@ -829,8 +806,8 @@
 	struct task_struct	*process_thread;
 	struct work_struct	qf_work_queue;
 	struct list_head	delayed_cmd_list;
-	struct list_head	execute_task_list;
-	struct list_head	state_task_list;
+	struct list_head	execute_list;
+	struct list_head	state_list;
 	struct list_head	qf_cmd_list;
 	/* Pointer to associated SE HBA */
 	struct se_hba		*se_hba;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 10c6908..1169599 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -3,12 +3,6 @@
 
 struct target_core_fabric_ops {
 	struct configfs_subsystem *tf_subsys;
-	/*
-	 * Optional to signal struct se_task->task_sg[] padding entries
-	 * for scatterlist chaining using transport_do_task_sg_link(),
-	 * disabled by default
-	 */
-	bool task_sg_chaining;
 	char *(*get_fabric_name)(void);
 	u8 (*get_fabric_proto_ident)(struct se_portal_group *);
 	char *(*tpg_get_wwn)(struct se_portal_group *);
@@ -102,7 +96,7 @@
 void	transport_register_session(struct se_portal_group *,
 		struct se_node_acl *, struct se_session *, void *);
 void	target_get_session(struct se_session *);
-int	target_put_session(struct se_session *);
+void	target_put_session(struct se_session *);
 void	transport_free_session(struct se_session *);
 void	target_put_nacl(struct se_node_acl *);
 void	transport_deregister_session_configfs(struct se_session *);
@@ -112,7 +106,7 @@
 void	transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
 		struct se_session *, u32, int, int, unsigned char *);
 int	transport_lookup_cmd_lun(struct se_cmd *, u32);
-int	transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
+int	target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
 void	target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
 		unsigned char *, u32, u32, int, int, int);
 int	target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
@@ -124,7 +118,6 @@
 int	transport_generic_handle_data(struct se_cmd *);
 int	transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
 		struct scatterlist *, u32, struct scatterlist *, u32);
-void	transport_do_task_sg_chain(struct se_cmd *);
 int	transport_generic_new_cmd(struct se_cmd *);
 
 void	transport_generic_process_write(struct se_cmd *);
diff --git a/include/video/vga.h b/include/video/vga.h
index 2b8691f..cac567f 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -19,29 +19,7 @@
 
 #include <linux/types.h>
 #include <asm/io.h>
-#ifndef CONFIG_AMIGA
 #include <asm/vga.h>
-#else
-/*
- * FIXME
- * Ugh, we don't have PCI space, so map readb() and friends to use Zorro space
- * for MMIO accesses. This should make cirrusfb work again on Amiga
- */
-#undef inb_p
-#undef inw_p
-#undef outb_p
-#undef outw
-#undef readb
-#undef writeb
-#undef writew
-#define inb_p(port)	0
-#define inw_p(port)	0
-#define outb_p(port, val)	do { } while (0)
-#define outw(port, val)		do { } while (0)
-#define readb		z_readb
-#define writeb		z_writeb
-#define writew		z_writew
-#endif
 #include <asm/byteorder.h>
 
 
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 0e93f92..42b0707 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -472,7 +472,7 @@
 void __init mount_root(void)
 {
 #ifdef CONFIG_ROOT_NFS
-	if (MAJOR(ROOT_DEV) == UNNAMED_MAJOR) {
+	if (ROOT_DEV == Root_NFS) {
 		if (mount_nfs_root())
 			return;
 
diff --git a/init/main.c b/init/main.c
index 9d454f0..cb54cd3 100644
--- a/init/main.c
+++ b/init/main.c
@@ -225,13 +225,9 @@
 
 early_param("loglevel", loglevel);
 
-/*
- * Unknown boot options get handed to init, unless they look like
- * unused parameters (modprobe will find them in /proc/cmdline).
- */
-static int __init unknown_bootoption(char *param, char *val)
+/* Change NUL term back to "=", to make "param" the whole string. */
+static int __init repair_env_string(char *param, char *val)
 {
-	/* Change NUL term back to "=", to make "param" the whole string. */
 	if (val) {
 		/* param=val or param="val"? */
 		if (val == param+strlen(param)+1)
@@ -243,6 +239,16 @@
 		} else
 			BUG();
 	}
+	return 0;
+}
+
+/*
+ * Unknown boot options get handed to init, unless they look like
+ * unused parameters (modprobe will find them in /proc/cmdline).
+ */
+static int __init unknown_bootoption(char *param, char *val)
+{
+	repair_env_string(param, val);
 
 	/* Handle obsolete-style parameters */
 	if (obsolete_checksetup(param))
@@ -554,9 +560,6 @@
 	early_boot_irqs_disabled = false;
 	local_irq_enable();
 
-	/* Interrupts are enabled now so all GFP allocations are safe. */
-	gfp_allowed_mask = __GFP_BITS_MASK;
-
 	kmem_cache_init_late();
 
 	/*
@@ -732,11 +735,6 @@
 	"late parameters",
 };
 
-static int __init ignore_unknown_bootoption(char *param, char *val)
-{
-	return 0;
-}
-
 static void __init do_initcall_level(int level)
 {
 	extern const struct kernel_param __start___param[], __stop___param[];
@@ -747,7 +745,7 @@
 		   static_command_line, __start___param,
 		   __stop___param - __start___param,
 		   level, level,
-		   ignore_unknown_bootoption);
+		   repair_env_string);
 
 	for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
 		do_one_initcall(*fn);
@@ -841,6 +839,10 @@
 	 * Wait until kthreadd is all set-up.
 	 */
 	wait_for_completion(&kthreadd_done);
+
+	/* Now the scheduler is fully set up and can do blocking allocations */
+	gfp_allowed_mask = __GFP_BITS_MASK;
+
 	/*
 	 * init can allocate pages on any node
 	 */
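
The repair_env_string() comment describes parse_args()-style splitting being
undone: the parser overwrites '=' with a NUL so param and val become two
strings, and the repair step writes the '=' back so param spans the whole
token again. A minimal userspace sketch of that round trip, covering only the
unquoted case (the kernel also handles a quoted value one byte further on):

#include <stdio.h>
#include <string.h>

static void repair_sketch(char *param, char *val)
{
	/* val sits right after param's terminating NUL in the same buffer */
	if (val && val == param + strlen(param) + 1)
		val[-1] = '=';	/* turn the NUL back into '=' */
}

int main(void)
{
	char buf[] = "console=ttyS0";
	char *param = buf;
	char *val = strchr(buf, '=');

	*val++ = '\0';			/* split, as a parser would */
	repair_sketch(param, val);	/* rejoin */
	printf("%s\n", buf);		/* prints "console=ttyS0" */
	return 0;
}

The same helper is now passed to parse_args() for each initcall level, putting
the '=' back after every pass.
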
diff --git a/kernel/compat.c b/kernel/compat.c
index 74ff849..d2c67aa 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -372,25 +372,54 @@
 
 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
 
-asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
-		compat_old_sigset_t __user *oset)
+/*
+ * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the
+ * blocked set of signals to the supplied signal set
+ */
+static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set)
 {
-	old_sigset_t s;
-	long ret;
-	mm_segment_t old_fs;
+	memcpy(blocked->sig, &set, sizeof(set));
+}
 
-	if (set && get_user(s, set))
-		return -EFAULT;
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_sigprocmask(how,
-			      set ? (old_sigset_t __user *) &s : NULL,
-			      oset ? (old_sigset_t __user *) &s : NULL);
-	set_fs(old_fs);
-	if (ret == 0)
-		if (oset)
-			ret = put_user(s, oset);
-	return ret;
+asmlinkage long compat_sys_sigprocmask(int how,
+				       compat_old_sigset_t __user *nset,
+				       compat_old_sigset_t __user *oset)
+{
+	old_sigset_t old_set, new_set;
+	sigset_t new_blocked;
+
+	old_set = current->blocked.sig[0];
+
+	if (nset) {
+		if (get_user(new_set, nset))
+			return -EFAULT;
+		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
+
+		new_blocked = current->blocked;
+
+		switch (how) {
+		case SIG_BLOCK:
+			sigaddsetmask(&new_blocked, new_set);
+			break;
+		case SIG_UNBLOCK:
+			sigdelsetmask(&new_blocked, new_set);
+			break;
+		case SIG_SETMASK:
+			compat_sig_setmask(&new_blocked, new_set);
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		set_current_blocked(&new_blocked);
+	}
+
+	if (oset) {
+		if (put_user(old_set, oset))
+			return -EFAULT;
+	}
+
+	return 0;
 }
 
 #endif
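
The rewritten compat_sys_sigprocmask() applies the three 'how' operations to
the first word of the blocked set directly instead of round-tripping through
sys_sigprocmask() under set_fs(KERNEL_DS). A sketch of those semantics on a
plain 32-bit mask; the names below are local stand-ins, not the kernel's
sigset helpers:

#include <stdint.h>

enum sketch_how { SKETCH_SIG_BLOCK, SKETCH_SIG_UNBLOCK, SKETCH_SIG_SETMASK };

static int apply_how_sketch(uint32_t *blocked, uint32_t nset, enum sketch_how how)
{
	switch (how) {
	case SKETCH_SIG_BLOCK:		/* like sigaddsetmask(): add bits */
		*blocked |= nset;
		break;
	case SKETCH_SIG_UNBLOCK:	/* like sigdelsetmask(): clear bits */
		*blocked &= ~nset;
		break;
	case SKETCH_SIG_SETMASK:	/* like compat_sig_setmask(): replace word */
		*blocked = nset;
		break;
	default:
		return -1;		/* the syscall returns -EINVAL here */
	}
	return 0;
}

The real syscall also strips SIGKILL and SIGSTOP from the new set before
applying it, as shown in the hunk above.
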
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a6a9ec4..fd126f8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3183,7 +3183,7 @@
 	perf_event_for_each_child(event, func);
 	func(event);
 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
-		perf_event_for_each_child(event, func);
+		perf_event_for_each_child(sibling, func);
 	mutex_unlock(&ctx->mutex);
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index b9372a0..687a15d5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -47,6 +47,7 @@
 #include <linux/audit.h>
 #include <linux/memcontrol.h>
 #include <linux/ftrace.h>
+#include <linux/proc_fs.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/ksm.h>
@@ -1464,6 +1465,8 @@
 	if (p->io_context)
 		exit_io_context(p);
 bad_fork_cleanup_namespaces:
+	if (unlikely(clone_flags & CLONE_NEWPID))
+		pid_ns_release_proc(p->nsproxy->pid_ns);
 	exit_task_namespaces(p);
 bad_fork_cleanup_mm:
 	if (p->mm)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6080f6b..3914c1e 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -518,6 +518,7 @@
 out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
+EXPORT_SYMBOL(handle_edge_irq);
 
 #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
 /**
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index 97a8bfa..e75e29e 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -4,10 +4,10 @@
 
 #include <linux/kallsyms.h>
 
-#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
-#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
+#define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
+#define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
 /* FIXME */
-#define PD(f) do { } while (0)
+#define ___PD(f) do { } while (0)
 
 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
@@ -23,23 +23,23 @@
 		print_symbol("%s\n", (unsigned long)desc->action->handler);
 	}
 
-	P(IRQ_LEVEL);
-	P(IRQ_PER_CPU);
-	P(IRQ_NOPROBE);
-	P(IRQ_NOREQUEST);
-	P(IRQ_NOTHREAD);
-	P(IRQ_NOAUTOEN);
+	___P(IRQ_LEVEL);
+	___P(IRQ_PER_CPU);
+	___P(IRQ_NOPROBE);
+	___P(IRQ_NOREQUEST);
+	___P(IRQ_NOTHREAD);
+	___P(IRQ_NOAUTOEN);
 
-	PS(IRQS_AUTODETECT);
-	PS(IRQS_REPLAY);
-	PS(IRQS_WAITING);
-	PS(IRQS_PENDING);
+	___PS(IRQS_AUTODETECT);
+	___PS(IRQS_REPLAY);
+	___PS(IRQS_WAITING);
+	___PS(IRQS_PENDING);
 
-	PD(IRQS_INPROGRESS);
-	PD(IRQS_DISABLED);
-	PD(IRQS_MASKED);
+	___PD(IRQS_INPROGRESS);
+	___PD(IRQS_DISABLED);
+	___PD(IRQS_MASKED);
 }
 
-#undef P
-#undef PS
-#undef PD
+#undef ___P
+#undef ___PS
+#undef ___PD
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index d86e254..192a302 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -112,6 +112,7 @@
 {
 	return radix_tree_lookup(&irq_desc_tree, irq);
 }
+EXPORT_SYMBOL(irq_to_desc);
 
 static void delete_irq_desc(unsigned int irq)
 {
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8742fd0..eef311a 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -51,6 +51,23 @@
 
 #define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
 
+/*
+ * Number of free pages that are not high.
+ */
+static inline unsigned long low_free_pages(void)
+{
+	return nr_free_pages() - nr_free_highpages();
+}
+
+/*
+ * Number of pages required to be kept free while writing the image. Always
+ * half of all available low pages before the writing starts.
+ */
+static inline unsigned long reqd_free_pages(void)
+{
+	return low_free_pages() / 2;
+}
+
 struct swap_map_page {
 	sector_t entries[MAP_PAGE_ENTRIES];
 	sector_t next_swap;
@@ -72,7 +89,7 @@
 	sector_t cur_swap;
 	sector_t first_sector;
 	unsigned int k;
-	unsigned long nr_free_pages, written;
+	unsigned long reqd_free_pages;
 	u32 crc32;
 };
 
@@ -316,8 +333,7 @@
 		goto err_rel;
 	}
 	handle->k = 0;
-	handle->nr_free_pages = nr_free_pages() >> 1;
-	handle->written = 0;
+	handle->reqd_free_pages = reqd_free_pages();
 	handle->first_sector = handle->cur_swap;
 	return 0;
 err_rel:
@@ -352,11 +368,11 @@
 		handle->cur_swap = offset;
 		handle->k = 0;
 	}
-	if (bio_chain && ++handle->written > handle->nr_free_pages) {
+	if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
 		error = hib_wait_on_bio_chain(bio_chain);
 		if (error)
 			goto out;
-		handle->written = 0;
+		handle->reqd_free_pages = reqd_free_pages();
 	}
  out:
 	return error;
@@ -618,7 +634,7 @@
 	 * Adjust number of free pages after all allocations have been done.
 	 * We don't want to run out of pages when writing.
 	 */
-	handle->nr_free_pages = nr_free_pages() >> 1;
+	handle->reqd_free_pages = reqd_free_pages();
 
 	/*
 	 * Start the CRC32 thread.
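
A worked example of the new throttle, with assumed numbers: if nr_free_pages()
reports 120000 pages and nr_free_highpages() reports 80000, then
low_free_pages() is 40000 and reqd_free_pages() is 20000. swap_write_page()
now waits on the outstanding bio chain whenever low_free_pages() falls to
20000 or below and then re-samples the threshold, instead of pausing after a
fixed count of written pages as the old handle->written logic did.
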
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1050d6d..d0c5baf 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1820,7 +1820,6 @@
 	 * a quiescent state betweentimes.
 	 */
 	local_irq_save(flags);
-	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
 	rdp = this_cpu_ptr(rsp->rda);
 
 	/* Add the callback to our list. */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4603b9d..e5212ae 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6382,6 +6382,8 @@
 			if (!sg)
 				return -ENOMEM;
 
+			sg->next = sg;
+
 			*per_cpu_ptr(sdd->sg, j) = sg;
 
 			sgp = kzalloc_node(sizeof(struct sched_group_power),
@@ -6405,16 +6407,26 @@
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-			if (sd && (sd->flags & SD_OVERLAP))
-				free_sched_groups(sd->groups, 0);
-			kfree(*per_cpu_ptr(sdd->sd, j));
-			kfree(*per_cpu_ptr(sdd->sg, j));
-			kfree(*per_cpu_ptr(sdd->sgp, j));
+			struct sched_domain *sd;
+
+			if (sdd->sd) {
+				sd = *per_cpu_ptr(sdd->sd, j);
+				if (sd && (sd->flags & SD_OVERLAP))
+					free_sched_groups(sd->groups, 0);
+				kfree(*per_cpu_ptr(sdd->sd, j));
+			}
+
+			if (sdd->sg)
+				kfree(*per_cpu_ptr(sdd->sg, j));
+			if (sdd->sgp)
+				kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
+		sdd->sd = NULL;
 		free_percpu(sdd->sg);
+		sdd->sg = NULL;
 		free_percpu(sdd->sgp);
+		sdd->sgp = NULL;
 	}
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0d97ebd..e955364 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,7 @@
 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se))
-		list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
+		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
 #endif
 	cfs_rq->nr_running++;
 }
@@ -3215,6 +3215,8 @@
 
 static unsigned long task_h_load(struct task_struct *p);
 
+static const unsigned int sched_nr_migrate_break = 32;
+
 /*
  * move_tasks tries to move up to load_move weighted load from busiest to
  * this_rq, as part of a balancing operation within domain "sd".
@@ -3242,7 +3244,7 @@
 
 		/* take a breather every nr_migrate tasks */
 		if (env->loop > env->loop_break) {
-			env->loop_break += sysctl_sched_nr_migrate;
+			env->loop_break += sched_nr_migrate_break;
 			env->flags |= LBF_NEED_BREAK;
 			break;
 		}
@@ -3252,7 +3254,7 @@
 
 		load = task_h_load(p);
 
-		if (load < 16 && !env->sd->nr_balance_failed)
+		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
 		if ((load / 2) > env->load_move)
@@ -4407,7 +4409,7 @@
 		.dst_cpu	= this_cpu,
 		.dst_rq		= this_rq,
 		.idle		= idle,
-		.loop_break	= sysctl_sched_nr_migrate,
+		.loop_break	= sched_nr_migrate_break,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4445,10 +4447,10 @@
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
-		env.load_move = imbalance;
-		env.src_cpu = busiest->cpu;
-		env.src_rq = busiest;
-		env.loop_max = busiest->nr_running;
+		env.load_move	= imbalance;
+		env.src_cpu	= busiest->cpu;
+		env.src_rq	= busiest;
+		env.loop_max	= min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
 		local_irq_save(flags);
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index e61fd73..de00a48 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -68,3 +68,4 @@
 
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
+SCHED_FEAT(LB_MIN, false)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index bf57abd..f113755 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -346,7 +346,8 @@
 						     tick_get_broadcast_mask());
 			break;
 		case TICKDEV_MODE_ONESHOT:
-			broadcast = tick_resume_broadcast_oneshot(bc);
+			if (!cpumask_empty(tick_get_broadcast_mask()))
+				broadcast = tick_resume_broadcast_oneshot(bc);
 			break;
 		}
 	}
@@ -373,6 +374,9 @@
 {
 	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 
+	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
 	return clockevents_program_event(bc, expires, force);
 }
 
@@ -531,7 +535,6 @@
 		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
 
 		bc->event_handler = tick_handle_oneshot_broadcast;
-		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 
 		/* Take the do_timer update */
 		tick_do_timer_cpu = cpu;
@@ -549,6 +552,7 @@
 			   to_cpumask(tmpmask));
 
 		if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
+			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 			tick_broadcast_init_next_event(to_cpumask(tmpmask),
 						       tick_next_period);
 			tick_broadcast_set_event(tick_next_period, 1);
@@ -577,15 +581,10 @@
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
 	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
-
-	if (cpumask_empty(tick_get_broadcast_mask()))
-		goto end;
-
 	bc = tick_broadcast_device.evtdev;
 	if (bc)
 		tick_broadcast_setup_oneshot(bc);
 
-end:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed7b5d1..2a22255 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4629,7 +4629,8 @@
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
 {
-	struct ring_buffer *buffer = filp->private_data;
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer *buffer = tr->buffer;
 	char buf[64];
 	int r;
 
@@ -4647,7 +4648,8 @@
 rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
-	struct ring_buffer *buffer = filp->private_data;
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer *buffer = tr->buffer;
 	unsigned long val;
 	int ret;
 
@@ -4734,7 +4736,7 @@
 			  &trace_clock_fops);
 
 	trace_create_file("tracing_on", 0644, d_tracer,
-			    global_trace.buffer, &rb_simple_fops);
+			    &global_trace, &rb_simple_fops);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 95059f0..f95d65d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -836,11 +836,11 @@
 		     filter)
 #include "trace_entries.h"
 
-#ifdef CONFIG_FUNCTION_TRACER
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
 int perf_ftrace_event_register(struct ftrace_event_call *call,
 			       enum trace_reg type, void *data);
 #else
 #define perf_ftrace_event_register NULL
-#endif /* CONFIG_FUNCTION_TRACER */
+#endif
 
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 079a93a..29111da 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -294,6 +294,9 @@
 		if (!call->name || !call->class || !call->class->reg)
 			continue;
 
+		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
+			continue;
+
 		if (match &&
 		    strcmp(match, call->name) != 0 &&
 		    strcmp(match, call->class->system) != 0)
@@ -1164,7 +1167,7 @@
 		return -1;
 	}
 
-	if (call->class->reg)
+	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
 		trace_create_file("enable", 0644, call->dir, call,
 				  enable);
 
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 3dd15e8..e039906 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -180,6 +180,7 @@
 	.event.type		= etype,				\
 	.class			= &event_class_ftrace_##call,		\
 	.print_fmt		= print,				\
+	.flags			= TRACE_EVENT_FL_IGNORE_ENABLE,		\
 };									\
 struct ftrace_event_call __used						\
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 859fae6b..df611a0 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -652,6 +652,8 @@
 {
 	u64 next_ts;
 	int ret;
+	/* trace_find_next_entry will reset ent_size */
+	int ent_size = iter->ent_size;
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent,
 			   *next_entry = trace_find_next_entry(iter, NULL,
@@ -660,6 +662,9 @@
 	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
 	unsigned long rel_usecs;
 
+	/* Restore the original ent_size */
+	iter->ent_size = ent_size;
+
 	if (!next_entry)
 		next_ts = iter->ts;
 	rel_usecs = ns2usecs(next_ts - iter->ts);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 0ab9ae8..d11808c 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -79,30 +79,29 @@
 	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
 };
 
-static int fill_pool(void)
+static void fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 	struct debug_obj *new;
 	unsigned long flags;
 
 	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
-		return obj_pool_free;
+		return;
 
 	if (unlikely(!obj_cache))
-		return obj_pool_free;
+		return;
 
 	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
 
 		new = kmem_cache_zalloc(obj_cache, gfp);
 		if (!new)
-			return obj_pool_free;
+			return;
 
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		obj_pool_free++;
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
-	return obj_pool_free;
 }
 
 /*
@@ -1052,10 +1051,10 @@
 			cnt++;
 		}
 	}
+	local_irq_enable();
 
 	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
 	       obj_pool_used);
-	local_irq_enable();
 	return 0;
 free:
 	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cd65cb1..ae8f708 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -532,7 +532,7 @@
 				struct vm_area_struct *vma,
 				unsigned long address, int avoid_reserve)
 {
-	struct page *page;
+	struct page *page = NULL;
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
 	struct zonelist *zonelist;
@@ -2498,7 +2498,6 @@
 		if (outside_reserve) {
 			BUG_ON(huge_pte_none(pte));
 			if (unmap_ref_private(mm, vma, old_page, address)) {
-				BUG_ON(page_count(old_page) != 1);
 				BUG_ON(huge_pte_none(pte));
 				spin_lock(&mm->page_table_lock);
 				ptep = huge_pte_offset(mm, address & huge_page_mask(h));
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b868def..7685d4a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2476,10 +2476,10 @@
 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
-				       struct page_cgroup *pc,
 				       enum charge_type ctype,
 				       bool lrucare)
 {
+	struct page_cgroup *pc = lookup_page_cgroup(page);
 	struct zone *uninitialized_var(zone);
 	bool was_on_lru = false;
 	bool anon;
@@ -2716,7 +2716,6 @@
 {
 	struct mem_cgroup *memcg = NULL;
 	unsigned int nr_pages = 1;
-	struct page_cgroup *pc;
 	bool oom = true;
 	int ret;
 
@@ -2730,11 +2729,10 @@
 		oom = false;
 	}
 
-	pc = lookup_page_cgroup(page);
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
 	return 0;
 }
 
@@ -2831,16 +2829,13 @@
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
-	struct page_cgroup *pc;
-
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	pc = lookup_page_cgroup(page);
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
+	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3298,14 +3293,13 @@
 	 * page. In the case new page is migrated but not remapped, new page's
 	 * mapcount will be finally 0 and we call uncharge in end_migration().
 	 */
-	pc = lookup_page_cgroup(newpage);
 	if (PageAnon(page))
 		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
 	else if (page_is_file_cache(page))
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
 	return ret;
 }
 
@@ -3392,8 +3386,7 @@
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	pc = lookup_page_cgroup(newpage);
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -4514,6 +4507,12 @@
 swap_buffers:
 	/* Swap primary and spare array */
 	thresholds->spare = thresholds->primary;
+	/* If all events are unregistered, free the spare array */
+	if (!new) {
+		kfree(thresholds->spare);
+		thresholds->spare = NULL;
+	}
+
 	rcu_assign_pointer(thresholds->primary, new);
 
 	/* To be sure that nobody uses thresholds */
@@ -5482,7 +5481,7 @@
 	 *    part of thp split is not executed yet.
 	 */
 	if (pmd_trans_huge_lock(pmd, vma) == 1) {
-		if (!mc.precharge) {
+		if (mc.precharge < HPAGE_PMD_NR) {
 			spin_unlock(&vma->vm_mm->page_table_lock);
 			return 0;
 		}
diff --git a/mm/memory.c b/mm/memory.c
index 6105f47..1e77da6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1295,7 +1295,7 @@
 
 static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
+		unsigned long end_addr,
 		struct zap_details *details)
 {
 	unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,9 +1307,6 @@
 	if (end <= vma->vm_start)
 		return;
 
-	if (vma->vm_flags & VM_ACCOUNT)
-		*nr_accounted += (end - start) >> PAGE_SHIFT;
-
 	if (unlikely(is_pfn_mapping(vma)))
 		untrack_pfn_vma(vma, 0, 0);
 
@@ -1339,8 +1336,6 @@
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Unmap all pages in the vma list.
  *
@@ -1355,15 +1350,13 @@
  */
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
-		struct zap_details *details)
+		unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
-				 details);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1376,19 +1369,21 @@
  *
  * Caller must protect the VMA list
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
-	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
+	unsigned long end = start + size;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-	tlb_finish_mmu(&tlb, address, end);
+	mmu_notifier_invalidate_range_start(mm, start, end);
+	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+		unmap_single_vma(&tlb, vma, start, end, details);
+	mmu_notifier_invalidate_range_end(mm, start, end);
+	tlb_finish_mmu(&tlb, start, end);
 }
 
 /**
@@ -1406,13 +1401,12 @@
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
-	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	unmap_single_vma(&tlb, vma, address, end, details);
 	mmu_notifier_invalidate_range_end(mm, address, end);
 	tlb_finish_mmu(&tlb, address, end);
 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cfb6c86..b195691 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1361,11 +1361,14 @@
 
 	mm = get_task_mm(task);
 	put_task_struct(task);
-	if (mm)
-		err = do_migrate_pages(mm, old, new,
-			capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
-	else
+
+	if (!mm) {
 		err = -EINVAL;
+		goto out;
+	}
+
+	err = do_migrate_pages(mm, old, new,
+		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
 
 	mmput(mm);
 out:
diff --git a/mm/migrate.c b/mm/migrate.c
index 51c08a0..1107238 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1388,14 +1388,14 @@
 	mm = get_task_mm(task);
 	put_task_struct(task);
 
-	if (mm) {
-		if (nodes)
-			err = do_pages_move(mm, task_nodes, nr_pages, pages,
-					    nodes, status, flags);
-		else
-			err = do_pages_stat(mm, nr_pages, pages, status);
-	} else
-		err = -EINVAL;
+	if (!mm)
+		return -EINVAL;
+
+	if (nodes)
+		err = do_pages_move(mm, task_nodes, nr_pages, pages,
+				    nodes, status, flags);
+	else
+		err = do_pages_stat(mm, nr_pages, pages, status);
 
 	mmput(mm);
 	return err;
diff --git a/mm/mmap.c b/mm/mmap.c
index 848ef52..69a1889 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1889,15 +1889,20 @@
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+	unsigned long nr_accounted = 0;
+
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
 	do {
 		long nrpages = vma_pages(vma);
 
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += nrpages;
 		mm->total_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
+	vm_unacct_memory(nr_accounted);
 	validate_mm(mm);
 }
 
@@ -1912,13 +1917,11 @@
 {
 	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
 	struct mmu_gather tlb;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : 0);
 	tlb_finish_mmu(&tlb, start, end);
@@ -2305,8 +2308,7 @@
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, 0, -1);
 
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(&tlb, 0, -1);
@@ -2315,8 +2317,12 @@
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma)
+	while (vma) {
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += vma_pages(vma);
 		vma = remove_vma(vma);
+	}
+	vm_unacct_memory(nr_accounted);
 
 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 24f0fc1..1983fb1 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -82,8 +82,7 @@
 
 static void __init __free_pages_memory(unsigned long start, unsigned long end)
 {
-	int i;
-	unsigned long start_aligned, end_aligned;
+	unsigned long i, start_aligned, end_aligned;
 	int order = ilog2(BITS_PER_LONG);
 
 	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
@@ -298,13 +297,19 @@
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
+again:
 	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
 					 goal, -1ULL);
 	if (ptr)
 		return ptr;
 
-	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
-					 goal, -1ULL);
+	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
+					goal, -1ULL);
+	if (!ptr && goal) {
+		goal = 0;
+		goto again;
+	}
+	return ptr;
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a712fb9..918330f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5203,7 +5203,7 @@
 	int ret;
 
 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (!write || (ret == -EINVAL))
+	if (!write || (ret < 0))
 		return ret;
 	for_each_populated_zone(zone) {
 		for_each_possible_cpu(cpu) {
diff --git a/mm/percpu.c b/mm/percpu.c
index f47af91..bb4be74 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1132,20 +1132,20 @@
 		for (alloc_end += gi->nr_units / upa;
 		     alloc < alloc_end; alloc++) {
 			if (!(alloc % apl)) {
-				printk("\n");
+				printk(KERN_CONT "\n");
 				printk("%spcpu-alloc: ", lvl);
 			}
-			printk("[%0*d] ", group_width, group);
+			printk(KERN_CONT "[%0*d] ", group_width, group);
 
 			for (unit_end += upa; unit < unit_end; unit++)
 				if (gi->cpu_map[unit] != NR_CPUS)
-					printk("%0*d ", cpu_width,
+					printk(KERN_CONT "%0*d ", cpu_width,
 					       gi->cpu_map[unit]);
 				else
-					printk("%s ", empty_str);
+					printk(KERN_CONT "%s ", empty_str);
 		}
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 }
 
 /**
@@ -1650,6 +1650,16 @@
 		areas[group] = ptr;
 
 		base = min(ptr, base);
+	}
+
+	/*
+	 * Copy data and free unused parts.  This should happen after all
+	 * allocations are complete; otherwise, we may end up with
+	 * overlapping groups.
+	 */
+	for (group = 0; group < ai->nr_groups; group++) {
+		struct pcpu_group_info *gi = &ai->groups[group];
+		void *ptr = areas[group];
 
 		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
 			if (gi->cpu_map[i] == NR_CPUS) {
@@ -1885,6 +1895,8 @@
 	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 	if (!ai || !fc)
 		panic("Failed to allocate memory for percpu areas.");
+	/* kmemleak tracks the percpu allocations separately */
+	kmemleak_free(fc);
 
 	ai->dyn_size = unit_size;
 	ai->unit_size = unit_size;
diff --git a/mm/slub.c b/mm/slub.c
index ffe13fd..80848cd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2040,7 +2040,7 @@
 	struct kmem_cache *s = info;
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-	return !!(c->page);
+	return c->page || c->partial;
 }
 
 static void flush_all(struct kmem_cache *s)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1a51868..33dc256 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1568,9 +1568,14 @@
 	reclaim_stat->recent_scanned[0] += nr_anon;
 	reclaim_stat->recent_scanned[1] += nr_file;
 
-	if (current_is_kswapd())
-		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
-	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+	if (global_reclaim(sc)) {
+		if (current_is_kswapd())
+			__count_zone_vm_events(PGSTEAL_KSWAPD, zone,
+					       nr_reclaimed);
+		else
+			__count_zone_vm_events(PGSTEAL_DIRECT, zone,
+					       nr_reclaimed);
+	}
 
 	putback_inactive_pages(mz, &page_list);
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f600557..7db1b9b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -738,7 +738,8 @@
 	"pgmajfault",
 
 	TEXTS_FOR_ZONES("pgrefill")
-	TEXTS_FOR_ZONES("pgsteal")
+	TEXTS_FOR_ZONES("pgsteal_kswapd")
+	TEXTS_FOR_ZONES("pgsteal_direct")
 	TEXTS_FOR_ZONES("pgscan_kswapd")
 	TEXTS_FOR_ZONES("pgscan_direct")
 
@@ -747,7 +748,6 @@
 #endif
 	"pginodesteal",
 	"slabs_scanned",
-	"kswapd_steal",
 	"kswapd_inodesteal",
 	"kswapd_low_wmark_hit_quickly",
 	"kswapd_high_wmark_hit_quickly",
diff --git a/net/802/Makefile b/net/802/Makefile
index 7893d67..a30d6e3 100644
--- a/net/802/Makefile
+++ b/net/802/Makefile
@@ -4,7 +4,6 @@
 
 # Check the p8022 selections against net/core/Makefile.
 obj-$(CONFIG_LLC)	+= p8022.o psnap.o
-obj-$(CONFIG_TR)	+= p8022.o psnap.o tr.o
 obj-$(CONFIG_NET_FC)	+=                 fc.o
 obj-$(CONFIG_FDDI)	+=                 fddi.o
 obj-$(CONFIG_HIPPI)	+=                 hippi.o
diff --git a/net/802/fc.c b/net/802/fc.c
index b324e31..05eea6b 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -35,7 +35,7 @@
 
 static int fc_header(struct sk_buff *skb, struct net_device *dev,
 		     unsigned short type,
-		     const void *daddr, const void *saddr, unsigned len)
+		     const void *daddr, const void *saddr, unsigned int len)
 {
 	struct fch_hdr *fch;
 	int hdr_len;
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 5ab25cd..9cda406 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -51,7 +51,7 @@
 
 static int fddi_header(struct sk_buff *skb, struct net_device *dev,
 		       unsigned short type,
-		       const void *daddr, const void *saddr, unsigned len)
+		       const void *daddr, const void *saddr, unsigned int len)
 {
 	int hl = FDDI_K_SNAP_HLEN;
 	struct fddihdr *fddi;
diff --git a/net/802/garp.c b/net/802/garp.c
index a5c2248..8456f5d 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -157,9 +157,9 @@
 	while (parent) {
 		attr = rb_entry(parent, struct garp_attr, node);
 		d = garp_attr_cmp(attr, data, len, type);
-		if (d < 0)
+		if (d > 0)
 			parent = parent->rb_left;
-		else if (d > 0)
+		else if (d < 0)
 			parent = parent->rb_right;
 		else
 			return attr;
@@ -178,9 +178,9 @@
 		parent = *p;
 		attr = rb_entry(parent, struct garp_attr, node);
 		d = garp_attr_cmp(attr, data, len, type);
-		if (d < 0)
+		if (d > 0)
 			p = &parent->rb_left;
-		else if (d > 0)
+		else if (d < 0)
 			p = &parent->rb_right;
 		else {
 			/* The attribute already exists; re-use it. */
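
After the sign swap in the two garp.c hunks, a positive comparator result
(the existing node sorts after the key) sends the search left and a negative
result sends it right, the conventional layout in which an in-order walk
visits attributes in ascending order. A plain-binary-tree sketch of that
convention, with stand-in types rather than the kernel rbtree:

struct node_sketch {
	struct node_sketch *left, *right;
	int key;
};

static struct node_sketch *lookup_sketch(struct node_sketch *n, int key)
{
	while (n) {
		/* >0: node sorts after key, <0: before, 0: equal */
		int d = (n->key > key) - (n->key < key);

		if (d > 0)
			n = n->left;
		else if (d < 0)
			n = n->right;
		else
			return n;
	}
	return NULL;
}
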
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 056794e..51a1f53 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -45,7 +45,7 @@
 
 static int hippi_header(struct sk_buff *skb, struct net_device *dev,
 			unsigned short type,
-			const void *daddr, const void *saddr, unsigned len)
+			const void *daddr, const void *saddr, unsigned int len)
 {
 	struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN);
 	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
diff --git a/net/802/p8022.c b/net/802/p8022.c
index 7f353c4..0bda8de 100644
--- a/net/802/p8022.c
+++ b/net/802/p8022.c
@@ -1,6 +1,5 @@
 /*
- *	NET3:	Support for 802.2 demultiplexing off Ethernet (Token ring
- *		is kept separate see p8022tr.c)
+ *	NET3:	Support for 802.2 demultiplexing off Ethernet
  *		This program is free software; you can redistribute it and/or
  *		modify it under the terms of the GNU General Public License
  *		as published by the Free Software Foundation; either version
diff --git a/net/802/stp.c b/net/802/stp.c
index 15540b7..2c40ba0 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -46,7 +46,7 @@
 		proto = rcu_dereference(garp_protos[eh->h_dest[5] -
 						    GARP_ADDR_MIN]);
 		if (proto &&
-		    compare_ether_addr(eh->h_dest, proto->group_address))
+		    !ether_addr_equal(eh->h_dest, proto->group_address))
 			goto err;
 	} else
 		proto = rcu_dereference(stp_proto);
diff --git a/net/802/tr.c b/net/802/tr.c
deleted file mode 100644
index b9a3a14..0000000
--- a/net/802/tr.c
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * NET3:	Token ring device handling subroutines
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
- *
- * Fixes:       3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
- *              Added rif table to /proc/net/tr_rif and rif timeout to
- *              /proc/sys/net/token-ring/rif_timeout.
- *              22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
- *              tr_header and tr_type_trans to handle passing IPX SNAP and
- *              802.2 through the correct layers. Eliminated tr_reformat.
- *
- */
-
-#include <asm/uaccess.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/jiffies.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/trdevice.h>
-#include <linux/skbuff.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/net.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/sysctl.h>
-#include <linux/slab.h>
-#include <net/arp.h>
-#include <net/net_namespace.h>
-
-static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
-static void rif_check_expire(unsigned long dummy);
-
-#define TR_SR_DEBUG 0
-
-/*
- *	Each RIF entry we learn is kept this way
- */
-
-struct rif_cache {
-	unsigned char addr[TR_ALEN];
-	int iface;
-	__be16 rcf;
-	__be16 rseg[8];
-	struct rif_cache *next;
-	unsigned long last_used;
-	unsigned char local_ring;
-};
-
-#define RIF_TABLE_SIZE 32
-
-/*
- *	We hash the RIF cache 32 ways. We do after all have to look it
- *	up a lot.
- */
-
-static struct rif_cache *rif_table[RIF_TABLE_SIZE];
-
-static DEFINE_SPINLOCK(rif_lock);
-
-
-/*
- *	Garbage disposal timer.
- */
-
-static struct timer_list rif_timer;
-
-static int sysctl_tr_rif_timeout = 60*10*HZ;
-
-static inline unsigned long rif_hash(const unsigned char *addr)
-{
-	unsigned long x;
-
-	x = addr[0];
-	x = (x << 2) ^ addr[1];
-	x = (x << 2) ^ addr[2];
-	x = (x << 2) ^ addr[3];
-	x = (x << 2) ^ addr[4];
-	x = (x << 2) ^ addr[5];
-
-	x ^= x >> 8;
-
-	return x & (RIF_TABLE_SIZE - 1);
-}
-
-/*
- *	Put the headers on a token ring packet. Token ring source routing
- *	makes this a little more exciting than on ethernet.
- */
-
-static int tr_header(struct sk_buff *skb, struct net_device *dev,
-		     unsigned short type,
-		     const void *daddr, const void *saddr, unsigned len)
-{
-	struct trh_hdr *trh;
-	int hdr_len;
-
-	/*
-	 * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls
-	 * dev->hard_header directly.
-	 */
-	if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
-	{
-		struct trllc *trllc;
-
-		hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
-		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
-		trllc = (struct trllc *)(trh+1);
-		trllc->dsap = trllc->ssap = EXTENDED_SAP;
-		trllc->llc = UI_CMD;
-		trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
-		trllc->ethertype = htons(type);
-	}
-	else
-	{
-		hdr_len = sizeof(struct trh_hdr);
-		trh = (struct trh_hdr *)skb_push(skb, hdr_len);
-	}
-
-	trh->ac=AC;
-	trh->fc=LLC_FRAME;
-
-	if(saddr)
-		memcpy(trh->saddr,saddr,dev->addr_len);
-	else
-		memcpy(trh->saddr,dev->dev_addr,dev->addr_len);
-
-	/*
-	 *	Build the destination and then source route the frame
-	 */
-
-	if(daddr)
-	{
-		memcpy(trh->daddr,daddr,dev->addr_len);
-		tr_source_route(skb, trh, dev);
-		return hdr_len;
-	}
-
-	return -hdr_len;
-}
-
-/*
- *	A neighbour discovery of some species (eg arp) has completed. We
- *	can now send the packet.
- */
-
-static int tr_rebuild_header(struct sk_buff *skb)
-{
-	struct trh_hdr *trh=(struct trh_hdr *)skb->data;
-	struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr));
-	struct net_device *dev = skb->dev;
-
-	/*
-	 *	FIXME: We don't yet support IPv6 over token rings
-	 */
-
-	if(trllc->ethertype != htons(ETH_P_IP)) {
-		printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
-		return 0;
-	}
-
-#ifdef CONFIG_INET
-	if(arp_find(trh->daddr, skb)) {
-			return 1;
-	}
-	else
-#endif
-	{
-		tr_source_route(skb,trh,dev);
-		return 0;
-	}
-}
-
-/*
- *	Some of this is a bit hackish. We intercept RIF information
- *	used for source routing. We also grab IP directly and don't feed
- *	it via SNAP.
- */
-
-__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
-{
-
-	struct trh_hdr *trh;
-	struct trllc *trllc;
-	unsigned riflen=0;
-
-	skb->dev = dev;
-	skb_reset_mac_header(skb);
-	trh = tr_hdr(skb);
-
-	if(trh->saddr[0] & TR_RII)
-		riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
-
-	trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);
-
-	skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);
-
-	if(*trh->daddr & 0x80)
-	{
-		if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN))
-			skb->pkt_type=PACKET_BROADCAST;
-		else
-			skb->pkt_type=PACKET_MULTICAST;
-	}
-	else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
-	{
-		skb->pkt_type=PACKET_MULTICAST;
-	}
-	else if(dev->flags & IFF_PROMISC)
-	{
-		if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
-			skb->pkt_type=PACKET_OTHERHOST;
-	}
-
-	if ((skb->pkt_type != PACKET_BROADCAST) &&
-	    (skb->pkt_type != PACKET_MULTICAST))
-		tr_add_rif_info(trh,dev) ;
-
-	/*
-	 * Strip the SNAP header from ARP packets since we don't
-	 * pass them through to the 802.2/SNAP layers.
-	 */
-
-	if (trllc->dsap == EXTENDED_SAP &&
-	    (trllc->ethertype == htons(ETH_P_IP) ||
-	     trllc->ethertype == htons(ETH_P_IPV6) ||
-	     trllc->ethertype == htons(ETH_P_ARP)))
-	{
-		skb_pull(skb, sizeof(struct trllc));
-		return trllc->ethertype;
-	}
-
-	return htons(ETH_P_TR_802_2);
-}
-
-/*
- *	We try to do source routing...
- */
-
-void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,
-		     struct net_device *dev)
-{
-	int slack;
-	unsigned int hash;
-	struct rif_cache *entry;
-	unsigned char *olddata;
-	unsigned long flags;
-	static const unsigned char mcast_func_addr[]
-		= {0xC0,0x00,0x00,0x04,0x00,0x00};
-
-	spin_lock_irqsave(&rif_lock, flags);
-
-	/*
-	 *	Broadcasts are single route as stated in RFC 1042
-	 */
-	if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
-	    (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN))  )
-	{
-		trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
-			       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
-		trh->saddr[0]|=TR_RII;
-	}
-	else
-	{
-		hash = rif_hash(trh->daddr);
-		/*
-		 *	Walk the hash table and look for an entry
-		 */
-		for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next);
-
-		/*
-		 *	If we found an entry we can route the frame.
-		 */
-		if(entry)
-		{
-#if TR_SR_DEBUG
-printk("source routing for %pM\n", trh->daddr);
-#endif
-			if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
-			{
-				trh->rcf=entry->rcf;
-				memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
-				trh->rcf^=htons(TR_RCF_DIR_BIT);
-				trh->rcf&=htons(0x1fff);	/* Issam Chehab <ichehab@madge1.demon.co.uk> */
-
-				trh->saddr[0]|=TR_RII;
-#if TR_SR_DEBUG
-				printk("entry found with rcf %04x\n", entry->rcf);
-			}
-			else
-			{
-				printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
-#endif
-			}
-			entry->last_used=jiffies;
-		}
-		else
-		{
-			/*
-			 *	Without the information we simply have to shout
-			 *	on the wire. The replies should rapidly clean this
-			 *	situation up.
-			 */
-			trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
-				       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
-			trh->saddr[0]|=TR_RII;
-#if TR_SR_DEBUG
-			printk("no entry in rif table found - broadcasting frame\n");
-#endif
-		}
-	}
-
-	/* Compress the RIF here so we don't have to do it in the driver(s) */
-	if (!(trh->saddr[0] & 0x80))
-		slack = 18;
-	else
-		slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
-	olddata = skb->data;
-	spin_unlock_irqrestore(&rif_lock, flags);
-
-	skb_pull(skb, slack);
-	memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
-}
-
-/*
- *	We have learned some new RIF information for our source
- *	routing.
- */
-
-static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
-{
-	unsigned int hash, rii_p = 0;
-	unsigned long flags;
-	struct rif_cache *entry;
-	unsigned char saddr0;
-
-	spin_lock_irqsave(&rif_lock, flags);
-	saddr0 = trh->saddr[0];
-
-	/*
-	 *	Firstly see if the entry exists
-	 */
-
-	if(trh->saddr[0] & TR_RII)
-	{
-		trh->saddr[0]&=0x7f;
-		if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
-		{
-			rii_p = 1;
-		}
-	}
-
-	hash = rif_hash(trh->saddr);
-	for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);
-
-	if(entry==NULL)
-	{
-#if TR_SR_DEBUG
-		printk("adding rif_entry: addr:%pM rcf:%04X\n",
-		       trh->saddr, ntohs(trh->rcf));
-#endif
-		/*
-		 *	Allocate our new entry. A failure to allocate loses
-		 *	use the information. This is harmless.
-		 *
-		 *	FIXME: We ought to keep some kind of cache size
-		 *	limiting and adjust the timers to suit.
-		 */
-		entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
-
-		if(!entry)
-		{
-			printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
-			spin_unlock_irqrestore(&rif_lock, flags);
-			return;
-		}
-
-		memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
-		entry->iface = dev->ifindex;
-		entry->next=rif_table[hash];
-		entry->last_used=jiffies;
-		rif_table[hash]=entry;
-
-		if (rii_p)
-		{
-			entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
-			memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
-			entry->local_ring = 0;
-		}
-		else
-		{
-			entry->local_ring = 1;
-		}
-	}
-	else	/* Y. Tahara added */
-	{
-		/*
-		 *	Update existing entries
-		 */
-		if (!entry->local_ring)
-		    if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
-			 !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
-		    {
-#if TR_SR_DEBUG
-printk("updating rif_entry: addr:%pM rcf:%04X\n",
-		trh->saddr, ntohs(trh->rcf));
-#endif
-			    entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
-			    memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
-		    }
-		entry->last_used=jiffies;
-	}
-	trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
-	spin_unlock_irqrestore(&rif_lock, flags);
-}
-
-/*
- *	Scan the cache with a timer and see what we need to throw out.
- */
-
-static void rif_check_expire(unsigned long dummy)
-{
-	int i;
-	unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;
-
-	spin_lock_irqsave(&rif_lock, flags);
-
-	for(i =0; i < RIF_TABLE_SIZE; i++) {
-		struct rif_cache *entry, **pentry;
-
-		pentry = rif_table+i;
-		while((entry=*pentry) != NULL) {
-			unsigned long expires
-				= entry->last_used + sysctl_tr_rif_timeout;
-
-			if (time_before_eq(expires, jiffies)) {
-				*pentry = entry->next;
-				kfree(entry);
-			} else {
-				pentry = &entry->next;
-
-				if (time_before(expires, next_interval))
-					next_interval = expires;
-			}
-		}
-	}
-
-	spin_unlock_irqrestore(&rif_lock, flags);
-
-	mod_timer(&rif_timer, next_interval);
-
-}
-
-/*
- *	Generate the /proc/net information for the token ring RIF
- *	routing.
- */
-
-#ifdef CONFIG_PROC_FS
-
-static struct rif_cache *rif_get_idx(loff_t pos)
-{
-	int i;
-	struct rif_cache *entry;
-	loff_t off = 0;
-
-	for(i = 0; i < RIF_TABLE_SIZE; i++)
-		for(entry = rif_table[i]; entry; entry = entry->next) {
-			if (off == pos)
-				return entry;
-			++off;
-		}
-
-	return NULL;
-}
-
-static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(&rif_lock)
-{
-	spin_lock_irq(&rif_lock);
-
-	return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
-}
-
-static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	int i;
-	struct rif_cache *ent = v;
-
-	++*pos;
-
-	if (v == SEQ_START_TOKEN) {
-		i = -1;
-		goto scan;
-	}
-
-	if (ent->next)
-		return ent->next;
-
-	i = rif_hash(ent->addr);
- scan:
-	while (++i < RIF_TABLE_SIZE) {
-		if ((ent = rif_table[i]) != NULL)
-			return ent;
-	}
-	return NULL;
-}
-
-static void rif_seq_stop(struct seq_file *seq, void *v)
-	__releases(&rif_lock)
-{
-	spin_unlock_irq(&rif_lock);
-}
-
-static int rif_seq_show(struct seq_file *seq, void *v)
-{
-	int j, rcf_len, segment, brdgnmb;
-	struct rif_cache *entry = v;
-
-	if (v == SEQ_START_TOKEN)
-		seq_puts(seq,
-		     "if     TR address       TTL   rcf   routing segments\n");
-	else {
-		struct net_device *dev = dev_get_by_index(&init_net, entry->iface);
-		long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
-				- (long) jiffies;
-
-		seq_printf(seq, "%s %pM %7li ",
-			   dev?dev->name:"?",
-			   entry->addr,
-			   ttl/HZ);
-
-			if (entry->local_ring)
-				seq_puts(seq, "local\n");
-			else {
-
-				seq_printf(seq, "%04X", ntohs(entry->rcf));
-				rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
-				if (rcf_len)
-					rcf_len >>= 1;
-				for(j = 1; j < rcf_len; j++) {
-					if(j==1) {
-						segment=ntohs(entry->rseg[j-1])>>4;
-						seq_printf(seq,"  %03X",segment);
-					}
-
-					segment=ntohs(entry->rseg[j])>>4;
-					brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
-					seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
-				}
-				seq_putc(seq, '\n');
-			}
-
-		if (dev)
-			dev_put(dev);
-		}
-	return 0;
-}
-
-
-static const struct seq_operations rif_seq_ops = {
-	.start = rif_seq_start,
-	.next  = rif_seq_next,
-	.stop  = rif_seq_stop,
-	.show  = rif_seq_show,
-};
-
-static int rif_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &rif_seq_ops);
-}
-
-static const struct file_operations rif_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open    = rif_seq_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = seq_release,
-};
-
-#endif
-
-static const struct header_ops tr_header_ops = {
-	.create = tr_header,
-	.rebuild= tr_rebuild_header,
-};
-
-static void tr_setup(struct net_device *dev)
-{
-	/*
-	 *	Configure and register
-	 */
-
-	dev->header_ops	= &tr_header_ops;
-
-	dev->type		= ARPHRD_IEEE802_TR;
-	dev->hard_header_len	= TR_HLEN;
-	dev->mtu		= 2000;
-	dev->addr_len		= TR_ALEN;
-	dev->tx_queue_len	= 100;	/* Long queues on tr */
-
-	memset(dev->broadcast,0xFF, TR_ALEN);
-
-	/* New-style flags. */
-	dev->flags		= IFF_BROADCAST | IFF_MULTICAST ;
-}
-
-/**
- * alloc_trdev - Register token ring device
- * @sizeof_priv: Size of additional driver-private structure to be allocated
- *	for this token ring device
- *
- * Fill in the fields of the device structure with token ring-generic values.
- *
- * Constructs a new net device, complete with a private data area of
- * size @sizeof_priv.  A 32-byte (not bit) alignment is enforced for
- * this private data area.
- */
-struct net_device *alloc_trdev(int sizeof_priv)
-{
-	return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
-}
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table tr_table[] = {
-	{
-		.procname	= "rif_timeout",
-		.data		= &sysctl_tr_rif_timeout,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec
-	},
-	{ },
-};
-
-static __initdata struct ctl_path tr_path[] = {
-	{ .procname = "net", },
-	{ .procname = "token-ring", },
-	{ }
-};
-#endif
-
-/*
- *	Called during bootup.  We don't actually have to initialise
- *	too much for this.
- */
-
-static int __init rif_init(void)
-{
-	rif_timer.expires  = jiffies + sysctl_tr_rif_timeout;
-	setup_timer(&rif_timer, rif_check_expire, 0);
-	add_timer(&rif_timer);
-#ifdef CONFIG_SYSCTL
-	register_sysctl_paths(tr_path, tr_table);
-#endif
-	proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops);
-	return 0;
-}
-
-module_init(rif_init);
-
-EXPORT_SYMBOL(tr_type_trans);
-EXPORT_SYMBOL(alloc_trdev);
-
-MODULE_LICENSE("GPL");
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index efea35b..6089f0c 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -266,19 +266,19 @@
 	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 
 	/* May be called without an actual change */
-	if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
+	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
 		return;
 
 	/* vlan address was different from the old address and is equal to
 	 * the new address */
-	if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
-	    !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
+	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
+	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
 		dev_uc_del(dev, vlandev->dev_addr);
 
 	/* vlan address was equal to the old address and is different from
 	 * the new address */
-	if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
-	    compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
+	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
+	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
 		dev_uc_add(dev, vlandev->dev_addr);
 
 	memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
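The compare_ether_addr() conversions here, and in the vlan_core.c, vlan_dev.c, lec.c and mpc.c hunks below, all invert the sense of the test: compare_ether_addr() returned 0 on a match, so !compare_ether_addr(a, b) becomes ether_addr_equal(a, b) and a bare compare_ether_addr(a, b) becomes !ether_addr_equal(a, b). A minimal userspace stand-in for the new helper, assuming only ETH_ALEN-byte addresses (the in-kernel version is equivalent to this memcmp(), just written so the compiler can use word-sized loads):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Boolean MAC comparison: true when the two 6-byte addresses match. */
static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
{
        return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
        unsigned char a[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        unsigned char b[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        /* old: if (!compare_ether_addr(a, b))
         * new: if (ether_addr_equal(a, b)) */
        printf("%s\n", ether_addr_equal(a, b) ? "match" : "no match");
        return 0;
}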
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4d39d80..8ca533c 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -31,8 +31,7 @@
 		/* Our lower layer thinks this is not local, let's make sure.
 		 * This allows the VLAN to have a different MAC than the
 		 * underlying device, and still route correctly. */
-		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-					vlan_dev->dev_addr))
+		if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
 			skb->pkt_type = PACKET_HOST;
 	}
 
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9988d4a..da1bc9c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -157,7 +157,7 @@
 		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 	}
 
-	skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
+	skb->dev = vlan_dev_priv(dev)->real_dev;
 	len = skb->len;
 	if (netpoll_tx_running(dev))
 		return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
@@ -277,7 +277,7 @@
 	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
 		return -ENETDOWN;
 
-	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
 		err = dev_uc_add(real_dev, dev->dev_addr);
 		if (err < 0)
 			goto out;
@@ -307,7 +307,7 @@
 	if (dev->flags & IFF_ALLMULTI)
 		dev_set_allmulti(real_dev, -1);
 del_unicast:
-	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
 		dev_uc_del(real_dev, dev->dev_addr);
 out:
 	netif_carrier_off(dev);
@@ -326,7 +326,7 @@
 	if (dev->flags & IFF_PROMISC)
 		dev_set_promiscuity(real_dev, -1);
 
-	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
 		dev_uc_del(real_dev, dev->dev_addr);
 
 	netif_carrier_off(dev);
@@ -345,13 +345,13 @@
 	if (!(dev->flags & IFF_UP))
 		goto out;
 
-	if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
+	if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
 		err = dev_uc_add(real_dev, addr->sa_data);
 		if (err < 0)
 			return err;
 	}
 
-	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
 		dev_uc_del(real_dev, dev->dev_addr);
 
 out:
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 5071136..708c80e 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -166,11 +166,13 @@
 	struct nlattr *nest;
 	unsigned int i;
 
-	NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id);
+	if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id))
+		goto nla_put_failure;
 	if (vlan->flags) {
 		f.flags = vlan->flags;
 		f.mask  = ~0;
-		NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f);
+		if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
+			goto nla_put_failure;
 	}
 	if (vlan->nr_ingress_mappings) {
 		nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
@@ -183,8 +185,9 @@
 
 			m.from = i;
 			m.to   = vlan->ingress_priority_map[i];
-			NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
-				sizeof(m), &m);
+			if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+				    sizeof(m), &m))
+				goto nla_put_failure;
 		}
 		nla_nest_end(skb, nest);
 	}
@@ -202,8 +205,9 @@
 
 				m.from = pm->priority;
 				m.to   = (pm->vlan_qos >> 13) & 0x7;
-				NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING,
-					sizeof(m), &m);
+				if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+					    sizeof(m), &m))
+					goto nla_put_failure;
 			}
 		}
 		nla_nest_end(skb, nest);
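The NLA_PUT*() macros removed in this file hid a goto nla_put_failure inside the macro body; the replacements spell the error path out, so every nla_put*() return value is checked explicitly. A condensed kernel-style sketch of the resulting shape (the surrounding vlan_fill_info() plumbing is omitted, and the -EMSGSIZE return is an assumption about the unchanged error label):

#include <linux/errno.h>
#include <linux/if_link.h>
#include <net/netlink.h>

/* Sketch: each nla_put*() is checked and failure jumps to one cleanup
 * label, instead of relying on the goto hidden in the old NLA_PUT(). */
static int example_fill_info(struct sk_buff *skb, u16 vlan_id,
                             const struct ifla_vlan_flags *f)
{
        if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_id))
                goto nla_put_failure;

        if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(*f), f))
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}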
diff --git a/net/9p/client.c b/net/9p/client.c
index b23a17c..a170893 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1530,7 +1530,7 @@
 
 
 	p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
-		   fid->fid, (long long unsigned) offset, count);
+		   fid->fid, (unsigned long long) offset, count);
 	err = 0;
 	clnt = fid->clnt;
 
@@ -1605,7 +1605,7 @@
 	struct p9_req_t *req;
 
 	p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
-				fid->fid, (long long unsigned) offset, count);
+				fid->fid, (unsigned long long) offset, count);
 	err = 0;
 	clnt = fid->clnt;
 
@@ -2040,7 +2040,7 @@
 	char *dataptr;
 
 	p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
-				fid->fid, (long long unsigned) offset, count);
+				fid->fid, (unsigned long long) offset, count);
 
 	err = 0;
 	clnt = fid->clnt;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index fccae26..6449bae 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -513,7 +513,7 @@
 	clear_bit(Wworksched, &m->wsched);
 }
 
-static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
 {
 	struct p9_poll_wait *pwait =
 		container_of(wait, struct p9_poll_wait, wait);
diff --git a/net/Kconfig b/net/Kconfig
index e07272d..245831b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -207,10 +207,10 @@
 source "drivers/net/appletalk/Kconfig"
 source "net/x25/Kconfig"
 source "net/lapb/Kconfig"
-source "net/econet/Kconfig"
 source "net/wanrouter/Kconfig"
 source "net/phonet/Kconfig"
 source "net/ieee802154/Kconfig"
+source "net/mac802154/Kconfig"
 source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
 source "net/dns_resolver/Kconfig"
@@ -246,9 +246,6 @@
 	select DQL
 	default y
 
-config HAVE_BPF_JIT
-	bool
-
 config BPF_JIT
 	bool "enable BPF Just In Time compiler"
 	depends on HAVE_BPF_JIT
@@ -295,7 +292,7 @@
 	module will be called tcp_probe.
 
 config NET_DROP_MONITOR
-	boolean "Network packet drop alerting service"
+	tristate "Network packet drop alerting service"
 	depends on INET && EXPERIMENTAL && TRACEPOINTS
 	---help---
 	This feature provides an alerting service to userspace in the
@@ -340,3 +337,7 @@
 
 
 endif   # if NET
+
+# Used by archs to tell that they support BPF_JIT
+config HAVE_BPF_JIT
+	bool
diff --git a/net/Makefile b/net/Makefile
index ad432fa..4f4ee08 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -40,7 +40,6 @@
 obj-$(CONFIG_ATM)		+= atm/
 obj-$(CONFIG_L2TP)		+= l2tp/
 obj-$(CONFIG_DECNET)		+= decnet/
-obj-$(CONFIG_ECONET)		+= econet/
 obj-$(CONFIG_PHONET)		+= phonet/
 ifneq ($(CONFIG_VLAN_8021Q),)
 obj-y				+= 8021q/
@@ -60,6 +59,7 @@
 obj-y				+= dcb/
 endif
 obj-$(CONFIG_IEEE802154)	+= ieee802154/
+obj-$(CONFIG_MAC802154)		+= mac802154/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)		+= sysctl_net.o
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index bfa9ab9..0301b32 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -63,7 +63,7 @@
 #include <net/tcp_states.h>
 #include <net/route.h>
 #include <linux/atalk.h>
-#include "../core/kmap_skb.h"
+#include <linux/highmem.h>
 
 struct datalink_proto *ddp_dl, *aarp_dl;
 static const struct proto_ops atalk_dgram_ops;
@@ -960,10 +960,10 @@
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			sum = atalk_sum_partial(vaddr + frag->page_offset +
 						  offset - start, copy, sum);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 
 			if (!(len -= copy))
 				return sum;
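With the private ../core/kmap_skb.h helpers gone, highmem fragments are mapped through the generic kmap_atomic()/kunmap_atomic() pair, where kmap_atomic() now takes only the page and no KM_* slot. A sketch of the per-fragment pattern used above; my_sum_partial() is a placeholder standing in for atalk_sum_partial() and is assumed to be defined elsewhere:

#include <linux/highmem.h>
#include <linux/skbuff.h>

/* Placeholder checksum routine, assumed to exist elsewhere. */
extern unsigned long my_sum_partial(const unsigned char *data, int len,
                                    unsigned long sum);

/* Sketch: map one paged fragment, feed "copy" bytes starting at "offset"
 * into the running sum, then drop the atomic mapping again. */
static unsigned long sum_one_frag(const skb_frag_t *frag, int offset,
                                  int copy, unsigned long sum)
{
        unsigned char *vaddr;

        vaddr = kmap_atomic(skb_frag_page(frag));
        sum = my_sum_partial(vaddr + frag->page_offset + offset, copy, sum);
        kunmap_atomic(vaddr);

        return sum;
}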
diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index 04e9c0d..ebb8643 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -42,20 +42,14 @@
 	{ },
 };
 
-static struct ctl_path atalk_path[] = {
-	{ .procname = "net", },
-	{ .procname = "appletalk", },
-	{ }
-};
-
 static struct ctl_table_header *atalk_table_header;
 
 void atalk_register_sysctl(void)
 {
-	atalk_table_header = register_sysctl_paths(atalk_path, atalk_table);
+	atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
 }
 
 void atalk_unregister_sysctl(void)
 {
-	unregister_sysctl_table(atalk_table_header);
+	unregister_net_sysctl_table(atalk_table_header);
 }
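register_net_sysctl() takes the mount point as a plain string, which is what lets the ctl_path array above go away; the matching teardown is unregister_net_sysctl_table(). A sketch of the pair in isolation (example_* names are placeholders, and example_table is assumed to be a normally terminated ctl_table[] defined elsewhere):

#include <linux/errno.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

extern struct ctl_table example_table[];

static struct ctl_table_header *example_table_header;

/* Register the table under "net/example" in the init namespace. */
static int example_register_sysctl(void)
{
        example_table_header = register_net_sysctl(&init_net, "net/example",
                                                   example_table);
        return example_table_header ? 0 : -ENOMEM;
}

static void example_unregister_sysctl(void)
{
        unregister_net_sysctl_table(example_table_header);
}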
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 353fccf..4819d315 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -73,7 +73,7 @@
 #ifdef CONFIG_ATM_BR2684_IPFILTER
 	struct br2684_filter filter;
 #endif /* CONFIG_ATM_BR2684_IPFILTER */
-	unsigned copies_needed, copies_failed;
+	unsigned int copies_needed, copies_failed;
 };
 
 struct br2684_dev {
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 62dc8bf..bbd3b63 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -97,9 +97,8 @@
 			error = sock_get_timestampns(sk, argp);
 		goto done;
 	case ATM_SETSC:
-		if (net_ratelimit())
-			pr_warning("ATM_SETSC is obsolete; used by %s:%d\n",
-				   current->comm, task_pid_nr(current));
+		net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n",
+				     current->comm, task_pid_nr(current));
 		error = 0;
 		goto done;
 	case ATMSIGD_CTRL:
@@ -123,8 +122,7 @@
 		   work for 32-bit userspace. TBH I don't really want
 		   to think about it at all. dwmw2. */
 		if (compat) {
-			if (net_ratelimit())
-				pr_warning("32-bit task cannot be atmsigd\n");
+			net_warn_ratelimited("32-bit task cannot be atmsigd\n");
 			error = -EINVAL;
 			goto done;
 		}
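Both warnings above collapse the open-coded "if (net_ratelimit()) pr_warning(...)" pair into net_warn_ratelimited(), which carries the rate-limit check internally. The call shape, in isolation:

#include <linux/net.h>

/* Sketch only: the helper rate-limits and prints at warning level, so the
 * caller no longer wraps the message in an explicit net_ratelimit() test. */
static void warn_setsc_obsolete(const char *comm, int pid)
{
        net_warn_ratelimited("ATM_SETSC is obsolete; used by %s:%d\n",
                             comm, pid);
}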
diff --git a/net/atm/lec.c b/net/atm/lec.c
index f1964ca..a7d1721 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -26,11 +26,6 @@
 #include <linux/spinlock.h>
 #include <linux/seq_file.h>
 
-/* TokenRing if needed */
-#ifdef CONFIG_TR
-#include <linux/trdevice.h>
-#endif
-
 /* And atm device */
 #include <linux/atmdev.h>
 #include <linux/atmlec.h>
@@ -163,50 +158,6 @@
 #endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
 
 /*
- * Modelled after tr_type_trans
- * All multicast and ARE or STE frames go to BUS.
- * Non source routed frames go by destination address.
- * Last hop source routed frames go by destination address.
- * Not last hop source routed frames go by _next_ route descriptor.
- * Returns pointer to destination MAC address or fills in rdesc
- * and returns NULL.
- */
-#ifdef CONFIG_TR
-static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
-{
-	struct trh_hdr *trh;
-	unsigned int riflen, num_rdsc;
-
-	trh = (struct trh_hdr *)packet;
-	if (trh->daddr[0] & (uint8_t) 0x80)
-		return bus_mac;	/* multicast */
-
-	if (trh->saddr[0] & TR_RII) {
-		riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
-		if ((ntohs(trh->rcf) >> 13) != 0)
-			return bus_mac;	/* ARE or STE */
-	} else
-		return trh->daddr;	/* not source routed */
-
-	if (riflen < 6)
-		return trh->daddr;	/* last hop, source routed */
-
-	/* riflen is 6 or more, packet has more than one route descriptor */
-	num_rdsc = (riflen / 2) - 1;
-	memset(rdesc, 0, ETH_ALEN);
-	/* offset 4 comes from LAN destination field in LE control frames */
-	if (trh->rcf & htons((uint16_t) TR_RCF_DIR_BIT))
-		memcpy(&rdesc[4], &trh->rseg[num_rdsc - 2], sizeof(__be16));
-	else {
-		memcpy(&rdesc[4], &trh->rseg[1], sizeof(__be16));
-		rdesc[5] = ((ntohs(trh->rseg[0]) & 0x000f) | (rdesc[5] & 0xf0));
-	}
-
-	return NULL;
-}
-#endif /* CONFIG_TR */
-
-/*
  * Open/initialize the netdevice. This is called (in the current kernel)
  * sometime after booting when the 'ifconfig' program is run.
  *
@@ -257,9 +208,6 @@
 	struct lec_arp_table *entry;
 	unsigned char *dst;
 	int min_frame_size;
-#ifdef CONFIG_TR
-	unsigned char rdesc[ETH_ALEN];	/* Token Ring route descriptor */
-#endif
 	int is_rdesc;
 
 	pr_debug("called\n");
@@ -290,24 +238,10 @@
 	}
 	skb_push(skb, 2);
 
-	/* Put le header to place, works for TokenRing too */
+	/* Put le header to place */
 	lec_h = (struct lecdatahdr_8023 *)skb->data;
 	lec_h->le_header = htons(priv->lecid);
 
-#ifdef CONFIG_TR
-	/*
-	 * Ugly. Use this to realign Token Ring packets for
-	 * e.g. PCA-200E driver.
-	 */
-	if (priv->is_trdev) {
-		skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
-		kfree_skb(skb);
-		if (skb2 == NULL)
-			return NETDEV_TX_OK;
-		skb = skb2;
-	}
-#endif
-
 #if DUMP_PACKETS >= 2
 #define MAX_DUMP_SKB 99
 #elif DUMP_PACKETS >= 1
@@ -321,12 +255,7 @@
 #endif /* DUMP_PACKETS >= 1 */
 
 	/* Minimum ethernet-frame size */
-#ifdef CONFIG_TR
-	if (priv->is_trdev)
-		min_frame_size = LEC_MINIMUM_8025_SIZE;
-	else
-#endif
-		min_frame_size = LEC_MINIMUM_8023_SIZE;
+	min_frame_size = LEC_MINIMUM_8023_SIZE;
 	if (skb->len < min_frame_size) {
 		if ((skb->len + skb_tailroom(skb)) < min_frame_size) {
 			skb2 = skb_copy_expand(skb, 0,
@@ -345,15 +274,6 @@
 	/* Send to right vcc */
 	is_rdesc = 0;
 	dst = lec_h->h_dest;
-#ifdef CONFIG_TR
-	if (priv->is_trdev) {
-		dst = get_tr_dst(skb->data + 2, rdesc);
-		if (dst == NULL) {
-			dst = rdesc;
-			is_rdesc = 1;
-		}
-	}
-#endif
 	entry = NULL;
 	vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
 	pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
@@ -710,12 +630,7 @@
 			dev_kfree_skb(skb);
 			return;
 		}
-#ifdef CONFIG_TR
-		if (priv->is_trdev)
-			dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest;
-		else
-#endif
-			dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
+		dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest;
 
 		/*
 		 * If this is a Data Direct VCC, and the VCC does not match
@@ -723,16 +638,7 @@
 		 */
 		spin_lock_irqsave(&priv->lec_arp_lock, flags);
 		if (lec_is_data_direct(vcc)) {
-#ifdef CONFIG_TR
-			if (priv->is_trdev)
-				src =
-				    ((struct lecdatahdr_8025 *)skb->data)->
-				    h_source;
-			else
-#endif
-				src =
-				    ((struct lecdatahdr_8023 *)skb->data)->
-				    h_source;
+			src = ((struct lecdatahdr_8023 *)skb->data)->h_source;
 			entry = lec_arp_find(priv, src);
 			if (entry && entry->vcc != vcc) {
 				lec_arp_remove(priv, entry);
@@ -750,12 +656,7 @@
 		if (!hlist_empty(&priv->lec_arp_empty_ones))
 			lec_arp_check_empties(priv, vcc, skb);
 		skb_pull(skb, 2);	/* skip lec_id */
-#ifdef CONFIG_TR
-		if (priv->is_trdev)
-			skb->protocol = tr_type_trans(skb, dev);
-		else
-#endif
-			skb->protocol = eth_type_trans(skb, dev);
+		skb->protocol = eth_type_trans(skb, dev);
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += skb->len;
 		memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
@@ -827,27 +728,13 @@
 		i = 0;
 	else
 		i = arg;
-#ifdef CONFIG_TR
 	if (arg >= MAX_LEC_ITF)
 		return -EINVAL;
-#else				/* Reserve the top NUM_TR_DEVS for TR */
-	if (arg >= (MAX_LEC_ITF - NUM_TR_DEVS))
-		return -EINVAL;
-#endif
 	if (!dev_lec[i]) {
-		int is_trdev, size;
-
-		is_trdev = 0;
-		if (i >= (MAX_LEC_ITF - NUM_TR_DEVS))
-			is_trdev = 1;
+		int size;
 
 		size = sizeof(struct lec_priv);
-#ifdef CONFIG_TR
-		if (is_trdev)
-			dev_lec[i] = alloc_trdev(size);
-		else
-#endif
-			dev_lec[i] = alloc_etherdev(size);
+		dev_lec[i] = alloc_etherdev(size);
 		if (!dev_lec[i])
 			return -ENOMEM;
 		dev_lec[i]->netdev_ops = &lec_netdev_ops;
@@ -858,7 +745,6 @@
 		}
 
 		priv = netdev_priv(dev_lec[i]);
-		priv->is_trdev = is_trdev;
 	} else {
 		priv = netdev_priv(dev_lec[i]);
 		if (priv->lecd)
@@ -1255,7 +1141,7 @@
 	struct sk_buff *skb;
 	struct lec_priv *priv = netdev_priv(dev);
 
-	if (compare_ether_addr(lan_dst, dev->dev_addr))
+	if (!ether_addr_equal(lan_dst, dev->dev_addr))
 		return 0;	/* not our mac address */
 
 	kfree(priv->tlvs);	/* NULL if there was no previous association */
@@ -1662,7 +1548,7 @@
 
 	head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
 	hlist_for_each_entry(entry, node, head, next) {
-		if (!compare_ether_addr(mac_addr, entry->mac_addr))
+		if (ether_addr_equal(mac_addr, entry->mac_addr))
 			return entry;
 	}
 	return NULL;
@@ -1849,7 +1735,7 @@
 		case 1:
 			return priv->mcast_vcc;
 		case 2:	/* LANE2 wants arp for multicast addresses */
-			if (!compare_ether_addr(mac_to_find, bus_mac))
+			if (ether_addr_equal(mac_to_find, bus_mac))
 				return priv->mcast_vcc;
 			break;
 		default:
@@ -2372,15 +2258,7 @@
 	struct hlist_node *node, *next;
 	struct lec_arp_table *entry, *tmp;
 	struct lecdatahdr_8023 *hdr = (struct lecdatahdr_8023 *)skb->data;
-	unsigned char *src;
-#ifdef CONFIG_TR
-	struct lecdatahdr_8025 *tr_hdr = (struct lecdatahdr_8025 *)skb->data;
-
-	if (priv->is_trdev)
-		src = tr_hdr->h_source;
-	else
-#endif
-		src = hdr->h_source;
+	unsigned char *src = hdr->h_source;
 
 	spin_lock_irqsave(&priv->lec_arp_lock, flags);
 	hlist_for_each_entry_safe(entry, node, next,
diff --git a/net/atm/lec.h b/net/atm/lec.h
index dfc0719..c730e57 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -142,7 +142,6 @@
 	int itfnum;				/* e.g. 2 for lec2, 5 for lec5 */
 	struct lane2_ops *lane2_ops;		/* can be NULL for LANE v1 */
 	int is_proxy;				/* bridge between ATM and Ethernet */
-	int is_trdev;				/* Device type, 0 = Ethernet, 1 = TokenRing */
 };
 
 struct lec_vcc_priv {
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index aa972409f..d4cc1be 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -592,8 +592,7 @@
 		goto non_ip;
 
 	while (i < mpc->number_of_mps_macs) {
-		if (!compare_ether_addr(eth->h_dest,
-					(mpc->mps_macs + i*ETH_ALEN)))
+		if (ether_addr_equal(eth->h_dest, mpc->mps_macs + i * ETH_ALEN))
 			if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
 				return NETDEV_TX_OK;
 		i++;
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 53e5002..5bdd300 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -207,7 +207,7 @@
 			      size_t nbytes, loff_t *ppos)
 {
 	char *page, *p;
-	unsigned len;
+	unsigned int len;
 
 	if (nbytes == 0)
 		return 0;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 614d3fc..ce1e59f 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -62,12 +62,25 @@
 	void (*old_pop)(struct atm_vcc *, struct sk_buff *);
 					/* keep old push/pop for detaching */
 	enum pppoatm_encaps encaps;
+	atomic_t inflight;
+	unsigned long blocked;
 	int flags;			/* SC_COMP_PROT - compress protocol */
 	struct ppp_channel chan;	/* interface to generic ppp layer */
 	struct tasklet_struct wakeup_tasklet;
 };
 
 /*
+ * We want to allow two packets in the queue. The one that's currently in
+ * flight, and *one* queued up ready for the ATM device to send immediately
+ * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
+ * inflight == -2 represents an empty queue, -1 one packet, and zero means
+ * there are two packets in the queue.
+ */
+#define NONE_INFLIGHT -2
+
+#define BLOCKED 0
+
+/*
  * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
  * ID (0xC021) used in autodetection
  */
@@ -102,16 +115,30 @@
 static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
 {
 	struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
+
 	pvcc->old_pop(atmvcc, skb);
+	atomic_dec(&pvcc->inflight);
+
 	/*
-	 * We don't really always want to do this since it's
-	 * really inefficient - it would be much better if we could
-	 * test if we had actually throttled the generic layer.
-	 * Unfortunately then there would be a nasty SMP race where
-	 * we could clear that flag just as we refuse another packet.
-	 * For now we do the safe thing.
+	 * We always used to run the wakeup tasklet unconditionally here, for
+	 * fear of race conditions where we clear the BLOCKED flag just as we
+	 * refuse another packet in pppoatm_send(). This was quite inefficient.
+	 *
+	 * In fact it's OK. The PPP core will only ever call pppoatm_send()
+	 * while holding the channel->downl lock. And ppp_output_wakeup() as
+	 * called by the tasklet will *also* grab that lock. So even if another
+	 * CPU is in pppoatm_send() right now, the tasklet isn't going to race
+	 * with it. The wakeup *will* happen after the other CPU is safely out
+	 * of pppoatm_send() again.
+	 *
+	 * So if the CPU in pppoatm_send() has already set the BLOCKED bit and
+	 * is about to return, that's fine. We trigger a wakeup which will
+	 * happen later. And if the CPU in pppoatm_send() *hasn't* set the
+	 * BLOCKED bit yet, that's fine too because of the double check in
+	 * pppoatm_may_send() which is commented there.
 	 */
-	tasklet_schedule(&pvcc->wakeup_tasklet);
+	if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
+		tasklet_schedule(&pvcc->wakeup_tasklet);
 }
 
 /*
@@ -184,6 +211,51 @@
 	ppp_input_error(&pvcc->chan, 0);
 }
 
+static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
+{
+	/*
+	 * It's not clear that we need to bother with using atm_may_send()
+	 * to check we don't exceed sk->sk_sndbuf. If userspace sets a
+	 * value of sk_sndbuf which is lower than the MTU, we're going to
+	 * block for ever. But the code always did that before we introduced
+	 * the packet count limit, so...
+	 */
+	if (atm_may_send(pvcc->atmvcc, size) &&
+	    atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
+		return 1;
+
+	/*
+	 * We use test_and_set_bit() rather than set_bit() here because
+	 * we need to ensure there's a memory barrier after it. The bit
+	 * *must* be set before we do the atomic_inc() on pvcc->inflight.
+	 * There's no smp_mb__after_set_bit(), so it's this or abuse
+	 * smp_mb__after_clear_bit().
+	 */
+	test_and_set_bit(BLOCKED, &pvcc->blocked);
+
+	/*
+	 * We may have raced with pppoatm_pop(). If it ran for the
+	 * last packet in the queue, *just* before we set the BLOCKED
+	 * bit, then it might never run again and the channel could
+	 * remain permanently blocked. Cope with that race by checking
+	 * *again*. If it did run in that window, we'll have space on
+	 * the queue now and can return success. It's harmless to leave
+	 * the BLOCKED flag set, since it's only used as a trigger to
+	 * run the wakeup tasklet. Another wakeup will never hurt.
+	 * If pppoatm_pop() is running but hasn't got as far as making
+	 * space on the queue yet, then it hasn't checked the BLOCKED
+	 * flag yet either, so we're safe in that case too. It'll issue
+	 * an "immediate" wakeup... where "immediate" actually involves
+	 * taking the PPP channel's ->downl lock, which is held by the
+	 * code path that calls pppoatm_send(), and is thus going to
+	 * wait for us to finish.
+	 */
+	if (atm_may_send(pvcc->atmvcc, size) &&
+	    atomic_inc_not_zero(&pvcc->inflight))
+		return 1;
+
+	return 0;
+}
 /*
  * Called by the ppp_generic.c to send a packet - returns true if packet
  * was accepted.  If we return false, then it's our job to call
@@ -207,7 +279,7 @@
 			struct sk_buff *n;
 			n = skb_realloc_headroom(skb, LLC_LEN);
 			if (n != NULL &&
-			    !atm_may_send(pvcc->atmvcc, n->truesize)) {
+			    !pppoatm_may_send(pvcc, n->truesize)) {
 				kfree_skb(n);
 				goto nospace;
 			}
@@ -215,12 +287,12 @@
 			skb = n;
 			if (skb == NULL)
 				return DROP_PACKET;
-		} else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
+		} else if (!pppoatm_may_send(pvcc, skb->truesize))
 			goto nospace;
 		memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
 		break;
 	case e_vc:
-		if (!atm_may_send(pvcc->atmvcc, skb->truesize))
+		if (!pppoatm_may_send(pvcc, skb->truesize))
 			goto nospace;
 		break;
 	case e_autodetect:
@@ -285,6 +357,9 @@
 	if (pvcc == NULL)
 		return -ENOMEM;
 	pvcc->atmvcc = atmvcc;
+
+	/* Maximum is zero, so that we can use atomic_inc_not_zero() */
+	atomic_set(&pvcc->inflight, NONE_INFLIGHT);
 	pvcc->old_push = atmvcc->push;
 	pvcc->old_pop = atmvcc->pop;
 	pvcc->encaps = (enum pppoatm_encaps) be.encaps;
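The comment blocks added above define the whole flow-control scheme: inflight starts at NONE_INFLIGHT (-2), every queued packet increments it, and because atomic_inc_not_zero() refuses to move past 0 the queue holds at most two packets (the one in flight plus one ready for the TX-done IRQ); the BLOCKED bit and the second atomic_inc_not_zero() in pppoatm_may_send() close the race against pppoatm_pop(). The counting invariant on its own can be shown with a runnable userspace sketch (C11 atomics standing in for the kernel's atomic_t; the locking and tasklet side is not modelled):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NONE_INFLIGHT -2        /* empty queue: room for two packets */

/* Increment only if the counter is not already 0 ("queue full"),
 * mirroring atomic_inc_not_zero() on pvcc->inflight. */
static bool inc_not_zero(atomic_int *v)
{
        int cur = atomic_load(v);

        while (cur != 0) {
                if (atomic_compare_exchange_weak(v, &cur, cur + 1))
                        return true;
        }
        return false;
}

int main(void)
{
        atomic_int inflight = NONE_INFLIGHT;

        /* Two sends are admitted (-2 -> -1 -> 0), the third is refused. */
        for (int i = 1; i <= 3; i++) {
                bool ok = inc_not_zero(&inflight);

                printf("send %d: %s (inflight=%d)\n", i,
                       ok ? "queued" : "blocked", atomic_load(&inflight));
        }

        /* TX-done (pppoatm_pop) decrements, making room again. */
        atomic_fetch_sub(&inflight, 1);
        printf("after pop: %s\n",
               inc_not_zero(&inflight) ? "queued" : "blocked");
        return 0;
}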
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 509c8ac..86767ca 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -166,7 +166,7 @@
 {
 	struct sk_buff *skb;
 	struct atmsvc_msg *msg;
-	static unsigned session = 0;
+	static unsigned int session = 0;
 
 	pr_debug("%d (0x%p)\n", (int)type, vcc);
 	while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 9d9a6a3..051f7ab 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1990,7 +1990,6 @@
 	sock_register(&ax25_family_ops);
 	dev_add_pack(&ax25_packet_type);
 	register_netdevice_notifier(&ax25_dev_notifier);
-	ax25_register_sysctl();
 
 	proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops);
 	proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops);
@@ -2013,7 +2012,6 @@
 	proc_net_remove(&init_net, "ax25_calls");
 
 	unregister_netdevice_notifier(&ax25_dev_notifier);
-	ax25_unregister_sysctl();
 
 	dev_remove_pack(&ax25_packet_type);
 
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index d0de30e..3d10676 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -59,8 +59,6 @@
 		return;
 	}
 
-	ax25_unregister_sysctl();
-
 	dev->ax25_ptr     = ax25_dev;
 	ax25_dev->dev     = dev;
 	dev_hold(dev);
@@ -90,7 +88,7 @@
 	ax25_dev_list  = ax25_dev;
 	spin_unlock_bh(&ax25_dev_lock);
 
-	ax25_register_sysctl();
+	ax25_register_dev_sysctl(ax25_dev);
 }
 
 void ax25_dev_device_down(struct net_device *dev)
@@ -100,7 +98,7 @@
 	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
 		return;
 
-	ax25_unregister_sysctl();
+	ax25_unregister_dev_sysctl(ax25_dev);
 
 	spin_lock_bh(&ax25_dev_lock);
 
@@ -120,7 +118,6 @@
 		spin_unlock_bh(&ax25_dev_lock);
 		dev_put(dev);
 		kfree(ax25_dev);
-		ax25_register_sysctl();
 		return;
 	}
 
@@ -130,7 +127,6 @@
 			spin_unlock_bh(&ax25_dev_lock);
 			dev_put(dev);
 			kfree(ax25_dev);
-			ax25_register_sysctl();
 			return;
 		}
 
@@ -138,8 +134,6 @@
 	}
 	spin_unlock_bh(&ax25_dev_lock);
 	dev->ax25_ptr = NULL;
-
-	ax25_register_sysctl();
 }
 
 int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 846ae4e..67de6b3 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -48,7 +48,7 @@
 
 int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
 		     unsigned short type, const void *daddr,
-		     const void *saddr, unsigned len)
+		     const void *saddr, unsigned int len)
 {
 	unsigned char *buff;
 
@@ -219,7 +219,7 @@
 
 int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
 		     unsigned short type, const void *daddr,
-		     const void *saddr, unsigned len)
+		     const void *saddr, unsigned int len)
 {
 	return -AX25_HEADER_LEN;
 }
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index ebe0ef3..d5744b7 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -29,17 +29,6 @@
 static int min_ds_timeout[1],		max_ds_timeout[] = {65535000};
 #endif
 
-static struct ctl_table_header *ax25_table_header;
-
-static ctl_table *ax25_table;
-static int ax25_table_size;
-
-static struct ctl_path ax25_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ax25", },
-	{ }
-};
-
 static const ctl_table ax25_param_table[] = {
 	{
 		.procname	= "ip_default_mode",
@@ -159,52 +148,37 @@
 	{ }	/* that's all, folks! */
 };
 
-void ax25_register_sysctl(void)
+int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
 {
-	ax25_dev *ax25_dev;
-	int n, k;
+	char path[sizeof("net/ax25/") + IFNAMSIZ];
+	int k;
+	struct ctl_table *table;
 
-	spin_lock_bh(&ax25_dev_lock);
-	for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
-		ax25_table_size += sizeof(ctl_table);
+	table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
 
-	if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
-		spin_unlock_bh(&ax25_dev_lock);
-		return;
+	for (k = 0; k < AX25_MAX_VALUES; k++)
+		table[k].data = &ax25_dev->values[k];
+
+	snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name);
+	ax25_dev->sysheader = register_net_sysctl(&init_net, path, table);
+	if (!ax25_dev->sysheader) {
+		kfree(table);
+		return -ENOMEM;
 	}
-
-	for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) {
-		struct ctl_table *child = kmemdup(ax25_param_table,
-						  sizeof(ax25_param_table),
-						  GFP_ATOMIC);
-		if (!child) {
-			while (n--)
-				kfree(ax25_table[n].child);
-			kfree(ax25_table);
-			spin_unlock_bh(&ax25_dev_lock);
-			return;
-		}
-		ax25_table[n].child = ax25_dev->systable = child;
-		ax25_table[n].procname     = ax25_dev->dev->name;
-		ax25_table[n].mode         = 0555;
-
-
-		for (k = 0; k < AX25_MAX_VALUES; k++)
-			child[k].data = &ax25_dev->values[k];
-
-		n++;
-	}
-	spin_unlock_bh(&ax25_dev_lock);
-
-	ax25_table_header = register_sysctl_paths(ax25_path, ax25_table);
+	return 0;
 }
 
-void ax25_unregister_sysctl(void)
+void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev)
 {
-	ctl_table *p;
-	unregister_sysctl_table(ax25_table_header);
+	struct ctl_table_header *header = ax25_dev->sysheader;
+	struct ctl_table *table;
 
-	for (p = ax25_table; p->procname; p++)
-		kfree(p->child);
-	kfree(ax25_table);
+	if (header) {
+		ax25_dev->sysheader = NULL;
+		table = header->ctl_table_arg;
+		unregister_net_sysctl_table(header);
+		kfree(table);
+	}
 }
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 2b68d06..53f5244 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -7,19 +7,28 @@
 	depends on NET
 	select CRC16
         default n
-	---help---
+	help
+          B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
+          a routing protocol for multi-hop ad-hoc mesh networks. The
+          networks may be wired or wireless. See
+          http://www.open-mesh.org/ for more information and user space
+          tools.
 
-        B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
-        a routing protocol for multi-hop ad-hoc mesh networks. The
-        networks may be wired or wireless. See
-        http://www.open-mesh.org/ for more information and user space
-        tools.
+config BATMAN_ADV_BLA
+	bool "Bridge Loop Avoidance"
+	depends on BATMAN_ADV && INET
+	default y
+	help
+	  This option enables BLA (Bridge Loop Avoidance), a mechanism
+	  to avoid Ethernet frames looping when mesh nodes are connected
+	  to both the same LAN and the same mesh. If you will never use
+	  more than one mesh node in the same LAN, you can safely remove
+	  this feature and save some space.
 
 config BATMAN_ADV_DEBUG
 	bool "B.A.T.M.A.N. debugging"
-	depends on BATMAN_ADV != n
-	---help---
-
+	depends on BATMAN_ADV
+	help
 	  This is an option for use by developers; most people should
 	  say N here. This enables compilation of support for
 	  outputting debugging information to the kernel log. The
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 4e392eb..6d5c194 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -23,6 +23,7 @@
 batman-adv-y += bat_iv_ogm.o
 batman-adv-y += bat_sysfs.o
 batman-adv-y += bitarray.o
+batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
 batman-adv-y += gateway_client.o
 batman-adv-y += gateway_common.o
 batman-adv-y += hard-interface.o
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index c3b0548..3b588f8 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -32,6 +32,7 @@
 #include "soft-interface.h"
 #include "vis.h"
 #include "icmp_socket.h"
+#include "bridge_loop_avoidance.h"
 
 static struct dentry *bat_debugfs;
 
@@ -82,8 +83,8 @@
 
 	va_start(args, fmt);
 	vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
-	fdebug_log(bat_priv->debug_log, "[%10lu] %s",
-		   (jiffies / HZ), tmp_log_buf);
+	fdebug_log(bat_priv->debug_log, "[%10u] %s",
+		   jiffies_to_msecs(jiffies), tmp_log_buf);
 	va_end(args);
 
 	return 0;
@@ -238,18 +239,20 @@
 	return single_open(file, gw_client_seq_print_text, net_dev);
 }
 
-static int softif_neigh_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, softif_neigh_seq_print_text, net_dev);
-}
-
 static int transtable_global_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
 	return single_open(file, tt_global_seq_print_text, net_dev);
 }
 
+#ifdef CONFIG_BATMAN_ADV_BLA
+static int bla_claim_table_open(struct inode *inode, struct file *file)
+{
+	struct net_device *net_dev = (struct net_device *)inode->i_private;
+	return single_open(file, bla_claim_table_seq_print_text, net_dev);
+}
+#endif
+
 static int transtable_local_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
@@ -282,16 +285,20 @@
 static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
 static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
 static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
-static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open);
 static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
+#ifdef CONFIG_BATMAN_ADV_BLA
+static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
+#endif
 static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
 static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
 
 static struct bat_debuginfo *mesh_debuginfos[] = {
 	&bat_debuginfo_originators,
 	&bat_debuginfo_gateways,
-	&bat_debuginfo_softif_neigh,
 	&bat_debuginfo_transtable_global,
+#ifdef CONFIG_BATMAN_ADV_BLA
+	&bat_debuginfo_bla_claim_table,
+#endif
 	&bat_debuginfo_transtable_local,
 	&bat_debuginfo_vis_data,
 	NULL,
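The new bla_claim_table entry reuses the existing debugfs idiom: the open handler feeds the net_device stored in i_private to single_open() together with a seq_file callback that prints the whole table, and i_private is simply the data pointer given when the file is created. A sketch of both halves (example_* names are placeholders; the print callback is assumed to exist elsewhere):

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/seq_file.h>

/* Assumed to exist elsewhere: dumps one interface's table into the seq_file. */
extern int example_table_seq_print_text(struct seq_file *seq, void *offset);

/* Open handler: i_private is the net_device passed to debugfs_create_file(). */
static int example_table_open(struct inode *inode, struct file *file)
{
        struct net_device *net_dev = (struct net_device *)inode->i_private;

        return single_open(file, example_table_seq_print_text, net_dev);
}

static const struct file_operations example_table_fops = {
        .owner   = THIS_MODULE,
        .open    = example_table_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

/* Creation half: the net_device becomes i_private of the new dentry. */
static struct dentry *example_debugfs_add(struct dentry *parent,
                                          struct net_device *net_dev)
{
        return debugfs_create_file("example_claim_table", S_IRUGO, parent,
                                   net_dev, &example_table_fops);
}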
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a6d5d63..dc53798 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -30,33 +30,69 @@
 #include "send.h"
 #include "bat_algo.h"
 
-static void bat_iv_ogm_init(struct hard_iface *hard_iface)
+static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface,
+					       const uint8_t *neigh_addr,
+					       struct orig_node *orig_node,
+					       struct orig_node *orig_neigh,
+					       uint32_t seqno)
+{
+	struct neigh_node *neigh_node;
+
+	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
+	if (!neigh_node)
+		goto out;
+
+	INIT_LIST_HEAD(&neigh_node->bonding_list);
+
+	neigh_node->orig_node = orig_neigh;
+	neigh_node->if_incoming = hard_iface;
+
+	spin_lock_bh(&orig_node->neigh_list_lock);
+	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+	spin_unlock_bh(&orig_node->neigh_list_lock);
+
+out:
+	return neigh_node;
+}
+
+static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
 {
 	struct batman_ogm_packet *batman_ogm_packet;
+	uint32_t random_seqno;
+	int res = -1;
 
-	hard_iface->packet_len = BATMAN_OGM_LEN;
+	/* randomize initial seqno to avoid collision */
+	get_random_bytes(&random_seqno, sizeof(random_seqno));
+	atomic_set(&hard_iface->seqno, random_seqno);
+
+	hard_iface->packet_len = BATMAN_OGM_HLEN;
 	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
 
+	if (!hard_iface->packet_buff)
+		goto out;
+
 	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
-	batman_ogm_packet->header.packet_type = BAT_OGM;
+	batman_ogm_packet->header.packet_type = BAT_IV_OGM;
 	batman_ogm_packet->header.version = COMPAT_VERSION;
 	batman_ogm_packet->header.ttl = 2;
 	batman_ogm_packet->flags = NO_FLAGS;
 	batman_ogm_packet->tq = TQ_MAX_VALUE;
 	batman_ogm_packet->tt_num_changes = 0;
 	batman_ogm_packet->ttvn = 0;
+
+	res = 0;
+
+out:
+	return res;
 }
 
-static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface)
+static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
 {
-	struct batman_ogm_packet *batman_ogm_packet;
-
-	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
-	batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
-	batman_ogm_packet->header.ttl = TTL;
+	kfree(hard_iface->packet_buff);
+	hard_iface->packet_buff = NULL;
 }
 
-static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface)
+static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface)
 {
 	struct batman_ogm_packet *batman_ogm_packet;
 
@@ -67,6 +103,15 @@
 	       hard_iface->net_dev->dev_addr, ETH_ALEN);
 }
 
+static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
+{
+	struct batman_ogm_packet *batman_ogm_packet;
+
+	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+	batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
+	batman_ogm_packet->header.ttl = TTL;
+}
+
 /* when do we schedule our own ogm to be sent */
 static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
 {
@@ -92,7 +137,7 @@
 static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
 				  int tt_num_changes)
 {
-	int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);
+	int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);
 
 	return (next_buff_pos <= packet_len) &&
 		(next_buff_pos <= MAX_AGGREGATION_BYTES);
@@ -132,7 +177,7 @@
 							    "Sending own" :
 							    "Forwarding"));
 		bat_dbg(DBG_BATMAN, bat_priv,
-			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
+			"%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
 			fwd_str, (packet_num > 0 ? "aggregated " : ""),
 			batman_ogm_packet->orig,
 			ntohl(batman_ogm_packet->seqno),
@@ -142,7 +187,7 @@
 			batman_ogm_packet->ttvn, hard_iface->net_dev->name,
 			hard_iface->net_dev->dev_addr);
 
-		buff_pos += BATMAN_OGM_LEN +
+		buff_pos += BATMAN_OGM_HLEN +
 				tt_len(batman_ogm_packet->tt_num_changes);
 		packet_num++;
 		batman_ogm_packet = (struct batman_ogm_packet *)
@@ -191,7 +236,7 @@
 
 		/* FIXME: what about aggregated packets ? */
 		bat_dbg(DBG_BATMAN, bat_priv,
-			"%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n",
+			"%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
 			(forw_packet->own ? "Sending own" : "Forwarding"),
 			batman_ogm_packet->orig,
 			ntohl(batman_ogm_packet->seqno),
@@ -335,10 +380,9 @@
 	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
 	    (packet_len < MAX_AGGREGATION_BYTES))
 		forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
-						      sizeof(struct ethhdr));
+						      ETH_HLEN);
 	else
-		forw_packet_aggr->skb = dev_alloc_skb(packet_len +
-						      sizeof(struct ethhdr));
+		forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN);
 
 	if (!forw_packet_aggr->skb) {
 		if (!own_packet)
@@ -346,7 +390,7 @@
 		kfree(forw_packet_aggr);
 		goto out;
 	}
-	skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
+	skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
 
 	INIT_HLIST_NODE(&forw_packet_aggr->list);
 
@@ -461,11 +505,11 @@
 static void bat_iv_ogm_forward(struct orig_node *orig_node,
 			       const struct ethhdr *ethhdr,
 			       struct batman_ogm_packet *batman_ogm_packet,
-			       int directlink, struct hard_iface *if_incoming)
+			       bool is_single_hop_neigh,
+			       bool is_from_best_next_hop,
+			       struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct neigh_node *router;
-	uint8_t in_tq, in_ttl, tq_avg = 0;
 	uint8_t tt_num_changes;
 
 	if (batman_ogm_packet->header.ttl <= 1) {
@@ -473,54 +517,43 @@
 		return;
 	}
 
-	router = orig_node_get_router(orig_node);
+	if (!is_from_best_next_hop) {
+		/* Mark the forwarded packet when it is not coming from our
+		 * best next hop. We still need to forward the packet for our
+		 * neighbor link quality detection to work in case the packet
+		 * originated from a single hop neighbor. Otherwise we can
+		 * simply drop the ogm.
+		 */
+		if (is_single_hop_neigh)
+			batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP;
+		else
+			return;
+	}
 
-	in_tq = batman_ogm_packet->tq;
-	in_ttl = batman_ogm_packet->header.ttl;
 	tt_num_changes = batman_ogm_packet->tt_num_changes;
 
 	batman_ogm_packet->header.ttl--;
 	memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
 
-	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
-	 * of our best tq value */
-	if (router && router->tq_avg != 0) {
-
-		/* rebroadcast ogm of best ranking neighbor as is */
-		if (!compare_eth(router->addr, ethhdr->h_source)) {
-			batman_ogm_packet->tq = router->tq_avg;
-
-			if (router->last_ttl)
-				batman_ogm_packet->header.ttl =
-					router->last_ttl - 1;
-		}
-
-		tq_avg = router->tq_avg;
-	}
-
-	if (router)
-		neigh_node_free_ref(router);
-
 	/* apply hop penalty */
 	batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
 
 	bat_dbg(DBG_BATMAN, bat_priv,
-		"Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
-		in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
-		batman_ogm_packet->header.ttl);
+		"Forwarding packet: tq: %i, ttl: %i\n",
+		batman_ogm_packet->tq, batman_ogm_packet->header.ttl);
 
 	batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
 	batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
 
 	/* switch of primaries first hop flag when forwarding */
 	batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
-	if (directlink)
+	if (is_single_hop_neigh)
 		batman_ogm_packet->flags |= DIRECTLINK;
 	else
 		batman_ogm_packet->flags &= ~DIRECTLINK;
 
 	bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
-			     BATMAN_OGM_LEN + tt_len(tt_num_changes),
+			     BATMAN_OGM_HLEN + tt_len(tt_num_changes),
 			     if_incoming, 0, bat_iv_ogm_fwd_send_time());
 }
 
@@ -603,12 +636,12 @@
 		if (is_duplicate)
 			continue;
 
-		spin_lock_bh(&tmp_neigh_node->tq_lock);
+		spin_lock_bh(&tmp_neigh_node->lq_update_lock);
 		ring_buffer_set(tmp_neigh_node->tq_recv,
 				&tmp_neigh_node->tq_index, 0);
 		tmp_neigh_node->tq_avg =
 			ring_buffer_avg(tmp_neigh_node->tq_recv);
-		spin_unlock_bh(&tmp_neigh_node->tq_lock);
+		spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
 	}
 
 	if (!neigh_node) {
@@ -618,8 +651,9 @@
 		if (!orig_tmp)
 			goto unlock;
 
-		neigh_node = create_neighbor(orig_node, orig_tmp,
-					     ethhdr->h_source, if_incoming);
+		neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source,
+						  orig_node, orig_tmp,
+						  batman_ogm_packet->seqno);
 
 		orig_node_free_ref(orig_tmp);
 		if (!neigh_node)
@@ -631,14 +665,14 @@
 	rcu_read_unlock();
 
 	orig_node->flags = batman_ogm_packet->flags;
-	neigh_node->last_valid = jiffies;
+	neigh_node->last_seen = jiffies;
 
-	spin_lock_bh(&neigh_node->tq_lock);
+	spin_lock_bh(&neigh_node->lq_update_lock);
 	ring_buffer_set(neigh_node->tq_recv,
 			&neigh_node->tq_index,
 			batman_ogm_packet->tq);
 	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
-	spin_unlock_bh(&neigh_node->tq_lock);
+	spin_unlock_bh(&neigh_node->lq_update_lock);
 
 	if (!is_duplicate) {
 		orig_node->last_ttl = batman_ogm_packet->header.ttl;
@@ -744,19 +778,20 @@
 	rcu_read_unlock();
 
 	if (!neigh_node)
-		neigh_node = create_neighbor(orig_neigh_node,
-					     orig_neigh_node,
-					     orig_neigh_node->orig,
-					     if_incoming);
+		neigh_node = bat_iv_ogm_neigh_new(if_incoming,
+						  orig_neigh_node->orig,
+						  orig_neigh_node,
+						  orig_neigh_node,
+						  batman_ogm_packet->seqno);
 
 	if (!neigh_node)
 		goto out;
 
-	/* if orig_node is direct neighbor update neigh_node last_valid */
+	/* if orig_node is direct neighbor update neigh_node last_seen */
 	if (orig_node == orig_neigh_node)
-		neigh_node->last_valid = jiffies;
+		neigh_node->last_seen = jiffies;
 
-	orig_node->last_valid = jiffies;
+	orig_node->last_seen = jiffies;
 
 	/* find packet count of corresponding one hop neighbor */
 	spin_lock_bh(&orig_node->ogm_cnt_lock);
@@ -842,7 +877,8 @@
 	seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
 
 	/* signalize caller that the packet is to be dropped. */
-	if (window_protected(bat_priv, seq_diff,
+	if (!hlist_empty(&orig_node->neigh_list) &&
+	    window_protected(bat_priv, seq_diff,
 			     &orig_node->batman_seqno_reset))
 		goto out;
 
@@ -850,9 +886,9 @@
 	hlist_for_each_entry_rcu(tmp_neigh_node, node,
 				 &orig_node->neigh_list, list) {
 
-		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
-					       orig_node->last_real_seqno,
-					       batman_ogm_packet->seqno);
+		is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
+					     orig_node->last_real_seqno,
+					     batman_ogm_packet->seqno);
 
 		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
 		    (tmp_neigh_node->if_incoming == if_incoming))
@@ -866,13 +902,14 @@
 					      seq_diff, set_mark);
 
 		tmp_neigh_node->real_packet_count =
-			bit_packet_count(tmp_neigh_node->real_bits);
+			bitmap_weight(tmp_neigh_node->real_bits,
+				      TQ_LOCAL_WINDOW_SIZE);
 	}
 	rcu_read_unlock();
 
 	if (need_update) {
 		bat_dbg(DBG_BATMAN, bat_priv,
-			"updating last_seqno: old %d, new %d\n",
+			"updating last_seqno: old %u, new %u\n",
 			orig_node->last_real_seqno, batman_ogm_packet->seqno);
 		orig_node->last_real_seqno = batman_ogm_packet->seqno;
 	}
@@ -897,7 +934,9 @@
 	struct neigh_node *orig_neigh_router = NULL;
 	int has_directlink_flag;
 	int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
-	int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
+	int is_broadcast = 0, is_bidirectional;
+	bool is_single_hop_neigh = false;
+	bool is_from_best_next_hop = false;
 	int is_duplicate;
 	uint32_t if_incoming_seqno;
 
@@ -913,7 +952,7 @@
 	 * packet in an aggregation.  Here we expect that the padding
 	 * is always zero (or not 0x01)
 	 */
-	if (batman_ogm_packet->header.packet_type != BAT_OGM)
+	if (batman_ogm_packet->header.packet_type != BAT_IV_OGM)
 		return;
 
 	/* could be changed by schedule_own_packet() */
@@ -921,11 +960,11 @@
 
 	has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
 
-	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
-					   batman_ogm_packet->orig) ? 1 : 0);
+	if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig))
+		is_single_hop_neigh = true;
 
 	bat_dbg(DBG_BATMAN, bat_priv,
-		"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
+		"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
 		ethhdr->h_source, if_incoming->net_dev->name,
 		if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
 		batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
@@ -998,11 +1037,11 @@
 
 			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
 			word = &(orig_neigh_node->bcast_own[offset]);
-			bit_mark(word,
-				 if_incoming_seqno -
+			bat_set_bit(word,
+				    if_incoming_seqno -
 						batman_ogm_packet->seqno - 2);
 			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
-				bit_packet_count(word);
+				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
 			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
 		}
 
@@ -1019,6 +1058,13 @@
 		return;
 	}
 
+	if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) {
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
+			ethhdr->h_source);
+		return;
+	}
+
 	orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
 	if (!orig_node)
 		return;
@@ -1043,6 +1089,10 @@
 	if (router)
 		router_router = orig_node_get_router(router->orig_node);
 
+	if ((router && router->tq_avg != 0) &&
+	    (compare_eth(router->addr, ethhdr->h_source)))
+		is_from_best_next_hop = true;
+
 	/* avoid temporary routing loops */
 	if (router && router_router &&
 	    (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
@@ -1093,7 +1143,8 @@
 
 		/* mark direct link on incoming interface */
 		bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
-				   1, if_incoming);
+				   is_single_hop_neigh, is_from_best_next_hop,
+				   if_incoming);
 
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
@@ -1116,7 +1167,8 @@
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Forwarding packet: rebroadcast originator packet\n");
 	bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
-			   0, if_incoming);
+			   is_single_hop_neigh, is_from_best_next_hop,
+			   if_incoming);
 
 out_neigh:
 	if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1132,13 +1184,25 @@
 	orig_node_free_ref(orig_node);
 }
 
-static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
-			       struct sk_buff *skb)
+static int bat_iv_ogm_receive(struct sk_buff *skb,
+			      struct hard_iface *if_incoming)
 {
+	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batman_ogm_packet *batman_ogm_packet;
 	struct ethhdr *ethhdr;
 	int buff_pos = 0, packet_len;
 	unsigned char *tt_buff, *packet_buff;
+	bool ret;
+
+	ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN);
+	if (!ret)
+		return NET_RX_DROP;
+
+	/* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
+	 * that does not have B.A.T.M.A.N. IV enabled?
+	 */
+	if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit)
+		return NET_RX_DROP;
 
 	packet_len = skb_headlen(skb);
 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1152,31 +1216,50 @@
 		batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
 		batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
 
-		tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;
+		tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;
 
 		bat_iv_ogm_process(ethhdr, batman_ogm_packet,
 				   tt_buff, if_incoming);
 
-		buff_pos += BATMAN_OGM_LEN +
+		buff_pos += BATMAN_OGM_HLEN +
 				tt_len(batman_ogm_packet->tt_num_changes);
 
 		batman_ogm_packet = (struct batman_ogm_packet *)
 						(packet_buff + buff_pos);
 	} while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
 					batman_ogm_packet->tt_num_changes));
+
+	kfree_skb(skb);
+	return NET_RX_SUCCESS;
 }
 
 static struct bat_algo_ops batman_iv __read_mostly = {
 	.name = "BATMAN IV",
-	.bat_ogm_init = bat_iv_ogm_init,
-	.bat_ogm_init_primary = bat_iv_ogm_init_primary,
-	.bat_ogm_update_mac = bat_iv_ogm_update_mac,
+	.bat_iface_enable = bat_iv_ogm_iface_enable,
+	.bat_iface_disable = bat_iv_ogm_iface_disable,
+	.bat_iface_update_mac = bat_iv_ogm_iface_update_mac,
+	.bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
 	.bat_ogm_schedule = bat_iv_ogm_schedule,
 	.bat_ogm_emit = bat_iv_ogm_emit,
-	.bat_ogm_receive = bat_iv_ogm_receive,
 };
 
 int __init bat_iv_init(void)
 {
-	return bat_algo_register(&batman_iv);
+	int ret;
+
+	/* batman originator packet */
+	ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive);
+	if (ret < 0)
+		goto out;
+
+	ret = bat_algo_register(&batman_iv);
+	if (ret < 0)
+		goto handler_unregister;
+
+	goto out;
+
+handler_unregister:
+	recv_handler_unregister(BAT_IV_OGM);
+out:
+	return ret;
 }
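
With .bat_ogm_receive removed from bat_algo_ops, OGM reception is hooked in through recv_handler_register() and the handler owns the skb: it frees the buffer and returns NET_RX_SUCCESS once the packet has been consumed, or returns NET_RX_DROP and leaves the freeing to the caller. A minimal sketch of a handler following that contract (the handler and init names are hypothetical and not part of this patch; it assumes the batman-adv in-tree headers of this series):

  static int my_ogm_receive(struct sk_buff *skb,
  			    struct hard_iface *recv_if)
  {
  	/* malformed/short frame: let the caller free the skb */
  	if (!pskb_may_pull(skb, BATMAN_OGM_HLEN))
  		return NET_RX_DROP;

  	/* ... parse and process the packet here ... */

  	kfree_skb(skb);		/* consumed on success */
  	return NET_RX_SUCCESS;
  }

  static int __init my_algo_init(void)
  {
  	/* mirrors bat_iv_init(): one handler per packet type
  	 * (BAT_IV_OGM reused here purely for illustration)
  	 */
  	return recv_handler_register(BAT_IV_OGM, my_ogm_receive);
  }
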
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 68ff759..5bc7b66 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -63,7 +63,7 @@
 	.store  = _store,			\
 };
 
-#define BAT_ATTR_STORE_BOOL(_name, _post_func)				\
+#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func)			\
 ssize_t store_##_name(struct kobject *kobj, struct attribute *attr,	\
 		      char *buff, size_t count)				\
 {									\
@@ -73,9 +73,9 @@
 				 &bat_priv->_name, net_dev);		\
 }
 
-#define BAT_ATTR_SHOW_BOOL(_name)					\
-ssize_t show_##_name(struct kobject *kobj, struct attribute *attr,	\
-			    char *buff)					\
+#define BAT_ATTR_SIF_SHOW_BOOL(_name)					\
+ssize_t show_##_name(struct kobject *kobj,				\
+		     struct attribute *attr, char *buff)		\
 {									\
 	struct bat_priv *bat_priv = kobj_to_batpriv(kobj);		\
 	return sprintf(buff, "%s\n",					\
@@ -83,16 +83,17 @@
 		       "disabled" : "enabled");				\
 }									\
 
-/* Use this, if you are going to turn a [name] in bat_priv on or off */
-#define BAT_ATTR_BOOL(_name, _mode, _post_func)				\
-	static BAT_ATTR_STORE_BOOL(_name, _post_func)			\
-	static BAT_ATTR_SHOW_BOOL(_name)				\
+/* Use this if you are going to turn a [name] in the soft-interface
+ * (bat_priv) on or off */
+#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func)			\
+	static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func)		\
+	static BAT_ATTR_SIF_SHOW_BOOL(_name)				\
 	static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
 
 
-#define BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func)		\
+#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)		\
 ssize_t store_##_name(struct kobject *kobj, struct attribute *attr,	\
-			     char *buff, size_t count)			\
+		      char *buff, size_t count)				\
 {									\
 	struct net_device *net_dev = kobj_to_netdev(kobj);		\
 	struct bat_priv *bat_priv = netdev_priv(net_dev);		\
@@ -100,19 +101,62 @@
 				 attr, &bat_priv->_name, net_dev);	\
 }
 
-#define BAT_ATTR_SHOW_UINT(_name)					\
-ssize_t show_##_name(struct kobject *kobj, struct attribute *attr,	\
-			    char *buff)					\
+#define BAT_ATTR_SIF_SHOW_UINT(_name)					\
+ssize_t show_##_name(struct kobject *kobj,				\
+		     struct attribute *attr, char *buff)		\
 {									\
 	struct bat_priv *bat_priv = kobj_to_batpriv(kobj);		\
 	return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name));	\
 }									\
 
-/* Use this, if you are going to set [name] in bat_priv to unsigned integer
- * values only */
-#define BAT_ATTR_UINT(_name, _mode, _min, _max, _post_func)		\
-	static BAT_ATTR_STORE_UINT(_name, _min, _max, _post_func)	\
-	static BAT_ATTR_SHOW_UINT(_name)				\
+/* Use this if you are going to set [name] in the soft-interface
+ * (bat_priv) to an unsigned integer value */
+#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func)		\
+	static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)	\
+	static BAT_ATTR_SIF_SHOW_UINT(_name)				\
+	static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
+
+
+#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func)		\
+ssize_t store_##_name(struct kobject *kobj, struct attribute *attr,	\
+		      char *buff, size_t count)				\
+{									\
+	struct net_device *net_dev = kobj_to_netdev(kobj);		\
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);	\
+	ssize_t length;							\
+									\
+	if (!hard_iface)						\
+		return 0;						\
+									\
+	length = __store_uint_attr(buff, count, _min, _max, _post_func,	\
+				   attr, &hard_iface->_name, net_dev);	\
+									\
+	hardif_free_ref(hard_iface);					\
+	return length;							\
+}
+
+#define BAT_ATTR_HIF_SHOW_UINT(_name)					\
+ssize_t show_##_name(struct kobject *kobj,				\
+		     struct attribute *attr, char *buff)		\
+{									\
+	struct net_device *net_dev = kobj_to_netdev(kobj);		\
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);	\
+	ssize_t length;							\
+									\
+	if (!hard_iface)						\
+		return 0;						\
+									\
+	length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
+									\
+	hardif_free_ref(hard_iface);					\
+	return length;							\
+}
+
+/* Use this if you are going to set [name] in hard_iface to an
+ * unsigned integer value */
+#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func)		\
+	static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func)	\
+	static BAT_ATTR_HIF_SHOW_UINT(_name)				\
 	static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
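
An invocation of one of the new per-hard-interface macros expands into a show/store pair that looks up the hard_iface behind the sysfs kobject and holds a reference around the access. As an illustration, a hypothetical BAT_ATTR_HIF_UINT(foo, S_IRUGO | S_IWUSR, 0, 255, NULL) (no such 'foo' field exists in hard_iface) would expand to roughly:

  static ssize_t store_foo(struct kobject *kobj, struct attribute *attr,
  			 char *buff, size_t count)
  {
  	struct net_device *net_dev = kobj_to_netdev(kobj);
  	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
  	ssize_t length;

  	if (!hard_iface)
  		return 0;

  	length = __store_uint_attr(buff, count, 0, 255, NULL,
  				   attr, &hard_iface->foo, net_dev);

  	hardif_free_ref(hard_iface);
  	return length;
  }

  /* show_foo() expands analogously via BAT_ATTR_HIF_SHOW_UINT(), then: */
  static BAT_ATTR(foo, S_IRUGO | S_IWUSR, show_foo, store_foo);
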
 
 
@@ -149,7 +193,7 @@
 		 atomic_read(attr) == 1 ? "enabled" : "disabled",
 		 enabled == 1 ? "enabled" : "disabled");
 
-	atomic_set(attr, (unsigned)enabled);
+	atomic_set(attr, (unsigned int)enabled);
 	return count;
 }
 
@@ -268,7 +312,7 @@
 		 "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
 		 "client" : "server");
 
-	atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp);
+	atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
 	return count;
 }
 
@@ -354,7 +398,7 @@
 		 curr_gw_mode_str, buff);
 
 	gw_deselect(bat_priv);
-	atomic_set(&bat_priv->gw_mode, (unsigned)gw_mode_tmp);
+	atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
 	return count;
 }
 
@@ -384,26 +428,32 @@
 	return gw_bandwidth_set(net_dev, buff, count);
 }
 
-BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
-BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
-BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
-BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
+BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
+BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
+#ifdef CONFIG_BATMAN_ADV_BLA
+BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
+#endif
+BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
+BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
 static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
 static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
 static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
-BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
-BAT_ATTR_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
-BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
-	      post_gw_deselect);
+BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
+BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
+BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
+		  post_gw_deselect);
 static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
 		store_gw_bwidth);
 #ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL);
+BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
 #endif
 
 static struct bat_attribute *mesh_attrs[] = {
 	&bat_attr_aggregated_ogms,
 	&bat_attr_bonding,
+#ifdef CONFIG_BATMAN_ADV_BLA
+	&bat_attr_bridge_loop_avoidance,
+#endif
 	&bat_attr_fragmentation,
 	&bat_attr_ap_isolation,
 	&bat_attr_vis_mode,
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 6d0aa21..07ae6e1 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -24,100 +24,13 @@
 
 #include <linux/bitops.h>
 
-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
-int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
-		   uint32_t curr_seqno)
-{
-	int32_t diff, word_offset, word_num;
-
-	diff = last_seqno - curr_seqno;
-	if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) {
-		return 0;
-	} else {
-		/* which word */
-		word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE;
-		/* which position in the selected word */
-		word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE;
-
-		if (test_bit(word_offset, &seq_bits[word_num]))
-			return 1;
-		else
-			return 0;
-	}
-}
-
-/* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(unsigned long *seq_bits, int32_t n)
-{
-	int32_t word_offset, word_num;
-
-	/* if too old, just drop it */
-	if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
-		return;
-
-	/* which word */
-	word_num = n / WORD_BIT_SIZE;
-	/* which position in the selected word */
-	word_offset = n % WORD_BIT_SIZE;
-
-	set_bit(word_offset, &seq_bits[word_num]); /* turn the position on */
-}
-
 /* shift the packet array by n places. */
-static void bit_shift(unsigned long *seq_bits, int32_t n)
+static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
 {
-	int32_t word_offset, word_num;
-	int32_t i;
-
 	if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
 		return;
 
-	word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */
-	word_num = n / WORD_BIT_SIZE;	/* shift over how much (full) words */
-
-	for (i = NUM_WORDS - 1; i > word_num; i--) {
-		/* going from old to new, so we don't overwrite the data we copy
-		 * from.
-		 *
-		 * left is high, right is low: FEDC BA98 7654 3210
-		 *					  ^^ ^^
-		 *			       vvvv
-		 * ^^^^ = from, vvvvv =to, we'd have word_num==1 and
-		 * word_offset==WORD_BIT_SIZE/2 ????? in this example.
-		 * (=24 bits)
-		 *
-		 * our desired output would be: 9876 5432 1000 0000
-		 * */
-
-		seq_bits[i] =
-			(seq_bits[i - word_num] << word_offset) +
-			/* take the lower port from the left half, shift it left
-			 * to its final position */
-			(seq_bits[i - word_num - 1] >>
-			 (WORD_BIT_SIZE-word_offset));
-		/* and the upper part of the right half and shift it left to
-		 * its position */
-		/* for our example that would be: word[0] = 9800 + 0076 =
-		 * 9876 */
-	}
-	/* now for our last word, i==word_num, we only have its "left" half.
-	 * that's the 1000 word in our example.*/
-
-	seq_bits[i] = (seq_bits[i - word_num] << word_offset);
-
-	/* pad the rest with 0, if there is anything */
-	i--;
-
-	for (; i >= 0; i--)
-		seq_bits[i] = 0;
-}
-
-static void bit_reset_window(unsigned long *seq_bits)
-{
-	int i;
-	for (i = 0; i < NUM_WORDS; i++)
-		seq_bits[i] = 0;
+	bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE);
 }
 
 
@@ -137,7 +50,7 @@
 
 	if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
 		if (set_mark)
-			bit_mark(seq_bits, -seq_num_diff);
+			bat_set_bit(seq_bits, -seq_num_diff);
 		return 0;
 	}
 
@@ -145,10 +58,10 @@
 	 * set the mark if required */
 
 	if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
-		bit_shift(seq_bits, seq_num_diff);
+		bat_bitmap_shift_left(seq_bits, seq_num_diff);
 
 		if (set_mark)
-			bit_mark(seq_bits, 0);
+			bat_set_bit(seq_bits, 0);
 		return 1;
 	}
 
@@ -159,9 +72,9 @@
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"We missed a lot of packets (%i) !\n",
 			seq_num_diff - 1);
-		bit_reset_window(seq_bits);
+		bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
 		if (set_mark)
-			bit_mark(seq_bits, 0);
+			bat_set_bit(seq_bits, 0);
 		return 1;
 	}
 
@@ -176,9 +89,9 @@
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Other host probably restarted!\n");
 
-		bit_reset_window(seq_bits);
+		bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
 		if (set_mark)
-			bit_mark(seq_bits, 0);
+			bat_set_bit(seq_bits, 0);
 
 		return 1;
 	}
@@ -186,16 +99,3 @@
 	/* never reached */
 	return 0;
 }
-
-/* count the hamming weight, how many good packets did we receive? just count
- * the 1's.
- */
-int bit_packet_count(const unsigned long *seq_bits)
-{
-	int i, hamming = 0;
-
-	for (i = 0; i < NUM_WORDS; i++)
-		hamming += hweight_long(seq_bits[i]);
-
-	return hamming;
-}
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index c613572..1835c15 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -22,23 +22,33 @@
 #ifndef _NET_BATMAN_ADV_BITARRAY_H_
 #define _NET_BATMAN_ADV_BITARRAY_H_
 
-#define WORD_BIT_SIZE (sizeof(unsigned long) * 8)
-
 /* returns true if the corresponding bit in the given seq_bits indicates true
  * and curr_seqno is within range of last_seqno */
-int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
-		   uint32_t curr_seqno);
+static inline int bat_test_bit(const unsigned long *seq_bits,
+			       uint32_t last_seqno, uint32_t curr_seqno)
+{
+	int32_t diff;
+
+	diff = last_seqno - curr_seqno;
+	if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE)
+		return 0;
+	else
+		return  test_bit(diff, seq_bits);
+}
 
 /* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(unsigned long *seq_bits, int32_t n);
+static inline void bat_set_bit(unsigned long *seq_bits, int32_t n)
+{
+	/* if too old, just drop it */
+	if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+		return;
 
+	set_bit(n, seq_bits); /* turn the position on */
+}
 
 /* receive and process one packet, returns 1 if received seq_num is considered
  * new, 0 if old  */
 int bit_get_packet(void *priv, unsigned long *seq_bits,
 		   int32_t seq_num_diff, int set_mark);
 
-/* count the hamming weight, how many good packets did we receive? */
-int bit_packet_count(const unsigned long *seq_bits);
-
 #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
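
Together with bit_get_packet() above, these inline helpers treat seq_bits as an ordinary kernel bitmap used as a sliding window over recently seen sequence numbers: bit 0 stands for the newest recorded seqno, bit n for a packet n sequence numbers older, and newer packets shift the whole window via bitmap_shift_left(). A small sketch of that behaviour (it relies on bitarray.h and TQ_LOCAL_WINDOW_SIZE from main.h; the sequence numbers are invented):

  static void seqno_window_demo(void)
  {
  	DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
  	uint32_t last_seqno = 100;

  	bitmap_zero(real_bits, TQ_LOCAL_WINDOW_SIZE);

  	bat_set_bit(real_bits, 0);	/* newest packet, seqno 100 */
  	bat_set_bit(real_bits, 3);	/* an older packet, seqno 97 */

  	/* lookups are relative to the newest recorded seqno */
  	if (bat_test_bit(real_bits, last_seqno, 97))
  		pr_debug("seqno 97 was seen (bit 3 is set)\n");
  	if (!bat_test_bit(real_bits, last_seqno, 98))
  		pr_debug("seqno 98 was never received\n");

  	/* when seqno 102 arrives, bit_get_packet() shifts the window
  	 * left by 2, marks bit 0 for the new packet, and the packet
  	 * count is then bitmap_weight(real_bits, TQ_LOCAL_WINDOW_SIZE)
  	 */
  }
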
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
new file mode 100644
index 0000000..8bf9751
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -0,0 +1,1580 @@
+/*
+ * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "hash.h"
+#include "hard-interface.h"
+#include "originator.h"
+#include "bridge_loop_avoidance.h"
+#include "translation-table.h"
+#include "send.h"
+
+#include <linux/etherdevice.h>
+#include <linux/crc16.h>
+#include <linux/if_arp.h>
+#include <net/arp.h>
+#include <linux/if_vlan.h>
+
+static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+
+static void bla_periodic_work(struct work_struct *work);
+static void bla_send_announce(struct bat_priv *bat_priv,
+			      struct backbone_gw *backbone_gw);
+
+/* return the index of the claim */
+static inline uint32_t choose_claim(const void *data, uint32_t size)
+{
+	const unsigned char *key = data;
+	uint32_t hash = 0;
+	size_t i;
+
+	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+
+	hash += (hash << 3);
+	hash ^= (hash >> 11);
+	hash += (hash << 15);
+
+	return hash % size;
+}
+
+/* return the index of the backbone gateway */
+static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
+{
+	const unsigned char *key = data;
+	uint32_t hash = 0;
+	size_t i;
+
+	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+
+	hash += (hash << 3);
+	hash ^= (hash >> 11);
+	hash += (hash << 15);
+
+	return hash % size;
+}
+
+
+/* compares address and vid of two backbone gws */
+static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
+{
+	const void *data1 = container_of(node, struct backbone_gw,
+					 hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+}
+
+/* compares address and vid of two claims */
+static int compare_claim(const struct hlist_node *node, const void *data2)
+{
+	const void *data1 = container_of(node, struct claim,
+					 hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
+}
+
+/* free a backbone gw */
+static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
+{
+	if (atomic_dec_and_test(&backbone_gw->refcount))
+		kfree_rcu(backbone_gw, rcu);
+}
+
+/* finally deinitialize the claim */
+static void claim_free_rcu(struct rcu_head *rcu)
+{
+	struct claim *claim;
+
+	claim = container_of(rcu, struct claim, rcu);
+
+	backbone_gw_free_ref(claim->backbone_gw);
+	kfree(claim);
+}
+
+/* free a claim, call claim_free_rcu if it's the last reference */
+static void claim_free_ref(struct claim *claim)
+{
+	if (atomic_dec_and_test(&claim->refcount))
+		call_rcu(&claim->rcu, claim_free_rcu);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: search data (may be local/static data)
+ *
+ * looks for a claim in the hash, and returns it if found
+ * or NULL otherwise.
+ */
+static struct claim *claim_hash_find(struct bat_priv *bat_priv,
+				     struct claim *data)
+{
+	struct hashtable_t *hash = bat_priv->claim_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct claim *claim;
+	struct claim *claim_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	index = choose_claim(data, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+		if (!compare_claim(&claim->hash_entry, data))
+			continue;
+
+		if (!atomic_inc_not_zero(&claim->refcount))
+			continue;
+
+		claim_tmp = claim;
+		break;
+	}
+	rcu_read_unlock();
+
+	return claim_tmp;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the address of the originator
+ * @vid: the VLAN ID
+ *
+ * looks for a backbone gateway in the hash, and returns it if found
+ * or NULL otherwise.
+ */
+static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
+					      uint8_t *addr, short vid)
+{
+	struct hashtable_t *hash = bat_priv->backbone_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct backbone_gw search_entry, *backbone_gw;
+	struct backbone_gw *backbone_gw_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	memcpy(search_entry.orig, addr, ETH_ALEN);
+	search_entry.vid = vid;
+
+	index = choose_backbone_gw(&search_entry, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+		if (!compare_backbone_gw(&backbone_gw->hash_entry,
+					 &search_entry))
+			continue;
+
+		if (!atomic_inc_not_zero(&backbone_gw->refcount))
+			continue;
+
+		backbone_gw_tmp = backbone_gw;
+		break;
+	}
+	rcu_read_unlock();
+
+	return backbone_gw_tmp;
+}
+
+/* delete all claims for a backbone */
+static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
+{
+	struct hashtable_t *hash;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	struct claim *claim;
+	int i;
+	spinlock_t *list_lock;	/* protects write access to the hash lists */
+
+	hash = backbone_gw->bat_priv->claim_hash;
+	if (!hash)
+		return;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(claim, node, node_tmp,
+					  head, hash_entry) {
+
+			if (claim->backbone_gw != backbone_gw)
+				continue;
+
+			claim_free_ref(claim);
+			hlist_del_rcu(node);
+		}
+		spin_unlock_bh(list_lock);
+	}
+
+	/* all claims gone, initialize CRC */
+	backbone_gw->crc = BLA_CRC_INIT;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: the mac address to be announced within the claim
+ * @vid: the VLAN ID
+ * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
+ *
+ * sends a claim frame according to the provided info.
+ */
+static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
+			   short vid, int claimtype)
+{
+	struct sk_buff *skb;
+	struct ethhdr *ethhdr;
+	struct hard_iface *primary_if;
+	struct net_device *soft_iface;
+	uint8_t *hw_src;
+	struct bla_claim_dst local_claim_dest;
+	uint32_t zeroip = 0;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		return;
+
+	memcpy(&local_claim_dest, &bat_priv->claim_dest,
+	       sizeof(local_claim_dest));
+	local_claim_dest.type = claimtype;
+
+	soft_iface = primary_if->soft_iface;
+
+	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+			 /* IP DST: 0.0.0.0 */
+			 zeroip,
+			 primary_if->soft_iface,
+			 /* IP SRC: 0.0.0.0 */
+			 zeroip,
+			 /* Ethernet DST: Broadcast */
+			 NULL,
+			 /* Ethernet SRC/HW SRC:  originator mac */
+			 primary_if->net_dev->dev_addr,
+			 /* HW DST: FF:43:05:XX:YY:YY
+			  * with XX   = claim type
+			  * and YY:YY = group id
+			  */
+			 (uint8_t *)&local_claim_dest);
+
+	if (!skb)
+		goto out;
+
+	ethhdr = (struct ethhdr *)skb->data;
+	hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
+
+	/* now we pretend that the client would have sent this ... */
+	switch (claimtype) {
+	case CLAIM_TYPE_ADD:
+		/* normal claim frame
+		 * set Ethernet SRC to the client's mac
+		 */
+		memcpy(ethhdr->h_source, mac, ETH_ALEN);
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
+		break;
+	case CLAIM_TYPE_DEL:
+		/* unclaim frame
+		 * set HW SRC to the client's mac
+		 */
+		memcpy(hw_src, mac, ETH_ALEN);
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
+		break;
+	case CLAIM_TYPE_ANNOUNCE:
+		/* announcement frame
+		 * set HW SRC to the special mac containing the crc
+		 */
+		memcpy(hw_src, mac, ETH_ALEN);
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
+			ethhdr->h_source, vid);
+		break;
+	case CLAIM_TYPE_REQUEST:
+		/* request frame
+		 * set HW SRC to the special mac containing the crc
+		 */
+		memcpy(hw_src, mac, ETH_ALEN);
+		memcpy(ethhdr->h_dest, mac, ETH_ALEN);
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
+			ethhdr->h_source, ethhdr->h_dest, vid);
+		break;
+
+	}
+
+	if (vid != -1)
+		skb = vlan_insert_tag(skb, vid);
+
+	skb_reset_mac_header(skb);
+	skb->protocol = eth_type_trans(skb, soft_iface);
+	bat_priv->stats.rx_packets++;
+	bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+	soft_iface->last_rx = jiffies;
+
+	netif_rx(skb);
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+}
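
The 'HW DST' passed to arp_create() above is not a real hardware address: it is the 6-byte bla_claim_dst structure (defined in packet.h elsewhere in this series), i.e. three magic bytes ff:43:05, one claim-type byte and a 2-byte group id in network byte order. A hedged decoding sketch (the helper name is invented):

  static void debug_dump_claim_dst(const uint8_t *hw_dst)
  {
  	const struct bla_claim_dst *dst;

  	dst = (const struct bla_claim_dst *)hw_dst;

  	/* magic is ff:43:05, type is one of the CLAIM_TYPE_* values */
  	pr_debug("claim dst: magic %02x:%02x:%02x type %u group 0x%04x\n",
  		 dst->magic[0], dst->magic[1], dst->magic[2],
  		 dst->type, ntohs(dst->group));
  }
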
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the mac address of the originator
+ * @vid: the VLAN ID
+ *
+ * searches for the backbone gw or creates a new one if it could not
+ * be found.
+ */
+static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
+					       uint8_t *orig, short vid)
+{
+	struct backbone_gw *entry;
+	struct orig_node *orig_node;
+	int hash_added;
+
+	entry = backbone_hash_find(bat_priv, orig, vid);
+
+	if (entry)
+		return entry;
+
+	bat_dbg(DBG_BLA, bat_priv,
+		"bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
+		orig, vid);
+
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return NULL;
+
+	entry->vid = vid;
+	entry->lasttime = jiffies;
+	entry->crc = BLA_CRC_INIT;
+	entry->bat_priv = bat_priv;
+	atomic_set(&entry->request_sent, 0);
+	memcpy(entry->orig, orig, ETH_ALEN);
+
+	/* one for the hash, one for returning */
+	atomic_set(&entry->refcount, 2);
+
+	hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
+			      choose_backbone_gw, entry, &entry->hash_entry);
+
+	if (unlikely(hash_added != 0)) {
+		/* hash failed, free the structure */
+		kfree(entry);
+		return NULL;
+	}
+
+	/* this is a gateway now, remove any tt entries */
+	orig_node = orig_hash_find(bat_priv, orig);
+	if (orig_node) {
+		tt_global_del_orig(bat_priv, orig_node,
+				   "became a backbone gateway");
+		orig_node_free_ref(orig_node);
+	}
+	return entry;
+}
+
+/* update or add the own backbone gw to make sure we announce
+ * where we receive other backbone gws
+ */
+static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
+				       struct hard_iface *primary_if,
+				       short vid)
+{
+	struct backbone_gw *backbone_gw;
+
+	backbone_gw = bla_get_backbone_gw(bat_priv,
+					  primary_if->net_dev->dev_addr, vid);
+	if (unlikely(!backbone_gw))
+		return;
+
+	backbone_gw->lasttime = jiffies;
+	backbone_gw_free_ref(backbone_gw);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the vid where the request came on
+ *
+ * Repeat all of our own claims, and finally send an ANNOUNCE frame
+ * to allow the requester another check if the CRC is correct now.
+ */
+static void bla_answer_request(struct bat_priv *bat_priv,
+			       struct hard_iface *primary_if, short vid)
+{
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct hashtable_t *hash;
+	struct claim *claim;
+	struct backbone_gw *backbone_gw;
+	int i;
+
+	bat_dbg(DBG_BLA, bat_priv,
+		"bla_answer_request(): received a claim request, send all of our own claims again\n");
+
+	backbone_gw = backbone_hash_find(bat_priv,
+					 primary_if->net_dev->dev_addr, vid);
+	if (!backbone_gw)
+		return;
+
+	hash = bat_priv->claim_hash;
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+			/* only own claims are interesting */
+			if (claim->backbone_gw != backbone_gw)
+				continue;
+
+			bla_send_claim(bat_priv, claim->addr, claim->vid,
+				       CLAIM_TYPE_ADD);
+		}
+		rcu_read_unlock();
+	}
+
+	/* finally, send an announcement frame */
+	bla_send_announce(bat_priv, backbone_gw);
+	backbone_gw_free_ref(backbone_gw);
+}
+
+/**
+ * @backbone_gw: the backbone gateway from whom we are out of sync
+ *
+ * When the crc is wrong, ask the backbone gateway for a full table update.
+ * After the request, it will repeat all of its own claims and finally
+ * send an announcement claim with which we can check again.
+ */
+static void bla_send_request(struct backbone_gw *backbone_gw)
+{
+	/* first, remove all old entries */
+	bla_del_backbone_claims(backbone_gw);
+
+	bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+		"Sending REQUEST to %pM\n",
+		backbone_gw->orig);
+
+	/* send request */
+	bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
+		       backbone_gw->vid, CLAIM_TYPE_REQUEST);
+
+	/* no local broadcasts should be sent or received, for now. */
+	if (!atomic_read(&backbone_gw->request_sent)) {
+		atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
+		atomic_set(&backbone_gw->request_sent, 1);
+	}
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @backbone_gw: our backbone gateway which should be announced
+ *
+ * This function sends an announcement. It is called from multiple
+ * places.
+ */
+static void bla_send_announce(struct bat_priv *bat_priv,
+			      struct backbone_gw *backbone_gw)
+{
+	uint8_t mac[ETH_ALEN];
+	uint16_t crc;
+
+	memcpy(mac, announce_mac, 4);
+	crc = htons(backbone_gw->crc);
+	memcpy(&mac[4], (uint8_t *)&crc, 2);
+
+	bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
+
+}
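
The 'client' address used for announcements is special as well: the first four bytes are the fixed announce_mac pattern 43:05:43:05 and the remaining two carry the backbone gateway's claim CRC in network byte order, which is what handle_announce() below unpacks again. A tiny sketch of the decoding side (the helper name is invented; it mirrors the packing done in bla_send_announce()):

  static bool is_announce_mac(const uint8_t mac[ETH_ALEN], uint16_t *crc)
  {
  	if (memcmp(mac, announce_mac, 4) != 0)
  		return false;	/* not an announcement */

  	/* bytes 4 and 5 hold the CRC, stored via htons() */
  	*crc = ntohs(*(const uint16_t *)&mac[4]);
  	return true;
  }
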
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: the mac address of the claim
+ * @vid: the VLAN ID of the frame
+ * @backbone_gw: the backbone gateway which claims it
+ *
+ * Adds a claim in the claim hash.
+ */
+static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
+			  const short vid, struct backbone_gw *backbone_gw)
+{
+	struct claim *claim;
+	struct claim search_claim;
+	int hash_added;
+
+	memcpy(search_claim.addr, mac, ETH_ALEN);
+	search_claim.vid = vid;
+	claim = claim_hash_find(bat_priv, &search_claim);
+
+	/* create a new claim entry if it does not exist yet. */
+	if (!claim) {
+		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
+		if (!claim)
+			return;
+
+		memcpy(claim->addr, mac, ETH_ALEN);
+		claim->vid = vid;
+		claim->lasttime = jiffies;
+		claim->backbone_gw = backbone_gw;
+
+		atomic_set(&claim->refcount, 2);
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
+			mac, vid);
+		hash_added = hash_add(bat_priv->claim_hash, compare_claim,
+				      choose_claim, claim, &claim->hash_entry);
+
+		if (unlikely(hash_added != 0)) {
+			/* only local changes happened. */
+			kfree(claim);
+			return;
+		}
+	} else {
+		claim->lasttime = jiffies;
+		if (claim->backbone_gw == backbone_gw)
+			/* no need to register a new backbone */
+			goto claim_free_ref;
+
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_add_claim(): changing ownership for %pM, vid %d\n",
+			mac, vid);
+
+		claim->backbone_gw->crc ^=
+			crc16(0, claim->addr, ETH_ALEN);
+		backbone_gw_free_ref(claim->backbone_gw);
+
+	}
+	/* set (new) backbone gw */
+	atomic_inc(&backbone_gw->refcount);
+	claim->backbone_gw = backbone_gw;
+
+	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+	backbone_gw->lasttime = jiffies;
+
+claim_free_ref:
+	claim_free_ref(claim);
+}
+
+/* Delete a claim from the claim hash which has the
+ * given mac address and vid.
+ */
+static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
+			  const short vid)
+{
+	struct claim search_claim, *claim;
+
+	memcpy(search_claim.addr, mac, ETH_ALEN);
+	search_claim.vid = vid;
+	claim = claim_hash_find(bat_priv, &search_claim);
+	if (!claim)
+		return;
+
+	bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
+
+	hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
+	claim_free_ref(claim); /* reference from the hash is gone */
+
+	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+
+	/* don't need the reference from hash_find() anymore */
+	claim_free_ref(claim);
+}
+
+/* check for ANNOUNCE frame, return 1 if handled */
+static int handle_announce(struct bat_priv *bat_priv,
+			   uint8_t *an_addr, uint8_t *backbone_addr, short vid)
+{
+	struct backbone_gw *backbone_gw;
+	uint16_t crc;
+
+	if (memcmp(an_addr, announce_mac, 4) != 0)
+		return 0;
+
+	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+
+	if (unlikely(!backbone_gw))
+		return 1;
+
+
+	/* handle as ANNOUNCE frame */
+	backbone_gw->lasttime = jiffies;
+	crc = ntohs(*((uint16_t *)(&an_addr[4])));
+
+	bat_dbg(DBG_BLA, bat_priv,
+		"handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
+		vid, backbone_gw->orig, crc);
+
+	if (backbone_gw->crc != crc) {
+		bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+			"handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
+			backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
+			crc);
+
+		bla_send_request(backbone_gw);
+	} else {
+		/* if we have sent a request and the crc was OK,
+		 * we can allow traffic again.
+		 */
+		if (atomic_read(&backbone_gw->request_sent)) {
+			atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
+			atomic_set(&backbone_gw->request_sent, 0);
+		}
+	}
+
+	backbone_gw_free_ref(backbone_gw);
+	return 1;
+}
+
+/* check for REQUEST frame, return 1 if handled */
+static int handle_request(struct bat_priv *bat_priv,
+			  struct hard_iface *primary_if,
+			  uint8_t *backbone_addr,
+			  struct ethhdr *ethhdr, short vid)
+{
+	/* check for REQUEST frame */
+	if (!compare_eth(backbone_addr, ethhdr->h_dest))
+		return 0;
+
+	/* sanity check, this should not happen on a normal switch,
+	 * we ignore it in this case.
+	 */
+	if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
+		return 1;
+
+	bat_dbg(DBG_BLA, bat_priv,
+		"handle_request(): REQUEST vid %d (sent by %pM)...\n",
+		vid, ethhdr->h_source);
+
+	bla_answer_request(bat_priv, primary_if, vid);
+	return 1;
+}
+
+/* check for UNCLAIM frame, return 1 if handled */
+static int handle_unclaim(struct bat_priv *bat_priv,
+			  struct hard_iface *primary_if,
+			  uint8_t *backbone_addr,
+			  uint8_t *claim_addr, short vid)
+{
+	struct backbone_gw *backbone_gw;
+
+	/* unclaim in any case if it is our own */
+	if (primary_if && compare_eth(backbone_addr,
+				      primary_if->net_dev->dev_addr))
+		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
+
+	backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
+
+	if (!backbone_gw)
+		return 1;
+
+	/* this must be an UNCLAIM frame */
+	bat_dbg(DBG_BLA, bat_priv,
+		"handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
+		claim_addr, vid, backbone_gw->orig);
+
+	bla_del_claim(bat_priv, claim_addr, vid);
+	backbone_gw_free_ref(backbone_gw);
+	return 1;
+}
+
+/* check for CLAIM frame, return 1 if handled */
+static int handle_claim(struct bat_priv *bat_priv,
+			struct hard_iface *primary_if, uint8_t *backbone_addr,
+			uint8_t *claim_addr, short vid)
+{
+	struct backbone_gw *backbone_gw;
+
+	/* register the gateway if not yet available, and add the claim. */
+
+	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+
+	if (unlikely(!backbone_gw))
+		return 1;
+
+	/* this must be a CLAIM frame */
+	bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
+	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
+
+	/* TODO: we could call something like tt_local_del() here. */
+
+	backbone_gw_free_ref(backbone_gw);
+	return 1;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hw_src: the Hardware source in the ARP Header
+ * @hw_dst: the Hardware destination in the ARP Header
+ * @ethhdr: pointer to the Ethernet header of the claim frame
+ *
+ * checks if it is a claim packet and if it's on the same group.
+ * This function also applies the group ID of the sender
+ * if it is in the same mesh.
+ *
+ * returns:
+ *	2  - if it is a claim packet and on the same group
+ *	1  - if it is a claim packet from another group
+ *	0  - if it is not a claim packet
+ */
+static int check_claim_group(struct bat_priv *bat_priv,
+			     struct hard_iface *primary_if,
+			     uint8_t *hw_src, uint8_t *hw_dst,
+			     struct ethhdr *ethhdr)
+{
+	uint8_t *backbone_addr;
+	struct orig_node *orig_node;
+	struct bla_claim_dst *bla_dst, *bla_dst_own;
+
+	bla_dst = (struct bla_claim_dst *)hw_dst;
+	bla_dst_own = &bat_priv->claim_dest;
+
+	/* check if it is a claim packet in general */
+	if (memcmp(bla_dst->magic, bla_dst_own->magic,
+		   sizeof(bla_dst->magic)) != 0)
+		return 0;
+
+	/* if announcement packet, use the source,
+	 * otherwise assume it is in the hw_src
+	 */
+	switch (bla_dst->type) {
+	case CLAIM_TYPE_ADD:
+		backbone_addr = hw_src;
+		break;
+	case CLAIM_TYPE_REQUEST:
+	case CLAIM_TYPE_ANNOUNCE:
+	case CLAIM_TYPE_DEL:
+		backbone_addr = ethhdr->h_source;
+		break;
+	default:
+		return 0;
+	}
+
+	/* don't accept claim frames from ourselves */
+	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+		return 0;
+
+	/* if it's already the same group, it is fine. */
+	if (bla_dst->group == bla_dst_own->group)
+		return 2;
+
+	/* lets see if this originator is in our mesh */
+	orig_node = orig_hash_find(bat_priv, backbone_addr);
+
+	/* don't accept claims from gateways which are not in
+	 * the same mesh or group.
+	 */
+	if (!orig_node)
+		return 1;
+
+	/* if our mesh friend's mac is bigger, use it for ourselves. */
+	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
+		bat_dbg(DBG_BLA, bat_priv,
+			"taking other backbones claim group: %04x\n",
+			ntohs(bla_dst->group));
+		bla_dst_own->group = bla_dst->group;
+	}
+
+	orig_node_free_ref(orig_node);
+
+	return 2;
+}
+
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ *
+ * Check if this is a claim frame, and process it accordingly.
+ *
+ * returns 1 if it was a claim frame, otherwise return 0 to
+ * tell the callee that it can use the frame on its own.
+ */
+static int bla_process_claim(struct bat_priv *bat_priv,
+			     struct hard_iface *primary_if,
+			     struct sk_buff *skb)
+{
+	struct ethhdr *ethhdr;
+	struct vlan_ethhdr *vhdr;
+	struct arphdr *arphdr;
+	uint8_t *hw_src, *hw_dst;
+	struct bla_claim_dst *bla_dst;
+	uint16_t proto;
+	int headlen;
+	short vid = -1;
+	int ret;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+		vhdr = (struct vlan_ethhdr *)ethhdr;
+		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+		proto = ntohs(vhdr->h_vlan_encapsulated_proto);
+		headlen = sizeof(*vhdr);
+	} else {
+		proto = ntohs(ethhdr->h_proto);
+		headlen = ETH_HLEN;
+	}
+
+	if (proto != ETH_P_ARP)
+		return 0; /* not a claim frame */
+
+	/* this must be an ARP frame. check if it is a claim. */
+
+	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
+		return 0;
+
+	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
+
+	/* Check whether the ARP frame carries valid
+	 * IP information
+	 */
+
+	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
+		return 0;
+	if (arphdr->ar_pro != htons(ETH_P_IP))
+		return 0;
+	if (arphdr->ar_hln != ETH_ALEN)
+		return 0;
+	if (arphdr->ar_pln != 4)
+		return 0;
+
+	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
+	hw_dst = hw_src + ETH_ALEN + 4;
+	bla_dst = (struct bla_claim_dst *)hw_dst;
+
+	/* check if it is a claim frame. */
+	ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
+	if (ret == 1)
+		bat_dbg(DBG_BLA, bat_priv,
+			"bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+			ethhdr->h_source, vid, hw_src, hw_dst);
+
+	if (ret < 2)
+		return ret;
+
+	/* become a backbone gw ourselves on this vlan if not done yet */
+	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+
+	/* check for the different types of claim frames ... */
+	switch (bla_dst->type) {
+	case CLAIM_TYPE_ADD:
+		if (handle_claim(bat_priv, primary_if, hw_src,
+				 ethhdr->h_source, vid))
+			return 1;
+		break;
+	case CLAIM_TYPE_DEL:
+		if (handle_unclaim(bat_priv, primary_if,
+				   ethhdr->h_source, hw_src, vid))
+			return 1;
+		break;
+
+	case CLAIM_TYPE_ANNOUNCE:
+		if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
+			return 1;
+		break;
+	case CLAIM_TYPE_REQUEST:
+		if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
+			return 1;
+		break;
+	}
+
+	bat_dbg(DBG_BLA, bat_priv,
+		"bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+		ethhdr->h_source, vid, hw_src, hw_dst);
+	return 1;
+}
+
+/* Check when we last heard from other nodes, and remove them in case of
+ * a timeout, or clean all backbone gws if now is set.
+ */
+static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
+{
+	struct backbone_gw *backbone_gw;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	struct hashtable_t *hash;
+	spinlock_t *list_lock;	/* protects write access to the hash lists */
+	int i;
+
+	hash = bat_priv->backbone_hash;
+	if (!hash)
+		return;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
+					  head, hash_entry) {
+			if (now)
+				goto purge_now;
+			if (!has_timed_out(backbone_gw->lasttime,
+					   BLA_BACKBONE_TIMEOUT))
+				continue;
+
+			bat_dbg(DBG_BLA, backbone_gw->bat_priv,
+				"bla_purge_backbone_gw(): backbone gw %pM timed out\n",
+				backbone_gw->orig);
+
+purge_now:
+			/* don't wait for the pending request anymore */
+			if (atomic_read(&backbone_gw->request_sent))
+				atomic_dec(&bat_priv->bla_num_requests);
+
+			bla_del_backbone_claims(backbone_gw);
+
+			hlist_del_rcu(node);
+			backbone_gw_free_ref(backbone_gw);
+		}
+		spin_unlock_bh(list_lock);
+	}
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface, may be NULL if now is set
+ * @now: whether the whole hash shall be wiped now
+ *
+ * Check when we last heard from our own claims, and remove them in
+ * case of a timeout, or clean all claims if now is set
+ */
+static void bla_purge_claims(struct bat_priv *bat_priv,
+			     struct hard_iface *primary_if, int now)
+{
+	struct claim *claim;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct hashtable_t *hash;
+	int i;
+
+	hash = bat_priv->claim_hash;
+	if (!hash)
+		return;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+			if (now)
+				goto purge_now;
+			if (!compare_eth(claim->backbone_gw->orig,
+					 primary_if->net_dev->dev_addr))
+				continue;
+			if (!has_timed_out(claim->lasttime,
+					   BLA_CLAIM_TIMEOUT))
+				continue;
+
+			bat_dbg(DBG_BLA, bat_priv,
+				"bla_purge_claims(): %pM, vid %d, time out\n",
+				claim->addr, claim->vid);
+
+purge_now:
+			handle_unclaim(bat_priv, primary_if,
+				       claim->backbone_gw->orig,
+				       claim->addr, claim->vid);
+		}
+		rcu_read_unlock();
+	}
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the new selected primary_if
+ * @oldif: the old primary interface, may be NULL
+ *
+ * Update the backbone gateways when our own orig address changes.
+ *
+ */
+void bla_update_orig_address(struct bat_priv *bat_priv,
+			     struct hard_iface *primary_if,
+			     struct hard_iface *oldif)
+{
+	struct backbone_gw *backbone_gw;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct hashtable_t *hash;
+	int i;
+
+	/* reset bridge loop avoidance group id */
+	bat_priv->claim_dest.group =
+		htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
+
+	if (!oldif) {
+		bla_purge_claims(bat_priv, NULL, 1);
+		bla_purge_backbone_gw(bat_priv, 1);
+		return;
+	}
+
+	hash = bat_priv->backbone_hash;
+	if (!hash)
+		return;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+			/* own orig still holds the old value. */
+			if (!compare_eth(backbone_gw->orig,
+					 oldif->net_dev->dev_addr))
+				continue;
+
+			memcpy(backbone_gw->orig,
+			       primary_if->net_dev->dev_addr, ETH_ALEN);
+			/* send an announce frame so others will ask for our
+			 * claims and update their tables.
+			 */
+			bla_send_announce(bat_priv, backbone_gw);
+		}
+		rcu_read_unlock();
+	}
+}
+
+
+
+/* (re)start the timer */
+static void bla_start_timer(struct bat_priv *bat_priv)
+{
+	INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
+	queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
+			   msecs_to_jiffies(BLA_PERIOD_LENGTH));
+}
+
+/* periodic work to do:
+ *  * purge structures when they are too old
+ *  * send announcements
+ */
+static void bla_periodic_work(struct work_struct *work)
+{
+	struct delayed_work *delayed_work =
+		container_of(work, struct delayed_work, work);
+	struct bat_priv *bat_priv =
+		container_of(delayed_work, struct bat_priv, bla_work);
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct backbone_gw *backbone_gw;
+	struct hashtable_t *hash;
+	struct hard_iface *primary_if;
+	int i;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	bla_purge_claims(bat_priv, primary_if, 0);
+	bla_purge_backbone_gw(bat_priv, 0);
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		goto out;
+
+	hash = bat_priv->backbone_hash;
+	if (!hash)
+		goto out;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+			if (!compare_eth(backbone_gw->orig,
+					 primary_if->net_dev->dev_addr))
+				continue;
+
+			backbone_gw->lasttime = jiffies;
+
+			bla_send_announce(bat_priv, backbone_gw);
+		}
+		rcu_read_unlock();
+	}
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+
+	bla_start_timer(bat_priv);
+}
+
+/* initialize all bla structures */
+int bla_init(struct bat_priv *bat_priv)
+{
+	int i;
+	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
+	struct hard_iface *primary_if;
+
+	bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
+
+	/* setting claim destination address */
+	memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
+	bat_priv->claim_dest.type = 0;
+	primary_if = primary_if_get_selected(bat_priv);
+	if (primary_if) {
+		bat_priv->claim_dest.group =
+			htons(crc16(0, primary_if->net_dev->dev_addr,
+				    ETH_ALEN));
+		hardif_free_ref(primary_if);
+	} else {
+		bat_priv->claim_dest.group = 0; /* will be set later */
+	}
+
+	/* initialize the duplicate list */
+	for (i = 0; i < DUPLIST_SIZE; i++)
+		bat_priv->bcast_duplist[i].entrytime =
+			jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
+	bat_priv->bcast_duplist_curr = 0;
+
+	if (bat_priv->claim_hash)
+		return 1;
+
+	bat_priv->claim_hash = hash_new(128);
+	bat_priv->backbone_hash = hash_new(32);
+
+	if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
+		return -1;
+
+	bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
+
+	bla_start_timer(bat_priv);
+	return 1;
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @bcast_packet: the broadcast packet to be checked
+ * @hdr_size: maximum length of the frame
+ *
+ * check if it is on our broadcast list. Another gateway might
+ * have sent the same packet because it is connected to the same backbone,
+ * so we have to remove this duplicate.
+ *
+ * This is performed by checking the CRC, which will tell us
+ * with a good chance that it is the same packet. If it was also
+ * sent by another host, drop it. We do allow equal packets from
+ * the same host, however, as this might be intended.
+ *
+ */
+
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+			    struct bcast_packet *bcast_packet,
+			    int hdr_size)
+{
+	int i, length, curr;
+	uint8_t *content;
+	uint16_t crc;
+	struct bcast_duplist_entry *entry;
+
+	length = hdr_size - sizeof(*bcast_packet);
+	content = (uint8_t *)bcast_packet;
+	content += sizeof(*bcast_packet);
+
+	/* calculate the crc ... */
+	crc = crc16(0, content, length);
+
+	for (i = 0 ; i < DUPLIST_SIZE; i++) {
+		curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
+		entry = &bat_priv->bcast_duplist[curr];
+
+		/* we can stop searching if the entry is too old;
+		 * later entries will be even older
+		 */
+		if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
+			break;
+
+		if (entry->crc != crc)
+			continue;
+
+		if (compare_eth(entry->orig, bcast_packet->orig))
+			continue;
+
+		/* this entry seems to match: same crc, not too old,
+		 * and from another gw. therefore return 1 to forbid it.
+		 */
+		return 1;
+	}
+	/* not found, add a new entry (overwrite the oldest entry) */
+	curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
+	entry = &bat_priv->bcast_duplist[curr];
+	entry->crc = crc;
+	entry->entrytime = jiffies;
+	memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
+	bat_priv->bcast_duplist_curr = curr;
+
+	/* allow it, it's the first occurrence. */
+	return 0;
+}
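
This duplicate check is meant to be consulted from the broadcast receive path before a frame is re-flooded; the actual hook lives in the routing code elsewhere in this series. A hedged sketch of such a call site (the wrapper name and the way the length is obtained are placeholders; per the kernel-doc above, the third argument is the total length of the broadcast frame):

  static bool bcast_is_backbone_duplicate(struct bat_priv *bat_priv,
  					struct sk_buff *skb)
  {
  	struct bcast_packet *bcast_packet;

  	bcast_packet = (struct bcast_packet *)skb->data;

  	/* 1 means: same payload CRC, recent, and from another gateway */
  	return bla_check_bcast_duplist(bat_priv, bcast_packet,
  				       skb_headlen(skb)) == 1;
  }
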
+
+
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: originator mac address
+ *
+ * check if the originator is a gateway for any VLAN ID.
+ *
+ * returns 1 if it is found, 0 otherwise
+ *
+ */
+
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
+{
+	struct hashtable_t *hash = bat_priv->backbone_hash;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct backbone_gw *backbone_gw;
+	int i;
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		return 0;
+
+	if (!hash)
+		return 0;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
+			if (compare_eth(backbone_gw->orig, orig)) {
+				rcu_read_unlock();
+				return 1;
+			}
+		}
+		rcu_read_unlock();
+	}
+
+	return 0;
+}
+
+
+/**
+ * @skb: the frame to be checked
+ * @orig_node: the orig_node of the frame
+ * @hdr_size: maximum length of the frame
+ *
+ * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
+ * if the orig_node is also a gateway on the soft interface, otherwise it
+ * returns 0.
+ *
+ */
+int bla_is_backbone_gw(struct sk_buff *skb,
+		       struct orig_node *orig_node, int hdr_size)
+{
+	struct ethhdr *ethhdr;
+	struct vlan_ethhdr *vhdr;
+	struct backbone_gw *backbone_gw;
+	short vid = -1;
+
+	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
+		return 0;
+
+	/* first, find out the vid. */
+	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
+		return 0;
+
+	ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
+
+	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+		if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
+			return 0;
+
+		vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
+					      hdr_size);
+		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+	}
+
+	/* see if this originator is a backbone gw for this VLAN */
+
+	backbone_gw = backbone_hash_find(orig_node->bat_priv,
+					 orig_node->orig, vid);
+	if (!backbone_gw)
+		return 0;
+
+	backbone_gw_free_ref(backbone_gw);
+	return 1;
+}
+
+/* free all bla structures (for softinterface free or module unload) */
+void bla_free(struct bat_priv *bat_priv)
+{
+	struct hard_iface *primary_if;
+
+	cancel_delayed_work_sync(&bat_priv->bla_work);
+	primary_if = primary_if_get_selected(bat_priv);
+
+	if (bat_priv->claim_hash) {
+		bla_purge_claims(bat_priv, primary_if, 1);
+		hash_destroy(bat_priv->claim_hash);
+		bat_priv->claim_hash = NULL;
+	}
+	if (bat_priv->backbone_hash) {
+		bla_purge_backbone_gw(bat_priv, 1);
+		hash_destroy(bat_priv->backbone_hash);
+		bat_priv->backbone_hash = NULL;
+	}
+	if (primary_if)
+		hardif_free_ref(primary_if);
+}
+
+/**
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * bla_rx checks if:
+ *  * we have to race for a claim
+ *  * the frame is allowed on the LAN
+ *
+ * in these cases, the skb is further handled by this function and
+ * returns 1, otherwise it returns 0 and the caller shall further
+ * process the skb.
+ *
+ */
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+{
+	struct ethhdr *ethhdr;
+	struct claim search_claim, *claim = NULL;
+	struct hard_iface *primary_if;
+	int ret;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto handled;
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		goto allow;
+
+
+	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+		/* don't allow broadcasts while requests are in flight */
+		if (is_multicast_ether_addr(ethhdr->h_dest))
+			goto handled;
+
+	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+	search_claim.vid = vid;
+	claim = claim_hash_find(bat_priv, &search_claim);
+
+	if (!claim) {
+		/* possible optimization: race for a claim */
+		/* No claim exists yet, claim it for us!
+		 */
+		handle_claim(bat_priv, primary_if,
+			     primary_if->net_dev->dev_addr,
+			     ethhdr->h_source, vid);
+		goto allow;
+	}
+
+	/* if it is our own claim ... */
+	if (compare_eth(claim->backbone_gw->orig,
+			primary_if->net_dev->dev_addr)) {
+		/* ... allow it in any case */
+		claim->lasttime = jiffies;
+		goto allow;
+	}
+
+	/* if it is a broadcast ... */
+	if (is_multicast_ether_addr(ethhdr->h_dest)) {
+		/* ... drop it. the responsible gateway is in charge. */
+		goto handled;
+	} else {
+		/* seems the client considers us as its best gateway.
+		 * send a claim and update the claim table
+		 * immediately.
+		 */
+		handle_claim(bat_priv, primary_if,
+			     primary_if->net_dev->dev_addr,
+			     ethhdr->h_source, vid);
+		goto allow;
+	}
+allow:
+	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+	ret = 0;
+	goto out;
+
+handled:
+	kfree_skb(skb);
+	ret = 1;
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (claim)
+		claim_free_ref(claim);
+	return ret;
+}
+
+/**
+ * bla_tx - check if a frame to be sent needs bridge loop avoidance handling
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the frame to be checked
+ * @vid: the VLAN ID of the frame
+ *
+ * bla_tx checks whether:
+ *  * a claim was received which has to be processed
+ *  * the frame is allowed on the mesh
+ *
+ * In these cases, the skb is further handled by this function and
+ * 1 is returned; otherwise 0 is returned and the caller shall further
+ * process the skb.
+ */
+int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+{
+	struct ethhdr *ethhdr;
+	struct claim search_claim, *claim = NULL;
+	struct hard_iface *primary_if;
+	int ret = 0;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		goto allow;
+
+	/* in the VLAN case, the mac header might not be set. */
+	skb_reset_mac_header(skb);
+
+	if (bla_process_claim(bat_priv, primary_if, skb))
+		goto handled;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
+		/* don't allow broadcasts while requests are in flight */
+		if (is_multicast_ether_addr(ethhdr->h_dest))
+			goto handled;
+
+	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+	search_claim.vid = vid;
+
+	claim = claim_hash_find(bat_priv, &search_claim);
+
+	/* if no claim exists, allow it. */
+	if (!claim)
+		goto allow;
+
+	/* check if we are responsible. */
+	if (compare_eth(claim->backbone_gw->orig,
+			primary_if->net_dev->dev_addr)) {
+		/* if yes, the client has roamed and we have
+		 * to unclaim it.
+		 */
+		handle_unclaim(bat_priv, primary_if,
+			       primary_if->net_dev->dev_addr,
+			       ethhdr->h_source, vid);
+		goto allow;
+	}
+
+	/* check if it is a multicast/broadcast frame */
+	if (is_multicast_ether_addr(ethhdr->h_dest)) {
+		/* drop it. the responsible gateway has forwarded it into
+		 * the backbone network.
+		 */
+		goto handled;
+	} else {
+		/* we must allow it, at least if we are
+		 * responsible for the DESTINATION.
+		 */
+		goto allow;
+	}
+allow:
+	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+	ret = 0;
+	goto out;
+handled:
+	ret = 1;
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (claim)
+		claim_free_ref(claim);
+	return ret;
+}
+
+int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
+{
+	struct net_device *net_dev = (struct net_device *)seq->private;
+	struct bat_priv *bat_priv = netdev_priv(net_dev);
+	struct hashtable_t *hash = bat_priv->claim_hash;
+	struct claim *claim;
+	struct hard_iface *primary_if;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	uint32_t i;
+	bool is_own;
+	int ret = 0;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if) {
+		ret = seq_printf(seq,
+				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
+				 net_dev->name);
+		goto out;
+	}
+
+	if (primary_if->if_status != IF_ACTIVE) {
+		ret = seq_printf(seq,
+				 "BATMAN mesh %s disabled - primary interface not active\n",
+				 net_dev->name);
+		goto out;
+	}
+
+	seq_printf(seq,
+		   "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
+		   net_dev->name, primary_if->net_dev->dev_addr,
+		   ntohs(bat_priv->claim_dest.group));
+	seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-4s)\n",
+		   "Client", "VID", "Originator", "CRC");
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
+			is_own = compare_eth(claim->backbone_gw->orig,
+					     primary_if->net_dev->dev_addr);
+			seq_printf(seq,	" * %pM on % 5d by %pM [%c] (%04x)\n",
+				   claim->addr, claim->vid,
+				   claim->backbone_gw->orig,
+				   (is_own ? 'x' : ' '),
+				   claim->backbone_gw->crc);
+		}
+		rcu_read_unlock();
+	}
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	return ret;
+}
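For orientation, here is a minimal sketch (editorial, not part of this commit, and assuming only the helpers visible above: claim_hash_find(), claim_free_ref(), compare_eth()) of the claim-lookup pattern that bla_rx() and bla_tx() both follow: build a search key on the stack, look it up in the claim hash, and drop the reference when done.

/* illustrative sketch only - not code from this commit */
static bool example_client_claimed_by_us(struct bat_priv *bat_priv,
					 const uint8_t *client, short vid,
					 const uint8_t *own_addr)
{
	struct claim search_claim, *claim;
	bool ret;

	/* build a temporary search key on the stack */
	memcpy(search_claim.addr, client, ETH_ALEN);
	search_claim.vid = vid;

	/* look it up in the claim hash (takes a reference) */
	claim = claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return false;

	/* the claim is ours if our originator address matches */
	ret = compare_eth(claim->backbone_gw->orig, own_addr);
	claim_free_ref(claim);
	return ret;
}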
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
new file mode 100644
index 0000000..e39f93a
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+ *
+ * Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_BLA_H_
+#define _NET_BATMAN_ADV_BLA_H_
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
+int bla_is_backbone_gw(struct sk_buff *skb,
+		       struct orig_node *orig_node, int hdr_size);
+int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig);
+int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+			    struct bcast_packet *bcast_packet, int hdr_size);
+void bla_update_orig_address(struct bat_priv *bat_priv,
+			     struct hard_iface *primary_if,
+			     struct hard_iface *oldif);
+int bla_init(struct bat_priv *bat_priv);
+void bla_free(struct bat_priv *bat_priv);
+
+#define BLA_CRC_INIT	0
+#else /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
+			 short vid)
+{
+	return 0;
+}
+
+static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb,
+			 short vid)
+{
+	return 0;
+}
+
+static inline int bla_is_backbone_gw(struct sk_buff *skb,
+				     struct orig_node *orig_node,
+				     int hdr_size)
+{
+	return 0;
+}
+
+static inline int bla_claim_table_seq_print_text(struct seq_file *seq,
+						 void *offset)
+{
+	return 0;
+}
+
+static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv,
+					  uint8_t *orig)
+{
+	return 0;
+}
+
+static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv,
+					  struct bcast_packet *bcast_packet,
+					  int hdr_size)
+{
+	return 0;
+}
+
+static inline void bla_update_orig_address(struct bat_priv *bat_priv,
+					   struct hard_iface *primary_if,
+					   struct hard_iface *oldif)
+{
+}
+
+static inline int bla_init(struct bat_priv *bat_priv)
+{
+	return 1;
+}
+
+static inline void bla_free(struct bat_priv *bat_priv)
+{
+}
+
+#endif /* ifdef CONFIG_BATMAN_ADV_BLA */
+
+#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
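A short usage note (illustrative, not from the patch): because the header above provides inline no-op stubs when CONFIG_BATMAN_ADV_BLA is disabled, callers can invoke the API unconditionally instead of wrapping every call in #ifdefs. A hypothetical receive path might look like this:

/* hypothetical caller - compiles with or without CONFIG_BATMAN_ADV_BLA */
static void example_deliver_frame(struct bat_priv *bat_priv,
				  struct sk_buff *skb, short vid)
{
	/* with BLA compiled out, bla_rx() is a stub that returns 0 */
	if (bla_rx(bat_priv, skb, vid))
		return;	/* skb was consumed (or freed) by the loop avoidance */

	/* ... otherwise hand the frame up as usual ... */
}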
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 6f9b9b7..47f7186 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -558,10 +558,10 @@
 			p++;
 
 			/* ...and then we jump over the data */
-			if (pkt_len < *p)
+			if (pkt_len < 1 + (*p))
 				goto out;
-			pkt_len -= *p;
-			p += (*p);
+			pkt_len -= 1 + (*p);
+			p += 1 + (*p);
 		}
 	}
 out:
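The hunk above tightens a DHCP option walk: each option is encoded as <code><length><data>, so both the length byte itself and the data bytes have to be accounted for. A minimal sketch of that parsing step (an assumption about the surrounding parser, not code from this commit):

/* illustrative only: skip one DHCP option, where p points at its length byte */
static const uint8_t *example_dhcp_skip_option(const uint8_t *p, int *pkt_len)
{
	if (*pkt_len < 1 + *p)
		return NULL;		/* option is truncated */

	*pkt_len -= 1 + *p;		/* consume the length byte plus the data */
	return p + 1 + *p;		/* next option's code byte */
}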
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 3778977..dc334fa 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,15 +28,10 @@
 #include "bat_sysfs.h"
 #include "originator.h"
 #include "hash.h"
+#include "bridge_loop_avoidance.h"
 
 #include <linux/if_arp.h>
 
-
-static int batman_skb_recv(struct sk_buff *skb,
-			   struct net_device *dev,
-			   struct packet_type *ptype,
-			   struct net_device *orig_dev);
-
 void hardif_free_rcu(struct rcu_head *rcu)
 {
 	struct hard_iface *hard_iface;
@@ -107,7 +102,8 @@
 	return hard_iface;
 }
 
-static void primary_if_update_addr(struct bat_priv *bat_priv)
+static void primary_if_update_addr(struct bat_priv *bat_priv,
+				   struct hard_iface *oldif)
 {
 	struct vis_packet *vis_packet;
 	struct hard_iface *primary_if;
@@ -122,6 +118,7 @@
 	memcpy(vis_packet->sender_orig,
 	       primary_if->net_dev->dev_addr, ETH_ALEN);
 
+	bla_update_orig_address(bat_priv, primary_if, oldif);
 out:
 	if (primary_if)
 		hardif_free_ref(primary_if);
@@ -140,14 +137,15 @@
 	curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
 	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
 
+	if (!new_hard_iface)
+		goto out;
+
+	bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
+	primary_if_update_addr(bat_priv, curr_hard_iface);
+
+out:
 	if (curr_hard_iface)
 		hardif_free_ref(curr_hard_iface);
-
-	if (!new_hard_iface)
-		return;
-
-	bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface);
-	primary_if_update_addr(bat_priv);
 }
 
 static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
@@ -175,9 +173,9 @@
 				 net_dev->dev_addr))
 			continue;
 
-		pr_warning("The newly added mac address (%pM) already exists on: %s\n",
-			   net_dev->dev_addr, hard_iface->net_dev->name);
-		pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
+		pr_warn("The newly added mac address (%pM) already exists on: %s\n",
+			net_dev->dev_addr, hard_iface->net_dev->name);
+		pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
 	}
 	rcu_read_unlock();
 }
@@ -230,7 +228,7 @@
 
 	bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
+	bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
 	hard_iface->if_status = IF_TO_BE_ACTIVATED;
 
 	/**
@@ -300,22 +298,17 @@
 	if (!softif_is_valid(soft_iface)) {
 		pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
 		       soft_iface->name);
-		dev_put(soft_iface);
 		ret = -EINVAL;
-		goto err;
+		goto err_dev;
 	}
 
 	hard_iface->soft_iface = soft_iface;
 	bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	bat_priv->bat_algo_ops->bat_ogm_init(hard_iface);
-
-	if (!hard_iface->packet_buff) {
-		bat_err(hard_iface->soft_iface,
-			"Can't add interface packet (%s): out of memory\n",
-			hard_iface->net_dev->name);
+	ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
+	if (ret < 0) {
 		ret = -ENOMEM;
-		goto err;
+		goto err_dev;
 	}
 
 	hard_iface->if_num = bat_priv->num_ifaces;
@@ -328,7 +321,6 @@
 	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
 	dev_add_pack(&hard_iface->batman_adv_ptype);
 
-	atomic_set(&hard_iface->seqno, 1);
 	atomic_set(&hard_iface->frag_seqno, 1);
 	bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
 		 hard_iface->net_dev->name);
@@ -360,6 +352,8 @@
 out:
 	return 0;
 
+err_dev:
+	dev_put(soft_iface);
 err:
 	hardif_free_ref(hard_iface);
 	return ret;
@@ -394,8 +388,7 @@
 			hardif_free_ref(new_if);
 	}
 
-	kfree(hard_iface->packet_buff);
-	hard_iface->packet_buff = NULL;
+	bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
 	hard_iface->if_status = IF_NOT_IN_USE;
 
 	/* delete all references to this hard_iface */
@@ -447,6 +440,13 @@
 	check_known_mac_addr(hard_iface->net_dev);
 	list_add_tail_rcu(&hard_iface->list, &hardif_list);
 
+	/**
+	 * This can't be called via a bat_priv callback because
+	 * we have no bat_priv yet.
+	 */
+	atomic_set(&hard_iface->seqno, 1);
+	hard_iface->packet_buff = NULL;
+
 	return hard_iface;
 
 free_if:
@@ -524,14 +524,14 @@
 		check_known_mac_addr(hard_iface->net_dev);
 
 		bat_priv = netdev_priv(hard_iface->soft_iface);
-		bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
+		bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
 
 		primary_if = primary_if_get_selected(bat_priv);
 		if (!primary_if)
 			goto hardif_put;
 
 		if (hard_iface == primary_if)
-			primary_if_update_addr(bat_priv);
+			primary_if_update_addr(bat_priv, NULL);
 		break;
 	default:
 		break;
@@ -545,114 +545,6 @@
 	return NOTIFY_DONE;
 }
 
-/* incoming packets with the batman ethertype received on any active hard
- * interface */
-static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
-			   struct packet_type *ptype,
-			   struct net_device *orig_dev)
-{
-	struct bat_priv *bat_priv;
-	struct batman_ogm_packet *batman_ogm_packet;
-	struct hard_iface *hard_iface;
-	int ret;
-
-	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
-	skb = skb_share_check(skb, GFP_ATOMIC);
-
-	/* skb was released by skb_share_check() */
-	if (!skb)
-		goto err_out;
-
-	/* packet should hold at least type and version */
-	if (unlikely(!pskb_may_pull(skb, 2)))
-		goto err_free;
-
-	/* expect a valid ethernet header here. */
-	if (unlikely(skb->mac_len != sizeof(struct ethhdr) ||
-		     !skb_mac_header(skb)))
-		goto err_free;
-
-	if (!hard_iface->soft_iface)
-		goto err_free;
-
-	bat_priv = netdev_priv(hard_iface->soft_iface);
-
-	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
-		goto err_free;
-
-	/* discard frames on not active interfaces */
-	if (hard_iface->if_status != IF_ACTIVE)
-		goto err_free;
-
-	batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
-
-	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: incompatible batman version (%i)\n",
-			batman_ogm_packet->header.version);
-		goto err_free;
-	}
-
-	/* all receive handlers return whether they received or reused
-	 * the supplied skb. if not, we have to free the skb. */
-
-	switch (batman_ogm_packet->header.packet_type) {
-		/* batman originator packet */
-	case BAT_OGM:
-		ret = recv_bat_ogm_packet(skb, hard_iface);
-		break;
-
-		/* batman icmp packet */
-	case BAT_ICMP:
-		ret = recv_icmp_packet(skb, hard_iface);
-		break;
-
-		/* unicast packet */
-	case BAT_UNICAST:
-		ret = recv_unicast_packet(skb, hard_iface);
-		break;
-
-		/* fragmented unicast packet */
-	case BAT_UNICAST_FRAG:
-		ret = recv_ucast_frag_packet(skb, hard_iface);
-		break;
-
-		/* broadcast packet */
-	case BAT_BCAST:
-		ret = recv_bcast_packet(skb, hard_iface);
-		break;
-
-		/* vis packet */
-	case BAT_VIS:
-		ret = recv_vis_packet(skb, hard_iface);
-		break;
-		/* Translation table query (request or response) */
-	case BAT_TT_QUERY:
-		ret = recv_tt_query(skb, hard_iface);
-		break;
-		/* Roaming advertisement */
-	case BAT_ROAM_ADV:
-		ret = recv_roam_adv(skb, hard_iface);
-		break;
-	default:
-		ret = NET_RX_DROP;
-	}
-
-	if (ret == NET_RX_DROP)
-		kfree_skb(skb);
-
-	/* return NET_RX_SUCCESS in any case as we
-	 * most probably dropped the packet for
-	 * routing-logical reasons. */
-
-	return NET_RX_SUCCESS;
-
-err_free:
-	kfree_skb(skb);
-err_out:
-	return NET_RX_DROP;
-}
-
 /* This function returns true if the interface represented by ifindex is a
  * 802.11 wireless device */
 bool is_wifi_iface(int ifindex)
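As the hunks above show, hard-interface.c now drives the routing algorithm through bat_iface_enable/bat_iface_disable/bat_iface_update_mac/bat_primary_iface_set instead of the old bat_ogm_* hooks. A hedged sketch of a hypothetical algorithm providing those callbacks; the int return of the enable hook is taken from the error check above, while the void return types of the others are assumptions:

static int example_iface_enable(struct hard_iface *hard_iface)
{
	/* allocate per-interface state (e.g. an OGM packet buffer);
	 * a negative return makes the caller abort with -ENOMEM
	 */
	return 0;
}

static void example_iface_disable(struct hard_iface *hard_iface)
{
	/* free whatever example_iface_enable() allocated */
}

static void example_iface_update_mac(struct hard_iface *hard_iface)
{
	/* copy the (possibly changed) interface MAC into pending packets */
}

static void example_primary_iface_set(struct hard_iface *hard_iface)
{
	/* react to this interface becoming the new primary interface */
}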
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index b87518e..2e98a57 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -175,13 +175,13 @@
 	if (len >= sizeof(struct icmp_packet_rr))
 		packet_len = sizeof(struct icmp_packet_rr);
 
-	skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr));
+	skb = dev_alloc_skb(packet_len + ETH_HLEN);
 	if (!skb) {
 		len = -ENOMEM;
 		goto out;
 	}
 
-	skb_reserve(skb, sizeof(struct ethhdr));
+	skb_reserve(skb, ETH_HLEN);
 	icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
 
 	if (copy_from_user(icmp_packet, buff, packet_len)) {
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 6d51caa..083a299 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -30,6 +30,7 @@
 #include "translation-table.h"
 #include "hard-interface.h"
 #include "gateway_client.h"
+#include "bridge_loop_avoidance.h"
 #include "vis.h"
 #include "hash.h"
 #include "bat_algo.h"
@@ -38,6 +39,7 @@
 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
  * list traversals just rcu-locked */
 struct list_head hardif_list;
+static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);
 char bat_routing_algo[20] = "BATMAN IV";
 static struct hlist_head bat_algo_list;
 
@@ -45,11 +47,15 @@
 
 struct workqueue_struct *bat_event_workqueue;
 
+static void recv_handler_init(void);
+
 static int __init batman_init(void)
 {
 	INIT_LIST_HEAD(&hardif_list);
 	INIT_HLIST_HEAD(&bat_algo_list);
 
+	recv_handler_init();
+
 	bat_iv_init();
 
 	/* the name should not be longer than 10 chars - see
@@ -96,13 +102,10 @@
 	spin_lock_init(&bat_priv->gw_list_lock);
 	spin_lock_init(&bat_priv->vis_hash_lock);
 	spin_lock_init(&bat_priv->vis_list_lock);
-	spin_lock_init(&bat_priv->softif_neigh_lock);
-	spin_lock_init(&bat_priv->softif_neigh_vid_lock);
 
 	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
 	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
 	INIT_HLIST_HEAD(&bat_priv->gw_list);
-	INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
 	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
 	INIT_LIST_HEAD(&bat_priv->tt_req_list);
 	INIT_LIST_HEAD(&bat_priv->tt_roam_list);
@@ -118,6 +121,9 @@
 	if (vis_init(bat_priv) < 1)
 		goto err;
 
+	if (bla_init(bat_priv) < 1)
+		goto err;
+
 	atomic_set(&bat_priv->gw_reselect, 0);
 	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
 	goto end;
@@ -145,7 +151,7 @@
 
 	tt_free(bat_priv);
 
-	softif_neigh_purge(bat_priv);
+	bla_free(bat_priv);
 
 	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
 }
@@ -178,6 +184,120 @@
 	return 0;
 }
 
+static int recv_unhandled_packet(struct sk_buff *skb,
+				 struct hard_iface *recv_if)
+{
+	return NET_RX_DROP;
+}
+
+/* incoming packets with the batman ethertype received on any active hard
+ * interface
+ */
+int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+		    struct packet_type *ptype, struct net_device *orig_dev)
+{
+	struct bat_priv *bat_priv;
+	struct batman_ogm_packet *batman_ogm_packet;
+	struct hard_iface *hard_iface;
+	uint8_t idx;
+	int ret;
+
+	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
+	skb = skb_share_check(skb, GFP_ATOMIC);
+
+	/* skb was released by skb_share_check() */
+	if (!skb)
+		goto err_out;
+
+	/* packet should hold at least type and version */
+	if (unlikely(!pskb_may_pull(skb, 2)))
+		goto err_free;
+
+	/* expect a valid ethernet header here. */
+	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
+		goto err_free;
+
+	if (!hard_iface->soft_iface)
+		goto err_free;
+
+	bat_priv = netdev_priv(hard_iface->soft_iface);
+
+	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+		goto err_free;
+
+	/* discard frames on not active interfaces */
+	if (hard_iface->if_status != IF_ACTIVE)
+		goto err_free;
+
+	batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
+
+	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Drop packet: incompatible batman version (%i)\n",
+			batman_ogm_packet->header.version);
+		goto err_free;
+	}
+
+	/* all receive handlers return whether they received or reused
+	 * the supplied skb. if not, we have to free the skb.
+	 */
+	idx = batman_ogm_packet->header.packet_type;
+	ret = (*recv_packet_handler[idx])(skb, hard_iface);
+
+	if (ret == NET_RX_DROP)
+		kfree_skb(skb);
+
+	/* return NET_RX_SUCCESS in any case as we
+	 * most probably dropped the packet for
+	 * routing-logical reasons.
+	 */
+	return NET_RX_SUCCESS;
+
+err_free:
+	kfree_skb(skb);
+err_out:
+	return NET_RX_DROP;
+}
+
+static void recv_handler_init(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
+		recv_packet_handler[i] = recv_unhandled_packet;
+
+	/* batman icmp packet */
+	recv_packet_handler[BAT_ICMP] = recv_icmp_packet;
+	/* unicast packet */
+	recv_packet_handler[BAT_UNICAST] = recv_unicast_packet;
+	/* fragmented unicast packet */
+	recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet;
+	/* broadcast packet */
+	recv_packet_handler[BAT_BCAST] = recv_bcast_packet;
+	/* vis packet */
+	recv_packet_handler[BAT_VIS] = recv_vis_packet;
+	/* Translation table query (request or response) */
+	recv_packet_handler[BAT_TT_QUERY] = recv_tt_query;
+	/* Roaming advertisement */
+	recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv;
+}
+
+int recv_handler_register(uint8_t packet_type,
+			  int (*recv_handler)(struct sk_buff *,
+					      struct hard_iface *))
+{
+	if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
+		return -EBUSY;
+
+	recv_packet_handler[packet_type] = recv_handler;
+	return 0;
+}
+
+void recv_handler_unregister(uint8_t packet_type)
+{
+	recv_packet_handler[packet_type] = recv_unhandled_packet;
+}
+
 static struct bat_algo_ops *bat_algo_get(char *name)
 {
 	struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
@@ -207,12 +327,12 @@
 	}
 
 	/* all algorithms must implement all ops (for now) */
-	if (!bat_algo_ops->bat_ogm_init ||
-	    !bat_algo_ops->bat_ogm_init_primary ||
-	    !bat_algo_ops->bat_ogm_update_mac ||
+	if (!bat_algo_ops->bat_iface_enable ||
+	    !bat_algo_ops->bat_iface_disable ||
+	    !bat_algo_ops->bat_iface_update_mac ||
+	    !bat_algo_ops->bat_primary_iface_set ||
 	    !bat_algo_ops->bat_ogm_schedule ||
-	    !bat_algo_ops->bat_ogm_emit ||
-	    !bat_algo_ops->bat_ogm_receive) {
+	    !bat_algo_ops->bat_ogm_emit) {
 		pr_info("Routing algo '%s' does not implement required ops\n",
 			bat_algo_ops->name);
 		goto out;
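The new recv_handler_register()/recv_handler_unregister() pair added above lets packet types be dispatched through the 256-entry handler table instead of the old switch statement in batman_skb_recv(). A hedged usage sketch (hypothetical handler; the packet type constant is the one from packet.h):

/* illustrative only: hook a handler for BAT_IV_OGM packets */
static int example_recv_ogm(struct sk_buff *skb, struct hard_iface *recv_if)
{
	/* return NET_RX_DROP if the skb was neither consumed nor reused,
	 * so that batman_skb_recv() frees it
	 */
	return NET_RX_DROP;
}

static int example_register(void)
{
	/* fails with -EBUSY if another handler already owns the slot */
	return recv_handler_register(BAT_IV_OGM, example_recv_ogm);
}

static void example_unregister(void)
{
	recv_handler_unregister(BAT_IV_OGM);
}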
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 94fa1c2..f4a3ec0 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -28,7 +28,7 @@
 #define DRIVER_DEVICE "batman-adv"
 
 #ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2012.1.0"
+#define SOURCE_VERSION "2012.2.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -65,7 +65,7 @@
 
 #define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
 
-#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
+#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE)
 
 #define LOG_BUF_LEN 8192	  /* has to be a power of 2 */
 
@@ -80,8 +80,12 @@
 #define MAX_AGGREGATION_BYTES 512
 #define MAX_AGGREGATION_MS 100
 
-#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */
+#define BLA_PERIOD_LENGTH	10000	/* 10 seconds */
+#define BLA_BACKBONE_TIMEOUT	(BLA_PERIOD_LENGTH * 3)
+#define BLA_CLAIM_TIMEOUT	(BLA_PERIOD_LENGTH * 10)
 
+#define DUPLIST_SIZE		16
+#define DUPLIST_TIMEOUT		500	/* 500 ms */
 /* don't reset again within 30 seconds */
 #define RESET_PROTECTION_MS 30000
 #define EXPECTED_SEQNO_RANGE	65536
@@ -119,7 +123,8 @@
 	DBG_BATMAN = 1 << 0,
 	DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
 	DBG_TT	   = 1 << 2, /* translation table operations */
-	DBG_ALL    = 7
+	DBG_BLA    = 1 << 3, /* bridge loop avoidance */
+	DBG_ALL    = 15
 };
 
 /* Kernel headers */
@@ -150,6 +155,12 @@
 void inc_module_count(void);
 void dec_module_count(void);
 int is_my_mac(const uint8_t *addr);
+int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+		    struct packet_type *ptype, struct net_device *orig_dev);
+int recv_handler_register(uint8_t packet_type,
+			  int (*recv_handler)(struct sk_buff *,
+					      struct hard_iface *));
+void recv_handler_unregister(uint8_t packet_type);
 int bat_algo_register(struct bat_algo_ops *bat_algo_ops);
 int bat_algo_select(struct bat_priv *bat_priv, char *name);
 int bat_algo_seq_print_text(struct seq_file *seq, void *offset);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 43c0a4f..41147942 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -28,13 +28,15 @@
 #include "hard-interface.h"
 #include "unicast.h"
 #include "soft-interface.h"
+#include "bridge_loop_avoidance.h"
 
 static void purge_orig(struct work_struct *work);
 
 static void start_purge_timer(struct bat_priv *bat_priv)
 {
 	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
-	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
+	queue_delayed_work(bat_event_workqueue,
+			   &bat_priv->orig_work, msecs_to_jiffies(1000));
 }
 
 /* returns 1 if they are the same originator */
@@ -83,35 +85,30 @@
 	return router;
 }
 
-struct neigh_node *create_neighbor(struct orig_node *orig_node,
-				   struct orig_node *orig_neigh_node,
-				   const uint8_t *neigh,
-				   struct hard_iface *if_incoming)
+struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
+					 const uint8_t *neigh_addr,
+					 uint32_t seqno)
 {
-	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct neigh_node *neigh_node;
 
-	bat_dbg(DBG_BATMAN, bat_priv,
-		"Creating new last-hop neighbor of originator\n");
-
 	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
 	if (!neigh_node)
-		return NULL;
+		goto out;
 
 	INIT_HLIST_NODE(&neigh_node->list);
-	INIT_LIST_HEAD(&neigh_node->bonding_list);
-	spin_lock_init(&neigh_node->tq_lock);
 
-	memcpy(neigh_node->addr, neigh, ETH_ALEN);
-	neigh_node->orig_node = orig_neigh_node;
-	neigh_node->if_incoming = if_incoming;
+	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
+	spin_lock_init(&neigh_node->lq_update_lock);
 
 	/* extra reference for return */
 	atomic_set(&neigh_node->refcount, 2);
 
-	spin_lock_bh(&orig_node->neigh_list_lock);
-	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
-	spin_unlock_bh(&orig_node->neigh_list_lock);
+	bat_dbg(DBG_BATMAN, bat_priv,
+		"Creating new neighbor %pM, initial seqno %d\n",
+		neigh_addr, seqno);
+
+out:
 	return neigh_node;
 }
 
@@ -273,6 +270,7 @@
 	struct hlist_node *node, *node_tmp;
 	struct neigh_node *neigh_node;
 	bool neigh_purged = false;
+	unsigned long last_seen;
 
 	*best_neigh_node = NULL;
 
@@ -282,11 +280,13 @@
 	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
 				  &orig_node->neigh_list, list) {
 
-		if ((has_timed_out(neigh_node->last_valid, PURGE_TIMEOUT)) ||
+		if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) ||
 		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
 		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
 		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
 
+			last_seen = neigh_node->last_seen;
+
 			if ((neigh_node->if_incoming->if_status ==
 								IF_INACTIVE) ||
 			    (neigh_node->if_incoming->if_status ==
@@ -299,9 +299,9 @@
 					neigh_node->if_incoming->net_dev->name);
 			else
 				bat_dbg(DBG_BATMAN, bat_priv,
-					"neighbor timeout: originator %pM, neighbor: %pM, last_valid: %lu\n",
+					"neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
 					orig_node->orig, neigh_node->addr,
-					(neigh_node->last_valid / HZ));
+					jiffies_to_msecs(last_seen));
 
 			neigh_purged = true;
 
@@ -324,10 +324,11 @@
 {
 	struct neigh_node *best_neigh_node;
 
-	if (has_timed_out(orig_node->last_valid, 2 * PURGE_TIMEOUT)) {
+	if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) {
 		bat_dbg(DBG_BATMAN, bat_priv,
-			"Originator timeout: originator %pM, last_valid %lu\n",
-			orig_node->orig, (orig_node->last_valid / HZ));
+			"Originator timeout: originator %pM, last_seen %u\n",
+			orig_node->orig,
+			jiffies_to_msecs(orig_node->last_seen));
 		return true;
 	} else {
 		if (purge_orig_neighbors(bat_priv, orig_node,
@@ -375,8 +376,6 @@
 
 	gw_node_purge(bat_priv);
 	gw_election(bat_priv);
-
-	softif_neigh_purge(bat_priv);
 }
 
 static void purge_orig(struct work_struct *work)
@@ -447,9 +446,9 @@
 				goto next;
 
 			last_seen_secs = jiffies_to_msecs(jiffies -
-						orig_node->last_valid) / 1000;
+						orig_node->last_seen) / 1000;
 			last_seen_msecs = jiffies_to_msecs(jiffies -
-						orig_node->last_valid) % 1000;
+						orig_node->last_seen) % 1000;
 
 			seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
 				   orig_node->orig, last_seen_secs,
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 3fe2eda..f74d0d6 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -29,10 +29,9 @@
 void purge_orig_ref(struct bat_priv *bat_priv);
 void orig_node_free_ref(struct orig_node *orig_node);
 struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr);
-struct neigh_node *create_neighbor(struct orig_node *orig_node,
-				   struct orig_node *orig_neigh_node,
-				   const uint8_t *neigh,
-				   struct hard_iface *if_incoming);
+struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
+					 const uint8_t *neigh_addr,
+					 uint32_t seqno);
 void neigh_node_free_ref(struct neigh_node *neigh_node);
 struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
 int orig_seq_print_text(struct seq_file *seq, void *offset);
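batadv_neigh_node_new() replaces create_neighbor() but, as the originator.c hunk shows, no longer sets orig_node/if_incoming or links the node into the neighbor list; that is now left to the caller (presumably the routing algorithm code, which is not shown here). A hedged sketch of that wiring, using only the fields visible in the removed create_neighbor():

/* illustrative only - not code from this commit */
static struct neigh_node *
example_neigh_create(struct hard_iface *hard_iface,
		     struct orig_node *orig_node,
		     struct orig_node *orig_neigh_node,
		     const uint8_t *neigh_addr, uint32_t seqno)
{
	struct neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
	if (!neigh_node)
		return NULL;

	/* wiring that create_neighbor() used to do internally */
	neigh_node->orig_node = orig_neigh_node;
	neigh_node->if_incoming = hard_iface;

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	return neigh_node;
}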
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 441f3db..0ee1af7 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -25,7 +25,7 @@
 #define ETH_P_BATMAN  0x4305	/* unofficial/not registered Ethertype */
 
 enum bat_packettype {
-	BAT_OGM		 = 0x01,
+	BAT_IV_OGM	 = 0x01,
 	BAT_ICMP	 = 0x02,
 	BAT_UNICAST	 = 0x03,
 	BAT_BCAST	 = 0x04,
@@ -38,7 +38,8 @@
 /* this file is included by batctl which needs these defines */
 #define COMPAT_VERSION 14
 
-enum batman_flags {
+enum batman_iv_flags {
+	NOT_BEST_NEXT_HOP   = 1 << 3,
 	PRIMARIES_FIRST_HOP = 1 << 4,
 	VIS_SERVER	    = 1 << 5,
 	DIRECTLINK	    = 1 << 6
@@ -90,6 +91,23 @@
 	TT_CLIENT_PENDING = 1 << 10
 };
 
+/* claim frame types for the bridge loop avoidance */
+enum bla_claimframe {
+	CLAIM_TYPE_ADD		= 0x00,
+	CLAIM_TYPE_DEL		= 0x01,
+	CLAIM_TYPE_ANNOUNCE	= 0x02,
+	CLAIM_TYPE_REQUEST	= 0x03
+};
+
+/* the destination hardware field in the ARP frame is used to
+ * transport the claim type and the group id
+ */
+struct bla_claim_dst {
+	uint8_t magic[3];	/* FF:43:05 */
+	uint8_t type;		/* bla_claimframe */
+	uint16_t group;		/* group id */
+} __packed;
+
 struct batman_header {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
@@ -100,8 +118,8 @@
 	struct batman_header header;
 	uint8_t  flags;    /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
 	uint32_t seqno;
-	uint8_t  orig[6];
-	uint8_t  prev_sender[6];
+	uint8_t  orig[ETH_ALEN];
+	uint8_t  prev_sender[ETH_ALEN];
 	uint8_t  gw_flags;  /* flags related to gateway class */
 	uint8_t  tq;
 	uint8_t  tt_num_changes;
@@ -109,13 +127,13 @@
 	uint16_t tt_crc;
 } __packed;
 
-#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet)
+#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet)
 
 struct icmp_packet {
 	struct batman_header header;
 	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[6];
-	uint8_t  orig[6];
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  orig[ETH_ALEN];
 	uint16_t seqno;
 	uint8_t  uid;
 	uint8_t  reserved;
@@ -128,8 +146,8 @@
 struct icmp_packet_rr {
 	struct batman_header header;
 	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[6];
-	uint8_t  orig[6];
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  orig[ETH_ALEN];
 	uint16_t seqno;
 	uint8_t  uid;
 	uint8_t  rr_cur;
@@ -139,16 +157,16 @@
 struct unicast_packet {
 	struct batman_header header;
 	uint8_t  ttvn; /* destination translation table version number */
-	uint8_t  dest[6];
+	uint8_t  dest[ETH_ALEN];
 } __packed;
 
 struct unicast_frag_packet {
 	struct batman_header header;
 	uint8_t  ttvn; /* destination translation table version number */
-	uint8_t  dest[6];
+	uint8_t  dest[ETH_ALEN];
 	uint8_t  flags;
 	uint8_t  align;
-	uint8_t  orig[6];
+	uint8_t  orig[ETH_ALEN];
 	uint16_t seqno;
 } __packed;
 
@@ -156,7 +174,7 @@
 	struct batman_header header;
 	uint8_t  reserved;
 	uint32_t seqno;
-	uint8_t  orig[6];
+	uint8_t  orig[ETH_ALEN];
 } __packed;
 
 struct vis_packet {
@@ -165,9 +183,9 @@
 	uint32_t seqno;		 /* sequence number */
 	uint8_t  entries;	 /* number of entries behind this struct */
 	uint8_t  reserved;
-	uint8_t  vis_orig[6];	 /* originator that announces its neighbors */
-	uint8_t  target_orig[6]; /* who should receive this packet */
-	uint8_t  sender_orig[6]; /* who sent or rebroadcasted this packet */
+	uint8_t  vis_orig[ETH_ALEN];	/* originator reporting its neighbors */
+	uint8_t  target_orig[ETH_ALEN]; /* who should receive this packet */
+	uint8_t  sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
 } __packed;
 
 struct tt_query_packet {
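As described in the comment above, the claim type and group id are carried in the 6-byte destination hardware address of the claim ARP frame. A small illustrative sketch (not from the patch) of assembling such a destination:

/* illustrative only: fill a bla_claim_dst with the fixed magic FF:43:05,
 * the claim type and the group id (byte order of the group id is left to
 * the caller here)
 */
static void example_build_claim_dst(struct bla_claim_dst *dst, uint8_t type,
				    uint16_t group)
{
	static const uint8_t magic[3] = {0xff, 0x43, 0x05};

	memcpy(dst->magic, magic, sizeof(dst->magic));
	dst->type = type;	/* one of the CLAIM_TYPE_* values */
	dst->group = group;
}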
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 7f8e158..840e2c6 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -29,6 +29,10 @@
 #include "originator.h"
 #include "vis.h"
 #include "unicast.h"
+#include "bridge_loop_avoidance.h"
+
+static int route_unicast_packet(struct sk_buff *skb,
+				struct hard_iface *recv_if);
 
 void slide_own_bcast_window(struct hard_iface *hard_iface)
 {
@@ -52,7 +56,7 @@
 
 			bit_get_packet(bat_priv, word, 1, 0);
 			orig_node->bcast_own_sum[hard_iface->if_num] =
-				bit_packet_count(word);
+				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
 			spin_unlock_bh(&orig_node->ogm_cnt_lock);
 		}
 		rcu_read_unlock();
@@ -230,51 +234,46 @@
 {
 	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
 	    (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
-		if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) {
-
-			*last_reset = jiffies;
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"old packet received, start protection\n");
-
-			return 0;
-		} else {
+		if (!has_timed_out(*last_reset, RESET_PROTECTION_MS))
 			return 1;
-		}
+
+		*last_reset = jiffies;
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"old packet received, start protection\n");
 	}
+
 	return 0;
 }
 
-int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
+bool check_management_packet(struct sk_buff *skb,
+			     struct hard_iface *hard_iface,
+			     int header_len)
 {
-	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct ethhdr *ethhdr;
 
 	/* drop packet if it has not necessary minimum size */
-	if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN)))
-		return NET_RX_DROP;
+	if (unlikely(!pskb_may_pull(skb, header_len)))
+		return false;
 
 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
 	/* packet with broadcast indication but unicast recipient */
 	if (!is_broadcast_ether_addr(ethhdr->h_dest))
-		return NET_RX_DROP;
+		return false;
 
 	/* packet with broadcast sender address */
 	if (is_broadcast_ether_addr(ethhdr->h_source))
-		return NET_RX_DROP;
+		return false;
 
 	/* create a copy of the skb, if needed, to modify it. */
 	if (skb_cow(skb, 0) < 0)
-		return NET_RX_DROP;
+		return false;
 
 	/* keep skb linear */
 	if (skb_linearize(skb) < 0)
-		return NET_RX_DROP;
+		return false;
 
-	bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb);
-
-	kfree_skb(skb);
-	return NET_RX_SUCCESS;
+	return true;
 }
 
 static int recv_my_icmp_packet(struct bat_priv *bat_priv,
@@ -309,7 +308,7 @@
 		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
 
 	icmp_packet = (struct icmp_packet_rr *)skb->data;
@@ -364,7 +363,7 @@
 		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
 
 	icmp_packet = (struct icmp_packet *)skb->data;
@@ -450,7 +449,7 @@
 		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
 
 	icmp_packet = (struct icmp_packet_rr *)skb->data;
@@ -669,6 +668,13 @@
 	if (!is_my_mac(roam_adv_packet->dst))
 		return route_unicast_packet(skb, recv_if);
 
+	/* check if it is a backbone gateway. we don't accept
+	 * roaming advertisements from it, as it has the same
+	 * entries as we have.
+	 */
+	if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
+		goto out;
+
 	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
 	if (!orig_node)
 		goto out;
@@ -798,7 +804,7 @@
 	return 0;
 }
 
-int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
 	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct orig_node *orig_node = NULL;
@@ -830,7 +836,7 @@
 		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+	if (skb_cow(skb, ETH_HLEN) < 0)
 		goto out;
 
 	unicast_packet = (struct unicast_packet *)skb->data;
@@ -907,12 +913,20 @@
 
 	/* Check whether I have to reroute the packet */
 	if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
-		/* Linearize the skb before accessing it */
-		if (skb_linearize(skb) < 0)
+		/* check if there is enough data before accessing it */
+		if (!pskb_may_pull(skb, sizeof(struct unicast_packet) +
+				   ETH_HLEN))
 			return 0;
 
 		ethhdr = (struct ethhdr *)(skb->data +
 			sizeof(struct unicast_packet));
+
+		/* we don't have an updated route for this client, so we should
+		 * not try to reroute the packet!!
+		 */
+		if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
+			return 1;
+
 		orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);
 
 		if (!orig_node) {
@@ -1047,8 +1061,8 @@
 	spin_lock_bh(&orig_node->bcast_seqno_lock);
 
 	/* check whether the packet is a duplicate */
-	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
-			   ntohl(bcast_packet->seqno)))
+	if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
+			 ntohl(bcast_packet->seqno)))
 		goto spin_unlock;
 
 	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
@@ -1065,9 +1079,19 @@
 
 	spin_unlock_bh(&orig_node->bcast_seqno_lock);
 
+	/* check whether this has been sent by another originator before */
+	if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+		goto out;
+
 	/* rebroadcast packet */
 	add_bcast_packet_to_list(bat_priv, skb, 1);
 
+	/* don't hand the broadcast up if it is from an originator
+	 * from the same backbone.
+	 */
+	if (bla_is_backbone_gw(skb, orig_node, hdr_size))
+		goto out;
+
 	/* broadcast for me */
 	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
 	ret = NET_RX_SUCCESS;
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 92ac100..d6bbbeb 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -23,15 +23,16 @@
 #define _NET_BATMAN_ADV_ROUTING_H_
 
 void slide_own_bcast_window(struct hard_iface *hard_iface);
+bool check_management_packet(struct sk_buff *skb,
+			     struct hard_iface *hard_iface,
+			     int header_len);
 void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
 		  struct neigh_node *neigh_node);
-int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
 struct neigh_node *find_router(struct bat_priv *bat_priv,
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index af7a674..f47299f 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -45,13 +45,13 @@
 		goto send_skb_err;
 
 	if (!(hard_iface->net_dev->flags & IFF_UP)) {
-		pr_warning("Interface %s is not up - can't send packet via that interface!\n",
-			   hard_iface->net_dev->name);
+		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
+			hard_iface->net_dev->name);
 		goto send_skb_err;
 	}
 
 	/* push to the ethernet header. */
-	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
+	if (my_skb_head_push(skb, ETH_HLEN) < 0)
 		goto send_skb_err;
 
 	skb_reset_mac_header(skb);
@@ -87,7 +87,7 @@
 	/* keep old buffer if kmalloc should fail */
 	if (new_buff) {
 		memcpy(new_buff, hard_iface->packet_buff,
-		       BATMAN_OGM_LEN);
+		       BATMAN_OGM_HLEN);
 
 		kfree(hard_iface->packet_buff);
 		hard_iface->packet_buff = new_buff;
@@ -101,13 +101,13 @@
 {
 	int new_len;
 
-	new_len = BATMAN_OGM_LEN +
+	new_len = BATMAN_OGM_HLEN +
 		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
 
 	/* if we have too many changes for one packet don't send any
 	 * and wait for the tt table request which will be fragmented */
 	if (new_len > hard_iface->soft_iface->mtu)
-		new_len = BATMAN_OGM_LEN;
+		new_len = BATMAN_OGM_HLEN;
 
 	realloc_packet_buffer(hard_iface, new_len);
 
@@ -117,14 +117,14 @@
 	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
 
 	return tt_changes_fill_buffer(bat_priv,
-				      hard_iface->packet_buff + BATMAN_OGM_LEN,
-				      hard_iface->packet_len - BATMAN_OGM_LEN);
+				      hard_iface->packet_buff + BATMAN_OGM_HLEN,
+				      hard_iface->packet_len - BATMAN_OGM_HLEN);
 }
 
 static int reset_packet_buffer(struct bat_priv *bat_priv,
 				struct hard_iface *hard_iface)
 {
-	realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
+	realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
 	return 0;
 }
 
@@ -292,7 +292,7 @@
 	/* if we still have some more bcasts to send */
 	if (forw_packet->num_packets < 3) {
 		_add_bcast_packet_to_list(bat_priv, forw_packet,
-					  ((5 * HZ) / 1000));
+					  msecs_to_jiffies(5));
 		return;
 	}
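A brief illustration of why the patch replaces the open-coded HZ arithmetic above with msecs_to_jiffies(): with a small HZ the integer division silently truncates to zero, while msecs_to_jiffies() rounds up to at least one jiffy.

/* illustrative only, e.g. on a kernel built with HZ=100:
 *
 *   (5 * HZ) / 1000      -> 500 / 1000 -> 0   (rebroadcast requeued with no delay)
 *   msecs_to_jiffies(5)  -> 1                 (at least one jiffy of delay)
 */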
 
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index a5590f4..6e2530b 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -36,6 +36,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include "unicast.h"
+#include "bridge_loop_avoidance.h"
 
 
 static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -73,439 +74,6 @@
 	return 0;
 }
 
-static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
-{
-	if (atomic_dec_and_test(&softif_neigh->refcount))
-		kfree_rcu(softif_neigh, rcu);
-}
-
-static void softif_neigh_vid_free_rcu(struct rcu_head *rcu)
-{
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct softif_neigh *softif_neigh;
-	struct hlist_node *node, *node_tmp;
-	struct bat_priv *bat_priv;
-
-	softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu);
-	bat_priv = softif_neigh_vid->bat_priv;
-
-	spin_lock_bh(&bat_priv->softif_neigh_lock);
-	hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
-				  &softif_neigh_vid->softif_neigh_list, list) {
-		hlist_del_rcu(&softif_neigh->list);
-		softif_neigh_free_ref(softif_neigh);
-	}
-	spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-	kfree(softif_neigh_vid);
-}
-
-static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid)
-{
-	if (atomic_dec_and_test(&softif_neigh_vid->refcount))
-		call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu);
-}
-
-static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
-						     short vid)
-{
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct hlist_node *node;
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(softif_neigh_vid, node,
-				 &bat_priv->softif_neigh_vids, list) {
-		if (softif_neigh_vid->vid != vid)
-			continue;
-
-		if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
-			continue;
-
-		goto out;
-	}
-
-	softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC);
-	if (!softif_neigh_vid)
-		goto out;
-
-	softif_neigh_vid->vid = vid;
-	softif_neigh_vid->bat_priv = bat_priv;
-
-	/* initialize with 2 - caller decrements counter by one */
-	atomic_set(&softif_neigh_vid->refcount, 2);
-	INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list);
-	INIT_HLIST_NODE(&softif_neigh_vid->list);
-	spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
-	hlist_add_head_rcu(&softif_neigh_vid->list,
-			   &bat_priv->softif_neigh_vids);
-	spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
-
-out:
-	rcu_read_unlock();
-	return softif_neigh_vid;
-}
-
-static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
-					     const uint8_t *addr, short vid)
-{
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct softif_neigh *softif_neigh = NULL;
-	struct hlist_node *node;
-
-	softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
-	if (!softif_neigh_vid)
-		goto out;
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(softif_neigh, node,
-				 &softif_neigh_vid->softif_neigh_list,
-				 list) {
-		if (!compare_eth(softif_neigh->addr, addr))
-			continue;
-
-		if (!atomic_inc_not_zero(&softif_neigh->refcount))
-			continue;
-
-		softif_neigh->last_seen = jiffies;
-		goto unlock;
-	}
-
-	softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC);
-	if (!softif_neigh)
-		goto unlock;
-
-	memcpy(softif_neigh->addr, addr, ETH_ALEN);
-	softif_neigh->last_seen = jiffies;
-	/* initialize with 2 - caller decrements counter by one */
-	atomic_set(&softif_neigh->refcount, 2);
-
-	INIT_HLIST_NODE(&softif_neigh->list);
-	spin_lock_bh(&bat_priv->softif_neigh_lock);
-	hlist_add_head_rcu(&softif_neigh->list,
-			   &softif_neigh_vid->softif_neigh_list);
-	spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-unlock:
-	rcu_read_unlock();
-out:
-	if (softif_neigh_vid)
-		softif_neigh_vid_free_ref(softif_neigh_vid);
-	return softif_neigh;
-}
-
-static struct softif_neigh *softif_neigh_get_selected(
-				struct softif_neigh_vid *softif_neigh_vid)
-{
-	struct softif_neigh *softif_neigh;
-
-	rcu_read_lock();
-	softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
-
-	if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount))
-		softif_neigh = NULL;
-
-	rcu_read_unlock();
-	return softif_neigh;
-}
-
-static struct softif_neigh *softif_neigh_vid_get_selected(
-						struct bat_priv *bat_priv,
-						short vid)
-{
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct softif_neigh *softif_neigh = NULL;
-
-	softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
-	if (!softif_neigh_vid)
-		goto out;
-
-	softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-out:
-	if (softif_neigh_vid)
-		softif_neigh_vid_free_ref(softif_neigh_vid);
-	return softif_neigh;
-}
-
-static void softif_neigh_vid_select(struct bat_priv *bat_priv,
-				    struct softif_neigh *new_neigh,
-				    short vid)
-{
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct softif_neigh *curr_neigh;
-
-	softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
-	if (!softif_neigh_vid)
-		goto out;
-
-	spin_lock_bh(&bat_priv->softif_neigh_lock);
-
-	if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
-		new_neigh = NULL;
-
-	curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh,
-					       1);
-	rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
-
-	if ((curr_neigh) && (!new_neigh))
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Removing mesh exit point on vid: %d (prev: %pM).\n",
-			vid, curr_neigh->addr);
-	else if ((curr_neigh) && (new_neigh))
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Changing mesh exit point on vid: %d from %pM to %pM.\n",
-			vid, curr_neigh->addr, new_neigh->addr);
-	else if ((!curr_neigh) && (new_neigh))
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Setting mesh exit point on vid: %d to %pM.\n",
-			vid, new_neigh->addr);
-
-	if (curr_neigh)
-		softif_neigh_free_ref(curr_neigh);
-
-	spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-out:
-	if (softif_neigh_vid)
-		softif_neigh_vid_free_ref(softif_neigh_vid);
-}
-
-static void softif_neigh_vid_deselect(struct bat_priv *bat_priv,
-				      struct softif_neigh_vid *softif_neigh_vid)
-{
-	struct softif_neigh *curr_neigh;
-	struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp;
-	struct hard_iface *primary_if = NULL;
-	struct hlist_node *node;
-
-	primary_if = primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
-
-	/* find new softif_neigh immediately to avoid temporary loops */
-	rcu_read_lock();
-	curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
-
-	hlist_for_each_entry_rcu(softif_neigh_tmp, node,
-				 &softif_neigh_vid->softif_neigh_list,
-				 list) {
-		if (softif_neigh_tmp == curr_neigh)
-			continue;
-
-		/* we got a neighbor but its mac is 'bigger' than ours  */
-		if (memcmp(primary_if->net_dev->dev_addr,
-			   softif_neigh_tmp->addr, ETH_ALEN) < 0)
-			continue;
-
-		if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount))
-			continue;
-
-		softif_neigh = softif_neigh_tmp;
-		goto unlock;
-	}
-
-unlock:
-	rcu_read_unlock();
-out:
-	softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid);
-
-	if (primary_if)
-		hardif_free_ref(primary_if);
-	if (softif_neigh)
-		softif_neigh_free_ref(softif_neigh);
-}
-
-int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
-{
-	struct net_device *net_dev = (struct net_device *)seq->private;
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct softif_neigh *softif_neigh;
-	struct hard_iface *primary_if;
-	struct hlist_node *node, *node_tmp;
-	struct softif_neigh *curr_softif_neigh;
-	int ret = 0, last_seen_secs, last_seen_msecs;
-
-	primary_if = primary_if_get_selected(bat_priv);
-	if (!primary_if) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
-				 net_dev->name);
-		goto out;
-	}
-
-	if (primary_if->if_status != IF_ACTIVE) {
-		ret = seq_printf(seq,
-				 "BATMAN mesh %s disabled - primary interface not active\n",
-				 net_dev->name);
-		goto out;
-	}
-
-	seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(softif_neigh_vid, node,
-				 &bat_priv->softif_neigh_vids, list) {
-		seq_printf(seq, "     %-15s %s on vid: %d\n",
-			   "Originator", "last-seen", softif_neigh_vid->vid);
-
-		curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-
-		hlist_for_each_entry_rcu(softif_neigh, node_tmp,
-					 &softif_neigh_vid->softif_neigh_list,
-					 list) {
-			last_seen_secs = jiffies_to_msecs(jiffies -
-						softif_neigh->last_seen) / 1000;
-			last_seen_msecs = jiffies_to_msecs(jiffies -
-						softif_neigh->last_seen) % 1000;
-			seq_printf(seq, "%s %pM  %3i.%03is\n",
-				   curr_softif_neigh == softif_neigh
-				   ? "=>" : "  ", softif_neigh->addr,
-				   last_seen_secs, last_seen_msecs);
-		}
-
-		if (curr_softif_neigh)
-			softif_neigh_free_ref(curr_softif_neigh);
-
-		seq_printf(seq, "\n");
-	}
-	rcu_read_unlock();
-
-out:
-	if (primary_if)
-		hardif_free_ref(primary_if);
-	return ret;
-}
-
-void softif_neigh_purge(struct bat_priv *bat_priv)
-{
-	struct softif_neigh *softif_neigh, *curr_softif_neigh;
-	struct softif_neigh_vid *softif_neigh_vid;
-	struct hlist_node *node, *node_tmp, *node_tmp2;
-	int do_deselect;
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(softif_neigh_vid, node,
-				 &bat_priv->softif_neigh_vids, list) {
-		if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
-			continue;
-
-		curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
-		do_deselect = 0;
-
-		spin_lock_bh(&bat_priv->softif_neigh_lock);
-		hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
-					  &softif_neigh_vid->softif_neigh_list,
-					  list) {
-			if ((!has_timed_out(softif_neigh->last_seen,
-					    SOFTIF_NEIGH_TIMEOUT)) &&
-			    (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
-				continue;
-
-			if (curr_softif_neigh == softif_neigh) {
-				bat_dbg(DBG_ROUTES, bat_priv,
-					"Current mesh exit point on vid: %d '%pM' vanished.\n",
-					softif_neigh_vid->vid,
-					softif_neigh->addr);
-				do_deselect = 1;
-			}
-
-			hlist_del_rcu(&softif_neigh->list);
-			softif_neigh_free_ref(softif_neigh);
-		}
-		spin_unlock_bh(&bat_priv->softif_neigh_lock);
-
-		/* soft_neigh_vid_deselect() needs to acquire the
-		 * softif_neigh_lock */
-		if (do_deselect)
-			softif_neigh_vid_deselect(bat_priv, softif_neigh_vid);
-
-		if (curr_softif_neigh)
-			softif_neigh_free_ref(curr_softif_neigh);
-
-		softif_neigh_vid_free_ref(softif_neigh_vid);
-	}
-	rcu_read_unlock();
-
-	spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
-	hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp,
-				  &bat_priv->softif_neigh_vids, list) {
-		if (!hlist_empty(&softif_neigh_vid->softif_neigh_list))
-			continue;
-
-		hlist_del_rcu(&softif_neigh_vid->list);
-		softif_neigh_vid_free_ref(softif_neigh_vid);
-	}
-	spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
-
-}
-
-static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
-			       short vid)
-{
-	struct bat_priv *bat_priv = netdev_priv(dev);
-	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
-	struct batman_ogm_packet *batman_ogm_packet;
-	struct softif_neigh *softif_neigh = NULL;
-	struct hard_iface *primary_if = NULL;
-	struct softif_neigh *curr_softif_neigh = NULL;
-
-	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
-		batman_ogm_packet = (struct batman_ogm_packet *)
-					(skb->data + ETH_HLEN + VLAN_HLEN);
-	else
-		batman_ogm_packet = (struct batman_ogm_packet *)
-							(skb->data + ETH_HLEN);
-
-	if (batman_ogm_packet->header.version != COMPAT_VERSION)
-		goto out;
-
-	if (batman_ogm_packet->header.packet_type != BAT_OGM)
-		goto out;
-
-	if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
-		goto out;
-
-	if (is_my_mac(batman_ogm_packet->orig))
-		goto out;
-
-	softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
-	if (!softif_neigh)
-		goto out;
-
-	curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
-	if (curr_softif_neigh == softif_neigh)
-		goto out;
-
-	primary_if = primary_if_get_selected(bat_priv);
-	if (!primary_if)
-		goto out;
-
-	/* we got a neighbor but its mac is 'bigger' than ours  */
-	if (memcmp(primary_if->net_dev->dev_addr,
-		   softif_neigh->addr, ETH_ALEN) < 0)
-		goto out;
-
-	/* close own batX device and use softif_neigh as exit node */
-	if (!curr_softif_neigh) {
-		softif_neigh_vid_select(bat_priv, softif_neigh, vid);
-		goto out;
-	}
-
-	/* switch to new 'smallest neighbor' */
-	if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0)
-		softif_neigh_vid_select(bat_priv, softif_neigh, vid);
-
-out:
-	kfree_skb(skb);
-	if (softif_neigh)
-		softif_neigh_free_ref(softif_neigh);
-	if (curr_softif_neigh)
-		softif_neigh_free_ref(curr_softif_neigh);
-	if (primary_if)
-		hardif_free_ref(primary_if);
-	return;
-}
-
 static int interface_open(struct net_device *dev)
 {
 	netif_start_queue(dev);
@@ -562,10 +130,11 @@
 	struct hard_iface *primary_if = NULL;
 	struct bcast_packet *bcast_packet;
 	struct vlan_ethhdr *vhdr;
-	struct softif_neigh *curr_softif_neigh = NULL;
+	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
+						   0x00};
 	unsigned int header_len = 0;
 	int data_len = skb->len, ret;
-	short vid = -1;
+	short vid __maybe_unused = -1;
 	bool do_bcast = false;
 
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
@@ -583,21 +152,21 @@
 
 		/* fall through */
 	case ETH_P_BATMAN:
-		softif_batman_recv(skb, soft_iface, vid);
-		goto end;
+		goto dropped;
 	}
 
-	/**
-	 * if we have a another chosen mesh exit node in range
-	 * it will transport the packets to the mesh
-	 */
-	curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
-	if (curr_softif_neigh)
+	if (bla_tx(bat_priv, skb, vid))
 		goto dropped;
 
 	/* Register the client MAC in the transtable */
 	tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
 
+	/* don't accept STP packets. STP does not help in meshes;
+	 * better use the bridge loop avoidance ...
+	 */
+	if (compare_eth(ethhdr->h_dest, stp_addr))
+		goto dropped;
+
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
 		do_bcast = true;
 
@@ -675,8 +244,6 @@
 dropped_freed:
 	bat_priv->stats.tx_dropped++;
 end:
-	if (curr_softif_neigh)
-		softif_neigh_free_ref(curr_softif_neigh);
 	if (primary_if)
 		hardif_free_ref(primary_if);
 	return NETDEV_TX_OK;
@@ -687,12 +254,9 @@
 		  int hdr_size)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct unicast_packet *unicast_packet;
 	struct ethhdr *ethhdr;
 	struct vlan_ethhdr *vhdr;
-	struct softif_neigh *curr_softif_neigh = NULL;
-	short vid = -1;
-	int ret;
+	short vid __maybe_unused = -1;
 
 	/* check if enough space is available for pulling, and pull */
 	if (!pskb_may_pull(skb, hdr_size))
@@ -716,30 +280,6 @@
 		goto dropped;
 	}
 
-	/**
-	 * if we have a another chosen mesh exit node in range
-	 * it will transport the packets to the non-mesh network
-	 */
-	curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
-	if (curr_softif_neigh) {
-		skb_push(skb, hdr_size);
-		unicast_packet = (struct unicast_packet *)skb->data;
-
-		if ((unicast_packet->header.packet_type != BAT_UNICAST) &&
-		    (unicast_packet->header.packet_type != BAT_UNICAST_FRAG))
-			goto dropped;
-
-		skb_reset_mac_header(skb);
-
-		memcpy(unicast_packet->dest,
-		       curr_softif_neigh->addr, ETH_ALEN);
-		ret = route_unicast_packet(skb, recv_if);
-		if (ret == NET_RX_DROP)
-			goto dropped;
-
-		goto out;
-	}
-
 	/* skb->dev & skb->pkt_type are set here */
 	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
 		goto dropped;
@@ -752,21 +292,25 @@
 /*	skb->ip_summed = CHECKSUM_UNNECESSARY;*/
 
 	bat_priv->stats.rx_packets++;
-	bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
+	bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
 
 	soft_iface->last_rx = jiffies;
 
 	if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
 		goto dropped;
 
+	/* Let the bridge loop avoidance check the packet. If it does
+	 * not handle it, we can safely push it up.
+	 */
+	if (bla_rx(bat_priv, skb, vid))
+		goto out;
+
 	netif_rx(skb);
 	goto out;
 
 dropped:
 	kfree_skb(skb);
 out:
-	if (curr_softif_neigh)
-		softif_neigh_free_ref(curr_softif_neigh);
 	return;
 }
 
@@ -828,13 +372,14 @@
 
 	atomic_set(&bat_priv->aggregated_ogms, 1);
 	atomic_set(&bat_priv->bonding, 0);
+	atomic_set(&bat_priv->bridge_loop_avoidance, 0);
 	atomic_set(&bat_priv->ap_isolation, 0);
 	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
 	atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
 	atomic_set(&bat_priv->gw_sel_class, 20);
 	atomic_set(&bat_priv->gw_bandwidth, 41);
 	atomic_set(&bat_priv->orig_interval, 1000);
-	atomic_set(&bat_priv->hop_penalty, 10);
+	atomic_set(&bat_priv->hop_penalty, 30);
 	atomic_set(&bat_priv->log_level, 0);
 	atomic_set(&bat_priv->fragmentation, 1);
 	atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
@@ -845,6 +390,7 @@
 	atomic_set(&bat_priv->ttvn, 0);
 	atomic_set(&bat_priv->tt_local_changes, 0);
 	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+	atomic_set(&bat_priv->bla_num_requests, 0);
 
 	bat_priv->tt_buff = NULL;
 	bat_priv->tt_buff_len = 0;
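
A minimal sketch, not part of the patch, of the caller contract the soft-interface hunks above rely on: both the transmit and the receive path hand the frame to the bridge loop avoidance code first, and a non-zero return means BLA claimed the frame, so the caller must stop processing it (the tx path frees the skb itself, as in the interface_tx() hunk). The example_xmit() name is hypothetical.

/* sketch only: condenses the interface_tx() hunk above */
static int example_xmit(struct sk_buff *skb, struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	short vid = -1;		/* no VLAN header parsed in this sketch */

	/* non-zero return: BLA claimed the frame, do not mesh it */
	if (bla_tx(bat_priv, skb, vid)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* ... regular batman-adv unicast/broadcast handling ... */
	return NETDEV_TX_OK;
}

The receive side is symmetric: interface_rx() above calls bla_rx() and skips netif_rx() when the frame was claimed.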
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 756eab5..0203006 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -23,8 +23,6 @@
 #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
 
 int my_skb_head_push(struct sk_buff *skb, unsigned int len);
-int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
-void softif_neigh_purge(struct bat_priv *bat_priv);
 void interface_rx(struct net_device *soft_iface,
 		  struct sk_buff *skb, struct hard_iface *recv_if,
 		  int hdr_size);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 1f86921..a66c2dc 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
  *
- * Marek Lindner, Simon Wunderlich
+ * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -27,13 +27,14 @@
 #include "hash.h"
 #include "originator.h"
 #include "routing.h"
+#include "bridge_loop_avoidance.h"
 
 #include <linux/crc16.h>
 
-static void _tt_global_del(struct bat_priv *bat_priv,
-			   struct tt_global_entry *tt_global_entry,
-			   const char *message);
+static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+			  struct orig_node *orig_node);
 static void tt_purge(struct work_struct *work);
+static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
 
 /* returns 1 if they are the same mac addr */
 static int compare_tt(const struct hlist_node *node, const void *data2)
@@ -123,17 +124,31 @@
 	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
 				       common);
 
-	if (tt_global_entry->orig_node)
-		orig_node_free_ref(tt_global_entry->orig_node);
-
 	kfree(tt_global_entry);
 }
 
 static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
 {
-	if (atomic_dec_and_test(&tt_global_entry->common.refcount))
+	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
+		tt_global_del_orig_list(tt_global_entry);
 		call_rcu(&tt_global_entry->common.rcu,
 			 tt_global_entry_free_rcu);
+	}
+}
+
+static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+{
+	struct tt_orig_list_entry *orig_entry;
+
+	orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
+	atomic_dec(&orig_entry->orig_node->tt_size);
+	orig_node_free_ref(orig_entry->orig_node);
+	kfree(orig_entry);
+}
+
+static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
+{
+	call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
 }
 
 static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -182,12 +197,17 @@
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
 	struct tt_local_entry *tt_local_entry = NULL;
 	struct tt_global_entry *tt_global_entry = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tt_orig_list_entry *orig_entry;
 	int hash_added;
 
 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
 
 	if (tt_local_entry) {
 		tt_local_entry->last_seen = jiffies;
+		/* possibly unset the TT_CLIENT_PENDING flag */
+		tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
 		goto out;
 	}
 
@@ -232,14 +252,21 @@
 
 	/* Check whether it is a roaming! */
 	if (tt_global_entry) {
-		/* This node is probably going to update its tt table */
-		tt_global_entry->orig_node->tt_poss_change = true;
-		/* The global entry has to be marked as ROAMING and has to be
-		 * kept for consistency purpose */
+		/* These nodes are probably going to update their tt tables */
+		head = &tt_global_entry->orig_list;
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+			orig_entry->orig_node->tt_poss_change = true;
+
+			send_roam_adv(bat_priv, tt_global_entry->common.addr,
+				      orig_entry->orig_node);
+		}
+		rcu_read_unlock();
+		/* The global entry has to be marked as ROAMING and
+		 * has to be kept for consistency purpose
+		 */
 		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
 		tt_global_entry->roam_at = jiffies;
-		send_roam_adv(bat_priv, tt_global_entry->common.addr,
-			      tt_global_entry->orig_node);
 	}
 out:
 	if (tt_local_entry)
@@ -490,33 +517,76 @@
 	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
 }
 
+/* find out if an orig_node is already in the list of a tt_global_entry.
+ * returns true if found, false otherwise
+ */
+static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
+				     const struct orig_node *orig_node)
+{
+	struct tt_orig_list_entry *tmp_orig_entry;
+	const struct hlist_head *head;
+	struct hlist_node *node;
+	bool found = false;
+
+	rcu_read_lock();
+	head = &entry->orig_list;
+	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
+		if (tmp_orig_entry->orig_node == orig_node) {
+			found = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	return found;
+}
+
+static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
+				     struct orig_node *orig_node,
+				     int ttvn)
+{
+	struct tt_orig_list_entry *orig_entry;
+
+	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
+	if (!orig_entry)
+		return;
+
+	INIT_HLIST_NODE(&orig_entry->list);
+	atomic_inc(&orig_node->refcount);
+	atomic_inc(&orig_node->tt_size);
+	orig_entry->orig_node = orig_node;
+	orig_entry->ttvn = ttvn;
+
+	spin_lock_bh(&tt_global_entry->list_lock);
+	hlist_add_head_rcu(&orig_entry->list,
+			   &tt_global_entry->orig_list);
+	spin_unlock_bh(&tt_global_entry->list_lock);
+}
+
 /* caller must hold orig_node refcount */
 int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
 		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
 		  bool wifi)
 {
-	struct tt_global_entry *tt_global_entry;
-	struct orig_node *orig_node_tmp;
+	struct tt_global_entry *tt_global_entry = NULL;
 	int ret = 0;
 	int hash_added;
 
 	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
 
 	if (!tt_global_entry) {
-		tt_global_entry =
-			kmalloc(sizeof(*tt_global_entry),
-				GFP_ATOMIC);
+		tt_global_entry = kzalloc(sizeof(*tt_global_entry),
+					  GFP_ATOMIC);
 		if (!tt_global_entry)
 			goto out;
 
 		memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+
 		tt_global_entry->common.flags = NO_FLAGS;
-		atomic_set(&tt_global_entry->common.refcount, 2);
-		/* Assign the new orig_node */
-		atomic_inc(&orig_node->refcount);
-		tt_global_entry->orig_node = orig_node;
-		tt_global_entry->ttvn = ttvn;
 		tt_global_entry->roam_at = 0;
+		atomic_set(&tt_global_entry->common.refcount, 2);
+
+		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
+		spin_lock_init(&tt_global_entry->list_lock);
 
 		hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
 				 choose_orig, &tt_global_entry->common,
@@ -527,19 +597,27 @@
 			tt_global_entry_free_ref(tt_global_entry);
 			goto out_remove;
 		}
-		atomic_inc(&orig_node->tt_size);
+
+		tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
 	} else {
-		if (tt_global_entry->orig_node != orig_node) {
-			atomic_dec(&tt_global_entry->orig_node->tt_size);
-			orig_node_tmp = tt_global_entry->orig_node;
-			atomic_inc(&orig_node->refcount);
-			tt_global_entry->orig_node = orig_node;
-			orig_node_free_ref(orig_node_tmp);
-			atomic_inc(&orig_node->tt_size);
+		/* there is already a global entry, use this one. */
+
+		/* If the TT_CLIENT_ROAM flag is set, there is only one
+		 * originator left in the list and we previously received a
+		 * delete + roaming change for this originator.
+		 *
+		 * We should first delete the old originator before adding the
+		 * new one.
+		 */
+		if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
+			tt_global_del_orig_list(tt_global_entry);
+			tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
+			tt_global_entry->roam_at = 0;
 		}
-		tt_global_entry->common.flags = NO_FLAGS;
-		tt_global_entry->ttvn = ttvn;
-		tt_global_entry->roam_at = 0;
+
+		if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
+			tt_global_add_orig_entry(tt_global_entry, orig_node,
+						 ttvn);
 	}
 
 	if (wifi)
@@ -560,6 +638,34 @@
 	return ret;
 }
 
+/* print all orig nodes that announce the address for this global entry.
+ * it is assumed that the caller holds rcu_read_lock();
+ */
+static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
+				  struct seq_file *seq)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tt_orig_list_entry *orig_entry;
+	struct tt_common_entry *tt_common_entry;
+	uint16_t flags;
+	uint8_t last_ttvn;
+
+	tt_common_entry = &tt_global_entry->common;
+
+	head = &tt_global_entry->orig_list;
+
+	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+		flags = tt_common_entry->flags;
+		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
+		seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
+			   tt_global_entry->common.addr, orig_entry->ttvn,
+			   orig_entry->orig_node->orig, last_ttvn,
+			   (flags & TT_CLIENT_ROAM ? 'R' : '.'),
+			   (flags & TT_CLIENT_WIFI ? 'W' : '.'));
+	}
+}
+
 int tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
@@ -603,18 +709,7 @@
 			tt_global_entry = container_of(tt_common_entry,
 						       struct tt_global_entry,
 						       common);
-			seq_printf(seq,
-				   " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
-				   tt_global_entry->common.addr,
-				   tt_global_entry->ttvn,
-				   tt_global_entry->orig_node->orig,
-				   (uint8_t) atomic_read(
-						&tt_global_entry->orig_node->
-						last_ttvn),
-				   (tt_global_entry->common.flags &
-				    TT_CLIENT_ROAM ? 'R' : '.'),
-				   (tt_global_entry->common.flags &
-				    TT_CLIENT_WIFI ? 'W' : '.'));
+			tt_global_print_entry(tt_global_entry, seq);
 		}
 		rcu_read_unlock();
 	}
@@ -624,59 +719,150 @@
 	return ret;
 }
 
-static void _tt_global_del(struct bat_priv *bat_priv,
-			   struct tt_global_entry *tt_global_entry,
-			   const char *message)
+/* deletes the orig list of a tt_global_entry */
+static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
 {
-	if (!tt_global_entry)
-		goto out;
+	struct hlist_head *head;
+	struct hlist_node *node, *safe;
+	struct tt_orig_list_entry *orig_entry;
 
+	spin_lock_bh(&tt_global_entry->list_lock);
+	head = &tt_global_entry->orig_list;
+	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+		hlist_del_rcu(node);
+		tt_orig_list_entry_free_ref(orig_entry);
+	}
+	spin_unlock_bh(&tt_global_entry->list_lock);
+
+}
+
+static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
+				     struct tt_global_entry *tt_global_entry,
+				     struct orig_node *orig_node,
+				     const char *message)
+{
+	struct hlist_head *head;
+	struct hlist_node *node, *safe;
+	struct tt_orig_list_entry *orig_entry;
+
+	spin_lock_bh(&tt_global_entry->list_lock);
+	head = &tt_global_entry->orig_list;
+	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
+		if (orig_entry->orig_node == orig_node) {
+			bat_dbg(DBG_TT, bat_priv,
+				"Deleting %pM from global tt entry %pM: %s\n",
+				orig_node->orig, tt_global_entry->common.addr,
+				message);
+			hlist_del_rcu(node);
+			tt_orig_list_entry_free_ref(orig_entry);
+		}
+	}
+	spin_unlock_bh(&tt_global_entry->list_lock);
+}
+
+static void tt_global_del_struct(struct bat_priv *bat_priv,
+				 struct tt_global_entry *tt_global_entry,
+				 const char *message)
+{
 	bat_dbg(DBG_TT, bat_priv,
-		"Deleting global tt entry %pM (via %pM): %s\n",
-		tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
-		message);
-
-	atomic_dec(&tt_global_entry->orig_node->tt_size);
+		"Deleting global tt entry %pM: %s\n",
+		tt_global_entry->common.addr, message);
 
 	hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
 		    tt_global_entry->common.addr);
-out:
-	if (tt_global_entry)
-		tt_global_entry_free_ref(tt_global_entry);
+	tt_global_entry_free_ref(tt_global_entry);
+
 }
 
-void tt_global_del(struct bat_priv *bat_priv,
-		   struct orig_node *orig_node, const unsigned char *addr,
-		   const char *message, bool roaming)
+/* If the client is to be deleted, we check if it is the last originator entry
+ * within the tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the
+ * timer, otherwise we simply remove the originator scheduled for deletion.
+ */
+static void tt_global_del_roaming(struct bat_priv *bat_priv,
+				  struct tt_global_entry *tt_global_entry,
+				  struct orig_node *orig_node,
+				  const char *message)
+{
+	bool last_entry = true;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tt_orig_list_entry *orig_entry;
+
+	/* no local entry exists, case 1:
+	 * Check if this is the last one or if other entries exist.
+	 */
+
+	rcu_read_lock();
+	head = &tt_global_entry->orig_list;
+	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+		if (orig_entry->orig_node != orig_node) {
+			last_entry = false;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	if (last_entry) {
+		/* it's the last one, mark for roaming. */
+		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+		tt_global_entry->roam_at = jiffies;
+	} else
+		/* there is another entry, we can simply delete this
+		 * one and can still use the other one.
+		 */
+		tt_global_del_orig_entry(bat_priv, tt_global_entry,
+					 orig_node, message);
+}
+
+
+static void tt_global_del(struct bat_priv *bat_priv,
+			  struct orig_node *orig_node,
+			  const unsigned char *addr,
+			  const char *message, bool roaming)
 {
 	struct tt_global_entry *tt_global_entry = NULL;
 	struct tt_local_entry *tt_local_entry = NULL;
 
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
-	if (!tt_global_entry || tt_global_entry->orig_node != orig_node)
+	if (!tt_global_entry)
 		goto out;
 
-	if (!roaming)
-		goto out_del;
+	if (!roaming) {
+		tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
+					 message);
 
-	/* if we are deleting a global entry due to a roam
-	 * event, there are two possibilities:
-	 * 1) the client roamed from node A to node B => we mark
-	 *    it with TT_CLIENT_ROAM, we start a timer and we
-	 *    wait for node B to claim it. In case of timeout
-	 *    the entry is purged.
-	 * 2) the client roamed to us => we can directly delete
-	 *    the global entry, since it is useless now. */
-	tt_local_entry = tt_local_hash_find(bat_priv,
-					    tt_global_entry->common.addr);
-	if (!tt_local_entry) {
-		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
-		tt_global_entry->roam_at = jiffies;
+		if (hlist_empty(&tt_global_entry->orig_list))
+			tt_global_del_struct(bat_priv, tt_global_entry,
+					     message);
+
 		goto out;
 	}
 
-out_del:
-	_tt_global_del(bat_priv, tt_global_entry, message);
+	/* if we are deleting a global entry due to a roam
+	 * event, there are two possibilities:
+	 * 1) the client roamed from node A to node B => if there
+	 *    is only one originator left for this client, we mark
+	 *    it with TT_CLIENT_ROAM, we start a timer and we
+	 *    wait for node B to claim it. In case of timeout
+	 *    the entry is purged.
+	 *
+	 *    If there are other originators left, we directly delete
+	 *    the originator.
+	 * 2) the client roamed to us => we can directly delete
+	 *    the global entry, since it is useless now. */
+
+	tt_local_entry = tt_local_hash_find(bat_priv,
+					    tt_global_entry->common.addr);
+	if (tt_local_entry) {
+		/* local entry exists, case 2: client roamed to us. */
+		tt_global_del_orig_list(tt_global_entry);
+		tt_global_del_struct(bat_priv, tt_global_entry, message);
+	} else
+		/* no local entry exists, case 1: check for roaming */
+		tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
+				      message);
+
 
 out:
 	if (tt_global_entry)
@@ -709,11 +895,14 @@
 			tt_global_entry = container_of(tt_common_entry,
 						       struct tt_global_entry,
 						       common);
-			if (tt_global_entry->orig_node == orig_node) {
+
+			tt_global_del_orig_entry(bat_priv, tt_global_entry,
+						 orig_node, message);
+
+			if (hlist_empty(&tt_global_entry->orig_list)) {
 				bat_dbg(DBG_TT, bat_priv,
-					"Deleting global tt entry %pM (via %pM): %s\n",
+					"Deleting global tt entry %pM: %s\n",
 					tt_global_entry->common.addr,
-					tt_global_entry->orig_node->orig,
 					message);
 				hlist_del_rcu(node);
 				tt_global_entry_free_ref(tt_global_entry);
@@ -754,7 +943,7 @@
 			bat_dbg(DBG_TT, bat_priv,
 				"Deleting global tt entry (%pM): Roaming timeout\n",
 				tt_global_entry->common.addr);
-			atomic_dec(&tt_global_entry->orig_node->tt_size);
+
 			hlist_del_rcu(node);
 			tt_global_entry_free_ref(tt_global_entry);
 		}
@@ -817,6 +1006,11 @@
 	struct tt_local_entry *tt_local_entry = NULL;
 	struct tt_global_entry *tt_global_entry = NULL;
 	struct orig_node *orig_node = NULL;
+	struct neigh_node *router = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct tt_orig_list_entry *orig_entry;
+	int best_tq;
 
 	if (src && atomic_read(&bat_priv->ap_isolation)) {
 		tt_local_entry = tt_local_hash_find(bat_priv, src);
@@ -833,11 +1027,25 @@
 	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
 		goto out;
 
-	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
-		goto out;
+	best_tq = 0;
 
-	orig_node = tt_global_entry->orig_node;
+	rcu_read_lock();
+	head = &tt_global_entry->orig_list;
+	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
+		router = orig_node_get_router(orig_entry->orig_node);
+		if (!router)
+			continue;
 
+		if (router->tq_avg > best_tq) {
+			orig_node = orig_entry->orig_node;
+			best_tq = router->tq_avg;
+		}
+		neigh_node_free_ref(router);
+	}
+	/* found anything? */
+	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
+		orig_node = NULL;
+	rcu_read_unlock();
 out:
 	if (tt_global_entry)
 		tt_global_entry_free_ref(tt_global_entry);
@@ -848,7 +1056,8 @@
 }
 
 /* Calculates the checksum of the local table of a given orig_node */
-uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
+static uint16_t tt_global_crc(struct bat_priv *bat_priv,
+			      struct orig_node *orig_node)
 {
 	uint16_t total = 0, total_one;
 	struct hashtable_t *hash = bat_priv->tt_global_hash;
@@ -868,20 +1077,26 @@
 			tt_global_entry = container_of(tt_common_entry,
 						       struct tt_global_entry,
 						       common);
-			if (compare_eth(tt_global_entry->orig_node,
-					orig_node)) {
-				/* Roaming clients are in the global table for
-				 * consistency only. They don't have to be
-				 * taken into account while computing the
-				 * global crc */
-				if (tt_common_entry->flags & TT_CLIENT_ROAM)
-					continue;
-				total_one = 0;
-				for (j = 0; j < ETH_ALEN; j++)
-					total_one = crc16_byte(total_one,
-						tt_common_entry->addr[j]);
-				total ^= total_one;
-			}
+			/* Roaming clients are in the global table for
+			 * consistency only. They don't have to be
+			 * taken into account while computing the
+			 * global crc
+			 */
+			if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
+				continue;
+
+			/* find out if this global entry is announced by this
+			 * originator
+			 */
+			if (!tt_global_entry_has_orig(tt_global_entry,
+						      orig_node))
+				continue;
+
+			total_one = 0;
+			for (j = 0; j < ETH_ALEN; j++)
+				total_one = crc16_byte(total_one,
+					tt_global_entry->common.addr[j]);
+			total ^= total_one;
 		}
 		rcu_read_unlock();
 	}
@@ -936,8 +1151,10 @@
 	spin_unlock_bh(&bat_priv->tt_req_list_lock);
 }
 
-void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
-			 const unsigned char *tt_buff, uint8_t tt_num_changes)
+static void tt_save_orig_buffer(struct bat_priv *bat_priv,
+				struct orig_node *orig_node,
+				const unsigned char *tt_buff,
+				uint8_t tt_num_changes)
 {
 	uint16_t tt_buff_len = tt_len(tt_num_changes);
 
@@ -1020,7 +1237,7 @@
 	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
 				       common);
 
-	return (tt_global_entry->orig_node == orig_node);
+	return tt_global_entry_has_orig(tt_global_entry, orig_node);
 }
 
 static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
@@ -1124,7 +1341,7 @@
 	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
 	tt_request->header.ttl = TTL;
 	tt_request->ttvn = ttvn;
-	tt_request->tt_data = tt_crc;
+	tt_request->tt_data = htons(tt_crc);
 	tt_request->flags = TT_REQUEST;
 
 	if (full_table)
@@ -1401,10 +1618,15 @@
 bool send_tt_response(struct bat_priv *bat_priv,
 		      struct tt_query_packet *tt_request)
 {
-	if (is_my_mac(tt_request->dst))
+	if (is_my_mac(tt_request->dst)) {
+		/* don't answer backbone gws! */
+		if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
+			return true;
+
 		return send_my_tt_response(bat_priv, tt_request);
-	else
+	} else {
 		return send_other_tt_response(bat_priv, tt_request);
+	}
 }
 
 static void _tt_update_changes(struct bat_priv *bat_priv,
@@ -1508,6 +1730,10 @@
 		tt_response->src, tt_response->ttvn, tt_response->tt_data,
 		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
 
+	/* we should have never asked a backbone gw */
+	if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
+		goto out;
+
 	orig_node = orig_hash_find(bat_priv, tt_response->src);
 	if (!orig_node)
 		goto out;
@@ -1627,8 +1853,8 @@
 	return ret;
 }
 
-void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
-		   struct orig_node *orig_node)
+static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+			  struct orig_node *orig_node)
 {
 	struct neigh_node *neigh_node = NULL;
 	struct sk_buff *skb = NULL;
@@ -1796,6 +2022,8 @@
 
 	/* Increment the TTVN only once per OGM interval */
 	atomic_inc(&bat_priv->ttvn);
+	bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
+		(uint8_t)atomic_read(&bat_priv->ttvn));
 	bat_priv->tt_poss_change = false;
 }
 
@@ -1836,6 +2064,10 @@
 	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
 	bool full_table = true;
 
+	/* don't care about a backbone gateway's updates. */
+	if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
+		return;
+
 	/* orig table not initialised AND first diff is in the OGM OR the ttvn
 	 * increased by one -> we can apply the attached changes */
 	if ((!orig_node->tt_initialised && ttvn == 1) ||
@@ -1873,6 +2105,7 @@
 	} else {
 		/* if we missed more than one change or our tables are not
 		 * in sync anymore -> request fresh tt data */
+
 		if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
 		    orig_node->tt_crc != tt_crc) {
 request_table:
@@ -1886,3 +2119,22 @@
 		}
 	}
 }
+
+/* returns true if we know that the client has moved from its old
+ * originator to another one. The entry is still kept for consistency
+ * purposes
+ */
+bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr)
+{
+	struct tt_global_entry *tt_global_entry;
+	bool ret = false;
+
+	tt_global_entry = tt_global_hash_find(bat_priv, addr);
+	if (!tt_global_entry)
+		goto out;
+
+	ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
+	tt_global_entry_free_ref(tt_global_entry);
+out:
+	return ret;
+}
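
To make the new layout easier to follow, here is a standalone sketch, not part of the patch, of how a reader walks the per-client originator list that replaces the old single orig_node pointer: the list is traversed under rcu_read_lock(), writers serialize on tt_global_entry->list_lock (see tt_global_add_orig_entry() above), and a reference is taken with atomic_inc_not_zero() before the RCU section is left. It condenses the transtable_search() hunk above; the example_best_orig() name is hypothetical.

/* sketch only: pick the announcing originator with the best TQ */
static struct orig_node *example_best_orig(struct tt_global_entry *tt_global_entry)
{
	struct tt_orig_list_entry *orig_entry;
	struct orig_node *best = NULL;
	struct neigh_node *router;
	struct hlist_node *node;
	int best_tq = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_entry, node,
				 &tt_global_entry->orig_list, list) {
		router = orig_node_get_router(orig_entry->orig_node);
		if (!router)
			continue;

		if (router->tq_avg > best_tq) {
			best = orig_entry->orig_node;
			best_tq = router->tq_avg;
		}
		neigh_node_free_ref(router);
	}
	/* take a reference before leaving the RCU read section */
	if (best && !atomic_inc_not_zero(&best->refcount))
		best = NULL;
	rcu_read_unlock();

	return best;
}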
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index c753633..c43374d 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
  *
- * Marek Lindner, Simon Wunderlich
+ * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -39,27 +39,21 @@
 int tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void tt_global_del_orig(struct bat_priv *bat_priv,
 			struct orig_node *orig_node, const char *message);
-void tt_global_del(struct bat_priv *bat_priv,
-		   struct orig_node *orig_node, const unsigned char *addr,
-		   const char *message, bool roaming);
 struct orig_node *transtable_search(struct bat_priv *bat_priv,
 				    const uint8_t *src, const uint8_t *addr);
-void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
-			 const unsigned char *tt_buff, uint8_t tt_num_changes);
 uint16_t tt_local_crc(struct bat_priv *bat_priv);
-uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void tt_free(struct bat_priv *bat_priv);
 bool send_tt_response(struct bat_priv *bat_priv,
 		      struct tt_query_packet *tt_request);
 bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
 void handle_tt_response(struct bat_priv *bat_priv,
 			struct tt_query_packet *tt_response);
-void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
-		   struct orig_node *orig_node);
 void tt_commit_changes(struct bat_priv *bat_priv);
 bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
 void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
 		    const unsigned char *tt_buff, uint8_t tt_num_changes,
 		    uint8_t ttvn, uint16_t tt_crc);
+bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr);
+
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 302efb5..61308e8 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -27,7 +27,7 @@
 #include "packet.h"
 #include "bitarray.h"
 
-#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \
+#define BAT_HEADER_LEN (ETH_HLEN + \
 	((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
 	 sizeof(struct unicast_packet) : \
 	 sizeof(struct bcast_packet))))
@@ -52,7 +52,7 @@
 /**
  *	orig_node - structure for orig_list maintaining nodes of mesh
  *	@primary_addr: hosts primary interface address
- *	@last_valid: when last packet from this node was received
+ *	@last_seen: when last packet from this node was received
  *	@bcast_seqno_reset: time when the broadcast seqno window was reset
  *	@batman_seqno_reset: time when the batman seqno window was reset
  *	@gw_flags: flags related to gateway class
@@ -70,7 +70,7 @@
 	struct neigh_node __rcu *router; /* rcu protected pointer */
 	unsigned long *bcast_own;
 	uint8_t *bcast_own_sum;
-	unsigned long last_valid;
+	unsigned long last_seen;
 	unsigned long bcast_seqno_reset;
 	unsigned long batman_seqno_reset;
 	uint8_t gw_flags;
@@ -90,7 +90,7 @@
 	bool tt_poss_change;
 	uint32_t last_real_seqno;
 	uint8_t last_ttl;
-	unsigned long bcast_bits[NUM_WORDS];
+	DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE);
 	uint32_t last_bcast_seqno;
 	struct hlist_head neigh_list;
 	struct list_head frag_list;
@@ -120,7 +120,7 @@
 
 /**
  *	neigh_node
- *	@last_valid: when last packet via this neighbor was received
+ *	@last_seen: when last packet via this neighbor was received
  */
 struct neigh_node {
 	struct hlist_node list;
@@ -131,15 +131,22 @@
 	uint8_t tq_avg;
 	uint8_t last_ttl;
 	struct list_head bonding_list;
-	unsigned long last_valid;
-	unsigned long real_bits[NUM_WORDS];
+	unsigned long last_seen;
+	DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
 	atomic_t refcount;
 	struct rcu_head rcu;
 	struct orig_node *orig_node;
 	struct hard_iface *if_incoming;
-	spinlock_t tq_lock;	/* protects: tq_recv, tq_index */
+	spinlock_t lq_update_lock;	/* protects: tq_recv, tq_index */
 };
 
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct bcast_duplist_entry {
+	uint8_t orig[ETH_ALEN];
+	uint16_t crc;
+	unsigned long entrytime;
+};
+#endif
 
 struct bat_priv {
 	atomic_t mesh_state;
@@ -148,6 +155,7 @@
 	atomic_t bonding;		/* boolean */
 	atomic_t fragmentation;		/* boolean */
 	atomic_t ap_isolation;		/* boolean */
+	atomic_t bridge_loop_avoidance;	/* boolean */
 	atomic_t vis_mode;		/* VIS_TYPE_* */
 	atomic_t gw_mode;		/* GW_MODE_* */
 	atomic_t gw_sel_class;		/* uint */
@@ -161,6 +169,7 @@
 	atomic_t ttvn; /* translation table version number */
 	atomic_t tt_ogm_append_cnt;
 	atomic_t tt_local_changes; /* changes registered in a OGM interval */
+	atomic_t bla_num_requests; /* number of bla requests in flight */
 	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
 	 * If true, then I received a Roaming_adv and I have to inspect every
 	 * packet directed to me to check whether I am still the true
@@ -174,15 +183,23 @@
 	struct hlist_head forw_bat_list;
 	struct hlist_head forw_bcast_list;
 	struct hlist_head gw_list;
-	struct hlist_head softif_neigh_vids;
 	struct list_head tt_changes_list; /* tracks changes in a OGM int */
 	struct list_head vis_send_list;
 	struct hashtable_t *orig_hash;
 	struct hashtable_t *tt_local_hash;
 	struct hashtable_t *tt_global_hash;
+#ifdef CONFIG_BATMAN_ADV_BLA
+	struct hashtable_t *claim_hash;
+	struct hashtable_t *backbone_hash;
+#endif
 	struct list_head tt_req_list; /* list of pending tt_requests */
 	struct list_head tt_roam_list;
 	struct hashtable_t *vis_hash;
+#ifdef CONFIG_BATMAN_ADV_BLA
+	struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE];
+	int bcast_duplist_curr;
+	struct bla_claim_dst claim_dest;
+#endif
 	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 	spinlock_t forw_bcast_list_lock; /* protects  */
 	spinlock_t tt_changes_list_lock; /* protects tt_changes */
@@ -191,8 +208,6 @@
 	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
 	spinlock_t vis_hash_lock; /* protects vis_hash */
 	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
-	spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
-	spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
 	atomic_t num_local_tt;
 	/* Checksum of the local table, recomputed before sending a new OGM */
 	atomic_t tt_crc;
@@ -202,6 +217,7 @@
 	struct delayed_work tt_work;
 	struct delayed_work orig_work;
 	struct delayed_work vis_work;
+	struct delayed_work bla_work;
 	struct gw_node __rcu *curr_gw;  /* rcu protected pointer */
 	atomic_t gw_reselect;
 	struct hard_iface __rcu *primary_if;  /* rcu protected pointer */
@@ -239,11 +255,42 @@
 
 struct tt_global_entry {
 	struct tt_common_entry common;
-	struct orig_node *orig_node;
-	uint8_t ttvn;
+	struct hlist_head orig_list;
+	spinlock_t list_lock;	/* protects the list */
 	unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
 };
 
+struct tt_orig_list_entry {
+	struct orig_node *orig_node;
+	uint8_t ttvn;
+	struct rcu_head rcu;
+	struct hlist_node list;
+};
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+struct backbone_gw {
+	uint8_t orig[ETH_ALEN];
+	short vid;		/* used VLAN ID */
+	struct hlist_node hash_entry;
+	struct bat_priv *bat_priv;
+	unsigned long lasttime;	/* last time we heard of this backbone gw */
+	atomic_t request_sent;
+	atomic_t refcount;
+	struct rcu_head rcu;
+	uint16_t crc;		/* crc checksum over all claims */
+};
+
+struct claim {
+	uint8_t addr[ETH_ALEN];
+	short vid;
+	struct backbone_gw *backbone_gw;
+	unsigned long lasttime;	/* last time we heard of claim (locals only) */
+	struct rcu_head rcu;
+	atomic_t refcount;
+	struct hlist_node hash_entry;
+};
+#endif
+
 struct tt_change_node {
 	struct list_head list;
 	struct tt_change change;
@@ -327,41 +374,24 @@
 	uint8_t mac[ETH_ALEN];
 };
 
-struct softif_neigh_vid {
-	struct hlist_node list;
-	struct bat_priv *bat_priv;
-	short vid;
-	atomic_t refcount;
-	struct softif_neigh __rcu *softif_neigh;
-	struct rcu_head rcu;
-	struct hlist_head softif_neigh_list;
-};
-
-struct softif_neigh {
-	struct hlist_node list;
-	uint8_t addr[ETH_ALEN];
-	unsigned long last_seen;
-	atomic_t refcount;
-	struct rcu_head rcu;
-};
-
 struct bat_algo_ops {
 	struct hlist_node list;
 	char *name;
-	/* init OGM when hard-interface is enabled */
-	void (*bat_ogm_init)(struct hard_iface *hard_iface);
-	/* init primary OGM when primary interface is selected */
-	void (*bat_ogm_init_primary)(struct hard_iface *hard_iface);
-	/* init mac addresses of the OGM belonging to this hard-interface */
-	void (*bat_ogm_update_mac)(struct hard_iface *hard_iface);
+	/* init routing info when hard-interface is enabled */
+	int (*bat_iface_enable)(struct hard_iface *hard_iface);
+	/* de-init routing info when hard-interface is disabled */
+	void (*bat_iface_disable)(struct hard_iface *hard_iface);
+	/* (re-)init mac addresses of the protocol information
+	 * belonging to this hard-interface
+	 */
+	void (*bat_iface_update_mac)(struct hard_iface *hard_iface);
+	/* called when primary interface is selected / changed */
+	void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
 	/* prepare a new outgoing OGM for the send queue */
 	void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
 				 int tt_num_changes);
 	/* send scheduled OGM */
 	void (*bat_ogm_emit)(struct forw_packet *forw_packet);
-	/* receive incoming OGM */
-	void (*bat_ogm_receive)(struct hard_iface *if_incoming,
-				struct sk_buff *skb);
 };
 
 #endif /* _NET_BATMAN_ADV_TYPES_H_ */
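
For orientation, a sketch, not part of the patch, of what a routing algorithm plugs into struct bat_algo_ops after the rework above: per-interface enable/disable and MAC-update hooks plus a primary-interface callback replace the old OGM-init trio. The example_* callbacks are hypothetical stubs, and the registration entry point bat_algo_register() is assumed from main.c; it is not shown in this diff.

static int example_iface_enable(struct hard_iface *hard_iface)
{
	/* allocate and initialize per-interface protocol data here */
	return 0;
}

static void example_iface_disable(struct hard_iface *hard_iface)
{
	/* undo example_iface_enable() */
}

static void example_iface_update_mac(struct hard_iface *hard_iface)
{
	/* refresh protocol data that embeds the interface MAC address */
}

static void example_primary_iface_set(struct hard_iface *hard_iface)
{
	/* react to a primary interface selection or change */
}

static void example_ogm_schedule(struct hard_iface *hard_iface,
				 int tt_num_changes)
{
	/* prepare the next outgoing OGM */
}

static void example_ogm_emit(struct forw_packet *forw_packet)
{
	/* send a previously scheduled OGM */
}

static struct bat_algo_ops example_algo = {
	.name			= "EXAMPLE",
	.bat_iface_enable	= example_iface_enable,
	.bat_iface_disable	= example_iface_disable,
	.bat_iface_update_mac	= example_iface_update_mac,
	.bat_primary_iface_set	= example_primary_iface_set,
	.bat_ogm_schedule	= example_ogm_schedule,
	.bat_ogm_emit		= example_ogm_emit,
};

static int __init example_algo_init(void)
{
	/* bat_algo_register() is assumed to live in main.c */
	return bat_algo_register(&example_algo);
}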
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 676f6a6..74175c2 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -331,6 +331,14 @@
 	unicast_packet->ttvn =
 		(uint8_t)atomic_read(&orig_node->last_ttvn);
 
+	/* inform the destination node that we are still missing a correct route
+	 * for this client. The destination will receive this packet and will
+	 * try to reroute it because the ttvn contained in the header is less
+	 * than the current one
+	 */
+	if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
+		unicast_packet->ttvn = unicast_packet->ttvn - 1;
+
 	if (atomic_read(&bat_priv->fragmentation) &&
 	    data_len + sizeof(*unicast_packet) >
 				neigh_node->if_incoming->net_dev->mtu) {
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index c4a5b8c..cec216f 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -434,12 +434,12 @@
 		return NULL;
 
 	info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
-					 sizeof(struct ethhdr));
+					 ETH_HLEN);
 	if (!info->skb_packet) {
 		kfree(info);
 		return NULL;
 	}
-	skb_reserve(info->skb_packet, sizeof(struct ethhdr));
+	skb_reserve(info->skb_packet, ETH_HLEN);
 	packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
 					      + vis_info_len);
 
@@ -894,11 +894,11 @@
 
 	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
 							  MAX_VIS_PACKET_SIZE +
-							 sizeof(struct ethhdr));
+							  ETH_HLEN);
 	if (!bat_priv->my_vis_info->skb_packet)
 		goto free_info;
 
-	skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
+	skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
 	packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
 					      sizeof(*packet));
 
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 72eb187..6fb68a9 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -450,7 +450,7 @@
 			sk->sk_state == BT_CONFIG)
 		return mask;
 
-	if (sock_writeable(sk))
+	if (!bt_sk(sk)->suspended && sock_writeable(sk))
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 	else
 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index a779ec7..88884d1 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -69,7 +69,7 @@
 	BT_DBG("");
 
 	list_for_each_entry(s, &bnep_session_list, list)
-		if (!compare_ether_addr(dst, s->eh.h_source))
+		if (ether_addr_equal(dst, s->eh.h_source))
 			return s;
 
 	return NULL;
@@ -422,10 +422,10 @@
 	iv[il++] = (struct kvec) { &type, 1 };
 	len++;
 
-	if (compress_src && !compare_ether_addr(eh->h_dest, s->eh.h_source))
+	if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source))
 		type |= 0x01;
 
-	if (compress_dst && !compare_ether_addr(eh->h_source, s->eh.h_dest))
+	if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest))
 		type |= 0x02;
 
 	if (type)
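
Not part of the patch: a short sketch of why the conversion above inverts every test. compare_ether_addr() is memcmp-like and returns 0 when the addresses match, while ether_addr_equal() is a boolean that returns true on a match, so "!compare_ether_addr(a, b)" and "ether_addr_equal(a, b)" are equivalent. The example_same_station() name is hypothetical.

#include <linux/etherdevice.h>

static bool example_same_station(const u8 *a, const u8 *b)
{
	/* pre-conversion form: if (!compare_ether_addr(a, b))  (0 == equal) */
	/* post-conversion form: boolean helper, true means equal           */
	return ether_addr_equal(a, b);
}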
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 92a857e..d6dc44c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1215,40 +1215,40 @@
 	return NULL;
 }
 
-static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
+static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
 						u8 key_type, u8 old_key_type)
 {
 	/* Legacy key */
 	if (key_type < 0x03)
-		return 1;
+		return true;
 
 	/* Debug keys are insecure so don't store them persistently */
 	if (key_type == HCI_LK_DEBUG_COMBINATION)
-		return 0;
+		return false;
 
 	/* Changed combination key and there's no previous one */
 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
-		return 0;
+		return false;
 
 	/* Security mode 3 case */
 	if (!conn)
-		return 1;
+		return true;
 
 	/* Neither local nor remote side had no-bonding as requirement */
 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
-		return 1;
+		return true;
 
 	/* Local side had dedicated bonding as requirement */
 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
-		return 1;
+		return true;
 
 	/* Remote side had dedicated bonding as requirement */
 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
-		return 1;
+		return true;
 
 	/* If none of the above criteria match, then don't store the key
 	 * persistently */
-	return 0;
+	return false;
 }
 
 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
@@ -1285,7 +1285,8 @@
 		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
 {
 	struct link_key *key, *old_key;
-	u8 old_key_type, persistent;
+	u8 old_key_type;
+	bool persistent;
 
 	old_key = hci_find_link_key(hdev, bdaddr);
 	if (old_key) {
@@ -1328,10 +1329,8 @@
 
 	mgmt_new_link_key(hdev, key, persistent);
 
-	if (!persistent) {
-		list_del(&key->list);
-		kfree(key);
-	}
+	if (conn)
+		conn->flush_key = !persistent;
 
 	return 0;
 }
@@ -2785,6 +2784,14 @@
 	if (conn) {
 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
 
+		hci_dev_lock(hdev);
+		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+			mgmt_device_connected(hdev, &conn->dst, conn->type,
+					      conn->dst_type, 0, NULL, 0,
+					      conn->dev_class);
+		hci_dev_unlock(hdev);
+
 		/* Send to upper protocol */
 		l2cap_recv_acldata(conn, skb, flags);
 		return;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b375310..1266f78 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1901,6 +1901,8 @@
 	}
 
 	if (ev->status == 0) {
+		if (conn->type == ACL_LINK && conn->flush_key)
+			hci_remove_link_key(hdev, &conn->dst);
 		hci_proto_disconn_cfm(conn, ev->reason);
 		hci_conn_del(conn);
 	}
@@ -2037,6 +2039,12 @@
 
 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
 
+		if (ev->status && conn->state == BT_CONNECTED) {
+			hci_acl_disconn(conn, 0x13);
+			hci_conn_put(conn);
+			goto unlock;
+		}
+
 		if (conn->state == BT_CONFIG) {
 			if (!ev->status)
 				conn->state = BT_CONNECTED;
@@ -2047,6 +2055,7 @@
 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
 	}
 
+unlock:
 	hci_dev_unlock(hdev);
 }
 
@@ -2100,7 +2109,7 @@
 		goto unlock;
 	}
 
-	if (!ev->status) {
+	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
 		struct hci_cp_remote_name_req cp;
 		memset(&cp, 0, sizeof(cp));
 		bacpy(&cp.bdaddr, &conn->dst);
@@ -2311,6 +2320,7 @@
 
 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
 		hci_cc_user_passkey_neg_reply(hdev, skb);
+		break;
 
 	case HCI_OP_LE_SET_SCAN_PARAM:
 		hci_cc_le_set_scan_param(hdev, skb);
@@ -2868,7 +2878,7 @@
 	if (conn->state != BT_CONFIG)
 		goto unlock;
 
-	if (!ev->status) {
+	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
 		struct hci_cp_remote_name_req cp;
 		memset(&cp, 0, sizeof(cp));
 		bacpy(&cp.bdaddr, &conn->dst);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 94552b3..6f9c25b 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -4589,6 +4589,11 @@
 
 		if (!status && (chan->state == BT_CONNECTED ||
 						chan->state == BT_CONFIG)) {
+			struct sock *sk = chan->sk;
+
+			bt_sk(sk)->suspended = false;
+			sk->sk_state_change(sk);
+
 			l2cap_check_encryption(chan, encrypt);
 			l2cap_chan_unlock(chan);
 			continue;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 29122ed..04e7c17 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -592,10 +592,14 @@
 			sk->sk_state = BT_CONFIG;
 			chan->state = BT_CONFIG;
 
-		/* or for ACL link, under defer_setup time */
-		} else if (sk->sk_state == BT_CONNECT2 &&
-					bt_sk(sk)->defer_setup) {
-			err = l2cap_chan_check_security(chan);
+		/* or for ACL link */
+		} else if ((sk->sk_state == BT_CONNECT2 &&
+			   bt_sk(sk)->defer_setup) ||
+			   sk->sk_state == BT_CONNECTED) {
+			if (!l2cap_chan_check_security(chan))
+				bt_sk(sk)->suspended = true;
+			else
+				sk->sk_state_change(sk);
 		} else {
 			err = -EINVAL;
 		}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4ef275c..4bb03b1 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2884,7 +2884,7 @@
 	return 0;
 }
 
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent)
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
 {
 	struct mgmt_ev_new_link_key ev;
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ba829de..929e48aed 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -170,7 +170,7 @@
 		return -EADDRNOTAVAIL;
 
 	spin_lock_bh(&br->lock);
-	if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
+	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
 		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
 		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 		br_fdb_change_mac_address(br, addr->sa_data);
@@ -317,6 +317,9 @@
 	.ndo_add_slave		 = br_add_slave,
 	.ndo_del_slave		 = br_del_slave,
 	.ndo_fix_features        = br_fix_features,
+	.ndo_fdb_add		 = br_fdb_add,
+	.ndo_fdb_del		 = br_fdb_delete,
+	.ndo_fdb_dump		 = br_fdb_dump,
 };
 
 static void br_dev_free(struct net_device *dev)
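
A sketch, not part of the patch, of the shape of the three new FDB hooks rtnetlink dispatches to; the bridge wires them to br_fdb_add(), br_fdb_delete() and br_fdb_dump(), whose reworked signatures appear in the br_fdb.c hunks below. The example_* names are hypothetical.

#include <linux/netdevice.h>
#include <linux/neighbour.h>

static int example_fdb_add(struct ndmsg *ndm, struct net_device *dev,
			   unsigned char *addr, u16 nlh_flags)
{
	/* RTM_NEWNEIGH targeted at this device: install addr in its FDB */
	return 0;
}

static int example_fdb_del(struct ndmsg *ndm, struct net_device *dev,
			   unsigned char *addr)
{
	/* RTM_DELNEIGH targeted at this device: remove addr from its FDB */
	return 0;
}

static int example_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev, int idx)
{
	/* RTM_GETNEIGH dump: append entries for dev, return the updated index */
	return idx;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_fdb_add	= example_fdb_add,
	.ndo_fdb_del	= example_fdb_del,
	.ndo_fdb_dump	= example_fdb_dump,
};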
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 5ba0c84..d21f323 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -107,8 +107,8 @@
 				struct net_bridge_port *op;
 				list_for_each_entry(op, &br->port_list, list) {
 					if (op != p &&
-					    !compare_ether_addr(op->dev->dev_addr,
-								f->addr.addr)) {
+					    ether_addr_equal(op->dev->dev_addr,
+							     f->addr.addr)) {
 						f->dst = op;
 						goto insert;
 					}
@@ -214,8 +214,8 @@
 				struct net_bridge_port *op;
 				list_for_each_entry(op, &br->port_list, list) {
 					if (op != p &&
-					    !compare_ether_addr(op->dev->dev_addr,
-								f->addr.addr)) {
+					    ether_addr_equal(op->dev->dev_addr,
+							     f->addr.addr)) {
 						f->dst = op;
 						goto skip_delete;
 					}
@@ -237,7 +237,7 @@
 	struct net_bridge_fdb_entry *fdb;
 
 	hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
-		if (!compare_ether_addr(fdb->addr.addr, addr)) {
+		if (ether_addr_equal(fdb->addr.addr, addr)) {
 			if (unlikely(has_expired(br, fdb)))
 				break;
 			return fdb;
@@ -331,7 +331,7 @@
 	struct net_bridge_fdb_entry *fdb;
 
 	hlist_for_each_entry(fdb, h, head, hlist) {
-		if (!compare_ether_addr(fdb->addr.addr, addr))
+		if (ether_addr_equal(fdb->addr.addr, addr))
 			return fdb;
 	}
 	return NULL;
@@ -344,7 +344,7 @@
 	struct net_bridge_fdb_entry *fdb;
 
 	hlist_for_each_entry_rcu(fdb, h, head, hlist) {
-		if (!compare_ether_addr(fdb->addr.addr, addr))
+		if (ether_addr_equal(fdb->addr.addr, addr))
 			return fdb;
 	}
 	return NULL;
@@ -487,14 +487,14 @@
 	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
 	ndm->ndm_state   = fdb_to_nud(fdb);
 
-	NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
-
+	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
+		goto nla_put_failure;
 	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
 	ci.ndm_confirmed = 0;
 	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
 	ci.ndm_refcnt	 = 0;
-	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
-
+	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -535,44 +535,38 @@
 }
 
 /* Dump information about entries, in response to GETNEIGH */
-int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int br_fdb_dump(struct sk_buff *skb,
+		struct netlink_callback *cb,
+		struct net_device *dev,
+		int idx)
 {
-	struct net *net = sock_net(skb->sk);
-	struct net_device *dev;
-	int idx = 0;
+	struct net_bridge *br = netdev_priv(dev);
+	int i;
 
-	rcu_read_lock();
-	for_each_netdev_rcu(net, dev) {
-		struct net_bridge *br = netdev_priv(dev);
-		int i;
+	if (!(dev->priv_flags & IFF_EBRIDGE))
+		goto out;
 
-		if (!(dev->priv_flags & IFF_EBRIDGE))
-			continue;
+	for (i = 0; i < BR_HASH_SIZE; i++) {
+		struct hlist_node *h;
+		struct net_bridge_fdb_entry *f;
 
-		for (i = 0; i < BR_HASH_SIZE; i++) {
-			struct hlist_node *h;
-			struct net_bridge_fdb_entry *f;
+		hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+			if (idx < cb->args[0])
+				goto skip;
 
-			hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
-				if (idx < cb->args[0])
-					goto skip;
-
-				if (fdb_fill_info(skb, br, f,
-						  NETLINK_CB(cb->skb).pid,
-						  cb->nlh->nlmsg_seq,
-						  RTM_NEWNEIGH,
-						  NLM_F_MULTI) < 0)
-					break;
+			if (fdb_fill_info(skb, br, f,
+					  NETLINK_CB(cb->skb).pid,
+					  cb->nlh->nlmsg_seq,
+					  RTM_NEWNEIGH,
+					  NLM_F_MULTI) < 0)
+				break;
 skip:
-				++idx;
-			}
+			++idx;
 		}
 	}
-	rcu_read_unlock();
 
-	cb->args[0] = idx;
-
-	return skb->len;
+out:
+	return idx;
 }
 
 /* Update (create or replace) forwarding database entry */
@@ -614,43 +608,11 @@
 }
 
 /* Add new permanent fdb entry with RTM_NEWNEIGH */
-int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+int br_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+	       unsigned char *addr, u16 nlh_flags)
 {
-	struct net *net = sock_net(skb->sk);
-	struct ndmsg *ndm;
-	struct nlattr *tb[NDA_MAX+1];
-	struct net_device *dev;
 	struct net_bridge_port *p;
-	const __u8 *addr;
-	int err;
-
-	ASSERT_RTNL();
-	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
-	if (err < 0)
-		return err;
-
-	ndm = nlmsg_data(nlh);
-	if (ndm->ndm_ifindex == 0) {
-		pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n");
-		return -EINVAL;
-	}
-
-	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
-	if (dev == NULL) {
-		pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n");
-		return -ENODEV;
-	}
-
-	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
-		pr_info("bridge: RTM_NEWNEIGH with invalid address\n");
-		return -EINVAL;
-	}
-
-	addr = nla_data(tb[NDA_LLADDR]);
-	if (!is_valid_ether_addr(addr)) {
-		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
-		return -EINVAL;
-	}
+	int err = 0;
 
 	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
 		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
@@ -670,14 +632,14 @@
 		rcu_read_unlock();
 	} else {
 		spin_lock_bh(&p->br->hash_lock);
-		err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
+		err = fdb_add_entry(p, addr, ndm->ndm_state, nlh_flags);
 		spin_unlock_bh(&p->br->hash_lock);
 	}
 
 	return err;
 }
 
-static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
+static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr)
 {
 	struct net_bridge *br = p->br;
 	struct hlist_head *head = &br->hash[br_mac_hash(addr)];
@@ -692,40 +654,12 @@
 }
 
 /* Remove neighbor entry with RTM_DELNEIGH */
-int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
+		  unsigned char *addr)
 {
-	struct net *net = sock_net(skb->sk);
-	struct ndmsg *ndm;
 	struct net_bridge_port *p;
-	struct nlattr *llattr;
-	const __u8 *addr;
-	struct net_device *dev;
 	int err;
 
-	ASSERT_RTNL();
-	if (nlmsg_len(nlh) < sizeof(*ndm))
-		return -EINVAL;
-
-	ndm = nlmsg_data(nlh);
-	if (ndm->ndm_ifindex == 0) {
-		pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n");
-		return -EINVAL;
-	}
-
-	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
-	if (dev == NULL) {
-		pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n");
-		return -ENODEV;
-	}
-
-	llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
-	if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
-		pr_info("bridge: RTM_DELNEIGH with invalid address\n");
-		return -EINVAL;
-	}
-
-	addr = nla_data(llattr);
-
 	p = br_port_get_rtnl(dev);
 	if (p == NULL) {
 		pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
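
Not part of the patch: the fill-function pattern the NLA_PUT() conversions above (and the br_netlink.c ones below) follow. nla_put() returns 0 on success and a negative error once the skb runs out of tail room, so the macro's hidden jump to nla_put_failure becomes an explicit test. The example_fill() name is hypothetical.

#include <net/netlink.h>
#include <linux/neighbour.h>

static int example_fill(struct sk_buff *skb, const u8 *addr,
			const struct nda_cacheinfo *ci)
{
	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(*ci), ci))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}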
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 61f6534..e9466d4 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -34,7 +34,7 @@
 		p->state == BR_STATE_FORWARDING);
 }
 
-static inline unsigned packet_length(const struct sk_buff *skb)
+static inline unsigned int packet_length(const struct sk_buff *skb)
 {
 	return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
 }
@@ -47,6 +47,7 @@
 		kfree_skb(skb);
 	} else {
 		skb_push(skb, ETH_HLEN);
+		br_drop_fake_rtable(skb);
 		dev_queue_xmit(skb);
 	}
 
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5a31731..76f15fd 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -216,7 +216,7 @@
 		}
 		/* fall through */
 	case BR_STATE_LEARNING:
-		if (!compare_ether_addr(p->br->dev->dev_addr, dest))
+		if (ether_addr_equal(p->br->dev->dev_addr, dest))
 			skb->pkt_type = PACKET_HOST;
 
 		NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 27ca25e..b665812 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -36,6 +36,8 @@
 #define mlock_dereference(X, br) \
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
+static void br_multicast_start_querier(struct net_bridge *br);
+
 #if IS_ENABLED(CONFIG_IPV6)
 static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
@@ -458,8 +460,8 @@
 	hopopt[3] = 2;				/* Length of RA Option */
 	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
 	hopopt[5] = 0;
-	hopopt[6] = IPV6_TLV_PAD0;		/* Pad0 */
-	hopopt[7] = IPV6_TLV_PAD0;		/* Pad0 */
+	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
+	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */
 
 	skb_put(skb, sizeof(*ip6h) + 8);
 
@@ -512,8 +514,8 @@
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
 	struct hlist_node *p;
-	unsigned count = 0;
-	unsigned max;
+	unsigned int count = 0;
+	unsigned int max;
 	int elasticity;
 	int err;
 
@@ -740,6 +742,20 @@
 {
 }
 
+static void br_multicast_querier_expired(unsigned long data)
+{
+	struct net_bridge *br = (void *)data;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) || br->multicast_disabled)
+		goto out;
+
+	br_multicast_start_querier(br);
+
+out:
+	spin_unlock(&br->multicast_lock);
+}
+
 static void __br_multicast_send_query(struct net_bridge *br,
 				      struct net_bridge_port *port,
 				      struct br_ip *ip)
@@ -766,6 +782,7 @@
 	struct br_ip br_group;
 
 	if (!netif_running(br->dev) || br->multicast_disabled ||
+	    !br->multicast_querier ||
 	    timer_pending(&br->multicast_querier_timer))
 		return;
 
@@ -1281,8 +1298,8 @@
 	struct sk_buff *skb2 = skb;
 	const struct iphdr *iph;
 	struct igmphdr *ih;
-	unsigned len;
-	unsigned offset;
+	unsigned int len;
+	unsigned int offset;
 	int err;
 
 	/* We treat OOM as packet loss for now. */
@@ -1382,7 +1399,7 @@
 	u8 icmp6_type;
 	u8 nexthdr;
 	__be16 frag_off;
-	unsigned len;
+	unsigned int len;
 	int offset;
 	int err;
 
@@ -1548,6 +1565,7 @@
 	br->hash_max = 512;
 
 	br->multicast_router = 1;
+	br->multicast_querier = 0;
 	br->multicast_last_member_count = 2;
 	br->multicast_startup_query_count = 2;
 
@@ -1562,7 +1580,7 @@
 	setup_timer(&br->multicast_router_timer,
 		    br_multicast_local_router_expired, 0);
 	setup_timer(&br->multicast_querier_timer,
-		    br_multicast_local_router_expired, 0);
+		    br_multicast_querier_expired, (unsigned long)br);
 	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
 		    (unsigned long)br);
 }
@@ -1689,9 +1707,23 @@
 	return err;
 }
 
-int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+static void br_multicast_start_querier(struct net_bridge *br)
 {
 	struct net_bridge_port *port;
+
+	br_multicast_open(br);
+
+	list_for_each_entry(port, &br->port_list, list) {
+		if (port->state == BR_STATE_DISABLED ||
+		    port->state == BR_STATE_BLOCKING)
+			continue;
+
+		__br_multicast_enable_port(port);
+	}
+}
+
+int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+{
 	int err = 0;
 	struct net_bridge_mdb_htable *mdb;
 
@@ -1721,14 +1753,7 @@
 			goto rollback;
 	}
 
-	br_multicast_open(br);
-	list_for_each_entry(port, &br->port_list, list) {
-		if (port->state == BR_STATE_DISABLED ||
-		    port->state == BR_STATE_BLOCKING)
-			continue;
-
-		__br_multicast_enable_port(port);
-	}
+	br_multicast_start_querier(br);
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
@@ -1736,6 +1761,24 @@
 	return err;
 }
 
+int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
+{
+	val = !!val;
+
+	spin_lock_bh(&br->multicast_lock);
+	if (br->multicast_querier == val)
+		goto unlock;
+
+	br->multicast_querier = val;
+	if (val)
+		br_multicast_start_querier(br);
+
+unlock:
+	spin_unlock_bh(&br->multicast_lock);
+
+	return 0;
+}
+
 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
 {
 	int err = -ENOENT;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index dec4f38..e41456bd 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -54,12 +54,14 @@
 static int brnf_call_arptables __read_mostly = 1;
 static int brnf_filter_vlan_tagged __read_mostly = 0;
 static int brnf_filter_pppoe_tagged __read_mostly = 0;
+static int brnf_pass_vlan_indev __read_mostly = 0;
 #else
 #define brnf_call_iptables 1
 #define brnf_call_ip6tables 1
 #define brnf_call_arptables 1
 #define brnf_filter_vlan_tagged 0
 #define brnf_filter_pppoe_tagged 0
+#define brnf_pass_vlan_indev 0
 #endif
 
 #define IS_IP(skb) \
@@ -156,7 +158,7 @@
 	rt->dst.dev = br->dev;
 	rt->dst.path = &rt->dst;
 	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-	rt->dst.flags	= DST_NOXFRM | DST_NOPEER;
+	rt->dst.flags	= DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
 	rt->dst.ops = &fake_dst_ops;
 }
 
@@ -503,6 +505,19 @@
 	return 0;
 }
 
+static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct net_device *vlan, *br;
+
+	br = bridge_parent(dev);
+	if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
+		return br;
+
+	vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK);
+
+	return vlan ? vlan : br;
+}
+
 /* Some common code for IPv4/IPv6 */
 static struct net_device *setup_pre_routing(struct sk_buff *skb)
 {
@@ -515,7 +530,7 @@
 
 	nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
 	nf_bridge->physindev = skb->dev;
-	skb->dev = bridge_parent(skb->dev);
+	skb->dev = brnf_get_logical_dev(skb, skb->dev);
 	if (skb->protocol == htons(ETH_P_8021Q))
 		nf_bridge->mask |= BRNF_8021Q;
 	else if (skb->protocol == htons(ETH_P_PPP_SES))
@@ -543,7 +558,7 @@
 		int optlen = nh[off + 1] + 2;
 
 		switch (nh[off]) {
-		case IPV6_TLV_PAD0:
+		case IPV6_TLV_PAD1:
 			optlen = 1;
 			break;
 
@@ -694,11 +709,7 @@
 				   const struct net_device *out,
 				   int (*okfn)(struct sk_buff *))
 {
-	struct rtable *rt = skb_rtable(skb);
-
-	if (rt && rt == bridge_parent_rtable(in))
-		skb_dst_drop(skb);
-
+	br_drop_fake_rtable(skb);
 	return NF_ACCEPT;
 }
 
@@ -778,7 +789,7 @@
 	else
 		skb->protocol = htons(ETH_P_IPV6);
 
-	NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
+	NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent,
 		br_nf_forward_finish);
 
 	return NF_STOLEN;
@@ -1006,12 +1017,13 @@
 		.mode		= 0644,
 		.proc_handler	= brnf_sysctl_call_tables,
 	},
-	{ }
-};
-
-static struct ctl_path brnf_path[] = {
-	{ .procname = "net", },
-	{ .procname = "bridge", },
+	{
+		.procname	= "bridge-nf-pass-vlan-input-dev",
+		.data		= &brnf_pass_vlan_indev,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= brnf_sysctl_call_tables,
+	},
 	{ }
 };
 #endif
@@ -1030,7 +1042,7 @@
 		return ret;
 	}
 #ifdef CONFIG_SYSCTL
-	brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
+	brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
 	if (brnf_sysctl_header == NULL) {
 		printk(KERN_WARNING
 		       "br_netfilter: can't register to sysctl.\n");
@@ -1047,7 +1059,7 @@
 {
 	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
 #ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(brnf_sysctl_header);
+	unregister_net_sysctl_table(brnf_sysctl_header);
 #endif
 	dst_entries_destroy(&fake_dst_ops);
 }
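
A minimal userspace sketch (not part of the patch; the proc path is inferred
from the register_net_sysctl(&init_net, "net/bridge", ...) call above) showing
how the new bridge-nf-pass-vlan-input-dev knob can be toggled:

	/* make netfilter see the VLAN device as indev instead of the bridge */
	#include <stdio.h>

	int main(void)
	{
		const char *p = "/proc/sys/net/bridge/bridge-nf-pass-vlan-input-dev";
		FILE *f = fopen(p, "w");

		if (!f) {
			perror(p);
			return 1;
		}
		fputs("1\n", f);	/* 0 keeps the old behaviour */
		fclose(f);
		return 0;
	}
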
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a1daf82..2080485 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -60,20 +60,17 @@
 	hdr->ifi_flags = dev_get_flags(dev);
 	hdr->ifi_change = 0;
 
-	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
-	NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex);
-	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
-	NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate);
-
-	if (dev->addr_len)
-		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-
-	if (dev->ifindex != dev->iflink)
-		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
-	if (event == RTM_NEWLINK)
-		NLA_PUT_U8(skb, IFLA_PROTINFO, port->state);
-
+	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
+	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
+	    (dev->addr_len &&
+	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
+	    (dev->ifindex != dev->iflink &&
+	     nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+	    (event == RTM_NEWLINK &&
+	     nla_put_u8(skb, IFLA_PROTINFO, port->state)))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -91,7 +88,7 @@
 	int err = -ENOBUFS;
 
 	br_debug(port->br, "port %u(%s) event %d\n",
-		 (unsigned)port->port_no, port->dev->name, event);
+		 (unsigned int)port->port_no, port->dev->name, event);
 
 	skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
 	if (skb == NULL)
@@ -235,18 +232,6 @@
 			      br_rtm_setlink, NULL, NULL);
 	if (err)
 		goto err3;
-	err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH,
-			      br_fdb_add, NULL, NULL);
-	if (err)
-		goto err3;
-	err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH,
-			      br_fdb_delete, NULL, NULL);
-	if (err)
-		goto err3;
-	err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH,
-			      NULL, br_fdb_dump, NULL);
-	if (err)
-		goto err3;
 
 	return 0;
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index e1d8822..1a8ad4f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -224,6 +224,7 @@
 	unsigned char			multicast_router;
 
 	u8				multicast_disabled:1;
+	u8				multicast_querier:1;
 
 	u32				hash_elasticity;
 	u32				hash_max;
@@ -359,9 +360,18 @@
 extern void br_fdb_update(struct net_bridge *br,
 			  struct net_bridge_port *source,
 			  const unsigned char *addr);
-extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
-extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+
+extern int br_fdb_delete(struct ndmsg *ndm,
+			 struct net_device *dev,
+			 unsigned char *addr);
+extern int br_fdb_add(struct ndmsg *nlh,
+		      struct net_device *dev,
+		      unsigned char *addr,
+		      u16 nlh_flags);
+extern int br_fdb_dump(struct sk_buff *skb,
+		       struct netlink_callback *cb,
+		       struct net_device *dev,
+		       int idx);
 
 /* br_forward.c */
 extern void br_deliver(const struct net_bridge_port *to,
@@ -417,6 +427,7 @@
 extern int br_multicast_set_port_router(struct net_bridge_port *p,
 					unsigned long val);
 extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
 extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
 
 static inline bool br_multicast_is_router(struct net_bridge *br)
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 05ed9bc..0c0fe36 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -29,10 +29,9 @@
 #define BR_MIN_PATH_COST	1
 #define BR_MAX_PATH_COST	65535
 
-struct br_config_bpdu
-{
-	unsigned	topology_change:1;
-	unsigned	topology_change_ack:1;
+struct br_config_bpdu {
+	unsigned int	topology_change:1;
+	unsigned int	topology_change_ack:1;
 	bridge_id	root;
 	int		root_path_cost;
 	bridge_id	bridge_id;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 8c836d9..af9a120 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -32,7 +32,7 @@
 void br_log_state(const struct net_bridge_port *p)
 {
 	br_info(p->br, "port %u(%s) entered %s state\n",
-		(unsigned) p->port_no, p->dev->name,
+		(unsigned int) p->port_no, p->dev->name,
 		br_port_state_names[p->state]);
 }
 
@@ -478,7 +478,7 @@
 {
 	if (br_is_designated_port(p)) {
 		br_info(p->br, "port %u(%s) received tcn bpdu\n",
-			(unsigned) p->port_no, p->dev->name);
+			(unsigned int) p->port_no, p->dev->name);
 
 		br_topology_change_detection(p->br);
 		br_topology_change_acknowledge(p);
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index e16aade..fd30a60 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -167,7 +167,7 @@
 	if (p->state == BR_STATE_DISABLED)
 		goto out;
 
-	if (compare_ether_addr(dest, br->group_addr) != 0)
+	if (!ether_addr_equal(dest, br->group_addr))
 		goto out;
 
 	buf = skb_pull(skb, 3);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index f494496..9d5a414 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -178,7 +178,7 @@
 /* called under bridge lock */
 void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
 {
-	/* should be aligned on 2 bytes for compare_ether_addr() */
+	/* should be aligned on 2 bytes for ether_addr_equal() */
 	unsigned short oldaddr_aligned[ETH_ALEN >> 1];
 	unsigned char *oldaddr = (unsigned char *)oldaddr_aligned;
 	struct net_bridge_port *p;
@@ -191,12 +191,11 @@
 	memcpy(br->dev->dev_addr, addr, ETH_ALEN);
 
 	list_for_each_entry(p, &br->port_list, list) {
-		if (!compare_ether_addr(p->designated_bridge.addr, oldaddr))
+		if (ether_addr_equal(p->designated_bridge.addr, oldaddr))
 			memcpy(p->designated_bridge.addr, addr, ETH_ALEN);
 
-		if (!compare_ether_addr(p->designated_root.addr, oldaddr))
+		if (ether_addr_equal(p->designated_root.addr, oldaddr))
 			memcpy(p->designated_root.addr, addr, ETH_ALEN);
-
 	}
 
 	br_configuration_update(br);
@@ -205,7 +204,7 @@
 		br_become_root_bridge(br);
 }
 
-/* should be aligned on 2 bytes for compare_ether_addr() */
+/* should be aligned on 2 bytes for ether_addr_equal() */
 static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
 
 /* called under bridge lock */
@@ -227,7 +226,7 @@
 
 	}
 
-	if (compare_ether_addr(br->bridge_id.addr, addr) == 0)
+	if (ether_addr_equal(br->bridge_id.addr, addr))
 		return false;	/* no change */
 
 	br_stp_change_bridge_id(br, addr);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 58de2a0..a6747e6 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -56,7 +56,7 @@
 		return;
 
 	br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n",
-		(unsigned) p->port_no, p->dev->name,
+		(unsigned int) p->port_no, p->dev->name,
 		id->prio[0], id->prio[1], &id->addr);
 
 	/*
@@ -84,7 +84,7 @@
 	struct net_bridge *br = p->br;
 
 	br_debug(br, "port %u(%s) forward delay timer\n",
-		 (unsigned) p->port_no, p->dev->name);
+		 (unsigned int) p->port_no, p->dev->name);
 	spin_lock(&br->lock);
 	if (p->state == BR_STATE_LISTENING) {
 		p->state = BR_STATE_LEARNING;
@@ -131,7 +131,7 @@
 	struct net_bridge_port *p = (struct net_bridge_port *) arg;
 
 	br_debug(p->br, "port %u(%s) hold timer expired\n",
-		 (unsigned) p->port_no, p->dev->name);
+		 (unsigned int) p->port_no, p->dev->name);
 
 	spin_lock(&p->br->lock);
 	if (p->config_pending)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c236c0e..c5c0593 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -297,7 +297,7 @@
 				const char *buf, size_t len)
 {
 	struct net_bridge *br = to_bridge(d);
-	unsigned new_addr[6];
+	unsigned int new_addr[6];
 	int i;
 
 	if (!capable(CAP_NET_ADMIN))
@@ -379,6 +379,23 @@
 static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
 		   show_multicast_snooping, store_multicast_snooping);
 
+static ssize_t show_multicast_querier(struct device *d,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%d\n", br->multicast_querier);
+}
+
+static ssize_t store_multicast_querier(struct device *d,
+				       struct device_attribute *attr,
+				       const char *buf, size_t len)
+{
+	return store_bridge_parm(d, buf, len, br_multicast_set_querier);
+}
+static DEVICE_ATTR(multicast_querier, S_IRUGO | S_IWUSR,
+		   show_multicast_querier, store_multicast_querier);
+
 static ssize_t show_hash_elasticity(struct device *d,
 				    struct device_attribute *attr, char *buf)
 {
@@ -702,6 +719,7 @@
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	&dev_attr_multicast_router.attr,
 	&dev_attr_multicast_snooping.attr,
+	&dev_attr_multicast_querier.attr,
 	&dev_attr_hash_elasticity.attr,
 	&dev_attr_hash_max.attr,
 	&dev_attr_multicast_last_member_count.attr,
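
A userspace sketch (assumed, not part of the patch) for turning on the new
multicast_querier attribute; the file sits in the existing per-bridge sysfs
directory, e.g. /sys/class/net/br0/bridge/:

	#include <stdio.h>

	int main(void)
	{
		/* default is 0: never act as an IGMP/MLD querier ourselves */
		FILE *f = fopen("/sys/class/net/br0/bridge/multicast_querier", "w");

		if (!f) {
			perror("multicast_querier");
			return 1;
		}
		fputs("1\n", f);
		fclose(f);
		return 0;
	}
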
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 5b33a2e..071d872 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -164,8 +164,8 @@
 	    !(info->bitmask & EBT_STP_MASK))
 		return -EINVAL;
 	/* Make sure the match only receives stp frames */
-	if (compare_ether_addr(e->destmac, bridge_ula) ||
-	    compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
+	if (!ether_addr_equal(e->destmac, bridge_ula) ||
+	    !ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
 		return -EINVAL;
 
 	return 0;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 5016fa5..fb89443 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -19,7 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
 #include <linux/caif/caif_socket.h>
-#include <linux/atomic.h>
+#include <linux/pkt_sched.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 #include <net/caif/caif_layer.h>
@@ -130,11 +130,10 @@
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-		(unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
-		if (net_ratelimit())
-			pr_debug("sending flow OFF (queue len = %d %d)\n",
-					atomic_read(&cf_sk->sk.sk_rmem_alloc),
-					sk_rcvbuf_lowwater(cf_sk));
+		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
+		net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
+				    atomic_read(&cf_sk->sk.sk_rmem_alloc),
+				    sk_rcvbuf_lowwater(cf_sk));
 		set_rx_flow_off(cf_sk);
 		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
 	}
@@ -144,8 +143,7 @@
 		return err;
 	if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
 		set_rx_flow_off(cf_sk);
-		if (net_ratelimit())
-			pr_debug("sending flow OFF due to rmem_schedule\n");
+		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
 		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
 	}
 	skb->dev = NULL;
@@ -505,6 +503,7 @@
 
 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
 	memset(skb->cb, 0, sizeof(struct caif_payload_info));
+	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
 
 	if (cf_sk->layer.dn == NULL) {
 		kfree_skb(skb);
@@ -1062,6 +1061,18 @@
 	/* Store the protocol */
 	sk->sk_protocol = (unsigned char) protocol;
 
+	/* Initialize default priority for well-known cases */
+	switch (protocol) {
+	case CAIFPROTO_AT:
+		sk->sk_priority = TC_PRIO_CONTROL;
+		break;
+	case CAIFPROTO_RFM:
+		sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
+		break;
+	default:
+		sk->sk_priority = TC_PRIO_BESTEFFORT;
+	}
+
 	/*
 	 * Lock in order to try to stop someone from opening the socket
 	 * too early.
@@ -1081,7 +1092,6 @@
 	set_rx_flow_on(cf_sk);
 
 	/* Set default options on configuration */
-	cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
 	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
 	cf_sk->conn_req.protocol = protocol;
 	release_sock(&cf_sk->sk);
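
Since cfpkt_set_prio() now copies sk_priority into every outgoing CAIF packet,
an application can still override the per-protocol defaults above with the
ordinary SO_PRIORITY socket option.  A hedged userspace sketch (assuming the
CAIF uapi headers are installed and AF_CAIF is known to libc):

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/pkt_sched.h>
	#include <linux/caif/caif_socket.h>

	int main(void)
	{
		int prio = TC_PRIO_INTERACTIVE;
		int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_DATAGRAM);

		if (fd < 0) {
			perror("socket(AF_CAIF)");
			return 1;
		}
		if (setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)) < 0)
			perror("SO_PRIORITY");
		return 0;
	}
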
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 5cf5222..047cd0e 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -9,6 +9,7 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/pkt_sched.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfctrl.h>
@@ -189,6 +190,7 @@
 	cfctrl->serv.dev_info.id = physlinkid;
 	cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
 	cfpkt_addbdy(pkt, physlinkid);
+	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
 	dn->transmit(dn, pkt);
 }
 
@@ -281,6 +283,7 @@
 	 *	might arrive with the newly allocated channel ID.
 	 */
 	cfpkt_info(pkt)->dev_info->id = param->phyid;
+	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
 	ret =
 	    dn->transmit(dn, pkt);
 	if (ret < 0) {
@@ -314,6 +317,7 @@
 	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
 	cfpkt_addbdy(pkt, channelid);
 	init_info(cfpkt_info(pkt), cfctrl);
+	cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
 	ret =
 	    dn->transmit(dn, pkt);
 #ifndef CAIF_NO_LOOP
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index e335ba8..863dedd 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -381,6 +381,7 @@
 	memcpy(skb2->data, split, len2nd);
 	skb2->tail += len2nd;
 	skb2->len += len2nd;
+	skb2->priority = skb->priority;
 	return skb_to_pkt(skb2);
 }
 
@@ -394,3 +395,9 @@
 	return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
 }
 EXPORT_SYMBOL(cfpkt_info);
+
+void cfpkt_set_prio(struct cfpkt *pkt, int prio)
+{
+	pkt_to_skb(pkt)->priority = prio;
+}
+EXPORT_SYMBOL(cfpkt_set_prio);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 4aa33d4..dd485f6 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -11,6 +11,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pkt_sched.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
@@ -120,6 +121,7 @@
 			info->channel_id = service->layer.id;
 			info->hdr_len = 1;
 			info->dev_info = &service->dev_info;
+			cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
 			return layr->dn->transmit(layr->dn, pkt);
 		}
 	case CAIF_MODEMCMD_FLOW_OFF_REQ:
@@ -140,6 +142,7 @@
 			info->channel_id = service->layer.id;
 			info->hdr_len = 1;
 			info->dev_info = &service->dev_info;
+			cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
 			return layr->dn->transmit(layr->dn, pkt);
 		}
 	default:
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index d09340e..69771c0 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -424,14 +424,14 @@
 	struct chnl_net *priv;
 	u8 loop;
 	priv = netdev_priv(dev);
-	NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
-		    priv->conn_req.sockaddr.u.dgm.connection_id);
-	NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
-		    priv->conn_req.sockaddr.u.dgm.connection_id);
+	if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID,
+			priv->conn_req.sockaddr.u.dgm.connection_id) ||
+	    nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID,
+			priv->conn_req.sockaddr.u.dgm.connection_id))
+		goto nla_put_failure;
 	loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
-	NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);
-
-
+	if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop))
+		goto nla_put_failure;
 	return 0;
 nla_put_failure:
 	return -EMSGSIZE;
diff --git a/net/can/gw.c b/net/can/gw.c
index 3d79b12..b41acf2 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -66,7 +66,7 @@
 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
 MODULE_ALIAS("can-gw");
 
-HLIST_HEAD(cgw_list);
+static HLIST_HEAD(cgw_list);
 static struct notifier_block notifier;
 
 static struct kmem_cache *cgw_cache __read_mostly;
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index e02da7a..f459e93 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -13,7 +13,7 @@
  */
 struct ceph_x_ticket_handler {
 	struct rb_node node;
-	unsigned service;
+	unsigned int service;
 
 	struct ceph_crypto_key session_key;
 	struct ceph_timespec validity;
@@ -27,7 +27,7 @@
 
 struct ceph_x_authorizer {
 	struct ceph_buffer *buf;
-	unsigned service;
+	unsigned int service;
 	u64 nonce;
 	char reply_buf[128];  /* big enough for encrypted blob */
 };
@@ -38,7 +38,7 @@
 	bool starting;
 	u64 server_challenge;
 
-	unsigned have_keys;
+	unsigned int have_keys;
 	struct rb_root ticket_handlers;
 
 	struct ceph_x_authorizer auth_authorizer;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index cc91319..a776f75 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -441,8 +441,8 @@
  * create a fresh client instance
  */
 struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
-				       unsigned supported_features,
-				       unsigned required_features)
+				       unsigned int supported_features,
+				       unsigned int required_features)
 {
 	struct ceph_client *client;
 	struct ceph_entity_addr *myaddr = NULL;
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 0a1b53b..67bb1f1 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -20,7 +20,7 @@
 		c = c - a;  c = c - b;  c = c ^ (b >> 15);	\
 	} while (0)
 
-unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
+unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length)
 {
 	const unsigned char *k = (const unsigned char *)str;
 	__u32 a, b, c;  /* the internal state */
@@ -81,7 +81,7 @@
 /*
  * linux dcache hash
  */
-unsigned ceph_str_hash_linux(const char *str, unsigned length)
+unsigned int ceph_str_hash_linux(const char *str, unsigned int length)
 {
 	unsigned long hash = 0;
 	unsigned char c;
@@ -94,7 +94,7 @@
 }
 
 
-unsigned ceph_str_hash(int type, const char *s, unsigned len)
+unsigned int ceph_str_hash(int type, const char *s, unsigned int len)
 {
 	switch (type) {
 	case CEPH_STR_HASH_LINUX:
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index b79747c..363f8f7 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -20,6 +20,7 @@
 
 #include <linux/crush/crush.h>
 #include <linux/crush/hash.h>
+#include <linux/crush/mapper.h>
 
 /*
  * Implement the core CRUSH mapping algorithm.
@@ -68,8 +69,8 @@
 static int bucket_perm_choose(struct crush_bucket *bucket,
 			      int x, int r)
 {
-	unsigned pr = r % bucket->size;
-	unsigned i, s;
+	unsigned int pr = r % bucket->size;
+	unsigned int i, s;
 
 	/* start a new permutation if @x has changed */
 	if (bucket->perm_x != x || bucket->perm_n == 0) {
@@ -100,13 +101,13 @@
 	for (i = 0; i < bucket->perm_n; i++)
 		dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]);
 	while (bucket->perm_n <= pr) {
-		unsigned p = bucket->perm_n;
+		unsigned int p = bucket->perm_n;
 		/* no point in swapping the final entry */
 		if (p < bucket->size - 1) {
 			i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
 				(bucket->size - p);
 			if (i) {
-				unsigned t = bucket->perm[p + i];
+				unsigned int t = bucket->perm[p + i];
 				bucket->perm[p + i] = bucket->perm[p];
 				bucket->perm[p] = t;
 			}
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 27d4ea31..54b531a 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -94,9 +94,9 @@
 	mutex_lock(&monc->mutex);
 
 	if (monc->have_mdsmap)
-		seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
+		seq_printf(s, "have mdsmap %u\n", (unsigned int)monc->have_mdsmap);
 	if (monc->have_osdmap)
-		seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
+		seq_printf(s, "have osdmap %u\n", (unsigned int)monc->have_osdmap);
 	if (monc->want_next_osdmap)
 		seq_printf(s, "want next osdmap\n");
 
@@ -146,7 +146,7 @@
 
 		if (req->r_reassert_version.epoch)
 			seq_printf(s, "\t%u'%llu",
-			   (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
+			   (unsigned int)le32_to_cpu(req->r_reassert_version.epoch),
 			   le64_to_cpu(req->r_reassert_version.version));
 		else
 			seq_printf(s, "\t");
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index f0993af..36fa6bf 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -699,7 +699,7 @@
 				 struct ceph_connection *con,
 				 int include_banner)
 {
-	unsigned global_seq = get_global_seq(con->msgr, 0);
+	unsigned int global_seq = get_global_seq(con->msgr, 0);
 	int proto;
 
 	switch (con->peer_name.type) {
@@ -816,7 +816,7 @@
 static int write_partial_msg_pages(struct ceph_connection *con)
 {
 	struct ceph_msg *msg = con->out_msg;
-	unsigned data_len = le32_to_cpu(msg->hdr.data_len);
+	unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
 	size_t len;
 	bool do_datacrc = !con->msgr->nocrc;
 	int ret;
@@ -1554,7 +1554,7 @@
 
 static int read_partial_message_pages(struct ceph_connection *con,
 				      struct page **pages,
-				      unsigned data_len, bool do_datacrc)
+				      unsigned int data_len, bool do_datacrc)
 {
 	void *p;
 	int ret;
@@ -1587,7 +1587,7 @@
 #ifdef CONFIG_BLOCK
 static int read_partial_message_bio(struct ceph_connection *con,
 				    struct bio **bio_iter, int *bio_seg,
-				    unsigned data_len, bool do_datacrc)
+				    unsigned int data_len, bool do_datacrc)
 {
 	struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
 	void *p;
@@ -1629,7 +1629,7 @@
 	struct ceph_msg *m = con->in_msg;
 	int ret;
 	int to, left;
-	unsigned front_len, middle_len, data_len;
+	unsigned int front_len, middle_len, data_len;
 	bool do_datacrc = !con->msgr->nocrc;
 	int skip;
 	u64 seq;
@@ -2345,9 +2345,9 @@
 {
 	mutex_lock(&con->mutex);
 	if (con->in_msg && con->in_msg == msg) {
-		unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
-		unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
-		unsigned data_len = le32_to_cpu(con->in_hdr.data_len);
+		unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
+		unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
+		unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
 
 		/* skip rest of message */
 		dout("con_revoke_pages %p msg %p revoked\n", con, msg);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 1845cde..10d6008 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -168,7 +168,7 @@
  */
 static void __schedule_delayed(struct ceph_mon_client *monc)
 {
-	unsigned delay;
+	unsigned int delay;
 
 	if (monc->cur_mon < 0 || __sub_expired(monc))
 		delay = 10 * HZ;
@@ -184,7 +184,7 @@
 static void __send_subscribe(struct ceph_mon_client *monc)
 {
 	dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
-	     (unsigned)monc->sub_sent, __sub_expired(monc),
+	     (unsigned int)monc->sub_sent, __sub_expired(monc),
 	     monc->want_next_osdmap);
 	if ((__sub_expired(monc) && !monc->sub_sent) ||
 	    monc->want_next_osdmap == 1) {
@@ -201,7 +201,7 @@
 
 		if (monc->want_next_osdmap) {
 			dout("__send_subscribe to 'osdmap' %u\n",
-			     (unsigned)monc->have_osdmap);
+			     (unsigned int)monc->have_osdmap);
 			ceph_encode_string(&p, end, "osdmap", 6);
 			i = p;
 			i->have = cpu_to_le64(monc->have_osdmap);
@@ -211,7 +211,7 @@
 		}
 		if (monc->want_mdsmap) {
 			dout("__send_subscribe to 'mdsmap' %u+\n",
-			     (unsigned)monc->have_mdsmap);
+			     (unsigned int)monc->have_mdsmap);
 			ceph_encode_string(&p, end, "mdsmap", 6);
 			i = p;
 			i->have = cpu_to_le64(monc->have_mdsmap);
@@ -236,7 +236,7 @@
 static void handle_subscribe_ack(struct ceph_mon_client *monc,
 				 struct ceph_msg *msg)
 {
-	unsigned seconds;
+	unsigned int seconds;
 	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;
 
 	if (msg->front.iov_len < sizeof(*h))
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 5e25405..1b0ef3c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1214,7 +1214,7 @@
 	}
 
 	if (!req->r_got_reply) {
-		unsigned bytes;
+		unsigned int bytes;
 
 		req->r_result = le32_to_cpu(rhead->result);
 		bytes = le32_to_cpu(msg->hdr.data_len);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 29ad46e..56e561a 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -38,7 +38,7 @@
 
 /* maps */
 
-static int calc_bits_of(unsigned t)
+static int calc_bits_of(unsigned int t)
 {
 	int b = 0;
 	while (t) {
@@ -154,7 +154,7 @@
 	magic = ceph_decode_32(p);
 	if (magic != CRUSH_MAGIC) {
 		pr_err("crush_decode magic %x != current %x\n",
-		       (unsigned)magic, (unsigned)CRUSH_MAGIC);
+		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
 		goto bad;
 	}
 	c->max_buckets = ceph_decode_32(p);
@@ -460,7 +460,7 @@
 
 static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
 {
-	unsigned n, m;
+	unsigned int n, m;
 
 	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
 	calc_pg_masks(pi);
@@ -970,7 +970,7 @@
 	objsetno = stripeno / su_per_object;
 
 	*ono = objsetno * sc + stripepos;
-	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);
+	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);
 
 	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
 	t = off;
@@ -998,12 +998,12 @@
 			    struct ceph_file_layout *fl,
 			    struct ceph_osdmap *osdmap)
 {
-	unsigned num, num_mask;
+	unsigned int num, num_mask;
 	struct ceph_pg pgid;
 	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
 	int poolid = le32_to_cpu(fl->fl_pg_pool);
 	struct ceph_pg_pool_info *pool;
-	unsigned ps;
+	unsigned int ps;
 
 	BUG_ON(!osdmap);
 
@@ -1045,7 +1045,7 @@
 	struct ceph_pg_mapping *pg;
 	struct ceph_pg_pool_info *pool;
 	int ruleno;
-	unsigned poolid, ps, pps, t;
+	unsigned int poolid, ps, pps, t;
 	int preferred;
 
 	poolid = le32_to_cpu(pgid.pool);
diff --git a/net/compat.c b/net/compat.c
index e055708..e240441 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -741,13 +741,13 @@
 };
 #undef AL
 
-asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
+asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
 {
 	return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
 asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
-				    unsigned vlen, unsigned int flags)
+				    unsigned int vlen, unsigned int flags)
 {
 	return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 			      flags | MSG_CMSG_COMPAT);
@@ -758,20 +758,20 @@
 	return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
-asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags)
+asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
 {
 	return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT);
 }
 
 asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
-				    unsigned flags, struct sockaddr __user *addr,
+				    unsigned int flags, struct sockaddr __user *addr,
 				    int __user *addrlen)
 {
 	return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen);
 }
 
 asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
-				    unsigned vlen, unsigned int flags,
+				    unsigned int vlen, unsigned int flags,
 				    struct compat_timespec __user *timeout)
 {
 	int datagrams;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e4fbfd6..ae6acf6 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -65,7 +65,7 @@
 	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
 }
 
-static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
 				  void *key)
 {
 	unsigned long bits = (unsigned long)key;
@@ -158,7 +158,7 @@
  *	quite explicitly by POSIX 1003.1g, don't change them without having
  *	the standard around please.
  */
-struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 				    int *peeked, int *off, int *err)
 {
 	struct sk_buff *skb;
@@ -216,7 +216,7 @@
 }
 EXPORT_SYMBOL(__skb_recv_datagram);
 
-struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
 				  int noblock, int *err)
 {
 	int peeked, off = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index 9bb8f87..cd09819 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -208,7 +208,8 @@
 
 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
 {
-	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
+	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
+
 	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
 }
 
@@ -299,10 +300,9 @@
 	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
 	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
 	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
-	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
-	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
-	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
-	 ARPHRD_VOID, ARPHRD_NONE};
+	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
+	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
+	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
 
 static const char *const netdev_lock_name[] =
 	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
@@ -317,10 +317,9 @@
 	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
 	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
 	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
-	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
-	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
-	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
-	 "_xmit_VOID", "_xmit_NONE"};
+	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
+	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
+	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
 
 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -1617,10 +1616,14 @@
 		return NET_RX_DROP;
 	}
 	skb->skb_iif = 0;
-	skb_set_dev(skb, dev);
+	skb->dev = dev;
+	skb_dst_drop(skb);
 	skb->tstamp.tv64 = 0;
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, dev);
+	skb->mark = 0;
+	secpath_reset(skb);
+	nf_reset(skb);
 	return netif_rx(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1672,10 +1675,9 @@
 
 			if (skb_network_header(skb2) < skb2->data ||
 			    skb2->network_header > skb2->tail) {
-				if (net_ratelimit())
-					pr_crit("protocol %04x is buggy, dev %s\n",
-						ntohs(skb2->protocol),
-						dev->name);
+				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
+						     ntohs(skb2->protocol),
+						     dev->name);
 				skb_reset_network_header(skb2);
 			}
 
@@ -1869,36 +1871,6 @@
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-/**
- * skb_dev_set -- assign a new device to a buffer
- * @skb: buffer for the new device
- * @dev: network device
- *
- * If an skb is owned by a device already, we have to reset
- * all data private to the namespace a device belongs to
- * before assigning it a new device.
- */
-#ifdef CONFIG_NET_NS
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
-	skb_dst_drop(skb);
-	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
-		secpath_reset(skb);
-		nf_reset(skb);
-		skb_init_secmark(skb);
-		skb->mark = 0;
-		skb->priority = 0;
-		skb->nf_trace = 0;
-		skb->ipvs_property = 0;
-#ifdef CONFIG_NET_SCHED
-		skb->tc_index = 0;
-#endif
-	}
-	skb->dev = dev;
-}
-EXPORT_SYMBOL(skb_set_dev);
-#endif /* CONFIG_NET_NS */
-
 static void skb_warn_bad_offload(const struct sk_buff *skb)
 {
 	static const netdev_features_t null_features = 0;
@@ -2342,11 +2314,9 @@
 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 {
 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
-		if (net_ratelimit()) {
-			pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
-				dev->name, queue_index,
-				dev->real_num_tx_queues);
-		}
+		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+				     dev->name, queue_index,
+				     dev->real_num_tx_queues);
 		return 0;
 	}
 	return queue_index;
@@ -2588,17 +2558,15 @@
 				}
 			}
 			HARD_TX_UNLOCK(dev, txq);
-			if (net_ratelimit())
-				pr_crit("Virtual device %s asks to queue packet!\n",
-					dev->name);
+			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
+					     dev->name);
 		} else {
 			/* Recursion is detected! It is possible,
 			 * unfortunately
 			 */
 recursion_alert:
-			if (net_ratelimit())
-				pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
-					dev->name);
+			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
+					     dev->name);
 		}
 	}
 
@@ -3079,9 +3047,8 @@
 	struct Qdisc *q;
 
 	if (unlikely(MAX_RED_LOOP < ttl++)) {
-		if (net_ratelimit())
-			pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
-				skb->skb_iif, dev->ifindex);
+		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
+				     skb->skb_iif, dev->ifindex);
 		return TC_ACT_SHOT;
 	}
 
@@ -3541,10 +3508,16 @@
 		break;
 
 	case GRO_DROP:
-	case GRO_MERGED_FREE:
 		kfree_skb(skb);
 		break;
 
+	case GRO_MERGED_FREE:
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			kmem_cache_free(skbuff_head_cache, skb);
+		else
+			__kfree_skb(skb);
+		break;
+
 	case GRO_HELD:
 	case GRO_MERGED:
 		break;
@@ -3629,7 +3602,7 @@
 }
 EXPORT_SYMBOL(napi_frags_finish);
 
-struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 {
 	struct sk_buff *skb = napi->skb;
 	struct ethhdr *eth;
@@ -3664,7 +3637,6 @@
 out:
 	return skb;
 }
-EXPORT_SYMBOL(napi_frags_skb);
 
 gro_result_t napi_gro_frags(struct napi_struct *napi)
 {
@@ -4618,9 +4590,9 @@
  *
  *	Get the combination of flag bits exported through APIs to userspace.
  */
-unsigned dev_get_flags(const struct net_device *dev)
+unsigned int dev_get_flags(const struct net_device *dev)
 {
-	unsigned flags;
+	unsigned int flags;
 
 	flags = (dev->flags & ~(IFF_PROMISC |
 				IFF_ALLMULTI |
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 626698f..c4cc2bc 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -21,12 +21,35 @@
  * General list handling functions
  */
 
+static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
+			       unsigned char *addr, int addr_len,
+			       unsigned char addr_type, bool global)
+{
+	struct netdev_hw_addr *ha;
+	int alloc_size;
+
+	alloc_size = sizeof(*ha);
+	if (alloc_size < L1_CACHE_BYTES)
+		alloc_size = L1_CACHE_BYTES;
+	ha = kmalloc(alloc_size, GFP_ATOMIC);
+	if (!ha)
+		return -ENOMEM;
+	memcpy(ha->addr, addr, addr_len);
+	ha->type = addr_type;
+	ha->refcount = 1;
+	ha->global_use = global;
+	ha->synced = false;
+	list_add_tail_rcu(&ha->list, &list->list);
+	list->count++;
+
+	return 0;
+}
+
 static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
 			    unsigned char *addr, int addr_len,
 			    unsigned char addr_type, bool global)
 {
 	struct netdev_hw_addr *ha;
-	int alloc_size;
 
 	if (addr_len > MAX_ADDR_LEN)
 		return -EINVAL;
@@ -46,21 +69,7 @@
 		}
 	}
 
-
-	alloc_size = sizeof(*ha);
-	if (alloc_size < L1_CACHE_BYTES)
-		alloc_size = L1_CACHE_BYTES;
-	ha = kmalloc(alloc_size, GFP_ATOMIC);
-	if (!ha)
-		return -ENOMEM;
-	memcpy(ha->addr, addr, addr_len);
-	ha->type = addr_type;
-	ha->refcount = 1;
-	ha->global_use = global;
-	ha->synced = false;
-	list_add_tail_rcu(&ha->list, &list->list);
-	list->count++;
-	return 0;
+	return __hw_addr_create_ex(list, addr, addr_len, addr_type, global);
 }
 
 static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
@@ -377,6 +386,34 @@
  */
 
 /**
+ *	dev_uc_add_excl - Add a global secondary unicast address
+ *	@dev: device
+ *	@addr: address to add
+ */
+int dev_uc_add_excl(struct net_device *dev, unsigned char *addr)
+{
+	struct netdev_hw_addr *ha;
+	int err;
+
+	netif_addr_lock_bh(dev);
+	list_for_each_entry(ha, &dev->uc.list, list) {
+		if (!memcmp(ha->addr, addr, dev->addr_len) &&
+		    ha->type == NETDEV_HW_ADDR_T_UNICAST) {
+			err = -EEXIST;
+			goto out;
+		}
+	}
+	err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len,
+				  NETDEV_HW_ADDR_T_UNICAST, true);
+	if (!err)
+		__dev_set_rx_mode(dev);
+out:
+	netif_addr_unlock_bh(dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_uc_add_excl);
+
+/**
  *	dev_uc_add - Add a secondary unicast address
  *	@dev: device
  *	@addr: address to add
@@ -501,6 +538,34 @@
  * Multicast list handling functions
  */
 
+/**
+ *	dev_mc_add_excl - Add a global secondary multicast address
+ *	@dev: device
+ *	@addr: address to add
+ */
+int dev_mc_add_excl(struct net_device *dev, unsigned char *addr)
+{
+	struct netdev_hw_addr *ha;
+	int err;
+
+	netif_addr_lock_bh(dev);
+	list_for_each_entry(ha, &dev->mc.list, list) {
+		if (!memcmp(ha->addr, addr, dev->addr_len) &&
+		    ha->type == NETDEV_HW_ADDR_T_MULTICAST) {
+			err = -EEXIST;
+			goto out;
+		}
+	}
+	err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len,
+				  NETDEV_HW_ADDR_T_MULTICAST, true);
+	if (!err)
+		__dev_set_rx_mode(dev);
+out:
+	netif_addr_unlock_bh(dev);
+	return err;
+}
+EXPORT_SYMBOL(dev_mc_add_excl);
+
 static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
 			bool global)
 {
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 5c3c81a..3252e7e 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -4,6 +4,8 @@
  * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/string.h>
@@ -22,6 +24,7 @@
 #include <linux/timer.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <net/genetlink.h>
 #include <net/netevent.h>
 
@@ -42,13 +45,14 @@
  * netlink alerts
  */
 static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
 	struct work_struct dm_alert_work;
-	struct sk_buff *skb;
+	struct sk_buff __rcu *skb;
 	atomic_t dm_hit_count;
 	struct timer_list send_timer;
+	int cpu;
 };
 
 struct dm_hw_stat_delta {
@@ -79,29 +83,53 @@
 	size_t al;
 	struct net_dm_alert_msg *msg;
 	struct nlattr *nla;
+	struct sk_buff *skb;
+	struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
 
 	al = sizeof(struct net_dm_alert_msg);
 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
 	al += sizeof(struct nlattr);
 
-	data->skb = genlmsg_new(al, GFP_KERNEL);
-	genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
-			0, NET_DM_CMD_ALERT);
-	nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
-	msg = nla_data(nla);
-	memset(msg, 0, al);
-	atomic_set(&data->dm_hit_count, dm_hit_limit);
+	skb = genlmsg_new(al, GFP_KERNEL);
+
+	if (skb) {
+		genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+				0, NET_DM_CMD_ALERT);
+		nla = nla_reserve(skb, NLA_UNSPEC,
+				  sizeof(struct net_dm_alert_msg));
+		msg = nla_data(nla);
+		memset(msg, 0, al);
+	} else
+		schedule_work_on(data->cpu, &data->dm_alert_work);
+
+	/*
+	 * Don't need to lock this, since we are guaranteed to only
+	 * run this on a single cpu at a time.
+	 * Note also that we only update data->skb if the old and new skb
+	 * pointers don't match.  This ensures that we don't continually call
+	 * synchronize_rcu if we repeatedly fail to alloc a new netlink message.
+	 */
+	if (skb != oskb) {
+		rcu_assign_pointer(data->skb, skb);
+
+		synchronize_rcu();
+
+		atomic_set(&data->dm_hit_count, dm_hit_limit);
+	}
+
 }
 
 static void send_dm_alert(struct work_struct *unused)
 {
 	struct sk_buff *skb;
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+	WARN_ON_ONCE(data->cpu != smp_processor_id());
 
 	/*
 	 * Grab the skb we're about to send
 	 */
-	skb = data->skb;
+	skb = rcu_dereference_protected(data->skb, 1);
 
 	/*
 	 * Replace it with a new one
@@ -111,8 +139,10 @@
 	/*
 	 * Ship it!
 	 */
-	genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+	if (skb)
+		genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
 
+	put_cpu_var(dm_cpu_data);
 }
 
 /*
@@ -123,9 +153,11 @@
  */
 static void sched_send_work(unsigned long unused)
 {
-	struct per_cpu_dm_data *data =  &__get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data =  &get_cpu_var(dm_cpu_data);
 
-	schedule_work(&data->dm_alert_work);
+	schedule_work_on(smp_processor_id(), &data->dm_alert_work);
+
+	put_cpu_var(dm_cpu_data);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,9 +166,16 @@
 	struct nlmsghdr *nlh;
 	struct nlattr *nla;
 	int i;
-	struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+	struct sk_buff *dskb;
+	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
 
 
+	rcu_read_lock();
+	dskb = rcu_dereference(data->skb);
+
+	if (!dskb)
+		goto out;
+
 	if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
 		/*
 		 * we're already at zero, discard this hit
@@ -144,7 +183,7 @@
 		goto out;
 	}
 
-	nlh = (struct nlmsghdr *)data->skb->data;
+	nlh = (struct nlmsghdr *)dskb->data;
 	nla = genlmsg_data(nlmsg_data(nlh));
 	msg = nla_data(nla);
 	for (i = 0; i < msg->entries; i++) {
@@ -158,7 +197,7 @@
 	/*
 	 * We need to create a new entry
 	 */
-	__nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
 	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
 	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
 	msg->points[msg->entries].count = 1;
@@ -170,6 +209,8 @@
 	}
 
 out:
+	rcu_read_unlock();
+	put_cpu_var(dm_cpu_data);
 	return;
 }
 
@@ -214,7 +255,7 @@
 	struct dm_hw_stat_delta *new_stat = NULL;
 	struct dm_hw_stat_delta *temp;
 
-	spin_lock(&trace_state_lock);
+	mutex_lock(&trace_state_mutex);
 
 	if (state == trace_state) {
 		rc = -EAGAIN;
@@ -223,9 +264,15 @@
 
 	switch (state) {
 	case TRACE_ON:
+		if (!try_module_get(THIS_MODULE)) {
+			rc = -ENODEV;
+			break;
+		}
+
 		rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
 		rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
 		break;
+
 	case TRACE_OFF:
 		rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
 		rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
@@ -241,6 +288,9 @@
 				kfree_rcu(new_stat, rcu);
 			}
 		}
+
+		module_put(THIS_MODULE);
+
 		break;
 	default:
 		rc = 1;
@@ -253,7 +303,7 @@
 		rc = -EINPROGRESS;
 
 out_unlock:
-	spin_unlock(&trace_state_lock);
+	mutex_unlock(&trace_state_mutex);
 
 	return rc;
 }
@@ -296,12 +346,12 @@
 
 		new_stat->dev = dev;
 		new_stat->last_rx = jiffies;
-		spin_lock(&trace_state_lock);
+		mutex_lock(&trace_state_mutex);
 		list_add_rcu(&new_stat->list, &hw_stats_list);
-		spin_unlock(&trace_state_lock);
+		mutex_unlock(&trace_state_mutex);
 		break;
 	case NETDEV_UNREGISTER:
-		spin_lock(&trace_state_lock);
+		mutex_lock(&trace_state_mutex);
 		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
 			if (new_stat->dev == dev) {
 				new_stat->dev = NULL;
@@ -312,7 +362,7 @@
 				}
 			}
 		}
-		spin_unlock(&trace_state_lock);
+		mutex_unlock(&trace_state_mutex);
 		break;
 	}
 out:
@@ -343,10 +393,10 @@
 	struct per_cpu_dm_data *data;
 	int cpu, rc;
 
-	printk(KERN_INFO "Initializing network drop monitor service\n");
+	pr_info("Initializing network drop monitor service\n");
 
 	if (sizeof(void *) > 8) {
-		printk(KERN_ERR "Unable to store program counters on this arch, Drop monitor failed\n");
+		pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
 		return -ENOSPC;
 	}
 
@@ -354,27 +404,29 @@
 					   dropmon_ops,
 					   ARRAY_SIZE(dropmon_ops));
 	if (rc) {
-		printk(KERN_ERR "Could not create drop monitor netlink family\n");
+		pr_err("Could not create drop monitor netlink family\n");
 		return rc;
 	}
 
 	rc = register_netdevice_notifier(&dropmon_net_notifier);
 	if (rc < 0) {
-		printk(KERN_CRIT "Failed to register netdevice notifier\n");
+		pr_crit("Failed to register netdevice notifier\n");
 		goto out_unreg;
 	}
 
 	rc = 0;
 
-	for_each_present_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		data = &per_cpu(dm_cpu_data, cpu);
-		reset_per_cpu_data(data);
+		data->cpu = cpu;
 		INIT_WORK(&data->dm_alert_work, send_dm_alert);
 		init_timer(&data->send_timer);
 		data->send_timer.data = cpu;
 		data->send_timer.function = sched_send_work;
+		reset_per_cpu_data(data);
 	}
 
+
 	goto out;
 
 out_unreg:
@@ -383,4 +435,36 @@
 	return rc;
 }
 
-late_initcall(init_net_drop_monitor);
+static void exit_net_drop_monitor(void)
+{
+	struct per_cpu_dm_data *data;
+	int cpu;
+
+	BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));
+
+	/*
+	 * Because of the module_get/put we do in the trace state change path
+	 * we are guaranteed not to have any current users when we get here
+	 * all we need to do is make sure that we don't have any running timers
+	 * or pending schedule calls
+	 */
+
+	for_each_possible_cpu(cpu) {
+		data = &per_cpu(dm_cpu_data, cpu);
+		del_timer_sync(&data->send_timer);
+		cancel_work_sync(&data->dm_alert_work);
+		/*
+		 * At this point, we should have exclusive access
+		 * to this struct and can free the skb inside it
+		 */
+		kfree_skb(data->skb);
+	}
+
+	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
+}
+
+module_init(init_net_drop_monitor);
+module_exit(exit_net_drop_monitor);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6d6d7d2..9c2afb4 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,8 @@
 #include <linux/errno.h>
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
 #include <linux/bitops.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
@@ -36,6 +38,17 @@
 }
 EXPORT_SYMBOL(ethtool_op_get_link);
 
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE;
+	info->phc_index = -1;
+	return 0;
+}
+EXPORT_SYMBOL(ethtool_op_get_ts_info);
+
 /* Handlers for each ethtool command */
 
 #define ETHTOOL_DEV_FEATURE_WORDS	((NETDEV_FEATURE_COUNT + 31) / 32)
@@ -738,18 +751,17 @@
 	return 0;
 }
 
-static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
+static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
+				  int (*getter)(struct net_device *,
+						struct ethtool_eeprom *, u8 *),
+				  u32 total_len)
 {
 	struct ethtool_eeprom eeprom;
-	const struct ethtool_ops *ops = dev->ethtool_ops;
 	void __user *userbuf = useraddr + sizeof(eeprom);
 	u32 bytes_remaining;
 	u8 *data;
 	int ret = 0;
 
-	if (!ops->get_eeprom || !ops->get_eeprom_len)
-		return -EOPNOTSUPP;
-
 	if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
 		return -EFAULT;
 
@@ -758,7 +770,7 @@
 		return -EINVAL;
 
 	/* Check for exceeding total eeprom len */
-	if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+	if (eeprom.offset + eeprom.len > total_len)
 		return -EINVAL;
 
 	data = kmalloc(PAGE_SIZE, GFP_USER);
@@ -769,7 +781,7 @@
 	while (bytes_remaining > 0) {
 		eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
 
-		ret = ops->get_eeprom(dev, &eeprom, data);
+		ret = getter(dev, &eeprom, data);
 		if (ret)
 			break;
 		if (copy_to_user(userbuf, data, eeprom.len)) {
@@ -790,6 +802,17 @@
 	return ret;
 }
 
+static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
+{
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+
+	if (!ops->get_eeprom || !ops->get_eeprom_len)
+		return -EOPNOTSUPP;
+
+	return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom,
+				      ops->get_eeprom_len(dev));
+}
+
 static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_eeprom eeprom;
@@ -1278,6 +1301,81 @@
 	return ret;
 }
 
+static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
+{
+	int err = 0;
+	struct ethtool_ts_info info;
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+	struct phy_device *phydev = dev->phydev;
+
+	memset(&info, 0, sizeof(info));
+	info.cmd = ETHTOOL_GET_TS_INFO;
+
+	if (phydev && phydev->drv && phydev->drv->ts_info) {
+
+		err = phydev->drv->ts_info(phydev, &info);
+
+	} else if (dev->ethtool_ops && dev->ethtool_ops->get_ts_info) {
+
+		err = ops->get_ts_info(dev, &info);
+
+	} else {
+		info.so_timestamping =
+			SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE;
+		info.phc_index = -1;
+	}
+
+	if (err)
+		return err;
+
+	if (copy_to_user(useraddr, &info, sizeof(info)))
+		err = -EFAULT;
+
+	return err;
+}
+
+static int ethtool_get_module_info(struct net_device *dev,
+				   void __user *useraddr)
+{
+	int ret;
+	struct ethtool_modinfo modinfo;
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+
+	if (!ops->get_module_info)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&modinfo, useraddr, sizeof(modinfo)))
+		return -EFAULT;
+
+	ret = ops->get_module_info(dev, &modinfo);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(useraddr, &modinfo, sizeof(modinfo)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ethtool_get_module_eeprom(struct net_device *dev,
+				     void __user *useraddr)
+{
+	int ret;
+	struct ethtool_modinfo modinfo;
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+
+	if (!ops->get_module_info || !ops->get_module_eeprom)
+		return -EOPNOTSUPP;
+
+	ret = ops->get_module_info(dev, &modinfo);
+	if (ret)
+		return ret;
+
+	return ethtool_get_any_eeprom(dev, useraddr, ops->get_module_eeprom,
+				      modinfo.eeprom_len);
+}
+
 /* The main entry point in this file.  Called from net/core/dev.c */
 
 int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1295,11 +1393,13 @@
 		return -EFAULT;
 
 	if (!dev->ethtool_ops) {
-		/* ETHTOOL_GDRVINFO does not require any driver support.
-		 * It is also unprivileged and does not change anything,
-		 * so we can take a shortcut to it. */
+		/* A few commands do not require any driver support,
+		 * are unprivileged, and do not change anything, so we
+		 * can take a shortcut to them. */
 		if (ethcmd == ETHTOOL_GDRVINFO)
 			return ethtool_get_drvinfo(dev, useraddr);
+		else if (ethcmd == ETHTOOL_GET_TS_INFO)
+			return ethtool_get_ts_info(dev, useraddr);
 		else
 			return -EOPNOTSUPP;
 	}
@@ -1330,6 +1430,7 @@
 	case ETHTOOL_GRXCLSRULE:
 	case ETHTOOL_GRXCLSRLALL:
 	case ETHTOOL_GFEATURES:
+	case ETHTOOL_GET_TS_INFO:
 		break;
 	default:
 		if (!capable(CAP_NET_ADMIN))
@@ -1496,6 +1597,15 @@
 	case ETHTOOL_GET_DUMP_DATA:
 		rc = ethtool_get_dump_data(dev, useraddr);
 		break;
+	case ETHTOOL_GET_TS_INFO:
+		rc = ethtool_get_ts_info(dev, useraddr);
+		break;
+	case ETHTOOL_GMODULEINFO:
+		rc = ethtool_get_module_info(dev, useraddr);
+		break;
+	case ETHTOOL_GMODULEEEPROM:
+		rc = ethtool_get_module_eeprom(dev, useraddr);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
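
The new ETHTOOL_GET_TS_INFO command is reachable from userspace through the
usual SIOCETHTOOL ioctl.  A small sketch (assuming a linux/ethtool.h new
enough to carry struct ethtool_ts_info):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(int argc, char **argv)
	{
		struct ethtool_ts_info info;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&info, 0, sizeof(info));
		memset(&ifr, 0, sizeof(ifr));
		info.cmd = ETHTOOL_GET_TS_INFO;
		strncpy(ifr.ifr_name, argc > 1 ? argv[1] : "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&info;

		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("ETHTOOL_GET_TS_INFO");
			return 1;
		}
		printf("so_timestamping 0x%x, phc_index %d\n",
		       info.so_timestamping, info.phc_index);
		return 0;
	}
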
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index c02e63c..72cceb7 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -542,7 +542,8 @@
 	frh = nlmsg_data(nlh);
 	frh->family = ops->family;
 	frh->table = rule->table;
-	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
+	if (nla_put_u32(skb, FRA_TABLE, rule->table))
+		goto nla_put_failure;
 	frh->res1 = 0;
 	frh->res2 = 0;
 	frh->action = rule->action;
@@ -553,31 +554,28 @@
 		frh->flags |= FIB_RULE_UNRESOLVED;
 
 	if (rule->iifname[0]) {
-		NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);
-
+		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
+			goto nla_put_failure;
 		if (rule->iifindex == -1)
 			frh->flags |= FIB_RULE_IIF_DETACHED;
 	}
 
 	if (rule->oifname[0]) {
-		NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);
-
+		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
+			goto nla_put_failure;
 		if (rule->oifindex == -1)
 			frh->flags |= FIB_RULE_OIF_DETACHED;
 	}
 
-	if (rule->pref)
-		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);
-
-	if (rule->mark)
-		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);
-
-	if (rule->mark_mask || rule->mark)
-		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);
-
-	if (rule->target)
-		NLA_PUT_U32(skb, FRA_GOTO, rule->target);
-
+	if ((rule->pref &&
+	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
+	    (rule->mark &&
+	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
+	    ((rule->mark_mask || rule->mark) &&
+	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
+	    (rule->target &&
+	     nla_put_u32(skb, FRA_GOTO, rule->target)))
+		goto nla_put_failure;
 	if (ops->fill(rule, skb, frh) < 0)
 		goto nla_put_failure;
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 6f755cc..47a5f05 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -317,6 +317,9 @@
 		case BPF_S_ANC_CPU:
 			A = raw_smp_processor_id();
 			continue;
+		case BPF_S_ANC_ALU_XOR_X:
+			A ^= X;
+			continue;
 		case BPF_S_ANC_NLATTR: {
 			struct nlattr *nla;
 
@@ -528,7 +531,7 @@
 			 * Compare this with conditional jumps below,
 			 * where offsets are limited. --ANK (981016)
 			 */
-			if (ftest->k >= (unsigned)(flen-pc-1))
+			if (ftest->k >= (unsigned int)(flen-pc-1))
 				return -EINVAL;
 			break;
 		case BPF_S_JMP_JEQ_K:
@@ -561,6 +564,7 @@
 			ANCILLARY(HATYPE);
 			ANCILLARY(RXHASH);
 			ANCILLARY(CPU);
+			ANCILLARY(ALU_XOR_X);
 			}
 		}
 		ftest->code = code;
@@ -589,6 +593,67 @@
 }
 EXPORT_SYMBOL(sk_filter_release_rcu);
 
+static int __sk_prepare_filter(struct sk_filter *fp)
+{
+	int err;
+
+	fp->bpf_func = sk_run_filter;
+
+	err = sk_chk_filter(fp->insns, fp->len);
+	if (err)
+		return err;
+
+	bpf_jit_compile(fp);
+	return 0;
+}
+
+/**
+ *	sk_unattached_filter_create - create an unattached filter
+ *	@pfp: the unattached filter that is created
+ *	@fprog: the filter program
+ *
+ * Create a filter independent of any socket. We first run some
+ * sanity checks on it to make sure it does not explode on us later.
+ * If an error occurs or there is insufficient memory for the filter
+ * a negative errno code is returned. On success the return is zero.
+ */
+int sk_unattached_filter_create(struct sk_filter **pfp,
+				struct sock_fprog *fprog)
+{
+	struct sk_filter *fp;
+	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+	int err;
+
+	/* Make sure new filter is there and in the right amounts. */
+	if (fprog->filter == NULL)
+		return -EINVAL;
+
+	fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+	if (!fp)
+		return -ENOMEM;
+	memcpy(fp->insns, fprog->filter, fsize);
+
+	atomic_set(&fp->refcnt, 1);
+	fp->len = fprog->len;
+
+	err = __sk_prepare_filter(fp);
+	if (err)
+		goto free_mem;
+
+	*pfp = fp;
+	return 0;
+free_mem:
+	kfree(fp);
+	return err;
+}
+EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
+
+void sk_unattached_filter_destroy(struct sk_filter *fp)
+{
+	sk_filter_release(fp);
+}
+EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
+
 /**
  *	sk_attach_filter - attach a socket filter
  *	@fprog: the filter program
@@ -619,16 +684,13 @@
 
 	atomic_set(&fp->refcnt, 1);
 	fp->len = fprog->len;
-	fp->bpf_func = sk_run_filter;
 
-	err = sk_chk_filter(fp->insns, fp->len);
+	err = __sk_prepare_filter(fp);
 	if (err) {
 		sk_filter_uncharge(sk, fp);
 		return err;
 	}
 
-	bpf_jit_compile(fp);
-
 	old_fp = rcu_dereference_protected(sk->sk_filter,
 					   sock_owned_by_user(sk));
 	rcu_assign_pointer(sk->sk_filter, fp);
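
For reference, a minimal sketch of how an in-kernel user could drive the new
unattached-filter API added above. The "accept everything" program and the
example_* names are invented for illustration; only
sk_unattached_filter_create()/_destroy() and SK_RUN_FILTER() come from the
kernel:

	#include <linux/kernel.h>
	#include <linux/filter.h>
	#include <linux/skbuff.h>

	static struct sk_filter *example_fp;

	static int example_setup_filter(void)
	{
		/* trivial BPF program: return 0xffff, i.e. accept the packet */
		struct sock_filter insns[] = {
			BPF_STMT(BPF_RET | BPF_K, 0xffff),
		};
		struct sock_fprog fprog = {
			.len	= ARRAY_SIZE(insns),
			.filter	= insns,
		};

		/* runs sk_chk_filter() and bpf_jit_compile() internally */
		return sk_unattached_filter_create(&example_fp, &fprog);
	}

	static unsigned int example_run(struct sk_buff *skb)
	{
		/* returns the number of bytes to keep, 0 means drop */
		return SK_RUN_FILTER(example_fp, skb);
	}

	static void example_teardown_filter(void)
	{
		sk_unattached_filter_destroy(example_fp);
	}
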
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 0452eb2..ddedf21 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -27,7 +27,8 @@
 static inline int
 gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
 {
-	NLA_PUT(d->skb, type, size, buf);
+	if (nla_put(d->skb, type, size, buf))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
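
The fib_rules.c and gen_stats.c hunks above show the conversion applied
throughout this series: the NLA_PUT*() macros, which jump to a local
nla_put_failure label behind the caller's back, become explicit checks of the
nla_put*() return value. The pattern, sketched with a made-up attribute:

	/* old style: control flow hidden inside the macro */
	NLA_PUT_U32(skb, EXAMPLE_ATTR, val);

	/* new style: the -EMSGSIZE path is visible at the call site */
	if (nla_put_u32(skb, EXAMPLE_ATTR, val))
		goto nla_put_failure;
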
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
deleted file mode 100644
index 52d0a44..0000000
--- a/net/core/kmap_skb.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <linux/highmem.h>
-
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
-{
-#ifdef CONFIG_HIGHMEM
-	BUG_ON(in_irq());
-
-	local_bh_disable();
-#endif
-	return kmap_atomic(skb_frag_page(frag));
-}
-
-static inline void kunmap_skb_frag(void *vaddr)
-{
-	kunmap_atomic(vaddr);
-#ifdef CONFIG_HIGHMEM
-	local_bh_enable();
-#endif
-}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0a68045..eb09f8b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -15,6 +15,8 @@
  *	Harald Welte		Add neighbour cache statistics like rtstat
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -712,14 +714,13 @@
 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 
 	if (!neigh->dead) {
-		printk(KERN_WARNING
-		       "Destroying alive neighbour %p\n", neigh);
+		pr_warn("Destroying alive neighbour %p\n", neigh);
 		dump_stack();
 		return;
 	}
 
 	if (neigh_del_timer(neigh))
-		printk(KERN_WARNING "Impossible event.\n");
+		pr_warn("Impossible event\n");
 
 	skb_queue_purge(&neigh->arp_queue);
 	neigh->arp_queue_len_bytes = 0;
@@ -890,7 +891,7 @@
 {
 	unsigned long now, next;
 	struct neighbour *neigh = (struct neighbour *)arg;
-	unsigned state;
+	unsigned int state;
 	int notify = 0;
 
 	write_lock(&neigh->lock);
@@ -1500,7 +1501,7 @@
 
 static struct lock_class_key neigh_table_proxy_queue_class;
 
-void neigh_table_init_no_netlink(struct neigh_table *tbl)
+static void neigh_table_init_no_netlink(struct neigh_table *tbl)
 {
 	unsigned long now = jiffies;
 	unsigned long phsize;
@@ -1538,7 +1539,6 @@
 	tbl->last_flush = now;
 	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
 }
-EXPORT_SYMBOL(neigh_table_init_no_netlink);
 
 void neigh_table_init(struct neigh_table *tbl)
 {
@@ -1555,8 +1555,8 @@
 	write_unlock(&neigh_tbl_lock);
 
 	if (unlikely(tmp)) {
-		printk(KERN_ERR "NEIGH: Registering multiple tables for "
-		       "family %d\n", tbl->family);
+		pr_err("Registering multiple tables for family %d\n",
+		       tbl->family);
 		dump_stack();
 	}
 }
@@ -1572,7 +1572,7 @@
 	pneigh_queue_purge(&tbl->proxy_queue);
 	neigh_ifdown(tbl, NULL);
 	if (atomic_read(&tbl->entries))
-		printk(KERN_CRIT "neighbour leakage\n");
+		pr_crit("neighbour leakage\n");
 	write_lock(&neigh_tbl_lock);
 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
 		if (*tp == tbl) {
@@ -1768,29 +1768,29 @@
 	if (nest == NULL)
 		return -ENOBUFS;
 
-	if (parms->dev)
-		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
-
-	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
-	NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes);
-	/* approximative value for deprecated QUEUE_LEN (in packets) */
-	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN,
-		    DIV_ROUND_UP(parms->queue_len_bytes,
-				 SKB_TRUESIZE(ETH_FRAME_LEN)));
-	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
-	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
-	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
-	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
-	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
-	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
-		      parms->base_reachable_time);
-	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
-	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
-	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
-	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
-	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
-	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
-
+	if ((parms->dev &&
+	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
+	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
+	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
+	    /* approximative value for deprecated QUEUE_LEN (in packets) */
+	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
+			DIV_ROUND_UP(parms->queue_len_bytes,
+				     SKB_TRUESIZE(ETH_FRAME_LEN))) ||
+	    nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
+	    nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
+	    nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
+	    nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
+	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
+	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
+			  parms->base_reachable_time) ||
+	    nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
+	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
+			  parms->delay_probe_time) ||
+	    nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
+	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
+	    nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
+	    nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
+		goto nla_put_failure;
 	return nla_nest_end(skb, nest);
 
 nla_put_failure:
@@ -1815,12 +1815,12 @@
 	ndtmsg->ndtm_pad1   = 0;
 	ndtmsg->ndtm_pad2   = 0;
 
-	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
-	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
-	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
-	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
-	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
-
+	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
+	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
+	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
+	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
+	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+		goto nla_put_failure;
 	{
 		unsigned long now = jiffies;
 		unsigned int flush_delta = now - tbl->last_flush;
@@ -1841,7 +1841,8 @@
 		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
 		rcu_read_unlock_bh();
 
-		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
+		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
+			goto nla_put_failure;
 	}
 
 	{
@@ -1866,7 +1867,8 @@
 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
 		}
 
-		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
+		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
+			goto nla_put_failure;
 	}
 
 	BUG_ON(tbl->parms.dev);
@@ -2137,7 +2139,8 @@
 	ndm->ndm_type	 = neigh->type;
 	ndm->ndm_ifindex = neigh->dev->ifindex;
 
-	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
+	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
+		goto nla_put_failure;
 
 	read_lock_bh(&neigh->lock);
 	ndm->ndm_state	 = neigh->nud_state;
@@ -2157,8 +2160,9 @@
 	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
 	read_unlock_bh(&neigh->lock);
 
-	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
-	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
+	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
+	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -2187,7 +2191,8 @@
 	ndm->ndm_ifindex = pn->dev->ifindex;
 	ndm->ndm_state	 = NUD_NONE;
 
-	NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key);
+	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -2795,7 +2800,6 @@
 static struct neigh_sysctl_table {
 	struct ctl_table_header *sysctl_header;
 	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
-	char *dev_name;
 } neigh_sysctl_template __read_mostly = {
 	.neigh_vars = {
 		[NEIGH_VAR_MCAST_PROBE] = {
@@ -2921,19 +2925,7 @@
 {
 	struct neigh_sysctl_table *t;
 	const char *dev_name_source = NULL;
-
-#define NEIGH_CTL_PATH_ROOT	0
-#define NEIGH_CTL_PATH_PROTO	1
-#define NEIGH_CTL_PATH_NEIGH	2
-#define NEIGH_CTL_PATH_DEV	3
-
-	struct ctl_path neigh_path[] = {
-		{ .procname = "net",	 },
-		{ .procname = "proto",	 },
-		{ .procname = "neigh",	 },
-		{ .procname = "default", },
-		{ },
-	};
+	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
 
 	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
 	if (!t)
@@ -2961,7 +2953,7 @@
 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
 	} else {
-		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
+		dev_name_source = "default";
 		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
 		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
 		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
@@ -2984,23 +2976,16 @@
 		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
 	}
 
-	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
-	if (!t->dev_name)
-		goto free;
-
-	neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
-	neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
-
+	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
+		p_name, dev_name_source);
 	t->sysctl_header =
-		register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
+		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
 	if (!t->sysctl_header)
-		goto free_procname;
+		goto free;
 
 	p->sysctl_table = t;
 	return 0;
 
-free_procname:
-	kfree(t->dev_name);
 free:
 	kfree(t);
 err:
@@ -3013,8 +2998,7 @@
 	if (p->sysctl_table) {
 		struct neigh_sysctl_table *t = p->sysctl_table;
 		p->sysctl_table = NULL;
-		unregister_sysctl_table(t->sysctl_header);
-		kfree(t->dev_name);
+		unregister_net_sysctl_table(t->sysctl_header);
 		kfree(t);
 	}
 }
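
neigh_sysctl_register() now assembles the sysctl path as a plain string for
register_net_sysctl() instead of filling a ctl_path array. A minimal sketch of
the new calling convention; the example_* names and the "net/example/..." path
are invented:

	#include <linux/sysctl.h>
	#include <net/net_namespace.h>

	static int example_knob;

	static struct ctl_table example_vars[] = {
		{
			.procname	= "example_knob",
			.data		= &example_knob,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static struct ctl_table_header *example_hdr;

	static int example_sysctl_register(struct net *net)
	{
		/* the path is now a single string, as in neigh_sysctl_register() */
		example_hdr = register_net_sysctl(net, "net/example/neigh/default",
						  example_vars);
		return example_hdr ? 0 : -ENOMEM;
	}

	static void example_sysctl_unregister(void)
	{
		unregister_net_sysctl_table(example_hdr);
	}
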
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4955862..fdf9e61 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -74,15 +74,14 @@
 			    int (*set)(struct net_device *, unsigned long))
 {
 	struct net_device *net = to_net_dev(dev);
-	char *endp;
 	unsigned long new;
 	int ret = -EINVAL;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	new = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
+	ret = kstrtoul(buf, 0, &new);
+	if (ret)
 		goto err;
 
 	if (!rtnl_trylock())
@@ -232,7 +231,7 @@
 
 static int change_flags(struct net_device *net, unsigned long new_flags)
 {
-	return dev_change_flags(net, (unsigned) new_flags);
+	return dev_change_flags(net, (unsigned int) new_flags);
 }
 
 static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
@@ -582,7 +581,7 @@
 		return err;
 	}
 
-	map = kzalloc(max_t(unsigned,
+	map = kzalloc(max_t(unsigned int,
 	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
 	    GFP_KERNEL);
 	if (!map) {
@@ -903,7 +902,7 @@
 				 const char *buf, size_t len)
 {
 	struct dql *dql = &queue->dql;
-	unsigned value;
+	unsigned int value;
 	int err;
 
 	err = kstrtouint(buf, 10, &value);
@@ -1107,7 +1106,7 @@
 		return err;
 	}
 
-	new_dev_maps = kzalloc(max_t(unsigned,
+	new_dev_maps = kzalloc(max_t(unsigned int,
 	    XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
 	if (!new_dev_maps) {
 		free_cpumask_var(mask);
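
The sysfs store helper above moves from simple_strtoul(), whose endp check
only caught a completely empty number, to kstrtoul(), which also rejects
trailing garbage and overflow. A sketch of the idiom; example_parse() is an
invented name:

	/* parse an unsigned long from a sysfs buffer, rejecting junk */
	static int example_parse(const char *buf, unsigned long *val)
	{
		int ret;

		ret = kstrtoul(buf, 0, val);	/* base 0 = auto-detect */
		if (ret)
			return ret;		/* -EINVAL or -ERANGE */
		return 0;
	}
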
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 31a5ae5..dddbacb 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/workqueue.h>
 #include <linux/rtnetlink.h>
 #include <linux/cache.h>
@@ -212,8 +214,8 @@
 {
 #ifdef NETNS_REFCNT_DEBUG
 	if (unlikely(atomic_read(&net->use_count) != 0)) {
-		printk(KERN_EMERG "network namespace not free! Usage: %d\n",
-			atomic_read(&net->use_count));
+		pr_emerg("network namespace not free! Usage: %d\n",
+			 atomic_read(&net->use_count));
 		return;
 	}
 #endif
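
Several files in this series (neighbour.c, net_namespace.c, netprio_cgroup.c,
skbuff.c, sock.c) gain a pr_fmt() definition so the pr_*() helpers can replace
open-coded printk(KERN_...) calls while still carrying a module-name prefix.
The idiom, sketched; example_report() is invented:

	/* must be defined before the first include that pulls in printk.h */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>

	static void example_report(int err)
	{
		/* emits "<module name>: something failed: <err>" at KERN_WARNING */
		pr_warn("something failed: %d\n", err);
	}
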
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index ba6900f..09eda68 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -9,6 +9,8 @@
  * Authors:	Neil Horman <nhorman@tuxdriver.com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -88,7 +90,7 @@
 	old_priomap  = rtnl_dereference(dev->priomap);
 
 	if (!new_priomap) {
-		printk(KERN_WARNING "Unable to alloc new priomap!\n");
+		pr_warn("Unable to alloc new priomap!\n");
 		return;
 	}
 
@@ -136,7 +138,7 @@
 
 	ret = get_prioidx(&cs->prioidx);
 	if (ret != 0) {
-		printk(KERN_WARNING "No space in priority index array\n");
+		pr_warn("No space in priority index array\n");
 		kfree(cs);
 		return ERR_PTR(ret);
 	}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4d8ce93..cce9e53 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -320,7 +320,7 @@
 				(see RFC 3260, sec. 4) */
 
 	/* MPLS */
-	unsigned nr_labels;	/* Depth of stack, 0 = no MPLS */
+	unsigned int nr_labels;	/* Depth of stack, 0 = no MPLS */
 	__be32 labels[MAX_MPLS_LABELS];
 
 	/* VLAN/SVLAN (802.1Q/Q-in-Q) */
@@ -373,10 +373,10 @@
 				  */
 	char odevname[32];
 	struct flow_state *flows;
-	unsigned cflows;	/* Concurrent flows (config) */
-	unsigned lflow;		/* Flow length  (config) */
-	unsigned nflows;	/* accumulated flows (stats) */
-	unsigned curfl;		/* current sequenced flow (state)*/
+	unsigned int cflows;	/* Concurrent flows (config) */
+	unsigned int lflow;		/* Flow length  (config) */
+	unsigned int nflows;	/* accumulated flows (stats) */
+	unsigned int curfl;		/* current sequenced flow (state)*/
 
 	u16 queue_map_min;
 	u16 queue_map_max;
@@ -592,7 +592,7 @@
 		   pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
 
 	if (pkt_dev->nr_labels) {
-		unsigned i;
+		unsigned int i;
 		seq_printf(seq, "     mpls: ");
 		for (i = 0; i < pkt_dev->nr_labels; i++)
 			seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
@@ -812,7 +812,7 @@
 
 static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
 {
-	unsigned n = 0;
+	unsigned int n = 0;
 	char c;
 	ssize_t i = 0;
 	int len;
@@ -891,8 +891,8 @@
 		if (copy_from_user(tb, user_buffer, copy))
 			return -EFAULT;
 		tb[copy] = 0;
-		printk(KERN_DEBUG "pktgen: %s,%lu  buffer -:%s:-\n", name,
-		       (unsigned long)count, tb);
+		pr_debug("%s,%lu  buffer -:%s:-\n",
+			 name, (unsigned long)count, tb);
 	}
 
 	if (!strcmp(name, "min_pkt_size")) {
@@ -1261,8 +1261,7 @@
 			pkt_dev->cur_daddr = pkt_dev->daddr_min;
 		}
 		if (debug)
-			printk(KERN_DEBUG "pktgen: dst_min set to: %s\n",
-			       pkt_dev->dst_min);
+			pr_debug("dst_min set to: %s\n", pkt_dev->dst_min);
 		i += len;
 		sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
 		return count;
@@ -1284,8 +1283,7 @@
 			pkt_dev->cur_daddr = pkt_dev->daddr_max;
 		}
 		if (debug)
-			printk(KERN_DEBUG "pktgen: dst_max set to: %s\n",
-			       pkt_dev->dst_max);
+			pr_debug("dst_max set to: %s\n", pkt_dev->dst_max);
 		i += len;
 		sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
 		return count;
@@ -1307,7 +1305,7 @@
 		pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
 
 		if (debug)
-			printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf);
+			pr_debug("dst6 set to: %s\n", buf);
 
 		i += len;
 		sprintf(pg_result, "OK: dst6=%s", buf);
@@ -1329,7 +1327,7 @@
 
 		pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
 		if (debug)
-			printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf);
+			pr_debug("dst6_min set to: %s\n", buf);
 
 		i += len;
 		sprintf(pg_result, "OK: dst6_min=%s", buf);
@@ -1350,7 +1348,7 @@
 		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
 
 		if (debug)
-			printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf);
+			pr_debug("dst6_max set to: %s\n", buf);
 
 		i += len;
 		sprintf(pg_result, "OK: dst6_max=%s", buf);
@@ -1373,7 +1371,7 @@
 		pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
 
 		if (debug)
-			printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf);
+			pr_debug("src6 set to: %s\n", buf);
 
 		i += len;
 		sprintf(pg_result, "OK: src6=%s", buf);
@@ -1394,8 +1392,7 @@
 			pkt_dev->cur_saddr = pkt_dev->saddr_min;
 		}
 		if (debug)
-			printk(KERN_DEBUG "pktgen: src_min set to: %s\n",
-			       pkt_dev->src_min);
+			pr_debug("src_min set to: %s\n", pkt_dev->src_min);
 		i += len;
 		sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min);
 		return count;
@@ -1415,8 +1412,7 @@
 			pkt_dev->cur_saddr = pkt_dev->saddr_max;
 		}
 		if (debug)
-			printk(KERN_DEBUG "pktgen: src_max set to: %s\n",
-			       pkt_dev->src_max);
+			pr_debug("src_max set to: %s\n", pkt_dev->src_max);
 		i += len;
 		sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max);
 		return count;
@@ -1510,7 +1506,7 @@
 	}
 
 	if (!strcmp(name, "mpls")) {
-		unsigned n, cnt;
+		unsigned int n, cnt;
 
 		len = get_labels(&user_buffer[i], pkt_dev);
 		if (len < 0)
@@ -1527,7 +1523,7 @@
 			pkt_dev->svlan_id = 0xffff;
 
 			if (debug)
-				printk(KERN_DEBUG "pktgen: VLAN/SVLAN auto turned off\n");
+				pr_debug("VLAN/SVLAN auto turned off\n");
 		}
 		return count;
 	}
@@ -1542,10 +1538,10 @@
 			pkt_dev->vlan_id = value;  /* turn on VLAN */
 
 			if (debug)
-				printk(KERN_DEBUG "pktgen: VLAN turned on\n");
+				pr_debug("VLAN turned on\n");
 
 			if (debug && pkt_dev->nr_labels)
-				printk(KERN_DEBUG "pktgen: MPLS auto turned off\n");
+				pr_debug("MPLS auto turned off\n");
 
 			pkt_dev->nr_labels = 0;    /* turn off MPLS */
 			sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
@@ -1554,7 +1550,7 @@
 			pkt_dev->svlan_id = 0xffff;
 
 			if (debug)
-				printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n");
+				pr_debug("VLAN/SVLAN turned off\n");
 		}
 		return count;
 	}
@@ -1599,10 +1595,10 @@
 			pkt_dev->svlan_id = value;  /* turn on SVLAN */
 
 			if (debug)
-				printk(KERN_DEBUG "pktgen: SVLAN turned on\n");
+				pr_debug("SVLAN turned on\n");
 
 			if (debug && pkt_dev->nr_labels)
-				printk(KERN_DEBUG "pktgen: MPLS auto turned off\n");
+				pr_debug("MPLS auto turned off\n");
 
 			pkt_dev->nr_labels = 0;    /* turn off MPLS */
 			sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
@@ -1611,7 +1607,7 @@
 			pkt_dev->svlan_id = 0xffff;
 
 			if (debug)
-				printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n");
+				pr_debug("VLAN/SVLAN turned off\n");
 		}
 		return count;
 	}
@@ -1779,8 +1775,7 @@
 	i += len;
 
 	if (debug)
-		printk(KERN_DEBUG "pktgen: t=%s, count=%lu\n",
-		       name, (unsigned long)count);
+		pr_debug("t=%s, count=%lu\n", name, (unsigned long)count);
 
 	if (!t) {
 		pr_err("ERROR: No thread\n");
@@ -1931,7 +1926,7 @@
 {
 	struct net_device *dev = ptr;
 
-	if (!net_eq(dev_net(dev), &init_net))
+	if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
 		return NOTIFY_DONE;
 
 	/* It is OK that we do not hold the group lock right now,
@@ -2324,7 +2319,7 @@
 	}
 
 	if (pkt_dev->flags & F_MPLS_RND) {
-		unsigned i;
+		unsigned int i;
 		for (i = 0; i < pkt_dev->nr_labels; i++)
 			if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
 				pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
@@ -2550,7 +2545,7 @@
 
 static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
 {
-	unsigned i;
+	unsigned int i;
 	for (i = 0; i < pkt_dev->nr_labels; i++)
 		*mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
 
@@ -2934,8 +2929,7 @@
 
 	if (datalen < sizeof(struct pktgen_hdr)) {
 		datalen = sizeof(struct pktgen_hdr);
-		if (net_ratelimit())
-			pr_info("increased datalen to %d\n", datalen);
+		net_info_ratelimited("increased datalen to %d\n", datalen);
 	}
 
 	udph->source = htons(pkt_dev->cur_udp_src);
@@ -3365,8 +3359,8 @@
 		pkt_dev->errors++;
 		break;
 	default: /* Drivers are not supposed to return other values! */
-		if (net_ratelimit())
-			pr_info("%s xmit error: %d\n", pkt_dev->odevname, ret);
+		net_info_ratelimited("%s xmit error: %d\n",
+				     pkt_dev->odevname, ret);
 		pkt_dev->errors++;
 		/* fallthru */
 	case NETDEV_TX_LOCKED:
@@ -3755,12 +3749,18 @@
 {
 	struct pktgen_thread *t;
 	struct list_head *q, *n;
+	LIST_HEAD(list);
 
 	/* Stop all interfaces & threads */
 	pktgen_exiting = true;
 
-	list_for_each_safe(q, n, &pktgen_threads) {
+	mutex_lock(&pktgen_thread_lock);
+	list_splice_init(&pktgen_threads, &list);
+	mutex_unlock(&pktgen_thread_lock);
+
+	list_for_each_safe(q, n, &list) {
 		t = list_entry(q, struct pktgen_thread, th_list);
+		list_del(&t->th_list);
 		kthread_stop(t->tsk);
 		kfree(t);
 	}
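
The module-exit change above detaches the thread list under
pktgen_thread_lock and only then stops the kthreads, so the netdevice
notifier (now also guarded by pktgen_exiting) cannot walk a list that is being
torn down. The same pattern in generic form; struct example_worker and the
example_* names are invented:

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/kthread.h>
	#include <linux/slab.h>

	struct example_worker {
		struct task_struct *task;
		struct list_head   node;
	};

	static LIST_HEAD(example_workers);
	static DEFINE_MUTEX(example_lock);

	static void example_shutdown(void)
	{
		struct example_worker *w, *tmp;
		LIST_HEAD(list);

		/* detach the whole list while holding the lock... */
		mutex_lock(&example_lock);
		list_splice_init(&example_workers, &list);
		mutex_unlock(&example_lock);

		/* ...then stop and free each worker without the lock held */
		list_for_each_entry_safe(w, tmp, &list, node) {
			list_del(&w->node);
			kthread_stop(w->task);
			kfree(w);
		}
	}
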
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 90430b7..21318d1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,7 +35,9 @@
 #include <linux/security.h>
 #include <linux/mutex.h>
 #include <linux/if_addr.h>
+#include <linux/if_bridge.h>
 #include <linux/pci.h>
+#include <linux/etherdevice.h>
 
 #include <asm/uaccess.h>
 
@@ -552,7 +554,7 @@
 }
 EXPORT_SYMBOL(__rta_fill);
 
-int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
+int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
 {
 	struct sock *rtnl = net->rtnl;
 	int err = 0;
@@ -607,7 +609,8 @@
 	for (i = 0; i < RTAX_MAX; i++) {
 		if (metrics[i]) {
 			valid++;
-			NLA_PUT_U32(skb, i+1, metrics[i]);
+			if (nla_put_u32(skb, i+1, metrics[i]))
+				goto nla_put_failure;
 		}
 	}
 
@@ -782,6 +785,7 @@
 	       + nla_total_size(4) /* IFLA_MTU */
 	       + nla_total_size(4) /* IFLA_LINK */
 	       + nla_total_size(4) /* IFLA_MASTER */
+	       + nla_total_size(4) /* IFLA_PROMISCUITY */
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(ext_filter_mask
@@ -807,7 +811,8 @@
 		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
 		if (!vf_port)
 			goto nla_put_failure;
-		NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
+		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
+			goto nla_put_failure;
 		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
 		if (err == -EMSGSIZE)
 			goto nla_put_failure;
@@ -891,25 +896,23 @@
 	ifm->ifi_flags = dev_get_flags(dev);
 	ifm->ifi_change = change;
 
-	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
-	NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len);
-	NLA_PUT_U8(skb, IFLA_OPERSTATE,
-		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
-	NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
-	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
-	NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
-
-	if (dev->ifindex != dev->iflink)
-		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
-	if (dev->master)
-		NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
-
-	if (dev->qdisc)
-		NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id);
-
-	if (dev->ifalias)
-		NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
+	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
+	    nla_put_u8(skb, IFLA_OPERSTATE,
+		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
+	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
+	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
+	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
+	    (dev->ifindex != dev->iflink &&
+	     nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
+	    (dev->master &&
+	     nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
+	    (dev->qdisc &&
+	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
+	    (dev->ifalias &&
+	     nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
+		goto nla_put_failure;
 
 	if (1) {
 		struct rtnl_link_ifmap map = {
@@ -920,12 +923,14 @@
 			.dma         = dev->dma,
 			.port        = dev->if_port,
 		};
-		NLA_PUT(skb, IFLA_MAP, sizeof(map), &map);
+		if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+			goto nla_put_failure;
 	}
 
 	if (dev->addr_len) {
-		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-		NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
+		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
+		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
+			goto nla_put_failure;
 	}
 
 	attr = nla_reserve(skb, IFLA_STATS,
@@ -942,8 +947,9 @@
 		goto nla_put_failure;
 	copy_rtnl_link_stats64(nla_data(attr), stats);
 
-	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
-		NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
+	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
+	    nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
+		goto nla_put_failure;
 
 	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
 	    && (ext_filter_mask & RTEXT_FILTER_VF)) {
@@ -986,12 +992,13 @@
 				nla_nest_cancel(skb, vfinfo);
 				goto nla_put_failure;
 			}
-			NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
-			NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
-			NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
-				&vf_tx_rate);
-			NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
-				&vf_spoofchk);
+			if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
+			    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
+			    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
+				    &vf_tx_rate) ||
+			    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
+				    &vf_spoofchk))
+				goto nla_put_failure;
 			nla_nest_end(skb, vf);
 		}
 		nla_nest_end(skb, vfinfo);
@@ -1113,6 +1120,7 @@
 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
 	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
 	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
+	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -1516,11 +1524,9 @@
 	err = 0;
 
 errout:
-	if (err < 0 && modified && net_ratelimit())
-		printk(KERN_WARNING "A link change request failed with "
-		       "some changes committed already. Interface %s may "
-		       "have been left with an inconsistent configuration, "
-		       "please check.\n", dev->name);
+	if (err < 0 && modified)
+		net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
+				     dev->name);
 
 	if (send_addr_notify)
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
@@ -1634,14 +1640,14 @@
 	int err;
 	struct net_device *dev;
 	unsigned int num_queues = 1;
-	unsigned int real_num_queues = 1;
 
 	if (ops->get_tx_queues) {
-		err = ops->get_tx_queues(src_net, tb, &num_queues,
-					 &real_num_queues);
-		if (err)
+		err = ops->get_tx_queues(src_net, tb);
+		if (err < 0)
 			goto err;
+		num_queues = err;
 	}
+
 	err = -ENOMEM;
 	dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
 	if (!dev)
@@ -1947,7 +1953,7 @@
 	return skb->len;
 }
 
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
 {
 	struct net *net = dev_net(dev);
 	struct sk_buff *skb;
@@ -1972,6 +1978,267 @@
 		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
 }
 
+static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
+				   struct net_device *dev,
+				   u8 *addr, u32 pid, u32 seq,
+				   int type, unsigned int flags)
+{
+	struct nlmsghdr *nlh;
+	struct ndmsg *ndm;
+
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	ndm = nlmsg_data(nlh);
+	ndm->ndm_family  = AF_BRIDGE;
+	ndm->ndm_pad1	 = 0;
+	ndm->ndm_pad2    = 0;
+	ndm->ndm_flags	 = flags;
+	ndm->ndm_type	 = 0;
+	ndm->ndm_ifindex = dev->ifindex;
+	ndm->ndm_state   = NUD_PERMANENT;
+
+	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
+		goto nla_put_failure;
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static inline size_t rtnl_fdb_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
+}
+
+static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
+{
+	struct net *net = dev_net(dev);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
+	if (!skb)
+		goto errout;
+
+	err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto errout;
+	}
+
+	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+	return;
+errout:
+	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+	struct net *net = sock_net(skb->sk);
+	struct net_device *master = NULL;
+	struct ndmsg *ndm;
+	struct nlattr *tb[NDA_MAX+1];
+	struct net_device *dev;
+	u8 *addr;
+	int err;
+
+	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+	if (err < 0)
+		return err;
+
+	ndm = nlmsg_data(nlh);
+	if (ndm->ndm_ifindex == 0) {
+		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
+		return -EINVAL;
+	}
+
+	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
+	if (dev == NULL) {
+		pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
+		return -ENODEV;
+	}
+
+	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
+		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
+		return -EINVAL;
+	}
+
+	addr = nla_data(tb[NDA_LLADDR]);
+	if (!is_valid_ether_addr(addr)) {
+		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n");
+		return -EINVAL;
+	}
+
+	err = -EOPNOTSUPP;
+
+	/* Support fdb on the master device, the net/bridge default case */
+	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
+	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
+		master = dev->master;
+		err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr,
+						      nlh->nlmsg_flags);
+		if (err)
+			goto out;
+		else
+			ndm->ndm_flags &= ~NTF_MASTER;
+	}
+
+	/* Embedded bridge, macvlan, and any other device support */
+	if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
+		err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr,
+						   nlh->nlmsg_flags);
+
+		if (!err) {
+			rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
+			ndm->ndm_flags &= ~NTF_SELF;
+		}
+	}
+out:
+	return err;
+}
+
+static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+	struct net *net = sock_net(skb->sk);
+	struct ndmsg *ndm;
+	struct nlattr *llattr;
+	struct net_device *dev;
+	int err = -EINVAL;
+	__u8 *addr;
+
+	if (nlmsg_len(nlh) < sizeof(*ndm))
+		return -EINVAL;
+
+	ndm = nlmsg_data(nlh);
+	if (ndm->ndm_ifindex == 0) {
+		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
+		return -EINVAL;
+	}
+
+	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
+	if (dev == NULL) {
+		pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
+		return -ENODEV;
+	}
+
+	llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
+	if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
+		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
+		return -EINVAL;
+	}
+
+	addr = nla_data(llattr);
+	err = -EOPNOTSUPP;
+
+	/* Support fdb on the master device, the net/bridge default case */
+	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
+	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
+		struct net_device *master = dev->master;
+
+		if (master->netdev_ops->ndo_fdb_del)
+			err = master->netdev_ops->ndo_fdb_del(ndm, dev, addr);
+
+		if (err)
+			goto out;
+		else
+			ndm->ndm_flags &= ~NTF_MASTER;
+	}
+
+	/* Embedded bridge, macvlan, and any other device support */
+	if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
+		err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr);
+
+		if (!err) {
+			rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
+			ndm->ndm_flags &= ~NTF_SELF;
+		}
+	}
+out:
+	return err;
+}
+
+static int nlmsg_populate_fdb(struct sk_buff *skb,
+			      struct netlink_callback *cb,
+			      struct net_device *dev,
+			      int *idx,
+			      struct netdev_hw_addr_list *list)
+{
+	struct netdev_hw_addr *ha;
+	int err;
+	u32 pid, seq;
+
+	pid = NETLINK_CB(cb->skb).pid;
+	seq = cb->nlh->nlmsg_seq;
+
+	list_for_each_entry(ha, &list->list, list) {
+		if (*idx < cb->args[0])
+			goto skip;
+
+		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
+					      pid, seq, 0, NTF_SELF);
+		if (err < 0)
+			return err;
+skip:
+		*idx += 1;
+	}
+	return 0;
+}
+
+/**
+ * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table
+ * @skb: socket buffer the dump is built in
+ * @cb: netlink callback state for the dump
+ * @dev: netdevice whose addresses are dumped
+ * @idx: index to start the dump at
+ *
+ * Default netdevice operation to dump the existing unicast and multicast
+ * address lists. Returns the updated index.
+ */
+int ndo_dflt_fdb_dump(struct sk_buff *skb,
+		      struct netlink_callback *cb,
+		      struct net_device *dev,
+		      int idx)
+{
+	int err;
+
+	netif_addr_lock_bh(dev);
+	err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc);
+	if (err)
+		goto out;
+	nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
+out:
+	netif_addr_unlock_bh(dev);
+	return idx;
+}
+EXPORT_SYMBOL(ndo_dflt_fdb_dump);
+
+static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	int idx = 0;
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
+		if (dev->priv_flags & IFF_BRIDGE_PORT) {
+			struct net_device *master = dev->master;
+			const struct net_device_ops *ops = master->netdev_ops;
+
+			if (ops->ndo_fdb_dump)
+				idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
+		}
+
+		if (dev->netdev_ops->ndo_fdb_dump)
+			idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
+	}
+	rcu_read_unlock();
+
+	cb->args[0] = idx;
+	return skb->len;
+}
+
 /* Protected by RTNL sempahore.  */
 static struct rtattr **rta_buf;
 static int rtattr_max;
@@ -2042,7 +2309,7 @@
 		struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
 
 		while (RTA_OK(attr, attrlen)) {
-			unsigned flavor = attr->rta_type;
+			unsigned int flavor = attr->rta_type;
 			if (flavor) {
 				if (flavor > rta_max[sz_idx])
 					return -EINVAL;
@@ -2144,5 +2411,9 @@
 
 	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
 	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
+
+	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
+	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
+	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
 }
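
The three rtnl_register(PF_BRIDGE, ...) calls above route
RTM_NEWNEIGH/RTM_DELNEIGH/RTM_GETNEIGH to the new
ndo_fdb_add/ndo_fdb_del/ndo_fdb_dump hooks. A hypothetical driver that only
needs its unicast and multicast lists reported could wire them up as below
(sketch; the example_* names are invented and the hook signatures are written
the way the handlers above call them):

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	static int example_fdb_add(struct ndmsg *ndm, struct net_device *dev,
				   unsigned char *addr, u16 flags)
	{
		/* NTF_SELF request: program addr into the embedded switch here */
		return 0;
	}

	static int example_fdb_del(struct ndmsg *ndm, struct net_device *dev,
				   unsigned char *addr)
	{
		/* remove addr from the embedded switch again */
		return 0;
	}

	static int example_fdb_dump(struct sk_buff *skb,
				    struct netlink_callback *cb,
				    struct net_device *dev, int idx)
	{
		/* let the new default helper walk dev->uc and dev->mc */
		return ndo_dflt_fdb_dump(skb, cb, dev, idx);
	}

	static const struct net_device_ops example_netdev_ops = {
		/* ndo_open, ndo_start_xmit, ... as usual */
		.ndo_fdb_add	= example_fdb_add,
		.ndo_fdb_del	= example_fdb_del,
		.ndo_fdb_dump	= example_fdb_dump,
	};
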
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e598400..016694d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -36,6 +36,8 @@
  *	The functions in this file will not compile correctly with gcc 2.4.x
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -67,10 +69,9 @@
 
 #include <asm/uaccess.h>
 #include <trace/events/skb.h>
+#include <linux/highmem.h>
 
-#include "kmap_skb.h"
-
-static struct kmem_cache *skbuff_head_cache __read_mostly;
+struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
@@ -119,11 +120,10 @@
  */
 static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 {
-	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
-			  "data:%p tail:%#lx end:%#lx dev:%s\n",
-	       here, skb->len, sz, skb->head, skb->data,
-	       (unsigned long)skb->tail, (unsigned long)skb->end,
-	       skb->dev ? skb->dev->name : "<NULL>");
+	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
+		 __func__, here, skb->len, sz, skb->head, skb->data,
+		 (unsigned long)skb->tail, (unsigned long)skb->end,
+		 skb->dev ? skb->dev->name : "<NULL>");
 	BUG();
 }
 
@@ -138,11 +138,10 @@
 
 static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 {
-	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
-			  "data:%p tail:%#lx end:%#lx dev:%s\n",
-	       here, skb->len, sz, skb->head, skb->data,
-	       (unsigned long)skb->tail, (unsigned long)skb->end,
-	       skb->dev ? skb->dev->name : "<NULL>");
+	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
+		 __func__, here, skb->len, sz, skb->head, skb->data,
+		 (unsigned long)skb->tail, (unsigned long)skb->end,
+		 skb->dev ? skb->dev->name : "<NULL>");
 	BUG();
 }
 
@@ -246,6 +245,7 @@
 /**
  * build_skb - build a network buffer
  * @data: data buffer provided by caller
+ * @frag_size: size of fragment, or 0 if head was kmalloced
  *
  * Allocate a new &sk_buff. Caller provides space holding head and
  * skb_shared_info. @data must have been allocated by kmalloc()
@@ -259,20 +259,21 @@
  *  before giving packet to stack.
  *  RX rings only contains data buffers, not full skbs.
  */
-struct sk_buff *build_skb(void *data)
+struct sk_buff *build_skb(void *data, unsigned int frag_size)
 {
 	struct skb_shared_info *shinfo;
 	struct sk_buff *skb;
-	unsigned int size;
+	unsigned int size = frag_size ? : ksize(data);
 
 	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
 	if (!skb)
 		return NULL;
 
-	size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	memset(skb, 0, offsetof(struct sk_buff, tail));
 	skb->truesize = SKB_TRUESIZE(size);
+	skb->head_frag = frag_size != 0;
 	atomic_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
@@ -292,6 +293,46 @@
 }
 EXPORT_SYMBOL(build_skb);
 
+struct netdev_alloc_cache {
+	struct page *page;
+	unsigned int offset;
+};
+static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+void *netdev_alloc_frag(unsigned int fragsz)
+{
+	struct netdev_alloc_cache *nc;
+	void *data = NULL;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	nc = &__get_cpu_var(netdev_alloc_cache);
+	if (unlikely(!nc->page)) {
+refill:
+		nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+		nc->offset = 0;
+	}
+	if (likely(nc->page)) {
+		if (nc->offset + fragsz > PAGE_SIZE) {
+			put_page(nc->page);
+			goto refill;
+		}
+		data = page_address(nc->page) + nc->offset;
+		nc->offset += fragsz;
+		get_page(nc->page);
+	}
+	local_irq_restore(flags);
+	return data;
+}
+EXPORT_SYMBOL(netdev_alloc_frag);
+
 /**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *	@dev: network device to receive on
@@ -306,11 +347,23 @@
  *	%NULL is returned if there is no free memory.
  */
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-		unsigned int length, gfp_t gfp_mask)
+				   unsigned int length, gfp_t gfp_mask)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
+	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
+			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
+	if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+		void *data = netdev_alloc_frag(fragsz);
+
+		if (likely(data)) {
+			skb = build_skb(data, fragsz);
+			if (unlikely(!skb))
+				put_page(virt_to_head_page(data));
+		}
+	} else {
+		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
+	}
 	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
 		skb->dev = dev;
@@ -329,28 +382,6 @@
 }
 EXPORT_SYMBOL(skb_add_rx_frag);
 
-/**
- *	dev_alloc_skb - allocate an skbuff for receiving
- *	@length: length to allocate
- *
- *	Allocate a new &sk_buff and assign it a usage count of one. The
- *	buffer has unspecified headroom built in. Users should allocate
- *	the headroom they think they need without accounting for the
- *	built in space. The built in space is used for optimisations.
- *
- *	%NULL is returned if there is no free memory. Although this function
- *	allocates memory it can be called from an interrupt.
- */
-struct sk_buff *dev_alloc_skb(unsigned int length)
-{
-	/*
-	 * There is more code here than it seems:
-	 * __dev_alloc_skb is an inline
-	 */
-	return __dev_alloc_skb(length, GFP_ATOMIC);
-}
-EXPORT_SYMBOL(dev_alloc_skb);
-
 static void skb_drop_list(struct sk_buff **listp)
 {
 	struct sk_buff *list = *listp;
@@ -377,6 +408,14 @@
 		skb_get(list);
 }
 
+static void skb_free_head(struct sk_buff *skb)
+{
+	if (skb->head_frag)
+		put_page(virt_to_head_page(skb->head));
+	else
+		kfree(skb->head);
+}
+
 static void skb_release_data(struct sk_buff *skb)
 {
 	if (!skb->cloned ||
@@ -403,7 +442,7 @@
 		if (skb_has_frag_list(skb))
 			skb_drop_fraglist(skb);
 
-		kfree(skb->head);
+		skb_free_head(skb);
 	}
 }
 
@@ -645,6 +684,7 @@
 	C(tail);
 	C(end);
 	C(head);
+	C(head_frag);
 	C(data);
 	C(truesize);
 	atomic_set(&n->users, 1);
@@ -707,10 +747,10 @@
 			}
 			return -ENOMEM;
 		}
-		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+		vaddr = kmap_atomic(skb_frag_page(f));
 		memcpy(page_address(page),
 		       vaddr + f->page_offset, skb_frag_size(f));
-		kunmap_skb_frag(vaddr);
+		kunmap_atomic(vaddr);
 		page->private = (unsigned long)head;
 		head = page;
 	}
@@ -819,7 +859,7 @@
 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 {
 	int headerlen = skb_headroom(skb);
-	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
+	unsigned int size = skb_end_offset(skb) + skb->data_len;
 	struct sk_buff *n = alloc_skb(size, gfp_mask);
 
 	if (!n)
@@ -920,9 +960,8 @@
 {
 	int i;
 	u8 *data;
-	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
+	int size = nhead + skb_end_offset(skb) + ntail;
 	long off;
-	bool fastpath;
 
 	BUG_ON(nhead < 0);
 
@@ -931,27 +970,6 @@
 
 	size = SKB_DATA_ALIGN(size);
 
-	/* Check if we can avoid taking references on fragments if we own
-	 * the last reference on skb->head. (see skb_release_data())
-	 */
-	if (!skb->cloned)
-		fastpath = true;
-	else {
-		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
-		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
-	}
-
-	if (fastpath &&
-	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
-		memmove(skb->head + size, skb_shinfo(skb),
-			offsetof(struct skb_shared_info,
-				 frags[skb_shinfo(skb)->nr_frags]));
-		memmove(skb->head + nhead, skb->head,
-			skb_tail_pointer(skb) - skb->head);
-		off = nhead;
-		goto adjust_others;
-	}
-
 	data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 		       gfp_mask);
 	if (!data)
@@ -967,9 +985,12 @@
 	       skb_shinfo(skb),
 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
 
-	if (fastpath) {
-		kfree(skb->head);
-	} else {
+	/*
+	 * if shinfo is shared we must drop the old head gracefully, but if it
+	 * is not we can just drop the old head and let the existing refcount
+	 * be since all we did is relocate the values
+	 */
+	if (skb_cloned(skb)) {
 		/* copy this zero copy skb frags */
 		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 			if (skb_copy_ubufs(skb, gfp_mask))
@@ -982,11 +1003,13 @@
 			skb_clone_fraglist(skb);
 
 		skb_release_data(skb);
+	} else {
+		skb_free_head(skb);
 	}
 	off = (data + nhead) - skb->head;
 
 	skb->head     = data;
-adjust_others:
+	skb->head_frag = 0;
 	skb->data    += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 	skb->end      = size;
@@ -1275,7 +1298,7 @@
 				return -ENOMEM;
 
 			nfrag->next = frag->next;
-			kfree_skb(frag);
+			consume_skb(frag);
 			frag = nfrag;
 			*fragp = frag;
 		}
@@ -1487,21 +1510,22 @@
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		end = start + skb_frag_size(f);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
 			if (copy > len)
 				copy = len;
 
-			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+			vaddr = kmap_atomic(skb_frag_page(f));
 			memcpy(to,
-			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
-			       offset - start, copy);
-			kunmap_skb_frag(vaddr);
+			       vaddr + f->page_offset + offset - start,
+			       copy);
+			kunmap_atomic(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
@@ -1547,9 +1571,9 @@
 	put_page(spd->pages[i]);
 }
 
-static inline struct page *linear_to_page(struct page *page, unsigned int *len,
-					  unsigned int *offset,
-					  struct sk_buff *skb, struct sock *sk)
+static struct page *linear_to_page(struct page *page, unsigned int *len,
+				   unsigned int *offset,
+				   struct sk_buff *skb, struct sock *sk)
 {
 	struct page *p = sk->sk_sndmsg_page;
 	unsigned int off;
@@ -1565,6 +1589,9 @@
 	} else {
 		unsigned int mlen;
 
+		/* If we are the only user of the page, we can reset offset */
+		if (page_count(p) == 1)
+			sk->sk_sndmsg_off = 0;
 		off = sk->sk_sndmsg_off;
 		mlen = PAGE_SIZE - off;
 		if (mlen < 64 && mlen < *len) {
@@ -1578,36 +1605,48 @@
 	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
 	sk->sk_sndmsg_off += *len;
 	*offset = off;
-	get_page(p);
 
 	return p;
 }
 
+static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
+			     struct page *page,
+			     unsigned int offset)
+{
+	return	spd->nr_pages &&
+		spd->pages[spd->nr_pages - 1] == page &&
+		(spd->partial[spd->nr_pages - 1].offset +
+		 spd->partial[spd->nr_pages - 1].len == offset);
+}
+
 /*
  * Fill page/offset/length into spd, if it can hold more pages.
  */
-static inline int spd_fill_page(struct splice_pipe_desc *spd,
-				struct pipe_inode_info *pipe, struct page *page,
-				unsigned int *len, unsigned int offset,
-				struct sk_buff *skb, int linear,
-				struct sock *sk)
+static bool spd_fill_page(struct splice_pipe_desc *spd,
+			  struct pipe_inode_info *pipe, struct page *page,
+			  unsigned int *len, unsigned int offset,
+			  struct sk_buff *skb, bool linear,
+			  struct sock *sk)
 {
-	if (unlikely(spd->nr_pages == pipe->buffers))
-		return 1;
+	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
+		return true;
 
 	if (linear) {
 		page = linear_to_page(page, len, &offset, skb, sk);
 		if (!page)
-			return 1;
-	} else
-		get_page(page);
-
+			return true;
+	}
+	if (spd_can_coalesce(spd, page, offset)) {
+		spd->partial[spd->nr_pages - 1].len += *len;
+		return false;
+	}
+	get_page(page);
 	spd->pages[spd->nr_pages] = page;
 	spd->partial[spd->nr_pages].len = *len;
 	spd->partial[spd->nr_pages].offset = offset;
 	spd->nr_pages++;
 
-	return 0;
+	return false;
 }
 
 static inline void __segment_seek(struct page **page, unsigned int *poff,
@@ -1624,20 +1663,20 @@
 	*plen -= off;
 }
 
-static inline int __splice_segment(struct page *page, unsigned int poff,
-				   unsigned int plen, unsigned int *off,
-				   unsigned int *len, struct sk_buff *skb,
-				   struct splice_pipe_desc *spd, int linear,
-				   struct sock *sk,
-				   struct pipe_inode_info *pipe)
+static bool __splice_segment(struct page *page, unsigned int poff,
+			     unsigned int plen, unsigned int *off,
+			     unsigned int *len, struct sk_buff *skb,
+			     struct splice_pipe_desc *spd, bool linear,
+			     struct sock *sk,
+			     struct pipe_inode_info *pipe)
 {
 	if (!*len)
-		return 1;
+		return true;
 
 	/* skip this segment if already processed */
 	if (*off >= plen) {
 		*off -= plen;
-		return 0;
+		return false;
 	}
 
 	/* ignore any bits we already processed */
@@ -1653,34 +1692,38 @@
 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
 		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
-			return 1;
+			return true;
 
 		__segment_seek(&page, &poff, &plen, flen);
 		*len -= flen;
 
 	} while (*len && plen);
 
-	return 0;
+	return false;
 }
 
 /*
- * Map linear and fragment data from the skb to spd. It reports failure if the
+ * Map linear and fragment data from the skb to spd. It reports true if the
  * pipe is full or if we already spliced the requested length.
  */
-static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
-			     unsigned int *offset, unsigned int *len,
-			     struct splice_pipe_desc *spd, struct sock *sk)
+static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
+			      unsigned int *offset, unsigned int *len,
+			      struct splice_pipe_desc *spd, struct sock *sk)
 {
 	int seg;
 
-	/*
-	 * map the linear part
+	/* map the linear part :
+	 * If skb->head_frag is set, this 'linear' part is backed by a
+	 * fragment, and if the head is not shared with any clones then
+	 * we can avoid a copy since we own the head portion of this page.
 	 */
 	if (__splice_segment(virt_to_page(skb->data),
 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
 			     skb_headlen(skb),
-			     offset, len, skb, spd, 1, sk, pipe))
-		return 1;
+			     offset, len, skb, spd,
+			     skb_head_is_locked(skb),
+			     sk, pipe))
+		return true;
 
 	/*
 	 * then map the fragments
@@ -1690,11 +1733,11 @@
 
 		if (__splice_segment(skb_frag_page(f),
 				     f->page_offset, skb_frag_size(f),
-				     offset, len, skb, spd, 0, sk, pipe))
-			return 1;
+				     offset, len, skb, spd, false, sk, pipe))
+			return true;
 	}
 
-	return 0;
+	return false;
 }
 
 /*
@@ -1707,8 +1750,8 @@
 		    struct pipe_inode_info *pipe, unsigned int tlen,
 		    unsigned int flags)
 {
-	struct partial_page partial[PIPE_DEF_BUFFERS];
-	struct page *pages[PIPE_DEF_BUFFERS];
+	struct partial_page partial[MAX_SKB_FRAGS];
+	struct page *pages[MAX_SKB_FRAGS];
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
@@ -1720,9 +1763,6 @@
 	struct sock *sk = skb->sk;
 	int ret = 0;
 
-	if (splice_grow_spd(pipe, &spd))
-		return -ENOMEM;
-
 	/*
 	 * __skb_splice_bits() only fails if the output has no room left,
 	 * so no point in going over the frag_list for the error case.
@@ -1758,7 +1798,6 @@
 		lock_sock(sk);
 	}
 
-	splice_shrink_spd(pipe, &spd);
 	return ret;
 }
 
@@ -1806,10 +1845,10 @@
 			if (copy > len)
 				copy = len;
 
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			memcpy(vaddr + frag->page_offset + offset - start,
 			       from, copy);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
@@ -1869,21 +1908,21 @@
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		end = start + skb_frag_size(frag);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			csum2 = csum_partial(vaddr + frag->page_offset +
 					     offset - start, copy, 0);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
@@ -1955,12 +1994,12 @@
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			csum2 = csum_partial_copy_nocheck(vaddr +
 							  frag->page_offset +
 							  offset - start, to,
 							  copy, 0);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
@@ -2480,7 +2519,7 @@
 
 		if (abs_offset < block_limit) {
 			if (!st->frag_data)
-				st->frag_data = kmap_skb_frag(frag);
+				st->frag_data = kmap_atomic(skb_frag_page(frag));
 
 			*data = (u8 *) st->frag_data + frag->page_offset +
 				(abs_offset - st->stepped_offset);
@@ -2489,7 +2528,7 @@
 		}
 
 		if (st->frag_data) {
-			kunmap_skb_frag(st->frag_data);
+			kunmap_atomic(st->frag_data);
 			st->frag_data = NULL;
 		}
 
@@ -2498,7 +2537,7 @@
 	}
 
 	if (st->frag_data) {
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 		st->frag_data = NULL;
 	}
 
@@ -2526,7 +2565,7 @@
 void skb_abort_seq_read(struct skb_seq_state *st)
 {
 	if (st->frag_data)
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 }
 EXPORT_SYMBOL(skb_abort_seq_read);
 
@@ -2718,14 +2757,13 @@
 			if (unlikely(!nskb))
 				goto err;
 
-			hsize = skb_end_pointer(nskb) - nskb->head;
+			hsize = skb_end_offset(nskb);
 			if (skb_cow_head(nskb, doffset + headroom)) {
 				kfree_skb(nskb);
 				goto err;
 			}
 
-			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
-					  hsize;
+			nskb->truesize += skb_end_offset(nskb) - hsize;
 			skb_release_head_state(nskb);
 			__skb_push(nskb, doffset);
 		} else {
@@ -2843,6 +2881,7 @@
 	unsigned int len = skb_gro_len(skb);
 	unsigned int offset = skb_gro_offset(skb);
 	unsigned int headlen = skb_headlen(skb);
+	unsigned int delta_truesize;
 
 	if (p->len + len >= 65536)
 		return -E2BIG;
@@ -2872,11 +2911,41 @@
 		frag->page_offset += offset;
 		skb_frag_size_sub(frag, offset);
 
+		/* all fragments truesize : remove (head size + sk_buff) */
+		delta_truesize = skb->truesize -
+				 SKB_TRUESIZE(skb_end_offset(skb));
+
 		skb->truesize -= skb->data_len;
 		skb->len -= skb->data_len;
 		skb->data_len = 0;
 
-		NAPI_GRO_CB(skb)->free = 1;
+		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
+		goto done;
+	} else if (skb->head_frag) {
+		int nr_frags = pinfo->nr_frags;
+		skb_frag_t *frag = pinfo->frags + nr_frags;
+		struct page *page = virt_to_head_page(skb->head);
+		unsigned int first_size = headlen - offset;
+		unsigned int first_offset;
+
+		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
+			return -E2BIG;
+
+		first_offset = skb->data -
+			       (unsigned char *)page_address(page) +
+			       offset;
+
+		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
+
+		frag->page.p	  = page;
+		frag->page_offset = first_offset;
+		skb_frag_size_set(frag, first_size);
+
+		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
+		/* We don't need to clear skbinfo->nr_frags here */
+
+		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
 		goto done;
 	} else if (skb_gro_len(p) != pinfo->gso_size)
 		return -E2BIG;
@@ -2918,7 +2987,7 @@
 	p = nskb;
 
 merge:
-	p->truesize += skb->truesize - len;
+	delta_truesize = skb->truesize;
 	if (offset > headlen) {
 		unsigned int eat = offset - headlen;
 
@@ -2938,7 +3007,7 @@
 done:
 	NAPI_GRO_CB(p)->count++;
 	p->data_len += len;
-	p->truesize += len;
+	p->truesize += delta_truesize;
 	p->len += len;
 
 	NAPI_GRO_CB(skb)->same_flow = 1;
@@ -3166,7 +3235,7 @@
 	int len = skb->len;
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf)
+	    (unsigned int)sk->sk_rcvbuf)
 		return -ENOMEM;
 
 	skb_orphan(skb);
@@ -3260,10 +3329,8 @@
 {
 	if (unlikely(start > skb_headlen(skb)) ||
 	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
-		if (net_ratelimit())
-			printk(KERN_WARNING
-			       "bad partial csum: csum=%u/%u len=%u\n",
-			       start, off, skb_headlen(skb));
+		net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
+				     start, off, skb_headlen(skb));
 		return false;
 	}
 	skb->ip_summed = CHECKSUM_PARTIAL;
@@ -3275,8 +3342,93 @@
 
 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
 {
-	if (net_ratelimit())
-		pr_warning("%s: received packets cannot be forwarded"
-			   " while LRO is enabled\n", skb->dev->name);
+	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
+			     skb->dev->name);
 }
 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
+{
+	if (head_stolen)
+		kmem_cache_free(skbuff_head_cache, skb);
+	else
+		__kfree_skb(skb);
+}
+EXPORT_SYMBOL(kfree_skb_partial);
+
+/**
+ * skb_try_coalesce - try to merge skb to prior one
+ * @to: prior buffer
+ * @from: buffer to add
+ * @fragstolen: set to true if @from's head fragment was stolen (out)
+ * @delta_truesize: by how much @to's truesize grew (out)
+ */
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+		      bool *fragstolen, int *delta_truesize)
+{
+	int i, delta, len = from->len;
+
+	*fragstolen = false;
+
+	if (skb_cloned(to))
+		return false;
+
+	if (len <= skb_tailroom(to)) {
+		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
+		*delta_truesize = 0;
+		return true;
+	}
+
+	if (skb_has_frag_list(to) || skb_has_frag_list(from))
+		return false;
+
+	if (skb_headlen(from) != 0) {
+		struct page *page;
+		unsigned int offset;
+
+		if (skb_shinfo(to)->nr_frags +
+		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
+			return false;
+
+		if (skb_head_is_locked(from))
+			return false;
+
+		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+		page = virt_to_head_page(from->head);
+		offset = from->data - (unsigned char *)page_address(page);
+
+		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
+				   page, offset, skb_headlen(from));
+		*fragstolen = true;
+	} else {
+		if (skb_shinfo(to)->nr_frags +
+		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
+			return false;
+
+		delta = from->truesize -
+			SKB_TRUESIZE(skb_end_pointer(from) - from->head);
+	}
+
+	WARN_ON_ONCE(delta < len);
+
+	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
+	       skb_shinfo(from)->frags,
+	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
+	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
+
+	if (!skb_cloned(from))
+		skb_shinfo(from)->nr_frags = 0;
+
+	/* if the skb is not cloned this does nothing since we set nr_frags to 0 */
+	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
+		skb_frag_ref(from, i);
+
+	to->truesize += delta;
+	to->len += len;
+	to->data_len += len;
+
+	*delta_truesize = delta;
+	return true;
+}
+EXPORT_SYMBOL(skb_try_coalesce);
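The new skb_try_coalesce() pairs with kfree_skb_partial() above: on success the caller charges the returned truesize delta to its own accounting and then frees @from, telling the free path whether @from's head page was stolen. A minimal sketch of that calling pattern follows; the sk_mem_charge() and receive-queue bookkeeping are illustrative assumptions, not part of this patch.

/* Sketch: coalesce an incoming skb into the tail of a receive queue.
 * Only skb_try_coalesce() and kfree_skb_partial() come from this patch;
 * the surrounding accounting is an assumption for illustration.
 */
static void example_rcv_coalesce(struct sock *sk, struct sk_buff *tail,
				 struct sk_buff *skb)
{
	bool fragstolen;
	int delta;

	if (tail && skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		sk_mem_charge(sk, delta);	/* account the moved truesize */
		kfree_skb_partial(skb, fragstolen); /* keep head if stolen */
	} else {
		skb_queue_tail(&sk->sk_receive_queue, skb);
	}
}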
diff --git a/net/core/sock.c b/net/core/sock.c
index b2e14c0..5efcd63 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -89,6 +89,8 @@
  *		2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -113,6 +115,7 @@
 #include <linux/user_namespace.h>
 #include <linux/static_key.h>
 #include <linux/memcontrol.h>
+#include <linux/prefetch.h>
 
 #include <asm/uaccess.h>
 
@@ -258,7 +261,9 @@
 
 /* Run time adjustable parameters. */
 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
+EXPORT_SYMBOL(sysctl_wmem_max);
 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
+EXPORT_SYMBOL(sysctl_rmem_max);
 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 
@@ -294,9 +299,8 @@
 		*timeo_p = 0;
 		if (warned < 10 && net_ratelimit()) {
 			warned++;
-			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
-			       "tries to set negative timeout\n",
-				current->comm, task_pid_nr(current));
+			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
+				__func__, current->comm, task_pid_nr(current));
 		}
 		return 0;
 	}
@@ -314,8 +318,8 @@
 	static char warncomm[TASK_COMM_LEN];
 	if (strcmp(warncomm, current->comm) && warned < 5) {
 		strcpy(warncomm,  current->comm);
-		printk(KERN_WARNING "process `%s' is using obsolete "
-		       "%s SO_BSDCOMPAT\n", warncomm, name);
+		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
+			warncomm, name);
 		warned++;
 	}
 }
@@ -389,7 +393,7 @@
 
 	skb->dev = NULL;
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
 	}
@@ -406,7 +410,7 @@
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else if (sk_add_backlog(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
@@ -561,7 +565,7 @@
 			sock_valbool_flag(sk, SOCK_DBG, valbool);
 		break;
 	case SO_REUSEADDR:
-		sk->sk_reuse = valbool;
+		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
 		break;
 	case SO_TYPE:
 	case SO_PROTOCOL:
@@ -577,23 +581,15 @@
 		break;
 	case SO_SNDBUF:
 		/* Don't error on this BSD doesn't and if you think
-		   about it this is right. Otherwise apps have to
-		   play 'guess the biggest size' games. RCVBUF/SNDBUF
-		   are treated in BSD as hints */
-
-		if (val > sysctl_wmem_max)
-			val = sysctl_wmem_max;
+		 * about it this is right. Otherwise apps have to
+		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
+		 * are treated in BSD as hints
+		 */
+		val = min_t(u32, val, sysctl_wmem_max);
 set_sndbuf:
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-		if ((val * 2) < SOCK_MIN_SNDBUF)
-			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
-		else
-			sk->sk_sndbuf = val * 2;
-
-		/*
-		 *	Wake up sending tasks if we
-		 *	upped the value.
-		 */
+		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
+		/* Wake up sending tasks if we upped the value. */
 		sk->sk_write_space(sk);
 		break;
 
@@ -606,12 +602,11 @@
 
 	case SO_RCVBUF:
 		/* Don't error on this BSD doesn't and if you think
-		   about it this is right. Otherwise apps have to
-		   play 'guess the biggest size' games. RCVBUF/SNDBUF
-		   are treated in BSD as hints */
-
-		if (val > sysctl_rmem_max)
-			val = sysctl_rmem_max;
+		 * about it this is right. Otherwise apps have to
+		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
+		 * are treated in BSD as hints
+		 */
+		val = min_t(u32, val, sysctl_rmem_max);
 set_rcvbuf:
 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 		/*
@@ -629,10 +624,7 @@
 		 * returning the value we actually used in getsockopt
 		 * is the most desirable behavior.
 		 */
-		if ((val * 2) < SOCK_MIN_RCVBUF)
-			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
-		else
-			sk->sk_rcvbuf = val * 2;
+		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
 		break;
 
 	case SO_RCVBUFFORCE:
@@ -858,7 +850,7 @@
 		break;
 
 	case SO_BROADCAST:
-		v.val = !!sock_flag(sk, SOCK_BROADCAST);
+		v.val = sock_flag(sk, SOCK_BROADCAST);
 		break;
 
 	case SO_SNDBUF:
@@ -874,7 +866,7 @@
 		break;
 
 	case SO_KEEPALIVE:
-		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
+		v.val = sock_flag(sk, SOCK_KEEPOPEN);
 		break;
 
 	case SO_TYPE:
@@ -896,7 +888,7 @@
 		break;
 
 	case SO_OOBINLINE:
-		v.val = !!sock_flag(sk, SOCK_URGINLINE);
+		v.val = sock_flag(sk, SOCK_URGINLINE);
 		break;
 
 	case SO_NO_CHECK:
@@ -909,7 +901,7 @@
 
 	case SO_LINGER:
 		lv		= sizeof(v.ling);
-		v.ling.l_onoff	= !!sock_flag(sk, SOCK_LINGER);
+		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
 		v.ling.l_linger	= sk->sk_lingertime / HZ;
 		break;
 
@@ -975,7 +967,7 @@
 		break;
 
 	case SO_PASSCRED:
-		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
+		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
 		break;
 
 	case SO_PEERCRED:
@@ -1010,7 +1002,7 @@
 		break;
 
 	case SO_PASSSEC:
-		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
+		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
 		break;
 
 	case SO_PEERSEC:
@@ -1021,11 +1013,11 @@
 		break;
 
 	case SO_RXQ_OVFL:
-		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
+		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
 		break;
 
 	case SO_WIFI_STATUS:
-		v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
+		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
 		break;
 
 	case SO_PEEK_OFF:
@@ -1035,7 +1027,7 @@
 		v.val = sk->sk_peek_off;
 		break;
 	case SO_NOFCS:
-		v.val = !!sock_flag(sk, SOCK_NOFCS);
+		v.val = sock_flag(sk, SOCK_NOFCS);
 		break;
 	default:
 		return -ENOPROTOOPT;
@@ -1247,8 +1239,8 @@
 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
 
 	if (atomic_read(&sk->sk_omem_alloc))
-		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
-		       __func__, atomic_read(&sk->sk_omem_alloc));
+		pr_debug("%s: optmem leakage (%d bytes) detected\n",
+			 __func__, atomic_read(&sk->sk_omem_alloc));
 
 	if (sk->sk_peer_cred)
 		put_cred(sk->sk_peer_cred);
@@ -1534,7 +1526,7 @@
  */
 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
 {
-	if ((unsigned)size <= sysctl_optmem_max &&
+	if ((unsigned int)size <= sysctl_optmem_max &&
 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
 		void *mem;
 		/* First do the add, to avoid the race if kmalloc
@@ -1712,6 +1704,7 @@
 		do {
 			struct sk_buff *next = skb->next;
 
+			prefetch(next);
 			WARN_ON_ONCE(skb_dst_is_noref(skb));
 			skb->next = NULL;
 			sk_backlog_rcv(sk, skb);
@@ -2432,7 +2425,7 @@
 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
 
 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
-		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
+		pr_err("PROTO_INUSE_NR exhausted\n");
 		return;
 	}
 
@@ -2462,8 +2455,8 @@
 					NULL);
 
 		if (prot->slab == NULL) {
-			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
-			       prot->name);
+			pr_crit("%s: Can't create sock SLAB cache!\n",
+				prot->name);
 			goto out;
 		}
 
@@ -2477,8 +2470,8 @@
 								 SLAB_HWCACHE_ALIGN, NULL);
 
 			if (prot->rsk_prot->slab == NULL) {
-				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
-				       prot->name);
+				pr_crit("%s: Can't create request sock SLAB cache!\n",
+					prot->name);
 				goto out_free_request_sock_slab_name;
 			}
 		}
@@ -2576,7 +2569,7 @@
 }
 static long sock_prot_memory_allocated(struct proto *proto)
 {
-	return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L;
+	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
 }
 
 static char *sock_prot_memory_pressure(struct proto *proto)
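The SO_SNDBUF/SO_RCVBUF hunks above replace two if/else ladders with min_t()/max_t() while keeping the semantics: clamp the requested value to the sysctl maximum, then make sure the doubled value never falls below the minimum. The userspace check below demonstrates the equivalence; WMEM_MAX and MIN_SNDBUF are placeholder constants, not the kernel's SK_WMEM_MAX/SOCK_MIN_SNDBUF values.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WMEM_MAX   212992u	/* placeholder for sysctl_wmem_max */
#define MIN_SNDBUF 4608u	/* placeholder for SOCK_MIN_SNDBUF */

static uint32_t old_sndbuf(uint32_t val)
{
	if (val > WMEM_MAX)
		val = WMEM_MAX;
	if ((val * 2) < MIN_SNDBUF)
		return MIN_SNDBUF;
	return val * 2;
}

static uint32_t new_sndbuf(uint32_t val)
{
	uint32_t v = val < WMEM_MAX ? val : WMEM_MAX;	/* min_t(u32, ...) */
	uint32_t d = v * 2;

	return d > MIN_SNDBUF ? d : MIN_SNDBUF;		/* max_t(u32, ...) */
}

int main(void)
{
	uint32_t val;

	for (val = 0; val < (1u << 20); val++)
		assert(old_sndbuf(val) == new_sndbuf(val));
	puts("old and new SO_SNDBUF clamping agree");
	return 0;
}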
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index b9868e1..5fd1467 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -10,7 +10,7 @@
 #include <linux/inet_diag.h>
 #include <linux/sock_diag.h>
 
-static struct sock_diag_handler *sock_diag_handlers[AF_MAX];
+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
 static DEFINE_MUTEX(sock_diag_table_mutex);
 
@@ -70,7 +70,7 @@
 }
 EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
 
-int sock_diag_register(struct sock_diag_handler *hndl)
+int sock_diag_register(const struct sock_diag_handler *hndl)
 {
 	int err = 0;
 
@@ -88,7 +88,7 @@
 }
 EXPORT_SYMBOL_GPL(sock_diag_register);
 
-void sock_diag_unregister(struct sock_diag_handler *hnld)
+void sock_diag_unregister(const struct sock_diag_handler *hnld)
 {
 	int family = hnld->family;
 
@@ -102,7 +102,7 @@
 }
 EXPORT_SYMBOL_GPL(sock_diag_unregister);
 
-static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
+static inline const struct sock_diag_handler *sock_diag_lock_handler(int family)
 {
 	if (sock_diag_handlers[family] == NULL)
 		request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
@@ -112,7 +112,7 @@
 	return sock_diag_handlers[family];
 }
 
-static inline void sock_diag_unlock_handler(struct sock_diag_handler *h)
+static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
 {
 	mutex_unlock(&sock_diag_table_mutex);
 }
@@ -121,7 +121,7 @@
 {
 	int err;
 	struct sock_diag_req *req = NLMSG_DATA(nlh);
-	struct sock_diag_handler *hndl;
+	const struct sock_diag_handler *hndl;
 
 	if (nlmsg_len(nlh) < sizeof(*req))
 		return -EINVAL;
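With sock_diag_register()/sock_diag_unregister() now taking const pointers, per-family handlers can be declared const and placed in read-only data. A hedged sketch of the registration pattern; the handler name is hypothetical and the dump callback shape is assumed from the in-tree sock_diag users, not shown in this hunk.

static int example_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	return 0;	/* nothing to report in this sketch */
}

static const struct sock_diag_handler example_diag_handler = {
	.family	= AF_INET,
	.dump	= example_diag_dump,	/* assumed field, see note above */
};

static int __init example_diag_init(void)
{
	return sock_diag_register(&example_diag_handler);
}

static void __exit example_diag_exit(void)
{
	sock_diag_unregister(&example_diag_handler);
}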
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0c28508..a7c3684 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -14,6 +14,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/kmemleak.h>
 
 #include <net/ip.h>
 #include <net/sock.h>
@@ -202,12 +203,6 @@
 	{ }
 };
 
-__net_initdata struct ctl_path net_core_path[] = {
-	{ .procname = "net", },
-	{ .procname = "core", },
-	{ },
-};
-
 static __net_init int sysctl_core_net_init(struct net *net)
 {
 	struct ctl_table *tbl;
@@ -223,8 +218,7 @@
 		tbl[0].data = &net->core.sysctl_somaxconn;
 	}
 
-	net->core.sysctl_hdr = register_net_sysctl_table(net,
-			net_core_path, tbl);
+	net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
 	if (net->core.sysctl_hdr == NULL)
 		goto err_reg;
 
@@ -254,10 +248,7 @@
 
 static __init int sysctl_core_init(void)
 {
-	static struct ctl_table empty[1];
-
-	register_sysctl_paths(net_core_path, empty);
-	register_net_sysctl_rotable(net_core_path, net_core_table);
+	register_net_sysctl(&init_net, "net/core", net_core_table);
 	return register_pernet_subsys(&sysctl_core_ops);
 }
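Both here and in the dccp/decnet hunks further down, register_sysctl_paths() plus a ctl_path array is replaced by register_net_sysctl() taking a plain "net/..." path string, with teardown moving to unregister_net_sysctl_table(). A sketch of the new idiom for a hypothetical per-netns directory; the table, the knob and the "net/example" path are placeholders.

static int example_knob;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *example_hdr;

static int __net_init example_sysctl_init(struct net *net)
{
	example_hdr = register_net_sysctl(net, "net/example", example_table);
	return example_hdr ? 0 : -ENOMEM;
}

static void __net_exit example_sysctl_exit(struct net *net)
{
	unregister_net_sysctl_table(example_hdr);
}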
 
diff --git a/net/core/utils.c b/net/core/utils.c
index dc3c3fa..39895a6 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -58,14 +58,11 @@
 	int i;
 
 	l = 0;
-	for (i = 0; i < 4; i++)
-	{
+	for (i = 0; i < 4; i++) {
 		l <<= 8;
-		if (*str != '\0')
-		{
+		if (*str != '\0') {
 			val = 0;
-			while (*str != '\0' && *str != '.' && *str != '\n')
-			{
+			while (*str != '\0' && *str != '.' && *str != '\n') {
 				val *= 10;
 				val += *str - '0';
 				str++;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d860530..656c7c7 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -178,6 +178,7 @@
 	[DCB_ATTR_IEEE_ETS]	    = {.len = sizeof(struct ieee_ets)},
 	[DCB_ATTR_IEEE_PFC]	    = {.len = sizeof(struct ieee_pfc)},
 	[DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
+	[DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
 };
 
 static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
@@ -703,6 +704,7 @@
 
 	ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
 			  pid, seq, flags);
+	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
 out:
 	return ret;
 }
@@ -935,6 +937,7 @@
 
 	ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
 	                  DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
+	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
 
 	return ret;
 }
@@ -1205,13 +1208,15 @@
 		if (!app)
 			goto nla_put_failure;
 
-		if (app_info_type)
-			NLA_PUT(skb, app_info_type, sizeof(info), &info);
+		if (app_info_type &&
+		    nla_put(skb, app_info_type, sizeof(info), &info))
+			goto nla_put_failure;
 
-		for (i = 0; i < app_count; i++)
-			NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
-				&table[i]);
-
+		for (i = 0; i < app_count; i++) {
+			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
+				    &table[i]))
+				goto nla_put_failure;
+		}
 		nla_nest_end(skb, app);
 	}
 	err = 0;
@@ -1230,8 +1235,8 @@
 	int dcbx;
 	int err = -EMSGSIZE;
 
-	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
+	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
+		goto nla_put_failure;
 	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
 	if (!ieee)
 		goto nla_put_failure;
@@ -1239,15 +1244,28 @@
 	if (ops->ieee_getets) {
 		struct ieee_ets ets;
 		err = ops->ieee_getets(netdev, &ets);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
+			goto nla_put_failure;
+	}
+
+	if (ops->ieee_getmaxrate) {
+		struct ieee_maxrate maxrate;
+		err = ops->ieee_getmaxrate(netdev, &maxrate);
+		if (!err) {
+			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
+				      sizeof(maxrate), &maxrate);
+			if (err)
+				goto nla_put_failure;
+		}
 	}
 
 	if (ops->ieee_getpfc) {
 		struct ieee_pfc pfc;
 		err = ops->ieee_getpfc(netdev, &pfc);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
+			goto nla_put_failure;
 	}
 
 	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
@@ -1278,15 +1296,17 @@
 	if (ops->ieee_peer_getets) {
 		struct ieee_ets ets;
 		err = ops->ieee_peer_getets(netdev, &ets);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
+			goto nla_put_failure;
 	}
 
 	if (ops->ieee_peer_getpfc) {
 		struct ieee_pfc pfc;
 		err = ops->ieee_peer_getpfc(netdev, &pfc);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
+			goto nla_put_failure;
 	}
 
 	if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1340,10 +1360,11 @@
 			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
 					  &prio, &pgid, &tc_pct, &up_map);
 
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
-		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
+		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
+		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
+		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
+		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
+			goto nla_put_failure;
 		nla_nest_end(skb, tc_nest);
 	}
 
@@ -1356,7 +1377,8 @@
 		else
 			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
 					   &tc_pct);
-		NLA_PUT_U8(skb, i, tc_pct);
+		if (nla_put_u8(skb, i, tc_pct))
+			goto nla_put_failure;
 	}
 	nla_nest_end(skb, pg);
 	return 0;
@@ -1373,8 +1395,8 @@
 	int dcbx, i, err = -EMSGSIZE;
 	u8 value;
 
-	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
+	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
+		goto nla_put_failure;
 	cee = nla_nest_start(skb, DCB_ATTR_CEE);
 	if (!cee)
 		goto nla_put_failure;
@@ -1401,7 +1423,8 @@
 
 		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
 			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
-			NLA_PUT_U8(skb, i, value);
+			if (nla_put_u8(skb, i, value))
+				goto nla_put_failure;
 		}
 		nla_nest_end(skb, pfc_nest);
 	}
@@ -1454,8 +1477,9 @@
 
 		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
 		     i++)
-			if (!ops->getfeatcfg(netdev, i, &value))
-				NLA_PUT_U8(skb, i, value);
+			if (!ops->getfeatcfg(netdev, i, &value) &&
+			    nla_put_u8(skb, i, value))
+				goto nla_put_failure;
 
 		nla_nest_end(skb, feat);
 	}
@@ -1464,15 +1488,17 @@
 	if (ops->cee_peer_getpg) {
 		struct cee_pg pg;
 		err = ops->cee_peer_getpg(netdev, &pg);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
+			goto nla_put_failure;
 	}
 
 	if (ops->cee_peer_getpfc) {
 		struct cee_pfc pfc;
 		err = ops->cee_peer_getpfc(netdev, &pfc);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+		if (!err &&
+		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
+			goto nla_put_failure;
 	}
 
 	if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1589,6 +1615,14 @@
 			goto err;
 	}
 
+	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
+		struct ieee_maxrate *maxrate =
+			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
+		err = ops->ieee_setmaxrate(netdev, maxrate);
+		if (err)
+			goto err;
+	}
+
 	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
 		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
 		err = ops->ieee_setpfc(netdev, pfc);
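The dcbnl changes are part of the tree-wide move away from the NLA_PUT*() macros, which jump to nla_put_failure behind the caller's back, to explicit nla_put*() calls with checked return values. The pattern in isolation; DCB_ATTR_EXAMPLE* and struct example_blob are made-up names for illustration.

static int example_fill(struct sk_buff *skb, u8 value,
			const struct example_blob *blob)
{
	/* Old style:
	 *	NLA_PUT_U8(skb, DCB_ATTR_EXAMPLE, value);
	 * New style: the failure path is visible at the call site.
	 */
	if (nla_put_u8(skb, DCB_ATTR_EXAMPLE, value) ||
	    nla_put(skb, DCB_ATTR_EXAMPLE_BLOB, sizeof(*blob), blob))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}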
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 70bfaf2..8c67bed 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -100,7 +100,7 @@
 
 	DCCP_BUG_ON(hc->tx_t_ipi == 0);
 	ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
-		       hc->tx_s, (unsigned)(hc->tx_x >> 6));
+		       hc->tx_s, (unsigned int)(hc->tx_x >> 6));
 }
 
 static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
@@ -153,9 +153,9 @@
 
 	if (hc->tx_x != old_x) {
 		ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, "
-			       "X_recv=%u\n", (unsigned)(old_x >> 6),
-			       (unsigned)(hc->tx_x >> 6), hc->tx_x_calc,
-			       (unsigned)(hc->tx_x_recv >> 6));
+			       "X_recv=%u\n", (unsigned int)(old_x >> 6),
+			       (unsigned int)(hc->tx_x >> 6), hc->tx_x_calc,
+			       (unsigned int)(hc->tx_x_recv >> 6));
 
 		ccid3_update_send_interval(hc);
 	}
@@ -425,8 +425,8 @@
 			       "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
 			       dccp_role(sk), sk, hc->tx_rtt, r_sample,
 			       hc->tx_s, hc->tx_p, hc->tx_x_calc,
-			       (unsigned)(hc->tx_x_recv >> 6),
-			       (unsigned)(hc->tx_x >> 6));
+			       (unsigned int)(hc->tx_x_recv >> 6),
+			       (unsigned int)(hc->tx_x >> 6));
 
 	/* unschedule no feedback timer */
 	sk_stop_timer(sk, &hc->tx_no_feedback_timer);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 29d6bb6..9040be0 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -75,7 +75,7 @@
 				     * state, about 60 seconds */
 
 /* RFC 1122, 4.2.3.1 initial RTO value */
-#define DCCP_TIMEOUT_INIT ((unsigned)(3 * HZ))
+#define DCCP_TIMEOUT_INIT ((unsigned int)(3 * HZ))
 
 /*
  * The maximum back-off value for retransmissions. This is needed for
@@ -84,7 +84,7 @@
  *  - feature-negotiation retransmission (sec. 6.6.3),
  *  - Acks in client-PARTOPEN state (sec. 8.1.5).
  */
-#define DCCP_RTO_MAX ((unsigned)(64 * HZ))
+#define DCCP_RTO_MAX ((unsigned int)(64 * HZ))
 
 /*
  * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4
@@ -287,9 +287,9 @@
 extern int dccp_child_process(struct sock *parent, struct sock *child,
 			      struct sk_buff *skb);
 extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-				  struct dccp_hdr *dh, unsigned len);
+				  struct dccp_hdr *dh, unsigned int len);
 extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
-				const struct dccp_hdr *dh, const unsigned len);
+				const struct dccp_hdr *dh, const unsigned int len);
 
 extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
 extern void dccp_destroy_sock(struct sock *sk);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 51d5fe5..bc93a33 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -285,7 +285,7 @@
 }
 
 static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
-				  const struct dccp_hdr *dh, const unsigned len)
+				  const struct dccp_hdr *dh, const unsigned int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 
@@ -366,7 +366,7 @@
 }
 
 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
-			 const struct dccp_hdr *dh, const unsigned len)
+			 const struct dccp_hdr *dh, const unsigned int len)
 {
 	if (dccp_check_seqno(sk, skb))
 		goto discard;
@@ -388,7 +388,7 @@
 static int dccp_rcv_request_sent_state_process(struct sock *sk,
 					       struct sk_buff *skb,
 					       const struct dccp_hdr *dh,
-					       const unsigned len)
+					       const unsigned int len)
 {
 	/*
 	 *  Step 4: Prepare sequence numbers in REQUEST
@@ -521,7 +521,7 @@
 static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
 						   struct sk_buff *skb,
 						   const struct dccp_hdr *dh,
-						   const unsigned len)
+						   const unsigned int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
@@ -572,7 +572,7 @@
 }
 
 int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-			   struct dccp_hdr *dh, unsigned len)
+			   struct dccp_hdr *dh, unsigned int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index caf6e17..07f5579 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -574,6 +574,11 @@
 	kfree(inet_rsk(req)->opt);
 }
 
+void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
+{
+}
+EXPORT_SYMBOL(dccp_syn_ack_timeout);
+
 static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
 	.family		= PF_INET,
 	.obj_size	= sizeof(struct dccp_request_sock),
@@ -581,6 +586,7 @@
 	.send_ack	= dccp_reqsk_send_ack,
 	.destructor	= dccp_v4_reqsk_destructor,
 	.send_reset	= dccp_v4_ctl_send_reset,
+	.syn_ack_timeout = dccp_syn_ack_timeout,
 };
 
 int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4dc588f..fa9512d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -343,6 +343,7 @@
 	.send_ack	= dccp_reqsk_send_ack,
 	.destructor	= dccp_v6_reqsk_destructor,
 	.send_reset	= dccp_v6_ctl_send_reset,
+	.syn_ack_timeout = dccp_syn_ack_timeout,
 };
 
 static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
@@ -579,7 +580,7 @@
 	newnp->pktoptions = NULL;
 	if (ireq6->pktopts != NULL) {
 		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
-		kfree_skb(ireq6->pktopts);
+		consume_skb(ireq6->pktopts);
 		ireq6->pktopts = NULL;
 		if (newnp->pktoptions)
 			skb_set_owner_r(newnp->pktoptions, newsk);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 7065c0a..6c7c78b8 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -848,7 +848,7 @@
 		default:
 			dccp_pr_debug("packet_type=%s\n",
 				      dccp_packet_name(dh->dccph_type));
-			sk_eat_skb(sk, skb, 0);
+			sk_eat_skb(sk, skb, false);
 		}
 verify_sock_status:
 		if (sock_flag(sk, SOCK_DONE)) {
@@ -905,7 +905,7 @@
 			len = skb->len;
 	found_fin_ok:
 		if (!(flags & MSG_PEEK))
-			sk_eat_skb(sk, skb, 0);
+			sk_eat_skb(sk, skb, false);
 		break;
 	} while (1);
 out:
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 4234882..607ab71 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -98,18 +98,11 @@
 	{ }
 };
 
-static struct ctl_path dccp_path[] = {
-	{ .procname = "net", },
-	{ .procname = "dccp", },
-	{ .procname = "default", },
-	{ }
-};
-
 static struct ctl_table_header *dccp_table_header;
 
 int __init dccp_sysctl_init(void)
 {
-	dccp_table_header = register_sysctl_paths(dccp_path,
+	dccp_table_header = register_net_sysctl(&init_net, "net/dccp/default",
 			dccp_default_table);
 
 	return dccp_table_header != NULL ? 0 : -ENOMEM;
@@ -118,7 +111,7 @@
 void dccp_sysctl_exit(void)
 {
 	if (dccp_table_header != NULL) {
-		unregister_sysctl_table(dccp_table_header);
+		unregister_net_sysctl_table(dccp_table_header);
 		dccp_table_header = NULL;
 	}
 }
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 4136987..2ba1a28 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -250,7 +250,7 @@
 static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
 {
 	int i;
-	unsigned hash = addr->sdn_objnum;
+	unsigned int hash = addr->sdn_objnum;
 
 	if (hash == 0) {
 		hash = addr->sdn_objnamel;
@@ -1844,9 +1844,9 @@
  * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
  * make much practical difference.
  */
-unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
+unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
 {
-	unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
+	unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
 	if (dev) {
 		struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 		mtu -= LL_RESERVED_SPACE(dev);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index c00e307..f3924ab 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -209,15 +209,7 @@
 	struct dn_dev_sysctl_table *t;
 	int i;
 
-#define DN_CTL_PATH_DEV	3
-
-	struct ctl_path dn_ctl_path[] = {
-		{ .procname = "net",  },
-		{ .procname = "decnet",  },
-		{ .procname = "conf",  },
-		{ /* to be set */ },
-		{ },
-	};
+	char path[sizeof("net/decnet/conf/") + IFNAMSIZ];
 
 	t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
 	if (t == NULL)
@@ -228,15 +220,12 @@
 		t->dn_dev_vars[i].data = ((char *)parms) + offset;
 	}
 
-	if (dev) {
-		dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name;
-	} else {
-		dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name;
-	}
+	snprintf(path, sizeof(path), "net/decnet/conf/%s",
+		dev ? dev->name : parms->name);
 
 	t->dn_dev_vars[0].extra1 = (void *)dev;
 
-	t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars);
+	t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars);
 	if (t->sysctl_header == NULL)
 		kfree(t);
 	else
@@ -248,7 +237,7 @@
 	if (parms->sysctl) {
 		struct dn_dev_sysctl_table *t = parms->sysctl;
 		parms->sysctl = NULL;
-		unregister_sysctl_table(t->sysctl_header);
+		unregister_net_sysctl_table(t->sysctl_header);
 		kfree(t);
 	}
 }
@@ -694,13 +683,13 @@
 	ifm->ifa_scope = ifa->ifa_scope;
 	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
 
-	if (ifa->ifa_address)
-		NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address);
-	if (ifa->ifa_local)
-		NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local);
-	if (ifa->ifa_label[0])
-		NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
-
+	if ((ifa->ifa_address &&
+	     nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) ||
+	    (ifa->ifa_local &&
+	     nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) ||
+	    (ifa->ifa_label[0] &&
+	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
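For optional attributes, as in the decnet ifaddr fill above and the dn_rules change below, the conversion folds each presence test and its nla_put*() call into one condition so a single goto covers the whole group. A sketch with placeholder FRA_EXAMPLE_* attribute ids:

static int example_fill_optional(struct sk_buff *skb, __le16 dst, __le16 src)
{
	/* Emit each attribute only when set; stop at the first put failure. */
	if ((dst && nla_put_le16(skb, FRA_EXAMPLE_DST, dst)) ||
	    (src && nla_put_le16(skb, FRA_EXAMPLE_SRC, src)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}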
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 9e885f1..7eaf987 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -302,11 +302,12 @@
 		struct rtattr *attr = RTA_DATA(rta->rta_mx);
 
 		while(RTA_OK(attr, attrlen)) {
-			unsigned flavour = attr->rta_type;
+			unsigned int flavour = attr->rta_type;
+
 			if (flavour) {
 				if (flavour > RTAX_MAX)
 					goto err_inval;
-				fi->fib_metrics[flavour-1] = *(unsigned*)RTA_DATA(attr);
+				fi->fib_metrics[flavour-1] = *(unsigned int *)RTA_DATA(attr);
 			}
 			attr = RTA_NEXT(attr, attrlen);
 		}
@@ -437,9 +438,8 @@
 			res->fi = NULL;
 			return 1;
 		default:
-			if (net_ratelimit())
-				printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n",
-				       type);
+			net_err_ratelimited("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n",
+					    type);
 			res->fi = NULL;
 			return -EINVAL;
 		}
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index ee7013f..ac90f658 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -162,8 +162,8 @@
 	else if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_LOOPBACK))
 		dn_dn2eth(neigh->ha, dn->addr);
 	else {
-		if (net_ratelimit())
-			printk(KERN_DEBUG "Trying to create neigh for hw %d\n",  dev->type);
+		net_dbg_ratelimited("Trying to create neigh for hw %d\n",
+				    dev->type);
 		return -EINVAL;
 	}
 
@@ -236,15 +236,13 @@
 	if (skb_headroom(skb) < headroom) {
 		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
 		if (skb2 == NULL) {
-			if (net_ratelimit())
-				printk(KERN_CRIT "dn_long_output: no memory\n");
+			net_crit_ratelimited("dn_long_output: no memory\n");
 			kfree_skb(skb);
 			return -ENOBUFS;
 		}
 		kfree_skb(skb);
 		skb = skb2;
-		if (net_ratelimit())
-			printk(KERN_INFO "dn_long_output: Increasing headroom\n");
+		net_info_ratelimited("dn_long_output: Increasing headroom\n");
 	}
 
 	data = skb_push(skb, sizeof(struct dn_long_packet) + 3);
@@ -281,15 +279,13 @@
 	if (skb_headroom(skb) < headroom) {
 		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
 		if (skb2 == NULL) {
-			if (net_ratelimit())
-				printk(KERN_CRIT "dn_short_output: no memory\n");
+			net_crit_ratelimited("dn_short_output: no memory\n");
 			kfree_skb(skb);
 			return -ENOBUFS;
 		}
 		kfree_skb(skb);
 		skb = skb2;
-		if (net_ratelimit())
-			printk(KERN_INFO "dn_short_output: Increasing headroom\n");
+		net_info_ratelimited("dn_short_output: Increasing headroom\n");
 	}
 
 	data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
@@ -322,15 +318,13 @@
 	if (skb_headroom(skb) < headroom) {
 		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
 		if (skb2 == NULL) {
-			if (net_ratelimit())
-				printk(KERN_CRIT "dn_phase3_output: no memory\n");
+			net_crit_ratelimited("dn_phase3_output: no memory\n");
 			kfree_skb(skb);
 			return -ENOBUFS;
 		}
 		kfree_skb(skb);
 		skb = skb2;
-		if (net_ratelimit())
-			printk(KERN_INFO "dn_phase3_output: Increasing headroom\n");
+		net_info_ratelimited("dn_phase3_output: Increasing headroom\n");
 	}
 
 	data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
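Throughout the decnet files, open-coded "if (net_ratelimit()) printk(KERN_<LEVEL> ...)" pairs collapse into the net_<level>_ratelimited() helpers, which keep the rate limiting and log level but drop the boilerplate. Shown on a generic message; the device and message text are stand-ins.

static void example_log_headroom(struct net_device *example_dev)
{
	/* Old form: rate limit and level handled by hand. */
	if (net_ratelimit())
		printk(KERN_INFO "example: increasing headroom on %s\n",
		       example_dev->name);

	/* New form: same behaviour in one call; the dbg/warn/err/crit
	 * variants are used elsewhere in this patch.
	 */
	net_info_ratelimited("example: increasing headroom on %s\n",
			     example_dev->name);
}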
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index f6544b2..c344163 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -80,12 +80,15 @@
 
 static void dn_log_martian(struct sk_buff *skb, const char *msg)
 {
-	if (decnet_log_martians && net_ratelimit()) {
+	if (decnet_log_martians) {
 		char *devname = skb->dev ? skb->dev->name : "???";
 		struct dn_skb_cb *cb = DN_SKB_CB(skb);
-		printk(KERN_INFO "DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
-		       msg, devname, le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
-		       le16_to_cpu(cb->src_port), le16_to_cpu(cb->dst_port));
+		net_info_ratelimited("DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
+				     msg, devname,
+				     le16_to_cpu(cb->src),
+				     le16_to_cpu(cb->dst),
+				     le16_to_cpu(cb->src_port),
+				     le16_to_cpu(cb->dst_port));
 	}
 }
 
@@ -588,7 +591,7 @@
 	   number of warnings when compiling with -W --ANK
 	 */
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
+	    (unsigned int)sk->sk_rcvbuf) {
 		err = -ENOMEM;
 		goto out;
 	}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index e446e85..564a6ad 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -1,4 +1,3 @@
-
 /*
  * DECnet       An implementation of the DECnet protocol suite for the LINUX
  *              operating system.  DECnet is implemented using the  BSD Socket
@@ -209,7 +208,7 @@
  *
  * Returns: The number of times the packet has been sent previously
  */
-static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb,
+static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb,
 					     gfp_t gfp)
 {
 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -240,7 +239,7 @@
 {
 	struct dn_scp *scp = DN_SK(sk);
 	struct sk_buff *skb;
-	unsigned reduce_win = 0;
+	unsigned int reduce_win = 0;
 
 	/*
 	 * First we check for otherdata/linkservice messages
@@ -554,8 +553,8 @@
 	unsigned char *msg;
 
 	if ((dst == NULL) || (rem == 0)) {
-		if (net_ratelimit())
-			printk(KERN_DEBUG "DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n", le16_to_cpu(rem), dst);
+		net_dbg_ratelimited("DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n",
+				    le16_to_cpu(rem), dst);
 		return;
 	}
 
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 80a3de4..586302e 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -122,7 +122,7 @@
 static void dn_run_flush(unsigned long dummy);
 
 static struct dn_rt_hash_bucket *dn_rt_hash_table;
-static unsigned dn_rt_hash_mask;
+static unsigned int dn_rt_hash_mask;
 
 static struct timer_list dn_route_timer;
 static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
@@ -149,13 +149,13 @@
 	dst_destroy_metrics_generic(dst);
 }
 
-static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
+static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
 {
 	__u16 tmp = (__u16 __force)(src ^ dst);
 	tmp ^= (tmp >> 3);
 	tmp ^= (tmp >> 5);
 	tmp ^= (tmp >> 10);
-	return dn_rt_hash_mask & (unsigned)tmp;
+	return dn_rt_hash_mask & (unsigned int)tmp;
 }
 
 static inline void dnrt_free(struct dn_route *rt)
@@ -297,7 +297,7 @@
 		(fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
 }
 
-static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
+static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
 {
 	struct dn_route *rth;
 	struct dn_route __rcu **rthp;
@@ -748,8 +748,7 @@
 		       dn_to_neigh_output);
 
 error:
-	if (net_ratelimit())
-		printk(KERN_DEBUG "dn_output: This should not happen\n");
+	net_dbg_ratelimited("dn_output: This should not happen\n");
 
 	kfree_skb(skb);
 
@@ -807,12 +806,10 @@
  */
 static int dn_rt_bug(struct sk_buff *skb)
 {
-	if (net_ratelimit()) {
-		struct dn_skb_cb *cb = DN_SKB_CB(skb);
+	struct dn_skb_cb *cb = DN_SKB_CB(skb);
 
-		printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
-				le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
-	}
+	net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
+			    le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
 
 	kfree_skb(skb);
 
@@ -934,8 +931,8 @@
 	struct dn_route *rt = NULL;
 	struct net_device *dev_out = NULL, *dev;
 	struct neighbour *neigh = NULL;
-	unsigned hash;
-	unsigned flags = 0;
+	unsigned int hash;
+	unsigned int flags = 0;
 	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
 	int err;
 	int free_res = 0;
@@ -1209,7 +1206,7 @@
  */
 static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
 {
-	unsigned hash = dn_hash(flp->saddr, flp->daddr);
+	unsigned int hash = dn_hash(flp->saddr, flp->daddr);
 	struct dn_route *rt = NULL;
 
 	if (!(flags & MSG_TRYHARD)) {
@@ -1275,7 +1272,7 @@
 	struct net_device *out_dev = NULL;
 	struct dn_dev *dn_db;
 	struct neighbour *neigh = NULL;
-	unsigned hash;
+	unsigned int hash;
 	int flags = 0;
 	__le16 gateway = 0;
 	__le16 local_src = 0;
@@ -1327,9 +1324,7 @@
 
 		out_dev = DN_FIB_RES_DEV(res);
 		if (out_dev == NULL) {
-			if (net_ratelimit())
-				printk(KERN_CRIT "Bug in dn_route_input_slow() "
-						 "No output device\n");
+			net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
 			goto e_inval;
 		}
 		dev_hold(out_dev);
@@ -1490,7 +1485,7 @@
 {
 	struct dn_route *rt;
 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
-	unsigned hash = dn_hash(cb->src, cb->dst);
+	unsigned int hash = dn_hash(cb->src, cb->dst);
 
 	if (skb_dst(skb))
 		return 0;
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index f65c9dd..e65f2c8 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -177,11 +177,11 @@
 	return 1;
 }
 
-unsigned dnet_addr_type(__le16 addr)
+unsigned int dnet_addr_type(__le16 addr)
 {
 	struct flowidn fld = { .daddr = addr };
 	struct dn_fib_res res;
-	unsigned ret = RTN_UNICAST;
+	unsigned int ret = RTN_UNICAST;
 	struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
 
 	res.r = NULL;
@@ -204,11 +204,11 @@
 	frh->src_len = r->src_len;
 	frh->tos = 0;
 
-	if (r->dst_len)
-		NLA_PUT_LE16(skb, FRA_DST, r->dst);
-	if (r->src_len)
-		NLA_PUT_LE16(skb, FRA_SRC, r->src);
-
+	if ((r->dst_len &&
+	     nla_put_le16(skb, FRA_DST, r->dst)) ||
+	    (r->src_len &&
+	     nla_put_le16(skb, FRA_SRC, r->src)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index a9a62f2..650f338 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -836,8 +836,8 @@
 	if (!create)
 		return NULL;
 
-	if (in_interrupt() && net_ratelimit()) {
-		printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
+	if (in_interrupt()) {
+		net_dbg_ratelimited("DECnet: BUG! Attempt to create routing table from interrupt\n");
 		return NULL;
 	}
 
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 1531135..44b8909 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -57,8 +57,7 @@
 	if (skb)
 		kfree_skb(skb);
 	*errp = -ENOMEM;
-	if (net_ratelimit())
-		printk(KERN_ERR "dn_rtmsg: error creating netlink message\n");
+	net_err_ratelimited("dn_rtmsg: error creating netlink message\n");
 	return NULL;
 }
 
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 02e75d1..a55eecca 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -351,20 +351,14 @@
 	{ }
 };
 
-static struct ctl_path dn_path[] = {
-	{ .procname = "net", },
-	{ .procname = "decnet", },
-	{ }
-};
-
 void dn_register_sysctl(void)
 {
-	dn_table_header = register_sysctl_paths(dn_path, dn_table);
+	dn_table_header = register_net_sysctl(&init_net, "net/decnet", dn_table);
 }
 
 void dn_unregister_sysctl(void)
 {
-	unregister_sysctl_table(dn_table_header);
+	unregister_net_sysctl_table(dn_table_header);
 }
 
 #else  /* CONFIG_SYSCTL */
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index c73bba3..6f70ea9 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -38,7 +38,7 @@
 MODULE_AUTHOR("Wang Lei");
 MODULE_LICENSE("GPL");
 
-unsigned dns_resolver_debug;
+unsigned int dns_resolver_debug;
 module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
 
diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h
index 189ca9e..17c7886 100644
--- a/net/dns_resolver/internal.h
+++ b/net/dns_resolver/internal.h
@@ -31,7 +31,7 @@
 /*
  * debug tracing
  */
-extern unsigned dns_resolver_debug;
+extern unsigned int dns_resolver_debug;
 
 #define	kdebug(FMT, ...)				\
 do {							\
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 56cf9b8..e32083d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -66,7 +66,7 @@
 	if (!(master->flags & IFF_UP))
 		return -ENETDOWN;
 
-	if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
+	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
 		err = dev_uc_add(master, dev->dev_addr);
 		if (err < 0)
 			goto out;
@@ -89,7 +89,7 @@
 	if (dev->flags & IFF_ALLMULTI)
 		dev_set_allmulti(master, -1);
 del_unicast:
-	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
 		dev_uc_del(master, dev->dev_addr);
 out:
 	return err;
@@ -107,7 +107,7 @@
 	if (dev->flags & IFF_PROMISC)
 		dev_set_promiscuity(master, -1);
 
-	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
 		dev_uc_del(master, dev->dev_addr);
 
 	return 0;
@@ -146,13 +146,13 @@
 	if (!(dev->flags & IFF_UP))
 		goto out;
 
-	if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
+	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
 		err = dev_uc_add(master, addr->sa_data);
 		if (err < 0)
 			return err;
 	}
 
-	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
+	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
 		dev_uc_del(master, dev->dev_addr);
 
 out:
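The DSA hunks belong to the compare_ether_addr() to ether_addr_equal() conversion: the old helper returns non-zero when two MAC addresses differ (memcmp-style), while the new one returns true when they match, hence the added negations. The two tests below are equivalent; the helper names are made up for illustration.

static bool example_mac_differs_old(const u8 *addr_a, const u8 *addr_b)
{
	return compare_ether_addr(addr_a, addr_b) != 0;	/* memcmp-like */
}

static bool example_mac_differs_new(const u8 *addr_a, const u8 *addr_b)
{
	return !ether_addr_equal(addr_a, addr_b);	/* boolean equality */
}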
diff --git a/net/econet/Kconfig b/net/econet/Kconfig
deleted file mode 100644
index 39a2d29..0000000
--- a/net/econet/Kconfig
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Acorn Econet/AUN protocols 
-#
-
-config ECONET
-	tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && INET
-	---help---
-	  Econet is a fairly old and slow networking protocol mainly used by
-	  Acorn computers to access file and print servers. It uses native
-	  Econet network cards. AUN is an implementation of the higher level
-	  parts of Econet that runs over ordinary Ethernet connections, on
-	  top of the UDP packet protocol, which in turn runs on top of the
-	  Internet protocol IP.
-
-	  If you say Y here, you can choose with the next two options whether
-	  to send Econet/AUN traffic over a UDP Ethernet connection or over
-	  a native Econet network card.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called econet.
-
-config ECONET_AUNUDP
-	bool "AUN over UDP"
-	depends on ECONET
-	help
-	  Say Y here if you want to send Econet/AUN traffic over a UDP
-	  connection (UDP is a packet based protocol that runs on top of the
-	  Internet protocol IP) using an ordinary Ethernet network card.
-
-config ECONET_NATIVE
-	bool "Native Econet"
-	depends on ECONET
-	help
-	  Say Y here if you have a native Econet network card installed in
-	  your computer.
diff --git a/net/econet/Makefile b/net/econet/Makefile
deleted file mode 100644
index 05fae8b..0000000
--- a/net/econet/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for Econet support code.
-#
-
-obj-$(CONFIG_ECONET) += econet.o
-
-econet-y := af_econet.o
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
deleted file mode 100644
index 71b5edc..0000000
--- a/net/econet/af_econet.c
+++ /dev/null
@@ -1,1172 +0,0 @@
-/*
- *	An implementation of the Acorn Econet and AUN protocols.
- *	Philip Blundell <philb@gnu.org>
- *
- *	This program is free software; you can redistribute it and/or
- *	modify it under the terms of the GNU General Public License
- *	as published by the Free Software Foundation; either version
- *	2 of the License, or (at your option) any later version.
- *
- */
-
-#define pr_fmt(fmt) fmt
-
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/route.h>
-#include <linux/inet.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include <linux/wireless.h>
-#include <linux/skbuff.h>
-#include <linux/udp.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <net/sock.h>
-#include <net/inet_common.h>
-#include <linux/stat.h>
-#include <linux/init.h>
-#include <linux/if_ec.h>
-#include <net/udp.h>
-#include <net/ip.h>
-#include <linux/spinlock.h>
-#include <linux/rcupdate.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-
-#include <linux/uaccess.h>
-
-static const struct proto_ops econet_ops;
-static struct hlist_head econet_sklist;
-static DEFINE_SPINLOCK(econet_lock);
-static DEFINE_MUTEX(econet_mutex);
-
-/* Since there are only 256 possible network numbers (or fewer, depends
-   how you count) it makes sense to use a simple lookup table. */
-static struct net_device *net2dev_map[256];
-
-#define EC_PORT_IP	0xd2
-
-#ifdef CONFIG_ECONET_AUNUDP
-static DEFINE_SPINLOCK(aun_queue_lock);
-static struct socket *udpsock;
-#define AUN_PORT	0x8000
-
-struct aunhdr {
-	unsigned char code;		/* AUN magic protocol byte */
-	unsigned char port;
-	unsigned char cb;
-	unsigned char pad;
-	unsigned long handle;
-};
-
-static unsigned long aun_seq;
-
-/* Queue of packets waiting to be transmitted. */
-static struct sk_buff_head aun_queue;
-static struct timer_list ab_cleanup_timer;
-
-#endif		/* CONFIG_ECONET_AUNUDP */
-
-/* Per-packet information */
-struct ec_cb {
-	struct sockaddr_ec sec;
-	unsigned long cookie;		/* Supplied by user. */
-#ifdef CONFIG_ECONET_AUNUDP
-	int done;
-	unsigned long seq;		/* Sequencing */
-	unsigned long timeout;		/* Timeout */
-	unsigned long start;		/* jiffies */
-#endif
-#ifdef CONFIG_ECONET_NATIVE
-	void (*sent)(struct sk_buff *, int result);
-#endif
-};
-
-static void econet_remove_socket(struct hlist_head *list, struct sock *sk)
-{
-	spin_lock_bh(&econet_lock);
-	sk_del_node_init(sk);
-	spin_unlock_bh(&econet_lock);
-}
-
-static void econet_insert_socket(struct hlist_head *list, struct sock *sk)
-{
-	spin_lock_bh(&econet_lock);
-	sk_add_node(sk, list);
-	spin_unlock_bh(&econet_lock);
-}
-
-/*
- *	Pull a packet from our receive queue and hand it to the user.
- *	If necessary we block.
- */
-
-static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
-			  struct msghdr *msg, size_t len, int flags)
-{
-	struct sock *sk = sock->sk;
-	struct sk_buff *skb;
-	size_t copied;
-	int err;
-
-	msg->msg_namelen = sizeof(struct sockaddr_ec);
-
-	mutex_lock(&econet_mutex);
-
-	/*
-	 *	Call the generic datagram receiver. This handles all sorts
-	 *	of horrible races and re-entrancy so we can forget about it
-	 *	in the protocol layers.
-	 *
-	 *	Now it will return ENETDOWN, if device have just gone down,
-	 *	but then it will block.
-	 */
-
-	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
-
-	/*
-	 *	An error occurred so return it. Because skb_recv_datagram()
-	 *	handles the blocking we don't see and worry about blocking
-	 *	retries.
-	 */
-
-	if (skb == NULL)
-		goto out;
-
-	/*
-	 *	You lose any data beyond the buffer you gave. If it worries a
-	 *	user program they can ask the device for its MTU anyway.
-	 */
-
-	copied = skb->len;
-	if (copied > len) {
-		copied = len;
-		msg->msg_flags |= MSG_TRUNC;
-	}
-
-	/* We can't use skb_copy_datagram here */
-	err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
-	if (err)
-		goto out_free;
-	sk->sk_stamp = skb->tstamp;
-
-	if (msg->msg_name)
-		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
-
-	/*
-	 *	Free or return the buffer as appropriate. Again this
-	 *	hides all the races and re-entrancy issues from us.
-	 */
-	err = copied;
-
-out_free:
-	skb_free_datagram(sk, skb);
-out:
-	mutex_unlock(&econet_mutex);
-	return err;
-}
-
-/*
- *	Bind an Econet socket.
- */
-
-static int econet_bind(struct socket *sock, struct sockaddr *uaddr,
-		       int addr_len)
-{
-	struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
-	struct sock *sk;
-	struct econet_sock *eo;
-
-	/*
-	 *	Check legality
-	 */
-
-	if (addr_len < sizeof(struct sockaddr_ec) ||
-	    sec->sec_family != AF_ECONET)
-		return -EINVAL;
-
-	mutex_lock(&econet_mutex);
-
-	sk = sock->sk;
-	eo = ec_sk(sk);
-
-	eo->cb	    = sec->cb;
-	eo->port    = sec->port;
-	eo->station = sec->addr.station;
-	eo->net	    = sec->addr.net;
-
-	mutex_unlock(&econet_mutex);
-
-	return 0;
-}
-
-#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
-/*
- *	Queue a transmit result for the user to be told about.
- */
-
-static void tx_result(struct sock *sk, unsigned long cookie, int result)
-{
-	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
-	struct ec_cb *eb;
-	struct sockaddr_ec *sec;
-
-	if (skb == NULL) {
-		pr_debug("econet: memory squeeze, transmit result dropped\n");
-		return;
-	}
-
-	eb = (struct ec_cb *)&skb->cb;
-	sec = (struct sockaddr_ec *)&eb->sec;
-	memset(sec, 0, sizeof(struct sockaddr_ec));
-	sec->cookie = cookie;
-	sec->type = ECTYPE_TRANSMIT_STATUS | result;
-	sec->sec_family = AF_ECONET;
-
-	if (sock_queue_rcv_skb(sk, skb) < 0)
-		kfree_skb(skb);
-}
-#endif
-
-#ifdef CONFIG_ECONET_NATIVE
-/*
- *	Called by the Econet hardware driver when a packet transmit
- *	has completed.  Tell the user.
- */
-
-static void ec_tx_done(struct sk_buff *skb, int result)
-{
-	struct ec_cb *eb = (struct ec_cb *)&skb->cb;
-	tx_result(skb->sk, eb->cookie, result);
-}
-#endif
-
-/*
- *	Send a packet.  We have to work out which device it's going out on
- *	and hence whether to use real Econet or the UDP emulation.
- */
-
-static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
-			  struct msghdr *msg, size_t len)
-{
-	struct sockaddr_ec *saddr = (struct sockaddr_ec *)msg->msg_name;
-	struct net_device *dev;
-	struct ec_addr addr;
-	int err;
-	unsigned char port, cb;
-#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
-	struct sock *sk = sock->sk;
-	struct sk_buff *skb;
-	struct ec_cb *eb;
-#endif
-#ifdef CONFIG_ECONET_AUNUDP
-	struct msghdr udpmsg;
-	struct iovec iov[2];
-	struct aunhdr ah;
-	struct sockaddr_in udpdest;
-	__kernel_size_t size;
-	mm_segment_t oldfs;
-	char *userbuf;
-#endif
-
-	/*
-	 *	Check the flags.
-	 */
-
-	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
-		return -EINVAL;
-
-	/*
-	 *	Get and verify the address.
-	 */
-
-	mutex_lock(&econet_mutex);
-
-	if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
-		mutex_unlock(&econet_mutex);
-		return -EINVAL;
-	}
-	addr.station = saddr->addr.station;
-	addr.net = saddr->addr.net;
-	port = saddr->port;
-	cb = saddr->cb;
-
-	/* Look for a device with the right network number. */
-	dev = net2dev_map[addr.net];
-
-	/* If not directly reachable, use some default */
-	if (dev == NULL) {
-		dev = net2dev_map[0];
-		/* No interfaces at all? */
-		if (dev == NULL) {
-			mutex_unlock(&econet_mutex);
-			return -ENETDOWN;
-		}
-	}
-
-	if (dev->type == ARPHRD_ECONET) {
-		/* Real hardware Econet.  We're not worthy etc. */
-#ifdef CONFIG_ECONET_NATIVE
-		unsigned short proto = 0;
-		int hlen, tlen;
-		int res;
-
-		if (len + 15 > dev->mtu) {
-			mutex_unlock(&econet_mutex);
-			return -EMSGSIZE;
-		}
-
-		dev_hold(dev);
-
-		hlen = LL_RESERVED_SPACE(dev);
-		tlen = dev->needed_tailroom;
-		skb = sock_alloc_send_skb(sk, len + hlen + tlen,
-					  msg->msg_flags & MSG_DONTWAIT, &err);
-		if (skb == NULL)
-			goto out_unlock;
-
-		skb_reserve(skb, hlen);
-		skb_reset_network_header(skb);
-
-		eb = (struct ec_cb *)&skb->cb;
-
-		eb->cookie = saddr->cookie;
-		eb->sec = *saddr;
-		eb->sent = ec_tx_done;
-
-		err = -EINVAL;
-		res = dev_hard_header(skb, dev, ntohs(proto), &addr, NULL, len);
-		if (res < 0)
-			goto out_free;
-		if (res > 0) {
-			struct ec_framehdr *fh;
-			/* Poke in our control byte and
-			   port number.  Hack, hack.  */
-			fh = (struct ec_framehdr *)skb->data;
-			fh->cb = cb;
-			fh->port = port;
-			if (sock->type != SOCK_DGRAM) {
-				skb_reset_tail_pointer(skb);
-				skb->len = 0;
-			}
-		}
-
-		/* Copy the data. Returns -EFAULT on error */
-		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
-		skb->protocol = proto;
-		skb->dev = dev;
-		skb->priority = sk->sk_priority;
-		if (err)
-			goto out_free;
-
-		err = -ENETDOWN;
-		if (!(dev->flags & IFF_UP))
-			goto out_free;
-
-		/*
-		 *	Now send it
-		 */
-
-		dev_queue_xmit(skb);
-		dev_put(dev);
-		mutex_unlock(&econet_mutex);
-		return len;
-
-out_free:
-		kfree_skb(skb);
-out_unlock:
-		if (dev)
-			dev_put(dev);
-#else
-		err = -EPROTOTYPE;
-#endif
-		mutex_unlock(&econet_mutex);
-
-		return err;
-	}
-
-#ifdef CONFIG_ECONET_AUNUDP
-	/* AUN virtual Econet. */
-
-	if (udpsock == NULL) {
-		mutex_unlock(&econet_mutex);
-		return -ENETDOWN;		/* No socket - can't send */
-	}
-
-	if (len > 32768) {
-		err = -E2BIG;
-		goto error;
-	}
-
-	/* Make up a UDP datagram and hand it off to some higher intellect. */
-
-	memset(&udpdest, 0, sizeof(udpdest));
-	udpdest.sin_family = AF_INET;
-	udpdest.sin_port = htons(AUN_PORT);
-
-	/* At the moment we use the stupid Acorn scheme of Econet address
-	   y.x maps to IP a.b.c.x.  This should be replaced with something
-	   more flexible and more aware of subnet masks.  */
-	{
-		struct in_device *idev;
-		unsigned long network = 0;
-
-		rcu_read_lock();
-		idev = __in_dev_get_rcu(dev);
-		if (idev) {
-			if (idev->ifa_list)
-				network = ntohl(idev->ifa_list->ifa_address) &
-					0xffffff00;		/* !!! */
-		}
-		rcu_read_unlock();
-		udpdest.sin_addr.s_addr = htonl(network | addr.station);
-	}
-
-	memset(&ah, 0, sizeof(ah));
-	ah.port = port;
-	ah.cb = cb & 0x7f;
-	ah.code = 2;		/* magic */
-
-	/* tack our header on the front of the iovec */
-	size = sizeof(struct aunhdr);
-	iov[0].iov_base = (void *)&ah;
-	iov[0].iov_len = size;
-
-	userbuf = vmalloc(len);
-	if (userbuf == NULL) {
-		err = -ENOMEM;
-		goto error;
-	}
-
-	iov[1].iov_base = userbuf;
-	iov[1].iov_len = len;
-	err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
-	if (err)
-		goto error_free_buf;
-
-	/* Get a skbuff (no data, just holds our cb information) */
-	skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, &err);
-	if (skb == NULL)
-		goto error_free_buf;
-
-	eb = (struct ec_cb *)&skb->cb;
-
-	eb->cookie = saddr->cookie;
-	eb->timeout = 5 * HZ;
-	eb->start = jiffies;
-	ah.handle = aun_seq;
-	eb->seq = (aun_seq++);
-	eb->sec = *saddr;
-
-	skb_queue_tail(&aun_queue, skb);
-
-	udpmsg.msg_name = (void *)&udpdest;
-	udpmsg.msg_namelen = sizeof(udpdest);
-	udpmsg.msg_iov = &iov[0];
-	udpmsg.msg_iovlen = 2;
-	udpmsg.msg_control = NULL;
-	udpmsg.msg_controllen = 0;
-	udpmsg.msg_flags = 0;
-
-	oldfs = get_fs();
-	set_fs(KERNEL_DS);		/* More privs :-) */
-	err = sock_sendmsg(udpsock, &udpmsg, size);
-	set_fs(oldfs);
-
-error_free_buf:
-	vfree(userbuf);
-error:
-#else
-	err = -EPROTOTYPE;
-#endif
-	mutex_unlock(&econet_mutex);
-
-	return err;
-}
-
-/*
- *	Look up the address of a socket.
- */
-
-static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
-			  int *uaddr_len, int peer)
-{
-	struct sock *sk;
-	struct econet_sock *eo;
-	struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
-
-	if (peer)
-		return -EOPNOTSUPP;
-
-	memset(sec, 0, sizeof(*sec));
-	mutex_lock(&econet_mutex);
-
-	sk = sock->sk;
-	eo = ec_sk(sk);
-
-	sec->sec_family	  = AF_ECONET;
-	sec->port	  = eo->port;
-	sec->addr.station = eo->station;
-	sec->addr.net	  = eo->net;
-
-	mutex_unlock(&econet_mutex);
-
-	*uaddr_len = sizeof(*sec);
-	return 0;
-}
-
-static void econet_destroy_timer(unsigned long data)
-{
-	struct sock *sk = (struct sock *)data;
-
-	if (!sk_has_allocations(sk)) {
-		sk_free(sk);
-		return;
-	}
-
-	sk->sk_timer.expires = jiffies + 10 * HZ;
-	add_timer(&sk->sk_timer);
-	pr_debug("econet: socket destroy delayed\n");
-}
-
-/*
- *	Close an econet socket.
- */
-
-static int econet_release(struct socket *sock)
-{
-	struct sock *sk;
-
-	mutex_lock(&econet_mutex);
-
-	sk = sock->sk;
-	if (!sk)
-		goto out_unlock;
-
-	econet_remove_socket(&econet_sklist, sk);
-
-	/*
-	 *	Now the socket is dead. No more input will appear.
-	 */
-
-	sk->sk_state_change(sk);	/* It is useless. Just for sanity. */
-
-	sock_orphan(sk);
-
-	/* Purge queues */
-
-	skb_queue_purge(&sk->sk_receive_queue);
-
-	if (sk_has_allocations(sk)) {
-		sk->sk_timer.data     = (unsigned long)sk;
-		sk->sk_timer.expires  = jiffies + HZ;
-		sk->sk_timer.function = econet_destroy_timer;
-		add_timer(&sk->sk_timer);
-
-		goto out_unlock;
-	}
-
-	sk_free(sk);
-
-out_unlock:
-	mutex_unlock(&econet_mutex);
-	return 0;
-}
-
-static struct proto econet_proto = {
-	.name	  = "ECONET",
-	.owner	  = THIS_MODULE,
-	.obj_size = sizeof(struct econet_sock),
-};
-
-/*
- *	Create an Econet socket
- */
-
-static int econet_create(struct net *net, struct socket *sock, int protocol,
-			 int kern)
-{
-	struct sock *sk;
-	struct econet_sock *eo;
-	int err;
-
-	if (!net_eq(net, &init_net))
-		return -EAFNOSUPPORT;
-
-	/* Econet only provides datagram services. */
-	if (sock->type != SOCK_DGRAM)
-		return -ESOCKTNOSUPPORT;
-
-	sock->state = SS_UNCONNECTED;
-
-	err = -ENOBUFS;
-	sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto);
-	if (sk == NULL)
-		goto out;
-
-	sk->sk_reuse = 1;
-	sock->ops = &econet_ops;
-	sock_init_data(sock, sk);
-
-	eo = ec_sk(sk);
-	sock_reset_flag(sk, SOCK_ZAPPED);
-	sk->sk_family = PF_ECONET;
-	eo->num = protocol;
-
-	econet_insert_socket(&econet_sklist, sk);
-	return 0;
-out:
-	return err;
-}
-
-/*
- *	Handle Econet specific ioctls
- */
-
-static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
-{
-	struct ifreq ifr;
-	struct ec_device *edev;
-	struct net_device *dev;
-	struct sockaddr_ec *sec;
-	int err;
-
-	/*
-	 *	Fetch the caller's info block into kernel space
-	 */
-
-	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-		return -EFAULT;
-
-	dev = dev_get_by_name(&init_net, ifr.ifr_name);
-	if (dev == NULL)
-		return -ENODEV;
-
-	sec = (struct sockaddr_ec *)&ifr.ifr_addr;
-
-	mutex_lock(&econet_mutex);
-
-	err = 0;
-	switch (cmd) {
-	case SIOCSIFADDR:
-		if (!capable(CAP_NET_ADMIN)) {
-			err = -EPERM;
-			break;
-		}
-
-		edev = dev->ec_ptr;
-		if (edev == NULL) {
-			/* Magic up a new one. */
-			edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL);
-			if (edev == NULL) {
-				err = -ENOMEM;
-				break;
-			}
-			dev->ec_ptr = edev;
-		} else
-			net2dev_map[edev->net] = NULL;
-		edev->station = sec->addr.station;
-		edev->net = sec->addr.net;
-		net2dev_map[sec->addr.net] = dev;
-		if (!net2dev_map[0])
-			net2dev_map[0] = dev;
-		break;
-
-	case SIOCGIFADDR:
-		edev = dev->ec_ptr;
-		if (edev == NULL) {
-			err = -ENODEV;
-			break;
-		}
-		memset(sec, 0, sizeof(struct sockaddr_ec));
-		sec->addr.station = edev->station;
-		sec->addr.net = edev->net;
-		sec->sec_family = AF_ECONET;
-		dev_put(dev);
-		if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-			err = -EFAULT;
-		break;
-
-	default:
-		err = -EINVAL;
-		break;
-	}
-
-	mutex_unlock(&econet_mutex);
-
-	dev_put(dev);
-
-	return err;
-}
-
-/*
- *	Handle generic ioctls
- */
-
-static int econet_ioctl(struct socket *sock, unsigned int cmd,
-			unsigned long arg)
-{
-	struct sock *sk = sock->sk;
-	void __user *argp = (void __user *)arg;
-
-	switch (cmd) {
-	case SIOCGSTAMP:
-		return sock_get_timestamp(sk, argp);
-
-	case SIOCGSTAMPNS:
-		return sock_get_timestampns(sk, argp);
-
-	case SIOCSIFADDR:
-	case SIOCGIFADDR:
-		return ec_dev_ioctl(sock, cmd, argp);
-
-	}
-
-	return -ENOIOCTLCMD;
-}
-
-static const struct net_proto_family econet_family_ops = {
-	.family =	PF_ECONET,
-	.create =	econet_create,
-	.owner	=	THIS_MODULE,
-};
-
-static const struct proto_ops econet_ops = {
-	.family =	PF_ECONET,
-	.owner =	THIS_MODULE,
-	.release =	econet_release,
-	.bind =		econet_bind,
-	.connect =	sock_no_connect,
-	.socketpair =	sock_no_socketpair,
-	.accept =	sock_no_accept,
-	.getname =	econet_getname,
-	.poll =		datagram_poll,
-	.ioctl =	econet_ioctl,
-	.listen =	sock_no_listen,
-	.shutdown =	sock_no_shutdown,
-	.setsockopt =	sock_no_setsockopt,
-	.getsockopt =	sock_no_getsockopt,
-	.sendmsg =	econet_sendmsg,
-	.recvmsg =	econet_recvmsg,
-	.mmap =		sock_no_mmap,
-	.sendpage =	sock_no_sendpage,
-};
-
-#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
-/*
- *	Find the listening socket, if any, for the given data.
- */
-
-static struct sock *ec_listening_socket(unsigned char port, unsigned char
-				 station, unsigned char net)
-{
-	struct sock *sk;
-	struct hlist_node *node;
-
-	spin_lock(&econet_lock);
-	sk_for_each(sk, node, &econet_sklist) {
-		struct econet_sock *opt = ec_sk(sk);
-		if ((opt->port == port || opt->port == 0) &&
-		    (opt->station == station || opt->station == 0) &&
-		    (opt->net == net || opt->net == 0)) {
-			sock_hold(sk);
-			goto found;
-		}
-	}
-	sk = NULL;
-found:
-	spin_unlock(&econet_lock);
-	return sk;
-}
-
-/*
- *	Queue a received packet for a socket.
- */
-
-static int ec_queue_packet(struct sock *sk, struct sk_buff *skb,
-			   unsigned char stn, unsigned char net,
-			   unsigned char cb, unsigned char port)
-{
-	struct ec_cb *eb = (struct ec_cb *)&skb->cb;
-	struct sockaddr_ec *sec = (struct sockaddr_ec *)&eb->sec;
-
-	memset(sec, 0, sizeof(struct sockaddr_ec));
-	sec->sec_family = AF_ECONET;
-	sec->type = ECTYPE_PACKET_RECEIVED;
-	sec->port = port;
-	sec->cb = cb;
-	sec->addr.net = net;
-	sec->addr.station = stn;
-
-	return sock_queue_rcv_skb(sk, skb);
-}
-#endif
-
-#ifdef CONFIG_ECONET_AUNUDP
-/*
- *	Send an AUN protocol response.
- */
-
-static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
-{
-	struct sockaddr_in sin = {
-		.sin_family = AF_INET,
-		.sin_port = htons(AUN_PORT),
-		.sin_addr = {.s_addr = addr}
-	};
-	struct aunhdr ah = {.code = code, .cb = cb, .handle = seq};
-	struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)};
-	struct msghdr udpmsg;
-
-	udpmsg.msg_name = (void *)&sin;
-	udpmsg.msg_namelen = sizeof(sin);
-	udpmsg.msg_control = NULL;
-	udpmsg.msg_controllen = 0;
-	udpmsg.msg_flags = 0;
-
-	kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
-}
-
-
-/*
- *	Handle incoming AUN packets.  Work out if anybody wants them,
- *	and send positive or negative acknowledgements as appropriate.
- */
-
-static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
-{
-	struct iphdr *ip = ip_hdr(skb);
-	unsigned char stn = ntohl(ip->saddr) & 0xff;
-	struct dst_entry *dst = skb_dst(skb);
-	struct ec_device *edev = NULL;
-	struct sock *sk = NULL;
-	struct sk_buff *newskb;
-
-	if (dst)
-		edev = dst->dev->ec_ptr;
-
-	if (!edev)
-		goto bad;
-
-	sk = ec_listening_socket(ah->port, stn, edev->net);
-	if (sk == NULL)
-		goto bad;		/* Nobody wants it */
-
-	newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15,
-			   GFP_ATOMIC);
-	if (newskb == NULL) {
-		pr_debug("AUN: memory squeeze, dropping packet\n");
-		/* Send nack and hope sender tries again */
-		goto bad;
-	}
-
-	memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah + 1),
-	       len - sizeof(struct aunhdr));
-
-	if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port)) {
-		/* Socket is bankrupt. */
-		kfree_skb(newskb);
-		goto bad;
-	}
-
-	aun_send_response(ip->saddr, ah->handle, 3, 0);
-	sock_put(sk);
-	return;
-
-bad:
-	aun_send_response(ip->saddr, ah->handle, 4, 0);
-	if (sk)
-		sock_put(sk);
-}
-
-/*
- *	Handle incoming AUN transmit acknowledgements.  If the sequence
- *      number matches something in our backlog then kill it and tell
- *	the user.  If the remote took too long to reply then we may have
- *	dropped the packet already.
- */
-
-static void aun_tx_ack(unsigned long seq, int result)
-{
-	struct sk_buff *skb;
-	unsigned long flags;
-	struct ec_cb *eb;
-
-	spin_lock_irqsave(&aun_queue_lock, flags);
-	skb_queue_walk(&aun_queue, skb) {
-		eb = (struct ec_cb *)&skb->cb;
-		if (eb->seq == seq)
-			goto foundit;
-	}
-	spin_unlock_irqrestore(&aun_queue_lock, flags);
-	pr_debug("AUN: unknown sequence %ld\n", seq);
-	return;
-
-foundit:
-	tx_result(skb->sk, eb->cookie, result);
-	skb_unlink(skb, &aun_queue);
-	spin_unlock_irqrestore(&aun_queue_lock, flags);
-	kfree_skb(skb);
-}
-
-/*
- *	Deal with received AUN frames - sort out what type of thing it is
- *	and hand it to the right function.
- */
-
-static void aun_data_available(struct sock *sk, int slen)
-{
-	int err;
-	struct sk_buff *skb;
-	unsigned char *data;
-	struct aunhdr *ah;
-	size_t len;
-
-	while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
-		if (err == -EAGAIN) {
-			pr_err("AUN: no data available?!\n");
-			return;
-		}
-		pr_debug("AUN: recvfrom() error %d\n", -err);
-	}
-
-	data = skb_transport_header(skb) + sizeof(struct udphdr);
-	ah = (struct aunhdr *)data;
-	len = skb->len - sizeof(struct udphdr);
-
-	switch (ah->code) {
-	case 2:
-		aun_incoming(skb, ah, len);
-		break;
-	case 3:
-		aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_OK);
-		break;
-	case 4:
-		aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
-		break;
-	default:
-		pr_debug("AUN: unknown packet type: %d\n", data[0]);
-	}
-
-	skb_free_datagram(sk, skb);
-}
-
-/*
- *	Called by the timer to manage the AUN transmit queue.  If a packet
- *	was sent to a dead or nonexistent host then we will never get an
- *	acknowledgement back.  After a few seconds we need to spot this and
- *	drop the packet.
- */
-
-static void ab_cleanup(unsigned long h)
-{
-	struct sk_buff *skb, *n;
-	unsigned long flags;
-
-	spin_lock_irqsave(&aun_queue_lock, flags);
-	skb_queue_walk_safe(&aun_queue, skb, n) {
-		struct ec_cb *eb = (struct ec_cb *)&skb->cb;
-		if ((jiffies - eb->start) > eb->timeout) {
-			tx_result(skb->sk, eb->cookie,
-				  ECTYPE_TRANSMIT_NOT_PRESENT);
-			skb_unlink(skb, &aun_queue);
-			kfree_skb(skb);
-		}
-	}
-	spin_unlock_irqrestore(&aun_queue_lock, flags);
-
-	mod_timer(&ab_cleanup_timer, jiffies + (HZ * 2));
-}
-
-static int __init aun_udp_initialise(void)
-{
-	int error;
-	struct sockaddr_in sin;
-
-	skb_queue_head_init(&aun_queue);
-	setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
-	ab_cleanup_timer.expires = jiffies + (HZ * 2);
-	add_timer(&ab_cleanup_timer);
-
-	memset(&sin, 0, sizeof(sin));
-	sin.sin_port = htons(AUN_PORT);
-
-	/* We can count ourselves lucky Acorn machines are too dim to
-	   speak IPv6. :-) */
-	error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock);
-	if (error < 0) {
-		pr_err("AUN: socket error %d\n", -error);
-		return error;
-	}
-
-	udpsock->sk->sk_reuse = 1;
-	udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
-						    from interrupts */
-
-	error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
-				   sizeof(sin));
-	if (error < 0) {
-		pr_err("AUN: bind error %d\n", -error);
-		goto release;
-	}
-
-	udpsock->sk->sk_data_ready = aun_data_available;
-
-	return 0;
-
-release:
-	sock_release(udpsock);
-	udpsock = NULL;
-	return error;
-}
-#endif
-
-#ifdef CONFIG_ECONET_NATIVE
-
-/*
- *	Receive an Econet frame from a device.
- */
-
-static int econet_rcv(struct sk_buff *skb, struct net_device *dev,
-		      struct packet_type *pt, struct net_device *orig_dev)
-{
-	struct ec_framehdr *hdr;
-	struct sock *sk = NULL;
-	struct ec_device *edev = dev->ec_ptr;
-
-	if (!net_eq(dev_net(dev), &init_net))
-		goto drop;
-
-	if (skb->pkt_type == PACKET_OTHERHOST)
-		goto drop;
-
-	if (!edev)
-		goto drop;
-
-	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (skb == NULL)
-		return NET_RX_DROP;
-
-	if (!pskb_may_pull(skb, sizeof(struct ec_framehdr)))
-		goto drop;
-
-	hdr = (struct ec_framehdr *)skb->data;
-
-	/* First check for encapsulated IP */
-	if (hdr->port == EC_PORT_IP) {
-		skb->protocol = htons(ETH_P_IP);
-		skb_pull(skb, sizeof(struct ec_framehdr));
-		netif_rx(skb);
-		return NET_RX_SUCCESS;
-	}
-
-	sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net);
-	if (!sk)
-		goto drop;
-
-	if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb,
-			    hdr->port))
-		goto drop;
-	sock_put(sk);
-	return NET_RX_SUCCESS;
-
-drop:
-	if (sk)
-		sock_put(sk);
-	kfree_skb(skb);
-	return NET_RX_DROP;
-}
-
-static struct packet_type econet_packet_type __read_mostly = {
-	.type =	cpu_to_be16(ETH_P_ECONET),
-	.func =	econet_rcv,
-};
-
-static void econet_hw_initialise(void)
-{
-	dev_add_pack(&econet_packet_type);
-}
-
-#endif
-
-static int econet_notifier(struct notifier_block *this, unsigned long msg,
-			   void *data)
-{
-	struct net_device *dev = data;
-	struct ec_device *edev;
-
-	if (!net_eq(dev_net(dev), &init_net))
-		return NOTIFY_DONE;
-
-	switch (msg) {
-	case NETDEV_UNREGISTER:
-		/* A device has gone down - kill any data we hold for it. */
-		edev = dev->ec_ptr;
-		if (edev) {
-			if (net2dev_map[0] == dev)
-				net2dev_map[0] = NULL;
-			net2dev_map[edev->net] = NULL;
-			kfree(edev);
-			dev->ec_ptr = NULL;
-		}
-		break;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block econet_netdev_notifier = {
-	.notifier_call = econet_notifier,
-};
-
-static void __exit econet_proto_exit(void)
-{
-#ifdef CONFIG_ECONET_AUNUDP
-	del_timer(&ab_cleanup_timer);
-	if (udpsock)
-		sock_release(udpsock);
-#endif
-	unregister_netdevice_notifier(&econet_netdev_notifier);
-#ifdef CONFIG_ECONET_NATIVE
-	dev_remove_pack(&econet_packet_type);
-#endif
-	sock_unregister(econet_family_ops.family);
-	proto_unregister(&econet_proto);
-}
-
-static int __init econet_proto_init(void)
-{
-	int err = proto_register(&econet_proto, 0);
-
-	if (err != 0)
-		goto out;
-	sock_register(&econet_family_ops);
-#ifdef CONFIG_ECONET_AUNUDP
-	aun_udp_initialise();
-#endif
-#ifdef CONFIG_ECONET_NATIVE
-	econet_hw_initialise();
-#endif
-	register_netdevice_notifier(&econet_netdev_notifier);
-out:
-	return err;
-}
-
-module_init(econet_proto_init);
-module_exit(econet_proto_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NETPROTO(PF_ECONET);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index bf10a31..36e5880 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -77,7 +77,7 @@
  */
 int eth_header(struct sk_buff *skb, struct net_device *dev,
 	       unsigned short type,
-	       const void *daddr, const void *saddr, unsigned len)
+	       const void *daddr, const void *saddr, unsigned int len)
 {
 	struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
 
@@ -164,7 +164,7 @@
 	eth = eth_hdr(skb);
 
 	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
-		if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
+		if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
 			skb->pkt_type = PACKET_BROADCAST;
 		else
 			skb->pkt_type = PACKET_MULTICAST;
@@ -179,7 +179,8 @@
 	 */
 
 	else if (1 /*dev->flags&IFF_PROMISC */ ) {
-		if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr)))
+		if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+						      dev->dev_addr)))
 			skb->pkt_type = PACKET_OTHERHOST;
 	}
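
The eth.c hunks above swap compare_ether_addr_64bits() for ether_addr_equal_64bits(), which inverts the return convention: the old helper followed memcmp() semantics (0 on a match) while the new one returns true on a match, so the '!' moves to the other call site. A minimal sketch of the conversion, with is_broadcast_frame() as a made-up wrapper and not part of the patch:

#include <linux/etherdevice.h>

/* Sketch only: a negated compare_ether_addr_64bits() maps onto a
 * plain ether_addr_equal_64bits() call.
 */
static bool is_broadcast_frame(const struct ethhdr *eth,
			       const struct net_device *dev)
{
	/* old: return !compare_ether_addr_64bits(eth->h_dest, dev->broadcast); */
	return ether_addr_equal_64bits(eth->h_dest, dev->broadcast);
}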
 
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 3685158..32eb417 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -196,7 +196,7 @@
 static void
 lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
 {
-	memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN);
+	memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN);
 	/* second bit-flip (Universe/Local) is done according RFC2464 */
 	ipaddr->s6_addr[8] ^= 0x02;
 }
@@ -221,7 +221,7 @@
 
 	if (lladdr)
 		lowpan_raw_dump_inline(__func__, "linklocal address",
-						lladdr,	IEEE802154_ALEN);
+						lladdr,	IEEE802154_ADDR_LEN);
 	if (prefcount > 0)
 		memcpy(ipaddr, prefix, prefcount);
 
@@ -371,7 +371,7 @@
 static int lowpan_header_create(struct sk_buff *skb,
 			   struct net_device *dev,
 			   unsigned short type, const void *_daddr,
-			   const void *_saddr, unsigned len)
+			   const void *_saddr, unsigned int len)
 {
 	u8 tmp, iphc0, iphc1, *hc06_ptr;
 	struct ipv6hdr *hdr;
@@ -650,6 +650,53 @@
 	kfree(entry);
 }
 
+static struct lowpan_fragment *
+lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag)
+{
+	struct lowpan_fragment *frame;
+
+	frame = kzalloc(sizeof(struct lowpan_fragment),
+			GFP_ATOMIC);
+	if (!frame)
+		goto frame_err;
+
+	INIT_LIST_HEAD(&frame->list);
+
+	frame->length = (iphc0 & 7) | (len << 3);
+	frame->tag = tag;
+
+	/* allocate buffer for frame assembling */
+	frame->skb = alloc_skb(frame->length +
+			       sizeof(struct ipv6hdr), GFP_ATOMIC);
+
+	if (!frame->skb)
+		goto skb_err;
+
+	frame->skb->priority = skb->priority;
+	frame->skb->dev = skb->dev;
+
+	/* reserve headroom for uncompressed ipv6 header */
+	skb_reserve(frame->skb, sizeof(struct ipv6hdr));
+	skb_put(frame->skb, frame->length);
+
+	init_timer(&frame->timer);
+	/* time out is the same as for ipv6 - 60 sec */
+	frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
+	frame->timer.data = (unsigned long)frame;
+	frame->timer.function = lowpan_fragment_timer_expired;
+
+	add_timer(&frame->timer);
+
+	list_add_tail(&frame->list, &lowpan_fragments);
+
+	return frame;
+
+skb_err:
+	kfree(frame);
+frame_err:
+	return NULL;
+}
+
 static int
 lowpan_process_data(struct sk_buff *skb)
 {
@@ -692,41 +739,9 @@
 
 		/* alloc new frame structure */
 		if (!found) {
-			frame = kzalloc(sizeof(struct lowpan_fragment),
-								GFP_ATOMIC);
+			frame = lowpan_alloc_new_frame(skb, iphc0, len, tag);
 			if (!frame)
 				goto unlock_and_drop;
-
-			INIT_LIST_HEAD(&frame->list);
-
-			frame->length = (iphc0 & 7) | (len << 3);
-			frame->tag = tag;
-
-			/* allocate buffer for frame assembling */
-			frame->skb = alloc_skb(frame->length +
-					sizeof(struct ipv6hdr), GFP_ATOMIC);
-
-			if (!frame->skb) {
-				kfree(frame);
-				goto unlock_and_drop;
-			}
-
-			frame->skb->priority = skb->priority;
-			frame->skb->dev = skb->dev;
-
-			/* reserve headroom for uncompressed ipv6 header */
-			skb_reserve(frame->skb, sizeof(struct ipv6hdr));
-			skb_put(frame->skb, frame->length);
-
-			init_timer(&frame->timer);
-			/* time out is the same as for ipv6 - 60 sec */
-			frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
-			frame->timer.data = (unsigned long)frame;
-			frame->timer.function = lowpan_fragment_timer_expired;
-
-			add_timer(&frame->timer);
-
-			list_add_tail(&frame->list, &lowpan_fragments);
 		}
 
 		if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
@@ -1044,6 +1059,24 @@
 	free_netdev(dev);
 }
 
+static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
+{
+	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+	return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
+}
+
+static u16 lowpan_get_pan_id(const struct net_device *dev)
+{
+	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+	return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
+}
+
+static u16 lowpan_get_short_addr(const struct net_device *dev)
+{
+	struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+	return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
+}
+
 static struct header_ops lowpan_header_ops = {
 	.create	= lowpan_header_create,
 };
@@ -1053,6 +1086,12 @@
 	.ndo_set_mac_address	= eth_mac_addr,
 };
 
+static struct ieee802154_mlme_ops lowpan_mlme = {
+	.get_pan_id = lowpan_get_pan_id,
+	.get_phy = lowpan_get_phy,
+	.get_short_addr = lowpan_get_short_addr,
+};
+
 static void lowpan_setup(struct net_device *dev)
 {
 	pr_debug("(%s)\n", __func__);
@@ -1070,6 +1109,7 @@
 
 	dev->netdev_ops		= &lowpan_netdev_ops;
 	dev->header_ops		= &lowpan_header_ops;
+	dev->ml_priv		= &lowpan_mlme;
 	dev->destructor		= lowpan_dev_free;
 }
 
@@ -1143,6 +1183,8 @@
 	list_add_tail(&entry->list, &lowpan_devices);
 	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
 
+	spin_lock_init(&flist_lock);
+
 	register_netdevice(dev);
 
 	return 0;
@@ -1152,11 +1194,20 @@
 {
 	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
 	struct net_device *real_dev = lowpan_dev->real_dev;
-	struct lowpan_dev_record *entry;
-	struct lowpan_dev_record *tmp;
+	struct lowpan_dev_record *entry, *tmp;
+	struct lowpan_fragment *frame, *tframe;
 
 	ASSERT_RTNL();
 
+	spin_lock(&flist_lock);
+	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+		del_timer(&frame->timer);
+		list_del(&frame->list);
+		dev_kfree_skb(frame->skb);
+		kfree(frame);
+	}
+	spin_unlock(&flist_lock);
+
 	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
 	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
 		if (entry->ldev == dev) {
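
The new lowpan_alloc_new_frame() above arms a per-fragment expiry timer with the classic init_timer() sequence, setting expires/data/function by hand. A minimal standalone sketch of that idiom, using made-up frag_ctx/frag_expired names:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

/* Hypothetical context struct; the callback gets its context back
 * through the timer's 'data' field, cast to unsigned long.
 */
struct frag_ctx {
	struct timer_list timer;
	/* ... reassembly state ... */
};

static void frag_expired(unsigned long data)
{
	struct frag_ctx *ctx = (struct frag_ctx *)data;

	/* tear down the partially reassembled datagram owned by ctx */
	kfree(ctx);
}

static void frag_arm_timer(struct frag_ctx *ctx)
{
	init_timer(&ctx->timer);
	ctx->timer.expires  = jiffies + 60 * HZ;	/* same 60 s budget as IPv6 */
	ctx->timer.data     = (unsigned long)ctx;
	ctx->timer.function = frag_expired;
	add_timer(&ctx->timer);
}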
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index aeff3f3..8c2251f 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -53,9 +53,6 @@
 #ifndef __6LOWPAN_H__
 #define __6LOWPAN_H__
 
-/* need to know address length to manipulate with it */
-#define IEEE802154_ALEN		8
-
 #define UIP_802154_SHORTADDR_LEN	2  /* compressed ipv6 address length */
 #define UIP_IPH_LEN			40 /* ipv6 fixed header size */
 #define UIP_PROTO_UDP			17 /* ipv6 next header value for UDP */
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 1b09eaa..6fbb2ad 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -44,8 +44,8 @@
 	struct ieee802154_addr src_addr;
 	struct ieee802154_addr dst_addr;
 
-	unsigned bound:1;
-	unsigned want_ack:1;
+	unsigned int bound:1;
+	unsigned int want_ack:1;
 };
 
 static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -206,7 +206,7 @@
 		struct msghdr *msg, size_t size)
 {
 	struct net_device *dev;
-	unsigned mtu;
+	unsigned int mtu;
 	struct sk_buff *skb;
 	struct dgram_sock *ro = dgram_sk(sk);
 	int hlen, tlen;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index adaf462..ca92587 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -63,15 +63,14 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
-			addr->hwaddr);
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
+		    addr->hwaddr) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap))
+		goto nla_put_failure;
 
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
@@ -92,14 +91,13 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
-	NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -119,20 +117,22 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	if (addr->addr_type == IEEE802154_ADDR_LONG)
-		NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
-				addr->hwaddr);
-	else
-		NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
-				addr->short_addr);
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr))
+		goto nla_put_failure;
+	if (addr->addr_type == IEEE802154_ADDR_LONG) {
+		if (nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
+			    addr->hwaddr))
+			goto nla_put_failure;
+	} else {
+		if (nla_put_u16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
+				addr->short_addr))
+			goto nla_put_failure;
+	}
+	if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -151,13 +151,12 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -177,13 +176,13 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-	NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr);
-	NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) ||
+	    nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -204,19 +203,17 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-	NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
-	NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);
-
-	if (edl)
-		NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) ||
+	    (edl &&
+	     nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl)))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -235,13 +232,12 @@
 	if (!msg)
 		return -ENOBUFS;
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-			dev->dev_addr);
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
+		goto nla_put_failure;
 	return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
 
 nla_put_failure:
@@ -266,16 +262,16 @@
 	phy = ieee802154_mlme_ops(dev)->get_phy(dev);
 	BUG_ON(!phy);
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-
-	NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
-		dev->dev_addr);
-	NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
-		ieee802154_mlme_ops(dev)->get_short_addr(dev));
-	NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
-		ieee802154_mlme_ops(dev)->get_pan_id(dev));
+	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+	    nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+	    nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+		    dev->dev_addr) ||
+	    nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR,
+			ieee802154_mlme_ops(dev)->get_short_addr(dev)) ||
+	    nla_put_u16(msg, IEEE802154_ATTR_PAN_ID,
+			ieee802154_mlme_ops(dev)->get_pan_id(dev)))
+		goto nla_put_failure;
 	wpan_phy_put(phy);
 	return genlmsg_end(msg, hdr);
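
The nl-mac.c conversions above follow the tree-wide NLA_PUT*() removal: the old macros hid a goto nla_put_failure inside, while the nla_put_*() functions return an error, so the jump becomes explicit and several puts can be OR-ed into a single test. A minimal sketch of the new form, with IFLA_IFNAME/IFLA_MTU standing in as hypothetical attributes:

#include <linux/errno.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <net/netlink.h>

/* Sketch only: not one of the ieee802154 dump routines. */
static int fill_example(struct sk_buff *msg, const struct net_device *dev)
{
	if (nla_put_string(msg, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(msg, IFLA_MTU, dev->mtu))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}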
 
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index c64a38d..eed2916 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -53,18 +53,18 @@
 		goto out;
 
 	mutex_lock(&phy->pib_lock);
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-
-	NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page);
-	NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel);
+	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
+	    nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
+		goto nla_put_failure;
 	for (i = 0; i < 32; i++) {
 		if (phy->channels_supported[i])
 			buf[pages++] = phy->channels_supported[i] | (i << 27);
 	}
-	if (pages)
-		NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
-				pages * sizeof(uint32_t), buf);
-
+	if (pages &&
+	    nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
+		    pages * sizeof(uint32_t), buf))
+		goto nla_put_failure;
 	mutex_unlock(&phy->pib_lock);
 	kfree(buf);
 	return genlmsg_end(msg, hdr);
@@ -179,6 +179,7 @@
 	const char *devname;
 	int rc = -ENOBUFS;
 	struct net_device *dev;
+	int type = __IEEE802154_DEV_INVALID;
 
 	pr_debug("%s\n", __func__);
 
@@ -221,7 +222,13 @@
 		goto nla_put_failure;
 	}
 
-	dev = phy->add_iface(phy, devname);
+	if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
+		type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
+		if (type >= __IEEE802154_DEV_MAX)
+			return -EINVAL;
+	}
+
+	dev = phy->add_iface(phy, devname, type);
 	if (IS_ERR(dev)) {
 		rc = PTR_ERR(dev);
 		goto nla_put_failure;
@@ -245,9 +252,9 @@
 			goto dev_unregister;
 	}
 
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+		goto nla_put_failure;
 	dev_put(dev);
 
 	wpan_phy_put(phy);
@@ -333,10 +340,9 @@
 
 	rtnl_unlock();
 
-
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
-	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name);
-
+	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name))
+		goto nla_put_failure;
 	wpan_phy_put(phy);
 
 	return ieee802154_nl_reply(msg, info);
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index f96bae8..50e8239 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -106,7 +106,7 @@
 		       size_t size)
 {
 	struct net_device *dev;
-	unsigned mtu;
+	unsigned int mtu;
 	struct sk_buff *skb;
 	int hlen, tlen;
 	int err;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index d183262..20f1cb5 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -262,8 +262,8 @@
 	bool "IP: ARP daemon support"
 	---help---
 	  The kernel maintains an internal cache which maps IP addresses to
-	  hardware addresses on the local network, so that Ethernet/Token Ring/
-	  etc. frames are sent to the proper address on the physical networking
+	  hardware addresses on the local network, so that Ethernet
+	  frames are sent to the proper address on the physical networking
 	  layer. Normally, kernel uses the ARP protocol to resolve these
 	  mappings.
 
@@ -312,7 +312,7 @@
 
 config INET_AH
 	tristate "IP: AH transformation"
-	select XFRM
+	select XFRM_ALGO
 	select CRYPTO
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
@@ -324,7 +324,7 @@
 
 config INET_ESP
 	tristate "IP: ESP transformation"
-	select XFRM
+	select XFRM_ALGO
 	select CRYPTO
 	select CRYPTO_AUTHENC
 	select CRYPTO_HMAC
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 10e3751..c8f7aee 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -350,7 +350,7 @@
 	err = 0;
 	sk->sk_no_check = answer_no_check;
 	if (INET_PROTOSW_REUSE & answer_flags)
-		sk->sk_reuse = 1;
+		sk->sk_reuse = SK_CAN_REUSE;
 
 	inet = inet_sk(sk);
 	inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
@@ -541,7 +541,7 @@
 }
 EXPORT_SYMBOL(inet_bind);
 
-int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
+int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
 		       int addr_len, int flags)
 {
 	struct sock *sk = sock->sk;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index fd508b5..e8f2617 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -77,7 +77,7 @@
 
 static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
 {
-	unsigned char * optptr = (unsigned char*)(iph+1);
+	unsigned char *optptr = (unsigned char *)(iph+1);
 	int  l = iph->ihl*4 - sizeof(struct iphdr);
 	int  optlen;
 
@@ -406,8 +406,8 @@
 			      ah->spi, IPPROTO_AH, AF_INET);
 	if (!x)
 		return;
-	printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
-	       ntohl(ah->spi), ntohl(iph->daddr));
+	pr_debug("pmtu discovery on SA AH/%08x/%08x\n",
+		 ntohl(ah->spi), ntohl(iph->daddr));
 	xfrm_state_put(x);
 }
 
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 18d9b81..cda37be 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -73,6 +73,8 @@
  *		Jesper D. Brouer:       Proxy ARP PVLAN RFC 3069 support.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -89,7 +91,6 @@
 #include <linux/etherdevice.h>
 #include <linux/fddidevice.h>
 #include <linux/if_arp.h>
-#include <linux/trdevice.h>
 #include <linux/skbuff.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -193,9 +194,6 @@
 	case ARPHRD_IEEE802:
 		ip_eth_mc_map(addr, haddr);
 		return 0;
-	case ARPHRD_IEEE802_TR:
-		ip_tr_mc_map(addr, haddr);
-		return 0;
 	case ARPHRD_INFINIBAND:
 		ip_ib_mc_map(addr, dev->broadcast, haddr);
 		return 0;
@@ -364,8 +362,7 @@
 	probes -= neigh->parms->ucast_probes;
 	if (probes < 0) {
 		if (!(neigh->nud_state & NUD_VALID))
-			printk(KERN_DEBUG
-			       "trying to ucast probe in NUD_INVALID\n");
+			pr_debug("trying to ucast probe in NUD_INVALID\n");
 		dst_ha = neigh->ha;
 		read_lock_bh(&neigh->lock);
 	} else {
@@ -452,7 +449,7 @@
 {
 	switch (addr_hint) {
 	case RTN_LOCAL:
-		printk(KERN_DEBUG "ARP: arp called for own IP address\n");
+		pr_debug("arp called for own IP address\n");
 		memcpy(haddr, dev->dev_addr, dev->addr_len);
 		return 1;
 	case RTN_MULTICAST:
@@ -473,7 +470,7 @@
 	struct neighbour *n;
 
 	if (!skb_dst(skb)) {
-		printk(KERN_DEBUG "arp_find is called with dst==NULL\n");
+		pr_debug("arp_find is called with dst==NULL\n");
 		kfree_skb(skb);
 		return 1;
 	}
@@ -648,12 +645,6 @@
 		arp->ar_pro = htons(ETH_P_IP);
 		break;
 #endif
-#if IS_ENABLED(CONFIG_TR)
-	case ARPHRD_IEEE802_TR:
-		arp->ar_hrd = htons(ARPHRD_IEEE802);
-		arp->ar_pro = htons(ETH_P_IP);
-		break;
-#endif
 	}
 
 	arp->ar_hln = dev->addr_len;
@@ -751,11 +742,10 @@
 			goto out;
 		break;
 	case ARPHRD_ETHER:
-	case ARPHRD_IEEE802_TR:
 	case ARPHRD_FDDI:
 	case ARPHRD_IEEE802:
 		/*
-		 * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
+		 * ETHERNET, and Fibre Channel (which are IEEE 802
+		 * ETHERNET and Fibre Channel (which are IEEE 802
 		 * devices, according to RFC 2625) devices will accept ARP
 		 * hardware types of either 1 (Ethernet) or 6 (IEEE 802.2).
 		 * This is the case also of FDDI, where the RFC 1390 says that
@@ -1059,7 +1049,7 @@
 	neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev);
 	err = PTR_ERR(neigh);
 	if (!IS_ERR(neigh)) {
-		unsigned state = NUD_STALE;
+		unsigned int state = NUD_STALE;
 		if (r->arp_flags & ATF_PERM)
 			state = NUD_PERMANENT;
 		err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
@@ -1071,7 +1061,7 @@
 	return err;
 }
 
-static unsigned arp_state_to_flags(struct neighbour *neigh)
+static unsigned int arp_state_to_flags(struct neighbour *neigh)
 {
 	if (neigh->nud_state&NUD_PERMANENT)
 		return ATF_PERM | ATF_COM;
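
The arp.c hunks also convert raw printk(KERN_DEBUG ...) calls to pr_debug() and add a pr_fmt() definition, so every message in the file picks up a common prefix instead of hand-written "ARP: " literals. A minimal sketch of the idiom (the prefix is whatever KBUILD_MODNAME expands to for the object being built):

/* pr_fmt() must be defined before the first include that pulls in
 * printk.h; pr_debug()/pr_warn()/... then prepend it automatically.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void complain(void)
{
	pr_debug("arp called for own IP address\n");
}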
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 6e447ff..10e15a1 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -217,8 +217,7 @@
 	WARN_ON(idev->ifa_list);
 	WARN_ON(idev->mc_list);
 #ifdef NET_REFCNT_DEBUG
-	printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
-	       idev, dev ? dev->name : "NIL");
+	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
 #endif
 	dev_put(dev);
 	if (!idev->dead)
@@ -1125,7 +1124,7 @@
 	}
 }
 
-static inline bool inetdev_valid_mtu(unsigned mtu)
+static inline bool inetdev_valid_mtu(unsigned int mtu)
 {
 	return mtu >= 68;
 }
@@ -1174,7 +1173,7 @@
 
 	switch (event) {
 	case NETDEV_REGISTER:
-		printk(KERN_DEBUG "inetdev_event: bug\n");
+		pr_debug("%s: bug\n", __func__);
 		RCU_INIT_POINTER(dev->ip_ptr, NULL);
 		break;
 	case NETDEV_UP:
@@ -1266,17 +1265,15 @@
 	ifm->ifa_scope = ifa->ifa_scope;
 	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
 
-	if (ifa->ifa_address)
-		NLA_PUT_BE32(skb, IFA_ADDRESS, ifa->ifa_address);
-
-	if (ifa->ifa_local)
-		NLA_PUT_BE32(skb, IFA_LOCAL, ifa->ifa_local);
-
-	if (ifa->ifa_broadcast)
-		NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast);
-
-	if (ifa->ifa_label[0])
-		NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
+	if ((ifa->ifa_address &&
+	     nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) ||
+	    (ifa->ifa_local &&
+	     nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) ||
+	    (ifa->ifa_broadcast &&
+	     nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
+	    (ifa->ifa_label[0] &&
+	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -1587,7 +1584,6 @@
 static struct devinet_sysctl_table {
 	struct ctl_table_header *sysctl_header;
 	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
-	char *dev_name;
 } devinet_sysctl = {
 	.devinet_vars = {
 		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
@@ -1629,16 +1625,7 @@
 {
 	int i;
 	struct devinet_sysctl_table *t;
-
-#define DEVINET_CTL_PATH_DEV	3
-
-	struct ctl_path devinet_ctl_path[] = {
-		{ .procname = "net",  },
-		{ .procname = "ipv4", },
-		{ .procname = "conf", },
-		{ /* to be set */ },
-		{ },
-	};
+	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
 
 	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
 	if (!t)
@@ -1650,27 +1637,15 @@
 		t->devinet_vars[i].extra2 = net;
 	}
 
-	/*
-	 * Make a copy of dev_name, because '.procname' is regarded as const
-	 * by sysctl and we wouldn't want anyone to change it under our feet
-	 * (see SIOCSIFNAME).
-	 */
-	t->dev_name = kstrdup(dev_name, GFP_KERNEL);
-	if (!t->dev_name)
-		goto free;
+	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
 
-	devinet_ctl_path[DEVINET_CTL_PATH_DEV].procname = t->dev_name;
-
-	t->sysctl_header = register_net_sysctl_table(net, devinet_ctl_path,
-			t->devinet_vars);
+	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
 	if (!t->sysctl_header)
-		goto free_procname;
+		goto free;
 
 	p->sysctl = t;
 	return 0;
 
-free_procname:
-	kfree(t->dev_name);
 free:
 	kfree(t);
 out:
@@ -1686,7 +1661,6 @@
 
 	cnf->sysctl = NULL;
 	unregister_net_sysctl_table(t->sysctl_header);
-	kfree(t->dev_name);
 	kfree(t);
 }
 
@@ -1716,12 +1690,6 @@
 	},
 	{ },
 };
-
-static __net_initdata struct ctl_path net_ipv4_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ },
-};
 #endif
 
 static __net_init int devinet_init_net(struct net *net)
@@ -1767,7 +1735,7 @@
 		goto err_reg_dflt;
 
 	err = -ENOMEM;
-	forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl);
+	forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
 	if (forw_hdr == NULL)
 		goto err_reg_ctl;
 	net->ipv4.forw_hdr = forw_hdr;
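
devinet.c above drops the ctl_path array and the kstrdup()'d procname in favour of register_net_sysctl(), which takes the whole sysctl path as one slash-separated string. A minimal sketch of the new registration, with example_register() and its table left hypothetical; the path is built the same way the patch builds "net/ipv4/conf/<dev>":

#include <linux/kernel.h>
#include <linux/if.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static struct ctl_table_header *example_register(struct net *net,
						 struct ctl_table *table,
						 const char *dev_name)
{
	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];

	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
	return register_net_sysctl(net, path, table);	/* NULL on error */
}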
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cbe3a68..3854411 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -136,13 +136,13 @@
  * Find address type as if only "dev" was present in the system. If
  * on_dev is NULL then all interfaces are taken into consideration.
  */
-static inline unsigned __inet_dev_addr_type(struct net *net,
-					    const struct net_device *dev,
-					    __be32 addr)
+static inline unsigned int __inet_dev_addr_type(struct net *net,
+						const struct net_device *dev,
+						__be32 addr)
 {
 	struct flowi4		fl4 = { .daddr = addr };
 	struct fib_result	res;
-	unsigned ret = RTN_BROADCAST;
+	unsigned int ret = RTN_BROADCAST;
 	struct fib_table *local_table;
 
 	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
@@ -740,7 +740,7 @@
 #define BRD_OK		2
 #define BRD0_OK		4
 #define BRD1_OK		8
-	unsigned ok = 0;
+	unsigned int ok = 0;
 	int subnet = 0;		/* Primary network */
 	int gone = 1;		/* Address is missing */
 	int same_prefsrc = 0;	/* Another primary with same IP */
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 799fc79..2d043f7 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -221,15 +221,15 @@
 	frh->src_len = rule4->src_len;
 	frh->tos = rule4->tos;
 
-	if (rule4->dst_len)
-		NLA_PUT_BE32(skb, FRA_DST, rule4->dst);
-
-	if (rule4->src_len)
-		NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
-
+	if ((rule4->dst_len &&
+	     nla_put_be32(skb, FRA_DST, rule4->dst)) ||
+	    (rule4->src_len &&
+	     nla_put_be32(skb, FRA_SRC, rule4->src)))
+		goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-	if (rule4->tclassid)
-		NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
+	if (rule4->tclassid &&
+	    nla_put_u32(skb, FRA_FLOW, rule4->tclassid))
+		goto nla_put_failure;
 #endif
 	return 0;
 
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5063fa3..a8bdf740 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -931,33 +931,36 @@
 		rtm->rtm_table = tb_id;
 	else
 		rtm->rtm_table = RT_TABLE_COMPAT;
-	NLA_PUT_U32(skb, RTA_TABLE, tb_id);
+	if (nla_put_u32(skb, RTA_TABLE, tb_id))
+		goto nla_put_failure;
 	rtm->rtm_type = type;
 	rtm->rtm_flags = fi->fib_flags;
 	rtm->rtm_scope = fi->fib_scope;
 	rtm->rtm_protocol = fi->fib_protocol;
 
-	if (rtm->rtm_dst_len)
-		NLA_PUT_BE32(skb, RTA_DST, dst);
-
-	if (fi->fib_priority)
-		NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);
-
+	if (rtm->rtm_dst_len &&
+	    nla_put_be32(skb, RTA_DST, dst))
+		goto nla_put_failure;
+	if (fi->fib_priority &&
+	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
+		goto nla_put_failure;
 	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
 		goto nla_put_failure;
 
-	if (fi->fib_prefsrc)
-		NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);
-
+	if (fi->fib_prefsrc &&
+	    nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
+		goto nla_put_failure;
 	if (fi->fib_nhs == 1) {
-		if (fi->fib_nh->nh_gw)
-			NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);
-
-		if (fi->fib_nh->nh_oif)
-			NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
+		if (fi->fib_nh->nh_gw &&
+		    nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
+			goto nla_put_failure;
+		if (fi->fib_nh->nh_oif &&
+		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
+			goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-		if (fi->fib_nh[0].nh_tclassid)
-			NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
+		if (fi->fib_nh[0].nh_tclassid &&
+		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
+			goto nla_put_failure;
 #endif
 	}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -978,11 +981,13 @@
 			rtnh->rtnh_hops = nh->nh_weight - 1;
 			rtnh->rtnh_ifindex = nh->nh_oif;
 
-			if (nh->nh_gw)
-				NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
+			if (nh->nh_gw &&
+			    nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
+				goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-			if (nh->nh_tclassid)
-				NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
+			if (nh->nh_tclassid &&
+			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
+				goto nla_put_failure;
 #endif
 			/* length of rtnetlink header + attributes */
 			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index bce36f1..30b88d7 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1370,6 +1370,8 @@
 
 			if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
 				continue;
+			if (fi->fib_dead)
+				continue;
 			if (fa->fa_info->fib_scope < flp->flowi4_scope)
 				continue;
 			fib_alias_accessed(fa);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 2cb2bf8..c75efbd 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -713,11 +713,10 @@
 
 	if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
 	    inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
-		if (net_ratelimit())
-			pr_warn("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
-				&ip_hdr(skb)->saddr,
-				icmph->type, icmph->code,
-				&iph->daddr, skb->dev->name);
+		net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
+				     &ip_hdr(skb)->saddr,
+				     icmph->type, icmph->code,
+				     &iph->daddr, skb->dev->name);
 		goto out;
 	}
 
@@ -906,8 +905,7 @@
 static void icmp_address(struct sk_buff *skb)
 {
 #if 0
-	if (net_ratelimit())
-		printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n");
+	net_dbg_ratelimited("a guy asks for address mask. Who is it?\n");
 #endif
 }
 
@@ -943,10 +941,10 @@
 			    inet_ifa_match(ip_hdr(skb)->saddr, ifa))
 				break;
 		}
-		if (!ifa && net_ratelimit()) {
-			pr_info("Wrong address mask %pI4 from %s/%pI4\n",
-				mp, dev->name, &ip_hdr(skb)->saddr);
-		}
+		if (!ifa)
+			net_info_ratelimited("Wrong address mask %pI4 from %s/%pI4\n",
+					     mp,
+					     dev->name, &ip_hdr(skb)->saddr);
 	}
 }
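
The icmp.c changes are part of the same printk cleanup: open-coded "if (net_ratelimit()) pr_warn(...)" blocks become net_warn_ratelimited()/net_info_ratelimited() one-liners with identical rate limiting. A before/after sketch, where warn_bogus() is a made-up helper:

#include <linux/kernel.h>
#include <linux/net.h>

static void warn_bogus(__be32 saddr)
{
	/* old style */
	if (net_ratelimit())
		pr_warn("bogus ICMP from %pI4\n", &saddr);

	/* new style, same output and same rate limit */
	net_warn_ratelimited("bogus ICMP from %pI4\n", &saddr);
}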
 
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 5dfecfd..6699f23 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -344,10 +344,10 @@
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len  = 0;	/* filled in later */
 	ip_select_ident(pip, &rt->dst, NULL);
-	((u8*)&pip[1])[0] = IPOPT_RA;
-	((u8*)&pip[1])[1] = 4;
-	((u8*)&pip[1])[2] = 0;
-	((u8*)&pip[1])[3] = 0;
+	((u8 *)&pip[1])[0] = IPOPT_RA;
+	((u8 *)&pip[1])[1] = 4;
+	((u8 *)&pip[1])[2] = 0;
+	((u8 *)&pip[1])[3] = 0;
 
 	skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
 	skb_put(skb, sizeof(*pig));
@@ -688,10 +688,10 @@
 	iph->saddr    = fl4.saddr;
 	iph->protocol = IPPROTO_IGMP;
 	ip_select_ident(iph, &rt->dst, NULL);
-	((u8*)&iph[1])[0] = IPOPT_RA;
-	((u8*)&iph[1])[1] = 4;
-	((u8*)&iph[1])[2] = 0;
-	((u8*)&iph[1])[3] = 0;
+	((u8 *)&iph[1])[0] = IPOPT_RA;
+	((u8 *)&iph[1])[1] = 4;
+	((u8 *)&iph[1])[2] = 0;
+	((u8 *)&iph[1])[3] = 0;
 
 	ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
 	ih->type = type;
@@ -774,7 +774,7 @@
 			if (psf->sf_count[MCAST_INCLUDE] ||
 			    pmc->sfcount[MCAST_EXCLUDE] !=
 			    psf->sf_count[MCAST_EXCLUDE])
-				continue;
+				break;
 			if (srcs[i] == psf->sf_inaddr) {
 				scount++;
 				break;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 19d66ce..95e61596 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -42,7 +42,8 @@
 
 void inet_get_local_port_range(int *low, int *high)
 {
-	unsigned seq;
+	unsigned int seq;
+
 	do {
 		seq = read_seqbegin(&sysctl_local_ports.lock);
 
@@ -53,7 +54,7 @@
 EXPORT_SYMBOL(inet_get_local_port_range);
 
 int inet_csk_bind_conflict(const struct sock *sk,
-			   const struct inet_bind_bucket *tb)
+			   const struct inet_bind_bucket *tb, bool relax)
 {
 	struct sock *sk2;
 	struct hlist_node *node;
@@ -79,6 +80,14 @@
 				    sk2_rcv_saddr == sk_rcv_saddr(sk))
 					break;
 			}
+			if (!relax && reuse && sk2->sk_reuse &&
+			    sk2->sk_state != TCP_LISTEN) {
+				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
+
+				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
+				    sk2_rcv_saddr == sk_rcv_saddr(sk))
+					break;
+			}
 		}
 	}
 	return node != NULL;
@@ -122,12 +131,13 @@
 					    (tb->num_owners < smallest_size || smallest_size == -1)) {
 						smallest_size = tb->num_owners;
 						smallest_rover = rover;
-						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
 							snum = smallest_rover;
 							goto tb_found;
 						}
 					}
-					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
 						snum = rover;
 						goto tb_found;
 					}
@@ -172,18 +182,22 @@
 	goto tb_not_found;
 tb_found:
 	if (!hlist_empty(&tb->owners)) {
+		if (sk->sk_reuse == SK_FORCE_REUSE)
+			goto success;
+
 		if (tb->fastreuse > 0 &&
 		    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
 		    smallest_size == -1) {
 			goto success;
 		} else {
 			ret = 1;
-			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
 				if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
 				    smallest_size != -1 && --attempts >= 0) {
 					spin_unlock(&head->lock);
 					goto again;
 				}
+
 				goto fail_unlock;
 			}
 		}
@@ -514,7 +528,7 @@
 
 	/* Normally all the openreqs are young and become mature
 	 * (i.e. converted to established socket) for first timeout.
-	 * If synack was not acknowledged for 3 seconds, it means
+	 * If synack was not acknowledged for 1 second, it means
 	 * one of the following things: synack was lost, ack was lost,
 	 * rtt is high or nobody planned to ack (i.e. synflood).
 	 * When server is a bit loaded, queue is populated with old
@@ -555,8 +569,7 @@
 				syn_ack_recalc(req, thresh, max_retries,
 					       queue->rskq_defer_accept,
 					       &expire, &resend);
-				if (req->rsk_ops->syn_ack_timeout)
-					req->rsk_ops->syn_ack_timeout(parent, req);
+				req->rsk_ops->syn_ack_timeout(parent, req);
 				if (!expire &&
 				    (!resend ||
 				     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8d25a1c..46d1e71 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -141,7 +141,7 @@
 			goto rtattr_failure;
 
 	if (icsk == NULL) {
-		r->idiag_rqueue = r->idiag_wqueue = 0;
+		handler->idiag_get_info(sk, r, NULL);
 		goto out;
 	}
 
@@ -999,12 +999,12 @@
 	return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
 }
 
-static struct sock_diag_handler inet_diag_handler = {
+static const struct sock_diag_handler inet_diag_handler = {
 	.family = AF_INET,
 	.dump = inet_diag_handler_dump,
 };
 
-static struct sock_diag_handler inet6_diag_handler = {
+static const struct sock_diag_handler inet6_diag_handler = {
 	.family = AF_INET6,
 	.dump = inet_diag_handler_dump,
 };
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 984ec65..7880af9 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -217,7 +217,7 @@
 }
 EXPORT_SYMBOL_GPL(__inet_lookup_listener);
 
-struct sock * __inet_lookup_established(struct net *net,
+struct sock *__inet_lookup_established(struct net *net,
 				  struct inet_hashinfo *hashinfo,
 				  const __be32 saddr, const __be16 sport,
 				  const __be32 daddr, const u16 hnum,
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 89168c6..2784db3 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -89,8 +89,8 @@
 
 #ifdef SOCK_REFCNT_DEBUG
 	if (atomic_read(&tw->tw_refcnt) != 1) {
-		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
-		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
+		pr_debug("%s timewait_sock %p refcnt=%d\n",
+			 tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
 	}
 #endif
 	while (refcnt) {
@@ -263,7 +263,7 @@
 void inet_twdr_hangman(unsigned long data)
 {
 	struct inet_timewait_death_row *twdr;
-	int unsigned need_timer;
+	unsigned int need_timer;
 
 	twdr = (struct inet_timewait_death_row *)data;
 	spin_lock(&twdr->death_lock);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 29a07b6..e5c44fc 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -41,7 +41,7 @@
 
 static int ip_forward_finish(struct sk_buff *skb)
 {
-	struct ip_options * opt	= &(IPCB(skb)->opt);
+	struct ip_options *opt	= &(IPCB(skb)->opt);
 
 	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
 
@@ -55,7 +55,7 @@
 {
 	struct iphdr *iph;	/* Our header */
 	struct rtable *rt;	/* Route we use */
-	struct ip_options * opt	= &(IPCB(skb)->opt);
+	struct ip_options *opt	= &(IPCB(skb)->opt);
 
 	if (skb_warn_if_lro(skb))
 		goto drop;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 3727e23..9dbd3dd 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -148,17 +148,17 @@
 	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
 }
 
-static int ip4_frag_match(struct inet_frag_queue *q, void *a)
+static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
 {
 	struct ipq *qp;
 	struct ip4_create_arg *arg = a;
 
 	qp = container_of(q, struct ipq, q);
 	return	qp->id == arg->iph->id &&
-			qp->saddr == arg->iph->saddr &&
-			qp->daddr == arg->iph->daddr &&
-			qp->protocol == arg->iph->protocol &&
-			qp->user == arg->user;
+		qp->saddr == arg->iph->saddr &&
+		qp->daddr == arg->iph->daddr &&
+		qp->protocol == arg->iph->protocol &&
+		qp->user == arg->user;
 }
 
 /* Memory Tracking Functions. */
@@ -545,6 +545,7 @@
 	int len;
 	int ihlen;
 	int err;
+	int sum_truesize;
 	u8 ecn;
 
 	ipq_kill(qp);
@@ -569,7 +570,7 @@
 		skb_morph(head, qp->q.fragments);
 		head->next = qp->q.fragments->next;
 
-		kfree_skb(qp->q.fragments);
+		consume_skb(qp->q.fragments);
 		qp->q.fragments = head;
 	}
 
@@ -611,19 +612,32 @@
 		atomic_add(clone->truesize, &qp->q.net->mem);
 	}
 
-	skb_shinfo(head)->frag_list = head->next;
 	skb_push(head, head->data - skb_network_header(head));
 
-	for (fp=head->next; fp; fp = fp->next) {
-		head->data_len += fp->len;
-		head->len += fp->len;
+	sum_truesize = head->truesize;
+	for (fp = head->next; fp;) {
+		bool headstolen;
+		int delta;
+		struct sk_buff *next = fp->next;
+
+		sum_truesize += fp->truesize;
 		if (head->ip_summed != fp->ip_summed)
 			head->ip_summed = CHECKSUM_NONE;
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
-		head->truesize += fp->truesize;
+
+		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+			kfree_skb_partial(fp, headstolen);
+		} else {
+			if (!skb_shinfo(head)->frag_list)
+				skb_shinfo(head)->frag_list = fp;
+			head->data_len += fp->len;
+			head->len += fp->len;
+			head->truesize += fp->truesize;
+		}
+		fp = next;
 	}
-	atomic_sub(head->truesize, &qp->q.net->mem);
+	atomic_sub(sum_truesize, &qp->q.net->mem);
 
 	head->next = NULL;
 	head->dev = dev;
@@ -644,8 +658,7 @@
 	err = -ENOMEM;
 	goto out_fail;
 out_oversize:
-	if (net_ratelimit())
-		pr_info("Oversized IP packet from %pI4\n", &qp->saddr);
+	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
 out_fail:
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
 	return err;
@@ -782,7 +795,7 @@
 		table[2].data = &net->ipv4.frags.timeout;
 	}
 
-	hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
+	hdr = register_net_sysctl(net, "net/ipv4", table);
 	if (hdr == NULL)
 		goto err_reg;
 
@@ -807,7 +820,7 @@
 
 static void ip4_frags_ctl_register(void)
 {
-	register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
+	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
 }
 #else
 static inline int ip4_frags_ns_ctl_register(struct net *net)
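
The reassembly loop rewritten above relies on the new skb_try_coalesce() helper: each fragment's payload is merged straight into the head skb when possible, and only chained on frag_list otherwise. A simplified sketch of that decision (truesize bookkeeping for the coalesced case is elided; the real code folds it into sum_truesize):

#include <linux/skbuff.h>

/* absorb_fragment() is a made-up name for illustration. */
static void absorb_fragment(struct sk_buff *head, struct sk_buff *fp)
{
	bool stolen;
	int delta;

	if (skb_try_coalesce(head, fp, &stolen, &delta)) {
		kfree_skb_partial(fp, stolen);	/* data now lives in head */
	} else {
		if (!skb_shinfo(head)->frag_list)
			skb_shinfo(head)->frag_list = fp;
		head->data_len += fp->len;
		head->len      += fp->len;
		head->truesize += fp->truesize;
	}
}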
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b57532d..f49047b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -169,37 +169,56 @@
 
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_tstats {
-	unsigned long	rx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_packets;
-	unsigned long	tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+	struct u64_stats_sync	syncp;
+};
 
-static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
+						   struct rtnl_link_stats64 *tot)
 {
-	struct pcpu_tstats sum = { 0 };
 	int i;
 
 	for_each_possible_cpu(i) {
 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+		unsigned int start;
 
-		sum.rx_packets += tstats->rx_packets;
-		sum.rx_bytes   += tstats->rx_bytes;
-		sum.tx_packets += tstats->tx_packets;
-		sum.tx_bytes   += tstats->tx_bytes;
+		do {
+			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			rx_packets = tstats->rx_packets;
+			tx_packets = tstats->tx_packets;
+			rx_bytes = tstats->rx_bytes;
+			tx_bytes = tstats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+		tot->rx_packets += rx_packets;
+		tot->tx_packets += tx_packets;
+		tot->rx_bytes   += rx_bytes;
+		tot->tx_bytes   += tx_bytes;
 	}
-	dev->stats.rx_packets = sum.rx_packets;
-	dev->stats.rx_bytes   = sum.rx_bytes;
-	dev->stats.tx_packets = sum.tx_packets;
-	dev->stats.tx_bytes   = sum.tx_bytes;
-	return &dev->stats;
+
+	tot->multicast = dev->stats.multicast;
+	tot->rx_crc_errors = dev->stats.rx_crc_errors;
+	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+	tot->rx_length_errors = dev->stats.rx_length_errors;
+	tot->rx_errors = dev->stats.rx_errors;
+	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+	tot->tx_dropped = dev->stats.tx_dropped;
+	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+	tot->tx_errors = dev->stats.tx_errors;
+
+	return tot;
 }
 
 /* Given src, dst and key, find appropriate for input tunnel. */
 
-static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
-					      __be32 remote, __be32 local,
-					      __be32 key, __be16 gre_proto)
+static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
+					     __be32 remote, __be32 local,
+					     __be32 key, __be16 gre_proto)
 {
 	struct net *net = dev_net(dev);
 	int link = dev->ifindex;
@@ -464,7 +483,7 @@
  */
 
 	const struct iphdr *iph = (const struct iphdr *)skb->data;
-	__be16	     *p = (__be16*)(skb->data+(iph->ihl<<2));
+	__be16	     *p = (__be16 *)(skb->data+(iph->ihl<<2));
 	int grehlen = (iph->ihl<<2) + 4;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
@@ -574,7 +593,7 @@
 
 	iph = ip_hdr(skb);
 	h = skb->data;
-	flags = *(__be16*)h;
+	flags = *(__be16 *)h;
 
 	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
 		/* - Version must be 0.
@@ -598,11 +617,11 @@
 			offset += 4;
 		}
 		if (flags&GRE_KEY) {
-			key = *(__be32*)(h + offset);
+			key = *(__be32 *)(h + offset);
 			offset += 4;
 		}
 		if (flags&GRE_SEQ) {
-			seqno = ntohl(*(__be32*)(h + offset));
+			seqno = ntohl(*(__be32 *)(h + offset));
 			offset += 4;
 		}
 	}
@@ -672,8 +691,10 @@
 		}
 
 		tstats = this_cpu_ptr(tunnel->dev->tstats);
+		u64_stats_update_begin(&tstats->syncp);
 		tstats->rx_packets++;
 		tstats->rx_bytes += skb->len;
+		u64_stats_update_end(&tstats->syncp);
 
 		__skb_tunnel_rx(skb, tunnel->dev);
 
@@ -900,7 +921,7 @@
 				   htons(ETH_P_TEB) : skb->protocol;
 
 	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
-		__be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);
+		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
 
 		if (tunnel->parms.o_flags&GRE_SEQ) {
 			++tunnel->o_seqno;
@@ -913,7 +934,7 @@
 		}
 		if (tunnel->parms.o_flags&GRE_CSUM) {
 			*ptr = 0;
-			*(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
+			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
 		}
 	}
 
@@ -1169,7 +1190,7 @@
 {
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
-	__be16 *p = (__be16*)(iph+1);
+	__be16 *p = (__be16 *)(iph+1);
 
 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
 	p[0]		= t->parms.o_flags;
@@ -1253,7 +1274,7 @@
 	.ndo_start_xmit		= ipgre_tunnel_xmit,
 	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
 	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
-	.ndo_get_stats		= ipgre_get_stats,
+	.ndo_get_stats64	= ipgre_get_stats64,
 };
 
 static void ipgre_dev_free(struct net_device *dev)
@@ -1507,7 +1528,7 @@
 	.ndo_set_mac_address 	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
-	.ndo_get_stats		= ipgre_get_stats,
+	.ndo_get_stats64	= ipgre_get_stats64,
 };
 
 static void ipgre_tap_setup(struct net_device *dev)
@@ -1654,17 +1675,18 @@
 	struct ip_tunnel *t = netdev_priv(dev);
 	struct ip_tunnel_parm *p = &t->parms;
 
-	NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
-	NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
-	NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
-	NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
-	NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
-	NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
-	NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
-	NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
-	NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
-	NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));
-
+	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
+	    nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
+	    nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
+	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
+	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
+	    nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
+	    nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
+	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
+	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
+	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
+		       !!(p->iph.frag_off & htons(IP_DF))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
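ip_gre (and ipip further down) move from racy unsigned long per-cpu counters summed into dev->stats to 64-bit counters guarded by u64_stats_sync and exported through .ndo_get_stats64. Writers bracket updates with u64_stats_update_begin()/end(); readers loop on the fetch/retry pair so a 32-bit host never observes a torn 64-bit value. A minimal sketch of both sides, using the same helpers as the hunks:

    struct pcpu_tstats {
            u64                     rx_packets;
            u64                     rx_bytes;
            struct u64_stats_sync   syncp;
    };

    /* writer side: per-cpu, runs in softirq context */
    static void tstats_rx(struct pcpu_tstats *tstats, unsigned int len)
    {
            u64_stats_update_begin(&tstats->syncp);
            tstats->rx_packets++;
            tstats->rx_bytes += len;
            u64_stats_update_end(&tstats->syncp);
    }

    /* reader side: retry until a consistent snapshot is seen */
    static void tstats_read(const struct pcpu_tstats *tstats,
                            u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin_bh(&tstats->syncp);
                    *packets = tstats->rx_packets;
                    *bytes   = tstats->rx_bytes;
            } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
    }
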
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 26eccc5..8590144 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -210,9 +210,8 @@
 			int ret;
 
 			if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
-				if (net_ratelimit())
-					printk("%s: proto %d isn't netns-ready\n",
-						__func__, protocol);
+				net_info_ratelimited("%s: proto %d isn't netns-ready\n",
+						     __func__, protocol);
 				kfree_skb(skb);
 				goto out;
 			}
@@ -298,10 +297,10 @@
 
 		if (in_dev) {
 			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
-				if (IN_DEV_LOG_MARTIANS(in_dev) &&
-				    net_ratelimit())
-					pr_info("source route option %pI4 -> %pI4\n",
-						&iph->saddr, &iph->daddr);
+				if (IN_DEV_LOG_MARTIANS(in_dev))
+					net_info_ratelimited("source route option %pI4 -> %pI4\n",
+							     &iph->saddr,
+							     &iph->daddr);
 				goto drop;
 			}
 		}
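The logging changes in this file, and in most of the files that follow, collapse the open-coded "if (net_ratelimit()) pr_*(...)" pattern into the net_<level>_ratelimited() helpers. Each helper is essentially the rate-limit check wrapping the matching pr_<level>() call; the sketch below paraphrases the shape of those macros (the authoritative definitions live in include/linux/net.h):

    /* Paraphrased helper shape, not the kernel's exact text. */
    #define net_ratelimited_function(function, ...)         \
    do {                                                    \
            if (net_ratelimit())                            \
                    function(__VA_ARGS__);                  \
    } while (0)

    #define net_info_ratelimited(fmt, ...)                  \
            net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
    #define net_warn_ratelimited(fmt, ...)                  \
            net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
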
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index a0d0d9d..708b994 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -210,10 +210,10 @@
  *	Simple and stupid 8), but the most efficient way.
  */
 
-void ip_options_fragment(struct sk_buff * skb)
+void ip_options_fragment(struct sk_buff *skb)
 {
 	unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr);
-	struct ip_options * opt = &(IPCB(skb)->opt);
+	struct ip_options *opt = &(IPCB(skb)->opt);
 	int  l = opt->optlen;
 	int  optlen;
 
@@ -248,13 +248,13 @@
  */
 
 int ip_options_compile(struct net *net,
-		       struct ip_options * opt, struct sk_buff * skb)
+		       struct ip_options *opt, struct sk_buff *skb)
 {
 	int l;
-	unsigned char * iph;
-	unsigned char * optptr;
+	unsigned char *iph;
+	unsigned char *optptr;
 	int optlen;
-	unsigned char * pp_ptr = NULL;
+	unsigned char *pp_ptr = NULL;
 	struct rtable *rt = NULL;
 
 	if (skb != NULL) {
@@ -413,7 +413,7 @@
 					opt->is_changed = 1;
 				}
 			} else {
-				unsigned overflow = optptr[3]>>4;
+				unsigned int overflow = optptr[3]>>4;
 				if (overflow == 15) {
 					pp_ptr = optptr + 3;
 					goto error;
@@ -473,20 +473,20 @@
  *	Undo all the changes done by ip_options_compile().
  */
 
-void ip_options_undo(struct ip_options * opt)
+void ip_options_undo(struct ip_options *opt)
 {
 	if (opt->srr) {
-		unsigned  char * optptr = opt->__data+opt->srr-sizeof(struct  iphdr);
+		unsigned  char *optptr = opt->__data+opt->srr-sizeof(struct  iphdr);
 		memmove(optptr+7, optptr+3, optptr[1]-7);
 		memcpy(optptr+3, &opt->faddr, 4);
 	}
 	if (opt->rr_needaddr) {
-		unsigned  char * optptr = opt->__data+opt->rr-sizeof(struct  iphdr);
+		unsigned  char *optptr = opt->__data+opt->rr-sizeof(struct  iphdr);
 		optptr[2] -= 4;
 		memset(&optptr[optptr[2]-1], 0, 4);
 	}
 	if (opt->ts) {
-		unsigned  char * optptr = opt->__data+opt->ts-sizeof(struct  iphdr);
+		unsigned  char *optptr = opt->__data+opt->ts-sizeof(struct  iphdr);
 		if (opt->ts_needtime) {
 			optptr[2] -= 4;
 			memset(&optptr[optptr[2]-1], 0, 4);
@@ -549,8 +549,8 @@
 
 void ip_forward_options(struct sk_buff *skb)
 {
-	struct   ip_options * opt	= &(IPCB(skb)->opt);
-	unsigned char * optptr;
+	struct   ip_options *opt	= &(IPCB(skb)->opt);
+	unsigned char *optptr;
 	struct rtable *rt = skb_rtable(skb);
 	unsigned char *raw = skb_network_header(skb);
 
@@ -578,8 +578,10 @@
 			ip_hdr(skb)->daddr = opt->nexthop;
 			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 			optptr[2] = srrptr+4;
-		} else if (net_ratelimit())
-			pr_crit("%s(): Argh! Destination lost!\n", __func__);
+		} else {
+			net_crit_ratelimited("%s(): Argh! Destination lost!\n",
+					     __func__);
+		}
 		if (opt->ts_needaddr) {
 			optptr = raw + opt->ts;
 			ip_rt_get_source(&optptr[optptr[2]-9], skb, rt);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4910176..451f97c 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -214,8 +214,8 @@
 	}
 	rcu_read_unlock();
 
-	if (net_ratelimit())
-		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
+	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
+			    __func__);
 	kfree_skb(skb);
 	return -EINVAL;
 }
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 2fd0fba..0d11f23 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -90,7 +90,7 @@
 static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
 {
 	unsigned char optbuf[sizeof(struct ip_options) + 40];
-	struct ip_options * opt = (struct ip_options *)optbuf;
+	struct ip_options *opt = (struct ip_options *)optbuf;
 
 	if (IPCB(skb)->opt.optlen == 0)
 		return;
@@ -147,7 +147,7 @@
 void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
 {
 	struct inet_sock *inet = inet_sk(skb->sk);
-	unsigned flags = inet->cmsg_flags;
+	unsigned int flags = inet->cmsg_flags;
 
 	/* Ordered by supposed usage frequency */
 	if (flags & 1)
@@ -673,10 +673,15 @@
 				break;
 		} else {
 			memset(&mreq, 0, sizeof(mreq));
-			if (optlen >= sizeof(struct in_addr) &&
-			    copy_from_user(&mreq.imr_address, optval,
-					   sizeof(struct in_addr)))
-				break;
+			if (optlen >= sizeof(struct ip_mreq)) {
+				if (copy_from_user(&mreq, optval,
+						   sizeof(struct ip_mreq)))
+					break;
+			} else if (optlen >= sizeof(struct in_addr)) {
+				if (copy_from_user(&mreq.imr_address, optval,
+						   sizeof(struct in_addr)))
+					break;
+			}
 		}
 
 		if (!mreq.imr_ifindex) {
@@ -1094,7 +1099,7 @@
  */
 
 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
-			    char __user *optval, int __user *optlen, unsigned flags)
+			    char __user *optval, int __user *optlen, unsigned int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	int val;
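The setsockopt() hunk above widens the accepted optval sizes so a full struct ip_mreq is copied in when the caller provides one, while the old bare struct in_addr form keeps working; judging from the imr_address/imr_ifindex handling in the context lines this is the IP_MULTICAST_IF case, so treat that attribution as an inference. From userspace the classic form is unchanged; a minimal sketch that selects the outgoing multicast interface by its local address (the address is a documentation placeholder):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    /* Route outgoing multicast on this UDP socket via the interface
     * whose local address is 192.0.2.1 (example address).
     */
    static int set_mcast_if(int fd)
    {
            struct in_addr local;

            if (inet_pton(AF_INET, "192.0.2.1", &local) != 1)
                    return -1;

            return setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF,
                              &local, sizeof(local));
    }
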
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 92ac7e7..67e8a6b 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -808,8 +808,6 @@
 	b->op = BOOTP_REQUEST;
 	if (dev->type < 256) /* check for false types */
 		b->htype = dev->type;
-	else if (dev->type == ARPHRD_IEEE802_TR) /* fix for token ring */
-		b->htype = ARPHRD_IEEE802;
 	else if (dev->type == ARPHRD_FDDI)
 		b->htype = ARPHRD_ETHER;
 	else {
@@ -955,8 +953,7 @@
 
 	/* Fragments are not supported */
 	if (ip_is_fragment(h)) {
-		if (net_ratelimit())
-			pr_err("DHCP/BOOTP: Ignoring fragmented reply\n");
+		net_err_ratelimited("DHCP/BOOTP: Ignoring fragmented reply\n");
 		goto drop;
 	}
 
@@ -1004,16 +1001,14 @@
 	/* Is it a reply to our BOOTP request? */
 	if (b->op != BOOTP_REPLY ||
 	    b->xid != d->xid) {
-		if (net_ratelimit())
-			pr_err("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n",
-			       b->op, b->xid);
+		net_err_ratelimited("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n",
+				    b->op, b->xid);
 		goto drop_unlock;
 	}
 
 	/* Is it a reply for the device we are configuring? */
 	if (b->xid != ic_dev_xid) {
-		if (net_ratelimit())
-			pr_err("DHCP/BOOTP: Ignoring delayed packet\n");
+		net_err_ratelimited("DHCP/BOOTP: Ignoring delayed packet\n");
 		goto drop_unlock;
 	}
 
@@ -1198,7 +1193,7 @@
 	d = ic_first_dev;
 	retries = CONF_SEND_RETRIES;
 	get_random_bytes(&timeout, sizeof(timeout));
-	timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM);
+	timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM);
 	for (;;) {
 		/* Track the device we are configuring */
 		ic_dev_xid = d->xid;
@@ -1626,11 +1621,13 @@
 
 	return 1;
 }
+__setup("ip=", ip_auto_config_setup);
 
 static int __init nfsaddrs_config_setup(char *addrs)
 {
 	return ip_auto_config_setup(addrs);
 }
+__setup("nfsaddrs=", nfsaddrs_config_setup);
 
 static int __init vendor_class_identifier_setup(char *addrs)
 {
@@ -1641,7 +1638,4 @@
 			vendor_class_identifier);
 	return 1;
 }
-
-__setup("ip=", ip_auto_config_setup);
-__setup("nfsaddrs=", nfsaddrs_config_setup);
 __setup("dhcpclass=", vendor_class_identifier_setup);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ae1413e..2d0f99b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -144,33 +144,48 @@
 
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_tstats {
-	unsigned long	rx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_packets;
-	unsigned long	tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+	struct u64_stats_sync	syncp;
+};
 
-static struct net_device_stats *ipip_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
+						  struct rtnl_link_stats64 *tot)
 {
-	struct pcpu_tstats sum = { 0 };
 	int i;
 
 	for_each_possible_cpu(i) {
 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+		unsigned int start;
 
-		sum.rx_packets += tstats->rx_packets;
-		sum.rx_bytes   += tstats->rx_bytes;
-		sum.tx_packets += tstats->tx_packets;
-		sum.tx_bytes   += tstats->tx_bytes;
+		do {
+			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			rx_packets = tstats->rx_packets;
+			tx_packets = tstats->tx_packets;
+			rx_bytes = tstats->rx_bytes;
+			tx_bytes = tstats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+		tot->rx_packets += rx_packets;
+		tot->tx_packets += tx_packets;
+		tot->rx_bytes   += rx_bytes;
+		tot->tx_bytes   += tx_bytes;
 	}
-	dev->stats.rx_packets = sum.rx_packets;
-	dev->stats.rx_bytes   = sum.rx_bytes;
-	dev->stats.tx_packets = sum.tx_packets;
-	dev->stats.tx_bytes   = sum.tx_bytes;
-	return &dev->stats;
+
+	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+	tot->tx_dropped = dev->stats.tx_dropped;
+	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+	tot->tx_errors = dev->stats.tx_errors;
+	tot->collisions = dev->stats.collisions;
+
+	return tot;
 }
 
-static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
+static struct ip_tunnel *ipip_tunnel_lookup(struct net *net,
 		__be32 remote, __be32 local)
 {
 	unsigned int h0 = HASH(remote);
@@ -245,7 +260,7 @@
 	rcu_assign_pointer(*tp, t);
 }
 
-static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
+static struct ip_tunnel *ipip_tunnel_locate(struct net *net,
 		struct ip_tunnel_parm *parms, int create)
 {
 	__be32 remote = parms->iph.daddr;
@@ -404,8 +419,10 @@
 		skb->pkt_type = PACKET_HOST;
 
 		tstats = this_cpu_ptr(tunnel->dev->tstats);
+		u64_stats_update_begin(&tstats->syncp);
 		tstats->rx_packets++;
 		tstats->rx_bytes += skb->len;
+		u64_stats_update_end(&tstats->syncp);
 
 		__skb_tunnel_rx(skb, tunnel->dev);
 
@@ -730,7 +747,7 @@
 	.ndo_start_xmit	= ipip_tunnel_xmit,
 	.ndo_do_ioctl	= ipip_tunnel_ioctl,
 	.ndo_change_mtu	= ipip_tunnel_change_mtu,
-	.ndo_get_stats  = ipip_get_stats,
+	.ndo_get_stats64 = ipip_get_stats64,
 };
 
 static void ipip_dev_free(struct net_device *dev)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 960fbfc3..a9e519a 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -949,8 +949,7 @@
 	ret = sock_queue_rcv_skb(mroute_sk, skb);
 	rcu_read_unlock();
 	if (ret < 0) {
-		if (net_ratelimit())
-			pr_warn("mroute: pending queue full, dropping entries\n");
+		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
 		kfree_skb(skb);
 	}
 
@@ -2119,15 +2118,16 @@
 	rtm->rtm_src_len  = 32;
 	rtm->rtm_tos      = 0;
 	rtm->rtm_table    = mrt->id;
-	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
+		goto nla_put_failure;
 	rtm->rtm_type     = RTN_MULTICAST;
 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
 	rtm->rtm_protocol = RTPROT_UNSPEC;
 	rtm->rtm_flags    = 0;
 
-	NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin);
-	NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp);
-
+	if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
+	    nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
+		goto nla_put_failure;
 	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
 		goto nla_put_failure;
 
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 4f47e06..ed1b367 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -12,7 +12,7 @@
 #include <net/netfilter/nf_queue.h>
 
 /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
-int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
+int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
 {
 	struct net *net = dev_net(skb_dst(skb)->dev);
 	const struct iphdr *iph = ip_hdr(skb);
@@ -237,13 +237,3 @@
 
 module_init(ipv4_netfilter_init);
 module_exit(ipv4_netfilter_fini);
-
-#ifdef CONFIG_SYSCTL
-struct ctl_path nf_net_ipv4_netfilter_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ .procname = "netfilter", },
-	{ }
-};
-EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path);
-#endif /* CONFIG_SYSCTL */
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 240b684..c20674d 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -66,6 +66,3 @@
 
 # just filtering instance of ARP tables for now
 obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
-
-obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o
-
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index fd7a3f6..97e61ea 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -221,9 +221,8 @@
 static unsigned int
 arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
 {
-	if (net_ratelimit())
-		pr_err("arp_tables: error: '%s'\n",
-		       (const char *)par->targinfo);
+	net_err_ratelimited("arp_tables: error: '%s'\n",
+			    (const char *)par->targinfo);
 
 	return NF_DROP;
 }
@@ -303,7 +302,7 @@
 			if (v < 0) {
 				/* Pop from stack? */
 				if (v != XT_RETURN) {
-					verdict = (unsigned)(-v) - 1;
+					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
 				e = back;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
deleted file mode 100644
index 94d45e1..0000000
--- a/net/ipv4/netfilter/ip_queue.c
+++ /dev/null
@@ -1,639 +0,0 @@
-/*
- * This is a module which is used for queueing IPv4 packets and
- * communicating with userspace via netlink.
- *
- * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
- * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/ip.h>
-#include <linux/notifier.h>
-#include <linux/netdevice.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_queue.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netlink.h>
-#include <linux/spinlock.h>
-#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/security.h>
-#include <linux/net.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-#include <net/sock.h>
-#include <net/route.h>
-#include <net/netfilter/nf_queue.h>
-#include <net/ip.h>
-
-#define IPQ_QMAX_DEFAULT 1024
-#define IPQ_PROC_FS_NAME "ip_queue"
-#define NET_IPQ_QMAX 2088
-#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"
-
-typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
-
-static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
-static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_SPINLOCK(queue_lock);
-static int peer_pid __read_mostly;
-static unsigned int copy_range __read_mostly;
-static unsigned int queue_total;
-static unsigned int queue_dropped = 0;
-static unsigned int queue_user_dropped = 0;
-static struct sock *ipqnl __read_mostly;
-static LIST_HEAD(queue_list);
-static DEFINE_MUTEX(ipqnl_mutex);
-
-static inline void
-__ipq_enqueue_entry(struct nf_queue_entry *entry)
-{
-       list_add_tail(&entry->list, &queue_list);
-       queue_total++;
-}
-
-static inline int
-__ipq_set_mode(unsigned char mode, unsigned int range)
-{
-	int status = 0;
-
-	switch(mode) {
-	case IPQ_COPY_NONE:
-	case IPQ_COPY_META:
-		copy_mode = mode;
-		copy_range = 0;
-		break;
-
-	case IPQ_COPY_PACKET:
-		if (range > 0xFFFF)
-			range = 0xFFFF;
-		copy_range = range;
-		copy_mode = mode;
-		break;
-
-	default:
-		status = -EINVAL;
-
-	}
-	return status;
-}
-
-static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);
-
-static inline void
-__ipq_reset(void)
-{
-	peer_pid = 0;
-	net_disable_timestamp();
-	__ipq_set_mode(IPQ_COPY_NONE, 0);
-	__ipq_flush(NULL, 0);
-}
-
-static struct nf_queue_entry *
-ipq_find_dequeue_entry(unsigned long id)
-{
-	struct nf_queue_entry *entry = NULL, *i;
-
-	spin_lock_bh(&queue_lock);
-
-	list_for_each_entry(i, &queue_list, list) {
-		if ((unsigned long)i == id) {
-			entry = i;
-			break;
-		}
-	}
-
-	if (entry) {
-		list_del(&entry->list);
-		queue_total--;
-	}
-
-	spin_unlock_bh(&queue_lock);
-	return entry;
-}
-
-static void
-__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
-{
-	struct nf_queue_entry *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &queue_list, list) {
-		if (!cmpfn || cmpfn(entry, data)) {
-			list_del(&entry->list);
-			queue_total--;
-			nf_reinject(entry, NF_DROP);
-		}
-	}
-}
-
-static void
-ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
-{
-	spin_lock_bh(&queue_lock);
-	__ipq_flush(cmpfn, data);
-	spin_unlock_bh(&queue_lock);
-}
-
-static struct sk_buff *
-ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
-{
-	sk_buff_data_t old_tail;
-	size_t size = 0;
-	size_t data_len = 0;
-	struct sk_buff *skb;
-	struct ipq_packet_msg *pmsg;
-	struct nlmsghdr *nlh;
-	struct timeval tv;
-
-	switch (ACCESS_ONCE(copy_mode)) {
-	case IPQ_COPY_META:
-	case IPQ_COPY_NONE:
-		size = NLMSG_SPACE(sizeof(*pmsg));
-		break;
-
-	case IPQ_COPY_PACKET:
-		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-		    (*errp = skb_checksum_help(entry->skb)))
-			return NULL;
-
-		data_len = ACCESS_ONCE(copy_range);
-		if (data_len == 0 || data_len > entry->skb->len)
-			data_len = entry->skb->len;
-
-		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
-		break;
-
-	default:
-		*errp = -EINVAL;
-		return NULL;
-	}
-
-	skb = alloc_skb(size, GFP_ATOMIC);
-	if (!skb)
-		goto nlmsg_failure;
-
-	old_tail = skb->tail;
-	nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
-	pmsg = NLMSG_DATA(nlh);
-	memset(pmsg, 0, sizeof(*pmsg));
-
-	pmsg->packet_id       = (unsigned long )entry;
-	pmsg->data_len        = data_len;
-	tv = ktime_to_timeval(entry->skb->tstamp);
-	pmsg->timestamp_sec   = tv.tv_sec;
-	pmsg->timestamp_usec  = tv.tv_usec;
-	pmsg->mark            = entry->skb->mark;
-	pmsg->hook            = entry->hook;
-	pmsg->hw_protocol     = entry->skb->protocol;
-
-	if (entry->indev)
-		strcpy(pmsg->indev_name, entry->indev->name);
-	else
-		pmsg->indev_name[0] = '\0';
-
-	if (entry->outdev)
-		strcpy(pmsg->outdev_name, entry->outdev->name);
-	else
-		pmsg->outdev_name[0] = '\0';
-
-	if (entry->indev && entry->skb->dev &&
-	    entry->skb->mac_header != entry->skb->network_header) {
-		pmsg->hw_type = entry->skb->dev->type;
-		pmsg->hw_addrlen = dev_parse_header(entry->skb,
-						    pmsg->hw_addr);
-	}
-
-	if (data_len)
-		if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
-			BUG();
-
-	nlh->nlmsg_len = skb->tail - old_tail;
-	return skb;
-
-nlmsg_failure:
-	kfree_skb(skb);
-	*errp = -EINVAL;
-	printk(KERN_ERR "ip_queue: error creating packet message\n");
-	return NULL;
-}
-
-static int
-ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
-{
-	int status = -EINVAL;
-	struct sk_buff *nskb;
-
-	if (copy_mode == IPQ_COPY_NONE)
-		return -EAGAIN;
-
-	nskb = ipq_build_packet_message(entry, &status);
-	if (nskb == NULL)
-		return status;
-
-	spin_lock_bh(&queue_lock);
-
-	if (!peer_pid)
-		goto err_out_free_nskb;
-
-	if (queue_total >= queue_maxlen) {
-		queue_dropped++;
-		status = -ENOSPC;
-		if (net_ratelimit())
-			  printk (KERN_WARNING "ip_queue: full at %d entries, "
-				  "dropping packets(s). Dropped: %d\n", queue_total,
-				  queue_dropped);
-		goto err_out_free_nskb;
-	}
-
-	/* netlink_unicast will either free the nskb or attach it to a socket */
-	status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
-	if (status < 0) {
-		queue_user_dropped++;
-		goto err_out_unlock;
-	}
-
-	__ipq_enqueue_entry(entry);
-
-	spin_unlock_bh(&queue_lock);
-	return status;
-
-err_out_free_nskb:
-	kfree_skb(nskb);
-
-err_out_unlock:
-	spin_unlock_bh(&queue_lock);
-	return status;
-}
-
-static int
-ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
-{
-	int diff;
-	struct iphdr *user_iph = (struct iphdr *)v->payload;
-	struct sk_buff *nskb;
-
-	if (v->data_len < sizeof(*user_iph))
-		return 0;
-	diff = v->data_len - e->skb->len;
-	if (diff < 0) {
-		if (pskb_trim(e->skb, v->data_len))
-			return -ENOMEM;
-	} else if (diff > 0) {
-		if (v->data_len > 0xFFFF)
-			return -EINVAL;
-		if (diff > skb_tailroom(e->skb)) {
-			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
-					       diff, GFP_ATOMIC);
-			if (!nskb) {
-				printk(KERN_WARNING "ip_queue: error "
-				      "in mangle, dropping packet\n");
-				return -ENOMEM;
-			}
-			kfree_skb(e->skb);
-			e->skb = nskb;
-		}
-		skb_put(e->skb, diff);
-	}
-	if (!skb_make_writable(e->skb, v->data_len))
-		return -ENOMEM;
-	skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
-	e->skb->ip_summed = CHECKSUM_NONE;
-
-	return 0;
-}
-
-static int
-ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
-{
-	struct nf_queue_entry *entry;
-
-	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
-		return -EINVAL;
-
-	entry = ipq_find_dequeue_entry(vmsg->id);
-	if (entry == NULL)
-		return -ENOENT;
-	else {
-		int verdict = vmsg->value;
-
-		if (vmsg->data_len && vmsg->data_len == len)
-			if (ipq_mangle_ipv4(vmsg, entry) < 0)
-				verdict = NF_DROP;
-
-		nf_reinject(entry, verdict);
-		return 0;
-	}
-}
-
-static int
-ipq_set_mode(unsigned char mode, unsigned int range)
-{
-	int status;
-
-	spin_lock_bh(&queue_lock);
-	status = __ipq_set_mode(mode, range);
-	spin_unlock_bh(&queue_lock);
-	return status;
-}
-
-static int
-ipq_receive_peer(struct ipq_peer_msg *pmsg,
-		 unsigned char type, unsigned int len)
-{
-	int status = 0;
-
-	if (len < sizeof(*pmsg))
-		return -EINVAL;
-
-	switch (type) {
-	case IPQM_MODE:
-		status = ipq_set_mode(pmsg->msg.mode.value,
-				      pmsg->msg.mode.range);
-		break;
-
-	case IPQM_VERDICT:
-		status = ipq_set_verdict(&pmsg->msg.verdict,
-					 len - sizeof(*pmsg));
-		break;
-	default:
-		status = -EINVAL;
-	}
-	return status;
-}
-
-static int
-dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
-{
-	if (entry->indev)
-		if (entry->indev->ifindex == ifindex)
-			return 1;
-	if (entry->outdev)
-		if (entry->outdev->ifindex == ifindex)
-			return 1;
-#ifdef CONFIG_BRIDGE_NETFILTER
-	if (entry->skb->nf_bridge) {
-		if (entry->skb->nf_bridge->physindev &&
-		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
-			return 1;
-		if (entry->skb->nf_bridge->physoutdev &&
-		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
-			return 1;
-	}
-#endif
-	return 0;
-}
-
-static void
-ipq_dev_drop(int ifindex)
-{
-	ipq_flush(dev_cmp, ifindex);
-}
-
-#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
-
-static inline void
-__ipq_rcv_skb(struct sk_buff *skb)
-{
-	int status, type, pid, flags;
-	unsigned int nlmsglen, skblen;
-	struct nlmsghdr *nlh;
-	bool enable_timestamp = false;
-
-	skblen = skb->len;
-	if (skblen < sizeof(*nlh))
-		return;
-
-	nlh = nlmsg_hdr(skb);
-	nlmsglen = nlh->nlmsg_len;
-	if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
-		return;
-
-	pid = nlh->nlmsg_pid;
-	flags = nlh->nlmsg_flags;
-
-	if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
-		RCV_SKB_FAIL(-EINVAL);
-
-	if (flags & MSG_TRUNC)
-		RCV_SKB_FAIL(-ECOMM);
-
-	type = nlh->nlmsg_type;
-	if (type < NLMSG_NOOP || type >= IPQM_MAX)
-		RCV_SKB_FAIL(-EINVAL);
-
-	if (type <= IPQM_BASE)
-		return;
-
-	if (!capable(CAP_NET_ADMIN))
-		RCV_SKB_FAIL(-EPERM);
-
-	spin_lock_bh(&queue_lock);
-
-	if (peer_pid) {
-		if (peer_pid != pid) {
-			spin_unlock_bh(&queue_lock);
-			RCV_SKB_FAIL(-EBUSY);
-		}
-	} else {
-		enable_timestamp = true;
-		peer_pid = pid;
-	}
-
-	spin_unlock_bh(&queue_lock);
-	if (enable_timestamp)
-		net_enable_timestamp();
-	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
-				  nlmsglen - NLMSG_LENGTH(0));
-	if (status < 0)
-		RCV_SKB_FAIL(status);
-
-	if (flags & NLM_F_ACK)
-		netlink_ack(skb, nlh, 0);
-}
-
-static void
-ipq_rcv_skb(struct sk_buff *skb)
-{
-	mutex_lock(&ipqnl_mutex);
-	__ipq_rcv_skb(skb);
-	mutex_unlock(&ipqnl_mutex);
-}
-
-static int
-ipq_rcv_dev_event(struct notifier_block *this,
-		  unsigned long event, void *ptr)
-{
-	struct net_device *dev = ptr;
-
-	if (!net_eq(dev_net(dev), &init_net))
-		return NOTIFY_DONE;
-
-	/* Drop any packets associated with the downed device */
-	if (event == NETDEV_DOWN)
-		ipq_dev_drop(dev->ifindex);
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block ipq_dev_notifier = {
-	.notifier_call	= ipq_rcv_dev_event,
-};
-
-static int
-ipq_rcv_nl_event(struct notifier_block *this,
-		 unsigned long event, void *ptr)
-{
-	struct netlink_notify *n = ptr;
-
-	if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
-		spin_lock_bh(&queue_lock);
-		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
-			__ipq_reset();
-		spin_unlock_bh(&queue_lock);
-	}
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block ipq_nl_notifier = {
-	.notifier_call	= ipq_rcv_nl_event,
-};
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *ipq_sysctl_header;
-
-static ctl_table ipq_table[] = {
-	{
-		.procname	= NET_IPQ_QMAX_NAME,
-		.data		= &queue_maxlen,
-		.maxlen		= sizeof(queue_maxlen),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec
-	},
-	{ }
-};
-#endif
-
-#ifdef CONFIG_PROC_FS
-static int ip_queue_show(struct seq_file *m, void *v)
-{
-	spin_lock_bh(&queue_lock);
-
-	seq_printf(m,
-		      "Peer PID          : %d\n"
-		      "Copy mode         : %hu\n"
-		      "Copy range        : %u\n"
-		      "Queue length      : %u\n"
-		      "Queue max. length : %u\n"
-		      "Queue dropped     : %u\n"
-		      "Netlink dropped   : %u\n",
-		      peer_pid,
-		      copy_mode,
-		      copy_range,
-		      queue_total,
-		      queue_maxlen,
-		      queue_dropped,
-		      queue_user_dropped);
-
-	spin_unlock_bh(&queue_lock);
-	return 0;
-}
-
-static int ip_queue_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, ip_queue_show, NULL);
-}
-
-static const struct file_operations ip_queue_proc_fops = {
-	.open		= ip_queue_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.owner		= THIS_MODULE,
-};
-#endif
-
-static const struct nf_queue_handler nfqh = {
-	.name	= "ip_queue",
-	.outfn	= &ipq_enqueue_packet,
-};
-
-static int __init ip_queue_init(void)
-{
-	int status = -ENOMEM;
-	struct proc_dir_entry *proc __maybe_unused;
-
-	netlink_register_notifier(&ipq_nl_notifier);
-	ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
-				      ipq_rcv_skb, NULL, THIS_MODULE);
-	if (ipqnl == NULL) {
-		printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
-		goto cleanup_netlink_notifier;
-	}
-
-#ifdef CONFIG_PROC_FS
-	proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
-			   &ip_queue_proc_fops);
-	if (!proc) {
-		printk(KERN_ERR "ip_queue: failed to create proc entry\n");
-		goto cleanup_ipqnl;
-	}
-#endif
-	register_netdevice_notifier(&ipq_dev_notifier);
-#ifdef CONFIG_SYSCTL
-	ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table);
-#endif
-	status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh);
-	if (status < 0) {
-		printk(KERN_ERR "ip_queue: failed to register queue handler\n");
-		goto cleanup_sysctl;
-	}
-	return status;
-
-cleanup_sysctl:
-#ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(ipq_sysctl_header);
-#endif
-	unregister_netdevice_notifier(&ipq_dev_notifier);
-	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
-cleanup_ipqnl: __maybe_unused
-	netlink_kernel_release(ipqnl);
-	mutex_lock(&ipqnl_mutex);
-	mutex_unlock(&ipqnl_mutex);
-
-cleanup_netlink_notifier:
-	netlink_unregister_notifier(&ipq_nl_notifier);
-	return status;
-}
-
-static void __exit ip_queue_fini(void)
-{
-	nf_unregister_queue_handlers(&nfqh);
-
-	ipq_flush(NULL, 0);
-
-#ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(ipq_sysctl_header);
-#endif
-	unregister_netdevice_notifier(&ipq_dev_notifier);
-	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
-
-	netlink_kernel_release(ipqnl);
-	mutex_lock(&ipqnl_mutex);
-	mutex_unlock(&ipqnl_mutex);
-
-	netlink_unregister_notifier(&ipq_nl_notifier);
-}
-
-MODULE_DESCRIPTION("IPv4 packet queue handler");
-MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL);
-
-module_init(ip_queue_init);
-module_exit(ip_queue_fini);
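With ip_queue gone, userspace that queued packets over NETLINK_FIREWALL needs to move to nfnetlink_queue: an iptables NFQUEUE rule on the kernel side and libnetfilter_queue in the daemon. The sketch below assumes the stock libnetfilter_queue API (nfq_open(), nfq_create_queue(), nfq_set_verdict() and friends); it is a migration illustration, not code from this tree, so check the library headers for the exact prototypes:

    #include <stdint.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>
    #include <linux/netfilter.h>            /* NF_ACCEPT */
    #include <libnetfilter_queue/libnetfilter_queue.h>

    /* Accept every packet delivered to queue 0. */
    static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
                  struct nfq_data *nfa, void *data)
    {
            struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
            uint32_t id = ph ? ntohl(ph->packet_id) : 0;

            return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
    }

    int main(void)
    {
            struct nfq_handle *h = nfq_open();
            struct nfq_q_handle *qh;
            char buf[4096];
            int fd, rv;

            if (!h)
                    return 1;
            nfq_bind_pf(h, AF_INET);
            qh = nfq_create_queue(h, 0, &cb, NULL);
            if (!qh)
                    return 1;
            nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

            fd = nfq_fd(h);
            while ((rv = recv(fd, buf, sizeof(buf), 0)) > 0)
                    nfq_handle_packet(h, buf, rv);

            nfq_destroy_queue(qh);
            nfq_close(h);
            return 0;
    }
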
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 24e556e..170b1fd 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -153,8 +153,7 @@
 static unsigned int
 ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
 {
-	if (net_ratelimit())
-		pr_info("error: `%s'\n", (const char *)par->targinfo);
+	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
 
 	return NF_DROP;
 }
@@ -377,7 +376,7 @@
 			if (v < 0) {
 				/* Pop from stack? */
 				if (v != XT_RETURN) {
-					verdict = (unsigned)(-v) - 1;
+					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
 				if (*stackptr <= origptr) {
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index a639967..fe5daea 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -246,8 +246,7 @@
 			dport = ports[1];
 		}
 	} else {
-		if (net_ratelimit())
-			pr_info("unknown protocol %u\n", iph->protocol);
+		net_info_ratelimited("unknown protocol %u\n", iph->protocol);
 	}
 
 	switch (config->hash_mode) {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index cf73cc7..91747d4 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -311,8 +311,9 @@
 static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
 				const struct nf_conntrack_tuple *tuple)
 {
-	NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip);
-	NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip);
+	if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
+	    nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -364,7 +365,7 @@
 	.nla_policy	 = ipv4_nla_policy,
 #endif
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
-	.ctl_table_path  = nf_net_ipv4_netfilter_sysctl_path,
+	.ctl_table_path  = "net/ipv4/netfilter",
 	.ctl_table	 = ip_ct_sysctl_table,
 #endif
 	.me		 = THIS_MODULE,
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7cbe9cb..0847e37 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -228,10 +228,10 @@
 static int icmp_tuple_to_nlattr(struct sk_buff *skb,
 				const struct nf_conntrack_tuple *t)
 {
-	NLA_PUT_BE16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id);
-	NLA_PUT_U8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type);
-	NLA_PUT_U8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code);
-
+	if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) ||
+	    nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) ||
+	    nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -293,8 +293,8 @@
 {
 	const unsigned int *timeout = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ));
-
+	if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
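The NLA_PUT*() macros jump to a local nla_put_failure label behind the caller's back; the conversions in ip_gre, ipmr and the conntrack protocol helpers above replace them with the plain nla_put_*() functions and an explicit check, chaining the calls with || so one test covers the whole group. The resulting idiom as a stand-alone sketch (struct and attribute names are placeholders):

    struct example {
            u32     id;
            __be32  addr;
            u8      flags;
    };

    static int example_fill_info(struct sk_buff *skb, const struct example *e)
    {
            /* nla_put_*() return non-zero once the skb runs out of tailroom */
            if (nla_put_u32(skb, EXAMPLE_ATTR_ID, e->id) ||
                nla_put_be32(skb, EXAMPLE_ATTR_ADDR, e->addr) ||
                nla_put_u8(skb, EXAMPLE_ATTR_FLAGS, e->flags))
                    goto nla_put_failure;
            return 0;

    nla_put_failure:
            return -EMSGSIZE;
    }
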
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 8253670..cad29c1 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -42,9 +42,7 @@
 		if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
 					      addroff, sizeof(buf),
 					      (char *) &buf, sizeof(buf))) {
-			if (net_ratelimit())
-				pr_notice("nf_nat_h323: nf_nat_mangle_tcp_packet"
-				       " error\n");
+			net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_tcp_packet error\n");
 			return -1;
 		}
 
@@ -58,9 +56,7 @@
 		if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
 					      addroff, sizeof(buf),
 					      (char *) &buf, sizeof(buf))) {
-			if (net_ratelimit())
-				pr_notice("nf_nat_h323: nf_nat_mangle_udp_packet"
-				       " error\n");
+			net_notice_ratelimited("nf_nat_h323: nf_nat_mangle_udp_packet error\n");
 			return -1;
 		}
 		/* nf_nat_mangle_udp_packet uses skb_make_writable() to copy
@@ -214,8 +210,7 @@
 
 	/* Run out of expectations */
 	if (i >= H323_RTP_CHANNEL_MAX) {
-		if (net_ratelimit())
-			pr_notice("nf_nat_h323: out of expectations\n");
+		net_notice_ratelimited("nf_nat_h323: out of expectations\n");
 		return 0;
 	}
 
@@ -244,8 +239,7 @@
 	}
 
 	if (nated_port == 0) {	/* No port available */
-		if (net_ratelimit())
-			pr_notice("nf_nat_h323: out of RTP ports\n");
+		net_notice_ratelimited("nf_nat_h323: out of RTP ports\n");
 		return 0;
 	}
 
@@ -308,8 +302,7 @@
 	}
 
 	if (nated_port == 0) {	/* No port available */
-		if (net_ratelimit())
-			pr_notice("nf_nat_h323: out of TCP ports\n");
+		net_notice_ratelimited("nf_nat_h323: out of TCP ports\n");
 		return 0;
 	}
 
@@ -365,8 +358,7 @@
 	}
 
 	if (nated_port == 0) {	/* No port available */
-		if (net_ratelimit())
-			pr_notice("nf_nat_q931: out of TCP ports\n");
+		net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
 		return 0;
 	}
 
@@ -456,8 +448,7 @@
 	}
 
 	if (nated_port == 0) {	/* No port available */
-		if (net_ratelimit())
-			pr_notice("nf_nat_ras: out of TCP ports\n");
+		net_notice_ratelimited("nf_nat_ras: out of TCP ports\n");
 		return 0;
 	}
 
@@ -545,8 +536,7 @@
 	}
 
 	if (nated_port == 0) {	/* No port available */
-		if (net_ratelimit())
-			pr_notice("nf_nat_q931: out of TCP ports\n");
+		net_notice_ratelimited("nf_nat_q931: out of TCP ports\n");
 		return 0;
 	}
 
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 57932c4..ea4a2381 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -283,7 +283,7 @@
 	__be32 newip;
 	u_int16_t port;
 	char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
-	unsigned buflen;
+	unsigned int buflen;
 
 	/* Connection will come from reply */
 	if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip)
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 2133c30..746edec 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1206,8 +1206,7 @@
 
 	if (!snmp_parse_mangle((unsigned char *)udph + sizeof(struct udphdr),
 			       paylen, &map, &udph->check)) {
-		if (net_ratelimit())
-			printk(KERN_WARNING "bsalg: parser failed\n");
+		net_warn_ratelimited("bsalg: parser failed\n");
 		return NF_DROP;
 	}
 	return NF_ACCEPT;
@@ -1241,9 +1240,8 @@
 	 * can mess around with the payload.
 	 */
 	if (ntohs(udph->len) != skb->len - (iph->ihl << 2)) {
-		 if (net_ratelimit())
-			 printk(KERN_WARNING "SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
-				&iph->saddr, &iph->daddr);
+		net_warn_ratelimited("SNMP: dropping malformed packet src=%pI4 dst=%pI4\n",
+				     &iph->saddr, &iph->daddr);
 		 return NF_DROP;
 	}
 
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 50009c7..6e930c7 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -51,15 +51,16 @@
 
 static u16 ping_port_rover;
 
-static inline int ping_hashfn(struct net *net, unsigned num, unsigned mask)
+static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask)
 {
 	int res = (num + net_hash_mix(net)) & mask;
+
 	pr_debug("hash(%d) = %d\n", num, res);
 	return res;
 }
 
 static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
-					     struct net *net, unsigned num)
+					     struct net *net, unsigned int num)
 {
 	return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
 }
@@ -188,7 +189,8 @@
 					  gid_t *high)
 {
 	gid_t *data = net->ipv4.sysctl_ping_group_range;
-	unsigned seq;
+	unsigned int seq;
+
 	do {
 		seq = read_seqbegin(&sysctl_local_ports.lock);
 
@@ -410,7 +412,7 @@
 	__wsum wcheck;
 };
 
-static int ping_getfrag(void *from, char * to,
+static int ping_getfrag(void *from, char *to,
 			int offset, int fraglen, int odd, struct sk_buff *skb)
 {
 	struct pingfakehdr *pfh = (struct pingfakehdr *)from;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bbd604c..4032b81 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -288,7 +288,7 @@
 	read_unlock(&raw_v4_hashinfo.lock);
 }
 
-static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
+static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	/* Charge it to the socket. */
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 167ea10..ffcb3b0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -109,6 +109,7 @@
 #include <net/rtnetlink.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
+#include <linux/kmemleak.h>
 #endif
 #include <net/secure_seq.h>
 
@@ -229,7 +230,7 @@
 	TC_PRIO_INTERACTIVE_BULK,
 	ECN_OR_COST(INTERACTIVE_BULK)
 };
-
+EXPORT_SYMBOL(ip_tos2prio);
 
 /*
  * Route cache.
@@ -296,7 +297,7 @@
 #endif
 
 static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
-static unsigned			rt_hash_mask __read_mostly;
+static unsigned int		rt_hash_mask __read_mostly;
 static unsigned int		rt_hash_log  __read_mostly;
 
 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
@@ -959,8 +960,7 @@
 
 static void rt_emergency_hash_rebuild(struct net *net)
 {
-	if (net_ratelimit())
-		pr_warn("Route hash chain too long!\n");
+	net_warn_ratelimited("Route hash chain too long!\n");
 	rt_cache_invalidate(net);
 }
 
@@ -1083,8 +1083,7 @@
 		goto out;
 	if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
 		goto out;
-	if (net_ratelimit())
-		pr_warn("dst cache overflow\n");
+	net_warn_ratelimited("dst cache overflow\n");
 	RT_CACHE_STAT_INC(gc_dst_overflow);
 	return 1;
 
@@ -1143,7 +1142,7 @@
 	return 0;
 }
 
-static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
+static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
 				     struct sk_buff *skb, int ifindex)
 {
 	struct rtable	*rth, *cand;
@@ -1181,8 +1180,7 @@
 		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
 			int err = rt_bind_neighbour(rt);
 			if (err) {
-				if (net_ratelimit())
-					pr_warn("Neighbour table failure & not caching routes\n");
+				net_warn_ratelimited("Neighbour table failure & not caching routes\n");
 				ip_rt_put(rt);
 				return ERR_PTR(err);
 			}
@@ -1298,8 +1296,7 @@
 				goto restart;
 			}
 
-			if (net_ratelimit())
-				pr_warn("Neighbour table overflow\n");
+			net_warn_ratelimited("Neighbour table overflow\n");
 			rt_drop(rt);
 			return ERR_PTR(-ENOBUFS);
 		}
@@ -1377,14 +1374,13 @@
 			return;
 		}
 	} else if (!rt)
-		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
-		       __builtin_return_address(0));
+		pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
 
 	ip_select_fb_ident(iph);
 }
 EXPORT_SYMBOL(__ip_select_ident);
 
-static void rt_del(unsigned hash, struct rtable *rt)
+static void rt_del(unsigned int hash, struct rtable *rt)
 {
 	struct rtable __rcu **rthp;
 	struct rtable *aux;
@@ -1502,11 +1498,11 @@
 
 reject_redirect:
 #ifdef CONFIG_IP_ROUTE_VERBOSE
-	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
-		pr_info("Redirect from %pI4 on %s about %pI4 ignored\n"
-			"  Advised path = %pI4 -> %pI4\n",
-			&old_gw, dev->name, &new_gw,
-			&saddr, &daddr);
+	if (IN_DEV_LOG_MARTIANS(in_dev))
+		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
+				     "  Advised path = %pI4 -> %pI4\n",
+				     &old_gw, dev->name, &new_gw,
+				     &saddr, &daddr);
 #endif
 	;
 }
@@ -1538,7 +1534,7 @@
 			ip_rt_put(rt);
 			ret = NULL;
 		} else if (rt->rt_flags & RTCF_REDIRECTED) {
-			unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
+			unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
 						rt->rt_oif,
 						rt_genid(dev_net(dst->dev)));
 			rt_del(hash, rt);
@@ -1616,11 +1612,10 @@
 		++peer->rate_tokens;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
-		    peer->rate_tokens == ip_rt_redirect_number &&
-		    net_ratelimit())
-			pr_warn("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
-				&ip_hdr(skb)->saddr, rt->rt_iif,
-				&rt->rt_dst, &rt->rt_gateway);
+		    peer->rate_tokens == ip_rt_redirect_number)
+			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+					     &ip_hdr(skb)->saddr, rt->rt_iif,
+					     &rt->rt_dst, &rt->rt_gateway);
 #endif
 	}
 }
@@ -1843,9 +1838,9 @@
 
 static int ip_rt_bug(struct sk_buff *skb)
 {
-	printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
-		&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
-		skb->dev ? skb->dev->name : "?");
+	pr_debug("%s: %pI4 -> %pI4, %s\n",
+		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
+		 skb->dev ? skb->dev->name : "?");
 	kfree_skb(skb);
 	WARN_ON(1);
 	return 0;
@@ -2134,8 +2129,7 @@
 	/* get a working reference to the output device */
 	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
 	if (out_dev == NULL) {
-		if (net_ratelimit())
-			pr_crit("Bug in ip_route_input_slow(). Please report.\n");
+		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
 		return -EINVAL;
 	}
 
@@ -2215,9 +2209,9 @@
 			    struct in_device *in_dev,
 			    __be32 daddr, __be32 saddr, u32 tos)
 {
-	struct rtable* rth = NULL;
+	struct rtable *rth = NULL;
 	int err;
-	unsigned hash;
+	unsigned int hash;
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	if (res->fi && res->fi->fib_nhs > 1)
@@ -2255,13 +2249,13 @@
 	struct fib_result res;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	struct flowi4	fl4;
-	unsigned	flags = 0;
+	unsigned int	flags = 0;
 	u32		itag = 0;
-	struct rtable * rth;
-	unsigned	hash;
+	struct rtable	*rth;
+	unsigned int	hash;
 	__be32		spec_dst;
 	int		err = -EINVAL;
-	struct net    * net = dev_net(dev);
+	struct net    *net = dev_net(dev);
 
 	/* IP on this device is disabled. */
 
@@ -2406,9 +2400,9 @@
 martian_destination:
 	RT_CACHE_STAT_INC(in_martian_dst);
 #ifdef CONFIG_IP_ROUTE_VERBOSE
-	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
-		pr_warn("martian destination %pI4 from %pI4, dev %s\n",
-			&daddr, &saddr, dev->name);
+	if (IN_DEV_LOG_MARTIANS(in_dev))
+		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
+				     &daddr, &saddr, dev->name);
 #endif
 
 e_hostunreach:
@@ -2433,8 +2427,8 @@
 int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 			   u8 tos, struct net_device *dev, bool noref)
 {
-	struct rtable * rth;
-	unsigned	hash;
+	struct rtable	*rth;
+	unsigned int	hash;
 	int iif = dev->ifindex;
 	struct net *net;
 	int res;
@@ -2972,7 +2966,8 @@
 	r->rtm_src_len	= 0;
 	r->rtm_tos	= rt->rt_key_tos;
 	r->rtm_table	= RT_TABLE_MAIN;
-	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
+	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
+		goto nla_put_failure;
 	r->rtm_type	= rt->rt_type;
 	r->rtm_scope	= RT_SCOPE_UNIVERSE;
 	r->rtm_protocol = RTPROT_UNSPEC;
@@ -2980,31 +2975,38 @@
 	if (rt->rt_flags & RTCF_NOTIFY)
 		r->rtm_flags |= RTM_F_NOTIFY;
 
-	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
-
+	if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
+		goto nla_put_failure;
 	if (rt->rt_key_src) {
 		r->rtm_src_len = 32;
-		NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
+		if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
+			goto nla_put_failure;
 	}
-	if (rt->dst.dev)
-		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
+	if (rt->dst.dev &&
+	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
+		goto nla_put_failure;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-	if (rt->dst.tclassid)
-		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
+	if (rt->dst.tclassid &&
+	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
+		goto nla_put_failure;
 #endif
-	if (rt_is_input_route(rt))
-		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
-	else if (rt->rt_src != rt->rt_key_src)
-		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
-
-	if (rt->rt_dst != rt->rt_gateway)
-		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
+	if (rt_is_input_route(rt)) {
+		if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst))
+			goto nla_put_failure;
+	} else if (rt->rt_src != rt->rt_key_src) {
+		if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
+			goto nla_put_failure;
+	}
+	if (rt->rt_dst != rt->rt_gateway &&
+	    nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
+		goto nla_put_failure;
 
 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
 		goto nla_put_failure;
 
-	if (rt->rt_mark)
-		NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
+	if (rt->rt_mark &&
+	    nla_put_be32(skb, RTA_MARK, rt->rt_mark))
+		goto nla_put_failure;
 
 	error = rt->dst.error;
 	if (peer) {
@@ -3045,7 +3047,8 @@
 			}
 		} else
 #endif
-			NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
+			if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
+				goto nla_put_failure;
 	}
 
 	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
@@ -3059,7 +3062,7 @@
 	return -EMSGSIZE;
 }
 
-static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
+static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
 {
 	struct net *net = sock_net(in_skb->sk);
 	struct rtmsg *rtm;
@@ -3334,23 +3337,6 @@
 	{ }
 };
 
-static struct ctl_table empty[1];
-
-static struct ctl_table ipv4_skeleton[] =
-{
-	{ .procname = "route", 
-	  .mode = 0555, .child = ipv4_route_table},
-	{ .procname = "neigh", 
-	  .mode = 0555, .child = empty},
-	{ }
-};
-
-static __net_initdata struct ctl_path ipv4_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ },
-};
-
 static struct ctl_table ipv4_route_flush_table[] = {
 	{
 		.procname	= "flush",
@@ -3361,13 +3347,6 @@
 	{ },
 };
 
-static __net_initdata struct ctl_path ipv4_route_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ .procname = "route", },
-	{ },
-};
-
 static __net_init int sysctl_route_net_init(struct net *net)
 {
 	struct ctl_table *tbl;
@@ -3380,8 +3359,7 @@
 	}
 	tbl[0].extra1 = net;
 
-	net->ipv4.route_hdr =
-		register_net_sysctl_table(net, ipv4_route_path, tbl);
+	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
 	if (net->ipv4.route_hdr == NULL)
 		goto err_reg;
 	return 0;
@@ -3430,9 +3408,15 @@
 static __initdata unsigned long rhash_entries;
 static int __init set_rhash_entries(char *str)
 {
+	ssize_t ret;
+
 	if (!str)
 		return 0;
-	rhash_entries = simple_strtoul(str, &str, 0);
+
+	ret = kstrtoul(str, 0, &rhash_entries);
+	if (ret)
+		return 0;
+
 	return 1;
 }
 __setup("rhash_entries=", set_rhash_entries);
@@ -3505,6 +3489,6 @@
  */
 void __init ip_static_sysctl_init(void)
 {
-	register_sysctl_paths(ipv4_path, ipv4_skeleton);
+	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
 }
 #endif
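The sysctl conversions here and below drop the struct ctl_path arrays ("net", "ipv4", "route", ...) in favour of register_net_sysctl(), which takes the mount point as a plain string; teardown goes through unregister_net_sysctl_table(). A minimal sketch of the new-style registration (names are placeholders, the real tables stay in their subsystems):

    static int example_value;

    static struct ctl_table example_table[] = {
            {
                    .procname       = "example_value",      /* placeholder knob */
                    .data           = &example_value,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec,
            },
            { }
    };

    static struct ctl_table_header *example_hdr;

    static int example_sysctl_register(struct net *net)
    {
            /* the path is a plain string now, no ctl_path array needed */
            example_hdr = register_net_sysctl(net, "net/ipv4", example_table);
            return example_hdr ? 0 : -ENOMEM;
    }

    static void example_sysctl_unregister(void)
    {
            unregister_net_sysctl_table(example_hdr);
    }
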
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7a7724d..ef32956 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -27,6 +27,7 @@
 #include <net/tcp_memcontrol.h>
 
 static int zero;
+static int two = 2;
 static int tcp_retr1_max = 255;
 static int ip_local_port_range_min[] = { 1, 1 };
 static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -78,7 +79,7 @@
 static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
 {
 	gid_t *data = table->data;
-	unsigned seq;
+	unsigned int seq;
 	do {
 		seq = read_seqbegin(&sysctl_local_ports.lock);
 
@@ -677,6 +678,15 @@
 		.proc_handler   = proc_dointvec
 	},
 	{
+		.procname	= "tcp_early_retrans",
+		.data		= &sysctl_tcp_early_retrans,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &two,
+	},
+	{
 		.procname	= "udp_mem",
 		.data		= &sysctl_udp_mem,
 		.maxlen		= sizeof(sysctl_udp_mem),
@@ -768,13 +778,6 @@
 	{ }
 };
 
-struct ctl_path net_ipv4_ctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ },
-};
-EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
-
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
 	struct ctl_table *table;
@@ -815,8 +818,7 @@
 
 	tcp_init_mem(net);
 
-	net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
-			net_ipv4_ctl_path, table);
+	net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
 	if (net->ipv4.ipv4_hdr == NULL)
 		goto err_reg;
 
@@ -857,12 +859,12 @@
 	if (!i->procname)
 		return -EINVAL;
 
-	hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table);
+	hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
 	if (hdr == NULL)
 		return -ENOMEM;
 
 	if (register_pernet_subsys(&ipv4_sysctl_ops)) {
-		unregister_sysctl_table(hdr);
+		unregister_net_sysctl_table(hdr);
 		return -ENOMEM;
 	}
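The new tcp_early_retrans entry is range-checked by the table itself: proc_dointvec_minmax with extra1/extra2 pointing at the static zero/two integers rejects writes outside 0..2. The same pattern for any bounded integer sysctl, as a small placeholder sketch:

    static int example_knob;
    static int example_min;         /* 0 */
    static int example_max = 2;

    static struct ctl_table example_bounded[] = {
            {
                    .procname       = "example_knob",       /* placeholder */
                    .data           = &example_knob,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec_minmax,
                    .extra1         = &example_min,         /* lower bound */
                    .extra2         = &example_max,         /* upper bound */
            },
            { }
    };
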
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8bb6ade..bb485fc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -363,6 +363,71 @@
 	return period;
 }
 
+/* Address-family independent initialization for a tcp_sock.
+ *
+ * NOTE: A lot of things set to zero explicitly by call to
+ *       sk_alloc() so need not be done here.
+ */
+void tcp_init_sock(struct sock *sk)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	skb_queue_head_init(&tp->out_of_order_queue);
+	tcp_init_xmit_timers(sk);
+	tcp_prequeue_init(tp);
+
+	icsk->icsk_rto = TCP_TIMEOUT_INIT;
+	tp->mdev = TCP_TIMEOUT_INIT;
+
+	/* So many TCP implementations out there (incorrectly) count the
+	 * initial SYN frame in their delayed-ACK and congestion control
+	 * algorithms that we must have the following bandaid to talk
+	 * efficiently to them.  -DaveM
+	 */
+	tp->snd_cwnd = TCP_INIT_CWND;
+
+	/* See draft-stevens-tcpca-spec-01 for discussion of the
+	 * initialization of these values.
+	 */
+	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+	tp->snd_cwnd_clamp = ~0;
+	tp->mss_cache = TCP_MSS_DEFAULT;
+
+	tp->reordering = sysctl_tcp_reordering;
+	tcp_enable_early_retrans(tp);
+	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
+
+	sk->sk_state = TCP_CLOSE;
+
+	sk->sk_write_space = sk_stream_write_space;
+	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+	icsk->icsk_sync_mss = tcp_sync_mss;
+
+	/* TCP Cookie Transactions */
+	if (sysctl_tcp_cookie_size > 0) {
+		/* Default, cookies without s_data_payload. */
+		tp->cookie_values =
+			kzalloc(sizeof(*tp->cookie_values),
+				sk->sk_allocation);
+		if (tp->cookie_values != NULL)
+			kref_init(&tp->cookie_values->kref);
+	}
+	/* Presumed zeroed, in order of appearance:
+	 *	cookie_in_always, cookie_out_never,
+	 *	s_data_constant, s_data_in, s_data_out
+	 */
+	sk->sk_sndbuf = sysctl_tcp_wmem[1];
+	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
+
+	local_bh_disable();
+	sock_update_memcg(sk);
+	sk_sockets_allocated_inc(sk);
+	local_bh_enable();
+}
+EXPORT_SYMBOL(tcp_init_sock);
+
 /*
  *	Wait for a TCP event.
  *
@@ -528,7 +593,7 @@
 	tp->pushed_seq = tp->write_seq;
 }
 
-static inline int forced_push(const struct tcp_sock *tp)
+static inline bool forced_push(const struct tcp_sock *tp)
 {
 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
@@ -784,9 +849,10 @@
 	while (psize > 0) {
 		struct sk_buff *skb = tcp_write_queue_tail(sk);
 		struct page *page = pages[poffset / PAGE_SIZE];
-		int copy, i, can_coalesce;
+		int copy, i;
 		int offset = poffset % PAGE_SIZE;
 		int size = min_t(size_t, psize, PAGE_SIZE - offset);
+		bool can_coalesce;
 
 		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
 new_segment:
@@ -851,8 +917,7 @@
 wait_for_sndbuf:
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
-		if (copied)
-			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+		tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 			goto do_error;
@@ -919,7 +984,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int iovlen, flags, err, copied;
-	int mss_now, size_goal;
+	int mss_now = 0, size_goal;
 	bool sg;
 	long timeo;
 
@@ -933,6 +998,19 @@
 		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
 			goto out_err;
 
+	if (unlikely(tp->repair)) {
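+		/* Checkpoint/restore path: data aimed at the receive queue
+		 * is injected there directly instead of being transmitted.
+		 */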
+		if (tp->repair_queue == TCP_RECV_QUEUE) {
+			copied = tcp_send_rcvq(sk, msg, size);
+			goto out;
+		}
+
+		err = -EINVAL;
+		if (tp->repair_queue == TCP_NO_QUEUE)
+			goto out_err;
+
+		/* 'common' sending to sendq */
+	}
+
 	/* This should be in poll */
 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
@@ -1003,7 +1081,7 @@
 				if (err)
 					goto do_fault;
 			} else {
-				int merge = 0;
+				bool merge = false;
 				int i = skb_shinfo(skb)->nr_frags;
 				struct page *page = sk->sk_sndmsg_page;
 				int off;
@@ -1017,7 +1095,7 @@
 				    off != PAGE_SIZE) {
 					/* We can extend the last page
 					 * fragment. */
-					merge = 1;
+					merge = true;
 				} else if (i == MAX_SKB_FRAGS || !sg) {
 					/* Need to add new fragment and cannot
 					 * do this because interface is non-SG,
@@ -1089,7 +1167,7 @@
 			if ((seglen -= copy) == 0 && iovlen == 0)
 				goto out;
 
-			if (skb->len < max || (flags & MSG_OOB))
+			if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
 				continue;
 
 			if (forced_push(tp)) {
@@ -1102,7 +1180,7 @@
 wait_for_sndbuf:
 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
-			if (copied)
+			if (copied && likely(!tp->repair))
 				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 
 			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
@@ -1113,7 +1191,7 @@
 	}
 
 out:
-	if (copied)
+	if (copied && likely(!tp->repair))
 		tcp_push(sk, flags, mss_now, tp->nonagle);
 	release_sock(sk);
 	return copied;
@@ -1187,6 +1265,24 @@
 	return -EAGAIN;
 }
 
+static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
+{
+	struct sk_buff *skb;
+	int copied = 0, err = 0;
+
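+	/* TCP_REPAIR checkpointing: copy the entire send queue to the user
+	 * without dequeueing anything.
+	 */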
+	/* XXX -- need to support SO_PEEK_OFF */
+
+	skb_queue_walk(&sk->sk_write_queue, skb) {
+		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
+		if (err)
+			break;
+
+		copied += skb->len;
+	}
+
+	return err ?: copied;
+}
+
 /* Clean up the receive buffer for full frames taken by the user,
  * then send an ACK if necessary.  COPIED is the number of bytes
  * tcp_recvmsg has given to the user so far, it speeds up the
@@ -1196,7 +1292,7 @@
 void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int time_to_ack = 0;
+	bool time_to_ack = false;
 
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
@@ -1222,7 +1318,7 @@
 		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
 		       !icsk->icsk_ack.pingpong)) &&
 		      !atomic_read(&sk->sk_rmem_alloc)))
-			time_to_ack = 1;
+			time_to_ack = true;
 	}
 
 	/* We send an ACK if we can now advertise a non-zero window
@@ -1244,7 +1340,7 @@
 			 * "Lots" means "at least twice" here.
 			 */
 			if (new_window && new_window >= 2 * rcv_window_now)
-				time_to_ack = 1;
+				time_to_ack = true;
 		}
 	}
 	if (time_to_ack)
@@ -1376,11 +1472,11 @@
 				break;
 		}
 		if (tcp_hdr(skb)->fin) {
-			sk_eat_skb(sk, skb, 0);
+			sk_eat_skb(sk, skb, false);
 			++seq;
 			break;
 		}
-		sk_eat_skb(sk, skb, 0);
+		sk_eat_skb(sk, skb, false);
 		if (!desc->count)
 			break;
 		tp->copied_seq = seq;
@@ -1416,7 +1512,7 @@
 	int target;		/* Read at least this many bytes */
 	long timeo;
 	struct task_struct *user_recv = NULL;
-	int copied_early = 0;
+	bool copied_early = false;
 	struct sk_buff *skb;
 	u32 urg_hole = 0;
 
@@ -1432,6 +1528,21 @@
 	if (flags & MSG_OOB)
 		goto recv_urg;
 
+	if (unlikely(tp->repair)) {
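+		/* Checkpointing may only peek at the queue selected with
+		 * TCP_REPAIR_QUEUE; nothing is ever consumed here.
+		 */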
+		err = -EPERM;
+		if (!(flags & MSG_PEEK))
+			goto out;
+
+		if (tp->repair_queue == TCP_SEND_QUEUE)
+			goto recv_sndq;
+
+		err = -EINVAL;
+		if (tp->repair_queue == TCP_NO_QUEUE)
+			goto out;
+
+		/* 'common' recv queue MSG_PEEK-ing */
+	}
+
 	seq = &tp->copied_seq;
 	if (flags & MSG_PEEK) {
 		peek_seq = tp->copied_seq;
@@ -1633,9 +1744,9 @@
 		}
 		if ((flags & MSG_PEEK) &&
 		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
-			if (net_ratelimit())
-				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
-				       current->comm, task_pid_nr(current));
+			net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
+					    current->comm,
+					    task_pid_nr(current));
 			peek_seq = tp->copied_seq;
 		}
 		continue;
@@ -1689,7 +1800,7 @@
 				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
 
 				if ((offset + used) == skb->len)
-					copied_early = 1;
+					copied_early = true;
 
 			} else
 #endif
@@ -1723,7 +1834,7 @@
 			goto found_fin_ok;
 		if (!(flags & MSG_PEEK)) {
 			sk_eat_skb(sk, skb, copied_early);
-			copied_early = 0;
+			copied_early = false;
 		}
 		continue;
 
@@ -1732,7 +1843,7 @@
 		++*seq;
 		if (!(flags & MSG_PEEK)) {
 			sk_eat_skb(sk, skb, copied_early);
-			copied_early = 0;
+			copied_early = false;
 		}
 		break;
 	} while (len > 0);
@@ -1783,6 +1894,10 @@
 recv_urg:
 	err = tcp_recv_urg(sk, msg, len, flags);
 	goto out;
+
+recv_sndq:
+	err = tcp_peek_sndq(sk, msg, len);
+	goto out;
 }
 EXPORT_SYMBOL(tcp_recvmsg);
 
@@ -1886,10 +2001,10 @@
 	too_many_orphans = tcp_too_many_orphans(sk, shift);
 	out_of_socket_memory = tcp_out_of_memory(sk);
 
-	if (too_many_orphans && net_ratelimit())
-		pr_info("too many orphaned sockets\n");
-	if (out_of_socket_memory && net_ratelimit())
-		pr_info("out of memory -- consider tuning tcp_mem\n");
+	if (too_many_orphans)
+		net_info_ratelimited("too many orphaned sockets\n");
+	if (out_of_socket_memory)
+		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
 	return too_many_orphans || out_of_socket_memory;
 }
 
@@ -1935,7 +2050,9 @@
 	 * advertise a zero window, then kill -9 the FTP client, wheee...
 	 * Note: timeout is always zero in such a case.
 	 */
-	if (data_was_unread) {
+	if (unlikely(tcp_sk(sk)->repair)) {
+		sk->sk_prot->disconnect(sk, 0);
+	} else if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
 		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
@@ -2053,7 +2170,7 @@
 
 /* These states need RST on ABORT according to RFC793 */
 
-static inline int tcp_need_reset(int state)
+static inline bool tcp_need_reset(int state)
 {
 	return (1 << state) &
 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
@@ -2074,6 +2191,8 @@
 	/* ABORT function of RFC793 */
 	if (old_state == TCP_LISTEN) {
 		inet_csk_listen_stop(sk);
+	} else if (unlikely(tp->repair)) {
+		sk->sk_err = ECONNABORTED;
 	} else if (tcp_need_reset(old_state) ||
 		   (tp->snd_nxt != tp->write_seq &&
 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -2125,6 +2244,54 @@
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
+static inline bool tcp_can_repair_sock(const struct sock *sk)
+{
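+	/* Repair mode is restricted to CAP_NET_ADMIN and to sockets that
+	 * are either closed or fully established.
+	 */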
+	return capable(CAP_NET_ADMIN) &&
+		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
+}
+
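+/* Restore remembered TCP options (MSS clamp, window scale, SACK-ok,
+ * timestamps) on a socket that is being repaired while established.
+ */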
+static int tcp_repair_options_est(struct tcp_sock *tp,
+		struct tcp_repair_opt __user *optbuf, unsigned int len)
+{
+	struct tcp_repair_opt opt;
+
+	while (len >= sizeof(opt)) {
+		if (copy_from_user(&opt, optbuf, sizeof(opt)))
+			return -EFAULT;
+
+		optbuf++;
+		len -= sizeof(opt);
+
+		switch (opt.opt_code) {
+		case TCPOPT_MSS:
+			tp->rx_opt.mss_clamp = opt.opt_val;
+			break;
+		case TCPOPT_WINDOW:
+			if (opt.opt_val > 14)
+				return -EFBIG;
+
+			tp->rx_opt.snd_wscale = opt.opt_val;
+			break;
+		case TCPOPT_SACK_PERM:
+			if (opt.opt_val != 0)
+				return -EINVAL;
+
+			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
+			if (sysctl_tcp_fack)
+				tcp_enable_fack(tp);
+			break;
+		case TCPOPT_TIMESTAMP:
+			if (opt.opt_val != 0)
+				return -EINVAL;
+
+			tp->rx_opt.tstamp_ok = 1;
+			break;
+		}
+	}
+
+	return 0;
+}
+
 /*
  *	Socket option code for TCP.
  */
@@ -2295,6 +2462,55 @@
 			err = -EINVAL;
-		else
+		else {
 			tp->thin_dupack = val;
+			if (tp->thin_dupack)
+				tcp_disable_early_retrans(tp);
+		}
+		break;
+
+	case TCP_REPAIR:
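+		/* Toggle checkpoint/restore mode: entering forces address
+		 * reuse, leaving sends a window probe to the peer.
+		 */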
+		if (!tcp_can_repair_sock(sk))
+			err = -EPERM;
+		else if (val == 1) {
+			tp->repair = 1;
+			sk->sk_reuse = SK_FORCE_REUSE;
+			tp->repair_queue = TCP_NO_QUEUE;
+		} else if (val == 0) {
+			tp->repair = 0;
+			sk->sk_reuse = SK_NO_REUSE;
+			tcp_send_window_probe(sk);
+		} else
+			err = -EINVAL;
+
+		break;
+
+	case TCP_REPAIR_QUEUE:
+		if (!tp->repair)
+			err = -EPERM;
+		else if (val < TCP_QUEUES_NR)
+			tp->repair_queue = val;
+		else
+			err = -EINVAL;
+		break;
+
+	case TCP_QUEUE_SEQ:
+		if (sk->sk_state != TCP_CLOSE)
+			err = -EPERM;
+		else if (tp->repair_queue == TCP_SEND_QUEUE)
+			tp->write_seq = val;
+		else if (tp->repair_queue == TCP_RECV_QUEUE)
+			tp->rcv_nxt = val;
+		else
+			err = -EINVAL;
+		break;
+
+	case TCP_REPAIR_OPTIONS:
+		if (!tp->repair)
+			err = -EINVAL;
+		else if (sk->sk_state == TCP_ESTABLISHED)
+			err = tcp_repair_options_est(tp,
+					(struct tcp_repair_opt __user *)optval,
+					optlen);
+		else
+			err = -EPERM;
 		break;
 
 	case TCP_CORK:
@@ -2530,6 +2746,8 @@
 		val = tp->mss_cache;
 		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
 			val = tp->rx_opt.user_mss;
+		if (tp->repair)
+			val = tp->rx_opt.mss_clamp;
 		break;
 	case TCP_NODELAY:
 		val = !!(tp->nonagle&TCP_NAGLE_OFF);
@@ -2632,6 +2850,26 @@
 		val = tp->thin_dupack;
 		break;
 
+	case TCP_REPAIR:
+		val = tp->repair;
+		break;
+
+	case TCP_REPAIR_QUEUE:
+		if (tp->repair)
+			val = tp->repair_queue;
+		else
+			return -EINVAL;
+		break;
+
+	case TCP_QUEUE_SEQ:
+		if (tp->repair_queue == TCP_SEND_QUEUE)
+			val = tp->write_seq;
+		else if (tp->repair_queue == TCP_RECV_QUEUE)
+			val = tp->rcv_nxt;
+		else
+			return -EINVAL;
+		break;
+
 	case TCP_USER_TIMEOUT:
 		val = jiffies_to_msecs(icsk->icsk_user_timeout);
 		break;
@@ -2675,7 +2913,7 @@
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct tcphdr *th;
-	unsigned thlen;
+	unsigned int thlen;
 	unsigned int seq;
 	__be32 delta;
 	unsigned int oldlen;
@@ -2933,13 +3171,13 @@
 struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	struct tcp_md5sig_pool __percpu *pool;
-	int alloc = 0;
+	bool alloc = false;
 
 retry:
 	spin_lock_bh(&tcp_md5sig_pool_lock);
 	pool = tcp_md5sig_pool;
 	if (tcp_md5sig_users++ == 0) {
-		alloc = 1;
+		alloc = true;
 		spin_unlock_bh(&tcp_md5sig_pool_lock);
 	} else if (!pool) {
 		tcp_md5sig_users--;
@@ -3033,9 +3271,9 @@
 	struct scatterlist sg;
 	const struct tcphdr *tp = tcp_hdr(skb);
 	struct hash_desc *desc = &hp->md5_desc;
-	unsigned i;
-	const unsigned head_data_len = skb_headlen(skb) > header_len ?
-				       skb_headlen(skb) - header_len : 0;
+	unsigned int i;
+	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
+					   skb_headlen(skb) - header_len : 0;
 	const struct skb_shared_info *shi = skb_shinfo(skb);
 	struct sk_buff *frag_iter;
 
@@ -3223,9 +3461,15 @@
 static __initdata unsigned long thash_entries;
 static int __init set_thash_entries(char *str)
 {
+	ssize_t ret;
+
 	if (!str)
 		return 0;
-	thash_entries = simple_strtoul(str, &str, 0);
+
+	ret = kstrtoul(str, 0, &thash_entries);
+	if (ret)
+		return 0;
+
 	return 1;
 }
 __setup("thash_entries=", set_thash_entries);
@@ -3243,7 +3487,7 @@
 {
 	struct sk_buff *skb = NULL;
 	unsigned long limit;
-	int max_share, cnt;
+	int max_rshare, max_wshare, cnt;
 	unsigned int i;
 	unsigned long jiffy = jiffies;
 
@@ -3303,15 +3547,16 @@
 	tcp_init_mem(&init_net);
 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
 	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
-	max_share = min(4UL*1024*1024, limit);
+	max_wshare = min(4UL*1024*1024, limit);
+	max_rshare = min(6UL*1024*1024, limit);
 
 	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_wmem[1] = 16*1024;
-	sysctl_tcp_wmem[2] = max(64*1024, max_share);
+	sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
 
 	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
 	sysctl_tcp_rmem[1] = 87380;
-	sysctl_tcp_rmem[2] = max(87380, max_share);
+	sysctl_tcp_rmem[2] = max(87380, max_rshare);
 
 	pr_info("Hash tables configured (established %u bind %u)\n",
 		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 272a845..04dbd7a 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -280,19 +280,19 @@
 /* RFC2861 Check whether we are limited by application or congestion window
  * This is the inverse of cwnd check in tcp_tso_should_defer
  */
-int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	u32 left;
 
 	if (in_flight >= tp->snd_cwnd)
-		return 1;
+		return true;
 
 	left = tp->snd_cwnd - in_flight;
 	if (sk_can_gso(sk) &&
 	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
 	    left * tp->mss_cache < sk->sk_gso_max_size)
-		return 1;
+		return true;
 	return left <= tcp_max_tso_deferred_mss(tp);
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index fe3ecf4..57bdd17 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -15,7 +15,7 @@
 
 /* Tcp Hybla structure. */
 struct hybla {
-	u8    hybla_en;
+	bool  hybla_en;
 	u32   snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
 	u32   rho;	      /* Rho parameter, integer part  */
 	u32   rho2;	      /* Rho * Rho, integer part */
@@ -24,8 +24,7 @@
 	u32   minrtt;	      /* Minimum smoothed round trip time value seen */
 };
 
-/* Hybla reference round trip time (default= 1/40 sec = 25 ms),
-   expressed in jiffies */
+/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
 static int rtt0 = 25;
 module_param(rtt0, int, 0644);
 MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)");
@@ -39,7 +38,7 @@
 	ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
 	ca->rho = ca->rho_3ls >> 3;
 	ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
-	ca->rho2 = ca->rho2_7ls >>7;
+	ca->rho2 = ca->rho2_7ls >> 7;
 }
 
 static void hybla_init(struct sock *sk)
@@ -52,7 +51,7 @@
 	ca->rho_3ls = 0;
 	ca->rho2_7ls = 0;
 	ca->snd_cwnd_cents = 0;
-	ca->hybla_en = 1;
+	ca->hybla_en = true;
 	tp->snd_cwnd = 2;
 	tp->snd_cwnd_clamp = 65535;
 
@@ -67,6 +66,7 @@
 static void hybla_state(struct sock *sk, u8 ca_state)
 {
 	struct hybla *ca = inet_csk_ca(sk);
+
 	ca->hybla_en = (ca_state == TCP_CA_Open);
 }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3ff36406..cfa2aa1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@
 EXPORT_SYMBOL(sysctl_tcp_ecn);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
-int sysctl_tcp_adv_win_scale __read_mostly = 2;
+int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 int sysctl_tcp_stdurg __read_mostly;
@@ -99,6 +99,7 @@
 
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_abc __read_mostly;
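+/* RFC 5827 early retransmit: 0 disables it; values >= 2 also delay the
+ * retransmit briefly (see tcp_pause_early_retransmit()).
+ */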
+int sysctl_tcp_early_retrans __read_mostly = 2;
 
 #define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
 #define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
@@ -175,7 +176,7 @@
 static void tcp_incr_quickack(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
+	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 
 	if (quickacks == 0)
 		quickacks = 2;
@@ -195,9 +196,10 @@
  * and the session is not interactive.
  */
 
-static inline int tcp_in_quickack_mode(const struct sock *sk)
+static inline bool tcp_in_quickack_mode(const struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
+
 	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
 }
 
@@ -252,11 +254,11 @@
 		tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
-static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 /* Buffer size and advertised window tuning.
@@ -495,7 +497,7 @@
 		goto new_measure;
 	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
 		return;
-	tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
+	tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
 
 new_measure:
 	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
@@ -906,6 +908,7 @@
 	if (dst_metric(dst, RTAX_REORDERING) &&
 	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
 		tcp_disable_fack(tp);
+		tcp_disable_early_retrans(tp);
 		tp->reordering = dst_metric(dst, RTAX_REORDERING);
 	}
 
@@ -937,7 +940,7 @@
 	tcp_set_rto(sk);
 reset:
 	if (tp->srtt == 0) {
-		/* RFC2988bis: We've failed to get a valid RTT sample from
+		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
 		 * 3WHS. This is most likely due to retransmission,
 		 * including spurious one. Reset the RTO back to 3secs
 		 * from the more aggressive 1sec to avoid more spurious
@@ -947,7 +950,7 @@
 		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 	}
 	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
-	 * retransmitted. In light of RFC2988bis' more aggressive 1sec
+	 * retransmitted. In light of RFC6298's more aggressive 1sec
 	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
 	 * retransmission has occurred.
 	 */
@@ -979,15 +982,18 @@
 
 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
-		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
-		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
-		       tp->reordering,
-		       tp->fackets_out,
-		       tp->sacked_out,
-		       tp->undo_marker ? tp->undo_retrans : 0);
+		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
+			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
+			 tp->reordering,
+			 tp->fackets_out,
+			 tp->sacked_out,
+			 tp->undo_marker ? tp->undo_retrans : 0);
 #endif
 		tcp_disable_fack(tp);
 	}
+
+	if (metric > 0)
+		tcp_disable_early_retrans(tp);
 }
 
 /* This must be called before lost_out is incremented */
@@ -1118,36 +1124,36 @@
  * the exact amount is rather hard to quantify. However, tp->max_window can
  * be used as an exaggerated estimate.
  */
-static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
-				  u32 start_seq, u32 end_seq)
+static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
+				   u32 start_seq, u32 end_seq)
 {
 	/* Too far in future, or reversed (interpretation is ambiguous) */
 	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
-		return 0;
+		return false;
 
 	/* Nasty start_seq wrap-around check (see comments above) */
 	if (!before(start_seq, tp->snd_nxt))
-		return 0;
+		return false;
 
 	/* In outstanding window? ...This is valid exit for D-SACKs too.
 	 * start_seq == snd_una is non-sensical (see comments above)
 	 */
 	if (after(start_seq, tp->snd_una))
-		return 1;
+		return true;
 
 	if (!is_dsack || !tp->undo_marker)
-		return 0;
+		return false;
 
 	/* ...Then it's D-SACK, and must reside below snd_una completely */
 	if (after(end_seq, tp->snd_una))
-		return 0;
+		return false;
 
 	if (!before(start_seq, tp->undo_marker))
-		return 1;
+		return true;
 
 	/* Too old */
 	if (!after(end_seq, tp->undo_marker))
-		return 0;
+		return false;
 
 	/* Undo_marker boundary crossing (overestimates a lot). Known already:
 	 *   start_seq < undo_marker and end_seq >= undo_marker.
@@ -1219,17 +1225,17 @@
 		tp->lost_retrans_low = new_low_seq;
 }
 
-static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
-			   struct tcp_sack_block_wire *sp, int num_sacks,
-			   u32 prior_snd_una)
+static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
+			    struct tcp_sack_block_wire *sp, int num_sacks,
+			    u32 prior_snd_una)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
 	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
-	int dup_sack = 0;
+	bool dup_sack = false;
 
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
-		dup_sack = 1;
+		dup_sack = true;
 		tcp_dsack_seen(tp);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
@@ -1238,7 +1244,7 @@
 
 		if (!after(end_seq_0, end_seq_1) &&
 		    !before(start_seq_0, start_seq_1)) {
-			dup_sack = 1;
+			dup_sack = true;
 			tcp_dsack_seen(tp);
 			NET_INC_STATS_BH(sock_net(sk),
 					LINUX_MIB_TCPDSACKOFORECV);
@@ -1269,9 +1275,10 @@
  * FIXME: this could be merged to shift decision code
  */
 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
-				 u32 start_seq, u32 end_seq)
+				  u32 start_seq, u32 end_seq)
 {
-	int in_sack, err;
+	int err;
+	bool in_sack;
 	unsigned int pkt_len;
 	unsigned int mss;
 
@@ -1317,7 +1324,7 @@
 static u8 tcp_sacktag_one(struct sock *sk,
 			  struct tcp_sacktag_state *state, u8 sacked,
 			  u32 start_seq, u32 end_seq,
-			  int dup_sack, int pcount)
+			  bool dup_sack, int pcount)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fack_count = state->fack_count;
@@ -1397,10 +1404,10 @@
 /* Shift newly-SACKed bytes from this skb to the immediately previous
  * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
  */
-static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
-			   struct tcp_sacktag_state *state,
-			   unsigned int pcount, int shifted, int mss,
-			   int dup_sack)
+static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+			    struct tcp_sacktag_state *state,
+			    unsigned int pcount, int shifted, int mss,
+			    bool dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
@@ -1450,7 +1457,7 @@
 	if (skb->len > 0) {
 		BUG_ON(!tcp_skb_pcount(skb));
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
-		return 0;
+		return false;
 	}
 
 	/* Whole SKB was eaten :-) */
@@ -1473,7 +1480,7 @@
 
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
 
-	return 1;
+	return true;
 }
 
 /* I wish gso_size would have a bit more sane initialization than
@@ -1496,7 +1503,7 @@
 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 					  struct tcp_sacktag_state *state,
 					  u32 start_seq, u32 end_seq,
-					  int dup_sack)
+					  bool dup_sack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *prev;
@@ -1635,14 +1642,14 @@
 					struct tcp_sack_block *next_dup,
 					struct tcp_sacktag_state *state,
 					u32 start_seq, u32 end_seq,
-					int dup_sack_in)
+					bool dup_sack_in)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *tmp;
 
 	tcp_for_write_queue_from(skb, sk) {
 		int in_sack = 0;
-		int dup_sack = dup_sack_in;
+		bool dup_sack = dup_sack_in;
 
 		if (skb == tcp_send_head(sk))
 			break;
@@ -1657,7 +1664,7 @@
 							next_dup->start_seq,
 							next_dup->end_seq);
 			if (in_sack > 0)
-				dup_sack = 1;
+				dup_sack = true;
 		}
 
 		/* skb reference here is a bit tricky to get right, since
@@ -1762,7 +1769,7 @@
 	struct sk_buff *skb;
 	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
 	int used_sacks;
-	int found_dup_sack = 0;
+	bool found_dup_sack = false;
 	int i, j;
 	int first_sack_index;
 
@@ -1793,7 +1800,7 @@
 	used_sacks = 0;
 	first_sack_index = 0;
 	for (i = 0; i < num_sacks; i++) {
-		int dup_sack = !i && found_dup_sack;
+		bool dup_sack = !i && found_dup_sack;
 
 		sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
 		sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
@@ -1860,7 +1867,7 @@
 	while (i < used_sacks) {
 		u32 start_seq = sp[i].start_seq;
 		u32 end_seq = sp[i].end_seq;
-		int dup_sack = (found_dup_sack && (i == first_sack_index));
+		bool dup_sack = (found_dup_sack && (i == first_sack_index));
 		struct tcp_sack_block *next_dup = NULL;
 
 		if (found_dup_sack && ((i + 1) == first_sack_index))
@@ -1962,9 +1969,9 @@
 }
 
 /* Limits sacked_out so that sum with lost_out isn't ever larger than
- * packets_out. Returns zero if sacked_out adjustement wasn't necessary.
+ * packets_out. Returns false if sacked_out adjustment wasn't necessary.
  */
-static int tcp_limit_reno_sacked(struct tcp_sock *tp)
+static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
 {
 	u32 holes;
 
@@ -1973,9 +1980,9 @@
 
 	if ((tp->sacked_out + holes) > tp->packets_out) {
 		tp->sacked_out = tp->packets_out - holes;
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 /* If we receive more dupacks than we expected counting segments
@@ -2029,40 +2036,40 @@
 /* F-RTO can only be used if TCP has never retransmitted anything other than
  * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
  */
-int tcp_use_frto(struct sock *sk)
+bool tcp_use_frto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 
 	if (!sysctl_tcp_frto)
-		return 0;
+		return false;
 
 	/* MTU probe and F-RTO won't really play nicely along currently */
 	if (icsk->icsk_mtup.probe_size)
-		return 0;
+		return false;
 
 	if (tcp_is_sackfrto(tp))
-		return 1;
+		return true;
 
 	/* Avoid expensive walking of rexmit queue if possible */
 	if (tp->retrans_out > 1)
-		return 0;
+		return false;
 
 	skb = tcp_write_queue_head(sk);
 	if (tcp_skb_is_last(sk, skb))
-		return 1;
+		return true;
 	skb = tcp_write_queue_next(sk, skb);	/* Skips head */
 	tcp_for_write_queue_from(skb, sk) {
 		if (skb == tcp_send_head(sk))
 			break;
 		if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
-			return 0;
+			return false;
 		/* Short-circuit when first non-SACKed skb has been checked */
 		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 			break;
 	}
-	return 1;
+	return true;
 }
 
 /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
@@ -2298,7 +2305,7 @@
  *
  * Do processing similar to RTO timeout.
  */
-static int tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int flag)
 {
 	if (flag & FLAG_SACK_RENEGING) {
 		struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2309,9 +2316,9 @@
 		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 					  icsk->icsk_rto, TCP_RTO_MAX);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 static inline int tcp_fackets_out(const struct tcp_sock *tp)
@@ -2339,6 +2346,27 @@
 	return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
 }
 
+static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	unsigned long delay;
+
+	/* Delay early retransmit and entering fast recovery for
+	 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
+	 * available, or RTO is scheduled to fire first.
+	 */
+	if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt)
+		return false;
+
+	delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
+	if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
+		return false;
+
+	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX);
+	tp->early_retrans_delayed = 1;
+	return true;
+}
+
 static inline int tcp_skb_timedout(const struct sock *sk,
 				   const struct sk_buff *skb)
 {
@@ -2446,28 +2474,28 @@
  * Main question: may we further continue forward transmission
  * with the same cwnd?
  */
-static int tcp_time_to_recover(struct sock *sk)
+static bool tcp_time_to_recover(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;
 
 	/* Do not perform any recovery during F-RTO algorithm */
 	if (tp->frto_counter)
-		return 0;
+		return false;
 
 	/* Trick#1: The loss is proven. */
 	if (tp->lost_out)
-		return 1;
+		return true;
 
 	/* Not-A-Trick#2 : Classic rule... */
 	if (tcp_dupack_heuristics(tp) > tp->reordering)
-		return 1;
+		return true;
 
 	/* Trick#3 : when we use RFC2988 timer restart, fast
 	 * retransmit can be triggered by timeout of queue head.
 	 */
 	if (tcp_is_fack(tp) && tcp_head_timedout(sk))
-		return 1;
+		return true;
 
 	/* Trick#4: It is still not OK... But will it be useful to delay
 	 * recovery more?
@@ -2479,7 +2507,7 @@
 		/* We have nothing to send. This connection is limited
 		 * either by receiver window or by application.
 		 */
-		return 1;
+		return true;
 	}
 
 	/* If a thin stream is detected, retransmit after first
@@ -2490,9 +2518,19 @@
 	if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
 	    tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
 	    tcp_is_sack(tp) && !tcp_send_head(sk))
-		return 1;
+		return true;
 
-	return 0;
+	/* Trick#6: TCP early retransmit, per RFC5827.  To avoid spurious
+	 * retransmissions due to small network reorderings, we implement
+	 * Mitigation A.3 in the RFC and delay the retransmission for a short
+	 * interval if appropriate.
+	 */
+	if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
+	    (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) &&
+	    !tcp_may_send_now(sk))
+		return !tcp_pause_early_retransmit(sk, flag);
+
+	return false;
 }
 
 /* New heuristics: it is possible only after we switched to restart timer
@@ -2680,22 +2718,22 @@
 	struct inet_sock *inet = inet_sk(sk);
 
 	if (sk->sk_family == AF_INET) {
-		printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
-		       msg,
-		       &inet->inet_daddr, ntohs(inet->inet_dport),
-		       tp->snd_cwnd, tcp_left_out(tp),
-		       tp->snd_ssthresh, tp->prior_ssthresh,
-		       tp->packets_out);
+		pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
+			 msg,
+			 &inet->inet_daddr, ntohs(inet->inet_dport),
+			 tp->snd_cwnd, tcp_left_out(tp),
+			 tp->snd_ssthresh, tp->prior_ssthresh,
+			 tp->packets_out);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (sk->sk_family == AF_INET6) {
 		struct ipv6_pinfo *np = inet6_sk(sk);
-		printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
-		       msg,
-		       &np->daddr, ntohs(inet->inet_dport),
-		       tp->snd_cwnd, tcp_left_out(tp),
-		       tp->snd_ssthresh, tp->prior_ssthresh,
-		       tp->packets_out);
+		pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
+			 msg,
+			 &np->daddr, ntohs(inet->inet_dport),
+			 tp->snd_cwnd, tcp_left_out(tp),
+			 tp->snd_ssthresh, tp->prior_ssthresh,
+			 tp->packets_out);
 	}
 #endif
 }
@@ -2731,7 +2769,7 @@
 }
 
 /* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk)
+static bool tcp_try_undo_recovery(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2756,10 +2794,10 @@
 		 * is ACKed. For Reno it is MUST to prevent false
 		 * fast retransmits (RFC2582). SACK TCP is safe. */
 		tcp_moderate_cwnd(tp);
-		return 1;
+		return true;
 	}
 	tcp_set_ca_state(sk, TCP_CA_Open);
-	return 0;
+	return false;
 }
 
 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
@@ -2789,19 +2827,19 @@
  * that successive retransmissions of a segment must not advance
  * retrans_stamp under any conditions.
  */
-static int tcp_any_retrans_done(const struct sock *sk)
+static bool tcp_any_retrans_done(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
 	if (tp->retrans_out)
-		return 1;
+		return true;
 
 	skb = tcp_write_queue_head(sk);
 	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 /* Undo during fast recovery after partial ACK. */
@@ -2835,7 +2873,7 @@
 }
 
 /* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk)
+static bool tcp_try_undo_loss(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2857,9 +2895,9 @@
 		tp->undo_marker = 0;
 		if (tcp_is_sack(tp))
 			tcp_set_ca_state(sk, TCP_CA_Open);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 static inline void tcp_complete_cwr(struct sock *sk)
@@ -2868,11 +2906,14 @@
 
 	/* Do not moderate cwnd if it's already undone in cwr or recovery. */
 	if (tp->undo_marker) {
-		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
 			tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-		else /* PRR */
+			tp->snd_cwnd_stamp = tcp_time_stamp;
+		} else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
+			/* PRR algorithm. */
 			tp->snd_cwnd = tp->snd_ssthresh;
-		tp->snd_cwnd_stamp = tcp_time_stamp;
+			tp->snd_cwnd_stamp = tcp_time_stamp;
+		}
 	}
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
@@ -3022,6 +3063,38 @@
 	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
 
+static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int mib_idx;
+
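+	/* Factored out of tcp_fastretrans_alert() so that the delayed
+	 * early-retransmit path (tcp_resume_early_retransmit) can reuse it.
+	 */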
+	if (tcp_is_reno(tp))
+		mib_idx = LINUX_MIB_TCPRENORECOVERY;
+	else
+		mib_idx = LINUX_MIB_TCPSACKRECOVERY;
+
+	NET_INC_STATS_BH(sock_net(sk), mib_idx);
+
+	tp->high_seq = tp->snd_nxt;
+	tp->prior_ssthresh = 0;
+	tp->undo_marker = tp->snd_una;
+	tp->undo_retrans = tp->retrans_out;
+
+	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+		if (!ece_ack)
+			tp->prior_ssthresh = tcp_current_ssthresh(sk);
+		tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+		TCP_ECN_queue_cwr(tp);
+	}
+
+	tp->bytes_acked = 0;
+	tp->snd_cwnd_cnt = 0;
+	tp->prior_cwnd = tp->snd_cwnd;
+	tp->prr_delivered = 0;
+	tp->prr_out = 0;
+	tcp_set_ca_state(sk, TCP_CA_Recovery);
+}
+
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
@@ -3041,7 +3114,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
 				    (tcp_fackets_out(tp) > tp->reordering));
-	int fast_rexmit = 0, mib_idx;
+	int fast_rexmit = 0;
 
 	if (WARN_ON(!tp->packets_out && tp->sacked_out))
 		tp->sacked_out = 0;
@@ -3125,7 +3198,7 @@
 		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
 			tcp_try_undo_dsack(sk);
 
-		if (!tcp_time_to_recover(sk)) {
+		if (!tcp_time_to_recover(sk, flag)) {
 			tcp_try_to_open(sk, flag);
 			return;
 		}
@@ -3142,32 +3215,7 @@
 		}
 
 		/* Otherwise enter Recovery state */
-
-		if (tcp_is_reno(tp))
-			mib_idx = LINUX_MIB_TCPRENORECOVERY;
-		else
-			mib_idx = LINUX_MIB_TCPSACKRECOVERY;
-
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
-
-		tp->high_seq = tp->snd_nxt;
-		tp->prior_ssthresh = 0;
-		tp->undo_marker = tp->snd_una;
-		tp->undo_retrans = tp->retrans_out;
-
-		if (icsk->icsk_ca_state < TCP_CA_CWR) {
-			if (!(flag & FLAG_ECE))
-				tp->prior_ssthresh = tcp_current_ssthresh(sk);
-			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
-			TCP_ECN_queue_cwr(tp);
-		}
-
-		tp->bytes_acked = 0;
-		tp->snd_cwnd_cnt = 0;
-		tp->prior_cwnd = tp->snd_cwnd;
-		tp->prr_delivered = 0;
-		tp->prr_out = 0;
-		tcp_set_ca_state(sk, TCP_CA_Recovery);
+		tcp_enter_recovery(sk, (flag & FLAG_ECE));
 		fast_rexmit = 1;
 	}
 
@@ -3249,16 +3297,47 @@
 /* Restart timer after forward progress on connection.
  * RFC2988 recommends to restart timer to now+rto.
  */
-static void tcp_rearm_rto(struct sock *sk)
+void tcp_rearm_rto(struct sock *sk)
 {
-	const struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (!tp->packets_out) {
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
 	} else {
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+		u32 rto = inet_csk(sk)->icsk_rto;
+		/* Offset the time elapsed after installing regular RTO */
+		if (tp->early_retrans_delayed) {
+			struct sk_buff *skb = tcp_write_queue_head(sk);
+			const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
+			s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
+			/* delta may not be positive if the socket is locked
+			 * when the delayed ER timer fires and is rescheduled.
+			 */
+			if (delta > 0)
+				rto = delta;
+		}
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
+					  TCP_RTO_MAX);
 	}
+	tp->early_retrans_delayed = 0;
+}
+
+/* This function is called when the delayed ER timer fires. TCP enters
+ * fast recovery and performs fast-retransmit.
+ */
+void tcp_resume_early_retransmit(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	tcp_rearm_rto(sk);
+
+	/* Stop if ER is disabled after the delayed ER timer is scheduled */
+	if (!tp->do_early_retrans)
+		return;
+
+	tcp_enter_recovery(sk, false);
+	tcp_update_scoreboard(sk, 1);
+	tcp_xmit_retransmit_queue(sk);
 }
 
 /* If we get here, the whole TSO packet has not been acked. */
@@ -3293,7 +3372,7 @@
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 	u32 now = tcp_time_stamp;
-	int fully_acked = 1;
+	bool fully_acked = true;
 	int flag = 0;
 	u32 pkts_acked = 0;
 	u32 reord = tp->packets_out;
@@ -3317,7 +3396,7 @@
 			if (!acked_pcount)
 				break;
 
-			fully_acked = 0;
+			fully_acked = false;
 		} else {
 			acked_pcount = tcp_skb_pcount(skb);
 		}
@@ -3434,18 +3513,18 @@
 	if (!tp->packets_out && tcp_is_sack(tp)) {
 		icsk = inet_csk(sk);
 		if (tp->lost_out) {
-			printk(KERN_DEBUG "Leak l=%u %d\n",
-			       tp->lost_out, icsk->icsk_ca_state);
+			pr_debug("Leak l=%u %d\n",
+				 tp->lost_out, icsk->icsk_ca_state);
 			tp->lost_out = 0;
 		}
 		if (tp->sacked_out) {
-			printk(KERN_DEBUG "Leak s=%u %d\n",
-			       tp->sacked_out, icsk->icsk_ca_state);
+			pr_debug("Leak s=%u %d\n",
+				 tp->sacked_out, icsk->icsk_ca_state);
 			tp->sacked_out = 0;
 		}
 		if (tp->retrans_out) {
-			printk(KERN_DEBUG "Leak r=%u %d\n",
-			       tp->retrans_out, icsk->icsk_ca_state);
+			pr_debug("Leak r=%u %d\n",
+				 tp->retrans_out, icsk->icsk_ca_state);
 			tp->retrans_out = 0;
 		}
 	}
@@ -3596,7 +3675,7 @@
  *     to prove that the RTO is indeed spurious. It transfers the control
  *     from F-RTO to the conventional RTO recovery
  */
-static int tcp_process_frto(struct sock *sk, int flag)
+static bool tcp_process_frto(struct sock *sk, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -3612,7 +3691,7 @@
 
 	if (!before(tp->snd_una, tp->frto_highmark)) {
 		tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
-		return 1;
+		return true;
 	}
 
 	if (!tcp_is_sackfrto(tp)) {
@@ -3621,19 +3700,19 @@
 		 * data, winupdate
 		 */
 		if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
-			return 1;
+			return true;
 
 		if (!(flag & FLAG_DATA_ACKED)) {
 			tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
 					    flag);
-			return 1;
+			return true;
 		}
 	} else {
 		if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
 			/* Prevent sending of new data. */
 			tp->snd_cwnd = min(tp->snd_cwnd,
 					   tcp_packets_in_flight(tp));
-			return 1;
+			return true;
 		}
 
 		if ((tp->frto_counter >= 2) &&
@@ -3643,10 +3722,10 @@
 			/* RFC4138 shortcoming (see comment above) */
 			if (!(flag & FLAG_FORWARD_PROGRESS) &&
 			    (flag & FLAG_NOT_DUP))
-				return 1;
+				return true;
 
 			tcp_enter_frto_loss(sk, 3, flag);
-			return 1;
+			return true;
 		}
 	}
 
@@ -3658,7 +3737,7 @@
 		if (!tcp_may_send_now(sk))
 			tcp_enter_frto_loss(sk, 2, flag);
 
-		return 1;
+		return true;
 	} else {
 		switch (sysctl_tcp_frto_response) {
 		case 2:
@@ -3675,7 +3754,7 @@
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
 	}
-	return 0;
+	return false;
 }
 
 /* This routine deals with incoming acks, but not outgoing ones. */
@@ -3693,7 +3772,7 @@
 	int prior_sacked = tp->sacked_out;
 	int pkts_acked = 0;
 	int newly_acked_sacked = 0;
-	int frto_cwnd = 0;
+	bool frto_cwnd = false;
 
 	/* If the ack is older than previous acks
 	 * then we can probably ignore it.
@@ -3707,6 +3786,9 @@
 	if (after(ack, tp->snd_nxt))
 		goto invalid_ack;
 
+	if (tp->early_retrans_delayed)
+		tcp_rearm_rto(sk);
+
 	if (after(ack, prior_snd_una))
 		flag |= FLAG_SND_UNA_ADVANCED;
 
@@ -3872,10 +3954,9 @@
 					__u8 snd_wscale = *(__u8 *)ptr;
 					opt_rx->wscale_ok = 1;
 					if (snd_wscale > 14) {
-						if (net_ratelimit())
-							pr_info("%s: Illegal window scaling value %d >14 received\n",
-								__func__,
-								snd_wscale);
+						net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n",
+								     __func__,
+								     snd_wscale);
 						snd_wscale = 14;
 					}
 					opt_rx->snd_wscale = snd_wscale;
@@ -3946,7 +4027,7 @@
 }
 EXPORT_SYMBOL(tcp_parse_options);
 
-static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
+static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	const __be32 *ptr = (const __be32 *)(th + 1);
 
@@ -3957,31 +4038,31 @@
 		tp->rx_opt.rcv_tsval = ntohl(*ptr);
 		++ptr;
 		tp->rx_opt.rcv_tsecr = ntohl(*ptr);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 /* Fast parse options. This hopes to only see timestamps.
  * If it is wrong it falls back on tcp_parse_options().
  */
-static int tcp_fast_parse_options(const struct sk_buff *skb,
-				  const struct tcphdr *th,
-				  struct tcp_sock *tp, const u8 **hvpp)
+static bool tcp_fast_parse_options(const struct sk_buff *skb,
+				   const struct tcphdr *th,
+				   struct tcp_sock *tp, const u8 **hvpp)
 {
 	/* In the spirit of fast parsing, compare doff directly to constant
 	 * values.  Because equality is used, short doff can be ignored here.
 	 */
 	if (th->doff == (sizeof(*th) / 4)) {
 		tp->rx_opt.saw_tstamp = 0;
-		return 0;
+		return false;
 	} else if (tp->rx_opt.tstamp_ok &&
 		   th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
 		if (tcp_parse_aligned_timestamp(tp, th))
-			return 1;
+			return true;
 	}
 	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
-	return 1;
+	return true;
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -4222,7 +4303,7 @@
 	}
 }
 
-static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
+static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
 				  u32 end_seq)
 {
 	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
@@ -4230,9 +4311,9 @@
 			sp->start_seq = seq;
 		if (after(end_seq, sp->end_seq))
 			sp->end_seq = end_seq;
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
@@ -4428,10 +4509,10 @@
 	}
 }
 
-static int tcp_prune_ofo_queue(struct sock *sk);
+static bool tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
-static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
 {
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
 	    !sk_rmem_schedule(sk, size)) {
@@ -4450,6 +4531,41 @@
 	return 0;
 }
 
+/**
+ * tcp_try_coalesce - try to merge skb to prior one
+ * @sk: socket
+ * @to: prior buffer
+ * @from: buffer to add in queue
+ * @fragstolen: pointer to boolean
+ *
+ * Before queueing skb @from after @to, try to merge them
+ * to reduce overall memory use and queue lengths, if cost is small.
+ * Packets in ofo or receive queues can stay a long time.
+ * Better try to coalesce them right now to avoid future collapses.
+ * Returns true if caller should free @from instead of queueing it
+ */
+static bool tcp_try_coalesce(struct sock *sk,
+			     struct sk_buff *to,
+			     struct sk_buff *from,
+			     bool *fragstolen)
+{
+	int delta;
+
+	*fragstolen = false;
+
+	if (tcp_hdr(from)->fin)
+		return false;
+	if (!skb_try_coalesce(to, from, fragstolen, &delta))
+		return false;
+
+	atomic_add(delta, &sk->sk_rmem_alloc);
+	sk_mem_charge(sk, delta);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
+	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
+	return true;
+}
+
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -4488,23 +4604,13 @@
 	end_seq = TCP_SKB_CB(skb)->end_seq;
 
 	if (seq == TCP_SKB_CB(skb1)->end_seq) {
-		/* Packets in ofo can stay in queue a long time.
-		 * Better try to coalesce them right now
-		 * to avoid future tcp_collapse_ofo_queue(),
-		 * probably the most expensive function in tcp stack.
-		 */
-		if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) {
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPRCVCOALESCE);
-			BUG_ON(skb_copy_bits(skb, 0,
-					     skb_put(skb1, skb->len),
-					     skb->len));
-			TCP_SKB_CB(skb1)->end_seq = end_seq;
-			TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
-			__kfree_skb(skb);
-			skb = NULL;
-		} else {
+		bool fragstolen;
+
+		if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
 			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+		} else {
+			kfree_skb_partial(skb, fragstolen);
+			skb = NULL;
 		}
 
 		if (!tp->rx_opt.num_sacks ||
@@ -4580,12 +4686,65 @@
 		skb_set_owner_r(skb, sk);
 }
 
+static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
+		  bool *fragstolen)
+{
+	int eaten;
+	struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
+
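+	/* Strip the header, then try to coalesce the payload into the tail
+	 * skb of the receive queue; otherwise queue the skb as-is.
+	 */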
+	__skb_pull(skb, hdrlen);
+	eaten = (tail &&
+		 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0;
+	tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+	if (!eaten) {
+		__skb_queue_tail(&sk->sk_receive_queue, skb);
+		skb_set_owner_r(skb, sk);
+	}
+	return eaten;
+}
+
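+/* TCP_REPAIR: rebuild the receive queue by wrapping user data in a zeroed
+ * TCP header and queueing it as if it had just been received.
+ */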
+int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
+{
+	struct sk_buff *skb;
+	struct tcphdr *th;
+	bool fragstolen;
+
+	if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
+		goto err;
+
+	skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
+	if (!skb)
+		goto err;
+
+	th = (struct tcphdr *)skb_put(skb, sizeof(*th));
+	skb_reset_transport_header(skb);
+	memset(th, 0, sizeof(*th));
+
+	if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
+		goto err_free;
+
+	TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
+	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
+	TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
+
+	if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) {
+		WARN_ON_ONCE(fragstolen); /* should not happen */
+		__kfree_skb(skb);
+	}
+	return size;
+
+err_free:
+	kfree_skb(skb);
+err:
+	return -ENOMEM;
+}
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int eaten = -1;
+	bool fragstolen = false;
 
 	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
 		goto drop;
@@ -4630,8 +4789,7 @@
 			    tcp_try_rmem_schedule(sk, skb->truesize))
 				goto drop;
 
-			skb_set_owner_r(skb, sk);
-			__skb_queue_tail(&sk->sk_receive_queue, skb);
+			eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
 		}
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		if (skb->len)
@@ -4655,7 +4813,7 @@
 		tcp_fast_path_check(sk);
 
 		if (eaten > 0)
-			__kfree_skb(skb);
+			kfree_skb_partial(skb, fragstolen);
 		else if (!sock_flag(sk, SOCK_DEAD))
 			sk->sk_data_ready(sk, 0);
 		return;
@@ -4875,10 +5033,10 @@
  * Purge the out-of-order queue.
  * Return true if queue was pruned.
  */
-static int tcp_prune_ofo_queue(struct sock *sk)
+static bool tcp_prune_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int res = 0;
+	bool res = false;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
@@ -4892,7 +5050,7 @@
 		if (tp->rx_opt.sack_ok)
 			tcp_sack_reset(&tp->rx_opt);
 		sk_mem_reclaim(sk);
-		res = 1;
+		res = true;
 	}
 	return res;
 }
@@ -4969,7 +5127,7 @@
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static int tcp_should_expand_sndbuf(const struct sock *sk)
+static bool tcp_should_expand_sndbuf(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
@@ -4977,21 +5135,21 @@
 	 * not modify it.
 	 */
 	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
-		return 0;
+		return false;
 
 	/* If we are under global TCP memory pressure, do not expand.  */
 	if (sk_under_memory_pressure(sk))
-		return 0;
+		return false;
 
 	/* If we are under soft global TCP memory pressure, do not expand.  */
 	if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
-		return 0;
+		return false;
 
 	/* If we filled the congestion window, do not expand.  */
 	if (tp->packets_out >= tp->snd_cwnd)
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /* When incoming ACK allowed to free some skb from write_queue,
@@ -5217,16 +5375,16 @@
 }
 
 #ifdef CONFIG_NET_DMA
-static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
+static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 				  int hlen)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int chunk = skb->len - hlen;
 	int dma_cookie;
-	int copied_early = 0;
+	bool copied_early = false;
 
 	if (tp->ucopy.wakeup)
-		return 0;
+		return false;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
 		tp->ucopy.dma_chan = net_dma_find_channel();
@@ -5242,7 +5400,7 @@
 			goto out;
 
 		tp->ucopy.dma_cookie = dma_cookie;
-		copied_early = 1;
+		copied_early = true;
 
 		tp->ucopy.len -= chunk;
 		tp->copied_seq += chunk;
@@ -5434,6 +5592,7 @@
 		} else {
 			int eaten = 0;
 			int copied_early = 0;
+			bool fragstolen = false;
 
 			if (tp->copied_seq == tp->rcv_nxt &&
 			    len - tcp_header_len <= tp->ucopy.len) {
@@ -5491,10 +5650,8 @@
 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 				/* Bulk data transfer: receiver */
-				__skb_pull(skb, tcp_header_len);
-				__skb_queue_tail(&sk->sk_receive_queue, skb);
-				skb_set_owner_r(skb, sk);
-				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+				eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
+						      &fragstolen);
 			}
 
 			tcp_event_data_recv(sk, skb);
@@ -5516,7 +5673,7 @@
 			else
 #endif
 			if (eaten)
-				__kfree_skb(skb);
+				kfree_skb_partial(skb, fragstolen);
 			else
 				sk->sk_data_ready(sk, 0);
 			return 0;
@@ -5560,6 +5717,44 @@
 }
 EXPORT_SYMBOL(tcp_rcv_established);
 
+void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
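+	/* Moved out of tcp_rcv_synsent_state_process() so that repair mode
+	 * can establish a socket without going through the handshake.
+	 */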
+	tcp_set_state(sk, TCP_ESTABLISHED);
+
+	if (skb != NULL)
+		security_inet_conn_established(sk, skb);
+
+	/* Make sure socket is routed, for correct metrics.  */
+	icsk->icsk_af_ops->rebuild_header(sk);
+
+	tcp_init_metrics(sk);
+
+	tcp_init_congestion_control(sk);
+
+	/* Prevent spurious tcp_cwnd_restart() on first data
+	 * packet.
+	 */
+	tp->lsndtime = tcp_time_stamp;
+
+	tcp_init_buffer_space(sk);
+
+	if (sock_flag(sk, SOCK_KEEPOPEN))
+		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
+
+	if (!tp->rx_opt.snd_wscale)
+		__tcp_fast_path_on(tp, tp->snd_wnd);
+	else
+		tp->pred_flags = 0;
+
+	if (!sock_flag(sk, SOCK_DEAD)) {
+		sk->sk_state_change(sk);
+		sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+	}
+}
+
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 					 const struct tcphdr *th, unsigned int len)
 {
@@ -5692,36 +5887,8 @@
 		}
 
 		smp_mb();
-		tcp_set_state(sk, TCP_ESTABLISHED);
 
-		security_inet_conn_established(sk, skb);
-
-		/* Make sure socket is routed, for correct metrics.  */
-		icsk->icsk_af_ops->rebuild_header(sk);
-
-		tcp_init_metrics(sk);
-
-		tcp_init_congestion_control(sk);
-
-		/* Prevent spurious tcp_cwnd_restart() on first data
-		 * packet.
-		 */
-		tp->lsndtime = tcp_time_stamp;
-
-		tcp_init_buffer_space(sk);
-
-		if (sock_flag(sk, SOCK_KEEPOPEN))
-			inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
-
-		if (!tp->rx_opt.snd_wscale)
-			__tcp_fast_path_on(tp, tp->snd_wnd);
-		else
-			tp->pred_flags = 0;
-
-		if (!sock_flag(sk, SOCK_DEAD)) {
-			sk->sk_state_change(sk);
-			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
-		}
+		tcp_finish_connect(sk, skb);
 
 		if (sk->sk_write_pending ||
 		    icsk->icsk_accept_queue.rskq_defer_accept ||
@@ -5735,8 +5902,6 @@
 			 */
 			inet_csk_schedule_ack(sk);
 			icsk->icsk_ack.lrcvtime = tcp_time_stamp;
-			icsk->icsk_ack.ato	 = TCP_ATO_MIN;
-			tcp_incr_quickack(sk);
 			tcp_enter_quickack_mode(sk);
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 						  TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0cb86ce..a43b87d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -138,6 +138,14 @@
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 
+static int tcp_repair_connect(struct sock *sk)
+{
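+	/* Repaired sockets skip the three-way handshake: set up the
+	 * connection state and go straight to ESTABLISHED.
+	 */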
+	tcp_connect_init(sk);
+	tcp_finish_connect(sk, NULL);
+
+	return 0;
+}
+
 /* This will initiate an outgoing connection. */
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -196,7 +204,8 @@
 		/* Reset inherited state */
 		tp->rx_opt.ts_recent	   = 0;
 		tp->rx_opt.ts_recent_stamp = 0;
-		tp->write_seq		   = 0;
+		if (likely(!tp->repair))
+			tp->write_seq	   = 0;
 	}
 
 	if (tcp_death_row.sysctl_tw_recycle &&
@@ -247,7 +256,7 @@
 	sk->sk_gso_type = SKB_GSO_TCPV4;
 	sk_setup_caps(sk, &rt->dst);
 
-	if (!tp->write_seq)
+	if (!tp->write_seq && likely(!tp->repair))
 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
 							   inet->inet_daddr,
 							   inet->inet_sport,
@@ -255,7 +264,11 @@
 
 	inet->inet_id = tp->write_seq ^ jiffies;
 
-	err = tcp_connect(sk);
+	if (likely(!tp->repair))
+		err = tcp_connect(sk);
+	else
+		err = tcp_repair_connect(sk);
+
 	rt = NULL;
 	if (err)
 		goto failure;
@@ -853,14 +866,14 @@
 }
 
 /*
- * Return 1 if a syncookie should be sent
+ * Return true if a syncookie should be sent
  */
-int tcp_syn_flood_action(struct sock *sk,
+bool tcp_syn_flood_action(struct sock *sk,
 			 const struct sk_buff *skb,
 			 const char *proto)
 {
 	const char *msg = "Dropping request";
-	int want_cookie = 0;
+	bool want_cookie = false;
 	struct listen_sock *lopt;
 
 
@@ -868,7 +881,7 @@
 #ifdef CONFIG_SYN_COOKIES
 	if (sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
-		want_cookie = 1;
+		want_cookie = true;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
 	} else
 #endif
@@ -1183,7 +1196,7 @@
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
 	/*
 	 * This gets called for each TCP segment that arrives
@@ -1206,16 +1219,16 @@
 
 	/* We've parsed the options - do we have a hash? */
 	if (!hash_expected && !hash_location)
-		return 0;
+		return false;
 
 	if (hash_expected && !hash_location) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
-		return 1;
+		return true;
 	}
 
 	if (!hash_expected && hash_location) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
-		return 1;
+		return true;
 	}
 
 	/* Okay, so this is hash_expected and hash_location -
@@ -1226,15 +1239,14 @@
 				      NULL, NULL, skb);
 
 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
-		if (net_ratelimit()) {
-			pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
-				&iph->saddr, ntohs(th->source),
-				&iph->daddr, ntohs(th->dest),
-				genhash ? " tcp_v4_calc_md5_hash failed" : "");
-		}
-		return 1;
+		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
+				     &iph->saddr, ntohs(th->source),
+				     &iph->daddr, ntohs(th->dest),
+				     genhash ? " tcp_v4_calc_md5_hash failed"
+				     : "");
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 #endif
@@ -1268,7 +1280,7 @@
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-	int want_cookie = 0;
+	bool want_cookie = false;
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1327,7 +1339,7 @@
 		while (l-- > 0)
 			*c++ ^= *hash_location++;
 
-		want_cookie = 0;	/* not our kind of cookie */
+		want_cookie = false;	/* not our kind of cookie */
 		tmp_ext.cookie_out_never = 0; /* false */
 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 	} else if (!tp->rx_opt.cookie_in_always) {
@@ -1355,7 +1367,7 @@
 		goto drop_and_free;
 
 	if (!want_cookie || tmp_opt.tstamp_ok)
-		TCP_ECN_create_request(req, tcp_hdr(skb));
+		TCP_ECN_create_request(req, skb);
 
 	if (want_cookie) {
 		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
@@ -1739,7 +1751,8 @@
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb,
+					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
@@ -1875,64 +1888,15 @@
 static int tcp_v4_init_sock(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
 
-	skb_queue_head_init(&tp->out_of_order_queue);
-	tcp_init_xmit_timers(sk);
-	tcp_prequeue_init(tp);
-
-	icsk->icsk_rto = TCP_TIMEOUT_INIT;
-	tp->mdev = TCP_TIMEOUT_INIT;
-
-	/* So many TCP implementations out there (incorrectly) count the
-	 * initial SYN frame in their delayed-ACK and congestion control
-	 * algorithms that we must have the following bandaid to talk
-	 * efficiently to them.  -DaveM
-	 */
-	tp->snd_cwnd = TCP_INIT_CWND;
-
-	/* See draft-stevens-tcpca-spec-01 for discussion of the
-	 * initialization of these values.
-	 */
-	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-	tp->snd_cwnd_clamp = ~0;
-	tp->mss_cache = TCP_MSS_DEFAULT;
-
-	tp->reordering = sysctl_tcp_reordering;
-	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
-
-	sk->sk_state = TCP_CLOSE;
-
-	sk->sk_write_space = sk_stream_write_space;
-	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+	tcp_init_sock(sk);
 
 	icsk->icsk_af_ops = &ipv4_specific;
-	icsk->icsk_sync_mss = tcp_sync_mss;
+
 #ifdef CONFIG_TCP_MD5SIG
-	tp->af_specific = &tcp_sock_ipv4_specific;
+	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
 #endif
 
-	/* TCP Cookie Transactions */
-	if (sysctl_tcp_cookie_size > 0) {
-		/* Default, cookies without s_data_payload. */
-		tp->cookie_values =
-			kzalloc(sizeof(*tp->cookie_values),
-				sk->sk_allocation);
-		if (tp->cookie_values != NULL)
-			kref_init(&tp->cookie_values->kref);
-	}
-	/* Presumed zeroed, in order of appearance:
-	 *	cookie_in_always, cookie_out_never,
-	 *	s_data_constant, s_data_in, s_data_out
-	 */
-	sk->sk_sndbuf = sysctl_tcp_wmem[1];
-	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
-
-	local_bh_disable();
-	sock_update_memcg(sk);
-	sk_sockets_allocated_inc(sk);
-	local_bh_enable();
-
 	return 0;
 }
 
@@ -2109,7 +2073,7 @@
 	return rc;
 }
 
-static inline int empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(struct tcp_iter_state *st)
 {
 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
 		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
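
The tcp_ipv4.c hunks above gate write_seq initialisation, ISN selection and the
tcp_connect() call behind "likely(!tp->repair)" and dispatch to
tcp_repair_connect() when repair mode is on. A minimal user-space sketch of how
a checkpoint/restore tool might reach that path, assuming the TCP_REPAIR,
TCP_REPAIR_QUEUE and TCP_QUEUE_SEQ socket options introduced elsewhere in this
series (error handling omitted):

	#include <stdint.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/tcp.h>

	static int restore_outgoing(const struct sockaddr_in *dst,
				    uint32_t saved_write_seq)
	{
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
		int on = 1, q = TCP_SEND_QUEUE;

		/* Needs CAP_NET_ADMIN; flips tp->repair on. */
		setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));

		/* Pre-load write_seq so the !tp->repair checks in
		 * tcp_v4_connect() leave it untouched. */
		setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
		setsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ, &saved_write_seq,
			   sizeof(saved_write_seq));

		/* connect() now takes the tcp_repair_connect() branch:
		 * no SYN handshake, the socket is simply re-established. */
		return connect(fd, (const struct sockaddr *)dst, sizeof(*dst));
	}
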
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 3cabafb..b85d9fe 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -55,7 +55,7 @@
  * state.
  */
 
-static int tcp_remember_stamp(struct sock *sk)
+static bool tcp_remember_stamp(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -72,13 +72,13 @@
 		}
 		if (release_it)
 			inet_putpeer(peer);
-		return 1;
+		return true;
 	}
 
-	return 0;
+	return false;
 }
 
-static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
+static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
 {
 	struct sock *sk = (struct sock *) tw;
 	struct inet_peer *peer;
@@ -94,17 +94,17 @@
 			peer->tcp_ts	   = tcptw->tw_ts_recent;
 		}
 		inet_putpeer(peer);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
-static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
+static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 {
 	if (seq == s_win)
-		return 1;
+		return true;
 	if (after(end_seq, s_win) && before(seq, e_win))
-		return 1;
+		return true;
 	return seq == e_win && seq == end_seq;
 }
 
@@ -143,7 +143,7 @@
 	struct tcp_options_received tmp_opt;
 	const u8 *hash_location;
 	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
-	int paws_reject = 0;
+	bool paws_reject = false;
 
 	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
@@ -316,7 +316,7 @@
 	struct inet_timewait_sock *tw = NULL;
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
-	int recycle_ok = 0;
+	bool recycle_ok = false;
 
 	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
 		recycle_ok = tcp_remember_stamp(sk);
@@ -482,6 +482,7 @@
 		newtp->sacked_out = 0;
 		newtp->fackets_out = 0;
 		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+		tcp_enable_early_retrans(newtp);
 
 		/* So many TCP implementations out there (incorrectly) count the
 		 * initial SYN frame in their delayed-ACK and congestion control
@@ -574,7 +575,7 @@
 	struct sock *child;
 	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
-	int paws_reject = 0;
+	bool paws_reject = false;
 
 	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(struct tcphdr)>>2)) {
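
For a concrete reading of the converted tcp_in_window() above: with a receive
window of s_win = 1000 and e_win = 2000, a segment covering
seq = 900 .. end_seq = 1100 is accepted (end_seq is after s_win and seq is
before e_win), a zero-length probe at seq = end_seq = 2000 is accepted by the
final equality test, and a stale segment covering 800 .. 950 is rejected; this
is the usual RFC 793 acceptability check, now returning bool instead of 1/0.
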
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7ac6423..803cbfe 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -34,6 +34,8 @@
  *
  */
 
+#define pr_fmt(fmt) "TCP: " fmt
+
 #include <net/tcp.h>
 
 #include <linux/compiler.h>
@@ -78,9 +80,8 @@
 		tp->frto_counter = 3;
 
 	tp->packets_out += tcp_skb_pcount(skb);
-	if (!prior_packets)
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+	if (!prior_packets || tp->early_retrans_delayed)
+		tcp_rearm_rto(sk);
 }
 
 /* SND.NXT, if window was not shrunk.
@@ -369,7 +370,7 @@
 	TCP_SKB_CB(skb)->end_seq = seq;
 }
 
-static inline int tcp_urg_mode(const struct tcp_sock *tp)
+static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 {
 	return tp->snd_una != tp->snd_up;
 }
@@ -563,13 +564,13 @@
 /* Compute TCP options for SYN packets. This is not the final
  * network wire format yet.
  */
-static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 				struct tcp_out_options *opts,
 				struct tcp_md5sig_key **md5)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_cookie_values *cvp = tp->cookie_values;
-	unsigned remaining = MAX_TCP_OPTION_SPACE;
+	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
 			 tcp_cookie_size_check(cvp->cookie_desired) :
 			 0;
@@ -663,15 +664,15 @@
 }
 
 /* Set up TCP options for SYN-ACKs. */
-static unsigned tcp_synack_options(struct sock *sk,
+static unsigned int tcp_synack_options(struct sock *sk,
 				   struct request_sock *req,
-				   unsigned mss, struct sk_buff *skb,
+				   unsigned int mss, struct sk_buff *skb,
 				   struct tcp_out_options *opts,
 				   struct tcp_md5sig_key **md5,
 				   struct tcp_extend_values *xvp)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
-	unsigned remaining = MAX_TCP_OPTION_SPACE;
+	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 	u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
 			 xvp->cookie_plus :
 			 0;
@@ -742,13 +743,13 @@
 /* Compute TCP options for ESTABLISHED sockets. This is not the
  * final wire format yet.
  */
-static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
 					struct tcp_out_options *opts,
 					struct tcp_md5sig_key **md5)
 {
 	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
-	unsigned size = 0;
+	unsigned int size = 0;
 	unsigned int eff_sacks;
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -770,9 +771,9 @@
 
 	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 	if (unlikely(eff_sacks)) {
-		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
+		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 		opts->num_sack_blocks =
-			min_t(unsigned, eff_sacks,
+			min_t(unsigned int, eff_sacks,
 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 			      TCPOLEN_SACK_PERBLOCK);
 		size += TCPOLEN_SACK_BASE_ALIGNED +
@@ -801,7 +802,7 @@
 	struct tcp_sock *tp;
 	struct tcp_skb_cb *tcb;
 	struct tcp_out_options opts;
-	unsigned tcp_options_size, tcp_header_size;
+	unsigned int tcp_options_size, tcp_header_size;
 	struct tcp_md5sig_key *md5;
 	struct tcphdr *th;
 	int err;
@@ -1150,7 +1151,7 @@
 }
 
 /* Calculate MSS. Not accounting for SACKs here.  */
-int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
+int tcp_mtu_to_mss(struct sock *sk, int pmtu)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1161,6 +1162,14 @@
 	 */
 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
 
+	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
+	if (icsk->icsk_af_ops->net_frag_header_len) {
+		const struct dst_entry *dst = __sk_dst_get(sk);
+
+		if (dst && dst_allfrag(dst))
+			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
+	}
+
 	/* Clamp it (mss_clamp does not include tcp options) */
 	if (mss_now > tp->rx_opt.mss_clamp)
 		mss_now = tp->rx_opt.mss_clamp;
@@ -1179,7 +1188,7 @@
 }
 
 /* Inverse of above */
-int tcp_mss_to_mtu(const struct sock *sk, int mss)
+int tcp_mss_to_mtu(struct sock *sk, int mss)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1190,6 +1199,13 @@
 	      icsk->icsk_ext_hdr_len +
 	      icsk->icsk_af_ops->net_header_len;
 
+	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
+	if (icsk->icsk_af_ops->net_frag_header_len) {
+		const struct dst_entry *dst = __sk_dst_get(sk);
+
+		if (dst && dst_allfrag(dst))
+			mtu += icsk->icsk_af_ops->net_frag_header_len;
+	}
 	return mtu;
 }
 
@@ -1259,7 +1275,7 @@
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct dst_entry *dst = __sk_dst_get(sk);
 	u32 mss_now;
-	unsigned header_len;
+	unsigned int header_len;
 	struct tcp_out_options opts;
 	struct tcp_md5sig_key *md5;
 
@@ -1375,33 +1391,33 @@
 }
 
 /* Minshall's variant of the Nagle send check. */
-static inline int tcp_minshall_check(const struct tcp_sock *tp)
+static inline bool tcp_minshall_check(const struct tcp_sock *tp)
 {
 	return after(tp->snd_sml, tp->snd_una) &&
 		!after(tp->snd_sml, tp->snd_nxt);
 }
 
-/* Return 0, if packet can be sent now without violation Nagle's rules:
+/* Return false, if packet can be sent now without violation Nagle's rules:
  * 1. It is full sized.
  * 2. Or it contains FIN. (already checked by caller)
  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
  *    With Minshall's modification: all sent small packets are ACKed.
  */
-static inline int tcp_nagle_check(const struct tcp_sock *tp,
+static inline bool tcp_nagle_check(const struct tcp_sock *tp,
 				  const struct sk_buff *skb,
-				  unsigned mss_now, int nonagle)
+				  unsigned int mss_now, int nonagle)
 {
 	return skb->len < mss_now &&
 		((nonagle & TCP_NAGLE_CORK) ||
 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
 }
 
-/* Return non-zero if the Nagle test allows this packet to be
+/* Return true if the Nagle test allows this packet to be
  * sent now.
  */
-static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
-				 unsigned int cur_mss, int nonagle)
+static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+				  unsigned int cur_mss, int nonagle)
 {
 	/* Nagle rule does not apply to frames, which sit in the middle of the
 	 * write_queue (they have no chances to get new data).
@@ -1410,24 +1426,25 @@
 	 * argument based upon the location of SKB in the send queue.
 	 */
 	if (nonagle & TCP_NAGLE_PUSH)
-		return 1;
+		return true;
 
 	/* Don't use the nagle rule for urgent data (or for the final FIN).
 	 * Nagle can be ignored during F-RTO too (see RFC4138).
 	 */
 	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
 	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
-		return 1;
+		return true;
 
 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 /* Does at least the first segment of SKB fit into the send window? */
-static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
-				   unsigned int cur_mss)
+static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
+			     const struct sk_buff *skb,
+			     unsigned int cur_mss)
 {
 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
@@ -1460,7 +1477,7 @@
 }
 
 /* Test if sending is allowed right now. */
-int tcp_may_send_now(struct sock *sk)
+bool tcp_may_send_now(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
@@ -1530,7 +1547,7 @@
  *
  * This algorithm is from John Heffner.
  */
-static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1590,11 +1607,11 @@
 	/* Ok, it looks like it is advisable to defer.  */
 	tp->tso_deferred = 1 | (jiffies << 1);
 
-	return 1;
+	return true;
 
 send_now:
 	tp->tso_deferred = 0;
-	return 0;
+	return false;
 }
 
 /* Create a new MTU probe if we are ready.
@@ -1736,11 +1753,11 @@
  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
  * account rare use of URG, this is not a big flaw.
  *
- * Returns 1, if no segments are in flight and we have queued segments, but
- * cannot send anything now because of SWS or another problem.
+ * Returns true, if no segments are in flight and we have queued segments,
+ * but cannot send anything now because of SWS or another problem.
  */
-static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
-			  int push_one, gfp_t gfp)
+static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+			   int push_one, gfp_t gfp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
@@ -1754,7 +1771,7 @@
 		/* Do MTU probing. */
 		result = tcp_mtu_probe(sk);
 		if (!result) {
-			return 0;
+			return false;
 		} else if (result > 0) {
 			sent_pkts = 1;
 		}
@@ -1813,7 +1830,7 @@
 
 	if (likely(sent_pkts)) {
 		tcp_cwnd_validate(sk);
-		return 0;
+		return false;
 	}
 	return !tp->packets_out && tcp_send_head(sk);
 }
@@ -2012,22 +2029,22 @@
 }
 
 /* Check if coalescing SKBs is legal. */
-static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
+static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
 {
 	if (tcp_skb_pcount(skb) > 1)
-		return 0;
+		return false;
 	/* TODO: SACK collapsing could be used to remove this condition */
 	if (skb_shinfo(skb)->nr_frags != 0)
-		return 0;
+		return false;
 	if (skb_cloned(skb))
-		return 0;
+		return false;
 	if (skb == tcp_send_head(sk))
-		return 0;
+		return false;
 	/* Some heurestics for collapsing over SACK'd could be invented */
 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /* Collapse packets in the retransmit queue to make to create
@@ -2038,7 +2055,7 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = to, *tmp;
-	int first = 1;
+	bool first = true;
 
 	if (!sysctl_tcp_retrans_collapse)
 		return;
@@ -2052,7 +2069,7 @@
 		space -= skb->len;
 
 		if (first) {
-			first = 0;
+			first = false;
 			continue;
 		}
 
@@ -2167,8 +2184,7 @@
 
 #if FASTRETRANS_DEBUG > 0
 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
-			if (net_ratelimit())
-				printk(KERN_DEBUG "retrans_out leaked.\n");
+			net_dbg_ratelimited("retrans_out leaked\n");
 		}
 #endif
 		if (!tp->retrans_out)
@@ -2193,18 +2209,18 @@
 /* Check if we forward retransmits are possible in the current
  * window/congestion state.
  */
-static int tcp_can_forward_retransmit(struct sock *sk)
+static bool tcp_can_forward_retransmit(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Forward retransmissions are possible only during Recovery. */
 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
-		return 0;
+		return false;
 
 	/* No forward retransmissions in Reno are possible. */
 	if (tcp_is_reno(tp))
-		return 0;
+		return false;
 
 	/* Yeah, we have to make difficult choice between forward transmission
 	 * and retransmission... Both ways have their merits...
@@ -2215,9 +2231,9 @@
 	 */
 
 	if (tcp_may_send_now(sk))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /* This gets called after a retransmit timeout, and the initially
@@ -2402,7 +2418,7 @@
 
 	skb = tcp_write_queue_head(sk);
 	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
-		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
+		pr_debug("%s: wrong queue state\n", __func__);
 		return -EFAULT;
 	}
 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
@@ -2562,7 +2578,7 @@
 EXPORT_SYMBOL(tcp_make_synack);
 
 /* Do all connect socket setups that can be done AF independent. */
-static void tcp_connect_init(struct sock *sk)
+void tcp_connect_init(struct sock *sk)
 {
 	const struct dst_entry *dst = __sk_dst_get(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2617,9 +2633,12 @@
 	tp->snd_una = tp->write_seq;
 	tp->snd_sml = tp->write_seq;
 	tp->snd_up = tp->write_seq;
-	tp->rcv_nxt = 0;
-	tp->rcv_wup = 0;
-	tp->copied_seq = 0;
+	tp->snd_nxt = tp->write_seq;
+
+	if (likely(!tp->repair))
+		tp->rcv_nxt = 0;
+	tp->rcv_wup = tp->rcv_nxt;
+	tp->copied_seq = tp->rcv_nxt;
 
 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
 	inet_csk(sk)->icsk_retransmits = 0;
@@ -2642,7 +2661,6 @@
 	/* Reserve space for headers. */
 	skb_reserve(buff, MAX_TCP_HEADER);
 
-	tp->snd_nxt = tp->write_seq;
 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
 	TCP_ECN_send_syn(sk, buff);
 
@@ -2791,6 +2809,15 @@
 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
 
+void tcp_send_window_probe(struct sock *sk)
+{
+	if (sk->sk_state == TCP_ESTABLISHED) {
+		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
+		tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
+		tcp_xmit_probe_skb(sk, 0);
+	}
+}
+
 /* Initiate keepalive or window probe from timer. */
 int tcp_write_wakeup(struct sock *sk)
 {
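
As a worked example of the new fragment-header adjustment in
tcp_mtu_to_mss()/tcp_mss_to_mtu(): the IPv6 fragment header is 8 bytes, so for
a path MTU of 1280 the basic calculation gives
1280 - 40 (IPv6 header) - 20 (TCP header) = 1220 bytes of MSS, and when the
route has RTAX_FEATURE_ALLFRAG set (dst_allfrag() true) a further 8 bytes are
reserved, leaving 1212 (before clamping to rx_opt.mss_clamp and before TCP
options are accounted for). This assumes the IPv6 af_ops advertise the 8-byte
fragment header via net_frag_header_len, which is set in the companion
tcp_ipv6.c change not shown here.
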
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index a981cdc..4526fe6 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -91,7 +91,7 @@
  * Note: arguments must match tcp_rcv_established()!
  */
 static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-			       struct tcphdr *th, unsigned len)
+			       struct tcphdr *th, unsigned int len)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_sock *inet = inet_sk(sk);
@@ -138,7 +138,7 @@
 	.entry	= jtcp_rcv_established,
 };
 
-static int tcpprobe_open(struct inode * inode, struct file * file)
+static int tcpprobe_open(struct inode *inode, struct file *file)
 {
 	/* Reset (empty) log */
 	spin_lock_bh(&tcp_probe.lock);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 34d4a02..e911e6c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -319,6 +319,11 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
+	if (tp->early_retrans_delayed) {
+		tcp_resume_early_retransmit(sk);
+		return;
+	}
+
 	if (!tp->packets_out)
 		goto out;
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fe14105..609397e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -107,6 +107,7 @@
 #include <net/checksum.h>
 #include <net/xfrm.h>
 #include <trace/events/udp.h>
+#include <linux/static_key.h>
 #include "udp_impl.h"
 
 struct udp_table udp_table __read_mostly;
@@ -206,7 +207,7 @@
 
 	if (!snum) {
 		int low, high, remaining;
-		unsigned rand;
+		unsigned int rand;
 		unsigned short first, last;
 		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 
@@ -846,7 +847,7 @@
 	 *	Get and verify the address.
 	 */
 	if (msg->msg_name) {
-		struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name;
+		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
 		if (msg->msg_namelen < sizeof(*usin))
 			return -EINVAL;
 		if (usin->sin_family != AF_INET) {
@@ -1379,6 +1380,14 @@
 
 }
 
+static struct static_key udp_encap_needed __read_mostly;
+void udp_encap_enable(void)
+{
+	if (!static_key_enabled(&udp_encap_needed))
+		static_key_slow_inc(&udp_encap_needed);
+}
+EXPORT_SYMBOL(udp_encap_enable);
+
 /* returns:
  *  -1: error
  *   0: success
@@ -1400,7 +1409,7 @@
 		goto drop;
 	nf_reset(skb);
 
-	if (up->encap_type) {
+	if (static_key_false(&udp_encap_needed) && up->encap_type) {
 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 
 		/*
@@ -1470,7 +1479,7 @@
 		goto drop;
 
 
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
 		goto drop;
 
 	rc = 0;
@@ -1479,7 +1488,7 @@
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		goto drop;
 	}
@@ -1760,6 +1769,7 @@
 			/* FALLTHROUGH */
 		case UDP_ENCAP_L2TPINUDP:
 			up->encap_type = val;
+			udp_encap_enable();
 			break;
 		default:
 			err = -ENOPROTOOPT;
@@ -2163,9 +2173,15 @@
 static __initdata unsigned long uhash_entries;
 static int __init set_uhash_entries(char *str)
 {
+	ssize_t ret;
+
 	if (!str)
 		return 0;
-	uhash_entries = simple_strtoul(str, &str, 0);
+
+	ret = kstrtoul(str, 0, &uhash_entries);
+	if (ret)
+		return 0;
+
 	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
 		uhash_entries = UDP_HTABLE_SIZE_MIN;
 	return 1;
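
The udp_encap_needed static key added above follows the usual static-branch
pattern: static_key_false() compiles to a straight-line no-op that is patched
into a jump only once the key is enabled (where jump labels are available), so
sockets that never enable encapsulation do not even evaluate the
up->encap_type test on receive. A minimal sketch of the same pattern with
made-up names (my_feature_key, do_slow_path):

	#include <linux/skbuff.h>
	#include <linux/static_key.h>

	static struct static_key my_feature_key __read_mostly; /* starts disabled */

	static void do_slow_path(struct sk_buff *skb);	/* hypothetical helper */

	static inline void rx_fast_path(struct sk_buff *skb)
	{
		if (static_key_false(&my_feature_key))	/* rarely-taken branch */
			do_slow_path(skb);
	}

	static void my_feature_enable(void)
	{
		if (!static_key_enabled(&my_feature_key))
			static_key_slow_inc(&my_feature_key);	/* patches the branch */
	}
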
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a949f1..a7f86a3 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -146,9 +146,17 @@
 	return udp_dump_one(&udp_table, in_skb, nlh, req);
 }
 
+static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+		void *info)
+{
+	r->idiag_rqueue = sk_rmem_alloc_get(sk);
+	r->idiag_wqueue = sk_wmem_alloc_get(sk);
+}
+
 static const struct inet_diag_handler udp_diag_handler = {
 	.dump		 = udp_diag_dump,
 	.dump_one	 = udp_diag_dump_one,
+	.idiag_get_info  = udp_diag_get_info,
 	.idiag_type	 = IPPROTO_UDP,
 };
 
@@ -167,6 +175,7 @@
 static const struct inet_diag_handler udplite_diag_handler = {
 	.dump		 = udplite_diag_dump,
 	.dump_one	 = udplite_diag_dump_one,
+	.idiag_get_info  = udp_diag_get_info,
 	.idiag_type	 = IPPROTO_UDPLITE,
 };
 
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index aaad650..5a681e2 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,7 @@
 			    size_t len, int noblock, int flags, int *addr_len);
 extern int	udp_sendpage(struct sock *sk, struct page *page, int offset,
 			     size_t size, int flags);
-extern int	udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
+extern int	udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 extern void	udp_destroy_sock(struct sock *sk);
 
 #ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index a0b4c5d..0d3426c 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -152,7 +152,7 @@
 
 		case IPPROTO_AH:
 			if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
-				__be32 *ah_hdr = (__be32*)xprth;
+				__be32 *ah_hdr = (__be32 *)xprth;
 
 				fl4->fl4_ipsec_spi = ah_hdr[1];
 			}
@@ -298,8 +298,8 @@
 	xfrm4_state_init();
 	xfrm4_policy_init();
 #ifdef CONFIG_SYSCTL
-	sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
-						xfrm4_policy_table);
+	sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4",
+					 xfrm4_policy_table);
 #endif
 }
 
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 36d7437..5728695 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -69,7 +69,7 @@
 
 config INET6_AH
 	tristate "IPv6: AH transformation"
-	select XFRM
+	select XFRM_ALGO
 	select CRYPTO
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
@@ -81,7 +81,7 @@
 
 config INET6_ESP
 	tristate "IPv6: ESP transformation"
-	select XFRM
+	select XFRM_ALGO
 	select CRYPTO
 	select CRYPTO_AUTHENC
 	select CRYPTO_HMAC
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7d5cb97..8f6411c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -38,6 +38,8 @@
  *						status etc.
  */
 
+#define pr_fmt(fmt) "IPv6: " fmt
+
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -66,6 +68,7 @@
 #include <net/sock.h>
 #include <net/snmp.h>
 
+#include <net/af_ieee802154.h>
 #include <net/ipv6.h>
 #include <net/protocol.h>
 #include <net/ndisc.h>
@@ -149,7 +152,7 @@
 				 unsigned long event);
 static int addrconf_ifdown(struct net_device *dev, int how);
 
-static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
+static void addrconf_dad_start(struct inet6_ifaddr *ifp);
 static void addrconf_dad_timer(unsigned long data);
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
 static void addrconf_dad_run(struct inet6_dev *idev);
@@ -326,20 +329,19 @@
 	WARN_ON(idev->mc_list != NULL);
 
 #ifdef NET_REFCNT_DEBUG
-	printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL");
+	pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
 #endif
 	dev_put(dev);
 	if (!idev->dead) {
-		pr_warning("Freeing alive inet6 device %p\n", idev);
+		pr_warn("Freeing alive inet6 device %p\n", idev);
 		return;
 	}
 	snmp6_free_dev(idev);
 	kfree_rcu(idev, rcu);
 }
-
 EXPORT_SYMBOL(in6_dev_finish_destroy);
 
-static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
+static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 {
 	struct inet6_dev *ndev;
 
@@ -372,7 +374,7 @@
 
 	if (snmp6_alloc_dev(ndev) < 0) {
 		ADBG((KERN_WARNING
-			"%s(): cannot allocate memory for statistics; dev=%s.\n",
+			"%s: cannot allocate memory for statistics; dev=%s.\n",
 			__func__, dev->name));
 		neigh_parms_release(&nd_tbl, ndev->nd_parms);
 		dev_put(dev);
@@ -382,7 +384,7 @@
 
 	if (snmp6_register_dev(ndev) < 0) {
 		ADBG((KERN_WARNING
-			"%s(): cannot create /proc/net/dev_snmp6/%s\n",
+			"%s: cannot create /proc/net/dev_snmp6/%s\n",
 			__func__, dev->name));
 		neigh_parms_release(&nd_tbl, ndev->nd_parms);
 		ndev->dead = 1;
@@ -400,9 +402,7 @@
 
 #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
 	if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
-		printk(KERN_INFO
-		       "%s: Disabled Multicast RS\n",
-		       dev->name);
+		pr_info("%s: Disabled Multicast RS\n", dev->name);
 		ndev->cnf.rtr_solicits = 0;
 	}
 #endif
@@ -441,7 +441,7 @@
 	return ndev;
 }
 
-static struct inet6_dev * ipv6_find_idev(struct net_device *dev)
+static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
 {
 	struct inet6_dev *idev;
 
@@ -542,7 +542,7 @@
 	WARN_ON(!hlist_unhashed(&ifp->addr_lst));
 
 #ifdef NET_REFCNT_DEBUG
-	printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
+	pr_debug("%s\n", __func__);
 #endif
 
 	in6_dev_put(ifp->idev);
@@ -551,7 +551,7 @@
 		pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
 
 	if (ifp->state != INET6_IFADDR_STATE_DEAD) {
-		pr_warning("Freeing alive inet6 address %p\n", ifp);
+		pr_warn("Freeing alive inet6 address %p\n", ifp);
 		return;
 	}
 	dst_release(&ifp->rt->dst);
@@ -841,8 +841,7 @@
 	in6_dev_hold(idev);
 	if (idev->cnf.use_tempaddr <= 0) {
 		write_unlock(&idev->lock);
-		printk(KERN_INFO
-			"ipv6_create_tempaddr(): use_tempaddr is disabled.\n");
+		pr_info("%s: use_tempaddr is disabled\n", __func__);
 		in6_dev_put(idev);
 		ret = -1;
 		goto out;
@@ -852,8 +851,8 @@
 		idev->cnf.use_tempaddr = -1;	/*XXX*/
 		spin_unlock_bh(&ifp->lock);
 		write_unlock(&idev->lock);
-		printk(KERN_WARNING
-			"ipv6_create_tempaddr(): regeneration time exceeded. disabled temporary address support.\n");
+		pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
+			__func__);
 		in6_dev_put(idev);
 		ret = -1;
 		goto out;
@@ -863,8 +862,8 @@
 	if (__ipv6_try_regen_rndid(idev, tmpaddr) < 0) {
 		spin_unlock_bh(&ifp->lock);
 		write_unlock(&idev->lock);
-		printk(KERN_WARNING
-			"ipv6_create_tempaddr(): regeneration of randomized interface id failed.\n");
+		pr_warn("%s: regeneration of randomized interface id failed\n",
+			__func__);
 		in6_ifa_put(ifp);
 		in6_dev_put(idev);
 		ret = -1;
@@ -914,8 +913,7 @@
 	if (!ift || IS_ERR(ift)) {
 		in6_ifa_put(ifp);
 		in6_dev_put(idev);
-		printk(KERN_INFO
-			"ipv6_create_tempaddr(): retry temporary address regeneration.\n");
+		pr_info("%s: retry temporary address regeneration\n", __func__);
 		tmpaddr = &addr;
 		write_lock(&idev->lock);
 		goto retry;
@@ -929,7 +927,7 @@
 	ift->tstamp = tmp_tstamp;
 	spin_unlock_bh(&ift->lock);
 
-	addrconf_dad_start(ift, 0);
+	addrconf_dad_start(ift);
 	in6_ifa_put(ift);
 	in6_dev_put(idev);
 out:
@@ -1332,7 +1330,6 @@
 	rcu_read_unlock();
 	return onlink;
 }
-
 EXPORT_SYMBOL(ipv6_chk_prefix);
 
 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
@@ -1416,9 +1413,8 @@
 		return;
 	}
 
-	if (net_ratelimit())
-		printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
-			ifp->idev->dev->name, &ifp->addr);
+	net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n",
+			     ifp->idev->dev->name, &ifp->addr);
 
 	if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
 		struct in6_addr addr;
@@ -1431,7 +1427,7 @@
 			/* DAD failed for link-local based on MAC address */
 			idev->cnf.disable_ipv6 = 1;
 
-			printk(KERN_INFO "%s: IPv6 being disabled!\n",
+			pr_info("%s: IPv6 being disabled!\n",
 				ifp->idev->dev->name);
 		}
 	}
@@ -1516,13 +1512,21 @@
 	return 0;
 }
 
+static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
+{
+	if (dev->addr_len != IEEE802154_ADDR_LEN)
+		return -1;
+	memcpy(eui, dev->dev_addr, 8);
+	return 0;
+}
+
 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
 {
 	/* XXX: inherit EUI-64 from other interface -- yoshfuji */
 	if (dev->addr_len != ARCNET_ALEN)
 		return -1;
 	memset(eui, 0, 7);
-	eui[7] = *(u8*)dev->dev_addr;
+	eui[7] = *(u8 *)dev->dev_addr;
 	return 0;
 }
 
@@ -1569,7 +1573,6 @@
 	switch (dev->type) {
 	case ARPHRD_ETHER:
 	case ARPHRD_FDDI:
-	case ARPHRD_IEEE802_TR:
 		return addrconf_ifid_eui48(eui, dev);
 	case ARPHRD_ARCNET:
 		return addrconf_ifid_arcnet(eui, dev);
@@ -1579,6 +1582,8 @@
 		return addrconf_ifid_sit(eui, dev);
 	case ARPHRD_IPGRE:
 		return addrconf_ifid_gre(eui, dev);
+	case ARPHRD_IEEE802154:
+		return addrconf_ifid_eui64(eui, dev);
 	}
 	return -1;
 }
@@ -1652,9 +1657,8 @@
 		idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
 		idev->cnf.max_desync_factor * HZ;
 	if (time_before(expires, jiffies)) {
-		printk(KERN_WARNING
-			"ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n",
-			idev->dev->name);
+		pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
+			__func__, idev->dev->name);
 		goto out;
 	}
 
@@ -1667,7 +1671,8 @@
 	in6_dev_put(idev);
 }
 
-static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) {
+static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+{
 	int ret = 0;
 
 	if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
@@ -1837,16 +1842,15 @@
 	prefered_lft = ntohl(pinfo->prefered);
 
 	if (prefered_lft > valid_lft) {
-		if (net_ratelimit())
-			printk(KERN_WARNING "addrconf: prefix option has invalid lifetime\n");
+		net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
 		return;
 	}
 
 	in6_dev = in6_dev_get(dev);
 
 	if (in6_dev == NULL) {
-		if (net_ratelimit())
-			printk(KERN_DEBUG "addrconf: device %s not configured\n", dev->name);
+		net_dbg_ratelimited("addrconf: device %s not configured\n",
+				    dev->name);
 		return;
 	}
 
@@ -1908,7 +1912,7 @@
 	/* Try to figure out our local address for this prefix */
 
 	if (pinfo->autoconf && in6_dev->cnf.autoconf) {
-		struct inet6_ifaddr * ifp;
+		struct inet6_ifaddr *ifp;
 		struct in6_addr addr;
 		int create = 0, update_lft = 0;
 
@@ -1921,9 +1925,8 @@
 			}
 			goto ok;
 		}
-		if (net_ratelimit())
-			printk(KERN_DEBUG "IPv6 addrconf: prefix with wrong length %d\n",
-			       pinfo->prefix_len);
+		net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
+				    pinfo->prefix_len);
 		in6_dev_put(in6_dev);
 		return;
 
@@ -1957,7 +1960,7 @@
 
 			update_lft = create = 1;
 			ifp->cstamp = jiffies;
-			addrconf_dad_start(ifp, RTF_ADDRCONF|RTF_PREFIX_RT);
+			addrconf_dad_start(ifp);
 		}
 
 		if (ifp) {
@@ -2236,7 +2239,7 @@
 		 * that the Optimistic flag should not be set for
 		 * manually configured addresses
 		 */
-		addrconf_dad_start(ifp, 0);
+		addrconf_dad_start(ifp);
 		in6_ifa_put(ifp);
 		addrconf_verify(0);
 		return 0;
@@ -2362,9 +2365,9 @@
 	}
 
 	for_each_netdev(net, dev) {
-		struct in_device * in_dev = __in_dev_get_rtnl(dev);
+		struct in_device *in_dev = __in_dev_get_rtnl(dev);
 		if (in_dev && (dev->flags & IFF_UP)) {
-			struct in_ifaddr * ifa;
+			struct in_ifaddr *ifa;
 
 			int flag = scope;
 
@@ -2401,7 +2404,7 @@
 	ASSERT_RTNL();
 
 	if ((idev = ipv6_find_idev(dev)) == NULL) {
-		printk(KERN_DEBUG "init loopback: add_dev failed\n");
+		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
 
@@ -2410,7 +2413,7 @@
 
 static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
 {
-	struct inet6_ifaddr * ifp;
+	struct inet6_ifaddr *ifp;
 	u32 addr_flags = IFA_F_PERMANENT;
 
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -2423,7 +2426,7 @@
 	ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
 	if (!IS_ERR(ifp)) {
 		addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
-		addrconf_dad_start(ifp, 0);
+		addrconf_dad_start(ifp);
 		in6_ifa_put(ifp);
 	}
 }
@@ -2431,15 +2434,15 @@
 static void addrconf_dev_config(struct net_device *dev)
 {
 	struct in6_addr addr;
-	struct inet6_dev    * idev;
+	struct inet6_dev *idev;
 
 	ASSERT_RTNL();
 
 	if ((dev->type != ARPHRD_ETHER) &&
 	    (dev->type != ARPHRD_FDDI) &&
-	    (dev->type != ARPHRD_IEEE802_TR) &&
 	    (dev->type != ARPHRD_ARCNET) &&
-	    (dev->type != ARPHRD_INFINIBAND)) {
+	    (dev->type != ARPHRD_INFINIBAND) &&
+	    (dev->type != ARPHRD_IEEE802154)) {
 		/* Alas, we support only Ethernet autoconfiguration. */
 		return;
 	}
@@ -2469,7 +2472,7 @@
 	 */
 
 	if ((idev = ipv6_find_idev(dev)) == NULL) {
-		printk(KERN_DEBUG "init sit: add_dev failed\n");
+		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
 
@@ -2499,12 +2502,12 @@
 	struct inet6_dev *idev;
 	struct in6_addr addr;
 
-	pr_info("ipv6: addrconf_gre_config(%s)\n", dev->name);
+	pr_info("%s(%s)\n", __func__, dev->name);
 
 	ASSERT_RTNL();
 
 	if ((idev = ipv6_find_idev(dev)) == NULL) {
-		printk(KERN_DEBUG "init gre: add_dev failed\n");
+		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
 
@@ -2544,7 +2547,7 @@
 		if (!ipv6_inherit_linklocal(idev, link_dev))
 			return;
 	}
-	printk(KERN_DEBUG "init ip6-ip6: add_linklocal failed\n");
+	pr_debug("init ip6-ip6: add_linklocal failed\n");
 }
 
 /*
@@ -2560,14 +2563,14 @@
 
 	idev = addrconf_add_dev(dev);
 	if (IS_ERR(idev)) {
-		printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
+		pr_debug("init ip6-ip6: add_dev failed\n");
 		return;
 	}
 	ip6_tnl_add_linklocal(idev);
 }
 
 static int addrconf_notify(struct notifier_block *this, unsigned long event,
-			   void * data)
+			   void *data)
 {
 	struct net_device *dev = (struct net_device *) data;
 	struct inet6_dev *idev = __in6_dev_get(dev);
@@ -2591,9 +2594,7 @@
 		if (event == NETDEV_UP) {
 			if (!addrconf_qdisc_ok(dev)) {
 				/* device is not ready yet. */
-				printk(KERN_INFO
-					"ADDRCONF(NETDEV_UP): %s: "
-					"link is not ready\n",
+				pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
 					dev->name);
 				break;
 			}
@@ -2618,10 +2619,8 @@
 				idev->if_flags |= IF_READY;
 			}
 
-			printk(KERN_INFO
-					"ADDRCONF(NETDEV_CHANGE): %s: "
-					"link becomes ready\n",
-					dev->name);
+			pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
+				dev->name);
 
 			run_pending = 1;
 		}
@@ -2892,8 +2891,7 @@
 		 * Note: we do not support deprecated "all on-link"
 		 * assumption any longer.
 		 */
-		printk(KERN_DEBUG "%s: no IPv6 routers present\n",
-		       idev->dev->name);
+		pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
 	}
 
 out:
@@ -2918,7 +2916,7 @@
 	addrconf_mod_timer(ifp, AC_DAD, rand_num);
 }
 
-static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
+static void addrconf_dad_start(struct inet6_ifaddr *ifp)
 {
 	struct inet6_dev *idev = ifp->idev;
 	struct net_device *dev = idev->dev;
@@ -3791,7 +3789,7 @@
 	return inet6_dump_addr(skb, cb, type);
 }
 
-static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
+static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 			     void *arg)
 {
 	struct net *net = sock_net(in_skb->sk);
@@ -3986,14 +3984,14 @@
 	struct nlattr *nla;
 	struct ifla_cacheinfo ci;
 
-	NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
-
+	if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
+		goto nla_put_failure;
 	ci.max_reasm_len = IPV6_MAXPLEN;
 	ci.tstamp = cstamp_delta(idev->tstamp);
 	ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
 	ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
-	NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
-
+	if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
+		goto nla_put_failure;
 	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
 	if (nla == NULL)
 		goto nla_put_failure;
@@ -4058,15 +4056,13 @@
 	hdr->ifi_flags = dev_get_flags(dev);
 	hdr->ifi_change = 0;
 
-	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
-
-	if (dev->addr_len)
-		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
-
-	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
-	if (dev->ifindex != dev->iflink)
-		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
-
+	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
+	    (dev->addr_len &&
+	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
+	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
+	    (dev->ifindex != dev->iflink &&
+	     nla_put_u32(skb, IFLA_LINK, dev->iflink)))
+		goto nla_put_failure;
 	protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
 	if (protoinfo == NULL)
 		goto nla_put_failure;
@@ -4179,12 +4175,12 @@
 	if (pinfo->autoconf)
 		pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
 
-	NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix);
-
+	if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
+		goto nla_put_failure;
 	ci.preferred_time = ntohl(pinfo->prefered);
 	ci.valid_time = ntohl(pinfo->valid);
-	NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci);
-
+	if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -4368,7 +4364,6 @@
 {
 	struct ctl_table_header *sysctl_header;
 	ctl_table addrconf_vars[DEVCONF_MAX+1];
-	char *dev_name;
 } addrconf_sysctl __read_mostly = {
 	.sysctl_header = NULL,
 	.addrconf_vars = {
@@ -4597,17 +4592,7 @@
 {
 	int i;
 	struct addrconf_sysctl_table *t;
-
-#define ADDRCONF_CTL_PATH_DEV	3
-
-	struct ctl_path addrconf_ctl_path[] = {
-		{ .procname = "net", },
-		{ .procname = "ipv6", },
-		{ .procname = "conf", },
-		{ /* to be set */ },
-		{ },
-	};
-
+	char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
 
 	t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
 	if (t == NULL)
@@ -4619,27 +4604,15 @@
 		t->addrconf_vars[i].extra2 = net;
 	}
 
-	/*
-	 * Make a copy of dev_name, because '.procname' is regarded as const
-	 * by sysctl and we wouldn't want anyone to change it under our feet
-	 * (see SIOCSIFNAME).
-	 */
-	t->dev_name = kstrdup(dev_name, GFP_KERNEL);
-	if (!t->dev_name)
-		goto free;
+	snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
 
-	addrconf_ctl_path[ADDRCONF_CTL_PATH_DEV].procname = t->dev_name;
-
-	t->sysctl_header = register_net_sysctl_table(net, addrconf_ctl_path,
-			t->addrconf_vars);
+	t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars);
 	if (t->sysctl_header == NULL)
-		goto free_procname;
+		goto free;
 
 	p->sysctl = t;
 	return 0;
 
-free_procname:
-	kfree(t->dev_name);
 free:
 	kfree(t);
 out:
@@ -4656,7 +4629,6 @@
 	t = p->sysctl;
 	p->sysctl = NULL;
 	unregister_net_sysctl_table(t->sysctl_header);
-	kfree(t->dev_name);
 	kfree(t);
 }
 
@@ -4775,8 +4747,8 @@
 
 	err = ipv6_addr_label_init();
 	if (err < 0) {
-		printk(KERN_CRIT "IPv6 Addrconf:"
-		       " cannot initialize default policy table: %d.\n", err);
+		pr_crit("%s: cannot initialize default policy table: %d\n",
+			__func__, err);
 		goto out;
 	}
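
The NLA_PUT*() conversions in the addrconf netlink fill routines above all
follow the same shape: the removed macros jumped to nla_put_failure behind the
caller's back, while the nla_put_*() helpers return non-zero on failure so the
error path is written out at the call site. A small sketch with hypothetical
names (my_fill_info, MY_ATTR_*):

	#include <linux/netdevice.h>
	#include <net/netlink.h>

	enum { MY_ATTR_IFNAME = 1, MY_ATTR_MTU, MY_ATTR_LINK };

	static int my_fill_info(struct sk_buff *skb, const struct net_device *dev)
	{
		if (nla_put_string(skb, MY_ATTR_IFNAME, dev->name) ||
		    nla_put_u32(skb, MY_ATTR_MTU, dev->mtu) ||
		    (dev->ifindex != dev->iflink &&
		     nla_put_u32(skb, MY_ATTR_LINK, dev->iflink)))
			goto nla_put_failure;
		return 0;

	nla_put_failure:
		return -EMSGSIZE;
	}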
 
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 399287e..d051e5f 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -8,9 +8,9 @@
 
 #define IPV6_ADDR_SCOPE_TYPE(scope)	((scope) << 16)
 
-static inline unsigned ipv6_addr_scope2type(unsigned scope)
+static inline unsigned int ipv6_addr_scope2type(unsigned int scope)
 {
-	switch(scope) {
+	switch (scope) {
 	case IPV6_ADDR_SCOPE_NODELOCAL:
 		return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
 			IPV6_ADDR_LOOPBACK);
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 2d8ddba..eb6a636 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -129,7 +129,7 @@
 	ip6addrlbl_free(container_of(h, struct ip6addrlbl_entry, rcu));
 }
 
-static inline int ip6addrlbl_hold(struct ip6addrlbl_entry *p)
+static bool ip6addrlbl_hold(struct ip6addrlbl_entry *p)
 {
 	return atomic_inc_not_zero(&p->refcnt);
 }
@@ -141,20 +141,20 @@
 }
 
 /* Find label */
-static int __ip6addrlbl_match(struct net *net,
-			      struct ip6addrlbl_entry *p,
-			      const struct in6_addr *addr,
-			      int addrtype, int ifindex)
+static bool __ip6addrlbl_match(struct net *net,
+			       const struct ip6addrlbl_entry *p,
+			       const struct in6_addr *addr,
+			       int addrtype, int ifindex)
 {
 	if (!net_eq(ip6addrlbl_net(p), net))
-		return 0;
+		return false;
 	if (p->ifindex && p->ifindex != ifindex)
-		return 0;
+		return false;
 	if (p->addrtype && p->addrtype != addrtype)
-		return 0;
+		return false;
 	if (!ipv6_prefix_equal(addr, &p->prefix, p->prefixlen))
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net,
@@ -350,7 +350,7 @@
 	int err = 0;
 	int i;
 
-	ADDRLABEL(KERN_DEBUG "%s()\n", __func__);
+	ADDRLABEL(KERN_DEBUG "%s\n", __func__);
 
 	for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
 		int ret = ip6addrlbl_add(net,
@@ -456,8 +456,8 @@
 	return err;
 }
 
-static inline void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
-				     int prefixlen, int ifindex, u32 lseq)
+static void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
+			      int prefixlen, int ifindex, u32 lseq)
 {
 	struct ifaddrlblmsg *ifal = nlmsg_data(nlh);
 	ifal->ifal_family = AF_INET6;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 8ed1b93..e22e6d8 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -18,6 +18,7 @@
  *      2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) "IPv6: " fmt
 
 #include <linux/module.h>
 #include <linux/capability.h>
@@ -77,7 +78,7 @@
 	.autoconf = 1,
 };
 
-static int disable_ipv6_mod = 0;
+static int disable_ipv6_mod;
 
 module_param_named(disable, disable_ipv6_mod, int, 0444);
 MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional");
@@ -180,7 +181,7 @@
 	err = 0;
 	sk->sk_no_check = answer_no_check;
 	if (INET_PROTOSW_REUSE & answer_flags)
-		sk->sk_reuse = 1;
+		sk->sk_reuse = SK_CAN_REUSE;
 
 	inet = inet_sk(sk);
 	inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
@@ -256,7 +257,7 @@
 /* bind for INET6 API */
 int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 {
-	struct sockaddr_in6 *addr=(struct sockaddr_in6 *)uaddr;
+	struct sockaddr_in6 *addr = (struct sockaddr_in6 *)uaddr;
 	struct sock *sk = sock->sk;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -390,7 +391,6 @@
 	rcu_read_unlock();
 	goto out;
 }
-
 EXPORT_SYMBOL(inet6_bind);
 
 int inet6_release(struct socket *sock)
@@ -408,7 +408,6 @@
 
 	return inet_release(sock);
 }
-
 EXPORT_SYMBOL(inet6_release);
 
 void inet6_destroy_sock(struct sock *sk)
@@ -419,10 +418,12 @@
 
 	/* Release rx options */
 
-	if ((skb = xchg(&np->pktoptions, NULL)) != NULL)
+	skb = xchg(&np->pktoptions, NULL);
+	if (skb != NULL)
 		kfree_skb(skb);
 
-	if ((skb = xchg(&np->rxpmtu, NULL)) != NULL)
+	skb = xchg(&np->rxpmtu, NULL);
+	if (skb != NULL)
 		kfree_skb(skb);
 
 	/* Free flowlabels */
@@ -430,10 +431,10 @@
 
 	/* Free tx options */
 
-	if ((opt = xchg(&np->opt, NULL)) != NULL)
+	opt = xchg(&np->opt, NULL);
+	if (opt != NULL)
 		sock_kfree_s(sk, opt, opt->tot_len);
 }
-
 EXPORT_SYMBOL_GPL(inet6_destroy_sock);
 
 /*
@@ -443,7 +444,7 @@
 int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
 		 int *uaddr_len, int peer)
 {
-	struct sockaddr_in6 *sin=(struct sockaddr_in6 *)uaddr;
+	struct sockaddr_in6 *sin = (struct sockaddr_in6 *)uaddr;
 	struct sock *sk = sock->sk;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -474,7 +475,6 @@
 	*uaddr_len = sizeof(*sin);
 	return 0;
 }
-
 EXPORT_SYMBOL(inet6_getname);
 
 int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
@@ -482,8 +482,7 @@
 	struct sock *sk = sock->sk;
 	struct net *net = sock_net(sk);
 
-	switch(cmd)
-	{
+	switch (cmd) {
 	case SIOCGSTAMP:
 		return sock_get_timestamp(sk, (struct timeval __user *)arg);
 
@@ -509,7 +508,6 @@
 	/*NOTREACHED*/
 	return 0;
 }
-
 EXPORT_SYMBOL(inet6_ioctl);
 
 const struct proto_ops inet6_stream_ops = {
@@ -615,25 +613,21 @@
 	return ret;
 
 out_permanent:
-	printk(KERN_ERR "Attempt to override permanent protocol %d.\n",
-	       protocol);
+	pr_err("Attempt to override permanent protocol %d\n", protocol);
 	goto out;
 
 out_illegal:
-	printk(KERN_ERR
-	       "Ignoring attempt to register invalid socket type %d.\n",
+	pr_err("Ignoring attempt to register invalid socket type %d\n",
 	       p->type);
 	goto out;
 }
-
 EXPORT_SYMBOL(inet6_register_protosw);
 
 void
 inet6_unregister_protosw(struct inet_protosw *p)
 {
 	if (INET_PROTOSW_PERMANENT & p->flags) {
-		printk(KERN_ERR
-		       "Attempt to unregister permanent protocol %d.\n",
+		pr_err("Attempt to unregister permanent protocol %d\n",
 		       p->protocol);
 	} else {
 		spin_lock_bh(&inetsw6_lock);
@@ -643,7 +637,6 @@
 		synchronize_net();
 	}
 }
-
 EXPORT_SYMBOL(inet6_unregister_protosw);
 
 int inet6_sk_rebuild_header(struct sock *sk)
@@ -683,13 +676,12 @@
 
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header);
 
-int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb)
 {
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct inet6_skb_parm *opt = IP6CB(skb);
+	const struct ipv6_pinfo *np = inet6_sk(sk);
+	const struct inet6_skb_parm *opt = IP6CB(skb);
 
 	if (np->rxopt.all) {
 		if ((opt->hop && (np->rxopt.bits.hopopts ||
@@ -701,11 +693,10 @@
 		     np->rxopt.bits.osrcrt)) ||
 		    ((opt->dst1 || opt->dst0) &&
 		     (np->rxopt.bits.dstopts || np->rxopt.bits.odstopts)))
-			return 1;
+			return true;
 	}
-	return 0;
+	return false;
 }
-
 EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
 
 static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
@@ -1070,13 +1061,11 @@
 	BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
 
 	/* Register the socket-side information for inet6_create.  */
-	for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
+	for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
 		INIT_LIST_HEAD(r);
 
 	if (disable_ipv6_mod) {
-		printk(KERN_INFO
-		       "IPv6: Loaded, but administratively disabled, "
-		       "reboot required to enable\n");
+		pr_info("Loaded, but administratively disabled, reboot required to enable\n");
 		goto out;
 	}
 
@@ -1111,11 +1100,6 @@
 	if (err)
 		goto out_sock_register_fail;
 
-#ifdef CONFIG_SYSCTL
-	err = ipv6_static_sysctl_register();
-	if (err)
-		goto static_sysctl_fail;
-#endif
 	tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
 
 	/*
@@ -1242,10 +1226,6 @@
 icmp_fail:
 	unregister_pernet_subsys(&inet6_net_ops);
 register_pernet_fail:
-#ifdef CONFIG_SYSCTL
-	ipv6_static_sysctl_unregister();
-static_sysctl_fail:
-#endif
 	sock_unregister(PF_INET6);
 	rtnl_unregister_all(PF_INET6);
 out_sock_register_fail:
@@ -1272,9 +1252,6 @@
 	/* Disallow any further netlink messages */
 	rtnl_unregister_all(PF_INET6);
 
-#ifdef CONFIG_SYSCTL
-	ipv6_sysctl_unregister();
-#endif
 	udpv6_exit();
 	udplitev6_exit();
 	tcpv6_exit();
@@ -1302,9 +1279,6 @@
 	rawv6_exit();
 
 	unregister_pernet_subsys(&inet6_net_ops);
-#ifdef CONFIG_SYSCTL
-	ipv6_static_sysctl_unregister();
-#endif
 	proto_unregister(&rawv6_prot);
 	proto_unregister(&udplitev6_prot);
 	proto_unregister(&udpv6_prot);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 2ae79db..f1a4a2c 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -24,6 +24,8 @@
  * 	This file is derived from net/ipv4/ah.c.
  */
 
+#define pr_fmt(fmt) "IPv6: " fmt
+
 #include <crypto/hash.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -111,7 +113,7 @@
 			     __alignof__(struct scatterlist));
 }
 
-static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
+static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
 {
 	u8 *opt = (u8 *)opthdr;
 	int len = ipv6_optlen(opthdr);
@@ -125,7 +127,7 @@
 
 		switch (opt[off]) {
 
-		case IPV6_TLV_PAD0:
+		case IPV6_TLV_PAD1:
 			optlen = 1;
 			break;
 		default:
@@ -143,10 +145,10 @@
 		len -= optlen;
 	}
 	if (len == 0)
-		return 1;
+		return true;
 
 bad:
-	return 0;
+	return false;
 }
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@ -169,7 +171,7 @@
 
 		switch (opt[off]) {
 
-		case IPV6_TLV_PAD0:
+		case IPV6_TLV_PAD1:
 			optlen = 1;
 			break;
 		default:
@@ -189,8 +191,8 @@
 
 				hao = (struct ipv6_destopt_hao *)&opt[off];
 				if (hao->length != sizeof(hao->addr)) {
-					if (net_ratelimit())
-						printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length);
+					net_warn_ratelimited("destopt hao: invalid header length: %u\n",
+							     hao->length);
 					goto bad;
 				}
 				final_addr = hao->addr;
@@ -659,9 +661,9 @@
 
 	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
 	    crypto_ahash_digestsize(ahash)) {
-		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
-		       x->aalg->alg_name, crypto_ahash_digestsize(ahash),
-		       aalg_desc->uinfo.auth.icv_fullbits/8);
+		pr_info("AH: %s digestsize %u != %hu\n",
+			x->aalg->alg_name, crypto_ahash_digestsize(ahash),
+			aalg_desc->uinfo.auth.icv_fullbits/8);
 		goto error;
 	}
 
@@ -727,12 +729,12 @@
 static int __init ah6_init(void)
 {
 	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
-		printk(KERN_INFO "ipv6 ah init: can't add xfrm type\n");
+		pr_info("%s: can't add xfrm type\n", __func__);
 		return -EAGAIN;
 	}
 
 	if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) {
-		printk(KERN_INFO "ipv6 ah init: can't add protocol\n");
+		pr_info("%s: can't add protocol\n", __func__);
 		xfrm_unregister_type(&ah6_type, AF_INET6);
 		return -EAGAIN;
 	}
@@ -743,10 +745,10 @@
 static void __exit ah6_fini(void)
 {
 	if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0)
-		printk(KERN_INFO "ipv6 ah close: can't remove protocol\n");
+		pr_info("%s: can't remove protocol\n", __func__);
 
 	if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
-		printk(KERN_INFO "ipv6 ah close: can't remove xfrm type\n");
+		pr_info("%s: can't remove xfrm type\n", __func__);
 
 }
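
ah6.c (like addrconf.c, af_inet6.c and esp6.c in this series) now defines
pr_fmt() and switches to pr_*() and net_*_ratelimited() calls. A minimal
sketch of the pattern with a placeholder "demo" prefix, assuming the
net_*_ratelimited() helpers live in <linux/net.h>:

	#define pr_fmt(fmt) "demo: " fmt

	#include <linux/kernel.h>
	#include <linux/net.h>

	static void demo_report(int err)
	{
		/* printed as "demo: demo_report: starting" */
		pr_info("%s: starting\n", __func__);
		if (err)	/* replaces "if (net_ratelimit()) printk(...)" */
			net_warn_ratelimited("error %d\n", err);
	}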
 
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index db00d27..cdf02be 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -342,7 +342,7 @@
  *	check if the interface has this anycast address
  *	called with rcu_read_lock()
  */
-static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
+static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
 {
 	struct inet6_dev *idev;
 	struct ifacaddr6 *aca;
@@ -356,16 +356,16 @@
 		read_unlock_bh(&idev->lock);
 		return aca != NULL;
 	}
-	return 0;
+	return false;
 }
 
 /*
  *	check if given interface (or any, if dev==0) has this anycast address
  */
-int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
-			const struct in6_addr *addr)
+bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
+			 const struct in6_addr *addr)
 {
-	int found = 0;
+	bool found = false;
 
 	rcu_read_lock();
 	if (dev)
@@ -373,7 +373,7 @@
 	else
 		for_each_netdev_rcu(net, dev)
 			if (ipv6_chk_acast_dev(dev, addr)) {
-				found = 1;
+				found = true;
 				break;
 			}
 	rcu_read_unlock();
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 76832c8..be2b67d6 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -22,6 +22,7 @@
 #include <linux/ipv6.h>
 #include <linux/route.h>
 #include <linux/slab.h>
+#include <linux/export.h>
 
 #include <net/ipv6.h>
 #include <net/ndisc.h>
@@ -33,9 +34,9 @@
 #include <linux/errqueue.h>
 #include <asm/uaccess.h>
 
-static inline int ipv6_mapped_addr_any(const struct in6_addr *a)
+static bool ipv6_mapped_addr_any(const struct in6_addr *a)
 {
-	return (ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0));
+	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -98,7 +99,7 @@
 		sin.sin_port = usin->sin6_port;
 
 		err = ip4_datagram_connect(sk,
-					   (struct sockaddr*) &sin,
+					   (struct sockaddr *) &sin,
 					   sizeof(sin));
 
 ipv4_connected:
@@ -202,6 +203,7 @@
 	fl6_sock_release(flowlabel);
 	return err;
 }
+EXPORT_SYMBOL_GPL(ip6_datagram_connect);
 
 void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
 		     __be16 port, u32 info, u8 *payload)
@@ -414,6 +416,7 @@
 out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(ipv6_recv_error);
 
 /*
  *	Handle IPV6_RECVPATHMTU
@@ -515,10 +518,10 @@
 		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
 
 		while (off <= opt->lastopt) {
-			unsigned len;
+			unsigned int len;
 			u8 *ptr = nh + off;
 
-			switch(nexthdr) {
+			switch (nexthdr) {
 			case IPPROTO_DSTOPTS:
 				nexthdr = ptr[0];
 				len = (ptr[1] + 1) << 3;
@@ -827,9 +830,8 @@
 			int tc;
 
 			err = -EINVAL;
-			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
 				goto exit_f;
-			}
 
 			tc = *(int *)CMSG_DATA(cmsg);
 			if (tc < -1 || tc > 0xff)
@@ -846,9 +848,8 @@
 			int df;
 
 			err = -EINVAL;
-			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) {
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
 				goto exit_f;
-			}
 
 			df = *(int *)CMSG_DATA(cmsg);
 			if (df < 0 || df > 1)
@@ -870,3 +871,4 @@
 exit_f:
 	return err;
 }
+EXPORT_SYMBOL_GPL(datagram_send_ctl);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1ac7938..1e62b755 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -24,6 +24,8 @@
  * 	This file is derived from net/ipv4/esp.c
  */
 
+#define pr_fmt(fmt) "IPv6: " fmt
+
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
 #include <linux/err.h>
@@ -442,8 +444,8 @@
 			      esph->spi, IPPROTO_ESP, AF_INET6);
 	if (!x)
 		return;
-	printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
-			ntohl(esph->spi), &iph->daddr);
+	pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n",
+		 ntohl(esph->spi), &iph->daddr);
 	xfrm_state_put(x);
 }
 
@@ -651,11 +653,11 @@
 static int __init esp6_init(void)
 {
 	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
-		printk(KERN_INFO "ipv6 esp init: can't add xfrm type\n");
+		pr_info("%s: can't add xfrm type\n", __func__);
 		return -EAGAIN;
 	}
 	if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
-		printk(KERN_INFO "ipv6 esp init: can't add protocol\n");
+		pr_info("%s: can't add protocol\n", __func__);
 		xfrm_unregister_type(&esp6_type, AF_INET6);
 		return -EAGAIN;
 	}
@@ -666,9 +668,9 @@
 static void __exit esp6_fini(void)
 {
 	if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
-		printk(KERN_INFO "ipv6 esp close: can't remove protocol\n");
+		pr_info("%s: can't remove protocol\n", __func__);
 	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
-		printk(KERN_INFO "ipv6 esp close: can't remove xfrm type\n");
+		pr_info("%s: can't remove xfrm type\n", __func__);
 }
 
 module_init(esp6_init);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 3d641b6..6447dc4 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -75,7 +75,7 @@
 			return offset;
 
 		switch (opttype) {
-		case IPV6_TLV_PAD0:
+		case IPV6_TLV_PAD1:
 			optlen = 1;
 			break;
 		default:
@@ -96,14 +96,14 @@
 /*
  *	Parsing tlv encoded headers.
  *
- *	Parsing function "func" returns 1, if parsing succeed
- *	and 0, if it failed.
+ *	Parsing function "func" returns true, if parsing succeed
+ *	and false, if it failed.
  *	It MUST NOT touch skb->h.
  */
 
 struct tlvtype_proc {
 	int	type;
-	int	(*func)(struct sk_buff *skb, int offset);
+	bool	(*func)(struct sk_buff *skb, int offset);
 };
 
 /*********************
@@ -112,11 +112,11 @@
 
 /* An unknown option is detected, decide what to do */
 
-static int ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
+static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
 {
 	switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
 	case 0: /* ignore */
-		return 1;
+		return true;
 
 	case 1: /* drop packet */
 		break;
@@ -129,21 +129,22 @@
 			break;
 	case 2: /* send ICMP PARM PROB regardless and drop packet */
 		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
-		return 0;
+		return false;
 	}
 
 	kfree_skb(skb);
-	return 0;
+	return false;
 }
 
 /* Parse tlv encoded option header (hop-by-hop or destination) */
 
-static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
+static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
 {
-	struct tlvtype_proc *curr;
+	const struct tlvtype_proc *curr;
 	const unsigned char *nh = skb_network_header(skb);
 	int off = skb_network_header_len(skb);
 	int len = (skb_transport_header(skb)[1] + 1) << 3;
+	int padlen = 0;
 
 	if (skb_transport_offset(skb) + len > skb_headlen(skb))
 		goto bad;
@@ -153,13 +154,33 @@
 
 	while (len > 0) {
 		int optlen = nh[off + 1] + 2;
+		int i;
 
 		switch (nh[off]) {
-		case IPV6_TLV_PAD0:
+		case IPV6_TLV_PAD1:
 			optlen = 1;
+			padlen++;
+			if (padlen > 7)
+				goto bad;
 			break;
 
 		case IPV6_TLV_PADN:
+			/* RFC 2460 states that the purpose of PadN is
+			 * to align the containing header to multiples
+			 * of 8. 7 is therefore the highest valid value.
+			 * See also RFC 4942, Section 2.1.9.5.
+			 */
+			padlen += optlen;
+			if (padlen > 7)
+				goto bad;
+			/* RFC 4942 recommends receiving hosts to
+			 * actively check PadN payload to contain
+			 * only zeroes.
+			 */
+			for (i = 2; i < optlen; i++) {
+				if (nh[off + i] != 0)
+					goto bad;
+			}
 			break;
 
 		default: /* Other TLV code so scan list */
@@ -170,25 +191,33 @@
 					/* type specific length/alignment
 					   checks will be performed in the
 					   func(). */
-					if (curr->func(skb, off) == 0)
-						return 0;
+					if (curr->func(skb, off) == false)
+						return false;
 					break;
 				}
 			}
 			if (curr->type < 0) {
 				if (ip6_tlvopt_unknown(skb, off) == 0)
-					return 0;
+					return false;
 			}
+			padlen = 0;
 			break;
 		}
 		off += optlen;
 		len -= optlen;
 	}
+	/* This case will not be caught by above check since its padding
+	 * length is smaller than 7:
+	 * 1 byte NH + 1 byte Length + 6 bytes Padding
+	 */
+	if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
+		goto bad;
+
 	if (len == 0)
-		return 1;
+		return true;
 bad:
 	kfree_skb(skb);
-	return 0;
+	return false;
 }
 
 /*****************************
@@ -196,7 +225,7 @@
  *****************************/
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
+static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
 {
 	struct ipv6_destopt_hao *hao;
 	struct inet6_skb_parm *opt = IP6CB(skb);
@@ -250,15 +279,15 @@
 	if (skb->tstamp.tv64 == 0)
 		__net_timestamp(skb);
 
-	return 1;
+	return true;
 
  discard:
 	kfree_skb(skb);
-	return 0;
+	return false;
 }
 #endif
 
-static struct tlvtype_proc tlvprocdestopt_lst[] = {
+static const struct tlvtype_proc tlvprocdestopt_lst[] = {
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 	{
 		.type	= IPV6_TLV_HAO,
@@ -563,23 +592,23 @@
 
 /* Router Alert as of RFC 2711 */
 
-static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
+static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
 {
 	const unsigned char *nh = skb_network_header(skb);
 
 	if (nh[optoff + 1] == 2) {
 		IP6CB(skb)->ra = optoff;
-		return 1;
+		return true;
 	}
 	LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
 		       nh[optoff + 1]);
 	kfree_skb(skb);
-	return 0;
+	return false;
 }
 
 /* Jumbo payload */
 
-static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
+static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
 {
 	const unsigned char *nh = skb_network_header(skb);
 	struct net *net = ipv6_skb_net(skb);
@@ -598,13 +627,13 @@
 		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
 				 IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
-		return 0;
+		return false;
 	}
 	if (ipv6_hdr(skb)->payload_len) {
 		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
 				 IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
-		return 0;
+		return false;
 	}
 
 	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
@@ -616,14 +645,14 @@
 	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
 		goto drop;
 
-	return 1;
+	return true;
 
 drop:
 	kfree_skb(skb);
-	return 0;
+	return false;
 }
 
-static struct tlvtype_proc tlvprochopopt_lst[] = {
+static const struct tlvtype_proc tlvprochopopt_lst[] = {
 	{
 		.type	= IPV6_TLV_ROUTERALERT,
 		.func	= ipv6_hop_ra,
@@ -722,7 +751,6 @@
 	if (opt->hopopt)
 		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
 }
-
 EXPORT_SYMBOL(ipv6_push_nfrag_opts);
 
 void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
@@ -738,20 +766,19 @@
 
 	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
 	if (opt2) {
-		long dif = (char*)opt2 - (char*)opt;
+		long dif = (char *)opt2 - (char *)opt;
 		memcpy(opt2, opt, opt->tot_len);
 		if (opt2->hopopt)
-			*((char**)&opt2->hopopt) += dif;
+			*((char **)&opt2->hopopt) += dif;
 		if (opt2->dst0opt)
-			*((char**)&opt2->dst0opt) += dif;
+			*((char **)&opt2->dst0opt) += dif;
 		if (opt2->dst1opt)
-			*((char**)&opt2->dst1opt) += dif;
+			*((char **)&opt2->dst1opt) += dif;
 		if (opt2->srcrt)
-			*((char**)&opt2->srcrt) += dif;
+			*((char **)&opt2->srcrt) += dif;
 	}
 	return opt2;
 }
-
 EXPORT_SYMBOL_GPL(ipv6_dup_options);
 
 static int ipv6_renew_option(void *ohdr,
@@ -869,6 +896,7 @@
 
 	return opt;
 }
+EXPORT_SYMBOL_GPL(ipv6_fixup_options);
 
 /**
  * fl6_update_dst - update flowi destination address with info given
@@ -892,5 +920,4 @@
 	fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
 	return orig;
 }
-
 EXPORT_SYMBOL_GPL(fl6_update_dst);
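
Two separate things happen in the exthdrs.c hunks above. First, IPV6_TLV_PAD0 is renamed to IPV6_TLV_PAD1, matching the RFC 2460 name for the one-octet padding option; the value stays 0 and PadN stays 1. Second, ip6_parse_tlv() now rejects more than 7 consecutive bytes of padding, PadN payloads containing non-zero bytes, and the one degenerate layout the padlen check alone cannot see. A worked example of that last case, using a hypothetical byte layout with ICMPv6 as the next header:

	/* 8-byte hop-by-hop header that is nothing but padding:
	 *
	 *   0x3a 0x00 0x01 0x04 0x00 0x00 0x00 0x00
	 *   NH   len  PadN type/len  four zero data bytes
	 *
	 * optlen = 4 + 2 = 6, so padlen ends at 6 and never trips the
	 * padlen > 7 test, but the header consumed exactly 8 bytes with
	 * no real option in it, which is what
	 *   if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
	 * catches.
	 */
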
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 72957f4..f73d59a 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -9,7 +9,7 @@
  * find out if nexthdr is a well-known extension header or a protocol
  */
 
-int ipv6_ext_hdr(u8 nexthdr)
+bool ipv6_ext_hdr(u8 nexthdr)
 {
 	/*
 	 * find out if nexthdr is an extension header or a protocol
@@ -21,6 +21,7 @@
 		 (nexthdr == NEXTHDR_NONE)	||
 		 (nexthdr == NEXTHDR_DEST);
 }
+EXPORT_SYMBOL(ipv6_ext_hdr);
 
 /*
  * Skip any extension headers. This is used by the ICMP module.
@@ -109,6 +110,4 @@
 	*nexthdrp = nexthdr;
 	return start;
 }
-
-EXPORT_SYMBOL(ipv6_ext_hdr);
 EXPORT_SYMBOL(ipv6_skip_exthdr);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b6c5731..0ff1cfd 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -22,8 +22,7 @@
 #include <net/ip6_route.h>
 #include <net/netlink.h>
 
-struct fib6_rule
-{
+struct fib6_rule {
 	struct fib_rule		common;
 	struct rt6key		src;
 	struct rt6key		dst;
@@ -215,14 +214,13 @@
 	frh->src_len = rule6->src.plen;
 	frh->tos = rule6->tclass;
 
-	if (rule6->dst.plen)
-		NLA_PUT(skb, FRA_DST, sizeof(struct in6_addr),
-			&rule6->dst.addr);
-
-	if (rule6->src.plen)
-		NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr),
-			&rule6->src.addr);
-
+	if ((rule6->dst.plen &&
+	     nla_put(skb, FRA_DST, sizeof(struct in6_addr),
+		     &rule6->dst.addr)) ||
+	    (rule6->src.plen &&
+	     nla_put(skb, FRA_SRC, sizeof(struct in6_addr),
+		     &rule6->src.addr)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
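
fib6_rules.c (and ip6mr.c further down) replace the NLA_PUT() macros, which hid a goto nla_put_failure inside themselves, with explicit nla_put() calls whose return values are tested and chained with ||. A sketch of the resulting shape on a hypothetical fill function; the attribute names are real, the variables are not, and <net/netlink.h> is assumed to be included:

	static int example_fill(struct sk_buff *skb, u32 table_id,
				const struct in6_addr *dst)
	{
		/* each nla_put*() returns non-zero once the skb runs out of room */
		if (nla_put_u32(skb, RTA_TABLE, table_id) ||
		    nla_put(skb, RTA_DST, sizeof(struct in6_addr), dst))
			goto nla_put_failure;
		return 0;

	nla_put_failure:
		return -EMSGSIZE;
	}
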
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 27ac95a..091a297 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -29,6 +29,8 @@
  *	Kazunori MIYAZAWA @USAGI:       change output process to use ip6_append_data
  */
 
+#define pr_fmt(fmt) "IPv6: " fmt
+
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -129,7 +131,7 @@
  *	--ANK (980726)
  */
 
-static int is_ineligible(struct sk_buff *skb)
+static bool is_ineligible(const struct sk_buff *skb)
 {
 	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
 	int len = skb->len - ptr;
@@ -137,11 +139,11 @@
 	__be16 frag_off;
 
 	if (len < 0)
-		return 1;
+		return true;
 
 	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
 	if (ptr < 0)
-		return 0;
+		return false;
 	if (nexthdr == IPPROTO_ICMPV6) {
 		u8 _type, *tp;
 		tp = skb_header_pointer(skb,
@@ -149,9 +151,9 @@
 			sizeof(_type), &_type);
 		if (tp == NULL ||
 		    !(*tp & ICMPV6_INFOMSG_MASK))
-			return 1;
+			return true;
 	}
-	return 0;
+	return false;
 }
 
 /*
@@ -206,14 +208,14 @@
  *	highest-order two bits set to 10
  */
 
-static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
+static bool opt_unrec(struct sk_buff *skb, __u32 offset)
 {
 	u8 _optval, *op;
 
 	offset += skb_network_offset(skb);
 	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
 	if (op == NULL)
-		return 1;
+		return true;
 	return (*op & 0xC0) == 0x80;
 }
 
@@ -498,7 +500,7 @@
 	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
 			      len + sizeof(struct icmp6hdr),
 			      sizeof(struct icmp6hdr), hlimit,
-			      np->tclass, NULL, &fl6, (struct rt6_info*)dst,
+			      np->tclass, NULL, &fl6, (struct rt6_info *)dst,
 			      MSG_DONTWAIT, np->dontfrag);
 	if (err) {
 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
@@ -579,7 +581,7 @@
 
 	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
 				sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
-				(struct rt6_info*)dst, MSG_DONTWAIT,
+				(struct rt6_info *)dst, MSG_DONTWAIT,
 				np->dontfrag);
 
 	if (err) {
@@ -820,9 +822,7 @@
 		err = inet_ctl_sock_create(&sk, PF_INET6,
 					   SOCK_RAW, IPPROTO_ICMPV6, net);
 		if (err < 0) {
-			printk(KERN_ERR
-			       "Failed to initialize the ICMP6 control socket "
-			       "(err %d).\n",
+			pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
 			       err);
 			goto fail;
 		}
@@ -881,7 +881,7 @@
 	return 0;
 
 fail:
-	printk(KERN_ERR "Failed to register ICMP6 protocol\n");
+	pr_err("Failed to register ICMP6 protocol\n");
 	unregister_pernet_subsys(&icmpv6_sk_ops);
 	return err;
 }
@@ -950,7 +950,6 @@
 
 	return fatal;
 }
-
 EXPORT_SYMBOL(icmpv6_err_convert);
 
 #ifdef CONFIG_SYSCTL
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 02dd203..e6cee52 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -28,7 +28,7 @@
 #include <net/inet6_connection_sock.h>
 
 int inet6_csk_bind_conflict(const struct sock *sk,
-			    const struct inet_bind_bucket *tb)
+			    const struct inet_bind_bucket *tb, bool relax)
 {
 	const struct sock *sk2;
 	const struct hlist_node *node;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 9371743..0c220a4 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -18,6 +18,9 @@
  * 				routing table.
  * 	Ville Nuorvala:		Fixed routing subtrees.
  */
+
+#define pr_fmt(fmt) "IPv6: " fmt
+
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/net.h>
@@ -38,7 +41,7 @@
 #define RT6_DEBUG 2
 
 #if RT6_DEBUG >= 3
-#define RT6_TRACE(x...) printk(KERN_DEBUG x)
+#define RT6_TRACE(x...) pr_debug(x)
 #else
 #define RT6_TRACE(x...) do { ; } while (0)
 #endif
@@ -451,12 +454,10 @@
 		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
 			if (!allow_create) {
 				if (replace_required) {
-					pr_warn("IPv6: Can't replace route, "
-						"no match found\n");
+					pr_warn("Can't replace route, no match found\n");
 					return ERR_PTR(-ENOENT);
 				}
-				pr_warn("IPv6: NLM_F_CREATE should be set "
-					"when creating new route\n");
+				pr_warn("NLM_F_CREATE should be set when creating new route\n");
 			}
 			goto insert_above;
 		}
@@ -499,11 +500,10 @@
 		 * That would keep IPv6 consistent with IPv4
 		 */
 		if (replace_required) {
-			pr_warn("IPv6: Can't replace route, no match found\n");
+			pr_warn("Can't replace route, no match found\n");
 			return ERR_PTR(-ENOENT);
 		}
-		pr_warn("IPv6: NLM_F_CREATE should be set "
-			"when creating new route\n");
+		pr_warn("NLM_F_CREATE should be set when creating new route\n");
 	}
 	/*
 	 *	We walked to the bottom of tree.
@@ -696,7 +696,7 @@
 	 */
 	if (!replace) {
 		if (!add)
-			pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n");
+			pr_warn("NLM_F_CREATE should be set when creating new route\n");
 
 add:
 		rt->dst.rt6_next = iter;
@@ -715,7 +715,7 @@
 		if (!found) {
 			if (add)
 				goto add;
-			pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n");
+			pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
 			return -ENOENT;
 		}
 		*ins = rt;
@@ -768,7 +768,7 @@
 			replace_required = 1;
 	}
 	if (!allow_create && !replace_required)
-		pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
+		pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
 
 	fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
 			rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
@@ -1420,7 +1420,8 @@
 			res = fib6_del(rt, &info);
 			if (res) {
 #if RT6_DEBUG >= 2
-				printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res);
+				pr_debug("%s: del failed: rt=%p@%p err=%d\n",
+					 __func__, rt, rt->rt6i_node, res);
 #endif
 				continue;
 			}
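
In ip6_fib.c the literal "IPv6: " prefix disappears from the pr_warn() strings only because the new pr_fmt() definition at the top of the file now supplies it, so the console output is unchanged:

	pr_warn("Can't replace route, no match found\n");
	/* still prints "IPv6: Can't replace route, no match found" */
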
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index b7867a1..9772fbd 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -294,6 +294,7 @@
 	opt_space->opt_flen = fopt->opt_flen;
 	return opt_space;
 }
+EXPORT_SYMBOL_GPL(fl6_merge_options);
 
 static unsigned long check_linger(unsigned long ttl)
 {
@@ -432,32 +433,32 @@
 	return 0;
 }
 
-static int ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
+static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
 {
 	if (h1 == h2)
-		return 0;
+		return false;
 	if (h1 == NULL || h2 == NULL)
-		return 1;
+		return true;
 	if (h1->hdrlen != h2->hdrlen)
-		return 1;
+		return true;
 	return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
 }
 
-static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
+static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
 {
 	if (o1 == o2)
-		return 0;
+		return false;
 	if (o1 == NULL || o2 == NULL)
-		return 1;
+		return true;
 	if (o1->opt_nflen != o2->opt_nflen)
-		return 1;
+		return true;
 	if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
-		return 1;
+		return true;
 	if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
-		return 1;
+		return true;
 	if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
@@ -705,9 +706,9 @@
 		struct ip6_flowlabel *fl = v;
 		seq_printf(seq,
 			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
-			   (unsigned)ntohl(fl->label),
+			   (unsigned int)ntohl(fl->label),
 			   fl->share,
-			   (unsigned)fl->owner,
+			   (int)fl->owner,
 			   atomic_read(&fl->users),
 			   fl->linger/HZ,
 			   (long)(fl->expires - jiffies)/HZ,
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 1ca5d45..21a15df 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -170,7 +170,8 @@
 {
 	const struct inet6_protocol *ipprot;
 	unsigned int nhoff;
-	int nexthdr, raw;
+	int nexthdr;
+	bool raw;
 	u8 hash;
 	struct inet6_dev *idev;
 	struct net *net = dev_net(skb_dst(skb)->dev);
@@ -251,7 +252,7 @@
 int ip6_mc_input(struct sk_buff *skb)
 {
 	const struct ipv6hdr *hdr;
-	int deliver;
+	bool deliver;
 
 	IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
 			 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
@@ -287,7 +288,7 @@
 			 * is for MLD (0x0000).
 			 */
 			if ((ptr[2] | ptr[3]) == 0) {
-				deliver = 0;
+				deliver = false;
 
 				if (!ipv6_ext_hdr(nexthdr)) {
 					/* BUG */
@@ -312,7 +313,7 @@
 				case ICMPV6_MGM_REPORT:
 				case ICMPV6_MGM_REDUCTION:
 				case ICMPV6_MLD2_REPORT:
-					deliver = 1;
+					deliver = true;
 					break;
 				}
 				goto out;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index b7ca461..d99fdc6 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -210,7 +210,7 @@
 				kfree_skb(skb);
 				return -ENOBUFS;
 			}
-			kfree_skb(skb);
+			consume_skb(skb);
 			skb = skb2;
 			skb_set_owner_w(skb, sk);
 		}
@@ -252,8 +252,7 @@
 			       dst->dev, dst_output);
 	}
 
-	if (net_ratelimit())
-		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
+	net_dbg_ratelimited("IPv6: sending pkt_too_big to self\n");
 	skb->dev = dst->dev;
 	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
@@ -644,7 +643,10 @@
 	/* We must not fragment if the socket is set to force MTU discovery
 	 * or if the skb it not generated by a local socket.
 	 */
-	if (!skb->local_df && skb->len > mtu) {
+	if (unlikely(!skb->local_df && skb->len > mtu)) {
+		if (skb->sk && dst_allfrag(skb_dst(skb)))
+			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+
 		skb->dev = skb_dst(skb)->dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
@@ -789,6 +791,10 @@
 	}
 
 slow_path:
+	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
+	    skb_checksum_help(skb))
+		goto fail;
+
 	left = skb->len - hlen;		/* Space per frame */
 	ptr = hlen;			/* Where to start from */
 
@@ -889,7 +895,7 @@
 	}
 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 		      IPSTATS_MIB_FRAGOKS);
-	kfree_skb(skb);
+	consume_skb(skb);
 	return err;
 
 fail:
@@ -1199,7 +1205,6 @@
 	int copy;
 	int err;
 	int offset = 0;
-	int csummode = CHECKSUM_NONE;
 	__u8 tx_flags = 0;
 
 	if (flags&MSG_PROBE)
@@ -1412,7 +1417,7 @@
 			/*
 			 *	Fill in the control structures
 			 */
-			skb->ip_summed = csummode;
+			skb->ip_summed = CHECKSUM_NONE;
 			skb->csum = 0;
 			/* reserve for fragmentation and ipsec header */
 			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
@@ -1455,7 +1460,6 @@
 			transhdrlen = 0;
 			exthdrlen = 0;
 			dst_exthdrlen = 0;
-			csummode = CHECKSUM_NONE;
 
 			/*
 			 * Put the packet on the pending queue
@@ -1535,6 +1539,7 @@
 	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
 	return err;
 }
+EXPORT_SYMBOL_GPL(ip6_append_data);
 
 static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
 {
@@ -1638,6 +1643,7 @@
 	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
 	goto out;
 }
+EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
 
 void ip6_flush_pending_frames(struct sock *sk)
 {
@@ -1652,3 +1658,4 @@
 
 	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
 }
+EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
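
Beyond the new exports, two behavioural details in the ip6_output.c hunks are easy to miss: frees on successful paths switch from kfree_skb() to consume_skb(), and the slow fragmentation path now resolves CHECKSUM_PARTIAL with skb_checksum_help() before data is copied into fragments. The free-helper distinction, sketched with a hypothetical flag:

	if (drop_it)
		kfree_skb(skb);		/* counted as a drop (kfree_skb tracepoint) */
	else
		consume_skb(skb);	/* normal end of life, not a drop */
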
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index aa21da6..c9015fa 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -18,6 +18,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/capability.h>
 #include <linux/errno.h>
@@ -60,7 +62,7 @@
 MODULE_ALIAS_NETDEV("ip6tnl0");
 
 #ifdef IP6_TNL_DEBUG
-#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
+#define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
 #else
 #define IP6_TNL_TRACE(x...) do {;} while(0)
 #endif
@@ -198,7 +200,7 @@
 {
 	const struct in6_addr *remote = &p->raddr;
 	const struct in6_addr *local = &p->laddr;
-	unsigned h = 0;
+	unsigned int h = 0;
 	int prio = 0;
 
 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
@@ -460,19 +462,14 @@
 		struct ipv6_tlv_tnl_enc_lim *tel;
 		__u32 mtu;
 	case ICMPV6_DEST_UNREACH:
-		if (net_ratelimit())
-			printk(KERN_WARNING
-			       "%s: Path to destination invalid "
-			       "or inactive!\n", t->parms.name);
+		net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
+				     t->parms.name);
 		rel_msg = 1;
 		break;
 	case ICMPV6_TIME_EXCEED:
 		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
-			if (net_ratelimit())
-				printk(KERN_WARNING
-				       "%s: Too small hop limit or "
-				       "routing loop in tunnel!\n",
-				       t->parms.name);
+			net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
+					     t->parms.name);
 			rel_msg = 1;
 		}
 		break;
@@ -484,17 +481,13 @@
 		if (teli && teli == *info - 2) {
 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
 			if (tel->encap_limit == 0) {
-				if (net_ratelimit())
-					printk(KERN_WARNING
-					       "%s: Too small encapsulation "
-					       "limit or routing loop in "
-					       "tunnel!\n", t->parms.name);
+				net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
+						     t->parms.name);
 				rel_msg = 1;
 			}
-		} else if (net_ratelimit()) {
-			printk(KERN_WARNING
-			       "%s: Recipient unable to parse tunneled "
-			       "packet!\n ", t->parms.name);
+		} else {
+			net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
+					     t->parms.name);
 		}
 		break;
 	case ICMPV6_PKT_TOOBIG:
@@ -825,7 +818,7 @@
  *   0 else
  **/
 
-static inline int
+static inline bool
 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
 {
 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
@@ -845,15 +838,12 @@
 			ldev = dev_get_by_index_rcu(net, p->link);
 
 		if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
-			printk(KERN_WARNING
-			       "%s xmit: Local address not yet configured!\n",
-			       p->name);
+			pr_warn("%s xmit: Local address not yet configured!\n",
+				p->name);
 		else if (!ipv6_addr_is_multicast(&p->raddr) &&
 			 unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
-			printk(KERN_WARNING
-			       "%s xmit: Routing loop! "
-			       "Remote address found on this node!\n",
-			       p->name);
+			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
+				p->name);
 		else
 			ret = 1;
 		rcu_read_unlock();
@@ -919,10 +909,8 @@
 
 	if (tdev == dev) {
 		stats->collisions++;
-		if (net_ratelimit())
-			printk(KERN_WARNING
-			       "%s: Local routing loop detected!\n",
-			       t->parms.name);
+		net_warn_ratelimited("%s: Local routing loop detected!\n",
+				     t->parms.name);
 		goto tx_err_dst_release;
 	}
 	mtu = dst_mtu(dst) - sizeof (*ipv6h);
@@ -954,7 +942,7 @@
 
 		if (skb->sk)
 			skb_set_owner_w(new_skb, skb->sk);
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = new_skb;
 	}
 	skb_dst_drop(skb);
@@ -1553,13 +1541,13 @@
 
 	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
 	if (err < 0) {
-		printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
+		pr_err("%s: can't register ip4ip6\n", __func__);
 		goto out_ip4ip6;
 	}
 
 	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
 	if (err < 0) {
-		printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
+		pr_err("%s: can't register ip6ip6\n", __func__);
 		goto out_ip6ip6;
 	}
 
@@ -1580,10 +1568,10 @@
 static void __exit ip6_tunnel_cleanup(void)
 {
 	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
-		printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n");
+		pr_info("%s: can't deregister ip4ip6\n", __func__);
 
 	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
-		printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n");
+		pr_info("%s: can't deregister ip6ip6\n", __func__);
 
 	unregister_pernet_device(&ip6_tnl_net_ops);
 }
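
The ip6_tunnel.c hunks fold every open-coded "if (net_ratelimit()) printk(KERN_WARNING ...)" pair into a single net_warn_ratelimited() call, with the user-visible string kept on one line. A simplified expansion of the helper, for reference; the real definition lives in include/linux/net.h:

	#define net_warn_ratelimited(fmt, ...)			\
	do {							\
		if (net_ratelimit())				\
			pr_warn(fmt, ##__VA_ARGS__);		\
	} while (0)
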
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8110362..b15dc08 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1147,8 +1147,7 @@
 	 */
 	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
 	if (ret < 0) {
-		if (net_ratelimit())
-			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
+		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
 		kfree_skb(skb);
 	}
 
@@ -1351,7 +1350,7 @@
 		goto reg_notif_fail;
 #ifdef CONFIG_IPV6_PIMSM_V2
 	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
-		printk(KERN_ERR "ip6_mr_init: can't add PIM protocol\n");
+		pr_err("%s: can't add PIM protocol\n", __func__);
 		err = -EAGAIN;
 		goto add_proto_fail;
 	}
@@ -2215,14 +2214,15 @@
 	rtm->rtm_src_len  = 128;
 	rtm->rtm_tos      = 0;
 	rtm->rtm_table    = mrt->id;
-	NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
+		goto nla_put_failure;
 	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
 	rtm->rtm_protocol = RTPROT_UNSPEC;
 	rtm->rtm_flags    = 0;
 
-	NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
-	NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);
-
+	if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
+	    nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
+		goto nla_put_failure;
 	if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
 		goto nla_put_failure;
 
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index bba658d..5cb75bf 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -30,6 +30,9 @@
  *  The decompression of IP datagram MUST be done after the reassembly,
  *  AH/ESP processing.
  */
+
+#define pr_fmt(fmt) "IPv6: " fmt
+
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
@@ -69,8 +72,8 @@
 	if (!x)
 		return;
 
-	printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI6\n",
-			spi, &iph->daddr);
+	pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n",
+		 spi, &iph->daddr);
 	xfrm_state_put(x);
 }
 
@@ -190,11 +193,11 @@
 static int __init ipcomp6_init(void)
 {
 	if (xfrm_register_type(&ipcomp6_type, AF_INET6) < 0) {
-		printk(KERN_INFO "ipcomp6 init: can't add xfrm type\n");
+		pr_info("%s: can't add xfrm type\n", __func__);
 		return -EAGAIN;
 	}
 	if (inet6_add_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) {
-		printk(KERN_INFO "ipcomp6 init: can't add protocol\n");
+		pr_info("%s: can't add protocol\n", __func__);
 		xfrm_unregister_type(&ipcomp6_type, AF_INET6);
 		return -EAGAIN;
 	}
@@ -204,9 +207,9 @@
 static void __exit ipcomp6_fini(void)
 {
 	if (inet6_del_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0)
-		printk(KERN_INFO "ipv6 ipcomp close: can't remove protocol\n");
+		pr_info("%s: can't remove protocol\n", __func__);
 	if (xfrm_unregister_type(&ipcomp6_type, AF_INET6) < 0)
-		printk(KERN_INFO "ipv6 ipcomp close: can't remove xfrm type\n");
+		pr_info("%s: can't remove xfrm type\n", __func__);
 }
 
 module_init(ipcomp6_init);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 63dd1f8..ba6d13d 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -678,7 +678,6 @@
 	}
 	case MCAST_MSFILTER:
 	{
-		extern int sysctl_mld_max_msf;
 		struct group_filter *gsf;
 
 		if (optlen < GROUP_FILTER_SIZE(0))
@@ -943,7 +942,7 @@
 }
 
 static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
-		    char __user *optval, int __user *optlen, unsigned flags)
+		    char __user *optval, int __user *optlen, unsigned int flags)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	int len;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index b2869ca..6d0f5dc 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -606,13 +606,13 @@
 	return err;
 }
 
-int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
-		   const struct in6_addr *src_addr)
+bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
+		    const struct in6_addr *src_addr)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct ipv6_mc_socklist *mc;
 	struct ip6_sf_socklist *psl;
-	int rv = 1;
+	bool rv = true;
 
 	rcu_read_lock();
 	for_each_pmc_rcu(np, mc) {
@@ -621,7 +621,7 @@
 	}
 	if (!mc) {
 		rcu_read_unlock();
-		return 1;
+		return true;
 	}
 	read_lock(&mc->sflock);
 	psl = mc->sflist;
@@ -635,9 +635,9 @@
 				break;
 		}
 		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
-			rv = 0;
+			rv = false;
 		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
-			rv = 0;
+			rv = false;
 	}
 	read_unlock(&mc->sflock);
 	rcu_read_unlock();
@@ -931,15 +931,15 @@
 /*
  * identify MLD packets for MLD filter exceptions
  */
-int ipv6_is_mld(struct sk_buff *skb, int nexthdr)
+bool ipv6_is_mld(struct sk_buff *skb, int nexthdr)
 {
 	struct icmp6hdr *pic;
 
 	if (nexthdr != IPPROTO_ICMPV6)
-		return 0;
+		return false;
 
 	if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
-		return 0;
+		return false;
 
 	pic = icmp6_hdr(skb);
 
@@ -948,22 +948,22 @@
 	case ICMPV6_MGM_REPORT:
 	case ICMPV6_MGM_REDUCTION:
 	case ICMPV6_MLD2_REPORT:
-		return 1;
+		return true;
 	default:
 		break;
 	}
-	return 0;
+	return false;
 }
 
 /*
  *	check if the interface/address pair is valid
  */
-int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
-			const struct in6_addr *src_addr)
+bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
+			 const struct in6_addr *src_addr)
 {
 	struct inet6_dev *idev;
 	struct ifmcaddr6 *mc;
-	int rv = 0;
+	bool rv = false;
 
 	rcu_read_lock();
 	idev = __in6_dev_get(dev);
@@ -990,7 +990,7 @@
 					rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
 				spin_unlock_bh(&mc->mca_lock);
 			} else
-				rv = 1; /* don't filter unspecified source */
+				rv = true; /* don't filter unspecified source */
 		}
 		read_unlock_bh(&idev->lock);
 	}
@@ -1046,8 +1046,8 @@
 }
 
 /* mark EXCLUDE-mode sources */
-static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
-	const struct in6_addr *srcs)
+static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
+			     const struct in6_addr *srcs)
 {
 	struct ip6_sf_list *psf;
 	int i, scount;
@@ -1061,7 +1061,7 @@
 			if (psf->sf_count[MCAST_INCLUDE] ||
 			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
 			    psf->sf_count[MCAST_EXCLUDE])
-				continue;
+				break;
 			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
 				scount++;
 				break;
@@ -1070,12 +1070,12 @@
 	}
 	pmc->mca_flags &= ~MAF_GSQUERY;
 	if (scount == nsrcs)	/* all sources excluded */
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
-static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
-	const struct in6_addr *srcs)
+static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
+			    const struct in6_addr *srcs)
 {
 	struct ip6_sf_list *psf;
 	int i, scount;
@@ -1099,10 +1099,10 @@
 	}
 	if (!scount) {
 		pmc->mca_flags &= ~MAF_GSQUERY;
-		return 0;
+		return false;
 	}
 	pmc->mca_flags |= MAF_GSQUERY;
-	return 1;
+	return true;
 }
 
 /* called with rcu_read_lock() */
@@ -1276,17 +1276,17 @@
 	return 0;
 }
 
-static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
-	int gdeleted, int sdeleted)
+static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
+		  int gdeleted, int sdeleted)
 {
 	switch (type) {
 	case MLD2_MODE_IS_INCLUDE:
 	case MLD2_MODE_IS_EXCLUDE:
 		if (gdeleted || sdeleted)
-			return 0;
+			return false;
 		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
 			if (pmc->mca_sfmode == MCAST_INCLUDE)
-				return 1;
+				return true;
 			/* don't include if this source is excluded
 			 * in all filters
 			 */
@@ -1295,29 +1295,29 @@
 			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
 				psf->sf_count[MCAST_EXCLUDE];
 		}
-		return 0;
+		return false;
 	case MLD2_CHANGE_TO_INCLUDE:
 		if (gdeleted || sdeleted)
-			return 0;
+			return false;
 		return psf->sf_count[MCAST_INCLUDE] != 0;
 	case MLD2_CHANGE_TO_EXCLUDE:
 		if (gdeleted || sdeleted)
-			return 0;
+			return false;
 		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
 		    psf->sf_count[MCAST_INCLUDE])
-			return 0;
+			return false;
 		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
 			psf->sf_count[MCAST_EXCLUDE];
 	case MLD2_ALLOW_NEW_SOURCES:
 		if (gdeleted || !psf->sf_crcount)
-			return 0;
+			return false;
 		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
 	case MLD2_BLOCK_OLD_SOURCES:
 		if (pmc->mca_sfmode == MCAST_INCLUDE)
 			return gdeleted || (psf->sf_crcount && sdeleted);
 		return psf->sf_crcount && !gdeleted && !sdeleted;
 	}
-	return 0;
+	return false;
 }
 
 static int
@@ -2627,8 +2627,7 @@
 	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
 				   SOCK_RAW, IPPROTO_ICMPV6, net);
 	if (err < 0) {
-		printk(KERN_ERR
-		       "Failed to initialize the IGMP6 control socket (err %d).\n",
+		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
 		       err);
 		goto out;
 	}
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 7e1e0fb..5b087c3 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -22,6 +22,8 @@
  *	Masahide NAKAMURA @USAGI
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/time.h>
@@ -44,7 +46,7 @@
 	if (!data)
 		return NULL;
 	if (padlen == 1) {
-		data[0] = IPV6_TLV_PAD0;
+		data[0] = IPV6_TLV_PAD1;
 	} else if (padlen > 1) {
 		data[0] = IPV6_TLV_PADN;
 		data[1] = padlen - 2;
@@ -307,13 +309,12 @@
 static int mip6_destopt_init_state(struct xfrm_state *x)
 {
 	if (x->id.spi) {
-		printk(KERN_INFO "%s: spi is not 0: %u\n", __func__,
-		       x->id.spi);
+		pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
 		return -EINVAL;
 	}
 	if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
-		printk(KERN_INFO "%s: state's mode is not %u: %u\n",
-		       __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
+		pr_info("%s: state's mode is not %u: %u\n",
+			__func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
 		return -EINVAL;
 	}
 
@@ -443,13 +444,12 @@
 static int mip6_rthdr_init_state(struct xfrm_state *x)
 {
 	if (x->id.spi) {
-		printk(KERN_INFO "%s: spi is not 0: %u\n", __func__,
-		       x->id.spi);
+		pr_info("%s: spi is not 0: %u\n", __func__, x->id.spi);
 		return -EINVAL;
 	}
 	if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
-		printk(KERN_INFO "%s: state's mode is not %u: %u\n",
-		       __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
+		pr_info("%s: state's mode is not %u: %u\n",
+			__func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
 		return -EINVAL;
 	}
 
@@ -481,18 +481,18 @@
 
 static int __init mip6_init(void)
 {
-	printk(KERN_INFO "Mobile IPv6\n");
+	pr_info("Mobile IPv6\n");
 
 	if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) {
-		printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __func__);
+		pr_info("%s: can't add xfrm type(destopt)\n", __func__);
 		goto mip6_destopt_xfrm_fail;
 	}
 	if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) {
-		printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __func__);
+		pr_info("%s: can't add xfrm type(rthdr)\n", __func__);
 		goto mip6_rthdr_xfrm_fail;
 	}
 	if (rawv6_mh_filter_register(mip6_mh_filter) < 0) {
-		printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __func__);
+		pr_info("%s: can't add rawv6 mh filter\n", __func__);
 		goto mip6_rawv6_mh_fail;
 	}
 
@@ -510,11 +510,11 @@
 static void __exit mip6_fini(void)
 {
 	if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0)
-		printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __func__);
+		pr_info("%s: can't remove rawv6 mh filter\n", __func__);
 	if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0)
-		printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __func__);
+		pr_info("%s: can't remove xfrm type(rthdr)\n", __func__);
 	if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0)
-		printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __func__);
+		pr_info("%s: can't remove xfrm type(destopt)\n", __func__);
 }
 
 module_init(mip6_init);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 176b469..54f62d3 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -15,6 +15,7 @@
 /*
  *	Changes:
  *
+ *	Alexey I. Froloff		:	RFC6106 (DNSSL) support
  *	Pierre Ynard			:	export userland ND options
  *						through netlink (RDNSS support)
  *	Lars Fenneberg			:	fixed MTU setting on receipt
@@ -26,27 +27,7 @@
  *	YOSHIFUJI Hideaki @USAGI	:	Verify ND options properly
  */
 
-/* Set to 3 to get tracing... */
-#define ND_DEBUG 1
-
-#define ND_PRINTK(fmt, args...) do { if (net_ratelimit()) { printk(fmt, ## args); } } while(0)
-#define ND_NOPRINTK(x...) do { ; } while(0)
-#define ND_PRINTK0 ND_PRINTK
-#define ND_PRINTK1 ND_NOPRINTK
-#define ND_PRINTK2 ND_NOPRINTK
-#define ND_PRINTK3 ND_NOPRINTK
-#if ND_DEBUG >= 1
-#undef ND_PRINTK1
-#define ND_PRINTK1 ND_PRINTK
-#endif
-#if ND_DEBUG >= 2
-#undef ND_PRINTK2
-#define ND_PRINTK2 ND_PRINTK
-#endif
-#if ND_DEBUG >= 3
-#undef ND_PRINTK3
-#define ND_PRINTK3 ND_PRINTK
-#endif
+#define pr_fmt(fmt) "ICMPv6: " fmt
 
 #include <linux/module.h>
 #include <linux/errno.h>
@@ -91,6 +72,15 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 
+/* Set to 3 to get tracing... */
+#define ND_DEBUG 1
+
+#define ND_PRINTK(val, level, fmt, ...)				\
+do {								\
+	if (val <= ND_DEBUG)					\
+		net_##level##_ratelimited(fmt, ##__VA_ARGS__);	\
+} while (0)
+
 static u32 ndisc_hash(const void *pkey,
 		      const struct net_device *dev,
 		      __u32 *hash_rnd);
@@ -228,7 +218,8 @@
 
 static inline int ndisc_is_useropt(struct nd_opt_hdr *opt)
 {
-	return opt->nd_opt_type == ND_OPT_RDNSS;
+	return opt->nd_opt_type == ND_OPT_RDNSS ||
+		opt->nd_opt_type == ND_OPT_DNSSL;
 }
 
 static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
@@ -263,10 +254,9 @@
 		case ND_OPT_MTU:
 		case ND_OPT_REDIRECT_HDR:
 			if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) {
-				ND_PRINTK2(KERN_WARNING
-					   "%s(): duplicated ND6 option found: type=%d\n",
-					   __func__,
-					   nd_opt->nd_opt_type);
+				ND_PRINTK(2, warn,
+					  "%s: duplicated ND6 option found: type=%d\n",
+					  __func__, nd_opt->nd_opt_type);
 			} else {
 				ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt;
 			}
@@ -294,10 +284,11 @@
 				 * to accommodate future extension to the
 				 * protocol.
 				 */
-				ND_PRINTK2(KERN_NOTICE
-					   "%s(): ignored unsupported option; type=%d, len=%d\n",
-					   __func__,
-					   nd_opt->nd_opt_type, nd_opt->nd_opt_len);
+				ND_PRINTK(2, notice,
+					  "%s: ignored unsupported option; type=%d, len=%d\n",
+					  __func__,
+					  nd_opt->nd_opt_type,
+					  nd_opt->nd_opt_len);
 			}
 		}
 		opt_len -= l;
@@ -325,9 +316,6 @@
 	case ARPHRD_FDDI:
 		ipv6_eth_mc_map(addr, buf);
 		return 0;
-	case ARPHRD_IEEE802_TR:
-		ipv6_tr_mc_map(addr,buf);
-		return 0;
 	case ARPHRD_ARCNET:
 		ipv6_arcnet_mc_map(addr, buf);
 		return 0;
@@ -360,7 +348,7 @@
 	struct net_device *dev = neigh->dev;
 	struct inet6_dev *in6_dev;
 	struct neigh_parms *parms;
-	int is_multicast = ipv6_addr_is_multicast(addr);
+	bool is_multicast = ipv6_addr_is_multicast(addr);
 
 	in6_dev = in6_dev_get(dev);
 	if (in6_dev == NULL) {
@@ -456,9 +444,8 @@
 				   len + hlen + tlen),
 				  1, &err);
 	if (!skb) {
-		ND_PRINTK0(KERN_ERR
-			   "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
-			   __func__, err);
+		ND_PRINTK(0, err, "ND: %s failed to allocate an skb, err=%d\n",
+			  __func__, err);
 		return NULL;
 	}
 
@@ -694,8 +681,9 @@
 
 	if ((probes -= neigh->parms->ucast_probes) < 0) {
 		if (!(neigh->nud_state & NUD_VALID)) {
-			ND_PRINTK1(KERN_DEBUG "%s(): trying to ucast probe in NUD_INVALID: %pI6\n",
-				   __func__, target);
+			ND_PRINTK(1, dbg,
+				  "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
+				  __func__, target);
 		}
 		ndisc_send_ns(dev, neigh, target, target, saddr);
 	} else if ((probes -= neigh->parms->app_probes) < 0) {
@@ -737,12 +725,11 @@
 	struct inet6_dev *idev = NULL;
 	struct neighbour *neigh;
 	int dad = ipv6_addr_any(saddr);
-	int inc;
+	bool inc;
 	int is_router = -1;
 
 	if (ipv6_addr_is_multicast(&msg->target)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 NS: multicast target address");
+		ND_PRINTK(2, warn, "NS: multicast target address\n");
 		return;
 	}
 
@@ -755,22 +742,20 @@
 	      daddr->s6_addr32[1] == htonl(0x00000000) &&
 	      daddr->s6_addr32[2] == htonl(0x00000001) &&
 	      daddr->s6_addr [12] == 0xff )) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 NS: bad DAD packet (wrong destination)\n");
+		ND_PRINTK(2, warn, "NS: bad DAD packet (wrong destination)\n");
 		return;
 	}
 
 	if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 NS: invalid ND options\n");
+		ND_PRINTK(2, warn, "NS: invalid ND options\n");
 		return;
 	}
 
 	if (ndopts.nd_opts_src_lladdr) {
 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr, dev);
 		if (!lladdr) {
-			ND_PRINTK2(KERN_WARNING
-				   "ICMPv6 NS: invalid link-layer address length\n");
+			ND_PRINTK(2, warn,
+				  "NS: invalid link-layer address length\n");
 			return;
 		}
 
@@ -780,8 +765,8 @@
 		 *	in the message.
 		 */
 		if (dad) {
-			ND_PRINTK2(KERN_WARNING
-				   "ICMPv6 NS: bad DAD packet (link-layer address option)\n");
+			ND_PRINTK(2, warn,
+				  "NS: bad DAD packet (link-layer address option)\n");
 			return;
 		}
 	}
@@ -793,20 +778,6 @@
 
 		if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) {
 			if (dad) {
-				if (dev->type == ARPHRD_IEEE802_TR) {
-					const unsigned char *sadr;
-					sadr = skb_mac_header(skb);
-					if (((sadr[8] ^ dev->dev_addr[0]) & 0x7f) == 0 &&
-					    sadr[9] == dev->dev_addr[1] &&
-					    sadr[10] == dev->dev_addr[2] &&
-					    sadr[11] == dev->dev_addr[3] &&
-					    sadr[12] == dev->dev_addr[4] &&
-					    sadr[13] == dev->dev_addr[5]) {
-						/* looped-back to us */
-						goto out;
-					}
-				}
-
 				/*
 				 * We are colliding with another node
 				 * who is doing DAD
@@ -913,34 +884,30 @@
 	struct neighbour *neigh;
 
 	if (skb->len < sizeof(struct nd_msg)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 NA: packet too short\n");
+		ND_PRINTK(2, warn, "NA: packet too short\n");
 		return;
 	}
 
 	if (ipv6_addr_is_multicast(&msg->target)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 NA: target address is multicast.\n");
+		ND_PRINTK(2, warn, "NA: target address is multicast\n");
 		return;
 	}
 
 	if (ipv6_addr_is_multicast(daddr) &&
 	    msg->icmph.icmp6_solicited) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 NA: solicited NA is multicasted.\n");
+		ND_PRINTK(2, warn, "NA: solicited NA is multicasted\n");
 		return;
 	}
 
 	if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 NS: invalid ND option\n");
+		ND_PRINTK(2, warn, "NS: invalid ND option\n");
 		return;
 	}
 	if (ndopts.nd_opts_tgt_lladdr) {
 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr, dev);
 		if (!lladdr) {
-			ND_PRINTK2(KERN_WARNING
-				   "ICMPv6 NA: invalid link-layer address length\n");
+			ND_PRINTK(2, warn,
+				  "NA: invalid link-layer address length\n");
 			return;
 		}
 	}
@@ -961,9 +928,9 @@
 		   unsolicited advertisement.
 		 */
 		if (skb->pkt_type != PACKET_LOOPBACK)
-			ND_PRINTK1(KERN_WARNING
-			   "ICMPv6 NA: someone advertises our address %pI6 on %s!\n",
-			   &ifp->addr, ifp->idev->dev->name);
+			ND_PRINTK(1, warn,
+				  "NA: someone advertises our address %pI6 on %s!\n",
+				  &ifp->addr, ifp->idev->dev->name);
 		in6_ifa_put(ifp);
 		return;
 	}
@@ -1025,8 +992,7 @@
 
 	idev = __in6_dev_get(skb->dev);
 	if (!idev) {
-		if (net_ratelimit())
-			ND_PRINTK1("ICMP6 RS: can't find in6 device\n");
+		ND_PRINTK(1, err, "RS: can't find in6 device\n");
 		return;
 	}
 
@@ -1043,8 +1009,7 @@
 
 	/* Parse ND options */
 	if (!ndisc_parse_options(rs_msg->opt, ndoptlen, &ndopts)) {
-		if (net_ratelimit())
-			ND_PRINTK2("ICMP6 NS: invalid ND option, ignored\n");
+		ND_PRINTK(2, notice, "NS: invalid ND option, ignored\n");
 		goto out;
 	}
 
@@ -1099,8 +1064,9 @@
 
 	memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3);
 
-	NLA_PUT(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
-		&ipv6_hdr(ra)->saddr);
+	if (nla_put(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
+		    &ipv6_hdr(ra)->saddr))
+		goto nla_put_failure;
 	nlmsg_end(skb, nlh);
 
 	rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC);
@@ -1141,20 +1107,17 @@
 	optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg);
 
 	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 RA: source address is not link-local.\n");
+		ND_PRINTK(2, warn, "RA: source address is not link-local\n");
 		return;
 	}
 	if (optlen < 0) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 RA: packet too short\n");
+		ND_PRINTK(2, warn, "RA: packet too short\n");
 		return;
 	}
 
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
 	if (skb->ndisc_nodetype == NDISC_NODETYPE_HOST) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 RA: from host or unauthorized router\n");
+		ND_PRINTK(2, warn, "RA: from host or unauthorized router\n");
 		return;
 	}
 #endif
@@ -1165,15 +1128,13 @@
 
 	in6_dev = __in6_dev_get(skb->dev);
 	if (in6_dev == NULL) {
-		ND_PRINTK0(KERN_ERR
-			   "ICMPv6 RA: can't find inet6 device for %s.\n",
-			   skb->dev->name);
+		ND_PRINTK(0, err, "RA: can't find inet6 device for %s\n",
+			  skb->dev->name);
 		return;
 	}
 
 	if (!ndisc_parse_options(opt, optlen, &ndopts)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMP6 RA: invalid ND options\n");
+		ND_PRINTK(2, warn, "RA: invalid ND options\n");
 		return;
 	}
 
@@ -1226,9 +1187,9 @@
 	if (rt) {
 		neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
 		if (!neigh) {
-			ND_PRINTK0(KERN_ERR
-				   "ICMPv6 RA: %s() got default router without neighbour.\n",
-				   __func__);
+			ND_PRINTK(0, err,
+				  "RA: %s got default router without neighbour\n",
+				  __func__);
 			dst_release(&rt->dst);
 			return;
 		}
@@ -1239,22 +1200,21 @@
 	}
 
 	if (rt == NULL && lifetime) {
-		ND_PRINTK3(KERN_DEBUG
-			   "ICMPv6 RA: adding default router.\n");
+		ND_PRINTK(3, dbg, "RA: adding default router\n");
 
 		rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
 		if (rt == NULL) {
-			ND_PRINTK0(KERN_ERR
-				   "ICMPv6 RA: %s() failed to add default route.\n",
-				   __func__);
+			ND_PRINTK(0, err,
+				  "RA: %s failed to add default route\n",
+				  __func__);
 			return;
 		}
 
 		neigh = dst_neigh_lookup(&rt->dst, &ipv6_hdr(skb)->saddr);
 		if (neigh == NULL) {
-			ND_PRINTK0(KERN_ERR
-				   "ICMPv6 RA: %s() got default router without neighbour.\n",
-				   __func__);
+			ND_PRINTK(0, err,
+				  "RA: %s got default router without neighbour\n",
+				  __func__);
 			dst_release(&rt->dst);
 			return;
 		}
@@ -1322,8 +1282,8 @@
 			lladdr = ndisc_opt_addr_data(ndopts.nd_opts_src_lladdr,
 						     skb->dev);
 			if (!lladdr) {
-				ND_PRINTK2(KERN_WARNING
-					   "ICMPv6 RA: invalid link-layer address length\n");
+				ND_PRINTK(2, warn,
+					  "RA: invalid link-layer address length\n");
 				goto out;
 			}
 		}
@@ -1387,9 +1347,7 @@
 		mtu = ntohl(n);
 
 		if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
-			ND_PRINTK2(KERN_WARNING
-				   "ICMPv6 RA: invalid mtu: %d\n",
-				   mtu);
+			ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu);
 		} else if (in6_dev->cnf.mtu6 != mtu) {
 			in6_dev->cnf.mtu6 = mtu;
 
@@ -1410,8 +1368,7 @@
 	}
 
 	if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 RA: invalid RA options");
+		ND_PRINTK(2, warn, "RA: invalid RA options\n");
 	}
 out:
 	if (rt)
@@ -1436,15 +1393,15 @@
 	switch (skb->ndisc_nodetype) {
 	case NDISC_NODETYPE_HOST:
 	case NDISC_NODETYPE_NODEFAULT:
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 Redirect: from host or unauthorized router\n");
+		ND_PRINTK(2, warn,
+			  "Redirect: from host or unauthorized router\n");
 		return;
 	}
 #endif
 
 	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 Redirect: source address is not link-local.\n");
+		ND_PRINTK(2, warn,
+			  "Redirect: source address is not link-local\n");
 		return;
 	}
 
@@ -1452,8 +1409,7 @@
 	optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
 
 	if (optlen < 0) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 Redirect: packet too short\n");
+		ND_PRINTK(2, warn, "Redirect: packet too short\n");
 		return;
 	}
 
@@ -1462,8 +1418,8 @@
 	dest = target + 1;
 
 	if (ipv6_addr_is_multicast(dest)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 Redirect: destination address is multicast.\n");
+		ND_PRINTK(2, warn,
+			  "Redirect: destination address is multicast\n");
 		return;
 	}
 
@@ -1471,8 +1427,8 @@
 		on_link = 1;
 	} else if (ipv6_addr_type(target) !=
 		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 Redirect: target address is not link-local unicast.\n");
+		ND_PRINTK(2, warn,
+			  "Redirect: target address is not link-local unicast\n");
 		return;
 	}
 
@@ -1488,16 +1444,15 @@
 	 */
 
 	if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 Redirect: invalid ND options\n");
+		ND_PRINTK(2, warn, "Redirect: invalid ND options\n");
 		return;
 	}
 	if (ndopts.nd_opts_tgt_lladdr) {
 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
 					     skb->dev);
 		if (!lladdr) {
-			ND_PRINTK2(KERN_WARNING
-				   "ICMPv6 Redirect: invalid link-layer address length\n");
+			ND_PRINTK(2, warn,
+				  "Redirect: invalid link-layer address length\n");
 			return;
 		}
 	}
@@ -1532,16 +1487,15 @@
 	u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
 
 	if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 Redirect: no link-local address on %s\n",
-			   dev->name);
+		ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
+			  dev->name);
 		return;
 	}
 
 	if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
 	    ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
-		ND_PRINTK2(KERN_WARNING
-			"ICMPv6 Redirect: target address is not link-local unicast.\n");
+		ND_PRINTK(2, warn,
+			  "Redirect: target address is not link-local unicast\n");
 		return;
 	}
 
@@ -1560,8 +1514,8 @@
 	rt = (struct rt6_info *) dst;
 
 	if (rt->rt6i_flags & RTF_GATEWAY) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 Redirect: destination is not a neighbour.\n");
+		ND_PRINTK(2, warn,
+			  "Redirect: destination is not a neighbour\n");
 		goto release;
 	}
 	if (!rt->rt6i_peer)
@@ -1572,8 +1526,8 @@
 	if (dev->addr_len) {
 		struct neighbour *neigh = dst_neigh_lookup(skb_dst(skb), target);
 		if (!neigh) {
-			ND_PRINTK2(KERN_WARNING
-				   "ICMPv6 Redirect: no neigh for target address\n");
+			ND_PRINTK(2, warn,
+				  "Redirect: no neigh for target address\n");
 			goto release;
 		}
 
@@ -1601,9 +1555,9 @@
 				    len + hlen + tlen),
 				   1, &err);
 	if (buff == NULL) {
-		ND_PRINTK0(KERN_ERR
-			   "ICMPv6 Redirect: %s() failed to allocate an skb, err=%d.\n",
-			   __func__, err);
+		ND_PRINTK(0, err,
+			  "Redirect: %s failed to allocate an skb, err=%d\n",
+			  __func__, err);
 		goto release;
 	}
 
@@ -1688,16 +1642,14 @@
 	__skb_push(skb, skb->data - skb_transport_header(skb));
 
 	if (ipv6_hdr(skb)->hop_limit != 255) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 NDISC: invalid hop-limit: %d\n",
-			   ipv6_hdr(skb)->hop_limit);
+		ND_PRINTK(2, warn, "NDISC: invalid hop-limit: %d\n",
+			  ipv6_hdr(skb)->hop_limit);
 		return 0;
 	}
 
 	if (msg->icmph.icmp6_code != 0) {
-		ND_PRINTK2(KERN_WARNING
-			   "ICMPv6 NDISC: invalid ICMPv6 code: %d\n",
-			   msg->icmph.icmp6_code);
+		ND_PRINTK(2, warn, "NDISC: invalid ICMPv6 code: %d\n",
+			  msg->icmph.icmp6_code);
 		return 0;
 	}
 
@@ -1764,11 +1716,7 @@
 	static int warned;
 	if (strcmp(warncomm, current->comm) && warned < 5) {
 		strcpy(warncomm, current->comm);
-		printk(KERN_WARNING
-			"process `%s' is using deprecated sysctl (%s) "
-			"net.ipv6.neigh.%s.%s; "
-			"Use net.ipv6.neigh.%s.%s_ms "
-			"instead.\n",
+		pr_warn("process `%s' is using deprecated sysctl (%s) net.ipv6.neigh.%s.%s - use net.ipv6.neigh.%s.%s_ms instead\n",
 			warncomm, func,
 			dev_name, ctl->procname,
 			dev_name, ctl->procname);
@@ -1822,9 +1770,9 @@
 	err = inet_ctl_sock_create(&sk, PF_INET6,
 				   SOCK_RAW, IPPROTO_ICMPV6, net);
 	if (err < 0) {
-		ND_PRINTK0(KERN_ERR
-			   "ICMPv6 NDISC: Failed to initialize the control socket (err %d).\n",
-			   err);
+		ND_PRINTK(0, err,
+			  "NDISC: Failed to initialize the control socket (err %d)\n",
+			  err);
 		return err;
 	}
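
The ndisc.c rewrite replaces the numbered ND_PRINTK0..ND_PRINTK3 macros with a single ND_PRINTK(val, level, fmt, ...) that pastes the level into a net_<level>_ratelimited() call and compiles the whole statement away when val exceeds ND_DEBUG (still 1 by default). Usage, taken from the hunks above:

	ND_PRINTK(2, warn, "NS: invalid ND options\n");
	/* roughly:
	 *	if (2 <= ND_DEBUG)
	 *		net_warn_ratelimited("NS: invalid ND options\n");
	 * so with ND_DEBUG == 1 the compiler drops the call entirely.
	 */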
 
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index d33cddd..1013534 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -25,28 +25,6 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
-config IP6_NF_QUEUE
-	tristate "IP6 Userspace queueing via NETLINK (OBSOLETE)"
-	depends on INET && IPV6 && NETFILTER
-	depends on NETFILTER_ADVANCED
-	---help---
-
-	  This option adds a queue handler to the kernel for IPv6
-	  packets which enables users to receive the filtered packets
-	  with QUEUE target using libipq.
-
-	  This option enables the old IPv6-only "ip6_queue" implementation
-	  which has been obsoleted by the new "nfnetlink_queue" code (see
-	  CONFIG_NETFILTER_NETLINK_QUEUE).
-
-	  (C) Fernando Anton 2001
-	  IPv64 Project - Work based in IPv64 draft by Arturo Azcorra.
-	  Universidad Carlos III de Madrid
-	  Universidad Politecnica de Alcala de Henares
-	  email: <fanton@it.uc3m.es>.
-
-	  To compile it as a module, choose M here.  If unsure, say N.
-
 config IP6_NF_IPTABLES
 	tristate "IP6 tables support (required for filtering)"
 	depends on INET && IPV6
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index d4dfd0a..534d3f2 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -6,7 +6,6 @@
 obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
 obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
 obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
-obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
 obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
 obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
 
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
deleted file mode 100644
index a34c9e4..0000000
--- a/net/ipv6/netfilter/ip6_queue.c
+++ /dev/null
@@ -1,641 +0,0 @@
-/*
- * This is a module which is used for queueing IPv6 packets and
- * communicating with userspace via netlink.
- *
- * (C) 2001 Fernando Anton, this code is GPL.
- *     IPv64 Project - Work based in IPv64 draft by Arturo Azcorra.
- *     Universidad Carlos III de Madrid - Leganes (Madrid) - Spain
- *     Universidad Politecnica de Alcala de Henares - Alcala de H. (Madrid) - Spain
- *     email: fanton@it.uc3m.es
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/ipv6.h>
-#include <linux/notifier.h>
-#include <linux/netdevice.h>
-#include <linux/netfilter.h>
-#include <linux/netlink.h>
-#include <linux/spinlock.h>
-#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-#include <net/sock.h>
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/netfilter/nf_queue.h>
-#include <linux/netfilter_ipv4/ip_queue.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-
-#define IPQ_QMAX_DEFAULT 1024
-#define IPQ_PROC_FS_NAME "ip6_queue"
-#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen"
-
-typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
-
-static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
-static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_SPINLOCK(queue_lock);
-static int peer_pid __read_mostly;
-static unsigned int copy_range __read_mostly;
-static unsigned int queue_total;
-static unsigned int queue_dropped = 0;
-static unsigned int queue_user_dropped = 0;
-static struct sock *ipqnl __read_mostly;
-static LIST_HEAD(queue_list);
-static DEFINE_MUTEX(ipqnl_mutex);
-
-static inline void
-__ipq_enqueue_entry(struct nf_queue_entry *entry)
-{
-       list_add_tail(&entry->list, &queue_list);
-       queue_total++;
-}
-
-static inline int
-__ipq_set_mode(unsigned char mode, unsigned int range)
-{
-	int status = 0;
-
-	switch(mode) {
-	case IPQ_COPY_NONE:
-	case IPQ_COPY_META:
-		copy_mode = mode;
-		copy_range = 0;
-		break;
-
-	case IPQ_COPY_PACKET:
-		if (range > 0xFFFF)
-			range = 0xFFFF;
-		copy_range = range;
-		copy_mode = mode;
-		break;
-
-	default:
-		status = -EINVAL;
-
-	}
-	return status;
-}
-
-static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);
-
-static inline void
-__ipq_reset(void)
-{
-	peer_pid = 0;
-	net_disable_timestamp();
-	__ipq_set_mode(IPQ_COPY_NONE, 0);
-	__ipq_flush(NULL, 0);
-}
-
-static struct nf_queue_entry *
-ipq_find_dequeue_entry(unsigned long id)
-{
-	struct nf_queue_entry *entry = NULL, *i;
-
-	spin_lock_bh(&queue_lock);
-
-	list_for_each_entry(i, &queue_list, list) {
-		if ((unsigned long)i == id) {
-			entry = i;
-			break;
-		}
-	}
-
-	if (entry) {
-		list_del(&entry->list);
-		queue_total--;
-	}
-
-	spin_unlock_bh(&queue_lock);
-	return entry;
-}
-
-static void
-__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
-{
-	struct nf_queue_entry *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &queue_list, list) {
-		if (!cmpfn || cmpfn(entry, data)) {
-			list_del(&entry->list);
-			queue_total--;
-			nf_reinject(entry, NF_DROP);
-		}
-	}
-}
-
-static void
-ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
-{
-	spin_lock_bh(&queue_lock);
-	__ipq_flush(cmpfn, data);
-	spin_unlock_bh(&queue_lock);
-}
-
-static struct sk_buff *
-ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
-{
-	sk_buff_data_t old_tail;
-	size_t size = 0;
-	size_t data_len = 0;
-	struct sk_buff *skb;
-	struct ipq_packet_msg *pmsg;
-	struct nlmsghdr *nlh;
-	struct timeval tv;
-
-	switch (ACCESS_ONCE(copy_mode)) {
-	case IPQ_COPY_META:
-	case IPQ_COPY_NONE:
-		size = NLMSG_SPACE(sizeof(*pmsg));
-		break;
-
-	case IPQ_COPY_PACKET:
-		if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
-		    (*errp = skb_checksum_help(entry->skb)))
-			return NULL;
-
-		data_len = ACCESS_ONCE(copy_range);
-		if (data_len == 0 || data_len > entry->skb->len)
-			data_len = entry->skb->len;
-
-		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
-		break;
-
-	default:
-		*errp = -EINVAL;
-		return NULL;
-	}
-
-	skb = alloc_skb(size, GFP_ATOMIC);
-	if (!skb)
-		goto nlmsg_failure;
-
-	old_tail = skb->tail;
-	nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
-	pmsg = NLMSG_DATA(nlh);
-	memset(pmsg, 0, sizeof(*pmsg));
-
-	pmsg->packet_id       = (unsigned long )entry;
-	pmsg->data_len        = data_len;
-	tv = ktime_to_timeval(entry->skb->tstamp);
-	pmsg->timestamp_sec   = tv.tv_sec;
-	pmsg->timestamp_usec  = tv.tv_usec;
-	pmsg->mark            = entry->skb->mark;
-	pmsg->hook            = entry->hook;
-	pmsg->hw_protocol     = entry->skb->protocol;
-
-	if (entry->indev)
-		strcpy(pmsg->indev_name, entry->indev->name);
-	else
-		pmsg->indev_name[0] = '\0';
-
-	if (entry->outdev)
-		strcpy(pmsg->outdev_name, entry->outdev->name);
-	else
-		pmsg->outdev_name[0] = '\0';
-
-	if (entry->indev && entry->skb->dev &&
-	    entry->skb->mac_header != entry->skb->network_header) {
-		pmsg->hw_type = entry->skb->dev->type;
-		pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
-	}
-
-	if (data_len)
-		if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
-			BUG();
-
-	nlh->nlmsg_len = skb->tail - old_tail;
-	return skb;
-
-nlmsg_failure:
-	kfree_skb(skb);
-	*errp = -EINVAL;
-	printk(KERN_ERR "ip6_queue: error creating packet message\n");
-	return NULL;
-}
-
-static int
-ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
-{
-	int status = -EINVAL;
-	struct sk_buff *nskb;
-
-	if (copy_mode == IPQ_COPY_NONE)
-		return -EAGAIN;
-
-	nskb = ipq_build_packet_message(entry, &status);
-	if (nskb == NULL)
-		return status;
-
-	spin_lock_bh(&queue_lock);
-
-	if (!peer_pid)
-		goto err_out_free_nskb;
-
-	if (queue_total >= queue_maxlen) {
-		queue_dropped++;
-		status = -ENOSPC;
-		if (net_ratelimit())
-			printk (KERN_WARNING "ip6_queue: fill at %d entries, "
-				"dropping packet(s).  Dropped: %d\n", queue_total,
-				queue_dropped);
-		goto err_out_free_nskb;
-	}
-
-	/* netlink_unicast will either free the nskb or attach it to a socket */
-	status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
-	if (status < 0) {
-		queue_user_dropped++;
-		goto err_out_unlock;
-	}
-
-	__ipq_enqueue_entry(entry);
-
-	spin_unlock_bh(&queue_lock);
-	return status;
-
-err_out_free_nskb:
-	kfree_skb(nskb);
-
-err_out_unlock:
-	spin_unlock_bh(&queue_lock);
-	return status;
-}
-
-static int
-ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
-{
-	int diff;
-	struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload;
-	struct sk_buff *nskb;
-
-	if (v->data_len < sizeof(*user_iph))
-		return 0;
-	diff = v->data_len - e->skb->len;
-	if (diff < 0) {
-		if (pskb_trim(e->skb, v->data_len))
-			return -ENOMEM;
-	} else if (diff > 0) {
-		if (v->data_len > 0xFFFF)
-			return -EINVAL;
-		if (diff > skb_tailroom(e->skb)) {
-			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
-					       diff, GFP_ATOMIC);
-			if (!nskb) {
-				printk(KERN_WARNING "ip6_queue: OOM "
-				      "in mangle, dropping packet\n");
-				return -ENOMEM;
-			}
-			kfree_skb(e->skb);
-			e->skb = nskb;
-		}
-		skb_put(e->skb, diff);
-	}
-	if (!skb_make_writable(e->skb, v->data_len))
-		return -ENOMEM;
-	skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
-	e->skb->ip_summed = CHECKSUM_NONE;
-
-	return 0;
-}
-
-static int
-ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
-{
-	struct nf_queue_entry *entry;
-
-	if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN)
-		return -EINVAL;
-
-	entry = ipq_find_dequeue_entry(vmsg->id);
-	if (entry == NULL)
-		return -ENOENT;
-	else {
-		int verdict = vmsg->value;
-
-		if (vmsg->data_len && vmsg->data_len == len)
-			if (ipq_mangle_ipv6(vmsg, entry) < 0)
-				verdict = NF_DROP;
-
-		nf_reinject(entry, verdict);
-		return 0;
-	}
-}
-
-static int
-ipq_set_mode(unsigned char mode, unsigned int range)
-{
-	int status;
-
-	spin_lock_bh(&queue_lock);
-	status = __ipq_set_mode(mode, range);
-	spin_unlock_bh(&queue_lock);
-	return status;
-}
-
-static int
-ipq_receive_peer(struct ipq_peer_msg *pmsg,
-		 unsigned char type, unsigned int len)
-{
-	int status = 0;
-
-	if (len < sizeof(*pmsg))
-		return -EINVAL;
-
-	switch (type) {
-	case IPQM_MODE:
-		status = ipq_set_mode(pmsg->msg.mode.value,
-				      pmsg->msg.mode.range);
-		break;
-
-	case IPQM_VERDICT:
-		status = ipq_set_verdict(&pmsg->msg.verdict,
-					 len - sizeof(*pmsg));
-		break;
-	default:
-		status = -EINVAL;
-	}
-	return status;
-}
-
-static int
-dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
-{
-	if (entry->indev)
-		if (entry->indev->ifindex == ifindex)
-			return 1;
-
-	if (entry->outdev)
-		if (entry->outdev->ifindex == ifindex)
-			return 1;
-#ifdef CONFIG_BRIDGE_NETFILTER
-	if (entry->skb->nf_bridge) {
-		if (entry->skb->nf_bridge->physindev &&
-		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
-			return 1;
-		if (entry->skb->nf_bridge->physoutdev &&
-		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
-			return 1;
-	}
-#endif
-	return 0;
-}
-
-static void
-ipq_dev_drop(int ifindex)
-{
-	ipq_flush(dev_cmp, ifindex);
-}
-
-#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
-
-static inline void
-__ipq_rcv_skb(struct sk_buff *skb)
-{
-	int status, type, pid, flags;
-	unsigned int nlmsglen, skblen;
-	struct nlmsghdr *nlh;
-	bool enable_timestamp = false;
-
-	skblen = skb->len;
-	if (skblen < sizeof(*nlh))
-		return;
-
-	nlh = nlmsg_hdr(skb);
-	nlmsglen = nlh->nlmsg_len;
-	if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
-		return;
-
-	pid = nlh->nlmsg_pid;
-	flags = nlh->nlmsg_flags;
-
-	if(pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
-		RCV_SKB_FAIL(-EINVAL);
-
-	if (flags & MSG_TRUNC)
-		RCV_SKB_FAIL(-ECOMM);
-
-	type = nlh->nlmsg_type;
-	if (type < NLMSG_NOOP || type >= IPQM_MAX)
-		RCV_SKB_FAIL(-EINVAL);
-
-	if (type <= IPQM_BASE)
-		return;
-
-	if (!capable(CAP_NET_ADMIN))
-		RCV_SKB_FAIL(-EPERM);
-
-	spin_lock_bh(&queue_lock);
-
-	if (peer_pid) {
-		if (peer_pid != pid) {
-			spin_unlock_bh(&queue_lock);
-			RCV_SKB_FAIL(-EBUSY);
-		}
-	} else {
-		enable_timestamp = true;
-		peer_pid = pid;
-	}
-
-	spin_unlock_bh(&queue_lock);
-	if (enable_timestamp)
-		net_enable_timestamp();
-
-	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
-				  nlmsglen - NLMSG_LENGTH(0));
-	if (status < 0)
-		RCV_SKB_FAIL(status);
-
-	if (flags & NLM_F_ACK)
-		netlink_ack(skb, nlh, 0);
-}
-
-static void
-ipq_rcv_skb(struct sk_buff *skb)
-{
-	mutex_lock(&ipqnl_mutex);
-	__ipq_rcv_skb(skb);
-	mutex_unlock(&ipqnl_mutex);
-}
-
-static int
-ipq_rcv_dev_event(struct notifier_block *this,
-		  unsigned long event, void *ptr)
-{
-	struct net_device *dev = ptr;
-
-	if (!net_eq(dev_net(dev), &init_net))
-		return NOTIFY_DONE;
-
-	/* Drop any packets associated with the downed device */
-	if (event == NETDEV_DOWN)
-		ipq_dev_drop(dev->ifindex);
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block ipq_dev_notifier = {
-	.notifier_call	= ipq_rcv_dev_event,
-};
-
-static int
-ipq_rcv_nl_event(struct notifier_block *this,
-		 unsigned long event, void *ptr)
-{
-	struct netlink_notify *n = ptr;
-
-	if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
-		spin_lock_bh(&queue_lock);
-		if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
-			__ipq_reset();
-		spin_unlock_bh(&queue_lock);
-	}
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block ipq_nl_notifier = {
-	.notifier_call	= ipq_rcv_nl_event,
-};
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *ipq_sysctl_header;
-
-static ctl_table ipq_table[] = {
-	{
-		.procname	= NET_IPQ_QMAX_NAME,
-		.data		= &queue_maxlen,
-		.maxlen		= sizeof(queue_maxlen),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec
-	},
-	{ }
-};
-#endif
-
-#ifdef CONFIG_PROC_FS
-static int ip6_queue_show(struct seq_file *m, void *v)
-{
-	spin_lock_bh(&queue_lock);
-
-	seq_printf(m,
-		      "Peer PID          : %d\n"
-		      "Copy mode         : %hu\n"
-		      "Copy range        : %u\n"
-		      "Queue length      : %u\n"
-		      "Queue max. length : %u\n"
-		      "Queue dropped     : %u\n"
-		      "Netfilter dropped : %u\n",
-		      peer_pid,
-		      copy_mode,
-		      copy_range,
-		      queue_total,
-		      queue_maxlen,
-		      queue_dropped,
-		      queue_user_dropped);
-
-	spin_unlock_bh(&queue_lock);
-	return 0;
-}
-
-static int ip6_queue_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, ip6_queue_show, NULL);
-}
-
-static const struct file_operations ip6_queue_proc_fops = {
-	.open		= ip6_queue_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.owner		= THIS_MODULE,
-};
-#endif
-
-static const struct nf_queue_handler nfqh = {
-	.name	= "ip6_queue",
-	.outfn	= &ipq_enqueue_packet,
-};
-
-static int __init ip6_queue_init(void)
-{
-	int status = -ENOMEM;
-	struct proc_dir_entry *proc __maybe_unused;
-
-	netlink_register_notifier(&ipq_nl_notifier);
-	ipqnl = netlink_kernel_create(&init_net, NETLINK_IP6_FW, 0,
-			              ipq_rcv_skb, NULL, THIS_MODULE);
-	if (ipqnl == NULL) {
-		printk(KERN_ERR "ip6_queue: failed to create netlink socket\n");
-		goto cleanup_netlink_notifier;
-	}
-
-#ifdef CONFIG_PROC_FS
-	proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
-			   &ip6_queue_proc_fops);
-	if (!proc) {
-		printk(KERN_ERR "ip6_queue: failed to create proc entry\n");
-		goto cleanup_ipqnl;
-	}
-#endif
-	register_netdevice_notifier(&ipq_dev_notifier);
-#ifdef CONFIG_SYSCTL
-	ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table);
-#endif
-	status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh);
-	if (status < 0) {
-		printk(KERN_ERR "ip6_queue: failed to register queue handler\n");
-		goto cleanup_sysctl;
-	}
-	return status;
-
-cleanup_sysctl:
-#ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(ipq_sysctl_header);
-#endif
-	unregister_netdevice_notifier(&ipq_dev_notifier);
-	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
-
-cleanup_ipqnl: __maybe_unused
-	netlink_kernel_release(ipqnl);
-	mutex_lock(&ipqnl_mutex);
-	mutex_unlock(&ipqnl_mutex);
-
-cleanup_netlink_notifier:
-	netlink_unregister_notifier(&ipq_nl_notifier);
-	return status;
-}
-
-static void __exit ip6_queue_fini(void)
-{
-	nf_unregister_queue_handlers(&nfqh);
-
-	ipq_flush(NULL, 0);
-
-#ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(ipq_sysctl_header);
-#endif
-	unregister_netdevice_notifier(&ipq_dev_notifier);
-	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
-
-	netlink_kernel_release(ipqnl);
-	mutex_lock(&ipqnl_mutex);
-	mutex_unlock(&ipqnl_mutex);
-
-	netlink_unregister_notifier(&ipq_nl_notifier);
-}
-
-MODULE_DESCRIPTION("IPv6 packet queue handler");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_IP6_FW);
-
-module_init(ip6_queue_init);
-module_exit(ip6_queue_fini);
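
With ip6_queue gone, userspace that used libipq and the QUEUE target has to move to
nfnetlink_queue (CONFIG_NETFILTER_NETLINK_QUEUE) and the NFQUEUE target. Below is a
minimal userspace sketch, not part of this series, assuming libnetfilter_queue is
installed and a rule such as "ip6tables -A INPUT -j NFQUEUE --queue-num 0" steers
traffic to queue 0.

    /* Minimal nfnetlink_queue consumer; accepts every packet it sees. */
    #include <stdlib.h>
    #include <stdint.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/netfilter.h>                    /* NF_ACCEPT */
    #include <libnetfilter_queue/libnetfilter_queue.h>

    static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
                  struct nfq_data *nfa, void *data)
    {
        struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
        uint32_t id = ph ? ntohl(ph->packet_id) : 0;

        /* A real filter would inspect the payload before issuing a verdict. */
        return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
    }

    int main(void)
    {
        struct nfq_handle *h = nfq_open();
        struct nfq_q_handle *qh;
        char buf[4096];
        int fd, rv;

        if (!h)
            exit(1);
        qh = nfq_create_queue(h, 0, &cb, NULL);     /* queue 0, as in the rule above */
        if (!qh)
            exit(1);
        nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

        fd = nfq_fd(h);
        while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
            nfq_handle_packet(h, buf, rv);

        nfq_destroy_queue(qh);
        nfq_close(h);
        return 0;
    }
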
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 9d4e155..d7cb045 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -133,7 +133,7 @@
 		int protohdr;
 		unsigned short _frag_off;
 
-		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
+		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
 		if (protohdr < 0) {
 			if (_frag_off == 0)
 				*hotdrop = true;
@@ -181,8 +181,7 @@
 static unsigned int
 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
 {
-	if (net_ratelimit())
-		pr_info("error: `%s'\n", (const char *)par->targinfo);
+	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
 
 	return NF_DROP;
 }
@@ -362,6 +361,7 @@
 		const struct xt_entry_match *ematch;
 
 		IP_NF_ASSERT(e);
+		acpar.thoff = 0;
 		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
 		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
  no_match:
@@ -396,7 +396,7 @@
 			if (v < 0) {
 				/* Pop from stack? */
 				if (v != XT_RETURN) {
-					verdict = (unsigned)(-v) - 1;
+					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
 				if (*stackptr <= origptr)
@@ -2278,6 +2278,10 @@
  * if target < 0. "last header" is transport protocol header, ESP, or
  * "No next header".
  *
+ * Note that *offset is used as an input/output parameter, and if it is not
+ * zero, it must be a valid offset to an inner IPv6 header. This can be used
+ * to explore inner IPv6 headers, e.g. in ICMPv6 error messages.
+ *
  * If target header is found, its offset is set in *offset and return protocol
  * number. Otherwise, return -1.
  *
@@ -2289,17 +2293,33 @@
  * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
  * isn't NULL.
  *
+ * If flags is not NULL and a fragment header is found, the IP6T_FH_F_FRAG
+ * flag will be set in *flags. If the caller sets the IP6T_FH_F_AUTH flag in
+ * *flags and target < 0, this function will stop at an AH header.
  */
 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
-		  int target, unsigned short *fragoff)
+		  int target, unsigned short *fragoff, int *flags)
 {
 	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
 	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
-	unsigned int len = skb->len - start;
+	unsigned int len;
 
 	if (fragoff)
 		*fragoff = 0;
 
+	if (*offset) {
+		struct ipv6hdr _ip6, *ip6;
+
+		ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
+		if (!ip6 || (ip6->version != 6)) {
+			printk(KERN_ERR "IPv6 header not found\n");
+			return -EBADMSG;
+		}
+		start = *offset + sizeof(struct ipv6hdr);
+		nexthdr = ip6->nexthdr;
+	}
+	len = skb->len - start;
+
 	while (nexthdr != target) {
 		struct ipv6_opt_hdr _hdr, *hp;
 		unsigned int hdrlen;
@@ -2316,6 +2336,9 @@
 		if (nexthdr == NEXTHDR_FRAGMENT) {
 			unsigned short _frag_off;
 			__be16 *fp;
+
+			if (flags)	/* Indicate that this is a fragment */
+				*flags |= IP6T_FH_F_FRAG;
 			fp = skb_header_pointer(skb,
 						start+offsetof(struct frag_hdr,
 							       frag_off),
@@ -2336,9 +2359,11 @@
 				return -ENOENT;
 			}
 			hdrlen = 8;
-		} else if (nexthdr == NEXTHDR_AUTH)
+		} else if (nexthdr == NEXTHDR_AUTH) {
+			if (flags && (*flags & IP6T_FH_F_AUTH) && (target < 0))
+				break;
 			hdrlen = (hp->hdrlen + 2) << 2;
-		else
+		} else
 			hdrlen = ipv6_optlen(hp);
 
 		nexthdr = hp->nexthdr;
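
The new fifth argument turns ipv6_find_hdr() into an input/output interface: a
non-zero *offset selects an inner IPv6 header, IP6T_FH_F_FRAG is reported back, and
a caller-set IP6T_FH_F_AUTH stops the walk at an AH header when target < 0. The
caller below is a hypothetical sketch of how the extended signature can be used;
only the function and flag names come from this hunk.

    /* Hypothetical caller of the extended ipv6_find_hdr() (sketch, not from this series). */
    static bool example_probe_inner(const struct sk_buff *skb, unsigned int inner_off)
    {
        unsigned int ptr = inner_off;       /* non-zero: offset of an inner IPv6 header */
        unsigned short frag_off = 0;
        int flags = IP6T_FH_F_AUTH;         /* caller-set: stop at NEXTHDR_AUTH when target < 0 */
        int nexthdr;

        nexthdr = ipv6_find_hdr(skb, &ptr, -1, &frag_off, &flags);
        if (nexthdr < 0)
            return false;                   /* truncated or malformed header chain */
        if (flags & IP6T_FH_F_FRAG)
            return false;                   /* the walker flagged a fragment header */

        /* ptr now points at the header identified by nexthdr. */
        return nexthdr == NEXTHDR_AUTH;
    }
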
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index aad2fa4..fd4fb34 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -114,8 +114,7 @@
 			 GFP_ATOMIC);
 
 	if (!nskb) {
-		if (net_ratelimit())
-			pr_debug("cannot alloc skb\n");
+		net_dbg_ratelimited("cannot alloc skb\n");
 		dst_release(dst);
 		return;
 	}
@@ -210,8 +209,7 @@
 		send_reset(net, skb);
 		break;
 	default:
-		if (net_ratelimit())
-			pr_info("case %u not handled yet\n", reject->with);
+		net_info_ratelimited("case %u not handled yet\n", reject->with);
 		break;
 	}
 
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index 89cccc5..04099ab 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -41,11 +41,11 @@
 	struct ip_auth_hdr _ah;
 	const struct ip_auth_hdr *ah;
 	const struct ip6t_ah *ahinfo = par->matchinfo;
-	unsigned int ptr;
+	unsigned int ptr = 0;
 	unsigned int hdrlen = 0;
 	int err;
 
-	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL);
+	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_AUTH, NULL, NULL);
 	if (err < 0) {
 		if (err != -ENOENT)
 			par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index eda898f..3b5735e 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -40,10 +40,10 @@
 	struct frag_hdr _frag;
 	const struct frag_hdr *fh;
 	const struct ip6t_frag *fraginfo = par->matchinfo;
-	unsigned int ptr;
+	unsigned int ptr = 0;
 	int err;
 
-	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL);
+	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL);
 	if (err < 0) {
 		if (err != -ENOENT)
 			par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index 59df051..01df142 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -50,7 +50,7 @@
 	const struct ipv6_opt_hdr *oh;
 	const struct ip6t_opts *optinfo = par->matchinfo;
 	unsigned int temp;
-	unsigned int ptr;
+	unsigned int ptr = 0;
 	unsigned int hdrlen = 0;
 	bool ret = false;
 	u8 _opttype;
@@ -62,7 +62,7 @@
 
 	err = ipv6_find_hdr(skb, &ptr,
 			    (par->match == &hbh_mt6_reg[0]) ?
-			    NEXTHDR_HOP : NEXTHDR_DEST, NULL);
+			    NEXTHDR_HOP : NEXTHDR_DEST, NULL, NULL);
 	if (err < 0) {
 		if (err != -ENOENT)
 			par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index d8488c5..2c99b94 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -42,14 +42,14 @@
 	const struct ipv6_rt_hdr *rh;
 	const struct ip6t_rt *rtinfo = par->matchinfo;
 	unsigned int temp;
-	unsigned int ptr;
+	unsigned int ptr = 0;
 	unsigned int hdrlen = 0;
 	bool ret = false;
 	struct in6_addr _addr;
 	const struct in6_addr *ap;
 	int err;
 
-	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL);
+	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL, NULL);
 	if (err < 0) {
 		if (err != -ENOENT)
 			par->hotdrop = true;
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 00d1917..4d78240 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -42,8 +42,7 @@
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct iphdr) ||
 	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		if (net_ratelimit())
-			pr_warning("ip6t_hook: happy cracking.\n");
+		net_warn_ratelimited("ip6t_hook: happy cracking\n");
 		return NF_ACCEPT;
 	}
 #endif
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4111050..3224ef9 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -232,8 +232,7 @@
 {
 	/* root is playing with raw sockets. */
 	if (skb->len < sizeof(struct ipv6hdr)) {
-		if (net_ratelimit())
-			pr_notice("ipv6_conntrack_local: packet too short\n");
+		net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
 		return NF_ACCEPT;
 	}
 	return __ipv6_conntrack_in(dev_net(out), hooknum, skb, okfn);
@@ -278,10 +277,11 @@
 static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
 				const struct nf_conntrack_tuple *tuple)
 {
-	NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
-		&tuple->src.u3.ip6);
-	NLA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
-		&tuple->dst.u3.ip6);
+	if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
+		    &tuple->src.u3.ip6) ||
+	    nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
+		    &tuple->dst.u3.ip6))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 92cc9f2..3e81904 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -234,10 +234,10 @@
 static int icmpv6_tuple_to_nlattr(struct sk_buff *skb,
 				  const struct nf_conntrack_tuple *t)
 {
-	NLA_PUT_BE16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id);
-	NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type);
-	NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code);
-
+	if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) ||
+	    nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) ||
+	    nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -300,8 +300,8 @@
 {
 	const unsigned int *timeout = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ));
-
+	if (nla_put_be32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
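
The conversions above follow a pattern repeated throughout this series: the old
NLA_PUT*() macros hid a jump to nla_put_failure, whereas the nla_put*() helpers
return non-zero when the message runs out of tailroom, so the error path becomes
explicit. A condensed sketch of the pattern (the fill function itself is
hypothetical):

    /* Sketch of the NLA_PUT() -> nla_put() conversion pattern. */
    static int example_tuple_to_nlattr(struct sk_buff *skb,
                                       const struct in6_addr *src,
                                       const struct in6_addr *dst)
    {
        if (nla_put(skb, CTA_IP_V6_SRC, sizeof(*src), src) ||
            nla_put(skb, CTA_IP_V6_DST, sizeof(*dst), dst))
            goto nla_put_failure;           /* not enough tailroom in the message */
        return 0;

    nla_put_failure:
        return -1;
    }
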
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 38f00b0..c9c78c2 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -444,12 +444,11 @@
 	return head;
 
 out_oversize:
-	if (net_ratelimit())
-		printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len);
+	net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
+			    payload_len);
 	goto out_fail;
 out_oom:
-	if (net_ratelimit())
-		printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n");
+	net_dbg_ratelimited("nf_ct_frag6_reasm: no memory for reassembly\n");
 out_fail:
 	return NULL;
 }
@@ -626,8 +625,8 @@
 	inet_frags_init(&nf_frags);
 
 #ifdef CONFIG_SYSCTL
-	nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path,
-							  nf_ct_frag6_sysctl_table);
+	nf_ct_frag6_sysctl_header = register_net_sysctl(&init_net, "net/netfilter",
+							nf_ct_frag6_sysctl_table);
 	if (!nf_ct_frag6_sysctl_header) {
 		inet_frags_fini(&nf_frags);
 		return -ENOMEM;
@@ -640,7 +639,7 @@
 void nf_ct_frag6_cleanup(void)
 {
 #ifdef CONFIG_SYSCTL
-	unregister_sysctl_table(nf_ct_frag6_sysctl_header);
+	unregister_net_sysctl_table(nf_ct_frag6_sysctl_header);
 	nf_ct_frag6_sysctl_header = NULL;
 #endif
 	inet_frags_fini(&nf_frags);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5bddea7..93d6983 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -72,7 +72,7 @@
 		const struct in6_addr *rmt_addr, int dif)
 {
 	struct hlist_node *node;
-	int is_multicast = ipv6_addr_is_multicast(loc_addr);
+	bool is_multicast = ipv6_addr_is_multicast(loc_addr);
 
 	sk_for_each_from(sk, node)
 		if (inet_sk(sk)->inet_num == num) {
@@ -153,12 +153,12 @@
  *
  *	Caller owns SKB so we must make clones.
  */
-static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
+static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 {
 	const struct in6_addr *saddr;
 	const struct in6_addr *daddr;
 	struct sock *sk;
-	int delivered = 0;
+	bool delivered = false;
 	__u8 hash;
 	struct net *net;
 
@@ -179,7 +179,7 @@
 	while (sk) {
 		int filtered;
 
-		delivered = 1;
+		delivered = true;
 		switch (nexthdr) {
 		case IPPROTO_ICMPV6:
 			filtered = icmpv6_filter(sk, skb);
@@ -225,7 +225,7 @@
 	return delivered;
 }
 
-int raw6_local_deliver(struct sk_buff *skb, int nexthdr)
+bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
 {
 	struct sock *raw_sk;
 
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 9447bd6..4ff9af6 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -134,15 +134,16 @@
 	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
 }
 
-int ip6_frag_match(struct inet_frag_queue *q, void *a)
+bool ip6_frag_match(struct inet_frag_queue *q, void *a)
 {
 	struct frag_queue *fq;
 	struct ip6_create_arg *arg = a;
 
 	fq = container_of(q, struct frag_queue, q);
-	return (fq->id == arg->id && fq->user == arg->user &&
-			ipv6_addr_equal(&fq->saddr, arg->src) &&
-			ipv6_addr_equal(&fq->daddr, arg->dst));
+	return	fq->id == arg->id &&
+		fq->user == arg->user &&
+		ipv6_addr_equal(&fq->saddr, arg->src) &&
+		ipv6_addr_equal(&fq->daddr, arg->dst);
 }
 EXPORT_SYMBOL(ip6_frag_match);
 
@@ -414,6 +415,7 @@
 	struct sk_buff *fp, *head = fq->q.fragments;
 	int    payload_len;
 	unsigned int nhoff;
+	int sum_truesize;
 
 	fq_kill(fq);
 
@@ -433,7 +435,7 @@
 		skb_morph(head, fq->q.fragments);
 		head->next = fq->q.fragments->next;
 
-		kfree_skb(fq->q.fragments);
+		consume_skb(fq->q.fragments);
 		fq->q.fragments = head;
 	}
 
@@ -483,20 +485,33 @@
 	head->mac_header += sizeof(struct frag_hdr);
 	head->network_header += sizeof(struct frag_hdr);
 
-	skb_shinfo(head)->frag_list = head->next;
 	skb_reset_transport_header(head);
 	skb_push(head, head->data - skb_network_header(head));
 
-	for (fp=head->next; fp; fp = fp->next) {
-		head->data_len += fp->len;
-		head->len += fp->len;
+	sum_truesize = head->truesize;
+	for (fp = head->next; fp;) {
+		bool headstolen;
+		int delta;
+		struct sk_buff *next = fp->next;
+
+		sum_truesize += fp->truesize;
 		if (head->ip_summed != fp->ip_summed)
 			head->ip_summed = CHECKSUM_NONE;
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
-		head->truesize += fp->truesize;
+
+		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+			kfree_skb_partial(fp, headstolen);
+		} else {
+			if (!skb_shinfo(head)->frag_list)
+				skb_shinfo(head)->frag_list = fp;
+			head->data_len += fp->len;
+			head->len += fp->len;
+			head->truesize += fp->truesize;
+		}
+		fp = next;
 	}
-	atomic_sub(head->truesize, &fq->q.net->mem);
+	atomic_sub(sum_truesize, &fq->q.net->mem);
 
 	head->next = NULL;
 	head->dev = dev;
@@ -518,12 +533,10 @@
 	return 1;
 
 out_oversize:
-	if (net_ratelimit())
-		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
+	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
 	goto out_fail;
 out_oom:
-	if (net_ratelimit())
-		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
+	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
 out_fail:
 	rcu_read_lock();
 	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
@@ -646,7 +659,7 @@
 		table[2].data = &net->ipv6.frags.timeout;
 	}
 
-	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
+	hdr = register_net_sysctl(net, "net/ipv6", table);
 	if (hdr == NULL)
 		goto err_reg;
 
@@ -674,7 +687,7 @@
 
 static int ip6_frags_sysctl_register(void)
 {
-	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
+	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
 			ip6_frags_ctl_table);
 	return ip6_ctl_header == NULL ? -ENOMEM : 0;
 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bc4888d..999a982 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -24,6 +24,8 @@
  *		Fixed routing subtrees.
  */
 
+#define pr_fmt(fmt) "IPv6: " fmt
+
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/export.h>
@@ -82,7 +84,7 @@
 static struct rt6_info *rt6_add_route_info(struct net *net,
 					   const struct in6_addr *prefix, int prefixlen,
 					   const struct in6_addr *gwaddr, int ifindex,
-					   unsigned pref);
+					   unsigned int pref);
 static struct rt6_info *rt6_get_route_info(struct net *net,
 					   const struct in6_addr *prefix, int prefixlen,
 					   const struct in6_addr *gwaddr, int ifindex);
@@ -331,22 +333,22 @@
 	}
 }
 
-static __inline__ int rt6_check_expired(const struct rt6_info *rt)
+static bool rt6_check_expired(const struct rt6_info *rt)
 {
 	struct rt6_info *ort = NULL;
 
 	if (rt->rt6i_flags & RTF_EXPIRES) {
 		if (time_after(jiffies, rt->dst.expires))
-			return 1;
+			return true;
 	} else if (rt->dst.from) {
 		ort = (struct rt6_info *) rt->dst.from;
 		return (ort->rt6i_flags & RTF_EXPIRES) &&
 			time_after(jiffies, ort->dst.expires);
 	}
-	return 0;
+	return false;
 }
 
-static inline int rt6_need_strict(const struct in6_addr *daddr)
+static bool rt6_need_strict(const struct in6_addr *daddr)
 {
 	return ipv6_addr_type(daddr) &
 		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
@@ -794,9 +796,7 @@
 				goto retry;
 			}
 
-			if (net_ratelimit())
-				printk(KERN_WARNING
-				       "ipv6: Neighbour table overflow.\n");
+			net_warn_ratelimited("Neighbour table overflow\n");
 			dst_free(&rt->dst);
 			return NULL;
 		}
@@ -1282,7 +1282,7 @@
 	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
 		table = fib6_get_table(net, cfg->fc_table);
 		if (!table) {
-			printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
+			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
 			table = fib6_new_table(net, cfg->fc_table);
 		}
 	} else {
@@ -1643,9 +1643,7 @@
 	rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
 
 	if (rt == net->ipv6.ip6_null_entry) {
-		if (net_ratelimit())
-			printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
-			       "for redirect target\n");
+		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
 		goto out;
 	}
 
@@ -1887,7 +1885,7 @@
 static struct rt6_info *rt6_add_route_info(struct net *net,
 					   const struct in6_addr *prefix, int prefixlen,
 					   const struct in6_addr *gwaddr, int ifindex,
-					   unsigned pref)
+					   unsigned int pref)
 {
 	struct fib6_config cfg = {
 		.fc_table	= RT6_TABLE_INFO,
@@ -2106,9 +2104,7 @@
 	int err;
 
 	if (!rt) {
-		if (net_ratelimit())
-			pr_warning("IPv6:  Maximum number of routes reached,"
-				   " consider increasing route/max_size.\n");
+		net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2217,10 +2213,9 @@
 	icmp6_clean_all(fib6_ifdown, &adn);
 }
 
-struct rt6_mtu_change_arg
-{
+struct rt6_mtu_change_arg {
 	struct net_device *dev;
-	unsigned mtu;
+	unsigned int mtu;
 };
 
 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
@@ -2262,7 +2257,7 @@
 	return 0;
 }
 
-void rt6_mtu_change(struct net_device *dev, unsigned mtu)
+void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
 {
 	struct rt6_mtu_change_arg arg = {
 		.dev = dev,
@@ -2430,7 +2425,8 @@
 	else
 		table = RT6_TABLE_UNSPEC;
 	rtm->rtm_table = table;
-	NLA_PUT_U32(skb, RTA_TABLE, table);
+	if (nla_put_u32(skb, RTA_TABLE, table))
+		goto nla_put_failure;
 	if (rt->rt6i_flags & RTF_REJECT)
 		rtm->rtm_type = RTN_UNREACHABLE;
 	else if (rt->rt6i_flags & RTF_LOCAL)
@@ -2453,16 +2449,20 @@
 		rtm->rtm_flags |= RTM_F_CLONED;
 
 	if (dst) {
-		NLA_PUT(skb, RTA_DST, 16, dst);
+		if (nla_put(skb, RTA_DST, 16, dst))
+			goto nla_put_failure;
 		rtm->rtm_dst_len = 128;
 	} else if (rtm->rtm_dst_len)
-		NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
+		if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
+			goto nla_put_failure;
 #ifdef CONFIG_IPV6_SUBTREES
 	if (src) {
-		NLA_PUT(skb, RTA_SRC, 16, src);
+		if (nla_put(skb, RTA_SRC, 16, src))
+			goto nla_put_failure;
 		rtm->rtm_src_len = 128;
-	} else if (rtm->rtm_src_len)
-		NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
+	} else if (rtm->rtm_src_len &&
+		   nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
+		goto nla_put_failure;
 #endif
 	if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
@@ -2480,17 +2480,20 @@
 			}
 		} else
 #endif
-			NLA_PUT_U32(skb, RTA_IIF, iif);
+			if (nla_put_u32(skb, RTA_IIF, iif))
+				goto nla_put_failure;
 	} else if (dst) {
 		struct in6_addr saddr_buf;
-		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
-			NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
+		    nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+			goto nla_put_failure;
 	}
 
 	if (rt->rt6i_prefsrc.plen) {
 		struct in6_addr saddr_buf;
 		saddr_buf = rt->rt6i_prefsrc.addr;
-		NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+		if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
+			goto nla_put_failure;
 	}
 
 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
@@ -2506,11 +2509,11 @@
 	}
 	rcu_read_unlock();
 
-	if (rt->dst.dev)
-		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
-
-	NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
-
+	if (rt->dst.dev &&
+	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
+		goto nla_put_failure;
+	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
+		goto nla_put_failure;
 	if (!(rt->rt6i_flags & RTF_EXPIRES))
 		expires = 0;
 	else if (rt->dst.expires - jiffies < INT_MAX)
@@ -2615,6 +2618,7 @@
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb) {
+		dst_release(&rt->dst);
 		err = -ENOBUFS;
 		goto errout;
 	}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c4ffd17..6041571 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -17,6 +17,8 @@
  * Fred Templin <fred.l.templin@boeing.com>:	isatap support
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/capability.h>
 #include <linux/errno.h>
@@ -87,35 +89,51 @@
 
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_tstats {
-	unsigned long	rx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_packets;
-	unsigned long	tx_bytes;
-} __attribute__((aligned(4*sizeof(unsigned long))));
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+	struct u64_stats_sync	syncp;
+};
 
-static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
+						   struct rtnl_link_stats64 *tot)
 {
-	struct pcpu_tstats sum = { 0 };
 	int i;
 
 	for_each_possible_cpu(i) {
 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+		unsigned int start;
 
-		sum.rx_packets += tstats->rx_packets;
-		sum.rx_bytes   += tstats->rx_bytes;
-		sum.tx_packets += tstats->tx_packets;
-		sum.tx_bytes   += tstats->tx_bytes;
+		do {
+			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			rx_packets = tstats->rx_packets;
+			tx_packets = tstats->tx_packets;
+			rx_bytes = tstats->rx_bytes;
+			tx_bytes = tstats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+		tot->rx_packets += rx_packets;
+		tot->tx_packets += tx_packets;
+		tot->rx_bytes   += rx_bytes;
+		tot->tx_bytes   += tx_bytes;
 	}
-	dev->stats.rx_packets = sum.rx_packets;
-	dev->stats.rx_bytes   = sum.rx_bytes;
-	dev->stats.tx_packets = sum.tx_packets;
-	dev->stats.tx_bytes   = sum.tx_bytes;
-	return &dev->stats;
+
+	tot->rx_errors = dev->stats.rx_errors;
+	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+	tot->tx_dropped = dev->stats.tx_dropped;
+	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+	tot->tx_errors = dev->stats.tx_errors;
+
+	return tot;
 }
+
 /*
  * Must be invoked with rcu_read_lock
  */
-static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
+static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
 		struct net_device *dev, __be32 remote, __be32 local)
 {
 	unsigned int h0 = HASH(remote);
@@ -686,12 +704,11 @@
 			neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 
 		if (neigh == NULL) {
-			if (net_ratelimit())
-				printk(KERN_DEBUG "sit: nexthop == NULL\n");
+			net_dbg_ratelimited("sit: nexthop == NULL\n");
 			goto tx_error;
 		}
 
-		addr6 = (const struct in6_addr*)&neigh->primary_key;
+		addr6 = (const struct in6_addr *)&neigh->primary_key;
 		addr_type = ipv6_addr_type(addr6);
 
 		if ((addr_type & IPV6_ADDR_UNICAST) &&
@@ -716,12 +733,11 @@
 			neigh = dst_neigh_lookup(skb_dst(skb), &iph6->daddr);
 
 		if (neigh == NULL) {
-			if (net_ratelimit())
-				printk(KERN_DEBUG "sit: nexthop == NULL\n");
+			net_dbg_ratelimited("sit: nexthop == NULL\n");
 			goto tx_error;
 		}
 
-		addr6 = (const struct in6_addr*)&neigh->primary_key;
+		addr6 = (const struct in6_addr *)&neigh->primary_key;
 		addr_type = ipv6_addr_type(addr6);
 
 		if (addr_type == IPV6_ADDR_ANY) {
@@ -1126,7 +1142,7 @@
 	.ndo_start_xmit	= ipip6_tunnel_xmit,
 	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
 	.ndo_change_mtu	= ipip6_tunnel_change_mtu,
-	.ndo_get_stats	= ipip6_get_stats,
+	.ndo_get_stats64= ipip6_get_stats64,
 };
 
 static void ipip6_dev_free(struct net_device *dev)
@@ -1287,7 +1303,7 @@
 {
 	int err;
 
-	printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
+	pr_info("IPv6 over IPv4 tunneling driver\n");
 
 	err = register_pernet_device(&sit_net_ops);
 	if (err < 0)
@@ -1295,7 +1311,7 @@
 	err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
 	if (err < 0) {
 		unregister_pernet_device(&sit_net_ops);
-		printk(KERN_INFO "sit init: Can't add protocol\n");
+		pr_info("%s: can't add protocol\n", __func__);
 	}
 	return err;
 }
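
This hunk converts only the reader side of the tunnel counters to u64_stats_sync;
the writer side (the xmit/rcv paths, not visible here) is expected to bracket its
updates as in the sketch below so that 64-bit counters cannot be read torn on
32-bit hosts. The function name is illustrative, not part of the patch.

    /* Writer-side counterpart to the u64_stats reader above (illustrative sketch). */
    static void example_count_tx(struct net_device *dev, unsigned int len)
    {
        struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);

        u64_stats_update_begin(&tstats->syncp);
        tstats->tx_packets++;
        tstats->tx_bytes += len;
        u64_stats_update_end(&tstats->syncp);
    }
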
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 166a57c..e85c48b 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -16,32 +16,8 @@
 #include <net/addrconf.h>
 #include <net/inet_frag.h>
 
-static struct ctl_table empty[1];
-
-static ctl_table ipv6_static_skeleton[] = {
-	{
-		.procname	= "neigh",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= empty,
-	},
-	{ }
-};
-
 static ctl_table ipv6_table_template[] = {
 	{
-		.procname	= "route",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= ipv6_route_table_template
-	},
-	{
-		.procname	= "icmp",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= ipv6_icmp_table_template
-	},
-	{
 		.procname	= "bindv6only",
 		.data		= &init_net.ipv6.sysctl.bindv6only,
 		.maxlen		= sizeof(int),
@@ -62,13 +38,6 @@
 	{ }
 };
 
-struct ctl_path net_ipv6_ctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv6", },
-	{ },
-};
-EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
-
 static int __net_init ipv6_sysctl_net_init(struct net *net)
 {
 	struct ctl_table *ipv6_table;
@@ -81,28 +50,37 @@
 			     GFP_KERNEL);
 	if (!ipv6_table)
 		goto out;
+	ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
 
 	ipv6_route_table = ipv6_route_sysctl_init(net);
 	if (!ipv6_route_table)
 		goto out_ipv6_table;
-	ipv6_table[0].child = ipv6_route_table;
 
 	ipv6_icmp_table = ipv6_icmp_sysctl_init(net);
 	if (!ipv6_icmp_table)
 		goto out_ipv6_route_table;
-	ipv6_table[1].child = ipv6_icmp_table;
 
-	ipv6_table[2].data = &net->ipv6.sysctl.bindv6only;
-
-	net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path,
-							   ipv6_table);
-	if (!net->ipv6.sysctl.table)
+	net->ipv6.sysctl.hdr = register_net_sysctl(net, "net/ipv6", ipv6_table);
+	if (!net->ipv6.sysctl.hdr)
 		goto out_ipv6_icmp_table;
 
+	net->ipv6.sysctl.route_hdr =
+		register_net_sysctl(net, "net/ipv6/route", ipv6_route_table);
+	if (!net->ipv6.sysctl.route_hdr)
+		goto out_unregister_ipv6_table;
+
+	net->ipv6.sysctl.icmp_hdr =
+		register_net_sysctl(net, "net/ipv6/icmp", ipv6_icmp_table);
+	if (!net->ipv6.sysctl.icmp_hdr)
+		goto out_unregister_route_table;
+
 	err = 0;
 out:
 	return err;
-
+out_unregister_route_table:
+	unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
+out_unregister_ipv6_table:
+	unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
 out_ipv6_icmp_table:
 	kfree(ipv6_icmp_table);
 out_ipv6_route_table:
@@ -118,11 +96,13 @@
 	struct ctl_table *ipv6_route_table;
 	struct ctl_table *ipv6_icmp_table;
 
-	ipv6_table = net->ipv6.sysctl.table->ctl_table_arg;
-	ipv6_route_table = ipv6_table[0].child;
-	ipv6_icmp_table = ipv6_table[1].child;
+	ipv6_table = net->ipv6.sysctl.hdr->ctl_table_arg;
+	ipv6_route_table = net->ipv6.sysctl.route_hdr->ctl_table_arg;
+	ipv6_icmp_table = net->ipv6.sysctl.icmp_hdr->ctl_table_arg;
 
-	unregister_net_sysctl_table(net->ipv6.sysctl.table);
+	unregister_net_sysctl_table(net->ipv6.sysctl.icmp_hdr);
+	unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
+	unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
 
 	kfree(ipv6_table);
 	kfree(ipv6_route_table);
@@ -140,7 +120,7 @@
 {
 	int err = -ENOMEM;
 
-	ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_rotable);
+	ip6_header = register_net_sysctl(&init_net, "net/ipv6", ipv6_rotable);
 	if (ip6_header == NULL)
 		goto out;
 
@@ -160,18 +140,3 @@
 	unregister_net_sysctl_table(ip6_header);
 	unregister_pernet_subsys(&ipv6_sysctl_net_ops);
 }
-
-static struct ctl_table_header *ip6_base;
-
-int ipv6_static_sysctl_register(void)
-{
-	ip6_base = register_sysctl_paths(net_ipv6_ctl_path, ipv6_static_skeleton);
-	if (ip6_base == NULL)
-		return -ENOMEM;
-	return 0;
-}
-
-void ipv6_static_sysctl_unregister(void)
-{
-	unregister_net_sysctl_table(ip6_base);
-}
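
The rewrite above shows the sysctl API change that runs through this series:
ctl_path arrays and nested .child tables are replaced by a single
register_net_sysctl() call that takes the path as a string. A condensed sketch of
the new pattern for a hypothetical per-net table ("net/foo", foo_table and
foo_enabled are placeholders):

    /* Sketch of the new registration pattern; names are placeholders. */
    static int foo_enabled;
    static struct ctl_table_header *foo_hdr;

    static struct ctl_table foo_table[] = {
        {
            .procname     = "foo_enabled",
            .data         = &foo_enabled,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_dointvec,
        },
        { }
    };

    static int __net_init foo_sysctl_init(struct net *net)
    {
        foo_hdr = register_net_sysctl(net, "net/foo", foo_table);
        return foo_hdr ? 0 : -ENOMEM;
    }

    static void __net_exit foo_sysctl_exit(struct net *net)
    {
        unregister_net_sysctl_table(foo_hdr);
    }
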
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 98256cf..554d599 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -723,12 +723,10 @@
 				      NULL, NULL, skb);
 
 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
-		if (net_ratelimit()) {
-			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
-			       genhash ? "failed" : "mismatch",
-			       &ip6h->saddr, ntohs(th->source),
-			       &ip6h->daddr, ntohs(th->dest));
-		}
+		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
+				     genhash ? "failed" : "mismatch",
+				     &ip6h->saddr, ntohs(th->source),
+				     &ip6h->daddr, ntohs(th->dest));
 		return 1;
 	}
 	return 0;
@@ -1057,7 +1055,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
 	struct dst_entry *dst = NULL;
-	int want_cookie = 0;
+	bool want_cookie = false;
 
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_conn_request(sk, skb);
@@ -1118,7 +1116,7 @@
 		while (l-- > 0)
 			*c++ ^= *hash_location++;
 
-		want_cookie = 0;	/* not our kind of cookie */
+		want_cookie = false;	/* not our kind of cookie */
 		tmp_ext.cookie_out_never = 0; /* false */
 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 	} else if (!tp->rx_opt.cookie_in_always) {
@@ -1140,7 +1138,7 @@
 	treq->rmt_addr = ipv6_hdr(skb)->saddr;
 	treq->loc_addr = ipv6_hdr(skb)->daddr;
 	if (!want_cookie || tmp_opt.tstamp_ok)
-		TCP_ECN_create_request(req, tcp_hdr(skb));
+		TCP_ECN_create_request(req, skb);
 
 	treq->iif = sk->sk_bound_dev_if;
 
@@ -1353,7 +1351,7 @@
 	newnp->pktoptions = NULL;
 	if (treq->pktopts != NULL) {
 		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
-		kfree_skb(treq->pktopts);
+		consume_skb(treq->pktopts);
 		treq->pktopts = NULL;
 		if (newnp->pktoptions)
 			skb_set_owner_r(newnp->pktoptions, newsk);
@@ -1658,7 +1656,8 @@
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v6_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb,
+					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
@@ -1777,6 +1776,7 @@
 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
 	.get_peer	   = tcp_v6_get_peer,
 	.net_header_len	   = sizeof(struct ipv6hdr),
+	.net_frag_header_len = sizeof(struct frag_hdr),
 	.setsockopt	   = ipv6_setsockopt,
 	.getsockopt	   = ipv6_getsockopt,
 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
@@ -1833,64 +1833,15 @@
 static int tcp_v6_init_sock(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
 
-	skb_queue_head_init(&tp->out_of_order_queue);
-	tcp_init_xmit_timers(sk);
-	tcp_prequeue_init(tp);
-
-	icsk->icsk_rto = TCP_TIMEOUT_INIT;
-	tp->mdev = TCP_TIMEOUT_INIT;
-
-	/* So many TCP implementations out there (incorrectly) count the
-	 * initial SYN frame in their delayed-ACK and congestion control
-	 * algorithms that we must have the following bandaid to talk
-	 * efficiently to them.  -DaveM
-	 */
-	tp->snd_cwnd = 2;
-
-	/* See draft-stevens-tcpca-spec-01 for discussion of the
-	 * initialization of these values.
-	 */
-	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-	tp->snd_cwnd_clamp = ~0;
-	tp->mss_cache = TCP_MSS_DEFAULT;
-
-	tp->reordering = sysctl_tcp_reordering;
-
-	sk->sk_state = TCP_CLOSE;
+	tcp_init_sock(sk);
 
 	icsk->icsk_af_ops = &ipv6_specific;
-	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
-	icsk->icsk_sync_mss = tcp_sync_mss;
-	sk->sk_write_space = sk_stream_write_space;
-	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
 #ifdef CONFIG_TCP_MD5SIG
-	tp->af_specific = &tcp_sock_ipv6_specific;
+	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
 #endif
 
-	/* TCP Cookie Transactions */
-	if (sysctl_tcp_cookie_size > 0) {
-		/* Default, cookies without s_data_payload. */
-		tp->cookie_values =
-			kzalloc(sizeof(*tp->cookie_values),
-				sk->sk_allocation);
-		if (tp->cookie_values != NULL)
-			kref_init(&tp->cookie_values->kref);
-	}
-	/* Presumed zeroed, in order of appearance:
-	 *	cookie_in_always, cookie_out_never,
-	 *	s_data_constant, s_data_in, s_data_out
-	 */
-	sk->sk_sndbuf = sysctl_tcp_wmem[1];
-	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
-
-	local_bh_disable();
-	sock_update_memcg(sk);
-	sk_sockets_allocated_inc(sk);
-	local_bh_enable();
-
 	return 0;
 }
 
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 4f3cec1..4b0f50d 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -19,6 +19,8 @@
  * 		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
  */
 
+#define pr_fmt(fmt) "IPv6: " fmt
+
 #include <linux/icmpv6.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -160,11 +162,11 @@
 static int __init tunnel6_init(void)
 {
 	if (inet6_add_protocol(&tunnel6_protocol, IPPROTO_IPV6)) {
-		printk(KERN_ERR "tunnel6 init(): can't add protocol\n");
+		pr_err("%s: can't add protocol\n", __func__);
 		return -EAGAIN;
 	}
 	if (inet6_add_protocol(&tunnel46_protocol, IPPROTO_IPIP)) {
-		printk(KERN_ERR "tunnel6 init(): can't add protocol\n");
+		pr_err("%s: can't add protocol\n", __func__);
 		inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6);
 		return -EAGAIN;
 	}
@@ -174,9 +176,9 @@
 static void __exit tunnel6_fini(void)
 {
 	if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP))
-		printk(KERN_ERR "tunnel6 close: can't remove protocol\n");
+		pr_err("%s: can't remove protocol\n", __func__);
 	if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6))
-		printk(KERN_ERR "tunnel6 close: can't remove protocol\n");
+		pr_err("%s: can't remove protocol\n", __func__);
 }
 
 module_init(tunnel6_init);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 37b0699..f05099f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -103,7 +103,7 @@
 {
 	unsigned int hash2_nulladdr =
 		udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
-	unsigned int hash2_partial = 
+	unsigned int hash2_partial =
 		udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
 
 	/* precompute partial secondary hash */
@@ -349,7 +349,7 @@
 	bool slow;
 
 	if (addr_len)
-		*addr_len=sizeof(struct sockaddr_in6);
+		*addr_len = sizeof(struct sockaddr_in6);
 
 	if (flags & MSG_ERRQUEUE)
 		return ipv6_recv_error(sk, msg, len);
@@ -496,6 +496,28 @@
 	sock_put(sk);
 }
 
+static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int rc;
+
+	if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+		sock_rps_save_rxhash(sk, skb);
+
+	rc = sock_queue_rcv_skb(sk, skb);
+	if (rc < 0) {
+		int is_udplite = IS_UDPLITE(sk);
+
+		/* Note that an ENOMEM error is charged twice */
+		if (rc == -ENOMEM)
+			UDP6_INC_STATS_BH(sock_net(sk),
+					UDP_MIB_RCVBUFERRORS, is_udplite);
+		UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+		kfree_skb(skb);
+		return -1;
+	}
+	return 0;
+}
+
 static __inline__ void udpv6_err(struct sk_buff *skb,
 				 struct inet6_skb_parm *opt, u8 type,
 				 u8 code, int offset, __be32 info     )
@@ -503,18 +525,54 @@
 	__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
 }
 
-int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
+static struct static_key udpv6_encap_needed __read_mostly;
+void udpv6_encap_enable(void)
+{
+	if (!static_key_enabled(&udpv6_encap_needed))
+		static_key_slow_inc(&udpv6_encap_needed);
+}
+EXPORT_SYMBOL(udpv6_encap_enable);
+
+int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct udp_sock *up = udp_sk(sk);
 	int rc;
 	int is_udplite = IS_UDPLITE(sk);
 
-	if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
-		sock_rps_save_rxhash(sk, skb);
-
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto drop;
 
+	if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
+		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+
+		/*
+		 * This is an encapsulation socket so pass the skb to
+		 * the socket's udp_encap_rcv() hook. Otherwise, just
+		 * fall through and pass this up the UDP socket.
+		 * up->encap_rcv() returns the following value:
+		 * =0 if skb was successfully passed to the encap
+		 *    handler or was discarded by it.
+		 * >0 if skb should be passed on to UDP.
+		 * <0 if skb should be resubmitted as proto -N
+		 */
+
+		/* if we're overly short, let UDP handle it */
+		encap_rcv = ACCESS_ONCE(up->encap_rcv);
+		if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+			int ret;
+
+			ret = encap_rcv(sk, skb);
+			if (ret <= 0) {
+				UDP_INC_STATS_BH(sock_net(sk),
+						 UDP_MIB_INDATAGRAMS,
+						 is_udplite);
+				return -ret;
+			}
+		}
+
+		/* FALLTHROUGH -- it's a UDP Packet */
+	}
+
 	/*
 	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
 	 */
@@ -539,21 +597,25 @@
 			goto drop;
 	}
 
-	skb_dst_drop(skb);
-	rc = sock_queue_rcv_skb(sk, skb);
-	if (rc < 0) {
-		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM)
-			UDP6_INC_STATS_BH(sock_net(sk),
-					UDP_MIB_RCVBUFERRORS, is_udplite);
-		goto drop_no_sk_drops_inc;
-	}
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+		goto drop;
 
-	return 0;
+	skb_dst_drop(skb);
+
+	bh_lock_sock(sk);
+	rc = 0;
+	if (!sock_owned_by_user(sk))
+		rc = __udpv6_queue_rcv_skb(sk, skb);
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+		bh_unlock_sock(sk);
+		goto drop;
+	}
+	bh_unlock_sock(sk);
+
+	return rc;
 drop:
-	atomic_inc(&sk->sk_drops);
-drop_no_sk_drops_inc:
 	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	atomic_inc(&sk->sk_drops);
 	kfree_skb(skb);
 	return -1;
 }
@@ -602,37 +664,27 @@
 static void flush_stack(struct sock **stack, unsigned int count,
 			struct sk_buff *skb, unsigned int final)
 {
-	unsigned int i;
+	struct sk_buff *skb1 = NULL;
 	struct sock *sk;
-	struct sk_buff *skb1;
+	unsigned int i;
 
 	for (i = 0; i < count; i++) {
-		skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
-
 		sk = stack[i];
-		if (skb1) {
-			if (sk_rcvqueues_full(sk, skb1)) {
-				kfree_skb(skb1);
-				goto drop;
-			}
-			bh_lock_sock(sk);
-			if (!sock_owned_by_user(sk))
-				udpv6_queue_rcv_skb(sk, skb1);
-			else if (sk_add_backlog(sk, skb1)) {
-				kfree_skb(skb1);
-				bh_unlock_sock(sk);
-				goto drop;
-			}
-			bh_unlock_sock(sk);
-			continue;
+		if (likely(skb1 == NULL))
+			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+		if (!skb1) {
+			atomic_inc(&sk->sk_drops);
+			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+					  IS_UDPLITE(sk));
+			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+					  IS_UDPLITE(sk));
 		}
-drop:
-		atomic_inc(&sk->sk_drops);
-		UDP6_INC_STATS_BH(sock_net(sk),
-				UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
-		UDP6_INC_STATS_BH(sock_net(sk),
-				UDP_MIB_INERRORS, IS_UDPLITE(sk));
+
+		if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
+			skb1 = NULL;
 	}
+	if (unlikely(skb1))
+		kfree_skb(skb1);
 }
 /*
  * Note: called only from the BH handler context,
@@ -772,39 +824,29 @@
 	 * for sock caches... i'll skip this for now.
 	 */
 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+	if (sk != NULL) {
+		int ret = udpv6_queue_rcv_skb(sk, skb);
+		sock_put(sk);
 
-	if (sk == NULL) {
-		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
-			goto discard;
+		/* a return value > 0 means to resubmit the input, but
+		 * it wants the return to be -protocol, or 0
+		 */
+		if (ret > 0)
+			return -ret;
 
-		if (udp_lib_checksum_complete(skb))
-			goto discard;
-		UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
-				proto == IPPROTO_UDPLITE);
-
-		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
-
-		kfree_skb(skb);
 		return 0;
 	}
 
-	/* deliver */
+	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+		goto discard;
 
-	if (sk_rcvqueues_full(sk, skb)) {
-		sock_put(sk);
+	if (udp_lib_checksum_complete(skb))
 		goto discard;
-	}
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		udpv6_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
-		atomic_inc(&sk->sk_drops);
-		bh_unlock_sock(sk);
-		sock_put(sk);
-		goto discard;
-	}
-	bh_unlock_sock(sk);
-	sock_put(sk);
+
+	UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+	kfree_skb(skb);
 	return 0;
 
 short_packet:
@@ -1337,7 +1379,7 @@
 	 * do checksum of UDP packets sent as multiple IP fragments.
 	 */
 	offset = skb_checksum_start_offset(skb);
-	csum = skb_checksum(skb, offset, skb->len- offset, 0);
+	csum = skb_checksum(skb, offset, skb->len - offset, 0);
 	offset += skb->csum_offset;
 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
 	skb->ip_summed = CHECKSUM_NONE;
@@ -1471,7 +1513,7 @@
 	.getsockopt	   = udpv6_getsockopt,
 	.sendmsg	   = udpv6_sendmsg,
 	.recvmsg	   = udpv6_recvmsg,
-	.backlog_rcv	   = udpv6_queue_rcv_skb,
+	.backlog_rcv	   = __udpv6_queue_rcv_skb,
 	.hash		   = udp_lib_hash,
 	.unhash		   = udp_lib_unhash,
 	.rehash		   = udp_v6_rehash,
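
The new udpv6_encap_needed static key and encap_rcv hook mirror the IPv4 UDP
encapsulation path. The sketch below is hypothetical (the example_* names are
invented); it only illustrates how a tunnel driver would arm the hook on its
socket and enable the key added in this hunk.

    /* Hypothetical tunnel hooking the new IPv6 UDP encapsulation path (sketch). */
    static bool example_is_tunnel_frame(const struct sk_buff *skb);  /* invented parser */
    static void example_deliver(struct sk_buff *skb);                /* invented delivery */

    static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
    {
        /* Return 0 if consumed, >0 to hand the skb back to UDP, <0 to resubmit as -N. */
        if (!example_is_tunnel_frame(skb))
            return 1;
        example_deliver(skb);
        return 0;
    }

    static void example_enable_encap(struct sock *sk)
    {
        struct udp_sock *up = udp_sk(sk);

        up->encap_type = 1;                 /* any non-zero type */
        up->encap_rcv  = example_encap_rcv;
        udpv6_encap_enable();               /* flip the static key checked in the fast path */
    }
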
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8ea65e0..8625fba 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -334,8 +334,8 @@
 		goto out_policy;
 
 #ifdef CONFIG_SYSCTL
-	sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path,
-						xfrm6_policy_table);
+	sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6",
+					 xfrm6_policy_table);
 #endif
 out:
 	return ret;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 4fe1db12..ee5a706 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -68,9 +68,9 @@
 
 static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
 
-static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
+static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
 {
-	unsigned h;
+	unsigned int h;
 
 	h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
 	h ^= h >> 16;
@@ -80,7 +80,7 @@
 	return h;
 }
 
-static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
+static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi)
 {
 	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
 }
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 9680226..dfd6faa 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -983,10 +983,6 @@
 		goto out;
 
 	switch (idef->ipx_dlink_type) {
-	case IPX_FRAME_TR_8022:
-		printk(KERN_WARNING "IPX frame type 802.2TR is "
-			"obsolete Use 802.2 instead.\n");
-		/* fall through */
 	case IPX_FRAME_8022:
 		dlink_type 	= htons(ETH_P_802_2);
 		datalink 	= p8022_datalink;
@@ -996,10 +992,7 @@
 			dlink_type 	= htons(ETH_P_IPX);
 			datalink 	= pEII_datalink;
 			break;
-		} else
-			printk(KERN_WARNING "IPX frame type EtherII over "
-					"token-ring is obsolete. Use SNAP "
-					"instead.\n");
+		}
 		/* fall through */
 	case IPX_FRAME_SNAP:
 		dlink_type 	= htons(ETH_P_SNAP);
@@ -1275,7 +1268,6 @@
 	case ETH_P_802_2:	rc = "802.2";	break;
 	case ETH_P_SNAP:	rc = "SNAP";	break;
 	case ETH_P_802_3:	rc = "802.3";	break;
-	case ETH_P_TR_802_2:	rc = "802.2TR";	break;
 	}
 
 	return rc;
@@ -1909,9 +1901,7 @@
 			      (const unsigned short __user *)argp);
 		break;
 	case SIOCGSTAMP:
-		rc = -EINVAL;
-		if (sk)
-			rc = sock_get_timestamp(sk, argp);
+		rc = sock_get_timestamp(sk, argp);
 		break;
 	case SIOCGIFDSTADDR:
 	case SIOCSIFDSTADDR:
diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c
index bd6dca0..ad7c03d 100644
--- a/net/ipx/sysctl_net_ipx.c
+++ b/net/ipx/sysctl_net_ipx.c
@@ -8,6 +8,7 @@
 
 #include <linux/mm.h>
 #include <linux/sysctl.h>
+#include <net/net_namespace.h>
 
 #ifndef CONFIG_SYSCTL
 #error This file should not be compiled without CONFIG_SYSCTL defined
@@ -27,20 +28,14 @@
 	{ },
 };
 
-static struct ctl_path ipx_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipx", },
-	{ }
-};
-
 static struct ctl_table_header *ipx_table_header;
 
 void ipx_register_sysctl(void)
 {
-	ipx_table_header = register_sysctl_paths(ipx_path, ipx_table);
+	ipx_table_header = register_net_sysctl(&init_net, "net/ipx", ipx_table);
 }
 
 void ipx_unregister_sysctl(void)
 {
-	unregister_sysctl_table(ipx_table_header);
+	unregister_net_sysctl_table(ipx_table_header);
 }
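
The sysctl conversion above repeats for each protocol touched by this series (xfrm6 and ipx above, irda below): the per-protocol struct ctl_path array is dropped and the path is handed to register_net_sysctl() as a plain "net/<subsys>" string, with unregister_net_sysctl_table() as the teardown counterpart. A minimal sketch of the resulting shape, using a hypothetical "net/foo" table (the foo_* names are illustrative, not part of this patch):

	static struct ctl_table_header *foo_table_header;

	static int __init foo_sysctl_register(void)
	{
		/* The path string replaces the old ctl_path array. */
		foo_table_header = register_net_sysctl(&init_net, "net/foo",
						       foo_table);
		if (!foo_table_header)
			return -ENOMEM;
		return 0;
	}

	static void foo_sysctl_unregister(void)
	{
		unregister_net_sysctl_table(foo_table_header);
	}

foo_table is assumed to be a zero-terminated ctl_table array, as in the ipx and irda cases.
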
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index 77c5e64..d0667d6 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -54,7 +54,7 @@
  */
 static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
 {
-	unsigned cflag, cval;
+	unsigned int cflag, cval;
 	int baud;
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 2615ffc..de73f64 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -235,12 +235,6 @@
 	{ }
 };
 
-static struct ctl_path irda_path[] = {
-	{ .procname = "net", },
-	{ .procname = "irda", },
-	{ }
-};
-
 static struct ctl_table_header *irda_table_header;
 
 /*
@@ -251,7 +245,7 @@
  */
 int __init irda_sysctl_register(void)
 {
-	irda_table_header = register_sysctl_paths(irda_path, irda_table);
+	irda_table_header = register_net_sysctl(&init_net, "net/irda", irda_table);
 	if (!irda_table_header)
 		return -ENOMEM;
 
@@ -266,7 +260,7 @@
  */
 void irda_sysctl_unregister(void)
 {
-	unregister_sysctl_table(irda_table_header);
+	unregister_net_sysctl_table(irda_table_header);
 }
 
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7e5d927..34e4185 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1714,7 +1714,7 @@
 static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
 	struct net *net = sock_net(sk);
-	unsigned proto;
+	unsigned int proto;
 	struct km_event c;
 	struct xfrm_audit audit_info;
 	int err, err2;
@@ -3547,7 +3547,7 @@
 		goto out;
 
 	err = -EMSGSIZE;
-	if ((unsigned)len > sk->sk_sndbuf - 32)
+	if ((unsigned int)len > sk->sk_sndbuf - 32)
 		goto out;
 
 	err = -ENOBUFS;
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile
index 110e7bc..2870f41 100644
--- a/net/l2tp/Makefile
+++ b/net/l2tp/Makefile
@@ -10,3 +10,6 @@
 obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o
 obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o
 obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o
+ifneq ($(CONFIG_IPV6),)
+obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip6.o
+endif
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 89ff8c6..32b2155 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -18,6 +18,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/list.h>
@@ -53,6 +55,10 @@
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 #include <net/protocol.h>
+#include <net/inet6_connection_sock.h>
+#include <net/inet_ecn.h>
+#include <net/ip6_route.h>
+#include <net/ip6_checksum.h>
 
 #include <asm/byteorder.h>
 #include <linux/atomic.h>
@@ -82,12 +88,6 @@
 /* Default trace flags */
 #define L2TP_DEFAULT_DEBUG_FLAGS	0
 
-#define PRINTK(_mask, _type, _lvl, _fmt, args...)			\
-	do {								\
-		if ((_mask) & (_type))					\
-			printk(_lvl "L2TP: " _fmt, ##args);		\
-	} while (0)
-
 /* Private data stored for received packets in the skb.
  */
 struct l2tp_skb_cb {
@@ -137,14 +137,20 @@
 		l2tp_tunnel_free(tunnel);
 }
 #ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) do { \
-		printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
-		l2tp_tunnel_inc_refcount_1(_t);				\
-	} while (0)
-#define l2tp_tunnel_dec_refcount(_t) do { \
-		printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
-		l2tp_tunnel_dec_refcount_1(_t);				\
-	} while (0)
+#define l2tp_tunnel_inc_refcount(_t)					\
+do {									\
+	pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n",	\
+		 __func__, __LINE__, (_t)->name,			\
+		 atomic_read(&_t->ref_count));				\
+	l2tp_tunnel_inc_refcount_1(_t);					\
+} while (0)
+#define l2tp_tunnel_dec_refcount(_t)					\
+do {									\
+	pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n",	\
+		 __func__, __LINE__, (_t)->name,			\
+		 atomic_read(&_t->ref_count));				\
+	l2tp_tunnel_dec_refcount_1(_t);					\
+} while (0)
 #else
 #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
 #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
@@ -326,16 +332,20 @@
 	struct sk_buff *skbp;
 	struct sk_buff *tmp;
 	u32 ns = L2TP_SKB_CB(skb)->ns;
+	struct l2tp_stats *sstats;
 
 	spin_lock_bh(&session->reorder_q.lock);
+	sstats = &session->stats;
 	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
 		if (L2TP_SKB_CB(skbp)->ns > ns) {
 			__skb_queue_before(&session->reorder_q, skbp, skb);
-			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
-			       "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
-			       session->name, ns, L2TP_SKB_CB(skbp)->ns,
-			       skb_queue_len(&session->reorder_q));
-			session->stats.rx_oos_packets++;
+			l2tp_dbg(session, L2TP_MSG_SEQ,
+				 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
+				 session->name, ns, L2TP_SKB_CB(skbp)->ns,
+				 skb_queue_len(&session->reorder_q));
+			u64_stats_update_begin(&sstats->syncp);
+			sstats->rx_oos_packets++;
+			u64_stats_update_end(&sstats->syncp);
 			goto out;
 		}
 	}
@@ -352,16 +362,23 @@
 {
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	int length = L2TP_SKB_CB(skb)->length;
+	struct l2tp_stats *tstats, *sstats;
 
 	/* We're about to requeue the skb, so return resources
 	 * to its current owner (a socket receive buffer).
 	 */
 	skb_orphan(skb);
 
-	tunnel->stats.rx_packets++;
-	tunnel->stats.rx_bytes += length;
-	session->stats.rx_packets++;
-	session->stats.rx_bytes += length;
+	tstats = &tunnel->stats;
+	u64_stats_update_begin(&tstats->syncp);
+	sstats = &session->stats;
+	u64_stats_update_begin(&sstats->syncp);
+	tstats->rx_packets++;
+	tstats->rx_bytes += length;
+	sstats->rx_packets++;
+	sstats->rx_bytes += length;
+	u64_stats_update_end(&tstats->syncp);
+	u64_stats_update_end(&sstats->syncp);
 
 	if (L2TP_SKB_CB(skb)->has_seq) {
 		/* Bump our Nr */
@@ -371,8 +388,8 @@
 		else
 			session->nr &= 0xffffff;
 
-		PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
-		       "%s: updated nr to %hu\n", session->name, session->nr);
+		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n",
+			 session->name, session->nr);
 	}
 
 	/* call private receive handler */
@@ -392,6 +409,7 @@
 {
 	struct sk_buff *skb;
 	struct sk_buff *tmp;
+	struct l2tp_stats *sstats;
 
 	/* If the pkt at the head of the queue has the nr that we
 	 * expect to send up next, dequeue it and any other
@@ -399,16 +417,19 @@
 	 */
 start:
 	spin_lock_bh(&session->reorder_q.lock);
+	sstats = &session->stats;
 	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
 		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
-			session->stats.rx_seq_discards++;
-			session->stats.rx_errors++;
-			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
-			       "%s: oos pkt %u len %d discarded (too old), "
-			       "waiting for %u, reorder_q_len=%d\n",
-			       session->name, L2TP_SKB_CB(skb)->ns,
-			       L2TP_SKB_CB(skb)->length, session->nr,
-			       skb_queue_len(&session->reorder_q));
+			u64_stats_update_begin(&sstats->syncp);
+			sstats->rx_seq_discards++;
+			sstats->rx_errors++;
+			u64_stats_update_end(&sstats->syncp);
+			l2tp_dbg(session, L2TP_MSG_SEQ,
+				 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
+				 session->name, L2TP_SKB_CB(skb)->ns,
+				 L2TP_SKB_CB(skb)->length, session->nr,
+				 skb_queue_len(&session->reorder_q));
+			session->reorder_skip = 1;
 			__skb_unlink(skb, &session->reorder_q);
 			kfree_skb(skb);
 			if (session->deref)
@@ -417,13 +438,20 @@
 		}
 
 		if (L2TP_SKB_CB(skb)->has_seq) {
+			if (session->reorder_skip) {
+				l2tp_dbg(session, L2TP_MSG_SEQ,
+					 "%s: advancing nr to next pkt: %u -> %u",
+					 session->name, session->nr,
+					 L2TP_SKB_CB(skb)->ns);
+				session->reorder_skip = 0;
+				session->nr = L2TP_SKB_CB(skb)->ns;
+			}
 			if (L2TP_SKB_CB(skb)->ns != session->nr) {
-				PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
-				       "%s: holding oos pkt %u len %d, "
-				       "waiting for %u, reorder_q_len=%d\n",
-				       session->name, L2TP_SKB_CB(skb)->ns,
-				       L2TP_SKB_CB(skb)->length, session->nr,
-				       skb_queue_len(&session->reorder_q));
+				l2tp_dbg(session, L2TP_MSG_SEQ,
+					 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
+					 session->name, L2TP_SKB_CB(skb)->ns,
+					 L2TP_SKB_CB(skb)->length, session->nr,
+					 skb_queue_len(&session->reorder_q));
 				goto out;
 			}
 		}
@@ -446,21 +474,43 @@
 {
 	struct udphdr *uh = udp_hdr(skb);
 	u16 ulen = ntohs(uh->len);
-	struct inet_sock *inet;
 	__wsum psum;
 
-	if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
+	if (sk->sk_no_check || skb_csum_unnecessary(skb))
 		return 0;
 
-	inet = inet_sk(sk);
-	psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
-				  IPPROTO_UDP, 0);
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == PF_INET6) {
+		if (!uh->check) {
+			LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
+			return 1;
+		}
+		if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
+		    !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+				     &ipv6_hdr(skb)->daddr, ulen,
+				     IPPROTO_UDP, skb->csum)) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			return 0;
+		}
+		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+							 &ipv6_hdr(skb)->daddr,
+							 skb->len, IPPROTO_UDP,
+							 0));
+	} else
+#endif
+	{
+		struct inet_sock *inet;
+		if (!uh->check)
+			return 0;
+		inet = inet_sk(sk);
+		psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
+					  ulen, IPPROTO_UDP, 0);
 
-	if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
-	    !csum_fold(csum_add(psum, skb->csum)))
-		return 0;
-
-	skb->csum = psum;
+		if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
+		    !csum_fold(csum_add(psum, skb->csum)))
+			return 0;
+		skb->csum = psum;
+	}
 
 	return __skb_checksum_complete(skb);
 }
@@ -532,6 +582,7 @@
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	int offset;
 	u32 ns, nr;
+	struct l2tp_stats *sstats = &session->stats;
 
 	/* The ref count is increased since we now hold a pointer to
 	 * the session. Take care to decrement the refcnt when exiting
@@ -544,10 +595,13 @@
 	/* Parse and check optional cookie */
 	if (session->peer_cookie_len > 0) {
 		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
-			PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
-			       "%s: cookie mismatch (%u/%u). Discarding.\n",
-			       tunnel->name, tunnel->tunnel_id, session->session_id);
-			session->stats.rx_cookie_discards++;
+			l2tp_info(tunnel, L2TP_MSG_DATA,
+				  "%s: cookie mismatch (%u/%u). Discarding.\n",
+				  tunnel->name, tunnel->tunnel_id,
+				  session->session_id);
+			u64_stats_update_begin(&sstats->syncp);
+			sstats->rx_cookie_discards++;
+			u64_stats_update_end(&sstats->syncp);
 			goto discard;
 		}
 		ptr += session->peer_cookie_len;
@@ -573,9 +627,9 @@
 			L2TP_SKB_CB(skb)->ns = ns;
 			L2TP_SKB_CB(skb)->has_seq = 1;
 
-			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
-			       "%s: recv data ns=%u, nr=%u, session nr=%u\n",
-			       session->name, ns, nr, session->nr);
+			l2tp_dbg(session, L2TP_MSG_SEQ,
+				 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
+				 session->name, ns, nr, session->nr);
 		}
 	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
 		u32 l2h = ntohl(*(__be32 *) ptr);
@@ -587,9 +641,9 @@
 			L2TP_SKB_CB(skb)->ns = ns;
 			L2TP_SKB_CB(skb)->has_seq = 1;
 
-			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
-			       "%s: recv data ns=%u, session nr=%u\n",
-			       session->name, ns, session->nr);
+			l2tp_dbg(session, L2TP_MSG_SEQ,
+				 "%s: recv data ns=%u, session nr=%u\n",
+				 session->name, ns, session->nr);
 		}
 	}
 
@@ -602,9 +656,9 @@
 		 * configure it so.
 		 */
 		if ((!session->lns_mode) && (!session->send_seq)) {
-			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
-			       "%s: requested to enable seq numbers by LNS\n",
-			       session->name);
+			l2tp_info(session, L2TP_MSG_SEQ,
+				  "%s: requested to enable seq numbers by LNS\n",
+				  session->name);
 			session->send_seq = -1;
 			l2tp_session_set_header_len(session, tunnel->version);
 		}
@@ -613,10 +667,12 @@
 		 * If user has configured mandatory sequence numbers, discard.
 		 */
 		if (session->recv_seq) {
-			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
-			       "%s: recv data has no seq numbers when required. "
-			       "Discarding\n", session->name);
-			session->stats.rx_seq_discards++;
+			l2tp_warn(session, L2TP_MSG_SEQ,
+				  "%s: recv data has no seq numbers when required. Discarding.\n",
+				  session->name);
+			u64_stats_update_begin(&sstats->syncp);
+			sstats->rx_seq_discards++;
+			u64_stats_update_end(&sstats->syncp);
 			goto discard;
 		}
 
@@ -626,16 +682,18 @@
 		 * LAC is broken. Discard the frame.
 		 */
 		if ((!session->lns_mode) && (session->send_seq)) {
-			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
-			       "%s: requested to disable seq numbers by LNS\n",
-			       session->name);
+			l2tp_info(session, L2TP_MSG_SEQ,
+				  "%s: requested to disable seq numbers by LNS\n",
+				  session->name);
 			session->send_seq = 0;
 			l2tp_session_set_header_len(session, tunnel->version);
 		} else if (session->send_seq) {
-			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
-			       "%s: recv data has no seq numbers when required. "
-			       "Discarding\n", session->name);
-			session->stats.rx_seq_discards++;
+			l2tp_warn(session, L2TP_MSG_SEQ,
+				  "%s: recv data has no seq numbers when required. Discarding.\n",
+				  session->name);
+			u64_stats_update_begin(&sstats->syncp);
+			sstats->rx_seq_discards++;
+			u64_stats_update_end(&sstats->syncp);
 			goto discard;
 		}
 	}
@@ -689,13 +747,14 @@
 			 * packets
 			 */
 			if (L2TP_SKB_CB(skb)->ns != session->nr) {
-				session->stats.rx_seq_discards++;
-				PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
-				       "%s: oos pkt %u len %d discarded, "
-				       "waiting for %u, reorder_q_len=%d\n",
-				       session->name, L2TP_SKB_CB(skb)->ns,
-				       L2TP_SKB_CB(skb)->length, session->nr,
-				       skb_queue_len(&session->reorder_q));
+				u64_stats_update_begin(&sstats->syncp);
+				sstats->rx_seq_discards++;
+				u64_stats_update_end(&sstats->syncp);
+				l2tp_dbg(session, L2TP_MSG_SEQ,
+					 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
+					 session->name, L2TP_SKB_CB(skb)->ns,
+					 L2TP_SKB_CB(skb)->length, session->nr,
+					 skb_queue_len(&session->reorder_q));
 				goto discard;
 			}
 			skb_queue_tail(&session->reorder_q, skb);
@@ -716,7 +775,9 @@
 	return;
 
 discard:
-	session->stats.rx_errors++;
+	u64_stats_update_begin(&sstats->syncp);
+	sstats->rx_errors++;
+	u64_stats_update_end(&sstats->syncp);
 	kfree_skb(skb);
 
 	if (session->deref)
@@ -739,9 +800,9 @@
 	unsigned char *ptr, *optr;
 	u16 hdrflags;
 	u32 tunnel_id, session_id;
-	int offset;
 	u16 version;
 	int length;
+	struct l2tp_stats *tstats;
 
 	if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
 		goto discard_bad_csum;
@@ -751,8 +812,9 @@
 
 	/* Short packet? */
 	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
-		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
-		       "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
+		l2tp_info(tunnel, L2TP_MSG_DATA,
+			  "%s: recv short packet (len=%d)\n",
+			  tunnel->name, skb->len);
 		goto error;
 	}
 
@@ -762,14 +824,8 @@
 		if (!pskb_may_pull(skb, length))
 			goto error;
 
-		printk(KERN_DEBUG "%s: recv: ", tunnel->name);
-
-		offset = 0;
-		do {
-			printk(" %02X", skb->data[offset]);
-		} while (++offset < length);
-
-		printk("\n");
+		pr_debug("%s: recv\n", tunnel->name);
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
 	}
 
 	/* Point to L2TP header */
@@ -781,9 +837,9 @@
 	/* Check protocol version */
 	version = hdrflags & L2TP_HDR_VER_MASK;
 	if (version != tunnel->version) {
-		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
-		       "%s: recv protocol version mismatch: got %d expected %d\n",
-		       tunnel->name, version, tunnel->version);
+		l2tp_info(tunnel, L2TP_MSG_DATA,
+			  "%s: recv protocol version mismatch: got %d expected %d\n",
+			  tunnel->name, version, tunnel->version);
 		goto error;
 	}
 
@@ -792,8 +848,9 @@
 
 	/* If type is control packet, it is handled by userspace. */
 	if (hdrflags & L2TP_HDRFLAG_T) {
-		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
-		       "%s: recv control packet, len=%d\n", tunnel->name, length);
+		l2tp_dbg(tunnel, L2TP_MSG_DATA,
+			 "%s: recv control packet, len=%d\n",
+			 tunnel->name, length);
 		goto error;
 	}
 
@@ -821,9 +878,9 @@
 	session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
 	if (!session || !session->recv_skb) {
 		/* Not found? Pass to userspace to deal with */
-		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
-		       "%s: no session found (%u/%u). Passing up.\n",
-		       tunnel->name, tunnel_id, session_id);
+		l2tp_info(tunnel, L2TP_MSG_DATA,
+			  "%s: no session found (%u/%u). Passing up.\n",
+			  tunnel->name, tunnel_id, session_id);
 		goto error;
 	}
 
@@ -834,7 +891,10 @@
 discard_bad_csum:
 	LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
 	UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
-	tunnel->stats.rx_errors++;
+	tstats = &tunnel->stats;
+	u64_stats_update_begin(&tstats->syncp);
+	tstats->rx_errors++;
+	u64_stats_update_end(&tstats->syncp);
 	kfree_skb(skb);
 
 	return 0;
@@ -860,8 +920,8 @@
 	if (tunnel == NULL)
 		goto pass_up;
 
-	PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
-	       "%s: received %d bytes\n", tunnel->name, skb->len);
+	l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
+		 tunnel->name, skb->len);
 
 	if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
 		goto pass_up_put;
@@ -903,8 +963,8 @@
 		*bufp++ = 0;
 		session->ns++;
 		session->ns &= 0xffff;
-		PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
-		       "%s: updated ns to %u\n", session->name, session->ns);
+		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
+			 session->name, session->ns);
 	}
 
 	return bufp - optr;
@@ -940,8 +1000,9 @@
 				l2h = 0x40000000 | session->ns;
 				session->ns++;
 				session->ns &= 0xffffff;
-				PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
-				       "%s: updated ns to %u\n", session->name, session->ns);
+				l2tp_dbg(session, L2TP_MSG_SEQ,
+					 "%s: updated ns to %u\n",
+					 session->name, session->ns);
 			}
 
 			*((__be32 *) bufp) = htonl(l2h);
@@ -960,46 +1021,50 @@
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	unsigned int len = skb->len;
 	int error;
+	struct l2tp_stats *tstats, *sstats;
 
 	/* Debug */
 	if (session->send_seq)
-		PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
-		       "%s: send %Zd bytes, ns=%u\n", session->name,
-		       data_len, session->ns - 1);
+		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n",
+			 session->name, data_len, session->ns - 1);
 	else
-		PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
-		       "%s: send %Zd bytes\n", session->name, data_len);
+		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n",
+			 session->name, data_len);
 
 	if (session->debug & L2TP_MSG_DATA) {
-		int i;
 		int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
 		unsigned char *datap = skb->data + uhlen;
 
-		printk(KERN_DEBUG "%s: xmit:", session->name);
-		for (i = 0; i < (len - uhlen); i++) {
-			printk(" %02X", *datap++);
-			if (i == 31) {
-				printk(" ...");
-				break;
-			}
-		}
-		printk("\n");
+		pr_debug("%s: xmit\n", session->name);
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+				     datap, min_t(size_t, 32, len - uhlen));
 	}
 
 	/* Queue the packet to IP for output */
 	skb->local_df = 1;
-	error = ip_queue_xmit(skb, fl);
+#if IS_ENABLED(CONFIG_IPV6)
+	if (skb->sk->sk_family == PF_INET6)
+		error = inet6_csk_xmit(skb, NULL);
+	else
+#endif
+		error = ip_queue_xmit(skb, fl);
 
 	/* Update stats */
+	tstats = &tunnel->stats;
+	u64_stats_update_begin(&tstats->syncp);
+	sstats = &session->stats;
+	u64_stats_update_begin(&sstats->syncp);
 	if (error >= 0) {
-		tunnel->stats.tx_packets++;
-		tunnel->stats.tx_bytes += len;
-		session->stats.tx_packets++;
-		session->stats.tx_bytes += len;
+		tstats->tx_packets++;
+		tstats->tx_bytes += len;
+		sstats->tx_packets++;
+		sstats->tx_bytes += len;
 	} else {
-		tunnel->stats.tx_errors++;
-		session->stats.tx_errors++;
+		tstats->tx_errors++;
+		sstats->tx_errors++;
 	}
+	u64_stats_update_end(&tstats->syncp);
+	u64_stats_update_end(&sstats->syncp);
 
 	return 0;
 }
@@ -1021,6 +1086,31 @@
 	skb->destructor = l2tp_sock_wfree;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
+				int udp_len)
+{
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct udphdr *uh = udp_hdr(skb);
+
+	if (!skb_dst(skb) || !skb_dst(skb)->dev ||
+	    !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
+		__wsum csum = skb_checksum(skb, 0, udp_len, 0);
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
+					    IPPROTO_UDP, csum);
+		if (uh->check == 0)
+			uh->check = CSUM_MANGLED_0;
+	} else {
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		skb->csum_start = skb_transport_header(skb) - skb->head;
+		skb->csum_offset = offsetof(struct udphdr, check);
+		uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
+					     udp_len, IPPROTO_UDP, 0);
+	}
+}
+#endif
+
 /* If caller requires the skb to have a ppp header, the header must be
  * inserted in the skb data before calling this function.
  */
@@ -1089,6 +1179,11 @@
 		uh->check = 0;
 
 		/* Calculate UDP checksum if configured to do so */
+#if IS_ENABLED(CONFIG_IPV6)
+		if (sk->sk_family == PF_INET6)
+			l2tp_xmit_ipv6_csum(sk, skb, udp_len);
+		else
+#endif
 		if (sk->sk_no_check == UDP_CSUM_NOXMIT)
 			skb->ip_summed = CHECKSUM_NONE;
 		else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
@@ -1141,8 +1236,7 @@
 	if (tunnel == NULL)
 		goto end;
 
-	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
-	       "%s: closing...\n", tunnel->name);
+	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
 
 	/* Close all sessions */
 	l2tp_tunnel_closeall(tunnel);
@@ -1184,8 +1278,8 @@
 
 	BUG_ON(tunnel == NULL);
 
-	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
-	       "%s: closing all sessions...\n", tunnel->name);
+	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
+		  tunnel->name);
 
 	write_lock_bh(&tunnel->hlist_lock);
 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
@@ -1193,8 +1287,8 @@
 		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
 			session = hlist_entry(walk, struct l2tp_session, hlist);
 
-			PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO,
-			       "%s: closing session\n", session->name);
+			l2tp_info(session, L2TP_MSG_CONTROL,
+				  "%s: closing session\n", session->name);
 
 			hlist_del_init(&session->hlist);
 
@@ -1247,8 +1341,7 @@
 	BUG_ON(atomic_read(&tunnel->ref_count) != 0);
 	BUG_ON(tunnel->sock != NULL);
 
-	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
-	       "%s: free...\n", tunnel->name);
+	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
 
 	/* Remove from tunnel list */
 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1268,31 +1361,69 @@
 {
 	int err = -EINVAL;
 	struct sockaddr_in udp_addr;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct sockaddr_in6 udp6_addr;
+	struct sockaddr_l2tpip6 ip6_addr;
+#endif
 	struct sockaddr_l2tpip ip_addr;
 	struct socket *sock = NULL;
 
 	switch (cfg->encap) {
 	case L2TP_ENCAPTYPE_UDP:
-		err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
-		if (err < 0)
-			goto out;
+#if IS_ENABLED(CONFIG_IPV6)
+		if (cfg->local_ip6 && cfg->peer_ip6) {
+			err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp);
+			if (err < 0)
+				goto out;
 
-		sock = *sockp;
+			sock = *sockp;
 
-		memset(&udp_addr, 0, sizeof(udp_addr));
-		udp_addr.sin_family = AF_INET;
-		udp_addr.sin_addr = cfg->local_ip;
-		udp_addr.sin_port = htons(cfg->local_udp_port);
-		err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr));
-		if (err < 0)
-			goto out;
+			memset(&udp6_addr, 0, sizeof(udp6_addr));
+			udp6_addr.sin6_family = AF_INET6;
+			memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
+			       sizeof(udp6_addr.sin6_addr));
+			udp6_addr.sin6_port = htons(cfg->local_udp_port);
+			err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
+					  sizeof(udp6_addr));
+			if (err < 0)
+				goto out;
 
-		udp_addr.sin_family = AF_INET;
-		udp_addr.sin_addr = cfg->peer_ip;
-		udp_addr.sin_port = htons(cfg->peer_udp_port);
-		err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0);
-		if (err < 0)
-			goto out;
+			udp6_addr.sin6_family = AF_INET6;
+			memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
+			       sizeof(udp6_addr.sin6_addr));
+			udp6_addr.sin6_port = htons(cfg->peer_udp_port);
+			err = kernel_connect(sock,
+					     (struct sockaddr *) &udp6_addr,
+					     sizeof(udp6_addr), 0);
+			if (err < 0)
+				goto out;
+		} else
+#endif
+		{
+			err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
+			if (err < 0)
+				goto out;
+
+			sock = *sockp;
+
+			memset(&udp_addr, 0, sizeof(udp_addr));
+			udp_addr.sin_family = AF_INET;
+			udp_addr.sin_addr = cfg->local_ip;
+			udp_addr.sin_port = htons(cfg->local_udp_port);
+			err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
+					  sizeof(udp_addr));
+			if (err < 0)
+				goto out;
+
+			udp_addr.sin_family = AF_INET;
+			udp_addr.sin_addr = cfg->peer_ip;
+			udp_addr.sin_port = htons(cfg->peer_udp_port);
+			err = kernel_connect(sock,
+					     (struct sockaddr *) &udp_addr,
+					     sizeof(udp_addr), 0);
+			if (err < 0)
+				goto out;
+		}
 
 		if (!cfg->use_udp_checksums)
 			sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
@@ -1300,27 +1431,61 @@
 		break;
 
 	case L2TP_ENCAPTYPE_IP:
-		err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp);
-		if (err < 0)
-			goto out;
+#if IS_ENABLED(CONFIG_IPV6)
+		if (cfg->local_ip6 && cfg->peer_ip6) {
+			err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP,
+					  sockp);
+			if (err < 0)
+				goto out;
 
-		sock = *sockp;
+			sock = *sockp;
 
-		memset(&ip_addr, 0, sizeof(ip_addr));
-		ip_addr.l2tp_family = AF_INET;
-		ip_addr.l2tp_addr = cfg->local_ip;
-		ip_addr.l2tp_conn_id = tunnel_id;
-		err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr));
-		if (err < 0)
-			goto out;
+			memset(&ip6_addr, 0, sizeof(ip6_addr));
+			ip6_addr.l2tp_family = AF_INET6;
+			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
+			       sizeof(ip6_addr.l2tp_addr));
+			ip6_addr.l2tp_conn_id = tunnel_id;
+			err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
+					  sizeof(ip6_addr));
+			if (err < 0)
+				goto out;
 
-		ip_addr.l2tp_family = AF_INET;
-		ip_addr.l2tp_addr = cfg->peer_ip;
-		ip_addr.l2tp_conn_id = peer_tunnel_id;
-		err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0);
-		if (err < 0)
-			goto out;
+			ip6_addr.l2tp_family = AF_INET6;
+			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
+			       sizeof(ip6_addr.l2tp_addr));
+			ip6_addr.l2tp_conn_id = peer_tunnel_id;
+			err = kernel_connect(sock,
+					     (struct sockaddr *) &ip6_addr,
+					     sizeof(ip6_addr), 0);
+			if (err < 0)
+				goto out;
+		} else
+#endif
+		{
+			err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP,
+					  sockp);
+			if (err < 0)
+				goto out;
 
+			sock = *sockp;
+
+			memset(&ip_addr, 0, sizeof(ip_addr));
+			ip_addr.l2tp_family = AF_INET;
+			ip_addr.l2tp_addr = cfg->local_ip;
+			ip_addr.l2tp_conn_id = tunnel_id;
+			err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
+					  sizeof(ip_addr));
+			if (err < 0)
+				goto out;
+
+			ip_addr.l2tp_family = AF_INET;
+			ip_addr.l2tp_addr = cfg->peer_ip;
+			ip_addr.l2tp_conn_id = peer_tunnel_id;
+			err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
+					     sizeof(ip_addr), 0);
+			if (err < 0)
+				goto out;
+		}
 		break;
 
 	default:
@@ -1357,7 +1522,7 @@
 		err = -EBADF;
 		sock = sockfd_lookup(fd, &err);
 		if (!sock) {
-			printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
+			pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
 			       tunnel_id, fd, err);
 			goto err;
 		}
@@ -1373,7 +1538,7 @@
 	case L2TP_ENCAPTYPE_UDP:
 		err = -EPROTONOSUPPORT;
 		if (sk->sk_protocol != IPPROTO_UDP) {
-			printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
 			       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
 			goto err;
 		}
@@ -1381,7 +1546,7 @@
 	case L2TP_ENCAPTYPE_IP:
 		err = -EPROTONOSUPPORT;
 		if (sk->sk_protocol != IPPROTO_L2TP) {
-			printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
 			       tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
 			goto err;
 		}
@@ -1424,6 +1589,12 @@
 		/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
 		udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
 		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
+#if IS_ENABLED(CONFIG_IPV6)
+		if (sk->sk_family == PF_INET6)
+			udpv6_encap_enable();
+		else
+#endif
+		udp_encap_enable();
 	}
 
 	sk->sk_user_data = tunnel;
@@ -1577,7 +1748,7 @@
 
 		session->session_id = session_id;
 		session->peer_session_id = peer_session_id;
-		session->nr = 1;
+		session->nr = 0;
 
 		sprintf(&session->name[0], "sess %u/%u",
 			tunnel->tunnel_id, session->session_id);
@@ -1683,7 +1854,7 @@
 	if (rc)
 		goto out;
 
-	printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION);
+	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
 
 out:
 	return rc;
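
Every counter update in the l2tp_core.c changes above is now bracketed by u64_stats_update_begin()/u64_stats_update_end() on the syncp member added to struct l2tp_stats in l2tp_core.h below, so that the 64-bit counters can be read consistently on 32-bit SMP. A minimal reader-side sketch using the standard <linux/u64_stats_sync.h> fetch helpers (the snapshot function itself is illustrative, not part of this patch):

	#include <linux/u64_stats_sync.h>

	/* Copy a consistent pair of counters; retry if a writer raced with us. */
	static void l2tp_stats_snapshot(const struct l2tp_stats *stats,
					u64 *rx_packets, u64 *rx_bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			*rx_packets = stats->rx_packets;
			*rx_bytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));
	}
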
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a16a48e..a38ec6c 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -45,6 +45,7 @@
 	u64			rx_oos_packets;
 	u64			rx_errors;
 	u64			rx_cookie_discards;
+	struct u64_stats_sync	syncp;
 };
 
 struct l2tp_tunnel;
@@ -54,15 +55,15 @@
  */
 struct l2tp_session_cfg {
 	enum l2tp_pwtype	pw_type;
-	unsigned		data_seq:2;	/* data sequencing level
+	unsigned int		data_seq:2;	/* data sequencing level
 						 * 0 => none, 1 => IP only,
 						 * 2 => all
 						 */
-	unsigned		recv_seq:1;	/* expect receive packets with
+	unsigned int		recv_seq:1;	/* expect receive packets with
 						 * sequence numbers? */
-	unsigned		send_seq:1;	/* send packets with sequence
+	unsigned int		send_seq:1;	/* send packets with sequence
 						 * numbers? */
-	unsigned		lns_mode:1;	/* behave as LNS? LAC enables
+	unsigned int		lns_mode:1;	/* behave as LNS? LAC enables
 						 * sequence numbers under
 						 * control of LNS. */
 	int			debug;		/* bitmask of debug message
@@ -107,21 +108,22 @@
 
 	char			name[32];	/* for logging */
 	char			ifname[IFNAMSIZ];
-	unsigned		data_seq:2;	/* data sequencing level
+	unsigned int		data_seq:2;	/* data sequencing level
 						 * 0 => none, 1 => IP only,
 						 * 2 => all
 						 */
-	unsigned		recv_seq:1;	/* expect receive packets with
+	unsigned int		recv_seq:1;	/* expect receive packets with
 						 * sequence numbers? */
-	unsigned		send_seq:1;	/* send packets with sequence
+	unsigned int		send_seq:1;	/* send packets with sequence
 						 * numbers? */
-	unsigned		lns_mode:1;	/* behave as LNS? LAC enables
+	unsigned int		lns_mode:1;	/* behave as LNS? LAC enables
 						 * sequence numbers under
 						 * control of LNS. */
 	int			debug;		/* bitmask of debug message
 						 * categories */
 	int			reorder_timeout; /* configured reorder timeout
 						  * (in jiffies) */
+	int			reorder_skip;	/* set if skip to next nr */
 	int			mtu;
 	int			mru;
 	enum l2tp_pwtype	pwtype;
@@ -150,6 +152,10 @@
 	/* Used only for kernel-created sockets */
 	struct in_addr		local_ip;
 	struct in_addr		peer_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct in6_addr		*local_ip6;
+	struct in6_addr		*peer_ip6;
+#endif
 	u16			local_udp_port;
 	u16			peer_udp_port;
 	unsigned int		use_udp_checksums:1;
@@ -255,17 +261,36 @@
 }
 
 #ifdef L2TP_REFCNT_DEBUG
-#define l2tp_session_inc_refcount(_s) do { \
-		printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
-		l2tp_session_inc_refcount_1(_s);				\
-	} while (0)
-#define l2tp_session_dec_refcount(_s) do { \
-		printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
-		l2tp_session_dec_refcount_1(_s);				\
-	} while (0)
+#define l2tp_session_inc_refcount(_s)					\
+do {									\
+	pr_debug("l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n",	\
+		 __func__, __LINE__, (_s)->name,			\
+		 atomic_read(&_s->ref_count));				\
+	l2tp_session_inc_refcount_1(_s);				\
+} while (0)
+#define l2tp_session_dec_refcount(_s)					\
+do {									\
+	pr_debug("l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n",	\
+		 __func__, __LINE__, (_s)->name,			\
+		 atomic_read(&_s->ref_count));				\
+	l2tp_session_dec_refcount_1(_s);				\
+} while (0)
 #else
 #define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s)
 #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
 #endif
 
+#define l2tp_printk(ptr, type, func, fmt, ...)				\
+do {									\
+	if (((ptr)->debug) & (type))					\
+		func(fmt, ##__VA_ARGS__);				\
+} while (0)
+
+#define l2tp_warn(ptr, type, fmt, ...)					\
+	l2tp_printk(ptr, type, pr_warn, fmt, ##__VA_ARGS__)
+#define l2tp_info(ptr, type, fmt, ...)					\
+	l2tp_printk(ptr, type, pr_info, fmt, ##__VA_ARGS__)
+#define l2tp_dbg(ptr, type, fmt, ...)					\
+	l2tp_printk(ptr, type, pr_debug, fmt, ##__VA_ARGS__)
+
 #endif /* _L2TP_CORE_H_ */
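
The l2tp_printk() helper and its l2tp_warn/l2tp_info/l2tp_dbg wrappers defined above replace the old PRINTK() macro throughout l2tp: a message is emitted only when the requested category bit is set in the object's debug mask, and the severity now comes from the pr_* function rather than a KERN_* prefix argument. Two call sites from the l2tp_core.c conversion, shown here purely as usage reference:

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
	l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n",
		 session->name, session->nr);
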
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 7613013..c3813bc 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -9,6 +9,8 @@
  *	2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/socket.h>
@@ -122,6 +124,14 @@
 	seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
 	if (tunnel->sock) {
 		struct inet_sock *inet = inet_sk(tunnel->sock);
+
+#if IS_ENABLED(CONFIG_IPV6)
+		if (tunnel->sock->sk_family == AF_INET6) {
+			struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+			seq_printf(m, " from %pI6c to %pI6c\n",
+				&np->saddr, &np->daddr);
+		} else
+#endif
 		seq_printf(m, " from %pI4 to %pI4\n",
 			   &inet->inet_saddr, &inet->inet_daddr);
 		if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
@@ -317,11 +327,11 @@
 	if (tunnels == NULL)
 		rc = -EIO;
 
-	printk(KERN_INFO "L2TP debugfs support\n");
+	pr_info("L2TP debugfs support\n");
 
 out:
 	if (rc)
-		printk(KERN_WARNING "l2tp debugfs: unable to init\n");
+		pr_warn("unable to init\n");
 
 	return rc;
 }
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 63fe5f3..443591d 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -9,6 +9,8 @@
  *	2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/socket.h>
@@ -115,21 +117,14 @@
 
 	if (session->debug & L2TP_MSG_DATA) {
 		unsigned int length;
-		int offset;
 		u8 *ptr = skb->data;
 
 		length = min(32u, skb->len);
 		if (!pskb_may_pull(skb, length))
 			goto error;
 
-		printk(KERN_DEBUG "%s: eth recv: ", session->name);
-
-		offset = 0;
-		do {
-			printk(" %02X", ptr[offset]);
-		} while (++offset < length);
-
-		printk("\n");
+		pr_debug("%s: eth recv\n", session->name);
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
 
 	if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
@@ -308,7 +303,7 @@
 	if (err)
 		goto out_unreg;
 
-	printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n");
+	pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
 
 	return 0;
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 585d93e..889f5d1 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -9,6 +9,8 @@
  *	2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/icmp.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
@@ -32,15 +34,8 @@
 	/* inet_sock has to be the first member of l2tp_ip_sock */
 	struct inet_sock	inet;
 
-	__u32			conn_id;
-	__u32			peer_conn_id;
-
-	__u64			tx_packets;
-	__u64			tx_bytes;
-	__u64			tx_errors;
-	__u64			rx_packets;
-	__u64			rx_bytes;
-	__u64			rx_errors;
+	u32			conn_id;
+	u32			peer_conn_id;
 };
 
 static DEFINE_RWLOCK(l2tp_ip_lock);
@@ -127,7 +122,6 @@
 	struct l2tp_session *session;
 	struct l2tp_tunnel *tunnel = NULL;
 	int length;
-	int offset;
 
 	/* Point to L2TP header */
 	optr = ptr = skb->data;
@@ -162,14 +156,8 @@
 		if (!pskb_may_pull(skb, length))
 			goto discard;
 
-		printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
-
-		offset = 0;
-		do {
-			printk(" %02X", ptr[offset]);
-		} while (++offset < length);
-
-		printk("\n");
+		pr_debug("%s: ip recv\n", tunnel->name);
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
 	}
 
 	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
@@ -298,68 +286,27 @@
 static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
-	struct inet_sock *inet = inet_sk(sk);
-	struct flowi4 *fl4;
-	struct rtable *rt;
-	__be32 saddr;
-	int oif, rc;
+	int rc;
 
-	rc = -EINVAL;
 	if (addr_len < sizeof(*lsa))
-		goto out;
+		return -EINVAL;
 
-	rc = -EAFNOSUPPORT;
-	if (lsa->l2tp_family != AF_INET)
-		goto out;
+	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
+		return -EINVAL;
+
+	rc = ip4_datagram_connect(sk, uaddr, addr_len);
+	if (rc < 0)
+		return rc;
 
 	lock_sock(sk);
 
-	sk_dst_reset(sk);
-
-	oif = sk->sk_bound_dev_if;
-	saddr = inet->inet_saddr;
-
-	rc = -EINVAL;
-	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
-		goto out;
-
-	fl4 = &inet->cork.fl.u.ip4;
-	rt = ip_route_connect(fl4, lsa->l2tp_addr.s_addr, saddr,
-			      RT_CONN_FLAGS(sk), oif,
-			      IPPROTO_L2TP,
-			      0, 0, sk, true);
-	if (IS_ERR(rt)) {
-		rc = PTR_ERR(rt);
-		if (rc == -ENETUNREACH)
-			IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
-		goto out;
-	}
-
-	rc = -ENETUNREACH;
-	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
-		ip_rt_put(rt);
-		goto out;
-	}
-
 	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
 
-	if (!inet->inet_saddr)
-		inet->inet_saddr = fl4->saddr;
-	if (!inet->inet_rcv_saddr)
-		inet->inet_rcv_saddr = fl4->saddr;
-	inet->inet_daddr = fl4->daddr;
-	sk->sk_state = TCP_ESTABLISHED;
-	inet->inet_id = jiffies;
-
-	sk_dst_set(sk, &rt->dst);
-
 	write_lock_bh(&l2tp_ip_lock);
 	hlist_del_init(&sk->sk_bind_node);
 	sk_add_bind_node(sk, &l2tp_ip_bind_table);
 	write_unlock_bh(&l2tp_ip_lock);
 
-	rc = 0;
-out:
 	release_sock(sk);
 	return rc;
 }
@@ -414,7 +361,6 @@
 {
 	struct sk_buff *skb;
 	int rc;
-	struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
 	struct rtable *rt = NULL;
 	struct flowi4 *fl4;
@@ -442,8 +388,9 @@
 
 		daddr = lip->l2tp_addr.s_addr;
 	} else {
+		rc = -EDESTADDRREQ;
 		if (sk->sk_state != TCP_ESTABLISHED)
-			return -EDESTADDRREQ;
+			goto out;
 
 		daddr = inet->inet_daddr;
 		connected = 1;
@@ -513,14 +460,8 @@
 	rcu_read_unlock();
 
 error:
-	/* Update stats */
-	if (rc >= 0) {
-		lsa->tx_packets++;
-		lsa->tx_bytes += len;
+	if (rc >= 0)
 		rc = len;
-	} else {
-		lsa->tx_errors++;
-	}
 
 out:
 	release_sock(sk);
@@ -538,7 +479,6 @@
 			   size_t len, int noblock, int flags, int *addr_len)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
 	size_t copied = 0;
 	int err = -EOPNOTSUPP;
 	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
@@ -580,15 +520,7 @@
 done:
 	skb_free_datagram(sk, skb);
 out:
-	if (err) {
-		lsk->rx_errors++;
-		return err;
-	}
-
-	lsk->rx_packets++;
-	lsk->rx_bytes += copied;
-
-	return copied;
+	return err ? err : copied;
 }
 
 static struct proto l2tp_ip_prot = {
@@ -656,7 +588,7 @@
 {
 	int err;
 
-	printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");
+	pr_info("L2TP IP encapsulation support (L2TPv3)\n");
 
 	err = proto_register(&l2tp_ip_prot, 1);
 	if (err != 0)
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
new file mode 100644
index 0000000..0291d8d
--- /dev/null
+++ b/net/l2tp/l2tp_ip6.c
@@ -0,0 +1,787 @@
+/*
+ * L2TPv3 IP encapsulation support for IPv6
+ *
+ * Copyright (c) 2012 Katalix Systems Ltd
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/icmp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/socket.h>
+#include <linux/l2tp.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/inet_common.h>
+#include <net/inet_hashtables.h>
+#include <net/tcp_states.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+
+#include <net/transp_v6.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+
+#include "l2tp_core.h"
+
+struct l2tp_ip6_sock {
+	/* inet_sock has to be the first member of l2tp_ip6_sock */
+	struct inet_sock	inet;
+
+	u32			conn_id;
+	u32			peer_conn_id;
+
+	/* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see
+	   inet6_sk_generic */
+	struct ipv6_pinfo	inet6;
+};
+
+static DEFINE_RWLOCK(l2tp_ip6_lock);
+static struct hlist_head l2tp_ip6_table;
+static struct hlist_head l2tp_ip6_bind_table;
+
+static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
+{
+	return (struct l2tp_ip6_sock *)sk;
+}
+
+static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
+					   struct in6_addr *laddr,
+					   int dif, u32 tunnel_id)
+{
+	struct hlist_node *node;
+	struct sock *sk;
+
+	sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) {
+		struct in6_addr *addr = inet6_rcv_saddr(sk);
+		struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
+
+		if (l2tp == NULL)
+			continue;
+
+		if ((l2tp->conn_id == tunnel_id) &&
+		    net_eq(sock_net(sk), net) &&
+		    !(addr && ipv6_addr_equal(addr, laddr)) &&
+		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+			goto found;
+	}
+
+	sk = NULL;
+found:
+	return sk;
+}
+
+static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
+						struct in6_addr *laddr,
+						int dif, u32 tunnel_id)
+{
+	struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
+	if (sk)
+		sock_hold(sk);
+
+	return sk;
+}
+
+/* When processing receive frames, there are two cases to
+ * consider. Data frames consist of a non-zero session-id and an
+ * optional cookie. Control frames consist of a regular L2TP header
+ * preceded by 32-bits of zeros.
+ *
+ * L2TPv3 Session Header Over IP
+ *
+ *  0                   1                   2                   3
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                           Session ID                          |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |               Cookie (optional, maximum 64 bits)...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *                                                                 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * L2TPv3 Control Message Header Over IP
+ *
+ *  0                   1                   2                   3
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                      (32 bits of zeros)                       |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                     Control Connection ID                     |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |               Ns              |               Nr              |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * All control frames are passed to userspace.
+ */
+static int l2tp_ip6_recv(struct sk_buff *skb)
+{
+	struct sock *sk;
+	u32 session_id;
+	u32 tunnel_id;
+	unsigned char *ptr, *optr;
+	struct l2tp_session *session;
+	struct l2tp_tunnel *tunnel = NULL;
+	int length;
+
+	/* Point to L2TP header */
+	optr = ptr = skb->data;
+
+	if (!pskb_may_pull(skb, 4))
+		goto discard;
+
+	session_id = ntohl(*((__be32 *) ptr));
+	ptr += 4;
+
+	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
+	 * the session_id. If it is 0, the packet is a L2TP control
+	 * frame and the session_id value can be discarded.
+	 */
+	if (session_id == 0) {
+		__skb_pull(skb, 4);
+		goto pass_up;
+	}
+
+	/* Ok, this is a data packet. Lookup the session. */
+	session = l2tp_session_find(&init_net, NULL, session_id);
+	if (session == NULL)
+		goto discard;
+
+	tunnel = session->tunnel;
+	if (tunnel == NULL)
+		goto discard;
+
+	/* Trace packet contents, if enabled */
+	if (tunnel->debug & L2TP_MSG_DATA) {
+		length = min(32u, skb->len);
+		if (!pskb_may_pull(skb, length))
+			goto discard;
+
+		pr_debug("%s: ip recv\n", tunnel->name);
+		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+	}
+
+	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
+			 tunnel->recv_payload_hook);
+	return 0;
+
+pass_up:
+	/* Get the tunnel_id from the L2TP header */
+	if (!pskb_may_pull(skb, 12))
+		goto discard;
+
+	if ((skb->data[0] & 0xc0) != 0xc0)
+		goto discard;
+
+	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+	tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+	if (tunnel != NULL)
+		sk = tunnel->sock;
+	else {
+		struct ipv6hdr *iph = ipv6_hdr(skb);
+
+		read_lock_bh(&l2tp_ip6_lock);
+		sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
+					    0, tunnel_id);
+		read_unlock_bh(&l2tp_ip6_lock);
+	}
+
+	if (sk == NULL)
+		goto discard;
+
+	sock_hold(sk);
+
+	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
+		goto discard_put;
+
+	nf_reset(skb);
+
+	return sk_receive_skb(sk, skb, 1);
+
+discard_put:
+	sock_put(sk);
+
+discard:
+	kfree_skb(skb);
+	return 0;
+}
+
+static int l2tp_ip6_open(struct sock *sk)
+{
+	/* Prevent autobind. We don't have ports. */
+	inet_sk(sk)->inet_num = IPPROTO_L2TP;
+
+	write_lock_bh(&l2tp_ip6_lock);
+	sk_add_node(sk, &l2tp_ip6_table);
+	write_unlock_bh(&l2tp_ip6_lock);
+
+	return 0;
+}
+
+static void l2tp_ip6_close(struct sock *sk, long timeout)
+{
+	write_lock_bh(&l2tp_ip6_lock);
+	hlist_del_init(&sk->sk_bind_node);
+	sk_del_node_init(sk);
+	write_unlock_bh(&l2tp_ip6_lock);
+
+	sk_common_release(sk);
+}
+
+static void l2tp_ip6_destroy_sock(struct sock *sk)
+{
+	lock_sock(sk);
+	ip6_flush_pending_frames(sk);
+	release_sock(sk);
+
+	inet6_destroy_sock(sk);
+}
+
+static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
+	__be32 v4addr = 0;
+	int addr_type;
+	int err;
+
+	if (addr_len < sizeof(*addr))
+		return -EINVAL;
+
+	addr_type = ipv6_addr_type(&addr->l2tp_addr);
+
+	/* l2tp_ip6 sockets are IPv6 only */
+	if (addr_type == IPV6_ADDR_MAPPED)
+		return -EADDRNOTAVAIL;
+
+	/* L2TP is point-point, not multicast */
+	if (addr_type & IPV6_ADDR_MULTICAST)
+		return -EADDRNOTAVAIL;
+
+	err = -EADDRINUSE;
+	read_lock_bh(&l2tp_ip6_lock);
+	if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
+				   sk->sk_bound_dev_if, addr->l2tp_conn_id))
+		goto out_in_use;
+	read_unlock_bh(&l2tp_ip6_lock);
+
+	lock_sock(sk);
+
+	err = -EINVAL;
+	if (sk->sk_state != TCP_CLOSE)
+		goto out_unlock;
+
+	/* Check if the address belongs to the host. */
+	rcu_read_lock();
+	if (addr_type != IPV6_ADDR_ANY) {
+		struct net_device *dev = NULL;
+
+		if (addr_type & IPV6_ADDR_LINKLOCAL) {
+			if (addr_len >= sizeof(struct sockaddr_in6) &&
+			    addr->l2tp_scope_id) {
+				/* Override any existing binding, if another
+				 * one is supplied by user.
+				 */
+				sk->sk_bound_dev_if = addr->l2tp_scope_id;
+			}
+
+			/* Binding to link-local address requires an
+			   interface */
+			if (!sk->sk_bound_dev_if)
+				goto out_unlock_rcu;
+
+			err = -ENODEV;
+			dev = dev_get_by_index_rcu(sock_net(sk),
+						   sk->sk_bound_dev_if);
+			if (!dev)
+				goto out_unlock_rcu;
+		}
+
+		/* ipv4 addr of the socket is invalid.  Only the
+		 * unspecified and mapped address have a v4 equivalent.
+		 */
+		v4addr = LOOPBACK4_IPV6;
+		err = -EADDRNOTAVAIL;
+		if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
+			goto out_unlock_rcu;
+	}
+	rcu_read_unlock();
+
+	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
+	np->rcv_saddr = addr->l2tp_addr;
+	np->saddr = addr->l2tp_addr;
+
+	l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
+
+	write_lock_bh(&l2tp_ip6_lock);
+	sk_add_bind_node(sk, &l2tp_ip6_bind_table);
+	sk_del_node_init(sk);
+	write_unlock_bh(&l2tp_ip6_lock);
+
+	release_sock(sk);
+	return 0;
+
+out_unlock_rcu:
+	rcu_read_unlock();
+out_unlock:
+	release_sock(sk);
+	return err;
+
+out_in_use:
+	read_unlock_bh(&l2tp_ip6_lock);
+	return err;
+}
+
+static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
+			    int addr_len)
+{
+	struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *) uaddr;
+	struct sockaddr_in6	*usin = (struct sockaddr_in6 *) uaddr;
+	struct in6_addr	*daddr;
+	int	addr_type;
+	int rc;
+
+	if (addr_len < sizeof(*lsa))
+		return -EINVAL;
+
+	addr_type = ipv6_addr_type(&usin->sin6_addr);
+	if (addr_type & IPV6_ADDR_MULTICAST)
+		return -EINVAL;
+
+	if (addr_type & IPV6_ADDR_MAPPED) {
+		daddr = &usin->sin6_addr;
+		if (ipv4_is_multicast(daddr->s6_addr32[3]))
+			return -EINVAL;
+	}
+
+	rc = ip6_datagram_connect(sk, uaddr, addr_len);
+
+	lock_sock(sk);
+
+	l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
+
+	write_lock_bh(&l2tp_ip6_lock);
+	hlist_del_init(&sk->sk_bind_node);
+	sk_add_bind_node(sk, &l2tp_ip6_bind_table);
+	write_unlock_bh(&l2tp_ip6_lock);
+
+	release_sock(sk);
+
+	return rc;
+}
+
+static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
+			    int *uaddr_len, int peer)
+{
+	struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
+	struct sock *sk = sock->sk;
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);
+
+	lsa->l2tp_family = AF_INET6;
+	lsa->l2tp_flowinfo = 0;
+	lsa->l2tp_scope_id = 0;
+	if (peer) {
+		if (!lsk->peer_conn_id)
+			return -ENOTCONN;
+		lsa->l2tp_conn_id = lsk->peer_conn_id;
+		lsa->l2tp_addr = np->daddr;
+		if (np->sndflow)
+			lsa->l2tp_flowinfo = np->flow_label;
+	} else {
+		if (ipv6_addr_any(&np->rcv_saddr))
+			lsa->l2tp_addr = np->saddr;
+		else
+			lsa->l2tp_addr = np->rcv_saddr;
+
+		lsa->l2tp_conn_id = lsk->conn_id;
+	}
+	if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
+		lsa->l2tp_scope_id = sk->sk_bound_dev_if;
+	*uaddr_len = sizeof(*lsa);
+	return 0;
+}
+
+static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
+{
+	int rc;
+
+	/* Charge it to the socket, dropping if the queue is full. */
+	rc = sock_queue_rcv_skb(sk, skb);
+	if (rc < 0)
+		goto drop;
+
+	return 0;
+
+drop:
+	IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+	kfree_skb(skb);
+	return -1;
+}
+
+static int l2tp_ip6_push_pending_frames(struct sock *sk)
+{
+	struct sk_buff *skb;
+	__be32 *transhdr = NULL;
+	int err = 0;
+
+	skb = skb_peek(&sk->sk_write_queue);
+	if (skb == NULL)
+		goto out;
+
+	transhdr = (__be32 *)skb_transport_header(skb);
+	*transhdr = 0;
+
+	err = ip6_push_pending_frames(sk);
+
+out:
+	return err;
+}
+
+/* Userspace will call sendmsg() on the tunnel socket to send L2TP
+ * control frames.
+ */
+static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
+			    struct msghdr *msg, size_t len)
+{
+	struct ipv6_txoptions opt_space;
+	struct sockaddr_l2tpip6 *lsa =
+		(struct sockaddr_l2tpip6 *) msg->msg_name;
+	struct in6_addr *daddr, *final_p, final;
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct ipv6_txoptions *opt = NULL;
+	struct ip6_flowlabel *flowlabel = NULL;
+	struct dst_entry *dst = NULL;
+	struct flowi6 fl6;
+	int addr_len = msg->msg_namelen;
+	int hlimit = -1;
+	int tclass = -1;
+	int dontfrag = -1;
+	int transhdrlen = 4; /* zero session-id */
+	int ulen = len + transhdrlen;
+	int err;
+
+	/* Rough check on arithmetic overflow,
+	   better check is made in ip6_append_data().
+	 */
+	if (len > INT_MAX)
+		return -EMSGSIZE;
+
+	/* Mirror BSD error message compatibility */
+	if (msg->msg_flags & MSG_OOB)
+		return -EOPNOTSUPP;
+
+	/*
+	 *	Get and verify the address.
+	 */
+	memset(&fl6, 0, sizeof(fl6));
+
+	fl6.flowi6_mark = sk->sk_mark;
+
+	if (lsa) {
+		if (addr_len < SIN6_LEN_RFC2133)
+			return -EINVAL;
+
+		if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6)
+			return -EAFNOSUPPORT;
+
+		daddr = &lsa->l2tp_addr;
+		if (np->sndflow) {
+			fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
+			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
+				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
+				if (flowlabel == NULL)
+					return -EINVAL;
+				daddr = &flowlabel->dst;
+			}
+		}
+
+		/*
+		 * Otherwise it will be difficult to maintain
+		 * sk->sk_dst_cache.
+		 */
+		if (sk->sk_state == TCP_ESTABLISHED &&
+		    ipv6_addr_equal(daddr, &np->daddr))
+			daddr = &np->daddr;
+
+		if (addr_len >= sizeof(struct sockaddr_in6) &&
+		    lsa->l2tp_scope_id &&
+		    ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
+			fl6.flowi6_oif = lsa->l2tp_scope_id;
+	} else {
+		if (sk->sk_state != TCP_ESTABLISHED)
+			return -EDESTADDRREQ;
+
+		daddr = &np->daddr;
+		fl6.flowlabel = np->flow_label;
+	}
+
+	if (fl6.flowi6_oif == 0)
+		fl6.flowi6_oif = sk->sk_bound_dev_if;
+
+	if (msg->msg_controllen) {
+		opt = &opt_space;
+		memset(opt, 0, sizeof(struct ipv6_txoptions));
+		opt->tot_len = sizeof(struct ipv6_txoptions);
+
+		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+					&hlimit, &tclass, &dontfrag);
+		if (err < 0) {
+			fl6_sock_release(flowlabel);
+			return err;
+		}
+		if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
+			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
+			if (flowlabel == NULL)
+				return -EINVAL;
+		}
+		if (!(opt->opt_nflen|opt->opt_flen))
+			opt = NULL;
+	}
+
+	if (opt == NULL)
+		opt = np->opt;
+	if (flowlabel)
+		opt = fl6_merge_options(&opt_space, flowlabel, opt);
+	opt = ipv6_fixup_options(&opt_space, opt);
+
+	fl6.flowi6_proto = sk->sk_protocol;
+	if (!ipv6_addr_any(daddr))
+		fl6.daddr = *daddr;
+	else
+		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
+		fl6.saddr = np->saddr;
+
+	final_p = fl6_update_dst(&fl6, opt, &final);
+
+	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
+		fl6.flowi6_oif = np->mcast_oif;
+	else if (!fl6.flowi6_oif)
+		fl6.flowi6_oif = np->ucast_oif;
+
+	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+
+	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
+	if (IS_ERR(dst)) {
+		err = PTR_ERR(dst);
+		goto out;
+	}
+
+	if (hlimit < 0) {
+		if (ipv6_addr_is_multicast(&fl6.daddr))
+			hlimit = np->mcast_hops;
+		else
+			hlimit = np->hop_limit;
+		if (hlimit < 0)
+			hlimit = ip6_dst_hoplimit(dst);
+	}
+
+	if (tclass < 0)
+		tclass = np->tclass;
+
+	if (dontfrag < 0)
+		dontfrag = np->dontfrag;
+
+	if (msg->msg_flags & MSG_CONFIRM)
+		goto do_confirm;
+
+back_from_confirm:
+	lock_sock(sk);
+	err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
+			      ulen, transhdrlen, hlimit, tclass, opt,
+			      &fl6, (struct rt6_info *)dst,
+			      msg->msg_flags, dontfrag);
+	if (err)
+		ip6_flush_pending_frames(sk);
+	else if (!(msg->msg_flags & MSG_MORE))
+		err = l2tp_ip6_push_pending_frames(sk);
+	release_sock(sk);
+done:
+	dst_release(dst);
+out:
+	fl6_sock_release(flowlabel);
+
+	return err < 0 ? err : len;
+
+do_confirm:
+	dst_confirm(dst);
+	if (!(msg->msg_flags & MSG_PROBE) || len)
+		goto back_from_confirm;
+	err = 0;
+	goto done;
+}
+
+static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
+			    struct msghdr *msg, size_t len, int noblock,
+			    int flags, int *addr_len)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
+	size_t copied = 0;
+	int err = -EOPNOTSUPP;
+	struct sk_buff *skb;
+
+	if (flags & MSG_OOB)
+		goto out;
+
+	if (addr_len)
+		*addr_len = sizeof(*lsa);
+
+	if (flags & MSG_ERRQUEUE)
+		return ipv6_recv_error(sk, msg, len);
+
+	skb = skb_recv_datagram(sk, flags, noblock, &err);
+	if (!skb)
+		goto out;
+
+	copied = skb->len;
+	if (len < copied) {
+		msg->msg_flags |= MSG_TRUNC;
+		copied = len;
+	}
+
+	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+	if (err)
+		goto done;
+
+	sock_recv_timestamp(msg, sk, skb);
+
+	/* Copy the address. */
+	if (lsa) {
+		lsa->l2tp_family = AF_INET6;
+		lsa->l2tp_unused = 0;
+		lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
+		lsa->l2tp_flowinfo = 0;
+		lsa->l2tp_scope_id = 0;
+		if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
+			lsa->l2tp_scope_id = IP6CB(skb)->iif;
+	}
+
+	if (inet->cmsg_flags)
+		ip_cmsg_recv(msg, skb);
+
+	if (flags & MSG_TRUNC)
+		copied = skb->len;
+done:
+	skb_free_datagram(sk, skb);
+out:
+	return err ? err : copied;
+}
+
+static struct proto l2tp_ip6_prot = {
+	.name		   = "L2TP/IPv6",
+	.owner		   = THIS_MODULE,
+	.init		   = l2tp_ip6_open,
+	.close		   = l2tp_ip6_close,
+	.bind		   = l2tp_ip6_bind,
+	.connect	   = l2tp_ip6_connect,
+	.disconnect	   = udp_disconnect,
+	.ioctl		   = udp_ioctl,
+	.destroy	   = l2tp_ip6_destroy_sock,
+	.setsockopt	   = ipv6_setsockopt,
+	.getsockopt	   = ipv6_getsockopt,
+	.sendmsg	   = l2tp_ip6_sendmsg,
+	.recvmsg	   = l2tp_ip6_recvmsg,
+	.backlog_rcv	   = l2tp_ip6_backlog_recv,
+	.hash		   = inet_hash,
+	.unhash		   = inet_unhash,
+	.obj_size	   = sizeof(struct l2tp_ip6_sock),
+#ifdef CONFIG_COMPAT
+	.compat_setsockopt = compat_ipv6_setsockopt,
+	.compat_getsockopt = compat_ipv6_getsockopt,
+#endif
+};
+
+static const struct proto_ops l2tp_ip6_ops = {
+	.family		   = PF_INET6,
+	.owner		   = THIS_MODULE,
+	.release	   = inet6_release,
+	.bind		   = inet6_bind,
+	.connect	   = inet_dgram_connect,
+	.socketpair	   = sock_no_socketpair,
+	.accept		   = sock_no_accept,
+	.getname	   = l2tp_ip6_getname,
+	.poll		   = datagram_poll,
+	.ioctl		   = inet6_ioctl,
+	.listen		   = sock_no_listen,
+	.shutdown	   = inet_shutdown,
+	.setsockopt	   = sock_common_setsockopt,
+	.getsockopt	   = sock_common_getsockopt,
+	.sendmsg	   = inet_sendmsg,
+	.recvmsg	   = sock_common_recvmsg,
+	.mmap		   = sock_no_mmap,
+	.sendpage	   = sock_no_sendpage,
+#ifdef CONFIG_COMPAT
+	.compat_setsockopt = compat_sock_common_setsockopt,
+	.compat_getsockopt = compat_sock_common_getsockopt,
+#endif
+};
+
+static struct inet_protosw l2tp_ip6_protosw = {
+	.type		= SOCK_DGRAM,
+	.protocol	= IPPROTO_L2TP,
+	.prot		= &l2tp_ip6_prot,
+	.ops		= &l2tp_ip6_ops,
+	.no_check	= 0,
+};
+
+static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
+	.handler	= l2tp_ip6_recv,
+};
+
+static int __init l2tp_ip6_init(void)
+{
+	int err;
+
+	pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
+
+	err = proto_register(&l2tp_ip6_prot, 1);
+	if (err != 0)
+		goto out;
+
+	err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
+	if (err)
+		goto out1;
+
+	inet6_register_protosw(&l2tp_ip6_protosw);
+	return 0;
+
+out1:
+	proto_unregister(&l2tp_ip6_prot);
+out:
+	return err;
+}
+
+static void __exit l2tp_ip6_exit(void)
+{
+	inet6_unregister_protosw(&l2tp_ip6_protosw);
+	inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
+	proto_unregister(&l2tp_ip6_prot);
+}
+
+module_init(l2tp_ip6_init);
+module_exit(l2tp_ip6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
+MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
+MODULE_VERSION("1.0");
+
+/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
+ * enums
+ */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
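As a point of reference (not part of the patch), a minimal user-space sketch of how this new protocol is reached: the (PF_INET6, SOCK_DGRAM, IPPROTO_L2TP) triple below is exactly what the MODULE_ALIAS_NET_PF_PROTO_TYPE() line above registers, so opening such a socket can auto-load l2tp_ip6. The helper name and the fallback #define are illustrative assumptions.

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_L2TP
#define IPPROTO_L2TP 115	/* same protocol number the code above handles */
#endif

/* Open an unconnected L2TP/IPv6 socket; bind/connect then go through
 * l2tp_ip6_bind()/l2tp_ip6_connect() with a struct sockaddr_l2tpip6
 * from linux/l2tp.h.
 */
static int open_l2tp_ip6_socket(void)
{
	return socket(PF_INET6, SOCK_DGRAM, IPPROTO_L2TP);
}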
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 93a41a0..8577264 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -14,6 +14,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <net/sock.h>
 #include <net/genetlink.h>
 #include <net/udp.h>
@@ -133,10 +135,25 @@
 	if (info->attrs[L2TP_ATTR_FD]) {
 		fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
 	} else {
-		if (info->attrs[L2TP_ATTR_IP_SADDR])
-			cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]);
-		if (info->attrs[L2TP_ATTR_IP_DADDR])
-			cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]);
+#if IS_ENABLED(CONFIG_IPV6)
+		if (info->attrs[L2TP_ATTR_IP6_SADDR] &&
+		    info->attrs[L2TP_ATTR_IP6_DADDR]) {
+			cfg.local_ip6 = nla_data(
+				info->attrs[L2TP_ATTR_IP6_SADDR]);
+			cfg.peer_ip6 = nla_data(
+				info->attrs[L2TP_ATTR_IP6_DADDR]);
+		} else
+#endif
+		if (info->attrs[L2TP_ATTR_IP_SADDR] &&
+		    info->attrs[L2TP_ATTR_IP_DADDR]) {
+			cfg.local_ip.s_addr = nla_get_be32(
+				info->attrs[L2TP_ATTR_IP_SADDR]);
+			cfg.peer_ip.s_addr = nla_get_be32(
+				info->attrs[L2TP_ATTR_IP_DADDR]);
+		} else {
+			ret = -EINVAL;
+			goto out;
+		}
 		if (info->attrs[L2TP_ATTR_UDP_SPORT])
 			cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
 		if (info->attrs[L2TP_ATTR_UDP_DPORT])
@@ -225,47 +242,85 @@
 	struct nlattr *nest;
 	struct sock *sk = NULL;
 	struct inet_sock *inet;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct ipv6_pinfo *np = NULL;
+#endif
+	struct l2tp_stats stats;
+	unsigned int start;
 
 	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
 			  L2TP_CMD_TUNNEL_GET);
 	if (IS_ERR(hdr))
 		return PTR_ERR(hdr);
 
-	NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
-	NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
-	NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);
+	if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
+	    nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
+	    nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
+		goto nla_put_failure;
 
 	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
+	do {
+		start = u64_stats_fetch_begin(&tunnel->stats.syncp);
+		stats.tx_packets = tunnel->stats.tx_packets;
+		stats.tx_bytes = tunnel->stats.tx_bytes;
+		stats.tx_errors = tunnel->stats.tx_errors;
+		stats.rx_packets = tunnel->stats.rx_packets;
+		stats.rx_bytes = tunnel->stats.rx_bytes;
+		stats.rx_errors = tunnel->stats.rx_errors;
+		stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
+		stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
+	} while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));
+
+	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+			stats.rx_seq_discards) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
+			stats.rx_oos_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
 	sk = tunnel->sock;
 	if (!sk)
 		goto out;
 
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == AF_INET6)
+		np = inet6_sk(sk);
+#endif
+
 	inet = inet_sk(sk);
 
 	switch (tunnel->encap) {
 	case L2TP_ENCAPTYPE_UDP:
-		NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
-		NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
-		NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT));
+		if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
+		    nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
+		    nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
+			       (sk->sk_no_check != UDP_CSUM_NOXMIT)))
+			goto nla_put_failure;
 		/* NOBREAK */
 	case L2TP_ENCAPTYPE_IP:
-		NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
-		NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
+#if IS_ENABLED(CONFIG_IPV6)
+		if (np) {
+			if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
+				    &np->saddr) ||
+			    nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
+				    &np->daddr))
+				goto nla_put_failure;
+		} else
+#endif
+		if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
+		    nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
+			goto nla_put_failure;
 		break;
 	}
 
@@ -556,6 +611,8 @@
 	struct nlattr *nest;
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	struct sock *sk = NULL;
+	struct l2tp_stats stats;
+	unsigned int start;
 
 	sk = tunnel->sock;
 
@@ -563,43 +620,64 @@
 	if (IS_ERR(hdr))
 		return PTR_ERR(hdr);
 
-	NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
-	NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug);
-	NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
-	NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu);
-	if (session->mru)
-		NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru);
+	if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
+			session->peer_session_id) ||
+	    nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
+	    nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
+	    nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
+	    (session->mru &&
+	     nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
+		goto nla_put_failure;
 
-	if (session->ifname && session->ifname[0])
-		NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname);
-	if (session->cookie_len)
-		NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]);
-	if (session->peer_cookie_len)
-		NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]);
-	NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
-	NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
-	NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
+	if ((session->ifname && session->ifname[0] &&
+	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
+	    (session->cookie_len &&
+	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
+		     &session->cookie[0])) ||
+	    (session->peer_cookie_len &&
+	     nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
+		     &session->peer_cookie[0])) ||
+	    nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
+	    nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
+	    nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
 #ifdef CONFIG_XFRM
-	if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
-		NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1);
+	    (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
+	     nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
 #endif
-	if (session->reorder_timeout)
-		NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout);
+	    (session->reorder_timeout &&
+	     nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
+		goto nla_put_failure;
 
 	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
-	NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets);
-	NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
+
+	do {
+		start = u64_stats_fetch_begin(&session->stats.syncp);
+		stats.tx_packets = session->stats.tx_packets;
+		stats.tx_bytes = session->stats.tx_bytes;
+		stats.tx_errors = session->stats.tx_errors;
+		stats.rx_packets = session->stats.rx_packets;
+		stats.rx_bytes = session->stats.rx_bytes;
+		stats.rx_errors = session->stats.rx_errors;
+		stats.rx_seq_discards = session->stats.rx_seq_discards;
+		stats.rx_oos_packets = session->stats.rx_oos_packets;
+	} while (u64_stats_fetch_retry(&session->stats.syncp, start));
+
+	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
+	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
+			stats.rx_seq_discards) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
+			stats.rx_oos_packets) ||
+	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
 	return genlmsg_end(skb, hdr);
@@ -708,6 +786,14 @@
 	[L2TP_ATTR_MTU]			= { .type = NLA_U16, },
 	[L2TP_ATTR_MRU]			= { .type = NLA_U16, },
 	[L2TP_ATTR_STATS]		= { .type = NLA_NESTED, },
+	[L2TP_ATTR_IP6_SADDR] = {
+		.type = NLA_BINARY,
+		.len = sizeof(struct in6_addr),
+	},
+	[L2TP_ATTR_IP6_DADDR] = {
+		.type = NLA_BINARY,
+		.len = sizeof(struct in6_addr),
+	},
 	[L2TP_ATTR_IFNAME] = {
 		.type = NLA_NUL_STRING,
 		.len = IFNAMSIZ - 1,
@@ -818,7 +904,7 @@
 {
 	int err;
 
-	printk(KERN_INFO "L2TP netlink interface\n");
+	pr_info("L2TP netlink interface\n");
 	err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops,
 					    ARRAY_SIZE(l2tp_nl_ops));
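The u64_stats snapshot loops added above only give consistent 64-bit reads if the counter updates elsewhere are bracketed by u64_stats_update_begin()/u64_stats_update_end() on the same syncp. A rough sketch of that writer-side pairing follows; the helper name is made up, and the real update sites live in l2tp_core.c elsewhere in this series.

/* Illustrative writer side for the fetch_begin/fetch_retry loops above;
 * field names mirror struct l2tp_stats as used in the netlink code.
 */
static inline void l2tp_stats_add_rx(struct l2tp_stats *stats,
				     unsigned int bytes)
{
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += bytes;
	u64_stats_update_end(&stats->syncp);
}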
 
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1addd9f..8ef6b94 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -57,6 +57,8 @@
  * http://openl2tp.sourceforge.net.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/list.h>
@@ -106,12 +108,6 @@
 /* Space for UDP, L2TP and PPP headers */
 #define PPPOL2TP_HEADER_OVERHEAD	40
 
-#define PRINTK(_mask, _type, _lvl, _fmt, args...)			\
-	do {								\
-		if ((_mask) & (_type))					\
-			printk(_lvl "PPPOL2TP: " _fmt, ##args);		\
-	} while (0)
-
 /* Number of bytes to build transmit L2TP headers.
  * Unfortunately the size is different depending on whether sequence numbers
  * are enabled.
@@ -236,9 +232,9 @@
 
 	if (sk->sk_state & PPPOX_BOUND) {
 		struct pppox_sock *po;
-		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
-		       "%s: recv %d byte data frame, passing to ppp\n",
-		       session->name, data_len);
+		l2tp_dbg(session, PPPOL2TP_MSG_DATA,
+			 "%s: recv %d byte data frame, passing to ppp\n",
+			 session->name, data_len);
 
 		/* We need to forget all info related to the L2TP packet
 		 * gathered in the skb as we are going to reuse the same
@@ -259,8 +255,8 @@
 		po = pppox_sk(sk);
 		ppp_input(&po->chan, skb);
 	} else {
-		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
-		       "%s: socket not bound\n", session->name);
+		l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n",
+			  session->name);
 
 		/* Not bound. Nothing we can do, so discard. */
 		session->stats.rx_errors++;
@@ -270,8 +266,7 @@
 	return;
 
 no_sock:
-	PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
-	       "%s: no socket\n", session->name);
+	l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name);
 	kfree_skb(skb);
 }
 
@@ -628,7 +623,6 @@
 {
 	struct sock *sk = sock->sk;
 	struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
-	struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
 	struct pppox_sock *po = pppox_sk(sk);
 	struct l2tp_session *session = NULL;
 	struct l2tp_tunnel *tunnel;
@@ -657,7 +651,13 @@
 	if (sk->sk_user_data)
 		goto end; /* socket is already attached */
 
-	/* Get params from socket address. Handle L2TPv2 and L2TPv3 */
+	/* Get params from socket address. Handle L2TPv2 and L2TPv3.
+	 * This is nasty because there are different sockaddr_pppol2tp
+	 * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use
+	 * the sockaddr size to determine which structure the caller
+	 * is using.
+	 */
+	peer_tunnel_id = 0;
 	if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
 		fd = sp->pppol2tp.fd;
 		tunnel_id = sp->pppol2tp.s_tunnel;
@@ -665,12 +665,31 @@
 		session_id = sp->pppol2tp.s_session;
 		peer_session_id = sp->pppol2tp.d_session;
 	} else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
+		struct sockaddr_pppol2tpv3 *sp3 =
+			(struct sockaddr_pppol2tpv3 *) sp;
 		ver = 3;
 		fd = sp3->pppol2tp.fd;
 		tunnel_id = sp3->pppol2tp.s_tunnel;
 		peer_tunnel_id = sp3->pppol2tp.d_tunnel;
 		session_id = sp3->pppol2tp.s_session;
 		peer_session_id = sp3->pppol2tp.d_session;
+	} else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) {
+		struct sockaddr_pppol2tpin6 *sp6 =
+			(struct sockaddr_pppol2tpin6 *) sp;
+		fd = sp6->pppol2tp.fd;
+		tunnel_id = sp6->pppol2tp.s_tunnel;
+		peer_tunnel_id = sp6->pppol2tp.d_tunnel;
+		session_id = sp6->pppol2tp.s_session;
+		peer_session_id = sp6->pppol2tp.d_session;
+	} else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) {
+		struct sockaddr_pppol2tpv3in6 *sp6 =
+			(struct sockaddr_pppol2tpv3in6 *) sp;
+		ver = 3;
+		fd = sp6->pppol2tp.fd;
+		tunnel_id = sp6->pppol2tp.s_tunnel;
+		peer_tunnel_id = sp6->pppol2tp.d_tunnel;
+		session_id = sp6->pppol2tp.s_session;
+		peer_session_id = sp6->pppol2tp.d_session;
 	} else {
 		error = -EINVAL;
 		goto end; /* bad socket address */
@@ -711,12 +730,8 @@
 	if (tunnel->recv_payload_hook == NULL)
 		tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;
 
-	if (tunnel->peer_tunnel_id == 0) {
-		if (ver == 2)
-			tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
-		else
-			tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
-	}
+	if (tunnel->peer_tunnel_id == 0)
+		tunnel->peer_tunnel_id = peer_tunnel_id;
 
 	/* Create session if it doesn't already exist. We handle the
 	 * case where a session was previously created by the netlink
@@ -807,8 +822,8 @@
 	/* This is how we get the session context from the socket. */
 	sk->sk_user_data = session;
 	sk->sk_state = PPPOX_CONNECTED;
-	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-	       "%s: created\n", session->name);
+	l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
+		  session->name);
 
 end:
 	release_sock(sk);
@@ -861,8 +876,8 @@
 	ps = l2tp_session_priv(session);
 	ps->tunnel_sock = tunnel->sock;
 
-	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-	       "%s: created\n", session->name);
+	l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
+		  session->name);
 
 	error = 0;
 
@@ -916,7 +931,7 @@
 	}
 
 	inet = inet_sk(tunnel->sock);
-	if (tunnel->version == 2) {
+	if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) {
 		struct sockaddr_pppol2tp sp;
 		len = sizeof(sp);
 		memset(&sp, 0, len);
@@ -932,6 +947,46 @@
 		sp.pppol2tp.addr.sin_port = inet->inet_dport;
 		sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
 		memcpy(uaddr, &sp, len);
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if ((tunnel->version == 2) &&
+		   (tunnel->sock->sk_family == AF_INET6)) {
+		struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+		struct sockaddr_pppol2tpin6 sp;
+		len = sizeof(sp);
+		memset(&sp, 0, len);
+		sp.sa_family	= AF_PPPOX;
+		sp.sa_protocol	= PX_PROTO_OL2TP;
+		sp.pppol2tp.fd  = tunnel->fd;
+		sp.pppol2tp.pid = pls->owner;
+		sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+		sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+		sp.pppol2tp.s_session = session->session_id;
+		sp.pppol2tp.d_session = session->peer_session_id;
+		sp.pppol2tp.addr.sin6_family = AF_INET6;
+		sp.pppol2tp.addr.sin6_port = inet->inet_dport;
+		memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
+		       sizeof(np->daddr));
+		memcpy(uaddr, &sp, len);
+	} else if ((tunnel->version == 3) &&
+		   (tunnel->sock->sk_family == AF_INET6)) {
+		struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+		struct sockaddr_pppol2tpv3in6 sp;
+		len = sizeof(sp);
+		memset(&sp, 0, len);
+		sp.sa_family	= AF_PPPOX;
+		sp.sa_protocol	= PX_PROTO_OL2TP;
+		sp.pppol2tp.fd  = tunnel->fd;
+		sp.pppol2tp.pid = pls->owner;
+		sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
+		sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
+		sp.pppol2tp.s_session = session->session_id;
+		sp.pppol2tp.d_session = session->peer_session_id;
+		sp.pppol2tp.addr.sin6_family = AF_INET6;
+		sp.pppol2tp.addr.sin6_port = inet->inet_dport;
+		memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
+		       sizeof(np->daddr));
+		memcpy(uaddr, &sp, len);
+#endif
 	} else if (tunnel->version == 3) {
 		struct sockaddr_pppol2tpv3 sp;
 		len = sizeof(sp);
@@ -998,9 +1053,9 @@
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	struct pppol2tp_ioc_stats stats;
 
-	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
-	       "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
-	       session->name, cmd, arg);
+	l2tp_dbg(session, PPPOL2TP_MSG_CONTROL,
+		 "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
+		 session->name, cmd, arg);
 
 	sk = ps->sock;
 	sock_hold(sk);
@@ -1018,8 +1073,8 @@
 		if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
 			break;
 
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get mtu=%d\n", session->name, session->mtu);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n",
+			  session->name, session->mtu);
 		err = 0;
 		break;
 
@@ -1034,8 +1089,8 @@
 
 		session->mtu = ifr.ifr_mtu;
 
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: set mtu=%d\n", session->name, session->mtu);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n",
+			  session->name, session->mtu);
 		err = 0;
 		break;
 
@@ -1048,8 +1103,8 @@
 		if (put_user(session->mru, (int __user *) arg))
 			break;
 
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get mru=%d\n", session->name, session->mru);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n",
+			  session->name, session->mru);
 		err = 0;
 		break;
 
@@ -1063,8 +1118,8 @@
 			break;
 
 		session->mru = val;
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: set mru=%d\n", session->name, session->mru);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n",
+			  session->name, session->mru);
 		err = 0;
 		break;
 
@@ -1073,8 +1128,8 @@
 		if (put_user(ps->flags, (int __user *) arg))
 			break;
 
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get flags=%d\n", session->name, ps->flags);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n",
+			  session->name, ps->flags);
 		err = 0;
 		break;
 
@@ -1083,8 +1138,8 @@
 		if (get_user(val, (int __user *) arg))
 			break;
 		ps->flags = val;
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: set flags=%d\n", session->name, ps->flags);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n",
+			  session->name, ps->flags);
 		err = 0;
 		break;
 
@@ -1100,8 +1155,8 @@
 		if (copy_to_user((void __user *) arg, &stats,
 				 sizeof(stats)))
 			break;
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get L2TP stats\n", session->name);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
+			  session->name);
 		err = 0;
 		break;
 
@@ -1128,9 +1183,9 @@
 	struct sock *sk;
 	struct pppol2tp_ioc_stats stats;
 
-	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
-	       "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
-	       tunnel->name, cmd, arg);
+	l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL,
+		 "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
+		 tunnel->name, cmd, arg);
 
 	sk = tunnel->sock;
 	sock_hold(sk);
@@ -1164,8 +1219,8 @@
 			err = -EFAULT;
 			break;
 		}
-		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get L2TP stats\n", tunnel->name);
+		l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
+			  tunnel->name);
 		err = 0;
 		break;
 
@@ -1254,8 +1309,8 @@
 	switch (optname) {
 	case PPPOL2TP_SO_DEBUG:
 		tunnel->debug = val;
-		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: set debug=%x\n", tunnel->name, tunnel->debug);
+		l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
+			  tunnel->name, tunnel->debug);
 		break;
 
 	default:
@@ -1282,8 +1337,9 @@
 			break;
 		}
 		session->recv_seq = val ? -1 : 0;
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: set recv_seq=%d\n", session->name, session->recv_seq);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+			  "%s: set recv_seq=%d\n",
+			  session->name, session->recv_seq);
 		break;
 
 	case PPPOL2TP_SO_SENDSEQ:
@@ -1298,8 +1354,9 @@
 			po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
 				PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
 		}
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: set send_seq=%d\n", session->name, session->send_seq);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+			  "%s: set send_seq=%d\n",
+			  session->name, session->send_seq);
 		break;
 
 	case PPPOL2TP_SO_LNSMODE:
@@ -1308,20 +1365,22 @@
 			break;
 		}
 		session->lns_mode = val ? -1 : 0;
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: set lns_mode=%d\n", session->name, session->lns_mode);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+			  "%s: set lns_mode=%d\n",
+			  session->name, session->lns_mode);
 		break;
 
 	case PPPOL2TP_SO_DEBUG:
 		session->debug = val;
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: set debug=%x\n", session->name, session->debug);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
+			  session->name, session->debug);
 		break;
 
 	case PPPOL2TP_SO_REORDERTO:
 		session->reorder_timeout = msecs_to_jiffies(val);
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+			  "%s: set reorder_timeout=%d\n",
+			  session->name, session->reorder_timeout);
 		break;
 
 	default:
@@ -1400,8 +1459,8 @@
 	switch (optname) {
 	case PPPOL2TP_SO_DEBUG:
 		*val = tunnel->debug;
-		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get debug=%x\n", tunnel->name, tunnel->debug);
+		l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n",
+			  tunnel->name, tunnel->debug);
 		break;
 
 	default:
@@ -1423,32 +1482,32 @@
 	switch (optname) {
 	case PPPOL2TP_SO_RECVSEQ:
 		*val = session->recv_seq;
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get recv_seq=%d\n", session->name, *val);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+			  "%s: get recv_seq=%d\n", session->name, *val);
 		break;
 
 	case PPPOL2TP_SO_SENDSEQ:
 		*val = session->send_seq;
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get send_seq=%d\n", session->name, *val);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+			  "%s: get send_seq=%d\n", session->name, *val);
 		break;
 
 	case PPPOL2TP_SO_LNSMODE:
 		*val = session->lns_mode;
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get lns_mode=%d\n", session->name, *val);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+			  "%s: get lns_mode=%d\n", session->name, *val);
 		break;
 
 	case PPPOL2TP_SO_DEBUG:
 		*val = session->debug;
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get debug=%d\n", session->name, *val);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n",
+			  session->name, *val);
 		break;
 
 	case PPPOL2TP_SO_REORDERTO:
 		*val = (int) jiffies_to_msecs(session->reorder_timeout);
-		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
-		       "%s: get reorder_timeout=%d\n", session->name, *val);
+		l2tp_info(session, PPPOL2TP_MSG_CONTROL,
+			  "%s: get reorder_timeout=%d\n", session->name, *val);
 		break;
 
 	default:
@@ -1811,8 +1870,7 @@
 		goto out_unregister_pppox;
 #endif
 
-	printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
-	       PPPOL2TP_DRV_VERSION);
+	pr_info("PPPoL2TP kernel driver, %s\n", PPPOL2TP_DRV_VERSION);
 
 out:
 	return err;
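Because pppol2tp_connect() now dispatches purely on the sockaddr size, user space opts into the new IPv6 variants simply by passing the larger structures. A hedged sketch of the L2TPv3-over-IPv6 case: the struct layout comes from linux/if_pppox.h, while the function name, IDs and tunnel_fd are placeholders.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/if_pppox.h>

/* Attach a PPP channel to an existing L2TPv3-over-IPv6 tunnel socket.
 * Passing sizeof(struct sockaddr_pppol2tpv3in6) is what steers
 * pppol2tp_connect() into the new IPv6 branch above.
 */
static int pppol2tp_v3_in6_session(int tunnel_fd, uint32_t tid, uint32_t ptid,
				   uint32_t sid, uint32_t psid)
{
	struct sockaddr_pppol2tpv3in6 sa;
	int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);

	if (fd < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_PPPOX;
	sa.sa_protocol = PX_PROTO_OL2TP;
	sa.pppol2tp.fd = tunnel_fd;		/* L2TP/IPv6 tunnel socket */
	sa.pppol2tp.s_tunnel = tid;
	sa.pppol2tp.d_tunnel = ptid;
	sa.pppol2tp.s_session = sid;
	sa.pppol2tp.d_session = psid;

	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}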
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index ab3d35f..3cdaa04 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -15,6 +15,8 @@
  *	2000-10-29	Henner Eisen	lapb_data_indication() return status.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -279,9 +281,7 @@
 
 	lapb_establish_data_link(lapb);
 
-#if LAPB_DEBUG > 0
-	printk(KERN_DEBUG "lapb: (%p) S0 -> S1\n", lapb->dev);
-#endif
+	lapb_dbg(0, "(%p) S0 -> S1\n", lapb->dev);
 	lapb->state = LAPB_STATE_1;
 
 	rc = LAPB_OK;
@@ -305,12 +305,8 @@
 		goto out_put;
 
 	case LAPB_STATE_1:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S1 TX DISC(1)\n", lapb->dev);
-#endif
-#if LAPB_DEBUG > 0
-		printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
-#endif
+		lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev);
+		lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
 		lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
 		lapb->state = LAPB_STATE_0;
 		lapb_start_t1timer(lapb);
@@ -329,12 +325,8 @@
 	lapb_stop_t2timer(lapb);
 	lapb->state = LAPB_STATE_2;
 
-#if LAPB_DEBUG > 1
-	printk(KERN_DEBUG "lapb: (%p) S3 DISC(1)\n", lapb->dev);
-#endif
-#if LAPB_DEBUG > 0
-	printk(KERN_DEBUG "lapb: (%p) S3 -> S2\n", lapb->dev);
-#endif
+	lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev);
+	lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev);
 
 	rc = LAPB_OK;
 out_put:
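The lapb_dbg() calls introduced in these hunks rely on a helper whose definition is not shown here; judging by the removed guards, it presumably reduces to something like the sketch below, with pr_fmt() restoring the old "lapb: " prefix — an approximation of include/net/lapb.h, not the literal header change.

/* Rough shape of the helper assumed by the lapb_dbg() conversions: a
 * level-N message fires only when LAPB_DEBUG > N, matching the removed
 * "#if LAPB_DEBUG > N" guards, and goes through pr_debug() so that the
 * pr_fmt() prefix replaces the explicit "lapb: " of the old printks.
 */
#define lapb_dbg(level, fmt, ...)				\
do {								\
	if (level < LAPB_DEBUG)					\
		pr_debug(fmt, ##__VA_ARGS__);			\
} while (0)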
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index f4e3c1a..5dba899 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -15,6 +15,8 @@
  *	2000-10-29	Henner Eisen	lapb_data_indication() return status.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/socket.h>
@@ -44,25 +46,16 @@
 {
 	switch (frame->type) {
 	case LAPB_SABM:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S0 RX SABM(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S0 RX SABM(%d)\n", lapb->dev, frame->pf);
 		if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S0 TX DM(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		} else {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev);
-#endif
+			lapb_dbg(1, "(%p) S0 TX UA(%d)\n",
+				 lapb->dev, frame->pf);
+			lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev);
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
@@ -78,18 +71,11 @@
 		break;
 
 	case LAPB_SABME:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S0 RX SABME(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S0 RX SABME(%d)\n", lapb->dev, frame->pf);
 		if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev);
-#endif
+			lapb_dbg(1, "(%p) S0 TX UA(%d)\n",
+				 lapb->dev, frame->pf);
+			lapb_dbg(0, "(%p) S0 -> S3\n", lapb->dev);
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
@@ -102,22 +88,16 @@
 			lapb->va        = 0;
 			lapb_connect_indication(lapb, LAPB_OK);
 		} else {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S0 TX DM(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		}
 		break;
 
 	case LAPB_DISC:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S0 RX DISC(%d)\n",
-		       lapb->dev, frame->pf);
-		printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S0 RX DISC(%d)\n", lapb->dev, frame->pf);
+		lapb_dbg(1, "(%p) S0 TX UA(%d)\n", lapb->dev, frame->pf);
 		lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
 		break;
 
@@ -137,68 +117,45 @@
 {
 	switch (frame->type) {
 	case LAPB_SABM:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S1 RX SABM(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S1 RX SABM(%d)\n", lapb->dev, frame->pf);
 		if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S1 TX DM(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		} else {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S1 TX UA(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 		}
 		break;
 
 	case LAPB_SABME:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S1 RX SABME(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S1 RX SABME(%d)\n", lapb->dev, frame->pf);
 		if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S1 TX UA(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 		} else {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S1 TX DM(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		}
 		break;
 
 	case LAPB_DISC:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S1 RX DISC(%d)\n",
-		       lapb->dev, frame->pf);
-		printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S1 RX DISC(%d)\n", lapb->dev, frame->pf);
+		lapb_dbg(1, "(%p) S1 TX DM(%d)\n", lapb->dev, frame->pf);
 		lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE);
 		break;
 
 	case LAPB_UA:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S1 RX UA(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S1 RX UA(%d)\n", lapb->dev, frame->pf);
 		if (frame->pf) {
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S1 -> S3\n", lapb->dev);
-#endif
+			lapb_dbg(0, "(%p) S1 -> S3\n", lapb->dev);
 			lapb_stop_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state     = LAPB_STATE_3;
@@ -212,14 +169,9 @@
 		break;
 
 	case LAPB_DM:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S1 RX DM(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S1 RX DM(%d)\n", lapb->dev, frame->pf);
 		if (frame->pf) {
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
-#endif
+			lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
 			lapb_clear_queues(lapb);
 			lapb->state = LAPB_STATE_0;
 			lapb_start_t1timer(lapb);
@@ -242,34 +194,22 @@
 	switch (frame->type) {
 	case LAPB_SABM:
 	case LAPB_SABME:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S2 RX {SABM,SABME}(%d)\n",
-		       lapb->dev, frame->pf);
-		printk(KERN_DEBUG "lapb: (%p) S2 TX DM(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S2 RX {SABM,SABME}(%d)\n",
+			 lapb->dev, frame->pf);
+		lapb_dbg(1, "(%p) S2 TX DM(%d)\n", lapb->dev, frame->pf);
 		lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE);
 		break;
 
 	case LAPB_DISC:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S2 RX DISC(%d)\n",
-		       lapb->dev, frame->pf);
-		printk(KERN_DEBUG "lapb: (%p) S2 TX UA(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S2 RX DISC(%d)\n", lapb->dev, frame->pf);
+		lapb_dbg(1, "(%p) S2 TX UA(%d)\n", lapb->dev, frame->pf);
 		lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
 		break;
 
 	case LAPB_UA:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S2 RX UA(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S2 RX UA(%d)\n", lapb->dev, frame->pf);
 		if (frame->pf) {
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
-#endif
+			lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
 			lapb->state = LAPB_STATE_0;
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
@@ -278,14 +218,9 @@
 		break;
 
 	case LAPB_DM:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf);
 		if (frame->pf) {
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
-#endif
+			lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
 			lapb->state = LAPB_STATE_0;
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
@@ -297,12 +232,9 @@
 	case LAPB_REJ:
 	case LAPB_RNR:
 	case LAPB_RR:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S2 RX {I,REJ,RNR,RR}(%d)\n",
+		lapb_dbg(1, "(%p) S2 RX {I,REJ,RNR,RR}(%d)\n",
 		       lapb->dev, frame->pf);
-		printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf);
 		if (frame->pf)
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
@@ -325,22 +257,15 @@
 
 	switch (frame->type) {
 	case LAPB_SABM:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S3 RX SABM(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S3 RX SABM(%d)\n", lapb->dev, frame->pf);
 		if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S3 TX DM(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		} else {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S3 TX UA(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
@@ -355,15 +280,10 @@
 		break;
 
 	case LAPB_SABME:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S3 RX SABME(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S3 RX SABME(%d)\n", lapb->dev, frame->pf);
 		if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S3 TX UA(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
@@ -375,23 +295,16 @@
 			lapb->va        = 0;
 			lapb_requeue_frames(lapb);
 		} else {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S3 TX DM(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		}
 		break;
 
 	case LAPB_DISC:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S3 RX DISC(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
-		printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
-#endif
+		lapb_dbg(1, "(%p) S3 RX DISC(%d)\n", lapb->dev, frame->pf);
+		lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
 		lapb_clear_queues(lapb);
 		lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
 		lapb_start_t1timer(lapb);
@@ -401,13 +314,8 @@
 		break;
 
 	case LAPB_DM:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S3 RX DM(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
-		printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
-#endif
+		lapb_dbg(1, "(%p) S3 RX DM(%d)\n", lapb->dev, frame->pf);
+		lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
 		lapb_clear_queues(lapb);
 		lapb->state = LAPB_STATE_0;
 		lapb_start_t1timer(lapb);
@@ -416,10 +324,8 @@
 		break;
 
 	case LAPB_RNR:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S3 RX RNR(%d) R%d\n",
-		       lapb->dev, frame->pf, frame->nr);
-#endif
+		lapb_dbg(1, "(%p) S3 RX RNR(%d) R%d\n",
+			 lapb->dev, frame->pf, frame->nr);
 		lapb->condition |= LAPB_PEER_RX_BUSY_CONDITION;
 		lapb_check_need_response(lapb, frame->cr, frame->pf);
 		if (lapb_validate_nr(lapb, frame->nr)) {
@@ -428,9 +334,7 @@
 			lapb->frmr_data = *frame;
 			lapb->frmr_type = LAPB_FRMR_Z;
 			lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+			lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state   = LAPB_STATE_4;
@@ -439,10 +343,8 @@
 		break;
 
 	case LAPB_RR:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S3 RX RR(%d) R%d\n",
-		       lapb->dev, frame->pf, frame->nr);
-#endif
+		lapb_dbg(1, "(%p) S3 RX RR(%d) R%d\n",
+			 lapb->dev, frame->pf, frame->nr);
 		lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION;
 		lapb_check_need_response(lapb, frame->cr, frame->pf);
 		if (lapb_validate_nr(lapb, frame->nr)) {
@@ -451,9 +353,7 @@
 			lapb->frmr_data = *frame;
 			lapb->frmr_type = LAPB_FRMR_Z;
 			lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+			lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state   = LAPB_STATE_4;
@@ -462,10 +362,8 @@
 		break;
 
 	case LAPB_REJ:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S3 RX REJ(%d) R%d\n",
-		       lapb->dev, frame->pf, frame->nr);
-#endif
+		lapb_dbg(1, "(%p) S3 RX REJ(%d) R%d\n",
+			 lapb->dev, frame->pf, frame->nr);
 		lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION;
 		lapb_check_need_response(lapb, frame->cr, frame->pf);
 		if (lapb_validate_nr(lapb, frame->nr)) {
@@ -477,9 +375,7 @@
 			lapb->frmr_data = *frame;
 			lapb->frmr_type = LAPB_FRMR_Z;
 			lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+			lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state   = LAPB_STATE_4;
@@ -488,17 +384,13 @@
 		break;
 
 	case LAPB_I:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S3 RX I(%d) S%d R%d\n",
-		       lapb->dev, frame->pf, frame->ns, frame->nr);
-#endif
+		lapb_dbg(1, "(%p) S3 RX I(%d) S%d R%d\n",
+			 lapb->dev, frame->pf, frame->ns, frame->nr);
 		if (!lapb_validate_nr(lapb, frame->nr)) {
 			lapb->frmr_data = *frame;
 			lapb->frmr_type = LAPB_FRMR_Z;
 			lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+			lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state   = LAPB_STATE_4;
@@ -522,7 +414,7 @@
 			 * a frame lost on the wire.
 			 */
 			if (cn == NET_RX_DROP) {
-				printk(KERN_DEBUG "LAPB: rx congestion\n");
+				pr_debug("rx congestion\n");
 				break;
 			}
 			lapb->vr = (lapb->vr + 1) % modulus;
@@ -541,11 +433,8 @@
 				if (frame->pf)
 					lapb_enquiry_response(lapb);
 			} else {
-#if LAPB_DEBUG > 1
-				printk(KERN_DEBUG
-				       "lapb: (%p) S3 TX REJ(%d) R%d\n",
-				       lapb->dev, frame->pf, lapb->vr);
-#endif
+				lapb_dbg(1, "(%p) S3 TX REJ(%d) R%d\n",
+					 lapb->dev, frame->pf, lapb->vr);
 				lapb->condition |= LAPB_REJECT_CONDITION;
 				lapb_send_control(lapb, LAPB_REJ, frame->pf,
 						  LAPB_RESPONSE);
@@ -555,31 +444,22 @@
 		break;
 
 	case LAPB_FRMR:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S3 RX FRMR(%d) %02X "
-		       "%02X %02X %02X %02X\n", lapb->dev, frame->pf,
-		       skb->data[0], skb->data[1], skb->data[2],
-		       skb->data[3], skb->data[4]);
-#endif
+		lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n",
+			 lapb->dev, frame->pf,
+			 skb->data[0], skb->data[1], skb->data[2],
+			 skb->data[3], skb->data[4]);
 		lapb_establish_data_link(lapb);
-#if LAPB_DEBUG > 0
-		printk(KERN_DEBUG "lapb: (%p) S3 -> S1\n", lapb->dev);
-#endif
+		lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
 		lapb_requeue_frames(lapb);
 		lapb->state = LAPB_STATE_1;
 		break;
 
 	case LAPB_ILLEGAL:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S3 RX ILLEGAL(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S3 RX ILLEGAL(%d)\n", lapb->dev, frame->pf);
 		lapb->frmr_data = *frame;
 		lapb->frmr_type = LAPB_FRMR_W;
 		lapb_transmit_frmr(lapb);
-#if LAPB_DEBUG > 0
-		printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
-#endif
+		lapb_dbg(0, "(%p) S3 -> S4\n", lapb->dev);
 		lapb_start_t1timer(lapb);
 		lapb_stop_t2timer(lapb);
 		lapb->state   = LAPB_STATE_4;
@@ -600,25 +480,16 @@
 {
 	switch (frame->type) {
 	case LAPB_SABM:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S4 RX SABM(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S4 RX SABM(%d)\n", lapb->dev, frame->pf);
 		if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S4 TX DM(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		} else {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev);
-#endif
+			lapb_dbg(1, "(%p) S4 TX UA(%d)\n",
+				 lapb->dev, frame->pf);
+			lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev);
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
@@ -634,18 +505,11 @@
 		break;
 
 	case LAPB_SABME:
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S4 RX SABME(%d)\n",
-		       lapb->dev, frame->pf);
-#endif
+		lapb_dbg(1, "(%p) S4 RX SABME(%d)\n", lapb->dev, frame->pf);
 		if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
-#if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev);
-#endif
+			lapb_dbg(1, "(%p) S4 TX UA(%d)\n",
+				 lapb->dev, frame->pf);
+			lapb_dbg(0, "(%p) S4 -> S3\n", lapb->dev);
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
@@ -658,10 +522,8 @@
 			lapb->va        = 0;
 			lapb_connect_indication(lapb, LAPB_OK);
 		} else {
-#if LAPB_DEBUG > 1
-			printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n",
-			       lapb->dev, frame->pf);
-#endif
+			lapb_dbg(1, "(%p) S4 TX DM(%d)\n",
+				 lapb->dev, frame->pf);
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		}
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
index baab276..ba4d015 100644
--- a/net/lapb/lapb_out.c
+++ b/net/lapb/lapb_out.c
@@ -14,6 +14,8 @@
  *	LAPB 002	Jonathan Naylor	New timer architecture.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/socket.h>
@@ -60,10 +62,8 @@
 		*frame |= lapb->vs << 1;
 	}
 
-#if LAPB_DEBUG > 1
-	printk(KERN_DEBUG "lapb: (%p) S%d TX I(%d) S%d R%d\n",
-	       lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr);
-#endif
+	lapb_dbg(1, "(%p) S%d TX I(%d) S%d R%d\n",
+		 lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr);
 
 	lapb_transmit_buffer(lapb, skb, LAPB_COMMAND);
 }
@@ -148,11 +148,9 @@
 		}
 	}
 
-#if LAPB_DEBUG > 2
-	printk(KERN_DEBUG "lapb: (%p) S%d TX %02X %02X %02X\n",
-	       lapb->dev, lapb->state,
-	       skb->data[0], skb->data[1], skb->data[2]);
-#endif
+	lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n",
+		 lapb->dev, lapb->state,
+		 skb->data[0], skb->data[1], skb->data[2]);
 
 	if (!lapb_data_transmit(lapb, skb))
 		kfree_skb(skb);
@@ -164,16 +162,10 @@
 	lapb->n2count   = 0;
 
 	if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S%d TX SABME(1)\n",
-		       lapb->dev, lapb->state);
-#endif
+		lapb_dbg(1, "(%p) S%d TX SABME(1)\n", lapb->dev, lapb->state);
 		lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
 	} else {
-#if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S%d TX SABM(1)\n",
-		       lapb->dev, lapb->state);
-#endif
+		lapb_dbg(1, "(%p) S%d TX SABM(1)\n", lapb->dev, lapb->state);
 		lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
 	}
 
@@ -183,10 +175,8 @@
 
 void lapb_enquiry_response(struct lapb_cb *lapb)
 {
-#if LAPB_DEBUG > 1
-	printk(KERN_DEBUG "lapb: (%p) S%d TX RR(1) R%d\n",
-	       lapb->dev, lapb->state, lapb->vr);
-#endif
+	lapb_dbg(1, "(%p) S%d TX RR(1) R%d\n",
+		 lapb->dev, lapb->state, lapb->vr);
 
 	lapb_send_control(lapb, LAPB_RR, LAPB_POLLON, LAPB_RESPONSE);
 
@@ -195,10 +185,8 @@
 
 void lapb_timeout_response(struct lapb_cb *lapb)
 {
-#if LAPB_DEBUG > 1
-	printk(KERN_DEBUG "lapb: (%p) S%d TX RR(0) R%d\n",
-	       lapb->dev, lapb->state, lapb->vr);
-#endif
+	lapb_dbg(1, "(%p) S%d TX RR(0) R%d\n",
+		 lapb->dev, lapb->state, lapb->vr);
 	lapb_send_control(lapb, LAPB_RR, LAPB_POLLOFF, LAPB_RESPONSE);
 
 	lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 066225b..9d0a426 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -13,6 +13,8 @@
  *	LAPB 001	Jonathan Naylor	Started Coding
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/socket.h>
@@ -111,11 +113,9 @@
 {
 	frame->type = LAPB_ILLEGAL;
 
-#if LAPB_DEBUG > 2
-	printk(KERN_DEBUG "lapb: (%p) S%d RX %02X %02X %02X\n",
-	       lapb->dev, lapb->state,
-	       skb->data[0], skb->data[1], skb->data[2]);
-#endif
+	lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n",
+		 lapb->dev, lapb->state,
+		 skb->data[0], skb->data[1], skb->data[2]);
 
 	/* We always need to look at 2 bytes, sometimes we need
 	 * to look at 3 and those cases are handled below.
@@ -284,12 +284,10 @@
 		dptr++;
 		*dptr++ = lapb->frmr_type;
 
-#if LAPB_DEBUG > 1
-	printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
-	       lapb->dev, lapb->state,
-	       skb->data[1], skb->data[2], skb->data[3],
-	       skb->data[4], skb->data[5]);
-#endif
+		lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
+			 lapb->dev, lapb->state,
+			 skb->data[1], skb->data[2], skb->data[3],
+			 skb->data[4], skb->data[5]);
 	} else {
 		dptr    = skb_put(skb, 4);
 		*dptr++ = LAPB_FRMR;
@@ -301,11 +299,9 @@
 		dptr++;
 		*dptr++ = lapb->frmr_type;
 
-#if LAPB_DEBUG > 1
-	printk(KERN_DEBUG "lapb: (%p) S%d TX FRMR %02X %02X %02X\n",
-	       lapb->dev, lapb->state, skb->data[1],
-	       skb->data[2], skb->data[3]);
-#endif
+		lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n",
+			 lapb->dev, lapb->state, skb->data[1],
+			 skb->data[2], skb->data[3]);
 	}
 
 	lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
diff --git a/net/lapb/lapb_timer.c b/net/lapb/lapb_timer.c
index f8cd641..54563ad 100644
--- a/net/lapb/lapb_timer.c
+++ b/net/lapb/lapb_timer.c
@@ -14,6 +14,8 @@
  *	LAPB 002	Jonathan Naylor	New timer architecture.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/socket.h>
@@ -105,21 +107,17 @@
 				lapb_clear_queues(lapb);
 				lapb->state = LAPB_STATE_0;
 				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
-				printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
-#endif
+				lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
 				return;
 			} else {
 				lapb->n2count++;
 				if (lapb->mode & LAPB_EXTENDED) {
-#if LAPB_DEBUG > 1
-					printk(KERN_DEBUG "lapb: (%p) S1 TX SABME(1)\n", lapb->dev);
-#endif
+					lapb_dbg(1, "(%p) S1 TX SABME(1)\n",
+						 lapb->dev);
 					lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
 				} else {
-#if LAPB_DEBUG > 1
-					printk(KERN_DEBUG "lapb: (%p) S1 TX SABM(1)\n", lapb->dev);
-#endif
+					lapb_dbg(1, "(%p) S1 TX SABM(1)\n",
+						 lapb->dev);
 					lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
 				}
 			}
@@ -133,15 +131,11 @@
 				lapb_clear_queues(lapb);
 				lapb->state = LAPB_STATE_0;
 				lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
-				printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
-#endif
+				lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
 				return;
 			} else {
 				lapb->n2count++;
-#if LAPB_DEBUG > 1
-				printk(KERN_DEBUG "lapb: (%p) S2 TX DISC(1)\n", lapb->dev);
-#endif
+				lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev);
 				lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
 			}
 			break;
@@ -155,9 +149,7 @@
 				lapb->state = LAPB_STATE_0;
 				lapb_stop_t2timer(lapb);
 				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
-				printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
-#endif
+				lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
 				return;
 			} else {
 				lapb->n2count++;
@@ -173,9 +165,7 @@
 				lapb_clear_queues(lapb);
 				lapb->state = LAPB_STATE_0;
 				lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
-#if LAPB_DEBUG > 0
-				printk(KERN_DEBUG "lapb: (%p) S4 -> S0\n", lapb->dev);
-#endif
+				lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev);
 				return;
 			} else {
 				lapb->n2count++;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index b9bef2c..fe5453c 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -71,8 +71,7 @@
  */
 static inline __be16 llc_proto_type(u16 arphrd)
 {
-	return arphrd == ARPHRD_IEEE802_TR ?
-			 htons(ETH_P_TR_802_2) : htons(ETH_P_802_2);
+	return htons(ETH_P_802_2);
 }
 
 /**
@@ -518,7 +517,7 @@
 	if (sock_flag(sk, SOCK_ZAPPED))
 		goto out;
 	rc = 0;
-	if (!(unsigned)backlog)	/* BSDism */
+	if (!(unsigned int)backlog)	/* BSDism */
 		backlog = 1;
 	sk->sk_max_ack_backlog = backlog;
 	if (sk->sk_state != TCP_LISTEN) {
@@ -806,10 +805,9 @@
 			sk_wait_data(sk, &timeo);
 
 		if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
-			if (net_ratelimit())
-				printk(KERN_DEBUG "LLC(%s:%d): Application "
-						  "bug, race in MSG_PEEK.\n",
-				       current->comm, task_pid_nr(current));
+			net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
+					    current->comm,
+					    task_pid_nr(current));
 			peek_seq = llc->copied_seq;
 		}
 		continue;
@@ -840,7 +838,7 @@
 
 		if (!(flags & MSG_PEEK)) {
 			spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
-			sk_eat_skb(sk, skb, 0);
+			sk_eat_skb(sk, skb, false);
 			spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 			*seq = 0;
 		}
@@ -863,7 +861,7 @@
 
 	if (!(flags & MSG_PEEK)) {
 			spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
-			sk_eat_skb(sk, skb, 0);
+			sk_eat_skb(sk, skb, false);
 			spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
 			*seq = 0;
 	}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index ba137a6..0d0d416 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -828,7 +828,7 @@
 	else {
 		dprintk("%s: adding to backlog...\n", __func__);
 		llc_set_backlog_type(skb, LLC_PACKET);
-		if (sk_add_backlog(sk, skb))
+		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 			goto drop_unlock;
 	}
 out:
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index b658cba..2dae8a5 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -14,9 +14,7 @@
  */
 
 #include <linux/if_arp.h>
-#include <linux/if_tr.h>
 #include <linux/netdevice.h>
-#include <linux/trdevice.h>
 #include <linux/skbuff.h>
 #include <linux/export.h>
 #include <net/llc.h>
@@ -37,7 +35,6 @@
 	int rc = -EINVAL;
 
 	switch (skb->dev->type) {
-	case ARPHRD_IEEE802_TR:
 	case ARPHRD_ETHER:
 	case ARPHRD_LOOPBACK:
 		rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 94e7fca..7c5073b 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -31,10 +31,6 @@
 	case ARPHRD_ETHER:
 	case ARPHRD_LOOPBACK:
 		return sizeof(struct ethhdr);
-#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
-	case ARPHRD_IEEE802_TR:
-		return sizeof(struct trh_hdr);
-#endif
 	}
 	return 0;
 }
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index e2ebe35..d75306b 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/sysctl.h>
+#include <net/net_namespace.h>
 #include <net/llc.h>
 
 #ifndef CONFIG_SYSCTL
@@ -56,48 +57,29 @@
 	{ },
 };
 
-static struct ctl_table llc2_dir_timeout_table[] = {
-	{
-		.procname	= "timeout",
-		.mode		= 0555,
-		.child		= llc2_timeout_table,
-	},
-	{ },
-};
-
-static struct ctl_table llc_table[] = {
-	{
-		.procname	= "llc2",
-		.mode		= 0555,
-		.child		= llc2_dir_timeout_table,
-	},
-	{
-		.procname       = "station",
-		.mode           = 0555,
-		.child          = llc_station_table,
-	},
-	{ },
-};
-
-static struct ctl_path llc_path[] = {
-	{ .procname = "net", },
-	{ .procname = "llc", },
-	{ }
-};
-
-static struct ctl_table_header *llc_table_header;
+static struct ctl_table_header *llc2_timeout_header;
+static struct ctl_table_header *llc_station_header;
 
 int __init llc_sysctl_init(void)
 {
-	llc_table_header = register_sysctl_paths(llc_path, llc_table);
+	llc2_timeout_header = register_net_sysctl(&init_net, "net/llc/llc2/timeout", llc2_timeout_table);
+	llc_station_header = register_net_sysctl(&init_net, "net/llc/station", llc_station_table);
 
-	return llc_table_header ? 0 : -ENOMEM;
+	if (!llc2_timeout_header || !llc_station_header) {
+		llc_sysctl_exit();
+		return -ENOMEM;
+	}
+	return 0;
 }
 
 void llc_sysctl_exit(void)
 {
-	if (llc_table_header) {
-		unregister_sysctl_table(llc_table_header);
-		llc_table_header = NULL;
+	if (llc2_timeout_header) {
+		unregister_net_sysctl_table(llc2_timeout_header);
+		llc2_timeout_header = NULL;
+	}
+	if (llc_station_header) {
+		unregister_net_sysctl_table(llc_station_header);
+		llc_station_header = NULL;
 	}
 }
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 96ddb72..8d249d7 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -225,6 +225,17 @@
 
 	  Do not select this option.
 
+config MAC80211_VERBOSE_MESH_SYNC_DEBUG
+	bool "Verbose mesh mesh synchronization debugging"
+	depends on MAC80211_DEBUG_MENU
+	depends on MAC80211_MESH
+	---help---
+	  Selecting this option causes mac80211 to print out very verbose mesh
+	  synchronization debugging messages (when mac80211 is taking part in a
+	  mesh network).
+
+	  Do not select this option.
+
 config MAC80211_VERBOSE_TDLS_DEBUG
 	bool "Verbose TDLS debugging"
 	depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 1be7a45..3e9d931 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -38,7 +38,8 @@
 	mesh.o \
 	mesh_pathtbl.o \
 	mesh_plink.o \
-	mesh_hwmp.o
+	mesh_hwmp.o \
+	mesh_sync.o
 
 mac80211-$(CONFIG_PM) += pm.o
 
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 64d3ce5..26ddb69 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -142,6 +142,18 @@
 	u8 *timer_to_id = ptid - *ptid;
 	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
 					 timer_to_tid[0]);
+	struct tid_ampdu_rx *tid_rx;
+	unsigned long timeout;
+
+	tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
+	if (!tid_rx)
+		return;
+
+	timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
+	if (time_is_after_jiffies(timeout)) {
+		mod_timer(&tid_rx->session_timer, timeout);
+		return;
+	}
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
@@ -248,11 +260,8 @@
 	    (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
 		status = WLAN_STATUS_INVALID_QOS_PARAM;
 #ifdef CONFIG_MAC80211_HT_DEBUG
-		if (net_ratelimit())
-			printk(KERN_DEBUG "AddBA Req with bad params from "
-				"%pM on tid %u. policy %d, buffer size %d\n",
-				mgmt->sa, tid, ba_policy,
-				buf_size);
+		net_dbg_ratelimited("AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
+				    mgmt->sa, tid, ba_policy, buf_size);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 		goto end_no_lock;
 	}
@@ -269,10 +278,8 @@
 
 	if (sta->ampdu_mlme.tid_rx[tid]) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
-		if (net_ratelimit())
-			printk(KERN_DEBUG "unexpected AddBA Req from "
-				"%pM on tid %u\n",
-				mgmt->sa, tid);
+		net_dbg_ratelimited("unexpected AddBA Req from %pM on tid %u\n",
+				    mgmt->sa, tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
 		/* delete existing Rx BA session on the same tid */
@@ -291,7 +298,7 @@
 	/* rx timer */
 	tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
 	tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
-	init_timer(&tid_agg_rx->session_timer);
+	init_timer_deferrable(&tid_agg_rx->session_timer);
 
 	/* rx reorder timer */
 	tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired;
@@ -335,8 +342,10 @@
 	/* activate it for RX */
 	rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
 
-	if (timeout)
+	if (timeout) {
 		mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
+		tid_agg_rx->last_rx = jiffies;
+	}
 
 end:
 	mutex_unlock(&sta->ampdu_mlme.mtx);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 76be6174..5b7053c 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -286,25 +286,25 @@
  * a global "agg_queue_stop" refcount.
  */
 static void __acquires(agg_queue)
-ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
+ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
 {
-	int queue = ieee80211_ac_from_tid(tid);
+	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
 
-	if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
+	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
 		ieee80211_stop_queue_by_reason(
-			&local->hw, queue,
+			&sdata->local->hw, queue,
 			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 	__acquire(agg_queue);
 }
 
 static void __releases(agg_queue)
-ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
+ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
 {
-	int queue = ieee80211_ac_from_tid(tid);
+	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
 
-	if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
+	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
 		ieee80211_wake_queue_by_reason(
-			&local->hw, queue,
+			&sdata->local->hw, queue,
 			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 	__release(agg_queue);
 }
@@ -314,13 +314,14 @@
  * requires a call to ieee80211_agg_splice_finish later
  */
 static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
 			     struct tid_ampdu_tx *tid_tx, u16 tid)
 {
-	int queue = ieee80211_ac_from_tid(tid);
+	struct ieee80211_local *local = sdata->local;
+	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
 	unsigned long flags;
 
-	ieee80211_stop_queue_agg(local, tid);
+	ieee80211_stop_queue_agg(sdata, tid);
 
 	if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
 			  " from the pending queue\n", tid))
@@ -336,9 +337,9 @@
 }
 
 static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
 {
-	ieee80211_wake_queue_agg(local, tid);
+	ieee80211_wake_queue_agg(sdata, tid);
 }
 
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
@@ -376,9 +377,9 @@
 					" tid %d\n", tid);
 #endif
 		spin_lock_bh(&sta->lock);
-		ieee80211_agg_splice_packets(local, tid_tx, tid);
+		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
 		ieee80211_assign_tid_tx(sta, tid, NULL);
-		ieee80211_agg_splice_finish(local, tid);
+		ieee80211_agg_splice_finish(sdata, tid);
 		spin_unlock_bh(&sta->lock);
 
 		kfree_rcu(tid_tx, rcu_head);
@@ -417,6 +418,18 @@
 	u8 *timer_to_id = ptid - *ptid;
 	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
 					 timer_to_tid[0]);
+	struct tid_ampdu_tx *tid_tx;
+	unsigned long timeout;
+
+	tid_tx = rcu_dereference_protected_tid_tx(sta, *ptid);
+	if (!tid_tx)
+		return;
+
+	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
+	if (time_is_after_jiffies(timeout)) {
+		mod_timer(&tid_tx->session_timer, timeout);
+		return;
+	}
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
@@ -542,7 +555,7 @@
 	/* tx timer */
 	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
 	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
-	init_timer(&tid_tx->session_timer);
+	init_timer_deferrable(&tid_tx->session_timer);
 
 	/* assign a dialog token */
 	sta->ampdu_mlme.dialog_token_allocator++;
@@ -586,14 +599,14 @@
 	 */
 	spin_lock_bh(&sta->lock);
 
-	ieee80211_agg_splice_packets(local, tid_tx, tid);
+	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
 	/*
 	 * Now mark as operational. This will be visible
 	 * in the TX path, and lets it go lock-free in
 	 * the common case.
 	 */
 	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
-	ieee80211_agg_splice_finish(local, tid);
+	ieee80211_agg_splice_finish(sta->sdata, tid);
 
 	spin_unlock_bh(&sta->lock);
 }
@@ -778,12 +791,12 @@
 	 * more.
 	 */
 
-	ieee80211_agg_splice_packets(local, tid_tx, tid);
+	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
 
 	/* future packets must not find the tid_tx struct any more */
 	ieee80211_assign_tid_tx(sta, tid, NULL);
 
-	ieee80211_agg_splice_finish(local, tid);
+	ieee80211_agg_splice_finish(sta->sdata, tid);
 
 	kfree_rcu(tid_tx, rcu_head);
 
@@ -884,9 +897,11 @@
 
 		sta->ampdu_mlme.addba_req_num[tid] = 0;
 
-		if (tid_tx->timeout)
+		if (tid_tx->timeout) {
 			mod_timer(&tid_tx->session_timer,
 				  TU_TO_EXP_TIME(tid_tx->timeout));
+			tid_tx->last_tx = jiffies;
+		}
 
 	} else {
 		___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 677d659..495831e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -412,6 +412,10 @@
 		sinfo->llid = le16_to_cpu(sta->llid);
 		sinfo->plid = le16_to_cpu(sta->plid);
 		sinfo->plink_state = sta->plink_state;
+		if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
+			sinfo->filled |= STATION_INFO_T_OFFSET;
+			sinfo->t_offset = sta->t_offset;
+		}
 #endif
 	}
 
@@ -446,6 +450,180 @@
 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
 }
 
+static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
+	"rx_packets", "rx_bytes", "wep_weak_iv_count",
+	"rx_duplicates", "rx_fragments", "rx_dropped",
+	"tx_packets", "tx_bytes", "tx_fragments",
+	"tx_filtered", "tx_retry_failed", "tx_retries",
+	"beacon_loss", "sta_state", "txrate", "rxrate", "signal",
+	"channel", "noise", "ch_time", "ch_time_busy",
+	"ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
+};
+#define STA_STATS_LEN	ARRAY_SIZE(ieee80211_gstrings_sta_stats)
+
+static int ieee80211_get_et_sset_count(struct wiphy *wiphy,
+				       struct net_device *dev,
+				       int sset)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	int rv = 0;
+
+	if (sset == ETH_SS_STATS)
+		rv += STA_STATS_LEN;
+
+	rv += drv_get_et_sset_count(sdata, sset);
+
+	if (rv == 0)
+		return -EOPNOTSUPP;
+	return rv;
+}
+
+static void ieee80211_get_et_stats(struct wiphy *wiphy,
+				   struct net_device *dev,
+				   struct ethtool_stats *stats,
+				   u64 *data)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct sta_info *sta;
+	struct ieee80211_local *local = sdata->local;
+	struct station_info sinfo;
+	struct survey_info survey;
+	int i, q;
+#define STA_STATS_SURVEY_LEN 7
+
+	memset(data, 0, sizeof(u64) * STA_STATS_LEN);
+
+#define ADD_STA_STATS(sta)				\
+	do {						\
+		data[i++] += sta->rx_packets;		\
+		data[i++] += sta->rx_bytes;		\
+		data[i++] += sta->wep_weak_iv_count;	\
+		data[i++] += sta->num_duplicates;	\
+		data[i++] += sta->rx_fragments;		\
+		data[i++] += sta->rx_dropped;		\
+							\
+		data[i++] += sta->tx_packets;		\
+		data[i++] += sta->tx_bytes;		\
+		data[i++] += sta->tx_fragments;		\
+		data[i++] += sta->tx_filtered_count;	\
+		data[i++] += sta->tx_retry_failed;	\
+		data[i++] += sta->tx_retry_count;	\
+		data[i++] += sta->beacon_loss_count;	\
+	} while (0)
+
+	/* For Managed stations, find the single station based on BSSID
+	 * and use that.  For interface types, iterate through all available
+	 * stations and add stats for any station that is assigned to this
+	 * network device.
+	 */
+
+	rcu_read_lock();
+
+	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+		sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid);
+
+		if (!(sta && !WARN_ON(sta->sdata->dev != dev)))
+			goto do_survey;
+
+		i = 0;
+		ADD_STA_STATS(sta);
+
+		data[i++] = sta->sta_state;
+
+		sinfo.filled = 0;
+		sta_set_sinfo(sta, &sinfo);
+
+		if (sinfo.filled & STATION_INFO_TX_BITRATE)
+			data[i] = 100000 *
+				cfg80211_calculate_bitrate(&sinfo.txrate);
+		i++;
+		if (sinfo.filled & STATION_INFO_RX_BITRATE)
+			data[i] = 100000 *
+				cfg80211_calculate_bitrate(&sinfo.rxrate);
+		i++;
+
+		if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
+			data[i] = (u8)sinfo.signal_avg;
+		i++;
+	} else {
+		list_for_each_entry_rcu(sta, &local->sta_list, list) {
+			/* Make sure this station belongs to the proper dev */
+			if (sta->sdata->dev != dev)
+				continue;
+
+			i = 0;
+			ADD_STA_STATS(sta);
+		}
+	}
+
+do_survey:
+	i = STA_STATS_LEN - STA_STATS_SURVEY_LEN;
+	/* Get survey stats for current channel */
+	q = 0;
+	while (true) {
+		survey.filled = 0;
+		if (drv_get_survey(local, q, &survey) != 0) {
+			survey.filled = 0;
+			break;
+		}
+
+		if (survey.channel &&
+		    (local->oper_channel->center_freq ==
+		     survey.channel->center_freq))
+			break;
+		q++;
+	}
+
+	if (survey.filled)
+		data[i++] = survey.channel->center_freq;
+	else
+		data[i++] = 0;
+	if (survey.filled & SURVEY_INFO_NOISE_DBM)
+		data[i++] = (u8)survey.noise;
+	else
+		data[i++] = -1LL;
+	if (survey.filled & SURVEY_INFO_CHANNEL_TIME)
+		data[i++] = survey.channel_time;
+	else
+		data[i++] = -1LL;
+	if (survey.filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
+		data[i++] = survey.channel_time_busy;
+	else
+		data[i++] = -1LL;
+	if (survey.filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
+		data[i++] = survey.channel_time_ext_busy;
+	else
+		data[i++] = -1LL;
+	if (survey.filled & SURVEY_INFO_CHANNEL_TIME_RX)
+		data[i++] = survey.channel_time_rx;
+	else
+		data[i++] = -1LL;
+	if (survey.filled & SURVEY_INFO_CHANNEL_TIME_TX)
+		data[i++] = survey.channel_time_tx;
+	else
+		data[i++] = -1LL;
+
+	rcu_read_unlock();
+
+	if (WARN_ON(i != STA_STATS_LEN))
+		return;
+
+	drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN]));
+}
+
+static void ieee80211_get_et_strings(struct wiphy *wiphy,
+				     struct net_device *dev,
+				     u32 sset, u8 *data)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	int sz_sta_stats = 0;
+
+	if (sset == ETH_SS_STATS) {
+		sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats);
+		memcpy(data, *ieee80211_gstrings_sta_stats, sz_sta_stats);
+	}
+	drv_get_et_strings(sdata, sset, &(data[sz_sta_stats]));
+}
 
 static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
 				 int idx, u8 *mac, struct station_info *sinfo)
@@ -640,6 +818,10 @@
 
 	ieee80211_bss_info_change_notify(sdata, changed);
 
+	netif_carrier_on(dev);
+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+		netif_carrier_on(vlan->dev);
+
 	return 0;
 }
 
@@ -665,7 +847,7 @@
 
 static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 {
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_sub_if_data *sdata, *vlan;
 	struct beacon_data *old;
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -674,6 +856,10 @@
 	if (!old)
 		return -ENOENT;
 
+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+		netif_carrier_off(vlan->dev);
+	netif_carrier_off(dev);
+
 	RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
 
 	kfree_rcu(old, rcu_head);
@@ -907,7 +1093,7 @@
 	} else
 		sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	if (compare_ether_addr(mac, sdata->vif.addr) == 0)
+	if (ether_addr_equal(mac, sdata->vif.addr))
 		return -EINVAL;
 
 	if (is_multicast_ether_addr(mac))
@@ -993,6 +1179,9 @@
 	}
 
 	if (params->vlan && params->vlan != sta->sdata->dev) {
+		bool prev_4addr = false;
+		bool new_4addr = false;
+
 		vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
 
 		if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
@@ -1008,9 +1197,25 @@
 			}
 
 			rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
+			new_4addr = true;
+		}
+
+		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+		    sta->sdata->u.vlan.sta) {
+			rcu_assign_pointer(sta->sdata->u.vlan.sta, NULL);
+			prev_4addr = true;
 		}
 
 		sta->sdata = vlansdata;
+
+		if (sta->sta_state == IEEE80211_STA_AUTHORIZED &&
+		    prev_4addr != new_4addr) {
+			if (new_4addr)
+				atomic_dec(&sta->sdata->bss->num_mcast_sta);
+			else
+				atomic_inc(&sta->sdata->bss->num_mcast_sta);
+		}
+
 		ieee80211_send_layer2_update(sta);
 	}
 
@@ -1235,6 +1440,7 @@
 	/* now copy the rest of the setup parameters */
 	ifmsh->mesh_id_len = setup->mesh_id_len;
 	memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len);
+	ifmsh->mesh_sp_id = setup->sync_method;
 	ifmsh->mesh_pp_id = setup->path_sel_proto;
 	ifmsh->mesh_pm_id = setup->path_metric;
 	ifmsh->security = IEEE80211_MESH_SEC_NONE;
@@ -1279,6 +1485,9 @@
 		conf->dot11MeshTTL = nconf->element_ttl;
 	if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask))
 		conf->auto_open_plinks = nconf->auto_open_plinks;
+	if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask))
+		conf->dot11MeshNbrOffsetMaxNeighbor =
+			nconf->dot11MeshNbrOffsetMaxNeighbor;
 	if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask))
 		conf->dot11MeshHWMPmaxPREQretries =
 			nconf->dot11MeshHWMPmaxPREQretries;
@@ -1329,6 +1538,11 @@
 			return -ENOTSUPP;
 		conf->rssi_threshold = nconf->rssi_threshold;
 	}
+	if (_chg_mesh_attr(NL80211_MESHCONF_HT_OPMODE, mask)) {
+		conf->ht_opmode = nconf->ht_opmode;
+		sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode;
+		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+	}
 	return 0;
 }
 
@@ -1437,6 +1651,9 @@
 	if (!local->ops->conf_tx)
 		return -EOPNOTSUPP;
 
+	if (local->hw.queues < IEEE80211_NUM_ACS)
+		return -EOPNOTSUPP;
+
 	memset(&p, 0, sizeof(p));
 	p.aifs = params->aifs;
 	p.cw_max = params->cwmax;
@@ -1449,14 +1666,11 @@
 	 */
 	p.uapsd = false;
 
-	if (params->queue >= local->hw.queues)
-		return -EINVAL;
-
-	sdata->tx_conf[params->queue] = p;
-	if (drv_conf_tx(local, sdata, params->queue, &p)) {
+	sdata->tx_conf[params->ac] = p;
+	if (drv_conf_tx(local, sdata, params->ac, &p)) {
 		wiphy_debug(local->hw.wiphy,
-			    "failed to set TX queue parameters for queue %d\n",
-			    params->queue);
+			    "failed to set TX queue parameters for AC %d\n",
+			    params->ac);
 		return -EINVAL;
 	}
 
@@ -2090,6 +2304,10 @@
 
 	IEEE80211_SKB_CB(skb)->flags = flags;
 
+	if (flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+		IEEE80211_SKB_CB(skb)->hw_queue =
+			local->hw.offchannel_tx_hw_queue;
+
 	skb->dev = sdata->dev;
 
 	*cookie = (unsigned long) skb;
@@ -2131,6 +2349,8 @@
 		/* modify cookie to prevent API mismatches */
 		*cookie ^= 2;
 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+		IEEE80211_SKB_CB(skb)->hw_queue =
+			local->hw.offchannel_tx_hw_queue;
 		local->hw_roc_skb = skb;
 		local->hw_roc_skb_for_status = skb;
 		mutex_unlock(&local->mtx);
@@ -2350,8 +2570,8 @@
 		tf->u.setup_req.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(&sdata->vif, skb);
-		ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+		ieee80211_add_srates_ie(&sdata->vif, skb, false);
+		ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	case WLAN_TDLS_SETUP_RESPONSE:
@@ -2364,8 +2584,8 @@
 		tf->u.setup_resp.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(&sdata->vif, skb);
-		ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+		ieee80211_add_srates_ie(&sdata->vif, skb, false);
+		ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	case WLAN_TDLS_SETUP_CONFIRM:
@@ -2425,8 +2645,8 @@
 		mgmt->u.action.u.tdls_discover_resp.capability =
 			cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
 
-		ieee80211_add_srates_ie(&sdata->vif, skb);
-		ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+		ieee80211_add_srates_ie(&sdata->vif, skb, false);
+		ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
 		ieee80211_tdls_add_ext_capab(skb);
 		break;
 	default:
@@ -2666,13 +2886,22 @@
 }
 
 static struct ieee80211_channel *
-ieee80211_wiphy_get_channel(struct wiphy *wiphy)
+ieee80211_wiphy_get_channel(struct wiphy *wiphy,
+			    enum nl80211_channel_type *type)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
 
+	*type = local->_oper_channel_type;
 	return local->oper_channel;
 }
 
+#ifdef CONFIG_PM
+static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled)
+{
+	drv_set_wakeup(wiphy_priv(wiphy), enabled);
+}
+#endif
+
 struct cfg80211_ops mac80211_config_ops = {
 	.add_virtual_intf = ieee80211_add_iface,
 	.del_virtual_intf = ieee80211_del_iface,
@@ -2741,4 +2970,10 @@
 	.probe_client = ieee80211_probe_client,
 	.get_channel = ieee80211_wiphy_get_channel,
 	.set_noack_map = ieee80211_set_noack_map,
+#ifdef CONFIG_PM
+	.set_wakeup = ieee80211_set_wakeup,
+#endif
+	.get_et_sset_count = ieee80211_get_et_sset_count,
+	.get_et_stats = ieee80211_get_et_stats,
+	.get_et_strings = ieee80211_get_et_strings,
 };
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e00ce8c..c76cf72 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -135,29 +135,3 @@
 
 	return result;
 }
-
-/*
- * ieee80211_get_tx_channel_type returns the channel type we should
- * use for packet transmission, given the channel capability and
- * whatever regulatory flags we have been given.
- */
-enum nl80211_channel_type ieee80211_get_tx_channel_type(
-				struct ieee80211_local *local,
-				enum nl80211_channel_type channel_type)
-{
-	switch (channel_type) {
-	case NL80211_CHAN_HT40PLUS:
-		if (local->hw.conf.channel->flags &
-				IEEE80211_CHAN_NO_HT40PLUS)
-			return NL80211_CHAN_HT20;
-		break;
-	case NL80211_CHAN_HT40MINUS:
-		if (local->hw.conf.channel->flags &
-				IEEE80211_CHAN_NO_HT40MINUS)
-			return NL80211_CHAN_HT20;
-		break;
-	default:
-		break;
-	}
-	return channel_type;
-}
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 30f99c3..ea0122d 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -394,7 +394,7 @@
 __IEEE80211_IF_FILE_W(uapsd_max_sp_len);
 
 /* AP attributes */
-IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC);
+IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC);
 IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
 IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
 
@@ -424,6 +424,7 @@
 	struct ieee80211_local *local = sdata->local;
 	unsigned long long tsf;
 	int ret;
+	int tsf_is_delta = 0;
 
 	if (strncmp(buf, "reset", 5) == 0) {
 		if (local->ops->reset_tsf) {
@@ -431,9 +432,20 @@
 			wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
 		}
 	} else {
+		if (buflen > 10 && buf[1] == '=') {
+			if (buf[0] == '+')
+				tsf_is_delta = 1;
+			else if (buf[0] == '-')
+				tsf_is_delta = -1;
+			else
+				return -EINVAL;
+			buf += 2;
+		}
 		ret = kstrtoull(buf, 10, &tsf);
 		if (ret < 0)
 			return -EINVAL;
+		if (tsf_is_delta)
+			tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf;
 		if (local->ops->set_tsf) {
 			drv_set_tsf(local, sdata, tsf);
 			wiphy_info(local->hw.wiphy,
@@ -499,26 +511,23 @@
 IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
 #endif
 
-
-#define DEBUGFS_ADD(name) \
-	debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
-			    sdata, &name##_ops);
-
 #define DEBUGFS_ADD_MODE(name, mode) \
 	debugfs_create_file(#name, mode, sdata->debugfs.dir, \
 			    sdata, &name##_ops);
 
-static void add_sta_files(struct ieee80211_sub_if_data *sdata)
+#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400)
+
+static void add_common_files(struct ieee80211_sub_if_data *sdata)
 {
 	DEBUGFS_ADD(drop_unencrypted);
-	DEBUGFS_ADD(flags);
-	DEBUGFS_ADD(state);
-	DEBUGFS_ADD(channel_type);
 	DEBUGFS_ADD(rc_rateidx_mask_2ghz);
 	DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 	DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
 	DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
+}
 
+static void add_sta_files(struct ieee80211_sub_if_data *sdata)
+{
 	DEBUGFS_ADD(bssid);
 	DEBUGFS_ADD(aid);
 	DEBUGFS_ADD(last_beacon);
@@ -531,16 +540,7 @@
 
 static void add_ap_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_ADD(drop_unencrypted);
-	DEBUGFS_ADD(flags);
-	DEBUGFS_ADD(state);
-	DEBUGFS_ADD(channel_type);
-	DEBUGFS_ADD(rc_rateidx_mask_2ghz);
-	DEBUGFS_ADD(rc_rateidx_mask_5ghz);
-	DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
-	DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
-
-	DEBUGFS_ADD(num_sta_authorized);
+	DEBUGFS_ADD(num_mcast_sta);
 	DEBUGFS_ADD(num_sta_ps);
 	DEBUGFS_ADD(dtim_count);
 	DEBUGFS_ADD(num_buffered_multicast);
@@ -549,48 +549,14 @@
 
 static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_ADD(channel_type);
-	DEBUGFS_ADD(rc_rateidx_mask_2ghz);
-	DEBUGFS_ADD(rc_rateidx_mask_5ghz);
-	DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
-	DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
-
 	DEBUGFS_ADD_MODE(tsf, 0600);
 }
 
 static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 {
-	DEBUGFS_ADD(drop_unencrypted);
-	DEBUGFS_ADD(flags);
-	DEBUGFS_ADD(state);
-	DEBUGFS_ADD(channel_type);
-	DEBUGFS_ADD(rc_rateidx_mask_2ghz);
-	DEBUGFS_ADD(rc_rateidx_mask_5ghz);
-	DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
-	DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
-
 	DEBUGFS_ADD(peer);
 }
 
-static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
-{
-	DEBUGFS_ADD(drop_unencrypted);
-	DEBUGFS_ADD(flags);
-	DEBUGFS_ADD(state);
-	DEBUGFS_ADD(channel_type);
-	DEBUGFS_ADD(rc_rateidx_mask_2ghz);
-	DEBUGFS_ADD(rc_rateidx_mask_5ghz);
-	DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
-	DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
-}
-
-static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
-{
-	DEBUGFS_ADD(flags);
-	DEBUGFS_ADD(state);
-	DEBUGFS_ADD(channel_type);
-}
-
 #ifdef CONFIG_MAC80211_MESH
 
 static void add_mesh_files(struct ieee80211_sub_if_data *sdata)
@@ -651,6 +617,13 @@
 	if (!sdata->debugfs.dir)
 		return;
 
+	DEBUGFS_ADD(flags);
+	DEBUGFS_ADD(state);
+	DEBUGFS_ADD(channel_type);
+
+	if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+		add_common_files(sdata);
+
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_MESH_POINT:
 #ifdef CONFIG_MAC80211_MESH
@@ -671,12 +644,6 @@
 	case NL80211_IFTYPE_WDS:
 		add_wds_files(sdata);
 		break;
-	case NL80211_IFTYPE_MONITOR:
-		add_monitor_files(sdata);
-		break;
-	case NL80211_IFTYPE_AP_VLAN:
-		add_vlan_files(sdata);
-		break;
 	default:
 		break;
 	}
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 832b2da..5ccec2c 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,7 +63,7 @@
 	test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
 
 	int res = scnprintf(buf, sizeof(buf),
-			    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
 			    TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
 			    TEST(PS_DRIVER), TEST(AUTHORIZED),
 			    TEST(SHORT_PREAMBLE),
@@ -71,7 +71,8 @@
 			    TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
 			    TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
 			    TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
-			    TEST(INSERTED), TEST(RATE_CONTROL));
+			    TEST(INSERTED), TEST(RATE_CONTROL),
+			    TEST(TOFFSET_KNOWN));
 #undef TEST
 	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
 }
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index af4691f..6d33a0c 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -7,7 +7,9 @@
 
 static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
 {
-	WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER));
+	WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
+	     "%s:  Failed check-sdata-in-driver check, flags: 0x%x\n",
+	     sdata->dev->name, sdata->flags);
 }
 
 static inline struct ieee80211_sub_if_data *
@@ -33,6 +35,43 @@
 	local->ops->tx_frags(&local->hw, vif, sta, skbs);
 }
 
+static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
+				      u32 sset, u8 *data)
+{
+	struct ieee80211_local *local = sdata->local;
+	if (local->ops->get_et_strings) {
+		trace_drv_get_et_strings(local, sset);
+		local->ops->get_et_strings(&local->hw, &sdata->vif, sset, data);
+		trace_drv_return_void(local);
+	}
+}
+
+static inline void drv_get_et_stats(struct ieee80211_sub_if_data *sdata,
+				    struct ethtool_stats *stats,
+				    u64 *data)
+{
+	struct ieee80211_local *local = sdata->local;
+	if (local->ops->get_et_stats) {
+		trace_drv_get_et_stats(local);
+		local->ops->get_et_stats(&local->hw, &sdata->vif, stats, data);
+		trace_drv_return_void(local);
+	}
+}
+
+static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata,
+					int sset)
+{
+	struct ieee80211_local *local = sdata->local;
+	int rv = 0;
+	if (local->ops->get_et_sset_count) {
+		trace_drv_get_et_sset_count(local, sset);
+		rv = local->ops->get_et_sset_count(&local->hw, &sdata->vif,
+						   sset);
+		trace_drv_return_int(local, rv);
+	}
+	return rv;
+}
+
 static inline int drv_start(struct ieee80211_local *local)
 {
 	int ret;
@@ -89,6 +128,19 @@
 	trace_drv_return_int(local, ret);
 	return ret;
 }
+
+static inline void drv_set_wakeup(struct ieee80211_local *local,
+				  bool enabled)
+{
+	might_sleep();
+
+	if (!local->ops->set_wakeup)
+		return;
+
+	trace_drv_set_wakeup(local, enabled);
+	local->ops->set_wakeup(&local->hw, enabled);
+	trace_drv_return_void(local);
+}
 #endif
 
 static inline int drv_add_interface(struct ieee80211_local *local,
@@ -99,7 +151,8 @@
 	might_sleep();
 
 	if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
-		    sdata->vif.type == NL80211_IFTYPE_MONITOR))
+		    (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+		     !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))))
 		return -EINVAL;
 
 	trace_drv_add_interface(local, sdata);
@@ -474,8 +527,23 @@
 	return ret;
 }
 
+static inline void drv_sta_rc_update(struct ieee80211_local *local,
+				     struct ieee80211_sub_if_data *sdata,
+				     struct ieee80211_sta *sta, u32 changed)
+{
+	sdata = get_bss_sdata(sdata);
+	check_sdata_in_driver(sdata);
+
+	trace_drv_sta_rc_update(local, sdata, sta, changed);
+	if (local->ops->sta_rc_update)
+		local->ops->sta_rc_update(&local->hw, &sdata->vif,
+					  sta, changed);
+
+	trace_drv_return_void(local);
+}
+
 static inline int drv_conf_tx(struct ieee80211_local *local,
-			      struct ieee80211_sub_if_data *sdata, u16 queue,
+			      struct ieee80211_sub_if_data *sdata, u16 ac,
 			      const struct ieee80211_tx_queue_params *params)
 {
 	int ret = -EOPNOTSUPP;
@@ -484,10 +552,10 @@
 
 	check_sdata_in_driver(sdata);
 
-	trace_drv_conf_tx(local, sdata, queue, params);
+	trace_drv_conf_tx(local, sdata, ac, params);
 	if (local->ops->conf_tx)
 		ret = local->ops->conf_tx(&local->hw, &sdata->vif,
-					  queue, params);
+					  ac, params);
 	trace_drv_return_int(local, ret);
 	return ret;
 }
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 21d6f52..6de00b2 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -161,6 +161,21 @@
 	TP_ARGS(local)
 );
 
+DEFINE_EVENT(local_u32_evt, drv_get_et_strings,
+	     TP_PROTO(struct ieee80211_local *local, u32 sset),
+	     TP_ARGS(local, sset)
+);
+
+DEFINE_EVENT(local_u32_evt, drv_get_et_sset_count,
+	     TP_PROTO(struct ieee80211_local *local, u32 sset),
+	     TP_ARGS(local, sset)
+);
+
+DEFINE_EVENT(local_only_evt, drv_get_et_stats,
+	     TP_PROTO(struct ieee80211_local *local),
+	     TP_ARGS(local)
+);
+
 DEFINE_EVENT(local_only_evt, drv_suspend,
 	TP_PROTO(struct ieee80211_local *local),
 	TP_ARGS(local)
@@ -171,6 +186,20 @@
 	TP_ARGS(local)
 );
 
+TRACE_EVENT(drv_set_wakeup,
+	TP_PROTO(struct ieee80211_local *local, bool enabled),
+	TP_ARGS(local, enabled),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(bool, enabled)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->enabled = enabled;
+	),
+	TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled)
+);
+
 DEFINE_EVENT(local_only_evt, drv_stop,
 	TP_PROTO(struct ieee80211_local *local),
 	TP_ARGS(local)
@@ -624,6 +653,34 @@
 	)
 );
 
+TRACE_EVENT(drv_sta_rc_update,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 struct ieee80211_sta *sta,
+		 u32 changed),
+
+	TP_ARGS(local, sdata, sta, changed),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		STA_ENTRY
+		__field(u32, changed)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		STA_ASSIGN;
+		__entry->changed = changed;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT " changed: 0x%x",
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->changed
+	)
+);
+
 TRACE_EVENT(drv_sta_add,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sub_if_data *sdata,
@@ -677,15 +734,14 @@
 TRACE_EVENT(drv_conf_tx,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sub_if_data *sdata,
-		 u16 queue,
-		 const struct ieee80211_tx_queue_params *params),
+		 u16 ac, const struct ieee80211_tx_queue_params *params),
 
-	TP_ARGS(local, sdata, queue, params),
+	TP_ARGS(local, sdata, ac, params),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		VIF_ENTRY
-		__field(u16, queue)
+		__field(u16, ac)
 		__field(u16, txop)
 		__field(u16, cw_min)
 		__field(u16, cw_max)
@@ -696,7 +752,7 @@
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
-		__entry->queue = queue;
+		__entry->ac = ac;
 		__entry->txop = params->txop;
 		__entry->cw_max = params->cw_max;
 		__entry->cw_min = params->cw_min;
@@ -705,8 +761,8 @@
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT  VIF_PR_FMT  " queue:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, __entry->queue
+		LOCAL_PR_FMT  VIF_PR_FMT  " AC:%d",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->ac
 	)
 );
 
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f25fff7..6f8615c 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -19,15 +19,6 @@
 #include "ieee80211_i.h"
 #include "rate.h"
 
-bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata)
-{
-	const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-	if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) &&
-	    !(sdata->u.mgd.ht_capa.cap_info & flg))
-		return true;
-	return false;
-}
-
 static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
 				  struct ieee80211_sta_ht_cap *ht_cap,
 				  u16 flag)
@@ -315,10 +306,10 @@
 	initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
-	if (net_ratelimit())
-		printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n",
-			mgmt->sa, initiator ? "initiator" : "recipient", tid,
-			le16_to_cpu(mgmt->u.action.u.delba.reason_code));
+	net_dbg_ratelimited("delba from %pM (%s) tid %d reason code %d\n",
+			    mgmt->sa, initiator ? "initiator" : "recipient",
+			    tid,
+			    le16_to_cpu(mgmt->u.action.u.delba.reason_code));
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
 	if (initiator == WLAN_BACK_INITIATOR)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index cef7c29..3ad33a8 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -66,7 +66,7 @@
 	skb_reset_tail_pointer(skb);
 	skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
 
-	if (compare_ether_addr(ifibss->bssid, bssid))
+	if (!ether_addr_equal(ifibss->bssid, bssid))
 		sta_info_flush(sdata->local, sdata);
 
 	/* if merging, indicate to driver that we leave the old IBSS */
@@ -160,16 +160,14 @@
 	if (channel_type && sband->ht_cap.ht_supported) {
 		pos = skb_put(skb, 4 +
 				   sizeof(struct ieee80211_ht_cap) +
-				   sizeof(struct ieee80211_ht_info));
+				   sizeof(struct ieee80211_ht_operation));
 		pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
 						sband->ht_cap.cap);
-		pos = ieee80211_ie_build_ht_info(pos,
-						 &sband->ht_cap,
-						 chan,
-						 channel_type);
+		pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
+						 chan, channel_type, 0);
 	}
 
-	if (local->hw.queues >= 4) {
+	if (local->hw.queues >= IEEE80211_NUM_ACS) {
 		pos = skb_put(skb, 9);
 		*pos++ = WLAN_EID_VENDOR_SPECIFIC;
 		*pos++ = 7; /* len */
@@ -305,9 +303,8 @@
 	 * 	allow new one to be added.
 	 */
 	if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
-		if (net_ratelimit())
-			printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
-			       sdata->name, addr);
+		net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+				    sdata->name, addr);
 		rcu_read_lock();
 		return NULL;
 	}
@@ -317,7 +314,7 @@
 		return NULL;
 	}
 
-	if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) {
+	if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) {
 		rcu_read_lock();
 		return NULL;
 	}
@@ -403,14 +400,14 @@
 		return;
 
 	if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
-	    compare_ether_addr(mgmt->bssid, sdata->u.ibss.bssid) == 0) {
+	    ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) {
 
 		rcu_read_lock();
 		sta = sta_info_get(sdata, mgmt->sa);
 
 		if (elems->supp_rates) {
 			supp_rates = ieee80211_sta_get_rates(local, elems,
-							     band);
+							     band, NULL);
 			if (sta) {
 				u32 prev_rates;
 
@@ -441,13 +438,13 @@
 		if (sta && elems->wmm_info)
 			set_sta_flag(sta, WLAN_STA_WME);
 
-		if (sta && elems->ht_info_elem && elems->ht_cap_elem &&
+		if (sta && elems->ht_operation && elems->ht_cap_elem &&
 		    sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
 			/* we both use HT */
 			struct ieee80211_sta_ht_cap sta_ht_cap_new;
 			enum nl80211_channel_type channel_type =
-				ieee80211_ht_info_to_channel_type(
-							elems->ht_info_elem);
+				ieee80211_ht_oper_to_channel_type(
+							elems->ht_operation);
 
 			ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
 							  elems->ht_cap_elem,
@@ -508,7 +505,7 @@
 		goto put_bss;
 
 	/* same BSSID */
-	if (compare_ether_addr(cbss->bssid, sdata->u.ibss.bssid) == 0)
+	if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid))
 		goto put_bss;
 
 	if (rx_status->flag & RX_FLAG_MACTIME_MPDU) {
@@ -560,7 +557,7 @@
 		       sdata->name, mgmt->bssid);
 #endif
 		ieee80211_sta_join_ibss(sdata, bss);
-		supp_rates = ieee80211_sta_get_rates(local, elems, band);
+		supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
 		ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
 				       supp_rates, true);
 		rcu_read_unlock();
@@ -584,16 +581,15 @@
 	 * 	allow new one to be added.
 	 */
 	if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
-		if (net_ratelimit())
-			printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
-			       sdata->name, addr);
+		net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+				    sdata->name, addr);
 		return;
 	}
 
 	if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
 		return;
 
-	if (compare_ether_addr(bssid, sdata->u.ibss.bssid))
+	if (!ether_addr_equal(bssid, sdata->u.ibss.bssid))
 		return;
 
 	sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -831,7 +827,7 @@
 	if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
 		return;
 
-	if (compare_ether_addr(mgmt->bssid, ifibss->bssid) != 0 &&
+	if (!ether_addr_equal(mgmt->bssid, ifibss->bssid) &&
 	    !is_broadcast_ether_addr(mgmt->bssid))
 		return;
 
@@ -1063,7 +1059,7 @@
 			    4 /* IBSS params */ +
 			    2 + (IEEE80211_MAX_SUPP_RATES - 8) +
 			    2 + sizeof(struct ieee80211_ht_cap) +
-			    2 + sizeof(struct ieee80211_ht_info) +
+			    2 + sizeof(struct ieee80211_ht_operation) +
 			    params->ie_len);
 	if (!skb)
 		return -ENOMEM;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d9798a3..3f3cd50 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -52,7 +52,8 @@
  * increased memory use (about 2 kB of RAM per entry). */
 #define IEEE80211_FRAGMENT_MAX 4
 
-#define TU_TO_EXP_TIME(x)	(jiffies + usecs_to_jiffies((x) * 1024))
+#define TU_TO_JIFFIES(x)	(usecs_to_jiffies((x) * 1024))
+#define TU_TO_EXP_TIME(x)	(jiffies + TU_TO_JIFFIES(x))
 
 #define IEEE80211_DEFAULT_UAPSD_QUEUES \
 	(IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |	\
@@ -281,7 +282,7 @@
 	u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
 	struct sk_buff_head ps_bc_buf;
 	atomic_t num_sta_ps; /* number of stations in PS mode */
-	atomic_t num_sta_authorized; /* number of authorized stations */
+	atomic_t num_mcast_sta; /* number of stations receiving multicast */
 	int dtim_count;
 	bool dtim_bc_mc;
 };
@@ -378,6 +379,7 @@
 	IEEE80211_STA_UAPSD_ENABLED	= BIT(7),
 	IEEE80211_STA_NULLFUNC_ACKED	= BIT(8),
 	IEEE80211_STA_RESET_SIGNAL_AVE	= BIT(9),
+	IEEE80211_STA_DISABLE_40MHZ	= BIT(10),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -397,7 +399,7 @@
 struct ieee80211_mgd_assoc_data {
 	struct cfg80211_bss *bss;
 	const u8 *supp_rates;
-	const u8 *ht_information_ie;
+	const u8 *ht_operation_ie;
 
 	unsigned long timeout;
 	int tries;
@@ -552,6 +554,24 @@
 	} state;
 };
 
+/**
+ * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface
+ *
+ * these declarations define the interface, which enables
+ * vendor-specific mesh synchronization
+ *
+ */
+struct ieee802_11_elems;
+struct ieee80211_mesh_sync_ops {
+	void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata,
+			     u16 stype,
+			     struct ieee80211_mgmt *mgmt,
+			     struct ieee802_11_elems *elems,
+			     struct ieee80211_rx_status *rx_status);
+	void (*adjust_tbtt)(struct ieee80211_sub_if_data *sdata);
+	/* add other framework functions here */
+};
+
 struct ieee80211_if_mesh {
 	struct timer_list housekeeping_timer;
 	struct timer_list mesh_path_timer;
@@ -600,6 +620,11 @@
 		IEEE80211_MESH_SEC_AUTHED = 0x1,
 		IEEE80211_MESH_SEC_SECURED = 0x2,
 	} security;
+	/* Extensible Synchronization Framework */
+	struct ieee80211_mesh_sync_ops *sync_ops;
+	s64 sync_offset_clockdrift_max;
+	spinlock_t sync_offset_lock;
+	bool adjusting_tbtt;
 };
 
 #ifdef CONFIG_MAC80211_MESH
@@ -666,12 +691,6 @@
 
 	char name[IFNAMSIZ];
 
-	/*
-	 * keep track of whether the HT opmode (stored in
-	 * vif.bss_info.ht_operation_mode) is valid.
-	 */
-	bool ht_opmode_valid;
-
 	/* to detect idle changes */
 	bool old_idle;
 
@@ -691,7 +710,7 @@
 	__be16 control_port_protocol;
 	bool control_port_no_encrypt;
 
-	struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
+	struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
 
 	struct work_struct work;
 	struct sk_buff_head skb_queue;
@@ -761,7 +780,6 @@
 	IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
 	IEEE80211_QUEUE_STOP_REASON_SUSPEND,
 	IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
-	IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE,
 };
 
 #ifdef CONFIG_MAC80211_LEDS
@@ -785,6 +803,8 @@
  *	well be on the operating channel
  * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
  *	determine if we are on the operating channel or not
+ * @SCAN_ONCHANNEL_SCANNING:  Do a software scan on only the current operating
+ *	channel. This should not interrupt normal traffic.
  * @SCAN_COMPLETED: Set for our scan work function when the driver reported
  *	that the scan completed.
  * @SCAN_ABORTED: Set for our scan work function when the driver reported
@@ -793,6 +813,7 @@
 enum {
 	SCAN_SW_SCANNING,
 	SCAN_HW_SCANNING,
+	SCAN_ONCHANNEL_SCANNING,
 	SCAN_COMPLETED,
 	SCAN_ABORTED,
 };
@@ -1082,6 +1103,9 @@
 	struct net_device napi_dev;
 
 	struct napi_struct napi;
+
+	/* virtual monitor interface */
+	struct ieee80211_sub_if_data __rcu *monitor_sdata;
 };
 
 static inline struct ieee80211_sub_if_data *
@@ -1117,7 +1141,7 @@
 	u8 *wmm_info;
 	u8 *wmm_param;
 	struct ieee80211_ht_cap *ht_cap_elem;
-	struct ieee80211_ht_info *ht_info_elem;
+	struct ieee80211_ht_operation *ht_operation;
 	struct ieee80211_meshconf_ie *mesh_config;
 	u8 *mesh_id;
 	u8 *peering;
@@ -1171,7 +1195,7 @@
 
 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
 {
-	return compare_ether_addr(raddr, addr) == 0 ||
+	return ether_addr_equal(raddr, addr) ||
 	       is_broadcast_ether_addr(raddr);
 }
 
@@ -1210,7 +1234,7 @@
 				  struct sk_buff *skb);
 void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
 
 /* IBSS code */
 void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -1239,6 +1263,7 @@
 int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
 			   struct cfg80211_scan_request *req);
 void ieee80211_scan_cancel(struct ieee80211_local *local);
+void ieee80211_run_deferred_scan(struct ieee80211_local *local);
 ieee80211_rx_result
 ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
 
@@ -1251,9 +1276,6 @@
 			  struct ieee802_11_elems *elems,
 			  struct ieee80211_channel *channel,
 			  bool beacon);
-struct ieee80211_bss *
-ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
-		     u8 *ssid, u8 ssid_len);
 void ieee80211_rx_bss_put(struct ieee80211_local *local,
 			  struct ieee80211_bss *bss);
 
@@ -1299,7 +1321,6 @@
 				       struct net_device *dev);
 
 /* HT */
-bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata);
 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_sta_ht_cap *ht_cap);
 void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
@@ -1383,7 +1404,7 @@
 extern void *mac80211_wiphy_privid; /* for wiphy privid */
 u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
 			enum nl80211_iftype type);
-int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
+int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
 			     int rate, int erp, int short_preamble);
 void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
 				     struct ieee80211_hdr *hdr, const u8 *tsc,
@@ -1429,13 +1450,17 @@
 				    enum queue_stop_reason reason);
 void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
 				    enum queue_stop_reason reason);
+void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
 void ieee80211_add_pending_skb(struct ieee80211_local *local,
 			       struct sk_buff *skb);
-void ieee80211_add_pending_skbs(struct ieee80211_local *local,
-				struct sk_buff_head *skbs);
 void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
 				   struct sk_buff_head *skbs,
 				   void (*fn)(void *data), void *data);
+static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+					      struct sk_buff_head *skbs)
+{
+	ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
+}
 
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
 			 u16 transaction, u16 auth_alg,
@@ -1460,7 +1485,7 @@
 				  const u8 *supp_rates);
 u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
 			    struct ieee802_11_elems *elems,
-			    enum ieee80211_band band);
+			    enum ieee80211_band band, u32 *basic_rates);
 int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
 			     enum ieee80211_smps_mode smps_mode);
 void ieee80211_recalc_smps(struct ieee80211_local *local);
@@ -1470,10 +1495,10 @@
 size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
 u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
 			      u16 cap);
-u8 *ieee80211_ie_build_ht_info(u8 *pos,
-				struct ieee80211_sta_ht_cap *ht_cap,
-				struct ieee80211_channel *channel,
-				enum nl80211_channel_type channel_type);
+u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
+			       struct ieee80211_channel *channel,
+			       enum nl80211_channel_type channel_type,
+			       u16 prot_mode);
 
 /* internal work items */
 void ieee80211_work_init(struct ieee80211_local *local);
@@ -1501,10 +1526,7 @@
 				struct ieee80211_sub_if_data *sdata,
 				enum nl80211_channel_type chantype);
 enum nl80211_channel_type
-ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info);
-enum nl80211_channel_type ieee80211_get_tx_channel_type(
-					struct ieee80211_local *local,
-					enum nl80211_channel_type channel_type);
+ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper);
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 401c01f..856237c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -127,7 +127,7 @@
 			 * The remaining checks are only performed for interfaces
 			 * with the same MAC address.
 			 */
-			if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
+			if (!ether_addr_equal(dev->dev_addr, ndev->dev_addr))
 				continue;
 
 			/*
@@ -149,6 +149,35 @@
 	return 0;
 }
 
+static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
+{
+	int n_queues = sdata->local->hw.queues;
+	int i;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
+				 IEEE80211_INVAL_HW_QUEUE))
+			return -EINVAL;
+		if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >=
+				 n_queues))
+			return -EINVAL;
+	}
+
+	if ((sdata->vif.type != NL80211_IFTYPE_AP) ||
+	    !(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) {
+		sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
+		return 0;
+	}
+
+	if (WARN_ON_ONCE(sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE))
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(sdata->vif.cab_queue >= n_queues))
+		return -EINVAL;
+
+	return 0;
+}
+
 void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
 				    const int offset)
 {
@@ -169,6 +198,81 @@
 #undef ADJUST
 }
 
+static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	int i;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+			sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
+		else
+			sdata->vif.hw_queue[i] = i;
+	}
+	sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
+}
+
+static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
+{
+	struct ieee80211_sub_if_data *sdata;
+	int ret;
+
+	if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
+		return 0;
+
+	if (local->monitor_sdata)
+		return 0;
+
+	sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
+	if (!sdata)
+		return -ENOMEM;
+
+	/* set up data */
+	sdata->local = local;
+	sdata->vif.type = NL80211_IFTYPE_MONITOR;
+	snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
+		 wiphy_name(local->hw.wiphy));
+
+	ieee80211_set_default_queues(sdata);
+
+	ret = drv_add_interface(local, sdata);
+	if (WARN_ON(ret)) {
+		/* ok .. stupid driver, it asked for this! */
+		kfree(sdata);
+		return ret;
+	}
+
+	ret = ieee80211_check_queues(sdata);
+	if (ret) {
+		kfree(sdata);
+		return ret;
+	}
+
+	rcu_assign_pointer(local->monitor_sdata, sdata);
+
+	return 0;
+}
+
+static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
+{
+	struct ieee80211_sub_if_data *sdata;
+
+	if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
+		return;
+
+	sdata = rtnl_dereference(local->monitor_sdata);
+
+	if (!sdata)
+		return;
+
+	rcu_assign_pointer(local->monitor_sdata, NULL);
+	synchronize_net();
+
+	drv_remove_interface(local, sdata);
+
+	kfree(sdata);
+}
+
 /*
  * NOTE: Be very careful when changing this function, it must NOT return
  * an error on interface type changes that have been pre-checked, so most
@@ -246,15 +350,18 @@
 		memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
 
 		if (!is_valid_ether_addr(dev->dev_addr)) {
-			if (!local->open_count)
-				drv_stop(local);
-			return -EADDRNOTAVAIL;
+			res = -EADDRNOTAVAIL;
+			goto err_stop;
 		}
 	}
 
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP_VLAN:
-		/* no need to tell driver */
+		/* no need to tell driver, but set carrier */
+		if (rtnl_dereference(sdata->bss->beacon))
+			netif_carrier_on(dev);
+		else
+			netif_carrier_off(dev);
 		break;
 	case NL80211_IFTYPE_MONITOR:
 		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
@@ -262,6 +369,12 @@
 			break;
 		}
 
+		if (local->monitors == 0 && local->open_count == 0) {
+			res = ieee80211_add_virtual_monitor(local);
+			if (res)
+				goto err_stop;
+		}
+
 		/* must be before the call to ieee80211_configure_filter */
 		local->monitors++;
 		if (local->monitors == 1) {
@@ -276,9 +389,14 @@
 		break;
 	default:
 		if (coming_up) {
+			ieee80211_del_virtual_monitor(local);
+
 			res = drv_add_interface(local, sdata);
 			if (res)
 				goto err_stop;
+			res = ieee80211_check_queues(sdata);
+			if (res)
+				goto err_del_interface;
 		}
 
 		if (sdata->vif.type == NL80211_IFTYPE_AP) {
@@ -294,7 +412,8 @@
 		ieee80211_bss_info_change_notify(sdata, changed);
 
 		if (sdata->vif.type == NL80211_IFTYPE_STATION ||
-		    sdata->vif.type == NL80211_IFTYPE_ADHOC)
+		    sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+		    sdata->vif.type == NL80211_IFTYPE_AP)
 			netif_carrier_off(dev);
 		else
 			netif_carrier_on(dev);
@@ -366,6 +485,7 @@
 	sdata->bss = NULL;
 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 		list_del(&sdata->u.vlan.list);
+	/* might already be clear but that doesn't matter */
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 	return res;
 }
@@ -486,6 +606,8 @@
 		/* free all potentially still buffered bcast frames */
 		local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
 		skb_queue_purge(&sdata->u.ap.ps_bc_buf);
+	} else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+		ieee80211_mgd_stop(sdata);
 	}
 
 	if (going_down)
@@ -506,6 +628,7 @@
 		if (local->monitors == 0) {
 			local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
 			hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
+			ieee80211_del_virtual_monitor(local);
 		}
 
 		ieee80211_adjust_monitor_flags(sdata, -1);
@@ -579,6 +702,9 @@
 		}
 	}
 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+	if (local->monitors == local->open_count && local->monitors > 0)
+		ieee80211_add_virtual_monitor(local);
 }
 
 static int ieee80211_stop(struct net_device *dev)
@@ -644,8 +770,6 @@
 
 	if (ieee80211_vif_is_mesh(&sdata->vif))
 		mesh_rmc_free(sdata);
-	else if (sdata->vif.type == NL80211_IFTYPE_STATION)
-		ieee80211_mgd_teardown(sdata);
 
 	flushed = sta_info_flush(local, sdata);
 	WARN_ON(flushed);
@@ -676,7 +800,7 @@
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_radiotap_header *rtap = (void *)skb->data;
 
-	if (local->hw.queues < 4)
+	if (local->hw.queues < IEEE80211_NUM_ACS)
 		return 0;
 
 	if (skb->len < 4 ||
@@ -907,6 +1031,18 @@
 	ieee80211_debugfs_add_netdev(sdata);
 }
 
+static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
+{
+	switch (sdata->vif.type) {
+	case NL80211_IFTYPE_MESH_POINT:
+		mesh_path_flush_by_iface(sdata);
+		break;
+
+	default:
+		break;
+	}
+}
+
 static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
 					   enum nl80211_iftype type)
 {
@@ -970,6 +1106,13 @@
 	if (ret)
 		type = sdata->vif.type;
 
+	/*
+	 * Ignore return value here, there's not much we can do since
+	 * the driver changed the interface type internally already.
+	 * The warnings will hopefully make driver authors fix it :-)
+	 */
+	ieee80211_check_queues(sdata);
+
 	ieee80211_setup_sdata(sdata, type);
 
 	err = ieee80211_do_open(sdata->dev, false);
@@ -1133,11 +1276,15 @@
 	struct net_device *ndev;
 	struct ieee80211_sub_if_data *sdata = NULL;
 	int ret, i;
+	int txqs = 1;
 
 	ASSERT_RTNL();
 
+	if (local->hw.queues >= IEEE80211_NUM_ACS)
+		txqs = IEEE80211_NUM_ACS;
+
 	ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
-				name, ieee80211_if_setup, local->hw.queues, 1);
+				name, ieee80211_if_setup, txqs, 1);
 	if (!ndev)
 		return -ENOMEM;
 	dev_net_set(ndev, wiphy_net(local->hw.wiphy));
@@ -1192,6 +1339,8 @@
 			       sizeof(sdata->rc_rateidx_mcs_mask[i]));
 	}
 
+	ieee80211_set_default_queues(sdata);
+
 	/* setup type-dependent data */
 	ieee80211_setup_sdata(sdata, type);
 
@@ -1227,8 +1376,8 @@
 	list_del_rcu(&sdata->list);
 	mutex_unlock(&sdata->local->iflist_mtx);
 
-	if (ieee80211_vif_is_mesh(&sdata->vif))
-		mesh_path_flush_by_iface(sdata);
+	/* clean up type-dependent data */
+	ieee80211_clean_sdata(sdata);
 
 	synchronize_rcu();
 	unregister_netdevice(sdata->dev);
@@ -1249,8 +1398,7 @@
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
 		list_del(&sdata->list);
 
-		if (ieee80211_vif_is_mesh(&sdata->vif))
-			mesh_path_flush_by_iface(sdata);
+		ieee80211_clean_sdata(sdata);
 
 		unregister_netdevice_queue(sdata->dev, &unreg_list);
 	}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1633648..b70f7f0 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -47,7 +47,8 @@
 	if (atomic_read(&local->iff_allmultis))
 		new_flags |= FIF_ALLMULTI;
 
-	if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning))
+	if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+	    test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning))
 		new_flags |= FIF_BCN_PRBRESP_PROMISC;
 
 	if (local->fif_probe_req || local->probe_req_reg)
@@ -148,6 +149,7 @@
 	}
 
 	if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+	    test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
 	    test_bit(SCAN_HW_SCANNING, &local->scanning))
 		power = chan->max_power;
 	else
@@ -557,8 +559,10 @@
 			WIPHY_FLAG_4ADDR_AP |
 			WIPHY_FLAG_4ADDR_STATION |
 			WIPHY_FLAG_REPORTS_OBSS |
-			WIPHY_FLAG_OFFCHAN_TX |
-			WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+			WIPHY_FLAG_OFFCHAN_TX;
+
+	if (ops->remain_on_channel)
+		wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
 	wiphy->features = NL80211_FEATURE_SK_TX_STATUS |
 			  NL80211_FEATURE_HT_IBSS;
@@ -589,6 +593,7 @@
 	local->hw.max_report_rates = 0;
 	local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
 	local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+	local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
 	local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
 	local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
 	local->user_power_level = -1;
@@ -685,6 +690,11 @@
 		WLAN_CIPHER_SUITE_AES_CMAC
 	};
 
+	if (hw->flags & IEEE80211_HW_QUEUE_CONTROL &&
+	    (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE ||
+	     local->hw.offchannel_tx_hw_queue >= local->hw.queues))
+		return -EINVAL;
+
 	if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
 #ifdef CONFIG_PM
 	    && (!local->ops->suspend || !local->ops->resume)
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index e5fbb7c..0675a2f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,9 +13,6 @@
 #include "ieee80211_i.h"
 #include "mesh.h"
 
-#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
-#define MESHCONF_CAPAB_FORWARDING    0x08
-
 #define TMR_RUNNING_HK	0
 #define TMR_RUNNING_MP	1
 #define TMR_RUNNING_MPR	2
@@ -67,16 +64,19 @@
 /**
  * mesh_matches_local - check if the config of a mesh point matches ours
  *
- * @ie: information elements of a management frame from the mesh peer
  * @sdata: local mesh subif
+ * @ie: information elements of a management frame from the mesh peer
  *
  * This function checks if the mesh configuration of a mesh point matches the
  * local mesh configuration, i.e. if both nodes belong to the same mesh network.
  */
-bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
+bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
+			struct ieee802_11_elems *ie)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_local *local = sdata->local;
+	u32 basic_rates = 0;
+	enum nl80211_channel_type sta_channel_type = NL80211_CHAN_NO_HT;
 
 	/*
 	 * As support for each feature is added, check for matching
@@ -97,10 +97,21 @@
 	     (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
 		goto mismatch;
 
-	/* disallow peering with mismatched channel types for now */
-	if (ie->ht_info_elem &&
-	    (local->_oper_channel_type !=
-	     ieee80211_ht_info_to_channel_type(ie->ht_info_elem)))
+	ieee80211_sta_get_rates(local, ie, local->oper_channel->band,
+				&basic_rates);
+
+	if (sdata->vif.bss_conf.basic_rates != basic_rates)
+		goto mismatch;
+
+	if (ie->ht_operation)
+		sta_channel_type =
+			ieee80211_ht_oper_to_channel_type(ie->ht_operation);
+
+	/* Disallow HT40+/- mismatch */
+	if (ie->ht_operation &&
+	    local->_oper_channel_type > NL80211_CHAN_HT20 &&
+	    sta_channel_type > NL80211_CHAN_HT20 &&
+	    local->_oper_channel_type != sta_channel_type)
 		goto mismatch;
 
 	return true;
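
As a hedged reading of the HT40 rule added to mesh_matches_local() above: two candidate peers are channel-compatible unless both operate with a 40 MHz channel type and their secondary-channel offsets disagree. A minimal standalone sketch under that reading (the enum and function here are hypothetical, not part of the patch):

#include <stdbool.h>

enum chan_type { CHAN_NO_HT, CHAN_HT20, CHAN_HT40MINUS, CHAN_HT40PLUS };

static bool ht40_compatible(enum chan_type local, enum chan_type peer)
{
	/* only a HT40+ vs HT40- disagreement blocks peering */
	if (local <= CHAN_HT20 || peer <= CHAN_HT20)
		return true;
	return local == peer;
}
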
@@ -204,7 +215,7 @@
 			kmem_cache_free(rm_cache, p);
 			--entries;
 		} else if ((seqnum == p->seqnum) &&
-			   (compare_ether_addr(sa, p->sa) == 0))
+			   (ether_addr_equal(sa, p->sa)))
 			return -1;
 	}
 
@@ -251,8 +262,10 @@
 	/* Mesh capability */
 	ifmsh->accepting_plinks = mesh_plink_availables(sdata);
 	*pos = MESHCONF_CAPAB_FORWARDING;
-	*pos++ |= ifmsh->accepting_plinks ?
+	*pos |= ifmsh->accepting_plinks ?
 	    MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
+	*pos++ |= ifmsh->adjusting_tbtt ?
+	    MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
 	*pos++ = 0x00;
 
 	return 0;
@@ -371,7 +384,7 @@
 	return 0;
 }
 
-int mesh_add_ht_info_ie(struct sk_buff *skb,
+int mesh_add_ht_oper_ie(struct sk_buff *skb,
 			struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
@@ -385,11 +398,12 @@
 	if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
 		return 0;
 
-	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info))
+	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
 		return -ENOMEM;
 
-	pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info));
-	ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type);
+	pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
+	ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type,
+				   sdata->vif.bss_conf.ht_operation_mode);
 
 	return 0;
 }
@@ -573,14 +587,24 @@
 	ieee80211_configure_filter(local);
 
 	ifmsh->mesh_cc_id = 0;	/* Disabled */
-	ifmsh->mesh_sp_id = 0;	/* Neighbor Offset */
 	ifmsh->mesh_auth_id = 0;	/* Disabled */
+	/* register sync ops from extensible synchronization framework */
+	ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
+	ifmsh->adjusting_tbtt = false;
+	ifmsh->sync_offset_clockdrift_max = 0;
 	set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
 	ieee80211_mesh_root_setup(ifmsh);
 	ieee80211_queue_work(&local->hw, &sdata->work);
+	sdata->vif.bss_conf.ht_operation_mode =
+				ifmsh->mshcfg.ht_opmode;
 	sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
+	sdata->vif.bss_conf.basic_rates =
+		ieee80211_mandatory_rates(sdata->local,
+					  sdata->local->hw.conf.channel->band);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
 						BSS_CHANGED_BEACON_ENABLED |
+						BSS_CHANGED_HT |
+						BSS_CHANGED_BASIC_RATES |
 						BSS_CHANGED_BEACON_INT);
 }
 
@@ -616,16 +640,16 @@
 					struct ieee80211_rx_status *rx_status)
 {
 	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee802_11_elems elems;
 	struct ieee80211_channel *channel;
-	u32 supp_rates = 0;
 	size_t baselen;
 	int freq;
 	enum ieee80211_band band = rx_status->band;
 
 	/* ignore ProbeResp to foreign address */
 	if (stype == IEEE80211_STYPE_PROBE_RESP &&
-	    compare_ether_addr(mgmt->da, sdata->vif.addr))
+	    !ether_addr_equal(mgmt->da, sdata->vif.addr))
 		return;
 
 	baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -650,10 +674,12 @@
 		return;
 
 	if (elems.mesh_id && elems.mesh_config &&
-	    mesh_matches_local(&elems, sdata)) {
-		supp_rates = ieee80211_sta_get_rates(local, &elems, band);
-		mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems);
-	}
+	    mesh_matches_local(sdata, &elems))
+		mesh_neighbour_update(sdata, mgmt->sa, &elems);
+
+	if (ifmsh->sync_ops)
+		ifmsh->sync_ops->rx_bcn_presp(sdata,
+			stype, mgmt, &elems, rx_status);
 }
 
 static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
@@ -721,6 +747,9 @@
 
 	if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags))
 		ieee80211_mesh_rootpath(sdata);
+
+	if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags))
+		mesh_sync_adjust_tbtt(sdata);
 }
 
 void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
@@ -761,4 +790,5 @@
 		    (unsigned long) sdata);
 	INIT_LIST_HEAD(&ifmsh->preq_queue.list);
 	spin_lock_init(&ifmsh->mesh_preq_queue_lock);
+	spin_lock_init(&ifmsh->sync_offset_lock);
 }
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 8d53b71..e364275 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -19,6 +19,20 @@
 /* Data structures */
 
 /**
+ * enum mesh_config_capab_flags - mesh config IE capability flags
+ *
+ * @MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
+ * additional mesh peerings with other mesh STAs
+ * @MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
+ * @MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure is ongoing
+ */
+enum mesh_config_capab_flags {
+	MESHCONF_CAPAB_ACCEPT_PLINKS = BIT(0),
+	MESHCONF_CAPAB_FORWARDING = BIT(3),
+	MESHCONF_CAPAB_TBTT_ADJUSTING = BIT(5),
+};
+
+/**
  * enum mesh_path_flags - mac80211 mesh path flags
  *
  *
@@ -56,12 +70,15 @@
  * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
  * grow
  * @MESH_WORK_ROOT: the mesh root station needs to send a frame
+ * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other
+ * mesh nodes
  */
 enum mesh_deferred_task_flags {
 	MESH_WORK_HOUSEKEEPING,
 	MESH_WORK_GROW_MPATH_TABLE,
 	MESH_WORK_GROW_MPP_TABLE,
 	MESH_WORK_ROOT,
+	MESH_WORK_DRIFT_ADJUST,
 };
 
 /**
@@ -86,6 +103,7 @@
  * mpath itself.  No need to take this lock when adding or removing
  * an mpath to a hash bucket on a path table.
  * @rann_snd_addr: the RANN sender address
+ * @rann_metric: the aggregated path metric towards the root node
  * @is_root: the destination station of this path is a root node
  * @is_gate: the destination station of this path is a mesh gate
  *
@@ -112,6 +130,7 @@
 	enum mesh_path_flags flags;
 	spinlock_t state_lock;
 	u8 rann_snd_addr[ETH_ALEN];
+	u32 rann_metric;
 	bool is_root;
 	bool is_gate;
 };
@@ -203,8 +222,8 @@
 		char *addr6);
 int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
 		struct ieee80211_sub_if_data *sdata);
-bool mesh_matches_local(struct ieee802_11_elems *ie,
-		struct ieee80211_sub_if_data *sdata);
+bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
+			struct ieee802_11_elems *ie);
 void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
 void mesh_mgmt_ies_add(struct sk_buff *skb,
 		struct ieee80211_sub_if_data *sdata);
@@ -220,7 +239,7 @@
 			  struct ieee80211_sub_if_data *sdata);
 int mesh_add_ht_cap_ie(struct sk_buff *skb,
 		       struct ieee80211_sub_if_data *sdata);
-int mesh_add_ht_info_ie(struct sk_buff *skb,
+int mesh_add_ht_oper_ie(struct sk_buff *skb,
 			struct ieee80211_sub_if_data *sdata);
 void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
@@ -232,6 +251,7 @@
 void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
 void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
+struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
 
 /* Mesh paths */
 int mesh_nexthop_lookup(struct sk_buff *skb,
@@ -256,9 +276,9 @@
 int mesh_path_send_to_gates(struct mesh_path *mpath);
 int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
 /* Mesh plinks */
-void mesh_neighbour_update(u8 *hw_addr, u32 rates,
-		struct ieee80211_sub_if_data *sdata,
-		struct ieee802_11_elems *ie);
+void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
+			   u8 *hw_addr,
+			   struct ieee802_11_elems *ie);
 bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
 void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
 void mesh_plink_broken(struct sta_info *sta);
@@ -284,7 +304,6 @@
 int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
 void mesh_path_timer(unsigned long data);
 void mesh_path_flush_by_nexthop(struct sta_info *sta);
-void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
 void mesh_path_discard_frame(struct sk_buff *skb,
 		struct ieee80211_sub_if_data *sdata);
 void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
@@ -325,6 +344,8 @@
 void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata);
 void mesh_plink_quiesce(struct sta_info *sta);
 void mesh_plink_restart(struct sta_info *sta);
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
+void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
 #else
 #define mesh_allocated	0
 static inline void
@@ -337,6 +358,8 @@
 static inline void mesh_plink_restart(struct sta_info *sta) {}
 static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
 { return false; }
+static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
+{}
 #endif
 
 #endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 1c6f3d0..27e0c2f 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -86,8 +86,8 @@
 #define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)
 
 #define MSEC_TO_TU(x) (x*1000/1024)
-#define SN_GT(x, y) ((long) (y) - (long) (x) < 0)
-#define SN_LT(x, y) ((long) (x) - (long) (y) < 0)
+#define SN_GT(x, y) ((s32)(y - x) < 0)
+#define SN_LT(x, y) ((s32)(x - y) < 0)
 
 #define net_traversal_jiffies(s) \
 	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
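
The SN_GT()/SN_LT() rewrite above switches HWMP sequence-number comparison to modulo-2^32 arithmetic so the ordering stays correct across counter wraparound. A minimal standalone sketch of the idea (sn_newer() is a hypothetical helper, not part of the patch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* "a is newer than b" under modulo-2^32 arithmetic; mirrors
 * SN_GT(x, y) == ((s32)(y - x) < 0) with x = a, y = b */
static bool sn_newer(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	assert(sn_newer(5, 2));
	assert(sn_newer(1, 0xfffffff0));   /* still newer after the wrap */
	assert(!sn_newer(0xfffffff0, 1));
	return 0;
}
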
@@ -422,7 +422,7 @@
 		new_metric = MAX_METRIC;
 	exp_time = TU_TO_EXP_TIME(orig_lifetime);
 
-	if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) {
+	if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
 		/* This MP is the originator, we are not interested in this
 		 * frame, except for updating transmitter's path info.
 		 */
@@ -472,7 +472,7 @@
 
 	/* Update and check transmitter routing info */
 	ta = mgmt->sa;
-	if (compare_ether_addr(orig_addr, ta) == 0)
+	if (ether_addr_equal(orig_addr, ta))
 		fresh_info = false;
 	else {
 		fresh_info = true;
@@ -533,7 +533,7 @@
 
 	mhwmp_dbg("received PREQ from %pM", orig_addr);
 
-	if (compare_ether_addr(target_addr, sdata->vif.addr) == 0) {
+	if (ether_addr_equal(target_addr, sdata->vif.addr)) {
 		mhwmp_dbg("PREQ is for us");
 		forward = false;
 		reply = true;
@@ -631,7 +631,7 @@
 	mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
 
 	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
-	if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
+	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 		/* destination, no forwarding required */
 		return;
 
@@ -709,7 +709,7 @@
 		spin_lock_bh(&mpath->state_lock);
 		sta = next_hop_deref_protected(mpath);
 		if (mpath->flags & MESH_PATH_ACTIVE &&
-		    compare_ether_addr(ta, sta->sta.addr) == 0 &&
+		    ether_addr_equal(ta, sta->sta.addr) &&
 		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
 		    SN_GT(target_sn, mpath->sn))) {
 			mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -732,11 +732,12 @@
 				struct ieee80211_rann_ie *rann)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
 	struct mesh_path *mpath;
 	u8 ttl, flags, hopcount;
 	u8 *orig_addr;
-	u32 orig_sn, metric;
-	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+	u32 orig_sn, metric, metric_txsta, interval;
 	bool root_is_gate;
 
 	ttl = rann->rann_ttl;
@@ -748,19 +749,28 @@
 	flags = rann->rann_flags;
 	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 	orig_addr = rann->rann_addr;
-	orig_sn = rann->rann_seq;
+	orig_sn = le32_to_cpu(rann->rann_seq);
+	interval = le32_to_cpu(rann->rann_interval);
 	hopcount = rann->rann_hopcount;
 	hopcount++;
-	metric = rann->rann_metric;
+	metric = le32_to_cpu(rann->rann_metric);
 
 	/*  Ignore our own RANNs */
-	if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
+	if (ether_addr_equal(orig_addr, sdata->vif.addr))
 		return;
 
 	mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
 			orig_addr, mgmt->sa, root_is_gate);
 
 	rcu_read_lock();
+	sta = sta_info_get(sdata, mgmt->sa);
+	if (!sta) {
+		rcu_read_unlock();
+		return;
+	}
+
+	metric_txsta = airtime_link_metric_get(local, sta);
+
 	mpath = mesh_path_lookup(orig_addr, sdata);
 	if (!mpath) {
 		mesh_path_add(orig_addr, sdata);
@@ -780,18 +790,21 @@
 		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
 	}
 
-	if (mpath->sn < orig_sn && ifmsh->mshcfg.dot11MeshForwarding) {
+	if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn &&
+	   metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) {
 		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
 				       cpu_to_le32(orig_sn),
 				       0, NULL, 0, broadcast_addr,
 				       hopcount, ttl, cpu_to_le32(interval),
-				       cpu_to_le32(metric + mpath->metric),
+				       cpu_to_le32(metric + metric_txsta),
 				       0, sdata);
 		mpath->sn = orig_sn;
+		mpath->rann_metric = metric + metric_txsta;
+		/* Record the RANN sender's address so that individually
+		 * addressed PREQs can be sent towards the root mesh STA */
+		memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
 	}
 
-	/* Using individually addressed PREQ for root node */
-	memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
 	mpath->is_root = true;
 
 	if (root_is_gate)
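
As an illustration of the RANN forwarding rule introduced above: an announcement is re-propagated only when it carries a newer sequence number, or the same sequence number with a better (lower) metric, and the re-announced metric adds the airtime cost of the link to the transmitting neighbour. The helper below is a hypothetical sketch, not kernel code:

#include <stdbool.h>
#include <stdint.h>

/* modulo-2^32 "x < y", matching the SN_LT() macro */
static bool sn_lt(uint32_t x, uint32_t y)
{
	return (int32_t)(x - y) < 0;
}

static bool should_forward_rann(uint32_t last_sn, uint32_t last_metric,
				uint32_t orig_sn, uint32_t rx_metric,
				uint32_t metric_txsta, uint32_t *fwd_metric)
{
	if (!(sn_lt(last_sn, orig_sn) ||
	      (last_sn == orig_sn && rx_metric < last_metric)))
		return false;

	/* accumulate the cost of the hop the RANN just took */
	*fwd_metric = rx_metric + metric_txsta;
	return true;
}
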
@@ -1086,7 +1099,7 @@
 	if (time_after(jiffies,
 		       mpath->exp_time -
 		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
-	    !compare_ether_addr(sdata->vif.addr, hdr->addr4) &&
+	    ether_addr_equal(sdata->vif.addr, hdr->addr4) &&
 	    !(mpath->flags & MESH_PATH_RESOLVING) &&
 	    !(mpath->flags & MESH_PATH_FIXED))
 		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 49aaefd..b39224d 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -348,7 +348,7 @@
 	hlist_for_each_entry_rcu(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
-				compare_ether_addr(dst, mpath->dst) == 0) {
+		    ether_addr_equal(dst, mpath->dst)) {
 			if (MPATH_EXPIRED(mpath)) {
 				spin_lock_bh(&mpath->state_lock);
 				mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -517,7 +517,7 @@
 	int err = 0;
 	u32 hash_idx;
 
-	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
+	if (ether_addr_equal(dst, sdata->vif.addr))
 		/* never add ourselves as neighbours */
 		return -ENOTSUPP;
 
@@ -538,6 +538,8 @@
 
 	read_lock_bh(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
+	memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
+	new_mpath->is_root = false;
 	new_mpath->sdata = sdata;
 	new_mpath->flags = 0;
 	skb_queue_head_init(&new_mpath->frame_queue);
@@ -559,7 +561,7 @@
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
-		    compare_ether_addr(dst, mpath->dst) == 0)
+		    ether_addr_equal(dst, mpath->dst))
 			goto err_exists;
 	}
 
@@ -650,7 +652,7 @@
 	int err = 0;
 	u32 hash_idx;
 
-	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
+	if (ether_addr_equal(dst, sdata->vif.addr))
 		/* never add ourselves as neighbours */
 		return -ENOTSUPP;
 
@@ -688,7 +690,7 @@
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
-		    compare_ether_addr(dst, mpath->dst) == 0)
+		    ether_addr_equal(dst, mpath->dst))
 			goto err_exists;
 	}
 
@@ -882,7 +884,7 @@
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
-		    compare_ether_addr(addr, mpath->dst) == 0) {
+		    ether_addr_equal(addr, mpath->dst)) {
 			__mesh_path_del(tbl, node);
 			goto enddel;
 		}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 4e53c4c..8cc8461 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -82,20 +82,14 @@
 }
 
 /*
- * NOTE: This is just an alias for sta_info_alloc(), see notes
- *       on it in the lifecycle management section!
+ * Allocate mesh sta entry and insert into station table
  */
 static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
-					 u8 *hw_addr, u32 rates,
-					 struct ieee802_11_elems *elems)
+					 u8 *hw_addr)
 {
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_supported_band *sband;
 	struct sta_info *sta;
 
-	sband = local->hw.wiphy->bands[local->oper_channel->band];
-
-	if (local->num_sta >= MESH_MAX_PLINKS)
+	if (sdata->local->num_sta >= MESH_MAX_PLINKS)
 		return NULL;
 
 	sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
@@ -108,16 +102,70 @@
 
 	set_sta_flag(sta, WLAN_STA_WME);
 
-	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
-	if (elems->ht_cap_elem)
-		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
-						  elems->ht_cap_elem,
-						  &sta->sta.ht_cap);
-	rate_control_rate_init(sta);
-
 	return sta;
 }
 
+/** mesh_set_ht_prot_mode - set correct HT protection mode
+ *
+ * Section 9.23.3.5 of the IEEE 802.11s standard describes the protection rules
+ * for HT mesh STAs in an MBSS. Three HT protection modes are supported for
+ * now: non-HT mixed mode, 20MHz-protection and no-protection mode. Non-HT
+ * mixed mode is selected if any non-HT peers are present in our MBSS.
+ * 20MHz-protection mode is selected if all peers in our 20/40MHz MBSS support
+ * HT and at least one HT20 peer is present. Otherwise no-protection mode is
+ * selected.
+ */
+static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+	u32 changed = 0;
+	u16 ht_opmode;
+	bool non_ht_sta = false, ht20_sta = false;
+
+	if (local->_oper_channel_type == NL80211_CHAN_NO_HT)
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		if (sdata == sta->sdata &&
+		    sta->plink_state == NL80211_PLINK_ESTAB) {
+			switch (sta->ch_type) {
+			case NL80211_CHAN_NO_HT:
+				mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
+					sdata->vif.addr, sta->sta.addr);
+				non_ht_sta = true;
+				goto out;
+			case NL80211_CHAN_HT20:
+				mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
+					sdata->vif.addr, sta->sta.addr);
+				ht20_sta = true;
+			default:
+				break;
+			}
+		}
+	}
+out:
+	rcu_read_unlock();
+
+	if (non_ht_sta)
+		ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED;
+	else if (ht20_sta && local->_oper_channel_type > NL80211_CHAN_HT20)
+		ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_20MHZ;
+	else
+		ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+	if (sdata->vif.bss_conf.ht_operation_mode != ht_opmode) {
+		sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
+		sdata->u.mesh.mshcfg.ht_opmode = ht_opmode;
+		changed = BSS_CHANGED_HT;
+		mpl_dbg("mesh_plink %pM: protection mode changed to %d",
+			sdata->vif.addr, ht_opmode);
+	}
+
+	return changed;
+}
+
 /**
  * __mesh_plink_deactivate - deactivate mesh peer link
  *
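
A minimal standalone sketch of the protection-mode selection that mesh_set_ht_prot_mode() above performs, assuming the caller has already classified the established peers; the enum and function names here are hypothetical:

#include <stdbool.h>

enum ht_prot {
	HT_PROT_NONE,
	HT_PROT_20MHZ,
	HT_PROT_NONHT_MIXED,
};

/* any non-HT peer => non-HT mixed mode; otherwise, in a 40 MHz MBSS,
 * any HT20-only peer => 20 MHz protection; otherwise no protection */
static enum ht_prot pick_ht_protection(bool any_non_ht_peer,
				       bool any_ht20_peer,
				       bool mbss_is_40mhz)
{
	if (any_non_ht_peer)
		return HT_PROT_NONHT_MIXED;
	if (any_ht20_peer && mbss_is_40mhz)
		return HT_PROT_20MHZ;
	return HT_PROT_NONE;
}
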
@@ -187,7 +235,7 @@
 			    2 + sdata->u.mesh.mesh_id_len +
 			    2 + sizeof(struct ieee80211_meshconf_ie) +
 			    2 + sizeof(struct ieee80211_ht_cap) +
-			    2 + sizeof(struct ieee80211_ht_info) +
+			    2 + sizeof(struct ieee80211_ht_operation) +
 			    2 + 8 + /* peering IE */
 			    sdata->u.mesh.ie_len);
 	if (!skb)
@@ -212,8 +260,8 @@
 			pos = skb_put(skb, 2);
 			memcpy(pos + 2, &plid, 2);
 		}
-		if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
-		    ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
+		if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
+		    ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
 		    mesh_add_rsn_ie(skb, sdata) ||
 		    mesh_add_meshid_ie(skb, sdata) ||
 		    mesh_add_meshconf_ie(skb, sdata))
@@ -263,7 +311,7 @@
 
 	if (action != WLAN_SP_MESH_PEERING_CLOSE) {
 		if (mesh_add_ht_cap_ie(skb, sdata) ||
-		    mesh_add_ht_info_ie(skb, sdata))
+		    mesh_add_ht_oper_ie(skb, sdata))
 			return -1;
 	}
 
@@ -274,43 +322,93 @@
 	return 0;
 }
 
-void mesh_neighbour_update(u8 *hw_addr, u32 rates,
-		struct ieee80211_sub_if_data *sdata,
-		struct ieee802_11_elems *elems)
+/* mesh_peer_init - initialize new mesh peer and return resulting sta_info
+ *
+ * @sdata: local mesh interface
+ * @addr: peer's address
+ * @elems: IEs from beacon or mesh peering frame
+ *
+ * Call under the RCU read lock.
+ */
+static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
+				       u8 *addr,
+				       struct ieee802_11_elems *elems)
 {
 	struct ieee80211_local *local = sdata->local;
+	enum ieee80211_band band = local->oper_channel->band;
+	struct ieee80211_supported_band *sband;
+	u32 rates, basic_rates = 0;
 	struct sta_info *sta;
+	bool insert = false;
 
-	rcu_read_lock();
+	sband = local->hw.wiphy->bands[band];
+	rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
 
-	sta = sta_info_get(sdata, hw_addr);
+	sta = sta_info_get(sdata, addr);
 	if (!sta) {
-		rcu_read_unlock();
-		/* Userspace handles peer allocation when security is enabled
-		 * */
-		if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
-			cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
-					elems->ie_start, elems->total_len,
-					GFP_KERNEL);
-		else
-			sta = mesh_plink_alloc(sdata, hw_addr, rates, elems);
+		sta = mesh_plink_alloc(sdata, addr);
 		if (!sta)
-			return;
-		if (sta_info_insert_rcu(sta)) {
-			rcu_read_unlock();
-			return;
-		}
+			return NULL;
+		insert = true;
 	}
 
+	spin_lock_bh(&sta->lock);
 	sta->last_rx = jiffies;
-	sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+	sta->sta.supp_rates[band] = rates;
+	if (elems->ht_cap_elem &&
+	    sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT)
+		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+						  elems->ht_cap_elem,
+						  &sta->sta.ht_cap);
+	else
+		memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap));
+
+	if (elems->ht_operation) {
+		if (!(elems->ht_operation->ht_param &
+		      IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
+			sta->sta.ht_cap.cap &=
+					    ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+		sta->ch_type =
+			ieee80211_ht_oper_to_channel_type(elems->ht_operation);
+	}
+
+	rate_control_rate_init(sta);
+	spin_unlock_bh(&sta->lock);
+
+	if (insert && sta_info_insert(sta))
+		return NULL;
+
+	return sta;
+}
+
+void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
+			   u8 *hw_addr,
+			   struct ieee802_11_elems *elems)
+{
+	struct sta_info *sta;
+
+	/* Userspace handles peer allocation when security is enabled */
+	if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
+		cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
+						   elems->ie_start,
+						   elems->total_len,
+						   GFP_KERNEL);
+		return;
+	}
+
+	rcu_read_lock();
+	sta = mesh_peer_init(sdata, hw_addr, elems);
+	if (!sta)
+		goto out;
+
 	if (mesh_peer_accepts_plinks(elems) &&
-			sta->plink_state == NL80211_PLINK_LISTEN &&
-			sdata->u.mesh.accepting_plinks &&
-			sdata->u.mesh.mshcfg.auto_open_plinks &&
-			rssi_threshold_check(sta, sdata))
+	    sta->plink_state == NL80211_PLINK_LISTEN &&
+	    sdata->u.mesh.accepting_plinks &&
+	    sdata->u.mesh.mshcfg.auto_open_plinks &&
+	    rssi_threshold_check(sta, sdata))
 		mesh_plink_open(sta);
 
+out:
 	rcu_read_unlock();
 }
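
The reworked mesh_neighbour_update()/mesh_peer_init() path above splits peering into two questions: who creates the station entry (userspace when authenticated mesh security is in use, the kernel otherwise), and whether a peer link may be auto-opened. A hedged sketch of the second decision, with hypothetical parameter names:

#include <stdbool.h>

static bool should_auto_open_plink(bool secured_mesh, bool peer_accepts_plinks,
				   bool peer_in_listen_state,
				   bool we_accept_plinks, bool auto_open,
				   bool rssi_ok)
{
	/* with authenticated peering, userspace drives plink establishment */
	if (secured_mesh)
		return false;

	return peer_accepts_plinks && peer_in_listen_state &&
	       we_accept_plinks && auto_open && rssi_ok;
}
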
 
@@ -456,15 +554,15 @@
 void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
 			 size_t len, struct ieee80211_rx_status *rx_status)
 {
-	struct ieee80211_local *local = sdata->local;
 	struct ieee802_11_elems elems;
 	struct sta_info *sta;
 	enum plink_event event;
 	enum ieee80211_self_protected_actioncode ftype;
 	size_t baselen;
-	bool deactivated, matches_local = true;
+	bool matches_local = true;
 	u8 ie_len;
 	u8 *baseaddr;
+	u32 changed = 0;
 	__le16 plid, llid, reason;
 #ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
 	static const char *mplstates[] = {
@@ -560,7 +658,7 @@
 	/* Now we will figure out the appropriate event... */
 	event = PLINK_UNDEFINED;
 	if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
-	    (!mesh_matches_local(&elems, sdata))) {
+	    !mesh_matches_local(sdata, &elems)) {
 		matches_local = false;
 		switch (ftype) {
 		case WLAN_SP_MESH_PEERING_OPEN:
@@ -583,29 +681,13 @@
 		return;
 	} else if (!sta) {
 		/* ftype == WLAN_SP_MESH_PEERING_OPEN */
-		u32 rates;
-
-		rcu_read_unlock();
-
 		if (!mesh_plink_free_count(sdata)) {
 			mpl_dbg("Mesh plink error: no more free plinks\n");
-			return;
-		}
-
-		rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
-		sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems);
-		if (!sta) {
-			mpl_dbg("Mesh plink error: plink table full\n");
-			return;
-		}
-		if (sta_info_insert_rcu(sta)) {
 			rcu_read_unlock();
 			return;
 		}
 		event = OPN_ACPT;
-		spin_lock_bh(&sta->lock);
 	} else if (matches_local) {
-		spin_lock_bh(&sta->lock);
 		switch (ftype) {
 		case WLAN_SP_MESH_PEERING_OPEN:
 			if (!mesh_plink_free_count(sdata) ||
@@ -642,12 +724,19 @@
 			break;
 		default:
 			mpl_dbg("Mesh plink: unknown frame subtype\n");
-			spin_unlock_bh(&sta->lock);
 			rcu_read_unlock();
 			return;
 		}
-	} else {
-		spin_lock_bh(&sta->lock);
+	}
+
+	if (event == OPN_ACPT) {
+		/* allocate sta entry if necessary and update info */
+		sta = mesh_peer_init(sdata, mgmt->sa, &elems);
+		if (!sta) {
+			mpl_dbg("Mesh plink: failed to init peer!\n");
+			rcu_read_unlock();
+			return;
+		}
 	}
 
 	mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
@@ -655,6 +744,7 @@
 		le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
 		event);
 	reason = 0;
+	spin_lock_bh(&sta->lock);
 	switch (sta->plink_state) {
 		/* spin_unlock as soon as state is updated at each case */
 	case NL80211_PLINK_LISTEN:
@@ -758,7 +848,8 @@
 			sta->plink_state = NL80211_PLINK_ESTAB;
 			spin_unlock_bh(&sta->lock);
 			mesh_plink_inc_estab_count(sdata);
-			ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+			changed |= mesh_set_ht_prot_mode(sdata);
+			changed |= BSS_CHANGED_BEACON;
 			mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
 				sta->sta.addr);
 			break;
@@ -793,7 +884,8 @@
 			sta->plink_state = NL80211_PLINK_ESTAB;
 			spin_unlock_bh(&sta->lock);
 			mesh_plink_inc_estab_count(sdata);
-			ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+			changed |= mesh_set_ht_prot_mode(sdata);
+			changed |= BSS_CHANGED_BEACON;
 			mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
 				sta->sta.addr);
 			mesh_plink_frame_tx(sdata,
@@ -811,13 +903,13 @@
 		case CLS_ACPT:
 			reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
 			sta->reason = reason;
-			deactivated = __mesh_plink_deactivate(sta);
+			__mesh_plink_deactivate(sta);
 			sta->plink_state = NL80211_PLINK_HOLDING;
 			llid = sta->llid;
 			mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
 			spin_unlock_bh(&sta->lock);
-			if (deactivated)
-				ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+			changed |= mesh_set_ht_prot_mode(sdata);
+			changed |= BSS_CHANGED_BEACON;
 			mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
 					    sta->sta.addr, llid, plid, reason);
 			break;
@@ -864,4 +956,7 @@
 	}
 
 	rcu_read_unlock();
+
+	if (changed)
+		ieee80211_bss_info_change_notify(sdata, changed);
 }
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
new file mode 100644
index 0000000..38d30e8
--- /dev/null
+++ b/net/mac80211/mesh_sync.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2011-2012, Pavel Zubarev <pavel.zubarev@gmail.com>
+ * Copyright 2011-2012, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
+ * Copyright 2011-2012, cozybit Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ieee80211_i.h"
+#include "mesh.h"
+#include "driver-ops.h"
+
+#ifdef CONFIG_MAC80211_VERBOSE_MESH_SYNC_DEBUG
+#define msync_dbg(fmt, args...) \
+	printk(KERN_DEBUG "Mesh sync (%s): " fmt "\n", sdata->name, ##args)
+#else
+#define msync_dbg(fmt, args...)   do { (void)(0); } while (0)
+#endif
+
+/* This is not in the standard.  It represents a tolerable TBTT drift below
+ * which we do no TSF adjustment.
+ */
+#define TOFFSET_MINIMUM_ADJUSTMENT 10
+
+/* This is not in the standard. It is a margin added to the
+ * Toffset setpoint to mitigate TSF overcorrection
+ * introduced by TSF adjustment latency.
+ */
+#define TOFFSET_SET_MARGIN 20
+
+/* This is not in the standard.  It represents the maximum Toffset jump above
+ * which we'll invalidate the Toffset setpoint and choose a new setpoint.  This
+ * could happen, for instance, when a neighbor is restarted and its TSF counter
+ * is reset.
+ */
+#define TOFFSET_MAXIMUM_ADJUSTMENT 30000		/* 30 ms */
+
+struct sync_method {
+	u8 method;
+	struct ieee80211_mesh_sync_ops ops;
+};
+
+/**
+ * mesh_peer_tbtt_adjusting - check if a mesh peer is currently adjusting its TBTT
+ *
+ * @ie: information elements of a management frame from the mesh peer
+ */
+static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie)
+{
+	return (ie->mesh_config->meshconf_cap &
+	    MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
+}
+
+void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	/* sdata->vif.bss_conf.beacon_int is in 1024us units; take 0.04% of it */
+	u64 beacon_int_fraction = sdata->vif.bss_conf.beacon_int * 1024 / 2500;
+	u64 tsf;
+	u64 tsfdelta;
+
+	spin_lock_bh(&ifmsh->sync_offset_lock);
+
+	if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
+		msync_dbg("TBTT : max clockdrift=%lld; adjusting",
+			(long long) ifmsh->sync_offset_clockdrift_max);
+		tsfdelta = -ifmsh->sync_offset_clockdrift_max;
+		ifmsh->sync_offset_clockdrift_max = 0;
+	} else {
+		msync_dbg("TBTT : max clockdrift=%lld; adjusting by %llu",
+			(long long) ifmsh->sync_offset_clockdrift_max,
+			(unsigned long long) beacon_int_fraction);
+		tsfdelta = -beacon_int_fraction;
+		ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
+	}
+
+	tsf = drv_get_tsf(local, sdata);
+	if (tsf != -1ULL)
+		drv_set_tsf(local, sdata, tsf + tsfdelta);
+	spin_unlock_bh(&ifmsh->sync_offset_lock);
+}
+
+static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
+				   u16 stype,
+				   struct ieee80211_mgmt *mgmt,
+				   struct ieee802_11_elems *elems,
+				   struct ieee80211_rx_status *rx_status)
+{
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+	u64 t_t, t_r;
+
+	WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
+
+	/* standard mentions only beacons */
+	if (stype != IEEE80211_STYPE_BEACON)
+		return;
+
+	/* The current tsf is a first approximation for the timestamp
+	 * for the received beacon.  Further down we try to get a
+	 * better value from the rx_status->mactime field if
+	 * available. Also we have to call drv_get_tsf() before
+	 * entering the RCU read-side section. */
+	t_r = drv_get_tsf(local, sdata);
+
+	rcu_read_lock();
+	sta = sta_info_get(sdata, mgmt->sa);
+	if (!sta)
+		goto no_sync;
+
+	/* check offset sync conditions (13.13.2.2.1)
+	 *
+	 * TODO also sync to
+	 * dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors
+	 */
+
+	if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
+		clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
+		msync_dbg("STA %pM : is adjusting TBTT", sta->sta.addr);
+		goto no_sync;
+	}
+
+	if (rx_status->flag & RX_FLAG_MACTIME_MPDU && rx_status->mactime) {
+		/*
+		 * The mactime is defined as the time the first data symbol
+		 * of the frame hits the PHY, and the timestamp of the beacon
+		 * is defined as "the time that the data symbol containing the
+		 * first bit of the timestamp is transmitted to the PHY plus
+		 * the transmitting STA's delays through its local PHY from the
+		 * MAC-PHY interface to its interface with the WM" (802.11
+		 * 11.1.2)
+		 *
+		 * T_r, in 13.13.2.2.2, is just defined as "the frame reception
+		 * time", but unless we interpret that time to be the same as
+		 * the time of the beacon timestamp, the offset calculation will
+		 * be off.  Below we adjust t_r to be "the time at which the first
+		 * symbol of the timestamp element in the beacon is received".
+		 * This correction depends on the rate.
+		 *
+		 * Based on similar code in ibss.c
+		 */
+		int rate;
+
+		if (rx_status->flag & RX_FLAG_HT) {
+			/* TODO:
+			 * In principle there could be HT-beacons (Dual Beacon
+			 * HT Operation options), but for now ignore them and
+			 * just use the primary (i.e. non-HT) beacons for
+			 * synchronization.
+			 */
+			goto no_sync;
+		} else
+			rate = local->hw.wiphy->bands[rx_status->band]->
+				bitrates[rx_status->rate_idx].bitrate;
+
+		/* time to receive the 24-byte header preceding the timestamp:
+		 * 24 bytes * 8 bits/byte * 10 / rate, with rate in 100 Kbps */
+		t_r = rx_status->mactime + (24 * 8 * 10 / rate);
+	}
+
+	/* Timing offset calculation (see 13.13.2.2.2) */
+	t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
+	sta->t_offset = t_t - t_r;
+
+	if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
+		s64 t_clockdrift = sta->t_offset_setpoint
+				   - sta->t_offset;
+		msync_dbg("STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld",
+			  sta->sta.addr,
+			  (long long) sta->t_offset,
+			  (long long)
+			  sta->t_offset_setpoint,
+			  (long long) t_clockdrift);
+
+		if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
+			t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
+			msync_dbg("STA %pM : t_clockdrift=%lld too large, setpoint reset",
+				  sta->sta.addr,
+				  (long long) t_clockdrift);
+			clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
+			goto no_sync;
+		}
+
+		rcu_read_unlock();
+
+		spin_lock_bh(&ifmsh->sync_offset_lock);
+		if (t_clockdrift >
+		    ifmsh->sync_offset_clockdrift_max)
+			ifmsh->sync_offset_clockdrift_max
+				= t_clockdrift;
+		spin_unlock_bh(&ifmsh->sync_offset_lock);
+
+	} else {
+		sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
+		set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
+		msync_dbg("STA %pM : offset was invalid, "
+			  "sta->t_offset=%lld",
+			  sta->sta.addr,
+			  (long long) sta->t_offset);
+		rcu_read_unlock();
+	}
+	return;
+
+no_sync:
+	rcu_read_unlock();
+}
+
+static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+
+	WARN_ON(ifmsh->mesh_sp_id
+		!= IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
+	BUG_ON(!rcu_read_lock_held());
+
+	spin_lock_bh(&ifmsh->sync_offset_lock);
+
+	if (ifmsh->sync_offset_clockdrift_max >
+		TOFFSET_MINIMUM_ADJUSTMENT) {
+		/* Since adjusting the tsf here would
+		 * require a possibly blocking call
+		 * to the driver tsf setter, we punt
+		 * the tsf adjustment to the mesh tasklet
+		 */
+		msync_dbg("TBTT : kicking off TBTT "
+			  "adjustment with "
+			  "clockdrift_max=%lld",
+		  ifmsh->sync_offset_clockdrift_max);
+		set_bit(MESH_WORK_DRIFT_ADJUST,
+			&ifmsh->wrkq_flags);
+	} else {
+		msync_dbg("TBTT : max clockdrift=%lld; "
+			  "too small to adjust",
+			  (long long)
+		       ifmsh->sync_offset_clockdrift_max);
+		ifmsh->sync_offset_clockdrift_max = 0;
+	}
+	spin_unlock_bh(&ifmsh->sync_offset_lock);
+}
+
+static const u8 *mesh_get_vendor_oui(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+	u8 offset;
+
+	if (!ifmsh->ie || !ifmsh->ie_len)
+		return NULL;
+
+	offset = ieee80211_ie_split_vendor(ifmsh->ie,
+					ifmsh->ie_len, 0);
+
+	if (!offset)
+		return NULL;
+
+	return ifmsh->ie + offset + 2;
+}
+
+static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
+				   u16 stype,
+				   struct ieee80211_mgmt *mgmt,
+				   struct ieee802_11_elems *elems,
+				   struct ieee80211_rx_status *rx_status)
+{
+	const u8 *oui;
+
+	WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
+	msync_dbg("called mesh_sync_vendor_rx_bcn_presp");
+	oui = mesh_get_vendor_oui(sdata);
+	/* here you would implement the vendor offset tracking for this OUI */
+}
+
+static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
+{
+	const u8 *oui;
+
+	WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
+	msync_dbg("called mesh_sync_vendor_adjust_tbtt");
+	oui = mesh_get_vendor_oui(sdata);
+	/* here you would implement the vendor TSF adjustment for this OUI */
+}
+
+/* global table of supported synchronization methods */
+static struct sync_method sync_methods[] = {
+	{
+		.method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
+		.ops = {
+			.rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp,
+			.adjust_tbtt = &mesh_sync_offset_adjust_tbtt,
+		}
+	},
+	{
+		.method = IEEE80211_SYNC_METHOD_VENDOR,
+		.ops = {
+			.rx_bcn_presp = &mesh_sync_vendor_rx_bcn_presp,
+			.adjust_tbtt = &mesh_sync_vendor_adjust_tbtt,
+		}
+	},
+};
+
+struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
+{
+	struct ieee80211_mesh_sync_ops *ops = NULL;
+	u8 i;
+
+	for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) {
+		if (sync_methods[i].method == method) {
+			ops = &sync_methods[i].ops;
+			break;
+		}
+	}
+	return ops;
+}
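
The neighbour-offset synchronization added in mesh_sync.c reduces to a per-beacon offset measurement and a clamped TSF correction. The sketch below restates that arithmetic in isolation; all names are hypothetical and values are in microseconds (one TU = 1024 us):

#include <stdint.h>

#define TOFFSET_MIN_ADJ	10	/* drift below this is ignored */

/* offset of the peer's clock: transmitted timestamp minus local receive
 * time; drift is measured against the previously latched setpoint */
static int64_t measure_clockdrift(uint64_t t_t, uint64_t t_r,
				  int64_t t_offset_setpoint)
{
	int64_t t_offset = (int64_t)(t_t - t_r);

	return t_offset_setpoint - t_offset;
}

/* correct the TSF by the worst observed drift, but never by more than
 * ~0.04% of the beacon interval per period, so beacons keep a stable
 * cadence while the offset converges */
static uint64_t adjust_tsf(uint64_t tsf, int64_t drift_max,
			   uint32_t beacon_int_tu)
{
	int64_t max_step = (int64_t)beacon_int_tu * 1024 / 2500;

	if (drift_max <= TOFFSET_MIN_ADJ)
		return tsf;
	if (drift_max < max_step)
		return tsf - drift_max;
	return tsf - max_step;
}
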
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f76da5b..b3b3c26 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -171,122 +171,64 @@
 	return (1 << ecw) - 1;
 }
 
-/*
- * ieee80211_enable_ht should be called only after the operating band
- * has been determined as ht configuration depends on the hw's
- * HT abilities for a specific band.
- */
-static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
-			       struct ieee80211_ht_info *hti,
-			       const u8 *bssid, u16 ap_ht_cap_flags,
-			       bool beacon_htcap_ie)
+static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
+				  struct ieee80211_ht_operation *ht_oper,
+				  const u8 *bssid, bool reconfig)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
 	struct sta_info *sta;
 	u32 changed = 0;
-	int hti_cfreq;
 	u16 ht_opmode;
-	bool enable_ht = true;
-	enum nl80211_channel_type prev_chantype;
-	enum nl80211_channel_type rx_channel_type = NL80211_CHAN_NO_HT;
-	enum nl80211_channel_type tx_channel_type;
+	bool disable_40 = false;
 
 	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-	prev_chantype = sdata->vif.bss_conf.channel_type;
 
-
-	hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan,
-						   sband->band);
-	/* check that channel matches the right operating channel */
-	if (local->hw.conf.channel->center_freq != hti_cfreq) {
-		/* Some APs mess this up, evidently.
-		 * Netgear WNDR3700 sometimes reports 4 higher than
-		 * the actual channel, for instance.
-		 */
-		printk(KERN_DEBUG
-		       "%s: Wrong control channel in association"
-		       " response: configured center-freq: %d"
-		       " hti-cfreq: %d  hti->control_chan: %d"
-		       " band: %d.  Disabling HT.\n",
-		       sdata->name,
-		       local->hw.conf.channel->center_freq,
-		       hti_cfreq, hti->control_chan,
-		       sband->band);
-		enable_ht = false;
+	switch (sdata->vif.bss_conf.channel_type) {
+	case NL80211_CHAN_HT40PLUS:
+		if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
+			disable_40 = true;
+		break;
+	case NL80211_CHAN_HT40MINUS:
+		if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
+			disable_40 = true;
+		break;
+	default:
+		break;
 	}
 
-	if (enable_ht) {
-		rx_channel_type = NL80211_CHAN_HT20;
+	/* This can change during the lifetime of the BSS */
+	if (!(ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
+		disable_40 = true;
 
-		if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
-		    !ieee80111_cfg_override_disables_ht40(sdata) &&
-		    (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
-		    (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
-			switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
-			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-				rx_channel_type = NL80211_CHAN_HT40PLUS;
-				break;
-			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-				rx_channel_type = NL80211_CHAN_HT40MINUS;
-				break;
-			}
-		}
+	mutex_lock(&local->sta_mtx);
+	sta = sta_info_get(sdata, bssid);
+
+	WARN_ON_ONCE(!sta);
+
+	if (sta && !sta->supports_40mhz)
+		disable_40 = true;
+
+	if (sta && (!reconfig ||
+		    (disable_40 != !(sta->sta.ht_cap.cap &
+					IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) {
+
+		if (disable_40)
+			sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+		else
+			sta->sta.ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+		rate_control_rate_update(local, sband, sta,
+					 IEEE80211_RC_BW_CHANGED);
 	}
+	mutex_unlock(&local->sta_mtx);
 
-	tx_channel_type = ieee80211_get_tx_channel_type(local, rx_channel_type);
-
-	if (local->tmp_channel)
-		local->tmp_channel_type = rx_channel_type;
-
-	if (!ieee80211_set_channel_type(local, sdata, rx_channel_type)) {
-		/* can only fail due to HT40+/- mismatch */
-		rx_channel_type = NL80211_CHAN_HT20;
-		WARN_ON(!ieee80211_set_channel_type(local, sdata,
-						    rx_channel_type));
-	}
-
-	if (beacon_htcap_ie && (prev_chantype != rx_channel_type)) {
-		/*
-		 * Whenever the AP announces the HT mode change that can be
-		 * 40MHz intolerant or etc., it would be safer to stop tx
-		 * queues before doing hw config to avoid buffer overflow.
-		 */
-		ieee80211_stop_queues_by_reason(&sdata->local->hw,
-				IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
-
-		/* flush out all packets */
-		synchronize_net();
-
-		drv_flush(local, false);
-	}
-
-	/* channel_type change automatically detected */
-	ieee80211_hw_config(local, 0);
-
-	if (prev_chantype != tx_channel_type) {
-		rcu_read_lock();
-		sta = sta_info_get(sdata, bssid);
-		if (sta)
-			rate_control_rate_update(local, sband, sta,
-						 IEEE80211_RC_HT_CHANGED,
-						 tx_channel_type);
-		rcu_read_unlock();
-
-		if (beacon_htcap_ie)
-			ieee80211_wake_queues_by_reason(&sdata->local->hw,
-				IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
-	}
-
-	ht_opmode = le16_to_cpu(hti->operation_mode);
+	ht_opmode = le16_to_cpu(ht_oper->operation_mode);
 
 	/* if bss configuration changed store the new one */
-	if (sdata->ht_opmode_valid != enable_ht ||
-	    sdata->vif.bss_conf.ht_operation_mode != ht_opmode ||
-	    prev_chantype != rx_channel_type) {
+	if (!reconfig || (sdata->vif.bss_conf.ht_operation_mode != ht_opmode)) {
 		changed |= BSS_CHANGED_HT;
 		sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
-		sdata->ht_opmode_valid = enable_ht;
 	}
 
 	return changed;
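
The rewritten ieee80211_config_ht_tx() above boils HT TX reconfiguration down to one question: must 40 MHz be disabled for this association? A minimal sketch of that decision with simplified, hypothetical inputs:

#include <stdbool.h>

/* 40 MHz TX is disabled if regulatory flags forbid the configured HT40
 * channel, if the AP's HT operation element restricts the BSS to the
 * primary 20 MHz channel, or if the peer itself lacks 40 MHz support */
static bool ht40_tx_disabled(bool chan_flags_forbid_ht40,
			     bool ap_allows_any_chan_width,
			     bool sta_supports_40mhz)
{
	if (chan_flags_forbid_ht40)
		return true;
	if (!ap_allows_any_chan_width)
		return true;
	return !sta_supports_40mhz;
}
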
@@ -316,12 +258,12 @@
 }
 
 static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
-				struct sk_buff *skb, const u8 *ht_info_ie,
+				struct sk_buff *skb, const u8 *ht_oper_ie,
 				struct ieee80211_supported_band *sband,
 				struct ieee80211_channel *channel,
 				enum ieee80211_smps_mode smps)
 {
-	struct ieee80211_ht_info *ht_info;
+	struct ieee80211_ht_operation *ht_oper;
 	u8 *pos;
 	u32 flags = channel->flags;
 	u16 cap;
@@ -329,21 +271,21 @@
 
 	BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
 
-	if (!ht_info_ie)
+	if (!ht_oper_ie)
 		return;
 
-	if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
+	if (ht_oper_ie[1] < sizeof(struct ieee80211_ht_operation))
 		return;
 
 	memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
 	ieee80211_apply_htcap_overrides(sdata, &ht_cap);
 
-	ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
+	ht_oper = (struct ieee80211_ht_operation *)(ht_oper_ie + 2);
 
 	/* determine capability flags */
 	cap = ht_cap.cap;
 
-	switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+	switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
 	case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
 		if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
 			cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -358,6 +300,16 @@
 		break;
 	}
 
+	/*
+	 * If 40 MHz was disabled associate as though we weren't
+	 * capable of 40 MHz -- some broken APs will never fall
+	 * back to trying to transmit in 20 MHz.
+	 */
+	if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_40MHZ) {
+		cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+		cap &= ~IEEE80211_HT_CAP_SGI_40;
+	}
+
 	/* set SM PS mode properly */
 	cap &= ~IEEE80211_HT_CAP_SM_PS;
 	switch (smps) {
@@ -557,7 +509,7 @@
 	}
 
 	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
-		ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_information_ie,
+		ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie,
 				    sband, local->oper_channel, ifmgd->ap_smps);
 
 	/* if present, add any custom non-vendor IEs that go after HT */
@@ -1182,7 +1134,7 @@
 	if (!local->ops->conf_tx)
 		return;
 
-	if (local->hw.queues < 4)
+	if (local->hw.queues < IEEE80211_NUM_ACS)
 		return;
 
 	if (!wmm_param)
@@ -1435,7 +1387,6 @@
 	sdata->vif.bss_conf.assoc = false;
 
 	/* on the next assoc, re-program HT parameters */
-	sdata->ht_opmode_valid = false;
 	memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
 	memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
 
@@ -1496,19 +1447,24 @@
 static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	struct ieee80211_local *local = sdata->local;
 
+	mutex_lock(&local->mtx);
 	if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
-			      IEEE80211_STA_CONNECTION_POLL)))
-	    return;
+			      IEEE80211_STA_CONNECTION_POLL))) {
+		mutex_unlock(&local->mtx);
+		return;
+	}
 
 	ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
 			  IEEE80211_STA_BEACON_POLL);
-	mutex_lock(&sdata->local->iflist_mtx);
-	ieee80211_recalc_ps(sdata->local, -1);
-	mutex_unlock(&sdata->local->iflist_mtx);
+
+	mutex_lock(&local->iflist_mtx);
+	ieee80211_recalc_ps(local, -1);
+	mutex_unlock(&local->iflist_mtx);
 
 	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
-		return;
+		goto out;
 
 	/*
 	 * We've received a probe response, but are not sure whether
@@ -1520,6 +1476,9 @@
 	mod_timer(&ifmgd->conn_mon_timer,
 		  round_jiffies_up(jiffies +
 				   IEEE80211_CONNECTION_IDLE_TIME));
+out:
+	ieee80211_run_deferred_scan(local);
+	mutex_unlock(&local->mtx);
 }
 
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1567,14 +1526,23 @@
 		ifmgd->nullfunc_failed = false;
 		ieee80211_send_nullfunc(sdata->local, sdata, 0);
 	} else {
+		int ssid_len;
+
 		ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
-		ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0,
-					 (u32) -1, true, false);
+		if (WARN_ON_ONCE(ssid == NULL))
+			ssid_len = 0;
+		else
+			ssid_len = ssid[1];
+
+		ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
+					 0, (u32) -1, true, false);
 	}
 
 	ifmgd->probe_send_count++;
 	ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
 	run_again(ifmgd, ifmgd->probe_timeout);
+	if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+		drv_flush(sdata->local, false);
 }
 
 static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
@@ -1586,21 +1554,22 @@
 	if (!ieee80211_sdata_running(sdata))
 		return;
 
-	if (sdata->local->scanning)
-		return;
-
-	if (sdata->local->tmp_channel)
-		return;
-
 	mutex_lock(&ifmgd->mtx);
 
 	if (!ifmgd->associated)
 		goto out;
 
+	mutex_lock(&sdata->local->mtx);
+
+	if (sdata->local->tmp_channel || sdata->local->scanning) {
+		mutex_unlock(&sdata->local->mtx);
+		goto out;
+	}
+
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-	if (beacon && net_ratelimit())
-		printk(KERN_DEBUG "%s: detected beacon loss from AP "
-		       "- sending probe request\n", sdata->name);
+	if (beacon)
+		net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n",
+				    sdata->name);
 #endif
 
 	/*
@@ -1623,6 +1592,8 @@
 	else
 		ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
 
+	mutex_unlock(&sdata->local->mtx);
+
 	if (already)
 		goto out;
 
@@ -1643,6 +1614,7 @@
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct sk_buff *skb;
 	const u8 *ssid;
+	int ssid_len;
 
 	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
 		return NULL;
@@ -1653,8 +1625,13 @@
 		return NULL;
 
 	ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
+	if (WARN_ON_ONCE(ssid == NULL))
+		ssid_len = 0;
+	else
+		ssid_len = ssid[1];
+
 	skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
-					(u32) -1, ssid + 2, ssid[1],
+					(u32) -1, ssid + 2, ssid_len,
 					NULL, 0, true);
 
 	return skb;
@@ -1799,7 +1776,7 @@
 
 	memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
 
-	if (compare_ether_addr(bssid, mgmt->bssid))
+	if (!ether_addr_equal(bssid, mgmt->bssid))
 		return RX_MGMT_NONE;
 
 	auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
@@ -1876,7 +1853,7 @@
 		return RX_MGMT_NONE;
 
 	if (!ifmgd->associated ||
-	    compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
+	    !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
 		return RX_MGMT_NONE;
 
 	bssid = ifmgd->associated->bssid;
@@ -1909,7 +1886,7 @@
 		return RX_MGMT_NONE;
 
 	if (!ifmgd->associated ||
-	    compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
+	    !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
 		return RX_MGMT_NONE;
 
 	reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -2000,7 +1977,6 @@
 	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
 	u32 changed = 0;
 	int err;
-	u16 ap_ht_cap_flags;
 
 	/* AssocResp and ReassocResp have identical structure */
 
@@ -2051,7 +2027,8 @@
 		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
 				elems.ht_cap_elem, &sta->sta.ht_cap);
 
-	ap_ht_cap_flags = sta->sta.ht_cap.cap;
+	sta->supports_40mhz =
+		sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 
 	rate_control_rate_init(sta);
 
@@ -2092,11 +2069,10 @@
 		ieee80211_set_wmm_default(sdata, false);
 	changed |= BSS_CHANGED_QOS;
 
-	if (elems.ht_info_elem && elems.wmm_param &&
+	if (elems.ht_operation && elems.wmm_param &&
 	    !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
-		changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
-					       cbss->bssid, ap_ht_cap_flags,
-					       false);
+		changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
+						  cbss->bssid, false);
 
 	/* set AID and assoc capability,
 	 * ieee80211_set_associated() will tell the driver */
@@ -2137,7 +2113,7 @@
 
 	if (!assoc_data)
 		return RX_MGMT_NONE;
-	if (compare_ether_addr(assoc_data->bss->bssid, mgmt->bssid))
+	if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid))
 		return RX_MGMT_NONE;
 
 	/*
@@ -2217,8 +2193,7 @@
 	bool need_ps = false;
 
 	if (sdata->u.mgd.associated &&
-	    compare_ether_addr(mgmt->bssid, sdata->u.mgd.associated->bssid)
-	    == 0) {
+	    ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) {
 		bss = (void *)sdata->u.mgd.associated->priv;
 		/* not previously set so we may need to recalc */
 		need_ps = !bss->dtim_period;
@@ -2273,7 +2248,7 @@
 
 	ASSERT_MGD_MTX(ifmgd);
 
-	if (compare_ether_addr(mgmt->da, sdata->vif.addr))
+	if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
 		return; /* ignore ProbeResp to foreign address */
 
 	baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -2286,12 +2261,11 @@
 	ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
 
 	if (ifmgd->associated &&
-	    compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid) == 0)
+	    ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
 		ieee80211_reset_ap_probe(sdata);
 
 	if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
-	    compare_ether_addr(mgmt->bssid, ifmgd->auth_data->bss->bssid)
-	    == 0) {
+	    ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
 		/* got probe response, continue with auth */
 		printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
 		ifmgd->auth_data->tries = 0;
@@ -2319,7 +2293,7 @@
 	(1ULL << WLAN_EID_CHANNEL_SWITCH) |
 	(1ULL << WLAN_EID_PWR_CONSTRAINT) |
 	(1ULL << WLAN_EID_HT_CAPABILITY) |
-	(1ULL << WLAN_EID_HT_INFORMATION);
+	(1ULL << WLAN_EID_HT_OPERATION);
 
 static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_mgmt *mgmt,
@@ -2348,8 +2322,7 @@
 		return;
 
 	if (ifmgd->assoc_data && !ifmgd->assoc_data->have_beacon &&
-	    compare_ether_addr(mgmt->bssid, ifmgd->assoc_data->bss->bssid)
-	    == 0) {
+	    ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
 		ieee802_11_parse_elems(mgmt->u.beacon.variable,
 				       len - baselen, &elems);
 
@@ -2364,7 +2337,7 @@
 	}
 
 	if (!ifmgd->associated ||
-	    compare_ether_addr(mgmt->bssid, ifmgd->associated->bssid))
+	    !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
 		return;
 	bssid = ifmgd->associated->bssid;
 
@@ -2431,10 +2404,8 @@
 
 	if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "%s: cancelling probereq poll due "
-			       "to a received beacon\n", sdata->name);
-		}
+		net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
+				    sdata->name);
 #endif
 		ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
 		mutex_lock(&local->iflist_mtx);
@@ -2468,11 +2439,13 @@
 	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
 		if (directed_tim) {
 			if (local->hw.conf.dynamic_ps_timeout > 0) {
-				local->hw.conf.flags &= ~IEEE80211_CONF_PS;
-				ieee80211_hw_config(local,
-						    IEEE80211_CONF_CHANGE_PS);
+				if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+					local->hw.conf.flags &= ~IEEE80211_CONF_PS;
+					ieee80211_hw_config(local,
+							    IEEE80211_CONF_CHANGE_PS);
+				}
 				ieee80211_send_nullfunc(local, sdata, 0);
-			} else {
+			} else if (!local->pspolling && sdata->u.mgd.powersave) {
 				local->pspolling = true;
 
 				/*
@@ -2504,31 +2477,14 @@
 			erp_valid, erp_value);
 
 
-	if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param &&
+	if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param &&
 	    !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
-		struct sta_info *sta;
 		struct ieee80211_supported_band *sband;
-		u16 ap_ht_cap_flags;
-
-		rcu_read_lock();
-
-		sta = sta_info_get(sdata, bssid);
-		if (WARN_ON(!sta)) {
-			rcu_read_unlock();
-			return;
-		}
 
 		sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
 
-		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
-				elems.ht_cap_elem, &sta->sta.ht_cap);
-
-		ap_ht_cap_flags = sta->sta.ht_cap.cap;
-
-		rcu_read_unlock();
-
-		changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
-					       bssid, ap_ht_cap_flags, true);
+		changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
+						  bssid, true);
 	}
 
 	/* Note: country IE parsing is done for us by cfg80211 */
@@ -3060,6 +3016,11 @@
 	struct sta_info *sta;
 	bool have_sta = false;
 	int err;
+	int ht_cfreq;
+	enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+	const u8 *ht_oper_ie;
+	const struct ieee80211_ht_operation *ht_oper = NULL;
+	struct ieee80211_supported_band *sband;
 
 	if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
 		return -EINVAL;
@@ -3081,17 +3042,76 @@
 	mutex_unlock(&local->mtx);
 
 	/* switch to the right channel */
+	sband = local->hw.wiphy->bands[cbss->channel->band];
+
+	ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
+
+	if (sband->ht_cap.ht_supported) {
+		ht_oper_ie = cfg80211_find_ie(WLAN_EID_HT_OPERATION,
+					      cbss->information_elements,
+					      cbss->len_information_elements);
+		if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper))
+			ht_oper = (void *)(ht_oper_ie + 2);
+	}
+
+	if (ht_oper) {
+		ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
+							  cbss->channel->band);
+		/* check that channel matches the right operating channel */
+		if (cbss->channel->center_freq != ht_cfreq) {
+			/*
+			 * It's possible that some APs are confused here;
+			 * Netgear WNDR3700 sometimes reports 4 higher than
+			 * the actual channel in association responses, but
+			 * since we look at probe response/beacon data here
+			 * it should be OK.
+			 */
+			printk(KERN_DEBUG
+			       "%s: Wrong control channel: center-freq: %d"
+			       " ht-cfreq: %d ht->primary_chan: %d"
+			       " band: %d. Disabling HT.\n",
+			       sdata->name, cbss->channel->center_freq,
+			       ht_cfreq, ht_oper->primary_chan,
+			       cbss->channel->band);
+			ht_oper = NULL;
+		}
+	}
+
+	if (ht_oper) {
+		channel_type = NL80211_CHAN_HT20;
+
+		if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+			switch (ht_oper->ht_param &
+					IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+				channel_type = NL80211_CHAN_HT40PLUS;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+				channel_type = NL80211_CHAN_HT40MINUS;
+				break;
+			}
+		}
+	}
+
+	if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
+		/* can only fail due to HT40+/- mismatch */
+		channel_type = NL80211_CHAN_HT20;
+		printk(KERN_DEBUG
+		       "%s: disabling 40 MHz due to multi-vif mismatch\n",
+		       sdata->name);
+		ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
+		WARN_ON(!ieee80211_set_channel_type(local, sdata,
+						    channel_type));
+	}
+
 	local->oper_channel = cbss->channel;
-	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+	ieee80211_hw_config(local, 0);
 
 	if (!have_sta) {
-		struct ieee80211_supported_band *sband;
 		u32 rates = 0, basic_rates = 0;
 		bool have_higher_than_11mbit;
 		int min_rate = INT_MAX, min_rate_index = -1;
 
-		sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
-
 		ieee80211_get_rates(sband, bss->supp_rates,
 				    bss->supp_rates_len,
 				    &rates, &basic_rates,
@@ -3141,7 +3161,7 @@
 			return err;
 		}
 	} else
-		WARN_ON_ONCE(compare_ether_addr(ifmgd->bssid, cbss->bssid));
+		WARN_ON_ONCE(!ether_addr_equal(ifmgd->bssid, cbss->bssid));
 
 	return 0;
 }
@@ -3281,7 +3301,7 @@
 		bool match;
 
 		/* keep sta info, bssid if matching */
-		match = compare_ether_addr(ifmgd->bssid, req->bss->bssid) == 0;
+		match = ether_addr_equal(ifmgd->bssid, req->bss->bssid);
 		ieee80211_destroy_auth_data(sdata, match);
 	}
 
@@ -3311,7 +3331,7 @@
 	/* Also disable HT if we don't support it or the AP doesn't use WMM */
 	sband = local->hw.wiphy->bands[req->bss->channel->band];
 	if (!sband->ht_cap.ht_supported ||
-	    local->hw.queues < 4 || !bss->wmm_used)
+	    local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used)
 		ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
 
 	memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -3334,11 +3354,12 @@
 		ifmgd->ap_smps = ifmgd->req_smps;
 
 	assoc_data->capability = req->bss->capability;
-	assoc_data->wmm = bss->wmm_used && (local->hw.queues >= 4);
+	assoc_data->wmm = bss->wmm_used &&
+			  (local->hw.queues >= IEEE80211_NUM_ACS);
 	assoc_data->supp_rates = bss->supp_rates;
 	assoc_data->supp_rates_len = bss->supp_rates_len;
-	assoc_data->ht_information_ie =
-		ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
+	assoc_data->ht_operation_ie =
+		ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
 
 	if (bss->wmm_used && bss->uapsd_supported &&
 	    (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
@@ -3440,7 +3461,7 @@
 	       sdata->name, req->bssid, req->reason_code);
 
 	if (ifmgd->associated &&
-	    compare_ether_addr(ifmgd->associated->bssid, req->bssid) == 0)
+	    ether_addr_equal(ifmgd->associated->bssid, req->bssid))
 		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
 				       req->reason_code, true, frame_buf);
 	else
@@ -3497,7 +3518,7 @@
 	return 0;
 }
 
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata)
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index ef8eba1..af1c4e2 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -127,6 +127,10 @@
 		drv_remove_interface(local, sdata);
 	}
 
+	sdata = rtnl_dereference(local->monitor_sdata);
+	if (sdata)
+		drv_remove_interface(local, sdata);
+
 	/* stop hardware - this must stop RX */
 	if (local->open_count)
 		ieee80211_stop_device(local);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index fbb1efd..6e4fd32 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -17,6 +17,7 @@
 #include <net/mac80211.h>
 #include "ieee80211_i.h"
 #include "sta_info.h"
+#include "driver-ops.h"
 
 struct rate_control_ref {
 	struct ieee80211_local *local;
@@ -63,8 +64,7 @@
 
 static inline void rate_control_rate_update(struct ieee80211_local *local,
 				    struct ieee80211_supported_band *sband,
-				    struct sta_info *sta, u32 changed,
-				    enum nl80211_channel_type oper_chan_type)
+				    struct sta_info *sta, u32 changed)
 {
 	struct rate_control_ref *ref = local->rate_ctrl;
 	struct ieee80211_sta *ista = &sta->sta;
@@ -72,7 +72,8 @@
 
 	if (ref && ref->ops->rate_update)
 		ref->ops->rate_update(ref->priv, sband, ista,
-				      priv_sta, changed, oper_chan_type);
+				      priv_sta, changed);
+	drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
 }
 
 static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index b39dda5..79633ae 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -334,14 +334,15 @@
 
 
 static void
-calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d,
+calc_rate_durations(enum ieee80211_band band,
+		    struct minstrel_rate *d,
 		    struct ieee80211_rate *rate)
 {
 	int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
 
-	d->perfect_tx_time = ieee80211_frame_duration(local, 1200,
+	d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
 			rate->bitrate, erp, 1);
-	d->ack_time = ieee80211_frame_duration(local, 10,
+	d->ack_time = ieee80211_frame_duration(band, 10,
 			rate->bitrate, erp, 1);
 }
 
@@ -379,14 +380,14 @@
 {
 	struct minstrel_sta_info *mi = priv_sta;
 	struct minstrel_priv *mp = priv;
-	struct ieee80211_local *local = hw_to_local(mp->hw);
 	struct ieee80211_rate *ctl_rate;
 	unsigned int i, n = 0;
 	unsigned int t_slot = 9; /* FIXME: get real slot time */
 
 	mi->lowest_rix = rate_lowest_index(sband, sta);
 	ctl_rate = &sband->bitrates[mi->lowest_rix];
-	mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate,
+	mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
+				ctl_rate->bitrate,
 				!!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
 
 	for (i = 0; i < sband->n_bitrates; i++) {
@@ -402,7 +403,7 @@
 
 		mr->rix = i;
 		mr->bitrate = sband->bitrates[i].bitrate / 5;
-		calc_rate_durations(local, mr, &sband->bitrates[i]);
+		calc_rate_durations(sband->band, mr, &sband->bitrates[i]);
 
 		/* calculate maximum number of retransmissions before
 		 * fallback (based on maximum segment size) */
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 16e0b27..2d1acc6 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -686,14 +686,12 @@
 
 static void
 minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
-                        struct ieee80211_sta *sta, void *priv_sta,
-			enum nl80211_channel_type oper_chan_type)
+                        struct ieee80211_sta *sta, void *priv_sta)
 {
 	struct minstrel_priv *mp = priv;
 	struct minstrel_ht_sta_priv *msp = priv_sta;
 	struct minstrel_ht_sta *mi = &msp->ht;
 	struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
-	struct ieee80211_local *local = hw_to_local(mp->hw);
 	u16 sta_cap = sta->ht_cap.cap;
 	int n_supported = 0;
 	int ack_dur;
@@ -712,8 +710,8 @@
 	memset(mi, 0, sizeof(*mi));
 	mi->stats_update = jiffies;
 
-	ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1);
-	mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur;
+	ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1);
+	mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur;
 	mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
 
 	mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
@@ -735,10 +733,6 @@
 	if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
 		mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
 
-	if (oper_chan_type != NL80211_CHAN_HT40MINUS &&
-	    oper_chan_type != NL80211_CHAN_HT40PLUS)
-		sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-
 	smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
 		IEEE80211_HT_CAP_SM_PS_SHIFT;
 
@@ -788,17 +782,15 @@
 minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
                       struct ieee80211_sta *sta, void *priv_sta)
 {
-	struct minstrel_priv *mp = priv;
-
-	minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type);
+	minstrel_ht_update_caps(priv, sband, sta, priv_sta);
 }
 
 static void
 minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
                         struct ieee80211_sta *sta, void *priv_sta,
-                        u32 changed, enum nl80211_channel_type oper_chan_type)
+                        u32 changed)
 {
-	minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type);
+	minstrel_ht_update_caps(priv, sband, sta, priv_sta);
 }
 
 static void *
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index d64e285..8257a09 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -426,6 +426,7 @@
 
 	if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
 	    test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+	    test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
 	    local->sched_scanning)
 		return ieee80211_scan_rx(rx->sdata, skb);
 
@@ -491,12 +492,12 @@
 			if (ieee80211_has_tods(hdr->frame_control) ||
 				!ieee80211_has_fromds(hdr->frame_control))
 				return RX_DROP_MONITOR;
-			if (compare_ether_addr(hdr->addr3, dev_addr) == 0)
+			if (ether_addr_equal(hdr->addr3, dev_addr))
 				return RX_DROP_MONITOR;
 		} else {
 			if (!ieee80211_has_a4(hdr->frame_control))
 				return RX_DROP_MONITOR;
-			if (compare_ether_addr(hdr->addr4, dev_addr) == 0)
+			if (ether_addr_equal(hdr->addr4, dev_addr))
 				return RX_DROP_MONITOR;
 		}
 	}
@@ -794,8 +795,7 @@
 
 	/* reset session timer */
 	if (tid_agg_rx->timeout)
-		mod_timer(&tid_agg_rx->session_timer,
-			  TU_TO_EXP_TIME(tid_agg_rx->timeout));
+		tid_agg_rx->last_rx = jiffies;
 
 	/* if this mpdu is fragmented - terminate rx aggregation session */
 	sc = le16_to_cpu(hdr->seq_ctrl);
@@ -1275,7 +1275,7 @@
 	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
 		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
 						NL80211_IFTYPE_ADHOC);
-		if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
+		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid)) {
 			sta->last_rx = jiffies;
 			if (ieee80211_is_data(hdr->frame_control)) {
 				sta->last_rx_rate_idx = status->rate_idx;
@@ -1438,8 +1438,8 @@
 		 */
 		if (((hdr->frame_control ^ f_hdr->frame_control) &
 		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
-		    compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
-		    compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
+		    !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
+		    !ether_addr_equal(hdr->addr2, f_hdr->addr2))
 			continue;
 
 		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
@@ -1714,8 +1714,8 @@
 	 * of whether the frame was encrypted or not.
 	 */
 	if (ehdr->h_proto == rx->sdata->control_port_protocol &&
-	    (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
-	     compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
+	    (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
+	     ether_addr_equal(ehdr->h_dest, pae_group_addr)))
 		return true;
 
 	if (ieee80211_802_1x_port_control(rx) ||
@@ -1752,9 +1752,9 @@
 			 * local net stack and back to the wireless medium
 			 */
 			xmit_skb = skb_copy(skb, GFP_ATOMIC);
-			if (!xmit_skb && net_ratelimit())
-				printk(KERN_DEBUG "%s: failed to clone "
-				       "multicast frame\n", dev->name);
+			if (!xmit_skb)
+				net_dbg_ratelimited("%s: failed to clone multicast frame\n",
+						    dev->name);
 		} else {
 			dsta = sta_info_get(sdata, skb->data);
 			if (dsta) {
@@ -1925,7 +1925,7 @@
 			mpp_path_add(proxied_addr, mpp_addr, sdata);
 		} else {
 			spin_lock_bh(&mppath->state_lock);
-			if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
+			if (!ether_addr_equal(mppath->mpp, mpp_addr))
 				memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
 			spin_unlock_bh(&mppath->state_lock);
 		}
@@ -1934,7 +1934,7 @@
 
 	/* Frame has reached destination.  Don't forward */
 	if (!is_multicast_ether_addr(hdr->addr1) &&
-	    compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
+	    ether_addr_equal(sdata->vif.addr, hdr->addr3))
 		return RX_CONTINUE;
 
 	q = ieee80211_select_queue_80211(local, skb, hdr);
@@ -1957,9 +1957,8 @@
 
 	fwd_skb = skb_copy(skb, GFP_ATOMIC);
 	if (!fwd_skb) {
-		if (net_ratelimit())
-			printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
-					sdata->name);
+		net_dbg_ratelimited("%s: failed to clone mesh frame\n",
+				    sdata->name);
 		goto out;
 	}
 
@@ -2122,13 +2121,13 @@
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *resp;
 
-	if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
+	if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
 		/* Not to own unicast address */
 		return;
 	}
 
-	if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
-	    compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
+	if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
+	    !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
 		/* Not from the current AP or not associated yet. */
 		return;
 	}
@@ -2270,11 +2269,8 @@
 
 			sband = rx->local->hw.wiphy->bands[status->band];
 
-			rate_control_rate_update(
-				local, sband, rx->sta,
-				IEEE80211_RC_SMPS_CHANGED,
-				ieee80211_get_tx_channel_type(
-					local, local->_oper_channel_type));
+			rate_control_rate_update(local, sband, rx->sta,
+						 IEEE80211_RC_SMPS_CHANGED);
 			goto handled;
 		}
 		default:
@@ -2341,7 +2337,7 @@
 			if (sdata->vif.type != NL80211_IFTYPE_STATION)
 				break;
 
-			if (compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid))
+			if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
 				break;
 
 			goto queue;
@@ -2775,7 +2771,7 @@
 		if (!bssid && !sdata->u.mgd.use_4addr)
 			return 0;
 		if (!multicast &&
-		    compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
+		    !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
 			if (!(sdata->dev->flags & IFF_PROMISC) ||
 			    sdata->u.mgd.use_4addr)
 				return 0;
@@ -2793,8 +2789,7 @@
 				return 0;
 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
 		} else if (!multicast &&
-			   compare_ether_addr(sdata->vif.addr,
-					      hdr->addr1) != 0) {
+			   !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
 			if (!(sdata->dev->flags & IFF_PROMISC))
 				return 0;
 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2810,8 +2805,7 @@
 		break;
 	case NL80211_IFTYPE_MESH_POINT:
 		if (!multicast &&
-		    compare_ether_addr(sdata->vif.addr,
-				       hdr->addr1) != 0) {
+		    !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
 			if (!(sdata->dev->flags & IFF_PROMISC))
 				return 0;
 
@@ -2821,8 +2815,7 @@
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_AP:
 		if (!bssid) {
-			if (compare_ether_addr(sdata->vif.addr,
-					       hdr->addr1))
+			if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
 				return 0;
 		} else if (!ieee80211_bssid_match(bssid,
 					sdata->vif.addr)) {
@@ -2844,7 +2837,7 @@
 	case NL80211_IFTYPE_WDS:
 		if (bssid || !ieee80211_is_data(hdr->frame_control))
 			return 0;
-		if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
+		if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
 			return 0;
 		break;
 	default:
@@ -2921,6 +2914,7 @@
 		local->dot11ReceivedFragmentCount++;
 
 	if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+		     test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
 		     test_bit(SCAN_SW_SCANNING, &local->scanning)))
 		status->rx_flags |= IEEE80211_RX_IN_SCAN;
 
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index c70e176..169da07 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -29,20 +29,6 @@
 #define IEEE80211_CHANNEL_TIME (HZ / 33)
 #define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8)
 
-struct ieee80211_bss *
-ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
-		     u8 *ssid, u8 ssid_len)
-{
-	struct cfg80211_bss *cbss;
-
-	cbss = cfg80211_get_bss(local->hw.wiphy,
-				ieee80211_get_channel(local->hw.wiphy, freq),
-				bssid, ssid, ssid_len, 0, 0);
-	if (!cbss)
-		return NULL;
-	return (void *)cbss->priv;
-}
-
 static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
 {
 	struct ieee80211_bss *bss = (void *)cbss->priv;
@@ -208,7 +194,7 @@
 	presp = ieee80211_is_probe_resp(fc);
 	if (presp) {
 		/* ignore ProbeResp to foreign address */
-		if (compare_ether_addr(mgmt->da, sdata->vif.addr))
+		if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
 			return RX_DROP_MONITOR;
 
 		presp = true;
@@ -387,6 +373,57 @@
 	return 0;
 }
 
+static bool ieee80211_can_scan(struct ieee80211_local *local,
+			       struct ieee80211_sub_if_data *sdata)
+{
+	if (!list_empty(&local->work_list))
+		return false;
+
+	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+	    sdata->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
+				  IEEE80211_STA_CONNECTION_POLL))
+		return false;
+
+	return true;
+}
+
+void ieee80211_run_deferred_scan(struct ieee80211_local *local)
+{
+	lockdep_assert_held(&local->mtx);
+
+	if (!local->scan_req || local->scanning)
+		return;
+
+	if (!ieee80211_can_scan(local, local->scan_sdata))
+		return;
+
+	ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+				     round_jiffies_relative(0));
+}
+
+static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
+					    unsigned long *next_delay)
+{
+	int i;
+	struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+	enum ieee80211_band band = local->hw.conf.channel->band;
+
+	for (i = 0; i < local->scan_req->n_ssids; i++)
+		ieee80211_send_probe_req(
+			sdata, NULL,
+			local->scan_req->ssids[i].ssid,
+			local->scan_req->ssids[i].ssid_len,
+			local->scan_req->ie, local->scan_req->ie_len,
+			local->scan_req->rates[band], false,
+			local->scan_req->no_cck);
+
+	/*
+	 * After sending probe requests, wait for probe responses
+	 * on the channel.
+	 */
+	*next_delay = IEEE80211_CHANNEL_TIME;
+	local->next_scan_state = SCAN_DECISION;
+}
 
 static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
 				  struct cfg80211_scan_request *req)
@@ -399,7 +436,7 @@
 	if (local->scan_req)
 		return -EBUSY;
 
-	if (!list_empty(&local->work_list)) {
+	if (!ieee80211_can_scan(local, sdata)) {
 		/* wait for the work to finish/time out */
 		local->scan_req = req;
 		local->scan_sdata = sdata;
@@ -438,10 +475,47 @@
 	local->scan_req = req;
 	local->scan_sdata = sdata;
 
-	if (local->ops->hw_scan)
+	if (local->ops->hw_scan) {
 		__set_bit(SCAN_HW_SCANNING, &local->scanning);
-	else
+	} else if ((req->n_channels == 1) &&
+		   (req->channels[0]->center_freq ==
+		    local->hw.conf.channel->center_freq)) {
+
+		/* If we are scanning only on the current channel, then
+		 * we do not need to stop normal activities
+		 */
+		unsigned long next_delay;
+
+		__set_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning);
+
+		ieee80211_recalc_idle(local);
+
+		/* Notify driver scan is starting, keep order of operations
+		 * same as normal software scan, in case that matters. */
+		drv_sw_scan_start(local);
+
+		ieee80211_configure_filter(local); /* accept probe-responses */
+
+		/* We need to ensure power level is at max for scanning. */
+		ieee80211_hw_config(local, 0);
+
+		if ((req->channels[0]->flags &
+		     IEEE80211_CHAN_PASSIVE_SCAN) ||
+		    !local->scan_req->n_ssids) {
+			next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+		} else {
+			ieee80211_scan_state_send_probe(local, &next_delay);
+			next_delay = IEEE80211_CHANNEL_TIME;
+		}
+
+		/* Now, just wait a bit and we are all done! */
+		ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
+					     next_delay);
+		return 0;
+	} else {
+		/* Do normal software scan */
 		__set_bit(SCAN_SW_SCANNING, &local->scanning);
+	}
 
 	ieee80211_recalc_idle(local);
 
@@ -598,30 +672,6 @@
 	local->next_scan_state = SCAN_SEND_PROBE;
 }
 
-static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
-					    unsigned long *next_delay)
-{
-	int i;
-	struct ieee80211_sub_if_data *sdata = local->scan_sdata;
-	enum ieee80211_band band = local->hw.conf.channel->band;
-
-	for (i = 0; i < local->scan_req->n_ssids; i++)
-		ieee80211_send_probe_req(
-			sdata, NULL,
-			local->scan_req->ssids[i].ssid,
-			local->scan_req->ssids[i].ssid_len,
-			local->scan_req->ie, local->scan_req->ie_len,
-			local->scan_req->rates[band], false,
-			local->scan_req->no_cck);
-
-	/*
-	 * After sending probe requests, wait for probe responses
-	 * on the channel.
-	 */
-	*next_delay = IEEE80211_CHANNEL_TIME;
-	local->next_scan_state = SCAN_DECISION;
-}
-
 static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
 					 unsigned long *next_delay)
 {
@@ -672,6 +722,12 @@
 
 	sdata = local->scan_sdata;
 
+	/* When scanning on-channel, the first callback means the scan is completed. */
+	if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) {
+		aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
+		goto out_complete;
+	}
+
 	if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) {
 		aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
 		goto out_complete;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 38137cb..f5b1638 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -102,7 +102,7 @@
 				    lockdep_is_held(&local->sta_mtx));
 	while (sta) {
 		if (sta->sdata == sdata &&
-		    compare_ether_addr(sta->sta.addr, addr) == 0)
+		    ether_addr_equal(sta->sta.addr, addr))
 			break;
 		sta = rcu_dereference_check(sta->hnext,
 					    lockdep_is_held(&local->sta_mtx));
@@ -125,7 +125,7 @@
 	while (sta) {
 		if ((sta->sdata == sdata ||
 		     (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
-		    compare_ether_addr(sta->sta.addr, addr) == 0)
+		    ether_addr_equal(sta->sta.addr, addr))
 			break;
 		sta = rcu_dereference_check(sta->hnext,
 					    lockdep_is_held(&local->sta_mtx));
@@ -302,7 +302,7 @@
 	if (unlikely(!ieee80211_sdata_running(sdata)))
 		return -ENETDOWN;
 
-	if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
+	if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
 		    is_multicast_ether_addr(sta->sta.addr)))
 		return -EINVAL;
 
@@ -912,7 +912,7 @@
 	 */
 	for_each_sta_info(hw_to_local(hw), addr, sta, nxt) {
 		if (localaddr &&
-		    compare_ether_addr(sta->sdata->vif.addr, localaddr) != 0)
+		    !ether_addr_equal(sta->sdata->vif.addr, localaddr))
 			continue;
 		if (!sta->uploaded)
 			return NULL;
@@ -1195,13 +1195,15 @@
 			    ieee80211_is_qos_nullfunc(hdr->frame_control))
 				qoshdr = ieee80211_get_qos_ctl(hdr);
 
-			/* set EOSP for the frame */
-			if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
-			    qoshdr && skb_queue_empty(&frames))
-				*qoshdr |= IEEE80211_QOS_CTL_EOSP;
+			/* end service period after last frame */
+			if (skb_queue_empty(&frames)) {
+				if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
+				    qoshdr)
+					*qoshdr |= IEEE80211_QOS_CTL_EOSP;
 
-			info->flags |= IEEE80211_TX_STATUS_EOSP |
-				       IEEE80211_TX_CTL_REQ_TX_STATUS;
+				info->flags |= IEEE80211_TX_STATUS_EOSP |
+					       IEEE80211_TX_CTL_REQ_TX_STATUS;
+			}
 
 			if (qoshdr)
 				tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK);
@@ -1415,15 +1417,19 @@
 		if (sta->sta_state == IEEE80211_STA_AUTH) {
 			set_bit(WLAN_STA_ASSOC, &sta->_flags);
 		} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
-			if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
-				atomic_dec(&sta->sdata->u.ap.num_sta_authorized);
+			if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
+			    (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+			     !sta->sdata->u.vlan.sta))
+				atomic_dec(&sta->sdata->bss->num_mcast_sta);
 			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
 		}
 		break;
 	case IEEE80211_STA_AUTHORIZED:
 		if (sta->sta_state == IEEE80211_STA_ASSOC) {
-			if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
-				atomic_inc(&sta->sdata->u.ap.num_sta_authorized);
+			if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
+			    (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+			     !sta->sdata->u.vlan.sta))
+				atomic_inc(&sta->sdata->bss->num_mcast_sta);
 			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
 		}
 		break;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index ab05768..3bb24a1 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -55,6 +55,7 @@
  * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
  * @WLAN_STA_INSERTED: This station is inserted into the hash table.
  * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
+ * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid.
  */
 enum ieee80211_sta_info_flags {
 	WLAN_STA_AUTH,
@@ -76,6 +77,7 @@
 	WLAN_STA_4ADDR_EVENT,
 	WLAN_STA_INSERTED,
 	WLAN_STA_RATE_CONTROL,
+	WLAN_STA_TOFFSET_KNOWN,
 };
 
 #define STA_TID_NUM 16
@@ -101,6 +103,7 @@
  * @dialog_token: dialog token for aggregation session
  * @timeout: session timeout value to be filled in ADDBA requests
  * @state: session state (see above)
+ * @last_tx: jiffies of last tx activity
  * @stop_initiator: initiator of a session stop
  * @tx_stop: TX DelBA frame when stopping
  * @buf_size: reorder buffer size at receiver
@@ -122,6 +125,7 @@
 	struct timer_list addba_resp_timer;
 	struct sk_buff_head pending;
 	unsigned long state;
+	unsigned long last_tx;
 	u16 timeout;
 	u8 dialog_token;
 	u8 stop_initiator;
@@ -139,6 +143,7 @@
  * @reorder_time: jiffies when skb was added
  * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
  * @reorder_timer: releases expired frames from the reorder buffer.
+ * @last_rx: jiffies of last rx activity
  * @head_seq_num: head sequence number in reordering buffer.
  * @stored_mpdu_num: number of MPDUs in reordering buffer
  * @ssn: Starting Sequence Number expected to be aggregated.
@@ -163,6 +168,7 @@
 	unsigned long *reorder_time;
 	struct timer_list session_timer;
 	struct timer_list reorder_timer;
+	unsigned long last_rx;
 	u16 head_seq_num;
 	u16 stored_mpdu_num;
 	u16 ssn;
@@ -264,6 +270,7 @@
  * @plink_timeout: timeout of peer link
  * @plink_timer: peer link watch timer
  * @plink_timer_was_running: used by suspend/resume to restore timers
+ * @t_offset: timing offset relative to this host
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -353,6 +360,9 @@
 	enum nl80211_plink_state plink_state;
 	u32 plink_timeout;
 	struct timer_list plink_timer;
+	s64 t_offset;
+	s64 t_offset_setpoint;
+	enum nl80211_channel_type ch_type;
 #endif
 
 #ifdef CONFIG_MAC80211_DEBUGFS
@@ -365,6 +375,8 @@
 	unsigned int lost_packets;
 	unsigned int beacon_loss_count;
 
+	bool supports_40mhz;
+
 	/* keep last! */
 	struct ieee80211_sta sta;
 };
@@ -490,7 +502,7 @@
 		nxt = _sta ? rcu_dereference(_sta->hnext) : NULL	\
 	     )								\
 	/* compare address and run code only if it matches */		\
-	if (compare_ether_addr(_sta->sta.addr, (_addr)) == 0)
+	if (ether_addr_equal(_sta->sta.addr, (_addr)))
 
 /*
  * Get STA info by index, BROKEN!
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 5f8f89e..28cfa98 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -355,7 +355,13 @@
 	int rtap_len;
 
 	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
-		if (info->status.rates[i].idx < 0) {
+		if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+		    !(info->flags & IEEE80211_TX_STAT_AMPDU)) {
+			/* just the first aggr frame carries status info */
+			info->status.rates[i].idx = -1;
+			info->status.rates[i].count = 0;
+			break;
+		} else if (info->status.rates[i].idx < 0) {
 			break;
 		} else if (i >= hw->max_report_rates) {
 			/* the HW cannot have attempted that rate */
@@ -378,7 +384,7 @@
 
 	for_each_sta_info(local, hdr->addr1, sta, tmp) {
 		/* skip wrong virtual interface */
-		if (compare_ether_addr(hdr->addr2, sta->sdata->vif.addr))
+		if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
 			continue;
 
 		if (info->flags & IEEE80211_TX_STATUS_EOSP)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 782a601..5f827a6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -159,7 +159,7 @@
 		/* Time needed to transmit ACK
 		 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
 		 * to closest integer */
-		dur = ieee80211_frame_duration(local, 10, rate, erp,
+		dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
 				tx->sdata->vif.bss_conf.use_short_preamble);
 
 	if (next_frag_len) {
@@ -167,7 +167,7 @@
 		 * transmit next fragment plus ACK and 2 x SIFS. */
 		dur *= 2; /* ACK + SIFS */
 		/* next fragment */
-		dur += ieee80211_frame_duration(local, next_frag_len,
+		dur += ieee80211_frame_duration(sband->band, next_frag_len,
 				txrate->bitrate, erp,
 				tx->sdata->vif.bss_conf.use_short_preamble);
 	}
@@ -230,9 +230,9 @@
 	 * changed via debugfs, user needs to reassociate manually to have
 	 * everything in sync.
 	 */
-	if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
-	    && (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
-	    && skb_get_queue_mapping(tx->skb) == 0)
+	if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
+	    (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
+	    skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
 		return TX_CONTINUE;
 
 	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
@@ -306,7 +306,7 @@
 		}
 	} else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP &&
 			    ieee80211_is_data(hdr->frame_control) &&
-			    !atomic_read(&tx->sdata->u.ap.num_sta_authorized))) {
+			    !atomic_read(&tx->sdata->u.ap.num_mcast_sta))) {
 		/*
 		 * No associated STAs - no need to send multicast
 		 * frames.
@@ -400,6 +400,8 @@
 		return TX_CONTINUE;
 
 	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
+	if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+		info->hw_queue = tx->sdata->vif.cab_queue;
 
 	/* device releases frame after DTIM beacon */
 	if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
@@ -411,9 +413,8 @@
 
 	if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
-		if (net_ratelimit())
-			printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
-			       tx->sdata->name);
+		net_dbg_ratelimited("%s: BC TX buffer full - dropping the oldest frame\n",
+				    tx->sdata->name);
 #endif
 		dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
 	} else
@@ -474,10 +475,8 @@
 		if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
 			struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
-			if (net_ratelimit())
-				printk(KERN_DEBUG "%s: STA %pM TX buffer for "
-				       "AC %d full - dropping oldest frame\n",
-				       tx->sdata->name, sta->sta.addr, ac);
+			net_dbg_ratelimited("%s: STA %pM TX buffer for AC %d full - dropping oldest frame\n",
+					    tx->sdata->name, sta->sta.addr, ac);
 #endif
 			dev_kfree_skb(old);
 		} else
@@ -1118,8 +1117,7 @@
 
 	/* reset session timer */
 	if (reset_agg_timer && tid_tx->timeout)
-		mod_timer(&tid_tx->session_timer,
-			  TU_TO_EXP_TIME(tid_tx->timeout));
+		tid_tx->last_tx = jiffies;
 
 	return queued;
 }
@@ -1158,7 +1156,8 @@
 		tx->sta = rcu_dereference(sdata->u.vlan.sta);
 		if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
 			return TX_DROP;
-	} else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+	} else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+		   tx->sdata->control_port_protocol == tx->skb->protocol) {
 		tx->sta = sta_info_get_bss(sdata, hdr->addr1);
 	}
 	if (!tx->sta)
@@ -1215,11 +1214,19 @@
 			       bool txpending)
 {
 	struct sk_buff *skb, *tmp;
-	struct ieee80211_tx_info *info;
 	unsigned long flags;
 
 	skb_queue_walk_safe(skbs, skb, tmp) {
-		int q = skb_get_queue_mapping(skb);
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		int q = info->hw_queue;
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+		if (WARN_ON_ONCE(q >= local->hw.queues)) {
+			__skb_unlink(skb, skbs);
+			dev_kfree_skb(skb);
+			continue;
+		}
+#endif
 
 		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 		if (local->queue_stop_reasons[q] ||
@@ -1241,7 +1248,6 @@
 		}
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
-		info = IEEE80211_SKB_CB(skb);
 		info->control.vif = vif;
 		info->control.sta = sta;
 
@@ -1284,8 +1290,16 @@
 
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_MONITOR:
-		sdata = NULL;
-		vif = NULL;
+		sdata = rcu_dereference(local->monitor_sdata);
+		if (sdata) {
+			vif = &sdata->vif;
+			info->hw_queue =
+				vif->hw_queue[skb_get_queue_mapping(skb)];
+		} else if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
+			dev_kfree_skb(skb);
+			return true;
+		} else
+			vif = NULL;
 		break;
 	case NL80211_IFTYPE_AP_VLAN:
 		sdata = container_of(sdata->bss,
@@ -1400,6 +1414,12 @@
 	tx.channel = local->hw.conf.channel;
 	info->band = tx.channel->band;
 
+	/* set up hw_queue value early */
+	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
+	    !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
+		info->hw_queue =
+			sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
+
 	if (!invoke_tx_handlers(&tx))
 		result = __ieee80211_tx(local, &tx.skbs, led_len,
 					tx.sta, txpending);
@@ -1468,12 +1488,12 @@
 
 	if (ieee80211_vif_is_mesh(&sdata->vif) &&
 	    ieee80211_is_data(hdr->frame_control) &&
-		!is_multicast_ether_addr(hdr->addr1))
-			if (mesh_nexthop_resolve(skb, sdata)) {
-				/* skb queued: don't free */
-				rcu_read_unlock();
-				return;
-			}
+	    !is_multicast_ether_addr(hdr->addr1) &&
+	    mesh_nexthop_resolve(skb, sdata)) {
+		/* skb queued: don't free */
+		rcu_read_unlock();
+		return;
+	}
 
 	ieee80211_set_qos_hdr(sdata, skb);
 	ieee80211_tx(sdata, skb, false);
@@ -1642,7 +1662,7 @@
 	    skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
 		u8 *payload = (u8 *)hdr + hdrlen;
 
-		if (compare_ether_addr(payload, rfc1042_header) == 0)
+		if (ether_addr_equal(payload, rfc1042_header))
 			skb->protocol = cpu_to_be16((payload[6] << 8) |
 						    payload[7]);
 	}
@@ -1675,7 +1695,7 @@
 		    tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
 		    tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
 			continue;
-		if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) {
+		if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
 			sdata = tmp_sdata;
 			break;
 		}
@@ -1792,9 +1812,8 @@
 		 * is being proxied by a portal (i.e. portal address
 		 * differs from proxied address)
 		 */
-		if (compare_ether_addr(sdata->vif.addr,
-				       skb->data + ETH_ALEN) == 0 &&
-		    !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
+		if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) &&
+		    !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) {
 			hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
 					skb->data, skb->data + ETH_ALEN);
 			rcu_read_unlock();
@@ -1929,7 +1948,7 @@
 		wme_sta = true;
 
 	/* receiver and we are QoS enabled, use a QoS type frame */
-	if (wme_sta && local->hw.queues >= 4) {
+	if (wme_sta && local->hw.queues >= IEEE80211_NUM_ACS) {
 		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
 		hdrlen += 2;
 	}
@@ -1941,12 +1960,10 @@
 	if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
 		     !is_multicast_ether_addr(hdr.addr1) && !authorized &&
 		     (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
-		      compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) {
+		      !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
-		if (net_ratelimit())
-			printk(KERN_DEBUG "%s: dropped frame to %pM"
-			       " (unauthorized port)\n", dev->name,
-			       hdr.addr1);
+		net_dbg_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
+				    dev->name, hdr.addr1);
 #endif
 
 		I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
@@ -2170,7 +2187,6 @@
 void ieee80211_tx_pending(unsigned long data)
 {
 	struct ieee80211_local *local = (struct ieee80211_local *)data;
-	struct ieee80211_sub_if_data *sdata;
 	unsigned long flags;
 	int i;
 	bool txok;
@@ -2207,8 +2223,7 @@
 		}
 
 		if (skb_queue_empty(&local->pending[i]))
-			list_for_each_entry_rcu(sdata, &local->interfaces, list)
-				netif_wake_subqueue(sdata->dev, i);
+			ieee80211_propagate_queue_wake(local, i);
 	}
 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
@@ -2374,6 +2389,7 @@
 						 IEEE80211_STYPE_BEACON);
 	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
 		struct ieee80211_mgmt *mgmt;
+		struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 		u8 *pos;
 		int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
 			      sizeof(mgmt->u.beacon);
@@ -2383,6 +2399,10 @@
 			goto out;
 #endif
 
+		if (ifmsh->sync_ops)
+			ifmsh->sync_ops->adjust_tbtt(
+						sdata);
+
 		skb = dev_alloc_skb(local->tx_headroom +
 				    hdr_len +
 				    2 + /* NULL SSID */
@@ -2390,7 +2410,7 @@
 				    2 + 3 + /* DS params */
 				    2 + (IEEE80211_MAX_SUPP_RATES - 8) +
 				    2 + sizeof(struct ieee80211_ht_cap) +
-				    2 + sizeof(struct ieee80211_ht_info) +
+				    2 + sizeof(struct ieee80211_ht_operation) +
 				    2 + sdata->u.mesh.mesh_id_len +
 				    2 + sizeof(struct ieee80211_meshconf_ie) +
 				    sdata->u.mesh.ie_len);
@@ -2414,12 +2434,12 @@
 		*pos++ = WLAN_EID_SSID;
 		*pos++ = 0x0;
 
-		if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
+		if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
 		    mesh_add_ds_params_ie(skb, sdata) ||
-		    ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
+		    ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
 		    mesh_add_rsn_ie(skb, sdata) ||
 		    mesh_add_ht_cap_ie(skb, sdata) ||
-		    mesh_add_ht_info_ie(skb, sdata) ||
+		    mesh_add_ht_oper_ie(skb, sdata) ||
 		    mesh_add_meshid_ie(skb, sdata) ||
 		    mesh_add_meshconf_ie(skb, sdata) ||
 		    mesh_add_vendor_ies(skb, sdata)) {
@@ -2603,7 +2623,7 @@
 	pos = skb_put(skb, ie_ssid_len);
 	*pos++ = WLAN_EID_SSID;
 	*pos++ = ssid_len;
-	if (ssid)
+	if (ssid_len)
 		memcpy(pos, ssid, ssid_len);
 	pos += ssid_len;
 
@@ -2710,11 +2730,13 @@
 void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
 			  struct sk_buff *skb, int tid)
 {
+	int ac = ieee802_1d_to_ac[tid];
+
 	skb_set_mac_header(skb, 0);
 	skb_set_network_header(skb, 0);
 	skb_set_transport_header(skb, 0);
 
-	skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+	skb_set_queue_mapping(skb, ac);
 	skb->priority = tid;
 
 	/*
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 32f7a3b..22f2216b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -106,7 +106,7 @@
 	}
 }
 
-int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
+int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
 			     int rate, int erp, int short_preamble)
 {
 	int dur;
@@ -120,7 +120,7 @@
 	 * DIV_ROUND_UP() operations.
 	 */
 
-	if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) {
+	if (band == IEEE80211_BAND_5GHZ || erp) {
 		/*
 		 * OFDM:
 		 *
@@ -162,10 +162,10 @@
 /* Exported duration function for driver use */
 __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
 					struct ieee80211_vif *vif,
+					enum ieee80211_band band,
 					size_t frame_len,
 					struct ieee80211_rate *rate)
 {
-	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_sub_if_data *sdata;
 	u16 dur;
 	int erp;
@@ -179,7 +179,7 @@
 			erp = rate->flags & IEEE80211_RATE_ERP_G;
 	}
 
-	dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp,
+	dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp,
 				       short_preamble);
 
 	return cpu_to_le16(dur);
@@ -198,7 +198,7 @@
 	u16 dur;
 	struct ieee80211_supported_band *sband;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	sband = local->hw.wiphy->bands[frame_txctl->band];
 
 	short_preamble = false;
 
@@ -213,13 +213,13 @@
 	}
 
 	/* CTS duration */
-	dur = ieee80211_frame_duration(local, 10, rate->bitrate,
+	dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate,
 				       erp, short_preamble);
 	/* Data frame duration */
-	dur += ieee80211_frame_duration(local, frame_len, rate->bitrate,
+	dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
 					erp, short_preamble);
 	/* ACK duration */
-	dur += ieee80211_frame_duration(local, 10, rate->bitrate,
+	dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
 					erp, short_preamble);
 
 	return cpu_to_le16(dur);
@@ -239,7 +239,7 @@
 	u16 dur;
 	struct ieee80211_supported_band *sband;
 
-	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+	sband = local->hw.wiphy->bands[frame_txctl->band];
 
 	short_preamble = false;
 
@@ -253,11 +253,11 @@
 	}
 
 	/* Data frame duration */
-	dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
+	dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
 				       erp, short_preamble);
 	if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
 		/* ACK duration */
-		dur += ieee80211_frame_duration(local, 10, rate->bitrate,
+		dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
 						erp, short_preamble);
 	}
 
@@ -265,17 +265,45 @@
 }
 EXPORT_SYMBOL(ieee80211_ctstoself_duration);
 
+void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
+{
+	struct ieee80211_sub_if_data *sdata;
+
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		int ac;
+
+		if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
+			continue;
+
+		if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE &&
+		    local->queue_stop_reasons[sdata->vif.cab_queue] != 0)
+			continue;
+
+		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+			int ac_queue = sdata->vif.hw_queue[ac];
+
+			if (ac_queue == queue ||
+			    (sdata->vif.cab_queue == queue &&
+			     local->queue_stop_reasons[ac_queue] == 0 &&
+			     skb_queue_empty(&local->pending[ac_queue])))
+				netif_wake_subqueue(sdata->dev, ac);
+		}
+	}
+}
+
 static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
 				   enum queue_stop_reason reason)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
-	struct ieee80211_sub_if_data *sdata;
 
 	trace_wake_queue(local, queue, reason);
 
 	if (WARN_ON(queue >= hw->queues))
 		return;
 
+	if (!test_bit(reason, &local->queue_stop_reasons[queue]))
+		return;
+
 	__clear_bit(reason, &local->queue_stop_reasons[queue]);
 
 	if (local->queue_stop_reasons[queue] != 0)
@@ -284,11 +312,7 @@
 
 	if (skb_queue_empty(&local->pending[queue])) {
 		rcu_read_lock();
-		list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-			if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
-				continue;
-			netif_wake_subqueue(sdata->dev, queue);
-		}
+		ieee80211_propagate_queue_wake(local, queue);
 		rcu_read_unlock();
 	} else
 		tasklet_schedule(&local->tx_pending_tasklet);
@@ -323,11 +347,21 @@
 	if (WARN_ON(queue >= hw->queues))
 		return;
 
+	if (test_bit(reason, &local->queue_stop_reasons[queue]))
+		return;
+
 	__set_bit(reason, &local->queue_stop_reasons[queue]);
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(sdata, &local->interfaces, list)
-		netif_stop_subqueue(sdata->dev, queue);
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		int ac;
+
+		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+			if (sdata->vif.hw_queue[ac] == queue ||
+			    sdata->vif.cab_queue == queue)
+				netif_stop_subqueue(sdata->dev, ac);
+		}
+	}
 	rcu_read_unlock();
 }
 
@@ -354,8 +388,8 @@
 {
 	struct ieee80211_hw *hw = &local->hw;
 	unsigned long flags;
-	int queue = skb_get_queue_mapping(skb);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	int queue = info->hw_queue;
 
 	if (WARN_ON(!info->control.vif)) {
 		kfree_skb(skb);
@@ -379,10 +413,6 @@
 	int queue, i;
 
 	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-	for (i = 0; i < hw->queues; i++)
-		__ieee80211_stop_queue(hw, i,
-			IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
-
 	while ((skb = skb_dequeue(skbs))) {
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
@@ -391,7 +421,11 @@
 			continue;
 		}
 
-		queue = skb_get_queue_mapping(skb);
+		queue = info->hw_queue;
+
+		__ieee80211_stop_queue(hw, queue,
+				IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
+
 		__skb_queue_tail(&local->pending[queue], skb);
 	}
 
@@ -404,12 +438,6 @@
 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 }
 
-void ieee80211_add_pending_skbs(struct ieee80211_local *local,
-				struct sk_buff_head *skbs)
-{
-	ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
-}
-
 void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
 				    enum queue_stop_reason reason)
 {
@@ -684,9 +712,9 @@
 			else
 				elem_parse_failed = true;
 			break;
-		case WLAN_EID_HT_INFORMATION:
-			if (elen >= sizeof(struct ieee80211_ht_info))
-				elems->ht_info_elem = (void *)pos;
+		case WLAN_EID_HT_OPERATION:
+			if (elen >= sizeof(struct ieee80211_ht_operation))
+				elems->ht_operation = (void *)pos;
 			else
 				elem_parse_failed = true;
 			break;
@@ -775,19 +803,22 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_tx_queue_params qparam;
-	int queue;
+	int ac;
 	bool use_11b;
 	int aCWmin, aCWmax;
 
 	if (!local->ops->conf_tx)
 		return;
 
+	if (local->hw.queues < IEEE80211_NUM_ACS)
+		return;
+
 	memset(&qparam, 0, sizeof(qparam));
 
 	use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
 		 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
 
-	for (queue = 0; queue < local->hw.queues; queue++) {
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		/* Set defaults according to 802.11-2007 Table 7-37 */
 		aCWmax = 1023;
 		if (use_11b)
@@ -795,21 +826,21 @@
 		else
 			aCWmin = 15;
 
-		switch (queue) {
-		case 3: /* AC_BK */
+		switch (ac) {
+		case IEEE80211_AC_BK:
 			qparam.cw_max = aCWmax;
 			qparam.cw_min = aCWmin;
 			qparam.txop = 0;
 			qparam.aifs = 7;
 			break;
 		default: /* never happens but let's not leave undefined */
-		case 2: /* AC_BE */
+		case IEEE80211_AC_BE:
 			qparam.cw_max = aCWmax;
 			qparam.cw_min = aCWmin;
 			qparam.txop = 0;
 			qparam.aifs = 3;
 			break;
-		case 1: /* AC_VI */
+		case IEEE80211_AC_VI:
 			qparam.cw_max = aCWmin;
 			qparam.cw_min = (aCWmin + 1) / 2 - 1;
 			if (use_11b)
@@ -818,7 +849,7 @@
 				qparam.txop = 3008/32;
 			qparam.aifs = 2;
 			break;
-		case 0: /* AC_VO */
+		case IEEE80211_AC_VO:
 			qparam.cw_max = (aCWmin + 1) / 2 - 1;
 			qparam.cw_min = (aCWmin + 1) / 4 - 1;
 			if (use_11b)
@@ -831,8 +862,8 @@
 
 		qparam.uapsd = false;
 
-		sdata->tx_conf[queue] = qparam;
-		drv_conf_tx(local, sdata, queue, &qparam);
+		sdata->tx_conf[ac] = qparam;
+		drv_conf_tx(local, sdata, ac, &qparam);
 	}
 
 	/* after reinitialize QoS TX queues setting to default,
@@ -878,10 +909,8 @@
 	int i;
 
 	sband = local->hw.wiphy->bands[band];
-	if (!sband) {
-		WARN_ON(1);
-		sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-	}
+	if (WARN_ON(!sband))
+		return 1;
 
 	if (band == IEEE80211_BAND_2GHZ)
 		mandatory_flag = IEEE80211_RATE_MANDATORY_B;
@@ -1106,7 +1135,7 @@
 
 u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
 			    struct ieee802_11_elems *elems,
-			    enum ieee80211_band band)
+			    enum ieee80211_band band, u32 *basic_rates)
 {
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_rate *bitrates;
@@ -1115,10 +1144,8 @@
 	int i, j;
 	sband = local->hw.wiphy->bands[band];
 
-	if (!sband) {
-		WARN_ON(1);
-		sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-	}
+	if (WARN_ON(!sband))
+		return 1;
 
 	bitrates = sband->bitrates;
 	num_rates = sband->n_bitrates;
@@ -1127,15 +1154,25 @@
 		     elems->ext_supp_rates_len; i++) {
 		u8 rate = 0;
 		int own_rate;
+		bool is_basic;
 		if (i < elems->supp_rates_len)
 			rate = elems->supp_rates[i];
 		else if (elems->ext_supp_rates)
 			rate = elems->ext_supp_rates
 				[i - elems->supp_rates_len];
 		own_rate = 5 * (rate & 0x7f);
-		for (j = 0; j < num_rates; j++)
-			if (bitrates[j].bitrate == own_rate)
+		is_basic = !!(rate & 0x80);
+
+		if (is_basic && (rate & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
+			continue;
+
+		for (j = 0; j < num_rates; j++) {
+			if (bitrates[j].bitrate == own_rate) {
 				supp_rates |= BIT(j);
+				if (basic_rates && is_basic)
+					*basic_rates |= BIT(j);
+			}
+		}
 	}
 	return supp_rates;
 }
@@ -1210,6 +1247,16 @@
 				   IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
 
 	/* add interfaces */
+	sdata = rtnl_dereference(local->monitor_sdata);
+	if (sdata) {
+		res = drv_add_interface(local, sdata);
+		if (WARN_ON(res)) {
+			rcu_assign_pointer(local->monitor_sdata, NULL);
+			synchronize_net();
+			kfree(sdata);
+		}
+	}
+
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
 		    sdata->vif.type != NL80211_IFTYPE_MONITOR &&
@@ -1232,14 +1279,17 @@
 	mutex_unlock(&local->sta_mtx);
 
 	/* reconfigure tx conf */
-	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
-		    sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-		    !ieee80211_sdata_running(sdata))
-			continue;
+	if (hw->queues >= IEEE80211_NUM_ACS) {
+		list_for_each_entry(sdata, &local->interfaces, list) {
+			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+			    sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+			    !ieee80211_sdata_running(sdata))
+				continue;
 
-		for (i = 0; i < hw->queues; i++)
-			drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]);
+			for (i = 0; i < IEEE80211_NUM_ACS; i++)
+				drv_conf_tx(local, sdata, i,
+					    &sdata->tx_conf[i]);
+		}
 	}
 
 	/* reconfigure hardware */
@@ -1611,57 +1661,55 @@
 	return pos;
 }
 
-u8 *ieee80211_ie_build_ht_info(u8 *pos,
-			       struct ieee80211_sta_ht_cap *ht_cap,
+u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
 			       struct ieee80211_channel *channel,
-			       enum nl80211_channel_type channel_type)
+			       enum nl80211_channel_type channel_type,
+			       u16 prot_mode)
 {
-	struct ieee80211_ht_info *ht_info;
+	struct ieee80211_ht_operation *ht_oper;
 	/* Build HT Information */
-	*pos++ = WLAN_EID_HT_INFORMATION;
-	*pos++ = sizeof(struct ieee80211_ht_info);
-	ht_info = (struct ieee80211_ht_info *)pos;
-	ht_info->control_chan =
+	*pos++ = WLAN_EID_HT_OPERATION;
+	*pos++ = sizeof(struct ieee80211_ht_operation);
+	ht_oper = (struct ieee80211_ht_operation *)pos;
+	ht_oper->primary_chan =
 			ieee80211_frequency_to_channel(channel->center_freq);
 	switch (channel_type) {
 	case NL80211_CHAN_HT40MINUS:
-		ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+		ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
 		break;
 	case NL80211_CHAN_HT40PLUS:
-		ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+		ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
 		break;
 	case NL80211_CHAN_HT20:
 	default:
-		ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+		ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
 		break;
 	}
-	if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
-		ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
+	if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+	    channel_type != NL80211_CHAN_NO_HT &&
+	    channel_type != NL80211_CHAN_HT20)
+		ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
 
-	/*
-	 * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and
-	 * RIFS Mode are reserved in IBSS mode, therefore keep them at 0
-	 */
-	ht_info->operation_mode = 0x0000;
-	ht_info->stbc_param = 0x0000;
+	ht_oper->operation_mode = cpu_to_le16(prot_mode);
+	ht_oper->stbc_param = 0x0000;
 
 	/* It seems that Basic MCS set and Supported MCS set
 	   are identical for the first 10 bytes */
-	memset(&ht_info->basic_set, 0, 16);
-	memcpy(&ht_info->basic_set, &ht_cap->mcs, 10);
+	memset(&ht_oper->basic_set, 0, 16);
+	memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10);
 
-	return pos + sizeof(struct ieee80211_ht_info);
+	return pos + sizeof(struct ieee80211_ht_operation);
 }
 
 enum nl80211_channel_type
-ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info)
+ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
 {
 	enum nl80211_channel_type channel_type;
 
-	if (!ht_info)
+	if (!ht_oper)
 		return NL80211_CHAN_NO_HT;
 
-	switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+	switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
 	case IEEE80211_HT_PARAM_CHA_SEC_NONE:
 		channel_type = NL80211_CHAN_HT20;
 		break;
@@ -1678,13 +1726,15 @@
 	return channel_type;
 }
 
-int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
+int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
+			    struct sk_buff *skb, bool need_basic)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
 	int rate;
 	u8 i, rates, *pos;
+	u32 basic_rates = vif->bss_conf.basic_rates;
 
 	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
 	rates = sband->n_bitrates;
@@ -1698,20 +1748,25 @@
 	*pos++ = WLAN_EID_SUPP_RATES;
 	*pos++ = rates;
 	for (i = 0; i < rates; i++) {
+		u8 basic = 0;
+		if (need_basic && basic_rates & BIT(i))
+			basic = 0x80;
 		rate = sband->bitrates[i].bitrate;
-		*pos++ = (u8) (rate / 5);
+		*pos++ = basic | (u8) (rate / 5);
 	}
 
 	return 0;
 }
 
-int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
+int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif,
+				struct sk_buff *skb, bool need_basic)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
 	int rate;
 	u8 i, exrates, *pos;
+	u32 basic_rates = vif->bss_conf.basic_rates;
 
 	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
 	exrates = sband->n_bitrates;
@@ -1728,9 +1783,25 @@
 		*pos++ = WLAN_EID_EXT_SUPP_RATES;
 		*pos++ = exrates;
 		for (i = 8; i < sband->n_bitrates; i++) {
+			u8 basic = 0;
+			if (need_basic && basic_rates & BIT(i))
+				basic = 0x80;
 			rate = sband->bitrates[i].bitrate;
-			*pos++ = (u8) (rate / 5);
+			*pos++ = basic | (u8) (rate / 5);
 		}
 	}
 	return 0;
 }
+
+int ieee80211_ave_rssi(struct ieee80211_vif *vif)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+	if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) {
+		/* non-managed type interfaces */
+		return 0;
+	}
+	return ifmgd->ave_beacon_signal;
+}
+EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 89511be..c3d643a 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -52,6 +52,26 @@
 	}
 }
 
+static u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
+				     struct sk_buff *skb)
+{
+	/* in case we are a client verify acm is not set for this ac */
+	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
+		if (wme_downgrade_ac(skb)) {
+			/*
+			 * This should not really happen. The AP has marked all
+			 * lower ACs to require admission control which is not
+			 * a reasonable configuration. Allow the frame to be
+			 * transmitted using AC_BK as a workaround.
+			 */
+			break;
+		}
+	}
+
+	/* look up which queue to use for frames with this 1d tag */
+	return ieee802_1d_to_ac[skb->priority];
+}
+
 /* Indicate which queue to use for this fully formed 802.11 frame */
 u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
 				 struct sk_buff *skb,
@@ -59,7 +79,7 @@
 {
 	u8 *p;
 
-	if (local->hw.queues < 4)
+	if (local->hw.queues < IEEE80211_NUM_ACS)
 		return 0;
 
 	if (!ieee80211_is_data(hdr->frame_control)) {
@@ -86,9 +106,9 @@
 	const u8 *ra = NULL;
 	bool qos = false;
 
-	if (local->hw.queues < 4 || skb->len < 6) {
+	if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) {
 		skb->priority = 0; /* required for correct WPA/11i MIC */
-		return min_t(u16, local->hw.queues - 1, IEEE80211_AC_BE);
+		return 0;
 	}
 
 	rcu_read_lock();
@@ -139,26 +159,6 @@
 	return ieee80211_downgrade_queue(local, skb);
 }
 
-u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
-			      struct sk_buff *skb)
-{
-	/* in case we are a client verify acm is not set for this ac */
-	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
-		if (wme_downgrade_ac(skb)) {
-			/*
-			 * This should not really happen. The AP has marked all
-			 * lower ACs to require admission control which is not
-			 * a reasonable configuration. Allow the frame to be
-			 * transmitted using AC_BK as a workaround.
-			 */
-			break;
-		}
-	}
-
-	/* look up which queue to use for frames with this 1d tag */
-	return ieee802_1d_to_ac[skb->priority];
-}
-
 void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
 			   struct sk_buff *skb)
 {
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 94edceb..ca80818 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -22,8 +22,5 @@
 			   struct sk_buff *skb);
 void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
 			   struct sk_buff *skb);
-u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
-                              struct sk_buff *skb);
-
 
 #endif /* _WME_H */
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index c6e230e..b2650a9 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -122,9 +122,6 @@
 	enum work_action rma;
 	bool remain_off_channel = false;
 
-	if (local->scanning)
-		return;
-
 	/*
 	 * ieee80211_queue_work() should have picked up most cases,
 	 * here we'll pick the rest.
@@ -134,6 +131,11 @@
 
 	mutex_lock(&local->mtx);
 
+	if (local->scanning) {
+		mutex_unlock(&local->mtx);
+		return;
+	}
+
 	ieee80211_recalc_idle(local);
 
 	list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
@@ -226,13 +228,8 @@
 		run_again(local, jiffies + HZ/2);
 	}
 
-	if (list_empty(&local->work_list) && local->scan_req &&
-	    !local->scanning)
-		ieee80211_queue_delayed_work(&local->hw,
-					     &local->scan_work,
-					     round_jiffies_relative(0));
-
 	ieee80211_recalc_idle(local);
+	ieee80211_run_deferred_scan(local);
 
 	mutex_unlock(&local->mtx);
 
diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig
new file mode 100644
index 0000000..a967dda
--- /dev/null
+++ b/net/mac802154/Kconfig
@@ -0,0 +1,16 @@
+config MAC802154
+	tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
+	depends on IEEE802154 && EXPERIMENTAL
+	select CRC_CCITT
+	---help---
+	  This option enables the hardware independent IEEE 802.15.4
+	  networking stack for SoftMAC devices (the ones implementing
+	  only PHY level of IEEE 802.15.4 standard).
+
+	  Note: this implementation is neither certified, nor feature
+	  complete! Compatibility with other implementations hasn't
+	  been tested yet!
+
+	  If you plan to use HardMAC IEEE 802.15.4 devices, you can
+	  say N here. Alternatively you can say M to compile it as
+	  a module.
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
new file mode 100644
index 0000000..ec1bd3f
--- /dev/null
+++ b/net/mac802154/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MAC802154)	+= mac802154.o
+mac802154-objs		:= ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
new file mode 100644
index 0000000..e3edfb0
--- /dev/null
+++ b/net/mac802154/ieee802154_dev.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * Written by:
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ *
+ * Based on the code from 'linux-zigbee.sourceforge.net' project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <net/netlink.h>
+#include <linux/nl802154.h>
+#include <net/mac802154.h>
+#include <net/route.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+int mac802154_slave_open(struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	struct mac802154_priv *ipriv = priv->hw;
+	int res = 0;
+
+	if (ipriv->open_count++ == 0) {
+		res = ipriv->ops->start(&ipriv->hw);
+		WARN_ON(res);
+		if (res)
+			goto err;
+	}
+
+	if (ipriv->ops->ieee_addr) {
+		res = ipriv->ops->ieee_addr(&ipriv->hw, dev->dev_addr);
+		WARN_ON(res);
+		if (res)
+			goto err;
+		mac802154_dev_set_ieee_addr(dev);
+	}
+
+	netif_start_queue(dev);
+	return 0;
+err:
+	priv->hw->open_count--;
+
+	return res;
+}
+
+int mac802154_slave_close(struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	struct mac802154_priv *ipriv = priv->hw;
+
+	netif_stop_queue(dev);
+
+	if (!--ipriv->open_count)
+		ipriv->ops->stop(&ipriv->hw);
+
+	return 0;
+}
+
+static int
+mac802154_netdev_register(struct wpan_phy *phy, struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv;
+	struct mac802154_priv *ipriv;
+	int err;
+
+	ipriv = wpan_phy_priv(phy);
+
+	priv = netdev_priv(dev);
+	priv->dev = dev;
+	priv->hw = ipriv;
+
+	dev->needed_headroom = ipriv->hw.extra_tx_headroom;
+
+	SET_NETDEV_DEV(dev, &ipriv->phy->dev);
+
+	mutex_lock(&ipriv->slaves_mtx);
+	if (!ipriv->running) {
+		mutex_unlock(&ipriv->slaves_mtx);
+		return -ENODEV;
+	}
+	mutex_unlock(&ipriv->slaves_mtx);
+
+	err = register_netdev(dev);
+	if (err < 0)
+		return err;
+
+	rtnl_lock();
+	mutex_lock(&ipriv->slaves_mtx);
+	list_add_tail_rcu(&priv->list, &ipriv->slaves);
+	mutex_unlock(&ipriv->slaves_mtx);
+	rtnl_unlock();
+
+	return 0;
+}
+
+static void
+mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev)
+{
+	struct mac802154_sub_if_data *sdata;
+	ASSERT_RTNL();
+
+	sdata = netdev_priv(dev);
+
+	BUG_ON(sdata->hw->phy != phy);
+
+	mutex_lock(&sdata->hw->slaves_mtx);
+	list_del_rcu(&sdata->list);
+	mutex_unlock(&sdata->hw->slaves_mtx);
+
+	synchronize_rcu();
+	unregister_netdevice(sdata->dev);
+}
+
+static struct net_device *
+mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
+{
+	struct net_device *dev;
+	int err = -ENOMEM;
+
+	switch (type) {
+	case IEEE802154_DEV_MONITOR:
+		dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
+				   name, mac802154_monitor_setup);
+		break;
+	default:
+		dev = NULL;
+		err = -EINVAL;
+		break;
+	}
+	if (!dev)
+		goto err;
+
+	err = mac802154_netdev_register(phy, dev);
+	if (err)
+		goto err_free;
+
+	dev_hold(dev); /* we return an incremented device refcount */
+	return dev;
+
+err_free:
+	free_netdev(dev);
+err:
+	return ERR_PTR(err);
+}
+
+struct ieee802154_dev *
+ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
+{
+	struct wpan_phy *phy;
+	struct mac802154_priv *priv;
+	size_t priv_size;
+
+	if (!ops || !ops->xmit || !ops->ed || !ops->start ||
+	    !ops->stop || !ops->set_channel) {
+		printk(KERN_ERR
+		       "undefined IEEE802.15.4 device operations\n");
+		return NULL;
+	}
+
+	/* Ensure 32-byte alignment of our private data and hw private data.
+	 * We use the wpan_phy priv data for both our mac802154_priv and for
+	 * the driver's private data
+	 *
+	 * in memory it'll be like this:
+	 *
+	 * +-----------------------+
+	 * | struct wpan_phy       |
+	 * +-----------------------+
+	 * | struct mac802154_priv |
+	 * +-----------------------+
+	 * | driver's private data |
+	 * +-----------------------+
+	 *
+	 * Because the ieee802154 layer isn't aware of the driver and MAC
+	 * structures, align them here.
+	 */
+
+	priv_size = ALIGN(sizeof(*priv), NETDEV_ALIGN) + priv_data_len;
+
+	phy = wpan_phy_alloc(priv_size);
+	if (!phy) {
+		printk(KERN_ERR
+		       "failure to allocate master IEEE802.15.4 device\n");
+		return NULL;
+	}
+
+	priv = wpan_phy_priv(phy);
+	priv->hw.phy = priv->phy = phy;
+	priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN);
+	priv->ops = ops;
+
+	INIT_LIST_HEAD(&priv->slaves);
+	mutex_init(&priv->slaves_mtx);
+
+	return &priv->hw;
+}
+EXPORT_SYMBOL(ieee802154_alloc_device);
+
+void ieee802154_free_device(struct ieee802154_dev *hw)
+{
+	struct mac802154_priv *priv = mac802154_to_priv(hw);
+
+	BUG_ON(!list_empty(&priv->slaves));
+
+	wpan_phy_free(priv->phy);
+
+	mutex_destroy(&priv->slaves_mtx);
+}
+EXPORT_SYMBOL(ieee802154_free_device);
+
+int ieee802154_register_device(struct ieee802154_dev *dev)
+{
+	struct mac802154_priv *priv = mac802154_to_priv(dev);
+	int rc = -ENOMEM;
+
+	priv->dev_workqueue =
+		create_singlethread_workqueue(wpan_phy_name(priv->phy));
+	if (!priv->dev_workqueue)
+		goto out;
+
+	wpan_phy_set_dev(priv->phy, priv->hw.parent);
+
+	priv->phy->add_iface = mac802154_add_iface;
+	priv->phy->del_iface = mac802154_del_iface;
+
+	rc = wpan_phy_register(priv->phy);
+	if (rc < 0)
+		goto out_wq;
+
+	rtnl_lock();
+
+	mutex_lock(&priv->slaves_mtx);
+	priv->running = MAC802154_DEVICE_RUN;
+	mutex_unlock(&priv->slaves_mtx);
+
+	rtnl_unlock();
+
+	return 0;
+
+out_wq:
+	destroy_workqueue(priv->dev_workqueue);
+out:
+	return rc;
+}
+EXPORT_SYMBOL(ieee802154_register_device);
+
+void ieee802154_unregister_device(struct ieee802154_dev *dev)
+{
+	struct mac802154_priv *priv = mac802154_to_priv(dev);
+	struct mac802154_sub_if_data *sdata, *next;
+
+	flush_workqueue(priv->dev_workqueue);
+	destroy_workqueue(priv->dev_workqueue);
+
+	rtnl_lock();
+
+	mutex_lock(&priv->slaves_mtx);
+	priv->running = MAC802154_DEVICE_STOPPED;
+	mutex_unlock(&priv->slaves_mtx);
+
+	list_for_each_entry_safe(sdata, next, &priv->slaves, list) {
+		mutex_lock(&sdata->hw->slaves_mtx);
+		list_del(&sdata->list);
+		mutex_unlock(&sdata->hw->slaves_mtx);
+
+		unregister_netdevice(sdata->dev);
+	}
+
+	rtnl_unlock();
+
+	wpan_phy_unregister(priv->phy);
+}
+EXPORT_SYMBOL(ieee802154_unregister_device);
+
+MODULE_DESCRIPTION("IEEE 802.15.4 implementation");
+MODULE_LICENSE("GPL v2");
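
[Editor's note] The new ieee802154_dev.c above is the whole driver-facing lifecycle: ieee802154_alloc_device() reserves the driver's private area behind mac802154_priv, ieee802154_register_device() creates the per-device workqueue and registers the wpan_phy, and ieee802154_unregister_device()/ieee802154_free_device() tear it down in that order. The following is a minimal sketch of how a hypothetical SoftMAC driver ("fakelr", purely illustrative) might use this API. The ieee802154_ops member prototypes are inferred from the calls made in this patch (start/stop in mac802154_slave_open/close, xmit/set_channel in tx.c) and from include/net/mac802154.h, which is not part of this excerpt, so treat the exact signatures as assumptions rather than a reference.

/* Hypothetical "fakelr" driver skeleton -- illustrates the registration API
 * introduced above; not part of this patch set.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/mac802154.h>

static struct ieee802154_dev *fakelr_dev;

/* Prototypes below are assumptions based on how mac802154 invokes the ops
 * in ieee802154_dev.c and tx.c; the authoritative definitions live in
 * include/net/mac802154.h.
 */
static int fakelr_start(struct ieee802154_dev *dev)
{
	return 0;			/* power up the transceiver */
}

static void fakelr_stop(struct ieee802154_dev *dev)
{
	/* power the transceiver down */
}

static int fakelr_ed(struct ieee802154_dev *dev, u8 *level)
{
	*level = 0;			/* report energy-detection level */
	return 0;
}

static int fakelr_set_channel(struct ieee802154_dev *dev, int page, int channel)
{
	return 0;			/* retune the radio */
}

static int fakelr_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
{
	return 0;			/* 0 = success; nonzero makes tx.c retry */
}

static struct ieee802154_ops fakelr_ops = {
	.start		= fakelr_start,
	.stop		= fakelr_stop,
	.ed		= fakelr_ed,
	.set_channel	= fakelr_set_channel,
	.xmit		= fakelr_xmit,	/* all five are mandatory, see
					 * ieee802154_alloc_device() above */
};

static int __init fakelr_init(void)
{
	int err;

	/* The first argument reserves driver-private bytes reachable
	 * through fakelr_dev->priv.
	 */
	fakelr_dev = ieee802154_alloc_device(0, &fakelr_ops);
	if (!fakelr_dev)
		return -ENOMEM;

	/* A real driver would set fakelr_dev->parent to its bus device here;
	 * ieee802154_register_device() passes it to wpan_phy_set_dev().
	 */
	err = ieee802154_register_device(fakelr_dev);
	if (err)
		ieee802154_free_device(fakelr_dev);
	return err;
}

static void __exit fakelr_exit(void)
{
	/* Order matters: unregister first, then free (see above). */
	ieee802154_unregister_device(fakelr_dev);
	ieee802154_free_device(fakelr_dev);
}

module_init(fakelr_init);
module_exit(fakelr_exit);
MODULE_LICENSE("GPL v2");
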
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
new file mode 100644
index 0000000..789d9c9
--- /dev/null
+++ b/net/mac802154/mac802154.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+#ifndef MAC802154_H
+#define MAC802154_H
+
+/* mac802154 device private data */
+struct mac802154_priv {
+	struct ieee802154_dev hw;
+	struct ieee802154_ops *ops;
+
+	/* ieee802154 phy */
+	struct wpan_phy *phy;
+
+	int open_count;
+
+	/* As in mac80211, the slaves list is modified:
+	 * 1) under the RTNL
+	 * 2) protected by slaves_mtx;
+	 * 3) in an RCU manner
+	 *
+	 * So atomic readers can use any of these protection methods.
+	 */
+	struct list_head	slaves;
+	struct mutex		slaves_mtx;
+
+	/* This workqueue is used for scanning and other jobs so that they
+	 * do not interfere with the serial driver.
+	 */
+	struct workqueue_struct	*dev_workqueue;
+
+	/* SoftMAC device is registered and running. One can add subinterfaces.
+	 * This flag should be modified under slaves_mtx and the RTNL, so it
+	 * can be read using either protection method.
+	 */
+	bool running;
+};
+
+#define	MAC802154_DEVICE_STOPPED	0x00
+#define MAC802154_DEVICE_RUN		0x01
+
+/* Slave interface definition.
+ *
+ * Slaves represent typical network interfaces available from userspace.
+ * Each ieee802154 device/transceiver may have several slaves and is able
+ * to be associated with several networks at the same time.
+ */
+struct mac802154_sub_if_data {
+	struct list_head list; /* the ieee802154_priv->slaves list */
+
+	struct mac802154_priv *hw;
+	struct net_device *dev;
+
+	int type;
+
+	spinlock_t mib_lock;
+
+	__le16 pan_id;
+	__le16 short_addr;
+
+	u8 chan;
+	u8 page;
+
+	/* MAC BSN field */
+	u8 bsn;
+	/* MAC DSN field */
+	u8 dsn;
+};
+
+#define mac802154_to_priv(_hw)	container_of(_hw, struct mac802154_priv, hw)
+
+#define MAC802154_MAX_XMIT_ATTEMPTS	3
+
+#define MAC802154_CHAN_NONE		(~(u8)0) /* No channel is assigned */
+
+extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
+
+int mac802154_slave_open(struct net_device *dev);
+int mac802154_slave_close(struct net_device *dev);
+
+void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb);
+void mac802154_monitor_setup(struct net_device *dev);
+
+netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
+			 u8 page, u8 chan);
+
+/* MIB callbacks */
+void mac802154_dev_set_ieee_addr(struct net_device *dev);
+
+#endif /* MAC802154_H */
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
new file mode 100644
index 0000000..7a5d0e0
--- /dev/null
+++ b/net/mac802154/mac_cmd.c
@@ -0,0 +1,45 @@
+/*
+ * MAC commands interface
+ *
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee802154_netdev.h>
+#include <net/wpan-phy.h>
+#include <net/mac802154.h>
+
+#include "mac802154.h"
+
+struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	return to_phy(get_device(&priv->hw->phy->dev));
+}
+
+struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
+	.get_phy = mac802154_get_phy,
+};
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
new file mode 100644
index 0000000..ab59821
--- /dev/null
+++ b/net/mac802154/mib.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/if_arp.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+struct hw_addr_filt_notify_work {
+	struct work_struct work;
+	struct net_device *dev;
+	unsigned long changed;
+};
+
+struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+	BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+	return priv->hw;
+}
+
+static void hw_addr_notify(struct work_struct *work)
+{
+	struct hw_addr_filt_notify_work *nw = container_of(work,
+			struct hw_addr_filt_notify_work, work);
+	struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev);
+	int res;
+
+	res = hw->ops->set_hw_addr_filt(&hw->hw,
+					&hw->hw.hw_filt,
+					nw->changed);
+	if (res)
+		pr_debug("failed changed mask %lx\n", nw->changed);
+
+	kfree(nw);
+
+	return;
+}
+
+static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	struct hw_addr_filt_notify_work *work;
+
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+
+	INIT_WORK(&work->work, hw_addr_notify);
+	work->dev = dev;
+	work->changed = changed;
+	queue_work(priv->hw->dev_workqueue, &work->work);
+
+	return;
+}
+
+void mac802154_dev_set_ieee_addr(struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv = netdev_priv(dev);
+	struct mac802154_priv *mac = priv->hw;
+
+	if (mac->ops->set_hw_addr_filt &&
+	    memcmp(mac->hw.hw_filt.ieee_addr,
+		   dev->dev_addr, IEEE802154_ADDR_LEN)) {
+		memcpy(mac->hw.hw_filt.ieee_addr,
+		       dev->dev_addr, IEEE802154_ADDR_LEN);
+		set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED);
+	}
+}
diff --git a/net/mac802154/monitor.c b/net/mac802154/monitor.c
new file mode 100644
index 0000000..434a26f
--- /dev/null
+++ b/net/mac802154/monitor.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/crc-ccitt.h>
+
+#include <net/ieee802154.h>
+#include <net/mac802154.h>
+#include <net/netlink.h>
+#include <net/wpan-phy.h>
+#include <linux/nl802154.h>
+
+#include "mac802154.h"
+
+static netdev_tx_t mac802154_monitor_xmit(struct sk_buff *skb,
+					  struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv;
+	u8 chan, page;
+
+	priv = netdev_priv(dev);
+
+	/* FIXME: locking */
+	chan = priv->hw->phy->current_channel;
+	page = priv->hw->phy->current_page;
+
+	if (chan == MAC802154_CHAN_NONE) /* not initialized */
+		return NETDEV_TX_OK;
+
+	if (WARN_ON(page >= WPAN_NUM_PAGES) ||
+	    WARN_ON(chan >= WPAN_NUM_CHANNELS))
+		return NETDEV_TX_OK;
+
+	skb->skb_iif = dev->ifindex;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+
+	return mac802154_tx(priv->hw, skb, page, chan);
+}
+
+
+void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb)
+{
+	struct sk_buff *skb2;
+	struct mac802154_sub_if_data *sdata;
+	u16 crc = crc_ccitt(0, skb->data, skb->len);
+	u8 *data;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &priv->slaves, list) {
+		if (sdata->type != IEEE802154_DEV_MONITOR)
+			continue;
+
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (!skb2)
+			continue;
+		skb2->dev = sdata->dev;
+		skb2->pkt_type = PACKET_HOST;
+		data = skb_put(skb2, 2);
+		data[0] = crc & 0xff;
+		data[1] = crc >> 8;
+
+		netif_rx_ni(skb2);
+	}
+	rcu_read_unlock();
+}
+
+static const struct net_device_ops mac802154_monitor_ops = {
+	.ndo_open		= mac802154_slave_open,
+	.ndo_stop		= mac802154_slave_close,
+	.ndo_start_xmit		= mac802154_monitor_xmit,
+};
+
+void mac802154_monitor_setup(struct net_device *dev)
+{
+	struct mac802154_sub_if_data *priv;
+
+	dev->addr_len		= 0;
+	dev->hard_header_len	= 0;
+	dev->needed_tailroom	= 2; /* room for FCS */
+	dev->mtu		= IEEE802154_MTU;
+	dev->tx_queue_len	= 10;
+	dev->type		= ARPHRD_IEEE802154_MONITOR;
+	dev->flags		= IFF_NOARP | IFF_BROADCAST;
+	dev->watchdog_timeo	= 0;
+
+	dev->destructor		= free_netdev;
+	dev->netdev_ops		= &mac802154_monitor_ops;
+	dev->ml_priv		= &mac802154_mlme_reduced;
+
+	priv = netdev_priv(dev);
+	priv->type = IEEE802154_DEV_MONITOR;
+
+	priv->chan = MAC802154_CHAN_NONE; /* not initialized */
+	priv->page = 0;
+}
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
new file mode 100644
index 0000000..4a7d76d
--- /dev/null
+++ b/net/mac802154/rx.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/crc-ccitt.h>
+
+#include <net/mac802154.h>
+#include <net/ieee802154_netdev.h>
+
+#include "mac802154.h"
+
+/* The IEEE 802.15.4 standard defines 4 MAC packet types:
+ * - beacon frame
+ * - MAC command frame
+ * - acknowledgement frame
+ * - data frame
+ *
+ * and only data frames should be pushed to the upper layers; the other types
+ * are just internal MAC layer management information. So only data packets
+ * are sent to the networking queue, while all others are processed right
+ * here using the device workqueue.
+ */
+struct rx_work {
+	struct sk_buff *skb;
+	struct work_struct work;
+	struct ieee802154_dev *dev;
+	u8 lqi;
+};
+
+static void
+mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
+{
+	struct mac802154_priv *priv = mac802154_to_priv(hw);
+
+	mac_cb(skb)->lqi = lqi;
+	skb->protocol = htons(ETH_P_IEEE802154);
+	skb_reset_mac_header(skb);
+
+	BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
+
+	if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
+		u16 crc;
+
+		if (skb->len < 2) {
+			pr_debug("got invalid frame\n");
+			goto out;
+		}
+		crc = crc_ccitt(0, skb->data, skb->len);
+		if (crc) {
+			pr_debug("CRC mismatch\n");
+			goto out;
+		}
+		skb_trim(skb, skb->len - 2); /* CRC */
+	}
+
+	mac802154_monitors_rx(priv, skb);
+out:
+	dev_kfree_skb(skb);
+	return;
+}
+
+static void mac802154_rx_worker(struct work_struct *work)
+{
+	struct rx_work *rw = container_of(work, struct rx_work, work);
+	struct sk_buff *skb = rw->skb;
+
+	mac802154_subif_rx(rw->dev, skb, rw->lqi);
+	kfree(rw);
+}
+
+void
+ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi)
+{
+	struct mac802154_priv *priv = mac802154_to_priv(dev);
+	struct rx_work *work;
+
+	if (!skb)
+		return;
+
+	work = kzalloc(sizeof(struct rx_work), GFP_ATOMIC);
+	if (!work)
+		return;
+
+	INIT_WORK(&work->work, mac802154_rx_worker);
+	work->skb = skb;
+	work->dev = dev;
+	work->lqi = lqi;
+
+	queue_work(priv->dev_workqueue, &work->work);
+}
+EXPORT_SYMBOL(ieee802154_rx_irqsafe);
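
[Editor's note] ieee802154_rx_irqsafe() above is the receive entry point drivers use: it only allocates a small work item and defers everything else (CRC check, monitor delivery) to the device workqueue, so it is safe to call from hard-IRQ context. A sketch of a hypothetical driver receive handler built on it; the frame/LQI source is made up, only the mac802154 call is taken from this patch.

/* Hypothetical driver RX path -- the frame source is invented; the
 * ieee802154_rx_irqsafe() call is the API introduced above.
 */
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/mac802154.h>

static void fakelr_handle_rx(struct ieee802154_dev *dev,
			     const u8 *buf, unsigned int len, u8 lqi)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(len);
	if (!skb)
		return;			/* drop on allocation failure */

	/* The frame must still carry its 2-byte FCS unless the driver sets
	 * IEEE802154_HW_OMIT_CKSUM: mac802154_subif_rx() above verifies and
	 * strips it.
	 */
	memcpy(skb_put(skb, len), buf, len);

	/* Defer to the device workqueue; callable from IRQ context. */
	ieee802154_rx_irqsafe(dev, skb, lqi);
}
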
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
new file mode 100644
index 0000000..8781d8f9
--- /dev/null
+++ b/net/mac802154/tx.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/crc-ccitt.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
+ * packets through the workqueue.
+ */
+struct xmit_work {
+	struct sk_buff *skb;
+	struct work_struct work;
+	struct mac802154_priv *priv;
+	u8 chan;
+	u8 page;
+	u8 xmit_attempts;
+};
+
+static void mac802154_xmit_worker(struct work_struct *work)
+{
+	struct xmit_work *xw = container_of(work, struct xmit_work, work);
+	int res;
+
+	mutex_lock(&xw->priv->phy->pib_lock);
+	if (xw->priv->phy->current_channel != xw->chan ||
+	    xw->priv->phy->current_page != xw->page) {
+		res = xw->priv->ops->set_channel(&xw->priv->hw,
+						  xw->page,
+						  xw->chan);
+		if (res) {
+			pr_debug("set_channel failed\n");
+			goto out;
+		}
+	}
+
+	res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb);
+
+out:
+	mutex_unlock(&xw->priv->phy->pib_lock);
+
+	if (res) {
+		if (xw->xmit_attempts++ < MAC802154_MAX_XMIT_ATTEMPTS) {
+			queue_work(xw->priv->dev_workqueue, &xw->work);
+			return;
+		} else
+			pr_debug("transmission failed for %d times",
+				 MAC802154_MAX_XMIT_ATTEMPTS);
+	}
+
+	dev_kfree_skb(xw->skb);
+
+	kfree(xw);
+}
+
+netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
+			 u8 page, u8 chan)
+{
+	struct xmit_work *work;
+
+	if (WARN_ON(!(priv->phy->channels_supported[page] &
+		      (1 << chan))))
+		return NETDEV_TX_OK;
+
+	if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
+		u16 crc = crc_ccitt(0, skb->data, skb->len);
+		u8 *data = skb_put(skb, 2);
+		data[0] = crc & 0xff;
+		data[1] = crc >> 8;
+	}
+
+	if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC);
+	if (!work)
+		return NETDEV_TX_BUSY;
+
+	INIT_WORK(&work->work, mac802154_xmit_worker);
+	work->skb = skb;
+	work->priv = priv;
+	work->page = page;
+	work->chan = chan;
+	work->xmit_attempts = 0;
+
+	queue_work(priv->dev_workqueue, &work->work);
+
+	return NETDEV_TX_OK;
+}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 0c6f67e..209c1ed 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -509,6 +509,21 @@
 	since you can easily create immortal packets that loop
 	forever on the network.
 
+config NETFILTER_XT_TARGET_HMARK
+	tristate '"HMARK" target support'
+	depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
+	depends on NETFILTER_ADVANCED
+	---help---
+	This option adds the "HMARK" target.
+
+	The target allows you to create rules in the "raw" and "mangle" tables
+	which set the skbuff mark by means of hash calculation within a given
+	range. The nfmark can influence the routing method (see "Use netfilter
+	MARK value as routing key") and can also be used by other subsystems to
+	change their behaviour.
+
+	To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_TARGET_IDLETIMER
 	tristate  "IDLETIMER target support"
 	depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ca36765..4e7960c 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -59,6 +59,7 @@
 obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e1b7e05..e19f365 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -290,12 +290,3 @@
 	if (netfilter_log_init() < 0)
 		panic("cannot initialize nf_log");
 }
-
-#ifdef CONFIG_SYSCTL
-struct ctl_path nf_net_netfilter_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "netfilter", },
-	{ }
-};
-EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path);
-#endif /* CONFIG_SYSCTL */
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index a72a4df..7e1b061 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -109,8 +109,9 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-				htonl(map->first_ip + id * map->hosts));
+		if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+				    htonl(map->first_ip + id * map->hosts)))
+			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, atd);
@@ -194,10 +195,11 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-				htonl(map->first_ip + id * map->hosts));
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-			      htonl(ip_set_timeout_get(members[id])));
+		if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+				    htonl(map->first_ip + id * map->hosts)) ||
+		    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+				  htonl(ip_set_timeout_get(members[id]))))
+			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, adt);
@@ -334,15 +336,16 @@
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-	if (map->netmask != 32)
-		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-		      htonl(sizeof(*map) + map->memsize));
-	if (with_timeout(map->timeout))
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
+	    (map->netmask != 32 &&
+	     nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)) ||
+	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+			  htonl(sizeof(*map) + map->memsize)) ||
+	    (with_timeout(map->timeout) &&
+	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
 	return 0;
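
[Editor's note] The remainder of the ipset changes repeat one mechanical conversion: the NLA_PUT_*() helper macros, which hide an unconditional goto nla_put_failure inside the macro body and are being removed from the tree, are rewritten as explicit return-value checks on the corresponding nla_put_*() functions, with consecutive puts OR-ed into a single test. Schematically (an illustration of the pattern, not a literal excerpt):

/* Before: the jump to nla_put_failure is hidden inside the macro. */
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));

/* After: the same operation with the control flow spelled out; several
 * attribute puts can share one check because nla_put_*() returns nonzero
 * only on failure.
 */
if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)))
	goto nla_put_failure;
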
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 81324c1..d7eaf10 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -111,7 +111,7 @@
 		return -EAGAIN;
 	case MAC_FILLED:
 		return data->ether == NULL ||
-		       compare_ether_addr(data->ether, elem->ether) == 0;
+		       ether_addr_equal(data->ether, elem->ether);
 	}
 	return 0;
 }
@@ -186,11 +186,12 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-				htonl(map->first_ip + id));
-		if (elem->match == MAC_FILLED)
-			NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
-				elem->ether);
+		if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+				    htonl(map->first_ip + id)) ||
+		    (elem->match == MAC_FILLED &&
+		     nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+			     elem->ether)))
+			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, atd);
@@ -224,7 +225,7 @@
 		return -EAGAIN;
 	case MAC_FILLED:
 		return (data->ether == NULL ||
-			compare_ether_addr(data->ether, elem->ether) == 0) &&
+			ether_addr_equal(data->ether, elem->ether)) &&
 		       !bitmap_expired(map, data->id);
 	}
 	return 0;
@@ -314,14 +315,16 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
-				htonl(map->first_ip + id));
-		if (elem->match == MAC_FILLED)
-			NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
-				elem->ether);
+		if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
+				    htonl(map->first_ip + id)) ||
+		    (elem->match == MAC_FILLED &&
+		     nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+			     elem->ether)))
+		    goto nla_put_failure;
 		timeout = elem->match == MAC_UNSET ? elem->timeout
 				: ip_set_timeout_get(elem->timeout);
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
+		if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)))
+		    goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, atd);
@@ -438,14 +441,16 @@
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-		      htonl(sizeof(*map)
-			    + (map->last_ip - map->first_ip + 1) * map->dsize));
-	if (with_timeout(map->timeout))
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
+	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+			  htonl(sizeof(*map) +
+				((map->last_ip - map->first_ip + 1) *
+				 map->dsize))) ||
+	    (with_timeout(map->timeout) &&
+	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
 	return 0;
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 382ec28..b9f1fce 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -96,8 +96,9 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
-			      htons(map->first_port + id));
+		if (nla_put_net16(skb, IPSET_ATTR_PORT,
+				  htons(map->first_port + id)))
+			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, atd);
@@ -183,10 +184,11 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
-			      htons(map->first_port + id));
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-			      htonl(ip_set_timeout_get(members[id])));
+		if (nla_put_net16(skb, IPSET_ATTR_PORT,
+				  htons(map->first_port + id)) ||
+		    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+				  htonl(ip_set_timeout_get(members[id]))))
+			goto nla_put_failure;
 		ipset_nest_end(skb, nested);
 	}
 	ipset_nest_end(skb, adt);
@@ -320,13 +322,14 @@
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-		      htonl(sizeof(*map) + map->memsize));
-	if (with_timeout(map->timeout))
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+	if (nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)) ||
+	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+			  htonl(sizeof(*map) + map->memsize)) ||
+	    (with_timeout(map->timeout) &&
+	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
+		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
 	return 0;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e6c1c96..819c342 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1092,19 +1092,21 @@
 			ret = -EMSGSIZE;
 			goto release_refcount;
 		}
-		NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
-		NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+		if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+		    nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
+			goto nla_put_failure;
 		if (dump_flags & IPSET_FLAG_LIST_SETNAME)
 			goto next_set;
 		switch (cb->args[2]) {
 		case 0:
 			/* Core header data */
-			NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME,
-				       set->type->name);
-			NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
-				   set->family);
-			NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
-				   set->revision);
+			if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
+					   set->type->name) ||
+			    nla_put_u8(skb, IPSET_ATTR_FAMILY,
+				       set->family) ||
+			    nla_put_u8(skb, IPSET_ATTR_REVISION,
+				       set->revision))
+				goto nla_put_failure;
 			ret = set->variant->head(set, skb);
 			if (ret < 0)
 				goto release_refcount;
@@ -1410,11 +1412,12 @@
 			 IPSET_CMD_HEADER);
 	if (!nlh2)
 		goto nlmsg_failure;
-	NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
-	NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
-	NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
-	NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
-	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->revision);
+	if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+	    nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
+	    nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
+	    nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
+	    nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision))
+		goto nla_put_failure;
 	nlmsg_end(skb2, nlh2);
 
 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1469,11 +1472,12 @@
 			 IPSET_CMD_TYPE);
 	if (!nlh2)
 		goto nlmsg_failure;
-	NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
-	NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename);
-	NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family);
-	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max);
-	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min);
+	if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+	    nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
+	    nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
+	    nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
+	    nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min))
+		goto nla_put_failure;
 	nlmsg_end(skb2, nlh2);
 
 	pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
@@ -1517,7 +1521,8 @@
 			 IPSET_CMD_PROTOCOL);
 	if (!nlh2)
 		goto nlmsg_failure;
-	NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+	if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
+		goto nla_put_failure;
 	nlmsg_end(skb2, nlh2);
 
 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1613,7 +1618,7 @@
 static int
 ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
 {
-	unsigned *op;
+	unsigned int *op;
 	void *data;
 	int copylen = *len, ret = 0;
 
@@ -1621,7 +1626,7 @@
 		return -EPERM;
 	if (optval != SO_IP_SET)
 		return -EBADF;
-	if (*len < sizeof(unsigned))
+	if (*len < sizeof(unsigned int))
 		return -EINVAL;
 
 	data = vmalloc(*len);
@@ -1631,7 +1636,7 @@
 		ret = -EFAULT;
 		goto done;
 	}
-	op = (unsigned *) data;
+	op = (unsigned int *) data;
 
 	if (*op < IP_SET_OP_VERSION) {
 		/* Check the version at the beginning of operations */
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 5139dea..a68dbd4 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -81,7 +81,8 @@
 static inline bool
 hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
 {
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -94,9 +95,10 @@
 	const struct hash_ip4_telem *tdata =
 		(const struct hash_ip4_telem *)data;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))))
+		goto nla_put_failure;
 
 	return 0;
 
@@ -262,7 +264,8 @@
 static bool
 hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
 {
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -275,9 +278,10 @@
 	const struct hash_ip6_telem *e =
 		(const struct hash_ip6_telem *)data;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -364,6 +368,7 @@
 {
 	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
 	u8 netmask, hbits;
+	size_t hsize;
 	struct ip_set_hash *h;
 
 	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
@@ -405,9 +410,12 @@
 	h->timeout = IPSET_NO_TIMEOUT;
 
 	hbits = htable_bits(hashsize);
-	h->table = ip_set_alloc(
-			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket));
+	hsize = htable_size(hbits);
+	if (hsize == 0) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table = ip_set_alloc(hsize);
 	if (!h->table) {
 		kfree(h);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 9c27e24..92722bb 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -93,9 +93,10 @@
 hash_ipport4_data_list(struct sk_buff *skb,
 		       const struct hash_ipport4_elem *data)
 {
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -109,12 +110,12 @@
 	const struct hash_ipport4_telem *tdata =
 		(const struct hash_ipport4_telem *)data;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
-
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -308,9 +309,10 @@
 hash_ipport6_data_list(struct sk_buff *skb,
 		       const struct hash_ipport6_elem *data)
 {
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -324,11 +326,12 @@
 	const struct hash_ipport6_telem *e =
 		(const struct hash_ipport6_telem *)data;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -449,6 +452,7 @@
 	struct ip_set_hash *h;
 	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
 	u8 hbits;
+	size_t hsize;
 
 	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
 		return -IPSET_ERR_INVALID_FAMILY;
@@ -476,9 +480,12 @@
 	h->timeout = IPSET_NO_TIMEOUT;
 
 	hbits = htable_bits(hashsize);
-	h->table = ip_set_alloc(
-			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket));
+	hsize = htable_size(hbits);
+	if (hsize == 0) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table = ip_set_alloc(hsize);
 	if (!h->table) {
 		kfree(h);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 9134057..0637ce0 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -94,10 +94,11 @@
 hash_ipportip4_data_list(struct sk_buff *skb,
 		       const struct hash_ipportip4_elem *data)
 {
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -111,13 +112,13 @@
 	const struct hash_ipportip4_telem *tdata =
 		(const struct hash_ipportip4_telem *)data;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
-
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -319,10 +320,11 @@
 hash_ipportip6_data_list(struct sk_buff *skb,
 			 const struct hash_ipportip6_elem *data)
 {
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -336,12 +338,13 @@
 	const struct hash_ipportip6_telem *e =
 		(const struct hash_ipportip6_telem *)data;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -467,6 +470,7 @@
 	struct ip_set_hash *h;
 	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
 	u8 hbits;
+	size_t hsize;
 
 	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
 		return -IPSET_ERR_INVALID_FAMILY;
@@ -494,9 +498,12 @@
 	h->timeout = IPSET_NO_TIMEOUT;
 
 	hbits = htable_bits(hashsize);
-	h->table = ip_set_alloc(
-			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket));
+	hsize = htable_size(hbits);
+	if (hsize == 0) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table = ip_set_alloc(hsize);
 	if (!h->table) {
 		kfree(h);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 5d05e69..1ce21ca 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -124,13 +124,14 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -145,16 +146,16 @@
 		(const struct hash_ipportnet4_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -436,13 +437,14 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -457,15 +459,16 @@
 		(const struct hash_ipportnet6_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -616,6 +619,7 @@
 	struct ip_set_hash *h;
 	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
 	u8 hbits;
+	size_t hsize;
 
 	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
 		return -IPSET_ERR_INVALID_FAMILY;
@@ -645,9 +649,12 @@
 	h->timeout = IPSET_NO_TIMEOUT;
 
 	hbits = htable_bits(hashsize);
-	h->table = ip_set_alloc(
-			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket));
+	hsize = htable_size(hbits);
+	if (hsize == 0) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table = ip_set_alloc(hsize);
 	if (!h->table) {
 		kfree(h);
 		return -ENOMEM;
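
The dump helpers in these ipset modules all follow the same conversion: the old NLA_PUT_*() macros, which jumped to nla_put_failure from inside the macro body, become plain nla_put_*() calls chained with ||, so a single if statement reports the first attribute that does not fit, and optional attributes sit behind a "flags &&" term inside the same condition. A self-contained sketch of the idiom with made-up emitter functions (not the kernel's netlink API):

#include <stdio.h>

/* Hypothetical emitters standing in for nla_put_u8()/nla_put_net32():
 * non-zero means "no room left in the message". */
static int put_u8(const char *name, unsigned int v)  { return printf("%s=%u\n", name, v) < 0; }
static int put_u32(const char *name, unsigned int v) { return printf("%s=%u\n", name, v) < 0; }

static int dump_entry(unsigned int proto, unsigned int timeout, unsigned int flags)
{
	/* Short-circuit ||: the first failing put stops the chain; the
	 * optional attribute stays inside the same test behind "flags &&". */
	if (put_u8("proto", proto) ||
	    put_u32("timeout", timeout) ||
	    (flags && put_u32("cadt_flags", flags)))
		goto put_failure;
	return 0;

put_failure:
	return -1;
}

int main(void)
{
	return dump_entry(6, 300, 0);
}
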
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 7c3d945..c57a6a0 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -111,10 +111,11 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -128,13 +129,13 @@
 		(const struct hash_net4_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, tdata->cidr) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -339,10 +340,11 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -356,12 +358,13 @@
 		(const struct hash_net6_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, e->cidr) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -460,6 +463,7 @@
 	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
 	struct ip_set_hash *h;
 	u8 hbits;
+	size_t hsize;
 
 	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
 		return -IPSET_ERR_INVALID_FAMILY;
@@ -489,9 +493,12 @@
 	h->timeout = IPSET_NO_TIMEOUT;
 
 	hbits = htable_bits(hashsize);
-	h->table = ip_set_alloc(
-			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket));
+	hsize = htable_size(hbits);
+	if (hsize == 0) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table = ip_set_alloc(hsize);
 	if (!h->table) {
 		kfree(h);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index f24037f..ee86394 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -252,11 +252,12 @@
 
 	if (data->nomatch)
 		flags |= IPSET_FLAG_NOMATCH;
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -273,13 +274,14 @@
 
 	if (data->nomatch)
 		flags |= IPSET_FLAG_NOMATCH;
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))))
+		goto nla_put_failure;
 
 	return 0;
 
@@ -555,11 +557,12 @@
 
 	if (data->nomatch)
 		flags |= IPSET_FLAG_NOMATCH;
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -576,13 +579,14 @@
 
 	if (data->nomatch)
 		flags |= IPSET_FLAG_NOMATCH;
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
-	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
+	    nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -722,6 +726,7 @@
 	struct ip_set_hash *h;
 	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
 	u8 hbits;
+	size_t hsize;
 
 	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
 		return -IPSET_ERR_INVALID_FAMILY;
@@ -752,9 +757,12 @@
 	h->ahash_max = AHASH_MAX_SIZE;
 
 	hbits = htable_bits(hashsize);
-	h->table = ip_set_alloc(
-			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket));
+	hsize = htable_size(hbits);
+	if (hsize == 0) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table = ip_set_alloc(hsize);
 	if (!h->table) {
 		kfree(h);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index ce2e771..fc3143a 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -124,12 +124,13 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -144,15 +145,15 @@
 		(const struct hash_netport4_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(tdata->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
-
+	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(tdata->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -402,12 +403,13 @@
 {
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -422,14 +424,15 @@
 		(const struct hash_netport6_telem *)data;
 	u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
 
-	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
-	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
-	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
-	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
-	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-		      htonl(ip_set_timeout_get(e->timeout)));
-	if (flags)
-		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
+	if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
+	    nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+	    nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
+	    nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+	    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+			  htonl(ip_set_timeout_get(e->timeout))) ||
+	    (flags &&
+	     nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -572,6 +575,7 @@
 	struct ip_set_hash *h;
 	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
 	u8 hbits;
+	size_t hsize;
 
 	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
 		return -IPSET_ERR_INVALID_FAMILY;
@@ -601,9 +605,12 @@
 	h->timeout = IPSET_NO_TIMEOUT;
 
 	hbits = htable_bits(hashsize);
-	h->table = ip_set_alloc(
-			sizeof(struct htable)
-			+ jhash_size(hbits) * sizeof(struct hbucket));
+	hsize = htable_size(hbits);
+	if (hsize == 0) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table = ip_set_alloc(hsize);
 	if (!h->table) {
 		kfree(h);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 7e095f9..6cb1225 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -402,12 +402,13 @@
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
-	NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
-	if (with_timeout(map->timeout))
-		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
-	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
-		      htonl(sizeof(*map) + map->size * map->dsize));
+	if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
+	    (with_timeout(map->timeout) &&
+	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
+	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
+			  htonl(sizeof(*map) + map->size * map->dsize)))
+		goto nla_put_failure;
 	ipset_nest_end(skb, nested);
 
 	return 0;
@@ -442,13 +443,15 @@
 			} else
 				goto nla_put_failure;
 		}
-		NLA_PUT_STRING(skb, IPSET_ATTR_NAME,
-			       ip_set_name_byindex(e->id));
+		if (nla_put_string(skb, IPSET_ATTR_NAME,
+				   ip_set_name_byindex(e->id)))
+			goto nla_put_failure;
 		if (with_timeout(map->timeout)) {
 			const struct set_telem *te =
 				(const struct set_telem *) e;
-			NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
-				      htonl(ip_set_timeout_get(te->timeout)));
+			__be32 to = htonl(ip_set_timeout_get(te->timeout));
+			if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, to))
+				goto nla_put_failure;
 		}
 		ipset_nest_end(skb, nested);
 	}
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 5285617..64f9e8f 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -313,7 +313,7 @@
  *	Assumes already checked proto==IPPROTO_TCP and diff!=0.
  */
 static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
-				 unsigned flag, __u32 seq, int diff)
+				 unsigned int flag, __u32 seq, int diff)
 {
 	/* spinlock is to keep updating cp->flags atomic */
 	spin_lock(&cp->lock);
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 29fa5ba..1548df9 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -86,42 +86,42 @@
 static struct ip_vs_aligned_lock
 __ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
 
-static inline void ct_read_lock(unsigned key)
+static inline void ct_read_lock(unsigned int key)
 {
 	read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_read_unlock(unsigned key)
+static inline void ct_read_unlock(unsigned int key)
 {
 	read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_write_lock(unsigned key)
+static inline void ct_write_lock(unsigned int key)
 {
 	write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_write_unlock(unsigned key)
+static inline void ct_write_unlock(unsigned int key)
 {
 	write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_read_lock_bh(unsigned key)
+static inline void ct_read_lock_bh(unsigned int key)
 {
 	read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_read_unlock_bh(unsigned key)
+static inline void ct_read_unlock_bh(unsigned int key)
 {
 	read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_write_lock_bh(unsigned key)
+static inline void ct_write_lock_bh(unsigned int key)
 {
 	write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
 
-static inline void ct_write_unlock_bh(unsigned key)
+static inline void ct_write_unlock_bh(unsigned int key)
 {
 	write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
 }
@@ -130,7 +130,7 @@
 /*
  *	Returns hash value for IPVS connection entry
  */
-static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
+static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned int proto,
 				       const union nf_inet_addr *addr,
 				       __be16 port)
 {
@@ -188,7 +188,7 @@
  */
 static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
 {
-	unsigned hash;
+	unsigned int hash;
 	int ret;
 
 	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
@@ -224,7 +224,7 @@
  */
 static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
 {
-	unsigned hash;
+	unsigned int hash;
 	int ret;
 
 	/* unhash it and decrease its reference counter */
@@ -257,7 +257,7 @@
 static inline struct ip_vs_conn *
 __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_conn *cp;
 	struct hlist_node *n;
 
@@ -344,7 +344,7 @@
 /* Get reference to connection template */
 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_conn *cp;
 	struct hlist_node *n;
 
@@ -394,7 +394,7 @@
  *	p->vaddr, p->vport: pkt dest address (foreign host) */
 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_conn *cp, *ret=NULL;
 	struct hlist_node *n;
 
@@ -548,6 +548,7 @@
 ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
 {
 	unsigned int conn_flags;
+	__u32 flags;
 
 	/* if dest is NULL, then return directly */
 	if (!dest)
@@ -559,17 +560,19 @@
 	conn_flags = atomic_read(&dest->conn_flags);
 	if (cp->protocol != IPPROTO_UDP)
 		conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
+	flags = cp->flags;
 	/* Bind with the destination and its corresponding transmitter */
-	if (cp->flags & IP_VS_CONN_F_SYNC) {
+	if (flags & IP_VS_CONN_F_SYNC) {
 		/* if the connection is not template and is created
 		 * by sync, preserve the activity flag.
 		 */
-		if (!(cp->flags & IP_VS_CONN_F_TEMPLATE))
+		if (!(flags & IP_VS_CONN_F_TEMPLATE))
 			conn_flags &= ~IP_VS_CONN_F_INACTIVE;
 		/* connections inherit forwarding method from dest */
-		cp->flags &= ~IP_VS_CONN_F_FWD_MASK;
+		flags &= ~(IP_VS_CONN_F_FWD_MASK | IP_VS_CONN_F_NOOUTPUT);
 	}
-	cp->flags |= conn_flags;
+	flags |= conn_flags;
+	cp->flags = flags;
 	cp->dest = dest;
 
 	IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
@@ -584,12 +587,12 @@
 		      atomic_read(&dest->refcnt));
 
 	/* Update the connection counters */
-	if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
-		/* It is a normal connection, so increase the inactive
-		   connection counter because it is in TCP SYNRECV
-		   state (inactive) or other protocol inacive state */
-		if ((cp->flags & IP_VS_CONN_F_SYNC) &&
-		    (!(cp->flags & IP_VS_CONN_F_INACTIVE)))
+	if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
+		/* It is a normal connection, so modify the counters
+		 * according to the flags, later the protocol can
+		 * update them on state change
+		 */
+		if (!(flags & IP_VS_CONN_F_INACTIVE))
 			atomic_inc(&dest->activeconns);
 		else
 			atomic_inc(&dest->inactconns);
@@ -613,14 +616,40 @@
 {
 	struct ip_vs_dest *dest;
 
-	if ((cp) && (!cp->dest)) {
-		dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
-				       cp->dport, &cp->vaddr, cp->vport,
-				       cp->protocol, cp->fwmark, cp->flags);
+	dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
+			       cp->dport, &cp->vaddr, cp->vport,
+			       cp->protocol, cp->fwmark, cp->flags);
+	if (dest) {
+		struct ip_vs_proto_data *pd;
+
+		spin_lock(&cp->lock);
+		if (cp->dest) {
+			spin_unlock(&cp->lock);
+			return dest;
+		}
+
+		/* Applications work depending on the forwarding method
+		 * but better to reassign them always when binding dest */
+		if (cp->app)
+			ip_vs_unbind_app(cp);
+
 		ip_vs_bind_dest(cp, dest);
-		return dest;
-	} else
-		return NULL;
+		spin_unlock(&cp->lock);
+
+		/* Update its packet transmitter */
+		cp->packet_xmit = NULL;
+#ifdef CONFIG_IP_VS_IPV6
+		if (cp->af == AF_INET6)
+			ip_vs_bind_xmit_v6(cp);
+		else
+#endif
+			ip_vs_bind_xmit(cp);
+
+		pd = ip_vs_proto_data_get(ip_vs_conn_net(cp), cp->protocol);
+		if (pd && atomic_read(&pd->appcnt))
+			ip_vs_bind_app(cp, pd->pp);
+	}
+	return dest;
 }
 
 
@@ -743,7 +772,8 @@
 static void ip_vs_conn_expire(unsigned long data)
 {
 	struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
-	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
+	struct net *net = ip_vs_conn_net(cp);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	cp->timeout = 60*HZ;
 
@@ -808,6 +838,9 @@
 		  atomic_read(&cp->refcnt)-1,
 		  atomic_read(&cp->n_control));
 
+	if (ipvs->sync_state & IP_VS_STATE_MASTER)
+		ip_vs_sync_conn(net, cp, sysctl_sync_threshold(ipvs));
+
 	ip_vs_conn_put(cp);
 }
 
@@ -824,7 +857,7 @@
  */
 struct ip_vs_conn *
 ip_vs_conn_new(const struct ip_vs_conn_param *p,
-	       const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
+	       const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
 	       struct ip_vs_dest *dest, __u32 fwmark)
 {
 	struct ip_vs_conn *cp;
@@ -881,6 +914,7 @@
 	/* Set its state and timeout */
 	cp->state = 0;
 	cp->timeout = 3*HZ;
+	cp->sync_endtime = jiffies & ~3UL;
 
 	/* Bind its packet transmitter */
 #ifdef CONFIG_IP_VS_IPV6
@@ -1057,7 +1091,7 @@
 	.release = seq_release_net,
 };
 
-static const char *ip_vs_origin_name(unsigned flags)
+static const char *ip_vs_origin_name(unsigned int flags)
 {
 	if (flags & IP_VS_CONN_F_SYNC)
 		return "SYNC";
@@ -1169,7 +1203,7 @@
 	 * Randomly scan 1/32 of the whole table every second
 	 */
 	for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
-		unsigned hash = net_random() & ip_vs_conn_tab_mask;
+		unsigned int hash = net_random() & ip_vs_conn_tab_mask;
 		struct hlist_node *n;
 
 		/*
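		...

ip_vs_try_bind_dest() above now performs the destination lookup first and only then, under cp->lock, re-checks cp->dest so that a concurrent binder is detected and the app/transmitter rebinding happens exactly once. The check-under-lock shape, reduced to a small pthread sketch with illustrative names:

#include <pthread.h>
#include <stddef.h>

struct conn {
	pthread_mutex_t lock;
	void *dest;				/* lazily bound destination */
};

/* Bind d to c unless another path got there first; whoever loses the
 * race simply keeps the existing binding. */
static void *try_bind_dest(struct conn *c, void *d)
{
	void *cur;

	pthread_mutex_lock(&c->lock);
	if (c->dest) {				/* already bound: do nothing */
		cur = c->dest;
		pthread_mutex_unlock(&c->lock);
		return cur;
	}
	c->dest = d;				/* first binder wins */
	pthread_mutex_unlock(&c->lock);
	return d;
}

int main(void)
{
	static struct conn c = { PTHREAD_MUTEX_INITIALIZER, NULL };
	int d;

	return try_bind_dest(&c, &d) == &d ? 0 : 1;
}
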
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2555816..a54b018c 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -80,7 +80,7 @@
 #define icmp_id(icmph)          (((icmph)->un).echo.id)
 #define icmpv6_id(icmph)        (icmph->icmp6_dataun.u_echo.identifier)
 
-const char *ip_vs_proto_name(unsigned proto)
+const char *ip_vs_proto_name(unsigned int proto)
 {
 	static char buf[20];
 
@@ -1613,34 +1613,8 @@
 	else
 		pkts = atomic_add_return(1, &cp->in_pkts);
 
-	if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
-	    cp->protocol == IPPROTO_SCTP) {
-		if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
-			(pkts % sysctl_sync_period(ipvs)
-			 == sysctl_sync_threshold(ipvs))) ||
-				(cp->old_state != cp->state &&
-				 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
-				  (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
-				  (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
-			ip_vs_sync_conn(net, cp);
-			goto out;
-		}
-	}
-
-	/* Keep this block last: TCP and others with pp->num_states <= 1 */
-	else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
-	    (((cp->protocol != IPPROTO_TCP ||
-	       cp->state == IP_VS_TCP_S_ESTABLISHED) &&
-	      (pkts % sysctl_sync_period(ipvs)
-	       == sysctl_sync_threshold(ipvs))) ||
-	     ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
-	      ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
-	       (cp->state == IP_VS_TCP_S_CLOSE) ||
-	       (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
-	       (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
-		ip_vs_sync_conn(net, cp);
-out:
-	cp->old_state = cp->state;
+	if (ipvs->sync_state & IP_VS_STATE_MASTER)
+		ip_vs_sync_conn(net, cp, pkts);
 
 	ip_vs_conn_put(cp);
 	return ret;
@@ -1924,6 +1898,7 @@
 control_fail:
 	ip_vs_estimator_net_cleanup(net);
 estimator_fail:
+	net->ipvs = NULL;
 	return -ENOMEM;
 }
 
@@ -1936,6 +1911,7 @@
 	ip_vs_control_net_cleanup(net);
 	ip_vs_estimator_net_cleanup(net);
 	IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
+	net->ipvs = NULL;
 }
 
 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
@@ -1993,10 +1969,18 @@
 		goto cleanup_dev;
 	}
 
+	ret = ip_vs_register_nl_ioctl();
+	if (ret < 0) {
+		pr_err("can't register netlink/ioctl.\n");
+		goto cleanup_hooks;
+	}
+
 	pr_info("ipvs loaded.\n");
 
 	return ret;
 
+cleanup_hooks:
+	nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 cleanup_dev:
 	unregister_pernet_device(&ipvs_core_dev_ops);
 cleanup_sub:
@@ -2012,6 +1996,7 @@
 
 static void __exit ip_vs_cleanup(void)
 {
+	ip_vs_unregister_nl_ioctl();
 	nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 	unregister_pernet_device(&ipvs_core_dev_ops);
 	unregister_pernet_subsys(&ipvs_core_ops);	/* free ip_vs struct */
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b3afe18..dd811b8 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -265,11 +265,11 @@
 /*
  *	Returns hash value for virtual service
  */
-static inline unsigned
-ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
+static inline unsigned int
+ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto,
 		  const union nf_inet_addr *addr, __be16 port)
 {
-	register unsigned porth = ntohs(port);
+	register unsigned int porth = ntohs(port);
 	__be32 addr_fold = addr->ip;
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -286,7 +286,7 @@
 /*
  *	Returns hash value of fwmark for virtual service lookup
  */
-static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
+static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
 {
 	return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
 }
@@ -298,7 +298,7 @@
  */
 static int ip_vs_svc_hash(struct ip_vs_service *svc)
 {
-	unsigned hash;
+	unsigned int hash;
 
 	if (svc->flags & IP_VS_SVC_F_HASHED) {
 		pr_err("%s(): request for already hashed, called from %pF\n",
@@ -361,7 +361,7 @@
 __ip_vs_service_find(struct net *net, int af, __u16 protocol,
 		     const union nf_inet_addr *vaddr, __be16 vport)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_service *svc;
 
 	/* Check for "full" addressed entries */
@@ -388,7 +388,7 @@
 static inline struct ip_vs_service *
 __ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_service *svc;
 
 	/* Check for fwmark addressed entries */
@@ -489,11 +489,11 @@
 /*
  *	Returns hash value for real service
  */
-static inline unsigned ip_vs_rs_hashkey(int af,
+static inline unsigned int ip_vs_rs_hashkey(int af,
 					    const union nf_inet_addr *addr,
 					    __be16 port)
 {
-	register unsigned porth = ntohs(port);
+	register unsigned int porth = ntohs(port);
 	__be32 addr_fold = addr->ip;
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -512,7 +512,7 @@
  */
 static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
 {
-	unsigned hash;
+	unsigned int hash;
 
 	if (!list_empty(&dest->d_list)) {
 		return 0;
@@ -555,7 +555,7 @@
 			  __be16 dport)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
-	unsigned hash;
+	unsigned int hash;
 	struct ip_vs_dest *dest;
 
 	/*
@@ -842,7 +842,7 @@
 	       struct ip_vs_dest **dest_p)
 {
 	struct ip_vs_dest *dest;
-	unsigned atype;
+	unsigned int atype;
 
 	EnterFunction(2);
 
@@ -1599,6 +1599,10 @@
 }
 
 #ifdef CONFIG_SYSCTL
+
+static int zero;
+static int three = 3;
+
 static int
 proc_do_defense_mode(ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1632,7 +1636,8 @@
 	memcpy(val, valp, sizeof(val));
 
 	rc = proc_dointvec(table, write, buffer, lenp, ppos);
-	if (write && (valp[0] < 0 || valp[1] < 0 || valp[0] >= valp[1])) {
+	if (write && (valp[0] < 0 || valp[1] < 0 ||
+	    (valp[0] >= valp[1] && valp[1]))) {
 		/* Restore the correct value */
 		memcpy(valp, val, sizeof(val));
 	}
@@ -1652,9 +1657,24 @@
 		if ((*valp < 0) || (*valp > 1)) {
 			/* Restore the correct value */
 			*valp = val;
-		} else {
-			struct net *net = current->nsproxy->net_ns;
-			ip_vs_sync_switch_mode(net, val);
+		}
+	}
+	return rc;
+}
+
+static int
+proc_do_sync_ports(ctl_table *table, int write,
+		   void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int *valp = table->data;
+	int val = *valp;
+	int rc;
+
+	rc = proc_dointvec(table, write, buffer, lenp, ppos);
+	if (write && (*valp != val)) {
+		if (*valp < 1 || !is_power_of_2(*valp)) {
+			/* Restore the correct value */
+			*valp = val;
 		}
 	}
 	return rc;
@@ -1718,6 +1738,24 @@
 		.proc_handler	= &proc_do_sync_mode,
 	},
 	{
+		.procname	= "sync_ports",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_do_sync_ports,
+	},
+	{
+		.procname	= "sync_qlen_max",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "sync_sock_size",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "cache_bypass",
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
@@ -1743,6 +1781,20 @@
 		.proc_handler	= proc_do_sync_threshold,
 	},
 	{
+		.procname	= "sync_refresh_period",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
+	{
+		.procname	= "sync_retries",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &three,
+	},
+	{
 		.procname	= "nat_icmp_send",
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
@@ -1846,13 +1898,6 @@
 	{ }
 };
 
-const struct ctl_path net_vs_ctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "ipv4", },
-	{ .procname = "vs", },
-	{ }
-};
-EXPORT_SYMBOL_GPL(net_vs_ctl_path);
 #endif
 
 #ifdef CONFIG_PROC_FS
@@ -1867,7 +1912,7 @@
  *	Write the contents of the VS rule table to a PROCfs file.
  *	(It is kept just for backward compatibility)
  */
-static inline const char *ip_vs_fwd_name(unsigned flags)
+static inline const char *ip_vs_fwd_name(unsigned int flags)
 {
 	switch (flags & IP_VS_CONN_F_FWD_MASK) {
 	case IP_VS_CONN_F_LOCALNODE:
@@ -2816,17 +2861,17 @@
 
 	ip_vs_copy_stats(&ustats, stats);
 
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts);
-	NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes);
-	NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, ustats.cps);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps);
-	NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps);
-
+	if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
+	    nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
+	    nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
+		goto nla_put_failure;
 	nla_nest_end(skb, nl_stats);
 
 	return 0;
@@ -2847,23 +2892,25 @@
 	if (!nl_service)
 		return -EMSGSIZE;
 
-	NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af);
-
+	if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af))
+		goto nla_put_failure;
 	if (svc->fwmark) {
-		NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
+		if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark))
+			goto nla_put_failure;
 	} else {
-		NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
-		NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr);
-		NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port);
+		if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) ||
+		    nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) ||
+		    nla_put_u16(skb, IPVS_SVC_ATTR_PORT, svc->port))
+			goto nla_put_failure;
 	}
 
-	NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name);
-	if (svc->pe)
-		NLA_PUT_STRING(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name);
-	NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags);
-	NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ);
-	NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask);
-
+	if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name) ||
+	    (svc->pe &&
+	     nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) ||
+	    nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
+	    nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
+	    nla_put_u32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
+		goto nla_put_failure;
 	if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
 		goto nla_put_failure;
 
@@ -3038,21 +3085,22 @@
 	if (!nl_dest)
 		return -EMSGSIZE;
 
-	NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr);
-	NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
-
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
-		    atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
-		    atomic_read(&dest->activeconns));
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS,
-		    atomic_read(&dest->inactconns));
-	NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
-		    atomic_read(&dest->persistconns));
-
+	if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
+	    nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
+			(atomic_read(&dest->conn_flags) &
+			 IP_VS_CONN_F_FWD_MASK)) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
+			atomic_read(&dest->weight)) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
+			atomic_read(&dest->activeconns)) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
+			atomic_read(&dest->inactconns)) ||
+	    nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
+			atomic_read(&dest->persistconns)))
+		goto nla_put_failure;
 	if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
 		goto nla_put_failure;
 
@@ -3181,10 +3229,10 @@
 	if (!nl_daemon)
 		return -EMSGSIZE;
 
-	NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state);
-	NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn);
-	NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid);
-
+	if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
+	    nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
+	    nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
+		goto nla_put_failure;
 	nla_nest_end(skb, nl_daemon);
 
 	return 0;
@@ -3473,21 +3521,26 @@
 
 		__ip_vs_get_timeouts(net, &t);
 #ifdef CONFIG_IP_VS_PROTO_TCP
-		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
-		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
-			    t.tcp_fin_timeout);
+		if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
+				t.tcp_timeout) ||
+		    nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
+				t.tcp_fin_timeout))
+			goto nla_put_failure;
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
-		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout);
+		if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout))
+			goto nla_put_failure;
 #endif
 
 		break;
 	}
 
 	case IPVS_CMD_GET_INFO:
-		NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
-		NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
-			    ip_vs_conn_tab_size);
+		if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION,
+				IP_VS_VERSION_CODE) ||
+		    nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
+				ip_vs_conn_tab_size))
+			goto nla_put_failure;
 		break;
 	}
 
@@ -3654,6 +3707,12 @@
 	tbl[idx++].data = &ipvs->sysctl_snat_reroute;
 	ipvs->sysctl_sync_ver = 1;
 	tbl[idx++].data = &ipvs->sysctl_sync_ver;
+	ipvs->sysctl_sync_ports = 1;
+	tbl[idx++].data = &ipvs->sysctl_sync_ports;
+	ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32;
+	tbl[idx++].data = &ipvs->sysctl_sync_qlen_max;
+	ipvs->sysctl_sync_sock_size = 0;
+	tbl[idx++].data = &ipvs->sysctl_sync_sock_size;
 	tbl[idx++].data = &ipvs->sysctl_cache_bypass;
 	tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
 	tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
@@ -3661,11 +3720,14 @@
 	ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
 	tbl[idx].data = &ipvs->sysctl_sync_threshold;
 	tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+	ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
+	tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
+	ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
+	tbl[idx++].data = &ipvs->sysctl_sync_retries;
 	tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
 
 
-	ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
-						     tbl);
+	ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
 	if (ipvs->sysctl_hdr == NULL) {
 		if (!net_eq(net, &init_net))
 			kfree(tbl);
@@ -3680,7 +3742,7 @@
 	return 0;
 }
 
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -3692,7 +3754,7 @@
 #else
 
 int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
 
 #endif
 
@@ -3750,21 +3812,10 @@
 	free_percpu(ipvs->tot_stats.cpustats);
 }
 
-int __init ip_vs_control_init(void)
+int __init ip_vs_register_nl_ioctl(void)
 {
-	int idx;
 	int ret;
 
-	EnterFunction(2);
-
-	/* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
-	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++)  {
-		INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
-		INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
-	}
-
-	smp_wmb();	/* Do we really need it now ? */
-
 	ret = nf_register_sockopt(&ip_vs_sockopts);
 	if (ret) {
 		pr_err("cannot register sockopt.\n");
@@ -3776,28 +3827,47 @@
 		pr_err("cannot register Generic Netlink interface.\n");
 		goto err_genl;
 	}
-
-	ret = register_netdevice_notifier(&ip_vs_dst_notifier);
-	if (ret < 0)
-		goto err_notf;
-
-	LeaveFunction(2);
 	return 0;
 
-err_notf:
-	ip_vs_genl_unregister();
 err_genl:
 	nf_unregister_sockopt(&ip_vs_sockopts);
 err_sock:
 	return ret;
 }
 
+void ip_vs_unregister_nl_ioctl(void)
+{
+	ip_vs_genl_unregister();
+	nf_unregister_sockopt(&ip_vs_sockopts);
+}
+
+int __init ip_vs_control_init(void)
+{
+	int idx;
+	int ret;
+
+	EnterFunction(2);
+
+	/* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
+	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+		INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
+		INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
+	}
+
+	smp_wmb();	/* Do we really need it now ? */
+
+	ret = register_netdevice_notifier(&ip_vs_dst_notifier);
+	if (ret < 0)
+		return ret;
+
+	LeaveFunction(2);
+	return 0;
+}
+
 
 void ip_vs_control_cleanup(void)
 {
 	EnterFunction(2);
 	unregister_netdevice_notifier(&ip_vs_dst_notifier);
-	ip_vs_genl_unregister();
-	nf_unregister_sockopt(&ip_vs_sockopts);
 	LeaveFunction(2);
 }
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 1c269e5..8b7dca9 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -68,7 +68,7 @@
 /*
  *	Returns hash value for IPVS DH entry
  */
-static inline unsigned ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr)
+static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr)
 {
 	__be32 addr_fold = addr->ip;
 
@@ -149,7 +149,7 @@
 
 	/* allocate the DH table for this service */
 	tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE,
-		      GFP_ATOMIC);
+		      GFP_KERNEL);
 	if (tbl == NULL)
 		return -ENOMEM;
 
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 538d74e..b20b29c 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -177,7 +177,7 @@
 	__be16 port;
 	struct ip_vs_conn *n_cp;
 	char buf[24];		/* xxx.xxx.xxx.xxx,ppp,ppp\000 */
-	unsigned buf_len;
+	unsigned int buf_len;
 	int ret = 0;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
@@ -439,6 +439,8 @@
 	struct ip_vs_app *app;
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	if (!ipvs)
+		return -ENOENT;
 	app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
 	if (!app)
 		return -ENOMEM;
@@ -483,7 +485,7 @@
 	.exit = __ip_vs_ftp_exit,
 };
 
-int __init ip_vs_ftp_init(void)
+static int __init ip_vs_ftp_init(void)
 {
 	int rv;
 
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 0f16283..df646cc 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -142,7 +142,7 @@
 /*
  *	Returns hash value for IPVS LBLC entry
  */
-static inline unsigned
+static inline unsigned int
 ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
 {
 	__be32 addr_fold = addr->ip;
@@ -163,7 +163,7 @@
 static void
 ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
 {
-	unsigned hash = ip_vs_lblc_hashkey(en->af, &en->addr);
+	unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);
 
 	list_add(&en->list, &tbl->bucket[hash]);
 	atomic_inc(&tbl->entries);
@@ -178,7 +178,7 @@
 ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
 	       const union nf_inet_addr *addr)
 {
-	unsigned hash = ip_vs_lblc_hashkey(af, addr);
+	unsigned int hash = ip_vs_lblc_hashkey(af, addr);
 	struct ip_vs_lblc_entry *en;
 
 	list_for_each_entry(en, &tbl->bucket[hash], list)
@@ -342,7 +342,7 @@
 	/*
 	 *    Allocate the ip_vs_lblc_table for this service
 	 */
-	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
+	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
 	if (tbl == NULL)
 		return -ENOMEM;
 
@@ -551,6 +551,9 @@
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	if (!ipvs)
+		return -ENOENT;
+
 	if (!net_eq(net, &init_net)) {
 		ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
 						sizeof(vs_vars_table),
@@ -563,8 +566,7 @@
 	ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
 
 	ipvs->lblc_ctl_header =
-		register_net_sysctl_table(net, net_vs_ctl_path,
-					  ipvs->lblc_ctl_table);
+		register_net_sysctl(net, "net/ipv4/vs", ipvs->lblc_ctl_table);
 	if (!ipvs->lblc_ctl_header) {
 		if (!net_eq(net, &init_net))
 			kfree(ipvs->lblc_ctl_table);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index eec797f..570e31e 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -311,7 +311,7 @@
 /*
  *	Returns hash value for IPVS LBLCR entry
  */
-static inline unsigned
+static inline unsigned int
 ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
 {
 	__be32 addr_fold = addr->ip;
@@ -332,7 +332,7 @@
 static void
 ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
 {
-	unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr);
+	unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);
 
 	list_add(&en->list, &tbl->bucket[hash]);
 	atomic_inc(&tbl->entries);
@@ -347,7 +347,7 @@
 ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
 		const union nf_inet_addr *addr)
 {
-	unsigned hash = ip_vs_lblcr_hashkey(af, addr);
+	unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
 	struct ip_vs_lblcr_entry *en;
 
 	list_for_each_entry(en, &tbl->bucket[hash], list)
@@ -511,7 +511,7 @@
 	/*
 	 *    Allocate the ip_vs_lblcr_table for this service
 	 */
-	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
+	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
 	if (tbl == NULL)
 		return -ENOMEM;
 
@@ -745,6 +745,9 @@
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	if (!ipvs)
+		return -ENOENT;
+
 	if (!net_eq(net, &init_net)) {
 		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
 						sizeof(vs_vars_table),
@@ -757,8 +760,7 @@
 	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
 
 	ipvs->lblcr_ctl_header =
-		register_net_sysctl_table(net, net_vs_ctl_path,
-					  ipvs->lblcr_ctl_table);
+		register_net_sysctl(net, "net/ipv4/vs", ipvs->lblcr_ctl_table);
 	if (!ipvs->lblcr_ctl_header) {
 		if (!net_eq(net, &init_net))
 			kfree(ipvs->lblcr_ctl_table);
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index f843a88..50d82186 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -48,7 +48,7 @@
  */
 static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
 {
-	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+	unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
 
 	pp->next = ip_vs_proto_table[hash];
 	ip_vs_proto_table[hash] = pp;
@@ -59,9 +59,6 @@
 	return 0;
 }
 
-#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
-    defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
-    defined(CONFIG_IP_VS_PROTO_ESP)
 /*
  *	register an ipvs protocols netns related data
  */
@@ -69,9 +66,9 @@
 register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
-	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+	unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
 	struct ip_vs_proto_data *pd =
-			kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
+			kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL);
 
 	if (!pd)
 		return -ENOMEM;
@@ -81,12 +78,18 @@
 	ipvs->proto_data_table[hash] = pd;
 	atomic_set(&pd->appcnt, 0);	/* Init app counter */
 
-	if (pp->init_netns != NULL)
-		pp->init_netns(net, pd);
+	if (pp->init_netns != NULL) {
+		int ret = pp->init_netns(net, pd);
+		if (ret) {
+			/* unlink and free proto data */
+			ipvs->proto_data_table[hash] = pd->next;
+			kfree(pd);
+			return ret;
+		}
+	}
 
 	return 0;
 }
-#endif
 
 /*
  *	unregister an ipvs protocol
@@ -94,7 +97,7 @@
 static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
 {
 	struct ip_vs_protocol **pp_p;
-	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+	unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
 
 	pp_p = &ip_vs_proto_table[hash];
 	for (; *pp_p; pp_p = &(*pp_p)->next) {
@@ -117,7 +120,7 @@
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_proto_data **pd_p;
-	unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol);
+	unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol);
 
 	pd_p = &ipvs->proto_data_table[hash];
 	for (; *pd_p; pd_p = &(*pd_p)->next) {
@@ -139,7 +142,7 @@
 struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
 {
 	struct ip_vs_protocol *pp;
-	unsigned hash = IP_VS_PROTO_HASH(proto);
+	unsigned int hash = IP_VS_PROTO_HASH(proto);
 
 	for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) {
 		if (pp->protocol == proto)
@@ -153,11 +156,11 @@
 /*
  *	get ip_vs_protocol object data by netns and proto
  */
-struct ip_vs_proto_data *
+static struct ip_vs_proto_data *
 __ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
 {
 	struct ip_vs_proto_data *pd;
-	unsigned hash = IP_VS_PROTO_HASH(proto);
+	unsigned int hash = IP_VS_PROTO_HASH(proto);
 
 	for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
 		if (pd->pp->protocol == proto)
@@ -196,7 +199,7 @@
 int *
 ip_vs_create_timeout_table(int *table, int size)
 {
-	return kmemdup(table, size, GFP_ATOMIC);
+	return kmemdup(table, size, GFP_KERNEL);
 }
 
 
@@ -316,22 +319,35 @@
  */
 int __net_init ip_vs_protocol_net_init(struct net *net)
 {
+	int i, ret;
+	static struct ip_vs_protocol *protos[] = {
 #ifdef CONFIG_IP_VS_PROTO_TCP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+	&ip_vs_protocol_tcp,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+	&ip_vs_protocol_udp,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_SCTP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+	&ip_vs_protocol_sctp,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_AH
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+	&ip_vs_protocol_ah,
 #endif
 #ifdef CONFIG_IP_VS_PROTO_ESP
-	register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+	&ip_vs_protocol_esp,
 #endif
+	};
+
+	for (i = 0; i < ARRAY_SIZE(protos); i++) {
+		ret = register_ip_vs_proto_netns(net, protos[i]);
+		if (ret < 0)
+			goto cleanup;
+	}
 	return 0;
+
+cleanup:
+	ip_vs_protocol_net_cleanup(net);
+	return ret;
 }
 
 void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
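
ip_vs_protocol_net_init() now walks an array of protocol descriptors and, if any per-netns registration fails, unwinds through a cleanup label instead of ignoring the error. The same register-loop-with-rollback shape in a self-contained sketch (the descriptor type and callbacks are placeholders; the kernel version delegates the unwind to ip_vs_protocol_net_cleanup()):

#include <stddef.h>

struct proto_desc {
	const char *name;
	int (*init)(void);
	void (*exit)(void);
};

static int  ok_init(void)   { return 0; }
static int  bad_init(void)  { return -1; }	/* simulate a failure */
static void noop_exit(void) { }

static const struct proto_desc protos[] = {
	{ "tcp",  ok_init,  noop_exit },
	{ "udp",  ok_init,  noop_exit },
	{ "sctp", bad_init, noop_exit },
};

static int register_all(void)
{
	int i, ret;

	for (i = 0; i < (int)(sizeof(protos) / sizeof(protos[0])); i++) {
		ret = protos[i].init();
		if (ret < 0)
			goto cleanup;
	}
	return 0;

cleanup:
	/* Unwind only what was registered before the failure. */
	while (--i >= 0)
		protos[i].exit();
	return ret;
}

int main(void)
{
	return register_all() == -1 ? 0 : 1;	/* expect the simulated failure */
}
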
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1fbf7a2..9f3fb75 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -1090,7 +1090,7 @@
  *   timeouts is netns related now.
  * ---------------------------------------------
  */
-static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -1098,6 +1098,9 @@
 	spin_lock_init(&ipvs->sctp_app_lock);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
 							sizeof(sctp_timeouts));
+	if (!pd->timeout_table)
+		return -ENOMEM;
+	return 0;
 }
 
 static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index ef8641f..cd609cc 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -677,7 +677,7 @@
  *   timeouts is netns related now.
  * ---------------------------------------------
  */
-static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -685,7 +685,10 @@
 	spin_lock_init(&ipvs->tcp_app_lock);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
 							sizeof(tcp_timeouts));
+	if (!pd->timeout_table)
+		return -ENOMEM;
 	pd->tcp_state_table =  tcp_states;
+	return 0;
 }
 
 static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f4b7262..2fedb2d 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -467,7 +467,7 @@
 	cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
 }
 
-static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -475,6 +475,9 @@
 	spin_lock_init(&ipvs->udp_app_lock);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
 							sizeof(udp_timeouts));
+	if (!pd->timeout_table)
+		return -ENOMEM;
+	return 0;
 }
 
 static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 069e8d4..0512652 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -70,7 +70,7 @@
 /*
  *	Returns hash value for IPVS SH entry
  */
-static inline unsigned ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr)
+static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr)
 {
 	__be32 addr_fold = addr->ip;
 
@@ -162,7 +162,7 @@
 
 	/* allocate the SH table for this service */
 	tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE,
-		      GFP_ATOMIC);
+		      GFP_KERNEL);
 	if (tbl == NULL)
 		return -ENOMEM;
 
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8a0d6d6..effa10c 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -196,6 +196,7 @@
 	struct net *net;
 	struct socket *sock;
 	char *buf;
+	int id;
 };
 
 /* Version 0 definition of packet sizes */
@@ -271,13 +272,6 @@
 	unsigned char           *end;
 };
 
-/* multicast addr */
-static struct sockaddr_in mcast_addr = {
-	.sin_family		= AF_INET,
-	.sin_port		= cpu_to_be16(IP_VS_SYNC_PORT),
-	.sin_addr.s_addr	= cpu_to_be32(IP_VS_SYNC_GROUP),
-};
-
 /*
  * Copy of struct ip_vs_seq
  * From unaligned network order to aligned host order
@@ -300,18 +294,22 @@
 	put_unaligned_be32(ho->previous_delta, &no->previous_delta);
 }
 
-static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs)
+static inline struct ip_vs_sync_buff *
+sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms)
 {
 	struct ip_vs_sync_buff *sb;
 
 	spin_lock_bh(&ipvs->sync_lock);
-	if (list_empty(&ipvs->sync_queue)) {
+	if (list_empty(&ms->sync_queue)) {
 		sb = NULL;
+		__set_current_state(TASK_INTERRUPTIBLE);
 	} else {
-		sb = list_entry(ipvs->sync_queue.next,
-				struct ip_vs_sync_buff,
+		sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff,
 				list);
 		list_del(&sb->list);
+		ms->sync_queue_len--;
+		if (!ms->sync_queue_len)
+			ms->sync_queue_delay = 0;
 	}
 	spin_unlock_bh(&ipvs->sync_lock);
 
@@ -334,7 +332,7 @@
 		kfree(sb);
 		return NULL;
 	}
-	sb->mesg->reserved = 0;  /* old nr_conns i.e. must be zeo now */
+	sb->mesg->reserved = 0;  /* old nr_conns i.e. must be zero now */
 	sb->mesg->version = SYNC_PROTO_VER;
 	sb->mesg->syncid = ipvs->master_syncid;
 	sb->mesg->size = sizeof(struct ip_vs_sync_mesg);
@@ -353,14 +351,22 @@
 	kfree(sb);
 }
 
-static inline void sb_queue_tail(struct netns_ipvs *ipvs)
+static inline void sb_queue_tail(struct netns_ipvs *ipvs,
+				 struct ipvs_master_sync_state *ms)
 {
-	struct ip_vs_sync_buff *sb = ipvs->sync_buff;
+	struct ip_vs_sync_buff *sb = ms->sync_buff;
 
 	spin_lock(&ipvs->sync_lock);
-	if (ipvs->sync_state & IP_VS_STATE_MASTER)
-		list_add_tail(&sb->list, &ipvs->sync_queue);
-	else
+	if (ipvs->sync_state & IP_VS_STATE_MASTER &&
+	    ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) {
+		if (!ms->sync_queue_len)
+			schedule_delayed_work(&ms->master_wakeup_work,
+					      max(IPVS_SYNC_SEND_DELAY, 1));
+		ms->sync_queue_len++;
+		list_add_tail(&sb->list, &ms->sync_queue);
+		if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
+			wake_up_process(ms->master_thread);
+	} else
 		ip_vs_sync_buff_release(sb);
 	spin_unlock(&ipvs->sync_lock);
 }
@@ -370,49 +376,26 @@
  *	than the specified time or the specified time is zero.
  */
 static inline struct ip_vs_sync_buff *
-get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
+get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms,
+		   unsigned long time)
 {
 	struct ip_vs_sync_buff *sb;
 
 	spin_lock_bh(&ipvs->sync_buff_lock);
-	if (ipvs->sync_buff &&
-	    time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) {
-		sb = ipvs->sync_buff;
-		ipvs->sync_buff = NULL;
+	sb = ms->sync_buff;
+	if (sb && time_after_eq(jiffies - sb->firstuse, time)) {
+		ms->sync_buff = NULL;
+		__set_current_state(TASK_RUNNING);
 	} else
 		sb = NULL;
 	spin_unlock_bh(&ipvs->sync_buff_lock);
 	return sb;
 }
 
-/*
- * Switch mode from sending version 0 or 1
- *  - must handle sync_buf
- */
-void ip_vs_sync_switch_mode(struct net *net, int mode)
+static inline int
+select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
-	if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
-		return;
-	if (mode == sysctl_sync_ver(ipvs) || !ipvs->sync_buff)
-		return;
-
-	spin_lock_bh(&ipvs->sync_buff_lock);
-	/* Buffer empty ? then let buf_create do the job  */
-	if (ipvs->sync_buff->mesg->size <=  sizeof(struct ip_vs_sync_mesg)) {
-		kfree(ipvs->sync_buff);
-		ipvs->sync_buff = NULL;
-	} else {
-		spin_lock_bh(&ipvs->sync_lock);
-		if (ipvs->sync_state & IP_VS_STATE_MASTER)
-			list_add_tail(&ipvs->sync_buff->list,
-				      &ipvs->sync_queue);
-		else
-			ip_vs_sync_buff_release(ipvs->sync_buff);
-		spin_unlock_bh(&ipvs->sync_lock);
-	}
-	spin_unlock_bh(&ipvs->sync_buff_lock);
+	return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask;
 }
 
 /*
@@ -442,15 +425,101 @@
 	return sb;
 }
 
+/* Check if conn should be synced.
+ * pkts: conn packets, use sysctl_sync_threshold to avoid packet check
+ * - (1) sync_refresh_period: reduce sync rate. Additionally, retry
+ *	sync_retries times with period of sync_refresh_period/8
+ * - (2) if both sync_refresh_period and sync_period are 0 send sync only
+ *	for state changes or only once when pkts matches sync_threshold
+ * - (3) templates: rate can be reduced only with sync_refresh_period or
+ *	with (2)
+ */
+static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs,
+				  struct ip_vs_conn *cp, int pkts)
+{
+	unsigned long orig = ACCESS_ONCE(cp->sync_endtime);
+	unsigned long now = jiffies;
+	unsigned long n = (now + cp->timeout) & ~3UL;
+	unsigned int sync_refresh_period;
+	int sync_period;
+	int force;
+
+	/* Check if we sync in current state */
+	if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE))
+		force = 0;
+	else if (likely(cp->protocol == IPPROTO_TCP)) {
+		if (!((1 << cp->state) &
+		      ((1 << IP_VS_TCP_S_ESTABLISHED) |
+		       (1 << IP_VS_TCP_S_FIN_WAIT) |
+		       (1 << IP_VS_TCP_S_CLOSE) |
+		       (1 << IP_VS_TCP_S_CLOSE_WAIT) |
+		       (1 << IP_VS_TCP_S_TIME_WAIT))))
+			return 0;
+		force = cp->state != cp->old_state;
+		if (force && cp->state != IP_VS_TCP_S_ESTABLISHED)
+			goto set;
+	} else if (unlikely(cp->protocol == IPPROTO_SCTP)) {
+		if (!((1 << cp->state) &
+		      ((1 << IP_VS_SCTP_S_ESTABLISHED) |
+		       (1 << IP_VS_SCTP_S_CLOSED) |
+		       (1 << IP_VS_SCTP_S_SHUT_ACK_CLI) |
+		       (1 << IP_VS_SCTP_S_SHUT_ACK_SER))))
+			return 0;
+		force = cp->state != cp->old_state;
+		if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED)
+			goto set;
+	} else {
+		/* UDP or another protocol with single state */
+		force = 0;
+	}
+
+	sync_refresh_period = sysctl_sync_refresh_period(ipvs);
+	if (sync_refresh_period > 0) {
+		long diff = n - orig;
+		long min_diff = max(cp->timeout >> 1, 10UL * HZ);
+
+		/* Avoid sync if difference is below sync_refresh_period
+		 * and below the half timeout.
+		 */
+		if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) {
+			int retries = orig & 3;
+
+			if (retries >= sysctl_sync_retries(ipvs))
+				return 0;
+			if (time_before(now, orig - cp->timeout +
+					(sync_refresh_period >> 3)))
+				return 0;
+			n |= retries + 1;
+		}
+	}
+	sync_period = sysctl_sync_period(ipvs);
+	if (sync_period > 0) {
+		if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) &&
+		    pkts % sync_period != sysctl_sync_threshold(ipvs))
+			return 0;
+	} else if (sync_refresh_period <= 0 &&
+		   pkts != sysctl_sync_threshold(ipvs))
+		return 0;
+
+set:
+	cp->old_state = cp->state;
+	n = cmpxchg(&cp->sync_endtime, orig, n);
+	return n == orig || force;
+}
+
 /*
  *      Version 0 , could be switched in by sys_ctl.
  *      Add an ip_vs_conn information into the current sync_buff.
  */
-void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
+static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
+			       int pkts)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_sync_mesg_v0 *m;
 	struct ip_vs_sync_conn_v0 *s;
+	struct ip_vs_sync_buff *buff;
+	struct ipvs_master_sync_state *ms;
+	int id;
 	int len;
 
 	if (unlikely(cp->af != AF_INET))
@@ -459,21 +528,41 @@
 	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
 		return;
 
+	if (!ip_vs_sync_conn_needed(ipvs, cp, pkts))
+		return;
+
 	spin_lock(&ipvs->sync_buff_lock);
-	if (!ipvs->sync_buff) {
-		ipvs->sync_buff =
-			ip_vs_sync_buff_create_v0(ipvs);
-		if (!ipvs->sync_buff) {
+	if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
+		spin_unlock(&ipvs->sync_buff_lock);
+		return;
+	}
+
+	id = select_master_thread_id(ipvs, cp);
+	ms = &ipvs->ms[id];
+	buff = ms->sync_buff;
+	if (buff) {
+		m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
+		/* Send buffer if it is for v1 */
+		if (!m->nr_conns) {
+			sb_queue_tail(ipvs, ms);
+			ms->sync_buff = NULL;
+			buff = NULL;
+		}
+	}
+	if (!buff) {
+		buff = ip_vs_sync_buff_create_v0(ipvs);
+		if (!buff) {
 			spin_unlock(&ipvs->sync_buff_lock);
 			pr_err("ip_vs_sync_buff_create failed.\n");
 			return;
 		}
+		ms->sync_buff = buff;
 	}
 
 	len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
 		SIMPLE_CONN_SIZE;
-	m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg;
-	s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head;
+	m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
+	s = (struct ip_vs_sync_conn_v0 *) buff->head;
 
 	/* copy members */
 	s->reserved = 0;
@@ -494,18 +583,24 @@
 
 	m->nr_conns++;
 	m->size += len;
-	ipvs->sync_buff->head += len;
+	buff->head += len;
 
 	/* check if there is a space for next one */
-	if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) {
-		sb_queue_tail(ipvs);
-		ipvs->sync_buff = NULL;
+	if (buff->head + FULL_CONN_SIZE > buff->end) {
+		sb_queue_tail(ipvs, ms);
+		ms->sync_buff = NULL;
 	}
 	spin_unlock(&ipvs->sync_buff_lock);
 
 	/* synchronize its controller if it has */
-	if (cp->control)
-		ip_vs_sync_conn(net, cp->control);
+	cp = cp->control;
+	if (cp) {
+		if (cp->flags & IP_VS_CONN_F_TEMPLATE)
+			pkts = atomic_add_return(1, &cp->in_pkts);
+		else
+			pkts = sysctl_sync_threshold(ipvs);
+		ip_vs_sync_conn(net, cp->control, pkts);
+	}
 }
 
 /*
@@ -513,23 +608,29 @@
  *      Called by ip_vs_in.
  *      Sending Version 1 messages
  */
-void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_sync_mesg *m;
 	union ip_vs_sync_conn *s;
+	struct ip_vs_sync_buff *buff;
+	struct ipvs_master_sync_state *ms;
+	int id;
 	__u8 *p;
 	unsigned int len, pe_name_len, pad;
 
 	/* Handle old version of the protocol */
 	if (sysctl_sync_ver(ipvs) == 0) {
-		ip_vs_sync_conn_v0(net, cp);
+		ip_vs_sync_conn_v0(net, cp, pkts);
 		return;
 	}
 	/* Do not sync ONE PACKET */
 	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
 		goto control;
 sloop:
+	if (!ip_vs_sync_conn_needed(ipvs, cp, pkts))
+		goto control;
+
 	/* Sanity checks */
 	pe_name_len = 0;
 	if (cp->pe_data_len) {
@@ -541,6 +642,13 @@
 	}
 
 	spin_lock(&ipvs->sync_buff_lock);
+	if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
+		spin_unlock(&ipvs->sync_buff_lock);
+		return;
+	}
+
+	id = select_master_thread_id(ipvs, cp);
+	ms = &ipvs->ms[id];
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (cp->af == AF_INET6)
@@ -559,27 +667,32 @@
 
 	/* check if there is a space for this one  */
 	pad = 0;
-	if (ipvs->sync_buff) {
-		pad = (4 - (size_t)ipvs->sync_buff->head) & 3;
-		if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) {
-			sb_queue_tail(ipvs);
-			ipvs->sync_buff = NULL;
+	buff = ms->sync_buff;
+	if (buff) {
+		m = buff->mesg;
+		pad = (4 - (size_t) buff->head) & 3;
+		/* Send buffer if it is for v0 */
+		if (buff->head + len + pad > buff->end || m->reserved) {
+			sb_queue_tail(ipvs, ms);
+			ms->sync_buff = NULL;
+			buff = NULL;
 			pad = 0;
 		}
 	}
 
-	if (!ipvs->sync_buff) {
-		ipvs->sync_buff = ip_vs_sync_buff_create(ipvs);
-		if (!ipvs->sync_buff) {
+	if (!buff) {
+		buff = ip_vs_sync_buff_create(ipvs);
+		if (!buff) {
 			spin_unlock(&ipvs->sync_buff_lock);
 			pr_err("ip_vs_sync_buff_create failed.\n");
 			return;
 		}
+		ms->sync_buff = buff;
+		m = buff->mesg;
 	}
 
-	m = ipvs->sync_buff->mesg;
-	p = ipvs->sync_buff->head;
-	ipvs->sync_buff->head += pad + len;
+	p = buff->head;
+	buff->head += pad + len;
 	m->size += pad + len;
 	/* Add ev. padding from prev. sync_conn */
 	while (pad--)
@@ -644,16 +757,10 @@
 	cp = cp->control;
 	if (!cp)
 		return;
-	/*
-	 * Reduce sync rate for templates
-	 * i.e only increment in_pkts for Templates.
-	 */
-	if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
-		int pkts = atomic_add_return(1, &cp->in_pkts);
-
-		if (pkts % sysctl_sync_period(ipvs) != 1)
-			return;
-	}
+	if (cp->flags & IP_VS_CONN_F_TEMPLATE)
+		pkts = atomic_add_return(1, &cp->in_pkts);
+	else
+		pkts = sysctl_sync_threshold(ipvs);
 	goto sloop;
 }
 
@@ -731,9 +838,32 @@
 	else
 		cp = ip_vs_ct_in_get(param);
 
-	if (cp && param->pe_data) 	/* Free pe_data */
+	if (cp) {
+		/* Free pe_data */
 		kfree(param->pe_data);
-	if (!cp) {
+
+		dest = cp->dest;
+		spin_lock(&cp->lock);
+		if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE &&
+		    !(flags & IP_VS_CONN_F_TEMPLATE) && dest) {
+			if (flags & IP_VS_CONN_F_INACTIVE) {
+				atomic_dec(&dest->activeconns);
+				atomic_inc(&dest->inactconns);
+			} else {
+				atomic_inc(&dest->activeconns);
+				atomic_dec(&dest->inactconns);
+			}
+		}
+		flags &= IP_VS_CONN_F_BACKUP_UPD_MASK;
+		flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK;
+		cp->flags = flags;
+		spin_unlock(&cp->lock);
+		if (!dest) {
+			dest = ip_vs_try_bind_dest(cp);
+			if (dest)
+				atomic_dec(&dest->refcnt);
+		}
+	} else {
 		/*
 		 * Find the appropriate destination for the connection.
 		 * If it is not found the connection will remain unbound
@@ -742,18 +872,6 @@
 		dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
 				       param->vport, protocol, fwmark, flags);
 
-		/*  Set the approprite ativity flag */
-		if (protocol == IPPROTO_TCP) {
-			if (state != IP_VS_TCP_S_ESTABLISHED)
-				flags |= IP_VS_CONN_F_INACTIVE;
-			else
-				flags &= ~IP_VS_CONN_F_INACTIVE;
-		} else if (protocol == IPPROTO_SCTP) {
-			if (state != IP_VS_SCTP_S_ESTABLISHED)
-				flags |= IP_VS_CONN_F_INACTIVE;
-			else
-				flags &= ~IP_VS_CONN_F_INACTIVE;
-		}
 		cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
 		if (dest)
 			atomic_dec(&dest->refcnt);
@@ -763,34 +881,6 @@
 			IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
 			return;
 		}
-	} else if (!cp->dest) {
-		dest = ip_vs_try_bind_dest(cp);
-		if (dest)
-			atomic_dec(&dest->refcnt);
-	} else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
-		(cp->state != state)) {
-		/* update active/inactive flag for the connection */
-		dest = cp->dest;
-		if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
-			(state != IP_VS_TCP_S_ESTABLISHED)) {
-			atomic_dec(&dest->activeconns);
-			atomic_inc(&dest->inactconns);
-			cp->flags |= IP_VS_CONN_F_INACTIVE;
-		} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
-			(state == IP_VS_TCP_S_ESTABLISHED)) {
-			atomic_inc(&dest->activeconns);
-			atomic_dec(&dest->inactconns);
-			cp->flags &= ~IP_VS_CONN_F_INACTIVE;
-		}
-	} else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
-		(cp->state != state)) {
-		dest = cp->dest;
-		if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
-		(state != IP_VS_SCTP_S_ESTABLISHED)) {
-			atomic_dec(&dest->activeconns);
-			atomic_inc(&dest->inactconns);
-			cp->flags &= ~IP_VS_CONN_F_INACTIVE;
-		}
 	}
 
 	if (opt)
@@ -839,7 +929,7 @@
 
 	p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
 	for (i=0; i<m->nr_conns; i++) {
-		unsigned flags, state;
+		unsigned int flags, state;
 
 		if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
 			IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
@@ -1109,7 +1199,7 @@
 
 		for (i=0; i<nr_conns; i++) {
 			union ip_vs_sync_conn *s;
-			unsigned size;
+			unsigned int size;
 			int retc;
 
 			p = msg_end;
@@ -1149,6 +1239,28 @@
 
 
 /*
+ *      Setup sndbuf (mode=1) or rcvbuf (mode=0)
+ */
+static void set_sock_size(struct sock *sk, int mode, int val)
+{
+	/* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */
+	/* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */
+	lock_sock(sk);
+	if (mode) {
+		val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
+			      sysctl_wmem_max);
+		sk->sk_sndbuf = val * 2;
+		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+	} else {
+		val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
+			      sysctl_rmem_max);
+		sk->sk_rcvbuf = val * 2;
+		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+	}
+	release_sock(sk);
+}
+
+/*
  *      Setup loopback of outgoing multicasts on a sending socket
  */
 static void set_mcast_loop(struct sock *sk, u_char loop)
@@ -1298,9 +1410,15 @@
 /*
  *      Set up sending multicast socket over UDP
  */
-static struct socket *make_send_sock(struct net *net)
+static struct socket *make_send_sock(struct net *net, int id)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
+	/* multicast addr */
+	struct sockaddr_in mcast_addr = {
+		.sin_family		= AF_INET,
+		.sin_port		= cpu_to_be16(IP_VS_SYNC_PORT + id),
+		.sin_addr.s_addr	= cpu_to_be32(IP_VS_SYNC_GROUP),
+	};
 	struct socket *sock;
 	int result;
 
@@ -1324,6 +1442,9 @@
 
 	set_mcast_loop(sock->sk, 0);
 	set_mcast_ttl(sock->sk, 1);
+	result = sysctl_sync_sock_size(ipvs);
+	if (result > 0)
+		set_sock_size(sock->sk, 1, result);
 
 	result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
 	if (result < 0) {
@@ -1349,9 +1470,15 @@
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct net *net)
+static struct socket *make_receive_sock(struct net *net, int id)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
+	/* multicast addr */
+	struct sockaddr_in mcast_addr = {
+		.sin_family		= AF_INET,
+		.sin_port		= cpu_to_be16(IP_VS_SYNC_PORT + id),
+		.sin_addr.s_addr	= cpu_to_be32(IP_VS_SYNC_GROUP),
+	};
 	struct socket *sock;
 	int result;
 
@@ -1368,7 +1495,10 @@
 	 */
 	sk_change_net(sock->sk, net);
 	/* it is equivalent to the REUSEADDR option in user-space */
-	sock->sk->sk_reuse = 1;
+	sock->sk->sk_reuse = SK_CAN_REUSE;
+	result = sysctl_sync_sock_size(ipvs);
+	if (result > 0)
+		set_sock_size(sock->sk, 0, result);
 
 	result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
 			sizeof(struct sockaddr));
@@ -1411,18 +1541,22 @@
 	return len;
 }
 
-static void
+static int
 ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg)
 {
 	int msize;
+	int ret;
 
 	msize = msg->size;
 
 	/* Put size in network byte order */
 	msg->size = htons(msg->size);
 
-	if (ip_vs_send_async(sock, (char *)msg, msize) != msize)
-		pr_err("ip_vs_send_async error\n");
+	ret = ip_vs_send_async(sock, (char *)msg, msize);
+	if (ret >= 0 || ret == -EAGAIN)
+		return ret;
+	pr_err("ip_vs_send_async error %d\n", ret);
+	return 0;
 }
 
 static int
@@ -1438,48 +1572,90 @@
 	iov.iov_base     = buffer;
 	iov.iov_len      = (size_t)buflen;
 
-	len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0);
+	len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT);
 
 	if (len < 0)
-		return -1;
+		return len;
 
 	LeaveFunction(7);
 	return len;
 }
 
+/* Wakeup the master thread for sending */
+static void master_wakeup_work_handler(struct work_struct *work)
+{
+	struct ipvs_master_sync_state *ms =
+		container_of(work, struct ipvs_master_sync_state,
+			     master_wakeup_work.work);
+	struct netns_ipvs *ipvs = ms->ipvs;
+
+	spin_lock_bh(&ipvs->sync_lock);
+	if (ms->sync_queue_len &&
+	    ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
+		ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
+		wake_up_process(ms->master_thread);
+	}
+	spin_unlock_bh(&ipvs->sync_lock);
+}
+
+/* Get next buffer to send */
+static inline struct ip_vs_sync_buff *
+next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms)
+{
+	struct ip_vs_sync_buff *sb;
+
+	sb = sb_dequeue(ipvs, ms);
+	if (sb)
+		return sb;
+	/* Do not delay entries in buffer for more than 2 seconds */
+	return get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME);
+}
 
 static int sync_thread_master(void *data)
 {
 	struct ip_vs_sync_thread_data *tinfo = data;
 	struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
+	struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id];
+	struct sock *sk = tinfo->sock->sk;
 	struct ip_vs_sync_buff *sb;
 
 	pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
-		"syncid = %d\n",
-		ipvs->master_mcast_ifn, ipvs->master_syncid);
+		"syncid = %d, id = %d\n",
+		ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id);
 
-	while (!kthread_should_stop()) {
-		while ((sb = sb_dequeue(ipvs))) {
-			ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
-			ip_vs_sync_buff_release(sb);
+	for (;;) {
+		sb = next_sync_buff(ipvs, ms);
+		if (unlikely(kthread_should_stop()))
+			break;
+		if (!sb) {
+			schedule_timeout(IPVS_SYNC_CHECK_PERIOD);
+			continue;
 		}
+		while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) {
+			int ret = 0;
 
-		/* check if entries stay in ipvs->sync_buff for 2 seconds */
-		sb = get_curr_sync_buff(ipvs, 2 * HZ);
-		if (sb) {
-			ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
-			ip_vs_sync_buff_release(sb);
+			__wait_event_interruptible(*sk_sleep(sk),
+						   sock_writeable(sk) ||
+						   kthread_should_stop(),
+						   ret);
+			if (unlikely(kthread_should_stop()))
+				goto done;
 		}
-
-		schedule_timeout_interruptible(HZ);
+		ip_vs_sync_buff_release(sb);
 	}
 
-	/* clean up the sync_buff queue */
-	while ((sb = sb_dequeue(ipvs)))
+done:
+	__set_current_state(TASK_RUNNING);
+	if (sb)
 		ip_vs_sync_buff_release(sb);
 
+	/* clean up the sync_buff queue */
+	while ((sb = sb_dequeue(ipvs, ms)))
+		ip_vs_sync_buff_release(sb);
+	__set_current_state(TASK_RUNNING);
+
 	/* clean up the current sync_buff */
-	sb = get_curr_sync_buff(ipvs, 0);
+	sb = get_curr_sync_buff(ipvs, ms, 0);
 	if (sb)
 		ip_vs_sync_buff_release(sb);
 
@@ -1498,8 +1674,8 @@
 	int len;
 
 	pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
-		"syncid = %d\n",
-		ipvs->backup_mcast_ifn, ipvs->backup_syncid);
+		"syncid = %d, id = %d\n",
+		ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id);
 
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -1511,7 +1687,8 @@
 			len = ip_vs_receive(tinfo->sock, tinfo->buf,
 					ipvs->recv_mesg_maxlen);
 			if (len <= 0) {
-				pr_err("receiving message error\n");
+				if (len != -EAGAIN)
+					pr_err("receiving message error\n");
 				break;
 			}
 
@@ -1535,86 +1712,140 @@
 int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
 {
 	struct ip_vs_sync_thread_data *tinfo;
-	struct task_struct **realtask, *task;
+	struct task_struct **array = NULL, *task;
 	struct socket *sock;
 	struct netns_ipvs *ipvs = net_ipvs(net);
-	char *name, *buf = NULL;
+	char *name;
 	int (*threadfn)(void *data);
+	int id, count;
 	int result = -ENOMEM;
 
 	IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
 		  sizeof(struct ip_vs_sync_conn_v0));
 
+	if (!ipvs->sync_state) {
+		count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
+		ipvs->threads_mask = count - 1;
+	} else
+		count = ipvs->threads_mask + 1;
 
 	if (state == IP_VS_STATE_MASTER) {
-		if (ipvs->master_thread)
+		if (ipvs->ms)
 			return -EEXIST;
 
 		strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
 			sizeof(ipvs->master_mcast_ifn));
 		ipvs->master_syncid = syncid;
-		realtask = &ipvs->master_thread;
-		name = "ipvs_master:%d";
+		name = "ipvs-m:%d:%d";
 		threadfn = sync_thread_master;
-		sock = make_send_sock(net);
 	} else if (state == IP_VS_STATE_BACKUP) {
-		if (ipvs->backup_thread)
+		if (ipvs->backup_threads)
 			return -EEXIST;
 
 		strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
 			sizeof(ipvs->backup_mcast_ifn));
 		ipvs->backup_syncid = syncid;
-		realtask = &ipvs->backup_thread;
-		name = "ipvs_backup:%d";
+		name = "ipvs-b:%d:%d";
 		threadfn = sync_thread_backup;
-		sock = make_receive_sock(net);
 	} else {
 		return -EINVAL;
 	}
 
-	if (IS_ERR(sock)) {
-		result = PTR_ERR(sock);
-		goto out;
-	}
+	if (state == IP_VS_STATE_MASTER) {
+		struct ipvs_master_sync_state *ms;
 
+		ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL);
+		if (!ipvs->ms)
+			goto out;
+		ms = ipvs->ms;
+		for (id = 0; id < count; id++, ms++) {
+			INIT_LIST_HEAD(&ms->sync_queue);
+			ms->sync_queue_len = 0;
+			ms->sync_queue_delay = 0;
+			INIT_DELAYED_WORK(&ms->master_wakeup_work,
+					  master_wakeup_work_handler);
+			ms->ipvs = ipvs;
+		}
+	} else {
+		array = kzalloc(count * sizeof(struct task_struct *),
+				GFP_KERNEL);
+		if (!array)
+			goto out;
+	}
 	set_sync_mesg_maxlen(net, state);
-	if (state == IP_VS_STATE_BACKUP) {
-		buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL);
-		if (!buf)
+
+	tinfo = NULL;
+	for (id = 0; id < count; id++) {
+		if (state == IP_VS_STATE_MASTER)
+			sock = make_send_sock(net, id);
+		else
+			sock = make_receive_sock(net, id);
+		if (IS_ERR(sock)) {
+			result = PTR_ERR(sock);
+			goto outtinfo;
+		}
+		tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+		if (!tinfo)
 			goto outsocket;
-	}
+		tinfo->net = net;
+		tinfo->sock = sock;
+		if (state == IP_VS_STATE_BACKUP) {
+			tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen,
+					     GFP_KERNEL);
+			if (!tinfo->buf)
+				goto outtinfo;
+		}
+		tinfo->id = id;
 
-	tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
-	if (!tinfo)
-		goto outbuf;
-
-	tinfo->net = net;
-	tinfo->sock = sock;
-	tinfo->buf = buf;
-
-	task = kthread_run(threadfn, tinfo, name, ipvs->gen);
-	if (IS_ERR(task)) {
-		result = PTR_ERR(task);
-		goto outtinfo;
+		task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
+		if (IS_ERR(task)) {
+			result = PTR_ERR(task);
+			goto outtinfo;
+		}
+		tinfo = NULL;
+		if (state == IP_VS_STATE_MASTER)
+			ipvs->ms[id].master_thread = task;
+		else
+			array[id] = task;
 	}
 
 	/* mark as active */
-	*realtask = task;
+
+	if (state == IP_VS_STATE_BACKUP)
+		ipvs->backup_threads = array;
+	spin_lock_bh(&ipvs->sync_buff_lock);
 	ipvs->sync_state |= state;
+	spin_unlock_bh(&ipvs->sync_buff_lock);
 
 	/* increase the module use count */
 	ip_vs_use_count_inc();
 
 	return 0;
 
-outtinfo:
-	kfree(tinfo);
-outbuf:
-	kfree(buf);
 outsocket:
 	sk_release_kernel(sock->sk);
+
+outtinfo:
+	if (tinfo) {
+		sk_release_kernel(tinfo->sock->sk);
+		kfree(tinfo->buf);
+		kfree(tinfo);
+	}
+	count = id;
+	while (count-- > 0) {
+		if (state == IP_VS_STATE_MASTER)
+			kthread_stop(ipvs->ms[count].master_thread);
+		else
+			kthread_stop(array[count]);
+	}
+	kfree(array);
+
 out:
+	if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
+		kfree(ipvs->ms);
+		ipvs->ms = NULL;
+	}
 	return result;
 }
 
@@ -1622,38 +1853,60 @@
 int stop_sync_thread(struct net *net, int state)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct task_struct **array;
+	int id;
 	int retc = -EINVAL;
 
 	IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 
 	if (state == IP_VS_STATE_MASTER) {
-		if (!ipvs->master_thread)
+		if (!ipvs->ms)
 			return -ESRCH;
 
-		pr_info("stopping master sync thread %d ...\n",
-			task_pid_nr(ipvs->master_thread));
-
 		/*
 		 * The lock synchronizes with sb_queue_tail(), so that we don't
 		 * add sync buffers to the queue, when we are already in
 		 * progress of stopping the master sync daemon.
 		 */
 
-		spin_lock_bh(&ipvs->sync_lock);
+		spin_lock_bh(&ipvs->sync_buff_lock);
+		spin_lock(&ipvs->sync_lock);
 		ipvs->sync_state &= ~IP_VS_STATE_MASTER;
-		spin_unlock_bh(&ipvs->sync_lock);
-		retc = kthread_stop(ipvs->master_thread);
-		ipvs->master_thread = NULL;
+		spin_unlock(&ipvs->sync_lock);
+		spin_unlock_bh(&ipvs->sync_buff_lock);
+
+		retc = 0;
+		for (id = ipvs->threads_mask; id >= 0; id--) {
+			struct ipvs_master_sync_state *ms = &ipvs->ms[id];
+			int ret;
+
+			pr_info("stopping master sync thread %d ...\n",
+				task_pid_nr(ms->master_thread));
+			cancel_delayed_work_sync(&ms->master_wakeup_work);
+			ret = kthread_stop(ms->master_thread);
+			if (retc >= 0)
+				retc = ret;
+		}
+		kfree(ipvs->ms);
+		ipvs->ms = NULL;
 	} else if (state == IP_VS_STATE_BACKUP) {
-		if (!ipvs->backup_thread)
+		if (!ipvs->backup_threads)
 			return -ESRCH;
 
-		pr_info("stopping backup sync thread %d ...\n",
-			task_pid_nr(ipvs->backup_thread));
-
 		ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
-		retc = kthread_stop(ipvs->backup_thread);
-		ipvs->backup_thread = NULL;
+		array = ipvs->backup_threads;
+		retc = 0;
+		for (id = ipvs->threads_mask; id >= 0; id--) {
+			int ret;
+
+			pr_info("stopping backup sync thread %d ...\n",
+				task_pid_nr(array[id]));
+			ret = kthread_stop(array[id]);
+			if (retc >= 0)
+				retc = ret;
+		}
+		kfree(array);
+		ipvs->backup_threads = NULL;
 	}
 
 	/* decrease the module use count */
@@ -1670,13 +1923,8 @@
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	__mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
-	INIT_LIST_HEAD(&ipvs->sync_queue);
 	spin_lock_init(&ipvs->sync_lock);
 	spin_lock_init(&ipvs->sync_buff_lock);
-
-	ipvs->sync_mcast_addr.sin_family = AF_INET;
-	ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT);
-	ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP);
 	return 0;
 }
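The master sync work is now split over several kthreads, and select_master_thread_id() above picks a thread purely from the connection pointer: it shifts away the low address bits that are identical for all objects of that size and masks with threads_mask (the port count minus one, which acts as a cheap modulo when the count is a power of two). A minimal userspace sketch of that mapping, with fake_conn and ilog2_sz() as stand-ins for struct ip_vs_conn and the kernel's ilog2():

#include <stdio.h>
#include <stdint.h>

struct fake_conn { char pad[256]; };	/* stand-in for struct ip_vs_conn */

static int ilog2_sz(size_t n)		/* stand-in for the kernel's ilog2() */
{
	int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

static int select_thread_id(const struct fake_conn *cp, unsigned int threads_mask)
{
	/* same expression as select_master_thread_id() */
	return ((long)(intptr_t)cp >> (1 + ilog2_sz(sizeof(*cp)))) & threads_mask;
}

int main(void)
{
	struct fake_conn conns[8];
	unsigned int threads_mask = 4 - 1;	/* e.g. sync_ports = 4 */
	int i;

	for (i = 0; i < 8; i++)
		printf("conn %d -> master thread %d\n", i,
		       select_thread_id(&conns[i], threads_mask));
	return 0;
}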
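Similarly, set_sock_size() above is the in-kernel counterpart of the setsockopt() calls quoted in its comment: the requested value is clamped against wmem_max/rmem_max and stored doubled, much as SO_SNDBUF/SO_RCVBUF behave for ordinary sockets. The doubling is easy to observe from userspace (a plain UDP socket here, not the sync socket itself):

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int val = 64 * 1024, out = 0;
	socklen_t len = sizeof(out);

	if (fd < 0)
		return 1;
	/* the kernel clamps val and keeps sk_sndbuf = 2 * val */
	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &out, &len);
	printf("requested %d, kernel reports %d\n", val, out);
	return 0;
}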
 
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index fd0d4e0..231be7d 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -84,7 +84,7 @@
 	/*
 	 *    Allocate the mark variable for WRR scheduling
 	 */
-	mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC);
+	mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_KERNEL);
 	if (mark == NULL)
 		return -ENOMEM;
 
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index f4f8cda..d61e078 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -69,8 +69,8 @@
 
 	table[0].data = &net->ct.sysctl_acct;
 
-	net->ct.acct_sysctl_header = register_net_sysctl_table(net,
-			nf_net_netfilter_sysctl_path, table);
+	net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter",
+							 table);
 	if (!net->ct.acct_sysctl_header) {
 		printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n");
 		goto out_register;
diff --git a/net/netfilter/nf_conntrack_amanda.c b/net/netfilter/nf_conntrack_amanda.c
index 13fd2c5..f2de8c5 100644
--- a/net/netfilter/nf_conntrack_amanda.c
+++ b/net/netfilter/nf_conntrack_amanda.c
@@ -107,8 +107,7 @@
 	/* No data? */
 	dataoff = protoff + sizeof(struct udphdr);
 	if (dataoff >= skb->len) {
-		if (net_ratelimit())
-			printk(KERN_ERR "amanda_help: skblen = %u\n", skb->len);
+		net_err_ratelimited("amanda_help: skblen = %u\n", skb->len);
 		return NF_ACCEPT;
 	}
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 729f157..ac3af97 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -683,10 +683,7 @@
 	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
 		if (!early_drop(net, hash_bucket(hash, net))) {
 			atomic_dec(&net->ct.count);
-			if (net_ratelimit())
-				printk(KERN_WARNING
-				       "nf_conntrack: table full, dropping"
-				       " packet.\n");
+			net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
 			return ERR_PTR(-ENOMEM);
 		}
 	}
@@ -1152,8 +1149,9 @@
 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
 			       const struct nf_conntrack_tuple *tuple)
 {
-	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
-	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
+	if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
+	    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -1335,7 +1333,6 @@
 	while (untrack_refs() > 0)
 		schedule();
 
-	nf_conntrack_helper_fini();
 	nf_conntrack_proto_fini();
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	nf_ct_extend_unregister(&nf_ct_zone_extend);
@@ -1353,6 +1350,7 @@
 	}
 
 	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
+	nf_conntrack_helper_fini(net);
 	nf_conntrack_timeout_fini(net);
 	nf_conntrack_ecache_fini(net);
 	nf_conntrack_tstamp_fini(net);
@@ -1503,10 +1501,6 @@
 	if (ret < 0)
 		goto err_proto;
 
-	ret = nf_conntrack_helper_init();
-	if (ret < 0)
-		goto err_helper;
-
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	ret = nf_ct_extend_register(&nf_ct_zone_extend);
 	if (ret < 0)
@@ -1524,10 +1518,8 @@
 
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 err_extend:
-	nf_conntrack_helper_fini();
-#endif
-err_helper:
 	nf_conntrack_proto_fini();
+#endif
 err_proto:
 	return ret;
 }
@@ -1588,9 +1580,14 @@
 	ret = nf_conntrack_timeout_init(net);
 	if (ret < 0)
 		goto err_timeout;
+	ret = nf_conntrack_helper_init(net);
+	if (ret < 0)
+		goto err_helper;
 
 	return 0;
 
+err_helper:
+	nf_conntrack_timeout_fini(net);
 err_timeout:
 	nf_conntrack_ecache_fini(net);
 err_ecache:
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5bd3047d..e7be79e 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -84,7 +84,7 @@
 int nf_conntrack_register_notifier(struct net *net,
 				   struct nf_ct_event_notifier *new)
 {
-	int ret = 0;
+	int ret;
 	struct nf_ct_event_notifier *notify;
 
 	mutex_lock(&nf_ct_ecache_mutex);
@@ -95,8 +95,7 @@
 		goto out_unlock;
 	}
 	rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
-	mutex_unlock(&nf_ct_ecache_mutex);
-	return ret;
+	ret = 0;
 
 out_unlock:
 	mutex_unlock(&nf_ct_ecache_mutex);
@@ -121,7 +120,7 @@
 int nf_ct_expect_register_notifier(struct net *net,
 				   struct nf_exp_event_notifier *new)
 {
-	int ret = 0;
+	int ret;
 	struct nf_exp_event_notifier *notify;
 
 	mutex_lock(&nf_ct_ecache_mutex);
@@ -132,8 +131,7 @@
 		goto out_unlock;
 	}
 	rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
-	mutex_unlock(&nf_ct_ecache_mutex);
-	return ret;
+	ret = 0;
 
 out_unlock:
 	mutex_unlock(&nf_ct_ecache_mutex);
@@ -199,8 +197,7 @@
 	table[1].data = &net->ct.sysctl_events_retry_timeout;
 
 	net->ct.event_sysctl_header =
-		register_net_sysctl_table(net,
-					  nf_net_netfilter_sysctl_path, table);
+		register_net_sysctl(net, "net/netfilter", table);
 	if (!net->ct.event_sysctl_header) {
 		printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
 		goto out_register;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4147ba3..45cf602 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -424,9 +424,7 @@
 	}
 
 	if (net->ct.expect_count >= nf_ct_expect_max) {
-		if (net_ratelimit())
-			printk(KERN_WARNING
-			       "nf_conntrack: expectation table full\n");
+		net_warn_ratelimited("nf_conntrack: expectation table full\n");
 		ret = -EMFILE;
 	}
 out:
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 722291f..46d69d7 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -605,8 +605,7 @@
 
       drop:
 	spin_unlock_bh(&nf_h323_lock);
-	if (net_ratelimit())
-		pr_info("nf_ct_h245: packet dropped\n");
+	net_info_ratelimited("nf_ct_h245: packet dropped\n");
 	return NF_DROP;
 }
 
@@ -1156,8 +1155,7 @@
 
       drop:
 	spin_unlock_bh(&nf_h323_lock);
-	if (net_ratelimit())
-		pr_info("nf_ct_q931: packet dropped\n");
+	net_info_ratelimited("nf_ct_q931: packet dropped\n");
 	return NF_DROP;
 }
 
@@ -1230,7 +1228,7 @@
 
 /****************************************************************************/
 static int set_expect_timeout(struct nf_conntrack_expect *exp,
-			      unsigned timeout)
+			      unsigned int timeout)
 {
 	if (!exp || !del_timer(&exp->timeout))
 		return 0;
@@ -1731,8 +1729,7 @@
 
       drop:
 	spin_unlock_bh(&nf_h323_lock);
-	if (net_ratelimit())
-		pr_info("nf_ct_ras: packet dropped\n");
+	net_info_ratelimited("nf_ct_ras: packet dropped\n");
 	return NF_DROP;
 }
 
@@ -1833,4 +1830,6 @@
 MODULE_DESCRIPTION("H.323 connection tracking helper");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("ip_conntrack_h323");
-MODULE_ALIAS_NFCT_HELPER("h323");
+MODULE_ALIAS_NFCT_HELPER("RAS");
+MODULE_ALIAS_NFCT_HELPER("Q.931");
+MODULE_ALIAS_NFCT_HELPER("H.245");
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 436b7cb..4fa2ff9 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -34,6 +34,67 @@
 static unsigned int nf_ct_helper_hsize __read_mostly;
 static unsigned int nf_ct_helper_count __read_mostly;
 
+static bool nf_ct_auto_assign_helper __read_mostly = true;
+module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
+MODULE_PARM_DESC(nf_conntrack_helper,
+		 "Enable automatic conntrack helper assignment (default 1)");
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table helper_sysctl_table[] = {
+	{
+		.procname	= "nf_conntrack_helper",
+		.data		= &init_net.ct.sysctl_auto_assign_helper,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{}
+};
+
+static int nf_conntrack_helper_init_sysctl(struct net *net)
+{
+	struct ctl_table *table;
+
+	table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
+			GFP_KERNEL);
+	if (!table)
+		goto out;
+
+	table[0].data = &net->ct.sysctl_auto_assign_helper;
+
+	net->ct.helper_sysctl_header =
+		register_net_sysctl(net, "net/netfilter", table);
+
+	if (!net->ct.helper_sysctl_header) {
+		pr_err("nf_conntrack_helper: can't register to sysctl.\n");
+		goto out_register;
+	}
+	return 0;
+
+out_register:
+	kfree(table);
+out:
+	return -ENOMEM;
+}
+
+static void nf_conntrack_helper_fini_sysctl(struct net *net)
+{
+	struct ctl_table *table;
+
+	table = net->ct.helper_sysctl_header->ctl_table_arg;
+	unregister_net_sysctl_table(net->ct.helper_sysctl_header);
+	kfree(table);
+}
+#else
+static int nf_conntrack_helper_init_sysctl(struct net *net)
+{
+	return 0;
+}
+
+static void nf_conntrack_helper_fini_sysctl(struct net *net)
+{
+}
+#endif /* CONFIG_SYSCTL */
 
 /* Stupid hash, but collision free for the default registrations of the
  * helpers currently in the kernel. */
@@ -118,17 +179,38 @@
 {
 	struct nf_conntrack_helper *helper = NULL;
 	struct nf_conn_help *help;
+	struct net *net = nf_ct_net(ct);
 	int ret = 0;
 
+	/* We already got a helper explicitly attached. The function
+	 * nf_conntrack_alter_reply - in case NAT is in use - asks for looking
+	 * the helper up again. Since now the user is in full control of
+	 * making consistent helper configurations, skip this automatic
+	 * re-lookup, otherwise we'll lose the helper.
+	 */
+	if (test_bit(IPS_HELPER_BIT, &ct->status))
+		return 0;
+
 	if (tmpl != NULL) {
 		help = nfct_help(tmpl);
-		if (help != NULL)
+		if (help != NULL) {
 			helper = help->helper;
+			set_bit(IPS_HELPER_BIT, &ct->status);
+		}
 	}
 
 	help = nfct_help(ct);
-	if (helper == NULL)
+	if (net->ct.sysctl_auto_assign_helper && helper == NULL) {
 		helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+		if (unlikely(!net->ct.auto_assign_helper_warned && helper)) {
+			pr_info("nf_conntrack: automatic helper "
+				"assignment is deprecated and it will "
+				"be removed soon. Use the iptables CT target "
+				"to attach helpers instead.\n");
+			net->ct.auto_assign_helper_warned = true;
+		}
+	}
+
 	if (helper == NULL) {
 		if (help)
 			RCU_INIT_POINTER(help->helper, NULL);
@@ -315,28 +397,44 @@
 	.id	= NF_CT_EXT_HELPER,
 };
 
-int nf_conntrack_helper_init(void)
+int nf_conntrack_helper_init(struct net *net)
 {
 	int err;
 
-	nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
-	nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
-	if (!nf_ct_helper_hash)
-		return -ENOMEM;
+	net->ct.auto_assign_helper_warned = false;
+	net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
 
-	err = nf_ct_extend_register(&helper_extend);
+	if (net_eq(net, &init_net)) {
+		nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
+		nf_ct_helper_hash =
+			nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
+		if (!nf_ct_helper_hash)
+			return -ENOMEM;
+
+		err = nf_ct_extend_register(&helper_extend);
+		if (err < 0)
+			goto err1;
+	}
+
+	err = nf_conntrack_helper_init_sysctl(net);
 	if (err < 0)
-		goto err1;
+		goto out_sysctl;
 
 	return 0;
 
+out_sysctl:
+	if (net_eq(net, &init_net))
+		nf_ct_extend_unregister(&helper_extend);
 err1:
 	nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
 	return err;
 }
 
-void nf_conntrack_helper_fini(void)
+void nf_conntrack_helper_fini(struct net *net)
 {
-	nf_ct_extend_unregister(&helper_extend);
-	nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
+	nf_conntrack_helper_fini_sysctl(net);
+	if (net_eq(net, &init_net)) {
+		nf_ct_extend_unregister(&helper_extend);
+		nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
+	}
 }
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 4f9390b..81366c1 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -185,11 +185,9 @@
 			tuple = &ct->tuplehash[dir].tuple;
 			if (tuple->src.u3.ip != dcc_ip &&
 			    tuple->dst.u3.ip != dcc_ip) {
-				if (net_ratelimit())
-					printk(KERN_WARNING
-						"Forged DCC command from %pI4: %pI4:%u\n",
-						&tuple->src.u3.ip,
-						&dcc_ip, dcc_port);
+				net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
+						     &tuple->src.u3.ip,
+						     &dcc_ip, dcc_port);
 				continue;
 			}
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ca7e835..6f4b00a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -66,7 +66,8 @@
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
 	if (!nest_parms)
 		goto nla_put_failure;
-	NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum);
+	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
+		goto nla_put_failure;
 
 	if (likely(l4proto->tuple_to_nlattr))
 		ret = l4proto->tuple_to_nlattr(skb, tuple);
@@ -126,7 +127,8 @@
 static inline int
 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status));
+	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -141,7 +143,8 @@
 	if (timeout < 0)
 		timeout = 0;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout));
+	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -190,7 +193,8 @@
 	nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
 	if (!nest_helper)
 		goto nla_put_failure;
-	NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name);
+	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
+		goto nla_put_failure;
 
 	if (helper->to_nlattr)
 		helper->to_nlattr(skb, ct);
@@ -214,8 +218,9 @@
 	if (!nest_count)
 		goto nla_put_failure;
 
-	NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts));
-	NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes));
+	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
+	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest_count);
 
@@ -260,11 +265,10 @@
 	if (!nest_count)
 		goto nla_put_failure;
 
-	NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
-	if (tstamp->stop != 0) {
-		NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
-			     cpu_to_be64(tstamp->stop));
-	}
+	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
+	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
+					       cpu_to_be64(tstamp->stop))))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_count);
 
 	return 0;
@@ -277,7 +281,8 @@
 static inline int
 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark));
+	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -304,7 +309,8 @@
 	if (!nest_secctx)
 		goto nla_put_failure;
 
-	NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx);
+	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_secctx);
 
 	ret = 0;
@@ -349,12 +355,13 @@
 	if (!nest_parms)
 		goto nla_put_failure;
 
-	NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS,
-		     htonl(natseq->correction_pos));
-	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
-		     htonl(natseq->offset_before));
-	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
-		     htonl(natseq->offset_after));
+	if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
+			 htonl(natseq->correction_pos)) ||
+	    nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
+			 htonl(natseq->offset_before)) ||
+	    nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
+			 htonl(natseq->offset_after)))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest_parms);
 
@@ -390,7 +397,8 @@
 static inline int
 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct));
+	if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -400,7 +408,8 @@
 static inline int
 ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
 {
-	NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)));
+	if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -440,8 +449,9 @@
 		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
-	if (nf_ct_zone(ct))
-		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
+	if (nf_ct_zone(ct) &&
+	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+		goto nla_put_failure;
 
 	if (ctnetlink_dump_status(skb, ct) < 0 ||
 	    ctnetlink_dump_timeout(skb, ct) < 0 ||
@@ -617,8 +627,9 @@
 		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
-	if (nf_ct_zone(ct))
-		NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
+	if (nf_ct_zone(ct) &&
+	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+		goto nla_put_failure;
 
 	if (ctnetlink_dump_id(skb, ct) < 0)
 		goto nla_put_failure;
@@ -1705,7 +1716,8 @@
 		if (!nest_parms)
 			goto nla_put_failure;
 
-		NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir));
+		if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
+			goto nla_put_failure;
 
 		nat_tuple.src.l3num = nf_ct_l3num(master);
 		nat_tuple.src.u3.ip = exp->saved_ip;
@@ -1718,21 +1730,24 @@
 	        nla_nest_end(skb, nest_parms);
 	}
 #endif
-	NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
-	NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
-	NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
-	NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class));
+	if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
+	    nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+	    nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
+	    nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
+		goto nla_put_failure;
 	help = nfct_help(master);
 	if (help) {
 		struct nf_conntrack_helper *helper;
 
 		helper = rcu_dereference(help->helper);
-		if (helper)
-			NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
+		if (helper &&
+		    nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
+			goto nla_put_failure;
 	}
 	expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
-	if (expfn != NULL)
-		NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name);
+	if (expfn != NULL &&
+	    nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
+		goto nla_put_failure;
 
 	return 0;
 
@@ -2065,7 +2080,15 @@
 ctnetlink_change_expect(struct nf_conntrack_expect *x,
 			const struct nlattr * const cda[])
 {
-	return -EOPNOTSUPP;
+	if (cda[CTA_EXPECT_TIMEOUT]) {
+		if (!del_timer(&x->timeout))
+			return -ETIME;
+
+		x->timeout.expires = jiffies +
+			ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
+		add_timer(&x->timeout);
+	}
+	return 0;
 }
 
 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
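The NLA_PUT_* macros removed above hid a conditional jump to nla_put_failure inside the macro body; the replacement nla_put_*() calls return an error instead, and the dumpers now check that return value explicitly, chaining several puts with ||. The behaviour is unchanged, only the control flow is no longer buried in a macro. A small userspace sketch of the same shape, with struct buf and put_bytes() as stand-ins rather than the real netlink attribute API:

#include <stdio.h>
#include <string.h>

struct buf { char data[32]; size_t len; };

/* stand-in for nla_put_be32()/nla_put_string(): 0 on success, nonzero if full */
static int put_bytes(struct buf *b, const void *p, size_t n)
{
	if (b->len + n > sizeof(b->data))
		return -1;
	memcpy(b->data + b->len, p, n);
	b->len += n;
	return 0;
}

static int dump(struct buf *b)
{
	unsigned int status = 7, timeout = 30;

	if (put_bytes(b, &status, sizeof(status)) ||
	    put_bytes(b, &timeout, sizeof(timeout)))
		goto put_failure;
	return 0;

put_failure:
	return -1;	/* mirrors the nla_put_failure: label in the real dumpers */
}

int main(void)
{
	struct buf b = { .len = 0 };
	int ret = dump(&b);

	printf("dump: %d, used %zu bytes\n", ret, b.len);
	return 0;
}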
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index be3da2c..8b631b0 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -36,11 +36,11 @@
 
 #ifdef CONFIG_SYSCTL
 static int
-nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_path *path,
+nf_ct_register_sysctl(struct ctl_table_header **header, const char *path,
 		      struct ctl_table *table, unsigned int *users)
 {
 	if (*header == NULL) {
-		*header = register_sysctl_paths(path, table);
+		*header = register_net_sysctl(&init_net, path, table);
 		if (*header == NULL)
 			return -ENOMEM;
 	}
@@ -56,7 +56,7 @@
 	if (users != NULL && --*users > 0)
 		return;
 
-	unregister_sysctl_table(*header);
+	unregister_net_sysctl_table(*header);
 	*header = NULL;
 }
 #endif
@@ -250,7 +250,7 @@
 #ifdef CONFIG_SYSCTL
 	if (l4proto->ctl_table != NULL) {
 		err = nf_ct_register_sysctl(l4proto->ctl_table_header,
-					    nf_net_netfilter_sysctl_path,
+					    "net/netfilter",
 					    l4proto->ctl_table,
 					    l4proto->ctl_table_users);
 		if (err < 0)
@@ -259,7 +259,7 @@
 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
 	if (l4proto->ctl_compat_table != NULL) {
 		err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header,
-					    nf_net_ipv4_netfilter_sysctl_path,
+					    "net/ipv4/netfilter",
 					    l4proto->ctl_compat_table, NULL);
 		if (err == 0)
 			goto out;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 24fdce2..ef706a4 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -643,11 +643,12 @@
 	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
 	if (!nest_parms)
 		goto nla_put_failure;
-	NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state);
-	NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE,
-		   ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]);
-	NLA_PUT_BE64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
-		     cpu_to_be64(ct->proto.dccp.handshake_seq));
+	if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) ||
+	    nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
+		       ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
+	    nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
+			 cpu_to_be64(ct->proto.dccp.handshake_seq)))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 	spin_unlock_bh(&ct->lock);
 	return 0;
@@ -739,9 +740,10 @@
         const unsigned int *timeouts = data;
 	int i;
 
-	for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++)
-		NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
-
+	for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
+		if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
+			goto nla_put_failure;
+	}
 	return 0;
 
 nla_put_failure:
@@ -908,8 +910,8 @@
 	dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
 	dn->sysctl_table[7].data = &dn->dccp_loose;
 
-	dn->sysctl_header = register_net_sysctl_table(net,
-			nf_net_netfilter_sysctl_path, dn->sysctl_table);
+	dn->sysctl_header = register_net_sysctl(net, "net/netfilter",
+						dn->sysctl_table);
 	if (!dn->sysctl_header) {
 		kfree(dn->sysctl_table);
 		return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 835e24c..d8923d5 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -90,7 +90,8 @@
 {
 	const unsigned int *timeout = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ));
+	if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)))
+		goto nla_put_failure;
 
 	return 0;
 
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 659648c..4bf6b4e 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -321,10 +321,11 @@
 {
 	const unsigned int *timeouts = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
-			htonl(timeouts[GRE_CT_UNREPLIED] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED,
-			htonl(timeouts[GRE_CT_REPLIED] / HZ));
+	if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
+			 htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED,
+			 htonl(timeouts[GRE_CT_REPLIED] / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 72b5088..996db2f 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -482,15 +482,12 @@
 	if (!nest_parms)
 		goto nla_put_failure;
 
-	NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state);
-
-	NLA_PUT_BE32(skb,
-		     CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
-		     ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]);
-
-	NLA_PUT_BE32(skb,
-		     CTA_PROTOINFO_SCTP_VTAG_REPLY,
-		     ct->proto.sctp.vtag[IP_CT_DIR_REPLY]);
+	if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) ||
+	    nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
+			 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) ||
+	    nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY,
+			 ct->proto.sctp.vtag[IP_CT_DIR_REPLY]))
+		goto nla_put_failure;
 
 	spin_unlock_bh(&ct->lock);
 
@@ -578,9 +575,10 @@
         const unsigned int *timeouts = data;
 	int i;
 
-	for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++)
-	        NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
-
+	for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
+	        if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
+			goto nla_put_failure;
+	}
         return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 0d07a1d..21ff1a9 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -952,7 +952,8 @@
 		spin_unlock_bh(&ct->lock);
 		if (LOG_INVALID(net, IPPROTO_TCP))
 			nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
-				  "nf_ct_tcp: invalid packet ignored ");
+				  "nf_ct_tcp: invalid packet ignored in "
+				  "state %s ", tcp_conntrack_names[old_state]);
 		return NF_ACCEPT;
 	case TCP_CONNTRACK_MAX:
 		/* Invalid packet */
@@ -1147,21 +1148,22 @@
 	if (!nest_parms)
 		goto nla_put_failure;
 
-	NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state);
-
-	NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
-		   ct->proto.tcp.seen[0].td_scale);
-
-	NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
-		   ct->proto.tcp.seen[1].td_scale);
+	if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
+	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
+		       ct->proto.tcp.seen[0].td_scale) ||
+	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
+		       ct->proto.tcp.seen[1].td_scale))
+		goto nla_put_failure;
 
 	tmp.flags = ct->proto.tcp.seen[0].flags;
-	NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
-		sizeof(struct nf_ct_tcp_flags), &tmp);
+	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
+		    sizeof(struct nf_ct_tcp_flags), &tmp))
+		goto nla_put_failure;
 
 	tmp.flags = ct->proto.tcp.seen[1].flags;
-	NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
-		sizeof(struct nf_ct_tcp_flags), &tmp);
+	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
+		    sizeof(struct nf_ct_tcp_flags), &tmp))
+		goto nla_put_failure;
 	spin_unlock_bh(&ct->lock);
 
 	nla_nest_end(skb, nest_parms);
@@ -1310,28 +1312,29 @@
 {
 	const unsigned int *timeouts = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
-			htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
-			htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
-			htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
-			htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
-			htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
-			htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
-			htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE,
-			htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
-			htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS,
-			htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK,
-			htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ));
+	if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
+			htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
+			 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
+			 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
+			 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
+			 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
+			 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
+			 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
+			 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
+			 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
+			 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
+			 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index a9073dc..7259a6b 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -181,10 +181,11 @@
 {
 	const unsigned int *timeouts = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
-			htonl(timeouts[UDP_CT_UNREPLIED] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED,
-			htonl(timeouts[UDP_CT_REPLIED] / HZ));
+	if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
+			 htonl(timeouts[UDP_CT_UNREPLIED] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED,
+			 htonl(timeouts[UDP_CT_REPLIED] / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index e060639..4d60a53 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -185,10 +185,11 @@
 {
 	const unsigned int *timeouts = data;
 
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
-			htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ));
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
-			htonl(timeouts[UDPLITE_CT_REPLIED] / HZ));
+	if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
+			 htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
+			 htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 885f5ab..9b39432 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -468,18 +468,13 @@
 	{ }
 };
 
-static struct ctl_path nf_ct_path[] = {
-	{ .procname = "net", },
-	{ }
-};
-
 static int nf_conntrack_standalone_init_sysctl(struct net *net)
 {
 	struct ctl_table *table;
 
 	if (net_eq(net, &init_net)) {
 		nf_ct_netfilter_header =
-		       register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table);
+		       register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
 		if (!nf_ct_netfilter_header)
 			goto out;
 	}
@@ -494,8 +489,7 @@
 	table[3].data = &net->ct.sysctl_checksum;
 	table[4].data = &net->ct.sysctl_log_invalid;
 
-	net->ct.sysctl_header = register_net_sysctl_table(net,
-					nf_net_netfilter_sysctl_path, table);
+	net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
 	if (!net->ct.sysctl_header)
 		goto out_unregister_netfilter;
 
@@ -505,7 +499,7 @@
 	kfree(table);
 out_kmemdup:
 	if (net_eq(net, &init_net))
-		unregister_sysctl_table(nf_ct_netfilter_header);
+		unregister_net_sysctl_table(nf_ct_netfilter_header);
 out:
 	printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
 	return -ENOMEM;
@@ -516,7 +510,7 @@
 	struct ctl_table *table;
 
 	if (net_eq(net, &init_net))
-		unregister_sysctl_table(nf_ct_netfilter_header);
+		unregister_net_sysctl_table(nf_ct_netfilter_header);
 	table = net->ct.sysctl_header->ctl_table_arg;
 	unregister_net_sysctl_table(net->ct.sysctl_header);
 	kfree(table);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index e8d27af..dbb364f 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -51,8 +51,8 @@
 
 	table[0].data = &net->ct.sysctl_tstamp;
 
-	net->ct.tstamp_sysctl_header = register_net_sysctl_table(net,
-			nf_net_netfilter_sysctl_path, table);
+	net->ct.tstamp_sysctl_header = register_net_sysctl(net,	"net/netfilter",
+							   table);
 	if (!net->ct.tstamp_sysctl_header) {
 		printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
 		goto out_register;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 957374a..703fb26 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -214,13 +214,6 @@
 #endif /* PROC_FS */
 
 #ifdef CONFIG_SYSCTL
-static struct ctl_path nf_log_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "netfilter", },
-	{ .procname = "nf_log", },
-	{ }
-};
-
 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
 static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
 static struct ctl_table_header *nf_log_dir_header;
@@ -283,7 +276,7 @@
 		nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i;
 	}
 
-	nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path,
+	nf_log_dir_header = register_net_sysctl(&init_net, "net/netfilter/nf_log",
 				       nf_log_sysctl_table);
 	if (!nf_log_dir_header)
 		return -ENOMEM;
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index e6ddde1..3e797d1 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -103,7 +103,7 @@
 EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
 
 int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
-		   unsigned group, int echo, gfp_t flags)
+		   unsigned int group, int echo, gfp_t flags)
 {
 	return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags);
 }
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index d98c868..b2e7310 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -109,7 +109,8 @@
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = 0;
 
-	NLA_PUT_STRING(skb, NFACCT_NAME, acct->name);
+	if (nla_put_string(skb, NFACCT_NAME, acct->name))
+		goto nla_put_failure;
 
 	if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
 		pkts = atomic64_xchg(&acct->pkts, 0);
@@ -118,9 +119,10 @@
 		pkts = atomic64_read(&acct->pkts);
 		bytes = atomic64_read(&acct->bytes);
 	}
-	NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts));
-	NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes));
-	NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)));
+	if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) ||
+	    nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
+	    nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
+		goto nla_put_failure;
 
 	nlmsg_end(skb, nlh);
 	return skb->len;
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 2b9e79f..3e65528 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -170,11 +170,12 @@
 	nfmsg->version = NFNETLINK_V0;
 	nfmsg->res_id = 0;
 
-	NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name);
-	NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num));
-	NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto);
-	NLA_PUT_BE32(skb, CTA_TIMEOUT_USE,
-			htonl(atomic_read(&timeout->refcnt)));
+	if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) ||
+	    nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) ||
+	    nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
+	    nla_put_be32(skb, CTA_TIMEOUT_USE,
+			 htonl(atomic_read(&timeout->refcnt))))
+		goto nla_put_failure;
 
 	if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
 		struct nlattr *nest_parms;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 66b2c54..3c3cfc0 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -391,67 +391,78 @@
 	pmsg.hw_protocol	= skb->protocol;
 	pmsg.hook		= hooknum;
 
-	NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);
+	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
+		goto nla_put_failure;
 
-	if (prefix)
-		NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);
+	if (prefix &&
+	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
+		goto nla_put_failure;
 
 	if (indev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-			     htonl(indev->ifindex));
+		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+				 htonl(indev->ifindex)))
+			goto nla_put_failure;
 #else
 		if (pf == PF_BRIDGE) {
 			/* Case 1: outdev is physical input device, we need to
 			 * look for bridge group (when called from
 			 * netfilter_bridge) */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
-				     htonl(indev->ifindex));
+			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
+					 htonl(indev->ifindex)) ||
 			/* this is the bridge group "brX" */
 			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
+			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
+				goto nla_put_failure;
 		} else {
 			/* Case 2: indev is bridge group, we need to look for
 			 * physical device (when called from ipv4) */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
-				     htonl(indev->ifindex));
-			if (skb->nf_bridge && skb->nf_bridge->physindev)
-				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
-					     htonl(skb->nf_bridge->physindev->ifindex));
+			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
+					 htonl(indev->ifindex)))
+				goto nla_put_failure;
+			if (skb->nf_bridge && skb->nf_bridge->physindev &&
+			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
+					 htonl(skb->nf_bridge->physindev->ifindex)))
+				goto nla_put_failure;
 		}
 #endif
 	}
 
 	if (outdev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-			     htonl(outdev->ifindex));
+		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+				 htonl(outdev->ifindex)))
+			goto nla_put_failure;
 #else
 		if (pf == PF_BRIDGE) {
 			/* Case 1: outdev is physical output device, we need to
 			 * look for bridge group (when called from
 			 * netfilter_bridge) */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
-				     htonl(outdev->ifindex));
+			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
+					 htonl(outdev->ifindex)) ||
 			/* this is the bridge group "brX" */
 			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
+			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
+				goto nla_put_failure;
 		} else {
 			/* Case 2: indev is a bridge group, we need to look
 			 * for physical device (when called from ipv4) */
-			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
-				     htonl(outdev->ifindex));
-			if (skb->nf_bridge && skb->nf_bridge->physoutdev)
-				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
-					     htonl(skb->nf_bridge->physoutdev->ifindex));
+			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
+					 htonl(outdev->ifindex)))
+				goto nla_put_failure;
+			if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
+			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
+					 htonl(skb->nf_bridge->physoutdev->ifindex)))
+				goto nla_put_failure;
 		}
 #endif
 	}
 
-	if (skb->mark)
-		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
+	if (skb->mark &&
+	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
+		goto nla_put_failure;
 
 	if (indev && skb->dev &&
 	    skb->mac_header != skb->network_header) {
@@ -459,16 +470,18 @@
 		int len = dev_parse_header(skb, phw.hw_addr);
 		if (len > 0) {
 			phw.hw_addrlen = htons(len);
-			NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
+			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
+				goto nla_put_failure;
 		}
 	}
 
 	if (indev && skb_mac_header_was_set(skb)) {
-		NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
-		NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
-			     htons(skb->dev->hard_header_len));
-		NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
-			skb_mac_header(skb));
+		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+		    nla_put_be16(inst->skb, NFULA_HWLEN,
+				 htons(skb->dev->hard_header_len)) ||
+		    nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
+			    skb_mac_header(skb)))
+			goto nla_put_failure;
 	}
 
 	if (skb->tstamp.tv64) {
@@ -477,7 +490,8 @@
 		ts.sec = cpu_to_be64(tv.tv_sec);
 		ts.usec = cpu_to_be64(tv.tv_usec);
 
-		NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
+		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
+			goto nla_put_failure;
 	}
 
 	/* UID */
@@ -487,22 +501,24 @@
 			struct file *file = skb->sk->sk_socket->file;
 			__be32 uid = htonl(file->f_cred->fsuid);
 			__be32 gid = htonl(file->f_cred->fsgid);
-			/* need to unlock here since NLA_PUT may goto */
 			read_unlock_bh(&skb->sk->sk_callback_lock);
-			NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
-			NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
+			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
+			    nla_put_be32(inst->skb, NFULA_GID, gid))
+				goto nla_put_failure;
 		} else
 			read_unlock_bh(&skb->sk->sk_callback_lock);
 	}
 
 	/* local sequence number */
-	if (inst->flags & NFULNL_CFG_F_SEQ)
-		NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));
+	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
+	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
+		goto nla_put_failure;
 
 	/* global sequence number */
-	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
-		NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
-			     htonl(atomic_inc_return(&global_seq)));
+	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
+	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
+			 htonl(atomic_inc_return(&global_seq))))
+		goto nla_put_failure;
 
 	if (data_len) {
 		struct nlattr *nla;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index a80b0cb..4162437 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -288,58 +288,67 @@
 	indev = entry->indev;
 	if (indev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-		NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
+		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
+			goto nla_put_failure;
 #else
 		if (entry->pf == PF_BRIDGE) {
 			/* Case 1: indev is physical input device, we need to
 			 * look for bridge group (when called from
 			 * netfilter_bridge) */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
-				     htonl(indev->ifindex));
+			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
+					 htonl(indev->ifindex)) ||
 			/* this is the bridge group "brX" */
 			/* rcu_read_lock()ed by __nf_queue */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
-				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
+			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
+					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
+				goto nla_put_failure;
 		} else {
 			/* Case 2: indev is bridge group, we need to look for
 			 * physical device (when called from ipv4) */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
-				     htonl(indev->ifindex));
-			if (entskb->nf_bridge && entskb->nf_bridge->physindev)
-				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
-					     htonl(entskb->nf_bridge->physindev->ifindex));
+			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
+					 htonl(indev->ifindex)))
+				goto nla_put_failure;
+			if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
+			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
+					 htonl(entskb->nf_bridge->physindev->ifindex)))
+				goto nla_put_failure;
 		}
 #endif
 	}
 
 	if (outdev) {
 #ifndef CONFIG_BRIDGE_NETFILTER
-		NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
+		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
+			goto nla_put_failure;
 #else
 		if (entry->pf == PF_BRIDGE) {
 			/* Case 1: outdev is physical output device, we need to
 			 * look for bridge group (when called from
 			 * netfilter_bridge) */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
-				     htonl(outdev->ifindex));
+			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
+					 htonl(outdev->ifindex)) ||
 			/* this is the bridge group "brX" */
 			/* rcu_read_lock()ed by __nf_queue */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
-				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
+			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
+					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
+				goto nla_put_failure;
 		} else {
 			/* Case 2: outdev is bridge group, we need to look for
 			 * physical output device (when called from ipv4) */
-			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
-				     htonl(outdev->ifindex));
-			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
-				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
-					     htonl(entskb->nf_bridge->physoutdev->ifindex));
+			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
+					 htonl(outdev->ifindex)))
+				goto nla_put_failure;
+			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
+			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
+					 htonl(entskb->nf_bridge->physoutdev->ifindex)))
+				goto nla_put_failure;
 		}
 #endif
 	}
 
-	if (entskb->mark)
-		NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
+	if (entskb->mark &&
+	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
+		goto nla_put_failure;
 
 	if (indev && entskb->dev &&
 	    entskb->mac_header != entskb->network_header) {
@@ -347,7 +356,8 @@
 		int len = dev_parse_header(entskb, phw.hw_addr);
 		if (len) {
 			phw.hw_addrlen = htons(len);
-			NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
+			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
+				goto nla_put_failure;
 		}
 	}
 
@@ -357,7 +367,8 @@
 		ts.sec = cpu_to_be64(tv.tv_sec);
 		ts.usec = cpu_to_be64(tv.tv_usec);
 
-		NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
+		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
+			goto nla_put_failure;
 	}
 
 	if (data_len) {
@@ -384,8 +395,7 @@
 nla_put_failure:
 	if (skb)
 		kfree_skb(skb);
-	if (net_ratelimit())
-		printk(KERN_ERR "nf_queue: error creating packet message\n");
+	net_err_ratelimited("nf_queue: error creating packet message\n");
 	return NULL;
 }
 
@@ -422,10 +432,8 @@
 	}
 	if (queue->queue_total >= queue->queue_maxlen) {
 		queue->queue_dropped++;
-		if (net_ratelimit())
-			  printk(KERN_WARNING "nf_queue: full at %d entries, "
-				 "dropping packets(s).\n",
-				 queue->queue_total);
+		net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
+				     queue->queue_total);
 		goto err_out_free_nskb;
 	}
 	entry->id = ++queue->id_sequence;
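
The logging changes here and in xt_TCPMSS.c/xt_hashlimit.c replace the open-coded "if (net_ratelimit()) printk(...)" pairs with the net_err_ratelimited()/net_warn_ratelimited() helpers, which bundle the rate limiting and the log level into a single call. Roughly:

#include <linux/net.h>

static void ex_report_full(unsigned int total)
{
	/* was:
	 *	if (net_ratelimit())
	 *		printk(KERN_WARNING "queue full at %u entries\n", total);
	 */
	net_warn_ratelimited("queue full at %u entries\n", total);
}
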
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 59530e9..a51de9b 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -17,7 +17,6 @@
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_ecache.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_timeout.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 
@@ -227,7 +226,7 @@
 	}
 
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-	if (info->timeout) {
+	if (info->timeout[0]) {
 		typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
 		struct nf_conn_timeout *timeout_ext;
 
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
new file mode 100644
index 0000000..0a96a43
--- /dev/null
+++ b/net/netfilter/xt_HMARK.c
@@ -0,0 +1,362 @@
+/*
+ * xt_HMARK - Netfilter module to set mark by means of hashing
+ *
+ * (C) 2012 by Hans Schillstrom <hans.schillstrom@ericsson.com>
+ * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_HMARK.h>
+
+#include <net/ip.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+#include <net/ipv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hans Schillstrom <hans.schillstrom@ericsson.com>");
+MODULE_DESCRIPTION("Xtables: packet marking using hash calculation");
+MODULE_ALIAS("ipt_HMARK");
+MODULE_ALIAS("ip6t_HMARK");
+
+struct hmark_tuple {
+	u32			src;
+	u32			dst;
+	union hmark_ports	uports;
+	uint8_t			proto;
+};
+
+static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
+{
+	return (addr32[0] & mask[0]) ^
+	       (addr32[1] & mask[1]) ^
+	       (addr32[2] & mask[2]) ^
+	       (addr32[3] & mask[3]);
+}
+
+static inline u32
+hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
+{
+	switch (l3num) {
+	case AF_INET:
+		return *addr32 & *mask;
+	case AF_INET6:
+		return hmark_addr6_mask(addr32, mask);
+	}
+	return 0;
+}
+
+static int
+hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
+		    const struct xt_hmark_info *info)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+	struct nf_conntrack_tuple *otuple;
+	struct nf_conntrack_tuple *rtuple;
+
+	if (ct == NULL || nf_ct_is_untracked(ct))
+		return -1;
+
+	otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+	rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+	t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all,
+				 info->src_mask.all);
+	t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all,
+				 info->dst_mask.all);
+
+	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
+		return 0;
+
+	t->proto = nf_ct_protonum(ct);
+	if (t->proto != IPPROTO_ICMP) {
+		t->uports.p16.src = otuple->src.u.all;
+		t->uports.p16.dst = rtuple->src.u.all;
+		t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
+				info->port_set.v32;
+		if (t->uports.p16.dst < t->uports.p16.src)
+			swap(t->uports.p16.dst, t->uports.p16.src);
+	}
+
+	return 0;
+#else
+	return -1;
+#endif
+}
+
+static inline u32
+hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
+{
+	u32 hash;
+
+	if (t->dst < t->src)
+		swap(t->src, t->dst);
+
+	hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd);
+	hash = hash ^ (t->proto & info->proto_mask);
+
+	return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
+}
+
+static void
+hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
+		      struct hmark_tuple *t, const struct xt_hmark_info *info)
+{
+	int protoff;
+
+	protoff = proto_ports_offset(t->proto);
+	if (protoff < 0)
+		return;
+
+	nhoff += protoff;
+	if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
+		return;
+
+	t->uports.v32 = (t->uports.v32 & info->port_mask.v32) |
+			info->port_set.v32;
+
+	if (t->uports.p16.dst < t->uports.p16.src)
+		swap(t->uports.p16.dst, t->uports.p16.src);
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+static int get_inner6_hdr(const struct sk_buff *skb, int *offset)
+{
+	struct icmp6hdr *icmp6h, _ih6;
+
+	icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6);
+	if (icmp6h == NULL)
+		return 0;
+
+	if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) {
+		*offset += sizeof(struct icmp6hdr);
+		return 1;
+	}
+	return 0;
+}
+
+static int
+hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
+			  const struct xt_hmark_info *info)
+{
+	struct ipv6hdr *ip6, _ip6;
+	int flag = IP6T_FH_F_AUTH;
+	unsigned int nhoff = 0;
+	u16 fragoff = 0;
+	int nexthdr;
+
+	ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb));
+	nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
+	if (nexthdr < 0)
+		return 0;
+	/* No need to check for icmp errors on fragments */
+	if ((flag & IP6T_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6))
+		goto noicmp;
+	/* Use inner header in case of ICMP errors */
+	if (get_inner6_hdr(skb, &nhoff)) {
+		ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6);
+		if (ip6 == NULL)
+			return -1;
+		/* If AH present, use SPI like in ESP. */
+		flag = IP6T_FH_F_AUTH;
+		nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag);
+		if (nexthdr < 0)
+			return -1;
+	}
+noicmp:
+	t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all);
+	t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all);
+
+	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
+		return 0;
+
+	t->proto = nexthdr;
+	if (t->proto == IPPROTO_ICMPV6)
+		return 0;
+
+	if (flag & IP6T_FH_F_FRAG)
+		return 0;
+
+	hmark_set_tuple_ports(skb, nhoff, t, info);
+	return 0;
+}
+
+static unsigned int
+hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_hmark_info *info = par->targinfo;
+	struct hmark_tuple t;
+
+	memset(&t, 0, sizeof(struct hmark_tuple));
+
+	if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
+		if (hmark_ct_set_htuple(skb, &t, info) < 0)
+			return XT_CONTINUE;
+	} else {
+		if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0)
+			return XT_CONTINUE;
+	}
+
+	skb->mark = hmark_hash(&t, info);
+	return XT_CONTINUE;
+}
+#endif
+
+static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff)
+{
+	const struct icmphdr *icmph;
+	struct icmphdr _ih;
+
+	/* Not enough header? */
+	icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih);
+	if (icmph == NULL || icmph->type > NR_ICMP_TYPES)
+		return 0;
+
+	/* Error message? */
+	if (icmph->type != ICMP_DEST_UNREACH &&
+	    icmph->type != ICMP_SOURCE_QUENCH &&
+	    icmph->type != ICMP_TIME_EXCEEDED &&
+	    icmph->type != ICMP_PARAMETERPROB &&
+	    icmph->type != ICMP_REDIRECT)
+		return 0;
+
+	*nhoff += iphsz + sizeof(_ih);
+	return 1;
+}
+
+static int
+hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
+			  const struct xt_hmark_info *info)
+{
+	struct iphdr *ip, _ip;
+	int nhoff = skb_network_offset(skb);
+
+	ip = (struct iphdr *) (skb->data + nhoff);
+	if (ip->protocol == IPPROTO_ICMP) {
+		/* Use inner header in case of ICMP errors */
+		if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) {
+			ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip);
+			if (ip == NULL)
+				return -1;
+		}
+	}
+
+	t->src = (__force u32) ip->saddr;
+	t->dst = (__force u32) ip->daddr;
+
+	t->src &= info->src_mask.ip;
+	t->dst &= info->dst_mask.ip;
+
+	if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
+		return 0;
+
+	t->proto = ip->protocol;
+
+	/* ICMP has no ports, skip */
+	if (t->proto == IPPROTO_ICMP)
+		return 0;
+
+	/* follow-up fragments don't contain ports, skip all fragments */
+	if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+		return 0;
+
+	hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info);
+
+	return 0;
+}
+
+static unsigned int
+hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_hmark_info *info = par->targinfo;
+	struct hmark_tuple t;
+
+	memset(&t, 0, sizeof(struct hmark_tuple));
+
+	if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) {
+		if (hmark_ct_set_htuple(skb, &t, info) < 0)
+			return XT_CONTINUE;
+	} else {
+		if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0)
+			return XT_CONTINUE;
+	}
+
+	skb->mark = hmark_hash(&t, info);
+	return XT_CONTINUE;
+}
+
+static int hmark_tg_check(const struct xt_tgchk_param *par)
+{
+	const struct xt_hmark_info *info = par->targinfo;
+
+	if (!info->hmodulus) {
+		pr_info("xt_HMARK: hash modulus can't be zero\n");
+		return -EINVAL;
+	}
+	if (info->proto_mask &&
+	    (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) {
+		pr_info("xt_HMARK: proto mask must be zero with L3 mode\n");
+		return -EINVAL;
+	}
+	if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) &&
+	    (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) |
+			     XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) {
+		pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n");
+		return -EINVAL;
+	}
+	if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) &&
+	    (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) |
+			     XT_HMARK_FLAG(XT_HMARK_DPORT)))) {
+		pr_info("xt_HMARK: spi-set and port-set can't be combined\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct xt_target hmark_tg_reg[] __read_mostly = {
+	{
+		.name		= "HMARK",
+		.family		= NFPROTO_IPV4,
+		.target		= hmark_tg_v4,
+		.targetsize	= sizeof(struct xt_hmark_info),
+		.checkentry	= hmark_tg_check,
+		.me		= THIS_MODULE,
+	},
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+	{
+		.name		= "HMARK",
+		.family		= NFPROTO_IPV6,
+		.target		= hmark_tg_v6,
+		.targetsize	= sizeof(struct xt_hmark_info),
+		.checkentry	= hmark_tg_check,
+		.me		= THIS_MODULE,
+	},
+#endif
+};
+
+static int __init hmark_tg_init(void)
+{
+	return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
+}
+
+static void __exit hmark_tg_exit(void)
+{
+	xt_unregister_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg));
+}
+
+module_init(hmark_tg_init);
+module_exit(hmark_tg_exit);
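
hmark_hash() above avoids a modulo operation: it multiplies the 32-bit jhash result by the configured modulus and keeps the upper 32 bits of the 64-bit product, which maps the hash roughly uniformly onto [hoffset, hoffset + hmodulus). A small stand-alone illustration of that range reduction (the numbers and the userspace option names in the comments are only examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hash = 0x9e3779b9;	/* pretend jhash_3words() output */
	uint32_t modulus = 8;		/* e.g. --hmark-mod 8 */
	uint32_t offset = 100;		/* e.g. --hmark-offset 100 */

	/* same reduction as hmark_hash(): (hash * modulus) >> 32, plus offset */
	uint32_t mark = (uint32_t)(((uint64_t)hash * modulus) >> 32) + offset;

	printf("mark = %u\n", mark);	/* 104 here; always within [100, 108) */
	return 0;
}
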
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 190ad37..71a266d 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -67,15 +67,13 @@
 
 	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
 		if (dst_mtu(skb_dst(skb)) <= minlen) {
-			if (net_ratelimit())
-				pr_err("unknown or invalid path-MTU (%u)\n",
-				       dst_mtu(skb_dst(skb)));
+			net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
+					    dst_mtu(skb_dst(skb)));
 			return -1;
 		}
 		if (in_mtu <= minlen) {
-			if (net_ratelimit())
-				pr_err("unknown or invalid path-MTU (%u)\n",
-				       in_mtu);
+			net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
+					    in_mtu);
 			return -1;
 		}
 		newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 35a959a..146033a 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -282,10 +282,10 @@
 	struct sock *sk;
 	const struct in6_addr *laddr;
 	__be16 lport;
-	int thoff;
+	int thoff = 0;
 	int tproto;
 
-	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
+	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
 	if (tproto < 0) {
 		pr_debug("unable to find transport header in IPv6 packet, dropping\n");
 		return NF_DROP;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index d95f9c9..26a668a 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -171,8 +171,7 @@
 
 	if (ht->cfg.max && ht->count >= ht->cfg.max) {
 		/* FIXME: do something. question is what.. */
-		if (net_ratelimit())
-			pr_err("max count of %u reached\n", ht->cfg.max);
+		net_err_ratelimited("max count of %u reached\n", ht->cfg.max);
 		ent = NULL;
 	} else
 		ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
@@ -388,9 +387,20 @@
 
 #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
 
+/* in byte mode, the lowest possible rate is one packet/second.
+ * credit_cap is used as a counter that tells us how many times we can
+ * refill the "credits available" counter when it becomes empty.
+ */
+#define MAX_CPJ_BYTES (0xFFFFFFFF / HZ)
+#define CREDITS_PER_JIFFY_BYTES POW2_BELOW32(MAX_CPJ_BYTES)
+
+static u32 xt_hashlimit_len_to_chunks(u32 len)
+{
+	return (len >> XT_HASHLIMIT_BYTE_SHIFT) + 1;
+}
+
 /* Precision saver. */
-static inline u_int32_t
-user2credits(u_int32_t user)
+static u32 user2credits(u32 user)
 {
 	/* If multiplying would overflow... */
 	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
@@ -400,12 +410,53 @@
 	return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
 }
 
-static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
+static u32 user2credits_byte(u32 user)
 {
-	dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
-	if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
-		dh->rateinfo.credit = dh->rateinfo.credit_cap;
+	u64 us = user;
+	us *= HZ * CREDITS_PER_JIFFY_BYTES;
+	return (u32) (us >> 32);
+}
+
+static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now, u32 mode)
+{
+	unsigned long delta = now - dh->rateinfo.prev;
+	u32 cap;
+
+	if (delta == 0)
+		return;
+
 	dh->rateinfo.prev = now;
+
+	if (mode & XT_HASHLIMIT_BYTES) {
+		u32 tmp = dh->rateinfo.credit;
+		dh->rateinfo.credit += CREDITS_PER_JIFFY_BYTES * delta;
+		cap = CREDITS_PER_JIFFY_BYTES * HZ;
+		if (tmp >= dh->rateinfo.credit) {/* overflow */
+			dh->rateinfo.credit = cap;
+			return;
+		}
+	} else {
+		dh->rateinfo.credit += delta * CREDITS_PER_JIFFY;
+		cap = dh->rateinfo.credit_cap;
+	}
+	if (dh->rateinfo.credit > cap)
+		dh->rateinfo.credit = cap;
+}
+
+static void rateinfo_init(struct dsthash_ent *dh,
+			  struct xt_hashlimit_htable *hinfo)
+{
+	dh->rateinfo.prev = jiffies;
+	if (hinfo->cfg.mode & XT_HASHLIMIT_BYTES) {
+		dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
+		dh->rateinfo.cost = user2credits_byte(hinfo->cfg.avg);
+		dh->rateinfo.credit_cap = hinfo->cfg.burst;
+	} else {
+		dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
+						   hinfo->cfg.burst);
+		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+		dh->rateinfo.credit_cap = dh->rateinfo.credit;
+	}
 }
 
 static inline __be32 maskl(__be32 a, unsigned int l)
@@ -511,6 +562,21 @@
 	return 0;
 }
 
+static u32 hashlimit_byte_cost(unsigned int len, struct dsthash_ent *dh)
+{
+	u64 tmp = xt_hashlimit_len_to_chunks(len);
+	tmp = tmp * dh->rateinfo.cost;
+
+	if (unlikely(tmp > CREDITS_PER_JIFFY_BYTES * HZ))
+		tmp = CREDITS_PER_JIFFY_BYTES * HZ;
+
+	if (dh->rateinfo.credit < tmp && dh->rateinfo.credit_cap) {
+		dh->rateinfo.credit_cap--;
+		dh->rateinfo.credit = CREDITS_PER_JIFFY_BYTES * HZ;
+	}
+	return (u32) tmp;
+}
+
 static bool
 hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
@@ -519,6 +585,7 @@
 	unsigned long now = jiffies;
 	struct dsthash_ent *dh;
 	struct dsthash_dst dst;
+	u32 cost;
 
 	if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
 		goto hotdrop;
@@ -532,21 +599,21 @@
 			goto hotdrop;
 		}
 		dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
-		dh->rateinfo.prev = jiffies;
-		dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
-		                      hinfo->cfg.burst);
-		dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
-		                          hinfo->cfg.burst);
-		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+		rateinfo_init(dh, hinfo);
 	} else {
 		/* update expiration timeout */
 		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
-		rateinfo_recalc(dh, now);
+		rateinfo_recalc(dh, now, hinfo->cfg.mode);
 	}
 
-	if (dh->rateinfo.credit >= dh->rateinfo.cost) {
+	if (info->cfg.mode & XT_HASHLIMIT_BYTES)
+		cost = hashlimit_byte_cost(skb->len, dh);
+	else
+		cost = dh->rateinfo.cost;
+
+	if (dh->rateinfo.credit >= cost) {
 		/* below the limit */
-		dh->rateinfo.credit -= dh->rateinfo.cost;
+		dh->rateinfo.credit -= cost;
 		spin_unlock(&dh->lock);
 		rcu_read_unlock_bh();
 		return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
@@ -568,14 +635,6 @@
 	struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
 	int ret;
 
-	/* Check for overflow. */
-	if (info->cfg.burst == 0 ||
-	    user2credits(info->cfg.avg * info->cfg.burst) <
-	    user2credits(info->cfg.avg)) {
-		pr_info("overflow, try lower: %u/%u\n",
-			info->cfg.avg, info->cfg.burst);
-		return -ERANGE;
-	}
 	if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
 		return -EINVAL;
 	if (info->name[sizeof(info->name)-1] != '\0')
@@ -588,6 +647,26 @@
 			return -EINVAL;
 	}
 
+	if (info->cfg.mode & ~XT_HASHLIMIT_ALL) {
+		pr_info("Unknown mode mask %X, kernel too old?\n",
+						info->cfg.mode);
+		return -EINVAL;
+	}
+
+	/* Check for overflow. */
+	if (info->cfg.mode & XT_HASHLIMIT_BYTES) {
+		if (user2credits_byte(info->cfg.avg) == 0) {
+			pr_info("overflow, rate too high: %u\n", info->cfg.avg);
+			return -EINVAL;
+		}
+	} else if (info->cfg.burst == 0 ||
+		    user2credits(info->cfg.avg * info->cfg.burst) <
+		    user2credits(info->cfg.avg)) {
+			pr_info("overflow, try lower: %u/%u\n",
+				info->cfg.avg, info->cfg.burst);
+			return -ERANGE;
+	}
+
 	mutex_lock(&hashlimit_mutex);
 	info->hinfo = htable_find_get(net, info->name, par->family);
 	if (info->hinfo == NULL) {
@@ -680,10 +759,11 @@
 				   struct seq_file *s)
 {
 	int res;
+	const struct xt_hashlimit_htable *ht = s->private;
 
 	spin_lock(&ent->lock);
 	/* recalculate to show accurate numbers */
-	rateinfo_recalc(ent, jiffies);
+	rateinfo_recalc(ent, jiffies, ht->cfg.mode);
 
 	switch (family) {
 	case NFPROTO_IPV4:
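
rateinfo_recalc()/hashlimit_mt() implement a token bucket: credits accumulate with elapsed jiffies up to a cap, and every matched packet spends a cost (in the new byte mode, a cost per 2^XT_HASHLIMIT_BYTE_SHIFT-byte chunk, with credit_cap counting how many burst refills remain). The packet-mode logic boils down to something like the following stand-alone sketch, with illustrative names rather than the kernel's:

#include <stdint.h>
#include <stdbool.h>

struct bucket {
	uint64_t credit;	/* tokens currently available */
	uint64_t credit_cap;	/* burst ceiling */
	uint64_t cost;		/* tokens consumed per packet */
	uint64_t prev;		/* last refill time, in ticks */
	uint64_t per_tick;	/* tokens gained per tick */
};

static bool bucket_allow(struct bucket *b, uint64_t now)
{
	uint64_t delta = now - b->prev;

	if (delta) {
		b->prev = now;
		b->credit += delta * b->per_tick;
		if (b->credit > b->credit_cap)
			b->credit = b->credit_cap;
	}
	if (b->credit >= b->cost) {
		b->credit -= b->cost;
		return true;	/* below the configured rate */
	}
	return false;		/* rate exceeded */
}
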
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 32b7a57..5c22ce8 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -88,8 +88,7 @@
 }
 
 /* Precision saver. */
-static u_int32_t
-user2credits(u_int32_t user)
+static u32 user2credits(u32 user)
 {
 	/* If multiplying would overflow... */
 	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
@@ -123,7 +122,7 @@
 		   128. */
 		priv->prev = jiffies;
 		priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
-		r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
+		r->credit_cap = priv->credit; /* Credits full. */
 		r->cost = user2credits(r->avg);
 	}
 	return 0;
diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c
index 8160f6b..d5b4fd4 100644
--- a/net/netfilter/xt_mac.c
+++ b/net/netfilter/xt_mac.c
@@ -36,7 +36,7 @@
 		return false;
 	if (skb_mac_header(skb) + ETH_HLEN > skb->data)
 		return false;
-	ret  = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0;
+	ret  = ether_addr_equal(eth_hdr(skb)->h_source, info->srcaddr);
 	ret ^= info->invert;
 	return ret;
 }
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index d2ff15a..fc0d6db 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -314,7 +314,7 @@
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *pde;
 #endif
-	unsigned i;
+	unsigned int i;
 	int ret = -EINVAL;
 
 	if (unlikely(!hash_rnd_inited)) {
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 0ec8138..035960e 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -44,6 +44,14 @@
 	.cmdflags = cfs,		\
 	.timeout = t,			\
 }
+#define ADT_MOPT(n, f, d, fs, cfs, t)	\
+struct ip_set_adt_opt n = {		\
+	.family	= f,			\
+	.dim = d,			\
+	.flags = fs,			\
+	.cmdflags = cfs,		\
+	.timeout = t,			\
+}
 
 /* Revision 0 interface: backward compatible with netfilter/iptables */
 
@@ -296,11 +304,14 @@
 set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_set_info_target_v2 *info = par->targinfo;
-	ADT_OPT(add_opt, par->family, info->add_set.dim,
-		info->add_set.flags, info->flags, info->timeout);
+	ADT_MOPT(add_opt, par->family, info->add_set.dim,
+		 info->add_set.flags, info->flags, info->timeout);
 	ADT_OPT(del_opt, par->family, info->del_set.dim,
 		info->del_set.flags, 0, UINT_MAX);
 
+	/* Normalize to fit into jiffies */
+	if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+		add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
 	if (info->del_set.index != IPSET_INVALID_ID)
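
The new clamp in set_target_v2() exists because the ip_set timeout is given in seconds and is presumably multiplied by MSEC_PER_SEC further down before being turned into jiffies; anything above UINT_MAX / MSEC_PER_SEC (about 4294967 seconds, roughly 49.7 days) would overflow that 32-bit product. The arithmetic, in a stand-alone illustrative form:

#include <stdint.h>

static uint32_t clamp_set_timeout(uint32_t timeout_secs)
{
	/* UINT_MAX / MSEC_PER_SEC: largest value whose *1000 still fits in u32 */
	const uint32_t max_secs = UINT32_MAX / 1000;

	return timeout_secs > max_secs ? max_secs : timeout_secs;
}
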
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 72bb07f..9ea482d 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -263,10 +263,10 @@
 	struct sock *sk;
 	struct in6_addr *daddr, *saddr;
 	__be16 dport, sport;
-	int thoff, tproto;
+	int thoff = 0, tproto;
 	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
 
-	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
+	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
 	if (tproto < 0) {
 		pr_debug("unable to find transport header in IPv6 packet, dropping\n");
 		return NF_DROP;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index faa48f7..b3025a6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -104,27 +104,27 @@
 }
 
 struct nl_pid_hash {
-	struct hlist_head *table;
-	unsigned long rehash_time;
+	struct hlist_head	*table;
+	unsigned long		rehash_time;
 
-	unsigned int mask;
-	unsigned int shift;
+	unsigned int		mask;
+	unsigned int		shift;
 
-	unsigned int entries;
-	unsigned int max_shift;
+	unsigned int		entries;
+	unsigned int		max_shift;
 
-	u32 rnd;
+	u32			rnd;
 };
 
 struct netlink_table {
-	struct nl_pid_hash hash;
-	struct hlist_head mc_list;
-	struct listeners __rcu *listeners;
-	unsigned int nl_nonroot;
-	unsigned int groups;
-	struct mutex *cb_mutex;
-	struct module *module;
-	int registered;
+	struct nl_pid_hash	hash;
+	struct hlist_head	mc_list;
+	struct listeners __rcu	*listeners;
+	unsigned int		nl_nonroot;
+	unsigned int		groups;
+	struct mutex		*cb_mutex;
+	struct module		*module;
+	int			registered;
 };
 
 static struct netlink_table *nl_table;
@@ -132,7 +132,6 @@
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 
 static int netlink_dump(struct sock *sk);
-static void netlink_destroy_callback(struct netlink_callback *cb);
 
 static DEFINE_RWLOCK(nl_table_lock);
 static atomic_t nl_table_users = ATOMIC_INIT(0);
@@ -149,6 +148,18 @@
 	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
 }
 
+static void netlink_destroy_callback(struct netlink_callback *cb)
+{
+	kfree_skb(cb->skb);
+	kfree(cb);
+}
+
+static void netlink_consume_callback(struct netlink_callback *cb)
+{
+	consume_skb(cb->skb);
+	kfree(cb);
+}
+
 static void netlink_sock_destruct(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
@@ -414,9 +425,9 @@
 	sock_init_data(sock, sk);
 
 	nlk = nlk_sk(sk);
-	if (cb_mutex)
+	if (cb_mutex) {
 		nlk->cb_mutex = cb_mutex;
-	else {
+	} else {
 		nlk->cb_mutex = &nlk->cb_def_mutex;
 		mutex_init(nlk->cb_mutex);
 	}
@@ -522,8 +533,9 @@
 			nl_table[sk->sk_protocol].module = NULL;
 			nl_table[sk->sk_protocol].registered = 0;
 		}
-	} else if (nlk->subscriptions)
+	} else if (nlk->subscriptions) {
 		netlink_update_listeners(sk);
+	}
 	netlink_table_ungrab();
 
 	kfree(nlk->groups);
@@ -866,7 +878,7 @@
 		struct sk_buff *nskb = skb_clone(skb, allocation);
 		if (!nskb)
 			return skb;
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = nskb;
 	}
 
@@ -896,8 +908,10 @@
 		ret = skb->len;
 		skb_set_owner_r(skb, sk);
 		nlk->netlink_rcv(skb);
+		consume_skb(skb);
+	} else {
+		kfree_skb(skb);
 	}
-	kfree_skb(skb);
 	sock_put(sk);
 	return ret;
 }
@@ -1086,8 +1100,8 @@
 	if (info.delivery_failure) {
 		kfree_skb(info.skb2);
 		return -ENOBUFS;
-	} else
-		consume_skb(info.skb2);
+	}
+	consume_skb(info.skb2);
 
 	if (info.delivered) {
 		if (info.congested && (allocation & __GFP_WAIT))
@@ -1240,8 +1254,9 @@
 			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
 			clear_bit(0, &nlk->state);
 			wake_up_interruptible(&nlk->wait);
-		} else
+		} else {
 			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
+		}
 		err = 0;
 		break;
 	default:
@@ -1645,12 +1660,6 @@
 }
 EXPORT_SYMBOL(netlink_set_nonroot);
 
-static void netlink_destroy_callback(struct netlink_callback *cb)
-{
-	kfree_skb(cb->skb);
-	kfree(cb);
-}
-
 struct nlmsghdr *
 __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
 {
@@ -1727,7 +1736,7 @@
 	nlk->cb = NULL;
 	mutex_unlock(nlk->cb_mutex);
 
-	netlink_destroy_callback(cb);
+	netlink_consume_callback(cb);
 	return 0;
 
 errout_skb:
@@ -1996,11 +2005,11 @@
 
 static int netlink_seq_show(struct seq_file *seq, void *v)
 {
-	if (v == SEQ_START_TOKEN)
+	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
 			 "sk       Eth Pid    Groups   "
 			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
-	else {
+	} else {
 		struct sock *s = v;
 		struct netlink_sock *nlk = nlk_sk(s);
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 9f40441..8340ace 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -635,11 +635,12 @@
 	if (hdr == NULL)
 		return -1;
 
-	NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name);
-	NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id);
-	NLA_PUT_U32(skb, CTRL_ATTR_VERSION, family->version);
-	NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize);
-	NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr);
+	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
+	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
+	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
+	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
+	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
+		goto nla_put_failure;
 
 	if (!list_empty(&family->ops_list)) {
 		struct nlattr *nla_ops;
@@ -657,8 +658,9 @@
 			if (nest == NULL)
 				goto nla_put_failure;
 
-			NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd);
-			NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags);
+			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
+			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags))
+				goto nla_put_failure;
 
 			nla_nest_end(skb, nest);
 		}
@@ -682,9 +684,10 @@
 			if (nest == NULL)
 				goto nla_put_failure;
 
-			NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
-			NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
-				       grp->name);
+			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
+			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
+					   grp->name))
+				goto nla_put_failure;
 
 			nla_nest_end(skb, nest);
 		}
@@ -710,8 +713,9 @@
 	if (hdr == NULL)
 		return -1;
 
-	NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name);
-	NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id);
+	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) ||
+	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id))
+		goto nla_put_failure;
 
 	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
 	if (nla_grps == NULL)
@@ -721,9 +725,10 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
-	NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
-		       grp->name);
+	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
+	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
+			   grp->name))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
 	nla_nest_end(skb, nla_grps);
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 1c51d7a..743262b 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -97,7 +97,7 @@
 
 static int nr_header(struct sk_buff *skb, struct net_device *dev,
 		     unsigned short type,
-		     const void *daddr, const void *saddr, unsigned len)
+		     const void *daddr, const void *saddr, unsigned int len)
 {
 	unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
 
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index 1e0fa9e5..42f630b 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -146,18 +146,12 @@
 	{ }
 };
 
-static struct ctl_path nr_path[] = {
-	{ .procname = "net", },
-	{ .procname = "netrom", },
-	{ }
-};
-
 void __init nr_register_sysctl(void)
 {
-	nr_table_header = register_sysctl_paths(nr_path, nr_table);
+	nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
 }
 
 void nr_unregister_sysctl(void)
 {
-	unregister_sysctl_table(nr_table_header);
+	unregister_net_sysctl_table(nr_table_header);
 }
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 44c865b..8d8d9bc 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -14,6 +14,7 @@
 	  be called nfc.
 
 source "net/nfc/nci/Kconfig"
+source "net/nfc/hci/Kconfig"
 source "net/nfc/llcp/Kconfig"
 
 source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index 7b4a6dc..d1a117c 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_NFC) += nfc.o
 obj-$(CONFIG_NFC_NCI) += nci/
+obj-$(CONFIG_NFC_HCI) += hci/
 
 nfc-objs := core.o netlink.o af_nfc.o rawsock.o
 nfc-$(CONFIG_NFC_LLCP)	+= llcp/llcp.o llcp/commands.o llcp/sock.o
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 295d129..3192c3f 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -33,6 +33,8 @@
 
 #define VERSION "0.1"
 
+#define NFC_CHECK_PRES_FREQ_MS	2000
+
 int nfc_devlist_generation;
 DEFINE_MUTEX(nfc_devlist_mutex);
 
@@ -95,7 +97,7 @@
 		goto error;
 	}
 
-	if (dev->polling || dev->remote_activated) {
+	if (dev->polling || dev->activated_target_idx != NFC_TARGET_IDX_NONE) {
 		rc = -EBUSY;
 		goto error;
 	}
@@ -211,6 +213,8 @@
 	}
 
 	rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len);
+	if (!rc)
+		dev->activated_target_idx = target_index;
 
 error:
 	device_unlock(&dev->dev);
@@ -246,6 +250,7 @@
 	rc = dev->ops->dep_link_down(dev);
 	if (!rc) {
 		dev->dep_link_up = false;
+		dev->activated_target_idx = NFC_TARGET_IDX_NONE;
 		nfc_llcp_mac_is_down(dev);
 		nfc_genl_dep_link_down_event(dev);
 	}
@@ -289,8 +294,13 @@
 	}
 
 	rc = dev->ops->activate_target(dev, target_idx, protocol);
-	if (!rc)
-		dev->remote_activated = true;
+	if (!rc) {
+		dev->activated_target_idx = target_idx;
+
+		if (dev->ops->check_presence)
+			mod_timer(&dev->check_pres_timer, jiffies +
+				  msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
+	}
 
 error:
 	device_unlock(&dev->dev);
@@ -317,8 +327,11 @@
 		goto error;
 	}
 
+	if (dev->ops->check_presence)
+		del_timer_sync(&dev->check_pres_timer);
+
 	dev->ops->deactivate_target(dev, target_idx);
-	dev->remote_activated = false;
+	dev->activated_target_idx = NFC_TARGET_IDX_NONE;
 
 error:
 	device_unlock(&dev->dev);
@@ -352,8 +365,27 @@
 		goto error;
 	}
 
+	if (dev->activated_target_idx == NFC_TARGET_IDX_NONE) {
+		rc = -ENOTCONN;
+		kfree_skb(skb);
+		goto error;
+	}
+
+	if (target_idx != dev->activated_target_idx) {
+		rc = -EADDRNOTAVAIL;
+		kfree_skb(skb);
+		goto error;
+	}
+
+	if (dev->ops->check_presence)
+		del_timer_sync(&dev->check_pres_timer);
+
 	rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context);
 
+	if (!rc && dev->ops->check_presence)
+		mod_timer(&dev->check_pres_timer, jiffies +
+			  msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
+
 error:
 	device_unlock(&dev->dev);
 	return rc;
@@ -428,10 +460,15 @@
 int nfc_targets_found(struct nfc_dev *dev,
 		      struct nfc_target *targets, int n_targets)
 {
+	int i;
+
 	pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
 
 	dev->polling = false;
 
+	for (i = 0; i < n_targets; i++)
+		targets[i].idx = dev->target_next_idx++;
+
 	spin_lock_bh(&dev->targets_lock);
 
 	dev->targets_generation++;
@@ -455,17 +492,92 @@
 }
 EXPORT_SYMBOL(nfc_targets_found);
 
+int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
+{
+	struct nfc_target *tg;
+	int i;
+
+	pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx);
+
+	spin_lock_bh(&dev->targets_lock);
+
+	for (i = 0; i < dev->n_targets; i++) {
+		tg = &dev->targets[i];
+		if (tg->idx == target_idx)
+			break;
+	}
+
+	if (i == dev->n_targets) {
+		spin_unlock_bh(&dev->targets_lock);
+		return -EINVAL;
+	}
+
+	dev->targets_generation++;
+	dev->n_targets--;
+	dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+
+	if (dev->n_targets) {
+		memmove(&dev->targets[i], &dev->targets[i + 1],
+			(dev->n_targets - i) * sizeof(struct nfc_target));
+	} else {
+		kfree(dev->targets);
+		dev->targets = NULL;
+	}
+
+	spin_unlock_bh(&dev->targets_lock);
+
+	nfc_genl_target_lost(dev, target_idx);
+
+	return 0;
+}
+EXPORT_SYMBOL(nfc_target_lost);
+
 static void nfc_release(struct device *d)
 {
 	struct nfc_dev *dev = to_nfc_dev(d);
 
 	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
+	if (dev->ops->check_presence) {
+		del_timer_sync(&dev->check_pres_timer);
+		destroy_workqueue(dev->check_pres_wq);
+	}
+
 	nfc_genl_data_exit(&dev->genl_data);
 	kfree(dev->targets);
 	kfree(dev);
 }
 
+static void nfc_check_pres_work(struct work_struct *work)
+{
+	struct nfc_dev *dev = container_of(work, struct nfc_dev,
+					   check_pres_work);
+	int rc;
+
+	device_lock(&dev->dev);
+
+	if (dev->activated_target_idx != NFC_TARGET_IDX_NONE &&
+	    timer_pending(&dev->check_pres_timer) == 0) {
+		rc = dev->ops->check_presence(dev, dev->activated_target_idx);
+		if (!rc) {
+			mod_timer(&dev->check_pres_timer, jiffies +
+				  msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
+		} else {
+			nfc_target_lost(dev, dev->activated_target_idx);
+			dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+		}
+	}
+
+	device_unlock(&dev->dev);
+}
+
+static void nfc_check_pres_timeout(unsigned long data)
+{
+	struct nfc_dev *dev = (struct nfc_dev *)data;
+
+	queue_work(dev->check_pres_wq, &dev->check_pres_work);
+}
+
 struct class nfc_class = {
 	.name = "nfc",
 	.dev_release = nfc_release,
@@ -475,12 +587,12 @@
 static int match_idx(struct device *d, void *data)
 {
 	struct nfc_dev *dev = to_nfc_dev(d);
-	unsigned *idx = data;
+	unsigned int *idx = data;
 
 	return dev->idx == *idx;
 }
 
-struct nfc_dev *nfc_get_device(unsigned idx)
+struct nfc_dev *nfc_get_device(unsigned int idx)
 {
 	struct device *d;
 
@@ -531,6 +643,26 @@
 	/* first generation must not be 0 */
 	dev->targets_generation = 1;
 
+	dev->activated_target_idx = NFC_TARGET_IDX_NONE;
+
+	if (ops->check_presence) {
+		char name[32];
+		init_timer(&dev->check_pres_timer);
+		dev->check_pres_timer.data = (unsigned long)dev;
+		dev->check_pres_timer.function = nfc_check_pres_timeout;
+
+		INIT_WORK(&dev->check_pres_work, nfc_check_pres_work);
+		snprintf(name, sizeof(name), "nfc%d_check_pres_wq", dev->idx);
+		dev->check_pres_wq = alloc_workqueue(name, WQ_NON_REENTRANT |
+						     WQ_UNBOUND |
+						     WQ_MEM_RECLAIM, 1);
+		if (dev->check_pres_wq == NULL) {
+			kfree(dev);
+			return NULL;
+		}
+	}
+
+
 	return dev;
 }
 EXPORT_SYMBOL(nfc_allocate_device);
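
The presence-check support added to nfc core follows a common split: the timer (which must not sleep) only queues work on a dedicated workqueue, the work item makes the potentially sleeping ops->check_presence() call, and the timer is re-armed every NFC_CHECK_PRES_FREQ_MS while the target is still reachable. The shape of that loop, with illustrative names and the same pre-timer_setup() API used in this patch:

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct ex_poller {
	struct timer_list timer;
	struct work_struct work;
	struct workqueue_struct *wq;
	bool (*still_there)(void *priv);	/* stands in for ops->check_presence */
	void *priv;
	unsigned int period_ms;
};

static void ex_poll_work(struct work_struct *work)
{
	struct ex_poller *p = container_of(work, struct ex_poller, work);

	if (p->still_there(p->priv))
		mod_timer(&p->timer, jiffies + msecs_to_jiffies(p->period_ms));
	/* otherwise the target is gone and the owner tears the poller down */
}

static void ex_poll_timeout(unsigned long data)
{
	struct ex_poller *p = (struct ex_poller *)data;

	queue_work(p->wq, &p->work);	/* timer context: defer, never sleep */
}

static void ex_poller_start(struct ex_poller *p)
{
	init_timer(&p->timer);
	p->timer.data = (unsigned long)p;
	p->timer.function = ex_poll_timeout;
	INIT_WORK(&p->work, ex_poll_work);
	mod_timer(&p->timer, jiffies + msecs_to_jiffies(p->period_ms));
}
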
diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig
new file mode 100644
index 0000000..17213a6
--- /dev/null
+++ b/net/nfc/hci/Kconfig
@@ -0,0 +1,16 @@
+config NFC_HCI
+	depends on NFC
+	tristate "NFC HCI implementation"
+	default n
+	help
+	 Say Y here if you want to build support for a kernel NFC HCI
+	 implementation. This is mostly needed for devices that only process
+	 HCI frames, like for example the NXP pn544.
+
+config NFC_SHDLC
+	depends on NFC_HCI
+	bool "SHDLC link layer for HCI based NFC drivers"
+	default n
+	---help---
+	  Say yes if you use an NFC HCI driver that requires SHDLC link layer.
+	  If unsure, say N here.
diff --git a/net/nfc/hci/Makefile b/net/nfc/hci/Makefile
new file mode 100644
index 0000000..f9c44b2
--- /dev/null
+++ b/net/nfc/hci/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Linux NFC HCI layer.
+#
+
+obj-$(CONFIG_NFC_HCI) += hci.o
+
+hci-y			:= core.o hcp.o command.o
+hci-$(CONFIG_NFC_SHDLC)	+= shdlc.o
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
new file mode 100644
index 0000000..8729abf
--- /dev/null
+++ b/net/nfc/hci/command.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "hci: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#include <net/nfc/hci.h>
+
+#include "hci.h"
+
+static int nfc_hci_result_to_errno(u8 result)
+{
+	switch (result) {
+	case NFC_HCI_ANY_OK:
+		return 0;
+	case NFC_HCI_ANY_E_TIMEOUT:
+		return -ETIMEDOUT;
+	default:
+		return -1;
+	}
+}
+
+static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, u8 result,
+			       struct sk_buff *skb, void *cb_data)
+{
+	struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data;
+
+	pr_debug("HCI Cmd completed with HCI result=%d\n", result);
+
+	hcp_ew->exec_result = nfc_hci_result_to_errno(result);
+	if (hcp_ew->exec_result == 0)
+		hcp_ew->result_skb = skb;
+	else
+		kfree_skb(skb);
+	hcp_ew->exec_complete = true;
+
+	wake_up(hcp_ew->wq);
+}
+
+static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+			       const u8 *param, size_t param_len,
+			       struct sk_buff **skb)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(ew_wq);
+	struct hcp_exec_waiter hcp_ew;
+	hcp_ew.wq = &ew_wq;
+	hcp_ew.exec_complete = false;
+	hcp_ew.result_skb = NULL;
+
+	pr_debug("through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len);
+
+	/* TODO: Define hci cmd execution delay. Should it be the same
+	 * for all commands?
+	 */
+	hcp_ew.exec_result = nfc_hci_hcp_message_tx(hdev, pipe,
+						    NFC_HCI_HCP_COMMAND, cmd,
+						    param, param_len,
+						    nfc_hci_execute_cb, &hcp_ew,
+						    3000);
+	if (hcp_ew.exec_result < 0)
+		return hcp_ew.exec_result;
+
+	wait_event(ew_wq, hcp_ew.exec_complete == true);
+
+	if (hcp_ew.exec_result == 0) {
+		if (skb)
+			*skb = hcp_ew.result_skb;
+		else
+			kfree_skb(hcp_ew.result_skb);
+	}
+
+	return hcp_ew.exec_result;
+}
+
+int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+		       const u8 *param, size_t param_len)
+{
+	u8 pipe;
+
+	pr_debug("%d to gate %d\n", event, gate);
+
+	pipe = hdev->gate2pipe[gate];
+	if (pipe == NFC_HCI_INVALID_PIPE)
+		return -EADDRNOTAVAIL;
+
+	return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_EVENT, event,
+				      param, param_len, NULL, NULL, 0);
+}
+EXPORT_SYMBOL(nfc_hci_send_event);
+
+int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
+			  const u8 *param, size_t param_len)
+{
+	u8 pipe;
+
+	pr_debug("\n");
+
+	pipe = hdev->gate2pipe[gate];
+	if (pipe == NFC_HCI_INVALID_PIPE)
+		return -EADDRNOTAVAIL;
+
+	return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE,
+				      response, param, param_len, NULL, NULL,
+				      0);
+}
+EXPORT_SYMBOL(nfc_hci_send_response);
+
+/*
+ * Execute an hci command sent to gate.
+ * skb will contain response data if success. skb can be NULL if you are not
+ * interested by the response.
+ */
+int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+		     const u8 *param, size_t param_len, struct sk_buff **skb)
+{
+	u8 pipe;
+
+	pr_debug("\n");
+
+	pipe = hdev->gate2pipe[gate];
+	if (pipe == NFC_HCI_INVALID_PIPE)
+		return -EADDRNOTAVAIL;
+
+	return nfc_hci_execute_cmd(hdev, pipe, cmd, param, param_len, skb);
+}
+EXPORT_SYMBOL(nfc_hci_send_cmd);
+
+int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+		      const u8 *param, size_t param_len)
+{
+	int r;
+	u8 *tmp;
+
+	/* TODO ELa: reg idx must be inserted before param, but we don't want
+	 * to ask the caller to do it to keep a simpler API.
+	 * For now, just create a new temporary param buffer. This is far from
+	 * optimal though, and the plan is to modify APIs to pass idx down to
+	 * nfc_hci_hcp_message_tx where the frame is actually built, thereby
+	 * eliminating the need for the temp allocation-copy here.
+	 */
+
+	pr_debug("idx=%d to gate %d\n", idx, gate);
+
+	tmp = kmalloc(1 + param_len, GFP_KERNEL);
+	if (tmp == NULL)
+		return -ENOMEM;
+
+	*tmp = idx;
+	memcpy(tmp + 1, param, param_len);
+
+	r = nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_SET_PARAMETER,
+			     tmp, param_len + 1, NULL);
+
+	kfree(tmp);
+
+	return r;
+}
+EXPORT_SYMBOL(nfc_hci_set_param);
+
+int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+		      struct sk_buff **skb)
+{
+	pr_debug("gate=%d regidx=%d\n", gate, idx);
+
+	return nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_GET_PARAMETER,
+				&idx, 1, skb);
+}
+EXPORT_SYMBOL(nfc_hci_get_param);
+
+static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe)
+{
+	struct sk_buff *skb;
+	int r;
+
+	pr_debug("pipe=%d\n", pipe);
+
+	r = nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_OPEN_PIPE,
+				NULL, 0, &skb);
+	if (r == 0) {
+		/* dest host other than host controller will send
+		 * number of pipes already open on this gate before
+		 * execution. The number can be found in skb->data[0]
+		 */
+		kfree_skb(skb);
+	}
+
+	return r;
+}
+
+static int nfc_hci_close_pipe(struct nfc_hci_dev *hdev, u8 pipe)
+{
+	pr_debug("\n");
+
+	return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE,
+				   NULL, 0, NULL);
+}
+
+static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host,
+			      u8 dest_gate, int *result)
+{
+	struct sk_buff *skb;
+	struct hci_create_pipe_params params;
+	struct hci_create_pipe_resp *resp;
+	u8 pipe;
+
+	pr_debug("gate=%d\n", dest_gate);
+
+	params.src_gate = NFC_HCI_ADMIN_GATE;
+	params.dest_host = dest_host;
+	params.dest_gate = dest_gate;
+
+	*result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
+				      NFC_HCI_ADM_CREATE_PIPE,
+				      (u8 *) &params, sizeof(params), &skb);
+	if (*result == 0) {
+		resp = (struct hci_create_pipe_resp *)skb->data;
+		pipe = resp->pipe;
+		kfree_skb(skb);
+
+		pr_debug("pipe created=%d\n", pipe);
+
+		return pipe;
+	}
+
+	return NFC_HCI_INVALID_PIPE;
+}
+
+static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe)
+{
+	pr_debug("\n");
+
+	return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
+				   NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL);
+}
+
+static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev)
+{
+	u8 param[2];
+
+	/* TODO: Find out what the identity reference data is
+	 * and fill param with it. HCI spec 6.1.3.5 */
+
+	pr_debug("\n");
+
+	return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
+				   NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL);
+}
+
+int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate)
+{
+	int r;
+	u8 pipe = hdev->gate2pipe[gate];
+
+	pr_debug("\n");
+
+	if (pipe == NFC_HCI_INVALID_PIPE)
+		return -EADDRNOTAVAIL;
+
+	r = nfc_hci_close_pipe(hdev, pipe);
+	if (r < 0)
+		return r;
+
+	if (pipe != NFC_HCI_LINK_MGMT_PIPE && pipe != NFC_HCI_ADMIN_PIPE) {
+		r = nfc_hci_delete_pipe(hdev, pipe);
+		if (r < 0)
+			return r;
+	}
+
+	hdev->gate2pipe[gate] = NFC_HCI_INVALID_PIPE;
+
+	return 0;
+}
+EXPORT_SYMBOL(nfc_hci_disconnect_gate);
+
+int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
+{
+	int r;
+
+	pr_debug("\n");
+
+	r = nfc_hci_clear_all_pipes(hdev);
+	if (r < 0)
+		return r;
+
+	memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+
+	return 0;
+}
+EXPORT_SYMBOL(nfc_hci_disconnect_all_gates);
+
+int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
+{
+	u8 pipe = NFC_HCI_INVALID_PIPE;
+	bool pipe_created = false;
+	int r;
+
+	pr_debug("\n");
+
+	if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
+		return -EADDRINUSE;
+
+	switch (dest_gate) {
+	case NFC_HCI_LINK_MGMT_GATE:
+		pipe = NFC_HCI_LINK_MGMT_PIPE;
+		break;
+	case NFC_HCI_ADMIN_GATE:
+		pipe = NFC_HCI_ADMIN_PIPE;
+		break;
+	default:
+		pipe = nfc_hci_create_pipe(hdev, dest_host, dest_gate, &r);
+		if (pipe == NFC_HCI_INVALID_PIPE)
+			return r;
+		pipe_created = true;
+		break;
+	}
+
+	r = nfc_hci_open_pipe(hdev, pipe);
+	if (r < 0) {
+		if (pipe_created)
+			if (nfc_hci_delete_pipe(hdev, pipe) < 0) {
+				/* TODO: Cannot clean by deleting pipe...
+				 * -> inconsistent state */
+			}
+		return r;
+	}
+
+	hdev->gate2pipe[dest_gate] = pipe;
+
+	return 0;
+}
+EXPORT_SYMBOL(nfc_hci_connect_gate);
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
new file mode 100644
index 0000000..86fd00d
--- /dev/null
+++ b/net/nfc/hci/core.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "hci: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/hci.h>
+
+#include "hci.h"
+
+/* Largest headroom needed for outgoing HCI commands */
+#define HCI_CMDS_HEADROOM 1
+
+static void nfc_hci_msg_tx_work(struct work_struct *work)
+{
+	struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
+						msg_tx_work);
+	struct hci_msg *msg;
+	struct sk_buff *skb;
+	int r = 0;
+
+	mutex_lock(&hdev->msg_tx_mutex);
+
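+	/* A previously sent command that is still waiting for its response
+	 * blocks the queue until its completion timer expires, at which
+	 * point it is completed with a timeout error.
+	 */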
+	if (hdev->cmd_pending_msg) {
+		struct hci_msg *pending = hdev->cmd_pending_msg;
+
+		if (timer_pending(&hdev->cmd_timer) != 0)
+			goto exit;
+
+		if (pending->cb)
+			pending->cb(hdev, NFC_HCI_ANY_E_TIMEOUT, NULL,
+				    pending->cb_context);
+		kfree(pending);
+		hdev->cmd_pending_msg = NULL;
+	}
+
+next_msg:
+	if (list_empty(&hdev->msg_tx_queue))
+		goto exit;
+
+	msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, msg_l);
+	list_del(&msg->msg_l);
+
+	pr_debug("msg_tx_queue has a cmd to send\n");
+	while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) {
+		r = hdev->ops->xmit(hdev, skb);
+		if (r < 0) {
+			kfree_skb(skb);
+			skb_queue_purge(&msg->msg_frags);
+			if (msg->cb)
+				msg->cb(hdev, NFC_HCI_ANY_E_NOK, NULL,
+					msg->cb_context);
+			kfree(msg);
+			break;
+		}
+	}
+
+	if (r)
+		goto next_msg;
+
+	if (msg->wait_response == false) {
+		kfree(msg);
+		goto next_msg;
+	}
+
+	hdev->cmd_pending_msg = msg;
+	mod_timer(&hdev->cmd_timer, jiffies +
+		  msecs_to_jiffies(hdev->cmd_pending_msg->completion_delay));
+
+exit:
+	mutex_unlock(&hdev->msg_tx_mutex);
+}
+
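+/*
+ * Dispatch incoming HCP commands and events queued by nfc_hci_recv_frame().
+ * Responses are not routed through this work: they are delivered directly
+ * from the receive path to unblock the waiting command context.
+ */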
+static void nfc_hci_msg_rx_work(struct work_struct *work)
+{
+	struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
+						msg_rx_work);
+	struct sk_buff *skb;
+	struct hcp_message *message;
+	u8 pipe;
+	u8 type;
+	u8 instruction;
+
+	while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
+		pipe = skb->data[0];
+		skb_pull(skb, NFC_HCI_HCP_PACKET_HEADER_LEN);
+		message = (struct hcp_message *)skb->data;
+		type = HCP_MSG_GET_TYPE(message->header);
+		instruction = HCP_MSG_GET_CMD(message->header);
+		skb_pull(skb, NFC_HCI_HCP_MESSAGE_HEADER_LEN);
+
+		nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, skb);
+	}
+}
+
+void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
+			   struct sk_buff *skb)
+{
+	mutex_lock(&hdev->msg_tx_mutex);
+
+	if (hdev->cmd_pending_msg == NULL) {
+		kfree_skb(skb);
+		goto exit;
+	}
+
+	del_timer_sync(&hdev->cmd_timer);
+
+	if (hdev->cmd_pending_msg->cb)
+		hdev->cmd_pending_msg->cb(hdev, result, skb,
+					  hdev->cmd_pending_msg->cb_context);
+	else
+		kfree_skb(skb);
+
+	kfree(hdev->cmd_pending_msg);
+	hdev->cmd_pending_msg = NULL;
+
+	queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+
+exit:
+	mutex_unlock(&hdev->msg_tx_mutex);
+}
+
+void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+			  struct sk_buff *skb)
+{
+	kfree_skb(skb);
+}
+
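+/*
+ * Map the type A SAK (SEL_RES) protocol bits to the NFC core protocol mask.
+ * Returns 0xffffffff when the combination is not recognized.
+ */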
+static u32 nfc_hci_sak_to_protocol(u8 sak)
+{
+	switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) {
+	case NFC_HCI_TYPE_A_SEL_PROT_MIFARE:
+		return NFC_PROTO_MIFARE_MASK;
+	case NFC_HCI_TYPE_A_SEL_PROT_ISO14443:
+		return NFC_PROTO_ISO14443_MASK;
+	case NFC_HCI_TYPE_A_SEL_PROT_DEP:
+		return NFC_PROTO_NFC_DEP_MASK;
+	case NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP:
+		return NFC_PROTO_ISO14443_MASK | NFC_PROTO_NFC_DEP_MASK;
+	default:
+		return 0xffffffff;
+	}
+}
+
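+/*
+ * A target was discovered on the given reader gate: fetch its identification
+ * parameters, build a struct nfc_target from them and report it to the NFC
+ * core.
+ */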
+static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
+{
+	struct nfc_target *targets;
+	struct sk_buff *atqa_skb = NULL;
+	struct sk_buff *sak_skb = NULL;
+	int r;
+
+	pr_debug("from gate %d\n", gate);
+
+	targets = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
+	if (targets == NULL)
+		return -ENOMEM;
+
+	switch (gate) {
+	case NFC_HCI_RF_READER_A_GATE:
+		r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
+				      NFC_HCI_RF_READER_A_ATQA, &atqa_skb);
+		if (r < 0)
+			goto exit;
+
+		r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
+				      NFC_HCI_RF_READER_A_SAK, &sak_skb);
+		if (r < 0)
+			goto exit;
+
+		if (atqa_skb->len != 2 || sak_skb->len != 1) {
+			r = -EPROTO;
+			goto exit;
+		}
+
+		targets->supported_protocols =
+				nfc_hci_sak_to_protocol(sak_skb->data[0]);
+		if (targets->supported_protocols == 0xffffffff) {
+			r = -EPROTO;
+			goto exit;
+		}
+
+		targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data);
+		targets->sel_res = sak_skb->data[0];
+
+		if (hdev->ops->complete_target_discovered) {
+			r = hdev->ops->complete_target_discovered(hdev, gate,
+								  targets);
+			if (r < 0)
+				goto exit;
+		}
+		break;
+	case NFC_HCI_RF_READER_B_GATE:
+		targets->supported_protocols = NFC_PROTO_ISO14443_MASK;
+		break;
+	default:
+		if (hdev->ops->target_from_gate)
+			r = hdev->ops->target_from_gate(hdev, gate, targets);
+		else
+			r = -EPROTO;
+		if (r < 0)
+			goto exit;
+
+		if (hdev->ops->complete_target_discovered) {
+			r = hdev->ops->complete_target_discovered(hdev, gate,
+								  targets);
+			if (r < 0)
+				goto exit;
+		}
+		break;
+	}
+
+	targets->hci_reader_gate = gate;
+
+	r = nfc_targets_found(hdev->ndev, targets, 1);
+	if (r < 0)
+		goto exit;
+
+	kfree(hdev->targets);
+	hdev->targets = targets;
+	targets = NULL;
+	hdev->target_count = 1;
+
+exit:
+	kfree(targets);
+	kfree_skb(atqa_skb);
+	kfree_skb(sak_skb);
+
+	return r;
+}
+
+void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
+			    struct sk_buff *skb)
+{
+	int r = 0;
+
+	switch (event) {
+	case NFC_HCI_EVT_TARGET_DISCOVERED:
+		if (hdev->poll_started == false) {
+			r = -EPROTO;
+			goto exit;
+		}
+
+		if (skb->len < 1) {	/* no status data? */
+			r = -EPROTO;
+			goto exit;
+		}
+
+		if (skb->data[0] == 3) {
+			/* TODO: Multiple targets in field, none activated
+			 * poll is supposedly stopped, but there is no
+			 * single target to activate, so nothing to report
+			 * up.
+			 * if we need to restart poll, we must save the
+			 * protocols from the initial poll and reuse here.
+			 */
+		}
+
+		if (skb->data[0] != 0) {
+			r = -EPROTO;
+			goto exit;
+		}
+
+		r = nfc_hci_target_discovered(hdev,
+					      nfc_hci_pipe2gate(hdev, pipe));
+		break;
+	default:
+		/* TODO: Unknown events are hardware specific
+		 * pass them to the driver (needs a new hci_ops) */
+		break;
+	}
+
+exit:
+	kfree_skb(skb);
+
+	if (r) {
+		/* TODO: There was an error dispatching the event,
+		 * how to propagate up to nfc core?
+		 */
+	}
+}
+
+static void nfc_hci_cmd_timeout(unsigned long data)
+{
+	struct nfc_hci_dev *hdev = (struct nfc_hci_dev *)data;
+
+	queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+}
+
+static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
+				 u8 gates[])
+{
+	int r;
+	u8 *p = gates;
+
+	while (gate_count--) {
+		r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, *p);
+		if (r < 0)
+			return r;
+		p++;
+	}
+
+	return 0;
+}
+
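+/*
+ * Establish the HCI session: connect the admin gate, compare the session id
+ * stored on the chip with ours and (for now, unconditionally) clear all
+ * pipes before reconnecting the standard gates plus the driver-requested
+ * ones, then store our session id on the chip.
+ */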
+static int hci_dev_session_init(struct nfc_hci_dev *hdev)
+{
+	struct sk_buff *skb = NULL;
+	int r;
+	u8 hci_gates[] = {	/* NFC_HCI_ADMIN_GATE MUST be first */
+		NFC_HCI_ADMIN_GATE, NFC_HCI_LOOPBACK_GATE,
+		NFC_HCI_ID_MGMT_GATE, NFC_HCI_LINK_MGMT_GATE,
+		NFC_HCI_RF_READER_B_GATE, NFC_HCI_RF_READER_A_GATE
+	};
+
+	r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
+				 NFC_HCI_ADMIN_GATE);
+	if (r < 0)
+		goto exit;
+
+	r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE,
+			      NFC_HCI_ADMIN_SESSION_IDENTITY, &skb);
+	if (r < 0)
+		goto disconnect_all;
+
+	if (skb->len && skb->len == strlen(hdev->init_data.session_id))
+		if (memcmp(hdev->init_data.session_id, skb->data,
+			   skb->len) == 0) {
+			/* TODO ELa: restore gate<->pipe table from
+			 * some TBD location.
+			 * note: it doesn't seem possible to read the chip's
+			 * currently open gate/pipe table.
+			 * It is only possible to obtain the supported
+			 * gate list.
+			 */
+
+			/* goto exit
+			 * For now, always do a full initialization */
+		}
+
+	r = nfc_hci_disconnect_all_gates(hdev);
+	if (r < 0)
+		goto exit;
+
+	r = hci_dev_connect_gates(hdev, sizeof(hci_gates), hci_gates);
+	if (r < 0)
+		goto disconnect_all;
+
+	r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
+				  hdev->init_data.gates);
+	if (r < 0)
+		goto disconnect_all;
+
+	r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+			      NFC_HCI_ADMIN_SESSION_IDENTITY,
+			      hdev->init_data.session_id,
+			      strlen(hdev->init_data.session_id));
+	if (r == 0)
+		goto exit;
+
+disconnect_all:
+	nfc_hci_disconnect_all_gates(hdev);
+
+exit:
+	kfree_skb(skb);
+
+	return r;
+}
+
+static int hci_dev_version(struct nfc_hci_dev *hdev)
+{
+	int r;
+	struct sk_buff *skb;
+
+	r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+			      NFC_HCI_ID_MGMT_VERSION_SW, &skb);
+	if (r < 0)
+		return r;
+
+	if (skb->len != 3) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	hdev->sw_romlib = (skb->data[0] & 0xf0) >> 4;
+	hdev->sw_patch = skb->data[0] & 0x0f;
+	hdev->sw_flashlib_major = skb->data[1];
+	hdev->sw_flashlib_minor = skb->data[2];
+
+	kfree_skb(skb);
+
+	r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+			      NFC_HCI_ID_MGMT_VERSION_HW, &skb);
+	if (r < 0)
+		return r;
+
+	if (skb->len != 3) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	hdev->hw_derivative = (skb->data[0] & 0xe0) >> 5;
+	hdev->hw_version = skb->data[0] & 0x1f;
+	hdev->hw_mpw = (skb->data[1] & 0xc0) >> 6;
+	hdev->hw_software = skb->data[1] & 0x3f;
+	hdev->hw_bsid = skb->data[2];
+
+	kfree_skb(skb);
+
+	pr_info("SOFTWARE INFO:\n");
+	pr_info("RomLib         : %d\n", hdev->sw_romlib);
+	pr_info("Patch          : %d\n", hdev->sw_patch);
+	pr_info("FlashLib Major : %d\n", hdev->sw_flashlib_major);
+	pr_info("FlashLib Minor : %d\n", hdev->sw_flashlib_minor);
+	pr_info("HARDWARE INFO:\n");
+	pr_info("Derivative     : %d\n", hdev->hw_derivative);
+	pr_info("HW Version     : %d\n", hdev->hw_version);
+	pr_info("#MPW           : %d\n", hdev->hw_mpw);
+	pr_info("Software       : %d\n", hdev->hw_software);
+	pr_info("BSID Version   : %d\n", hdev->hw_bsid);
+
+	return 0;
+}
+
+static int hci_dev_up(struct nfc_dev *nfc_dev)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+	int r = 0;
+
+	if (hdev->ops->open) {
+		r = hdev->ops->open(hdev);
+		if (r < 0)
+			return r;
+	}
+
+	r = hci_dev_session_init(hdev);
+	if (r < 0)
+		goto exit;
+
+	r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+			       NFC_HCI_EVT_END_OPERATION, NULL, 0);
+	if (r < 0)
+		goto exit;
+
+	if (hdev->ops->hci_ready) {
+		r = hdev->ops->hci_ready(hdev);
+		if (r < 0)
+			goto exit;
+	}
+
+	r = hci_dev_version(hdev);
+	if (r < 0)
+		goto exit;
+
+exit:
+	if (r < 0)
+		if (hdev->ops->close)
+			hdev->ops->close(hdev);
+	return r;
+}
+
+static int hci_dev_down(struct nfc_dev *nfc_dev)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+	if (hdev->ops->close)
+		hdev->ops->close(hdev);
+
+	memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+
+	return 0;
+}
+
+static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+	int r;
+
+	if (hdev->ops->start_poll)
+		r = hdev->ops->start_poll(hdev, protocols);
+	else
+		r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+				       NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
+	if (r == 0)
+		hdev->poll_started = true;
+
+	return r;
+}
+
+static void hci_stop_poll(struct nfc_dev *nfc_dev)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+	if (hdev->poll_started) {
+		nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+				   NFC_HCI_EVT_END_OPERATION, NULL, 0);
+		hdev->poll_started = false;
+	}
+}
+
+static struct nfc_target *hci_find_target(struct nfc_hci_dev *hdev,
+					  u32 target_idx)
+{
+	int i;
+
+	if (hdev->poll_started == false || hdev->targets == NULL)
+		return NULL;
+
+	for (i = 0; i < hdev->target_count; i++) {
+		if (hdev->targets[i].idx == target_idx)
+			return &hdev->targets[i];
+	}
+
+	return NULL;
+}
+
+static int hci_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
+			       u32 protocol)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+	if (hci_find_target(hdev, target_idx) == NULL)
+		return -ENOMEDIUM;
+
+	return 0;
+}
+
+static void hci_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
+{
+}
+
+static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+			     struct sk_buff *skb, data_exchange_cb_t cb,
+			     void *cb_context)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+	int r;
+	struct nfc_target *target;
+	struct sk_buff *res_skb = NULL;
+
+	pr_debug("target_idx=%d\n", target_idx);
+
+	target = hci_find_target(hdev, target_idx);
+	if (target == NULL)
+		return -ENOMEDIUM;
+
+	switch (target->hci_reader_gate) {
+	case NFC_HCI_RF_READER_A_GATE:
+	case NFC_HCI_RF_READER_B_GATE:
+		if (hdev->ops->data_exchange) {
+			r = hdev->ops->data_exchange(hdev, target, skb,
+						     &res_skb);
+			if (r <= 0)	/* handled */
+				break;
+		}
+
+		*skb_push(skb, 1) = 0;	/* CTR, see spec:10.2.2.1 */
+		r = nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+				     NFC_HCI_WR_XCHG_DATA,
+				     skb->data, skb->len, &res_skb);
+		/*
+		 * TODO: Check RF Error indicator to make sure data is valid.
+		 * It seems that HCI cmd can complete without error, but data
+		 * can be invalid if an RF error occurred? Ignore for now.
+		 */
+		if (r == 0)
+			skb_trim(res_skb, res_skb->len - 1); /* RF Err ind */
+		break;
+	default:
+		if (hdev->ops->data_exchange) {
+			r = hdev->ops->data_exchange(hdev, target, skb,
+						     &res_skb);
+			if (r == 1)
+				r = -ENOTSUPP;
+		} else {
+			r = -ENOTSUPP;
+		}
+	}
+
+	kfree_skb(skb);
+
+	cb(cb_context, res_skb, r);
+
+	return 0;
+}
+
+static struct nfc_ops hci_nfc_ops = {
+	.dev_up = hci_dev_up,
+	.dev_down = hci_dev_down,
+	.start_poll = hci_start_poll,
+	.stop_poll = hci_stop_poll,
+	.activate_target = hci_activate_target,
+	.deactivate_target = hci_deactivate_target,
+	.data_exchange = hci_data_exchange,
+};
+
+struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
+					    struct nfc_hci_init_data *init_data,
+					    u32 protocols,
+					    int tx_headroom,
+					    int tx_tailroom,
+					    int max_link_payload)
+{
+	struct nfc_hci_dev *hdev;
+
+	if (ops->xmit == NULL)
+		return NULL;
+
+	if (protocols == 0)
+		return NULL;
+
+	hdev = kzalloc(sizeof(struct nfc_hci_dev), GFP_KERNEL);
+	if (hdev == NULL)
+		return NULL;
+
+	hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols,
+					 tx_headroom + HCI_CMDS_HEADROOM,
+					 tx_tailroom);
+	if (!hdev->ndev) {
+		kfree(hdev);
+		return NULL;
+	}
+
+	hdev->ops = ops;
+	hdev->max_data_link_payload = max_link_payload;
+	hdev->init_data = *init_data;
+
+	nfc_set_drvdata(hdev->ndev, hdev);
+
+	memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+
+	return hdev;
+}
+EXPORT_SYMBOL(nfc_hci_allocate_device);
+
+void nfc_hci_free_device(struct nfc_hci_dev *hdev)
+{
+	nfc_free_device(hdev->ndev);
+	kfree(hdev);
+}
+EXPORT_SYMBOL(nfc_hci_free_device);
+
+int nfc_hci_register_device(struct nfc_hci_dev *hdev)
+{
+	struct device *dev = &hdev->ndev->dev;
+	const char *devname = dev_name(dev);
+	char name[32];
+	int r = 0;
+
+	mutex_init(&hdev->msg_tx_mutex);
+
+	INIT_LIST_HEAD(&hdev->msg_tx_queue);
+
+	INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work);
+	snprintf(name, sizeof(name), "%s_hci_msg_tx_wq", devname);
+	hdev->msg_tx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
+					  WQ_MEM_RECLAIM, 1);
+	if (hdev->msg_tx_wq == NULL) {
+		r = -ENOMEM;
+		goto exit;
+	}
+
+	init_timer(&hdev->cmd_timer);
+	hdev->cmd_timer.data = (unsigned long)hdev;
+	hdev->cmd_timer.function = nfc_hci_cmd_timeout;
+
+	skb_queue_head_init(&hdev->rx_hcp_frags);
+
+	INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work);
+	snprintf(name, sizeof(name), "%s_hci_msg_rx_wq", devname);
+	hdev->msg_rx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
+					  WQ_MEM_RECLAIM, 1);
+	if (hdev->msg_rx_wq == NULL) {
+		r = -ENOMEM;
+		goto exit;
+	}
+
+	skb_queue_head_init(&hdev->msg_rx_queue);
+
+	r = nfc_register_device(hdev->ndev);
+
+exit:
+	if (r < 0) {
+		if (hdev->msg_tx_wq)
+			destroy_workqueue(hdev->msg_tx_wq);
+		if (hdev->msg_rx_wq)
+			destroy_workqueue(hdev->msg_rx_wq);
+	}
+
+	return r;
+}
+EXPORT_SYMBOL(nfc_hci_register_device);
+
+void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
+{
+	struct hci_msg *msg;
+
+	skb_queue_purge(&hdev->rx_hcp_frags);
+	skb_queue_purge(&hdev->msg_rx_queue);
+
+	while (!list_empty(&hdev->msg_tx_queue)) {
+		msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg,
+				       msg_l);
+		list_del(&msg->msg_l);
+		skb_queue_purge(&msg->msg_frags);
+		kfree(msg);
+	}
+
+	del_timer_sync(&hdev->cmd_timer);
+
+	nfc_unregister_device(hdev->ndev);
+
+	destroy_workqueue(hdev->msg_tx_wq);
+
+	destroy_workqueue(hdev->msg_rx_wq);
+}
+EXPORT_SYMBOL(nfc_hci_unregister_device);
+
+void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata)
+{
+	hdev->clientdata = clientdata;
+}
+EXPORT_SYMBOL(nfc_hci_set_clientdata);
+
+void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
+{
+	return hdev->clientdata;
+}
+EXPORT_SYMBOL(nfc_hci_get_clientdata);
+
+void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hcp_packet *packet;
+	u8 type;
+	u8 instruction;
+	struct sk_buff *hcp_skb;
+	u8 pipe;
+	struct sk_buff *frag_skb;
+	int msg_len;
+
+	if (skb == NULL) {
+		/* TODO ELa: lower layer had permanent failure, need to
+		 * propagate that up
+		 */
+
+		skb_queue_purge(&hdev->rx_hcp_frags);
+
+		return;
+	}
+
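+	/* A cleared chaining bit (bit 7 of the packet header) means more
+	 * fragments will follow: accumulate this one and wait.
+	 */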
+	packet = (struct hcp_packet *)skb->data;
+	if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
+		skb_queue_tail(&hdev->rx_hcp_frags, skb);
+		return;
+	}
+
+	/* it's the last fragment. Does it need re-aggregation? */
+	if (skb_queue_len(&hdev->rx_hcp_frags)) {
+		pipe = packet->header & NFC_HCI_FRAGMENT;
+		skb_queue_tail(&hdev->rx_hcp_frags, skb);
+
+		msg_len = 0;
+		skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
+			msg_len += (frag_skb->len -
+				    NFC_HCI_HCP_PACKET_HEADER_LEN);
+		}
+
+		hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
+					     msg_len, GFP_KERNEL);
+		if (hcp_skb == NULL) {
+			/* TODO ELa: cannot deliver the HCP message. How to
+			 * propagate the error up?
+			 */
+			skb_queue_purge(&hdev->rx_hcp_frags);
+			return;
+		}
+
+		*skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
+
+		skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
+			msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
+			memcpy(skb_put(hcp_skb, msg_len),
+			       frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
+			       msg_len);
+		}
+
+		skb_queue_purge(&hdev->rx_hcp_frags);
+	} else {
+		packet->header &= NFC_HCI_FRAGMENT;
+		hcp_skb = skb;
+	}
+
+	/* if this is a response, dispatch immediately to
+	 * unblock waiting cmd context. Otherwise, enqueue to dispatch
+	 * in separate context where handler can also execute command.
+	 */
+	packet = (struct hcp_packet *)hcp_skb->data;
+	type = HCP_MSG_GET_TYPE(packet->message.header);
+	if (type == NFC_HCI_HCP_RESPONSE) {
+		pipe = packet->header;
+		instruction = HCP_MSG_GET_CMD(packet->message.header);
+		skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
+			 NFC_HCI_HCP_MESSAGE_HEADER_LEN);
+		nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
+	} else {
+		skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
+		queue_work(hdev->msg_rx_wq, &hdev->msg_rx_work);
+	}
+}
+EXPORT_SYMBOL(nfc_hci_recv_frame);
+
+MODULE_LICENSE("GPL");
diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h
new file mode 100644
index 0000000..45f2fe4
--- /dev/null
+++ b/net/nfc/hci/hci.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LOCAL_HCI_H
+#define __LOCAL_HCI_H
+
+struct gate_pipe_map {
+	u8 gate;
+	u8 pipe;
+};
+
+struct hcp_message {
+	u8 header;		/* type -cmd,evt,rsp- + instruction */
+	u8 data[];
+} __packed;
+
+struct hcp_packet {
+	u8 header;		/* cbit+pipe */
+	struct hcp_message message;
+} __packed;
+
+/*
+ * HCI command execution completion callback.
+ * result will be one of the HCI response codes.
+ * skb contains the response data; the callback is responsible for disposing
+ * of it.
+ */
+typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, u8 result,
+			      struct sk_buff *skb, void *cb_data);
+
+struct hcp_exec_waiter {
+	wait_queue_head_t *wq;
+	bool exec_complete;
+	int exec_result;
+	struct sk_buff *result_skb;
+};
+
+struct hci_msg {
+	struct list_head msg_l;
+	struct sk_buff_head msg_frags;
+	bool wait_response;
+	hci_cmd_cb_t cb;
+	void *cb_context;
+	unsigned long completion_delay;
+};
+
+struct hci_create_pipe_params {
+	u8 src_gate;
+	u8 dest_host;
+	u8 dest_gate;
+} __packed;
+
+struct hci_create_pipe_resp {
+	u8 src_host;
+	u8 src_gate;
+	u8 dest_host;
+	u8 dest_gate;
+	u8 pipe;
+} __packed;
+
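+/* The HCP packet header carries the chaining bit (CB) in bit 7 and the pipe
+ * id in the 7 least significant bits; NFC_HCI_FRAGMENT masks out the CB.
+ */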
+#define NFC_HCI_FRAGMENT	0x7f
+
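+/* The HCP message header packs the 2-bit message type in its upper bits and
+ * the 6-bit instruction (command, event or response code) in its lower bits.
+ */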
+#define HCP_HEADER(type, instr) ((((type) & 0x03) << 6) | ((instr) & 0x3f))
+#define HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6)
+#define HCP_MSG_GET_CMD(header) (header & 0x3f)
+
+int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
+			   u8 type, u8 instruction,
+			   const u8 *payload, size_t payload_len,
+			   hci_cmd_cb_t cb, void *cb_data,
+			   unsigned long completion_delay);
+
+u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe);
+
+void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
+			    u8 instruction, struct sk_buff *skb);
+
+/* HCP headers */
+#define NFC_HCI_HCP_PACKET_HEADER_LEN	1
+#define NFC_HCI_HCP_MESSAGE_HEADER_LEN	1
+#define NFC_HCI_HCP_HEADER_LEN		2
+
+/* HCP types */
+#define NFC_HCI_HCP_COMMAND	0x00
+#define NFC_HCI_HCP_EVENT	0x01
+#define NFC_HCI_HCP_RESPONSE	0x02
+
+/* Generic commands */
+#define NFC_HCI_ANY_SET_PARAMETER	0x01
+#define NFC_HCI_ANY_GET_PARAMETER	0x02
+#define NFC_HCI_ANY_OPEN_PIPE		0x03
+#define NFC_HCI_ANY_CLOSE_PIPE		0x04
+
+/* Reader RF commands */
+#define NFC_HCI_WR_XCHG_DATA		0x10
+
+/* Admin commands */
+#define NFC_HCI_ADM_CREATE_PIPE			0x10
+#define NFC_HCI_ADM_DELETE_PIPE			0x11
+#define NFC_HCI_ADM_NOTIFY_PIPE_CREATED		0x12
+#define NFC_HCI_ADM_NOTIFY_PIPE_DELETED		0x13
+#define NFC_HCI_ADM_CLEAR_ALL_PIPE		0x14
+#define NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED	0x15
+
+/* Generic responses */
+#define NFC_HCI_ANY_OK				0x00
+#define NFC_HCI_ANY_E_NOT_CONNECTED		0x01
+#define NFC_HCI_ANY_E_CMD_PAR_UNKNOWN		0x02
+#define NFC_HCI_ANY_E_NOK			0x03
+#define NFC_HCI_ANY_E_PIPES_FULL		0x04
+#define NFC_HCI_ANY_E_REG_PAR_UNKNOWN		0x05
+#define NFC_HCI_ANY_E_PIPE_NOT_OPENED		0x06
+#define NFC_HCI_ANY_E_CMD_NOT_SUPPORTED		0x07
+#define NFC_HCI_ANY_E_INHIBITED			0x08
+#define NFC_HCI_ANY_E_TIMEOUT			0x09
+#define NFC_HCI_ANY_E_REG_ACCESS_DENIED		0x0a
+#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED	0x0b
+
+/* Pipes */
+#define NFC_HCI_INVALID_PIPE	0x80
+#define NFC_HCI_LINK_MGMT_PIPE	0x00
+#define NFC_HCI_ADMIN_PIPE	0x01
+
+#endif /* __LOCAL_HCI_H */
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
new file mode 100644
index 0000000..7212cf2
--- /dev/null
+++ b/net/nfc/hci/hcp.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "hci: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <net/nfc/hci.h>
+
+#include "hci.h"
+
+/*
+ * Payload is the HCP message data only; the instruction will be prepended.
+ * cb is guaranteed to be called, either upon completion or when the timeout
+ * delay elapses, counted from the moment the cmd is sent to the transport.
+ */
+int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
+			   u8 type, u8 instruction,
+			   const u8 *payload, size_t payload_len,
+			   hci_cmd_cb_t cb, void *cb_data,
+			   unsigned long completion_delay)
+{
+	struct nfc_dev *ndev = hdev->ndev;
+	struct hci_msg *cmd;
+	const u8 *ptr = payload;
+	int hci_len, err;
+	bool firstfrag = true;
+
+	cmd = kzalloc(sizeof(struct hci_msg), GFP_KERNEL);
+	if (cmd == NULL)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&cmd->msg_l);
+	skb_queue_head_init(&cmd->msg_frags);
+	cmd->wait_response = (type == NFC_HCI_HCP_COMMAND);
+	cmd->cb = cb;
+	cmd->cb_context = cb_data;
+	cmd->completion_delay = completion_delay;
+
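+	/* The full HCP message (1 byte message header + payload) is split
+	 * into as many packets as needed so that each one, including its
+	 * packet header, fits within the link MTU.
+	 */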
+	hci_len = payload_len + 1;
+	while (hci_len > 0) {
+		struct sk_buff *skb;
+		int skb_len, data_link_len;
+		struct hcp_packet *packet;
+
+		if (NFC_HCI_HCP_PACKET_HEADER_LEN + hci_len <=
+		    hdev->max_data_link_payload)
+			data_link_len = hci_len;
+		else
+			data_link_len = hdev->max_data_link_payload -
+					NFC_HCI_HCP_PACKET_HEADER_LEN;
+
+		skb_len = ndev->tx_headroom + NFC_HCI_HCP_PACKET_HEADER_LEN +
+			  data_link_len + ndev->tx_tailroom;
+		hci_len -= data_link_len;
+
+		skb = alloc_skb(skb_len, GFP_KERNEL);
+		if (skb == NULL) {
+			err = -ENOMEM;
+			goto out_skb_err;
+		}
+		skb_reserve(skb, ndev->tx_headroom);
+
+		skb_put(skb, NFC_HCI_HCP_PACKET_HEADER_LEN + data_link_len);
+
+		/* Only the last fragment will have the cb bit set to 1 */
+		packet = (struct hcp_packet *)skb->data;
+		packet->header = pipe;
+		if (firstfrag) {
+			firstfrag = false;
+			packet->message.header = HCP_HEADER(type, instruction);
+			if (ptr) {
+				memcpy(packet->message.data, ptr,
+				       data_link_len - 1);
+				ptr += data_link_len - 1;
+			}
+		} else {
+			memcpy(&packet->message, ptr, data_link_len);
+			ptr += data_link_len;
+		}
+
+		/* This is the last fragment, set the cb bit */
+		if (hci_len == 0)
+			packet->header |= ~NFC_HCI_FRAGMENT;
+
+		skb_queue_tail(&cmd->msg_frags, skb);
+	}
+
+	mutex_lock(&hdev->msg_tx_mutex);
+	list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
+	mutex_unlock(&hdev->msg_tx_mutex);
+
+	queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+
+	return 0;
+
+out_skb_err:
+	skb_queue_purge(&cmd->msg_frags);
+	kfree(cmd);
+
+	return err;
+}
+
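+/* Reverse lookup in the gate-to-pipe table; returns 0xff when no gate is
+ * currently bound to the given pipe.
+ */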
+u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe)
+{
+	int gate;
+
+	for (gate = 0; gate < NFC_HCI_MAX_GATES; gate++)
+		if (hdev->gate2pipe[gate] == pipe)
+			return gate;
+
+	return 0xff;
+}
+
+/*
+ * Receive hcp message for pipe, with type and cmd.
+ * skb contains optional message data only.
+ */
+void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
+			    u8 instruction, struct sk_buff *skb)
+{
+	switch (type) {
+	case NFC_HCI_HCP_RESPONSE:
+		nfc_hci_resp_received(hdev, instruction, skb);
+		break;
+	case NFC_HCI_HCP_COMMAND:
+		nfc_hci_cmd_received(hdev, pipe, instruction, skb);
+		break;
+	case NFC_HCI_HCP_EVENT:
+		nfc_hci_event_received(hdev, pipe, instruction, skb);
+		break;
+	default:
+		pr_err("UNKNOWN MSG Type %d, instruction=%d\n",
+		       type, instruction);
+		kfree_skb(skb);
+		break;
+	}
+}
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
new file mode 100644
index 0000000..923bdf7
--- /dev/null
+++ b/net/nfc/hci/shdlc.c
@@ -0,0 +1,945 @@
+/*
+ * Copyright (C) 2012  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
+
+#include <linux/sched.h>
+#include <linux/export.h>
+#include <linux/wait.h>
+#include <linux/crc-ccitt.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+
+#include <net/nfc/hci.h>
+#include <net/nfc/shdlc.h>
+
+#define SHDLC_LLC_HEAD_ROOM	2
+#define SHDLC_LLC_TAIL_ROOM	2
+
+#define SHDLC_MAX_WINDOW	4
+#define SHDLC_SREJ_SUPPORT	false
+
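+/* The SHDLC frame type is encoded in the most significant bits of the
+ * control byte: I-frames carry HCI payload plus N(S)/N(R) sequence numbers,
+ * S-frames carry supervisory information (RR/REJ/RNR/SREJ) and U-frames
+ * manage the link itself (RSET/UA).
+ */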
+#define SHDLC_CONTROL_HEAD_MASK	0xe0
+#define SHDLC_CONTROL_HEAD_I	0x80
+#define SHDLC_CONTROL_HEAD_I2	0xa0
+#define SHDLC_CONTROL_HEAD_S	0xc0
+#define SHDLC_CONTROL_HEAD_U	0xe0
+
+#define SHDLC_CONTROL_NS_MASK	0x38
+#define SHDLC_CONTROL_NR_MASK	0x07
+#define SHDLC_CONTROL_TYPE_MASK	0x18
+
+#define SHDLC_CONTROL_M_MASK	0x1f
+
+enum sframe_type {
+	S_FRAME_RR = 0x00,
+	S_FRAME_REJ = 0x01,
+	S_FRAME_RNR = 0x02,
+	S_FRAME_SREJ = 0x03
+};
+
+enum uframe_modifier {
+	U_FRAME_UA = 0x06,
+	U_FRAME_RSET = 0x19
+};
+
+#define SHDLC_CONNECT_VALUE_MS	5
+#define SHDLC_T1_VALUE_MS(w)	((5 * w) / 4)
+#define SHDLC_T2_VALUE_MS	300
+
+#define SHDLC_DUMP_SKB(info, skb)				  \
+do {								  \
+	pr_debug("%s:\n", info);				  \
+	print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \
+		       16, 1, skb->data, skb->len, 0);		  \
+} while (0)
+
+/* checks x < y <= z modulo 8 */
+static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
+{
+	if (x < z)
+		return ((x < y) && (y <= z)) ? true : false;
+	else
+		return ((y > x) || (y <= z)) ? true : false;
+}
+
+/* checks x <= y < z modulo 8 */
+static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
+{
+	if (x <= z)
+		return ((x <= y) && (y < z)) ? true : false;
+	else			/* x > z -> z+8 > x */
+		return ((y >= x) || (y < z)) ? true : false;
+}
+
+static struct sk_buff *nfc_shdlc_alloc_skb(struct nfc_shdlc *shdlc,
+					   int payload_len)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM +
+			shdlc->client_tailroom + SHDLC_LLC_TAIL_ROOM +
+			payload_len, GFP_KERNEL);
+	if (skb)
+		skb_reserve(skb, shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM);
+
+	return skb;
+}
+
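+/* Prepend the length byte (frame length including the two CRC bytes) and
+ * append the complemented CRC-CCITT, least significant byte first.
+ */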
+static void nfc_shdlc_add_len_crc(struct sk_buff *skb)
+{
+	u16 crc;
+	int len;
+
+	len = skb->len + 2;
+	*skb_push(skb, 1) = len;
+
+	crc = crc_ccitt(0xffff, skb->data, skb->len);
+	crc = ~crc;
+	*skb_put(skb, 1) = crc & 0xff;
+	*skb_put(skb, 1) = crc >> 8;
+}
+
+/* immediately sends an S frame. */
+static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc,
+				  enum sframe_type sframe_type, int nr)
+{
+	int r;
+	struct sk_buff *skb;
+
+	pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);
+
+	skb = nfc_shdlc_alloc_skb(shdlc, 0);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	*skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
+
+	nfc_shdlc_add_len_crc(skb);
+
+	r = shdlc->ops->xmit(shdlc, skb);
+
+	kfree_skb(skb);
+
+	return r;
+}
+
+/* immediately sends a U frame. skb may contain an optional payload */
+static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc,
+				  struct sk_buff *skb,
+				  enum uframe_modifier uframe_modifier)
+{
+	int r;
+
+	pr_debug("uframe_modifier=%d\n", uframe_modifier);
+
+	*skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
+
+	nfc_shdlc_add_len_crc(skb);
+
+	r = shdlc->ops->xmit(shdlc, skb);
+
+	kfree_skb(skb);
+
+	return r;
+}
+
+/*
+ * Free ack_pending frames until y_nr - 1, and reset t2 according to
+ * the remaining oldest ack_pending frame sent time
+ */
+static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr)
+{
+	struct sk_buff *skb;
+	int dnr = shdlc->dnr;	/* MUST initially be < y_nr */
+
+	pr_debug("release ack pending up to frame %d excluded\n", y_nr);
+
+	while (dnr != y_nr) {
+		pr_debug("release ack pending frame %d\n", dnr);
+
+		skb = skb_dequeue(&shdlc->ack_pending_q);
+		kfree_skb(skb);
+
+		dnr = (dnr + 1) % 8;
+	}
+
+	if (skb_queue_empty(&shdlc->ack_pending_q)) {
+		if (shdlc->t2_active) {
+			del_timer_sync(&shdlc->t2_timer);
+			shdlc->t2_active = false;
+
+			pr_debug
+			    ("All sent frames acked. Stopped T2(retransmit)\n");
+		}
+	} else {
+		skb = skb_peek(&shdlc->ack_pending_q);
+
+		mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
+			  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
+		shdlc->t2_active = true;
+
+		pr_debug
+		    ("Start T2(retransmit) for remaining unacked sent frames\n");
+	}
+}
+
+/*
+ * Receive validated frames from lower layer. skb contains HCI payload only.
+ * Handle according to algorithm at spec:10.8.2
+ */
+static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc,
+				  struct sk_buff *skb, int ns, int nr)
+{
+	int x_ns = ns;
+	int y_nr = nr;
+
+	pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr);
+
+	if (shdlc->state != SHDLC_CONNECTED)
+		goto exit;
+
+	if (x_ns != shdlc->nr) {
+		nfc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
+		goto exit;
+	}
+
+	if (shdlc->t1_active == false) {
+		shdlc->t1_active = true;
+		mod_timer(&shdlc->t1_timer, jiffies +
+			  msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
+		pr_debug("(re)Start T1(send ack)\n");
+	}
+
+	if (skb->len) {
+		nfc_hci_recv_frame(shdlc->hdev, skb);
+		skb = NULL;
+	}
+
+	shdlc->nr = (shdlc->nr + 1) % 8;
+
+	if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
+		nfc_shdlc_reset_t2(shdlc, y_nr);
+
+		shdlc->dnr = y_nr;
+	}
+
+exit:
+	if (skb)
+		kfree_skb(skb);
+}
+
+static void nfc_shdlc_rcv_ack(struct nfc_shdlc *shdlc, int y_nr)
+{
+	pr_debug("remote acked up to frame %d excluded\n", y_nr);
+
+	if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
+		nfc_shdlc_reset_t2(shdlc, y_nr);
+		shdlc->dnr = y_nr;
+	}
+}
+
+static void nfc_shdlc_requeue_ack_pending(struct nfc_shdlc *shdlc)
+{
+	struct sk_buff *skb;
+
+	pr_debug("ns reset to %d\n", shdlc->dnr);
+
+	while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
+		skb_pull(skb, 2);	/* remove len+control */
+		skb_trim(skb, skb->len - 2);	/* remove crc */
+		skb_queue_head(&shdlc->send_q, skb);
+	}
+	shdlc->ns = shdlc->dnr;
+}
+
+static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr)
+{
+	struct sk_buff *skb;
+
+	pr_debug("remote asks for retransmission from frame %d\n", y_nr);
+
+	if (nfc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
+		if (shdlc->t2_active) {
+			del_timer_sync(&shdlc->t2_timer);
+			shdlc->t2_active = false;
+			pr_debug("Stopped T2(retransmit)\n");
+		}
+
+		if (shdlc->dnr != y_nr) {
+			while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
+				skb = skb_dequeue(&shdlc->ack_pending_q);
+				kfree_skb(skb);
+			}
+		}
+
+		nfc_shdlc_requeue_ack_pending(shdlc);
+	}
+}
+
+/* See spec RR:10.8.3 REJ:10.8.4 */
+static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc,
+				  enum sframe_type s_frame_type, int nr)
+{
+	struct sk_buff *skb;
+
+	if (shdlc->state != SHDLC_CONNECTED)
+		return;
+
+	switch (s_frame_type) {
+	case S_FRAME_RR:
+		nfc_shdlc_rcv_ack(shdlc, nr);
+		if (shdlc->rnr == true) {	/* see SHDLC 10.7.7 */
+			shdlc->rnr = false;
+			if (shdlc->send_q.qlen == 0) {
+				skb = nfc_shdlc_alloc_skb(shdlc, 0);
+				if (skb)
+					skb_queue_tail(&shdlc->send_q, skb);
+			}
+		}
+		break;
+	case S_FRAME_REJ:
+		nfc_shdlc_rcv_rej(shdlc, nr);
+		break;
+	case S_FRAME_RNR:
+		nfc_shdlc_rcv_ack(shdlc, nr);
+		shdlc->rnr = true;
+		break;
+	default:
+		break;
+	}
+}
+
+static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
+{
+	pr_debug("result=%d\n", r);
+
+	del_timer_sync(&shdlc->connect_timer);
+
+	if (r == 0) {
+		shdlc->ns = 0;
+		shdlc->nr = 0;
+		shdlc->dnr = 0;
+
+		shdlc->state = SHDLC_CONNECTED;
+	} else {
+		shdlc->state = SHDLC_DISCONNECTED;
+
+		/*
+		 * TODO: Could it be possible that there are pending
+		 * executing commands that are waiting for connect to complete
+		 * before they can be carried? As connect is a blocking
+		 * operation, it would require that the userspace process can
+		 * send commands on the same device from a second thread before
+		 * the device is up. I don't think that is possible, is it?
+		 */
+	}
+
+	shdlc->connect_result = r;
+
+	wake_up(shdlc->connect_wq);
+}
+
+static int nfc_shdlc_connect_initiate(struct nfc_shdlc *shdlc)
+{
+	struct sk_buff *skb;
+
+	pr_debug("\n");
+
+	skb = nfc_shdlc_alloc_skb(shdlc, 2);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	*skb_put(skb, 1) = SHDLC_MAX_WINDOW;
+	*skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0;
+
+	return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
+}
+
+static int nfc_shdlc_connect_send_ua(struct nfc_shdlc *shdlc)
+{
+	struct sk_buff *skb;
+
+	pr_debug("\n");
+
+	skb = nfc_shdlc_alloc_skb(shdlc, 0);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
+}
+
+static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
+				  struct sk_buff *skb,
+				  enum uframe_modifier u_frame_modifier)
+{
+	u8 w = SHDLC_MAX_WINDOW;
+	bool srej_support = SHDLC_SREJ_SUPPORT;
+	int r;
+
+	pr_debug("u_frame_modifier=%d\n", u_frame_modifier);
+
+	switch (u_frame_modifier) {
+	case U_FRAME_RSET:
+		if (shdlc->state == SHDLC_NEGOCIATING) {
+			/* we sent RSET, but the chip wants to negotiate */
+			if (skb->len > 0)
+				w = skb->data[0];
+
+			if (skb->len > 1)
+				srej_support = skb->data[1] & 0x01 ? true :
+					       false;
+
+			if ((w <= SHDLC_MAX_WINDOW) &&
+			    (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
+				shdlc->w = w;
+				shdlc->srej_support = srej_support;
+				r = nfc_shdlc_connect_send_ua(shdlc);
+				nfc_shdlc_connect_complete(shdlc, r);
+			}
+		} else if (shdlc->state > SHDLC_NEGOCIATING) {
+			/*
+			 * TODO: Chip wants to reset link
+			 * send ua, empty skb lists, reset counters
+			 * propagate info to HCI layer
+			 */
+		}
+		break;
+	case U_FRAME_UA:
+		if ((shdlc->state == SHDLC_CONNECTING &&
+		     shdlc->connect_tries > 0) ||
+		    (shdlc->state == SHDLC_NEGOCIATING))
+			nfc_shdlc_connect_complete(shdlc, 0);
+		break;
+	default:
+		break;
+	}
+
+	kfree_skb(skb);
+}
+
+static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc)
+{
+	struct sk_buff *skb;
+	u8 control;
+	int nr;
+	int ns;
+	enum sframe_type s_frame_type;
+	enum uframe_modifier u_frame_modifier;
+
+	if (shdlc->rcv_q.qlen)
+		pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);
+
+	while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
+		control = skb->data[0];
+		skb_pull(skb, 1);
+		switch (control & SHDLC_CONTROL_HEAD_MASK) {
+		case SHDLC_CONTROL_HEAD_I:
+		case SHDLC_CONTROL_HEAD_I2:
+			ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
+			nr = control & SHDLC_CONTROL_NR_MASK;
+			nfc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
+			break;
+		case SHDLC_CONTROL_HEAD_S:
+			s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
+			nr = control & SHDLC_CONTROL_NR_MASK;
+			nfc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
+			kfree_skb(skb);
+			break;
+		case SHDLC_CONTROL_HEAD_U:
+			u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
+			nfc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
+			break;
+		default:
+			pr_err("UNKNOWN Control=%d\n", control);
+			kfree_skb(skb);
+			break;
+		}
+	}
+}
+
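+/* Number of sent I-frames not yet acknowledged, computed modulo 8 from the
+ * next send sequence number (ns) and the oldest unacknowledged one (dnr).
+ */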
+static int nfc_shdlc_w_used(int ns, int dnr)
+{
+	int unack_count;
+
+	if (dnr <= ns)
+		unack_count = ns - dnr;
+	else
+		unack_count = 8 - dnr + ns;
+
+	return unack_count;
+}
+
+/* Send frames according to algorithm at spec:10.8.1 */
+static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
+{
+	struct sk_buff *skb;
+	int r;
+	unsigned long time_sent;
+
+	if (shdlc->send_q.qlen)
+		pr_debug
+		    ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
+		     shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
+		     shdlc->rnr == false ? "false" : "true",
+		     shdlc->w - nfc_shdlc_w_used(shdlc->ns, shdlc->dnr),
+		     shdlc->ack_pending_q.qlen);
+
+	while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
+	       (shdlc->rnr == false)) {
+
+		if (shdlc->t1_active) {
+			del_timer_sync(&shdlc->t1_timer);
+			shdlc->t1_active = false;
+			pr_debug("Stopped T1(send ack)\n");
+		}
+
+		skb = skb_dequeue(&shdlc->send_q);
+
+		*skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
+				    shdlc->nr;
+
+		pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
+			 shdlc->nr);
+	/*	SHDLC_DUMP_SKB("shdlc frame written", skb); */
+
+		nfc_shdlc_add_len_crc(skb);
+
+		r = shdlc->ops->xmit(shdlc, skb);
+		if (r < 0) {
+			/*
+			 * TODO: Cannot send, shdlc machine is dead, we
+			 * must propagate the information up to HCI.
+			 */
+			shdlc->hard_fault = r;
+			break;
+		}
+
+		shdlc->ns = (shdlc->ns + 1) % 8;
+
+		time_sent = jiffies;
+		*(unsigned long *)skb->cb = time_sent;
+
+		skb_queue_tail(&shdlc->ack_pending_q, skb);
+
+		if (shdlc->t2_active == false) {
+			shdlc->t2_active = true;
+			mod_timer(&shdlc->t2_timer, time_sent +
+				  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
+			pr_debug("Started T2 (retransmit)\n");
+		}
+	}
+}
+
+static void nfc_shdlc_connect_timeout(unsigned long data)
+{
+	struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
+
+	pr_debug("\n");
+
+	queue_work(shdlc->sm_wq, &shdlc->sm_work);
+}
+
+static void nfc_shdlc_t1_timeout(unsigned long data)
+{
+	struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
+
+	pr_debug("SoftIRQ: need to send ack\n");
+
+	queue_work(shdlc->sm_wq, &shdlc->sm_work);
+}
+
+static void nfc_shdlc_t2_timeout(unsigned long data)
+{
+	struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;
+
+	pr_debug("SoftIRQ: need to retransmit\n");
+
+	queue_work(shdlc->sm_wq, &shdlc->sm_work);
+}
+
+static void nfc_shdlc_sm_work(struct work_struct *work)
+{
+	struct nfc_shdlc *shdlc = container_of(work, struct nfc_shdlc, sm_work);
+	int r;
+
+	pr_debug("\n");
+
+	mutex_lock(&shdlc->state_mutex);
+
+	switch (shdlc->state) {
+	case SHDLC_DISCONNECTED:
+		skb_queue_purge(&shdlc->rcv_q);
+		skb_queue_purge(&shdlc->send_q);
+		skb_queue_purge(&shdlc->ack_pending_q);
+		break;
+	case SHDLC_CONNECTING:
+		if (shdlc->connect_tries++ < 5)
+			r = nfc_shdlc_connect_initiate(shdlc);
+		else
+			r = -ETIME;
+		if (r < 0) {
+			nfc_shdlc_connect_complete(shdlc, r);
+		} else {
+			mod_timer(&shdlc->connect_timer, jiffies +
+				  msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));
+
+			shdlc->state = SHDLC_NEGOCIATING;
+		}
+		break;
+	case SHDLC_NEGOCIATING:
+		if (timer_pending(&shdlc->connect_timer) == 0) {
+			shdlc->state = SHDLC_CONNECTING;
+			queue_work(shdlc->sm_wq, &shdlc->sm_work);
+		}
+
+		nfc_shdlc_handle_rcv_queue(shdlc);
+		break;
+	case SHDLC_CONNECTED:
+		nfc_shdlc_handle_rcv_queue(shdlc);
+		nfc_shdlc_handle_send_queue(shdlc);
+
+		if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
+			pr_debug
+			    ("Handle T1(send ack) elapsed (T1 now inactive)\n");
+
+			shdlc->t1_active = false;
+			r = nfc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
+						   shdlc->nr);
+			if (r < 0)
+				shdlc->hard_fault = r;
+		}
+
+		if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
+			pr_debug
+			    ("Handle T2(retransmit) elapsed (T2 inactive)\n");
+
+			shdlc->t2_active = false;
+
+			nfc_shdlc_requeue_ack_pending(shdlc);
+			nfc_shdlc_handle_send_queue(shdlc);
+		}
+
+		if (shdlc->hard_fault) {
+			/*
+			 * TODO: Handle hard_fault that occurred during
+			 * this invocation of the shdlc worker
+			 */
+		}
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&shdlc->state_mutex);
+}
+
+/*
+ * Called from syscall context to establish shdlc link. Sleeps until
+ * link is ready or failure.
+ */
+static int nfc_shdlc_connect(struct nfc_shdlc *shdlc)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);
+
+	pr_debug("\n");
+
+	mutex_lock(&shdlc->state_mutex);
+
+	shdlc->state = SHDLC_CONNECTING;
+	shdlc->connect_wq = &connect_wq;
+	shdlc->connect_tries = 0;
+	shdlc->connect_result = 1;
+
+	mutex_unlock(&shdlc->state_mutex);
+
+	queue_work(shdlc->sm_wq, &shdlc->sm_work);
+
+	wait_event(connect_wq, shdlc->connect_result != 1);
+
+	return shdlc->connect_result;
+}
+
+static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc)
+{
+	pr_debug("\n");
+
+	mutex_lock(&shdlc->state_mutex);
+
+	shdlc->state = SHDLC_DISCONNECTED;
+
+	mutex_unlock(&shdlc->state_mutex);
+
+	queue_work(shdlc->sm_wq, &shdlc->sm_work);
+}
+
+/*
+ * Receive an incoming shdlc frame. Frame has already been crc-validated.
+ * skb contains only LLC header and payload.
+ * If skb == NULL, it is a notification that the link below is dead.
+ */
+void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb)
+{
+	if (skb == NULL) {
+		pr_err("NULL Frame -> link is dead\n");
+		shdlc->hard_fault = -EREMOTEIO;
+	} else {
+		SHDLC_DUMP_SKB("incoming frame", skb);
+		skb_queue_tail(&shdlc->rcv_q, skb);
+	}
+
+	queue_work(shdlc->sm_wq, &shdlc->sm_work);
+}
+EXPORT_SYMBOL(nfc_shdlc_recv_frame);
+
+static int nfc_shdlc_open(struct nfc_hci_dev *hdev)
+{
+	struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+	int r;
+
+	pr_debug("\n");
+
+	if (shdlc->ops->open) {
+		r = shdlc->ops->open(shdlc);
+		if (r < 0)
+			return r;
+	}
+
+	r = nfc_shdlc_connect(shdlc);
+	if (r < 0 && shdlc->ops->close)
+		shdlc->ops->close(shdlc);
+
+	return r;
+}
+
+static void nfc_shdlc_close(struct nfc_hci_dev *hdev)
+{
+	struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+	pr_debug("\n");
+
+	nfc_shdlc_disconnect(shdlc);
+
+	if (shdlc->ops->close)
+		shdlc->ops->close(shdlc);
+}
+
+static int nfc_shdlc_hci_ready(struct nfc_hci_dev *hdev)
+{
+	struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+	int r = 0;
+
+	pr_debug("\n");
+
+	if (shdlc->ops->hci_ready)
+		r = shdlc->ops->hci_ready(shdlc);
+
+	return r;
+}
+
+static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+	struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+	SHDLC_DUMP_SKB("queuing HCP packet to shdlc", skb);
+
+	skb_queue_tail(&shdlc->send_q, skb);
+
+	queue_work(shdlc->sm_wq, &shdlc->sm_work);
+
+	return 0;
+}
+
+static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols)
+{
+	struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+	pr_debug("\n");
+
+	if (shdlc->ops->start_poll)
+		return shdlc->ops->start_poll(shdlc, protocols);
+
+	return 0;
+}
+
+static int nfc_shdlc_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
+				      struct nfc_target *target)
+{
+	struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+	if (shdlc->ops->target_from_gate)
+		return shdlc->ops->target_from_gate(shdlc, gate, target);
+
+	return -EPERM;
+}
+
+static int nfc_shdlc_complete_target_discovered(struct nfc_hci_dev *hdev,
+						u8 gate,
+						struct nfc_target *target)
+{
+	struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+	pr_debug("\n");
+
+	if (shdlc->ops->complete_target_discovered)
+		return shdlc->ops->complete_target_discovered(shdlc, gate,
+							      target);
+
+	return 0;
+}
+
+static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
+				   struct nfc_target *target,
+				   struct sk_buff *skb,
+				   struct sk_buff **res_skb)
+{
+	struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
+
+	if (shdlc->ops->data_exchange)
+		return shdlc->ops->data_exchange(shdlc, target, skb, res_skb);
+
+	return -EPERM;
+}
+
+static struct nfc_hci_ops shdlc_ops = {
+	.open = nfc_shdlc_open,
+	.close = nfc_shdlc_close,
+	.hci_ready = nfc_shdlc_hci_ready,
+	.xmit = nfc_shdlc_xmit,
+	.start_poll = nfc_shdlc_start_poll,
+	.target_from_gate = nfc_shdlc_target_from_gate,
+	.complete_target_discovered = nfc_shdlc_complete_target_discovered,
+	.data_exchange = nfc_shdlc_data_exchange,
+};
+
+struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
+				     struct nfc_hci_init_data *init_data,
+				     u32 protocols,
+				     int tx_headroom, int tx_tailroom,
+				     int max_link_payload, const char *devname)
+{
+	struct nfc_shdlc *shdlc;
+	int r;
+	char name[32];
+
+	if (ops->xmit == NULL)
+		return NULL;
+
+	shdlc = kzalloc(sizeof(struct nfc_shdlc), GFP_KERNEL);
+	if (shdlc == NULL)
+		return NULL;
+
+	mutex_init(&shdlc->state_mutex);
+	shdlc->ops = ops;
+	shdlc->state = SHDLC_DISCONNECTED;
+
+	init_timer(&shdlc->connect_timer);
+	shdlc->connect_timer.data = (unsigned long)shdlc;
+	shdlc->connect_timer.function = nfc_shdlc_connect_timeout;
+
+	init_timer(&shdlc->t1_timer);
+	shdlc->t1_timer.data = (unsigned long)shdlc;
+	shdlc->t1_timer.function = nfc_shdlc_t1_timeout;
+
+	init_timer(&shdlc->t2_timer);
+	shdlc->t2_timer.data = (unsigned long)shdlc;
+	shdlc->t2_timer.function = nfc_shdlc_t2_timeout;
+
+	shdlc->w = SHDLC_MAX_WINDOW;
+	shdlc->srej_support = SHDLC_SREJ_SUPPORT;
+
+	skb_queue_head_init(&shdlc->rcv_q);
+	skb_queue_head_init(&shdlc->send_q);
+	skb_queue_head_init(&shdlc->ack_pending_q);
+
+	INIT_WORK(&shdlc->sm_work, nfc_shdlc_sm_work);
+	snprintf(name, sizeof(name), "%s_shdlc_sm_wq", devname);
+	shdlc->sm_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
+				       WQ_MEM_RECLAIM, 1);
+	if (shdlc->sm_wq == NULL)
+		goto err_allocwq;
+
+	shdlc->client_headroom = tx_headroom;
+	shdlc->client_tailroom = tx_tailroom;
+
+	shdlc->hdev = nfc_hci_allocate_device(&shdlc_ops, init_data, protocols,
+					      tx_headroom + SHDLC_LLC_HEAD_ROOM,
+					      tx_tailroom + SHDLC_LLC_TAIL_ROOM,
+					      max_link_payload);
+	if (shdlc->hdev == NULL)
+		goto err_allocdev;
+
+	nfc_hci_set_clientdata(shdlc->hdev, shdlc);
+
+	r = nfc_hci_register_device(shdlc->hdev);
+	if (r < 0)
+		goto err_regdev;
+
+	return shdlc;
+
+err_regdev:
+	nfc_hci_free_device(shdlc->hdev);
+
+err_allocdev:
+	destroy_workqueue(shdlc->sm_wq);
+
+err_allocwq:
+	kfree(shdlc);
+
+	return NULL;
+}
+EXPORT_SYMBOL(nfc_shdlc_allocate);
+
+void nfc_shdlc_free(struct nfc_shdlc *shdlc)
+{
+	pr_debug("\n");
+
+	/* TODO: Check that this cannot be called while still in use */
+
+	nfc_hci_unregister_device(shdlc->hdev);
+	nfc_hci_free_device(shdlc->hdev);
+
+	destroy_workqueue(shdlc->sm_wq);
+
+	skb_queue_purge(&shdlc->rcv_q);
+	skb_queue_purge(&shdlc->send_q);
+	skb_queue_purge(&shdlc->ack_pending_q);
+
+	kfree(shdlc);
+}
+EXPORT_SYMBOL(nfc_shdlc_free);
+
+void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata)
+{
+	pr_debug("\n");
+
+	shdlc->clientdata = clientdata;
+}
+EXPORT_SYMBOL(nfc_shdlc_set_clientdata);
+
+void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc)
+{
+	return shdlc->clientdata;
+}
+EXPORT_SYMBOL(nfc_shdlc_get_clientdata);
+
+struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc)
+{
+	return shdlc->hdev;
+}
+EXPORT_SYMBOL(nfc_shdlc_get_hci_dev);
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index ef10ffc..11a3b7d 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -102,7 +102,7 @@
 	length = llcp_tlv_length[type];
 	if (length == 0 && value_length == 0)
 		return NULL;
-	else
+	else if (length == 0)
 		length = value_length;
 
 	*tlv_length = 2 + length;
@@ -248,7 +248,7 @@
 
 	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
 
-	skb = llcp_add_header(skb, sock->ssap, sock->dsap, LLCP_PDU_DISC);
+	skb = llcp_add_header(skb, sock->dsap, sock->ssap, LLCP_PDU_DISC);
 
 	skb_queue_tail(&local->tx_queue, skb);
 
@@ -416,7 +416,7 @@
 
 	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
 
-	skb = llcp_add_header(skb, ssap, dsap, LLCP_PDU_DM);
+	skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM);
 
 	memcpy(skb_put(skb, 1), &reason, 1);
 
@@ -522,7 +522,7 @@
 
 	skb_put(skb, LLCP_SEQUENCE_SIZE);
 
-	skb->data[2] = sock->recv_n % 16;
+	skb->data[2] = sock->recv_n;
 
 	skb_queue_head(&local->tx_queue, skb);
 
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 17a578f..92988aa 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -307,6 +307,8 @@
 	u8 *gb_cur, *version_tlv, version, version_length;
 	u8 *lto_tlv, lto, lto_length;
 	u8 *wks_tlv, wks_length;
+	u8 *miux_tlv, miux_length;
+	__be16 miux;
 	u8 gb_len = 0;
 
 	version = LLCP_VERSION_11;
@@ -316,7 +318,7 @@
 
 	/* 1500 ms */
 	lto = 150;
-	lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &lto, 1, &lto_length);
+	lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &lto, 1, &lto_length);
 	gb_len += lto_length;
 
 	pr_debug("Local wks 0x%lx\n", local->local_wks);
@@ -324,6 +326,11 @@
 				     &wks_length);
 	gb_len += wks_length;
 
+	miux = cpu_to_be16(LLCP_MAX_MIUX);
+	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+				      &miux_length);
+	gb_len += miux_length;
+
 	gb_len += ARRAY_SIZE(llcp_magic);
 
 	if (gb_len > NFC_MAX_GT_LEN) {
@@ -345,6 +352,9 @@
 	memcpy(gb_cur, wks_tlv, wks_length);
 	gb_cur += wks_length;
 
+	memcpy(gb_cur, miux_tlv, miux_length);
+	gb_cur += miux_length;
+
 	kfree(version_tlv);
 	kfree(lto_tlv);
 
@@ -388,6 +398,9 @@
 	skb = skb_dequeue(&local->tx_queue);
 	if (skb != NULL) {
 		pr_debug("Sending pending skb\n");
+		print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET,
+			       16, 1, skb->data, skb->len, true);
+
 		nfc_data_exchange(local->dev, local->target_idx,
 				  skb, nfc_llcp_recv, local);
 	} else {
@@ -425,7 +438,7 @@
 
 static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
 {
-	pdu->data[2] = (sock->send_n << 4) | (sock->recv_n % 16);
+	pdu->data[2] = (sock->send_n << 4) | (sock->recv_n);
 	sock->send_n = (sock->send_n + 1) % 16;
 	sock->recv_ack_n = (sock->recv_n - 1) % 16;
 }
@@ -814,6 +827,10 @@
 
 	pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);
 
+	if (ptype != LLCP_PDU_SYMM)
+		print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET,
+			       16, 1, skb->data, skb->len, true);
+
 	switch (ptype) {
 	case LLCP_PDU_SYMM:
 		pr_debug("SYMM\n");
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 9ec065b..8737c20 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -477,7 +477,7 @@
 	}
 
 	if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
-		param.rf_discovery_id = target->idx;
+		param.rf_discovery_id = target->logical_idx;
 
 		if (protocol == NFC_PROTO_JEWEL)
 			param.rf_protocol = NCI_RF_PROTOCOL_T1T;
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 2e3dee4..99e1632 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -227,7 +227,7 @@
 
 	for (i = 0; i < ndev->n_targets; i++) {
 		target = &ndev->targets[i];
-		if (target->idx == ntf->rf_discovery_id) {
+		if (target->logical_idx == ntf->rf_discovery_id) {
 			/* This target already exists, add the new protocol */
 			nci_add_new_protocol(ndev, target, ntf->rf_protocol,
 					     ntf->rf_tech_and_mode,
@@ -248,10 +248,10 @@
 				  ntf->rf_tech_and_mode,
 				  &ntf->rf_tech_specific_params);
 	if (!rc) {
-		target->idx = ntf->rf_discovery_id;
+		target->logical_idx = ntf->rf_discovery_id;
 		ndev->n_targets++;
 
-		pr_debug("target_idx %d, n_targets %d\n", target->idx,
+		pr_debug("logical idx %d, n_targets %d\n", target->logical_idx,
 			 ndev->n_targets);
 	}
 }
@@ -372,10 +372,11 @@
 	if (rc)
 		return;
 
-	target->idx = ntf->rf_discovery_id;
+	target->logical_idx = ntf->rf_discovery_id;
 	ndev->n_targets++;
 
-	pr_debug("target_idx %d, n_targets %d\n", target->idx, ndev->n_targets);
+	pr_debug("logical idx %d, n_targets %d\n",
+		 target->logical_idx, ndev->n_targets);
 
 	nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets);
 }
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 6404052..f1829f6 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -63,19 +63,23 @@
 
 	genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
 
-	NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx);
-	NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols);
-	NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
-	NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
-	if (target->nfcid1_len > 0)
-		NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
-			target->nfcid1);
-	if (target->sensb_res_len > 0)
-		NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
-			target->sensb_res);
-	if (target->sensf_res_len > 0)
-		NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
-			target->sensf_res);
+	if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) ||
+	    nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) ||
+	    nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) ||
+	    nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res))
+		goto nla_put_failure;
+	if (target->nfcid1_len > 0 &&
+	    nla_put(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
+		    target->nfcid1))
+		goto nla_put_failure;
+	if (target->sensb_res_len > 0 &&
+	    nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
+		    target->sensb_res))
+		goto nla_put_failure;
+	if (target->sensf_res_len > 0 &&
+	    nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
+		    target->sensf_res))
+		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
 
@@ -170,7 +174,8 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -183,6 +188,37 @@
 	return -EMSGSIZE;
 }
 
+int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx)
+{
+	struct sk_buff *msg;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+			  NFC_EVENT_TARGET_LOST);
+	if (!hdr)
+		goto free_msg;
+
+	if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+	    nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+
+	genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+free_msg:
+	nlmsg_free(msg);
+	return -EMSGSIZE;
+}
+
 int nfc_genl_device_added(struct nfc_dev *dev)
 {
 	struct sk_buff *msg;
@@ -197,10 +233,11 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
-	NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
-	NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
+	if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+	    nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+	    nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
+	    nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -229,7 +266,8 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -259,10 +297,11 @@
 	if (cb)
 		genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
 
-	NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
-	NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
-	NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up);
+	if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
+	    nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+	    nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
+	    nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
+		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
 
@@ -339,11 +378,14 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
-	if (rf_mode == NFC_RF_INITIATOR)
-		NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx);
-	NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode);
-	NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode);
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
+	if (rf_mode == NFC_RF_INITIATOR &&
+	    nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
+		goto nla_put_failure;
+	if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) ||
+	    nla_put_u8(msg, NFC_ATTR_RF_MODE, rf_mode))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -376,7 +418,8 @@
 	if (!hdr)
 		goto free_msg;
 
-	NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index ec8794c..7d589a8 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -119,6 +119,7 @@
 void nfc_genl_data_exit(struct nfc_genl_data *genl_data);
 
 int nfc_genl_targets_found(struct nfc_dev *dev);
+int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx);
 
 int nfc_genl_device_added(struct nfc_dev *dev);
 int nfc_genl_device_removed(struct nfc_dev *dev);
@@ -127,7 +128,7 @@
 			       u8 comm_mode, u8 rf_mode);
 int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
 
-struct nfc_dev *nfc_get_device(unsigned idx);
+struct nfc_dev *nfc_get_device(unsigned int idx);
 
 static inline void nfc_put_device(struct nfc_dev *dev)
 {
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 5a839ce..ec1134c 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -92,6 +92,12 @@
 		goto error;
 	}
 
+	if (addr->target_idx > dev->target_next_idx - 1 ||
+	    addr->target_idx < dev->target_next_idx - dev->n_targets) {
+		rc = -EINVAL;
+		goto error;
+	}
+
 	rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
 	if (rc)
 		goto put_dev;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e44e631..2c74daa 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -321,7 +321,7 @@
 			return -ENOMEM;
 
 		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
-		if (!skb)
+		if (!nskb)
 			return -ENOMEM;
 
 		nskb->vlan_tci = 0;
@@ -421,6 +421,19 @@
 	return validate_actions(actions, key, depth + 1);
 }
 
+static int validate_tp_port(const struct sw_flow_key *flow_key)
+{
+	if (flow_key->eth.type == htons(ETH_P_IP)) {
+		if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
+			return 0;
+	} else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
+		if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
 static int validate_set(const struct nlattr *a,
 			const struct sw_flow_key *flow_key)
 {
@@ -462,18 +475,13 @@
 		if (flow_key->ip.proto != IPPROTO_TCP)
 			return -EINVAL;
 
-		if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
-			return -EINVAL;
-
-		break;
+		return validate_tp_port(flow_key);
 
 	case OVS_KEY_ATTR_UDP:
 		if (flow_key->ip.proto != IPPROTO_UDP)
 			return -EINVAL;
 
-		if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
-			return -EINVAL;
-		break;
+		return validate_tp_port(flow_key);
 
 	default:
 		return -EINVAL;
@@ -778,15 +786,18 @@
 	tcp_flags = flow->tcp_flags;
 	spin_unlock_bh(&flow->lock);
 
-	if (used)
-		NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
+	if (used &&
+	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
+		goto nla_put_failure;
 
-	if (stats.n_packets)
-		NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
-			sizeof(struct ovs_flow_stats), &stats);
+	if (stats.n_packets &&
+	    nla_put(skb, OVS_FLOW_ATTR_STATS,
+		    sizeof(struct ovs_flow_stats), &stats))
+		goto nla_put_failure;
 
-	if (tcp_flags)
-		NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+	if (tcp_flags &&
+	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
+		goto nla_put_failure;
 
 	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
 	 * this is the first flow to be dumped into 'skb'.  This is unusual for
@@ -1168,7 +1179,8 @@
 		goto nla_put_failure;
 
 	get_dp_stats(dp, &dp_stats);
-	NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);
+	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
+		goto nla_put_failure;
 
 	return genlmsg_end(skb, ovs_header);
 
@@ -1468,14 +1480,16 @@
 
 	ovs_header->dp_ifindex = get_dpifindex(vport->dp);
 
-	NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
-	NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
-	NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
-	NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
+	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
+	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
+	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
+	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
+		goto nla_put_failure;
 
 	ovs_vport_get_stats(vport, &vport_stats);
-	NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
-		&vport_stats);
+	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
+		    &vport_stats))
+		goto nla_put_failure;
 
 	err = ovs_vport_get_options(vport, skb);
 	if (err == -EMSGSIZE)
@@ -1641,10 +1655,9 @@
 	reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
 					 OVS_VPORT_CMD_NEW);
 	if (IS_ERR(reply)) {
-		err = PTR_ERR(reply);
 		netlink_set_err(init_net.genl_sock, 0,
-				ovs_dp_vport_multicast_group.id, err);
-		return 0;
+				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
+		goto exit_unlock;
 	}
 
 	genl_notify(reply, genl_info_net(info), info->snd_pid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 1252c30..6d4d809 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -183,7 +183,8 @@
 	u8 tcp_flags = 0;
 
 	if (flow->key.eth.type == htons(ETH_P_IP) &&
-	    flow->key.ip.proto == IPPROTO_TCP) {
+	    flow->key.ip.proto == IPPROTO_TCP &&
+	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
 		u8 *tcp = (u8 *)tcp_hdr(skb);
 		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
 	}
@@ -1174,11 +1175,13 @@
 	struct ovs_key_ethernet *eth_key;
 	struct nlattr *nla, *encap;
 
-	if (swkey->phy.priority)
-		NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);
+	if (swkey->phy.priority &&
+	    nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
+		goto nla_put_failure;
 
-	if (swkey->phy.in_port != USHRT_MAX)
-		NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);
+	if (swkey->phy.in_port != USHRT_MAX &&
+	    nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
+		goto nla_put_failure;
 
 	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
 	if (!nla)
@@ -1188,8 +1191,9 @@
 	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
 
 	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
-		NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
-		NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
+		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
+		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
+			goto nla_put_failure;
 		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
 		if (!swkey->eth.tci)
 			goto unencap;
@@ -1200,7 +1204,8 @@
 	if (swkey->eth.type == htons(ETH_P_802_2))
 		goto unencap;
 
-	NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);
+	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
+		goto nla_put_failure;
 
 	if (swkey->eth.type == htons(ETH_P_IP)) {
 		struct ovs_key_ipv4 *ipv4_key;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index c1068ae..3fd6c0d 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -140,9 +140,9 @@
 	return netdev_vport->dev->ifindex;
 }
 
-static unsigned packet_length(const struct sk_buff *skb)
+static unsigned int packet_length(const struct sk_buff *skb)
 {
-	unsigned length = skb->len - ETH_HLEN;
+	unsigned int length = skb->len - ETH_HLEN;
 
 	if (skb->protocol == htons(ETH_P_8021Q))
 		length -= VLAN_HLEN;
@@ -157,9 +157,9 @@
 	int len;
 
 	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
-		if (net_ratelimit())
-			pr_warn("%s: dropped over-mtu packet: %d > %d\n",
-				ovs_dp_name(vport->dp), packet_length(skb), mtu);
+		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
+				     ovs_dp_name(vport->dp),
+				     packet_length(skb), mtu);
 		goto error;
 	}
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4f2c0df..0f66174 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1654,7 +1654,7 @@
 			skb->data = skb_head;
 			skb->len = skb_len;
 		}
-		kfree_skb(skb);
+		consume_skb(skb);
 		skb = nskb;
 	}
 
@@ -1764,7 +1764,7 @@
 		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
 				  po->tp_reserve;
 	} else {
-		unsigned maclen = skb_network_offset(skb);
+		unsigned int maclen = skb_network_offset(skb);
 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
 				       (maclen < 16 ? 16 : maclen)) +
 			po->tp_reserve;
@@ -3224,10 +3224,10 @@
 			     char __user *optval, int __user *optlen)
 {
 	int len;
-	int val;
+	int val, lv = sizeof(val);
 	struct sock *sk = sock->sk;
 	struct packet_sock *po = pkt_sk(sk);
-	void *data;
+	void *data = &val;
 	struct tpacket_stats st;
 	union tpacket_stats_u st_u;
 
@@ -3242,21 +3242,17 @@
 
 	switch (optname) {
 	case PACKET_STATISTICS:
-		if (po->tp_version == TPACKET_V3) {
-			len = sizeof(struct tpacket_stats_v3);
-		} else {
-			if (len > sizeof(struct tpacket_stats))
-				len = sizeof(struct tpacket_stats);
-		}
 		spin_lock_bh(&sk->sk_receive_queue.lock);
 		if (po->tp_version == TPACKET_V3) {
+			lv = sizeof(struct tpacket_stats_v3);
 			memcpy(&st_u.stats3, &po->stats,
-			sizeof(struct tpacket_stats));
+			       sizeof(struct tpacket_stats));
 			st_u.stats3.tp_freeze_q_cnt =
-			po->stats_u.stats3.tp_freeze_q_cnt;
+					po->stats_u.stats3.tp_freeze_q_cnt;
 			st_u.stats3.tp_packets += po->stats.tp_drops;
 			data = &st_u.stats3;
 		} else {
+			lv = sizeof(struct tpacket_stats);
 			st = po->stats;
 			st.tp_packets += st.tp_drops;
 			data = &st;
@@ -3265,31 +3261,16 @@
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		break;
 	case PACKET_AUXDATA:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = po->auxdata;
-
-		data = &val;
 		break;
 	case PACKET_ORIGDEV:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = po->origdev;
-
-		data = &val;
 		break;
 	case PACKET_VNET_HDR:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = po->has_vnet_hdr;
-
-		data = &val;
 		break;
 	case PACKET_VERSION:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = po->tp_version;
-		data = &val;
 		break;
 	case PACKET_HDRLEN:
 		if (len > sizeof(int))
@@ -3309,39 +3290,28 @@
 		default:
 			return -EINVAL;
 		}
-		data = &val;
 		break;
 	case PACKET_RESERVE:
-		if (len > sizeof(unsigned int))
-			len = sizeof(unsigned int);
 		val = po->tp_reserve;
-		data = &val;
 		break;
 	case PACKET_LOSS:
-		if (len > sizeof(unsigned int))
-			len = sizeof(unsigned int);
 		val = po->tp_loss;
-		data = &val;
 		break;
 	case PACKET_TIMESTAMP:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = po->tp_tstamp;
-		data = &val;
 		break;
 	case PACKET_FANOUT:
-		if (len > sizeof(int))
-			len = sizeof(int);
 		val = (po->fanout ?
 		       ((u32)po->fanout->id |
 			((u32)po->fanout->type << 16)) :
 		       0);
-		data = &val;
 		break;
 	default:
 		return -ENOPROTOOPT;
 	}
 
+	if (len > lv)
+		len = lv;
 	if (put_user(len, optlen))
 		return -EFAULT;
 	if (copy_to_user(optval, data, len))
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index d65f699..779ce4f 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -129,7 +129,7 @@
 /* Phonet device header operations */
 static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
 				unsigned short type, const void *daddr,
-				const void *saddr, unsigned len)
+				const void *saddr, unsigned int len)
 {
 	u8 *media = skb_push(skb, 1);
 
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9726fe6..9dd4f92 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -273,7 +273,7 @@
 	hdr = pnp_hdr(skb);
 	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
 		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
-				(unsigned)hdr->data[0]);
+				(unsigned int)hdr->data[0]);
 		return -EOPNOTSUPP;
 	}
 
@@ -305,7 +305,7 @@
 
 	default:
 		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n",
-				(unsigned)hdr->data[1]);
+				(unsigned int)hdr->data[1]);
 		return -EOPNOTSUPP;
 	}
 	if (wake)
@@ -478,9 +478,9 @@
 	skb_queue_purge(&pn->ctrlreq_queue);
 }
 
-static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n)
+static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
 {
-	unsigned i;
+	unsigned int i;
 	u8 final_fc = PN_NO_FLOW_CONTROL;
 
 	for (i = 0; i < n; i++) {
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index bf5cf69..36f75a9 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -44,7 +44,7 @@
 	struct phonet_routes routes;
 };
 
-int phonet_net_id __read_mostly;
+static int phonet_net_id __read_mostly;
 
 static struct phonet_net *phonet_pernet(struct net *net)
 {
@@ -268,7 +268,7 @@
 static void phonet_route_autodel(struct net_device *dev)
 {
 	struct phonet_net *pnn = phonet_pernet(dev_net(dev));
-	unsigned i;
+	unsigned int i;
 	DECLARE_BITMAP(deleted, 64);
 
 	/* Remove left-over Phonet routes */
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index d61f676..cfdf135 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -116,7 +116,8 @@
 	ifm->ifa_flags = IFA_F_PERMANENT;
 	ifm->ifa_scope = RT_SCOPE_LINK;
 	ifm->ifa_index = dev->ifindex;
-	NLA_PUT_U8(skb, IFA_LOCAL, addr);
+	if (nla_put_u8(skb, IFA_LOCAL, addr))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -183,8 +184,9 @@
 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
 	rtm->rtm_type = RTN_UNICAST;
 	rtm->rtm_flags = 0;
-	NLA_PUT_U8(skb, RTA_DST, dst);
-	NLA_PUT_U32(skb, RTA_OIF, dev->ifindex);
+	if (nla_put_u8(skb, RTA_DST, dst) ||
+	    nla_put_u32(skb, RTA_OIF, dev->ifindex))
+		goto nla_put_failure;
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 4c7eff3..89cfa9c 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -58,7 +58,7 @@
 
 void __init pn_sock_init(void)
 {
-	unsigned i;
+	unsigned int i;
 
 	for (i = 0; i < PN_HASHSIZE; i++)
 		INIT_HLIST_HEAD(pnsocks.hlist + i);
@@ -116,7 +116,7 @@
 void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
 {
 	struct hlist_head *hlist = pnsocks.hlist;
-	unsigned h;
+	unsigned int h;
 
 	rcu_read_lock();
 	for (h = 0; h < PN_HASHSIZE; h++) {
@@ -545,7 +545,7 @@
 	struct hlist_head *hlist = pnsocks.hlist;
 	struct hlist_node *node;
 	struct sock *sknode;
-	unsigned h;
+	unsigned int h;
 
 	for (h = 0; h < PN_HASHSIZE; h++) {
 		sk_for_each_rcu(sknode, node, hlist) {
@@ -710,7 +710,7 @@
 
 void pn_sock_unbind_all_res(struct sock *sk)
 {
-	unsigned res, match = 0;
+	unsigned int res, match = 0;
 
 	mutex_lock(&resource_mutex);
 	for (res = 0; res < 256; res++) {
@@ -732,7 +732,7 @@
 static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
 {
 	struct net *net = seq_file_net(seq);
-	unsigned i;
+	unsigned int i;
 
 	if (!net_eq(net, &init_net))
 		return NULL;
@@ -750,7 +750,7 @@
 static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
 {
 	struct net *net = seq_file_net(seq);
-	unsigned i;
+	unsigned int i;
 
 	BUG_ON(!net_eq(net, &init_net));
 
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index cea1c7d..696348f 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -27,6 +27,10 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 
+#include <net/sock.h>
+#include <linux/phonet.h>
+#include <net/phonet/phonet.h>
+
 #define DYNAMIC_PORT_MIN	0x40
 #define DYNAMIC_PORT_MAX	0x7f
 
@@ -46,7 +50,8 @@
 
 void phonet_get_local_port_range(int *min, int *max)
 {
-	unsigned seq;
+	unsigned int seq;
+
 	do {
 		seq = read_seqbegin(&local_port_range_lock);
 		if (min)
@@ -93,19 +98,13 @@
 	{ }
 };
 
-static struct ctl_path phonet_ctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "phonet", },
-	{ },
-};
-
 int __init phonet_sysctl_init(void)
 {
-	phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table);
+	phonet_table_hrd = register_net_sysctl(&init_net, "net/phonet", phonet_table);
 	return phonet_table_hrd == NULL ? -ENOMEM : 0;
 }
 
 void phonet_sysctl_exit(void)
 {
-	unregister_sysctl_table(phonet_table_hrd);
+	unregister_net_sysctl_table(phonet_table_hrd);
 }
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index 1253b00..7e643ba 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -106,22 +106,15 @@
 	{ }
 };
 
-static struct ctl_path rds_ib_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "rds", },
-	{ .procname = "ib", },
-	{ }
-};
-
 void rds_ib_sysctl_exit(void)
 {
 	if (rds_ib_sysctl_hdr)
-		unregister_sysctl_table(rds_ib_sysctl_hdr);
+		unregister_net_sysctl_table(rds_ib_sysctl_hdr);
 }
 
 int rds_ib_sysctl_init(void)
 {
-	rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table);
+	rds_ib_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/ib", rds_ib_sysctl_table);
 	if (!rds_ib_sysctl_hdr)
 		return -ENOMEM;
 	return 0;
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index e2e4717..5d5ebd5 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -109,22 +109,15 @@
 	{ }
 };
 
-static struct ctl_path rds_iw_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "rds", },
-	{ .procname = "iw", },
-	{ }
-};
-
 void rds_iw_sysctl_exit(void)
 {
 	if (rds_iw_sysctl_hdr)
-		unregister_sysctl_table(rds_iw_sysctl_hdr);
+		unregister_net_sysctl_table(rds_iw_sysctl_hdr);
 }
 
 int rds_iw_sysctl_init(void)
 {
-	rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table);
+	rds_iw_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/iw", rds_iw_sysctl_table);
 	if (!rds_iw_sysctl_hdr)
 		return -ENOMEM;
 	return 0;
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index 25ad0c7..907214b 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -92,17 +92,10 @@
 	{ }
 };
 
-static struct ctl_path rds_sysctl_path[] = {
-	{ .procname = "net", },
-	{ .procname = "rds", },
-	{ }
-};
-
-
 void rds_sysctl_exit(void)
 {
 	if (rds_sysctl_reg_table)
-		unregister_sysctl_table(rds_sysctl_reg_table);
+		unregister_net_sysctl_table(rds_sysctl_reg_table);
 }
 
 int rds_sysctl_init(void)
@@ -110,7 +103,7 @@
 	rds_sysctl_reconnect_min = msecs_to_jiffies(1);
 	rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
 
-	rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table);
+	rds_sysctl_reg_table = register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
 	if (!rds_sysctl_reg_table)
 		return -ENOMEM;
 	return 0;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 8b5cc4a..7298137 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -145,7 +145,7 @@
 	if (ret < 0)
 		goto out;
 
-	sock->sk->sk_reuse = 1;
+	sock->sk->sk_reuse = SK_CAN_REUSE;
 	rds_tcp_nonagle(sock);
 
 	write_lock_bh(&sock->sk->sk_callback_lock);
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 906cc05..28dbdb9 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -37,7 +37,7 @@
 
 static int rose_header(struct sk_buff *skb, struct net_device *dev,
 		       unsigned short type,
-		       const void *daddr, const void *saddr, unsigned len)
+		       const void *daddr, const void *saddr, unsigned int len)
 {
 	unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2);
 
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 47f1fdb..7ca5774 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -399,7 +399,7 @@
 
 	facilities_len = *p++;
 
-	if (facilities_len == 0 || (unsigned)facilities_len > packet_len)
+	if (facilities_len == 0 || (unsigned int)facilities_len > packet_len)
 		return 0;
 
 	while (facilities_len >= 3 && *p == 0x00) {
diff --git a/net/rose/sysctl_net_rose.c b/net/rose/sysctl_net_rose.c
index df6d9da..94ca9c2 100644
--- a/net/rose/sysctl_net_rose.c
+++ b/net/rose/sysctl_net_rose.c
@@ -118,18 +118,12 @@
 	{ }
 };
 
-static struct ctl_path rose_path[] = {
-	{ .procname = "net", },
-	{ .procname = "rose", },
-	{ }
-};
-
 void __init rose_register_sysctl(void)
 {
-	rose_table_header = register_sysctl_paths(rose_path, rose_table);
+	rose_table_header = register_net_sysctl(&init_net, "net/rose", rose_table);
 }
 
 void rose_unregister_sysctl(void)
 {
-	unregister_sysctl_table(rose_table_header);
+	unregister_net_sysctl_table(rose_table_header);
 }
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 74c064c..05996d0 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -26,7 +26,7 @@
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NETPROTO(PF_RXRPC);
 
-unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
+unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
 module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(debug, "RxRPC debugging mask");
 
@@ -513,7 +513,7 @@
 			    char __user *optval, unsigned int optlen)
 {
 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
-	unsigned min_sec_level;
+	unsigned int min_sec_level;
 	int ret;
 
 	_enter(",%d,%d,,%d", level, optname, optlen);
@@ -555,13 +555,13 @@
 
 		case RXRPC_MIN_SECURITY_LEVEL:
 			ret = -EINVAL;
-			if (optlen != sizeof(unsigned))
+			if (optlen != sizeof(unsigned int))
 				goto error;
 			ret = -EISCONN;
 			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
 				goto error;
 			ret = get_user(min_sec_level,
-				       (unsigned __user *) optval);
+				       (unsigned int __user *) optval);
 			if (ret < 0)
 				goto error;
 			ret = -EINVAL;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index c3126e8..e4d9cbc 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -19,7 +19,7 @@
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-static unsigned rxrpc_ack_defer = 1;
+static unsigned int rxrpc_ack_defer = 1;
 
 static const char *const rxrpc_acks[] = {
 	"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
@@ -548,11 +548,11 @@
  * process the extra information that may be appended to an ACK packet
  */
 static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
-				  unsigned latest, int nAcks)
+				  unsigned int latest, int nAcks)
 {
 	struct rxrpc_ackinfo ackinfo;
 	struct rxrpc_peer *peer;
-	unsigned mtu;
+	unsigned int mtu;
 
 	if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
 		_leave(" [no ackinfo]");
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index bf656c2..a3bbb36 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -38,8 +38,8 @@
 struct kmem_cache *rxrpc_call_jar;
 LIST_HEAD(rxrpc_calls);
 DEFINE_RWLOCK(rxrpc_call_lock);
-static unsigned rxrpc_call_max_lifetime = 60;
-static unsigned rxrpc_dead_call_timeout = 2;
+static unsigned int rxrpc_call_max_lifetime = 60;
+static unsigned int rxrpc_dead_call_timeout = 2;
 
 static void rxrpc_destroy_call(struct work_struct *work);
 static void rxrpc_call_life_expired(unsigned long _call);
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 1a2b0633..529572f 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -76,7 +76,7 @@
 		 * --ANK */
 //		ret = -ENOBUFS;
 //		if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-//		    (unsigned) sk->sk_rcvbuf)
+//		    (unsigned int) sk->sk_rcvbuf)
 //			goto out;
 
 		ret = sk_filter(sk, skb);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 8e22bd3..a693aca 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -83,7 +83,7 @@
 	struct rxrpc_call	*call;		/* call with which associated */
 	unsigned long		resend_at;	/* time in jiffies at which to resend */
 	union {
-		unsigned	offset;		/* offset into buffer of next read */
+		unsigned int	offset;		/* offset into buffer of next read */
 		int		remain;		/* amount of space remaining for next write */
 		u32		error;		/* network error code */
 		bool		need_resend;	/* T if needs resending */
@@ -176,9 +176,9 @@
 	struct list_head	error_targets;	/* targets for net error distribution */
 	spinlock_t		lock;		/* access lock */
 	atomic_t		usage;
-	unsigned		if_mtu;		/* interface MTU for this peer */
-	unsigned		mtu;		/* network MTU for this peer */
-	unsigned		maxdata;	/* data size (MTU - hdrsize) */
+	unsigned int		if_mtu;		/* interface MTU for this peer */
+	unsigned int		mtu;		/* network MTU for this peer */
+	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
 	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
 	int			debug_id;	/* debug ID for printks */
 	int			net_error;	/* network error distributed */
@@ -187,8 +187,8 @@
 	/* calculated RTT cache */
 #define RXRPC_RTT_CACHE_SIZE 32
 	suseconds_t		rtt;		/* current RTT estimate (in uS) */
-	unsigned		rtt_point;	/* next entry at which to insert */
-	unsigned		rtt_usage;	/* amount of cache actually used */
+	unsigned int		rtt_point;	/* next entry at which to insert */
+	unsigned int		rtt_usage;	/* amount of cache actually used */
 	suseconds_t		rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
 };
 
@@ -271,7 +271,7 @@
 	} state;
 	int			error;		/* error code for local abort */
 	int			debug_id;	/* debug ID for printks */
-	unsigned		call_counter;	/* call ID counter */
+	unsigned int		call_counter;	/* call ID counter */
 	atomic_t		serial;		/* packet serial number counter */
 	atomic_t		hi_serial;	/* highest serial number received */
 	u8			avail_calls;	/* number of calls available */
@@ -592,7 +592,7 @@
 /*
  * debug tracing
  */
-extern unsigned rxrpc_debug;
+extern unsigned int rxrpc_debug;
 
 #define dbgprintk(FMT,...) \
 	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index ae3a035..8b1f9f4 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -82,7 +82,7 @@
  * - the caller guarantees we have at least 4 words
  */
 static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
-				       unsigned toklen)
+				       unsigned int toklen)
 {
 	struct rxrpc_key_token *token, **pptoken;
 	size_t plen;
@@ -210,10 +210,10 @@
  */
 static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
 				       const __be32 **_xdr,
-				       unsigned *_toklen)
+				       unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, n_parts, loop, tmp;
+	unsigned int toklen = *_toklen, n_parts, loop, tmp;
 
 	/* there must be at least one name, and at least #names+1 length
 	 * words */
@@ -286,10 +286,10 @@
 static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
 					 size_t max_data_size,
 					 const __be32 **_xdr,
-					 unsigned *_toklen)
+					 unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len;
 
 	/* there must be at least one tag and one length word */
 	if (toklen <= 8)
@@ -330,11 +330,11 @@
 					  u8 max_n_elem,
 					  size_t max_elem_size,
 					  const __be32 **_xdr,
-					  unsigned *_toklen)
+					  unsigned int *_toklen)
 {
 	struct krb5_tagged_data *td;
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, n_elem, loop;
+	unsigned int toklen = *_toklen, n_elem, loop;
 	int ret;
 
 	/* there must be at least one count */
@@ -380,10 +380,10 @@
  * extract a krb5 ticket
  */
 static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
-				    const __be32 **_xdr, unsigned *_toklen)
+				    const __be32 **_xdr, unsigned int *_toklen)
 {
 	const __be32 *xdr = *_xdr;
-	unsigned toklen = *_toklen, len;
+	unsigned int toklen = *_toklen, len;
 
 	/* there must be at least one length word */
 	if (toklen <= 4)
@@ -419,7 +419,7 @@
  * - the caller guarantees we have at least 4 words
  */
 static int rxrpc_instantiate_xdr_rxk5(struct key *key, const __be32 *xdr,
-				      unsigned toklen)
+				      unsigned int toklen)
 {
 	struct rxrpc_key_token *token, **pptoken;
 	struct rxk5_key *rxk5;
@@ -549,7 +549,7 @@
 {
 	const __be32 *xdr = data, *token;
 	const char *cp;
-	unsigned len, tmp, loop, ntoken, toklen, sec_ix;
+	unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
 	int ret;
 
 	_enter(",{%x,%x,%x,%x},%zu",
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 7635107..f226709 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -31,7 +31,7 @@
 #define REALM_SZ			40	/* size of principal's auth domain */
 #define SNAME_SZ			40	/* size of service name */
 
-unsigned rxrpc_debug;
+unsigned int rxrpc_debug;
 module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(debug, "rxkad debugging mask");
 
@@ -207,7 +207,7 @@
 	struct rxrpc_crypt iv;
 	struct scatterlist sg[16];
 	struct sk_buff *trailer;
-	unsigned len;
+	unsigned int len;
 	u16 check;
 	int nsg;
 
@@ -826,7 +826,7 @@
 	struct rxrpc_crypt iv, key;
 	struct scatterlist sg[1];
 	struct in_addr addr;
-	unsigned life;
+	unsigned int life;
 	time_t issue, now;
 	bool little_endian;
 	int ret;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 75b58f8..e7a8976 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -250,6 +250,28 @@
 
 	  If unsure, say N.
 
+config NET_SCH_CODEL
+	tristate "Controlled Delay AQM (CODEL)"
+	help
+	  Say Y here if you want to use the Controlled Delay (CODEL)
+	  packet scheduling algorithm.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sch_codel.
+
+	  If unsure, say N.
+
+config NET_SCH_FQ_CODEL
+	tristate "Fair Queue Controlled Delay AQM (FQ_CODEL)"
+	help
+	  Say Y here if you want to use the FQ Controlled Delay (FQ_CODEL)
+	  packet scheduling algorithm.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sch_fq_codel.
+
+	  If unsure, say N.
+
 config NET_SCH_INGRESS
 	tristate "Ingress Qdisc"
 	depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 8cdf4e2..5940a19 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -37,6 +37,8 @@
 obj-$(CONFIG_NET_SCH_MQPRIO)	+= sch_mqprio.o
 obj-$(CONFIG_NET_SCH_CHOKE)	+= sch_choke.o
 obj-$(CONFIG_NET_SCH_QFQ)	+= sch_qfq.o
+obj-$(CONFIG_NET_SCH_CODEL)	+= sch_codel.o
+obj-$(CONFIG_NET_SCH_FQ_CODEL)	+= sch_fq_codel.o
 
 obj-$(CONFIG_NET_CLS_U32)	+= cls_u32.o
 obj-$(CONFIG_NET_CLS_ROUTE4)	+= cls_route.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 93fdf13..5cfb160 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -127,7 +127,8 @@
 	nest = nla_nest_start(skb, a->order);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
+	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+		goto nla_put_failure;
 	for (i = 0; i < (hinfo->hmask + 1); i++) {
 		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
 
@@ -139,7 +140,8 @@
 			p = s_p;
 		}
 	}
-	NLA_PUT_U32(skb, TCA_FCNT, n_i);
+	if (nla_put_u32(skb, TCA_FCNT, n_i))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
 	return n_i;
@@ -437,7 +439,8 @@
 	if (a->ops == NULL || a->ops->dump == NULL)
 		return err;
 
-	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
+	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+		goto nla_put_failure;
 	if (tcf_action_copy_stats(skb, a, 0))
 		goto nla_put_failure;
 	nest = nla_nest_start(skb, TCA_OPTIONS);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 453a734..2c8ad7c 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -397,7 +397,7 @@
 
 	while (len > 1) {
 		switch (xh[off]) {
-		case IPV6_TLV_PAD0:
+		case IPV6_TLV_PAD1:
 			optlen = 1;
 			break;
 		case IPV6_TLV_JUMBO:
@@ -550,11 +550,13 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-	NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
+		goto nla_put_failure;
 
 	return skb->len;
 
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index b77f5a0..f10fb82 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -162,7 +162,8 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 #ifdef CONFIG_GACT_PROB
 	if (gact->tcfg_ptype) {
 		struct tc_gact_p p_opt = {
@@ -171,13 +172,15 @@
 			.ptype   = gact->tcfg_ptype,
 		};
 
-		NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
+		if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt))
+			goto nla_put_failure;
 	}
 #endif
 	t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
-	NLA_PUT(skb, TCA_GACT_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60f8f61..60e281a 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -1,5 +1,5 @@
 /*
- * net/sched/ipt.c	iptables target interface
+ * net/sched/ipt.c     iptables target interface
  *
  *TODO: Add other tables. For now we only support the ipv4 table targets
  *
@@ -235,9 +235,8 @@
 		result = TC_ACT_PIPE;
 		break;
 	default:
-		if (net_ratelimit())
-			pr_notice("tc filter: Bogus netfilter code"
-				  " %d assume ACCEPT\n", ret);
+		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
+				       ret);
 		result = TC_POLICE_OK;
 		break;
 	}
@@ -267,15 +266,17 @@
 	c.refcnt = ipt->tcf_refcnt - ref;
 	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
 
-	NLA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t);
-	NLA_PUT_U32(skb, TCA_IPT_INDEX, ipt->tcf_index);
-	NLA_PUT_U32(skb, TCA_IPT_HOOK, ipt->tcfi_hook);
-	NLA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c);
-	NLA_PUT_STRING(skb, TCA_IPT_TABLE, ipt->tcfi_tname);
+	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
+	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
+	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
+	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
+	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
+		goto nla_put_failure;
 	tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
 	tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
 	tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
-	NLA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm);
+	if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm))
+		goto nla_put_failure;
 	kfree(t);
 	return skb->len;
 
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index e051398..fe81cc1 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -174,9 +174,8 @@
 	}
 
 	if (!(dev->flags & IFF_UP)) {
-		if (net_ratelimit())
-			pr_notice("tc mirred to Houston: device %s is down\n",
-				  dev->name);
+		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
+				       dev->name);
 		goto out;
 	}
 
@@ -227,11 +226,13 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
-	NLA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 001d1b3..b5d029e 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -284,11 +284,13 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-	NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t))
+		goto nla_put_failure;
 
 	return skb->len;
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 10d3aed..26aa2f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -215,11 +215,13 @@
 	opt->refcnt = p->tcf_refcnt - ref;
 	opt->bindcnt = p->tcf_bindcnt - bind;
 
-	NLA_PUT(skb, TCA_PEDIT_PARMS, s, opt);
+	if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
-	NLA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t))
+		goto nla_put_failure;
 	kfree(opt);
 	return skb->len;
 
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 6fb3f5a..a9de232 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -356,11 +356,14 @@
 		opt.rate = police->tcfp_R_tab->rate;
 	if (police->tcfp_P_tab)
 		opt.peakrate = police->tcfp_P_tab->rate;
-	NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
-	if (police->tcfp_result)
-		NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
-	if (police->tcfp_ewma_rate)
-		NLA_PUT_U32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate);
+	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
+		goto nla_put_failure;
+	if (police->tcfp_result &&
+	    nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result))
+		goto nla_put_failure;
+	if (police->tcfp_ewma_rate &&
+	    nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 73e0a3a..3922f2a 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -172,12 +172,14 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
-	NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
+	if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
+	    nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
-	NLA_PUT(skb, TCA_DEF_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 35dbbe9..476e0fa 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -166,20 +166,25 @@
 	};
 	struct tcf_t t;
 
-	NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
-	if (d->flags & SKBEDIT_F_PRIORITY)
-		NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
-			&d->priority);
-	if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
-		NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
-			sizeof(d->queue_mapping), &d->queue_mapping);
-	if (d->flags & SKBEDIT_F_MARK)
-		NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
-			&d->mark);
+	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
+	if ((d->flags & SKBEDIT_F_PRIORITY) &&
+	    nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
+		    &d->priority))
+		goto nla_put_failure;
+	if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
+	    nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING,
+		    sizeof(d->queue_mapping), &d->queue_mapping))
+		goto nla_put_failure;
+	if ((d->flags & SKBEDIT_F_MARK) &&
+	    nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
+		    &d->mark))
+		goto nla_put_failure;
 	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
 	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
 	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
-	NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);
+	if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a69d44f..f452f69 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -357,7 +357,8 @@
 	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
 	tcm->tcm_parent = tp->classid;
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
-	NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
+	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
+		goto nla_put_failure;
 	tcm->tcm_handle = fh;
 	if (RTM_DELTFILTER != event) {
 		tcm->tcm_handle = 0;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index ea1f70b..590960a 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -257,8 +257,9 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (f->res.classid)
-		NLA_PUT_U32(skb, TCA_BASIC_CLASSID, f->res.classid);
+	if (f->res.classid &&
+	    nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
+		goto nla_put_failure;
 
 	if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 ||
 	    tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 1d8bd0d..ccd08c8 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -572,25 +572,32 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
-	NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);
+	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
+	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
+		goto nla_put_failure;
 
 	if (f->mask != ~0 || f->xor != 0) {
-		NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
-		NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
+		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
+		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
+			goto nla_put_failure;
 	}
-	if (f->rshift)
-		NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
-	if (f->addend)
-		NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);
+	if (f->rshift &&
+	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
+		goto nla_put_failure;
+	if (f->addend &&
+	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
+		goto nla_put_failure;
 
-	if (f->divisor)
-		NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
-	if (f->baseclass)
-		NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);
+	if (f->divisor &&
+	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
+		goto nla_put_failure;
+	if (f->baseclass &&
+	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
+		goto nla_put_failure;
 
-	if (f->perturb_period)
-		NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);
+	if (f->perturb_period &&
+	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
+		goto nla_put_failure;
 
 	if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
 		goto nla_put_failure;
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 389af15..8384a47 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -346,14 +346,17 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (f->res.classid)
-		NLA_PUT_U32(skb, TCA_FW_CLASSID, f->res.classid);
+	if (f->res.classid &&
+	    nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
+		goto nla_put_failure;
 #ifdef CONFIG_NET_CLS_IND
-	if (strlen(f->indev))
-		NLA_PUT_STRING(skb, TCA_FW_INDEV, f->indev);
+	if (strlen(f->indev) &&
+	    nla_put_string(skb, TCA_FW_INDEV, f->indev))
+		goto nla_put_failure;
 #endif /* CONFIG_NET_CLS_IND */
-	if (head->mask != 0xFFFFFFFF)
-		NLA_PUT_U32(skb, TCA_FW_MASK, head->mask);
+	if (head->mask != 0xFFFFFFFF &&
+	    nla_put_u32(skb, TCA_FW_MASK, head->mask))
+		goto nla_put_failure;
 
 	if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
 		goto nla_put_failure;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 13ab66e..36fec42 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -571,17 +571,21 @@
 
 	if (!(f->handle & 0x8000)) {
 		id = f->id & 0xFF;
-		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
+		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
+			goto nla_put_failure;
 	}
 	if (f->handle & 0x80000000) {
-		if ((f->handle >> 16) != 0xFFFF)
-			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
+		if ((f->handle >> 16) != 0xFFFF &&
+		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
+			goto nla_put_failure;
 	} else {
 		id = f->id >> 16;
-		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
+		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
+			goto nla_put_failure;
 	}
-	if (f->res.classid)
-		NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);
+	if (f->res.classid &&
+	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
+		goto nla_put_failure;
 
 	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
 		goto nla_put_failure;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index b014279..18ab93e 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -615,18 +615,22 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst);
+	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
+		goto nla_put_failure;
 	pinfo.dpi = s->dpi;
 	pinfo.spi = f->spi;
 	pinfo.protocol = s->protocol;
 	pinfo.tunnelid = s->tunnelid;
 	pinfo.tunnelhdr = f->tunnelhdr;
 	pinfo.pad = 0;
-	NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
-	if (f->res.classid)
-		NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
-	if (((f->handle >> 8) & 0xFF) != 16)
-		NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
+	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
+		goto nla_put_failure;
+	if (f->res.classid &&
+	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
+		goto nla_put_failure;
+	if (((f->handle >> 8) & 0xFF) != 16 &&
+	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
+		goto nla_put_failure;
 
 	if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
 		goto nla_put_failure;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index dbe1992..fe29420 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -438,10 +438,11 @@
 
 	if (!fh) {
 		t->tcm_handle = ~0; /* whatever ... */
-		NLA_PUT_U32(skb, TCA_TCINDEX_HASH, p->hash);
-		NLA_PUT_U16(skb, TCA_TCINDEX_MASK, p->mask);
-		NLA_PUT_U32(skb, TCA_TCINDEX_SHIFT, p->shift);
-		NLA_PUT_U32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through);
+		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
+		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
+		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
+		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
+			goto nla_put_failure;
 		nla_nest_end(skb, nest);
 	} else {
 		if (p->perfect) {
@@ -460,8 +461,9 @@
 			}
 		}
 		pr_debug("handle = %d\n", t->tcm_handle);
-		if (r->res.class)
-			NLA_PUT_U32(skb, TCA_TCINDEX_CLASSID, r->res.classid);
+		if (r->res.class &&
+		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
+			goto nla_put_failure;
 
 		if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
 			goto nla_put_failure;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 939b627..d45373f 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -234,8 +234,7 @@
 	return -1;
 
 deadloop:
-	if (net_ratelimit())
-		pr_warning("cls_u32: dead loop\n");
+	net_warn_ratelimited("cls_u32: dead loop\n");
 	return -1;
 }
 
@@ -733,36 +732,44 @@
 		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
 		u32 divisor = ht->divisor + 1;
 
-		NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
+		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
+			goto nla_put_failure;
 	} else {
-		NLA_PUT(skb, TCA_U32_SEL,
-			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
-			&n->sel);
+		if (nla_put(skb, TCA_U32_SEL,
+			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
+			    &n->sel))
+			goto nla_put_failure;
 		if (n->ht_up) {
 			u32 htid = n->handle & 0xFFFFF000;
-			NLA_PUT_U32(skb, TCA_U32_HASH, htid);
+			if (nla_put_u32(skb, TCA_U32_HASH, htid))
+				goto nla_put_failure;
 		}
-		if (n->res.classid)
-			NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid);
-		if (n->ht_down)
-			NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle);
+		if (n->res.classid &&
+		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
+			goto nla_put_failure;
+		if (n->ht_down &&
+		    nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
+			goto nla_put_failure;
 
 #ifdef CONFIG_CLS_U32_MARK
-		if (n->mark.val || n->mark.mask)
-			NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
+		if ((n->mark.val || n->mark.mask) &&
+		    nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
+			goto nla_put_failure;
 #endif
 
 		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
 			goto nla_put_failure;
 
 #ifdef CONFIG_NET_CLS_IND
-		if (strlen(n->indev))
-			NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
+		if (strlen(n->indev) &&
+		    nla_put_string(skb, TCA_U32_INDEV, n->indev))
+			goto nla_put_failure;
 #endif
 #ifdef CONFIG_CLS_U32_PERF
-		NLA_PUT(skb, TCA_U32_PCNT,
-		sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
-			n->pf);
+		if (nla_put(skb, TCA_U32_PCNT,
+			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
+			    n->pf))
+			goto nla_put_failure;
 #endif
 	}
 
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 1363bf1..4790c69 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -585,8 +585,9 @@
 
 static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 {
-	if (v->val && v->len)
-		NLA_PUT(skb, tlv, v->len, (void *) v->val);
+	if (v->val && v->len &&
+	    nla_put(skb, tlv, v->len, (void *) v->val))
+		goto nla_put_failure;
 	return 0;
 
 nla_put_failure:
@@ -636,10 +637,13 @@
 
 static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 {
-	if (v->len == sizeof(unsigned long))
-		NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
-	else if (v->len == sizeof(u32))
-		NLA_PUT_U32(skb, tlv, v->val);
+	if (v->len == sizeof(unsigned long)) {
+		if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
+			goto nla_put_failure;
+	} else if (v->len == sizeof(u32)) {
+		if (nla_put_u32(skb, tlv, v->val))
+			goto nla_put_failure;
+	}
 
 	return 0;
 
@@ -831,7 +835,8 @@
 	memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
 	memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));
 
-	NLA_PUT(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr);
+	if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
+		goto nla_put_failure;
 
 	ops = meta_type_ops(&meta->lvalue);
 	if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 88d93eb..3a633de 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -441,7 +441,8 @@
 	if (top_start == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr);
+	if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr))
+		goto nla_put_failure;
 
 	list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST);
 	if (list_start == NULL)
@@ -457,7 +458,8 @@
 			.flags = em->flags
 		};
 
-		NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
+		if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr))
+			goto nla_put_failure;
 
 		if (em->ops && em->ops->dump) {
 			if (em->ops->dump(skb, em) < 0)
@@ -535,9 +537,7 @@
 	return res;
 
 stack_overflow:
-	if (net_ratelimit())
-		pr_warning("tc ematch: local stack overflow,"
-			   " increase NET_EMATCH_STACK\n");
+	net_warn_ratelimited("tc ematch: local stack overflow, increase NET_EMATCH_STACK\n");
 	return -1;
 }
 EXPORT_SYMBOL(__tcf_em_tree_match);
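
The other recurring cleanup here replaces the open-coded "if (net_ratelimit()) pr_warning(...)" pattern with net_warn_ratelimited() and its _err/_notice/_info siblings seen further down. Paraphrased from memory of the helpers in include/linux/net.h of this kernel generation, the wrappers amount to:

#define net_ratelimited_function(function, ...)			\
do {								\
	if (net_ratelimit())					\
		function(__VA_ARGS__);				\
} while (0)

#define net_warn_ratelimited(fmt, ...)				\
	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)

Behaviour is unchanged; the message is simply emitted in one statement, and keeping the format string on a single line makes it greppable.
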
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 3d8981f..085ce53 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -426,7 +426,8 @@
 	nest = nla_nest_start(skb, TCA_STAB);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
+	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
 	return skb->len;
@@ -1201,7 +1202,8 @@
 	tcm->tcm_parent = clid;
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = atomic_read(&q->refcnt);
-	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
+	if (nla_put_string(skb, TCA_KIND, q->ops->id))
+		goto nla_put_failure;
 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
 		goto nla_put_failure;
 	q->qstats.qlen = q->q.qlen;
@@ -1505,7 +1507,8 @@
 	tcm->tcm_parent = q->handle;
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = 0;
-	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
+	if (nla_put_string(skb, TCA_KIND, q->ops->id))
+		goto nla_put_failure;
 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
 		goto nla_put_failure;
 
@@ -1688,12 +1691,10 @@
 		tp = otp;
 
 		if (verd++ >= MAX_REC_LOOP) {
-			if (net_ratelimit())
-				pr_notice("%s: packet reclassify loop"
-					  " rule prio %u protocol %02x\n",
-					  tp->q->ops->id,
-					  tp->prio & 0xffff,
-					  ntohs(tp->protocol));
+			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
+					       tp->q->ops->id,
+					       tp->prio & 0xffff,
+					       ntohs(tp->protocol));
 			return TC_ACT_SHOT;
 		}
 		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e25e490..8522a47 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -423,8 +423,6 @@
 		}
 		return ret;
 	}
-	qdisc_bstats_update(sch, skb);
-	bstats_update(&flow->bstats, skb);
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
 	 * it goes via ATM. The reason for this is that the outer qdisc
@@ -472,6 +470,8 @@
 			if (unlikely(!skb))
 				break;
 
+			qdisc_bstats_update(sch, skb);
+			bstats_update(&flow->bstats, skb);
 			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
 			/* remove any LL header somebody else has attached */
 			skb_pull(skb, skb_network_offset(skb));
@@ -601,7 +601,8 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr);
+	if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
+		goto nla_put_failure;
 	if (flow->vcc) {
 		struct sockaddr_atmpvc pvc;
 		int state;
@@ -610,15 +611,19 @@
 		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
 		pvc.sap_addr.vpi = flow->vcc->vpi;
 		pvc.sap_addr.vci = flow->vcc->vci;
-		NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc);
+		if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
+			goto nla_put_failure;
 		state = ATM_VF2VS(flow->vcc->flags);
-		NLA_PUT_U32(skb, TCA_ATM_STATE, state);
+		if (nla_put_u32(skb, TCA_ATM_STATE, state))
+			goto nla_put_failure;
 	}
-	if (flow->excess)
-		NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
-	else
-		NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
-
+	if (flow->excess) {
+		if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
+			goto nla_put_failure;
+	} else {
+		if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
+			goto nla_put_failure;
+	}
 	nla_nest_end(skb, nest);
 	return skb->len;
 
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 24d94c0..6aabd77 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1425,7 +1425,8 @@
 {
 	unsigned char *b = skb_tail_pointer(skb);
 
-	NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate);
+	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
@@ -1450,7 +1451,8 @@
 	opt.minidle = (u32)(-cl->minidle);
 	opt.offtime = cl->offtime;
 	opt.change = ~0;
-	NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
@@ -1468,7 +1470,8 @@
 	opt.priority = cl->priority + 1;
 	opt.cpriority = cl->cpriority + 1;
 	opt.weight = cl->weight;
-	NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
@@ -1485,7 +1488,8 @@
 	opt.priority2 = cl->priority2 + 1;
 	opt.pad = 0;
 	opt.penalty = cl->penalty;
-	NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
@@ -1502,7 +1506,8 @@
 		opt.split = cl->split ? cl->split->common.classid : 0;
 		opt.defmap = cl->defmap;
 		opt.defchange = ~0;
-		NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt);
+		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
+			goto nla_put_failure;
 	}
 	return skb->len;
 
@@ -1521,7 +1526,8 @@
 		opt.police = cl->police;
 		opt.__res1 = 0;
 		opt.__res2 = 0;
-		NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt);
+		if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
+			goto nla_put_failure;
 	}
 	return skb->len;
 
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 7e267d7..cc37dd5 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -332,15 +332,13 @@
 	}
 
 	q->stats.pdrop++;
-	sch->qstats.drops++;
-	kfree_skb(skb);
-	return NET_XMIT_DROP;
+	return qdisc_drop(skb, sch);
 
- congestion_drop:
+congestion_drop:
 	qdisc_drop(skb, sch);
 	return NET_XMIT_CN;
 
- other_drop:
+other_drop:
 	if (ret & __NET_XMIT_BYPASS)
 		sch->qstats.drops++;
 	kfree_skb(skb);
@@ -515,8 +513,9 @@
 	if (opts == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
-	NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
+	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
+	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
+		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
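
Several qdiscs in this series (choke above, plus dsmark, htb and teql below) also drop the open-coded kfree_skb()/qstats.drops++/NET_XMIT_DROP triple in favour of qdisc_drop(). Paraphrased from include/net/sch_generic.h of this era, the helper is simply:

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}
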
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
new file mode 100644
index 0000000..2f9ab17
--- /dev/null
+++ b/net/sched/sch_codel.c
@@ -0,0 +1,276 @@
+/*
+ * Codel - The Controlled-Delay Active Queue Management algorithm
+ *
+ *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
+ *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
+ *
+ *  Implemented on linux by :
+ *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
+ *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the authors may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/prefetch.h>
+#include <net/pkt_sched.h>
+#include <net/codel.h>
+
+
+#define DEFAULT_CODEL_LIMIT 1000
+
+struct codel_sched_data {
+	struct codel_params	params;
+	struct codel_vars	vars;
+	struct codel_stats	stats;
+	u32			drop_overlimit;
+};
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from queue. Note: backlog is handled in
+ * codel, we don't need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+	struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+	prefetch(&skb->end); /* we'll need skb_shinfo() */
+	return skb;
+}
+
+static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+{
+	struct codel_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+
+	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+
+	/* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
+	 * or HTB crashes. Defer it for next round.
+	 */
+	if (q->stats.drop_count && sch->q.qlen) {
+		qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+		q->stats.drop_count = 0;
+	}
+	if (skb)
+		qdisc_bstats_update(sch, skb);
+	return skb;
+}
+
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct codel_sched_data *q;
+
+	if (likely(qdisc_qlen(sch) < sch->limit)) {
+		codel_set_enqueue_time(skb);
+		return qdisc_enqueue_tail(skb, sch);
+	}
+	q = qdisc_priv(sch);
+	q->drop_overlimit++;
+	return qdisc_drop(skb, sch);
+}
+
+static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
+	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
+	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
+	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
+};
+
+static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct codel_sched_data *q = qdisc_priv(sch);
+	struct nlattr *tb[TCA_CODEL_MAX + 1];
+	unsigned int qlen;
+	int err;
+
+	if (!opt)
+		return -EINVAL;
+
+	err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy);
+	if (err < 0)
+		return err;
+
+	sch_tree_lock(sch);
+
+	if (tb[TCA_CODEL_TARGET]) {
+		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);
+
+		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
+	}
+
+	if (tb[TCA_CODEL_INTERVAL]) {
+		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
+
+		q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+	}
+
+	if (tb[TCA_CODEL_LIMIT])
+		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
+
+	if (tb[TCA_CODEL_ECN])
+		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
+
+	qlen = sch->q.qlen;
+	while (sch->q.qlen > sch->limit) {
+		struct sk_buff *skb = __skb_dequeue(&sch->q);
+
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_drop(skb, sch);
+	}
+	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct codel_sched_data *q = qdisc_priv(sch);
+
+	sch->limit = DEFAULT_CODEL_LIMIT;
+
+	codel_params_init(&q->params);
+	codel_vars_init(&q->vars);
+	codel_stats_init(&q->stats);
+
+	if (opt) {
+		int err = codel_change(sch, opt);
+
+		if (err)
+			return err;
+	}
+
+	if (sch->limit >= 1)
+		sch->flags |= TCQ_F_CAN_BYPASS;
+	else
+		sch->flags &= ~TCQ_F_CAN_BYPASS;
+
+	return 0;
+}
+
+static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct codel_sched_data *q = qdisc_priv(sch);
+	struct nlattr *opts;
+
+	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, TCA_CODEL_TARGET,
+			codel_time_to_us(q->params.target)) ||
+	    nla_put_u32(skb, TCA_CODEL_LIMIT,
+			sch->limit) ||
+	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
+			codel_time_to_us(q->params.interval)) ||
+	    nla_put_u32(skb, TCA_CODEL_ECN,
+			q->params.ecn))
+		goto nla_put_failure;
+
+	return nla_nest_end(skb, opts);
+
+nla_put_failure:
+	nla_nest_cancel(skb, opts);
+	return -1;
+}
+
+static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	const struct codel_sched_data *q = qdisc_priv(sch);
+	struct tc_codel_xstats st = {
+		.maxpacket	= q->stats.maxpacket,
+		.count		= q->vars.count,
+		.lastcount	= q->vars.lastcount,
+		.drop_overlimit = q->drop_overlimit,
+		.ldelay		= codel_time_to_us(q->vars.ldelay),
+		.dropping	= q->vars.dropping,
+		.ecn_mark	= q->stats.ecn_mark,
+	};
+
+	if (q->vars.dropping) {
+		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
+
+		if (delta >= 0)
+			st.drop_next = codel_time_to_us(delta);
+		else
+			st.drop_next = -codel_time_to_us(-delta);
+	}
+
+	return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void codel_reset(struct Qdisc *sch)
+{
+	struct codel_sched_data *q = qdisc_priv(sch);
+
+	qdisc_reset_queue(sch);
+	codel_vars_init(&q->vars);
+}
+
+static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
+	.id		=	"codel",
+	.priv_size	=	sizeof(struct codel_sched_data),
+
+	.enqueue	=	codel_qdisc_enqueue,
+	.dequeue	=	codel_qdisc_dequeue,
+	.peek		=	qdisc_peek_dequeued,
+	.init		=	codel_init,
+	.reset		=	codel_reset,
+	.change 	=	codel_change,
+	.dump		=	codel_dump,
+	.dump_stats	=	codel_dump_stats,
+	.owner		=	THIS_MODULE,
+};
+
+static int __init codel_module_init(void)
+{
+	return register_qdisc(&codel_qdisc_ops);
+}
+
+static void __exit codel_module_exit(void)
+{
+	unregister_qdisc(&codel_qdisc_ops);
+}
+
+module_init(codel_module_init)
+module_exit(codel_module_exit)
+
+MODULE_DESCRIPTION("Controlled Delay queue discipline");
+MODULE_AUTHOR("Dave Taht");
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 6b7fe4a..9ce0b4f 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -260,7 +260,8 @@
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum);
+	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
+		goto nla_put_failure;
 	return nla_nest_end(skb, nest);
 
 nla_put_failure:
@@ -375,8 +376,6 @@
 		cl->deficit = cl->quantum;
 	}
 
-	bstats_update(&cl->bstats, skb);
-
 	sch->q.qlen++;
 	return err;
 }
@@ -402,6 +401,8 @@
 			skb = qdisc_dequeue_peeked(cl->qdisc);
 			if (cl->qdisc->q.qlen == 0)
 				list_del(&cl->alist);
+
+			bstats_update(&cl->bstats, skb);
 			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
 			return skb;
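
sch_drr above, like the atm, hfsc and htb hunks elsewhere in this section, moves bstats_update() from the enqueue path to the dequeue path, so class byte/packet counters only reflect traffic that actually left the qdisc rather than packets that may still be dropped while queued. The shape of the change, as a sketch (example_class and example_dequeue are illustrative names, not kernel symbols):

struct example_class {
	struct gnet_stats_basic_packed bstats;
	struct Qdisc *qdisc;
};

static struct sk_buff *example_dequeue(struct Qdisc *sch,
				       struct example_class *cl)
{
	struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);

	if (skb) {
		/* moved here from ->enqueue() */
		bstats_update(&cl->bstats, skb);
		qdisc_bstats_update(sch, skb);
	}
	return skb;
}
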
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 2c79020..3886365 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -265,8 +265,7 @@
 	return NET_XMIT_SUCCESS;
 
 drop:
-	kfree_skb(skb);
-	sch->qstats.drops++;
+	qdisc_drop(skb, sch);
 	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
@@ -429,8 +428,9 @@
 	opts = nla_nest_start(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
-	NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
+	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]) ||
+	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]))
+		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
 
@@ -447,13 +447,16 @@
 	opts = nla_nest_start(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices);
+	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
+		goto nla_put_failure;
 
-	if (p->default_index != NO_DEFAULT_INDEX)
-		NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index);
+	if (p->default_index != NO_DEFAULT_INDEX &&
+	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
+		goto nla_put_failure;
 
-	if (p->set_tc_index)
-		NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX);
+	if (p->set_tc_index &&
+	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
+		goto nla_put_failure;
 
 	return nla_nest_end(skb, opts);
 
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 66effe2..e15a9eb 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -85,7 +85,8 @@
 {
 	struct tc_fifo_qopt opt = { .limit = sch->limit };
 
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
new file mode 100644
index 0000000..9fc1c62
--- /dev/null
+++ b/net/sched/sch_fq_codel.c
@@ -0,0 +1,626 @@
+/*
+ * Fair Queue CoDel discipline
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/flow_keys.h>
+#include <net/codel.h>
+
+/*	Fair Queue CoDel.
+ *
+ * Principles :
+ * Packets are classified (internal classifier or external) on flows.
+ * This is a Stochastic model (as we use a hash, several flows
+ *			       might be hashed on same slot)
+ * Each flow has a CoDel managed queue.
+ * Flows are linked onto two (Round Robin) lists,
+ * so that new flows have priority on old ones.
+ *
+ * For a given flow, packets are not reordered (CoDel uses a FIFO)
+ * head drops only.
+ * ECN capability is on by default.
+ * Low memory footprint (64 bytes per flow)
+ */
+
+struct fq_codel_flow {
+	struct sk_buff	  *head;
+	struct sk_buff	  *tail;
+	struct list_head  flowchain;
+	int		  deficit;
+	u32		  dropped; /* number of drops (or ECN marks) on this flow */
+	struct codel_vars cvars;
+}; /* please try to keep this structure <= 64 bytes */
+
+struct fq_codel_sched_data {
+	struct tcf_proto *filter_list;	/* optional external classifier */
+	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
+	u32		*backlogs;	/* backlog table [flows_cnt] */
+	u32		flows_cnt;	/* number of flows */
+	u32		perturbation;	/* hash perturbation */
+	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
+	struct codel_params cparams;
+	struct codel_stats cstats;
+	u32		drop_overlimit;
+	u32		new_flow_count;
+
+	struct list_head new_flows;	/* list of new flows */
+	struct list_head old_flows;	/* list of old flows */
+};
+
+static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
+				  const struct sk_buff *skb)
+{
+	struct flow_keys keys;
+	unsigned int hash;
+
+	skb_flow_dissect(skb, &keys);
+	hash = jhash_3words((__force u32)keys.dst,
+			    (__force u32)keys.src ^ keys.ip_proto,
+			    (__force u32)keys.ports, q->perturbation);
+	return ((u64)hash * q->flows_cnt) >> 32;
+}
+
+static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
+				      int *qerr)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct tcf_result res;
+	int result;
+
+	if (TC_H_MAJ(skb->priority) == sch->handle &&
+	    TC_H_MIN(skb->priority) > 0 &&
+	    TC_H_MIN(skb->priority) <= q->flows_cnt)
+		return TC_H_MIN(skb->priority);
+
+	if (!q->filter_list)
+		return fq_codel_hash(q, skb) + 1;
+
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+	result = tc_classify(skb, q->filter_list, &res);
+	if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+		switch (result) {
+		case TC_ACT_STOLEN:
+		case TC_ACT_QUEUED:
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+		case TC_ACT_SHOT:
+			return 0;
+		}
+#endif
+		if (TC_H_MIN(res.classid) <= q->flows_cnt)
+			return TC_H_MIN(res.classid);
+	}
+	return 0;
+}
+
+/* helper functions : might be changed when/if skb use a standard list_head */
+
+/* remove one skb from head of slot queue */
+static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
+{
+	struct sk_buff *skb = flow->head;
+
+	flow->head = skb->next;
+	skb->next = NULL;
+	return skb;
+}
+
+/* add skb to flow queue (tail add) */
+static inline void flow_queue_add(struct fq_codel_flow *flow,
+				  struct sk_buff *skb)
+{
+	if (flow->head == NULL)
+		flow->head = skb;
+	else
+		flow->tail->next = skb;
+	flow->tail = skb;
+	skb->next = NULL;
+}
+
+static unsigned int fq_codel_drop(struct Qdisc *sch)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	unsigned int maxbacklog = 0, idx = 0, i, len;
+	struct fq_codel_flow *flow;
+
+	/* Queue is full! Find the fat flow and drop packet from it.
+	 * This might sound expensive, but with 1024 flows, we scan
+	 * 4KB of memory, and we don't need to handle a complex tree

+	 * in fast path (packet queue/enqueue) with many cache misses.
+	 */
+	for (i = 0; i < q->flows_cnt; i++) {
+		if (q->backlogs[i] > maxbacklog) {
+			maxbacklog = q->backlogs[i];
+			idx = i;
+		}
+	}
+	flow = &q->flows[idx];
+	skb = dequeue_head(flow);
+	len = qdisc_pkt_len(skb);
+	q->backlogs[idx] -= len;
+	kfree_skb(skb);
+	sch->q.qlen--;
+	sch->qstats.drops++;
+	sch->qstats.backlog -= len;
+	flow->dropped++;
+	return idx;
+}
+
+static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	unsigned int idx;
+	struct fq_codel_flow *flow;
+	int uninitialized_var(ret);
+
+	idx = fq_codel_classify(skb, sch, &ret);
+	if (idx == 0) {
+		if (ret & __NET_XMIT_BYPASS)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+	idx--;
+
+	codel_set_enqueue_time(skb);
+	flow = &q->flows[idx];
+	flow_queue_add(flow, skb);
+	q->backlogs[idx] += qdisc_pkt_len(skb);
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
+	if (list_empty(&flow->flowchain)) {
+		list_add_tail(&flow->flowchain, &q->new_flows);
+		codel_vars_init(&flow->cvars);
+		q->new_flow_count++;
+		flow->deficit = q->quantum;
+		flow->dropped = 0;
+	}
+	if (++sch->q.qlen < sch->limit)
+		return NET_XMIT_SUCCESS;
+
+	q->drop_overlimit++;
+	/* Return Congestion Notification only if we dropped a packet
+	 * from this flow.
+	 */
+	if (fq_codel_drop(sch) == idx)
+		return NET_XMIT_CN;
+
+	/* As we dropped a packet, better let upper stack know this */
+	qdisc_tree_decrease_qlen(sch, 1);
+	return NET_XMIT_SUCCESS;
+}
+
+/* This is the specific function called from codel_dequeue()
+ * to dequeue a packet from queue. Note: backlog is handled in
+ * codel, we don't need to reduce it here.
+ */
+static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct fq_codel_flow *flow;
+	struct sk_buff *skb = NULL;
+
+	flow = container_of(vars, struct fq_codel_flow, cvars);
+	if (flow->head) {
+		skb = dequeue_head(flow);
+		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
+		sch->q.qlen--;
+	}
+	return skb;
+}
+
+static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	struct fq_codel_flow *flow;
+	struct list_head *head;
+	u32 prev_drop_count, prev_ecn_mark;
+
+begin:
+	head = &q->new_flows;
+	if (list_empty(head)) {
+		head = &q->old_flows;
+		if (list_empty(head))
+			return NULL;
+	}
+	flow = list_first_entry(head, struct fq_codel_flow, flowchain);
+
+	if (flow->deficit <= 0) {
+		flow->deficit += q->quantum;
+		list_move_tail(&flow->flowchain, &q->old_flows);
+		goto begin;
+	}
+
+	prev_drop_count = q->cstats.drop_count;
+	prev_ecn_mark = q->cstats.ecn_mark;
+
+	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
+			    dequeue);
+
+	flow->dropped += q->cstats.drop_count - prev_drop_count;
+	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
+
+	if (!skb) {
+		/* force a pass through old_flows to prevent starvation */
+		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
+			list_move_tail(&flow->flowchain, &q->old_flows);
+		else
+			list_del_init(&flow->flowchain);
+		goto begin;
+	}
+	qdisc_bstats_update(sch, skb);
+	flow->deficit -= qdisc_pkt_len(skb);
+	/* We can't call qdisc_tree_decrease_qlen() if our qlen is 0,
+	 * or HTB crashes. Defer it for next round.
+	 */
+	if (q->cstats.drop_count && sch->q.qlen) {
+		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+		q->cstats.drop_count = 0;
+	}
+	return skb;
+}
+
+static void fq_codel_reset(struct Qdisc *sch)
+{
+	struct sk_buff *skb;
+
+	while ((skb = fq_codel_dequeue(sch)) != NULL)
+		kfree_skb(skb);
+}
+
+static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
+	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
+};
+
+static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
+	int err;
+
+	if (!opt)
+		return -EINVAL;
+
+	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
+	if (err < 0)
+		return err;
+	if (tb[TCA_FQ_CODEL_FLOWS]) {
+		if (q->flows)
+			return -EINVAL;
+		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
+		if (!q->flows_cnt ||
+		    q->flows_cnt > 65536)
+			return -EINVAL;
+	}
+	sch_tree_lock(sch);
+
+	if (tb[TCA_FQ_CODEL_TARGET]) {
+		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
+
+		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
+	}
+
+	if (tb[TCA_FQ_CODEL_INTERVAL]) {
+		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
+
+		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+	}
+
+	if (tb[TCA_FQ_CODEL_LIMIT])
+		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
+
+	if (tb[TCA_FQ_CODEL_ECN])
+		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
+
+	if (tb[TCA_FQ_CODEL_QUANTUM])
+		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
+
+	while (sch->q.qlen > sch->limit) {
+		struct sk_buff *skb = fq_codel_dequeue(sch);
+
+		kfree_skb(skb);
+		q->cstats.drop_count++;
+	}
+	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+	q->cstats.drop_count = 0;
+
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static void *fq_codel_zalloc(size_t sz)
+{
+	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
+
+	if (!ptr)
+		ptr = vzalloc(sz);
+	return ptr;
+}
+
+static void fq_codel_free(void *addr)
+{
+	if (addr) {
+		if (is_vmalloc_addr(addr))
+			vfree(addr);
+		else
+			kfree(addr);
+	}
+}
+
+static void fq_codel_destroy(struct Qdisc *sch)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+
+	tcf_destroy_chain(&q->filter_list);
+	fq_codel_free(q->backlogs);
+	fq_codel_free(q->flows);
+}
+
+static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	int i;
+
+	sch->limit = 10*1024;
+	q->flows_cnt = 1024;
+	q->quantum = psched_mtu(qdisc_dev(sch));
+	q->perturbation = net_random();
+	INIT_LIST_HEAD(&q->new_flows);
+	INIT_LIST_HEAD(&q->old_flows);
+	codel_params_init(&q->cparams);
+	codel_stats_init(&q->cstats);
+	q->cparams.ecn = true;
+
+	if (opt) {
+		int err = fq_codel_change(sch, opt);
+		if (err)
+			return err;
+	}
+
+	if (!q->flows) {
+		q->flows = fq_codel_zalloc(q->flows_cnt *
+					   sizeof(struct fq_codel_flow));
+		if (!q->flows)
+			return -ENOMEM;
+		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
+		if (!q->backlogs) {
+			fq_codel_free(q->flows);
+			return -ENOMEM;
+		}
+		for (i = 0; i < q->flows_cnt; i++) {
+			struct fq_codel_flow *flow = q->flows + i;
+
+			INIT_LIST_HEAD(&flow->flowchain);
+		}
+	}
+	if (sch->limit >= 1)
+		sch->flags |= TCQ_F_CAN_BYPASS;
+	else
+		sch->flags &= ~TCQ_F_CAN_BYPASS;
+	return 0;
+}
+
+static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct nlattr *opts;
+
+	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
+
+	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
+			codel_time_to_us(q->cparams.target)) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
+			sch->limit) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
+			codel_time_to_us(q->cparams.interval)) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
+			q->cparams.ecn) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
+			q->quantum) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
+			q->flows_cnt))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, opts);
+	return skb->len;
+
+nla_put_failure:
+	return -1;
+}
+
+static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct tc_fq_codel_xstats st = {
+		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
+	};
+	struct list_head *pos;
+
+	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
+	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
+	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
+	st.qdisc_stats.new_flow_count = q->new_flow_count;
+
+	list_for_each(pos, &q->new_flows)
+		st.qdisc_stats.new_flows_len++;
+
+	list_for_each(pos, &q->old_flows)
+		st.qdisc_stats.old_flows_len++;
+
+	return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	return NULL;
+}
+
+static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
+{
+	return 0;
+}
+
+static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
+			      u32 classid)
+{
+	/* we cannot bypass queue discipline anymore */
+	sch->flags &= ~TCQ_F_CAN_BYPASS;
+	return 0;
+}
+
+static void fq_codel_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+
+	if (cl)
+		return NULL;
+	return &q->filter_list;
+}
+
+static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
+			  struct sk_buff *skb, struct tcmsg *tcm)
+{
+	tcm->tcm_handle |= TC_H_MIN(cl);
+	return 0;
+}
+
+static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+				     struct gnet_dump *d)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	u32 idx = cl - 1;
+	struct gnet_stats_queue qs = { 0 };
+	struct tc_fq_codel_xstats xstats;
+
+	if (idx < q->flows_cnt) {
+		const struct fq_codel_flow *flow = &q->flows[idx];
+		const struct sk_buff *skb = flow->head;
+
+		memset(&xstats, 0, sizeof(xstats));
+		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
+		xstats.class_stats.deficit = flow->deficit;
+		xstats.class_stats.ldelay =
+			codel_time_to_us(flow->cvars.ldelay);
+		xstats.class_stats.count = flow->cvars.count;
+		xstats.class_stats.lastcount = flow->cvars.lastcount;
+		xstats.class_stats.dropping = flow->cvars.dropping;
+		if (flow->cvars.dropping) {
+			codel_tdiff_t delta = flow->cvars.drop_next -
+					      codel_get_time();
+
+			xstats.class_stats.drop_next = (delta >= 0) ?
+				codel_time_to_us(delta) :
+				-codel_time_to_us(-delta);
+		}
+		while (skb) {
+			qs.qlen++;
+			skb = skb->next;
+		}
+		qs.backlog = q->backlogs[idx];
+		qs.drops = flow->dropped;
+	}
+	if (gnet_stats_copy_queue(d, &qs) < 0)
+		return -1;
+	if (idx < q->flows_cnt)
+		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
+	return 0;
+}
+
+static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	unsigned int i;
+
+	if (arg->stop)
+		return;
+
+	for (i = 0; i < q->flows_cnt; i++) {
+		if (list_empty(&q->flows[i].flowchain) ||
+		    arg->count < arg->skip) {
+			arg->count++;
+			continue;
+		}
+		if (arg->fn(sch, i + 1, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+		arg->count++;
+	}
+}
+
+static const struct Qdisc_class_ops fq_codel_class_ops = {
+	.leaf		=	fq_codel_leaf,
+	.get		=	fq_codel_get,
+	.put		=	fq_codel_put,
+	.tcf_chain	=	fq_codel_find_tcf,
+	.bind_tcf	=	fq_codel_bind,
+	.unbind_tcf	=	fq_codel_put,
+	.dump		=	fq_codel_dump_class,
+	.dump_stats	=	fq_codel_dump_class_stats,
+	.walk		=	fq_codel_walk,
+};
+
+static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
+	.cl_ops		=	&fq_codel_class_ops,
+	.id		=	"fq_codel",
+	.priv_size	=	sizeof(struct fq_codel_sched_data),
+	.enqueue	=	fq_codel_enqueue,
+	.dequeue	=	fq_codel_dequeue,
+	.peek		=	qdisc_peek_dequeued,
+	.drop		=	fq_codel_drop,
+	.init		=	fq_codel_init,
+	.reset		=	fq_codel_reset,
+	.destroy	=	fq_codel_destroy,
+	.change		=	fq_codel_change,
+	.dump		=	fq_codel_dump,
+	.dump_stats =	fq_codel_dump_stats,
+	.owner		=	THIS_MODULE,
+};
+
+static int __init fq_codel_module_init(void)
+{
+	return register_qdisc(&fq_codel_qdisc_ops);
+}
+
+static void __exit fq_codel_module_exit(void)
+{
+	unregister_qdisc(&fq_codel_qdisc_ops);
+}
+
+module_init(fq_codel_module_init)
+module_exit(fq_codel_module_exit)
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 67fc573..511323e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -86,9 +86,8 @@
 		 * deadloop is detected. Return OK to try the next skb.
 		 */
 		kfree_skb(skb);
-		if (net_ratelimit())
-			pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
-				   dev_queue->dev->name);
+		net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
+				     dev_queue->dev->name);
 		ret = qdisc_qlen(q);
 	} else {
 		/*
@@ -136,9 +135,9 @@
 		ret = handle_dev_cpu_collision(skb, txq, q);
 	} else {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
-		if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
-			pr_warning("BUG %s code %d qlen %d\n",
-				   dev->name, ret, q->q.qlen);
+		if (unlikely(ret != NETDEV_TX_BUSY))
+			net_warn_ratelimited("BUG %s code %d qlen %d\n",
+					     dev->name, ret, q->q.qlen);
 
 		ret = dev_requeue_skb(skb, q);
 	}
@@ -512,7 +511,8 @@
 	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 
 	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 8179494..e901583 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -255,10 +255,8 @@
 		u16 dp = tc_index_to_dp(skb);
 
 		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
-			if (net_ratelimit())
-				pr_warning("GRED: Unable to relocate VQ 0x%x "
-					   "after dequeue, screwing up "
-					   "backlog.\n", tc_index_to_dp(skb));
+			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
+					     tc_index_to_dp(skb));
 		} else {
 			q->backlog -= qdisc_pkt_len(skb);
 
@@ -287,10 +285,8 @@
 		u16 dp = tc_index_to_dp(skb);
 
 		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
-			if (net_ratelimit())
-				pr_warning("GRED: Unable to relocate VQ 0x%x "
-					   "while dropping, screwing up "
-					   "backlog.\n", tc_index_to_dp(skb));
+			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
+					     tc_index_to_dp(skb));
 		} else {
 			q->backlog -= len;
 			q->stats.other++;
@@ -521,14 +517,16 @@
 	opts = nla_nest_start(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
-	NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
+		goto nla_put_failure;
 
 	for (i = 0; i < MAX_DPs; i++) {
 		struct gred_sched_data *q = table->tab[i];
 
 		max_p[i] = q ? q->parms.max_P : 0;
 	}
-	NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p);
+	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
+		goto nla_put_failure;
 
 	parms = nla_nest_start(skb, TCA_GRED_PARMS);
 	if (parms == NULL)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 9bdca2e..6c2ec45 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1305,7 +1305,8 @@
 	tsc.m1 = sm2m(sc->sm1);
 	tsc.d  = dx2d(sc->dx);
 	tsc.m2 = sm2m(sc->sm2);
-	NLA_PUT(skb, attr, sizeof(tsc), &tsc);
+	if (nla_put(skb, attr, sizeof(tsc), &tsc))
+		goto nla_put_failure;
 
 	return skb->len;
 
@@ -1573,7 +1574,8 @@
 	}
 
 	qopt.defcls = q->defcls;
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
+		goto nla_put_failure;
 	return skb->len;
 
  nla_put_failure:
@@ -1607,7 +1609,6 @@
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
-	bstats_update(&cl->bstats, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -1655,6 +1656,7 @@
 		return NULL;
 	}
 
+	bstats_update(&cl->bstats, skb);
 	update_vf(cl, qdisc_pkt_len(skb), cur_time);
 	if (realtime)
 		cl->cl_cumul += qdisc_pkt_len(skb);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 29b942c..9d75b77 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -558,9 +558,7 @@
 			__skb_queue_tail(&q->direct_queue, skb);
 			q->direct_pkts++;
 		} else {
-			kfree_skb(skb);
-			sch->qstats.drops++;
-			return NET_XMIT_DROP;
+			return qdisc_drop(skb, sch);
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
@@ -576,7 +574,6 @@
 		}
 		return ret;
 	} else {
-		bstats_update(&cl->bstats, skb);
 		htb_activate(q, cl);
 	}
 
@@ -837,6 +834,7 @@
 	} while (cl != start);
 
 	if (likely(skb != NULL)) {
+		bstats_update(&cl->bstats, skb);
 		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
 		if (cl->un.leaf.deficit[level] < 0) {
 			cl->un.leaf.deficit[level] += cl->quantum;
@@ -1051,7 +1049,8 @@
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
+	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt))
+		goto nla_put_failure;
 	nla_nest_end(skb, nest);
 
 	spin_unlock_bh(root_lock);
@@ -1090,7 +1089,8 @@
 	opt.quantum = cl->quantum;
 	opt.prio = cl->prio;
 	opt.level = cl->level;
-	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
 	spin_unlock_bh(root_lock);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 28de430..d1831ca 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -247,7 +247,8 @@
 		opt.offset[i] = dev->tc_to_txq[i].offset;
 	}
 
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	return skb->len;
 nla_put_failure:
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 49131d7..2a2b096 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -284,7 +284,8 @@
 	opt.bands = q->bands;
 	opt.max_bands = q->max_bands;
 
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	return skb->len;
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5da548f..a2a95aa 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -26,6 +26,7 @@
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
 
 #define VERSION "1.3"
 
@@ -78,6 +79,7 @@
 	psched_tdiff_t jitter;
 
 	u32 loss;
+	u32 ecn;
 	u32 limit;
 	u32 counter;
 	u32 gap;
@@ -374,9 +376,12 @@
 		++count;
 
 	/* Drop packet? */
-	if (loss_event(q))
-		--count;
-
+	if (loss_event(q)) {
+		if (q->ecn && INET_ECN_set_ce(skb))
+			sch->qstats.drops++; /* mark packet */
+		else
+			--count;
+	}
 	if (count == 0) {
 		sch->qstats.drops++;
 		kfree_skb(skb);
@@ -408,10 +413,8 @@
 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
-		     skb_checksum_help(skb))) {
-			sch->qstats.drops++;
-			return NET_XMIT_DROP;
-		}
+		     skb_checksum_help(skb)))
+			return qdisc_drop(skb, sch);
 
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
@@ -706,6 +709,7 @@
 	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
 	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
 	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
+	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 };
 
 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -776,6 +780,9 @@
 	if (tb[TCA_NETEM_RATE])
 		get_rate(sch, tb[TCA_NETEM_RATE]);
 
+	if (tb[TCA_NETEM_ECN])
+		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
+
 	q->loss_model = CLG_RANDOM;
 	if (tb[TCA_NETEM_LOSS])
 		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
@@ -834,7 +841,8 @@
 			.p23 = q->clg.a5,
 		};
 
-		NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
+		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
+			goto nla_put_failure;
 		break;
 	}
 	case CLG_GILB_ELL: {
@@ -845,7 +853,8 @@
 			.k1 = q->clg.a4,
 		};
 
-		NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
+		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
+			goto nla_put_failure;
 		break;
 	}
 	}
@@ -874,26 +883,34 @@
 	qopt.loss = q->loss;
 	qopt.gap = q->gap;
 	qopt.duplicate = q->duplicate;
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
+		goto nla_put_failure;
 
 	cor.delay_corr = q->delay_cor.rho;
 	cor.loss_corr = q->loss_cor.rho;
 	cor.dup_corr = q->dup_cor.rho;
-	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);
+	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
+		goto nla_put_failure;
 
 	reorder.probability = q->reorder;
 	reorder.correlation = q->reorder_cor.rho;
-	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);
+	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
+		goto nla_put_failure;
 
 	corrupt.probability = q->corrupt;
 	corrupt.correlation = q->corrupt_cor.rho;
-	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
+	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
+		goto nla_put_failure;
 
 	rate.rate = q->rate;
 	rate.packet_overhead = q->packet_overhead;
 	rate.cell_size = q->cell_size;
 	rate.cell_overhead = q->cell_overhead;
-	NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
+	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
+		goto nla_put_failure;
+
+	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
+		goto nla_put_failure;
 
 	if (dump_loss_model(q, skb) != 0)
 		goto nla_put_failure;
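
The netem change adds a TCA_NETEM_ECN attribute: with it set, a loss event first tries to mark the packet Congestion Experienced instead of removing it. INET_ECN_set_ce() returns nonzero only when the packet is ECN-capable (or already CE), so non-ECT traffic is still dropped via --count. Restated purely for clarity as a hypothetical helper (netem_mark_instead_of_drop is not a kernel function):

static bool netem_mark_instead_of_drop(const struct netem_sched_data *q,
				       struct sk_buff *skb)
{
	/* True when the "loss" should be delivered as a CE mark instead. */
	return q->ecn && INET_ECN_set_ce(skb);
}
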
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b5d56a2..79359b6 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -247,7 +247,8 @@
 	opt.bands = q->bands;
 	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
 
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	return skb->len;
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index e68cb44..9af01f3 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -429,8 +429,9 @@
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w);
-	NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax);
+	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) ||
+	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax))
+		goto nla_put_failure;
 	return nla_nest_end(skb, nest);
 
 nla_put_failure:
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a5cc301..633e32d 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -272,8 +272,9 @@
 	opts = nla_nest_start(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
-	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
-	NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P);
+	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
+	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
+		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index d7eea99..74305c8 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -570,7 +570,8 @@
 
 	sch->qstats.backlog = q->qdisc->qstats.backlog;
 	opts = nla_nest_start(skb, TCA_OPTIONS);
-	NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 02a21ab..d3a1bc2 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -812,7 +812,8 @@
 	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
 	opt.flags	= q->flags;
 
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	return skb->len;
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b8e1563..4b056c15 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -359,7 +359,8 @@
 		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
 	opt.mtu = q->mtu;
 	opt.buffer = q->buffer;
-	NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
+	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
+		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
 	return skb->len;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 4532659..ca0c296 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -88,9 +88,7 @@
 		return NET_XMIT_SUCCESS;
 	}
 
-	kfree_skb(skb);
-	sch->qstats.drops++;
-	return NET_XMIT_DROP;
+	return qdisc_drop(skb, sch);
 }
 
 static struct sk_buff *
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index acd2edb..5bc9ab1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1408,7 +1408,7 @@
 }
 
 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
+void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
 {
 	struct sctp_chunk *sack;
 	struct timer_list *timer;
@@ -1465,7 +1465,7 @@
 }
 
 /* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
+void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
 {
 	int rx_count;
 	int over = 0;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80f71af..80564fe 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -342,7 +342,7 @@
 		sctp_bh_lock_sock(sk);
 
 		if (sock_owned_by_user(sk)) {
-			if (sk_add_backlog(sk, skb))
+			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 				sctp_chunk_free(chunk);
 			else
 				backloged = 1;
@@ -376,7 +376,7 @@
 	struct sctp_ep_common *rcvr = chunk->rcvr;
 	int ret;
 
-	ret = sk_add_backlog(sk, skb);
+	ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 	if (!ret) {
 		/* Hold the assoc/ep while hanging on the backlog queue.
 		 * This way, we know structures we need will not disappear
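
Both SCTP call sites above now pass an explicit limit to sk_add_backlog(); the companion change to include/net/sock.h (outside this excerpt) adds that third argument so the backlog is bounded against the receive buffer. The check it relies on is roughly the following, modelled on sk_rcvqueues_full() but paraphrased, and the exact accounting may differ:

/* Refuse to backlog once receive memory plus backlog exceed the limit. */
static inline bool example_rcvqueues_full(const struct sock *sk,
					  unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len +
			     atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}
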
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 817174e..f1b7d4b 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -377,9 +377,7 @@
 	 */
 	skb_set_owner_w(nskb, sk);
 
-	/* The 'obsolete' field of dst is set to 2 when a dst is freed. */
-	if (!dst || (dst->obsolete > 1)) {
-		dst_release(dst);
+	if (!sctp_transport_dst_check(tp)) {
 		sctp_transport_route(tp, NULL, sctp_sk(sk));
 		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
 			sctp_assoc_sync_pmtu(asoc);
@@ -663,8 +661,8 @@
 	 */
 	if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
 	    inflight && sctp_state(asoc, ESTABLISHED)) {
-		unsigned max = transport->pathmtu - packet->overhead;
-		unsigned len = chunk->skb->len + q->out_qlen;
+		unsigned int max = transport->pathmtu - packet->overhead;
+		unsigned int len = chunk->skb->len + q->out_qlen;
 
 		/* Check whether this chunk and all the rest of pending
 		 * data will fit or delay in hopes of bundling a full
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index cfeb1d4..a0fa19f 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1147,7 +1147,7 @@
 	__u32 sack_ctsn, ctsn, tsn;
 	__u32 highest_tsn, highest_new_tsn;
 	__u32 sack_a_rwnd;
-	unsigned outstanding;
+	unsigned int outstanding;
 	struct sctp_transport *primary = asoc->peer.primary_path;
 	int count_of_newacks = 0;
 	int gap_ack_blocks;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1ff51c9..c96d1a8 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -524,7 +524,7 @@
 /* Worker routine to handle INIT command failure.  */
 static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
 				 struct sctp_association *asoc,
-				 unsigned error)
+				 unsigned int error)
 {
 	struct sctp_ulpevent *event;
 
@@ -550,7 +550,7 @@
 				  sctp_event_t event_type,
 				  sctp_subtype_t subtype,
 				  struct sctp_chunk *chunk,
-				  unsigned error)
+				  unsigned int error)
 {
 	struct sctp_ulpevent *event;
 
@@ -1161,9 +1161,8 @@
 		break;
 
 	case SCTP_DISPOSITION_VIOLATION:
-		if (net_ratelimit())
-			pr_err("protocol violation state %d chunkid %d\n",
-			       state, subtype.chunk);
+		net_err_ratelimited("protocol violation state %d chunkid %d\n",
+				    state, subtype.chunk);
 		break;
 
 	case SCTP_DISPOSITION_NOT_IMPL:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 891f5db..9fca103 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1129,17 +1129,15 @@
 	/* This should never happen, but lets log it if so.  */
 	if (unlikely(!link)) {
 		if (from_addr.sa.sa_family == AF_INET6) {
-			if (net_ratelimit())
-				pr_warn("%s association %p could not find address %pI6\n",
-					__func__,
-					asoc,
-					&from_addr.v6.sin6_addr);
+			net_warn_ratelimited("%s association %p could not find address %pI6\n",
+					     __func__,
+					     asoc,
+					     &from_addr.v6.sin6_addr);
 		} else {
-			if (net_ratelimit())
-				pr_warn("%s association %p could not find address %pI4\n",
-					__func__,
-					asoc,
-					&from_addr.v4.sin_addr.s_addr);
+			net_warn_ratelimited("%s association %p could not find address %pI4\n",
+					     __func__,
+					     asoc,
+					     &from_addr.v4.sin_addr.s_addr);
 		}
 		return SCTP_DISPOSITION_DISCARD;
 	}
@@ -2410,7 +2408,7 @@
 					sctp_cmd_seq_t *commands)
 {
 	struct sctp_chunk *chunk = arg;
-	unsigned len;
+	unsigned int len;
 	__be16 error = SCTP_ERROR_NO_ERROR;
 
 	/* See if we have an error cause code in the chunk.  */
@@ -2446,7 +2444,7 @@
 				     sctp_cmd_seq_t *commands)
 {
 	struct sctp_chunk *chunk = arg;
-	unsigned len;
+	unsigned int len;
 	__be16 error = SCTP_ERROR_NO_ERROR;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 92ba71d..b3b8a8d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5840,10 +5840,8 @@
 	if (!sctp_sk(sk)->hmac && sctp_hmac_alg) {
 		tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
 		if (IS_ERR(tfm)) {
-			if (net_ratelimit()) {
-				pr_info("failed to load transform for %s: %ld\n",
-					sctp_hmac_alg, PTR_ERR(tfm));
-			}
+			net_info_ratelimited("failed to load transform for %s: %ld\n",
+					     sctp_hmac_alg, PTR_ERR(tfm));
 			return -ENOSYS;
 		}
 		sctp_sk(sk)->hmac = tfm;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 60ffbd0..e5fe639 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -275,22 +275,16 @@
 	{ /* sentinel */ }
 };
 
-static struct ctl_path sctp_path[] = {
-	{ .procname = "net", },
-	{ .procname = "sctp", },
-	{ }
-};
-
 static struct ctl_table_header * sctp_sysctl_header;
 
 /* Sysctl registration.  */
 void sctp_sysctl_register(void)
 {
-	sctp_sysctl_header = register_sysctl_paths(sctp_path, sctp_table);
+	sctp_sysctl_header = register_net_sysctl(&init_net, "net/sctp", sctp_table);
 }
 
 /* Sysctl deregistration.  */
 void sctp_sysctl_unregister(void)
 {
-	unregister_sysctl_table(sctp_sysctl_header);
+	unregister_net_sysctl_table(sctp_sysctl_header);
 }
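
SCTP's sysctl registration drops the ctl_path array in favour of register_net_sysctl(), which takes the path as a plain string plus a struct net. Assuming the usual shape of that API (net/sysctl_net.c), a typical per-protocol registration looks like the sketch below; example_table, example_hdr and "net/example" are placeholders:

static struct ctl_table example_table[] = {
	/* ... per-protocol sysctl entries ... */
	{ }	/* sentinel */
};

static struct ctl_table_header *example_hdr;

static int example_sysctl_register(void)
{
	example_hdr = register_net_sysctl(&init_net, "net/example",
					  example_table);
	return example_hdr ? 0 : -ENOMEM;
}
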
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3889330..b026ba0 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -226,23 +226,6 @@
 		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
 
-/* this is a complete rip-off from __sk_dst_check
- * the cookie is always 0 since this is how it's used in the
- * pmtu code
- */
-static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
-{
-	struct dst_entry *dst = t->dst;
-
-	if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
-		dst_release(t->dst);
-		t->dst = NULL;
-		return NULL;
-	}
-
-	return dst;
-}
-
 void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
 	struct dst_entry *dst;
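
sctp_transport_dst_check() is removed from transport.c here but is still called from the output.c hunk above, so it has presumably been moved into a header elsewhere in this series. An equivalent check, sketched with the generic dst_check() helper and cookie 0 as the deleted comment describes:

static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
	if (t->dst && !dst_check(t->dst, 0)) {
		dst_release(t->dst);
		t->dst = NULL;
	}
	return t->dst;
}
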
diff --git a/net/socket.c b/net/socket.c
index 851edcd..2a2898ce 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1234,8 +1234,7 @@
 	 */
 	sock = sock_alloc();
 	if (!sock) {
-		if (net_ratelimit())
-			printk(KERN_WARNING "socket: no more sockets\n");
+		net_warn_ratelimited("socket: no more sockets\n");
 		return -ENFILE;	/* Not exactly a match, but its the
 				   closest posix thing */
 	}
@@ -1479,7 +1478,7 @@
 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
 	if (sock) {
 		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
-		if ((unsigned)backlog > somaxconn)
+		if ((unsigned int)backlog > somaxconn)
 			backlog = somaxconn;
 
 		err = security_socket_listen(sock, backlog);
@@ -1691,7 +1690,7 @@
  */
 
 SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
-		unsigned, flags, struct sockaddr __user *, addr,
+		unsigned int, flags, struct sockaddr __user *, addr,
 		int, addr_len)
 {
 	struct socket *sock;
@@ -1738,7 +1737,7 @@
  */
 
 SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
-		unsigned, flags)
+		unsigned int, flags)
 {
 	return sys_sendto(fd, buff, len, flags, NULL, 0);
 }
@@ -1750,7 +1749,7 @@
  */
 
 SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
-		unsigned, flags, struct sockaddr __user *, addr,
+		unsigned int, flags, struct sockaddr __user *, addr,
 		int __user *, addr_len)
 {
 	struct socket *sock;
@@ -1795,7 +1794,7 @@
  */
 
 asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
-			 unsigned flags)
+			 unsigned int flags)
 {
 	return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
 }
@@ -1897,7 +1896,7 @@
 };
 
 static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
-			 struct msghdr *msg_sys, unsigned flags,
+			 struct msghdr *msg_sys, unsigned int flags,
 			 struct used_address *used_address)
 {
 	struct compat_msghdr __user *msg_compat =
@@ -1908,7 +1907,7 @@
 	    __attribute__ ((aligned(sizeof(__kernel_size_t))));
 	/* 20 is size of ipv6_pktinfo */
 	unsigned char *ctl_buf = ctl;
-	int err, ctl_len, iov_size, total_len;
+	int err, ctl_len, total_len;
 
 	err = -EFAULT;
 	if (MSG_CMSG_COMPAT & flags) {
@@ -1917,16 +1916,13 @@
 	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
 		return -EFAULT;
 
-	/* do not move before msg_sys is valid */
-	err = -EMSGSIZE;
-	if (msg_sys->msg_iovlen > UIO_MAXIOV)
-		goto out;
-
-	/* Check whether to allocate the iovec area */
-	err = -ENOMEM;
-	iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
-		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+		err = -EMSGSIZE;
+		if (msg_sys->msg_iovlen > UIO_MAXIOV)
+			goto out;
+		err = -ENOMEM;
+		iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
+			      GFP_KERNEL);
 		if (!iov)
 			goto out;
 	}
@@ -2005,7 +2001,7 @@
 		sock_kfree_s(sock->sk, ctl_buf, ctl_len);
 out_freeiov:
 	if (iov != iovstack)
-		sock_kfree_s(sock->sk, iov, iov_size);
+		kfree(iov);
 out:
 	return err;
 }
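
The sendmsg/recvmsg paths above now bound-check msg_iovlen only when the on-stack UIO_FASTIOV array is too small, and allocate the large case with plain kmalloc()/kfree() instead of sock_kmalloc()/sock_kfree_s(). The resulting allocation shape, pulled out as a stand-alone sketch (nr_segs and the demo_* helpers are simplifications, not kernel code):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uio.h>

/* Returns a usable array of nr_segs iovecs: either the caller's on-stack
 * iovstack[UIO_FASTIOV], or a freshly kmalloc()ed buffer on the slow path.
 */
static struct iovec *demo_get_iovec(size_t nr_segs, struct iovec *iovstack,
				    long *err)
{
	struct iovec *iov = iovstack;

	*err = 0;
	if (nr_segs > UIO_FASTIOV) {
		if (nr_segs > UIO_MAXIOV) {
			*err = -EMSGSIZE;	/* only checked on the slow path */
			return NULL;
		}
		iov = kmalloc(nr_segs * sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			*err = -ENOMEM;
	}
	return iov;
}

static void demo_put_iovec(struct iovec *iov, struct iovec *iovstack)
{
	if (iov != iovstack)	/* only free the slow-path allocation */
		kfree(iov);
}
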
@@ -2014,7 +2010,7 @@
  *	BSD sendmsg interface
  */
 
-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
 {
 	int fput_needed, err;
 	struct msghdr msg_sys;
@@ -2096,14 +2092,14 @@
 }
 
 static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
-			 struct msghdr *msg_sys, unsigned flags, int nosec)
+			 struct msghdr *msg_sys, unsigned int flags, int nosec)
 {
 	struct compat_msghdr __user *msg_compat =
 	    (struct compat_msghdr __user *)msg;
 	struct iovec iovstack[UIO_FASTIOV];
 	struct iovec *iov = iovstack;
 	unsigned long cmsg_ptr;
-	int err, iov_size, total_len, len;
+	int err, total_len, len;
 
 	/* kernel mode address */
 	struct sockaddr_storage addr;
@@ -2118,15 +2114,13 @@
 	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
 		return -EFAULT;
 
-	err = -EMSGSIZE;
-	if (msg_sys->msg_iovlen > UIO_MAXIOV)
-		goto out;
-
-	/* Check whether to allocate the iovec area */
-	err = -ENOMEM;
-	iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
 	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
-		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+		err = -EMSGSIZE;
+		if (msg_sys->msg_iovlen > UIO_MAXIOV)
+			goto out;
+		err = -ENOMEM;
+		iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
+			      GFP_KERNEL);
 		if (!iov)
 			goto out;
 	}
@@ -2180,7 +2174,7 @@
 
 out_freeiov:
 	if (iov != iovstack)
-		sock_kfree_s(sock->sk, iov, iov_size);
+		kfree(iov);
 out:
 	return err;
 }
@@ -2524,6 +2518,12 @@
 static int __init sock_init(void)
 {
 	int err;
+	/*
+	 *      Initialize the network sysctl infrastructure.
+	 */
+	err = net_sysctl_init();
+	if (err)
+		goto out;
 
 	/*
 	 *      Initialize sock SLAB cache.
@@ -3223,7 +3223,7 @@
 	return -ENOIOCTLCMD;
 }
 
-static long compat_sock_ioctl(struct file *file, unsigned cmd,
+static long compat_sock_ioctl(struct file *file, unsigned int cmd,
 			      unsigned long arg)
 {
 	struct socket *sock = file->private_data;
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 8eff8c3..d3611f1 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -624,7 +624,7 @@
 	ctx->seq_send = ctx->seq_send64;
 	if (ctx->seq_send64 != ctx->seq_send) {
 		dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
-			(long unsigned)ctx->seq_send64, ctx->seq_send);
+			(unsigned long)ctx->seq_send64, ctx->seq_send);
 		p = ERR_PTR(-EINVAL);
 		goto out_err;
 	}
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index ca8cad8..782bfe1 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -242,12 +242,13 @@
 int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
 {
 	struct gss_api_mech *pos = NULL;
-	int i = 0;
+	int j, i = 0;
 
 	spin_lock(&registered_mechs_lock);
 	list_for_each_entry(pos, &registered_mechs, gm_list) {
-		array_ptr[i] = pos->gm_pfs->pseudoflavor;
-		i++;
+		for (j=0; j < pos->gm_pf_num; j++) {
+			array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
+		}
 	}
 	spin_unlock(&registered_mechs_lock);
 	return i;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index de0b0f3..47ad266 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1273,7 +1273,7 @@
 	__acquires(cd->hash_lock)
 {
 	loff_t n = *pos;
-	unsigned hash, entry;
+	unsigned int hash, entry;
 	struct cache_head *ch;
 	struct cache_detail *cd = ((struct handle*)m->private)->cd;
 
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 6797246..7fee13b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -127,9 +127,7 @@
 {
 	static uint32_t clntid;
 	char name[15];
-	struct qstr q = {
-		.name = name,
-	};
+	struct qstr q = { .name = name };
 	struct dentry *dir, *dentry;
 	int error;
 
@@ -176,16 +174,22 @@
 	return 0;
 }
 
-static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
-				struct super_block *sb)
+static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
+{
+	if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
+	    ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
+		return 1;
+	return 0;
+}
+
+static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
+				   struct super_block *sb)
 {
 	struct dentry *dentry;
 	int err = 0;
 
 	switch (event) {
 	case RPC_PIPEFS_MOUNT:
-		if (clnt->cl_program->pipe_dir_name == NULL)
-			break;
 		dentry = rpc_setup_pipedir_sb(sb, clnt,
 					      clnt->cl_program->pipe_dir_name);
 		BUG_ON(dentry == NULL);
@@ -208,6 +212,20 @@
 	return err;
 }
 
+static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
+				struct super_block *sb)
+{
+	int error = 0;
+
+	for (;; clnt = clnt->cl_parent) {
+		if (!rpc_clnt_skip_event(clnt, event))
+			error = __rpc_clnt_handle_event(clnt, event, sb);
+		if (error || clnt == clnt->cl_parent)
+			break;
+	}
+	return error;
+}
+
 static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
 {
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
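
The new __rpc_pipefs_event() above fans a pipefs event out from a client up through its cl_parent chain, stopping at the root (which is its own parent) or on the first error. The loop shape as a generic sketch, with invented demo_* names:

struct demo_node {
	struct demo_node *parent;	/* the root points to itself */
};

static int demo_handle(struct demo_node *n)
{
	return 0;			/* placeholder per-node work */
}

static int demo_propagate_up(struct demo_node *n)
{
	int err = 0;

	for (;; n = n->parent) {
		err = demo_handle(n);
		if (err || n == n->parent)	/* error, or reached the root */
			break;
	}
	return err;
}
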
@@ -215,10 +233,12 @@
 
 	spin_lock(&sn->rpc_client_lock);
 	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
-		if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) ||
-		    ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry))
+		if (clnt->cl_program->pipe_dir_name == NULL)
+			break;
+		if (rpc_clnt_skip_event(clnt, event))
 			continue;
-		atomic_inc(&clnt->cl_count);
+		if (atomic_inc_not_zero(&clnt->cl_count) == 0)
+			continue;
 		spin_unlock(&sn->rpc_client_lock);
 		return clnt;
 	}
@@ -257,6 +277,14 @@
 	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
 }
 
+static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
+{
+	clnt->cl_nodelen = strlen(nodename);
+	if (clnt->cl_nodelen > UNX_MAXNODENAME)
+		clnt->cl_nodelen = UNX_MAXNODENAME;
+	memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
+}
+
 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
 {
 	const struct rpc_program *program = args->program;
@@ -337,10 +365,7 @@
 	}
 
 	/* save the nodename */
-	clnt->cl_nodelen = strlen(init_utsname()->nodename);
-	if (clnt->cl_nodelen > UNX_MAXNODENAME)
-		clnt->cl_nodelen = UNX_MAXNODENAME;
-	memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
+	rpc_clnt_set_nodename(clnt, utsname()->nodename);
 	rpc_register_client(clnt);
 	return clnt;
 
@@ -499,6 +524,7 @@
 	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
 	if (err != 0)
 		goto out_no_path;
+	rpc_clnt_set_nodename(new, utsname()->nodename);
 	if (new->cl_auth)
 		atomic_inc(&new->cl_auth->au_count);
 	atomic_inc(&clnt->cl_count);
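
rpc_get_client_for_event() now takes its reference with atomic_inc_not_zero(), i.e. it skips clients whose refcount has already dropped to zero and are being torn down. That is the standard "take a reference only if still live" idiom, sketched generically below (struct demo_obj is invented for illustration):

#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_obj {
	atomic_t refcount;
	/* payload ... */
};

/* Returns the object with an extra reference held, or NULL if it is
 * already on its way out (refcount has hit zero).
 */
static struct demo_obj *demo_obj_get_live(struct demo_obj *o)
{
	if (!atomic_inc_not_zero(&o->refcount))
		return NULL;
	return o;
}

static void demo_obj_put(struct demo_obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		kfree(o);
}
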
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0af37fc..fd24239 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1059,12 +1059,9 @@
 struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
 			       const unsigned char *dir_name)
 {
-	struct qstr dir = {
-		.name = dir_name,
-		.len = strlen(dir_name),
-		.hash = full_name_hash(dir_name, strlen(dir_name)),
-	};
+	struct qstr dir = QSTR_INIT(dir_name, strlen(dir_name));
 
+	dir.hash = full_name_hash(dir.name, dir.len);
 	return d_lookup(sb->s_root, &dir);
 }
 EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);
@@ -1126,19 +1123,20 @@
 		return -ENOMEM;
 	dprintk("RPC:	sending pipefs MOUNT notification for net %p%s\n", net,
 								NET_NAME(net));
+	sn->pipefs_sb = sb;
 	err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
 					   RPC_PIPEFS_MOUNT,
 					   sb);
 	if (err)
 		goto err_depopulate;
 	sb->s_fs_info = get_net(net);
-	sn->pipefs_sb = sb;
 	return 0;
 
 err_depopulate:
 	blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
 					   RPC_PIPEFS_UMOUNT,
 					   sb);
+	sn->pipefs_sb = NULL;
 	__rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
 	return err;
 }
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8adfc88..3d6498a 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -75,20 +75,21 @@
 static int __init
 init_sunrpc(void)
 {
-	int err = register_rpc_pipefs();
+	int err = rpc_init_mempool();
 	if (err)
 		goto out;
-	err = rpc_init_mempool();
-	if (err)
-		goto out2;
 	err = rpcauth_init_module();
 	if (err)
-		goto out3;
+		goto out2;
 
 	cache_initialize();
 
 	err = register_pernet_subsys(&sunrpc_net_ops);
 	if (err)
+		goto out3;
+
+	err = register_rpc_pipefs();
+	if (err)
 		goto out4;
 #ifdef RPC_DEBUG
 	rpc_register_sysctl();
@@ -98,11 +99,11 @@
 	return 0;
 
 out4:
-	rpcauth_remove_module();
+	unregister_pernet_subsys(&sunrpc_net_ops);
 out3:
-	rpc_destroy_mempool();
+	rpcauth_remove_module();
 out2:
-	unregister_rpc_pipefs();
+	rpc_destroy_mempool();
 out:
 	return err;
 }
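
init_sunrpc() above reorders the setup steps (pipefs registration now comes last) and reshuffles the error labels so that each failure undoes exactly the steps that already succeeded, in reverse order. The general ladder looks like this; the step_* functions are placeholders standing in for rpc_init_mempool(), rpcauth_init_module(), and so on:

static int step_a_init(void) { return 0; }	/* placeholder setup steps */
static int step_b_init(void) { return 0; }
static int step_c_init(void) { return 0; }
static void step_b_exit(void) { }
static void step_a_exit(void) { }

static int demo_subsys_init(void)
{
	int err;

	err = step_a_init();
	if (err)
		goto out;
	err = step_b_init();
	if (err)
		goto out_a;
	err = step_c_init();
	if (err)
		goto out_b;
	return 0;

out_b:
	step_b_exit();	/* undo in reverse order of setup */
out_a:
	step_a_exit();
out:
	return err;
}
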
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 4153846..017c011 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1041,23 +1041,21 @@
  * Printk the given error with the address of the client that caused it.
  */
 static __printf(2, 3)
-int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
+void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
-	int 	r;
 	char 	buf[RPC_MAX_ADDRBUFLEN];
 
-	if (!net_ratelimit())
-		return 0;
-
-	printk(KERN_WARNING "svc: %s: ",
-		svc_print_addr(rqstp, buf, sizeof(buf)));
-
 	va_start(args, fmt);
-	r = vprintk(fmt, args);
-	va_end(args);
 
-	return r;
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	net_warn_ratelimited("svc: %s: %pV",
+			     svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);
+
+	va_end(args);
 }
 
 /*
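
svc_printk() above is rebuilt around struct va_format and the %pV printk extension, which let the whole (format, va_list) pair be forwarded into a single rate-limited print instead of two separate printk calls. A minimal sketch of that idiom (the demo prefix is invented; struct va_format and %pV are the real kernel facilities):

#include <linux/kernel.h>
#include <linux/printk.h>

static __printf(1, 2) void demo_warn(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV expands the embedded format and argument list in place;
	 * callers supply the trailing newline, as in svc_printk().
	 */
	printk(KERN_WARNING "demo: %pV", &vaf);
	va_end(args);
}
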
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 4bda09d..b98ee35 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -544,14 +544,11 @@
 		struct svc_xprt *xprt = NULL;
 		spin_lock_bh(&serv->sv_lock);
 		if (!list_empty(&serv->sv_tempsocks)) {
-			if (net_ratelimit()) {
-				/* Try to help the admin */
-				printk(KERN_NOTICE "%s: too many open  "
-				       "connections, consider increasing %s\n",
-				       serv->sv_name, serv->sv_maxconn ?
-				       "the max number of connections." :
-				       "the number of threads.");
-			}
+			/* Try to help the admin */
+			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
+					       serv->sv_name, serv->sv_maxconn ?
+					       "max number of connections" :
+					       "number of threads");
 			/*
 			 * Always select the oldest connection. It's not fair,
 			 * but so is life
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 824d32f..a6de09d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -617,11 +617,8 @@
 	rqstp->rq_prot = IPPROTO_UDP;
 
 	if (!svc_udp_get_dest_address(rqstp, cmh)) {
-		if (net_ratelimit())
-			printk(KERN_WARNING
-				"svc: received unknown control message %d/%d; "
-				"dropping RPC reply datagram\n",
-					cmh->cmsg_level, cmh->cmsg_type);
+		net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
+				     cmh->cmsg_level, cmh->cmsg_type);
 		skb_free_datagram_locked(svsk->sk_sk, skb);
 		return 0;
 	}
@@ -871,18 +868,17 @@
 		if (err == -ENOMEM)
 			printk(KERN_WARNING "%s: no more sockets!\n",
 			       serv->sv_name);
-		else if (err != -EAGAIN && net_ratelimit())
-			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
-				   serv->sv_name, -err);
+		else if (err != -EAGAIN)
+			net_warn_ratelimited("%s: accept failed (err %d)!\n",
+					     serv->sv_name, -err);
 		return NULL;
 	}
 	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 
 	err = kernel_getpeername(newsock, sin, &slen);
 	if (err < 0) {
-		if (net_ratelimit())
-			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
-				   serv->sv_name, -err);
+		net_warn_ratelimited("%s: peername failed (err %d)!\n",
+				     serv->sv_name, -err);
 		goto failed;		/* aborted connection or whatever */
 	}
 
@@ -1012,19 +1008,15 @@
 			 *  bit set in the fragment length header.
 			 *  But apparently no known nfs clients send fragmented
 			 *  records. */
-			if (net_ratelimit())
-				printk(KERN_NOTICE "RPC: multiple fragments "
-					"per record not supported\n");
+			net_notice_ratelimited("RPC: multiple fragments per record not supported\n");
 			goto err_delete;
 		}
 
 		svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
 		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
 		if (svsk->sk_reclen > serv->sv_max_mesg) {
-			if (net_ratelimit())
-				printk(KERN_NOTICE "RPC: "
-					"fragment too large: 0x%08lx\n",
-					(unsigned long)svsk->sk_reclen);
+			net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n",
+					       (unsigned long)svsk->sk_reclen);
 			goto err_delete;
 		}
 	}
@@ -1556,7 +1548,7 @@
 					(char *)&val, sizeof(val));
 
 	if (type == SOCK_STREAM)
-		sock->sk->sk_reuse = 1;		/* allow address reuse */
+		sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */
 	error = kernel_bind(sock, sin, len);
 	if (error < 0)
 		goto bummer;
diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c
index dd82434..08881d0 100644
--- a/net/sunrpc/timer.c
+++ b/net/sunrpc/timer.c
@@ -34,7 +34,7 @@
 void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
 {
 	unsigned long init = 0;
-	unsigned i;
+	unsigned int i;
 
 	rt->timeo = timeo;
 
@@ -57,7 +57,7 @@
  * NB: When computing the smoothed RTT and standard deviation,
  *     be careful not to produce negative intermediate results.
  */
-void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
+void rpc_update_rtt(struct rpc_rtt *rt, unsigned int timer, long m)
 {
 	long *srtt, *sdrtt;
 
@@ -106,7 +106,7 @@
  * read, write, commit     - A+4D
  * other                   - timeo
  */
-unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
+unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned int timer)
 {
 	unsigned long res;
 
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index b97a3dd..fddcccf 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1204,7 +1204,7 @@
 		int (*actor)(struct scatterlist *, void *), void *data)
 {
 	int i, ret = 0;
-	unsigned page_len, thislen, page_offset;
+	unsigned int page_len, thislen, page_offset;
 	struct scatterlist      sg[1];
 
 	sg_init_table(sg, 1);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 0cbcd1a..6fe2dce 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -783,7 +783,7 @@
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
-	unsigned timer = task->tk_msg.rpc_proc->p_timer;
+	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
 	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
 
 	if (timer) {
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index c3e65ae..e3a6e37 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -26,10 +26,6 @@
 #include <linux/if_ether.h>
 #endif
 
-#ifdef CONFIG_TR
-#include <linux/if_tr.h>
-#endif
-
 static struct ctl_table_set *
 net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces)
 {
@@ -59,19 +55,6 @@
 	.permissions = net_ctl_permissions,
 };
 
-static int net_ctl_ro_header_perms(struct ctl_table_root *root,
-		struct nsproxy *namespaces, struct ctl_table *table)
-{
-	if (net_eq(namespaces->net_ns, &init_net))
-		return table->mode;
-	else
-		return table->mode & ~0222;
-}
-
-static struct ctl_table_root net_sysctl_ro_root = {
-	.permissions = net_ctl_ro_header_perms,
-};
-
 static int __net_init sysctl_net_init(struct net *net)
 {
 	setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen);
@@ -88,34 +71,32 @@
 	.exit = sysctl_net_exit,
 };
 
-static __init int net_sysctl_init(void)
+static struct ctl_table_header *net_header;
+__init int net_sysctl_init(void)
 {
-	int ret;
+	static struct ctl_table empty[1];
+	int ret = -ENOMEM;
+	/* Avoid limitations in the sysctl implementation by
+	 * registering "/proc/sys/net" as an empty directory not in a
+	 * network namespace.
+	 */
+	net_header = register_sysctl("net", empty);
+	if (!net_header)
+		goto out;
 	ret = register_pernet_subsys(&sysctl_pernet_ops);
 	if (ret)
 		goto out;
-	setup_sysctl_set(&net_sysctl_ro_root.default_set, &net_sysctl_ro_root, NULL);
-	register_sysctl_root(&net_sysctl_ro_root);
 	register_sysctl_root(&net_sysctl_root);
 out:
 	return ret;
 }
-subsys_initcall(net_sysctl_init);
 
-struct ctl_table_header *register_net_sysctl_table(struct net *net,
-	const struct ctl_path *path, struct ctl_table *table)
+struct ctl_table_header *register_net_sysctl(struct net *net,
+	const char *path, struct ctl_table *table)
 {
-	return __register_sysctl_paths(&net->sysctls, path, table);
+	return __register_sysctl_table(&net->sysctls, path, table);
 }
-EXPORT_SYMBOL_GPL(register_net_sysctl_table);
-
-struct ctl_table_header *register_net_sysctl_rotable(const
-		struct ctl_path *path, struct ctl_table *table)
-{
-	return __register_sysctl_paths(&net_sysctl_ro_root.default_set,
-					path, table);
-}
-EXPORT_SYMBOL_GPL(register_net_sysctl_rotable);
+EXPORT_SYMBOL_GPL(register_net_sysctl);
 
 void unregister_net_sysctl_table(struct ctl_table_header *header)
 {
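
The rework above registers "/proc/sys/net" itself as an empty, non-namespaced directory, turns register_net_sysctl() into a thin string-path wrapper, and drops the separate read-only root. The empty-directory trick in isolation (the "net/demo-dir" path is hypothetical; register_sysctl() is the real core helper used here for "net"):

#include <linux/errno.h>
#include <linux/sysctl.h>

static struct ctl_table demo_empty[1];	/* no entries: creates just the directory */
static struct ctl_table_header *demo_dir_hdr;

static int demo_make_dir(void)
{
	demo_dir_hdr = register_sysctl("net/demo-dir", demo_empty);
	return demo_dir_hdr ? 0 : -ENOMEM;
}
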
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 521d24d..6cd55d6 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -9,5 +9,3 @@
 	   name_distr.o  subscr.o name_table.o net.o  \
 	   netlink.o node.o node_subscr.o port.o ref.o  \
 	   socket.o log.o eth_media.o
-
-# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index a6fdab3..357b74b 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -45,7 +45,6 @@
  *
  * Returns 1 if domain address is valid, otherwise 0
  */
-
 int tipc_addr_domain_valid(u32 addr)
 {
 	u32 n = tipc_node(addr);
@@ -66,7 +65,6 @@
  *
  * Returns 1 if address can be used, otherwise 0
  */
-
 int tipc_addr_node_valid(u32 addr)
 {
 	return tipc_addr_domain_valid(addr) && tipc_node(addr);
@@ -86,7 +84,6 @@
 /**
  * tipc_addr_scope - convert message lookup domain to a 2-bit scope value
  */
-
 int tipc_addr_scope(u32 domain)
 {
 	if (likely(!domain))
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index e4f35af..60b00ab 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -50,18 +50,33 @@
 	return addr & TIPC_CLUSTER_MASK;
 }
 
-static inline int in_own_cluster(u32 addr)
+static inline int in_own_cluster_exact(u32 addr)
 {
 	return !((addr ^ tipc_own_addr) >> 12);
 }
 
 /**
+ * in_own_node - test for node inclusion; <0.0.0> always matches
+ */
+static inline int in_own_node(u32 addr)
+{
+	return (addr == tipc_own_addr) || !addr;
+}
+
+/**
+ * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
+ */
+static inline int in_own_cluster(u32 addr)
+{
+	return in_own_cluster_exact(addr) || !addr;
+}
+
+/**
  * addr_domain - convert 2-bit scope value to equivalent message lookup domain
  *
  * Needed when address of a named message must be looked up a second time
  * after a network hop.
  */
-
 static inline u32 addr_domain(u32 sc)
 {
 	if (likely(sc == TIPC_NODE_SCOPE))
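
The new in_own_node()/in_own_cluster() helpers treat the address <0.0.0> as a wildcard for "this node"/"this cluster", while in_own_cluster_exact() keeps the old strict comparison. A small sketch of the kind of routing decision they simplify (the demo_* names are invented; the helpers and u32 come from the TIPC headers above):

enum demo_route { DEMO_LOCAL, DEMO_IN_CLUSTER, DEMO_OFF_CLUSTER };

static enum demo_route demo_classify(u32 destnode)
{
	if (in_own_node(destnode))	/* our own node, or the <0.0.0> wildcard */
		return DEMO_LOCAL;
	if (in_own_cluster(destnode))	/* another node in our own cluster */
		return DEMO_IN_CLUSTER;
	return DEMO_OFF_CLUSTER;
}
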
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e00441a2..2625f5e 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -73,7 +73,6 @@
  * large local variables within multicast routines.  Concurrent access is
  * prevented through use of the spinlock "bc_lock".
  */
-
 struct tipc_bcbearer {
 	struct tipc_bearer bearer;
 	struct tipc_media media;
@@ -92,7 +91,6 @@
  *
  * Handles sequence numbering, fragmentation, bundling, etc.
  */
-
 struct tipc_bclink {
 	struct tipc_link link;
 	struct tipc_node node;
@@ -169,7 +167,6 @@
  *
  * Called with bc_lock locked
  */
-
 struct tipc_node *tipc_bclink_retransmit_to(void)
 {
 	return bclink->retransmit_to;
@@ -182,7 +179,6 @@
  *
  * Called with bc_lock locked
  */
-
 static void bclink_retransmit_pkt(u32 after, u32 to)
 {
 	struct sk_buff *buf;
@@ -200,7 +196,6 @@
  *
  * Node is locked, bc_lock unlocked.
  */
-
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 {
 	struct sk_buff *crs;
@@ -280,7 +275,6 @@
  *
  * tipc_net_lock and node lock set
  */
-
 void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
 {
 	struct sk_buff *buf;
@@ -344,7 +338,6 @@
  *
  * Only tipc_net_lock set.
  */
-
 static void bclink_peek_nack(struct tipc_msg *msg)
 {
 	struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
@@ -365,7 +358,6 @@
 /*
  * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
  */
-
 int tipc_bclink_send_msg(struct sk_buff *buf)
 {
 	int res;
@@ -394,7 +386,6 @@
  *
  * Called with both sending node's lock and bc_lock taken.
  */
-
 static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 {
 	bclink_update_last_sent(node, seqno);
@@ -420,7 +411,6 @@
  *
  * tipc_net_lock is read_locked, no other locks set
  */
-
 void tipc_bclink_recv_pkt(struct sk_buff *buf)
 {
 	struct tipc_msg *msg = buf_msg(buf);
@@ -588,7 +578,6 @@
  * Returns 0 (packet sent successfully) under all circumstances,
  * since the broadcast link's pseudo-bearer never blocks
  */
-
 static int tipc_bcbearer_send(struct sk_buff *buf,
 			      struct tipc_bearer *unused1,
 			      struct tipc_media_addr *unused2)
@@ -601,7 +590,6 @@
 	 * preparation is skipped for broadcast link protocol messages
 	 * since they are sent in an unreliable manner and don't need it
 	 */
-
 	if (likely(!msg_non_seq(buf_msg(buf)))) {
 		struct tipc_msg *msg;
 
@@ -618,7 +606,6 @@
 	}
 
 	/* Send buffer over bearers until all targets reached */
-
 	bcbearer->remains = bclink->bcast_nodes;
 
 	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
@@ -660,7 +647,6 @@
 /**
  * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
  */
-
 void tipc_bcbearer_sort(void)
 {
 	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
@@ -671,7 +657,6 @@
 	spin_lock_bh(&bc_lock);
 
 	/* Group bearers by priority (can assume max of two per priority) */
-
 	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
 
 	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
@@ -687,7 +672,6 @@
 	}
 
 	/* Create array of bearer pairs for broadcasting */
-
 	bp_curr = bcbearer->bpairs;
 	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
 
@@ -817,7 +801,6 @@
 /**
  * tipc_nmap_add - add a node to a node map
  */
-
 void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
 {
 	int n = tipc_node(node);
@@ -833,7 +816,6 @@
 /**
  * tipc_nmap_remove - remove a node from a node map
  */
-
 void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
 {
 	int n = tipc_node(node);
@@ -852,7 +834,6 @@
  * @nm_b: input node map B
  * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
  */
-
 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
 			   struct tipc_node_map *nm_b,
 			   struct tipc_node_map *nm_diff)
@@ -878,7 +859,6 @@
 /**
  * tipc_port_list_add - add a port to a port list, ensuring no duplicates
  */
-
 void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
 {
 	struct tipc_port_list *item = pl_ptr;
@@ -912,7 +892,6 @@
  * tipc_port_list_free - free dynamically created entries in port_list chain
  *
  */
-
 void tipc_port_list_free(struct tipc_port_list *pl_ptr)
 {
 	struct tipc_port_list *item;
@@ -923,4 +902,3 @@
 		kfree(item);
 	}
 }
-
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5571394..a933065 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -45,7 +45,6 @@
  * @count: # of nodes in set
  * @map: bitmap of node identifiers that are in the set
  */
-
 struct tipc_node_map {
 	u32 count;
 	u32 map[MAX_NODES / WSIZE];
@@ -59,7 +58,6 @@
  * @next: pointer to next entry in list
  * @ports: array of port references
  */
-
 struct tipc_port_list {
 	int count;
 	struct tipc_port_list *next;
@@ -77,7 +75,6 @@
 /**
  * tipc_nmap_equal - test for equality of node maps
  */
-
 static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b)
 {
 	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 5dfd89c..a297e3a 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -53,7 +53,6 @@
  *
  * Returns 1 if media name is valid, otherwise 0.
  */
-
 static int media_name_valid(const char *name)
 {
 	u32 len;
@@ -67,7 +66,6 @@
 /**
  * tipc_media_find - locates specified media object by name
  */
-
 struct tipc_media *tipc_media_find(const char *name)
 {
 	u32 i;
@@ -82,7 +80,6 @@
 /**
  * media_find_id - locates specified media object by type identifier
  */
-
 static struct tipc_media *media_find_id(u8 type)
 {
 	u32 i;
@@ -99,7 +96,6 @@
  *
  * Bearers for this media type must be activated separately at a later stage.
  */
-
 int tipc_register_media(struct tipc_media *m_ptr)
 {
 	int res = -EINVAL;
@@ -134,7 +130,6 @@
 /**
  * tipc_media_addr_printf - record media address in print buffer
  */
-
 void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
 {
 	char addr_str[MAX_ADDR_STR];
@@ -156,7 +151,6 @@
 /**
  * tipc_media_get_names - record names of registered media in buffer
  */
-
 struct sk_buff *tipc_media_get_names(void)
 {
 	struct sk_buff *buf;
@@ -183,7 +177,6 @@
  *
  * Returns 1 if bearer name is valid, otherwise 0.
  */
-
 static int bearer_name_validate(const char *name,
 				struct tipc_bearer_names *name_parts)
 {
@@ -194,7 +187,6 @@
 	u32 if_len;
 
 	/* copy bearer name & ensure length is OK */
-
 	name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
 	/* need above in case non-Posix strncpy() doesn't pad with nulls */
 	strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
@@ -202,7 +194,6 @@
 		return 0;
 
 	/* ensure all component parts of bearer name are present */
-
 	media_name = name_copy;
 	if_name = strchr(media_name, ':');
 	if (if_name == NULL)
@@ -212,7 +203,6 @@
 	if_len = strlen(if_name) + 1;
 
 	/* validate component parts of bearer name */
-
 	if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
 	    (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
 	    (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
@@ -220,7 +210,6 @@
 		return 0;
 
 	/* return bearer name components, if necessary */
-
 	if (name_parts) {
 		strcpy(name_parts->media_name, media_name);
 		strcpy(name_parts->if_name, if_name);
@@ -231,7 +220,6 @@
 /**
  * tipc_bearer_find - locates bearer object with matching bearer name
  */
-
 struct tipc_bearer *tipc_bearer_find(const char *name)
 {
 	struct tipc_bearer *b_ptr;
@@ -247,7 +235,6 @@
 /**
  * tipc_bearer_find_interface - locates bearer object with matching interface name
  */
-
 struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
 {
 	struct tipc_bearer *b_ptr;
@@ -267,7 +254,6 @@
 /**
  * tipc_bearer_get_names - record names of bearers in buffer
  */
-
 struct sk_buff *tipc_bearer_get_names(void)
 {
 	struct sk_buff *buf;
@@ -363,7 +349,6 @@
  * the bearer is congested. 'tipc_net_lock' is in read_lock here
  * bearer.lock is busy
  */
-
 static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
 						struct tipc_link *l_ptr)
 {
@@ -377,7 +362,6 @@
  * the bearer is congested. 'tipc_net_lock' is in read_lock here,
  * bearer.lock is free
  */
-
 void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
 {
 	spin_lock_bh(&b_ptr->lock);
@@ -410,7 +394,6 @@
 /**
  * tipc_bearer_congested - determines if bearer is currently congested
  */
-
 int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
 {
 	if (unlikely(b_ptr->blocked))
@@ -423,7 +406,6 @@
 /**
  * tipc_enable_bearer - enable bearer with the given name
  */
-
 int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
 {
 	struct tipc_bearer *b_ptr;
@@ -449,7 +431,7 @@
 		if (tipc_in_scope(disc_domain, tipc_own_addr)) {
 			disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
 			res = 0;   /* accept any node in own cluster */
-		} else if (in_own_cluster(disc_domain))
+		} else if (in_own_cluster_exact(disc_domain))
 			res = 0;   /* accept specified node in own cluster */
 	}
 	if (res) {
@@ -541,7 +523,6 @@
  * tipc_block_bearer(): Block the bearer with the given name,
  *                      and reset all its links
  */
-
 int tipc_block_bearer(const char *name)
 {
 	struct tipc_bearer *b_ptr = NULL;
@@ -573,11 +554,10 @@
 }
 
 /**
- * bearer_disable -
+ * bearer_disable
  *
  * Note: This routine assumes caller holds tipc_net_lock.
  */
-
 static void bearer_disable(struct tipc_bearer *b_ptr)
 {
 	struct tipc_link *l_ptr;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index d3eac56..e3b2be3 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -49,7 +49,6 @@
  * - media type identifier located at offset 3
  * - remaining bytes vary according to media type
  */
-
 #define TIPC_MEDIA_ADDR_SIZE	20
 #define TIPC_MEDIA_TYPE_OFFSET	3
 
@@ -64,7 +63,6 @@
  * @media_id: TIPC media type identifier
  * @broadcast: non-zero if address is a broadcast address
  */
-
 struct tipc_media_addr {
 	u8 value[TIPC_MEDIA_ADDR_SIZE];
 	u8 media_id;
@@ -89,7 +87,6 @@
  * @type_id: TIPC media identifier
  * @name: media name
  */
-
 struct tipc_media {
 	int (*send_msg)(struct sk_buff *buf,
 			struct tipc_bearer *b_ptr,
@@ -216,7 +213,6 @@
  * send routine always returns success -- even if the buffer was not sent --
  * and let TIPC's link code deal with the undelivered message.
  */
-
 static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
 				   struct sk_buff *buf,
 				   struct tipc_media_addr *dest)
diff --git a/net/tipc/config.c b/net/tipc/config.c
index f76d3b1..c5712a3 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -131,7 +131,6 @@
 	tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n");
 
 	/* Use additional tipc_printf()'s to return more info ... */
-
 	str_len = tipc_printbuf_validate(&pb);
 	skb_put(buf, TLV_SPACE(str_len));
 	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -191,7 +190,6 @@
 	 * configuration commands can't be received until a local configuration
 	 * command to enable the first bearer is received and processed.
 	 */
-
 	spin_unlock_bh(&config_lock);
 	tipc_core_start_net(addr);
 	spin_lock_bh(&config_lock);
@@ -283,14 +281,12 @@
 	spin_lock_bh(&config_lock);
 
 	/* Save request and reply details in a well-known location */
-
 	req_tlv_area = request_area;
 	req_tlv_space = request_space;
 	rep_headroom = reply_headroom;
 
 	/* Check command authorization */
-
-	if (likely(orig_node == tipc_own_addr)) {
+	if (likely(in_own_node(orig_node))) {
 		/* command is permitted */
 	} else if (cmd >= 0x8000) {
 		rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
@@ -310,7 +306,6 @@
 	}
 
 	/* Call appropriate processing routine */
-
 	switch (cmd) {
 	case TIPC_CMD_NOOP:
 		rep_tlv_buf = tipc_cfg_reply_none();
@@ -433,7 +428,6 @@
 	struct sk_buff *rep_buf;
 
 	/* Validate configuration message header (ignore invalid message) */
-
 	req_hdr = (struct tipc_cfg_msg_hdr *)msg;
 	if ((size < sizeof(*req_hdr)) ||
 	    (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
@@ -443,7 +437,6 @@
 	}
 
 	/* Generate reply for request (if can't, return request) */
-
 	rep_buf = tipc_cfg_do_cmd(orig->node,
 				  ntohs(req_hdr->tcm_type),
 				  msg + sizeof(*req_hdr),
@@ -489,10 +482,23 @@
 	return res;
 }
 
+void tipc_cfg_reinit(void)
+{
+	struct tipc_name_seq seq;
+	int res;
+
+	seq.type = TIPC_CFG_SRV;
+	seq.lower = seq.upper = 0;
+	tipc_withdraw(config_port_ref, TIPC_ZONE_SCOPE, &seq);
+
+	seq.lower = seq.upper = tipc_own_addr;
+	res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
+	if (res)
+		err("Unable to reinitialize configuration service\n");
+}
+
 void tipc_cfg_stop(void)
 {
-	if (config_port_ref) {
-		tipc_deleteport(config_port_ref);
-		config_port_ref = 0;
-	}
+	tipc_deleteport(config_port_ref);
+	config_port_ref = 0;
 }
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 80da6eb..1f252f3 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -66,6 +66,7 @@
 				int headroom);
 
 int  tipc_cfg_init(void);
+void tipc_cfg_reinit(void);
 void tipc_cfg_stop(void);
 
 #endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 68eba03..f7b9523 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -52,14 +52,12 @@
 #endif
 
 /* global variables used by multiple sub-systems within TIPC */
-
 int tipc_random;
 
 const char tipc_alphabet[] =
 	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
 
 /* configurable TIPC parameters */
-
 u32 tipc_own_addr;
 int tipc_max_ports;
 int tipc_max_subscriptions;
@@ -77,7 +75,6 @@
  * NOTE: Headroom is reserved to allow prepending of a data link header.
  *       There may also be unrequested tailroom present at the buffer's end.
  */
-
 struct sk_buff *tipc_buf_acquire(u32 size)
 {
 	struct sk_buff *skb;
@@ -95,7 +92,6 @@
 /**
  * tipc_core_stop_net - shut down TIPC networking sub-systems
  */
-
 static void tipc_core_stop_net(void)
 {
 	tipc_net_stop();
@@ -105,7 +101,6 @@
 /**
  * start_net - start TIPC networking sub-systems
  */
-
 int tipc_core_start_net(unsigned long addr)
 {
 	int res;
@@ -121,7 +116,6 @@
 /**
  * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
  */
-
 static void tipc_core_stop(void)
 {
 	tipc_netlink_stop();
@@ -137,7 +131,6 @@
 /**
  * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
  */
-
 static int tipc_core_start(void)
 {
 	int res;
@@ -150,9 +143,9 @@
 	if (!res)
 		res = tipc_nametbl_init();
 	if (!res)
-		res = tipc_k_signal((Handler)tipc_subscr_start, 0);
+		res = tipc_subscr_start();
 	if (!res)
-		res = tipc_k_signal((Handler)tipc_cfg_init, 0);
+		res = tipc_cfg_init();
 	if (!res)
 		res = tipc_netlink_start();
 	if (!res)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 13837e0..2a9bb99 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -85,7 +85,6 @@
 /*
  * TIPC_OUTPUT is the destination print buffer for system messages.
  */
-
 #ifndef TIPC_OUTPUT
 #define TIPC_OUTPUT TIPC_LOG
 #endif
@@ -102,7 +101,6 @@
 /*
  * DBG_OUTPUT is the destination print buffer for debug messages.
  */
-
 #ifndef DBG_OUTPUT
 #define DBG_OUTPUT TIPC_LOG
 #endif
@@ -126,13 +124,11 @@
 /*
  * TIPC-specific error codes
  */
-
 #define ELINKCONG EAGAIN	/* link congestion <=> resource unavailable */
 
 /*
  * Global configuration variables
  */
-
 extern u32 tipc_own_addr;
 extern int tipc_max_ports;
 extern int tipc_max_subscriptions;
@@ -143,7 +139,6 @@
 /*
  * Other global variables
  */
-
 extern int tipc_random;
 extern const char tipc_alphabet[];
 
@@ -151,7 +146,6 @@
 /*
  * Routines available to privileged subsystems
  */
-
 extern int tipc_core_start_net(unsigned long);
 extern int  tipc_handler_start(void);
 extern void tipc_handler_stop(void);
@@ -163,7 +157,6 @@
 /*
  * TIPC timer and signal code
  */
-
 typedef void (*Handler) (unsigned long);
 
 u32 tipc_k_signal(Handler routine, unsigned long argument);
@@ -176,7 +169,6 @@
  *
  * Timer must be initialized before use (and terminated when no longer needed).
  */
-
 static inline void k_init_timer(struct timer_list *timer, Handler routine,
 				unsigned long argument)
 {
@@ -196,7 +188,6 @@
  * then an additional jiffy is added to account for the fact that
  * the starting time may be in the middle of the current jiffy.
  */
-
 static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
 {
 	mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
@@ -212,7 +203,6 @@
  * WARNING: Must not be called when holding locks required by the timer's
  *          timeout routine, otherwise deadlock can occur on SMP systems!
  */
-
 static inline void k_cancel_timer(struct timer_list *timer)
 {
 	del_timer_sync(timer);
@@ -229,12 +219,10 @@
  * (Do not "enhance" this routine to automatically cancel an active timer,
  * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
  */
-
 static inline void k_term_timer(struct timer_list *timer)
 {
 }
 
-
 /*
  * TIPC message buffer code
  *
@@ -244,7 +232,6 @@
  * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
  *       are word aligned for quicker access
  */
-
 #define BUF_HEADROOM LL_MAX_HEADER
 
 struct tipc_skb_cb {
@@ -253,7 +240,6 @@
 
 #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
 
-
 static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
 {
 	return (struct tipc_msg *)skb->data;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index c630a21..ae054cf 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -70,7 +70,6 @@
  * @dest_domain: network domain of node(s) which should respond to message
  * @b_ptr: ptr to bearer issuing message
  */
-
 static struct sk_buff *tipc_disc_init_msg(u32 type,
 					  u32 dest_domain,
 					  struct tipc_bearer *b_ptr)
@@ -96,7 +95,6 @@
  * @node_addr: duplicated node address
  * @media_addr: media address advertised by duplicated node
  */
-
 static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
 			    struct tipc_media_addr *media_addr)
 {
@@ -117,7 +115,6 @@
  * @buf: buffer containing message
  * @b_ptr: bearer that message arrived on
  */
-
 void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
 {
 	struct tipc_node *n_ptr;
@@ -221,7 +218,6 @@
 	 * the new media address and reset the link to ensure it starts up
 	 * cleanly.
 	 */
-
 	if (addr_mismatch) {
 		if (tipc_link_is_up(link)) {
 			disc_dupl_alert(b_ptr, orig, &media_addr);
@@ -264,7 +260,6 @@
  * Reinitiates discovery process if discovery object has no associated nodes
  * and is either not currently searching or is searching at a slow rate
  */
-
 static void disc_update(struct tipc_link_req *req)
 {
 	if (!req->num_nodes) {
@@ -280,7 +275,6 @@
  * tipc_disc_add_dest - increment set of discovered nodes
  * @req: ptr to link request structure
  */
-
 void tipc_disc_add_dest(struct tipc_link_req *req)
 {
 	req->num_nodes++;
@@ -290,7 +284,6 @@
  * tipc_disc_remove_dest - decrement set of discovered nodes
  * @req: ptr to link request structure
  */
-
 void tipc_disc_remove_dest(struct tipc_link_req *req)
 {
 	req->num_nodes--;
@@ -301,7 +294,6 @@
  * disc_send_msg - send link setup request message
  * @req: ptr to link request structure
  */
-
 static void disc_send_msg(struct tipc_link_req *req)
 {
 	if (!req->bearer->blocked)
@@ -314,7 +306,6 @@
  *
  * Called whenever a link setup request timer associated with a bearer expires.
  */
-
 static void disc_timeout(struct tipc_link_req *req)
 {
 	int max_delay;
@@ -322,7 +313,6 @@
 	spin_lock_bh(&req->bearer->lock);
 
 	/* Stop searching if only desired node has been found */
-
 	if (tipc_node(req->domain) && req->num_nodes) {
 		req->timer_intv = TIPC_LINK_REQ_INACTIVE;
 		goto exit;
@@ -335,7 +325,6 @@
 	 * hold at fast polling rate if don't have any associated nodes,
 	 * otherwise hold at slow polling rate
 	 */
-
 	disc_send_msg(req);
 
 	req->timer_intv *= 2;
@@ -359,7 +348,6 @@
  *
  * Returns 0 if successful, otherwise -errno.
  */
-
 int tipc_disc_create(struct tipc_bearer *b_ptr,
 		     struct tipc_media_addr *dest, u32 dest_domain)
 {
@@ -391,7 +379,6 @@
  * tipc_disc_delete - destroy object sending periodic link setup requests
  * @req: ptr to link request structure
  */
-
 void tipc_disc_delete(struct tipc_link_req *req)
 {
 	k_cancel_timer(&req->timer);
@@ -399,4 +386,3 @@
 	kfree_skb(req->buf);
 	kfree(req);
 }
-
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 527e3f0..90ac9bf 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -48,7 +48,6 @@
  * @tipc_packet_type: used in binding TIPC to Ethernet driver
  * @cleanup: work item used when disabling bearer
  */
-
 struct eth_bearer {
 	struct tipc_bearer *bearer;
 	struct net_device *dev;
@@ -67,7 +66,6 @@
  * Media-dependent "value" field stores MAC address in first 6 bytes
  * and zeroes out the remaining bytes.
  */
-
 static void eth_media_addr_set(struct tipc_media_addr *a, char *mac)
 {
 	memcpy(a->value, mac, ETH_ALEN);
@@ -79,7 +77,6 @@
 /**
  * send_msg - send a TIPC message out over an Ethernet interface
  */
-
 static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
 		    struct tipc_media_addr *dest)
 {
@@ -115,7 +112,6 @@
  * ignores packets sent using Ethernet multicast, and traffic sent to other
  * nodes (which can happen if interface is running in promiscuous mode).
  */
-
 static int recv_msg(struct sk_buff *buf, struct net_device *dev,
 		    struct packet_type *pt, struct net_device *orig_dev)
 {
@@ -140,7 +136,6 @@
 /**
  * enable_bearer - attach TIPC bearer to an Ethernet interface
  */
-
 static int enable_bearer(struct tipc_bearer *tb_ptr)
 {
 	struct net_device *dev = NULL;
@@ -151,7 +146,6 @@
 	int pending_dev = 0;
 
 	/* Find unused Ethernet bearer structure */
-
 	while (eb_ptr->dev) {
 		if (!eb_ptr->bearer)
 			pending_dev++;
@@ -160,7 +154,6 @@
 	}
 
 	/* Find device with specified name */
-
 	read_lock(&dev_base_lock);
 	for_each_netdev(&init_net, pdev) {
 		if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
@@ -174,7 +167,6 @@
 		return -ENODEV;
 
 	/* Create Ethernet bearer for device */
-
 	eb_ptr->dev = dev;
 	eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
 	eb_ptr->tipc_packet_type.dev = dev;
@@ -184,7 +176,6 @@
 	dev_add_pack(&eb_ptr->tipc_packet_type);
 
 	/* Associate TIPC bearer with Ethernet bearer */
-
 	eb_ptr->bearer = tb_ptr;
 	tb_ptr->usr_handle = (void *)eb_ptr;
 	tb_ptr->mtu = dev->mtu;
@@ -198,7 +189,6 @@
  *
  * This routine must be invoked from a work queue because it can sleep.
  */
-
 static void cleanup_bearer(struct work_struct *work)
 {
 	struct eth_bearer *eb_ptr =
@@ -216,7 +206,6 @@
  * then get worker thread to complete bearer cleanup.  (Can't do cleanup
  * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
-
 static void disable_bearer(struct tipc_bearer *tb_ptr)
 {
 	struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
@@ -232,7 +221,6 @@
  * Change the state of the Ethernet bearer (if any) associated with the
  * specified device.
  */
-
 static int recv_notification(struct notifier_block *nb, unsigned long evt,
 			     void *dv)
 {
@@ -281,7 +269,6 @@
 /**
  * eth_addr2str - convert Ethernet address to string
  */
-
 static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
 {
 	if (str_size < 18)	/* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */
@@ -294,7 +281,6 @@
 /**
  * eth_str2addr - convert string to Ethernet address
  */
-
 static int eth_str2addr(struct tipc_media_addr *a, char *str_buf)
 {
 	char mac[ETH_ALEN];
@@ -314,7 +300,6 @@
 /**
  * eth_str2addr - convert Ethernet address format to message header format
  */
-
 static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
 {
 	memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
@@ -326,7 +311,6 @@
 /**
  * eth_str2addr - convert message header address format to Ethernet format
  */
-
 static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area)
 {
 	if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH)
@@ -339,7 +323,6 @@
 /*
  * Ethernet media registration info
  */
-
 static struct tipc_media eth_media_info = {
 	.send_msg	= send_msg,
 	.enable_bearer	= enable_bearer,
@@ -363,7 +346,6 @@
  * Register Ethernet media type with TIPC bearer code.  Also register
  * with OS for notifications about device state changes.
  */
-
 int tipc_eth_media_start(void)
 {
 	int res;
@@ -386,7 +368,6 @@
 /**
  * tipc_eth_media_stop - deactivate Ethernet bearer support
  */
-
 void tipc_eth_media_stop(void)
 {
 	if (!eth_started)
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 274c98e..9c6f22f 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -129,4 +129,3 @@
 
 	kmem_cache_destroy(tipc_queue_item_cache);
 }
-
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b4b9b30..7a614f4 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -45,13 +45,11 @@
 /*
  * Out-of-range value for link session numbers
  */
-
 #define INVALID_SESSION 0x10000
 
 /*
  * Link state events:
  */
-
 #define  STARTING_EVT    856384768	/* link processing trigger */
 #define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
 #define  TIMEOUT_EVT     560817u	/* link timer expired */
@@ -67,7 +65,6 @@
 /*
  * State value stored in 'exp_msg_count'
  */
-
 #define START_CHANGEOVER 100000u
 
 /**
@@ -77,7 +74,6 @@
  * @addr_peer: network address of node at far end
  * @if_peer: name of interface at far end
  */
-
 struct tipc_link_name {
 	u32 addr_local;
 	char if_local[TIPC_MAX_IF_NAME];
@@ -105,7 +101,6 @@
 /*
  *  Simple link routines
  */
-
 static unsigned int align(unsigned int i)
 {
 	return (i + 3) & ~3u;
@@ -143,7 +138,6 @@
 /*
  *  Simple non-static link routines (i.e. referenced outside this file)
  */
-
 int tipc_link_is_up(struct tipc_link *l_ptr)
 {
 	if (!l_ptr)
@@ -164,7 +158,6 @@
  *
  * Returns 1 if link name is valid, otherwise 0.
  */
-
 static int link_name_validate(const char *name,
 				struct tipc_link_name *name_parts)
 {
@@ -180,7 +173,6 @@
 	u32 if_peer_len;
 
 	/* copy link name & ensure length is OK */
-
 	name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
 	/* need above in case non-Posix strncpy() doesn't pad with nulls */
 	strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
@@ -188,7 +180,6 @@
 		return 0;
 
 	/* ensure all component parts of link name are present */
-
 	addr_local = name_copy;
 	if_local = strchr(addr_local, ':');
 	if (if_local == NULL)
@@ -206,7 +197,6 @@
 	if_peer_len = strlen(if_peer) + 1;
 
 	/* validate component parts of link name */
-
 	if ((sscanf(addr_local, "%u.%u.%u%c",
 		    &z_local, &c_local, &n_local, &dummy) != 3) ||
 	    (sscanf(addr_peer, "%u.%u.%u%c",
@@ -220,7 +210,6 @@
 		return 0;
 
 	/* return link name components, if necessary */
-
 	if (name_parts) {
 		name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
 		strcpy(name_parts->if_local, if_local);
@@ -239,13 +228,11 @@
  * another thread because tipc_link_delete() always cancels the link timer before
  * tipc_node_delete() is called.)
  */
-
 static void link_timeout(struct tipc_link *l_ptr)
 {
 	tipc_node_lock(l_ptr->owner);
 
 	/* update counters used in statistical profiling of send traffic */
-
 	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
 	l_ptr->stats.queue_sz_counts++;
 
@@ -278,7 +265,6 @@
 	}
 
 	/* do all other link processing performed on a periodic basis */
-
 	link_check_defragm_bufs(l_ptr);
 
 	link_state_event(l_ptr, TIMEOUT_EVT);
@@ -302,7 +288,6 @@
  *
  * Returns pointer to link.
  */
-
 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 			      struct tipc_bearer *b_ptr,
 			      const struct tipc_media_addr *media_addr)
@@ -383,7 +368,6 @@
  * This routine must not grab the node lock until after link timer cancellation
  * to avoid a potential deadlock situation.
  */
-
 void tipc_link_delete(struct tipc_link *l_ptr)
 {
 	if (!l_ptr) {
@@ -419,7 +403,6 @@
  * Schedules port for renewed sending of messages after link congestion
  * has abated.
  */
-
 static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
 {
 	struct tipc_port *p_ptr;
@@ -476,7 +459,6 @@
  * link_release_outqueue - purge link's outbound message queue
  * @l_ptr: pointer to link
  */
-
 static void link_release_outqueue(struct tipc_link *l_ptr)
 {
 	struct sk_buff *buf = l_ptr->first_out;
@@ -495,7 +477,6 @@
  * tipc_link_reset_fragments - purge link's inbound message fragments queue
  * @l_ptr: pointer to link
  */
-
 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
 {
 	struct sk_buff *buf = l_ptr->defragm_buf;
@@ -513,7 +494,6 @@
  * tipc_link_stop - purge all inbound and outbound messages associated with link
  * @l_ptr: pointer to link
  */
-
 void tipc_link_stop(struct tipc_link *l_ptr)
 {
 	struct sk_buff *buf;
@@ -569,7 +549,6 @@
 	}
 
 	/* Clean up all queues: */
-
 	link_release_outqueue(l_ptr);
 	kfree_skb(l_ptr->proto_msg_queue);
 	l_ptr->proto_msg_queue = NULL;
@@ -611,8 +590,7 @@
  * @l_ptr: pointer to link
  * @event: state machine event to process
  */
-
-static void link_state_event(struct tipc_link *l_ptr, unsigned event)
+static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 {
 	struct tipc_link *other;
 	u32 cont_intv = l_ptr->continuity_interval;
@@ -785,7 +763,6 @@
  * link_bundle_buf(): Append contents of a buffer to
  * the tail of an existing one.
  */
-
 static int link_bundle_buf(struct tipc_link *l_ptr,
 			   struct sk_buff *bundler,
 			   struct sk_buff *buf)
@@ -860,7 +837,6 @@
  * inside TIPC when the 'fast path' in tipc_send_buf
  * has failed, and from link_send()
  */
-
 int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
 	struct tipc_msg *msg = buf_msg(buf);
@@ -872,7 +848,6 @@
 	u32 max_packet = l_ptr->max_pkt;
 
 	/* Match msg importance against queue limits: */
-
 	if (unlikely(queue_size >= queue_limit)) {
 		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
 			link_schedule_port(l_ptr, msg_origport(msg), size);
@@ -888,12 +863,10 @@
 	}
 
 	/* Fragmentation needed ? */
-
 	if (size > max_packet)
 		return link_send_long_buf(l_ptr, buf);
 
-	/* Packet can be queued or sent: */
-
+	/* Packet can be queued or sent. */
 	if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
 		   !link_congested(l_ptr))) {
 		link_add_to_outqueue(l_ptr, buf, msg);
@@ -907,13 +880,11 @@
 		}
 		return dsz;
 	}
-	/* Congestion: can message be bundled ?: */
-
+	/* Congestion: can message be bundled ? */
 	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
 	    (msg_user(msg) != MSG_FRAGMENTER)) {
 
 		/* Try adding message to an existing bundle */
-
 		if (l_ptr->next_out &&
 		    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
 			tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
@@ -921,7 +892,6 @@
 		}
 
 		/* Try creating a new bundle */
-
 		if (size <= max_packet * 2 / 3) {
 			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
 			struct tipc_msg bundler_hdr;
@@ -951,7 +921,6 @@
  * not been selected yet, and the owner node is not locked
  * Called by TIPC internal users, e.g. the name distributor
  */
-
 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
 {
 	struct tipc_link *l_ptr;
@@ -984,7 +953,6 @@
  * small enough not to require fragmentation.
  * Called without any locks held.
  */
-
 void tipc_link_send_names(struct list_head *message_list, u32 dest)
 {
 	struct tipc_node *n_ptr;
@@ -1013,7 +981,6 @@
 	read_unlock_bh(&tipc_net_lock);
 
 	/* discard the messages if they couldn't be sent */
-
 	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
 		list_del((struct list_head *)buf);
 		kfree_skb(buf);
@@ -1026,7 +993,6 @@
  * inclusive total message length. Very time critical.
  * Link is locked. Returns user data length.
  */
-
 static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
 			      u32 *used_max_pkt)
 {
@@ -1111,7 +1077,6 @@
 	 * Try building message using port's max_pkt hint.
 	 * (Must not hold any locks while building message.)
 	 */
-
 	res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
 			     sender->max_pkt, !sender->user_port, &buf);
 
@@ -1131,12 +1096,10 @@
 			}
 
 			/* Exit if build request was invalid */
-
 			if (unlikely(res < 0))
 				goto exit;
 
 			/* Exit if link (or bearer) is congested */
-
 			if (link_congested(l_ptr) ||
 			    !list_empty(&l_ptr->b_ptr->cong_links)) {
 				res = link_schedule_port(l_ptr,
@@ -1148,7 +1111,6 @@
 			 * Message size exceeds max_pkt hint; update hint,
 			 * then re-try fast path or fragment the message
 			 */
-
 			sender->max_pkt = l_ptr->max_pkt;
 			tipc_node_unlock(node);
 			read_unlock_bh(&tipc_net_lock);
@@ -1166,7 +1128,6 @@
 	read_unlock_bh(&tipc_net_lock);
 
 	/* Couldn't find a link to the destination node */
-
 	if (buf)
 		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
 	if (res >= 0)
@@ -1220,15 +1181,13 @@
 	sect_crs = NULL;
 	curr_sect = -1;
 
-	/* Prepare reusable fragment header: */
-
+	/* Prepare reusable fragment header */
 	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
 		 INT_H_SIZE, msg_destnode(hdr));
 	msg_set_size(&fragm_hdr, max_pkt);
 	msg_set_fragm_no(&fragm_hdr, 1);
 
-	/* Prepare header of first fragment: */
-
+	/* Prepare header of first fragment */
 	buf_chain = buf = tipc_buf_acquire(max_pkt);
 	if (!buf)
 		return -ENOMEM;
@@ -1237,8 +1196,7 @@
 	hsz = msg_hdr_sz(hdr);
 	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
 
-	/* Chop up message: */
-
+	/* Chop up message */
 	fragm_crs = INT_H_SIZE + hsz;
 	fragm_rest = fragm_sz - hsz;
 
@@ -1329,7 +1287,6 @@
 	}
 
 	/* Append chain of fragments to send queue & send them */
-
 	l_ptr->long_msg_seq_no++;
 	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
 	l_ptr->stats.sent_fragments += fragm_no;
@@ -1350,7 +1307,6 @@
 
 	/* Step to position where retransmission failed, if any,    */
 	/* consider that buffers may have been released in meantime */
-
 	if (r_q_size && buf) {
 		u32 last = lesser(mod(r_q_head + r_q_size),
 				  link_last_sent(l_ptr));
@@ -1365,7 +1321,6 @@
 	}
 
 	/* Continue retransmission now, if there is anything: */
-
 	if (r_q_size && buf) {
 		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
 		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
@@ -1381,7 +1336,6 @@
 	}
 
 	/* Send deferred protocol message, if any: */
-
 	buf = l_ptr->proto_msg_queue;
 	if (buf) {
 		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
@@ -1398,7 +1352,6 @@
 	}
 
 	/* Send one deferred data message, if send window not full: */
-
 	buf = l_ptr->next_out;
 	if (buf) {
 		struct tipc_msg *msg = buf_msg(buf);
@@ -1478,16 +1431,12 @@
 	warn("Retransmission failure on link <%s>\n", l_ptr->name);
 
 	if (l_ptr->addr) {
-
 		/* Handle failure on standard link */
-
 		link_print(l_ptr, "Resetting link\n");
 		tipc_link_reset(l_ptr);
 
 	} else {
-
 		/* Handle failure on broadcast link */
-
 		struct tipc_node *n_ptr;
 		char addr_string[16];
 
@@ -1536,7 +1485,6 @@
 		return;
 	} else {
 		/* Detect repeated retransmit failures on uncongested bearer */
-
 		if (l_ptr->last_retransmitted == msg_seqno(msg)) {
 			if (++l_ptr->stale_count > 100) {
 				link_retransmit_failure(l_ptr, buf);
@@ -1571,7 +1519,6 @@
 /**
  * link_insert_deferred_queue - insert deferred messages back into receive chain
  */
-
 static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
 						  struct sk_buff *buf)
 {
@@ -1602,7 +1549,6 @@
  * TIPC will ignore the excess, under the assumption that it is optional info
  * introduced by a later release of the protocol.
  */
-
 static int link_recv_buf_validate(struct sk_buff *buf)
 {
 	static u32 min_data_hdr_size[8] = {
@@ -1648,7 +1594,6 @@
  * Invoked with no locks held.  Bearer pointer must point to a valid bearer
  * structure (i.e. cannot be NULL), but bearer can be inactive.
  */
-
 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
 {
 	read_lock_bh(&tipc_net_lock);
@@ -1666,22 +1611,18 @@
 		head = head->next;
 
 		/* Ensure bearer is still enabled */
-
 		if (unlikely(!b_ptr->active))
 			goto cont;
 
 		/* Ensure message is well-formed */
-
 		if (unlikely(!link_recv_buf_validate(buf)))
 			goto cont;
 
 		/* Ensure message data is a single contiguous unit */
-
 		if (unlikely(skb_linearize(buf)))
 			goto cont;
 
 		/* Handle arrival of a non-unicast link message */
-
 		msg = buf_msg(buf);
 
 		if (unlikely(msg_non_seq(msg))) {
@@ -1693,20 +1634,17 @@
 		}
 
 		/* Discard unicast link messages destined for another node */
-
 		if (unlikely(!msg_short(msg) &&
 			     (msg_destnode(msg) != tipc_own_addr)))
 			goto cont;
 
 		/* Locate neighboring node that sent message */
-
 		n_ptr = tipc_node_find(msg_prevnode(msg));
 		if (unlikely(!n_ptr))
 			goto cont;
 		tipc_node_lock(n_ptr);
 
 		/* Locate unicast link endpoint that should handle message */
-
 		l_ptr = n_ptr->links[b_ptr->identity];
 		if (unlikely(!l_ptr)) {
 			tipc_node_unlock(n_ptr);
@@ -1714,7 +1652,6 @@
 		}
 
 		/* Verify that communication with node is currently allowed */
-
 		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
 			msg_user(msg) == LINK_PROTOCOL &&
 			(msg_type(msg) == RESET_MSG ||
@@ -1728,12 +1665,10 @@
 		}
 
 		/* Validate message sequence number info */
-
 		seq_no = msg_seqno(msg);
 		ackd = msg_ack(msg);
 
 		/* Release acked messages */
-
 		if (n_ptr->bclink.supported)
 			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
 
@@ -1752,7 +1687,6 @@
 		}
 
 		/* Try sending any messages link endpoint has pending */
-
 		if (unlikely(l_ptr->next_out))
 			tipc_link_push_queue(l_ptr);
 		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
@@ -1763,7 +1697,6 @@
 		}
 
 		/* Now (finally!) process the incoming message */
-
 protocol_check:
 		if (likely(link_working_working(l_ptr))) {
 			if (likely(seq_no == mod(l_ptr->next_in_no))) {
@@ -1859,7 +1792,6 @@
  *
  * Returns increase in queue length (i.e. 0 or 1)
  */
-
 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
 			struct sk_buff *buf)
 {
@@ -1908,7 +1840,6 @@
 /*
  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
  */
-
 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
 				       struct sk_buff *buf)
 {
@@ -1920,14 +1851,12 @@
 	}
 
 	/* Record OOS packet arrival (force mismatch on next timeout) */
-
 	l_ptr->checkpoint--;
 
 	/*
 	 * Discard packet if a duplicate; otherwise add it to deferred queue
 	 * and notify peer of gap as per protocol specification
 	 */
-
 	if (less(seq_no, mod(l_ptr->next_in_no))) {
 		l_ptr->stats.duplicates++;
 		kfree_skb(buf);
@@ -1957,7 +1886,6 @@
 	int r_flag;
 
 	/* Discard any previous message that was deferred due to congestion */
-
 	if (l_ptr->proto_msg_queue) {
 		kfree_skb(l_ptr->proto_msg_queue);
 		l_ptr->proto_msg_queue = NULL;
@@ -1967,12 +1895,10 @@
 		return;
 
 	/* Abort non-RESET send if communication with node is prohibited */
-
 	if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
 		return;
 
 	/* Create protocol message with "out-of-sequence" sequence number */
-
 	msg_set_type(msg, msg_typ);
 	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
 	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
@@ -2040,14 +1966,12 @@
 	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
 
 	/* Defer message if bearer is already congested */
-
 	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
 		l_ptr->proto_msg_queue = buf;
 		return;
 	}
 
 	/* Defer message if attempting to send results in bearer congestion */
-
 	if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
 		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
 		l_ptr->proto_msg_queue = buf;
@@ -2056,7 +1980,6 @@
 	}
 
 	/* Discard message if it was sent successfully */
-
 	l_ptr->unacked_window = 0;
 	kfree_skb(buf);
 }
@@ -2066,7 +1989,6 @@
  * Note that network plane id propagates through the network, and may
  * change at any time. The node with lowest address rules
  */
-
 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
 	u32 rec_gap = 0;
@@ -2079,7 +2001,6 @@
 		goto exit;
 
 	/* record unnumbered packet arrival (force mismatch on next timeout) */
-
 	l_ptr->checkpoint--;
 
 	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
@@ -2111,7 +2032,6 @@
 		/* fall thru' */
 	case ACTIVATE_MSG:
 		/* Update link settings according other endpoint's values */
-
 		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
 
 		msg_tol = msg_link_tolerance(msg);
@@ -2133,7 +2053,6 @@
 		l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
 
 		/* Synchronize broadcast link info, if not done previously */
-
 		if (!tipc_node_is_up(l_ptr->owner)) {
 			l_ptr->owner->bclink.last_sent =
 				l_ptr->owner->bclink.last_in =
@@ -2185,7 +2104,6 @@
 		}
 
 		/* Protocol message before retransmits, reduce loss risk */
-
 		if (l_ptr->owner->bclink.supported)
 			tipc_bclink_update_link_state(l_ptr->owner,
 						      msg_last_bcast(msg));
@@ -2243,7 +2161,6 @@
  * changeover(): Send whole message queue via the remaining link
  *               Owner node is locked.
  */
-
 void tipc_link_changeover(struct tipc_link *l_ptr)
 {
 	u32 msgcount = l_ptr->out_queue_size;
@@ -2343,8 +2260,6 @@
 	}
 }
 
-
-
 /**
  * buf_extract - extracts embedded TIPC message from another message
  * @skb: encapsulating message buffer
@@ -2353,7 +2268,6 @@
  * Returns a new message buffer containing an embedded message.  The
  * encapsulating message itself is left unchanged.
  */
-
 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
 {
 	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
@@ -2370,7 +2284,6 @@
  *  link_recv_changeover_msg(): Receive tunneled packet sent
  *  via other link. Node is locked. Return extracted buffer.
  */
-
 static int link_recv_changeover_msg(struct tipc_link **l_ptr,
 				    struct sk_buff **buf)
 {
@@ -2405,7 +2318,6 @@
 	}
 
 	/* First original message ?: */
-
 	if (tipc_link_is_up(dest_link)) {
 		info("Resetting link <%s>, changeover initiated by peer\n",
 		     dest_link->name);
@@ -2420,7 +2332,6 @@
 	}
 
 	/* Receive original message */
-
 	if (dest_link->exp_msg_count == 0) {
 		warn("Link switchover error, "
 		     "got too many tunnelled messages\n");
@@ -2469,7 +2380,6 @@
  *  Fragmentation/defragmentation:
  */
 
-
 /*
  * link_send_long_buf: Entry for buffers needing fragmentation.
  * The buffer is complete, inclusive total message length.
@@ -2496,12 +2406,10 @@
 		destaddr = msg_destnode(inmsg);
 
 	/* Prepare reusable fragment header: */
-
 	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
 		 INT_H_SIZE, destaddr);
 
 	/* Chop up message: */
-
 	while (rest > 0) {
 		struct sk_buff *fragm;
 
@@ -2535,7 +2443,6 @@
 	kfree_skb(buf);
 
 	/* Append chain of fragments to send queue & send them */
-
 	l_ptr->long_msg_seq_no++;
 	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
 	l_ptr->stats.sent_fragments += fragm_no;
@@ -2551,7 +2458,6 @@
  * help storing these values in unused, available fields in the
  * pending message. This makes dynamic memory allocation unnecessary.
  */
-
 static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
 {
 	msg_set_seqno(buf_msg(buf), seqno);
@@ -2603,7 +2509,6 @@
 	*fb = NULL;
 
 	/* Is there an incomplete message waiting for this fragment? */
-
 	while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
 			(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
 		prev = pbuf;
@@ -2629,7 +2534,6 @@
 			skb_copy_to_linear_data(pbuf, imsg,
 						msg_data_sz(fragm));
 			/*  Prepare buffer for subsequent fragments. */
-
 			set_long_msg_seqno(pbuf, long_msg_seq_no);
 			set_fragm_size(pbuf, fragm_sz);
 			set_expected_frags(pbuf, exp_fragm_cnt - 1);
@@ -2650,7 +2554,6 @@
 		kfree_skb(fbuf);
 
 		/* Is message complete? */
-
 		if (exp_frags == 0) {
 			if (prev)
 				prev->next = pbuf->next;
@@ -2672,7 +2575,6 @@
  * link_check_defragm_bufs - flush stale incoming message fragments
  * @l_ptr: pointer to link
  */
-
 static void link_check_defragm_bufs(struct tipc_link *l_ptr)
 {
 	struct sk_buff *prev = NULL;
@@ -2701,8 +2603,6 @@
 	}
 }
 
-
-
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
 {
 	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
@@ -2714,7 +2614,6 @@
 	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
 }
 
-
 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
 {
 	/* Data messages from this node, inclusive FIRST_FRAGM */
@@ -2744,7 +2643,6 @@
  *
  * Returns pointer to link (or 0 if invalid link name).
  */
-
 static struct tipc_link *link_find_link(const char *name,
 					struct tipc_node **node)
 {
@@ -2778,7 +2676,6 @@
  *
  * Returns 1 if value is within range, 0 if not.
  */
-
 static int link_value_is_valid(u16 cmd, u32 new_value)
 {
 	switch (cmd) {
@@ -2794,7 +2691,6 @@
 	return 0;
 }
 
-
 /**
  * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
  * @name - ptr to link, bearer, or media name
@@ -2805,7 +2701,6 @@
  *
  * Returns 0 if value updated and negative value on error.
  */
-
 static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
 {
 	struct tipc_node *node;
@@ -2910,7 +2805,6 @@
  * link_reset_statistics - reset link statistics
  * @l_ptr: pointer to link
  */
-
 static void link_reset_statistics(struct tipc_link *l_ptr)
 {
 	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
@@ -2951,7 +2845,6 @@
 /**
  * percent - convert count to a percentage of total (rounding up or down)
  */
-
 static u32 percent(u32 count, u32 total)
 {
 	return (count * 100 + (total / 2)) / total;
@@ -2965,7 +2858,6 @@
  *
  * Returns length of print buffer data string (or 0 if error)
  */
-
 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
 {
 	struct print_buf pb;
@@ -3087,7 +2979,6 @@
  *
  * If no active link can be found, uses default maximum packet size.
  */
-
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
 {
 	struct tipc_node *n_ptr;
@@ -3171,4 +3062,3 @@
 	tipc_printbuf_validate(buf);
 	info("%s", print_area);
 }
-
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 73c18c1..d6a60a9 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -47,13 +47,11 @@
 /*
  * Out-of-range value for link sequence numbers
  */
-
 #define INVALID_LINK_SEQ 0x10000
 
 /*
  * Link states
  */
-
 #define WORKING_WORKING 560810u
 #define WORKING_UNKNOWN 560811u
 #define RESET_UNKNOWN   560812u
@@ -63,7 +61,6 @@
  * Starting value for maximum packet size negotiation on unicast links
  * (unless bearer MTU is less)
  */
-
 #define MAX_PKT_DEFAULT 1500
 
 /**
@@ -114,7 +111,6 @@
  * @defragm_buf: list of partially reassembled inbound message fragments
  * @stats: collects statistics regarding link activity
  */
-
 struct tipc_link {
 	u32 addr;
 	char name[TIPC_MAX_LINK_NAME];
@@ -255,7 +251,6 @@
 /*
  * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
  */
-
 static inline u32 buf_seqno(struct sk_buff *buf)
 {
 	return msg_seqno(buf_msg(buf));
@@ -294,7 +289,6 @@
 /*
  * Link status checking routines
  */
-
 static inline int link_working_working(struct tipc_link *l_ptr)
 {
 	return l_ptr->state == WORKING_WORKING;
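
The header above notes that link sequence numbers use modulo 2**16 arithmetic. A minimal standalone sketch of the wraparound comparison, assuming the convention that one number precedes another when the 16-bit circular distance between them is small; the kernel's own helpers differ in form but not intent:

#include <stdio.h>
#include <stdint.h>

static uint32_t mod(uint32_t x)
{
	return x & 0xffffu;		/* sequence numbers live in 16 bits */
}

/* "left precedes right" on the 16-bit circle */
static int less(uint32_t left, uint32_t right)
{
	uint32_t dist = mod(right - left);

	return dist != 0 && dist < 0x8000u;
}

static uint32_t lesser(uint32_t left, uint32_t right)
{
	return less(left, right) ? left : right;
}

int main(void)
{
	/* 0xfffe still precedes 0x0001 after the counter wraps */
	printf("%d %u\n", less(0xfffe, 0x0001), lesser(0xfffe, 0x0001));
	return 0;
}
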
diff --git a/net/tipc/log.c b/net/tipc/log.c
index 895c6e5..026733f2 100644
--- a/net/tipc/log.c
+++ b/net/tipc/log.c
@@ -47,7 +47,6 @@
  *
  * Additional user-defined print buffers are also permitted.
  */
-
 static struct print_buf null_buf = { NULL, 0, NULL, 0 };
 struct print_buf *const TIPC_NULL = &null_buf;
 
@@ -72,7 +71,6 @@
  * on the caller to prevent simultaneous use of the print buffer(s) being
  * manipulated.
  */
-
 static char print_string[TIPC_PB_MAX_STR];
 static DEFINE_SPINLOCK(print_lock);
 
@@ -97,7 +95,6 @@
  * Note: If the character array is too small (or absent), the print buffer
  * becomes a null device that discards anything written to it.
  */
-
 void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
 {
 	pb->buf = raw;
@@ -117,7 +114,6 @@
  * tipc_printbuf_reset - reinitialize print buffer to empty state
  * @pb: pointer to print buffer structure
  */
-
 static void tipc_printbuf_reset(struct print_buf *pb)
 {
 	if (pb->buf) {
@@ -133,7 +129,6 @@
  *
  * Returns non-zero if print buffer is empty.
  */
-
 static int tipc_printbuf_empty(struct print_buf *pb)
 {
 	return !pb->buf || (pb->crs == pb->buf);
@@ -148,7 +143,6 @@
  *
  * Returns length of print buffer data string (including trailing NUL)
  */
-
 int tipc_printbuf_validate(struct print_buf *pb)
 {
 	char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
@@ -182,14 +176,12 @@
  * Current contents of destination print buffer (if any) are discarded.
  * Source print buffer becomes empty if a successful move occurs.
  */
-
 static void tipc_printbuf_move(struct print_buf *pb_to,
 			       struct print_buf *pb_from)
 {
 	int len;
 
 	/* Handle the cases where contents can't be moved */
-
 	if (!pb_to->buf)
 		return;
 
@@ -206,7 +198,6 @@
 	}
 
 	/* Copy data from char after cursor to end (if used) */
-
 	len = pb_from->buf + pb_from->size - pb_from->crs - 2;
 	if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
 		strcpy(pb_to->buf, pb_from->crs + 1);
@@ -215,7 +206,6 @@
 		pb_to->crs = pb_to->buf;
 
 	/* Copy data from start to cursor (always) */
-
 	len = pb_from->crs - pb_from->buf;
 	strcpy(pb_to->crs, pb_from->buf);
 	pb_to->crs += len;
@@ -228,7 +218,6 @@
  * @pb: pointer to print buffer
  * @fmt: formatted info to be printed
  */
-
 void tipc_printf(struct print_buf *pb, const char *fmt, ...)
 {
 	int chars_to_add;
@@ -270,7 +259,6 @@
  * tipc_log_resize - change the size of the TIPC log buffer
  * @log_size: print buffer size to use
  */
-
 int tipc_log_resize(int log_size)
 {
 	int res = 0;
@@ -295,7 +283,6 @@
 /**
  * tipc_log_resize_cmd - reconfigure size of TIPC log buffer
  */
-
 struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
 {
 	u32 value;
@@ -316,7 +303,6 @@
 /**
  * tipc_log_dump - capture TIPC log buffer contents in configuration message
  */
-
 struct sk_buff *tipc_log_dump(void)
 {
 	struct sk_buff *reply;
diff --git a/net/tipc/log.h b/net/tipc/log.h
index 2248d96..d1f5eb9 100644
--- a/net/tipc/log.h
+++ b/net/tipc/log.h
@@ -44,7 +44,6 @@
  * @crs: pointer to first unused space in character array (i.e. final NUL)
  * @echo: echo output to system console if non-zero
  */
-
 struct print_buf {
 	char *buf;
 	u32 size;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index e3afe16..deea0d2 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -72,7 +72,6 @@
  *
  * Returns message data size or errno
  */
-
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
 		   u32 num_sect, unsigned int total_len,
 			    int max_size, int usrmem, struct sk_buff **buf)
@@ -112,7 +111,6 @@
 }
 
 #ifdef CONFIG_TIPC_DEBUG
-
 void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
 {
 	u32 usr = msg_user(msg);
@@ -352,5 +350,4 @@
 	if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT))
 		tipc_msg_dbg(buf, msg_get_wrapped(msg), "      /");
 }
-
 #endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index eba524e3..ba2a72b 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -44,7 +44,6 @@
  *
  * Note: Some items are also used with TIPC internal message headers
  */
-
 #define TIPC_VERSION              2
 
 /*
@@ -58,7 +57,6 @@
 /*
  * Payload message types
  */
-
 #define TIPC_CONN_MSG		0
 #define TIPC_MCAST_MSG		1
 #define TIPC_NAMED_MSG		2
@@ -67,7 +65,6 @@
 /*
  * Message header sizes
  */
-
 #define SHORT_H_SIZE              24	/* In-cluster basic payload message */
 #define BASIC_H_SIZE              32	/* Basic payload message */
 #define NAMED_H_SIZE              40	/* Named payload message */
@@ -121,7 +118,6 @@
 /*
  * Word 0
  */
-
 static inline u32 msg_version(struct tipc_msg *m)
 {
 	return msg_bits(m, 0, 29, 7);
@@ -216,7 +212,6 @@
 /*
  * Word 1
  */
-
 static inline u32 msg_type(struct tipc_msg *m)
 {
 	return msg_bits(m, 1, 29, 0x7);
@@ -291,7 +286,6 @@
 /*
  * Word 2
  */
-
 static inline u32 msg_ack(struct tipc_msg *m)
 {
 	return msg_bits(m, 2, 16, 0xffff);
@@ -315,8 +309,6 @@
 /*
  * Words 3-10
  */
-
-
 static inline u32 msg_prevnode(struct tipc_msg *m)
 {
 	return msg_word(m, 3);
@@ -434,7 +426,6 @@
 	return (struct tipc_msg *)msg_data(m);
 }
 
-
 /*
  * Constants and routines used to read and write TIPC internal message headers
  */
@@ -442,7 +433,6 @@
 /*
  * Internal message users
  */
-
 #define  BCAST_PROTOCOL       5
 #define  MSG_BUNDLER          6
 #define  LINK_PROTOCOL        7
@@ -456,7 +446,6 @@
 /*
  *  Connection management protocol message types
  */
-
 #define CONN_PROBE        0
 #define CONN_PROBE_REPLY  1
 #define CONN_ACK          2
@@ -464,14 +453,12 @@
 /*
  * Name distributor message types
  */
-
 #define PUBLICATION       0
 #define WITHDRAWAL        1
 
 /*
  * Segmentation message types
  */
-
 #define FIRST_FRAGMENT		0
 #define FRAGMENT		1
 #define LAST_FRAGMENT		2
@@ -479,7 +466,6 @@
 /*
  * Link management protocol message types
  */
-
 #define STATE_MSG		0
 #define RESET_MSG		1
 #define ACTIVATE_MSG		2
@@ -493,7 +479,6 @@
 /*
  * Config protocol message types
  */
-
 #define DSC_REQ_MSG		0
 #define DSC_RESP_MSG		1
 
@@ -501,7 +486,6 @@
 /*
  * Word 1
  */
-
 static inline u32 msg_seq_gap(struct tipc_msg *m)
 {
 	return msg_bits(m, 1, 16, 0x1fff);
@@ -526,7 +510,6 @@
 /*
  * Word 2
  */
-
 static inline u32 msg_dest_domain(struct tipc_msg *m)
 {
 	return msg_word(m, 2);
@@ -561,7 +544,6 @@
 /*
  * Word 4
  */
-
 static inline u32 msg_last_bcast(struct tipc_msg *m)
 {
 	return msg_bits(m, 4, 16, 0xffff);
@@ -628,7 +610,6 @@
 /*
  * Word 5
  */
-
 static inline u32 msg_session(struct tipc_msg *m)
 {
 	return msg_bits(m, 5, 16, 0xffff);
@@ -697,7 +678,6 @@
 /*
  * Word 9
  */
-
 static inline u32 msg_msgcnt(struct tipc_msg *m)
 {
 	return msg_bits(m, 9, 16, 0xffff);
@@ -744,5 +724,4 @@
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
 		   u32 num_sect, unsigned int total_len,
 			    int max_size, int usrmem, struct sk_buff **buf);
-
 #endif
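
The getters above all follow one pattern: each header field is addressed by a (word, bit position, mask) triple inside an array of 32-bit words. A minimal standalone sketch of that accessor pattern, host byte order only (the kernel additionally converts with ntohl/htonl, and the field placement below is illustrative):

#include <stdio.h>
#include <stdint.h>

struct hdr {
	uint32_t word[11];		/* words 0-10, as in the header above */
};

static uint32_t get_bits(struct hdr *h, int w, int pos, uint32_t mask)
{
	return (h->word[w] >> pos) & mask;
}

static void set_bits(struct hdr *h, int w, int pos, uint32_t mask, uint32_t val)
{
	h->word[w] &= ~(mask << pos);
	h->word[w] |= (val & mask) << pos;
}

int main(void)
{
	struct hdr h = { { 0 } };

	set_bits(&h, 0, 29, 0x7, 2);	/* version field: word 0, bits 29-31 */
	printf("version = %u\n", get_bits(&h, 0, 29, 0x7));
	return 0;
}
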
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index d57da61..158318e 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -58,7 +58,6 @@
  * Note: There is no field that identifies the publishing node because it is
  * the same for all items contained within a publication message.
  */
-
 struct distr_item {
 	__be32 type;
 	__be32 lower;
@@ -68,17 +67,41 @@
 };
 
 /**
- * List of externally visible publications by this node --
- * that is, all publications having scope > TIPC_NODE_SCOPE.
+ * struct publ_list - list of publications made by this node
+ * @list: circular list of publications
+ * @size: number of entries in list
  */
+struct publ_list {
+	struct list_head list;
+	u32 size;
+};
 
-static LIST_HEAD(publ_root);
-static u32 publ_cnt;
+static struct publ_list publ_zone = {
+	.list = LIST_HEAD_INIT(publ_zone.list),
+	.size = 0,
+};
+
+static struct publ_list publ_cluster = {
+	.list = LIST_HEAD_INIT(publ_cluster.list),
+	.size = 0,
+};
+
+static struct publ_list publ_node = {
+	.list = LIST_HEAD_INIT(publ_node.list),
+	.size = 0,
+};
+
+static struct publ_list *publ_lists[] = {
+	NULL,
+	&publ_zone,	/* publ_lists[TIPC_ZONE_SCOPE]		*/
+	&publ_cluster,	/* publ_lists[TIPC_CLUSTER_SCOPE]	*/
+	&publ_node	/* publ_lists[TIPC_NODE_SCOPE]		*/
+};
+
 
 /**
  * publ_to_item - add publication info to a publication message
  */
-
 static void publ_to_item(struct distr_item *i, struct publication *p)
 {
 	i->type = htonl(p->type);
@@ -91,7 +114,6 @@
 /**
  * named_prepare_buf - allocate & initialize a publication message
  */
-
 static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
 {
 	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
@@ -126,14 +148,16 @@
 /**
  * tipc_named_publish - tell other nodes about a new publication by this node
  */
-
 void tipc_named_publish(struct publication *publ)
 {
 	struct sk_buff *buf;
 	struct distr_item *item;
 
-	list_add_tail(&publ->local_list, &publ_root);
-	publ_cnt++;
+	list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
+	publ_lists[publ->scope]->size++;
+
+	if (publ->scope == TIPC_NODE_SCOPE)
+		return;
 
 	buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
 	if (!buf) {
@@ -149,14 +173,16 @@
 /**
  * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
  */
-
 void tipc_named_withdraw(struct publication *publ)
 {
 	struct sk_buff *buf;
 	struct distr_item *item;
 
 	list_del(&publ->local_list);
-	publ_cnt--;
+	publ_lists[publ->scope]->size--;
+
+	if (publ->scope == TIPC_NODE_SCOPE)
+		return;
 
 	buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
 	if (!buf) {
@@ -169,25 +195,51 @@
 	named_cluster_distribute(buf);
 }
 
+/*
+ * named_distribute - prepare name info for bulk distribution to another node
+ */
+static void named_distribute(struct list_head *message_list, u32 node,
+			     struct publ_list *pls, u32 max_item_buf)
+{
+	struct publication *publ;
+	struct sk_buff *buf = NULL;
+	struct distr_item *item = NULL;
+	u32 left = 0;
+	u32 rest = pls->size * ITEM_SIZE;
+
+	list_for_each_entry(publ, &pls->list, local_list) {
+		if (!buf) {
+			left = (rest <= max_item_buf) ? rest : max_item_buf;
+			rest -= left;
+			buf = named_prepare_buf(PUBLICATION, left, node);
+			if (!buf) {
+				warn("Bulk publication failure\n");
+				return;
+			}
+			item = (struct distr_item *)msg_data(buf_msg(buf));
+		}
+		publ_to_item(item, publ);
+		item++;
+		left -= ITEM_SIZE;
+		if (!left) {
+			list_add_tail((struct list_head *)buf, message_list);
+			buf = NULL;
+		}
+	}
+}
+
 /**
  * tipc_named_node_up - tell specified node about all publications by this node
  */
-
 void tipc_named_node_up(unsigned long nodearg)
 {
 	struct tipc_node *n_ptr;
 	struct tipc_link *l_ptr;
-	struct publication *publ;
-	struct distr_item *item = NULL;
-	struct sk_buff *buf = NULL;
 	struct list_head message_list;
 	u32 node = (u32)nodearg;
-	u32 left = 0;
-	u32 rest;
 	u32 max_item_buf = 0;
 
 	/* compute maximum amount of publication data to send per message */
-
 	read_lock_bh(&tipc_net_lock);
 	n_ptr = tipc_node_find(node);
 	if (n_ptr) {
@@ -203,32 +255,11 @@
 		return;
 
 	/* create list of publication messages, then send them as a unit */
-
 	INIT_LIST_HEAD(&message_list);
 
 	read_lock_bh(&tipc_nametbl_lock);
-	rest = publ_cnt * ITEM_SIZE;
-
-	list_for_each_entry(publ, &publ_root, local_list) {
-		if (!buf) {
-			left = (rest <= max_item_buf) ? rest : max_item_buf;
-			rest -= left;
-			buf = named_prepare_buf(PUBLICATION, left, node);
-			if (!buf) {
-				warn("Bulk publication distribution failure\n");
-				goto exit;
-			}
-			item = (struct distr_item *)msg_data(buf_msg(buf));
-		}
-		publ_to_item(item, publ);
-		item++;
-		left -= ITEM_SIZE;
-		if (!left) {
-			list_add_tail((struct list_head *)buf, &message_list);
-			buf = NULL;
-		}
-	}
-exit:
+	named_distribute(&message_list, node, &publ_cluster, max_item_buf);
+	named_distribute(&message_list, node, &publ_zone, max_item_buf);
 	read_unlock_bh(&tipc_nametbl_lock);
 
 	tipc_link_send_names(&message_list, (u32)node);
@@ -240,7 +271,6 @@
  * Invoked for each publication issued by a newly failed node.
  * Removes publication structure from name table & deletes it.
  */
-
 static void named_purge_publ(struct publication *publ)
 {
 	struct publication *p;
@@ -264,7 +294,6 @@
 /**
  * tipc_named_recv - process name table update message sent by another node
  */
-
 void tipc_named_recv(struct sk_buff *buf)
 {
 	struct publication *publ;
@@ -316,21 +345,22 @@
 }
 
 /**
- * tipc_named_reinit - re-initialize local publication list
+ * tipc_named_reinit - re-initialize local publications
  *
  * This routine is called whenever TIPC networking is enabled.
- * All existing publications by this node that have "cluster" or "zone" scope
- * are updated to reflect the node's new network address.
+ * All name table entries published by this node are updated to reflect
+ * the node's new network address.
  */
-
 void tipc_named_reinit(void)
 {
 	struct publication *publ;
+	int scope;
 
 	write_lock_bh(&tipc_nametbl_lock);
 
-	list_for_each_entry(publ, &publ_root, local_list)
-		publ->node = tipc_own_addr;
+	for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
+		list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
+			publ->node = tipc_own_addr;
 
 	write_unlock_bh(&tipc_nametbl_lock);
 }
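
The change above replaces the single publ_root list with one list per publication scope, so node-scope names are tracked locally but never distributed. A minimal standalone sketch of that bookkeeping, using singly linked lists instead of list_head; the scope values follow the usual TIPC_ZONE/CLUSTER/NODE_SCOPE ordering and are shown here only for illustration:

#include <stdio.h>

enum { ZONE_SCOPE = 1, CLUSTER_SCOPE = 2, NODE_SCOPE = 3 };

struct publication {
	unsigned int type, lower, upper, scope;
	struct publication *next;
};

struct publ_list {
	struct publication *head;
	unsigned int size;
};

static struct publ_list publ_lists[NODE_SCOPE + 1];

static void publish(struct publication *p)
{
	struct publ_list *l = &publ_lists[p->scope];

	p->next = l->head;
	l->head = p;
	l->size++;

	if (p->scope == NODE_SCOPE)
		return;			/* node-scope names stay local */
	/* ...a real implementation would send a PUBLICATION message here... */
}

int main(void)
{
	struct publication a = { 1000, 1, 1, NODE_SCOPE, NULL };
	struct publication b = { 1000, 2, 2, CLUSTER_SCOPE, NULL };

	publish(&a);
	publish(&b);
	printf("node=%u cluster=%u zone=%u\n",
	       publ_lists[NODE_SCOPE].size,
	       publ_lists[CLUSTER_SCOPE].size,
	       publ_lists[ZONE_SCOPE].size);
	return 0;
}
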
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index c6a1ae3..010f24a 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -56,7 +56,6 @@
  *       publications of the associated name sequence belong to it.
  *       (The cluster and node lists may be empty.)
  */
-
 struct name_info {
 	struct list_head node_list;
 	struct list_head cluster_list;
@@ -72,7 +71,6 @@
  * @upper: name sequence upper bound
  * @info: pointer to name sequence publication info
  */
-
 struct sub_seq {
 	u32 lower;
 	u32 upper;
@@ -90,7 +88,6 @@
  * @subscriptions: list of subscriptions for this 'type'
  * @lock: spinlock controlling access to publication lists of all sub-sequences
  */
-
 struct name_seq {
 	u32 type;
 	struct sub_seq *sseqs;
@@ -107,7 +104,6 @@
  *         accessed via hashing on 'type'; name sequence lists are *not* sorted
  * @local_publ_count: number of publications issued by this node
  */
-
 struct name_table {
 	struct hlist_head *types;
 	u32 local_publ_count;
@@ -124,7 +120,6 @@
 /**
  * publ_create - create a publication structure
  */
-
 static struct publication *publ_create(u32 type, u32 lower, u32 upper,
 				       u32 scope, u32 node, u32 port_ref,
 				       u32 key)
@@ -151,7 +146,6 @@
 /**
  * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
  */
-
 static struct sub_seq *tipc_subseq_alloc(u32 cnt)
 {
 	struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
@@ -163,7 +157,6 @@
  *
  * Allocates a single sub-sequence structure and sets it to all 0's.
  */
-
 static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
 {
 	struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
@@ -186,12 +179,23 @@
 	return nseq;
 }
 
-/**
+/*
+ * nameseq_delete_empty - deletes a name sequence structure if now unused
+ */
+static void nameseq_delete_empty(struct name_seq *seq)
+{
+	if (!seq->first_free && list_empty(&seq->subscriptions)) {
+		hlist_del_init(&seq->ns_list);
+		kfree(seq->sseqs);
+		kfree(seq);
+	}
+}
+
+/*
  * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
  *
  * Very time-critical, so binary searches through sub-sequence array.
  */
-
 static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
 					   u32 instance)
 {
@@ -221,7 +225,6 @@
  *
  * Note: Similar to binary search code for locating a sub-sequence.
  */
-
 static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
 {
 	struct sub_seq *sseqs = nseq->sseqs;
@@ -242,9 +245,8 @@
 }
 
 /**
- * tipc_nameseq_insert_publ -
+ * tipc_nameseq_insert_publ
  */
-
 static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
 						    u32 type, u32 lower, u32 upper,
 						    u32 scope, u32 node, u32 port, u32 key)
@@ -260,7 +262,6 @@
 	if (sseq) {
 
 		/* Lower end overlaps existing entry => need an exact match */
-
 		if ((sseq->lower != lower) || (sseq->upper != upper)) {
 			warn("Cannot publish {%u,%u,%u}, overlap error\n",
 			     type, lower, upper);
@@ -280,11 +281,9 @@
 		struct sub_seq *freesseq;
 
 		/* Find where lower end should be inserted */
-
 		inspos = nameseq_locate_subseq(nseq, lower);
 
 		/* Fail if upper end overlaps into an existing entry */
-
 		if ((inspos < nseq->first_free) &&
 		    (upper >= nseq->sseqs[inspos].lower)) {
 			warn("Cannot publish {%u,%u,%u}, overlap error\n",
@@ -293,7 +292,6 @@
 		}
 
 		/* Ensure there is space for new sub-sequence */
-
 		if (nseq->first_free == nseq->alloc) {
 			struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
 
@@ -321,7 +319,6 @@
 		INIT_LIST_HEAD(&info->zone_list);
 
 		/* Insert new sub-sequence */
-
 		sseq = &nseq->sseqs[inspos];
 		freesseq = &nseq->sseqs[nseq->first_free];
 		memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
@@ -333,8 +330,7 @@
 		created_subseq = 1;
 	}
 
-	/* Insert a publication: */
-
+	/* Insert a publication */
 	publ = publ_create(type, lower, upper, scope, node, port, key);
 	if (!publ)
 		return NULL;
@@ -347,14 +343,12 @@
 		info->cluster_list_size++;
 	}
 
-	if (node == tipc_own_addr) {
+	if (in_own_node(node)) {
 		list_add(&publ->node_list, &info->node_list);
 		info->node_list_size++;
 	}
 
-	/*
-	 * Any subscriptions waiting for notification?
-	 */
+	/* Any subscriptions waiting for notification?  */
 	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
 		tipc_subscr_report_overlap(s,
 					   publ->lower,
@@ -368,7 +362,7 @@
 }
 
 /**
- * tipc_nameseq_remove_publ -
+ * tipc_nameseq_remove_publ
  *
  * NOTE: There may be cases where TIPC is asked to remove a publication
  * that is not in the name table.  For example, if another node issues a
@@ -378,7 +372,6 @@
  * A failed withdraw request simply returns a failure indication and lets the
  * caller issue any error or warning messages associated with such a problem.
  */
-
 static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
 						    u32 node, u32 ref, u32 key)
 {
@@ -395,7 +388,6 @@
 	info = sseq->info;
 
 	/* Locate publication, if it exists */
-
 	list_for_each_entry(publ, &info->zone_list, zone_list) {
 		if ((publ->key == key) && (publ->ref == ref) &&
 		    (!publ->node || (publ->node == node)))
@@ -405,26 +397,22 @@
 
 found:
 	/* Remove publication from zone scope list */
-
 	list_del(&publ->zone_list);
 	info->zone_list_size--;
 
 	/* Remove publication from cluster scope list, if present */
-
 	if (in_own_cluster(node)) {
 		list_del(&publ->cluster_list);
 		info->cluster_list_size--;
 	}
 
 	/* Remove publication from node scope list, if present */
-
-	if (node == tipc_own_addr) {
+	if (in_own_node(node)) {
 		list_del(&publ->node_list);
 		info->node_list_size--;
 	}
 
 	/* Contract subseq list if no more publications for that subseq */
-
 	if (list_empty(&info->zone_list)) {
 		kfree(info);
 		free = &nseq->sseqs[nseq->first_free--];
@@ -433,7 +421,6 @@
 	}
 
 	/* Notify any waiting subscriptions */
-
 	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
 		tipc_subscr_report_overlap(s,
 					   publ->lower,
@@ -452,7 +439,6 @@
  * the prescribed number of events if there is any sub-
  * sequence overlapping with the requested sequence
  */
-
 static void tipc_nameseq_subscribe(struct name_seq *nseq,
 					struct tipc_subscription *s)
 {
@@ -504,9 +490,10 @@
 {
 	struct name_seq *seq = nametbl_find_seq(type);
 
-	if (lower > upper) {
-		warn("Failed to publish illegal {%u,%u,%u}\n",
-		     type, lower, upper);
+	if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
+	    (lower > upper)) {
+		dbg("Failed to publish illegal {%u,%u,%u} with scope %u\n",
+		     type, lower, upper, scope);
 		return NULL;
 	}
 
@@ -529,12 +516,7 @@
 		return NULL;
 
 	publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
-
-	if (!seq->first_free && list_empty(&seq->subscriptions)) {
-		hlist_del_init(&seq->ns_list);
-		kfree(seq->sseqs);
-		kfree(seq);
-	}
+	nameseq_delete_empty(seq);
 	return publ;
 }
 
@@ -551,7 +533,6 @@
  * - if name translation is attempted and fails, sets 'destnode' to 0
  *   and returns 0
  */
-
 u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
 {
 	struct sub_seq *sseq;
@@ -574,7 +555,7 @@
 	spin_lock_bh(&seq->lock);
 	info = sseq->info;
 
-	/* Closest-First Algorithm: */
+	/* Closest-First Algorithm */
 	if (likely(!*destnode)) {
 		if (!list_empty(&info->node_list)) {
 			publ = list_first_entry(&info->node_list,
@@ -597,14 +578,14 @@
 		}
 	}
 
-	/* Round-Robin Algorithm: */
+	/* Round-Robin Algorithm */
 	else if (*destnode == tipc_own_addr) {
 		if (list_empty(&info->node_list))
 			goto no_match;
 		publ = list_first_entry(&info->node_list, struct publication,
 					node_list);
 		list_move_tail(&publ->node_list, &info->node_list);
-	} else if (in_own_cluster(*destnode)) {
+	} else if (in_own_cluster_exact(*destnode)) {
 		if (list_empty(&info->cluster_list))
 			goto no_match;
 		publ = list_first_entry(&info->cluster_list, struct publication,
@@ -638,7 +619,6 @@
  *
  * Returns non-zero if any off-node ports overlap
  */
-
 int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
 			      struct tipc_port_list *dports)
 {
@@ -682,7 +662,6 @@
 /*
  * tipc_nametbl_publish - add name publication to network name tables
  */
-
 struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
 				    u32 scope, u32 port_ref, u32 key)
 {
@@ -695,11 +674,12 @@
 	}
 
 	write_lock_bh(&tipc_nametbl_lock);
-	table.local_publ_count++;
 	publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
 				   tipc_own_addr, port_ref, key);
-	if (publ && (scope != TIPC_NODE_SCOPE))
+	if (likely(publ)) {
+		table.local_publ_count++;
 		tipc_named_publish(publ);
+	}
 	write_unlock_bh(&tipc_nametbl_lock);
 	return publ;
 }
@@ -707,7 +687,6 @@
 /**
  * tipc_nametbl_withdraw - withdraw name publication from network name tables
  */
-
 int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
 {
 	struct publication *publ;
@@ -716,8 +695,7 @@
 	publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
 	if (likely(publ)) {
 		table.local_publ_count--;
-		if (publ->scope != TIPC_NODE_SCOPE)
-			tipc_named_withdraw(publ);
+		tipc_named_withdraw(publ);
 		write_unlock_bh(&tipc_nametbl_lock);
 		list_del_init(&publ->pport_list);
 		kfree(publ);
@@ -733,7 +711,6 @@
 /**
  * tipc_nametbl_subscribe - add a subscription object to the name table
  */
-
 void tipc_nametbl_subscribe(struct tipc_subscription *s)
 {
 	u32 type = s->seq.type;
@@ -757,7 +734,6 @@
 /**
  * tipc_nametbl_unsubscribe - remove a subscription object from name table
  */
-
 void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
 {
 	struct name_seq *seq;
@@ -768,11 +744,7 @@
 		spin_lock_bh(&seq->lock);
 		list_del_init(&s->nameseq_list);
 		spin_unlock_bh(&seq->lock);
-		if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
-			hlist_del_init(&seq->ns_list);
-			kfree(seq->sseqs);
-			kfree(seq);
-		}
+		nameseq_delete_empty(seq);
 	}
 	write_unlock_bh(&tipc_nametbl_lock);
 }
@@ -781,7 +753,6 @@
 /**
  * subseq_list: print specified sub-sequence contents into the given buffer
  */
-
 static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
 			u32 index)
 {
@@ -818,7 +789,6 @@
 /**
  * nameseq_list: print specified name sequence contents into the given buffer
  */
-
 static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
 			 u32 type, u32 lowbound, u32 upbound, u32 index)
 {
@@ -849,7 +819,6 @@
 /**
  * nametbl_header - print name table header into the given buffer
  */
-
 static void nametbl_header(struct print_buf *buf, u32 depth)
 {
 	const char *header[] = {
@@ -871,7 +840,6 @@
 /**
  * nametbl_list - print specified name table contents into the given buffer
  */
-
 static void nametbl_list(struct print_buf *buf, u32 depth_info,
 			 u32 type, u32 lowbound, u32 upbound)
 {
@@ -970,7 +938,6 @@
 		return;
 
 	/* Verify name table is empty, then release it */
-
 	write_lock_bh(&tipc_nametbl_lock);
 	for (i = 0; i < tipc_nametbl_size; i++) {
 		if (!hlist_empty(&table.types[i]))
@@ -980,4 +947,3 @@
 	table.types = NULL;
 	write_unlock_bh(&tipc_nametbl_lock);
 }
-
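
tipc_nametbl_translate() above picks a destination with a closest-first pass and then a round-robin pass that returns the first publication in the matching list and moves it to the tail. A minimal standalone sketch of the round-robin step, using a rotating cursor over an array instead of list_first_entry()/list_move_tail(); the addresses and references below are made up for illustration:

#include <stdio.h>

struct port_id {
	unsigned int node;
	unsigned int ref;
};

static struct port_id publs[] = {
	{ 0x01001001, 100 },
	{ 0x01001002, 200 },
	{ 0x01001003, 300 },
};
static unsigned int cursor;

/* Return the current publication and rotate so the next lookup
 * picks the following one, spreading load across all publishers. */
static struct port_id *translate_round_robin(void)
{
	struct port_id *p = &publs[cursor];

	cursor = (cursor + 1) % (sizeof(publs) / sizeof(publs[0]));
	return p;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct port_id *p = translate_round_robin();
		printf("-> node 0x%x ref %u\n", p->node, p->ref);
	}
	return 0;
}
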
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 207d59e..71cb4dc 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -45,10 +45,8 @@
 /*
  * TIPC name types reserved for internal TIPC use (both current and planned)
  */
-
 #define TIPC_ZM_SRV 3		/* zone master service name type */
 
-
 /**
  * struct publication - info about a published (name or) name sequence
  * @type: name sequence type
@@ -67,7 +65,6 @@
  *
  * Note that the node list, cluster list, and zone list are circular lists.
  */
-
 struct publication {
 	u32 type;
 	u32 lower;
diff --git a/net/tipc/net.c b/net/tipc/net.c
index d4531b0..7c236c8 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -175,17 +175,14 @@
 {
 	char addr_string[16];
 
-	tipc_subscr_stop();
-	tipc_cfg_stop();
-
+	write_lock_bh(&tipc_net_lock);
 	tipc_own_addr = addr;
 	tipc_named_reinit();
 	tipc_port_reinit();
-
 	tipc_bclink_init();
+	write_unlock_bh(&tipc_net_lock);
 
-	tipc_k_signal((Handler)tipc_subscr_start, 0);
-	tipc_k_signal((Handler)tipc_cfg_init, 0);
+	tipc_cfg_reinit();
 
 	info("Started in network mode\n");
 	info("Own node address %s, network identity %u\n",
diff --git a/net/tipc/node.c b/net/tipc/node.c
index a34cabc..d4fd341 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -58,7 +58,7 @@
  * entries has been chosen so that no hash chain exceeds 8 nodes and will
  * usually be much smaller (typically only a single node).
  */
-static inline unsigned int tipc_hashfn(u32 addr)
+static unsigned int tipc_hashfn(u32 addr)
 {
 	return addr & (NODE_HTABLE_SIZE - 1);
 }
@@ -66,13 +66,12 @@
 /*
  * tipc_node_find - locate specified node object, if it exists
  */
-
 struct tipc_node *tipc_node_find(u32 addr)
 {
 	struct tipc_node *node;
 	struct hlist_node *pos;
 
-	if (unlikely(!in_own_cluster(addr)))
+	if (unlikely(!in_own_cluster_exact(addr)))
 		return NULL;
 
 	hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
@@ -91,7 +90,6 @@
  * time.  (It would be preferable to switch to holding net_lock in write mode,
  * but this is a non-trivial change.)
  */
-
 struct tipc_node *tipc_node_create(u32 addr)
 {
 	struct tipc_node *n_ptr, *temp_node;
@@ -142,13 +140,11 @@
 	tipc_num_nodes--;
 }
 
-
 /**
  * tipc_node_link_up - handle addition of link
  *
  * Link becomes active (alone or shared) or standby, depending on its priority.
  */
-
 void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
 	struct tipc_link **active = &n_ptr->active_links[0];
@@ -181,7 +177,6 @@
 /**
  * node_select_active_links - select active link
  */
-
 static void node_select_active_links(struct tipc_node *n_ptr)
 {
 	struct tipc_link **active = &n_ptr->active_links[0];
@@ -209,7 +204,6 @@
 /**
  * tipc_node_link_down - handle loss of link
  */
-
 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
 	struct tipc_link **active;
@@ -300,7 +294,6 @@
 	     tipc_addr_string_fill(addr_string, n_ptr->addr));
 
 	/* Flush broadcast link info associated with lost node */
-
 	if (n_ptr->bclink.supported) {
 		while (n_ptr->bclink.deferred_head) {
 			struct sk_buff *buf = n_ptr->bclink.deferred_head;
@@ -334,7 +327,6 @@
 	tipc_nodesub_notify(n_ptr);
 
 	/* Prevent re-contact with node until cleanup is done */
-
 	n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
 	tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
 }
@@ -362,7 +354,6 @@
 	}
 
 	/* For now, get space for all other nodes */
-
 	payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
 	if (payload_size > 32768u) {
 		read_unlock_bh(&tipc_net_lock);
@@ -376,7 +367,6 @@
 	}
 
 	/* Add TLVs for all nodes in scope */
-
 	list_for_each_entry(n_ptr, &tipc_node_list, list) {
 		if (!tipc_in_scope(domain, n_ptr->addr))
 			continue;
@@ -412,7 +402,6 @@
 	read_lock_bh(&tipc_net_lock);
 
 	/* Get space for all unicast links + broadcast link */
-
 	payload_size = TLV_SPACE(sizeof(link_info)) *
 		(atomic_read(&tipc_num_links) + 1);
 	if (payload_size > 32768u) {
@@ -427,14 +416,12 @@
 	}
 
 	/* Add TLV for broadcast link */
-
 	link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
 	link_info.up = htonl(1);
 	strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
 	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
 
 	/* Add TLVs for any other links in scope */
-
 	list_for_each_entry(n_ptr, &tipc_node_list, list) {
 		u32 i;
 
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 72561c9..cfcaf4d 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -48,7 +48,6 @@
 #define INVALID_NODE_SIG 0x10000
 
 /* Flags used to block (re)establishment of contact with a neighboring node */
-
 #define WAIT_PEER_DOWN	0x0001	/* wait to see that peer's links are down */
 #define WAIT_NAMES_GONE	0x0002	/* wait for peer's publications to be purged */
 #define WAIT_NODE_DOWN	0x0004	/* wait until peer node is declared down */
@@ -79,7 +78,6 @@
  *    @deferred_tail: newest OOS b'cast message received from node
  *    @defragm: list of partially reassembled b'cast message fragments from node
  */
-
 struct tipc_node {
 	u32 addr;
 	spinlock_t lock;
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index c3c2815..7a27344 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -41,11 +41,10 @@
 /**
  * tipc_nodesub_subscribe - create "node down" subscription for specified node
  */
-
 void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
 		       void *usr_handle, net_ev_handler handle_down)
 {
-	if (addr == tipc_own_addr) {
+	if (in_own_node(addr)) {
 		node_sub->node = NULL;
 		return;
 	}
@@ -66,7 +65,6 @@
 /**
  * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
  */
-
 void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
 {
 	if (!node_sub->node)
@@ -82,7 +80,6 @@
  *
  * Note: node is locked by caller
  */
-
 void tipc_nodesub_notify(struct tipc_node *node)
 {
 	struct tipc_node_subscr *ns;
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index 4bc2ca0..c95d207 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -48,7 +48,6 @@
  * @usr_handle: argument to pass to routine when node fails
  * @nodesub_list: adjacent entries in list of subscriptions for the node
  */
-
 struct tipc_node_subscr {
 	struct tipc_node *node;
 	net_ev_handler handle_node_down;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 94d2904..2ad37a4 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -69,10 +69,30 @@
 	return msg_destport(&p_ptr->phdr);
 }
 
+/*
+ * tipc_port_peer_msg - verify message was sent by connected port's peer
+ *
+ * Handles cases where the node's network address has changed from
+ * the default of <0.0.0> to its configured setting.
+ */
+int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
+{
+	u32 peernode;
+	u32 orignode;
+
+	if (msg_origport(msg) != port_peerport(p_ptr))
+		return 0;
+
+	orignode = msg_orignode(msg);
+	peernode = port_peernode(p_ptr);
+	return (orignode == peernode) ||
+		(!orignode && (peernode == tipc_own_addr)) ||
+		(!peernode && (orignode == tipc_own_addr));
+}
+
 /**
  * tipc_multicast - send a multicast message to local and remote destinations
  */
-
 int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
 		   u32 num_sect, struct iovec const *msg_sect,
 		   unsigned int total_len)
@@ -89,7 +109,6 @@
 		return -EINVAL;
 
 	/* Create multicast message */
-
 	hdr = &oport->phdr;
 	msg_set_type(hdr, TIPC_MCAST_MSG);
 	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
@@ -105,12 +124,10 @@
 		return res;
 
 	/* Figure out where to send multicast message */
-
 	ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
 						TIPC_NODE_SCOPE, &dports);
 
 	/* Send message to destinations (duplicate it only if necessary) */
-
 	if (ext_targets) {
 		if (dports.count != 0) {
 			ibuf = skb_copy(buf, GFP_ATOMIC);
@@ -141,7 +158,6 @@
  *
  * If there is no port list, perform a lookup to create one
  */
-
 void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
 {
 	struct tipc_msg *msg;
@@ -152,7 +168,6 @@
 	msg = buf_msg(buf);
 
 	/* Create destination port list, if one wasn't supplied */
-
 	if (dp == NULL) {
 		tipc_nametbl_mc_translate(msg_nametype(msg),
 				     msg_namelower(msg),
@@ -163,7 +178,6 @@
 	}
 
 	/* Deliver a copy of message to each destination port */
-
 	if (dp->count != 0) {
 		msg_set_destnode(msg, tipc_own_addr);
 		if (dp->count == 1) {
@@ -196,7 +210,6 @@
  *
  * Returns pointer to (locked) TIPC port, or NULL if unable to create it
  */
-
 struct tipc_port *tipc_createport_raw(void *usr_handle,
 			u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
 			void (*wakeup)(struct tipc_port *),
@@ -221,18 +234,24 @@
 	p_ptr->usr_handle = usr_handle;
 	p_ptr->max_pkt = MAX_PKT_DEFAULT;
 	p_ptr->ref = ref;
-	msg = &p_ptr->phdr;
-	tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
-	msg_set_origport(msg, ref);
 	INIT_LIST_HEAD(&p_ptr->wait_list);
 	INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
 	p_ptr->dispatcher = dispatcher;
 	p_ptr->wakeup = wakeup;
 	p_ptr->user_port = NULL;
 	k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
-	spin_lock_bh(&tipc_port_list_lock);
 	INIT_LIST_HEAD(&p_ptr->publications);
 	INIT_LIST_HEAD(&p_ptr->port_list);
+
+	/*
+	 * Must hold port list lock while initializing message header template
+	 * to ensure a change to node's own network address doesn't result
+	 * in template containing out-dated network address information
+	 */
+	spin_lock_bh(&tipc_port_list_lock);
+	msg = &p_ptr->phdr;
+	tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
+	msg_set_origport(msg, ref);
 	list_add_tail(&p_ptr->port_list, &ports);
 	spin_unlock_bh(&tipc_port_list_lock);
 	return p_ptr;
@@ -361,7 +380,6 @@
 	u32 rmsg_sz;
 
 	/* discard rejected message if it shouldn't be returned to sender */
-
 	if (WARN(!msg_isdata(msg),
 		 "attempt to reject message with user=%u", msg_user(msg))) {
 		dump_stack();
@@ -374,7 +392,6 @@
 	 * construct returned message by copying rejected message header and
 	 * data (or subset), then updating header fields that need adjusting
 	 */
-
 	hdr_sz = msg_hdr_sz(msg);
 	rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE);
 
@@ -413,9 +430,8 @@
 	}
 
 	/* send returned message & dispose of rejected message */
-
 	src_node = msg_prevnode(msg);
-	if (src_node == tipc_own_addr)
+	if (in_own_node(src_node))
 		tipc_port_recv_msg(rbuf);
 	else
 		tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
@@ -519,25 +535,20 @@
 	struct tipc_msg *msg = buf_msg(buf);
 	struct tipc_port *p_ptr;
 	struct sk_buff *r_buf = NULL;
-	u32 orignode = msg_orignode(msg);
-	u32 origport = msg_origport(msg);
 	u32 destport = msg_destport(msg);
 	int wakeable;
 
 	/* Validate connection */
-
 	p_ptr = tipc_port_lock(destport);
-	if (!p_ptr || !p_ptr->connected ||
-	    (port_peernode(p_ptr) != orignode) ||
-	    (port_peerport(p_ptr) != origport)) {
+	if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) {
 		r_buf = tipc_buf_acquire(BASIC_H_SIZE);
 		if (r_buf) {
 			msg = buf_msg(r_buf);
 			tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG,
-				      BASIC_H_SIZE, orignode);
+				      BASIC_H_SIZE, msg_orignode(msg));
 			msg_set_errcode(msg, TIPC_ERR_NO_PORT);
 			msg_set_origport(msg, destport);
-			msg_set_destport(msg, origport);
+			msg_set_destport(msg, msg_origport(msg));
 		}
 		if (p_ptr)
 			tipc_port_unlock(p_ptr);
@@ -545,7 +556,6 @@
 	}
 
 	/* Process protocol message sent by peer */
-
 	switch (msg_type(msg)) {
 	case CONN_ACK:
 		wakeable = tipc_port_congested(p_ptr) && p_ptr->congested &&
@@ -646,8 +656,6 @@
 	spin_lock_bh(&tipc_port_list_lock);
 	list_for_each_entry(p_ptr, &ports, port_list) {
 		msg = &p_ptr->phdr;
-		if (msg_orignode(msg) == tipc_own_addr)
-			break;
 		msg_set_prevnode(msg, tipc_own_addr);
 		msg_set_orignode(msg, tipc_own_addr);
 	}
@@ -659,7 +667,6 @@
 *  port_dispatcher_sigh(): Signal handler for messages destined
  *                          to the tipc_port interface.
  */
-
 static void port_dispatcher_sigh(void *dummy)
 {
 	struct sk_buff *buf;
@@ -676,6 +683,7 @@
 		struct tipc_name_seq dseq;
 		void *usr_handle;
 		int connected;
+		int peer_invalid;
 		int published;
 		u32 message_type;
 
@@ -696,6 +704,7 @@
 		up_ptr = p_ptr->user_port;
 		usr_handle = up_ptr->usr_handle;
 		connected = p_ptr->connected;
+		peer_invalid = connected && !tipc_port_peer_msg(p_ptr, msg);
 		published = p_ptr->published;
 
 		if (unlikely(msg_errcode(msg)))
@@ -705,8 +714,6 @@
 
 		case TIPC_CONN_MSG:{
 				tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
-				u32 peer_port = port_peerport(p_ptr);
-				u32 peer_node = port_peernode(p_ptr);
 				u32 dsz;
 
 				tipc_port_unlock(p_ptr);
@@ -715,8 +722,7 @@
 				if (unlikely(!connected)) {
 					if (tipc_connect2port(dref, &orig))
 						goto reject;
-				} else if ((msg_origport(msg) != peer_port) ||
-					   (msg_orignode(msg) != peer_node))
+				} else if (peer_invalid)
 					goto reject;
 				dsz = msg_data_sz(msg);
 				if (unlikely(dsz &&
@@ -768,14 +774,9 @@
 		case TIPC_CONN_MSG:{
 				tipc_conn_shutdown_event cb =
 					up_ptr->conn_err_cb;
-				u32 peer_port = port_peerport(p_ptr);
-				u32 peer_node = port_peernode(p_ptr);
 
 				tipc_port_unlock(p_ptr);
-				if (!cb || !connected)
-					break;
-				if ((msg_origport(msg) != peer_port) ||
-				    (msg_orignode(msg) != peer_node))
+				if (!cb || !connected || peer_invalid)
 					break;
 				tipc_disconnect(dref);
 				skb_pull(buf, msg_hdr_sz(msg));
@@ -826,7 +827,6 @@
 *  port_dispatcher(): Dispatcher for messages destined
  *  to the tipc_port interface. Called with port locked.
  */
-
 static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
 {
 	buf->next = NULL;
@@ -843,10 +843,8 @@
 }
 
 /*
- * Wake up port after congestion: Called with port locked,
- *
+ * Wake up port after congestion: Called with port locked
  */
-
 static void port_wakeup_sh(unsigned long ref)
 {
 	struct tipc_port *p_ptr;
@@ -892,7 +890,6 @@
 /*
  * tipc_createport(): user level call.
  */
-
 int tipc_createport(void *usr_handle,
 		    unsigned int importance,
 		    tipc_msg_err_event error_cb,
@@ -901,7 +898,7 @@
 		    tipc_msg_event msg_cb,
 		    tipc_named_msg_event named_msg_cb,
 		    tipc_conn_msg_event conn_msg_cb,
-		    tipc_continue_event continue_event_cb,/* May be zero */
+		    tipc_continue_event continue_event_cb, /* May be zero */
 		    u32 *portref)
 {
 	struct user_port *up_ptr;
@@ -975,10 +972,6 @@
 
 	if (p_ptr->connected)
 		goto exit;
-	if (seq->lower > seq->upper)
-		goto exit;
-	if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
-		goto exit;
 	key = ref + p_ptr->pub_count + 1;
 	if (key == ref) {
 		res = -EADDRINUSE;
@@ -1078,7 +1071,6 @@
  *
  * Port must be locked.
  */
-
 int tipc_disconnect_port(struct tipc_port *tp_ptr)
 {
 	int res;
@@ -1099,7 +1091,6 @@
 * tipc_disconnect(): Disconnect port from peer.
  *                    This is a node local operation.
  */
-
 int tipc_disconnect(u32 ref)
 {
 	struct tipc_port *p_ptr;
@@ -1134,7 +1125,6 @@
 /**
  * tipc_port_recv_msg - receive message from lower layer and deliver to port user
  */
-
 int tipc_port_recv_msg(struct sk_buff *buf)
 {
 	struct tipc_port *p_ptr;
@@ -1152,17 +1142,6 @@
 	/* validate destination & pass to port, otherwise reject message */
 	p_ptr = tipc_port_lock(destport);
 	if (likely(p_ptr)) {
-		if (likely(p_ptr->connected)) {
-			if ((unlikely(msg_origport(msg) !=
-				      tipc_peer_port(p_ptr))) ||
-			    (unlikely(msg_orignode(msg) !=
-				      tipc_peer_node(p_ptr))) ||
-			    (unlikely(!msg_connected(msg)))) {
-				err = TIPC_ERR_NO_PORT;
-				tipc_port_unlock(p_ptr);
-				goto reject;
-			}
-		}
 		err = p_ptr->dispatcher(p_ptr, buf);
 		tipc_port_unlock(p_ptr);
 		if (likely(!err))
@@ -1170,7 +1149,7 @@
 	} else {
 		err = TIPC_ERR_NO_PORT;
 	}
-reject:
+
 	return tipc_reject_msg(buf, err);
 }
 
@@ -1178,7 +1157,6 @@
  *  tipc_port_recv_sections(): Concatenate and deliver sectioned
  *                        message for this node.
  */
-
 static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
 				   struct iovec const *msg_sect,
 				   unsigned int total_len)
@@ -1196,7 +1174,6 @@
 /**
  * tipc_send - send message sections on connection
  */
-
 int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
 	      unsigned int total_len)
 {
@@ -1211,7 +1188,7 @@
 	p_ptr->congested = 1;
 	if (!tipc_port_congested(p_ptr)) {
 		destnode = port_peernode(p_ptr);
-		if (likely(destnode != tipc_own_addr))
+		if (likely(!in_own_node(destnode)))
 			res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
 							   total_len, destnode);
 		else
@@ -1235,7 +1212,6 @@
 /**
  * tipc_send2name - send message sections to port name
  */
-
 int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
 		   unsigned int num_sect, struct iovec const *msg_sect,
 		   unsigned int total_len)
@@ -1261,13 +1237,17 @@
 	msg_set_destport(msg, destport);
 
 	if (likely(destport || destnode)) {
-		if (likely(destnode == tipc_own_addr))
+		if (likely(in_own_node(destnode)))
 			res = tipc_port_recv_sections(p_ptr, num_sect,
 						      msg_sect, total_len);
-		else
+		else if (tipc_own_addr)
 			res = tipc_link_send_sections_fast(p_ptr, msg_sect,
 							   num_sect, total_len,
 							   destnode);
+		else
+			res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
+							num_sect, total_len,
+							TIPC_ERR_NO_NODE);
 		if (likely(res != -ELINKCONG)) {
 			if (res > 0)
 				p_ptr->sent++;
@@ -1285,7 +1265,6 @@
 /**
  * tipc_send2port - send message sections to port identity
  */
-
 int tipc_send2port(u32 ref, struct tipc_portid const *dest,
 		   unsigned int num_sect, struct iovec const *msg_sect,
 		   unsigned int total_len)
@@ -1305,12 +1284,15 @@
 	msg_set_destport(msg, dest->ref);
 	msg_set_hdr_sz(msg, BASIC_H_SIZE);
 
-	if (dest->node == tipc_own_addr)
+	if (in_own_node(dest->node))
 		res =  tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
 					       total_len);
-	else
+	else if (tipc_own_addr)
 		res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
 						   total_len, dest->node);
+	else
+		res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
+						total_len, TIPC_ERR_NO_NODE);
 	if (likely(res != -ELINKCONG)) {
 		if (res > 0)
 			p_ptr->sent++;
@@ -1325,7 +1307,6 @@
 /**
  * tipc_send_buf2port - send message buffer to port identity
  */
-
 int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
 	       struct sk_buff *buf, unsigned int dsz)
 {
@@ -1349,7 +1330,7 @@
 	skb_push(buf, BASIC_H_SIZE);
 	skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE);
 
-	if (dest->node == tipc_own_addr)
+	if (in_own_node(dest->node))
 		res = tipc_port_recv_msg(buf);
 	else
 		res = tipc_send_buf_fast(buf, dest->node);
@@ -1362,4 +1343,3 @@
 		return dsz;
 	return -ELINKCONG;
 }
-
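
tipc_port_peer_msg(), added above, accepts a message on a connection only if it comes from the recorded peer, treating the unconfigured address <0.0.0> and the node's own address as interchangeable. A userspace restatement of that check with the port and node fields passed in explicitly (illustration only, not the kernel function):

#include <stdio.h>
#include <stdint.h>

static int peer_msg_ok(uint32_t orignode, uint32_t origport,
		       uint32_t peernode, uint32_t peerport,
		       uint32_t own_addr)
{
	if (origport != peerport)
		return 0;
	/* match the peer node, allowing 0 as an alias for this node */
	return (orignode == peernode) ||
	       (!orignode && peernode == own_addr) ||
	       (!peernode && orignode == own_addr);
}

int main(void)
{
	/* Connection set up while the node still had address <0.0.0>;
	 * a message arrives after the address became 0x01001001. */
	printf("%d\n", peer_msg_ok(0x01001001, 42, 0, 42, 0x01001001));
	return 0;
}
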
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 9b88531..98cbec9 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -81,7 +81,6 @@
  * @ref: object reference to associated TIPC port
  * <various callback routines>
  */
-
 struct user_port {
 	void *usr_handle;
 	u32 ref;
@@ -201,6 +200,7 @@
  * The following routines require that the port be locked on entry
  */
 int tipc_disconnect_port(struct tipc_port *tp_ptr);
+int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
 
 /*
  * TIPC messaging routines
@@ -235,7 +235,6 @@
 /**
  * tipc_port_lock - lock port instance referred to and return its pointer
  */
-
 static inline struct tipc_port *tipc_port_lock(u32 ref)
 {
 	return (struct tipc_port *)tipc_ref_lock(ref);
@@ -246,7 +245,6 @@
  *
  * Can use pointer instead of tipc_ref_unlock() since port is already locked.
  */
-
 static inline void tipc_port_unlock(struct tipc_port *p_ptr)
 {
 	spin_unlock_bh(p_ptr->lock);
@@ -257,16 +255,6 @@
 	return (struct tipc_port *)tipc_ref_deref(ref);
 }
 
-static inline u32 tipc_peer_port(struct tipc_port *p_ptr)
-{
-	return msg_destport(&p_ptr->phdr);
-}
-
-static inline u32 tipc_peer_node(struct tipc_port *p_ptr)
-{
-	return msg_destnode(&p_ptr->phdr);
-}
-
 static inline int tipc_port_congested(struct tipc_port *p_ptr)
 {
 	return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 9e37b78..5cada0e 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -43,7 +43,6 @@
  * @lock: spinlock controlling access to object
  * @ref: reference value for object (combines instance & array index info)
  */
-
 struct reference {
 	void *object;
 	spinlock_t lock;
@@ -60,7 +59,6 @@
  * @index_mask: bitmask for array index portion of reference values
  * @start_mask: initial value for instance value portion of reference values
  */
-
 struct ref_table {
 	struct reference *entries;
 	u32 capacity;
@@ -96,7 +94,6 @@
 /**
  * tipc_ref_table_init - create reference table for objects
  */
-
 int tipc_ref_table_init(u32 requested_size, u32 start)
 {
 	struct reference *table;
@@ -109,7 +106,6 @@
 		/* do nothing */ ;
 
 	/* allocate table & mark all entries as uninitialized */
-
 	table = vzalloc(actual_size * sizeof(struct reference));
 	if (table == NULL)
 		return -ENOMEM;
@@ -128,7 +124,6 @@
 /**
  * tipc_ref_table_stop - destroy reference table for objects
  */
-
 void tipc_ref_table_stop(void)
 {
 	if (!tipc_ref_table.entries)
@@ -149,7 +144,6 @@
  * register a partially initialized object, without running the risk that
  * the object will be accessed before initialization is complete.
  */
-
 u32 tipc_ref_acquire(void *object, spinlock_t **lock)
 {
 	u32 index;
@@ -168,7 +162,6 @@
 	}
 
 	/* take a free entry, if available; otherwise initialize a new entry */
-
 	write_lock_bh(&ref_table_lock);
 	if (tipc_ref_table.first_free) {
 		index = tipc_ref_table.first_free;
@@ -211,7 +204,6 @@
  * Disallow future references to an object and free up the entry for re-use.
  * Note: The entry's spin_lock may still be busy after discard
  */
-
 void tipc_ref_discard(u32 ref)
 {
 	struct reference *entry;
@@ -242,12 +234,10 @@
 	 * mark entry as unused; increment instance part of entry's reference
 	 * to invalidate any subsequent references
 	 */
-
 	entry->object = NULL;
 	entry->ref = (ref & ~index_mask) + (index_mask + 1);
 
 	/* append entry to free entry list */
-
 	if (tipc_ref_table.first_free == 0)
 		tipc_ref_table.first_free = index;
 	else
@@ -261,7 +251,6 @@
 /**
  * tipc_ref_lock - lock referenced object and return pointer to it
  */
-
 void *tipc_ref_lock(u32 ref)
 {
 	if (likely(tipc_ref_table.entries)) {
@@ -283,7 +272,6 @@
 /**
  * tipc_ref_deref - return pointer referenced object (without locking it)
  */
-
 void *tipc_ref_deref(u32 ref)
 {
 	if (likely(tipc_ref_table.entries)) {
@@ -296,4 +284,3 @@
 	}
 	return NULL;
 }
-
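
[editor's note] The ref.c code above encodes each handle as an array index in the low bits plus an instance (generation) count in the high bits; tipc_ref_discard() bumps the instance so any stale handle stops matching on later lookups. A small userspace sketch of the same idea, using an arbitrary 4-bit index field rather than TIPC's computed mask, and handing out slot numbers directly instead of maintaining a free list:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define INDEX_MASK 0xfu		/* 16 slots; purely illustrative */

struct slot {
	void *object;
	uint32_t ref;		/* generation in high bits, index in low bits */
};

static struct slot table[INDEX_MASK + 1];

/* Install an object in slot 'index' and return its handle. */
static uint32_t acquire(uint32_t index, void *object)
{
	table[index].object = object;
	table[index].ref = (table[index].ref & ~INDEX_MASK) | index;
	return table[index].ref;
}

/* Invalidate a handle by bumping the generation, as tipc_ref_discard() does. */
static void discard(uint32_t ref)
{
	struct slot *s = &table[ref & INDEX_MASK];

	s->object = NULL;
	s->ref = (ref & ~INDEX_MASK) + (INDEX_MASK + 1);
}

/* Resolve a handle; stale handles no longer match the stored reference. */
static void *deref(uint32_t ref)
{
	struct slot *s = &table[ref & INDEX_MASK];

	return (s->ref == ref) ? s->object : NULL;
}

int main(void)
{
	int obj;
	uint32_t ref = acquire(3, &obj);

	assert(deref(ref) == &obj);
	discard(ref);
	assert(deref(ref) == NULL);	/* old handle is now rejected */
	return 0;
}
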
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 29e957f..5577a44 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -123,7 +123,6 @@
  *
  * Caller must hold socket lock
  */
-
 static void advance_rx_queue(struct sock *sk)
 {
 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
@@ -135,7 +134,6 @@
  *
  * Caller must hold socket lock
  */
-
 static void discard_rx_queue(struct sock *sk)
 {
 	struct sk_buff *buf;
@@ -151,7 +149,6 @@
  *
  * Caller must hold socket lock
  */
-
 static void reject_rx_queue(struct sock *sk)
 {
 	struct sk_buff *buf;
@@ -174,7 +171,6 @@
  *
  * Returns 0 on success, errno otherwise
  */
-
 static int tipc_create(struct net *net, struct socket *sock, int protocol,
 		       int kern)
 {
@@ -184,7 +180,6 @@
 	struct tipc_port *tp_ptr;
 
 	/* Validate arguments */
-
 	if (unlikely(protocol != 0))
 		return -EPROTONOSUPPORT;
 
@@ -207,13 +202,11 @@
 	}
 
 	/* Allocate socket's protocol area */
-
 	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
 	if (sk == NULL)
 		return -ENOMEM;
 
 	/* Allocate TIPC port for socket to use */
-
 	tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
 				     TIPC_LOW_IMPORTANCE);
 	if (unlikely(!tp_ptr)) {
@@ -222,7 +215,6 @@
 	}
 
 	/* Finish initializing socket data structures */
-
 	sock->ops = ops;
 	sock->state = state;
 
@@ -258,7 +250,6 @@
  *
  * Returns 0 on success, errno otherwise
  */
-
 static int release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
@@ -270,7 +261,6 @@
 	 * Exit if socket isn't fully initialized (occurs when a failed accept()
 	 * releases a pre-allocated child socket that was never used)
 	 */
-
 	if (sk == NULL)
 		return 0;
 
@@ -281,7 +271,6 @@
 	 * Reject all unreceived messages, except on an active connection
 	 * (which disconnects locally & sends a 'FIN+' to peer)
 	 */
-
 	while (sock->state != SS_DISCONNECTING) {
 		buf = __skb_dequeue(&sk->sk_receive_queue);
 		if (buf == NULL)
@@ -303,15 +292,12 @@
 	 * Delete TIPC port; this ensures no more messages are queued
 	 * (also disconnects an active connection & sends a 'FIN-' to peer)
 	 */
-
 	res = tipc_deleteport(tport->ref);
 
 	/* Discard any remaining (connection-based) messages in receive queue */
-
 	discard_rx_queue(sk);
 
 	/* Reject any messages that accumulated in backlog queue */
-
 	sock->state = SS_DISCONNECTING;
 	release_sock(sk);
 
@@ -336,7 +322,6 @@
  * NOTE: This routine doesn't need to take the socket lock since it doesn't
  *       access any non-constant socket information.
  */
-
 static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
 {
 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
@@ -376,7 +361,6 @@
  *       accesses socket information that is unchanging (or which changes in
  *       a completely predictable manner).
  */
-
 static int get_name(struct socket *sock, struct sockaddr *uaddr,
 		    int *uaddr_len, int peer)
 {
@@ -444,7 +428,6 @@
  * imply that the operation will succeed, merely that it should be performed
  * and will not block.
  */
-
 static unsigned int poll(struct file *file, struct socket *sock,
 			 poll_table *wait)
 {
@@ -482,7 +465,6 @@
  *
  * Returns 0 if permission is granted, otherwise errno
  */
-
 static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
 {
 	struct tipc_cfg_msg_hdr hdr;
@@ -518,7 +500,6 @@
  *
  * Returns the number of bytes sent on success, or errno otherwise
  */
-
 static int send_msg(struct kiocb *iocb, struct socket *sock,
 		    struct msghdr *m, size_t total_len)
 {
@@ -535,7 +516,7 @@
 		     (dest->family != AF_TIPC)))
 		return -EINVAL;
 	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
-	    (m->msg_iovlen > (unsigned)INT_MAX))
+	    (m->msg_iovlen > (unsigned int)INT_MAX))
 		return -EMSGSIZE;
 
 	if (iocb)
@@ -562,7 +543,6 @@
 		}
 
 		/* Abort any pending connection attempts (very unlikely) */
-
 		reject_rx_queue(sk);
 	}
 
@@ -631,7 +611,6 @@
  *
  * Returns the number of bytes sent on success, or errno otherwise
  */
-
 static int send_packet(struct kiocb *iocb, struct socket *sock,
 		       struct msghdr *m, size_t total_len)
 {
@@ -642,12 +621,11 @@
 	int res;
 
 	/* Handle implied connection establishment */
-
 	if (unlikely(dest))
 		return send_msg(iocb, sock, m, total_len);
 
 	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
-	    (m->msg_iovlen > (unsigned)INT_MAX))
+	    (m->msg_iovlen > (unsigned int)INT_MAX))
 		return -EMSGSIZE;
 
 	if (iocb)
@@ -695,7 +673,6 @@
  * Returns the number of bytes sent on success (or partial success),
  * or errno if no data sent
  */
-
 static int send_stream(struct kiocb *iocb, struct socket *sock,
 		       struct msghdr *m, size_t total_len)
 {
@@ -715,7 +692,6 @@
 	lock_sock(sk);
 
 	/* Handle special cases where there is no connection */
-
 	if (unlikely(sock->state != SS_CONNECTED)) {
 		if (sock->state == SS_UNCONNECTED) {
 			res = send_packet(NULL, sock, m, total_len);
@@ -734,8 +710,8 @@
 		goto exit;
 	}
 
-	if ((total_len > (unsigned)INT_MAX) ||
-	    (m->msg_iovlen > (unsigned)INT_MAX)) {
+	if ((total_len > (unsigned int)INT_MAX) ||
+	    (m->msg_iovlen > (unsigned int)INT_MAX)) {
 		res = -EMSGSIZE;
 		goto exit;
 	}
@@ -747,7 +723,6 @@
 	 * (i.e. one large iovec entry), but could be improved to pass sets
 	 * of small iovec entries into send_packet().
 	 */
-
 	curr_iov = m->msg_iov;
 	curr_iovlen = m->msg_iovlen;
 	my_msg.msg_iov = &my_iov;
@@ -796,7 +771,6 @@
  *
  * Returns 0 on success, errno otherwise
  */
-
 static int auto_connect(struct socket *sock, struct tipc_msg *msg)
 {
 	struct tipc_sock *tsock = tipc_sk(sock->sk);
@@ -821,7 +795,6 @@
  *
  * Note: Address is not captured if not requested by receiver.
  */
-
 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
 {
 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
@@ -847,7 +820,6 @@
  *
  * Returns 0 if successful, otherwise errno
  */
-
 static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
 				struct tipc_port *tport)
 {
@@ -861,7 +833,6 @@
 		return 0;
 
 	/* Optionally capture errored message object(s) */
-
 	err = msg ? msg_errcode(msg) : 0;
 	if (unlikely(err)) {
 		anc_data[0] = err;
@@ -878,7 +849,6 @@
 	}
 
 	/* Optionally capture message destination object */
-
 	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
 	switch (dest_type) {
 	case TIPC_NAMED_MSG:
@@ -923,7 +893,6 @@
  *
  * Returns size of returned message data, errno otherwise
  */
-
 static int recv_msg(struct kiocb *iocb, struct socket *sock,
 		    struct msghdr *m, size_t buf_len, int flags)
 {
@@ -937,7 +906,6 @@
 	int res;
 
 	/* Catch invalid receive requests */
-
 	if (unlikely(!buf_len))
 		return -EINVAL;
 
@@ -952,7 +920,6 @@
 restart:
 
 	/* Look for a message in receive queue; wait if necessary */
-
 	while (skb_queue_empty(&sk->sk_receive_queue)) {
 		if (sock->state == SS_DISCONNECTING) {
 			res = -ENOTCONN;
@@ -970,14 +937,12 @@
 	}
 
 	/* Look at first message in receive queue */
-
 	buf = skb_peek(&sk->sk_receive_queue);
 	msg = buf_msg(buf);
 	sz = msg_data_sz(msg);
 	err = msg_errcode(msg);
 
 	/* Complete connection setup for an implied connect */
-
 	if (unlikely(sock->state == SS_CONNECTING)) {
 		res = auto_connect(sock, msg);
 		if (res)
@@ -985,24 +950,20 @@
 	}
 
 	/* Discard an empty non-errored message & try again */
-
 	if ((!sz) && (!err)) {
 		advance_rx_queue(sk);
 		goto restart;
 	}
 
 	/* Capture sender's address (optional) */
-
 	set_orig_addr(m, msg);
 
 	/* Capture ancillary data (optional) */
-
 	res = anc_data_recv(m, msg, tport);
 	if (res)
 		goto exit;
 
 	/* Capture message data (if valid) & compute return value (always) */
-
 	if (!err) {
 		if (unlikely(buf_len < sz)) {
 			sz = buf_len;
@@ -1022,7 +983,6 @@
 	}
 
 	/* Consume received message (optional) */
-
 	if (likely(!(flags & MSG_PEEK))) {
 		if ((sock->state != SS_READY) &&
 		    (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
@@ -1046,7 +1006,6 @@
  *
  * Returns size of returned message data, errno otherwise
  */
-
 static int recv_stream(struct kiocb *iocb, struct socket *sock,
 		       struct msghdr *m, size_t buf_len, int flags)
 {
@@ -1062,7 +1021,6 @@
 	int res = 0;
 
 	/* Catch invalid receive attempts */
-
 	if (unlikely(!buf_len))
 		return -EINVAL;
 
@@ -1076,10 +1034,9 @@
 
 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
 restart:
-
 	/* Look for a message in receive queue; wait if necessary */
-
 	while (skb_queue_empty(&sk->sk_receive_queue)) {
 		if (sock->state == SS_DISCONNECTING) {
 			res = -ENOTCONN;
@@ -1097,21 +1054,18 @@
 	}
 
 	/* Look at first message in receive queue */
-
 	buf = skb_peek(&sk->sk_receive_queue);
 	msg = buf_msg(buf);
 	sz = msg_data_sz(msg);
 	err = msg_errcode(msg);
 
 	/* Discard an empty non-errored message & try again */
-
 	if ((!sz) && (!err)) {
 		advance_rx_queue(sk);
 		goto restart;
 	}
 
 	/* Optionally capture sender's address & ancillary data of first msg */
-
 	if (sz_copied == 0) {
 		set_orig_addr(m, msg);
 		res = anc_data_recv(m, msg, tport);
@@ -1120,7 +1074,6 @@
 	}
 
 	/* Capture message data (if valid) & compute return value (always) */
-
 	if (!err) {
 		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
 
@@ -1152,7 +1105,6 @@
 	}
 
 	/* Consume received message (optional) */
-
 	if (likely(!(flags & MSG_PEEK))) {
 		if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
 			tipc_acknowledge(tport->ref, tport->conn_unacked);
@@ -1160,7 +1112,6 @@
 	}
 
 	/* Loop around if more data is required */
-
 	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
 	    (!skb_queue_empty(&sk->sk_receive_queue) ||
 	    (sz_copied < target)) &&	/* and more is ready or required */
@@ -1181,7 +1132,6 @@
  *
  * Returns 1 if queue is unable to accept message, 0 otherwise
  */
-
 static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
 {
 	u32 threshold;
@@ -1214,7 +1164,6 @@
  *
  * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
  */
-
 static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 {
 	struct socket *sock = sk->sk_socket;
@@ -1222,12 +1171,8 @@
 	u32 recv_q_len;
 
 	/* Reject message if it is wrong sort of message for socket */
-
-	/*
-	 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
-	 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
-	 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
-	 */
+	if (msg_type(msg) > TIPC_DIRECT_MSG)
+		return TIPC_ERR_NO_PORT;
 
 	if (sock->state == SS_READY) {
 		if (msg_connected(msg))
@@ -1236,7 +1181,8 @@
 		if (msg_mcast(msg))
 			return TIPC_ERR_NO_PORT;
 		if (sock->state == SS_CONNECTED) {
-			if (!msg_connected(msg))
+			if (!msg_connected(msg) ||
+			    !tipc_port_peer_msg(tipc_sk_port(sk), msg))
 				return TIPC_ERR_NO_PORT;
 		} else if (sock->state == SS_CONNECTING) {
 			if (!msg_connected(msg) && (msg_errcode(msg) == 0))
@@ -1253,7 +1199,6 @@
 	}
 
 	/* Reject message if there isn't room to queue it */
-
 	recv_q_len = (u32)atomic_read(&tipc_queue_size);
 	if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
 		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
@@ -1266,13 +1211,11 @@
 	}
 
 	/* Enqueue message (finally!) */
-
 	TIPC_SKB_CB(buf)->handle = 0;
 	atomic_inc(&tipc_queue_size);
 	__skb_queue_tail(&sk->sk_receive_queue, buf);
 
 	/* Initiate connection termination for an incoming 'FIN' */
-
 	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
 		sock->state = SS_DISCONNECTING;
 		tipc_disconnect_port(tipc_sk_port(sk));
@@ -1292,7 +1235,6 @@
  *
  * Returns 0
  */
-
 static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
 {
 	u32 res;
@@ -1312,7 +1254,6 @@
  *
  * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
  */
-
 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
 {
 	struct sock *sk = (struct sock *)tport->usr_handle;
@@ -1324,12 +1265,11 @@
 	 * This code is based on sk_receive_skb(), but must be distinct from it
 	 * since a TIPC-specific filter/reject mechanism is utilized
 	 */
-
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk)) {
 		res = filter_rcv(sk, buf);
 	} else {
-		if (sk_add_backlog(sk, buf))
+		if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
 			res = TIPC_ERR_OVERLOAD;
 		else
 			res = TIPC_OK;
@@ -1345,7 +1285,6 @@
  *
  * Called with port lock already taken.
  */
-
 static void wakeupdispatch(struct tipc_port *tport)
 {
 	struct sock *sk = (struct sock *)tport->usr_handle;
@@ -1363,7 +1302,6 @@
  *
  * Returns 0 on success, errno otherwise
  */
-
 static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 		   int flags)
 {
@@ -1378,21 +1316,18 @@
 	lock_sock(sk);
 
 	/* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
-
 	if (sock->state == SS_READY) {
 		res = -EOPNOTSUPP;
 		goto exit;
 	}
 
 	/* For now, TIPC does not support the non-blocking form of connect() */
-
 	if (flags & O_NONBLOCK) {
 		res = -EOPNOTSUPP;
 		goto exit;
 	}
 
 	/* Issue Posix-compliant error code if socket is in the wrong state */
-
 	if (sock->state == SS_LISTENING) {
 		res = -EOPNOTSUPP;
 		goto exit;
@@ -1412,18 +1347,15 @@
 	 * Note: send_msg() validates the rest of the address fields,
 	 *       so there's no need to do it here
 	 */
-
 	if (dst->addrtype == TIPC_ADDR_MCAST) {
 		res = -EINVAL;
 		goto exit;
 	}
 
 	/* Reject any messages already in receive queue (very unlikely) */
-
 	reject_rx_queue(sk);
 
 	/* Send a 'SYN-' to destination */
-
 	m.msg_name = dest;
 	m.msg_namelen = destlen;
 	res = send_msg(NULL, sock, &m, 0);
@@ -1431,7 +1363,6 @@
 		goto exit;
 
 	/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
-
 	timeout = tipc_sk(sk)->conn_timeout;
 	release_sock(sk);
 	res = wait_event_interruptible_timeout(*sk_sleep(sk),
@@ -1476,7 +1407,6 @@
  *
  * Returns 0 on success, errno otherwise
  */
-
 static int listen(struct socket *sock, int len)
 {
 	struct sock *sk = sock->sk;
@@ -1503,7 +1433,6 @@
  *
  * Returns 0 on success, errno otherwise
  */
-
 static int accept(struct socket *sock, struct socket *new_sock, int flags)
 {
 	struct sock *sk = sock->sk;
@@ -1546,11 +1475,9 @@
 		 * Reject any stray messages received by new socket
 		 * before the socket lock was taken (very, very unlikely)
 		 */
-
 		reject_rx_queue(new_sk);
 
 		/* Connect new socket to its peer */
-
 		new_tsock->peer_name.ref = msg_origport(msg);
 		new_tsock->peer_name.node = msg_orignode(msg);
 		tipc_connect2port(new_ref, &new_tsock->peer_name);
@@ -1566,7 +1493,6 @@
 		 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
 		 * Respond to 'SYN+' by queuing it on new socket.
 		 */
-
 		if (!msg_data_sz(msg)) {
 			struct msghdr m = {NULL,};
 
@@ -1592,7 +1518,6 @@
  *
  * Returns 0 on success, errno otherwise
  */
-
 static int shutdown(struct socket *sock, int how)
 {
 	struct sock *sk = sock->sk;
@@ -1609,8 +1534,8 @@
 	case SS_CONNECTING:
 	case SS_CONNECTED:
 
-		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
 restart:
+		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
 		buf = __skb_dequeue(&sk->sk_receive_queue);
 		if (buf) {
 			atomic_dec(&tipc_queue_size);
@@ -1631,7 +1556,6 @@
 	case SS_DISCONNECTING:
 
 		/* Discard any unreceived messages; wake up sleeping tasks */
-
 		discard_rx_queue(sk);
 		if (waitqueue_active(sk_sleep(sk)))
 			wake_up_interruptible(sk_sleep(sk));
@@ -1659,7 +1583,6 @@
  *
  * Returns 0 on success, errno otherwise
  */
-
 static int setsockopt(struct socket *sock,
 		      int lvl, int opt, char __user *ov, unsigned int ol)
 {
@@ -1719,7 +1642,6 @@
  *
  * Returns 0 on success, errno otherwise
  */
-
 static int getsockopt(struct socket *sock,
 		      int lvl, int opt, char __user *ov, int __user *ol)
 {
@@ -1780,7 +1702,6 @@
 /**
  * Protocol switches for the various types of TIPC sockets
  */
-
 static const struct proto_ops msg_ops = {
 	.owner		= THIS_MODULE,
 	.family		= AF_TIPC,
@@ -1886,7 +1807,6 @@
 /**
  * tipc_socket_stop - stop TIPC socket interface
  */
-
 void tipc_socket_stop(void)
 {
 	if (!sockets_enabled)
@@ -1896,4 +1816,3 @@
 	sock_unregister(tipc_family_ops.family);
 	proto_unregister(&tipc_proto);
 }
-
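
[editor's note] dispatch() in socket.c keeps its existing shape: if the socket is not owned by a user context the message is filtered immediately, otherwise it is parked on a backlog whose size is now bounded by sk->sk_rcvbuf (the third argument gained by sk_add_backlog() in this merge). A compact sketch of that lock-or-queue decision, with a made-up fixed backlog limit standing in for the receive-buffer bound:

#include <assert.h>
#include <stdbool.h>

#define BACKLOG_LIMIT 4		/* stand-in for the sk_rcvbuf-derived bound */

enum verdict { ACCEPTED, QUEUED, OVERLOAD };

struct demo_sock {
	bool owned_by_user;	/* a process currently holds the socket lock */
	int backlog_len;
};

/* Immediate delivery path; the kernel's filter_rcv() also applies checks. */
static enum verdict deliver_now(struct demo_sock *sk)
{
	(void)sk;
	return ACCEPTED;
}

static enum verdict dispatch(struct demo_sock *sk)
{
	if (!sk->owned_by_user)
		return deliver_now(sk);
	if (sk->backlog_len >= BACKLOG_LIMIT)
		return OVERLOAD;	/* maps to TIPC_ERR_OVERLOAD */
	sk->backlog_len++;		/* processed later by the lock owner */
	return QUEUED;
}

int main(void)
{
	struct demo_sock sk = { .owned_by_user = true, .backlog_len = 0 };
	int i;

	for (i = 0; i < BACKLOG_LIMIT; i++)
		assert(dispatch(&sk) == QUEUED);
	assert(dispatch(&sk) == OVERLOAD);

	sk.owned_by_user = false;
	assert(dispatch(&sk) == ACCEPTED);
	return 0;
}
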
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index b2964e9..f976e9c 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -46,7 +46,6 @@
  * @subscriber_list: adjacent subscribers in top. server's list of subscribers
  * @subscription_list: list of subscription objects for this subscriber
  */
-
 struct tipc_subscriber {
 	u32 port_ref;
 	spinlock_t *lock;
@@ -56,13 +55,11 @@
 
 /**
  * struct top_srv - TIPC network topology subscription service
- * @user_ref: TIPC userid of subscription service
  * @setup_port: reference to TIPC port that handles subscription requests
  * @subscription_count: number of active subscriptions (not subscribers!)
  * @subscriber_list: list of ports subscribing to service
  * @lock: spinlock governing access to subscriber list
  */
-
 struct top_srv {
 	u32 setup_port;
 	atomic_t subscription_count;
@@ -79,7 +76,6 @@
  *
  * Returns converted value
  */
-
 static u32 htohl(u32 in, int swap)
 {
 	return swap ? swab32(in) : in;
@@ -91,7 +87,6 @@
  * Note: Must not hold subscriber's server port lock, since tipc_send() will
  *       try to take the lock if the message is rejected and returned!
  */
-
 static void subscr_send_event(struct tipc_subscription *sub,
 			      u32 found_lower,
 			      u32 found_upper,
@@ -117,7 +112,6 @@
  *
  * Returns 1 if there is overlap, otherwise 0.
  */
-
 int tipc_subscr_overlap(struct tipc_subscription *sub,
 			u32 found_lower,
 			u32 found_upper)
@@ -137,7 +131,6 @@
  *
  * Protected by nameseq.lock in name_table.c
  */
-
 void tipc_subscr_report_overlap(struct tipc_subscription *sub,
 				u32 found_lower,
 				u32 found_upper,
@@ -157,43 +150,35 @@
 /**
  * subscr_timeout - subscription timeout has occurred
  */
-
 static void subscr_timeout(struct tipc_subscription *sub)
 {
 	struct tipc_port *server_port;
 
 	/* Validate server port reference (in case subscriber is terminating) */
-
 	server_port = tipc_port_lock(sub->server_ref);
 	if (server_port == NULL)
 		return;
 
 	/* Validate timeout (in case subscription is being cancelled) */
-
 	if (sub->timeout == TIPC_WAIT_FOREVER) {
 		tipc_port_unlock(server_port);
 		return;
 	}
 
 	/* Unlink subscription from name table */
-
 	tipc_nametbl_unsubscribe(sub);
 
 	/* Unlink subscription from subscriber */
-
 	list_del(&sub->subscription_list);
 
 	/* Release subscriber's server port */
-
 	tipc_port_unlock(server_port);
 
 	/* Notify subscriber of timeout */
-
 	subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
 			  TIPC_SUBSCR_TIMEOUT, 0, 0);
 
 	/* Now destroy subscription */
-
 	k_term_timer(&sub->timer);
 	kfree(sub);
 	atomic_dec(&topsrv.subscription_count);
@@ -204,7 +189,6 @@
  *
  * Called with subscriber port locked.
  */
-
 static void subscr_del(struct tipc_subscription *sub)
 {
 	tipc_nametbl_unsubscribe(sub);
@@ -223,7 +207,6 @@
  * a new object reference in the interim that uses this lock; this routine will
  * simply wait for it to be released, then claim it.)
  */
-
 static void subscr_terminate(struct tipc_subscriber *subscriber)
 {
 	u32 port_ref;
@@ -231,18 +214,15 @@
 	struct tipc_subscription *sub_temp;
 
 	/* Invalidate subscriber reference */
-
 	port_ref = subscriber->port_ref;
 	subscriber->port_ref = 0;
 	spin_unlock_bh(subscriber->lock);
 
 	/* Sever connection to subscriber */
-
 	tipc_shutdown(port_ref);
 	tipc_deleteport(port_ref);
 
 	/* Destroy any existing subscriptions for subscriber */
-
 	list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
 				 subscription_list) {
 		if (sub->timeout != TIPC_WAIT_FOREVER) {
@@ -253,17 +233,14 @@
 	}
 
 	/* Remove subscriber from topology server's subscriber list */
-
 	spin_lock_bh(&topsrv.lock);
 	list_del(&subscriber->subscriber_list);
 	spin_unlock_bh(&topsrv.lock);
 
 	/* Reclaim subscriber lock */
-
 	spin_lock_bh(subscriber->lock);
 
 	/* Now destroy subscriber */
-
 	kfree(subscriber);
 }
 
@@ -276,7 +253,6 @@
  *
  * Note that fields of 's' use subscriber's endianness!
  */
-
 static void subscr_cancel(struct tipc_subscr *s,
 			  struct tipc_subscriber *subscriber)
 {
@@ -285,7 +261,6 @@
 	int found = 0;
 
 	/* Find first matching subscription, exit if not found */
-
 	list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
 				 subscription_list) {
 		if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
@@ -297,7 +272,6 @@
 		return;
 
 	/* Cancel subscription timer (if used), then delete subscription */
-
 	if (sub->timeout != TIPC_WAIT_FOREVER) {
 		sub->timeout = TIPC_WAIT_FOREVER;
 		spin_unlock_bh(subscriber->lock);
@@ -313,7 +287,6 @@
  *
  * Called with subscriber port locked.
  */
-
 static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
 					     struct tipc_subscriber *subscriber)
 {
@@ -321,11 +294,9 @@
 	int swap;
 
 	/* Determine subscriber's endianness */
-
 	swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
 
 	/* Detect & process a subscription cancellation request */
-
 	if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
 		s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
 		subscr_cancel(s, subscriber);
@@ -333,7 +304,6 @@
 	}
 
 	/* Refuse subscription if global limit exceeded */
-
 	if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
 		warn("Subscription rejected, subscription limit reached (%u)\n",
 		     tipc_max_subscriptions);
@@ -342,7 +312,6 @@
 	}
 
 	/* Allocate subscription object */
-
 	sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
 	if (!sub) {
 		warn("Subscription rejected, no memory\n");
@@ -351,7 +320,6 @@
 	}
 
 	/* Initialize subscription object */
-
 	sub->seq.type = htohl(s->seq.type, swap);
 	sub->seq.lower = htohl(s->seq.lower, swap);
 	sub->seq.upper = htohl(s->seq.upper, swap);
@@ -385,7 +353,6 @@
  *
  * Called with subscriber's server port unlocked.
  */
-
 static void subscr_conn_shutdown_event(void *usr_handle,
 				       u32 port_ref,
 				       struct sk_buff **buf,
@@ -409,7 +376,6 @@
  *
  * Called with subscriber's server port unlocked.
  */
-
 static void subscr_conn_msg_event(void *usr_handle,
 				  u32 port_ref,
 				  struct sk_buff **buf,
@@ -424,7 +390,6 @@
 	 * Lock subscriber's server port (& make a local copy of lock pointer,
 	 * in case subscriber is deleted while processing subscription request)
 	 */
-
 	if (tipc_port_lock(port_ref) == NULL)
 		return;
 
@@ -452,7 +417,6 @@
 			 *    timeout code cannot delete the subscription,
 			 * so the subscription object is still protected.
 			 */
-
 			tipc_nametbl_subscribe(sub);
 		}
 	}
@@ -461,7 +425,6 @@
 /**
  * subscr_named_msg_event - handle request to establish a new subscriber
  */
-
 static void subscr_named_msg_event(void *usr_handle,
 				   u32 port_ref,
 				   struct sk_buff **buf,
@@ -475,7 +438,6 @@
 	u32 server_port_ref;
 
 	/* Create subscriber object */
-
 	subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
 	if (subscriber == NULL) {
 		warn("Subscriber rejected, no memory\n");
@@ -485,7 +447,6 @@
 	INIT_LIST_HEAD(&subscriber->subscriber_list);
 
 	/* Create server port & establish connection to subscriber */
-
 	tipc_createport(subscriber,
 			importance,
 			NULL,
@@ -504,26 +465,21 @@
 	tipc_connect2port(subscriber->port_ref, orig);
 
 	/* Lock server port (& save lock address for future use) */
-
 	subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock;
 
 	/* Add subscriber to topology server's subscriber list */
-
 	spin_lock_bh(&topsrv.lock);
 	list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
 	spin_unlock_bh(&topsrv.lock);
 
 	/* Unlock server port */
-
 	server_port_ref = subscriber->port_ref;
 	spin_unlock_bh(subscriber->lock);
 
 	/* Send an ACK- to complete connection handshaking */
-
 	tipc_send(server_port_ref, 0, NULL, 0);
 
 	/* Handle optional subscription request */
-
 	if (size != 0) {
 		subscr_conn_msg_event(subscriber, server_port_ref,
 				      buf, data, size);
@@ -535,7 +491,6 @@
 	struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
 	int res;
 
-	memset(&topsrv, 0, sizeof(topsrv));
 	spin_lock_init(&topsrv.lock);
 	INIT_LIST_HEAD(&topsrv.subscriber_list);
 
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ef6529c..218d2e0 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -51,7 +51,6 @@
  * @swap: indicates if subscriber uses opposite endianness in its messages
  * @evt: template for events generated by subscription
  */
-
 struct tipc_subscription {
 	struct tipc_name_seq seq;
 	u32 timeout;
@@ -80,5 +79,4 @@
 
 void tipc_subscr_stop(void);
 
-
 #endif
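
[editor's note] The subscription code above keeps its trick for probing the subscriber's endianness: the filter field must contain at least one known flag, so if none of the expected bits are visible the words must be byte-swapped, and htohl() then swaps every field on demand. A standalone sketch of that conditional swap; the flag values are invented for the example, and the heuristic assumes the flag bits do not overlap their own byte-swapped images, just as the kernel code does:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Invented flag values; the real ones come from linux/tipc.h. */
#define SUB_PORTS   0x01u
#define SUB_SERVICE 0x02u

static uint32_t swab32_demo(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

/* Swap only when the peer's byte order differs from ours. */
static uint32_t htohl_demo(uint32_t in, bool swap)
{
	return swap ? swab32_demo(in) : in;
}

int main(void)
{
	/* A filter word written by a same-endian peer shows a known flag... */
	uint32_t filter = SUB_PORTS;
	bool swap = !(filter & (SUB_PORTS | SUB_SERVICE));

	assert(!swap);

	/* ...while one written by an opposite-endian peer does not. */
	filter = swab32_demo(SUB_PORTS);
	swap = !(filter & (SUB_PORTS | SUB_SERVICE));
	assert(swap);
	assert(htohl_demo(filter, swap) == SUB_PORTS);
	return 0;
}
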
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d510353..641f2e4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -149,9 +149,10 @@
  *    each socket state is protected by separate spin lock.
  */
 
-static inline unsigned unix_hash_fold(__wsum n)
+static inline unsigned int unix_hash_fold(__wsum n)
 {
-	unsigned hash = (__force unsigned)n;
+	unsigned int hash = (__force unsigned int)n;
+
 	hash ^= hash>>16;
 	hash ^= hash>>8;
 	return hash&(UNIX_HASH_SIZE-1);
@@ -200,7 +201,7 @@
  *		- if started by zero, it is abstract name.
  */
 
-static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
+static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
 {
 	if (len <= sizeof(short) || len > sizeof(*sunaddr))
 		return -EINVAL;
@@ -250,7 +251,7 @@
 
 static struct sock *__unix_find_socket_byname(struct net *net,
 					      struct sockaddr_un *sunname,
-					      int len, int type, unsigned hash)
+					      int len, int type, unsigned int hash)
 {
 	struct sock *s;
 	struct hlist_node *node;
@@ -273,7 +274,7 @@
 static inline struct sock *unix_find_socket_byname(struct net *net,
 						   struct sockaddr_un *sunname,
 						   int len, int type,
-						   unsigned hash)
+						   unsigned int hash)
 {
 	struct sock *s;
 
@@ -760,7 +761,7 @@
 
 static struct sock *unix_find_other(struct net *net,
 				    struct sockaddr_un *sunname, int len,
-				    int type, unsigned hash, int *error)
+				    int type, unsigned int hash, int *error)
 {
 	struct sock *u;
 	struct path path;
@@ -824,7 +825,7 @@
 	struct dentry *dentry = NULL;
 	struct path path;
 	int err;
-	unsigned hash;
+	unsigned int hash;
 	struct unix_address *addr;
 	struct hlist_head *list;
 
@@ -964,7 +965,7 @@
 	struct net *net = sock_net(sk);
 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
 	struct sock *other;
-	unsigned hash;
+	unsigned int hash;
 	int err;
 
 	if (addr->sa_family != AF_UNSPEC) {
@@ -1062,7 +1063,7 @@
 	struct sock *newsk = NULL;
 	struct sock *other = NULL;
 	struct sk_buff *skb = NULL;
-	unsigned hash;
+	unsigned int hash;
 	int st;
 	int err;
 	long timeo;
@@ -1437,11 +1438,12 @@
 	struct sock *other = NULL;
 	int namelen = 0; /* fake GCC */
 	int err;
-	unsigned hash;
+	unsigned int hash;
 	struct sk_buff *skb;
 	long timeo;
 	struct scm_cookie tmp_scm;
 	int max_level;
+	int data_len = 0;
 
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
@@ -1475,7 +1477,13 @@
 	if (len > sk->sk_sndbuf - 32)
 		goto out;
 
-	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
+	if (len > SKB_MAX_ALLOC)
+		data_len = min_t(size_t,
+				 len - SKB_MAX_ALLOC,
+				 MAX_SKB_FRAGS * PAGE_SIZE);
+
+	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
+				   msg->msg_flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto out;
 
@@ -1485,8 +1493,10 @@
 	max_level = err + 1;
 	unix_get_secdata(siocb->scm, skb);
 
-	skb_reset_transport_header(skb);
-	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+	skb_put(skb, len - data_len);
+	skb->data_len = data_len;
+	skb->len = len;
+	err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
 	if (err)
 		goto out_free;
 
diff --git a/net/unix/diag.c b/net/unix/diag.c
index f0486ae..47d3002 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -310,7 +310,7 @@
 		return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
 }
 
-static struct sock_diag_handler unix_diag_handler = {
+static const struct sock_diag_handler unix_diag_handler = {
 	.family = AF_UNIX,
 	.dump = unix_diag_handler_dump,
 };
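
[editor's note] The af_unix sendmsg() change above stops forcing large datagrams into a single linear allocation: anything beyond SKB_MAX_ALLOC is carried in page fragments, capped at MAX_SKB_FRAGS * PAGE_SIZE, and sock_alloc_send_pskb() receives the head and fragment sizes separately. A quick userspace check of how the linear/paged split works out, using stand-in values for those architecture-dependent kernel constants:

#include <assert.h>
#include <stddef.h>

/* Stand-ins; the real values depend on the architecture's page size. */
#define SKB_MAX_ALLOC_DEMO	(16u * 1024u)
#define PAGE_SIZE_DEMO		4096u
#define MAX_SKB_FRAGS_DEMO	17u

struct split {
	size_t linear;	/* bytes placed in the skb head */
	size_t paged;	/* bytes placed in page fragments */
};

static struct split split_datagram(size_t len)
{
	struct split s = { .linear = len, .paged = 0 };

	if (len > SKB_MAX_ALLOC_DEMO) {
		s.paged = len - SKB_MAX_ALLOC_DEMO;
		if (s.paged > MAX_SKB_FRAGS_DEMO * PAGE_SIZE_DEMO)
			s.paged = MAX_SKB_FRAGS_DEMO * PAGE_SIZE_DEMO;
		s.linear = len - s.paged;
	}
	return s;
}

int main(void)
{
	struct split s = split_datagram(8 * 1024);

	assert(s.linear == 8 * 1024 && s.paged == 0);	/* small: all linear */

	s = split_datagram(64 * 1024);
	assert(s.linear == SKB_MAX_ALLOC_DEMO);		/* excess goes to pages */
	assert(s.paged == 64 * 1024 - SKB_MAX_ALLOC_DEMO);
	return 0;
}
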
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 397cffe..b34b5b9 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -26,12 +26,6 @@
 	{ }
 };
 
-static struct ctl_path unix_path[] = {
-	{ .procname = "net", },
-	{ .procname = "unix", },
-	{ },
-};
-
 int __net_init unix_sysctl_register(struct net *net)
 {
 	struct ctl_table *table;
@@ -41,7 +35,7 @@
 		goto err_alloc;
 
 	table[0].data = &net->unx.sysctl_max_dgram_qlen;
-	net->unx.ctl = register_net_sysctl_table(net, unix_path, table);
+	net->unx.ctl = register_net_sysctl(net, "net/unix", table);
 	if (net->unx.ctl == NULL)
 		goto err_reg;
 
@@ -58,6 +52,6 @@
 	struct ctl_table *table;
 
 	table = net->unx.ctl->ctl_table_arg;
-	unregister_sysctl_table(net->unx.ctl);
+	unregister_net_sysctl_table(net->unx.ctl);
 	kfree(table);
 }
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 3c65eae..a6470ac 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -187,7 +187,7 @@
 
 static
 void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
-		       unsigned allowed_states_bm)
+		       unsigned int allowed_states_bm)
 {
 	if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
 		printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n",
@@ -425,7 +425,8 @@
 size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
 			   unsigned char *addr, size_t addr_len)
 {
-	unsigned cnt, total;
+	unsigned int cnt, total;
+
 	for (total = cnt = 0; cnt < addr_len; cnt++)
 		total += scnprintf(addr_str + total, addr_str_size - total,
 				   "%02x%c", addr[cnt],
diff --git a/net/wireless/core.c b/net/wireless/core.c
index ccdfed8..39f2538 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -422,10 +422,6 @@
 	const struct ieee80211_iface_combination *c;
 	int i, j;
 
-	/* If we have combinations enforce them */
-	if (wiphy->n_iface_combinations)
-		wiphy->flags |= WIPHY_FLAG_ENFORCE_COMBINATIONS;
-
 	for (i = 0; i < wiphy->n_iface_combinations; i++) {
 		u32 cnt = 0;
 		u16 all_iftypes = 0;
@@ -708,6 +704,10 @@
 	flush_work(&rdev->scan_done_wk);
 	cancel_work_sync(&rdev->conn_work);
 	flush_work(&rdev->event_work);
+
+	if (rdev->wowlan && rdev->ops->set_wakeup)
+		rdev->ops->set_wakeup(&rdev->wiphy, false);
+	cfg80211_rdev_free_wowlan(rdev);
 }
 EXPORT_SYMBOL(wiphy_unregister);
 
@@ -720,7 +720,6 @@
 	mutex_destroy(&rdev->sched_scan_mtx);
 	list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
 		cfg80211_put_bss(&scan->pub);
-	cfg80211_rdev_free_wowlan(rdev);
 	kfree(rdev);
 }
 
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index 9bde4d1..7eecdf4 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -68,6 +68,32 @@
 	return -ENOTSUPP;
 }
 
+static int cfg80211_get_sset_count(struct net_device *dev, int sset)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	if (rdev->ops->get_et_sset_count)
+		return rdev->ops->get_et_sset_count(wdev->wiphy, dev, sset);
+	return -EOPNOTSUPP;
+}
+
+static void cfg80211_get_stats(struct net_device *dev,
+			       struct ethtool_stats *stats, u64 *data)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	if (rdev->ops->get_et_stats)
+		rdev->ops->get_et_stats(wdev->wiphy, dev, stats, data);
+}
+
+static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	if (rdev->ops->get_et_strings)
+		rdev->ops->get_et_strings(wdev->wiphy, dev, sset, data);
+}
+
 const struct ethtool_ops cfg80211_ethtool_ops = {
 	.get_drvinfo = cfg80211_get_drvinfo,
 	.get_regs_len = cfg80211_get_regs_len,
@@ -75,4 +101,7 @@
 	.get_link = ethtool_op_get_link,
 	.get_ringparam = cfg80211_get_ringparam,
 	.set_ringparam = cfg80211_set_ringparam,
+	.get_strings = cfg80211_get_strings,
+	.get_ethtool_stats = cfg80211_get_stats,
+	.get_sset_count = cfg80211_get_sset_count,
 };
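
[editor's note] The new cfg80211 ethtool handlers above simply forward to optional driver callbacks, falling back to an error when a driver leaves the hook unset. The same optional-vtable pattern in miniature; the names and error value below are illustrative, not the kernel API:

#include <assert.h>
#include <stddef.h>

#define EOPNOTSUPP_DEMO 95	/* illustrative errno value */

struct demo_ops {
	/* Optional hook: drivers with nothing to report leave it NULL. */
	int (*get_sset_count)(void *priv, int sset);
};

static int demo_get_sset_count(const struct demo_ops *ops, void *priv, int sset)
{
	if (ops->get_sset_count)
		return ops->get_sset_count(priv, sset);
	return -EOPNOTSUPP_DEMO;
}

static int always_three(void *priv, int sset)
{
	(void)priv;
	(void)sset;
	return 3;
}

int main(void)
{
	struct demo_ops with_hook = { .get_sset_count = always_three };
	struct demo_ops without_hook = { 0 };

	assert(demo_get_sset_count(&with_hook, NULL, 0) == 3);
	assert(demo_get_sset_count(&without_hook, NULL, 0) == -EOPNOTSUPP_DEMO);
	return 0;
}
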
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 30f20fe..d2a19b0 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -473,7 +473,7 @@
 
 	/* fixed already - and no change */
 	if (wdev->wext.ibss.bssid && bssid &&
-	    compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0)
+	    ether_addr_equal(bssid, wdev->wext.ibss.bssid))
 		return 0;
 
 	wdev_lock(wdev);
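
[editor's note] The wireless changes in this merge replace memcmp()/compare_ether_addr() tests with ether_addr_equal(), which returns true on a match and so removes the easy-to-miss "== 0" inversion. A tiny standalone equivalent of that helper, written with memcmp() for clarity rather than mirroring the kernel's implementation:

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* True when two MAC addresses match; the kernel helper has the same sense. */
static bool ether_addr_equal_demo(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

int main(void)
{
	unsigned char ap[ETH_ALEN]  = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	unsigned char sta[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcd };

	assert(ether_addr_equal_demo(ap, ap));
	assert(!ether_addr_equal_demo(ap, sta));
	return 0;
}
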
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index 755738d..1526c21 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -304,10 +304,8 @@
 	pos = skb->data + hdr_len;
 	keyidx = pos[3];
 	if (!(keyidx & (1 << 5))) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "CCMP: received packet without ExtIV"
-			       " flag from %pM\n", hdr->addr2);
-		}
+		net_dbg_ratelimited("CCMP: received packet without ExtIV flag from %pM\n",
+				    hdr->addr2);
 		key->dot11RSNAStatsCCMPFormatErrors++;
 		return -2;
 	}
@@ -318,11 +316,8 @@
 		return -6;
 	}
 	if (!key->key_set) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "CCMP: received packet from %pM"
-			       " with keyid=%d that does not have a configured"
-			       " key\n", hdr->addr2, keyidx);
-		}
+		net_dbg_ratelimited("CCMP: received packet from %pM with keyid=%d that does not have a configured key\n",
+				    hdr->addr2, keyidx);
 		return -3;
 	}
 
@@ -336,15 +331,11 @@
 
 	if (ccmp_replay_check(pn, key->rx_pn)) {
 #ifdef CONFIG_LIB80211_DEBUG
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "CCMP: replay detected: STA=%pM "
-				 "previous PN %02x%02x%02x%02x%02x%02x "
-				 "received PN %02x%02x%02x%02x%02x%02x\n",
-				 hdr->addr2,
-				 key->rx_pn[0], key->rx_pn[1], key->rx_pn[2],
-				 key->rx_pn[3], key->rx_pn[4], key->rx_pn[5],
-				 pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
-		}
+		net_dbg_ratelimited("CCMP: replay detected: STA=%pM previous PN %02x%02x%02x%02x%02x%02x received PN %02x%02x%02x%02x%02x%02x\n",
+				    hdr->addr2,
+				    key->rx_pn[0], key->rx_pn[1], key->rx_pn[2],
+				    key->rx_pn[3], key->rx_pn[4], key->rx_pn[5],
+				    pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
 #endif
 		key->dot11RSNAStatsCCMPReplays++;
 		return -4;
@@ -370,10 +361,8 @@
 	}
 
 	if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "CCMP: decrypt failed: STA="
-			       "%pM\n", hdr->addr2);
-		}
+		net_dbg_ratelimited("CCMP: decrypt failed: STA=%pM\n",
+				    hdr->addr2);
 		key->dot11RSNAStatsCCMPDecryptErrors++;
 		return -5;
 	}
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 3873484..d475cfc 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -360,12 +360,9 @@
 	struct scatterlist sg;
 
 	if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
-		if (net_ratelimit()) {
-			struct ieee80211_hdr *hdr =
-			    (struct ieee80211_hdr *)skb->data;
-			printk(KERN_DEBUG ": TKIP countermeasures: dropped "
-			       "TX packet to %pM\n", hdr->addr1);
-		}
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+		net_dbg_ratelimited("TKIP countermeasures: dropped TX packet to %pM\n",
+				    hdr->addr1);
 		return -1;
 	}
 
@@ -420,10 +417,8 @@
 	hdr = (struct ieee80211_hdr *)skb->data;
 
 	if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG ": TKIP countermeasures: dropped "
-			       "received packet from %pM\n", hdr->addr2);
-		}
+		net_dbg_ratelimited("TKIP countermeasures: dropped received packet from %pM\n",
+				    hdr->addr2);
 		return -1;
 	}
 
@@ -433,10 +428,8 @@
 	pos = skb->data + hdr_len;
 	keyidx = pos[3];
 	if (!(keyidx & (1 << 5))) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "TKIP: received packet without ExtIV"
-			       " flag from %pM\n", hdr->addr2);
-		}
+		net_dbg_ratelimited("TKIP: received packet without ExtIV flag from %pM\n",
+				    hdr->addr2);
 		return -2;
 	}
 	keyidx >>= 6;
@@ -446,11 +439,8 @@
 		return -6;
 	}
 	if (!tkey->key_set) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "TKIP: received packet from %pM"
-			       " with keyid=%d that does not have a configured"
-			       " key\n", hdr->addr2, keyidx);
-		}
+		net_dbg_ratelimited("TKIP: received packet from %pM with keyid=%d that does not have a configured key\n",
+				    hdr->addr2, keyidx);
 		return -3;
 	}
 	iv16 = (pos[0] << 8) | pos[2];
@@ -459,12 +449,9 @@
 
 	if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
 #ifdef CONFIG_LIB80211_DEBUG
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "TKIP: replay detected: STA=%pM"
-			       " previous TSC %08x%04x received TSC "
-			       "%08x%04x\n", hdr->addr2,
-			       tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
-		}
+		net_dbg_ratelimited("TKIP: replay detected: STA=%pM previous TSC %08x%04x received TSC %08x%04x\n",
+				    hdr->addr2, tkey->rx_iv32, tkey->rx_iv16,
+				    iv32, iv16);
 #endif
 		tkey->dot11RSNAStatsTKIPReplays++;
 		return -4;
@@ -481,11 +468,8 @@
 	crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
 	sg_init_one(&sg, pos, plen + 4);
 	if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG ": TKIP: failed to decrypt "
-			       "received packet from %pM\n",
-			       hdr->addr2);
-		}
+		net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n",
+				    hdr->addr2);
 		return -7;
 	}
 
@@ -501,10 +485,8 @@
 			tkey->rx_phase1_done = 0;
 		}
 #ifdef CONFIG_LIB80211_DEBUG
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "TKIP: ICV error detected: STA="
-			       "%pM\n", hdr->addr2);
-		}
+		net_dbg_ratelimited("TKIP: ICV error detected: STA=%pM\n",
+				    hdr->addr2);
 #endif
 		tkey->dot11RSNAStatsTKIPICVErrors++;
 		return -5;
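
[editor's note] Both lib80211 crypto modules above switch their "if (net_ratelimit()) printk(...)" blocks to the one-line net_dbg_ratelimited() helper. For illustration only, a minimal userspace rate limiter in the same spirit: allow a burst per window, then suppress and count what was dropped (the kernel's version is jiffies-based and more careful about concurrency):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit {
	time_t interval;	/* seconds per burst window */
	int burst;		/* messages allowed per window */
	time_t window_start;
	int printed;
	int missed;
};

/* Returns true if the caller may emit a message right now. */
static bool ratelimit_ok(struct ratelimit *rl)
{
	time_t now = time(NULL);

	if (now - rl->window_start >= rl->interval) {
		if (rl->missed)
			fprintf(stderr, "%d messages suppressed\n", rl->missed);
		rl->window_start = now;
		rl->printed = 0;
		rl->missed = 0;
	}
	if (rl->printed < rl->burst) {
		rl->printed++;
		return true;
	}
	rl->missed++;
	return false;
}

int main(void)
{
	struct ratelimit rl = { .interval = 5, .burst = 3 };
	int i;

	for (i = 0; i < 10; i++)
		if (ratelimit_ok(&rl))
			fprintf(stderr, "dropped packet %d\n", i);
	/* Only the first three lines appear; the rest are counted as missed. */
	return 0;
}
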
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index ba21ab2..2749cb8 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -38,6 +38,7 @@
 
 #define MESH_MAX_PREQ_RETRIES	4
 
+#define MESH_SYNC_NEIGHBOR_OFFSET_MAX 50
 
 const struct mesh_config default_mesh_config = {
 	.dot11MeshRetryTimeout = MESH_RET_T,
@@ -48,6 +49,7 @@
 	.element_ttl = MESH_DEFAULT_ELEMENT_TTL,
 	.auto_open_plinks = true,
 	.dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS,
+	.dot11MeshNbrOffsetMaxNeighbor = MESH_SYNC_NEIGHBOR_OFFSET_MAX,
 	.dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT,
 	.dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT,
 	.dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT,
@@ -59,9 +61,11 @@
 	.dot11MeshGateAnnouncementProtocol = false,
 	.dot11MeshForwarding = true,
 	.rssi_threshold = MESH_RSSI_THRESHOLD,
+	.ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED,
 };
 
 const struct mesh_setup default_mesh_setup = {
+	.sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
 	.path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
 	.path_metric = IEEE80211_PATH_METRIC_AIRTIME,
 	.ie = NULL,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f5a7ac3..eb90988 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -6,6 +6,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/etherdevice.h>
 #include <linux/netdevice.h>
 #include <linux/nl80211.h>
 #include <linux/slab.h>
@@ -100,7 +101,7 @@
 	ASSERT_WDEV_LOCK(wdev);
 
 	if (wdev->current_bss &&
-	    memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
+	    ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
 		cfg80211_unhold_bss(wdev->current_bss);
 		cfg80211_put_bss(&wdev->current_bss->pub);
 		wdev->current_bss = NULL;
@@ -115,7 +116,7 @@
 
 		reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
 
-		from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0;
+		from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
 		__cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
 	} else if (wdev->sme_state == CFG80211_SME_CONNECTING) {
 		__cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
@@ -154,7 +155,7 @@
 		return;
 
 	if (wdev->current_bss &&
-	    memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
+	    ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
 		cfg80211_sme_disassoc(dev, wdev->current_bss);
 		cfg80211_unhold_bss(wdev->current_bss);
 		cfg80211_put_bss(&wdev->current_bss->pub);
@@ -165,7 +166,7 @@
 
 	reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
 
-	from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0;
+	from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
 	__cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
 }
 EXPORT_SYMBOL(__cfg80211_send_disassoc);
@@ -285,7 +286,7 @@
 			return -EINVAL;
 
 	if (wdev->current_bss &&
-	    memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0)
+	    ether_addr_equal(bssid, wdev->current_bss->pub.bssid))
 		return -EALREADY;
 
 	memset(&req, 0, sizeof(req));
@@ -362,7 +363,7 @@
 	memset(&req, 0, sizeof(req));
 
 	if (wdev->current_bss && prev_bssid &&
-	    memcmp(wdev->current_bss->pub.bssid, prev_bssid, ETH_ALEN) == 0) {
+	    ether_addr_equal(wdev->current_bss->pub.bssid, prev_bssid)) {
 		/*
 		 * Trying to reassociate: Allow this to proceed and let the old
 		 * association to be dropped when the new one is completed.
@@ -446,7 +447,7 @@
 
 	if (local_state_change) {
 		if (wdev->current_bss &&
-		    memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
+		    ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
 			cfg80211_unhold_bss(wdev->current_bss);
 			cfg80211_put_bss(&wdev->current_bss->pub);
 			wdev->current_bss = NULL;
@@ -495,7 +496,7 @@
 	req.local_state_change = local_state_change;
 	req.ie = ie;
 	req.ie_len = ie_len;
-	if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0)
+	if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid))
 		req.bss = &wdev->current_bss->pub;
 	else
 		return -ENOTCONN;
@@ -758,8 +759,8 @@
 				break;
 			}
 
-			if (memcmp(wdev->current_bss->pub.bssid,
-				   mgmt->bssid, ETH_ALEN)) {
+			if (!ether_addr_equal(wdev->current_bss->pub.bssid,
+					      mgmt->bssid)) {
 				err = -ENOTCONN;
 				break;
 			}
@@ -772,8 +773,8 @@
 				break;
 
 			/* for station, check that DA is the AP */
-			if (memcmp(wdev->current_bss->pub.bssid,
-				   mgmt->da, ETH_ALEN)) {
+			if (!ether_addr_equal(wdev->current_bss->pub.bssid,
+					      mgmt->da)) {
 				err = -ENOTCONN;
 				break;
 			}
@@ -781,11 +782,11 @@
 		case NL80211_IFTYPE_AP:
 		case NL80211_IFTYPE_P2P_GO:
 		case NL80211_IFTYPE_AP_VLAN:
-			if (memcmp(mgmt->bssid, dev->dev_addr, ETH_ALEN))
+			if (!ether_addr_equal(mgmt->bssid, dev->dev_addr))
 				err = -EINVAL;
 			break;
 		case NL80211_IFTYPE_MESH_POINT:
-			if (memcmp(mgmt->sa, mgmt->bssid, ETH_ALEN)) {
+			if (!ether_addr_equal(mgmt->sa, mgmt->bssid)) {
 				err = -EINVAL;
 				break;
 			}
@@ -804,7 +805,7 @@
 			return err;
 	}
 
-	if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
+	if (!ether_addr_equal(mgmt->sa, dev->dev_addr))
 		return -EINVAL;
 
 	/* Transmit the Action frame as requested by user space */
@@ -928,6 +929,33 @@
 }
 EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
 
+void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
+			       enum nl80211_channel_type type)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct wiphy *wiphy = wdev->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+	struct ieee80211_channel *chan;
+
+	wdev_lock(wdev);
+
+	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
+		    wdev->iftype != NL80211_IFTYPE_P2P_GO))
+		goto out;
+
+	chan = rdev_freq_to_chan(rdev, freq, type);
+	if (WARN_ON(!chan))
+		goto out;
+
+	wdev->channel = chan;
+
+	nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
+out:
+	wdev_unlock(wdev);
+	return;
+}
+EXPORT_SYMBOL(cfg80211_ch_switch_notify);
+
 bool cfg80211_rx_spurious_frame(struct net_device *dev,
 				const u8 *addr, gfp_t gfp)
 {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f432c57..b67b111 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -356,20 +356,26 @@
 static int nl80211_msg_put_channel(struct sk_buff *msg,
 				   struct ieee80211_channel *chan)
 {
-	NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ,
-		    chan->center_freq);
+	if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
+			chan->center_freq))
+		goto nla_put_failure;
 
-	if (chan->flags & IEEE80211_CHAN_DISABLED)
-		NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED);
-	if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-		NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN);
-	if (chan->flags & IEEE80211_CHAN_NO_IBSS)
-		NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS);
-	if (chan->flags & IEEE80211_CHAN_RADAR)
-		NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR);
+	if ((chan->flags & IEEE80211_CHAN_DISABLED) &&
+	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED))
+		goto nla_put_failure;
+	if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN))
+		goto nla_put_failure;
+	if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
+	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
+		goto nla_put_failure;
+	if ((chan->flags & IEEE80211_CHAN_RADAR) &&
+	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
+		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
-		    DBM_TO_MBM(chan->max_power));
+	if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
+			DBM_TO_MBM(chan->max_power)))
+		goto nla_put_failure;
 
 	return 0;
 
@@ -621,8 +627,8 @@
 
 	i = 0;
 	while (ifmodes) {
-		if (ifmodes & 1)
-			NLA_PUT_FLAG(msg, i);
+		if ((ifmodes & 1) && nla_put_flag(msg, i))
+			goto nla_put_failure;
 		ifmodes >>= 1;
 		i++;
 	}
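
[editor's note] The nl80211.c hunks in this file replace the NLA_PUT_* macros, which hid a goto inside themselves, with explicit checks: each nla_put_*() call returns nonzero on failure, and several calls are chained with || so the first failure short-circuits to the nla_put_failure label. The control flow in isolation, using dummy put helpers rather than the netlink API:

#include <assert.h>
#include <stdbool.h>

/* Dummy "put" helpers: return 0 on success, nonzero when the buffer is full. */
static int put_u32(int *room, unsigned int value)
{
	(void)value;
	return (*room)-- <= 0;
}

static int put_flag(int *room)
{
	return (*room)-- <= 0;
}

/* Mirrors the converted nl80211 style: chain puts, bail out on first failure;
 * the optional attribute is emitted only when its condition holds, matching
 * the "(flags & X) && nla_put_flag(...)" form above. */
static int fill_message(int room, bool option)
{
	if (put_u32(&room, 1) ||
	    put_u32(&room, 2) ||
	    (option && put_flag(&room)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

int main(void)
{
	assert(fill_message(3, true) == 0);	/* enough room for all three */
	assert(fill_message(2, true) == -1);	/* third attribute fails */
	assert(fill_message(2, false) == 0);	/* optional flag skipped */
	return 0;
}
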
@@ -665,8 +671,9 @@
 			nl_limit = nla_nest_start(msg, j + 1);
 			if (!nl_limit)
 				goto nla_put_failure;
-			NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX,
-				    c->limits[j].max);
+			if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX,
+					c->limits[j].max))
+				goto nla_put_failure;
 			if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
 						c->limits[j].types))
 				goto nla_put_failure;
@@ -675,13 +682,14 @@
 
 		nla_nest_end(msg, nl_limits);
 
-		if (c->beacon_int_infra_match)
-			NLA_PUT_FLAG(msg,
-				NL80211_IFACE_COMB_STA_AP_BI_MATCH);
-		NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
-			    c->num_different_channels);
-		NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM,
-			    c->max_interfaces);
+		if (c->beacon_int_infra_match &&
+		    nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH))
+			goto nla_put_failure;
+		if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
+				c->num_different_channels) ||
+		    nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
+				c->max_interfaces))
+			goto nla_put_failure;
 
 		nla_nest_end(msg, nl_combi);
 	}
@@ -712,64 +720,74 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx);
-	NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
+	    nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)) ||
+	    nla_put_u32(msg, NL80211_ATTR_GENERATION,
+			cfg80211_rdev_list_generation) ||
+	    nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
+		       dev->wiphy.retry_short) ||
+	    nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
+		       dev->wiphy.retry_long) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
+			dev->wiphy.frag_threshold) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
+			dev->wiphy.rts_threshold) ||
+	    nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
+		       dev->wiphy.coverage_class) ||
+	    nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
+		       dev->wiphy.max_scan_ssids) ||
+	    nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
+		       dev->wiphy.max_sched_scan_ssids) ||
+	    nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
+			dev->wiphy.max_scan_ie_len) ||
+	    nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
+			dev->wiphy.max_sched_scan_ie_len) ||
+	    nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
+		       dev->wiphy.max_match_sets))
+		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
-		    cfg80211_rdev_list_generation);
+	if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
+	    nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
+		goto nla_put_failure;
+	if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
+	    nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
+		goto nla_put_failure;
+	if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+	    nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
+		goto nla_put_failure;
+	if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
+	    nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
+		goto nla_put_failure;
+	if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+	    nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
+		goto nla_put_failure;
+	if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
+	    nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
+		goto nla_put_failure;
 
-	NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
-		   dev->wiphy.retry_short);
-	NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
-		   dev->wiphy.retry_long);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
-		    dev->wiphy.frag_threshold);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
-		    dev->wiphy.rts_threshold);
-	NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
-		    dev->wiphy.coverage_class);
-	NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
-		   dev->wiphy.max_scan_ssids);
-	NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
-		   dev->wiphy.max_sched_scan_ssids);
-	NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
-		    dev->wiphy.max_scan_ie_len);
-	NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
-		    dev->wiphy.max_sched_scan_ie_len);
-	NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS,
-		   dev->wiphy.max_match_sets);
+	if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
+		    sizeof(u32) * dev->wiphy.n_cipher_suites,
+		    dev->wiphy.cipher_suites))
+		goto nla_put_failure;
 
-	if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
-	if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
-	if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD);
-	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT);
-	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT);
-	if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP);
+	if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
+		       dev->wiphy.max_num_pmkids))
+		goto nla_put_failure;
 
-	NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
-		sizeof(u32) * dev->wiphy.n_cipher_suites,
-		dev->wiphy.cipher_suites);
+	if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
+	    nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
+		goto nla_put_failure;
 
-	NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
-		   dev->wiphy.max_num_pmkids);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
+			dev->wiphy.available_antennas_tx) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
+			dev->wiphy.available_antennas_rx))
+		goto nla_put_failure;
 
-	if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE);
-
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
-		    dev->wiphy.available_antennas_tx);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
-		    dev->wiphy.available_antennas_rx);
-
-	if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD)
-		NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
-			    dev->wiphy.probe_resp_offload);
+	if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
+	    nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
+			dev->wiphy.probe_resp_offload))
+		goto nla_put_failure;
 
 	if ((dev->wiphy.available_antennas_tx ||
 	     dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
@@ -777,8 +795,11 @@
 		int res;
 		res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant);
 		if (!res) {
-			NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant);
-			NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant);
+			if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX,
+					tx_ant) ||
+			    nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX,
+					rx_ant))
+				goto nla_put_failure;
 		}
 	}
 
@@ -799,17 +820,17 @@
 			goto nla_put_failure;
 
 		/* add HT info */
-		if (dev->wiphy.bands[band]->ht_cap.ht_supported) {
-			NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET,
-				sizeof(dev->wiphy.bands[band]->ht_cap.mcs),
-				&dev->wiphy.bands[band]->ht_cap.mcs);
-			NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA,
-				dev->wiphy.bands[band]->ht_cap.cap);
-			NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
-				dev->wiphy.bands[band]->ht_cap.ampdu_factor);
-			NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
-				dev->wiphy.bands[band]->ht_cap.ampdu_density);
-		}
+		if (dev->wiphy.bands[band]->ht_cap.ht_supported &&
+		    (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET,
+			     sizeof(dev->wiphy.bands[band]->ht_cap.mcs),
+			     &dev->wiphy.bands[band]->ht_cap.mcs) ||
+		     nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA,
+				 dev->wiphy.bands[band]->ht_cap.cap) ||
+		     nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
+				dev->wiphy.bands[band]->ht_cap.ampdu_factor) ||
+		     nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
+				dev->wiphy.bands[band]->ht_cap.ampdu_density)))
+			goto nla_put_failure;
 
 		/* add frequencies */
 		nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
@@ -842,11 +863,13 @@
 				goto nla_put_failure;
 
 			rate = &dev->wiphy.bands[band]->bitrates[i];
-			NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE,
-				    rate->bitrate);
-			if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)
-				NLA_PUT_FLAG(msg,
-					NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE);
+			if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE,
+					rate->bitrate))
+				goto nla_put_failure;
+			if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
+			    nla_put_flag(msg,
+					 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE))
+				goto nla_put_failure;
 
 			nla_nest_end(msg, nl_rate);
 		}
@@ -866,7 +889,8 @@
 	 do {							\
 		if (dev->ops->op) {				\
 			i++;					\
-			NLA_PUT_U32(msg, i, NL80211_CMD_ ## n);	\
+			if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
+				goto nla_put_failure;		\
 		}						\
 	} while (0)
 
@@ -894,7 +918,8 @@
 	CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
 	if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
 		i++;
-		NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
+		if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
+			goto nla_put_failure;
 	}
 	CMD(set_channel, SET_CHANNEL);
 	CMD(set_wds_peer, SET_WDS_PEER);
@@ -908,7 +933,8 @@
 	CMD(set_noack_map, SET_NOACK_MAP);
 	if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
 		i++;
-		NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
+		if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
+			goto nla_put_failure;
 	}
 
 #ifdef CONFIG_NL80211_TESTMODE
@@ -919,23 +945,27 @@
 
 	if (dev->ops->connect || dev->ops->auth) {
 		i++;
-		NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT);
+		if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
+			goto nla_put_failure;
 	}
 
 	if (dev->ops->disconnect || dev->ops->deauth) {
 		i++;
-		NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT);
+		if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
+			goto nla_put_failure;
 	}
 
 	nla_nest_end(msg, nl_cmds);
 
 	if (dev->ops->remain_on_channel &&
-	    dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
-		NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
-			    dev->wiphy.max_remain_on_channel_duration);
+	    (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
+	    nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
+			dev->wiphy.max_remain_on_channel_duration))
+		goto nla_put_failure;
 
-	if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
+	if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
+	    nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
+		goto nla_put_failure;
 
 	if (mgmt_stypes) {
 		u16 stypes;
@@ -953,9 +983,10 @@
 			i = 0;
 			stypes = mgmt_stypes[ift].tx;
 			while (stypes) {
-				if (stypes & 1)
-					NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
-						    (i << 4) | IEEE80211_FTYPE_MGMT);
+				if ((stypes & 1) &&
+				    nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
+						(i << 4) | IEEE80211_FTYPE_MGMT))
+					goto nla_put_failure;
 				stypes >>= 1;
 				i++;
 			}
@@ -975,9 +1006,10 @@
 			i = 0;
 			stypes = mgmt_stypes[ift].rx;
 			while (stypes) {
-				if (stypes & 1)
-					NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
-						    (i << 4) | IEEE80211_FTYPE_MGMT);
+				if ((stypes & 1) &&
+				    nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
+						(i << 4) | IEEE80211_FTYPE_MGMT))
+					goto nla_put_failure;
 				stypes >>= 1;
 				i++;
 			}
@@ -994,22 +1026,23 @@
 		if (!nl_wowlan)
 			goto nla_put_failure;
 
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE);
-		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE);
+		if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
+		    ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
+		    goto nla_put_failure;
 		if (dev->wiphy.wowlan.n_patterns) {
 			struct nl80211_wowlan_pattern_support pat = {
 				.max_patterns = dev->wiphy.wowlan.n_patterns,
@@ -1018,8 +1051,9 @@
 				.max_pattern_len =
 					dev->wiphy.wowlan.pattern_max_len,
 			};
-			NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
-				sizeof(pat), &pat);
+			if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
+				    sizeof(pat), &pat))
+				goto nla_put_failure;
 		}
 
 		nla_nest_end(msg, nl_wowlan);
@@ -1032,16 +1066,20 @@
 	if (nl80211_put_iface_combinations(&dev->wiphy, msg))
 		goto nla_put_failure;
 
-	if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME)
-		NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME,
-			    dev->wiphy.ap_sme_capa);
+	if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
+	    nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
+			dev->wiphy.ap_sme_capa))
+		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features);
+	if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS,
+			dev->wiphy.features))
+		goto nla_put_failure;
 
-	if (dev->wiphy.ht_capa_mod_mask)
-		NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
-			sizeof(*dev->wiphy.ht_capa_mod_mask),
-			dev->wiphy.ht_capa_mod_mask);
+	if (dev->wiphy.ht_capa_mod_mask &&
+	    nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
+		    sizeof(*dev->wiphy.ht_capa_mod_mask),
+		    dev->wiphy.ht_capa_mod_mask))
+		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
 
@@ -1104,17 +1142,20 @@
 static int parse_txq_params(struct nlattr *tb[],
 			    struct ieee80211_txq_params *txq_params)
 {
-	if (!tb[NL80211_TXQ_ATTR_QUEUE] || !tb[NL80211_TXQ_ATTR_TXOP] ||
+	if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] ||
 	    !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] ||
 	    !tb[NL80211_TXQ_ATTR_AIFS])
 		return -EINVAL;
 
-	txq_params->queue = nla_get_u8(tb[NL80211_TXQ_ATTR_QUEUE]);
+	txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]);
 	txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]);
 	txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]);
 	txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]);
 	txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]);
 
+	if (txq_params->ac >= NL80211_NUM_ACS)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -1489,14 +1530,28 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFTYPE,
+			dev->ieee80211_ptr->iftype) ||
+	    nla_put_u32(msg, NL80211_ATTR_GENERATION,
+			rdev->devlist_generation ^
+			(cfg80211_rdev_list_generation << 2)))
+		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
-		    rdev->devlist_generation ^
-			(cfg80211_rdev_list_generation << 2));
+	if (rdev->ops->get_channel) {
+		struct ieee80211_channel *chan;
+		enum nl80211_channel_type channel_type;
+
+		chan = rdev->ops->get_channel(&rdev->wiphy, &channel_type);
+		if (chan &&
+		    (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
+				    chan->center_freq) ||
+		     nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
+				    channel_type)))
+			goto nla_put_failure;
+	}
 
 	return genlmsg_end(msg, hdr);
 
@@ -1794,35 +1849,34 @@
 	struct nlattr *key;
 	struct get_key_cookie *cookie = c;
 
-	if (params->key)
-		NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA,
-			params->key_len, params->key);
-
-	if (params->seq)
-		NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ,
-			params->seq_len, params->seq);
-
-	if (params->cipher)
-		NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
-			    params->cipher);
+	if ((params->key &&
+	     nla_put(cookie->msg, NL80211_ATTR_KEY_DATA,
+		     params->key_len, params->key)) ||
+	    (params->seq &&
+	     nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ,
+		     params->seq_len, params->seq)) ||
+	    (params->cipher &&
+	     nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
+			 params->cipher)))
+		goto nla_put_failure;
 
 	key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY);
 	if (!key)
 		goto nla_put_failure;
 
-	if (params->key)
-		NLA_PUT(cookie->msg, NL80211_KEY_DATA,
-			params->key_len, params->key);
+	if ((params->key &&
+	     nla_put(cookie->msg, NL80211_KEY_DATA,
+		     params->key_len, params->key)) ||
+	    (params->seq &&
+	     nla_put(cookie->msg, NL80211_KEY_SEQ,
+		     params->seq_len, params->seq)) ||
+	    (params->cipher &&
+	     nla_put_u32(cookie->msg, NL80211_KEY_CIPHER,
+			 params->cipher)))
+		goto nla_put_failure;
 
-	if (params->seq)
-		NLA_PUT(cookie->msg, NL80211_KEY_SEQ,
-			params->seq_len, params->seq);
-
-	if (params->cipher)
-		NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER,
-			    params->cipher);
-
-	NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx);
+	if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx))
+		goto nla_put_failure;
 
 	nla_nest_end(cookie->msg, key);
 
@@ -1880,10 +1934,12 @@
 	cookie.msg = msg;
 	cookie.idx = key_idx;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx);
-	if (mac_addr)
-		NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx))
+		goto nla_put_failure;
+	if (mac_addr &&
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+		goto nla_put_failure;
 
 	if (pairwise && mac_addr &&
 	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
@@ -2373,15 +2429,15 @@
 
 	/* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
 	bitrate = cfg80211_calculate_bitrate(info);
-	if (bitrate > 0)
-		NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
-
-	if (info->flags & RATE_INFO_FLAGS_MCS)
-		NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs);
-	if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
-		NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
-	if (info->flags & RATE_INFO_FLAGS_SHORT_GI)
-		NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);
+	if ((bitrate > 0 &&
+	     nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) ||
+	    ((info->flags & RATE_INFO_FLAGS_MCS) &&
+	     nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) ||
+	    ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) &&
+	     nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) ||
+	    ((info->flags & RATE_INFO_FLAGS_SHORT_GI) &&
+	     nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, rate);
 	return true;
@@ -2403,43 +2459,50 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
-
-	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
+	    nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation))
+		goto nla_put_failure;
 
 	sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
 	if (!sinfoattr)
 		goto nla_put_failure;
-	if (sinfo->filled & STATION_INFO_CONNECTED_TIME)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME,
-			    sinfo->connected_time);
-	if (sinfo->filled & STATION_INFO_INACTIVE_TIME)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME,
-			    sinfo->inactive_time);
-	if (sinfo->filled & STATION_INFO_RX_BYTES)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES,
-			    sinfo->rx_bytes);
-	if (sinfo->filled & STATION_INFO_TX_BYTES)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES,
-			    sinfo->tx_bytes);
-	if (sinfo->filled & STATION_INFO_LLID)
-		NLA_PUT_U16(msg, NL80211_STA_INFO_LLID,
-			    sinfo->llid);
-	if (sinfo->filled & STATION_INFO_PLID)
-		NLA_PUT_U16(msg, NL80211_STA_INFO_PLID,
-			    sinfo->plid);
-	if (sinfo->filled & STATION_INFO_PLINK_STATE)
-		NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE,
-			    sinfo->plink_state);
+	if ((sinfo->filled & STATION_INFO_CONNECTED_TIME) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_CONNECTED_TIME,
+			sinfo->connected_time))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_INACTIVE_TIME) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME,
+			sinfo->inactive_time))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_RX_BYTES) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
+			sinfo->rx_bytes))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_TX_BYTES) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
+			sinfo->tx_bytes))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_LLID) &&
+	    nla_put_u16(msg, NL80211_STA_INFO_LLID, sinfo->llid))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_PLID) &&
+	    nla_put_u16(msg, NL80211_STA_INFO_PLID, sinfo->plid))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_PLINK_STATE) &&
+	    nla_put_u8(msg, NL80211_STA_INFO_PLINK_STATE,
+		       sinfo->plink_state))
+		goto nla_put_failure;
 	switch (rdev->wiphy.signal_type) {
 	case CFG80211_SIGNAL_TYPE_MBM:
-		if (sinfo->filled & STATION_INFO_SIGNAL)
-			NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
-				   sinfo->signal);
-		if (sinfo->filled & STATION_INFO_SIGNAL_AVG)
-			NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
-				   sinfo->signal_avg);
+		if ((sinfo->filled & STATION_INFO_SIGNAL) &&
+		    nla_put_u8(msg, NL80211_STA_INFO_SIGNAL,
+			       sinfo->signal))
+			goto nla_put_failure;
+		if ((sinfo->filled & STATION_INFO_SIGNAL_AVG) &&
+		    nla_put_u8(msg, NL80211_STA_INFO_SIGNAL_AVG,
+			       sinfo->signal_avg))
+			goto nla_put_failure;
 		break;
 	default:
 		break;
@@ -2454,49 +2517,60 @@
 					  NL80211_STA_INFO_RX_BITRATE))
 			goto nla_put_failure;
 	}
-	if (sinfo->filled & STATION_INFO_RX_PACKETS)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS,
-			    sinfo->rx_packets);
-	if (sinfo->filled & STATION_INFO_TX_PACKETS)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS,
-			    sinfo->tx_packets);
-	if (sinfo->filled & STATION_INFO_TX_RETRIES)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_RETRIES,
-			    sinfo->tx_retries);
-	if (sinfo->filled & STATION_INFO_TX_FAILED)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED,
-			    sinfo->tx_failed);
-	if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT)
-		NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS,
-			    sinfo->beacon_loss_count);
+	if ((sinfo->filled & STATION_INFO_RX_PACKETS) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_RX_PACKETS,
+			sinfo->rx_packets))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_TX_PACKETS) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_TX_PACKETS,
+			sinfo->tx_packets))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_TX_RETRIES) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_TX_RETRIES,
+			sinfo->tx_retries))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_TX_FAILED) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED,
+			sinfo->tx_failed))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) &&
+	    nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
+			sinfo->beacon_loss_count))
+		goto nla_put_failure;
 	if (sinfo->filled & STATION_INFO_BSS_PARAM) {
 		bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
 		if (!bss_param)
 			goto nla_put_failure;
 
-		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT)
-			NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT);
-		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE)
-			NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE);
-		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME)
-			NLA_PUT_FLAG(msg,
-				     NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME);
-		NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
-			   sinfo->bss_param.dtim_period);
-		NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
-			    sinfo->bss_param.beacon_interval);
+		if (((sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) &&
+		     nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) ||
+		    ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) &&
+		     nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) ||
+		    ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) &&
+		     nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) ||
+		    nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
+			       sinfo->bss_param.dtim_period) ||
+		    nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
+				sinfo->bss_param.beacon_interval))
+			goto nla_put_failure;
 
 		nla_nest_end(msg, bss_param);
 	}
-	if (sinfo->filled & STATION_INFO_STA_FLAGS)
-		NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS,
-			sizeof(struct nl80211_sta_flag_update),
-			&sinfo->sta_flags);
+	if ((sinfo->filled & STATION_INFO_STA_FLAGS) &&
+	    nla_put(msg, NL80211_STA_INFO_STA_FLAGS,
+		    sizeof(struct nl80211_sta_flag_update),
+		    &sinfo->sta_flags))
+		goto nla_put_failure;
+	if ((sinfo->filled & STATION_INFO_T_OFFSET) &&
+		nla_put_u64(msg, NL80211_STA_INFO_T_OFFSET,
+			    sinfo->t_offset))
+		goto nla_put_failure;
 	nla_nest_end(msg, sinfoattr);
 
-	if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
-		NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
-			sinfo->assoc_req_ies);
+	if ((sinfo->filled & STATION_INFO_ASSOC_REQ_IES) &&
+	    nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+		    sinfo->assoc_req_ies))
+		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
 
@@ -2918,36 +2992,37 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst);
-	NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop);
-
-	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) ||
+	    nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) ||
+	    nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation))
+		goto nla_put_failure;
 
 	pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO);
 	if (!pinfoattr)
 		goto nla_put_failure;
-	if (pinfo->filled & MPATH_INFO_FRAME_QLEN)
-		NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
-			    pinfo->frame_qlen);
-	if (pinfo->filled & MPATH_INFO_SN)
-		NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN,
-			    pinfo->sn);
-	if (pinfo->filled & MPATH_INFO_METRIC)
-		NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC,
-			    pinfo->metric);
-	if (pinfo->filled & MPATH_INFO_EXPTIME)
-		NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME,
-			    pinfo->exptime);
-	if (pinfo->filled & MPATH_INFO_FLAGS)
-		NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS,
-			    pinfo->flags);
-	if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT)
-		NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
-			    pinfo->discovery_timeout);
-	if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES)
-		NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
-			    pinfo->discovery_retries);
+	if ((pinfo->filled & MPATH_INFO_FRAME_QLEN) &&
+	    nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
+			pinfo->frame_qlen))
+		goto nla_put_failure;
+	if (((pinfo->filled & MPATH_INFO_SN) &&
+	     nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) ||
+	    ((pinfo->filled & MPATH_INFO_METRIC) &&
+	     nla_put_u32(msg, NL80211_MPATH_INFO_METRIC,
+			 pinfo->metric)) ||
+	    ((pinfo->filled & MPATH_INFO_EXPTIME) &&
+	     nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME,
+			 pinfo->exptime)) ||
+	    ((pinfo->filled & MPATH_INFO_FLAGS) &&
+	     nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS,
+			pinfo->flags)) ||
+	    ((pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) &&
+	     nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
+			 pinfo->discovery_timeout)) ||
+	    ((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) &&
+	     nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
+			pinfo->discovery_retries)))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, pinfoattr);
 
@@ -3273,47 +3348,52 @@
 	pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
 	if (!pinfoattr)
 		goto nla_put_failure;
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
-			cur_params.dot11MeshRetryTimeout);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
-			cur_params.dot11MeshConfirmTimeout);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
-			cur_params.dot11MeshHoldingTimeout);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
-			cur_params.dot11MeshMaxPeerLinks);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES,
-			cur_params.dot11MeshMaxRetries);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_TTL,
-			cur_params.dot11MeshTTL);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_ELEMENT_TTL,
-			cur_params.element_ttl);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
-			cur_params.auto_open_plinks);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
-			cur_params.dot11MeshHWMPmaxPREQretries);
-	NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
-			cur_params.path_refresh_time);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
-			cur_params.min_discovery_timeout);
-	NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
-			cur_params.dot11MeshHWMPactivePathTimeout);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
-			cur_params.dot11MeshHWMPpreqMinInterval);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
-			cur_params.dot11MeshHWMPperrMinInterval);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
-			cur_params.dot11MeshHWMPnetDiameterTraversalTime);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
-			cur_params.dot11MeshHWMPRootMode);
-	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
-			cur_params.dot11MeshHWMPRannInterval);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
-			cur_params.dot11MeshGateAnnouncementProtocol);
-	NLA_PUT_U8(msg, NL80211_MESHCONF_FORWARDING,
-			cur_params.dot11MeshForwarding);
-	NLA_PUT_U32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
-			cur_params.rssi_threshold);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
+			cur_params.dot11MeshRetryTimeout) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
+			cur_params.dot11MeshConfirmTimeout) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
+			cur_params.dot11MeshHoldingTimeout) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
+			cur_params.dot11MeshMaxPeerLinks) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES,
+		       cur_params.dot11MeshMaxRetries) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_TTL,
+		       cur_params.dot11MeshTTL) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL,
+		       cur_params.element_ttl) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+		       cur_params.auto_open_plinks) ||
+	    nla_put_u32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
+			cur_params.dot11MeshNbrOffsetMaxNeighbor) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+		       cur_params.dot11MeshHWMPmaxPREQretries) ||
+	    nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
+			cur_params.path_refresh_time) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+			cur_params.min_discovery_timeout) ||
+	    nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
+			cur_params.dot11MeshHWMPactivePathTimeout) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
+			cur_params.dot11MeshHWMPpreqMinInterval) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+			cur_params.dot11MeshHWMPperrMinInterval) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
+			cur_params.dot11MeshHWMPnetDiameterTraversalTime) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
+		       cur_params.dot11MeshHWMPRootMode) ||
+	    nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+			cur_params.dot11MeshHWMPRannInterval) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+		       cur_params.dot11MeshGateAnnouncementProtocol) ||
+	    nla_put_u8(msg, NL80211_MESHCONF_FORWARDING,
+		       cur_params.dot11MeshForwarding) ||
+	    nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
+			cur_params.rssi_threshold) ||
+	    nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE,
+			cur_params.ht_opmode))
+		goto nla_put_failure;
 	nla_nest_end(msg, pinfoattr);
 	genlmsg_end(msg, hdr);
 	return genlmsg_reply(msg, info);
@@ -3334,6 +3414,7 @@
 	[NL80211_MESHCONF_TTL] = { .type = NLA_U8 },
 	[NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
 	[NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
+	[NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 },
 
 	[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
 	[NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
@@ -3347,10 +3428,12 @@
 	[NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
 	[NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
 	[NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32},
+	[NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16},
 };
 
 static const struct nla_policy
 	nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = {
+	[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC] = { .type = NLA_U8 },
 	[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
 	[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
 	[NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
@@ -3403,6 +3486,9 @@
 			mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
 			mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
+			mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
+			nla_get_u32);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
 			mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
 			nla_get_u8);
@@ -3440,6 +3526,8 @@
 			mask, NL80211_MESHCONF_FORWARDING, nla_get_u8);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold,
 			mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32);
+	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode,
+			mask, NL80211_MESHCONF_HT_OPMODE, nla_get_u16);
 	if (mask_out)
 		*mask_out = mask;
 
@@ -3460,6 +3548,12 @@
 			     nl80211_mesh_setup_params_policy))
 		return -EINVAL;
 
+	if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])
+		setup->sync_method =
+		(nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])) ?
+		 IEEE80211_SYNC_METHOD_VENDOR :
+		 IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET;
+
 	if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])
 		setup->path_sel_proto =
 		(nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ?
@@ -3544,11 +3638,12 @@
 	if (!hdr)
 		goto put_failure;
 
-	NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
-		cfg80211_regdomain->alpha2);
-	if (cfg80211_regdomain->dfs_region)
-		NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION,
-			   cfg80211_regdomain->dfs_region);
+	if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
+			   cfg80211_regdomain->alpha2) ||
+	    (cfg80211_regdomain->dfs_region &&
+	     nla_put_u8(msg, NL80211_ATTR_DFS_REGION,
+			cfg80211_regdomain->dfs_region)))
+		goto nla_put_failure;
 
 	nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
 	if (!nl_reg_rules)
@@ -3568,18 +3663,19 @@
 		if (!nl_reg_rule)
 			goto nla_put_failure;
 
-		NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS,
-			reg_rule->flags);
-		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START,
-			freq_range->start_freq_khz);
-		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END,
-			freq_range->end_freq_khz);
-		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
-			freq_range->max_bandwidth_khz);
-		NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
-			power_rule->max_antenna_gain);
-		NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
-			power_rule->max_eirp);
+		if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
+				reg_rule->flags) ||
+		    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START,
+				freq_range->start_freq_khz) ||
+		    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END,
+				freq_range->end_freq_khz) ||
+		    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
+				freq_range->max_bandwidth_khz) ||
+		    nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
+				power_rule->max_antenna_gain) ||
+		    nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
+				power_rule->max_eirp))
+			goto nla_put_failure;
 
 		nla_nest_end(msg, nl_reg_rule);
 	}
@@ -4150,37 +4246,44 @@
 
 	genl_dump_check_consistent(cb, hdr, &nl80211_fam);
 
-	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex))
+		goto nla_put_failure;
 
 	bss = nla_nest_start(msg, NL80211_ATTR_BSS);
 	if (!bss)
 		goto nla_put_failure;
-	if (!is_zero_ether_addr(res->bssid))
-		NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid);
-	if (res->information_elements && res->len_information_elements)
-		NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
-			res->len_information_elements,
-			res->information_elements);
-	if (res->beacon_ies && res->len_beacon_ies &&
-	    res->beacon_ies != res->information_elements)
-		NLA_PUT(msg, NL80211_BSS_BEACON_IES,
-			res->len_beacon_ies, res->beacon_ies);
-	if (res->tsf)
-		NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
-	if (res->beacon_interval)
-		NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval);
-	NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability);
-	NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq);
-	NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO,
-		jiffies_to_msecs(jiffies - intbss->ts));
+	if ((!is_zero_ether_addr(res->bssid) &&
+	     nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid)) ||
+	    (res->information_elements && res->len_information_elements &&
+	     nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
+		     res->len_information_elements,
+		     res->information_elements)) ||
+	    (res->beacon_ies && res->len_beacon_ies &&
+	     res->beacon_ies != res->information_elements &&
+	     nla_put(msg, NL80211_BSS_BEACON_IES,
+		     res->len_beacon_ies, res->beacon_ies)))
+		goto nla_put_failure;
+	if (res->tsf &&
+	    nla_put_u64(msg, NL80211_BSS_TSF, res->tsf))
+		goto nla_put_failure;
+	if (res->beacon_interval &&
+	    nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval))
+		goto nla_put_failure;
+	if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) ||
+	    nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
+	    nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
+			jiffies_to_msecs(jiffies - intbss->ts)))
+		goto nla_put_failure;
 
 	switch (rdev->wiphy.signal_type) {
 	case CFG80211_SIGNAL_TYPE_MBM:
-		NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal);
+		if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
+			goto nla_put_failure;
 		break;
 	case CFG80211_SIGNAL_TYPE_UNSPEC:
-		NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal);
+		if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal))
+			goto nla_put_failure;
 		break;
 	default:
 		break;
@@ -4189,14 +4292,16 @@
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
-		if (intbss == wdev->current_bss)
-			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
-				    NL80211_BSS_STATUS_ASSOCIATED);
+		if (intbss == wdev->current_bss &&
+		    nla_put_u32(msg, NL80211_BSS_STATUS,
+				NL80211_BSS_STATUS_ASSOCIATED))
+			goto nla_put_failure;
 		break;
 	case NL80211_IFTYPE_ADHOC:
-		if (intbss == wdev->current_bss)
-			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
-				    NL80211_BSS_STATUS_IBSS_JOINED);
+		if (intbss == wdev->current_bss &&
+		    nla_put_u32(msg, NL80211_BSS_STATUS,
+				NL80211_BSS_STATUS_IBSS_JOINED))
+			goto nla_put_failure;
 		break;
 	default:
 		break;
@@ -4265,34 +4370,43 @@
 	if (!hdr)
 		return -ENOMEM;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
 
 	infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO);
 	if (!infoattr)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY,
-		    survey->channel->center_freq);
-	if (survey->filled & SURVEY_INFO_NOISE_DBM)
-		NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE,
-			    survey->noise);
-	if (survey->filled & SURVEY_INFO_IN_USE)
-		NLA_PUT_FLAG(msg, NL80211_SURVEY_INFO_IN_USE);
-	if (survey->filled & SURVEY_INFO_CHANNEL_TIME)
-		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
-			    survey->channel_time);
-	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
-		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
-			    survey->channel_time_busy);
-	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
-		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
-			    survey->channel_time_ext_busy);
-	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_RX)
-		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
-			    survey->channel_time_rx);
-	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_TX)
-		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
-			    survey->channel_time_tx);
+	if (nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY,
+			survey->channel->center_freq))
+		goto nla_put_failure;
+
+	if ((survey->filled & SURVEY_INFO_NOISE_DBM) &&
+	    nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_IN_USE) &&
+	    nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_CHANNEL_TIME) &&
+	    nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
+			survey->channel_time))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) &&
+	    nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
+			survey->channel_time_busy))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) &&
+	    nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
+			survey->channel_time_ext_busy))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) &&
+	    nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
+			survey->channel_time_rx))
+		goto nla_put_failure;
+	if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) &&
+	    nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
+			survey->channel_time_tx))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, infoattr);
 
@@ -4973,7 +5087,7 @@
 					   NL80211_CMD_TESTMODE);
 		struct nlattr *tmdata;
 
-		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) {
+		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
 			genlmsg_cancel(skb, hdr);
 			break;
 		}
@@ -5024,7 +5138,8 @@
 		return NULL;
 	}
 
-	NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
+		goto nla_put_failure;
 	data = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
 
 	((void **)skb->cb)[0] = rdev;
@@ -5403,7 +5518,8 @@
 	if (err)
 		goto free_msg;
 
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+	if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -5545,6 +5661,9 @@
 				sband,
 				nla_data(tb[NL80211_TXRATE_LEGACY]),
 				nla_len(tb[NL80211_TXRATE_LEGACY]));
+			if ((mask.control[band].legacy == 0) &&
+			    nla_len(tb[NL80211_TXRATE_LEGACY]))
+				return -EINVAL;
 		}
 		if (tb[NL80211_TXRATE_MCS]) {
 			if (!ht_rateset_to_mask(
@@ -5690,7 +5809,8 @@
 		goto free_msg;
 
 	if (msg) {
-		NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+		if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+			goto nla_put_failure;
 
 		genlmsg_end(msg, hdr);
 		return genlmsg_reply(msg, info);
@@ -5795,7 +5915,8 @@
 	else
 		ps_state = NL80211_PS_DISABLED;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state);
+	if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 	return genlmsg_reply(msg, info);
@@ -5942,20 +6063,21 @@
 		if (!nl_wowlan)
 			goto nla_put_failure;
 
-		if (rdev->wowlan->any)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
-		if (rdev->wowlan->disconnect)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
-		if (rdev->wowlan->magic_pkt)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
-		if (rdev->wowlan->gtk_rekey_failure)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE);
-		if (rdev->wowlan->eap_identity_req)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST);
-		if (rdev->wowlan->four_way_handshake)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE);
-		if (rdev->wowlan->rfkill_release)
-			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE);
+		if ((rdev->wowlan->any &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
+		    (rdev->wowlan->disconnect &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
+		    (rdev->wowlan->magic_pkt &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
+		    (rdev->wowlan->gtk_rekey_failure &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
+		    (rdev->wowlan->eap_identity_req &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
+		    (rdev->wowlan->four_way_handshake &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
+		    (rdev->wowlan->rfkill_release &&
+		     nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
+			goto nla_put_failure;
 		if (rdev->wowlan->n_patterns) {
 			struct nlattr *nl_pats, *nl_pat;
 			int i, pat_len;
@@ -5970,12 +6092,13 @@
 				if (!nl_pat)
 					goto nla_put_failure;
 				pat_len = rdev->wowlan->patterns[i].pattern_len;
-				NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK,
-					DIV_ROUND_UP(pat_len, 8),
-					rdev->wowlan->patterns[i].mask);
-				NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
-					pat_len,
-					rdev->wowlan->patterns[i].pattern);
+				if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
+					    DIV_ROUND_UP(pat_len, 8),
+					    rdev->wowlan->patterns[i].mask) ||
+				    nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
+					    pat_len,
+					    rdev->wowlan->patterns[i].pattern))
+					goto nla_put_failure;
 				nla_nest_end(msg, nl_pat);
 			}
 			nla_nest_end(msg, nl_pats);
@@ -6000,6 +6123,7 @@
 	struct cfg80211_wowlan new_triggers = {};
 	struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
 	int err, i;
+	bool prev_enabled = rdev->wowlan;
 
 	if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
 		return -EOPNOTSUPP;
@@ -6132,6 +6256,9 @@
 		rdev->wowlan = NULL;
 	}
 
+	if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan)
+		rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan);
+
 	return 0;
  error:
 	for (i = 0; i < new_triggers.n_patterns; i++)
@@ -6248,7 +6375,8 @@
 	if (err)
 		goto free_msg;
 
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+	if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -6916,19 +7044,24 @@
 	nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
 	if (!nest)
 		goto nla_put_failure;
-	for (i = 0; i < req->n_ssids; i++)
-		NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid);
+	for (i = 0; i < req->n_ssids; i++) {
+		if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid))
+			goto nla_put_failure;
+	}
 	nla_nest_end(msg, nest);
 
 	nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
 	if (!nest)
 		goto nla_put_failure;
-	for (i = 0; i < req->n_channels; i++)
-		NLA_PUT_U32(msg, i, req->channels[i]->center_freq);
+	for (i = 0; i < req->n_channels; i++) {
+		if (nla_put_u32(msg, i, req->channels[i]->center_freq))
+			goto nla_put_failure;
+	}
 	nla_nest_end(msg, nest);
 
-	if (req->ie)
-		NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie);
+	if (req->ie &&
+	    nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
+		goto nla_put_failure;
 
 	return 0;
  nla_put_failure:
@@ -6947,8 +7080,9 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+		goto nla_put_failure;
 
 	/* ignore errors and send incomplete event anyway */
 	nl80211_add_scan_req(msg, rdev);
@@ -6972,8 +7106,9 @@
 	if (!hdr)
 		return -1;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+		goto nla_put_failure;
 
 	return genlmsg_end(msg, hdr);
 
@@ -7096,26 +7231,33 @@
 	}
 
 	/* Userspace can always count this one always being set */
-	NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator);
+	if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator))
+		goto nla_put_failure;
 
-	if (request->alpha2[0] == '0' && request->alpha2[1] == '0')
-		NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-			   NL80211_REGDOM_TYPE_WORLD);
-	else if (request->alpha2[0] == '9' && request->alpha2[1] == '9')
-		NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-			   NL80211_REGDOM_TYPE_CUSTOM_WORLD);
-	else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
-		 request->intersect)
-		NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-			   NL80211_REGDOM_TYPE_INTERSECTION);
-	else {
-		NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
-			   NL80211_REGDOM_TYPE_COUNTRY);
-		NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2);
+	if (request->alpha2[0] == '0' && request->alpha2[1] == '0') {
+		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+			       NL80211_REGDOM_TYPE_WORLD))
+			goto nla_put_failure;
+	} else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') {
+		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+			       NL80211_REGDOM_TYPE_CUSTOM_WORLD))
+			goto nla_put_failure;
+	} else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
+		   request->intersect) {
+		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+			       NL80211_REGDOM_TYPE_INTERSECTION))
+			goto nla_put_failure;
+	} else {
+		if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
+			       NL80211_REGDOM_TYPE_COUNTRY) ||
+		    nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
+				   request->alpha2))
+			goto nla_put_failure;
 	}
 
-	if (wiphy_idx_valid(request->wiphy_idx))
-		NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx);
+	if (wiphy_idx_valid(request->wiphy_idx) &&
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7149,9 +7291,10 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7229,10 +7372,11 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7280,15 +7424,15 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	if (bssid)
-		NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
-	NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status);
-	if (req_ie)
-		NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
-	if (resp_ie)
-		NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
+	    nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) ||
+	    (req_ie &&
+	     nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
+	    (resp_ie &&
+	     nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7320,13 +7464,14 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
-	if (req_ie)
-		NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
-	if (resp_ie)
-		NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) ||
+	    (req_ie &&
+	     nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
+	    (resp_ie &&
+	     nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7357,14 +7502,14 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	if (from_ap && reason)
-		NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason);
-	if (from_ap)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP);
-	if (ie)
-		NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    (from_ap && reason &&
+	     nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
+	    (from_ap &&
+	     nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
+	    (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7395,9 +7540,10 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7428,11 +7574,12 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr);
-	if (ie_len && ie)
-		NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr) ||
+	    (ie_len && ie &&
+	     nla_put(msg, NL80211_ATTR_IE, ie_len , ie)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7463,15 +7610,14 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	if (addr)
-		NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
-	NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
-	if (key_id != -1)
-		NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
-	if (tsc)
-		NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) ||
+	    nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) ||
+	    (key_id != -1 &&
+	     nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) ||
+	    (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7506,7 +7652,8 @@
 	 * Since we are applying the beacon hint to a wiphy we know its
 	 * wiphy_idx is valid
 	 */
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy));
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)))
+		goto nla_put_failure;
 
 	/* Before */
 	nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
@@ -7558,14 +7705,16 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) ||
+	    nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
+		goto nla_put_failure;
 
-	if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
-		NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
+	if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL &&
+	    nla_put_u32(msg, NL80211_ATTR_DURATION, duration))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7636,8 +7785,9 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7673,9 +7823,10 @@
 		return true;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
+		goto nla_put_failure;
 
 	err = genlmsg_end(msg, hdr);
 	if (err < 0) {
@@ -7724,12 +7875,13 @@
 		return -ENOMEM;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
-	if (sig_dbm)
-		NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
-	NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
+	    (sig_dbm &&
+	     nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
+	    nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7759,12 +7911,12 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
-	if (ack)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
+	    nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+	    (ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
@@ -7796,15 +7948,17 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+		goto nla_put_failure;
 
 	pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
 	if (!pinfoattr)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
-		    rssi_event);
+	if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+			rssi_event))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, pinfoattr);
 
@@ -7837,16 +7991,18 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
+		goto nla_put_failure;
 
 	rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA);
 	if (!rekey_attr)
 		goto nla_put_failure;
 
-	NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR,
-		NL80211_REPLAY_CTR_LEN, replay_ctr);
+	if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR,
+		    NL80211_REPLAY_CTR_LEN, replay_ctr))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, rekey_attr);
 
@@ -7879,17 +8035,19 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+		goto nla_put_failure;
 
 	attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE);
 	if (!attr)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index);
-	NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid);
-	if (preauth)
-		NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH);
+	if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) ||
+	    nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) ||
+	    (preauth &&
+	     nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH)))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, attr);
 
@@ -7904,6 +8062,39 @@
 	nlmsg_free(msg);
 }
 
+void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
+			      struct net_device *netdev, int freq,
+			      enum nl80211_channel_type type, gfp_t gfp)
+{
+	struct sk_buff *msg;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CH_SWITCH_NOTIFY);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
+	    nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
 void
 nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
 				struct net_device *netdev, const u8 *peer,
@@ -7923,15 +8114,17 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
+		goto nla_put_failure;
 
 	pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
 	if (!pinfoattr)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets);
+	if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets))
+		goto nla_put_failure;
 
 	nla_nest_end(msg, pinfoattr);
 
@@ -7965,12 +8158,12 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
-	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
-	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
-	if (acked)
-		NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
+	    nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
+	    (acked && nla_put_flag(msg, NL80211_ATTR_ACK)))
+		goto nla_put_failure;
 
 	err = genlmsg_end(msg, hdr);
 	if (err < 0) {
@@ -8010,12 +8203,13 @@
 		return;
 	}
 
-	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
-	if (freq)
-		NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
-	if (sig_dbm)
-		NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm);
-	NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    (freq &&
+	     nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) ||
+	    (sig_dbm &&
+	     nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
+	    nla_put(msg, NL80211_ATTR_FRAME, len, frame))
+		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
 
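The conversions above all reduce to one idiom: the old NLA_PUT*() macros hid a jump to nla_put_failure inside themselves, while the nla_put_*() helpers return 0 on success and -EMSGSIZE once the skb runs out of tail room, so each call site now tests the return value (chaining several puts with ||) and branches to the label explicitly. A minimal sketch of that shape follows; the function name and attribute pair are chosen only for illustration and are not part of the patch.

#include <net/netlink.h>
#include <linux/nl80211.h>
#include <linux/if_ether.h>

/* Illustrative only -- not taken from the patch above. */
static int example_put_attrs(struct sk_buff *msg, u32 ifindex, const u8 *mac)
{
	/* nla_put_*() return 0 on success, -EMSGSIZE on overflow */
	if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex) ||
	    (mac && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac)))
		goto nla_put_failure;

	return 0;

 nla_put_failure:
	return -EMSGSIZE;
}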
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 4ffe50d..01a1122 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -118,6 +118,10 @@
 				    struct net_device *netdev, int index,
 				    const u8 *bssid, bool preauth, gfp_t gfp);
 
+void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
+			      struct net_device *dev, int freq,
+			      enum nl80211_channel_type type, gfp_t gfp);
+
 bool nl80211_unexpected_frame(struct net_device *dev,
 			      const u8 *addr, gfp_t gfp);
 bool nl80211_unexpected_4addr_frame(struct net_device *dev,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index e9a0ac8..15f3474 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -388,7 +388,15 @@
 
 	schedule_work(&reg_regdb_work);
 }
+
+/* Feel free to add any other sanity checks here */
+static void reg_regdb_size_check(void)
+{
+	/* We should ideally BUILD_BUG_ON() but then random builds would fail */
+	WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it...");
+}
 #else
+static inline void reg_regdb_size_check(void) {}
 static inline void reg_regdb_query(const char *alpha2) {}
 #endif /* CONFIG_CFG80211_INTERNAL_REGDB */
 
@@ -2322,6 +2330,8 @@
 	spin_lock_init(&reg_requests_lock);
 	spin_lock_init(&reg_pending_beacons_lock);
 
+	reg_regdb_size_check();
+
 	cfg80211_regdomain = cfg80211_world_regdom;
 
 	user_alpha2[0] = '9';
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 70faadf..af2b1ca 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -18,7 +18,7 @@
 #include "nl80211.h"
 #include "wext-compat.h"
 
-#define IEEE80211_SCAN_RESULT_EXPIRE	(15 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE	(30 * HZ)
 
 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
 {
@@ -281,7 +281,7 @@
 {
 	const u8 *ssidie;
 
-	if (bssid && compare_ether_addr(a->bssid, bssid))
+	if (bssid && !ether_addr_equal(a->bssid, bssid))
 		return false;
 
 	if (!ssid)
@@ -378,7 +378,11 @@
 			       b->len_information_elements);
 	}
 
-	return memcmp(a->bssid, b->bssid, ETH_ALEN);
+	/*
+	 * we can't use compare_ether_addr here since we need a < > operator.
+	 * The binary return value of compare_ether_addr isn't enough
+	 */
+	return memcmp(a->bssid, b->bssid, sizeof(a->bssid));
 }
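
The new comment above spells out why this one site keeps memcmp(): cmp_bss() feeds an ordered lookup and needs the signed three-way result, while the pure match tests elsewhere in this series move from compare_ether_addr() to the boolean ether_addr_equal(). A small illustration of that split, with addr_equal() and addr_cmp() as hypothetical stand-ins for the kernel helpers:

/* Equality is enough for matching, but sorting and tree lookups still need
 * the three-way <0 / 0 / >0 answer that memcmp() provides. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ADDR_LEN 6

static bool addr_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ADDR_LEN) == 0;	/* yes/no only */
}

static int addr_cmp(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ADDR_LEN);		/* keeps the ordering */
}

int main(void)
{
	const unsigned char x[ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const unsigned char y[ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };

	printf("equal: %d, x sorts before y: %d\n",
	       addr_equal(x, y), addr_cmp(x, y) < 0);
	return 0;
}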
 
 static int cmp_bss(struct cfg80211_bss *a,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 957f2562..1cd2558 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -370,7 +370,7 @@
 		     iftype != NL80211_IFTYPE_P2P_CLIENT &&
 		     iftype != NL80211_IFTYPE_MESH_POINT) ||
 		    (is_multicast_ether_addr(dst) &&
-		     !compare_ether_addr(src, addr)))
+		     ether_addr_equal(src, addr)))
 			return -1;
 		if (iftype == NL80211_IFTYPE_MESH_POINT) {
 			struct ieee80211s_hdr *meshdr =
@@ -398,9 +398,9 @@
 	payload = skb->data + hdrlen;
 	ethertype = (payload[6] << 8) | payload[7];
 
-	if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
+	if (likely((ether_addr_equal(payload, rfc1042_header) &&
 		    ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
-		   compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
+		   ether_addr_equal(payload, bridge_tunnel_header))) {
 		/* remove RFC1042 or Bridge-Tunnel encapsulation and
 		 * replace EtherType */
 		skb_pull(skb, hdrlen + 6);
@@ -609,10 +609,9 @@
 		payload = frame->data;
 		ethertype = (payload[6] << 8) | payload[7];
 
-		if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
+		if (likely((ether_addr_equal(payload, rfc1042_header) &&
 			    ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
-			   compare_ether_addr(payload,
-					      bridge_tunnel_header) == 0)) {
+			   ether_addr_equal(payload, bridge_tunnel_header))) {
 			/* remove RFC1042 or Bridge-Tunnel
 			 * encapsulation and replace EtherType */
 			skb_pull(frame, 6);
@@ -946,13 +945,6 @@
 	if (rdev->wiphy.software_iftypes & BIT(iftype))
 		return 0;
 
-	/*
-	 * Drivers will gradually all set this flag, until all
-	 * have it we only enforce for those that set it.
-	 */
-	if (!(rdev->wiphy.flags & WIPHY_FLAG_ENFORCE_COMBINATIONS))
-		return 0;
-
 	memset(num, 0, sizeof(num));
 
 	num[iftype] = 1;
@@ -972,6 +964,9 @@
 	}
 	mutex_unlock(&rdev->devlist_mtx);
 
+	if (total == 1)
+		return 0;
+
 	for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
 		const struct ieee80211_iface_combination *c;
 		struct ieee80211_iface_limit *limits;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 3c24eb9..6a6181a 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -821,6 +821,7 @@
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
 	struct ieee80211_channel *chan;
+	enum nl80211_channel_type channel_type;
 
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_STATION:
@@ -831,7 +832,7 @@
 		if (!rdev->ops->get_channel)
 			return -EINVAL;
 
-		chan = rdev->ops->get_channel(wdev->wiphy);
+		chan = rdev->ops->get_channel(wdev->wiphy, &channel_type);
 		if (!chan)
 			return -EINVAL;
 		freq->m = chan->center_freq;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index af648e0..b0eb7aa 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -256,7 +256,7 @@
 		.max_tokens	= sizeof(struct iw_pmksa),
 	},
 };
-static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
+static const unsigned int standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
 
 /*
  * Meta-data about all the additional standard Wireless Extension events
@@ -306,7 +306,7 @@
 		.max_tokens	= sizeof(struct iw_pmkid_cand),
 	},
 };
-static const unsigned standard_event_num = ARRAY_SIZE(standard_event);
+static const unsigned int standard_event_num = ARRAY_SIZE(standard_event);
 
 /* Size (in bytes) of various events */
 static const int event_type_size[] = {
@@ -402,7 +402,8 @@
 	r->ifi_flags = dev_get_flags(dev);
 	r->ifi_change = 0;	/* Wireless changes don't affect those flags */
 
-	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
+	if (nla_put_string(skb, IFLA_IFNAME, dev->name))
+		goto nla_put_failure;
 
 	return nlh;
  nla_put_failure:
@@ -428,7 +429,7 @@
 	int hdr_len;				/* Size of the event header */
 	int wrqu_off = 0;			/* Offset in wrqu */
 	/* Don't "optimise" the following variable, it will crash */
-	unsigned	cmd_index;		/* *MUST* be unsigned */
+	unsigned int	cmd_index;		/* *MUST* be unsigned */
 	struct sk_buff *skb;
 	struct nlmsghdr *nlh;
 	struct nlattr *nla;
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 7c01c2f..7decbd3 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -276,7 +276,7 @@
 
 		/* fixed already - and no change */
 		if (wdev->wext.connect.bssid && bssid &&
-		    compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0)
+		    ether_addr_equal(bssid, wdev->wext.connect.bssid))
 			goto out;
 
 		err = __cfg80211_disconnect(rdev, dev,
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c
index 5d643a5..33bef22 100644
--- a/net/wireless/wext-spy.c
+++ b/net/wireless/wext-spy.c
@@ -203,7 +203,7 @@
 
 	/* Update all records that match */
 	for (i = 0; i < spydata->spy_number; i++)
-		if (!compare_ether_addr(address, spydata->spy_address[i])) {
+		if (ether_addr_equal(address, spydata->spy_address[i])) {
 			memcpy(&(spydata->spy_stat[i]), wstats,
 			       sizeof(struct iw_quality));
 			match = i;
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index d2efd29..4323952 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,18 +73,12 @@
 	{ 0, },
 };
 
-static struct ctl_path x25_path[] = {
-	{ .procname = "net", },
-	{ .procname = "x25", },
-	{ }
-};
-
 void __init x25_register_sysctl(void)
 {
-	x25_table_header = register_sysctl_paths(x25_path, x25_table);
+	x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
 }
 
 void x25_unregister_sysctl(void)
 {
-	unregister_sysctl_table(x25_table_header);
+	unregister_net_sysctl_table(x25_table_header);
 }
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index f0ce862..a8a2363 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -58,7 +58,7 @@
 		if (!sock_owned_by_user(sk)) {
 			queued = x25_process_rx_frame(sk, skb);
 		} else {
-			queued = !sk_add_backlog(sk, skb);
+			queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 		}
 		bh_unlock_sock(sk);
 		sock_put(sk);
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 36384a1..66c63873 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -231,7 +231,7 @@
 	}
 
 	if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) {
-		unsigned bytecount = (dte_facs->calling_len + 1) >> 1;
+		unsigned int bytecount = (dte_facs->calling_len + 1) >> 1;
 		*p++ = X25_FAC_CALLING_AE;
 		*p++ = 1 + bytecount;
 		*p++ = dte_facs->calling_len;
@@ -240,7 +240,7 @@
 	}
 
 	if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) {
-		unsigned bytecount = (dte_facs->called_len % 2) ?
+		unsigned int bytecount = (dte_facs->called_len % 2) ?
 		dte_facs->called_len / 2 + 1 :
 		dte_facs->called_len / 2;
 		*p++ = X25_FAC_CALLED_AE;
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 6d08167..ce90b8d 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -3,12 +3,17 @@
 #
 config XFRM
        bool
-       select CRYPTO
        depends on NET
 
+config XFRM_ALGO
+	tristate
+	select XFRM
+	select CRYPTO
+
 config XFRM_USER
 	tristate "Transformation user configuration interface"
-	depends on INET && XFRM
+	depends on INET
+	select XFRM_ALGO
 	---help---
 	  Support for Transformation(XFRM) user configuration interface
 	  like IPsec used by native Linux tools.
@@ -48,13 +53,13 @@
 
 config XFRM_IPCOMP
 	tristate
-	select XFRM
+	select XFRM_ALGO
 	select CRYPTO
 	select CRYPTO_DEFLATE
 
 config NET_KEY
 	tristate "PF_KEY sockets"
-	select XFRM
+	select XFRM_ALGO
 	---help---
 	  PF_KEYv2 socket family, compatible to KAME ones.
 	  They are required if you are going to use IPsec tools ported
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index aa429ee..c0e9619 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -3,8 +3,9 @@
 #
 
 obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
-		      xfrm_input.o xfrm_output.o xfrm_algo.o \
+		      xfrm_input.o xfrm_output.o \
 		      xfrm_sysctl.o xfrm_replay.o
 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
+obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
 obj-$(CONFIG_XFRM_USER) += xfrm_user.o
 obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 791ab2e..4ce2d93 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -15,9 +15,6 @@
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <net/xfrm.h>
-#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
-#include <net/ah.h>
-#endif
 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
 #include <net/esp.h>
 #endif
@@ -752,3 +749,5 @@
 }
 EXPORT_SYMBOL_GPL(pskb_put);
 #endif
+
+MODULE_LICENSE("GPL");
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 7199d78..716502a 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -45,10 +45,10 @@
 	return (h ^ (h >> 16)) & hmask;
 }
 
-static inline unsigned __xfrm_src_hash(const xfrm_address_t *daddr,
-				       const xfrm_address_t *saddr,
-				       unsigned short family,
-				       unsigned int hmask)
+static inline unsigned int __xfrm_src_hash(const xfrm_address_t *daddr,
+					   const xfrm_address_t *saddr,
+					   unsigned short family,
+					   unsigned int hmask)
 {
 	unsigned int h = family;
 	switch (family) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7661576..3c87a1c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -56,7 +56,7 @@
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
 						int dir);
 
-static inline int
+static inline bool
 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 {
 	const struct flowi4 *fl4 = &fl->u.ip4;
@@ -69,7 +69,7 @@
 		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
 }
 
-static inline int
+static inline bool
 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 {
 	const struct flowi6 *fl6 = &fl->u.ip6;
@@ -82,8 +82,8 @@
 		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
 }
 
-int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
-			unsigned short family)
+bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
+			 unsigned short family)
 {
 	switch (family) {
 	case AF_INET:
@@ -91,7 +91,7 @@
 	case AF_INET6:
 		return __xfrm6_selector_match(sel, fl);
 	}
-	return 0;
+	return false;
 }
 
 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
@@ -877,7 +877,8 @@
 			     u8 type, u16 family, int dir)
 {
 	const struct xfrm_selector *sel = &pol->selector;
-	int match, ret = -ESRCH;
+	int ret = -ESRCH;
+	bool match;
 
 	if (pol->family != family ||
 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
@@ -1006,8 +1007,8 @@
 
 	read_lock_bh(&xfrm_policy_lock);
 	if ((pol = sk->sk_policy[dir]) != NULL) {
-		int match = xfrm_selector_match(&pol->selector, fl,
-						sk->sk_family);
+		bool match = xfrm_selector_match(&pol->selector, fl,
+						 sk->sk_family);
 		int err = 0;
 
 		if (match) {
@@ -2767,8 +2768,8 @@
 #endif
 
 #ifdef CONFIG_XFRM_MIGRATE
-static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
-				       const struct xfrm_selector *sel_tgt)
+static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
+					const struct xfrm_selector *sel_tgt)
 {
 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
 		if (sel_tgt->family == sel_cmp->family &&
@@ -2778,14 +2779,14 @@
 				  sel_cmp->family) == 0 &&
 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
-			return 1;
+			return true;
 		}
 	} else {
 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
-			return 1;
+			return true;
 		}
 	}
-	return 0;
+	return false;
 }
 
 static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 05640bc..380976f 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -54,7 +54,7 @@
 	table[2].data = &net->xfrm.sysctl_larval_drop;
 	table[3].data = &net->xfrm.sysctl_acq_expires;
 
-	net->xfrm.sysctl_hdr = register_net_sysctl_table(net, net_core_path, table);
+	net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table);
 	if (!net->xfrm.sysctl_hdr)
 		goto out_register;
 	return 0;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7128dde..44293b3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -756,40 +756,50 @@
 {
 	copy_to_user_state(x, p);
 
-	if (x->coaddr)
-		NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
+	if (x->coaddr &&
+	    nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
+		goto nla_put_failure;
 
-	if (x->lastused)
-		NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
+	if (x->lastused &&
+	    nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
+		goto nla_put_failure;
 
-	if (x->aead)
-		NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
-	if (x->aalg) {
-		if (copy_to_user_auth(x->aalg, skb))
-			goto nla_put_failure;
+	if (x->aead &&
+	    nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
+		goto nla_put_failure;
 
-		NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
-			xfrm_alg_auth_len(x->aalg), x->aalg);
-	}
-	if (x->ealg)
-		NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
-	if (x->calg)
-		NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
+	if (x->aalg &&
+	    (copy_to_user_auth(x->aalg, skb) ||
+	     nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
+		     xfrm_alg_auth_len(x->aalg), x->aalg)))
+		goto nla_put_failure;
 
-	if (x->encap)
-		NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+	if (x->ealg &&
+	    nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
+		goto nla_put_failure;
 
-	if (x->tfcpad)
-		NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);
+	if (x->calg &&
+	    nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
+		goto nla_put_failure;
+
+	if (x->encap &&
+	    nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
+		goto nla_put_failure;
+
+	if (x->tfcpad &&
+	    nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
+		goto nla_put_failure;
 
 	if (xfrm_mark_put(skb, &x->mark))
 		goto nla_put_failure;
 
-	if (x->replay_esn)
-		NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
-			xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn);
+	if (x->replay_esn &&
+	    nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+		    xfrm_replay_state_esn_len(x->replay_esn),
+		    x->replay_esn))
+		goto nla_put_failure;
 
-	if (x->security && copy_sec_ctx(x->security, skb) < 0)
+	if (x->security && copy_sec_ctx(x->security, skb))
 		goto nla_put_failure;
 
 	return 0;
@@ -912,8 +922,9 @@
 	sph.spdhcnt = si.spdhcnt;
 	sph.spdhmcnt = si.spdhmcnt;
 
-	NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
-	NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
+	if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) ||
+	    nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -967,8 +978,9 @@
 	sh.sadhmcnt = si.sadhmcnt;
 	sh.sadhcnt = si.sadhcnt;
 
-	NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
-	NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
+	if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) ||
+	    nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
@@ -1690,21 +1702,27 @@
 	id->reqid = x->props.reqid;
 	id->flags = c->data.aevent;
 
-	if (x->replay_esn)
-		NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
-			xfrm_replay_state_esn_len(x->replay_esn),
-			x->replay_esn);
-	else
-		NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
+	if (x->replay_esn) {
+		if (nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+			    xfrm_replay_state_esn_len(x->replay_esn),
+			    x->replay_esn))
+			goto nla_put_failure;
+	} else {
+		if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
+			    &x->replay))
+			goto nla_put_failure;
+	}
+	if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft))
+		goto nla_put_failure;
 
-	NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
+	if ((id->flags & XFRM_AE_RTHR) &&
+	    nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff))
+		goto nla_put_failure;
 
-	if (id->flags & XFRM_AE_RTHR)
-		NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
-
-	if (id->flags & XFRM_AE_ETHR)
-		NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
-			    x->replay_maxage * 10 / HZ);
+	if ((id->flags & XFRM_AE_ETHR) &&
+	    nla_put_u32(skb, XFRMA_ETIMER_THRESH,
+			x->replay_maxage * 10 / HZ))
+		goto nla_put_failure;
 
 	if (xfrm_mark_put(skb, &x->mark))
 		goto nla_put_failure;
@@ -2835,8 +2853,9 @@
 	ur->proto = proto;
 	memcpy(&ur->sel, sel, sizeof(ur->sel));
 
-	if (addr)
-		NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
+	if (addr &&
+	    nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr))
+		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
 
diff --git a/scripts/Makefile b/scripts/Makefile
index df7678f..3626666 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -8,6 +8,8 @@
 # conmakehash:	 Create arrays for initializing the kernel console tables
 # docproc:       Used in Documentation/DocBook
 
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+
 hostprogs-$(CONFIG_KALLSYMS)     += kallsyms
 hostprogs-$(CONFIG_LOGO)         += pnmtologo
 hostprogs-$(CONFIG_VT)           += conmakehash
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 0920ea3..d309e7f 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -14,7 +14,6 @@
 #include <linux/netlink.h>
 #include <linux/rtnetlink.h>
 #include <linux/if.h>
-#include <linux/netfilter_ipv4/ip_queue.h>
 #include <linux/inet_diag.h>
 #include <linux/xfrm.h>
 #include <linux/audit.h>
@@ -70,12 +69,6 @@
 	{ RTM_SETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
 };
 
-static struct nlmsg_perm nlmsg_firewall_perms[] =
-{
-	{ IPQM_MODE,		NETLINK_FIREWALL_SOCKET__NLMSG_WRITE },
-	{ IPQM_VERDICT,		NETLINK_FIREWALL_SOCKET__NLMSG_WRITE },
-};
-
 static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
 {
 	{ TCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
@@ -145,12 +138,6 @@
 				 sizeof(nlmsg_route_perms));
 		break;
 
-	case SECCLASS_NETLINK_FIREWALL_SOCKET:
-	case SECCLASS_NETLINK_IP6FW_SOCKET:
-		err = nlmsg_perm(nlmsg_type, perm, nlmsg_firewall_perms,
-				 sizeof(nlmsg_firewall_perms));
-		break;
-
 	case SECCLASS_NETLINK_TCPDIAG_SOCKET:
 		err = nlmsg_perm(nlmsg_type, perm, nlmsg_tcpdiag_perms,
 				 sizeof(nlmsg_tcpdiag_perms));
diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c
index 64417a7..d8c670c 100644
--- a/sound/pci/echoaudio/echoaudio_dsp.c
+++ b/sound/pci/echoaudio/echoaudio_dsp.c
@@ -475,7 +475,7 @@
 	const struct firmware *fw;
 	int box_type, err;
 
-	if (snd_BUG_ON(!chip->dsp_code_to_load || !chip->comm_page))
+	if (snd_BUG_ON(!chip->comm_page))
 		return -EPERM;
 
 	/* See if the ASIC is present and working - only if the DSP is already loaded */
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 7a8fcc4..841475c 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -5444,10 +5444,6 @@
 	list_for_each_entry(codec, &bus->codec_list, list) {
 		if (hda_codec_is_power_on(codec))
 			hda_call_codec_suspend(codec);
-		else /* forcibly change the power to D3 even if not used */
-			hda_set_power_state(codec,
-					    codec->afg ? codec->afg : codec->mfg,
-					    AC_PWRST_D3);
 		if (codec->patch_ops.post_suspend)
 			codec->patch_ops.post_suspend(codec);
 	}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c19e71a..1f35052 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -783,11 +783,13 @@
 {
 	struct azx *chip = bus->private_data;
 	unsigned long timeout;
+	unsigned long loopcounter;
 	int do_poll = 0;
 
  again:
 	timeout = jiffies + msecs_to_jiffies(1000);
-	for (;;) {
+
+	for (loopcounter = 0;; loopcounter++) {
 		if (chip->polling_mode || do_poll) {
 			spin_lock_irq(&chip->reg_lock);
 			azx_update_rirb(chip);
@@ -803,7 +805,7 @@
 		}
 		if (time_after(jiffies, timeout))
 			break;
-		if (bus->needs_damn_long_delay)
+		if (bus->needs_damn_long_delay || loopcounter > 3000)
 			msleep(2); /* temporary workaround */
 		else {
 			udelay(10);
@@ -2351,6 +2353,17 @@
  * power management
  */
 
+static int snd_hda_codecs_inuse(struct hda_bus *bus)
+{
+	struct hda_codec *codec;
+
+	list_for_each_entry(codec, &bus->codec_list, list) {
+		if (snd_hda_codec_needs_resume(codec))
+			return 1;
+	}
+	return 0;
+}
+
 static int azx_suspend(struct pci_dev *pci, pm_message_t state)
 {
 	struct snd_card *card = pci_get_drvdata(pci);
@@ -2397,7 +2410,8 @@
 		return -EIO;
 	azx_init_pci(chip);
 
-	azx_init_chip(chip, 1);
+	if (snd_hda_codecs_inuse(chip->bus))
+		azx_init_chip(chip, 1);
 
 	snd_hda_resume(chip->bus);
 	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e65e354..7810913 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5405,6 +5405,8 @@
 	SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
 		      ALC882_FIXUP_ACER_ASPIRE_4930G),
 	SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
+	SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G",
+		      ALC882_FIXUP_ACER_ASPIRE_4930G),
 	SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
 	SND_PCI_QUIRK(0x1025, 0x026b, "Acer Aspire 8940G", ALC882_FIXUP_ACER_ASPIRE_8930G),
 	SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736),
@@ -5438,6 +5440,7 @@
 	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
 
 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3", ALC889_FIXUP_CD),
 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
@@ -5638,13 +5641,13 @@
 	snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PROC_COEF, tmp | 0x80);
 	}
 #endif
-	alc_auto_parse_customize_define(codec);
-
 	alc_fix_pll_init(codec, 0x20, 0x0a, 10);
 
 	alc_pick_fixup(codec, NULL, alc262_fixup_tbl, alc262_fixups);
 	alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
 
+	alc_auto_parse_customize_define(codec);
+
 	/* automatic parse from the BIOS config */
 	err = alc262_parse_auto_config(codec);
 	if (err < 0)
@@ -6109,6 +6112,7 @@
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED),
+	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
@@ -6248,8 +6252,6 @@
 
 	spec->mixer_nid = 0x0b;
 
-	alc_auto_parse_customize_define(codec);
-
 	err = alc_codec_rename_from_preset(codec);
 	if (err < 0)
 		goto error;
@@ -6282,6 +6284,8 @@
 		       alc269_fixup_tbl, alc269_fixups);
 	alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
 
+	alc_auto_parse_customize_define(codec);
+
 	/* automatic parse from the BIOS config */
 	err = alc269_parse_auto_config(codec);
 	if (err < 0)
@@ -6858,8 +6862,6 @@
 	/* handle multiple HPs as is */
 	spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
 
-	alc_auto_parse_customize_define(codec);
-
 	alc_fix_pll_init(codec, 0x20, 0x04, 15);
 
 	err = alc_codec_rename_from_preset(codec);
@@ -6876,6 +6878,9 @@
 	alc_pick_fixup(codec, alc662_fixup_models,
 		       alc662_fixup_tbl, alc662_fixups);
 	alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
+
+	alc_auto_parse_customize_define(codec);
+
 	/* automatic parse from the BIOS config */
 	err = alc662_parse_auto_config(codec);
 	if (err < 0)
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 4742cac..2cb1e08 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4415,9 +4415,9 @@
 		def_conf = get_defcfg_connect(def_conf);
 		/* skip any ports that don't have jacks since presence
  		 * detection is useless */
-		if (def_conf != AC_JACK_PORT_COMPLEX) {
-			if (def_conf != AC_JACK_PORT_NONE)
-				stac_toggle_power_map(codec, nid, 1);
+		if (def_conf != AC_JACK_PORT_NONE &&
+		    !is_jack_detectable(codec, nid)) {
+			stac_toggle_power_map(codec, nid, 1);
 			continue;
 		}
 		if (enable_pin_detect(codec, nid, STAC_PWR_EVENT)) {
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index b68cdec..0b2aea2 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -5170,6 +5170,7 @@
 	strcpy(hw->name, "HDSP hwdep interface");
 
 	hw->ops.ioctl = snd_hdsp_hwdep_ioctl;
+	hw->ops.ioctl_compat = snd_hdsp_hwdep_ioctl;
 
 	return 0;
 }
diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c
index df3ac73..b39ad35 100644
--- a/sound/soc/blackfin/bf5xx-ssm2602.c
+++ b/sound/soc/blackfin/bf5xx-ssm2602.c
@@ -99,6 +99,7 @@
 		.platform_name = "bfin-i2s-pcm-audio",
 		.codec_name = "ssm2602.0-001b",
 		.ops = &bf5xx_ssm2602_ops,
+		.dai_fmt = BF5XX_SSM2602_DAIFMT,
 	},
 	{
 		.name = "ssm2602",
@@ -108,6 +109,7 @@
 		.platform_name = "bfin-i2s-pcm-audio",
 		.codec_name = "ssm2602.0-001b",
 		.ops = &bf5xx_ssm2602_ops,
+		.dai_fmt = BF5XX_SSM2602_DAIFMT,
 	},
 };
 
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 78979b3..3686417 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -568,22 +568,22 @@
 			attn_tlv),
 
 	SOC_SINGLE_TLV("SPK-IP Mono Volume",
-			CS42L73_SPKMIPMA, 0, 0x3E, 1, attn_tlv),
+			CS42L73_SPKMIPMA, 0, 0x3F, 1, attn_tlv),
 	SOC_SINGLE_TLV("SPK-XSP Mono Volume",
-			CS42L73_SPKMXSPA, 0, 0x3E, 1, attn_tlv),
+			CS42L73_SPKMXSPA, 0, 0x3F, 1, attn_tlv),
 	SOC_SINGLE_TLV("SPK-ASP Mono Volume",
-			CS42L73_SPKMASPA, 0, 0x3E, 1, attn_tlv),
+			CS42L73_SPKMASPA, 0, 0x3F, 1, attn_tlv),
 	SOC_SINGLE_TLV("SPK-VSP Mono Volume",
-			CS42L73_SPKMVSPMA, 0, 0x3E, 1, attn_tlv),
+			CS42L73_SPKMVSPMA, 0, 0x3F, 1, attn_tlv),
 
 	SOC_SINGLE_TLV("ESL-IP Mono Volume",
-			CS42L73_ESLMIPMA, 0, 0x3E, 1, attn_tlv),
+			CS42L73_ESLMIPMA, 0, 0x3F, 1, attn_tlv),
 	SOC_SINGLE_TLV("ESL-XSP Mono Volume",
-			CS42L73_ESLMXSPA, 0, 0x3E, 1, attn_tlv),
+			CS42L73_ESLMXSPA, 0, 0x3F, 1, attn_tlv),
 	SOC_SINGLE_TLV("ESL-ASP Mono Volume",
-			CS42L73_ESLMASPA, 0, 0x3E, 1, attn_tlv),
+			CS42L73_ESLMASPA, 0, 0x3F, 1, attn_tlv),
 	SOC_SINGLE_TLV("ESL-VSP Mono Volume",
-			CS42L73_ESLMVSPMA, 0, 0x3E, 1, attn_tlv),
+			CS42L73_ESLMVSPMA, 0, 0x3F, 1, attn_tlv),
 
 	SOC_ENUM("IP Digital Swap/Mono Select", ip_swap_enum),
 
@@ -929,6 +929,8 @@
 
 	/* MCLKX -> MCLK */
 	mclkx_coeff = cs42l73_get_mclkx_coeff(freq);
+	if (mclkx_coeff < 0)
+		return mclkx_coeff;
 
 	mclk = cs42l73_mclkx_coeffs[mclkx_coeff].mclkx /
 		cs42l73_mclkx_coeffs[mclkx_coeff].ratio;
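
The guard added above matters because cs42l73_get_mclkx_coeff() returns a negative error when the frequency has no table entry, and that value was previously used directly as an index into cs42l73_mclkx_coeffs[]. A trimmed-down sketch of the same pattern, with a made-up rate table and find_rate() helper:

/* A lookup helper that can fail must have its negative result checked before
 * it is used as an array index.  The table and helper here are invented. */
#include <stdio.h>

static const unsigned int rates[] = { 5644800, 6000000, 6144000, 6500000 };

static int find_rate(unsigned int freq)
{
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (rates[i] == freq)
			return i;
	return -1;			/* not found: not a valid index */
}

int main(void)
{
	int idx = find_rate(123);

	if (idx < 0) {			/* the equivalent of the added guard */
		fprintf(stderr, "unsupported rate\n");
		return 1;
	}
	printf("rates[%d] = %u\n", idx, rates[idx]);
	return 0;
}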
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 8e92fb8..c395ec3 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -809,6 +809,7 @@
 {
 	struct ldo_regulator *ldo;
 	struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+	struct regulator_config config = { };
 
 	ldo = kzalloc(sizeof(struct ldo_regulator), GFP_KERNEL);
 
@@ -832,8 +833,11 @@
 	ldo->codec_data = codec;
 	ldo->voltage = voltage;
 
-	ldo->dev = regulator_register(&ldo->desc, codec->dev,
-					  init_data, ldo, NULL);
+	config.dev = codec->dev;
+	config.driver_data = ldo;
+	config.init_data = init_data;
+
+	ldo->dev = regulator_register(&ldo->desc, &config);
 	if (IS_ERR(ldo->dev)) {
 		int ret = PTR_ERR(ldo->dev);
 
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index 16d55f9..df1e07f 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -472,7 +472,7 @@
 static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec,
 				      enum snd_soc_bias_level level)
 {
-	u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0xff7f;
+	u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0x17f;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
@@ -491,7 +491,7 @@
 	case SND_SOC_BIAS_OFF:
 		/* everything off, dac mute, inactive */
 		snd_soc_write(codec, TLV320AIC23_ACTIVE, 0x0);
-		snd_soc_write(codec, TLV320AIC23_PWR, 0xffff);
+		snd_soc_write(codec, TLV320AIC23_PWR, 0x1ff);
 		break;
 	}
 	codec->dapm.bias_level = level;
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 8c4c959..aa12c6b 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -60,7 +60,7 @@
 };
 
 struct wm8350_data {
-	struct snd_soc_codec codec;
+	struct wm8350 *wm8350;
 	struct wm8350_output out1;
 	struct wm8350_output out2;
 	struct wm8350_jack_data hpl;
@@ -1309,7 +1309,7 @@
 			   struct wm8350_jack_data *jack,
 			   u16 mask)
 {
-	struct wm8350 *wm8350 = priv->codec.control_data;
+	struct wm8350 *wm8350 = priv->wm8350;
 	u16 reg;
 	int report;
 
@@ -1342,7 +1342,7 @@
 static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
 {
 	struct wm8350_data *priv = data;
-	struct wm8350 *wm8350 = priv->codec.control_data;
+	struct wm8350 *wm8350 = priv->wm8350;
 	struct wm8350_jack_data *jack = NULL;
 
 	switch (irq - wm8350->irq_base) {
@@ -1427,7 +1427,7 @@
 static irqreturn_t wm8350_mic_handler(int irq, void *data)
 {
 	struct wm8350_data *priv = data;
-	struct wm8350 *wm8350 = priv->codec.control_data;
+	struct wm8350 *wm8350 = priv->wm8350;
 	u16 reg;
 	int report = 0;
 
@@ -1536,6 +1536,8 @@
 		return -ENOMEM;
 	snd_soc_codec_set_drvdata(codec, priv);
 
+	priv->wm8350 = wm8350;
+
 	for (i = 0; i < ARRAY_SIZE(supply_names); i++)
 		priv->supplies[i].supply = supply_names[i];
 
@@ -1544,7 +1546,6 @@
 	if (ret != 0)
 		return ret;
 
-	wm8350->codec.codec = codec;
 	codec->control_data = wm8350;
 
 	/* Put the codec into reset if it wasn't already */
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 7c49642..2de12eb 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1000,6 +1000,204 @@
 	}
 }
 
+static int aif1clk_ev(struct snd_soc_dapm_widget *w,
+		      struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct wm8994 *control = codec->control_data;
+	int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
+	int dac;
+	int adc;
+	int val;
+
+	switch (control->type) {
+	case WM8994:
+	case WM8958:
+		mask |= WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA;
+		break;
+	default:
+		break;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1);
+		if ((val & WM8994_AIF1ADCL_SRC) &&
+		    (val & WM8994_AIF1ADCR_SRC))
+			adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA;
+		else if (!(val & WM8994_AIF1ADCL_SRC) &&
+			 !(val & WM8994_AIF1ADCR_SRC))
+			adc = WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA;
+		else
+			adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA |
+				WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA;
+
+		val = snd_soc_read(codec, WM8994_AIF1_CONTROL_2);
+		if ((val & WM8994_AIF1DACL_SRC) &&
+		    (val & WM8994_AIF1DACR_SRC))
+			dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA;
+		else if (!(val & WM8994_AIF1DACL_SRC) &&
+			 !(val & WM8994_AIF1DACR_SRC))
+			dac = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA;
+		else
+			dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA |
+				WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA;
+
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
+				    mask, adc);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    mask, dac);
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8994_AIF1DSPCLK_ENA |
+				    WM8994_SYSDSPCLK_ENA,
+				    WM8994_AIF1DSPCLK_ENA |
+				    WM8994_SYSDSPCLK_ENA);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, mask,
+				    WM8994_AIF1ADC1R_ENA |
+				    WM8994_AIF1ADC1L_ENA |
+				    WM8994_AIF1ADC2R_ENA |
+				    WM8994_AIF1ADC2L_ENA);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, mask,
+				    WM8994_AIF1DAC1R_ENA |
+				    WM8994_AIF1DAC1L_ENA |
+				    WM8994_AIF1DAC2R_ENA |
+				    WM8994_AIF1DAC2L_ENA);
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    mask, 0);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
+				    mask, 0);
+
+		val = snd_soc_read(codec, WM8994_CLOCKING_1);
+		if (val & WM8994_AIF2DSPCLK_ENA)
+			val = WM8994_SYSDSPCLK_ENA;
+		else
+			val = 0;
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8994_SYSDSPCLK_ENA |
+				    WM8994_AIF1DSPCLK_ENA, val);
+		break;
+	}
+
+	return 0;
+}
+
+static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+		      struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	int dac;
+	int adc;
+	int val;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		val = snd_soc_read(codec, WM8994_AIF2_CONTROL_1);
+		if ((val & WM8994_AIF2ADCL_SRC) &&
+		    (val & WM8994_AIF2ADCR_SRC))
+			adc = WM8994_AIF2ADCR_ENA;
+		else if (!(val & WM8994_AIF2ADCL_SRC) &&
+			 !(val & WM8994_AIF2ADCR_SRC))
+			adc = WM8994_AIF2ADCL_ENA;
+		else
+			adc = WM8994_AIF2ADCL_ENA | WM8994_AIF2ADCR_ENA;
+
+
+		val = snd_soc_read(codec, WM8994_AIF2_CONTROL_2);
+		if ((val & WM8994_AIF2DACL_SRC) &&
+		    (val & WM8994_AIF2DACR_SRC))
+			dac = WM8994_AIF2DACR_ENA;
+		else if (!(val & WM8994_AIF2DACL_SRC) &&
+			 !(val & WM8994_AIF2DACR_SRC))
+			dac = WM8994_AIF2DACL_ENA;
+		else
+			dac = WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA;
+
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
+				    WM8994_AIF2ADCL_ENA |
+				    WM8994_AIF2ADCR_ENA, adc);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    WM8994_AIF2DACL_ENA |
+				    WM8994_AIF2DACR_ENA, dac);
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8994_AIF2DSPCLK_ENA |
+				    WM8994_SYSDSPCLK_ENA,
+				    WM8994_AIF2DSPCLK_ENA |
+				    WM8994_SYSDSPCLK_ENA);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
+				    WM8994_AIF2ADCL_ENA |
+				    WM8994_AIF2ADCR_ENA,
+				    WM8994_AIF2ADCL_ENA |
+				    WM8994_AIF2ADCR_ENA);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    WM8994_AIF2DACL_ENA |
+				    WM8994_AIF2DACR_ENA,
+				    WM8994_AIF2DACL_ENA |
+				    WM8994_AIF2DACR_ENA);
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+				    WM8994_AIF2DACL_ENA |
+				    WM8994_AIF2DACR_ENA, 0);
+		snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
+				    WM8994_AIF2ADCL_ENA |
+				    WM8994_AIF2ADCR_ENA, 0);
+
+		val = snd_soc_read(codec, WM8994_CLOCKING_1);
+		if (val & WM8994_AIF1DSPCLK_ENA)
+			val = WM8994_SYSDSPCLK_ENA;
+		else
+			val = 0;
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8994_SYSDSPCLK_ENA |
+				    WM8994_AIF2DSPCLK_ENA, val);
+		break;
+	}
+
+	return 0;
+}
+
+static int aif1clk_late_ev(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		wm8994->aif1clk_enable = 1;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wm8994->aif1clk_disable = 1;
+		break;
+	}
+
+	return 0;
+}
+
+static int aif2clk_late_ev(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		wm8994->aif2clk_enable = 1;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wm8994->aif2clk_disable = 1;
+		break;
+	}
+
+	return 0;
+}
+
 static int late_enable_ev(struct snd_soc_dapm_widget *w,
 			  struct snd_kcontrol *kcontrol, int event)
 {
@@ -1009,12 +1207,14 @@
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
 		if (wm8994->aif1clk_enable) {
+			aif1clk_ev(w, kcontrol, event);
 			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
 					    WM8994_AIF1CLK_ENA_MASK,
 					    WM8994_AIF1CLK_ENA);
 			wm8994->aif1clk_enable = 0;
 		}
 		if (wm8994->aif2clk_enable) {
+			aif2clk_ev(w, kcontrol, event);
 			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
 					    WM8994_AIF2CLK_ENA_MASK,
 					    WM8994_AIF2CLK_ENA);
@@ -1040,11 +1240,13 @@
 		if (wm8994->aif1clk_disable) {
 			snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
 					    WM8994_AIF1CLK_ENA_MASK, 0);
+			aif1clk_ev(w, kcontrol, event);
 			wm8994->aif1clk_disable = 0;
 		}
 		if (wm8994->aif2clk_disable) {
 			snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
 					    WM8994_AIF2CLK_ENA_MASK, 0);
+			aif2clk_ev(w, kcontrol, event);
 			wm8994->aif2clk_disable = 0;
 		}
 		break;
@@ -1053,42 +1255,6 @@
 	return 0;
 }
 
-static int aif1clk_ev(struct snd_soc_dapm_widget *w,
-		      struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = w->codec;
-	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		wm8994->aif1clk_enable = 1;
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		wm8994->aif1clk_disable = 1;
-		break;
-	}
-
-	return 0;
-}
-
-static int aif2clk_ev(struct snd_soc_dapm_widget *w,
-		      struct snd_kcontrol *kcontrol, int event)
-{
-	struct snd_soc_codec *codec = w->codec;
-	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
-
-	switch (event) {
-	case SND_SOC_DAPM_PRE_PMU:
-		wm8994->aif2clk_enable = 1;
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		wm8994->aif2clk_disable = 1;
-		break;
-	}
-
-	return 0;
-}
-
 static int adc_mux_ev(struct snd_soc_dapm_widget *w,
 		      struct snd_kcontrol *kcontrol, int event)
 {
@@ -1385,9 +1551,9 @@
 	SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
 
 static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
-SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
+SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_late_ev,
 	SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
+SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_late_ev,
 	SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 
 SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
@@ -1416,8 +1582,10 @@
 };
 
 static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
-SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
 SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
 		   left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
@@ -1470,30 +1638,30 @@
 SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
 		    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
-SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
-SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("DSP1CLK", SND_SOC_NOPM, 3, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("DSP2CLK", SND_SOC_NOPM, 2, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("DSPINTCLK", SND_SOC_NOPM, 1, 0, NULL, 0),
 
 SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
-		     0, WM8994_POWER_MANAGEMENT_4, 9, 0),
+		     0, SND_SOC_NOPM, 9, 0),
 SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
-		     0, WM8994_POWER_MANAGEMENT_4, 8, 0),
+		     0, SND_SOC_NOPM, 8, 0),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 9, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 8, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL,
-		     0, WM8994_POWER_MANAGEMENT_4, 11, 0),
+		     0, SND_SOC_NOPM, 11, 0),
 SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL,
-		     0, WM8994_POWER_MANAGEMENT_4, 10, 0),
+		     0, SND_SOC_NOPM, 10, 0),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 11, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC2R", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 10, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 10, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
@@ -1520,14 +1688,14 @@
 		   dac1r_mix, ARRAY_SIZE(dac1r_mix)),
 
 SND_SOC_DAPM_AIF_OUT("AIF2ADCL", NULL, 0,
-		     WM8994_POWER_MANAGEMENT_4, 13, 0),
+		     SND_SOC_NOPM, 13, 0),
 SND_SOC_DAPM_AIF_OUT("AIF2ADCR", NULL, 0,
-		     WM8994_POWER_MANAGEMENT_4, 12, 0),
+		     SND_SOC_NOPM, 12, 0),
 SND_SOC_DAPM_AIF_IN_E("AIF2DACL", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 13, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 13, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0,
-		      WM8994_POWER_MANAGEMENT_5, 12, 0, wm8958_aif_ev,
+		      SND_SOC_NOPM, 12, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
 SND_SOC_DAPM_AIF_IN("AIF1DACDAT", NULL, 0, SND_SOC_NOPM, 0, 0),
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index f13f288..6c028c4 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -1035,7 +1035,7 @@
 			    enum snd_soc_bias_level level)
 {
 	struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
-	int val;
+	int mask, val;
 
 	switch (level) {
 	case SND_SOC_BIAS_STANDBY:
@@ -1047,6 +1047,13 @@
 	case SND_SOC_BIAS_ON:
 		/* Turn off any unneded single ended outputs */
 		val = 0;
+		mask = 0;
+
+		if (hubs->lineout1_se)
+			mask |= WM8993_LINEOUT1N_ENA | WM8993_LINEOUT1P_ENA;
+
+		if (hubs->lineout2_se)
+			mask |= WM8993_LINEOUT2N_ENA | WM8993_LINEOUT2P_ENA;
 
 		if (hubs->lineout1_se && hubs->lineout1n_ena)
 			val |= WM8993_LINEOUT1N_ENA;
@@ -1061,11 +1068,7 @@
 			val |= WM8993_LINEOUT2P_ENA;
 
 		snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_3,
-				    WM8993_LINEOUT1N_ENA |
-				    WM8993_LINEOUT1P_ENA |
-				    WM8993_LINEOUT2N_ENA |
-				    WM8993_LINEOUT2P_ENA,
-				    val);
+				    mask, val);
 
 		/* Remove the input clamps */
 		snd_soc_update_bits(codec, WM8993_INPUTS_CLAMP_REG,
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index a59bd35..5a649da 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -401,6 +401,10 @@
 	}
 
 out:
+	/* free preallocated buffers in case of error */
+	if (ret)
+		omap_pcm_free_dma_buffers(pcm);
+
 	return ret;
 }
 
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 7218507..79fbeea 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -166,7 +166,7 @@
 
 static __devinit int s3c2412_iis_dev_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_dai(&pdev->dev, &s3c2412_i2s_dai);
+	return s3c_i2sv2_register_dai(&pdev->dev, -1, &s3c2412_i2s_dai);
 }
 
 static __devexit int s3c2412_iis_dev_remove(struct platform_device *pdev)
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 378cc5b..74ed2df 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1001,11 +1001,10 @@
 	sg_dma_address(&sg) = buf;
 	sg_dma_len(&sg) = len;
 
-	desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir,
-						  DMA_PREP_INTERRUPT |
-						  DMA_CTRL_ACK);
+	desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
-		dev_err(dai->dev, "device_prep_slave_sg() fail\n");
+		dev_err(dai->dev, "dmaengine_prep_slave_sg() fail\n");
 		return;
 	}
 
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index 9d9ad8d..8526e1e 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -35,7 +35,7 @@
 	return codec_freq;
 }
 
-static struct clk_ops siumckb_clk_ops = {
+static struct sh_clk_ops siumckb_clk_ops = {
 	.recalc = siumckb_recalc,
 };
 
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index accdcb7..c88d974 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3113,6 +3113,7 @@
 				 GFP_KERNEL);
 	if (card->rtd == NULL)
 		return -ENOMEM;
+	card->num_rtd = 0;
 	card->rtd_aux = &card->rtd[card->num_links];
 
 	for (i = 0; i < card->num_links; i++)
@@ -3624,10 +3625,10 @@
 	int i, ret;
 
 	num_routes = of_property_count_strings(np, propname);
-	if (num_routes & 1) {
+	if (num_routes < 0 || num_routes & 1) {
 		dev_err(card->dev,
-			"Property '%s's length is not even\n",
-			propname);
+		     "Property '%s' does not exist or its length is not even\n",
+		     propname);
 		return -EINVAL;
 	}
 	num_routes /= 2;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 5cbd2d76..1bb6d4a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -67,6 +67,7 @@
 	[snd_soc_dapm_out_drv] = 10,
 	[snd_soc_dapm_hp] = 10,
 	[snd_soc_dapm_spk] = 10,
+	[snd_soc_dapm_line] = 10,
 	[snd_soc_dapm_post] = 11,
 };
 
@@ -75,6 +76,7 @@
 	[snd_soc_dapm_adc] = 1,
 	[snd_soc_dapm_hp] = 2,
 	[snd_soc_dapm_spk] = 2,
+	[snd_soc_dapm_line] = 2,
 	[snd_soc_dapm_out_drv] = 2,
 	[snd_soc_dapm_pga] = 4,
 	[snd_soc_dapm_mixer_named_ctl] = 5,
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 03059e7..92271d3 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -234,8 +234,8 @@
 
 export PERL_PATH
 
-FLEX = $(CROSS_COMPILE)flex
-BISON= $(CROSS_COMPILE)bison
+FLEX = flex
+BISON= bison
 
 $(OUTPUT)util/parse-events-flex.c: util/parse-events.l
 	$(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
@@ -774,10 +774,10 @@
 # over the general rule for .o
 
 $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $<
 
 $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
-	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $<
 
 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
 	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 2e31743..cdae9b2 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -374,16 +374,23 @@
 	    (kernel_map->dso->hit &&
 	     (kernel_kmap->ref_reloc_sym == NULL ||
 	      kernel_kmap->ref_reloc_sym->addr == 0))) {
-		const struct dso *kdso = kernel_map->dso;
+		const char *desc =
+		    "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
+		    "can't be resolved.";
+
+		if (kernel_map) {
+			const struct dso *kdso = kernel_map->dso;
+			if (!RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION])) {
+				desc = "If some relocation was applied (e.g. "
+				       "kexec) symbols may be misresolved.";
+			}
+		}
 
 		ui__warning(
 "Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
 "Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
 "Samples in kernel modules can't be resolved as well.\n\n",
-			    RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION]) ?
-"As no suitable kallsyms nor vmlinux was found, kernel samples\n"
-"can't be resolved." :
-"If some relocation was applied (e.g. kexec) symbols may be misresolved.");
+		desc);
 	}
 
 	if (dump_trace) {
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index c941bb6..1e5e9b2 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -283,6 +283,8 @@
 {
 	struct perf_event_attr *attr = &evsel->attr;
 	struct xyarray *group_fd = NULL;
+	bool exclude_guest_missing = false;
+	int ret;
 
 	if (group && evsel != first)
 		group_fd = first->fd;
@@ -293,16 +295,39 @@
 
 	attr->inherit = !no_inherit;
 
-	if (system_wide)
-		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
+retry:
+	if (exclude_guest_missing)
+		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
+
+	if (system_wide) {
+		ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
 						group, group_fd);
+		if (ret)
+			goto check_ret;
+		return 0;
+	}
+
 	if (!target_pid && !target_tid && (!group || evsel == first)) {
 		attr->disabled = 1;
 		attr->enable_on_exec = 1;
 	}
 
-	return perf_evsel__open_per_thread(evsel, evsel_list->threads,
-					   group, group_fd);
+	ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
+					  group, group_fd);
+	if (!ret)
+		return 0;
+	/* fall through */
+check_ret:
+	if (ret && errno == EINVAL) {
+		if (!exclude_guest_missing &&
+		    (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
+			pr_debug("Old kernel, cannot exclude "
+				 "guest or host samples.\n");
+			exclude_guest_missing = true;
+			goto retry;
+		}
+	}
+	return ret;
 }
 
 /*
@@ -463,8 +488,13 @@
 
 	list_for_each_entry(counter, &evsel_list->entries, node) {
 		if (create_perf_stat_counter(counter, first) < 0) {
+			/*
+			 * PPC returns ENXIO for HW counters until 2.6.37
+			 * (behavior changed with commit b0a873e).
+			 */
 			if (errno == EINVAL || errno == ENOSYS ||
-			    errno == ENOENT || errno == EOPNOTSUPP) {
+			    errno == ENOENT || errno == EOPNOTSUPP ||
+			    errno == ENXIO) {
 				if (verbose)
 					ui__warning("%s event is not supported by the kernel.\n",
 						    event_name(counter));
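
The create_perf_stat_counter() rework above retries a failed open after clearing the exclude_guest/exclude_host bits, treating EINVAL as a sign of an older kernel that simply does not know those attribute fields (and the "not supported" errno whitelist grows ENXIO for pre-2.6.37 PPC). The sketch below reproduces the retry shape only; try_open() is a hypothetical stand-in for the perf_evsel__open_per_cpu()/per_thread() helpers.

/* Retry-with-degraded-attributes: if the first open fails with EINVAL and the
 * optional guest/host exclusion bits were set, drop them and try once more.
 * try_open() fakes an old kernel that rejects those bits. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct attr {
	bool exclude_guest;
	bool exclude_host;
};

static int try_open(const struct attr *a)
{
	if (a->exclude_guest || a->exclude_host) {
		errno = EINVAL;		/* "unknown attribute bits" */
		return -1;
	}
	return 0;
}

static int open_counter(struct attr *a)
{
	bool exclude_guest_missing = false;
	int ret;

retry:
	if (exclude_guest_missing)
		a->exclude_guest = a->exclude_host = false;

	ret = try_open(a);
	if (ret && errno == EINVAL && !exclude_guest_missing &&
	    (a->exclude_guest || a->exclude_host)) {
		fprintf(stderr, "old kernel, retrying without guest/host exclusion\n");
		exclude_guest_missing = true;
		goto retry;
	}
	return ret;
}

int main(void)
{
	struct attr a = { .exclude_guest = true, .exclude_host = false };

	printf("open_counter: %d\n", open_counter(&a));
	return 0;
}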
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1c5b980..223ffdc 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -851,6 +851,28 @@
 	return test__checkevent_symbolic_name(evlist);
 }
 
+static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+	TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+
+	return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = list_entry(evlist->entries.next,
+					      struct perf_evsel, node);
+
+	TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+	TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+
+	return test__checkevent_symbolic_name(evlist);
+}
+
 static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
 {
 	struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -1091,6 +1113,14 @@
 		.name  = "r1,syscalls:sys_enter_open:k,1:1:hp",
 		.check = test__checkevent_list,
 	},
+	{
+		.name  = "instructions:G",
+		.check = test__checkevent_exclude_host_modifier,
+	},
+	{
+		.name  = "instructions:H",
+		.check = test__checkevent_exclude_guest_modifier,
+	},
 };
 
 #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4c7c2d7..c0b70c6 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -296,7 +296,7 @@
 	if (mkdir_p(filename, 0755))
 		goto out_free;
 
-	snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
+	snprintf(filename + len, size - len, "/%s", sbuild_id);
 
 	if (access(filename, F_OK)) {
 		if (is_kallsyms) {
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 05d766e..1fcf1bb 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -54,7 +54,7 @@
 num_hex		0x[a-fA-F0-9]+
 num_raw_hex	[a-fA-F0-9]+
 name		[a-zA-Z_*?][a-zA-Z0-9_*?]*
-modifier_event	[ukhp]{1,5}
+modifier_event	[ukhpGH]{1,8}
 modifier_bp	[rwx]
 
 %%
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c0a028c..ab9867b 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -977,8 +977,9 @@
  * And always look at the original dso, not at debuginfo packages, that
  * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
  */
-static int dso__synthesize_plt_symbols(struct  dso *dso, struct map *map,
-				       symbol_filter_t filter)
+static int
+dso__synthesize_plt_symbols(struct dso *dso, char *name, struct map *map,
+			    symbol_filter_t filter)
 {
 	uint32_t nr_rel_entries, idx;
 	GElf_Sym sym;
@@ -993,10 +994,7 @@
 	char sympltname[1024];
 	Elf *elf;
 	int nr = 0, symidx, fd, err = 0;
-	char name[PATH_MAX];
 
-	snprintf(name, sizeof(name), "%s%s",
-		 symbol_conf.symfs, dso->long_name);
 	fd = open(name, O_RDONLY);
 	if (fd < 0)
 		goto out;
@@ -1703,8 +1701,9 @@
 			continue;
 
 		if (ret > 0) {
-			int nr_plt = dso__synthesize_plt_symbols(dso, map,
-								 filter);
+			int nr_plt;
+
+			nr_plt = dso__synthesize_plt_symbols(dso, name, map, filter);
 			if (nr_plt > 0)
 				ret += nr_plt;
 			break;
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 95d6a6f..4915408 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -183,6 +183,9 @@
 # do not force reboots on config problems
 my $no_reboot = 1;
 
+# reboot on success
+my $reboot_success = 0;
+
 my %option_map = (
     "MACHINE"			=> \$machine,
     "SSH_USER"			=> \$ssh_user,
@@ -2192,7 +2195,7 @@
     }
 
     # Are we looking for where it worked, not failed?
-    if ($reverse_bisect) {
+    if ($reverse_bisect && $ret >= 0) {
 	$ret = !$ret;
     }
 
@@ -3469,6 +3472,7 @@
 
     # Do not reboot on failing test options
     $no_reboot = 1;
+    $reboot_success = 0;
 
     $iteration = $i;
 
@@ -3554,9 +3558,11 @@
 	    die "failed to checkout $checkout";
     }
 
+    $no_reboot = 0;
+
     # A test may opt to not reboot the box
     if ($reboot_on_success) {
-	$no_reboot = 0;
+	$reboot_success = 1;
     }
 
     if ($test_type eq "bisect") {
@@ -3600,7 +3606,7 @@
 
 if ($opt{"POWEROFF_ON_SUCCESS"}) {
     halt;
-} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot) {
+} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot && $reboot_success) {
     reboot_to_good;
 } elsif (defined($switch_to_good)) {
     # still need to get to the good kernel
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index 7579f19..81847dd 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -203,6 +203,7 @@
 void virtqueue_disable_cb(struct virtqueue *vq);
 
 bool virtqueue_enable_cb(struct virtqueue *vq);
+bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
 
 void *virtqueue_detach_unused_buf(struct virtqueue *vq);
 struct virtqueue *vring_new_virtqueue(unsigned int num,
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index 6bf95f9..e626fa5 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -144,7 +144,8 @@
 		}
 }
 
-static void run_test(struct vdev_info *dev, struct vq_info *vq, int bufs)
+static void run_test(struct vdev_info *dev, struct vq_info *vq,
+		     bool delayed, int bufs)
 {
 	struct scatterlist sl;
 	long started = 0, completed = 0;
@@ -183,8 +184,12 @@
 		assert(started <= bufs);
 		if (completed == bufs)
 			break;
-		if (virtqueue_enable_cb(vq->vq)) {
-			wait_for_interrupt(dev);
+		if (delayed) {
+			if (virtqueue_enable_cb_delayed(vq->vq))
+				wait_for_interrupt(dev);
+		} else {
+			if (virtqueue_enable_cb(vq->vq))
+				wait_for_interrupt(dev);
 		}
 	}
 	test = 0;
@@ -216,6 +221,14 @@
 		.val = 'i',
 	},
 	{
+		.name = "delayed-interrupt",
+		.val = 'D',
+	},
+	{
+		.name = "no-delayed-interrupt",
+		.val = 'd',
+	},
+	{
 	}
 };
 
@@ -224,6 +237,7 @@
 	fprintf(stderr, "Usage: virtio_test [--help]"
 		" [--no-indirect]"
 		" [--no-event-idx]"
+		" [--delayed-interrupt]"
 		"\n");
 }
 
@@ -233,6 +247,7 @@
 	unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
 		(1ULL << VIRTIO_RING_F_EVENT_IDX);
 	int o;
+	bool delayed = false;
 
 	for (;;) {
 		o = getopt_long(argc, argv, optstring, longopts, NULL);
@@ -251,6 +266,9 @@
 		case 'i':
 			features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC);
 			break;
+		case 'D':
+			delayed = true;
+			break;
 		default:
 			assert(0);
 			break;
@@ -260,6 +278,6 @@
 done:
 	vdev_info_init(&dev, features);
 	vq_info_add(&dev, 256);
-	run_test(&dev, &dev.vqs[0], 0x100000);
+	run_test(&dev, &dev.vqs[0], delayed, 0x100000);
 	return 0;
 }
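
virtio_test above gains a --delayed-interrupt long option that is parsed by getopt_long() and handed down to run_test(), where it selects virtqueue_enable_cb_delayed() over virtqueue_enable_cb(). A self-contained sketch of just that option plumbing, with the actual test replaced by a stub:

/* Parse a boolean long option and thread it through to the worker, following
 * the same getopt_long() shape as virtio_test; run_test() is stubbed out. */
#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>

static const struct option longopts[] = {
	{ .name = "delayed-interrupt", .has_arg = no_argument, .val = 'D' },
	{ .name = "no-delayed-interrupt", .has_arg = no_argument, .val = 'd' },
	{ 0 }
};

static void run_test(bool delayed)
{
	printf("running with %s interrupt enabling\n",
	       delayed ? "delayed" : "immediate");
}

int main(int argc, char **argv)
{
	bool delayed = false;
	int o;

	while ((o = getopt_long(argc, argv, "Dd", longopts, NULL)) != -1) {
		switch (o) {
		case 'D':
			delayed = true;
			break;
		case 'd':
			delayed = false;
			break;
		default:
			fprintf(stderr, "usage: %s [--delayed-interrupt]\n", argv[0]);
			return 1;
		}
	}

	run_test(delayed);
	return 0;
}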